Repository: apple/swift-nio-ssl Branch: main Commit: 3f337058ccd7 Files: 769 Total size: 14.3 MB Directory structure: gitextract_pqg_y_t3/ ├── .editorconfig ├── .gitattributes ├── .github/ │ ├── release.yml │ └── workflows/ │ ├── main.yml │ ├── pull_request.yml │ └── pull_request_label.yml ├── .gitignore ├── .licenseignore ├── .spi.yml ├── .swift-format ├── .unacceptablelanguageignore ├── Benchmarks/ │ ├── Benchmarks/ │ │ └── NIOSSLBenchmarks/ │ │ ├── Benchmarks.swift │ │ ├── ManyWrites.swift │ │ ├── Shared.swift │ │ └── SimpleHandshake.swift │ ├── Package.swift │ └── Thresholds/ │ ├── 6.1/ │ │ ├── NIOSSLBenchmarks.ManyWrites.p90.json │ │ └── NIOSSLBenchmarks.SimpleHandshake.p90.json │ ├── 6.2/ │ │ ├── NIOSSLBenchmarks.ManyWrites.p90.json │ │ └── NIOSSLBenchmarks.SimpleHandshake.p90.json │ ├── 6.3/ │ │ ├── NIOSSLBenchmarks.ManyWrites.p90.json │ │ └── NIOSSLBenchmarks.SimpleHandshake.p90.json │ ├── nightly-main/ │ │ ├── NIOSSLBenchmarks.ManyWrites.p90.json │ │ └── NIOSSLBenchmarks.SimpleHandshake.p90.json │ └── nightly-next/ │ ├── NIOSSLBenchmarks.ManyWrites.p90.json │ └── NIOSSLBenchmarks.SimpleHandshake.p90.json ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── CONTRIBUTORS.txt ├── IntegrationTests/ │ ├── plugin_echo.sh │ ├── plugin_junit_xml.sh │ ├── run-single-test.sh │ ├── run-tests.sh │ ├── test_functions.sh │ └── tests_01_general/ │ ├── defines.sh │ ├── test_01_renegotiation.sh │ └── test_02_execstack.sh ├── LICENSE.txt ├── NOTICE.txt ├── Package.swift ├── README.md ├── SECURITY.md ├── Sources/ │ ├── CNIOBoringSSL/ │ │ ├── crypto/ │ │ │ ├── asn1/ │ │ │ │ ├── a_bitstr.cc │ │ │ │ ├── a_bool.cc │ │ │ │ ├── a_d2i_fp.cc │ │ │ │ ├── a_dup.cc │ │ │ │ ├── a_gentm.cc │ │ │ │ ├── a_i2d_fp.cc │ │ │ │ ├── a_int.cc │ │ │ │ ├── a_mbstr.cc │ │ │ │ ├── a_object.cc │ │ │ │ ├── a_octet.cc │ │ │ │ ├── a_strex.cc │ │ │ │ ├── a_strnid.cc │ │ │ │ ├── a_time.cc │ │ │ │ ├── a_type.cc │ │ │ │ ├── a_utctm.cc │ │ │ │ ├── asn1_lib.cc │ │ │ │ ├── asn1_par.cc │ │ │ │ ├── asn_pack.cc │ │ │ │ 
├── f_int.cc │ │ │ │ ├── f_string.cc │ │ │ │ ├── internal.h │ │ │ │ ├── posix_time.cc │ │ │ │ ├── tasn_dec.cc │ │ │ │ ├── tasn_enc.cc │ │ │ │ ├── tasn_fre.cc │ │ │ │ ├── tasn_new.cc │ │ │ │ ├── tasn_typ.cc │ │ │ │ └── tasn_utl.cc │ │ │ ├── base64/ │ │ │ │ └── base64.cc │ │ │ ├── bcm_support.h │ │ │ ├── bio/ │ │ │ │ ├── bio.cc │ │ │ │ ├── bio_mem.cc │ │ │ │ ├── connect.cc │ │ │ │ ├── errno.cc │ │ │ │ ├── fd.cc │ │ │ │ ├── file.cc │ │ │ │ ├── hexdump.cc │ │ │ │ ├── internal.h │ │ │ │ ├── pair.cc │ │ │ │ ├── printf.cc │ │ │ │ ├── socket.cc │ │ │ │ └── socket_helper.cc │ │ │ ├── blake2/ │ │ │ │ └── blake2.cc │ │ │ ├── bn/ │ │ │ │ ├── bn_asn1.cc │ │ │ │ └── convert.cc │ │ │ ├── buf/ │ │ │ │ └── buf.cc │ │ │ ├── bytestring/ │ │ │ │ ├── asn1_compat.cc │ │ │ │ ├── ber.cc │ │ │ │ ├── cbb.cc │ │ │ │ ├── cbs.cc │ │ │ │ ├── internal.h │ │ │ │ └── unicode.cc │ │ │ ├── chacha/ │ │ │ │ ├── chacha.cc │ │ │ │ └── internal.h │ │ │ ├── cipher/ │ │ │ │ ├── derive_key.cc │ │ │ │ ├── e_aesctrhmac.cc │ │ │ │ ├── e_aesgcmsiv.cc │ │ │ │ ├── e_chacha20poly1305.cc │ │ │ │ ├── e_des.cc │ │ │ │ ├── e_null.cc │ │ │ │ ├── e_rc2.cc │ │ │ │ ├── e_rc4.cc │ │ │ │ ├── e_tls.cc │ │ │ │ ├── get_cipher.cc │ │ │ │ ├── internal.h │ │ │ │ └── tls_cbc.cc │ │ │ ├── conf/ │ │ │ │ ├── conf.cc │ │ │ │ └── internal.h │ │ │ ├── cpu_aarch64_apple.cc │ │ │ ├── cpu_aarch64_fuchsia.cc │ │ │ ├── cpu_aarch64_linux.cc │ │ │ ├── cpu_aarch64_openbsd.cc │ │ │ ├── cpu_aarch64_sysreg.cc │ │ │ ├── cpu_aarch64_win.cc │ │ │ ├── cpu_arm_freebsd.cc │ │ │ ├── cpu_arm_linux.cc │ │ │ ├── cpu_arm_linux.h │ │ │ ├── cpu_intel.cc │ │ │ ├── crypto.cc │ │ │ ├── curve25519/ │ │ │ │ ├── asm/ │ │ │ │ │ └── x25519-asm-arm.S │ │ │ │ ├── curve25519.cc │ │ │ │ ├── curve25519_64_adx.cc │ │ │ │ ├── curve25519_tables.h │ │ │ │ ├── internal.h │ │ │ │ └── spake25519.cc │ │ │ ├── des/ │ │ │ │ ├── des.cc │ │ │ │ └── internal.h │ │ │ ├── dh/ │ │ │ │ ├── dh_asn1.cc │ │ │ │ └── params.cc │ │ │ ├── digest/ │ │ │ │ └── digest_extra.cc │ │ │ ├── dsa/ │ │ │ │ 
├── dsa.cc │ │ │ │ ├── dsa_asn1.cc │ │ │ │ └── internal.h │ │ │ ├── ec/ │ │ │ │ ├── ec_asn1.cc │ │ │ │ ├── ec_derive.cc │ │ │ │ ├── hash_to_curve.cc │ │ │ │ └── internal.h │ │ │ ├── ecdh/ │ │ │ │ └── ecdh.cc │ │ │ ├── ecdsa/ │ │ │ │ └── ecdsa_asn1.cc │ │ │ ├── engine/ │ │ │ │ └── engine.cc │ │ │ ├── err/ │ │ │ │ ├── err.cc │ │ │ │ └── internal.h │ │ │ ├── evp/ │ │ │ │ ├── evp.cc │ │ │ │ ├── evp_asn1.cc │ │ │ │ ├── evp_ctx.cc │ │ │ │ ├── internal.h │ │ │ │ ├── p_dh.cc │ │ │ │ ├── p_dh_asn1.cc │ │ │ │ ├── p_dsa_asn1.cc │ │ │ │ ├── p_ec.cc │ │ │ │ ├── p_ec_asn1.cc │ │ │ │ ├── p_ed25519.cc │ │ │ │ ├── p_ed25519_asn1.cc │ │ │ │ ├── p_hkdf.cc │ │ │ │ ├── p_rsa.cc │ │ │ │ ├── p_rsa_asn1.cc │ │ │ │ ├── p_x25519.cc │ │ │ │ ├── p_x25519_asn1.cc │ │ │ │ ├── pbkdf.cc │ │ │ │ ├── print.cc │ │ │ │ ├── scrypt.cc │ │ │ │ └── sign.cc │ │ │ ├── ex_data.cc │ │ │ ├── fipsmodule/ │ │ │ │ ├── aes/ │ │ │ │ │ ├── aes.cc.inc │ │ │ │ │ ├── aes_nohw.cc.inc │ │ │ │ │ ├── cbc.cc.inc │ │ │ │ │ ├── cfb.cc.inc │ │ │ │ │ ├── ctr.cc.inc │ │ │ │ │ ├── gcm.cc.inc │ │ │ │ │ ├── gcm_nohw.cc.inc │ │ │ │ │ ├── internal.h │ │ │ │ │ ├── key_wrap.cc.inc │ │ │ │ │ ├── mode_wrappers.cc.inc │ │ │ │ │ ├── ofb.cc.inc │ │ │ │ │ └── polyval.cc.inc │ │ │ │ ├── bcm.cc │ │ │ │ ├── bcm_interface.h │ │ │ │ ├── bn/ │ │ │ │ │ ├── add.cc.inc │ │ │ │ │ ├── asm/ │ │ │ │ │ │ └── x86_64-gcc.cc.inc │ │ │ │ │ ├── bn.cc.inc │ │ │ │ │ ├── bytes.cc.inc │ │ │ │ │ ├── cmp.cc.inc │ │ │ │ │ ├── ctx.cc.inc │ │ │ │ │ ├── div.cc.inc │ │ │ │ │ ├── div_extra.cc.inc │ │ │ │ │ ├── exponentiation.cc.inc │ │ │ │ │ ├── gcd.cc.inc │ │ │ │ │ ├── gcd_extra.cc.inc │ │ │ │ │ ├── generic.cc.inc │ │ │ │ │ ├── internal.h │ │ │ │ │ ├── jacobi.cc.inc │ │ │ │ │ ├── montgomery.cc.inc │ │ │ │ │ ├── montgomery_inv.cc.inc │ │ │ │ │ ├── mul.cc.inc │ │ │ │ │ ├── prime.cc.inc │ │ │ │ │ ├── random.cc.inc │ │ │ │ │ ├── rsaz_exp.cc.inc │ │ │ │ │ ├── rsaz_exp.h │ │ │ │ │ ├── shift.cc.inc │ │ │ │ │ └── sqrt.cc.inc │ │ │ │ ├── cipher/ │ │ │ │ │ ├── aead.cc.inc │ │ │ │ 
│ ├── cipher.cc.inc │ │ │ │ │ ├── e_aes.cc.inc │ │ │ │ │ ├── e_aesccm.cc.inc │ │ │ │ │ └── internal.h │ │ │ │ ├── cmac/ │ │ │ │ │ └── cmac.cc.inc │ │ │ │ ├── delocate.h │ │ │ │ ├── dh/ │ │ │ │ │ ├── check.cc.inc │ │ │ │ │ ├── dh.cc.inc │ │ │ │ │ └── internal.h │ │ │ │ ├── digest/ │ │ │ │ │ ├── digest.cc.inc │ │ │ │ │ ├── digests.cc.inc │ │ │ │ │ ├── internal.h │ │ │ │ │ └── md32_common.h │ │ │ │ ├── digestsign/ │ │ │ │ │ └── digestsign.cc.inc │ │ │ │ ├── ec/ │ │ │ │ │ ├── builtin_curves.h │ │ │ │ │ ├── ec.cc.inc │ │ │ │ │ ├── ec_key.cc.inc │ │ │ │ │ ├── ec_montgomery.cc.inc │ │ │ │ │ ├── felem.cc.inc │ │ │ │ │ ├── internal.h │ │ │ │ │ ├── oct.cc.inc │ │ │ │ │ ├── p224-64.cc.inc │ │ │ │ │ ├── p256-nistz-table.h │ │ │ │ │ ├── p256-nistz.cc.inc │ │ │ │ │ ├── p256-nistz.h │ │ │ │ │ ├── p256.cc.inc │ │ │ │ │ ├── p256_table.h │ │ │ │ │ ├── scalar.cc.inc │ │ │ │ │ ├── simple.cc.inc │ │ │ │ │ ├── simple_mul.cc.inc │ │ │ │ │ ├── util.cc.inc │ │ │ │ │ └── wnaf.cc.inc │ │ │ │ ├── ecdh/ │ │ │ │ │ └── ecdh.cc.inc │ │ │ │ ├── ecdsa/ │ │ │ │ │ ├── ecdsa.cc.inc │ │ │ │ │ └── internal.h │ │ │ │ ├── fips_shared_support.cc │ │ │ │ ├── hkdf/ │ │ │ │ │ └── hkdf.cc.inc │ │ │ │ ├── hmac/ │ │ │ │ │ └── hmac.cc.inc │ │ │ │ ├── keccak/ │ │ │ │ │ ├── internal.h │ │ │ │ │ └── keccak.cc.inc │ │ │ │ ├── mldsa/ │ │ │ │ │ └── mldsa.cc.inc │ │ │ │ ├── mlkem/ │ │ │ │ │ └── mlkem.cc.inc │ │ │ │ ├── rand/ │ │ │ │ │ ├── ctrdrbg.cc.inc │ │ │ │ │ ├── internal.h │ │ │ │ │ └── rand.cc.inc │ │ │ │ ├── rsa/ │ │ │ │ │ ├── blinding.cc.inc │ │ │ │ │ ├── internal.h │ │ │ │ │ ├── padding.cc.inc │ │ │ │ │ ├── rsa.cc.inc │ │ │ │ │ └── rsa_impl.cc.inc │ │ │ │ ├── self_check/ │ │ │ │ │ ├── fips.cc.inc │ │ │ │ │ └── self_check.cc.inc │ │ │ │ ├── service_indicator/ │ │ │ │ │ ├── internal.h │ │ │ │ │ └── service_indicator.cc.inc │ │ │ │ ├── sha/ │ │ │ │ │ ├── internal.h │ │ │ │ │ ├── sha1.cc.inc │ │ │ │ │ ├── sha256.cc.inc │ │ │ │ │ └── sha512.cc.inc │ │ │ │ ├── slhdsa/ │ │ │ │ │ ├── address.h │ │ │ │ │ ├── fors.cc.inc 
│ │ │ │ │ ├── fors.h │ │ │ │ │ ├── merkle.cc.inc │ │ │ │ │ ├── merkle.h │ │ │ │ │ ├── params.h │ │ │ │ │ ├── slhdsa.cc.inc │ │ │ │ │ ├── thash.cc.inc │ │ │ │ │ ├── thash.h │ │ │ │ │ ├── wots.cc.inc │ │ │ │ │ └── wots.h │ │ │ │ └── tls/ │ │ │ │ ├── internal.h │ │ │ │ └── kdf.cc.inc │ │ │ ├── hpke/ │ │ │ │ └── hpke.cc │ │ │ ├── hrss/ │ │ │ │ ├── asm/ │ │ │ │ │ └── poly_rq_mul.S │ │ │ │ ├── hrss.cc │ │ │ │ └── internal.h │ │ │ ├── internal.h │ │ │ ├── kyber/ │ │ │ │ ├── internal.h │ │ │ │ └── kyber.cc │ │ │ ├── lhash/ │ │ │ │ ├── internal.h │ │ │ │ └── lhash.cc │ │ │ ├── md4/ │ │ │ │ └── md4.cc │ │ │ ├── md5/ │ │ │ │ ├── internal.h │ │ │ │ └── md5.cc │ │ │ ├── mem.cc │ │ │ ├── mldsa/ │ │ │ │ └── mldsa.cc │ │ │ ├── mlkem/ │ │ │ │ └── mlkem.cc │ │ │ ├── obj/ │ │ │ │ ├── obj.cc │ │ │ │ ├── obj_dat.h │ │ │ │ └── obj_xref.cc │ │ │ ├── pem/ │ │ │ │ ├── internal.h │ │ │ │ ├── pem_all.cc │ │ │ │ ├── pem_info.cc │ │ │ │ ├── pem_lib.cc │ │ │ │ ├── pem_oth.cc │ │ │ │ ├── pem_pk8.cc │ │ │ │ ├── pem_pkey.cc │ │ │ │ ├── pem_x509.cc │ │ │ │ └── pem_xaux.cc │ │ │ ├── pkcs7/ │ │ │ │ ├── internal.h │ │ │ │ ├── pkcs7.cc │ │ │ │ └── pkcs7_x509.cc │ │ │ ├── pkcs8/ │ │ │ │ ├── internal.h │ │ │ │ ├── p5_pbev2.cc │ │ │ │ ├── pkcs8.cc │ │ │ │ └── pkcs8_x509.cc │ │ │ ├── poly1305/ │ │ │ │ ├── internal.h │ │ │ │ ├── poly1305.cc │ │ │ │ ├── poly1305_arm.cc │ │ │ │ ├── poly1305_arm_asm.S │ │ │ │ └── poly1305_vec.cc │ │ │ ├── pool/ │ │ │ │ ├── internal.h │ │ │ │ └── pool.cc │ │ │ ├── rand/ │ │ │ │ ├── deterministic.cc │ │ │ │ ├── fork_detect.cc │ │ │ │ ├── forkunsafe.cc │ │ │ │ ├── getentropy.cc │ │ │ │ ├── getrandom_fillin.h │ │ │ │ ├── ios.cc │ │ │ │ ├── passive.cc │ │ │ │ ├── rand.cc │ │ │ │ ├── sysrand_internal.h │ │ │ │ ├── trusty.cc │ │ │ │ ├── urandom.cc │ │ │ │ └── windows.cc │ │ │ ├── rc4/ │ │ │ │ └── rc4.cc │ │ │ ├── refcount.cc │ │ │ ├── rsa/ │ │ │ │ ├── internal.h │ │ │ │ ├── rsa_asn1.cc │ │ │ │ ├── rsa_crypt.cc │ │ │ │ ├── rsa_extra.cc │ │ │ │ └── rsa_print.cc │ │ │ ├── sha/ │ │ │ │ 
├── sha1.cc │ │ │ │ ├── sha256.cc │ │ │ │ └── sha512.cc │ │ │ ├── siphash/ │ │ │ │ └── siphash.cc │ │ │ ├── slhdsa/ │ │ │ │ └── slhdsa.cc │ │ │ ├── spake2plus/ │ │ │ │ ├── internal.h │ │ │ │ └── spake2plus.cc │ │ │ ├── stack/ │ │ │ │ └── stack.cc │ │ │ ├── thread.cc │ │ │ ├── thread_none.cc │ │ │ ├── thread_pthread.cc │ │ │ ├── thread_win.cc │ │ │ ├── trust_token/ │ │ │ │ ├── internal.h │ │ │ │ ├── pmbtoken.cc │ │ │ │ ├── trust_token.cc │ │ │ │ └── voprf.cc │ │ │ └── x509/ │ │ │ ├── a_digest.cc │ │ │ ├── a_sign.cc │ │ │ ├── a_verify.cc │ │ │ ├── algorithm.cc │ │ │ ├── asn1_gen.cc │ │ │ ├── by_dir.cc │ │ │ ├── by_file.cc │ │ │ ├── ext_dat.h │ │ │ ├── i2d_pr.cc │ │ │ ├── internal.h │ │ │ ├── name_print.cc │ │ │ ├── policy.cc │ │ │ ├── rsa_pss.cc │ │ │ ├── t_crl.cc │ │ │ ├── t_req.cc │ │ │ ├── t_x509.cc │ │ │ ├── t_x509a.cc │ │ │ ├── v3_akey.cc │ │ │ ├── v3_akeya.cc │ │ │ ├── v3_alt.cc │ │ │ ├── v3_bcons.cc │ │ │ ├── v3_bitst.cc │ │ │ ├── v3_conf.cc │ │ │ ├── v3_cpols.cc │ │ │ ├── v3_crld.cc │ │ │ ├── v3_enum.cc │ │ │ ├── v3_extku.cc │ │ │ ├── v3_genn.cc │ │ │ ├── v3_ia5.cc │ │ │ ├── v3_info.cc │ │ │ ├── v3_int.cc │ │ │ ├── v3_lib.cc │ │ │ ├── v3_ncons.cc │ │ │ ├── v3_ocsp.cc │ │ │ ├── v3_pcons.cc │ │ │ ├── v3_pmaps.cc │ │ │ ├── v3_prn.cc │ │ │ ├── v3_purp.cc │ │ │ ├── v3_skey.cc │ │ │ ├── v3_utl.cc │ │ │ ├── x509.cc │ │ │ ├── x509_att.cc │ │ │ ├── x509_cmp.cc │ │ │ ├── x509_d2.cc │ │ │ ├── x509_def.cc │ │ │ ├── x509_ext.cc │ │ │ ├── x509_lu.cc │ │ │ ├── x509_obj.cc │ │ │ ├── x509_req.cc │ │ │ ├── x509_set.cc │ │ │ ├── x509_trs.cc │ │ │ ├── x509_txt.cc │ │ │ ├── x509_v3.cc │ │ │ ├── x509_vfy.cc │ │ │ ├── x509_vpm.cc │ │ │ ├── x509cset.cc │ │ │ ├── x509name.cc │ │ │ ├── x509rset.cc │ │ │ ├── x509spki.cc │ │ │ ├── x_algor.cc │ │ │ ├── x_all.cc │ │ │ ├── x_attrib.cc │ │ │ ├── x_crl.cc │ │ │ ├── x_exten.cc │ │ │ ├── x_name.cc │ │ │ ├── x_pubkey.cc │ │ │ ├── x_req.cc │ │ │ ├── x_sig.cc │ │ │ ├── x_spki.cc │ │ │ ├── x_val.cc │ │ │ ├── x_x509.cc │ │ │ └── x_x509a.cc │ │ ├── 
gen/ │ │ │ ├── bcm/ │ │ │ │ ├── aes-gcm-avx10-x86_64-apple.S │ │ │ │ ├── aes-gcm-avx10-x86_64-linux.S │ │ │ │ ├── aes-gcm-avx2-x86_64-apple.S │ │ │ │ ├── aes-gcm-avx2-x86_64-linux.S │ │ │ │ ├── aesni-gcm-x86_64-apple.S │ │ │ │ ├── aesni-gcm-x86_64-linux.S │ │ │ │ ├── aesni-x86-apple.S │ │ │ │ ├── aesni-x86-linux.S │ │ │ │ ├── aesni-x86_64-apple.S │ │ │ │ ├── aesni-x86_64-linux.S │ │ │ │ ├── aesv8-armv7-linux.S │ │ │ │ ├── aesv8-armv8-apple.S │ │ │ │ ├── aesv8-armv8-linux.S │ │ │ │ ├── aesv8-armv8-win.S │ │ │ │ ├── aesv8-gcm-armv8-apple.S │ │ │ │ ├── aesv8-gcm-armv8-linux.S │ │ │ │ ├── aesv8-gcm-armv8-win.S │ │ │ │ ├── armv4-mont-linux.S │ │ │ │ ├── armv8-mont-apple.S │ │ │ │ ├── armv8-mont-linux.S │ │ │ │ ├── armv8-mont-win.S │ │ │ │ ├── bn-586-apple.S │ │ │ │ ├── bn-586-linux.S │ │ │ │ ├── bn-armv8-apple.S │ │ │ │ ├── bn-armv8-linux.S │ │ │ │ ├── bn-armv8-win.S │ │ │ │ ├── bsaes-armv7-linux.S │ │ │ │ ├── co-586-apple.S │ │ │ │ ├── co-586-linux.S │ │ │ │ ├── ghash-armv4-linux.S │ │ │ │ ├── ghash-neon-armv8-apple.S │ │ │ │ ├── ghash-neon-armv8-linux.S │ │ │ │ ├── ghash-neon-armv8-win.S │ │ │ │ ├── ghash-ssse3-x86-apple.S │ │ │ │ ├── ghash-ssse3-x86-linux.S │ │ │ │ ├── ghash-ssse3-x86_64-apple.S │ │ │ │ ├── ghash-ssse3-x86_64-linux.S │ │ │ │ ├── ghash-x86-apple.S │ │ │ │ ├── ghash-x86-linux.S │ │ │ │ ├── ghash-x86_64-apple.S │ │ │ │ ├── ghash-x86_64-linux.S │ │ │ │ ├── ghashv8-armv7-linux.S │ │ │ │ ├── ghashv8-armv8-apple.S │ │ │ │ ├── ghashv8-armv8-linux.S │ │ │ │ ├── ghashv8-armv8-win.S │ │ │ │ ├── p256-armv8-asm-apple.S │ │ │ │ ├── p256-armv8-asm-linux.S │ │ │ │ ├── p256-armv8-asm-win.S │ │ │ │ ├── p256-x86_64-asm-apple.S │ │ │ │ ├── p256-x86_64-asm-linux.S │ │ │ │ ├── p256_beeu-armv8-asm-apple.S │ │ │ │ ├── p256_beeu-armv8-asm-linux.S │ │ │ │ ├── p256_beeu-armv8-asm-win.S │ │ │ │ ├── p256_beeu-x86_64-asm-apple.S │ │ │ │ ├── p256_beeu-x86_64-asm-linux.S │ │ │ │ ├── rdrand-x86_64-apple.S │ │ │ │ ├── rdrand-x86_64-linux.S │ │ │ │ ├── rsaz-avx2-apple.S │ │ │ │ ├── 
rsaz-avx2-linux.S │ │ │ │ ├── sha1-586-apple.S │ │ │ │ ├── sha1-586-linux.S │ │ │ │ ├── sha1-armv4-large-linux.S │ │ │ │ ├── sha1-armv8-apple.S │ │ │ │ ├── sha1-armv8-linux.S │ │ │ │ ├── sha1-armv8-win.S │ │ │ │ ├── sha1-x86_64-apple.S │ │ │ │ ├── sha1-x86_64-linux.S │ │ │ │ ├── sha256-586-apple.S │ │ │ │ ├── sha256-586-linux.S │ │ │ │ ├── sha256-armv4-linux.S │ │ │ │ ├── sha256-armv8-apple.S │ │ │ │ ├── sha256-armv8-linux.S │ │ │ │ ├── sha256-armv8-win.S │ │ │ │ ├── sha256-x86_64-apple.S │ │ │ │ ├── sha256-x86_64-linux.S │ │ │ │ ├── sha512-586-apple.S │ │ │ │ ├── sha512-586-linux.S │ │ │ │ ├── sha512-armv4-linux.S │ │ │ │ ├── sha512-armv8-apple.S │ │ │ │ ├── sha512-armv8-linux.S │ │ │ │ ├── sha512-armv8-win.S │ │ │ │ ├── sha512-x86_64-apple.S │ │ │ │ ├── sha512-x86_64-linux.S │ │ │ │ ├── vpaes-armv7-linux.S │ │ │ │ ├── vpaes-armv8-apple.S │ │ │ │ ├── vpaes-armv8-linux.S │ │ │ │ ├── vpaes-armv8-win.S │ │ │ │ ├── vpaes-x86-apple.S │ │ │ │ ├── vpaes-x86-linux.S │ │ │ │ ├── vpaes-x86_64-apple.S │ │ │ │ ├── vpaes-x86_64-linux.S │ │ │ │ ├── x86-mont-apple.S │ │ │ │ ├── x86-mont-linux.S │ │ │ │ ├── x86_64-mont-apple.S │ │ │ │ ├── x86_64-mont-linux.S │ │ │ │ ├── x86_64-mont5-apple.S │ │ │ │ └── x86_64-mont5-linux.S │ │ │ └── crypto/ │ │ │ ├── aes128gcmsiv-x86_64-apple.S │ │ │ ├── aes128gcmsiv-x86_64-linux.S │ │ │ ├── chacha-armv4-linux.S │ │ │ ├── chacha-armv8-apple.S │ │ │ ├── chacha-armv8-linux.S │ │ │ ├── chacha-armv8-win.S │ │ │ ├── chacha-x86-apple.S │ │ │ ├── chacha-x86-linux.S │ │ │ ├── chacha-x86_64-apple.S │ │ │ ├── chacha-x86_64-linux.S │ │ │ ├── chacha20_poly1305_armv8-apple.S │ │ │ ├── chacha20_poly1305_armv8-linux.S │ │ │ ├── chacha20_poly1305_armv8-win.S │ │ │ ├── chacha20_poly1305_x86_64-apple.S │ │ │ ├── chacha20_poly1305_x86_64-linux.S │ │ │ ├── err_data.cc │ │ │ ├── md5-586-apple.S │ │ │ ├── md5-586-linux.S │ │ │ ├── md5-x86_64-apple.S │ │ │ └── md5-x86_64-linux.S │ │ ├── hash.txt │ │ ├── include/ │ │ │ ├── CNIOBoringSSL.h │ │ │ ├── CNIOBoringSSL_aead.h 
│ │ │ ├── CNIOBoringSSL_aes.h │ │ │ ├── CNIOBoringSSL_arm_arch.h │ │ │ ├── CNIOBoringSSL_asm_base.h │ │ │ ├── CNIOBoringSSL_asn1.h │ │ │ ├── CNIOBoringSSL_asn1_mac.h │ │ │ ├── CNIOBoringSSL_asn1t.h │ │ │ ├── CNIOBoringSSL_base.h │ │ │ ├── CNIOBoringSSL_base64.h │ │ │ ├── CNIOBoringSSL_bcm_public.h │ │ │ ├── CNIOBoringSSL_bio.h │ │ │ ├── CNIOBoringSSL_blake2.h │ │ │ ├── CNIOBoringSSL_blowfish.h │ │ │ ├── CNIOBoringSSL_bn.h │ │ │ ├── CNIOBoringSSL_boringssl_prefix_symbols.h │ │ │ ├── CNIOBoringSSL_boringssl_prefix_symbols_asm.h │ │ │ ├── CNIOBoringSSL_buf.h │ │ │ ├── CNIOBoringSSL_buffer.h │ │ │ ├── CNIOBoringSSL_bytestring.h │ │ │ ├── CNIOBoringSSL_cast.h │ │ │ ├── CNIOBoringSSL_chacha.h │ │ │ ├── CNIOBoringSSL_cipher.h │ │ │ ├── CNIOBoringSSL_cmac.h │ │ │ ├── CNIOBoringSSL_conf.h │ │ │ ├── CNIOBoringSSL_cpu.h │ │ │ ├── CNIOBoringSSL_crypto.h │ │ │ ├── CNIOBoringSSL_ctrdrbg.h │ │ │ ├── CNIOBoringSSL_curve25519.h │ │ │ ├── CNIOBoringSSL_des.h │ │ │ ├── CNIOBoringSSL_dh.h │ │ │ ├── CNIOBoringSSL_digest.h │ │ │ ├── CNIOBoringSSL_dsa.h │ │ │ ├── CNIOBoringSSL_dtls1.h │ │ │ ├── CNIOBoringSSL_e_os2.h │ │ │ ├── CNIOBoringSSL_ec.h │ │ │ ├── CNIOBoringSSL_ec_key.h │ │ │ ├── CNIOBoringSSL_ecdh.h │ │ │ ├── CNIOBoringSSL_ecdsa.h │ │ │ ├── CNIOBoringSSL_engine.h │ │ │ ├── CNIOBoringSSL_err.h │ │ │ ├── CNIOBoringSSL_evp.h │ │ │ ├── CNIOBoringSSL_evp_errors.h │ │ │ ├── CNIOBoringSSL_ex_data.h │ │ │ ├── CNIOBoringSSL_hkdf.h │ │ │ ├── CNIOBoringSSL_hmac.h │ │ │ ├── CNIOBoringSSL_hpke.h │ │ │ ├── CNIOBoringSSL_hrss.h │ │ │ ├── CNIOBoringSSL_is_boringssl.h │ │ │ ├── CNIOBoringSSL_kdf.h │ │ │ ├── CNIOBoringSSL_lhash.h │ │ │ ├── CNIOBoringSSL_md4.h │ │ │ ├── CNIOBoringSSL_md5.h │ │ │ ├── CNIOBoringSSL_mem.h │ │ │ ├── CNIOBoringSSL_mldsa.h │ │ │ ├── CNIOBoringSSL_mlkem.h │ │ │ ├── CNIOBoringSSL_nid.h │ │ │ ├── CNIOBoringSSL_obj.h │ │ │ ├── CNIOBoringSSL_obj_mac.h │ │ │ ├── CNIOBoringSSL_objects.h │ │ │ ├── CNIOBoringSSL_opensslconf.h │ │ │ ├── CNIOBoringSSL_opensslv.h │ │ │ ├── 
CNIOBoringSSL_ossl_typ.h │ │ │ ├── CNIOBoringSSL_pem.h │ │ │ ├── CNIOBoringSSL_pkcs12.h │ │ │ ├── CNIOBoringSSL_pkcs7.h │ │ │ ├── CNIOBoringSSL_pkcs8.h │ │ │ ├── CNIOBoringSSL_poly1305.h │ │ │ ├── CNIOBoringSSL_pool.h │ │ │ ├── CNIOBoringSSL_posix_time.h │ │ │ ├── CNIOBoringSSL_rand.h │ │ │ ├── CNIOBoringSSL_rc4.h │ │ │ ├── CNIOBoringSSL_ripemd.h │ │ │ ├── CNIOBoringSSL_rsa.h │ │ │ ├── CNIOBoringSSL_safestack.h │ │ │ ├── CNIOBoringSSL_service_indicator.h │ │ │ ├── CNIOBoringSSL_sha.h │ │ │ ├── CNIOBoringSSL_siphash.h │ │ │ ├── CNIOBoringSSL_slhdsa.h │ │ │ ├── CNIOBoringSSL_span.h │ │ │ ├── CNIOBoringSSL_srtp.h │ │ │ ├── CNIOBoringSSL_ssl.h │ │ │ ├── CNIOBoringSSL_ssl3.h │ │ │ ├── CNIOBoringSSL_stack.h │ │ │ ├── CNIOBoringSSL_target.h │ │ │ ├── CNIOBoringSSL_thread.h │ │ │ ├── CNIOBoringSSL_time.h │ │ │ ├── CNIOBoringSSL_tls1.h │ │ │ ├── CNIOBoringSSL_trust_token.h │ │ │ ├── CNIOBoringSSL_type_check.h │ │ │ ├── CNIOBoringSSL_x509.h │ │ │ ├── CNIOBoringSSL_x509_vfy.h │ │ │ ├── CNIOBoringSSL_x509v3.h │ │ │ ├── CNIOBoringSSL_x509v3_errors.h │ │ │ ├── boringssl_prefix_symbols_nasm.inc │ │ │ ├── experimental/ │ │ │ │ └── CNIOBoringSSL_kyber.h │ │ │ └── module.modulemap │ │ ├── ssl/ │ │ │ ├── bio_ssl.cc │ │ │ ├── d1_both.cc │ │ │ ├── d1_lib.cc │ │ │ ├── d1_pkt.cc │ │ │ ├── d1_srtp.cc │ │ │ ├── dtls_method.cc │ │ │ ├── dtls_record.cc │ │ │ ├── encrypted_client_hello.cc │ │ │ ├── extensions.cc │ │ │ ├── handoff.cc │ │ │ ├── handshake.cc │ │ │ ├── handshake_client.cc │ │ │ ├── handshake_server.cc │ │ │ ├── internal.h │ │ │ ├── s3_both.cc │ │ │ ├── s3_lib.cc │ │ │ ├── s3_pkt.cc │ │ │ ├── ssl_aead_ctx.cc │ │ │ ├── ssl_asn1.cc │ │ │ ├── ssl_buffer.cc │ │ │ ├── ssl_cert.cc │ │ │ ├── ssl_cipher.cc │ │ │ ├── ssl_credential.cc │ │ │ ├── ssl_file.cc │ │ │ ├── ssl_key_share.cc │ │ │ ├── ssl_lib.cc │ │ │ ├── ssl_privkey.cc │ │ │ ├── ssl_session.cc │ │ │ ├── ssl_stat.cc │ │ │ ├── ssl_transcript.cc │ │ │ ├── ssl_versions.cc │ │ │ ├── ssl_x509.cc │ │ │ ├── t1_enc.cc │ │ │ ├── 
tls13_both.cc │ │ │ ├── tls13_client.cc │ │ │ ├── tls13_enc.cc │ │ │ ├── tls13_server.cc │ │ │ ├── tls_method.cc │ │ │ └── tls_record.cc │ │ └── third_party/ │ │ └── fiat/ │ │ ├── asm/ │ │ │ ├── fiat_curve25519_adx_mul.S │ │ │ ├── fiat_curve25519_adx_square.S │ │ │ ├── fiat_p256_adx_mul.S │ │ │ └── fiat_p256_adx_sqr.S │ │ ├── curve25519_32.h │ │ ├── curve25519_64.h │ │ ├── curve25519_64_adx.h │ │ ├── curve25519_64_msvc.h │ │ ├── p256_32.h │ │ ├── p256_64.h │ │ └── p256_64_msvc.h │ ├── CNIOBoringSSLShims/ │ │ ├── include/ │ │ │ └── CNIOBoringSSLShims.h │ │ └── shims.c │ ├── NIOSSL/ │ │ ├── AndroidCABundle.swift │ │ ├── ByteBufferBIO.swift │ │ ├── CustomPrivateKey.swift │ │ ├── Docs.docc/ │ │ │ ├── TLSConfiguration.md │ │ │ ├── index.md │ │ │ ├── quantum-secure-tls.md │ │ │ └── trust-roots-behavior.md │ │ ├── IdentityVerification.swift │ │ ├── LinuxCABundle.swift │ │ ├── NIOSSLClientHandler.swift │ │ ├── NIOSSLHandler+Configuration.swift │ │ ├── NIOSSLHandler.swift │ │ ├── NIOSSLServerHandler.swift │ │ ├── ObjectIdentifier.swift │ │ ├── PosixPort.swift │ │ ├── PrivacyInfo.xcprivacy │ │ ├── SSLCallbacks.swift │ │ ├── SSLCertificate.swift │ │ ├── SSLCertificateExtensions.swift │ │ ├── SSLCertificateName.swift │ │ ├── SSLConnection.swift │ │ ├── SSLContext.swift │ │ ├── SSLErrors.swift │ │ ├── SSLInit.swift │ │ ├── SSLPKCS12Bundle.swift │ │ ├── SSLPrivateKey.swift │ │ ├── SSLPublicKey.swift │ │ ├── SecurityFrameworkCertificateVerification.swift │ │ ├── String+unsafeUninitializedCapacity.swift │ │ ├── SubjectAlternativeName.swift │ │ ├── SwiftCrypto/ │ │ │ ├── NIOSSLSecureBytes.swift │ │ │ ├── RNG.swift │ │ │ ├── SafeCompare.swift │ │ │ └── Zeroization.swift │ │ ├── TLSConfiguration.swift │ │ ├── UniversalBootstrapSupport.swift │ │ └── UnsafeKeyAndChainTarget.swift │ ├── NIOSSLHTTP1Client/ │ │ ├── README.md │ │ └── main.swift │ ├── NIOSSLPerformanceTester/ │ │ ├── BenchManyWrites.swift │ │ ├── BenchRepeatedHandshakes.swift │ │ ├── Benchmark.swift │ │ ├── main.swift │ │ 
└── shared.swift │ └── NIOTLSServer/ │ ├── README.md │ └── main.swift ├── Tests/ │ └── NIOSSLTests/ │ ├── ByteBufferBIOTest.swift │ ├── CertificateVerificationTests.swift │ ├── ClientSNITests.swift │ ├── CustomPrivateKeyTests.swift │ ├── IdentityVerificationTest.swift │ ├── NIOSSLALPNTest.swift │ ├── NIOSSLIntegrationTest.swift │ ├── NIOSSLSecureBytesTests.swift │ ├── NIOSSLTestHelpers.swift │ ├── ObjectIdentifierTests.swift │ ├── SSLCertificateExtensionsTests.swift │ ├── SSLCertificateTest.swift │ ├── SSLContextTests.swift │ ├── SSLPKCS12BundleTest.swift │ ├── SSLPrivateKeyTests.swift │ ├── SecurityFrameworkVerificationTests.swift │ ├── TLS13RecordObserver.swift │ ├── TLSConfigurationTest.swift │ ├── UnsafeTransfer.swift │ └── UnwrappingTests.swift ├── dev/ │ └── git.commit.template ├── docker/ │ ├── Dockerfile │ ├── docker-compose.2204.510.yaml │ ├── docker-compose.2204.58.yaml │ ├── docker-compose.2204.59.yaml │ ├── docker-compose.2204.main.yaml │ └── docker-compose.yaml └── scripts/ ├── analyze_performance_results.rb ├── build-asm.py ├── integration_tests.sh ├── patch-1-inttypes.patch ├── patch-2-inttypes.patch ├── patch-3-more-inttypes.patch └── vendor-boringssl.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .editorconfig ================================================ root = true [*] indent_style = space indent_size = 4 end_of_line = lf insert_final_newline = true trim_trailing_whitespace = true ================================================ FILE: .gitattributes ================================================ Sources/CNIOBoringSSL/* linguist-vendored ================================================ FILE: .github/release.yml ================================================ changelog: categories: - title: SemVer Major labels: - ⚠️ semver/major - title: SemVer Minor labels: - 🆕 semver/minor - title: SemVer Patch labels: - 🔨 semver/patch 
- title: Other Changes labels: - semver/none ================================================ FILE: .github/workflows/main.yml ================================================ name: Main permissions: contents: read on: push: branches: [main] schedule: - cron: "0 8,20 * * *" jobs: unit-tests: name: Unit tests uses: apple/swift-nio/.github/workflows/unit_tests.yml@main with: linux_5_10_arguments_override: "-Xswiftc -warnings-as-errors --explicit-target-dependency-import-check error" linux_6_1_arguments_override: "-Xswiftc -warnings-as-errors --explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable" linux_6_2_arguments_override: "-Xswiftc -warnings-as-errors --explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable" linux_6_3_arguments_override: "-Xswiftc -warnings-as-errors --explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable" linux_nightly_next_arguments_override: "--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable" linux_nightly_main_arguments_override: "--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable" construct-integration-test-matrix: name: Construct integration matrix runs-on: ubuntu-latest outputs: integration-test-matrix: '${{ steps.generate-matrix.outputs.integration-test-matrix }}' steps: - name: Checkout repository uses: actions/checkout@v6 with: persist-credentials: false - id: generate-matrix run: echo "integration-test-matrix=$(curl -s https://raw.githubusercontent.com/apple/swift-nio/main/scripts/generate_matrix.sh | bash)" >> "$GITHUB_OUTPUT" env: MATRIX_LINUX_SETUP_COMMAND: apt-get update -y && apt-get install -yq execstack lsof dnsutils netcat-openbsd net-tools expect curl jq MATRIX_LINUX_COMMAND: ./scripts/integration_tests.sh -f test_01_renegotiation integration-test: name: Integration test needs: construct-integration-test-matrix uses: apple/swift-nio/.github/workflows/swift_test_matrix.yml@main 
with: name: "Integration test" matrix_string: '${{ needs.construct-integration-test-matrix.outputs.integration-test-matrix }}' benchmarks: name: Benchmarks uses: apple/swift-nio/.github/workflows/benchmarks.yml@main with: benchmark_package_path: "Benchmarks" macos-tests: name: macOS tests uses: apple/swift-nio/.github/workflows/macos_tests.yml@main with: runner_pool: nightly build_scheme: swift-nio-ssl-Package xcode_16_2_build_arguments_override: "-Xswiftc -Xfrontend -Xswiftc -require-explicit-sendable" xcode_16_3_build_arguments_override: "-Xswiftc -Xfrontend -Xswiftc -require-explicit-sendable" static-sdk: name: Static SDK uses: apple/swift-nio/.github/workflows/static_sdk.yml@main release-builds: name: Release builds uses: apple/swift-nio/.github/workflows/release_builds.yml@main ================================================ FILE: .github/workflows/pull_request.yml ================================================ name: PR permissions: contents: read on: pull_request: types: [opened, reopened, synchronize] jobs: soundness: name: Soundness uses: swiftlang/github-workflows/.github/workflows/soundness.yml@0.0.7 with: license_header_check_project_name: "SwiftNIO" unit-tests: name: Unit tests uses: apple/swift-nio/.github/workflows/unit_tests.yml@main with: linux_5_10_arguments_override: "-Xswiftc -warnings-as-errors --explicit-target-dependency-import-check error" # We can't set warnings-as-errors for 6.1 because we can't suppress the ImplementationOnly import warning. 
linux_6_1_arguments_override: "--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable" linux_6_2_arguments_override: "-Xswiftc -warnings-as-errors -Xswiftc -Wwarning -Xswiftc ImplementationOnlyDeprecated --explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable" linux_6_3_arguments_override: "-Xswiftc -warnings-as-errors -Xswiftc -Wwarning -Xswiftc ImplementationOnlyDeprecated --explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable" linux_nightly_next_arguments_override: "-Xswiftc -warnings-as-errors -Xswiftc -Wwarning -Xswiftc ImplementationOnlyDeprecated --explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable" linux_nightly_main_arguments_override: "-Xswiftc -warnings-as-errors -Xswiftc -Wwarning -Xswiftc ImplementationOnlyDeprecated --explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable" construct-integration-test-matrix: name: Construct integration matrix runs-on: ubuntu-latest outputs: integration-test-matrix: '${{ steps.generate-matrix.outputs.integration-test-matrix }}' steps: - name: Checkout repository uses: actions/checkout@v6 with: persist-credentials: false - id: generate-matrix run: echo "integration-test-matrix=$(curl -s https://raw.githubusercontent.com/apple/swift-nio/main/scripts/generate_matrix.sh | bash)" >> "$GITHUB_OUTPUT" env: MATRIX_LINUX_SETUP_COMMAND: apt-get update -y && apt-get install -yq execstack lsof dnsutils netcat-openbsd net-tools expect curl jq MATRIX_LINUX_COMMAND: ./scripts/integration_tests.sh -f test_01_renegotiation integration-test: name: Integration test needs: construct-integration-test-matrix uses: apple/swift-nio/.github/workflows/swift_test_matrix.yml@main with: name: "Integration test" matrix_string: '${{ needs.construct-integration-test-matrix.outputs.integration-test-matrix }}' benchmarks: name: Benchmarks uses: apple/swift-nio/.github/workflows/benchmarks.yml@main with: 
benchmark_package_path: "Benchmarks" cxx-interop: name: Cxx interop uses: apple/swift-nio/.github/workflows/cxx_interop.yml@main macos-tests: name: macOS tests uses: apple/swift-nio/.github/workflows/macos_tests.yml@main with: runner_pool: general build_scheme: swift-nio-ssl-Package xcode_16_2_build_arguments_override: "-Xswiftc -Xfrontend -Xswiftc -require-explicit-sendable" xcode_16_3_build_arguments_override: "-Xswiftc -Xfrontend -Xswiftc -require-explicit-sendable" static-sdk: name: Static SDK uses: apple/swift-nio/.github/workflows/static_sdk.yml@main release-builds: name: Release builds uses: apple/swift-nio/.github/workflows/release_builds.yml@main ================================================ FILE: .github/workflows/pull_request_label.yml ================================================ name: PR label permissions: contents: read on: pull_request: types: [labeled, unlabeled, opened, reopened, synchronize] jobs: semver-label-check: name: Semantic version label check runs-on: ubuntu-latest timeout-minutes: 1 steps: - name: Checkout repository uses: actions/checkout@v6 with: persist-credentials: false - name: Check for Semantic Version label uses: apple/swift-nio/.github/actions/pull_request_semver_label_checker@main ================================================ FILE: .gitignore ================================================ .DS_Store /.build /Packages /*.xcodeproj Package.pins Package.resolved *.pem /docs DerivedData /.idea .swiftpm ================================================ FILE: .licenseignore ================================================ .gitignore **/.gitignore .licenseignore .gitattributes .git-blame-ignore-revs .mailfilter .mailmap .spi.yml .swift-format .editorconfig .github/* *.md *.txt *.yml *.yaml *.json Package.swift **/Package.swift Package@-*.swift **/Package@-*.swift Package.resolved **/Package.resolved Makefile *.modulemap **/*.modulemap **/*.docc/* *.xcprivacy **/*.xcprivacy *.symlink **/*.symlink Dockerfile **/Dockerfile 
Snippets/* Sources/CNIOBoringSSL/* dev/alloc-limits-from-test-output dev/boxed-existentials.d dev/git.commit.template dev/lldb-smoker dev/make-single-file-spm dev/malloc-aggregation.d dev/update-alloc-limits-to-last-completed-ci-build scripts/patch-1-inttypes.patch scripts/patch-2-inttypes.patch scripts/patch-3-more-inttypes.patch .unacceptablelanguageignore ================================================ FILE: .spi.yml ================================================ version: 1 builder: configs: - documentation_targets: [NIOSSL] ================================================ FILE: .swift-format ================================================ { "version" : 1, "indentation" : { "spaces" : 4 }, "tabWidth" : 4, "fileScopedDeclarationPrivacy" : { "accessLevel" : "private" }, "spacesAroundRangeFormationOperators" : false, "indentConditionalCompilationBlocks" : false, "indentSwitchCaseLabels" : false, "lineBreakAroundMultilineExpressionChainComponents" : false, "lineBreakBeforeControlFlowKeywords" : false, "lineBreakBeforeEachArgument" : true, "lineBreakBeforeEachGenericRequirement" : true, "lineLength" : 120, "maximumBlankLines" : 1, "respectsExistingLineBreaks" : true, "prioritizeKeepingFunctionOutputTogether" : true, "rules" : { "AllPublicDeclarationsHaveDocumentation" : false, "AlwaysUseLiteralForEmptyCollectionInit" : false, "AlwaysUseLowerCamelCase" : false, "AmbiguousTrailingClosureOverload" : true, "BeginDocumentationCommentWithOneLineSummary" : false, "DoNotUseSemicolons" : true, "DontRepeatTypeInStaticProperties" : true, "FileScopedDeclarationPrivacy" : true, "FullyIndirectEnum" : true, "GroupNumericLiterals" : true, "IdentifiersMustBeASCII" : true, "NeverForceUnwrap" : false, "NeverUseForceTry" : false, "NeverUseImplicitlyUnwrappedOptionals" : false, "NoAccessLevelOnExtensionDeclaration" : true, "NoAssignmentInExpressions" : true, "NoBlockComments" : true, "NoCasesWithOnlyFallthrough" : true, "NoEmptyTrailingClosureParentheses" : true, 
"NoLabelsInCasePatterns" : true, "NoLeadingUnderscores" : false, "NoParensAroundConditions" : true, "NoVoidReturnOnFunctionSignature" : true, "OmitExplicitReturns" : true, "OneCasePerLine" : true, "OneVariableDeclarationPerLine" : true, "OnlyOneTrailingClosureArgument" : true, "OrderedImports" : true, "ReplaceForEachWithForLoop" : true, "ReturnVoidInsteadOfEmptyTuple" : true, "UseEarlyExits" : false, "UseExplicitNilCheckInConditions" : false, "UseLetInEveryBoundCaseVariable" : false, "UseShorthandTypeNames" : true, "UseSingleLinePropertyGetter" : false, "UseSynthesizedInitializer" : false, "UseTripleSlashForDocumentationComments" : true, "UseWhereClausesInForLoops" : false, "ValidateDocumentationComments" : false } } ================================================ FILE: .unacceptablelanguageignore ================================================ Sources/CNIOBoringSSL/* NOTICE.txt ================================================ FILE: Benchmarks/Benchmarks/NIOSSLBenchmarks/Benchmarks.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2024 Apple Inc. 
and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import Benchmark import NIOCore import NIOEmbedded import NIOSSL let benchmarks: @Sendable () -> Void = { let defaultMetrics: [BenchmarkMetric] = [ .mallocCountTotal ] Benchmark( "SimpleHandshake", configuration: .init( metrics: defaultMetrics, scalingFactor: .kilo, maxDuration: .seconds(10_000_000), maxIterations: 10, thresholds: [.mallocCountTotal: .init(absolute: [.p90: 50])] ) ) { benchmark in try runSimpleHandshake( handshakeCount: benchmark.scaledIterations.upperBound ) } Benchmark( "ManyWrites", configuration: .init( metrics: defaultMetrics, scalingFactor: .kilo, maxDuration: .seconds(10_000_000), maxIterations: 10, thresholds: [.mallocCountTotal: .init(absolute: [.p90: 50])] ) ) { benchmark in try runManyWrites( writeCount: benchmark.scaledIterations.upperBound ) } } ================================================ FILE: Benchmarks/Benchmarks/NIOSSLBenchmarks/ManyWrites.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2019-2021 Apple Inc. 
and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import NIOCore import NIOEmbedded import NIOSSL func runManyWrites(writeCount: Int) throws { let serverContext = try NIOSSLContext( configuration: .makeServerConfiguration( certificateChain: [.certificate(.forTesting())], privateKey: .privateKey(.forTesting()) ) ) var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.trustRoots = try .certificates([.forTesting()]) let clientContext = try NIOSSLContext(configuration: clientConfig) let dummyAddress = try SocketAddress(ipAddress: "1.2.3.4", port: 5678) let backToBack = BackToBackEmbeddedChannel() let serverHandler = NIOSSLServerHandler(context: serverContext) let clientHandler = try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost") try backToBack.client.pipeline.addHandler(clientHandler).wait() try backToBack.server.pipeline.addHandler(serverHandler).wait() // To trigger activation of both channels we use connect(). try backToBack.client.connect(to: dummyAddress).wait() try backToBack.server.connect(to: dummyAddress).wait() try backToBack.interactInMemory() // Let's try 512 bytes. var buffer = backToBack.client.allocator.buffer(capacity: 512) buffer.writeBytes(repeatElement(0, count: 512)) for _ in 0.. 
NIOSSLCertificate { try .init(bytes: certificatePemBytes, format: .pem) } } extension NIOSSLPrivateKey { static func forTesting() throws -> NIOSSLPrivateKey { try .init(bytes: keyPemBytes, format: .pem) } } private let certificatePemBytes = Array( """ -----BEGIN CERTIFICATE----- MIIBTzCB9qADAgECAhQkvv72Je/v+B/cgJ53f84O82z6WTAKBggqhkjOPQQDAjAU MRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTkxMTI3MTAxMjMwWhcNMjkxMTI0MTAx MjMwWjAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwWTATBgcqhkjOPQIBBggqhkjOPQMB BwNCAAShtZ9TRt7I+7Y0o99XUkrgSYmUmpr4K8CB0IkTCX6b1tXp3Xqs1V5BckTd qrls+zsm3AfeiNBb9EDdxiX9DdzuoyYwJDAUBgNVHREEDTALgglsb2NhbGhvc3Qw DAYDVR0TAQH/BAIwADAKBggqhkjOPQQDAgNIADBFAiAKxYON+YTnIHNR0R6SLP8R R7hjsjV5NDs18XLoeRnA1gIhANwyggmE6NQW/r9l59fexj/ZrjaS3jYOTNCfC1Lo 5NgJ -----END CERTIFICATE----- """.utf8 ) private let keyPemBytes = Array( """ -----BEGIN PRIVATE KEY----- MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgCn182hBmYVMAiNPO +7w05F40SlAqqxgBEYJZOeK47aihRANCAAShtZ9TRt7I+7Y0o99XUkrgSYmUmpr4 K8CB0IkTCX6b1tXp3Xqs1V5BckTdqrls+zsm3AfeiNBb9EDdxiX9Ddzu -----END PRIVATE KEY----- """.utf8 ) ================================================ FILE: Benchmarks/Benchmarks/NIOSSLBenchmarks/SimpleHandshake.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2019-2021 Apple Inc. 
and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import NIOCore import NIOEmbedded import NIOSSL func runSimpleHandshake(handshakeCount: Int) throws { let serverContext = try NIOSSLContext( configuration: .makeServerConfiguration( certificateChain: [.certificate(.forTesting())], privateKey: .privateKey(.forTesting()) ) ) var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.trustRoots = try .certificates([.forTesting()]) let clientContext = try NIOSSLContext(configuration: clientConfig) let dummyAddress = try SocketAddress(ipAddress: "1.2.3.4", port: 5678) for _ in 0.. ================================================ FILE: CONTRIBUTING.md ================================================ ## Legal By submitting a pull request, you represent that you have the right to license your contribution to Apple and the community, and agree by submitting the patch that your contributions are licensed under the Apache 2.0 license (see `LICENSE.txt`). ## How to submit a bug report Please ensure to specify the following: * SwiftNIO commit hash * Contextual information (e.g. what you were trying to achieve with SwiftNIO) * Simplest possible steps to reproduce * More complex the steps are, lower the priority will be. * A pull request with failing test case is preferred, but it's just fine to paste the test case into the issue description. 
* Anything that might be relevant in your opinion, such as: * Swift version or the output of `swift --version` * OS version and the output of `uname -a` * Network configuration ### Example ``` SwiftNIO commit hash: 22ec043dc9d24bb011b47ece4f9ee97ee5be2757 Context: While load testing my HTTP web server written with SwiftNIO, I noticed that one file descriptor is leaked per request. Steps to reproduce: 1. ... 2. ... 3. ... 4. ... $ swift --version Swift version 4.0.2 (swift-4.0.2-RELEASE) Target: x86_64-unknown-linux-gnu Operating system: Ubuntu Linux 16.04 64-bit $ uname -a Linux beefy.machine 4.4.0-101-generic #124-Ubuntu SMP Fri Nov 10 18:29:59 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux My system has IPv6 disabled. ``` ## Writing a Patch A good SwiftNIO patch is: 1. Concise, and contains as few changes as needed to achieve the end result. 2. Tested, ensuring that any tests provided failed before the patch and pass after it. 3. Documented, adding API documentation as needed to cover new functions and properties. 4. Accompanied by a great commit message, using our commit message template. ### Commit Message Template We require that your commit messages match our template. The easiest way to do that is to get git to help you by explicitly using the template. To do that, `cd` to the root of our repository and run: git config commit.template dev/git.commit.template ### Run CI checks locally You can run the Github Actions workflows locally using [act](https://github.com/nektos/act). For detailed steps on how to do this please see [https://github.com/swiftlang/github-workflows?tab=readme-ov-file#running-workflows-locally](https://github.com/swiftlang/github-workflows?tab=readme-ov-file#running-workflows-locally). ## How to contribute your work Please open a pull request at https://github.com/apple/swift-nio. Make sure the CI passes, and then wait for code review. 
================================================ FILE: CONTRIBUTORS.txt ================================================ For the purpose of tracking copyright, this is the list of individuals and organizations who have contributed source code to SwiftNIO. For employees of an organization/company where the copyright of work done by employees of that company is held by the company itself, only the company needs to be listed here. ## COPYRIGHT HOLDERS - Apple Inc. (all contributors with '@apple.com') ### Contributors - Cory Benfield - Johannes Weiß - Norman Maurer - Tom Doron ================================================ FILE: IntegrationTests/plugin_echo.sh ================================================ #!/bin/bash ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2017-2018 Apple Inc. and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## function plugin_echo_test_suite_begin() { echo "Running test suite '$1'" } function plugin_echo_test_suite_end() { true } # test_name function plugin_echo_test_begin() { echo -n "Running test '$1'... 
" } function plugin_echo_test_skip() { echo "Skipping test '$1'" } function plugin_echo_test_ok() { echo "OK (${1}s)" } function plugin_echo_test_fail() { echo "FAILURE ($1)" echo "--- OUTPUT BEGIN ---" cat "$2" echo "--- OUTPUT END ---" } function plugin_echo_test_end() { true } function plugin_echo_summary_ok() { echo "OK (ran $1 tests successfully)" } function plugin_echo_summary_fail() { echo "FAILURE (oks: $1, failures: $2)" } function plugin_echo_init() { true } ================================================ FILE: IntegrationTests/plugin_junit_xml.sh ================================================ #!/bin/bash ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2017-2018 Apple Inc. and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## junit_testsuite_time=0 function junit_output_write() { extra_flags="" if [[ "$1" == "-n" ]]; then extra_flags="-n" shift fi test -n "$junit_xml_output" echo $extra_flags "$*" >> "$junit_xml_output" } function junit_output_cat() { cat "$@" >> "$junit_xml_output" } # search, replace function junit_output_replace() { test -n "$junit_xml_output" case "$(uname -s)" in Linux) sed -i "s/$1/$2/g" "$junit_xml_output" ;; *) sed -i "" "s/$1/$2/g" "$junit_xml_output" ;; esac } function plugin_junit_xml_test_suite_begin() { junit_testsuite_time=0 junit_output_write "" } function plugin_junit_xml_test_suite_end() { junit_repl_success_and_fail "$1" "$2" junit_output_write "" } # test_name function plugin_junit_xml_test_begin() { junit_output_write -n " " junit_testsuite_time=$((junit_testsuite_time + time_ms)) } function plugin_junit_xml_test_fail() { time_ms=$1 
junit_output_write " time='$time_ms'>" junit_output_write " " junit_output_write " " junit_output_write ' ' junit_output_write " " junit_output_write " " } function plugin_junit_xml_test_end() { junit_output_write " " } function junit_repl_success_and_fail() { junit_output_replace XXX-TESTS-XXX "$(($1 + $2))" junit_output_replace XXX-FAILURES-XXX "$2" junit_output_replace XXX-TIME-XXX "$junit_testsuite_time" } function plugin_junit_xml_summary_ok() { junit_output_write "" } function plugin_junit_xml_summary_fail() { junit_output_write "" } function plugin_junit_xml_init() { junit_xml_output="" for f in "$@"; do if [[ "$junit_xml_output" = "PLACEHOLDER" ]]; then junit_xml_output="$f" fi if [[ "$f" == "--junit-xml" && -z "$junit_xml_output" ]]; then junit_xml_output="PLACEHOLDER" fi done if [[ -z "$junit_xml_output" || "$junit_xml_output" = "PLACEHOLDER" ]]; then echo >&2 "ERROR: you need to specify the output after the --junit-xml argument" false fi echo "" > "$junit_xml_output" } ================================================ FILE: IntegrationTests/run-single-test.sh ================================================ #!/bin/bash ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2017-2018 Apple Inc. 
and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## ( # this sub-shell is where the actual test is run set -eu set -x set -o pipefail test="$1" # shellcheck disable=SC2034 # Used by whatever we source transpile in tmp="$2" # shellcheck disable=SC2034 # Used by whatever we source transpile in root="$3" # shellcheck disable=SC2034 # Used by whatever we source transpile in g_show_info="$4" here="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # shellcheck source=IntegrationTests/test_functions.sh source "$here/test_functions.sh" # shellcheck source=/dev/null source "$test" wait ) exit_code=$? exit $exit_code ================================================ FILE: IntegrationTests/run-tests.sh ================================================ #!/bin/bash ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2017-2018 Apple Inc. 
and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## set -eu shopt -s nullglob here="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" tmp=$(mktemp -d /tmp/.swift-nio-http1-server-sh-tests_XXXXXX) # start_time function time_diff_to_now() { echo "$(( $(date +%s) - $1 ))" } function plugins_do() { local method method="$1" shift for plugin in $plugins; do cd "$orig_cwd" "plugin_${plugin}_${method}" "$@" cd - > /dev/null done } # shellcheck source=IntegrationTests/plugin_echo.sh source "$here/plugin_echo.sh" # shellcheck source=/dev/null source "$here/plugin_junit_xml.sh" plugins="echo" plugin_opts_ind=0 if [[ "${1-default}" == "--junit-xml" ]]; then plugins="echo junit_xml" plugin_opts_ind=2 fi function usage() { echo >&2 "Usage: $0 [OPTIONS]" echo >&2 echo >&2 "OPTIONS:" echo >&2 " -f FILTER: Only run tests matching FILTER (regex)" } orig_cwd=$(pwd) cd "$here" plugins_do init "$@" shift $plugin_opts_ind filter="." verbose=false show_info=false debug=false while getopts "f:vid" opt; do case $opt in f) filter="$OPTARG" ;; v) verbose=true ;; i) show_info=true ;; d) debug=true ;; \?) usage exit 1 ;; esac done function run_test() { if $verbose; then "$@" 2>&1 | tee -a "$out" # we need to return the return value of the first command return "${PIPESTATUS[0]}" else "$@" >> "$out" 2>&1 fi } exec 3>&1 4>&2 # copy stdout/err to fd 3/4 to we can output control messages cnt_ok=0 cnt_fail=0 for f in tests_*; do suite_ok=0 suite_fail=0 plugins_do test_suite_begin "$f" start_suite=$(date +%s) cd "$f" for t in test_*.sh; do if [[ ! 
"$f/$t" =~ $filter ]]; then plugins_do test_skip "$t" continue fi out=$(mktemp "$tmp/test.out_XXXXXX") test_tmp=$(mktemp -d "$tmp/test.tmp_XXXXXX") plugins_do test_begin "$t" "$f" start=$(date +%s) if run_test "$here/run-single-test.sh" "$here/$f/$t" "$test_tmp" "$here/.." "$show_info"; then plugins_do test_ok "$(time_diff_to_now "$start")" suite_ok=$((suite_ok+1)) if $verbose; then cat "$out" fi else plugins_do test_fail "$(time_diff_to_now "$start")" "$out" suite_fail=$((suite_fail+1)) fi if ! $debug; then rm "$out" rm -rf "$test_tmp" fi plugins_do test_end done cnt_ok=$((cnt_ok + suite_ok)) cnt_fail=$((cnt_fail + suite_fail)) cd .. plugins_do test_suite_end "$(time_diff_to_now "$start_suite")" "$suite_ok" "$suite_fail" done if ! $debug; then rm -rf "$tmp" else echo >&2 "debug mode, not deleting '$tmp'" fi # report if [[ $cnt_fail -gt 0 ]]; then # terminate leftovers (the whole process group) trap '' TERM kill 0 # ignore-unacceptable-language plugins_do summary_fail "$cnt_ok" "$cnt_fail" else plugins_do summary_ok "$cnt_ok" "$cnt_fail" fi if [[ $cnt_fail -gt 0 ]]; then exit 1 else exit 0 fi ================================================ FILE: IntegrationTests/test_functions.sh ================================================ #!/bin/bash ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2017-2018 Apple Inc. and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## function fail() { echo >&2 "FAILURE: $*" false } function assert_equal() { if [[ "$1" != "$2" ]]; then fail "expected '$1', got '$2' ${3-}" fi } function assert_equal_files() { if ! 
cmp -s "$1" "$2"; then diff -u "$1" "$2" || true echo echo "--- SNIP ($1, size=$(wc "$1"), SHA=$(shasum "$1")) ---" cat "$1" echo "--- SNAP ($1)---" echo "--- SNIP ($2, size=$(wc "$2"), SHA=$(shasum "$2")) ---" cat "$2" echo "--- SNAP ($2) ---" fail "file '$1' not equal to '$2'" fi } function assert_less_than() { if [[ ! "$1" -lt "$2" ]]; then fail "assertion '$1' < '$2' failed" fi } function assert_less_than_or_equal() { if [[ ! "$1" -le "$2" ]]; then fail "assertion '$1' <= '$2' failed" fi } function assert_greater_than() { if [[ ! "$1" -gt "$2" ]]; then fail "assertion '$1' > '$2' failed" fi } function assert_greater_than_or_equal() { if [[ ! "$1" -ge "$2" ]]; then fail "assertion '$1' >= '$2' failed" fi } g_has_previously_infoed=false function info() { # shellcheck disable=SC2154 # Defined by an include our by being source transpiled in if $g_show_info; then if ! $g_has_previously_infoed; then echo >&3 || true # echo an extra newline so it looks better g_has_previously_infoed=true fi echo >&3 "info: $*" || true fi } function warn() { echo >&4 "warning: $*" } ================================================ FILE: IntegrationTests/tests_01_general/defines.sh ================================================ #!/bin/bash ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2017-2019 Apple Inc. 
and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## function client_path() { echo "$(swift build --show-bin-path)/NIOSSLHTTP1Client" } ================================================ FILE: IntegrationTests/tests_01_general/test_01_renegotiation.sh ================================================ #!/bin/bash ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2019 Apple Inc. and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## # shellcheck source=IntegrationTests/tests_01_general/defines.sh source defines.sh swift build # Generate a self-signed certificate. 
# shellcheck disable=SC2154 # Provided by framework cat << EOF > "$tmp/openssl.cnf" [ req ] distinguished_name = subject req_extensions = req_ext x509_extensions = x509_ext [ subject ] countryName = Country Name (2 letter code) countryName_default = US stateOrProvinceName = State or Province Name (full name) stateOrProvinceName_default = NY localityName = Locality Name (eg, city) localityName_default = New York organizationName = Organization Name (eg, company) organizationName_default = Example, LLC [ req_ext ] basicConstraints = CA:FALSE [ x509_ext ] subjectKeyIdentifier = hash subjectAltName = @alternate_names [ alternate_names ] DNS.1 = localhost EOF openssl req -new -newkey rsa:4096 -days 365 -nodes -config "$tmp/openssl.cnf" -x509 \ -subj "/C=US/ST=NJ/L=Wall/O=NIO/CN=localhost" \ -keyout "$tmp/key.pem" -out "$tmp/cert.pem" expect -c " spawn openssl s_server -no_tls1_3 -cert \"$tmp/cert.pem\" -key \"$tmp/key.pem\" set serverspawn \$spawn_id expect { \"ACCEPT\" { } timeout { exit 1 } } spawn $(client_path) http://localhost:4433/get \"$tmp/cert.pem\" \"$tmp/key.pem\" \"$tmp/cert.pem\" set spawn_id \$serverspawn expect { \"close\\r\\r\" { } timeout { exit 2 } } send \"R\r\" expect { \"Read BLOCK\\r\" { } timeout { exit 3 } } " ================================================ FILE: IntegrationTests/tests_01_general/test_02_execstack.sh ================================================ #!/bin/bash ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2019 Apple Inc. 
and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## # shellcheck source=IntegrationTests/tests_01_general/defines.sh source defines.sh if [[ "$(uname -s)" == "Darwin" ]]; then echo "No need to run execstack on Darwin" exit 0 fi swift build -c debug swift build -c release DEBUG_SERVER_PATH="$(swift build --show-bin-path)/NIOTLSServer" RELEASE_SERVER_PATH="$(swift build --show-bin-path -c release)/NIOTLSServer" results=$(execstack "$DEBUG_SERVER_PATH" "$RELEASE_SERVER_PATH") count=$(echo "$results" | grep -c '^X' || true) if [ "$count" -ne 0 ]; then exit 1 else exit 0 fi ================================================ FILE: LICENSE.txt ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: NOTICE.txt ================================================ The SwiftNIO Project ==================== Please visit the SwiftNIO web site for more information: * https://github.com/apple/swift-nio Copyright 2017, 2018 The SwiftNIO Project The SwiftNIO Project licenses this file to you under the Apache License, version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Also, please refer to each LICENSE.<component>.txt file, which is located in the 'license' directory of the distribution file, for the license terms of the components that this product depends on. ------------------------------------------------------------------------------- This product is heavily influenced by Netty. * LICENSE (Apache License 2.0): * https://github.com/netty/netty/blob/4.1/LICENSE.txt * HOMEPAGE: * https://netty.io --- This product contains a derivation of the Tony Stone's 'process_test_files.rb'. 
* LICENSE (Apache License 2.0): * https://www.apache.org/licenses/LICENSE-2.0 * HOMEPAGE: * https://codegists.com/snippet/ruby/generate_xctest_linux_runnerrb_tonystone_ruby --- This product contains code derived from grpc-swift. * LICENSE (Apache License 2.0): * https://github.com/grpc/grpc-swift/blob/0.7.0/LICENSE * HOMEPAGE: * https://github.com/grpc/grpc-swift --- This product contains code from boringssl. * LICENSE (Combination ISC and OpenSSL license) * https://boringssl.googlesource.com/boringssl/+/refs/heads/master/LICENSE * HOMEPAGE: * https://boringssl.googlesource.com/boringssl/ ================================================ FILE: Package.swift ================================================ // swift-tools-version:6.1 //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import PackageDescription // This package contains a vendored copy of BoringSSL. For ease of tracking // down problems with the copy of BoringSSL in use, we include a copy of the // commit hash of the revision of BoringSSL included in the given release. // This is also reproduced in a file called hash.txt in the // Sources/CNIOBoringSSL directory. The source repository is at // https://boringssl.googlesource.com/boringssl. // // BoringSSL Commit: 817ab07ebb53da35afea409ab9328f578492832d /// This function generates the dependencies we want to express. /// /// Importantly, it tolerates the possibility that we are being used as part /// of the Swift toolchain, and so need to use local checkouts of our /// dependencies. 
func generateDependencies() -> [Package.Dependency] { if Context.environment["SWIFTCI_USE_LOCAL_DEPS"] == nil { return [ .package(url: "https://github.com/apple/swift-nio.git", from: "2.80.0") ] } else { return [ .package(path: "../swift-nio") ] } } // This doesn't work when cross-compiling: the privacy manifest will be included in the Bundle and // Foundation will be linked. This is, however, strictly better than unconditionally adding the // resource. #if canImport(Darwin) let includePrivacyManifest = true #else let includePrivacyManifest = false #endif let strictConcurrencyDevelopment = false let strictConcurrencySettings: [SwiftSetting] = { var initialSettings: [SwiftSetting] = [] if strictConcurrencyDevelopment { // -warnings-as-errors here is a workaround so that IDE-based development can // get tripped up on -require-explicit-sendable. initialSettings.append(.unsafeFlags(["-Xfrontend", "-require-explicit-sendable", "-warnings-as-errors"])) } return initialSettings }() // swift-format-ignore: NoBlockComments let package = Package( name: "swift-nio-ssl", products: [ .library(name: "NIOSSL", targets: ["NIOSSL"]), .executable(name: "NIOTLSServer", targets: ["NIOTLSServer"]), .executable(name: "NIOSSLHTTP1Client", targets: ["NIOSSLHTTP1Client"]), /* This target is used only for symbol mangling. It's added and removed automatically because it emits build warnings. 
MANGLE_START .library(name: "CNIOBoringSSL", type: .static, targets: ["CNIOBoringSSL"]), MANGLE_END */ ], dependencies: generateDependencies(), targets: [ .target( name: "CNIOBoringSSL", cSettings: [ .define("_GNU_SOURCE"), .define("_POSIX_C_SOURCE", to: "200112L"), .define("_DARWIN_C_SOURCE"), ] ), .target( name: "CNIOBoringSSLShims", dependencies: [ "CNIOBoringSSL" ], cSettings: [ .define("_GNU_SOURCE") ] ), .target( name: "NIOSSL", dependencies: [ "CNIOBoringSSL", "CNIOBoringSSLShims", .product(name: "NIO", package: "swift-nio"), .product(name: "NIOCore", package: "swift-nio"), .product(name: "NIOConcurrencyHelpers", package: "swift-nio"), .product(name: "NIOTLS", package: "swift-nio"), ], exclude: includePrivacyManifest ? [] : ["PrivacyInfo.xcprivacy"], resources: includePrivacyManifest ? [.copy("PrivacyInfo.xcprivacy")] : [], swiftSettings: strictConcurrencySettings ), .executableTarget( name: "NIOTLSServer", dependencies: [ "NIOSSL", .product(name: "NIOCore", package: "swift-nio"), .product(name: "NIOPosix", package: "swift-nio"), .product(name: "NIOConcurrencyHelpers", package: "swift-nio"), ], exclude: [ "README.md" ], swiftSettings: strictConcurrencySettings ), .executableTarget( name: "NIOSSLHTTP1Client", dependencies: [ "NIOSSL", .product(name: "NIOCore", package: "swift-nio"), .product(name: "NIOPosix", package: "swift-nio"), .product(name: "NIOHTTP1", package: "swift-nio"), .product(name: "NIOFoundationCompat", package: "swift-nio"), ], exclude: [ "README.md" ], swiftSettings: strictConcurrencySettings ), .executableTarget( name: "NIOSSLPerformanceTester", dependencies: [ "NIOSSL", .product(name: "NIOCore", package: "swift-nio"), .product(name: "NIOEmbedded", package: "swift-nio"), .product(name: "NIOTLS", package: "swift-nio"), ], swiftSettings: strictConcurrencySettings ), .testTarget( name: "NIOSSLTests", dependencies: [ "NIOSSL", .product(name: "NIOCore", package: "swift-nio"), .product(name: "NIOEmbedded", package: "swift-nio"), .product(name: 
"NIOPosix", package: "swift-nio"), .product(name: "NIOTLS", package: "swift-nio"), ], swiftSettings: strictConcurrencySettings ), ], cxxLanguageStandard: .cxx17 ) // --- STANDARD CROSS-REPO SETTINGS DO NOT EDIT --- // for target in package.targets { switch target.type { case .regular, .test, .executable: var settings = target.swiftSettings ?? [] // https://github.com/swiftlang/swift-evolution/blob/main/proposals/0444-member-import-visibility.md settings.append(.enableUpcomingFeature("MemberImportVisibility")) target.swiftSettings = settings case .macro, .plugin, .system, .binary: () // not applicable @unknown default: () // we don't know what to do here, do nothing } } // --- END: STANDARD CROSS-REPO SETTINGS DO NOT EDIT --- // ================================================ FILE: README.md ================================================ # SwiftNIO SSL SwiftNIO SSL is a Swift package that contains an implementation of TLS based on BoringSSL. This package allows users of [SwiftNIO](https://github.com/apple/swift-nio) to write protocol clients and servers that use TLS to secure data in flight. The name is inspired primarily by the names of the library this package uses (BoringSSL), and not because we don't know the name of the protocol. We know the protocol is TLS! To get started, check out the [API docs](https://swiftpackageindex.com/apple/swift-nio-ssl/main/documentation/niossl). ## Using SwiftNIO SSL SwiftNIO SSL provides two `ChannelHandler`s to use to secure a data stream: the `NIOSSLClientHandler` and the `NIOSSLServerHandler`. Each of these can be added to a `Channel` to secure the communications on that channel. Additionally, we provide a number of low-level primitives for configuring your TLS connections. These will be shown below. To secure a server connection, you will need a X.509 certificate chain in a file (either PEM or DER, but PEM is far easier), and the associated private key for the leaf certificate. 
These objects can then be wrapped up in a `TLSConfiguration` object that is used to initialize the `ChannelHandler`. For example: ```swift let configuration = TLSConfiguration.makeServerConfiguration( certificateChain: try NIOSSLCertificate.fromPEMFile("cert.pem").map { .certificate($0) }, privateKey: try .privateKey(.init(file: "key.pem", format: .pem)) ) let sslContext = try NIOSSLContext(configuration: configuration) let server = ServerBootstrap(group: group) .childChannelInitializer { channel in // important: The handler must be initialized _inside_ the `childChannelInitializer` let handler = NIOSSLServerHandler(context: sslContext) [...] channel.pipeline.syncOperations.addHandler(handler) [...] } ``` For clients, it is a bit simpler as there is no need to have a certificate chain or private key (though clients *may* have these things). Setup for clients may be done like this: ```swift let configuration = TLSConfiguration.makeClientConfiguration() let sslContext = try NIOSSLContext(configuration: configuration) let client = ClientBootstrap(group: group) .channelInitializer { channel in // important: The handler must be initialized _inside_ the `channelInitializer` let handler = try NIOSSLClientHandler(context: sslContext) [...] channel.pipeline.syncOperations.addHandler(handler) [...] } ``` The most recent versions of SwiftNIO SSL support Swift 6.1 and newer. 
The minimum Swift version supported by SwiftNIO SSL releases are detailed below: SwiftNIO SSL | Minimum Swift Version --------------------|---------------------- `2.0.0 ..< 2.14.0` | 5.0 `2.14.0 ..< 2.19.0` | 5.2 `2.19.0 ..< 2.23.0` | 5.4 `2.23.0 ..< 2.23.2` | 5.5.2 `2.23.2 ..< 2.26.0` | 5.6 `2.26.0 ..< 2.27.0` | 5.7 `2.27.0 ..< 2.29.3` | 5.8 `2.29.3 ..< 2.31.0` | 5.9 `2.31.0 ..< 2.35.0` | 5.10 `2.35.0 ..< 2.37.0` | 6.0 `2.37.0 ..<` | 6.1 ================================================ FILE: SECURITY.md ================================================ # Security Please refer to the security guidelines set out in the [apple/swift-nio](https://github.com/apple/swift-nio) repository: ================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/a_bitstr.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../internal.h" #include "internal.h" int ASN1_BIT_STRING_set(ASN1_BIT_STRING *x, const unsigned char *d, ossl_ssize_t len) { return ASN1_STRING_set(x, d, len); } int asn1_bit_string_length(const ASN1_BIT_STRING *str, uint8_t *out_padding_bits) { int len = str->length; if (str->flags & ASN1_STRING_FLAG_BITS_LEFT) { // If the string is already empty, it cannot have padding bits. *out_padding_bits = len == 0 ? 0 : str->flags & 0x07; return len; } // TODO(https://crbug.com/boringssl/447): If we move this logic to // |ASN1_BIT_STRING_set_bit|, can we remove this representation? 
while (len > 0 && str->data[len - 1] == 0) { len--; } uint8_t padding_bits = 0; if (len > 0) { uint8_t last = str->data[len - 1]; assert(last != 0); for (; padding_bits < 7; padding_bits++) { if (last & (1 << padding_bits)) { break; } } } *out_padding_bits = padding_bits; return len; } int ASN1_BIT_STRING_num_bytes(const ASN1_BIT_STRING *str, size_t *out) { uint8_t padding_bits; int len = asn1_bit_string_length(str, &padding_bits); if (padding_bits != 0) { return 0; } *out = len; return 1; } int i2c_ASN1_BIT_STRING(const ASN1_BIT_STRING *a, unsigned char **pp) { if (a == NULL) { return 0; } uint8_t bits; int len = asn1_bit_string_length(a, &bits); if (len > INT_MAX - 1) { OPENSSL_PUT_ERROR(ASN1, ERR_R_OVERFLOW); return 0; } int ret = 1 + len; if (pp == NULL) { return ret; } uint8_t *p = *pp; *(p++) = bits; OPENSSL_memcpy(p, a->data, len); if (len > 0) { p[len - 1] &= (0xff << bits); } p += len; *pp = p; return ret; } ASN1_BIT_STRING *c2i_ASN1_BIT_STRING(ASN1_BIT_STRING **a, const unsigned char **pp, long len) { ASN1_BIT_STRING *ret = NULL; const unsigned char *p; unsigned char *s; int padding; uint8_t padding_mask; if (len < 1) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_STRING_TOO_SHORT); goto err; } if (len > INT_MAX) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_STRING_TOO_LONG); goto err; } if ((a == NULL) || ((*a) == NULL)) { if ((ret = ASN1_BIT_STRING_new()) == NULL) { return NULL; } } else { ret = (*a); } p = *pp; padding = *(p++); len--; if (padding > 7) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_BIT_STRING_BITS_LEFT); goto err; } // Unused bits in a BIT STRING must be zero. padding_mask = (1 << padding) - 1; if (padding != 0 && (len < 1 || (p[len - 1] & padding_mask) != 0)) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_BIT_STRING_PADDING); goto err; } // We do this to preserve the settings. 
If we modify the settings, via // the _set_bit function, we will recalculate on output ret->flags &= ~(ASN1_STRING_FLAG_BITS_LEFT | 0x07); // clear ret->flags |= (ASN1_STRING_FLAG_BITS_LEFT | padding); // set if (len > 0) { s = reinterpret_cast(OPENSSL_memdup(p, len)); if (s == NULL) { goto err; } p += len; } else { s = NULL; } ret->length = (int)len; OPENSSL_free(ret->data); ret->data = s; ret->type = V_ASN1_BIT_STRING; if (a != NULL) { (*a) = ret; } *pp = p; return ret; err: if ((ret != NULL) && ((a == NULL) || (*a != ret))) { ASN1_BIT_STRING_free(ret); } return NULL; } // These next 2 functions from Goetz Babin-Ebell int ASN1_BIT_STRING_set_bit(ASN1_BIT_STRING *a, int n, int value) { int w, v, iv; unsigned char *c; w = n / 8; v = 1 << (7 - (n & 0x07)); iv = ~v; if (!value) { v = 0; } if (a == NULL) { return 0; } a->flags &= ~(ASN1_STRING_FLAG_BITS_LEFT | 0x07); // clear, set on write if ((a->length < (w + 1)) || (a->data == NULL)) { if (!value) { return 1; // Don't need to set } if (a->data == NULL) { c = (unsigned char *)OPENSSL_malloc(w + 1); } else { c = (unsigned char *)OPENSSL_realloc(a->data, w + 1); } if (c == NULL) { return 0; } if (w + 1 - a->length > 0) { OPENSSL_memset(c + a->length, 0, w + 1 - a->length); } a->data = c; a->length = w + 1; } a->data[w] = ((a->data[w]) & iv) | v; while ((a->length > 0) && (a->data[a->length - 1] == 0)) { a->length--; } return 1; } int ASN1_BIT_STRING_get_bit(const ASN1_BIT_STRING *a, int n) { int w, v; w = n / 8; v = 1 << (7 - (n & 0x07)); if ((a == NULL) || (a->length < (w + 1)) || (a->data == NULL)) { return 0; } return ((a->data[w] & v) != 0); } // Checks if the given bit string contains only bits specified by // the flags vector. Returns 0 if there is at least one bit set in 'a' // which is not specified in 'flags', 1 otherwise. // 'len' is the length of 'flags'. int ASN1_BIT_STRING_check(const ASN1_BIT_STRING *a, const unsigned char *flags, int flags_len) { int i, ok; // Check if there is one bit set at all. 
if (!a || !a->data) { return 1; } // Check each byte of the internal representation of the bit string. ok = 1; for (i = 0; i < a->length && ok; ++i) { unsigned char mask = i < flags_len ? ~flags[i] : 0xff; // We are done if there is an unneeded bit set. ok = (a->data[i] & mask) == 0; } return ok; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/a_bool.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include "../bytestring/internal.h" int i2d_ASN1_BOOLEAN(ASN1_BOOLEAN a, unsigned char **outp) { CBB cbb; if (!CBB_init(&cbb, 3) || // !CBB_add_asn1_bool(&cbb, a != ASN1_BOOLEAN_FALSE)) { CBB_cleanup(&cbb); return -1; } return CBB_finish_i2d(&cbb, outp); } ASN1_BOOLEAN d2i_ASN1_BOOLEAN(ASN1_BOOLEAN *out, const unsigned char **inp, long len) { if (len < 0) { return ASN1_BOOLEAN_NONE; } CBS cbs; CBS_init(&cbs, *inp, (size_t)len); int val; if (!CBS_get_asn1_bool(&cbs, &val)) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_DECODE_ERROR); return ASN1_BOOLEAN_NONE; } ASN1_BOOLEAN ret = val ? ASN1_BOOLEAN_TRUE : ASN1_BOOLEAN_FALSE; if (out != NULL) { *out = ret; } *inp = CBS_data(&cbs); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/a_d2i_fp.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html */

#include
#include
#include
#include
#include

// NOTE(review): the #include targets above (and the reinterpret_cast template
// argument below) were lost in extraction — restore from upstream BoringSSL
// crypto/asn1/a_d2i_fp.cc before compiling.

// Reads a single DER element from |in| and decodes it according to the
// template |it|. On success returns the decoded object (into |x| if non-NULL
// per the usual d2i convention); returns NULL on read or parse failure.
void *ASN1_item_d2i_bio(const ASN1_ITEM *it, BIO *in, void *x) {
  uint8_t *data;
  size_t len;
  // Historically, this function did not impose a limit in OpenSSL and is used
  // to read CRLs, so we leave this without an external bound.
  if (!BIO_read_asn1(in, &data, &len, INT_MAX)) {
    return NULL;
  }
  const uint8_t *ptr = data;
  void *ret = ASN1_item_d2i(reinterpret_cast(x), &ptr, len, it);
  // |data| is owned by this function; the decoder copied what it needed.
  OPENSSL_free(data);
  return ret;
}

// FILE* variant: wraps |in| in a non-owning BIO (BIO_NOCLOSE, so the caller's
// FILE is not closed) and delegates to |ASN1_item_d2i_bio|. Pushes
// ERR_R_BUF_LIB and returns NULL if the BIO cannot be created.
void *ASN1_item_d2i_fp(const ASN1_ITEM *it, FILE *in, void *x) {
  BIO *b = BIO_new_fp(in, BIO_NOCLOSE);
  if (b == NULL) {
    OPENSSL_PUT_ERROR(ASN1, ERR_R_BUF_LIB);
    return NULL;
  }
  void *ret = ASN1_item_d2i_bio(it, b, x);
  BIO_free(b);
  return ret;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/a_dup.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include
#include
#include

// ASN1_ITEM version of dup: this follows the model above except we don't
// need to allocate the buffer. At some point this could be rewritten to
// directly dup the underlying structure instead of doing an encode and
// decode.
void *ASN1_item_dup(const ASN1_ITEM *it, void *x) { unsigned char *b = NULL; const unsigned char *p; long i; void *ret; if (x == NULL) { return NULL; } i = ASN1_item_i2d(reinterpret_cast(x), &b, it); if (b == NULL) { return NULL; } p = b; ret = ASN1_item_d2i(NULL, &p, i, it); OPENSSL_free(b); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/a_gentm.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "internal.h" int asn1_generalizedtime_to_tm(struct tm *tm, const ASN1_GENERALIZEDTIME *d) { if (d->type != V_ASN1_GENERALIZEDTIME) { return 0; } CBS cbs; CBS_init(&cbs, d->data, (size_t)d->length); if (!CBS_parse_generalized_time(&cbs, tm, /*allow_timezone_offset=*/0)) { return 0; } return 1; } int ASN1_GENERALIZEDTIME_check(const ASN1_GENERALIZEDTIME *d) { return asn1_generalizedtime_to_tm(NULL, d); } int ASN1_GENERALIZEDTIME_set_string(ASN1_GENERALIZEDTIME *s, const char *str) { size_t len = strlen(str); CBS cbs; CBS_init(&cbs, (const uint8_t *)str, len); if (!CBS_parse_generalized_time(&cbs, /*out_tm=*/NULL, /*allow_timezone_offset=*/0)) { return 0; } if (s != NULL) { if (!ASN1_STRING_set(s, str, len)) { return 0; } s->type = V_ASN1_GENERALIZEDTIME; } return 1; } ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_set(ASN1_GENERALIZEDTIME *s, int64_t posix_time) { return ASN1_GENERALIZEDTIME_adj(s, posix_time, 0, 0); } ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_adj(ASN1_GENERALIZEDTIME *s, int64_t posix_time, int offset_day, long offset_sec) { struct tm data; if (!OPENSSL_posix_to_tm(posix_time, &data)) { return NULL; } if 
(offset_day || offset_sec) { if (!OPENSSL_gmtime_adj(&data, offset_day, offset_sec)) { return NULL; } } if (data.tm_year < 0 - 1900 || data.tm_year > 9999 - 1900) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_TIME_VALUE); return NULL; } char buf[16]; int ret = snprintf(buf, sizeof(buf), "%04d%02d%02d%02d%02d%02dZ", data.tm_year + 1900, data.tm_mon + 1, data.tm_mday, data.tm_hour, data.tm_min, data.tm_sec); // |snprintf| must write exactly 15 bytes (plus the NUL) to the buffer. BSSL_CHECK(ret == static_cast(sizeof(buf) - 1)); int free_s = 0; if (s == NULL) { free_s = 1; s = ASN1_UTCTIME_new(); if (s == NULL) { return NULL; } } if (!ASN1_STRING_set(s, buf, strlen(buf))) { if (free_s) { ASN1_UTCTIME_free(s); } return NULL; } s->type = V_ASN1_GENERALIZEDTIME; return s; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/a_i2d_fp.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include int ASN1_item_i2d_fp(const ASN1_ITEM *it, FILE *out, void *x) { BIO *b = BIO_new_fp(out, BIO_NOCLOSE); if (b == NULL) { OPENSSL_PUT_ERROR(ASN1, ERR_R_BUF_LIB); return 0; } int ret = ASN1_item_i2d_bio(it, b, x); BIO_free(b); return ret; } int ASN1_item_i2d_bio(const ASN1_ITEM *it, BIO *out, void *x) { unsigned char *b = NULL; int n = ASN1_item_i2d(reinterpret_cast(x), &b, it); if (b == NULL) { return 0; } int ret = BIO_write_all(out, b, n); OPENSSL_free(b); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/a_int.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. 
* * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include "../internal.h" ASN1_INTEGER *ASN1_INTEGER_dup(const ASN1_INTEGER *x) { return ASN1_STRING_dup(x); } int ASN1_INTEGER_cmp(const ASN1_INTEGER *x, const ASN1_INTEGER *y) { // Compare signs. int neg = x->type & V_ASN1_NEG; if (neg != (y->type & V_ASN1_NEG)) { return neg ? -1 : 1; } int ret = ASN1_STRING_cmp(x, y); if (neg) { // This could be |-ret|, but |ASN1_STRING_cmp| is not forbidden from // returning |INT_MIN|. if (ret < 0) { return 1; } else if (ret > 0) { return -1; } else { return 0; } } return ret; } // negate_twos_complement negates |len| bytes from |buf| in-place, interpreted // as a signed, big-endian two's complement value. static void negate_twos_complement(uint8_t *buf, size_t len) { uint8_t borrow = 0; for (size_t i = len - 1; i < len; i--) { uint8_t t = buf[i]; buf[i] = 0u - borrow - t; borrow |= t != 0; } } static int is_all_zeros(const uint8_t *in, size_t len) { for (size_t i = 0; i < len; i++) { if (in[i] != 0) { return 0; } } return 1; } int i2c_ASN1_INTEGER(const ASN1_INTEGER *in, unsigned char **outp) { if (in == NULL) { return 0; } // |ASN1_INTEGER|s should be represented minimally, but it is possible to // construct invalid ones. Skip leading zeros so this does not produce an // invalid encoding or break invariants. CBS cbs; CBS_init(&cbs, in->data, in->length); while (CBS_len(&cbs) > 0 && CBS_data(&cbs)[0] == 0) { CBS_skip(&cbs, 1); } int is_negative = (in->type & V_ASN1_NEG) != 0; size_t pad; CBS copy = cbs; uint8_t msb; if (!CBS_get_u8(©, &msb)) { // Zero is represented as a single byte. 
is_negative = 0; pad = 1; } else if (is_negative) { // 0x80...01 through 0xff...ff have a two's complement of 0x7f...ff // through 0x00...01 and need an extra byte to be negative. // 0x01...00 through 0x80...00 have a two's complement of 0xfe...ff // through 0x80...00 and can be negated as-is. pad = msb > 0x80 || (msb == 0x80 && !is_all_zeros(CBS_data(©), CBS_len(©))); } else { // If the high bit is set, the signed representation needs an extra // byte to be positive. pad = (msb & 0x80) != 0; } if (CBS_len(&cbs) > INT_MAX - pad) { OPENSSL_PUT_ERROR(ASN1, ERR_R_OVERFLOW); return 0; } int len = (int)(pad + CBS_len(&cbs)); assert(len > 0); if (outp == NULL) { return len; } if (pad) { (*outp)[0] = 0; } OPENSSL_memcpy(*outp + pad, CBS_data(&cbs), CBS_len(&cbs)); if (is_negative) { negate_twos_complement(*outp, len); assert((*outp)[0] >= 0x80); } else { assert((*outp)[0] < 0x80); } *outp += len; return len; } ASN1_INTEGER *c2i_ASN1_INTEGER(ASN1_INTEGER **out, const unsigned char **inp, long len) { // This function can handle lengths up to INT_MAX - 1, but the rest of the // legacy ASN.1 code mixes integer types, so avoid exposing it to // ASN1_INTEGERS with larger lengths. if (len < 0 || len > INT_MAX / 2) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_TOO_LONG); return NULL; } CBS cbs; CBS_init(&cbs, *inp, (size_t)len); int is_negative; if (!CBS_is_valid_asn1_integer(&cbs, &is_negative)) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_INTEGER); return NULL; } ASN1_INTEGER *ret = NULL; if (out == NULL || *out == NULL) { ret = ASN1_INTEGER_new(); if (ret == NULL) { return NULL; } } else { ret = *out; } // Convert to |ASN1_INTEGER|'s sign-and-magnitude representation. First, // determine the size needed for a minimal result. if (is_negative) { // 0xff00...01 through 0xff7f..ff have a two's complement of 0x00ff...ff // through 0x000100...001 and need one leading zero removed. 
0x8000...00 // through 0xff00...00 have a two's complement of 0x8000...00 through // 0x0100...00 and will be minimally-encoded as-is. if (CBS_len(&cbs) > 0 && CBS_data(&cbs)[0] == 0xff && !is_all_zeros(CBS_data(&cbs) + 1, CBS_len(&cbs) - 1)) { CBS_skip(&cbs, 1); } } else { // Remove the leading zero byte, if any. if (CBS_len(&cbs) > 0 && CBS_data(&cbs)[0] == 0x00) { CBS_skip(&cbs, 1); } } if (!ASN1_STRING_set(ret, CBS_data(&cbs), CBS_len(&cbs))) { goto err; } if (is_negative) { ret->type = V_ASN1_NEG_INTEGER; negate_twos_complement(ret->data, ret->length); } else { ret->type = V_ASN1_INTEGER; } // The value should be minimally-encoded. assert(ret->length == 0 || ret->data[0] != 0); // Zero is not negative. assert(!is_negative || ret->length > 0); *inp += len; if (out != NULL) { *out = ret; } return ret; err: if (ret != NULL && (out == NULL || *out != ret)) { ASN1_INTEGER_free(ret); } return NULL; } int ASN1_INTEGER_set_int64(ASN1_INTEGER *a, int64_t v) { if (v >= 0) { return ASN1_INTEGER_set_uint64(a, (uint64_t)v); } if (!ASN1_INTEGER_set_uint64(a, 0 - (uint64_t)v)) { return 0; } a->type = V_ASN1_NEG_INTEGER; return 1; } int ASN1_ENUMERATED_set_int64(ASN1_ENUMERATED *a, int64_t v) { if (v >= 0) { return ASN1_ENUMERATED_set_uint64(a, (uint64_t)v); } if (!ASN1_ENUMERATED_set_uint64(a, 0 - (uint64_t)v)) { return 0; } a->type = V_ASN1_NEG_ENUMERATED; return 1; } int ASN1_INTEGER_set(ASN1_INTEGER *a, long v) { static_assert(sizeof(long) <= sizeof(int64_t), "long fits in int64_t"); return ASN1_INTEGER_set_int64(a, v); } int ASN1_ENUMERATED_set(ASN1_ENUMERATED *a, long v) { static_assert(sizeof(long) <= sizeof(int64_t), "long fits in int64_t"); return ASN1_ENUMERATED_set_int64(a, v); } static int asn1_string_set_uint64(ASN1_STRING *out, uint64_t v, int type) { uint8_t buf[sizeof(uint64_t)]; CRYPTO_store_u64_be(buf, v); size_t leading_zeros; for (leading_zeros = 0; leading_zeros < sizeof(buf); leading_zeros++) { if (buf[leading_zeros] != 0) { break; } } if 
(!ASN1_STRING_set(out, buf + leading_zeros, sizeof(buf) - leading_zeros)) { return 0; } out->type = type; return 1; } int ASN1_INTEGER_set_uint64(ASN1_INTEGER *out, uint64_t v) { return asn1_string_set_uint64(out, v, V_ASN1_INTEGER); } int ASN1_ENUMERATED_set_uint64(ASN1_ENUMERATED *out, uint64_t v) { return asn1_string_set_uint64(out, v, V_ASN1_ENUMERATED); } static int asn1_string_get_abs_uint64(uint64_t *out, const ASN1_STRING *a, int type) { if ((a->type & ~V_ASN1_NEG) != type) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_WRONG_INTEGER_TYPE); return 0; } uint8_t buf[sizeof(uint64_t)] = {0}; if (a->length > (int)sizeof(buf)) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_INTEGER); return 0; } OPENSSL_memcpy(buf + sizeof(buf) - a->length, a->data, a->length); *out = CRYPTO_load_u64_be(buf); return 1; } static int asn1_string_get_uint64(uint64_t *out, const ASN1_STRING *a, int type) { if (!asn1_string_get_abs_uint64(out, a, type)) { return 0; } if (a->type & V_ASN1_NEG) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_INTEGER); return 0; } return 1; } int ASN1_INTEGER_get_uint64(uint64_t *out, const ASN1_INTEGER *a) { return asn1_string_get_uint64(out, a, V_ASN1_INTEGER); } int ASN1_ENUMERATED_get_uint64(uint64_t *out, const ASN1_ENUMERATED *a) { return asn1_string_get_uint64(out, a, V_ASN1_ENUMERATED); } static int asn1_string_get_int64(int64_t *out, const ASN1_STRING *a, int type) { uint64_t v; if (!asn1_string_get_abs_uint64(&v, a, type)) { return 0; } int64_t i64; int fits_in_i64; // Check |v != 0| to handle manually-constructed negative zeros. 
if ((a->type & V_ASN1_NEG) && v != 0) { i64 = (int64_t)(0u - v); fits_in_i64 = i64 < 0; } else { i64 = (int64_t)v; fits_in_i64 = i64 >= 0; } if (!fits_in_i64) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_INTEGER); return 0; } *out = i64; return 1; } int ASN1_INTEGER_get_int64(int64_t *out, const ASN1_INTEGER *a) { return asn1_string_get_int64(out, a, V_ASN1_INTEGER); } int ASN1_ENUMERATED_get_int64(int64_t *out, const ASN1_ENUMERATED *a) { return asn1_string_get_int64(out, a, V_ASN1_ENUMERATED); } static long asn1_string_get_long(const ASN1_STRING *a, int type) { if (a == NULL) { return 0; } int64_t v; if (!asn1_string_get_int64(&v, a, type) || // v < LONG_MIN || v > LONG_MAX) { // This function's return value does not distinguish overflow from -1. ERR_clear_error(); return -1; } return (long)v; } long ASN1_INTEGER_get(const ASN1_INTEGER *a) { return asn1_string_get_long(a, V_ASN1_INTEGER); } long ASN1_ENUMERATED_get(const ASN1_ENUMERATED *a) { return asn1_string_get_long(a, V_ASN1_ENUMERATED); } static ASN1_STRING *bn_to_asn1_string(const BIGNUM *bn, ASN1_STRING *ai, int type) { ASN1_INTEGER *ret; if (ai == NULL) { ret = ASN1_STRING_type_new(type); } else { ret = ai; } int len; if (ret == NULL) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR); goto err; } if (BN_is_negative(bn) && !BN_is_zero(bn)) { ret->type = type | V_ASN1_NEG; } else { ret->type = type; } len = BN_num_bytes(bn); if (!ASN1_STRING_set(ret, NULL, len) || !BN_bn2bin_padded(ret->data, len, bn)) { goto err; } return ret; err: if (ret != ai) { ASN1_STRING_free(ret); } return NULL; } ASN1_INTEGER *BN_to_ASN1_INTEGER(const BIGNUM *bn, ASN1_INTEGER *ai) { return bn_to_asn1_string(bn, ai, V_ASN1_INTEGER); } ASN1_ENUMERATED *BN_to_ASN1_ENUMERATED(const BIGNUM *bn, ASN1_ENUMERATED *ai) { return bn_to_asn1_string(bn, ai, V_ASN1_ENUMERATED); } static BIGNUM *asn1_string_to_bn(const ASN1_STRING *ai, BIGNUM *bn, int type) { if ((ai->type & ~V_ASN1_NEG) != type) { OPENSSL_PUT_ERROR(ASN1, 
ASN1_R_WRONG_INTEGER_TYPE); return NULL; } BIGNUM *ret; if ((ret = BN_bin2bn(ai->data, ai->length, bn)) == NULL) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_BN_LIB); } else if (ai->type & V_ASN1_NEG) { BN_set_negative(ret, 1); } return ret; } BIGNUM *ASN1_INTEGER_to_BN(const ASN1_INTEGER *ai, BIGNUM *bn) { return asn1_string_to_bn(ai, bn, V_ASN1_INTEGER); } BIGNUM *ASN1_ENUMERATED_to_BN(const ASN1_ENUMERATED *ai, BIGNUM *bn) { return asn1_string_to_bn(ai, bn, V_ASN1_ENUMERATED); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/a_mbstr.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "../bytestring/internal.h" #include "internal.h" // These functions take a string in UTF8, ASCII or multibyte form and a mask // of permissible ASN1 string types. It then works out the minimal type // (using the order Printable < IA5 < T61 < BMP < Universal < UTF8) and // creates a string of the correct type with the supplied data. Yes this is // horrible: it has to be :-( The 'ncopy' form checks minimum and maximum // size limits too. 
// ASN1_mbstring_copy is |ASN1_mbstring_ncopy| with no length limits.
int ASN1_mbstring_copy(ASN1_STRING **out, const unsigned char *in,
                       ossl_ssize_t len, int inform, unsigned long mask) {
  return ASN1_mbstring_ncopy(out, in, len, inform, mask, /*minsize=*/0,
                             /*maxsize=*/0);
}

OPENSSL_DECLARE_ERROR_REASON(ASN1, INVALID_BMPSTRING)
OPENSSL_DECLARE_ERROR_REASON(ASN1, INVALID_UNIVERSALSTRING)
OPENSSL_DECLARE_ERROR_REASON(ASN1, INVALID_UTF8STRING)

// ASN1_mbstring_ncopy converts |in| (in encoding |inform|) into the minimal
// ASN.1 string type permitted by |mask|, enforcing the character-count bounds
// |minsize|/|maxsize| (0 disables a bound). Returns the chosen V_ASN1_* type
// on success and -1 on error. A |len| of -1 means NUL-terminated.
int ASN1_mbstring_ncopy(ASN1_STRING **out, const unsigned char *in,
                        ossl_ssize_t len, int inform, unsigned long mask,
                        ossl_ssize_t minsize, ossl_ssize_t maxsize) {
  if (len == -1) {
    len = strlen((const char *)in);
  }
  if (!mask) {
    mask = DIRSTRING_TYPE;
  }

  // Select the decoder for the input encoding.
  int (*decode_func)(CBS *, uint32_t *);
  int error;
  switch (inform) {
    case MBSTRING_BMP:
      decode_func = CBS_get_ucs2_be;
      error = ASN1_R_INVALID_BMPSTRING;
      break;
    case MBSTRING_UNIV:
      decode_func = CBS_get_utf32_be;
      error = ASN1_R_INVALID_UNIVERSALSTRING;
      break;
    case MBSTRING_UTF8:
      decode_func = CBS_get_utf8;
      error = ASN1_R_INVALID_UTF8STRING;
      break;
    case MBSTRING_ASC:
      decode_func = CBS_get_latin1;
      error = ERR_R_INTERNAL_ERROR;  // Latin-1 inputs are never invalid.
      break;
    default:
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_UNKNOWN_FORMAT);
      return -1;
  }

  // Check |minsize| and |maxsize| and work out the minimal type, if any.
  CBS cbs;
  CBS_init(&cbs, in, len);
  size_t utf8_len = 0, nchar = 0;
  while (CBS_len(&cbs) != 0) {
    uint32_t c;
    if (!decode_func(&cbs, &c)) {
      OPENSSL_PUT_ERROR(ASN1, error);
      return -1;
    }
    if (nchar == 0 && (inform == MBSTRING_BMP || inform == MBSTRING_UNIV) &&
        c == 0xfeff) {
      // Reject byte-order mark. We could drop it but that would mean
      // adding ambiguity around whether a BOM was included or not when
      // matching strings.
      //
      // For a little-endian UCS-2 string, the BOM will appear as 0xfffe
      // and will be rejected as noncharacter, below.
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_CHARACTERS);
      return -1;
    }

    // Update which output formats are still possible.
    if ((mask & B_ASN1_PRINTABLESTRING) && !asn1_is_printable(c)) {
      mask &= ~B_ASN1_PRINTABLESTRING;
    }
    if ((mask & B_ASN1_IA5STRING) && (c > 127)) {
      mask &= ~B_ASN1_IA5STRING;
    }
    if ((mask & B_ASN1_T61STRING) && (c > 0xff)) {
      mask &= ~B_ASN1_T61STRING;
    }
    if ((mask & B_ASN1_BMPSTRING) && (c > 0xffff)) {
      mask &= ~B_ASN1_BMPSTRING;
    }
    if (!mask) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_CHARACTERS);
      return -1;
    }

    nchar++;
    utf8_len += CBB_get_utf8_len(c);
    if (maxsize > 0 && nchar > (size_t)maxsize) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_STRING_TOO_LONG);
      ERR_add_error_dataf("maxsize=%zu", (size_t)maxsize);
      return -1;
    }
  }

  if (minsize > 0 && nchar < (size_t)minsize) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_STRING_TOO_SHORT);
    ERR_add_error_dataf("minsize=%zu", (size_t)minsize);
    return -1;
  }

  // Now work out output format and string type
  int str_type;
  int (*encode_func)(CBB *, uint32_t) = CBB_add_latin1;
  size_t size_estimate = nchar;
  int outform = MBSTRING_ASC;
  if (mask & B_ASN1_PRINTABLESTRING) {
    str_type = V_ASN1_PRINTABLESTRING;
  } else if (mask & B_ASN1_IA5STRING) {
    str_type = V_ASN1_IA5STRING;
  } else if (mask & B_ASN1_T61STRING) {
    str_type = V_ASN1_T61STRING;
  } else if (mask & B_ASN1_BMPSTRING) {
    str_type = V_ASN1_BMPSTRING;
    outform = MBSTRING_BMP;
    encode_func = CBB_add_ucs2_be;
    size_estimate = 2 * nchar;
  } else if (mask & B_ASN1_UNIVERSALSTRING) {
    str_type = V_ASN1_UNIVERSALSTRING;
    encode_func = CBB_add_utf32_be;
    size_estimate = 4 * nchar;
    outform = MBSTRING_UNIV;
  } else if (mask & B_ASN1_UTF8STRING) {
    str_type = V_ASN1_UTF8STRING;
    outform = MBSTRING_UTF8;
    encode_func = CBB_add_utf8;
    size_estimate = utf8_len;
  } else {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_CHARACTERS);
    return -1;
  }

  // With a NULL |out|, the caller only wanted the type.
  if (!out) {
    return str_type;
  }

  int free_dest = 0;
  ASN1_STRING *dest;
  if (*out) {
    dest = *out;
  } else {
    free_dest = 1;
    dest = ASN1_STRING_type_new(str_type);
    if (!dest) {
      return -1;
    }
  }

  CBB cbb;
  CBB_zero(&cbb);

  // If both the same type just copy across
  uint8_t *data = NULL;
  size_t data_len = 0;
  if (inform == outform) {
    if (!ASN1_STRING_set(dest, in, len)) {
      goto err;
    }
    dest->type = str_type;
    *out = dest;
    return str_type;
  }

  // Otherwise, re-encode character by character.
  if (!CBB_init(&cbb, size_estimate + 1)) {
    goto err;
  }
  CBS_init(&cbs, in, len);
  while (CBS_len(&cbs) != 0) {
    uint32_t c;
    if (!decode_func(&cbs, &c) || !encode_func(&cbb, c)) {
      OPENSSL_PUT_ERROR(ASN1, ERR_R_INTERNAL_ERROR);
      goto err;
    }
  }
  if (/* OpenSSL historically NUL-terminated this value with a single byte,
       * even for |MBSTRING_BMP| and |MBSTRING_UNIV|. */
      !CBB_add_u8(&cbb, 0) ||        //
      !CBB_finish(&cbb, &data, &data_len) ||  //
      data_len < 1 ||                //
      data_len > INT_MAX) {
    OPENSSL_PUT_ERROR(ASN1, ERR_R_INTERNAL_ERROR);
    OPENSSL_free(data);
    goto err;
  }
  dest->type = str_type;
  // Length excludes the NUL terminator added above.
  ASN1_STRING_set0(dest, data, (int)data_len - 1);
  *out = dest;
  return str_type;

err:
  if (free_dest) {
    ASN1_STRING_free(dest);
  }
  CBB_cleanup(&cbb);
  return -1;
}

// asn1_is_printable returns one if |value| is allowed in an ASN.1
// PrintableString (alphanumerics plus a small punctuation set).
int asn1_is_printable(uint32_t value) {
  if (value > 0x7f) {
    return 0;
  }
  return OPENSSL_isalnum(value) ||  //
         value == ' ' || value == '\'' || value == '(' || value == ')' ||
         value == '+' || value == ',' || value == '-' || value == '.' ||
         value == '/' || value == ':' || value == '=' || value == '?';
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/a_object.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the angle-bracketed header names of the following #include
// directives were lost in extraction; restore from upstream a_object.cc.
#include
#include
#include
#include
#include
#include
#include
#include "../bytestring/internal.h"
#include "../internal.h"
#include "internal.h"

// i2d_ASN1_OBJECT DER-encodes |in|, writing via |outp| per the i2d
// convention; returns the encoded length or -1 on error.
int i2d_ASN1_OBJECT(const ASN1_OBJECT *in, unsigned char **outp) {
  if (in == NULL) {
    OPENSSL_PUT_ERROR(ASN1, ERR_R_PASSED_NULL_PARAMETER);
    return -1;
  }
  if (in->length <= 0) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_OBJECT);
    return -1;
  }
  CBB cbb, child;
  if (!CBB_init(&cbb, (size_t)in->length + 2) ||
      !CBB_add_asn1(&cbb, &child, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&child, in->data, in->length)) {
    CBB_cleanup(&cbb);
    return -1;
  }
  return CBB_finish_i2d(&cbb, outp);
}

// i2t_ASN1_OBJECT writes a textual representation of |a| into |buf|.
int i2t_ASN1_OBJECT(char *buf, int buf_len, const ASN1_OBJECT *a) {
  return OBJ_obj2txt(buf, buf_len, a, 0);
}

// write_str writes the NUL-terminated |str| to |bp|, returning the length
// written or -1 on error.
static int write_str(BIO *bp, const char *str) {
  size_t len = strlen(str);
  if (len > INT_MAX) {
    OPENSSL_PUT_ERROR(ASN1, ERR_R_OVERFLOW);
    return -1;
  }
  return BIO_write(bp, str, (int)len) == (int)len ? (int)len : -1;
}

// i2a_ASN1_OBJECT prints |a| in text form to |bp| ("NULL" for a NULL object).
int i2a_ASN1_OBJECT(BIO *bp, const ASN1_OBJECT *a) {
  if (a == NULL || a->data == NULL) {
    return write_str(bp, "NULL");
  }

  char buf[80], *allocated = NULL;
  const char *str = buf;
  int len = i2t_ASN1_OBJECT(buf, sizeof(buf), a);
  if (len > (int)sizeof(buf) - 1) {
    // The input was truncated. Allocate a buffer that fits.
    // NOTE(review): the template argument of this reinterpret_cast (likely
    // |char *|) was stripped in extraction; restore from upstream.
    allocated = reinterpret_cast(OPENSSL_malloc(len + 1));
    if (allocated == NULL) {
      return -1;
    }
    len = i2t_ASN1_OBJECT(allocated, len + 1, a);
    str = allocated;
  }
  if (len <= 0) {
    str = "";
  }

  int ret = write_str(bp, str);
  OPENSSL_free(allocated);
  return ret;
}

// d2i_ASN1_OBJECT parses a tagged OBJECT IDENTIFIER from |*inp| per the d2i
// convention, advancing |*inp| past the consumed bytes on success.
ASN1_OBJECT *d2i_ASN1_OBJECT(ASN1_OBJECT **out, const unsigned char **inp,
                             long len) {
  if (len < 0) {
    return NULL;
  }
  CBS cbs, child;
  CBS_init(&cbs, *inp, (size_t)len);
  if (!CBS_get_asn1(&cbs, &child, CBS_ASN1_OBJECT)) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_DECODE_ERROR);
    return NULL;
  }
  const uint8_t *contents = CBS_data(&child);
  ASN1_OBJECT *ret = c2i_ASN1_OBJECT(out, &contents, CBS_len(&child));
  if (ret != NULL) {
    // |c2i_ASN1_OBJECT| should have consumed the entire input.
    assert(CBS_data(&cbs) == contents);
    *inp = CBS_data(&cbs);
  }
  return ret;
}

// c2i_ASN1_OBJECT parses the |len| content bytes of an OID (no tag/length)
// from |*inp|, validating the encoding first.
ASN1_OBJECT *c2i_ASN1_OBJECT(ASN1_OBJECT **out, const unsigned char **inp,
                             long len) {
  if (len < 0) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_OBJECT_ENCODING);
    return NULL;
  }
  CBS cbs;
  CBS_init(&cbs, *inp, (size_t)len);
  if (!CBS_is_valid_asn1_oid(&cbs)) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_OBJECT_ENCODING);
    return NULL;
  }
  ASN1_OBJECT *ret = ASN1_OBJECT_create(NID_undef, *inp, (size_t)len,
                                        /*sn=*/NULL, /*ln=*/NULL);
  if (ret == NULL) {
    return NULL;
  }
  if (out != NULL) {
    ASN1_OBJECT_free(*out);
    *out = ret;
  }
  *inp += len;  // All bytes were consumed.
  return ret;
}

// ASN1_OBJECT_new allocates an empty, dynamically-owned ASN1_OBJECT.
ASN1_OBJECT *ASN1_OBJECT_new(void) {
  ASN1_OBJECT *ret;
  ret = (ASN1_OBJECT *)OPENSSL_malloc(sizeof(ASN1_OBJECT));
  if (ret == NULL) {
    return NULL;
  }
  ret->length = 0;
  ret->data = NULL;
  ret->nid = 0;
  ret->sn = NULL;
  ret->ln = NULL;
  ret->flags = ASN1_OBJECT_FLAG_DYNAMIC;
  return ret;
}

// ASN1_OBJECT_free releases |a| and whichever of its fields are marked as
// dynamically owned by the flag bits.
void ASN1_OBJECT_free(ASN1_OBJECT *a) {
  if (a == NULL) {
    return;
  }
  if (a->flags & ASN1_OBJECT_FLAG_DYNAMIC_STRINGS) {
    OPENSSL_free((void *)a->sn);
    OPENSSL_free((void *)a->ln);
    a->sn = a->ln = NULL;
  }
  if (a->flags & ASN1_OBJECT_FLAG_DYNAMIC_DATA) {
    OPENSSL_free((void *)a->data);
    a->data = NULL;
    a->length = 0;
  }
  if (a->flags & ASN1_OBJECT_FLAG_DYNAMIC) {
    OPENSSL_free(a);
  }
}

// ASN1_OBJECT_create builds a fully-dynamic ASN1_OBJECT by duplicating the
// supplied fields (via |OBJ_dup| of a stack-local template).
ASN1_OBJECT *ASN1_OBJECT_create(int nid, const unsigned char *data, size_t len,
                                const char *sn, const char *ln) {
  if (len > INT_MAX) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_STRING_TOO_LONG);
    return NULL;
  }
  ASN1_OBJECT o;
  o.sn = sn;
  o.ln = ln;
  o.data = data;
  o.nid = nid;
  o.length = (int)len;
  o.flags = ASN1_OBJECT_FLAG_DYNAMIC | ASN1_OBJECT_FLAG_DYNAMIC_STRINGS |
            ASN1_OBJECT_FLAG_DYNAMIC_DATA;
  return OBJ_dup(&o);
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/a_octet.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): angle-bracketed header names stripped in extraction; restore
// from upstream a_octet.cc.
#include
#include
#include

// Thin aliases: an ASN1_OCTET_STRING is an ASN1_STRING, so these forward to
// the generic string helpers.
ASN1_OCTET_STRING *ASN1_OCTET_STRING_dup(const ASN1_OCTET_STRING *x) {
  return ASN1_STRING_dup(x);
}

int ASN1_OCTET_STRING_cmp(const ASN1_OCTET_STRING *a,
                          const ASN1_OCTET_STRING *b) {
  return ASN1_STRING_cmp(a, b);
}

int ASN1_OCTET_STRING_set(ASN1_OCTET_STRING *x, const unsigned char *d,
                          int len) {
  return ASN1_STRING_set(x, d, len);
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/a_strex.cc
================================================
/*
 * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): angle-bracketed header names stripped in extraction; restore
// from upstream a_strex.cc.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "../bytestring/internal.h"
#include "../internal.h"
#include "internal.h"

#define ESC_FLAGS \
  (ASN1_STRFLGS_ESC_2253 | ASN1_STRFLGS_ESC_QUOTE | ASN1_STRFLGS_ESC_CTRL | \
   ASN1_STRFLGS_ESC_MSB)

// maybe_write writes |len| bytes of |buf| to |out|, or just reports success
// when |out| is NULL (length-measuring mode).
static int maybe_write(BIO *out, const void *buf, int len) {
  // If |out| is NULL, ignore the output but report the length.
  return out == NULL || BIO_write(out, buf, len) == len;
}

static int is_control_character(unsigned char c) {
  return c < 32 || c == 127;
}

// do_esc_char writes one (possibly escaped) character to |out| per the
// ASN1_STRFLGS_* escaping rules, returning the number of bytes emitted or -1
// on error. |is_first|/|is_last| drive the RFC 2253 positional rules; a
// needs-quoting character sets |*do_quotes| when quoting is preferred.
static int do_esc_char(uint32_t c, unsigned long flags, char *do_quotes,
                       BIO *out, int is_first, int is_last) {
  // |c| is a |uint32_t| because, depending on |ASN1_STRFLGS_UTF8_CONVERT|,
  // we may be escaping bytes or Unicode codepoints.
  char buf[16];  // Large enough for "\\W01234567".
  unsigned char u8 = (unsigned char)c;
  if (c > 0xffff) {
    snprintf(buf, sizeof(buf), "\\W%08" PRIX32, c);
  } else if (c > 0xff) {
    snprintf(buf, sizeof(buf), "\\U%04" PRIX32, c);
  } else if ((flags & ASN1_STRFLGS_ESC_MSB) && c > 0x7f) {
    snprintf(buf, sizeof(buf), "\\%02X", c);
  } else if ((flags & ASN1_STRFLGS_ESC_CTRL) && is_control_character(c)) {
    snprintf(buf, sizeof(buf), "\\%02X", c);
  } else if (flags & ASN1_STRFLGS_ESC_2253) {
    // See RFC 2253, sections 2.4 and 4.
    if (c == '\\' || c == '"') {
      // Quotes and backslashes are always escaped, quoted or not.
      snprintf(buf, sizeof(buf), "\\%c", (int)c);
    } else if (c == ',' || c == '+' || c == '<' || c == '>' || c == ';' ||
               (is_first && (c == ' ' || c == '#')) ||
               (is_last && (c == ' '))) {
      if (flags & ASN1_STRFLGS_ESC_QUOTE) {
        // No need to escape, just tell the caller to quote.
        if (do_quotes != NULL) {
          *do_quotes = 1;
        }
        return maybe_write(out, &u8, 1) ? 1 : -1;
      }
      snprintf(buf, sizeof(buf), "\\%c", (int)c);
    } else {
      return maybe_write(out, &u8, 1) ? 1 : -1;
    }
  } else if ((flags & ESC_FLAGS) && c == '\\') {
    // If any escape flags are set, also escape backslashes.
    snprintf(buf, sizeof(buf), "\\%c", (int)c);
  } else {
    return maybe_write(out, &u8, 1) ? 1 : -1;
  }

  static_assert(sizeof(buf) < INT_MAX, "len may not fit in int");
  int len = (int)strlen(buf);
  return maybe_write(out, buf, len) ? len : -1;
}

// This function sends each character in a buffer to do_esc_char(). It
// interprets the content formats and converts to or from UTF8 as
// appropriate.
// do_buf decodes |buf| (in encoding |encoding|) character by character and
// escapes each via |do_esc_char|, optionally converting to UTF-8 first.
// Returns the total number of bytes produced, or -1 on error.
static int do_buf(const unsigned char *buf, int buflen, int encoding,
                  unsigned long flags, char *quotes, BIO *out) {
  int (*get_char)(CBS *cbs, uint32_t *out);
  int get_char_error;
  switch (encoding) {
    case MBSTRING_UNIV:
      get_char = CBS_get_utf32_be;
      get_char_error = ASN1_R_INVALID_UNIVERSALSTRING;
      break;
    case MBSTRING_BMP:
      get_char = CBS_get_ucs2_be;
      get_char_error = ASN1_R_INVALID_BMPSTRING;
      break;
    case MBSTRING_ASC:
      get_char = CBS_get_latin1;
      get_char_error = ERR_R_INTERNAL_ERROR;  // Should not be possible.
      break;
    case MBSTRING_UTF8:
      get_char = CBS_get_utf8;
      get_char_error = ASN1_R_INVALID_UTF8STRING;
      break;
    default:
      assert(0);
      return -1;
  }

  CBS cbs;
  CBS_init(&cbs, buf, buflen);
  int outlen = 0;
  while (CBS_len(&cbs) != 0) {
    const int is_first = CBS_data(&cbs) == buf;
    uint32_t c;
    if (!get_char(&cbs, &c)) {
      OPENSSL_PUT_ERROR(ASN1, get_char_error);
      return -1;
    }
    const int is_last = CBS_len(&cbs) == 0;
    if (flags & ASN1_STRFLGS_UTF8_CONVERT) {
      // Re-encode the codepoint as UTF-8 and escape the resulting bytes.
      uint8_t utf8_buf[6];
      CBB utf8_cbb;
      CBB_init_fixed(&utf8_cbb, utf8_buf, sizeof(utf8_buf));
      if (!CBB_add_utf8(&utf8_cbb, c)) {
        OPENSSL_PUT_ERROR(ASN1, ERR_R_INTERNAL_ERROR);
        // NOTE(review): this error path returns 1, unlike the -1 used
        // elsewhere in this function; matches upstream, left as-is.
        return 1;
      }
      size_t utf8_len = CBB_len(&utf8_cbb);
      for (size_t i = 0; i < utf8_len; i++) {
        int len = do_esc_char(utf8_buf[i], flags, quotes, out,
                              is_first && i == 0, is_last && i == utf8_len - 1);
        if (len < 0) {
          return -1;
        }
        outlen += len;
      }
    } else {
      int len = do_esc_char(c, flags, quotes, out, is_first, is_last);
      if (len < 0) {
        return -1;
      }
      outlen += len;
    }
  }
  return outlen;
}

// This function hex dumps a buffer of characters
static int do_hex_dump(BIO *out, unsigned char *buf, int buflen) {
  static const char hexdig[] = "0123456789ABCDEF";
  unsigned char *p, *q;
  char hextmp[2];
  if (out) {
    p = buf;
    q = buf + buflen;
    while (p != q) {
      hextmp[0] = hexdig[*p >> 4];
      hextmp[1] = hexdig[*p & 0xf];
      if (!maybe_write(out, hextmp, 2)) {
        return -1;
      }
      p++;
    }
  }
  // Two hex digits per input byte.
  return buflen << 1;
}

// "dump" a string. This is done when the type is unknown, or the flags
// request it. We can either dump the content octets or the entire DER
// encoding. This uses the RFC 2253 #01234 format.
static int do_dump(unsigned long flags, BIO *out, const ASN1_STRING *str) {
  if (!maybe_write(out, "#", 1)) {
    return -1;
  }

  // If we don't dump DER encoding just dump content octets
  if (!(flags & ASN1_STRFLGS_DUMP_DER)) {
    int outlen = do_hex_dump(out, str->data, str->length);
    if (outlen < 0) {
      return -1;
    }
    return outlen + 1;
  }

  // Placing the ASN1_STRING in a temporary ASN1_TYPE allows the DER encoding
  // to readily obtained.
  ASN1_TYPE t;
  OPENSSL_memset(&t, 0, sizeof(ASN1_TYPE));
  asn1_type_set0_string(&t, (ASN1_STRING *)str);
  unsigned char *der_buf = NULL;
  int der_len = i2d_ASN1_TYPE(&t, &der_buf);
  if (der_len < 0) {
    return -1;
  }
  int outlen = do_hex_dump(out, der_buf, der_len);
  OPENSSL_free(der_buf);
  if (outlen < 0) {
    return -1;
  }
  return outlen + 1;
}

// string_type_to_encoding returns the |MBSTRING_*| constant for the encoding
// used by the |ASN1_STRING| type |type|, or -1 if |tag| is not a string
// type.
static int string_type_to_encoding(int type) {
  // This function is sometimes passed ASN.1 universal types and sometimes
  // passed |ASN1_STRING| type values
  switch (type) {
    case V_ASN1_UTF8STRING:
      return MBSTRING_UTF8;
    case V_ASN1_NUMERICSTRING:
    case V_ASN1_PRINTABLESTRING:
    case V_ASN1_T61STRING:
    case V_ASN1_IA5STRING:
    case V_ASN1_UTCTIME:
    case V_ASN1_GENERALIZEDTIME:
    case V_ASN1_ISO64STRING:
      // |MBSTRING_ASC| refers to Latin-1, not ASCII.
      return MBSTRING_ASC;
    case V_ASN1_UNIVERSALSTRING:
      return MBSTRING_UNIV;
    case V_ASN1_BMPSTRING:
      return MBSTRING_BMP;
  }
  return -1;
}

// This is the main function, print out an ASN1_STRING taking note of various
// escape and display options. Returns number of characters written or -1 if
// an error occurred.
// ASN1_STRING_print_ex prints |str| to |out| honoring the ASN1_STRFLGS_*
// options (escaping, quoting, hex dumps, type prefixes). With a NULL |out| it
// only measures. Returns bytes written or -1 on error.
int ASN1_STRING_print_ex(BIO *out, const ASN1_STRING *str,
                         unsigned long flags) {
  int type = str->type;
  int outlen = 0;
  if (flags & ASN1_STRFLGS_SHOW_TYPE) {
    const char *tagname = ASN1_tag2str(type);
    outlen += strlen(tagname);
    if (!maybe_write(out, tagname, outlen) || !maybe_write(out, ":", 1)) {
      return -1;
    }
    outlen++;
  }

  // Decide what to do with |str|, either dump the contents or display it.
  int encoding;
  if (flags & ASN1_STRFLGS_DUMP_ALL) {
    // Dump everything.
    encoding = -1;
  } else if (flags & ASN1_STRFLGS_IGNORE_TYPE) {
    // Ignore the string type and interpret the contents as Latin-1.
    encoding = MBSTRING_ASC;
  } else {
    encoding = string_type_to_encoding(type);
    if (encoding == -1 && (flags & ASN1_STRFLGS_DUMP_UNKNOWN) == 0) {
      encoding = MBSTRING_ASC;
    }
  }

  if (encoding == -1) {
    int len = do_dump(flags, out, str);
    if (len < 0) {
      return -1;
    }
    outlen += len;
    return outlen;
  }

  // Measure the length.
  char quotes = 0;
  int len = do_buf(str->data, str->length, encoding, flags, &quotes, NULL);
  if (len < 0) {
    return -1;
  }
  outlen += len;
  if (quotes) {
    outlen += 2;
  }
  if (!out) {
    return outlen;
  }

  // Encode the value.
  if ((quotes && !maybe_write(out, "\"", 1)) ||
      do_buf(str->data, str->length, encoding, flags, NULL, out) < 0 ||
      (quotes && !maybe_write(out, "\"", 1))) {
    return -1;
  }
  return outlen;
}

// FILE*-based wrapper around |ASN1_STRING_print_ex|.
int ASN1_STRING_print_ex_fp(FILE *fp, const ASN1_STRING *str,
                            unsigned long flags) {
  BIO *bio = NULL;
  if (fp != NULL) {
    // If |fp| is NULL, this function returns the number of bytes without
    // writing.
    bio = BIO_new_fp(fp, BIO_NOCLOSE);
    if (bio == NULL) {
      return -1;
    }
  }
  int ret = ASN1_STRING_print_ex(bio, str, flags);
  BIO_free(bio);
  return ret;
}

// ASN1_STRING_to_UTF8 converts |in| to a freshly-allocated UTF-8 buffer at
// |*out|, returning its length or a negative value on error.
int ASN1_STRING_to_UTF8(unsigned char **out, const ASN1_STRING *in) {
  if (!in) {
    return -1;
  }
  int mbflag = string_type_to_encoding(in->type);
  if (mbflag == -1) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_UNKNOWN_TAG);
    return -1;
  }
  ASN1_STRING stmp, *str = &stmp;
  stmp.data = NULL;
  stmp.length = 0;
  stmp.flags = 0;
  int ret =
      ASN1_mbstring_copy(&str, in->data, in->length, mbflag, B_ASN1_UTF8STRING);
  if (ret < 0) {
    return ret;
  }
  // Ownership of the converted buffer passes to the caller.
  *out = stmp.data;
  return stmp.length;
}

// ASN1_STRING_print prints |v| to |bp|, replacing non-printable bytes with
// '.'; returns one on success and zero on write failure.
int ASN1_STRING_print(BIO *bp, const ASN1_STRING *v) {
  int i, n;
  char buf[80];
  const char *p;

  if (v == NULL) {
    return 0;
  }
  n = 0;
  p = (const char *)v->data;
  for (i = 0; i < v->length; i++) {
    if ((p[i] > '~') || ((p[i] < ' ') && (p[i] != '\n') && (p[i] != '\r'))) {
      buf[n] = '.';
    } else {
      buf[n] = p[i];
    }
    n++;
    // Flush the local buffer whenever it fills.
    if (n >= 80) {
      if (BIO_write(bp, buf, n) <= 0) {
        return 0;
      }
      n = 0;
    }
  }
  if (n > 0) {
    if (BIO_write(bp, buf, n) <= 0) {
      return 0;
    }
  }
  return 1;
}

// ASN1_TIME_print dispatches on the time's type to the matching printer.
int ASN1_TIME_print(BIO *bp, const ASN1_TIME *tm) {
  if (tm->type == V_ASN1_UTCTIME) {
    return ASN1_UTCTIME_print(bp, tm);
  }
  if (tm->type == V_ASN1_GENERALIZEDTIME) {
    return ASN1_GENERALIZEDTIME_print(bp, tm);
  }
  BIO_puts(bp, "Bad time value");
  return 0;
}

static const char *const mon[12] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
                                    "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};

// ASN1_GENERALIZEDTIME_print prints |tm| in "Mon DD HH:MM:SS YYYY GMT" form.
int ASN1_GENERALIZEDTIME_print(BIO *bp, const ASN1_GENERALIZEDTIME *tm) {
  CBS cbs;
  CBS_init(&cbs, tm->data, tm->length);
  struct tm utc;
  if (!CBS_parse_generalized_time(&cbs, &utc, /*allow_timezone_offset=*/0)) {
    BIO_puts(bp, "Bad time value");
    return 0;
  }
  return BIO_printf(bp, "%s %2d %02d:%02d:%02d %d GMT", mon[utc.tm_mon],
                    utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec,
                    utc.tm_year + 1900) > 0;
}

// ASN1_UTCTIME_print prints |tm| in the same format as above.
int ASN1_UTCTIME_print(BIO *bp, const ASN1_UTCTIME *tm) {
  CBS cbs;
  CBS_init(&cbs, tm->data, tm->length);
  struct tm utc;
  if (!CBS_parse_utc_time(&cbs, &utc, /*allow_timezone_offset=*/0)) {
    BIO_puts(bp, "Bad time value");
    return 0;
  }
  return BIO_printf(bp, "%s %2d %02d:%02d:%02d %d GMT", mon[utc.tm_mon],
                    utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec,
                    utc.tm_year + 1900) > 0;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/a_strnid.cc
================================================
/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): angle-bracketed header names stripped in extraction; restore
// from upstream a_strnid.cc.
#include
#include
#include
#include
#include
#include
#include
#include "../internal.h"
#include "../lhash/internal.h"
#include "internal.h"

DEFINE_LHASH_OF(ASN1_STRING_TABLE)

// Dynamically-registered string tables, guarded by |string_tables_lock|.
static LHASH_OF(ASN1_STRING_TABLE) *string_tables = NULL;
static CRYPTO_MUTEX string_tables_lock = CRYPTO_MUTEX_INIT;

// The default-mask setters are no-ops in BoringSSL; the mask is always
// B_ASN1_UTF8STRING.
void ASN1_STRING_set_default_mask(unsigned long mask) {}

unsigned long ASN1_STRING_get_default_mask(void) { return B_ASN1_UTF8STRING; }

int ASN1_STRING_set_default_mask_asc(const char *p) { return 1; }

static const ASN1_STRING_TABLE *asn1_string_table_get(int nid);

// The following function generates an ASN1_STRING based on limits in a
// table. Frequently the types and length of an ASN1_STRING are restricted by
// a corresponding OID. For example certificates and certificate requests.
// ASN1_STRING_set_by_NID converts |in| into an ASN1_STRING constrained by the
// table entry for |nid| (type mask and min/max size), falling back to a plain
// UTF8String conversion when no entry exists. Returns the string or NULL.
ASN1_STRING *ASN1_STRING_set_by_NID(ASN1_STRING **out, const unsigned char *in,
                                    ossl_ssize_t len, int inform, int nid) {
  ASN1_STRING *str = NULL;
  int ret;
  if (!out) {
    out = &str;
  }
  const ASN1_STRING_TABLE *tbl = asn1_string_table_get(nid);
  if (tbl != NULL) {
    unsigned long mask = tbl->mask;
    if (!(tbl->flags & STABLE_NO_MASK)) {
      mask &= B_ASN1_UTF8STRING;
    }
    ret = ASN1_mbstring_ncopy(out, in, len, inform, mask, tbl->minsize,
                              tbl->maxsize);
  } else {
    ret = ASN1_mbstring_copy(out, in, len, inform, B_ASN1_UTF8STRING);
  }
  if (ret <= 0) {
    return NULL;
  }
  return *out;
}

// Now the tables and helper functions for the string table:

// See RFC 5280.
#define ub_name 32768
#define ub_common_name 64
#define ub_locality_name 128
#define ub_state_name 128
#define ub_organization_name 64
#define ub_organization_unit_name 64
#define ub_email_address 128
#define ub_serial_number 64

// This table must be kept in NID order
static const ASN1_STRING_TABLE tbl_standard[] = {
    {NID_commonName, 1, ub_common_name, DIRSTRING_TYPE, 0},
    {NID_countryName, 2, 2, B_ASN1_PRINTABLESTRING, STABLE_NO_MASK},
    {NID_localityName, 1, ub_locality_name, DIRSTRING_TYPE, 0},
    {NID_stateOrProvinceName, 1, ub_state_name, DIRSTRING_TYPE, 0},
    {NID_organizationName, 1, ub_organization_name, DIRSTRING_TYPE, 0},
    {NID_organizationalUnitName, 1, ub_organization_unit_name, DIRSTRING_TYPE,
     0},
    {NID_pkcs9_emailAddress, 1, ub_email_address, B_ASN1_IA5STRING,
     STABLE_NO_MASK},
    {NID_pkcs9_unstructuredName, 1, -1, PKCS9STRING_TYPE, 0},
    {NID_pkcs9_challengePassword, 1, -1, PKCS9STRING_TYPE, 0},
    {NID_pkcs9_unstructuredAddress, 1, -1, DIRSTRING_TYPE, 0},
    {NID_givenName, 1, ub_name, DIRSTRING_TYPE, 0},
    {NID_surname, 1, ub_name, DIRSTRING_TYPE, 0},
    {NID_initials, 1, ub_name, DIRSTRING_TYPE, 0},
    {NID_serialNumber, 1, ub_serial_number, B_ASN1_PRINTABLESTRING,
     STABLE_NO_MASK},
    {NID_friendlyName, -1, -1, B_ASN1_BMPSTRING, STABLE_NO_MASK},
    {NID_name, 1, ub_name, DIRSTRING_TYPE, 0},
    {NID_dnQualifier, -1, -1, B_ASN1_PRINTABLESTRING, STABLE_NO_MASK},
    {NID_domainComponent, 1, -1, B_ASN1_IA5STRING, STABLE_NO_MASK},
    {NID_ms_csp_name, -1, -1, B_ASN1_BMPSTRING, STABLE_NO_MASK}};

// table_cmp orders entries by NID, for bsearch and the hash table.
static int table_cmp(const ASN1_STRING_TABLE *a, const ASN1_STRING_TABLE *b) {
  if (a->nid < b->nid) {
    return -1;
  }
  if (a->nid > b->nid) {
    return 1;
  }
  return 0;
}

// NOTE(review): the template arguments of the reinterpret_casts in this file
// (likely |const ASN1_STRING_TABLE *| / |ASN1_STRING_TABLE *|) were stripped
// in extraction; restore from upstream.
static int table_cmp_void(const void *a, const void *b) {
  return table_cmp(reinterpret_cast(a),
                   reinterpret_cast(b));
}

static uint32_t table_hash(const ASN1_STRING_TABLE *tbl) {
  return OPENSSL_hash32(&tbl->nid, sizeof(tbl->nid));
}

// asn1_string_table_get looks up |nid| first in the static table, then in the
// dynamically-registered table under a read lock.
static const ASN1_STRING_TABLE *asn1_string_table_get(int nid) {
  ASN1_STRING_TABLE key;
  key.nid = nid;
  const ASN1_STRING_TABLE *tbl = reinterpret_cast(
      bsearch(&key, tbl_standard, OPENSSL_ARRAY_SIZE(tbl_standard),
              sizeof(ASN1_STRING_TABLE), table_cmp_void));
  if (tbl != NULL) {
    return tbl;
  }

  CRYPTO_MUTEX_lock_read(&string_tables_lock);
  if (string_tables != NULL) {
    tbl = lh_ASN1_STRING_TABLE_retrieve(string_tables, &key);
  }
  CRYPTO_MUTEX_unlock_read(&string_tables_lock);
  // Note returning |tbl| without the lock is only safe because
  // |ASN1_STRING_TABLE_add| cannot modify or delete existing entries. If we
  // wish to support that, this function must copy the result under a lock.
  return tbl;
}

// ASN1_STRING_TABLE_add registers a new table entry for |nid|. Existing
// entries (static or dynamic) cannot be overwritten. Returns one on success.
int ASN1_STRING_TABLE_add(int nid, long minsize, long maxsize,
                          unsigned long mask, unsigned long flags) {
  // Existing entries cannot be overwritten.
  if (asn1_string_table_get(nid) != NULL) {
    OPENSSL_PUT_ERROR(ASN1, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  int ret = 0;
  CRYPTO_MUTEX_lock_write(&string_tables_lock);
  ASN1_STRING_TABLE *tbl = NULL;
  if (string_tables == NULL) {
    string_tables = lh_ASN1_STRING_TABLE_new(table_hash, table_cmp);
    if (string_tables == NULL) {
      goto err;
    }
  } else {
    // Check again for an existing entry. One may have been added while
    // unlocked.
    ASN1_STRING_TABLE key;
    key.nid = nid;
    if (lh_ASN1_STRING_TABLE_retrieve(string_tables, &key) != NULL) {
      OPENSSL_PUT_ERROR(ASN1, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
      goto err;
    }
  }

  tbl = reinterpret_cast(
      OPENSSL_malloc(sizeof(ASN1_STRING_TABLE)));
  if (tbl == NULL) {
    goto err;
  }
  tbl->nid = nid;
  tbl->flags = flags;
  tbl->minsize = minsize;
  tbl->maxsize = maxsize;
  tbl->mask = mask;
  ASN1_STRING_TABLE *old_tbl;
  if (!lh_ASN1_STRING_TABLE_insert(string_tables, &old_tbl, tbl)) {
    OPENSSL_free(tbl);
    goto err;
  }
  assert(old_tbl == NULL);
  ret = 1;

err:
  CRYPTO_MUTEX_unlock_write(&string_tables_lock);
  return ret;
}

void ASN1_STRING_TABLE_cleanup(void) {}

// Test-only accessor for the static table.
void asn1_get_string_table_for_testing(const ASN1_STRING_TABLE **out_ptr,
                                       size_t *out_len) {
  *out_ptr = tbl_standard;
  *out_len = OPENSSL_ARRAY_SIZE(tbl_standard);
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/a_time.cc
================================================
/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): angle-bracketed header names stripped in extraction; restore
// from upstream a_time.cc.
#include
#include
#include
#include
#include
#include
#include
#include
#include "internal.h"

// This is an implementation of the ASN1 Time structure which is: Time ::=
// CHOICE { utcTime UTCTime, generalTime GeneralizedTime } written by Steve
// Henson.
// ASN1_TIME is a CHOICE of UTCTime and GeneralizedTime; these macros generate
// the multi-string template and the standard new/free/d2i/i2d functions.
IMPLEMENT_ASN1_MSTRING(ASN1_TIME, B_ASN1_TIME)

IMPLEMENT_ASN1_FUNCTIONS_const(ASN1_TIME)

ASN1_TIME *ASN1_TIME_set_posix(ASN1_TIME *s, int64_t posix_time) {
  return ASN1_TIME_adj(s, posix_time, 0, 0);
}

ASN1_TIME *ASN1_TIME_set(ASN1_TIME *s, time_t time) {
  return ASN1_TIME_adj(s, time, 0, 0);
}

// fits_in_utc_time returns one if |tm| is representable as a UTCTime, i.e. its
// year is in [1950, 2050). |tm_year| counts years since 1900.
static int fits_in_utc_time(const struct tm *tm) {
  return 50 <= tm->tm_year && tm->tm_year < 150;
}

// ASN1_TIME_adj writes |posix_time|, adjusted by |offset_day| days and
// |offset_sec| seconds, into |s| as a UTCTime if the result fits in one and a
// GeneralizedTime otherwise. It returns the result or NULL on error.
ASN1_TIME *ASN1_TIME_adj(ASN1_TIME *s, int64_t posix_time, int offset_day,
                         long offset_sec) {
  struct tm tm;
  if (!OPENSSL_posix_to_tm(posix_time, &tm)) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_ERROR_GETTING_TIME);
    return NULL;
  }
  if (offset_day || offset_sec) {
    if (!OPENSSL_gmtime_adj(&tm, offset_day, offset_sec)) {
      return NULL;
    }
  }
  if (fits_in_utc_time(&tm)) {
    return ASN1_UTCTIME_adj(s, posix_time, offset_day, offset_sec);
  }
  return ASN1_GENERALIZEDTIME_adj(s, posix_time, offset_day, offset_sec);
}

// ASN1_TIME_check returns one if |t| is a valid UTCTime or GeneralizedTime and
// zero otherwise.
int ASN1_TIME_check(const ASN1_TIME *t) {
  if (t->type == V_ASN1_GENERALIZEDTIME) {
    return ASN1_GENERALIZEDTIME_check(t);
  } else if (t->type == V_ASN1_UTCTIME) {
    return ASN1_UTCTIME_check(t);
  }
  return 0;
}

// Convert an ASN1_TIME structure to GeneralizedTime
ASN1_GENERALIZEDTIME *ASN1_TIME_to_generalizedtime(const ASN1_TIME *in,
                                                   ASN1_GENERALIZEDTIME **out) {
  if (!ASN1_TIME_check(in)) {
    return NULL;
  }

  ASN1_GENERALIZEDTIME *ret = NULL;
  // Reuse |*out| as the result when the caller supplied one; otherwise
  // allocate, and only free on error what we allocated ourselves.
  if (!out || !*out) {
    if (!(ret = ASN1_GENERALIZEDTIME_new())) {
      goto err;
    }
  } else {
    ret = *out;
  }

  // If already GeneralizedTime just copy across
  if (in->type == V_ASN1_GENERALIZEDTIME) {
    if (!ASN1_STRING_set(ret, in->data, in->length)) {
      goto err;
    }
    goto done;
  }

  // Grow the string to accommodate the two-digit century.
  if (!ASN1_STRING_set(ret, NULL, in->length + 2)) {
    goto err;
  }

  {
    char *const out_str = (char *)ret->data;
    // |ASN1_STRING_set| also allocates an additional byte for a trailing NUL.
    const size_t out_str_capacity = in->length + 2 + 1;
    // Work out the century and prepend. UTCTime years 50-99 map to 19xx and
    // 00-49 map to 20xx, per RFC 5280.
    if (in->data[0] >= '5') {
      OPENSSL_strlcpy(out_str, "19", out_str_capacity);
    } else {
      OPENSSL_strlcpy(out_str, "20", out_str_capacity);
    }
    OPENSSL_strlcat(out_str, (const char *)in->data, out_str_capacity);
  }

done:
  if (out != NULL && *out == NULL) {
    *out = ret;
  }
  return ret;

err:
  if (out == NULL || *out != ret) {
    ASN1_GENERALIZEDTIME_free(ret);
  }
  return NULL;
}

// ASN1_TIME_set_string sets |s| from |str|, which must be a valid UTCTime or,
// failing that, a valid GeneralizedTime.
int ASN1_TIME_set_string(ASN1_TIME *s, const char *str) {
  return ASN1_UTCTIME_set_string(s, str) ||
         ASN1_GENERALIZEDTIME_set_string(s, str);
}

// ASN1_TIME_set_string_X509 sets |s| from |str|, additionally converting
// GeneralizedTimes in the UTCTime range to UTCTime, as RFC 5280 requires for
// certificate validity dates. Timezone offsets are rejected.
int ASN1_TIME_set_string_X509(ASN1_TIME *s, const char *str) {
  CBS cbs;
  CBS_init(&cbs, (const uint8_t *)str, strlen(str));
  int type;
  struct tm tm;
  if (CBS_parse_utc_time(&cbs, /*out_tm=*/NULL,
                         /*allow_timezone_offset=*/0)) {
    type = V_ASN1_UTCTIME;
  } else if (CBS_parse_generalized_time(&cbs, &tm,
                                        /*allow_timezone_offset=*/0)) {
    type = V_ASN1_GENERALIZEDTIME;
    if (fits_in_utc_time(&tm)) {
      type = V_ASN1_UTCTIME;
      // Drop the leading two digits of the four-digit year.
      CBS_skip(&cbs, 2);
    }
  } else {
    return 0;
  }

  if (s != NULL) {
    if (!ASN1_STRING_set(s, CBS_data(&cbs), CBS_len(&cbs))) {
      return 0;
    }
    s->type = type;
  }
  return 1;
}

// asn1_time_to_tm converts |t| to a broken-out time in |tm|. A NULL |t| means
// "now". It returns one on success and zero on error.
static int asn1_time_to_tm(struct tm *tm, const ASN1_TIME *t,
                           int allow_timezone_offset) {
  if (t == NULL) {
    if (OPENSSL_posix_to_tm(time(NULL), tm)) {
      return 1;
    }
    return 0;
  }

  if (t->type == V_ASN1_UTCTIME) {
    return asn1_utctime_to_tm(tm, t, allow_timezone_offset);
  } else if (t->type == V_ASN1_GENERALIZEDTIME) {
    return asn1_generalizedtime_to_tm(tm, t);
  }

  return 0;
}

// ASN1_TIME_diff sets |*out_days| and |*out_seconds| to the difference between
// |from| and |to|. It returns one on success and zero on error.
int ASN1_TIME_diff(int *out_days, int *out_seconds, const ASN1_TIME *from,
                   const ASN1_TIME *to) {
  struct tm tm_from, tm_to;
  if (!asn1_time_to_tm(&tm_from, from, /*allow_timezone_offset=*/1)) {
    return 0;
  }
  if (!asn1_time_to_tm(&tm_to, to, /*allow_timezone_offset=*/1)) {
    return 0;
  }
  return OPENSSL_gmtime_diff(out_days, out_seconds, &tm_from, &tm_to);
}

// ASN1_TIME_to_posix_nonstandard is like |ASN1_TIME_to_posix| but additionally
// accepts nonstandard four-digit timezone offsets in UTCTimes.
int ASN1_TIME_to_posix_nonstandard(const ASN1_TIME *t, int64_t *out_time) {
  struct tm tm;
  if (!asn1_time_to_tm(&tm, t, /*allow_timezone_offset=*/1)) {
    return 0;
  }
  return OPENSSL_tm_to_posix(&tm, out_time);
}

// The functions below do *not* permissively allow the use of four digit
// timezone offsets in UTC times, as is done elsewhere in the code. They are
// both new API, and used internally to X509_cmp_time. This is to discourage the
// use of nonstandard times in new code, and to ensure that this code behaves
// correctly in X509_cmp_time which historically did its own time validations
// slightly different than the many other copies of X.509 time validation
// sprinkled through the codebase. The custom checks in X509_cmp_time meant that
// it did not allow four digit timezone offsets in UTC times.
int ASN1_TIME_to_time_t(const ASN1_TIME *t, time_t *out_time) {
  struct tm tm;
  if (!asn1_time_to_tm(&tm, t, /*allow_timezone_offset=*/0)) {
    return 0;
  }
  return OPENSSL_timegm(&tm, out_time);
}

int ASN1_TIME_to_posix(const ASN1_TIME *t, int64_t *out_time) {
  struct tm tm;
  if (!asn1_time_to_tm(&tm, t, /*allow_timezone_offset=*/0)) {
    return 0;
  }
  return OPENSSL_tm_to_posix(&tm, out_time);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/a_type.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the `<...>` targets of these includes were lost in extraction;
// restore the header names from the upstream file before building.
#include
#include
#include
#include
#include

#include "internal.h"

// ASN1_TYPE_get returns |a|'s type, or zero if its value is unset (a NULL
// object or string pointer). NULL and BOOLEAN carry no pointer and always
// report their type.
int ASN1_TYPE_get(const ASN1_TYPE *a) {
  switch (a->type) {
    case V_ASN1_NULL:
    case V_ASN1_BOOLEAN:
      return a->type;
    case V_ASN1_OBJECT:
      return a->value.object != NULL ? a->type : 0;
    default:
      return a->value.asn1_string != NULL ? a->type : 0;
  }
}

const void *asn1_type_value_as_pointer(const ASN1_TYPE *a) {
  switch (a->type) {
    case V_ASN1_NULL:
      return NULL;
    case V_ASN1_BOOLEAN:
      // BOOLEANs have no pointer value; encode true as the 0xff sentinel.
      return a->value.boolean ? (void *)0xff : NULL;
    case V_ASN1_OBJECT:
      return a->value.object;
    default:
      return a->value.asn1_string;
  }
}

void asn1_type_set0_string(ASN1_TYPE *a, ASN1_STRING *str) {
  // |ASN1_STRING| types are almost the same as |ASN1_TYPE| types, except that
  // the negative flag is not reflected into |ASN1_TYPE|.
  int type = str->type;
  if (type == V_ASN1_NEG_INTEGER) {
    type = V_ASN1_INTEGER;
  } else if (type == V_ASN1_NEG_ENUMERATED) {
    type = V_ASN1_ENUMERATED;
  }

  // These types are not |ASN1_STRING| types and use a different
  // representation when stored in |ASN1_TYPE|.
  assert(type != V_ASN1_NULL && type != V_ASN1_OBJECT &&
         type != V_ASN1_BOOLEAN);
  ASN1_TYPE_set(a, type, str);
}

void asn1_type_cleanup(ASN1_TYPE *a) {
  switch (a->type) {
    case V_ASN1_NULL:
      a->value.ptr = NULL;
      break;
    case V_ASN1_BOOLEAN:
      a->value.boolean = ASN1_BOOLEAN_NONE;
      break;
    case V_ASN1_OBJECT:
      ASN1_OBJECT_free(a->value.object);
      a->value.object = NULL;
      break;
    default:
      ASN1_STRING_free(a->value.asn1_string);
      a->value.asn1_string = NULL;
      break;
  }
}

// ASN1_TYPE_set frees |a|'s current value and replaces it with |value|, taking
// ownership. For V_ASN1_BOOLEAN, |value| is interpreted as a truth value, not
// a pointer.
void ASN1_TYPE_set(ASN1_TYPE *a, int type, void *value) {
  asn1_type_cleanup(a);
  a->type = type;
  switch (type) {
    case V_ASN1_NULL:
      a->value.ptr = NULL;
      break;
    case V_ASN1_BOOLEAN:
      a->value.boolean = value ? ASN1_BOOLEAN_TRUE : ASN1_BOOLEAN_FALSE;
      break;
    case V_ASN1_OBJECT:
      // NOTE(review): the template arguments of these casts (e.g.
      // reinterpret_cast<ASN1_OBJECT *>) were stripped by extraction.
      a->value.object = reinterpret_cast(value);
      break;
    default:
      a->value.asn1_string = reinterpret_cast(value);
      break;
  }
}

// ASN1_TYPE_set1 is like |ASN1_TYPE_set| but copies |value| rather than taking
// ownership. It returns one on success and zero on allocation failure.
int ASN1_TYPE_set1(ASN1_TYPE *a, int type, const void *value) {
  if (!value || (type == V_ASN1_BOOLEAN)) {
    void *p = (void *)value;
    ASN1_TYPE_set(a, type, p);
  } else if (type == V_ASN1_OBJECT) {
    ASN1_OBJECT *odup;
    odup = OBJ_dup(reinterpret_cast(value));
    if (!odup) {
      return 0;
    }
    ASN1_TYPE_set(a, type, odup);
  } else {
    ASN1_STRING *sdup;
    sdup = ASN1_STRING_dup(reinterpret_cast(value));
    if (!sdup) {
      return 0;
    }
    ASN1_TYPE_set(a, type, sdup);
  }
  return 1;
}

// Returns 0 if they are equal, != 0 otherwise.
int ASN1_TYPE_cmp(const ASN1_TYPE *a, const ASN1_TYPE *b) {
  int result = -1;

  if (!a || !b || a->type != b->type) {
    return -1;
  }

  switch (a->type) {
    case V_ASN1_OBJECT:
      result = OBJ_cmp(a->value.object, b->value.object);
      break;
    case V_ASN1_NULL:
      result = 0;  // They do not have content.
      break;
    case V_ASN1_BOOLEAN:
      result = a->value.boolean - b->value.boolean;
      break;
    case V_ASN1_INTEGER:
    case V_ASN1_ENUMERATED:
    case V_ASN1_BIT_STRING:
    case V_ASN1_OCTET_STRING:
    case V_ASN1_SEQUENCE:
    case V_ASN1_SET:
    case V_ASN1_NUMERICSTRING:
    case V_ASN1_PRINTABLESTRING:
    case V_ASN1_T61STRING:
    case V_ASN1_VIDEOTEXSTRING:
    case V_ASN1_IA5STRING:
    case V_ASN1_UTCTIME:
    case V_ASN1_GENERALIZEDTIME:
    case V_ASN1_GRAPHICSTRING:
    case V_ASN1_VISIBLESTRING:
    case V_ASN1_GENERALSTRING:
    case V_ASN1_UNIVERSALSTRING:
    case V_ASN1_BMPSTRING:
    case V_ASN1_UTF8STRING:
    case V_ASN1_OTHER:
    default:
      result = ASN1_STRING_cmp(a->value.asn1_string, b->value.asn1_string);
      break;
  }

  return result;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/a_utctm.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): include targets lost in extraction; restore from upstream.
#include
#include
#include
#include
#include
#include
#include
#include

#include "internal.h"

// asn1_utctime_to_tm converts the UTCTime |d| to a broken-out time in |tm|. It
// returns one on success and zero if |d| is not a valid UTCTime.
int asn1_utctime_to_tm(struct tm *tm, const ASN1_UTCTIME *d,
                       int allow_timezone_offset) {
  if (d->type != V_ASN1_UTCTIME) {
    return 0;
  }
  CBS cbs;
  CBS_init(&cbs, d->data, (size_t)d->length);
  if (!CBS_parse_utc_time(&cbs, tm, allow_timezone_offset)) {
    return 0;
  }
  return 1;
}

int ASN1_UTCTIME_check(const ASN1_UTCTIME *d) {
  return asn1_utctime_to_tm(NULL, d, /*allow_timezone_offset=*/1);
}

int ASN1_UTCTIME_set_string(ASN1_UTCTIME *s, const char *str) {
  // Although elsewhere we allow timezone offsets with UTCTime, to be compatible
  // with some existing misissued certificates, this function is used to
  // construct new certificates and can be stricter.
  size_t len = strlen(str);
  CBS cbs;
  CBS_init(&cbs, (const uint8_t *)str, len);
  if (!CBS_parse_utc_time(&cbs, /*out_tm=*/NULL,
                          /*allow_timezone_offset=*/0)) {
    return 0;
  }

  if (s != NULL) {
    if (!ASN1_STRING_set(s, str, len)) {
      return 0;
    }
    s->type = V_ASN1_UTCTIME;
  }
  return 1;
}

ASN1_UTCTIME *ASN1_UTCTIME_set(ASN1_UTCTIME *s, int64_t posix_time) {
  return ASN1_UTCTIME_adj(s, posix_time, 0, 0);
}

// ASN1_UTCTIME_adj writes |posix_time| adjusted by |offset_day| and
// |offset_sec| into |s| (allocating if |s| is NULL) as a UTCTime. It returns
// NULL if the result does not fall in the UTCTime year range [1950, 2050).
ASN1_UTCTIME *ASN1_UTCTIME_adj(ASN1_UTCTIME *s, int64_t posix_time,
                               int offset_day, long offset_sec) {
  struct tm data;
  if (!OPENSSL_posix_to_tm(posix_time, &data)) {
    return NULL;
  }

  if (offset_day || offset_sec) {
    if (!OPENSSL_gmtime_adj(&data, offset_day, offset_sec)) {
      return NULL;
    }
  }

  if (data.tm_year < 50 || data.tm_year >= 150) {
    return NULL;
  }

  char buf[14];
  int ret = snprintf(buf, sizeof(buf), "%02d%02d%02d%02d%02d%02dZ",
                     data.tm_year % 100, data.tm_mon + 1, data.tm_mday,
                     data.tm_hour, data.tm_min, data.tm_sec);
  // |snprintf| must write exactly 13 bytes (plus the NUL) to the buffer: six
  // two-digit fields and the trailing 'Z'.
  // NOTE(review): the static_cast's template argument (<int>) was stripped by
  // extraction.
  BSSL_CHECK(ret == static_cast(sizeof(buf) - 1));

  int free_s = 0;
  if (s == NULL) {
    free_s = 1;
    s = ASN1_UTCTIME_new();
    if (s == NULL) {
      return NULL;
    }
  }

  if (!ASN1_STRING_set(s, buf, strlen(buf))) {
    if (free_s) {
      ASN1_UTCTIME_free(s);
    }
    return NULL;
  }
  s->type = V_ASN1_UTCTIME;
  return s;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/asn1_lib.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): include targets lost in extraction; restore from upstream.
#include
#include
#include
#include
#include
#include

#include "../internal.h"
#include "internal.h"

// Cross-module errors from crypto/x509/i2d_pr.c.
OPENSSL_DECLARE_ERROR_REASON(ASN1, UNSUPPORTED_PUBLIC_KEY_TYPE)

// Cross-module errors from crypto/x509/algorithm.c.
OPENSSL_DECLARE_ERROR_REASON(ASN1, CONTEXT_NOT_INITIALISED)
OPENSSL_DECLARE_ERROR_REASON(ASN1, DIGEST_AND_KEY_TYPE_NOT_SUPPORTED)
OPENSSL_DECLARE_ERROR_REASON(ASN1, UNKNOWN_MESSAGE_DIGEST_ALGORITHM)
OPENSSL_DECLARE_ERROR_REASON(ASN1, UNKNOWN_SIGNATURE_ALGORITHM)
OPENSSL_DECLARE_ERROR_REASON(ASN1, WRONG_PUBLIC_KEY_TYPE)

// Cross-module errors from crypto/x509/asn1_gen.c. TODO(davidben): Remove
// these once asn1_gen.c is gone.
OPENSSL_DECLARE_ERROR_REASON(ASN1, DEPTH_EXCEEDED)
OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_BITSTRING_FORMAT)
OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_BOOLEAN)
OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_FORMAT)
OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_HEX)
OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_IMPLICIT_TAG)
OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_INTEGER)
OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_NESTED_TAGGING)
OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_NULL_VALUE)
OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_OBJECT)
OPENSSL_DECLARE_ERROR_REASON(ASN1, ILLEGAL_TIME_VALUE)
OPENSSL_DECLARE_ERROR_REASON(ASN1, INTEGER_NOT_ASCII_FORMAT)
OPENSSL_DECLARE_ERROR_REASON(ASN1, INVALID_MODIFIER)
OPENSSL_DECLARE_ERROR_REASON(ASN1, INVALID_NUMBER)
OPENSSL_DECLARE_ERROR_REASON(ASN1, LIST_ERROR)
OPENSSL_DECLARE_ERROR_REASON(ASN1, MISSING_VALUE)
OPENSSL_DECLARE_ERROR_REASON(ASN1, NOT_ASCII_FORMAT)
OPENSSL_DECLARE_ERROR_REASON(ASN1, OBJECT_NOT_ASCII_FORMAT)
OPENSSL_DECLARE_ERROR_REASON(ASN1, SEQUENCE_OR_SET_NEEDS_CONFIG)
OPENSSL_DECLARE_ERROR_REASON(ASN1, TIME_NOT_ASCII_FORMAT)
OPENSSL_DECLARE_ERROR_REASON(ASN1, UNKNOWN_FORMAT)
OPENSSL_DECLARE_ERROR_REASON(ASN1, UNKNOWN_TAG)
OPENSSL_DECLARE_ERROR_REASON(ASN1, UNSUPPORTED_TYPE)

// Limit |ASN1_STRING|s to 64 MiB of data. Most of this module, as well as
// downstream code, does not correctly handle overflow. We cap string fields
// more tightly than strictly necessary to fit in |int|. This is not expected to
// impact real world uses of this field.
//
// In particular, this limit is small enough that the bit count of a BIT STRING
// comfortably fits in an |int|, with room for arithmetic.
#define ASN1_STRING_MAX (64 * 1024 * 1024)

static void asn1_put_length(unsigned char **pp, int length);

// ASN1_get_object parses a BER/DER tag-length header from |*inp| (at most
// |in_len| bytes), advances |*inp| to the element body, and fills in the body
// length, tag number, and class. The return value is 0x20 (V_ASN1_CONSTRUCTED)
// for constructed elements, 0 for primitive ones, and 0x80 on error.
int ASN1_get_object(const unsigned char **inp, long *out_len, int *out_tag,
                    int *out_class, long in_len) {
  if (in_len < 0) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_HEADER_TOO_LONG);
    return 0x80;
  }
  CBS_ASN1_TAG tag;
  CBS cbs, body;
  CBS_init(&cbs, *inp, (size_t)in_len);
  if (!CBS_get_any_asn1(&cbs, &body, &tag) ||
      // Bound the length to comfortably fit in an int. Lengths in this
      // module often switch between int and long without overflow checks.
      CBS_len(&body) > INT_MAX / 2) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_HEADER_TOO_LONG);
    return 0x80;
  }

  // Convert between tag representations.
  int tag_class = (tag & CBS_ASN1_CLASS_MASK) >> CBS_ASN1_TAG_SHIFT;
  int constructed = (tag & CBS_ASN1_CONSTRUCTED) >> CBS_ASN1_TAG_SHIFT;
  int tag_number = tag & CBS_ASN1_TAG_NUMBER_MASK;

  // To avoid ambiguity with V_ASN1_NEG, impose a limit on universal tags.
  if (tag_class == V_ASN1_UNIVERSAL && tag_number > V_ASN1_MAX_UNIVERSAL) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_HEADER_TOO_LONG);
    return 0x80;
  }

  *inp = CBS_data(&body);
  *out_len = CBS_len(&body);
  *out_tag = tag_number;
  *out_class = tag_class;
  return constructed;
}

// class 0 is constructed constructed == 2 for indefinite length constructed
// ASN1_put_object writes a tag-length header to |*pp| and advances |*pp| past
// it. Tags >= 31 use the multi-byte (high-tag-number) form, seven bits per
// byte, high bit set on all but the last byte.
void ASN1_put_object(unsigned char **pp, int constructed, int length, int tag,
                     int xclass) {
  unsigned char *p = *pp;
  int i, ttag;

  i = (constructed) ? V_ASN1_CONSTRUCTED : 0;
  i |= (xclass & V_ASN1_PRIVATE);
  if (tag < 31) {
    *(p++) = i | (tag & V_ASN1_PRIMITIVE_TAG);
  } else {
    *(p++) = i | V_ASN1_PRIMITIVE_TAG;
    // Count the base-128 digits of |tag|, then emit them most significant
    // first.
    for (i = 0, ttag = tag; ttag > 0; i++) {
      ttag >>= 7;
    }
    ttag = i;
    while (i-- > 0) {
      p[i] = tag & 0x7f;
      if (i != (ttag - 1)) {
        p[i] |= 0x80;
      }
      tag >>= 7;
    }
    p += ttag;
  }
  if (constructed == 2) {
    // Indefinite length: 0x80 length octet; caller must emit an EOC later.
    *(p++) = 0x80;
  } else {
    asn1_put_length(&p, length);
  }
  *pp = p;
}

int ASN1_put_eoc(unsigned char **pp) {
  // This function is no longer used in the library, but some external code
  // uses it.
  unsigned char *p = *pp;
  *p++ = 0;
  *p++ = 0;
  *pp = p;
  return 2;
}

// asn1_put_length writes |length| to |*pp| in DER form: one byte for lengths
// up to 127, otherwise 0x80 | num_bytes followed by the big-endian length.
static void asn1_put_length(unsigned char **pp, int length) {
  unsigned char *p = *pp;
  int i, l;
  if (length <= 127) {
    *(p++) = (unsigned char)length;
  } else {
    l = length;
    for (i = 0; l > 0; i++) {
      l >>= 8;
    }
    *(p++) = i | 0x80;
    l = i;
    while (i-- > 0) {
      p[i] = length & 0xff;
      length >>= 8;
    }
    p += l;
  }
  *pp = p;
}

// ASN1_object_size returns the number of bytes a tag-length header plus
// |length| body bytes occupy, or -1 on overflow or negative |length|.
int ASN1_object_size(int constructed, int length, int tag) {
  int ret = 1;
  if (length < 0) {
    return -1;
  }
  if (tag >= 31) {
    while (tag > 0) {
      tag >>= 7;
      ret++;
    }
  }
  if (constructed == 2) {
    // Indefinite length: one length byte plus the two-byte EOC.
    ret += 3;
  } else {
    ret++;
    if (length > 127) {
      int tmplen = length;
      while (tmplen > 0) {
        tmplen >>= 8;
        ret++;
      }
    }
  }
  if (ret >= INT_MAX - length) {
    return -1;
  }
  return ret + length;
}

// ASN1_STRING_copy copies |str|'s contents, type, and flags into |dst|. It
// returns one on success and zero on error.
int ASN1_STRING_copy(ASN1_STRING *dst, const ASN1_STRING *str) {
  if (str == NULL) {
    return 0;
  }
  if (!ASN1_STRING_set(dst, str->data, str->length)) {
    return 0;
  }
  dst->type = str->type;
  dst->flags = str->flags;
  return 1;
}

ASN1_STRING *ASN1_STRING_dup(const ASN1_STRING *str) {
  ASN1_STRING *ret;

  if (!str) {
    return NULL;
  }
  ret = ASN1_STRING_new();
  if (!ret) {
    return NULL;
  }
  if (!ASN1_STRING_copy(ret, str)) {
    ASN1_STRING_free(ret);
    return NULL;
  }
  return ret;
}

// ASN1_STRING_set copies |len_s| bytes from |data| into |str|. A negative
// |len_s| means |data| is NUL-terminated and its strlen is used. It returns
// one on success and zero on error.
int ASN1_STRING_set(ASN1_STRING *str, const void *_data, ossl_ssize_t len_s) {
  // NOTE(review): the template arguments of the casts below (e.g.
  // reinterpret_cast<const char *>) were stripped by extraction.
  const char *data = reinterpret_cast(_data);
  size_t len;
  if (len_s < 0) {
    if (data == NULL) {
      return 0;
    }
    len = strlen(data);
  } else {
    len = (size_t)len_s;
  }

  static_assert(ASN1_STRING_MAX < INT_MAX, "len will not overflow int");
  if (len > ASN1_STRING_MAX) {
    OPENSSL_PUT_ERROR(ASN1, ERR_R_OVERFLOW);
    return 0;
  }

  if (str->length <= (int)len || str->data == NULL) {
    unsigned char *c = str->data;
    if (c == NULL) {
      str->data = reinterpret_cast(OPENSSL_malloc(len + 1));
    } else {
      str->data = reinterpret_cast(OPENSSL_realloc(c, len + 1));
    }
    if (str->data == NULL) {
      // Preserve the old buffer on allocation failure.
      str->data = c;
      return 0;
    }
  }
  str->length = (int)len;
  if (data != NULL) {
    OPENSSL_memcpy(str->data, data, len);
    // Historically, OpenSSL would NUL-terminate most (but not all)
    // |ASN1_STRING|s, in case anyone accidentally passed |str->data| into a
    // function expecting a C string. We retain this behavior for compatibility,
    // but code must not rely on this. See CVE-2021-3712.
    str->data[len] = '\0';
  }
  return 1;
}

// ASN1_STRING_set0 frees |str|'s current buffer and takes ownership of |data|.
void ASN1_STRING_set0(ASN1_STRING *str, void *data, int len) {
  OPENSSL_free(str->data);
  str->data = reinterpret_cast(data);
  str->length = len;
}

ASN1_STRING *ASN1_STRING_new(void) {
  return (ASN1_STRING_type_new(V_ASN1_OCTET_STRING));
}

ASN1_STRING *ASN1_STRING_type_new(int type) {
  ASN1_STRING *ret;

  ret = (ASN1_STRING *)OPENSSL_malloc(sizeof(ASN1_STRING));
  if (ret == NULL) {
    return NULL;
  }
  ret->length = 0;
  ret->type = type;
  ret->data = NULL;
  ret->flags = 0;
  return ret;
}

void ASN1_STRING_free(ASN1_STRING *str) {
  if (str == NULL) {
    return;
  }
  OPENSSL_free(str->data);
  OPENSSL_free(str);
}

// ASN1_STRING_cmp compares |a| and |b|, ordering first by effective length
// (for BIT STRINGs, the bit count), then content, then type. It returns a
// negative, zero, or positive value accordingly.
int ASN1_STRING_cmp(const ASN1_STRING *a, const ASN1_STRING *b) {
  // Capture padding bits and implicit truncation in BIT STRINGs.
  int a_length = a->length, b_length = b->length;
  uint8_t a_padding = 0, b_padding = 0;
  if (a->type == V_ASN1_BIT_STRING) {
    a_length = asn1_bit_string_length(a, &a_padding);
  }
  if (b->type == V_ASN1_BIT_STRING) {
    b_length = asn1_bit_string_length(b, &b_padding);
  }

  if (a_length < b_length) {
    return -1;
  }
  if (a_length > b_length) {
    return 1;
  }

  // In a BIT STRING, the number of bits is 8 * length - padding. Invert this
  // comparison so we compare by lengths.
  if (a_padding > b_padding) {
    return -1;
  }
  if (a_padding < b_padding) {
    return 1;
  }

  int ret = OPENSSL_memcmp(a->data, b->data, a_length);
  if (ret != 0) {
    return ret;
  }

  // Comparing the type first is more natural, but this matches OpenSSL.
  if (a->type < b->type) {
    return -1;
  }
  if (a->type > b->type) {
    return 1;
  }
  return 0;
}

int ASN1_STRING_length(const ASN1_STRING *str) { return str->length; }

int ASN1_STRING_type(const ASN1_STRING *str) { return str->type; }

unsigned char *ASN1_STRING_data(ASN1_STRING *str) { return str->data; }

const unsigned char *ASN1_STRING_get0_data(const ASN1_STRING *str) {
  return str->data;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/asn1_par.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): include target lost in extraction; restore from upstream.
#include

// ASN1_tag2str returns a human-readable name for the universal tag |tag|, or
// "(unknown)" if it is out of range. Negative-value pseudo-tags are mapped to
// their underlying INTEGER/ENUMERATED names.
const char *ASN1_tag2str(int tag) {
  static const char *const tag2str[] = {
      "EOC",
      "BOOLEAN",
      "INTEGER",
      "BIT STRING",
      "OCTET STRING",
      "NULL",
      "OBJECT",
      "OBJECT DESCRIPTOR",
      "EXTERNAL",
      "REAL",
      "ENUMERATED",
      "",
      "UTF8STRING",
      "",
      "",
      "",
      "SEQUENCE",
      "SET",
      "NUMERICSTRING",
      "PRINTABLESTRING",
      "T61STRING",
      "VIDEOTEXSTRING",
      "IA5STRING",
      "UTCTIME",
      "GENERALIZEDTIME",
      "GRAPHICSTRING",
      "VISIBLESTRING",
      "GENERALSTRING",
      "UNIVERSALSTRING",
      "",
      "BMPSTRING",
  };

  if ((tag == V_ASN1_NEG_INTEGER) || (tag == V_ASN1_NEG_ENUMERATED)) {
    tag &= ~V_ASN1_NEG;
  }

  if (tag < 0 || tag > 30) {
    return "(unknown)";
  }
  return tag2str[tag];
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/asn_pack.cc
================================================
/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): include targets lost in extraction; restore from upstream.
#include
#include
#include

// ASN1_item_pack DER-encodes |obj| as type |it| into an |ASN1_STRING|. If
// |out| is non-NULL and |*out| is non-NULL, that string is reused; otherwise a
// new one is allocated and, when |out| is non-NULL, stored in |*out|. It
// returns the string or NULL on error.
ASN1_STRING *ASN1_item_pack(void *obj, const ASN1_ITEM *it, ASN1_STRING **out) {
  uint8_t *new_data = NULL;
  // NOTE(review): the cast's template argument was stripped by extraction.
  int len = ASN1_item_i2d(reinterpret_cast(obj), &new_data, it);
  if (len <= 0) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_ENCODE_ERROR);
    return NULL;
  }

  ASN1_STRING *ret = NULL;
  if (out == NULL || *out == NULL) {
    ret = ASN1_STRING_new();
    if (ret == NULL) {
      OPENSSL_free(new_data);
      return NULL;
    }
  } else {
    ret = *out;
  }

  ASN1_STRING_set0(ret, new_data, len);
  if (out != NULL) {
    *out = ret;
  }
  return ret;
}

// ASN1_item_unpack parses the DER in |oct| as type |it|, requiring that the
// entire string is consumed. It returns the parsed object or NULL on error.
void *ASN1_item_unpack(const ASN1_STRING *oct, const ASN1_ITEM *it) {
  const unsigned char *p = oct->data;
  void *ret = ASN1_item_d2i(NULL, &p, oct->length, it);
  if (ret == NULL || p != oct->data + oct->length) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_DECODE_ERROR);
    ASN1_item_free(reinterpret_cast(ret), it);
    return NULL;
  }
  return ret;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/f_int.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): include targets lost in extraction; restore from upstream.
#include
#include

// i2a_ASN1_INTEGER writes |a| to |bp| as hexadecimal, with a leading '-' for
// negative values, "00" for the empty integer, and a "\\\n" continuation every
// 35 bytes. It returns the number of bytes written, or -1 on write error.
int i2a_ASN1_INTEGER(BIO *bp, const ASN1_INTEGER *a) {
  int i, n = 0;
  static const char *h = "0123456789ABCDEF";
  char buf[2];

  if (a == NULL) {
    return 0;
  }

  if (a->type & V_ASN1_NEG) {
    if (BIO_write(bp, "-", 1) != 1) {
      goto err;
    }
    n = 1;
  }

  if (a->length == 0) {
    if (BIO_write(bp, "00", 2) != 2) {
      goto err;
    }
    n += 2;
  } else {
    for (i = 0; i < a->length; i++) {
      if ((i != 0) && (i % 35 == 0)) {
        if (BIO_write(bp, "\\\n", 2) != 2) {
          goto err;
        }
        n += 2;
      }
      buf[0] = h[((unsigned char)a->data[i] >> 4) & 0x0f];
      buf[1] = h[((unsigned char)a->data[i]) & 0x0f];
      if (BIO_write(bp, buf, 2) != 2) {
        goto err;
      }
      n += 2;
    }
  }
  return n;
err:
  return -1;
}

int i2a_ASN1_ENUMERATED(BIO *bp, const ASN1_ENUMERATED *a) {
  return i2a_ASN1_INTEGER(bp, a);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/f_string.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): include targets lost in extraction; restore from upstream.
#include
#include

// i2a_ASN1_STRING writes |a| to |bp| as hexadecimal, with "0" for the empty
// string and a "\\\n" continuation every 35 bytes. The |type| parameter is
// ignored. It returns the number of bytes written, or -1 on write error.
int i2a_ASN1_STRING(BIO *bp, const ASN1_STRING *a, int type) {
  int i, n = 0;
  static const char *h = "0123456789ABCDEF";
  char buf[2];

  if (a == NULL) {
    return 0;
  }

  if (a->length == 0) {
    if (BIO_write(bp, "0", 1) != 1) {
      goto err;
    }
    n = 1;
  } else {
    for (i = 0; i < a->length; i++) {
      if ((i != 0) && (i % 35 == 0)) {
        if (BIO_write(bp, "\\\n", 2) != 2) {
          goto err;
        }
        n += 2;
      }
      buf[0] = h[((unsigned char)a->data[i] >> 4) & 0x0f];
      buf[1] = h[((unsigned char)a->data[i]) & 0x0f];
      if (BIO_write(bp, buf, 2) != 2) {
        goto err;
      }
      n += 2;
    }
  }
  return n;
err:
  return -1;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/asn1/internal.h
================================================
/*
 * Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OPENSSL_HEADER_ASN1_INTERNAL_H
#define OPENSSL_HEADER_ASN1_INTERNAL_H

// NOTE(review): include targets lost in extraction; restore from upstream.
#include
#include
#include

#if defined(__cplusplus)
extern "C" {
#endif


// Wrapper functions for time functions.

// OPENSSL_gmtime converts a time_t value in |time| which must be in the range
// of year 0000 to 9999 to a broken out time value in |tm|. On success |tm| is
// returned. On failure NULL is returned.
OPENSSL_EXPORT struct tm *OPENSSL_gmtime(const time_t *time, struct tm *result);

// OPENSSL_gmtime_adj returns one on success, and updates |tm| by adding
// |offset_day| days and |offset_sec| seconds. It returns zero on failure. |tm|
// must be in the range of year 0000 to 9999 both before and after the update or
// a failure will be returned.
OPENSSL_EXPORT int OPENSSL_gmtime_adj(struct tm *tm, int offset_day,
                                      int64_t offset_sec);

// OPENSSL_gmtime_diff calculates the difference between |from| and |to|. It
// returns one, and outputs the difference as a number of days and seconds in
// |*out_days| and |*out_secs| on success. It returns zero on failure. Both
// |from| and |to| must be in the range of year 0000 to 9999 or a failure will
// be returned.
OPENSSL_EXPORT int OPENSSL_gmtime_diff(int *out_days, int *out_secs,
                                       const struct tm *from,
                                       const struct tm *to);


// Internal ASN1 structures and functions: not for application use

// These are used internally in the ASN1_OBJECT to keep track of
// whether the names and data need to be free()ed
#define ASN1_OBJECT_FLAG_DYNAMIC 0x01          // internal use
#define ASN1_OBJECT_FLAG_DYNAMIC_STRINGS 0x04  // internal use
#define ASN1_OBJECT_FLAG_DYNAMIC_DATA 0x08     // internal use

// An asn1_object_st (aka |ASN1_OBJECT|) represents an ASN.1 OBJECT IDENTIFIER.
// Note: Mutating an |ASN1_OBJECT| is only permitted when initializing it. The
// library maintains a table of static |ASN1_OBJECT|s, which may be referenced
// by non-const |ASN1_OBJECT| pointers. Code which receives an |ASN1_OBJECT|
// pointer externally must assume it is immutable, even if the pointer is not
// const.
struct asn1_object_st {
  const char *sn, *ln;
  int nid;
  int length;
  const unsigned char *data;  // data remains const after init
  int flags;                  // Should we free this one
};

ASN1_OBJECT *ASN1_OBJECT_new(void);

// ASN1_ENCODING is used to save the received encoding of an ASN.1 type. This
// avoids problems with invalid encodings that break signatures.
typedef struct ASN1_ENCODING_st {
  // enc is the saved DER encoding. Its ownership is determined by |buf|.
  uint8_t *enc;
  // len is the length of |enc|. If zero, there is no saved encoding.
  size_t len;
  // buf, if non-NULL, is the |CRYPTO_BUFFER| that |enc| points into. If NULL,
  // |enc| must be released with |OPENSSL_free|.
  CRYPTO_BUFFER *buf;
} ASN1_ENCODING;

OPENSSL_EXPORT int asn1_utctime_to_tm(struct tm *tm, const ASN1_UTCTIME *d,
                                      int allow_timezone_offset);
OPENSSL_EXPORT int asn1_generalizedtime_to_tm(struct tm *tm,
                                              const ASN1_GENERALIZEDTIME *d);

int ASN1_item_ex_new(ASN1_VALUE **pval, const ASN1_ITEM *it);
void ASN1_item_ex_free(ASN1_VALUE **pval, const ASN1_ITEM *it);

void ASN1_template_free(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt);

// ASN1_item_ex_d2i parses |len| bytes from |*in| as a structure of type |it|
// and writes the result to |*pval|. If |tag| is non-negative, |it| is
// implicitly tagged with the tag specified by |tag| and |aclass|. If |opt| is
// non-zero, the value is optional. If |buf| is non-NULL, |*in| must point into
// |buf|.
//
// This function returns one and advances |*in| if an object was successfully
// parsed, -1 if an optional value was successfully skipped, and zero on error.
int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len,
                     const ASN1_ITEM *it, int tag, int aclass, char opt,
                     CRYPTO_BUFFER *buf);

// ASN1_item_ex_i2d encodes |*pval| as a value of type |it| to |out| under the
// i2d output convention. It returns a non-zero length on success and -1 on
// error. If |tag| is -1. the tag and class come from |it|. Otherwise, the tag
// number is |tag| and the class is |aclass|. This is used for implicit tagging.
// This function treats a missing value as an error, not an optional field.
int ASN1_item_ex_i2d(ASN1_VALUE **pval, unsigned char **out,
                     const ASN1_ITEM *it, int tag, int aclass);

void ASN1_primitive_free(ASN1_VALUE **pval, const ASN1_ITEM *it);

// asn1_get_choice_selector returns the CHOICE selector value for |*pval|, which
// must of type |it|.
int asn1_get_choice_selector(ASN1_VALUE **pval, const ASN1_ITEM *it);

int asn1_set_choice_selector(ASN1_VALUE **pval, int value, const ASN1_ITEM *it);

// asn1_get_field_ptr returns a pointer to the field in |*pval| corresponding to
// |tt|.
ASN1_VALUE **asn1_get_field_ptr(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt);

// asn1_do_adb returns the |ASN1_TEMPLATE| for the ANY DEFINED BY field |tt|,
// based on the selector INTEGER or OID in |*pval|. If |tt| is not an ADB field,
// it returns |tt|. If the selector does not match any value, it returns NULL.
// If |nullerr| is non-zero, it will additionally push an error to the error
// queue when there is no match.
const ASN1_TEMPLATE *asn1_do_adb(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt,
                                 int nullerr);

void asn1_refcount_set_one(ASN1_VALUE **pval, const ASN1_ITEM *it);
int asn1_refcount_dec_and_test_zero(ASN1_VALUE **pval, const ASN1_ITEM *it);

void asn1_enc_init(ASN1_VALUE **pval, const ASN1_ITEM *it);
void asn1_enc_free(ASN1_VALUE **pval, const ASN1_ITEM *it);

// asn1_enc_restore, if |*pval| has a saved encoding, writes it to |out| under
// the i2d output convention, sets |*len| to the length, and returns one. If it
// has no saved encoding, it returns zero.
int asn1_enc_restore(int *len, unsigned char **out, ASN1_VALUE **pval,
                     const ASN1_ITEM *it);

// asn1_enc_save saves |inlen| bytes from |in| as |*pval|'s saved encoding. It
// returns one on success and zero on error. If |buf| is non-NULL, |in| must
// point into |buf|.
int asn1_enc_save(ASN1_VALUE **pval, const uint8_t *in, size_t inlen,
                  const ASN1_ITEM *it, CRYPTO_BUFFER *buf);

// asn1_encoding_clear clears the cached encoding in |enc|.
void asn1_encoding_clear(ASN1_ENCODING *enc);

// asn1_type_value_as_pointer returns |a|'s value in pointer form. This is
// usually the value object but, for BOOLEAN values, is 0 or 0xff cast to
// a pointer.
const void *asn1_type_value_as_pointer(const ASN1_TYPE *a);

// asn1_type_set0_string sets |a|'s value to the object represented by |str| and
// takes ownership of |str|.
void asn1_type_set0_string(ASN1_TYPE *a, ASN1_STRING *str);

// asn1_type_cleanup releases memory associated with |a|'s value, without
// freeing |a| itself.
void asn1_type_cleanup(ASN1_TYPE *a); // asn1_is_printable returns one if |value| is a valid Unicode codepoint for an // ASN.1 PrintableString, and zero otherwise. int asn1_is_printable(uint32_t value); // asn1_bit_string_length returns the number of bytes in |str| and sets // |*out_padding_bits| to the number of padding bits. // // This function should be used instead of |ASN1_STRING_length| to correctly // handle the non-|ASN1_STRING_FLAG_BITS_LEFT| case. int asn1_bit_string_length(const ASN1_BIT_STRING *str, uint8_t *out_padding_bits); typedef struct { int nid; long minsize; long maxsize; unsigned long mask; unsigned long flags; } ASN1_STRING_TABLE; // asn1_get_string_table_for_testing sets |*out_ptr| and |*out_len| to the table // of built-in |ASN1_STRING_TABLE| values. It is exported for testing. OPENSSL_EXPORT void asn1_get_string_table_for_testing( const ASN1_STRING_TABLE **out_ptr, size_t *out_len); typedef ASN1_VALUE *ASN1_new_func(void); typedef void ASN1_free_func(ASN1_VALUE *a); typedef ASN1_VALUE *ASN1_d2i_func(ASN1_VALUE **a, const unsigned char **in, long length); typedef int ASN1_i2d_func(ASN1_VALUE *a, unsigned char **in); typedef int ASN1_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len, const ASN1_ITEM *it, int opt, ASN1_TLC *ctx); typedef int ASN1_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it); typedef int ASN1_ex_new_func(ASN1_VALUE **pval, const ASN1_ITEM *it); typedef void ASN1_ex_free_func(ASN1_VALUE **pval, const ASN1_ITEM *it); typedef struct ASN1_EXTERN_FUNCS_st { ASN1_ex_new_func *asn1_ex_new; ASN1_ex_free_func *asn1_ex_free; ASN1_ex_d2i *asn1_ex_d2i; ASN1_ex_i2d *asn1_ex_i2d; } ASN1_EXTERN_FUNCS; #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_ASN1_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/posix_time.cc ================================================ /* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, 
modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// Time conversion to/from POSIX time_t and struct tm, with no support
// for time zones other than UTC

// NOTE(review): the #include targets below appear to have been stripped by
// extraction (angle-bracketed text lost); restore them from upstream
// BoringSSL posix_time.cc before building.
#include #include #include #include #include #include #include "internal.h"

#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (INT64_C(24) * SECS_PER_HOUR)

// Is a year/month/day combination valid, in the range from year 0000
// to 9999?
static int is_valid_date(int64_t year, int64_t month, int64_t day) {
  if (day < 1 || month < 1 || year < 0 || year > 9999) {
    return 0;
  }
  switch (month) {
    case 1:
    case 3:
    case 5:
    case 7:
    case 8:
    case 10:
    case 12:
      return day > 0 && day <= 31;
    case 4:
    case 6:
    case 9:
    case 11:
      return day > 0 && day <= 30;
    case 2:
      // Leap year: divisible by 4 but not 100, or divisible by 400.
      if ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0) {
        return day > 0 && day <= 29;
      } else {
        return day > 0 && day <= 28;
      }
    default:
      return 0;
  }
}

// Is a time valid? Leap seconds of 60 are not considered valid, as
// the POSIX time in seconds does not include them.
static int is_valid_time(int64_t hours, int64_t minutes, int64_t seconds) {
  if (hours < 0 || minutes < 0 || seconds < 0 || hours > 23 || minutes > 59 ||
      seconds > 59) {
    return 0;
  }
  return 1;
}

// 0000-01-01 00:00:00 UTC
#define MIN_POSIX_TIME INT64_C(-62167219200)
// 9999-12-31 23:59:59 UTC
#define MAX_POSIX_TIME INT64_C(253402300799)

// Is an int64 time within our expected range?
static int is_valid_posix_time(int64_t time) {
  return MIN_POSIX_TIME <= time && time <= MAX_POSIX_TIME;
}

// posix_time_from_utc converts a UTC calendar date/time to POSIX seconds in
// |*out_time|. It returns one on success and zero if the date or time is
// invalid (see |is_valid_date| and |is_valid_time|).
//
// Inspired by algorithms presented in
// https://howardhinnant.github.io/date_algorithms.html
// (Public Domain)
static int posix_time_from_utc(int64_t year, int64_t month, int64_t day,
                               int64_t hours, int64_t minutes, int64_t seconds,
                               int64_t *out_time) {
  if (!is_valid_date(year, month, day) ||
      !is_valid_time(hours, minutes, seconds)) {
    return 0;
  }
  if (month <= 2) {
    year--;  // Start years on Mar 1, so leap days always finish a year.
  }
  // At this point year will be in the range -1 and 9999.
  assert(-1 <= year && year <= 9999);
  int64_t era = (year >= 0 ? year : year - 399) / 400;
  int64_t year_of_era = year - era * 400;
  int64_t day_of_year =
      (153 * (month > 2 ? month - 3 : month + 9) + 2) / 5 + day - 1;
  int64_t day_of_era =
      year_of_era * 365 + year_of_era / 4 - year_of_era / 100 + day_of_year;
  int64_t posix_days = era * 146097 + day_of_era - 719468;
  *out_time = posix_days * SECS_PER_DAY + hours * SECS_PER_HOUR +
              minutes * 60 + seconds;
  return 1;
}

// utc_from_posix_time is the inverse of |posix_time_from_utc|: it expands
// |time| (POSIX seconds) into UTC calendar components. It returns one on
// success and zero if |time| is outside [MIN_POSIX_TIME, MAX_POSIX_TIME].
//
// Inspired by algorithms presented in
// https://howardhinnant.github.io/date_algorithms.html
// (Public Domain)
static int utc_from_posix_time(int64_t time, int *out_year, int *out_month,
                               int *out_day, int *out_hours, int *out_minutes,
                               int *out_seconds) {
  if (!is_valid_posix_time(time)) {
    return 0;
  }
  int64_t days = time / SECS_PER_DAY;
  int64_t leftover_seconds = time % SECS_PER_DAY;
  // C division truncates toward zero; normalize so leftover_seconds >= 0.
  if (leftover_seconds < 0) {
    days--;
    leftover_seconds += SECS_PER_DAY;
  }
  days += 719468;  // Shift to starting epoch of Mar 1 0000.
  // At this point, days will be in the range -61 and 3652364.
  assert(-61 <= days && days <= 3652364);
  int64_t era = (days > 0 ? days : days - 146096) / 146097;
  int64_t day_of_era = days - era * 146097;
  int64_t year_of_era = (day_of_era - day_of_era / 1460 + day_of_era / 36524 -
                         day_of_era / 146096) /
                        365;
  *out_year = (int)(year_of_era + era * 400);  // Year starting on Mar 1.
  int64_t day_of_year =
      day_of_era - (365 * year_of_era + year_of_era / 4 - year_of_era / 100);
  int64_t month_of_year = (5 * day_of_year + 2) / 153;
  *out_month =
      (int)(month_of_year < 10 ? month_of_year + 3 : month_of_year - 9);
  if (*out_month <= 2) {
    (*out_year)++;  // Adjust year back to Jan 1 start of year.
  }
  *out_day = (int)(day_of_year - (153 * month_of_year + 2) / 5 + 1);
  *out_hours = (int)(leftover_seconds / SECS_PER_HOUR);
  leftover_seconds %= SECS_PER_HOUR;
  *out_minutes = (int)(leftover_seconds / 60);
  *out_seconds = (int)(leftover_seconds % 60);
  return 1;
}

// OPENSSL_tm_to_posix converts |tm| (interpreted as a UTC calendar time) to
// POSIX seconds in |*out|. It returns one on success and zero if |tm| does
// not describe a valid date/time in years 0000-9999.
int OPENSSL_tm_to_posix(const struct tm *tm, int64_t *out) {
  return posix_time_from_utc(tm->tm_year + INT64_C(1900),
                             tm->tm_mon + INT64_C(1), tm->tm_mday, tm->tm_hour,
                             tm->tm_min, tm->tm_sec, out);
}

// OPENSSL_posix_to_tm converts |time| (POSIX seconds, UTC) into |*out_tm|,
// using struct tm conventions (tm_year is years since 1900, tm_mon is
// zero-based). It returns zero if |time| is out of range; on failure,
// |*out_tm| is left unmodified.
int OPENSSL_posix_to_tm(int64_t time, struct tm *out_tm) {
  struct tm tmp_tm = {};
  if (!utc_from_posix_time(time, &tmp_tm.tm_year, &tmp_tm.tm_mon,
                           &tmp_tm.tm_mday, &tmp_tm.tm_hour, &tmp_tm.tm_min,
                           &tmp_tm.tm_sec)) {
    return 0;
  }
  tmp_tm.tm_year -= 1900;
  tmp_tm.tm_mon -= 1;
  *out_tm = tmp_tm;
  return 1;
}

// OPENSSL_timegm converts |tm| (UTC) to a time_t in |*out|. It returns zero
// if the conversion fails or if the value does not fit in a 32-bit time_t on
// platforms where time_t is 32 bits.
int OPENSSL_timegm(const struct tm *tm, time_t *out) {
  static_assert(
      sizeof(time_t) == sizeof(int32_t) || sizeof(time_t) == sizeof(int64_t),
      "time_t is broken");
  int64_t posix_time;
  if (!OPENSSL_tm_to_posix(tm, &posix_time)) {
    return 0;
  }
  if (sizeof(time_t) == sizeof(int32_t) &&
      (posix_time > INT32_MAX || posix_time < INT32_MIN)) {
    return 0;
  }
  *out = (time_t)posix_time;
  return 1;
}

// OPENSSL_gmtime behaves like the POSIX gmtime_r: it expands |*time| into
// |*out_tm| as UTC. It returns |out_tm| on success and NULL if |*time| is out
// of the supported range.
struct tm *OPENSSL_gmtime(const time_t *time, struct tm *out_tm) {
  static_assert(
      sizeof(time_t) == sizeof(int32_t) || sizeof(time_t) == sizeof(int64_t),
      "time_t is broken");
  int64_t posix_time = *time;
  if (!OPENSSL_posix_to_tm(posix_time, out_tm)) {
    return NULL;
  }
  return out_tm;
}

// OPENSSL_gmtime_adj adjusts |tm| in place by |offset_day| days plus
// |offset_sec| seconds. It returns zero if |tm| is invalid, if the addition
// would overflow int64, or if the result is outside the representable range.
int OPENSSL_gmtime_adj(struct tm *tm, int offset_day, int64_t offset_sec) {
  int64_t posix_time;
  if (!OPENSSL_tm_to_posix(tm, &posix_time)) {
    return 0;
  }
  static_assert(INT_MAX <= INT64_MAX / SECS_PER_DAY,
                "day offset in seconds cannot overflow");
  static_assert(MAX_POSIX_TIME <= INT64_MAX - INT_MAX * SECS_PER_DAY,
                "addition cannot overflow");
  static_assert(MIN_POSIX_TIME >= INT64_MIN - INT_MIN * SECS_PER_DAY,
                "addition cannot underflow");
  posix_time += offset_day * SECS_PER_DAY;
  // Guard the second addition against int64 overflow in either direction.
  if (posix_time > 0 && offset_sec > INT64_MAX - posix_time) {
    return 0;
  }
  if (posix_time < 0 && offset_sec < INT64_MIN - posix_time) {
    return 0;
  }
  posix_time += offset_sec;
  if (!OPENSSL_posix_to_tm(posix_time, tm)) {
    return 0;
  }
  return 1;
}

// OPENSSL_gmtime_diff computes |to| minus |from| as a day count plus a
// second count (|timediff| is the remainder of the division by SECS_PER_DAY,
// so both outputs carry the sign of the difference under C's
// truncate-toward-zero semantics). It returns zero if either input is
// invalid.
int OPENSSL_gmtime_diff(int *out_days, int *out_secs, const struct tm *from,
                        const struct tm *to) {
  int64_t time_to, time_from;
  if (!OPENSSL_tm_to_posix(to, &time_to) ||
      !OPENSSL_tm_to_posix(from, &time_from)) {
    return 0;
  }
  // Times are in range, so these calculations can not overflow.
  static_assert(SECS_PER_DAY <= INT_MAX, "seconds per day does not fit in int");
  static_assert((MAX_POSIX_TIME - MIN_POSIX_TIME) / SECS_PER_DAY <= INT_MAX,
                "range of valid POSIX times, in days, does not fit in int");
  int64_t timediff = time_to - time_from;
  int64_t daydiff = timediff / SECS_PER_DAY;
  timediff %= SECS_PER_DAY;
  *out_secs = (int)timediff;
  *out_days = (int)daydiff;
  return 1;
}

================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/tasn_dec.cc ================================================
/* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */

// NOTE(review): the #include targets below appear to have been stripped by
// extraction (angle-bracketed text lost); restore them from upstream
// BoringSSL tasn_dec.cc before building. The same stripping affects the
// |reinterpret_cast| template arguments later in this file.
#include #include #include #include #include #include #include #include #include #include "../bytestring/internal.h" #include "../internal.h" #include "internal.h"

// Constructed types with a recursive definition (such as can be found in PKCS7)
// could eventually exceed the stack given malicious input with excessive
// recursion. Therefore we limit the stack depth. This is the maximum number of
// recursive invocations of asn1_item_embed_d2i().
#define ASN1_MAX_CONSTRUCTED_NEST 30

static int asn1_check_tlen(long *olen, int *otag, unsigned char *oclass,
                           char *cst, const unsigned char **in, long len,
                           int exptag, int expclass, char opt);
static int asn1_template_ex_d2i(ASN1_VALUE **pval, const unsigned char **in,
                                long len, const ASN1_TEMPLATE *tt, char opt,
                                CRYPTO_BUFFER *buf, int depth);
static int asn1_template_noexp_d2i(ASN1_VALUE **val, const unsigned char **in,
                                   long len, const ASN1_TEMPLATE *tt, char opt,
                                   CRYPTO_BUFFER *buf, int depth);
static int asn1_ex_c2i(ASN1_VALUE **pval, const unsigned char *cont, long len,
                       int utype, const ASN1_ITEM *it);
static int asn1_d2i_ex_primitive(ASN1_VALUE **pval, const unsigned char **in,
                                 long len, const ASN1_ITEM *it, int tag,
                                 int aclass, char opt);
static int asn1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in,
                            long len, const ASN1_ITEM *it, int tag, int aclass,
                            char opt, CRYPTO_BUFFER *buf, int depth);

// Table to convert tags to bit values, used for MSTRING type
// Indexed by universal tag number 0..30; zero or B_ASN1_UNKNOWN entries are
// tags that no MSTRING accepts directly.
static const unsigned long tag2bit[31] = {
    0,                // (reserved)
    0,                // BOOLEAN
    0,                // INTEGER
    B_ASN1_BIT_STRING,
    B_ASN1_OCTET_STRING,
    0,                // NULL
    0,                // OBJECT IDENTIFIER
    B_ASN1_UNKNOWN,   // ObjectDescriptor
    B_ASN1_UNKNOWN,   // EXTERNAL
    B_ASN1_UNKNOWN,   // REAL
    B_ASN1_UNKNOWN,   // ENUMERATED
    B_ASN1_UNKNOWN,   // EMBEDDED PDV
    B_ASN1_UTF8STRING,
    B_ASN1_UNKNOWN,   // RELATIVE-OID
    B_ASN1_UNKNOWN,   // TIME
    B_ASN1_UNKNOWN,   // (reserved)
    B_ASN1_SEQUENCE,
    0,                // SET
    B_ASN1_NUMERICSTRING,
    B_ASN1_PRINTABLESTRING,
    B_ASN1_T61STRING,
    B_ASN1_VIDEOTEXSTRING,
    B_ASN1_IA5STRING,
    B_ASN1_UTCTIME,
    B_ASN1_GENERALIZEDTIME,
    B_ASN1_GRAPHICSTRING,
    B_ASN1_ISO64STRING,
    B_ASN1_GENERALSTRING,
    B_ASN1_UNIVERSALSTRING,
    B_ASN1_UNKNOWN,   // CHARACTER STRING
    B_ASN1_BMPSTRING,
};

// ASN1_tag2bit maps a universal tag number to its B_ASN1_* bit, or zero for
// out-of-range tags.
unsigned long ASN1_tag2bit(int tag) {
  if (tag < 0 || tag > 30) {
    return 0;
  }
  return tag2bit[tag];
}

// is_supported_universal_type returns one if |tag|/|aclass| name a universal
// type this parser knows how to represent, and zero otherwise.
static int is_supported_universal_type(int tag, int aclass) {
  if (aclass != V_ASN1_UNIVERSAL) {
    return 0;
  }
  return tag == V_ASN1_OBJECT || tag == V_ASN1_NULL || tag == V_ASN1_BOOLEAN ||
         tag == V_ASN1_BIT_STRING || tag == V_ASN1_INTEGER ||
         tag == V_ASN1_ENUMERATED || tag == V_ASN1_OCTET_STRING ||
         tag == V_ASN1_NUMERICSTRING || tag == V_ASN1_PRINTABLESTRING ||
         tag == V_ASN1_T61STRING || tag == V_ASN1_VIDEOTEXSTRING ||
         tag == V_ASN1_IA5STRING || tag == V_ASN1_UTCTIME ||
         tag == V_ASN1_GENERALIZEDTIME || tag == V_ASN1_GRAPHICSTRING ||
         tag == V_ASN1_VISIBLESTRING || tag == V_ASN1_GENERALSTRING ||
         tag == V_ASN1_UNIVERSALSTRING || tag == V_ASN1_BMPSTRING ||
         tag == V_ASN1_UTF8STRING || tag == V_ASN1_SET ||
         tag == V_ASN1_SEQUENCE;
}

// Macro to initialize and invalidate the cache
// Decode an ASN1 item, this currently behaves just like a standard 'd2i'
// function. 'in' points to a buffer to read the data from, in future we
// will have more advanced versions that can input data a piece at a time and
// this will simply be a special case.
ASN1_VALUE *ASN1_item_d2i(ASN1_VALUE **pval, const unsigned char **in, long len,
                          const ASN1_ITEM *it) {
  ASN1_VALUE *ret = NULL;
  if (asn1_item_ex_d2i(&ret, in, len, it, /*tag=*/-1, /*aclass=*/0, /*opt=*/0,
                       /*buf=*/NULL, /*depth=*/0) <= 0) {
    // Clean up, in case the caller left a partial object.
    //
    // TODO(davidben): I don't think it can leave one, but the codepaths below
    // are a bit inconsistent. Revisit this when rewriting this function.
    ASN1_item_ex_free(&ret, it);
  }
  // If the caller supplied an output pointer, free the old one and replace it
  // with |ret|. This differs from OpenSSL slightly in that we don't support
  // object reuse. We run this on both success and failure. On failure, even
  // with object reuse, OpenSSL destroys the previous object.
  if (pval != NULL) {
    ASN1_item_ex_free(pval, it);
    *pval = ret;
  }
  return ret;
}

// Decode an item, taking care of IMPLICIT tagging, if any. If 'opt' set and
// tag mismatch return -1 to handle OPTIONAL
//
// Return convention (matches the public |ASN1_item_ex_d2i| contract): one and
// advance |*in| on success, -1 if an optional value was skipped, zero on
// error.
//
// TODO(davidben): Historically, all functions in this file had to account for
// |*pval| containing an arbitrary existing value. This is no longer the case
// because |ASN1_item_d2i| now always starts from NULL. As part of rewriting
// this function, take the simplified assumptions into account. Though we must
// still account for the internal calls to |ASN1_item_ex_new|.
static int asn1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in,
                            long len, const ASN1_ITEM *it, int tag, int aclass,
                            char opt, CRYPTO_BUFFER *buf, int depth) {
  const ASN1_TEMPLATE *tt, *errtt = NULL;
  const unsigned char *p = NULL, *q;
  unsigned char oclass;
  char cst, isopt;
  int i;
  int otag;
  int ret = 0;
  ASN1_VALUE **pchptr;
  if (!pval) {
    return 0;
  }
  if (buf != NULL) {
    assert(CRYPTO_BUFFER_data(buf) <= *in &&
           *in + len <= CRYPTO_BUFFER_data(buf) + CRYPTO_BUFFER_len(buf));
  }
  // Bound |len| to comfortably fit in an int. Lengths in this module often
  // switch between int and long without overflow checks.
  if (len > INT_MAX / 2) {
    len = INT_MAX / 2;
  }
  if (++depth > ASN1_MAX_CONSTRUCTED_NEST) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_TOO_DEEP);
    goto err;
  }
  switch (it->itype) {
    case ASN1_ITYPE_PRIMITIVE:
      if (it->templates) {
        // tagging or OPTIONAL is currently illegal on an item template
        // because the flags can't get passed down. In practice this
        // isn't a problem: we include the relevant flags from the item
        // template in the template itself.
        if ((tag != -1) || opt) {
          OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_OPTIONS_ON_ITEM_TEMPLATE);
          goto err;
        }
        return asn1_template_ex_d2i(pval, in, len, it->templates, opt, buf,
                                    depth);
      }
      return asn1_d2i_ex_primitive(pval, in, len, it, tag, aclass, opt);
      break;
    case ASN1_ITYPE_MSTRING:
      // It never makes sense for multi-strings to have implicit tagging, so
      // if tag != -1, then this looks like an error in the template.
      if (tag != -1) {
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_BAD_TEMPLATE);
        goto err;
      }
      p = *in;
      // Just read in tag and class
      ret = asn1_check_tlen(NULL, &otag, &oclass, NULL, &p, len, -1, 0, 1);
      if (!ret) {
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
        goto err;
      }
      // Must be UNIVERSAL class
      if (oclass != V_ASN1_UNIVERSAL) {
        // If OPTIONAL, assume this is OK
        if (opt) {
          return -1;
        }
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_MSTRING_NOT_UNIVERSAL);
        goto err;
      }
      // Check tag matches bit map
      if (!(ASN1_tag2bit(otag) & it->utype)) {
        // If OPTIONAL, assume this is OK
        if (opt) {
          return -1;
        }
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_MSTRING_WRONG_TAG);
        goto err;
      }
      return asn1_d2i_ex_primitive(pval, in, len, it, otag, 0, 0);
    case ASN1_ITYPE_EXTERN: {
      // We don't support implicit tagging with external types.
      if (tag != -1) {
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_BAD_TEMPLATE);
        goto err;
      }
      // NOTE(review): the reinterpret_cast template argument (likely
      // <const ASN1_EXTERN_FUNCS *>) was stripped by extraction.
      const ASN1_EXTERN_FUNCS *ef = reinterpret_cast(it->funcs);
      return ef->asn1_ex_d2i(pval, in, len, it, opt, NULL);
    }
    case ASN1_ITYPE_CHOICE: {
      // It never makes sense for CHOICE types to have implicit tagging, so if
      // tag != -1, then this looks like an error in the template.
      if (tag != -1) {
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_BAD_TEMPLATE);
        goto err;
      }
      // NOTE(review): reinterpret_cast template argument stripped here too.
      const ASN1_AUX *aux = reinterpret_cast(it->funcs);
      ASN1_aux_cb *asn1_cb = aux != NULL ? aux->asn1_cb : NULL;
      if (asn1_cb && !asn1_cb(ASN1_OP_D2I_PRE, pval, it, NULL)) {
        goto auxerr;
      }
      if (*pval) {
        // Free up and zero CHOICE value if initialised
        i = asn1_get_choice_selector(pval, it);
        if ((i >= 0) && (i < it->tcount)) {
          tt = it->templates + i;
          pchptr = asn1_get_field_ptr(pval, tt);
          ASN1_template_free(pchptr, tt);
          asn1_set_choice_selector(pval, -1, it);
        }
      } else if (!ASN1_item_ex_new(pval, it)) {
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
        goto err;
      }
      // CHOICE type, try each possibility in turn
      p = *in;
      for (i = 0, tt = it->templates; i < it->tcount; i++, tt++) {
        pchptr = asn1_get_field_ptr(pval, tt);
        // We mark field as OPTIONAL so its absence can be recognised.
        ret = asn1_template_ex_d2i(pchptr, &p, len, tt, 1, buf, depth);
        // If field not present, try the next one
        if (ret == -1) {
          continue;
        }
        // If positive return, read OK, break loop
        if (ret > 0) {
          break;
        }
        // Otherwise must be an ASN1 parsing error
        errtt = tt;
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
        goto err;
      }
      // Did we fall off the end without reading anything?
      if (i == it->tcount) {
        // If OPTIONAL, this is OK
        if (opt) {
          // Free and zero it
          ASN1_item_ex_free(pval, it);
          return -1;
        }
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_NO_MATCHING_CHOICE_TYPE);
        goto err;
      }
      asn1_set_choice_selector(pval, i, it);
      if (asn1_cb && !asn1_cb(ASN1_OP_D2I_POST, pval, it, NULL)) {
        goto auxerr;
      }
      *in = p;
      return 1;
    }
    case ASN1_ITYPE_SEQUENCE: {
      p = *in;
      // If no IMPLICIT tagging set to SEQUENCE, UNIVERSAL
      if (tag == -1) {
        tag = V_ASN1_SEQUENCE;
        aclass = V_ASN1_UNIVERSAL;
      }
      // Get SEQUENCE length and update len, p
      ret = asn1_check_tlen(&len, NULL, NULL, &cst, &p, len, tag, aclass, opt);
      if (!ret) {
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
        goto err;
      } else if (ret == -1) {
        return -1;
      }
      if (!cst) {
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_SEQUENCE_NOT_CONSTRUCTED);
        goto err;
      }
      if (!*pval && !ASN1_item_ex_new(pval, it)) {
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
        goto err;
      }
      // NOTE(review): reinterpret_cast template argument stripped here too.
      const ASN1_AUX *aux = reinterpret_cast(it->funcs);
      ASN1_aux_cb *asn1_cb = aux != NULL ? aux->asn1_cb : NULL;
      if (asn1_cb && !asn1_cb(ASN1_OP_D2I_PRE, pval, it, NULL)) {
        goto auxerr;
      }
      // Free up and zero any ADB found
      for (i = 0, tt = it->templates; i < it->tcount; i++, tt++) {
        if (tt->flags & ASN1_TFLG_ADB_MASK) {
          const ASN1_TEMPLATE *seqtt;
          ASN1_VALUE **pseqval;
          seqtt = asn1_do_adb(pval, tt, 0);
          if (seqtt == NULL) {
            continue;
          }
          pseqval = asn1_get_field_ptr(pval, seqtt);
          ASN1_template_free(pseqval, seqtt);
        }
      }
      // Get each field entry
      for (i = 0, tt = it->templates; i < it->tcount; i++, tt++) {
        const ASN1_TEMPLATE *seqtt;
        ASN1_VALUE **pseqval;
        seqtt = asn1_do_adb(pval, tt, 1);
        if (seqtt == NULL) {
          goto err;
        }
        pseqval = asn1_get_field_ptr(pval, seqtt);
        // Have we ran out of data?
        if (!len) {
          break;
        }
        q = p;
        // This determines the OPTIONAL flag value. The field cannot be
        // omitted if it is the last of a SEQUENCE and there is still
        // data to be read. This isn't strictly necessary but it
        // increases efficiency in some cases.
        if (i == (it->tcount - 1)) {
          isopt = 0;
        } else {
          isopt = (seqtt->flags & ASN1_TFLG_OPTIONAL) != 0;
        }
        // attempt to read in field, allowing each to be OPTIONAL
        ret = asn1_template_ex_d2i(pseqval, &p, len, seqtt, isopt, buf, depth);
        if (!ret) {
          errtt = seqtt;
          goto err;
        } else if (ret == -1) {
          // OPTIONAL component absent. Free and zero the field.
          ASN1_template_free(pseqval, seqtt);
          continue;
        }
        // Update length
        len -= p - q;
      }
      // Check all data read
      if (len) {
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_SEQUENCE_LENGTH_MISMATCH);
        goto err;
      }
      // If we get here we've got no more data in the SEQUENCE, however we
      // may not have read all fields so check all remaining are OPTIONAL
      // and clear any that are.
      for (; i < it->tcount; tt++, i++) {
        const ASN1_TEMPLATE *seqtt;
        seqtt = asn1_do_adb(pval, tt, 1);
        if (seqtt == NULL) {
          goto err;
        }
        if (seqtt->flags & ASN1_TFLG_OPTIONAL) {
          ASN1_VALUE **pseqval;
          pseqval = asn1_get_field_ptr(pval, seqtt);
          ASN1_template_free(pseqval, seqtt);
        } else {
          errtt = seqtt;
          OPENSSL_PUT_ERROR(ASN1, ASN1_R_FIELD_MISSING);
          goto err;
        }
      }
      // Save encoding
      if (!asn1_enc_save(pval, *in, p - *in, it, buf)) {
        goto auxerr;
      }
      if (asn1_cb && !asn1_cb(ASN1_OP_D2I_POST, pval, it, NULL)) {
        goto auxerr;
      }
      *in = p;
      return 1;
    }
    default:
      return 0;
  }
auxerr:
  OPENSSL_PUT_ERROR(ASN1, ASN1_R_AUX_ERROR);
err:
  // All error paths free any partially-constructed value and attach the
  // failing field/type names to the error queue.
  ASN1_item_ex_free(pval, it);
  if (errtt) {
    ERR_add_error_data(4, "Field=", errtt->field_name, ", Type=", it->sname);
  } else {
    ERR_add_error_data(2, "Type=", it->sname);
  }
  return 0;
}

int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len,
                     const ASN1_ITEM *it, int tag, int aclass, char opt,
                     CRYPTO_BUFFER *buf) {
  return asn1_item_ex_d2i(pval, in, len, it, tag, aclass, opt, buf,
                          /*depth=*/0);
}

// Templates are handled with two separate functions. One handles any
// EXPLICIT tag and the other handles the rest.
// asn1_template_ex_d2i parses a value of template |tt| from |*in|, unwrapping
// an EXPLICIT outer tag if the template has one and then delegating to
// |asn1_template_noexp_d2i|. It returns 1 on success, -1 if |opt| is set and
// the field was absent, and 0 on error.
static int asn1_template_ex_d2i(ASN1_VALUE **val, const unsigned char **in,
                                long inlen, const ASN1_TEMPLATE *tt, char opt,
                                CRYPTO_BUFFER *buf, int depth) {
  int aclass;
  int ret;
  long len;
  const unsigned char *p, *q;
  if (!val) {
    return 0;
  }
  uint32_t flags = tt->flags;
  aclass = flags & ASN1_TFLG_TAG_CLASS;
  p = *in;
  // Check if EXPLICIT tag expected
  if (flags & ASN1_TFLG_EXPTAG) {
    char cst;
    // Need to work out amount of data available to the inner content and
    // where it starts: so read in EXPLICIT header to get the info.
    ret = asn1_check_tlen(&len, NULL, NULL, &cst, &p, inlen, tt->tag, aclass,
                          opt);
    q = p;
    if (!ret) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
      return 0;
    } else if (ret == -1) {
      return -1;
    }
    if (!cst) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_EXPLICIT_TAG_NOT_CONSTRUCTED);
      return 0;
    }
    // We've found the field so it can't be OPTIONAL now
    ret = asn1_template_noexp_d2i(val, &p, len, tt, /*opt=*/0, buf, depth);
    if (!ret) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
      return 0;
    }
    // We read the field in OK so update length
    len -= p - q;
    // Check for trailing data.
    if (len) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_EXPLICIT_LENGTH_MISMATCH);
      goto err;
    }
  } else {
    return asn1_template_noexp_d2i(val, in, inlen, tt, opt, buf, depth);
  }
  *in = p;
  return 1;
err:
  ASN1_template_free(val, tt);
  return 0;
}

// asn1_template_noexp_d2i parses a value of template |tt|, handling SET OF /
// SEQUENCE OF containers and IMPLICIT tagging. Same return convention as
// |asn1_template_ex_d2i|.
static int asn1_template_noexp_d2i(ASN1_VALUE **val, const unsigned char **in,
                                   long len, const ASN1_TEMPLATE *tt, char opt,
                                   CRYPTO_BUFFER *buf, int depth) {
  int aclass;
  int ret;
  const unsigned char *p;
  if (!val) {
    return 0;
  }
  uint32_t flags = tt->flags;
  aclass = flags & ASN1_TFLG_TAG_CLASS;
  p = *in;
  if (flags & ASN1_TFLG_SK_MASK) {
    // SET OF, SEQUENCE OF
    int sktag, skaclass;
    // First work out expected inner tag value
    if (flags & ASN1_TFLG_IMPTAG) {
      sktag = tt->tag;
      skaclass = aclass;
    } else {
      skaclass = V_ASN1_UNIVERSAL;
      if (flags & ASN1_TFLG_SET_OF) {
        sktag = V_ASN1_SET;
      } else {
        sktag = V_ASN1_SEQUENCE;
      }
    }
    // Get the tag
    ret = asn1_check_tlen(&len, NULL, NULL, NULL, &p, len, sktag, skaclass,
                          opt);
    if (!ret) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
      return 0;
    } else if (ret == -1) {
      return -1;
    }
    if (!*val) {
      *val = (ASN1_VALUE *)sk_ASN1_VALUE_new_null();
    } else {
      // We've got a valid STACK: free up any items present
      STACK_OF(ASN1_VALUE) *sktmp = (STACK_OF(ASN1_VALUE) *)*val;
      ASN1_VALUE *vtmp;
      while (sk_ASN1_VALUE_num(sktmp) > 0) {
        vtmp = sk_ASN1_VALUE_pop(sktmp);
        ASN1_item_ex_free(&vtmp, ASN1_ITEM_ptr(tt->item));
      }
    }
    if (!*val) {
      goto err;
    }
    // Read as many items as we can
    while (len > 0) {
      ASN1_VALUE *skfield;
      const unsigned char *q = p;
      skfield = NULL;
      if (!asn1_item_ex_d2i(&skfield, &p, len, ASN1_ITEM_ptr(tt->item),
                            /*tag=*/-1, /*aclass=*/0, /*opt=*/0, buf, depth)) {
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
        goto err;
      }
      len -= p - q;
      // The stack takes ownership of |skfield| on successful push; on push
      // failure we must free it ourselves.
      if (!sk_ASN1_VALUE_push((STACK_OF(ASN1_VALUE) *)*val, skfield)) {
        ASN1_item_ex_free(&skfield, ASN1_ITEM_ptr(tt->item));
        goto err;
      }
    }
  } else if (flags & ASN1_TFLG_IMPTAG) {
    // IMPLICIT tagging
    ret = asn1_item_ex_d2i(val, &p, len, ASN1_ITEM_ptr(tt->item), tt->tag,
                           aclass, opt, buf, depth);
    if (!ret) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
      goto err;
    } else if (ret == -1) {
      return -1;
    }
  } else {
    // Nothing special
    ret = asn1_item_ex_d2i(val, &p, len, ASN1_ITEM_ptr(tt->item),
                           /*tag=*/-1, /*aclass=*/0, opt, buf, depth);
    if (!ret) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
      goto err;
    } else if (ret == -1) {
      return -1;
    }
  }
  *in = p;
  return 1;
err:
  ASN1_template_free(val, tt);
  return 0;
}

// asn1_d2i_ex_primitive parses a single primitive (or ANY, or
// encoded-as-opaque SEQUENCE/SET/OTHER) value and hands the content octets to
// |asn1_ex_c2i|. Same return convention as |asn1_template_ex_d2i|.
static int asn1_d2i_ex_primitive(ASN1_VALUE **pval, const unsigned char **in,
                                 long inlen, const ASN1_ITEM *it, int tag,
                                 int aclass, char opt) {
  int ret = 0, utype;
  long plen;
  char cst;
  const unsigned char *p;
  const unsigned char *cont = NULL;
  long len;
  if (!pval) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_NULL);
    return 0;  // Should never happen
  }
  if (it->itype == ASN1_ITYPE_MSTRING) {
    utype = tag;
    tag = -1;
  } else {
    utype = it->utype;
  }
  if (utype == V_ASN1_ANY) {
    // If type is ANY need to figure out type from tag
    unsigned char oclass;
    if (tag >= 0) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_TAGGED_ANY);
      return 0;
    }
    if (opt) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_OPTIONAL_ANY);
      return 0;
    }
    p = *in;
    ret = asn1_check_tlen(NULL, &utype, &oclass, NULL, &p, inlen, -1, 0, 0);
    if (!ret) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
      return 0;
    }
    if (!is_supported_universal_type(utype, oclass)) {
      utype = V_ASN1_OTHER;
    }
  }
  if (tag == -1) {
    tag = utype;
    aclass = V_ASN1_UNIVERSAL;
  }
  p = *in;
  // Check header
  ret = asn1_check_tlen(&plen, NULL, NULL, &cst, &p, inlen, tag, aclass, opt);
  if (!ret) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_NESTED_ASN1_ERROR);
    return 0;
  } else if (ret == -1) {
    return -1;
  }
  ret = 0;
  // SEQUENCE, SET and "OTHER" are left in encoded form
  if ((utype == V_ASN1_SEQUENCE) || (utype == V_ASN1_SET) ||
      (utype == V_ASN1_OTHER)) {
    // SEQUENCE and SET must be constructed
    if (utype != V_ASN1_OTHER && !cst) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_TYPE_NOT_CONSTRUCTED);
      return 0;
    }
    // Keep the full TLV (header included) as the content.
    cont = *in;
    len = p - cont + plen;
    p += plen;
  } else if (cst) {
    // This parser historically supported BER constructed strings. We no
    // longer do and will gradually tighten this parser into a DER
    // parser. BER types should use |CBS_asn1_ber_to_der|.
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_TYPE_NOT_PRIMITIVE);
    return 0;
  } else {
    cont = p;
    len = plen;
    p += plen;
  }
  // We now have content length and type: translate into a structure
  if (!asn1_ex_c2i(pval, cont, len, utype, it)) {
    goto err;
  }
  *in = p;
  ret = 1;
err:
  return ret;
}

// Translate ASN1 content octets into a structure
// Returns one on success and zero on error; on error any ANY wrapper
// allocated here is freed and |*opval| zeroed.
static int asn1_ex_c2i(ASN1_VALUE **pval, const unsigned char *cont, long len,
                       int utype, const ASN1_ITEM *it) {
  ASN1_VALUE **opval = NULL;
  ASN1_STRING *stmp;
  ASN1_TYPE *typ = NULL;
  int ret = 0;
  ASN1_INTEGER **tint;
  // Historically, |it->funcs| for primitive types contained an
  // |ASN1_PRIMITIVE_FUNCS| table of callbacks.
  assert(it->funcs == NULL);
  // If ANY type clear type and set pointer to internal value
  if (it->utype == V_ASN1_ANY) {
    if (!*pval) {
      typ = ASN1_TYPE_new();
      if (typ == NULL) {
        goto err;
      }
      *pval = (ASN1_VALUE *)typ;
    } else {
      typ = (ASN1_TYPE *)*pval;
    }
    if (utype != typ->type) {
      ASN1_TYPE_set(typ, utype, NULL);
    }
    opval = pval;
    pval = &typ->value.asn1_value;
  }
  switch (utype) {
    case V_ASN1_OBJECT:
      if (!c2i_ASN1_OBJECT((ASN1_OBJECT **)pval, &cont, len)) {
        goto err;
      }
      break;
    case V_ASN1_NULL:
      if (len) {
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_NULL_IS_WRONG_LENGTH);
        goto err;
      }
      *pval = (ASN1_VALUE *)1;
      break;
    case V_ASN1_BOOLEAN:
      if (len != 1) {
        OPENSSL_PUT_ERROR(ASN1, ASN1_R_BOOLEAN_IS_WRONG_LENGTH);
        goto err;
      } else {
        ASN1_BOOLEAN *tbool;
        tbool = (ASN1_BOOLEAN *)pval;
        *tbool = *cont;
      }
      break;
    case V_ASN1_BIT_STRING:
      if (!c2i_ASN1_BIT_STRING((ASN1_BIT_STRING **)pval, &cont, len)) {
        goto err;
      }
      break;
    case V_ASN1_INTEGER:
    case V_ASN1_ENUMERATED:
      tint = (ASN1_INTEGER **)pval;
      if (!c2i_ASN1_INTEGER(tint, &cont, len)) {
        goto err;
      }
      // Fixup type to match the expected form
      (*tint)->type = utype | ((*tint)->type & V_ASN1_NEG);
      break;
    case V_ASN1_OCTET_STRING:
    case V_ASN1_NUMERICSTRING:
    case V_ASN1_PRINTABLESTRING:
    case V_ASN1_T61STRING:
    case V_ASN1_VIDEOTEXSTRING:
    case V_ASN1_IA5STRING:
    case V_ASN1_UTCTIME:
    case V_ASN1_GENERALIZEDTIME:
    case V_ASN1_GRAPHICSTRING:
    case V_ASN1_VISIBLESTRING:
    case V_ASN1_GENERALSTRING:
    case V_ASN1_UNIVERSALSTRING:
    case V_ASN1_BMPSTRING:
    case V_ASN1_UTF8STRING:
    case V_ASN1_OTHER:
    case V_ASN1_SET:
    case V_ASN1_SEQUENCE:
    // TODO(crbug.com/boringssl/412): This default case should be removed, now
    // that we've resolved https://crbug.com/boringssl/561. However, it is still
    // needed to support some edge cases in |ASN1_PRINTABLE|. |ASN1_PRINTABLE|
    // broadly doesn't tolerate unrecognized universal tags, but except for
    // eight values that map to |B_ASN1_UNKNOWN| instead of zero. See the
    // X509Test.NameAttributeValues test.
    default: {
      CBS cbs;
      CBS_init(&cbs, cont, (size_t)len);
      // Validate the character encoding of the string types we check.
      if (utype == V_ASN1_BMPSTRING) {
        while (CBS_len(&cbs) != 0) {
          uint32_t c;
          if (!CBS_get_ucs2_be(&cbs, &c)) {
            OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_BMPSTRING);
            goto err;
          }
        }
      }
      if (utype == V_ASN1_UNIVERSALSTRING) {
        while (CBS_len(&cbs) != 0) {
          uint32_t c;
          if (!CBS_get_utf32_be(&cbs, &c)) {
            OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_UNIVERSALSTRING);
            goto err;
          }
        }
      }
      if (utype == V_ASN1_UTF8STRING) {
        while (CBS_len(&cbs) != 0) {
          uint32_t c;
          if (!CBS_get_utf8(&cbs, &c)) {
            OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_UTF8STRING);
            goto err;
          }
        }
      }
      if (utype == V_ASN1_UTCTIME) {
        if (!CBS_parse_utc_time(&cbs, NULL, /*allow_timezone_offset=*/1)) {
          OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_TIME_FORMAT);
          goto err;
        }
      }
      if (utype == V_ASN1_GENERALIZEDTIME) {
        if (!CBS_parse_generalized_time(&cbs, NULL,
                                        /*allow_timezone_offset=*/0)) {
          OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_TIME_FORMAT);
          goto err;
        }
      }
      // TODO(https://crbug.com/boringssl/427): Check other string types.

      // All based on ASN1_STRING and handled the same
      if (!*pval) {
        stmp = ASN1_STRING_type_new(utype);
        if (!stmp) {
          goto err;
        }
        *pval = (ASN1_VALUE *)stmp;
      } else {
        stmp = (ASN1_STRING *)*pval;
        stmp->type = utype;
      }
      if (!ASN1_STRING_set(stmp, cont, len)) {
        ASN1_STRING_free(stmp);
        *pval = NULL;
        goto err;
      }
      break;
    }
  }
  // If ASN1_ANY and NULL type fix up value
  if (typ && (utype == V_ASN1_NULL)) {
    typ->value.ptr = NULL;
  }
  ret = 1;
err:
  if (!ret) {
    ASN1_TYPE_free(typ);
    if (opval) {
      *opval = NULL;
    }
  }
  return ret;
}

// Check an ASN1 tag and length: a bit like ASN1_get_object but it
// checks the expected tag.
// Returns 1 on success (advancing |*in| past the header), -1 if |opt| is set
// and the tag did not match, and 0 on a malformed header or tag mismatch.
static int asn1_check_tlen(long *olen, int *otag, unsigned char *oclass,
                           char *cst, const unsigned char **in, long len,
                           int exptag, int expclass, char opt) {
  int i;
  int ptag, pclass;
  long plen;
  const unsigned char *p;
  p = *in;
  i = ASN1_get_object(&p, &plen, &ptag, &pclass, len);
  if (i & 0x80) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_BAD_OBJECT_HEADER);
    return 0;
  }
  if (exptag >= 0) {
    if ((exptag != ptag) || (expclass != pclass)) {
      // If type is OPTIONAL, not an error: indicate missing type.
      if (opt) {
        return -1;
      }
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_WRONG_TAG);
      return 0;
    }
  }
  if (cst) {
    *cst = i & V_ASN1_CONSTRUCTED;
  }
  if (olen) {
    *olen = plen;
  }
  if (oclass) {
    *oclass = pclass;
  }
  if (otag) {
    *otag = ptag;
  }
  *in = p;
  return 1;
}

================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/tasn_enc.cc ================================================
/* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "../internal.h" #include "internal.h" static int asn1_item_ex_i2d_opt(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass, int optional); static int asn1_i2d_ex_primitive(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass, int optional); static int asn1_ex_i2c(ASN1_VALUE **pval, unsigned char *cont, int *out_omit, int *putype, const ASN1_ITEM *it); static int asn1_set_seq_out(STACK_OF(ASN1_VALUE) *sk, unsigned char **out, int skcontlen, const ASN1_ITEM *item, int do_sort); static int asn1_template_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_TEMPLATE *tt, int tag, int aclass, int optional); // Top level i2d equivalents int ASN1_item_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it) { if (out && !*out) { unsigned char *p, *buf; int len = ASN1_item_ex_i2d(&val, NULL, it, /*tag=*/-1, /*aclass=*/0); if (len <= 0) { return len; } buf = reinterpret_cast(OPENSSL_malloc(len)); if (!buf) { return -1; } p = buf; int len2 = ASN1_item_ex_i2d(&val, &p, it, /*tag=*/-1, /*aclass=*/0); if (len2 <= 0) { OPENSSL_free(buf); return len2; } assert(len == len2); *out = buf; return len; } return ASN1_item_ex_i2d(&val, out, it, /*tag=*/-1, /*aclass=*/0); } // Encode an item, taking care of IMPLICIT tagging (if any). This function // performs the normal item handling: it can be used in external types. int ASN1_item_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass) { int ret = asn1_item_ex_i2d_opt(pval, out, it, tag, aclass, /*optional=*/0); assert(ret != 0); return ret; } // asn1_item_ex_i2d_opt behaves like |ASN1_item_ex_i2d| but, if |optional| is // non-zero and |*pval| is omitted, it returns zero and writes no bytes. 
int asn1_item_ex_i2d_opt(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass, int optional) { const ASN1_TEMPLATE *tt = NULL; int i, seqcontlen, seqlen; // Historically, |aclass| was repurposed to pass additional flags into the // encoding process. assert((aclass & ASN1_TFLG_TAG_CLASS) == aclass); // If not overridding the tag, |aclass| is ignored and should be zero. assert(tag != -1 || aclass == 0); // All fields are pointers, except for boolean |ASN1_ITYPE_PRIMITIVE|s. // Optional primitives are handled later. if ((it->itype != ASN1_ITYPE_PRIMITIVE) && !*pval) { if (optional) { return 0; } OPENSSL_PUT_ERROR(ASN1, ASN1_R_MISSING_VALUE); return -1; } switch (it->itype) { case ASN1_ITYPE_PRIMITIVE: if (it->templates) { // This is an |ASN1_ITEM_TEMPLATE|. if (it->templates->flags & ASN1_TFLG_OPTIONAL) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_BAD_TEMPLATE); return -1; } return asn1_template_ex_i2d(pval, out, it->templates, tag, aclass, optional); } return asn1_i2d_ex_primitive(pval, out, it, tag, aclass, optional); case ASN1_ITYPE_MSTRING: // It never makes sense for multi-strings to have implicit tagging, so // if tag != -1, then this looks like an error in the template. if (tag != -1) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_BAD_TEMPLATE); return -1; } return asn1_i2d_ex_primitive(pval, out, it, -1, 0, optional); case ASN1_ITYPE_CHOICE: { // It never makes sense for CHOICE types to have implicit tagging, so if // tag != -1, then this looks like an error in the template. 
if (tag != -1) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_BAD_TEMPLATE); return -1; } i = asn1_get_choice_selector(pval, it); if (i < 0 || i >= it->tcount) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_NO_MATCHING_CHOICE_TYPE); return -1; } const ASN1_TEMPLATE *chtt = it->templates + i; if (chtt->flags & ASN1_TFLG_OPTIONAL) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_BAD_TEMPLATE); return -1; } ASN1_VALUE **pchval = asn1_get_field_ptr(pval, chtt); return asn1_template_ex_i2d(pchval, out, chtt, -1, 0, /*optional=*/0); } case ASN1_ITYPE_EXTERN: { // We don't support implicit tagging with external types. if (tag != -1) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_BAD_TEMPLATE); return -1; } const ASN1_EXTERN_FUNCS *ef = reinterpret_cast(it->funcs); int ret = ef->asn1_ex_i2d(pval, out, it); if (ret == 0) { // |asn1_ex_i2d| should never return zero. We have already checked // for optional values generically, and |ASN1_ITYPE_EXTERN| fields // must be pointers. OPENSSL_PUT_ERROR(ASN1, ERR_R_INTERNAL_ERROR); return -1; } return ret; } case ASN1_ITYPE_SEQUENCE: { i = asn1_enc_restore(&seqcontlen, out, pval, it); // An error occurred if (i < 0) { return -1; } // We have a valid cached encoding... 
if (i > 0) { return seqcontlen; } // Otherwise carry on seqcontlen = 0; // If no IMPLICIT tagging set to SEQUENCE, UNIVERSAL if (tag == -1) { tag = V_ASN1_SEQUENCE; aclass = V_ASN1_UNIVERSAL; } // First work out sequence content length for (i = 0, tt = it->templates; i < it->tcount; tt++, i++) { const ASN1_TEMPLATE *seqtt; ASN1_VALUE **pseqval; int tmplen; seqtt = asn1_do_adb(pval, tt, 1); if (!seqtt) { return -1; } pseqval = asn1_get_field_ptr(pval, seqtt); tmplen = asn1_template_ex_i2d(pseqval, NULL, seqtt, -1, 0, /*optional=*/0); if (tmplen == -1 || (tmplen > INT_MAX - seqcontlen)) { return -1; } seqcontlen += tmplen; } seqlen = ASN1_object_size(/*constructed=*/1, seqcontlen, tag); if (!out || seqlen == -1) { return seqlen; } // Output SEQUENCE header ASN1_put_object(out, /*constructed=*/1, seqcontlen, tag, aclass); for (i = 0, tt = it->templates; i < it->tcount; tt++, i++) { const ASN1_TEMPLATE *seqtt; ASN1_VALUE **pseqval; seqtt = asn1_do_adb(pval, tt, 1); if (!seqtt) { return -1; } pseqval = asn1_get_field_ptr(pval, seqtt); if (asn1_template_ex_i2d(pseqval, out, seqtt, -1, 0, /*optional=*/0) < 0) { return -1; } } return seqlen; } default: OPENSSL_PUT_ERROR(ASN1, ASN1_R_BAD_TEMPLATE); return -1; } } // asn1_template_ex_i2d behaves like |asn1_item_ex_i2d_opt| but uses an // |ASN1_TEMPLATE| instead of an |ASN1_ITEM|. An |ASN1_TEMPLATE| wraps an // |ASN1_ITEM| with modifiers such as tagging, SEQUENCE or SET, etc. static int asn1_template_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_TEMPLATE *tt, int tag, int iclass, int optional) { int i, ret, ttag, tclass; size_t j; uint32_t flags = tt->flags; // Historically, |iclass| was repurposed to pass additional flags into the // encoding process. assert((iclass & ASN1_TFLG_TAG_CLASS) == iclass); // If not overridding the tag, |iclass| is ignored and should be zero. 
assert(tag != -1 || iclass == 0); // Work out tag and class to use: tagging may come either from the // template or the arguments, not both because this would create // ambiguity. if (flags & ASN1_TFLG_TAG_MASK) { // Error if argument and template tagging if (tag != -1) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_BAD_TEMPLATE); return -1; } // Get tagging from template ttag = tt->tag; tclass = flags & ASN1_TFLG_TAG_CLASS; } else if (tag != -1) { // No template tagging, get from arguments ttag = tag; tclass = iclass & ASN1_TFLG_TAG_CLASS; } else { ttag = -1; tclass = 0; } // The template may itself by marked as optional, or this may be the template // of an |ASN1_ITEM_TEMPLATE| type which was contained inside an outer // optional template. (They cannot both be true because the // |ASN1_ITEM_TEMPLATE| codepath rejects optional templates.) assert(!optional || (flags & ASN1_TFLG_OPTIONAL) == 0); optional = optional || (flags & ASN1_TFLG_OPTIONAL) != 0; // At this point 'ttag' contains the outer tag to use, and 'tclass' is the // class. if (flags & ASN1_TFLG_SK_MASK) { // SET OF, SEQUENCE OF STACK_OF(ASN1_VALUE) *sk = (STACK_OF(ASN1_VALUE) *)*pval; int isset, sktag, skaclass; int skcontlen, sklen; ASN1_VALUE *skitem; if (!*pval) { if (optional) { return 0; } OPENSSL_PUT_ERROR(ASN1, ASN1_R_MISSING_VALUE); return -1; } if (flags & ASN1_TFLG_SET_OF) { isset = 1; // Historically, types with both bits set were mutated when // serialized to apply the sort. We no longer support this. assert((flags & ASN1_TFLG_SEQUENCE_OF) == 0); } else { isset = 0; } // Work out inner tag value: if EXPLICIT or no tagging use underlying // type. 
if ((ttag != -1) && !(flags & ASN1_TFLG_EXPTAG)) { sktag = ttag; skaclass = tclass; } else { skaclass = V_ASN1_UNIVERSAL; if (isset) { sktag = V_ASN1_SET; } else { sktag = V_ASN1_SEQUENCE; } } // Determine total length of items skcontlen = 0; for (j = 0; j < sk_ASN1_VALUE_num(sk); j++) { int tmplen; skitem = sk_ASN1_VALUE_value(sk, j); tmplen = ASN1_item_ex_i2d(&skitem, NULL, ASN1_ITEM_ptr(tt->item), -1, 0); if (tmplen == -1 || (skcontlen > INT_MAX - tmplen)) { return -1; } skcontlen += tmplen; } sklen = ASN1_object_size(/*constructed=*/1, skcontlen, sktag); if (sklen == -1) { return -1; } // If EXPLICIT need length of surrounding tag if (flags & ASN1_TFLG_EXPTAG) { ret = ASN1_object_size(/*constructed=*/1, sklen, ttag); } else { ret = sklen; } if (!out || ret == -1) { return ret; } // Now encode this lot... // EXPLICIT tag if (flags & ASN1_TFLG_EXPTAG) { ASN1_put_object(out, /*constructed=*/1, sklen, ttag, tclass); } // SET or SEQUENCE and IMPLICIT tag ASN1_put_object(out, /*constructed=*/1, skcontlen, sktag, skaclass); // And the stuff itself if (!asn1_set_seq_out(sk, out, skcontlen, ASN1_ITEM_ptr(tt->item), isset)) { return -1; } return ret; } if (flags & ASN1_TFLG_EXPTAG) { // EXPLICIT tagging // Find length of tagged item i = asn1_item_ex_i2d_opt(pval, NULL, ASN1_ITEM_ptr(tt->item), -1, 0, optional); if (i <= 0) { return i; } // Find length of EXPLICIT tag ret = ASN1_object_size(/*constructed=*/1, i, ttag); if (out && ret != -1) { // Output tag and item ASN1_put_object(out, /*constructed=*/1, i, ttag, tclass); if (ASN1_item_ex_i2d(pval, out, ASN1_ITEM_ptr(tt->item), -1, 0) < 0) { return -1; } } return ret; } // Either normal or IMPLICIT tagging return asn1_item_ex_i2d_opt(pval, out, ASN1_ITEM_ptr(tt->item), ttag, tclass, optional); } // Temporary structure used to hold DER encoding of items for SET OF typedef struct { unsigned char *data; int length; } DER_ENC; static int der_cmp(const void *a, const void *b) { const DER_ENC *d1 = reinterpret_cast(a), *d2 = 
reinterpret_cast(b); int cmplen, i; cmplen = (d1->length < d2->length) ? d1->length : d2->length; i = OPENSSL_memcmp(d1->data, d2->data, cmplen); if (i) { return i; } return d1->length - d2->length; } // asn1_set_seq_out writes |sk| to |out| under the i2d output convention, // excluding the tag and length. It returns one on success and zero on error. // |skcontlen| must be the total encoded size. If |do_sort| is non-zero, the // elements are sorted for a SET OF type. Each element of |sk| has type // |item|. static int asn1_set_seq_out(STACK_OF(ASN1_VALUE) *sk, unsigned char **out, int skcontlen, const ASN1_ITEM *item, int do_sort) { // No need to sort if there are fewer than two items. if (!do_sort || sk_ASN1_VALUE_num(sk) < 2) { for (size_t i = 0; i < sk_ASN1_VALUE_num(sk); i++) { ASN1_VALUE *skitem = sk_ASN1_VALUE_value(sk, i); if (ASN1_item_ex_i2d(&skitem, out, item, -1, 0) < 0) { return 0; } } return 1; } int ret = 0; uint8_t *const buf = reinterpret_cast(OPENSSL_malloc(skcontlen)); DER_ENC *encoded = reinterpret_cast( OPENSSL_calloc(sk_ASN1_VALUE_num(sk), sizeof(*encoded))); uint8_t *p = buf; if (encoded == NULL || buf == NULL) { goto err; } // Encode all the elements into |buf| and populate |encoded|. for (size_t i = 0; i < sk_ASN1_VALUE_num(sk); i++) { ASN1_VALUE *skitem = sk_ASN1_VALUE_value(sk, i); encoded[i].data = p; encoded[i].length = ASN1_item_ex_i2d(&skitem, &p, item, -1, 0); if (encoded[i].length < 0) { goto err; } assert(p - buf <= skcontlen); } qsort(encoded, sk_ASN1_VALUE_num(sk), sizeof(*encoded), der_cmp); // Output the elements in sorted order. p = *out; for (size_t i = 0; i < sk_ASN1_VALUE_num(sk); i++) { OPENSSL_memcpy(p, encoded[i].data, encoded[i].length); p += encoded[i].length; } *out = p; ret = 1; err: OPENSSL_free(encoded); OPENSSL_free(buf); return ret; } // asn1_i2d_ex_primitive behaves like |ASN1_item_ex_i2d| but |item| must be a // a PRIMITIVE or MSTRING type that is not an |ASN1_ITEM_TEMPLATE|. 
static int asn1_i2d_ex_primitive(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass, int optional) { // Get length of content octets and maybe find out the underlying type. int omit; int utype = it->utype; int len = asn1_ex_i2c(pval, NULL, &omit, &utype, it); if (len < 0) { return -1; } if (omit) { if (optional) { return 0; } OPENSSL_PUT_ERROR(ASN1, ASN1_R_MISSING_VALUE); return -1; } // If SEQUENCE, SET or OTHER then header is included in pseudo content // octets so don't include tag+length. We need to check here because the // call to asn1_ex_i2c() could change utype. int usetag = utype != V_ASN1_SEQUENCE && utype != V_ASN1_SET && utype != V_ASN1_OTHER; // If not implicitly tagged get tag from underlying type if (tag == -1) { tag = utype; } // Output tag+length followed by content octets if (out) { if (usetag) { ASN1_put_object(out, /*constructed=*/0, len, tag, aclass); } int len2 = asn1_ex_i2c(pval, *out, &omit, &utype, it); if (len2 < 0) { return -1; } assert(len == len2); assert(!omit); *out += len; } if (usetag) { return ASN1_object_size(/*constructed=*/0, len, tag); } return len; } // asn1_ex_i2c writes the |*pval| to |cout| under the i2d output convention, // excluding the tag and length. It returns the number of bytes written, // possibly zero, on success or -1 on error. If |*pval| should be omitted, it // returns zero and sets |*out_omit| to true. // // If |it| is an MSTRING or ANY type, it gets the underlying type from |*pval|, // which must be an |ASN1_STRING| or |ASN1_TYPE|, respectively. It then updates // |*putype| with the tag number of type used, or |V_ASN1_OTHER| if it was not a // universal type. If |*putype| is set to |V_ASN1_SEQUENCE|, |V_ASN1_SET|, or // |V_ASN1_OTHER|, it additionally outputs the tag and length, so the caller // must not do so. // // Otherwise, |*putype| must contain |it->utype|. // // WARNING: Unlike most functions in this file, |asn1_ex_i2c| can return zero // without omitting the element. 
ASN.1 values may have empty contents. static int asn1_ex_i2c(ASN1_VALUE **pval, unsigned char *cout, int *out_omit, int *putype, const ASN1_ITEM *it) { ASN1_BOOLEAN *tbool = NULL; ASN1_STRING *strtmp; ASN1_OBJECT *otmp; int utype; const unsigned char *cont; unsigned char c; int len; // Historically, |it->funcs| for primitive types contained an // |ASN1_PRIMITIVE_FUNCS| table of callbacks. assert(it->funcs == NULL); *out_omit = 0; // Should type be omitted? if ((it->itype != ASN1_ITYPE_PRIMITIVE) || (it->utype != V_ASN1_BOOLEAN)) { if (!*pval) { *out_omit = 1; return 0; } } if (it->itype == ASN1_ITYPE_MSTRING) { // If MSTRING type set the underlying type strtmp = (ASN1_STRING *)*pval; utype = strtmp->type; if (utype < 0 && utype != V_ASN1_OTHER) { // MSTRINGs can have type -1 when default-constructed. OPENSSL_PUT_ERROR(ASN1, ASN1_R_WRONG_TYPE); return -1; } // Negative INTEGER and ENUMERATED values use |ASN1_STRING| type values // that do not match their corresponding utype values. INTEGERs cannot // participate in MSTRING types, but ENUMERATEDs can. // // TODO(davidben): Is this a bug? Although arguably one of the MSTRING // types should contain more values, rather than less. See // https://crbug.com/boringssl/412. But it is not possible to fit all // possible ANY values into an |ASN1_STRING|, so matching the spec here // is somewhat hopeless. if (utype == V_ASN1_NEG_INTEGER) { utype = V_ASN1_INTEGER; } else if (utype == V_ASN1_NEG_ENUMERATED) { utype = V_ASN1_ENUMERATED; } *putype = utype; } else if (it->utype == V_ASN1_ANY) { // If ANY set type and pointer to value ASN1_TYPE *typ; typ = (ASN1_TYPE *)*pval; utype = typ->type; if (utype < 0 && utype != V_ASN1_OTHER) { // |ASN1_TYPE|s can have type -1 when default-constructed. 
OPENSSL_PUT_ERROR(ASN1, ASN1_R_WRONG_TYPE); return -1; } *putype = utype; pval = &typ->value.asn1_value; } else { utype = *putype; } switch (utype) { case V_ASN1_OBJECT: otmp = (ASN1_OBJECT *)*pval; cont = otmp->data; len = otmp->length; if (len == 0) { // Some |ASN1_OBJECT|s do not have OIDs and cannot be serialized. OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_OBJECT); return -1; } break; case V_ASN1_NULL: cont = NULL; len = 0; break; case V_ASN1_BOOLEAN: tbool = (ASN1_BOOLEAN *)pval; if (*tbool == ASN1_BOOLEAN_NONE) { *out_omit = 1; return 0; } if (it->utype != V_ASN1_ANY) { // Default handling if value == size field then omit if ((*tbool && (it->size > 0)) || (!*tbool && !it->size)) { *out_omit = 1; return 0; } } c = *tbool ? 0xff : 0x00; cont = &c; len = 1; break; case V_ASN1_BIT_STRING: { int ret = i2c_ASN1_BIT_STRING((ASN1_BIT_STRING *)*pval, cout ? &cout : NULL); // |i2c_ASN1_BIT_STRING| returns zero on error instead of -1. return ret <= 0 ? -1 : ret; } case V_ASN1_INTEGER: case V_ASN1_ENUMERATED: { // |i2c_ASN1_INTEGER| also handles ENUMERATED. int ret = i2c_ASN1_INTEGER((ASN1_INTEGER *)*pval, cout ? &cout : NULL); // |i2c_ASN1_INTEGER| returns zero on error instead of -1. return ret <= 0 ? -1 : ret; } case V_ASN1_OCTET_STRING: case V_ASN1_NUMERICSTRING: case V_ASN1_PRINTABLESTRING: case V_ASN1_T61STRING: case V_ASN1_VIDEOTEXSTRING: case V_ASN1_IA5STRING: case V_ASN1_UTCTIME: case V_ASN1_GENERALIZEDTIME: case V_ASN1_GRAPHICSTRING: case V_ASN1_VISIBLESTRING: case V_ASN1_GENERALSTRING: case V_ASN1_UNIVERSALSTRING: case V_ASN1_BMPSTRING: case V_ASN1_UTF8STRING: case V_ASN1_SEQUENCE: case V_ASN1_SET: // This is not a valid |ASN1_ITEM| type, but it appears in |ASN1_TYPE|. case V_ASN1_OTHER: // TODO(crbug.com/boringssl/412): This default case should be removed, now // that we've resolved https://crbug.com/boringssl/561. However, it is still // needed to support some edge cases in |ASN1_PRINTABLE|. 
|ASN1_PRINTABLE| // broadly doesn't tolerate unrecognized universal tags, but except for // eight values that map to |B_ASN1_UNKNOWN| instead of zero. See the // X509Test.NameAttributeValues test. default: // All based on ASN1_STRING and handled the same strtmp = (ASN1_STRING *)*pval; cont = strtmp->data; len = strtmp->length; break; } if (cout && len) { OPENSSL_memcpy(cout, cont, len); } return len; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/tasn_fre.cc ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "internal.h" // Free up an ASN1 structure void ASN1_item_free(ASN1_VALUE *val, const ASN1_ITEM *it) { ASN1_item_ex_free(&val, it); } void ASN1_item_ex_free(ASN1_VALUE **pval, const ASN1_ITEM *it) { if (!pval) { return; } if ((it->itype != ASN1_ITYPE_PRIMITIVE) && !*pval) { return; } switch (it->itype) { case ASN1_ITYPE_PRIMITIVE: if (it->templates) { ASN1_template_free(pval, it->templates); } else { ASN1_primitive_free(pval, it); } break; case ASN1_ITYPE_MSTRING: ASN1_primitive_free(pval, it); break; case ASN1_ITYPE_CHOICE: { const ASN1_AUX *aux = reinterpret_cast(it->funcs); ASN1_aux_cb *asn1_cb = aux != NULL ? 
aux->asn1_cb : NULL; if (asn1_cb) { if (asn1_cb(ASN1_OP_FREE_PRE, pval, it, NULL) == 2) { return; } } int i = asn1_get_choice_selector(pval, it); if ((i >= 0) && (i < it->tcount)) { const ASN1_TEMPLATE *tt = it->templates + i; ASN1_VALUE **pchval = asn1_get_field_ptr(pval, tt); ASN1_template_free(pchval, tt); } if (asn1_cb) { asn1_cb(ASN1_OP_FREE_POST, pval, it, NULL); } OPENSSL_free(*pval); *pval = NULL; break; } case ASN1_ITYPE_EXTERN: { const ASN1_EXTERN_FUNCS *ef = reinterpret_cast(it->funcs); if (ef && ef->asn1_ex_free) { ef->asn1_ex_free(pval, it); } break; } case ASN1_ITYPE_SEQUENCE: { if (!asn1_refcount_dec_and_test_zero(pval, it)) { return; } const ASN1_AUX *aux = reinterpret_cast(it->funcs); ASN1_aux_cb *asn1_cb = aux != NULL ? aux->asn1_cb : NULL; if (asn1_cb) { if (asn1_cb(ASN1_OP_FREE_PRE, pval, it, NULL) == 2) { return; } } asn1_enc_free(pval, it); // If we free up as normal we will invalidate any ANY DEFINED BY // field and we wont be able to determine the type of the field it // defines. So free up in reverse order. for (int i = it->tcount - 1; i >= 0; i--) { const ASN1_TEMPLATE *seqtt = asn1_do_adb(pval, &it->templates[i], 0); if (!seqtt) { continue; } ASN1_VALUE **pseqval = asn1_get_field_ptr(pval, seqtt); ASN1_template_free(pseqval, seqtt); } if (asn1_cb) { asn1_cb(ASN1_OP_FREE_POST, pval, it, NULL); } OPENSSL_free(*pval); *pval = NULL; break; } } } void ASN1_template_free(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt) { if (tt->flags & ASN1_TFLG_SK_MASK) { STACK_OF(ASN1_VALUE) *sk = (STACK_OF(ASN1_VALUE) *)*pval; for (size_t i = 0; i < sk_ASN1_VALUE_num(sk); i++) { ASN1_VALUE *vtmp = sk_ASN1_VALUE_value(sk, i); ASN1_item_ex_free(&vtmp, ASN1_ITEM_ptr(tt->item)); } sk_ASN1_VALUE_free(sk); *pval = NULL; } else { ASN1_item_ex_free(pval, ASN1_ITEM_ptr(tt->item)); } } void ASN1_primitive_free(ASN1_VALUE **pval, const ASN1_ITEM *it) { // Historically, |it->funcs| for primitive types contained an // |ASN1_PRIMITIVE_FUNCS| table of calbacks. 
assert(it->funcs == NULL); int utype = it->itype == ASN1_ITYPE_MSTRING ? -1 : it->utype; switch (utype) { case V_ASN1_OBJECT: ASN1_OBJECT_free((ASN1_OBJECT *)*pval); break; case V_ASN1_BOOLEAN: if (it) { *(ASN1_BOOLEAN *)pval = (ASN1_BOOLEAN)it->size; } else { *(ASN1_BOOLEAN *)pval = ASN1_BOOLEAN_NONE; } return; case V_ASN1_NULL: break; case V_ASN1_ANY: if (*pval != NULL) { asn1_type_cleanup((ASN1_TYPE *)*pval); OPENSSL_free(*pval); } break; default: ASN1_STRING_free((ASN1_STRING *)*pval); *pval = NULL; break; } *pval = NULL; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/tasn_new.cc ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "../internal.h" #include "internal.h" static void asn1_item_clear(ASN1_VALUE **pval, const ASN1_ITEM *it); static int ASN1_template_new(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt); static void asn1_template_clear(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt); static int ASN1_primitive_new(ASN1_VALUE **pval, const ASN1_ITEM *it); static void asn1_primitive_clear(ASN1_VALUE **pval, const ASN1_ITEM *it); ASN1_VALUE *ASN1_item_new(const ASN1_ITEM *it) { ASN1_VALUE *ret = NULL; if (ASN1_item_ex_new(&ret, it) > 0) { return ret; } return NULL; } // Allocate an ASN1 structure int ASN1_item_ex_new(ASN1_VALUE **pval, const ASN1_ITEM *it) { const ASN1_TEMPLATE *tt = NULL; const ASN1_EXTERN_FUNCS *ef; ASN1_VALUE **pseqval; int i; switch (it->itype) { case ASN1_ITYPE_EXTERN: ef = reinterpret_cast(it->funcs); if (ef && ef->asn1_ex_new) { if (!ef->asn1_ex_new(pval, it)) { goto memerr; } } break; case ASN1_ITYPE_PRIMITIVE: if 
(it->templates) { if (!ASN1_template_new(pval, it->templates)) { goto memerr; } } else if (!ASN1_primitive_new(pval, it)) { goto memerr; } break; case ASN1_ITYPE_MSTRING: if (!ASN1_primitive_new(pval, it)) { goto memerr; } break; case ASN1_ITYPE_CHOICE: { const ASN1_AUX *aux = reinterpret_cast(it->funcs); ASN1_aux_cb *asn1_cb = aux != NULL ? aux->asn1_cb : NULL; if (asn1_cb) { i = asn1_cb(ASN1_OP_NEW_PRE, pval, it, NULL); if (!i) { goto auxerr; } if (i == 2) { return 1; } } *pval = reinterpret_cast(OPENSSL_zalloc(it->size)); if (!*pval) { goto memerr; } asn1_set_choice_selector(pval, -1, it); if (asn1_cb && !asn1_cb(ASN1_OP_NEW_POST, pval, it, NULL)) { goto auxerr2; } break; } case ASN1_ITYPE_SEQUENCE: { const ASN1_AUX *aux = reinterpret_cast(it->funcs); ASN1_aux_cb *asn1_cb = aux != NULL ? aux->asn1_cb : NULL; if (asn1_cb) { i = asn1_cb(ASN1_OP_NEW_PRE, pval, it, NULL); if (!i) { goto auxerr; } if (i == 2) { return 1; } } *pval = reinterpret_cast(OPENSSL_zalloc(it->size)); if (!*pval) { goto memerr; } asn1_refcount_set_one(pval, it); asn1_enc_init(pval, it); for (i = 0, tt = it->templates; i < it->tcount; tt++, i++) { pseqval = asn1_get_field_ptr(pval, tt); if (!ASN1_template_new(pseqval, tt)) { goto memerr2; } } if (asn1_cb && !asn1_cb(ASN1_OP_NEW_POST, pval, it, NULL)) { goto auxerr2; } break; } } return 1; memerr2: ASN1_item_ex_free(pval, it); memerr: return 0; auxerr2: ASN1_item_ex_free(pval, it); auxerr: OPENSSL_PUT_ERROR(ASN1, ASN1_R_AUX_ERROR); return 0; } static void asn1_item_clear(ASN1_VALUE **pval, const ASN1_ITEM *it) { switch (it->itype) { case ASN1_ITYPE_EXTERN: *pval = NULL; break; case ASN1_ITYPE_PRIMITIVE: if (it->templates) { asn1_template_clear(pval, it->templates); } else { asn1_primitive_clear(pval, it); } break; case ASN1_ITYPE_MSTRING: asn1_primitive_clear(pval, it); break; case ASN1_ITYPE_CHOICE: case ASN1_ITYPE_SEQUENCE: *pval = NULL; break; } } static int ASN1_template_new(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt) { const ASN1_ITEM *it 
= ASN1_ITEM_ptr(tt->item); int ret; if (tt->flags & ASN1_TFLG_OPTIONAL) { asn1_template_clear(pval, tt); return 1; } // If ANY DEFINED BY nothing to do if (tt->flags & ASN1_TFLG_ADB_MASK) { *pval = NULL; return 1; } // If SET OF or SEQUENCE OF, its a STACK if (tt->flags & ASN1_TFLG_SK_MASK) { STACK_OF(ASN1_VALUE) *skval; skval = sk_ASN1_VALUE_new_null(); if (!skval) { ret = 0; goto done; } *pval = (ASN1_VALUE *)skval; ret = 1; goto done; } // Otherwise pass it back to the item routine ret = ASN1_item_ex_new(pval, it); done: return ret; } static void asn1_template_clear(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt) { // If ADB or STACK just NULL the field if (tt->flags & (ASN1_TFLG_ADB_MASK | ASN1_TFLG_SK_MASK)) { *pval = NULL; } else { asn1_item_clear(pval, ASN1_ITEM_ptr(tt->item)); } } // NB: could probably combine most of the real XXX_new() behaviour and junk // all the old functions. static int ASN1_primitive_new(ASN1_VALUE **pval, const ASN1_ITEM *it) { if (!it) { return 0; } // Historically, |it->funcs| for primitive types contained an // |ASN1_PRIMITIVE_FUNCS| table of calbacks. assert(it->funcs == NULL); int utype; if (it->itype == ASN1_ITYPE_MSTRING) { utype = -1; } else { utype = it->utype; } switch (utype) { case V_ASN1_OBJECT: *pval = (ASN1_VALUE *)OBJ_get_undef(); return 1; case V_ASN1_BOOLEAN: *(ASN1_BOOLEAN *)pval = (ASN1_BOOLEAN)it->size; return 1; case V_ASN1_NULL: *pval = (ASN1_VALUE *)1; return 1; case V_ASN1_ANY: { ASN1_TYPE *typ = reinterpret_cast(OPENSSL_malloc(sizeof(ASN1_TYPE))); if (!typ) { return 0; } typ->value.ptr = NULL; typ->type = -1; *pval = (ASN1_VALUE *)typ; break; } default: *pval = (ASN1_VALUE *)ASN1_STRING_type_new(utype); break; } if (*pval) { return 1; } return 0; } static void asn1_primitive_clear(ASN1_VALUE **pval, const ASN1_ITEM *it) { int utype; // Historically, |it->funcs| for primitive types contained an // |ASN1_PRIMITIVE_FUNCS| table of calbacks. 
assert(it == NULL || it->funcs == NULL); if (!it || (it->itype == ASN1_ITYPE_MSTRING)) { utype = -1; } else { utype = it->utype; } if (utype == V_ASN1_BOOLEAN) { *(ASN1_BOOLEAN *)pval = (ASN1_BOOLEAN)it->size; } else { *pval = NULL; } } ================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/tasn_typ.cc ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include // Declarations for string types #define IMPLEMENT_ASN1_STRING_FUNCTIONS(sname) \ IMPLEMENT_ASN1_TYPE(sname) \ IMPLEMENT_ASN1_ENCODE_FUNCTIONS_const_fname(sname, sname, sname) \ sname *sname##_new(void) { return ASN1_STRING_type_new(V_##sname); } \ void sname##_free(sname *x) { ASN1_STRING_free(x); } IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_OCTET_STRING) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_INTEGER) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_ENUMERATED) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_BIT_STRING) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_UTF8STRING) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_PRINTABLESTRING) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_T61STRING) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_IA5STRING) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_GENERALSTRING) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_UTCTIME) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_GENERALIZEDTIME) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_VISIBLESTRING) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_UNIVERSALSTRING) IMPLEMENT_ASN1_STRING_FUNCTIONS(ASN1_BMPSTRING) IMPLEMENT_ASN1_TYPE(ASN1_NULL) IMPLEMENT_ASN1_FUNCTIONS_const(ASN1_NULL) IMPLEMENT_ASN1_TYPE(ASN1_OBJECT) IMPLEMENT_ASN1_TYPE(ASN1_ANY) // Just swallow an ASN1_SEQUENCE in an ASN1_STRING IMPLEMENT_ASN1_TYPE(ASN1_SEQUENCE) 
IMPLEMENT_ASN1_FUNCTIONS_const_fname(ASN1_TYPE, ASN1_ANY, ASN1_TYPE) // Multistring types IMPLEMENT_ASN1_MSTRING(ASN1_PRINTABLE, B_ASN1_PRINTABLE) IMPLEMENT_ASN1_FUNCTIONS_const_fname(ASN1_STRING, ASN1_PRINTABLE, ASN1_PRINTABLE) IMPLEMENT_ASN1_MSTRING(DISPLAYTEXT, B_ASN1_DISPLAYTEXT) IMPLEMENT_ASN1_FUNCTIONS_const_fname(ASN1_STRING, DISPLAYTEXT, DISPLAYTEXT) IMPLEMENT_ASN1_MSTRING(DIRECTORYSTRING, B_ASN1_DIRECTORYSTRING) IMPLEMENT_ASN1_FUNCTIONS_const_fname(ASN1_STRING, DIRECTORYSTRING, DIRECTORYSTRING) // Three separate BOOLEAN type: normal, DEFAULT TRUE and DEFAULT FALSE IMPLEMENT_ASN1_TYPE_ex(ASN1_BOOLEAN, ASN1_BOOLEAN, ASN1_BOOLEAN_NONE) IMPLEMENT_ASN1_TYPE_ex(ASN1_TBOOLEAN, ASN1_BOOLEAN, ASN1_BOOLEAN_TRUE) IMPLEMENT_ASN1_TYPE_ex(ASN1_FBOOLEAN, ASN1_BOOLEAN, ASN1_BOOLEAN_FALSE) ASN1_ITEM_TEMPLATE(ASN1_SEQUENCE_ANY) = ASN1_EX_TEMPLATE_TYPE(ASN1_TFLG_SEQUENCE_OF, 0, ASN1_SEQUENCE_ANY, ASN1_ANY) ASN1_ITEM_TEMPLATE_END(ASN1_SEQUENCE_ANY) ASN1_ITEM_TEMPLATE(ASN1_SET_ANY) = ASN1_EX_TEMPLATE_TYPE(ASN1_TFLG_SET_OF, 0, ASN1_SET_ANY, ASN1_ANY) ASN1_ITEM_TEMPLATE_END(ASN1_SET_ANY) IMPLEMENT_ASN1_ENCODE_FUNCTIONS_const_fname(ASN1_SEQUENCE_ANY, ASN1_SEQUENCE_ANY, ASN1_SEQUENCE_ANY) IMPLEMENT_ASN1_ENCODE_FUNCTIONS_const_fname(ASN1_SEQUENCE_ANY, ASN1_SET_ANY, ASN1_SET_ANY) ================================================ FILE: Sources/CNIOBoringSSL/crypto/asn1/tasn_utl.cc ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include "../internal.h" #include "internal.h" // Utility functions for manipulating fields and offsets // Add 'offset' to 'addr' #define offset2ptr(addr, offset) (void *)(((char *)(addr)) + (offset)) // Given an ASN1_ITEM CHOICE type return the selector value int asn1_get_choice_selector(ASN1_VALUE **pval, const ASN1_ITEM *it) { int *sel = reinterpret_cast(offset2ptr(*pval, it->utype)); return *sel; } // Given an ASN1_ITEM CHOICE type set the selector value, return old value. int asn1_set_choice_selector(ASN1_VALUE **pval, int value, const ASN1_ITEM *it) { int *sel, ret; sel = reinterpret_cast(offset2ptr(*pval, it->utype)); ret = *sel; *sel = value; return ret; } static CRYPTO_refcount_t *asn1_get_references(ASN1_VALUE **pval, const ASN1_ITEM *it) { if (it->itype != ASN1_ITYPE_SEQUENCE) { return NULL; } const ASN1_AUX *aux = reinterpret_cast(it->funcs); if (!aux || !(aux->flags & ASN1_AFLG_REFCOUNT)) { return NULL; } return reinterpret_cast( offset2ptr(*pval, aux->ref_offset)); } void asn1_refcount_set_one(ASN1_VALUE **pval, const ASN1_ITEM *it) { CRYPTO_refcount_t *references = asn1_get_references(pval, it); if (references != NULL) { *references = 1; } } int asn1_refcount_dec_and_test_zero(ASN1_VALUE **pval, const ASN1_ITEM *it) { CRYPTO_refcount_t *references = asn1_get_references(pval, it); if (references != NULL) { return CRYPTO_refcount_dec_and_test_zero(references); } return 1; } static ASN1_ENCODING *asn1_get_enc_ptr(ASN1_VALUE **pval, const ASN1_ITEM *it) { assert(it->itype == ASN1_ITYPE_SEQUENCE); const ASN1_AUX *aux; if (!pval || !*pval) { return NULL; } aux = reinterpret_cast(it->funcs); if (!aux || !(aux->flags & ASN1_AFLG_ENCODING)) { return NULL; } return reinterpret_cast(offset2ptr(*pval, aux->enc_offset)); } void asn1_enc_init(ASN1_VALUE **pval, 
const ASN1_ITEM *it) { ASN1_ENCODING *enc = asn1_get_enc_ptr(pval, it); if (enc) { enc->enc = NULL; enc->len = 0; enc->buf = NULL; } } void asn1_enc_free(ASN1_VALUE **pval, const ASN1_ITEM *it) { ASN1_ENCODING *enc = asn1_get_enc_ptr(pval, it); if (enc) { asn1_encoding_clear(enc); } } int asn1_enc_save(ASN1_VALUE **pval, const uint8_t *in, size_t in_len, const ASN1_ITEM *it, CRYPTO_BUFFER *buf) { ASN1_ENCODING *enc; enc = asn1_get_enc_ptr(pval, it); if (!enc) { return 1; } asn1_encoding_clear(enc); if (buf != NULL) { assert(CRYPTO_BUFFER_data(buf) <= in && in + in_len <= CRYPTO_BUFFER_data(buf) + CRYPTO_BUFFER_len(buf)); CRYPTO_BUFFER_up_ref(buf); enc->buf = buf; enc->enc = (uint8_t *)in; } else { enc->enc = reinterpret_cast(OPENSSL_memdup(in, in_len)); if (!enc->enc) { return 0; } } enc->len = in_len; return 1; } void asn1_encoding_clear(ASN1_ENCODING *enc) { if (enc->buf != NULL) { CRYPTO_BUFFER_free(enc->buf); } else { OPENSSL_free(enc->enc); } enc->enc = NULL; enc->len = 0; enc->buf = NULL; } int asn1_enc_restore(int *len, unsigned char **out, ASN1_VALUE **pval, const ASN1_ITEM *it) { ASN1_ENCODING *enc = asn1_get_enc_ptr(pval, it); if (!enc || enc->len == 0) { return 0; } if (out) { OPENSSL_memcpy(*out, enc->enc, enc->len); *out += enc->len; } if (len) { *len = enc->len; } return 1; } // Given an ASN1_TEMPLATE get a pointer to a field ASN1_VALUE **asn1_get_field_ptr(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt) { ASN1_VALUE **pvaltmp = reinterpret_cast(offset2ptr(*pval, tt->offset)); // NOTE for BOOLEAN types the field is just a plain int so we can't return // int **, so settle for (int *). return pvaltmp; } // Handle ANY DEFINED BY template, find the selector, look up the relevant // ASN1_TEMPLATE in the table and return it. 
const ASN1_TEMPLATE *asn1_do_adb(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt, int nullerr) { const ASN1_ADB *adb; const ASN1_ADB_TABLE *atbl; ASN1_VALUE **sfld; int i; if (!(tt->flags & ASN1_TFLG_ADB_MASK)) { return tt; } // Else ANY DEFINED BY ... get the table adb = ASN1_ADB_ptr(tt->item); // Get the selector field sfld = reinterpret_cast(offset2ptr(*pval, adb->offset)); // Check if NULL int selector; if (*sfld == NULL) { if (!adb->null_tt) { goto err; } return adb->null_tt; } // Convert type to a NID: // NB: don't check for NID_undef here because it // might be a legitimate value in the table assert(tt->flags & ASN1_TFLG_ADB_OID); selector = OBJ_obj2nid((ASN1_OBJECT *)*sfld); // Try to find matching entry in table Maybe should check application types // first to allow application override? Might also be useful to have a flag // which indicates table is sorted and we can do a binary search. For now // stick to a linear search. for (atbl = adb->tbl, i = 0; i < adb->tblcount; i++, atbl++) { if (atbl->value == selector) { return &atbl->tt; } } // FIXME: need to search application table too // No match, return default type if (!adb->default_tt) { goto err; } return adb->default_tt; err: // FIXME: should log the value or OID of unsupported type if (nullerr) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_UNSUPPORTED_ANY_DEFINED_BY_TYPE); } return NULL; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/base64/base64.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "../internal.h" // constant_time_lt_args_8 behaves like |constant_time_lt_8| but takes |uint8_t| // arguments for a slightly simpler implementation. static inline uint8_t constant_time_lt_args_8(uint8_t a, uint8_t b) { crypto_word_t aw = a; crypto_word_t bw = b; // |crypto_word_t| is larger than |uint8_t|, so |aw| and |bw| have the same // MSB. |aw| < |bw| iff MSB(|aw| - |bw|) is 1. return constant_time_msb_w(aw - bw); } // constant_time_in_range_8 returns |CONSTTIME_TRUE_8| if |min| <= |a| <= |max| // and |CONSTTIME_FALSE_8| otherwise. static inline uint8_t constant_time_in_range_8(uint8_t a, uint8_t min, uint8_t max) { a -= min; return constant_time_lt_args_8(a, max - min + 1); } // Encoding. static uint8_t conv_bin2ascii(uint8_t a) { // Since PEM is sometimes used to carry private keys, we encode base64 data // itself in constant-time. 
a &= 0x3f; uint8_t ret = constant_time_select_8(constant_time_eq_8(a, 62), '+', '/'); ret = constant_time_select_8(constant_time_lt_args_8(a, 62), a - 52 + '0', ret); ret = constant_time_select_8(constant_time_lt_args_8(a, 52), a - 26 + 'a', ret); ret = constant_time_select_8(constant_time_lt_args_8(a, 26), a + 'A', ret); return ret; } static_assert(sizeof(((EVP_ENCODE_CTX *)(NULL))->data) % 3 == 0, "data length must be a multiple of base64 chunk size"); int EVP_EncodedLength(size_t *out_len, size_t len) { if (len + 2 < len) { return 0; } len += 2; len /= 3; if (((len << 2) >> 2) != len) { return 0; } len <<= 2; if (len + 1 < len) { return 0; } len++; *out_len = len; return 1; } EVP_ENCODE_CTX *EVP_ENCODE_CTX_new(void) { return reinterpret_cast( OPENSSL_zalloc(sizeof(EVP_ENCODE_CTX))); } void EVP_ENCODE_CTX_free(EVP_ENCODE_CTX *ctx) { OPENSSL_free(ctx); } void EVP_EncodeInit(EVP_ENCODE_CTX *ctx) { OPENSSL_memset(ctx, 0, sizeof(EVP_ENCODE_CTX)); } void EVP_EncodeUpdate(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, size_t in_len) { size_t total = 0; *out_len = 0; if (in_len == 0) { return; } assert(ctx->data_used < sizeof(ctx->data)); if (sizeof(ctx->data) - ctx->data_used > in_len) { OPENSSL_memcpy(&ctx->data[ctx->data_used], in, in_len); ctx->data_used += (unsigned)in_len; return; } if (ctx->data_used != 0) { const size_t todo = sizeof(ctx->data) - ctx->data_used; OPENSSL_memcpy(&ctx->data[ctx->data_used], in, todo); in += todo; in_len -= todo; size_t encoded = EVP_EncodeBlock(out, ctx->data, sizeof(ctx->data)); ctx->data_used = 0; out += encoded; *(out++) = '\n'; *out = '\0'; total = encoded + 1; } while (in_len >= sizeof(ctx->data)) { size_t encoded = EVP_EncodeBlock(out, in, sizeof(ctx->data)); in += sizeof(ctx->data); in_len -= sizeof(ctx->data); out += encoded; *(out++) = '\n'; *out = '\0'; if (total + encoded + 1 < total) { *out_len = 0; return; } total += encoded + 1; } if (in_len != 0) { OPENSSL_memcpy(ctx->data, in, in_len); } 
ctx->data_used = (unsigned)in_len; if (total > INT_MAX) { // We cannot signal an error, but we can at least avoid making *out_len // negative. total = 0; } *out_len = (int)total; } void EVP_EncodeFinal(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len) { if (ctx->data_used == 0) { *out_len = 0; return; } size_t encoded = EVP_EncodeBlock(out, ctx->data, ctx->data_used); out[encoded++] = '\n'; out[encoded] = '\0'; ctx->data_used = 0; // ctx->data_used is bounded by sizeof(ctx->data), so this does not // overflow. assert(encoded <= INT_MAX); *out_len = (int)encoded; } size_t EVP_EncodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) { uint32_t l; size_t remaining = src_len, ret = 0; while (remaining) { if (remaining >= 3) { l = (((uint32_t)src[0]) << 16L) | (((uint32_t)src[1]) << 8L) | src[2]; *(dst++) = conv_bin2ascii(l >> 18L); *(dst++) = conv_bin2ascii(l >> 12L); *(dst++) = conv_bin2ascii(l >> 6L); *(dst++) = conv_bin2ascii(l); remaining -= 3; } else { l = ((uint32_t)src[0]) << 16L; if (remaining == 2) { l |= ((uint32_t)src[1] << 8L); } *(dst++) = conv_bin2ascii(l >> 18L); *(dst++) = conv_bin2ascii(l >> 12L); *(dst++) = (remaining == 1) ? '=' : conv_bin2ascii(l >> 6L); *(dst++) = '='; remaining = 0; } ret += 4; src += 3; } *dst = '\0'; return ret; } // Decoding. int EVP_DecodedLength(size_t *out_len, size_t len) { if (len % 4 != 0) { return 0; } *out_len = (len / 4) * 3; return 1; } void EVP_DecodeInit(EVP_ENCODE_CTX *ctx) { OPENSSL_memset(ctx, 0, sizeof(EVP_ENCODE_CTX)); } static uint8_t base64_ascii_to_bin(uint8_t a) { // Since PEM is sometimes used to carry private keys, we decode base64 data // itself in constant-time. 
const uint8_t is_upper = constant_time_in_range_8(a, 'A', 'Z'); const uint8_t is_lower = constant_time_in_range_8(a, 'a', 'z'); const uint8_t is_digit = constant_time_in_range_8(a, '0', '9'); const uint8_t is_plus = constant_time_eq_8(a, '+'); const uint8_t is_slash = constant_time_eq_8(a, '/'); const uint8_t is_equals = constant_time_eq_8(a, '='); uint8_t ret = 0; ret |= is_upper & (a - 'A'); // [0,26) ret |= is_lower & (a - 'a' + 26); // [26,52) ret |= is_digit & (a - '0' + 52); // [52,62) ret |= is_plus & 62; ret |= is_slash & 63; // Invalid inputs, 'A', and '=' have all been mapped to zero. Map invalid // inputs to 0xff. Note '=' is padding and handled separately by the caller. const uint8_t is_valid = is_upper | is_lower | is_digit | is_plus | is_slash | is_equals; ret |= ~is_valid; return ret; } // base64_decode_quad decodes a single “quad” (i.e. four characters) of base64 // data and writes up to three bytes to |out|. It sets |*out_num_bytes| to the // number of bytes written, which will be less than three if the quad ended // with padding. It returns one on success or zero on error. static int base64_decode_quad(uint8_t *out, size_t *out_num_bytes, const uint8_t *in) { const uint8_t a = base64_ascii_to_bin(in[0]); const uint8_t b = base64_ascii_to_bin(in[1]); const uint8_t c = base64_ascii_to_bin(in[2]); const uint8_t d = base64_ascii_to_bin(in[3]); if (a == 0xff || b == 0xff || c == 0xff || d == 0xff) { return 0; } const uint32_t v = ((uint32_t)a) << 18 | ((uint32_t)b) << 12 | ((uint32_t)c) << 6 | (uint32_t)d; const unsigned padding_pattern = (in[0] == '=') << 3 | // (in[1] == '=') << 2 | // (in[2] == '=') << 1 | // (in[3] == '='); // In presence of padding, the lowest bits of v are unused. Canonical encoding // (RFC 4648, section 3.5) requires that these bits all be set to zero. Common // PEM parsers accept noncanonical base64, adding to the malleability of the // format. This decoder follows OpenSSL's and Go's PEM parsers and accepts it. 
switch (padding_pattern) { case 0: // The common case of no padding. *out_num_bytes = 3; out[0] = v >> 16; out[1] = v >> 8; out[2] = v; break; case 1: // xxx= *out_num_bytes = 2; out[0] = v >> 16; out[1] = v >> 8; break; case 3: // xx== *out_num_bytes = 1; out[0] = v >> 16; break; default: return 0; } return 1; } int EVP_DecodeUpdate(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, size_t in_len) { *out_len = 0; if (ctx->error_encountered) { return -1; } size_t bytes_out = 0, i; for (i = 0; i < in_len; i++) { const char c = in[i]; switch (c) { case ' ': case '\t': case '\r': case '\n': continue; } if (ctx->eof_seen) { ctx->error_encountered = 1; return -1; } ctx->data[ctx->data_used++] = c; if (ctx->data_used == 4) { size_t num_bytes_resulting; if (!base64_decode_quad(out, &num_bytes_resulting, ctx->data)) { ctx->error_encountered = 1; return -1; } ctx->data_used = 0; bytes_out += num_bytes_resulting; out += num_bytes_resulting; if (num_bytes_resulting < 3) { ctx->eof_seen = 1; } } } if (bytes_out > INT_MAX) { ctx->error_encountered = 1; *out_len = 0; return -1; } *out_len = (int)bytes_out; if (ctx->eof_seen) { return 0; } return 1; } int EVP_DecodeFinal(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len) { *out_len = 0; if (ctx->error_encountered || ctx->data_used != 0) { return -1; } return 1; } int EVP_DecodeBase64(uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *in, size_t in_len) { *out_len = 0; if (in_len % 4 != 0) { return 0; } size_t max_len; if (!EVP_DecodedLength(&max_len, in_len) || max_out < max_len) { return 0; } size_t i, bytes_out = 0; for (i = 0; i < in_len; i += 4) { size_t num_bytes_resulting; if (!base64_decode_quad(out, &num_bytes_resulting, &in[i])) { return 0; } bytes_out += num_bytes_resulting; out += num_bytes_resulting; if (num_bytes_resulting != 3 && i != in_len - 4) { return 0; } } *out_len = bytes_out; return 1; } int EVP_DecodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) { // Trim spaces and tabs 
from the beginning of the input. while (src_len > 0) { if (src[0] != ' ' && src[0] != '\t') { break; } src++; src_len--; } // Trim newlines, spaces and tabs from the end of the line. while (src_len > 0) { switch (src[src_len - 1]) { case ' ': case '\t': case '\r': case '\n': src_len--; continue; } break; } size_t dst_len; if (!EVP_DecodedLength(&dst_len, src_len) || dst_len > INT_MAX || !EVP_DecodeBase64(dst, &dst_len, dst_len, src, src_len)) { return -1; } // EVP_DecodeBlock does not take padding into account, so put the // NULs back in... so the caller can strip them back out. while (dst_len % 3 != 0) { dst[dst_len++] = '\0'; } assert(dst_len <= INT_MAX); return (int)dst_len; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bcm_support.h ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CRYPTO_BCM_SUPPORT_H #define OPENSSL_HEADER_CRYPTO_BCM_SUPPORT_H #include #include // Provided by libcrypto, called from BCM #if defined(__cplusplus) extern "C" { #endif #if defined(OPENSSL_LINUX) // On linux we use MADVISE instead of pthread_atfork(), due // to concerns about clone() being used for address space // duplication. 
#define OPENSSL_FORK_DETECTION #define OPENSSL_FORK_DETECTION_MADVISE #elif defined(OPENSSL_MACOS) || defined(OPENSSL_IOS) || \ defined(OPENSSL_OPENBSD) || defined(OPENSSL_FREEBSD) // These platforms may detect address space duplication with pthread_atfork. // iOS doesn't normally allow fork in apps, but it's there. #define OPENSSL_FORK_DETECTION #define OPENSSL_FORK_DETECTION_PTHREAD_ATFORK #elif defined(OPENSSL_WINDOWS) || defined(OPENSSL_TRUSTY) || \ defined(__ZEPHYR__) || defined(CROS_EC) // These platforms do not fork. #define OPENSSL_DOES_NOT_FORK #endif #if defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) #define OPENSSL_RAND_DETERMINISTIC #elif defined(OPENSSL_TRUSTY) #define OPENSSL_RAND_TRUSTY #elif defined(OPENSSL_WINDOWS) #define OPENSSL_RAND_WINDOWS #elif defined(OPENSSL_LINUX) #define OPENSSL_RAND_URANDOM #elif defined(OPENSSL_APPLE) && !defined(OPENSSL_MACOS) // Unlike macOS, iOS and similar hide away getentropy(). #define OPENSSL_RAND_IOS #else // By default if you are integrating BoringSSL we expect you to // provide getentropy from the header file. #define OPENSSL_RAND_GETENTROPY #endif // Provided by libcrypto, called from BCM // CRYPTO_init_sysrand initializes long-lived resources needed to draw entropy // from the operating system, if the operating system requires initialization. void CRYPTO_init_sysrand(void); // CRYPTO_sysrand fills |len| bytes at |buf| with entropy from the operating // system. void CRYPTO_sysrand(uint8_t *buf, size_t len); // CRYPTO_sysrand_if_available fills |len| bytes at |buf| with entropy from the // operating system, or early /dev/urandom data, and returns 1, _if_ the entropy // pool is initialized or if getrandom() is not available and not in FIPS mode. // Otherwise it will not block and will instead fill |buf| with all zeros and // return 0. int CRYPTO_sysrand_if_available(uint8_t *buf, size_t len); // CRYPTO_sysrand_for_seed fills |len| bytes at |buf| with entropy from the // operating system. 
It may draw from the |GRND_RANDOM| pool on Android, // depending on the vendor's configuration. void CRYPTO_sysrand_for_seed(uint8_t *buf, size_t len); // RAND_need_entropy is called whenever the BCM module has stopped because it // has run out of entropy. void RAND_need_entropy(size_t bytes_needed); // crypto_get_fork_generation returns the fork generation number for the current // process, or zero if not supported on the platform. The fork generation number // is a non-zero, strictly-monotonic counter with the property that, if queried // in an address space and then again in a subsequently forked copy, the forked // address space will observe a greater value. // // This function may be used to clear cached values across a fork. When // initializing a cache, record the fork generation. Before using the cache, // check if the fork generation has changed. If so, drop the cache and update // the save fork generation. Note this logic transparently handles platforms // which always return zero. // // This is not reliably supported on all platforms which implement |fork|, so it // should only be used as a hardening measure. OPENSSL_EXPORT uint64_t CRYPTO_get_fork_generation(void); // CRYPTO_fork_detect_force_madv_wipeonfork_for_testing is an internal detail // used for testing purposes. OPENSSL_EXPORT void CRYPTO_fork_detect_force_madv_wipeonfork_for_testing( int on); // CRYPTO_get_stderr returns stderr. This function exists to avoid BCM needing // a data dependency on libc. FILE *CRYPTO_get_stderr(void); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_CRYPTO_BCM_SUPPORT_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/bio/bio.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include "../internal.h" static CRYPTO_EX_DATA_CLASS g_ex_data_class = CRYPTO_EX_DATA_CLASS_INIT_WITH_APP_DATA; BIO *BIO_new(const BIO_METHOD *method) { BIO *ret = reinterpret_cast(OPENSSL_zalloc(sizeof(BIO))); if (ret == NULL) { return NULL; } ret->method = method; ret->shutdown = 1; ret->references = 1; CRYPTO_new_ex_data(&ret->ex_data); if (method->create != NULL && !method->create(ret)) { OPENSSL_free(ret); return NULL; } return ret; } int BIO_free(BIO *bio) { BIO *next_bio; for (; bio != NULL; bio = next_bio) { if (!CRYPTO_refcount_dec_and_test_zero(&bio->references)) { return 0; } next_bio = BIO_pop(bio); if (bio->method != NULL && bio->method->destroy != NULL) { bio->method->destroy(bio); } CRYPTO_free_ex_data(&g_ex_data_class, bio, &bio->ex_data); OPENSSL_free(bio); } return 1; } int BIO_up_ref(BIO *bio) { CRYPTO_refcount_inc(&bio->references); return 1; } void BIO_vfree(BIO *bio) { BIO_free(bio); } void BIO_free_all(BIO *bio) { BIO_free(bio); } int BIO_read(BIO *bio, void *buf, int len) { if (bio == NULL || bio->method == NULL || bio->method->bread == NULL) { OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD); return -2; } if (!bio->init) { OPENSSL_PUT_ERROR(BIO, BIO_R_UNINITIALIZED); return -2; } if (len <= 0) { return 0; } int ret = bio->method->bread(bio, reinterpret_cast(buf), len); if (ret > 0) { bio->num_read += ret; } return ret; } int BIO_gets(BIO *bio, char *buf, int len) { if (bio == NULL || bio->method == NULL || bio->method->bgets == NULL) { OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD); return -2; } if (!bio->init) { OPENSSL_PUT_ERROR(BIO, BIO_R_UNINITIALIZED); return -2; } if (len <= 0) { return 0; } int ret = bio->method->bgets(bio, buf, len); if (ret > 0) { bio->num_read += ret; } return ret; } int BIO_write(BIO *bio, const void *in, 
int inl) { if (bio == NULL || bio->method == NULL || bio->method->bwrite == NULL) { OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD); return -2; } if (!bio->init) { OPENSSL_PUT_ERROR(BIO, BIO_R_UNINITIALIZED); return -2; } if (inl <= 0) { return 0; } int ret = bio->method->bwrite(bio, reinterpret_cast(in), inl); if (ret > 0) { bio->num_write += ret; } return ret; } int BIO_write_all(BIO *bio, const void *data, size_t len) { const uint8_t *data_u8 = reinterpret_cast(data); while (len > 0) { int ret = BIO_write(bio, data_u8, len > INT_MAX ? INT_MAX : (int)len); if (ret <= 0) { return 0; } data_u8 += ret; len -= ret; } return 1; } int BIO_puts(BIO *bio, const char *in) { size_t len = strlen(in); if (len > INT_MAX) { // |BIO_write| and the return value both assume the string fits in |int|. OPENSSL_PUT_ERROR(BIO, ERR_R_OVERFLOW); return -1; } return BIO_write(bio, in, (int)len); } int BIO_flush(BIO *bio) { return (int)BIO_ctrl(bio, BIO_CTRL_FLUSH, 0, NULL); } long BIO_ctrl(BIO *bio, int cmd, long larg, void *parg) { if (bio == NULL) { return 0; } if (bio->method == NULL || bio->method->ctrl == NULL) { OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD); return -2; } return bio->method->ctrl(bio, cmd, larg, parg); } char *BIO_ptr_ctrl(BIO *b, int cmd, long larg) { char *p = NULL; if (BIO_ctrl(b, cmd, larg, (void *)&p) <= 0) { return NULL; } return p; } long BIO_int_ctrl(BIO *b, int cmd, long larg, int iarg) { int i = iarg; return BIO_ctrl(b, cmd, larg, (void *)&i); } int BIO_reset(BIO *bio) { return (int)BIO_ctrl(bio, BIO_CTRL_RESET, 0, NULL); } int BIO_eof(BIO *bio) { return (int)BIO_ctrl(bio, BIO_CTRL_EOF, 0, NULL); } void BIO_set_flags(BIO *bio, int flags) { bio->flags |= flags; } int BIO_test_flags(const BIO *bio, int flags) { return bio->flags & flags; } int BIO_should_read(const BIO *bio) { return BIO_test_flags(bio, BIO_FLAGS_READ); } int BIO_should_write(const BIO *bio) { return BIO_test_flags(bio, BIO_FLAGS_WRITE); } int BIO_should_retry(const BIO *bio) { return 
BIO_test_flags(bio, BIO_FLAGS_SHOULD_RETRY); } int BIO_should_io_special(const BIO *bio) { return BIO_test_flags(bio, BIO_FLAGS_IO_SPECIAL); } int BIO_get_retry_reason(const BIO *bio) { return bio->retry_reason; } void BIO_set_retry_reason(BIO *bio, int reason) { bio->retry_reason = reason; } void BIO_clear_flags(BIO *bio, int flags) { bio->flags &= ~flags; } void BIO_set_retry_read(BIO *bio) { bio->flags |= BIO_FLAGS_READ | BIO_FLAGS_SHOULD_RETRY; } void BIO_set_retry_write(BIO *bio) { bio->flags |= BIO_FLAGS_WRITE | BIO_FLAGS_SHOULD_RETRY; } static const int kRetryFlags = BIO_FLAGS_RWS | BIO_FLAGS_SHOULD_RETRY; int BIO_get_retry_flags(BIO *bio) { return bio->flags & kRetryFlags; } void BIO_clear_retry_flags(BIO *bio) { bio->flags &= ~kRetryFlags; bio->retry_reason = 0; } int BIO_method_type(const BIO *bio) { return bio->method->type; } void BIO_copy_next_retry(BIO *bio) { BIO_clear_retry_flags(bio); BIO_set_flags(bio, BIO_get_retry_flags(bio->next_bio)); bio->retry_reason = bio->next_bio->retry_reason; } long BIO_callback_ctrl(BIO *bio, int cmd, bio_info_cb fp) { if (bio == NULL) { return 0; } if (bio->method == NULL || bio->method->callback_ctrl == NULL) { OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD); return 0; } return bio->method->callback_ctrl(bio, cmd, fp); } size_t BIO_pending(const BIO *bio) { const long r = BIO_ctrl((BIO *)bio, BIO_CTRL_PENDING, 0, NULL); assert(r >= 0); if (r < 0) { return 0; } return r; } size_t BIO_ctrl_pending(const BIO *bio) { return BIO_pending(bio); } size_t BIO_wpending(const BIO *bio) { const long r = BIO_ctrl((BIO *)bio, BIO_CTRL_WPENDING, 0, NULL); assert(r >= 0); if (r < 0) { return 0; } return r; } int BIO_set_close(BIO *bio, int close_flag) { return (int)BIO_ctrl(bio, BIO_CTRL_SET_CLOSE, close_flag, NULL); } OPENSSL_EXPORT uint64_t BIO_number_read(const BIO *bio) { return bio->num_read; } OPENSSL_EXPORT uint64_t BIO_number_written(const BIO *bio) { return bio->num_write; } BIO *BIO_push(BIO *bio, BIO *appended_bio) { BIO 
*last_bio; if (bio == NULL) { return bio; } last_bio = bio; while (last_bio->next_bio != NULL) { last_bio = last_bio->next_bio; } last_bio->next_bio = appended_bio; return bio; } BIO *BIO_pop(BIO *bio) { BIO *ret; if (bio == NULL) { return NULL; } ret = bio->next_bio; bio->next_bio = NULL; return ret; } BIO *BIO_next(BIO *bio) { if (!bio) { return NULL; } return bio->next_bio; } BIO *BIO_find_type(BIO *bio, int type) { int method_type, mask; if (!bio) { return NULL; } mask = type & 0xff; do { if (bio->method != NULL) { method_type = bio->method->type; if (!mask) { if (method_type & type) { return bio; } } else if (method_type == type) { return bio; } } bio = bio->next_bio; } while (bio != NULL); return NULL; } int BIO_indent(BIO *bio, unsigned indent, unsigned max_indent) { if (indent > max_indent) { indent = max_indent; } while (indent--) { if (BIO_puts(bio, " ") != 1) { return 0; } } return 1; } static int print_bio(const char *str, size_t len, void *bio) { return BIO_write_all((BIO *)bio, str, len); } void ERR_print_errors(BIO *bio) { ERR_print_errors_cb(print_bio, bio); } // bio_read_all reads everything from |bio| and prepends |prefix| to it. On // success, |*out| is set to an allocated buffer (which should be freed with // |OPENSSL_free|), |*out_len| is set to its length and one is returned. The // buffer will contain |prefix| followed by the contents of |bio|. On failure, // zero is returned. // // The function will fail if the size of the output would equal or exceed // |max_len|. 
static int bio_read_all(BIO *bio, uint8_t **out, size_t *out_len, const uint8_t *prefix, size_t prefix_len, size_t max_len) { static const size_t kChunkSize = 4096; size_t len = prefix_len + kChunkSize; if (len > max_len) { len = max_len; } if (len < prefix_len) { return 0; } *out = reinterpret_cast(OPENSSL_malloc(len)); if (*out == NULL) { return 0; } OPENSSL_memcpy(*out, prefix, prefix_len); size_t done = prefix_len; for (;;) { if (done == len) { OPENSSL_free(*out); return 0; } size_t todo = len - done; if (todo > INT_MAX) { todo = INT_MAX; } const int n = BIO_read(bio, *out + done, (int)todo); if (n == 0) { *out_len = done; return 1; } else if (n == -1) { OPENSSL_free(*out); return 0; } done += n; if (len < max_len && len - done < kChunkSize / 2) { len += kChunkSize; if (len < kChunkSize || len > max_len) { len = max_len; } uint8_t *new_buf = reinterpret_cast(OPENSSL_realloc(*out, len)); if (new_buf == NULL) { OPENSSL_free(*out); return 0; } *out = new_buf; } } } // bio_read_full reads |len| bytes |bio| and writes them into |out|. It // tolerates partial reads from |bio| and returns one on success or zero if a // read fails before |len| bytes are read. On failure, it additionally sets // |*out_eof_on_first_read| to whether the error was due to |bio| returning zero // on the first read. |out_eof_on_first_read| may be NULL to discard the value. static int bio_read_full(BIO *bio, uint8_t *out, int *out_eof_on_first_read, size_t len) { int first_read = 1; while (len > 0) { int todo = len <= INT_MAX ? (int)len : INT_MAX; int ret = BIO_read(bio, out, todo); if (ret <= 0) { if (out_eof_on_first_read != NULL) { *out_eof_on_first_read = first_read && ret == 0; } return 0; } out += ret; len -= (size_t)ret; first_read = 0; } return 1; } // For compatibility with existing |d2i_*_bio| callers, |BIO_read_asn1| uses // |ERR_LIB_ASN1| errors. 
OPENSSL_DECLARE_ERROR_REASON(ASN1, ASN1_R_DECODE_ERROR) OPENSSL_DECLARE_ERROR_REASON(ASN1, ASN1_R_HEADER_TOO_LONG) OPENSSL_DECLARE_ERROR_REASON(ASN1, ASN1_R_NOT_ENOUGH_DATA) OPENSSL_DECLARE_ERROR_REASON(ASN1, ASN1_R_TOO_LONG) int BIO_read_asn1(BIO *bio, uint8_t **out, size_t *out_len, size_t max_len) { uint8_t header[6]; static const size_t kInitialHeaderLen = 2; int eof_on_first_read; if (!bio_read_full(bio, header, &eof_on_first_read, kInitialHeaderLen)) { if (eof_on_first_read) { // Historically, OpenSSL returned |ASN1_R_HEADER_TOO_LONG| when // |d2i_*_bio| could not read anything. CPython conditions on this to // determine if |bio| was empty. OPENSSL_PUT_ERROR(ASN1, ASN1_R_HEADER_TOO_LONG); } else { OPENSSL_PUT_ERROR(ASN1, ASN1_R_NOT_ENOUGH_DATA); } return 0; } const uint8_t tag = header[0]; const uint8_t length_byte = header[1]; if ((tag & 0x1f) == 0x1f) { // Long form tags are not supported. OPENSSL_PUT_ERROR(ASN1, ASN1_R_DECODE_ERROR); return 0; } size_t len, header_len; if ((length_byte & 0x80) == 0) { // Short form length. len = length_byte; header_len = kInitialHeaderLen; } else { const size_t num_bytes = length_byte & 0x7f; if ((tag & 0x20 /* constructed */) != 0 && num_bytes == 0) { // indefinite length. if (!bio_read_all(bio, out, out_len, header, kInitialHeaderLen, max_len)) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_NOT_ENOUGH_DATA); return 0; } return 1; } if (num_bytes == 0 || num_bytes > 4) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_DECODE_ERROR); return 0; } if (!bio_read_full(bio, header + kInitialHeaderLen, NULL, num_bytes)) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_NOT_ENOUGH_DATA); return 0; } header_len = kInitialHeaderLen + num_bytes; uint32_t len32 = 0; for (unsigned i = 0; i < num_bytes; i++) { len32 <<= 8; len32 |= header[kInitialHeaderLen + i]; } if (len32 < 128) { // Length should have used short-form encoding. 
OPENSSL_PUT_ERROR(ASN1, ASN1_R_DECODE_ERROR); return 0; } if ((len32 >> ((num_bytes - 1) * 8)) == 0) { // Length should have been at least one byte shorter. OPENSSL_PUT_ERROR(ASN1, ASN1_R_DECODE_ERROR); return 0; } len = len32; } if (len + header_len < len || len + header_len > max_len || len > INT_MAX) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_TOO_LONG); return 0; } len += header_len; *out_len = len; *out = reinterpret_cast(OPENSSL_malloc(len)); if (*out == NULL) { return 0; } OPENSSL_memcpy(*out, header, header_len); if (!bio_read_full(bio, (*out) + header_len, NULL, len - header_len)) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_NOT_ENOUGH_DATA); OPENSSL_free(*out); return 0; } return 1; } void BIO_set_retry_special(BIO *bio) { bio->flags |= BIO_FLAGS_READ | BIO_FLAGS_IO_SPECIAL; } int BIO_set_write_buffer_size(BIO *bio, int buffer_size) { return 0; } static CRYPTO_MUTEX g_index_lock = CRYPTO_MUTEX_INIT; static int g_index = BIO_TYPE_START; int BIO_get_new_index(void) { CRYPTO_MUTEX_lock_write(&g_index_lock); // If |g_index| exceeds 255, it will collide with the flags bits. int ret = g_index > 255 ? 
-1 : g_index++; CRYPTO_MUTEX_unlock_write(&g_index_lock); return ret; } BIO_METHOD *BIO_meth_new(int type, const char *name) { BIO_METHOD *method = reinterpret_cast(OPENSSL_zalloc(sizeof(BIO_METHOD))); if (method == NULL) { return NULL; } method->type = type; method->name = name; return method; } void BIO_meth_free(BIO_METHOD *method) { OPENSSL_free(method); } int BIO_meth_set_create(BIO_METHOD *method, int (*create_func)(BIO *)) { method->create = create_func; return 1; } int BIO_meth_set_destroy(BIO_METHOD *method, int (*destroy_func)(BIO *)) { method->destroy = destroy_func; return 1; } int BIO_meth_set_write(BIO_METHOD *method, int (*write_func)(BIO *, const char *, int)) { method->bwrite = write_func; return 1; } int BIO_meth_set_read(BIO_METHOD *method, int (*read_func)(BIO *, char *, int)) { method->bread = read_func; return 1; } int BIO_meth_set_gets(BIO_METHOD *method, int (*gets_func)(BIO *, char *, int)) { method->bgets = gets_func; return 1; } int BIO_meth_set_ctrl(BIO_METHOD *method, long (*ctrl_func)(BIO *, int, long, void *)) { method->ctrl = ctrl_func; return 1; } void BIO_set_data(BIO *bio, void *ptr) { bio->ptr = ptr; } void *BIO_get_data(BIO *bio) { return bio->ptr; } void BIO_set_init(BIO *bio, int init) { bio->init = init; } int BIO_get_init(BIO *bio) { return bio->init; } void BIO_set_shutdown(BIO *bio, int shutdown) { bio->shutdown = shutdown; } int BIO_get_shutdown(BIO *bio) { return bio->shutdown; } int BIO_meth_set_puts(BIO_METHOD *method, int (*puts)(BIO *, const char *)) { // Ignore the parameter. We implement |BIO_puts| using |BIO_write|. 
return 1; } int BIO_get_ex_new_index(long argl, void *argp, // CRYPTO_EX_unused *unused, // CRYPTO_EX_dup *dup_unused, // CRYPTO_EX_free *free_func) { return CRYPTO_get_ex_new_index_ex(&g_ex_data_class, argl, argp, free_func); } int BIO_set_ex_data(BIO *bio, int idx, void *data) { return CRYPTO_set_ex_data(&bio->ex_data, idx, data); } void *BIO_get_ex_data(const BIO *bio, int idx) { return CRYPTO_get_ex_data(&bio->ex_data, idx); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bio/bio_mem.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "../internal.h" BIO *BIO_new_mem_buf(const void *buf, ossl_ssize_t len) { BIO *ret; BUF_MEM *b; const size_t size = len < 0 ? strlen((char *)buf) : (size_t)len; if (!buf && len != 0) { OPENSSL_PUT_ERROR(BIO, BIO_R_NULL_PARAMETER); return NULL; } ret = BIO_new(BIO_s_mem()); if (ret == NULL) { return NULL; } b = (BUF_MEM *)ret->ptr; // BIO_FLAGS_MEM_RDONLY ensures |b->data| is not written to. b->data = reinterpret_cast(const_cast(buf)); b->length = size; b->max = size; ret->flags |= BIO_FLAGS_MEM_RDONLY; // |num| is used to store the value that this BIO will return when it runs // out of data. If it's negative then the retry flags will also be set. Since // this is static data, retrying wont help ret->num = 0; return ret; } static int mem_new(BIO *bio) { BUF_MEM *b; b = BUF_MEM_new(); if (b == NULL) { return 0; } // |shutdown| is used to store the close flag: whether the BIO has ownership // of the BUF_MEM. 
bio->shutdown = 1; bio->init = 1; bio->num = -1; bio->ptr = (char *)b; return 1; } static int mem_free(BIO *bio) { if (!bio->shutdown || !bio->init || bio->ptr == NULL) { return 1; } BUF_MEM *b = (BUF_MEM *)bio->ptr; if (bio->flags & BIO_FLAGS_MEM_RDONLY) { b->data = NULL; } BUF_MEM_free(b); bio->ptr = NULL; return 1; } static int mem_read(BIO *bio, char *out, int outl) { BIO_clear_retry_flags(bio); if (outl <= 0) { return 0; } BUF_MEM *b = reinterpret_cast(bio->ptr); int ret = outl; if ((size_t)ret > b->length) { ret = (int)b->length; } if (ret > 0) { OPENSSL_memcpy(out, b->data, ret); b->length -= ret; if (bio->flags & BIO_FLAGS_MEM_RDONLY) { b->data += ret; } else { OPENSSL_memmove(b->data, &b->data[ret], b->length); } } else if (b->length == 0) { ret = bio->num; if (ret != 0) { BIO_set_retry_read(bio); } } return ret; } static int mem_write(BIO *bio, const char *in, int inl) { BIO_clear_retry_flags(bio); if (inl <= 0) { return 0; // Successfully write zero bytes. } if (bio->flags & BIO_FLAGS_MEM_RDONLY) { OPENSSL_PUT_ERROR(BIO, BIO_R_WRITE_TO_READ_ONLY_BIO); return -1; } BUF_MEM *b = reinterpret_cast(bio->ptr); if (!BUF_MEM_append(b, in, inl)) { return -1; } return inl; } static int mem_gets(BIO *bio, char *buf, int size) { BIO_clear_retry_flags(bio); if (size <= 0) { return 0; } // The buffer size includes space for the trailing NUL, so we can read at most // one fewer byte. BUF_MEM *b = reinterpret_cast(bio->ptr); int ret = size - 1; if ((size_t)ret > b->length) { ret = (int)b->length; } // Stop at the first newline. 
const char *newline = reinterpret_cast(OPENSSL_memchr(b->data, '\n', ret)); if (newline != NULL) { ret = (int)(newline - b->data + 1); } ret = mem_read(bio, buf, ret); if (ret >= 0) { buf[ret] = '\0'; } return ret; } static long mem_ctrl(BIO *bio, int cmd, long num, void *ptr) { long ret = 1; BUF_MEM *b = (BUF_MEM *)bio->ptr; switch (cmd) { case BIO_CTRL_RESET: if (b->data != NULL) { // For read only case reset to the start again if (bio->flags & BIO_FLAGS_MEM_RDONLY) { b->data -= b->max - b->length; b->length = b->max; } else { OPENSSL_memset(b->data, 0, b->max); b->length = 0; } } break; case BIO_CTRL_EOF: ret = (long)(b->length == 0); break; case BIO_C_SET_BUF_MEM_EOF_RETURN: bio->num = (int)num; break; case BIO_CTRL_INFO: ret = (long)b->length; if (ptr != NULL) { char **pptr = reinterpret_cast(ptr); *pptr = b->data; } break; case BIO_C_SET_BUF_MEM: mem_free(bio); bio->shutdown = (int)num; bio->ptr = ptr; break; case BIO_C_GET_BUF_MEM_PTR: if (ptr != NULL) { BUF_MEM **pptr = reinterpret_cast(ptr); *pptr = b; } break; case BIO_CTRL_GET_CLOSE: ret = (long)bio->shutdown; break; case BIO_CTRL_SET_CLOSE: bio->shutdown = (int)num; break; case BIO_CTRL_WPENDING: ret = 0L; break; case BIO_CTRL_PENDING: ret = (long)b->length; break; case BIO_CTRL_FLUSH: ret = 1; break; default: ret = 0; break; } return ret; } static const BIO_METHOD mem_method = { BIO_TYPE_MEM, "memory buffer", mem_write, mem_read, NULL /* puts */, mem_gets, mem_ctrl, mem_new, mem_free, NULL /* callback_ctrl */, }; const BIO_METHOD *BIO_s_mem(void) { return &mem_method; } int BIO_mem_contents(const BIO *bio, const uint8_t **out_contents, size_t *out_len) { const BUF_MEM *b; if (bio->method != &mem_method) { return 0; } b = (BUF_MEM *)bio->ptr; *out_contents = (uint8_t *)b->data; *out_len = b->length; return 1; } long BIO_get_mem_data(BIO *bio, char **contents) { return BIO_ctrl(bio, BIO_CTRL_INFO, 0, contents); } int BIO_get_mem_ptr(BIO *bio, BUF_MEM **out) { return (int)BIO_ctrl(bio, 
BIO_C_GET_BUF_MEM_PTR, 0, out); } int BIO_set_mem_buf(BIO *bio, BUF_MEM *b, int take_ownership) { return (int)BIO_ctrl(bio, BIO_C_SET_BUF_MEM, take_ownership, b); } int BIO_set_mem_eof_return(BIO *bio, int eof_value) { return (int)BIO_ctrl(bio, BIO_C_SET_BUF_MEM_EOF_RETURN, eof_value, NULL); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bio/connect.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #if !defined(OPENSSL_NO_SOCK) #include #include #include #if !defined(OPENSSL_WINDOWS) #include #include #include #include #else OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include #include OPENSSL_MSVC_PRAGMA(warning(pop)) #endif #include #include #include "../internal.h" #include "internal.h" enum { BIO_CONN_S_BEFORE, BIO_CONN_S_BLOCKED_CONNECT, BIO_CONN_S_OK, }; namespace { typedef struct bio_connect_st { int state; char *param_hostname; char *param_port; int nbio; unsigned short port; struct sockaddr_storage them; socklen_t them_length; // the file descriptor is kept in bio->num in order to match the socket // BIO. // info_callback is called when the connection is initially made // callback(BIO,state,ret); The callback should return 'ret', state is for // compatibility with the SSL info_callback. int (*info_callback)(const BIO *bio, int state, int ret); } BIO_CONNECT; } // namespace #if !defined(OPENSSL_WINDOWS) static int closesocket(int sock) { return close(sock); } #endif // split_host_and_port sets |*out_host| and |*out_port| to the host and port // parsed from |name|. It returns one on success or zero on error. Even when // successful, |*out_port| may be NULL on return if no port was specified. 
static int split_host_and_port(char **out_host, char **out_port, const char *name) { const char *host, *port = NULL; size_t host_len = 0; *out_host = NULL; *out_port = NULL; if (name[0] == '[') { // bracketed IPv6 address const char *close = strchr(name, ']'); if (close == NULL) { return 0; } host = name + 1; host_len = close - host; if (close[1] == ':') { // [IP]:port port = close + 2; } else if (close[1] != 0) { return 0; } } else { const char *colon = strchr(name, ':'); if (colon == NULL || strchr(colon + 1, ':') != NULL) { // IPv6 address host = name; host_len = strlen(name); } else { // host:port host = name; host_len = colon - name; port = colon + 1; } } *out_host = OPENSSL_strndup(host, host_len); if (*out_host == NULL) { return 0; } if (port == NULL) { *out_port = NULL; return 1; } *out_port = OPENSSL_strdup(port); if (*out_port == NULL) { OPENSSL_free(*out_host); *out_host = NULL; return 0; } return 1; } static int conn_state(BIO *bio, BIO_CONNECT *c) { int ret = -1, i; int (*cb)(const BIO *, int, int) = NULL; if (c->info_callback != NULL) { cb = c->info_callback; } for (;;) { switch (c->state) { case BIO_CONN_S_BEFORE: // If there's a hostname and a port, assume that both are // exactly what they say. If there is only a hostname, try // (just once) to split it into a hostname and port. 
if (c->param_hostname == NULL) { OPENSSL_PUT_ERROR(BIO, BIO_R_NO_HOSTNAME_SPECIFIED); goto exit_loop; } if (c->param_port == NULL) { char *host, *port; if (!split_host_and_port(&host, &port, c->param_hostname) || port == NULL) { OPENSSL_free(host); OPENSSL_free(port); OPENSSL_PUT_ERROR(BIO, BIO_R_NO_PORT_SPECIFIED); ERR_add_error_data(2, "host=", c->param_hostname); goto exit_loop; } OPENSSL_free(c->param_port); c->param_port = port; OPENSSL_free(c->param_hostname); c->param_hostname = host; } if (!bio_ip_and_port_to_socket_and_addr( &bio->num, &c->them, &c->them_length, c->param_hostname, c->param_port)) { OPENSSL_PUT_ERROR(BIO, BIO_R_UNABLE_TO_CREATE_SOCKET); ERR_add_error_data(4, "host=", c->param_hostname, ":", c->param_port); goto exit_loop; } if (c->nbio) { if (!bio_socket_nbio(bio->num, 1)) { OPENSSL_PUT_ERROR(BIO, BIO_R_ERROR_SETTING_NBIO); ERR_add_error_data(4, "host=", c->param_hostname, ":", c->param_port); goto exit_loop; } } i = 1; ret = setsockopt(bio->num, SOL_SOCKET, SO_KEEPALIVE, (char *)&i, sizeof(i)); if (ret < 0) { OPENSSL_PUT_SYSTEM_ERROR(); OPENSSL_PUT_ERROR(BIO, BIO_R_KEEPALIVE); ERR_add_error_data(4, "host=", c->param_hostname, ":", c->param_port); goto exit_loop; } BIO_clear_retry_flags(bio); ret = connect(bio->num, (struct sockaddr *)&c->them, c->them_length); if (ret < 0) { if (bio_socket_should_retry(ret)) { BIO_set_flags(bio, (BIO_FLAGS_IO_SPECIAL | BIO_FLAGS_SHOULD_RETRY)); c->state = BIO_CONN_S_BLOCKED_CONNECT; bio->retry_reason = BIO_RR_CONNECT; } else { OPENSSL_PUT_SYSTEM_ERROR(); OPENSSL_PUT_ERROR(BIO, BIO_R_CONNECT_ERROR); ERR_add_error_data(4, "host=", c->param_hostname, ":", c->param_port); } goto exit_loop; } else { c->state = BIO_CONN_S_OK; } break; case BIO_CONN_S_BLOCKED_CONNECT: i = bio_sock_error(bio->num); if (i) { if (bio_socket_should_retry(ret)) { BIO_set_flags(bio, (BIO_FLAGS_IO_SPECIAL | BIO_FLAGS_SHOULD_RETRY)); c->state = BIO_CONN_S_BLOCKED_CONNECT; bio->retry_reason = BIO_RR_CONNECT; ret = -1; } else { 
BIO_clear_retry_flags(bio); OPENSSL_PUT_SYSTEM_ERROR(); OPENSSL_PUT_ERROR(BIO, BIO_R_NBIO_CONNECT_ERROR); ERR_add_error_data(4, "host=", c->param_hostname, ":", c->param_port); ret = 0; } goto exit_loop; } else { c->state = BIO_CONN_S_OK; } break; case BIO_CONN_S_OK: ret = 1; goto exit_loop; default: assert(0); goto exit_loop; } if (cb != NULL) { ret = cb((BIO *)bio, c->state, ret); if (ret == 0) { goto end; } } } exit_loop: if (cb != NULL) { ret = cb((BIO *)bio, c->state, ret); } end: return ret; } static BIO_CONNECT *BIO_CONNECT_new(void) { BIO_CONNECT *ret = reinterpret_cast(OPENSSL_zalloc(sizeof(BIO_CONNECT))); if (ret == NULL) { return NULL; } ret->state = BIO_CONN_S_BEFORE; return ret; } static void BIO_CONNECT_free(BIO_CONNECT *c) { if (c == nullptr) { return; } OPENSSL_free(c->param_hostname); OPENSSL_free(c->param_port); OPENSSL_free(c); } static int conn_new(BIO *bio) { bio->init = 0; bio->num = -1; bio->flags = 0; bio->ptr = BIO_CONNECT_new(); return bio->ptr != NULL; } static void conn_close_socket(BIO *bio) { BIO_CONNECT *c = (BIO_CONNECT *)bio->ptr; if (bio->num == -1) { return; } // Only do a shutdown if things were established if (c->state == BIO_CONN_S_OK) { shutdown(bio->num, 2); } closesocket(bio->num); bio->num = -1; } static int conn_free(BIO *bio) { if (bio->shutdown) { conn_close_socket(bio); } BIO_CONNECT_free((BIO_CONNECT *)bio->ptr); return 1; } static int conn_read(BIO *bio, char *out, int out_len) { int ret = 0; BIO_CONNECT *data; data = (BIO_CONNECT *)bio->ptr; if (data->state != BIO_CONN_S_OK) { ret = conn_state(bio, data); if (ret <= 0) { return ret; } } bio_clear_socket_error(); ret = (int)recv(bio->num, out, out_len, 0); BIO_clear_retry_flags(bio); if (ret <= 0) { if (bio_socket_should_retry(ret)) { BIO_set_retry_read(bio); } } return ret; } static int conn_write(BIO *bio, const char *in, int in_len) { int ret; BIO_CONNECT *data; data = (BIO_CONNECT *)bio->ptr; if (data->state != BIO_CONN_S_OK) { ret = conn_state(bio, data); if (ret 
<= 0) { return ret; } } bio_clear_socket_error(); ret = (int)send(bio->num, in, in_len, 0); BIO_clear_retry_flags(bio); if (ret <= 0) { if (bio_socket_should_retry(ret)) { BIO_set_retry_write(bio); } } return ret; } static long conn_ctrl(BIO *bio, int cmd, long num, void *ptr) { int *ip; long ret = 1; BIO_CONNECT *data; data = (BIO_CONNECT *)bio->ptr; switch (cmd) { case BIO_CTRL_RESET: ret = 0; data->state = BIO_CONN_S_BEFORE; conn_close_socket(bio); bio->flags = 0; break; case BIO_C_DO_STATE_MACHINE: // use this one to start the connection if (data->state != BIO_CONN_S_OK) { ret = (long)conn_state(bio, data); } else { ret = 1; } break; case BIO_C_SET_CONNECT: if (ptr != NULL) { bio->init = 1; if (num == 0) { OPENSSL_free(data->param_hostname); data->param_hostname = OPENSSL_strdup(reinterpret_cast(ptr)); if (data->param_hostname == NULL) { ret = 0; } } else if (num == 1) { OPENSSL_free(data->param_port); data->param_port = OPENSSL_strdup(reinterpret_cast(ptr)); if (data->param_port == NULL) { ret = 0; } } else { ret = 0; } } break; case BIO_C_SET_NBIO: data->nbio = (int)num; break; case BIO_C_GET_FD: if (bio->init) { ip = (int *)ptr; if (ip != NULL) { *ip = bio->num; } ret = bio->num; } else { ret = -1; } break; case BIO_CTRL_GET_CLOSE: ret = bio->shutdown; break; case BIO_CTRL_SET_CLOSE: bio->shutdown = (int)num; break; case BIO_CTRL_PENDING: case BIO_CTRL_WPENDING: ret = 0; break; case BIO_CTRL_FLUSH: break; case BIO_CTRL_GET_CALLBACK: { int (**fptr)(const BIO *bio, int state, int xret); fptr = reinterpret_cast(ptr); *fptr = data->info_callback; } break; default: ret = 0; break; } return ret; } static long conn_callback_ctrl(BIO *bio, int cmd, bio_info_cb fp) { long ret = 1; BIO_CONNECT *data; data = (BIO_CONNECT *)bio->ptr; switch (cmd) { case BIO_CTRL_SET_CALLBACK: // This is the actual type signature of |fp|. The caller is expected to // cast it to |bio_info_cb| due to the |BIO_callback_ctrl| calling // convention. 
OPENSSL_MSVC_PRAGMA(warning(push)) OPENSSL_MSVC_PRAGMA(warning(disable : 4191)) OPENSSL_CLANG_PRAGMA("clang diagnostic push") OPENSSL_CLANG_PRAGMA( "clang diagnostic ignored \"-Wunknown-warning-option\"") OPENSSL_CLANG_PRAGMA("clang diagnostic ignored \"-Wcast-function-type\"") data->info_callback = (int (*)(const struct bio_st *, int, int))fp; OPENSSL_CLANG_PRAGMA("clang diagnostic pop") OPENSSL_MSVC_PRAGMA(warning(pop)) break; default: ret = 0; break; } return ret; } BIO *BIO_new_connect(const char *hostname) { BIO *ret; ret = BIO_new(BIO_s_connect()); if (ret == NULL) { return NULL; } if (!BIO_set_conn_hostname(ret, hostname)) { BIO_free(ret); return NULL; } return ret; } static const BIO_METHOD methods_connectp = { BIO_TYPE_CONNECT, "socket connect", conn_write, conn_read, NULL /* puts */, NULL /* gets */, conn_ctrl, conn_new, conn_free, conn_callback_ctrl, }; const BIO_METHOD *BIO_s_connect(void) { return &methods_connectp; } int BIO_set_conn_hostname(BIO *bio, const char *name) { return (int)BIO_ctrl(bio, BIO_C_SET_CONNECT, 0, (void *)name); } int BIO_set_conn_port(BIO *bio, const char *port_str) { return (int)BIO_ctrl(bio, BIO_C_SET_CONNECT, 1, (void *)port_str); } int BIO_set_conn_int_port(BIO *bio, const int *port) { char buf[DECIMAL_SIZE(int) + 1]; snprintf(buf, sizeof(buf), "%d", *port); return BIO_set_conn_port(bio, buf); } int BIO_set_nbio(BIO *bio, int on) { return (int)BIO_ctrl(bio, BIO_C_SET_NBIO, on, NULL); } int BIO_do_connect(BIO *bio) { return (int)BIO_ctrl(bio, BIO_C_DO_STATE_MACHINE, 0, NULL); } #endif // OPENSSL_NO_SOCK ================================================ FILE: Sources/CNIOBoringSSL/crypto/bio/errno.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "internal.h" int bio_errno_should_retry(int return_value) { if (return_value != -1) { return 0; } return #ifdef EWOULDBLOCK errno == EWOULDBLOCK || #endif #ifdef ENOTCONN errno == ENOTCONN || #endif #ifdef EINTR errno == EINTR || #endif #ifdef EAGAIN errno == EAGAIN || #endif #ifdef EPROTO errno == EPROTO || #endif #ifdef EINPROGRESS errno == EINPROGRESS || #endif #ifdef EALREADY errno == EALREADY || #endif 0; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bio/fd.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #if !defined(OPENSSL_NO_POSIX_IO) #include #include #if !defined(OPENSSL_WINDOWS) #include #else #include #endif #include #include #include "internal.h" #include "../internal.h" #if defined(OPENSSL_WINDOWS) #define BORINGSSL_CLOSE _close #define BORINGSSL_LSEEK _lseek #define BORINGSSL_READ _read #define BORINGSSL_WRITE _write #else #define BORINGSSL_CLOSE close #define BORINGSSL_LSEEK lseek #define BORINGSSL_READ read #define BORINGSSL_WRITE write #endif BIO *BIO_new_fd(int fd, int close_flag) { BIO *ret = BIO_new(BIO_s_fd()); if (ret == NULL) { return NULL; } BIO_set_fd(ret, fd, close_flag); return ret; } static int fd_new(BIO *bio) { // num is used to store the file descriptor. 
bio->num = -1; return 1; } static int fd_free(BIO *bio) { if (bio->shutdown) { if (bio->init) { BORINGSSL_CLOSE(bio->num); } bio->init = 0; } return 1; } static int fd_read(BIO *b, char *out, int outl) { int ret = 0; ret = (int)BORINGSSL_READ(b->num, out, outl); BIO_clear_retry_flags(b); if (ret <= 0) { if (bio_errno_should_retry(ret)) { BIO_set_retry_read(b); } } return ret; } static int fd_write(BIO *b, const char *in, int inl) { int ret = (int)BORINGSSL_WRITE(b->num, in, inl); BIO_clear_retry_flags(b); if (ret <= 0) { if (bio_errno_should_retry(ret)) { BIO_set_retry_write(b); } } return ret; } static long fd_ctrl(BIO *b, int cmd, long num, void *ptr) { long ret = 1; int *ip; switch (cmd) { case BIO_CTRL_RESET: num = 0; [[fallthrough]]; case BIO_C_FILE_SEEK: ret = 0; if (b->init) { ret = (long)BORINGSSL_LSEEK(b->num, num, SEEK_SET); } break; case BIO_C_FILE_TELL: case BIO_CTRL_INFO: ret = 0; if (b->init) { ret = (long)BORINGSSL_LSEEK(b->num, 0, SEEK_CUR); } break; case BIO_C_SET_FD: fd_free(b); b->num = *((int *)ptr); b->shutdown = (int)num; b->init = 1; break; case BIO_C_GET_FD: if (b->init) { ip = (int *)ptr; if (ip != NULL) { *ip = b->num; } return b->num; } else { ret = -1; } break; case BIO_CTRL_GET_CLOSE: ret = b->shutdown; break; case BIO_CTRL_SET_CLOSE: b->shutdown = (int)num; break; case BIO_CTRL_PENDING: case BIO_CTRL_WPENDING: ret = 0; break; case BIO_CTRL_FLUSH: ret = 1; break; default: ret = 0; break; } return ret; } static int fd_gets(BIO *bp, char *buf, int size) { if (size <= 0) { return 0; } char *ptr = buf; char *end = buf + size - 1; while (ptr < end && fd_read(bp, ptr, 1) > 0) { char c = ptr[0]; ptr++; if (c == '\n') { break; } } ptr[0] = '\0'; // The output length is bounded by |size|. 
return (int)(ptr - buf); } static const BIO_METHOD methods_fdp = { BIO_TYPE_FD, "file descriptor", fd_write, fd_read, NULL /* puts */, fd_gets, fd_ctrl, fd_new, fd_free, NULL /* callback_ctrl */, }; const BIO_METHOD *BIO_s_fd(void) { return &methods_fdp; } #endif // OPENSSL_NO_POSIX_IO int BIO_set_fd(BIO *bio, int fd, int close_flag) { return (int)BIO_int_ctrl(bio, BIO_C_SET_FD, close_flag, fd); } int BIO_get_fd(BIO *bio, int *out_fd) { return (int)BIO_ctrl(bio, BIO_C_GET_FD, 0, (char *) out_fd); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bio/file.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #if defined(__linux) || defined(__sun) || defined(__hpux) // Following definition aliases fopen to fopen64 on above mentioned // platforms. This makes it possible to open and sequentially access // files larger than 2GB from 32-bit application. It does not allow to // traverse them beyond 2GB with fseek/ftell, but on the other hand *no* // 32-bit platform permits that, not with fseek/ftell. Not to mention // that breaking 2GB limit for seeking would require surgery to *our* // API. But sequential access suffices for practical cases when you // can run into large files, such as fingerprinting, so we can let API // alone. For reference, the list of 32-bit platforms which allow for // sequential access of large files without extra "magic" comprise *BSD, // Darwin, IRIX... 
#ifndef _FILE_OFFSET_BITS #define _FILE_OFFSET_BITS 64 #endif #endif #include #include #include #include #include #include #include #include "../internal.h" #if defined(OPENSSL_WINDOWS) #include #include #endif #define BIO_FP_READ 0x02 #define BIO_FP_WRITE 0x04 #define BIO_FP_APPEND 0x08 #if !defined(OPENSSL_NO_FILESYSTEM) #define fopen_if_available fopen #else static FILE *fopen_if_available(const char *path, const char *mode) { errno = ENOENT; return NULL; } #endif BIO *BIO_new_file(const char *filename, const char *mode) { BIO *ret; FILE *file; file = fopen_if_available(filename, mode); if (file == NULL) { OPENSSL_PUT_SYSTEM_ERROR(); ERR_add_error_data(5, "fopen('", filename, "','", mode, "')"); if (errno == ENOENT) { OPENSSL_PUT_ERROR(BIO, BIO_R_NO_SUCH_FILE); } else { OPENSSL_PUT_ERROR(BIO, BIO_R_SYS_LIB); } return NULL; } ret = BIO_new_fp(file, BIO_CLOSE); if (ret == NULL) { fclose(file); return NULL; } return ret; } BIO *BIO_new_fp(FILE *stream, int flags) { BIO *ret = BIO_new(BIO_s_file()); if (ret == NULL) { return NULL; } BIO_set_fp(ret, stream, flags); return ret; } static int file_free(BIO *bio) { if (!bio->shutdown) { return 1; } if (bio->init && bio->ptr != NULL) { fclose(reinterpret_cast(bio->ptr)); bio->ptr = NULL; } bio->init = 0; return 1; } static int file_read(BIO *b, char *out, int outl) { if (!b->init) { return 0; } size_t ret = fread(out, 1, outl, (FILE *)b->ptr); if (ret == 0 && ferror((FILE *)b->ptr)) { OPENSSL_PUT_SYSTEM_ERROR(); OPENSSL_PUT_ERROR(BIO, ERR_R_SYS_LIB); return -1; } // fread reads at most |outl| bytes, so |ret| fits in an int. 
return (int)ret; } static int file_write(BIO *b, const char *in, int inl) { if (!b->init) { return 0; } int ret = (int)fwrite(in, inl, 1, (FILE *)b->ptr); if (ret > 0) { ret = inl; } return ret; } static long file_ctrl(BIO *b, int cmd, long num, void *ptr) { long ret = 1; FILE *fp = (FILE *)b->ptr; FILE **fpp; switch (cmd) { case BIO_CTRL_RESET: num = 0; [[fallthrough]]; case BIO_C_FILE_SEEK: ret = (long)fseek(fp, num, 0); break; case BIO_CTRL_EOF: ret = (long)feof(fp); break; case BIO_C_FILE_TELL: case BIO_CTRL_INFO: ret = ftell(fp); break; case BIO_C_SET_FILE_PTR: file_free(b); static_assert((BIO_CLOSE & BIO_FP_TEXT) == 0, "BIO_CLOSE and BIO_FP_TEXT must not collide"); #if defined(OPENSSL_WINDOWS) // If |BIO_FP_TEXT| is not set, OpenSSL will switch the file to binary // mode. BoringSSL intentionally diverges here because it means code // tested under POSIX will inadvertently change the state of |FILE| // objects when wrapping them in a |BIO|. if (num & BIO_FP_TEXT) { _setmode(_fileno(reinterpret_cast(ptr)), _O_TEXT); } #endif b->shutdown = (int)num & BIO_CLOSE; b->ptr = ptr; b->init = 1; break; case BIO_C_SET_FILENAME: file_free(b); b->shutdown = (int)num & BIO_CLOSE; const char *mode; if (num & BIO_FP_APPEND) { if (num & BIO_FP_READ) { mode = "ab+"; } else { mode = "ab"; } } else if ((num & BIO_FP_READ) && (num & BIO_FP_WRITE)) { mode = "rb+"; } else if (num & BIO_FP_WRITE) { mode = "wb"; } else if (num & BIO_FP_READ) { mode = "rb"; } else { OPENSSL_PUT_ERROR(BIO, BIO_R_BAD_FOPEN_MODE); ret = 0; break; } fp = fopen_if_available(reinterpret_cast(ptr), mode); if (fp == NULL) { OPENSSL_PUT_SYSTEM_ERROR(); ERR_add_error_data(5, "fopen('", ptr, "','", mode, "')"); OPENSSL_PUT_ERROR(BIO, ERR_R_SYS_LIB); ret = 0; break; } b->ptr = fp; b->init = 1; break; case BIO_C_GET_FILE_PTR: // the ptr parameter is actually a FILE ** in this case. 
if (ptr != NULL) { fpp = (FILE **)ptr; *fpp = (FILE *)b->ptr; } break; case BIO_CTRL_GET_CLOSE: ret = (long)b->shutdown; break; case BIO_CTRL_SET_CLOSE: b->shutdown = (int)num; break; case BIO_CTRL_FLUSH: ret = 0 == fflush((FILE *)b->ptr); break; case BIO_CTRL_WPENDING: case BIO_CTRL_PENDING: default: ret = 0; break; } return ret; } static int file_gets(BIO *bp, char *buf, int size) { if (size == 0) { return 0; } if (!fgets(buf, size, (FILE *)bp->ptr)) { buf[0] = 0; // TODO(davidben): This doesn't distinguish error and EOF. This should check // |ferror| as in |file_read|. return 0; } return (int)strlen(buf); } static const BIO_METHOD methods_filep = { BIO_TYPE_FILE, "FILE pointer", file_write, file_read, NULL /* puts */, file_gets, file_ctrl, NULL /* create */, file_free, NULL /* callback_ctrl */, }; const BIO_METHOD *BIO_s_file(void) { return &methods_filep; } int BIO_get_fp(BIO *bio, FILE **out_file) { return (int)BIO_ctrl(bio, BIO_C_GET_FILE_PTR, 0, (char *)out_file); } int BIO_set_fp(BIO *bio, FILE *file, int flags) { return (int)BIO_ctrl(bio, BIO_C_SET_FILE_PTR, flags, (char *)file); } int BIO_read_filename(BIO *bio, const char *filename) { return (int)BIO_ctrl(bio, BIO_C_SET_FILENAME, BIO_CLOSE | BIO_FP_READ, (char *)filename); } int BIO_write_filename(BIO *bio, const char *filename) { return (int)BIO_ctrl(bio, BIO_C_SET_FILENAME, BIO_CLOSE | BIO_FP_WRITE, (char *)filename); } int BIO_append_filename(BIO *bio, const char *filename) { return (int)BIO_ctrl(bio, BIO_C_SET_FILENAME, BIO_CLOSE | BIO_FP_APPEND, (char *)filename); } int BIO_rw_filename(BIO *bio, const char *filename) { return (int)BIO_ctrl(bio, BIO_C_SET_FILENAME, BIO_CLOSE | BIO_FP_READ | BIO_FP_WRITE, (char *)filename); } long BIO_tell(BIO *bio) { return BIO_ctrl(bio, BIO_C_FILE_TELL, 0, NULL); } long BIO_seek(BIO *bio, long offset) { return BIO_ctrl(bio, BIO_C_FILE_SEEK, offset, NULL); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bio/hexdump.cc 
================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include "../internal.h" namespace { // hexdump_ctx contains the state of a hexdump. struct hexdump_ctx { BIO *bio; char right_chars[18]; // the contents of the right-hand side, ASCII dump. unsigned used; // number of bytes in the current line. size_t n; // number of bytes total. unsigned indent; }; } // namespace static void hexbyte(char *out, uint8_t b) { static const char hextable[] = "0123456789abcdef"; out[0] = hextable[b >> 4]; out[1] = hextable[b & 0x0f]; } static char to_char(uint8_t b) { if (b < 32 || b > 126) { return '.'; } return b; } // hexdump_write adds |len| bytes of |data| to the current hex dump described by // |ctx|. static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data, size_t len) { char buf[10]; unsigned l; // Output lines look like: // 00000010 2e 2f 30 31 32 33 34 35 36 37 38 ... 3c 3d // |./0123456789:;<=| // ^ offset ^ extra space ^ ASCII of line for (size_t i = 0; i < len; i++) { if (ctx->used == 0) { // The beginning of a line. BIO_indent(ctx->bio, ctx->indent, UINT_MAX); hexbyte(&buf[0], ctx->n >> 24); hexbyte(&buf[2], ctx->n >> 16); hexbyte(&buf[4], ctx->n >> 8); hexbyte(&buf[6], ctx->n); buf[8] = buf[9] = ' '; if (BIO_write(ctx->bio, buf, 10) < 0) { return 0; } } hexbyte(buf, data[i]); buf[2] = ' '; l = 3; if (ctx->used == 7) { // There's an additional space after the 8th byte. buf[3] = ' '; l = 4; } else if (ctx->used == 15) { // At the end of the line there's an extra space and the bar for the // right column. 
buf[3] = ' '; buf[4] = '|'; l = 5; } if (BIO_write(ctx->bio, buf, l) < 0) { return 0; } ctx->right_chars[ctx->used] = to_char(data[i]); ctx->used++; ctx->n++; if (ctx->used == 16) { ctx->right_chars[16] = '|'; ctx->right_chars[17] = '\n'; if (BIO_write(ctx->bio, ctx->right_chars, sizeof(ctx->right_chars)) < 0) { return 0; } ctx->used = 0; } } return 1; } // finish flushes any buffered data in |ctx|. static int finish(struct hexdump_ctx *ctx) { // See the comments in |hexdump| for the details of this format. const unsigned n_bytes = ctx->used; unsigned l; char buf[5]; if (n_bytes == 0) { return 1; } OPENSSL_memset(buf, ' ', 4); buf[4] = '|'; for (; ctx->used < 16; ctx->used++) { l = 3; if (ctx->used == 7) { l = 4; } else if (ctx->used == 15) { l = 5; } if (BIO_write(ctx->bio, buf, l) < 0) { return 0; } } ctx->right_chars[n_bytes] = '|'; ctx->right_chars[n_bytes + 1] = '\n'; if (BIO_write(ctx->bio, ctx->right_chars, n_bytes + 2) < 0) { return 0; } return 1; } int BIO_hexdump(BIO *bio, const uint8_t *data, size_t len, unsigned indent) { struct hexdump_ctx ctx; OPENSSL_memset(&ctx, 0, sizeof(ctx)); ctx.bio = bio; ctx.indent = indent; if (!hexdump_write(&ctx, data, len) || !finish(&ctx)) { return 0; } return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bio/internal.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_BIO_INTERNAL_H #define OPENSSL_HEADER_BIO_INTERNAL_H #include #if !defined(OPENSSL_NO_SOCK) #if !defined(OPENSSL_WINDOWS) #if defined(OPENSSL_PNACL) // newlib uses u_short in socket.h without defining it. 
typedef unsigned short u_short; #endif #include #include #else OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include OPENSSL_MSVC_PRAGMA(warning(pop)) typedef int socklen_t; #endif #endif // !OPENSSL_NO_SOCK #if defined(__cplusplus) extern "C" { #endif #if !defined(OPENSSL_NO_SOCK) // bio_ip_and_port_to_socket_and_addr creates a socket and fills in |*out_addr| // and |*out_addr_length| with the correct values for connecting to |hostname| // on |port_str|. It returns one on success or zero on error. int bio_ip_and_port_to_socket_and_addr(int *out_sock, struct sockaddr_storage *out_addr, socklen_t *out_addr_length, const char *hostname, const char *port_str); // bio_socket_nbio sets whether |sock| is non-blocking. It returns one on // success and zero otherwise. int bio_socket_nbio(int sock, int on); // bio_clear_socket_error clears the last system socket error. // // TODO(fork): remove all callers of this. void bio_clear_socket_error(void); // bio_sock_error returns the last socket error on |sock|. int bio_sock_error(int sock); // bio_socket_should_retry returns non-zero if |return_value| indicates an error // and the last socket error indicates that it's non-fatal. int bio_socket_should_retry(int return_value); #endif // !OPENSSL_NO_SOCK // bio_errno_should_retry returns non-zero if |return_value| indicates an error // and |errno| indicates that it's non-fatal. int bio_errno_should_retry(int return_value); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_BIO_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/bio/pair.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../internal.h" namespace { struct bio_bio_st { BIO *peer; // NULL if buf == NULL. // If peer != NULL, then peer->ptr is also a bio_bio_st, // and its "peer" member points back to us. // peer != NULL iff init != 0 in the BIO. // This is for what we write (i.e. reading uses peer's struct): int closed; // valid iff peer != NULL size_t len; // valid iff buf != NULL; 0 if peer == NULL size_t offset; // valid iff buf != NULL; 0 if len == 0 size_t size; uint8_t *buf; // "size" elements (if != NULL) size_t request; // valid iff peer != NULL; 0 if len != 0, // otherwise set by peer to number of bytes // it (unsuccessfully) tried to read, // never more than buffer space (size-len) warrants. }; } // namespace static int bio_new(BIO *bio) { struct bio_bio_st *b = reinterpret_cast(OPENSSL_zalloc(sizeof *b)); if (b == NULL) { return 0; } b->size = 17 * 1024; // enough for one TLS record (just a default) bio->ptr = b; return 1; } static void bio_destroy_pair(BIO *bio) { struct bio_bio_st *b = reinterpret_cast(bio->ptr); BIO *peer_bio; struct bio_bio_st *peer_b; if (b == NULL) { return; } peer_bio = b->peer; if (peer_bio == NULL) { return; } peer_b = reinterpret_cast(peer_bio->ptr); assert(peer_b != NULL); assert(peer_b->peer == bio); peer_b->peer = NULL; peer_bio->init = 0; assert(peer_b->buf != NULL); peer_b->len = 0; peer_b->offset = 0; b->peer = NULL; bio->init = 0; assert(b->buf != NULL); b->len = 0; b->offset = 0; } static int bio_free(BIO *bio) { struct bio_bio_st *b = reinterpret_cast(bio->ptr); assert(b != NULL); if (b->peer) { bio_destroy_pair(bio); } OPENSSL_free(b->buf); OPENSSL_free(b); return 1; } static int bio_read(BIO *bio, char *buf, int size_) { size_t size = size_; size_t rest; struct bio_bio_st *b, *peer_b; BIO_clear_retry_flags(bio); if (!bio->init) { return 0; } b = 
reinterpret_cast(bio->ptr); assert(b != NULL); assert(b->peer != NULL); peer_b = reinterpret_cast(b->peer->ptr); assert(peer_b != NULL); assert(peer_b->buf != NULL); peer_b->request = 0; // will be set in "retry_read" situation if (buf == NULL || size == 0) { return 0; } if (peer_b->len == 0) { if (peer_b->closed) { return 0; // writer has closed, and no data is left } else { BIO_set_retry_read(bio); // buffer is empty if (size <= peer_b->size) { peer_b->request = size; } else { // don't ask for more than the peer can // deliver in one write peer_b->request = peer_b->size; } return -1; } } // we can read if (peer_b->len < size) { size = peer_b->len; } // now read "size" bytes rest = size; assert(rest > 0); // one or two iterations do { size_t chunk; assert(rest <= peer_b->len); if (peer_b->offset + rest <= peer_b->size) { chunk = rest; } else { // wrap around ring buffer chunk = peer_b->size - peer_b->offset; } assert(peer_b->offset + chunk <= peer_b->size); OPENSSL_memcpy(buf, peer_b->buf + peer_b->offset, chunk); peer_b->len -= chunk; if (peer_b->len) { peer_b->offset += chunk; assert(peer_b->offset <= peer_b->size); if (peer_b->offset == peer_b->size) { peer_b->offset = 0; } buf += chunk; } else { // buffer now empty, no need to advance "buf" assert(chunk == rest); peer_b->offset = 0; } rest -= chunk; } while (rest); // |size| is bounded by the buffer size, which fits in |int|. 
return (int)size; } static int bio_write(BIO *bio, const char *buf, int num_) { size_t num = num_; size_t rest; struct bio_bio_st *b; BIO_clear_retry_flags(bio); if (!bio->init || buf == NULL || num == 0) { return 0; } b = reinterpret_cast(bio->ptr); assert(b != NULL); assert(b->peer != NULL); assert(b->buf != NULL); b->request = 0; if (b->closed) { // we already closed OPENSSL_PUT_ERROR(BIO, BIO_R_BROKEN_PIPE); return -1; } assert(b->len <= b->size); if (b->len == b->size) { BIO_set_retry_write(bio); // buffer is full return -1; } // we can write if (num > b->size - b->len) { num = b->size - b->len; } // now write "num" bytes rest = num; assert(rest > 0); // one or two iterations do { size_t write_offset; size_t chunk; assert(b->len + rest <= b->size); write_offset = b->offset + b->len; if (write_offset >= b->size) { write_offset -= b->size; } // b->buf[write_offset] is the first byte we can write to. if (write_offset + rest <= b->size) { chunk = rest; } else { // wrap around ring buffer chunk = b->size - write_offset; } OPENSSL_memcpy(b->buf + write_offset, buf, chunk); b->len += chunk; assert(b->len <= b->size); rest -= chunk; buf += chunk; } while (rest); // |num| is bounded by the buffer size, which fits in |int|. 
return (int)num; } static int bio_make_pair(BIO *bio1, BIO *bio2, size_t writebuf1_len, size_t writebuf2_len) { struct bio_bio_st *b1, *b2; assert(bio1 != NULL); assert(bio2 != NULL); b1 = reinterpret_cast(bio1->ptr); b2 = reinterpret_cast(bio2->ptr); if (b1->peer != NULL || b2->peer != NULL) { OPENSSL_PUT_ERROR(BIO, BIO_R_IN_USE); return 0; } if (b1->buf == NULL) { if (writebuf1_len) { b1->size = writebuf1_len; } b1->buf = reinterpret_cast(OPENSSL_malloc(b1->size)); if (b1->buf == NULL) { return 0; } b1->len = 0; b1->offset = 0; } if (b2->buf == NULL) { if (writebuf2_len) { b2->size = writebuf2_len; } b2->buf = reinterpret_cast(OPENSSL_malloc(b2->size)); if (b2->buf == NULL) { return 0; } b2->len = 0; b2->offset = 0; } b1->peer = bio2; b1->closed = 0; b1->request = 0; b2->peer = bio1; b2->closed = 0; b2->request = 0; bio1->init = 1; bio2->init = 1; return 1; } static long bio_ctrl(BIO *bio, int cmd, long num, void *ptr) { long ret; struct bio_bio_st *b = reinterpret_cast(bio->ptr); assert(b != NULL); switch (cmd) { // Specific control codes first: case BIO_C_GET_WRITE_BUF_SIZE: ret = (long)b->size; break; case BIO_C_GET_WRITE_GUARANTEE: // How many bytes can the caller feed to the next write // without having to keep any? if (b->peer == NULL || b->closed) { ret = 0; } else { ret = (long)b->size - b->len; } break; case BIO_C_GET_READ_REQUEST: // If the peer unsuccessfully tried to read, how many bytes // were requested? (As with BIO_CTRL_PENDING, that number // can usually be treated as boolean.) ret = (long)b->request; break; case BIO_C_RESET_READ_REQUEST: // Reset request. (Can be useful after read attempts // at the other side that are meant to be non-blocking, // e.g. when probing SSL_read to see if any data is // available.) 
b->request = 0; ret = 1; break; case BIO_C_SHUTDOWN_WR: // similar to shutdown(..., SHUT_WR) b->closed = 1; ret = 1; break; // Standard control codes: case BIO_CTRL_GET_CLOSE: ret = bio->shutdown; break; case BIO_CTRL_SET_CLOSE: bio->shutdown = (int)num; ret = 1; break; case BIO_CTRL_PENDING: if (b->peer != NULL) { struct bio_bio_st *peer_b = reinterpret_cast(b->peer->ptr); ret = (long)peer_b->len; } else { ret = 0; } break; case BIO_CTRL_WPENDING: ret = 0; if (b->buf != NULL) { ret = (long)b->len; } break; case BIO_CTRL_FLUSH: ret = 1; break; case BIO_CTRL_EOF: { BIO *other_bio = reinterpret_cast(ptr); if (other_bio) { struct bio_bio_st *other_b = reinterpret_cast(other_bio->ptr); assert(other_b != NULL); ret = other_b->len == 0 && other_b->closed; } else { ret = 1; } } break; default: ret = 0; } return ret; } static const BIO_METHOD methods_biop = { BIO_TYPE_BIO, "BIO pair", bio_write, bio_read, NULL /* puts */, NULL /* gets */, bio_ctrl, bio_new, bio_free, NULL /* callback_ctrl */, }; static const BIO_METHOD *bio_s_bio(void) { return &methods_biop; } int BIO_new_bio_pair(BIO **bio1_p, size_t writebuf1_len, BIO **bio2_p, size_t writebuf2_len) { BIO *bio1 = BIO_new(bio_s_bio()); BIO *bio2 = BIO_new(bio_s_bio()); if (bio1 == NULL || bio2 == NULL || !bio_make_pair(bio1, bio2, writebuf1_len, writebuf2_len)) { BIO_free(bio1); BIO_free(bio2); *bio1_p = NULL; *bio2_p = NULL; return 0; } *bio1_p = bio1; *bio2_p = bio2; return 1; } size_t BIO_ctrl_get_read_request(BIO *bio) { return BIO_ctrl(bio, BIO_C_GET_READ_REQUEST, 0, NULL); } size_t BIO_ctrl_get_write_guarantee(BIO *bio) { return BIO_ctrl(bio, BIO_C_GET_WRITE_GUARANTEE, 0, NULL); } int BIO_shutdown_wr(BIO *bio) { return (int)BIO_ctrl(bio, BIO_C_SHUTDOWN_WR, 0, NULL); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bio/printf.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. 
* * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include int BIO_printf(BIO *bio, const char *format, ...) { va_list args; char buf[256], *out, out_malloced = 0; int out_len, ret; va_start(args, format); out_len = vsnprintf(buf, sizeof(buf), format, args); va_end(args); if (out_len < 0) { return -1; } if ((size_t)out_len >= sizeof(buf)) { const size_t requested_len = (size_t)out_len; // The output was truncated. Note that vsnprintf's return value does not // include a trailing NUL, but the buffer must be sized for it. out = reinterpret_cast(OPENSSL_malloc(requested_len + 1)); out_malloced = 1; if (out == NULL) { return -1; } va_start(args, format); out_len = vsnprintf(out, requested_len + 1, format, args); va_end(args); assert(out_len == (int)requested_len); } else { out = buf; } ret = BIO_write(bio, out, out_len); if (out_malloced) { OPENSSL_free(out); } return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bio/socket.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #if !defined(OPENSSL_NO_SOCK) #include #include #if !defined(OPENSSL_WINDOWS) #include #else OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include OPENSSL_MSVC_PRAGMA(warning(pop)) OPENSSL_MSVC_PRAGMA(comment(lib, "Ws2_32.lib")) #endif #include "internal.h" #if !defined(OPENSSL_WINDOWS) static int closesocket(int sock) { return close(sock); } #endif static int sock_free(BIO *bio) { if (bio->shutdown) { if (bio->init) { closesocket(bio->num); } bio->init = 0; bio->flags = 0; } return 1; } static int sock_read(BIO *b, char *out, int outl) { if (out == NULL) { return 0; } bio_clear_socket_error(); #if defined(OPENSSL_WINDOWS) int ret = recv(b->num, out, outl, 0); #else int ret = (int)read(b->num, out, outl); #endif BIO_clear_retry_flags(b); if (ret <= 0) { if (bio_socket_should_retry(ret)) { BIO_set_retry_read(b); } } return ret; } static int sock_write(BIO *b, const char *in, int inl) { bio_clear_socket_error(); #if defined(OPENSSL_WINDOWS) int ret = send(b->num, in, inl, 0); #else int ret = (int)write(b->num, in, inl); #endif BIO_clear_retry_flags(b); if (ret <= 0) { if (bio_socket_should_retry(ret)) { BIO_set_retry_write(b); } } return ret; } static long sock_ctrl(BIO *b, int cmd, long num, void *ptr) { long ret = 1; int *ip; switch (cmd) { case BIO_C_SET_FD: sock_free(b); b->num = *((int *)ptr); b->shutdown = (int)num; b->init = 1; break; case BIO_C_GET_FD: if (b->init) { ip = (int *)ptr; if (ip != NULL) { *ip = b->num; } ret = b->num; } else { ret = -1; } break; case BIO_CTRL_GET_CLOSE: ret = b->shutdown; break; case BIO_CTRL_SET_CLOSE: b->shutdown = (int)num; break; case BIO_CTRL_FLUSH: ret = 1; break; default: ret = 0; break; } return ret; } static const BIO_METHOD methods_sockp = { BIO_TYPE_SOCKET, "socket", sock_write, sock_read, NULL /* puts */, NULL /* gets, */, sock_ctrl, NULL /* create */, sock_free, NULL /* callback_ctrl */, }; 
const BIO_METHOD *BIO_s_socket(void) { return &methods_sockp; } BIO *BIO_new_socket(int fd, int close_flag) { BIO *ret; ret = BIO_new(BIO_s_socket()); if (ret == NULL) { return NULL; } BIO_set_fd(ret, fd, close_flag); return ret; } #endif // OPENSSL_NO_SOCK ================================================ FILE: Sources/CNIOBoringSSL/crypto/bio/socket_helper.cc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #if defined(__linux__) #undef _POSIX_C_SOURCE #define _POSIX_C_SOURCE 200112L #endif #include #include #if !defined(OPENSSL_NO_SOCK) #include #include #include #if !defined(OPENSSL_WINDOWS) #include #include #else OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include #include OPENSSL_MSVC_PRAGMA(warning(pop)) #endif #include "internal.h" #include "../internal.h" int bio_ip_and_port_to_socket_and_addr(int *out_sock, struct sockaddr_storage *out_addr, socklen_t *out_addr_length, const char *hostname, const char *port_str) { struct addrinfo hint, *result, *cur; int ret; *out_sock = -1; OPENSSL_memset(&hint, 0, sizeof(hint)); hint.ai_family = AF_UNSPEC; hint.ai_socktype = SOCK_STREAM; ret = getaddrinfo(hostname, port_str, &hint, &result); if (ret != 0) { OPENSSL_PUT_ERROR(SYS, 0); #if defined(OPENSSL_WINDOWS) ERR_add_error_data(1, gai_strerrorA(ret)); #else ERR_add_error_data(1, gai_strerror(ret)); #endif return 0; } ret = 0; for (cur = result; cur; cur = cur->ai_next) { if ((size_t) cur->ai_addrlen > sizeof(struct sockaddr_storage)) { continue; } OPENSSL_memset(out_addr, 0, sizeof(struct sockaddr_storage)); OPENSSL_memcpy(out_addr, cur->ai_addr, cur->ai_addrlen); *out_addr_length = cur->ai_addrlen; *out_sock = socket(cur->ai_family, cur->ai_socktype, cur->ai_protocol); if (*out_sock < 0) { OPENSSL_PUT_SYSTEM_ERROR(); goto out; } ret = 1; break; } out: freeaddrinfo(result); return ret; } int bio_socket_nbio(int sock, int on) { #if defined(OPENSSL_WINDOWS) u_long arg = on; return 0 == ioctlsocket(sock, FIONBIO, &arg); #else int flags = fcntl(sock, F_GETFL, 0); if (flags < 0) { return 0; } if (!on) { flags &= ~O_NONBLOCK; } else { flags |= O_NONBLOCK; } return fcntl(sock, F_SETFL, flags) == 0; #endif } void bio_clear_socket_error(void) {} int bio_sock_error(int sock) { int error; socklen_t error_size = sizeof(error); if (getsockopt(sock, SOL_SOCKET, SO_ERROR, (char *)&error, &error_size) < 0) { return 1; } return error; } int bio_socket_should_retry(int return_value) { #if 
defined(OPENSSL_WINDOWS) return return_value == -1 && WSAGetLastError() == WSAEWOULDBLOCK; #else // On POSIX platforms, sockets and fds are the same. return bio_errno_should_retry(return_value); #endif } #endif // OPENSSL_NO_SOCK ================================================ FILE: Sources/CNIOBoringSSL/crypto/blake2/blake2.cc ================================================ /* Copyright 2021 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include "../internal.h" // https://tools.ietf.org/html/rfc7693#section-2.6 static const uint64_t kIV[8] = { UINT64_C(0x6a09e667f3bcc908), UINT64_C(0xbb67ae8584caa73b), UINT64_C(0x3c6ef372fe94f82b), UINT64_C(0xa54ff53a5f1d36f1), UINT64_C(0x510e527fade682d1), UINT64_C(0x9b05688c2b3e6c1f), UINT64_C(0x1f83d9abfb41bd6b), UINT64_C(0x5be0cd19137e2179), }; // https://tools.ietf.org/html/rfc7693#section-2.7 static const uint8_t kSigma[10 * 16] = { // clang-format off 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3, 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4, 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8, 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13, 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9, 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11, 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10, 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5, 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0, // clang-format on }; // https://tools.ietf.org/html/rfc7693#section-3.1 static void blake2b_mix(uint64_t v[16], int a, int b, int c, int d, uint64_t x, uint64_t y) { v[a] = v[a] + v[b] + x; v[d] = CRYPTO_rotr_u64(v[d] ^ v[a], 32); v[c] = v[c] + v[d]; v[b] = CRYPTO_rotr_u64(v[b] ^ v[c], 24); v[a] = v[a] + v[b] + y; v[d] = CRYPTO_rotr_u64(v[d] ^ v[a], 16); v[c] = v[c] + v[d]; v[b] = CRYPTO_rotr_u64(v[b] ^ v[c], 63); } static uint64_t blake2b_load(const uint8_t block[BLAKE2B_CBLOCK], size_t i) { return CRYPTO_load_u64_le(block + 8 * i); } static void blake2b_transform(BLAKE2B_CTX *b2b, const uint8_t block[BLAKE2B_CBLOCK], size_t num_bytes, int is_final_block) { // https://tools.ietf.org/html/rfc7693#section-3.2 uint64_t v[16]; static_assert(sizeof(v) == sizeof(b2b->h) + sizeof(kIV), ""); OPENSSL_memcpy(v, b2b->h, sizeof(b2b->h)); OPENSSL_memcpy(&v[8], kIV, sizeof(kIV)); b2b->t_low += num_bytes; if (b2b->t_low < num_bytes) { b2b->t_high++; } v[12] ^= b2b->t_low; 
v[13] ^= b2b->t_high; if (is_final_block) { v[14] = ~v[14]; } for (int round = 0; round < 12; round++) { const uint8_t *const s = &kSigma[16 * (round % 10)]; blake2b_mix(v, 0, 4, 8, 12, blake2b_load(block, s[0]), blake2b_load(block, s[1])); blake2b_mix(v, 1, 5, 9, 13, blake2b_load(block, s[2]), blake2b_load(block, s[3])); blake2b_mix(v, 2, 6, 10, 14, blake2b_load(block, s[4]), blake2b_load(block, s[5])); blake2b_mix(v, 3, 7, 11, 15, blake2b_load(block, s[6]), blake2b_load(block, s[7])); blake2b_mix(v, 0, 5, 10, 15, blake2b_load(block, s[8]), blake2b_load(block, s[9])); blake2b_mix(v, 1, 6, 11, 12, blake2b_load(block, s[10]), blake2b_load(block, s[11])); blake2b_mix(v, 2, 7, 8, 13, blake2b_load(block, s[12]), blake2b_load(block, s[13])); blake2b_mix(v, 3, 4, 9, 14, blake2b_load(block, s[14]), blake2b_load(block, s[15])); } for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(b2b->h); i++) { b2b->h[i] ^= v[i]; b2b->h[i] ^= v[i + 8]; } } void BLAKE2B256_Init(BLAKE2B_CTX *b2b) { OPENSSL_memset(b2b, 0, sizeof(BLAKE2B_CTX)); static_assert(sizeof(kIV) == sizeof(b2b->h), ""); OPENSSL_memcpy(&b2b->h, kIV, sizeof(kIV)); // https://tools.ietf.org/html/rfc7693#section-2.5 b2b->h[0] ^= 0x01010000 | BLAKE2B256_DIGEST_LENGTH; } void BLAKE2B256_Update(BLAKE2B_CTX *b2b, const void *in_data, size_t len) { if (len == 0) { // Work around a C language bug. See https://crbug.com/1019588. return; } const uint8_t *data = reinterpret_cast(in_data); size_t todo = sizeof(b2b->block) - b2b->block_used; if (todo > len) { todo = len; } OPENSSL_memcpy(&b2b->block[b2b->block_used], data, todo); b2b->block_used += todo; data += todo; len -= todo; if (!len) { return; } // More input remains therefore we must have filled |b2b->block|. 
assert(b2b->block_used == BLAKE2B_CBLOCK); blake2b_transform(b2b, b2b->block, BLAKE2B_CBLOCK, /*is_final_block=*/0); b2b->block_used = 0; while (len > BLAKE2B_CBLOCK) { blake2b_transform(b2b, data, BLAKE2B_CBLOCK, /*is_final_block=*/0); data += BLAKE2B_CBLOCK; len -= BLAKE2B_CBLOCK; } OPENSSL_memcpy(b2b->block, data, len); b2b->block_used = len; } void BLAKE2B256_Final(uint8_t out[BLAKE2B256_DIGEST_LENGTH], BLAKE2B_CTX *b2b) { OPENSSL_memset(&b2b->block[b2b->block_used], 0, sizeof(b2b->block) - b2b->block_used); blake2b_transform(b2b, b2b->block, b2b->block_used, /*is_final_block=*/1); static_assert(BLAKE2B256_DIGEST_LENGTH <= sizeof(b2b->h), ""); memcpy(out, b2b->h, BLAKE2B256_DIGEST_LENGTH); } void BLAKE2B256(const uint8_t *data, size_t len, uint8_t out[BLAKE2B256_DIGEST_LENGTH]) { BLAKE2B_CTX ctx; BLAKE2B256_Init(&ctx); BLAKE2B256_Update(&ctx, data, len); BLAKE2B256_Final(out, &ctx); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bn/bn_asn1.cc ================================================ /* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include int BN_parse_asn1_unsigned(CBS *cbs, BIGNUM *ret) { CBS child; int is_negative; if (!CBS_get_asn1(cbs, &child, CBS_ASN1_INTEGER) || !CBS_is_valid_asn1_integer(&child, &is_negative)) { OPENSSL_PUT_ERROR(BN, BN_R_BAD_ENCODING); return 0; } if (is_negative) { OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER); return 0; } return BN_bin2bn(CBS_data(&child), CBS_len(&child), ret) != NULL; } int BN_marshal_asn1(CBB *cbb, const BIGNUM *bn) { // Negative numbers are unsupported. if (BN_is_negative(bn)) { OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER); return 0; } CBB child; if (!CBB_add_asn1(cbb, &child, CBS_ASN1_INTEGER) || // The number must be padded with a leading zero if the high bit would // otherwise be set or if |bn| is zero. (BN_num_bits(bn) % 8 == 0 && !CBB_add_u8(&child, 0x00)) || !BN_bn2cbb_padded(&child, BN_num_bytes(bn), bn) || !CBB_flush(cbb)) { OPENSSL_PUT_ERROR(BN, BN_R_ENCODE_ERROR); return 0; } return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bn/convert.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include "../fipsmodule/bn/internal.h" int BN_bn2cbb_padded(CBB *out, size_t len, const BIGNUM *in) { uint8_t *ptr; return CBB_add_space(out, &ptr, len) && BN_bn2bin_padded(ptr, len, in); } static const char hextable[] = "0123456789abcdef"; char *BN_bn2hex(const BIGNUM *bn) { int width = bn_minimal_width(bn); char *buf = reinterpret_cast( OPENSSL_malloc(1 /* leading '-' */ + 1 /* zero is non-empty */ + width * BN_BYTES * 2 + 1 /* trailing NUL */)); if (buf == NULL) { return NULL; } char *p = buf; if (bn->neg) { *(p++) = '-'; } if (BN_is_zero(bn)) { *(p++) = '0'; } int z = 0; for (int i = width - 1; i >= 0; i--) { for (int j = BN_BITS2 - 8; j >= 0; j -= 8) { // strip leading zeros int v = ((int)(bn->d[i] >> (long)j)) & 0xff; if (z || v != 0) { *(p++) = hextable[v >> 4]; *(p++) = hextable[v & 0x0f]; z = 1; } } } *p = '\0'; return buf; } // decode_hex decodes |in_len| bytes of hex data from |in| and updates |bn|. static int decode_hex(BIGNUM *bn, const char *in, int in_len) { if (in_len > INT_MAX / 4) { OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG); return 0; } // |in_len| is the number of hex digits. if (!bn_expand(bn, in_len * 4)) { return 0; } int i = 0; while (in_len > 0) { // Decode one |BN_ULONG| at a time. int todo = BN_BYTES * 2; if (todo > in_len) { todo = in_len; } BN_ULONG word = 0; int j; for (j = todo; j > 0; j--) { uint8_t hex = 0; if (!OPENSSL_fromxdigit(&hex, in[in_len - j])) { // This shouldn't happen. The caller checks |OPENSSL_isxdigit|. assert(0); } word = (word << 4) | hex; } bn->d[i++] = word; in_len -= todo; } assert(i <= bn->dmax); bn->width = i; return 1; } // decode_dec decodes |in_len| bytes of decimal data from |in| and updates |bn|. 
static int decode_dec(BIGNUM *bn, const char *in, int in_len) { int i, j; BN_ULONG l = 0; // Decode |BN_DEC_NUM| digits at a time. j = BN_DEC_NUM - (in_len % BN_DEC_NUM); if (j == BN_DEC_NUM) { j = 0; } l = 0; for (i = 0; i < in_len; i++) { l *= 10; l += in[i] - '0'; if (++j == BN_DEC_NUM) { if (!BN_mul_word(bn, BN_DEC_CONV) || !BN_add_word(bn, l)) { return 0; } l = 0; j = 0; } } return 1; } typedef int (*decode_func)(BIGNUM *bn, const char *in, int in_len); typedef int (*char_test_func)(int c); static int bn_x2bn(BIGNUM **outp, const char *in, decode_func decode, char_test_func want_char) { BIGNUM *ret = NULL; int neg = 0, i; int num; if (in == NULL || *in == 0) { return 0; } if (*in == '-') { neg = 1; in++; } for (i = 0; want_char((unsigned char)in[i]) && i + neg < INT_MAX; i++) { } num = i + neg; if (outp == NULL) { return num; } // in is the start of the hex digits, and it is 'i' long if (*outp == NULL) { ret = BN_new(); if (ret == NULL) { return 0; } } else { ret = *outp; BN_zero(ret); } if (!decode(ret, in, i)) { goto err; } bn_set_minimal_width(ret); if (!BN_is_zero(ret)) { ret->neg = neg; } *outp = ret; return num; err: if (*outp == NULL) { BN_free(ret); } return 0; } int BN_hex2bn(BIGNUM **outp, const char *in) { return bn_x2bn(outp, in, decode_hex, OPENSSL_isxdigit); } char *BN_bn2dec(const BIGNUM *a) { // It is easier to print strings little-endian, so we assemble it in reverse // and fix at the end. 
BIGNUM *copy = NULL; CBB cbb; if (!CBB_init(&cbb, 16) || // !CBB_add_u8(&cbb, 0 /* trailing NUL */)) { goto err; } if (BN_is_zero(a)) { if (!CBB_add_u8(&cbb, '0')) { goto err; } } else { copy = BN_dup(a); if (copy == NULL) { goto err; } while (!BN_is_zero(copy)) { BN_ULONG word = BN_div_word(copy, BN_DEC_CONV); if (word == (BN_ULONG)-1) { goto err; } const int add_leading_zeros = !BN_is_zero(copy); for (int i = 0; i < BN_DEC_NUM && (add_leading_zeros || word != 0); i++) { if (!CBB_add_u8(&cbb, '0' + word % 10)) { goto err; } word /= 10; } assert(word == 0); } } if (BN_is_negative(a) && // !CBB_add_u8(&cbb, '-')) { goto err; } uint8_t *data; size_t len; if (!CBB_finish(&cbb, &data, &len)) { goto err; } // Reverse the buffer. for (size_t i = 0; i < len / 2; i++) { uint8_t tmp = data[i]; data[i] = data[len - 1 - i]; data[len - 1 - i] = tmp; } BN_free(copy); return (char *)data; err: BN_free(copy); CBB_cleanup(&cbb); return NULL; } int BN_dec2bn(BIGNUM **outp, const char *in) { return bn_x2bn(outp, in, decode_dec, OPENSSL_isdigit); } int BN_asc2bn(BIGNUM **outp, const char *in) { const char *const orig_in = in; if (*in == '-') { in++; } if (in[0] == '0' && (in[1] == 'X' || in[1] == 'x')) { if (!BN_hex2bn(outp, in + 2)) { return 0; } } else { if (!BN_dec2bn(outp, in)) { return 0; } } if (*orig_in == '-' && !BN_is_zero(*outp)) { (*outp)->neg = 1; } return 1; } int BN_print(BIO *bp, const BIGNUM *a) { int i, j, v, z = 0; int ret = 0; if (a->neg && BIO_write(bp, "-", 1) != 1) { goto end; } if (BN_is_zero(a) && BIO_write(bp, "0", 1) != 1) { goto end; } for (i = bn_minimal_width(a) - 1; i >= 0; i--) { for (j = BN_BITS2 - 4; j >= 0; j -= 4) { // strip leading zeros v = ((int)(a->d[i] >> (long)j)) & 0x0f; if (z || v != 0) { if (BIO_write(bp, &hextable[v], 1) != 1) { goto end; } z = 1; } } } ret = 1; end: return ret; } int BN_print_fp(FILE *fp, const BIGNUM *a) { BIO *b = BIO_new_fp(fp, BIO_NOCLOSE); if (b == NULL) { return 0; } int ret = BN_print(b, a); BIO_free(b); return 
ret; } size_t BN_bn2mpi(const BIGNUM *in, uint8_t *out) { const size_t bits = BN_num_bits(in); const size_t bytes = (bits + 7) / 8; // If the number of bits is a multiple of 8, i.e. if the MSB is set, // prefix with a zero byte. int extend = 0; if (bytes != 0 && (bits & 0x07) == 0) { extend = 1; } const size_t len = bytes + extend; if (len < bytes || 4 + len < len || (len & 0xffffffff) != len) { // If we cannot represent the number then we emit zero as the interface // doesn't allow an error to be signalled. if (out) { OPENSSL_memset(out, 0, 4); } return 4; } if (out == NULL) { return 4 + len; } out[0] = len >> 24; out[1] = len >> 16; out[2] = len >> 8; out[3] = len; if (extend) { out[4] = 0; } BN_bn2bin(in, out + 4 + extend); if (in->neg && len > 0) { out[4] |= 0x80; } return len + 4; } BIGNUM *BN_mpi2bn(const uint8_t *in, size_t len, BIGNUM *out) { if (len < 4) { OPENSSL_PUT_ERROR(BN, BN_R_BAD_ENCODING); return NULL; } const size_t in_len = ((size_t)in[0] << 24) | // ((size_t)in[1] << 16) | // ((size_t)in[2] << 8) | // ((size_t)in[3]); if (in_len != len - 4) { OPENSSL_PUT_ERROR(BN, BN_R_BAD_ENCODING); return NULL; } int out_is_alloced = 0; if (out == NULL) { out = BN_new(); if (out == NULL) { return NULL; } out_is_alloced = 1; } if (in_len == 0) { BN_zero(out); return out; } in += 4; if (BN_bin2bn(in, in_len, out) == NULL) { if (out_is_alloced) { BN_free(out); } return NULL; } out->neg = ((*in) & 0x80) != 0; if (out->neg) { BN_clear_bit(out, BN_num_bits(out) - 1); } return out; } int BN_bn2binpad(const BIGNUM *in, uint8_t *out, int len) { if (len < 0 || // !BN_bn2bin_padded(out, (size_t)len, in)) { return -1; } return len; } int BN_bn2lebinpad(const BIGNUM *in, uint8_t *out, int len) { if (len < 0 || // !BN_bn2le_padded(out, (size_t)len, in)) { return -1; } return len; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/buf/buf.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project 
Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "../internal.h" BUF_MEM *BUF_MEM_new(void) { return reinterpret_cast(OPENSSL_zalloc(sizeof(BUF_MEM))); } void BUF_MEM_free(BUF_MEM *buf) { if (buf == nullptr) { return; } OPENSSL_free(buf->data); OPENSSL_free(buf); } int BUF_MEM_reserve(BUF_MEM *buf, size_t cap) { if (buf->max >= cap) { return 1; } size_t n = cap + 3; if (n < cap) { OPENSSL_PUT_ERROR(BUF, ERR_R_OVERFLOW); return 0; } n = n / 3; size_t alloc_size = n * 4; if (alloc_size / 4 != n) { OPENSSL_PUT_ERROR(BUF, ERR_R_OVERFLOW); return 0; } char *new_buf = reinterpret_cast(OPENSSL_realloc(buf->data, alloc_size)); if (new_buf == NULL) { return 0; } buf->data = new_buf; buf->max = alloc_size; return 1; } size_t BUF_MEM_grow(BUF_MEM *buf, size_t len) { if (!BUF_MEM_reserve(buf, len)) { return 0; } if (buf->length < len) { OPENSSL_memset(&buf->data[buf->length], 0, len - buf->length); } buf->length = len; return len; } size_t BUF_MEM_grow_clean(BUF_MEM *buf, size_t len) { return BUF_MEM_grow(buf, len); } int BUF_MEM_append(BUF_MEM *buf, const void *in, size_t len) { // Work around a C language bug. See https://crbug.com/1019588. 
if (len == 0) { return 1; } size_t new_len = buf->length + len; if (new_len < len) { OPENSSL_PUT_ERROR(BUF, ERR_R_OVERFLOW); return 0; } if (!BUF_MEM_reserve(buf, new_len)) { return 0; } OPENSSL_memcpy(buf->data + buf->length, in, len); buf->length = new_len; return 1; } char *BUF_strdup(const char *str) { return OPENSSL_strdup(str); } size_t BUF_strnlen(const char *str, size_t max_len) { return OPENSSL_strnlen(str, max_len); } char *BUF_strndup(const char *str, size_t size) { return OPENSSL_strndup(str, size); } size_t BUF_strlcpy(char *dst, const char *src, size_t dst_size) { return OPENSSL_strlcpy(dst, src, dst_size); } size_t BUF_strlcat(char *dst, const char *src, size_t dst_size) { return OPENSSL_strlcat(dst, src, dst_size); } void *BUF_memdup(const void *data, size_t size) { return OPENSSL_memdup(data, size); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bytestring/asn1_compat.cc ================================================ /* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include "internal.h" #include "../internal.h" int CBB_finish_i2d(CBB *cbb, uint8_t **outp) { assert(!cbb->is_child); assert(cbb->u.base.can_resize); uint8_t *der; size_t der_len; if (!CBB_finish(cbb, &der, &der_len)) { CBB_cleanup(cbb); return -1; } if (der_len > INT_MAX) { OPENSSL_free(der); return -1; } if (outp != NULL) { if (*outp == NULL) { *outp = der; der = NULL; } else { OPENSSL_memcpy(*outp, der, der_len); *outp += der_len; } } OPENSSL_free(der); return (int)der_len; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bytestring/ber.cc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include "internal.h" // kMaxDepth limits the recursion depth to avoid overflowing the stack. static const uint32_t kMaxDepth = 128; // is_string_type returns one if |tag| is a string type and zero otherwise. It // ignores the constructed bit. static int is_string_type(CBS_ASN1_TAG tag) { // While BER supports constructed BIT STRINGS, OpenSSL misparses them. To // avoid acting on an ambiguous input, we do not support constructed BIT // STRINGS. See https://github.com/openssl/openssl/issues/12810. 
switch (tag & ~CBS_ASN1_CONSTRUCTED) {
  // All universal string types are accepted here except BIT STRING; see the
  // comment above this switch for why constructed BIT STRINGs are rejected.
  case CBS_ASN1_OCTETSTRING:
  case CBS_ASN1_UTF8STRING:
  case CBS_ASN1_NUMERICSTRING:
  case CBS_ASN1_PRINTABLESTRING:
  case CBS_ASN1_T61STRING:
  case CBS_ASN1_VIDEOTEXSTRING:
  case CBS_ASN1_IA5STRING:
  case CBS_ASN1_GRAPHICSTRING:
  case CBS_ASN1_VISIBLESTRING:
  case CBS_ASN1_GENERALSTRING:
  case CBS_ASN1_UNIVERSALSTRING:
  case CBS_ASN1_BMPSTRING:
    return 1;
  default:
    return 0;
}
}

// cbs_find_ber walks an ASN.1 structure in |orig_in| and sets |*ber_found|
// depending on whether an indefinite length element or constructed string was
// found. The value of |orig_in| is not changed. It returns one on success (i.e.
// |*ber_found| was set) and zero on error.
static int cbs_find_ber(const CBS *orig_in, int *ber_found, uint32_t depth) {
  // Bound the recursion so deeply-nested input cannot overflow the stack
  // (see |kMaxDepth|).
  if (depth > kMaxDepth) {
    return 0;
  }

  // Walk a local copy so the caller's |CBS| is left untouched.
  CBS in = *orig_in;
  *ber_found = 0;

  while (CBS_len(&in) > 0) {
    CBS contents;
    CBS_ASN1_TAG tag;
    size_t header_len;
    int indefinite;
    // |CBS_get_any_ber_asn1_element| also sets |*ber_found| itself when it
    // sees BER-only length encodings.
    if (!CBS_get_any_ber_asn1_element(&in, &contents, &tag, &header_len,
                                      ber_found, &indefinite)) {
      return 0;
    }
    if (*ber_found) {
      return 1;
    }
    if (tag & CBS_ASN1_CONSTRUCTED) {
      if (is_string_type(tag)) {
        // Constructed strings are only legal in BER and require conversion.
        *ber_found = 1;
        return 1;
      }
      // Skip the element's header and recurse into its children.
      if (!CBS_skip(&contents, header_len) ||
          !cbs_find_ber(&contents, ber_found, depth + 1)) {
        return 0;
      }
      if (*ber_found) {
        // We already found BER. No need to continue parsing.
        return 1;
      }
    }
  }

  return 1;
}

// cbs_get_eoc returns one if |cbs| begins with an "end of contents" (EOC) value
// and zero otherwise. If an EOC was found, it advances |cbs| past it.
static int cbs_get_eoc(CBS *cbs) {
  // An EOC is encoded as two zero octets: tag 0x00, length 0x00.
  if (CBS_len(cbs) >= 2 && CBS_data(cbs)[0] == 0 && CBS_data(cbs)[1] == 0) {
    return CBS_skip(cbs, 2);
  }
  return 0;
}

// cbs_convert_ber reads BER data from |in| and writes DER data to |out|. If
// |string_tag| is non-zero, then all elements must match |string_tag| up to the
// constructed bit and primitive element bodies are written to |out| without
// element headers.
This is used when concatenating the fragments of a // constructed string. If |looking_for_eoc| is set then any EOC elements found // will cause the function to return after consuming it. It returns one on // success and zero on error.
static int cbs_convert_ber(CBS *in, CBB *out, CBS_ASN1_TAG string_tag,
                           int looking_for_eoc, uint32_t depth) {
  // A constructed bit in |string_tag| would make the tag comparison below
  // ambiguous.
  assert(!(string_tag & CBS_ASN1_CONSTRUCTED));

  // Bound the recursion so deeply-nested input cannot overflow the stack.
  if (depth > kMaxDepth) {
    return 0;
  }

  while (CBS_len(in) > 0) {
    if (looking_for_eoc && cbs_get_eoc(in)) {
      return 1;
    }

    CBS contents;
    CBS_ASN1_TAG tag, child_string_tag = string_tag;
    size_t header_len;
    int indefinite;
    CBB *out_contents, out_contents_storage;
    if (!CBS_get_any_ber_asn1_element(in, &contents, &tag, &header_len,
                                      /*out_ber_found=*/NULL, &indefinite)) {
      return 0;
    }

    if (string_tag != 0) {
      // This is part of a constructed string. All elements must match
      // |string_tag| up to the constructed bit and get appended to |out|
      // without a child element.
      if ((tag & ~CBS_ASN1_CONSTRUCTED) != string_tag) {
        return 0;
      }
      out_contents = out;
    } else {
      CBS_ASN1_TAG out_tag = tag;
      if ((tag & CBS_ASN1_CONSTRUCTED) && is_string_type(tag)) {
        // If a constructed string, clear the constructed bit and inform
        // children to concatenate bodies.
        out_tag &= ~CBS_ASN1_CONSTRUCTED;
        child_string_tag = out_tag;
      }
      if (!CBB_add_asn1(out, &out_contents_storage, out_tag)) {
        return 0;
      }
      out_contents = &out_contents_storage;
    }

    if (indefinite) {
      // Indefinite-length contents run until an EOC marker; recurse to
      // consume and re-encode them with a definite length.
      if (!cbs_convert_ber(in, out_contents, child_string_tag,
                           /*looking_for_eoc=*/1, depth + 1) ||
          !CBB_flush(out)) {
        return 0;
      }
      continue;
    }

    if (!CBS_skip(&contents, header_len)) {
      return 0;
    }

    if (tag & CBS_ASN1_CONSTRUCTED) {
      // Recurse into children.
      if (!cbs_convert_ber(&contents, out_contents, child_string_tag,
                           /*looking_for_eoc=*/0, depth + 1)) {
        return 0;
      }
    } else {
      // Copy primitive contents as-is.
      if (!CBB_add_bytes(out_contents, CBS_data(&contents),
                         CBS_len(&contents))) {
        return 0;
      }
    }

    if (!CBB_flush(out)) {
      return 0;
    }
  }

  // If we were still looking for an EOC when the input ran out, the element
  // was truncated.
  return looking_for_eoc == 0;
}

int CBS_asn1_ber_to_der(CBS *in, CBS *out, uint8_t **out_storage) {
  CBB cbb;

  // First, do a quick walk to find any indefinite-length elements. Most of the
  // time we hope that there aren't any and thus we can quickly return.
  int conversion_needed;
  if (!cbs_find_ber(in, &conversion_needed, 0)) {
    return 0;
  }

  if (!conversion_needed) {
    // Already DER: |out| aliases |in| directly and no copy is allocated.
    if (!CBS_get_any_asn1_element(in, out, NULL, NULL)) {
      return 0;
    }
    *out_storage = NULL;
    return 1;
  }

  size_t len;
  if (!CBB_init(&cbb, CBS_len(in)) ||
      !cbs_convert_ber(in, &cbb, 0, 0, 0) ||
      !CBB_finish(&cbb, out_storage, &len)) {
    CBB_cleanup(&cbb);
    return 0;
  }

  // |out| aliases the converted buffer; the caller owns |*out_storage|.
  CBS_init(out, *out_storage, len);
  return 1;
}

int CBS_get_asn1_implicit_string(CBS *in, CBS *out, uint8_t **out_storage,
                                 CBS_ASN1_TAG outer_tag,
                                 CBS_ASN1_TAG inner_tag) {
  assert(!(outer_tag & CBS_ASN1_CONSTRUCTED));
  assert(!(inner_tag & CBS_ASN1_CONSTRUCTED));
  assert(is_string_type(inner_tag));

  if (CBS_peek_asn1_tag(in, outer_tag)) {
    // Normal implicitly-tagged string.
    *out_storage = NULL;
    return CBS_get_asn1(in, out, outer_tag);
  }

  // Otherwise, try to parse an implicitly-tagged constructed string.
  // |CBS_asn1_ber_to_der| is assumed to have run, so only allow one level deep
  // of nesting.
  CBB result;
  CBS child;
  if (!CBB_init(&result, CBS_len(in)) ||
      !CBS_get_asn1(in, &child, outer_tag | CBS_ASN1_CONSTRUCTED)) {
    goto err;
  }

  // Concatenate the bodies of each |inner_tag| fragment into |result|.
  while (CBS_len(&child) > 0) {
    CBS chunk;
    if (!CBS_get_asn1(&child, &chunk, inner_tag) ||
        !CBB_add_bytes(&result, CBS_data(&chunk), CBS_len(&chunk))) {
      goto err;
    }
  }

  uint8_t *data;
  size_t len;
  if (!CBB_finish(&result, &data, &len)) {
    goto err;
  }

  // The caller owns |*out_storage|; |out| aliases it.
  CBS_init(out, data, len);
  *out_storage = data;
  return 1;

err:
  CBB_cleanup(&result);
  return 0;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/bytestring/cbb.cc
================================================
/* Copyright 2014 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ #include #include #include #include #include #include #include "../internal.h" void CBB_zero(CBB *cbb) { OPENSSL_memset(cbb, 0, sizeof(CBB)); } static void cbb_init(CBB *cbb, uint8_t *buf, size_t cap, int can_resize) { cbb->is_child = 0; cbb->child = NULL; cbb->u.base.buf = buf; cbb->u.base.len = 0; cbb->u.base.cap = cap; cbb->u.base.can_resize = can_resize; cbb->u.base.error = 0; } int CBB_init(CBB *cbb, size_t initial_capacity) { CBB_zero(cbb); uint8_t *buf = reinterpret_cast(OPENSSL_malloc(initial_capacity)); if (initial_capacity > 0 && buf == NULL) { return 0; } cbb_init(cbb, buf, initial_capacity, /*can_resize=*/1); return 1; } int CBB_init_fixed(CBB *cbb, uint8_t *buf, size_t len) { CBB_zero(cbb); cbb_init(cbb, buf, len, /*can_resize=*/0); return 1; } void CBB_cleanup(CBB *cbb) { // Child |CBB|s are non-owning. They are implicitly discarded and should not // be used with |CBB_cleanup| or |ScopedCBB|. assert(!cbb->is_child); if (cbb->is_child) { return; } if (cbb->u.base.can_resize) { OPENSSL_free(cbb->u.base.buf); } } static int cbb_buffer_reserve(struct cbb_buffer_st *base, uint8_t **out, size_t len) { if (base == NULL) { return 0; } size_t newlen = base->len + len; if (newlen < base->len) { // Overflow OPENSSL_PUT_ERROR(CRYPTO, ERR_R_OVERFLOW); goto err; } if (newlen > base->cap) { if (!base->can_resize) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_OVERFLOW); goto err; } size_t newcap = base->cap * 2; if (newcap < base->cap || newcap < newlen) { newcap = newlen; } uint8_t *newbuf = reinterpret_cast(OPENSSL_realloc(base->buf, newcap)); if (newbuf == NULL) { goto err; } base->buf = newbuf; base->cap = newcap; } if (out) { *out = base->buf + base->len; } return 1; err: base->error = 1; return 0; } static int cbb_buffer_add(struct cbb_buffer_st *base, uint8_t **out, size_t len) { if (!cbb_buffer_reserve(base, out, len)) { return 0; } // This will not overflow or |cbb_buffer_reserve| would have failed. 
base->len += len; return 1; } int CBB_finish(CBB *cbb, uint8_t **out_data, size_t *out_len) { if (cbb->is_child) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } if (!CBB_flush(cbb)) { return 0; } if (cbb->u.base.can_resize && (out_data == NULL || out_len == NULL)) { // |out_data| and |out_len| can only be NULL if the CBB is fixed. return 0; } if (out_data != NULL) { *out_data = cbb->u.base.buf; } if (out_len != NULL) { *out_len = cbb->u.base.len; } cbb->u.base.buf = NULL; CBB_cleanup(cbb); return 1; } static struct cbb_buffer_st *cbb_get_base(CBB *cbb) { if (cbb->is_child) { return cbb->u.child.base; } return &cbb->u.base; } static void cbb_on_error(CBB *cbb) { // Due to C's lack of destructors and |CBB|'s auto-flushing API, a failing // |CBB|-taking function may leave a dangling pointer to a child |CBB|. As a // result, the convention is callers may not write to |CBB|s that have failed. // But, as a safety measure, we lock the |CBB| into an error state. Once the // error bit is set, |cbb->child| will not be read. // // TODO(davidben): This still isn't quite ideal. A |CBB| function *outside* // this file may originate an error while the |CBB| points to a local child. // In that case we don't set the error bit and are reliant on the error // convention. Perhaps we allow |CBB_cleanup| on child |CBB|s and make every // child's |CBB_cleanup| set the error bit if unflushed. That will be // convenient for C++ callers, but very tedious for C callers. So C callers // perhaps should get a |CBB_on_error| function that can be, less tediously, // stuck in a |goto err| block. cbb_get_base(cbb)->error = 1; // Clearing the pointer is not strictly necessary, but GCC's dangling pointer // warning does not know |cbb->child| will not be read once |error| is set // above. cbb->child = NULL; } // CBB_flush recurses and then writes out any pending length prefix. 
The // current length of the underlying base is taken to be the length of the // length-prefixed data. int CBB_flush(CBB *cbb) { // If |base| has hit an error, the buffer is in an undefined state, so // fail all following calls. In particular, |cbb->child| may point to invalid // memory. struct cbb_buffer_st *base = cbb_get_base(cbb); if (base == NULL || base->error) { return 0; } if (cbb->child == NULL) { // Nothing to flush. return 1; } assert(cbb->child->is_child); struct cbb_child_st *child = &cbb->child->u.child; assert(child->base == base); size_t child_start = child->offset + child->pending_len_len; size_t len; if (!CBB_flush(cbb->child) || child_start < child->offset || base->len < child_start) { goto err; } len = base->len - child_start; if (child->pending_is_asn1) { // For ASN.1 we assume that we'll only need a single byte for the length. // If that turned out to be incorrect, we have to move the contents along // in order to make space. uint8_t len_len; uint8_t initial_length_byte; assert(child->pending_len_len == 1); if (len > 0xfffffffe) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_OVERFLOW); // Too large. goto err; } else if (len > 0xffffff) { len_len = 5; initial_length_byte = 0x80 | 4; } else if (len > 0xffff) { len_len = 4; initial_length_byte = 0x80 | 3; } else if (len > 0xff) { len_len = 3; initial_length_byte = 0x80 | 2; } else if (len > 0x7f) { len_len = 2; initial_length_byte = 0x80 | 1; } else { len_len = 1; initial_length_byte = (uint8_t)len; len = 0; } if (len_len != 1) { // We need to move the contents along in order to make space. 
size_t extra_bytes = len_len - 1; if (!cbb_buffer_add(base, NULL, extra_bytes)) { goto err; } OPENSSL_memmove(base->buf + child_start + extra_bytes, base->buf + child_start, len); } base->buf[child->offset++] = initial_length_byte; child->pending_len_len = len_len - 1; } for (size_t i = child->pending_len_len - 1; i < child->pending_len_len; i--) { base->buf[child->offset + i] = (uint8_t)len; len >>= 8; } if (len != 0) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_OVERFLOW); goto err; } child->base = NULL; cbb->child = NULL; return 1; err: cbb_on_error(cbb); return 0; } const uint8_t *CBB_data(const CBB *cbb) { assert(cbb->child == NULL); if (cbb->is_child) { return cbb->u.child.base->buf + cbb->u.child.offset + cbb->u.child.pending_len_len; } return cbb->u.base.buf; } size_t CBB_len(const CBB *cbb) { assert(cbb->child == NULL); if (cbb->is_child) { assert(cbb->u.child.offset + cbb->u.child.pending_len_len <= cbb->u.child.base->len); return cbb->u.child.base->len - cbb->u.child.offset - cbb->u.child.pending_len_len; } return cbb->u.base.len; } static int cbb_add_child(CBB *cbb, CBB *out_child, uint8_t len_len, int is_asn1) { assert(cbb->child == NULL); assert(!is_asn1 || len_len == 1); struct cbb_buffer_st *base = cbb_get_base(cbb); size_t offset = base->len; // Reserve space for the length prefix. 
uint8_t *prefix_bytes; if (!cbb_buffer_add(base, &prefix_bytes, len_len)) { return 0; } OPENSSL_memset(prefix_bytes, 0, len_len); CBB_zero(out_child); out_child->is_child = 1; out_child->u.child.base = base; out_child->u.child.offset = offset; out_child->u.child.pending_len_len = len_len; out_child->u.child.pending_is_asn1 = is_asn1; cbb->child = out_child; return 1; } static int cbb_add_length_prefixed(CBB *cbb, CBB *out_contents, uint8_t len_len) { if (!CBB_flush(cbb)) { return 0; } return cbb_add_child(cbb, out_contents, len_len, /*is_asn1=*/0); } int CBB_add_u8_length_prefixed(CBB *cbb, CBB *out_contents) { return cbb_add_length_prefixed(cbb, out_contents, 1); } int CBB_add_u16_length_prefixed(CBB *cbb, CBB *out_contents) { return cbb_add_length_prefixed(cbb, out_contents, 2); } int CBB_add_u24_length_prefixed(CBB *cbb, CBB *out_contents) { return cbb_add_length_prefixed(cbb, out_contents, 3); } // add_base128_integer encodes |v| as a big-endian base-128 integer where the // high bit of each byte indicates where there is more data. This is the // encoding used in DER for both high tag number form and OID components. static int add_base128_integer(CBB *cbb, uint64_t v) { unsigned len_len = 0; uint64_t copy = v; while (copy > 0) { len_len++; copy >>= 7; } if (len_len == 0) { len_len = 1; // Zero is encoded with one byte. } for (unsigned i = len_len - 1; i < len_len; i--) { uint8_t byte = (v >> (7 * i)) & 0x7f; if (i != 0) { // The high bit denotes whether there is more data. byte |= 0x80; } if (!CBB_add_u8(cbb, byte)) { return 0; } } return 1; } int CBB_add_asn1(CBB *cbb, CBB *out_contents, CBS_ASN1_TAG tag) { if (!CBB_flush(cbb)) { return 0; } // Split the tag into leading bits and tag number. uint8_t tag_bits = (tag >> CBS_ASN1_TAG_SHIFT) & 0xe0; CBS_ASN1_TAG tag_number = tag & CBS_ASN1_TAG_NUMBER_MASK; if (tag_number >= 0x1f) { // Set all the bits in the tag number to signal high tag number form. 
if (!CBB_add_u8(cbb, tag_bits | 0x1f) || !add_base128_integer(cbb, tag_number)) { return 0; } } else if (!CBB_add_u8(cbb, tag_bits | tag_number)) { return 0; } // Reserve one byte of length prefix. |CBB_flush| will finish it later. return cbb_add_child(cbb, out_contents, /*len_len=*/1, /*is_asn1=*/1); } int CBB_add_bytes(CBB *cbb, const uint8_t *data, size_t len) { uint8_t *out; if (!CBB_add_space(cbb, &out, len)) { return 0; } OPENSSL_memcpy(out, data, len); return 1; } int CBB_add_zeros(CBB *cbb, size_t len) { uint8_t *out; if (!CBB_add_space(cbb, &out, len)) { return 0; } OPENSSL_memset(out, 0, len); return 1; } int CBB_add_space(CBB *cbb, uint8_t **out_data, size_t len) { if (!CBB_flush(cbb) || !cbb_buffer_add(cbb_get_base(cbb), out_data, len)) { return 0; } return 1; } int CBB_reserve(CBB *cbb, uint8_t **out_data, size_t len) { if (!CBB_flush(cbb) || !cbb_buffer_reserve(cbb_get_base(cbb), out_data, len)) { return 0; } return 1; } int CBB_did_write(CBB *cbb, size_t len) { struct cbb_buffer_st *base = cbb_get_base(cbb); size_t newlen = base->len + len; if (cbb->child != NULL || newlen < base->len || newlen > base->cap) { return 0; } base->len = newlen; return 1; } static int cbb_add_u(CBB *cbb, uint64_t v, size_t len_len) { uint8_t *buf; if (!CBB_add_space(cbb, &buf, len_len)) { return 0; } for (size_t i = len_len - 1; i < len_len; i--) { buf[i] = v; v >>= 8; } // |v| must fit in |len_len| bytes. 
if (v != 0) { cbb_on_error(cbb); return 0; } return 1; } int CBB_add_u8(CBB *cbb, uint8_t value) { return cbb_add_u(cbb, value, 1); } int CBB_add_u16(CBB *cbb, uint16_t value) { return cbb_add_u(cbb, value, 2); } int CBB_add_u16le(CBB *cbb, uint16_t value) { return CBB_add_u16(cbb, CRYPTO_bswap2(value)); } int CBB_add_u24(CBB *cbb, uint32_t value) { return cbb_add_u(cbb, value, 3); } int CBB_add_u32(CBB *cbb, uint32_t value) { return cbb_add_u(cbb, value, 4); } int CBB_add_u32le(CBB *cbb, uint32_t value) { return CBB_add_u32(cbb, CRYPTO_bswap4(value)); } int CBB_add_u64(CBB *cbb, uint64_t value) { return cbb_add_u(cbb, value, 8); } int CBB_add_u64le(CBB *cbb, uint64_t value) { return CBB_add_u64(cbb, CRYPTO_bswap8(value)); } void CBB_discard_child(CBB *cbb) { if (cbb->child == NULL) { return; } struct cbb_buffer_st *base = cbb_get_base(cbb); assert(cbb->child->is_child); base->len = cbb->child->u.child.offset; cbb->child->u.child.base = NULL; cbb->child = NULL; } int CBB_add_asn1_uint64(CBB *cbb, uint64_t value) { return CBB_add_asn1_uint64_with_tag(cbb, value, CBS_ASN1_INTEGER); } int CBB_add_asn1_uint64_with_tag(CBB *cbb, uint64_t value, CBS_ASN1_TAG tag) { CBB child; int started = 0; if (!CBB_add_asn1(cbb, &child, tag)) { goto err; } for (size_t i = 0; i < 8; i++) { uint8_t byte = (value >> 8 * (7 - i)) & 0xff; if (!started) { if (byte == 0) { // Don't encode leading zeros. continue; } // If the high bit is set, add a padding byte to make it // unsigned. if ((byte & 0x80) && !CBB_add_u8(&child, 0)) { goto err; } started = 1; } if (!CBB_add_u8(&child, byte)) { goto err; } } // 0 is encoded as a single 0, not the empty string. 
if (!started && !CBB_add_u8(&child, 0)) { goto err; } return CBB_flush(cbb); err: cbb_on_error(cbb); return 0; } int CBB_add_asn1_int64(CBB *cbb, int64_t value) { return CBB_add_asn1_int64_with_tag(cbb, value, CBS_ASN1_INTEGER); } int CBB_add_asn1_int64_with_tag(CBB *cbb, int64_t value, CBS_ASN1_TAG tag) { if (value >= 0) { return CBB_add_asn1_uint64_with_tag(cbb, (uint64_t)value, tag); } uint8_t bytes[sizeof(int64_t)]; memcpy(bytes, &value, sizeof(value)); int start = 7; // Skip leading sign-extension bytes unless they are necessary. while (start > 0 && (bytes[start] == 0xff && (bytes[start - 1] & 0x80))) { start--; } CBB child; if (!CBB_add_asn1(cbb, &child, tag)) { goto err; } for (int i = start; i >= 0; i--) { if (!CBB_add_u8(&child, bytes[i])) { goto err; } } return CBB_flush(cbb); err: cbb_on_error(cbb); return 0; } int CBB_add_asn1_octet_string(CBB *cbb, const uint8_t *data, size_t data_len) { CBB child; if (!CBB_add_asn1(cbb, &child, CBS_ASN1_OCTETSTRING) || !CBB_add_bytes(&child, data, data_len) || !CBB_flush(cbb)) { cbb_on_error(cbb); return 0; } return 1; } int CBB_add_asn1_bool(CBB *cbb, int value) { CBB child; if (!CBB_add_asn1(cbb, &child, CBS_ASN1_BOOLEAN) || !CBB_add_u8(&child, value != 0 ? 0xff : 0) || !CBB_flush(cbb)) { cbb_on_error(cbb); return 0; } return 1; } // parse_dotted_decimal parses one decimal component from |cbs|, where |cbs| is // an OID literal, e.g., "1.2.840.113554.4.1.72585". It consumes both the // component and the dot, so |cbs| may be passed into the function again for the // next value. static int parse_dotted_decimal(CBS *cbs, uint64_t *out) { if (!CBS_get_u64_decimal(cbs, out)) { return 0; } // The integer must have either ended at the end of the string, or a // non-terminal dot, which should be consumed. If the string ends with a dot, // this is not a valid OID string. uint8_t dot; return !CBS_get_u8(cbs, &dot) || (dot == '.' 
&& CBS_len(cbs) > 0); } int CBB_add_asn1_oid_from_text(CBB *cbb, const char *text, size_t len) { if (!CBB_flush(cbb)) { return 0; } CBS cbs; CBS_init(&cbs, (const uint8_t *)text, len); // OIDs must have at least two components. uint64_t a, b; if (!parse_dotted_decimal(&cbs, &a) || !parse_dotted_decimal(&cbs, &b)) { return 0; } // The first component is encoded as 40 * |a| + |b|. This assumes that |a| is // 0, 1, or 2 and that, when it is 0 or 1, |b| is at most 39. if (a > 2 || (a < 2 && b > 39) || b > UINT64_MAX - 80 || !add_base128_integer(cbb, 40u * a + b)) { return 0; } // The remaining components are encoded unmodified. while (CBS_len(&cbs) > 0) { if (!parse_dotted_decimal(&cbs, &a) || !add_base128_integer(cbb, a)) { return 0; } } return 1; } static int compare_set_of_element(const void *a_ptr, const void *b_ptr) { // See X.690, section 11.6 for the ordering. They are sorted in ascending // order by their DER encoding. const CBS *a = reinterpret_cast(a_ptr), *b = reinterpret_cast(b_ptr); size_t a_len = CBS_len(a), b_len = CBS_len(b); size_t min_len = a_len < b_len ? a_len : b_len; int ret = OPENSSL_memcmp(CBS_data(a), CBS_data(b), min_len); if (ret != 0) { return ret; } if (a_len == b_len) { return 0; } // If one is a prefix of the other, the shorter one sorts first. (This is not // actually reachable. No DER encoding is a prefix of another DER encoding.) return a_len < b_len ? -1 : 1; } int CBB_flush_asn1_set_of(CBB *cbb) { if (!CBB_flush(cbb)) { return 0; } CBS cbs; size_t num_children = 0; CBS_init(&cbs, CBB_data(cbb), CBB_len(cbb)); while (CBS_len(&cbs) != 0) { if (!CBS_get_any_asn1_element(&cbs, NULL, NULL, NULL)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } num_children++; } if (num_children < 2) { return 1; // Nothing to do. This is the common case for X.509. } // Parse out the children and sort. We alias them into a copy of so they // remain valid as we rewrite |cbb|. 
int ret = 0; size_t buf_len = CBB_len(cbb); uint8_t *buf = reinterpret_cast(OPENSSL_memdup(CBB_data(cbb), buf_len)); CBS *children = reinterpret_cast(OPENSSL_calloc(num_children, sizeof(CBS))); uint8_t *out; size_t offset = 0; if (buf == NULL || children == NULL) { goto err; } CBS_init(&cbs, buf, buf_len); for (size_t i = 0; i < num_children; i++) { if (!CBS_get_any_asn1_element(&cbs, &children[i], NULL, NULL)) { goto err; } } qsort(children, num_children, sizeof(CBS), compare_set_of_element); // Write the contents back in the new order. out = (uint8_t *)CBB_data(cbb); for (size_t i = 0; i < num_children; i++) { OPENSSL_memcpy(out + offset, CBS_data(&children[i]), CBS_len(&children[i])); offset += CBS_len(&children[i]); } assert(offset == buf_len); ret = 1; err: OPENSSL_free(buf); OPENSSL_free(children); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bytestring/cbs.cc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include "../asn1/internal.h" #include "../internal.h" #include "internal.h" static int cbs_get(CBS *cbs, const uint8_t **p, size_t n) { if (cbs->len < n) { return 0; } *p = cbs->data; cbs->data += n; cbs->len -= n; return 1; } int CBS_skip(CBS *cbs, size_t len) { const uint8_t *dummy; return cbs_get(cbs, &dummy, len); } int CBS_stow(const CBS *cbs, uint8_t **out_ptr, size_t *out_len) { OPENSSL_free(*out_ptr); *out_ptr = NULL; *out_len = 0; if (cbs->len == 0) { return 1; } *out_ptr = reinterpret_cast(OPENSSL_memdup(cbs->data, cbs->len)); if (*out_ptr == NULL) { return 0; } *out_len = cbs->len; return 1; } int CBS_strdup(const CBS *cbs, char **out_ptr) { if (*out_ptr != NULL) { OPENSSL_free(*out_ptr); } *out_ptr = OPENSSL_strndup((const char *)cbs->data, cbs->len); return (*out_ptr != NULL); } int CBS_contains_zero_byte(const CBS *cbs) { return OPENSSL_memchr(cbs->data, 0, cbs->len) != NULL; } int CBS_mem_equal(const CBS *cbs, const uint8_t *data, size_t len) { if (len != cbs->len) { return 0; } return CRYPTO_memcmp(cbs->data, data, len) == 0; } static int cbs_get_u(CBS *cbs, uint64_t *out, size_t len) { uint64_t result = 0; const uint8_t *data; if (!cbs_get(cbs, &data, len)) { return 0; } for (size_t i = 0; i < len; i++) { result <<= 8; result |= data[i]; } *out = result; return 1; } int CBS_get_u8(CBS *cbs, uint8_t *out) { const uint8_t *v; if (!cbs_get(cbs, &v, 1)) { return 0; } *out = *v; return 1; } int CBS_get_u16(CBS *cbs, uint16_t *out) { uint64_t v; if (!cbs_get_u(cbs, &v, 2)) { return 0; } *out = v; return 1; } int CBS_get_u16le(CBS *cbs, uint16_t *out) { if (!CBS_get_u16(cbs, out)) { return 0; } *out = CRYPTO_bswap2(*out); return 1; } int CBS_get_u24(CBS *cbs, uint32_t *out) { uint64_t v; if (!cbs_get_u(cbs, &v, 3)) { return 0; } *out = (uint32_t)v; return 1; } int CBS_get_u32(CBS *cbs, uint32_t *out) { uint64_t v; if (!cbs_get_u(cbs, &v, 4)) { return 0; } *out = (uint32_t)v; return 1; } 
int CBS_get_u32le(CBS *cbs, uint32_t *out) { if (!CBS_get_u32(cbs, out)) { return 0; } *out = CRYPTO_bswap4(*out); return 1; } int CBS_get_u64(CBS *cbs, uint64_t *out) { return cbs_get_u(cbs, out, 8); } int CBS_get_u64le(CBS *cbs, uint64_t *out) { if (!cbs_get_u(cbs, out, 8)) { return 0; } *out = CRYPTO_bswap8(*out); return 1; } int CBS_get_last_u8(CBS *cbs, uint8_t *out) { if (cbs->len == 0) { return 0; } *out = cbs->data[cbs->len - 1]; cbs->len--; return 1; } int CBS_get_bytes(CBS *cbs, CBS *out, size_t len) { const uint8_t *v; if (!cbs_get(cbs, &v, len)) { return 0; } CBS_init(out, v, len); return 1; } int CBS_copy_bytes(CBS *cbs, uint8_t *out, size_t len) { const uint8_t *v; if (!cbs_get(cbs, &v, len)) { return 0; } OPENSSL_memcpy(out, v, len); return 1; } static int cbs_get_length_prefixed(CBS *cbs, CBS *out, size_t len_len) { uint64_t len; if (!cbs_get_u(cbs, &len, len_len)) { return 0; } // If |len_len| <= 3 then we know that |len| will fit into a |size_t|, even on // 32-bit systems. assert(len_len <= 3); return CBS_get_bytes(cbs, out, len); } int CBS_get_u8_length_prefixed(CBS *cbs, CBS *out) { return cbs_get_length_prefixed(cbs, out, 1); } int CBS_get_u16_length_prefixed(CBS *cbs, CBS *out) { return cbs_get_length_prefixed(cbs, out, 2); } int CBS_get_u24_length_prefixed(CBS *cbs, CBS *out) { return cbs_get_length_prefixed(cbs, out, 3); } int CBS_get_until_first(CBS *cbs, CBS *out, uint8_t c) { const uint8_t *split = reinterpret_cast( OPENSSL_memchr(CBS_data(cbs), c, CBS_len(cbs))); if (split == NULL) { return 0; } return CBS_get_bytes(cbs, out, split - CBS_data(cbs)); } int CBS_get_u64_decimal(CBS *cbs, uint64_t *out) { uint64_t v = 0; int seen_digit = 0; while (CBS_len(cbs) != 0) { uint8_t c = CBS_data(cbs)[0]; if (!OPENSSL_isdigit(c)) { break; } CBS_skip(cbs, 1); if (/* Forbid stray leading zeros */ (v == 0 && seen_digit) || // Check for overflow. 
v > UINT64_MAX / 10 || // v * 10 > UINT64_MAX - (c - '0')) { return 0; } v = v * 10 + (c - '0'); seen_digit = 1; } *out = v; return seen_digit; } // parse_base128_integer reads a big-endian base-128 integer from |cbs| and sets // |*out| to the result. This is the encoding used in DER for both high tag // number form and OID components. static int parse_base128_integer(CBS *cbs, uint64_t *out) { uint64_t v = 0; uint8_t b; do { if (!CBS_get_u8(cbs, &b)) { return 0; } if ((v >> (64 - 7)) != 0) { // The value is too large. return 0; } if (v == 0 && b == 0x80) { // The value must be minimally encoded. return 0; } v = (v << 7) | (b & 0x7f); // Values end at an octet with the high bit cleared. } while (b & 0x80); *out = v; return 1; } static int parse_asn1_tag(CBS *cbs, CBS_ASN1_TAG *out) { uint8_t tag_byte; if (!CBS_get_u8(cbs, &tag_byte)) { return 0; } // ITU-T X.690 section 8.1.2.3 specifies the format for identifiers with a tag // number no greater than 30. // // If the number portion is 31 (0x1f, the largest value that fits in the // allotted bits), then the tag is more than one byte long and the // continuation bytes contain the tag number. CBS_ASN1_TAG tag = ((CBS_ASN1_TAG)tag_byte & 0xe0) << CBS_ASN1_TAG_SHIFT; CBS_ASN1_TAG tag_number = tag_byte & 0x1f; if (tag_number == 0x1f) { uint64_t v; if (!parse_base128_integer(cbs, &v) || // Check the tag number is within our supported bounds. v > CBS_ASN1_TAG_NUMBER_MASK || // Small tag numbers should have used low tag number form, even in BER. v < 0x1f) { return 0; } tag_number = (CBS_ASN1_TAG)v; } tag |= tag_number; // Tag [UNIVERSAL 0] is reserved for use by the encoding. Reject it here to // avoid some ambiguity around ANY values and BER indefinite-length EOCs. See // https://crbug.com/boringssl/455. 
if ((tag & ~CBS_ASN1_CONSTRUCTED) == 0) { return 0; } *out = tag; return 1; } static int cbs_get_any_asn1_element(CBS *cbs, CBS *out, CBS_ASN1_TAG *out_tag, size_t *out_header_len, int *out_ber_found, int *out_indefinite, int ber_ok) { CBS header = *cbs; CBS throwaway; if (out == NULL) { out = &throwaway; } if (ber_ok) { *out_ber_found = 0; *out_indefinite = 0; } else { assert(out_ber_found == NULL); assert(out_indefinite == NULL); } CBS_ASN1_TAG tag; if (!parse_asn1_tag(&header, &tag)) { return 0; } if (out_tag != NULL) { *out_tag = tag; } uint8_t length_byte; if (!CBS_get_u8(&header, &length_byte)) { return 0; } size_t header_len = CBS_len(cbs) - CBS_len(&header); size_t len; // The format for the length encoding is specified in ITU-T X.690 section // 8.1.3. if ((length_byte & 0x80) == 0) { // Short form length. len = ((size_t)length_byte) + header_len; if (out_header_len != NULL) { *out_header_len = header_len; } } else { // The high bit indicate that this is the long form, while the next 7 bits // encode the number of subsequent octets used to encode the length (ITU-T // X.690 clause 8.1.3.5.b). const size_t num_bytes = length_byte & 0x7f; uint64_t len64; if (ber_ok && (tag & CBS_ASN1_CONSTRUCTED) != 0 && num_bytes == 0) { // indefinite length if (out_header_len != NULL) { *out_header_len = header_len; } *out_ber_found = 1; *out_indefinite = 1; return CBS_get_bytes(cbs, out, header_len); } // ITU-T X.690 clause 8.1.3.5.c specifies that the value 0xff shall not be // used as the first byte of the length. If this parser encounters that // value, num_bytes will be parsed as 127, which will fail this check. if (num_bytes == 0 || num_bytes > 4) { return 0; } if (!cbs_get_u(&header, &len64, num_bytes)) { return 0; } // ITU-T X.690 section 10.1 (DER length forms) requires encoding the // length with the minimum number of octets. BER could, technically, have // 125 superfluous zero bytes. 
We do not attempt to handle that and still // require that the length fit in a |uint32_t| for BER. if (len64 < 128) { // Length should have used short-form encoding. if (ber_ok) { *out_ber_found = 1; } else { return 0; } } if ((len64 >> ((num_bytes - 1) * 8)) == 0) { // Length should have been at least one byte shorter. if (ber_ok) { *out_ber_found = 1; } else { return 0; } } len = len64; if (len + header_len + num_bytes < len) { // Overflow. return 0; } len += header_len + num_bytes; if (out_header_len != NULL) { *out_header_len = header_len + num_bytes; } } return CBS_get_bytes(cbs, out, len); } int CBS_get_any_asn1(CBS *cbs, CBS *out, CBS_ASN1_TAG *out_tag) { size_t header_len; if (!CBS_get_any_asn1_element(cbs, out, out_tag, &header_len)) { return 0; } if (!CBS_skip(out, header_len)) { assert(0); return 0; } return 1; } int CBS_get_any_asn1_element(CBS *cbs, CBS *out, CBS_ASN1_TAG *out_tag, size_t *out_header_len) { return cbs_get_any_asn1_element(cbs, out, out_tag, out_header_len, NULL, NULL, /*ber_ok=*/0); } int CBS_get_any_ber_asn1_element(CBS *cbs, CBS *out, CBS_ASN1_TAG *out_tag, size_t *out_header_len, int *out_ber_found, int *out_indefinite) { int ber_found_temp; return cbs_get_any_asn1_element( cbs, out, out_tag, out_header_len, out_ber_found ? 
out_ber_found : &ber_found_temp, out_indefinite, /*ber_ok=*/1); } static int cbs_get_asn1(CBS *cbs, CBS *out, CBS_ASN1_TAG tag_value, int skip_header) { size_t header_len; CBS_ASN1_TAG tag; CBS throwaway; if (out == NULL) { out = &throwaway; } if (!CBS_get_any_asn1_element(cbs, out, &tag, &header_len) || tag != tag_value) { return 0; } if (skip_header && !CBS_skip(out, header_len)) { assert(0); return 0; } return 1; } int CBS_get_asn1(CBS *cbs, CBS *out, CBS_ASN1_TAG tag_value) { return cbs_get_asn1(cbs, out, tag_value, 1 /* skip header */); } int CBS_get_asn1_element(CBS *cbs, CBS *out, CBS_ASN1_TAG tag_value) { return cbs_get_asn1(cbs, out, tag_value, 0 /* include header */); } int CBS_peek_asn1_tag(const CBS *cbs, CBS_ASN1_TAG tag_value) { CBS copy = *cbs; CBS_ASN1_TAG actual_tag; return parse_asn1_tag(©, &actual_tag) && tag_value == actual_tag; } int CBS_get_asn1_uint64(CBS *cbs, uint64_t *out) { CBS bytes; if (!CBS_get_asn1(cbs, &bytes, CBS_ASN1_INTEGER) || !CBS_is_unsigned_asn1_integer(&bytes)) { return 0; } *out = 0; const uint8_t *data = CBS_data(&bytes); size_t len = CBS_len(&bytes); for (size_t i = 0; i < len; i++) { if ((*out >> 56) != 0) { // Too large to represent as a uint64_t. return 0; } *out <<= 8; *out |= data[i]; } return 1; } int CBS_get_asn1_int64(CBS *cbs, int64_t *out) { int is_negative; CBS bytes; if (!CBS_get_asn1(cbs, &bytes, CBS_ASN1_INTEGER) || !CBS_is_valid_asn1_integer(&bytes, &is_negative)) { return 0; } const uint8_t *data = CBS_data(&bytes); const size_t len = CBS_len(&bytes); if (len > sizeof(int64_t)) { return 0; } uint8_t sign_extend[sizeof(int64_t)]; OPENSSL_memset(sign_extend, is_negative ? 
0xff : 0, sizeof(sign_extend));
  OPENSSL_memcpy(sign_extend + sizeof(int64_t) - len, data, len);
  *out = CRYPTO_load_u64_be(sign_extend);
  return 1;
}

// CBS_get_asn1_bool reads a DER BOOLEAN from |cbs| and writes 0 or 1 to
// |*out|. Returns one on success and zero on error.
int CBS_get_asn1_bool(CBS *cbs, int *out) {
  CBS bytes;
  if (!CBS_get_asn1(cbs, &bytes, CBS_ASN1_BOOLEAN) || CBS_len(&bytes) != 1) {
    return 0;
  }

  const uint8_t value = *CBS_data(&bytes);
  // DER requires TRUE be encoded as 0xff and FALSE as 0x00; reject anything
  // else.
  if (value != 0 && value != 0xff) {
    return 0;
  }

  *out = !!value;
  return 1;
}

// CBS_get_optional_asn1 reads an OPTIONAL element with tag |tag| from |cbs|.
// If present, |out| is set to its body and |*out_present| (if not NULL) to
// one; otherwise |cbs| is left unchanged and |*out_present| set to zero.
int CBS_get_optional_asn1(CBS *cbs, CBS *out, int *out_present,
                          CBS_ASN1_TAG tag) {
  int present = 0;

  if (CBS_peek_asn1_tag(cbs, tag)) {
    if (!CBS_get_asn1(cbs, out, tag)) {
      return 0;
    }
    present = 1;
  }

  if (out_present != NULL) {
    *out_present = present;
  }

  return 1;
}

// CBS_get_optional_asn1_octet_string reads an OPTIONAL, explicitly-tagged
// OCTET STRING. If absent, |out| is initialized empty.
int CBS_get_optional_asn1_octet_string(CBS *cbs, CBS *out, int *out_present,
                                       CBS_ASN1_TAG tag) {
  CBS child;
  int present;
  if (!CBS_get_optional_asn1(cbs, &child, &present, tag)) {
    return 0;
  }
  if (present) {
    assert(out);
    // The outer element must contain exactly the OCTET STRING and nothing
    // else.
    if (!CBS_get_asn1(&child, out, CBS_ASN1_OCTETSTRING) ||
        CBS_len(&child) != 0) {
      return 0;
    }
  } else {
    CBS_init(out, NULL, 0);
  }
  if (out_present) {
    *out_present = present;
  }
  return 1;
}

// CBS_get_optional_asn1_uint64 reads an OPTIONAL, explicitly-tagged INTEGER.
// If absent, |*out| is set to |default_value|.
int CBS_get_optional_asn1_uint64(CBS *cbs, uint64_t *out, CBS_ASN1_TAG tag,
                                 uint64_t default_value) {
  CBS child;
  int present;
  if (!CBS_get_optional_asn1(cbs, &child, &present, tag)) {
    return 0;
  }
  if (present) {
    if (!CBS_get_asn1_uint64(&child, out) ||
        CBS_len(&child) != 0) {
      return 0;
    }
  } else {
    *out = default_value;
  }
  return 1;
}

// CBS_get_optional_asn1_bool reads an OPTIONAL, explicitly-tagged BOOLEAN.
// If absent, |*out| is set to |default_value|.
int CBS_get_optional_asn1_bool(CBS *cbs, int *out, CBS_ASN1_TAG tag,
                               int default_value) {
  CBS child, child2;
  int present;
  if (!CBS_get_optional_asn1(cbs, &child, &present, tag)) {
    return 0;
  }
  if (present) {
    uint8_t boolean;

    // The outer element must contain exactly a one-byte BOOLEAN body.
    if (!CBS_get_asn1(&child, &child2, CBS_ASN1_BOOLEAN) ||
        CBS_len(&child2) != 1 ||
        CBS_len(&child) != 0) {
      return 0;
    }

    // DER: 0x00 is FALSE, 0xff is TRUE; anything else is invalid.
    boolean = CBS_data(&child2)[0];
    if (boolean == 0) {
      *out = 0;
    } else if (boolean == 0xff) {
      *out = 1;
    } else {
      return 0;
    }
  } else {
    *out = default_value;
  }
  return 1;
}

int CBS_is_valid_asn1_bitstring(const CBS *cbs) {
  CBS in = *cbs;
  uint8_t
num_unused_bits; if (!CBS_get_u8(&in, &num_unused_bits) || num_unused_bits > 7) { return 0; } if (num_unused_bits == 0) { return 1; } // All num_unused_bits bits must exist and be zeros. uint8_t last; if (!CBS_get_last_u8(&in, &last) || (last & ((1 << num_unused_bits) - 1)) != 0) { return 0; } return 1; } int CBS_asn1_bitstring_has_bit(const CBS *cbs, unsigned bit) { if (!CBS_is_valid_asn1_bitstring(cbs)) { return 0; } const unsigned byte_num = (bit >> 3) + 1; const unsigned bit_num = 7 - (bit & 7); // Unused bits are zero, and this function does not distinguish between // missing and unset bits. Thus it is sufficient to do a byte-level length // check. return byte_num < CBS_len(cbs) && (CBS_data(cbs)[byte_num] & (1 << bit_num)) != 0; } int CBS_is_valid_asn1_integer(const CBS *cbs, int *out_is_negative) { CBS copy = *cbs; uint8_t first_byte, second_byte; if (!CBS_get_u8(©, &first_byte)) { return 0; // INTEGERs may not be empty. } if (out_is_negative != NULL) { *out_is_negative = (first_byte & 0x80) != 0; } if (!CBS_get_u8(©, &second_byte)) { return 1; // One byte INTEGERs are always minimal. } if ((first_byte == 0x00 && (second_byte & 0x80) == 0) || (first_byte == 0xff && (second_byte & 0x80) != 0)) { return 0; // The value is minimal iff the first 9 bits are not all equal. } return 1; } int CBS_is_unsigned_asn1_integer(const CBS *cbs) { int is_negative; return CBS_is_valid_asn1_integer(cbs, &is_negative) && !is_negative; } static int add_decimal(CBB *out, uint64_t v) { char buf[DECIMAL_SIZE(uint64_t) + 1]; snprintf(buf, sizeof(buf), "%" PRIu64, v); return CBB_add_bytes(out, (const uint8_t *)buf, strlen(buf)); } int CBS_is_valid_asn1_oid(const CBS *cbs) { if (CBS_len(cbs) == 0) { return 0; // OID encodings cannot be empty. } CBS copy = *cbs; uint8_t v, prev = 0; while (CBS_get_u8(©, &v)) { // OID encodings are a sequence of minimally-encoded base-128 integers (see // |parse_base128_integer|). 
If |prev|'s MSB was clear, it was the last byte // of an integer (or |v| is the first byte). |v| is then the first byte of // the next integer. If first byte of an integer is 0x80, it is not // minimally-encoded. if ((prev & 0x80) == 0 && v == 0x80) { return 0; } prev = v; } // The last byte should must end an integer encoding. return (prev & 0x80) == 0; } char *CBS_asn1_oid_to_text(const CBS *cbs) { CBS copy = *cbs; CBB cbb; if (!CBB_init(&cbb, 32)) { goto err; } // The first component is 40 * value1 + value2, where value1 is 0, 1, or 2. uint64_t v; if (!parse_base128_integer(©, &v)) { goto err; } if (v >= 80) { if (!CBB_add_bytes(&cbb, (const uint8_t *)"2.", 2) || !add_decimal(&cbb, v - 80)) { goto err; } } else if (!add_decimal(&cbb, v / 40) || !CBB_add_u8(&cbb, '.') || !add_decimal(&cbb, v % 40)) { goto err; } while (CBS_len(©) != 0) { if (!parse_base128_integer(©, &v) || !CBB_add_u8(&cbb, '.') || !add_decimal(&cbb, v)) { goto err; } } uint8_t *txt; size_t txt_len; if (!CBB_add_u8(&cbb, '\0') || !CBB_finish(&cbb, &txt, &txt_len)) { goto err; } return (char *)txt; err: CBB_cleanup(&cbb); return NULL; } static int cbs_get_two_digits(CBS *cbs, int *out) { uint8_t first_digit, second_digit; if (!CBS_get_u8(cbs, &first_digit)) { return 0; } if (!OPENSSL_isdigit(first_digit)) { return 0; } if (!CBS_get_u8(cbs, &second_digit)) { return 0; } if (!OPENSSL_isdigit(second_digit)) { return 0; } *out = (first_digit - '0') * 10 + (second_digit - '0'); return 1; } static int is_valid_day(int year, int month, int day) { if (day < 1) { return 0; } switch (month) { case 1: case 3: case 5: case 7: case 8: case 10: case 12: return day <= 31; case 4: case 6: case 9: case 11: return day <= 30; case 2: if ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0) { return day <= 29; } else { return day <= 28; } default: return 0; } } static int CBS_parse_rfc5280_time_internal(const CBS *cbs, int is_gentime, int allow_timezone_offset, struct tm *out_tm) { int year, month, day, hour, min, 
sec, tmp; CBS copy = *cbs; uint8_t tz; if (is_gentime) { if (!cbs_get_two_digits(©, &tmp)) { return 0; } year = tmp * 100; if (!cbs_get_two_digits(©, &tmp)) { return 0; } year += tmp; } else { year = 1900; if (!cbs_get_two_digits(©, &tmp)) { return 0; } year += tmp; if (year < 1950) { year += 100; } if (year >= 2050) { return 0; // A Generalized time must be used. } } if (!cbs_get_two_digits(©, &month) || month < 1 || month > 12 || // Reject invalid months. !cbs_get_two_digits(©, &day) || !is_valid_day(year, month, day) || // Reject invalid days. !cbs_get_two_digits(©, &hour) || hour > 23 || // Reject invalid hours. !cbs_get_two_digits(©, &min) || min > 59 || // Reject invalid minutes. !cbs_get_two_digits(©, &sec) || sec > 59 || !CBS_get_u8(©, &tz)) { return 0; } int offset_sign = 0; switch (tz) { case 'Z': break; // We correctly have 'Z' on the end as per spec. case '+': offset_sign = 1; break; // Should not be allowed per RFC 5280. case '-': offset_sign = -1; break; // Should not be allowed per RFC 5280. default: return 0; // Reject anything else after the time. } // If allow_timezone_offset is non-zero, allow for a four digit timezone // offset to be specified even though this is not allowed by RFC 5280. We are // permissive of this for UTCTimes due to the unfortunate existence of // artisinally rolled long lived certificates that were baked into places that // are now difficult to change. These certificates were generated with the // 'openssl' command that permissively allowed the creation of certificates // with notBefore and notAfter times specified as strings for direct // certificate inclusion on the command line. For context see cl/237068815. // // TODO(bbe): This has been expunged from public web-pki as the ecosystem has // managed to encourage CA compliance with standards. We should find a way to // get rid of this or make it off by default. 
int offset_seconds = 0; if (offset_sign != 0) { if (!allow_timezone_offset) { return 0; } int offset_hours, offset_minutes; if (!cbs_get_two_digits(©, &offset_hours) || offset_hours > 23 || // Reject invalid hours. !cbs_get_two_digits(©, &offset_minutes) || offset_minutes > 59) { // Reject invalid minutes. return 0; } offset_seconds = offset_sign * (offset_hours * 3600 + offset_minutes * 60); } if (CBS_len(©) != 0) { return 0; // Reject invalid lengths. } if (out_tm != NULL) { // Fill in the tm fields corresponding to what we validated. out_tm->tm_year = year - 1900; out_tm->tm_mon = month - 1; out_tm->tm_mday = day; out_tm->tm_hour = hour; out_tm->tm_min = min; out_tm->tm_sec = sec; if (offset_seconds && !OPENSSL_gmtime_adj(out_tm, 0, offset_seconds)) { return 0; } } return 1; } int CBS_parse_generalized_time(const CBS *cbs, struct tm *out_tm, int allow_timezone_offset) { return CBS_parse_rfc5280_time_internal(cbs, 1, allow_timezone_offset, out_tm); } int CBS_parse_utc_time(const CBS *cbs, struct tm *out_tm, int allow_timezone_offset) { return CBS_parse_rfc5280_time_internal(cbs, 0, allow_timezone_offset, out_tm); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/bytestring/internal.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_BYTESTRING_INTERNAL_H #define OPENSSL_HEADER_BYTESTRING_INTERNAL_H #include #if defined(__cplusplus) extern "C" { #endif // CBS_asn1_ber_to_der reads a BER element from |in|. If it finds // indefinite-length elements or constructed strings then it converts the BER // data to DER, sets |out| to the converted contents and |*out_storage| to a // buffer which the caller must release with |OPENSSL_free|. Otherwise, it sets // |out| to the original BER element in |in| and |*out_storage| to NULL. // Additionally, |*in| will be advanced over the BER element. // // This function should successfully process any valid BER input, however it // will not convert all of BER's deviations from DER. BER is ambiguous between // implicitly-tagged SEQUENCEs of strings and implicitly-tagged constructed // strings. Implicitly-tagged strings must be parsed with // |CBS_get_ber_implicitly_tagged_string| instead of |CBS_get_asn1|. The caller // must also account for BER variations in the contents of a primitive. // // It returns one on success and zero otherwise. OPENSSL_EXPORT int CBS_asn1_ber_to_der(CBS *in, CBS *out, uint8_t **out_storage); // CBS_get_asn1_implicit_string parses a BER string of primitive type // |inner_tag| implicitly-tagged with |outer_tag|. It sets |out| to the // contents. If concatenation was needed, it sets |*out_storage| to a buffer // which the caller must release with |OPENSSL_free|. Otherwise, it sets // |*out_storage| to NULL. // // This function does not parse all of BER. It requires the string be // definite-length. Constructed strings are allowed, but all children of the // outermost element must be primitive. 
The caller should use // |CBS_asn1_ber_to_der| before running this function. // // It returns one on success and zero otherwise. OPENSSL_EXPORT int CBS_get_asn1_implicit_string(CBS *in, CBS *out, uint8_t **out_storage, CBS_ASN1_TAG outer_tag, CBS_ASN1_TAG inner_tag); // CBB_finish_i2d calls |CBB_finish| on |cbb| which must have been initialized // with |CBB_init|. If |outp| is not NULL then the result is written to |*outp| // and |*outp| is advanced just past the output. It returns the number of bytes // in the result, whether written or not, or a negative value on error. On // error, it calls |CBB_cleanup| on |cbb|. // // This function may be used to help implement legacy i2d ASN.1 functions. int CBB_finish_i2d(CBB *cbb, uint8_t **outp); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_BYTESTRING_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/bytestring/unicode.cc ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include "internal.h" static int is_valid_code_point(uint32_t v) { // References in the following are to Unicode 15.0.0. if (// The Unicode space runs from zero to 0x10ffff (3.4 D9). 
v > 0x10ffff ||
      // Values 0x...fffe, 0x...ffff, and 0xfdd0-0xfdef are permanently reserved
      // as noncharacters (3.4 D14). See also 23.7. As our APIs are intended for
      // "open interchange", such as ASN.1, we reject them.
      (v & 0xfffe) == 0xfffe || (v >= 0xfdd0 && v <= 0xfdef) ||
      // Surrogate code points are invalid (3.2 C1).
      (v >= 0xd800 && v <= 0xdfff)) {
    return 0;
  }
  return 1;
}

// BOTTOM_BITS returns a byte with the bottom |n| bits set.
#define BOTTOM_BITS(n) (uint8_t)((1u << (n)) - 1)

// TOP_BITS returns a byte with the top |n| bits set.
#define TOP_BITS(n) ((uint8_t)~BOTTOM_BITS(8 - (n)))

// CBS_get_utf8 decodes one UTF-8 encoded code point from |cbs| into |*out|.
// It rejects overlong encodings, surrogates, noncharacters, and values above
// the Unicode maximum. Returns one on success and zero on error.
int CBS_get_utf8(CBS *cbs, uint32_t *out) {
  uint8_t c;
  if (!CBS_get_u8(cbs, &c)) {
    return 0;
  }
  if (c <= 0x7f) {
    // One-byte (ASCII) sequence.
    *out = c;
    return 1;
  }
  uint32_t v, lower_bound;
  size_t len;
  // The leading byte determines the number of continuation bytes (|len|) and
  // contributes the top bits of the code point. |lower_bound| is the smallest
  // value that requires a sequence of this length; anything smaller is an
  // overlong encoding.
  if ((c & TOP_BITS(3)) == TOP_BITS(2)) {
    v = c & BOTTOM_BITS(5);
    len = 1;
    lower_bound = 0x80;
  } else if ((c & TOP_BITS(4)) == TOP_BITS(3)) {
    v = c & BOTTOM_BITS(4);
    len = 2;
    lower_bound = 0x800;
  } else if ((c & TOP_BITS(5)) == TOP_BITS(4)) {
    v = c & BOTTOM_BITS(3);
    len = 3;
    lower_bound = 0x10000;
  } else {
    return 0;
  }
  // Each continuation byte must have the form 10xxxxxx and contributes six
  // bits.
  for (size_t i = 0; i < len; i++) {
    if (!CBS_get_u8(cbs, &c) ||
        (c & TOP_BITS(2)) != TOP_BITS(1)) {
      return 0;
    }
    v <<= 6;
    v |= c & BOTTOM_BITS(6);
  }
  if (!is_valid_code_point(v) || v < lower_bound) {
    return 0;
  }
  *out = v;
  return 1;
}

// CBS_get_latin1 reads one Latin-1 (ISO 8859-1) code point from |cbs|; every
// byte value maps directly to a valid code point.
int CBS_get_latin1(CBS *cbs, uint32_t *out) {
  uint8_t c;
  if (!CBS_get_u8(cbs, &c)) {
    return 0;
  }
  *out = c;
  return 1;
}

int CBS_get_ucs2_be(CBS *cbs, uint32_t *out) {
  // Note UCS-2 (used by BMPString) does not support surrogates.
uint16_t c; if (!CBS_get_u16(cbs, &c) || !is_valid_code_point(c)) { return 0; } *out = c; return 1; } int CBS_get_utf32_be(CBS *cbs, uint32_t *out) { return CBS_get_u32(cbs, out) && is_valid_code_point(*out); } size_t CBB_get_utf8_len(uint32_t u) { if (u <= 0x7f) { return 1; } if (u <= 0x7ff) { return 2; } if (u <= 0xffff) { return 3; } return 4; } int CBB_add_utf8(CBB *cbb, uint32_t u) { if (!is_valid_code_point(u)) { return 0; } if (u <= 0x7f) { return CBB_add_u8(cbb, (uint8_t)u); } if (u <= 0x7ff) { return CBB_add_u8(cbb, TOP_BITS(2) | (u >> 6)) && CBB_add_u8(cbb, TOP_BITS(1) | (u & BOTTOM_BITS(6))); } if (u <= 0xffff) { return CBB_add_u8(cbb, TOP_BITS(3) | (u >> 12)) && CBB_add_u8(cbb, TOP_BITS(1) | ((u >> 6) & BOTTOM_BITS(6))) && CBB_add_u8(cbb, TOP_BITS(1) | (u & BOTTOM_BITS(6))); } if (u <= 0x10ffff) { return CBB_add_u8(cbb, TOP_BITS(4) | (u >> 18)) && CBB_add_u8(cbb, TOP_BITS(1) | ((u >> 12) & BOTTOM_BITS(6))) && CBB_add_u8(cbb, TOP_BITS(1) | ((u >> 6) & BOTTOM_BITS(6))) && CBB_add_u8(cbb, TOP_BITS(1) | (u & BOTTOM_BITS(6))); } return 0; } int CBB_add_latin1(CBB *cbb, uint32_t u) { return u <= 0xff && CBB_add_u8(cbb, (uint8_t)u); } int CBB_add_ucs2_be(CBB *cbb, uint32_t u) { return u <= 0xffff && is_valid_code_point(u) && CBB_add_u16(cbb, (uint16_t)u); } int CBB_add_utf32_be(CBB *cbb, uint32_t u) { return is_valid_code_point(u) && CBB_add_u32(cbb, u); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/chacha/chacha.cc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ // Adapted from the public domain, estream code by D. Bernstein. #include #include #include #include "../internal.h" #include "internal.h" // sigma contains the ChaCha constants, which happen to be an ASCII string. static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k' }; // QUARTERROUND updates a, b, c, d with a ChaCha "quarter" round. #define QUARTERROUND(a, b, c, d) \ x[a] += x[b]; \ x[d] = CRYPTO_rotl_u32(x[d] ^ x[a], 16); \ x[c] += x[d]; \ x[b] = CRYPTO_rotl_u32(x[b] ^ x[c], 12); \ x[a] += x[b]; \ x[d] = CRYPTO_rotl_u32(x[d] ^ x[a], 8); \ x[c] += x[d]; \ x[b] = CRYPTO_rotl_u32(x[b] ^ x[c], 7); void CRYPTO_hchacha20(uint8_t out[32], const uint8_t key[32], const uint8_t nonce[16]) { uint32_t x[16]; OPENSSL_memcpy(x, sigma, sizeof(sigma)); OPENSSL_memcpy(&x[4], key, 32); OPENSSL_memcpy(&x[12], nonce, 16); for (size_t i = 0; i < 20; i += 2) { QUARTERROUND(0, 4, 8, 12) QUARTERROUND(1, 5, 9, 13) QUARTERROUND(2, 6, 10, 14) QUARTERROUND(3, 7, 11, 15) QUARTERROUND(0, 5, 10, 15) QUARTERROUND(1, 6, 11, 12) QUARTERROUND(2, 7, 8, 13) QUARTERROUND(3, 4, 9, 14) } OPENSSL_memcpy(out, &x[0], sizeof(uint32_t) * 4); OPENSSL_memcpy(&out[16], &x[12], sizeof(uint32_t) * 4); } #if defined(CHACHA20_ASM_NOHW) static void ChaCha20_ctr32(uint8_t *out, const uint8_t *in, size_t in_len, const uint32_t key[8], const uint32_t counter[4]) { #if defined(CHACHA20_ASM_NEON) if (ChaCha20_ctr32_neon_capable(in_len)) { ChaCha20_ctr32_neon(out, in, in_len, key, counter); return; } #endif #if defined(CHACHA20_ASM_AVX2) if (ChaCha20_ctr32_avx2_capable(in_len)) { ChaCha20_ctr32_avx2(out, in, in_len, key, counter); 
return; } #endif #if defined(CHACHA20_ASM_SSSE3_4X) if (ChaCha20_ctr32_ssse3_4x_capable(in_len)) { ChaCha20_ctr32_ssse3_4x(out, in, in_len, key, counter); return; } #endif #if defined(CHACHA20_ASM_SSSE3) if (ChaCha20_ctr32_ssse3_capable(in_len)) { ChaCha20_ctr32_ssse3(out, in, in_len, key, counter); return; } #endif if (in_len > 0) { ChaCha20_ctr32_nohw(out, in, in_len, key, counter); } } #endif #if defined(CHACHA20_ASM_NOHW) void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, size_t in_len, const uint8_t key[32], const uint8_t nonce[12], uint32_t counter) { assert(!buffers_alias(out, in_len, in, in_len) || in == out); uint32_t counter_nonce[4]; counter_nonce[0] = counter; counter_nonce[1] = CRYPTO_load_u32_le(nonce + 0); counter_nonce[2] = CRYPTO_load_u32_le(nonce + 4); counter_nonce[3] = CRYPTO_load_u32_le(nonce + 8); const uint32_t *key_ptr = (const uint32_t *)key; #if !defined(OPENSSL_X86) && !defined(OPENSSL_X86_64) // The assembly expects the key to be four-byte aligned. uint32_t key_u32[8]; if ((((uintptr_t)key) & 3) != 0) { key_u32[0] = CRYPTO_load_u32_le(key + 0); key_u32[1] = CRYPTO_load_u32_le(key + 4); key_u32[2] = CRYPTO_load_u32_le(key + 8); key_u32[3] = CRYPTO_load_u32_le(key + 12); key_u32[4] = CRYPTO_load_u32_le(key + 16); key_u32[5] = CRYPTO_load_u32_le(key + 20); key_u32[6] = CRYPTO_load_u32_le(key + 24); key_u32[7] = CRYPTO_load_u32_le(key + 28); key_ptr = key_u32; } #endif while (in_len > 0) { // The assembly functions do not have defined overflow behavior. While // overflow is almost always a bug in the caller, we prefer our functions to // behave the same across platforms, so divide into multiple calls to avoid // this case. 
uint64_t todo = 64 * ((UINT64_C(1) << 32) - counter_nonce[0]); if (todo > in_len) { todo = in_len; } ChaCha20_ctr32(out, in, (size_t)todo, key_ptr, counter_nonce); in += todo; out += todo; in_len -= todo; // We're either done and will next break out of the loop, or we stopped at // the wraparound point and the counter should continue at zero. counter_nonce[0] = 0; } } #else // chacha_core performs 20 rounds of ChaCha on the input words in // |input| and writes the 64 output bytes to |output|. static void chacha_core(uint8_t output[64], const uint32_t input[16]) { uint32_t x[16]; int i; OPENSSL_memcpy(x, input, sizeof(uint32_t) * 16); for (i = 20; i > 0; i -= 2) { QUARTERROUND(0, 4, 8, 12) QUARTERROUND(1, 5, 9, 13) QUARTERROUND(2, 6, 10, 14) QUARTERROUND(3, 7, 11, 15) QUARTERROUND(0, 5, 10, 15) QUARTERROUND(1, 6, 11, 12) QUARTERROUND(2, 7, 8, 13) QUARTERROUND(3, 4, 9, 14) } for (i = 0; i < 16; ++i) { x[i] += input[i]; } for (i = 0; i < 16; ++i) { CRYPTO_store_u32_le(output + 4 * i, x[i]); } } void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, size_t in_len, const uint8_t key[32], const uint8_t nonce[12], uint32_t counter) { assert(!buffers_alias(out, in_len, in, in_len) || in == out); uint32_t input[16]; uint8_t buf[64]; size_t todo, i; input[0] = CRYPTO_load_u32_le(sigma + 0); input[1] = CRYPTO_load_u32_le(sigma + 4); input[2] = CRYPTO_load_u32_le(sigma + 8); input[3] = CRYPTO_load_u32_le(sigma + 12); input[4] = CRYPTO_load_u32_le(key + 0); input[5] = CRYPTO_load_u32_le(key + 4); input[6] = CRYPTO_load_u32_le(key + 8); input[7] = CRYPTO_load_u32_le(key + 12); input[8] = CRYPTO_load_u32_le(key + 16); input[9] = CRYPTO_load_u32_le(key + 20); input[10] = CRYPTO_load_u32_le(key + 24); input[11] = CRYPTO_load_u32_le(key + 28); input[12] = counter; input[13] = CRYPTO_load_u32_le(nonce + 0); input[14] = CRYPTO_load_u32_le(nonce + 4); input[15] = CRYPTO_load_u32_le(nonce + 8); while (in_len > 0) { todo = sizeof(buf); if (in_len < todo) { todo = in_len; } 
chacha_core(buf, input); for (i = 0; i < todo; i++) { out[i] = in[i] ^ buf[i]; } out += todo; in += todo; in_len -= todo; input[12]++; } } #endif ================================================ FILE: Sources/CNIOBoringSSL/crypto/chacha/internal.h ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CHACHA_INTERNAL #define OPENSSL_HEADER_CHACHA_INTERNAL #include #include "../internal.h" #if defined(__cplusplus) extern "C" { #endif // CRYPTO_hchacha20 computes the HChaCha20 function, which should only be used // as part of XChaCha20. void CRYPTO_hchacha20(uint8_t out[32], const uint8_t key[32], const uint8_t nonce[16]); #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) #define CHACHA20_ASM_NOHW #define CHACHA20_ASM_SSSE3 inline int ChaCha20_ctr32_ssse3_capable(size_t len) { // Unlike the x86_64 version, the x86 SSSE3 routine runs for all non-zero // lengths. 
return len > 0 && CRYPTO_is_SSSE3_capable() && CRYPTO_is_FXSR_capable(); } void ChaCha20_ctr32_ssse3(uint8_t *out, const uint8_t *in, size_t in_len, const uint32_t key[8], const uint32_t counter[4]); #elif !defined(OPENSSL_NO_ASM) && \ (defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)) #define CHACHA20_ASM_NOHW #define CHACHA20_ASM_NEON inline int ChaCha20_ctr32_neon_capable(size_t len) { return len >= 192 && CRYPTO_is_NEON_capable(); } void ChaCha20_ctr32_neon(uint8_t *out, const uint8_t *in, size_t in_len, const uint32_t key[8], const uint32_t counter[4]); #elif !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) #define CHACHA20_ASM_NOHW #define CHACHA20_ASM_AVX2 inline int ChaCha20_ctr32_avx2_capable(size_t len) { return len > 128 && CRYPTO_is_AVX2_capable(); } void ChaCha20_ctr32_avx2(uint8_t *out, const uint8_t *in, size_t in_len, const uint32_t key[8], const uint32_t counter[4]); #define CHACHA20_ASM_SSSE3_4X inline int ChaCha20_ctr32_ssse3_4x_capable(size_t len) { int capable = len > 128 && CRYPTO_is_SSSE3_capable(); int faster = len > 192 || !CRYPTO_cpu_perf_is_like_silvermont(); return capable && faster; } void ChaCha20_ctr32_ssse3_4x(uint8_t *out, const uint8_t *in, size_t in_len, const uint32_t key[8], const uint32_t counter[4]); #define CHACHA20_ASM_SSSE3 inline int ChaCha20_ctr32_ssse3_capable(size_t len) { return len > 128 && CRYPTO_is_SSSE3_capable(); } void ChaCha20_ctr32_ssse3(uint8_t *out, const uint8_t *in, size_t in_len, const uint32_t key[8], const uint32_t counter[4]); #endif #if defined(CHACHA20_ASM_NOHW) // ChaCha20_ctr32_nohw encrypts |in_len| bytes from |in| and writes the result // to |out|. If |in| and |out| alias, they must be equal. |in_len| may not be // zero. // // |counter[0]| is the initial 32-bit block counter, and the remainder is the // 96-bit nonce. If the counter overflows, the output is undefined. The function // will produce output, but the output may vary by machine and may not be // self-consistent. 
(On some architectures, the assembly implements a mix of // 64-bit and 32-bit counters.) void ChaCha20_ctr32_nohw(uint8_t *out, const uint8_t *in, size_t in_len, const uint32_t key[8], const uint32_t counter[4]); #endif #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_CHACHA_INTERNAL ================================================ FILE: Sources/CNIOBoringSSL/crypto/cipher/derive_key.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #define PKCS5_SALT_LEN 8 int EVP_BytesToKey(const EVP_CIPHER *type, const EVP_MD *md, const uint8_t *salt, const uint8_t *data, size_t data_len, unsigned count, uint8_t *key, uint8_t *iv) { EVP_MD_CTX c; uint8_t md_buf[EVP_MAX_MD_SIZE]; unsigned addmd = 0; unsigned mds = 0, i; int rv = 0; unsigned nkey = EVP_CIPHER_key_length(type); unsigned niv = EVP_CIPHER_iv_length(type); assert(nkey <= EVP_MAX_KEY_LENGTH); assert(niv <= EVP_MAX_IV_LENGTH); if (data == NULL) { return nkey; } EVP_MD_CTX_init(&c); for (;;) { if (!EVP_DigestInit_ex(&c, md, NULL)) { goto err; } if (addmd++) { if (!EVP_DigestUpdate(&c, md_buf, mds)) { goto err; } } if (!EVP_DigestUpdate(&c, data, data_len)) { goto err; } if (salt != NULL) { if (!EVP_DigestUpdate(&c, salt, PKCS5_SALT_LEN)) { goto err; } } if (!EVP_DigestFinal_ex(&c, md_buf, &mds)) { goto err; } for (i = 1; i < count; i++) { if (!EVP_DigestInit_ex(&c, md, NULL) || !EVP_DigestUpdate(&c, md_buf, mds) || !EVP_DigestFinal_ex(&c, md_buf, &mds)) { goto err; } } i = 0; if (nkey) { for (;;) { if (nkey == 0 || i == mds) { break; } if (key != NULL) { *(key++) = md_buf[i]; } nkey--; i++; } } if (niv && i != mds) { for (;;) { if (niv == 0 || 
i == mds) { break; } if (iv != NULL) { *(iv++) = md_buf[i]; } niv--; i++; } } if (nkey == 0 && niv == 0) { break; } } rv = EVP_CIPHER_key_length(type); err: EVP_MD_CTX_cleanup(&c); OPENSSL_cleanse(md_buf, EVP_MAX_MD_SIZE); return rv; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/cipher/e_aesctrhmac.cc ================================================ /* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include "../fipsmodule/aes/internal.h" #include "../fipsmodule/cipher/internal.h" #define EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN SHA256_DIGEST_LENGTH #define EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN 12 struct aead_aes_ctr_hmac_sha256_ctx { union { double align; AES_KEY ks; } ks; ctr128_f ctr; block128_f block; SHA256_CTX inner_init_state; SHA256_CTX outer_init_state; }; static_assert(sizeof(((EVP_AEAD_CTX *)NULL)->state) >= sizeof(struct aead_aes_ctr_hmac_sha256_ctx), "AEAD state is too small"); static_assert(alignof(union evp_aead_ctx_st_state) >= alignof(struct aead_aes_ctr_hmac_sha256_ctx), "AEAD state has insufficient alignment"); static void hmac_init(SHA256_CTX *out_inner, SHA256_CTX *out_outer, const uint8_t hmac_key[32]) { static const size_t hmac_key_len = 32; uint8_t block[SHA256_CBLOCK]; OPENSSL_memcpy(block, hmac_key, hmac_key_len); OPENSSL_memset(block + hmac_key_len, 0x36, sizeof(block) - hmac_key_len); unsigned i; for (i = 0; i < hmac_key_len; i++) { block[i] ^= 0x36; } SHA256_Init(out_inner); SHA256_Update(out_inner, block, sizeof(block)); OPENSSL_memset(block + hmac_key_len, 0x5c, sizeof(block) - hmac_key_len); for (i = 0; i < hmac_key_len; i++) { block[i] ^= (0x36 ^ 0x5c); } SHA256_Init(out_outer); SHA256_Update(out_outer, block, sizeof(block)); } static int aead_aes_ctr_hmac_sha256_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len) { struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx = (struct aead_aes_ctr_hmac_sha256_ctx *)&ctx->state; static const size_t hmac_key_len = 32; if (key_len < hmac_key_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); return 0; // EVP_AEAD_CTX_init should catch this. } const size_t aes_key_len = key_len - hmac_key_len; if (aes_key_len != 16 && aes_key_len != 32) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); return 0; // EVP_AEAD_CTX_init should catch this. 
} if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { tag_len = EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN; } if (tag_len > EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE); return 0; } aes_ctx->ctr = aes_ctr_set_key(&aes_ctx->ks.ks, NULL, &aes_ctx->block, key, aes_key_len); ctx->tag_len = tag_len; hmac_init(&aes_ctx->inner_init_state, &aes_ctx->outer_init_state, key + aes_key_len); return 1; } static void aead_aes_ctr_hmac_sha256_cleanup(EVP_AEAD_CTX *ctx) {} static void hmac_update_uint64(SHA256_CTX *sha256, uint64_t value) { unsigned i; uint8_t bytes[8]; for (i = 0; i < sizeof(bytes); i++) { bytes[i] = value & 0xff; value >>= 8; } SHA256_Update(sha256, bytes, sizeof(bytes)); } static void hmac_calculate(uint8_t out[SHA256_DIGEST_LENGTH], const SHA256_CTX *inner_init_state, const SHA256_CTX *outer_init_state, const uint8_t *ad, size_t ad_len, const uint8_t *nonce, const uint8_t *ciphertext, size_t ciphertext_len) { SHA256_CTX sha256; OPENSSL_memcpy(&sha256, inner_init_state, sizeof(sha256)); hmac_update_uint64(&sha256, ad_len); hmac_update_uint64(&sha256, ciphertext_len); SHA256_Update(&sha256, nonce, EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN); SHA256_Update(&sha256, ad, ad_len); // Pad with zeros to the end of the SHA-256 block. 
const unsigned num_padding = (SHA256_CBLOCK - ((sizeof(uint64_t)*2 + EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN + ad_len) % SHA256_CBLOCK)) % SHA256_CBLOCK; uint8_t padding[SHA256_CBLOCK]; OPENSSL_memset(padding, 0, num_padding); SHA256_Update(&sha256, padding, num_padding); SHA256_Update(&sha256, ciphertext, ciphertext_len); uint8_t inner_digest[SHA256_DIGEST_LENGTH]; SHA256_Final(inner_digest, &sha256); OPENSSL_memcpy(&sha256, outer_init_state, sizeof(sha256)); SHA256_Update(&sha256, inner_digest, sizeof(inner_digest)); SHA256_Final(out, &sha256); } static void aead_aes_ctr_hmac_sha256_crypt( const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx, uint8_t *out, const uint8_t *in, size_t len, const uint8_t *nonce) { // Since the AEAD operation is one-shot, keeping a buffer of unused keystream // bytes is pointless. However, |CRYPTO_ctr128_encrypt_ctr32| requires it. uint8_t partial_block_buffer[AES_BLOCK_SIZE]; unsigned partial_block_offset = 0; OPENSSL_memset(partial_block_buffer, 0, sizeof(partial_block_buffer)); uint8_t counter[AES_BLOCK_SIZE]; OPENSSL_memcpy(counter, nonce, EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN); OPENSSL_memset(counter + EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN, 0, 4); CRYPTO_ctr128_encrypt_ctr32(in, out, len, &aes_ctx->ks.ks, counter, partial_block_buffer, &partial_block_offset, aes_ctx->ctr); } static int aead_aes_ctr_hmac_sha256_seal_scatter( const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, size_t extra_in_len, const uint8_t *ad, size_t ad_len) { const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx = (struct aead_aes_ctr_hmac_sha256_ctx *) &ctx->state; const uint64_t in_len_64 = in_len; if (in_len_64 >= (UINT64_C(1) << 32) * AES_BLOCK_SIZE) { // This input is so large it would overflow the 32-bit block counter. 
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } if (max_out_tag_len < ctx->tag_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } if (nonce_len != EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); return 0; } aead_aes_ctr_hmac_sha256_crypt(aes_ctx, out, in, in_len, nonce); uint8_t hmac_result[SHA256_DIGEST_LENGTH]; hmac_calculate(hmac_result, &aes_ctx->inner_init_state, &aes_ctx->outer_init_state, ad, ad_len, nonce, out, in_len); OPENSSL_memcpy(out_tag, hmac_result, ctx->tag_len); *out_tag_len = ctx->tag_len; return 1; } static int aead_aes_ctr_hmac_sha256_open_gather( const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag, size_t in_tag_len, const uint8_t *ad, size_t ad_len) { const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx = (struct aead_aes_ctr_hmac_sha256_ctx *) &ctx->state; if (in_tag_len != ctx->tag_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } if (nonce_len != EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); return 0; } uint8_t hmac_result[SHA256_DIGEST_LENGTH]; hmac_calculate(hmac_result, &aes_ctx->inner_init_state, &aes_ctx->outer_init_state, ad, ad_len, nonce, in, in_len); if (CRYPTO_memcmp(hmac_result, in_tag, ctx->tag_len) != 0) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } aead_aes_ctr_hmac_sha256_crypt(aes_ctx, out, in, in_len, nonce); return 1; } static const EVP_AEAD aead_aes_128_ctr_hmac_sha256 = { 16 /* AES key */ + 32 /* HMAC key */, 12, // nonce length EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // overhead EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // max tag length 0, // seal_scatter_supports_extra_in aead_aes_ctr_hmac_sha256_init, NULL /* init_with_direction */, aead_aes_ctr_hmac_sha256_cleanup, NULL /* open */, aead_aes_ctr_hmac_sha256_seal_scatter, aead_aes_ctr_hmac_sha256_open_gather, NULL /* 
get_iv */, NULL /* tag_len */, }; static const EVP_AEAD aead_aes_256_ctr_hmac_sha256 = { 32 /* AES key */ + 32 /* HMAC key */, 12, // nonce length EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // overhead EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // max tag length 0, // seal_scatter_supports_extra_in aead_aes_ctr_hmac_sha256_init, NULL /* init_with_direction */, aead_aes_ctr_hmac_sha256_cleanup, NULL /* open */, aead_aes_ctr_hmac_sha256_seal_scatter, aead_aes_ctr_hmac_sha256_open_gather, NULL /* get_iv */, NULL /* tag_len */, }; const EVP_AEAD *EVP_aead_aes_128_ctr_hmac_sha256(void) { return &aead_aes_128_ctr_hmac_sha256; } const EVP_AEAD *EVP_aead_aes_256_ctr_hmac_sha256(void) { return &aead_aes_256_ctr_hmac_sha256; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/cipher/e_aesgcmsiv.cc ================================================ /* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include "../fipsmodule/aes/internal.h" #include "../fipsmodule/cipher/internal.h" #include "../internal.h" #define EVP_AEAD_AES_GCM_SIV_NONCE_LEN 12 #define EVP_AEAD_AES_GCM_SIV_TAG_LEN 16 // TODO(davidben): AES-GCM-SIV assembly is not correct for Windows. It must save // and restore xmm6 through xmm15. 
#if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM) && \
    !defined(OPENSSL_WINDOWS)
#define AES_GCM_SIV_ASM  // Optimised AES-GCM-SIV

namespace {
// Context for the assembly implementation: a 15-round AES key schedule
// (sized for AES-256; AES-128 uses a prefix) plus a key-size flag.
struct aead_aes_gcm_siv_asm_ctx {
  alignas(16) uint8_t key[16 * 15];
  int is_128_bit;
};
}  // namespace

// The assembly code assumes 8-byte alignment of the EVP_AEAD_CTX's state, and
// aligns to 16 bytes itself.
static_assert(sizeof(((EVP_AEAD_CTX *)NULL)->state) + 8 >=
                  sizeof(struct aead_aes_gcm_siv_asm_ctx),
              "AEAD state is too small");
static_assert(alignof(union evp_aead_ctx_st_state) >= 8,
              "AEAD state has insufficient alignment");

// asm_ctx_from_ctx returns a 16-byte aligned context pointer from |ctx|.
static struct aead_aes_gcm_siv_asm_ctx *asm_ctx_from_ctx(
    const EVP_AEAD_CTX *ctx) {
  // ctx->state must already be 8-byte aligned. Thus, at most, we may need to
  // add eight to align it to 16 bytes.
  const uintptr_t offset = ((uintptr_t)&ctx->state) & 8;
  return (struct aead_aes_gcm_siv_asm_ctx *)(&ctx->state.opaque[offset]);
}

extern "C" {
// aes128gcmsiv_aes_ks writes an AES-128 key schedule for |key| to
// |out_expanded_key|.
extern void aes128gcmsiv_aes_ks(const uint8_t key[16],
                                uint8_t out_expanded_key[16 * 15]);

// aes256gcmsiv_aes_ks writes an AES-256 key schedule for |key| to
// |out_expanded_key|.
extern void aes256gcmsiv_aes_ks(const uint8_t key[32],
                                uint8_t out_expanded_key[16 * 15]);
}

// aead_aes_gcm_siv_asm_init expands the AES-128 or AES-256 key into the
// context's key schedule and fixes the tag length at 16 bytes (AES-GCM-SIV
// does not support truncated tags).
static int aead_aes_gcm_siv_asm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                     size_t key_len, size_t tag_len) {
  const size_t key_bits = key_len * 8;

  if (key_bits != 128 && key_bits != 256) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;  // EVP_AEAD_CTX_init should catch this.
  }

  if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN;
  }

  if (tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx = asm_ctx_from_ctx(ctx);
  // The assembly requires a 16-byte aligned key schedule.
  assert((((uintptr_t)gcm_siv_ctx) & 15) == 0);

  if (key_bits == 128) {
    aes128gcmsiv_aes_ks(key, &gcm_siv_ctx->key[0]);
    gcm_siv_ctx->is_128_bit = 1;
  } else {
    aes256gcmsiv_aes_ks(key, &gcm_siv_ctx->key[0]);
    gcm_siv_ctx->is_128_bit = 0;
  }

  ctx->tag_len = tag_len;

  return 1;
}

// No heap state is held in the context, so cleanup is a no-op.
static void aead_aes_gcm_siv_asm_cleanup(EVP_AEAD_CTX *ctx) {}

extern "C" {
// aesgcmsiv_polyval_horner updates the POLYVAL value in |in_out_poly| to
// include a number (|in_blocks|) of 16-byte blocks of data from |in|, given
// the POLYVAL key in |key|.
extern void aesgcmsiv_polyval_horner(const uint8_t in_out_poly[16],
                                     const uint8_t key[16], const uint8_t *in,
                                     size_t in_blocks);

// aesgcmsiv_htable_init writes powers 1..8 of |auth_key| to |out_htable|.
extern void aesgcmsiv_htable_init(uint8_t out_htable[16 * 8],
                                  const uint8_t auth_key[16]);

// aesgcmsiv_htable6_init writes powers 1..6 of |auth_key| to |out_htable|.
extern void aesgcmsiv_htable6_init(uint8_t out_htable[16 * 6],
                                   const uint8_t auth_key[16]);

// aesgcmsiv_htable_polyval updates the POLYVAL value in |in_out_poly| to
// include |in_len| bytes of data from |in|. (Where |in_len| must be a multiple
// of 16.) It uses the precomputed powers of the key given in |htable|.
extern void aesgcmsiv_htable_polyval(const uint8_t htable[16 * 8],
                                     const uint8_t *in, size_t in_len,
                                     uint8_t in_out_poly[16]);

// aes128gcmsiv_dec decrypts |in_len| & ~15 bytes from |out| and writes them to
// |in|. |in| and |out| may be equal, but must not otherwise alias.
//
// |in_out_calculated_tag_and_scratch|, on entry, must contain:
//     1. The current value of the calculated tag, which will be updated during
//        decryption and written back to the beginning of this buffer on exit.
//     2. The claimed tag, which is needed to derive counter values.
//
// While decrypting, the whole of |in_out_calculated_tag_and_scratch| may be
// used for other purposes. In order to decrypt and update the POLYVAL value, it
// uses the expanded key from |key| and the table of powers in |htable|.
extern void aes128gcmsiv_dec(const uint8_t *in, uint8_t *out,
                             uint8_t in_out_calculated_tag_and_scratch[16 * 8],
                             const uint8_t htable[16 * 6],
                             const struct aead_aes_gcm_siv_asm_ctx *key,
                             size_t in_len);

// aes256gcmsiv_dec acts like |aes128gcmsiv_dec|, but for AES-256.
extern void aes256gcmsiv_dec(const uint8_t *in, uint8_t *out,
                             uint8_t in_out_calculated_tag_and_scratch[16 * 8],
                             const uint8_t htable[16 * 6],
                             const struct aead_aes_gcm_siv_asm_ctx *key,
                             size_t in_len);

// aes128gcmsiv_kdf performs the AES-GCM-SIV KDF given the expanded key from
// |key_schedule| and the nonce in |nonce|. Note that, while only 12 bytes of
// the nonce are used, 16 bytes are read and so the value must be
// right-padded.
extern void aes128gcmsiv_kdf(const uint8_t nonce[16],
                             uint64_t out_key_material[8],
                             const uint8_t *key_schedule);

// aes256gcmsiv_kdf acts like |aes128gcmsiv_kdf|, but for AES-256.
extern void aes256gcmsiv_kdf(const uint8_t nonce[16],
                             uint64_t out_key_material[12],
                             const uint8_t *key_schedule);

// aes128gcmsiv_aes_ks_enc_x1 performs a key expansion of the AES-128 key in
// |key|, writes the expanded key to |out_expanded_key| and encrypts a single
// block from |in| to |out|.
extern void aes128gcmsiv_aes_ks_enc_x1(const uint8_t in[16], uint8_t out[16],
                                       uint8_t out_expanded_key[16 * 15],
                                       const uint64_t key[2]);

// aes256gcmsiv_aes_ks_enc_x1 acts like |aes128gcmsiv_aes_ks_enc_x1|, but for
// AES-256.
extern void aes256gcmsiv_aes_ks_enc_x1(const uint8_t in[16], uint8_t out[16],
                                       uint8_t out_expanded_key[16 * 15],
                                       const uint64_t key[4]);

// aes128gcmsiv_ecb_enc_block encrypts a single block from |in| to |out| using
// the expanded key in |expanded_key|.
extern void aes128gcmsiv_ecb_enc_block(
    const uint8_t in[16], uint8_t out[16],
    const struct aead_aes_gcm_siv_asm_ctx *expanded_key);

// aes256gcmsiv_ecb_enc_block acts like |aes128gcmsiv_ecb_enc_block|, but for
// AES-256.
extern void aes256gcmsiv_ecb_enc_block(
    const uint8_t in[16], uint8_t out[16],
    const struct aead_aes_gcm_siv_asm_ctx *expanded_key);

// aes128gcmsiv_enc_msg_x4 encrypts |in_len| bytes from |in| to |out| using the
// expanded key from |key|. (The value of |in_len| must be a multiple of 16.)
// The |in| and |out| buffers may be equal but must not otherwise overlap. The
// initial counter is constructed from the given |tag| as required by
// AES-GCM-SIV.
extern void aes128gcmsiv_enc_msg_x4(const uint8_t *in, uint8_t *out,
                                    const uint8_t *tag,
                                    const struct aead_aes_gcm_siv_asm_ctx *key,
                                    size_t in_len);

// aes256gcmsiv_enc_msg_x4 acts like |aes128gcmsiv_enc_msg_x4|, but for
// AES-256.
extern void aes256gcmsiv_enc_msg_x4(const uint8_t *in, uint8_t *out,
                                    const uint8_t *tag,
                                    const struct aead_aes_gcm_siv_asm_ctx *key,
                                    size_t in_len);

// aes128gcmsiv_enc_msg_x8 acts like |aes128gcmsiv_enc_msg_x4|, but is
// optimised for longer messages.
extern void aes128gcmsiv_enc_msg_x8(const uint8_t *in, uint8_t *out,
                                    const uint8_t *tag,
                                    const struct aead_aes_gcm_siv_asm_ctx *key,
                                    size_t in_len);

// aes256gcmsiv_enc_msg_x8 acts like |aes256gcmsiv_enc_msg_x4|, but is
// optimised for longer messages.
extern void aes256gcmsiv_enc_msg_x8(const uint8_t *in, uint8_t *out,
                                    const uint8_t *tag,
                                    const struct aead_aes_gcm_siv_asm_ctx *key,
                                    size_t in_len);
}

// gcm_siv_asm_polyval evaluates POLYVAL at |auth_key| on the given plaintext
// and AD. The result is written to |out_tag|.
static void gcm_siv_asm_polyval(uint8_t out_tag[16], const uint8_t *in,
                                size_t in_len, const uint8_t *ad, size_t ad_len,
                                const uint8_t auth_key[16],
                                const uint8_t nonce[12]) {
  OPENSSL_memset(out_tag, 0, 16);
  const size_t ad_blocks = ad_len / 16;
  const size_t in_blocks = in_len / 16;
  int htable_init = 0;
  alignas(16) uint8_t htable[16 * 8];

  // Precomputing the powers table only pays off for longer inputs.
  if (ad_blocks > 8 || in_blocks > 8) {
    htable_init = 1;
    aesgcmsiv_htable_init(htable, auth_key);
  }

  if (htable_init) {
    aesgcmsiv_htable_polyval(htable, ad, ad_len & ~15, out_tag);
  } else {
    aesgcmsiv_polyval_horner(out_tag, auth_key, ad, ad_blocks);
  }

  uint8_t scratch[16];
  // Zero-pad any trailing partial AD block before absorbing it.
  if (ad_len & 15) {
    OPENSSL_memset(scratch, 0, sizeof(scratch));
    OPENSSL_memcpy(scratch, &ad[ad_len & ~15], ad_len & 15);
    aesgcmsiv_polyval_horner(out_tag, auth_key, scratch, 1);
  }

  if (htable_init) {
    aesgcmsiv_htable_polyval(htable, in, in_len & ~15, out_tag);
  } else {
    aesgcmsiv_polyval_horner(out_tag, auth_key, in, in_blocks);
  }

  // Zero-pad any trailing partial plaintext block before absorbing it.
  if (in_len & 15) {
    OPENSSL_memset(scratch, 0, sizeof(scratch));
    OPENSSL_memcpy(scratch, &in[in_len & ~15], in_len & 15);
    aesgcmsiv_polyval_horner(out_tag, auth_key, scratch, 1);
  }

  // Final length block: bit lengths of AD and plaintext, little-endian.
  uint8_t length_block[16];
  CRYPTO_store_u64_le(length_block, ad_len * 8);
  CRYPTO_store_u64_le(length_block + 8, in_len * 8);
  aesgcmsiv_polyval_horner(out_tag, auth_key, length_block, 1);

  // XOR in the nonce and clear the top bit, per the AES-GCM-SIV tag
  // derivation.
  for (size_t i = 0; i < 12; i++) {
    out_tag[i] ^= nonce[i];
  }

  out_tag[15] &= 0x7f;
}

// aead_aes_gcm_siv_asm_crypt_last_block handles the encryption/decryption
// (same thing in CTR mode) of the final block of a plaintext/ciphertext. It
// writes |in_len| & 15 bytes to |out| + |in_len|, based on an initial counter
// derived from |tag|.
static void aead_aes_gcm_siv_asm_crypt_last_block(
    int is_128_bit, uint8_t *out, const uint8_t *in, size_t in_len,
    const uint8_t tag[16],
    const struct aead_aes_gcm_siv_asm_ctx *enc_key_expanded) {
  alignas(16) uint8_t counter[16];
  OPENSSL_memcpy(&counter, tag, sizeof(counter));
  counter[15] |= 0x80;
  // Advance the little-endian 32-bit counter past the full blocks that the
  // bulk assembly routines already processed.
  CRYPTO_store_u32_le(counter, CRYPTO_load_u32_le(counter) + in_len / 16);

  if (is_128_bit) {
    aes128gcmsiv_ecb_enc_block(counter, counter, enc_key_expanded);
  } else {
    aes256gcmsiv_ecb_enc_block(counter, counter, enc_key_expanded);
  }

  const size_t last_bytes_offset = in_len & ~15;
  const size_t last_bytes_len = in_len & 15;
  uint8_t *last_bytes_out = &out[last_bytes_offset];
  const uint8_t *last_bytes_in = &in[last_bytes_offset];
  for (size_t i = 0; i < last_bytes_len; i++) {
    last_bytes_out[i] = last_bytes_in[i] ^ counter[i];
  }
}

// aead_aes_gcm_siv_kdf calculates the record encryption and authentication
// keys given the |nonce|.
static void aead_aes_gcm_siv_kdf(
    int is_128_bit, const struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx,
    uint64_t out_record_auth_key[2], uint64_t out_record_enc_key[4],
    const uint8_t nonce[12]) {
  // NOTE(review): only the first 12 bytes of |padded_nonce| are initialised
  // here, although the kdf declaration above says 16 bytes are read — confirm
  // against upstream that the trailing bytes are overwritten/ignored by the
  // assembly before they influence the output.
  alignas(16) uint8_t padded_nonce[16];
  OPENSSL_memcpy(padded_nonce, nonce, 12);

  // The KDF outputs 64-bit words of which only the even-indexed ones carry
  // key material; see the aes[128|256]gcmsiv_kdf declarations.
  alignas(16) uint64_t key_material[12];
  if (is_128_bit) {
    aes128gcmsiv_kdf(padded_nonce, key_material, &gcm_siv_ctx->key[0]);
    out_record_enc_key[0] = key_material[4];
    out_record_enc_key[1] = key_material[6];
  } else {
    aes256gcmsiv_kdf(padded_nonce, key_material, &gcm_siv_ctx->key[0]);
    out_record_enc_key[0] = key_material[4];
    out_record_enc_key[1] = key_material[6];
    out_record_enc_key[2] = key_material[8];
    out_record_enc_key[3] = key_material[10];
  }

  out_record_auth_key[0] = key_material[0];
  out_record_auth_key[1] = key_material[2];
}

// Seal: derive per-record keys from the nonce, compute the SIV tag over the
// plaintext and AD, then encrypt with a counter derived from that tag.
static int aead_aes_gcm_siv_asm_seal_scatter(
    const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
    size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx = asm_ctx_from_ctx(ctx);
  const uint64_t in_len_64 = in_len;
  const uint64_t ad_len_64 = ad_len;

  // RFC 8452 limits: at most 2^36 bytes of plaintext, 2^61 - 1 of AD.
  if (in_len_64 > (UINT64_C(1) << 36) || ad_len_64 >= (UINT64_C(1) << 61)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_tag_len < EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  alignas(16) uint64_t record_auth_key[2];
  alignas(16) uint64_t record_enc_key[4];
  aead_aes_gcm_siv_kdf(gcm_siv_ctx->is_128_bit, gcm_siv_ctx, record_auth_key,
                       record_enc_key, nonce);

  alignas(16) uint8_t tag[16] = {0};
  gcm_siv_asm_polyval(tag, in, in_len, ad, ad_len,
                      (const uint8_t *)record_auth_key, nonce);

  struct aead_aes_gcm_siv_asm_ctx enc_key_expanded;

  if (gcm_siv_ctx->is_128_bit) {
    // Expand the record key and encrypt the POLYVAL output into the final
    // tag in one call.
    aes128gcmsiv_aes_ks_enc_x1(tag, tag, &enc_key_expanded.key[0],
                               record_enc_key);
    if (in_len < 128) {
      aes128gcmsiv_enc_msg_x4(in, out, tag, &enc_key_expanded, in_len & ~15);
    } else {
      aes128gcmsiv_enc_msg_x8(in, out, tag, &enc_key_expanded, in_len & ~15);
    }
  } else {
    aes256gcmsiv_aes_ks_enc_x1(tag, tag, &enc_key_expanded.key[0],
                               record_enc_key);
    if (in_len < 128) {
      aes256gcmsiv_enc_msg_x4(in, out, tag, &enc_key_expanded, in_len & ~15);
    } else {
      aes256gcmsiv_enc_msg_x8(in, out, tag, &enc_key_expanded, in_len & ~15);
    }
  }

  // Handle any trailing partial block the bulk routines skipped.
  if (in_len & 15) {
    aead_aes_gcm_siv_asm_crypt_last_block(gcm_siv_ctx->is_128_bit, out, in,
                                          in_len, tag, &enc_key_expanded);
  }

  OPENSSL_memcpy(out_tag, tag, sizeof(tag));
  *out_tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN;

  return 1;
}

// Open: decrypt using a counter derived from the claimed tag while
// accumulating POLYVAL over the recovered plaintext, then recompute and
// constant-time-compare the expected tag.
static int aead_aes_gcm_siv_asm_open_gather(
    const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag,
    size_t in_tag_len, const uint8_t *ad, size_t ad_len) {
  const uint64_t ad_len_64 = ad_len;
  if (ad_len_64 >= (UINT64_C(1) << 61)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  const uint64_t in_len_64 = in_len;
  if (in_len_64 > UINT64_C(1) << 36 ||
      in_tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  const struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx = asm_ctx_from_ctx(ctx);
  alignas(16) uint64_t record_auth_key[2];
  alignas(16) uint64_t record_enc_key[4];
  aead_aes_gcm_siv_kdf(gcm_siv_ctx->is_128_bit, gcm_siv_ctx, record_auth_key,
                       record_enc_key, nonce);

  struct aead_aes_gcm_siv_asm_ctx expanded_key;
  if (gcm_siv_ctx->is_128_bit) {
    aes128gcmsiv_aes_ks((const uint8_t *)record_enc_key, &expanded_key.key[0]);
  } else {
    aes256gcmsiv_aes_ks((const uint8_t *)record_enc_key, &expanded_key.key[0]);
  }

  // calculated_tag is 16*8 bytes, rather than 16 bytes, because
  // aes[128|256]gcmsiv_dec uses the extra as scratch space.
  alignas(16) uint8_t calculated_tag[16 * 8] = {0};

  OPENSSL_memset(calculated_tag, 0, EVP_AEAD_AES_GCM_SIV_TAG_LEN);
  const size_t ad_blocks = ad_len / 16;
  aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key, ad,
                           ad_blocks);

  uint8_t scratch[16];
  if (ad_len & 15) {
    OPENSSL_memset(scratch, 0, sizeof(scratch));
    OPENSSL_memcpy(scratch, &ad[ad_len & ~15], ad_len & 15);
    aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key,
                             scratch, 1);
  }

  alignas(16) uint8_t htable[16 * 6];
  aesgcmsiv_htable6_init(htable, (const uint8_t *)record_auth_key);

  // aes[128|256]gcmsiv_dec needs access to the claimed tag. So it's put into
  // its scratch space.
  memcpy(calculated_tag + 16, in_tag, EVP_AEAD_AES_GCM_SIV_TAG_LEN);
  if (gcm_siv_ctx->is_128_bit) {
    aes128gcmsiv_dec(in, out, calculated_tag, htable, &expanded_key, in_len);
  } else {
    aes256gcmsiv_dec(in, out, calculated_tag, htable, &expanded_key, in_len);
  }

  if (in_len & 15) {
    aead_aes_gcm_siv_asm_crypt_last_block(gcm_siv_ctx->is_128_bit, out, in,
                                          in_len, in_tag, &expanded_key);
    // Absorb the just-decrypted trailing partial block into POLYVAL.
    OPENSSL_memset(scratch, 0, sizeof(scratch));
    OPENSSL_memcpy(scratch, out + (in_len & ~15), in_len & 15);
    aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key,
                             scratch, 1);
  }

  uint8_t length_block[16];
  CRYPTO_store_u64_le(length_block, ad_len * 8);
  CRYPTO_store_u64_le(length_block + 8, in_len * 8);
  aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key,
                           length_block, 1);

  for (size_t i = 0; i < 12; i++) {
    calculated_tag[i] ^= nonce[i];
  }

  calculated_tag[15] &= 0x7f;

  if (gcm_siv_ctx->is_128_bit) {
    aes128gcmsiv_ecb_enc_block(calculated_tag, calculated_tag, &expanded_key);
  } else {
    aes256gcmsiv_ecb_enc_block(calculated_tag, calculated_tag, &expanded_key);
  }

  // Constant-time comparison of the recomputed and claimed tags.
  if (CRYPTO_memcmp(calculated_tag, in_tag, EVP_AEAD_AES_GCM_SIV_TAG_LEN) !=
      0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  return 1;
}

static const EVP_AEAD aead_aes_128_gcm_siv_asm = {
    16,                              // key length
    EVP_AEAD_AES_GCM_SIV_NONCE_LEN,  // nonce length
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // overhead
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // max tag length
    0,                               // seal_scatter_supports_extra_in

    aead_aes_gcm_siv_asm_init,
    NULL /* init_with_direction */,
    aead_aes_gcm_siv_asm_cleanup,
    NULL /* open */,
    aead_aes_gcm_siv_asm_seal_scatter,
    aead_aes_gcm_siv_asm_open_gather,
    NULL /* get_iv */,
    NULL /* tag_len */,
};

static const EVP_AEAD aead_aes_256_gcm_siv_asm = {
    32,                              // key length
    EVP_AEAD_AES_GCM_SIV_NONCE_LEN,  // nonce length
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // overhead
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // max tag length
    0,                               // seal_scatter_supports_extra_in

    aead_aes_gcm_siv_asm_init,
    NULL /* init_with_direction */,
    aead_aes_gcm_siv_asm_cleanup,
    NULL /* open */,
    aead_aes_gcm_siv_asm_seal_scatter,
    aead_aes_gcm_siv_asm_open_gather,
    NULL /* get_iv */,
    NULL /* tag_len */,
};

#endif  // X86_64 && !NO_ASM && !WINDOWS

namespace {
// Context for the portable (non-assembly) implementation: the key-generating
// key's AES schedule, its block function, and a key-size flag.
struct aead_aes_gcm_siv_ctx {
  union {
    double align;  // forces alignment suitable for the AES implementation
    AES_KEY ks;
  } ks;
  block128_f kgk_block;
  unsigned is_256 : 1;
};
}  // namespace

// The context must fit inside the opaque |state| buffer of EVP_AEAD_CTX.
static_assert(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
                  sizeof(struct aead_aes_gcm_siv_ctx),
              "AEAD state is too small");
static_assert(alignof(union evp_aead_ctx_st_state) >=
                  alignof(struct aead_aes_gcm_siv_ctx),
              "AEAD state has insufficient alignment");

// aead_aes_gcm_siv_init sets up the key-generating key schedule for the
// portable implementation and fixes the tag length at 16 bytes.
static int aead_aes_gcm_siv_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                 size_t key_len, size_t tag_len) {
  const size_t key_bits = key_len * 8;

  if (key_bits != 128 && key_bits != 256) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;  // EVP_AEAD_CTX_init should catch this.
  }

  if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN;
  }
  if (tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  struct aead_aes_gcm_siv_ctx *gcm_siv_ctx =
      (struct aead_aes_gcm_siv_ctx *)&ctx->state;
  OPENSSL_memset(gcm_siv_ctx, 0, sizeof(struct aead_aes_gcm_siv_ctx));

  aes_ctr_set_key(&gcm_siv_ctx->ks.ks, NULL, &gcm_siv_ctx->kgk_block, key,
                  key_len);
  gcm_siv_ctx->is_256 = (key_len == 32);
  ctx->tag_len = tag_len;

  return 1;
}

// No heap state is held in the context, so cleanup is a no-op.
static void aead_aes_gcm_siv_cleanup(EVP_AEAD_CTX *ctx) {}

// gcm_siv_crypt encrypts (or decrypts—it's the same thing) |in_len| bytes from
// |in| to |out|, using the block function |enc_block| with |key| in counter
// mode, starting at |initial_counter|. This differs from the traditional
// counter mode code in that the counter is handled little-endian, only the
// first four bytes are used and the GCM-SIV tweak to the final byte is
// applied. The |in| and |out| pointers may be equal but otherwise must not
// alias.
static void gcm_siv_crypt(uint8_t *out, const uint8_t *in, size_t in_len,
                          const uint8_t initial_counter[AES_BLOCK_SIZE],
                          block128_f enc_block, const AES_KEY *key) {
  uint8_t counter[16];

  OPENSSL_memcpy(counter, initial_counter, AES_BLOCK_SIZE);
  // GCM-SIV tweak: force the top bit of the final counter byte.
  counter[15] |= 0x80;

  for (size_t done = 0; done < in_len;) {
    uint8_t keystream[AES_BLOCK_SIZE];
    enc_block(counter, keystream, key);
    // Little-endian 32-bit increment of the first four counter bytes.
    CRYPTO_store_u32_le(counter, CRYPTO_load_u32_le(counter) + 1);

    size_t todo = AES_BLOCK_SIZE;
    if (in_len - done < todo) {
      todo = in_len - done;
    }

    for (size_t i = 0; i < todo; i++) {
      out[done + i] = keystream[i] ^ in[done + i];
    }

    done += todo;
  }
}

// gcm_siv_polyval evaluates POLYVAL at |auth_key| on the given plaintext and
// AD. The result is written to |out_tag|.
static void gcm_siv_polyval(
    uint8_t out_tag[16], const uint8_t *in, size_t in_len, const uint8_t *ad,
    size_t ad_len, const uint8_t auth_key[16],
    const uint8_t nonce[EVP_AEAD_AES_GCM_SIV_NONCE_LEN]) {
  struct polyval_ctx polyval_ctx;
  CRYPTO_POLYVAL_init(&polyval_ctx, auth_key);

  CRYPTO_POLYVAL_update_blocks(&polyval_ctx, ad, ad_len & ~15);

  uint8_t scratch[16];
  // Zero-pad any trailing partial AD block before absorbing it.
  if (ad_len & 15) {
    OPENSSL_memset(scratch, 0, sizeof(scratch));
    OPENSSL_memcpy(scratch, &ad[ad_len & ~15], ad_len & 15);
    CRYPTO_POLYVAL_update_blocks(&polyval_ctx, scratch, sizeof(scratch));
  }

  CRYPTO_POLYVAL_update_blocks(&polyval_ctx, in, in_len & ~15);
  // Zero-pad any trailing partial plaintext block before absorbing it.
  if (in_len & 15) {
    OPENSSL_memset(scratch, 0, sizeof(scratch));
    OPENSSL_memcpy(scratch, &in[in_len & ~15], in_len & 15);
    CRYPTO_POLYVAL_update_blocks(&polyval_ctx, scratch, sizeof(scratch));
  }

  // Final length block: bit lengths of AD and plaintext, little-endian.
  uint8_t length_block[16];
  CRYPTO_store_u64_le(length_block, ((uint64_t)ad_len) * 8);
  CRYPTO_store_u64_le(length_block + 8, ((uint64_t)in_len) * 8);
  CRYPTO_POLYVAL_update_blocks(&polyval_ctx, length_block,
                               sizeof(length_block));

  CRYPTO_POLYVAL_finish(&polyval_ctx, out_tag);
  // XOR in the nonce and clear the top bit, per the AES-GCM-SIV tag
  // derivation.
  for (size_t i = 0; i < EVP_AEAD_AES_GCM_SIV_NONCE_LEN; i++) {
    out_tag[i] ^= nonce[i];
  }
  out_tag[15] &= 0x7f;
}

namespace {
// gcm_siv_record_keys contains the keys used for a specific GCM-SIV record.
struct gcm_siv_record_keys {
  uint8_t auth_key[16];
  union {
    double align;  // forces alignment suitable for the AES implementation
    AES_KEY ks;
  } enc_key;
  block128_f enc_block;
};
}  // namespace

// gcm_siv_keys calculates the keys for a specific GCM-SIV record with the
// given nonce and writes them to |*out_keys|.
static void gcm_siv_keys(const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx,
                         struct gcm_siv_record_keys *out_keys,
                         const uint8_t nonce[EVP_AEAD_AES_GCM_SIV_NONCE_LEN]) {
  const AES_KEY *const key = &gcm_siv_ctx->ks.ks;
  uint8_t key_material[(128 /* POLYVAL key */ + 256 /* max AES key */) / 8];
  const size_t blocks_needed = gcm_siv_ctx->is_256 ? 6 : 4;

  // Counter block: zeros followed by the nonce; the first byte carries the
  // block index. Only the low 8 bytes of each AES output are kept, per the
  // AES-GCM-SIV KDF.
  uint8_t counter[AES_BLOCK_SIZE];
  OPENSSL_memset(counter, 0, AES_BLOCK_SIZE - EVP_AEAD_AES_GCM_SIV_NONCE_LEN);
  OPENSSL_memcpy(counter + AES_BLOCK_SIZE - EVP_AEAD_AES_GCM_SIV_NONCE_LEN,
                 nonce, EVP_AEAD_AES_GCM_SIV_NONCE_LEN);
  for (size_t i = 0; i < blocks_needed; i++) {
    counter[0] = i;

    uint8_t ciphertext[AES_BLOCK_SIZE];
    gcm_siv_ctx->kgk_block(counter, ciphertext, key);
    OPENSSL_memcpy(&key_material[i * 8], ciphertext, 8);
  }

  OPENSSL_memcpy(out_keys->auth_key, key_material, 16);
  // Note the |ctr128_f| function uses a big-endian counter, while AES-GCM-SIV
  // uses a little-endian counter. We ignore the return value and only use
  // |block128_f|. This has a significant performance cost for the fallback
  // bitsliced AES implementations (bsaes and aes_nohw).
  //
  // We currently do not consider AES-GCM-SIV to be performance-sensitive on
  // client hardware. If this changes, we can write little-endian |ctr128_f|
  // functions.
  aes_ctr_set_key(&out_keys->enc_key.ks, NULL, &out_keys->enc_block,
                  key_material + 16, gcm_siv_ctx->is_256 ? 32 : 16);
}

// Seal (portable): derive record keys, POLYVAL the plaintext and AD, encrypt
// the POLYVAL output into the tag, then CTR-encrypt with a counter derived
// from that tag.
static int aead_aes_gcm_siv_seal_scatter(
    const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
    size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx =
      (struct aead_aes_gcm_siv_ctx *)&ctx->state;
  const uint64_t in_len_64 = in_len;
  const uint64_t ad_len_64 = ad_len;

  // RFC 8452 limits, plus an overflow check on in_len + tag.
  if (in_len + EVP_AEAD_AES_GCM_SIV_TAG_LEN < in_len ||
      in_len_64 > (UINT64_C(1) << 36) || ad_len_64 >= (UINT64_C(1) << 61)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_tag_len < EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  struct gcm_siv_record_keys keys;
  gcm_siv_keys(gcm_siv_ctx, &keys, nonce);

  uint8_t tag[16];
  gcm_siv_polyval(tag, in, in_len, ad, ad_len, keys.auth_key, nonce);
  keys.enc_block(tag, tag, &keys.enc_key.ks);

  gcm_siv_crypt(out, in, in_len, tag, keys.enc_block, &keys.enc_key.ks);

  OPENSSL_memcpy(out_tag, tag, EVP_AEAD_AES_GCM_SIV_TAG_LEN);
  *out_tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN;

  return 1;
}

// Open (portable): decrypt with a counter derived from the claimed tag,
// recompute the expected tag over the recovered plaintext, and compare in
// constant time.
static int aead_aes_gcm_siv_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                        const uint8_t *nonce, size_t nonce_len,
                                        const uint8_t *in, size_t in_len,
                                        const uint8_t *in_tag,
                                        size_t in_tag_len, const uint8_t *ad,
                                        size_t ad_len) {
  const uint64_t ad_len_64 = ad_len;
  if (ad_len_64 >= (UINT64_C(1) << 61)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  const uint64_t in_len_64 = in_len;
  if (in_tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN ||
      in_len_64 > (UINT64_C(1) << 36) + AES_BLOCK_SIZE) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx =
      (struct aead_aes_gcm_siv_ctx *)&ctx->state;

  struct gcm_siv_record_keys keys;
  gcm_siv_keys(gcm_siv_ctx, &keys, nonce);

  gcm_siv_crypt(out, in, in_len, in_tag, keys.enc_block, &keys.enc_key.ks);

  uint8_t expected_tag[EVP_AEAD_AES_GCM_SIV_TAG_LEN];
  gcm_siv_polyval(expected_tag, out, in_len, ad, ad_len, keys.auth_key, nonce);
  keys.enc_block(expected_tag, expected_tag, &keys.enc_key.ks);

  // Constant-time comparison of the recomputed and claimed tags.
  if (CRYPTO_memcmp(expected_tag, in_tag, sizeof(expected_tag)) != 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  return 1;
}

static const EVP_AEAD aead_aes_128_gcm_siv = {
    16,                              // key length
    EVP_AEAD_AES_GCM_SIV_NONCE_LEN,  // nonce length
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // overhead
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // max tag length
    0,                               // seal_scatter_supports_extra_in

    aead_aes_gcm_siv_init,
    NULL /* init_with_direction */,
    aead_aes_gcm_siv_cleanup,
    NULL /* open */,
    aead_aes_gcm_siv_seal_scatter,
    aead_aes_gcm_siv_open_gather,
    NULL /* get_iv */,
    NULL /* tag_len */,
};

static const EVP_AEAD aead_aes_256_gcm_siv = {
    32,                              // key length
    EVP_AEAD_AES_GCM_SIV_NONCE_LEN,  // nonce length
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // overhead
    EVP_AEAD_AES_GCM_SIV_TAG_LEN,    // max tag length
    0,                               // seal_scatter_supports_extra_in

    aead_aes_gcm_siv_init,
    NULL /* init_with_direction */,
    aead_aes_gcm_siv_cleanup,
    NULL /* open */,
    aead_aes_gcm_siv_seal_scatter,
    aead_aes_gcm_siv_open_gather,
    NULL /* get_iv */,
    NULL /* tag_len */,
};

#if defined(AES_GCM_SIV_ASM)

// When the assembly implementation is compiled in, prefer it on CPUs with
// AVX and AES-NI; otherwise fall back to the portable implementation.
const EVP_AEAD *EVP_aead_aes_128_gcm_siv(void) {
  if (CRYPTO_is_AVX_capable() && CRYPTO_is_AESNI_capable()) {
    return &aead_aes_128_gcm_siv_asm;
  }
  return &aead_aes_128_gcm_siv;
}

const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void) {
  if (CRYPTO_is_AVX_capable() && CRYPTO_is_AESNI_capable()) {
    return &aead_aes_256_gcm_siv_asm;
  }
  return &aead_aes_256_gcm_siv;
}

#else

const EVP_AEAD *EVP_aead_aes_128_gcm_siv(void) {
  return &aead_aes_128_gcm_siv;
}

const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void) {
  return &aead_aes_256_gcm_siv;
}
#endif // AES_GCM_SIV_ASM ================================================ FILE: Sources/CNIOBoringSSL/crypto/cipher/e_chacha20poly1305.cc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include "internal.h" #include "../chacha/internal.h" #include "../fipsmodule/cipher/internal.h" #include "../internal.h" struct aead_chacha20_poly1305_ctx { uint8_t key[32]; }; static_assert(sizeof(((EVP_AEAD_CTX *)NULL)->state) >= sizeof(struct aead_chacha20_poly1305_ctx), "AEAD state is too small"); static_assert(alignof(union evp_aead_ctx_st_state) >= alignof(struct aead_chacha20_poly1305_ctx), "AEAD state has insufficient alignment"); static int aead_chacha20_poly1305_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len) { struct aead_chacha20_poly1305_ctx *c20_ctx = (struct aead_chacha20_poly1305_ctx *)&ctx->state; if (tag_len == 0) { tag_len = POLY1305_TAG_LEN; } if (tag_len > POLY1305_TAG_LEN) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } if (key_len != sizeof(c20_ctx->key)) { return 0; // internal error - EVP_AEAD_CTX_init should catch this. 
} OPENSSL_memcpy(c20_ctx->key, key, key_len); ctx->tag_len = tag_len; return 1; } static void aead_chacha20_poly1305_cleanup(EVP_AEAD_CTX *ctx) {} static void poly1305_update_length(poly1305_state *poly1305, size_t data_len) { uint8_t length_bytes[8]; for (unsigned i = 0; i < sizeof(length_bytes); i++) { length_bytes[i] = data_len; data_len >>= 8; } CRYPTO_poly1305_update(poly1305, length_bytes, sizeof(length_bytes)); } // calc_tag fills |tag| with the authentication tag for the given inputs. static void calc_tag(uint8_t tag[POLY1305_TAG_LEN], const uint8_t *key, const uint8_t nonce[12], const uint8_t *ad, size_t ad_len, const uint8_t *ciphertext, size_t ciphertext_len, const uint8_t *ciphertext_extra, size_t ciphertext_extra_len) { alignas(16) uint8_t poly1305_key[32]; OPENSSL_memset(poly1305_key, 0, sizeof(poly1305_key)); CRYPTO_chacha_20(poly1305_key, poly1305_key, sizeof(poly1305_key), key, nonce, 0); static const uint8_t padding[16] = { 0 }; // Padding is all zeros. poly1305_state ctx; CRYPTO_poly1305_init(&ctx, poly1305_key); CRYPTO_poly1305_update(&ctx, ad, ad_len); if (ad_len % 16 != 0) { CRYPTO_poly1305_update(&ctx, padding, sizeof(padding) - (ad_len % 16)); } CRYPTO_poly1305_update(&ctx, ciphertext, ciphertext_len); CRYPTO_poly1305_update(&ctx, ciphertext_extra, ciphertext_extra_len); const size_t ciphertext_total = ciphertext_len + ciphertext_extra_len; if (ciphertext_total % 16 != 0) { CRYPTO_poly1305_update(&ctx, padding, sizeof(padding) - (ciphertext_total % 16)); } poly1305_update_length(&ctx, ad_len); poly1305_update_length(&ctx, ciphertext_total); CRYPTO_poly1305_finish(&ctx, tag); } static int chacha20_poly1305_seal_scatter( const uint8_t *key, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, size_t extra_in_len, const uint8_t *ad, size_t ad_len, size_t tag_len) { if (extra_in_len + tag_len < tag_len) { 
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } if (max_out_tag_len < tag_len + extra_in_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } if (nonce_len != 12) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); return 0; } // |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow // individual operations that work on more than 256GB at a time. // |in_len_64| is needed because, on 32-bit platforms, size_t is only // 32-bits and this produces a warning because it's always false. // Casting to uint64_t inside the conditional is not sufficient to stop // the warning. const uint64_t in_len_64 = in_len; if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } if (max_out_tag_len < tag_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } // The the extra input is given, it is expected to be very short and so is // encrypted byte-by-byte first. if (extra_in_len) { static const size_t kChaChaBlockSize = 64; uint32_t block_counter = (uint32_t)(1 + (in_len / kChaChaBlockSize)); size_t offset = in_len % kChaChaBlockSize; uint8_t block[64 /* kChaChaBlockSize */]; for (size_t done = 0; done < extra_in_len; block_counter++) { memset(block, 0, sizeof(block)); CRYPTO_chacha_20(block, block, sizeof(block), key, nonce, block_counter); for (size_t i = offset; i < sizeof(block) && done < extra_in_len; i++, done++) { out_tag[done] = extra_in[done] ^ block[i]; } offset = 0; } } union chacha20_poly1305_seal_data data; if (chacha20_poly1305_asm_capable()) { OPENSSL_memcpy(data.in.key, key, 32); data.in.counter = 0; OPENSSL_memcpy(data.in.nonce, nonce, 12); data.in.extra_ciphertext = out_tag; data.in.extra_ciphertext_len = extra_in_len; chacha20_poly1305_seal(out, in, in_len, ad, ad_len, &data); } else { CRYPTO_chacha_20(out, in, in_len, key, nonce, 1); calc_tag(data.out.tag, key, nonce, ad, ad_len, out, in_len, out_tag, extra_in_len); } OPENSSL_memcpy(out_tag 
+ extra_in_len, data.out.tag, tag_len); *out_tag_len = extra_in_len + tag_len; return 1; } static int aead_chacha20_poly1305_seal_scatter( const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, size_t extra_in_len, const uint8_t *ad, size_t ad_len) { const struct aead_chacha20_poly1305_ctx *c20_ctx = (struct aead_chacha20_poly1305_ctx *)&ctx->state; return chacha20_poly1305_seal_scatter( c20_ctx->key, out, out_tag, out_tag_len, max_out_tag_len, nonce, nonce_len, in, in_len, extra_in, extra_in_len, ad, ad_len, ctx->tag_len); } static int aead_xchacha20_poly1305_seal_scatter( const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, size_t extra_in_len, const uint8_t *ad, size_t ad_len) { const struct aead_chacha20_poly1305_ctx *c20_ctx = (struct aead_chacha20_poly1305_ctx *)&ctx->state; if (nonce_len != 24) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); return 0; } alignas(4) uint8_t derived_key[32]; alignas(4) uint8_t derived_nonce[12]; CRYPTO_hchacha20(derived_key, c20_ctx->key, nonce); OPENSSL_memset(derived_nonce, 0, 4); OPENSSL_memcpy(&derived_nonce[4], &nonce[16], 8); return chacha20_poly1305_seal_scatter( derived_key, out, out_tag, out_tag_len, max_out_tag_len, derived_nonce, sizeof(derived_nonce), in, in_len, extra_in, extra_in_len, ad, ad_len, ctx->tag_len); } static int chacha20_poly1305_open_gather( const uint8_t *key, uint8_t *out, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag, size_t in_tag_len, const uint8_t *ad, size_t ad_len, size_t tag_len) { if (nonce_len != 12) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); return 0; } if (in_tag_len != tag_len) { OPENSSL_PUT_ERROR(CIPHER, 
CIPHER_R_BAD_DECRYPT); return 0; } // |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow // individual operations that work on more than 256GB at a time. // |in_len_64| is needed because, on 32-bit platforms, size_t is only // 32-bits and this produces a warning because it's always false. // Casting to uint64_t inside the conditional is not sufficient to stop // the warning. const uint64_t in_len_64 = in_len; if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } union chacha20_poly1305_open_data data; if (chacha20_poly1305_asm_capable()) { OPENSSL_memcpy(data.in.key, key, 32); data.in.counter = 0; OPENSSL_memcpy(data.in.nonce, nonce, 12); chacha20_poly1305_open(out, in, in_len, ad, ad_len, &data); } else { calc_tag(data.out.tag, key, nonce, ad, ad_len, in, in_len, NULL, 0); CRYPTO_chacha_20(out, in, in_len, key, nonce, 1); } if (CRYPTO_memcmp(data.out.tag, in_tag, tag_len) != 0) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } return 1; } static int aead_chacha20_poly1305_open_gather( const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag, size_t in_tag_len, const uint8_t *ad, size_t ad_len) { const struct aead_chacha20_poly1305_ctx *c20_ctx = (struct aead_chacha20_poly1305_ctx *)&ctx->state; return chacha20_poly1305_open_gather(c20_ctx->key, out, nonce, nonce_len, in, in_len, in_tag, in_tag_len, ad, ad_len, ctx->tag_len); } static int aead_xchacha20_poly1305_open_gather( const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag, size_t in_tag_len, const uint8_t *ad, size_t ad_len) { const struct aead_chacha20_poly1305_ctx *c20_ctx = (struct aead_chacha20_poly1305_ctx *)&ctx->state; if (nonce_len != 24) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); return 0; } alignas(4) uint8_t derived_key[32]; alignas(4) 
uint8_t derived_nonce[12]; CRYPTO_hchacha20(derived_key, c20_ctx->key, nonce); OPENSSL_memset(derived_nonce, 0, 4); OPENSSL_memcpy(&derived_nonce[4], &nonce[16], 8); return chacha20_poly1305_open_gather( derived_key, out, derived_nonce, sizeof(derived_nonce), in, in_len, in_tag, in_tag_len, ad, ad_len, ctx->tag_len); } static const EVP_AEAD aead_chacha20_poly1305 = { 32, // key len 12, // nonce len POLY1305_TAG_LEN, // overhead POLY1305_TAG_LEN, // max tag length 1, // seal_scatter_supports_extra_in aead_chacha20_poly1305_init, NULL, // init_with_direction aead_chacha20_poly1305_cleanup, NULL /* open */, aead_chacha20_poly1305_seal_scatter, aead_chacha20_poly1305_open_gather, NULL, // get_iv NULL, // tag_len }; static const EVP_AEAD aead_xchacha20_poly1305 = { 32, // key len 24, // nonce len POLY1305_TAG_LEN, // overhead POLY1305_TAG_LEN, // max tag length 1, // seal_scatter_supports_extra_in aead_chacha20_poly1305_init, NULL, // init_with_direction aead_chacha20_poly1305_cleanup, NULL /* open */, aead_xchacha20_poly1305_seal_scatter, aead_xchacha20_poly1305_open_gather, NULL, // get_iv NULL, // tag_len }; const EVP_AEAD *EVP_aead_chacha20_poly1305(void) { return &aead_chacha20_poly1305; } const EVP_AEAD *EVP_aead_xchacha20_poly1305(void) { return &aead_xchacha20_poly1305; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/cipher/e_des.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include "../des/internal.h" #include "../fipsmodule/cipher/internal.h" #include "internal.h" typedef struct { union { double align; DES_key_schedule ks; } ks; } EVP_DES_KEY; static int des_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv, int enc) { EVP_DES_KEY *dat = (EVP_DES_KEY *)ctx->cipher_data; DES_set_key_ex(key, &dat->ks.ks); return 1; } static int des_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t in_len) { EVP_DES_KEY *dat = (EVP_DES_KEY *)ctx->cipher_data; DES_ncbc_encrypt_ex(in, out, in_len, &dat->ks.ks, ctx->iv, ctx->encrypt); return 1; } static const EVP_CIPHER evp_des_cbc = { /*nid=*/NID_des_cbc, /*block_size=*/8, /*key_len=*/8, /*iv_len=*/8, /*ctx_size=*/sizeof(EVP_DES_KEY), /*flags=*/EVP_CIPH_CBC_MODE, /*init=*/des_init_key, /*cipher=*/des_cbc_cipher, /*cleanup=*/nullptr, /*ctrl=*/nullptr, }; const EVP_CIPHER *EVP_des_cbc(void) { return &evp_des_cbc; } static int des_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t in_len) { if (in_len < ctx->cipher->block_size) { return 1; } in_len -= ctx->cipher->block_size; EVP_DES_KEY *dat = (EVP_DES_KEY *)ctx->cipher_data; for (size_t i = 0; i <= in_len; i += ctx->cipher->block_size) { DES_ecb_encrypt_ex(in + i, out + i, &dat->ks.ks, ctx->encrypt); } return 1; } static const EVP_CIPHER evp_des_ecb = { /*nid=*/NID_des_ecb, /*block_size=*/8, /*key_len=*/8, /*iv_len=*/0, /*ctx_size=*/sizeof(EVP_DES_KEY), /*flags=*/EVP_CIPH_ECB_MODE, /*init=*/des_init_key, /*cipher=*/des_ecb_cipher, /*cleanup=*/nullptr, /*ctrl=*/nullptr, }; const EVP_CIPHER *EVP_des_ecb(void) { return &evp_des_ecb; } typedef struct { union { double align; DES_key_schedule ks[3]; } ks; } DES_EDE_KEY; static int des_ede3_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv, int enc) { DES_EDE_KEY *dat = (DES_EDE_KEY 
*)ctx->cipher_data; DES_set_key_ex(key, &dat->ks.ks[0]); DES_set_key_ex(key + 8, &dat->ks.ks[1]); DES_set_key_ex(key + 16, &dat->ks.ks[2]); return 1; } static int des_ede3_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t in_len) { DES_EDE_KEY *dat = (DES_EDE_KEY *)ctx->cipher_data; DES_ede3_cbc_encrypt_ex(in, out, in_len, &dat->ks.ks[0], &dat->ks.ks[1], &dat->ks.ks[2], ctx->iv, ctx->encrypt); return 1; } static const EVP_CIPHER evp_des_ede3_cbc = { /*nid=*/NID_des_ede3_cbc, /*block_size=*/8, /*key_len=*/24, /*iv_len=*/8, /*ctx_size=*/sizeof(DES_EDE_KEY), /*flags=*/EVP_CIPH_CBC_MODE, /*init=*/des_ede3_init_key, /*cipher=*/des_ede3_cbc_cipher, /*cleanup=*/nullptr, /*ctrl=*/nullptr, }; const EVP_CIPHER *EVP_des_ede3_cbc(void) { return &evp_des_ede3_cbc; } static int des_ede_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv, int enc) { DES_EDE_KEY *dat = (DES_EDE_KEY *)ctx->cipher_data; // 2-DES is 3-DES with the first key used twice. DES_set_key_ex(key, &dat->ks.ks[0]); DES_set_key_ex(key + 8, &dat->ks.ks[1]); DES_set_key_ex(key, &dat->ks.ks[2]); return 1; } static const EVP_CIPHER evp_des_ede_cbc = { /*nid=*/NID_des_ede_cbc, /*block_size=*/8, /*key_len=*/16, /*iv_len=*/8, /*ctx_size=*/sizeof(DES_EDE_KEY), /*flags=*/EVP_CIPH_CBC_MODE, /*init=*/des_ede_init_key, /*cipher=*/des_ede3_cbc_cipher, /*cleanup=*/nullptr, /*ctrl=*/nullptr, }; const EVP_CIPHER *EVP_des_ede_cbc(void) { return &evp_des_ede_cbc; } static int des_ede_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t in_len) { if (in_len < ctx->cipher->block_size) { return 1; } in_len -= ctx->cipher->block_size; DES_EDE_KEY *dat = (DES_EDE_KEY *)ctx->cipher_data; for (size_t i = 0; i <= in_len; i += ctx->cipher->block_size) { DES_ecb3_encrypt_ex(in + i, out + i, &dat->ks.ks[0], &dat->ks.ks[1], &dat->ks.ks[2], ctx->encrypt); } return 1; } static const EVP_CIPHER evp_des_ede = { /*nid=*/NID_des_ede_ecb, /*block_size=*/8, /*key_len=*/16, /*iv_len=*/0, 
/*ctx_size=*/sizeof(DES_EDE_KEY), /*flags=*/EVP_CIPH_ECB_MODE, /*init=*/des_ede_init_key, /*cipher=*/des_ede_ecb_cipher, /*cleanup=*/nullptr, /*ctrl=*/nullptr, }; const EVP_CIPHER *EVP_des_ede(void) { return &evp_des_ede; } static const EVP_CIPHER evp_des_ede3 = { /*nid=*/NID_des_ede3_ecb, /*block_size=*/8, /*key_len=*/24, /*iv_len=*/0, /*ctx_size=*/sizeof(DES_EDE_KEY), /*flags=*/EVP_CIPH_ECB_MODE, /*init=*/des_ede3_init_key, /*cipher=*/des_ede_ecb_cipher, /*cleanup=*/nullptr, /*ctrl=*/nullptr, }; const EVP_CIPHER *EVP_des_ede3(void) { return &evp_des_ede3; } const EVP_CIPHER *EVP_des_ede3_ecb(void) { return EVP_des_ede3(); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/cipher/e_null.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include "../fipsmodule/cipher/internal.h" #include "../internal.h" static int null_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv, int enc) { return 1; } static int null_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t in_len) { if (in != out) { OPENSSL_memcpy(out, in, in_len); } return 1; } static const EVP_CIPHER n_cipher = { /*nid=*/NID_undef, /*block_size=*/1, /*key_len=*/0, /*iv_len=*/0, /*ctx_size=*/0, /*flags=*/0, /*init=*/null_init_key, /*cipher=*/null_cipher, /*cleanup=*/nullptr, /*ctrl=*/nullptr, }; const EVP_CIPHER *EVP_enc_null(void) { return &n_cipher; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/cipher/e_rc2.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. 
* * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "../fipsmodule/cipher/internal.h" #include "../internal.h" #define c2l(c, l) \ do { \ (l) = ((uint32_t)(*((c)++))); \ (l) |= ((uint32_t)(*((c)++))) << 8L; \ (l) |= ((uint32_t)(*((c)++))) << 16L; \ (l) |= ((uint32_t)(*((c)++))) << 24L; \ } while (0) #define c2ln(c, l1, l2, n) \ do { \ (c) += (n); \ (l1) = (l2) = 0; \ switch (n) { \ case 8: \ (l2) = ((uint32_t)(*(--(c)))) << 24L; \ [[fallthrough]]; \ case 7: \ (l2) |= ((uint32_t)(*(--(c)))) << 16L; \ [[fallthrough]]; \ case 6: \ (l2) |= ((uint32_t)(*(--(c)))) << 8L; \ [[fallthrough]]; \ case 5: \ (l2) |= ((uint32_t)(*(--(c)))); \ [[fallthrough]]; \ case 4: \ (l1) = ((uint32_t)(*(--(c)))) << 24L; \ [[fallthrough]]; \ case 3: \ (l1) |= ((uint32_t)(*(--(c)))) << 16L; \ [[fallthrough]]; \ case 2: \ (l1) |= ((uint32_t)(*(--(c)))) << 8L; \ [[fallthrough]]; \ case 1: \ (l1) |= ((uint32_t)(*(--(c)))); \ } \ } while (0) #define l2c(l, c) \ do { \ *((c)++) = (uint8_t)(((l)) & 0xff); \ *((c)++) = (uint8_t)(((l) >> 8L) & 0xff); \ *((c)++) = (uint8_t)(((l) >> 16L) & 0xff); \ *((c)++) = (uint8_t)(((l) >> 24L) & 0xff); \ } while (0) #define l2cn(l1, l2, c, n) \ do { \ (c) += (n); \ switch (n) { \ case 8: \ *(--(c)) = (uint8_t)(((l2) >> 24L) & 0xff); \ [[fallthrough]]; \ case 7: \ *(--(c)) = (uint8_t)(((l2) >> 16L) & 0xff); \ [[fallthrough]]; \ case 6: \ *(--(c)) = (uint8_t)(((l2) >> 8L) & 0xff); \ [[fallthrough]]; \ case 5: \ *(--(c)) = (uint8_t)(((l2)) & 0xff); \ [[fallthrough]]; \ case 4: \ *(--(c)) = (uint8_t)(((l1) >> 24L) & 0xff); \ [[fallthrough]]; \ case 3: \ *(--(c)) = (uint8_t)(((l1) >> 16L) & 0xff); \ [[fallthrough]]; \ case 2: \ *(--(c)) = (uint8_t)(((l1) >> 8L) & 0xff); \ [[fallthrough]]; \ case 1: \ *(--(c)) = (uint8_t)(((l1)) & 0xff); \ } \ } 
while (0) typedef struct rc2_key_st { uint16_t data[64]; } RC2_KEY; static void RC2_encrypt(uint32_t *d, RC2_KEY *key) { int i, n; uint16_t *p0, *p1; uint16_t x0, x1, x2, x3, t; uint32_t l; l = d[0]; x0 = (uint16_t)l & 0xffff; x1 = (uint16_t)(l >> 16L); l = d[1]; x2 = (uint16_t)l & 0xffff; x3 = (uint16_t)(l >> 16L); n = 3; i = 5; p0 = p1 = &key->data[0]; for (;;) { t = (x0 + (x1 & ~x3) + (x2 & x3) + *(p0++)) & 0xffff; x0 = (t << 1) | (t >> 15); t = (x1 + (x2 & ~x0) + (x3 & x0) + *(p0++)) & 0xffff; x1 = (t << 2) | (t >> 14); t = (x2 + (x3 & ~x1) + (x0 & x1) + *(p0++)) & 0xffff; x2 = (t << 3) | (t >> 13); t = (x3 + (x0 & ~x2) + (x1 & x2) + *(p0++)) & 0xffff; x3 = (t << 5) | (t >> 11); if (--i == 0) { if (--n == 0) { break; } i = (n == 2) ? 6 : 5; x0 += p1[x3 & 0x3f]; x1 += p1[x0 & 0x3f]; x2 += p1[x1 & 0x3f]; x3 += p1[x2 & 0x3f]; } } d[0] = (uint32_t)(x0 & 0xffff) | ((uint32_t)(x1 & 0xffff) << 16L); d[1] = (uint32_t)(x2 & 0xffff) | ((uint32_t)(x3 & 0xffff) << 16L); } static void RC2_decrypt(uint32_t *d, RC2_KEY *key) { int i, n; uint16_t *p0, *p1; uint16_t x0, x1, x2, x3, t; uint32_t l; l = d[0]; x0 = (uint16_t)l & 0xffff; x1 = (uint16_t)(l >> 16L); l = d[1]; x2 = (uint16_t)l & 0xffff; x3 = (uint16_t)(l >> 16L); n = 3; i = 5; p0 = &key->data[63]; p1 = &key->data[0]; for (;;) { t = ((x3 << 11) | (x3 >> 5)) & 0xffff; x3 = (t - (x0 & ~x2) - (x1 & x2) - *(p0--)) & 0xffff; t = ((x2 << 13) | (x2 >> 3)) & 0xffff; x2 = (t - (x3 & ~x1) - (x0 & x1) - *(p0--)) & 0xffff; t = ((x1 << 14) | (x1 >> 2)) & 0xffff; x1 = (t - (x2 & ~x0) - (x3 & x0) - *(p0--)) & 0xffff; t = ((x0 << 15) | (x0 >> 1)) & 0xffff; x0 = (t - (x1 & ~x3) - (x2 & x3) - *(p0--)) & 0xffff; if (--i == 0) { if (--n == 0) { break; } i = (n == 2) ? 
6 : 5; x3 = (x3 - p1[x2 & 0x3f]) & 0xffff; x2 = (x2 - p1[x1 & 0x3f]) & 0xffff; x1 = (x1 - p1[x0 & 0x3f]) & 0xffff; x0 = (x0 - p1[x3 & 0x3f]) & 0xffff; } } d[0] = (uint32_t)(x0 & 0xffff) | ((uint32_t)(x1 & 0xffff) << 16L); d[1] = (uint32_t)(x2 & 0xffff) | ((uint32_t)(x3 & 0xffff) << 16L); } static void RC2_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, RC2_KEY *ks, uint8_t *iv, int encrypt) { uint32_t tin0, tin1; uint32_t tout0, tout1, xor0, xor1; long l = length; uint32_t tin[2]; if (encrypt) { c2l(iv, tout0); c2l(iv, tout1); iv -= 8; for (l -= 8; l >= 0; l -= 8) { c2l(in, tin0); c2l(in, tin1); tin0 ^= tout0; tin1 ^= tout1; tin[0] = tin0; tin[1] = tin1; RC2_encrypt(tin, ks); tout0 = tin[0]; l2c(tout0, out); tout1 = tin[1]; l2c(tout1, out); } if (l != -8) { c2ln(in, tin0, tin1, l + 8); tin0 ^= tout0; tin1 ^= tout1; tin[0] = tin0; tin[1] = tin1; RC2_encrypt(tin, ks); tout0 = tin[0]; l2c(tout0, out); tout1 = tin[1]; l2c(tout1, out); } l2c(tout0, iv); l2c(tout1, iv); } else { c2l(iv, xor0); c2l(iv, xor1); iv -= 8; for (l -= 8; l >= 0; l -= 8) { c2l(in, tin0); tin[0] = tin0; c2l(in, tin1); tin[1] = tin1; RC2_decrypt(tin, ks); tout0 = tin[0] ^ xor0; tout1 = tin[1] ^ xor1; l2c(tout0, out); l2c(tout1, out); xor0 = tin0; xor1 = tin1; } if (l != -8) { c2l(in, tin0); tin[0] = tin0; c2l(in, tin1); tin[1] = tin1; RC2_decrypt(tin, ks); tout0 = tin[0] ^ xor0; tout1 = tin[1] ^ xor1; l2cn(tout0, tout1, out, l + 8); xor0 = tin0; xor1 = tin1; } l2c(xor0, iv); l2c(xor1, iv); } tin[0] = tin[1] = 0; } static const uint8_t key_table[256] = { 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 
0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, }; static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) { int i, j; uint8_t *k; uint16_t *ki; unsigned int c, d; k = (uint8_t *)&key->data[0]; *k = 0; // for if there is a zero length key if (len > 128) { len = 128; } if (bits <= 0) { bits = 1024; } if (bits > 1024) { bits = 1024; } for (i = 0; i < len; i++) { k[i] = data[i]; } // expand table d = k[len - 1]; j = 0; for (i = len; i < 128; i++, j++) { d = key_table[(k[j] + d) & 0xff]; k[i] = d; } // hmm.... 
key reduction to 'bits' bits j = (bits + 7) >> 3; i = 128 - j; c = (0xff >> (-bits & 0x07)); d = key_table[k[i] & c]; k[i] = d; while (i--) { d = key_table[k[i + j] ^ d]; k[i] = d; } // copy from bytes into uint16_t's ki = &(key->data[63]); for (i = 127; i >= 0; i -= 2) { *(ki--) = ((k[i] << 8) | k[i - 1]) & 0xffff; } } typedef struct { int key_bits; // effective key bits RC2_KEY ks; // key schedule } EVP_RC2_KEY; static int rc2_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv, int enc) { EVP_RC2_KEY *rc2_key = (EVP_RC2_KEY *)ctx->cipher_data; RC2_set_key(&rc2_key->ks, EVP_CIPHER_CTX_key_length(ctx), key, rc2_key->key_bits); return 1; } static int rc2_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t inl) { EVP_RC2_KEY *key = (EVP_RC2_KEY *)ctx->cipher_data; static const size_t kChunkSize = 0x10000; while (inl >= kChunkSize) { RC2_cbc_encrypt(in, out, kChunkSize, &key->ks, ctx->iv, ctx->encrypt); inl -= kChunkSize; in += kChunkSize; out += kChunkSize; } if (inl) { RC2_cbc_encrypt(in, out, inl, &key->ks, ctx->iv, ctx->encrypt); } return 1; } static int rc2_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) { EVP_RC2_KEY *key = (EVP_RC2_KEY *)ctx->cipher_data; switch (type) { case EVP_CTRL_INIT: key->key_bits = EVP_CIPHER_CTX_key_length(ctx) * 8; return 1; case EVP_CTRL_SET_RC2_KEY_BITS: // Should be overridden by later call to |EVP_CTRL_INIT|, but // people call it, so it may as well work. 
key->key_bits = arg; return 1; default: return -1; } } static const EVP_CIPHER rc2_40_cbc = { /*nid=*/NID_rc2_40_cbc, /*block_size=*/8, /*key_len=*/5 /* 40 bit */, /*iv_len=*/8, /*ctx_size=*/sizeof(EVP_RC2_KEY), /*flags=*/EVP_CIPH_CBC_MODE | EVP_CIPH_VARIABLE_LENGTH | EVP_CIPH_CTRL_INIT, /*init=*/rc2_init_key, /*cipher=*/rc2_cbc_cipher, /*cleanup=*/nullptr, /*ctrl=*/rc2_ctrl, }; const EVP_CIPHER *EVP_rc2_40_cbc(void) { return &rc2_40_cbc; } static const EVP_CIPHER rc2_cbc = { /*nid=*/NID_rc2_cbc, /*block_size=*/8, /*key_len=*/16 /* 128 bit */, /*iv_len=*/8, /*ctx_size=*/sizeof(EVP_RC2_KEY), /*flags=*/EVP_CIPH_CBC_MODE | EVP_CIPH_VARIABLE_LENGTH | EVP_CIPH_CTRL_INIT, /*init=*/rc2_init_key, /*cipher=*/rc2_cbc_cipher, /*cleanup=*/nullptr, /*ctrl=*/rc2_ctrl, }; const EVP_CIPHER *EVP_rc2_cbc(void) { return &rc2_cbc; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/cipher/e_rc4.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../fipsmodule/cipher/internal.h" static int rc4_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv, int enc) { RC4_KEY *rc4key = (RC4_KEY *)ctx->cipher_data; RC4_set_key(rc4key, EVP_CIPHER_CTX_key_length(ctx), key); return 1; } static int rc4_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t in_len) { RC4_KEY *rc4key = (RC4_KEY *)ctx->cipher_data; RC4(rc4key, in_len, in, out); return 1; } static const EVP_CIPHER rc4 = { /*nid=*/NID_rc4, /*block_size=*/1, /*key_len=*/16, /*iv_len=*/0, /*ctx_size=*/sizeof(RC4_KEY), /*flags=*/EVP_CIPH_VARIABLE_LENGTH, /*init=*/rc4_init_key, /*cipher=*/rc4_cipher, /*cleanup=*/nullptr, /*ctrl=*/nullptr, }; const EVP_CIPHER *EVP_rc4(void) { return &rc4; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/cipher/e_tls.cc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include "../fipsmodule/cipher/internal.h" #include "../internal.h" #include "internal.h" typedef struct { EVP_CIPHER_CTX cipher_ctx; HMAC_CTX hmac_ctx; // mac_key is the portion of the key used for the MAC. It is retained // separately for the constant-time CBC code. uint8_t mac_key[EVP_MAX_MD_SIZE]; uint8_t mac_key_len; // implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit // IV. char implicit_iv; } AEAD_TLS_CTX; static_assert(EVP_MAX_MD_SIZE < 256, "mac_key_len does not fit in uint8_t"); static_assert(sizeof(((EVP_AEAD_CTX *)NULL)->state) >= sizeof(AEAD_TLS_CTX), "AEAD state is too small"); static_assert(alignof(union evp_aead_ctx_st_state) >= alignof(AEAD_TLS_CTX), "AEAD state has insufficient alignment"); static void aead_tls_cleanup(EVP_AEAD_CTX *ctx) { AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state; EVP_CIPHER_CTX_cleanup(&tls_ctx->cipher_ctx); HMAC_CTX_cleanup(&tls_ctx->hmac_ctx); } static int aead_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len, enum evp_aead_direction_t dir, const EVP_CIPHER *cipher, const EVP_MD *md, char implicit_iv) { if (tag_len != EVP_AEAD_DEFAULT_TAG_LENGTH && tag_len != EVP_MD_size(md)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_TAG_SIZE); return 0; } if (key_len != EVP_AEAD_key_length(ctx->aead)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); return 0; } size_t mac_key_len = EVP_MD_size(md); size_t enc_key_len = EVP_CIPHER_key_length(cipher); assert(mac_key_len + enc_key_len + (implicit_iv ? 
// (tail of aead_tls_init, continued from the previous chunk)
      EVP_CIPHER_iv_length(cipher) : 0) == key_len);

  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state;
  EVP_CIPHER_CTX_init(&tls_ctx->cipher_ctx);
  HMAC_CTX_init(&tls_ctx->hmac_ctx);
  assert(mac_key_len <= EVP_MAX_MD_SIZE);
  OPENSSL_memcpy(tls_ctx->mac_key, key, mac_key_len);
  tls_ctx->mac_key_len = (uint8_t)mac_key_len;
  tls_ctx->implicit_iv = implicit_iv;

  // The key material is laid out as MAC key || encryption key || (optional
  // implicit IV); |implicit_iv| selects whether the IV comes from the key
  // block (TLS 1.0 CBC) or is passed per-record as the nonce.
  if (!EVP_CipherInit_ex(&tls_ctx->cipher_ctx, cipher, NULL, &key[mac_key_len],
                         implicit_iv ? &key[mac_key_len + enc_key_len] : NULL,
                         dir == evp_aead_seal) ||
      !HMAC_Init_ex(&tls_ctx->hmac_ctx, key, mac_key_len, md, NULL)) {
    aead_tls_cleanup(ctx);
    return 0;
  }
  // TLS CBC padding is produced/validated by hand below, so disable the
  // cipher layer's own padding.
  EVP_CIPHER_CTX_set_padding(&tls_ctx->cipher_ctx, 0);

  return 1;
}

// aead_tls_tag_len returns the seal overhead (MAC plus, for CBC mode, the
// mandatory padding) for a |in_len|-byte plaintext.
static size_t aead_tls_tag_len(const EVP_AEAD_CTX *ctx, const size_t in_len,
                               const size_t extra_in_len) {
  assert(extra_in_len == 0);
  const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state;

  const size_t hmac_len = HMAC_size(&tls_ctx->hmac_ctx);
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE) {
    // The NULL cipher.
    return hmac_len;
  }

  const size_t block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  // An overflow of |in_len + hmac_len| doesn't affect the result mod
  // |block_size|, provided that |block_size| is a smaller power of two.
  assert(block_size != 0 && (block_size & (block_size - 1)) == 0);
  const size_t pad_len = block_size - (in_len + hmac_len) % block_size;
  return hmac_len + pad_len;
}

// aead_tls_seal_scatter encrypts a legacy TLS CBC (or NULL-cipher) record:
// it MACs |ad| || length || |in|, then encrypts plaintext || MAC || padding,
// writing the ciphertext for the plaintext to |out| and the remainder (the
// encrypted MAC and padding) to |out_tag|.
static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                 uint8_t *out_tag, size_t *out_tag_len,
                                 const size_t max_out_tag_len,
                                 const uint8_t *nonce, const size_t nonce_len,
                                 const uint8_t *in, const size_t in_len,
                                 const uint8_t *extra_in,
                                 const size_t extra_in_len, const uint8_t *ad,
                                 const size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state;

  if (!tls_ctx->cipher_ctx.encrypt) {
    // Unlike a normal AEAD, a TLS AEAD may only be used in one direction.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len > INT_MAX) {
    // EVP_CIPHER takes int as input.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_tag_len < aead_tls_tag_len(ctx, in_len, extra_in_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  // To allow for CBC mode which changes cipher length, |ad| doesn't include the
  // length for legacy ciphers.
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);

  // Compute the MAC. This must be first in case the operation is being done
  // in-place.
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, in, in_len) ||
      !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len)) {
    return 0;
  }

  // Configure the explicit IV.
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  // Encrypt the input.
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }

  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);

  // Feed the MAC into the cipher in two steps. First complete the final partial
  // block from encrypting the input and split the result between |out| and
  // |out_tag|. Then feed the rest.
  const size_t early_mac_len =
      (block_size - (in_len % block_size)) % block_size;
  if (early_mac_len != 0) {
    assert(len + block_size - early_mac_len == in_len);
    uint8_t buf[EVP_MAX_BLOCK_LENGTH];
    int buf_len;
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, buf, &buf_len, mac,
                           (int)early_mac_len)) {
      return 0;
    }
    assert(buf_len == (int)block_size);
    OPENSSL_memcpy(out + len, buf, block_size - early_mac_len);
    OPENSSL_memcpy(out_tag, buf + block_size - early_mac_len, early_mac_len);
  }
  size_t tag_len = early_mac_len;

  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out_tag + tag_len, &len,
                         mac + tag_len, mac_len - tag_len)) {
    return 0;
  }
  tag_len += len;

  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    // Compute padding and feed that into the cipher.
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    OPENSSL_memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out_tag + tag_len, &len,
                           padding, (int)padding_len)) {
      return 0;
    }
    tag_len += len;
  }

  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out_tag + tag_len, &len)) {
    return 0;
  }
  assert(len == 0);  // Padding is explicit.
  assert(tag_len == aead_tls_tag_len(ctx, in_len, extra_in_len));

  *out_tag_len = tag_len;
  return 1;
}

// aead_tls_open decrypts and authenticates a legacy TLS record; body continues
// in the next chunk.
static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
                         size_t max_out_len, const uint8_t *nonce,
                         size_t nonce_len, const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state;

  if (tls_ctx->cipher_ctx.encrypt) {
    // Unlike a normal AEAD, a TLS AEAD may only be used in one direction.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (max_out_len < in_len) {
    // This requires that the caller provide space for the MAC, even though it
    // will always be removed on return.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  if (in_len > INT_MAX) {
    // EVP_CIPHER takes int as input.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  // Configure the explicit IV.
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  // Decrypt to get the plaintext + MAC + padding.
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  assert(total == in_len);

  CONSTTIME_SECRET(out, total);

  // Remove CBC padding. Code from here on is timing-sensitive with respect to
  // |padding_ok| and |data_plus_mac_len| for CBC ciphers.
  size_t data_plus_mac_len;
  crypto_word_t padding_ok;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    if (!EVP_tls_cbc_remove_padding(
            &padding_ok, &data_plus_mac_len, out, total,
            EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
            HMAC_size(&tls_ctx->hmac_ctx))) {
      // Publicly invalid. This can be rejected in non-constant time.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = CONSTTIME_TRUE_W;
    data_plus_mac_len = total;
    // |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
    // already been checked against the MAC size at the top of the function.
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  size_t data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

  // At this point, if the padding is valid, the first |data_plus_mac_len| bytes
  // after |out| are the plaintext and MAC. Otherwise, |data_plus_mac_len| is
  // still large enough to extract a MAC, but it will be irrelevant.

  // To allow for CBC mode which changes cipher length, |ad| doesn't include the
  // length for legacy ciphers.
  uint8_t ad_fixed[13];
  OPENSSL_memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;

  // Compute the MAC and extract the one in the record.
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    // Constant-time (Lucky Thirteen resistant) path: digest the record with a
    // runtime independent of where the padding boundary fell.
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));

    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    // We should support the constant-time path for all CBC-mode ciphers
    // implemented.
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

    unsigned mac_len_u;
    if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, out, data_len) ||
        !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len_u)) {
      return 0;
    }
    mac_len = mac_len_u;
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));

    record_mac = &out[data_len];
  }

  // Perform the MAC check and the padding check in constant-time. It should be
  // safe to simply perform the padding check first, but it would not be under a
  // different choice of MAC location on padding failure. See
  // EVP_tls_cbc_remove_padding.
  crypto_word_t good =
      constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0);
  good &= padding_ok;
  CONSTTIME_DECLASSIFY(&good, sizeof(good));
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  CONSTTIME_DECLASSIFY(&data_len, sizeof(data_len));
  CONSTTIME_DECLASSIFY(out, data_len);

  // End of timing-sensitive code.

  *out_len = data_len;
  return 1;
}

// The following wrappers bind aead_tls_init to a concrete cipher/digest pair.
// The trailing 0/1 argument selects explicit vs. implicit (key-block) IV.

static int aead_aes_128_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_128_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_128_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_256_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 1);
}

static int aead_des_ede3_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx,
                                           const uint8_t *key, size_t key_len,
                                           size_t tag_len,
                                           enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 1);
}

// aead_tls_get_iv exposes the cipher's current IV. Only the implicit-IV AEAD
// variants install this callback in their vtables below.
static int aead_tls_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv,
                           size_t *out_iv_len) {
  const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state;
  const size_t iv_len = EVP_CIPHER_CTX_iv_length(&tls_ctx->cipher_ctx);
  if (iv_len <= 1) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  *out_iv = tls_ctx->cipher_ctx.iv;
  *out_iv_len = iv_len;
  return 1;
}

// Static EVP_AEAD vtables for each legacy cipher suite. Field order follows
// the EVP_AEAD struct definition: sizes first, then callbacks.

static const EVP_AEAD aead_aes_128_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 16,  // key len (SHA1 + AES128)
    16,                      // nonce len (IV)
    16 + SHA_DIGEST_LENGTH,  // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,       // max tag length
    0,                       // seal_scatter_supports_extra_in
    NULL,                    // init
    aead_aes_128_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 16 + 16,  // key len (SHA1 + AES128 + IV)
    0,                            // nonce len
    16 + SHA_DIGEST_LENGTH,       // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,            // max tag length
    0,                            // seal_scatter_supports_extra_in
    NULL,                         // init
    aead_aes_128_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,             // open_gather
    aead_tls_get_iv,  // get_iv
    aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_128_cbc_sha256_tls = {
    SHA256_DIGEST_LENGTH + 16,  // key len (SHA256 + AES128)
    16,                         // nonce len (IV)
    16 + SHA256_DIGEST_LENGTH,  // overhead (padding + SHA256)
    SHA256_DIGEST_LENGTH,       // max tag length
    0,                          // seal_scatter_supports_extra_in
    NULL,                       // init
    aead_aes_128_cbc_sha256_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 32,  // key len (SHA1 + AES256)
    16,                      // nonce len (IV)
    16 + SHA_DIGEST_LENGTH,  // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,       // max tag length
    0,                       // seal_scatter_supports_extra_in
    NULL,                    // init
    aead_aes_256_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 32 + 16,  // key len (SHA1 + AES256 + IV)
    0,                            // nonce len
    16 + SHA_DIGEST_LENGTH,       // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,            // max tag length
    0,                            // seal_scatter_supports_extra_in
    NULL,                         // init
    aead_aes_256_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,             // open_gather
    aead_tls_get_iv,  // get_iv
    aead_tls_tag_len,
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 24,  // key len (SHA1 + 3DES)
    8,                       // nonce len (IV)
    8 + SHA_DIGEST_LENGTH,   // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,       // max tag length
    0,                       // seal_scatter_supports_extra_in
    NULL,                    // init
    aead_des_ede3_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 24 + 8,  // key len (SHA1 + 3DES + IV)
    0,                           // nonce len
    8 + SHA_DIGEST_LENGTH,       // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,           // max tag length
    0,                           // seal_scatter_supports_extra_in
    NULL,                        // init
    aead_des_ede3_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,             // open_gather
    aead_tls_get_iv,  // get_iv
    aead_tls_tag_len,
};

// Public accessors returning the static vtables above.

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void) {
  return &aead_aes_128_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_128_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls(void) {
  return &aead_aes_128_cbc_sha256_tls;
}

const EVP_AEAD
    *EVP_aead_aes_256_cbc_sha1_tls(void) {
  return &aead_aes_256_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_256_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void) {
  return &aead_des_ede3_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void) {
  return &aead_des_ede3_cbc_sha1_tls_implicit_iv;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/cipher/get_cipher.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the angle-bracket targets of the following includes were lost
// in text extraction — restore from upstream BoringSSL before use.
#include
#include
#include
#include
#include
#include

#include "internal.h"
#include "../internal.h"


// kCiphers maps each supported NID and lowercase name to its EVP_CIPHER
// accessor; both lookup functions below scan this table linearly.
static const struct {
  int nid;
  const char *name;
  const EVP_CIPHER *(*func)(void);
} kCiphers[] = {
    {NID_aes_128_cbc, "aes-128-cbc", EVP_aes_128_cbc},
    {NID_aes_128_ctr, "aes-128-ctr", EVP_aes_128_ctr},
    {NID_aes_128_ecb, "aes-128-ecb", EVP_aes_128_ecb},
    {NID_aes_128_gcm, "aes-128-gcm", EVP_aes_128_gcm},
    {NID_aes_128_ofb128, "aes-128-ofb", EVP_aes_128_ofb},
    {NID_aes_192_cbc, "aes-192-cbc", EVP_aes_192_cbc},
    {NID_aes_192_ctr, "aes-192-ctr", EVP_aes_192_ctr},
    {NID_aes_192_ecb, "aes-192-ecb", EVP_aes_192_ecb},
    {NID_aes_192_gcm, "aes-192-gcm", EVP_aes_192_gcm},
    {NID_aes_192_ofb128, "aes-192-ofb", EVP_aes_192_ofb},
    {NID_aes_256_cbc, "aes-256-cbc", EVP_aes_256_cbc},
    {NID_aes_256_ctr, "aes-256-ctr", EVP_aes_256_ctr},
    {NID_aes_256_ecb, "aes-256-ecb", EVP_aes_256_ecb},
    {NID_aes_256_gcm, "aes-256-gcm", EVP_aes_256_gcm},
    {NID_aes_256_ofb128, "aes-256-ofb", EVP_aes_256_ofb},
    {NID_des_cbc, "des-cbc", EVP_des_cbc},
    {NID_des_ecb, "des-ecb", EVP_des_ecb},
    {NID_des_ede_cbc, "des-ede-cbc", EVP_des_ede_cbc},
    {NID_des_ede_ecb, "des-ede", EVP_des_ede},
    {NID_des_ede3_cbc, "des-ede3-cbc", EVP_des_ede3_cbc},
    {NID_rc2_cbc, "rc2-cbc", EVP_rc2_cbc},
    {NID_rc4, "rc4", EVP_rc4},
};

// EVP_get_cipherbynid returns the cipher for |nid|, or NULL if unknown.
const EVP_CIPHER *EVP_get_cipherbynid(int nid) {
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kCiphers); i++) {
    if (kCiphers[i].nid == nid) {
      return kCiphers[i].func();
    }
  }
  return NULL;
}

// EVP_get_cipherbyname returns the cipher matching |name|
// (case-insensitively), or NULL if unknown.
const EVP_CIPHER *EVP_get_cipherbyname(const char *name) {
  if (name == NULL) {
    return NULL;
  }

  // This is not a name used by OpenSSL, but tcpdump registers it with
  // |EVP_add_cipher_alias|. Our |EVP_add_cipher_alias| is a no-op, so we
  // support the name here.
  if (OPENSSL_strcasecmp(name, "3des") == 0) {
    name = "des-ede3-cbc";
  }

  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kCiphers); i++) {
    if (OPENSSL_strcasecmp(kCiphers[i].name, name) == 0) {
      return kCiphers[i].func();
    }
  }
  return NULL;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/cipher/internal.h
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H
#define OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H

// NOTE(review): the angle-bracket targets of the following includes were lost
// in text extraction — restore from upstream BoringSSL before use.
#include
#include
#include

#include "../internal.h"

#if defined(__cplusplus)
extern "C" {
#endif


// EVP_tls_cbc_get_padding determines the padding from the decrypted, TLS, CBC
// record in |in|. This decrypted record should not include any "decrypted"
// explicit IV. If the record is publicly invalid, it returns zero. Otherwise,
// it returns one and sets |*out_padding_ok| to all ones (0xfff..f) if the
// padding is valid and zero otherwise. It then sets |*out_len| to the length
// with the padding removed or |in_len| if invalid.
//
// If the function returns one, it runs in time independent of the contents of
// |in|. It is also guaranteed that |*out_len| >= |mac_size|, satisfying
// |EVP_tls_cbc_copy_mac|'s precondition.
int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len,
                               const uint8_t *in, size_t in_len,
                               size_t block_size, size_t mac_size);

// EVP_tls_cbc_copy_mac copies |md_size| bytes from the end of the first
// |in_len| bytes of |in| to |out| in constant time (independent of the concrete
// value of |in_len|, which may vary within a 256-byte window). |in| must point
// to a buffer of |orig_len| bytes.
//
// On entry:
//   orig_len >= in_len >= md_size
//   md_size <= EVP_MAX_MD_SIZE
void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
                          size_t in_len, size_t orig_len);

// EVP_tls_cbc_record_digest_supported returns 1 iff |md| is a hash function
// which EVP_tls_cbc_digest_record supports.
int EVP_tls_cbc_record_digest_supported(const EVP_MD *md);

// EVP_sha1_final_with_secret_suffix computes the result of hashing |len| bytes
// from |in| to |ctx| and writes the resulting hash to |out|. |len| is treated
// as secret and must be at most |max_len|, which is treated as public. |in|
// must point to a buffer of at least |max_len| bytes. It returns one on success
// and zero if inputs are too long.
//
// This function is exported for unit tests.
OPENSSL_EXPORT int EVP_sha1_final_with_secret_suffix(
    SHA_CTX *ctx, uint8_t out[SHA_DIGEST_LENGTH], const uint8_t *in, size_t len,
    size_t max_len);

// EVP_sha256_final_with_secret_suffix acts like
// |EVP_sha1_final_with_secret_suffix|, but for SHA-256.
//
// This function is exported for unit tests.
OPENSSL_EXPORT int EVP_sha256_final_with_secret_suffix(
    SHA256_CTX *ctx, uint8_t out[SHA256_DIGEST_LENGTH], const uint8_t *in,
    size_t len, size_t max_len);

// EVP_tls_cbc_digest_record computes the MAC of a decrypted, padded TLS
// record.
//
//   md: the hash function used in the HMAC.
//     EVP_tls_cbc_record_digest_supported must return true for this hash.
//   md_out: the digest output. At most EVP_MAX_MD_SIZE bytes will be written.
//   md_out_size: the number of output bytes is written here.
//   header: the 13-byte, TLS record header.
//   data: the record data itself
//   data_size: the secret, reported length of the data once the padding and MAC
//     have been removed.
//   data_plus_mac_plus_padding_size: the public length of the whole
//     record, including padding.
//
// On entry: by virtue of having been through one of the remove_padding
// functions, above, we know that data_plus_mac_size is large enough to contain
// a padding byte and MAC. (If the padding was invalid, it might contain the
// padding too. )
int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
                              size_t *md_out_size, const uint8_t header[13],
                              const uint8_t *data, size_t data_size,
                              size_t data_plus_mac_plus_padding_size,
                              const uint8_t *mac_secret,
                              unsigned mac_secret_length);

#define POLY1305_TAG_LEN 16

// For convenience (the x86_64 calling convention allows only six parameters in
// registers), the final parameter for the assembly functions is both an input
// and output parameter.
union chacha20_poly1305_open_data {
  struct {
    alignas(16) uint8_t key[32];
    uint32_t counter;
    uint8_t nonce[12];
  } in;
  struct {
    uint8_t tag[POLY1305_TAG_LEN];
  } out;
};

union chacha20_poly1305_seal_data {
  struct {
    alignas(16) uint8_t key[32];
    uint32_t counter;
    uint8_t nonce[12];
    const uint8_t *extra_ciphertext;
    size_t extra_ciphertext_len;
  } in;
  struct {
    uint8_t tag[POLY1305_TAG_LEN];
  } out;
};

#if (defined(OPENSSL_X86_64) || defined(OPENSSL_AARCH64)) && \
    !defined(OPENSSL_NO_ASM)

static_assert(sizeof(union chacha20_poly1305_open_data) == 48,
              "wrong chacha20_poly1305_open_data size");
static_assert(sizeof(union chacha20_poly1305_seal_data) == 48 + 8 + 8,
              "wrong chacha20_poly1305_seal_data size");

inline int chacha20_poly1305_asm_capable(void) {
#if defined(OPENSSL_X86_64)
  return CRYPTO_is_SSE4_1_capable();
#elif defined(OPENSSL_AARCH64)
  return CRYPTO_is_NEON_capable();
#endif
}

// chacha20_poly1305_open is defined in chacha20_poly1305_*.pl. It decrypts
// |plaintext_len| bytes from |ciphertext| and writes them to |out_plaintext|.
// Additional input parameters are passed in |aead_data->in|. On exit, it will
// write calculated tag value to |aead_data->out.tag|, which the caller must
// check.
#if defined(OPENSSL_X86_64)
extern void chacha20_poly1305_open_nohw(
    uint8_t *out_plaintext, const uint8_t *ciphertext, size_t plaintext_len,
    const uint8_t *ad, size_t ad_len, union chacha20_poly1305_open_data *data);
extern void chacha20_poly1305_open_avx2(
    uint8_t *out_plaintext, const uint8_t *ciphertext, size_t plaintext_len,
    const uint8_t *ad, size_t ad_len, union chacha20_poly1305_open_data *data);
inline void chacha20_poly1305_open(uint8_t *out_plaintext,
                                   const uint8_t *ciphertext,
                                   size_t plaintext_len, const uint8_t *ad,
                                   size_t ad_len,
                                   union chacha20_poly1305_open_data *data) {
  if (CRYPTO_is_AVX2_capable() && CRYPTO_is_BMI2_capable()) {
    chacha20_poly1305_open_avx2(out_plaintext, ciphertext, plaintext_len, ad,
                                ad_len, data);
  } else {
    chacha20_poly1305_open_nohw(out_plaintext, ciphertext, plaintext_len, ad,
                                ad_len, data);
  }
}
#else
extern void chacha20_poly1305_open(uint8_t *out_plaintext,
                                   const uint8_t *ciphertext,
                                   size_t plaintext_len, const uint8_t *ad,
                                   size_t ad_len,
                                   union chacha20_poly1305_open_data *data);
#endif

// chacha20_poly1305_seal is defined in chacha20_poly1305_*.pl. It encrypts
// |plaintext_len| bytes from |plaintext| and writes them to |out_ciphertext|.
// Additional input parameters are passed in |aead_data->in|. The calculated tag
// value is over the computed ciphertext concatenated with |extra_ciphertext|
// and written to |aead_data->out.tag|.
#if defined(OPENSSL_X86_64)
extern void chacha20_poly1305_seal_nohw(
    uint8_t *out_ciphertext, const uint8_t *plaintext, size_t plaintext_len,
    const uint8_t *ad, size_t ad_len, union chacha20_poly1305_seal_data *data);
extern void chacha20_poly1305_seal_avx2(
    uint8_t *out_ciphertext, const uint8_t *plaintext, size_t plaintext_len,
    const uint8_t *ad, size_t ad_len, union chacha20_poly1305_seal_data *data);
inline void chacha20_poly1305_seal(uint8_t *out_ciphertext,
                                   const uint8_t *plaintext,
                                   size_t plaintext_len, const uint8_t *ad,
                                   size_t ad_len,
                                   union chacha20_poly1305_seal_data *data) {
  if (CRYPTO_is_AVX2_capable() && CRYPTO_is_BMI2_capable()) {
    chacha20_poly1305_seal_avx2(out_ciphertext, plaintext, plaintext_len, ad,
                                ad_len, data);
  } else {
    chacha20_poly1305_seal_nohw(out_ciphertext, plaintext, plaintext_len, ad,
                                ad_len, data);
  }
}
#else
extern void chacha20_poly1305_seal(uint8_t *out_ciphertext,
                                   const uint8_t *plaintext,
                                   size_t plaintext_len, const uint8_t *ad,
                                   size_t ad_len,
                                   union chacha20_poly1305_seal_data *data);
#endif

#else

// No assembly implementation available: report incapable and make the entry
// points abort if reached.
inline int chacha20_poly1305_asm_capable(void) { return 0; }

inline void chacha20_poly1305_open(uint8_t *out_plaintext,
                                   const uint8_t *ciphertext,
                                   size_t plaintext_len, const uint8_t *ad,
                                   size_t ad_len,
                                   union chacha20_poly1305_open_data *data) {
  abort();
}

inline void chacha20_poly1305_seal(uint8_t *out_ciphertext,
                                   const uint8_t *plaintext,
                                   size_t plaintext_len, const uint8_t *ad,
                                   size_t ad_len,
                                   union chacha20_poly1305_seal_data *data) {
  abort();
}
#endif

#if defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H

================================================
FILE: Sources/CNIOBoringSSL/crypto/cipher/tls_cbc.cc
================================================
/*
 * Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../internal.h" #include "internal.h" #include "../fipsmodule/cipher/internal.h" int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len, const uint8_t *in, size_t in_len, size_t block_size, size_t mac_size) { const size_t overhead = 1 /* padding length byte */ + mac_size; // These lengths are all public so we can test them in non-constant time. if (overhead > in_len) { return 0; } size_t padding_length = in[in_len - 1]; crypto_word_t good = constant_time_ge_w(in_len, overhead + padding_length); // The padding consists of a length byte at the end of the record and // then that many bytes of padding, all with the same value as the // length byte. Thus, with the length byte included, there are i+1 // bytes of padding. // // We can't check just |padding_length+1| bytes because that leaks // decrypted information. Therefore we always have to check the maximum // amount of padding possible. (Again, the length of the record is // public information so we can use it.) size_t to_check = 256; // maximum amount of padding, inc length byte. if (to_check > in_len) { to_check = in_len; } for (size_t i = 0; i < to_check; i++) { uint8_t mask = constant_time_ge_8(padding_length, i); uint8_t b = in[in_len - 1 - i]; // The final |padding_length+1| bytes should all have the value // |padding_length|. Therefore the XOR should be zero. good &= ~(mask & (padding_length ^ b)); } // If any of the final |padding_length+1| bytes had the wrong value, // one or more of the lower eight bits of |good| will be cleared. good = constant_time_eq_w(0xff, good & 0xff); // Always treat |padding_length| as zero on error. 
If, assuming block size of // 16, a padding of [<15 arbitrary bytes> 15] treated |padding_length| as 16 // and returned -1, distinguishing good MAC and bad padding from bad MAC and // bad padding would give POODLE's padding oracle. padding_length = good & (padding_length + 1); *out_len = in_len - padding_length; *out_padding_ok = good; return 1; } void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in, size_t in_len, size_t orig_len) { uint8_t rotated_mac1[EVP_MAX_MD_SIZE], rotated_mac2[EVP_MAX_MD_SIZE]; uint8_t *rotated_mac = rotated_mac1; uint8_t *rotated_mac_tmp = rotated_mac2; // mac_end is the index of |in| just after the end of the MAC. size_t mac_end = in_len; size_t mac_start = mac_end - md_size; declassify_assert(orig_len >= in_len); declassify_assert(in_len >= md_size); assert(md_size <= EVP_MAX_MD_SIZE); assert(md_size > 0); // scan_start contains the number of bytes that we can ignore because // the MAC's position can only vary by 255 bytes. size_t scan_start = 0; // This information is public so it's safe to branch based on it. if (orig_len > md_size + 255 + 1) { scan_start = orig_len - (md_size + 255 + 1); } size_t rotate_offset = 0; uint8_t mac_started = 0; OPENSSL_memset(rotated_mac, 0, md_size); for (size_t i = scan_start, j = 0; i < orig_len; i++, j++) { if (j >= md_size) { j -= md_size; } crypto_word_t is_mac_start = constant_time_eq_w(i, mac_start); mac_started |= is_mac_start; uint8_t mac_ended = constant_time_ge_8(i, mac_end); rotated_mac[j] |= in[i] & mac_started & ~mac_ended; // Save the offset that |mac_start| is mapped to. rotate_offset |= j & is_mac_start; } // Now rotate the MAC. We rotate in log(md_size) steps, one for each bit // position. for (size_t offset = 1; offset < md_size; offset <<= 1, rotate_offset >>= 1) { // Rotate by |offset| iff the corresponding bit is set in // |rotate_offset|, placing the result in |rotated_mac_tmp|. 
const uint8_t skip_rotate = (rotate_offset & 1) - 1; for (size_t i = 0, j = offset; i < md_size; i++, j++) { if (j >= md_size) { j -= md_size; } rotated_mac_tmp[i] = constant_time_select_8(skip_rotate, rotated_mac[i], rotated_mac[j]); } // Swap pointers so |rotated_mac| contains the (possibly) rotated value. // Note the number of iterations and thus the identity of these pointers is // public information. uint8_t *tmp = rotated_mac; rotated_mac = rotated_mac_tmp; rotated_mac_tmp = tmp; } OPENSSL_memcpy(out, rotated_mac, md_size); } int EVP_sha1_final_with_secret_suffix(SHA_CTX *ctx, uint8_t out[SHA_DIGEST_LENGTH], const uint8_t *in, size_t len, size_t max_len) { // Bound the input length so |total_bits| below fits in four bytes. This is // redundant with TLS record size limits. This also ensures |input_idx| below // does not overflow. size_t max_len_bits = max_len << 3; if (ctx->Nh != 0 || (max_len_bits >> 3) != max_len || // Overflow ctx->Nl + max_len_bits < max_len_bits || ctx->Nl + max_len_bits > UINT32_MAX) { return 0; } // We need to hash the following into |ctx|: // // - ctx->data[:ctx->num] // - in[:len] // - A 0x80 byte // - However many zero bytes are needed to pad up to a block. // - Eight bytes of length. size_t num_blocks = (ctx->num + len + 1 + 8 + SHA_CBLOCK - 1) >> 6; size_t last_block = num_blocks - 1; size_t max_blocks = (ctx->num + max_len + 1 + 8 + SHA_CBLOCK - 1) >> 6; // The bounds above imply |total_bits| fits in four bytes. size_t total_bits = ctx->Nl + (len << 3); uint8_t length_bytes[4]; length_bytes[0] = (uint8_t)(total_bits >> 24); length_bytes[1] = (uint8_t)(total_bits >> 16); length_bytes[2] = (uint8_t)(total_bits >> 8); length_bytes[3] = (uint8_t)total_bits; // We now construct and process each expected block in constant-time. uint8_t block[SHA_CBLOCK] = {0}; uint32_t result[5] = {0}; // input_idx is the index into |in| corresponding to the current block. 
  // However, we allow this index to overflow beyond |max_len|, to simplify the
  // 0x80 byte.
  size_t input_idx = 0;
  for (size_t i = 0; i < max_blocks; i++) {
    // Fill |block| with data from the partial block in |ctx| and |in|. We copy
    // as if we were hashing up to |max_len| and then zero the excess later.
    size_t block_start = 0;
    if (i == 0) {
      OPENSSL_memcpy(block, ctx->data, ctx->num);
      block_start = ctx->num;
    }
    if (input_idx < max_len) {
      size_t to_copy = SHA_CBLOCK - block_start;
      if (to_copy > max_len - input_idx) {
        to_copy = max_len - input_idx;
      }
      OPENSSL_memcpy(block + block_start, in + input_idx, to_copy);
    }

    // Zero any bytes beyond |len| and add the 0x80 byte.
    for (size_t j = block_start; j < SHA_CBLOCK; j++) {
      // input[idx] corresponds to block[j].
      size_t idx = input_idx + j - block_start;
      // The barriers on |len| are not strictly necessary. However, without
      // them, GCC compiles this code by incorporating |len| into the loop
      // counter and subtracting it out later. This is still constant-time, but
      // it frustrates attempts to validate this.
      uint8_t is_in_bounds = constant_time_lt_8(idx, value_barrier_w(len));
      uint8_t is_padding_byte = constant_time_eq_8(idx, value_barrier_w(len));
      block[j] &= is_in_bounds;
      block[j] |= 0x80 & is_padding_byte;
    }

    input_idx += SHA_CBLOCK - block_start;

    // Fill in the length if this is the last block.
    crypto_word_t is_last_block = constant_time_eq_w(i, last_block);
    for (size_t j = 0; j < 4; j++) {
      block[SHA_CBLOCK - 4 + j] |= is_last_block & length_bytes[j];
    }

    // Process the block and save the hash state if it is the final value.
    SHA1_Transform(ctx, block);
    for (size_t j = 0; j < 5; j++) {
      result[j] |= is_last_block & ctx->h[j];
    }
  }

  // Write the output.
  for (size_t i = 0; i < 5; i++) {
    CRYPTO_store_u32_be(out + 4 * i, result[i]);
  }
  return 1;
}

// EVP_sha256_final_with_secret_suffix is the SHA-256 analogue of
// |EVP_sha1_final_with_secret_suffix| above: it finishes |ctx| over the
// buffered data followed by |in[:len]| where |len| is secret and at most
// the public |max_len|, hashing all |max_blocks| candidate blocks so the
// work done is independent of |len|. Returns one on success, zero if the
// bounds checks fail.
int EVP_sha256_final_with_secret_suffix(SHA256_CTX *ctx,
                                        uint8_t out[SHA256_DIGEST_LENGTH],
                                        const uint8_t *in, size_t len,
                                        size_t max_len) {
  // Bound the input length so |total_bits| below fits in four bytes. This is
  // redundant with TLS record size limits. This also ensures |input_idx| below
  // does not overflow.
  size_t max_len_bits = max_len << 3;
  if (ctx->Nh != 0 ||
      (max_len_bits >> 3) != max_len ||  // Overflow
      ctx->Nl + max_len_bits < max_len_bits ||
      ctx->Nl + max_len_bits > UINT32_MAX) {
    return 0;
  }

  // We need to hash the following into |ctx|:
  //
  // - ctx->data[:ctx->num]
  // - in[:len]
  // - A 0x80 byte
  // - However many zero bytes are needed to pad up to a block.
  // - Eight bytes of length.
  size_t num_blocks = (ctx->num + len + 1 + 8 + SHA256_CBLOCK - 1) >> 6;
  size_t last_block = num_blocks - 1;
  size_t max_blocks = (ctx->num + max_len + 1 + 8 + SHA256_CBLOCK - 1) >> 6;

  // The bounds above imply |total_bits| fits in four bytes.
  size_t total_bits = ctx->Nl + (len << 3);
  uint8_t length_bytes[4];
  length_bytes[0] = (uint8_t)(total_bits >> 24);
  length_bytes[1] = (uint8_t)(total_bits >> 16);
  length_bytes[2] = (uint8_t)(total_bits >> 8);
  length_bytes[3] = (uint8_t)total_bits;

  // We now construct and process each expected block in constant-time.
  uint8_t block[SHA256_CBLOCK] = {0};
  uint32_t result[8] = {0};

  // input_idx is the index into |in| corresponding to the current block.
  // However, we allow this index to overflow beyond |max_len|, to simplify the
  // 0x80 byte.
  size_t input_idx = 0;
  for (size_t i = 0; i < max_blocks; i++) {
    // Fill |block| with data from the partial block in |ctx| and |in|. We copy
    // as if we were hashing up to |max_len| and then zero the excess later.
    size_t block_start = 0;
    if (i == 0) {
      OPENSSL_memcpy(block, ctx->data, ctx->num);
      block_start = ctx->num;
    }
    if (input_idx < max_len) {
      size_t to_copy = SHA256_CBLOCK - block_start;
      if (to_copy > max_len - input_idx) {
        to_copy = max_len - input_idx;
      }
      OPENSSL_memcpy(block + block_start, in + input_idx, to_copy);
    }

    // Zero any bytes beyond |len| and add the 0x80 byte.
    for (size_t j = block_start; j < SHA256_CBLOCK; j++) {
      // input[idx] corresponds to block[j].
      size_t idx = input_idx + j - block_start;
      // The barriers on |len| are not strictly necessary. However, without
      // them, GCC compiles this code by incorporating |len| into the loop
      // counter and subtracting it out later. This is still constant-time, but
      // it frustrates attempts to validate this.
      uint8_t is_in_bounds = constant_time_lt_8(idx, value_barrier_w(len));
      uint8_t is_padding_byte = constant_time_eq_8(idx, value_barrier_w(len));
      block[j] &= is_in_bounds;
      block[j] |= 0x80 & is_padding_byte;
    }

    input_idx += SHA256_CBLOCK - block_start;

    // Fill in the length if this is the last block.
    crypto_word_t is_last_block = constant_time_eq_w(i, last_block);
    for (size_t j = 0; j < 4; j++) {
      block[SHA256_CBLOCK - 4 + j] |= is_last_block & length_bytes[j];
    }

    // Process the block and save the hash state if it is the final value.
    SHA256_Transform(ctx, block);
    for (size_t j = 0; j < 8; j++) {
      result[j] |= is_last_block & ctx->h[j];
    }
  }

  // Write the output.
  for (size_t i = 0; i < 8; i++) {
    CRYPTO_store_u32_be(out + 4 * i, result[i]);
  }
  return 1;
}

// EVP_tls_cbc_record_digest_supported returns one if
// |EVP_tls_cbc_digest_record| supports |md| (SHA-1 and SHA-256 only).
int EVP_tls_cbc_record_digest_supported(const EVP_MD *md) {
  switch (EVP_MD_type(md)) {
    case NID_sha1:
    case NID_sha256:
      return 1;

    default:
      return 0;
  }
}

// tls_cbc_digest_record_sha1 computes HMAC-SHA1(mac_secret, header || data)
// where |data_size| is secret and bounded above by the public
// |data_plus_mac_plus_padding_size|, without leaking |data_size|.
static int tls_cbc_digest_record_sha1(uint8_t *md_out, size_t *md_out_size,
                                      const uint8_t header[13],
                                      const uint8_t *data, size_t data_size,
                                      size_t data_plus_mac_plus_padding_size,
                                      const uint8_t *mac_secret,
                                      unsigned mac_secret_length) {
  if (mac_secret_length > SHA_CBLOCK) {
    // HMAC pads small keys with zeros and hashes large keys down. This
    // function should never reach the large key case.
    assert(0);
    return 0;
  }

  // Compute the initial HMAC block (the key XORed with the 0x36 ipad).
  uint8_t hmac_pad[SHA_CBLOCK];
  OPENSSL_memset(hmac_pad, 0, sizeof(hmac_pad));
  OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length);
  for (size_t i = 0; i < SHA_CBLOCK; i++) {
    hmac_pad[i] ^= 0x36;
  }

  SHA_CTX ctx;
  SHA1_Init(&ctx);
  SHA1_Update(&ctx, hmac_pad, SHA_CBLOCK);
  SHA1_Update(&ctx, header, 13);

  // There are at most 256 bytes of padding, so we can compute the public
  // minimum length for |data_size|.
  size_t min_data_size = 0;
  if (data_plus_mac_plus_padding_size > SHA_DIGEST_LENGTH + 256) {
    min_data_size = data_plus_mac_plus_padding_size - SHA_DIGEST_LENGTH - 256;
  }

  // Hash the public minimum length directly. This reduces the number of
  // blocks that must be computed in constant-time.
  SHA1_Update(&ctx, data, min_data_size);

  // Hash the remaining data without leaking |data_size|.
  uint8_t mac_out[SHA_DIGEST_LENGTH];
  if (!EVP_sha1_final_with_secret_suffix(
          &ctx, mac_out, data + min_data_size, data_size - min_data_size,
          data_plus_mac_plus_padding_size - min_data_size)) {
    return 0;
  }

  // Complete the HMAC in the standard manner.
  SHA1_Init(&ctx);
  // XORing by 0x6a converts the ipad block (key ^ 0x36) into the opad block
  // (key ^ 0x5c), since 0x36 ^ 0x6a == 0x5c.
  for (size_t i = 0; i < SHA_CBLOCK; i++) {
    hmac_pad[i] ^= 0x6a;
  }

  SHA1_Update(&ctx, hmac_pad, SHA_CBLOCK);
  SHA1_Update(&ctx, mac_out, SHA_DIGEST_LENGTH);
  SHA1_Final(md_out, &ctx);
  *md_out_size = SHA_DIGEST_LENGTH;
  return 1;
}

// tls_cbc_digest_record_sha256 is the SHA-256 analogue of
// |tls_cbc_digest_record_sha1| above.
static int tls_cbc_digest_record_sha256(uint8_t *md_out, size_t *md_out_size,
                                        const uint8_t header[13],
                                        const uint8_t *data, size_t data_size,
                                        size_t data_plus_mac_plus_padding_size,
                                        const uint8_t *mac_secret,
                                        unsigned mac_secret_length) {
  if (mac_secret_length > SHA256_CBLOCK) {
    // HMAC pads small keys with zeros and hashes large keys down. This
    // function should never reach the large key case.
    assert(0);
    return 0;
  }

  // Compute the initial HMAC block (the key XORed with the 0x36 ipad).
  uint8_t hmac_pad[SHA256_CBLOCK];
  OPENSSL_memset(hmac_pad, 0, sizeof(hmac_pad));
  OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length);
  for (size_t i = 0; i < SHA256_CBLOCK; i++) {
    hmac_pad[i] ^= 0x36;
  }

  SHA256_CTX ctx;
  SHA256_Init(&ctx);
  SHA256_Update(&ctx, hmac_pad, SHA256_CBLOCK);
  SHA256_Update(&ctx, header, 13);

  // There are at most 256 bytes of padding, so we can compute the public
  // minimum length for |data_size|.
  size_t min_data_size = 0;
  if (data_plus_mac_plus_padding_size > SHA256_DIGEST_LENGTH + 256) {
    min_data_size =
        data_plus_mac_plus_padding_size - SHA256_DIGEST_LENGTH - 256;
  }

  // Hash the public minimum length directly. This reduces the number of
  // blocks that must be computed in constant-time.
  SHA256_Update(&ctx, data, min_data_size);

  // Hash the remaining data without leaking |data_size|.
  uint8_t mac_out[SHA256_DIGEST_LENGTH];
  if (!EVP_sha256_final_with_secret_suffix(
          &ctx, mac_out, data + min_data_size, data_size - min_data_size,
          data_plus_mac_plus_padding_size - min_data_size)) {
    return 0;
  }

  // Complete the HMAC in the standard manner.
  SHA256_Init(&ctx);
  // XORing by 0x6a converts the ipad block (key ^ 0x36) into the opad block
  // (key ^ 0x5c), since 0x36 ^ 0x6a == 0x5c.
  for (size_t i = 0; i < SHA256_CBLOCK; i++) {
    hmac_pad[i] ^= 0x6a;
  }

  SHA256_Update(&ctx, hmac_pad, SHA256_CBLOCK);
  SHA256_Update(&ctx, mac_out, SHA256_DIGEST_LENGTH);
  SHA256_Final(md_out, &ctx);
  *md_out_size = SHA256_DIGEST_LENGTH;
  return 1;
}

// EVP_tls_cbc_digest_record dispatches to the per-hash implementation above
// based on |md|. Callers must first check the hash is supported with
// |EVP_tls_cbc_record_digest_supported|.
int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
                              size_t *md_out_size, const uint8_t header[13],
                              const uint8_t *data, size_t data_size,
                              size_t data_plus_mac_plus_padding_size,
                              const uint8_t *mac_secret,
                              unsigned mac_secret_length) {
  switch (EVP_MD_type(md)) {
    case NID_sha1:
      return tls_cbc_digest_record_sha1(
          md_out, md_out_size, header, data, data_size,
          data_plus_mac_plus_padding_size, mac_secret, mac_secret_length);

    case NID_sha256:
      return tls_cbc_digest_record_sha256(
          md_out, md_out_size, header, data, data_size,
          data_plus_mac_plus_padding_size, mac_secret, mac_secret_length);

    default:
      // EVP_tls_cbc_record_digest_supported should have been called first to
      // check that the hash function is supported.
      assert(0);
      *md_out_size = 0;
      return 0;
  }
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/conf/conf.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the header names of the following includes were stripped
// during extraction (angle-bracketed text lost) — restore from upstream
// BoringSSL crypto/conf/conf.cc before compiling.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../internal.h"
#include "internal.h"

struct conf_section_st {
  char *name;
  // values contains non-owning pointers to the values in the section.
STACK_OF(CONF_VALUE) *values; }; static const char kDefaultSectionName[] = "default"; static uint32_t conf_section_hash(const CONF_SECTION *s) { return OPENSSL_strhash(s->name); } static int conf_section_cmp(const CONF_SECTION *a, const CONF_SECTION *b) { return strcmp(a->name, b->name); } static uint32_t conf_value_hash(const CONF_VALUE *v) { const uint32_t section_hash = OPENSSL_strhash(v->section); const uint32_t name_hash = OPENSSL_strhash(v->name); return (section_hash << 2) ^ name_hash; } static int conf_value_cmp(const CONF_VALUE *a, const CONF_VALUE *b) { int cmp = strcmp(a->section, b->section); if (cmp != 0) { return cmp; } return strcmp(a->name, b->name); } CONF *NCONF_new(void *method) { if (method != NULL) { return NULL; } CONF *conf = reinterpret_cast(OPENSSL_malloc(sizeof(CONF))); if (conf == NULL) { return NULL; } conf->sections = lh_CONF_SECTION_new(conf_section_hash, conf_section_cmp); conf->values = lh_CONF_VALUE_new(conf_value_hash, conf_value_cmp); if (conf->sections == NULL || conf->values == NULL) { NCONF_free(conf); return NULL; } return conf; } CONF_VALUE *CONF_VALUE_new(void) { return reinterpret_cast(OPENSSL_zalloc(sizeof(CONF_VALUE))); } static void value_free(CONF_VALUE *value) { if (value == NULL) { return; } OPENSSL_free(value->section); OPENSSL_free(value->name); OPENSSL_free(value->value); OPENSSL_free(value); } static void section_free(CONF_SECTION *section) { if (section == NULL) { return; } OPENSSL_free(section->name); sk_CONF_VALUE_free(section->values); OPENSSL_free(section); } static void value_free_arg(CONF_VALUE *value, void *arg) { value_free(value); } static void section_free_arg(CONF_SECTION *section, void *arg) { section_free(section); } void NCONF_free(CONF *conf) { if (conf == NULL) { return; } lh_CONF_SECTION_doall_arg(conf->sections, section_free_arg, NULL); lh_CONF_SECTION_free(conf->sections); lh_CONF_VALUE_doall_arg(conf->values, value_free_arg, NULL); lh_CONF_VALUE_free(conf->values); OPENSSL_free(conf); } static 
CONF_SECTION *NCONF_new_section(const CONF *conf, const char *section) { CONF_SECTION *s = reinterpret_cast(OPENSSL_malloc(sizeof(CONF_SECTION))); if (!s) { return NULL; } s->name = OPENSSL_strdup(section); s->values = sk_CONF_VALUE_new_null(); if (s->name == NULL || s->values == NULL) { goto err; } CONF_SECTION *old_section; if (!lh_CONF_SECTION_insert(conf->sections, &old_section, s)) { goto err; } section_free(old_section); return s; err: section_free(s); return NULL; } static int is_comment(char c) { return c == '#'; } static int is_quote(char c) { return c == '"' || c == '\'' || c == '`'; } static int is_esc(char c) { return c == '\\'; } static int is_conf_ws(char c) { // This differs from |OPENSSL_isspace| in that CONF does not accept '\v' and // '\f' as whitespace. return c == ' ' || c == '\t' || c == '\r' || c == '\n'; } static int is_name_char(char c) { // Alphanumeric characters, and a handful of symbols, may appear in value and // section names without escaping. return OPENSSL_isalnum(c) || c == '_' || c == '!' || c == '.' || c == '%' || c == '&' || c == '*' || c == '+' || c == ',' || c == '/' || c == ';' || c == '?' 
|| c == '@' || c == '^' || c == '~' || c == '|' || c == '-'; } static int str_copy(CONF *conf, char *section, char **pto, char *from) { int q, to = 0, len = 0; char v; BUF_MEM *buf; buf = BUF_MEM_new(); if (buf == NULL) { return 0; } len = strlen(from) + 1; if (!BUF_MEM_grow(buf, len)) { goto err; } for (;;) { if (is_quote(*from)) { q = *from; from++; while (*from != '\0' && *from != q) { if (is_esc(*from)) { from++; if (*from == '\0') { break; } } buf->data[to++] = *(from++); } if (*from == q) { from++; } } else if (is_esc(*from)) { from++; v = *(from++); if (v == '\0') { break; } else if (v == 'r') { v = '\r'; } else if (v == 'n') { v = '\n'; } else if (v == 'b') { v = '\b'; } else if (v == 't') { v = '\t'; } buf->data[to++] = v; } else if (*from == '\0') { break; } else if (*from == '$') { // Historically, $foo would expand to a previously-parsed value. This // feature has been removed as it was unused and is a DoS vector. If // trying to embed '$' in a line, either escape it or wrap the value in // quotes. 
OPENSSL_PUT_ERROR(CONF, CONF_R_VARIABLE_EXPANSION_NOT_SUPPORTED); goto err; } else { buf->data[to++] = *(from++); } } buf->data[to] = '\0'; OPENSSL_free(*pto); *pto = buf->data; OPENSSL_free(buf); return 1; err: BUF_MEM_free(buf); return 0; } static CONF_SECTION *get_section(const CONF *conf, const char *section) { CONF_SECTION templ; OPENSSL_memset(&templ, 0, sizeof(templ)); templ.name = (char *)section; return lh_CONF_SECTION_retrieve(conf->sections, &templ); } const STACK_OF(CONF_VALUE) *NCONF_get_section(const CONF *conf, const char *section) { const CONF_SECTION *section_obj = get_section(conf, section); if (section_obj == NULL) { return NULL; } return section_obj->values; } const char *NCONF_get_string(const CONF *conf, const char *section, const char *name) { CONF_VALUE templ, *value; if (section == NULL) { section = kDefaultSectionName; } OPENSSL_memset(&templ, 0, sizeof(templ)); templ.section = (char *)section; templ.name = (char *)name; value = lh_CONF_VALUE_retrieve(conf->values, &templ); if (value == NULL) { return NULL; } return value->value; } static int add_string(const CONF *conf, CONF_SECTION *section, CONF_VALUE *value) { value->section = OPENSSL_strdup(section->name); if (value->section == NULL) { return 0; } if (!sk_CONF_VALUE_push(section->values, value)) { return 0; } CONF_VALUE *old_value; if (!lh_CONF_VALUE_insert(conf->values, &old_value, value)) { // Remove |value| from |section->values|, so we do not leave a dangling // pointer. sk_CONF_VALUE_pop(section->values); return 0; } if (old_value != NULL) { (void)sk_CONF_VALUE_delete_ptr(section->values, old_value); value_free(old_value); } return 1; } static char *eat_ws(char *p) { while (*p != '\0' && is_conf_ws(*p)) { p++; } return p; } static char *scan_esc(char *p) { assert(p[0] == '\\'); return p[1] == '\0' ? 
p + 1 : p + 2; } static char *eat_name(char *p) { for (;;) { if (is_esc(*p)) { p = scan_esc(p); continue; } if (!is_name_char(*p)) { return p; } p++; } } static char *scan_quote(char *p) { int q = *p; p++; while (*p != '\0' && *p != q) { if (is_esc(*p)) { p++; if (*p == '\0') { return p; } } p++; } if (*p == q) { p++; } return p; } static void clear_comments(char *p) { for (;;) { if (!is_conf_ws(*p)) { break; } p++; } for (;;) { if (is_comment(*p)) { *p = '\0'; return; } if (is_quote(*p)) { p = scan_quote(p); continue; } if (is_esc(*p)) { p = scan_esc(p); continue; } if (*p == '\0') { return; } else { p++; } } } int NCONF_load_bio(CONF *conf, BIO *in, long *out_error_line) { static const size_t CONFBUFSIZE = 512; int bufnum = 0, i, ii; BUF_MEM *buff = NULL; char *s, *p, *end; int again; long eline = 0; CONF_VALUE *v = NULL; CONF_SECTION *sv = NULL; char *section = NULL, *buf; char *start, *psection, *pname; if ((buff = BUF_MEM_new()) == NULL) { OPENSSL_PUT_ERROR(CONF, ERR_R_BUF_LIB); goto err; } section = OPENSSL_strdup(kDefaultSectionName); if (section == NULL) { goto err; } sv = NCONF_new_section(conf, section); if (sv == NULL) { OPENSSL_PUT_ERROR(CONF, CONF_R_UNABLE_TO_CREATE_NEW_SECTION); goto err; } bufnum = 0; again = 0; for (;;) { if (!BUF_MEM_grow(buff, bufnum + CONFBUFSIZE)) { OPENSSL_PUT_ERROR(CONF, ERR_R_BUF_LIB); goto err; } p = &(buff->data[bufnum]); *p = '\0'; BIO_gets(in, p, CONFBUFSIZE - 1); p[CONFBUFSIZE - 1] = '\0'; ii = i = strlen(p); if (i == 0 && !again) { break; } again = 0; while (i > 0) { if ((p[i - 1] != '\r') && (p[i - 1] != '\n')) { break; } else { i--; } } // we removed some trailing stuff so there is a new // line on the end. 
if (ii && i == ii) { again = 1; // long line } else { p[i] = '\0'; eline++; // another input line } // we now have a line with trailing \r\n removed // i is the number of bytes bufnum += i; v = NULL; // check for line continuation if (bufnum >= 1) { // If we have bytes and the last char '\\' and // second last char is not '\\' p = &(buff->data[bufnum - 1]); if (is_esc(p[0]) && ((bufnum <= 1) || !is_esc(p[-1]))) { bufnum--; again = 1; } } if (again) { continue; } bufnum = 0; buf = buff->data; clear_comments(buf); s = eat_ws(buf); if (*s == '\0') { continue; // blank line } if (*s == '[') { char *ss; s++; start = eat_ws(s); ss = start; again: end = eat_name(ss); p = eat_ws(end); if (*p != ']') { if (*p != '\0' && ss != p) { ss = p; goto again; } OPENSSL_PUT_ERROR(CONF, CONF_R_MISSING_CLOSE_SQUARE_BRACKET); goto err; } *end = '\0'; if (!str_copy(conf, NULL, §ion, start)) { goto err; } if ((sv = get_section(conf, section)) == NULL) { sv = NCONF_new_section(conf, section); } if (sv == NULL) { OPENSSL_PUT_ERROR(CONF, CONF_R_UNABLE_TO_CREATE_NEW_SECTION); goto err; } continue; } else { pname = s; psection = NULL; end = eat_name(s); if ((end[0] == ':') && (end[1] == ':')) { *end = '\0'; end += 2; psection = pname; pname = end; end = eat_name(end); } p = eat_ws(end); if (*p != '=') { OPENSSL_PUT_ERROR(CONF, CONF_R_MISSING_EQUAL_SIGN); goto err; } *end = '\0'; p++; start = eat_ws(p); while (*p != '\0') { p++; } p--; while (p != start && is_conf_ws(*p)) { p--; } p++; *p = '\0'; if (!(v = CONF_VALUE_new())) { goto err; } if (psection == NULL) { psection = section; } v->name = OPENSSL_strdup(pname); if (v->name == NULL) { goto err; } if (!str_copy(conf, psection, &(v->value), start)) { goto err; } CONF_SECTION *tv; if (strcmp(psection, section) != 0) { if ((tv = get_section(conf, psection)) == NULL) { tv = NCONF_new_section(conf, psection); } if (tv == NULL) { OPENSSL_PUT_ERROR(CONF, CONF_R_UNABLE_TO_CREATE_NEW_SECTION); goto err; } } else { tv = sv; } if (add_string(conf, tv, 
v) == 0) { goto err; } v = NULL; } } BUF_MEM_free(buff); OPENSSL_free(section); return 1; err: BUF_MEM_free(buff); OPENSSL_free(section); if (out_error_line != NULL) { *out_error_line = eline; } ERR_add_error_dataf("line %ld", eline); value_free(v); return 0; } int NCONF_load(CONF *conf, const char *filename, long *out_error_line) { BIO *in = BIO_new_file(filename, "rb"); int ret; if (in == NULL) { OPENSSL_PUT_ERROR(CONF, ERR_R_SYS_LIB); return 0; } ret = NCONF_load_bio(conf, in, out_error_line); BIO_free(in); return ret; } int CONF_parse_list(const char *list, char sep, int remove_whitespace, int (*list_cb)(const char *elem, size_t len, void *usr), void *arg) { int ret; const char *lstart, *tmpend, *p; if (list == NULL) { OPENSSL_PUT_ERROR(CONF, CONF_R_LIST_CANNOT_BE_NULL); return 0; } lstart = list; for (;;) { if (remove_whitespace) { while (*lstart && OPENSSL_isspace((unsigned char)*lstart)) { lstart++; } } p = strchr(lstart, sep); if (p == lstart || !*lstart) { ret = list_cb(NULL, 0, arg); } else { if (p) { tmpend = p - 1; } else { tmpend = lstart + strlen(lstart) - 1; } if (remove_whitespace) { while (OPENSSL_isspace((unsigned char)*tmpend)) { tmpend--; } } ret = list_cb(lstart, tmpend - lstart + 1, arg); } if (ret <= 0) { return ret; } if (p == NULL) { return 1; } lstart = p + 1; } } int CONF_modules_load_file(const char *filename, const char *appname, unsigned long flags) { return 1; } void CONF_modules_free(void) {} void OPENSSL_config(const char *config_name) {} void OPENSSL_no_config(void) {} ================================================ FILE: Sources/CNIOBoringSSL/crypto/conf/internal.h ================================================ /* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CRYPTO_CONF_INTERNAL_H #define OPENSSL_HEADER_CRYPTO_CONF_INTERNAL_H #include #include "../lhash/internal.h" #if defined(__cplusplus) extern "C" { #endif typedef struct conf_section_st CONF_SECTION; DEFINE_LHASH_OF(CONF_SECTION) DEFINE_LHASH_OF(CONF_VALUE) struct conf_st { LHASH_OF(CONF_VALUE) *values; LHASH_OF(CONF_SECTION) *sections; }; // CONF_VALUE_new returns a freshly allocated and zeroed |CONF_VALUE|. CONF_VALUE *CONF_VALUE_new(void); // CONF_parse_list takes a list separated by 'sep' and calls |list_cb| giving // the start and length of each member, optionally stripping leading and // trailing whitespace. This can be used to parse comma separated lists for // example. If |list_cb| returns <= 0, then the iteration is halted and that // value is returned immediately. Otherwise it returns one. Note that |list_cb| // may be called on an empty member. 
OPENSSL_EXPORT int CONF_parse_list( const char *list, char sep, int remove_whitespace, int (*list_cb)(const char *elem, size_t len, void *usr), void *arg); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_CRYPTO_CONF_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/cpu_aarch64_apple.cc ================================================ /* Copyright 2021 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "internal.h" #if defined(OPENSSL_AARCH64) && defined(OPENSSL_APPLE) && \ !defined(OPENSSL_STATIC_ARMCAP) && !defined(OPENSSL_NO_ASM) #include #include #include static int has_hw_feature(const char *name) { int value; size_t len = sizeof(value); if (sysctlbyname(name, &value, &len, NULL, 0) != 0) { return 0; } if (len != sizeof(int)) { // This should not happen. All the values queried should be integer-valued. assert(0); return 0; } // Per sys/sysctl.h: // // Selectors that return errors are not support on the system. Supported // features will return 1 if they are recommended or 0 if they are supported // but are not expected to help performance. Future versions of these // selectors may return larger values as necessary so it is best to test for // non zero. 
return value != 0; } void OPENSSL_cpuid_setup(void) { // Apple ARM64 platforms have NEON and cryptography extensions available // statically, so we do not need to query them. In particular, there sometimes // are no sysctls corresponding to such features. See below. #if !defined(__ARM_NEON) || !defined(__ARM_FEATURE_AES) || \ !defined(__ARM_FEATURE_SHA2) #error "NEON and crypto extensions should be statically available." #endif OPENSSL_armcap_P = ARMV7_NEON | ARMV8_AES | ARMV8_PMULL | ARMV8_SHA1 | ARMV8_SHA256; // See Apple's documentation for sysctl names: // https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics // // The new feature names, e.g. "hw.optional.arm.FEAT_SHA512", are only // available in macOS 12. For compatibility with macOS 11, we also support // the old names. The old names don't have values for features like FEAT_AES, // so instead we detect them statically above. // // If querying new sysctls, update the Chromium sandbox definition. See // https://crrev.com/c/4415225. if (has_hw_feature("hw.optional.arm.FEAT_SHA512") || has_hw_feature("hw.optional.armv8_2_sha512")) { OPENSSL_armcap_P |= ARMV8_SHA512; } } #endif // OPENSSL_AARCH64 && OPENSSL_APPLE && !OPENSSL_STATIC_ARMCAP ================================================ FILE: Sources/CNIOBoringSSL/crypto/cpu_aarch64_fuchsia.cc ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "internal.h" #if defined(OPENSSL_AARCH64) && defined(OPENSSL_FUCHSIA) && \ !defined(OPENSSL_STATIC_ARMCAP) && !defined(OPENSSL_NO_ASM) #include #include #include #include void OPENSSL_cpuid_setup(void) { uint32_t hwcap; zx_status_t rc = zx_system_get_features(ZX_FEATURE_KIND_CPU, &hwcap); if (rc != ZX_OK || (hwcap & ZX_ARM64_FEATURE_ISA_ASIMD) == 0) { // If NEON/ASIMD is missing, don't report other features either. This // matches OpenSSL, and the other features depend on SIMD registers. return; } OPENSSL_armcap_P |= ARMV7_NEON; if (hwcap & ZX_ARM64_FEATURE_ISA_AES) { OPENSSL_armcap_P |= ARMV8_AES; } if (hwcap & ZX_ARM64_FEATURE_ISA_PMULL) { OPENSSL_armcap_P |= ARMV8_PMULL; } if (hwcap & ZX_ARM64_FEATURE_ISA_SHA1) { OPENSSL_armcap_P |= ARMV8_SHA1; } if (hwcap & ZX_ARM64_FEATURE_ISA_SHA256) { OPENSSL_armcap_P |= ARMV8_SHA256; } if (hwcap & ZX_ARM64_FEATURE_ISA_SHA512) { OPENSSL_armcap_P |= ARMV8_SHA512; } } #endif // OPENSSL_AARCH64 && OPENSSL_FUCHSIA && !OPENSSL_STATIC_ARMCAP ================================================ FILE: Sources/CNIOBoringSSL/crypto/cpu_aarch64_linux.cc ================================================ /* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "internal.h" #if defined(OPENSSL_AARCH64) && defined(OPENSSL_LINUX) && \ !defined(OPENSSL_STATIC_ARMCAP) && !defined(OPENSSL_NO_ASM) #include #include void OPENSSL_cpuid_setup(void) { unsigned long hwcap = getauxval(AT_HWCAP); // See /usr/include/asm/hwcap.h on an aarch64 installation for the source of // these values. static const unsigned long kNEON = 1 << 1; static const unsigned long kAES = 1 << 3; static const unsigned long kPMULL = 1 << 4; static const unsigned long kSHA1 = 1 << 5; static const unsigned long kSHA256 = 1 << 6; static const unsigned long kSHA512 = 1 << 21; if ((hwcap & kNEON) == 0) { // Matching OpenSSL, if NEON is missing, don't report other features // either. return; } OPENSSL_armcap_P |= ARMV7_NEON; if (hwcap & kAES) { OPENSSL_armcap_P |= ARMV8_AES; } if (hwcap & kPMULL) { OPENSSL_armcap_P |= ARMV8_PMULL; } if (hwcap & kSHA1) { OPENSSL_armcap_P |= ARMV8_SHA1; } if (hwcap & kSHA256) { OPENSSL_armcap_P |= ARMV8_SHA256; } if (hwcap & kSHA512) { OPENSSL_armcap_P |= ARMV8_SHA512; } } #endif // OPENSSL_AARCH64 && OPENSSL_LINUX && !OPENSSL_STATIC_ARMCAP ================================================ FILE: Sources/CNIOBoringSSL/crypto/cpu_aarch64_openbsd.cc ================================================ /* Copyright (c) 2022, Robert Nagy * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #if defined(OPENSSL_AARCH64) && defined(OPENSSL_OPENBSD) && \ !defined(OPENSSL_STATIC_ARMCAP) && !defined(OPENSSL_NO_ASM) #include #include #include #include #include "internal.h" void OPENSSL_cpuid_setup(void) { int isar0_mib[] = {CTL_MACHDEP, CPU_ID_AA64ISAR0}; uint64_t cpu_id = 0; size_t len = sizeof(cpu_id); if (sysctl(isar0_mib, 2, &cpu_id, &len, NULL, 0) < 0) { return; } OPENSSL_armcap_P |= ARMV7_NEON; if (ID_AA64ISAR0_AES(cpu_id) >= ID_AA64ISAR0_AES_BASE) { OPENSSL_armcap_P |= ARMV8_AES; } if (ID_AA64ISAR0_AES(cpu_id) >= ID_AA64ISAR0_AES_PMULL) { OPENSSL_armcap_P |= ARMV8_PMULL; } if (ID_AA64ISAR0_SHA1(cpu_id) >= ID_AA64ISAR0_SHA1_BASE) { OPENSSL_armcap_P |= ARMV8_SHA1; } if (ID_AA64ISAR0_SHA2(cpu_id) >= ID_AA64ISAR0_SHA2_BASE) { OPENSSL_armcap_P |= ARMV8_SHA256; } if (ID_AA64ISAR0_SHA2(cpu_id) >= ID_AA64ISAR0_SHA2_512) { OPENSSL_armcap_P |= ARMV8_SHA512; } } #endif // OPENSSL_AARCH64 && OPENSSL_OPENBSD && !OPENSSL_STATIC_ARMCAP ================================================ FILE: Sources/CNIOBoringSSL/crypto/cpu_aarch64_sysreg.cc ================================================ /* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "internal.h" // While Arm system registers are normally not available to userspace, FreeBSD // expects userspace to simply read them. It traps the reads and fills in CPU // capabilities. #if defined(OPENSSL_AARCH64) && !defined(OPENSSL_STATIC_ARMCAP) && \ (defined(ANDROID_BAREMETAL) || defined(OPENSSL_FREEBSD)) && \ !defined(OPENSSL_NO_ASM) #include #define ID_AA64PFR0_EL1_ADVSIMD 5 #define ID_AA64ISAR0_EL1_AES 1 #define ID_AA64ISAR0_EL1_SHA1 2 #define ID_AA64ISAR0_EL1_SHA2 3 #define NBITS_ID_FIELD 4 #define READ_SYSREG(name) \ ({ \ uint64_t _r; \ __asm__("mrs %0, " name : "=r"(_r)); \ _r; \ }) static unsigned get_id_field(uint64_t reg, unsigned field) { return (reg >> (field * NBITS_ID_FIELD)) & ((1 << NBITS_ID_FIELD) - 1); } static int get_signed_id_field(uint64_t reg, unsigned field) { unsigned value = get_id_field(reg, field); if (value & (1 << (NBITS_ID_FIELD - 1))) { return (int)(value | (UINT64_MAX << NBITS_ID_FIELD)); } else { return (int)value; } } static uint32_t read_armcap(void) { uint32_t armcap = ARMV7_NEON; uint64_t id_aa64pfr0_el1 = READ_SYSREG("id_aa64pfr0_el1"); if (get_signed_id_field(id_aa64pfr0_el1, ID_AA64PFR0_EL1_ADVSIMD) < 0) { // If AdvSIMD ("NEON") is missing, don't report other features either. // This matches OpenSSL. 
return 0; } uint64_t id_aa64isar0_el1 = READ_SYSREG("id_aa64isar0_el1"); unsigned aes = get_id_field(id_aa64isar0_el1, ID_AA64ISAR0_EL1_AES); if (aes > 0) { armcap |= ARMV8_AES; } if (aes > 1) { armcap |= ARMV8_PMULL; } unsigned sha1 = get_id_field(id_aa64isar0_el1, ID_AA64ISAR0_EL1_SHA1); if (sha1 > 0) { armcap |= ARMV8_SHA1; } unsigned sha2 = get_id_field(id_aa64isar0_el1, ID_AA64ISAR0_EL1_SHA2); if (sha2 > 0) { armcap |= ARMV8_SHA256; } if (sha2 > 1) { armcap |= ARMV8_SHA512; } return armcap; } void OPENSSL_cpuid_setup(void) { OPENSSL_armcap_P |= read_armcap(); } #endif // OPENSSL_AARCH64 && !OPENSSL_STATIC_ARMCAP && // (ANDROID_BAREMETAL || OPENSSL_FREEBSD) ================================================ FILE: Sources/CNIOBoringSSL/crypto/cpu_aarch64_win.cc ================================================ /* Copyright 2018 The BoringSSL Authors * Copyright (c) 2020, Arm Ltd. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "internal.h" #if defined(OPENSSL_AARCH64) && defined(OPENSSL_WINDOWS) && \ !defined(OPENSSL_STATIC_ARMCAP) && !defined(OPENSSL_NO_ASM) #include #include void OPENSSL_cpuid_setup(void) { // We do not need to check for the presence of NEON, as Armv8-A always has it OPENSSL_armcap_P |= ARMV7_NEON; if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)) { // These are all covered by one call in Windows OPENSSL_armcap_P |= ARMV8_AES; OPENSSL_armcap_P |= ARMV8_PMULL; OPENSSL_armcap_P |= ARMV8_SHA1; OPENSSL_armcap_P |= ARMV8_SHA256; } // As of writing, Windows does not have a |PF_*| value for ARMv8.2 SHA-512 // extensions. When it does, add it here. } #endif // OPENSSL_AARCH64 && OPENSSL_WINDOWS && !OPENSSL_STATIC_ARMCAP ================================================ FILE: Sources/CNIOBoringSSL/crypto/cpu_arm_freebsd.cc ================================================ /* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "internal.h" #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && \ defined(OPENSSL_FREEBSD) && !defined(OPENSSL_STATIC_ARMCAP) #include #include #include #include void OPENSSL_cpuid_setup(void) { unsigned long hwcap = 0, hwcap2 = 0; // |elf_aux_info| may fail, in which case |hwcap| and |hwcap2| will be // left at zero. The rest of this function will then gracefully report // the features are absent. elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)); elf_aux_info(AT_HWCAP2, &hwcap2, sizeof(hwcap2)); // Matching OpenSSL, only report other features if NEON is present. if (hwcap & HWCAP_NEON) { OPENSSL_armcap_P |= ARMV7_NEON; if (hwcap2 & HWCAP2_AES) { OPENSSL_armcap_P |= ARMV8_AES; } if (hwcap2 & HWCAP2_PMULL) { OPENSSL_armcap_P |= ARMV8_PMULL; } if (hwcap2 & HWCAP2_SHA1) { OPENSSL_armcap_P |= ARMV8_SHA1; } if (hwcap2 & HWCAP2_SHA2) { OPENSSL_armcap_P |= ARMV8_SHA256; } } } #endif // OPENSSL_ARM && OPENSSL_OPENBSD && !OPENSSL_STATIC_ARMCAP ================================================ FILE: Sources/CNIOBoringSSL/crypto/cpu_arm_linux.cc ================================================ /* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "internal.h" #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && \ defined(OPENSSL_LINUX) && !defined(OPENSSL_STATIC_ARMCAP) #include #include #include #include #include #include #include #include "cpu_arm_linux.h" static int open_eintr(const char *path, int flags) { int ret; do { ret = open(path, flags); } while (ret < 0 && errno == EINTR); return ret; } static ssize_t read_eintr(int fd, void *out, size_t len) { ssize_t ret; do { ret = read(fd, out, len); } while (ret < 0 && errno == EINTR); return ret; } // read_file opens |path| and reads until end-of-file. On success, it returns // one and sets |*out_ptr| and |*out_len| to a newly-allocated buffer with the // contents. Otherwise, it returns zero. static int read_file(char **out_ptr, size_t *out_len, const char *path) { int fd = open_eintr(path, O_RDONLY); if (fd < 0) { return 0; } static const size_t kReadSize = 1024; int ret = 0; size_t cap = kReadSize, len = 0; char *buf = reinterpret_cast(OPENSSL_malloc(cap)); if (buf == NULL) { goto err; } for (;;) { if (cap - len < kReadSize) { size_t new_cap = cap * 2; if (new_cap < cap) { goto err; } char *new_buf = reinterpret_cast(OPENSSL_realloc(buf, new_cap)); if (new_buf == NULL) { goto err; } buf = new_buf; cap = new_cap; } ssize_t bytes_read = read_eintr(fd, buf + len, kReadSize); if (bytes_read < 0) { goto err; } if (bytes_read == 0) { break; } len += bytes_read; } *out_ptr = buf; *out_len = len; ret = 1; buf = NULL; err: OPENSSL_free(buf); close(fd); return ret; } static int g_needs_hwcap2_workaround; void OPENSSL_cpuid_setup(void) { // We ignore the return value of |read_file| and proceed with an empty // /proc/cpuinfo on error. If |getauxval| works, we will still detect // capabilities. char *cpuinfo_data = NULL; size_t cpuinfo_len = 0; read_file(&cpuinfo_data, &cpuinfo_len, "/proc/cpuinfo"); STRING_PIECE cpuinfo; cpuinfo.data = cpuinfo_data; cpuinfo.len = cpuinfo_len; // Matching OpenSSL, only report other features if NEON is present. 
unsigned long hwcap = getauxval(AT_HWCAP); if (hwcap & HWCAP_NEON) { OPENSSL_armcap_P |= ARMV7_NEON; // Some ARMv8 Android devices don't expose AT_HWCAP2. Fall back to // /proc/cpuinfo. See https://crbug.com/boringssl/46. As of February 2021, // this is now rare (see Chrome's Net.NeedsHWCAP2Workaround metric), but AES // and PMULL extensions are very useful, so we still carry the workaround // for now. unsigned long hwcap2 = getauxval(AT_HWCAP2); if (hwcap2 == 0) { hwcap2 = crypto_get_arm_hwcap2_from_cpuinfo(&cpuinfo); g_needs_hwcap2_workaround = hwcap2 != 0; } if (hwcap2 & HWCAP2_AES) { OPENSSL_armcap_P |= ARMV8_AES; } if (hwcap2 & HWCAP2_PMULL) { OPENSSL_armcap_P |= ARMV8_PMULL; } if (hwcap2 & HWCAP2_SHA1) { OPENSSL_armcap_P |= ARMV8_SHA1; } if (hwcap2 & HWCAP2_SHA2) { OPENSSL_armcap_P |= ARMV8_SHA256; } } OPENSSL_free(cpuinfo_data); } int CRYPTO_has_broken_NEON(void) { return 0; } int CRYPTO_needs_hwcap2_workaround(void) { OPENSSL_init_cpuid(); return g_needs_hwcap2_workaround; } #endif // OPENSSL_ARM && OPENSSL_LINUX && !OPENSSL_STATIC_ARMCAP ================================================ FILE: Sources/CNIOBoringSSL/crypto/cpu_arm_linux.h ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef OPENSSL_HEADER_CRYPTO_CPU_ARM_LINUX_H #define OPENSSL_HEADER_CRYPTO_CPU_ARM_LINUX_H #include #include #include "internal.h" #if defined(__cplusplus) extern "C" { #endif // The cpuinfo parser lives in a header file so it may be accessible from // cross-platform fuzzers without adding code to those platforms normally. #define HWCAP_NEON (1 << 12) // See /usr/include/asm/hwcap.h on an ARM installation for the source of // these values. #define HWCAP2_AES (1 << 0) #define HWCAP2_PMULL (1 << 1) #define HWCAP2_SHA1 (1 << 2) #define HWCAP2_SHA2 (1 << 3) typedef struct { const char *data; size_t len; } STRING_PIECE; static int STRING_PIECE_equals(const STRING_PIECE *a, const char *b) { size_t b_len = strlen(b); return a->len == b_len && OPENSSL_memcmp(a->data, b, b_len) == 0; } // STRING_PIECE_split finds the first occurence of |sep| in |in| and, if found, // sets |*out_left| and |*out_right| to |in| split before and after it. It // returns one if |sep| was found and zero otherwise. static int STRING_PIECE_split(STRING_PIECE *out_left, STRING_PIECE *out_right, const STRING_PIECE *in, char sep) { const char *p = (const char *)OPENSSL_memchr(in->data, sep, in->len); if (p == NULL) { return 0; } // |out_left| or |out_right| may alias |in|, so make a copy. STRING_PIECE in_copy = *in; out_left->data = in_copy.data; out_left->len = p - in_copy.data; out_right->data = in_copy.data + out_left->len + 1; out_right->len = in_copy.len - out_left->len - 1; return 1; } // STRING_PIECE_get_delimited reads a |sep|-delimited entry from |s|, writing it // to |out| and updating |s| to point beyond it. It returns one on success and // zero if |s| is empty. If |s| is has no copies of |sep| and is non-empty, it // reads the entire string to |out|. static int STRING_PIECE_get_delimited(STRING_PIECE *s, STRING_PIECE *out, char sep) { if (s->len == 0) { return 0; } if (!STRING_PIECE_split(out, s, s, sep)) { // |s| had no instances of |sep|. Return the entire string. 
*out = *s; s->data += s->len; s->len = 0; } return 1; } // STRING_PIECE_trim removes leading and trailing whitespace from |s|. static void STRING_PIECE_trim(STRING_PIECE *s) { while (s->len != 0 && (s->data[0] == ' ' || s->data[0] == '\t')) { s->data++; s->len--; } while (s->len != 0 && (s->data[s->len - 1] == ' ' || s->data[s->len - 1] == '\t')) { s->len--; } } // extract_cpuinfo_field extracts a /proc/cpuinfo field named |field| from // |in|. If found, it sets |*out| to the value and returns one. Otherwise, it // returns zero. static int extract_cpuinfo_field(STRING_PIECE *out, const STRING_PIECE *in, const char *field) { // Process |in| one line at a time. STRING_PIECE remaining = *in, line; while (STRING_PIECE_get_delimited(&remaining, &line, '\n')) { STRING_PIECE key, value; if (!STRING_PIECE_split(&key, &value, &line, ':')) { continue; } STRING_PIECE_trim(&key); if (STRING_PIECE_equals(&key, field)) { STRING_PIECE_trim(&value); *out = value; return 1; } } return 0; } // has_list_item treats |list| as a space-separated list of items and returns // one if |item| is contained in |list| and zero otherwise. static int has_list_item(const STRING_PIECE *list, const char *item) { STRING_PIECE remaining = *list, feature; while (STRING_PIECE_get_delimited(&remaining, &feature, ' ')) { if (STRING_PIECE_equals(&feature, item)) { return 1; } } return 0; } // crypto_get_arm_hwcap2_from_cpuinfo returns an equivalent ARM |AT_HWCAP2| // value from |cpuinfo|. 
static unsigned long crypto_get_arm_hwcap2_from_cpuinfo( const STRING_PIECE *cpuinfo) { STRING_PIECE features; if (!extract_cpuinfo_field(&features, cpuinfo, "Features")) { return 0; } unsigned long ret = 0; if (has_list_item(&features, "aes")) { ret |= HWCAP2_AES; } if (has_list_item(&features, "pmull")) { ret |= HWCAP2_PMULL; } if (has_list_item(&features, "sha1")) { ret |= HWCAP2_SHA1; } if (has_list_item(&features, "sha2")) { ret |= HWCAP2_SHA2; } return ret; } #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_CRYPTO_CPU_ARM_LINUX_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/cpu_intel.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #if !defined(OPENSSL_NO_ASM) && \ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64)) #include #include #include #include #if defined(_MSC_VER) OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include #include OPENSSL_MSVC_PRAGMA(warning(pop)) #endif #include "internal.h" // OPENSSL_cpuid runs the cpuid instruction. |leaf| is passed in as EAX and ECX // is set to zero. It writes EAX, EBX, ECX, and EDX to |*out_eax| through // |*out_edx|. static void OPENSSL_cpuid(uint32_t *out_eax, uint32_t *out_ebx, uint32_t *out_ecx, uint32_t *out_edx, uint32_t leaf) { #if defined(_MSC_VER) int tmp[4]; __cpuid(tmp, (int)leaf); *out_eax = (uint32_t)tmp[0]; *out_ebx = (uint32_t)tmp[1]; *out_ecx = (uint32_t)tmp[2]; *out_edx = (uint32_t)tmp[3]; #elif defined(__pic__) && defined(OPENSSL_32_BIT) // Inline assembly may not clobber the PIC register. For 32-bit, this is EBX. // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47602. 
__asm__ volatile( "xor %%ecx, %%ecx\n" "mov %%ebx, %%edi\n" "cpuid\n" "xchg %%edi, %%ebx\n" : "=a"(*out_eax), "=D"(*out_ebx), "=c"(*out_ecx), "=d"(*out_edx) : "a"(leaf)); #else __asm__ volatile( "xor %%ecx, %%ecx\n" "cpuid\n" : "=a"(*out_eax), "=b"(*out_ebx), "=c"(*out_ecx), "=d"(*out_edx) : "a"(leaf)); #endif } // OPENSSL_xgetbv returns the value of an Intel Extended Control Register (XCR). // Currently only XCR0 is defined by Intel so |xcr| should always be zero. static uint64_t OPENSSL_xgetbv(uint32_t xcr) { #if defined(_MSC_VER) return (uint64_t)_xgetbv(xcr); #else uint32_t eax, edx; __asm__ volatile("xgetbv" : "=a"(eax), "=d"(edx) : "c"(xcr)); return (((uint64_t)edx) << 32) | eax; #endif } static bool os_supports_avx512(uint64_t xcr0) { #if defined(__APPLE__) // The Darwin kernel had a bug where it could corrupt the opmask registers. // See // https://community.intel.com/t5/Software-Tuning-Performance/MacOS-Darwin-kernel-bug-clobbers-AVX-512-opmask-register-state/m-p/1327259 // Darwin also does not initially set the XCR0 bits for AVX512, but they are // set if the thread tries to use AVX512 anyway. Thus, to safely and // consistently use AVX512 on macOS we'd need to check the kernel version as // well as detect AVX512 support using a macOS-specific method. We don't // bother with this, especially given Apple's transition to arm64. return false; #else return (xcr0 & 0xe6) == 0xe6; #endif } // handle_cpu_env applies the value from |in| to the CPUID values in |out[0]| // and |out[1]|. See the comment in |OPENSSL_cpuid_setup| about this. 
static void handle_cpu_env(uint32_t *out, const char *in) { const int invert_op = in[0] == '~'; const int or_op = in[0] == '|'; const int skip_first_byte = invert_op || or_op; const int hex = in[skip_first_byte] == '0' && in[skip_first_byte + 1] == 'x'; int sscanf_result; uint64_t v; if (hex) { sscanf_result = sscanf(in + invert_op + 2, "%" PRIx64, &v); } else { sscanf_result = sscanf(in + invert_op, "%" PRIu64, &v); } if (!sscanf_result) { return; } if (invert_op) { out[0] &= ~v; out[1] &= ~(v >> 32); } else if (or_op) { out[0] |= v; out[1] |= (v >> 32); } else { out[0] = v; out[1] = v >> 32; } } void OPENSSL_cpuid_setup(void) { // Determine the vendor and maximum input value. uint32_t eax, ebx, ecx, edx; OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 0); uint32_t num_ids = eax; int is_intel = ebx == 0x756e6547 /* Genu */ && // edx == 0x49656e69 /* ineI */ && // ecx == 0x6c65746e /* ntel */; int is_amd = ebx == 0x68747541 /* Auth */ && // edx == 0x69746e65 /* enti */ && // ecx == 0x444d4163 /* cAMD */; uint32_t extended_features[2] = {0}; if (num_ids >= 7) { OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 7); extended_features[0] = ebx; extended_features[1] = ecx; } OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 1); const uint32_t base_family = (eax >> 8) & 15; const uint32_t base_model = (eax >> 4) & 15; uint32_t family = base_family; uint32_t model = base_model; if (base_family == 15) { const uint32_t ext_family = (eax >> 20) & 255; family += ext_family; } if (base_family == 6 || base_family == 15) { const uint32_t ext_model = (eax >> 16) & 15; model |= ext_model << 4; } if (is_amd) { if (family < 0x17 || (family == 0x17 && 0x70 <= model && model <= 0x7f)) { // Disable RDRAND on AMD families before 0x17 (Zen) due to reported // failures after suspend. // https://bugzilla.redhat.com/show_bug.cgi?id=1150286 // Also disable for family 0x17, models 0x70–0x7f, due to possible RDRAND // failures there too. 
ecx &= ~(1u << 30); } } // Force the hyper-threading bit so that the more conservative path is always // chosen. edx |= 1u << 28; // Reserved bit #20 was historically repurposed to control the in-memory // representation of RC4 state. Always set it to zero. edx &= ~(1u << 20); // Reserved bit #30 is repurposed to signal an Intel CPU. if (is_intel) { edx |= (1u << 30); } else { edx &= ~(1u << 30); } // The SDBG bit is repurposed to denote AMD XOP support. Don't ever use AMD // XOP code paths. ecx &= ~(1u << 11); uint64_t xcr0 = 0; if (ecx & (1u << 27)) { // XCR0 may only be queried if the OSXSAVE bit is set. xcr0 = OPENSSL_xgetbv(0); } // See Intel manual, volume 1, section 14.3. if ((xcr0 & 6) != 6) { // YMM registers cannot be used. ecx &= ~(1u << 28); // AVX ecx &= ~(1u << 12); // FMA ecx &= ~(1u << 11); // AMD XOP extended_features[0] &= ~(1u << 5); // AVX2 extended_features[1] &= ~(1u << 9); // VAES extended_features[1] &= ~(1u << 10); // VPCLMULQDQ } // See Intel manual, volume 1, sections 15.2 ("Detection of AVX-512 Foundation // Instructions") through 15.4 ("Detection of Intel AVX-512 Instruction Groups // Operating at 256 and 128-bit Vector Lengths"). if (!os_supports_avx512(xcr0)) { // Without XCR0.111xx11x, no AVX512 feature can be used. This includes ZMM // registers, masking, SIMD registers 16-31 (even if accessed as YMM or // XMM), and EVEX-coded instructions (even on YMM or XMM). Even if only // XCR0.ZMM_Hi256 is missing, it isn't valid to use AVX512 features on // shorter vectors, since AVX512 ties everything to the availability of // 512-bit vectors. See the above-mentioned sections of the Intel manual, // which say that *all* these XCR0 bits must be checked even when just using // 128-bit or 256-bit vectors, and also volume 2a section 2.7.11 ("#UD // Equations for EVEX") which says that all EVEX-coded instructions raise an // undefined-instruction exception if any of these XCR0 bits is zero. 
// // AVX10 fixes this by reorganizing the features that used to be part of // "AVX512" and allowing them to be used independently of 512-bit support. // TODO: add AVX10 detection. extended_features[0] &= ~(1u << 16); // AVX512F extended_features[0] &= ~(1u << 17); // AVX512DQ extended_features[0] &= ~(1u << 21); // AVX512IFMA extended_features[0] &= ~(1u << 26); // AVX512PF extended_features[0] &= ~(1u << 27); // AVX512ER extended_features[0] &= ~(1u << 28); // AVX512CD extended_features[0] &= ~(1u << 30); // AVX512BW extended_features[0] &= ~(1u << 31); // AVX512VL extended_features[1] &= ~(1u << 1); // AVX512VBMI extended_features[1] &= ~(1u << 6); // AVX512VBMI2 extended_features[1] &= ~(1u << 11); // AVX512VNNI extended_features[1] &= ~(1u << 12); // AVX512BITALG extended_features[1] &= ~(1u << 14); // AVX512VPOPCNTDQ } // Repurpose the bit for the removed MPX feature to indicate when using zmm // registers should be avoided even when they are supported. (When set, AVX512 // features can still be used, but only using ymm or xmm registers.) Skylake // suffered from severe downclocking when zmm registers were used, which // affected unrelated code running on the system, making zmm registers not too // useful outside of benchmarks. The situation improved significantly by Ice // Lake, but a small amount of downclocking remained. (See // https://lore.kernel.org/linux-crypto/e8ce1146-3952-6977-1d0e-a22758e58914@intel.com/) // We take a conservative approach of not allowing zmm registers until after // Ice Lake and Tiger Lake, i.e. until Sapphire Rapids on the server side. // // AMD CPUs, which support AVX512 starting with Zen 4, have not been reported // to have any downclocking problem when zmm registers are used. 
if (is_intel && family == 6 && (model == 85 || // Skylake, Cascade Lake, Cooper Lake (server) model == 106 || // Ice Lake (server) model == 108 || // Ice Lake (micro server) model == 125 || // Ice Lake (client) model == 126 || // Ice Lake (mobile) model == 140 || // Tiger Lake (mobile) model == 141)) { // Tiger Lake (client) extended_features[0] |= 1u << 14; } else { extended_features[0] &= ~(1u << 14); } OPENSSL_ia32cap_P[0] = edx; OPENSSL_ia32cap_P[1] = ecx; OPENSSL_ia32cap_P[2] = extended_features[0]; OPENSSL_ia32cap_P[3] = extended_features[1]; const char *env1, *env2; env1 = getenv("OPENSSL_ia32cap"); if (env1 == NULL) { return; } // OPENSSL_ia32cap can contain zero, one or two values, separated with a ':'. // Each value is a 64-bit, unsigned value which may start with "0x" to // indicate a hex value. Prior to the 64-bit value, a '~' or '|' may be given. // // If the '~' prefix is present: // the value is inverted and ANDed with the probed CPUID result // If the '|' prefix is present: // the value is ORed with the probed CPUID result // Otherwise: // the value is taken as the result of the CPUID // // The first value determines OPENSSL_ia32cap_P[0] and [1]. The second [2] // and [3]. handle_cpu_env(&OPENSSL_ia32cap_P[0], env1); env2 = strchr(env1, ':'); if (env2 != NULL) { handle_cpu_env(&OPENSSL_ia32cap_P[2], env2 + 1); } } #endif // !OPENSSL_NO_ASM && (OPENSSL_X86 || OPENSSL_X86_64) ================================================ FILE: Sources/CNIOBoringSSL/crypto/crypto.cc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include "bcm_support.h" #include "fipsmodule/rand/internal.h" #include "internal.h" static_assert(sizeof(ossl_ssize_t) == sizeof(size_t), "ossl_ssize_t should be the same size as size_t"); // Our assembly does not use the GOT to reference symbols, which means // references to visible symbols will often require a TEXTREL. This is // undesirable, so all assembly-referenced symbols should be hidden. CPU // capabilities are the only such symbols defined in C. Explicitly hide them, // rather than rely on being built with -fvisibility=hidden. #if defined(OPENSSL_WINDOWS) #define HIDDEN #else #define HIDDEN __attribute__((visibility("hidden"))) #endif // The capability variables are defined in this file in order to work around a // linker bug. When linking with a .a, if no symbols in a .o are referenced // then the .o is discarded, even if it has constructor functions. // // This still means that any binaries that don't include some functionality // that tests the capability values will still skip the constructor but, so // far, the init constructor function only sets the capability variables. #if defined(BORINGSSL_DISPATCH_TEST) // This value must be explicitly initialised to zero in order to work around a // bug in libtool or the linker on OS X. // // If not initialised then it becomes a "common symbol". When put into an // archive, linking on OS X will fail to resolve common symbols. By // initialising it to zero, it becomes a "data symbol", which isn't so // affected. 
HIDDEN uint8_t BORINGSSL_function_hit[9] = {0}; #endif #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) // This value must be explicitly initialized to zero. See similar comment above. HIDDEN uint32_t OPENSSL_ia32cap_P[4] = {0}; uint32_t OPENSSL_get_ia32cap(int idx) { OPENSSL_init_cpuid(); return OPENSSL_ia32cap_P[idx]; } #elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) #include #if defined(OPENSSL_STATIC_ARMCAP) // See ARM ACLE for the definitions of these macros. Note |__ARM_FEATURE_AES| // covers both AES and PMULL and |__ARM_FEATURE_SHA2| covers SHA-1 and SHA-256. // https://developer.arm.com/architectures/system-architectures/software-standards/acle // https://github.com/ARM-software/acle/issues/152 // // TODO(davidben): Do we still need |OPENSSL_STATIC_ARMCAP_*| or are the // standard flags and -march sufficient? HIDDEN uint32_t OPENSSL_armcap_P = #if defined(OPENSSL_STATIC_ARMCAP_NEON) || defined(__ARM_NEON) ARMV7_NEON | #endif #if defined(OPENSSL_STATIC_ARMCAP_AES) || defined(__ARM_FEATURE_AES) ARMV8_AES | #endif #if defined(OPENSSL_STATIC_ARMCAP_PMULL) || defined(__ARM_FEATURE_AES) ARMV8_PMULL | #endif #if defined(OPENSSL_STATIC_ARMCAP_SHA1) || defined(__ARM_FEATURE_SHA2) ARMV8_SHA1 | #endif #if defined(OPENSSL_STATIC_ARMCAP_SHA256) || defined(__ARM_FEATURE_SHA2) ARMV8_SHA256 | #endif #if defined(__ARM_FEATURE_SHA512) ARMV8_SHA512 | #endif 0; #else HIDDEN uint32_t OPENSSL_armcap_P = 0; uint32_t *OPENSSL_get_armcap_pointer_for_test(void) { OPENSSL_init_cpuid(); return &OPENSSL_armcap_P; } #endif uint32_t OPENSSL_get_armcap(void) { OPENSSL_init_cpuid(); return OPENSSL_armcap_P; } #endif #if defined(NEED_CPUID) static CRYPTO_once_t once = CRYPTO_ONCE_INIT; void OPENSSL_init_cpuid(void) { CRYPTO_once(&once, OPENSSL_cpuid_setup); } #endif void CRYPTO_library_init(void) {} int CRYPTO_is_confidential_build(void) { #if defined(BORINGSSL_CONFIDENTIAL) return 1; #else return 0; #endif } void CRYPTO_pre_sandbox_init(void) { // Read from /proc/cpuinfo if 
needed. OPENSSL_init_cpuid(); // Open /dev/urandom if needed. CRYPTO_init_sysrand(); // Set up MADV_WIPEONFORK state if needed. CRYPTO_get_fork_generation(); } const char *SSLeay_version(int which) { return OpenSSL_version(which); } const char *OpenSSL_version(int which) { switch (which) { case OPENSSL_VERSION: return "BoringSSL"; case OPENSSL_CFLAGS: return "compiler: n/a"; case OPENSSL_BUILT_ON: return "built on: n/a"; case OPENSSL_PLATFORM: return "platform: n/a"; case OPENSSL_DIR: return "OPENSSLDIR: n/a"; default: return "not available"; } } unsigned long SSLeay(void) { return OPENSSL_VERSION_NUMBER; } unsigned long OpenSSL_version_num(void) { return OPENSSL_VERSION_NUMBER; } int CRYPTO_malloc_init(void) { return 1; } int OPENSSL_malloc_init(void) { return 1; } void ENGINE_load_builtin_engines(void) {} int ENGINE_register_all_complete(void) { return 1; } void OPENSSL_load_builtin_modules(void) {} int OPENSSL_init_crypto(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings) { return 1; } void OPENSSL_cleanup(void) {} FILE *CRYPTO_get_stderr(void) { return stderr; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/curve25519/asm/x25519-asm-arm.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL #if defined(__arm__) && defined(__linux__) /* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This file is taken from crypto_scalarmult/curve25519/neon2/scalarmult.s in * SUPERCOP 20141124 (http://bench.cr.yp.to/supercop.html). That code is public * domain licensed but the standard ISC license is included above to keep * licensing simple. */ #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) .fpu neon .text .align 4 .global x25519_NEON .hidden x25519_NEON .type x25519_NEON, %function x25519_NEON: vpush {q4,q5,q6,q7} mov r12,sp sub sp,sp,#736 and sp,sp,#0xffffffe0 strd r4,[sp,#0] strd r6,[sp,#8] strd r8,[sp,#16] strd r10,[sp,#24] str r12,[sp,#480] str r14,[sp,#484] mov r0,r0 mov r1,r1 mov r2,r2 add r3,sp,#32 ldr r4,=0 ldr r5,=254 vmov.i32 q0,#1 vshr.u64 q1,q0,#7 vshr.u64 q0,q0,#8 vmov.i32 d4,#19 vmov.i32 d5,#38 add r6,sp,#512 vst1.8 {d2-d3},[r6,: 128] add r6,sp,#528 vst1.8 {d0-d1},[r6,: 128] add r6,sp,#544 vst1.8 {d4-d5},[r6,: 128] add r6,r3,#0 vmov.i32 q2,#0 vst1.8 {d4-d5},[r6,: 128]! vst1.8 {d4-d5},[r6,: 128]! vst1.8 d4,[r6,: 64] add r6,r3,#0 ldr r7,=960 sub r7,r7,#2 neg r7,r7 sub r7,r7,r7,LSL #7 str r7,[r6] add r6,sp,#704 vld1.8 {d4-d5},[r1]! vld1.8 {d6-d7},[r1] vst1.8 {d4-d5},[r6,: 128]! 
vst1.8 {d6-d7},[r6,: 128] sub r1,r6,#16 ldrb r6,[r1] and r6,r6,#248 strb r6,[r1] ldrb r6,[r1,#31] and r6,r6,#127 orr r6,r6,#64 strb r6,[r1,#31] vmov.i64 q2,#0xffffffff vshr.u64 q3,q2,#7 vshr.u64 q2,q2,#6 vld1.8 {d8},[r2] vld1.8 {d10},[r2] add r2,r2,#6 vld1.8 {d12},[r2] vld1.8 {d14},[r2] add r2,r2,#6 vld1.8 {d16},[r2] add r2,r2,#4 vld1.8 {d18},[r2] vld1.8 {d20},[r2] add r2,r2,#6 vld1.8 {d22},[r2] add r2,r2,#2 vld1.8 {d24},[r2] vld1.8 {d26},[r2] vshr.u64 q5,q5,#26 vshr.u64 q6,q6,#3 vshr.u64 q7,q7,#29 vshr.u64 q8,q8,#6 vshr.u64 q10,q10,#25 vshr.u64 q11,q11,#3 vshr.u64 q12,q12,#12 vshr.u64 q13,q13,#38 vand q4,q4,q2 vand q6,q6,q2 vand q8,q8,q2 vand q10,q10,q2 vand q2,q12,q2 vand q5,q5,q3 vand q7,q7,q3 vand q9,q9,q3 vand q11,q11,q3 vand q3,q13,q3 add r2,r3,#48 vadd.i64 q12,q4,q1 vadd.i64 q13,q10,q1 vshr.s64 q12,q12,#26 vshr.s64 q13,q13,#26 vadd.i64 q5,q5,q12 vshl.i64 q12,q12,#26 vadd.i64 q14,q5,q0 vadd.i64 q11,q11,q13 vshl.i64 q13,q13,#26 vadd.i64 q15,q11,q0 vsub.i64 q4,q4,q12 vshr.s64 q12,q14,#25 vsub.i64 q10,q10,q13 vshr.s64 q13,q15,#25 vadd.i64 q6,q6,q12 vshl.i64 q12,q12,#25 vadd.i64 q14,q6,q1 vadd.i64 q2,q2,q13 vsub.i64 q5,q5,q12 vshr.s64 q12,q14,#26 vshl.i64 q13,q13,#25 vadd.i64 q14,q2,q1 vadd.i64 q7,q7,q12 vshl.i64 q12,q12,#26 vadd.i64 q15,q7,q0 vsub.i64 q11,q11,q13 vshr.s64 q13,q14,#26 vsub.i64 q6,q6,q12 vshr.s64 q12,q15,#25 vadd.i64 q3,q3,q13 vshl.i64 q13,q13,#26 vadd.i64 q14,q3,q0 vadd.i64 q8,q8,q12 vshl.i64 q12,q12,#25 vadd.i64 q15,q8,q1 add r2,r2,#8 vsub.i64 q2,q2,q13 vshr.s64 q13,q14,#25 vsub.i64 q7,q7,q12 vshr.s64 q12,q15,#26 vadd.i64 q14,q13,q13 vadd.i64 q9,q9,q12 vtrn.32 d12,d14 vshl.i64 q12,q12,#26 vtrn.32 d13,d15 vadd.i64 q0,q9,q0 vadd.i64 q4,q4,q14 vst1.8 d12,[r2,: 64]! 
vshl.i64 q6,q13,#4 vsub.i64 q7,q8,q12 vshr.s64 q0,q0,#25 vadd.i64 q4,q4,q6 vadd.i64 q6,q10,q0 vshl.i64 q0,q0,#25 vadd.i64 q8,q6,q1 vadd.i64 q4,q4,q13 vshl.i64 q10,q13,#25 vadd.i64 q1,q4,q1 vsub.i64 q0,q9,q0 vshr.s64 q8,q8,#26 vsub.i64 q3,q3,q10 vtrn.32 d14,d0 vshr.s64 q1,q1,#26 vtrn.32 d15,d1 vadd.i64 q0,q11,q8 vst1.8 d14,[r2,: 64] vshl.i64 q7,q8,#26 vadd.i64 q5,q5,q1 vtrn.32 d4,d6 vshl.i64 q1,q1,#26 vtrn.32 d5,d7 vsub.i64 q3,q6,q7 add r2,r2,#16 vsub.i64 q1,q4,q1 vst1.8 d4,[r2,: 64] vtrn.32 d6,d0 vtrn.32 d7,d1 sub r2,r2,#8 vtrn.32 d2,d10 vtrn.32 d3,d11 vst1.8 d6,[r2,: 64] sub r2,r2,#24 vst1.8 d2,[r2,: 64] add r2,r3,#96 vmov.i32 q0,#0 vmov.i64 d2,#0xff vmov.i64 d3,#0 vshr.u32 q1,q1,#7 vst1.8 {d2-d3},[r2,: 128]! vst1.8 {d0-d1},[r2,: 128]! vst1.8 d0,[r2,: 64] add r2,r3,#144 vmov.i32 q0,#0 vst1.8 {d0-d1},[r2,: 128]! vst1.8 {d0-d1},[r2,: 128]! vst1.8 d0,[r2,: 64] add r2,r3,#240 vmov.i32 q0,#0 vmov.i64 d2,#0xff vmov.i64 d3,#0 vshr.u32 q1,q1,#7 vst1.8 {d2-d3},[r2,: 128]! vst1.8 {d0-d1},[r2,: 128]! vst1.8 d0,[r2,: 64] add r2,r3,#48 add r6,r3,#192 vld1.8 {d0-d1},[r2,: 128]! vld1.8 {d2-d3},[r2,: 128]! vld1.8 {d4},[r2,: 64] vst1.8 {d0-d1},[r6,: 128]! vst1.8 {d2-d3},[r6,: 128]! vst1.8 d4,[r6,: 64] ._mainloop: mov r2,r5,LSR #3 and r6,r5,#7 ldrb r2,[r1,r2] mov r2,r2,LSR r6 and r2,r2,#1 str r5,[sp,#488] eor r4,r4,r2 str r2,[sp,#492] neg r2,r4 add r4,r3,#96 add r5,r3,#192 add r6,r3,#144 vld1.8 {d8-d9},[r4,: 128]! add r7,r3,#240 vld1.8 {d10-d11},[r5,: 128]! veor q6,q4,q5 vld1.8 {d14-d15},[r6,: 128]! vdup.i32 q8,r2 vld1.8 {d18-d19},[r7,: 128]! veor q10,q7,q9 vld1.8 {d22-d23},[r4,: 128]! vand q6,q6,q8 vld1.8 {d24-d25},[r5,: 128]! vand q10,q10,q8 vld1.8 {d26-d27},[r6,: 128]! veor q4,q4,q6 vld1.8 {d28-d29},[r7,: 128]! 
veor q5,q5,q6 vld1.8 {d0},[r4,: 64] veor q6,q7,q10 vld1.8 {d2},[r5,: 64] veor q7,q9,q10 vld1.8 {d4},[r6,: 64] veor q9,q11,q12 vld1.8 {d6},[r7,: 64] veor q10,q0,q1 sub r2,r4,#32 vand q9,q9,q8 sub r4,r5,#32 vand q10,q10,q8 sub r5,r6,#32 veor q11,q11,q9 sub r6,r7,#32 veor q0,q0,q10 veor q9,q12,q9 veor q1,q1,q10 veor q10,q13,q14 veor q12,q2,q3 vand q10,q10,q8 vand q8,q12,q8 veor q12,q13,q10 veor q2,q2,q8 veor q10,q14,q10 veor q3,q3,q8 vadd.i32 q8,q4,q6 vsub.i32 q4,q4,q6 vst1.8 {d16-d17},[r2,: 128]! vadd.i32 q6,q11,q12 vst1.8 {d8-d9},[r5,: 128]! vsub.i32 q4,q11,q12 vst1.8 {d12-d13},[r2,: 128]! vadd.i32 q6,q0,q2 vst1.8 {d8-d9},[r5,: 128]! vsub.i32 q0,q0,q2 vst1.8 d12,[r2,: 64] vadd.i32 q2,q5,q7 vst1.8 d0,[r5,: 64] vsub.i32 q0,q5,q7 vst1.8 {d4-d5},[r4,: 128]! vadd.i32 q2,q9,q10 vst1.8 {d0-d1},[r6,: 128]! vsub.i32 q0,q9,q10 vst1.8 {d4-d5},[r4,: 128]! vadd.i32 q2,q1,q3 vst1.8 {d0-d1},[r6,: 128]! vsub.i32 q0,q1,q3 vst1.8 d4,[r4,: 64] vst1.8 d0,[r6,: 64] add r2,sp,#544 add r4,r3,#96 add r5,r3,#144 vld1.8 {d0-d1},[r2,: 128] vld1.8 {d2-d3},[r4,: 128]! vld1.8 {d4-d5},[r5,: 128]! vzip.i32 q1,q2 vld1.8 {d6-d7},[r4,: 128]! vld1.8 {d8-d9},[r5,: 128]! 
vshl.i32 q5,q1,#1 vzip.i32 q3,q4 vshl.i32 q6,q2,#1 vld1.8 {d14},[r4,: 64] vshl.i32 q8,q3,#1 vld1.8 {d15},[r5,: 64] vshl.i32 q9,q4,#1 vmul.i32 d21,d7,d1 vtrn.32 d14,d15 vmul.i32 q11,q4,q0 vmul.i32 q0,q7,q0 vmull.s32 q12,d2,d2 vmlal.s32 q12,d11,d1 vmlal.s32 q12,d12,d0 vmlal.s32 q12,d13,d23 vmlal.s32 q12,d16,d22 vmlal.s32 q12,d7,d21 vmull.s32 q10,d2,d11 vmlal.s32 q10,d4,d1 vmlal.s32 q10,d13,d0 vmlal.s32 q10,d6,d23 vmlal.s32 q10,d17,d22 vmull.s32 q13,d10,d4 vmlal.s32 q13,d11,d3 vmlal.s32 q13,d13,d1 vmlal.s32 q13,d16,d0 vmlal.s32 q13,d17,d23 vmlal.s32 q13,d8,d22 vmull.s32 q1,d10,d5 vmlal.s32 q1,d11,d4 vmlal.s32 q1,d6,d1 vmlal.s32 q1,d17,d0 vmlal.s32 q1,d8,d23 vmull.s32 q14,d10,d6 vmlal.s32 q14,d11,d13 vmlal.s32 q14,d4,d4 vmlal.s32 q14,d17,d1 vmlal.s32 q14,d18,d0 vmlal.s32 q14,d9,d23 vmull.s32 q11,d10,d7 vmlal.s32 q11,d11,d6 vmlal.s32 q11,d12,d5 vmlal.s32 q11,d8,d1 vmlal.s32 q11,d19,d0 vmull.s32 q15,d10,d8 vmlal.s32 q15,d11,d17 vmlal.s32 q15,d12,d6 vmlal.s32 q15,d13,d5 vmlal.s32 q15,d19,d1 vmlal.s32 q15,d14,d0 vmull.s32 q2,d10,d9 vmlal.s32 q2,d11,d8 vmlal.s32 q2,d12,d7 vmlal.s32 q2,d13,d6 vmlal.s32 q2,d14,d1 vmull.s32 q0,d15,d1 vmlal.s32 q0,d10,d14 vmlal.s32 q0,d11,d19 vmlal.s32 q0,d12,d8 vmlal.s32 q0,d13,d17 vmlal.s32 q0,d6,d6 add r2,sp,#512 vld1.8 {d18-d19},[r2,: 128] vmull.s32 q3,d16,d7 vmlal.s32 q3,d10,d15 vmlal.s32 q3,d11,d14 vmlal.s32 q3,d12,d9 vmlal.s32 q3,d13,d8 add r2,sp,#528 vld1.8 {d8-d9},[r2,: 128] vadd.i64 q5,q12,q9 vadd.i64 q6,q15,q9 vshr.s64 q5,q5,#26 vshr.s64 q6,q6,#26 vadd.i64 q7,q10,q5 vshl.i64 q5,q5,#26 vadd.i64 q8,q7,q4 vadd.i64 q2,q2,q6 vshl.i64 q6,q6,#26 vadd.i64 q10,q2,q4 vsub.i64 q5,q12,q5 vshr.s64 q8,q8,#25 vsub.i64 q6,q15,q6 vshr.s64 q10,q10,#25 vadd.i64 q12,q13,q8 vshl.i64 q8,q8,#25 vadd.i64 q13,q12,q9 vadd.i64 q0,q0,q10 vsub.i64 q7,q7,q8 vshr.s64 q8,q13,#26 vshl.i64 q10,q10,#25 vadd.i64 q13,q0,q9 vadd.i64 q1,q1,q8 vshl.i64 q8,q8,#26 vadd.i64 q15,q1,q4 vsub.i64 q2,q2,q10 vshr.s64 q10,q13,#26 vsub.i64 q8,q12,q8 vshr.s64 q12,q15,#25 vadd.i64 
q3,q3,q10 vshl.i64 q10,q10,#26 vadd.i64 q13,q3,q4 vadd.i64 q14,q14,q12 add r2,r3,#288 vshl.i64 q12,q12,#25 add r4,r3,#336 vadd.i64 q15,q14,q9 add r2,r2,#8 vsub.i64 q0,q0,q10 add r4,r4,#8 vshr.s64 q10,q13,#25 vsub.i64 q1,q1,q12 vshr.s64 q12,q15,#26 vadd.i64 q13,q10,q10 vadd.i64 q11,q11,q12 vtrn.32 d16,d2 vshl.i64 q12,q12,#26 vtrn.32 d17,d3 vadd.i64 q1,q11,q4 vadd.i64 q4,q5,q13 vst1.8 d16,[r2,: 64]! vshl.i64 q5,q10,#4 vst1.8 d17,[r4,: 64]! vsub.i64 q8,q14,q12 vshr.s64 q1,q1,#25 vadd.i64 q4,q4,q5 vadd.i64 q5,q6,q1 vshl.i64 q1,q1,#25 vadd.i64 q6,q5,q9 vadd.i64 q4,q4,q10 vshl.i64 q10,q10,#25 vadd.i64 q9,q4,q9 vsub.i64 q1,q11,q1 vshr.s64 q6,q6,#26 vsub.i64 q3,q3,q10 vtrn.32 d16,d2 vshr.s64 q9,q9,#26 vtrn.32 d17,d3 vadd.i64 q1,q2,q6 vst1.8 d16,[r2,: 64] vshl.i64 q2,q6,#26 vst1.8 d17,[r4,: 64] vadd.i64 q6,q7,q9 vtrn.32 d0,d6 vshl.i64 q7,q9,#26 vtrn.32 d1,d7 vsub.i64 q2,q5,q2 add r2,r2,#16 vsub.i64 q3,q4,q7 vst1.8 d0,[r2,: 64] add r4,r4,#16 vst1.8 d1,[r4,: 64] vtrn.32 d4,d2 vtrn.32 d5,d3 sub r2,r2,#8 sub r4,r4,#8 vtrn.32 d6,d12 vtrn.32 d7,d13 vst1.8 d4,[r2,: 64] vst1.8 d5,[r4,: 64] sub r2,r2,#24 sub r4,r4,#24 vst1.8 d6,[r2,: 64] vst1.8 d7,[r4,: 64] add r2,r3,#240 add r4,r3,#96 vld1.8 {d0-d1},[r4,: 128]! vld1.8 {d2-d3},[r4,: 128]! vld1.8 {d4},[r4,: 64] add r4,r3,#144 vld1.8 {d6-d7},[r4,: 128]! vtrn.32 q0,q3 vld1.8 {d8-d9},[r4,: 128]! vshl.i32 q5,q0,#4 vtrn.32 q1,q4 vshl.i32 q6,q3,#4 vadd.i32 q5,q5,q0 vadd.i32 q6,q6,q3 vshl.i32 q7,q1,#4 vld1.8 {d5},[r4,: 64] vshl.i32 q8,q4,#4 vtrn.32 d4,d5 vadd.i32 q7,q7,q1 vadd.i32 q8,q8,q4 vld1.8 {d18-d19},[r2,: 128]! vshl.i32 q10,q2,#4 vld1.8 {d22-d23},[r2,: 128]! vadd.i32 q10,q10,q2 vld1.8 {d24},[r2,: 64] vadd.i32 q5,q5,q0 add r2,r3,#192 vld1.8 {d26-d27},[r2,: 128]! vadd.i32 q6,q6,q3 vld1.8 {d28-d29},[r2,: 128]! 
vadd.i32 q8,q8,q4 vld1.8 {d25},[r2,: 64] vadd.i32 q10,q10,q2 vtrn.32 q9,q13 vadd.i32 q7,q7,q1 vadd.i32 q5,q5,q0 vtrn.32 q11,q14 vadd.i32 q6,q6,q3 add r2,sp,#560 vadd.i32 q10,q10,q2 vtrn.32 d24,d25 vst1.8 {d12-d13},[r2,: 128] vshl.i32 q6,q13,#1 add r2,sp,#576 vst1.8 {d20-d21},[r2,: 128] vshl.i32 q10,q14,#1 add r2,sp,#592 vst1.8 {d12-d13},[r2,: 128] vshl.i32 q15,q12,#1 vadd.i32 q8,q8,q4 vext.32 d10,d31,d30,#0 vadd.i32 q7,q7,q1 add r2,sp,#608 vst1.8 {d16-d17},[r2,: 128] vmull.s32 q8,d18,d5 vmlal.s32 q8,d26,d4 vmlal.s32 q8,d19,d9 vmlal.s32 q8,d27,d3 vmlal.s32 q8,d22,d8 vmlal.s32 q8,d28,d2 vmlal.s32 q8,d23,d7 vmlal.s32 q8,d29,d1 vmlal.s32 q8,d24,d6 vmlal.s32 q8,d25,d0 add r2,sp,#624 vst1.8 {d14-d15},[r2,: 128] vmull.s32 q2,d18,d4 vmlal.s32 q2,d12,d9 vmlal.s32 q2,d13,d8 vmlal.s32 q2,d19,d3 vmlal.s32 q2,d22,d2 vmlal.s32 q2,d23,d1 vmlal.s32 q2,d24,d0 add r2,sp,#640 vst1.8 {d20-d21},[r2,: 128] vmull.s32 q7,d18,d9 vmlal.s32 q7,d26,d3 vmlal.s32 q7,d19,d8 vmlal.s32 q7,d27,d2 vmlal.s32 q7,d22,d7 vmlal.s32 q7,d28,d1 vmlal.s32 q7,d23,d6 vmlal.s32 q7,d29,d0 add r2,sp,#656 vst1.8 {d10-d11},[r2,: 128] vmull.s32 q5,d18,d3 vmlal.s32 q5,d19,d2 vmlal.s32 q5,d22,d1 vmlal.s32 q5,d23,d0 vmlal.s32 q5,d12,d8 add r2,sp,#672 vst1.8 {d16-d17},[r2,: 128] vmull.s32 q4,d18,d8 vmlal.s32 q4,d26,d2 vmlal.s32 q4,d19,d7 vmlal.s32 q4,d27,d1 vmlal.s32 q4,d22,d6 vmlal.s32 q4,d28,d0 vmull.s32 q8,d18,d7 vmlal.s32 q8,d26,d1 vmlal.s32 q8,d19,d6 vmlal.s32 q8,d27,d0 add r2,sp,#576 vld1.8 {d20-d21},[r2,: 128] vmlal.s32 q7,d24,d21 vmlal.s32 q7,d25,d20 vmlal.s32 q4,d23,d21 vmlal.s32 q4,d29,d20 vmlal.s32 q8,d22,d21 vmlal.s32 q8,d28,d20 vmlal.s32 q5,d24,d20 add r2,sp,#576 vst1.8 {d14-d15},[r2,: 128] vmull.s32 q7,d18,d6 vmlal.s32 q7,d26,d0 add r2,sp,#656 vld1.8 {d30-d31},[r2,: 128] vmlal.s32 q2,d30,d21 vmlal.s32 q7,d19,d21 vmlal.s32 q7,d27,d20 add r2,sp,#624 vld1.8 {d26-d27},[r2,: 128] vmlal.s32 q4,d25,d27 vmlal.s32 q8,d29,d27 vmlal.s32 q8,d25,d26 vmlal.s32 q7,d28,d27 vmlal.s32 q7,d29,d26 add r2,sp,#608 vld1.8 
{d28-d29},[r2,: 128] vmlal.s32 q4,d24,d29 vmlal.s32 q8,d23,d29 vmlal.s32 q8,d24,d28 vmlal.s32 q7,d22,d29 vmlal.s32 q7,d23,d28 add r2,sp,#608 vst1.8 {d8-d9},[r2,: 128] add r2,sp,#560 vld1.8 {d8-d9},[r2,: 128] vmlal.s32 q7,d24,d9 vmlal.s32 q7,d25,d31 vmull.s32 q1,d18,d2 vmlal.s32 q1,d19,d1 vmlal.s32 q1,d22,d0 vmlal.s32 q1,d24,d27 vmlal.s32 q1,d23,d20 vmlal.s32 q1,d12,d7 vmlal.s32 q1,d13,d6 vmull.s32 q6,d18,d1 vmlal.s32 q6,d19,d0 vmlal.s32 q6,d23,d27 vmlal.s32 q6,d22,d20 vmlal.s32 q6,d24,d26 vmull.s32 q0,d18,d0 vmlal.s32 q0,d22,d27 vmlal.s32 q0,d23,d26 vmlal.s32 q0,d24,d31 vmlal.s32 q0,d19,d20 add r2,sp,#640 vld1.8 {d18-d19},[r2,: 128] vmlal.s32 q2,d18,d7 vmlal.s32 q2,d19,d6 vmlal.s32 q5,d18,d6 vmlal.s32 q5,d19,d21 vmlal.s32 q1,d18,d21 vmlal.s32 q1,d19,d29 vmlal.s32 q0,d18,d28 vmlal.s32 q0,d19,d9 vmlal.s32 q6,d18,d29 vmlal.s32 q6,d19,d28 add r2,sp,#592 vld1.8 {d18-d19},[r2,: 128] add r2,sp,#512 vld1.8 {d22-d23},[r2,: 128] vmlal.s32 q5,d19,d7 vmlal.s32 q0,d18,d21 vmlal.s32 q0,d19,d29 vmlal.s32 q6,d18,d6 add r2,sp,#528 vld1.8 {d6-d7},[r2,: 128] vmlal.s32 q6,d19,d21 add r2,sp,#576 vld1.8 {d18-d19},[r2,: 128] vmlal.s32 q0,d30,d8 add r2,sp,#672 vld1.8 {d20-d21},[r2,: 128] vmlal.s32 q5,d30,d29 add r2,sp,#608 vld1.8 {d24-d25},[r2,: 128] vmlal.s32 q1,d30,d28 vadd.i64 q13,q0,q11 vadd.i64 q14,q5,q11 vmlal.s32 q6,d30,d9 vshr.s64 q4,q13,#26 vshr.s64 q13,q14,#26 vadd.i64 q7,q7,q4 vshl.i64 q4,q4,#26 vadd.i64 q14,q7,q3 vadd.i64 q9,q9,q13 vshl.i64 q13,q13,#26 vadd.i64 q15,q9,q3 vsub.i64 q0,q0,q4 vshr.s64 q4,q14,#25 vsub.i64 q5,q5,q13 vshr.s64 q13,q15,#25 vadd.i64 q6,q6,q4 vshl.i64 q4,q4,#25 vadd.i64 q14,q6,q11 vadd.i64 q2,q2,q13 vsub.i64 q4,q7,q4 vshr.s64 q7,q14,#26 vshl.i64 q13,q13,#25 vadd.i64 q14,q2,q11 vadd.i64 q8,q8,q7 vshl.i64 q7,q7,#26 vadd.i64 q15,q8,q3 vsub.i64 q9,q9,q13 vshr.s64 q13,q14,#26 vsub.i64 q6,q6,q7 vshr.s64 q7,q15,#25 vadd.i64 q10,q10,q13 vshl.i64 q13,q13,#26 vadd.i64 q14,q10,q3 vadd.i64 q1,q1,q7 add r2,r3,#144 vshl.i64 q7,q7,#25 add r4,r3,#96 vadd.i64 q15,q1,q11 
add r2,r2,#8 vsub.i64 q2,q2,q13 add r4,r4,#8 vshr.s64 q13,q14,#25 vsub.i64 q7,q8,q7 vshr.s64 q8,q15,#26 vadd.i64 q14,q13,q13 vadd.i64 q12,q12,q8 vtrn.32 d12,d14 vshl.i64 q8,q8,#26 vtrn.32 d13,d15 vadd.i64 q3,q12,q3 vadd.i64 q0,q0,q14 vst1.8 d12,[r2,: 64]! vshl.i64 q7,q13,#4 vst1.8 d13,[r4,: 64]! vsub.i64 q1,q1,q8 vshr.s64 q3,q3,#25 vadd.i64 q0,q0,q7 vadd.i64 q5,q5,q3 vshl.i64 q3,q3,#25 vadd.i64 q6,q5,q11 vadd.i64 q0,q0,q13 vshl.i64 q7,q13,#25 vadd.i64 q8,q0,q11 vsub.i64 q3,q12,q3 vshr.s64 q6,q6,#26 vsub.i64 q7,q10,q7 vtrn.32 d2,d6 vshr.s64 q8,q8,#26 vtrn.32 d3,d7 vadd.i64 q3,q9,q6 vst1.8 d2,[r2,: 64] vshl.i64 q6,q6,#26 vst1.8 d3,[r4,: 64] vadd.i64 q1,q4,q8 vtrn.32 d4,d14 vshl.i64 q4,q8,#26 vtrn.32 d5,d15 vsub.i64 q5,q5,q6 add r2,r2,#16 vsub.i64 q0,q0,q4 vst1.8 d4,[r2,: 64] add r4,r4,#16 vst1.8 d5,[r4,: 64] vtrn.32 d10,d6 vtrn.32 d11,d7 sub r2,r2,#8 sub r4,r4,#8 vtrn.32 d0,d2 vtrn.32 d1,d3 vst1.8 d10,[r2,: 64] vst1.8 d11,[r4,: 64] sub r2,r2,#24 sub r4,r4,#24 vst1.8 d0,[r2,: 64] vst1.8 d1,[r4,: 64] add r2,r3,#288 add r4,r3,#336 vld1.8 {d0-d1},[r2,: 128]! vld1.8 {d2-d3},[r4,: 128]! vsub.i32 q0,q0,q1 vld1.8 {d2-d3},[r2,: 128]! vld1.8 {d4-d5},[r4,: 128]! vsub.i32 q1,q1,q2 add r5,r3,#240 vld1.8 {d4},[r2,: 64] vld1.8 {d6},[r4,: 64] vsub.i32 q2,q2,q3 vst1.8 {d0-d1},[r5,: 128]! vst1.8 {d2-d3},[r5,: 128]! vst1.8 d4,[r5,: 64] add r2,r3,#144 add r4,r3,#96 add r5,r3,#144 add r6,r3,#192 vld1.8 {d0-d1},[r2,: 128]! vld1.8 {d2-d3},[r4,: 128]! vsub.i32 q2,q0,q1 vadd.i32 q0,q0,q1 vld1.8 {d2-d3},[r2,: 128]! vld1.8 {d6-d7},[r4,: 128]! vsub.i32 q4,q1,q3 vadd.i32 q1,q1,q3 vld1.8 {d6},[r2,: 64] vld1.8 {d10},[r4,: 64] vsub.i32 q6,q3,q5 vadd.i32 q3,q3,q5 vst1.8 {d4-d5},[r5,: 128]! vst1.8 {d0-d1},[r6,: 128]! vst1.8 {d8-d9},[r5,: 128]! vst1.8 {d2-d3},[r6,: 128]! vst1.8 d12,[r5,: 64] vst1.8 d6,[r6,: 64] add r2,r3,#0 add r4,r3,#240 vld1.8 {d0-d1},[r4,: 128]! vld1.8 {d2-d3},[r4,: 128]! vld1.8 {d4},[r4,: 64] add r4,r3,#336 vld1.8 {d6-d7},[r4,: 128]! vtrn.32 q0,q3 vld1.8 {d8-d9},[r4,: 128]! 
vshl.i32 q5,q0,#4 vtrn.32 q1,q4 vshl.i32 q6,q3,#4 vadd.i32 q5,q5,q0 vadd.i32 q6,q6,q3 vshl.i32 q7,q1,#4 vld1.8 {d5},[r4,: 64] vshl.i32 q8,q4,#4 vtrn.32 d4,d5 vadd.i32 q7,q7,q1 vadd.i32 q8,q8,q4 vld1.8 {d18-d19},[r2,: 128]! vshl.i32 q10,q2,#4 vld1.8 {d22-d23},[r2,: 128]! vadd.i32 q10,q10,q2 vld1.8 {d24},[r2,: 64] vadd.i32 q5,q5,q0 add r2,r3,#288 vld1.8 {d26-d27},[r2,: 128]! vadd.i32 q6,q6,q3 vld1.8 {d28-d29},[r2,: 128]! vadd.i32 q8,q8,q4 vld1.8 {d25},[r2,: 64] vadd.i32 q10,q10,q2 vtrn.32 q9,q13 vadd.i32 q7,q7,q1 vadd.i32 q5,q5,q0 vtrn.32 q11,q14 vadd.i32 q6,q6,q3 add r2,sp,#560 vadd.i32 q10,q10,q2 vtrn.32 d24,d25 vst1.8 {d12-d13},[r2,: 128] vshl.i32 q6,q13,#1 add r2,sp,#576 vst1.8 {d20-d21},[r2,: 128] vshl.i32 q10,q14,#1 add r2,sp,#592 vst1.8 {d12-d13},[r2,: 128] vshl.i32 q15,q12,#1 vadd.i32 q8,q8,q4 vext.32 d10,d31,d30,#0 vadd.i32 q7,q7,q1 add r2,sp,#608 vst1.8 {d16-d17},[r2,: 128] vmull.s32 q8,d18,d5 vmlal.s32 q8,d26,d4 vmlal.s32 q8,d19,d9 vmlal.s32 q8,d27,d3 vmlal.s32 q8,d22,d8 vmlal.s32 q8,d28,d2 vmlal.s32 q8,d23,d7 vmlal.s32 q8,d29,d1 vmlal.s32 q8,d24,d6 vmlal.s32 q8,d25,d0 add r2,sp,#624 vst1.8 {d14-d15},[r2,: 128] vmull.s32 q2,d18,d4 vmlal.s32 q2,d12,d9 vmlal.s32 q2,d13,d8 vmlal.s32 q2,d19,d3 vmlal.s32 q2,d22,d2 vmlal.s32 q2,d23,d1 vmlal.s32 q2,d24,d0 add r2,sp,#640 vst1.8 {d20-d21},[r2,: 128] vmull.s32 q7,d18,d9 vmlal.s32 q7,d26,d3 vmlal.s32 q7,d19,d8 vmlal.s32 q7,d27,d2 vmlal.s32 q7,d22,d7 vmlal.s32 q7,d28,d1 vmlal.s32 q7,d23,d6 vmlal.s32 q7,d29,d0 add r2,sp,#656 vst1.8 {d10-d11},[r2,: 128] vmull.s32 q5,d18,d3 vmlal.s32 q5,d19,d2 vmlal.s32 q5,d22,d1 vmlal.s32 q5,d23,d0 vmlal.s32 q5,d12,d8 add r2,sp,#672 vst1.8 {d16-d17},[r2,: 128] vmull.s32 q4,d18,d8 vmlal.s32 q4,d26,d2 vmlal.s32 q4,d19,d7 vmlal.s32 q4,d27,d1 vmlal.s32 q4,d22,d6 vmlal.s32 q4,d28,d0 vmull.s32 q8,d18,d7 vmlal.s32 q8,d26,d1 vmlal.s32 q8,d19,d6 vmlal.s32 q8,d27,d0 add r2,sp,#576 vld1.8 {d20-d21},[r2,: 128] vmlal.s32 q7,d24,d21 vmlal.s32 q7,d25,d20 vmlal.s32 q4,d23,d21 vmlal.s32 q4,d29,d20 
vmlal.s32 q8,d22,d21 vmlal.s32 q8,d28,d20 vmlal.s32 q5,d24,d20 add r2,sp,#576 vst1.8 {d14-d15},[r2,: 128] vmull.s32 q7,d18,d6 vmlal.s32 q7,d26,d0 add r2,sp,#656 vld1.8 {d30-d31},[r2,: 128] vmlal.s32 q2,d30,d21 vmlal.s32 q7,d19,d21 vmlal.s32 q7,d27,d20 add r2,sp,#624 vld1.8 {d26-d27},[r2,: 128] vmlal.s32 q4,d25,d27 vmlal.s32 q8,d29,d27 vmlal.s32 q8,d25,d26 vmlal.s32 q7,d28,d27 vmlal.s32 q7,d29,d26 add r2,sp,#608 vld1.8 {d28-d29},[r2,: 128] vmlal.s32 q4,d24,d29 vmlal.s32 q8,d23,d29 vmlal.s32 q8,d24,d28 vmlal.s32 q7,d22,d29 vmlal.s32 q7,d23,d28 add r2,sp,#608 vst1.8 {d8-d9},[r2,: 128] add r2,sp,#560 vld1.8 {d8-d9},[r2,: 128] vmlal.s32 q7,d24,d9 vmlal.s32 q7,d25,d31 vmull.s32 q1,d18,d2 vmlal.s32 q1,d19,d1 vmlal.s32 q1,d22,d0 vmlal.s32 q1,d24,d27 vmlal.s32 q1,d23,d20 vmlal.s32 q1,d12,d7 vmlal.s32 q1,d13,d6 vmull.s32 q6,d18,d1 vmlal.s32 q6,d19,d0 vmlal.s32 q6,d23,d27 vmlal.s32 q6,d22,d20 vmlal.s32 q6,d24,d26 vmull.s32 q0,d18,d0 vmlal.s32 q0,d22,d27 vmlal.s32 q0,d23,d26 vmlal.s32 q0,d24,d31 vmlal.s32 q0,d19,d20 add r2,sp,#640 vld1.8 {d18-d19},[r2,: 128] vmlal.s32 q2,d18,d7 vmlal.s32 q2,d19,d6 vmlal.s32 q5,d18,d6 vmlal.s32 q5,d19,d21 vmlal.s32 q1,d18,d21 vmlal.s32 q1,d19,d29 vmlal.s32 q0,d18,d28 vmlal.s32 q0,d19,d9 vmlal.s32 q6,d18,d29 vmlal.s32 q6,d19,d28 add r2,sp,#592 vld1.8 {d18-d19},[r2,: 128] add r2,sp,#512 vld1.8 {d22-d23},[r2,: 128] vmlal.s32 q5,d19,d7 vmlal.s32 q0,d18,d21 vmlal.s32 q0,d19,d29 vmlal.s32 q6,d18,d6 add r2,sp,#528 vld1.8 {d6-d7},[r2,: 128] vmlal.s32 q6,d19,d21 add r2,sp,#576 vld1.8 {d18-d19},[r2,: 128] vmlal.s32 q0,d30,d8 add r2,sp,#672 vld1.8 {d20-d21},[r2,: 128] vmlal.s32 q5,d30,d29 add r2,sp,#608 vld1.8 {d24-d25},[r2,: 128] vmlal.s32 q1,d30,d28 vadd.i64 q13,q0,q11 vadd.i64 q14,q5,q11 vmlal.s32 q6,d30,d9 vshr.s64 q4,q13,#26 vshr.s64 q13,q14,#26 vadd.i64 q7,q7,q4 vshl.i64 q4,q4,#26 vadd.i64 q14,q7,q3 vadd.i64 q9,q9,q13 vshl.i64 q13,q13,#26 vadd.i64 q15,q9,q3 vsub.i64 q0,q0,q4 vshr.s64 q4,q14,#25 vsub.i64 q5,q5,q13 vshr.s64 q13,q15,#25 vadd.i64 
q6,q6,q4 vshl.i64 q4,q4,#25 vadd.i64 q14,q6,q11 vadd.i64 q2,q2,q13 vsub.i64 q4,q7,q4 vshr.s64 q7,q14,#26 vshl.i64 q13,q13,#25 vadd.i64 q14,q2,q11 vadd.i64 q8,q8,q7 vshl.i64 q7,q7,#26 vadd.i64 q15,q8,q3 vsub.i64 q9,q9,q13 vshr.s64 q13,q14,#26 vsub.i64 q6,q6,q7 vshr.s64 q7,q15,#25 vadd.i64 q10,q10,q13 vshl.i64 q13,q13,#26 vadd.i64 q14,q10,q3 vadd.i64 q1,q1,q7 add r2,r3,#288 vshl.i64 q7,q7,#25 add r4,r3,#96 vadd.i64 q15,q1,q11 add r2,r2,#8 vsub.i64 q2,q2,q13 add r4,r4,#8 vshr.s64 q13,q14,#25 vsub.i64 q7,q8,q7 vshr.s64 q8,q15,#26 vadd.i64 q14,q13,q13 vadd.i64 q12,q12,q8 vtrn.32 d12,d14 vshl.i64 q8,q8,#26 vtrn.32 d13,d15 vadd.i64 q3,q12,q3 vadd.i64 q0,q0,q14 vst1.8 d12,[r2,: 64]! vshl.i64 q7,q13,#4 vst1.8 d13,[r4,: 64]! vsub.i64 q1,q1,q8 vshr.s64 q3,q3,#25 vadd.i64 q0,q0,q7 vadd.i64 q5,q5,q3 vshl.i64 q3,q3,#25 vadd.i64 q6,q5,q11 vadd.i64 q0,q0,q13 vshl.i64 q7,q13,#25 vadd.i64 q8,q0,q11 vsub.i64 q3,q12,q3 vshr.s64 q6,q6,#26 vsub.i64 q7,q10,q7 vtrn.32 d2,d6 vshr.s64 q8,q8,#26 vtrn.32 d3,d7 vadd.i64 q3,q9,q6 vst1.8 d2,[r2,: 64] vshl.i64 q6,q6,#26 vst1.8 d3,[r4,: 64] vadd.i64 q1,q4,q8 vtrn.32 d4,d14 vshl.i64 q4,q8,#26 vtrn.32 d5,d15 vsub.i64 q5,q5,q6 add r2,r2,#16 vsub.i64 q0,q0,q4 vst1.8 d4,[r2,: 64] add r4,r4,#16 vst1.8 d5,[r4,: 64] vtrn.32 d10,d6 vtrn.32 d11,d7 sub r2,r2,#8 sub r4,r4,#8 vtrn.32 d0,d2 vtrn.32 d1,d3 vst1.8 d10,[r2,: 64] vst1.8 d11,[r4,: 64] sub r2,r2,#24 sub r4,r4,#24 vst1.8 d0,[r2,: 64] vst1.8 d1,[r4,: 64] add r2,sp,#544 add r4,r3,#144 add r5,r3,#192 vld1.8 {d0-d1},[r2,: 128] vld1.8 {d2-d3},[r4,: 128]! vld1.8 {d4-d5},[r5,: 128]! vzip.i32 q1,q2 vld1.8 {d6-d7},[r4,: 128]! vld1.8 {d8-d9},[r5,: 128]! 
vshl.i32 q5,q1,#1 vzip.i32 q3,q4 vshl.i32 q6,q2,#1 vld1.8 {d14},[r4,: 64] vshl.i32 q8,q3,#1 vld1.8 {d15},[r5,: 64] vshl.i32 q9,q4,#1 vmul.i32 d21,d7,d1 vtrn.32 d14,d15 vmul.i32 q11,q4,q0 vmul.i32 q0,q7,q0 vmull.s32 q12,d2,d2 vmlal.s32 q12,d11,d1 vmlal.s32 q12,d12,d0 vmlal.s32 q12,d13,d23 vmlal.s32 q12,d16,d22 vmlal.s32 q12,d7,d21 vmull.s32 q10,d2,d11 vmlal.s32 q10,d4,d1 vmlal.s32 q10,d13,d0 vmlal.s32 q10,d6,d23 vmlal.s32 q10,d17,d22 vmull.s32 q13,d10,d4 vmlal.s32 q13,d11,d3 vmlal.s32 q13,d13,d1 vmlal.s32 q13,d16,d0 vmlal.s32 q13,d17,d23 vmlal.s32 q13,d8,d22 vmull.s32 q1,d10,d5 vmlal.s32 q1,d11,d4 vmlal.s32 q1,d6,d1 vmlal.s32 q1,d17,d0 vmlal.s32 q1,d8,d23 vmull.s32 q14,d10,d6 vmlal.s32 q14,d11,d13 vmlal.s32 q14,d4,d4 vmlal.s32 q14,d17,d1 vmlal.s32 q14,d18,d0 vmlal.s32 q14,d9,d23 vmull.s32 q11,d10,d7 vmlal.s32 q11,d11,d6 vmlal.s32 q11,d12,d5 vmlal.s32 q11,d8,d1 vmlal.s32 q11,d19,d0 vmull.s32 q15,d10,d8 vmlal.s32 q15,d11,d17 vmlal.s32 q15,d12,d6 vmlal.s32 q15,d13,d5 vmlal.s32 q15,d19,d1 vmlal.s32 q15,d14,d0 vmull.s32 q2,d10,d9 vmlal.s32 q2,d11,d8 vmlal.s32 q2,d12,d7 vmlal.s32 q2,d13,d6 vmlal.s32 q2,d14,d1 vmull.s32 q0,d15,d1 vmlal.s32 q0,d10,d14 vmlal.s32 q0,d11,d19 vmlal.s32 q0,d12,d8 vmlal.s32 q0,d13,d17 vmlal.s32 q0,d6,d6 add r2,sp,#512 vld1.8 {d18-d19},[r2,: 128] vmull.s32 q3,d16,d7 vmlal.s32 q3,d10,d15 vmlal.s32 q3,d11,d14 vmlal.s32 q3,d12,d9 vmlal.s32 q3,d13,d8 add r2,sp,#528 vld1.8 {d8-d9},[r2,: 128] vadd.i64 q5,q12,q9 vadd.i64 q6,q15,q9 vshr.s64 q5,q5,#26 vshr.s64 q6,q6,#26 vadd.i64 q7,q10,q5 vshl.i64 q5,q5,#26 vadd.i64 q8,q7,q4 vadd.i64 q2,q2,q6 vshl.i64 q6,q6,#26 vadd.i64 q10,q2,q4 vsub.i64 q5,q12,q5 vshr.s64 q8,q8,#25 vsub.i64 q6,q15,q6 vshr.s64 q10,q10,#25 vadd.i64 q12,q13,q8 vshl.i64 q8,q8,#25 vadd.i64 q13,q12,q9 vadd.i64 q0,q0,q10 vsub.i64 q7,q7,q8 vshr.s64 q8,q13,#26 vshl.i64 q10,q10,#25 vadd.i64 q13,q0,q9 vadd.i64 q1,q1,q8 vshl.i64 q8,q8,#26 vadd.i64 q15,q1,q4 vsub.i64 q2,q2,q10 vshr.s64 q10,q13,#26 vsub.i64 q8,q12,q8 vshr.s64 q12,q15,#25 vadd.i64 
q3,q3,q10 vshl.i64 q10,q10,#26 vadd.i64 q13,q3,q4 vadd.i64 q14,q14,q12 add r2,r3,#144 vshl.i64 q12,q12,#25 add r4,r3,#192 vadd.i64 q15,q14,q9 add r2,r2,#8 vsub.i64 q0,q0,q10 add r4,r4,#8 vshr.s64 q10,q13,#25 vsub.i64 q1,q1,q12 vshr.s64 q12,q15,#26 vadd.i64 q13,q10,q10 vadd.i64 q11,q11,q12 vtrn.32 d16,d2 vshl.i64 q12,q12,#26 vtrn.32 d17,d3 vadd.i64 q1,q11,q4 vadd.i64 q4,q5,q13 vst1.8 d16,[r2,: 64]! vshl.i64 q5,q10,#4 vst1.8 d17,[r4,: 64]! vsub.i64 q8,q14,q12 vshr.s64 q1,q1,#25 vadd.i64 q4,q4,q5 vadd.i64 q5,q6,q1 vshl.i64 q1,q1,#25 vadd.i64 q6,q5,q9 vadd.i64 q4,q4,q10 vshl.i64 q10,q10,#25 vadd.i64 q9,q4,q9 vsub.i64 q1,q11,q1 vshr.s64 q6,q6,#26 vsub.i64 q3,q3,q10 vtrn.32 d16,d2 vshr.s64 q9,q9,#26 vtrn.32 d17,d3 vadd.i64 q1,q2,q6 vst1.8 d16,[r2,: 64] vshl.i64 q2,q6,#26 vst1.8 d17,[r4,: 64] vadd.i64 q6,q7,q9 vtrn.32 d0,d6 vshl.i64 q7,q9,#26 vtrn.32 d1,d7 vsub.i64 q2,q5,q2 add r2,r2,#16 vsub.i64 q3,q4,q7 vst1.8 d0,[r2,: 64] add r4,r4,#16 vst1.8 d1,[r4,: 64] vtrn.32 d4,d2 vtrn.32 d5,d3 sub r2,r2,#8 sub r4,r4,#8 vtrn.32 d6,d12 vtrn.32 d7,d13 vst1.8 d4,[r2,: 64] vst1.8 d5,[r4,: 64] sub r2,r2,#24 sub r4,r4,#24 vst1.8 d6,[r2,: 64] vst1.8 d7,[r4,: 64] add r2,r3,#336 add r4,r3,#288 vld1.8 {d0-d1},[r2,: 128]! vld1.8 {d2-d3},[r4,: 128]! vadd.i32 q0,q0,q1 vld1.8 {d2-d3},[r2,: 128]! vld1.8 {d4-d5},[r4,: 128]! vadd.i32 q1,q1,q2 add r5,r3,#288 vld1.8 {d4},[r2,: 64] vld1.8 {d6},[r4,: 64] vadd.i32 q2,q2,q3 vst1.8 {d0-d1},[r5,: 128]! vst1.8 {d2-d3},[r5,: 128]! vst1.8 d4,[r5,: 64] add r2,r3,#48 add r4,r3,#144 vld1.8 {d0-d1},[r4,: 128]! vld1.8 {d2-d3},[r4,: 128]! vld1.8 {d4},[r4,: 64] add r4,r3,#288 vld1.8 {d6-d7},[r4,: 128]! vtrn.32 q0,q3 vld1.8 {d8-d9},[r4,: 128]! vshl.i32 q5,q0,#4 vtrn.32 q1,q4 vshl.i32 q6,q3,#4 vadd.i32 q5,q5,q0 vadd.i32 q6,q6,q3 vshl.i32 q7,q1,#4 vld1.8 {d5},[r4,: 64] vshl.i32 q8,q4,#4 vtrn.32 d4,d5 vadd.i32 q7,q7,q1 vadd.i32 q8,q8,q4 vld1.8 {d18-d19},[r2,: 128]! vshl.i32 q10,q2,#4 vld1.8 {d22-d23},[r2,: 128]! 
vadd.i32 q10,q10,q2 vld1.8 {d24},[r2,: 64] vadd.i32 q5,q5,q0 add r2,r3,#240 vld1.8 {d26-d27},[r2,: 128]! vadd.i32 q6,q6,q3 vld1.8 {d28-d29},[r2,: 128]! vadd.i32 q8,q8,q4 vld1.8 {d25},[r2,: 64] vadd.i32 q10,q10,q2 vtrn.32 q9,q13 vadd.i32 q7,q7,q1 vadd.i32 q5,q5,q0 vtrn.32 q11,q14 vadd.i32 q6,q6,q3 add r2,sp,#560 vadd.i32 q10,q10,q2 vtrn.32 d24,d25 vst1.8 {d12-d13},[r2,: 128] vshl.i32 q6,q13,#1 add r2,sp,#576 vst1.8 {d20-d21},[r2,: 128] vshl.i32 q10,q14,#1 add r2,sp,#592 vst1.8 {d12-d13},[r2,: 128] vshl.i32 q15,q12,#1 vadd.i32 q8,q8,q4 vext.32 d10,d31,d30,#0 vadd.i32 q7,q7,q1 add r2,sp,#608 vst1.8 {d16-d17},[r2,: 128] vmull.s32 q8,d18,d5 vmlal.s32 q8,d26,d4 vmlal.s32 q8,d19,d9 vmlal.s32 q8,d27,d3 vmlal.s32 q8,d22,d8 vmlal.s32 q8,d28,d2 vmlal.s32 q8,d23,d7 vmlal.s32 q8,d29,d1 vmlal.s32 q8,d24,d6 vmlal.s32 q8,d25,d0 add r2,sp,#624 vst1.8 {d14-d15},[r2,: 128] vmull.s32 q2,d18,d4 vmlal.s32 q2,d12,d9 vmlal.s32 q2,d13,d8 vmlal.s32 q2,d19,d3 vmlal.s32 q2,d22,d2 vmlal.s32 q2,d23,d1 vmlal.s32 q2,d24,d0 add r2,sp,#640 vst1.8 {d20-d21},[r2,: 128] vmull.s32 q7,d18,d9 vmlal.s32 q7,d26,d3 vmlal.s32 q7,d19,d8 vmlal.s32 q7,d27,d2 vmlal.s32 q7,d22,d7 vmlal.s32 q7,d28,d1 vmlal.s32 q7,d23,d6 vmlal.s32 q7,d29,d0 add r2,sp,#656 vst1.8 {d10-d11},[r2,: 128] vmull.s32 q5,d18,d3 vmlal.s32 q5,d19,d2 vmlal.s32 q5,d22,d1 vmlal.s32 q5,d23,d0 vmlal.s32 q5,d12,d8 add r2,sp,#672 vst1.8 {d16-d17},[r2,: 128] vmull.s32 q4,d18,d8 vmlal.s32 q4,d26,d2 vmlal.s32 q4,d19,d7 vmlal.s32 q4,d27,d1 vmlal.s32 q4,d22,d6 vmlal.s32 q4,d28,d0 vmull.s32 q8,d18,d7 vmlal.s32 q8,d26,d1 vmlal.s32 q8,d19,d6 vmlal.s32 q8,d27,d0 add r2,sp,#576 vld1.8 {d20-d21},[r2,: 128] vmlal.s32 q7,d24,d21 vmlal.s32 q7,d25,d20 vmlal.s32 q4,d23,d21 vmlal.s32 q4,d29,d20 vmlal.s32 q8,d22,d21 vmlal.s32 q8,d28,d20 vmlal.s32 q5,d24,d20 add r2,sp,#576 vst1.8 {d14-d15},[r2,: 128] vmull.s32 q7,d18,d6 vmlal.s32 q7,d26,d0 add r2,sp,#656 vld1.8 {d30-d31},[r2,: 128] vmlal.s32 q2,d30,d21 vmlal.s32 q7,d19,d21 vmlal.s32 q7,d27,d20 add r2,sp,#624 vld1.8 
{d26-d27},[r2,: 128] vmlal.s32 q4,d25,d27 vmlal.s32 q8,d29,d27 vmlal.s32 q8,d25,d26 vmlal.s32 q7,d28,d27 vmlal.s32 q7,d29,d26 add r2,sp,#608 vld1.8 {d28-d29},[r2,: 128] vmlal.s32 q4,d24,d29 vmlal.s32 q8,d23,d29 vmlal.s32 q8,d24,d28 vmlal.s32 q7,d22,d29 vmlal.s32 q7,d23,d28 add r2,sp,#608 vst1.8 {d8-d9},[r2,: 128] add r2,sp,#560 vld1.8 {d8-d9},[r2,: 128] vmlal.s32 q7,d24,d9 vmlal.s32 q7,d25,d31 vmull.s32 q1,d18,d2 vmlal.s32 q1,d19,d1 vmlal.s32 q1,d22,d0 vmlal.s32 q1,d24,d27 vmlal.s32 q1,d23,d20 vmlal.s32 q1,d12,d7 vmlal.s32 q1,d13,d6 vmull.s32 q6,d18,d1 vmlal.s32 q6,d19,d0 vmlal.s32 q6,d23,d27 vmlal.s32 q6,d22,d20 vmlal.s32 q6,d24,d26 vmull.s32 q0,d18,d0 vmlal.s32 q0,d22,d27 vmlal.s32 q0,d23,d26 vmlal.s32 q0,d24,d31 vmlal.s32 q0,d19,d20 add r2,sp,#640 vld1.8 {d18-d19},[r2,: 128] vmlal.s32 q2,d18,d7 vmlal.s32 q2,d19,d6 vmlal.s32 q5,d18,d6 vmlal.s32 q5,d19,d21 vmlal.s32 q1,d18,d21 vmlal.s32 q1,d19,d29 vmlal.s32 q0,d18,d28 vmlal.s32 q0,d19,d9 vmlal.s32 q6,d18,d29 vmlal.s32 q6,d19,d28 add r2,sp,#592 vld1.8 {d18-d19},[r2,: 128] add r2,sp,#512 vld1.8 {d22-d23},[r2,: 128] vmlal.s32 q5,d19,d7 vmlal.s32 q0,d18,d21 vmlal.s32 q0,d19,d29 vmlal.s32 q6,d18,d6 add r2,sp,#528 vld1.8 {d6-d7},[r2,: 128] vmlal.s32 q6,d19,d21 add r2,sp,#576 vld1.8 {d18-d19},[r2,: 128] vmlal.s32 q0,d30,d8 add r2,sp,#672 vld1.8 {d20-d21},[r2,: 128] vmlal.s32 q5,d30,d29 add r2,sp,#608 vld1.8 {d24-d25},[r2,: 128] vmlal.s32 q1,d30,d28 vadd.i64 q13,q0,q11 vadd.i64 q14,q5,q11 vmlal.s32 q6,d30,d9 vshr.s64 q4,q13,#26 vshr.s64 q13,q14,#26 vadd.i64 q7,q7,q4 vshl.i64 q4,q4,#26 vadd.i64 q14,q7,q3 vadd.i64 q9,q9,q13 vshl.i64 q13,q13,#26 vadd.i64 q15,q9,q3 vsub.i64 q0,q0,q4 vshr.s64 q4,q14,#25 vsub.i64 q5,q5,q13 vshr.s64 q13,q15,#25 vadd.i64 q6,q6,q4 vshl.i64 q4,q4,#25 vadd.i64 q14,q6,q11 vadd.i64 q2,q2,q13 vsub.i64 q4,q7,q4 vshr.s64 q7,q14,#26 vshl.i64 q13,q13,#25 vadd.i64 q14,q2,q11 vadd.i64 q8,q8,q7 vshl.i64 q7,q7,#26 vadd.i64 q15,q8,q3 vsub.i64 q9,q9,q13 vshr.s64 q13,q14,#26 vsub.i64 q6,q6,q7 vshr.s64 q7,q15,#25 
vadd.i64 q10,q10,q13 vshl.i64 q13,q13,#26 vadd.i64 q14,q10,q3 vadd.i64 q1,q1,q7 add r2,r3,#240 vshl.i64 q7,q7,#25 add r4,r3,#144 vadd.i64 q15,q1,q11 add r2,r2,#8 vsub.i64 q2,q2,q13 add r4,r4,#8 vshr.s64 q13,q14,#25 vsub.i64 q7,q8,q7 vshr.s64 q8,q15,#26 vadd.i64 q14,q13,q13 vadd.i64 q12,q12,q8 vtrn.32 d12,d14 vshl.i64 q8,q8,#26 vtrn.32 d13,d15 vadd.i64 q3,q12,q3 vadd.i64 q0,q0,q14 vst1.8 d12,[r2,: 64]! vshl.i64 q7,q13,#4 vst1.8 d13,[r4,: 64]! vsub.i64 q1,q1,q8 vshr.s64 q3,q3,#25 vadd.i64 q0,q0,q7 vadd.i64 q5,q5,q3 vshl.i64 q3,q3,#25 vadd.i64 q6,q5,q11 vadd.i64 q0,q0,q13 vshl.i64 q7,q13,#25 vadd.i64 q8,q0,q11 vsub.i64 q3,q12,q3 vshr.s64 q6,q6,#26 vsub.i64 q7,q10,q7 vtrn.32 d2,d6 vshr.s64 q8,q8,#26 vtrn.32 d3,d7 vadd.i64 q3,q9,q6 vst1.8 d2,[r2,: 64] vshl.i64 q6,q6,#26 vst1.8 d3,[r4,: 64] vadd.i64 q1,q4,q8 vtrn.32 d4,d14 vshl.i64 q4,q8,#26 vtrn.32 d5,d15 vsub.i64 q5,q5,q6 add r2,r2,#16 vsub.i64 q0,q0,q4 vst1.8 d4,[r2,: 64] add r4,r4,#16 vst1.8 d5,[r4,: 64] vtrn.32 d10,d6 vtrn.32 d11,d7 sub r2,r2,#8 sub r4,r4,#8 vtrn.32 d0,d2 vtrn.32 d1,d3 vst1.8 d10,[r2,: 64] vst1.8 d11,[r4,: 64] sub r2,r2,#24 sub r4,r4,#24 vst1.8 d0,[r2,: 64] vst1.8 d1,[r4,: 64] ldr r2,[sp,#488] ldr r4,[sp,#492] subs r5,r2,#1 bge ._mainloop add r1,r3,#144 add r2,r3,#336 vld1.8 {d0-d1},[r1,: 128]! vld1.8 {d2-d3},[r1,: 128]! vld1.8 {d4},[r1,: 64] vst1.8 {d0-d1},[r2,: 128]! vst1.8 {d2-d3},[r2,: 128]! vst1.8 d4,[r2,: 64] ldr r1,=0 ._invertloop: add r2,r3,#144 ldr r4,=0 ldr r5,=2 cmp r1,#1 ldreq r5,=1 addeq r2,r3,#336 addeq r4,r3,#48 cmp r1,#2 ldreq r5,=1 addeq r2,r3,#48 cmp r1,#3 ldreq r5,=5 addeq r4,r3,#336 cmp r1,#4 ldreq r5,=10 cmp r1,#5 ldreq r5,=20 cmp r1,#6 ldreq r5,=10 addeq r2,r3,#336 addeq r4,r3,#336 cmp r1,#7 ldreq r5,=50 cmp r1,#8 ldreq r5,=100 cmp r1,#9 ldreq r5,=50 addeq r2,r3,#336 cmp r1,#10 ldreq r5,=5 addeq r2,r3,#48 cmp r1,#11 ldreq r5,=0 addeq r2,r3,#96 add r6,r3,#144 add r7,r3,#288 vld1.8 {d0-d1},[r6,: 128]! vld1.8 {d2-d3},[r6,: 128]! vld1.8 {d4},[r6,: 64] vst1.8 {d0-d1},[r7,: 128]! 
vst1.8 {d2-d3},[r7,: 128]! vst1.8 d4,[r7,: 64] cmp r5,#0 beq ._skipsquaringloop ._squaringloop: add r6,r3,#288 add r7,r3,#288 add r8,r3,#288 vmov.i32 q0,#19 vmov.i32 q1,#0 vmov.i32 q2,#1 vzip.i32 q1,q2 vld1.8 {d4-d5},[r7,: 128]! vld1.8 {d6-d7},[r7,: 128]! vld1.8 {d9},[r7,: 64] vld1.8 {d10-d11},[r6,: 128]! add r7,sp,#416 vld1.8 {d12-d13},[r6,: 128]! vmul.i32 q7,q2,q0 vld1.8 {d8},[r6,: 64] vext.32 d17,d11,d10,#1 vmul.i32 q9,q3,q0 vext.32 d16,d10,d8,#1 vshl.u32 q10,q5,q1 vext.32 d22,d14,d4,#1 vext.32 d24,d18,d6,#1 vshl.u32 q13,q6,q1 vshl.u32 d28,d8,d2 vrev64.i32 d22,d22 vmul.i32 d1,d9,d1 vrev64.i32 d24,d24 vext.32 d29,d8,d13,#1 vext.32 d0,d1,d9,#1 vrev64.i32 d0,d0 vext.32 d2,d9,d1,#1 vext.32 d23,d15,d5,#1 vmull.s32 q4,d20,d4 vrev64.i32 d23,d23 vmlal.s32 q4,d21,d1 vrev64.i32 d2,d2 vmlal.s32 q4,d26,d19 vext.32 d3,d5,d15,#1 vmlal.s32 q4,d27,d18 vrev64.i32 d3,d3 vmlal.s32 q4,d28,d15 vext.32 d14,d12,d11,#1 vmull.s32 q5,d16,d23 vext.32 d15,d13,d12,#1 vmlal.s32 q5,d17,d4 vst1.8 d8,[r7,: 64]! vmlal.s32 q5,d14,d1 vext.32 d12,d9,d8,#0 vmlal.s32 q5,d15,d19 vmov.i64 d13,#0 vmlal.s32 q5,d29,d18 vext.32 d25,d19,d7,#1 vmlal.s32 q6,d20,d5 vrev64.i32 d25,d25 vmlal.s32 q6,d21,d4 vst1.8 d11,[r7,: 64]! vmlal.s32 q6,d26,d1 vext.32 d9,d10,d10,#0 vmlal.s32 q6,d27,d19 vmov.i64 d8,#0 vmlal.s32 q6,d28,d18 vmlal.s32 q4,d16,d24 vmlal.s32 q4,d17,d5 vmlal.s32 q4,d14,d4 vst1.8 d12,[r7,: 64]! vmlal.s32 q4,d15,d1 vext.32 d10,d13,d12,#0 vmlal.s32 q4,d29,d19 vmov.i64 d11,#0 vmlal.s32 q5,d20,d6 vmlal.s32 q5,d21,d5 vmlal.s32 q5,d26,d4 vext.32 d13,d8,d8,#0 vmlal.s32 q5,d27,d1 vmov.i64 d12,#0 vmlal.s32 q5,d28,d19 vst1.8 d9,[r7,: 64]! 
vmlal.s32 q6,d16,d25 vmlal.s32 q6,d17,d6 vst1.8 d10,[r7,: 64] vmlal.s32 q6,d14,d5 vext.32 d8,d11,d10,#0 vmlal.s32 q6,d15,d4 vmov.i64 d9,#0 vmlal.s32 q6,d29,d1 vmlal.s32 q4,d20,d7 vmlal.s32 q4,d21,d6 vmlal.s32 q4,d26,d5 vext.32 d11,d12,d12,#0 vmlal.s32 q4,d27,d4 vmov.i64 d10,#0 vmlal.s32 q4,d28,d1 vmlal.s32 q5,d16,d0 sub r6,r7,#32 vmlal.s32 q5,d17,d7 vmlal.s32 q5,d14,d6 vext.32 d30,d9,d8,#0 vmlal.s32 q5,d15,d5 vld1.8 {d31},[r6,: 64]! vmlal.s32 q5,d29,d4 vmlal.s32 q15,d20,d0 vext.32 d0,d6,d18,#1 vmlal.s32 q15,d21,d25 vrev64.i32 d0,d0 vmlal.s32 q15,d26,d24 vext.32 d1,d7,d19,#1 vext.32 d7,d10,d10,#0 vmlal.s32 q15,d27,d23 vrev64.i32 d1,d1 vld1.8 {d6},[r6,: 64] vmlal.s32 q15,d28,d22 vmlal.s32 q3,d16,d4 add r6,r6,#24 vmlal.s32 q3,d17,d2 vext.32 d4,d31,d30,#0 vmov d17,d11 vmlal.s32 q3,d14,d1 vext.32 d11,d13,d13,#0 vext.32 d13,d30,d30,#0 vmlal.s32 q3,d15,d0 vext.32 d1,d8,d8,#0 vmlal.s32 q3,d29,d3 vld1.8 {d5},[r6,: 64] sub r6,r6,#16 vext.32 d10,d6,d6,#0 vmov.i32 q1,#0xffffffff vshl.i64 q4,q1,#25 add r7,sp,#512 vld1.8 {d14-d15},[r7,: 128] vadd.i64 q9,q2,q7 vshl.i64 q1,q1,#26 vshr.s64 q10,q9,#26 vld1.8 {d0},[r6,: 64]! vadd.i64 q5,q5,q10 vand q9,q9,q1 vld1.8 {d16},[r6,: 64]! 
add r6,sp,#528 vld1.8 {d20-d21},[r6,: 128] vadd.i64 q11,q5,q10 vsub.i64 q2,q2,q9 vshr.s64 q9,q11,#25 vext.32 d12,d5,d4,#0 vand q11,q11,q4 vadd.i64 q0,q0,q9 vmov d19,d7 vadd.i64 q3,q0,q7 vsub.i64 q5,q5,q11 vshr.s64 q11,q3,#26 vext.32 d18,d11,d10,#0 vand q3,q3,q1 vadd.i64 q8,q8,q11 vadd.i64 q11,q8,q10 vsub.i64 q0,q0,q3 vshr.s64 q3,q11,#25 vand q11,q11,q4 vadd.i64 q3,q6,q3 vadd.i64 q6,q3,q7 vsub.i64 q8,q8,q11 vshr.s64 q11,q6,#26 vand q6,q6,q1 vadd.i64 q9,q9,q11 vadd.i64 d25,d19,d21 vsub.i64 q3,q3,q6 vshr.s64 d23,d25,#25 vand q4,q12,q4 vadd.i64 d21,d23,d23 vshl.i64 d25,d23,#4 vadd.i64 d21,d21,d23 vadd.i64 d25,d25,d21 vadd.i64 d4,d4,d25 vzip.i32 q0,q8 vadd.i64 d12,d4,d14 add r6,r8,#8 vst1.8 d0,[r6,: 64] vsub.i64 d19,d19,d9 add r6,r6,#16 vst1.8 d16,[r6,: 64] vshr.s64 d22,d12,#26 vand q0,q6,q1 vadd.i64 d10,d10,d22 vzip.i32 q3,q9 vsub.i64 d4,d4,d0 sub r6,r6,#8 vst1.8 d6,[r6,: 64] add r6,r6,#16 vst1.8 d18,[r6,: 64] vzip.i32 q2,q5 sub r6,r6,#32 vst1.8 d4,[r6,: 64] subs r5,r5,#1 bhi ._squaringloop ._skipsquaringloop: mov r2,r2 add r5,r3,#288 add r6,r3,#144 vmov.i32 q0,#19 vmov.i32 q1,#0 vmov.i32 q2,#1 vzip.i32 q1,q2 vld1.8 {d4-d5},[r5,: 128]! vld1.8 {d6-d7},[r5,: 128]! vld1.8 {d9},[r5,: 64] vld1.8 {d10-d11},[r2,: 128]! add r5,sp,#416 vld1.8 {d12-d13},[r2,: 128]! vmul.i32 q7,q2,q0 vld1.8 {d8},[r2,: 64] vext.32 d17,d11,d10,#1 vmul.i32 q9,q3,q0 vext.32 d16,d10,d8,#1 vshl.u32 q10,q5,q1 vext.32 d22,d14,d4,#1 vext.32 d24,d18,d6,#1 vshl.u32 q13,q6,q1 vshl.u32 d28,d8,d2 vrev64.i32 d22,d22 vmul.i32 d1,d9,d1 vrev64.i32 d24,d24 vext.32 d29,d8,d13,#1 vext.32 d0,d1,d9,#1 vrev64.i32 d0,d0 vext.32 d2,d9,d1,#1 vext.32 d23,d15,d5,#1 vmull.s32 q4,d20,d4 vrev64.i32 d23,d23 vmlal.s32 q4,d21,d1 vrev64.i32 d2,d2 vmlal.s32 q4,d26,d19 vext.32 d3,d5,d15,#1 vmlal.s32 q4,d27,d18 vrev64.i32 d3,d3 vmlal.s32 q4,d28,d15 vext.32 d14,d12,d11,#1 vmull.s32 q5,d16,d23 vext.32 d15,d13,d12,#1 vmlal.s32 q5,d17,d4 vst1.8 d8,[r5,: 64]! 
vmlal.s32 q5,d14,d1 vext.32 d12,d9,d8,#0 vmlal.s32 q5,d15,d19 vmov.i64 d13,#0 vmlal.s32 q5,d29,d18 vext.32 d25,d19,d7,#1 vmlal.s32 q6,d20,d5 vrev64.i32 d25,d25 vmlal.s32 q6,d21,d4 vst1.8 d11,[r5,: 64]! vmlal.s32 q6,d26,d1 vext.32 d9,d10,d10,#0 vmlal.s32 q6,d27,d19 vmov.i64 d8,#0 vmlal.s32 q6,d28,d18 vmlal.s32 q4,d16,d24 vmlal.s32 q4,d17,d5 vmlal.s32 q4,d14,d4 vst1.8 d12,[r5,: 64]! vmlal.s32 q4,d15,d1 vext.32 d10,d13,d12,#0 vmlal.s32 q4,d29,d19 vmov.i64 d11,#0 vmlal.s32 q5,d20,d6 vmlal.s32 q5,d21,d5 vmlal.s32 q5,d26,d4 vext.32 d13,d8,d8,#0 vmlal.s32 q5,d27,d1 vmov.i64 d12,#0 vmlal.s32 q5,d28,d19 vst1.8 d9,[r5,: 64]! vmlal.s32 q6,d16,d25 vmlal.s32 q6,d17,d6 vst1.8 d10,[r5,: 64] vmlal.s32 q6,d14,d5 vext.32 d8,d11,d10,#0 vmlal.s32 q6,d15,d4 vmov.i64 d9,#0 vmlal.s32 q6,d29,d1 vmlal.s32 q4,d20,d7 vmlal.s32 q4,d21,d6 vmlal.s32 q4,d26,d5 vext.32 d11,d12,d12,#0 vmlal.s32 q4,d27,d4 vmov.i64 d10,#0 vmlal.s32 q4,d28,d1 vmlal.s32 q5,d16,d0 sub r2,r5,#32 vmlal.s32 q5,d17,d7 vmlal.s32 q5,d14,d6 vext.32 d30,d9,d8,#0 vmlal.s32 q5,d15,d5 vld1.8 {d31},[r2,: 64]! vmlal.s32 q5,d29,d4 vmlal.s32 q15,d20,d0 vext.32 d0,d6,d18,#1 vmlal.s32 q15,d21,d25 vrev64.i32 d0,d0 vmlal.s32 q15,d26,d24 vext.32 d1,d7,d19,#1 vext.32 d7,d10,d10,#0 vmlal.s32 q15,d27,d23 vrev64.i32 d1,d1 vld1.8 {d6},[r2,: 64] vmlal.s32 q15,d28,d22 vmlal.s32 q3,d16,d4 add r2,r2,#24 vmlal.s32 q3,d17,d2 vext.32 d4,d31,d30,#0 vmov d17,d11 vmlal.s32 q3,d14,d1 vext.32 d11,d13,d13,#0 vext.32 d13,d30,d30,#0 vmlal.s32 q3,d15,d0 vext.32 d1,d8,d8,#0 vmlal.s32 q3,d29,d3 vld1.8 {d5},[r2,: 64] sub r2,r2,#16 vext.32 d10,d6,d6,#0 vmov.i32 q1,#0xffffffff vshl.i64 q4,q1,#25 add r5,sp,#512 vld1.8 {d14-d15},[r5,: 128] vadd.i64 q9,q2,q7 vshl.i64 q1,q1,#26 vshr.s64 q10,q9,#26 vld1.8 {d0},[r2,: 64]! vadd.i64 q5,q5,q10 vand q9,q9,q1 vld1.8 {d16},[r2,: 64]! 
add r2,sp,#528 vld1.8 {d20-d21},[r2,: 128] vadd.i64 q11,q5,q10 vsub.i64 q2,q2,q9 vshr.s64 q9,q11,#25 vext.32 d12,d5,d4,#0 vand q11,q11,q4 vadd.i64 q0,q0,q9 vmov d19,d7 vadd.i64 q3,q0,q7 vsub.i64 q5,q5,q11 vshr.s64 q11,q3,#26 vext.32 d18,d11,d10,#0 vand q3,q3,q1 vadd.i64 q8,q8,q11 vadd.i64 q11,q8,q10 vsub.i64 q0,q0,q3 vshr.s64 q3,q11,#25 vand q11,q11,q4 vadd.i64 q3,q6,q3 vadd.i64 q6,q3,q7 vsub.i64 q8,q8,q11 vshr.s64 q11,q6,#26 vand q6,q6,q1 vadd.i64 q9,q9,q11 vadd.i64 d25,d19,d21 vsub.i64 q3,q3,q6 vshr.s64 d23,d25,#25 vand q4,q12,q4 vadd.i64 d21,d23,d23 vshl.i64 d25,d23,#4 vadd.i64 d21,d21,d23 vadd.i64 d25,d25,d21 vadd.i64 d4,d4,d25 vzip.i32 q0,q8 vadd.i64 d12,d4,d14 add r2,r6,#8 vst1.8 d0,[r2,: 64] vsub.i64 d19,d19,d9 add r2,r2,#16 vst1.8 d16,[r2,: 64] vshr.s64 d22,d12,#26 vand q0,q6,q1 vadd.i64 d10,d10,d22 vzip.i32 q3,q9 vsub.i64 d4,d4,d0 sub r2,r2,#8 vst1.8 d6,[r2,: 64] add r2,r2,#16 vst1.8 d18,[r2,: 64] vzip.i32 q2,q5 sub r2,r2,#32 vst1.8 d4,[r2,: 64] cmp r4,#0 beq ._skippostcopy add r2,r3,#144 mov r4,r4 vld1.8 {d0-d1},[r2,: 128]! vld1.8 {d2-d3},[r2,: 128]! vld1.8 {d4},[r2,: 64] vst1.8 {d0-d1},[r4,: 128]! vst1.8 {d2-d3},[r4,: 128]! vst1.8 d4,[r4,: 64] ._skippostcopy: cmp r1,#1 bne ._skipfinalcopy add r2,r3,#288 add r4,r3,#144 vld1.8 {d0-d1},[r2,: 128]! vld1.8 {d2-d3},[r2,: 128]! vld1.8 {d4},[r2,: 64] vst1.8 {d0-d1},[r4,: 128]! vst1.8 {d2-d3},[r4,: 128]! 
vst1.8 d4,[r4,: 64] ._skipfinalcopy: add r1,r1,#1 cmp r1,#12 blo ._invertloop add r1,r3,#144 ldr r2,[r1],#4 ldr r3,[r1],#4 ldr r4,[r1],#4 ldr r5,[r1],#4 ldr r6,[r1],#4 ldr r7,[r1],#4 ldr r8,[r1],#4 ldr r9,[r1],#4 ldr r10,[r1],#4 ldr r1,[r1] add r11,r1,r1,LSL #4 add r11,r11,r1,LSL #1 add r11,r11,#16777216 mov r11,r11,ASR #25 add r11,r11,r2 mov r11,r11,ASR #26 add r11,r11,r3 mov r11,r11,ASR #25 add r11,r11,r4 mov r11,r11,ASR #26 add r11,r11,r5 mov r11,r11,ASR #25 add r11,r11,r6 mov r11,r11,ASR #26 add r11,r11,r7 mov r11,r11,ASR #25 add r11,r11,r8 mov r11,r11,ASR #26 add r11,r11,r9 mov r11,r11,ASR #25 add r11,r11,r10 mov r11,r11,ASR #26 add r11,r11,r1 mov r11,r11,ASR #25 add r2,r2,r11 add r2,r2,r11,LSL #1 add r2,r2,r11,LSL #4 mov r11,r2,ASR #26 add r3,r3,r11 sub r2,r2,r11,LSL #26 mov r11,r3,ASR #25 add r4,r4,r11 sub r3,r3,r11,LSL #25 mov r11,r4,ASR #26 add r5,r5,r11 sub r4,r4,r11,LSL #26 mov r11,r5,ASR #25 add r6,r6,r11 sub r5,r5,r11,LSL #25 mov r11,r6,ASR #26 add r7,r7,r11 sub r6,r6,r11,LSL #26 mov r11,r7,ASR #25 add r8,r8,r11 sub r7,r7,r11,LSL #25 mov r11,r8,ASR #26 add r9,r9,r11 sub r8,r8,r11,LSL #26 mov r11,r9,ASR #25 add r10,r10,r11 sub r9,r9,r11,LSL #25 mov r11,r10,ASR #26 add r1,r1,r11 sub r10,r10,r11,LSL #26 mov r11,r1,ASR #25 sub r1,r1,r11,LSL #25 add r2,r2,r3,LSL #26 mov r3,r3,LSR #6 add r3,r3,r4,LSL #19 mov r4,r4,LSR #13 add r4,r4,r5,LSL #13 mov r5,r5,LSR #19 add r5,r5,r6,LSL #6 add r6,r7,r8,LSL #25 mov r7,r8,LSR #7 add r7,r7,r9,LSL #19 mov r8,r9,LSR #13 add r8,r8,r10,LSL #12 mov r9,r10,LSR #20 add r1,r9,r1,LSL #6 str r2,[r0],#4 str r3,[r0],#4 str r4,[r0],#4 str r5,[r0],#4 str r6,[r0],#4 str r7,[r0],#4 str r8,[r0],#4 str r1,[r0] ldrd r4,[sp,#0] ldrd r6,[sp,#8] ldrd r8,[sp,#16] ldrd r10,[sp,#24] ldr r12,[sp,#480] ldr r14,[sp,#484] ldr r0,=0 mov sp,r12 vpop {q4,q5,q6,q7} bx lr #endif /* !OPENSSL_NO_ASM && OPENSSL_ARM && __ELF__ */ #endif // defined(__arm__) && defined(__linux__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits 
#endif ================================================ FILE: Sources/CNIOBoringSSL/crypto/curve25519/curve25519.cc ================================================ /* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ // Some of this code is taken from the ref10 version of Ed25519 in SUPERCOP // 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as // public domain. Other parts have been replaced to call into code generated by // Fiat (https://github.com/mit-plv/fiat-crypto) in //third_party/fiat. // // The field functions are shared by Ed25519 and X25519 where possible. #include #include #include #include #include #include "../internal.h" #include "internal.h" // Various pre-computed constants. 
#include "./curve25519_tables.h"

// Select the fiat-crypto generated field arithmetic backend. The 64-bit
// backends require a 128-bit integer type (native __int128 or the MSVC
// intrinsic variant); otherwise fall back to the 32-bit implementation.
#if defined(BORINGSSL_HAS_UINT128)
#include "../../third_party/fiat/curve25519_64.h"
#elif defined(OPENSSL_64_BIT)
#include "../../third_party/fiat/curve25519_64_msvc.h"
#else
#include "../../third_party/fiat/curve25519_32.h"
#endif


// Low-level intrinsic operations

// load_3 reads three bytes from |in| as a little-endian 24-bit value.
static uint64_t load_3(const uint8_t *in) {
  uint64_t result;
  result = (uint64_t)in[0];
  result |= ((uint64_t)in[1]) << 8;
  result |= ((uint64_t)in[2]) << 16;
  return result;
}

// load_4 reads four bytes from |in| as a little-endian 32-bit value.
static uint64_t load_4(const uint8_t *in) {
  uint64_t result;
  result = (uint64_t)in[0];
  result |= ((uint64_t)in[1]) << 8;
  result |= ((uint64_t)in[2]) << 16;
  result |= ((uint64_t)in[3]) << 24;
  return result;
}


// Field operations.

#if defined(OPENSSL_64_BIT)

typedef uint64_t fe_limb_t;
#define FE_NUM_LIMBS 5

// assert_fe asserts that |f| satisfies bounds:
//
//  [[0x0 ~> 0x8cccccccccccc],
//   [0x0 ~> 0x8cccccccccccc],
//   [0x0 ~> 0x8cccccccccccc],
//   [0x0 ~> 0x8cccccccccccc],
//   [0x0 ~> 0x8cccccccccccc]]
//
// See comments in curve25519_64.h for which functions use these bounds for
// inputs or outputs.
#define assert_fe(f)                                                    \
  do {                                                                  \
    for (unsigned _assert_fe_i = 0; _assert_fe_i < 5; _assert_fe_i++) { \
      declassify_assert(f[_assert_fe_i] <= UINT64_C(0x8cccccccccccc));  \
    }                                                                   \
  } while (0)

// assert_fe_loose asserts that |f| satisfies bounds:
//
//  [[0x0 ~> 0x1a666666666664],
//   [0x0 ~> 0x1a666666666664],
//   [0x0 ~> 0x1a666666666664],
//   [0x0 ~> 0x1a666666666664],
//   [0x0 ~> 0x1a666666666664]]
//
// See comments in curve25519_64.h for which functions use these bounds for
// inputs or outputs.
#define assert_fe_loose(f)                                              \
  do {                                                                  \
    for (unsigned _assert_fe_i = 0; _assert_fe_i < 5; _assert_fe_i++) { \
      declassify_assert(f[_assert_fe_i] <= UINT64_C(0x1a666666666664)); \
    }                                                                   \
  } while (0)

#else

typedef uint32_t fe_limb_t;
#define FE_NUM_LIMBS 10

// assert_fe asserts that |f| satisfies bounds:
//
//  [[0x0 ~> 0x4666666], [0x0 ~> 0x2333333],
//   [0x0 ~> 0x4666666], [0x0 ~> 0x2333333],
//   [0x0 ~> 0x4666666], [0x0 ~> 0x2333333],
//   [0x0 ~> 0x4666666], [0x0 ~> 0x2333333],
//   [0x0 ~> 0x4666666], [0x0 ~> 0x2333333]]
//
// See comments in curve25519_32.h for which functions use these bounds for
// inputs or outputs.
#define assert_fe(f)                                                     \
  do {                                                                   \
    for (unsigned _assert_fe_i = 0; _assert_fe_i < 10; _assert_fe_i++) { \
      declassify_assert(f[_assert_fe_i] <=                               \
                        ((_assert_fe_i & 1) ? 0x2333333u : 0x4666666u)); \
    }                                                                    \
  } while (0)

// assert_fe_loose asserts that |f| satisfies bounds:
//
//  [[0x0 ~> 0xd333332], [0x0 ~> 0x6999999],
//   [0x0 ~> 0xd333332], [0x0 ~> 0x6999999],
//   [0x0 ~> 0xd333332], [0x0 ~> 0x6999999],
//   [0x0 ~> 0xd333332], [0x0 ~> 0x6999999],
//   [0x0 ~> 0xd333332], [0x0 ~> 0x6999999]]
//
// See comments in curve25519_32.h for which functions use these bounds for
// inputs or outputs.
#define assert_fe_loose(f)                                               \
  do {                                                                   \
    for (unsigned _assert_fe_i = 0; _assert_fe_i < 10; _assert_fe_i++) { \
      declassify_assert(f[_assert_fe_i] <=                               \
                        ((_assert_fe_i & 1) ? 0x6999999u : 0xd333332u)); \
    }                                                                    \
  } while (0)

#endif  // OPENSSL_64_BIT

static_assert(sizeof(fe) == sizeof(fe_limb_t) * FE_NUM_LIMBS,
              "fe_limb_t[FE_NUM_LIMBS] is inconsistent with fe");

// fe_frombytes_strict decodes |s| into |h|.
static void fe_frombytes_strict(fe *h, const uint8_t s[32]) {
  // |fiat_25519_from_bytes| requires the top-most bit be clear.
  declassify_assert((s[31] & 0x80) == 0);
  fiat_25519_from_bytes(h->v, s);
  assert_fe(h->v);
}

// fe_frombytes decodes |s| into |h|, masking off the top bit first so the
// strict decoder's precondition always holds.
static void fe_frombytes(fe *h, const uint8_t s[32]) {
  uint8_t s_copy[32];
  OPENSSL_memcpy(s_copy, s, 32);
  s_copy[31] &= 0x7f;
  fe_frombytes_strict(h, s_copy);
}

// fe_tobytes serializes the tightly-reduced element |f| into |s|.
static void fe_tobytes(uint8_t s[32], const fe *f) {
  assert_fe(f->v);
  fiat_25519_to_bytes(s, f->v);
}

// h = 0
static void fe_0(fe *h) { OPENSSL_memset(h, 0, sizeof(fe)); }

static void fe_loose_0(fe_loose *h) { OPENSSL_memset(h, 0, sizeof(fe_loose)); }

// h = 1
static void fe_1(fe *h) {
  OPENSSL_memset(h, 0, sizeof(fe));
  h->v[0] = 1;
}

static void fe_loose_1(fe_loose *h) {
  OPENSSL_memset(h, 0, sizeof(fe_loose));
  h->v[0] = 1;
}

// h = f + g
// Can overlap h with f or g.
//
// Note the result is only loosely reduced (see assert_fe_loose bounds above);
// callers needing tight bounds must follow with |fe_carry|.
static void fe_add(fe_loose *h, const fe *f, const fe *g) {
  assert_fe(f->v);
  assert_fe(g->v);
  fiat_25519_add(h->v, f->v, g->v);
  assert_fe_loose(h->v);
}

// h = f - g
// Can overlap h with f or g.
static void fe_sub(fe_loose *h, const fe *f, const fe *g) {
  assert_fe(f->v);
  assert_fe(g->v);
  fiat_25519_sub(h->v, f->v, g->v);
  assert_fe_loose(h->v);
}

// fe_carry reduces the loosely-bounded |f| to a tightly-bounded |h|.
static void fe_carry(fe *h, const fe_loose *f) {
  assert_fe_loose(f->v);
  fiat_25519_carry(h->v, f->v);
  assert_fe(h->v);
}

// fe_mul_impl multiplies two (possibly loose) inputs and produces a tight
// output. The typed wrappers below exist only to convert between |fe| and
// |fe_loose| views of the same limb layout.
static void fe_mul_impl(fe_limb_t out[FE_NUM_LIMBS],
                        const fe_limb_t in1[FE_NUM_LIMBS],
                        const fe_limb_t in2[FE_NUM_LIMBS]) {
  assert_fe_loose(in1);
  assert_fe_loose(in2);
  fiat_25519_carry_mul(out, in1, in2);
  assert_fe(out);
}

// The suffix encodes the (output, input, input) reduction states:
// 't' = tight (fe), 'l' = loose (fe_loose).
static void fe_mul_ltt(fe_loose *h, const fe *f, const fe *g) {
  fe_mul_impl(h->v, f->v, g->v);
}

static void fe_mul_llt(fe_loose *h, const fe_loose *f, const fe *g) {
  fe_mul_impl(h->v, f->v, g->v);
}

static void fe_mul_ttt(fe *h, const fe *f, const fe *g) {
  fe_mul_impl(h->v, f->v, g->v);
}

static void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g) {
  fe_mul_impl(h->v, f->v, g->v);
}

static void fe_mul_ttl(fe *h, const fe *f, const fe_loose *g) {
  fe_mul_impl(h->v, f->v, g->v);
}

static void fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g) {
  fe_mul_impl(h->v, f->v, g->v);
}

// h = f^2, loose input, tight output.
static void fe_sq_tl(fe *h, const fe_loose *f) {
  assert_fe_loose(f->v);
  fiat_25519_carry_square(h->v, f->v);
  assert_fe(h->v);
}

// h = f^2, tight input, tight output.
static void fe_sq_tt(fe *h, const fe *f) {
  assert_fe_loose(f->v);
  fiat_25519_carry_square(h->v, f->v);
  assert_fe(h->v);
}

// Replace (f,g) with (g,f) if b == 1;
// replace (f,g) with (f,g) if b == 0.
//
// Preconditions: b in {0,1}.
//
// Implemented branchlessly: |b| is expanded to an all-zeros or all-ones mask
// and the XOR-swap is applied under that mask.
static void fe_cswap(fe *f, fe *g, fe_limb_t b) {
  b = 0 - b;
  for (unsigned i = 0; i < FE_NUM_LIMBS; i++) {
    fe_limb_t x = f->v[i] ^ g->v[i];
    x &= b;
    f->v[i] ^= x;
    g->v[i] ^= x;
  }
}

// h = 121666 * f, the curve constant used by the X25519 Montgomery ladder.
static void fe_mul121666(fe *h, const fe_loose *f) {
  assert_fe_loose(f->v);
  fiat_25519_carry_scmul_121666(h->v, f->v);
  assert_fe(h->v);
}

// h = -f
static void fe_neg(fe_loose *h, const fe *f) {
  assert_fe(f->v);
  fiat_25519_opp(h->v, f->v);
  assert_fe_loose(h->v);
}

// Replace (f,g) with (g,g) if b == 1;
// replace (f,g) with (f,g) if b == 0.
//
// Preconditions: b in {0,1}.
static void fe_cmov(fe_loose *f, const fe_loose *g, fe_limb_t b) {
  // Silence an unused function warning. |fiat_25519_selectznz| isn't quite the
  // calling convention the rest of this code wants, so implement it by hand.
  //
  // TODO(davidben): Switch to fiat's calling convention, or ask fiat to emit a
  // different one.
  b = 0 - b;  // expand {0,1} to an all-zeros/all-ones mask (branchless select)
  for (unsigned i = 0; i < FE_NUM_LIMBS; i++) {
    fe_limb_t x = f->v[i] ^ g->v[i];
    x &= b;
    f->v[i] ^= x;
  }
}

// h = f
static void fe_copy(fe *h, const fe *f) { OPENSSL_memmove(h, f, sizeof(fe)); }

// fe_copy_lt copies a tight element into a loose one; valid because the two
// types share the same limb layout (checked below).
static void fe_copy_lt(fe_loose *h, const fe *f) {
  static_assert(sizeof(fe_loose) == sizeof(fe), "fe and fe_loose mismatch");
  OPENSSL_memmove(h, f, sizeof(fe));
}

// fe_loose_invert computes out = z^-1 via a fixed exponentiation ladder
// (z^(p-2) by Fermat's little theorem). The square/multiply schedule below is
// the standard ref10 addition chain; the statement order is significant.
static void fe_loose_invert(fe *out, const fe_loose *z) {
  fe t0;
  fe t1;
  fe t2;
  fe t3;
  int i;

  fe_sq_tl(&t0, z);
  fe_sq_tt(&t1, &t0);
  for (i = 1; i < 2; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_tlt(&t1, z, &t1);
  fe_mul_ttt(&t0, &t0, &t1);
  fe_sq_tt(&t2, &t0);
  fe_mul_ttt(&t1, &t1, &t2);
  fe_sq_tt(&t2, &t1);
  for (i = 1; i < 5; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t1, &t2, &t1);
  fe_sq_tt(&t2, &t1);
  for (i = 1; i < 10; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t2, &t2, &t1);
  fe_sq_tt(&t3, &t2);
  for (i = 1; i < 20; ++i) {
    fe_sq_tt(&t3, &t3);
  }
  fe_mul_ttt(&t2, &t3, &t2);
  fe_sq_tt(&t2, &t2);
  for (i = 1; i < 10; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t1, &t2, &t1);
  fe_sq_tt(&t2, &t1);
  for (i = 1; i < 50; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t2, &t2, &t1);
  fe_sq_tt(&t3, &t2);
  for (i = 1; i < 100; ++i) {
    fe_sq_tt(&t3, &t3);
  }
  fe_mul_ttt(&t2, &t3, &t2);
  fe_sq_tt(&t2, &t2);
  for (i = 1; i < 50; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t1, &t2, &t1);
  fe_sq_tt(&t1, &t1);
  for (i = 1; i < 5; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(out, &t1, &t0);
}

// fe_invert computes out = z^-1 for a tight input.
static void fe_invert(fe *out, const fe *z) {
  fe_loose l;
  fe_copy_lt(&l, z);
  fe_loose_invert(out, &l);
}

// return 0 if f == 0
// return 1 if f != 0
static int fe_isnonzero(const fe_loose *f) {
  fe tight;
  fe_carry(&tight, f);
  uint8_t s[32];
  fe_tobytes(s, &tight);

  static const uint8_t zero[32] = {0};
  return CRYPTO_memcmp(s, zero, sizeof(zero)) != 0;
}

// return 1 if f is in {1,3,5,...,q-2}
// return 0 if f is in {0,2,4,...,q-1}
//
// "Negative" here is the Ed25519 sign convention: the parity of the canonical
// encoding's least-significant bit.
static int fe_isnegative(const fe *f) {
  uint8_t s[32];
  fe_tobytes(s, f);
  return s[0] & 1;
}

// h = 2*f^2
static void fe_sq2_tt(fe *h, const fe *f) {
  // h = f^2
  fe_sq_tt(h, f);

  // h = h + h
  fe_loose tmp;
  fe_add(&tmp, h, h);
  fe_carry(h, &tmp);
}

// fe_pow22523 computes out = z^((p-5)/8), used for combined square-root /
// inversion during point decompression. Fixed ref10 addition chain; the
// statement order is significant.
static void fe_pow22523(fe *out, const fe *z) {
  fe t0;
  fe t1;
  fe t2;
  int i;

  fe_sq_tt(&t0, z);
  fe_sq_tt(&t1, &t0);
  for (i = 1; i < 2; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(&t1, z, &t1);
  fe_mul_ttt(&t0, &t0, &t1);
  fe_sq_tt(&t0, &t0);
  fe_mul_ttt(&t0, &t1, &t0);
  fe_sq_tt(&t1, &t0);
  for (i = 1; i < 5; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(&t0, &t1, &t0);
  fe_sq_tt(&t1, &t0);
  for (i = 1; i < 10; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(&t1, &t1, &t0);
  fe_sq_tt(&t2, &t1);
  for (i = 1; i < 20; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t1, &t2, &t1);
  fe_sq_tt(&t1, &t1);
  for (i = 1; i < 10; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(&t0, &t1, &t0);
  fe_sq_tt(&t1, &t0);
  for (i = 1; i < 50; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(&t1, &t1, &t0);
  fe_sq_tt(&t2, &t1);
  for (i = 1; i < 100; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t1, &t2, &t1);
  fe_sq_tt(&t1, &t1);
  for (i = 1; i < 50; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(&t0, &t1, &t0);
  fe_sq_tt(&t0, &t0);
  for (i = 1; i < 2; ++i) {
    fe_sq_tt(&t0, &t0);
  }
  fe_mul_ttt(out, &t0, z);
}


// Group operations.
// x25519_ge_tobytes encodes the projective point |h| as 32 bytes: the
// y-coordinate with the sign of x folded into the top bit.
void x25519_ge_tobytes(uint8_t s[32], const ge_p2 *h) {
  fe recip;
  fe x;
  fe y;

  fe_invert(&recip, &h->Z);
  fe_mul_ttt(&x, &h->X, &recip);
  fe_mul_ttt(&y, &h->Y, &recip);
  fe_tobytes(s, &y);
  s[31] ^= fe_isnegative(&x) << 7;
}

// ge_p3_tobytes is the same encoding for an extended-coordinate point.
static void ge_p3_tobytes(uint8_t s[32], const ge_p3 *h) {
  fe recip;
  fe x;
  fe y;

  fe_invert(&recip, &h->Z);
  fe_mul_ttt(&x, &h->X, &recip);
  fe_mul_ttt(&y, &h->Y, &recip);
  fe_tobytes(s, &y);
  s[31] ^= fe_isnegative(&x) << 7;
}

// x25519_ge_frombytes_vartime decompresses |s| into |h|. Returns 1 on success
// and 0 if |s| does not encode a curve point. Not constant-time.
int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t s[32]) {
  fe u;
  fe_loose v;
  fe w;
  fe vxx;
  fe_loose check;

  fe_frombytes(&h->Y, s);
  fe_1(&h->Z);
  fe_sq_tt(&w, &h->Y);
  fe_mul_ttt(&vxx, &w, &d);
  fe_sub(&v, &w, &h->Z);  // u = y^2-1
  fe_carry(&u, &v);
  fe_add(&v, &vxx, &h->Z);  // v = dy^2+1

  fe_mul_ttl(&w, &u, &v);  // w = u*v
  fe_pow22523(&h->X, &w);  // x = w^((q-5)/8)
  fe_mul_ttt(&h->X, &h->X, &u);  // x = u*w^((q-5)/8)

  fe_sq_tt(&vxx, &h->X);
  fe_mul_ttl(&vxx, &vxx, &v);
  fe_sub(&check, &vxx, &u);
  if (fe_isnonzero(&check)) {
    // vx^2 != u; try the other square-root candidate, x*sqrt(-1).
    fe_add(&check, &vxx, &u);
    if (fe_isnonzero(&check)) {
      // Neither candidate works: |s| is not on the curve.
      return 0;
    }
    fe_mul_ttt(&h->X, &h->X, &sqrtm1);
  }

  // Match the sign bit carried in the encoding's top bit.
  if (fe_isnegative(&h->X) != (s[31] >> 7)) {
    fe_loose t;
    fe_neg(&t, &h->X);
    fe_carry(&h->X, &t);
  }

  fe_mul_ttt(&h->T, &h->X, &h->Y);
  return 1;
}

// The *_0 helpers below set each representation to the group identity.
static void ge_p2_0(ge_p2 *h) {
  fe_0(&h->X);
  fe_1(&h->Y);
  fe_1(&h->Z);
}

static void ge_p3_0(ge_p3 *h) {
  fe_0(&h->X);
  fe_1(&h->Y);
  fe_1(&h->Z);
  fe_0(&h->T);
}

static void ge_cached_0(ge_cached *h) {
  fe_loose_1(&h->YplusX);
  fe_loose_1(&h->YminusX);
  fe_loose_1(&h->Z);
  fe_loose_0(&h->T2d);
}

static void ge_precomp_0(ge_precomp *h) {
  fe_loose_1(&h->yplusx);
  fe_loose_1(&h->yminusx);
  fe_loose_0(&h->xy2d);
}

// r = p
static void ge_p3_to_p2(ge_p2 *r, const ge_p3 *p) {
  fe_copy(&r->X, &p->X);
  fe_copy(&r->Y, &p->Y);
  fe_copy(&r->Z, &p->Z);
}

// r = p
void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p) {
  fe_add(&r->YplusX, &p->Y, &p->X);
  fe_sub(&r->YminusX, &p->Y, &p->X);
  fe_copy_lt(&r->Z, &p->Z);
  fe_mul_ltt(&r->T2d, &p->T, &d2);
}

// r = p
void x25519_ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p) {
  fe_mul_tll(&r->X, &p->X, &p->T);
  fe_mul_tll(&r->Y, &p->Y, &p->Z);
  fe_mul_tll(&r->Z, &p->Z, &p->T);
}

// r = p
void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p) {
  fe_mul_tll(&r->X, &p->X, &p->T);
  fe_mul_tll(&r->Y, &p->Y, &p->Z);
  fe_mul_tll(&r->Z, &p->Z, &p->T);
  fe_mul_tll(&r->T, &p->X, &p->Y);
}

// r = p
static void ge_p1p1_to_cached(ge_cached *r, const ge_p1p1 *p) {
  ge_p3 t;
  x25519_ge_p1p1_to_p3(&t, p);
  x25519_ge_p3_to_cached(r, &t);
}

// r = 2 * p
static void ge_p2_dbl(ge_p1p1 *r, const ge_p2 *p) {
  fe trX, trZ, trT;
  fe t0;

  fe_sq_tt(&trX, &p->X);
  fe_sq_tt(&trZ, &p->Y);
  fe_sq2_tt(&trT, &p->Z);
  fe_add(&r->Y, &p->X, &p->Y);
  fe_sq_tl(&t0, &r->Y);

  fe_add(&r->Y, &trZ, &trX);
  fe_sub(&r->Z, &trZ, &trX);
  fe_carry(&trZ, &r->Y);
  fe_sub(&r->X, &t0, &trZ);
  fe_carry(&trZ, &r->Z);
  fe_sub(&r->T, &trT, &trZ);
}

// r = 2 * p
static void ge_p3_dbl(ge_p1p1 *r, const ge_p3 *p) {
  ge_p2 q;
  ge_p3_to_p2(&q, p);
  ge_p2_dbl(r, &q);
}

// r = p + q, where |q| is in precomputed (affine-niels) form.
static void ge_madd(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) {
  fe trY, trZ, trT;

  fe_add(&r->X, &p->Y, &p->X);
  fe_sub(&r->Y, &p->Y, &p->X);
  fe_mul_tll(&trZ, &r->X, &q->yplusx);
  fe_mul_tll(&trY, &r->Y, &q->yminusx);
  fe_mul_tlt(&trT, &q->xy2d, &p->T);
  fe_add(&r->T, &p->Z, &p->Z);
  fe_sub(&r->X, &trZ, &trY);
  fe_add(&r->Y, &trZ, &trY);
  fe_carry(&trZ, &r->T);
  fe_add(&r->Z, &trZ, &trT);
  fe_sub(&r->T, &trZ, &trT);
}

// r = p - q, where |q| is in precomputed (affine-niels) form. Mirrors
// |ge_madd| with the yplusx/yminusx roles and the final Z/T add/sub swapped.
static void ge_msub(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) {
  fe trY, trZ, trT;

  fe_add(&r->X, &p->Y, &p->X);
  fe_sub(&r->Y, &p->Y, &p->X);
  fe_mul_tll(&trZ, &r->X, &q->yminusx);
  fe_mul_tll(&trY, &r->Y, &q->yplusx);
  fe_mul_tlt(&trT, &q->xy2d, &p->T);
  fe_add(&r->T, &p->Z, &p->Z);
  fe_sub(&r->X, &trZ, &trY);
  fe_add(&r->Y, &trZ, &trY);
  fe_carry(&trZ, &r->T);
  fe_sub(&r->Z, &trZ, &trT);
  fe_add(&r->T, &trZ, &trT);
}

// r = p + q, where |q| is in cached (projective-niels) form.
void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) {
  fe trX, trY, trZ, trT;

  fe_add(&r->X, &p->Y, &p->X);
  fe_sub(&r->Y, &p->Y, &p->X);
  fe_mul_tll(&trZ, &r->X, &q->YplusX);
  fe_mul_tll(&trY, &r->Y, &q->YminusX);
  fe_mul_tlt(&trT, &q->T2d, &p->T);
  fe_mul_ttl(&trX, &p->Z, &q->Z);
  fe_add(&r->T, &trX, &trX);
  fe_sub(&r->X, &trZ, &trY);
  fe_add(&r->Y, &trZ, &trY);
  fe_carry(&trZ, &r->T);
  fe_add(&r->Z, &trZ, &trT);
  fe_sub(&r->T, &trZ, &trT);
}

// r = p - q, where |q| is in cached (projective-niels) form.
void x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) {
  fe trX, trY, trZ, trT;

  fe_add(&r->X, &p->Y, &p->X);
  fe_sub(&r->Y, &p->Y, &p->X);
  fe_mul_tll(&trZ, &r->X, &q->YminusX);
  fe_mul_tll(&trY, &r->Y, &q->YplusX);
  fe_mul_tlt(&trT, &q->T2d, &p->T);
  fe_mul_ttl(&trX, &p->Z, &q->Z);
  fe_add(&r->T, &trX, &trX);
  fe_sub(&r->X, &trZ, &trY);
  fe_add(&r->Y, &trZ, &trY);
  fe_carry(&trZ, &r->T);
  fe_sub(&r->Z, &trZ, &trT);
  fe_add(&r->T, &trZ, &trT);
}

// cmov conditionally copies |u| into |t| (b == 1) without branching.
static void cmov(ge_precomp *t, const ge_precomp *u, uint8_t b) {
  fe_cmov(&t->yplusx, &u->yplusx, b);
  fe_cmov(&t->yminusx, &u->yminusx, b);
  fe_cmov(&t->xy2d, &u->xy2d, b);
}

void x25519_ge_scalarmult_small_precomp(
    ge_p3 *h, const uint8_t a[32], const uint8_t precomp_table[15 * 2 * 32]) {
  // precomp_table is first expanded into matching |ge_precomp|
  // elements.
  ge_precomp multiples[15];

  unsigned i;
  for (i = 0; i < 15; i++) {
    // The precomputed table is assumed to already clear the top bit, so
    // |fe_frombytes_strict| may be used directly.
    const uint8_t *bytes = &precomp_table[i * (2 * 32)];
    fe x, y;
    fe_frombytes_strict(&x, bytes);
    fe_frombytes_strict(&y, bytes + 32);

    ge_precomp *out = &multiples[i];
    fe_add(&out->yplusx, &y, &x);
    fe_sub(&out->yminusx, &y, &x);
    fe_mul_ltt(&out->xy2d, &x, &y);
    fe_mul_llt(&out->xy2d, &out->xy2d, &d2);
  }

  // See the comment above |k25519SmallPrecomp| about the structure of the
  // precomputed elements. This loop does 64 additions and 64 doublings to
  // calculate the result.
  ge_p3_0(h);
  // NOTE: the loop condition |i < 64| relies on unsigned wraparound to stop
  // after i == 0.
  for (i = 63; i < 64; i--) {
    unsigned j;
    signed char index = 0;
    for (j = 0; j < 4; j++) {
      const uint8_t bit = 1 & (a[(8 * j) + (i / 8)] >> (i & 7));
      index |= (bit << j);
    }

    ge_precomp e;
    ge_precomp_0(&e);

    // Constant-time table lookup: scan every entry, select with a mask.
    for (j = 1; j < 16; j++) {
      cmov(&e, &multiples[j - 1], 1 & constant_time_eq_w(index, j));
    }

    ge_cached cached;
    ge_p1p1 r;
    x25519_ge_p3_to_cached(&cached, h);
    x25519_ge_add(&r, h, &cached);
    x25519_ge_p1p1_to_p3(h, &r);

    ge_madd(&r, h, &e);
    x25519_ge_p1p1_to_p3(h, &r);
  }
}

#if defined(OPENSSL_SMALL)

void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]) {
  x25519_ge_scalarmult_small_precomp(h, a, k25519SmallPrecomp);
}

#else

// table_select loads the |b|-th multiple from the precomputed table row |pos|
// into |t| in constant time, negating the entry when |b| is negative.
static void table_select(ge_precomp *t, const int pos, const signed char b) {
  uint8_t bnegative = constant_time_msb_w(b);
  uint8_t babs = b - ((bnegative & b) << 1);

  uint8_t t_bytes[3][32] = {
      {static_cast<uint8_t>(constant_time_is_zero_w(b) & 1)},
      {static_cast<uint8_t>(constant_time_is_zero_w(b) & 1)},
      {0}};
#if defined(__clang__)  // materialize for vectorization, 6% speedup
  __asm__("" : "+m"(t_bytes) : /*no inputs*/);
#endif
  static_assert(sizeof(t_bytes) == sizeof(k25519Precomp[pos][0]), "");
  // Constant-time scan over all eight entries; only the matching index's
  // bytes are XORed in.
  for (int i = 0; i < 8; i++) {
    constant_time_conditional_memxor(t_bytes, k25519Precomp[pos][i],
                                     sizeof(t_bytes),
                                     constant_time_eq_w(babs, 1 + i));
  }

  fe yplusx, yminusx, xy2d;
  fe_frombytes_strict(&yplusx, t_bytes[0]);
  fe_frombytes_strict(&yminusx, t_bytes[1]);
  fe_frombytes_strict(&xy2d, t_bytes[2]);

  fe_copy_lt(&t->yplusx, &yplusx);
  fe_copy_lt(&t->yminusx, &yminusx);
  fe_copy_lt(&t->xy2d, &xy2d);

  // Negation swaps y+x with y-x and negates xy2d; applied under a mask.
  ge_precomp minust;
  fe_copy_lt(&minust.yplusx, &yminusx);
  fe_copy_lt(&minust.yminusx, &yplusx);
  fe_neg(&minust.xy2d, &xy2d);
  cmov(t, &minust, bnegative >> 7);
}

// h = a * B
// where a = a[0]+256*a[1]+...+256^31 a[31]
// B is the Ed25519 base point (x,4/5) with x positive.
//
// Preconditions:
//   a[31] <= 127
void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]) {
#if defined(BORINGSSL_FE25519_ADX)
  // Use the ADX/BMI2 assembly implementation when the CPU supports it.
  if (CRYPTO_is_BMI1_capable() && CRYPTO_is_BMI2_capable() &&
      CRYPTO_is_ADX_capable()) {
    uint8_t t[4][32];
    x25519_ge_scalarmult_base_adx(t, a);
    fiat_25519_from_bytes(h->X.v, t[0]);
    fiat_25519_from_bytes(h->Y.v, t[1]);
    fiat_25519_from_bytes(h->Z.v, t[2]);
    fiat_25519_from_bytes(h->T.v, t[3]);
    return;
  }
#endif
  signed char e[64];
  signed char carry;
  ge_p1p1 r;
  ge_p2 s;
  ge_precomp t;
  int i;

  // Split the scalar into 64 radix-16 nibbles.
  for (i = 0; i < 32; ++i) {
    e[2 * i + 0] = (a[i] >> 0) & 15;
    e[2 * i + 1] = (a[i] >> 4) & 15;
  }
  // each e[i] is between 0 and 15
  // e[63] is between 0 and 7

  // Recode to signed digits so each digit fits the precomputed table.
  carry = 0;
  for (i = 0; i < 63; ++i) {
    e[i] += carry;
    carry = e[i] + 8;
    carry >>= 4;
    e[i] -= carry << 4;
  }
  e[63] += carry;
  // each e[i] is between -8 and 8

  // First pass: accumulate the odd-index digits.
  ge_p3_0(h);
  for (i = 1; i < 64; i += 2) {
    table_select(&t, i / 2, e[i]);
    ge_madd(&r, h, &t);
    x25519_ge_p1p1_to_p3(h, &r);
  }

  // Multiply the accumulator by 16 (four doublings)...
  ge_p3_dbl(&r, h);
  x25519_ge_p1p1_to_p2(&s, &r);
  ge_p2_dbl(&r, &s);
  x25519_ge_p1p1_to_p2(&s, &r);
  ge_p2_dbl(&r, &s);
  x25519_ge_p1p1_to_p2(&s, &r);
  ge_p2_dbl(&r, &s);
  x25519_ge_p1p1_to_p3(h, &r);

  // ...then accumulate the even-index digits.
  for (i = 0; i < 64; i += 2) {
    table_select(&t, i / 2, e[i]);
    ge_madd(&r, h, &t);
    x25519_ge_p1p1_to_p3(h, &r);
  }
}

#endif

// cmov_cached conditionally copies |u| into |t| (b == 1) without branching.
static void cmov_cached(ge_cached *t, ge_cached *u, uint8_t b) {
  fe_cmov(&t->YplusX, &u->YplusX, b);
  fe_cmov(&t->YminusX, &u->YminusX, b);
  fe_cmov(&t->Z, &u->Z, b);
  fe_cmov(&t->T2d, &u->T2d, b);
}

// r = scalar * A.
// where a = a[0]+256*a[1]+...+256^31 a[31].
void x25519_ge_scalarmult(ge_p2 *r, const uint8_t *scalar, const ge_p3 *A) {
  ge_p2 Ai_p2[8];
  ge_cached Ai[16];
  ge_p1p1 t;

  // Build the table Ai[j] = j*A for j in 0..15 (Ai[0] is the identity).
  ge_cached_0(&Ai[0]);
  x25519_ge_p3_to_cached(&Ai[1], A);
  ge_p3_to_p2(&Ai_p2[1], A);

  unsigned i;
  for (i = 2; i < 16; i += 2) {
    ge_p2_dbl(&t, &Ai_p2[i / 2]);
    ge_p1p1_to_cached(&Ai[i], &t);
    if (i < 8) {
      x25519_ge_p1p1_to_p2(&Ai_p2[i], &t);
    }
    x25519_ge_add(&t, A, &Ai[i]);
    ge_p1p1_to_cached(&Ai[i + 1], &t);
    if (i < 7) {
      x25519_ge_p1p1_to_p2(&Ai_p2[i + 1], &t);
    }
  }

  // Fixed-window (4-bit) left-to-right scalar multiplication with a
  // constant-time table lookup per window.
  ge_p2_0(r);
  ge_p3 u;
  for (i = 0; i < 256; i += 4) {
    ge_p2_dbl(&t, r);
    x25519_ge_p1p1_to_p2(r, &t);
    ge_p2_dbl(&t, r);
    x25519_ge_p1p1_to_p2(r, &t);
    ge_p2_dbl(&t, r);
    x25519_ge_p1p1_to_p2(r, &t);
    ge_p2_dbl(&t, r);
    x25519_ge_p1p1_to_p3(&u, &t);

    // Extract the next 4-bit window, most-significant first.
    uint8_t index = scalar[31 - i / 8];
    index >>= 4 - (i & 4);
    index &= 0xf;

    unsigned j;
    ge_cached selected;
    ge_cached_0(&selected);
    for (j = 0; j < 16; j++) {
      cmov_cached(&selected, &Ai[j], 1 & constant_time_eq_w(index, j));
    }

    x25519_ge_add(&t, &u, &selected);
    x25519_ge_p1p1_to_p2(r, &t);
  }
}

// slide recodes the 32-byte scalar |a| into 256 signed digits in
// {0, +/-1, +/-3, ..., +/-15} for the vartime sliding-window ladder.
static void slide(signed char *r, const uint8_t *a) {
  int i;
  int b;
  int k;

  for (i = 0; i < 256; ++i) {
    r[i] = 1 & (a[i >> 3] >> (i & 7));
  }

  for (i = 0; i < 256; ++i) {
    if (r[i]) {
      for (b = 1; b <= 6 && i + b < 256; ++b) {
        if (r[i + b]) {
          if (r[i] + (r[i + b] << b) <= 15) {
            r[i] += r[i + b] << b;
            r[i + b] = 0;
          } else if (r[i] - (r[i + b] << b) >= -15) {
            r[i] -= r[i + b] << b;
            // Propagate the borrow through subsequent bits.
            for (k = i + b; k < 256; ++k) {
              if (!r[k]) {
                r[k] = 1;
                break;
              }
              r[k] = 0;
            }
          } else {
            break;
          }
        }
      }
    }
  }
}

// r = a * A + b * B
// where a = a[0]+256*a[1]+...+256^31 a[31].
// and b = b[0]+256*b[1]+...+256^31 b[31].
// B is the Ed25519 base point (x,4/5) with x positive.
//
// Vartime: execution depends on the digit patterns of |a| and |b|, so this
// must only be used with public scalars (e.g. signature verification).
static void ge_double_scalarmult_vartime(ge_p2 *r, const uint8_t *a,
                                         const ge_p3 *A, const uint8_t *b) {
  signed char aslide[256];
  signed char bslide[256];
  ge_cached Ai[8];  // A,3A,5A,7A,9A,11A,13A,15A
  ge_p1p1 t;
  ge_p3 u;
  ge_p3 A2;
  int i;

  slide(aslide, a);
  slide(bslide, b);

  // Build the table of odd multiples of A.
  x25519_ge_p3_to_cached(&Ai[0], A);
  ge_p3_dbl(&t, A);
  x25519_ge_p1p1_to_p3(&A2, &t);
  x25519_ge_add(&t, &A2, &Ai[0]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[1], &u);
  x25519_ge_add(&t, &A2, &Ai[1]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[2], &u);
  x25519_ge_add(&t, &A2, &Ai[2]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[3], &u);
  x25519_ge_add(&t, &A2, &Ai[3]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[4], &u);
  x25519_ge_add(&t, &A2, &Ai[4]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[5], &u);
  x25519_ge_add(&t, &A2, &Ai[5]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[6], &u);
  x25519_ge_add(&t, &A2, &Ai[6]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[7], &u);

  ge_p2_0(r);

  // Skip leading zero digits.
  for (i = 255; i >= 0; --i) {
    if (aslide[i] || bslide[i]) {
      break;
    }
  }

  for (; i >= 0; --i) {
    ge_p2_dbl(&t, r);

    if (aslide[i] > 0) {
      x25519_ge_p1p1_to_p3(&u, &t);
      x25519_ge_add(&t, &u, &Ai[aslide[i] / 2]);
    } else if (aslide[i] < 0) {
      x25519_ge_p1p1_to_p3(&u, &t);
      x25519_ge_sub(&t, &u, &Ai[(-aslide[i]) / 2]);
    }

    if (bslide[i] > 0) {
      x25519_ge_p1p1_to_p3(&u, &t);
      ge_madd(&t, &u, &Bi[bslide[i] / 2]);
    } else if (bslide[i] < 0) {
      x25519_ge_p1p1_to_p3(&u, &t);
      ge_msub(&t, &u, &Bi[(-bslide[i]) / 2]);
    }

    x25519_ge_p1p1_to_p2(r, &t);
  }
}

// int64_lshift21 returns |a << 21| but is defined when shifting bits into the
// sign bit. This works around a language flaw in C.
static inline int64_t int64_lshift21(int64_t a) {
  return (int64_t)((uint64_t)a << 21);
}

// The set of scalars is \Z/l
// where l = 2^252 + 27742317777372353535851937790883648493.
// Input:
//   s[0]+256*s[1]+...+256^63*s[63] = s
//
// Output:
//   s[0]+256*s[1]+...+256^31*s[31] = s mod l
//   where l = 2^252 + 27742317777372353535851937790883648493.
// Overwrites s in place.
//
// The 512-bit input is split into 24 21-bit limbs s0..s23.  Since
// l = 2^252 + delta, each high limb s_k (k >= 12) is folded down by
// substituting 2^252 === -delta (mod l); the repeating constants
// 666643, 470296, 654183, 997805, 136657, 683901 (with the +,+,+,-,+,-
// signs) are the 21-bit limbs of that substitution.  Carry passes keep each
// limb within range; the schedule below is straight-line and branch-free.
void x25519_sc_reduce(uint8_t s[64]) {
  // Unpack 64 little-endian bytes into 24 21-bit limbs.
  int64_t s0 = 2097151 & load_3(s);
  int64_t s1 = 2097151 & (load_4(s + 2) >> 5);
  int64_t s2 = 2097151 & (load_3(s + 5) >> 2);
  int64_t s3 = 2097151 & (load_4(s + 7) >> 7);
  int64_t s4 = 2097151 & (load_4(s + 10) >> 4);
  int64_t s5 = 2097151 & (load_3(s + 13) >> 1);
  int64_t s6 = 2097151 & (load_4(s + 15) >> 6);
  int64_t s7 = 2097151 & (load_3(s + 18) >> 3);
  int64_t s8 = 2097151 & load_3(s + 21);
  int64_t s9 = 2097151 & (load_4(s + 23) >> 5);
  int64_t s10 = 2097151 & (load_3(s + 26) >> 2);
  int64_t s11 = 2097151 & (load_4(s + 28) >> 7);
  int64_t s12 = 2097151 & (load_4(s + 31) >> 4);
  int64_t s13 = 2097151 & (load_3(s + 34) >> 1);
  int64_t s14 = 2097151 & (load_4(s + 36) >> 6);
  int64_t s15 = 2097151 & (load_3(s + 39) >> 3);
  int64_t s16 = 2097151 & load_3(s + 42);
  int64_t s17 = 2097151 & (load_4(s + 44) >> 5);
  int64_t s18 = 2097151 & (load_3(s + 47) >> 2);
  int64_t s19 = 2097151 & (load_4(s + 49) >> 7);
  int64_t s20 = 2097151 & (load_4(s + 52) >> 4);
  int64_t s21 = 2097151 & (load_3(s + 55) >> 1);
  int64_t s22 = 2097151 & (load_4(s + 57) >> 6);
  int64_t s23 = (load_4(s + 60) >> 3);
  int64_t carry0;
  int64_t carry1;
  int64_t carry2;
  int64_t carry3;
  int64_t carry4;
  int64_t carry5;
  int64_t carry6;
  int64_t carry7;
  int64_t carry8;
  int64_t carry9;
  int64_t carry10;
  int64_t carry11;
  int64_t carry12;
  int64_t carry13;
  int64_t carry14;
  int64_t carry15;
  int64_t carry16;

  // Fold limbs s23..s18 down into s5..s16.
  s11 += s23 * 666643; s12 += s23 * 470296; s13 += s23 * 654183;
  s14 -= s23 * 997805; s15 += s23 * 136657; s16 -= s23 * 683901;
  s23 = 0;
  s10 += s22 * 666643; s11 += s22 * 470296; s12 += s22 * 654183;
  s13 -= s22 * 997805; s14 += s22 * 136657; s15 -= s22 * 683901;
  s22 = 0;
  s9 += s21 * 666643; s10 += s21 * 470296; s11 += s21 * 654183;
  s12 -= s21 * 997805; s13 += s21 * 136657; s14 -= s21 * 683901;
  s21 = 0;
  s8 += s20 * 666643; s9 += s20 * 470296; s10 += s20 * 654183;
  s11 -= s20 * 997805; s12 += s20 * 136657; s13 -= s20 * 683901;
  s20 = 0;
  s7 += s19 * 666643; s8 += s19 * 470296; s9 += s19 * 654183;
  s10 -= s19 * 997805; s11 += s19 * 136657; s12 -= s19 * 683901;
  s19 = 0;
  s6 += s18 * 666643; s7 += s18 * 470296; s8 += s18 * 654183;
  s9 -= s18 * 997805; s10 += s18 * 136657; s11 -= s18 * 683901;
  s18 = 0;

  // Rounding carry pass (bias by 2^20 before the arithmetic shift): even
  // limbs first, then odd, so adjacent updates do not interfere.
  carry6 = (s6 + (1 << 20)) >> 21; s7 += carry6; s6 -= int64_lshift21(carry6);
  carry8 = (s8 + (1 << 20)) >> 21; s9 += carry8; s8 -= int64_lshift21(carry8);
  carry10 = (s10 + (1 << 20)) >> 21; s11 += carry10; s10 -= int64_lshift21(carry10);
  carry12 = (s12 + (1 << 20)) >> 21; s13 += carry12; s12 -= int64_lshift21(carry12);
  carry14 = (s14 + (1 << 20)) >> 21; s15 += carry14; s14 -= int64_lshift21(carry14);
  carry16 = (s16 + (1 << 20)) >> 21; s17 += carry16; s16 -= int64_lshift21(carry16);
  carry7 = (s7 + (1 << 20)) >> 21; s8 += carry7; s7 -= int64_lshift21(carry7);
  carry9 = (s9 + (1 << 20)) >> 21; s10 += carry9; s9 -= int64_lshift21(carry9);
  carry11 = (s11 + (1 << 20)) >> 21; s12 += carry11; s11 -= int64_lshift21(carry11);
  carry13 = (s13 + (1 << 20)) >> 21; s14 += carry13; s13 -= int64_lshift21(carry13);
  carry15 = (s15 + (1 << 20)) >> 21; s16 += carry15; s15 -= int64_lshift21(carry15);

  // Fold limbs s17..s12 down into s0..s10.
  s5 += s17 * 666643; s6 += s17 * 470296; s7 += s17 * 654183;
  s8 -= s17 * 997805; s9 += s17 * 136657; s10 -= s17 * 683901;
  s17 = 0;
  s4 += s16 * 666643; s5 += s16 * 470296; s6 += s16 * 654183;
  s7 -= s16 * 997805; s8 += s16 * 136657; s9 -= s16 * 683901;
  s16 = 0;
  s3 += s15 * 666643; s4 += s15 * 470296; s5 += s15 * 654183;
  s6 -= s15 * 997805; s7 += s15 * 136657; s8 -= s15 * 683901;
  s15 = 0;
  s2 += s14 * 666643; s3 += s14 * 470296; s4 += s14 * 654183;
  s5 -= s14 * 997805; s6 += s14 * 136657; s7 -= s14 * 683901;
  s14 = 0;
  s1 += s13 * 666643; s2 += s13 * 470296; s3 += s13 * 654183;
  s4 -= s13 * 997805; s5 += s13 * 136657; s6 -= s13 * 683901;
  s13 = 0;
  s0 += s12 * 666643; s1 += s12 * 470296; s2 += s12 * 654183;
  s3 -= s12 * 997805; s4 += s12 * 136657; s5 -= s12 * 683901;
  s12 = 0;

  // Rounding carry pass over the low limbs; overflow collects in s12.
  carry0 = (s0 + (1 << 20)) >> 21; s1 += carry0; s0 -= int64_lshift21(carry0);
  carry2 = (s2 + (1 << 20)) >> 21; s3 += carry2; s2 -= int64_lshift21(carry2);
  carry4 = (s4 + (1 << 20)) >> 21; s5 += carry4; s4 -= int64_lshift21(carry4);
  carry6 = (s6 + (1 << 20)) >> 21; s7 += carry6; s6 -= int64_lshift21(carry6);
  carry8 = (s8 + (1 << 20)) >> 21; s9 += carry8; s8 -= int64_lshift21(carry8);
  carry10 = (s10 + (1 << 20)) >> 21; s11 += carry10; s10 -= int64_lshift21(carry10);
  carry1 = (s1 + (1 << 20)) >> 21; s2 += carry1; s1 -= int64_lshift21(carry1);
  carry3 = (s3 + (1 << 20)) >> 21; s4 += carry3; s3 -= int64_lshift21(carry3);
  carry5 = (s5 + (1 << 20)) >> 21; s6 += carry5; s5 -= int64_lshift21(carry5);
  carry7 = (s7 + (1 << 20)) >> 21; s8 += carry7; s7 -= int64_lshift21(carry7);
  carry9 = (s9 + (1 << 20)) >> 21; s10 += carry9; s9 -= int64_lshift21(carry9);
  carry11 = (s11 + (1 << 20)) >> 21; s12 += carry11; s11 -= int64_lshift21(carry11);

  // Fold the s12 overflow back down, then carry without rounding bias.
  s0 += s12 * 666643; s1 += s12 * 470296; s2 += s12 * 654183;
  s3 -= s12 * 997805; s4 += s12 * 136657; s5 -= s12 * 683901;
  s12 = 0;

  carry0 = s0 >> 21; s1 += carry0; s0 -= int64_lshift21(carry0);
  carry1 = s1 >> 21; s2 += carry1; s1 -= int64_lshift21(carry1);
  carry2 = s2 >> 21; s3 += carry2; s2 -= int64_lshift21(carry2);
  carry3 = s3 >> 21; s4 += carry3; s3 -= int64_lshift21(carry3);
  carry4 = s4 >> 21; s5 += carry4; s4 -= int64_lshift21(carry4);
  carry5 = s5 >> 21; s6 += carry5; s5 -= int64_lshift21(carry5);
  carry6 = s6 >> 21; s7 += carry6; s6 -= int64_lshift21(carry6);
  carry7 = s7 >> 21; s8 += carry7; s7 -= int64_lshift21(carry7);
  carry8 = s8 >> 21; s9 += carry8; s8 -= int64_lshift21(carry8);
  carry9 = s9 >> 21; s10 += carry9; s9 -= int64_lshift21(carry9);
  carry10 = s10 >> 21; s11 += carry10; s10 -= int64_lshift21(carry10);
  carry11 = s11 >> 21; s12 += carry11; s11 -= int64_lshift21(carry11);

  // One final fold and carry pass; the result now fits in s0..s11.
  s0 += s12 * 666643; s1 += s12 * 470296; s2 += s12 * 654183;
  s3 -= s12 * 997805; s4 += s12 * 136657; s5 -= s12 * 683901;
  s12 = 0;

  carry0 = s0 >> 21; s1 += carry0; s0 -= int64_lshift21(carry0);
  carry1 = s1 >> 21; s2 += carry1; s1 -= int64_lshift21(carry1);
  carry2 = s2 >> 21; s3 += carry2; s2 -= int64_lshift21(carry2);
  carry3 = s3 >> 21; s4 += carry3; s3 -= int64_lshift21(carry3);
  carry4 = s4 >> 21; s5 += carry4; s4 -= int64_lshift21(carry4);
  carry5 = s5 >> 21; s6 += carry5; s5 -= int64_lshift21(carry5);
  carry6 = s6 >> 21; s7 += carry6; s6 -= int64_lshift21(carry6);
  carry7 = s7 >> 21; s8 += carry7; s7 -= int64_lshift21(carry7);
  carry8 = s8 >> 21; s9 += carry8; s8 -= int64_lshift21(carry8);
  carry9 = s9 >> 21; s10 += carry9; s9 -= int64_lshift21(carry9);
  carry10 = s10 >> 21; s11 += carry10; s10 -= int64_lshift21(carry10);

  // Repack the 12 remaining 21-bit limbs into 32 little-endian bytes.
  s[0] = s0 >> 0;
  s[1] = s0 >> 8;
  s[2] = (s0 >> 16) | (s1 << 5);
  s[3] = s1 >> 3;
  s[4] = s1 >> 11;
  s[5] = (s1 >> 19) | (s2 << 2);
  s[6] = s2 >> 6;
  s[7] = (s2 >> 14) | (s3 << 7);
  s[8] = s3 >> 1;
  s[9] = s3 >> 9;
  s[10] = (s3 >> 17) | (s4 << 4);
  s[11] = s4 >> 4;
  s[12] = s4 >> 12;
  s[13] = (s4 >> 20) | (s5 << 1);
  s[14] = s5 >> 7;
  s[15] = (s5 >> 15) | (s6 << 6);
  s[16] = s6 >> 2;
  s[17] = s6 >> 10;
  s[18] = (s6 >> 18) | (s7 << 3);
  s[19] = s7 >> 5;
  s[20] = s7 >> 13;
  s[21] = s8 >> 0;
  s[22] = s8 >> 8;
  s[23] = (s8 >> 16) | (s9 << 5);
  s[24] = s9 >> 3;
  s[25] = s9 >> 11;
  s[26] = (s9 >> 19) | (s10 << 2);
  s[27] = s10 >> 6;
  s[28] = (s10 >> 14) | (s11 << 7);
  s[29] = s11 >> 1;
  s[30] = s11 >> 9;
  s[31] = s11 >> 17;
}

// Input:
//   a[0]+256*a[1]+...+256^31*a[31] = a
//   b[0]+256*b[1]+...+256^31*b[31] = b
//   c[0]+256*c[1]+...+256^31*c[31] = c
//
// Output:
//   s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
//   where l = 2^252 + 27742317777372353535851937790883648493.
// s = (a*b + c) mod l, on 32-byte little-endian scalars.  Used by
// ED25519_sign for the S half of the signature.  Straight-line and
// branch-free; same limb representation and reduction schedule as
// |x25519_sc_reduce| above.
static void sc_muladd(uint8_t *s, const uint8_t *a, const uint8_t *b,
                      const uint8_t *c) {
  // Unpack each 32-byte scalar into 12 21-bit limbs (the top limb keeps its
  // full remaining width, hence no mask on a11/b11/c11).
  int64_t a0 = 2097151 & load_3(a);
  int64_t a1 = 2097151 & (load_4(a + 2) >> 5);
  int64_t a2 = 2097151 & (load_3(a + 5) >> 2);
  int64_t a3 = 2097151 & (load_4(a + 7) >> 7);
  int64_t a4 = 2097151 & (load_4(a + 10) >> 4);
  int64_t a5 = 2097151 & (load_3(a + 13) >> 1);
  int64_t a6 = 2097151 & (load_4(a + 15) >> 6);
  int64_t a7 = 2097151 & (load_3(a + 18) >> 3);
  int64_t a8 = 2097151 & load_3(a + 21);
  int64_t a9 = 2097151 & (load_4(a + 23) >> 5);
  int64_t a10 = 2097151 & (load_3(a + 26) >> 2);
  int64_t a11 = (load_4(a + 28) >> 7);
  int64_t b0 = 2097151 & load_3(b);
  int64_t b1 = 2097151 & (load_4(b + 2) >> 5);
  int64_t b2 = 2097151 & (load_3(b + 5) >> 2);
  int64_t b3 = 2097151 & (load_4(b + 7) >> 7);
  int64_t b4 = 2097151 & (load_4(b + 10) >> 4);
  int64_t b5 = 2097151 & (load_3(b + 13) >> 1);
  int64_t b6 = 2097151 & (load_4(b + 15) >> 6);
  int64_t b7 = 2097151 & (load_3(b + 18) >> 3);
  int64_t b8 = 2097151 & load_3(b + 21);
  int64_t b9 = 2097151 & (load_4(b + 23) >> 5);
  int64_t b10 = 2097151 & (load_3(b + 26) >> 2);
  int64_t b11 = (load_4(b + 28) >> 7);
  int64_t c0 = 2097151 & load_3(c);
  int64_t c1 = 2097151 & (load_4(c + 2) >> 5);
  int64_t c2 = 2097151 & (load_3(c + 5) >> 2);
  int64_t c3 = 2097151 & (load_4(c + 7) >> 7);
  int64_t c4 = 2097151 & (load_4(c + 10) >> 4);
  int64_t c5 = 2097151 & (load_3(c + 13) >> 1);
  int64_t c6 = 2097151 & (load_4(c + 15) >> 6);
  int64_t c7 = 2097151 & (load_3(c + 18) >> 3);
  int64_t c8 = 2097151 & load_3(c + 21);
  int64_t c9 = 2097151 & (load_4(c + 23) >> 5);
  int64_t c10 = 2097151 & (load_3(c + 26) >> 2);
  int64_t c11 = (load_4(c + 28) >> 7);
  int64_t s0;
  int64_t s1;
  int64_t s2;
  int64_t s3;
  int64_t s4;
  int64_t s5;
  int64_t s6;
  int64_t s7;
  int64_t s8;
  int64_t s9;
  int64_t s10;
  int64_t s11;
  int64_t s12;
  int64_t s13;
  int64_t s14;
  int64_t s15;
  int64_t s16;
  int64_t s17;
  int64_t s18;
  int64_t s19;
  int64_t s20;
  int64_t s21;
  int64_t s22;
  int64_t s23;
  int64_t carry0;
  int64_t carry1;
  int64_t carry2;
  int64_t carry3;
  int64_t carry4;
  int64_t carry5;
  int64_t carry6;
  int64_t carry7;
  int64_t carry8;
  int64_t carry9;
  int64_t carry10;
  int64_t carry11;
  int64_t carry12;
  int64_t carry13;
  int64_t carry14;
  int64_t carry15;
  int64_t carry16;
  int64_t carry17;
  int64_t carry18;
  int64_t carry19;
  int64_t carry20;
  int64_t carry21;
  int64_t carry22;

  // Schoolbook multiplication of the limb vectors, with c added into the
  // low 12 product limbs.
  s0 = c0 + a0 * b0;
  s1 = c1 + a0 * b1 + a1 * b0;
  s2 = c2 + a0 * b2 + a1 * b1 + a2 * b0;
  s3 = c3 + a0 * b3 + a1 * b2 + a2 * b1 + a3 * b0;
  s4 = c4 + a0 * b4 + a1 * b3 + a2 * b2 + a3 * b1 + a4 * b0;
  s5 = c5 + a0 * b5 + a1 * b4 + a2 * b3 + a3 * b2 + a4 * b1 + a5 * b0;
  s6 = c6 + a0 * b6 + a1 * b5 + a2 * b4 + a3 * b3 + a4 * b2 + a5 * b1 +
       a6 * b0;
  s7 = c7 + a0 * b7 + a1 * b6 + a2 * b5 + a3 * b4 + a4 * b3 + a5 * b2 +
       a6 * b1 + a7 * b0;
  s8 = c8 + a0 * b8 + a1 * b7 + a2 * b6 + a3 * b5 + a4 * b4 + a5 * b3 +
       a6 * b2 + a7 * b1 + a8 * b0;
  s9 = c9 + a0 * b9 + a1 * b8 + a2 * b7 + a3 * b6 + a4 * b5 + a5 * b4 +
       a6 * b3 + a7 * b2 + a8 * b1 + a9 * b0;
  s10 = c10 + a0 * b10 + a1 * b9 + a2 * b8 + a3 * b7 + a4 * b6 + a5 * b5 +
        a6 * b4 + a7 * b3 + a8 * b2 + a9 * b1 + a10 * b0;
  s11 = c11 + a0 * b11 + a1 * b10 + a2 * b9 + a3 * b8 + a4 * b7 + a5 * b6 +
        a6 * b5 + a7 * b4 + a8 * b3 + a9 * b2 + a10 * b1 + a11 * b0;
  s12 = a1 * b11 + a2 * b10 + a3 * b9 + a4 * b8 + a5 * b7 + a6 * b6 +
        a7 * b5 + a8 * b4 + a9 * b3 + a10 * b2 + a11 * b1;
  s13 = a2 * b11 + a3 * b10 + a4 * b9 + a5 * b8 + a6 * b7 + a7 * b6 +
        a8 * b5 + a9 * b4 + a10 * b3 + a11 * b2;
  s14 = a3 * b11 + a4 * b10 + a5 * b9 + a6 * b8 + a7 * b7 + a8 * b6 +
        a9 * b5 + a10 * b4 + a11 * b3;
  s15 = a4 * b11 + a5 * b10 + a6 * b9 + a7 * b8 + a8 * b7 + a9 * b6 +
        a10 * b5 + a11 * b4;
  s16 = a5 * b11 + a6 * b10 + a7 * b9 + a8 * b8 + a9 * b7 + a10 * b6 +
        a11 * b5;
  s17 = a6 * b11 + a7 * b10 + a8 * b9 + a9 * b8 + a10 * b7 + a11 * b6;
  s18 = a7 * b11 + a8 * b10 + a9 * b9 + a10 * b8 + a11 * b7;
  s19 = a8 * b11 + a9 * b10 + a10 * b9 + a11 * b8;
  s20 = a9 * b11 + a10 * b10 + a11 * b9;
  s21 = a10 * b11 + a11 * b10;
  s22 = a11 * b11;
  s23 = 0;

  // Rounding carry pass over the full 24-limb product (even limbs then odd
  // limbs, as in x25519_sc_reduce).
  carry0 = (s0 + (1 << 20)) >> 21; s1 += carry0; s0 -= int64_lshift21(carry0);
  carry2 = (s2 + (1 << 20)) >> 21; s3 += carry2; s2 -= int64_lshift21(carry2);
  carry4 = (s4 + (1 << 20)) >> 21; s5 += carry4; s4 -= int64_lshift21(carry4);
  carry6 = (s6 + (1 << 20)) >> 21; s7 += carry6; s6 -= int64_lshift21(carry6);
  carry8 = (s8 + (1 << 20)) >> 21; s9 += carry8; s8 -= int64_lshift21(carry8);
  carry10 = (s10 + (1 << 20)) >> 21; s11 += carry10; s10 -= int64_lshift21(carry10);
  carry12 = (s12 + (1 << 20)) >> 21; s13 += carry12; s12 -= int64_lshift21(carry12);
  carry14 = (s14 + (1 << 20)) >> 21; s15 += carry14; s14 -= int64_lshift21(carry14);
  carry16 = (s16 + (1 << 20)) >> 21; s17 += carry16; s16 -= int64_lshift21(carry16);
  carry18 = (s18 + (1 << 20)) >> 21; s19 += carry18; s18 -= int64_lshift21(carry18);
  carry20 = (s20 + (1 << 20)) >> 21; s21 += carry20; s20 -= int64_lshift21(carry20);
  carry22 = (s22 + (1 << 20)) >> 21; s23 += carry22; s22 -= int64_lshift21(carry22);
  carry1 = (s1 + (1 << 20)) >> 21; s2 += carry1; s1 -= int64_lshift21(carry1);
  carry3 = (s3 + (1 << 20)) >> 21; s4 += carry3; s3 -= int64_lshift21(carry3);
  carry5 = (s5 + (1 << 20)) >> 21; s6 += carry5; s5 -= int64_lshift21(carry5);
  carry7 = (s7 + (1 << 20)) >> 21; s8 += carry7; s7 -= int64_lshift21(carry7);
  carry9 = (s9 + (1 << 20)) >> 21; s10 += carry9; s9 -= int64_lshift21(carry9);
  carry11 = (s11 + (1 << 20)) >> 21; s12 += carry11; s11 -= int64_lshift21(carry11);
  carry13 = (s13 + (1 << 20)) >> 21; s14 += carry13; s13 -= int64_lshift21(carry13);
  carry15 = (s15 + (1 << 20)) >> 21; s16 += carry15; s15 -= int64_lshift21(carry15);
  carry17 = (s17 + (1 << 20)) >> 21; s18 += carry17; s17 -= int64_lshift21(carry17);
  carry19 = (s19 + (1 << 20)) >> 21; s20 += carry19; s19 -= int64_lshift21(carry19);
  carry21 = (s21 + (1 << 20)) >> 21; s22 += carry21; s21 -= int64_lshift21(carry21);

  // Fold limbs s23..s18 down (2^252 === -delta mod l, as in
  // x25519_sc_reduce).
  s11 += s23 * 666643; s12 += s23 * 470296; s13 += s23 * 654183;
  s14 -= s23 * 997805; s15 += s23 * 136657; s16 -= s23 * 683901;
  s23 = 0;
  s10 += s22 * 666643; s11 += s22 * 470296; s12 += s22 * 654183;
  s13 -= s22 * 997805; s14 += s22 * 136657; s15 -= s22 * 683901;
  s22 = 0;
  s9 += s21 * 666643; s10 += s21 * 470296; s11 += s21 * 654183;
  s12 -= s21 * 997805; s13 += s21 * 136657; s14 -= s21 * 683901;
  s21 = 0;
  s8 += s20 * 666643; s9 += s20 * 470296; s10 += s20 * 654183;
  s11 -= s20 * 997805; s12 += s20 * 136657; s13 -= s20 * 683901;
  s20 = 0;
  s7 += s19 * 666643; s8 += s19 * 470296; s9 += s19 * 654183;
  s10 -= s19 * 997805; s11 += s19 * 136657; s12 -= s19 * 683901;
  s19 = 0;
  s6 += s18 * 666643; s7 += s18 * 470296; s8 += s18 * 654183;
  s9 -= s18 * 997805; s10 += s18 * 136657; s11 -= s18 * 683901;
  s18 = 0;

  // Carry pass over the middle limbs.
  carry6 = (s6 + (1 << 20)) >> 21; s7 += carry6; s6 -= int64_lshift21(carry6);
  carry8 = (s8 + (1 << 20)) >> 21; s9 += carry8; s8 -= int64_lshift21(carry8);
  carry10 = (s10 + (1 << 20)) >> 21; s11 += carry10; s10 -= int64_lshift21(carry10);
  carry12 = (s12 + (1 << 20)) >> 21; s13 += carry12; s12 -= int64_lshift21(carry12);
  carry14 = (s14 + (1 << 20)) >> 21; s15 += carry14; s14 -= int64_lshift21(carry14);
  carry16 = (s16 + (1 << 20)) >> 21; s17 += carry16; s16 -= int64_lshift21(carry16);
  carry7 = (s7 + (1 << 20)) >> 21; s8 += carry7; s7 -= int64_lshift21(carry7);
  carry9 = (s9 + (1 << 20)) >> 21; s10 += carry9; s9 -= int64_lshift21(carry9);
  carry11 = (s11 + (1 << 20)) >> 21; s12 += carry11; s11 -= int64_lshift21(carry11);
  carry13 = (s13 + (1 << 20)) >> 21; s14 += carry13; s13 -= int64_lshift21(carry13);
  carry15 = (s15 + (1 << 20)) >> 21; s16 += carry15; s15 -= int64_lshift21(carry15);

  // Fold limbs s17..s12 down into s0..s10.
  s5 += s17 * 666643; s6 += s17 * 470296; s7 += s17 * 654183;
  s8 -= s17 * 997805; s9 += s17 * 136657; s10 -= s17 * 683901;
  s17 = 0;
  s4 += s16 * 666643; s5 += s16 * 470296; s6 += s16 * 654183;
  s7 -= s16 * 997805; s8 += s16 * 136657; s9 -= s16 * 683901;
  s16 = 0;
  s3 += s15 * 666643; s4 += s15 * 470296; s5 += s15 * 654183;
  s6 -= s15 * 997805; s7 += s15 * 136657; s8 -= s15 * 683901;
  s15 = 0;
  s2 += s14 * 666643; s3 += s14 * 470296; s4 += s14 * 654183;
  s5 -= s14 * 997805; s6 += s14 * 136657; s7 -= s14 * 683901;
  s14 = 0;
  s1 += s13 * 666643; s2 += s13 * 470296; s3 += s13 * 654183;
  s4 -= s13 * 997805; s5 += s13 * 136657; s6 -= s13 * 683901;
  s13 = 0;
  s0 += s12 * 666643; s1 += s12 * 470296; s2 += s12 * 654183;
  s3 -= s12 * 997805; s4 += s12 * 136657; s5 -= s12 * 683901;
  s12 = 0;

  // Rounding carry pass over the low limbs; overflow collects in s12.
  carry0 = (s0 + (1 << 20)) >> 21; s1 += carry0; s0 -= int64_lshift21(carry0);
  carry2 = (s2 + (1 << 20)) >> 21; s3 += carry2; s2 -= int64_lshift21(carry2);
  carry4 = (s4 + (1 << 20)) >> 21; s5 += carry4; s4 -= int64_lshift21(carry4);
  carry6 = (s6 + (1 << 20)) >> 21; s7 += carry6; s6 -= int64_lshift21(carry6);
  carry8 = (s8 + (1 << 20)) >> 21; s9 += carry8; s8 -= int64_lshift21(carry8);
  carry10 = (s10 + (1 << 20)) >> 21; s11 += carry10; s10 -= int64_lshift21(carry10);
  carry1 = (s1 + (1 << 20)) >> 21; s2 += carry1; s1 -= int64_lshift21(carry1);
  carry3 = (s3 + (1 << 20)) >> 21; s4 += carry3; s3 -= int64_lshift21(carry3);
  carry5 = (s5 + (1 << 20)) >> 21; s6 += carry5; s5 -= int64_lshift21(carry5);
  carry7 = (s7 + (1 << 20)) >> 21; s8 += carry7; s7 -= int64_lshift21(carry7);
  carry9 = (s9 + (1 << 20)) >> 21; s10 += carry9; s9 -= int64_lshift21(carry9);
  carry11 = (s11 + (1 << 20)) >> 21; s12 += carry11; s11 -= int64_lshift21(carry11);

  // Fold the s12 overflow back down, then carry without rounding bias.
  s0 += s12 * 666643; s1 += s12 * 470296; s2 += s12 * 654183;
  s3 -= s12 * 997805; s4 += s12 * 136657; s5 -= s12 * 683901;
  s12 = 0;

  carry0 = s0 >> 21; s1 += carry0; s0 -= int64_lshift21(carry0);
  carry1 = s1 >> 21; s2 += carry1; s1 -= int64_lshift21(carry1);
  carry2 = s2 >> 21; s3 += carry2; s2 -= int64_lshift21(carry2);
  carry3 = s3 >> 21; s4 += carry3; s3 -= int64_lshift21(carry3);
  carry4 = s4 >> 21; s5 += carry4; s4 -= int64_lshift21(carry4);
  carry5 = s5 >> 21; s6 += carry5; s5 -= int64_lshift21(carry5);
  carry6 = s6 >> 21; s7 += carry6; s6 -= int64_lshift21(carry6);
  carry7 = s7 >> 21; s8 += carry7; s7 -= int64_lshift21(carry7);
  carry8 = s8 >> 21; s9 += carry8; s8 -= int64_lshift21(carry8);
  carry9 = s9 >> 21; s10 += carry9; s9 -= int64_lshift21(carry9);
  carry10 = s10 >> 21; s11 += carry10; s10 -= int64_lshift21(carry10);
  carry11 = s11 >> 21; s12 += carry11; s11 -= int64_lshift21(carry11);

  // One final fold and carry pass; the result now fits in s0..s11.
  s0 += s12 * 666643; s1 += s12 * 470296; s2 += s12 * 654183;
  s3 -= s12 * 997805; s4 += s12 * 136657; s5 -= s12 * 683901;
  s12 = 0;

  carry0 = s0 >> 21; s1 += carry0; s0 -= int64_lshift21(carry0);
  carry1 = s1 >> 21; s2 += carry1; s1 -= int64_lshift21(carry1);
  carry2 = s2 >> 21; s3 += carry2; s2 -= int64_lshift21(carry2);
  carry3 = s3 >> 21; s4 += carry3; s3 -= int64_lshift21(carry3);
  carry4 = s4 >> 21; s5 += carry4; s4 -= int64_lshift21(carry4);
  carry5 = s5 >> 21; s6 += carry5; s5 -= int64_lshift21(carry5);
  carry6 = s6 >> 21; s7 += carry6; s6 -= int64_lshift21(carry6);
  carry7 = s7 >> 21; s8 += carry7; s7 -= int64_lshift21(carry7);
  carry8 = s8 >> 21; s9 += carry8; s8 -= int64_lshift21(carry8);
  carry9 = s9 >> 21; s10 += carry9; s9 -= int64_lshift21(carry9);
  carry10 = s10 >> 21; s11 += carry10; s10 -= int64_lshift21(carry10);

  // Repack the 12 remaining 21-bit limbs into 32 little-endian bytes.
  s[0] = s0 >> 0;
  s[1] = s0 >> 8;
  s[2] = (s0 >> 16) | (s1 << 5);
  s[3] = s1 >> 3;
  s[4] = s1 >> 11;
  s[5] = (s1 >> 19) | (s2 << 2);
  s[6] = s2 >> 6;
  s[7] = (s2 >> 14) | (s3 << 7);
  s[8] = s3 >> 1;
  s[9] = s3 >> 9;
  s[10] = (s3 >> 17) | (s4 << 4);
  s[11] = s4 >> 4;
  s[12] = s4 >> 12;
  s[13] = (s4 >> 20) | (s5 << 1);
  s[14] = s5 >> 7;
  s[15] = (s5 >> 15) | (s6 << 6);
  s[16] = s6 >> 2;
  s[17] = s6 >> 10;
  s[18] = (s6 >> 18) | (s7 << 3);
  s[19] = s7 >> 5;
  s[20] = s7 >> 13;
  s[21] = s8 >> 0;
  s[22] = s8 >> 8;
  s[23] = (s8 >> 16) | (s9 << 5);
  s[24] = s9 >> 3;
  s[25] = s9 >> 11;
  s[26] = (s9 >> 19) | (s10 << 2);
  s[27] = s10 >> 6;
  s[28] = (s10 >> 14) | (s11 << 7);
  s[29] = s11 >> 1;
  s[30] = s11 >> 9;
  s[31] = s11 >> 17;
}

// Generates a fresh Ed25519 key pair from the system RNG: a 32-byte public
// key and a 64-byte private key (seed || public key).
void ED25519_keypair(uint8_t out_public_key[32], uint8_t out_private_key[64]) {
  uint8_t seed[32];
  RAND_bytes(seed, 32);
  ED25519_keypair_from_seed(out_public_key, out_private_key, seed);
}

// Signs |message| with |private_key| (seed || public key layout, as produced
// by ED25519_keypair_from_seed).  Writes R || S into |out_sig| and returns 1.
int ED25519_sign(uint8_t out_sig[64], const uint8_t *message,
                 size_t message_len, const uint8_t private_key[64]) {
  // NOTE: The documentation on this function says that it returns zero on
  // allocation failure. While that can't happen with the current
  // implementation, we want to reserve the ability to allocate in this
  // implementation in the future.

  // Expand the seed: az[0..31] is the clamped scalar, az[32..63] the prefix
  // used to derive the deterministic nonce.
  uint8_t az[SHA512_DIGEST_LENGTH];
  SHA512(private_key, 32, az);

  az[0] &= 248;
  az[31] &= 63;
  az[31] |= 64;

  // nonce = SHA-512(prefix || message), reduced mod l.
  SHA512_CTX hash_ctx;
  SHA512_Init(&hash_ctx);
  SHA512_Update(&hash_ctx, az + 32, 32);
  SHA512_Update(&hash_ctx, message, message_len);
  uint8_t nonce[SHA512_DIGEST_LENGTH];
  SHA512_Final(nonce, &hash_ctx);

  // R = nonce * B, encoded into the first half of the signature.
  x25519_sc_reduce(nonce);
  ge_p3 R;
  x25519_ge_scalarmult_base(&R, nonce);
  ge_p3_tobytes(out_sig, &R);

  // hram = SHA-512(R || A || message), reduced mod l.
  SHA512_Init(&hash_ctx);
  SHA512_Update(&hash_ctx, out_sig, 32);
  SHA512_Update(&hash_ctx, private_key + 32, 32);
  SHA512_Update(&hash_ctx, message, message_len);
  uint8_t hram[SHA512_DIGEST_LENGTH];
  SHA512_Final(hram, &hash_ctx);

  // S = (hram * az + nonce) mod l, the second half of the signature.
  x25519_sc_reduce(hram);
  sc_muladd(out_sig + 32, hram, az, nonce);

  // The signature is computed from the private key, but is public.
  CONSTTIME_DECLASSIFY(out_sig, 64);

  return 1;
}

// Verifies an Ed25519 signature (R || S) on |message| under |public_key|.
// Returns 1 on success, 0 on any failure.  Verification handles only public
// data, so variable-time subroutines are used throughout.
int ED25519_verify(const uint8_t *message, size_t message_len,
                   const uint8_t signature[64], const uint8_t public_key[32]) {
  // Reject encodings with stray high bits in S and undecodable public keys.
  ge_p3 A;
  if ((signature[63] & 224) != 0 ||
      !x25519_ge_frombytes_vartime(&A, public_key)) {
    return 0;
  }

  // Negate A (negate X and T) so the double-scalarmult below can compute
  // h*(-A) + s*B and compare against R directly.
  fe_loose t;
  fe_neg(&t, &A.X);
  fe_carry(&A.X, &t);
  fe_neg(&t, &A.T);
  fe_carry(&A.T, &t);

  uint8_t pkcopy[32];
  OPENSSL_memcpy(pkcopy, public_key, 32);
  uint8_t rcopy[32];
  OPENSSL_memcpy(rcopy, signature, 32);
  uint8_t scopy[32];
  OPENSSL_memcpy(scopy, signature + 32, 32);

  // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
  // the range [0, order) in order to prevent signature malleability.

  // kOrder is the order of Curve25519 in little-endian form.
  static const uint64_t kOrder[4] = {
      UINT64_C(0x5812631a5cf5d3ed),
      UINT64_C(0x14def9dea2f79cd6),
      0,
      UINT64_C(0x1000000000000000),
  };

  // Reject s >= order, comparing 64-bit words from most significant down.
  // An exact match of all four words (i reaches 0 on equality) also fails.
  for (size_t i = 3;; i--) {
    uint64_t word = CRYPTO_load_u64_le(scopy + i * 8);
    if (word > kOrder[i]) {
      return 0;
    } else if (word < kOrder[i]) {
      break;
    } else if (i == 0) {
      return 0;
    }
  }

  // h = SHA-512(R || A || message), reduced mod l.
  SHA512_CTX hash_ctx;
  SHA512_Init(&hash_ctx);
  SHA512_Update(&hash_ctx, signature, 32);
  SHA512_Update(&hash_ctx, public_key, 32);
  SHA512_Update(&hash_ctx, message, message_len);
  uint8_t h[SHA512_DIGEST_LENGTH];
  SHA512_Final(h, &hash_ctx);

  x25519_sc_reduce(h);

  // Accept iff encode(h*(-A) + s*B) == R.
  ge_p2 R;
  ge_double_scalarmult_vartime(&R, h, &A, scopy);

  uint8_t rcheck[32];
  x25519_ge_tobytes(rcheck, &R);

  return CRYPTO_memcmp(rcheck, rcopy, sizeof(rcheck)) == 0;
}

// Derives an Ed25519 key pair from a 32-byte seed.  The private key output
// is seed || public key, matching what ED25519_sign expects.
void ED25519_keypair_from_seed(uint8_t out_public_key[32],
                               uint8_t out_private_key[64],
                               const uint8_t seed[32]) {
  uint8_t az[SHA512_DIGEST_LENGTH];
  SHA512(seed, 32, az);

  // Clamp the scalar half of the expanded seed.
  az[0] &= 248;
  az[31] &= 127;
  az[31] |= 64;

  ge_p3 A;
  x25519_ge_scalarmult_base(&A, az);
  ge_p3_tobytes(out_public_key, &A);

  // The public key is derived from the private key, but it is public.
  CONSTTIME_DECLASSIFY(out_public_key, 32);

  OPENSSL_memcpy(out_private_key, seed, 32);
  OPENSSL_memcpy(out_private_key + 32, out_public_key, 32);
}

// Portable X25519 scalar multiplication via the Montgomery ladder; constant
// time (conditional swaps via fe_cswap, no secret-dependent branches).
static void x25519_scalar_mult_generic(uint8_t out[32],
                                       const uint8_t scalar[32],
                                       const uint8_t point[32]) {
  fe x1, x2, z2, x3, z3, tmp0, tmp1;
  fe_loose x2l, z2l, x3l, tmp0l, tmp1l;

  // Clamp the scalar per RFC 7748 section 5.
  uint8_t e[32];
  OPENSSL_memcpy(e, scalar, 32);
  e[0] &= 248;
  e[31] &= 127;
  e[31] |= 64;

  // The following implementation was transcribed to Coq and proven to
  // correspond to unary scalar multiplication in affine coordinates given
  // that x1 != 0 is the x coordinate of some point on the curve. It was also
  // checked in Coq that doing a ladderstep with x1 = x3 = 0 gives
  // z2' = z3' = 0, and z2 = z3 = 0 gives z2' = z3' = 0. The statement was
  // quantified over the underlying field, so it applies to Curve25519 itself
  // and the quadratic twist of Curve25519. It was not proven in Coq that
  // prime-field arithmetic correctly simulates extension-field arithmetic on
  // prime-field values. The decoding of the byte array representation of e
  // was not considered.
  // (NOTE(review): the reference links that originally accompanied the
  // comments below — specification of Montgomery curves in affine
  // coordinates, proof that these form a group isomorphic to a Weierstrass
  // curve, and the Coq transcription and correctness proof of the loop,
  // where scalarbits=255 — were lost in extraction; confirm against
  // upstream BoringSSL.)
  //
  // preconditions: 0 <= e < 2^255 (not necessarily e < order),
  // fe_invert(0) = 0
  fe_frombytes(&x1, point);
  fe_1(&x2);
  fe_0(&z2);
  fe_copy(&x3, &x1);
  fe_1(&z3);

  unsigned swap = 0;
  int pos;
  for (pos = 254; pos >= 0; --pos) {
    // loop invariant as of right before the test, for the case where
    // x1 != 0:
    //   pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3 is
    //   nonzero
    //   let r := e >> (pos+1) in the following equalities of projective
    //   points:
    //   to_xz (r*P)     === if swap then (x3, z3) else (x2, z2)
    //   to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3)
    //   x1 is the nonzero x coordinate of the nonzero point (r*P-(r+1)*P)
    unsigned b = 1 & (e[pos / 8] >> (pos & 7));
    swap ^= b;
    fe_cswap(&x2, &x3, swap);
    fe_cswap(&z2, &z3, swap);
    swap = b;
    // Coq transcription of ladderstep formula (called from transcribed
    // loop); proven for x1 != 0 and x1 = 0 (links lost in extraction; see
    // upstream).
    fe_sub(&tmp0l, &x3, &z3);
    fe_sub(&tmp1l, &x2, &z2);
    fe_add(&x2l, &x2, &z2);
    fe_add(&z2l, &x3, &z3);
    fe_mul_tll(&z3, &tmp0l, &x2l);
    fe_mul_tll(&z2, &z2l, &tmp1l);
    fe_sq_tl(&tmp0, &tmp1l);
    fe_sq_tl(&tmp1, &x2l);
    fe_add(&x3l, &z3, &z2);
    fe_sub(&z2l, &z3, &z2);
    fe_mul_ttt(&x2, &tmp1, &tmp0);
    fe_sub(&tmp1l, &tmp1, &tmp0);
    fe_sq_tl(&z2, &z2l);
    fe_mul121666(&z3, &tmp1l);
    fe_sq_tl(&x3, &x3l);
    fe_add(&tmp0l, &tmp0, &z3);
    fe_mul_ttt(&z3, &x1, &z2);
    fe_mul_tll(&z2, &tmp1l, &tmp0l);
  }
  // here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3) else
  // (x2, z2)
  fe_cswap(&x2, &x3, swap);
  fe_cswap(&z2, &z3, swap);

  // Convert from projective (x2:z2) to affine x = x2/z2.
  fe_invert(&z2, &z2);
  fe_mul_ttt(&x2, &x2, &z2);
  fe_tobytes(out, &x2);
}

// Dispatches X25519 scalar multiplication to an assembly implementation
// (NEON, or fiat ADX on capable x86-64) when available, otherwise to the
// portable generic ladder.
static void x25519_scalar_mult(uint8_t out[32], const uint8_t scalar[32],
                               const uint8_t point[32]) {
#if defined(BORINGSSL_X25519_NEON)
  if (CRYPTO_is_NEON_capable()) {
    x25519_NEON(out, scalar, point);
    return;
  }
#elif defined(BORINGSSL_FE25519_ADX)
  if (CRYPTO_is_BMI1_capable() && CRYPTO_is_BMI2_capable() &&
      CRYPTO_is_ADX_capable()) {
    x25519_scalar_mult_adx(out, scalar, point);
    return;
  }
#endif
  x25519_scalar_mult_generic(out, scalar, point);
}

// Generates a fresh X25519 key pair from the system RNG.
void X25519_keypair(uint8_t out_public_value[32], uint8_t out_private_key[32]) {
  RAND_bytes(out_private_key, 32);

  // All X25519 implementations should decode scalars correctly (see
  // https://tools.ietf.org/html/rfc7748#section-5). However, if an
  // implementation doesn't then it might interoperate with random keys a
  // fraction of the time because they'll, randomly, happen to be correctly
  // formed.
  //
  // Thus we do the opposite of the masking here to make sure that our private
  // keys are never correctly masked and so, hopefully, any incorrect
  // implementations are deterministically broken.
  //
  // This does not affect security because, although we're throwing away
  // entropy, a valid implementation of scalarmult should throw away the exact
  // same bits anyway.
  out_private_key[0] |= ~248;
  out_private_key[31] &= ~64;
  out_private_key[31] |= ~127;

  X25519_public_from_private(out_public_value, out_private_key);
}

// Computes the X25519 shared key.  Returns 0 (and an all-zero shared key)
// when the peer's point has small order, 1 otherwise.
int X25519(uint8_t out_shared_key[32], const uint8_t private_key[32],
           const uint8_t peer_public_value[32]) {
  static const uint8_t kZeros[32] = {0};
  x25519_scalar_mult(out_shared_key, private_key, peer_public_value);
  // The all-zero output results when the input is a point of small order.
  return constant_time_declassify_int(
             CRYPTO_memcmp(kZeros, out_shared_key, 32)) != 0;
}

// Computes the X25519 public value for |private_key|, i.e. the u-coordinate
// of private_key * basepoint.
void X25519_public_from_private(uint8_t out_public_value[32],
                                const uint8_t private_key[32]) {
#if defined(BORINGSSL_X25519_NEON)
  if (CRYPTO_is_NEON_capable()) {
    // (Note: "Mongomery" is an upstream typo for "Montgomery"; kept to match
    // BoringSSL.)
    static const uint8_t kMongomeryBasePoint[32] = {9};
    x25519_NEON(out_public_value, private_key, kMongomeryBasePoint);
    return;
  }
#endif

  // Clamp the scalar per RFC 7748 section 5.
  uint8_t e[32];
  OPENSSL_memcpy(e, private_key, 32);
  e[0] &= 248;
  e[31] &= 127;
  e[31] |= 64;

  ge_p3 A;
  x25519_ge_scalarmult_base(&A, e);

  // We only need the u-coordinate of the curve25519 point. The map is
  // u=(y+1)/(1-y). Since y=Y/Z, this gives u=(Z+Y)/(Z-Y).
  fe_loose zplusy, zminusy;
  fe zminusy_inv;
  fe_add(&zplusy, &A.Z, &A.Y);
  fe_sub(&zminusy, &A.Z, &A.Y);
  fe_loose_invert(&zminusy_inv, &zminusy);
  fe_mul_tlt(&zminusy_inv, &zplusy, &zminusy_inv);
  fe_tobytes(out_public_value, &zminusy_inv);
  CONSTTIME_DECLASSIFY(out_public_value, 32);
}



================================================
FILE: Sources/CNIOBoringSSL/crypto/curve25519/curve25519_64_adx.cc
================================================
/* Copyright 2023 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "internal.h"

// Pull in the fiat-crypto ADX field arithmetic only when the build enables
// the ADX backend.
#if defined(BORINGSSL_FE25519_ADX)
#include "../../third_party/fiat/curve25519_64_adx.h"
#endif



================================================
FILE: Sources/CNIOBoringSSL/crypto/curve25519/curve25519_tables.h
================================================
/* Copyright 2020 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

// This file is generated from
//    ./make_curve25519_tables.py > curve25519_tables.h

// Precomputed field-element constants, emitted by the generator script in
// both 64-bit and 32-bit limb layouts.  Per their names these are
// presumably the edwards25519 curve constant d, sqrt(-1), and 2*d — confirm
// against make_curve25519_tables.py.
static const fe d = {{
#if defined(OPENSSL_64_BIT)
    929955233495203, 466365720129213, 1662059464998953, 2033849074728123,
    1442794654840575
#else
    56195235, 13857412, 51736253, 6949390, 114729, 24766616, 60832955,
    30306712, 48412415, 21499315
#endif
}};

static const fe sqrtm1 = {{
#if defined(OPENSSL_64_BIT)
    1718705420411056, 234908883556509, 2233514472574048, 2117202627021982,
    765476049583133
#else
    34513072, 25610706, 9377949, 3500415, 12389472, 33281959, 41962654,
    31548777, 326685, 11406482
#endif
}};

static const fe d2 = {{
#if defined(OPENSSL_64_BIT)
    1859910466990425, 932731440258426, 1072319116312658, 1815898335770999,
    633789495995903
#else
    45281625, 27714825, 36363642, 13898781, 229458, 15978800, 54557047,
    27058993, 29715967, 9444199
#endif
}};

#if defined(OPENSSL_SMALL)

// This block of code replaces the standard base-point table with a much smaller
// one. The standard table is 30,720 bytes while this one is just 960.
//
// This table contains 15 pairs of group elements, (x, y), where each field
// element is serialised with |fe_tobytes|. If |i| is the index of the group
// element then consider i+1 as a four-bit number: (i₀, i₁, i₂, i₃) (where i₀
// is the most significant bit). The value of the group element is then:
// (i₀×2^192 + i₁×2^128 + i₂×2^64 + i₃)G, where G is the generator.
static const uint8_t k25519SmallPrecomp[15 * 2 * 32] = { 0x1a, 0xd5, 0x25, 0x8f, 0x60, 0x2d, 0x56, 0xc9, 0xb2, 0xa7, 0x25, 0x95, 0x60, 0xc7, 0x2c, 0x69, 0x5c, 0xdc, 0xd6, 0xfd, 0x31, 0xe2, 0xa4, 0xc0, 0xfe, 0x53, 0x6e, 0xcd, 0xd3, 0x36, 0x69, 0x21, 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x02, 0xa2, 0xed, 0xf4, 0x8f, 0x6b, 0x0b, 0x3e, 0xeb, 0x35, 0x1a, 0xd5, 0x7e, 0xdb, 0x78, 0x00, 0x96, 0x8a, 0xa0, 0xb4, 0xcf, 0x60, 0x4b, 0xd4, 0xd5, 0xf9, 0x2d, 0xbf, 0x88, 0xbd, 0x22, 0x62, 0x13, 0x53, 0xe4, 0x82, 0x57, 0xfa, 0x1e, 0x8f, 0x06, 0x2b, 0x90, 0xba, 0x08, 0xb6, 0x10, 0x54, 0x4f, 0x7c, 0x1b, 0x26, 0xed, 0xda, 0x6b, 0xdd, 0x25, 0xd0, 0x4e, 0xea, 0x42, 0xbb, 0x25, 0x03, 0xa2, 0xfb, 0xcc, 0x61, 0x67, 0x06, 0x70, 0x1a, 0xc4, 0x78, 0x3a, 0xff, 0x32, 0x62, 0xdd, 0x2c, 0xab, 0x50, 0x19, 0x3b, 0xf2, 0x9b, 0x7d, 0xb8, 0xfd, 0x4f, 0x29, 0x9c, 0xa7, 0x91, 0xba, 0x0e, 0x46, 0x5e, 0x51, 0xfe, 0x1d, 0xbf, 0xe5, 0xe5, 0x9b, 0x95, 0x0d, 0x67, 0xf8, 0xd1, 0xb5, 0x5a, 0xa1, 0x93, 0x2c, 0xc3, 0xde, 0x0e, 0x97, 0x85, 0x2d, 0x7f, 0xea, 0xab, 0x3e, 0x47, 0x30, 0x18, 0x24, 0xe8, 0xb7, 0x60, 0xae, 0x47, 0x80, 0xfc, 0xe5, 0x23, 0xe7, 0xc2, 0xc9, 0x85, 0xe6, 0x98, 0xa0, 0x29, 0x4e, 0xe1, 0x84, 0x39, 0x2d, 0x95, 0x2c, 0xf3, 0x45, 0x3c, 0xff, 0xaf, 0x27, 0x4c, 0x6b, 0xa6, 0xf5, 0x4b, 0x11, 0xbd, 0xba, 0x5b, 0x9e, 0xc4, 0xa4, 0x51, 0x1e, 0xbe, 0xd0, 0x90, 0x3a, 0x9c, 0xc2, 0x26, 0xb6, 0x1e, 0xf1, 0x95, 0x7d, 0xc8, 0x6d, 0x52, 0xe6, 0x99, 0x2c, 0x5f, 0x9a, 0x96, 0x0c, 0x68, 0x29, 0xfd, 0xe2, 0xfb, 0xe6, 0xbc, 0xec, 0x31, 0x08, 0xec, 0xe6, 0xb0, 0x53, 0x60, 0xc3, 0x8c, 0xbe, 0xc1, 0xb3, 0x8a, 0x8f, 0xe4, 0x88, 0x2b, 0x55, 0xe5, 0x64, 0x6e, 0x9b, 0xd0, 0xaf, 0x7b, 0x64, 0x2a, 0x35, 0x25, 0x10, 0x52, 0xc5, 0x9e, 0x58, 0x11, 0x39, 0x36, 0x45, 0x51, 0xb8, 0x39, 0x93, 0xfc, 0x9d, 0x6a, 0xbe, 0x58, 0xcb, 0xa4, 0x0f, 0x51, 0x3c, 0x38, 0x05, 0xca, 0xab, 
0x43, 0x63, 0x0e, 0xf3, 0x8b, 0x41, 0xa6, 0xf8, 0x9b, 0x53, 0x70, 0x80, 0x53, 0x86, 0x5e, 0x8f, 0xe3, 0xc3, 0x0d, 0x18, 0xc8, 0x4b, 0x34, 0x1f, 0xd8, 0x1d, 0xbc, 0xf2, 0x6d, 0x34, 0x3a, 0xbe, 0xdf, 0xd9, 0xf6, 0xf3, 0x89, 0xa1, 0xe1, 0x94, 0x9f, 0x5d, 0x4c, 0x5d, 0xe9, 0xa1, 0x49, 0x92, 0xef, 0x0e, 0x53, 0x81, 0x89, 0x58, 0x87, 0xa6, 0x37, 0xf1, 0xdd, 0x62, 0x60, 0x63, 0x5a, 0x9d, 0x1b, 0x8c, 0xc6, 0x7d, 0x52, 0xea, 0x70, 0x09, 0x6a, 0xe1, 0x32, 0xf3, 0x73, 0x21, 0x1f, 0x07, 0x7b, 0x7c, 0x9b, 0x49, 0xd8, 0xc0, 0xf3, 0x25, 0x72, 0x6f, 0x9d, 0xed, 0x31, 0x67, 0x36, 0x36, 0x54, 0x40, 0x92, 0x71, 0xe6, 0x11, 0x28, 0x11, 0xad, 0x93, 0x32, 0x85, 0x7b, 0x3e, 0xb7, 0x3b, 0x49, 0x13, 0x1c, 0x07, 0xb0, 0x2e, 0x93, 0xaa, 0xfd, 0xfd, 0x28, 0x47, 0x3d, 0x8d, 0xd2, 0xda, 0xc7, 0x44, 0xd6, 0x7a, 0xdb, 0x26, 0x7d, 0x1d, 0xb8, 0xe1, 0xde, 0x9d, 0x7a, 0x7d, 0x17, 0x7e, 0x1c, 0x37, 0x04, 0x8d, 0x2d, 0x7c, 0x5e, 0x18, 0x38, 0x1e, 0xaf, 0xc7, 0x1b, 0x33, 0x48, 0x31, 0x00, 0x59, 0xf6, 0xf2, 0xca, 0x0f, 0x27, 0x1b, 0x63, 0x12, 0x7e, 0x02, 0x1d, 0x49, 0xc0, 0x5d, 0x79, 0x87, 0xef, 0x5e, 0x7a, 0x2f, 0x1f, 0x66, 0x55, 0xd8, 0x09, 0xd9, 0x61, 0x38, 0x68, 0xb0, 0x07, 0xa3, 0xfc, 0xcc, 0x85, 0x10, 0x7f, 0x4c, 0x65, 0x65, 0xb3, 0xfa, 0xfa, 0xa5, 0x53, 0x6f, 0xdb, 0x74, 0x4c, 0x56, 0x46, 0x03, 0xe2, 0xd5, 0x7a, 0x29, 0x1c, 0xc6, 0x02, 0xbc, 0x59, 0xf2, 0x04, 0x75, 0x63, 0xc0, 0x84, 0x2f, 0x60, 0x1c, 0x67, 0x76, 0xfd, 0x63, 0x86, 0xf3, 0xfa, 0xbf, 0xdc, 0xd2, 0x2d, 0x90, 0x91, 0xbd, 0x33, 0xa9, 0xe5, 0x66, 0x0c, 0xda, 0x42, 0x27, 0xca, 0xf4, 0x66, 0xc2, 0xec, 0x92, 0x14, 0x57, 0x06, 0x63, 0xd0, 0x4d, 0x15, 0x06, 0xeb, 0x69, 0x58, 0x4f, 0x77, 0xc5, 0x8b, 0xc7, 0xf0, 0x8e, 0xed, 0x64, 0xa0, 0xb3, 0x3c, 0x66, 0x71, 0xc6, 0x2d, 0xda, 0x0a, 0x0d, 0xfe, 0x70, 0x27, 0x64, 0xf8, 0x27, 0xfa, 0xf6, 0x5f, 0x30, 0xa5, 0x0d, 0x6c, 0xda, 0xf2, 0x62, 0x5e, 0x78, 0x47, 0xd3, 0x66, 0x00, 0x1c, 0xfd, 0x56, 0x1f, 0x5d, 0x3f, 0x6f, 0xf4, 0x4c, 0xd8, 0xfd, 0x0e, 0x27, 0xc9, 0x5c, 0x2b, 0xbc, 0xc0, 0xa4, 0xe7, 0x23, 
0x29, 0x02, 0x9f, 0x31, 0xd6, 0xe9, 0xd7, 0x96, 0xf4, 0xe0, 0x5e, 0x0b, 0x0e, 0x13, 0xee, 0x3c, 0x09, 0xed, 0xf2, 0x3d, 0x76, 0x91, 0xc3, 0xa4, 0x97, 0xae, 0xd4, 0x87, 0xd0, 0x5d, 0xf6, 0x18, 0x47, 0x1f, 0x1d, 0x67, 0xf2, 0xcf, 0x63, 0xa0, 0x91, 0x27, 0xf8, 0x93, 0x45, 0x75, 0x23, 0x3f, 0xd1, 0xf1, 0xad, 0x23, 0xdd, 0x64, 0x93, 0x96, 0x41, 0x70, 0x7f, 0xf7, 0xf5, 0xa9, 0x89, 0xa2, 0x34, 0xb0, 0x8d, 0x1b, 0xae, 0x19, 0x15, 0x49, 0x58, 0x23, 0x6d, 0x87, 0x15, 0x4f, 0x81, 0x76, 0xfb, 0x23, 0xb5, 0xea, 0xcf, 0xac, 0x54, 0x8d, 0x4e, 0x42, 0x2f, 0xeb, 0x0f, 0x63, 0xdb, 0x68, 0x37, 0xa8, 0xcf, 0x8b, 0xab, 0xf5, 0xa4, 0x6e, 0x96, 0x2a, 0xb2, 0xd6, 0xbe, 0x9e, 0xbd, 0x0d, 0xb4, 0x42, 0xa9, 0xcf, 0x01, 0x83, 0x8a, 0x17, 0x47, 0x76, 0xc4, 0xc6, 0x83, 0x04, 0x95, 0x0b, 0xfc, 0x11, 0xc9, 0x62, 0xb8, 0x0c, 0x76, 0x84, 0xd9, 0xb9, 0x37, 0xfa, 0xfc, 0x7c, 0xc2, 0x6d, 0x58, 0x3e, 0xb3, 0x04, 0xbb, 0x8c, 0x8f, 0x48, 0xbc, 0x91, 0x27, 0xcc, 0xf9, 0xb7, 0x22, 0x19, 0x83, 0x2e, 0x09, 0xb5, 0x72, 0xd9, 0x54, 0x1c, 0x4d, 0xa1, 0xea, 0x0b, 0xf1, 0xc6, 0x08, 0x72, 0x46, 0x87, 0x7a, 0x6e, 0x80, 0x56, 0x0a, 0x8a, 0xc0, 0xdd, 0x11, 0x6b, 0xd6, 0xdd, 0x47, 0xdf, 0x10, 0xd9, 0xd8, 0xea, 0x7c, 0xb0, 0x8f, 0x03, 0x00, 0x2e, 0xc1, 0x8f, 0x44, 0xa8, 0xd3, 0x30, 0x06, 0x89, 0xa2, 0xf9, 0x34, 0xad, 0xdc, 0x03, 0x85, 0xed, 0x51, 0xa7, 0x82, 0x9c, 0xe7, 0x5d, 0x52, 0x93, 0x0c, 0x32, 0x9a, 0x5b, 0xe1, 0xaa, 0xca, 0xb8, 0x02, 0x6d, 0x3a, 0xd4, 0xb1, 0x3a, 0xf0, 0x5f, 0xbe, 0xb5, 0x0d, 0x10, 0x6b, 0x38, 0x32, 0xac, 0x76, 0x80, 0xbd, 0xca, 0x94, 0x71, 0x7a, 0xf2, 0xc9, 0x35, 0x2a, 0xde, 0x9f, 0x42, 0x49, 0x18, 0x01, 0xab, 0xbc, 0xef, 0x7c, 0x64, 0x3f, 0x58, 0x3d, 0x92, 0x59, 0xdb, 0x13, 0xdb, 0x58, 0x6e, 0x0a, 0xe0, 0xb7, 0x91, 0x4a, 0x08, 0x20, 0xd6, 0x2e, 0x3c, 0x45, 0xc9, 0x8b, 0x17, 0x79, 0xe7, 0xc7, 0x90, 0x99, 0x3a, 0x18, 0x25, }; #else // k25519Precomp[i][j] = (j+1)*256^i*B const uint8_t k25519Precomp[32][8][3][32] = { { { {0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0xe, 0x8c, 0xfb, 0xc6, 
0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0xb, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x7}, {0x3e, 0x91, 0x40, 0xd7, 0x5, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x5, 0x9f, 0x39, 0xfd, 0x9, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44}, {0x68, 0xaa, 0x7a, 0x87, 0x5, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0xc, 0x9f, 0x68, 0x7b, 0x11, 0x6f}, }, { {0xd7, 0x71, 0x3c, 0x93, 0xfc, 0xe7, 0x24, 0x92, 0xb5, 0xf5, 0xf, 0x7a, 0x96, 0x9d, 0x46, 0x9f, 0x2, 0x7, 0xd6, 0xe1, 0x65, 0x9a, 0xa6, 0x5a, 0x2e, 0x2e, 0x7d, 0xa8, 0x3f, 0x6, 0xc, 0x59}, {0xa8, 0xd5, 0xb4, 0x42, 0x60, 0xa5, 0x99, 0x8a, 0xf6, 0xac, 0x60, 0x4e, 0xc, 0x81, 0x2b, 0x8f, 0xaa, 0x37, 0x6e, 0xb1, 0x6b, 0x23, 0x9e, 0xe0, 0x55, 0x25, 0xc9, 0x69, 0xa6, 0x95, 0xb5, 0x6b}, {0x5f, 0x7a, 0x9b, 0xa5, 0xb3, 0xa8, 0xfa, 0x43, 0x78, 0xcf, 0x9a, 0x5d, 0xdd, 0x6b, 0xc1, 0x36, 0x31, 0x6a, 0x3d, 0xb, 0x84, 0xa0, 0xf, 0x50, 0x73, 0xb, 0xa5, 0x3e, 0xb1, 0xf5, 0x1a, 0x70}, }, { {0x30, 0x97, 0xee, 0x4c, 0xa8, 0xb0, 0x25, 0xaf, 0x8a, 0x4b, 0x86, 0xe8, 0x30, 0x84, 0x5a, 0x2, 0x32, 0x67, 0x1, 0x9f, 0x2, 0x50, 0x1b, 0xc1, 0xf4, 0xf8, 0x80, 0x9a, 0x1b, 0x4e, 0x16, 0x7a}, {0x65, 0xd2, 0xfc, 0xa4, 0xe8, 0x1f, 0x61, 0x56, 0x7d, 0xba, 0xc1, 0xe5, 0xfd, 0x53, 0xd3, 0x3b, 0xbd, 0xd6, 0x4b, 0x21, 0x1a, 0xf3, 0x31, 0x81, 0x62, 0xda, 0x5b, 0x55, 0x87, 0x15, 0xb9, 0x2a}, {0x89, 0xd8, 0xd0, 0xd, 0x3f, 0x93, 0xae, 0x14, 0x62, 0xda, 0x35, 0x1c, 0x22, 0x23, 0x94, 0x58, 0x4c, 0xdb, 0xf2, 0x8c, 0x45, 0xe5, 0x70, 0xd1, 0xc6, 0xb4, 0xb9, 0x12, 0xaf, 0x26, 0x28, 0x5a}, }, { {0x9f, 0x9, 0xfc, 0x8e, 0xb9, 0x51, 0x73, 0x28, 0x38, 0x25, 0xfd, 0x7d, 0xf4, 0xc6, 0x65, 0x67, 0x65, 0x92, 0xa, 0xfb, 0x3d, 0x8d, 0x34, 0xca, 0x27, 0x87, 0xe5, 0x21, 0x3, 0x91, 0xe, 0x68}, {0xbf, 0x18, 0x68, 0x5, 0xa, 0x5, 0xfe, 0x95, 0xa9, 0xfa, 0x60, 0x56, 0x71, 0x89, 0x7e, 0x32, 0x73, 0x50, 0xa0, 0x6, 0xcd, 0xe3, 0xe8, 0xc3, 0x9a, 
0xa4, 0x45, 0x74, 0x4c, 0x3f, 0x93, 0x27}, {0x9, 0xff, 0x76, 0xc4, 0xe9, 0xfb, 0x13, 0x5a, 0x72, 0xc1, 0x5c, 0x7b, 0x45, 0x39, 0x9e, 0x6e, 0x94, 0x44, 0x2b, 0x10, 0xf9, 0xdc, 0xdb, 0x5d, 0x2b, 0x3e, 0x55, 0x63, 0xbf, 0xc, 0x9d, 0x7f}, }, { {0x33, 0xbb, 0xa5, 0x8, 0x44, 0xbc, 0x12, 0xa2, 0x2, 0xed, 0x5e, 0xc7, 0xc3, 0x48, 0x50, 0x8d, 0x44, 0xec, 0xbf, 0x5a, 0xc, 0xeb, 0x1b, 0xdd, 0xeb, 0x6, 0xe2, 0x46, 0xf1, 0xcc, 0x45, 0x29}, {0xba, 0xd6, 0x47, 0xa4, 0xc3, 0x82, 0x91, 0x7f, 0xb7, 0x29, 0x27, 0x4b, 0xd1, 0x14, 0x0, 0xd5, 0x87, 0xa0, 0x64, 0xb8, 0x1c, 0xf1, 0x3c, 0xe3, 0xf3, 0x55, 0x1b, 0xeb, 0x73, 0x7e, 0x4a, 0x15}, {0x85, 0x82, 0x2a, 0x81, 0xf1, 0xdb, 0xbb, 0xbc, 0xfc, 0xd1, 0xbd, 0xd0, 0x7, 0x8, 0xe, 0x27, 0x2d, 0xa7, 0xbd, 0x1b, 0xb, 0x67, 0x1b, 0xb4, 0x9a, 0xb6, 0x3b, 0x6b, 0x69, 0xbe, 0xaa, 0x43}, }, { {0x31, 0x71, 0x15, 0x77, 0xeb, 0xee, 0xc, 0x3a, 0x88, 0xaf, 0xc8, 0x0, 0x89, 0x15, 0x27, 0x9b, 0x36, 0xa7, 0x59, 0xda, 0x68, 0xb6, 0x65, 0x80, 0xbd, 0x38, 0xcc, 0xa2, 0xb6, 0x7b, 0xe5, 0x51}, {0xa4, 0x8c, 0x7d, 0x7b, 0xb6, 0x6, 0x98, 0x49, 0x39, 0x27, 0xd2, 0x27, 0x84, 0xe2, 0x5b, 0x57, 0xb9, 0x53, 0x45, 0x20, 0xe7, 0x5c, 0x8, 0xbb, 0x84, 0x78, 0x41, 0xae, 0x41, 0x4c, 0xb6, 0x38}, {0x71, 0x4b, 0xea, 0x2, 0x67, 0x32, 0xac, 0x85, 0x1, 0xbb, 0xa1, 0x41, 0x3, 0xe0, 0x70, 0xbe, 0x44, 0xc1, 0x3b, 0x8, 0x4b, 0xa2, 0xe4, 0x53, 0xe3, 0x61, 0xd, 0x9f, 0x1a, 0xe9, 0xb8, 0x10}, }, { {0xbf, 0xa3, 0x4e, 0x94, 0xd0, 0x5c, 0x1a, 0x6b, 0xd2, 0xc0, 0x9d, 0xb3, 0x3a, 0x35, 0x70, 0x74, 0x49, 0x2e, 0x54, 0x28, 0x82, 0x52, 0xb2, 0x71, 0x7e, 0x92, 0x3c, 0x28, 0x69, 0xea, 0x1b, 0x46}, {0xb1, 0x21, 0x32, 0xaa, 0x9a, 0x2c, 0x6f, 0xba, 0xa7, 0x23, 0xba, 0x3b, 0x53, 0x21, 0xa0, 0x6c, 0x3a, 0x2c, 0x19, 0x92, 0x4f, 0x76, 0xea, 0x9d, 0xe0, 0x17, 0x53, 0x2e, 0x5d, 0xdd, 0x6e, 0x1d}, {0xa2, 0xb3, 0xb8, 0x1, 0xc8, 0x6d, 0x83, 0xf1, 0x9a, 0xa4, 0x3e, 0x5, 0x47, 0x5f, 0x3, 0xb3, 0xf3, 0xad, 0x77, 0x58, 0xba, 0x41, 0x9c, 0x52, 0xa7, 0x90, 0xf, 0x6a, 0x1c, 0xbb, 0x9f, 0x7a}, }, { {0x8f, 0x3e, 0xdd, 
0x4, 0x66, 0x59, 0xb7, 0x59, 0x2c, 0x70, 0x88, 0xe2, 0x77, 0x3, 0xb3, 0x6c, 0x23, 0xc3, 0xd9, 0x5e, 0x66, 0x9c, 0x33, 0xb1, 0x2f, 0xe5, 0xbc, 0x61, 0x60, 0xe7, 0x15, 0x9}, {0xd9, 0x34, 0x92, 0xf3, 0xed, 0x5d, 0xa7, 0xe2, 0xf9, 0x58, 0xb5, 0xe1, 0x80, 0x76, 0x3d, 0x96, 0xfb, 0x23, 0x3c, 0x6e, 0xac, 0x41, 0x27, 0x2c, 0xc3, 0x1, 0xe, 0x32, 0xa1, 0x24, 0x90, 0x3a}, {0x1a, 0x91, 0xa2, 0xc9, 0xd9, 0xf5, 0xc1, 0xe7, 0xd7, 0xa7, 0xcc, 0x8b, 0x78, 0x71, 0xa3, 0xb8, 0x32, 0x2a, 0xb6, 0xe, 0x19, 0x12, 0x64, 0x63, 0x95, 0x4e, 0xcc, 0x2e, 0x5c, 0x7c, 0x90, 0x26}, }, }, { { {0x1d, 0x9c, 0x2f, 0x63, 0xe, 0xdd, 0xcc, 0x2e, 0x15, 0x31, 0x89, 0x76, 0x96, 0xb6, 0xd0, 0x51, 0x58, 0x7a, 0x63, 0xa8, 0x6b, 0xb7, 0xdf, 0x52, 0x39, 0xef, 0xe, 0xa0, 0x49, 0x7d, 0xd3, 0x6d}, {0x5e, 0x51, 0xaa, 0x49, 0x54, 0x63, 0x5b, 0xed, 0x3a, 0x82, 0xc6, 0xb, 0x9f, 0xc4, 0x65, 0xa8, 0xc4, 0xd1, 0x42, 0x5b, 0xe9, 0x1f, 0xc, 0x85, 0xb9, 0x15, 0xd3, 0x3, 0x6f, 0x6d, 0xd7, 0x30}, {0xc7, 0xe4, 0x6, 0x21, 0x17, 0x44, 0x44, 0x6c, 0x69, 0x7f, 0x8d, 0x92, 0x80, 0xd6, 0x53, 0xfb, 0x26, 0x3f, 0x4d, 0x69, 0xa4, 0x9e, 0x73, 0xb4, 0xb0, 0x4b, 0x86, 0x2e, 0x11, 0x97, 0xc6, 0x10}, }, { {0x5, 0xc8, 0x58, 0x83, 0xa0, 0x2a, 0xa6, 0xc, 0x47, 0x42, 0x20, 0x7a, 0xe3, 0x4a, 0x3d, 0x6a, 0xdc, 0xed, 0x11, 0x3b, 0xa6, 0xd3, 0x64, 0x74, 0xef, 0x6, 0x8, 0x55, 0xaf, 0x9b, 0xbf, 0x3}, {0xde, 0x5f, 0xbe, 0x7d, 0x27, 0xc4, 0x93, 0x64, 0xa2, 0x7e, 0xad, 0x19, 0xad, 0x4f, 0x5d, 0x26, 0x90, 0x45, 0x30, 0x46, 0xc8, 0xdf, 0x0, 0xe, 0x9, 0xfe, 0x66, 0xed, 0xab, 0x1c, 0xe6, 0x25}, {0x4, 0x66, 0x58, 0xcc, 0x28, 0xe1, 0x13, 0x3f, 0x7e, 0x74, 0x59, 0xb4, 0xec, 0x73, 0x58, 0x6f, 0xf5, 0x68, 0x12, 0xcc, 0xed, 0x3d, 0xb6, 0xa0, 0x2c, 0xe2, 0x86, 0x45, 0x63, 0x78, 0x6d, 0x56}, }, { {0xd0, 0x2f, 0x5a, 0xc6, 0x85, 0x42, 0x5, 0xa1, 0xc3, 0x67, 0x16, 0xf3, 0x2a, 0x11, 0x64, 0x6c, 0x58, 0xee, 0x1a, 0x73, 0x40, 0xe2, 0xa, 0x68, 0x2a, 0xb2, 0x93, 0x47, 0xf3, 0xa5, 0xfb, 0x14}, {0x34, 0x8, 0xc1, 0x9c, 0x9f, 0xa4, 0x37, 0x16, 0x51, 0xc4, 0x9b, 0xa8, 0xd5, 
0x56, 0x8e, 0xbc, 0xdb, 0xd2, 0x7f, 0x7f, 0xf, 0xec, 0xb5, 0x1c, 0xd9, 0x35, 0xcc, 0x5e, 0xca, 0x5b, 0x97, 0x33}, {0xd4, 0xf7, 0x85, 0x69, 0x16, 0x46, 0xd7, 0x3c, 0x57, 0x0, 0xc8, 0xc9, 0x84, 0x5e, 0x3e, 0x59, 0x1e, 0x13, 0x61, 0x7b, 0xb6, 0xf2, 0xc3, 0x2f, 0x6c, 0x52, 0xfc, 0x83, 0xea, 0x9c, 0x82, 0x14}, }, { {0xb8, 0xec, 0x71, 0x4e, 0x2f, 0xb, 0xe7, 0x21, 0xe3, 0x77, 0xa4, 0x40, 0xb9, 0xdd, 0x56, 0xe6, 0x80, 0x4f, 0x1d, 0xce, 0xce, 0x56, 0x65, 0xbf, 0x7e, 0x7b, 0x5d, 0x53, 0xc4, 0x3b, 0xfc, 0x5}, {0xc2, 0x95, 0xdd, 0x97, 0x84, 0x7b, 0x43, 0xff, 0xa7, 0xb5, 0x4e, 0xaa, 0x30, 0x4e, 0x74, 0x6c, 0x8b, 0xe8, 0x85, 0x3c, 0x61, 0x5d, 0xc, 0x9e, 0x73, 0x81, 0x75, 0x5f, 0x1e, 0xc7, 0xd9, 0x2f}, {0xdd, 0xde, 0xaf, 0x52, 0xae, 0xb3, 0xb8, 0x24, 0xcf, 0x30, 0x3b, 0xed, 0x8c, 0x63, 0x95, 0x34, 0x95, 0x81, 0xbe, 0xa9, 0x83, 0xbc, 0xa4, 0x33, 0x4, 0x1f, 0x65, 0x5c, 0x47, 0x67, 0x37, 0x37}, }, { {0x90, 0x65, 0x24, 0x14, 0xcb, 0x95, 0x40, 0x63, 0x35, 0x55, 0xc1, 0x16, 0x40, 0x14, 0x12, 0xef, 0x60, 0xbc, 0x10, 0x89, 0xc, 0x14, 0x38, 0x9e, 0x8c, 0x7c, 0x90, 0x30, 0x57, 0x90, 0xf5, 0x6b}, {0xd9, 0xad, 0xd1, 0x40, 0xfd, 0x99, 0xba, 0x2f, 0x27, 0xd0, 0xf4, 0x96, 0x6f, 0x16, 0x7, 0xb3, 0xae, 0x3b, 0xf0, 0x15, 0x52, 0xf0, 0x63, 0x43, 0x99, 0xf9, 0x18, 0x3b, 0x6c, 0xa5, 0xbe, 0x1f}, {0x8a, 0x5b, 0x41, 0xe1, 0xf1, 0x78, 0xa7, 0xf, 0x7e, 0xa7, 0xc3, 0xba, 0xf7, 0x9f, 0x40, 0x6, 0x50, 0x9a, 0xa2, 0x9a, 0xb8, 0xd7, 0x52, 0x6f, 0x56, 0x5a, 0x63, 0x7a, 0xf6, 0x1c, 0x52, 0x2}, }, { {0xe4, 0x5e, 0x2f, 0x77, 0x20, 0x67, 0x14, 0xb1, 0xce, 0x9a, 0x7, 0x96, 0xb1, 0x94, 0xf8, 0xe8, 0x4a, 0x82, 0xac, 0x0, 0x4d, 0x22, 0xf8, 0x4a, 0xc4, 0x6c, 0xcd, 0xf7, 0xd9, 0x53, 0x17, 0x0}, {0x94, 0x52, 0x9d, 0xa, 0xb, 0xee, 0x3f, 0x51, 0x66, 0x5a, 0xdf, 0xf, 0x5c, 0xe7, 0x98, 0x8f, 0xce, 0x7, 0xe1, 0xbf, 0x88, 0x86, 0x61, 0xd4, 0xed, 0x2c, 0x38, 0x71, 0x7e, 0xa, 0xa0, 0x3f}, {0x34, 0xdb, 0x3d, 0x96, 0x2d, 0x23, 0x69, 0x3c, 0x58, 0x38, 0x97, 0xb4, 0xda, 0x87, 0xde, 0x1d, 0x85, 0xf2, 0x91, 0xa0, 0xf9, 0xd1, 0xd7, 
0xaa, 0xb6, 0xed, 0x48, 0xa0, 0x2f, 0xfe, 0xb5, 0x12}, }, { {0x92, 0x1e, 0x6f, 0xad, 0x26, 0x7c, 0x2b, 0xdf, 0x13, 0x89, 0x4b, 0x50, 0x23, 0xd3, 0x66, 0x4b, 0xc3, 0x8b, 0x1c, 0x75, 0xc0, 0x9d, 0x40, 0x8c, 0xb8, 0xc7, 0x96, 0x7, 0xc2, 0x93, 0x7e, 0x6f}, {0x4d, 0xe3, 0xfc, 0x96, 0xc4, 0xfb, 0xf0, 0x71, 0xed, 0x5b, 0xf3, 0xad, 0x6b, 0x82, 0xb9, 0x73, 0x61, 0xc5, 0x28, 0xff, 0x61, 0x72, 0x4, 0xd2, 0x6f, 0x20, 0xb1, 0x6f, 0xf9, 0x76, 0x9b, 0x74}, {0x5, 0xae, 0xa6, 0xae, 0x4, 0xf6, 0x5a, 0x1f, 0x99, 0x9c, 0xe4, 0xbe, 0xf1, 0x51, 0x23, 0xc1, 0x66, 0x6b, 0xff, 0xee, 0xb5, 0x8, 0xa8, 0x61, 0x51, 0x21, 0xe0, 0x1, 0xf, 0xc1, 0xce, 0xf}, }, { {0x45, 0x4e, 0x24, 0xc4, 0x9d, 0xd2, 0xf2, 0x3d, 0xa, 0xde, 0xd8, 0x93, 0x74, 0xe, 0x2, 0x2b, 0x4d, 0x21, 0xc, 0x82, 0x7e, 0x6, 0xc8, 0x6c, 0xa, 0xb9, 0xea, 0x6f, 0x16, 0x79, 0x37, 0x41}, {0x44, 0x1e, 0xfe, 0x49, 0xa6, 0x58, 0x4d, 0x64, 0x7e, 0x77, 0xad, 0x31, 0xa2, 0xae, 0xfc, 0x21, 0xd2, 0xd0, 0x7f, 0x88, 0x5a, 0x1c, 0x44, 0x2, 0xf3, 0x11, 0xc5, 0x83, 0x71, 0xaa, 0x1, 0x49}, {0xf0, 0xf8, 0x1a, 0x8c, 0x54, 0xb7, 0xb1, 0x8, 0xb4, 0x99, 0x62, 0x24, 0x7c, 0x7a, 0xf, 0xce, 0x39, 0xd9, 0x6, 0x1e, 0xf9, 0xb0, 0x60, 0xf7, 0x13, 0x12, 0x6d, 0x72, 0x7b, 0x88, 0xbb, 0x41}, }, }, { { {0xae, 0x91, 0x66, 0x7c, 0x59, 0x4c, 0x23, 0x7e, 0xc8, 0xb4, 0x85, 0xa, 0x3d, 0x9d, 0x88, 0x64, 0xe7, 0xfa, 0x4a, 0x35, 0xc, 0xc9, 0xe2, 0xda, 0x1d, 0x9e, 0x6a, 0xc, 0x7, 0x1e, 0x87, 0xa}, {0xbe, 0x46, 0x43, 0x74, 0x44, 0x7d, 0xe8, 0x40, 0x25, 0x2b, 0xb5, 0x15, 0xd4, 0xda, 0x48, 0x1d, 0x3e, 0x60, 0x3b, 0xa1, 0x18, 0x8a, 0x3a, 0x7c, 0xf7, 0xbd, 0xcd, 0x2f, 0xc1, 0x28, 0xb7, 0x4e}, {0x89, 0x89, 0xbc, 0x4b, 0x99, 0xb5, 0x1, 0x33, 0x60, 0x42, 0xdd, 0x5b, 0x3a, 0xae, 0x6b, 0x73, 0x3c, 0x9e, 0xd5, 0x19, 0xe2, 0xad, 0x61, 0xd, 0x64, 0xd4, 0x85, 0x26, 0xf, 0x30, 0xe7, 0x3e}, }, { {0x18, 0x75, 0x1e, 0x84, 0x47, 0x79, 0xfa, 0x43, 0xd7, 0x46, 0x9c, 0x63, 0x59, 0xfa, 0xc6, 0xe5, 0x74, 0x2b, 0x5, 0xe3, 0x1d, 0x5e, 0x6, 0xa1, 0x30, 0x90, 0xb8, 0xcf, 0xa2, 0xc6, 0x47, 0x7d}, {0xb7, 
0xd6, 0x7d, 0x9e, 0xe4, 0x55, 0xd2, 0xf5, 0xac, 0x1e, 0xb, 0x61, 0x5c, 0x11, 0x16, 0x80, 0xca, 0x87, 0xe1, 0x92, 0x5d, 0x97, 0x99, 0x3c, 0xc2, 0x25, 0x91, 0x97, 0x62, 0x57, 0x81, 0x13}, {0xe0, 0xd6, 0xf0, 0x8e, 0x14, 0xd0, 0xda, 0x3f, 0x3c, 0x6f, 0x54, 0x91, 0x9a, 0x74, 0x3e, 0x9d, 0x57, 0x81, 0xbb, 0x26, 0x10, 0x62, 0xec, 0x71, 0x80, 0xec, 0xc9, 0x34, 0x8d, 0xf5, 0x8c, 0x14}, }, { {0x6d, 0x75, 0xe4, 0x9a, 0x7d, 0x2f, 0x57, 0xe2, 0x7f, 0x48, 0xf3, 0x88, 0xbb, 0x45, 0xc3, 0x56, 0x8d, 0xa8, 0x60, 0x69, 0x6d, 0xb, 0xd1, 0x9f, 0xb9, 0xa1, 0xae, 0x4e, 0xad, 0xeb, 0x8f, 0x27}, {0x27, 0xf0, 0x34, 0x79, 0xf6, 0x92, 0xa4, 0x46, 0xa9, 0xa, 0x84, 0xf6, 0xbe, 0x84, 0x99, 0x46, 0x54, 0x18, 0x61, 0x89, 0x2a, 0xbc, 0xa1, 0x5c, 0xd4, 0xbb, 0x5d, 0xbd, 0x1e, 0xfa, 0xf2, 0x3f}, {0x66, 0x39, 0x93, 0x8c, 0x1f, 0x68, 0xaa, 0xb1, 0x98, 0xc, 0x29, 0x20, 0x9c, 0x94, 0x21, 0x8c, 0x52, 0x3c, 0x9d, 0x21, 0x91, 0x52, 0x11, 0x39, 0x7b, 0x67, 0x9c, 0xfe, 0x2, 0xdd, 0x4, 0x41}, }, { {0xb8, 0x6a, 0x9, 0xdb, 0x6, 0x4e, 0x21, 0x81, 0x35, 0x4f, 0xe4, 0xc, 0xc9, 0xb6, 0xa8, 0x21, 0xf5, 0x2a, 0x9e, 0x40, 0x2a, 0xc1, 0x24, 0x65, 0x81, 0xa4, 0xfc, 0x8e, 0xa4, 0xb5, 0x65, 0x1}, {0x2a, 0x42, 0x24, 0x11, 0x5e, 0xbf, 0xb2, 0x72, 0xb5, 0x3a, 0xa3, 0x98, 0x33, 0xc, 0xfa, 0xa1, 0x66, 0xb6, 0x52, 0xfa, 0x1, 0x61, 0xcb, 0x94, 0xd5, 0x53, 0xaf, 0xaf, 0x0, 0x3b, 0x86, 0x2c}, {0x76, 0x6a, 0x84, 0xa0, 0x74, 0xa4, 0x90, 0xf1, 0xc0, 0x7c, 0x2f, 0xcd, 0x84, 0xf9, 0xef, 0x12, 0x8f, 0x2b, 0xaa, 0x58, 0x6, 0x29, 0x5e, 0x69, 0xb8, 0xc8, 0xfe, 0xbf, 0xd9, 0x67, 0x1b, 0x59}, }, { {0x5d, 0xb5, 0x18, 0x9f, 0x71, 0xb3, 0xb9, 0x99, 0x1e, 0x64, 0x8c, 0xa1, 0xfa, 0xe5, 0x65, 0xe4, 0xed, 0x5, 0x9f, 0xc2, 0x36, 0x11, 0x8, 0x61, 0x8b, 0x12, 0x30, 0x70, 0x86, 0x4f, 0x9b, 0x48}, {0xfa, 0x9b, 0xb4, 0x80, 0x1c, 0xd, 0x2f, 0x31, 0x8a, 0xec, 0xf3, 0xab, 0x5e, 0x51, 0x79, 0x59, 0x88, 0x1c, 0xf0, 0x9e, 0xc0, 0x33, 0x70, 0x72, 0xcb, 0x7b, 0x8f, 0xca, 0xc7, 0x2e, 0xe0, 0x3d}, {0xef, 0x92, 0xeb, 0x3a, 0x2d, 0x10, 0x32, 0xd2, 0x61, 0xa8, 0x16, 
0x61, 0xb4, 0x53, 0x62, 0xe1, 0x24, 0xaa, 0xb, 0x19, 0xe7, 0xab, 0x7e, 0x3d, 0xbf, 0xbe, 0x6c, 0x49, 0xba, 0xfb, 0xf5, 0x49}, }, { {0x2e, 0x57, 0x9c, 0x1e, 0x8c, 0x62, 0x5d, 0x15, 0x41, 0x47, 0x88, 0xc5, 0xac, 0x86, 0x4d, 0x8a, 0xeb, 0x63, 0x57, 0x51, 0xf6, 0x52, 0xa3, 0x91, 0x5b, 0x51, 0x67, 0x88, 0xc2, 0xa6, 0xa1, 0x6}, {0xd4, 0xcf, 0x5b, 0x8a, 0x10, 0x9a, 0x94, 0x30, 0xeb, 0x73, 0x64, 0xbc, 0x70, 0xdd, 0x40, 0xdc, 0x1c, 0xd, 0x7c, 0x30, 0xc1, 0x94, 0xc2, 0x92, 0x74, 0x6e, 0xfa, 0xcb, 0x6d, 0xa8, 0x4, 0x56}, {0xb6, 0x64, 0x17, 0x7c, 0xd4, 0xd1, 0x88, 0x72, 0x51, 0x8b, 0x41, 0xe0, 0x40, 0x11, 0x54, 0x72, 0xd1, 0xf6, 0xac, 0x18, 0x60, 0x1a, 0x3, 0x9f, 0xc6, 0x42, 0x27, 0xfe, 0x89, 0x9e, 0x98, 0x20}, }, { {0x2e, 0xec, 0xea, 0x85, 0x8b, 0x27, 0x74, 0x16, 0xdf, 0x2b, 0xcb, 0x7a, 0x7, 0xdc, 0x21, 0x56, 0x5a, 0xf4, 0xcb, 0x61, 0x16, 0x4c, 0xa, 0x64, 0xd3, 0x95, 0x5, 0xf7, 0x50, 0x99, 0xb, 0x73}, {0x7f, 0xcc, 0x2d, 0x3a, 0xfd, 0x77, 0x97, 0x49, 0x92, 0xd8, 0x4f, 0xa5, 0x2c, 0x7c, 0x85, 0x32, 0xa0, 0xe3, 0x7, 0xd2, 0x64, 0xd8, 0x79, 0xa2, 0x29, 0x7e, 0xa6, 0xc, 0x1d, 0xed, 0x3, 0x4}, {0x52, 0xc5, 0x4e, 0x87, 0x35, 0x2d, 0x4b, 0xc9, 0x8d, 0x6f, 0x24, 0x98, 0xcf, 0xc8, 0xe6, 0xc5, 0xce, 0x35, 0xc0, 0x16, 0xfa, 0x46, 0xcb, 0xf7, 0xcc, 0x3d, 0x30, 0x8, 0x43, 0x45, 0xd7, 0x5b}, }, { {0x2a, 0x79, 0xe7, 0x15, 0x21, 0x93, 0xc4, 0x85, 0xc9, 0xdd, 0xcd, 0xbd, 0xa2, 0x89, 0x4c, 0xc6, 0x62, 0xd7, 0xa3, 0xad, 0xa8, 0x3d, 0x1e, 0x9d, 0x2c, 0xf8, 0x67, 0x30, 0x12, 0xdb, 0xb7, 0x5b}, {0xc2, 0x4c, 0xb2, 0x28, 0x95, 0xd1, 0x9a, 0x7f, 0x81, 0xc1, 0x35, 0x63, 0x65, 0x54, 0x6b, 0x7f, 0x36, 0x72, 0xc0, 0x4f, 0x6e, 0xb6, 0xb8, 0x66, 0x83, 0xad, 0x80, 0x73, 0x0, 0x78, 0x3a, 0x13}, {0xbe, 0x62, 0xca, 0xc6, 0x67, 0xf4, 0x61, 0x9, 0xee, 0x52, 0x19, 0x21, 0xd6, 0x21, 0xec, 0x4, 0x70, 0x47, 0xd5, 0x9b, 0x77, 0x60, 0x23, 0x18, 0xd2, 0xe0, 0xf0, 0x58, 0x6d, 0xca, 0xd, 0x74}, }, }, { { {0x3c, 0x43, 0x78, 0x4, 0x57, 0x8c, 0x1a, 0x23, 0x9d, 0x43, 0x81, 0xc2, 0xe, 0x27, 0xb5, 0xb7, 0x9f, 0x7, 0xd9, 0xe3, 
0xea, 0x99, 0xaa, 0xdb, 0xd9, 0x3, 0x2b, 0x6c, 0x25, 0xf5, 0x3, 0x2c}, {0x4e, 0xce, 0xcf, 0x52, 0x7, 0xee, 0x48, 0xdf, 0xb7, 0x8, 0xec, 0x6, 0xf3, 0xfa, 0xff, 0xc3, 0xc4, 0x59, 0x54, 0xb9, 0x2a, 0xb, 0x71, 0x5, 0x8d, 0xa3, 0x3e, 0x96, 0xfa, 0x25, 0x1d, 0x16}, {0x7d, 0xa4, 0x53, 0x7b, 0x75, 0x18, 0xf, 0x79, 0x79, 0x58, 0xc, 0xcf, 0x30, 0x1, 0x7b, 0x30, 0xf9, 0xf7, 0x7e, 0x25, 0x77, 0x3d, 0x90, 0x31, 0xaf, 0xbb, 0x96, 0xbd, 0xbd, 0x68, 0x94, 0x69}, }, { {0x48, 0x19, 0xa9, 0x6a, 0xe6, 0x3d, 0xdd, 0xd8, 0xcc, 0xd2, 0xc0, 0x2f, 0xc2, 0x64, 0x50, 0x48, 0x2f, 0xea, 0xfd, 0x34, 0x66, 0x24, 0x48, 0x9b, 0x3a, 0x2e, 0x4a, 0x6c, 0x4e, 0x1c, 0x3e, 0x29}, {0xcf, 0xfe, 0xda, 0xf4, 0x46, 0x2f, 0x1f, 0xbd, 0xf7, 0xd6, 0x7f, 0xa4, 0x14, 0x1, 0xef, 0x7c, 0x7f, 0xb3, 0x47, 0x4a, 0xda, 0xfd, 0x1f, 0xd3, 0x85, 0x57, 0x90, 0x73, 0xa4, 0x19, 0x52, 0x52}, {0xe1, 0x12, 0x51, 0x92, 0x4b, 0x13, 0x6e, 0x37, 0xa0, 0x5d, 0xa1, 0xdc, 0xb5, 0x78, 0x37, 0x70, 0x11, 0x31, 0x1c, 0x46, 0xaf, 0x89, 0x45, 0xb0, 0x23, 0x28, 0x3, 0x7f, 0x44, 0x5c, 0x60, 0x5b}, }, { {0x4c, 0xf0, 0xe7, 0xf0, 0xc6, 0xfe, 0xe9, 0x3b, 0x62, 0x49, 0xe3, 0x75, 0x9e, 0x57, 0x6a, 0x86, 0x1a, 0xe6, 0x1d, 0x1e, 0x16, 0xef, 0x42, 0x55, 0xd5, 0xbd, 0x5a, 0xcc, 0xf4, 0xfe, 0x12, 0x2f}, {0x89, 0x7c, 0xc4, 0x20, 0x59, 0x80, 0x65, 0xb9, 0xcc, 0x8f, 0x3b, 0x92, 0xc, 0x10, 0xf0, 0xe7, 0x77, 0xef, 0xe2, 0x2, 0x65, 0x25, 0x1, 0x0, 0xee, 0xb3, 0xae, 0xa8, 0xce, 0x6d, 0xa7, 0x24}, {0x40, 0xc7, 0xc0, 0xdf, 0xb2, 0x22, 0x45, 0xa, 0x7, 0xa4, 0xc9, 0x40, 0x7f, 0x6e, 0xd0, 0x10, 0x68, 0xf6, 0xcf, 0x78, 0x41, 0x14, 0xcf, 0xc6, 0x90, 0x37, 0xa4, 0x18, 0x25, 0x7b, 0x60, 0x5e}, }, { {0x14, 0xcf, 0x96, 0xa5, 0x1c, 0x43, 0x2c, 0xa0, 0x0, 0xe4, 0xd3, 0xae, 0x40, 0x2d, 0xc4, 0xe3, 0xdb, 0x26, 0xf, 0x2e, 0x80, 0x26, 0x45, 0xd2, 0x68, 0x70, 0x45, 0x9e, 0x13, 0x33, 0x1f, 0x20}, {0x18, 0x18, 0xdf, 0x6c, 0x8f, 0x1d, 0xb3, 0x58, 0xa2, 0x58, 0x62, 0xc3, 0x4f, 0xa7, 0xcf, 0x35, 0x6e, 0x1d, 0xe6, 0x66, 0x4f, 0xff, 0xb3, 0xe1, 0xf7, 0xd5, 0xcd, 0x6c, 0xab, 0xac, 
0x67, 0x50}, {0x51, 0x9d, 0x3, 0x8, 0x6b, 0x7f, 0x52, 0xfd, 0x6, 0x0, 0x7c, 0x1, 0x64, 0x49, 0xb1, 0x18, 0xa8, 0xa4, 0x25, 0x2e, 0xb0, 0xe, 0x22, 0xd5, 0x75, 0x3, 0x46, 0x62, 0x88, 0xba, 0x7c, 0x39}, }, { {0xe7, 0x79, 0x13, 0xc8, 0xfb, 0xc3, 0x15, 0x78, 0xf1, 0x2a, 0xe1, 0xdd, 0x20, 0x94, 0x61, 0xa6, 0xd5, 0xfd, 0xa8, 0x85, 0xf8, 0xc0, 0xa9, 0xff, 0x52, 0xc2, 0xe1, 0xc1, 0x22, 0x40, 0x1b, 0x77}, {0xb2, 0x59, 0x59, 0xf0, 0x93, 0x30, 0xc1, 0x30, 0x76, 0x79, 0xa9, 0xe9, 0x8d, 0xa1, 0x3a, 0xe2, 0x26, 0x5e, 0x1d, 0x72, 0x91, 0xd4, 0x2f, 0x22, 0x3a, 0x6c, 0x6e, 0x76, 0x20, 0xd3, 0x39, 0x23}, {0xa7, 0x2f, 0x3a, 0x51, 0x86, 0xd9, 0x7d, 0xd8, 0x8, 0xcf, 0xd4, 0xf9, 0x71, 0x9b, 0xac, 0xf5, 0xb3, 0x83, 0xa2, 0x1e, 0x1b, 0xc3, 0x6b, 0xd0, 0x76, 0x1a, 0x97, 0x19, 0x92, 0x18, 0x1a, 0x33}, }, { {0xaf, 0x72, 0x75, 0x9d, 0x3a, 0x2f, 0x51, 0x26, 0x9e, 0x4a, 0x7, 0x68, 0x88, 0xe2, 0xcb, 0x5b, 0xc4, 0xf7, 0x80, 0x11, 0xc1, 0xc1, 0xed, 0x84, 0x7b, 0xa6, 0x49, 0xf6, 0x9f, 0x61, 0xc9, 0x1a}, {0xc6, 0x80, 0x4f, 0xfb, 0x45, 0x6f, 0x16, 0xf5, 0xcf, 0x75, 0xc7, 0x61, 0xde, 0xc7, 0x36, 0x9c, 0x1c, 0xd9, 0x41, 0x90, 0x1b, 0xe8, 0xd4, 0xe3, 0x21, 0xfe, 0xbd, 0x83, 0x6b, 0x7c, 0x16, 0x31}, {0x68, 0x10, 0x4b, 0x52, 0x42, 0x38, 0x2b, 0xf2, 0x87, 0xe9, 0x9c, 0xee, 0x3b, 0x34, 0x68, 0x50, 0xc8, 0x50, 0x62, 0x4a, 0x84, 0x71, 0x9d, 0xfc, 0x11, 0xb1, 0x8, 0x1f, 0x34, 0x36, 0x24, 0x61}, }, { {0x38, 0x26, 0x2d, 0x1a, 0xe3, 0x49, 0x63, 0x8b, 0x35, 0xfd, 0xd3, 0x9b, 0x0, 0xb7, 0xdf, 0x9d, 0xa4, 0x6b, 0xa0, 0xa3, 0xb8, 0xf1, 0x8b, 0x7f, 0x45, 0x4, 0xd9, 0x78, 0x31, 0xaa, 0x22, 0x15}, {0x8d, 0x89, 0x4e, 0x87, 0xdb, 0x41, 0x9d, 0xd9, 0x20, 0xdc, 0x7, 0x6c, 0xf1, 0xa5, 0xfe, 0x9, 0xbc, 0x9b, 0xf, 0xd0, 0x67, 0x2c, 0x3d, 0x79, 0x40, 0xff, 0x5e, 0x9e, 0x30, 0xe2, 0xeb, 0x46}, {0x38, 0x49, 0x61, 0x69, 0x53, 0x2f, 0x38, 0x2c, 0x10, 0x6d, 0x2d, 0xb7, 0x9a, 0x40, 0xfe, 0xda, 0x27, 0xf2, 0x46, 0xb6, 0x91, 0x33, 0xc8, 0xe8, 0x6c, 0x30, 0x24, 0x5, 0xf5, 0x70, 0xfe, 0x45}, }, { {0x91, 0x14, 0x95, 0xc8, 0x20, 0x49, 0xf2, 
0x62, 0xa2, 0xc, 0x63, 0x3f, 0xc8, 0x7, 0xf0, 0x5, 0xb8, 0xd4, 0xc9, 0xf5, 0xd2, 0x45, 0xbb, 0x6f, 0x45, 0x22, 0x7a, 0xb5, 0x6d, 0x9f, 0x61, 0x16}, {0x8c, 0xb, 0xc, 0x96, 0xa6, 0x75, 0x48, 0xda, 0x20, 0x2f, 0xe, 0xef, 0x76, 0xd0, 0x68, 0x5b, 0xd4, 0x8f, 0xb, 0x3d, 0xcf, 0x51, 0xfb, 0x7, 0xd4, 0x92, 0xe3, 0xa0, 0x23, 0x16, 0x8d, 0x42}, {0xfd, 0x8, 0xa3, 0x1, 0x44, 0x4a, 0x4f, 0x8, 0xac, 0xca, 0xa5, 0x76, 0xc3, 0x19, 0x22, 0xa8, 0x7d, 0xbc, 0xd1, 0x43, 0x46, 0xde, 0xb8, 0xde, 0xc6, 0x38, 0xbd, 0x60, 0x2d, 0x59, 0x81, 0x1d}, }, }, { { {0xe8, 0xc5, 0x85, 0x7b, 0x9f, 0xb6, 0x65, 0x87, 0xb2, 0xba, 0x68, 0xd1, 0x8b, 0x67, 0xf0, 0x6f, 0x9b, 0xf, 0x33, 0x1d, 0x7c, 0xe7, 0x70, 0x3a, 0x7c, 0x8e, 0xaf, 0xb0, 0x51, 0x6d, 0x5f, 0x3a}, {0x5f, 0xac, 0xd, 0xa6, 0x56, 0x87, 0x36, 0x61, 0x57, 0xdc, 0xab, 0xeb, 0x6a, 0x2f, 0xe0, 0x17, 0x7d, 0xf, 0xce, 0x4c, 0x2d, 0x3f, 0x19, 0x7f, 0xf0, 0xdc, 0xec, 0x89, 0x77, 0x4a, 0x23, 0x20}, {0x52, 0xb2, 0x78, 0x71, 0xb6, 0xd, 0xd2, 0x76, 0x60, 0xd1, 0x1e, 0xd5, 0xf9, 0x34, 0x1c, 0x7, 0x70, 0x11, 0xe4, 0xb3, 0x20, 0x4a, 0x2a, 0xf6, 0x66, 0xe3, 0xff, 0x3c, 0x35, 0x82, 0xd6, 0x7c}, }, { {0xf3, 0xf4, 0xac, 0x68, 0x60, 0xcd, 0x65, 0xa6, 0xd3, 0xe3, 0xd7, 0x3c, 0x18, 0x2d, 0xd9, 0x42, 0xd9, 0x25, 0x60, 0x33, 0x9d, 0x38, 0x59, 0x57, 0xff, 0xd8, 0x2c, 0x2b, 0x3b, 0x25, 0xf0, 0x3e}, {0xb6, 0xfa, 0x87, 0xd8, 0x5b, 0xa4, 0xe1, 0xb, 0x6e, 0x3b, 0x40, 0xba, 0x32, 0x6a, 0x84, 0x2a, 0x0, 0x60, 0x6e, 0xe9, 0x12, 0x10, 0x92, 0xd9, 0x43, 0x9, 0xdc, 0x3b, 0x86, 0xc8, 0x38, 0x28}, {0x30, 0x50, 0x46, 0x4a, 0xcf, 0xb0, 0x6b, 0xd1, 0xab, 0x77, 0xc5, 0x15, 0x41, 0x6b, 0x49, 0xfa, 0x9d, 0x41, 0xab, 0xf4, 0x8a, 0xae, 0xcf, 0x82, 0x12, 0x28, 0xa8, 0x6, 0xa6, 0xb8, 0xdc, 0x21}, }, { {0xba, 0x31, 0x77, 0xbe, 0xfa, 0x0, 0x8d, 0x9a, 0x89, 0x18, 0x9e, 0x62, 0x7e, 0x60, 0x3, 0x82, 0x7f, 0xd9, 0xf3, 0x43, 0x37, 0x2, 0xcc, 0xb2, 0x8b, 0x67, 0x6f, 0x6c, 0xbf, 0xd, 0x84, 0x5d}, {0xc8, 0x9f, 0x9d, 0x8c, 0x46, 0x4, 0x60, 0x5c, 0xcb, 0xa3, 0x2a, 0xd4, 0x6e, 0x9, 0x40, 0x25, 0x9c, 0x2f, 
0xee, 0x12, 0x4c, 0x4d, 0x5b, 0x12, 0xab, 0x1d, 0xa3, 0x94, 0x81, 0xd0, 0xc3, 0xb}, {0x8b, 0xe1, 0x9f, 0x30, 0xd, 0x38, 0x6e, 0x70, 0xc7, 0x65, 0xe1, 0xb9, 0xa6, 0x2d, 0xb0, 0x6e, 0xab, 0x20, 0xae, 0x7d, 0x99, 0xba, 0xbb, 0x57, 0xdd, 0x96, 0xc1, 0x2a, 0x23, 0x76, 0x42, 0x3a}, }, { {0xcb, 0x7e, 0x44, 0xdb, 0x72, 0xc1, 0xf8, 0x3b, 0xbd, 0x2d, 0x28, 0xc6, 0x1f, 0xc4, 0xcf, 0x5f, 0xfe, 0x15, 0xaa, 0x75, 0xc0, 0xff, 0xac, 0x80, 0xf9, 0xa9, 0xe1, 0x24, 0xe8, 0xc9, 0x70, 0x7}, {0xfa, 0x84, 0x70, 0x8a, 0x2c, 0x43, 0x42, 0x4b, 0x45, 0xe5, 0xb9, 0xdf, 0xe3, 0x19, 0x8a, 0x89, 0x5d, 0xe4, 0x58, 0x9c, 0x21, 0x0, 0x9f, 0xbe, 0xd1, 0xeb, 0x6d, 0xa1, 0xce, 0x77, 0xf1, 0x1f}, {0xfd, 0xb5, 0xb5, 0x45, 0x9a, 0xd9, 0x61, 0xcf, 0x24, 0x79, 0x3a, 0x1b, 0xe9, 0x84, 0x9, 0x86, 0x89, 0x3e, 0x3e, 0x30, 0x19, 0x9, 0x30, 0xe7, 0x1e, 0xb, 0x50, 0x41, 0xfd, 0x64, 0xf2, 0x39}, }, { {0xe1, 0x7b, 0x9, 0xfe, 0xab, 0x4a, 0x9b, 0xd1, 0x29, 0x19, 0xe0, 0xdf, 0xe1, 0xfc, 0x6d, 0xa4, 0xff, 0xf1, 0xa6, 0x2c, 0x94, 0x8, 0xc9, 0xc3, 0x4e, 0xf1, 0x35, 0x2c, 0x27, 0x21, 0xc6, 0x65}, {0x9c, 0xe2, 0xe7, 0xdb, 0x17, 0x34, 0xad, 0xa7, 0x9c, 0x13, 0x9c, 0x2b, 0x6a, 0x37, 0x94, 0xbd, 0xa9, 0x7b, 0x59, 0x93, 0x8e, 0x1b, 0xe9, 0xa0, 0x40, 0x98, 0x88, 0x68, 0x34, 0xd7, 0x12, 0x17}, {0xdd, 0x93, 0x31, 0xce, 0xf8, 0x89, 0x2b, 0xe7, 0xbb, 0xc0, 0x25, 0xa1, 0x56, 0x33, 0x10, 0x4d, 0x83, 0xfe, 0x1c, 0x2e, 0x3d, 0xa9, 0x19, 0x4, 0x72, 0xe2, 0x9c, 0xb1, 0xa, 0x80, 0xf9, 0x22}, }, { {0xac, 0xfd, 0x6e, 0x9a, 0xdd, 0x9f, 0x2, 0x42, 0x41, 0x49, 0xa5, 0x34, 0xbe, 0xce, 0x12, 0xb9, 0x7b, 0xf3, 0xbd, 0x87, 0xb9, 0x64, 0xf, 0x64, 0xb4, 0xca, 0x98, 0x85, 0xd3, 0xa4, 0x71, 0x41}, {0xcb, 0xf8, 0x9e, 0x3e, 0x8a, 0x36, 0x5a, 0x60, 0x15, 0x47, 0x50, 0xa5, 0x22, 0xc0, 0xe9, 0xe3, 0x8f, 0x24, 0x24, 0x5f, 0xb0, 0x48, 0x3d, 0x55, 0xe5, 0x26, 0x76, 0x64, 0xcd, 0x16, 0xf4, 0x13}, {0x8c, 0x4c, 0xc9, 0x99, 0xaa, 0x58, 0x27, 0xfa, 0x7, 0xb8, 0x0, 0xb0, 0x6f, 0x6f, 0x0, 0x23, 0x92, 0x53, 0xda, 0xad, 0xdd, 0x91, 0xd2, 0xfb, 0xab, 0xd1, 0x4b, 0x57, 
0xfa, 0x14, 0x82, 0x50}, }, { {0xd6, 0x3, 0xd0, 0x53, 0xbb, 0x15, 0x1a, 0x46, 0x65, 0xc9, 0xf3, 0xbc, 0x88, 0x28, 0x10, 0xb2, 0x5a, 0x3a, 0x68, 0x6c, 0x75, 0x76, 0xc5, 0x27, 0x47, 0xb4, 0x6c, 0xc8, 0xa4, 0x58, 0x77, 0x3a}, {0x4b, 0xfe, 0xd6, 0x3e, 0x15, 0x69, 0x2, 0xc2, 0xc4, 0x77, 0x1d, 0x51, 0x39, 0x67, 0x5a, 0xa6, 0x94, 0xaf, 0x14, 0x2c, 0x46, 0x26, 0xde, 0xcb, 0x4b, 0xa7, 0xab, 0x6f, 0xec, 0x60, 0xf9, 0x22}, {0x76, 0x50, 0xae, 0x93, 0xf6, 0x11, 0x81, 0x54, 0xa6, 0x54, 0xfd, 0x1d, 0xdf, 0x21, 0xae, 0x1d, 0x65, 0x5e, 0x11, 0xf3, 0x90, 0x8c, 0x24, 0x12, 0x94, 0xf4, 0xe7, 0x8d, 0x5f, 0xd1, 0x9f, 0x5d}, }, { {0x1e, 0x52, 0xd7, 0xee, 0x2a, 0x4d, 0x24, 0x3f, 0x15, 0x96, 0x2e, 0x43, 0x28, 0x90, 0x3a, 0x8e, 0xd4, 0x16, 0x9c, 0x2e, 0x77, 0xba, 0x64, 0xe1, 0xd8, 0x98, 0xeb, 0x47, 0xfa, 0x87, 0xc1, 0x3b}, {0x7f, 0x72, 0x63, 0x6d, 0xd3, 0x8, 0x14, 0x3, 0x33, 0xb5, 0xc7, 0xd7, 0xef, 0x9a, 0x37, 0x6a, 0x4b, 0xe2, 0xae, 0xcc, 0xc5, 0x8f, 0xe1, 0xa9, 0xd3, 0xbe, 0x8f, 0x4f, 0x91, 0x35, 0x2f, 0x33}, {0xc, 0xc2, 0x86, 0xea, 0x15, 0x1, 0x47, 0x6d, 0x25, 0xd1, 0x46, 0x6c, 0xcb, 0xb7, 0x8a, 0x99, 0x88, 0x1, 0x66, 0x3a, 0xb5, 0x32, 0x78, 0xd7, 0x3, 0xba, 0x6f, 0x90, 0xce, 0x81, 0xd, 0x45}, }, }, { { {0x3f, 0x74, 0xae, 0x1c, 0x96, 0xd8, 0x74, 0xd0, 0xed, 0x63, 0x1c, 0xee, 0xf5, 0x18, 0x6d, 0xf8, 0x29, 0xed, 0xf4, 0xe7, 0x5b, 0xc5, 0xbd, 0x97, 0x8, 0xb1, 0x3a, 0x66, 0x79, 0xd2, 0xba, 0x4c}, {0x75, 0x52, 0x20, 0xa6, 0xa1, 0xb6, 0x7b, 0x6e, 0x83, 0x8e, 0x3c, 0x41, 0xd7, 0x21, 0x4f, 0xaa, 0xb2, 0x5c, 0x8f, 0xe8, 0x55, 0xd1, 0x56, 0x6f, 0xe1, 0x5b, 0x34, 0xa6, 0x4b, 0x5d, 0xe2, 0x2d}, {0xcd, 0x1f, 0xd7, 0xa0, 0x24, 0x90, 0xd1, 0x80, 0xf8, 0x8a, 0x28, 0xfb, 0xa, 0xc2, 0x25, 0xc5, 0x19, 0x64, 0x3a, 0x5f, 0x4b, 0x97, 0xa3, 0xb1, 0x33, 0x72, 0x0, 0xe2, 0xef, 0xbc, 0x7f, 0x7d}, }, { {0x94, 0x90, 0xc2, 0xf3, 0xc5, 0x5d, 0x7c, 0xcd, 0xab, 0x5, 0x91, 0x2a, 0x9a, 0xa2, 0x81, 0xc7, 0x58, 0x30, 0x1c, 0x42, 0x36, 0x1d, 0xc6, 0x80, 0xd7, 0xd4, 0xd8, 0xdc, 0x96, 0xd1, 0x9c, 0x4f}, {0x1, 0x28, 0x6b, 
0x26, 0x6a, 0x1e, 0xef, 0xfa, 0x16, 0x9f, 0x73, 0xd5, 0xc4, 0x68, 0x6c, 0x86, 0x2c, 0x76, 0x3, 0x1b, 0xbc, 0x2f, 0x8a, 0xf6, 0x8d, 0x5a, 0xb7, 0x87, 0x5e, 0x43, 0x75, 0x59}, {0x68, 0x37, 0x7b, 0x6a, 0xd8, 0x97, 0x92, 0x19, 0x63, 0x7a, 0xd1, 0x1a, 0x24, 0x58, 0xd0, 0xd0, 0x17, 0xc, 0x1c, 0x5c, 0xad, 0x9c, 0x2, 0xba, 0x7, 0x3, 0x7a, 0x38, 0x84, 0xd0, 0xcd, 0x7c}, }, { {0x93, 0xcc, 0x60, 0x67, 0x18, 0x84, 0xc, 0x9b, 0x99, 0x2a, 0xb3, 0x1a, 0x7a, 0x0, 0xae, 0xcd, 0x18, 0xda, 0xb, 0x62, 0x86, 0xec, 0x8d, 0xa8, 0x44, 0xca, 0x90, 0x81, 0x84, 0xca, 0x93, 0x35}, {0x17, 0x4, 0x26, 0x6d, 0x2c, 0x42, 0xa6, 0xdc, 0xbd, 0x40, 0x82, 0x94, 0x50, 0x3d, 0x15, 0xae, 0x77, 0xc6, 0x68, 0xfb, 0xb4, 0xc1, 0xc0, 0xa9, 0x53, 0xcf, 0xd0, 0x61, 0xed, 0xd0, 0x8b, 0x42}, {0xa7, 0x9a, 0x84, 0x5e, 0x9a, 0x18, 0x13, 0x92, 0xcd, 0xfa, 0xd8, 0x65, 0x35, 0xc3, 0xd8, 0xd4, 0xd1, 0xbb, 0xfd, 0x53, 0x5b, 0x54, 0x52, 0x8c, 0xe6, 0x63, 0x2d, 0xda, 0x8, 0x83, 0x39, 0x27}, }, { {0x53, 0x24, 0x70, 0xa, 0x4c, 0xe, 0xa1, 0xb9, 0xde, 0x1b, 0x7d, 0xd5, 0x66, 0x58, 0xa2, 0xf, 0xf7, 0xda, 0x27, 0xcd, 0xb5, 0xd9, 0xb9, 0xff, 0xfd, 0x33, 0x2c, 0x49, 0x45, 0x29, 0x2c, 0x57}, {0x13, 0xd4, 0x5e, 0x43, 0x28, 0x8d, 0xc3, 0x42, 0xc9, 0xcc, 0x78, 0x32, 0x60, 0xf3, 0x50, 0xbd, 0xef, 0x3, 0xda, 0x79, 0x1a, 0xab, 0x7, 0xbb, 0x55, 0x33, 0x8c, 0xbe, 0xae, 0x97, 0x95, 0x26}, {0xbe, 0x30, 0xcd, 0xd6, 0x45, 0xc7, 0x7f, 0xc7, 0xfb, 0xae, 0xba, 0xe3, 0xd3, 0xe8, 0xdf, 0xe4, 0xc, 0xda, 0x5d, 0xaa, 0x30, 0x88, 0x2c, 0xa2, 0x80, 0xca, 0x5b, 0xc0, 0x98, 0x54, 0x98, 0x7f}, }, { {0x63, 0x63, 0xbf, 0xf, 0x52, 0x15, 0x56, 0xd3, 0xa6, 0xfb, 0x4d, 0xcf, 0x45, 0x5a, 0x4, 0x8, 0xc2, 0xa0, 0x3f, 0x87, 0xbc, 0x4f, 0xc2, 0xee, 0xe7, 0x12, 0x9b, 0xd6, 0x3c, 0x65, 0xf2, 0x30}, {0x17, 0xe1, 0xb, 0x9f, 0x88, 0xce, 0x49, 0x38, 0x88, 0xa2, 0x54, 0x7b, 0x1b, 0xad, 0x5, 0x80, 0x1c, 0x92, 0xfc, 0x23, 0x9f, 0xc3, 0xa3, 0x3d, 0x4, 0xf3, 0x31, 0xa, 0x47, 0xec, 0xc2, 0x76}, {0x85, 0xc, 0xc1, 0xaa, 0x38, 0xc9, 0x8, 0x8a, 0xcb, 0x6b, 0x27, 0xdb, 0x60, 0x9b, 
0x17, 0x46, 0x70, 0xac, 0x6f, 0xe, 0x1e, 0xc0, 0x20, 0xa9, 0xda, 0x73, 0x64, 0x59, 0xf1, 0x73, 0x12, 0x2f}, }, { {0xc0, 0xb, 0xa7, 0x55, 0xd7, 0x8b, 0x48, 0x30, 0xe7, 0x42, 0xd4, 0xf1, 0xa4, 0xb5, 0xd6, 0x6, 0x62, 0x61, 0x59, 0xbc, 0x9e, 0xa6, 0xd1, 0xea, 0x84, 0xf7, 0xc5, 0xed, 0x97, 0x19, 0xac, 0x38}, {0x11, 0x1e, 0xe0, 0x8a, 0x7c, 0xfc, 0x39, 0x47, 0x9f, 0xab, 0x6a, 0x4a, 0x90, 0x74, 0x52, 0xfd, 0x2e, 0x8f, 0x72, 0x87, 0x82, 0x8a, 0xd9, 0x41, 0xf2, 0x69, 0x5b, 0xd8, 0x2a, 0x57, 0x9e, 0x5d}, {0x3b, 0xb1, 0x51, 0xa7, 0x17, 0xb5, 0x66, 0x6, 0x8c, 0x85, 0x9b, 0x7e, 0x86, 0x6, 0x7d, 0x74, 0x49, 0xde, 0x4d, 0x45, 0x11, 0xc0, 0xac, 0xac, 0x9c, 0xe6, 0xe9, 0xbf, 0x9c, 0xcd, 0xdf, 0x22}, }, { {0xa1, 0xe0, 0x3b, 0x10, 0xb4, 0x59, 0xec, 0x56, 0x69, 0xf9, 0x59, 0xd2, 0xec, 0xba, 0xe3, 0x2e, 0x32, 0xcd, 0xf5, 0x13, 0x94, 0xb2, 0x7c, 0x79, 0x72, 0xe4, 0xcd, 0x24, 0x78, 0x87, 0xe9, 0xf}, {0xd9, 0xc, 0xd, 0xc3, 0xe0, 0xd2, 0xdb, 0x8d, 0x33, 0x43, 0xbb, 0xac, 0x5f, 0x66, 0x8e, 0xad, 0x1f, 0x96, 0x2a, 0x32, 0x8c, 0x25, 0x6b, 0x8f, 0xc7, 0xc1, 0x48, 0x54, 0xc0, 0x16, 0x29, 0x6b}, {0x3b, 0x91, 0xba, 0xa, 0xd1, 0x34, 0xdb, 0x7e, 0xe, 0xac, 0x6d, 0x2e, 0x82, 0xcd, 0xa3, 0x4e, 0x15, 0xf8, 0x78, 0x65, 0xff, 0x3d, 0x8, 0x66, 0x17, 0xa, 0xf0, 0x7f, 0x30, 0x3f, 0x30, 0x4c}, }, { {0x0, 0x45, 0xd9, 0xd, 0x58, 0x3, 0xfc, 0x29, 0x93, 0xec, 0xbb, 0x6f, 0xa4, 0x7a, 0xd2, 0xec, 0xf8, 0xa7, 0xe2, 0xc2, 0x5f, 0x15, 0xa, 0x13, 0xd5, 0xa1, 0x6, 0xb7, 0x1a, 0x15, 0x6b, 0x41}, {0x85, 0x8c, 0xb2, 0x17, 0xd6, 0x3b, 0xa, 0xd3, 0xea, 0x3b, 0x77, 0x39, 0xb7, 0x77, 0xd3, 0xc5, 0xbf, 0x5c, 0x6a, 0x1e, 0x8c, 0xe7, 0xc6, 0xc6, 0xc4, 0xb7, 0x2a, 0x8b, 0xf7, 0xb8, 0x61, 0xd}, {0xb0, 0x36, 0xc1, 0xe9, 0xef, 0xd7, 0xa8, 0x56, 0x20, 0x4b, 0xe4, 0x58, 0xcd, 0xe5, 0x7, 0xbd, 0xab, 0xe0, 0x57, 0x1b, 0xda, 0x2f, 0xe6, 0xaf, 0xd2, 0xe8, 0x77, 0x42, 0xf7, 0x2a, 0x1a, 0x19}, }, }, { { {0xfb, 0xe, 0x46, 0x4f, 0x43, 0x2b, 0xe6, 0x9f, 0xd6, 0x7, 0x36, 0xa6, 0xd4, 0x3, 0xd3, 0xde, 0x24, 0xda, 0xa0, 0xb7, 0xe, 0x21, 0x52, 
0xf0, 0x93, 0x5b, 0x54, 0x0, 0xbe, 0x7d, 0x7e, 0x23}, {0x31, 0x14, 0x3c, 0xc5, 0x4b, 0xf7, 0x16, 0xce, 0xde, 0xed, 0x72, 0x20, 0xce, 0x25, 0x97, 0x2b, 0xe7, 0x3e, 0xb2, 0xb5, 0x6f, 0xc3, 0xb9, 0xb8, 0x8, 0xc9, 0x5c, 0xb, 0x45, 0xe, 0x2e, 0x7e}, {0x30, 0xb4, 0x1, 0x67, 0xed, 0x75, 0x35, 0x1, 0x10, 0xfd, 0xb, 0x9f, 0xe6, 0x94, 0x10, 0x23, 0x22, 0x7f, 0xe4, 0x83, 0x15, 0xf, 0x32, 0x75, 0xe3, 0x55, 0x11, 0xb1, 0x99, 0xa6, 0xaf, 0x71}, }, { {0xd6, 0x50, 0x3b, 0x47, 0x1c, 0x3c, 0x42, 0xea, 0x10, 0xef, 0x38, 0x3b, 0x1f, 0x7a, 0xe8, 0x51, 0x95, 0xbe, 0xc9, 0xb2, 0x5f, 0xbf, 0x84, 0x9b, 0x1c, 0x9a, 0xf8, 0x78, 0xbc, 0x1f, 0x73, 0x0}, {0x1d, 0xb6, 0x53, 0x39, 0x9b, 0x6f, 0xce, 0x65, 0xe6, 0x41, 0xa1, 0xaf, 0xea, 0x39, 0x58, 0xc6, 0xfe, 0x59, 0xf7, 0xa9, 0xfd, 0x5f, 0x43, 0xf, 0x8e, 0xc2, 0xb1, 0xc2, 0xe9, 0x42, 0x11, 0x2}, {0x80, 0x18, 0xf8, 0x48, 0x18, 0xc7, 0x30, 0xe4, 0x19, 0xc1, 0xce, 0x5e, 0x22, 0xc, 0x96, 0xbf, 0xe3, 0x15, 0xba, 0x6b, 0x83, 0xe0, 0xda, 0xb6, 0x8, 0x58, 0xe1, 0x47, 0x33, 0x6f, 0x4d, 0x4c}, }, { {0x70, 0x19, 0x8f, 0x98, 0xfc, 0xdd, 0xc, 0x2f, 0x1b, 0xf5, 0xb9, 0xb0, 0x27, 0x62, 0x91, 0x6b, 0xbe, 0x76, 0x91, 0x77, 0xc4, 0xb6, 0xc7, 0x6e, 0xa8, 0x9f, 0x8f, 0xa8, 0x0, 0x95, 0xbf, 0x38}, {0xc9, 0x1f, 0x7d, 0xc1, 0xcf, 0xec, 0xf7, 0x18, 0x14, 0x3c, 0x40, 0x51, 0xa6, 0xf5, 0x75, 0x6c, 0xdf, 0xc, 0xee, 0xf7, 0x2b, 0x71, 0xde, 0xdb, 0x22, 0x7a, 0xe4, 0xa7, 0xaa, 0xdd, 0x3f, 0x19}, {0x6f, 0x87, 0xe8, 0x37, 0x3c, 0xc9, 0xd2, 0x1f, 0x2c, 0x46, 0xd1, 0x18, 0x5a, 0x1e, 0xf6, 0xa2, 0x76, 0x12, 0x24, 0x39, 0x82, 0xf5, 0x80, 0x50, 0x69, 0x49, 0xd, 0xbf, 0x9e, 0xb9, 0x6f, 0x6a}, }, { {0xc6, 0x23, 0xe4, 0xb6, 0xb5, 0x22, 0xb1, 0xee, 0x8e, 0xff, 0x86, 0xf2, 0x10, 0x70, 0x9d, 0x93, 0x8c, 0x5d, 0xcf, 0x1d, 0x83, 0x2a, 0xa9, 0x90, 0x10, 0xeb, 0xc5, 0x42, 0x9f, 0xda, 0x6f, 0x13}, {0xeb, 0x55, 0x8, 0x56, 0xbb, 0xc1, 0x46, 0x6a, 0x9d, 0xf0, 0x93, 0xf8, 0x38, 0xbb, 0x16, 0x24, 0xc1, 0xac, 0x71, 0x8f, 0x37, 0x11, 0x1d, 0xd7, 0xea, 0x96, 0x18, 0xa3, 0x14, 0x69, 0xf7, 0x75}, {0xd1, 
0xbd, 0x5, 0xa3, 0xb1, 0xdf, 0x4c, 0xf9, 0x8, 0x2c, 0xf8, 0x9f, 0x9d, 0x4b, 0x36, 0xf, 0x8a, 0x58, 0xbb, 0xc3, 0xa5, 0xd8, 0x87, 0x2a, 0xba, 0xdc, 0xe8, 0xb, 0x51, 0x83, 0x21, 0x2}, }, { {0x7f, 0x7a, 0x30, 0x43, 0x1, 0x71, 0x5a, 0x9d, 0x5f, 0xa4, 0x7d, 0xc4, 0x9e, 0xde, 0x63, 0xb0, 0xd3, 0x7a, 0x92, 0xbe, 0x52, 0xfe, 0xbb, 0x22, 0x6c, 0x42, 0x40, 0xfd, 0x41, 0xc4, 0x87, 0x13}, {0x14, 0x2d, 0xad, 0x5e, 0x38, 0x66, 0xf7, 0x4a, 0x30, 0x58, 0x7c, 0xca, 0x80, 0xd8, 0x8e, 0xa0, 0x3d, 0x1e, 0x21, 0x10, 0xe6, 0xa6, 0x13, 0xd, 0x3, 0x6c, 0x80, 0x7b, 0xe1, 0x1c, 0x7, 0x6a}, {0xf8, 0x8a, 0x97, 0x87, 0xd1, 0xc3, 0xd3, 0xb5, 0x13, 0x44, 0xe, 0x7f, 0x3d, 0x5a, 0x2b, 0x72, 0xa0, 0x7c, 0x47, 0xbb, 0x48, 0x48, 0x7b, 0xd, 0x92, 0xdc, 0x1e, 0xaf, 0x6a, 0xb2, 0x71, 0x31}, }, { {0xd1, 0x47, 0x8a, 0xb2, 0xd8, 0xb7, 0xd, 0xa6, 0xf1, 0xa4, 0x70, 0x17, 0xd6, 0x14, 0xbf, 0xa6, 0x58, 0xbd, 0xdd, 0x53, 0x93, 0xf8, 0xa1, 0xd4, 0xe9, 0x43, 0x42, 0x34, 0x63, 0x4a, 0x51, 0x6c}, {0xa8, 0x4c, 0x56, 0x97, 0x90, 0x31, 0x2f, 0xa9, 0x19, 0xe1, 0x75, 0x22, 0x4c, 0xb8, 0x7b, 0xff, 0x50, 0x51, 0x87, 0xa4, 0x37, 0xfe, 0x55, 0x4f, 0x5a, 0x83, 0xf0, 0x3c, 0x87, 0xd4, 0x1f, 0x22}, {0x41, 0x63, 0x15, 0x3a, 0x4f, 0x20, 0x22, 0x23, 0x2d, 0x3, 0xa, 0xba, 0xe9, 0xe0, 0x73, 0xfb, 0xe, 0x3, 0xf, 0x41, 0x4c, 0xdd, 0xe0, 0xfc, 0xaa, 0x4a, 0x92, 0xfb, 0x96, 0xa5, 0xda, 0x48}, }, { {0x93, 0x97, 0x4c, 0xc8, 0x5d, 0x1d, 0xf6, 0x14, 0x6, 0x82, 0x41, 0xef, 0xe3, 0xf9, 0x41, 0x99, 0xac, 0x77, 0x62, 0x34, 0x8f, 0xb8, 0xf5, 0xcd, 0xa9, 0x79, 0x8a, 0xe, 0xfa, 0x37, 0xc8, 0x58}, {0xc7, 0x9c, 0xa5, 0x5c, 0x66, 0x8e, 0xca, 0x6e, 0xa0, 0xac, 0x38, 0x2e, 0x4b, 0x25, 0x47, 0xa8, 0xce, 0x17, 0x1e, 0xd2, 0x8, 0xc7, 0xaf, 0x31, 0xf7, 0x4a, 0xd8, 0xca, 0xfc, 0xd6, 0x6d, 0x67}, {0x58, 0x90, 0xfc, 0x96, 0x85, 0x68, 0xf9, 0xc, 0x1b, 0xa0, 0x56, 0x7b, 0xf3, 0xbb, 0xdc, 0x1d, 0x6a, 0xd6, 0x35, 0x49, 0x7d, 0xe7, 0xc2, 0xdc, 0xa, 0x7f, 0xa5, 0xc6, 0xf2, 0x73, 0x4f, 0x1c}, }, { {0x84, 0x34, 0x7c, 0xfc, 0x6e, 0x70, 0x6e, 0xb3, 0x61, 0xcf, 0xc1, 
0xc3, 0xb4, 0xc9, 0xdf, 0x73, 0xe5, 0xc7, 0x1c, 0x78, 0xc9, 0x79, 0x1d, 0xeb, 0x5c, 0x67, 0xaf, 0x7d, 0xdb, 0x9a, 0x45, 0x70}, {0xbb, 0xa0, 0x5f, 0x30, 0xbd, 0x4f, 0x7a, 0xe, 0xad, 0x63, 0xc6, 0x54, 0xe0, 0x4c, 0x9d, 0x82, 0x48, 0x38, 0xe3, 0x2f, 0x83, 0xc3, 0x21, 0xf4, 0x42, 0x4c, 0xf6, 0x1b, 0xd, 0xc8, 0x5a, 0x79}, {0xb3, 0x2b, 0xb4, 0x91, 0x49, 0xdb, 0x91, 0x1b, 0xca, 0xdc, 0x2, 0x4b, 0x23, 0x96, 0x26, 0x57, 0xdc, 0x78, 0x8c, 0x1f, 0xe5, 0x9e, 0xdf, 0x9f, 0xd3, 0x1f, 0xe2, 0x8c, 0x84, 0x62, 0xe1, 0x5f}, }, }, { { {0x8, 0xb2, 0x7c, 0x5d, 0x2d, 0x85, 0x79, 0x28, 0xe7, 0xf2, 0x7d, 0x68, 0x70, 0xdd, 0xde, 0xb8, 0x91, 0x78, 0x68, 0x21, 0xab, 0xff, 0xb, 0xdc, 0x35, 0xaa, 0x7d, 0x67, 0x43, 0xc0, 0x44, 0x2b}, {0x1a, 0x96, 0x94, 0xe1, 0x4f, 0x21, 0x59, 0x4e, 0x4f, 0xcd, 0x71, 0xd, 0xc7, 0x7d, 0xbe, 0x49, 0x2d, 0xf2, 0x50, 0x3b, 0xd2, 0xcf, 0x0, 0x93, 0x32, 0x72, 0x91, 0xfc, 0x46, 0xd4, 0x89, 0x47}, {0x8e, 0xb7, 0x4e, 0x7, 0xab, 0x87, 0x1c, 0x1a, 0x67, 0xf4, 0xda, 0x99, 0x8e, 0xd1, 0xc6, 0xfa, 0x67, 0x90, 0x4f, 0x48, 0xcd, 0xbb, 0xac, 0x3e, 0xe4, 0xa4, 0xb9, 0x2b, 0xef, 0x2e, 0xc5, 0x60}, }, { {0x11, 0x6d, 0xae, 0x7c, 0xc2, 0xc5, 0x2b, 0x70, 0xab, 0x8c, 0xa4, 0x54, 0x9b, 0x69, 0xc7, 0x44, 0xb2, 0x2e, 0x49, 0xba, 0x56, 0x40, 0xbc, 0xef, 0x6d, 0x67, 0xb6, 0xd9, 0x48, 0x72, 0xd7, 0x70}, {0xf1, 0x8b, 0xfd, 0x3b, 0xbc, 0x89, 0x5d, 0xb, 0x1a, 0x55, 0xf3, 0xc9, 0x37, 0x92, 0x6b, 0xb0, 0xf5, 0x28, 0x30, 0xd5, 0xb0, 0x16, 0x4c, 0xe, 0xab, 0xca, 0xcf, 0x2c, 0x31, 0x9c, 0xbc, 0x10}, {0x5b, 0xa0, 0xc2, 0x3e, 0x4b, 0xe8, 0x8a, 0xaa, 0xe0, 0x81, 0x17, 0xed, 0xf4, 0x9e, 0x69, 0x98, 0xd1, 0x85, 0x8e, 0x70, 0xe4, 0x13, 0x45, 0x79, 0x13, 0xf4, 0x76, 0xa9, 0xd3, 0x5b, 0x75, 0x63}, }, { {0xb7, 0xac, 0xf1, 0x97, 0x18, 0x10, 0xc7, 0x3d, 0xd8, 0xbb, 0x65, 0xc1, 0x5e, 0x7d, 0xda, 0x5d, 0xf, 0x2, 0xa1, 0xf, 0x9c, 0x5b, 0x8e, 0x50, 0x56, 0x2a, 0xc5, 0x37, 0x17, 0x75, 0x63, 0x27}, {0x53, 0x8, 0xd1, 0x2a, 0x3e, 0xa0, 0x5f, 0xb5, 0x69, 0x35, 0xe6, 0x9e, 0x90, 0x75, 0x6f, 0x35, 0x90, 0xb8, 0x69, 0xbe, 
0xfd, 0xf1, 0xf9, 0x9f, 0x84, 0x6f, 0xc1, 0x8b, 0xc4, 0xc1, 0x8c, 0xd}, {0xa9, 0x19, 0xb4, 0x6e, 0xd3, 0x2, 0x94, 0x2, 0xa5, 0x60, 0xb4, 0x77, 0x7e, 0x4e, 0xb4, 0xf0, 0x56, 0x49, 0x3c, 0xd4, 0x30, 0x62, 0xa8, 0xcf, 0xe7, 0x66, 0xd1, 0x7a, 0x8a, 0xdd, 0xc2, 0x70}, }, { {0x13, 0x7e, 0xed, 0xb8, 0x7d, 0x96, 0xd4, 0x91, 0x7a, 0x81, 0x76, 0xd7, 0xa, 0x2f, 0x25, 0x74, 0x64, 0x25, 0x85, 0xd, 0xe0, 0x82, 0x9, 0xe4, 0xe5, 0x3c, 0xa5, 0x16, 0x38, 0x61, 0xb8, 0x32}, {0xe, 0xec, 0x6f, 0x9f, 0x50, 0x94, 0x61, 0x65, 0x8d, 0x51, 0xc6, 0x46, 0xa9, 0x7e, 0x2e, 0xee, 0x5c, 0x9b, 0xe0, 0x67, 0xf3, 0xc1, 0x33, 0x97, 0x95, 0x84, 0x94, 0x63, 0x63, 0xac, 0xf, 0x2e}, {0x64, 0xcd, 0x48, 0xe4, 0xbe, 0xf7, 0xe7, 0x79, 0xd0, 0x86, 0x78, 0x8, 0x67, 0x3a, 0xc8, 0x6a, 0x2e, 0xdb, 0xe4, 0xa0, 0xd9, 0xd4, 0x9f, 0xf8, 0x41, 0x4f, 0x5a, 0x73, 0x5c, 0x21, 0x79, 0x41}, }, { {0x34, 0xcd, 0x6b, 0x28, 0xb9, 0x33, 0xae, 0xe4, 0xdc, 0xd6, 0x9d, 0x55, 0xb6, 0x7e, 0xef, 0xb7, 0x1f, 0x8e, 0xd3, 0xb3, 0x1f, 0x14, 0x8b, 0x27, 0x86, 0xc2, 0x41, 0x22, 0x66, 0x85, 0xfa, 0x31}, {0x2a, 0xed, 0xdc, 0xd7, 0xe7, 0x94, 0x70, 0x8c, 0x70, 0x9c, 0xd3, 0x47, 0xc3, 0x8a, 0xfb, 0x97, 0x2, 0xd9, 0x6, 0xa9, 0x33, 0xe0, 0x3b, 0xe1, 0x76, 0x9d, 0xd9, 0xc, 0xa3, 0x44, 0x3, 0x70}, {0xf4, 0x22, 0x36, 0x2e, 0x42, 0x6c, 0x82, 0xaf, 0x2d, 0x50, 0x33, 0x98, 0x87, 0x29, 0x20, 0xc1, 0x23, 0x91, 0x38, 0x2b, 0xe1, 0xb7, 0xc1, 0x9b, 0x89, 0x24, 0x95, 0xa9, 0x12, 0x23, 0xbb, 0x24}, }, { {0x6b, 0x5c, 0xf8, 0xf5, 0x2a, 0xc, 0xf8, 0x41, 0x94, 0x67, 0xfa, 0x4, 0xc3, 0x84, 0x72, 0x68, 0xad, 0x1b, 0xba, 0xa3, 0x99, 0xdf, 0x45, 0x89, 0x16, 0x5d, 0xeb, 0xff, 0xf9, 0x2a, 0x1d, 0xd}, {0xc3, 0x67, 0xde, 0x32, 0x17, 0xed, 0xa8, 0xb1, 0x48, 0x49, 0x1b, 0x46, 0x18, 0x94, 0xb4, 0x3c, 0xd2, 0xbc, 0xcf, 0x76, 0x43, 0x43, 0xbd, 0x8e, 0x8, 0x80, 0x18, 0x1e, 0x87, 0x3e, 0xee, 0xf}, {0xdf, 0x1e, 0x62, 0x32, 0xa1, 0x8a, 0xda, 0xa9, 0x79, 0x65, 0x22, 0x59, 0xa1, 0x22, 0xb8, 0x30, 0x93, 0xc1, 0x9a, 0xa7, 0x7b, 0x19, 0x4, 0x40, 0x76, 0x1d, 0x53, 0x18, 0x97, 0xd7, 
0xac, 0x16}, }, { {0xad, 0xb6, 0x87, 0x78, 0xc5, 0xc6, 0x59, 0xc9, 0xba, 0xfe, 0x90, 0x5f, 0xad, 0x9e, 0xe1, 0x94, 0x4, 0xf5, 0x42, 0xa3, 0x62, 0x4e, 0xe2, 0x16, 0x0, 0x17, 0x16, 0x18, 0x4b, 0xd3, 0x4e, 0x16}, {0x3d, 0x1d, 0x9b, 0x2d, 0xaf, 0x72, 0xdf, 0x72, 0x5a, 0x24, 0x32, 0xa4, 0x36, 0x2a, 0x46, 0x63, 0x37, 0x96, 0xb3, 0x16, 0x79, 0xa0, 0xce, 0x3e, 0x9, 0x23, 0x30, 0xb9, 0xf6, 0xe, 0x3e, 0x12}, {0x9a, 0xe6, 0x2f, 0x19, 0x4c, 0xd9, 0x7e, 0x48, 0x13, 0x15, 0x91, 0x3a, 0xea, 0x2c, 0xae, 0x61, 0x27, 0xde, 0xa4, 0xb9, 0xd3, 0xf6, 0x7b, 0x87, 0xeb, 0xf3, 0x73, 0x10, 0xc6, 0xf, 0xda, 0x78}, }, { {0x94, 0x3a, 0xc, 0x68, 0xf1, 0x80, 0x9f, 0xa2, 0xe6, 0xe7, 0xe9, 0x1a, 0x15, 0x7e, 0xf7, 0x71, 0x73, 0x79, 0x1, 0x48, 0x58, 0xf1, 0x0, 0x11, 0xdd, 0x8d, 0xb3, 0x16, 0xb3, 0xa4, 0x4a, 0x5}, {0x6a, 0xc6, 0x2b, 0xe5, 0x28, 0x5d, 0xf1, 0x5b, 0x8e, 0x1a, 0xf0, 0x70, 0x18, 0xe3, 0x47, 0x2c, 0xdd, 0x8b, 0xc2, 0x6, 0xbc, 0xaf, 0x19, 0x24, 0x3a, 0x17, 0x6b, 0x25, 0xeb, 0xde, 0x25, 0x2d}, {0xb8, 0x7c, 0x26, 0x19, 0x8d, 0x46, 0xc8, 0xdf, 0xaf, 0x4d, 0xe5, 0x66, 0x9c, 0x78, 0x28, 0xb, 0x17, 0xec, 0x6e, 0x66, 0x2a, 0x1d, 0xeb, 0x2a, 0x60, 0xa7, 0x7d, 0xab, 0xa6, 0x10, 0x46, 0x13}, }, }, { { {0x15, 0xf5, 0xd1, 0x77, 0xe7, 0x65, 0x2a, 0xcd, 0xf1, 0x60, 0xaa, 0x8f, 0x87, 0x91, 0x89, 0x54, 0xe5, 0x6, 0xbc, 0xda, 0xbc, 0x3b, 0xb7, 0xb1, 0xfb, 0xc9, 0x7c, 0xa9, 0xcb, 0x78, 0x48, 0x65}, {0xfe, 0xb0, 0xf6, 0x8d, 0xc7, 0x8e, 0x13, 0x51, 0x1b, 0xf5, 0x75, 0xe5, 0x89, 0xda, 0x97, 0x53, 0xb9, 0xf1, 0x7a, 0x71, 0x1d, 0x7a, 0x20, 0x9, 0x50, 0xd6, 0x20, 0x2b, 0xba, 0xfd, 0x2, 0x21}, {0xa1, 0xe6, 0x5c, 0x5, 0x5, 0xe4, 0x9e, 0x96, 0x29, 0xad, 0x51, 0x12, 0x68, 0xa7, 0xbc, 0x36, 0x15, 0xa4, 0x7d, 0xaa, 0x17, 0xf5, 0x1a, 0x3a, 0xba, 0xb2, 0xec, 0x29, 0xdb, 0x25, 0xd7, 0xa}, }, { {0x85, 0x6f, 0x5, 0x9b, 0xc, 0xbc, 0xc7, 0xfe, 0xd7, 0xff, 0xf5, 0xe7, 0x68, 0x52, 0x7d, 0x53, 0xfa, 0xae, 0x12, 0x43, 0x62, 0xc6, 0xaf, 0x77, 0xd9, 0x9f, 0x39, 0x2, 0x53, 0x5f, 0x67, 0x4f}, {0x57, 0x24, 0x4e, 0x83, 0xb1, 0x67, 
0x42, 0xdc, 0xc5, 0x1b, 0xce, 0x70, 0xb5, 0x44, 0x75, 0xb6, 0xd7, 0x5e, 0xd1, 0xf7, 0xb, 0x7a, 0xf0, 0x1a, 0x50, 0x36, 0xa0, 0x71, 0xfb, 0xcf, 0xef, 0x4a}, {0x1e, 0x17, 0x15, 0x4, 0x36, 0x36, 0x2d, 0xc3, 0x3b, 0x48, 0x98, 0x89, 0x11, 0xef, 0x2b, 0xcd, 0x10, 0x51, 0x94, 0xd0, 0xad, 0x6e, 0xa, 0x87, 0x61, 0x65, 0xa8, 0xa2, 0x72, 0xbb, 0xcc, 0xb}, }, { {0x96, 0x12, 0xfe, 0x50, 0x4c, 0x5e, 0x6d, 0x18, 0x7e, 0x9f, 0xe8, 0xfe, 0x82, 0x7b, 0x39, 0xe0, 0xb0, 0x31, 0x70, 0x50, 0xc5, 0xf6, 0xc7, 0x3b, 0xc2, 0x37, 0x8f, 0x10, 0x69, 0xfd, 0x78, 0x66}, {0xc8, 0xa9, 0xb1, 0xea, 0x2f, 0x96, 0x5e, 0x18, 0xcd, 0x7d, 0x14, 0x65, 0x35, 0xe6, 0xe7, 0x86, 0xf2, 0x6d, 0x5b, 0xbb, 0x31, 0xe0, 0x92, 0xb0, 0x3e, 0xb7, 0xd6, 0x59, 0xab, 0xf0, 0x24, 0x40}, {0xc2, 0x63, 0x68, 0x63, 0x31, 0xfa, 0x86, 0x15, 0xf2, 0x33, 0x2d, 0x57, 0x48, 0x8c, 0xf6, 0x7, 0xfc, 0xae, 0x9e, 0x78, 0x9f, 0xcc, 0x73, 0x4f, 0x1, 0x47, 0xad, 0x8e, 0x10, 0xe2, 0x42, 0x2d}, }, { {0x93, 0x75, 0x53, 0xf, 0xd, 0x7b, 0x71, 0x21, 0x4c, 0x6, 0x1e, 0x13, 0xb, 0x69, 0x4e, 0x91, 0x9f, 0xe0, 0x2a, 0x75, 0xae, 0x87, 0xb6, 0x1b, 0x6e, 0x3c, 0x42, 0x9b, 0xa7, 0xf3, 0xb, 0x42}, {0x9b, 0xd2, 0xdf, 0x94, 0x15, 0x13, 0xf5, 0x97, 0x6a, 0x4c, 0x3f, 0x31, 0x5d, 0x98, 0x55, 0x61, 0x10, 0x50, 0x45, 0x8, 0x7, 0x3f, 0xa1, 0xeb, 0x22, 0xd3, 0xd2, 0xb8, 0x8, 0x26, 0x6b, 0x67}, {0x47, 0x2b, 0x5b, 0x1c, 0x65, 0xba, 0x38, 0x81, 0x80, 0x1b, 0x1b, 0x31, 0xec, 0xb6, 0x71, 0x86, 0xb0, 0x35, 0x31, 0xbc, 0xb1, 0xc, 0xff, 0x7b, 0xe0, 0xf1, 0xc, 0x9c, 0xfa, 0x2f, 0x5d, 0x74}, }, { {0x6a, 0x4e, 0xd3, 0x21, 0x57, 0xdf, 0x36, 0x60, 0xd0, 0xb3, 0x7b, 0x99, 0x27, 0x88, 0xdb, 0xb1, 0xfa, 0x6a, 0x75, 0xc8, 0xc3, 0x9, 0xc2, 0xd3, 0x39, 0xc8, 0x1d, 0x4c, 0xe5, 0x5b, 0xe1, 0x6}, {0xbd, 0xc8, 0xc9, 0x2b, 0x1e, 0x5a, 0x52, 0xbf, 0x81, 0x9d, 0x47, 0x26, 0x8, 0x26, 0x5b, 0xea, 0xdb, 0x55, 0x1, 0xdf, 0xe, 0xc7, 0x11, 0xd5, 0xd0, 0xf5, 0xc, 0x96, 0xeb, 0x3c, 0xe2, 0x1a}, {0x4a, 0x99, 0x32, 0x19, 0x87, 0x5d, 0x72, 0x5b, 0xb0, 0xda, 0xb1, 0xce, 0xb5, 0x1c, 0x35, 0x32, 0x5, 
0xca, 0xb7, 0xda, 0x49, 0x15, 0xc4, 0x7d, 0xf7, 0xc1, 0x8e, 0x27, 0x61, 0xd8, 0xde, 0x58}, }, { {0xa8, 0xc9, 0xc2, 0xb6, 0xa8, 0x5b, 0xfb, 0x2d, 0x8c, 0x59, 0x2c, 0xf5, 0x8e, 0xef, 0xee, 0x48, 0x73, 0x15, 0x2d, 0xf1, 0x7, 0x91, 0x80, 0x33, 0xd8, 0x5b, 0x1d, 0x53, 0x6b, 0x69, 0xba, 0x8}, {0x5c, 0xc5, 0x66, 0xf2, 0x93, 0x37, 0x17, 0xd8, 0x49, 0x4e, 0x45, 0xcc, 0xc5, 0x76, 0xc9, 0xc8, 0xa8, 0xc3, 0x26, 0xbc, 0xf8, 0x82, 0xe3, 0x5c, 0xf9, 0xf6, 0x85, 0x54, 0xe8, 0x9d, 0xf3, 0x2f}, {0x7a, 0xc5, 0xef, 0xc3, 0xee, 0x3e, 0xed, 0x77, 0x11, 0x48, 0xff, 0xd4, 0x17, 0x55, 0xe0, 0x4, 0xcb, 0x71, 0xa6, 0xf1, 0x3f, 0x7a, 0x3d, 0xea, 0x54, 0xfe, 0x7c, 0x94, 0xb4, 0x33, 0x6, 0x12}, }, { {0xa, 0x10, 0x12, 0x49, 0x47, 0x31, 0xbd, 0x82, 0x6, 0xbe, 0x6f, 0x7e, 0x6d, 0x7b, 0x23, 0xde, 0xc6, 0x79, 0xea, 0x11, 0x19, 0x76, 0x1e, 0xe1, 0xde, 0x3b, 0x39, 0xcb, 0xe3, 0x3b, 0x43, 0x7}, {0x42, 0x0, 0x61, 0x91, 0x78, 0x98, 0x94, 0xb, 0xe8, 0xfa, 0xeb, 0xec, 0x3c, 0xb1, 0xe7, 0x4e, 0xc0, 0xa4, 0xf0, 0x94, 0x95, 0x73, 0xbe, 0x70, 0x85, 0x91, 0xd5, 0xb4, 0x99, 0xa, 0xd3, 0x35}, {0xf4, 0x97, 0xe9, 0x5c, 0xc0, 0x44, 0x79, 0xff, 0xa3, 0x51, 0x5c, 0xb0, 0xe4, 0x3d, 0x5d, 0x57, 0x7c, 0x84, 0x76, 0x5a, 0xfd, 0x81, 0x33, 0x58, 0x9f, 0xda, 0xf6, 0x7a, 0xde, 0x3e, 0x87, 0x2d}, }, { {0x81, 0xf9, 0x5d, 0x4e, 0xe1, 0x2, 0x62, 0xaa, 0xf5, 0xe1, 0x15, 0x50, 0x17, 0x59, 0xd, 0xa2, 0x6c, 0x1d, 0xe2, 0xba, 0xd3, 0x75, 0xa2, 0x18, 0x53, 0x2, 0x60, 0x1, 0x8a, 0x61, 0x43, 0x5}, {0x9, 0x34, 0x37, 0x43, 0x64, 0x31, 0x7a, 0x15, 0xd9, 0x81, 0xaa, 0xf4, 0xee, 0xb7, 0xb8, 0xfa, 0x6, 0x48, 0xa6, 0xf5, 0xe6, 0xfe, 0x93, 0xb0, 0xb6, 0xa7, 0x7f, 0x70, 0x54, 0x36, 0x77, 0x2e}, {0xc1, 0x23, 0x4c, 0x97, 0xf4, 0xbd, 0xea, 0xd, 0x93, 0x46, 0xce, 0x9d, 0x25, 0xa, 0x6f, 0xaa, 0x2c, 0xba, 0x9a, 0xa2, 0xb8, 0x2c, 0x20, 0x4, 0xd, 0x96, 0x7, 0x2d, 0x36, 0x43, 0x14, 0x4b}, }, }, { { {0xcb, 0x9c, 0x52, 0x1c, 0xe9, 0x54, 0x7c, 0x96, 0xfb, 0x35, 0xc6, 0x64, 0x92, 0x26, 0xf6, 0x30, 0x65, 0x19, 0x12, 0x78, 0xf4, 0xaf, 0x47, 0x27, 0x5c, 0x6f, 
0xf6, 0xea, 0x18, 0x84, 0x3, 0x17}, {0x7a, 0x1f, 0x6e, 0xb6, 0xc7, 0xb7, 0xc4, 0xcc, 0x7e, 0x2f, 0xc, 0xf5, 0x25, 0x7e, 0x15, 0x44, 0x1c, 0xaf, 0x3e, 0x71, 0xfc, 0x6d, 0xf0, 0x3e, 0xf7, 0x63, 0xda, 0x52, 0x67, 0x44, 0x2f, 0x58}, {0xe4, 0x4c, 0x32, 0x20, 0xd3, 0x7b, 0x31, 0xc6, 0xc4, 0x8b, 0x48, 0xa4, 0xe8, 0x42, 0x10, 0xa8, 0x64, 0x13, 0x5a, 0x4e, 0x8b, 0xf1, 0x1e, 0xb2, 0xc9, 0x8d, 0xa2, 0xcd, 0x4b, 0x1c, 0x2a, 0xc}, }, { {0x45, 0x69, 0xbd, 0x69, 0x48, 0x81, 0xc4, 0xed, 0x22, 0x8d, 0x1c, 0xbe, 0x7d, 0x90, 0x6d, 0xd, 0xab, 0xc5, 0x5c, 0xd5, 0x12, 0xd2, 0x3b, 0xc6, 0x83, 0xdc, 0x14, 0xa3, 0x30, 0x9b, 0x6a, 0x5a}, {0x47, 0x4, 0x1f, 0x6f, 0xd0, 0xc7, 0x4d, 0xd2, 0x59, 0xc0, 0x87, 0xdb, 0x3e, 0x9e, 0x26, 0xb2, 0x8f, 0xd2, 0xb2, 0xfb, 0x72, 0x2, 0x5b, 0xd1, 0x77, 0x48, 0xf6, 0xc6, 0xd1, 0x8b, 0x55, 0x7c}, {0x3d, 0x46, 0x96, 0xd3, 0x24, 0x15, 0xec, 0xd0, 0xf0, 0x24, 0x5a, 0xc3, 0x8a, 0x62, 0xbb, 0x12, 0xa4, 0x5f, 0xbc, 0x1c, 0x79, 0x3a, 0xc, 0xa5, 0xc3, 0xaf, 0xfb, 0xa, 0xca, 0xa5, 0x4, 0x4}, }, { {0xd1, 0x6f, 0x41, 0x2a, 0x1b, 0x9e, 0xbc, 0x62, 0x8b, 0x59, 0x50, 0xe3, 0x28, 0xf7, 0xc6, 0xb5, 0x67, 0x69, 0x5d, 0x3d, 0xd8, 0x3f, 0x34, 0x4, 0x98, 0xee, 0xf8, 0xe7, 0x16, 0x75, 0x52, 0x39}, {0xd6, 0x43, 0xa7, 0xa, 0x7, 0x40, 0x1f, 0x8c, 0xe8, 0x5e, 0x26, 0x5b, 0xcb, 0xd0, 0xba, 0xcc, 0xde, 0xd2, 0x8f, 0x66, 0x6b, 0x4, 0x4b, 0x57, 0x33, 0x96, 0xdd, 0xca, 0xfd, 0x5b, 0x39, 0x46}, {0x9c, 0x9a, 0x5d, 0x1a, 0x2d, 0xdb, 0x7f, 0x11, 0x2a, 0x5c, 0x0, 0xd1, 0xbc, 0x45, 0x77, 0x9c, 0xea, 0x6f, 0xd5, 0x54, 0xf1, 0xbe, 0xd4, 0xef, 0x16, 0xd0, 0x22, 0xe8, 0x29, 0x9a, 0x57, 0x76}, }, { {0xf2, 0x34, 0xb4, 0x52, 0x13, 0xb5, 0x3c, 0x33, 0xe1, 0x80, 0xde, 0x93, 0x49, 0x28, 0x32, 0xd8, 0xce, 0x35, 0xd, 0x75, 0x87, 0x28, 0x51, 0xb5, 0xc1, 0x77, 0x27, 0x2a, 0xbb, 0x14, 0xc5, 0x2}, {0x17, 0x2a, 0xc0, 0x49, 0x7e, 0x8e, 0xb6, 0x45, 0x7f, 0xa3, 0xa9, 0xbc, 0xa2, 0x51, 0xcd, 0x23, 0x1b, 0x4c, 0x22, 0xec, 0x11, 0x5f, 0xd6, 0x3e, 0xb1, 0xbd, 0x5, 0x9e, 0xdc, 0x84, 0xa3, 0x43}, {0x45, 0xb6, 0xf1, 0x8b, 
0xda, 0xd5, 0x4b, 0x68, 0x53, 0x4b, 0xb5, 0xf6, 0x7e, 0xd3, 0x8b, 0xfb, 0x53, 0xd2, 0xb0, 0xa9, 0xd7, 0x16, 0x39, 0x31, 0x59, 0x80, 0x54, 0x61, 0x9, 0x92, 0x60, 0x11}, }, { {0xcd, 0x4d, 0x9b, 0x36, 0x16, 0x56, 0x38, 0x7a, 0x63, 0x35, 0x5c, 0x65, 0xa7, 0x2c, 0xc0, 0x75, 0x21, 0x80, 0xf1, 0xd4, 0xf9, 0x1b, 0xc2, 0x7d, 0x42, 0xe0, 0xe6, 0x91, 0x74, 0x7d, 0x63, 0x2f}, {0xaa, 0xcf, 0xda, 0x29, 0x69, 0x16, 0x4d, 0xb4, 0x8f, 0x59, 0x13, 0x84, 0x4c, 0x9f, 0x52, 0xda, 0x59, 0x55, 0x3d, 0x45, 0xca, 0x63, 0xef, 0xe9, 0xb, 0x8e, 0x69, 0xc5, 0x5b, 0x12, 0x1e, 0x35}, {0xbe, 0x7b, 0xf6, 0x1a, 0x46, 0x9b, 0xb4, 0xd4, 0x61, 0x89, 0xab, 0xc8, 0x7a, 0x3, 0x3, 0xd6, 0xfb, 0x99, 0xa6, 0xf9, 0x9f, 0xe1, 0xde, 0x71, 0x9a, 0x2a, 0xce, 0xe7, 0x6, 0x2d, 0x18, 0x7f}, }, { {0x22, 0x75, 0x21, 0x8e, 0x72, 0x4b, 0x45, 0x9, 0xd8, 0xb8, 0x84, 0xd4, 0xf4, 0xe8, 0x58, 0xaa, 0x3c, 0x90, 0x46, 0x7f, 0x4d, 0x25, 0x58, 0xd3, 0x17, 0x52, 0x1c, 0x24, 0x43, 0xc0, 0xac, 0x44}, {0xec, 0x68, 0x1, 0xab, 0x64, 0x8e, 0x7c, 0x7a, 0x43, 0xc5, 0xed, 0x15, 0x55, 0x4a, 0x5a, 0xcb, 0xda, 0xe, 0xcd, 0x47, 0xd3, 0x19, 0x55, 0x9, 0xb0, 0x93, 0x3e, 0x34, 0x8c, 0xac, 0xd4, 0x67}, {0x77, 0x57, 0x7a, 0x4f, 0xbb, 0x6b, 0x7d, 0x1c, 0xe1, 0x13, 0x83, 0x91, 0xd4, 0xfe, 0x35, 0x8b, 0x84, 0x46, 0x6b, 0xc9, 0xc6, 0xa1, 0xdc, 0x4a, 0xbd, 0x71, 0xad, 0x12, 0x83, 0x1c, 0x6d, 0x55}, }, { {0x21, 0xe8, 0x1b, 0xb1, 0x56, 0x67, 0xf0, 0x81, 0xdd, 0xf3, 0xa3, 0x10, 0x23, 0xf8, 0xaf, 0xf, 0x5d, 0x46, 0x99, 0x6a, 0x55, 0xd0, 0xb2, 0xf8, 0x5, 0x7f, 0x8c, 0xcc, 0x38, 0xbe, 0x7a, 0x9}, {0x82, 0x39, 0x8d, 0xc, 0xe3, 0x40, 0xef, 0x17, 0x34, 0xfa, 0xa3, 0x15, 0x3e, 0x7, 0xf7, 0x31, 0x6e, 0x64, 0x73, 0x7, 0xcb, 0xf3, 0x21, 0x4f, 0xff, 0x4e, 0x82, 0x1d, 0x6d, 0x6c, 0x6c, 0x74}, {0xa4, 0x2d, 0xa5, 0x7e, 0x87, 0xc9, 0x49, 0xc, 0x43, 0x1d, 0xdc, 0x9b, 0x55, 0x69, 0x43, 0x4c, 0xd2, 0xeb, 0xcc, 0xf7, 0x9, 0x38, 0x2c, 0x2, 0xbd, 0x84, 0xee, 0x4b, 0xa3, 0x14, 0x7e, 0x57}, }, { {0x2b, 0xd7, 0x4d, 0xbd, 0xbe, 0xce, 0xfe, 0x94, 0x11, 0x22, 0xf, 0x6, 0xda, 0x4f, 
0x6a, 0xf4, 0xff, 0xd1, 0xc8, 0xc0, 0x77, 0x59, 0x4a, 0x12, 0x95, 0x92, 0x0, 0xfb, 0xb8, 0x4, 0x53, 0x70}, {0xa, 0x3b, 0xa7, 0x61, 0xac, 0x68, 0xe2, 0xf0, 0xf5, 0xa5, 0x91, 0x37, 0x10, 0xfa, 0xfa, 0xf2, 0xe9, 0x0, 0x6d, 0x6b, 0x82, 0x3e, 0xe1, 0xc1, 0x42, 0x8f, 0xd7, 0x6f, 0xe9, 0x7e, 0xfa, 0x60}, {0xc6, 0x6e, 0x29, 0x4d, 0x35, 0x1d, 0x3d, 0xb6, 0xd8, 0x31, 0xad, 0x5f, 0x3e, 0x5, 0xc3, 0xf3, 0xec, 0x42, 0xbd, 0xb4, 0x8c, 0x95, 0xb, 0x67, 0xfd, 0x53, 0x63, 0xa1, 0xc, 0x8e, 0x39, 0x21}, }, }, { { {0x1, 0x56, 0xb7, 0xb4, 0xf9, 0xaa, 0x98, 0x27, 0x72, 0xad, 0x8d, 0x5c, 0x13, 0x72, 0xac, 0x5e, 0x23, 0xa0, 0xb7, 0x61, 0x61, 0xaa, 0xce, 0xd2, 0x4e, 0x7d, 0x8f, 0xe9, 0x84, 0xb2, 0xbf, 0x1b}, {0xf3, 0x33, 0x2b, 0x38, 0x8a, 0x5, 0xf5, 0x89, 0xb4, 0xc0, 0x48, 0xad, 0xb, 0xba, 0xe2, 0x5a, 0x6e, 0xb3, 0x3d, 0xa5, 0x3, 0xb5, 0x93, 0x8f, 0xe6, 0x32, 0xa2, 0x95, 0x9d, 0xed, 0xa3, 0x5a}, {0x61, 0x65, 0xd9, 0xc7, 0xe9, 0x77, 0x67, 0x65, 0x36, 0x80, 0xc7, 0x72, 0x54, 0x12, 0x2b, 0xcb, 0xee, 0x6e, 0x50, 0xd9, 0x99, 0x32, 0x5, 0x65, 0xcc, 0x57, 0x89, 0x5e, 0x4e, 0xe1, 0x7, 0x4a}, }, { {0x9b, 0xa4, 0x77, 0xc4, 0xcd, 0x58, 0xb, 0x24, 0x17, 0xf0, 0x47, 0x64, 0xde, 0xda, 0x38, 0xfd, 0xad, 0x6a, 0xc8, 0xa7, 0x32, 0x8d, 0x92, 0x19, 0x81, 0xa0, 0xaf, 0x84, 0xed, 0x7a, 0xaf, 0x50}, {0x99, 0xf9, 0xd, 0x98, 0xcb, 0x12, 0xe4, 0x4e, 0x71, 0xc7, 0x6e, 0x3c, 0x6f, 0xd7, 0x15, 0xa3, 0xfd, 0x77, 0x5c, 0x92, 0xde, 0xed, 0xa5, 0xbb, 0x2, 0x34, 0x31, 0x1d, 0x39, 0xac, 0xb, 0x3f}, {0xe5, 0x5b, 0xf6, 0x15, 0x1, 0xde, 0x4f, 0x6e, 0xb2, 0x9, 0x61, 0x21, 0x21, 0x26, 0x98, 0x29, 0xd9, 0xd6, 0xad, 0xb, 0x81, 0x5, 0x2, 0x78, 0x6, 0xd0, 0xeb, 0xba, 0x16, 0xa3, 0x21, 0x19}, }, { {0x8b, 0xc1, 0xf3, 0xd9, 0x9a, 0xad, 0x5a, 0xd7, 0x9c, 0xc1, 0xb1, 0x60, 0xef, 0xe, 0x6a, 0x56, 0xd9, 0xe, 0x5c, 0x25, 0xac, 0xb, 0x9a, 0x3e, 0xf5, 0xc7, 0x62, 0xa0, 0xec, 0x9d, 0x4, 0x7b}, {0xfc, 0x70, 0xb8, 0xdf, 0x7e, 0x2f, 0x42, 0x89, 0xbd, 0xb3, 0x76, 0x4f, 0xeb, 0x6b, 0x29, 0x2c, 0xf7, 0x4d, 0xc2, 0x36, 0xd4, 0xf1, 0x38, 0x7, 0xb0, 
0xae, 0x73, 0xe2, 0x41, 0xdf, 0x58, 0x64}, {0x83, 0x44, 0x44, 0x35, 0x7a, 0xe3, 0xcb, 0xdc, 0x93, 0xbe, 0xed, 0xf, 0x33, 0x79, 0x88, 0x75, 0x87, 0xdd, 0xc5, 0x12, 0xc3, 0x4, 0x60, 0x78, 0x64, 0xe, 0x95, 0xc2, 0xcb, 0xdc, 0x93, 0x60}, }, { {0x4b, 0x3, 0x84, 0x60, 0xbe, 0xee, 0xde, 0x6b, 0x54, 0xb8, 0xf, 0x78, 0xb6, 0xc2, 0x99, 0x31, 0x95, 0x6, 0x2d, 0xb6, 0xab, 0x76, 0x33, 0x97, 0x90, 0x7d, 0x64, 0x8b, 0xc9, 0x80, 0x31, 0x6e}, {0x6d, 0x70, 0xe0, 0x85, 0x85, 0x9a, 0xf3, 0x1f, 0x33, 0x39, 0xe7, 0xb3, 0xd8, 0xa5, 0xd0, 0x36, 0x3b, 0x45, 0x8f, 0x71, 0xe1, 0xf2, 0xb9, 0x43, 0x7c, 0xa9, 0x27, 0x48, 0x8, 0xea, 0xd1, 0x57}, {0x71, 0xb0, 0x28, 0xa1, 0xe7, 0xb6, 0x7a, 0xee, 0xaa, 0x8b, 0xa8, 0x93, 0x6d, 0x59, 0xc1, 0xa4, 0x30, 0x61, 0x21, 0xb2, 0x82, 0xde, 0xb4, 0xf7, 0x18, 0xbd, 0x97, 0xdd, 0x9d, 0x99, 0x3e, 0x36}, }, { {0xc6, 0xae, 0x4b, 0xe2, 0xdc, 0x48, 0x18, 0x2f, 0x60, 0xaf, 0xbc, 0xba, 0x55, 0x72, 0x9b, 0x76, 0x31, 0xe9, 0xef, 0x3c, 0x6e, 0x3c, 0xcb, 0x90, 0x55, 0xb3, 0xf9, 0xc6, 0x9b, 0x97, 0x1f, 0x23}, {0xc4, 0x1f, 0xee, 0x35, 0xc1, 0x43, 0xa8, 0x96, 0xcf, 0xc8, 0xe4, 0x8, 0x55, 0xb3, 0x6e, 0x97, 0x30, 0xd3, 0x8c, 0xb5, 0x1, 0x68, 0x2f, 0xb4, 0x2b, 0x5, 0x3a, 0x69, 0x78, 0x9b, 0xee, 0x48}, {0xc6, 0xf3, 0x2a, 0xcc, 0x4b, 0xde, 0x31, 0x5c, 0x1f, 0x8d, 0x20, 0xfe, 0x30, 0xb0, 0x4b, 0xb0, 0x66, 0xb4, 0x4f, 0xc1, 0x9, 0x70, 0x8d, 0xb7, 0x13, 0x24, 0x79, 0x8, 0x9b, 0xfa, 0x9b, 0x7}, }, { {0x45, 0x42, 0xd5, 0xa2, 0x80, 0xed, 0xc9, 0xf3, 0x52, 0x39, 0xf6, 0x77, 0x78, 0x8b, 0xa0, 0xa, 0x75, 0x54, 0x8, 0xd1, 0x63, 0xac, 0x6d, 0xd7, 0x6b, 0x63, 0x70, 0x94, 0x15, 0xfb, 0xf4, 0x1e}, {0xf4, 0xd, 0x30, 0xda, 0x51, 0x3a, 0x90, 0xe3, 0xb0, 0x5a, 0xa9, 0x3d, 0x23, 0x64, 0x39, 0x84, 0x80, 0x64, 0x35, 0xb, 0x2d, 0xf1, 0x3c, 0xed, 0x94, 0x71, 0x81, 0x84, 0xf6, 0x77, 0x8c, 0x3}, {0xec, 0x7b, 0x16, 0x5b, 0xe6, 0x5e, 0x4e, 0x85, 0xc2, 0xcd, 0xd0, 0x96, 0x42, 0xa, 0x59, 0x59, 0x99, 0x21, 0x10, 0x98, 0x34, 0xdf, 0xb2, 0x72, 0x56, 0xff, 0xb, 0x4a, 0x2a, 0xe9, 0x5e, 0x57}, }, { {0x1, 0xd8, 
0xa4, 0xa, 0x45, 0xbc, 0x46, 0x5d, 0xd8, 0xb9, 0x33, 0xa5, 0x27, 0x12, 0xaf, 0xc3, 0xc2, 0x6, 0x89, 0x2b, 0x26, 0x3b, 0x9e, 0x38, 0x1b, 0x58, 0x2f, 0x38, 0x7e, 0x1e, 0xa, 0x20}, {0xcf, 0x2f, 0x18, 0x8a, 0x90, 0x80, 0xc0, 0xd4, 0xbd, 0x9d, 0x48, 0x99, 0xc2, 0x70, 0xe1, 0x30, 0xde, 0x33, 0xf7, 0x52, 0x57, 0xbd, 0xba, 0x5, 0x0, 0xfd, 0xd3, 0x2c, 0x11, 0xe7, 0xd4, 0x43}, {0xc5, 0x3a, 0xf9, 0xea, 0x67, 0xb9, 0x8d, 0x51, 0xc0, 0x52, 0x66, 0x5, 0x9b, 0x98, 0xbc, 0x71, 0xf5, 0x97, 0x71, 0x56, 0xd9, 0x85, 0x2b, 0xfe, 0x38, 0x4e, 0x1e, 0x65, 0x52, 0xca, 0xe, 0x5}, }, { {0xea, 0x68, 0xe6, 0x60, 0x76, 0x39, 0xac, 0x97, 0x97, 0xb4, 0x3a, 0x15, 0xfe, 0xbb, 0x19, 0x9b, 0x9f, 0xa7, 0xec, 0x34, 0xb5, 0x79, 0xb1, 0x4c, 0x57, 0xae, 0x31, 0xa1, 0x9f, 0xc0, 0x51, 0x61}, {0x9c, 0xc, 0x3f, 0x45, 0xde, 0x1a, 0x43, 0xc3, 0x9b, 0x3b, 0x70, 0xff, 0x5e, 0x4, 0xf5, 0xe9, 0x3d, 0x7b, 0x84, 0xed, 0xc9, 0x7a, 0xd9, 0xfc, 0xc6, 0xf4, 0x58, 0x1c, 0xc2, 0xe6, 0xe, 0x4b}, {0x96, 0x5d, 0xf0, 0xfd, 0xd, 0x5c, 0xf5, 0x3a, 0x7a, 0xee, 0xb4, 0x2a, 0xe0, 0x2e, 0x26, 0xdd, 0x9, 0x17, 0x17, 0x12, 0x87, 0xbb, 0xb2, 0x11, 0xb, 0x3, 0xf, 0x80, 0xfa, 0x24, 0xef, 0x1f}, }, }, { { {0x86, 0x6b, 0x97, 0x30, 0xf5, 0xaf, 0xd2, 0x22, 0x4, 0x46, 0xd2, 0xc2, 0x6, 0xb8, 0x90, 0x8d, 0xe5, 0xba, 0xe5, 0x4d, 0x6c, 0x89, 0xa1, 0xdc, 0x17, 0xc, 0x34, 0xc8, 0xe6, 0x5f, 0x0, 0x28}, {0x96, 0x31, 0xa7, 0x1a, 0xfb, 0x53, 0xd6, 0x37, 0x18, 0x64, 0xd7, 0x3f, 0x30, 0x95, 0x94, 0xf, 0xb2, 0x17, 0x3a, 0xfb, 0x9, 0xb, 0x20, 0xad, 0x3e, 0x61, 0xc8, 0x2f, 0x29, 0x49, 0x4d, 0x54}, {0x88, 0x86, 0x52, 0x34, 0x9f, 0xba, 0xef, 0x6a, 0xa1, 0x7d, 0x10, 0x25, 0x94, 0xff, 0x1b, 0x5c, 0x36, 0x4b, 0xd9, 0x66, 0xcd, 0xbb, 0x5b, 0xf7, 0xfa, 0x6d, 0x31, 0xf, 0x93, 0x72, 0xe4, 0x72}, }, { {0x27, 0x76, 0x2a, 0xd3, 0x35, 0xf6, 0xf3, 0x7, 0xf0, 0x66, 0x65, 0x5f, 0x86, 0x4d, 0xaa, 0x7a, 0x50, 0x44, 0xd0, 0x28, 0x97, 0xe7, 0x85, 0x3c, 0x38, 0x64, 0xe0, 0xf, 0x0, 0x7f, 0xee, 0x1f}, {0x4f, 0x8, 0x81, 0x97, 0x8c, 0x20, 0x95, 0x26, 0xe1, 0xe, 0x45, 0x23, 0xb, 
0x2a, 0x50, 0xb1, 0x2, 0xde, 0xef, 0x3, 0xa6, 0xae, 0x9d, 0xfd, 0x4c, 0xa3, 0x33, 0x27, 0x8c, 0x2e, 0x9d, 0x5a}, {0xe5, 0xf7, 0xdb, 0x3, 0xda, 0x5, 0x53, 0x76, 0xbd, 0xcd, 0x34, 0x14, 0x49, 0xf2, 0xda, 0xa4, 0xec, 0x88, 0x4a, 0xd2, 0xcd, 0xd5, 0x4a, 0x7b, 0x43, 0x5, 0x4, 0xee, 0x51, 0x40, 0xf9, 0x0}, }, { {0x53, 0x97, 0xaf, 0x7, 0xbb, 0x93, 0xef, 0xd7, 0xa7, 0x66, 0xb7, 0x3d, 0xcf, 0xd0, 0x3e, 0x58, 0xc5, 0x1e, 0xb, 0x6e, 0xbf, 0x98, 0x69, 0xce, 0x52, 0x4, 0xd4, 0x5d, 0xd2, 0xff, 0xb7, 0x47}, {0xb2, 0x30, 0xd3, 0xc3, 0x23, 0x6b, 0x35, 0x8d, 0x6, 0x1b, 0x47, 0xb0, 0x9b, 0x8b, 0x1c, 0xf2, 0x3c, 0xb8, 0x42, 0x6e, 0x6c, 0x31, 0x6c, 0xb3, 0xd, 0xb1, 0xea, 0x8b, 0x7e, 0x9c, 0xd7, 0x7}, {0x12, 0xdd, 0x8, 0xbc, 0x9c, 0xfb, 0xfb, 0x87, 0x9b, 0xc2, 0xee, 0xe1, 0x3a, 0x6b, 0x6, 0x8a, 0xbf, 0xc1, 0x1f, 0xdb, 0x2b, 0x24, 0x57, 0xd, 0xb6, 0x4b, 0xa6, 0x5e, 0xa3, 0x20, 0x35, 0x1c}, }, { {0x59, 0xc0, 0x6b, 0x21, 0x40, 0x6f, 0xa8, 0xcd, 0x7e, 0xd8, 0xbc, 0x12, 0x1d, 0x23, 0xbb, 0x1f, 0x90, 0x9, 0xc7, 0x17, 0x9e, 0x6a, 0x95, 0xb4, 0x55, 0x2e, 0xd1, 0x66, 0x3b, 0xc, 0x75, 0x38}, {0x4a, 0xa3, 0xcb, 0xbc, 0xa6, 0x53, 0xd2, 0x80, 0x9b, 0x21, 0x38, 0x38, 0xa1, 0xc3, 0x61, 0x3e, 0x96, 0xe3, 0x82, 0x98, 0x1, 0xb6, 0xc3, 0x90, 0x6f, 0xe6, 0xe, 0x5d, 0x77, 0x5, 0x3d, 0x1c}, {0x1a, 0xe5, 0x22, 0x94, 0x40, 0xf1, 0x2e, 0x69, 0x71, 0xf6, 0x5d, 0x2b, 0x3c, 0xc7, 0xc0, 0xcb, 0x29, 0xe0, 0x4c, 0x74, 0xe7, 0x4f, 0x1, 0x21, 0x7c, 0x48, 0x30, 0xd3, 0xc7, 0xe2, 0x21, 0x6}, }, { {0xf3, 0xf0, 0xdb, 0xb0, 0x96, 0x17, 0xae, 0xb7, 0x96, 0xe1, 0x7c, 0xe1, 0xb9, 0xaf, 0xdf, 0x54, 0xb4, 0xa3, 0xaa, 0xe9, 0x71, 0x30, 0x92, 0x25, 0x9d, 0x2e, 0x0, 0xa1, 0x9c, 0x58, 0x8e, 0x5d}, {0x8d, 0x83, 0x59, 0x82, 0xcc, 0x60, 0x98, 0xaf, 0xdc, 0x9a, 0x9f, 0xc6, 0xc1, 0x48, 0xea, 0x90, 0x30, 0x1e, 0x58, 0x65, 0x37, 0x48, 0x26, 0x65, 0xbc, 0xa5, 0xd3, 0x7b, 0x9, 0xd6, 0x7, 0x0}, {0x4b, 0xa9, 0x42, 0x8, 0x95, 0x1d, 0xbf, 0xc0, 0x3e, 0x2e, 0x8f, 0x58, 0x63, 0xc3, 0xd3, 0xb2, 0xef, 0xe2, 0x51, 0xbb, 0x38, 0x14, 0x96, 0xa, 0x86, 
0xbf, 0x1c, 0x3c, 0x78, 0xd7, 0x83, 0x15}, }, { {0xc7, 0x28, 0x9d, 0xcc, 0x4, 0x47, 0x3, 0x90, 0x8f, 0xc5, 0x2c, 0xf7, 0x9e, 0x67, 0x1b, 0x1d, 0x26, 0x87, 0x5b, 0xbe, 0x5f, 0x2b, 0xe1, 0x16, 0xa, 0x58, 0xc5, 0x83, 0x4e, 0x6, 0x58, 0x49}, {0xe1, 0x7a, 0xa2, 0x5d, 0xef, 0xa2, 0xee, 0xec, 0x74, 0x1, 0x67, 0x55, 0x14, 0x3a, 0x7c, 0x59, 0x7a, 0x16, 0x9, 0x66, 0x12, 0x2a, 0xa6, 0xc9, 0x70, 0x8f, 0xed, 0x81, 0x2e, 0x5f, 0x2a, 0x25}, {0xd, 0xe8, 0x66, 0x50, 0x26, 0x94, 0x28, 0xd, 0x6b, 0x8c, 0x7c, 0x30, 0x85, 0xf7, 0xc3, 0xfc, 0xfd, 0x12, 0x11, 0xc, 0x78, 0xda, 0x53, 0x1b, 0x88, 0xb3, 0x43, 0xd8, 0xb, 0x17, 0x9c, 0x7}, }, { {0x56, 0xd0, 0xd5, 0xc0, 0x50, 0xcd, 0xd6, 0xcd, 0x3b, 0x57, 0x3, 0xbb, 0x6d, 0x68, 0xf7, 0x9a, 0x48, 0xef, 0xc3, 0xf3, 0x3f, 0x72, 0xa6, 0x3c, 0xcc, 0x8a, 0x7b, 0x31, 0xd7, 0xc0, 0x68, 0x67}, {0xff, 0x6f, 0xfa, 0x64, 0xe4, 0xec, 0x6, 0x5, 0x23, 0xe5, 0x5, 0x62, 0x1e, 0x43, 0xe3, 0xbe, 0x42, 0xea, 0xb8, 0x51, 0x24, 0x42, 0x79, 0x35, 0x0, 0xfb, 0xc9, 0x4a, 0xe3, 0x5, 0xec, 0x6d}, {0xb3, 0xc1, 0x55, 0xf1, 0xe5, 0x25, 0xb6, 0x94, 0x91, 0x7b, 0x7b, 0x99, 0xa7, 0xf3, 0x7b, 0x41, 0x0, 0x26, 0x6b, 0x6d, 0xdc, 0xbd, 0x2c, 0xc2, 0xf4, 0x52, 0xcd, 0xdd, 0x14, 0x5e, 0x44, 0x51}, }, { {0x55, 0xa4, 0xbe, 0x2b, 0xab, 0x47, 0x31, 0x89, 0x29, 0x91, 0x7, 0x92, 0x4f, 0xa2, 0x53, 0x8c, 0xa7, 0xf7, 0x30, 0xbe, 0x48, 0xf9, 0x49, 0x4b, 0x3d, 0xd4, 0x4f, 0x6e, 0x8, 0x90, 0xe9, 0x12}, {0x51, 0x49, 0x14, 0x3b, 0x4b, 0x2b, 0x50, 0x57, 0xb3, 0xbc, 0x4b, 0x44, 0x6b, 0xff, 0x67, 0x8e, 0xdb, 0x85, 0x63, 0x16, 0x27, 0x69, 0xbd, 0xb8, 0xc8, 0x95, 0x92, 0xe3, 0x31, 0x6f, 0x18, 0x13}, {0x2e, 0xbb, 0xdf, 0x7f, 0xb3, 0x96, 0xc, 0xf1, 0xf9, 0xea, 0x1c, 0x12, 0x5e, 0x93, 0x9a, 0x9f, 0x3f, 0x98, 0x5b, 0x3a, 0xc4, 0x36, 0x11, 0xdf, 0xaf, 0x99, 0x3e, 0x5d, 0xf0, 0xe3, 0xb2, 0x77}, }, }, { { {0xa4, 0xb0, 0xdd, 0x12, 0x9c, 0x63, 0x98, 0xd5, 0x6b, 0x86, 0x24, 0xc0, 0x30, 0x9f, 0xd1, 0xa5, 0x60, 0xe4, 0xfc, 0x58, 0x3, 0x2f, 0x7c, 0xd1, 0x8a, 0x5e, 0x9, 0x2e, 0x15, 0x95, 0xa1, 0x7}, {0xde, 0xc4, 
0x2e, 0x9c, 0xc5, 0xa9, 0x6f, 0x29, 0xcb, 0xf3, 0x84, 0x4f, 0xbf, 0x61, 0x8b, 0xbc, 0x8, 0xf9, 0xa8, 0x17, 0xd9, 0x6, 0x77, 0x1c, 0x5d, 0x25, 0xd3, 0x7a, 0xfc, 0x95, 0xb7, 0x63}, {0xc8, 0x5f, 0x9e, 0x38, 0x2, 0x8f, 0x36, 0xa8, 0x3b, 0xe4, 0x8d, 0xcf, 0x2, 0x3b, 0x43, 0x90, 0x43, 0x26, 0x41, 0xc5, 0x5d, 0xfd, 0xa1, 0xaf, 0x37, 0x1, 0x2f, 0x3, 0x3d, 0xe8, 0x8f, 0x3e}, }, { {0x3c, 0xd1, 0xef, 0xe8, 0x8d, 0x4c, 0x70, 0x8, 0x31, 0x37, 0xe0, 0x33, 0x8e, 0x1a, 0xc5, 0xdf, 0xe3, 0xcd, 0x60, 0x12, 0xa5, 0x5d, 0x9d, 0xa5, 0x86, 0x8c, 0x25, 0xa6, 0x99, 0x8, 0xd6, 0x22}, {0x94, 0xa2, 0x70, 0x5, 0xb9, 0x15, 0x8b, 0x2f, 0x49, 0x45, 0x8, 0x67, 0x70, 0x42, 0xf2, 0x94, 0x84, 0xfd, 0xbb, 0x61, 0xe1, 0x5a, 0x1c, 0xde, 0x7, 0x40, 0xac, 0x7f, 0x79, 0x3b, 0xba, 0x75}, {0x96, 0xd1, 0xcd, 0x70, 0xc0, 0xdb, 0x39, 0x62, 0x9a, 0x8a, 0x7d, 0x6c, 0x8b, 0x8a, 0xfe, 0x60, 0x60, 0x12, 0x40, 0xeb, 0xbc, 0x47, 0x88, 0xb3, 0x5e, 0x9e, 0x77, 0x87, 0x7b, 0xd0, 0x4, 0x9}, }, { {0xb9, 0x40, 0xf9, 0x48, 0x66, 0x2d, 0x32, 0xf4, 0x39, 0xc, 0x2d, 0xbd, 0xc, 0x2f, 0x95, 0x6, 0x31, 0xf9, 0x81, 0xa0, 0xad, 0x97, 0x76, 0x16, 0x6c, 0x2a, 0xf7, 0xba, 0xce, 0xaa, 0x40, 0x62}, {0x9c, 0x91, 0xba, 0xdd, 0xd4, 0x1f, 0xce, 0xb4, 0xaa, 0x8d, 0x4c, 0xc7, 0x3e, 0xdb, 0x31, 0xcf, 0x51, 0xcc, 0x86, 0xad, 0x63, 0xcc, 0x63, 0x2c, 0x7, 0xde, 0x1d, 0xbc, 0x3f, 0x14, 0xe2, 0x43}, {0xa0, 0x95, 0xa2, 0x5b, 0x9c, 0x74, 0x34, 0xf8, 0x5a, 0xd2, 0x37, 0xca, 0x5b, 0x7c, 0x94, 0xd6, 0x6a, 0x31, 0xc9, 0xe7, 0xa7, 0x3b, 0xf1, 0x66, 0xac, 0xc, 0xb4, 0x8d, 0x23, 0xaf, 0xbd, 0x56}, }, { {0xb2, 0x3b, 0x9d, 0xc1, 0x6c, 0xd3, 0x10, 0x13, 0xb9, 0x86, 0x23, 0x62, 0xb7, 0x6b, 0x2a, 0x6, 0x5c, 0x4f, 0xa1, 0xd7, 0x91, 0x85, 0x9b, 0x7c, 0x54, 0x57, 0x1e, 0x7e, 0x50, 0x31, 0xaa, 0x3}, {0xeb, 0x33, 0x35, 0xf5, 0xe3, 0xb9, 0x2a, 0x36, 0x40, 0x3d, 0xb9, 0x6e, 0xd5, 0x68, 0x85, 0x33, 0x72, 0x55, 0x5a, 0x1d, 0x52, 0x14, 0xe, 0x9e, 0x18, 0x13, 0x74, 0x83, 0x6d, 0xa8, 0x24, 0x1d}, {0x1f, 0xce, 0xd4, 0xff, 0x48, 0x76, 0xec, 0xf4, 0x1c, 0x8c, 0xac, 0x54, 0xf0, 
0xea, 0x45, 0xe0, 0x7c, 0x35, 0x9, 0x1d, 0x82, 0x25, 0xd2, 0x88, 0x59, 0x48, 0xeb, 0x9a, 0xdc, 0x61, 0xb2, 0x43}, }, { {0x64, 0x13, 0x95, 0x6c, 0x8b, 0x3d, 0x51, 0x19, 0x7b, 0xf4, 0xb, 0x0, 0x26, 0x71, 0xfe, 0x94, 0x67, 0x95, 0x4f, 0xd5, 0xdd, 0x10, 0x8d, 0x2, 0x64, 0x9, 0x94, 0x42, 0xe2, 0xd5, 0xb4, 0x2}, {0xbb, 0x79, 0xbb, 0x88, 0x19, 0x1e, 0x5b, 0xe5, 0x9d, 0x35, 0x7a, 0xc1, 0x7d, 0xd0, 0x9e, 0xa0, 0x33, 0xea, 0x3d, 0x60, 0xe2, 0x2e, 0x2c, 0xb0, 0xc2, 0x6b, 0x27, 0x5b, 0xcf, 0x55, 0x60, 0x32}, {0xf2, 0x8d, 0xd1, 0x28, 0xcb, 0x55, 0xa1, 0xb4, 0x8, 0xe5, 0x6c, 0x18, 0x46, 0x46, 0xcc, 0xea, 0x89, 0x43, 0x82, 0x6c, 0x93, 0xf4, 0x9c, 0xc4, 0x10, 0x34, 0x5d, 0xae, 0x9, 0xc8, 0xa6, 0x27}, }, { {0x54, 0x69, 0x3d, 0xc4, 0xa, 0x27, 0x2c, 0xcd, 0xb2, 0xca, 0x66, 0x6a, 0x57, 0x3e, 0x4a, 0xdd, 0x6c, 0x3, 0xd7, 0x69, 0x24, 0x59, 0xfa, 0x79, 0x99, 0x25, 0x8c, 0x3d, 0x60, 0x3, 0x15, 0x22}, {0x88, 0xb1, 0xd, 0x1f, 0xcd, 0xeb, 0xa6, 0x8b, 0xe8, 0x5b, 0x5a, 0x67, 0x3a, 0xd7, 0xd3, 0x37, 0x5a, 0x58, 0xf5, 0x15, 0xa3, 0xdf, 0x2e, 0xf2, 0x7e, 0xa1, 0x60, 0xff, 0x74, 0x71, 0xb6, 0x2c}, {0xd0, 0xe1, 0xb, 0x39, 0xf9, 0xcd, 0xee, 0x59, 0xf1, 0xe3, 0x8c, 0x72, 0x44, 0x20, 0x42, 0xa9, 0xf4, 0xf0, 0x94, 0x7a, 0x66, 0x1c, 0x89, 0x82, 0x36, 0xf4, 0x90, 0x38, 0xb7, 0xf4, 0x1d, 0x7b}, }, { {0x8c, 0xf5, 0xf8, 0x7, 0x18, 0x22, 0x2e, 0x5f, 0xd4, 0x9, 0x94, 0xd4, 0x9f, 0x5c, 0x55, 0xe3, 0x30, 0xa6, 0xb6, 0x1f, 0x8d, 0xa8, 0xaa, 0xb2, 0x3d, 0xe0, 0x52, 0xd3, 0x45, 0x82, 0x69, 0x68}, {0x24, 0xa2, 0xb2, 0xb3, 0xe0, 0xf2, 0x92, 0xe4, 0x60, 0x11, 0x55, 0x2b, 0x6, 0x9e, 0x6c, 0x7c, 0xe, 0x7b, 0x7f, 0xd, 0xe2, 0x8f, 0xeb, 0x15, 0x92, 0x59, 0xfc, 0x58, 0x26, 0xef, 0xfc, 0x61}, {0x7a, 0x18, 0x18, 0x2a, 0x85, 0x5d, 0xb1, 0xdb, 0xd7, 0xac, 0xdd, 0x86, 0xd3, 0xaa, 0xe4, 0xf3, 0x82, 0xc4, 0xf6, 0xf, 0x81, 0xe2, 0xba, 0x44, 0xcf, 0x1, 0xaf, 0x3d, 0x47, 0x4c, 0xcf, 0x46}, }, { {0x40, 0x81, 0x49, 0xf1, 0xa7, 0x6e, 0x3c, 0x21, 0x54, 0x48, 0x2b, 0x39, 0xf8, 0x7e, 0x1e, 0x7c, 0xba, 0xce, 0x29, 0x56, 0x8c, 0xc3, 0x88, 
0x24, 0xbb, 0xc5, 0x8c, 0xd, 0xe5, 0xaa, 0x65, 0x10}, {0xf9, 0xe5, 0xc4, 0x9e, 0xed, 0x25, 0x65, 0x42, 0x3, 0x33, 0x90, 0x16, 0x1, 0xda, 0x5e, 0xe, 0xdc, 0xca, 0xe5, 0xcb, 0xf2, 0xa7, 0xb1, 0x72, 0x40, 0x5f, 0xeb, 0x14, 0xcd, 0x7b, 0x38, 0x29}, {0x57, 0xd, 0x20, 0xdf, 0x25, 0x45, 0x2c, 0x1c, 0x4a, 0x67, 0xca, 0xbf, 0xd6, 0x2d, 0x3b, 0x5c, 0x30, 0x40, 0x83, 0xe1, 0xb1, 0xe7, 0x7, 0xa, 0x16, 0xe7, 0x1c, 0x4f, 0xe6, 0x98, 0xa1, 0x69}, }, }, { { {0xed, 0xca, 0xc5, 0xdc, 0x34, 0x44, 0x1, 0xe1, 0x33, 0xfb, 0x84, 0x3c, 0x96, 0x5d, 0xed, 0x47, 0xe7, 0xa0, 0x86, 0xed, 0x76, 0x95, 0x1, 0x70, 0xe4, 0xf9, 0x67, 0xd2, 0x7b, 0x69, 0xb2, 0x25}, {0xbc, 0x78, 0x1a, 0xd9, 0xe0, 0xb2, 0x62, 0x90, 0x67, 0x96, 0x50, 0xc8, 0x9c, 0x88, 0xc9, 0x47, 0xb8, 0x70, 0x50, 0x40, 0x66, 0x4a, 0xf5, 0x9d, 0xbf, 0xa1, 0x93, 0x24, 0xa9, 0xe6, 0x69, 0x73}, {0x64, 0x68, 0x98, 0x13, 0xfb, 0x3f, 0x67, 0x9d, 0xb8, 0xc7, 0x5d, 0x41, 0xd9, 0xfb, 0xa5, 0x3c, 0x5e, 0x3b, 0x27, 0xdf, 0x3b, 0xcc, 0x4e, 0xe0, 0xd2, 0x4c, 0x4e, 0xb5, 0x3d, 0x68, 0x20, 0x14}, }, { {0xd0, 0x5a, 0xcc, 0xc1, 0x6f, 0xbb, 0xee, 0x34, 0x8b, 0xac, 0x46, 0x96, 0xe9, 0xc, 0x1b, 0x6a, 0x53, 0xde, 0x6b, 0xa6, 0x49, 0xda, 0xb0, 0xd3, 0xc1, 0x81, 0xd0, 0x61, 0x41, 0x3b, 0xe8, 0x31}, {0x97, 0xd1, 0x9d, 0x24, 0x1e, 0xbd, 0x78, 0xb4, 0x2, 0xc1, 0x58, 0x5e, 0x0, 0x35, 0xc, 0x62, 0x5c, 0xac, 0xba, 0xcc, 0x2f, 0xd3, 0x2, 0xfb, 0x2d, 0xa7, 0x8, 0xf5, 0xeb, 0x3b, 0xb6, 0x60}, {0x4f, 0x2b, 0x6, 0x9e, 0x12, 0xc7, 0xe8, 0x97, 0xd8, 0xa, 0x32, 0x29, 0x4f, 0x8f, 0xe4, 0x49, 0x3f, 0x68, 0x18, 0x6f, 0x4b, 0xe1, 0xec, 0x5b, 0x17, 0x3, 0x55, 0x2d, 0xb6, 0x1e, 0xcf, 0x55}, }, { {0x52, 0x8c, 0xf5, 0x7d, 0xe3, 0xb5, 0x76, 0x30, 0x36, 0xcc, 0x99, 0xe7, 0xdd, 0xb9, 0x3a, 0xd7, 0x20, 0xee, 0x13, 0x49, 0xe3, 0x1c, 0x83, 0xbd, 0x33, 0x1, 0xba, 0x62, 0xaa, 0xfb, 0x56, 0x1a}, {0x58, 0x3d, 0xc2, 0x65, 0x10, 0x10, 0x79, 0x58, 0x9c, 0x81, 0x94, 0x50, 0x6d, 0x8, 0x9d, 0x8b, 0xa7, 0x5f, 0xc5, 0x12, 0xa9, 0x2f, 0x40, 0xe2, 0xd4, 0x91, 0x8, 0x57, 0x64, 0x65, 0x9a, 0x66}, 
{0xec, 0xc9, 0x9d, 0x5c, 0x50, 0x6b, 0x3e, 0x94, 0x1a, 0x37, 0x7c, 0xa7, 0xbb, 0x57, 0x25, 0x30, 0x51, 0x76, 0x34, 0x41, 0x56, 0xae, 0x73, 0x98, 0x5c, 0x8a, 0xc5, 0x99, 0x67, 0x83, 0xc4, 0x13}, }, { {0x80, 0xd0, 0x8b, 0x5d, 0x6a, 0xfb, 0xdc, 0xc4, 0x42, 0x48, 0x1a, 0x57, 0xec, 0xc4, 0xeb, 0xde, 0x65, 0x53, 0xe5, 0xb8, 0x83, 0xe8, 0xb2, 0xd4, 0x27, 0xb8, 0xe5, 0xc8, 0x7d, 0xc8, 0xbd, 0x50}, {0xb9, 0xe1, 0xb3, 0x5a, 0x46, 0x5d, 0x3a, 0x42, 0x61, 0x3f, 0xf1, 0xc7, 0x87, 0xc1, 0x13, 0xfc, 0xb6, 0xb9, 0xb5, 0xec, 0x64, 0x36, 0xf8, 0x19, 0x7, 0xb6, 0x37, 0xa6, 0x93, 0xc, 0xf8, 0x66}, {0x11, 0xe1, 0xdf, 0x6e, 0x83, 0x37, 0x6d, 0x60, 0xd9, 0xab, 0x11, 0xf0, 0x15, 0x3e, 0x35, 0x32, 0x96, 0x3b, 0xb7, 0x25, 0xc3, 0x3a, 0xb0, 0x64, 0xae, 0xd5, 0x5f, 0x72, 0x44, 0x64, 0xd5, 0x1d}, }, { {0x9a, 0xc8, 0xba, 0x8, 0x0, 0xe6, 0x97, 0xc2, 0xe0, 0xc3, 0xe1, 0xea, 0x11, 0xea, 0x4c, 0x7d, 0x7c, 0x97, 0xe7, 0x9f, 0xe1, 0x8b, 0xe3, 0xf3, 0xcd, 0x5, 0xa3, 0x63, 0xf, 0x45, 0x3a, 0x3a}, {0x7d, 0x12, 0x62, 0x33, 0xf8, 0x7f, 0xa4, 0x8f, 0x15, 0x7c, 0xcd, 0x71, 0xc4, 0x6a, 0x9f, 0xbc, 0x8b, 0xc, 0x22, 0x49, 0x43, 0x45, 0x71, 0x6e, 0x2e, 0x73, 0x9f, 0x21, 0x12, 0x59, 0x64, 0xe}, {0x27, 0x46, 0x39, 0xd8, 0x31, 0x2f, 0x8f, 0x7, 0x10, 0xa5, 0x94, 0xde, 0x83, 0x31, 0x9d, 0x38, 0x80, 0x6f, 0x99, 0x17, 0x6d, 0x6c, 0xe3, 0xd1, 0x7b, 0xa8, 0xa9, 0x93, 0x93, 0x8d, 0x8c, 0x31}, }, { {0x98, 0xd3, 0x1d, 0xab, 0x29, 0x9e, 0x66, 0x5d, 0x3b, 0x9e, 0x2d, 0x34, 0x58, 0x16, 0x92, 0xfc, 0xcd, 0x73, 0x59, 0xf3, 0xfd, 0x1d, 0x85, 0x55, 0xf6, 0xa, 0x95, 0x25, 0xc3, 0x41, 0x9a, 0x50}, {0x19, 0xfe, 0xff, 0x2a, 0x3, 0x5d, 0x74, 0xf2, 0x66, 0xdb, 0x24, 0x7f, 0x49, 0x3c, 0x9f, 0xc, 0xef, 0x98, 0x85, 0xba, 0xe3, 0xd3, 0x98, 0xbc, 0x14, 0x53, 0x1d, 0x9a, 0x67, 0x7c, 0x4c, 0x22}, {0xe9, 0x25, 0xf9, 0xa6, 0xdc, 0x6e, 0xc0, 0xbd, 0x33, 0x1f, 0x1b, 0x64, 0xf4, 0xf3, 0x3e, 0x79, 0x89, 0x3e, 0x83, 0x9d, 0x80, 0x12, 0xec, 0x82, 0x89, 0x13, 0xa1, 0x28, 0x23, 0xf0, 0xbf, 0x5}, }, { {0xe4, 0x12, 0xc5, 0xd, 0xdd, 0xa0, 0x81, 0x68, 
0xfe, 0xfa, 0xa5, 0x44, 0xc8, 0xd, 0xe7, 0x4f, 0x40, 0x52, 0x4a, 0x8f, 0x6b, 0x8e, 0x74, 0x1f, 0xea, 0xa3, 0x1, 0xee, 0xcd, 0x77, 0x62, 0x57}, {0xb, 0xe0, 0xca, 0x23, 0x70, 0x13, 0x32, 0x36, 0x59, 0xcf, 0xac, 0xd1, 0xa, 0xcf, 0x4a, 0x54, 0x88, 0x1c, 0x1a, 0xd2, 0x49, 0x10, 0x74, 0x96, 0xa7, 0x44, 0x2a, 0xfa, 0xc3, 0x8c, 0xb, 0x78}, {0x5f, 0x30, 0x4f, 0x23, 0xbc, 0x8a, 0xf3, 0x1e, 0x8, 0xde, 0x5, 0x14, 0xbd, 0x7f, 0x57, 0x9a, 0xd, 0x2a, 0xe6, 0x34, 0x14, 0xa5, 0x82, 0x5e, 0xa1, 0xb7, 0x71, 0x62, 0x72, 0x18, 0xf4, 0x5f}, }, { {0x40, 0x95, 0xb6, 0x13, 0xe8, 0x47, 0xdb, 0xe5, 0xe1, 0x10, 0x26, 0x43, 0x3b, 0x2a, 0x5d, 0xf3, 0x76, 0x12, 0x78, 0x38, 0xe9, 0x26, 0x1f, 0xac, 0x69, 0xcb, 0xa0, 0xa0, 0x8c, 0xdb, 0xd4, 0x29}, {0x9d, 0xdb, 0x89, 0x17, 0xc, 0x8, 0x8e, 0x39, 0xf5, 0x78, 0xe7, 0xf3, 0x25, 0x20, 0x60, 0xa7, 0x5d, 0x3, 0xbd, 0x6, 0x4c, 0x89, 0x98, 0xfa, 0xbe, 0x66, 0xa9, 0x25, 0xdc, 0x3, 0x6a, 0x10}, {0xd0, 0x53, 0x33, 0x33, 0xaf, 0xa, 0xad, 0xd9, 0xe5, 0x9, 0xd3, 0xac, 0xa5, 0x9d, 0x66, 0x38, 0xf0, 0xf7, 0x88, 0xc8, 0x8a, 0x65, 0x57, 0x3c, 0xfa, 0xbe, 0x2c, 0x5, 0x51, 0x8a, 0xb3, 0x4a}, }, }, { { {0x9c, 0xc0, 0xdd, 0x5f, 0xef, 0xd1, 0xcf, 0xd6, 0xce, 0x5d, 0x57, 0xf7, 0xfd, 0x3e, 0x2b, 0xe8, 0xc2, 0x34, 0x16, 0x20, 0x5d, 0x6b, 0xd5, 0x25, 0x9b, 0x2b, 0xed, 0x4, 0xbb, 0xc6, 0x41, 0x30}, {0x93, 0xd5, 0x68, 0x67, 0x25, 0x2b, 0x7c, 0xda, 0x13, 0xca, 0x22, 0x44, 0x57, 0xc0, 0xc1, 0x98, 0x1d, 0xce, 0xa, 0xca, 0xd5, 0xb, 0xa8, 0xf1, 0x90, 0xa6, 0x88, 0xc0, 0xad, 0xd1, 0xcd, 0x29}, {0x48, 0xe1, 0x56, 0xd9, 0xf9, 0xf2, 0xf2, 0xf, 0x2e, 0x6b, 0x35, 0x9f, 0x75, 0x97, 0xe7, 0xad, 0x5c, 0x2, 0x6c, 0x5f, 0xbb, 0x98, 0x46, 0x1a, 0x7b, 0x9a, 0x4, 0x14, 0x68, 0xbd, 0x4b, 0x10}, }, { {0x63, 0xf1, 0x7f, 0xd6, 0x5f, 0x9a, 0x5d, 0xa9, 0x81, 0x56, 0xc7, 0x4c, 0x9d, 0xe6, 0x2b, 0xe9, 0x57, 0xf2, 0x20, 0xde, 0x4c, 0x2, 0xf8, 0xb7, 0xf5, 0x2d, 0x7, 0xfb, 0x20, 0x2a, 0x4f, 0x20}, {0x67, 0xed, 0xf1, 0x68, 0x31, 0xfd, 0xf0, 0x51, 0xc2, 0x3b, 0x6f, 0xd8, 0xcd, 0x1d, 0x81, 0x2c, 0xde, 0xf2, 
0xd2, 0x4, 0x43, 0x5c, 0xdc, 0x44, 0x49, 0x71, 0x2a, 0x9, 0x57, 0xcc, 0xe8, 0x5b}, {0x79, 0xb0, 0xeb, 0x30, 0x3d, 0x3b, 0x14, 0xc8, 0x30, 0x2e, 0x65, 0xbd, 0x5a, 0x15, 0x89, 0x75, 0x31, 0x5c, 0x6d, 0x8f, 0x31, 0x3c, 0x3c, 0x65, 0x1f, 0x16, 0x79, 0xc2, 0x17, 0xfb, 0x70, 0x25}, }, { {0x5a, 0x24, 0xb8, 0xb, 0x55, 0xa9, 0x2e, 0x19, 0xd1, 0x50, 0x90, 0x8f, 0xa8, 0xfb, 0xe6, 0xc8, 0x35, 0xc9, 0xa4, 0x88, 0x2d, 0xea, 0x86, 0x79, 0x68, 0x86, 0x1, 0xde, 0x91, 0x5f, 0x1c, 0x24}, {0x75, 0x15, 0xb6, 0x2c, 0x7f, 0x36, 0xfa, 0x3e, 0x6c, 0x2, 0xd6, 0x1c, 0x76, 0x6f, 0xf9, 0xf5, 0x62, 0x25, 0xb5, 0x65, 0x2a, 0x14, 0xc7, 0xe8, 0xcd, 0xa, 0x3, 0x53, 0xea, 0x65, 0xcb, 0x3d}, {0xaa, 0x6c, 0xde, 0x40, 0x29, 0x17, 0xd8, 0x28, 0x3a, 0x73, 0xd9, 0x22, 0xf0, 0x2c, 0xbf, 0x8f, 0xd1, 0x1, 0x5b, 0x23, 0xdd, 0xfc, 0xd7, 0x16, 0xe5, 0xf0, 0xcd, 0x5f, 0xdd, 0xe, 0x42, 0x8}, }, { {0xce, 0x10, 0xf4, 0x4, 0x4e, 0xc3, 0x58, 0x3, 0x85, 0x6, 0x6e, 0x27, 0x5a, 0x5b, 0x13, 0xb6, 0x21, 0x15, 0xb9, 0xeb, 0xc7, 0x70, 0x96, 0x5d, 0x9c, 0x88, 0xdb, 0x21, 0xf3, 0x54, 0xd6, 0x4}, {0x4a, 0xfa, 0x62, 0x83, 0xab, 0x20, 0xff, 0xcd, 0x6e, 0x3e, 0x1a, 0xe2, 0xd4, 0x18, 0xe1, 0x57, 0x2b, 0xe6, 0x39, 0xfc, 0x17, 0x96, 0x17, 0xe3, 0xfd, 0x69, 0x17, 0xbc, 0xef, 0x53, 0x9a, 0xd}, {0xd5, 0xb5, 0xbd, 0xdd, 0x16, 0xc1, 0x7d, 0x5e, 0x2d, 0xdd, 0xa5, 0x8d, 0xb6, 0xde, 0x54, 0x29, 0x92, 0xa2, 0x34, 0x33, 0x17, 0x8, 0xb6, 0x1c, 0xd7, 0x1a, 0x99, 0x18, 0x26, 0x4f, 0x7a, 0x4a}, }, { {0x4b, 0x2a, 0x37, 0xaf, 0x91, 0xb2, 0xc3, 0x24, 0xf2, 0x47, 0x81, 0x71, 0x70, 0x82, 0xda, 0x93, 0xf2, 0x9e, 0x89, 0x86, 0x64, 0x85, 0x84, 0xdd, 0x33, 0xee, 0xe0, 0x23, 0x42, 0x31, 0x96, 0x4a}, {0x95, 0x5f, 0xb1, 0x5f, 0x2, 0x18, 0xa7, 0xf4, 0x8f, 0x1b, 0x5c, 0x6b, 0x34, 0x5f, 0xf6, 0x3d, 0x12, 0x11, 0xe0, 0x0, 0x85, 0xf0, 0xfc, 0xcd, 0x48, 0x18, 0xd3, 0xdd, 0x4c, 0xc, 0xb5, 0x11}, {0xd6, 0xff, 0xa4, 0x8, 0x44, 0x27, 0xe8, 0xa6, 0xd9, 0x76, 0x15, 0x9c, 0x7e, 0x17, 0x8e, 0x73, 0xf2, 0xb3, 0x2, 0x3d, 0xb6, 0x48, 0x33, 0x77, 0x51, 0xcc, 0x6b, 0xce, 0x4d, 
0xce, 0x4b, 0x4f}, }, { {0x6f, 0xb, 0x9d, 0xc4, 0x6e, 0x61, 0xe2, 0x30, 0x17, 0x23, 0xec, 0xca, 0x8f, 0x71, 0x56, 0xe4, 0xa6, 0x4f, 0x6b, 0xf2, 0x9b, 0x40, 0xeb, 0x48, 0x37, 0x5f, 0x59, 0x61, 0xe5, 0xce, 0x42, 0x30}, {0x84, 0x25, 0x24, 0xe2, 0x5a, 0xce, 0x1f, 0xa7, 0x9e, 0x8a, 0xf5, 0x92, 0x56, 0x72, 0xea, 0x26, 0xf4, 0x3c, 0xea, 0x1c, 0xd7, 0x9, 0x1a, 0xd2, 0xe6, 0x1, 0x1c, 0xb7, 0x14, 0xdd, 0xfc, 0x73}, {0x41, 0xac, 0x9b, 0x44, 0x79, 0x70, 0x7e, 0x42, 0xa, 0x31, 0xe2, 0xbc, 0x6d, 0xe3, 0x5a, 0x85, 0x7c, 0x1a, 0x84, 0x5f, 0x21, 0x76, 0xae, 0x4c, 0xd6, 0xe1, 0x9c, 0x9a, 0xc, 0x74, 0x9e, 0x38}, }, { {0x28, 0xac, 0xe, 0x57, 0xf6, 0x78, 0xbd, 0xc9, 0xe1, 0x9c, 0x91, 0x27, 0x32, 0xb, 0x5b, 0xe5, 0xed, 0x91, 0x9b, 0xa1, 0xab, 0x3e, 0xfc, 0x65, 0x90, 0x36, 0x26, 0xd6, 0xe5, 0x25, 0xc4, 0x25}, {0xce, 0xb9, 0xdc, 0x34, 0xae, 0xb3, 0xfc, 0x64, 0xad, 0xd0, 0x48, 0xe3, 0x23, 0x3, 0x50, 0x97, 0x1b, 0x38, 0xc6, 0x62, 0x7d, 0xf0, 0xb3, 0x45, 0x88, 0x67, 0x5a, 0x46, 0x79, 0x53, 0x54, 0x61}, {0x6e, 0xde, 0xd7, 0xf1, 0xa6, 0x6, 0x3e, 0x3f, 0x8, 0x23, 0x6, 0x8e, 0x27, 0x76, 0xf9, 0x3e, 0x77, 0x6c, 0x8a, 0x4e, 0x26, 0xf6, 0x14, 0x8c, 0x59, 0x47, 0x48, 0x15, 0x89, 0xa0, 0x39, 0x65}, }, { {0x19, 0x4a, 0xbb, 0x14, 0xd4, 0xdb, 0xc4, 0xdd, 0x8e, 0x4f, 0x42, 0x98, 0x3c, 0xbc, 0xb2, 0x19, 0x69, 0x71, 0xca, 0x36, 0xd7, 0x9f, 0xa8, 0x48, 0x90, 0xbd, 0x19, 0xf0, 0xe, 0x32, 0x65, 0xf}, {0x73, 0xf7, 0xd2, 0xc3, 0x74, 0x1f, 0xd2, 0xe9, 0x45, 0x68, 0xc4, 0x25, 0x41, 0x54, 0x50, 0xc1, 0x33, 0x9e, 0xb9, 0xf9, 0xe8, 0x5c, 0x4e, 0x62, 0x6c, 0x18, 0xcd, 0xc5, 0xaa, 0xe4, 0xc5, 0x11}, {0xc6, 0xe0, 0xfd, 0xca, 0xb1, 0xd1, 0x86, 0xd4, 0x81, 0x51, 0x3b, 0x16, 0xe3, 0xe6, 0x3f, 0x4f, 0x9a, 0x93, 0xf2, 0xfa, 0xd, 0xaf, 0xa8, 0x59, 0x2a, 0x7, 0x33, 0xec, 0xbd, 0xc7, 0xab, 0x4c}, }, }, { { {0x89, 0xd2, 0x78, 0x3f, 0x8f, 0x78, 0x8f, 0xc0, 0x9f, 0x4d, 0x40, 0xa1, 0x2c, 0xa7, 0x30, 0xfe, 0x9d, 0xcc, 0x65, 0xcf, 0xfc, 0x8b, 0x77, 0xf2, 0x21, 0x20, 0xcb, 0x5a, 0x16, 0x98, 0xe4, 0x7e}, {0x2e, 0xa, 0x9c, 0x8, 0x24, 
0x96, 0x9e, 0x23, 0x38, 0x47, 0xfe, 0x3a, 0xc0, 0xc4, 0x48, 0xc7, 0x2a, 0xa1, 0x4f, 0x76, 0x2a, 0xed, 0xdb, 0x17, 0x82, 0x85, 0x1c, 0x32, 0xf0, 0x93, 0x9b, 0x63}, {0xc3, 0xa1, 0x11, 0x91, 0xe3, 0x8, 0xd5, 0x7b, 0x89, 0x74, 0x90, 0x80, 0xd4, 0x90, 0x2b, 0x2b, 0x19, 0xfd, 0x72, 0xae, 0xc2, 0xae, 0xd2, 0xe7, 0xa6, 0x2, 0xb6, 0x85, 0x3c, 0x49, 0xdf, 0xe}, }, { {0x13, 0x41, 0x76, 0x84, 0xd2, 0xc4, 0x67, 0x67, 0x35, 0xf8, 0xf5, 0xf7, 0x3f, 0x40, 0x90, 0xa0, 0xde, 0xbe, 0xe6, 0xca, 0xfa, 0xcf, 0x8f, 0x1c, 0x69, 0xa3, 0xdf, 0xd1, 0x54, 0xc, 0xc0, 0x4}, {0x68, 0x5a, 0x9b, 0x59, 0x58, 0x81, 0xcc, 0xae, 0xe, 0xe2, 0xad, 0xeb, 0xf, 0x4f, 0x57, 0xea, 0x7, 0x7f, 0xb6, 0x22, 0x74, 0x1d, 0xe4, 0x4f, 0xb4, 0x4f, 0x9d, 0x1, 0xe3, 0x92, 0x3b, 0x40}, {0xf8, 0x5c, 0x46, 0x8b, 0x81, 0x2f, 0xc2, 0x4d, 0xf8, 0xef, 0x80, 0x14, 0x5a, 0xf3, 0xa0, 0x71, 0x57, 0xd6, 0xc7, 0x4, 0xad, 0xbf, 0xe8, 0xae, 0xf4, 0x76, 0x61, 0xb2, 0x2a, 0xb1, 0x5b, 0x35}, }, { {0x18, 0x73, 0x8c, 0x5a, 0xc7, 0xda, 0x1, 0xa3, 0x11, 0xaa, 0xce, 0xb3, 0x9d, 0x3, 0x90, 0xed, 0x2d, 0x3f, 0xae, 0x3b, 0xbf, 0x7c, 0x7, 0x6f, 0x8e, 0xad, 0x52, 0xe0, 0xf8, 0xea, 0x18, 0x75}, {0xf4, 0xbb, 0x93, 0x74, 0xcc, 0x64, 0x1e, 0xa7, 0xc3, 0xb0, 0xa3, 0xec, 0xd9, 0x84, 0xbd, 0xe5, 0x85, 0xe7, 0x5, 0xfa, 0xc, 0xc5, 0x6b, 0xa, 0x12, 0xc3, 0x2e, 0x18, 0x32, 0x81, 0x9b, 0xf}, {0x32, 0x6c, 0x7f, 0x1b, 0xc4, 0x59, 0x88, 0xa4, 0x98, 0x32, 0x38, 0xf4, 0xbc, 0x60, 0x2d, 0xf, 0xd9, 0xd1, 0xb1, 0xc9, 0x29, 0xa9, 0x15, 0x18, 0xc4, 0x55, 0x17, 0xbb, 0x1b, 0x87, 0xc3, 0x47}, }, { {0xb0, 0x66, 0x50, 0xc8, 0x50, 0x5d, 0xe6, 0xfb, 0xb0, 0x99, 0xa2, 0xb3, 0xb0, 0xc4, 0xec, 0x62, 0xe0, 0xe8, 0x1a, 0x44, 0xea, 0x54, 0x37, 0xe5, 0x5f, 0x8d, 0xd4, 0xe8, 0x2c, 0xa0, 0xfe, 0x8}, {0x48, 0x4f, 0xec, 0x71, 0x97, 0x53, 0x44, 0x51, 0x6e, 0x5d, 0x8c, 0xc9, 0x7d, 0xb1, 0x5, 0xf8, 0x6b, 0xc6, 0xc3, 0x47, 0x1a, 0xc1, 0x62, 0xf7, 0xdc, 0x99, 0x46, 0x76, 0x85, 0x9b, 0xb8, 0x0}, {0xd0, 0xea, 0xde, 0x68, 0x76, 0xdd, 0x4d, 0x82, 0x23, 0x5d, 0x68, 0x4b, 0x20, 0x45, 0x64, 0xc8, 
0x65, 0xd6, 0x89, 0x5d, 0xcd, 0xcf, 0x14, 0xb5, 0x37, 0xd5, 0x75, 0x4f, 0xa7, 0x29, 0x38, 0x47}, }, { {0xc9, 0x2, 0x39, 0xad, 0x3a, 0x53, 0xd9, 0x23, 0x8f, 0x58, 0x3, 0xef, 0xce, 0xdd, 0xc2, 0x64, 0xb4, 0x2f, 0xe1, 0xcf, 0x90, 0x73, 0x25, 0x15, 0x90, 0xd3, 0xe4, 0x44, 0x4d, 0x8b, 0x66, 0x6c}, {0x18, 0xc4, 0x79, 0x46, 0x75, 0xda, 0xd2, 0x82, 0xf0, 0x8d, 0x61, 0xb2, 0xd8, 0xd7, 0x3b, 0xe6, 0xa, 0xeb, 0x47, 0xac, 0x24, 0xef, 0x5e, 0x35, 0xb4, 0xc6, 0x33, 0x48, 0x4c, 0x68, 0x78, 0x20}, {0xc, 0x82, 0x78, 0x7a, 0x21, 0xcf, 0x48, 0x3b, 0x97, 0x3e, 0x27, 0x81, 0xb2, 0xa, 0x6a, 0xf7, 0x7b, 0xed, 0x8e, 0x8c, 0xa7, 0x65, 0x6c, 0xa9, 0x3f, 0x43, 0x8a, 0x4f, 0x5, 0xa6, 0x11, 0x74}, }, { {0xb4, 0x75, 0xb1, 0x18, 0x3d, 0xe5, 0x9a, 0x57, 0x2, 0xa1, 0x92, 0xf3, 0x59, 0x31, 0x71, 0x68, 0xf5, 0x35, 0xef, 0x1e, 0xba, 0xec, 0x55, 0x84, 0x8f, 0x39, 0x8c, 0x45, 0x72, 0xa8, 0xc9, 0x1e}, {0x6d, 0xc8, 0x9d, 0xb9, 0x32, 0x9d, 0x65, 0x4d, 0x15, 0xf1, 0x3a, 0x60, 0x75, 0xdc, 0x4c, 0x4, 0x88, 0xe4, 0xc2, 0xdc, 0x2c, 0x71, 0x4c, 0xb3, 0xff, 0x34, 0x81, 0xfb, 0x74, 0x65, 0x13, 0x7c}, {0x9b, 0x50, 0xa2, 0x0, 0xd4, 0xa4, 0xe6, 0xb8, 0xb4, 0x82, 0xc8, 0xb, 0x2, 0xd7, 0x81, 0x9b, 0x61, 0x75, 0x95, 0xf1, 0x9b, 0xcc, 0xe7, 0x57, 0x60, 0x64, 0xcd, 0xc7, 0xa5, 0x88, 0xdd, 0x3a}, }, { {0x46, 0x30, 0x39, 0x59, 0xd4, 0x98, 0xc2, 0x85, 0xec, 0x59, 0xf6, 0x5f, 0x98, 0x35, 0x7e, 0x8f, 0x3a, 0x6e, 0xf6, 0xf2, 0x2a, 0xa2, 0x2c, 0x1d, 0x20, 0xa7, 0x6, 0xa4, 0x31, 0x11, 0xba, 0x61}, {0xf2, 0xdc, 0x35, 0xb6, 0x70, 0x57, 0x89, 0xab, 0xbc, 0x1f, 0x6c, 0xf6, 0x6c, 0xef, 0xdf, 0x2, 0x87, 0xd1, 0xb6, 0xbe, 0x68, 0x2, 0x53, 0x85, 0x74, 0x9e, 0x87, 0xcc, 0xfc, 0x29, 0x99, 0x24}, {0x29, 0x90, 0x95, 0x16, 0xf1, 0xa0, 0xd0, 0xa3, 0x89, 0xbd, 0x7e, 0xba, 0x6c, 0x6b, 0x3b, 0x2, 0x7, 0x33, 0x78, 0x26, 0x3e, 0x5a, 0xf1, 0x7b, 0xe7, 0xec, 0xd8, 0xbb, 0xc, 0x31, 0x20, 0x56}, }, { {0xd6, 0x85, 0xe2, 0x77, 0xf4, 0xb5, 0x46, 0x66, 0x93, 0x61, 0x8f, 0x6c, 0x67, 0xff, 0xe8, 0x40, 0xdd, 0x94, 0xb5, 0xab, 0x11, 0x73, 0xec, 0xa6, 0x4d, 
0xec, 0x8c, 0x65, 0xf3, 0x46, 0xc8, 0x7e}, {0x43, 0xd6, 0x34, 0x49, 0x43, 0x93, 0x89, 0x52, 0xf5, 0x22, 0x12, 0xa5, 0x6, 0xf8, 0xdb, 0xb9, 0x22, 0x1c, 0xf4, 0xc3, 0x8f, 0x87, 0x6d, 0x8f, 0x30, 0x97, 0x9d, 0x4d, 0x2a, 0x6a, 0x67, 0x37}, {0xc7, 0x2e, 0xa2, 0x1d, 0x3f, 0x8f, 0x5e, 0x9b, 0x13, 0xcd, 0x1, 0x6c, 0x77, 0x1d, 0xf, 0x13, 0xb8, 0x9f, 0x98, 0xa2, 0xcf, 0x8f, 0x4c, 0x21, 0xd5, 0x9d, 0x9b, 0x39, 0x23, 0xf7, 0xaa, 0x6d}, }, }, { { {0xa2, 0x8e, 0xad, 0xac, 0xbf, 0x4, 0x3b, 0x58, 0x84, 0xe8, 0x8b, 0x14, 0xe8, 0x43, 0xb7, 0x29, 0xdb, 0xc5, 0x10, 0x8, 0x3b, 0x58, 0x1e, 0x2b, 0xaa, 0xbb, 0xb3, 0x8e, 0xe5, 0x49, 0x54, 0x2b}, {0x47, 0xbe, 0x3d, 0xeb, 0x62, 0x75, 0x3a, 0x5f, 0xb8, 0xa0, 0xbd, 0x8e, 0x54, 0x38, 0xea, 0xf7, 0x99, 0x72, 0x74, 0x45, 0x31, 0xe5, 0xc3, 0x0, 0x51, 0xd5, 0x27, 0x16, 0xe7, 0xe9, 0x4, 0x13}, {0xfe, 0x9c, 0xdc, 0x6a, 0xd2, 0x14, 0x98, 0x78, 0xb, 0xdd, 0x48, 0x8b, 0x3f, 0xab, 0x1b, 0x3c, 0xa, 0xc6, 0x79, 0xf9, 0xff, 0xe1, 0xf, 0xda, 0x93, 0xd6, 0x2d, 0x7c, 0x2d, 0xde, 0x68, 0x44}, }, { {0xce, 0x7, 0x63, 0xf8, 0xc6, 0xd8, 0x9a, 0x4b, 0x28, 0xc, 0x5d, 0x43, 0x31, 0x35, 0x11, 0x21, 0x2c, 0x77, 0x7a, 0x65, 0xc5, 0x66, 0xa8, 0xd4, 0x52, 0x73, 0x24, 0x63, 0x7e, 0x42, 0xa6, 0x5d}, {0x9e, 0x46, 0x19, 0x94, 0x5e, 0x35, 0xbb, 0x51, 0x54, 0xc7, 0xdd, 0x23, 0x4c, 0xdc, 0xe6, 0x33, 0x62, 0x99, 0x7f, 0x44, 0xd6, 0xb6, 0xa5, 0x93, 0x63, 0xbd, 0x44, 0xfb, 0x6f, 0x7c, 0xce, 0x6c}, {0xca, 0x22, 0xac, 0xde, 0x88, 0xc6, 0x94, 0x1a, 0xf8, 0x1f, 0xae, 0xbb, 0xf7, 0x6e, 0x6, 0xb9, 0xf, 0x58, 0x59, 0x8d, 0x38, 0x8c, 0xad, 0x88, 0xa8, 0x2c, 0x9f, 0xe7, 0xbf, 0x9a, 0xf2, 0x58}, }, { {0xf6, 0xcd, 0xe, 0x71, 0xbf, 0x64, 0x5a, 0x4b, 0x3c, 0x29, 0x2c, 0x46, 0x38, 0xe5, 0x4c, 0xb1, 0xb9, 0x3a, 0xb, 0xd5, 0x56, 0xd0, 0x43, 0x36, 0x70, 0x48, 0x5b, 0x18, 0x24, 0x37, 0xf9, 0x6a}, {0x68, 0x3e, 0xe7, 0x8d, 0xab, 0xcf, 0xe, 0xe9, 0xa5, 0x76, 0x7e, 0x37, 0x9f, 0x6f, 0x3, 0x54, 0x82, 0x59, 0x1, 0xbe, 0xb, 0x5b, 0x49, 0xf0, 0x36, 0x1e, 0xf4, 0xa7, 0xc4, 0x29, 0x76, 0x57}, {0x88, 0xa8, 
0xc6, 0x9, 0x45, 0x2, 0x20, 0x32, 0x73, 0x89, 0x55, 0x4b, 0x13, 0x36, 0xe0, 0xd2, 0x9f, 0x28, 0x33, 0x3c, 0x23, 0x36, 0xe2, 0x83, 0x8f, 0xc1, 0xae, 0xc, 0xbb, 0x25, 0x1f, 0x70}, }, { {0x13, 0xc1, 0xbe, 0x7c, 0xd9, 0xf6, 0x18, 0x9d, 0xe4, 0xdb, 0xbf, 0x74, 0xe6, 0x6, 0x4a, 0x84, 0xd6, 0x60, 0x4e, 0xac, 0x22, 0xb5, 0xf5, 0x20, 0x51, 0x5e, 0x95, 0x50, 0xc0, 0x5b, 0xa, 0x72}, {0xed, 0x6c, 0x61, 0xe4, 0xf8, 0xb0, 0xa8, 0xc3, 0x7d, 0xa8, 0x25, 0x9e, 0xe, 0x66, 0x0, 0xf7, 0x9c, 0xa5, 0xbc, 0xf4, 0x1f, 0x6, 0xe3, 0x61, 0xe9, 0xb, 0xc4, 0xbd, 0xbf, 0x92, 0xc, 0x2e}, {0x35, 0x5a, 0x80, 0x9b, 0x43, 0x9, 0x3f, 0xc, 0xfc, 0xab, 0x42, 0x62, 0x37, 0x8b, 0x4e, 0xe8, 0x46, 0x93, 0x22, 0x5c, 0xf3, 0x17, 0x14, 0x69, 0xec, 0xf0, 0x4e, 0x14, 0xbb, 0x9c, 0x9b, 0xe}, }, { {0xee, 0xbe, 0xb1, 0x5d, 0xd5, 0x9b, 0xee, 0x8d, 0xb9, 0x3f, 0x72, 0xa, 0x37, 0xab, 0xc3, 0xc9, 0x91, 0xd7, 0x68, 0x1c, 0xbf, 0xf1, 0xa8, 0x44, 0xde, 0x3c, 0xfd, 0x1c, 0x19, 0x44, 0x6d, 0x36}, {0xad, 0x20, 0x57, 0xfb, 0x8f, 0xd4, 0xba, 0xfb, 0xe, 0xd, 0xf9, 0xdb, 0x6b, 0x91, 0x81, 0xee, 0xbf, 0x43, 0x55, 0x63, 0x52, 0x31, 0x81, 0xd4, 0xd8, 0x7b, 0x33, 0x3f, 0xeb, 0x4, 0x11, 0x22}, {0x14, 0x8c, 0xbc, 0xf2, 0x43, 0x17, 0x3c, 0x9e, 0x3b, 0x6c, 0x85, 0xb5, 0xfc, 0x26, 0xda, 0x2e, 0x97, 0xfb, 0xa7, 0x68, 0xe, 0x2f, 0xb8, 0xcc, 0x44, 0x32, 0x59, 0xbc, 0xe6, 0xa4, 0x67, 0x41}, }, { {0xee, 0x8f, 0xce, 0xf8, 0x65, 0x26, 0xbe, 0xc2, 0x2c, 0xd6, 0x80, 0xe8, 0x14, 0xff, 0x67, 0xe9, 0xee, 0x4e, 0x36, 0x2f, 0x7e, 0x6e, 0x2e, 0xf1, 0xf6, 0xd2, 0x7e, 0xcb, 0x70, 0x33, 0xb3, 0x34}, {0x0, 0x27, 0xf6, 0x76, 0x28, 0x9d, 0x3b, 0x64, 0xeb, 0x68, 0x76, 0xe, 0x40, 0x9d, 0x1d, 0x5d, 0x84, 0x6, 0xfc, 0x21, 0x3, 0x43, 0x4b, 0x1b, 0x6a, 0x24, 0x55, 0x22, 0x7e, 0xbb, 0x38, 0x79}, {0xcc, 0xd6, 0x81, 0x86, 0xee, 0x91, 0xc5, 0xcd, 0x53, 0xa7, 0x85, 0xed, 0x9c, 0x10, 0x2, 0xce, 0x83, 0x88, 0x80, 0x58, 0xc1, 0x85, 0x74, 0xed, 0xe4, 0x65, 0xfe, 0x2d, 0x6e, 0xfc, 0x76, 0x11}, }, { {0xb8, 0xe, 0x77, 0x49, 0x89, 0xe2, 0x90, 0xdb, 0xa3, 0x40, 0xf4, 0xac, 
0x2a, 0xcc, 0xfb, 0x98, 0x9b, 0x87, 0xd7, 0xde, 0xfe, 0x4f, 0x35, 0x21, 0xb6, 0x6, 0x69, 0xf2, 0x54, 0x3e, 0x6a, 0x1f}, {0x9b, 0x61, 0x9c, 0x5b, 0xd0, 0x6c, 0xaf, 0xb4, 0x80, 0x84, 0xa5, 0xb2, 0xf4, 0xc9, 0xdf, 0x2d, 0xc4, 0x4d, 0xe9, 0xeb, 0x2, 0xa5, 0x4f, 0x3d, 0x34, 0x5f, 0x7d, 0x67, 0x4c, 0x3a, 0xfc, 0x8}, {0xea, 0x34, 0x7, 0xd3, 0x99, 0xc1, 0xa4, 0x60, 0xd6, 0x5c, 0x16, 0x31, 0xb6, 0x85, 0xc0, 0x40, 0x95, 0x82, 0x59, 0xf7, 0x23, 0x3e, 0x33, 0xe2, 0xd1, 0x0, 0xb9, 0x16, 0x1, 0xad, 0x2f, 0x4f}, }, { {0x38, 0xb6, 0x3b, 0xb7, 0x1d, 0xd9, 0x2c, 0x96, 0x8, 0x9c, 0x12, 0xfc, 0xaa, 0x77, 0x5, 0xe6, 0x89, 0x16, 0xb6, 0xf3, 0x39, 0x9b, 0x61, 0x6f, 0x81, 0xee, 0x44, 0x29, 0x5f, 0x99, 0x51, 0x34}, {0x54, 0x4e, 0xae, 0x94, 0x41, 0xb2, 0xbe, 0x44, 0x6c, 0xef, 0x57, 0x18, 0x51, 0x1c, 0x54, 0x5f, 0x98, 0x4, 0x8d, 0x36, 0x2d, 0x6b, 0x1e, 0xa6, 0xab, 0xf7, 0x2e, 0x97, 0xa4, 0x84, 0x54, 0x44}, {0x7c, 0x7d, 0xea, 0x9f, 0xd0, 0xfc, 0x52, 0x91, 0xf6, 0x5c, 0x93, 0xb0, 0x94, 0x6c, 0x81, 0x4a, 0x40, 0x5c, 0x28, 0x47, 0xaa, 0x9a, 0x8e, 0x25, 0xb7, 0x93, 0x28, 0x4, 0xa6, 0x9c, 0xb8, 0x10}, }, }, { { {0x6e, 0xf0, 0x45, 0x5a, 0xbe, 0x41, 0x39, 0x75, 0x65, 0x5f, 0x9c, 0x6d, 0xed, 0xae, 0x7c, 0xd0, 0xb6, 0x51, 0xff, 0x72, 0x9c, 0x6b, 0x77, 0x11, 0xa9, 0x4d, 0xd, 0xef, 0xd9, 0xd1, 0xd2, 0x17}, {0x9c, 0x28, 0x18, 0x97, 0x49, 0x47, 0x59, 0x3d, 0x26, 0x3f, 0x53, 0x24, 0xc5, 0xf8, 0xeb, 0x12, 0x15, 0xef, 0xc3, 0x14, 0xcb, 0xbf, 0x62, 0x2, 0x8e, 0x51, 0xb7, 0x77, 0xd5, 0x78, 0xb8, 0x20}, {0x6a, 0x3e, 0x3f, 0x7, 0x18, 0xaf, 0xf2, 0x27, 0x69, 0x10, 0x52, 0xd7, 0x19, 0xe5, 0x3f, 0xfd, 0x22, 0x0, 0xa6, 0x3c, 0x2c, 0xb7, 0xe3, 0x22, 0xa7, 0xc6, 0x65, 0xcc, 0x63, 0x4f, 0x21, 0x72}, }, { {0xc9, 0x29, 0x3b, 0xf4, 0xb9, 0xb7, 0x9d, 0x1d, 0x75, 0x8f, 0x51, 0x4f, 0x4a, 0x82, 0x5, 0xd6, 0xc4, 0x9d, 0x2f, 0x31, 0xbd, 0x72, 0xc0, 0xf2, 0xb0, 0x45, 0x15, 0x5a, 0x85, 0xac, 0x24, 0x1f}, {0x93, 0xa6, 0x7, 0x53, 0x40, 0x7f, 0xe3, 0xb4, 0x95, 0x67, 0x33, 0x2f, 0xd7, 0x14, 0xa7, 0xab, 0x99, 0x10, 0x76, 0x73, 0xa7, 
0xd0, 0xfb, 0xd6, 0xc9, 0xcb, 0x71, 0x81, 0xc5, 0x48, 0xdf, 0x5f}, {0xaa, 0x5, 0x95, 0x8e, 0x32, 0x8, 0xd6, 0x24, 0xee, 0x20, 0x14, 0xc, 0xd1, 0xc1, 0x48, 0x47, 0xa2, 0x25, 0xfb, 0x6, 0x5c, 0xe4, 0xff, 0xc7, 0xe6, 0x95, 0xe3, 0x2a, 0x9e, 0x73, 0xba, 0x0}, }, { {0x26, 0xbb, 0x88, 0xea, 0xf5, 0x26, 0x44, 0xae, 0xfb, 0x3b, 0x97, 0x84, 0xd9, 0x79, 0x6, 0x36, 0x50, 0x4e, 0x69, 0x26, 0xc, 0x3, 0x9f, 0x5c, 0x26, 0xd2, 0x18, 0xd5, 0xe7, 0x7d, 0x29, 0x72}, {0xd6, 0x90, 0x87, 0x5c, 0xde, 0x98, 0x2e, 0x59, 0xdf, 0xa2, 0xc2, 0x45, 0xd3, 0xb7, 0xbf, 0xe5, 0x22, 0x99, 0xb4, 0xf9, 0x60, 0x3b, 0x5a, 0x11, 0xf3, 0x78, 0xad, 0x67, 0x3e, 0x3a, 0x28, 0x3}, {0x39, 0xb9, 0xc, 0xbe, 0xc7, 0x1d, 0x24, 0x48, 0x80, 0x30, 0x63, 0x8b, 0x4d, 0x9b, 0xf1, 0x32, 0x8, 0x93, 0x28, 0x2, 0xd, 0xc9, 0xdf, 0xd3, 0x45, 0x19, 0x27, 0x46, 0x68, 0x29, 0xe1, 0x5}, }, { {0x50, 0x45, 0x2c, 0x24, 0xc8, 0xbb, 0xbf, 0xad, 0xd9, 0x81, 0x30, 0xd0, 0xec, 0xc, 0xc8, 0xbc, 0x92, 0xdf, 0xc8, 0xf5, 0xa6, 0x66, 0x35, 0x84, 0x4c, 0xce, 0x58, 0x82, 0xd3, 0x25, 0xcf, 0x78}, {0x5a, 0x49, 0x9c, 0x2d, 0xb3, 0xee, 0x82, 0xba, 0x7c, 0xb9, 0x2b, 0xf1, 0xfc, 0xc8, 0xef, 0xce, 0xe0, 0xd1, 0xb5, 0x93, 0xae, 0xab, 0x2d, 0xb0, 0x9b, 0x8d, 0x69, 0x13, 0x9c, 0xc, 0xc0, 0x39}, {0x68, 0x9d, 0x48, 0x31, 0x8e, 0x6b, 0xae, 0x15, 0x87, 0xf0, 0x2b, 0x9c, 0xab, 0x1c, 0x85, 0xaa, 0x5, 0xfa, 0x4e, 0xf0, 0x97, 0x5a, 0xa7, 0xc9, 0x32, 0xf8, 0x3f, 0x6b, 0x7, 0x52, 0x6b, 0x0}, }, { {0x2d, 0x8, 0xce, 0xb9, 0x16, 0x7e, 0xcb, 0xf5, 0x29, 0xbc, 0x7a, 0x41, 0x4c, 0xf1, 0x7, 0x34, 0xab, 0xa7, 0xf4, 0x2b, 0xce, 0x6b, 0xb3, 0xd4, 0xce, 0x75, 0x9f, 0x1a, 0x56, 0xe9, 0xe2, 0x7d}, {0x1c, 0x78, 0x95, 0x9d, 0xe1, 0xcf, 0xe0, 0x29, 0xe2, 0x10, 0x63, 0x96, 0x18, 0xdf, 0x81, 0xb6, 0x39, 0x6b, 0x51, 0x70, 0xd3, 0x39, 0xdf, 0x57, 0x22, 0x61, 0xc7, 0x3b, 0x44, 0xe3, 0x57, 0x4d}, {0xcb, 0x5e, 0xa5, 0xb6, 0xf4, 0xd4, 0x70, 0xde, 0x99, 0xdb, 0x85, 0x5d, 0x7f, 0x52, 0x1, 0x48, 0x81, 0x9a, 0xee, 0xd3, 0x40, 0xc4, 0xc9, 0xdb, 0xed, 0x29, 0x60, 0x1a, 0xaf, 0x90, 0x2a, 0x6b}, 
}, { {0xa, 0xd8, 0xb2, 0x5b, 0x24, 0xf3, 0xeb, 0x77, 0x9b, 0x7, 0xb9, 0x2f, 0x47, 0x1b, 0x30, 0xd8, 0x33, 0x73, 0xee, 0x4c, 0xf2, 0xe6, 0x47, 0xc6, 0x9, 0x21, 0x6c, 0x27, 0xc8, 0x12, 0x58, 0x46}, {0x97, 0x1e, 0xe6, 0x9a, 0xfc, 0xf4, 0x23, 0x69, 0xd1, 0x5f, 0x3f, 0xe0, 0x1d, 0x28, 0x35, 0x57, 0x2d, 0xd1, 0xed, 0xe6, 0x43, 0xae, 0x64, 0xa7, 0x4a, 0x3e, 0x2d, 0xd1, 0xe9, 0xf4, 0xd8, 0x5f}, {0xd9, 0x62, 0x10, 0x2a, 0xb2, 0xbe, 0x43, 0x4d, 0x16, 0xdc, 0x31, 0x38, 0x75, 0xfb, 0x65, 0x70, 0xd7, 0x68, 0x29, 0xde, 0x7b, 0x4a, 0xd, 0x18, 0x90, 0x67, 0xb1, 0x1c, 0x2b, 0x2c, 0xb3, 0x5}, }, { {0x95, 0x81, 0xd5, 0x7a, 0x2c, 0xa4, 0xfc, 0xf7, 0xcc, 0xf3, 0x33, 0x43, 0x6e, 0x28, 0x14, 0x32, 0x9d, 0x97, 0xb, 0x34, 0xd, 0x9d, 0xc2, 0xb6, 0xe1, 0x7, 0x73, 0x56, 0x48, 0x1a, 0x77, 0x31}, {0xfd, 0xa8, 0x4d, 0xd2, 0xcc, 0x5e, 0xc0, 0xc8, 0x83, 0xef, 0xdf, 0x5, 0xac, 0x1a, 0xcf, 0xa1, 0x61, 0xcd, 0xf9, 0x7d, 0xf2, 0xef, 0xbe, 0xdb, 0x99, 0x1e, 0x47, 0x7b, 0xa3, 0x56, 0x55, 0x3b}, {0x82, 0xd4, 0x4d, 0xe1, 0x24, 0xc5, 0xb0, 0x32, 0xb6, 0xa4, 0x2b, 0x1a, 0x54, 0x51, 0xb3, 0xed, 0xf3, 0x5a, 0x2b, 0x28, 0x48, 0x60, 0xd1, 0xa3, 0xeb, 0x36, 0x73, 0x7a, 0xd2, 0x79, 0xc0, 0x4f}, }, { {0xd, 0xc5, 0x86, 0xc, 0x44, 0x8b, 0x34, 0xdc, 0x51, 0xe6, 0x94, 0xcc, 0xc9, 0xcb, 0x37, 0x13, 0xb9, 0x3c, 0x3e, 0x64, 0x4d, 0xf7, 0x22, 0x64, 0x8, 0xcd, 0xe3, 0xba, 0xc2, 0x70, 0x11, 0x24}, {0x7f, 0x2f, 0xbf, 0x89, 0xb0, 0x38, 0xc9, 0x51, 0xa7, 0xe9, 0xdf, 0x2, 0x65, 0xbd, 0x97, 0x24, 0x53, 0xe4, 0x80, 0x78, 0x9c, 0xc0, 0xff, 0xff, 0x92, 0x8e, 0xf9, 0xca, 0xce, 0x67, 0x45, 0x12}, {0xb4, 0x73, 0xc4, 0xa, 0x86, 0xab, 0xf9, 0x3f, 0x35, 0xe4, 0x13, 0x1, 0xee, 0x1d, 0x91, 0xf0, 0xaf, 0xc4, 0xc6, 0xeb, 0x60, 0x50, 0xe7, 0x4a, 0xd, 0x0, 0x87, 0x6c, 0x96, 0x12, 0x86, 0x3f}, }, }, { { {0x13, 0x8d, 0x4, 0x36, 0xfa, 0xfc, 0x18, 0x9c, 0xdd, 0x9d, 0x89, 0x73, 0xb3, 0x9d, 0x15, 0x29, 0xaa, 0xd0, 0x92, 0x9f, 0xb, 0x35, 0x9f, 0xdc, 0xd4, 0x19, 0x8a, 0x87, 0xee, 0x7e, 0xf5, 0x26}, {0xde, 0xd, 0x2a, 0x78, 0xc9, 0xc, 0x9a, 0x55, 0x85, 
0x83, 0x71, 0xea, 0xb2, 0xcd, 0x1d, 0x55, 0x8c, 0x23, 0xef, 0x31, 0x5b, 0x86, 0x62, 0x7f, 0x3d, 0x61, 0x73, 0x79, 0x76, 0xa7, 0x4a, 0x50}, {0xb1, 0xef, 0x87, 0x56, 0xd5, 0x2c, 0xab, 0xc, 0x7b, 0xf1, 0x7a, 0x24, 0x62, 0xd1, 0x80, 0x51, 0x67, 0x24, 0x5a, 0x4f, 0x34, 0x5a, 0xc1, 0x85, 0x69, 0x30, 0xba, 0x9d, 0x3d, 0x94, 0x41, 0x40}, }, { {0xdd, 0xaa, 0x6c, 0xa2, 0x43, 0x77, 0x21, 0x4b, 0xce, 0xb7, 0x8a, 0x64, 0x24, 0xb4, 0xa6, 0x47, 0xe3, 0xc9, 0xfb, 0x3, 0x7a, 0x4f, 0x1d, 0xcb, 0x19, 0xd0, 0x0, 0x98, 0x42, 0x31, 0xd9, 0x12}, {0x96, 0xcc, 0xeb, 0x43, 0xba, 0xee, 0xc0, 0xc3, 0xaf, 0x9c, 0xea, 0x26, 0x9c, 0x9c, 0x74, 0x8d, 0xc6, 0xcc, 0x77, 0x1c, 0xee, 0x95, 0xfa, 0xd9, 0xf, 0x34, 0x84, 0x76, 0xd9, 0xa1, 0x20, 0x14}, {0x4f, 0x59, 0x37, 0xd3, 0x99, 0x77, 0xc6, 0x0, 0x7b, 0xa4, 0x3a, 0xb2, 0x40, 0x51, 0x3c, 0x5e, 0x95, 0xf3, 0x5f, 0xe3, 0x54, 0x28, 0x18, 0x44, 0x12, 0xa0, 0x59, 0x43, 0x31, 0x92, 0x4f, 0x1b}, }, { {0xb1, 0x66, 0x98, 0xa4, 0x30, 0x30, 0xcf, 0x33, 0x59, 0x48, 0x5f, 0x21, 0xd2, 0x73, 0x1f, 0x25, 0xf6, 0xf4, 0xde, 0x51, 0x40, 0xaa, 0x82, 0xab, 0xf6, 0x23, 0x9a, 0x6f, 0xd5, 0x91, 0xf1, 0x5f}, {0x51, 0x9, 0x15, 0x89, 0x9d, 0x10, 0x5c, 0x3e, 0x6a, 0x69, 0xe9, 0x2d, 0x91, 0xfa, 0xce, 0x39, 0x20, 0x30, 0x5f, 0x97, 0x3f, 0xe4, 0xea, 0x20, 0xae, 0x2d, 0x13, 0x7f, 0x2a, 0x57, 0x9b, 0x23}, {0x68, 0x90, 0x2d, 0xac, 0x33, 0xd4, 0x9e, 0x81, 0x23, 0x85, 0xc9, 0x5f, 0x79, 0xab, 0x83, 0x28, 0x3d, 0xeb, 0x93, 0x55, 0x80, 0x72, 0x45, 0xef, 0xcb, 0x36, 0x8f, 0x75, 0x6a, 0x52, 0xc, 0x2}, }, { {0x89, 0xcc, 0x42, 0xf0, 0x59, 0xef, 0x31, 0xe9, 0xb6, 0x4b, 0x12, 0x8e, 0x9d, 0x9c, 0x58, 0x2c, 0x97, 0x59, 0xc7, 0xae, 0x8a, 0xe1, 0xc8, 0xad, 0xc, 0xc5, 0x2, 0x56, 0xa, 0xfe, 0x2c, 0x45}, {0xbc, 0xdb, 0xd8, 0x9e, 0xf8, 0x34, 0x98, 0x77, 0x6c, 0xa4, 0x7c, 0xdc, 0xf9, 0xaa, 0xf2, 0xc8, 0x74, 0xb0, 0xe1, 0xa3, 0xdc, 0x4c, 0x52, 0xa9, 0x77, 0x38, 0x31, 0x15, 0x46, 0xcc, 0xaa, 0x2}, {0xdf, 0x77, 0x78, 0x64, 0xa0, 0xf7, 0xa0, 0x86, 0x9f, 0x7c, 0x60, 0xe, 0x27, 0x64, 0xc4, 0xbb, 0xc9, 0x11, 
0xfb, 0xf1, 0x25, 0xea, 0x17, 0xab, 0x7b, 0x87, 0x4b, 0x30, 0x7b, 0x7d, 0xfb, 0x4c}, }, { {0x12, 0xef, 0x89, 0x97, 0xc2, 0x99, 0x86, 0xe2, 0xd, 0x19, 0x57, 0xdf, 0x71, 0xcd, 0x6e, 0x2b, 0xd0, 0x70, 0xc9, 0xec, 0x57, 0xc8, 0x43, 0xc3, 0xc5, 0x3a, 0x4d, 0x43, 0xbc, 0x4c, 0x1d, 0x5b}, {0xfe, 0x75, 0x9b, 0xb8, 0x6c, 0x3d, 0xb4, 0x72, 0x80, 0xdc, 0x6a, 0x9c, 0xd9, 0x94, 0xc6, 0x54, 0x9f, 0x4c, 0xe3, 0x3e, 0x37, 0xaa, 0xc3, 0xb8, 0x64, 0x53, 0x7, 0x39, 0x2b, 0x62, 0xb4, 0x14}, {0x26, 0x9f, 0xa, 0xcc, 0x15, 0x26, 0xfb, 0xb6, 0xe5, 0xcc, 0x8d, 0xb8, 0x2b, 0xe, 0x4f, 0x3a, 0x5, 0xa7, 0x69, 0x33, 0x8b, 0x49, 0x1, 0x13, 0xd1, 0x2d, 0x59, 0x58, 0x12, 0xf7, 0x98, 0x2f}, }, { {0x1, 0xa7, 0x54, 0x4f, 0x44, 0xae, 0x12, 0x2e, 0xde, 0xd7, 0xcb, 0xa9, 0xf0, 0x3e, 0xfe, 0xfc, 0xe0, 0x5d, 0x83, 0x75, 0xd, 0x89, 0xbf, 0xce, 0x54, 0x45, 0x61, 0xe7, 0xe9, 0x62, 0x80, 0x1d}, {0x56, 0x9e, 0xf, 0xb5, 0x4c, 0xa7, 0x94, 0xc, 0x20, 0x13, 0x8e, 0x8e, 0xa9, 0xf4, 0x1f, 0x5b, 0x67, 0xf, 0x30, 0x82, 0x21, 0xcc, 0x2a, 0x9a, 0xf9, 0xaa, 0x6, 0xd8, 0x49, 0xe2, 0x6a, 0x3a}, {0x5a, 0x7c, 0x90, 0xa9, 0x85, 0xda, 0x7a, 0x65, 0x62, 0xf, 0xb9, 0x91, 0xb5, 0xa8, 0xe, 0x1a, 0xe9, 0xb4, 0x34, 0xdf, 0xfb, 0x1d, 0xe, 0x8d, 0xf3, 0x5f, 0xf2, 0xae, 0xe8, 0x8c, 0x8b, 0x29}, }, { {0xde, 0x65, 0x21, 0xa, 0xea, 0x72, 0x7a, 0x83, 0xf6, 0x79, 0xcf, 0xb, 0xb4, 0x7, 0xab, 0x3f, 0x70, 0xae, 0x38, 0x77, 0xc7, 0x36, 0x16, 0x52, 0xdc, 0xd7, 0xa7, 0x3, 0x18, 0x27, 0xa6, 0x6b}, {0xb2, 0xc, 0xf7, 0xef, 0x53, 0x79, 0x92, 0x2a, 0x76, 0x70, 0x15, 0x79, 0x2a, 0xc9, 0x89, 0x4b, 0x6a, 0xcf, 0xa7, 0x30, 0x7a, 0x45, 0x18, 0x94, 0x85, 0xe4, 0x5c, 0x4d, 0x40, 0xa8, 0xb8, 0x34}, {0x35, 0x33, 0x69, 0x83, 0xb5, 0xec, 0x6e, 0xc2, 0xfd, 0xfe, 0xb5, 0x63, 0xdf, 0x13, 0xa8, 0xd5, 0x73, 0x25, 0xb2, 0xa4, 0x9a, 0xaa, 0x93, 0xa2, 0x6a, 0x1c, 0x5e, 0x46, 0xdd, 0x2b, 0xd6, 0x71}, }, { {0xf5, 0x5e, 0xf7, 0xb1, 0xda, 0xb5, 0x2d, 0xcd, 0xf5, 0x65, 0xb0, 0x16, 0xcf, 0x95, 0x7f, 0xd7, 0x85, 0xf0, 0x49, 0x3f, 0xea, 0x1f, 0x57, 0x14, 0x3d, 0x2b, 0x2b, 0x26, 
0x21, 0x36, 0x33, 0x1c}, {0x80, 0xdf, 0x78, 0xd3, 0x28, 0xcc, 0x33, 0x65, 0xb4, 0xa4, 0xf, 0xa, 0x79, 0x43, 0xdb, 0xf6, 0x5a, 0xda, 0x1, 0xf7, 0xf9, 0x5f, 0x64, 0xe3, 0xa4, 0x2b, 0x17, 0xf3, 0x17, 0xf3, 0xd5, 0x74}, {0x81, 0xca, 0xd9, 0x67, 0x54, 0xe5, 0x6f, 0xa8, 0x37, 0x8c, 0x29, 0x2b, 0x75, 0x7c, 0x8b, 0x39, 0x3b, 0x62, 0xac, 0xe3, 0x92, 0x8, 0x6d, 0xda, 0x8c, 0xd9, 0xe9, 0x47, 0x45, 0xcc, 0xeb, 0x4a}, }, }, { { {0x10, 0xb6, 0x54, 0x73, 0x9e, 0x8d, 0x40, 0xb, 0x6e, 0x5b, 0xa8, 0x5b, 0x53, 0x32, 0x6b, 0x80, 0x7, 0xa2, 0x58, 0x4a, 0x3, 0x3a, 0xe6, 0xdb, 0x2c, 0xdf, 0xa1, 0xc9, 0xdd, 0xd9, 0x3b, 0x17}, {0xc9, 0x1, 0x6d, 0x27, 0x1b, 0x7, 0xf0, 0x12, 0x70, 0x8c, 0xc4, 0x86, 0xc5, 0xba, 0xb8, 0xe7, 0xa9, 0xfb, 0xd6, 0x71, 0x9b, 0x12, 0x8, 0x53, 0x92, 0xb7, 0x3d, 0x5a, 0xf9, 0xfb, 0x88, 0x5d}, {0xdf, 0x72, 0x58, 0xfe, 0x1e, 0xf, 0x50, 0x2b, 0xc1, 0x18, 0x39, 0xd4, 0x2e, 0x58, 0xd6, 0x58, 0xe0, 0x3a, 0x67, 0xc9, 0x8e, 0x27, 0xed, 0xe6, 0x19, 0xa3, 0x9e, 0xb1, 0x13, 0xcd, 0xe1, 0x6}, }, { {0x53, 0x3, 0x5b, 0x9e, 0x62, 0xaf, 0x2b, 0x47, 0x47, 0x4, 0x8d, 0x27, 0x90, 0xb, 0xaa, 0x3b, 0x27, 0xbf, 0x43, 0x96, 0x46, 0x5f, 0x78, 0xc, 0x13, 0x7b, 0x83, 0x8d, 0x1a, 0x6a, 0x3a, 0x7f}, {0x23, 0x6f, 0x16, 0x6f, 0x51, 0xad, 0xd0, 0x40, 0xbe, 0x6a, 0xab, 0x1f, 0x93, 0x32, 0x8e, 0x11, 0x8e, 0x8, 0x4d, 0xa0, 0x14, 0x5e, 0xe3, 0x3f, 0x66, 0x62, 0xe1, 0x26, 0x35, 0x60, 0x80, 0x30}, {0xb, 0x80, 0x3d, 0x5d, 0x39, 0x44, 0xe6, 0xf7, 0xf6, 0xed, 0x1, 0xc9, 0x55, 0xd5, 0xa8, 0x95, 0x39, 0x63, 0x2c, 0x59, 0x30, 0x78, 0xcd, 0x68, 0x7e, 0x30, 0x51, 0x2e, 0xed, 0xfd, 0xd0, 0x30}, }, { {0x50, 0x47, 0xb8, 0x68, 0x1e, 0x97, 0xb4, 0x9c, 0xcf, 0xbb, 0x64, 0x66, 0x29, 0x72, 0x95, 0xa0, 0x2b, 0x41, 0xfa, 0x72, 0x26, 0xe7, 0x8d, 0x5c, 0xd9, 0x89, 0xc5, 0x51, 0x43, 0x8, 0x15, 0x46}, {0xb3, 0x33, 0x12, 0xf2, 0x1a, 0x4d, 0x59, 0xe0, 0x9c, 0x4d, 0xcc, 0xf0, 0x8e, 0xe7, 0xdb, 0x1b, 0x77, 0x9a, 0x49, 0x8f, 0x7f, 0x18, 0x65, 0x69, 0x68, 0x98, 0x9, 0x2c, 0x20, 0x14, 0x92, 0xa}, {0x2e, 0xa0, 0xb9, 0xae, 0xc0, 0x19, 
0x90, 0xbc, 0xae, 0x4c, 0x3, 0x16, 0xd, 0x11, 0xc7, 0x55, 0xec, 0x32, 0x99, 0x65, 0x1, 0xf5, 0x6d, 0xe, 0xfe, 0x5d, 0xca, 0x95, 0x28, 0xd, 0xca, 0x3b}, }, { {0xbf, 0x1, 0xcc, 0x9e, 0xb6, 0x8e, 0x68, 0x9c, 0x6f, 0x89, 0x44, 0xa6, 0xad, 0x83, 0xbc, 0xf0, 0xe2, 0x9f, 0x7a, 0x5f, 0x5f, 0x95, 0x2d, 0xca, 0x41, 0x82, 0xf2, 0x8d, 0x3, 0xb4, 0xa8, 0x4e}, {0xa4, 0x62, 0x5d, 0x3c, 0xbc, 0x31, 0xf0, 0x40, 0x60, 0x7a, 0xf0, 0xcf, 0x3e, 0x8b, 0xfc, 0x19, 0x45, 0xb5, 0xf, 0x13, 0xa2, 0x3d, 0x18, 0x98, 0xcd, 0x13, 0x8f, 0xae, 0xdd, 0xde, 0x31, 0x56}, {0x2, 0xd2, 0xca, 0xf1, 0xa, 0x46, 0xed, 0x2a, 0x83, 0xee, 0x8c, 0xa4, 0x5, 0x53, 0x30, 0x46, 0x5f, 0x1a, 0xf1, 0x49, 0x45, 0x77, 0x21, 0x91, 0x63, 0xa4, 0x2c, 0x54, 0x30, 0x9, 0xce, 0x24}, }, { {0x85, 0xb, 0xf3, 0xfd, 0x55, 0xa1, 0xcf, 0x3f, 0xa4, 0x2e, 0x37, 0x36, 0x8e, 0x16, 0xf7, 0xd2, 0x44, 0xf8, 0x92, 0x64, 0xde, 0x64, 0xe0, 0xb2, 0x80, 0x42, 0x4f, 0x32, 0xa7, 0x28, 0x99, 0x54}, {0x6, 0xc1, 0x6, 0xfd, 0xf5, 0x90, 0xe8, 0x1f, 0xf2, 0x10, 0x88, 0x5d, 0x35, 0x68, 0xc4, 0xb5, 0x3e, 0xaf, 0x8c, 0x6e, 0xfe, 0x8, 0x78, 0x82, 0x4b, 0xd7, 0x6, 0x8a, 0xc2, 0xe3, 0xd4, 0x41}, {0x2e, 0x1a, 0xee, 0x63, 0xa7, 0x32, 0x6e, 0xf2, 0xea, 0xfd, 0x5f, 0xd2, 0xb7, 0xe4, 0x91, 0xae, 0x69, 0x4d, 0x7f, 0xd1, 0x3b, 0xd3, 0x3b, 0xbc, 0x6a, 0xff, 0xdc, 0xc0, 0xde, 0x66, 0x1b, 0x49}, }, { {0xa1, 0x64, 0xda, 0xd0, 0x8e, 0x4a, 0xf0, 0x75, 0x4b, 0x28, 0xe2, 0x67, 0xaf, 0x2c, 0x22, 0xed, 0xa4, 0x7b, 0x7b, 0x1f, 0x79, 0xa3, 0x34, 0x82, 0x67, 0x8b, 0x1, 0xb7, 0xb0, 0xb8, 0xf6, 0x4c}, {0xa7, 0x32, 0xea, 0xc7, 0x3d, 0xb1, 0xf5, 0x98, 0x98, 0xdb, 0x16, 0x7e, 0xcc, 0xf8, 0xd5, 0xe3, 0x47, 0xd9, 0xf8, 0xcb, 0x52, 0xbf, 0xa, 0xac, 0xac, 0xe4, 0x5e, 0xc8, 0xd0, 0x38, 0xf3, 0x8}, {0xbd, 0x73, 0x1a, 0x99, 0x21, 0xa8, 0x83, 0xc3, 0x7a, 0xc, 0x32, 0xdf, 0x1, 0xbc, 0x27, 0xab, 0x63, 0x70, 0x77, 0x84, 0x1b, 0x33, 0x3d, 0xc1, 0x99, 0x8a, 0x7, 0xeb, 0x82, 0x4a, 0xd, 0x53}, }, { {0x9e, 0xbf, 0x9a, 0x6c, 0x45, 0x73, 0x69, 0x6d, 0x80, 0xa8, 0x0, 0x49, 0xfc, 0xb2, 0x7f, 0x25, 
0x50, 0xb8, 0xcf, 0xc8, 0x12, 0xf4, 0xac, 0x2b, 0x5b, 0xbd, 0xbf, 0xc, 0xe0, 0xe7, 0xb3, 0xd}, {0x25, 0x48, 0xf9, 0xe1, 0x30, 0x36, 0x4c, 0x0, 0x5a, 0x53, 0xab, 0x8c, 0x26, 0x78, 0x2d, 0x7e, 0x8b, 0xff, 0x84, 0xcc, 0x23, 0x23, 0x48, 0xc7, 0xb9, 0x70, 0x17, 0x10, 0x3f, 0x75, 0xea, 0x65}, {0x63, 0x63, 0x9, 0xe2, 0x3e, 0xfc, 0x66, 0x3d, 0x6b, 0xcb, 0xb5, 0x61, 0x7f, 0x2c, 0xd6, 0x81, 0x1a, 0x3b, 0x44, 0x13, 0x42, 0x4, 0xbe, 0xf, 0xdb, 0xa1, 0xe1, 0x21, 0x19, 0xec, 0xa4, 0x2}, }, { {0x5f, 0x79, 0xcf, 0xf1, 0x62, 0x61, 0xc8, 0xf5, 0xf2, 0x57, 0xee, 0x26, 0x19, 0x86, 0x8c, 0x11, 0x78, 0x35, 0x6, 0x1c, 0x85, 0x24, 0x21, 0x17, 0xcf, 0x7f, 0x6, 0xec, 0x5d, 0x2b, 0xd1, 0x36}, {0xa2, 0xb8, 0x24, 0x3b, 0x9a, 0x25, 0xe6, 0x5c, 0xb8, 0xa0, 0xaf, 0x45, 0xcc, 0x7a, 0x57, 0xb8, 0x37, 0x70, 0xa0, 0x8b, 0xe8, 0xe6, 0xcb, 0xcc, 0xbf, 0x9, 0x78, 0x12, 0x51, 0x3c, 0x14, 0x3d}, {0x57, 0x45, 0x15, 0x79, 0x91, 0x27, 0x6d, 0x12, 0xa, 0x3a, 0x78, 0xfc, 0x5c, 0x8f, 0xe4, 0xd5, 0xac, 0x9b, 0x17, 0xdf, 0xe8, 0xb6, 0xbd, 0x36, 0x59, 0x28, 0xa8, 0x5b, 0x88, 0x17, 0xf5, 0x2e}, }, }, { { {0x51, 0x2f, 0x5b, 0x30, 0xfb, 0xbf, 0xee, 0x96, 0xb8, 0x96, 0x95, 0x88, 0xad, 0x38, 0xf9, 0xd3, 0x25, 0xdd, 0xd5, 0x46, 0xc7, 0x2d, 0xf5, 0xf0, 0x95, 0x0, 0x3a, 0xbb, 0x90, 0x82, 0x96, 0x57}, {0xdc, 0xae, 0x58, 0x8c, 0x4e, 0x97, 0x37, 0x46, 0xa4, 0x41, 0xf0, 0xab, 0xfb, 0x22, 0xef, 0xb9, 0x8a, 0x71, 0x80, 0xe9, 0x56, 0xd9, 0x85, 0xe1, 0xa6, 0xa8, 0x43, 0xb1, 0xfa, 0x78, 0x1b, 0x2f}, {0x1, 0xe1, 0x20, 0xa, 0x43, 0xb8, 0x1a, 0xf7, 0x47, 0xec, 0xf0, 0x24, 0x8d, 0x65, 0x93, 0xf3, 0xd1, 0xee, 0xe2, 0x6e, 0xa8, 0x9, 0x75, 0xcf, 0xe1, 0xa3, 0x2a, 0xdc, 0x35, 0x3e, 0xc4, 0x7d}, }, { {0x18, 0x97, 0x3e, 0x27, 0x5c, 0x2a, 0x78, 0x5a, 0x94, 0xfd, 0x4e, 0x5e, 0x99, 0xc6, 0x76, 0x35, 0x3e, 0x7d, 0x23, 0x1f, 0x5, 0xd8, 0x2e, 0xf, 0x99, 0xa, 0xd5, 0x82, 0x1d, 0xb8, 0x4f, 0x4}, {0xc3, 0xd9, 0x7d, 0x88, 0x65, 0x66, 0x96, 0x85, 0x55, 0x53, 0xb0, 0x4b, 0x31, 0x9b, 0xf, 0xc9, 0xb1, 0x79, 0x20, 0xef, 0xf8, 0x8d, 0xe0, 0xc6, 0x2f, 0xc1, 
0x8c, 0x75, 0x16, 0x20, 0xf7, 0x7e}, {0xd9, 0xe3, 0x7, 0xa9, 0xc5, 0x18, 0xdf, 0xc1, 0x59, 0x63, 0x4c, 0xce, 0x1d, 0x37, 0xb3, 0x57, 0x49, 0xbb, 0x1, 0xb2, 0x34, 0x45, 0x70, 0xca, 0x2e, 0xdd, 0x30, 0x9c, 0x3f, 0x82, 0x79, 0x7f}, }, { {0xba, 0x87, 0xf5, 0x68, 0xf0, 0x1f, 0x9c, 0x6a, 0xde, 0xc8, 0x50, 0x0, 0x4e, 0x89, 0x27, 0x8, 0xe7, 0x5b, 0xed, 0x7d, 0x55, 0x99, 0xbf, 0x3c, 0xf0, 0xd6, 0x6, 0x1c, 0x43, 0xb0, 0xa9, 0x64}, {0xe8, 0x13, 0xb5, 0xa3, 0x39, 0xd2, 0x34, 0x83, 0xd8, 0xa8, 0x1f, 0xb9, 0xd4, 0x70, 0x36, 0xc1, 0x33, 0xbd, 0x90, 0xf5, 0x36, 0x41, 0xb5, 0x12, 0xb4, 0xd9, 0x84, 0xd7, 0x73, 0x3, 0x4e, 0xa}, {0x19, 0x29, 0x7d, 0x5b, 0xa1, 0xd6, 0xb3, 0x2e, 0x35, 0x82, 0x3a, 0xd5, 0xa0, 0xf6, 0xb4, 0xb0, 0x47, 0x5d, 0xa4, 0x89, 0x43, 0xce, 0x56, 0x71, 0x6c, 0x34, 0x18, 0xce, 0xa, 0x7d, 0x1a, 0x7}, }, { {0x31, 0x44, 0xe1, 0x20, 0x52, 0x35, 0xc, 0xcc, 0x41, 0x51, 0xb1, 0x9, 0x7, 0x95, 0x65, 0xd, 0x36, 0x5f, 0x9d, 0x20, 0x1b, 0x62, 0xf5, 0x9a, 0xd3, 0x55, 0x77, 0x61, 0xf7, 0xbc, 0x69, 0x7c}, {0xb, 0xba, 0x87, 0xc8, 0xaa, 0x2d, 0x7, 0xd3, 0xee, 0x62, 0xa5, 0xbf, 0x5, 0x29, 0x26, 0x1, 0x8b, 0x76, 0xef, 0xc0, 0x2, 0x30, 0x54, 0xcf, 0x9c, 0x7e, 0xea, 0x46, 0x71, 0xcc, 0x3b, 0x2c}, {0x5f, 0x29, 0xe8, 0x4, 0xeb, 0xd7, 0xf0, 0x7, 0x7d, 0xf3, 0x50, 0x2f, 0x25, 0x18, 0xdb, 0x10, 0xd7, 0x98, 0x17, 0x17, 0xa3, 0xa9, 0x51, 0xe9, 0x1d, 0xa5, 0xac, 0x22, 0x73, 0x9a, 0x5a, 0x6f}, }, { {0xbe, 0x44, 0xd9, 0xa3, 0xeb, 0xd4, 0x29, 0xe7, 0x9e, 0xaf, 0x78, 0x80, 0x40, 0x9, 0x9e, 0x8d, 0x3, 0x9c, 0x86, 0x47, 0x7a, 0x56, 0x25, 0x45, 0x24, 0x3b, 0x8d, 0xee, 0x80, 0x96, 0xab, 0x2}, {0xc5, 0xc6, 0x41, 0x2f, 0xc, 0x0, 0xa1, 0x8b, 0x9b, 0xfb, 0xfe, 0xc, 0xc1, 0x79, 0x9f, 0xc4, 0x9f, 0x1c, 0xc5, 0x3c, 0x70, 0x47, 0xfa, 0x4e, 0xca, 0xaf, 0x47, 0xe1, 0xa2, 0x21, 0x4e, 0x49}, {0x9a, 0xd, 0xe5, 0xdd, 0x85, 0x8a, 0xa4, 0xef, 0x49, 0xa2, 0xb9, 0xf, 0x4e, 0x22, 0x9a, 0x21, 0xd9, 0xf6, 0x1e, 0xd9, 0x1d, 0x1f, 0x9, 0xfa, 0x34, 0xbb, 0x46, 0xea, 0xcb, 0x76, 0x5d, 0x6b}, }, { {0x22, 0x25, 0x78, 0x1e, 0x17, 
0x41, 0xf9, 0xe0, 0xd3, 0x36, 0x69, 0x3, 0x74, 0xae, 0xe6, 0xf1, 0x46, 0xc7, 0xfc, 0xd0, 0xa2, 0x3e, 0x8b, 0x40, 0x3e, 0x31, 0xdd, 0x3, 0x9c, 0x86, 0xfb, 0x16}, {0x94, 0xd9, 0xc, 0xec, 0x6c, 0x55, 0x57, 0x88, 0xba, 0x1d, 0xd0, 0x5c, 0x6f, 0xdc, 0x72, 0x64, 0x77, 0xb4, 0x42, 0x8f, 0x14, 0x69, 0x1, 0xaf, 0x54, 0x73, 0x27, 0x85, 0xf6, 0x33, 0xe3, 0xa}, {0x62, 0x9, 0xb6, 0x33, 0x97, 0x19, 0x8e, 0x28, 0x33, 0xe1, 0xab, 0xd8, 0xb4, 0x72, 0xfc, 0x24, 0x3e, 0xd0, 0x91, 0x9, 0xed, 0xf7, 0x11, 0x48, 0x75, 0xd0, 0x70, 0x8f, 0x8b, 0xe3, 0x81, 0x3f}, }, { {0x24, 0xc8, 0x17, 0x5f, 0x35, 0x7f, 0xdb, 0xa, 0xa4, 0x99, 0x42, 0xd7, 0xc3, 0x23, 0xb9, 0x74, 0xf7, 0xea, 0xf8, 0xcb, 0x8b, 0x3e, 0x7c, 0xd5, 0x3d, 0xdc, 0xde, 0x4c, 0xd3, 0xe2, 0xd3, 0xa}, {0xfe, 0xaf, 0xd9, 0x7e, 0xcc, 0xf, 0x91, 0x7f, 0x4b, 0x87, 0x65, 0x24, 0xa1, 0xb8, 0x5c, 0x54, 0x4, 0x47, 0xc, 0x4b, 0xd2, 0x7e, 0x39, 0xa8, 0x93, 0x9, 0xf5, 0x4, 0xc1, 0xf, 0x51, 0x50}, {0x9d, 0x24, 0x6e, 0x33, 0xc5, 0xf, 0xc, 0x6f, 0xd9, 0xcf, 0x31, 0xc3, 0x19, 0xde, 0x5e, 0x74, 0x1c, 0xfe, 0xee, 0x9, 0x0, 0xfd, 0xd6, 0xf2, 0xbe, 0x1e, 0xfa, 0xf0, 0x8b, 0x15, 0x7c, 0x12}, }, { {0x74, 0xb9, 0x51, 0xae, 0xc4, 0x8f, 0xa2, 0xde, 0x96, 0xfe, 0x4d, 0x74, 0xd3, 0x73, 0x99, 0x1d, 0xa8, 0x48, 0x38, 0x87, 0xb, 0x68, 0x40, 0x62, 0x95, 0xdf, 0x67, 0xd1, 0x79, 0x24, 0xd8, 0x4e}, {0xa2, 0x79, 0x98, 0x2e, 0x42, 0x7c, 0x19, 0xf6, 0x47, 0x36, 0xca, 0x52, 0xd4, 0xdd, 0x4a, 0xa4, 0xcb, 0xac, 0x4e, 0x4b, 0xc1, 0x3f, 0x41, 0x9b, 0x68, 0x4f, 0xef, 0x7, 0x7d, 0xf8, 0x4e, 0x35}, {0x75, 0xd9, 0xc5, 0x60, 0x22, 0xb5, 0xe3, 0xfe, 0xb8, 0xb0, 0x41, 0xeb, 0xfc, 0x2e, 0x35, 0x50, 0x3c, 0x65, 0xf6, 0xa9, 0x30, 0xac, 0x8, 0x88, 0x6d, 0x23, 0x39, 0x5, 0xd2, 0x92, 0x2d, 0x30}, }, }, { { {0x77, 0xf1, 0xe0, 0xe4, 0xb6, 0x6f, 0xbc, 0x2d, 0x93, 0x6a, 0xbd, 0xa4, 0x29, 0xbf, 0xe1, 0x4, 0xe8, 0xf6, 0x7a, 0x78, 0xd4, 0x66, 0x19, 0x5e, 0x60, 0xd0, 0x26, 0xb4, 0x5e, 0x5f, 0xdc, 0xe}, {0x3d, 0x28, 0xa4, 0xbc, 0xa2, 0xc1, 0x13, 0x78, 0xd9, 0x3d, 0x86, 0xa1, 0x91, 0xf0, 0x62, 
0xed, 0x86, 0xfa, 0x68, 0xc2, 0xb8, 0xbc, 0xc7, 0xae, 0x4c, 0xae, 0x1c, 0x6f, 0xb7, 0xd3, 0xe5, 0x10}, {0x67, 0x8e, 0xda, 0x53, 0xd6, 0xbf, 0x53, 0x54, 0x41, 0xf6, 0xa9, 0x24, 0xec, 0x1e, 0xdc, 0xe9, 0x23, 0x8a, 0x57, 0x3, 0x3b, 0x26, 0x87, 0xbf, 0x72, 0xba, 0x1c, 0x36, 0x51, 0x6c, 0xb4, 0x45}, }, { {0xe4, 0xe3, 0x7f, 0x8a, 0xdd, 0x4d, 0x9d, 0xce, 0x30, 0xe, 0x62, 0x76, 0x56, 0x64, 0x13, 0xab, 0x58, 0x99, 0xe, 0xb3, 0x7b, 0x4f, 0x59, 0x4b, 0xdf, 0x29, 0x12, 0x32, 0xef, 0xa, 0x1c, 0x5c}, {0xa1, 0x7f, 0x4f, 0x31, 0xbf, 0x2a, 0x40, 0xa9, 0x50, 0xf4, 0x8c, 0x8e, 0xdc, 0xf1, 0x57, 0xe2, 0x84, 0xbe, 0xa8, 0x23, 0x4b, 0xd5, 0xbb, 0x1d, 0x3b, 0x71, 0xcb, 0x6d, 0xa3, 0xbf, 0x77, 0x21}, {0x8f, 0xdb, 0x79, 0xfa, 0xbc, 0x1b, 0x8, 0x37, 0xb3, 0x59, 0x5f, 0xc2, 0x1e, 0x81, 0x48, 0x60, 0x87, 0x24, 0x83, 0x9c, 0x65, 0x76, 0x7a, 0x8, 0xbb, 0xb5, 0x8a, 0x7d, 0x38, 0x19, 0xe6, 0x4a}, }, { {0x83, 0xfb, 0x5b, 0x98, 0x44, 0x7e, 0x11, 0x61, 0x36, 0x31, 0x96, 0x71, 0x2a, 0x46, 0xe0, 0xfc, 0x4b, 0x90, 0x25, 0xd4, 0x48, 0x34, 0xac, 0x83, 0x64, 0x3d, 0xa4, 0x5b, 0xbe, 0x5a, 0x68, 0x75}, {0x2e, 0xa3, 0x44, 0x53, 0xaa, 0xf6, 0xdb, 0x8d, 0x78, 0x40, 0x1b, 0xb4, 0xb4, 0xea, 0x88, 0x7d, 0x60, 0xd, 0x13, 0x4a, 0x97, 0xeb, 0xb0, 0x5e, 0x3, 0x3e, 0xbf, 0x17, 0x1b, 0xd9, 0x0, 0x1a}, {0xb2, 0xf2, 0x61, 0xeb, 0x33, 0x9, 0x96, 0x6e, 0x52, 0x49, 0xff, 0xc9, 0xa8, 0xf, 0x3d, 0x54, 0x69, 0x65, 0xf6, 0x7a, 0x10, 0x75, 0x72, 0xdf, 0xaa, 0xe6, 0xb0, 0x23, 0xb6, 0x29, 0x55, 0x13}, }, { {0xfe, 0x83, 0x2e, 0xe2, 0xbc, 0x16, 0xc7, 0xf5, 0xc1, 0x85, 0x9, 0xe8, 0x19, 0xeb, 0x2b, 0xb4, 0xae, 0x4a, 0x25, 0x14, 0x37, 0xa6, 0x9d, 0xec, 0x13, 0xa6, 0x90, 0x15, 0x5, 0xea, 0x72, 0x59}, {0x18, 0xd5, 0xd1, 0xad, 0xd7, 0xdb, 0xf0, 0x18, 0x11, 0x1f, 0xc1, 0xcf, 0x88, 0x78, 0x9f, 0x97, 0x9b, 0x75, 0x14, 0x71, 0xf0, 0xe1, 0x32, 0x87, 0x1, 0x3a, 0xca, 0x65, 0x1a, 0xb8, 0xb5, 0x79}, {0x11, 0x78, 0x8f, 0xdc, 0x20, 0xac, 0xd4, 0xf, 0xa8, 0x4f, 0x4d, 0xac, 0x94, 0xd2, 0x9a, 0x9a, 0x34, 0x4, 0x36, 0xb3, 0x64, 0x2d, 0x1b, 0xc0, 0xdb, 
0x3b, 0x5f, 0x90, 0x95, 0x9c, 0x7e, 0x4f}, }, { {0xfe, 0x99, 0x52, 0x35, 0x3d, 0x44, 0xc8, 0x71, 0xd7, 0xea, 0xeb, 0xdb, 0x1c, 0x3b, 0xcd, 0x8b, 0x66, 0x94, 0xa4, 0xf1, 0x9e, 0x49, 0x92, 0x80, 0xc8, 0xad, 0x44, 0xa1, 0xc4, 0xee, 0x42, 0x19}, {0x2e, 0x30, 0x81, 0x57, 0xbc, 0x4b, 0x67, 0x62, 0xf, 0xdc, 0xad, 0x89, 0x39, 0xf, 0x52, 0xd8, 0xc6, 0xd9, 0xfb, 0x53, 0xae, 0x99, 0x29, 0x8c, 0x4c, 0x8e, 0x63, 0x2e, 0xd9, 0x3a, 0x99, 0x31}, {0x92, 0x49, 0x23, 0xae, 0x19, 0x53, 0xac, 0x7d, 0x92, 0x3e, 0xea, 0xc, 0x91, 0x3d, 0x1b, 0x2c, 0x22, 0x11, 0x3c, 0x25, 0x94, 0xe4, 0x3c, 0x55, 0x75, 0xca, 0xf9, 0x4e, 0x31, 0x65, 0xa, 0x2a}, }, { {0x3a, 0x79, 0x1c, 0x3c, 0xcd, 0x1a, 0x36, 0xcf, 0x3b, 0xbc, 0x35, 0x5a, 0xac, 0xbc, 0x9e, 0x2f, 0xab, 0xa6, 0xcd, 0xa8, 0xe9, 0x60, 0xe8, 0x60, 0x13, 0x1a, 0xea, 0x6d, 0x9b, 0xc3, 0x5d, 0x5}, {0xc2, 0x27, 0xf9, 0xf7, 0x7f, 0x93, 0xb7, 0x2d, 0x35, 0xa6, 0xd0, 0x17, 0x6, 0x1f, 0x74, 0xdb, 0x76, 0xaf, 0x55, 0x11, 0xa2, 0xf3, 0x82, 0x59, 0xed, 0x2d, 0x7c, 0x64, 0x18, 0xe2, 0xf6, 0x4c}, {0xb6, 0x5b, 0x8d, 0xc2, 0x7c, 0x22, 0x19, 0xb1, 0xab, 0xff, 0x4d, 0x77, 0xbc, 0x4e, 0xe2, 0x7, 0x89, 0x2c, 0xa3, 0xe4, 0xce, 0x78, 0x3c, 0xa8, 0xb6, 0x24, 0xaa, 0x10, 0x77, 0x30, 0x1a, 0x12}, }, { {0xc9, 0x83, 0x74, 0xc7, 0x3e, 0x71, 0x59, 0xd6, 0xaf, 0x96, 0x2b, 0xb8, 0x77, 0xe0, 0xbf, 0x88, 0xd3, 0xbc, 0x97, 0x10, 0x23, 0x28, 0x9e, 0x28, 0x9b, 0x3a, 0xed, 0x6c, 0x4a, 0xb9, 0x7b, 0x52}, {0x97, 0x4a, 0x3, 0x9f, 0x5e, 0x5d, 0xdb, 0xe4, 0x2d, 0xbc, 0x34, 0x30, 0x9, 0xfc, 0x53, 0xe1, 0xb1, 0xd3, 0x51, 0x95, 0x91, 0x46, 0x5, 0x46, 0x2d, 0xe5, 0x40, 0x7a, 0x6c, 0xc7, 0x3f, 0x33}, {0x2e, 0x48, 0x5b, 0x99, 0x2a, 0x99, 0x3d, 0x56, 0x1, 0x38, 0x38, 0x6e, 0x7c, 0xd0, 0x5, 0x34, 0xe5, 0xd8, 0x64, 0x2f, 0xde, 0x35, 0x50, 0x48, 0xf7, 0xa9, 0xa7, 0x20, 0x9b, 0x6, 0x89, 0x6b}, }, { {0x77, 0xdb, 0xc7, 0xb5, 0x8c, 0xfa, 0x82, 0x40, 0x55, 0xc1, 0x34, 0xc7, 0xf8, 0x86, 0x86, 0x6, 0x7e, 0xa5, 0xe7, 0xf6, 0xd9, 0xc8, 0xe6, 0x29, 0xcf, 0x9b, 0x63, 0xa7, 0x8, 0xd3, 0x73, 0x4}, {0xd, 0x22, 
0x70, 0x62, 0x41, 0xa0, 0x2a, 0x81, 0x4e, 0x5b, 0x24, 0xf9, 0xfa, 0x89, 0x5a, 0x99, 0x5, 0xef, 0x72, 0x50, 0xce, 0xc4, 0xad, 0xff, 0x73, 0xeb, 0x73, 0xaa, 0x3, 0x21, 0xbc, 0x23}, {0x5, 0x9e, 0x58, 0x3, 0x26, 0x79, 0xee, 0xca, 0x92, 0xc4, 0xdc, 0x46, 0x12, 0x42, 0x4b, 0x2b, 0x4f, 0xa9, 0x1, 0xe6, 0x74, 0xef, 0xa1, 0x2, 0x1a, 0x34, 0x4, 0xde, 0xbf, 0x73, 0x2f, 0x10}, }, }, { { {0x9a, 0x1c, 0x51, 0xb5, 0xe0, 0xda, 0xb4, 0xa2, 0x6, 0xff, 0xff, 0x2b, 0x29, 0x60, 0xc8, 0x7a, 0x34, 0x42, 0x50, 0xf5, 0x5d, 0x37, 0x1f, 0x98, 0x2d, 0xa1, 0x4e, 0xda, 0x25, 0xd7, 0x6b, 0x3f}, {0xc6, 0x45, 0x57, 0x7f, 0xab, 0xb9, 0x18, 0xeb, 0x90, 0xc6, 0x87, 0x57, 0xee, 0x8a, 0x3a, 0x2, 0xa9, 0xaf, 0xf7, 0x2d, 0xda, 0x12, 0x27, 0xb7, 0x3d, 0x1, 0x5c, 0xea, 0x25, 0x7d, 0x59, 0x36}, {0xac, 0x58, 0x60, 0x10, 0x7b, 0x8d, 0x4d, 0x73, 0x5f, 0x90, 0xc6, 0x6f, 0x9e, 0x57, 0x40, 0xd9, 0x2d, 0x93, 0x2, 0x92, 0xf9, 0xf8, 0x66, 0x64, 0xd0, 0xd6, 0x60, 0xda, 0x19, 0xcc, 0x7e, 0x7b}, }, { {0x9b, 0xfa, 0x7c, 0xa7, 0x51, 0x4a, 0xae, 0x6d, 0x50, 0x86, 0xa3, 0xe7, 0x54, 0x36, 0x26, 0x82, 0xdb, 0x82, 0x2d, 0x8f, 0xcd, 0xff, 0xbb, 0x9, 0xba, 0xca, 0xf5, 0x1b, 0x66, 0xdc, 0xbe, 0x3}, {0xd, 0x69, 0x5c, 0x69, 0x3c, 0x37, 0xc2, 0x78, 0x6e, 0x90, 0x42, 0x6, 0x66, 0x2e, 0x25, 0xdd, 0xd2, 0x2b, 0xe1, 0x4a, 0x44, 0x44, 0x1d, 0x95, 0x56, 0x39, 0x74, 0x1, 0x76, 0xad, 0x35, 0x42}, {0xf5, 0x75, 0x89, 0x7, 0xd, 0xcb, 0x58, 0x62, 0x98, 0xf2, 0x89, 0x91, 0x54, 0x42, 0x29, 0x49, 0xe4, 0x6e, 0xe3, 0xe2, 0x23, 0xb4, 0xca, 0xa0, 0xa1, 0x66, 0xf0, 0xcd, 0xb0, 0xe2, 0x7c, 0xe}, }, { {0xf9, 0x70, 0x4b, 0xd9, 0xdf, 0xfe, 0xa6, 0xfe, 0x2d, 0xba, 0xfc, 0xc1, 0x51, 0xc0, 0x30, 0xf1, 0x89, 0xab, 0x2f, 0x7f, 0x7e, 0xd4, 0x82, 0x48, 0xb5, 0xee, 0xec, 0x8a, 0x13, 0x56, 0x52, 0x61}, {0xa3, 0x85, 0x8c, 0xc4, 0x3a, 0x64, 0x94, 0xc4, 0xad, 0x39, 0x61, 0x3c, 0xf4, 0x1d, 0x36, 0xfd, 0x48, 0x4d, 0xe9, 0x3a, 0xdd, 0x17, 0xdb, 0x9, 0x4a, 0x67, 0xb4, 0x8f, 0x5d, 0xa, 0x6e, 0x66}, {0xd, 0xcb, 0x70, 0x48, 0x4e, 0xf6, 0xbb, 0x2a, 0x6b, 0x8b, 0x45, 0xaa, 
0xf0, 0xbc, 0x65, 0xcd, 0x5d, 0x98, 0xe8, 0x75, 0xba, 0x4e, 0xbe, 0x9a, 0xe4, 0xde, 0x14, 0xd5, 0x10, 0xc8, 0xb, 0x7f}, }, { {0xa0, 0x13, 0x72, 0x73, 0xad, 0x9d, 0xac, 0x83, 0x98, 0x2e, 0xf7, 0x2e, 0xba, 0xf8, 0xf6, 0x9f, 0x57, 0x69, 0xec, 0x43, 0xdd, 0x2e, 0x1e, 0x31, 0x75, 0xab, 0xc5, 0xde, 0x7d, 0x90, 0x3a, 0x1d}, {0x6f, 0x13, 0xf4, 0x26, 0xa4, 0x6b, 0x0, 0xb9, 0x35, 0x30, 0xe0, 0x57, 0x9e, 0x36, 0x67, 0x8d, 0x28, 0x3c, 0x46, 0x4f, 0xd9, 0xdf, 0xc8, 0xcb, 0xf5, 0xdb, 0xee, 0xf8, 0xbc, 0x8d, 0x1f, 0xd}, {0xdc, 0x81, 0xd0, 0x3e, 0x31, 0x93, 0x16, 0xba, 0x80, 0x34, 0x1b, 0x85, 0xad, 0x9f, 0x32, 0x29, 0xcb, 0x21, 0x3, 0x3, 0x3c, 0x1, 0x28, 0x1, 0xe3, 0xfd, 0x1b, 0xa3, 0x44, 0x1b, 0x1, 0x0}, }, { {0x5c, 0xa7, 0xa, 0x6a, 0x69, 0x1f, 0x56, 0x16, 0x6a, 0xbd, 0x52, 0x58, 0x5c, 0x72, 0xbf, 0xc1, 0xad, 0x66, 0x79, 0x9a, 0x7f, 0xdd, 0xa8, 0x11, 0x26, 0x10, 0x85, 0xd2, 0xa2, 0x88, 0xd9, 0x63}, {0xc, 0x6c, 0xc6, 0x3f, 0x6c, 0xa0, 0xdf, 0x3f, 0xd2, 0xd, 0xd6, 0x4d, 0x8e, 0xe3, 0x40, 0x5d, 0x71, 0x4d, 0x8e, 0x26, 0x38, 0x8b, 0xe3, 0x7a, 0xe1, 0x57, 0x83, 0x6e, 0x91, 0x8d, 0xc4, 0x3a}, {0x2e, 0x23, 0xbd, 0xaf, 0x53, 0x7, 0x12, 0x0, 0x83, 0xf6, 0xd8, 0xfd, 0xb8, 0xce, 0x2b, 0xe9, 0x91, 0x2b, 0xe7, 0x84, 0xb3, 0x69, 0x16, 0xf8, 0x66, 0xa0, 0x68, 0x23, 0x2b, 0xd5, 0xfa, 0x33}, }, { {0xe8, 0xcf, 0x22, 0xc4, 0xd0, 0xc8, 0x2c, 0x8d, 0xcb, 0x3a, 0xa1, 0x5, 0x7b, 0x4f, 0x2b, 0x7, 0x6f, 0xa5, 0xf6, 0xec, 0xe6, 0xb6, 0xfe, 0xa3, 0xe2, 0x71, 0xa, 0xb9, 0xcc, 0x55, 0xc3, 0x3c}, {0x16, 0x1e, 0xe4, 0xc5, 0xc6, 0x49, 0x6, 0x54, 0x35, 0x77, 0x3f, 0x33, 0x30, 0x64, 0xf8, 0xa, 0x46, 0xe7, 0x5, 0xf3, 0xd2, 0xfc, 0xac, 0xb2, 0xa7, 0xdc, 0x56, 0xa2, 0x29, 0xf4, 0xc0, 0x16}, {0x31, 0x91, 0x3e, 0x90, 0x43, 0x94, 0xb6, 0xe9, 0xce, 0x37, 0x56, 0x7a, 0xcb, 0x94, 0xa4, 0xb8, 0x44, 0x92, 0xba, 0xba, 0xa4, 0xd1, 0x7c, 0xc8, 0x68, 0x75, 0xae, 0x6b, 0x42, 0xaf, 0x1e, 0x63}, }, { {0xe8, 0xd, 0x70, 0xa3, 0xb9, 0x75, 0xd9, 0x47, 0x52, 0x5, 0xf8, 0xe2, 0xfb, 0xc5, 0x80, 0x72, 0xe1, 0x5d, 0xe4, 0x32, 0x27, 0x8f, 
0x65, 0x53, 0xb5, 0x80, 0x5f, 0x66, 0x7f, 0x2c, 0x1f, 0x43}, {0x9f, 0xfe, 0x66, 0xda, 0x10, 0x4, 0xe9, 0xb3, 0xa6, 0xe5, 0x16, 0x6c, 0x52, 0x4b, 0xdd, 0x85, 0x83, 0xbf, 0xf9, 0x1e, 0x61, 0x97, 0x3d, 0xbc, 0xb5, 0x19, 0xa9, 0x1e, 0x8b, 0x64, 0x99, 0x55}, {0x19, 0x7b, 0x8f, 0x85, 0x44, 0x63, 0x2, 0xd6, 0x4a, 0x51, 0xea, 0xa1, 0x2f, 0x35, 0xab, 0x14, 0xd7, 0xa9, 0x90, 0x20, 0x1a, 0x44, 0x0, 0x89, 0x26, 0x3b, 0x25, 0x91, 0x5f, 0x71, 0x4, 0x7b}, }, { {0xc6, 0xba, 0xe6, 0xc4, 0x80, 0xc2, 0x76, 0xb3, 0xb, 0x9b, 0x1d, 0x6d, 0xdd, 0xd3, 0xe, 0x97, 0x44, 0xf9, 0xb, 0x45, 0x58, 0x95, 0x9a, 0xb0, 0x23, 0xe2, 0xcd, 0x57, 0xfa, 0xac, 0xd0, 0x48}, {0x43, 0xae, 0xf6, 0xac, 0x28, 0xbd, 0xed, 0x83, 0xb4, 0x7a, 0x5c, 0x7d, 0x8b, 0x7c, 0x35, 0x86, 0x44, 0x2c, 0xeb, 0xb7, 0x69, 0x47, 0x40, 0xc0, 0x3f, 0x58, 0xf6, 0xc2, 0xf5, 0x7b, 0xb3, 0x59}, {0x71, 0xe6, 0xab, 0x7d, 0xe4, 0x26, 0xf, 0xb6, 0x37, 0x3a, 0x2f, 0x62, 0x97, 0xa1, 0xd1, 0xf1, 0x94, 0x3, 0x96, 0xe9, 0x7e, 0xce, 0x8, 0x42, 0xdb, 0x3b, 0x6d, 0x33, 0x91, 0x41, 0x23, 0x16}, }, }, { { {0x40, 0x86, 0xf3, 0x1f, 0xd6, 0x9c, 0x49, 0xdd, 0xa0, 0x25, 0x36, 0x6, 0xc3, 0x9b, 0xcd, 0x29, 0xc3, 0x3d, 0xd7, 0x3d, 0x2, 0xd8, 0xe2, 0x51, 0x31, 0x92, 0x3b, 0x20, 0x7a, 0x70, 0x25, 0x4a}, {0xf6, 0x7f, 0x26, 0xf6, 0xde, 0x99, 0xe4, 0xb9, 0x43, 0x8, 0x2c, 0x74, 0x7b, 0xca, 0x72, 0x77, 0xb1, 0xf2, 0xa4, 0xe9, 0x3f, 0x15, 0xa0, 0x23, 0x6, 0x50, 0xd0, 0xd5, 0xec, 0xdf, 0xdf, 0x2c}, {0x6a, 0xed, 0xf6, 0x53, 0x8a, 0x66, 0xb7, 0x2a, 0xa1, 0x70, 0xd1, 0x1d, 0x58, 0x42, 0x42, 0x30, 0x61, 0x1, 0xe2, 0x3a, 0x4c, 0x14, 0x0, 0x40, 0xfc, 0x49, 0x8e, 0x24, 0x6d, 0x89, 0x21, 0x57}, }, { {0x4e, 0xda, 0xd0, 0xa1, 0x91, 0x50, 0x5d, 0x28, 0x8, 0x3e, 0xfe, 0xb5, 0xa7, 0x6f, 0xaa, 0x4b, 0xb3, 0x93, 0x93, 0xe1, 0x7c, 0x17, 0xe5, 0x63, 0xfd, 0x30, 0xb0, 0xc4, 0xaf, 0x35, 0xc9, 0x3}, {0xae, 0x1b, 0x18, 0xfd, 0x17, 0x55, 0x6e, 0xb, 0xb4, 0x63, 0xb9, 0x2b, 0x9f, 0x62, 0x22, 0x90, 0x25, 0x46, 0x6, 0x32, 0xe9, 0xbc, 0x9, 0x55, 0xda, 0x13, 0x3c, 0xf6, 0x74, 0xdd, 0x8e, 0x57}, 
{0x3d, 0xc, 0x2b, 0x49, 0xc6, 0x76, 0x72, 0x99, 0xfc, 0x5, 0xe2, 0xdf, 0xc4, 0xc2, 0xcc, 0x47, 0x3c, 0x3a, 0x62, 0xdd, 0x84, 0x9b, 0xd2, 0xdc, 0xa2, 0xc7, 0x88, 0x2, 0x59, 0xab, 0xc2, 0x3e}, }, { {0xcb, 0xd1, 0x32, 0xae, 0x9, 0x3a, 0x21, 0xa7, 0xd5, 0xc2, 0xf5, 0x40, 0xdf, 0x87, 0x2b, 0xf, 0x29, 0xab, 0x1e, 0xe8, 0xc6, 0xa4, 0xae, 0xb, 0x5e, 0xac, 0xdb, 0x6a, 0x6c, 0xf6, 0x1b, 0xe}, {0xb9, 0x7b, 0xd8, 0xe4, 0x7b, 0xd2, 0xa0, 0xa1, 0xed, 0x1a, 0x39, 0x61, 0xeb, 0x4d, 0x8b, 0xa9, 0x83, 0x9b, 0xcb, 0x73, 0xd0, 0xdd, 0xa0, 0x99, 0xce, 0xca, 0xf, 0x20, 0x5a, 0xc2, 0xd5, 0x2d}, {0x7e, 0x88, 0x2c, 0x79, 0xe9, 0xd5, 0xab, 0xe2, 0x5d, 0x6d, 0x92, 0xcb, 0x18, 0x0, 0x2, 0x1a, 0x1e, 0x5f, 0xae, 0xba, 0xcd, 0x69, 0xba, 0xbf, 0x5f, 0x8f, 0xe8, 0x5a, 0xb3, 0x48, 0x5, 0x73}, }, { {0x34, 0xe3, 0xd6, 0xa1, 0x4b, 0x9, 0x5b, 0x80, 0x19, 0x3f, 0x35, 0x9, 0x77, 0xf1, 0x3e, 0xbf, 0x2b, 0x70, 0x22, 0x6, 0xcb, 0x6, 0x3f, 0x42, 0xdd, 0x45, 0x78, 0xd8, 0x77, 0x22, 0x5a, 0x58}, {0xee, 0xb8, 0xa8, 0xcb, 0xa3, 0x51, 0x35, 0xc4, 0x16, 0x5f, 0x11, 0xb2, 0x1d, 0x6f, 0xa2, 0x65, 0x50, 0x38, 0x8c, 0xab, 0x52, 0x4f, 0xf, 0x76, 0xca, 0xb8, 0x1d, 0x41, 0x3b, 0x44, 0x43, 0x30}, {0x62, 0x89, 0xd4, 0x33, 0x82, 0x5f, 0x8a, 0xa1, 0x7f, 0x25, 0x78, 0xec, 0xb5, 0xc4, 0x98, 0x66, 0xff, 0x41, 0x3e, 0x37, 0xa5, 0x6f, 0x8e, 0xa7, 0x1f, 0x98, 0xef, 0x50, 0x89, 0x27, 0x56, 0x76}, }, { {0x9d, 0xcf, 0x86, 0xea, 0xa3, 0x73, 0x70, 0xe1, 0xdc, 0x5f, 0x15, 0x7, 0xb7, 0xfb, 0x8c, 0x3a, 0x8e, 0x8a, 0x83, 0x31, 0xfc, 0xe7, 0x53, 0x48, 0x16, 0xf6, 0x13, 0xb6, 0x84, 0xf4, 0xbb, 0x28}, {0xc0, 0xc8, 0x1f, 0xd5, 0x59, 0xcf, 0xc3, 0x38, 0xf2, 0xb6, 0x6, 0x5, 0xfd, 0xd2, 0xed, 0x9b, 0x8f, 0xe, 0x57, 0xab, 0x9f, 0x10, 0xbf, 0x26, 0xa6, 0x46, 0xb8, 0xc1, 0xa8, 0x60, 0x41, 0x3f}, {0x7c, 0x6c, 0x13, 0x6f, 0x5c, 0x2f, 0x61, 0xf2, 0xbe, 0x11, 0xdd, 0xf6, 0x7, 0xd1, 0xea, 0xaf, 0x33, 0x6f, 0xde, 0x13, 0xd2, 0x9a, 0x7e, 0x52, 0x5d, 0xf7, 0x88, 0x81, 0x35, 0xcb, 0x79, 0x1e}, }, { {0x81, 0x81, 0xe0, 0xf5, 0xd8, 0x53, 0xe9, 0x77, 0xd9, 0xde, 
0x9d, 0x29, 0x44, 0xc, 0xa5, 0x84, 0xe5, 0x25, 0x45, 0x86, 0xc, 0x2d, 0x6c, 0xdc, 0xf4, 0xf2, 0xd1, 0x39, 0x2d, 0xb5, 0x8a, 0x47}, {0xf1, 0xe3, 0xf7, 0xee, 0xc3, 0x36, 0x34, 0x1, 0xf8, 0x10, 0x9e, 0xfe, 0x7f, 0x6a, 0x8b, 0x82, 0xfc, 0xde, 0xf9, 0xbc, 0xe5, 0x8, 0xf9, 0x7f, 0x31, 0x38, 0x3b, 0x3a, 0x1b, 0x95, 0xd7, 0x65}, {0x59, 0xd1, 0x52, 0x92, 0xd3, 0xa4, 0xa6, 0x66, 0x7, 0xc8, 0x1a, 0x87, 0xbc, 0xe1, 0xdd, 0xe5, 0x6f, 0xc9, 0xc1, 0xa6, 0x40, 0x6b, 0x2c, 0xb8, 0x14, 0x22, 0x21, 0x1a, 0x41, 0x7a, 0xd8, 0x16}, }, { {0x83, 0x5, 0x4e, 0xd5, 0xe2, 0xd5, 0xa4, 0xfb, 0xfa, 0x99, 0xbd, 0x2e, 0xd7, 0xaf, 0x1f, 0xe2, 0x8f, 0x77, 0xe9, 0x6e, 0x73, 0xc2, 0x7a, 0x49, 0xde, 0x6d, 0x5a, 0x7a, 0x57, 0xb, 0x99, 0x1f}, {0x15, 0x62, 0x6, 0x42, 0x5a, 0x7e, 0xbd, 0xb3, 0xc1, 0x24, 0x5a, 0xc, 0xcd, 0xe3, 0x9b, 0x87, 0xb7, 0x94, 0xf9, 0xd6, 0xb1, 0x5d, 0xc0, 0x57, 0xa6, 0x8c, 0xf3, 0x65, 0x81, 0x7c, 0xf8, 0x28}, {0xd6, 0xf7, 0xe8, 0x1b, 0xad, 0x4e, 0x34, 0xa3, 0x8f, 0x79, 0xea, 0xac, 0xeb, 0x50, 0x1e, 0x7d, 0x52, 0xe0, 0xd, 0x52, 0x9e, 0x56, 0xc6, 0x77, 0x3e, 0x6d, 0x4d, 0x53, 0xe1, 0x2f, 0x88, 0x45}, }, { {0xe4, 0x6f, 0x3c, 0x94, 0x29, 0x99, 0xac, 0xd8, 0xa2, 0x92, 0x83, 0xa3, 0x61, 0xf1, 0xf9, 0xb5, 0xf3, 0x9a, 0xc8, 0xbe, 0x13, 0xdb, 0x99, 0x26, 0x74, 0xf0, 0x5, 0xe4, 0x3c, 0x84, 0xcf, 0x7d}, {0xd6, 0x83, 0x79, 0x75, 0x5d, 0x34, 0x69, 0x66, 0xa6, 0x11, 0xaa, 0x17, 0x11, 0xed, 0xb6, 0x62, 0x8f, 0x12, 0x5e, 0x98, 0x57, 0x18, 0xdd, 0x7d, 0xdd, 0xf6, 0x26, 0xf6, 0xb8, 0xe5, 0x8f, 0x68}, {0xc0, 0x32, 0x47, 0x4a, 0x48, 0xd6, 0x90, 0x6c, 0x99, 0x32, 0x56, 0xca, 0xfd, 0x43, 0x21, 0xd5, 0xe1, 0xc6, 0x5d, 0x91, 0xc3, 0x28, 0xbe, 0xb3, 0x1b, 0x19, 0x27, 0x73, 0x7e, 0x68, 0x39, 0x67}, }, }, { { {0xc0, 0x1a, 0xc, 0xc8, 0x9d, 0xcc, 0x6d, 0xa6, 0x36, 0xa4, 0x38, 0x1b, 0xf4, 0x5c, 0xa0, 0x97, 0xc6, 0xd7, 0xdb, 0x95, 0xbe, 0xf3, 0xeb, 0xa7, 0xab, 0x7d, 0x7e, 0x8d, 0xf6, 0xb8, 0xa0, 0x7d}, {0xa6, 0x75, 0x56, 0x38, 0x14, 0x20, 0x78, 0xef, 0xe8, 0xa9, 0xfd, 0xaa, 0x30, 0x9f, 0x64, 0xa2, 0xcb, 0xa8, 
0xdf, 0x5c, 0x50, 0xeb, 0xd1, 0x4c, 0xb3, 0xc0, 0x4d, 0x1d, 0xba, 0x5a, 0x11, 0x46}, {0x76, 0xda, 0xb5, 0xc3, 0x53, 0x19, 0xf, 0xd4, 0x9b, 0x9e, 0x11, 0x21, 0x73, 0x6f, 0xac, 0x1d, 0x60, 0x59, 0xb2, 0xfe, 0x21, 0x60, 0xcc, 0x3, 0x4b, 0x4b, 0x67, 0x83, 0x7e, 0x88, 0x5f, 0x5a}, }, { {0xb9, 0x43, 0xa6, 0xa0, 0xd3, 0x28, 0x96, 0x9e, 0x64, 0x20, 0xc3, 0xe6, 0x0, 0xcb, 0xc3, 0xb5, 0x32, 0xec, 0x2d, 0x7c, 0x89, 0x2, 0x53, 0x9b, 0xc, 0xc7, 0xd1, 0xd5, 0xe2, 0x7a, 0xe3, 0x43}, {0x11, 0x3d, 0xa1, 0x70, 0xcf, 0x1, 0x63, 0x8f, 0xc4, 0xd0, 0xd, 0x35, 0x15, 0xb8, 0xce, 0xcf, 0x7e, 0xa4, 0xbc, 0xa4, 0xd4, 0x97, 0x2, 0xf7, 0x34, 0x14, 0x4d, 0xe4, 0x56, 0xb6, 0x69, 0x36}, {0x33, 0xe1, 0xa6, 0xed, 0x6, 0x3f, 0x7e, 0x38, 0xc0, 0x3a, 0xa1, 0x99, 0x51, 0x1d, 0x30, 0x67, 0x11, 0x38, 0x26, 0x36, 0xf8, 0xd8, 0x5a, 0xbd, 0xbe, 0xe9, 0xd5, 0x4f, 0xcd, 0xe6, 0x21, 0x6a}, }, { {0xe3, 0xb2, 0x99, 0x66, 0x12, 0x29, 0x41, 0xef, 0x1, 0x13, 0x8d, 0x70, 0x47, 0x8, 0xd3, 0x71, 0xbd, 0xb0, 0x82, 0x11, 0xd0, 0x32, 0x54, 0x32, 0x36, 0x8b, 0x1e, 0x0, 0x7, 0x1b, 0x37, 0x45}, {0x5f, 0xe6, 0x46, 0x30, 0xa, 0x17, 0xc6, 0xf1, 0x24, 0x35, 0xd2, 0x0, 0x2a, 0x2a, 0x71, 0x58, 0x55, 0xb7, 0x82, 0x8c, 0x3c, 0xbd, 0xdb, 0x69, 0x57, 0xff, 0x95, 0xa1, 0xf1, 0xf9, 0x6b, 0x58}, {0xb, 0x79, 0xf8, 0x5e, 0x8d, 0x8, 0xdb, 0xa6, 0xe5, 0x37, 0x9, 0x61, 0xdc, 0xf0, 0x78, 0x52, 0xb8, 0x6e, 0xa1, 0x61, 0xd2, 0x49, 0x3, 0xac, 0x79, 0x21, 0xe5, 0x90, 0x37, 0xb0, 0xaf, 0xe}, }, { {0x1d, 0xae, 0x75, 0xf, 0x5e, 0x80, 0x40, 0x51, 0x30, 0xcc, 0x62, 0x26, 0xe3, 0xfb, 0x2, 0xec, 0x6d, 0x39, 0x92, 0xea, 0x1e, 0xdf, 0xeb, 0x2c, 0xb3, 0x5b, 0x43, 0xc5, 0x44, 0x33, 0xae, 0x44}, {0x2f, 0x4, 0x48, 0x37, 0xc1, 0x55, 0x5, 0x96, 0x11, 0xaa, 0xb, 0x82, 0xe6, 0x41, 0x9a, 0x21, 0xc, 0x6d, 0x48, 0x73, 0x38, 0xf7, 0x81, 0x1c, 0x61, 0xc6, 0x2, 0x5a, 0x67, 0xcc, 0x9a, 0x30}, {0xee, 0x43, 0xa5, 0xbb, 0xb9, 0x89, 0xf2, 0x9c, 0x42, 0x71, 0xc9, 0x5a, 0x9d, 0xe, 0x76, 0xf3, 0xaa, 0x60, 0x93, 0x4f, 0xc6, 0xe5, 0x82, 0x1d, 0x8f, 0x67, 0x94, 0x7f, 0x1b, 0x22, 
0xd5, 0x62}, }, { {0x3c, 0x7a, 0xf7, 0x3a, 0x26, 0xd4, 0x85, 0x75, 0x4d, 0x14, 0xe9, 0xfe, 0x11, 0x7b, 0xae, 0xdf, 0x3d, 0x19, 0xf7, 0x59, 0x80, 0x70, 0x6, 0xa5, 0x37, 0x20, 0x92, 0x83, 0x53, 0x9a, 0xf2, 0x14}, {0x6d, 0x93, 0xd0, 0x18, 0x9c, 0x29, 0x4c, 0x52, 0xc, 0x1a, 0xc, 0x8a, 0x6c, 0xb5, 0x6b, 0xc8, 0x31, 0x86, 0x4a, 0xdb, 0x2e, 0x5, 0x75, 0xa3, 0x62, 0x45, 0x75, 0xbc, 0xe4, 0xfd, 0xe, 0x5c}, {0xf5, 0xd7, 0xb2, 0x25, 0xdc, 0x7e, 0x71, 0xdf, 0x40, 0x30, 0xb5, 0x99, 0xdb, 0x70, 0xf9, 0x21, 0x62, 0x4c, 0xed, 0xc3, 0xb7, 0x34, 0x92, 0xda, 0x3e, 0x9, 0xee, 0x7b, 0x5c, 0x36, 0x72, 0x5e}, }, { {0x3e, 0xb3, 0x8, 0x2f, 0x6, 0x39, 0x93, 0x7d, 0xbe, 0x32, 0x9f, 0xdf, 0xe5, 0x59, 0x96, 0x5b, 0xfd, 0xbd, 0x9e, 0x1f, 0xad, 0x3d, 0xff, 0xac, 0xb7, 0x49, 0x73, 0xcb, 0x55, 0x5, 0xb2, 0x70}, {0x7f, 0x21, 0x71, 0x45, 0x7, 0xfc, 0x5b, 0x57, 0x5b, 0xd9, 0x94, 0x6, 0x5d, 0x67, 0x79, 0x37, 0x33, 0x1e, 0x19, 0xf4, 0xbb, 0x37, 0xa, 0x9a, 0xbc, 0xea, 0xb4, 0x47, 0x4c, 0x10, 0xf1, 0x77}, {0x4c, 0x2c, 0x11, 0x55, 0xc5, 0x13, 0x51, 0xbe, 0xcd, 0x1f, 0x88, 0x9a, 0x3a, 0x42, 0x88, 0x66, 0x47, 0x3b, 0x50, 0x5e, 0x85, 0x77, 0x66, 0x44, 0x4a, 0x40, 0x6, 0x4a, 0x8f, 0x39, 0x34, 0xe}, }, { {0x28, 0x19, 0x4b, 0x3e, 0x9, 0xb, 0x93, 0x18, 0x40, 0xf6, 0xf3, 0x73, 0xe, 0xe1, 0xe3, 0x7d, 0x6f, 0x5d, 0x39, 0x73, 0xda, 0x17, 0x32, 0xf4, 0x3e, 0x9c, 0x37, 0xca, 0xd6, 0xde, 0x8a, 0x6f}, {0xe8, 0xbd, 0xce, 0x3e, 0xd9, 0x22, 0x7d, 0xb6, 0x7, 0x2f, 0x82, 0x27, 0x41, 0xe8, 0xb3, 0x9, 0x8d, 0x6d, 0x5b, 0xb0, 0x1f, 0xa6, 0x3f, 0x74, 0x72, 0x23, 0x36, 0x8a, 0x36, 0x5, 0x54, 0x5e}, {0x9a, 0xb2, 0xb7, 0xfd, 0x3d, 0x12, 0x40, 0xe3, 0x91, 0xb2, 0x1a, 0xa2, 0xe1, 0x97, 0x7b, 0x48, 0x9e, 0x94, 0xe6, 0xfd, 0x2, 0x7d, 0x96, 0xf9, 0x97, 0xde, 0xd3, 0xc8, 0x2e, 0xe7, 0xd, 0x78}, }, { {0x72, 0x27, 0xf4, 0x0, 0xf3, 0xea, 0x1f, 0x67, 0xaa, 0x41, 0x8c, 0x2a, 0x2a, 0xeb, 0x72, 0x8f, 0x92, 0x32, 0x37, 0x97, 0xd7, 0x7f, 0xa1, 0x29, 0xa6, 0x87, 0xb5, 0x32, 0xad, 0xc6, 0xef, 0x1d}, {0xbc, 0xe7, 0x9a, 0x8, 0x45, 0x85, 0xe2, 0xa, 
0x6, 0x4d, 0x7f, 0x1c, 0xcf, 0xde, 0x8d, 0x38, 0xb8, 0x11, 0x48, 0xa, 0x51, 0x15, 0xac, 0x38, 0xe4, 0x8c, 0x92, 0x71, 0xf6, 0x8b, 0xb2, 0xe}, {0xa7, 0x95, 0x51, 0xef, 0x1a, 0xbe, 0x5b, 0xaf, 0xed, 0x15, 0x7b, 0x91, 0x77, 0x12, 0x8c, 0x14, 0x2e, 0xda, 0xe5, 0x7a, 0xfb, 0xf7, 0x91, 0x29, 0x67, 0x28, 0xdd, 0xf8, 0x1b, 0x20, 0x7d, 0x46}, }, }, { { {0xa9, 0xe7, 0x7a, 0x56, 0xbd, 0xf4, 0x1e, 0xbc, 0xbd, 0x98, 0x44, 0xd6, 0xb2, 0x4c, 0x62, 0x3f, 0xc8, 0x4e, 0x1f, 0x2c, 0xd2, 0x64, 0x10, 0xe4, 0x1, 0x40, 0x38, 0xba, 0xa5, 0xc5, 0xf9, 0x2e}, {0xad, 0x4f, 0xef, 0x74, 0x9a, 0x91, 0xfe, 0x95, 0xa2, 0x8, 0xa3, 0xf6, 0xec, 0x7b, 0x82, 0x3a, 0x1, 0x7b, 0xa4, 0x9, 0xd3, 0x1, 0x4e, 0x96, 0x97, 0xc7, 0xa3, 0x5b, 0x4f, 0x3c, 0xc4, 0x71}, {0xcd, 0x74, 0x9e, 0xfa, 0xf6, 0x6d, 0xfd, 0xb6, 0x7a, 0x26, 0xaf, 0xe4, 0xbc, 0x78, 0x82, 0xf1, 0xe, 0x99, 0xef, 0xf1, 0xd0, 0xb3, 0x55, 0x82, 0x93, 0xf2, 0xc5, 0x90, 0xa3, 0x8c, 0x75, 0x5a}, }, { {0x94, 0xdc, 0x61, 0x1d, 0x8b, 0x91, 0xe0, 0x8c, 0x66, 0x30, 0x81, 0x9a, 0x46, 0x36, 0xed, 0x8d, 0xd3, 0xaa, 0xe8, 0xaf, 0x29, 0xa8, 0xe6, 0xd4, 0x3f, 0xd4, 0x39, 0xf6, 0x27, 0x80, 0x73, 0xa}, {0x95, 0x24, 0x46, 0xd9, 0x10, 0x27, 0xb7, 0xa2, 0x3, 0x50, 0x7d, 0xd5, 0xd2, 0xc6, 0xa8, 0x3a, 0xca, 0x87, 0xb4, 0xa0, 0xbf, 0x0, 0xd4, 0xe3, 0xec, 0x72, 0xeb, 0xb3, 0x44, 0xe2, 0xba, 0x2d}, {0xcc, 0xe1, 0xff, 0x57, 0x2f, 0x4a, 0xf, 0x98, 0x43, 0x98, 0x83, 0xe1, 0xd, 0xd, 0x67, 0x0, 0xfd, 0x15, 0xfb, 0x49, 0x4a, 0x3f, 0x5c, 0x10, 0x9c, 0xa6, 0x26, 0x51, 0x63, 0xca, 0x98, 0x26}, }, { {0xe, 0xd9, 0x3d, 0x5e, 0x2f, 0x70, 0x3d, 0x2e, 0x86, 0x53, 0xd2, 0xe4, 0x18, 0x9, 0x3f, 0x9e, 0x6a, 0xa9, 0x4d, 0x2, 0xf6, 0x3e, 0x77, 0x5e, 0x32, 0x33, 0xfa, 0x4a, 0xc, 0x4b, 0x0, 0x3c}, {0x78, 0xba, 0xb0, 0x32, 0x88, 0x31, 0x65, 0xe7, 0x8b, 0xff, 0x5c, 0x92, 0xf7, 0x31, 0x18, 0x38, 0xcc, 0x1f, 0x29, 0xa0, 0x91, 0x1b, 0xa8, 0x8, 0x7, 0xeb, 0xca, 0x49, 0xcc, 0x3d, 0xb4, 0x1f}, {0x2b, 0xb8, 0xf4, 0x6, 0xac, 0x46, 0xa9, 0x9a, 0xf3, 0xc4, 0x6, 0xa8, 0xa5, 0x84, 0xa2, 0x1c, 0x87, 0x47, 
0xcd, 0xc6, 0x5f, 0x26, 0xd3, 0x3e, 0x17, 0xd2, 0x1f, 0xcd, 0x1, 0xfd, 0x43, 0x6b}, }, { {0xf3, 0xe, 0x76, 0x3e, 0x58, 0x42, 0xc7, 0xb5, 0x90, 0xb9, 0xa, 0xee, 0xb9, 0x52, 0xdc, 0x75, 0x3f, 0x92, 0x2b, 0x7, 0xc2, 0x27, 0x14, 0xbf, 0xf0, 0xd9, 0xf0, 0x6f, 0x2d, 0xb, 0x42, 0x73}, {0x44, 0xc5, 0x97, 0x46, 0x4b, 0x5d, 0xa7, 0xc7, 0xbf, 0xff, 0xf, 0xdf, 0x48, 0xf8, 0xfd, 0x15, 0x5a, 0x78, 0x46, 0xaa, 0xeb, 0xb9, 0x68, 0x28, 0x14, 0xf7, 0x52, 0x5b, 0x10, 0xd7, 0x68, 0x5a}, {0x6, 0x1e, 0x85, 0x9e, 0xcb, 0xf6, 0x2c, 0xaf, 0xc4, 0x38, 0x22, 0xc6, 0x13, 0x39, 0x59, 0x8f, 0x73, 0xf3, 0xfb, 0x99, 0x96, 0xb8, 0x8a, 0xda, 0x9e, 0xbc, 0x34, 0xea, 0x2f, 0x63, 0xb5, 0x3d}, }, { {0xd5, 0x25, 0x98, 0x82, 0xb1, 0x90, 0x49, 0x2e, 0x91, 0x89, 0x9a, 0x3e, 0x87, 0xeb, 0xea, 0xed, 0xf8, 0x4a, 0x70, 0x4c, 0x39, 0x3d, 0xf0, 0xee, 0xe, 0x2b, 0xdf, 0x95, 0xa4, 0x7e, 0x19, 0x59}, {0xd8, 0xd9, 0x5d, 0xf7, 0x2b, 0xee, 0x6e, 0xf4, 0xa5, 0x59, 0x67, 0x39, 0xf6, 0xb1, 0x17, 0xd, 0x73, 0x72, 0x9e, 0x49, 0x31, 0xd1, 0xf2, 0x1b, 0x13, 0x5f, 0xd7, 0x49, 0xdf, 0x1a, 0x32, 0x4}, {0xae, 0x5a, 0xe5, 0xe4, 0x19, 0x60, 0xe1, 0x4, 0xe9, 0x92, 0x2f, 0x7e, 0x7a, 0x43, 0x7b, 0xe7, 0xa4, 0x9a, 0x15, 0x6f, 0xc1, 0x2d, 0xce, 0xc7, 0xc0, 0xc, 0xd7, 0xf4, 0xc1, 0xfd, 0xea, 0x45}, }, { {0xed, 0xb1, 0xcc, 0xcf, 0x24, 0x46, 0xe, 0xb6, 0x95, 0x3, 0x5c, 0xbd, 0x92, 0xc2, 0xdb, 0x59, 0xc9, 0x81, 0x4, 0xdc, 0x1d, 0x9d, 0xa0, 0x31, 0x40, 0xd9, 0x56, 0x5d, 0xea, 0xce, 0x73, 0x3f}, {0x2b, 0xd7, 0x45, 0x80, 0x85, 0x1, 0x84, 0x69, 0x51, 0x6, 0x2f, 0xcf, 0xa2, 0xfa, 0x22, 0x4c, 0xc6, 0x2d, 0x22, 0x6b, 0x65, 0x36, 0x1a, 0x94, 0xde, 0xda, 0x62, 0x3, 0xc8, 0xeb, 0x5e, 0x5a}, {0xc6, 0x8d, 0x4e, 0xa, 0xd1, 0xbf, 0xa7, 0xb7, 0x39, 0xb3, 0xc9, 0x44, 0x7e, 0x0, 0x57, 0xbe, 0xfa, 0xae, 0x57, 0x15, 0x7f, 0x20, 0xc1, 0x60, 0xdb, 0x18, 0x62, 0x26, 0x91, 0x88, 0x5, 0x26}, }, { {0x42, 0xe5, 0x76, 0xc6, 0x3c, 0x8e, 0x81, 0x4c, 0xad, 0xcc, 0xce, 0x3, 0x93, 0x2c, 0x42, 0x5e, 0x8, 0x9f, 0x12, 0xb4, 0xca, 0xcc, 0x7, 0xec, 0xb8, 0x43, 0x44, 0xb2, 
0x10, 0xfa, 0xed, 0xd}, {0x4, 0xff, 0x60, 0x83, 0xa6, 0x4, 0xf7, 0x59, 0xf4, 0xe6, 0x61, 0x76, 0xde, 0x3f, 0xd9, 0xc3, 0x51, 0x35, 0x87, 0x12, 0x73, 0x2a, 0x1b, 0x83, 0x57, 0x5d, 0x61, 0x4e, 0x2e, 0xc, 0xad, 0x54}, {0x2a, 0x52, 0x2b, 0xb8, 0xd5, 0x67, 0x3b, 0xee, 0xeb, 0xc1, 0xa5, 0x9f, 0x46, 0x63, 0xf1, 0x36, 0xd3, 0x9f, 0xc1, 0x6e, 0xf2, 0xd2, 0xb4, 0xa5, 0x8, 0x94, 0x7a, 0xa7, 0xba, 0xb2, 0xec, 0x62}, }, { {0x74, 0x28, 0xb6, 0xaf, 0x36, 0x28, 0x7, 0x92, 0xa5, 0x4, 0xe1, 0x79, 0x85, 0x5e, 0xcd, 0x5f, 0x4a, 0xa1, 0x30, 0xc6, 0xad, 0x1, 0xad, 0x5a, 0x98, 0x3f, 0x66, 0x75, 0x50, 0x3d, 0x91, 0x61}, {0x3d, 0x2b, 0x15, 0x61, 0x52, 0x79, 0xed, 0xe5, 0xd1, 0xd7, 0xdd, 0xe, 0x7d, 0x35, 0x62, 0x49, 0x71, 0x4c, 0x6b, 0xb9, 0xd0, 0xc8, 0x82, 0x74, 0xbe, 0xd8, 0x66, 0xa9, 0x19, 0xf9, 0x59, 0x2e}, {0xda, 0x31, 0x32, 0x1a, 0x36, 0x2d, 0xc6, 0xd, 0x70, 0x2, 0x20, 0x94, 0x32, 0x58, 0x47, 0xfa, 0xce, 0x94, 0x95, 0x3f, 0x51, 0x1, 0xd8, 0x2, 0x5c, 0x5d, 0xc0, 0x31, 0xa1, 0xc2, 0xdb, 0x3d}, }, }, { { {0x14, 0xbb, 0x96, 0x27, 0xa2, 0x57, 0xaa, 0xf3, 0x21, 0xda, 0x7, 0x9b, 0xb7, 0xba, 0x3a, 0x88, 0x1c, 0x39, 0xa0, 0x31, 0x18, 0xe2, 0x4b, 0xe5, 0xf9, 0x5, 0x32, 0xd8, 0x38, 0xfb, 0xe7, 0x5e}, {0x4b, 0xc5, 0x5e, 0xce, 0xf9, 0xf, 0xdc, 0x9a, 0xd, 0x13, 0x2f, 0x8c, 0x6b, 0x2a, 0x9c, 0x3, 0x15, 0x95, 0xf8, 0xf0, 0xc7, 0x7, 0x80, 0x2, 0x6b, 0xb3, 0x4, 0xac, 0x14, 0x83, 0x96, 0x78}, {0x8e, 0x6a, 0x44, 0x41, 0xcb, 0xfd, 0x8d, 0x53, 0xf9, 0x37, 0x49, 0x43, 0xa9, 0xfd, 0xac, 0xa5, 0x78, 0x8c, 0x3c, 0x26, 0x8d, 0x90, 0xaf, 0x46, 0x9, 0xd, 0xca, 0x9b, 0x3c, 0x63, 0xd0, 0x61}, }, { {0xdf, 0x73, 0xfc, 0xf8, 0xbc, 0x28, 0xa3, 0xad, 0xfc, 0x37, 0xf0, 0xa6, 0x5d, 0x69, 0x84, 0xee, 0x9, 0xa9, 0xc2, 0x38, 0xdb, 0xb4, 0x7f, 0x63, 0xdc, 0x7b, 0x6, 0xf8, 0x2d, 0xac, 0x23, 0x5b}, {0x66, 0x25, 0xdb, 0xff, 0x35, 0x49, 0x74, 0x63, 0xbb, 0x68, 0xb, 0x78, 0x89, 0x6b, 0xbd, 0xc5, 0x3, 0xec, 0x3e, 0x55, 0x80, 0x32, 0x1b, 0x6f, 0xf5, 0xd7, 0xae, 0x47, 0xd8, 0x5f, 0x96, 0x6e}, {0x7b, 0x52, 0x80, 0xee, 0x53, 0xb9, 
0xd2, 0x9a, 0x8d, 0x6d, 0xde, 0xfa, 0xaa, 0x19, 0x8f, 0xe8, 0xcf, 0x82, 0xe, 0x15, 0x4, 0x17, 0x71, 0xe, 0xdc, 0xde, 0x95, 0xdd, 0xb9, 0xbb, 0xb9, 0x79}, }, { {0x74, 0x73, 0x9f, 0x8e, 0xae, 0x7d, 0x99, 0xd1, 0x16, 0x8, 0xbb, 0xcf, 0xf8, 0xa2, 0x32, 0xa0, 0xa, 0x5f, 0x44, 0x6d, 0x12, 0xba, 0x6c, 0xcd, 0x34, 0xb8, 0xcc, 0xa, 0x46, 0x11, 0xa8, 0x1b}, {0xc2, 0x26, 0x31, 0x6a, 0x40, 0x55, 0xb3, 0xeb, 0x93, 0xc3, 0xc8, 0x68, 0xa8, 0x83, 0x63, 0xd2, 0x82, 0x7a, 0xb9, 0xe5, 0x29, 0x64, 0xc, 0x6c, 0x47, 0x21, 0xfd, 0xc9, 0x58, 0xf1, 0x65, 0x50}, {0x54, 0x99, 0x42, 0xc, 0xfb, 0x69, 0x81, 0x70, 0x67, 0xcf, 0x6e, 0xd7, 0xac, 0x0, 0x46, 0xe1, 0xba, 0x45, 0xe6, 0x70, 0x8a, 0xb9, 0xaa, 0x2e, 0xf2, 0xfa, 0xa4, 0x58, 0x9e, 0xf3, 0x81, 0x39}, }, { {0xde, 0x6f, 0xe6, 0x6d, 0xa5, 0xdf, 0x45, 0xc8, 0x3a, 0x48, 0x40, 0x2c, 0x0, 0xa5, 0x52, 0xe1, 0x32, 0xf6, 0xb4, 0xc7, 0x63, 0xe1, 0xd2, 0xe9, 0x65, 0x1b, 0xbc, 0xdc, 0x2e, 0x45, 0xf4, 0x30}, {0x93, 0xa, 0x23, 0x59, 0x75, 0x8a, 0xfb, 0x18, 0x5d, 0xf4, 0xe6, 0x60, 0x69, 0x8f, 0x16, 0x1d, 0xb5, 0x3c, 0xa9, 0x14, 0x45, 0xa9, 0x85, 0x3a, 0xfd, 0xd0, 0xac, 0x5, 0x37, 0x8, 0xdc, 0x38}, {0x40, 0x97, 0x75, 0xc5, 0x82, 0x27, 0x6d, 0x85, 0xcc, 0xbe, 0x9c, 0xf9, 0x69, 0x45, 0x13, 0xfa, 0x71, 0x4e, 0xea, 0xc0, 0x73, 0xfc, 0x44, 0x88, 0x69, 0x24, 0x3f, 0x59, 0x1a, 0x9a, 0x2d, 0x63}, }, { {0xa7, 0x84, 0xc, 0xed, 0x11, 0xfd, 0x9, 0xbf, 0x3a, 0x69, 0x9f, 0xd, 0x81, 0x71, 0xf0, 0x63, 0x79, 0x87, 0xcf, 0x57, 0x2d, 0x8c, 0x90, 0x21, 0xa2, 0x4b, 0xf6, 0x8a, 0xf2, 0x7d, 0x5a, 0x3a}, {0xa6, 0xcb, 0x7, 0xb8, 0x15, 0x6b, 0xbb, 0xf6, 0xd7, 0xf0, 0x54, 0xbc, 0xdf, 0xc7, 0x23, 0x18, 0xb, 0x67, 0x29, 0x6e, 0x3, 0x97, 0x1d, 0xbb, 0x57, 0x4a, 0xed, 0x47, 0x88, 0xf4, 0x24, 0xb}, {0xc7, 0xea, 0x1b, 0x51, 0xbe, 0xd4, 0xda, 0xdc, 0xf2, 0xcc, 0x26, 0xed, 0x75, 0x80, 0x53, 0xa4, 0x65, 0x9a, 0x5f, 0x0, 0x9f, 0xff, 0x9c, 0xe1, 0x63, 0x1f, 0x48, 0x75, 0x44, 0xf7, 0xfc, 0x34}, }, { {0x98, 0xaa, 0xcf, 0x78, 0xab, 0x1d, 0xbb, 0xa5, 0xf2, 0x72, 0xb, 0x19, 0x67, 0xa2, 0xed, 0x5c, 
0x8e, 0x60, 0x92, 0xa, 0x11, 0xc9, 0x9, 0x93, 0xb0, 0x74, 0xb3, 0x2f, 0x4, 0xa3, 0x19, 0x1}, {0xca, 0x67, 0x97, 0x78, 0x4c, 0xe0, 0x97, 0xc1, 0x7d, 0x46, 0xd9, 0x38, 0xcb, 0x4d, 0x71, 0xb8, 0xa8, 0x5f, 0xf9, 0x83, 0x82, 0x88, 0xde, 0x55, 0xf7, 0x63, 0xfa, 0x4d, 0x16, 0xdc, 0x3b, 0x3d}, {0x7d, 0x17, 0xc2, 0xe8, 0x9c, 0xd8, 0xa2, 0x67, 0xc1, 0xd0, 0x95, 0x68, 0xf6, 0xa5, 0x9d, 0x66, 0xb0, 0xa2, 0x82, 0xb2, 0xe5, 0x98, 0x65, 0xf5, 0x73, 0xa, 0xe2, 0xed, 0xf1, 0x88, 0xc0, 0x56}, }, { {0x2, 0x8f, 0xf3, 0x24, 0xac, 0x5f, 0x1b, 0x58, 0xbd, 0xc, 0xe3, 0xba, 0xfe, 0xe9, 0xb, 0xa9, 0xf0, 0x92, 0xcf, 0x8a, 0x2, 0x69, 0x21, 0x9a, 0x8f, 0x3, 0x59, 0x83, 0xa4, 0x7e, 0x8b, 0x3}, {0x17, 0x6e, 0xa8, 0x10, 0x11, 0x3d, 0x6d, 0x33, 0xfa, 0xb2, 0x75, 0xb, 0x32, 0x88, 0xf3, 0xd7, 0x88, 0x29, 0x7, 0x25, 0x76, 0x33, 0x15, 0xf9, 0x87, 0x8b, 0x10, 0x99, 0x6b, 0x4c, 0x67, 0x9}, {0xf8, 0x6f, 0x31, 0x99, 0x21, 0xf8, 0x4e, 0x9f, 0x4f, 0x8d, 0xa7, 0xea, 0x82, 0xd2, 0x49, 0x2f, 0x74, 0x31, 0xef, 0x5a, 0xab, 0xa5, 0x71, 0x9, 0x65, 0xeb, 0x69, 0x59, 0x2, 0x31, 0x5e, 0x6e}, }, { {0x22, 0x62, 0x6, 0x63, 0xe, 0xfb, 0x4, 0x33, 0x3f, 0xba, 0xac, 0x87, 0x89, 0x6, 0x35, 0xfb, 0xa3, 0x61, 0x10, 0x8c, 0x77, 0x24, 0x19, 0xbd, 0x20, 0x86, 0x83, 0xd1, 0x43, 0xad, 0x58, 0x30}, {0xfb, 0x93, 0xe5, 0x87, 0xf5, 0x62, 0x6c, 0xb1, 0x71, 0x3e, 0x5d, 0xca, 0xde, 0xed, 0x99, 0x49, 0x6d, 0x3e, 0xcc, 0x14, 0xe0, 0xc1, 0x91, 0xb4, 0xa8, 0xdb, 0xa8, 0x89, 0x47, 0x11, 0xf5, 0x8}, {0xd0, 0x63, 0x76, 0xe5, 0xfd, 0xf, 0x3c, 0x32, 0x10, 0xa6, 0x2e, 0xa2, 0x38, 0xdf, 0xc3, 0x5, 0x9a, 0x4f, 0x99, 0xac, 0xbd, 0x8a, 0xc7, 0xbd, 0x99, 0xdc, 0xe3, 0xef, 0xa4, 0x9f, 0x54, 0x26}, }, }, { { {0x6e, 0x66, 0x3f, 0xaf, 0x49, 0x85, 0x46, 0xdb, 0xa5, 0xe, 0x4a, 0xf1, 0x4, 0xcf, 0x7f, 0xd7, 0x47, 0xc, 0xba, 0xa4, 0xf7, 0x3f, 0xf2, 0x3d, 0x85, 0x3c, 0xce, 0x32, 0xe1, 0xdf, 0x10, 0x3a}, {0xd6, 0xf9, 0x6b, 0x1e, 0x46, 0x5a, 0x1d, 0x74, 0x81, 0xa5, 0x77, 0x77, 0xfc, 0xb3, 0x5, 0x23, 0xd9, 0xd3, 0x74, 0x64, 0xa2, 0x74, 0x55, 0xd4, 0xff, 0xe0, 0x1, 
0x64, 0xdc, 0xe1, 0x26, 0x19}, {0xa0, 0xce, 0x17, 0xea, 0x8a, 0x4e, 0x7f, 0xe0, 0xfd, 0xc1, 0x1f, 0x3a, 0x46, 0x15, 0xd5, 0x2f, 0xf1, 0xc0, 0xf2, 0x31, 0xfd, 0x22, 0x53, 0x17, 0x15, 0x5d, 0x1e, 0x86, 0x1d, 0xd0, 0xa1, 0x1f}, }, { {0xab, 0x94, 0xdf, 0xd1, 0x0, 0xac, 0xdc, 0x38, 0xe9, 0xd, 0x8, 0xd1, 0xdd, 0x2b, 0x71, 0x2e, 0x62, 0xe2, 0xd5, 0xfd, 0x3e, 0xe9, 0x13, 0x7f, 0xe5, 0x1, 0x9a, 0xee, 0x18, 0xed, 0xfc, 0x73}, {0x32, 0x98, 0x59, 0x7d, 0x94, 0x55, 0x80, 0xcc, 0x20, 0x55, 0xf1, 0x37, 0xda, 0x56, 0x46, 0x1e, 0x20, 0x93, 0x5, 0x4e, 0x74, 0xf7, 0xf6, 0x99, 0x33, 0xcf, 0x75, 0x6a, 0xbc, 0x63, 0x35, 0x77}, {0xb3, 0x9c, 0x13, 0x63, 0x8, 0xe9, 0xb1, 0x6, 0xcd, 0x3e, 0xa0, 0xc5, 0x67, 0xda, 0x93, 0xa4, 0x32, 0x89, 0x63, 0xad, 0xc8, 0xce, 0x77, 0x8d, 0x44, 0x4f, 0x86, 0x1b, 0x70, 0x6b, 0x42, 0x1f}, }, { {0x52, 0x25, 0xa1, 0x91, 0xc8, 0x35, 0x7e, 0xf1, 0x76, 0x9c, 0x5e, 0x57, 0x53, 0x81, 0x6b, 0xb7, 0x3e, 0x72, 0x9b, 0xd, 0x6f, 0x40, 0x83, 0xfa, 0x38, 0xe4, 0xa7, 0x3f, 0x1b, 0xbb, 0x76, 0xb}, {0x1, 0x1c, 0x91, 0x41, 0x4c, 0x26, 0xc9, 0xef, 0x25, 0x2c, 0xa2, 0x17, 0xb8, 0xb7, 0xa3, 0xf1, 0x47, 0x14, 0xf, 0xf3, 0x6b, 0xda, 0x75, 0x58, 0x90, 0xb0, 0x31, 0x1d, 0x27, 0xf5, 0x1a, 0x4e}, {0x9b, 0x93, 0x92, 0x7f, 0xf9, 0xc1, 0xb8, 0x8, 0x6e, 0xab, 0x44, 0xd4, 0xcb, 0x71, 0x67, 0xbe, 0x17, 0x80, 0xbb, 0x99, 0x63, 0x64, 0xe5, 0x22, 0x55, 0xa9, 0x72, 0xb7, 0x1e, 0xd6, 0x6d, 0x7b}, }, { {0xc7, 0xd2, 0x1, 0xab, 0xf9, 0xab, 0x30, 0x57, 0x18, 0x3b, 0x14, 0x40, 0xdc, 0x76, 0xfb, 0x16, 0x81, 0xb2, 0xcb, 0xa0, 0x65, 0xbe, 0x6c, 0x86, 0xfe, 0x6a, 0xff, 0x9b, 0x65, 0x9b, 0xfa, 0x53}, {0x92, 0x3d, 0xf3, 0x50, 0xe8, 0xc1, 0xad, 0xb7, 0xcf, 0xd5, 0x8c, 0x60, 0x4f, 0xfa, 0x98, 0x79, 0xdb, 0x5b, 0xfc, 0x8d, 0xbd, 0x2d, 0x96, 0xad, 0x4f, 0x2f, 0x1d, 0xaf, 0xce, 0x9b, 0x3e, 0x70}, {0x55, 0x54, 0x88, 0x94, 0xe9, 0xc8, 0x14, 0x6c, 0xe5, 0xd4, 0xae, 0x65, 0x66, 0x5d, 0x3a, 0x84, 0xf1, 0x5a, 0xd6, 0xbc, 0x3e, 0xb7, 0x1b, 0x18, 0x50, 0x1f, 0xc6, 0xc4, 0xe5, 0x93, 0x8d, 0x39}, }, { {0xf2, 0xe3, 0xe7, 
0xd2, 0x60, 0x7c, 0x87, 0xc3, 0xb1, 0x8b, 0x82, 0x30, 0xa0, 0xaa, 0x34, 0x3b, 0x38, 0xf1, 0x9e, 0x73, 0xe7, 0x26, 0x3e, 0x28, 0x77, 0x5, 0xc3, 0x2, 0x90, 0x9c, 0x9c, 0x69}, {0xf3, 0x48, 0xe2, 0x33, 0x67, 0xd1, 0x4b, 0x1c, 0x5f, 0xa, 0xbf, 0x15, 0x87, 0x12, 0x9e, 0xbd, 0x76, 0x3, 0xb, 0xa1, 0xf0, 0x8c, 0x3f, 0xd4, 0x13, 0x1b, 0x19, 0xdf, 0x5d, 0x9b, 0xb0, 0x53}, {0xcc, 0xf1, 0x46, 0x59, 0x23, 0xa7, 0x6, 0xf3, 0x7d, 0xd9, 0xe5, 0xcc, 0xb5, 0x18, 0x17, 0x92, 0x75, 0xe9, 0xb4, 0x81, 0x47, 0xd2, 0xcd, 0x28, 0x7, 0xd9, 0xcd, 0x6f, 0xc, 0xf3, 0xca, 0x51}, }, { {0xc7, 0x54, 0xac, 0x18, 0x9a, 0xf9, 0x7a, 0x73, 0xf, 0xb3, 0x1c, 0xc5, 0xdc, 0x78, 0x33, 0x90, 0xc7, 0xc, 0xe1, 0x4c, 0x33, 0xbc, 0x89, 0x2b, 0x9a, 0xe9, 0xf8, 0x89, 0xc1, 0x29, 0xae, 0x12}, {0xa, 0xe0, 0x74, 0x76, 0x42, 0xa7, 0xb, 0xa6, 0xf3, 0x7b, 0x7a, 0xa1, 0x70, 0x85, 0xe, 0x63, 0xcc, 0x24, 0x33, 0xcf, 0x3d, 0x56, 0x58, 0x37, 0xaa, 0xfd, 0x83, 0x23, 0x29, 0xaa, 0x4, 0x55}, {0xcf, 0x1, 0xd, 0x1f, 0xcb, 0xc0, 0x9e, 0xa9, 0xae, 0xf7, 0x34, 0x3a, 0xcc, 0xef, 0xd1, 0xd, 0x22, 0x4e, 0x9c, 0xd0, 0x21, 0x75, 0xca, 0x55, 0xea, 0xa5, 0xeb, 0x58, 0xe9, 0x4f, 0xd1, 0x5f}, }, { {0x8e, 0xcb, 0x93, 0xbf, 0x5e, 0xfe, 0x42, 0x3c, 0x5f, 0x56, 0xd4, 0x36, 0x51, 0xa8, 0xdf, 0xbe, 0xe8, 0x20, 0x42, 0x88, 0x9e, 0x85, 0xf0, 0xe0, 0x28, 0xd1, 0x25, 0x7, 0x96, 0x3f, 0xd7, 0x7d}, {0x2c, 0xab, 0x45, 0x28, 0xdf, 0x2d, 0xdc, 0xb5, 0x93, 0xe9, 0x7f, 0xa, 0xb1, 0x91, 0x94, 0x6, 0x46, 0xe3, 0x2, 0x40, 0xd6, 0xf3, 0xaa, 0x4d, 0xd1, 0x74, 0x64, 0x58, 0x6e, 0xf2, 0x3f, 0x9}, {0x29, 0x98, 0x5, 0x68, 0xfe, 0x24, 0xd, 0xb1, 0xe5, 0x23, 0xaf, 0xdb, 0x72, 0x6, 0x73, 0x75, 0x29, 0xac, 0x57, 0xb4, 0x3a, 0x25, 0x67, 0x13, 0xa4, 0x70, 0xb4, 0x86, 0xbc, 0xbc, 0x59, 0x2f}, }, { {0x1, 0xc3, 0x91, 0xb6, 0x60, 0xd5, 0x41, 0x70, 0x1e, 0xe7, 0xd7, 0xad, 0x3f, 0x1b, 0x20, 0x85, 0x85, 0x55, 0x33, 0x11, 0x63, 0xe1, 0xc2, 0x16, 0xb1, 0x28, 0x8, 0x1, 0x3d, 0x5e, 0xa5, 0x2a}, {0x5f, 0x13, 0x17, 0x99, 0x42, 0x7d, 0x84, 0x83, 0xd7, 0x3, 0x7d, 0x56, 0x1f, 0x91, 0x1b, 
0xad, 0xd1, 0xaa, 0x77, 0xbe, 0xd9, 0x48, 0x77, 0x7e, 0x4a, 0xaf, 0x51, 0x2e, 0x2e, 0xb4, 0x58, 0x54}, {0x4f, 0x44, 0x7, 0xc, 0xe6, 0x92, 0x51, 0xed, 0x10, 0x1d, 0x42, 0x74, 0x2d, 0x4e, 0xc5, 0x42, 0x64, 0xc8, 0xb5, 0xfd, 0x82, 0x4c, 0x2b, 0x35, 0x64, 0x86, 0x76, 0x8a, 0x4a, 0x0, 0xe9, 0x13}, }, }, { { {0x7f, 0x87, 0x3b, 0x19, 0xc9, 0x0, 0x2e, 0xbb, 0x6b, 0x50, 0xdc, 0xe0, 0x90, 0xa8, 0xe3, 0xec, 0x9f, 0x64, 0xde, 0x36, 0xc0, 0xb7, 0xf3, 0xec, 0x1a, 0x9e, 0xde, 0x98, 0x8, 0x4, 0x46, 0x5f}, {0xdb, 0xce, 0x2f, 0x83, 0x45, 0x88, 0x9d, 0x73, 0x63, 0xf8, 0x6b, 0xae, 0xc9, 0xd6, 0x38, 0xfa, 0xf7, 0xfe, 0x4f, 0xb7, 0xca, 0xd, 0xbc, 0x32, 0x5e, 0xe4, 0xbc, 0x14, 0x88, 0x7e, 0x93, 0x73}, {0x8d, 0xf4, 0x7b, 0x29, 0x16, 0x71, 0x3, 0xb9, 0x34, 0x68, 0xf0, 0xd4, 0x22, 0x3b, 0xd1, 0xa9, 0xc6, 0xbd, 0x96, 0x46, 0x57, 0x15, 0x97, 0xe1, 0x35, 0xe8, 0xd5, 0x91, 0xe8, 0xa4, 0xf8, 0x2c}, }, { {0xa2, 0x6b, 0xd0, 0x17, 0x7e, 0x48, 0xb5, 0x2c, 0x6b, 0x19, 0x50, 0x39, 0x1c, 0x38, 0xd2, 0x24, 0x30, 0x8a, 0x97, 0x85, 0x81, 0x9c, 0x65, 0xd7, 0xf6, 0xa4, 0xd6, 0x91, 0x28, 0x7f, 0x6f, 0x7a}, {0x67, 0xf, 0x11, 0x7, 0x87, 0xfd, 0x93, 0x6d, 0x49, 0xb5, 0x38, 0x7c, 0xd3, 0x9, 0x4c, 0xdd, 0x86, 0x6a, 0x73, 0xc2, 0x4c, 0x6a, 0xb1, 0x7c, 0x9, 0x2a, 0x25, 0x58, 0x6e, 0xbd, 0x49, 0x20}, {0x49, 0xef, 0x9a, 0x6a, 0x8d, 0xfd, 0x9, 0x7d, 0xb, 0xb9, 0x3d, 0x5b, 0xbe, 0x60, 0xee, 0xf0, 0xd4, 0xbf, 0x9e, 0x51, 0x2c, 0xb5, 0x21, 0x4c, 0x1d, 0x94, 0x45, 0xc5, 0xdf, 0xaa, 0x11, 0x60}, }, { {0x90, 0xf8, 0xcb, 0x2, 0xc8, 0xd0, 0xde, 0x63, 0xaa, 0x6a, 0xff, 0xd, 0xca, 0x98, 0xd0, 0xfb, 0x99, 0xed, 0xb6, 0xb9, 0xfd, 0xa, 0x4d, 0x62, 0x1e, 0xb, 0x34, 0x79, 0xb7, 0x18, 0xce, 0x69}, {0x3c, 0xf8, 0x95, 0xcf, 0x6d, 0x92, 0x67, 0x5f, 0x71, 0x90, 0x28, 0x71, 0x61, 0x85, 0x7e, 0x7c, 0x5b, 0x7a, 0x8f, 0x99, 0xf3, 0xe7, 0xa1, 0xd6, 0xe0, 0xf9, 0x62, 0xb, 0x1b, 0xcc, 0xc5, 0x6f}, {0xcb, 0x79, 0x98, 0xb2, 0x28, 0x55, 0xef, 0xd1, 0x92, 0x90, 0x7e, 0xd4, 0x3c, 0xae, 0x1a, 0xdd, 0x52, 0x23, 0x9f, 0x18, 0x42, 0x4, 0x7e, 0x12, 0xf1, 
0x1, 0x71, 0xe5, 0x3a, 0x6b, 0x59, 0x15}, }, { {0xca, 0x24, 0x51, 0x7e, 0x16, 0x31, 0xff, 0x9, 0xdf, 0x45, 0xc7, 0xd9, 0x8b, 0x15, 0xe4, 0xb, 0xe5, 0x56, 0xf5, 0x7e, 0x22, 0x7d, 0x2b, 0x29, 0x38, 0xd1, 0xb6, 0xaf, 0x41, 0xe2, 0xa4, 0x3a}, {0xa2, 0x79, 0x91, 0x3f, 0xd2, 0x39, 0x27, 0x46, 0xcf, 0xdd, 0xd6, 0x97, 0x31, 0x12, 0x83, 0xff, 0x8a, 0x14, 0xf2, 0x53, 0xb5, 0xde, 0x7, 0x13, 0xda, 0x4d, 0x5f, 0x7b, 0x68, 0x37, 0x22, 0xd}, {0xf5, 0x5, 0x33, 0x2a, 0xbf, 0x38, 0xc1, 0x2c, 0xc3, 0x26, 0xe9, 0xa2, 0x8f, 0x3f, 0x58, 0x48, 0xeb, 0xd2, 0x49, 0x55, 0xa2, 0xb1, 0x3a, 0x8, 0x6c, 0xa3, 0x87, 0x46, 0x6e, 0xaa, 0xfc, 0x32}, }, { {0xdf, 0xcc, 0x87, 0x27, 0x73, 0xa4, 0x7, 0x32, 0xf8, 0xe3, 0x13, 0xf2, 0x8, 0x19, 0xe3, 0x17, 0x4e, 0x96, 0xd, 0xf6, 0xd7, 0xec, 0xb2, 0xd5, 0xe9, 0xb, 0x60, 0xc2, 0x36, 0x63, 0x6f, 0x74}, {0xf5, 0x9a, 0x7d, 0xc5, 0x8d, 0x6e, 0xc5, 0x7b, 0xf2, 0xbd, 0xf0, 0x9d, 0xed, 0xd2, 0xb, 0x3e, 0xa3, 0xe4, 0xef, 0x22, 0xde, 0x14, 0xc0, 0xaa, 0x5c, 0x6a, 0xbd, 0xfe, 0xce, 0xe9, 0x27, 0x46}, {0x1c, 0x97, 0x6c, 0xab, 0x45, 0xf3, 0x4a, 0x3f, 0x1f, 0x73, 0x43, 0x99, 0x72, 0xeb, 0x88, 0xe2, 0x6d, 0x18, 0x44, 0x3, 0x8a, 0x6a, 0x59, 0x33, 0x93, 0x62, 0xd6, 0x7e, 0x0, 0x17, 0x49, 0x7b}, }, { {0xdd, 0xa2, 0x53, 0xdd, 0x28, 0x1b, 0x34, 0x54, 0x3f, 0xfc, 0x42, 0xdf, 0x5b, 0x90, 0x17, 0xaa, 0xf4, 0xf8, 0xd2, 0x4d, 0xd9, 0x92, 0xf5, 0xf, 0x7d, 0xd3, 0x8c, 0xe0, 0xf, 0x62, 0x3, 0x1d}, {0x64, 0xb0, 0x84, 0xab, 0x5c, 0xfb, 0x85, 0x2d, 0x14, 0xbc, 0xf3, 0x89, 0xd2, 0x10, 0x78, 0x49, 0xc, 0xce, 0x15, 0x7b, 0x44, 0xdc, 0x6a, 0x47, 0x7b, 0xfd, 0x44, 0xf8, 0x76, 0xa3, 0x2b, 0x12}, {0x54, 0xe5, 0xb4, 0xa2, 0xcd, 0x32, 0x2, 0xc2, 0x7f, 0x18, 0x5d, 0x11, 0x42, 0xfd, 0xd0, 0x9e, 0xd9, 0x79, 0xd4, 0x7d, 0xbe, 0xb4, 0xab, 0x2e, 0x4c, 0xec, 0x68, 0x2b, 0xf5, 0xb, 0xc7, 0x2}, }, { {0xe1, 0x72, 0x8d, 0x45, 0xbf, 0x32, 0xe5, 0xac, 0xb5, 0x3c, 0xb7, 0x7c, 0xe0, 0x68, 0xe7, 0x5b, 0xe7, 0xbd, 0x8b, 0xee, 0x94, 0x7d, 0xcf, 0x56, 0x3, 0x3a, 0xb4, 0xfe, 0xe3, 0x97, 0x6, 0x6b}, {0xbb, 0x2f, 0xb, 
0x5d, 0x4b, 0xec, 0x87, 0xa2, 0xca, 0x82, 0x48, 0x7, 0x90, 0x57, 0x5c, 0x41, 0x5c, 0x81, 0xd0, 0xc1, 0x1e, 0xa6, 0x44, 0xe0, 0xe0, 0xf5, 0x9e, 0x40, 0xa, 0x4f, 0x33, 0x26}, {0xc0, 0xa3, 0x62, 0xdf, 0x4a, 0xf0, 0xc8, 0xb6, 0x5d, 0xa4, 0x6d, 0x7, 0xef, 0x0, 0xf0, 0x3e, 0xa9, 0xd2, 0xf0, 0x49, 0x58, 0xb9, 0x9c, 0x9c, 0xae, 0x2f, 0x1b, 0x44, 0x43, 0x7f, 0xc3, 0x1c}, }, { {0xb9, 0xae, 0xce, 0xc9, 0xf1, 0x56, 0x66, 0xd7, 0x6a, 0x65, 0xe5, 0x18, 0xf8, 0x15, 0x5b, 0x1c, 0x34, 0x23, 0x4c, 0x84, 0x32, 0x28, 0xe7, 0x26, 0x38, 0x68, 0x19, 0x2f, 0x77, 0x6f, 0x34, 0x3a}, {0x4f, 0x32, 0xc7, 0x5c, 0x5a, 0x56, 0x8f, 0x50, 0x22, 0xa9, 0x6, 0xe5, 0xc0, 0xc4, 0x61, 0xd0, 0x19, 0xac, 0x45, 0x5c, 0xdb, 0xab, 0x18, 0xfb, 0x4a, 0x31, 0x80, 0x3, 0xc1, 0x9, 0x68, 0x6c}, {0xc8, 0x6a, 0xda, 0xe2, 0x12, 0x51, 0xd5, 0xd2, 0xed, 0x51, 0xe8, 0xb1, 0x31, 0x3, 0xbd, 0xe9, 0x62, 0x72, 0xc6, 0x8e, 0xdd, 0x46, 0x7, 0x96, 0xd0, 0xc5, 0xf7, 0x6e, 0x9f, 0x1b, 0x91, 0x5}, }, }, { { {0xef, 0xea, 0x2e, 0x51, 0xf3, 0xac, 0x49, 0x53, 0x49, 0xcb, 0xc1, 0x1c, 0xd3, 0x41, 0xc1, 0x20, 0x8d, 0x68, 0x9a, 0xa9, 0x7, 0xc, 0x18, 0x24, 0x17, 0x2d, 0x4b, 0xc6, 0xd1, 0xf9, 0x5e, 0x55}, {0xbb, 0xe, 0xdf, 0xf5, 0x83, 0x99, 0x33, 0xc1, 0xac, 0x4c, 0x2c, 0x51, 0x8f, 0x75, 0xf3, 0xc0, 0xe1, 0x98, 0xb3, 0xb, 0xa, 0x13, 0xf1, 0x2c, 0x62, 0xc, 0x27, 0xaa, 0xf9, 0xec, 0x3c, 0x6b}, {0x8, 0xbd, 0x73, 0x3b, 0xba, 0x70, 0xa7, 0x36, 0xc, 0xbf, 0xaf, 0xa3, 0x8, 0xef, 0x4a, 0x62, 0xf2, 0x46, 0x9, 0xb4, 0x98, 0xff, 0x37, 0x57, 0x9d, 0x74, 0x81, 0x33, 0xe1, 0x4d, 0x5f, 0x67}, }, { {0x1d, 0xb3, 0xda, 0x3b, 0xd9, 0xf6, 0x2f, 0xa1, 0xfe, 0x2d, 0x65, 0x9d, 0xf, 0xd8, 0x25, 0x7, 0x87, 0x94, 0xbe, 0x9a, 0xf3, 0x4f, 0x9c, 0x1, 0x43, 0x3c, 0xcd, 0x82, 0xb8, 0x50, 0xf4, 0x60}, {0xfc, 0x82, 0x17, 0x6b, 0x3, 0x52, 0x2c, 0xe, 0xb4, 0x83, 0xad, 0x6c, 0x81, 0x6c, 0x81, 0x64, 0x3e, 0x7, 0x64, 0x69, 0xd9, 0xbd, 0xdc, 0xd0, 0x20, 0xc5, 0x64, 0x1, 0xf7, 0x9d, 0xd9, 0x13}, {0xca, 0xc0, 0xe5, 0x21, 0xc3, 0x5e, 0x4b, 0x1, 0xa2, 0xbf, 0x19, 0xd7, 0xc9, 0x69, 
0xcb, 0x4f, 0xa0, 0x23, 0x0, 0x75, 0x18, 0x1c, 0x5f, 0x4e, 0x80, 0xac, 0xed, 0x55, 0x9e, 0xde, 0x6, 0x1c}, }, { {0xaa, 0x69, 0x6d, 0xff, 0x40, 0x2b, 0xd5, 0xff, 0xbb, 0x49, 0x40, 0xdc, 0x18, 0xb, 0x53, 0x34, 0x97, 0x98, 0x4d, 0xa3, 0x2f, 0x5c, 0x4a, 0x5e, 0x2d, 0xba, 0x32, 0x7d, 0x8e, 0x6f, 0x9, 0x78}, {0xe2, 0xc4, 0x3e, 0xa3, 0xd6, 0x7a, 0xf, 0x99, 0x8e, 0xe0, 0x2e, 0xbe, 0x38, 0xf9, 0x8, 0x66, 0x15, 0x45, 0x28, 0x63, 0xc5, 0x43, 0xa1, 0x9c, 0xd, 0xb6, 0x2d, 0xec, 0x1f, 0x8a, 0xf3, 0x4c}, {0xe7, 0x5c, 0xfa, 0xd, 0x65, 0xaa, 0xaa, 0xa0, 0x8c, 0x47, 0xb5, 0x48, 0x2a, 0x9e, 0xc4, 0xf9, 0x5b, 0x72, 0x3, 0x70, 0x7d, 0xcc, 0x9, 0x4f, 0xbe, 0x1a, 0x9, 0x26, 0x3a, 0xad, 0x3c, 0x37}, }, { {0xad, 0xbb, 0xdd, 0x89, 0xfb, 0xa8, 0xbe, 0xf1, 0xcb, 0xae, 0xae, 0x61, 0xbc, 0x2c, 0xcb, 0x3b, 0x9d, 0x8d, 0x9b, 0x1f, 0xbb, 0xa7, 0x58, 0x8f, 0x86, 0xa6, 0x12, 0x51, 0xda, 0x7e, 0x54, 0x21}, {0x7c, 0xf5, 0xc9, 0x82, 0x4d, 0x63, 0x94, 0xb2, 0x36, 0x45, 0x93, 0x24, 0xe1, 0xfd, 0xcb, 0x1f, 0x5a, 0xdb, 0x8c, 0x41, 0xb3, 0x4d, 0x9c, 0x9e, 0xfc, 0x19, 0x44, 0x45, 0xd9, 0xf3, 0x40, 0x0}, {0xd3, 0x86, 0x59, 0xfd, 0x39, 0xe9, 0xfd, 0xde, 0xc, 0x38, 0xa, 0x51, 0x89, 0x2c, 0x27, 0xf4, 0xb9, 0x19, 0x31, 0xbb, 0x7, 0xa4, 0x2b, 0xb7, 0xf4, 0x4d, 0x25, 0x4a, 0x33, 0xa, 0x55, 0x63}, }, { {0x49, 0x7b, 0x54, 0x72, 0x45, 0x58, 0xba, 0x9b, 0xe0, 0x8, 0xc4, 0xe2, 0xfa, 0xc6, 0x5, 0xf3, 0x8d, 0xf1, 0x34, 0xc7, 0x69, 0xfa, 0xe8, 0x60, 0x7a, 0x76, 0x7d, 0xaa, 0xaf, 0x2b, 0xa9, 0x39}, {0x37, 0xcf, 0x69, 0xb5, 0xed, 0xd6, 0x7, 0x65, 0xe1, 0x2e, 0xa5, 0xc, 0xb0, 0x29, 0x84, 0x17, 0x5d, 0xd6, 0x6b, 0xeb, 0x90, 0x0, 0x7c, 0xea, 0x51, 0x8f, 0xf7, 0xda, 0xc7, 0x62, 0xea, 0x3e}, {0x4e, 0x27, 0x93, 0xe6, 0x13, 0xc7, 0x24, 0x9d, 0x75, 0xd3, 0xdb, 0x68, 0x77, 0x85, 0x63, 0x5f, 0x9a, 0xb3, 0x8a, 0xeb, 0x60, 0x55, 0x52, 0x70, 0xcd, 0xc4, 0xc9, 0x65, 0x6, 0x6a, 0x43, 0x68}, }, { {0x7c, 0x10, 0x20, 0xe8, 0x17, 0xd3, 0x56, 0x1e, 0x65, 0xe9, 0xa, 0x84, 0x44, 0x68, 0x26, 0xc5, 0x7a, 0xfc, 0xf, 0x32, 0xc6, 0xa1, 0xe0, 0xc1, 
0x72, 0x14, 0x61, 0x91, 0x9c, 0x66, 0x73, 0x53}, {0x27, 0x3f, 0x2f, 0x20, 0xe8, 0x35, 0x2, 0xbc, 0xb0, 0x75, 0xf9, 0x64, 0xe2, 0x0, 0x5c, 0xc7, 0x16, 0x24, 0x8c, 0xa3, 0xd5, 0xe9, 0xa4, 0x91, 0xf9, 0x89, 0xb7, 0x8a, 0xf6, 0xe7, 0xb6, 0x17}, {0x57, 0x52, 0xe, 0x9a, 0xab, 0x14, 0x28, 0x5d, 0xfc, 0xb3, 0xca, 0xc9, 0x84, 0x20, 0x8f, 0x90, 0xca, 0x1e, 0x2d, 0x5b, 0x88, 0xf5, 0xca, 0xaf, 0x11, 0x7d, 0xf8, 0x78, 0xa6, 0xb5, 0xb4, 0x1c}, }, { {0xe7, 0x7, 0xa0, 0xa2, 0x62, 0xaa, 0x74, 0x6b, 0xb1, 0xc7, 0x71, 0xf0, 0xb0, 0xe0, 0x11, 0xf3, 0x23, 0xe2, 0xb, 0x0, 0x38, 0xe4, 0x7, 0x57, 0xac, 0x6e, 0xef, 0x82, 0x2d, 0xfd, 0xc0, 0x2d}, {0x6c, 0xfc, 0x4a, 0x39, 0x6b, 0xc0, 0x64, 0xb6, 0xb1, 0x5f, 0xda, 0x98, 0x24, 0xde, 0x88, 0xc, 0x34, 0xd8, 0xca, 0x4b, 0x16, 0x3, 0x8d, 0x4f, 0xa2, 0x34, 0x74, 0xde, 0x78, 0xca, 0xb, 0x33}, {0x4e, 0x74, 0x19, 0x11, 0x84, 0xff, 0x2e, 0x98, 0x24, 0x47, 0x7, 0x2b, 0x96, 0x5e, 0x69, 0xf9, 0xfb, 0x53, 0xc9, 0xbf, 0x4f, 0xc1, 0x8a, 0xc5, 0xf5, 0x1c, 0x9f, 0x36, 0x1b, 0xbe, 0x31, 0x3c}, }, { {0x72, 0x42, 0xcb, 0xf9, 0x93, 0xbc, 0x68, 0xc1, 0x98, 0xdb, 0xce, 0xc7, 0x1f, 0x71, 0xb8, 0xae, 0x7a, 0x8d, 0xac, 0x34, 0xaa, 0x52, 0xe, 0x7f, 0xbb, 0x55, 0x7d, 0x7e, 0x9, 0xc1, 0xce, 0x41}, {0xee, 0x8a, 0x94, 0x8, 0x4d, 0x86, 0xf4, 0xb0, 0x6f, 0x1c, 0xba, 0x91, 0xee, 0x19, 0xdc, 0x7, 0x58, 0xa1, 0xac, 0xa6, 0xae, 0xcd, 0x75, 0x79, 0xbb, 0xd4, 0x62, 0x42, 0x13, 0x61, 0xb, 0x33}, {0x8a, 0x80, 0x6d, 0xa2, 0xd7, 0x19, 0x96, 0xf7, 0x6d, 0x15, 0x9e, 0x1d, 0x9e, 0xd4, 0x1f, 0xbb, 0x27, 0xdf, 0xa1, 0xdb, 0x6c, 0xc3, 0xd7, 0x73, 0x7d, 0x77, 0x28, 0x1f, 0xd9, 0x4c, 0xb4, 0x26}, }, }, { { {0x83, 0x3, 0x73, 0x62, 0x93, 0xf2, 0xb7, 0xe1, 0x2c, 0x8a, 0xca, 0xeb, 0xff, 0x79, 0x52, 0x4b, 0x14, 0x13, 0xd4, 0xbf, 0x8a, 0x77, 0xfc, 0xda, 0xf, 0x61, 0x72, 0x9c, 0x14, 0x10, 0xeb, 0x7d}, {0x75, 0x74, 0x38, 0x8f, 0x47, 0x48, 0xf0, 0x51, 0x3c, 0xcb, 0xbe, 0x9c, 0xf4, 0xbc, 0x5d, 0xb2, 0x55, 0x20, 0x9f, 0xd9, 0x44, 0x12, 0xab, 0x9a, 0xd6, 0xa5, 0x10, 0x1c, 0x6c, 0x9e, 0x70, 0x2c}, {0x7a, 
0xee, 0x66, 0x87, 0x6a, 0xaf, 0x62, 0xcb, 0xe, 0xcd, 0x53, 0x55, 0x4, 0xec, 0xcb, 0x66, 0xb5, 0xe4, 0xb, 0xf, 0x38, 0x1, 0x80, 0x58, 0xea, 0xe2, 0x2c, 0xf6, 0x9f, 0x8e, 0xe6, 0x8}, }, { {0xf9, 0xf2, 0xb8, 0xa, 0xd5, 0x9, 0x2d, 0x2f, 0xdf, 0x23, 0x59, 0xc5, 0x8d, 0x21, 0xb9, 0xac, 0xb9, 0x6c, 0x76, 0x73, 0x26, 0x34, 0x8f, 0x4a, 0xf5, 0x19, 0xf7, 0x38, 0xd7, 0x3b, 0xb1, 0x4c}, {0xad, 0x30, 0xc1, 0x4b, 0xa, 0x50, 0xad, 0x34, 0x9c, 0xd4, 0xb, 0x3d, 0x49, 0xdb, 0x38, 0x8d, 0xbe, 0x89, 0xa, 0x50, 0x98, 0x3d, 0x5c, 0xa2, 0x9, 0x3b, 0xba, 0xee, 0x87, 0x3f, 0x1f, 0x2f}, {0x4a, 0xb6, 0x15, 0xe5, 0x75, 0x8c, 0x84, 0xf7, 0x38, 0x90, 0x4a, 0xdb, 0xba, 0x1, 0x95, 0xa5, 0x50, 0x1b, 0x75, 0x3f, 0x3f, 0x31, 0xd, 0xc2, 0xe8, 0x2e, 0xae, 0xc0, 0x53, 0xe3, 0xa1, 0x19}, }, { {0xbd, 0xbd, 0x96, 0xd5, 0xcd, 0x72, 0x21, 0xb4, 0x40, 0xfc, 0xee, 0x98, 0x43, 0x45, 0xe0, 0x93, 0xb5, 0x9, 0x41, 0xb4, 0x47, 0x53, 0xb1, 0x9f, 0x34, 0xae, 0x66, 0x2, 0x99, 0xd3, 0x6b, 0x73}, {0xc3, 0x5, 0xfa, 0xba, 0x60, 0x75, 0x1c, 0x7d, 0x61, 0x5e, 0xe5, 0xc6, 0xa0, 0xa0, 0xe1, 0xb3, 0x73, 0x64, 0xd6, 0xc0, 0x18, 0x97, 0x52, 0xe3, 0x86, 0x34, 0xc, 0xc2, 0x11, 0x6b, 0x54, 0x41}, {0xb4, 0xb3, 0x34, 0x93, 0x50, 0x2d, 0x53, 0x85, 0x73, 0x65, 0x81, 0x60, 0x4b, 0x11, 0xfd, 0x46, 0x75, 0x83, 0x5c, 0x42, 0x30, 0x5f, 0x5f, 0xcc, 0x5c, 0xab, 0x7f, 0xb8, 0xa2, 0x95, 0x22, 0x41}, }, { {0xc6, 0xea, 0x93, 0xe2, 0x61, 0x52, 0x65, 0x2e, 0xdb, 0xac, 0x33, 0x21, 0x3, 0x92, 0x5a, 0x84, 0x6b, 0x99, 0x0, 0x79, 0xcb, 0x75, 0x9, 0x46, 0x80, 0xdd, 0x5a, 0x19, 0x8d, 0xbb, 0x60, 0x7}, {0xe9, 0xd6, 0x7e, 0xf5, 0x88, 0x9b, 0xc9, 0x19, 0x25, 0xc8, 0xf8, 0x6d, 0x26, 0xcb, 0x93, 0x53, 0x73, 0xd2, 0xa, 0xb3, 0x13, 0x32, 0xee, 0x5c, 0x34, 0x2e, 0x2d, 0xb5, 0xeb, 0x53, 0xe1, 0x14}, {0x8a, 0x81, 0xe6, 0xcd, 0x17, 0x1a, 0x3e, 0x41, 0x84, 0xa0, 0x69, 0xed, 0xa9, 0x6d, 0x15, 0x57, 0xb1, 0xcc, 0xca, 0x46, 0x8f, 0x26, 0xbf, 0x2c, 0xf2, 0xc5, 0x3a, 0xc3, 0x9b, 0xbe, 0x34, 0x6b}, }, { {0xd3, 0xf2, 0x71, 0x65, 0x65, 0x69, 0xfc, 0x11, 0x7a, 0x73, 0xe, 
0x53, 0x45, 0xe8, 0xc9, 0xc6, 0x35, 0x50, 0xfe, 0xd4, 0xa2, 0xe7, 0x3a, 0xe3, 0xb, 0xd3, 0x6d, 0x2e, 0xb6, 0xc7, 0xb9, 0x1}, {0xb2, 0xc0, 0x78, 0x3a, 0x64, 0x2f, 0xdf, 0xf3, 0x7c, 0x2, 0x2e, 0xf2, 0x1e, 0x97, 0x3e, 0x4c, 0xa3, 0xb5, 0xc1, 0x49, 0x5e, 0x1c, 0x7d, 0xec, 0x2d, 0xdd, 0x22, 0x9, 0x8f, 0xc1, 0x12, 0x20}, {0x29, 0x9d, 0xc8, 0x5a, 0xe5, 0x55, 0xb, 0x88, 0x63, 0xa7, 0xa0, 0x45, 0x1f, 0x24, 0x83, 0x14, 0x1f, 0x6c, 0xe7, 0xc2, 0xdf, 0xef, 0x36, 0x3d, 0xe8, 0xad, 0x4b, 0x4e, 0x78, 0x5b, 0xaf, 0x8}, }, { {0x4b, 0x2c, 0xcc, 0x89, 0xd2, 0x14, 0x73, 0xe2, 0x8d, 0x17, 0x87, 0xa2, 0x11, 0xbd, 0xe4, 0x4b, 0xce, 0x64, 0x33, 0xfa, 0xd6, 0x28, 0xd5, 0x18, 0x6e, 0x82, 0xd9, 0xaf, 0xd5, 0xc1, 0x23, 0x64}, {0x33, 0x25, 0x1f, 0x88, 0xdc, 0x99, 0x34, 0x28, 0xb6, 0x23, 0x93, 0x77, 0xda, 0x25, 0x5, 0x9d, 0xf4, 0x41, 0x34, 0x67, 0xfb, 0xdd, 0x7a, 0x89, 0x8d, 0x16, 0x3a, 0x16, 0x71, 0x9d, 0xb7, 0x32}, {0x6a, 0xb3, 0xfc, 0xed, 0xd9, 0xf8, 0x85, 0xcc, 0xf9, 0xe5, 0x46, 0x37, 0x8f, 0xc2, 0xbc, 0x22, 0xcd, 0xd3, 0xe5, 0xf9, 0x38, 0xe3, 0x9d, 0xe4, 0xcc, 0x2d, 0x3e, 0xc1, 0xfb, 0x5e, 0xa, 0x48}, }, { {0x1f, 0x22, 0xce, 0x42, 0xe4, 0x4c, 0x61, 0xb6, 0x28, 0x39, 0x5, 0x4c, 0xcc, 0x9d, 0x19, 0x6e, 0x3, 0xbe, 0x1c, 0xdc, 0xa4, 0xb4, 0x3f, 0x66, 0x6, 0x8e, 0x1c, 0x69, 0x47, 0x1d, 0xb3, 0x24}, {0x71, 0x20, 0x62, 0x1, 0xb, 0xe7, 0x51, 0xb, 0xc5, 0xaf, 0x1d, 0x8b, 0xcf, 0x5, 0xb5, 0x6, 0xcd, 0xab, 0x5a, 0xef, 0x61, 0xb0, 0x6b, 0x2c, 0x31, 0xbf, 0xb7, 0xc, 0x60, 0x27, 0xaa, 0x47}, {0xc3, 0xf8, 0x15, 0xc0, 0xed, 0x1e, 0x54, 0x2a, 0x7c, 0x3f, 0x69, 0x7c, 0x7e, 0xfe, 0xa4, 0x11, 0xd6, 0x78, 0xa2, 0x4e, 0x13, 0x66, 0xaf, 0xf0, 0x94, 0xa0, 0xdd, 0x14, 0x5d, 0x58, 0x5b, 0x54}, }, { {0xe1, 0x21, 0xb3, 0xe3, 0xd0, 0xe4, 0x4, 0x62, 0x95, 0x1e, 0xff, 0x28, 0x7a, 0x63, 0xaa, 0x3b, 0x9e, 0xbd, 0x99, 0x5b, 0xfd, 0xcf, 0xc, 0xb, 0x71, 0xd0, 0xc8, 0x64, 0x3e, 0xdc, 0x22, 0x4d}, {0xf, 0x3a, 0xd4, 0xa0, 0x5e, 0x27, 0xbf, 0x67, 0xbe, 0xee, 0x9b, 0x8, 0x34, 0x8e, 0xe6, 0xad, 0x2e, 0xe7, 0x79, 0xd4, 0x4c, 0x13, 
0x89, 0x42, 0x54, 0x54, 0xba, 0x32, 0xc3, 0xf9, 0x62, 0xf}, {0x39, 0x5f, 0x3b, 0xd6, 0x89, 0x65, 0xb4, 0xfc, 0x61, 0xcf, 0xcb, 0x57, 0x3f, 0x6a, 0xae, 0x5c, 0x5, 0xfa, 0x3a, 0x95, 0xd2, 0xc2, 0xba, 0xfe, 0x36, 0x14, 0x37, 0x36, 0x1a, 0xa0, 0xf, 0x1c}, }, }, { { {0x50, 0x6a, 0x93, 0x8c, 0xe, 0x2b, 0x8, 0x69, 0xb6, 0xc5, 0xda, 0xc1, 0x35, 0xa0, 0xc9, 0xf9, 0x34, 0xb6, 0xdf, 0xc4, 0x54, 0x3e, 0xb7, 0x6f, 0x40, 0xc1, 0x2b, 0x1d, 0x9b, 0x41, 0x5, 0x40}, {0xff, 0x3d, 0x94, 0x22, 0xb6, 0x4, 0xc6, 0xd2, 0xa0, 0xb3, 0xcf, 0x44, 0xce, 0xbe, 0x8c, 0xbc, 0x78, 0x86, 0x80, 0x97, 0xf3, 0x4f, 0x25, 0x5d, 0xbf, 0xa6, 0x1c, 0x3b, 0x4f, 0x61, 0xa3, 0xf}, {0xf0, 0x82, 0xbe, 0xb9, 0xbd, 0xfe, 0x3, 0xa0, 0x90, 0xac, 0x44, 0x3a, 0xaf, 0xc1, 0x89, 0x20, 0x8e, 0xfa, 0x54, 0x19, 0x91, 0x9f, 0x49, 0xf8, 0x42, 0xab, 0x40, 0xef, 0x8a, 0x21, 0xba, 0x1f}, }, { {0x94, 0x1, 0x7b, 0x3e, 0x4, 0x57, 0x3e, 0x4f, 0x7f, 0xaf, 0xda, 0x8, 0xee, 0x3e, 0x1d, 0xa8, 0xf1, 0xde, 0xdc, 0x99, 0xab, 0xc6, 0x39, 0xc8, 0xd5, 0x61, 0x77, 0xff, 0x13, 0x5d, 0x53, 0x6c}, {0x3e, 0xf5, 0xc8, 0xfa, 0x48, 0x94, 0x54, 0xab, 0x41, 0x37, 0xa6, 0x7b, 0x9a, 0xe8, 0xf6, 0x81, 0x1, 0x5e, 0x2b, 0x6c, 0x7d, 0x6c, 0xfd, 0x74, 0x42, 0x6e, 0xc8, 0xa8, 0xca, 0x3a, 0x2e, 0x39}, {0xaf, 0x35, 0x8a, 0x3e, 0xe9, 0x34, 0xbd, 0x4c, 0x16, 0xe8, 0x87, 0x58, 0x44, 0x81, 0x7, 0x2e, 0xab, 0xb0, 0x9a, 0xf2, 0x76, 0x9c, 0x31, 0x19, 0x3b, 0xc1, 0xa, 0xd5, 0xe4, 0x7f, 0xe1, 0x25}, }, { {0xa7, 0x21, 0xf1, 0x76, 0xf5, 0x7f, 0x5f, 0x91, 0xe3, 0x87, 0xcd, 0x2f, 0x27, 0x32, 0x4a, 0xc3, 0x26, 0xe5, 0x1b, 0x4d, 0xde, 0x2f, 0xba, 0xcc, 0x9b, 0x89, 0x69, 0x89, 0x8f, 0x82, 0xba, 0x6b}, {0x76, 0xf6, 0x4, 0x1e, 0xd7, 0x9b, 0x28, 0xa, 0x95, 0xf, 0x42, 0xd6, 0x52, 0x1c, 0x8e, 0x20, 0xab, 0x1f, 0x69, 0x34, 0xb0, 0xd8, 0x86, 0x51, 0x51, 0xb3, 0x9f, 0x2a, 0x44, 0x51, 0x57, 0x25}, {0x1, 0x39, 0xfe, 0x90, 0x66, 0xbc, 0xd1, 0xe2, 0xd5, 0x7a, 0x99, 0xa0, 0x18, 0x4a, 0xb5, 0x4c, 0xd4, 0x60, 0x84, 0xaf, 0x14, 0x69, 0x1d, 0x97, 0xe4, 0x7b, 0x6b, 0x7f, 0x4f, 0x50, 0x9d, 
0x55}, }, { {0xfd, 0x66, 0xd2, 0xf6, 0xe7, 0x91, 0x48, 0x9c, 0x1b, 0x78, 0x7, 0x3, 0x9b, 0xa1, 0x44, 0x7, 0x3b, 0xe2, 0x61, 0x60, 0x1d, 0x8f, 0x38, 0x88, 0xe, 0xd5, 0x4b, 0x35, 0xa3, 0xa6, 0x3e, 0x12}, {0xd5, 0x54, 0xeb, 0xb3, 0x78, 0x83, 0x73, 0xa7, 0x7c, 0x3c, 0x55, 0xa5, 0x66, 0xd3, 0x69, 0x1d, 0xba, 0x0, 0x28, 0xf9, 0x62, 0xcf, 0x26, 0xa, 0x17, 0x32, 0x7e, 0x80, 0xd5, 0x12, 0xab, 0x1}, {0x96, 0x2d, 0xe3, 0x41, 0x90, 0x18, 0x8d, 0x11, 0x48, 0x58, 0x31, 0xd8, 0xc2, 0xe3, 0xed, 0xb9, 0xd9, 0x45, 0x32, 0xd8, 0x71, 0x42, 0xab, 0x1e, 0x54, 0xa1, 0x18, 0xc9, 0xe2, 0x61, 0x39, 0x4a}, }, { {0x1e, 0x3f, 0x23, 0xf3, 0x44, 0xd6, 0x27, 0x3, 0x16, 0xf0, 0xfc, 0x34, 0xe, 0x26, 0x9a, 0x49, 0x79, 0xb9, 0xda, 0xf2, 0x16, 0xa7, 0xb5, 0x83, 0x1f, 0x11, 0xd4, 0x9b, 0xad, 0xee, 0xac, 0x68}, {0xa0, 0xbb, 0xe6, 0xf8, 0xe0, 0x3b, 0xdc, 0x71, 0xa, 0xe3, 0xff, 0x7e, 0x34, 0xf8, 0xce, 0xd6, 0x6a, 0x47, 0x3a, 0xe1, 0x5f, 0x42, 0x92, 0xa9, 0x63, 0xb7, 0x1d, 0xfb, 0xe3, 0xbc, 0xd6, 0x2c}, {0x10, 0xc2, 0xd7, 0xf3, 0xe, 0xc9, 0xb4, 0x38, 0xc, 0x4, 0xad, 0xb7, 0x24, 0x6e, 0x8e, 0x30, 0x23, 0x3e, 0xe7, 0xb7, 0xf1, 0xd9, 0x60, 0x38, 0x97, 0xf5, 0x8, 0xb5, 0xd5, 0x60, 0x57, 0x59}, }, { {0x90, 0x27, 0x2, 0xfd, 0xeb, 0xcb, 0x2a, 0x88, 0x60, 0x57, 0x11, 0xc4, 0x5, 0x33, 0xaf, 0x89, 0xf4, 0x73, 0x34, 0x7d, 0xe3, 0x92, 0xf4, 0x65, 0x2b, 0x5a, 0x51, 0x54, 0xdf, 0xc5, 0xb2, 0x2c}, {0x97, 0x63, 0xaa, 0x4, 0xe1, 0xbf, 0x29, 0x61, 0xcb, 0xfc, 0xa7, 0xa4, 0x8, 0x0, 0x96, 0x8f, 0x58, 0x94, 0x90, 0x7d, 0x89, 0xc0, 0x8b, 0x3f, 0xa9, 0x91, 0xb2, 0xdc, 0x3e, 0xa4, 0x9f, 0x70}, {0xca, 0x2a, 0xfd, 0x63, 0x8c, 0x5d, 0xa, 0xeb, 0xff, 0x4e, 0x69, 0x2e, 0x66, 0xc1, 0x2b, 0xd2, 0x3a, 0xb0, 0xcb, 0xf8, 0x6e, 0xf3, 0x23, 0x27, 0x1f, 0x13, 0xc8, 0xf0, 0xec, 0x29, 0xf0, 0x70}, }, { {0xb9, 0xb0, 0x10, 0x5e, 0xaa, 0xaf, 0x6a, 0x2a, 0xa9, 0x1a, 0x4, 0xef, 0x70, 0xa3, 0xf0, 0x78, 0x1f, 0xd6, 0x3a, 0xaa, 0x77, 0xfb, 0x3e, 0x77, 0xe1, 0xd9, 0x4b, 0xa7, 0xa2, 0xa5, 0xec, 0x44}, {0x33, 0x3e, 0xed, 0x2e, 0xb3, 0x7, 0x13, 0x46, 0xe7, 
0x81, 0x55, 0xa4, 0x33, 0x2f, 0x4, 0xae, 0x66, 0x3, 0x5f, 0x19, 0xd3, 0x49, 0x44, 0xc9, 0x58, 0x48, 0x31, 0x6c, 0x8a, 0x5d, 0x7d, 0xb}, {0x43, 0xd5, 0x95, 0x7b, 0x32, 0x48, 0xd4, 0x25, 0x1d, 0xf, 0x34, 0xa3, 0x0, 0x83, 0xd3, 0x70, 0x2b, 0xc5, 0xe1, 0x60, 0x1c, 0x53, 0x1c, 0xde, 0xe4, 0xe9, 0x7d, 0x2c, 0x51, 0x24, 0x22, 0x27}, }, { {0xfc, 0x75, 0xa9, 0x42, 0x8a, 0xbb, 0x7b, 0xbf, 0x58, 0xa3, 0xad, 0x96, 0x77, 0x39, 0x5c, 0x8c, 0x48, 0xaa, 0xed, 0xcd, 0x6f, 0xc7, 0x7f, 0xe2, 0xa6, 0x20, 0xbc, 0xf6, 0xd7, 0x5f, 0x73, 0x19}, {0x2e, 0x34, 0xc5, 0x49, 0xaf, 0x92, 0xbc, 0x1a, 0xd0, 0xfa, 0xe6, 0xb2, 0x11, 0xd8, 0xee, 0xff, 0x29, 0x4e, 0xc8, 0xfc, 0x8d, 0x8c, 0xa2, 0xef, 0x43, 0xc5, 0x4c, 0xa4, 0x18, 0xdf, 0xb5, 0x11}, {0x66, 0x42, 0xc8, 0x42, 0xd0, 0x90, 0xab, 0xe3, 0x7e, 0x54, 0x19, 0x7f, 0xf, 0x8e, 0x84, 0xeb, 0xb9, 0x97, 0xa4, 0x65, 0xd0, 0xa1, 0x3, 0x25, 0x5f, 0x89, 0xdf, 0x91, 0x11, 0x91, 0xef, 0xf}, }, }, }; #endif // OPENSSL_SMALL // Bi[i] = (2*i+1)*B static const ge_precomp Bi[8] = { { {{ #if defined(OPENSSL_64_BIT) 1288382639258501, 245678601348599, 269427782077623, 1462984067271730, 137412439391563 #else 25967493, 19198397, 29566455, 3660896, 54414519, 4014786, 27544626, 21800161, 61029707, 2047604 #endif }}, {{ #if defined(OPENSSL_64_BIT) 62697248952638, 204681361388450, 631292143396476, 338455783676468, 1213667448819585 #else 54563134, 934261, 64385954, 3049989, 66381436, 9406985, 12720692, 5043384, 19500929, 18085054 #endif }}, {{ #if defined(OPENSSL_64_BIT) 301289933810280, 1259582250014073, 1422107436869536, 796239922652654, 1953934009299142 #else 58370664, 4489569, 9688441, 18769238, 10184608, 21191052, 29287918, 11864899, 42594502, 29115885 #endif }}, }, { {{ #if defined(OPENSSL_64_BIT) 1601611775252272, 1720807796594148, 1132070835939856, 1260455018889551, 2147779492816911 #else 15636272, 23865875, 24204772, 25642034, 616976, 16869170, 27787599, 18782243, 28944399, 32004408 #endif }}, {{ #if defined(OPENSSL_64_BIT) 316559037616741, 2177824224946892, 
1459442586438991, 1461528397712656, 751590696113597 #else 16568933, 4717097, 55552716, 32452109, 15682895, 21747389, 16354576, 21778470, 7689661, 11199574 #endif }}, {{ #if defined(OPENSSL_64_BIT) 1850748884277385, 1200145853858453, 1068094770532492, 672251375690438, 1586055907191707 #else 30464137, 27578307, 55329429, 17883566, 23220364, 15915852, 7512774, 10017326, 49359771, 23634074 #endif }}, }, { {{ #if defined(OPENSSL_64_BIT) 769950342298419, 132954430919746, 844085933195555, 974092374476333, 726076285546016 #else 10861363, 11473154, 27284546, 1981175, 37044515, 12577860, 32867885, 14515107, 51670560, 10819379 #endif }}, {{ #if defined(OPENSSL_64_BIT) 425251763115706, 608463272472562, 442562545713235, 837766094556764, 374555092627893 #else 4708026, 6336745, 20377586, 9066809, 55836755, 6594695, 41455196, 12483687, 54440373, 5581305 #endif }}, {{ #if defined(OPENSSL_64_BIT) 1086255230780037, 274979815921559, 1960002765731872, 929474102396301, 1190409889297339 #else 19563141, 16186464, 37722007, 4097518, 10237984, 29206317, 28542349, 13850243, 43430843, 17738489 #endif }}, }, { {{ #if defined(OPENSSL_64_BIT) 665000864555967, 2065379846933859, 370231110385876, 350988370788628, 1233371373142985 #else 5153727, 9909285, 1723747, 30776558, 30523604, 5516873, 19480852, 5230134, 43156425, 18378665 #endif }}, {{ #if defined(OPENSSL_64_BIT) 2019367628972465, 676711900706637, 110710997811333, 1108646842542025, 517791959672113 #else 36839857, 30090922, 7665485, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701 #endif }}, {{ #if defined(OPENSSL_64_BIT) 965130719900578, 247011430587952, 526356006571389, 91986625355052, 2157223321444601 #else 28881826, 14381568, 9657904, 3680757, 46927229, 7843315, 35708204, 1370707, 29794553, 32145132 #endif }}, }, { {{ #if defined(OPENSSL_64_BIT) 1802695059465007, 1664899123557221, 593559490740857, 2160434469266659, 927570450755031 #else 44589871, 26862249, 14201701, 24808930, 43598457, 8844725, 18474211, 32192982, 
54046167, 13821876 #endif }}, {{ #if defined(OPENSSL_64_BIT) 1725674970513508, 1933645953859181, 1542344539275782, 1767788773573747, 1297447965928905 #else 60653668, 25714560, 3374701, 28813570, 40010246, 22982724, 31655027, 26342105, 18853321, 19333481 #endif }}, {{ #if defined(OPENSSL_64_BIT) 1381809363726107, 1430341051343062, 2061843536018959, 1551778050872521, 2036394857967624 #else 4566811, 20590564, 38133974, 21313742, 59506191, 30723862, 58594505, 23123294, 2207752, 30344648 #endif }}, }, { {{ #if defined(OPENSSL_64_BIT) 1970894096313054, 528066325833207, 1619374932191227, 2207306624415883, 1169170329061080 #else 41954014, 29368610, 29681143, 7868801, 60254203, 24130566, 54671499, 32891431, 35997400, 17421995 #endif }}, {{ #if defined(OPENSSL_64_BIT) 2070390218572616, 1458919061857835, 624171843017421, 1055332792707765, 433987520732508 #else 25576264, 30851218, 7349803, 21739588, 16472781, 9300885, 3844789, 15725684, 171356, 6466918 #endif }}, {{ #if defined(OPENSSL_64_BIT) 893653801273833, 1168026499324677, 1242553501121234, 1306366254304474, 1086752658510815 #else 23103977, 13316479, 9739013, 17404951, 817874, 18515490, 8965338, 19466374, 36393951, 16193876 #endif }}, }, { {{ #if defined(OPENSSL_64_BIT) 213454002618221, 939771523987438, 1159882208056014, 317388369627517, 621213314200687 #else 33587053, 3180712, 64714734, 14003686, 50205390, 17283591, 17238397, 4729455, 49034351, 9256799 #endif }}, {{ #if defined(OPENSSL_64_BIT) 1971678598905747, 338026507889165, 762398079972271, 655096486107477, 42299032696322 #else 41926547, 29380300, 32336397, 5036987, 45872047, 11360616, 22616405, 9761698, 47281666, 630304 #endif }}, {{ #if defined(OPENSSL_64_BIT) 177130678690680, 1754759263300204, 1864311296286618, 1180675631479880, 1292726903152791 #else 53388152, 2639452, 42871404, 26147950, 9494426, 27780403, 60554312, 17593437, 64659607, 19263131 #endif }}, }, { {{ #if defined(OPENSSL_64_BIT) 1913163449625248, 460779200291993, 2193883288642314, 1008900146920800, 
1721983679009502 #else 63957664, 28508356, 9282713, 6866145, 35201802, 32691408, 48168288, 15033783, 25105118, 25659556 #endif }}, {{ #if defined(OPENSSL_64_BIT) 1070401523076875, 1272492007800961, 1910153608563310, 2075579521696771, 1191169788841221 #else 42782475, 15950225, 35307649, 18961608, 55446126, 28463506, 1573891, 30928545, 2198789, 17749813 #endif }}, {{ #if defined(OPENSSL_64_BIT) 692896803108118, 500174642072499, 2068223309439677, 1162190621851337, 1426986007309901 #else 64009494, 10324966, 64867251, 7453182, 61661885, 30818928, 53296841, 17317989, 34647629, 21263748 #endif }}, }, }; ================================================ FILE: Sources/CNIOBoringSSL/crypto/curve25519/internal.h ================================================ /* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CURVE25519_INTERNAL_H #define OPENSSL_HEADER_CURVE25519_INTERNAL_H #include #include "../internal.h" #if defined(__cplusplus) extern "C" { #endif #if defined(OPENSSL_ARM) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_APPLE) #define BORINGSSL_X25519_NEON // x25519_NEON is defined in asm/x25519-arm.S. 
void x25519_NEON(uint8_t out[32], const uint8_t scalar[32], const uint8_t point[32]); #endif #if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_SMALL) && \ defined(__GNUC__) && defined(__x86_64__) && !defined(OPENSSL_WINDOWS) #define BORINGSSL_FE25519_ADX // fiat_curve25519_adx_mul is defined in // third_party/fiat/asm/fiat_curve25519_adx_mul.S void __attribute__((sysv_abi)) fiat_curve25519_adx_mul(uint64_t out[4], const uint64_t in1[4], const uint64_t in2[4]); // fiat_curve25519_adx_square is defined in // third_party/fiat/asm/fiat_curve25519_adx_square.S void __attribute__((sysv_abi)) fiat_curve25519_adx_square(uint64_t out[4], const uint64_t in[4]); // x25519_scalar_mult_adx is defined in third_party/fiat/curve25519_64_adx.h void x25519_scalar_mult_adx(uint8_t out[32], const uint8_t scalar[32], const uint8_t point[32]); void x25519_ge_scalarmult_base_adx(uint8_t h[4][32], const uint8_t a[32]); #endif #if defined(OPENSSL_64_BIT) // fe means field element. Here the field is \Z/(2^255-19). An element t, // entries t[0]...t[4], represents the integer t[0]+2^51 t[1]+2^102 t[2]+2^153 // t[3]+2^204 t[4]. // fe limbs are bounded by 1.125*2^51. // Multiplication and carrying produce fe from fe_loose. typedef struct fe { uint64_t v[5]; } fe; // fe_loose limbs are bounded by 3.375*2^51. // Addition and subtraction produce fe_loose from (fe, fe). typedef struct fe_loose { uint64_t v[5]; } fe_loose; #else // fe means field element. Here the field is \Z/(2^255-19). An element t, // entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 // t[3]+2^102 t[4]+...+2^230 t[9]. // fe limbs are bounded by 1.125*2^26,1.125*2^25,1.125*2^26,1.125*2^25,etc. // Multiplication and carrying produce fe from fe_loose. typedef struct fe { uint32_t v[10]; } fe; // fe_loose limbs are bounded by 3.375*2^26,3.375*2^25,3.375*2^26,3.375*2^25,etc. // Addition and subtraction produce fe_loose from (fe, fe). 
typedef struct fe_loose { uint32_t v[10]; } fe_loose; #endif // ge means group element. // // Here the group is the set of pairs (x,y) of field elements (see fe.h) // satisfying -x^2 + y^2 = 1 + d x^2y^2 // where d = -121665/121666. // // Representations: // ge_p2 (projective): (X:Y:Z) satisfying x=X/Z, y=Y/Z // ge_p3 (extended): (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT // ge_p1p1 (completed): ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T // ge_precomp (Duif): (y+x,y-x,2dxy) typedef struct { fe X; fe Y; fe Z; } ge_p2; typedef struct { fe X; fe Y; fe Z; fe T; } ge_p3; typedef struct { fe_loose X; fe_loose Y; fe_loose Z; fe_loose T; } ge_p1p1; typedef struct { fe_loose yplusx; fe_loose yminusx; fe_loose xy2d; } ge_precomp; typedef struct { fe_loose YplusX; fe_loose YminusX; fe_loose Z; fe_loose T2d; } ge_cached; void x25519_ge_tobytes(uint8_t s[32], const ge_p2 *h); int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t s[32]); void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p); void x25519_ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p); void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p); void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q); void x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q); void x25519_ge_scalarmult_small_precomp( ge_p3 *h, const uint8_t a[32], const uint8_t precomp_table[15 * 2 * 32]); void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]); void x25519_ge_scalarmult(ge_p2 *r, const uint8_t *scalar, const ge_p3 *A); void x25519_sc_reduce(uint8_t s[64]); enum spake2_state_t { spake2_state_init = 0, spake2_state_msg_generated, spake2_state_key_generated, }; struct spake2_ctx_st { uint8_t private_key[32]; uint8_t my_msg[32]; uint8_t password_scalar[32]; uint8_t password_hash[64]; uint8_t *my_name; size_t my_name_len; uint8_t *their_name; size_t their_name_len; enum spake2_role_t my_role; enum spake2_state_t state; char disable_password_scalar_hack; }; extern const uint8_t k25519Precomp[32][8][3][32]; #if 
defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_CURVE25519_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/curve25519/spake25519.cc ================================================ /* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include "../fipsmodule/bn/internal.h" #include "../internal.h" #include "./internal.h" // The following precomputation tables are for the following // points used in the SPAKE2 protocol. // // N: // x: // 49918732221787544735331783592030787422991506689877079631459872391322455579424 // y: // 54629554431565467720832445949441049581317094546788069926228343916274969994000 // encoded: 10e3df0ae37d8e7a99b5fe74b44672103dbddcbd06af680d71329a11693bc778 // // M: // x: // 31406539342727633121250288103050113562375374900226415211311216773867585644232 // y: // 21177308356423958466833845032658859666296341766942662650232962324899758529114 // encoded: 5ada7e4bf6ddd9adb6626d32131c6b5c51a1e347a3478f53cfcf441b88eed12e // // These points and their precomputation tables are generated with the // following Python code. For a description of the precomputation table, // see curve25519.c in this directory. 
// // Exact copies of the source code are kept in bug 27296743. /* import hashlib import ed25519 as E # http://ed25519.cr.yp.to/python/ed25519.py SEED_N = 'edwards25519 point generation seed (N)' SEED_M = 'edwards25519 point generation seed (M)' def genpoint(seed): v = hashlib.sha256(seed).digest() it = 1 while True: try: x,y = E.decodepoint(v) except Exception, e: print e it += 1 v = hashlib.sha256(v).digest() continue print "Found in %d iterations:" % it print " x = %d" % x print " y = %d" % y print " Encoded (hex)" print E.encodepoint((x,y)).encode('hex') return (x,y) def gentable(P): t = [] for i in range(1,16): k = ((i >> 3 & 1) * (1 << 192) + (i >> 2 & 1) * (1 << 128) + (i >> 1 & 1) * (1 << 64) + (i & 1)) t.append(E.scalarmult(P, k)) return ''.join(E.encodeint(x) + E.encodeint(y) for (x,y) in t) def printtable(table, name): print "static const uint8_t %s[15 * 2 * 32] = {" % name, for i in range(15 * 2 * 32): if i % 12 == 0: print "\n ", print " 0x%02x," % ord(table[i]), print "\n};" if __name__ == "__main__": print "Searching for N" N = genpoint(SEED_N) print "Generating precomputation table for N" Ntable = gentable(N) printtable(Ntable, "kSpakeNSmallPrecomp") print "Searching for M" M = genpoint(SEED_M) print "Generating precomputation table for M" Mtable = gentable(M) printtable(Mtable, "kSpakeMSmallPrecomp") */ static const uint8_t kSpakeNSmallPrecomp[15 * 2 * 32] = { 0x20, 0x1b, 0xc5, 0xb3, 0x43, 0x17, 0x71, 0x10, 0x44, 0x1e, 0x73, 0xb3, 0xae, 0x3f, 0xbf, 0x9f, 0xf5, 0x44, 0xc8, 0x13, 0x8f, 0xd1, 0x01, 0xc2, 0x8a, 0x1a, 0x6d, 0xea, 0x4d, 0x00, 0x5d, 0x6e, 0x10, 0xe3, 0xdf, 0x0a, 0xe3, 0x7d, 0x8e, 0x7a, 0x99, 0xb5, 0xfe, 0x74, 0xb4, 0x46, 0x72, 0x10, 0x3d, 0xbd, 0xdc, 0xbd, 0x06, 0xaf, 0x68, 0x0d, 0x71, 0x32, 0x9a, 0x11, 0x69, 0x3b, 0xc7, 0x78, 0x93, 0xf1, 0x57, 0x97, 0x6e, 0xf0, 0x6e, 0x45, 0x37, 0x4a, 0xf4, 0x0b, 0x18, 0x51, 0xf5, 0x4f, 0x67, 0x3c, 0xdc, 0xec, 0x84, 0xed, 0xd0, 0xeb, 0xca, 0xfb, 0xdb, 0xff, 0x7f, 0xeb, 0xa8, 0x23, 0x68, 0x87, 0x13, 0x64, 
0x6a, 0x10, 0xf7, 0x45, 0xe0, 0x0f, 0x32, 0x21, 0x59, 0x7c, 0x0e, 0x50, 0xad, 0x56, 0xd7, 0x12, 0x69, 0x7b, 0x58, 0xf8, 0xb9, 0x3b, 0xa5, 0xbb, 0x4d, 0x1b, 0x87, 0x1c, 0x46, 0xa7, 0x17, 0x9d, 0x6d, 0x84, 0x45, 0xbe, 0x7f, 0x95, 0xd2, 0x34, 0xcd, 0x89, 0x95, 0xc0, 0xf0, 0xd3, 0xdf, 0x6e, 0x10, 0x4a, 0xe3, 0x7b, 0xce, 0x7f, 0x40, 0x27, 0xc7, 0x2b, 0xab, 0x66, 0x03, 0x59, 0xb4, 0x7b, 0xc7, 0xc7, 0xf0, 0x39, 0x9a, 0x33, 0x35, 0xbf, 0xcc, 0x2f, 0xf3, 0x2e, 0x68, 0x9d, 0x53, 0x5c, 0x88, 0x52, 0xe3, 0x77, 0x90, 0xa1, 0x27, 0x85, 0xc5, 0x74, 0x7f, 0x23, 0x0e, 0x93, 0x01, 0x3e, 0xe7, 0x2e, 0x2e, 0x95, 0xf3, 0x0d, 0xc2, 0x25, 0x25, 0x39, 0x39, 0x3d, 0x6e, 0x8e, 0x89, 0xbd, 0xe8, 0xbb, 0x67, 0x5e, 0x8c, 0x66, 0x8b, 0x63, 0x28, 0x1e, 0x4e, 0x74, 0x85, 0xa8, 0xaf, 0x0f, 0x12, 0x5d, 0xb6, 0x8a, 0x83, 0x1a, 0x77, 0x76, 0x5e, 0x62, 0x8a, 0xa7, 0x3c, 0xb8, 0x05, 0x57, 0x2b, 0xaf, 0x36, 0x2e, 0x10, 0x90, 0xb2, 0x39, 0xb4, 0x3e, 0x75, 0x6d, 0x3a, 0xa8, 0x31, 0x35, 0xc2, 0x1e, 0x8f, 0xc2, 0x79, 0x89, 0x35, 0x16, 0x26, 0xd1, 0xc7, 0x0b, 0x04, 0x1f, 0x1d, 0xf9, 0x9c, 0x05, 0xa6, 0x6b, 0xb5, 0x19, 0x5a, 0x24, 0x6d, 0x91, 0xc5, 0x31, 0xfd, 0xc5, 0xfa, 0xe7, 0xa6, 0xcb, 0x0e, 0x4b, 0x18, 0x0d, 0x94, 0xc7, 0xee, 0x1d, 0x46, 0x1f, 0x92, 0xb1, 0xb2, 0x4a, 0x2b, 0x43, 0x37, 0xfe, 0xc2, 0x15, 0x11, 0x89, 0xef, 0x59, 0x73, 0x3c, 0x06, 0x76, 0x78, 0xcb, 0xa6, 0x0d, 0x79, 0x5f, 0x28, 0x0b, 0x5b, 0x8c, 0x9e, 0xe4, 0xaa, 0x51, 0x9a, 0x42, 0x6f, 0x11, 0x50, 0x3d, 0x01, 0xd6, 0x21, 0xc0, 0x99, 0x5e, 0x1a, 0xe8, 0x81, 0x25, 0x80, 0xeb, 0xed, 0x5d, 0x37, 0x47, 0x30, 0x70, 0xa0, 0x4e, 0x0b, 0x43, 0x17, 0xbe, 0xb6, 0x47, 0xe7, 0x2a, 0x62, 0x9d, 0x5d, 0xa6, 0xc5, 0x33, 0x62, 0x9d, 0x56, 0x24, 0x9d, 0x1d, 0xb2, 0x13, 0xbc, 0x17, 0x66, 0x43, 0xd1, 0x68, 0xd5, 0x3b, 0x17, 0x69, 0x17, 0xa6, 0x06, 0x9e, 0x12, 0xb8, 0x7c, 0xd5, 0xaf, 0x3e, 0x21, 0x1b, 0x31, 0xeb, 0x0b, 0xa4, 0x98, 0x1c, 0xf2, 0x6a, 0x5e, 0x7c, 0x9b, 0x45, 0x8f, 0xb2, 0x12, 0x06, 0xd5, 0x8c, 0x1d, 0xb2, 0xa7, 0x57, 0x5f, 0x2f, 0x4f, 0xdb, 0x52, 
0x99, 0x7c, 0x58, 0x01, 0x5f, 0xf2, 0xa5, 0xf6, 0x51, 0x86, 0x21, 0x2f, 0x5b, 0x8d, 0x6a, 0xae, 0x83, 0x34, 0x6d, 0x58, 0x4b, 0xef, 0xfe, 0xbf, 0x73, 0x5d, 0xdb, 0xc4, 0x97, 0x2a, 0x85, 0xf3, 0x6c, 0x46, 0x42, 0xb3, 0x90, 0xc1, 0x57, 0x97, 0x50, 0x35, 0xb1, 0x9d, 0xb7, 0xc7, 0x3c, 0x85, 0x6d, 0x6c, 0xfd, 0xce, 0xb0, 0xc9, 0xa2, 0x77, 0xee, 0xc3, 0x6b, 0x0c, 0x37, 0xfa, 0x30, 0x91, 0xd1, 0x2c, 0xb8, 0x5e, 0x7f, 0x81, 0x5f, 0x87, 0xfd, 0x18, 0x02, 0x5a, 0x30, 0x4e, 0x62, 0xbc, 0x65, 0xc6, 0xce, 0x1a, 0xcf, 0x2b, 0xaa, 0x56, 0x3e, 0x4d, 0xcf, 0xba, 0x62, 0x5f, 0x9a, 0xd0, 0x72, 0xff, 0xef, 0x28, 0xbd, 0xbe, 0xd8, 0x57, 0x3d, 0xf5, 0x57, 0x7d, 0xe9, 0x71, 0x31, 0xec, 0x98, 0x90, 0x94, 0xd9, 0x54, 0xbf, 0x84, 0x0b, 0xe3, 0x06, 0x47, 0x19, 0x9a, 0x13, 0x1d, 0xef, 0x9d, 0x13, 0xf3, 0xdb, 0xc3, 0x5c, 0x72, 0x9e, 0xed, 0x24, 0xaa, 0x64, 0xed, 0xe7, 0x0d, 0xa0, 0x7c, 0x73, 0xba, 0x9b, 0x86, 0xa7, 0x3b, 0x55, 0xab, 0x58, 0x30, 0xf1, 0x15, 0x81, 0x83, 0x2f, 0xf9, 0x62, 0x84, 0x98, 0x66, 0xf6, 0x55, 0x21, 0xd8, 0xf2, 0x25, 0x64, 0x71, 0x4b, 0x12, 0x76, 0x59, 0xc5, 0xaa, 0x93, 0x67, 0xc3, 0x86, 0x25, 0xab, 0x4e, 0x4b, 0xf6, 0xd8, 0x3f, 0x44, 0x2e, 0x11, 0xe0, 0xbd, 0x6a, 0xf2, 0x5d, 0xf5, 0xf9, 0x53, 0xea, 0xa4, 0xc8, 0xd9, 0x50, 0x33, 0x81, 0xd9, 0xa8, 0x2d, 0x91, 0x7d, 0x13, 0x2a, 0x11, 0xcf, 0xde, 0x3f, 0x0a, 0xd2, 0xbc, 0x33, 0xb2, 0x62, 0x53, 0xea, 0x77, 0x88, 0x43, 0x66, 0x27, 0x43, 0x85, 0xe9, 0x5f, 0x55, 0xf5, 0x2a, 0x8a, 0xac, 0xdf, 0xff, 0x9b, 0x4c, 0x96, 0x9c, 0xa5, 0x7a, 0xce, 0xd5, 0x79, 0x18, 0xf1, 0x0b, 0x58, 0x95, 0x7a, 0xe7, 0xd3, 0x74, 0x65, 0x0b, 0xa4, 0x64, 0x30, 0xe8, 0x5c, 0xfc, 0x55, 0x56, 0xee, 0x14, 0x14, 0xd3, 0x45, 0x3b, 0xf8, 0xde, 0x05, 0x3e, 0xb9, 0x3c, 0xd7, 0x6a, 0x52, 0x72, 0x5b, 0x39, 0x09, 0xbe, 0x82, 0x23, 0x10, 0x4a, 0xb7, 0xc3, 0xdc, 0x4c, 0x5d, 0xc9, 0xf1, 0x14, 0x83, 0xf9, 0x0b, 0x9b, 0xe9, 0x23, 0x84, 0x6a, 0xc4, 0x08, 0x3d, 0xda, 0x3d, 0x12, 0x95, 0x87, 0x18, 0xa4, 0x7d, 0x3f, 0x23, 0xde, 0xd4, 0x1e, 0xa8, 0x47, 0xc3, 0x71, 0xdb, 0xf5, 
0x03, 0x6c, 0x57, 0xe7, 0xa4, 0x43, 0x82, 0x33, 0x7b, 0x62, 0x46, 0x7d, 0xf7, 0x10, 0x69, 0x18, 0x38, 0x27, 0x9a, 0x6f, 0x38, 0xac, 0xfa, 0x92, 0xc5, 0xae, 0x66, 0xa6, 0x73, 0x95, 0x15, 0x0e, 0x4c, 0x04, 0xb6, 0xfc, 0xf5, 0xc7, 0x21, 0x3a, 0x99, 0xdb, 0x0e, 0x36, 0xf0, 0x56, 0xbc, 0x75, 0xf9, 0x87, 0x9b, 0x11, 0x18, 0x92, 0x64, 0x1a, 0xe7, 0xc7, 0xab, 0x5a, 0xc7, 0x26, 0x7f, 0x13, 0x98, 0x42, 0x52, 0x43, 0xdb, 0xc8, 0x6d, 0x0b, 0xb7, 0x31, 0x93, 0x24, 0xd6, 0xe8, 0x24, 0x1f, 0x6f, 0x21, 0xa7, 0x8c, 0xeb, 0xdb, 0x83, 0xb8, 0x89, 0xe3, 0xc1, 0xd7, 0x69, 0x3b, 0x02, 0x6b, 0x54, 0x0f, 0x84, 0x2f, 0xb5, 0x5c, 0x17, 0x77, 0xbe, 0xe5, 0x61, 0x0d, 0xc5, 0xdf, 0x3b, 0xcf, 0x3e, 0x93, 0x4f, 0xf5, 0x89, 0xb9, 0x5a, 0xc5, 0x29, 0x31, 0xc0, 0xc2, 0xff, 0xe5, 0x3f, 0xa6, 0xac, 0x03, 0xca, 0xf5, 0xff, 0xe0, 0x36, 0xce, 0xf3, 0xe2, 0xb7, 0x9c, 0x02, 0xe9, 0x9e, 0xd2, 0xbc, 0x87, 0x2f, 0x3d, 0x9a, 0x1d, 0x8f, 0xc5, 0x72, 0xb8, 0xa2, 0x01, 0xd4, 0x68, 0xb1, 0x84, 0x16, 0x10, 0xf6, 0xf3, 0x52, 0x25, 0xd9, 0xdc, 0x4c, 0xdd, 0x0f, 0xd6, 0x4a, 0xcf, 0x60, 0x96, 0x7e, 0xcc, 0x42, 0x0f, 0x64, 0x9d, 0x72, 0x46, 0x04, 0x07, 0xf2, 0x5b, 0xf4, 0x07, 0xd1, 0xf4, 0x59, 0x71, }; static const uint8_t kSpakeMSmallPrecomp[15 * 2 * 32] = { 0xc8, 0xa6, 0x63, 0xc5, 0x97, 0xf1, 0xee, 0x40, 0xab, 0x62, 0x42, 0xee, 0x25, 0x6f, 0x32, 0x6c, 0x75, 0x2c, 0xa7, 0xd3, 0xbd, 0x32, 0x3b, 0x1e, 0x11, 0x9c, 0xbd, 0x04, 0xa9, 0x78, 0x6f, 0x45, 0x5a, 0xda, 0x7e, 0x4b, 0xf6, 0xdd, 0xd9, 0xad, 0xb6, 0x62, 0x6d, 0x32, 0x13, 0x1c, 0x6b, 0x5c, 0x51, 0xa1, 0xe3, 0x47, 0xa3, 0x47, 0x8f, 0x53, 0xcf, 0xcf, 0x44, 0x1b, 0x88, 0xee, 0xd1, 0x2e, 0x03, 0x89, 0xaf, 0xc0, 0x61, 0x2d, 0x9e, 0x35, 0xeb, 0x0e, 0x03, 0xe0, 0xb7, 0xfb, 0xa5, 0xbc, 0x44, 0xbe, 0x0c, 0x89, 0x0a, 0x0f, 0xd6, 0x59, 0x47, 0x9e, 0xe6, 0x3d, 0x36, 0x9d, 0xff, 0x44, 0x5e, 0xac, 0xab, 0xe5, 0x3a, 0xd5, 0xb0, 0x35, 0x9f, 0x6d, 0x7f, 0xba, 0xc0, 0x85, 0x0e, 0xf4, 0x70, 0x3f, 0x13, 0x90, 0x4c, 0x50, 0x1a, 0xee, 0xc5, 0xeb, 0x69, 0xfe, 0x98, 0x42, 0x87, 0x1d, 0xce, 
0x6c, 0x29, 0xaa, 0x2b, 0x31, 0xc2, 0x38, 0x7b, 0x6b, 0xee, 0x88, 0x0b, 0xba, 0xce, 0xa8, 0xca, 0x19, 0x60, 0x1b, 0x16, 0xf1, 0x25, 0x1e, 0xcf, 0x63, 0x66, 0x1e, 0xbb, 0x63, 0xeb, 0x7d, 0xca, 0xd2, 0xb4, 0x23, 0x5a, 0x01, 0x6f, 0x05, 0xd1, 0xdc, 0x41, 0x73, 0x75, 0xc0, 0xfd, 0x30, 0x91, 0x52, 0x68, 0x96, 0x45, 0xb3, 0x66, 0x01, 0x3b, 0x53, 0x89, 0x3c, 0x69, 0xbc, 0x6c, 0x69, 0xe3, 0x51, 0x8f, 0xe3, 0xd2, 0x84, 0xd5, 0x28, 0x66, 0xb5, 0xe6, 0x06, 0x09, 0xfe, 0x6d, 0xb0, 0x72, 0x16, 0xe0, 0x8a, 0xce, 0x61, 0x65, 0xa9, 0x21, 0x32, 0x48, 0xdc, 0x7a, 0x1d, 0xe1, 0x38, 0x7f, 0x8c, 0x75, 0x88, 0x3d, 0x08, 0xa9, 0x4a, 0x6f, 0x3d, 0x9f, 0x7f, 0x3f, 0xbd, 0x57, 0x6b, 0x19, 0xce, 0x3f, 0x4a, 0xc9, 0xd3, 0xf9, 0x6e, 0x72, 0x7b, 0x5b, 0x74, 0xea, 0xbe, 0x9c, 0x7a, 0x6d, 0x9c, 0x40, 0x49, 0xe6, 0xfb, 0x2a, 0x1a, 0x75, 0x70, 0xe5, 0x4e, 0xed, 0x74, 0xe0, 0x75, 0xac, 0xc0, 0xb1, 0x11, 0x3e, 0xf2, 0xaf, 0x88, 0x4d, 0x66, 0xb6, 0xf6, 0x15, 0x4f, 0x3c, 0x6c, 0x77, 0xae, 0x47, 0x51, 0x63, 0x9a, 0xfe, 0xe1, 0xb4, 0x1a, 0x12, 0xdf, 0xe9, 0x54, 0x8d, 0x3b, 0x30, 0x2a, 0x75, 0xe3, 0xe5, 0x29, 0xb1, 0x4c, 0xb0, 0x7c, 0x6d, 0xb5, 0xae, 0x85, 0xdb, 0x1e, 0x38, 0x55, 0x96, 0xa5, 0x5b, 0x9f, 0x15, 0x23, 0x28, 0x36, 0xb8, 0xa2, 0x41, 0xb4, 0xd7, 0x19, 0x91, 0x8d, 0x26, 0x3e, 0xca, 0x9c, 0x05, 0x7a, 0x2b, 0x60, 0x45, 0x86, 0x8b, 0xee, 0x64, 0x6f, 0x5c, 0x09, 0x4d, 0x4b, 0x5a, 0x7f, 0xb0, 0xc3, 0x26, 0x9d, 0x8b, 0xb8, 0x83, 0x69, 0xcf, 0x16, 0x72, 0x62, 0x3e, 0x5e, 0x53, 0x4f, 0x9c, 0x73, 0x76, 0xfc, 0x19, 0xef, 0xa0, 0x74, 0x3a, 0x11, 0x1e, 0xd0, 0x4d, 0xb7, 0x87, 0xa1, 0xd6, 0x87, 0x6c, 0x0e, 0x6c, 0x8c, 0xe9, 0xa0, 0x44, 0xc4, 0x72, 0x3e, 0x73, 0x17, 0x13, 0xd1, 0x4e, 0x3d, 0x8e, 0x1d, 0x5a, 0x8b, 0x75, 0xcb, 0x59, 0x2c, 0x47, 0x87, 0x15, 0x41, 0xfe, 0x08, 0xe9, 0xa6, 0x97, 0x17, 0x08, 0x26, 0x6a, 0xb5, 0xbb, 0x73, 0xaa, 0xb8, 0x5b, 0x65, 0x65, 0x5b, 0x30, 0x9e, 0x62, 0x59, 0x02, 0xf8, 0xb8, 0x0f, 0x32, 0x10, 0xc1, 0x36, 0x08, 0x52, 0x98, 0x4a, 0x1e, 0xf0, 0xab, 0x21, 0x5e, 0xde, 0x16, 0x0c, 
0xda, 0x09, 0x99, 0x6b, 0x9e, 0xc0, 0x90, 0xa5, 0x5a, 0xcc, 0xb0, 0xb7, 0xbb, 0xd2, 0x8b, 0x5f, 0xd3, 0x3b, 0x3e, 0x8c, 0xa5, 0x71, 0x66, 0x06, 0xe3, 0x28, 0xd4, 0xf8, 0x3f, 0xe5, 0x27, 0xdf, 0xfe, 0x0f, 0x09, 0xb2, 0x8a, 0x09, 0x5a, 0x23, 0x61, 0x0d, 0x2d, 0xf5, 0x44, 0xf1, 0x5c, 0xf8, 0x82, 0x4e, 0xdc, 0x78, 0x7a, 0xab, 0xc3, 0x57, 0x91, 0xaf, 0x65, 0x6e, 0x71, 0xf1, 0x44, 0xbf, 0xed, 0x43, 0x50, 0xb4, 0x67, 0x48, 0xef, 0x5a, 0x10, 0x46, 0x81, 0xb4, 0x0c, 0xc8, 0x48, 0xed, 0x99, 0x7a, 0x45, 0xa5, 0x92, 0xc3, 0x69, 0xd6, 0xd7, 0x8a, 0x20, 0x1b, 0xeb, 0x8f, 0xb2, 0xff, 0xec, 0x6d, 0x76, 0x04, 0xf8, 0xc2, 0x58, 0x9b, 0xf2, 0x20, 0x53, 0xc4, 0x74, 0x91, 0x19, 0xdd, 0x2d, 0x12, 0x53, 0xc7, 0x6e, 0xd0, 0x02, 0x51, 0x3c, 0xa6, 0x7d, 0x80, 0x75, 0x6b, 0x1d, 0xdf, 0xf8, 0x6a, 0x52, 0xbb, 0x81, 0xf8, 0x30, 0x45, 0xef, 0x51, 0x85, 0x36, 0xbe, 0x8e, 0xcf, 0x0b, 0x9a, 0x46, 0xe8, 0x3f, 0x99, 0xfd, 0xf7, 0xd9, 0x3e, 0x84, 0xe5, 0xe3, 0x37, 0xcf, 0x98, 0x7f, 0xeb, 0x5e, 0x5a, 0x53, 0x77, 0x1c, 0x20, 0xdc, 0xf1, 0x20, 0x99, 0xec, 0x60, 0x40, 0x93, 0xef, 0x5c, 0x1c, 0x81, 0xe2, 0xa5, 0xad, 0x2a, 0xc2, 0xdb, 0x6b, 0xc1, 0x7e, 0x8f, 0xa9, 0x23, 0x5b, 0xd9, 0x0d, 0xfe, 0xa0, 0xac, 0x11, 0x28, 0xba, 0x8e, 0x92, 0x07, 0x2d, 0x07, 0x40, 0x83, 0x14, 0x4c, 0x35, 0x8d, 0xd0, 0x11, 0xff, 0x98, 0xdb, 0x00, 0x30, 0x6f, 0x65, 0xb6, 0xa0, 0x7f, 0x9c, 0x08, 0xb8, 0xce, 0xb3, 0xa8, 0x42, 0xd3, 0x84, 0x45, 0xe1, 0xe3, 0x8f, 0xa6, 0x89, 0x21, 0xd7, 0x74, 0x02, 0x4d, 0x64, 0xdf, 0x54, 0x15, 0x9e, 0xba, 0x12, 0x49, 0x09, 0x41, 0xf6, 0x10, 0x24, 0xa1, 0x84, 0x15, 0xfd, 0x68, 0x6a, 0x57, 0x66, 0xb3, 0x6d, 0x4c, 0xea, 0xbf, 0xbc, 0x60, 0x3f, 0x52, 0x1c, 0x44, 0x1b, 0xc0, 0x4a, 0x25, 0xe3, 0xd9, 0x4c, 0x9a, 0x74, 0xad, 0xfc, 0x9e, 0x8d, 0x0b, 0x18, 0x66, 0x24, 0xd1, 0x06, 0xac, 0x68, 0xc1, 0xae, 0x14, 0xce, 0xb1, 0xf3, 0x86, 0x9f, 0x87, 0x11, 0xd7, 0x9f, 0x30, 0x92, 0xdb, 0xec, 0x0b, 0x4a, 0xe8, 0xf6, 0x53, 0x36, 0x68, 0x12, 0x11, 0x5e, 0xe0, 0x34, 0xa4, 0xff, 0x00, 0x0a, 0x26, 0xb8, 0x62, 0x79, 0x9c, 
0x0c, 0xd5, 0xe5, 0xf5, 0x1c, 0x1a, 0x16, 0x84, 0x4d, 0x8e, 0x5d, 0x31, 0x7e, 0xf7, 0xe2, 0xd3, 0xa1, 0x41, 0x90, 0x61, 0x5d, 0x04, 0xb2, 0x9a, 0x18, 0x9e, 0x54, 0xfb, 0xd1, 0x61, 0x95, 0x1b, 0x08, 0xca, 0x7c, 0x49, 0x44, 0x74, 0x1d, 0x2f, 0xca, 0xc4, 0x7a, 0xe1, 0x8b, 0x2f, 0xbb, 0x96, 0xee, 0x19, 0x8a, 0x5d, 0xfb, 0x3e, 0x82, 0xe7, 0x15, 0xdb, 0x29, 0x14, 0xee, 0xc9, 0x4d, 0x9a, 0xfb, 0x9f, 0x8a, 0xbb, 0x17, 0x37, 0x1b, 0x6e, 0x28, 0x6c, 0xf9, 0xff, 0xb5, 0xb5, 0x8b, 0x9d, 0x88, 0x20, 0x08, 0x10, 0xd7, 0xca, 0x58, 0xf6, 0xe1, 0x32, 0x91, 0x6f, 0x36, 0xc0, 0xad, 0xc1, 0x57, 0x5d, 0x76, 0x31, 0x43, 0xf3, 0xdd, 0xec, 0xf1, 0xa9, 0x79, 0xe9, 0xe9, 0x85, 0xd7, 0x91, 0xc7, 0x31, 0x62, 0x3c, 0xd2, 0x90, 0x2c, 0x9c, 0xa4, 0x56, 0x37, 0x7b, 0xbe, 0x40, 0x58, 0xc0, 0x81, 0x83, 0x22, 0xe8, 0x13, 0x79, 0x18, 0xdb, 0x3a, 0x1b, 0x31, 0x0d, 0x00, 0x6c, 0x22, 0x62, 0x75, 0x70, 0xd8, 0x96, 0x59, 0x99, 0x44, 0x79, 0x71, 0xa6, 0x76, 0x81, 0x28, 0xb2, 0x65, 0xe8, 0x47, 0x14, 0xc6, 0x39, 0x06, }; SPAKE2_CTX *SPAKE2_CTX_new(enum spake2_role_t my_role, const uint8_t *my_name, size_t my_name_len, const uint8_t *their_name, size_t their_name_len) { SPAKE2_CTX *ctx = reinterpret_cast(OPENSSL_zalloc(sizeof(SPAKE2_CTX))); if (ctx == NULL) { return NULL; } ctx->my_role = my_role; CBS my_name_cbs, their_name_cbs; CBS_init(&my_name_cbs, my_name, my_name_len); CBS_init(&their_name_cbs, their_name, their_name_len); if (!CBS_stow(&my_name_cbs, &ctx->my_name, &ctx->my_name_len) || !CBS_stow(&their_name_cbs, &ctx->their_name, &ctx->their_name_len)) { SPAKE2_CTX_free(ctx); return NULL; } return ctx; } void SPAKE2_CTX_free(SPAKE2_CTX *ctx) { if (ctx == NULL) { return; } OPENSSL_free(ctx->my_name); OPENSSL_free(ctx->their_name); OPENSSL_free(ctx); } // left_shift_3 sets |n| to |n|*8, where |n| is represented in little-endian // order. 
static void left_shift_3(uint8_t n[32]) {
  uint8_t carry = 0;
  unsigned i;

  for (i = 0; i < 32; i++) {
    // The three bits shifted out of the top of each byte become the low bits
    // of the next byte. The final carry out of n[31] is discarded, so the
    // multiplication is performed modulo 2^256.
    const uint8_t next_carry = n[i] >> 5;
    n[i] = (n[i] << 3) | carry;
    carry = next_carry;
  }
}

namespace {
// scalar holds a 256-bit little-endian value as |BN_ULONG| words, the layout
// expected by the bn_* word helpers used below.
typedef struct {
  BN_ULONG words[32 / sizeof(BN_ULONG)];
} scalar;
}  // namespace

// kOrder is the order of the prime-order subgroup of curve25519.
static const scalar kOrder = {
    {TOBN(0x5812631a, 0x5cf5d3ed), TOBN(0x14def9de, 0xa2f79cd6),
     TOBN(0x00000000, 0x00000000), TOBN(0x10000000, 0x00000000)}};

// scalar_cmov copies |src| to |dest| if |mask| is all ones.
static void scalar_cmov(scalar *dest, const scalar *src, crypto_word_t mask) {
  bn_select_words(dest->words, mask, src->words, dest->words,
                  OPENSSL_ARRAY_SIZE(dest->words));
}

// scalar_double sets |s| to |2×s|.
static void scalar_double(scalar *s) {
  bn_add_words(s->words, s->words, s->words, OPENSSL_ARRAY_SIZE(s->words));
}

// scalar_add sets |dest| to |dest| plus |src|.
static void scalar_add(scalar *dest, const scalar *src) {
  bn_add_words(dest->words, dest->words, src->words,
               OPENSSL_ARRAY_SIZE(dest->words));
}

// SPAKE2_generate_msg writes this side's 32-byte SPAKE2 message — the
// encoding of P* = x·G + h(password)·(M or N) — to |out| and records it in
// |ctx|. Returns one on success and zero if called out of sequence or if
// |max_out_len| is smaller than the 32-byte message.
int SPAKE2_generate_msg(SPAKE2_CTX *ctx, uint8_t *out, size_t *out_len,
                        size_t max_out_len, const uint8_t *password,
                        size_t password_len) {
  if (ctx->state != spake2_state_init) {
    return 0;
  }

  if (max_out_len < sizeof(ctx->my_msg)) {
    return 0;
  }

  uint8_t private_tmp[64];
  RAND_bytes(private_tmp, sizeof(private_tmp));
  x25519_sc_reduce(private_tmp);
  // Multiply by the cofactor (eight) so that we'll clear it when operating on
  // the peer's point later in the protocol.
  left_shift_3(private_tmp);
  OPENSSL_memcpy(ctx->private_key, private_tmp, sizeof(ctx->private_key));

  ge_p3 P;
  x25519_ge_scalarmult_base(&P, ctx->private_key);

  // mask = h(password) * <M or N>, where Alice uses M and Bob uses N (see the
  // precomputed tables above; the point name was lost from this comment
  // during extraction).
  uint8_t password_tmp[SHA512_DIGEST_LENGTH];
  SHA512(password, password_len, password_tmp);
  OPENSSL_memcpy(ctx->password_hash, password_tmp, sizeof(ctx->password_hash));
  x25519_sc_reduce(password_tmp);

  // Due to a copy-paste error, the call to |left_shift_3| was omitted after
  // the |x25519_sc_reduce|, just above. This meant that |ctx->password_scalar|
  // was not a multiple of eight to clear the cofactor and thus three bits of
  // the password hash would leak. In order to fix this in a unilateral way,
  // points of small order are added to the mask point such that it is in the
  // prime-order subgroup. Since the ephemeral scalar is a multiple of eight,
  // these points will cancel out when calculating the shared secret.
  //
  // Adding points of small order is the same as adding multiples of the prime
  // order to the password scalar. Since that's faster, that is what is done
  // below. The prime order (kOrder) is a large prime, thus odd, thus the LSB
  // is one. So adding it will flip the LSB. Adding twice it will flip the next
  // bit and so on for all the bottom three bits.

  scalar password_scalar;
  OPENSSL_memcpy(&password_scalar, password_tmp, sizeof(password_scalar));

  // |password_scalar| is the result of |x25519_sc_reduce| and thus is, at
  // most, $l-1$ (where $l$ is |kOrder|, the order of the prime-order subgroup
  // of Ed25519). In the following, we may add $l + 2×l + 4×l$ for a max value
  // of $8×l-1$. That is < 2**256, as required.

  if (!ctx->disable_password_scalar_hack) {
    scalar order = kOrder;
    scalar tmp;

    // Conditionally (in constant time) add l, 2l, then 4l, flipping each of
    // the bottom three bits of |password_scalar| in turn when it is set.
    OPENSSL_memset(&tmp, 0, sizeof(tmp));
    scalar_cmov(&tmp, &order,
                constant_time_eq_w(password_scalar.words[0] & 1, 1));
    scalar_add(&password_scalar, &tmp);

    scalar_double(&order);
    OPENSSL_memset(&tmp, 0, sizeof(tmp));
    scalar_cmov(&tmp, &order,
                constant_time_eq_w(password_scalar.words[0] & 2, 2));
    scalar_add(&password_scalar, &tmp);

    scalar_double(&order);
    OPENSSL_memset(&tmp, 0, sizeof(tmp));
    scalar_cmov(&tmp, &order,
                constant_time_eq_w(password_scalar.words[0] & 4, 4));
    scalar_add(&password_scalar, &tmp);

    assert((password_scalar.words[0] & 7) == 0);
  }

  OPENSSL_memcpy(ctx->password_scalar, password_scalar.words,
                 sizeof(ctx->password_scalar));

  ge_p3 mask;
  x25519_ge_scalarmult_small_precomp(&mask, ctx->password_scalar,
                                     ctx->my_role == spake2_role_alice
                                         ? kSpakeMSmallPrecomp
                                         : kSpakeNSmallPrecomp);

  // P* = P + mask.
  ge_cached mask_cached;
  x25519_ge_p3_to_cached(&mask_cached, &mask);
  ge_p1p1 Pstar;
  x25519_ge_add(&Pstar, &P, &mask_cached);

  // Encode P*
  ge_p2 Pstar_proj;
  x25519_ge_p1p1_to_p2(&Pstar_proj, &Pstar);
  x25519_ge_tobytes(ctx->my_msg, &Pstar_proj);

  OPENSSL_memcpy(out, ctx->my_msg, sizeof(ctx->my_msg));
  *out_len = sizeof(ctx->my_msg);
  ctx->state = spake2_state_msg_generated;

  return 1;
}

// update_with_length_prefix hashes |len| as an 8-byte little-endian integer,
// followed by the |len| bytes of |data|, into |sha|. The length prefix makes
// the concatenation of variable-length transcript fields unambiguous.
static void update_with_length_prefix(SHA512_CTX *sha, const uint8_t *data,
                                      const size_t len) {
  uint8_t len_le[8];
  size_t l = len;
  unsigned i;

  for (i = 0; i < 8; i++) {
    len_le[i] = l & 0xff;
    l >>= 8;
  }

  SHA512_Update(sha, len_le, sizeof(len_le));
  SHA512_Update(sha, data, len);
}

// SPAKE2_process_msg consumes the peer's 32-byte message, computes the shared
// group element and derives the session key as a SHA-512 hash over the
// length-prefixed transcript (names, both messages, shared point, password
// hash). Up to |max_out_key_len| bytes (at most SHA512_DIGEST_LENGTH) are
// written to |out_key|. Returns one on success and zero if called out of
// sequence, if |their_msg_len| is not 32, or if the peer's point does not
// decode.
int SPAKE2_process_msg(SPAKE2_CTX *ctx, uint8_t *out_key, size_t *out_key_len,
                       size_t max_out_key_len, const uint8_t *their_msg,
                       size_t their_msg_len) {
  if (ctx->state != spake2_state_msg_generated || their_msg_len != 32) {
    return 0;
  }

  ge_p3 Qstar;
  if (!x25519_ge_frombytes_vartime(&Qstar, their_msg)) {
    // Point received from peer was not on the curve.
    return 0;
  }

  // Unmask peer's value: subtract h(password)·(N for Alice, M for Bob), the
  // opposite table from the one used for our own mask.
  ge_p3 peers_mask;
  x25519_ge_scalarmult_small_precomp(&peers_mask, ctx->password_scalar,
                                     ctx->my_role == spake2_role_alice
                                         ? kSpakeNSmallPrecomp
                                         : kSpakeMSmallPrecomp);

  ge_cached peers_mask_cached;
  x25519_ge_p3_to_cached(&peers_mask_cached, &peers_mask);

  ge_p1p1 Q_compl;
  ge_p3 Q_ext;
  x25519_ge_sub(&Q_compl, &Qstar, &peers_mask_cached);
  x25519_ge_p1p1_to_p3(&Q_ext, &Q_compl);

  ge_p2 dh_shared;
  x25519_ge_scalarmult(&dh_shared, ctx->private_key, &Q_ext);

  uint8_t dh_shared_encoded[32];
  x25519_ge_tobytes(dh_shared_encoded, &dh_shared);

  SHA512_CTX sha;
  SHA512_Init(&sha);
  // Both sides hash the transcript in the same fixed order: Alice's name,
  // Bob's name, Alice's message, Bob's message.
  if (ctx->my_role == spake2_role_alice) {
    update_with_length_prefix(&sha, ctx->my_name, ctx->my_name_len);
    update_with_length_prefix(&sha, ctx->their_name, ctx->their_name_len);
    update_with_length_prefix(&sha, ctx->my_msg, sizeof(ctx->my_msg));
    update_with_length_prefix(&sha, their_msg, 32);
  } else {
    update_with_length_prefix(&sha, ctx->their_name, ctx->their_name_len);
    update_with_length_prefix(&sha, ctx->my_name, ctx->my_name_len);
    update_with_length_prefix(&sha, their_msg, 32);
    update_with_length_prefix(&sha, ctx->my_msg, sizeof(ctx->my_msg));
  }
  update_with_length_prefix(&sha, dh_shared_encoded, sizeof(dh_shared_encoded));
  update_with_length_prefix(&sha, ctx->password_hash,
                            sizeof(ctx->password_hash));

  uint8_t key[SHA512_DIGEST_LENGTH];
  SHA512_Final(key, &sha);

  size_t to_copy = max_out_key_len;
  if (to_copy > sizeof(key)) {
    to_copy = sizeof(key);
  }
  OPENSSL_memcpy(out_key, key, to_copy);
  *out_key_len = to_copy;
  ctx->state = spake2_state_key_generated;

  return 1;
}



================================================
FILE: Sources/CNIOBoringSSL/crypto/des/des.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "internal.h" /* IP and FP * The problem is more of a geometric problem that random bit fiddling. 0 1 2 3 4 5 6 7 62 54 46 38 30 22 14 6 8 9 10 11 12 13 14 15 60 52 44 36 28 20 12 4 16 17 18 19 20 21 22 23 58 50 42 34 26 18 10 2 24 25 26 27 28 29 30 31 to 56 48 40 32 24 16 8 0 32 33 34 35 36 37 38 39 63 55 47 39 31 23 15 7 40 41 42 43 44 45 46 47 61 53 45 37 29 21 13 5 48 49 50 51 52 53 54 55 59 51 43 35 27 19 11 3 56 57 58 59 60 61 62 63 57 49 41 33 25 17 9 1 The output has been subject to swaps of the form 0 1 -> 3 1 but the odd and even bits have been put into 2 3 2 0 different words. The main trick is to remember that t=((l>>size)^r)&(mask); r^=t; l^=(t<> (n)) ^ (b)) & (m)); \ (b) ^= (t); \ (a) ^= ((t) << (n)); \ } while (0) #define IP(l, r) \ do { \ uint32_t tt; \ PERM_OP(r, l, tt, 4, 0x0f0f0f0fL); \ PERM_OP(l, r, tt, 16, 0x0000ffffL); \ PERM_OP(r, l, tt, 2, 0x33333333L); \ PERM_OP(l, r, tt, 8, 0x00ff00ffL); \ PERM_OP(r, l, tt, 1, 0x55555555L); \ } while (0) #define FP(l, r) \ do { \ uint32_t tt; \ PERM_OP(l, r, tt, 1, 0x55555555L); \ PERM_OP(r, l, tt, 8, 0x00ff00ffL); \ PERM_OP(l, r, tt, 2, 0x33333333L); \ PERM_OP(r, l, tt, 16, 0x0000ffffL); \ PERM_OP(l, r, tt, 4, 0x0f0f0f0fL); \ } while (0) #define LOAD_DATA(ks, R, S, u, t, E0, E1) \ do { \ (u) = (R) ^ (ks)->subkeys[S][0]; \ (t) = (R) ^ (ks)->subkeys[S][1]; \ } while (0) #define D_ENCRYPT(ks, LL, R, S) \ do { \ LOAD_DATA(ks, R, S, u, t, E0, E1); \ t = CRYPTO_rotr_u32(t, 4); \ (LL) ^= \ DES_SPtrans[0][(u >> 2L) & 0x3f] ^ DES_SPtrans[2][(u >> 10L) & 0x3f] ^ \ DES_SPtrans[4][(u >> 18L) & 0x3f] ^ \ DES_SPtrans[6][(u >> 26L) & 0x3f] ^ DES_SPtrans[1][(t >> 2L) & 0x3f] ^ \ DES_SPtrans[3][(t >> 10L) & 0x3f] ^ \ DES_SPtrans[5][(t >> 18L) & 0x3f] ^ DES_SPtrans[7][(t >> 26L) & 0x3f]; \ } while (0) #define ITERATIONS 16 #define HALF_ITERATIONS 8 static const uint32_t 
des_skb[8][64] = { { // for C bits (numbered as per FIPS 46) 1 2 3 4 5 6 0x00000000, 0x00000010, 0x20000000, 0x20000010, 0x00010000, 0x00010010, 0x20010000, 0x20010010, 0x00000800, 0x00000810, 0x20000800, 0x20000810, 0x00010800, 0x00010810, 0x20010800, 0x20010810, 0x00000020, 0x00000030, 0x20000020, 0x20000030, 0x00010020, 0x00010030, 0x20010020, 0x20010030, 0x00000820, 0x00000830, 0x20000820, 0x20000830, 0x00010820, 0x00010830, 0x20010820, 0x20010830, 0x00080000, 0x00080010, 0x20080000, 0x20080010, 0x00090000, 0x00090010, 0x20090000, 0x20090010, 0x00080800, 0x00080810, 0x20080800, 0x20080810, 0x00090800, 0x00090810, 0x20090800, 0x20090810, 0x00080020, 0x00080030, 0x20080020, 0x20080030, 0x00090020, 0x00090030, 0x20090020, 0x20090030, 0x00080820, 0x00080830, 0x20080820, 0x20080830, 0x00090820, 0x00090830, 0x20090820, 0x20090830, }, { // for C bits (numbered as per FIPS 46) 7 8 10 11 12 13 0x00000000, 0x02000000, 0x00002000, 0x02002000, 0x00200000, 0x02200000, 0x00202000, 0x02202000, 0x00000004, 0x02000004, 0x00002004, 0x02002004, 0x00200004, 0x02200004, 0x00202004, 0x02202004, 0x00000400, 0x02000400, 0x00002400, 0x02002400, 0x00200400, 0x02200400, 0x00202400, 0x02202400, 0x00000404, 0x02000404, 0x00002404, 0x02002404, 0x00200404, 0x02200404, 0x00202404, 0x02202404, 0x10000000, 0x12000000, 0x10002000, 0x12002000, 0x10200000, 0x12200000, 0x10202000, 0x12202000, 0x10000004, 0x12000004, 0x10002004, 0x12002004, 0x10200004, 0x12200004, 0x10202004, 0x12202004, 0x10000400, 0x12000400, 0x10002400, 0x12002400, 0x10200400, 0x12200400, 0x10202400, 0x12202400, 0x10000404, 0x12000404, 0x10002404, 0x12002404, 0x10200404, 0x12200404, 0x10202404, 0x12202404, }, { // for C bits (numbered as per FIPS 46) 14 15 16 17 19 20 0x00000000, 0x00000001, 0x00040000, 0x00040001, 0x01000000, 0x01000001, 0x01040000, 0x01040001, 0x00000002, 0x00000003, 0x00040002, 0x00040003, 0x01000002, 0x01000003, 0x01040002, 0x01040003, 0x00000200, 0x00000201, 0x00040200, 0x00040201, 0x01000200, 0x01000201, 
0x01040200, 0x01040201, 0x00000202, 0x00000203, 0x00040202, 0x00040203, 0x01000202, 0x01000203, 0x01040202, 0x01040203, 0x08000000, 0x08000001, 0x08040000, 0x08040001, 0x09000000, 0x09000001, 0x09040000, 0x09040001, 0x08000002, 0x08000003, 0x08040002, 0x08040003, 0x09000002, 0x09000003, 0x09040002, 0x09040003, 0x08000200, 0x08000201, 0x08040200, 0x08040201, 0x09000200, 0x09000201, 0x09040200, 0x09040201, 0x08000202, 0x08000203, 0x08040202, 0x08040203, 0x09000202, 0x09000203, 0x09040202, 0x09040203, }, { // for C bits (numbered as per FIPS 46) 21 23 24 26 27 28 0x00000000, 0x00100000, 0x00000100, 0x00100100, 0x00000008, 0x00100008, 0x00000108, 0x00100108, 0x00001000, 0x00101000, 0x00001100, 0x00101100, 0x00001008, 0x00101008, 0x00001108, 0x00101108, 0x04000000, 0x04100000, 0x04000100, 0x04100100, 0x04000008, 0x04100008, 0x04000108, 0x04100108, 0x04001000, 0x04101000, 0x04001100, 0x04101100, 0x04001008, 0x04101008, 0x04001108, 0x04101108, 0x00020000, 0x00120000, 0x00020100, 0x00120100, 0x00020008, 0x00120008, 0x00020108, 0x00120108, 0x00021000, 0x00121000, 0x00021100, 0x00121100, 0x00021008, 0x00121008, 0x00021108, 0x00121108, 0x04020000, 0x04120000, 0x04020100, 0x04120100, 0x04020008, 0x04120008, 0x04020108, 0x04120108, 0x04021000, 0x04121000, 0x04021100, 0x04121100, 0x04021008, 0x04121008, 0x04021108, 0x04121108, }, { // for D bits (numbered as per FIPS 46) 1 2 3 4 5 6 0x00000000, 0x10000000, 0x00010000, 0x10010000, 0x00000004, 0x10000004, 0x00010004, 0x10010004, 0x20000000, 0x30000000, 0x20010000, 0x30010000, 0x20000004, 0x30000004, 0x20010004, 0x30010004, 0x00100000, 0x10100000, 0x00110000, 0x10110000, 0x00100004, 0x10100004, 0x00110004, 0x10110004, 0x20100000, 0x30100000, 0x20110000, 0x30110000, 0x20100004, 0x30100004, 0x20110004, 0x30110004, 0x00001000, 0x10001000, 0x00011000, 0x10011000, 0x00001004, 0x10001004, 0x00011004, 0x10011004, 0x20001000, 0x30001000, 0x20011000, 0x30011000, 0x20001004, 0x30001004, 0x20011004, 0x30011004, 0x00101000, 0x10101000, 
0x00111000, 0x10111000, 0x00101004, 0x10101004, 0x00111004, 0x10111004, 0x20101000, 0x30101000, 0x20111000, 0x30111000, 0x20101004, 0x30101004, 0x20111004, 0x30111004, }, { // for D bits (numbered as per FIPS 46) 8 9 11 12 13 14 0x00000000, 0x08000000, 0x00000008, 0x08000008, 0x00000400, 0x08000400, 0x00000408, 0x08000408, 0x00020000, 0x08020000, 0x00020008, 0x08020008, 0x00020400, 0x08020400, 0x00020408, 0x08020408, 0x00000001, 0x08000001, 0x00000009, 0x08000009, 0x00000401, 0x08000401, 0x00000409, 0x08000409, 0x00020001, 0x08020001, 0x00020009, 0x08020009, 0x00020401, 0x08020401, 0x00020409, 0x08020409, 0x02000000, 0x0A000000, 0x02000008, 0x0A000008, 0x02000400, 0x0A000400, 0x02000408, 0x0A000408, 0x02020000, 0x0A020000, 0x02020008, 0x0A020008, 0x02020400, 0x0A020400, 0x02020408, 0x0A020408, 0x02000001, 0x0A000001, 0x02000009, 0x0A000009, 0x02000401, 0x0A000401, 0x02000409, 0x0A000409, 0x02020001, 0x0A020001, 0x02020009, 0x0A020009, 0x02020401, 0x0A020401, 0x02020409, 0x0A020409, }, { // for D bits (numbered as per FIPS 46) 16 17 18 19 20 21 0x00000000, 0x00000100, 0x00080000, 0x00080100, 0x01000000, 0x01000100, 0x01080000, 0x01080100, 0x00000010, 0x00000110, 0x00080010, 0x00080110, 0x01000010, 0x01000110, 0x01080010, 0x01080110, 0x00200000, 0x00200100, 0x00280000, 0x00280100, 0x01200000, 0x01200100, 0x01280000, 0x01280100, 0x00200010, 0x00200110, 0x00280010, 0x00280110, 0x01200010, 0x01200110, 0x01280010, 0x01280110, 0x00000200, 0x00000300, 0x00080200, 0x00080300, 0x01000200, 0x01000300, 0x01080200, 0x01080300, 0x00000210, 0x00000310, 0x00080210, 0x00080310, 0x01000210, 0x01000310, 0x01080210, 0x01080310, 0x00200200, 0x00200300, 0x00280200, 0x00280300, 0x01200200, 0x01200300, 0x01280200, 0x01280300, 0x00200210, 0x00200310, 0x00280210, 0x00280310, 0x01200210, 0x01200310, 0x01280210, 0x01280310, }, { // for D bits (numbered as per FIPS 46) 22 23 24 25 27 28 0x00000000, 0x04000000, 0x00040000, 0x04040000, 0x00000002, 0x04000002, 0x00040002, 0x04040002, 0x00002000, 
0x04002000, 0x00042000, 0x04042000, 0x00002002, 0x04002002, 0x00042002, 0x04042002, 0x00000020, 0x04000020, 0x00040020, 0x04040020, 0x00000022, 0x04000022, 0x00040022, 0x04040022, 0x00002020, 0x04002020, 0x00042020, 0x04042020, 0x00002022, 0x04002022, 0x00042022, 0x04042022, 0x00000800, 0x04000800, 0x00040800, 0x04040800, 0x00000802, 0x04000802, 0x00040802, 0x04040802, 0x00002800, 0x04002800, 0x00042800, 0x04042800, 0x00002802, 0x04002802, 0x00042802, 0x04042802, 0x00000820, 0x04000820, 0x00040820, 0x04040820, 0x00000822, 0x04000822, 0x00040822, 0x04040822, 0x00002820, 0x04002820, 0x00042820, 0x04042820, 0x00002822, 0x04002822, 0x00042822, 0x04042822, }}; static const uint32_t DES_SPtrans[8][64] = { { // nibble 0 0x02080800, 0x00080000, 0x02000002, 0x02080802, 0x02000000, 0x00080802, 0x00080002, 0x02000002, 0x00080802, 0x02080800, 0x02080000, 0x00000802, 0x02000802, 0x02000000, 0x00000000, 0x00080002, 0x00080000, 0x00000002, 0x02000800, 0x00080800, 0x02080802, 0x02080000, 0x00000802, 0x02000800, 0x00000002, 0x00000800, 0x00080800, 0x02080002, 0x00000800, 0x02000802, 0x02080002, 0x00000000, 0x00000000, 0x02080802, 0x02000800, 0x00080002, 0x02080800, 0x00080000, 0x00000802, 0x02000800, 0x02080002, 0x00000800, 0x00080800, 0x02000002, 0x00080802, 0x00000002, 0x02000002, 0x02080000, 0x02080802, 0x00080800, 0x02080000, 0x02000802, 0x02000000, 0x00000802, 0x00080002, 0x00000000, 0x00080000, 0x02000000, 0x02000802, 0x02080800, 0x00000002, 0x02080002, 0x00000800, 0x00080802, }, { // nibble 1 0x40108010, 0x00000000, 0x00108000, 0x40100000, 0x40000010, 0x00008010, 0x40008000, 0x00108000, 0x00008000, 0x40100010, 0x00000010, 0x40008000, 0x00100010, 0x40108000, 0x40100000, 0x00000010, 0x00100000, 0x40008010, 0x40100010, 0x00008000, 0x00108010, 0x40000000, 0x00000000, 0x00100010, 0x40008010, 0x00108010, 0x40108000, 0x40000010, 0x40000000, 0x00100000, 0x00008010, 0x40108010, 0x00100010, 0x40108000, 0x40008000, 0x00108010, 0x40108010, 0x00100010, 0x40000010, 0x00000000, 0x40000000, 
0x00008010, 0x00100000, 0x40100010, 0x00008000, 0x40000000, 0x00108010, 0x40008010, 0x40108000, 0x00008000, 0x00000000, 0x40000010, 0x00000010, 0x40108010, 0x00108000, 0x40100000, 0x40100010, 0x00100000, 0x00008010, 0x40008000, 0x40008010, 0x00000010, 0x40100000, 0x00108000, }, { // nibble 2 0x04000001, 0x04040100, 0x00000100, 0x04000101, 0x00040001, 0x04000000, 0x04000101, 0x00040100, 0x04000100, 0x00040000, 0x04040000, 0x00000001, 0x04040101, 0x00000101, 0x00000001, 0x04040001, 0x00000000, 0x00040001, 0x04040100, 0x00000100, 0x00000101, 0x04040101, 0x00040000, 0x04000001, 0x04040001, 0x04000100, 0x00040101, 0x04040000, 0x00040100, 0x00000000, 0x04000000, 0x00040101, 0x04040100, 0x00000100, 0x00000001, 0x00040000, 0x00000101, 0x00040001, 0x04040000, 0x04000101, 0x00000000, 0x04040100, 0x00040100, 0x04040001, 0x00040001, 0x04000000, 0x04040101, 0x00000001, 0x00040101, 0x04000001, 0x04000000, 0x04040101, 0x00040000, 0x04000100, 0x04000101, 0x00040100, 0x04000100, 0x00000000, 0x04040001, 0x00000101, 0x04000001, 0x00040101, 0x00000100, 0x04040000, }, { // nibble 3 0x00401008, 0x10001000, 0x00000008, 0x10401008, 0x00000000, 0x10400000, 0x10001008, 0x00400008, 0x10401000, 0x10000008, 0x10000000, 0x00001008, 0x10000008, 0x00401008, 0x00400000, 0x10000000, 0x10400008, 0x00401000, 0x00001000, 0x00000008, 0x00401000, 0x10001008, 0x10400000, 0x00001000, 0x00001008, 0x00000000, 0x00400008, 0x10401000, 0x10001000, 0x10400008, 0x10401008, 0x00400000, 0x10400008, 0x00001008, 0x00400000, 0x10000008, 0x00401000, 0x10001000, 0x00000008, 0x10400000, 0x10001008, 0x00000000, 0x00001000, 0x00400008, 0x00000000, 0x10400008, 0x10401000, 0x00001000, 0x10000000, 0x10401008, 0x00401008, 0x00400000, 0x10401008, 0x00000008, 0x10001000, 0x00401008, 0x00400008, 0x00401000, 0x10400000, 0x10001008, 0x00001008, 0x10000000, 0x10000008, 0x10401000, }, { // nibble 4 0x08000000, 0x00010000, 0x00000400, 0x08010420, 0x08010020, 0x08000400, 0x00010420, 0x08010000, 0x00010000, 0x00000020, 0x08000020, 
0x00010400, 0x08000420, 0x08010020, 0x08010400, 0x00000000, 0x00010400, 0x08000000, 0x00010020, 0x00000420, 0x08000400, 0x00010420, 0x00000000, 0x08000020, 0x00000020, 0x08000420, 0x08010420, 0x00010020, 0x08010000, 0x00000400, 0x00000420, 0x08010400, 0x08010400, 0x08000420, 0x00010020, 0x08010000, 0x00010000, 0x00000020, 0x08000020, 0x08000400, 0x08000000, 0x00010400, 0x08010420, 0x00000000, 0x00010420, 0x08000000, 0x00000400, 0x00010020, 0x08000420, 0x00000400, 0x00000000, 0x08010420, 0x08010020, 0x08010400, 0x00000420, 0x00010000, 0x00010400, 0x08010020, 0x08000400, 0x00000420, 0x00000020, 0x00010420, 0x08010000, 0x08000020, }, { // nibble 5 0x80000040, 0x00200040, 0x00000000, 0x80202000, 0x00200040, 0x00002000, 0x80002040, 0x00200000, 0x00002040, 0x80202040, 0x00202000, 0x80000000, 0x80002000, 0x80000040, 0x80200000, 0x00202040, 0x00200000, 0x80002040, 0x80200040, 0x00000000, 0x00002000, 0x00000040, 0x80202000, 0x80200040, 0x80202040, 0x80200000, 0x80000000, 0x00002040, 0x00000040, 0x00202000, 0x00202040, 0x80002000, 0x00002040, 0x80000000, 0x80002000, 0x00202040, 0x80202000, 0x00200040, 0x00000000, 0x80002000, 0x80000000, 0x00002000, 0x80200040, 0x00200000, 0x00200040, 0x80202040, 0x00202000, 0x00000040, 0x80202040, 0x00202000, 0x00200000, 0x80002040, 0x80000040, 0x80200000, 0x00202040, 0x00000000, 0x00002000, 0x80000040, 0x80002040, 0x80202000, 0x80200000, 0x00002040, 0x00000040, 0x80200040, }, { // nibble 6 0x00004000, 0x00000200, 0x01000200, 0x01000004, 0x01004204, 0x00004004, 0x00004200, 0x00000000, 0x01000000, 0x01000204, 0x00000204, 0x01004000, 0x00000004, 0x01004200, 0x01004000, 0x00000204, 0x01000204, 0x00004000, 0x00004004, 0x01004204, 0x00000000, 0x01000200, 0x01000004, 0x00004200, 0x01004004, 0x00004204, 0x01004200, 0x00000004, 0x00004204, 0x01004004, 0x00000200, 0x01000000, 0x00004204, 0x01004000, 0x01004004, 0x00000204, 0x00004000, 0x00000200, 0x01000000, 0x01004004, 0x01000204, 0x00004204, 0x00004200, 0x00000000, 0x00000200, 0x01000004, 
0x00000004, 0x01000200, 0x00000000, 0x01000204, 0x01000200, 0x00004200, 0x00000204, 0x00004000, 0x01004204, 0x01000000, 0x01004200, 0x00000004, 0x00004004, 0x01004204, 0x01000004, 0x01004200, 0x01004000, 0x00004004, }, { // nibble 7 0x20800080, 0x20820000, 0x00020080, 0x00000000, 0x20020000, 0x00800080, 0x20800000, 0x20820080, 0x00000080, 0x20000000, 0x00820000, 0x00020080, 0x00820080, 0x20020080, 0x20000080, 0x20800000, 0x00020000, 0x00820080, 0x00800080, 0x20020000, 0x20820080, 0x20000080, 0x00000000, 0x00820000, 0x20000000, 0x00800000, 0x20020080, 0x20800080, 0x00800000, 0x00020000, 0x20820000, 0x00000080, 0x00800000, 0x00020000, 0x20000080, 0x20820080, 0x00020080, 0x20000000, 0x00000000, 0x00820000, 0x20800080, 0x20020080, 0x20020000, 0x00800080, 0x20820000, 0x00000080, 0x00800080, 0x20020000, 0x20820080, 0x00800000, 0x20800000, 0x20000080, 0x00820000, 0x00020080, 0x20020080, 0x20800000, 0x00000080, 0x20820000, 0x00820080, 0x00000000, 0x20000000, 0x20800080, 0x00020000, 0x00820080, }}; #define HPERM_OP(a, t, n, m) \ ((t) = ((((a) << (16 - (n))) ^ (a)) & (m)), \ (a) = (a) ^ (t) ^ ((t) >> (16 - (n)))) void DES_set_key(const DES_cblock *key, DES_key_schedule *schedule) { DES_set_key_ex(key->bytes, schedule); } void DES_set_key_ex(const uint8_t key[8], DES_key_schedule *schedule) { static const int shifts2[16] = {0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0}; uint32_t c, d, t, s, t2; const uint8_t *in; int i; in = key; c2l(in, c); c2l(in, d); // do PC1 in 47 simple operations :-) // Thanks to John Fletcher (john_fletcher@lccmail.ocf.llnl.gov) // for the inspiration. 
:-) PERM_OP(d, c, t, 4, 0x0f0f0f0f); HPERM_OP(c, t, -2, 0xcccc0000); HPERM_OP(d, t, -2, 0xcccc0000); PERM_OP(d, c, t, 1, 0x55555555); PERM_OP(c, d, t, 8, 0x00ff00ff); PERM_OP(d, c, t, 1, 0x55555555); d = (((d & 0x000000ff) << 16) | (d & 0x0000ff00) | ((d & 0x00ff0000) >> 16) | ((c & 0xf0000000) >> 4)); c &= 0x0fffffff; for (i = 0; i < ITERATIONS; i++) { if (shifts2[i]) { c = ((c >> 2) | (c << 26)); d = ((d >> 2) | (d << 26)); } else { c = ((c >> 1) | (c << 27)); d = ((d >> 1) | (d << 27)); } c &= 0x0fffffff; d &= 0x0fffffff; // could be a few less shifts but I am to lazy at this // point in time to investigate s = des_skb[0][(c) & 0x3f] | des_skb[1][((c >> 6) & 0x03) | ((c >> 7) & 0x3c)] | des_skb[2][((c >> 13) & 0x0f) | ((c >> 14) & 0x30)] | des_skb[3][((c >> 20) & 0x01) | ((c >> 21) & 0x06) | ((c >> 22) & 0x38)]; t = des_skb[4][(d) & 0x3f] | des_skb[5][((d >> 7) & 0x03) | ((d >> 8) & 0x3c)] | des_skb[6][(d >> 15) & 0x3f] | des_skb[7][((d >> 21) & 0x0f) | ((d >> 22) & 0x30)]; // table contained 0213 4657 t2 = ((t << 16) | (s & 0x0000ffff)) & 0xffffffff; schedule->subkeys[i][0] = CRYPTO_rotr_u32(t2, 30); t2 = ((s >> 16) | (t & 0xffff0000)); schedule->subkeys[i][1] = CRYPTO_rotr_u32(t2, 26); } } static const uint8_t kOddParity[256] = { 1, 1, 2, 2, 4, 4, 7, 7, 8, 8, 11, 11, 13, 13, 14, 14, 16, 16, 19, 19, 21, 21, 22, 22, 25, 25, 26, 26, 28, 28, 31, 31, 32, 32, 35, 35, 37, 37, 38, 38, 41, 41, 42, 42, 44, 44, 47, 47, 49, 49, 50, 50, 52, 52, 55, 55, 56, 56, 59, 59, 61, 61, 62, 62, 64, 64, 67, 67, 69, 69, 70, 70, 73, 73, 74, 74, 76, 76, 79, 79, 81, 81, 82, 82, 84, 84, 87, 87, 88, 88, 91, 91, 93, 93, 94, 94, 97, 97, 98, 98, 100, 100, 103, 103, 104, 104, 107, 107, 109, 109, 110, 110, 112, 112, 115, 115, 117, 117, 118, 118, 121, 121, 122, 122, 124, 124, 127, 127, 128, 128, 131, 131, 133, 133, 134, 134, 137, 137, 138, 138, 140, 140, 143, 143, 145, 145, 146, 146, 148, 148, 151, 151, 152, 152, 155, 155, 157, 157, 158, 158, 161, 161, 162, 162, 164, 164, 167, 167, 168, 168, 171, 
171, 173, 173, 174, 174, 176, 176, 179, 179, 181, 181, 182, 182, 185, 185, 186, 186, 188, 188, 191, 191, 193, 193, 194, 194, 196, 196, 199, 199, 200, 200, 203, 203, 205, 205, 206, 206, 208, 208, 211, 211, 213, 213, 214, 214, 217, 217, 218, 218, 220, 220, 223, 223, 224, 224, 227, 227, 229, 229, 230, 230, 233, 233, 234, 234, 236, 236, 239, 239, 241, 241, 242, 242, 244, 244, 247, 247, 248, 248, 251, 251, 253, 253, 254, 254 }; void DES_set_odd_parity(DES_cblock *key) { unsigned i; for (i = 0; i < DES_KEY_SZ; i++) { key->bytes[i] = kOddParity[key->bytes[i]]; } } static void DES_encrypt1(uint32_t data[2], const DES_key_schedule *ks, int enc) { uint32_t l, r, t, u; r = data[0]; l = data[1]; IP(r, l); // Things have been modified so that the initial rotate is done outside // the loop. This required the DES_SPtrans values in sp.h to be // rotated 1 bit to the right. One perl script later and things have a // 5% speed up on a sparc2. Thanks to Richard Outerbridge // <71755.204@CompuServe.COM> for pointing this out. 
// clear the top bits on machines with 8byte longs // shift left by 2 r = CRYPTO_rotr_u32(r, 29); l = CRYPTO_rotr_u32(l, 29); // I don't know if it is worth the effort of loop unrolling the // inner loop if (enc) { D_ENCRYPT(ks, l, r, 0); D_ENCRYPT(ks, r, l, 1); D_ENCRYPT(ks, l, r, 2); D_ENCRYPT(ks, r, l, 3); D_ENCRYPT(ks, l, r, 4); D_ENCRYPT(ks, r, l, 5); D_ENCRYPT(ks, l, r, 6); D_ENCRYPT(ks, r, l, 7); D_ENCRYPT(ks, l, r, 8); D_ENCRYPT(ks, r, l, 9); D_ENCRYPT(ks, l, r, 10); D_ENCRYPT(ks, r, l, 11); D_ENCRYPT(ks, l, r, 12); D_ENCRYPT(ks, r, l, 13); D_ENCRYPT(ks, l, r, 14); D_ENCRYPT(ks, r, l, 15); } else { D_ENCRYPT(ks, l, r, 15); D_ENCRYPT(ks, r, l, 14); D_ENCRYPT(ks, l, r, 13); D_ENCRYPT(ks, r, l, 12); D_ENCRYPT(ks, l, r, 11); D_ENCRYPT(ks, r, l, 10); D_ENCRYPT(ks, l, r, 9); D_ENCRYPT(ks, r, l, 8); D_ENCRYPT(ks, l, r, 7); D_ENCRYPT(ks, r, l, 6); D_ENCRYPT(ks, l, r, 5); D_ENCRYPT(ks, r, l, 4); D_ENCRYPT(ks, l, r, 3); D_ENCRYPT(ks, r, l, 2); D_ENCRYPT(ks, l, r, 1); D_ENCRYPT(ks, r, l, 0); } // rotate and clear the top bits on machines with 8byte longs l = CRYPTO_rotr_u32(l, 3); r = CRYPTO_rotr_u32(r, 3); FP(r, l); data[0] = l; data[1] = r; } static void DES_encrypt2(uint32_t data[2], const DES_key_schedule *ks, int enc) { uint32_t l, r, t, u; r = data[0]; l = data[1]; // Things have been modified so that the initial rotate is done outside the // loop. This required the DES_SPtrans values in sp.h to be rotated 1 bit to // the right. One perl script later and things have a 5% speed up on a // sparc2. Thanks to Richard Outerbridge <71755.204@CompuServe.COM> for // pointing this out. 
// clear the top bits on machines with 8byte longs r = CRYPTO_rotr_u32(r, 29); l = CRYPTO_rotr_u32(l, 29); // I don't know if it is worth the effort of loop unrolling the // inner loop if (enc) { D_ENCRYPT(ks, l, r, 0); D_ENCRYPT(ks, r, l, 1); D_ENCRYPT(ks, l, r, 2); D_ENCRYPT(ks, r, l, 3); D_ENCRYPT(ks, l, r, 4); D_ENCRYPT(ks, r, l, 5); D_ENCRYPT(ks, l, r, 6); D_ENCRYPT(ks, r, l, 7); D_ENCRYPT(ks, l, r, 8); D_ENCRYPT(ks, r, l, 9); D_ENCRYPT(ks, l, r, 10); D_ENCRYPT(ks, r, l, 11); D_ENCRYPT(ks, l, r, 12); D_ENCRYPT(ks, r, l, 13); D_ENCRYPT(ks, l, r, 14); D_ENCRYPT(ks, r, l, 15); } else { D_ENCRYPT(ks, l, r, 15); D_ENCRYPT(ks, r, l, 14); D_ENCRYPT(ks, l, r, 13); D_ENCRYPT(ks, r, l, 12); D_ENCRYPT(ks, l, r, 11); D_ENCRYPT(ks, r, l, 10); D_ENCRYPT(ks, l, r, 9); D_ENCRYPT(ks, r, l, 8); D_ENCRYPT(ks, l, r, 7); D_ENCRYPT(ks, r, l, 6); D_ENCRYPT(ks, l, r, 5); D_ENCRYPT(ks, r, l, 4); D_ENCRYPT(ks, l, r, 3); D_ENCRYPT(ks, r, l, 2); D_ENCRYPT(ks, l, r, 1); D_ENCRYPT(ks, r, l, 0); } // rotate and clear the top bits on machines with 8byte longs data[0] = CRYPTO_rotr_u32(l, 3); data[1] = CRYPTO_rotr_u32(r, 3); } void DES_encrypt3(uint32_t data[2], const DES_key_schedule *ks1, const DES_key_schedule *ks2, const DES_key_schedule *ks3) { uint32_t l, r; l = data[0]; r = data[1]; IP(l, r); data[0] = l; data[1] = r; DES_encrypt2(data, ks1, DES_ENCRYPT); DES_encrypt2(data, ks2, DES_DECRYPT); DES_encrypt2(data, ks3, DES_ENCRYPT); l = data[0]; r = data[1]; FP(r, l); data[0] = l; data[1] = r; } void DES_decrypt3(uint32_t data[2], const DES_key_schedule *ks1, const DES_key_schedule *ks2, const DES_key_schedule *ks3) { uint32_t l, r; l = data[0]; r = data[1]; IP(l, r); data[0] = l; data[1] = r; DES_encrypt2(data, ks3, DES_DECRYPT); DES_encrypt2(data, ks2, DES_ENCRYPT); DES_encrypt2(data, ks1, DES_DECRYPT); l = data[0]; r = data[1]; FP(r, l); data[0] = l; data[1] = r; } void DES_ecb_encrypt(const DES_cblock *in_block, DES_cblock *out_block, const DES_key_schedule *schedule, int is_encrypt) 
{
  DES_ecb_encrypt_ex(in_block->bytes, out_block->bytes, schedule, is_encrypt);
}

// Single-DES ECB on one 8-byte block addressed as raw bytes: load the two
// little-endian 32-bit halves, run |DES_encrypt1|, store them back.
void DES_ecb_encrypt_ex(const uint8_t in[8], uint8_t out[8],
                        const DES_key_schedule *schedule, int is_encrypt) {
  uint32_t ll[2];

  ll[0] = CRYPTO_load_u32_le(in);
  ll[1] = CRYPTO_load_u32_le(in + 4);
  DES_encrypt1(ll, schedule, is_encrypt);
  CRYPTO_store_u32_le(out, ll[0]);
  CRYPTO_store_u32_le(out + 4, ll[1]);
}

// Single-DES CBC; wrapper over |DES_ncbc_encrypt_ex| using the
// |DES_cblock| byte view of the IV.
void DES_ncbc_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                      const DES_key_schedule *schedule, DES_cblock *ivec,
                      int enc) {
  DES_ncbc_encrypt_ex(in, out, len, schedule, ivec->bytes, enc);
}

// Single-DES CBC over |len| bytes.  |ivec| is updated in place so chained
// calls continue the stream.  A trailing partial block (len % 8 != 0) is
// handled via c2ln/l2cn: on encrypt it is zero-padded and a full ciphertext
// block is emitted; on decrypt only |len % 8| plaintext bytes are written.
void DES_ncbc_encrypt_ex(const uint8_t *in, uint8_t *out, size_t len,
                         const DES_key_schedule *schedule, uint8_t ivec[8],
                         int enc) {
  uint32_t tin0, tin1;
  uint32_t tout0, tout1, xor0, xor1;
  uint32_t tin[2];
  unsigned char *iv;

  iv = ivec;
  if (enc) {
    // tout0/tout1 carry the previous ciphertext block (initially the IV).
    c2l(iv, tout0);
    c2l(iv, tout1);
    for (; len >= 8; len -= 8) {
      c2l(in, tin0);
      c2l(in, tin1);
      tin0 ^= tout0;
      tin[0] = tin0;
      tin1 ^= tout1;
      tin[1] = tin1;
      DES_encrypt1(tin, schedule, DES_ENCRYPT);
      tout0 = tin[0];
      l2c(tout0, out);
      tout1 = tin[1];
      l2c(tout1, out);
    }
    if (len != 0) {
      // Final partial block: c2ln zero-fills the unread bytes.
      c2ln(in, tin0, tin1, len);
      tin0 ^= tout0;
      tin[0] = tin0;
      tin1 ^= tout1;
      tin[1] = tin1;
      DES_encrypt1(tin, schedule, DES_ENCRYPT);
      tout0 = tin[0];
      l2c(tout0, out);
      tout1 = tin[1];
      l2c(tout1, out);
    }
    // Write the last ciphertext block back as the next IV.
    iv = ivec;
    l2c(tout0, iv);
    l2c(tout1, iv);
  } else {
    // xor0/xor1 carry the previous ciphertext block (initially the IV).
    c2l(iv, xor0);
    c2l(iv, xor1);
    for (; len >= 8; len -= 8) {
      c2l(in, tin0);
      tin[0] = tin0;
      c2l(in, tin1);
      tin[1] = tin1;
      DES_encrypt1(tin, schedule, DES_DECRYPT);
      tout0 = tin[0] ^ xor0;
      tout1 = tin[1] ^ xor1;
      l2c(tout0, out);
      l2c(tout1, out);
      xor0 = tin0;
      xor1 = tin1;
    }
    if (len != 0) {
      c2l(in, tin0);
      tin[0] = tin0;
      c2l(in, tin1);
      tin[1] = tin1;
      DES_encrypt1(tin, schedule, DES_DECRYPT);
      tout0 = tin[0] ^ xor0;
      tout1 = tin[1] ^ xor1;
      // Emit only the requested tail bytes.
      l2cn(tout0, tout1, out, len);
      xor0 = tin0;
      xor1 = tin1;
    }
    iv = ivec;
    l2c(xor0, iv);
    l2c(xor1, iv);
  }
  // Scrub the last block's intermediate state.
  tin[0] = tin[1] = 0;
}

// 3DES ECB on one block; wrapper over |DES_ecb3_encrypt_ex|.
void DES_ecb3_encrypt(const DES_cblock *input, DES_cblock *output, const
DES_key_schedule *ks1,
                      const DES_key_schedule *ks2, const DES_key_schedule *ks3,
                      int enc) {
  DES_ecb3_encrypt_ex(input->bytes, output->bytes, ks1, ks2, ks3, enc);
}

// 3DES ECB on one 8-byte block addressed as raw bytes: load little-endian
// halves, run |DES_encrypt3| or |DES_decrypt3|, store them back.
void DES_ecb3_encrypt_ex(const uint8_t in[8], uint8_t out[8],
                         const DES_key_schedule *ks1,
                         const DES_key_schedule *ks2,
                         const DES_key_schedule *ks3, int enc) {
  uint32_t ll[2];

  ll[0] = CRYPTO_load_u32_le(in);
  ll[1] = CRYPTO_load_u32_le(in + 4);
  if (enc) {
    DES_encrypt3(ll, ks1, ks2, ks3);
  } else {
    DES_decrypt3(ll, ks1, ks2, ks3);
  }
  CRYPTO_store_u32_le(out, ll[0]);
  CRYPTO_store_u32_le(out + 4, ll[1]);
}

// 3DES CBC; wrapper over |DES_ede3_cbc_encrypt_ex| using the |DES_cblock|
// byte view of the IV.
void DES_ede3_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                          const DES_key_schedule *ks1,
                          const DES_key_schedule *ks2,
                          const DES_key_schedule *ks3, DES_cblock *ivec,
                          int enc) {
  DES_ede3_cbc_encrypt_ex(in, out, len, ks1, ks2, ks3, ivec->bytes, enc);
}

// 3DES (EDE) CBC over |len| bytes.  Same chaining and partial-block
// behaviour as |DES_ncbc_encrypt_ex|, with |DES_encrypt3|/|DES_decrypt3|
// in place of single DES.  |ivec| is updated in place for chained calls.
void DES_ede3_cbc_encrypt_ex(const uint8_t *in, uint8_t *out, size_t len,
                             const DES_key_schedule *ks1,
                             const DES_key_schedule *ks2,
                             const DES_key_schedule *ks3, uint8_t ivec[8],
                             int enc) {
  uint32_t tin0, tin1;
  uint32_t tout0, tout1, xor0, xor1;
  uint32_t tin[2];
  uint8_t *iv;

  iv = ivec;
  if (enc) {
    // tout0/tout1 carry the previous ciphertext block (initially the IV).
    c2l(iv, tout0);
    c2l(iv, tout1);
    for (; len >= 8; len -= 8) {
      c2l(in, tin0);
      c2l(in, tin1);
      tin0 ^= tout0;
      tin1 ^= tout1;
      tin[0] = tin0;
      tin[1] = tin1;
      DES_encrypt3(tin, ks1, ks2, ks3);
      tout0 = tin[0];
      tout1 = tin[1];
      l2c(tout0, out);
      l2c(tout1, out);
    }
    if (len != 0) {
      // Final partial block: c2ln zero-fills the unread bytes.
      c2ln(in, tin0, tin1, len);
      tin0 ^= tout0;
      tin1 ^= tout1;
      tin[0] = tin0;
      tin[1] = tin1;
      DES_encrypt3(tin, ks1, ks2, ks3);
      tout0 = tin[0];
      tout1 = tin[1];
      l2c(tout0, out);
      l2c(tout1, out);
    }
    // Write the last ciphertext block back as the next IV.
    iv = ivec;
    l2c(tout0, iv);
    l2c(tout1, iv);
  } else {
    uint32_t t0, t1;

    // xor0/xor1 carry the previous ciphertext block (initially the IV).
    c2l(iv, xor0);
    c2l(iv, xor1);
    for (; len >= 8; len -= 8) {
      c2l(in, tin0);
      c2l(in, tin1);
      t0 = tin0;
      t1 = tin1;
      tin[0] = tin0;
      tin[1] = tin1;
      DES_decrypt3(tin, ks1, ks2, ks3);
      tout0 = tin[0];
      tout1 = tin[1];
      tout0 ^= xor0;
      tout1 ^= xor1;
      l2c(tout0, out);
      l2c(tout1, out);
      xor0 = t0;
      xor1 = t1;
    }
    if (len != 0) {
      c2l(in, tin0);
      c2l(in, tin1);
      t0 = tin0;
      t1 = tin1;
      tin[0] = tin0;
      tin[1] = tin1;
      DES_decrypt3(tin, ks1, ks2, ks3);
      tout0 = tin[0];
      tout1 = tin[1];
      tout0 ^= xor0;
      tout1 ^= xor1;
      // Emit only the requested tail bytes.
      l2cn(tout0, tout1, out, len);
      xor0 = t0;
      xor1 = t1;
    }
    iv = ivec;
    l2c(xor0, iv);
    l2c(xor1, iv);
  }
  // Scrub the last block's intermediate state.
  tin[0] = tin[1] = 0;
}

// Two-key 3DES CBC: EDE with ks1 reused as the third key.
void DES_ede2_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                          const DES_key_schedule *ks1,
                          const DES_key_schedule *ks2, DES_cblock *ivec,
                          int enc) {
  DES_ede3_cbc_encrypt(in, out, len, ks1, ks2, ks1, ivec, enc);
}


// Deprecated functions.

// Alias for |DES_set_key|; kept for API compatibility (this implementation
// never rejected weak keys, so "unchecked" is identical).
void DES_set_key_unchecked(const DES_cblock *key, DES_key_schedule *schedule) {
  DES_set_key(key, schedule);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/des/internal.h
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OPENSSL_HEADER_DES_INTERNAL_H
#define OPENSSL_HEADER_DES_INTERNAL_H

// NOTE(review): the extraction stripped the <...> header names from these
// two includes — recover them from upstream BoringSSL before building.
#include
#include

#include "../internal.h"

#if defined(__cplusplus)
extern "C" {
#endif


// TODO(davidben): Ideally these macros would be replaced with
// |CRYPTO_load_u32_le| and |CRYPTO_store_u32_le|.
// Read four bytes from |c| as a little-endian uint32_t into |l|,
// advancing |c| by 4.
#define c2l(c, l)                            \
  do {                                       \
    (l) = ((uint32_t)(*((c)++)));            \
    (l) |= ((uint32_t)(*((c)++))) << 8L;     \
    (l) |= ((uint32_t)(*((c)++))) << 16L;    \
    (l) |= ((uint32_t)(*((c)++))) << 24L;    \
  } while (0)

// Write |l| to |c| as four little-endian bytes, advancing |c| by 4.
#define l2c(l, c)                                    \
  do {                                               \
    *((c)++) = (unsigned char)(((l)) & 0xff);        \
    *((c)++) = (unsigned char)(((l) >> 8L) & 0xff);  \
    *((c)++) = (unsigned char)(((l) >> 16L) & 0xff); \
    *((c)++) = (unsigned char)(((l) >> 24L) & 0xff); \
  } while (0)

// Read |n| (1..8) bytes from |c| little-endian into |l1| (low word) and
// |l2| (high word), zero-filling the bytes beyond |n|.  The switch falls
// through from the highest present byte down to case 1.
// NOTE - c is not incremented as per c2l
#define c2ln(c, l1, l2, n)                       \
  do {                                           \
    (c) += (n);                                  \
    (l1) = (l2) = 0;                             \
    switch (n) {                                 \
      case 8:                                    \
        (l2) = ((uint32_t)(*(--(c)))) << 24L;    \
        [[fallthrough]];                         \
      case 7:                                    \
        (l2) |= ((uint32_t)(*(--(c)))) << 16L;   \
        [[fallthrough]];                         \
      case 6:                                    \
        (l2) |= ((uint32_t)(*(--(c)))) << 8L;    \
        [[fallthrough]];                         \
      case 5:                                    \
        (l2) |= ((uint32_t)(*(--(c))));          \
        [[fallthrough]];                         \
      case 4:                                    \
        (l1) = ((uint32_t)(*(--(c)))) << 24L;    \
        [[fallthrough]];                         \
      case 3:                                    \
        (l1) |= ((uint32_t)(*(--(c)))) << 16L;   \
        [[fallthrough]];                         \
      case 2:                                    \
        (l1) |= ((uint32_t)(*(--(c)))) << 8L;    \
        [[fallthrough]];                         \
      case 1:                                    \
        (l1) |= ((uint32_t)(*(--(c))));          \
    }                                            \
  } while (0)

// Write the low |n| (1..8) bytes of the pair |l1| (low word), |l2| (high
// word) to |c| little-endian; inverse of |c2ln|.
// NOTE - c is not incremented as per l2c
#define l2cn(l1, l2, c, n)                               \
  do {                                                   \
    (c) += (n);                                          \
    switch (n) {                                         \
      case 8:                                            \
        *(--(c)) = (unsigned char)(((l2) >> 24L) & 0xff); \
        [[fallthrough]];                                 \
      case 7:                                            \
        *(--(c)) = (unsigned char)(((l2) >> 16L) & 0xff); \
        [[fallthrough]];                                 \
      case 6:                                            \
        *(--(c)) = (unsigned char)(((l2) >> 8L) & 0xff); \
        [[fallthrough]];                                 \
      case 5:                                            \
        *(--(c)) = (unsigned char)(((l2)) & 0xff);       \
        [[fallthrough]];                                 \
      case 4:                                            \
        *(--(c)) = (unsigned char)(((l1) >> 24L) & 0xff); \
        [[fallthrough]];                                 \
      case 3:                                            \
        *(--(c)) = (unsigned char)(((l1) >> 16L) & 0xff); \
        [[fallthrough]];                                 \
      case 2:                                            \
        *(--(c)) = (unsigned char)(((l1) >> 8L) & 0xff); \
        [[fallthrough]];                                 \
      case 1:                                            \
        *(--(c)) = (unsigned char)(((l1)) & 0xff);       \
    }                                                    \
  } while (0)


// Correctly-typed versions of DES functions.
//
// See https://crbug.com/boringssl/683.
void DES_set_key_ex(const uint8_t key[8], DES_key_schedule *schedule);
void DES_ecb_encrypt_ex(const uint8_t in[8], uint8_t out[8],
                        const DES_key_schedule *schedule, int is_encrypt);
void DES_ncbc_encrypt_ex(const uint8_t *in, uint8_t *out, size_t len,
                         const DES_key_schedule *schedule, uint8_t ivec[8],
                         int enc);
void DES_ecb3_encrypt_ex(const uint8_t input[8], uint8_t output[8],
                         const DES_key_schedule *ks1,
                         const DES_key_schedule *ks2,
                         const DES_key_schedule *ks3, int enc);
void DES_ede3_cbc_encrypt_ex(const uint8_t *in, uint8_t *out, size_t len,
                             const DES_key_schedule *ks1,
                             const DES_key_schedule *ks2,
                             const DES_key_schedule *ks3, uint8_t ivec[8],
                             int enc);


// Private functions.
//
// These functions are only exported for use in |decrepit|.

OPENSSL_EXPORT void DES_decrypt3(uint32_t data[2], const DES_key_schedule *ks1,
                                 const DES_key_schedule *ks2,
                                 const DES_key_schedule *ks3);

OPENSSL_EXPORT void DES_encrypt3(uint32_t data[2], const DES_key_schedule *ks1,
                                 const DES_key_schedule *ks2,
                                 const DES_key_schedule *ks3);


#if defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_DES_INTERNAL_H


================================================
FILE: Sources/CNIOBoringSSL/crypto/dh/dh_asn1.cc
================================================
/*
 * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the extraction stripped the <...> header names from these
// includes — recover them from upstream BoringSSL before building.
#include
#include
#include
#include
#include
#include

#include "../bytestring/internal.h"
#include "../fipsmodule/dh/internal.h"


// Allocates a fresh BIGNUM into |*out| (which must be NULL on entry) and
// parses an unsigned ASN.1 INTEGER from |cbs| into it.  Returns 1 on
// success, 0 on error.  On failure the allocated BIGNUM is left in |*out|
// for the caller (DH_free) to release.
static int parse_integer(CBS *cbs, BIGNUM **out) {
  assert(*out == NULL);
  *out = BN_new();
  if (*out == NULL) {
    return 0;
  }
  return BN_parse_asn1_unsigned(cbs, *out);
}

// Serializes |bn| as an ASN.1 INTEGER into |cbb|.  A NULL |bn| is an
// error: the field is required in the encoding being produced.
static int marshal_integer(CBB *cbb, BIGNUM *bn) {
  if (bn == NULL) {
    // A DH object may be missing some components.
    OPENSSL_PUT_ERROR(DH, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }
  return BN_marshal_asn1(cbb, bn);
}

// Parses PKCS#3-style DHParameter (SEQUENCE { p, g, privateValueLength
// OPTIONAL }) from |cbs|.  Rejects trailing data inside the SEQUENCE and
// parameters failing |dh_check_params_fast|.  Returns a new DH or NULL.
DH *DH_parse_parameters(CBS *cbs) {
  DH *ret = DH_new();
  if (ret == NULL) {
    return NULL;
  }

  CBS child;
  if (!CBS_get_asn1(cbs, &child, CBS_ASN1_SEQUENCE) ||
      !parse_integer(&child, &ret->p) ||
      !parse_integer(&child, &ret->g)) {
    goto err;
  }

  uint64_t priv_length;
  if (CBS_len(&child) != 0) {
    // Optional third field: the recommended private-value length in bits.
    if (!CBS_get_asn1_uint64(&child, &priv_length) ||
        priv_length > UINT_MAX) {
      goto err;
    }
    ret->priv_length = (unsigned)priv_length;
  }

  if (CBS_len(&child) != 0) {
    goto err;
  }

  if (!dh_check_params_fast(ret)) {
    goto err;
  }

  return ret;

err:
  OPENSSL_PUT_ERROR(DH, DH_R_DECODE_ERROR);
  DH_free(ret);
  return NULL;
}

// Inverse of |DH_parse_parameters|: writes SEQUENCE { p, g,
// privateValueLength (only when non-zero) } into |cbb|.
int DH_marshal_parameters(CBB *cbb, const DH *dh) {
  CBB child;
  if (!CBB_add_asn1(cbb, &child, CBS_ASN1_SEQUENCE) ||
      !marshal_integer(&child, dh->p) ||
      !marshal_integer(&child, dh->g) ||
      (dh->priv_length != 0 &&
       !CBB_add_asn1_uint64(&child, dh->priv_length)) ||
      !CBB_flush(cbb)) {
    OPENSSL_PUT_ERROR(DH, DH_R_ENCODE_ERROR);
    return 0;
  }
  return 1;
}

// Legacy d2i-style wrapper over |DH_parse_parameters|: on success advances
// |*inp| past the parsed bytes and, if |out| is non-NULL, frees |*out| and
// replaces it with the result.
DH *d2i_DHparams(DH **out, const uint8_t **inp, long len) {
  if (len < 0) {
    return NULL;
  }
  CBS cbs;
  CBS_init(&cbs, *inp, (size_t)len);
  DH *ret = DH_parse_parameters(&cbs);
  if (ret == NULL) {
    return NULL;
  }
  if (out != NULL) {
    DH_free(*out);
    *out = ret;
  }
  *inp = CBS_data(&cbs);
  return ret;
}

// Legacy i2d-style wrapper over |DH_marshal_parameters|; returns the
// encoded length or -1 on error (see |CBB_finish_i2d| for |outp| handling).
int i2d_DHparams(const DH *in, uint8_t **outp) {
  CBB cbb;
  if (!CBB_init(&cbb, 0) ||
      !DH_marshal_parameters(&cbb, in)) {
    CBB_cleanup(&cbb);
    return -1;
  }
  return CBB_finish_i2d(&cbb, outp);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/dh/params.cc
================================================
/*
 * Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "../fipsmodule/bn/internal.h" #include "../fipsmodule/dh/internal.h" static BIGNUM *get_params(BIGNUM *ret, const BN_ULONG *words, size_t num_words) { BIGNUM *alloc = NULL; if (ret == NULL) { alloc = BN_new(); if (alloc == NULL) { return NULL; } ret = alloc; } if (!bn_set_words(ret, words, num_words)) { BN_free(alloc); return NULL; } return ret; } BIGNUM *BN_get_rfc3526_prime_1536(BIGNUM *ret) { static const BN_ULONG kWords[] = { TOBN(0xffffffff, 0xffffffff), TOBN(0xf1746c08, 0xca237327), TOBN(0x670c354e, 0x4abc9804), TOBN(0x9ed52907, 0x7096966d), TOBN(0x1c62f356, 0x208552bb), TOBN(0x83655d23, 0xdca3ad96), TOBN(0x69163fa8, 0xfd24cf5f), TOBN(0x98da4836, 0x1c55d39a), TOBN(0xc2007cb8, 0xa163bf05), TOBN(0x49286651, 0xece45b3d), TOBN(0xae9f2411, 0x7c4b1fe6), TOBN(0xee386bfb, 0x5a899fa5), TOBN(0x0bff5cb6, 0xf406b7ed), TOBN(0xf44c42e9, 0xa637ed6b), TOBN(0xe485b576, 0x625e7ec6), TOBN(0x4fe1356d, 0x6d51c245), TOBN(0x302b0a6d, 0xf25f1437), TOBN(0xef9519b3, 0xcd3a431b), TOBN(0x514a0879, 0x8e3404dd), TOBN(0x020bbea6, 0x3b139b22), TOBN(0x29024e08, 0x8a67cc74), TOBN(0xc4c6628b, 0x80dc1cd1), TOBN(0xc90fdaa2, 0x2168c234), TOBN(0xffffffff, 0xffffffff), }; return get_params(ret, kWords, OPENSSL_ARRAY_SIZE(kWords)); } BIGNUM *BN_get_rfc3526_prime_2048(BIGNUM *ret) { static const BN_ULONG kWords[] = { TOBN(0xffffffff, 0xffffffff), TOBN(0x15728e5a, 0x8aacaa68), TOBN(0x15d22618, 0x98fa0510), TOBN(0x3995497c, 0xea956ae5), TOBN(0xde2bcbf6, 0x95581718), TOBN(0xb5c55df0, 0x6f4c52c9), TOBN(0x9b2783a2, 0xec07a28f), TOBN(0xe39e772c, 0x180e8603), TOBN(0x32905e46, 0x2e36ce3b), TOBN(0xf1746c08, 0xca18217c), TOBN(0x670c354e, 0x4abc9804), TOBN(0x9ed52907, 0x7096966d), TOBN(0x1c62f356, 0x208552bb), TOBN(0x83655d23, 0xdca3ad96), TOBN(0x69163fa8, 0xfd24cf5f), TOBN(0x98da4836, 0x1c55d39a), TOBN(0xc2007cb8, 0xa163bf05), 
TOBN(0x49286651, 0xece45b3d), TOBN(0xae9f2411, 0x7c4b1fe6), TOBN(0xee386bfb, 0x5a899fa5), TOBN(0x0bff5cb6, 0xf406b7ed), TOBN(0xf44c42e9, 0xa637ed6b), TOBN(0xe485b576, 0x625e7ec6), TOBN(0x4fe1356d, 0x6d51c245), TOBN(0x302b0a6d, 0xf25f1437), TOBN(0xef9519b3, 0xcd3a431b), TOBN(0x514a0879, 0x8e3404dd), TOBN(0x020bbea6, 0x3b139b22), TOBN(0x29024e08, 0x8a67cc74), TOBN(0xc4c6628b, 0x80dc1cd1), TOBN(0xc90fdaa2, 0x2168c234), TOBN(0xffffffff, 0xffffffff), }; return get_params(ret, kWords, OPENSSL_ARRAY_SIZE(kWords)); } BIGNUM *BN_get_rfc3526_prime_3072(BIGNUM *ret) { static const BN_ULONG kWords[] = { TOBN(0xffffffff, 0xffffffff), TOBN(0x4b82d120, 0xa93ad2ca), TOBN(0x43db5bfc, 0xe0fd108e), TOBN(0x08e24fa0, 0x74e5ab31), TOBN(0x770988c0, 0xbad946e2), TOBN(0xbbe11757, 0x7a615d6c), TOBN(0x521f2b18, 0x177b200c), TOBN(0xd8760273, 0x3ec86a64), TOBN(0xf12ffa06, 0xd98a0864), TOBN(0xcee3d226, 0x1ad2ee6b), TOBN(0x1e8c94e0, 0x4a25619d), TOBN(0xabf5ae8c, 0xdb0933d7), TOBN(0xb3970f85, 0xa6e1e4c7), TOBN(0x8aea7157, 0x5d060c7d), TOBN(0xecfb8504, 0x58dbef0a), TOBN(0xa85521ab, 0xdf1cba64), TOBN(0xad33170d, 0x04507a33), TOBN(0x15728e5a, 0x8aaac42d), TOBN(0x15d22618, 0x98fa0510), TOBN(0x3995497c, 0xea956ae5), TOBN(0xde2bcbf6, 0x95581718), TOBN(0xb5c55df0, 0x6f4c52c9), TOBN(0x9b2783a2, 0xec07a28f), TOBN(0xe39e772c, 0x180e8603), TOBN(0x32905e46, 0x2e36ce3b), TOBN(0xf1746c08, 0xca18217c), TOBN(0x670c354e, 0x4abc9804), TOBN(0x9ed52907, 0x7096966d), TOBN(0x1c62f356, 0x208552bb), TOBN(0x83655d23, 0xdca3ad96), TOBN(0x69163fa8, 0xfd24cf5f), TOBN(0x98da4836, 0x1c55d39a), TOBN(0xc2007cb8, 0xa163bf05), TOBN(0x49286651, 0xece45b3d), TOBN(0xae9f2411, 0x7c4b1fe6), TOBN(0xee386bfb, 0x5a899fa5), TOBN(0x0bff5cb6, 0xf406b7ed), TOBN(0xf44c42e9, 0xa637ed6b), TOBN(0xe485b576, 0x625e7ec6), TOBN(0x4fe1356d, 0x6d51c245), TOBN(0x302b0a6d, 0xf25f1437), TOBN(0xef9519b3, 0xcd3a431b), TOBN(0x514a0879, 0x8e3404dd), TOBN(0x020bbea6, 0x3b139b22), TOBN(0x29024e08, 0x8a67cc74), TOBN(0xc4c6628b, 0x80dc1cd1), TOBN(0xc90fdaa2, 
0x2168c234), TOBN(0xffffffff, 0xffffffff), }; return get_params(ret, kWords, OPENSSL_ARRAY_SIZE(kWords)); } BIGNUM *BN_get_rfc3526_prime_4096(BIGNUM *ret) { static const BN_ULONG kWords[] = { TOBN(0xffffffff, 0xffffffff), TOBN(0x4df435c9, 0x34063199), TOBN(0x86ffb7dc, 0x90a6c08f), TOBN(0x93b4ea98, 0x8d8fddc1), TOBN(0xd0069127, 0xd5b05aa9), TOBN(0xb81bdd76, 0x2170481c), TOBN(0x1f612970, 0xcee2d7af), TOBN(0x233ba186, 0x515be7ed), TOBN(0x99b2964f, 0xa090c3a2), TOBN(0x287c5947, 0x4e6bc05d), TOBN(0x2e8efc14, 0x1fbecaa6), TOBN(0xdbbbc2db, 0x04de8ef9), TOBN(0x2583e9ca, 0x2ad44ce8), TOBN(0x1a946834, 0xb6150bda), TOBN(0x99c32718, 0x6af4e23c), TOBN(0x88719a10, 0xbdba5b26), TOBN(0x1a723c12, 0xa787e6d7), TOBN(0x4b82d120, 0xa9210801), TOBN(0x43db5bfc, 0xe0fd108e), TOBN(0x08e24fa0, 0x74e5ab31), TOBN(0x770988c0, 0xbad946e2), TOBN(0xbbe11757, 0x7a615d6c), TOBN(0x521f2b18, 0x177b200c), TOBN(0xd8760273, 0x3ec86a64), TOBN(0xf12ffa06, 0xd98a0864), TOBN(0xcee3d226, 0x1ad2ee6b), TOBN(0x1e8c94e0, 0x4a25619d), TOBN(0xabf5ae8c, 0xdb0933d7), TOBN(0xb3970f85, 0xa6e1e4c7), TOBN(0x8aea7157, 0x5d060c7d), TOBN(0xecfb8504, 0x58dbef0a), TOBN(0xa85521ab, 0xdf1cba64), TOBN(0xad33170d, 0x04507a33), TOBN(0x15728e5a, 0x8aaac42d), TOBN(0x15d22618, 0x98fa0510), TOBN(0x3995497c, 0xea956ae5), TOBN(0xde2bcbf6, 0x95581718), TOBN(0xb5c55df0, 0x6f4c52c9), TOBN(0x9b2783a2, 0xec07a28f), TOBN(0xe39e772c, 0x180e8603), TOBN(0x32905e46, 0x2e36ce3b), TOBN(0xf1746c08, 0xca18217c), TOBN(0x670c354e, 0x4abc9804), TOBN(0x9ed52907, 0x7096966d), TOBN(0x1c62f356, 0x208552bb), TOBN(0x83655d23, 0xdca3ad96), TOBN(0x69163fa8, 0xfd24cf5f), TOBN(0x98da4836, 0x1c55d39a), TOBN(0xc2007cb8, 0xa163bf05), TOBN(0x49286651, 0xece45b3d), TOBN(0xae9f2411, 0x7c4b1fe6), TOBN(0xee386bfb, 0x5a899fa5), TOBN(0x0bff5cb6, 0xf406b7ed), TOBN(0xf44c42e9, 0xa637ed6b), TOBN(0xe485b576, 0x625e7ec6), TOBN(0x4fe1356d, 0x6d51c245), TOBN(0x302b0a6d, 0xf25f1437), TOBN(0xef9519b3, 0xcd3a431b), TOBN(0x514a0879, 0x8e3404dd), TOBN(0x020bbea6, 0x3b139b22), 
TOBN(0x29024e08, 0x8a67cc74), TOBN(0xc4c6628b, 0x80dc1cd1), TOBN(0xc90fdaa2, 0x2168c234), TOBN(0xffffffff, 0xffffffff), }; return get_params(ret, kWords, OPENSSL_ARRAY_SIZE(kWords)); } BIGNUM *BN_get_rfc3526_prime_6144(BIGNUM *ret) { static const BN_ULONG kWords[] = { TOBN(0xffffffff, 0xffffffff), TOBN(0xe694f91e, 0x6dcc4024), TOBN(0x12bf2d5b, 0x0b7474d6), TOBN(0x043e8f66, 0x3f4860ee), TOBN(0x387fe8d7, 0x6e3c0468), TOBN(0xda56c9ec, 0x2ef29632), TOBN(0xeb19ccb1, 0xa313d55c), TOBN(0xf550aa3d, 0x8a1fbff0), TOBN(0x06a1d58b, 0xb7c5da76), TOBN(0xa79715ee, 0xf29be328), TOBN(0x14cc5ed2, 0x0f8037e0), TOBN(0xcc8f6d7e, 0xbf48e1d8), TOBN(0x4bd407b2, 0x2b4154aa), TOBN(0x0f1d45b7, 0xff585ac5), TOBN(0x23a97a7e, 0x36cc88be), TOBN(0x59e7c97f, 0xbec7e8f3), TOBN(0xb5a84031, 0x900b1c9e), TOBN(0xd55e702f, 0x46980c82), TOBN(0xf482d7ce, 0x6e74fef6), TOBN(0xf032ea15, 0xd1721d03), TOBN(0x5983ca01, 0xc64b92ec), TOBN(0x6fb8f401, 0x378cd2bf), TOBN(0x33205151, 0x2bd7af42), TOBN(0xdb7f1447, 0xe6cc254b), TOBN(0x44ce6cba, 0xced4bb1b), TOBN(0xda3edbeb, 0xcf9b14ed), TOBN(0x179727b0, 0x865a8918), TOBN(0xb06a53ed, 0x9027d831), TOBN(0xe5db382f, 0x413001ae), TOBN(0xf8ff9406, 0xad9e530e), TOBN(0xc9751e76, 0x3dba37bd), TOBN(0xc1d4dcb2, 0x602646de), TOBN(0x36c3fab4, 0xd27c7026), TOBN(0x4df435c9, 0x34028492), TOBN(0x86ffb7dc, 0x90a6c08f), TOBN(0x93b4ea98, 0x8d8fddc1), TOBN(0xd0069127, 0xd5b05aa9), TOBN(0xb81bdd76, 0x2170481c), TOBN(0x1f612970, 0xcee2d7af), TOBN(0x233ba186, 0x515be7ed), TOBN(0x99b2964f, 0xa090c3a2), TOBN(0x287c5947, 0x4e6bc05d), TOBN(0x2e8efc14, 0x1fbecaa6), TOBN(0xdbbbc2db, 0x04de8ef9), TOBN(0x2583e9ca, 0x2ad44ce8), TOBN(0x1a946834, 0xb6150bda), TOBN(0x99c32718, 0x6af4e23c), TOBN(0x88719a10, 0xbdba5b26), TOBN(0x1a723c12, 0xa787e6d7), TOBN(0x4b82d120, 0xa9210801), TOBN(0x43db5bfc, 0xe0fd108e), TOBN(0x08e24fa0, 0x74e5ab31), TOBN(0x770988c0, 0xbad946e2), TOBN(0xbbe11757, 0x7a615d6c), TOBN(0x521f2b18, 0x177b200c), TOBN(0xd8760273, 0x3ec86a64), TOBN(0xf12ffa06, 0xd98a0864), TOBN(0xcee3d226, 
0x1ad2ee6b), TOBN(0x1e8c94e0, 0x4a25619d), TOBN(0xabf5ae8c, 0xdb0933d7), TOBN(0xb3970f85, 0xa6e1e4c7), TOBN(0x8aea7157, 0x5d060c7d), TOBN(0xecfb8504, 0x58dbef0a), TOBN(0xa85521ab, 0xdf1cba64), TOBN(0xad33170d, 0x04507a33), TOBN(0x15728e5a, 0x8aaac42d), TOBN(0x15d22618, 0x98fa0510), TOBN(0x3995497c, 0xea956ae5), TOBN(0xde2bcbf6, 0x95581718), TOBN(0xb5c55df0, 0x6f4c52c9), TOBN(0x9b2783a2, 0xec07a28f), TOBN(0xe39e772c, 0x180e8603), TOBN(0x32905e46, 0x2e36ce3b), TOBN(0xf1746c08, 0xca18217c), TOBN(0x670c354e, 0x4abc9804), TOBN(0x9ed52907, 0x7096966d), TOBN(0x1c62f356, 0x208552bb), TOBN(0x83655d23, 0xdca3ad96), TOBN(0x69163fa8, 0xfd24cf5f), TOBN(0x98da4836, 0x1c55d39a), TOBN(0xc2007cb8, 0xa163bf05), TOBN(0x49286651, 0xece45b3d), TOBN(0xae9f2411, 0x7c4b1fe6), TOBN(0xee386bfb, 0x5a899fa5), TOBN(0x0bff5cb6, 0xf406b7ed), TOBN(0xf44c42e9, 0xa637ed6b), TOBN(0xe485b576, 0x625e7ec6), TOBN(0x4fe1356d, 0x6d51c245), TOBN(0x302b0a6d, 0xf25f1437), TOBN(0xef9519b3, 0xcd3a431b), TOBN(0x514a0879, 0x8e3404dd), TOBN(0x020bbea6, 0x3b139b22), TOBN(0x29024e08, 0x8a67cc74), TOBN(0xc4c6628b, 0x80dc1cd1), TOBN(0xc90fdaa2, 0x2168c234), TOBN(0xffffffff, 0xffffffff), }; return get_params(ret, kWords, OPENSSL_ARRAY_SIZE(kWords)); } BIGNUM *BN_get_rfc3526_prime_8192(BIGNUM *ret) { static const BN_ULONG kWords[] = { TOBN(0xffffffff, 0xffffffff), TOBN(0x60c980dd, 0x98edd3df), TOBN(0xc81f56e8, 0x80b96e71), TOBN(0x9e3050e2, 0x765694df), TOBN(0x9558e447, 0x5677e9aa), TOBN(0xc9190da6, 0xfc026e47), TOBN(0x889a002e, 0xd5ee382b), TOBN(0x4009438b, 0x481c6cd7), TOBN(0x359046f4, 0xeb879f92), TOBN(0xfaf36bc3, 0x1ecfa268), TOBN(0xb1d510bd, 0x7ee74d73), TOBN(0xf9ab4819, 0x5ded7ea1), TOBN(0x64f31cc5, 0x0846851d), TOBN(0x4597e899, 0xa0255dc1), TOBN(0xdf310ee0, 0x74ab6a36), TOBN(0x6d2a13f8, 0x3f44f82d), TOBN(0x062b3cf5, 0xb3a278a6), TOBN(0x79683303, 0xed5bdd3a), TOBN(0xfa9d4b7f, 0xa2c087e8), TOBN(0x4bcbc886, 0x2f8385dd), TOBN(0x3473fc64, 0x6cea306b), TOBN(0x13eb57a8, 0x1a23f0c7), TOBN(0x22222e04, 0xa4037c07), 
TOBN(0xe3fdb8be, 0xfc848ad9), TOBN(0x238f16cb, 0xe39d652d), TOBN(0x3423b474, 0x2bf1c978), TOBN(0x3aab639c, 0x5ae4f568), TOBN(0x2576f693, 0x6ba42466), TOBN(0x741fa7bf, 0x8afc47ed), TOBN(0x3bc832b6, 0x8d9dd300), TOBN(0xd8bec4d0, 0x73b931ba), TOBN(0x38777cb6, 0xa932df8c), TOBN(0x74a3926f, 0x12fee5e4), TOBN(0xe694f91e, 0x6dbe1159), TOBN(0x12bf2d5b, 0x0b7474d6), TOBN(0x043e8f66, 0x3f4860ee), TOBN(0x387fe8d7, 0x6e3c0468), TOBN(0xda56c9ec, 0x2ef29632), TOBN(0xeb19ccb1, 0xa313d55c), TOBN(0xf550aa3d, 0x8a1fbff0), TOBN(0x06a1d58b, 0xb7c5da76), TOBN(0xa79715ee, 0xf29be328), TOBN(0x14cc5ed2, 0x0f8037e0), TOBN(0xcc8f6d7e, 0xbf48e1d8), TOBN(0x4bd407b2, 0x2b4154aa), TOBN(0x0f1d45b7, 0xff585ac5), TOBN(0x23a97a7e, 0x36cc88be), TOBN(0x59e7c97f, 0xbec7e8f3), TOBN(0xb5a84031, 0x900b1c9e), TOBN(0xd55e702f, 0x46980c82), TOBN(0xf482d7ce, 0x6e74fef6), TOBN(0xf032ea15, 0xd1721d03), TOBN(0x5983ca01, 0xc64b92ec), TOBN(0x6fb8f401, 0x378cd2bf), TOBN(0x33205151, 0x2bd7af42), TOBN(0xdb7f1447, 0xe6cc254b), TOBN(0x44ce6cba, 0xced4bb1b), TOBN(0xda3edbeb, 0xcf9b14ed), TOBN(0x179727b0, 0x865a8918), TOBN(0xb06a53ed, 0x9027d831), TOBN(0xe5db382f, 0x413001ae), TOBN(0xf8ff9406, 0xad9e530e), TOBN(0xc9751e76, 0x3dba37bd), TOBN(0xc1d4dcb2, 0x602646de), TOBN(0x36c3fab4, 0xd27c7026), TOBN(0x4df435c9, 0x34028492), TOBN(0x86ffb7dc, 0x90a6c08f), TOBN(0x93b4ea98, 0x8d8fddc1), TOBN(0xd0069127, 0xd5b05aa9), TOBN(0xb81bdd76, 0x2170481c), TOBN(0x1f612970, 0xcee2d7af), TOBN(0x233ba186, 0x515be7ed), TOBN(0x99b2964f, 0xa090c3a2), TOBN(0x287c5947, 0x4e6bc05d), TOBN(0x2e8efc14, 0x1fbecaa6), TOBN(0xdbbbc2db, 0x04de8ef9), TOBN(0x2583e9ca, 0x2ad44ce8), TOBN(0x1a946834, 0xb6150bda), TOBN(0x99c32718, 0x6af4e23c), TOBN(0x88719a10, 0xbdba5b26), TOBN(0x1a723c12, 0xa787e6d7), TOBN(0x4b82d120, 0xa9210801), TOBN(0x43db5bfc, 0xe0fd108e), TOBN(0x08e24fa0, 0x74e5ab31), TOBN(0x770988c0, 0xbad946e2), TOBN(0xbbe11757, 0x7a615d6c), TOBN(0x521f2b18, 0x177b200c), TOBN(0xd8760273, 0x3ec86a64), TOBN(0xf12ffa06, 0xd98a0864), TOBN(0xcee3d226, 
0x1ad2ee6b), TOBN(0x1e8c94e0, 0x4a25619d), TOBN(0xabf5ae8c, 0xdb0933d7), TOBN(0xb3970f85, 0xa6e1e4c7), TOBN(0x8aea7157, 0x5d060c7d), TOBN(0xecfb8504, 0x58dbef0a), TOBN(0xa85521ab, 0xdf1cba64), TOBN(0xad33170d, 0x04507a33), TOBN(0x15728e5a, 0x8aaac42d), TOBN(0x15d22618, 0x98fa0510), TOBN(0x3995497c, 0xea956ae5), TOBN(0xde2bcbf6, 0x95581718), TOBN(0xb5c55df0, 0x6f4c52c9), TOBN(0x9b2783a2, 0xec07a28f), TOBN(0xe39e772c, 0x180e8603), TOBN(0x32905e46, 0x2e36ce3b), TOBN(0xf1746c08, 0xca18217c), TOBN(0x670c354e, 0x4abc9804), TOBN(0x9ed52907, 0x7096966d), TOBN(0x1c62f356, 0x208552bb), TOBN(0x83655d23, 0xdca3ad96), TOBN(0x69163fa8, 0xfd24cf5f), TOBN(0x98da4836, 0x1c55d39a), TOBN(0xc2007cb8, 0xa163bf05), TOBN(0x49286651, 0xece45b3d), TOBN(0xae9f2411, 0x7c4b1fe6), TOBN(0xee386bfb, 0x5a899fa5), TOBN(0x0bff5cb6, 0xf406b7ed), TOBN(0xf44c42e9, 0xa637ed6b), TOBN(0xe485b576, 0x625e7ec6), TOBN(0x4fe1356d, 0x6d51c245), TOBN(0x302b0a6d, 0xf25f1437), TOBN(0xef9519b3, 0xcd3a431b), TOBN(0x514a0879, 0x8e3404dd), TOBN(0x020bbea6, 0x3b139b22), TOBN(0x29024e08, 0x8a67cc74), TOBN(0xc4c6628b, 0x80dc1cd1), TOBN(0xc90fdaa2, 0x2168c234), TOBN(0xffffffff, 0xffffffff), }; return get_params(ret, kWords, OPENSSL_ARRAY_SIZE(kWords)); } int DH_generate_parameters_ex(DH *dh, int prime_bits, int generator, BN_GENCB *cb) { // We generate DH parameters as follows // find a prime q which is prime_bits/2 bits long. // p=(2*q)+1 or (p-1)/2 = q // For this case, g is a generator if // g^((p-1)/q) mod p != 1 for values of q which are the factors of p-1. // Since the factors of p-1 are q and 2, we just need to check // g^2 mod p != 1 and g^q mod p != 1. // // Having said all that, // there is another special case method for the generators 2, 3 and 5. // for 2, p mod 24 == 11 // for 3, p mod 12 == 5 <<<<< does not work for safe primes. // for 5, p mod 10 == 3 or 7 // // Thanks to Phil Karn for the pointers about the // special generators and for answering some of my questions. 
// // I've implemented the second simple method :-). // Since DH should be using a safe prime (both p and q are prime), // this generator function can take a very very long time to run. // Actually there is no reason to insist that 'generator' be a generator. // It's just as OK (and in some sense better) to use a generator of the // order-q subgroup. if (prime_bits <= 0 || prime_bits > OPENSSL_DH_MAX_MODULUS_BITS) { OPENSSL_PUT_ERROR(DH, DH_R_MODULUS_TOO_LARGE); return 0; } BIGNUM *t1, *t2; int g, ok = 0; BN_CTX *ctx = NULL; ctx = BN_CTX_new(); if (ctx == NULL) { goto err; } BN_CTX_start(ctx); t1 = BN_CTX_get(ctx); t2 = BN_CTX_get(ctx); if (t1 == NULL || t2 == NULL) { goto err; } // Make sure |dh| has the necessary elements if (dh->p == NULL) { dh->p = BN_new(); if (dh->p == NULL) { goto err; } } if (dh->g == NULL) { dh->g = BN_new(); if (dh->g == NULL) { goto err; } } if (generator <= 1) { OPENSSL_PUT_ERROR(DH, DH_R_BAD_GENERATOR); goto err; } if (generator == DH_GENERATOR_2) { if (!BN_set_word(t1, 24)) { goto err; } if (!BN_set_word(t2, 11)) { goto err; } g = 2; } else if (generator == DH_GENERATOR_5) { if (!BN_set_word(t1, 10)) { goto err; } if (!BN_set_word(t2, 3)) { goto err; } // BN_set_word(t3,7); just have to miss // out on these ones :-( g = 5; } else { // in the general case, don't worry if 'generator' is a // generator or not: since we are using safe primes, // it will generate either an order-q or an order-2q group, // which both is OK if (!BN_set_word(t1, 2)) { goto err; } if (!BN_set_word(t2, 1)) { goto err; } g = generator; } if (!BN_generate_prime_ex(dh->p, prime_bits, 1, t1, t2, cb)) { goto err; } if (!BN_GENCB_call(cb, 3, 0)) { goto err; } if (!BN_set_word(dh->g, g)) { goto err; } ok = 1; err: if (!ok) { OPENSSL_PUT_ERROR(DH, ERR_R_BN_LIB); } if (ctx != NULL) { BN_CTX_end(ctx); BN_CTX_free(ctx); } return ok; } static int int_dh_bn_cpy(BIGNUM **dst, const BIGNUM *src) { BIGNUM *a = NULL; if (src) { a = BN_dup(src); if (!a) { return 0; } } 
BN_free(*dst); *dst = a; return 1; } static int int_dh_param_copy(DH *to, const DH *from, int is_x942) { if (is_x942 == -1) { is_x942 = !!from->q; } if (!int_dh_bn_cpy(&to->p, from->p) || !int_dh_bn_cpy(&to->g, from->g)) { return 0; } if (!is_x942) { return 1; } if (!int_dh_bn_cpy(&to->q, from->q)) { return 0; } return 1; } DH *DHparams_dup(const DH *dh) { DH *ret = DH_new(); if (!ret) { return NULL; } if (!int_dh_param_copy(ret, dh, -1)) { DH_free(ret); return NULL; } return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/digest/digest_extra.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../asn1/internal.h" #include "../fipsmodule/digest/internal.h" #include "../internal.h" struct nid_to_digest { int nid; const EVP_MD *(*md_func)(void); const char *short_name; const char *long_name; }; static const struct nid_to_digest nid_to_digest_mapping[] = { {NID_md4, EVP_md4, SN_md4, LN_md4}, {NID_md5, EVP_md5, SN_md5, LN_md5}, {NID_sha1, EVP_sha1, SN_sha1, LN_sha1}, {NID_sha224, EVP_sha224, SN_sha224, LN_sha224}, {NID_sha256, EVP_sha256, SN_sha256, LN_sha256}, {NID_sha384, EVP_sha384, SN_sha384, LN_sha384}, {NID_sha512, EVP_sha512, SN_sha512, LN_sha512}, {NID_sha512_256, EVP_sha512_256, SN_sha512_256, LN_sha512_256}, {NID_md5_sha1, EVP_md5_sha1, SN_md5_sha1, LN_md5_sha1}, // As a remnant of signing |EVP_MD|s, OpenSSL returned the corresponding // hash function when given a signature OID. To avoid unintended lax parsing // of hash OIDs, this is no longer supported for lookup by OID or NID. 
// Node.js, however, exposes |EVP_get_digestbyname|'s full behavior to // consumers so we retain it there. {NID_undef, EVP_sha1, SN_dsaWithSHA, LN_dsaWithSHA}, {NID_undef, EVP_sha1, SN_dsaWithSHA1, LN_dsaWithSHA1}, {NID_undef, EVP_sha1, SN_ecdsa_with_SHA1, NULL}, {NID_undef, EVP_md5, SN_md5WithRSAEncryption, LN_md5WithRSAEncryption}, {NID_undef, EVP_sha1, SN_sha1WithRSAEncryption, LN_sha1WithRSAEncryption}, {NID_undef, EVP_sha224, SN_sha224WithRSAEncryption, LN_sha224WithRSAEncryption}, {NID_undef, EVP_sha256, SN_sha256WithRSAEncryption, LN_sha256WithRSAEncryption}, {NID_undef, EVP_sha384, SN_sha384WithRSAEncryption, LN_sha384WithRSAEncryption}, {NID_undef, EVP_sha512, SN_sha512WithRSAEncryption, LN_sha512WithRSAEncryption}, }; const EVP_MD *EVP_get_digestbynid(int nid) { if (nid == NID_undef) { // Skip the |NID_undef| entries in |nid_to_digest_mapping|. return NULL; } for (unsigned i = 0; i < OPENSSL_ARRAY_SIZE(nid_to_digest_mapping); i++) { if (nid_to_digest_mapping[i].nid == nid) { return nid_to_digest_mapping[i].md_func(); } } return NULL; } static const struct { uint8_t oid[9]; uint8_t oid_len; int nid; } kMDOIDs[] = { // 1.2.840.113549.2.4 {{0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x04}, 8, NID_md4}, // 1.2.840.113549.2.5 {{0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05}, 8, NID_md5}, // 1.3.14.3.2.26 {{0x2b, 0x0e, 0x03, 0x02, 0x1a}, 5, NID_sha1}, // 2.16.840.1.101.3.4.2.1 {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01}, 9, NID_sha256}, // 2.16.840.1.101.3.4.2.2 {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02}, 9, NID_sha384}, // 2.16.840.1.101.3.4.2.3 {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03}, 9, NID_sha512}, // 2.16.840.1.101.3.4.2.4 {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04}, 9, NID_sha224}, }; static const EVP_MD *cbs_to_md(const CBS *cbs) { for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kMDOIDs); i++) { if (CBS_len(cbs) == kMDOIDs[i].oid_len && OPENSSL_memcmp(CBS_data(cbs), kMDOIDs[i].oid, kMDOIDs[i].oid_len) == 0) 
{ return EVP_get_digestbynid(kMDOIDs[i].nid); } } return NULL; } const EVP_MD *EVP_get_digestbyobj(const ASN1_OBJECT *obj) { // Handle objects with no corresponding OID. Note we don't use |OBJ_obj2nid| // here to avoid pulling in the OID table. if (obj->nid != NID_undef) { return EVP_get_digestbynid(obj->nid); } CBS cbs; CBS_init(&cbs, OBJ_get0_data(obj), OBJ_length(obj)); return cbs_to_md(&cbs); } const EVP_MD *EVP_parse_digest_algorithm(CBS *cbs) { CBS algorithm, oid; if (!CBS_get_asn1(cbs, &algorithm, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&algorithm, &oid, CBS_ASN1_OBJECT)) { OPENSSL_PUT_ERROR(DIGEST, DIGEST_R_DECODE_ERROR); return NULL; } const EVP_MD *ret = cbs_to_md(&oid); if (ret == NULL) { OPENSSL_PUT_ERROR(DIGEST, DIGEST_R_UNKNOWN_HASH); return NULL; } // The parameters, if present, must be NULL. Historically, whether the NULL // was included or omitted was not well-specified. When parsing an // AlgorithmIdentifier, we allow both. (Note this code is not used when // verifying RSASSA-PKCS1-v1_5 signatures.) if (CBS_len(&algorithm) > 0) { CBS param; if (!CBS_get_asn1(&algorithm, ¶m, CBS_ASN1_NULL) || CBS_len(¶m) != 0 || // CBS_len(&algorithm) != 0) { OPENSSL_PUT_ERROR(DIGEST, DIGEST_R_DECODE_ERROR); return NULL; } } return ret; } int EVP_marshal_digest_algorithm(CBB *cbb, const EVP_MD *md) { CBB algorithm, oid, null; if (!CBB_add_asn1(cbb, &algorithm, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT)) { return 0; } int found = 0; int nid = EVP_MD_type(md); for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kMDOIDs); i++) { if (nid == kMDOIDs[i].nid) { if (!CBB_add_bytes(&oid, kMDOIDs[i].oid, kMDOIDs[i].oid_len)) { return 0; } found = 1; break; } } if (!found) { OPENSSL_PUT_ERROR(DIGEST, DIGEST_R_UNKNOWN_HASH); return 0; } // TODO(crbug.com/boringssl/710): Is this correct? See RFC 4055, section 2.1. 
if (!CBB_add_asn1(&algorithm, &null, CBS_ASN1_NULL) || // !CBB_flush(cbb)) { return 0; } return 1; } const EVP_MD *EVP_get_digestbyname(const char *name) { for (unsigned i = 0; i < OPENSSL_ARRAY_SIZE(nid_to_digest_mapping); i++) { const char *short_name = nid_to_digest_mapping[i].short_name; const char *long_name = nid_to_digest_mapping[i].long_name; if ((short_name && strcmp(short_name, name) == 0) || (long_name && strcmp(long_name, name) == 0)) { return nid_to_digest_mapping[i].md_func(); } } return NULL; } static void blake2b256_init(EVP_MD_CTX *ctx) { BLAKE2B256_Init(reinterpret_cast(ctx->md_data)); } static void blake2b256_update(EVP_MD_CTX *ctx, const void *data, size_t len) { BLAKE2B256_Update(reinterpret_cast(ctx->md_data), data, len); } static void blake2b256_final(EVP_MD_CTX *ctx, uint8_t *md) { BLAKE2B256_Final(md, reinterpret_cast(ctx->md_data)); } static const EVP_MD evp_md_blake2b256 = { NID_undef, BLAKE2B256_DIGEST_LENGTH, 0, blake2b256_init, blake2b256_update, blake2b256_final, BLAKE2B_CBLOCK, sizeof(BLAKE2B_CTX), }; const EVP_MD *EVP_blake2b256(void) { return &evp_md_blake2b256; } static void md4_init(EVP_MD_CTX *ctx) { BSSL_CHECK(MD4_Init(reinterpret_cast(ctx->md_data))); } static void md4_update(EVP_MD_CTX *ctx, const void *data, size_t count) { BSSL_CHECK( MD4_Update(reinterpret_cast(ctx->md_data), data, count)); } static void md4_final(EVP_MD_CTX *ctx, uint8_t *out) { BSSL_CHECK(MD4_Final(out, reinterpret_cast(ctx->md_data))); } static const EVP_MD evp_md_md4 = { NID_md4, // MD4_DIGEST_LENGTH, // 0, md4_init, md4_update, md4_final, 64, sizeof(MD4_CTX), }; const EVP_MD *EVP_md4(void) { return &evp_md_md4; } static void md5_init(EVP_MD_CTX *ctx) { BSSL_CHECK(MD5_Init(reinterpret_cast(ctx->md_data))); } static void md5_update(EVP_MD_CTX *ctx, const void *data, size_t count) { BSSL_CHECK( MD5_Update(reinterpret_cast(ctx->md_data), data, count)); } static void md5_final(EVP_MD_CTX *ctx, uint8_t *out) { BSSL_CHECK(MD5_Final(out, 
reinterpret_cast(ctx->md_data))); } static const EVP_MD evp_md_md5 = { NID_md5, MD5_DIGEST_LENGTH, 0, md5_init, md5_update, md5_final, 64, sizeof(MD5_CTX), }; const EVP_MD *EVP_md5(void) { return &evp_md_md5; } typedef struct { MD5_CTX md5; SHA_CTX sha1; } MD5_SHA1_CTX; static void md5_sha1_init(EVP_MD_CTX *md_ctx) { MD5_SHA1_CTX *ctx = reinterpret_cast(md_ctx->md_data); BSSL_CHECK(MD5_Init(&ctx->md5) && SHA1_Init(&ctx->sha1)); } static void md5_sha1_update(EVP_MD_CTX *md_ctx, const void *data, size_t count) { MD5_SHA1_CTX *ctx = reinterpret_cast(md_ctx->md_data); BSSL_CHECK(MD5_Update(&ctx->md5, data, count) && SHA1_Update(&ctx->sha1, data, count)); } static void md5_sha1_final(EVP_MD_CTX *md_ctx, uint8_t *out) { MD5_SHA1_CTX *ctx = reinterpret_cast(md_ctx->md_data); BSSL_CHECK(MD5_Final(out, &ctx->md5) && SHA1_Final(out + MD5_DIGEST_LENGTH, &ctx->sha1)); } const EVP_MD evp_md_md5_sha1 = { NID_md5_sha1, MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH, 0, md5_sha1_init, md5_sha1_update, md5_sha1_final, 64, sizeof(MD5_SHA1_CTX), }; const EVP_MD *EVP_md5_sha1(void) { return &evp_md_md5_sha1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/dsa/dsa.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include #include #include "../fipsmodule/bn/internal.h" #include "../fipsmodule/dh/internal.h" #include "../internal.h" #include "internal.h" // Primality test according to FIPS PUB 186[-1], Appendix 2.1: 50 rounds of // Miller-Rabin. 
#define DSS_prime_checks 50 static int dsa_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv, BIGNUM **out_r); static CRYPTO_EX_DATA_CLASS g_ex_data_class = CRYPTO_EX_DATA_CLASS_INIT; DSA *DSA_new(void) { DSA *dsa = reinterpret_cast(OPENSSL_zalloc(sizeof(DSA))); if (dsa == NULL) { return NULL; } dsa->references = 1; CRYPTO_MUTEX_init(&dsa->method_mont_lock); CRYPTO_new_ex_data(&dsa->ex_data); return dsa; } void DSA_free(DSA *dsa) { if (dsa == NULL) { return; } if (!CRYPTO_refcount_dec_and_test_zero(&dsa->references)) { return; } CRYPTO_free_ex_data(&g_ex_data_class, dsa, &dsa->ex_data); BN_clear_free(dsa->p); BN_clear_free(dsa->q); BN_clear_free(dsa->g); BN_clear_free(dsa->pub_key); BN_clear_free(dsa->priv_key); BN_MONT_CTX_free(dsa->method_mont_p); BN_MONT_CTX_free(dsa->method_mont_q); CRYPTO_MUTEX_cleanup(&dsa->method_mont_lock); OPENSSL_free(dsa); } int DSA_up_ref(DSA *dsa) { CRYPTO_refcount_inc(&dsa->references); return 1; } unsigned DSA_bits(const DSA *dsa) { return BN_num_bits(dsa->p); } const BIGNUM *DSA_get0_pub_key(const DSA *dsa) { return dsa->pub_key; } const BIGNUM *DSA_get0_priv_key(const DSA *dsa) { return dsa->priv_key; } const BIGNUM *DSA_get0_p(const DSA *dsa) { return dsa->p; } const BIGNUM *DSA_get0_q(const DSA *dsa) { return dsa->q; } const BIGNUM *DSA_get0_g(const DSA *dsa) { return dsa->g; } void DSA_get0_key(const DSA *dsa, const BIGNUM **out_pub_key, const BIGNUM **out_priv_key) { if (out_pub_key != NULL) { *out_pub_key = dsa->pub_key; } if (out_priv_key != NULL) { *out_priv_key = dsa->priv_key; } } void DSA_get0_pqg(const DSA *dsa, const BIGNUM **out_p, const BIGNUM **out_q, const BIGNUM **out_g) { if (out_p != NULL) { *out_p = dsa->p; } if (out_q != NULL) { *out_q = dsa->q; } if (out_g != NULL) { *out_g = dsa->g; } } int DSA_set0_key(DSA *dsa, BIGNUM *pub_key, BIGNUM *priv_key) { if (dsa->pub_key == NULL && pub_key == NULL) { return 0; } if (pub_key != NULL) { BN_free(dsa->pub_key); dsa->pub_key = pub_key; } if (priv_key != 
NULL) { BN_free(dsa->priv_key); dsa->priv_key = priv_key; } return 1; } int DSA_set0_pqg(DSA *dsa, BIGNUM *p, BIGNUM *q, BIGNUM *g) { if ((dsa->p == NULL && p == NULL) || (dsa->q == NULL && q == NULL) || (dsa->g == NULL && g == NULL)) { return 0; } if (p != NULL) { BN_free(dsa->p); dsa->p = p; } if (q != NULL) { BN_free(dsa->q); dsa->q = q; } if (g != NULL) { BN_free(dsa->g); dsa->g = g; } BN_MONT_CTX_free(dsa->method_mont_p); dsa->method_mont_p = NULL; BN_MONT_CTX_free(dsa->method_mont_q); dsa->method_mont_q = NULL; return 1; } int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, size_t seed_len, int *out_counter, unsigned long *out_h, BN_GENCB *cb) { if (bits > OPENSSL_DSA_MAX_MODULUS_BITS) { OPENSSL_PUT_ERROR(DSA, DSA_R_INVALID_PARAMETERS); return 0; } int ok = 0; unsigned char seed[SHA256_DIGEST_LENGTH]; unsigned char md[SHA256_DIGEST_LENGTH]; unsigned char buf[SHA256_DIGEST_LENGTH], buf2[SHA256_DIGEST_LENGTH]; BIGNUM *r0, *W, *X, *c, *test; BIGNUM *g = NULL, *q = NULL, *p = NULL; BN_MONT_CTX *mont = NULL; int k, n = 0, m = 0; int counter = 0; int r = 0; BN_CTX *ctx = NULL; unsigned int h = 2; const EVP_MD *evpmd; evpmd = (bits >= 2048) ? EVP_sha256() : EVP_sha1(); size_t qsize = EVP_MD_size(evpmd); if (bits < 512) { bits = 512; } bits = (bits + 63) / 64 * 64; if (seed_in != NULL) { if (seed_len < qsize) { return 0; } if (seed_len > qsize) { // Only consume as much seed as is expected. seed_len = qsize; } OPENSSL_memcpy(seed, seed_in, seed_len); } ctx = BN_CTX_new(); if (ctx == NULL) { goto err; } BN_CTX_start(ctx); r0 = BN_CTX_get(ctx); g = BN_CTX_get(ctx); W = BN_CTX_get(ctx); q = BN_CTX_get(ctx); X = BN_CTX_get(ctx); c = BN_CTX_get(ctx); p = BN_CTX_get(ctx); test = BN_CTX_get(ctx); if (test == NULL || !BN_lshift(test, BN_value_one(), bits - 1)) { goto err; } for (;;) { // Find q. 
for (;;) { // step 1 if (!BN_GENCB_call(cb, BN_GENCB_GENERATED, m++)) { goto err; } int use_random_seed = (seed_in == NULL); if (use_random_seed) { if (!RAND_bytes(seed, qsize)) { goto err; } // DSA parameters are public. CONSTTIME_DECLASSIFY(seed, qsize); } else { // If we come back through, use random seed next time. seed_in = NULL; } OPENSSL_memcpy(buf, seed, qsize); OPENSSL_memcpy(buf2, seed, qsize); // precompute "SEED + 1" for step 7: for (size_t i = qsize - 1; i < qsize; i--) { buf[i]++; if (buf[i] != 0) { break; } } // step 2 if (!EVP_Digest(seed, qsize, md, NULL, evpmd, NULL) || !EVP_Digest(buf, qsize, buf2, NULL, evpmd, NULL)) { goto err; } for (size_t i = 0; i < qsize; i++) { md[i] ^= buf2[i]; } // step 3 md[0] |= 0x80; md[qsize - 1] |= 0x01; if (!BN_bin2bn(md, qsize, q)) { goto err; } // step 4 r = BN_is_prime_fasttest_ex(q, DSS_prime_checks, ctx, use_random_seed, cb); if (r > 0) { break; } if (r != 0) { goto err; } // do a callback call // step 5 } if (!BN_GENCB_call(cb, 2, 0) || !BN_GENCB_call(cb, 3, 0)) { goto err; } // step 6 counter = 0; // "offset = 2" n = (bits - 1) / 160; for (;;) { if ((counter != 0) && !BN_GENCB_call(cb, BN_GENCB_GENERATED, counter)) { goto err; } // step 7 BN_zero(W); // now 'buf' contains "SEED + offset - 1" for (k = 0; k <= n; k++) { // obtain "SEED + offset + k" by incrementing: for (size_t i = qsize - 1; i < qsize; i--) { buf[i]++; if (buf[i] != 0) { break; } } if (!EVP_Digest(buf, qsize, md, NULL, evpmd, NULL)) { goto err; } // step 8 if (!BN_bin2bn(md, qsize, r0) || !BN_lshift(r0, r0, (qsize << 3) * k) || !BN_add(W, W, r0)) { goto err; } } // more of step 8 if (!BN_mask_bits(W, bits - 1) || !BN_copy(X, W) || !BN_add(X, X, test)) { goto err; } // step 9 if (!BN_lshift1(r0, q) || !BN_mod(c, X, r0, ctx) || !BN_sub(r0, c, BN_value_one()) || !BN_sub(p, X, r0)) { goto err; } // step 10 if (BN_cmp(p, test) >= 0) { // step 11 r = BN_is_prime_fasttest_ex(p, DSS_prime_checks, ctx, 1, cb); if (r > 0) { goto end; // found it } if 
(r != 0) { goto err; } } // step 13 counter++; // "offset = offset + n + 1" // step 14 if (counter >= 4096) { break; } } } end: if (!BN_GENCB_call(cb, 2, 1)) { goto err; } // We now need to generate g // Set r0=(p-1)/q if (!BN_sub(test, p, BN_value_one()) || !BN_div(r0, NULL, test, q, ctx)) { goto err; } mont = BN_MONT_CTX_new_for_modulus(p, ctx); if (mont == NULL || !BN_set_word(test, h)) { goto err; } for (;;) { // g=test^r0%p if (!BN_mod_exp_mont(g, test, r0, p, ctx, mont)) { goto err; } if (!BN_is_one(g)) { break; } if (!BN_add(test, test, BN_value_one())) { goto err; } h++; } if (!BN_GENCB_call(cb, 3, 1)) { goto err; } ok = 1; err: if (ok) { BN_free(dsa->p); BN_free(dsa->q); BN_free(dsa->g); dsa->p = BN_dup(p); dsa->q = BN_dup(q); dsa->g = BN_dup(g); if (dsa->p == NULL || dsa->q == NULL || dsa->g == NULL) { ok = 0; goto err; } if (out_counter != NULL) { *out_counter = counter; } if (out_h != NULL) { *out_h = h; } } if (ctx) { BN_CTX_end(ctx); BN_CTX_free(ctx); } BN_MONT_CTX_free(mont); return ok; } DSA *DSAparams_dup(const DSA *dsa) { DSA *ret = DSA_new(); if (ret == NULL) { return NULL; } ret->p = BN_dup(dsa->p); ret->q = BN_dup(dsa->q); ret->g = BN_dup(dsa->g); if (ret->p == NULL || ret->q == NULL || ret->g == NULL) { DSA_free(ret); return NULL; } return ret; } int DSA_generate_key(DSA *dsa) { if (!dsa_check_key(dsa)) { return 0; } int ok = 0; BIGNUM *pub_key = NULL, *priv_key = NULL; BN_CTX *ctx = BN_CTX_new(); if (ctx == NULL) { goto err; } priv_key = dsa->priv_key; if (priv_key == NULL) { priv_key = BN_new(); if (priv_key == NULL) { goto err; } } if (!BN_rand_range_ex(priv_key, 1, dsa->q)) { goto err; } pub_key = dsa->pub_key; if (pub_key == NULL) { pub_key = BN_new(); if (pub_key == NULL) { goto err; } } if (!BN_MONT_CTX_set_locked(&dsa->method_mont_p, &dsa->method_mont_lock, dsa->p, ctx) || !BN_mod_exp_mont_consttime(pub_key, dsa->g, priv_key, dsa->p, ctx, dsa->method_mont_p)) { goto err; } // The public key is computed from the private key, but is 
public. bn_declassify(pub_key); dsa->priv_key = priv_key; dsa->pub_key = pub_key; ok = 1; err: if (dsa->pub_key == NULL) { BN_free(pub_key); } if (dsa->priv_key == NULL) { BN_free(priv_key); } BN_CTX_free(ctx); return ok; } DSA_SIG *DSA_SIG_new(void) { return reinterpret_cast(OPENSSL_zalloc(sizeof(DSA_SIG))); } void DSA_SIG_free(DSA_SIG *sig) { if (!sig) { return; } BN_free(sig->r); BN_free(sig->s); OPENSSL_free(sig); } void DSA_SIG_get0(const DSA_SIG *sig, const BIGNUM **out_r, const BIGNUM **out_s) { if (out_r != NULL) { *out_r = sig->r; } if (out_s != NULL) { *out_s = sig->s; } } int DSA_SIG_set0(DSA_SIG *sig, BIGNUM *r, BIGNUM *s) { if (r == NULL || s == NULL) { return 0; } BN_free(sig->r); BN_free(sig->s); sig->r = r; sig->s = s; return 1; } // mod_mul_consttime sets |r| to |a| * |b| modulo |mont->N|, treating |a| and // |b| as secret. This function internally uses Montgomery reduction, but // neither inputs nor outputs are in Montgomery form. static int mod_mul_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BN_MONT_CTX *mont, BN_CTX *ctx) { BN_CTX_start(ctx); BIGNUM *tmp = BN_CTX_get(ctx); // |BN_mod_mul_montgomery| removes a factor of R, so we cancel it with a // single |BN_to_montgomery| which adds one factor of R. int ok = tmp != NULL && BN_to_montgomery(tmp, a, mont, ctx) && BN_mod_mul_montgomery(r, tmp, b, mont, ctx); BN_CTX_end(ctx); return ok; } DSA_SIG *DSA_do_sign(const uint8_t *digest, size_t digest_len, const DSA *dsa) { if (!dsa_check_key(dsa)) { return NULL; } if (dsa->priv_key == NULL) { OPENSSL_PUT_ERROR(DSA, DSA_R_MISSING_PARAMETERS); return NULL; } BIGNUM *kinv = NULL, *r = NULL, *s = NULL; BIGNUM m; BIGNUM xr; BN_CTX *ctx = NULL; DSA_SIG *ret = NULL; BN_init(&m); BN_init(&xr); s = BN_new(); { if (s == NULL) { goto err; } ctx = BN_CTX_new(); if (ctx == NULL) { goto err; } // Cap iterations so that invalid parameters do not infinite loop. 
This does // not impact valid parameters because the probability of requiring even one // retry is negligible, let alone 32. Unfortunately, DSA was mis-specified, // so invalid parameters are reachable from most callers handling untrusted // private keys. (The |dsa_check_key| call above is not sufficient. Checking // whether arbitrary paremeters form a valid DSA group is expensive.) static const int kMaxIterations = 32; int iters = 0; redo: if (!dsa_sign_setup(dsa, ctx, &kinv, &r)) { goto err; } if (digest_len > BN_num_bytes(dsa->q)) { // If the digest length is greater than the size of |dsa->q| use the // BN_num_bits(dsa->q) leftmost bits of the digest, see FIPS 186-3, 4.2. // Note the above check that |dsa->q| is a multiple of 8 bits. digest_len = BN_num_bytes(dsa->q); } if (BN_bin2bn(digest, digest_len, &m) == NULL) { goto err; } // |m| is bounded by 2^(num_bits(q)), which is slightly looser than q. This // violates |bn_mod_add_consttime| and |mod_mul_consttime|'s preconditions. // (The underlying algorithms could accept looser bounds, but we reduce for // simplicity.) size_t q_width = bn_minimal_width(dsa->q); if (!bn_resize_words(&m, q_width) || !bn_resize_words(&xr, q_width)) { goto err; } bn_reduce_once_in_place(m.d, 0 /* no carry word */, dsa->q->d, xr.d /* scratch space */, q_width); // Compute s = inv(k) (m + xr) mod q. Note |dsa->method_mont_q| is // initialized by |dsa_sign_setup|. if (!mod_mul_consttime(&xr, dsa->priv_key, r, dsa->method_mont_q, ctx) || !bn_mod_add_consttime(s, &xr, &m, dsa->q, ctx) || !mod_mul_consttime(s, s, kinv, dsa->method_mont_q, ctx)) { goto err; } // The signature is computed from the private key, but is public. bn_declassify(r); bn_declassify(s); // Redo if r or s is zero as required by FIPS 186-3: this is // very unlikely. 
if (BN_is_zero(r) || BN_is_zero(s)) { iters++; if (iters > kMaxIterations) { OPENSSL_PUT_ERROR(DSA, DSA_R_TOO_MANY_ITERATIONS); goto err; } goto redo; } ret = DSA_SIG_new(); if (ret == NULL) { goto err; } ret->r = r; ret->s = s; } err: if (ret == NULL) { OPENSSL_PUT_ERROR(DSA, ERR_R_BN_LIB); BN_free(r); BN_free(s); } BN_CTX_free(ctx); BN_clear_free(&m); BN_clear_free(&xr); BN_clear_free(kinv); return ret; } int DSA_do_verify(const uint8_t *digest, size_t digest_len, const DSA_SIG *sig, const DSA *dsa) { int valid; if (!DSA_do_check_signature(&valid, digest, digest_len, sig, dsa)) { return -1; } return valid; } int DSA_do_check_signature(int *out_valid, const uint8_t *digest, size_t digest_len, const DSA_SIG *sig, const DSA *dsa) { *out_valid = 0; if (!dsa_check_key(dsa)) { return 0; } if (dsa->pub_key == NULL) { OPENSSL_PUT_ERROR(DSA, DSA_R_MISSING_PARAMETERS); return 0; } int ret = 0; BIGNUM u1, u2, t1; BN_init(&u1); BN_init(&u2); BN_init(&t1); BN_CTX *ctx = BN_CTX_new(); { if (ctx == NULL) { goto err; } if (BN_is_zero(sig->r) || BN_is_negative(sig->r) || BN_ucmp(sig->r, dsa->q) >= 0) { ret = 1; goto err; } if (BN_is_zero(sig->s) || BN_is_negative(sig->s) || BN_ucmp(sig->s, dsa->q) >= 0) { ret = 1; goto err; } // Calculate W = inv(S) mod Q // save W in u2 if (BN_mod_inverse(&u2, sig->s, dsa->q, ctx) == NULL) { goto err; } // save M in u1 unsigned q_bits = BN_num_bits(dsa->q); if (digest_len > (q_bits >> 3)) { // if the digest length is greater than the size of q use the // BN_num_bits(dsa->q) leftmost bits of the digest, see // fips 186-3, 4.2 digest_len = (q_bits >> 3); } if (BN_bin2bn(digest, digest_len, &u1) == NULL) { goto err; } // u1 = M * w mod q if (!BN_mod_mul(&u1, &u1, &u2, dsa->q, ctx)) { goto err; } // u2 = r * w mod q if (!BN_mod_mul(&u2, sig->r, &u2, dsa->q, ctx)) { goto err; } if (!BN_MONT_CTX_set_locked((BN_MONT_CTX **)&dsa->method_mont_p, (CRYPTO_MUTEX *)&dsa->method_mont_lock, dsa->p, ctx)) { goto err; } if (!BN_mod_exp2_mont(&t1, dsa->g, &u1, 
dsa->pub_key, &u2, dsa->p, ctx, dsa->method_mont_p)) { goto err; } // BN_copy(&u1,&t1); // let u1 = u1 mod q if (!BN_mod(&u1, &t1, dsa->q, ctx)) { goto err; } // V is now in u1. If the signature is correct, it will be // equal to R. *out_valid = BN_ucmp(&u1, sig->r) == 0; ret = 1; } err: if (ret != 1) { OPENSSL_PUT_ERROR(DSA, ERR_R_BN_LIB); } BN_CTX_free(ctx); BN_free(&u1); BN_free(&u2); BN_free(&t1); return ret; } int DSA_sign(int type, const uint8_t *digest, size_t digest_len, uint8_t *out_sig, unsigned int *out_siglen, const DSA *dsa) { DSA_SIG *s; s = DSA_do_sign(digest, digest_len, dsa); if (s == NULL) { *out_siglen = 0; return 0; } *out_siglen = i2d_DSA_SIG(s, &out_sig); DSA_SIG_free(s); return 1; } int DSA_verify(int type, const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, const DSA *dsa) { int valid; if (!DSA_check_signature(&valid, digest, digest_len, sig, sig_len, dsa)) { return -1; } return valid; } int DSA_check_signature(int *out_valid, const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, const DSA *dsa) { DSA_SIG *s = NULL; int ret = 0; uint8_t *der = NULL; s = DSA_SIG_new(); { if (s == NULL) { goto err; } const uint8_t *sigp = sig; if (d2i_DSA_SIG(&s, &sigp, sig_len) == NULL || sigp != sig + sig_len) { goto err; } // Ensure that the signature uses DER and doesn't have trailing garbage. int der_len = i2d_DSA_SIG(s, &der); if (der_len < 0 || (size_t)der_len != sig_len || OPENSSL_memcmp(sig, der, sig_len)) { goto err; } ret = DSA_do_check_signature(out_valid, digest, digest_len, s, dsa); } err: OPENSSL_free(der); DSA_SIG_free(s); return ret; } // der_len_len returns the number of bytes needed to represent a length of |len| // in DER. 
static size_t der_len_len(size_t len) {
  // Short form: lengths below 0x80 fit in a single byte.
  if (len < 0x80) {
    return 1;
  }
  // Long form: one prefix byte plus one byte per base-256 digit of |len|.
  size_t ret = 1;
  while (len > 0) {
    ret++;
    len >>= 8;
  }
  return ret;
}

// DSA_size returns the maximum size, in bytes, of a DER-encoded signature
// under |dsa|, or zero if |dsa| has no group order or the computation
// overflows.
int DSA_size(const DSA *dsa) {
  if (dsa->q == NULL) {
    return 0;
  }
  size_t order_len = BN_num_bytes(dsa->q);
  // Compute the maximum length of an |order_len| byte integer. Defensively
  // assume that the leading 0x00 is included.
  size_t integer_len = 1 /* tag */ + der_len_len(order_len + 1) + 1 + order_len;
  if (integer_len < order_len) {
    return 0;
  }
  // A DSA signature is two INTEGERs.
  size_t value_len = 2 * integer_len;
  if (value_len < integer_len) {
    return 0;
  }
  // Add the header.
  size_t ret = 1 /* tag */ + der_len_len(value_len) + value_len;
  if (ret < value_len) {
    return 0;
  }
  return ret;
}

// dsa_sign_setup draws a fresh nonce k in [1, q) and computes
// r = (g^k mod p) mod q and k^-1 mod q, storing them in |*out_r| and
// |*out_kinv| (replacing any previous values). Returns one on success and
// zero on error.
static int dsa_sign_setup(const DSA *dsa, BN_CTX *ctx, BIGNUM **out_kinv,
                          BIGNUM **out_r) {
  int ret = 0;
  BIGNUM k;
  BN_init(&k);
  BIGNUM *r = BN_new();
  BIGNUM *kinv = BN_new();
  if (r == NULL || kinv == NULL ||
      // Get random k
      !BN_rand_range_ex(&k, 1, dsa->q) ||
      // Lazily initialize (and cache) the Montgomery contexts for p and q.
      !BN_MONT_CTX_set_locked((BN_MONT_CTX **)&dsa->method_mont_p,
                              (CRYPTO_MUTEX *)&dsa->method_mont_lock, dsa->p,
                              ctx) ||
      !BN_MONT_CTX_set_locked((BN_MONT_CTX **)&dsa->method_mont_q,
                              (CRYPTO_MUTEX *)&dsa->method_mont_lock, dsa->q,
                              ctx) ||
      // Compute r = (g^k mod p) mod q
      !BN_mod_exp_mont_consttime(r, dsa->g, &k, dsa->p, ctx,
                                 dsa->method_mont_p)) {
    OPENSSL_PUT_ERROR(DSA, ERR_R_BN_LIB);
    goto err;
  }
  // Note |BN_mod| below is not constant-time and may leak information about
  // |r|. |dsa->p| may be significantly larger than |dsa->q|, so this is not
  // easily performed in constant-time with Montgomery reduction.
  //
  // However, |r| at this point is g^k (mod p). It is almost the value of |r|
  // revealed in the signature anyway (g^k (mod p) (mod q)), going from it to
  // |k| would require computing a discrete log.
  bn_declassify(r);
  if (!BN_mod(r, r, dsa->q, ctx) ||
      // Compute part of 's = inv(k) (m + xr) mod q' using Fermat's Little
      // Theorem.
      !bn_mod_inverse_prime(kinv, &k, dsa->q, ctx, dsa->method_mont_q)) {
    OPENSSL_PUT_ERROR(DSA, ERR_R_BN_LIB);
    goto err;
  }

  // Hand the results to the caller; NULL-ing the locals keeps the cleanup
  // below from freeing them.
  BN_clear_free(*out_kinv);
  *out_kinv = kinv;
  kinv = NULL;

  BN_clear_free(*out_r);
  *out_r = r;
  r = NULL;

  ret = 1;

err:
  BN_clear_free(&k);
  BN_clear_free(r);
  BN_clear_free(kinv);
  return ret;
}

// ex_data accessors: attach and retrieve application-specific data on a DSA
// object via the CRYPTO_EX_DATA mechanism.
int DSA_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused,
                         CRYPTO_EX_dup *dup_unused,
                         CRYPTO_EX_free *free_func) {
  return CRYPTO_get_ex_new_index_ex(&g_ex_data_class, argl, argp, free_func);
}

int DSA_set_ex_data(DSA *dsa, int idx, void *arg) {
  return CRYPTO_set_ex_data(&dsa->ex_data, idx, arg);
}

void *DSA_get_ex_data(const DSA *dsa, int idx) {
  return CRYPTO_get_ex_data(&dsa->ex_data, idx);
}

// DSA_dup_DH returns a freshly-allocated DH whose p, q, g and key members
// are copies of |dsa|'s (members that are NULL in |dsa| stay NULL), or NULL
// on allocation failure.
DH *DSA_dup_DH(const DSA *dsa) {
  if (dsa == NULL) {
    return NULL;
  }

  DH *ret = DH_new();
  if (ret == NULL) {
    goto err;
  }
  if (dsa->q != NULL) {
    ret->priv_length = BN_num_bits(dsa->q);
    if ((ret->q = BN_dup(dsa->q)) == NULL) {
      goto err;
    }
  }
  if ((dsa->p != NULL && (ret->p = BN_dup(dsa->p)) == NULL) ||
      (dsa->g != NULL && (ret->g = BN_dup(dsa->g)) == NULL) ||
      (dsa->pub_key != NULL && (ret->pub_key = BN_dup(dsa->pub_key)) == NULL) ||
      (dsa->priv_key != NULL &&
       (ret->priv_key = BN_dup(dsa->priv_key)) == NULL)) {
    goto err;
  }

  return ret;

err:
  DH_free(ret);
  return NULL;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/dsa/dsa_asn1.cc
================================================
/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the angle-bracket header names below were stripped by the
// text extraction; the bare #include directives are preserved verbatim.
#include
#include
#include
#include
#include
#include
#include "internal.h"

#include "../bytestring/internal.h"


// This function is in dsa_asn1.c rather than dsa.c because it is reachable
// from |EVP_PKEY| parsers.
// This makes it easier for the static linker to drop most of the DSA
// implementation.

// dsa_check_key performs cheap sanity checks on |dsa|'s parameters and on
// whichever key halves are present. Returns one on success and zero on
// error.
int dsa_check_key(const DSA *dsa) {
  if (!dsa->p || !dsa->q || !dsa->g) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_MISSING_PARAMETERS);
    return 0;
  }

  // Fully checking for invalid DSA groups is expensive, so security and
  // correctness of the signature scheme depend on how |dsa| was computed. I.e.
  // we leave "assurance of domain parameter validity" from FIPS 186-4 to the
  // caller. However, we check bounds on all values to avoid DoS vectors even
  // when domain parameters are invalid. In particular, signing will infinite
  // loop if |g| is zero.
  if (BN_is_negative(dsa->p) || BN_is_negative(dsa->q) || BN_is_zero(dsa->p) ||
      BN_is_zero(dsa->q) || !BN_is_odd(dsa->p) || !BN_is_odd(dsa->q) ||
      // |q| must be a prime divisor of |p - 1|, which implies |q < p|.
      BN_cmp(dsa->q, dsa->p) >= 0 ||
      // |g| is in the multiplicative group of |p|.
      BN_is_negative(dsa->g) || BN_is_zero(dsa->g) ||
      BN_cmp(dsa->g, dsa->p) >= 0) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_INVALID_PARAMETERS);
    return 0;
  }

  // FIPS 186-4 allows only three different sizes for q.
  unsigned q_bits = BN_num_bits(dsa->q);
  if (q_bits != 160 && q_bits != 224 && q_bits != 256) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_BAD_Q_VALUE);
    return 0;
  }

  // Bound |dsa->p| to avoid a DoS vector. Note this limit is much larger than
  // the one in FIPS 186-4, which only allows L = 1024, 2048, and 3072.
  if (BN_num_bits(dsa->p) > OPENSSL_DSA_MAX_MODULUS_BITS) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_MODULUS_TOO_LARGE);
    return 0;
  }

  if (dsa->pub_key != NULL) {
    // The public key is also in the multiplicative group of |p|.
    if (BN_is_negative(dsa->pub_key) || BN_is_zero(dsa->pub_key) ||
        BN_cmp(dsa->pub_key, dsa->p) >= 0) {
      OPENSSL_PUT_ERROR(DSA, DSA_R_INVALID_PARAMETERS);
      return 0;
    }
  }

  if (dsa->priv_key != NULL) {
    // The private key is a non-zero element of the scalar field, determined by
    // |q|.
    if (BN_is_negative(dsa->priv_key) ||
        constant_time_declassify_int(BN_is_zero(dsa->priv_key)) ||
        constant_time_declassify_int(BN_cmp(dsa->priv_key, dsa->q) >= 0)) {
      OPENSSL_PUT_ERROR(DSA, DSA_R_INVALID_PARAMETERS);
      return 0;
    }
  }

  return 1;
}

// parse_integer parses an ASN.1 INTEGER from |cbs| into a freshly-allocated
// BIGNUM stored in |*out| (which must be NULL on entry). Returns one on
// success and zero on error.
static int parse_integer(CBS *cbs, BIGNUM **out) {
  assert(*out == NULL);
  *out = BN_new();
  if (*out == NULL) {
    return 0;
  }
  return BN_parse_asn1_unsigned(cbs, *out);
}

// marshal_integer writes |bn| to |cbb| as an ASN.1 INTEGER, failing when
// |bn| is NULL.
static int marshal_integer(CBB *cbb, BIGNUM *bn) {
  if (bn == NULL) {
    // A DSA object may be missing some components.
    OPENSSL_PUT_ERROR(DSA, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }
  return BN_marshal_asn1(cbb, bn);
}

// DSA_SIG_parse decodes SEQUENCE { r INTEGER, s INTEGER } from |cbs|,
// rejecting trailing data inside the SEQUENCE.
DSA_SIG *DSA_SIG_parse(CBS *cbs) {
  DSA_SIG *ret = DSA_SIG_new();
  if (ret == NULL) {
    return NULL;
  }
  CBS child;
  if (!CBS_get_asn1(cbs, &child, CBS_ASN1_SEQUENCE) ||
      !parse_integer(&child, &ret->r) ||
      !parse_integer(&child, &ret->s) ||
      CBS_len(&child) != 0) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_DECODE_ERROR);
    DSA_SIG_free(ret);
    return NULL;
  }
  return ret;
}

// DSA_SIG_marshal is the encoder matching |DSA_SIG_parse|.
int DSA_SIG_marshal(CBB *cbb, const DSA_SIG *sig) {
  CBB child;
  if (!CBB_add_asn1(cbb, &child, CBS_ASN1_SEQUENCE) ||
      !marshal_integer(&child, sig->r) ||
      !marshal_integer(&child, sig->s) ||
      !CBB_flush(cbb)) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_ENCODE_ERROR);
    return 0;
  }
  return 1;
}

// DSA_parse_public_key decodes SEQUENCE { y, p, q, g } and validates the
// result with |dsa_check_key|.
DSA *DSA_parse_public_key(CBS *cbs) {
  DSA *ret = DSA_new();
  if (ret == NULL) {
    return NULL;
  }
  CBS child;
  if (!CBS_get_asn1(cbs, &child, CBS_ASN1_SEQUENCE) ||
      !parse_integer(&child, &ret->pub_key) ||
      !parse_integer(&child, &ret->p) ||
      !parse_integer(&child, &ret->q) ||
      !parse_integer(&child, &ret->g) ||
      CBS_len(&child) != 0) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_DECODE_ERROR);
    goto err;
  }
  if (!dsa_check_key(ret)) {
    goto err;
  }
  return ret;

err:
  DSA_free(ret);
  return NULL;
}

// DSA_marshal_public_key is the encoder matching |DSA_parse_public_key|.
int DSA_marshal_public_key(CBB *cbb, const DSA *dsa) {
  CBB child;
  if (!CBB_add_asn1(cbb, &child, CBS_ASN1_SEQUENCE) ||
      !marshal_integer(&child, dsa->pub_key) ||
      !marshal_integer(&child, dsa->p) ||
      !marshal_integer(&child, dsa->q) ||
      !marshal_integer(&child, dsa->g) ||
      !CBB_flush(cbb)) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_ENCODE_ERROR);
    return 0;
  }
  return 1;
}

// DSA_parse_parameters decodes SEQUENCE { p, q, g } (no key halves) and
// validates the result with |dsa_check_key|.
DSA *DSA_parse_parameters(CBS *cbs) {
  DSA *ret = DSA_new();
  if (ret == NULL) {
    return NULL;
  }
  CBS child;
  if (!CBS_get_asn1(cbs, &child, CBS_ASN1_SEQUENCE) ||
      !parse_integer(&child, &ret->p) ||
      !parse_integer(&child, &ret->q) ||
      !parse_integer(&child, &ret->g) ||
      CBS_len(&child) != 0) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_DECODE_ERROR);
    goto err;
  }
  if (!dsa_check_key(ret)) {
    goto err;
  }
  return ret;

err:
  DSA_free(ret);
  return NULL;
}

// DSA_marshal_parameters is the encoder matching |DSA_parse_parameters|.
int DSA_marshal_parameters(CBB *cbb, const DSA *dsa) {
  CBB child;
  if (!CBB_add_asn1(cbb, &child, CBS_ASN1_SEQUENCE) ||
      !marshal_integer(&child, dsa->p) ||
      !marshal_integer(&child, dsa->q) ||
      !marshal_integer(&child, dsa->g) ||
      !CBB_flush(cbb)) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_ENCODE_ERROR);
    return 0;
  }
  return 1;
}

// DSA_parse_private_key decodes SEQUENCE { version(0), p, q, g, y, x } and
// validates the result with |dsa_check_key|.
DSA *DSA_parse_private_key(CBS *cbs) {
  DSA *ret = DSA_new();
  if (ret == NULL) {
    return NULL;
  }

  CBS child;
  uint64_t version;
  if (!CBS_get_asn1(cbs, &child, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1_uint64(&child, &version)) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_DECODE_ERROR);
    goto err;
  }

  // Only version zero is defined for this structure.
  if (version != 0) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_BAD_VERSION);
    goto err;
  }

  if (!parse_integer(&child, &ret->p) ||
      !parse_integer(&child, &ret->q) ||
      !parse_integer(&child, &ret->g) ||
      !parse_integer(&child, &ret->pub_key) ||
      !parse_integer(&child, &ret->priv_key) ||
      CBS_len(&child) != 0) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_DECODE_ERROR);
    goto err;
  }
  if (!dsa_check_key(ret)) {
    goto err;
  }

  return ret;

err:
  DSA_free(ret);
  return NULL;
}

// DSA_marshal_private_key is the encoder matching |DSA_parse_private_key|.
int DSA_marshal_private_key(CBB *cbb, const DSA *dsa) {
  CBB child;
  if (!CBB_add_asn1(cbb, &child, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1_uint64(&child, 0 /* version */) ||
      !marshal_integer(&child, dsa->p) ||
      !marshal_integer(&child, dsa->q) ||
      !marshal_integer(&child, dsa->g) ||
      !marshal_integer(&child, dsa->pub_key) ||
      !marshal_integer(&child, dsa->priv_key) ||
      !CBB_flush(cbb)) {
    OPENSSL_PUT_ERROR(DSA, DSA_R_ENCODE_ERROR);
    return 0;
  }
  return 1;
}

// Legacy d2i/i2d wrappers over the CBS/CBB parsers above. Each d2i frees any
// existing |*out| object, stores the new one, and advances |*inp| past the
// bytes consumed; each i2d returns the encoded length or -1 on error.
DSA_SIG *d2i_DSA_SIG(DSA_SIG **out_sig, const uint8_t **inp, long len) {
  if (len < 0) {
    return NULL;
  }
  CBS cbs;
  CBS_init(&cbs, *inp, (size_t)len);
  DSA_SIG *ret = DSA_SIG_parse(&cbs);
  if (ret == NULL) {
    return NULL;
  }
  if (out_sig != NULL) {
    DSA_SIG_free(*out_sig);
    *out_sig = ret;
  }
  *inp = CBS_data(&cbs);
  return ret;
}

int i2d_DSA_SIG(const DSA_SIG *in, uint8_t **outp) {
  CBB cbb;
  if (!CBB_init(&cbb, 0) ||
      !DSA_SIG_marshal(&cbb, in)) {
    CBB_cleanup(&cbb);
    return -1;
  }
  return CBB_finish_i2d(&cbb, outp);
}

DSA *d2i_DSAPublicKey(DSA **out, const uint8_t **inp, long len) {
  if (len < 0) {
    return NULL;
  }
  CBS cbs;
  CBS_init(&cbs, *inp, (size_t)len);
  DSA *ret = DSA_parse_public_key(&cbs);
  if (ret == NULL) {
    return NULL;
  }
  if (out != NULL) {
    DSA_free(*out);
    *out = ret;
  }
  *inp = CBS_data(&cbs);
  return ret;
}

int i2d_DSAPublicKey(const DSA *in, uint8_t **outp) {
  CBB cbb;
  if (!CBB_init(&cbb, 0) ||
      !DSA_marshal_public_key(&cbb, in)) {
    CBB_cleanup(&cbb);
    return -1;
  }
  return CBB_finish_i2d(&cbb, outp);
}

DSA *d2i_DSAPrivateKey(DSA **out, const uint8_t **inp, long len) {
  if (len < 0) {
    return NULL;
  }
  CBS cbs;
  CBS_init(&cbs, *inp, (size_t)len);
  DSA *ret = DSA_parse_private_key(&cbs);
  if (ret == NULL) {
    return NULL;
  }
  if (out != NULL) {
    DSA_free(*out);
    *out = ret;
  }
  *inp = CBS_data(&cbs);
  return ret;
}

int i2d_DSAPrivateKey(const DSA *in, uint8_t **outp) {
  CBB cbb;
  if (!CBB_init(&cbb, 0) ||
      !DSA_marshal_private_key(&cbb, in)) {
    CBB_cleanup(&cbb);
    return -1;
  }
  return CBB_finish_i2d(&cbb, outp);
}

DSA *d2i_DSAparams(DSA **out, const uint8_t **inp, long len) {
  if (len < 0) {
    return NULL;
  }
  CBS cbs;
  CBS_init(&cbs, *inp, (size_t)len);
  DSA *ret = DSA_parse_parameters(&cbs);
  if (ret == NULL) {
    return NULL;
  }
  if (out != NULL) {
    DSA_free(*out);
    *out = ret;
  }
  *inp = CBS_data(&cbs);
  return ret;
}

int i2d_DSAparams(const DSA *in, uint8_t **outp) {
  CBB cbb;
  if (!CBB_init(&cbb, 0) ||
      !DSA_marshal_parameters(&cbb, in)) {
    CBB_cleanup(&cbb);
    return -1;
  }
  return CBB_finish_i2d(&cbb, outp);
}
================================================ FILE: Sources/CNIOBoringSSL/crypto/dsa/internal.h ================================================ /* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_DSA_INTERNAL_H #define OPENSSL_HEADER_DSA_INTERNAL_H #include #include #include "../internal.h" #if defined(__cplusplus) extern "C" { #endif struct dsa_st { BIGNUM *p; BIGNUM *q; BIGNUM *g; BIGNUM *pub_key; BIGNUM *priv_key; // Normally used to cache montgomery values CRYPTO_MUTEX method_mont_lock; BN_MONT_CTX *method_mont_p; BN_MONT_CTX *method_mont_q; CRYPTO_refcount_t references; CRYPTO_EX_DATA ex_data; }; // dsa_check_key performs cheap self-checks on |dsa|, and ensures it is within // DoS bounds. It returns one on success and zero on error. int dsa_check_key(const DSA *dsa); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_DSA_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/ec/ec_asn1.cc ================================================ /* * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
 You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the angle-bracket header names below were stripped by the
// text extraction; the bare #include directives are preserved verbatim.
#include
#include
#include
#include
#include
#include
#include
#include

#include "../bytestring/internal.h"
#include "../fipsmodule/ec/internal.h"
#include "../internal.h"


// Tags for the two optional, context-specific fields of a SEC 1
// ECPrivateKey: [0] parameters and [1] publicKey.
static const CBS_ASN1_TAG kParametersTag =
    CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0;
static const CBS_ASN1_TAG kPublicKeyTag =
    CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 1;

// TODO(https://crbug.com/boringssl/497): Allow parsers to specify a list of
// acceptable groups, so parsers don't have to pull in all four.
typedef const EC_GROUP *(*ec_group_func)(void);
static const ec_group_func kAllGroups[] = {
    &EC_group_p224,
    &EC_group_p256,
    &EC_group_p384,
    &EC_group_p521,
};

// EC_KEY_parse_private_key parses a SEC 1 ECPrivateKey from |cbs|. |group|
// may supply the expected group; if the encoding also carries parameters the
// two must match. Returns the new key, or NULL on error.
EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) {
  CBS ec_private_key, private_key;
  uint64_t version;
  if (!CBS_get_asn1(cbs, &ec_private_key, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1_uint64(&ec_private_key, &version) ||  //
      version != 1 ||
      !CBS_get_asn1(&ec_private_key, &private_key, CBS_ASN1_OCTETSTRING)) {
    OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
    return NULL;
  }

  // Parse the optional parameters field.
  EC_KEY *ret = NULL;
  BIGNUM *priv_key = NULL;
  if (CBS_peek_asn1_tag(&ec_private_key, kParametersTag)) {
    // Per SEC 1, as an alternative to omitting it, one is allowed to specify
    // this field and put in a NULL to mean inheriting this value. This was
    // omitted in a previous version of this logic without problems, so leave
    // it unimplemented.
    CBS child;
    if (!CBS_get_asn1(&ec_private_key, &child, kParametersTag)) {
      OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
      goto err;
    }
    const EC_GROUP *inner_group = EC_KEY_parse_parameters(&child);
    if (inner_group == NULL) {
      goto err;
    }
    if (group == NULL) {
      group = inner_group;
    } else if (EC_GROUP_cmp(group, inner_group, NULL) != 0) {
      // If a group was supplied externally, it must match.
      OPENSSL_PUT_ERROR(EC, EC_R_GROUP_MISMATCH);
      goto err;
    }
    if (CBS_len(&child) != 0) {
      OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
      goto err;
    }
  }

  if (group == NULL) {
    OPENSSL_PUT_ERROR(EC, EC_R_MISSING_PARAMETERS);
    goto err;
  }

  ret = EC_KEY_new();
  if (ret == NULL || !EC_KEY_set_group(ret, group)) {
    goto err;
  }

  // Although RFC 5915 specifies the length of the key, OpenSSL historically
  // got this wrong, so accept any length. See upstream's
  // 30cd4ff294252c4b6a4b69cbef6a5b4117705d22.
  priv_key = BN_bin2bn(CBS_data(&private_key), CBS_len(&private_key), NULL);
  ret->pub_key = EC_POINT_new(group);
  if (priv_key == NULL || ret->pub_key == NULL ||
      !EC_KEY_set_private_key(ret, priv_key)) {
    goto err;
  }

  if (CBS_peek_asn1_tag(&ec_private_key, kPublicKeyTag)) {
    CBS child, public_key;
    uint8_t padding;
    if (!CBS_get_asn1(&ec_private_key, &child, kPublicKeyTag) ||
        !CBS_get_asn1(&child, &public_key, CBS_ASN1_BITSTRING) ||
        // As in a SubjectPublicKeyInfo, the byte-encoded public key is then
        // encoded as a BIT STRING with bits ordered as in the DER encoding.
        !CBS_get_u8(&public_key, &padding) ||  //
        padding != 0 ||
        // Explicitly check |public_key| is non-empty to save the conversion
        // form later.
        CBS_len(&public_key) == 0 ||
        !EC_POINT_oct2point(group, ret->pub_key, CBS_data(&public_key),
                            CBS_len(&public_key), NULL) ||
        CBS_len(&child) != 0) {
      OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
      goto err;
    }

    // Save the point conversion form.
    // TODO(davidben): Consider removing this.
    ret->conv_form =
        (point_conversion_form_t)(CBS_data(&public_key)[0] & ~0x01);
  } else {
    // Compute the public key instead.
    if (!ec_point_mul_scalar_base(group, &ret->pub_key->raw,
                                  &ret->priv_key->scalar)) {
      goto err;
    }
    // Remember the original private-key-only encoding.
    // TODO(davidben): Consider removing this.
    ret->enc_flag |= EC_PKEY_NO_PUBKEY;
  }

  if (CBS_len(&ec_private_key) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
    goto err;
  }

  // Ensure the resulting key is valid.
  if (!EC_KEY_check_key(ret)) {
    goto err;
  }

  BN_free(priv_key);
  return ret;

err:
  EC_KEY_free(ret);
  BN_free(priv_key);
  return NULL;
}

// EC_KEY_marshal_private_key writes |key| as a SEC 1 ECPrivateKey to |cbb|.
// |enc_flags| (EC_PKEY_NO_PARAMETERS / EC_PKEY_NO_PUBKEY) controls which
// optional fields are emitted. Returns one on success and zero on error.
int EC_KEY_marshal_private_key(CBB *cbb, const EC_KEY *key,
                               unsigned enc_flags) {
  if (key == NULL || key->group == NULL || key->priv_key == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }

  CBB ec_private_key, private_key;
  if (!CBB_add_asn1(cbb, &ec_private_key, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1_uint64(&ec_private_key, 1 /* version */) ||
      !CBB_add_asn1(&ec_private_key, &private_key, CBS_ASN1_OCTETSTRING) ||
      !BN_bn2cbb_padded(&private_key,
                        BN_num_bytes(EC_GROUP_get0_order(key->group)),
                        EC_KEY_get0_private_key(key))) {
    OPENSSL_PUT_ERROR(EC, EC_R_ENCODE_ERROR);
    return 0;
  }

  if (!(enc_flags & EC_PKEY_NO_PARAMETERS)) {
    CBB child;
    if (!CBB_add_asn1(&ec_private_key, &child, kParametersTag) ||
        !EC_KEY_marshal_curve_name(&child, key->group) ||
        !CBB_flush(&ec_private_key)) {
      OPENSSL_PUT_ERROR(EC, EC_R_ENCODE_ERROR);
      return 0;
    }
  }

  // TODO(fork): replace this flexibility with sensible default?
  if (!(enc_flags & EC_PKEY_NO_PUBKEY) && key->pub_key != NULL) {
    CBB child, public_key;
    if (!CBB_add_asn1(&ec_private_key, &child, kPublicKeyTag) ||
        !CBB_add_asn1(&child, &public_key, CBS_ASN1_BITSTRING) ||
        // As in a SubjectPublicKeyInfo, the byte-encoded public key is then
        // encoded as a BIT STRING with bits ordered as in the DER encoding.
        !CBB_add_u8(&public_key, 0 /* padding */) ||
        !EC_POINT_point2cbb(&public_key, key->group, key->pub_key,
                            key->conv_form, NULL) ||
        !CBB_flush(&ec_private_key)) {
      OPENSSL_PUT_ERROR(EC, EC_R_ENCODE_ERROR);
      return 0;
    }
  }

  if (!CBB_flush(cbb)) {
    OPENSSL_PUT_ERROR(EC, EC_R_ENCODE_ERROR);
    return 0;
  }

  return 1;
}

// kPrimeFieldOID is the encoding of 1.2.840.10045.1.1.
static const uint8_t kPrimeField[] = {0x2a, 0x86, 0x48, 0xce, 0x3d, 0x01, 0x01};

namespace {
// Borrowed views (CBS) into the components of an explicitly-encoded prime
// curve; filled in by |parse_explicit_prime_curve|.
struct explicit_prime_curve {
  CBS prime, a, b, base_x, base_y, order;
};
}  // namespace

// parse_explicit_prime_curve parses an explicitly-encoded prime-field curve
// from |in| into |out|. Only uncompressed base points and cofactor one are
// accepted. Returns one on success and zero on error.
static int parse_explicit_prime_curve(CBS *in,
                                      struct explicit_prime_curve *out) {
  // See RFC 3279, section 2.3.5. Note that RFC 3279 calls this structure an
  // ECParameters while RFC 5480 calls it a SpecifiedECDomain.
  CBS params, field_id, field_type, curve, base, cofactor;
  int has_cofactor;
  uint64_t version;
  if (!CBS_get_asn1(in, &params, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1_uint64(&params, &version) ||  //
      version != 1 ||  //
      !CBS_get_asn1(&params, &field_id, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1(&field_id, &field_type, CBS_ASN1_OBJECT) ||
      CBS_len(&field_type) != sizeof(kPrimeField) ||
      OPENSSL_memcmp(CBS_data(&field_type), kPrimeField,
                     sizeof(kPrimeField)) != 0 ||
      !CBS_get_asn1(&field_id, &out->prime, CBS_ASN1_INTEGER) ||
      !CBS_is_unsigned_asn1_integer(&out->prime) ||  //
      CBS_len(&field_id) != 0 ||
      !CBS_get_asn1(&params, &curve, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1(&curve, &out->a, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1(&curve, &out->b, CBS_ASN1_OCTETSTRING) ||
      // |curve| has an optional BIT STRING seed which we ignore.
      !CBS_get_optional_asn1(&curve, NULL, NULL, CBS_ASN1_BITSTRING) ||
      CBS_len(&curve) != 0 ||
      !CBS_get_asn1(&params, &base, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1(&params, &out->order, CBS_ASN1_INTEGER) ||
      !CBS_is_unsigned_asn1_integer(&out->order) ||
      !CBS_get_optional_asn1(&params, &cofactor, &has_cofactor,
                             CBS_ASN1_INTEGER) ||
      CBS_len(&params) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
    return 0;
  }

  if (has_cofactor) {
    // We only support prime-order curves so the cofactor must be one.
    if (CBS_len(&cofactor) != 1 ||  //
        CBS_data(&cofactor)[0] != 1) {
      OPENSSL_PUT_ERROR(EC, EC_R_UNKNOWN_GROUP);
      return 0;
    }
  }

  // Require that the base point use uncompressed form.
  uint8_t form;
  if (!CBS_get_u8(&base, &form) || form != POINT_CONVERSION_UNCOMPRESSED) {
    OPENSSL_PUT_ERROR(EC, EC_R_INVALID_FORM);
    return 0;
  }

  if (CBS_len(&base) % 2 != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
    return 0;
  }
  size_t field_len = CBS_len(&base) / 2;
  CBS_init(&out->base_x, CBS_data(&base), field_len);
  CBS_init(&out->base_y, CBS_data(&base) + field_len, field_len);

  return 1;
}

// integers_equal returns one if |bytes| is a big-endian encoding of |bn|, and
// zero otherwise.
static int integers_equal(const CBS *bytes, const BIGNUM *bn) {
  // Although, in SEC 1, Field-Element-to-Octet-String has a fixed width,
  // OpenSSL mis-encodes the |a| and |b|, so we tolerate any number of leading
  // zeros. (This matters for P-521 whose |b| has a leading 0.)
  CBS copy = *bytes;
  while (CBS_len(&copy) > 0 && CBS_data(&copy)[0] == 0) {
    CBS_skip(&copy, 1);
  }

  if (CBS_len(&copy) > EC_MAX_BYTES) {
    return 0;
  }
  uint8_t buf[EC_MAX_BYTES];
  if (!BN_bn2bin_padded(buf, CBS_len(&copy), bn)) {
    ERR_clear_error();
    return 0;
  }

  return CBS_mem_equal(&copy, buf, CBS_len(&copy));
}

// EC_KEY_parse_curve_name reads a named-curve OBJECT IDENTIFIER from |cbs|
// and returns the matching built-in group, or NULL if the OID is unknown.
EC_GROUP *EC_KEY_parse_curve_name(CBS *cbs) {
  CBS named_curve;
  if (!CBS_get_asn1(cbs, &named_curve, CBS_ASN1_OBJECT)) {
    OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
    return NULL;
  }

  // Look for a matching curve.
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kAllGroups); i++) {
    const EC_GROUP *group = kAllGroups[i]();
    if (CBS_mem_equal(&named_curve, group->oid, group->oid_len)) {
      return (EC_GROUP *)group;
    }
  }

  OPENSSL_PUT_ERROR(EC, EC_R_UNKNOWN_GROUP);
  return NULL;
}

// EC_KEY_marshal_curve_name writes |group|'s OID to |cbb|; only named
// (built-in) groups can be encoded.
int EC_KEY_marshal_curve_name(CBB *cbb, const EC_GROUP *group) {
  if (group->oid_len == 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_UNKNOWN_GROUP);
    return 0;
  }

  CBB child;
  return CBB_add_asn1(cbb, &child, CBS_ASN1_OBJECT) &&
         CBB_add_bytes(&child, group->oid, group->oid_len) &&  //
         CBB_flush(cbb);
}

// EC_KEY_parse_parameters accepts either a named curve OID or an explicit
// prime-curve encoding, matching the latter against the built-in groups by
// comparing order, prime, coefficients, and generator.
EC_GROUP *EC_KEY_parse_parameters(CBS *cbs) {
  if (!CBS_peek_asn1_tag(cbs, CBS_ASN1_SEQUENCE)) {
    return EC_KEY_parse_curve_name(cbs);
  }

  // OpenSSL sometimes produces ECPrivateKeys with explicitly-encoded versions
  // of named curves.
  //
  // TODO(davidben): Remove support for this.
  struct explicit_prime_curve curve;
  if (!parse_explicit_prime_curve(cbs, &curve)) {
    return NULL;
  }

  const EC_GROUP *ret = NULL;
  BIGNUM *p = BN_new(), *a = BN_new(), *b = BN_new(), *x = BN_new(),
         *y = BN_new();
  if (p == NULL || a == NULL || b == NULL || x == NULL || y == NULL) {
    goto err;
  }

  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kAllGroups); i++) {
    const EC_GROUP *group = kAllGroups[i]();
    if (!integers_equal(&curve.order, EC_GROUP_get0_order(group))) {
      continue;
    }

    // The order alone uniquely identifies the group, but we check the other
    // parameters to avoid misinterpreting the group.
    if (!EC_GROUP_get_curve_GFp(group, p, a, b, NULL)) {
      goto err;
    }
    if (!integers_equal(&curve.prime, p) || !integers_equal(&curve.a, a) ||
        !integers_equal(&curve.b, b)) {
      break;
    }
    if (!EC_POINT_get_affine_coordinates_GFp(
            group, EC_GROUP_get0_generator(group), x, y, NULL)) {
      goto err;
    }
    if (!integers_equal(&curve.base_x, x) ||
        !integers_equal(&curve.base_y, y)) {
      break;
    }
    ret = group;
    break;
  }

  if (ret == NULL) {
    OPENSSL_PUT_ERROR(EC, EC_R_UNKNOWN_GROUP);
  }

err:
  BN_free(p);
  BN_free(a);
  BN_free(b);
  BN_free(x);
  BN_free(y);
  return (EC_GROUP *)ret;
}

// EC_POINT_point2cbb serializes |point| in |form| into |out| via
// |EC_POINT_point2oct|. Returns one on success and zero on error.
int EC_POINT_point2cbb(CBB *out, const EC_GROUP *group, const EC_POINT *point,
                       point_conversion_form_t form, BN_CTX *ctx) {
  size_t len = EC_POINT_point2oct(group, point, form, NULL, 0, ctx);
  if (len == 0) {
    return 0;
  }
  uint8_t *p;
  return CBB_add_space(out, &p, len) &&
         EC_POINT_point2oct(group, point, form, p, len, ctx) == len;
}

EC_KEY *d2i_ECPrivateKey(EC_KEY **out, const uint8_t **inp, long len) {
  // This function treats its |out| parameter differently from other |d2i|
  // functions. If supplied, take the group from |*out|.
  const EC_GROUP *group = NULL;
  if (out != NULL && *out != NULL) {
    group = EC_KEY_get0_group(*out);
  }

  if (len < 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
    return NULL;
  }
  CBS cbs;
  CBS_init(&cbs, *inp, (size_t)len);
  EC_KEY *ret = EC_KEY_parse_private_key(&cbs, group);
  if (ret == NULL) {
    return NULL;
  }
  if (out != NULL) {
    EC_KEY_free(*out);
    *out = ret;
  }
  *inp = CBS_data(&cbs);
  return ret;
}

int i2d_ECPrivateKey(const EC_KEY *key, uint8_t **outp) {
  CBB cbb;
  if (!CBB_init(&cbb, 0) ||
      !EC_KEY_marshal_private_key(&cbb, key, EC_KEY_get_enc_flags(key))) {
    CBB_cleanup(&cbb);
    return -1;
  }
  return CBB_finish_i2d(&cbb, outp);
}

EC_GROUP *d2i_ECPKParameters(EC_GROUP **out, const uint8_t **inp, long len) {
  if (len < 0) {
    return NULL;
  }

  CBS cbs;
  CBS_init(&cbs, *inp, (size_t)len);
  EC_GROUP *ret = EC_KEY_parse_parameters(&cbs);
  if (ret == NULL) {
    return NULL;
  }

  if (out != NULL) {
    EC_GROUP_free(*out);
    *out = ret;
  }
  *inp = CBS_data(&cbs);
  return ret;
}

int i2d_ECPKParameters(const EC_GROUP *group, uint8_t **outp) {
  if (group == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return -1;
  }

  CBB cbb;
  if (!CBB_init(&cbb, 0) ||  //
      !EC_KEY_marshal_curve_name(&cbb, group)) {
    CBB_cleanup(&cbb);
    return -1;
  }
  return CBB_finish_i2d(&cbb, outp);
}

EC_KEY *d2i_ECParameters(EC_KEY **out_key, const uint8_t **inp, long len) {
  if (len < 0) {
    return NULL;
  }

  CBS cbs;
  CBS_init(&cbs, *inp, (size_t)len);
  const EC_GROUP *group = EC_KEY_parse_parameters(&cbs);
  if (group == NULL) {
    return NULL;
  }

  EC_KEY *ret = EC_KEY_new();
  if (ret == NULL || !EC_KEY_set_group(ret, group)) {
    EC_KEY_free(ret);
    return NULL;
  }

  if (out_key != NULL) {
    EC_KEY_free(*out_key);
    *out_key = ret;
  }
  *inp = CBS_data(&cbs);
  return ret;
}

int i2d_ECParameters(const EC_KEY *key, uint8_t **outp) {
  if (key == NULL || key->group == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return -1;
  }

  CBB cbb;
  if (!CBB_init(&cbb, 0) ||  //
      !EC_KEY_marshal_curve_name(&cbb, key->group)) {
    CBB_cleanup(&cbb);
    return -1;
  }
  return CBB_finish_i2d(&cbb, outp);
}

// o2i_ECPublicKey reads a bare octet-encoded public point into |*keyp|,
// which must already carry a group. Returns |*keyp| on success, NULL on
// error.
EC_KEY *o2i_ECPublicKey(EC_KEY **keyp, const uint8_t **inp, long len) {
  EC_KEY *ret = NULL;

  if (keyp == NULL || *keyp == NULL || (*keyp)->group == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return NULL;
  }
  ret = *keyp;
  if (ret->pub_key == NULL &&
      (ret->pub_key = EC_POINT_new(ret->group)) == NULL) {
    return NULL;
  }
  if (!EC_POINT_oct2point(ret->group, ret->pub_key, *inp, len, NULL)) {
    OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB);
    return NULL;
  }
  // save the point conversion form
  ret->conv_form = (point_conversion_form_t)(*inp[0] & ~0x01);
  *inp += len;
  return ret;
}

int i2o_ECPublicKey(const EC_KEY *key, uint8_t **outp) {
  if (key == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }
  CBB cbb;
  if (!CBB_init(&cbb, 0) ||  //
      !EC_POINT_point2cbb(&cbb, key->group, key->pub_key, key->conv_form,
                          NULL)) {
    CBB_cleanup(&cbb);
    return -1;
  }
  int ret = CBB_finish_i2d(&cbb, outp);
  // Historically, this function used the wrong return value on error.
  return ret > 0 ? ret : 0;
}

// EC_get_builtin_curves copies up to |max_num_curves| descriptors of the
// built-in groups into |out_curves| and returns the total number available.
size_t EC_get_builtin_curves(EC_builtin_curve *out_curves,
                             size_t max_num_curves) {
  if (max_num_curves > OPENSSL_ARRAY_SIZE(kAllGroups)) {
    max_num_curves = OPENSSL_ARRAY_SIZE(kAllGroups);
  }
  for (size_t i = 0; i < max_num_curves; i++) {
    const EC_GROUP *group = kAllGroups[i]();
    out_curves[i].nid = group->curve_name;
    out_curves[i].comment = group->comment;
  }
  return OPENSSL_ARRAY_SIZE(kAllGroups);
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/ec/ec_derive.cc
================================================
/* Copyright 2019 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the angle-bracket header names below were stripped by the
// text extraction; the bare #include directives are preserved verbatim.
#include
#include
#include
#include
#include
#include
#include

#include "../fipsmodule/ec/internal.h"


// EC_KEY_derive_from_secret deterministically derives a private/public key
// pair on |group| from |secret| via HKDF-SHA256 (labelled with the curve's
// NIST name), reducing the expanded bytes modulo the group order with at
// most 2^-128 bias. Returns the new key or NULL on error.
EC_KEY *EC_KEY_derive_from_secret(const EC_GROUP *group, const uint8_t *secret,
                                  size_t secret_len) {
#define EC_KEY_DERIVE_MAX_NAME_LEN 16
  const char *name = EC_curve_nid2nist(EC_GROUP_get_curve_name(group));
  if (name == NULL || strlen(name) > EC_KEY_DERIVE_MAX_NAME_LEN) {
    OPENSSL_PUT_ERROR(EC, EC_R_UNKNOWN_GROUP);
    return NULL;
  }

  // Assemble a label string to provide some key separation in case |secret| is
  // misused, but ultimately it's on the caller to ensure |secret| is suitably
  // separated.
  static const char kLabel[] = "derive EC key ";
  char info[sizeof(kLabel) + EC_KEY_DERIVE_MAX_NAME_LEN];
  OPENSSL_strlcpy(info, kLabel, sizeof(info));
  OPENSSL_strlcat(info, name, sizeof(info));

  // Generate 128 bits beyond the group order so the bias is at most 2^-128.
#define EC_KEY_DERIVE_EXTRA_BITS 128
#define EC_KEY_DERIVE_EXTRA_BYTES (EC_KEY_DERIVE_EXTRA_BITS / 8)

  if (EC_GROUP_order_bits(group) <= EC_KEY_DERIVE_EXTRA_BITS + 8) {
    // The reduction strategy below requires the group order be large enough.
    // (The actual bound is a bit tighter, but our curves are much larger than
    // 128-bit.)
    OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR);
    return NULL;
  }

  uint8_t derived[EC_KEY_DERIVE_EXTRA_BYTES + EC_MAX_BYTES];
  size_t derived_len =
      BN_num_bytes(EC_GROUP_get0_order(group)) + EC_KEY_DERIVE_EXTRA_BYTES;
  assert(derived_len <= sizeof(derived));
  if (!HKDF(derived, derived_len, EVP_sha256(), secret, secret_len,
            /*salt=*/NULL, /*salt_len=*/0, (const uint8_t *)info,
            strlen(info))) {
    return NULL;
  }

  EC_KEY *key = EC_KEY_new();
  BN_CTX *ctx = BN_CTX_new();
  BIGNUM *priv = BN_bin2bn(derived, derived_len, NULL);
  EC_POINT *pub = EC_POINT_new(group);
  if (key == NULL || ctx == NULL || priv == NULL || pub == NULL ||
      // Reduce |priv| with Montgomery reduction. First, convert "from"
      // Montgomery form to compute |priv| * R^-1 mod |order|. This requires
      // |priv| be under order * R, which is true if the group order is large
      // enough. 2^(num_bytes(order)) < 2^8 * order, so:
      //
      //    priv < 2^8 * order * 2^128 < order * order < order * R
      !BN_from_montgomery(priv, priv, &group->order, ctx) ||
      // Multiply by R^2 and do another Montgomery reduction to compute
      // priv * R^-1 * R^2 * R^-1 = priv mod order.
      !BN_to_montgomery(priv, priv, &group->order, ctx) ||
      !EC_POINT_mul(group, pub, priv, NULL, NULL, ctx) ||
      !EC_KEY_set_group(key, group) || !EC_KEY_set_public_key(key, pub) ||
      !EC_KEY_set_private_key(key, priv)) {
    EC_KEY_free(key);
    key = NULL;
    goto err;
  }

err:
  // Scrub the derived secret bytes before returning.
  OPENSSL_cleanse(derived, sizeof(derived));
  BN_CTX_free(ctx);
  BN_free(priv);
  EC_POINT_free(pub);
  return key;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/ec/hash_to_curve.cc
================================================
/* Copyright 2020 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the angle-bracket header names below were stripped by the
// text extraction; the bare #include directives are preserved verbatim.
#include
#include
#include
#include
#include

#include "../fipsmodule/bn/internal.h"
#include "../fipsmodule/ec/internal.h"
#include "../internal.h"
#include "internal.h"


// This file implements hash-to-curve, as described in RFC 9380.
//
// This hash-to-curve implementation is written generically with the
// expectation that we will eventually wish to support other curves. If it
// becomes a performance bottleneck, some possible optimizations by
// specializing it to the curve:
//
// - Rather than using a generic |felem_exp|, specialize the exponentation to
//   c2 with a faster addition chain.
//
// - |felem_mul| and |felem_sqr| are indirect calls to generic Montgomery
//   code. Given the few curves, we could specialize
//   |map_to_curve_simple_swu|. But doing this reasonably without duplicating
//   code in C is difficult. (C++ templates would be useful here.)
//
// - P-521's Z and c2 have small power-of-two absolute values. We could save
//   two multiplications in SSWU. (Other curves have reasonable values of Z
//   and inconvenient c2.) This is unlikely to be worthwhile without C++
//   templates to make specializing more convenient.

// expand_message_xmd implements the operation described in section 5.3.1 of
// RFC 9380. It returns one on success and zero on error.
static int expand_message_xmd(const EVP_MD *md, uint8_t *out, size_t out_len, const uint8_t *msg, size_t msg_len, const uint8_t *dst, size_t dst_len) { // See https://github.com/cfrg/draft-irtf-cfrg-hash-to-curve/issues/352 if (dst_len == 0) { OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } int ret = 0; const size_t block_size = EVP_MD_block_size(md); const size_t md_size = EVP_MD_size(md); EVP_MD_CTX ctx; EVP_MD_CTX_init(&ctx); { // Long DSTs are hashed down to size. See section 5.3.3. static_assert(EVP_MAX_MD_SIZE < 256, "hashed DST still too large"); uint8_t dst_buf[EVP_MAX_MD_SIZE]; if (dst_len >= 256) { static const char kPrefix[] = "H2C-OVERSIZE-DST-"; if (!EVP_DigestInit_ex(&ctx, md, NULL) || !EVP_DigestUpdate(&ctx, kPrefix, sizeof(kPrefix) - 1) || !EVP_DigestUpdate(&ctx, dst, dst_len) || !EVP_DigestFinal_ex(&ctx, dst_buf, NULL)) { goto err; } dst = dst_buf; dst_len = md_size; } uint8_t dst_len_u8 = (uint8_t)dst_len; // Compute b_0. static const uint8_t kZeros[EVP_MAX_MD_BLOCK_SIZE] = {0}; // If |out_len| exceeds 16 bits then |i| will wrap below causing an error to // be returned. This depends on the static assert above. uint8_t l_i_b_str_zero[3] = {static_cast(out_len >> 8), static_cast(out_len), 0}; uint8_t b_0[EVP_MAX_MD_SIZE]; if (!EVP_DigestInit_ex(&ctx, md, NULL) || !EVP_DigestUpdate(&ctx, kZeros, block_size) || !EVP_DigestUpdate(&ctx, msg, msg_len) || !EVP_DigestUpdate(&ctx, l_i_b_str_zero, sizeof(l_i_b_str_zero)) || !EVP_DigestUpdate(&ctx, dst, dst_len) || !EVP_DigestUpdate(&ctx, &dst_len_u8, 1) || !EVP_DigestFinal_ex(&ctx, b_0, NULL)) { goto err; } uint8_t b_i[EVP_MAX_MD_SIZE]; uint8_t i = 1; while (out_len > 0) { if (i == 0) { // Input was too large. 
OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR); goto err; } if (i > 1) { for (size_t j = 0; j < md_size; j++) { b_i[j] ^= b_0[j]; } } else { OPENSSL_memcpy(b_i, b_0, md_size); } if (!EVP_DigestInit_ex(&ctx, md, NULL) || !EVP_DigestUpdate(&ctx, b_i, md_size) || !EVP_DigestUpdate(&ctx, &i, 1) || !EVP_DigestUpdate(&ctx, dst, dst_len) || !EVP_DigestUpdate(&ctx, &dst_len_u8, 1) || !EVP_DigestFinal_ex(&ctx, b_i, NULL)) { goto err; } size_t todo = out_len >= md_size ? md_size : out_len; OPENSSL_memcpy(out, b_i, todo); out += todo; out_len -= todo; i++; } ret = 1; } err: EVP_MD_CTX_cleanup(&ctx); return ret; } // num_bytes_to_derive determines the number of bytes to derive when hashing to // a number modulo |modulus|. See the hash_to_field operation defined in // section 5.2 of RFC 9380. static int num_bytes_to_derive(size_t *out, const BIGNUM *modulus, unsigned k) { size_t bits = BN_num_bits(modulus); size_t L = (bits + k + 7) / 8; // We require 2^(8*L) < 2^(2*bits - 2) <= n^2 so to fit in bounds for // |felem_reduce| and |ec_scalar_reduce|. All defined hash-to-curve suites // define |k| to be well under this bound. (|k| is usually around half of // |p_bits|.) if (L * 8 >= 2 * bits - 2 || L > 2 * EC_MAX_BYTES) { assert(0); OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR); return 0; } *out = L; return 1; } // big_endian_to_words decodes |in| as a big-endian integer and writes the // result to |out|. |num_words| must be large enough to contain the output. static void big_endian_to_words(BN_ULONG *out, size_t num_words, const uint8_t *in, size_t len) { assert(len <= num_words * sizeof(BN_ULONG)); // Ensure any excess bytes are zeroed. OPENSSL_memset(out, 0, num_words * sizeof(BN_ULONG)); uint8_t *out_u8 = (uint8_t *)out; for (size_t i = 0; i < len; i++) { out_u8[len - 1 - i] = in[i]; } } // hash_to_field implements the operation described in section 5.2 // of RFC 9380, with count = 2. |k| is the security factor. 
static int hash_to_field2(const EC_GROUP *group, const EVP_MD *md,
                          EC_FELEM *out1, EC_FELEM *out2, const uint8_t *dst,
                          size_t dst_len, unsigned k, const uint8_t *msg,
                          size_t msg_len) {
  size_t L;
  // |buf| must hold 2*L bytes; |num_bytes_to_derive| guarantees
  // L <= 2 * EC_MAX_BYTES, so 4 * EC_MAX_BYTES suffices.
  uint8_t buf[4 * EC_MAX_BYTES];
  if (!num_bytes_to_derive(&L, &group->field.N, k) ||
      !expand_message_xmd(md, buf, 2 * L, msg, msg_len, dst, dst_len)) {
    return 0;
  }
  // Split the 2*L-byte output in half and reduce each half modulo the field
  // prime to obtain the two field elements.
  BN_ULONG words[2 * EC_MAX_WORDS];
  size_t num_words = 2 * group->field.N.width;
  big_endian_to_words(words, num_words, buf, L);
  group->meth->felem_reduce(group, out1, words, num_words);
  big_endian_to_words(words, num_words, buf + L, L);
  group->meth->felem_reduce(group, out2, words, num_words);
  return 1;
}

// hash_to_scalar behaves like |hash_to_field2| but returns a value modulo the
// group order rather than a field element. |k| is the security factor.
static int hash_to_scalar(const EC_GROUP *group, const EVP_MD *md,
                          EC_SCALAR *out, const uint8_t *dst, size_t dst_len,
                          unsigned k, const uint8_t *msg, size_t msg_len) {
  const BIGNUM *order = EC_GROUP_get0_order(group);
  size_t L;
  uint8_t buf[EC_MAX_BYTES * 2];
  if (!num_bytes_to_derive(&L, order, k) ||
      !expand_message_xmd(md, buf, L, msg, msg_len, dst, dst_len)) {
    return 0;
  }

  // Reduce the L-byte string modulo the group order.
  BN_ULONG words[2 * EC_MAX_WORDS];
  size_t num_words = 2 * order->width;
  big_endian_to_words(words, num_words, buf, L);
  ec_scalar_reduce(group, out, words, num_words);
  return 1;
}

// mul_A computes |out| = A * |in|, where A is the curve's "a" coefficient.
// Only valid for curves with a = -3 (all curves supported here).
static inline void mul_A(const EC_GROUP *group, EC_FELEM *out,
                         const EC_FELEM *in) {
  assert(group->a_is_minus3);
  EC_FELEM tmp;
  ec_felem_add(group, &tmp, in, in);      // tmp = 2*in
  ec_felem_add(group, &tmp, &tmp, &tmp);  // tmp = 4*in
  ec_felem_sub(group, out, in, &tmp);     // out = -3*in
}

// sgn0 implements the operation described in section 4.1.2 of RFC 9380.
// sgn0 returns the low bit of |a|'s canonical byte encoding, i.e. its "sign".
static BN_ULONG sgn0(const EC_GROUP *group, const EC_FELEM *a) {
  uint8_t buf[EC_MAX_BYTES];
  size_t len;
  ec_felem_to_bytes(group, buf, &len, a);
  // The encoding is big-endian, so the parity bit is in the last byte.
  return buf[len - 1] & 1;
}

// is_3mod4 returns one if the group's field prime is congruent to 3 mod 4.
// Only referenced from assertions, hence [[maybe_unused]].
[[maybe_unused]] static int is_3mod4(const EC_GROUP *group) {
  return group->field.N.width > 0 && (group->field.N.d[0] & 3) == 3;
}

// sqrt_ratio_3mod4 implements the operation described in appendix F.2.1.2
// of RFC 9380. It runs in constant time: the returned mask |isQR| is all-ones
// when u/v was square (and |out_y| = sqrt(u/v)), all-zeros otherwise.
static BN_ULONG sqrt_ratio_3mod4(const EC_GROUP *group, const EC_FELEM *Z,
                                 const BN_ULONG *c1, size_t num_c1,
                                 const EC_FELEM *c2, EC_FELEM *out_y,
                                 const EC_FELEM *u, const EC_FELEM *v) {
  assert(is_3mod4(group));

  // Cache the group's generic field operations to shorten the lines below.
  void (*const felem_mul)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a,
                          const EC_FELEM *b) = group->meth->felem_mul;
  void (*const felem_sqr)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a) =
      group->meth->felem_sqr;

  EC_FELEM tv1, tv2, tv3, y1, y2;
  felem_sqr(group, &tv1, v);                             // 1. tv1 = v^2
  felem_mul(group, &tv2, u, v);                          // 2. tv2 = u * v
  felem_mul(group, &tv1, &tv1, &tv2);                    // 3. tv1 = tv1 * tv2
  group->meth->felem_exp(group, &y1, &tv1, c1, num_c1);  // 4. y1 = tv1^c1
  felem_mul(group, &y1, &y1, &tv2);                      // 5. y1 = y1 * tv2
  felem_mul(group, &y2, &y1, c2);                        // 6. y2 = y1 * c2
  felem_sqr(group, &tv3, &y1);                           // 7. tv3 = y1^2
  felem_mul(group, &tv3, &tv3, v);                       // 8. tv3 = tv3 * v

  // 9. isQR = tv3 == u
  // 10. y = CMOV(y2, y1, isQR)
  // 11. return (isQR, y)
  //
  // Note the specification's CMOV function and our |ec_felem_select| have the
  // opposite argument order.
  ec_felem_sub(group, &tv1, &tv3, u);
  const BN_ULONG isQR = ~ec_felem_non_zero_mask(group, &tv1);
  ec_felem_select(group, out_y, isQR, &y1, &y2);
  return isQR;
}

// map_to_curve_simple_swu implements the operation described in section 6.6.2
// of RFC 9380, using the straight-line implementation in appendix F.2.
static void map_to_curve_simple_swu(const EC_GROUP *group, const EC_FELEM *Z,
                                    const BN_ULONG *c1, size_t num_c1,
                                    const EC_FELEM *c2, EC_JACOBIAN *out,
                                    const EC_FELEM *u) {
  // This function requires the prime be 3 mod 4, and that A = -3.
  assert(is_3mod4(group));
  assert(group->a_is_minus3);

  // Cache the group's generic field operations to shorten the lines below.
  void (*const felem_mul)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a,
                          const EC_FELEM *b) = group->meth->felem_mul;
  void (*const felem_sqr)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a) =
      group->meth->felem_sqr;

  // Numbered comments follow the steps in RFC 9380, appendix F.2.
  EC_FELEM tv1, tv2, tv3, tv4, tv5, tv6, x, y, y1;
  felem_sqr(group, &tv1, u);                             // 1. tv1 = u^2
  felem_mul(group, &tv1, Z, &tv1);                       // 2. tv1 = Z * tv1
  felem_sqr(group, &tv2, &tv1);                          // 3. tv2 = tv1^2
  ec_felem_add(group, &tv2, &tv2, &tv1);                 // 4. tv2 = tv2 + tv1
  ec_felem_add(group, &tv3, &tv2, ec_felem_one(group));  // 5. tv3 = tv2 + 1
  felem_mul(group, &tv3, &group->b, &tv3);               // 6. tv3 = B * tv3

  // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
  const BN_ULONG tv2_non_zero = ec_felem_non_zero_mask(group, &tv2);
  ec_felem_neg(group, &tv4, &tv2);
  ec_felem_select(group, &tv4, tv2_non_zero, &tv4, Z);

  mul_A(group, &tv4, &tv4);                 // 8. tv4 = A * tv4
  felem_sqr(group, &tv2, &tv3);             // 9. tv2 = tv3^2
  felem_sqr(group, &tv6, &tv4);             // 10. tv6 = tv4^2
  mul_A(group, &tv5, &tv6);                 // 11. tv5 = A * tv6
  ec_felem_add(group, &tv2, &tv2, &tv5);    // 12. tv2 = tv2 + tv5
  felem_mul(group, &tv2, &tv2, &tv3);       // 13. tv2 = tv2 * tv3
  felem_mul(group, &tv6, &tv6, &tv4);       // 14. tv6 = tv6 * tv4
  felem_mul(group, &tv5, &group->b, &tv6);  // 15. tv5 = B * tv6
  ec_felem_add(group, &tv2, &tv2, &tv5);    // 16. tv2 = tv2 + tv5
  felem_mul(group, &x, &tv1, &tv3);         // 17. x = tv1 * tv3

  // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
  const BN_ULONG is_gx1_square =
      sqrt_ratio_3mod4(group, Z, c1, num_c1, c2, &y1, &tv2, &tv6);

  felem_mul(group, &y, &tv1, u);  // 19. y = tv1 * u
  felem_mul(group, &y, &y, &y1);  // 20. y = y * y1

  // 21. x = CMOV(x, tv3, is_gx1_square)
  ec_felem_select(group, &x, is_gx1_square, &tv3, &x);
  // 22. y = CMOV(y, y1, is_gx1_square)
  ec_felem_select(group, &y, is_gx1_square, &y1, &y);

  // 23. e1 = sgn0(u) == sgn0(y)
  BN_ULONG sgn0_u = sgn0(group, u);
  BN_ULONG sgn0_y = sgn0(group, &y);
  BN_ULONG not_e1 = sgn0_u ^ sgn0_y;
  not_e1 = ((BN_ULONG)0) - not_e1;

  // 24. y = CMOV(-y, y, e1)
  ec_felem_neg(group, &tv1, &y);
  ec_felem_select(group, &y, not_e1, &tv1, &y);

  // 25. x = x / tv4
  //
  // Our output is in projective coordinates, so rather than inverting |tv4|
  // now, represent (x / tv4, y) as (x * tv4, y * tv4^3, tv4). This is much more
  // efficient if the caller will do further computation on the output. (If the
  // caller will immediately convert to affine coordinates, it is slightly less
  // efficient, but only by a few field multiplications.)
  felem_mul(group, &out->X, &x, &tv4);
  felem_mul(group, &out->Y, &y, &tv6);
  out->Z = tv4;
}

// hash_to_curve maps |msg| to two field elements, maps each through SSWU, and
// adds the results, implementing the hash_to_curve operation of RFC 9380 for
// curves with cofactor one. Returns one on success and zero on error.
static int hash_to_curve(const EC_GROUP *group, const EVP_MD *md,
                         const EC_FELEM *Z, const EC_FELEM *c2, unsigned k,
                         EC_JACOBIAN *out, const uint8_t *dst, size_t dst_len,
                         const uint8_t *msg, size_t msg_len) {
  EC_FELEM u0, u1;
  if (!hash_to_field2(group, md, &u0, &u1, dst, dst_len, k, msg, msg_len)) {
    return 0;
  }

  // Compute |c1| = (p - 3) / 4.
  BN_ULONG c1[EC_MAX_WORDS];
  size_t num_c1 = group->field.N.width;
  if (!bn_copy_words(c1, num_c1, &group->field.N)) {
    return 0;
  }
  bn_rshift_words(c1, c1, /*shift=*/2, /*num=*/num_c1);

  EC_JACOBIAN Q0, Q1;
  map_to_curve_simple_swu(group, Z, c1, num_c1, c2, &Q0, &u0);
  map_to_curve_simple_swu(group, Z, c1, num_c1, c2, &Q1, &u1);

  group->meth->add(group, out, &Q0, &Q1);  // R = Q0 + Q1
  // All our curves have cofactor one, so |clear_cofactor| is a no-op.
  return 1;
}

// felem_from_u8 writes the single-byte value |a| to |out| as a field element,
// by placing it in the least significant byte of a big-endian encoding.
static int felem_from_u8(const EC_GROUP *group, EC_FELEM *out, uint8_t a) {
  uint8_t bytes[EC_MAX_BYTES] = {0};
  size_t len = BN_num_bytes(&group->field.N);
  bytes[len - 1] = a;
  return ec_felem_from_bytes(group, out, bytes, len);
}

// kP256Sqrt10 is sqrt(10) in P-256's field.
It was computed as follows in // python3: // // p = 2**256 - 2**224 + 2**192 + 2**96 - 1 // c2 = pow(10, (p+1)//4, p) // assert pow(c2, 2, p) == 10 // ", ".join("0x%02x" % b for b in c2.to_bytes(256//8, 'big')) static const uint8_t kP256Sqrt10[] = { 0xda, 0x53, 0x8e, 0x3b, 0xe1, 0xd8, 0x9b, 0x99, 0xc9, 0x78, 0xfc, 0x67, 0x51, 0x80, 0xaa, 0xb2, 0x7b, 0x8d, 0x1f, 0xf8, 0x4c, 0x55, 0xd5, 0xb6, 0x2c, 0xcd, 0x34, 0x27, 0xe4, 0x33, 0xc4, 0x7f}; // kP384Sqrt12 is sqrt(12) in P-384's field. It was computed as follows in // python3: // // p = 2**384 - 2**128 - 2**96 + 2**32 - 1 // c2 = pow(12, (p+1)//4, p) // assert pow(c2, 2, p) == 12 // ", ".join("0x%02x" % b for b in c2.to_bytes(384//8, 'big')) static const uint8_t kP384Sqrt12[] = { 0x2a, 0xcc, 0xb4, 0xa6, 0x56, 0xb0, 0x24, 0x9c, 0x71, 0xf0, 0x50, 0x0e, 0x83, 0xda, 0x2f, 0xdd, 0x7f, 0x98, 0xe3, 0x83, 0xd6, 0x8b, 0x53, 0x87, 0x1f, 0x87, 0x2f, 0xcb, 0x9c, 0xcb, 0x80, 0xc5, 0x3c, 0x0d, 0xe1, 0xf8, 0xa8, 0x0f, 0x7e, 0x19, 0x14, 0xe2, 0xec, 0x69, 0xf5, 0xa6, 0x26, 0xb3}; int ec_hash_to_curve_p256_xmd_sha256_sswu(const EC_GROUP *group, EC_JACOBIAN *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len) { // See section 8.3 of RFC 9380. 
if (EC_GROUP_get_curve_name(group) != NID_X9_62_prime256v1) { OPENSSL_PUT_ERROR(EC, EC_R_GROUP_MISMATCH); return 0; } // Z = -10, c2 = sqrt(10) EC_FELEM Z, c2; if (!felem_from_u8(group, &Z, 10) || !ec_felem_from_bytes(group, &c2, kP256Sqrt10, sizeof(kP256Sqrt10))) { return 0; } ec_felem_neg(group, &Z, &Z); return hash_to_curve(group, EVP_sha256(), &Z, &c2, /*k=*/128, out, dst, dst_len, msg, msg_len); } int EC_hash_to_curve_p256_xmd_sha256_sswu(const EC_GROUP *group, EC_POINT *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len) { if (EC_GROUP_cmp(group, out->group, NULL) != 0) { OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); return 0; } return ec_hash_to_curve_p256_xmd_sha256_sswu(group, &out->raw, dst, dst_len, msg, msg_len); } int ec_hash_to_curve_p384_xmd_sha384_sswu(const EC_GROUP *group, EC_JACOBIAN *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len) { // See section 8.3 of RFC 9380. if (EC_GROUP_get_curve_name(group) != NID_secp384r1) { OPENSSL_PUT_ERROR(EC, EC_R_GROUP_MISMATCH); return 0; } // Z = -12, c2 = sqrt(12) EC_FELEM Z, c2; if (!felem_from_u8(group, &Z, 12) || !ec_felem_from_bytes(group, &c2, kP384Sqrt12, sizeof(kP384Sqrt12))) { return 0; } ec_felem_neg(group, &Z, &Z); return hash_to_curve(group, EVP_sha384(), &Z, &c2, /*k=*/192, out, dst, dst_len, msg, msg_len); } int EC_hash_to_curve_p384_xmd_sha384_sswu(const EC_GROUP *group, EC_POINT *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len) { if (EC_GROUP_cmp(group, out->group, NULL) != 0) { OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); return 0; } return ec_hash_to_curve_p384_xmd_sha384_sswu(group, &out->raw, dst, dst_len, msg, msg_len); } int ec_hash_to_scalar_p384_xmd_sha384(const EC_GROUP *group, EC_SCALAR *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len) { if (EC_GROUP_get_curve_name(group) != NID_secp384r1) { OPENSSL_PUT_ERROR(EC, EC_R_GROUP_MISMATCH); return 0; } return 
hash_to_scalar(group, EVP_sha384(), out, dst, dst_len, /*k=*/192, msg, msg_len); } int ec_hash_to_curve_p384_xmd_sha512_sswu_draft07( const EC_GROUP *group, EC_JACOBIAN *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len) { // See section 8.3 of draft-irtf-cfrg-hash-to-curve-07. if (EC_GROUP_get_curve_name(group) != NID_secp384r1) { OPENSSL_PUT_ERROR(EC, EC_R_GROUP_MISMATCH); return 0; } // Z = -12, c2 = sqrt(12) EC_FELEM Z, c2; if (!felem_from_u8(group, &Z, 12) || !ec_felem_from_bytes(group, &c2, kP384Sqrt12, sizeof(kP384Sqrt12))) { return 0; } ec_felem_neg(group, &Z, &Z); return hash_to_curve(group, EVP_sha512(), &Z, &c2, /*k=*/192, out, dst, dst_len, msg, msg_len); } int ec_hash_to_scalar_p384_xmd_sha512_draft07( const EC_GROUP *group, EC_SCALAR *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len) { if (EC_GROUP_get_curve_name(group) != NID_secp384r1) { OPENSSL_PUT_ERROR(EC, EC_R_GROUP_MISMATCH); return 0; } return hash_to_scalar(group, EVP_sha512(), out, dst, dst_len, /*k=*/192, msg, msg_len); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/ec/internal.h ================================================ /* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef OPENSSL_HEADER_EC_EXTRA_INTERNAL_H #define OPENSSL_HEADER_EC_EXTRA_INTERNAL_H #include #include "../fipsmodule/ec/internal.h" #if defined(__cplusplus) extern "C" { #endif // Hash-to-curve. // // Internal |EC_JACOBIAN| versions of the corresponding public APIs. // ec_hash_to_curve_p256_xmd_sha256_sswu hashes |msg| to a point on |group| and // writes the result to |out|, implementing the P256_XMD:SHA-256_SSWU_RO_ suite // from RFC 9380. It returns one on success and zero on error. OPENSSL_EXPORT int ec_hash_to_curve_p256_xmd_sha256_sswu( const EC_GROUP *group, EC_JACOBIAN *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len); // ec_hash_to_curve_p384_xmd_sha384_sswu hashes |msg| to a point on |group| and // writes the result to |out|, implementing the P384_XMD:SHA-384_SSWU_RO_ suite // from RFC 9380. It returns one on success and zero on error. OPENSSL_EXPORT int ec_hash_to_curve_p384_xmd_sha384_sswu( const EC_GROUP *group, EC_JACOBIAN *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len); // ec_hash_to_scalar_p384_xmd_sha384 hashes |msg| to a scalar on |group| // and writes the result to |out|, using the hash_to_field operation from the // P384_XMD:SHA-384_SSWU_RO_ suite from RFC 9380, but generating a value modulo // the group order rather than a field element. OPENSSL_EXPORT int ec_hash_to_scalar_p384_xmd_sha384( const EC_GROUP *group, EC_SCALAR *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len); // ec_hash_to_curve_p384_xmd_sha512_sswu_draft07 hashes |msg| to a point on // |group| and writes the result to |out|, implementing the // P384_XMD:SHA-512_SSWU_RO_ suite from draft-irtf-cfrg-hash-to-curve-07. It // returns one on success and zero on error. // // TODO(https://crbug.com/1414562): Migrate this to the final version. 
OPENSSL_EXPORT int ec_hash_to_curve_p384_xmd_sha512_sswu_draft07( const EC_GROUP *group, EC_JACOBIAN *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len); // ec_hash_to_scalar_p384_xmd_sha512_draft07 hashes |msg| to a scalar on |group| // and writes the result to |out|, using the hash_to_field operation from the // P384_XMD:SHA-512_SSWU_RO_ suite from draft-irtf-cfrg-hash-to-curve-07, but // generating a value modulo the group order rather than a field element. // // TODO(https://crbug.com/1414562): Migrate this to the final version. OPENSSL_EXPORT int ec_hash_to_scalar_p384_xmd_sha512_draft07( const EC_GROUP *group, EC_SCALAR *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_EC_EXTRA_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/ecdh/ecdh.cc ================================================ /* * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "../fipsmodule/ec/internal.h" #include "../internal.h" int ECDH_compute_key(void *out, size_t out_len, const EC_POINT *pub_key, const EC_KEY *priv_key, void *(*kdf)(const void *in, size_t inlen, void *out, size_t *out_len)) { if (priv_key->priv_key == NULL) { OPENSSL_PUT_ERROR(ECDH, ECDH_R_NO_PRIVATE_VALUE); return -1; } const EC_SCALAR *const priv = &priv_key->priv_key->scalar; const EC_GROUP *const group = EC_KEY_get0_group(priv_key); if (EC_GROUP_cmp(group, pub_key->group, NULL) != 0) { OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); return -1; } EC_JACOBIAN shared_point; uint8_t buf[EC_MAX_BYTES]; size_t buf_len; if (!ec_point_mul_scalar(group, &shared_point, &pub_key->raw, priv) || !ec_get_x_coordinate_as_bytes(group, buf, &buf_len, sizeof(buf), &shared_point)) { OPENSSL_PUT_ERROR(ECDH, ECDH_R_POINT_ARITHMETIC_FAILURE); return -1; } if (kdf != NULL) { if (kdf(buf, buf_len, out, &out_len) == NULL) { OPENSSL_PUT_ERROR(ECDH, ECDH_R_KDF_FAILED); return -1; } } else { // no KDF, just copy as much as we can if (buf_len < out_len) { out_len = buf_len; } OPENSSL_memcpy(out, buf, out_len); } if (out_len > INT_MAX) { OPENSSL_PUT_ERROR(ECDH, ERR_R_OVERFLOW); return -1; } return (int)out_len; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/ecdsa/ecdsa_asn1.cc ================================================ /* * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../bytestring/internal.h" #include "../fipsmodule/ecdsa/internal.h" #include "../internal.h" static ECDSA_SIG *ecdsa_sig_from_fixed(const EC_KEY *key, const uint8_t *in, size_t len) { const EC_GROUP *group = EC_KEY_get0_group(key); if (group == NULL) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_PASSED_NULL_PARAMETER); return NULL; } size_t scalar_len = BN_num_bytes(EC_GROUP_get0_order(group)); if (len != 2 * scalar_len) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE); return NULL; } ECDSA_SIG *ret = ECDSA_SIG_new(); if (ret == NULL || !BN_bin2bn(in, scalar_len, ret->r) || !BN_bin2bn(in + scalar_len, scalar_len, ret->s)) { ECDSA_SIG_free(ret); return NULL; } return ret; } static int ecdsa_sig_to_fixed(const EC_KEY *key, uint8_t *out, size_t *out_len, size_t max_out, const ECDSA_SIG *sig) { const EC_GROUP *group = EC_KEY_get0_group(key); if (group == NULL) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_PASSED_NULL_PARAMETER); return 0; } size_t scalar_len = BN_num_bytes(EC_GROUP_get0_order(group)); if (max_out < 2 * scalar_len) { OPENSSL_PUT_ERROR(EC, EC_R_BUFFER_TOO_SMALL); return 0; } if (BN_is_negative(sig->r) || !BN_bn2bin_padded(out, scalar_len, sig->r) || BN_is_negative(sig->s) || !BN_bn2bin_padded(out + scalar_len, scalar_len, sig->s)) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE); return 0; } *out_len = 2 * scalar_len; return 1; } int ECDSA_sign(int type, const uint8_t *digest, size_t digest_len, uint8_t *sig, unsigned int *out_sig_len, const EC_KEY *eckey) { if (eckey->ecdsa_meth && eckey->ecdsa_meth->sign) { return eckey->ecdsa_meth->sign(digest, digest_len, sig, out_sig_len, (EC_KEY *)eckey /* cast away const */); } *out_sig_len = 0; uint8_t fixed[ECDSA_MAX_FIXED_LEN]; size_t fixed_len; if (!ecdsa_sign_fixed(digest, digest_len, fixed, &fixed_len, sizeof(fixed), eckey)) { 
return 0; } // TODO(davidben): We can actually do better and go straight from the DER // format to the fixed-width format without a malloc. ECDSA_SIG *s = ecdsa_sig_from_fixed(eckey, fixed, fixed_len); if (s == NULL) { return 0; } int ret = 0; CBB cbb; CBB_init_fixed(&cbb, sig, ECDSA_size(eckey)); size_t len; if (!ECDSA_SIG_marshal(&cbb, s) || !CBB_finish(&cbb, NULL, &len)) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_ENCODE_ERROR); goto err; } *out_sig_len = (unsigned)len; ret = 1; err: ECDSA_SIG_free(s); return ret; } int ECDSA_verify(int type, const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, const EC_KEY *eckey) { // Decode the ECDSA signature. // // TODO(davidben): We can actually do better and go straight from the DER // format to the fixed-width format without a malloc. int ret = 0; uint8_t *der = NULL; ECDSA_SIG *s = ECDSA_SIG_from_bytes(sig, sig_len); if (s == NULL) { goto err; } // Defend against potential laxness in the DER parser. size_t der_len; if (!ECDSA_SIG_to_bytes(&der, &der_len, s) || der_len != sig_len || OPENSSL_memcmp(sig, der, sig_len) != 0) { // This should never happen. crypto/bytestring is strictly DER. 
OPENSSL_PUT_ERROR(ECDSA, ERR_R_INTERNAL_ERROR); goto err; } uint8_t fixed[ECDSA_MAX_FIXED_LEN]; size_t fixed_len; ret = ecdsa_sig_to_fixed(eckey, fixed, &fixed_len, sizeof(fixed), s) && ecdsa_verify_fixed(digest, digest_len, fixed, fixed_len, eckey); err: OPENSSL_free(der); ECDSA_SIG_free(s); return ret; } size_t ECDSA_size(const EC_KEY *key) { if (key == NULL) { return 0; } const EC_GROUP *group = EC_KEY_get0_group(key); if (group == NULL) { return 0; } size_t group_order_size = BN_num_bytes(EC_GROUP_get0_order(group)); return ECDSA_SIG_max_len(group_order_size); } ECDSA_SIG *ECDSA_SIG_new(void) { ECDSA_SIG *sig = reinterpret_cast(OPENSSL_malloc(sizeof(ECDSA_SIG))); if (sig == NULL) { return NULL; } sig->r = BN_new(); sig->s = BN_new(); if (sig->r == NULL || sig->s == NULL) { ECDSA_SIG_free(sig); return NULL; } return sig; } void ECDSA_SIG_free(ECDSA_SIG *sig) { if (sig == NULL) { return; } BN_free(sig->r); BN_free(sig->s); OPENSSL_free(sig); } const BIGNUM *ECDSA_SIG_get0_r(const ECDSA_SIG *sig) { return sig->r; } const BIGNUM *ECDSA_SIG_get0_s(const ECDSA_SIG *sig) { return sig->s; } void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **out_r, const BIGNUM **out_s) { if (out_r != NULL) { *out_r = sig->r; } if (out_s != NULL) { *out_s = sig->s; } } int ECDSA_SIG_set0(ECDSA_SIG *sig, BIGNUM *r, BIGNUM *s) { if (r == NULL || s == NULL) { return 0; } BN_free(sig->r); BN_free(sig->s); sig->r = r; sig->s = s; return 1; } int ECDSA_do_verify(const uint8_t *digest, size_t digest_len, const ECDSA_SIG *sig, const EC_KEY *eckey) { uint8_t fixed[ECDSA_MAX_FIXED_LEN]; size_t fixed_len; return ecdsa_sig_to_fixed(eckey, fixed, &fixed_len, sizeof(fixed), sig) && ecdsa_verify_fixed(digest, digest_len, fixed, fixed_len, eckey); } // This function is only exported for testing and is not called in production // code. 
ECDSA_SIG *ECDSA_sign_with_nonce_and_leak_private_key_for_testing( const uint8_t *digest, size_t digest_len, const EC_KEY *eckey, const uint8_t *nonce, size_t nonce_len) { uint8_t sig[ECDSA_MAX_FIXED_LEN]; size_t sig_len; if (!ecdsa_sign_fixed_with_nonce_for_known_answer_test( digest, digest_len, sig, &sig_len, sizeof(sig), eckey, nonce, nonce_len)) { return NULL; } return ecdsa_sig_from_fixed(eckey, sig, sig_len); } ECDSA_SIG *ECDSA_do_sign(const uint8_t *digest, size_t digest_len, const EC_KEY *eckey) { uint8_t sig[ECDSA_MAX_FIXED_LEN]; size_t sig_len; if (!ecdsa_sign_fixed(digest, digest_len, sig, &sig_len, sizeof(sig), eckey)) { return NULL; } return ecdsa_sig_from_fixed(eckey, sig, sig_len); } ECDSA_SIG *ECDSA_SIG_parse(CBS *cbs) { ECDSA_SIG *ret = ECDSA_SIG_new(); if (ret == NULL) { return NULL; } CBS child; if (!CBS_get_asn1(cbs, &child, CBS_ASN1_SEQUENCE) || !BN_parse_asn1_unsigned(&child, ret->r) || !BN_parse_asn1_unsigned(&child, ret->s) || CBS_len(&child) != 0) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE); ECDSA_SIG_free(ret); return NULL; } return ret; } ECDSA_SIG *ECDSA_SIG_from_bytes(const uint8_t *in, size_t in_len) { CBS cbs; CBS_init(&cbs, in, in_len); ECDSA_SIG *ret = ECDSA_SIG_parse(&cbs); if (ret == NULL || CBS_len(&cbs) != 0) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE); ECDSA_SIG_free(ret); return NULL; } return ret; } int ECDSA_SIG_marshal(CBB *cbb, const ECDSA_SIG *sig) { CBB child; if (!CBB_add_asn1(cbb, &child, CBS_ASN1_SEQUENCE) || !BN_marshal_asn1(&child, sig->r) || !BN_marshal_asn1(&child, sig->s) || !CBB_flush(cbb)) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_ENCODE_ERROR); return 0; } return 1; } int ECDSA_SIG_to_bytes(uint8_t **out_bytes, size_t *out_len, const ECDSA_SIG *sig) { CBB cbb; CBB_zero(&cbb); if (!CBB_init(&cbb, 0) || !ECDSA_SIG_marshal(&cbb, sig) || !CBB_finish(&cbb, out_bytes, out_len)) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_ENCODE_ERROR); CBB_cleanup(&cbb); return 0; } return 1; } // der_len_len returns the number of bytes 
needed to represent a length of |len| // in DER. static size_t der_len_len(size_t len) { if (len < 0x80) { return 1; } size_t ret = 1; while (len > 0) { ret++; len >>= 8; } return ret; } size_t ECDSA_SIG_max_len(size_t order_len) { // Compute the maximum length of an |order_len| byte integer. Defensively // assume that the leading 0x00 is included. size_t integer_len = 1 /* tag */ + der_len_len(order_len + 1) + 1 + order_len; if (integer_len < order_len) { return 0; } // An ECDSA signature is two INTEGERs. size_t value_len = 2 * integer_len; if (value_len < integer_len) { return 0; } // Add the header. size_t ret = 1 /* tag */ + der_len_len(value_len) + value_len; if (ret < value_len) { return 0; } return ret; } ECDSA_SIG *d2i_ECDSA_SIG(ECDSA_SIG **out, const uint8_t **inp, long len) { if (len < 0) { return NULL; } CBS cbs; CBS_init(&cbs, *inp, (size_t)len); ECDSA_SIG *ret = ECDSA_SIG_parse(&cbs); if (ret == NULL) { return NULL; } if (out != NULL) { ECDSA_SIG_free(*out); *out = ret; } *inp = CBS_data(&cbs); return ret; } int i2d_ECDSA_SIG(const ECDSA_SIG *sig, uint8_t **outp) { CBB cbb; if (!CBB_init(&cbb, 0) || !ECDSA_SIG_marshal(&cbb, sig)) { CBB_cleanup(&cbb); return -1; } return CBB_finish_i2d(&cbb, outp); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/engine/engine.cc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include "../internal.h" struct engine_st { RSA_METHOD *rsa_method; ECDSA_METHOD *ecdsa_method; }; ENGINE *ENGINE_new(void) { return reinterpret_cast(OPENSSL_zalloc(sizeof(ENGINE))); } int ENGINE_free(ENGINE *engine) { // Methods are currently required to be static so are not unref'ed. OPENSSL_free(engine); return 1; } // set_method takes a pointer to a method and its given size and sets // |*out_member| to point to it. This function might want to be extended in the // future to support making a copy of the method so that a stable ABI for // ENGINEs can be supported. But, for the moment, all *_METHODS must be // static. 
static int set_method(void **out_member, const void *method, size_t method_size, size_t compiled_size) { const struct openssl_method_common_st *common = reinterpret_cast(method); if (method_size != compiled_size || !common->is_static) { return 0; } *out_member = (void *)method; return 1; } int ENGINE_set_RSA_method(ENGINE *engine, const RSA_METHOD *method, size_t method_size) { return set_method((void **)&engine->rsa_method, method, method_size, sizeof(RSA_METHOD)); } RSA_METHOD *ENGINE_get_RSA_method(const ENGINE *engine) { return engine->rsa_method; } int ENGINE_set_ECDSA_method(ENGINE *engine, const ECDSA_METHOD *method, size_t method_size) { return set_method((void **)&engine->ecdsa_method, method, method_size, sizeof(ECDSA_METHOD)); } ECDSA_METHOD *ENGINE_get_ECDSA_method(const ENGINE *engine) { return engine->ecdsa_method; } void METHOD_ref(void *method_in) { assert(((struct openssl_method_common_st *)method_in)->is_static); } void METHOD_unref(void *method_in) { struct openssl_method_common_st *method = reinterpret_cast(method_in); if (method == NULL) { return; } assert(method->is_static); } OPENSSL_DECLARE_ERROR_REASON(ENGINE, OPERATION_NOT_SUPPORTED) ================================================ FILE: Sources/CNIOBoringSSL/crypto/err/err.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ // Ensure we can't call OPENSSL_malloc circularly. 
#define _BORINGSSL_PROHIBIT_OPENSSL_MALLOC #include #include #include #include #include #include #include #if defined(OPENSSL_WINDOWS) OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include OPENSSL_MSVC_PRAGMA(warning(pop)) #endif #include #include #include "../internal.h" #include "./internal.h" namespace { struct err_error_st { // file contains the filename where the error occurred. const char *file; // data contains a NUL-terminated string with optional data. It is allocated // with system |malloc| and must be freed with |free| (not |OPENSSL_free|) char *data; // packed contains the error library and reason, as packed by ERR_PACK. uint32_t packed; // line contains the line number where the error occurred. uint16_t line; // mark indicates a reversion point in the queue. See |ERR_pop_to_mark|. unsigned mark : 1; }; // ERR_STATE contains the per-thread, error queue. typedef struct err_state_st { // errors contains up to ERR_NUM_ERRORS - 1 most recent errors, organised as a // ring buffer. struct err_error_st errors[ERR_NUM_ERRORS]; // top contains the index of the most recent error. If |top| equals |bottom| // then the queue is empty. unsigned top; // bottom contains the index before the least recent error in the queue. unsigned bottom; // to_free, if not NULL, contains a pointer owned by this structure that was // previously a |data| pointer of one of the elements of |errors|. void *to_free; } ERR_STATE; } // namespace extern const uint32_t kOpenSSLReasonValues[]; extern const size_t kOpenSSLReasonValuesLen; extern const char kOpenSSLReasonStringData[]; static char *strdup_libc_malloc(const char *str) { // |strdup| is not in C until C23, so MSVC triggers deprecation warnings, and // glibc and musl gate it on a feature macro. Reimplementing it is easier. size_t len = strlen(str); char *ret = reinterpret_cast(malloc(len + 1)); if (ret != NULL) { memcpy(ret, str, len + 1); } return ret; } // err_clear clears the given queued error. 
static void err_clear(struct err_error_st *error) { free(error->data); OPENSSL_memset(error, 0, sizeof(struct err_error_st)); } static void err_copy(struct err_error_st *dst, const struct err_error_st *src) { err_clear(dst); dst->file = src->file; if (src->data != NULL) { // We can't use OPENSSL_strdup because we don't want to call OPENSSL_malloc, // which can affect the error stack. dst->data = strdup_libc_malloc(src->data); } dst->packed = src->packed; dst->line = src->line; } // global_next_library contains the next custom library value to return. static int global_next_library = ERR_NUM_LIBS; // global_next_library_mutex protects |global_next_library| from concurrent // updates. static CRYPTO_MUTEX global_next_library_mutex = CRYPTO_MUTEX_INIT; static void err_state_free(void *statep) { ERR_STATE *state = reinterpret_cast(statep); if (state == NULL) { return; } for (unsigned i = 0; i < ERR_NUM_ERRORS; i++) { err_clear(&state->errors[i]); } free(state->to_free); free(state); } // err_get_state gets the ERR_STATE object for the current thread. 
static ERR_STATE *err_get_state(void) { ERR_STATE *state = reinterpret_cast( CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_ERR)); if (state == NULL) { state = reinterpret_cast(malloc(sizeof(ERR_STATE))); if (state == NULL) { return NULL; } OPENSSL_memset(state, 0, sizeof(ERR_STATE)); if (!CRYPTO_set_thread_local(OPENSSL_THREAD_LOCAL_ERR, state, err_state_free)) { return NULL; } } return state; } static uint32_t get_error_values(int inc, int top, const char **file, int *line, const char **data, int *flags) { unsigned i = 0; ERR_STATE *state; struct err_error_st *error; uint32_t ret; state = err_get_state(); if (state == NULL || state->bottom == state->top) { return 0; } if (top) { assert(!inc); // last error i = state->top; } else { i = (state->bottom + 1) % ERR_NUM_ERRORS; } error = &state->errors[i]; ret = error->packed; if (file != NULL && line != NULL) { if (error->file == NULL) { *file = "NA"; *line = 0; } else { *file = error->file; *line = error->line; } } if (data != NULL) { if (error->data == NULL) { *data = ""; if (flags != NULL) { *flags = 0; } } else { *data = error->data; if (flags != NULL) { // Without |ERR_FLAG_MALLOCED|, rust-openssl assumes the string has a // static lifetime. In both cases, we retain ownership of the string, // and the caller is not expected to free it. *flags = ERR_FLAG_STRING | ERR_FLAG_MALLOCED; } // If this error is being removed, take ownership of data from // the error. The semantics are such that the caller doesn't // take ownership either. Instead the error system takes // ownership and retains it until the next call that affects the // error queue. 
if (inc) { if (error->data != NULL) { free(state->to_free); state->to_free = error->data; } error->data = NULL; } } } if (inc) { assert(!top); err_clear(error); state->bottom = i; } return ret; } uint32_t ERR_get_error(void) { return get_error_values(1 /* inc */, 0 /* bottom */, NULL, NULL, NULL, NULL); } uint32_t ERR_get_error_line(const char **file, int *line) { return get_error_values(1 /* inc */, 0 /* bottom */, file, line, NULL, NULL); } uint32_t ERR_get_error_line_data(const char **file, int *line, const char **data, int *flags) { return get_error_values(1 /* inc */, 0 /* bottom */, file, line, data, flags); } uint32_t ERR_peek_error(void) { return get_error_values(0 /* peek */, 0 /* bottom */, NULL, NULL, NULL, NULL); } uint32_t ERR_peek_error_line(const char **file, int *line) { return get_error_values(0 /* peek */, 0 /* bottom */, file, line, NULL, NULL); } uint32_t ERR_peek_error_line_data(const char **file, int *line, const char **data, int *flags) { return get_error_values(0 /* peek */, 0 /* bottom */, file, line, data, flags); } uint32_t ERR_peek_last_error(void) { return get_error_values(0 /* peek */, 1 /* top */, NULL, NULL, NULL, NULL); } uint32_t ERR_peek_last_error_line(const char **file, int *line) { return get_error_values(0 /* peek */, 1 /* top */, file, line, NULL, NULL); } uint32_t ERR_peek_last_error_line_data(const char **file, int *line, const char **data, int *flags) { return get_error_values(0 /* peek */, 1 /* top */, file, line, data, flags); } void ERR_clear_error(void) { ERR_STATE *const state = err_get_state(); unsigned i; if (state == NULL) { return; } for (i = 0; i < ERR_NUM_ERRORS; i++) { err_clear(&state->errors[i]); } free(state->to_free); state->to_free = NULL; state->top = state->bottom = 0; } void ERR_remove_thread_state(const CRYPTO_THREADID *tid) { if (tid != NULL) { assert(0); return; } ERR_clear_error(); } int ERR_get_next_error_library(void) { int ret; CRYPTO_MUTEX_lock_write(&global_next_library_mutex); ret = 
global_next_library++; CRYPTO_MUTEX_unlock_write(&global_next_library_mutex); return ret; } void ERR_remove_state(unsigned long pid) { ERR_clear_error(); } void ERR_clear_system_error(void) { errno = 0; } // err_string_cmp is a compare function for searching error values with // |bsearch| in |err_string_lookup|. static int err_string_cmp(const void *a, const void *b) { const uint32_t a_key = *((const uint32_t *)a) >> 15; const uint32_t b_key = *((const uint32_t *)b) >> 15; if (a_key < b_key) { return -1; } else if (a_key > b_key) { return 1; } else { return 0; } } // err_string_lookup looks up the string associated with |lib| and |key| in // |values| and |string_data|. It returns the string or NULL if not found. static const char *err_string_lookup(uint32_t lib, uint32_t key, const uint32_t *values, size_t num_values, const char *string_data) { // |values| points to data in err_data.h, which is generated by // err_data_generate.go. It's an array of uint32_t values. Each value has the // following structure: // | lib | key | offset | // |6 bits| 11 bits | 15 bits | // // The |lib| value is a library identifier: one of the |ERR_LIB_*| values. // The |key| is a reason code, depending on the context. // The |offset| is the number of bytes from the start of |string_data| where // the (NUL terminated) string for this value can be found. // // Values are sorted based on treating the |lib| and |key| part as an // unsigned integer. 
if (lib >= (1 << 6) || key >= (1 << 11)) { return NULL; } uint32_t search_key = lib << 26 | key << 15; const uint32_t *result = reinterpret_cast(bsearch( &search_key, values, num_values, sizeof(uint32_t), err_string_cmp)); if (result == NULL) { return NULL; } return &string_data[(*result) & 0x7fff]; } namespace { typedef struct library_name_st { const char *str; const char *symbol; const char *reason_symbol; } LIBRARY_NAME; } // namespace static const LIBRARY_NAME kLibraryNames[ERR_NUM_LIBS] = { {"invalid library (0)", NULL, NULL}, {"unknown library", "NONE", "NONE_LIB"}, {"system library", "SYS", "SYS_LIB"}, {"bignum routines", "BN", "BN_LIB"}, {"RSA routines", "RSA", "RSA_LIB"}, {"Diffie-Hellman routines", "DH", "DH_LIB"}, {"public key routines", "EVP", "EVP_LIB"}, {"memory buffer routines", "BUF", "BUF_LIB"}, {"object identifier routines", "OBJ", "OBJ_LIB"}, {"PEM routines", "PEM", "PEM_LIB"}, {"DSA routines", "DSA", "DSA_LIB"}, {"X.509 certificate routines", "X509", "X509_LIB"}, {"ASN.1 encoding routines", "ASN1", "ASN1_LIB"}, {"configuration file routines", "CONF", "CONF_LIB"}, {"common libcrypto routines", "CRYPTO", "CRYPTO_LIB"}, {"elliptic curve routines", "EC", "EC_LIB"}, {"SSL routines", "SSL", "SSL_LIB"}, {"BIO routines", "BIO", "BIO_LIB"}, {"PKCS7 routines", "PKCS7", "PKCS7_LIB"}, {"PKCS8 routines", "PKCS8", "PKCS8_LIB"}, {"X509 V3 routines", "X509V3", "X509V3_LIB"}, {"random number generator", "RAND", "RAND_LIB"}, {"ENGINE routines", "ENGINE", "ENGINE_LIB"}, {"OCSP routines", "OCSP", "OCSP_LIB"}, {"UI routines", "UI", "UI_LIB"}, {"COMP routines", "COMP", "COMP_LIB"}, {"ECDSA routines", "ECDSA", "ECDSA_LIB"}, {"ECDH routines", "ECDH", "ECDH_LIB"}, {"HMAC routines", "HMAC", "HMAC_LIB"}, {"Digest functions", "DIGEST", "DIGEST_LIB"}, {"Cipher functions", "CIPHER", "CIPHER_LIB"}, {"HKDF functions", "HKDF", "HKDF_LIB"}, {"Trust Token functions", "TRUST_TOKEN", "TRUST_TOKEN_LIB"}, {"User defined functions", "USER", "USER_LIB"}, }; static const char 
*err_lib_error_string(uint32_t packed_error) { const uint32_t lib = ERR_GET_LIB(packed_error); return lib >= ERR_NUM_LIBS ? NULL : kLibraryNames[lib].str; } const char *ERR_lib_error_string(uint32_t packed_error) { const char *ret = err_lib_error_string(packed_error); return ret == NULL ? "unknown library" : ret; } const char *ERR_lib_symbol_name(uint32_t packed_error) { const uint32_t lib = ERR_GET_LIB(packed_error); return lib >= ERR_NUM_LIBS ? NULL : kLibraryNames[lib].symbol; } const char *ERR_func_error_string(uint32_t packed_error) { return "OPENSSL_internal"; } static const char *err_reason_error_string(uint32_t packed_error, int symbol) { const uint32_t lib = ERR_GET_LIB(packed_error); const uint32_t reason = ERR_GET_REASON(packed_error); if (lib == ERR_LIB_SYS) { if (!symbol && reason < 127) { return strerror(reason); } return NULL; } if (reason < ERR_NUM_LIBS) { return symbol ? kLibraryNames[reason].reason_symbol : kLibraryNames[reason].str; } if (reason < 100) { // TODO(davidben): All our other reason strings match the symbol name. Only // the common ones differ. Should we just consistently return the symbol // name? switch (reason) { case ERR_R_MALLOC_FAILURE: return symbol ? "MALLOC_FAILURE" : "malloc failure"; case ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED: return symbol ? "SHOULD_NOT_HAVE_BEEN_CALLED" : "function should not have been called"; case ERR_R_PASSED_NULL_PARAMETER: return symbol ? "PASSED_NULL_PARAMETER" : "passed a null parameter"; case ERR_R_INTERNAL_ERROR: return symbol ? "INTERNAL_ERROR" : "internal error"; case ERR_R_OVERFLOW: return symbol ? "OVERFLOW" : "overflow"; default: return NULL; } } // Unlike OpenSSL, BoringSSL's reason strings already match symbol name, so we // do not need to check |symbol|. 
return err_string_lookup(lib, reason, kOpenSSLReasonValues, kOpenSSLReasonValuesLen, kOpenSSLReasonStringData); } const char *ERR_reason_error_string(uint32_t packed_error) { const char *ret = err_reason_error_string(packed_error, /*symbol=*/0); return ret == NULL ? "unknown error" : ret; } const char *ERR_reason_symbol_name(uint32_t packed_error) { return err_reason_error_string(packed_error, /*symbol=*/1); } char *ERR_error_string(uint32_t packed_error, char *ret) { static char buf[ERR_ERROR_STRING_BUF_LEN]; if (ret == NULL) { // TODO(fork): remove this. ret = buf; } #if !defined(NDEBUG) // This is aimed to help catch callers who don't provide // |ERR_ERROR_STRING_BUF_LEN| bytes of space. OPENSSL_memset(ret, 0, ERR_ERROR_STRING_BUF_LEN); #endif return ERR_error_string_n(packed_error, ret, ERR_ERROR_STRING_BUF_LEN); } char *ERR_error_string_n(uint32_t packed_error, char *buf, size_t len) { if (len == 0) { return NULL; } unsigned lib = ERR_GET_LIB(packed_error); unsigned reason = ERR_GET_REASON(packed_error); const char *lib_str = err_lib_error_string(packed_error); const char *reason_str = err_reason_error_string(packed_error, /*symbol=*/0); char lib_buf[32], reason_buf[32]; if (lib_str == NULL) { snprintf(lib_buf, sizeof(lib_buf), "lib(%u)", lib); lib_str = lib_buf; } if (reason_str == NULL) { snprintf(reason_buf, sizeof(reason_buf), "reason(%u)", reason); reason_str = reason_buf; } int ret = snprintf(buf, len, "error:%08" PRIx32 ":%s:OPENSSL_internal:%s", packed_error, lib_str, reason_str); if (ret >= 0 && (size_t)ret >= len) { // The output was truncated; make sure we always have 5 colon-separated // fields, i.e. 4 colons. static const unsigned num_colons = 4; unsigned i; char *s = buf; if (len <= num_colons) { // In this situation it's not possible to ensure that the correct number // of colons are included in the output. 
return buf; } for (i = 0; i < num_colons; i++) { char *colon = strchr(s, ':'); char *last_pos = &buf[len - 1] - num_colons + i; if (colon == NULL || colon > last_pos) { // set colon |i| at last possible position (buf[len-1] is the // terminating 0). If we're setting this colon, then all whole of the // rest of the string must be colons in order to have the correct // number. OPENSSL_memset(last_pos, ':', num_colons - i); break; } s = colon + 1; } } return buf; } void ERR_print_errors_cb(ERR_print_errors_callback_t callback, void *ctx) { char buf[ERR_ERROR_STRING_BUF_LEN]; char buf2[1024]; const char *file, *data; int line, flags; uint32_t packed_error; // thread_hash is the least-significant bits of the |ERR_STATE| pointer value // for this thread. const unsigned long thread_hash = (uintptr_t)err_get_state(); for (;;) { packed_error = ERR_get_error_line_data(&file, &line, &data, &flags); if (packed_error == 0) { break; } ERR_error_string_n(packed_error, buf, sizeof(buf)); snprintf(buf2, sizeof(buf2), "%lu:%s:%s:%d:%s\n", thread_hash, buf, file, line, (flags & ERR_FLAG_STRING) ? data : ""); if (callback(buf2, strlen(buf2), ctx) <= 0) { break; } } } static int print_errors_to_file(const char *msg, size_t msg_len, void *ctx) { assert(msg[msg_len] == '\0'); FILE *fp = reinterpret_cast(ctx); int res = fputs(msg, fp); return res < 0 ? 0 : 1; } void ERR_print_errors_fp(FILE *file) { ERR_print_errors_cb(print_errors_to_file, file); } // err_set_error_data sets the data on the most recent error. 
static void err_set_error_data(char *data) { ERR_STATE *const state = err_get_state(); struct err_error_st *error; if (state == NULL || state->top == state->bottom) { free(data); return; } error = &state->errors[state->top]; free(error->data); error->data = data; } void ERR_put_error(int library, int unused, int reason, const char *file, unsigned line) { ERR_STATE *const state = err_get_state(); struct err_error_st *error; if (state == NULL) { return; } if (library == ERR_LIB_SYS && reason == 0) { #if defined(OPENSSL_WINDOWS) reason = GetLastError(); #else reason = errno; #endif } state->top = (state->top + 1) % ERR_NUM_ERRORS; if (state->top == state->bottom) { state->bottom = (state->bottom + 1) % ERR_NUM_ERRORS; } error = &state->errors[state->top]; err_clear(error); error->file = file; error->line = line; error->packed = ERR_PACK(library, reason); } // ERR_add_error_data_vdata takes a variable number of const char* pointers, // concatenates them and sets the result as the data on the most recent // error. static void err_add_error_vdata(unsigned num, va_list args) { size_t total_size = 0; const char *substr; char *buf; va_list args_copy; va_copy(args_copy, args); for (size_t i = 0; i < num; i++) { substr = va_arg(args_copy, const char *); if (substr == NULL) { continue; } size_t substr_len = strlen(substr); if (SIZE_MAX - total_size < substr_len) { return; // Would overflow. } total_size += substr_len; } va_end(args_copy); if (total_size == SIZE_MAX) { return; // Would overflow. } total_size += 1; // NUL terminator. if ((buf = reinterpret_cast(malloc(total_size))) == NULL) { return; } buf[0] = '\0'; for (size_t i = 0; i < num; i++) { substr = va_arg(args, const char *); if (substr == NULL) { continue; } if (OPENSSL_strlcat(buf, substr, total_size) >= total_size) { assert(0); // should not be possible. } } err_set_error_data(buf); } void ERR_add_error_data(unsigned count, ...) 
{ va_list args; va_start(args, count); err_add_error_vdata(count, args); va_end(args); } void ERR_add_error_dataf(const char *format, ...) { char *buf = NULL; va_list ap; va_start(ap, format); if (OPENSSL_vasprintf_internal(&buf, format, ap, /*system_malloc=*/1) == -1) { return; } va_end(ap); err_set_error_data(buf); } void ERR_set_error_data(char *data, int flags) { if (!(flags & ERR_FLAG_STRING)) { // We do not support non-string error data. assert(0); return; } // We can not use OPENSSL_strdup because we don't want to call OPENSSL_malloc, // which can affect the error stack. char *copy = strdup_libc_malloc(data); if (copy != NULL) { err_set_error_data(copy); } if (flags & ERR_FLAG_MALLOCED) { // We can not take ownership of |data| directly because it is allocated with // |OPENSSL_malloc| and we will free it with system |free| later. OPENSSL_free(data); } } int ERR_set_mark(void) { ERR_STATE *const state = err_get_state(); if (state == NULL || state->bottom == state->top) { return 0; } state->errors[state->top].mark = 1; return 1; } int ERR_pop_to_mark(void) { ERR_STATE *const state = err_get_state(); if (state == NULL) { return 0; } while (state->bottom != state->top) { struct err_error_st *error = &state->errors[state->top]; if (error->mark) { error->mark = 0; return 1; } err_clear(error); if (state->top == 0) { state->top = ERR_NUM_ERRORS - 1; } else { state->top--; } } return 0; } void ERR_load_crypto_strings(void) {} void ERR_free_strings(void) {} void ERR_load_BIO_strings(void) {} void ERR_load_ERR_strings(void) {} void ERR_load_RAND_strings(void) {} struct err_save_state_st { struct err_error_st *errors; size_t num_errors; }; void ERR_SAVE_STATE_free(ERR_SAVE_STATE *state) { if (state == NULL) { return; } for (size_t i = 0; i < state->num_errors; i++) { err_clear(&state->errors[i]); } free(state->errors); free(state); } ERR_SAVE_STATE *ERR_save_state(void) { ERR_STATE *const state = err_get_state(); if (state == NULL || state->top == state->bottom) { 
return NULL; } ERR_SAVE_STATE *ret = reinterpret_cast(malloc(sizeof(ERR_SAVE_STATE))); if (ret == NULL) { return NULL; } // Errors are stored in the range (bottom, top]. size_t num_errors = state->top >= state->bottom ? state->top - state->bottom : ERR_NUM_ERRORS + state->top - state->bottom; assert(num_errors < ERR_NUM_ERRORS); ret->errors = reinterpret_cast( malloc(num_errors * sizeof(struct err_error_st))); if (ret->errors == NULL) { free(ret); return NULL; } OPENSSL_memset(ret->errors, 0, num_errors * sizeof(struct err_error_st)); ret->num_errors = num_errors; for (size_t i = 0; i < num_errors; i++) { size_t j = (state->bottom + i + 1) % ERR_NUM_ERRORS; err_copy(&ret->errors[i], &state->errors[j]); } return ret; } void ERR_restore_state(const ERR_SAVE_STATE *state) { if (state == NULL || state->num_errors == 0) { ERR_clear_error(); return; } if (state->num_errors >= ERR_NUM_ERRORS) { abort(); } ERR_STATE *const dst = err_get_state(); if (dst == NULL) { return; } for (size_t i = 0; i < state->num_errors; i++) { err_copy(&dst->errors[i], &state->errors[i]); } dst->top = (unsigned)(state->num_errors - 1); dst->bottom = ERR_NUM_ERRORS - 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/err/internal.h ================================================ /* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CRYPTO_ERR_INTERNAL_H #define OPENSSL_HEADER_CRYPTO_ERR_INTERNAL_H #include #if defined(__cplusplus) extern "C" { #endif // Private error queue functions. // ERR_SAVE_STATE contains a saved representation of the error queue. It is // slightly more compact than |ERR_STATE| as the error queue will typically not // contain |ERR_NUM_ERRORS| entries. typedef struct err_save_state_st ERR_SAVE_STATE; // ERR_SAVE_STATE_free releases all memory associated with |state|. OPENSSL_EXPORT void ERR_SAVE_STATE_free(ERR_SAVE_STATE *state); // ERR_save_state returns a newly-allocated |ERR_SAVE_STATE| structure // containing the current state of the error queue or NULL on allocation // error. It should be released with |ERR_SAVE_STATE_free|. OPENSSL_EXPORT ERR_SAVE_STATE *ERR_save_state(void); // ERR_restore_state clears the error queue and replaces it with |state|. OPENSSL_EXPORT void ERR_restore_state(const ERR_SAVE_STATE *state); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(ERR_SAVE_STATE, ERR_SAVE_STATE_free) BSSL_NAMESPACE_END } // extern C++ #endif #endif // OPENSSL_HEADER_CRYPTO_ERR_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/evp.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include "../internal.h" #include "internal.h" // Node depends on |EVP_R_NOT_XOF_OR_INVALID_LENGTH|. // // TODO(davidben): Fix Node to not touch the error queue itself and remove this. OPENSSL_DECLARE_ERROR_REASON(EVP, NOT_XOF_OR_INVALID_LENGTH) // The HPKE module uses the EVP error namespace, but it lives in another // directory. OPENSSL_DECLARE_ERROR_REASON(EVP, EMPTY_PSK) EVP_PKEY *EVP_PKEY_new(void) { EVP_PKEY *ret = reinterpret_cast(OPENSSL_zalloc(sizeof(EVP_PKEY))); if (ret == NULL) { return NULL; } ret->type = EVP_PKEY_NONE; ret->references = 1; return ret; } static void free_it(EVP_PKEY *pkey) { if (pkey->ameth && pkey->ameth->pkey_free) { pkey->ameth->pkey_free(pkey); pkey->pkey = NULL; pkey->type = EVP_PKEY_NONE; } } void EVP_PKEY_free(EVP_PKEY *pkey) { if (pkey == NULL) { return; } if (!CRYPTO_refcount_dec_and_test_zero(&pkey->references)) { return; } free_it(pkey); OPENSSL_free(pkey); } int EVP_PKEY_up_ref(EVP_PKEY *pkey) { CRYPTO_refcount_inc(&pkey->references); return 1; } int EVP_PKEY_is_opaque(const EVP_PKEY *pkey) { if (pkey->ameth && pkey->ameth->pkey_opaque) { return pkey->ameth->pkey_opaque(pkey); } return 0; } int EVP_PKEY_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { if (a->type != b->type) { return -1; } if (a->ameth) { int ret; // Compare parameters if the algorithm has them if (a->ameth->param_cmp) { ret = a->ameth->param_cmp(a, b); if (ret <= 0) { return ret; } } if (a->ameth->pub_cmp) { return a->ameth->pub_cmp(a, b); } } return -2; } int EVP_PKEY_copy_parameters(EVP_PKEY *to, const EVP_PKEY *from) { if (to->type == EVP_PKEY_NONE) { evp_pkey_set_method(to, from->ameth); } else if (to->type != from->type) { OPENSSL_PUT_ERROR(EVP, EVP_R_DIFFERENT_KEY_TYPES); return 0; } if (EVP_PKEY_missing_parameters(from)) { OPENSSL_PUT_ERROR(EVP, 
EVP_R_MISSING_PARAMETERS); return 0; } // Once set, parameters may not change. if (!EVP_PKEY_missing_parameters(to)) { if (EVP_PKEY_cmp_parameters(to, from) == 1) { return 1; } OPENSSL_PUT_ERROR(EVP, EVP_R_DIFFERENT_PARAMETERS); return 0; } if (from->ameth && from->ameth->param_copy) { return from->ameth->param_copy(to, from); } // TODO(https://crbug.com/boringssl/536): If the algorithm takes no // parameters, copying them should vacuously succeed. return 0; } int EVP_PKEY_missing_parameters(const EVP_PKEY *pkey) { if (pkey->ameth && pkey->ameth->param_missing) { return pkey->ameth->param_missing(pkey); } return 0; } int EVP_PKEY_size(const EVP_PKEY *pkey) { if (pkey && pkey->ameth && pkey->ameth->pkey_size) { return pkey->ameth->pkey_size(pkey); } return 0; } int EVP_PKEY_bits(const EVP_PKEY *pkey) { if (pkey && pkey->ameth && pkey->ameth->pkey_bits) { return pkey->ameth->pkey_bits(pkey); } return 0; } int EVP_PKEY_id(const EVP_PKEY *pkey) { return pkey->type; } // evp_pkey_asn1_find returns the ASN.1 method table for the given |nid|, which // should be one of the |EVP_PKEY_*| values. It returns NULL if |nid| is // unknown. static const EVP_PKEY_ASN1_METHOD *evp_pkey_asn1_find(int nid) { switch (nid) { case EVP_PKEY_RSA: return &rsa_asn1_meth; case EVP_PKEY_EC: return &ec_asn1_meth; case EVP_PKEY_DSA: return &dsa_asn1_meth; case EVP_PKEY_ED25519: return &ed25519_asn1_meth; case EVP_PKEY_X25519: return &x25519_asn1_meth; default: return NULL; } } void evp_pkey_set_method(EVP_PKEY *pkey, const EVP_PKEY_ASN1_METHOD *method) { free_it(pkey); pkey->ameth = method; pkey->type = pkey->ameth->pkey_id; } int EVP_PKEY_type(int nid) { // In OpenSSL, this was used to map between type aliases. BoringSSL supports // no type aliases, so this function is just the identity. return nid; } int EVP_PKEY_assign(EVP_PKEY *pkey, int type, void *key) { // This function can only be used to assign RSA, DSA, EC, and DH keys. 
Other // key types have internal representations which are not exposed through the // public API. switch (type) { case EVP_PKEY_RSA: return EVP_PKEY_assign_RSA(pkey, reinterpret_cast(key)); case EVP_PKEY_DSA: return EVP_PKEY_assign_DSA(pkey, reinterpret_cast(key)); case EVP_PKEY_EC: return EVP_PKEY_assign_EC_KEY(pkey, reinterpret_cast(key)); case EVP_PKEY_DH: return EVP_PKEY_assign_DH(pkey, reinterpret_cast(key)); } OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); ERR_add_error_dataf("algorithm %d", type); return 0; } int EVP_PKEY_set_type(EVP_PKEY *pkey, int type) { if (pkey && pkey->pkey) { // This isn't strictly necessary, but historically |EVP_PKEY_set_type| would // clear |pkey| even if |evp_pkey_asn1_find| failed, so we preserve that // behavior. free_it(pkey); } const EVP_PKEY_ASN1_METHOD *ameth = evp_pkey_asn1_find(type); if (ameth == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); ERR_add_error_dataf("algorithm %d", type); return 0; } if (pkey) { evp_pkey_set_method(pkey, ameth); } return 1; } EVP_PKEY *EVP_PKEY_new_raw_private_key(int type, ENGINE *unused, const uint8_t *in, size_t len) { // To avoid pulling in all key types, look for specifically the key types that // support |set_priv_raw|. const EVP_PKEY_ASN1_METHOD *method; switch (type) { case EVP_PKEY_X25519: method = &x25519_asn1_meth; break; case EVP_PKEY_ED25519: method = &ed25519_asn1_meth; break; default: OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); return nullptr; } bssl::UniquePtr ret(EVP_PKEY_new()); if (ret == nullptr) { return nullptr; } evp_pkey_set_method(ret.get(), method); if (!ret->ameth->set_priv_raw(ret.get(), in, len)) { return nullptr; } return ret.release(); } EVP_PKEY *EVP_PKEY_new_raw_public_key(int type, ENGINE *unused, const uint8_t *in, size_t len) { // To avoid pulling in all key types, look for specifically the key types that // support |set_pub_raw|. 
const EVP_PKEY_ASN1_METHOD *method; switch (type) { case EVP_PKEY_X25519: method = &x25519_asn1_meth; break; case EVP_PKEY_ED25519: method = &ed25519_asn1_meth; break; default: OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); return nullptr; } bssl::UniquePtr ret(EVP_PKEY_new()); if (ret == nullptr) { return nullptr; } evp_pkey_set_method(ret.get(), method); if (!ret->ameth->set_pub_raw(ret.get(), in, len)) { return nullptr; } return ret.release(); } int EVP_PKEY_get_raw_private_key(const EVP_PKEY *pkey, uint8_t *out, size_t *out_len) { if (pkey->ameth->get_priv_raw == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } return pkey->ameth->get_priv_raw(pkey, out, out_len); } int EVP_PKEY_get_raw_public_key(const EVP_PKEY *pkey, uint8_t *out, size_t *out_len) { if (pkey->ameth->get_pub_raw == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } return pkey->ameth->get_pub_raw(pkey, out, out_len); } int EVP_PKEY_cmp_parameters(const EVP_PKEY *a, const EVP_PKEY *b) { if (a->type != b->type) { return -1; } if (a->ameth && a->ameth->param_cmp) { return a->ameth->param_cmp(a, b); } // TODO(https://crbug.com/boringssl/536): If the algorithm doesn't use // parameters, they should compare as vacuously equal. return -2; } int EVP_PKEY_CTX_set_signature_md(EVP_PKEY_CTX *ctx, const EVP_MD *md) { return EVP_PKEY_CTX_ctrl(ctx, -1, EVP_PKEY_OP_TYPE_SIG, EVP_PKEY_CTRL_MD, 0, (void *)md); } int EVP_PKEY_CTX_get_signature_md(EVP_PKEY_CTX *ctx, const EVP_MD **out_md) { return EVP_PKEY_CTX_ctrl(ctx, -1, EVP_PKEY_OP_TYPE_SIG, EVP_PKEY_CTRL_GET_MD, 0, (void *)out_md); } void *EVP_PKEY_get0(const EVP_PKEY *pkey) { // Node references, but never calls this function, so for now we return NULL. // If other projects require complete support, call |EVP_PKEY_get0_RSA|, etc., // rather than reading |pkey->pkey| directly. 
This avoids problems if our // internal representation does not match the type the caller expects from // OpenSSL. return NULL; } void OpenSSL_add_all_algorithms(void) {} void OPENSSL_add_all_algorithms_conf(void) {} void OpenSSL_add_all_ciphers(void) {} void OpenSSL_add_all_digests(void) {} void EVP_cleanup(void) {} int EVP_PKEY_set1_tls_encodedpoint(EVP_PKEY *pkey, const uint8_t *in, size_t len) { if (pkey->ameth->set1_tls_encodedpoint == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } return pkey->ameth->set1_tls_encodedpoint(pkey, in, len); } size_t EVP_PKEY_get1_tls_encodedpoint(const EVP_PKEY *pkey, uint8_t **out_ptr) { if (pkey->ameth->get1_tls_encodedpoint == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } return pkey->ameth->get1_tls_encodedpoint(pkey, out_ptr); } int EVP_PKEY_base_id(const EVP_PKEY *pkey) { // OpenSSL has two notions of key type because it supports multiple OIDs for // the same algorithm: NID_rsa vs NID_rsaEncryption and five distinct spelling // of DSA. We do not support these, so the base ID is simply the ID. return EVP_PKEY_id(pkey); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/evp_asn1.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include "internal.h" #include "../bytestring/internal.h" #include "../internal.h" // We intentionally omit |dh_asn1_meth| from this list. It is not serializable. 
static const EVP_PKEY_ASN1_METHOD *const kASN1Methods[] = { &rsa_asn1_meth, &ec_asn1_meth, &dsa_asn1_meth, &ed25519_asn1_meth, &x25519_asn1_meth, }; static const EVP_PKEY_ASN1_METHOD *parse_key_type(CBS *cbs) { CBS oid; if (!CBS_get_asn1(cbs, &oid, CBS_ASN1_OBJECT)) { return NULL; } for (unsigned i = 0; i < OPENSSL_ARRAY_SIZE(kASN1Methods); i++) { const EVP_PKEY_ASN1_METHOD *method = kASN1Methods[i]; if (CBS_len(&oid) == method->oid_len && OPENSSL_memcmp(CBS_data(&oid), method->oid, method->oid_len) == 0) { return method; } } return NULL; } EVP_PKEY *EVP_parse_public_key(CBS *cbs) { // Parse the SubjectPublicKeyInfo. CBS spki, algorithm, key; uint8_t padding; if (!CBS_get_asn1(cbs, &spki, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&spki, &key, CBS_ASN1_BITSTRING) || CBS_len(&spki) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return nullptr; } const EVP_PKEY_ASN1_METHOD *method = parse_key_type(&algorithm); if (method == nullptr) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); return nullptr; } if (// Every key type defined encodes the key as a byte string with the same // conversion to BIT STRING. !CBS_get_u8(&key, &padding) || padding != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return nullptr; } // Set up an |EVP_PKEY| of the appropriate type. bssl::UniquePtr ret(EVP_PKEY_new()); if (ret == nullptr) { return nullptr; } evp_pkey_set_method(ret.get(), method); // Call into the type-specific SPKI decoding function. 
if (ret->ameth->pub_decode == nullptr) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); return nullptr; } if (!ret->ameth->pub_decode(ret.get(), &algorithm, &key)) { return nullptr; } return ret.release(); } int EVP_marshal_public_key(CBB *cbb, const EVP_PKEY *key) { if (key->ameth == NULL || key->ameth->pub_encode == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); return 0; } return key->ameth->pub_encode(cbb, key); } EVP_PKEY *EVP_parse_private_key(CBS *cbs) { // Parse the PrivateKeyInfo. CBS pkcs8, algorithm, key; uint64_t version; if (!CBS_get_asn1(cbs, &pkcs8, CBS_ASN1_SEQUENCE) || !CBS_get_asn1_uint64(&pkcs8, &version) || version != 0 || !CBS_get_asn1(&pkcs8, &algorithm, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&pkcs8, &key, CBS_ASN1_OCTETSTRING)) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return nullptr; } const EVP_PKEY_ASN1_METHOD *method = parse_key_type(&algorithm); if (method == nullptr) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); return nullptr; } // A PrivateKeyInfo ends with a SET of Attributes which we ignore. // Set up an |EVP_PKEY| of the appropriate type. bssl::UniquePtr ret(EVP_PKEY_new()); if (ret == nullptr) { return nullptr; } evp_pkey_set_method(ret.get(), method); // Call into the type-specific PrivateKeyInfo decoding function. 
if (ret->ameth->priv_decode == nullptr) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); return nullptr; } if (!ret->ameth->priv_decode(ret.get(), &algorithm, &key)) { return nullptr; } return ret.release(); } int EVP_marshal_private_key(CBB *cbb, const EVP_PKEY *key) { if (key->ameth == NULL || key->ameth->priv_encode == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); return 0; } return key->ameth->priv_encode(cbb, key); } static bssl::UniquePtr old_priv_decode(CBS *cbs, int type) { bssl::UniquePtr ret(EVP_PKEY_new()); if (ret == nullptr) { return nullptr; } switch (type) { case EVP_PKEY_EC: { bssl::UniquePtr ec_key(EC_KEY_parse_private_key(cbs, nullptr)); if (ec_key == nullptr) { return nullptr; } EVP_PKEY_assign_EC_KEY(ret.get(), ec_key.release()); return ret; } case EVP_PKEY_DSA: { bssl::UniquePtr dsa(DSA_parse_private_key(cbs)); if (dsa == nullptr) { return nullptr; } EVP_PKEY_assign_DSA(ret.get(), dsa.release()); return ret; } case EVP_PKEY_RSA: { bssl::UniquePtr rsa(RSA_parse_private_key(cbs)); if (rsa == nullptr) { return nullptr; } EVP_PKEY_assign_RSA(ret.get(), rsa.release()); return ret; } default: OPENSSL_PUT_ERROR(EVP, EVP_R_UNKNOWN_PUBLIC_KEY_TYPE); return nullptr; } } EVP_PKEY *d2i_PrivateKey(int type, EVP_PKEY **out, const uint8_t **inp, long len) { if (len < 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return nullptr; } // Parse with the legacy format. CBS cbs; CBS_init(&cbs, *inp, (size_t)len); bssl::UniquePtr ret = old_priv_decode(&cbs, type); if (ret == nullptr) { // Try again with PKCS#8. ERR_clear_error(); CBS_init(&cbs, *inp, (size_t)len); ret.reset(EVP_parse_private_key(&cbs)); if (ret == nullptr) { return nullptr; } if (ret->type != type) { OPENSSL_PUT_ERROR(EVP, EVP_R_DIFFERENT_KEY_TYPES); return nullptr; } } if (out != nullptr) { EVP_PKEY_free(*out); *out = ret.get(); } *inp = CBS_data(&cbs); return ret.release(); } // num_elements parses one SEQUENCE from |in| and returns the number of elements // in it. 
On parse error, it returns zero. static size_t num_elements(const uint8_t *in, size_t in_len) { CBS cbs, sequence; CBS_init(&cbs, in, (size_t)in_len); if (!CBS_get_asn1(&cbs, &sequence, CBS_ASN1_SEQUENCE)) { return 0; } size_t count = 0; while (CBS_len(&sequence) > 0) { if (!CBS_get_any_asn1_element(&sequence, NULL, NULL, NULL)) { return 0; } count++; } return count; } EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **out, const uint8_t **inp, long len) { if (len < 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return NULL; } // Parse the input as a PKCS#8 PrivateKeyInfo. CBS cbs; CBS_init(&cbs, *inp, (size_t)len); EVP_PKEY *ret = EVP_parse_private_key(&cbs); if (ret != NULL) { if (out != NULL) { EVP_PKEY_free(*out); *out = ret; } *inp = CBS_data(&cbs); return ret; } ERR_clear_error(); // Count the elements to determine the legacy key format. switch (num_elements(*inp, (size_t)len)) { case 4: return d2i_PrivateKey(EVP_PKEY_EC, out, inp, len); case 6: return d2i_PrivateKey(EVP_PKEY_DSA, out, inp, len); default: return d2i_PrivateKey(EVP_PKEY_RSA, out, inp, len); } } int i2d_PublicKey(const EVP_PKEY *key, uint8_t **outp) { switch (key->type) { case EVP_PKEY_RSA: return i2d_RSAPublicKey(EVP_PKEY_get0_RSA(key), outp); case EVP_PKEY_DSA: return i2d_DSAPublicKey(EVP_PKEY_get0_DSA(key), outp); case EVP_PKEY_EC: return i2o_ECPublicKey(EVP_PKEY_get0_EC_KEY(key), outp); default: OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_PUBLIC_KEY_TYPE); return -1; } } EVP_PKEY *d2i_PublicKey(int type, EVP_PKEY **out, const uint8_t **inp, long len) { bssl::UniquePtr ret(EVP_PKEY_new()); if (ret == nullptr) { return nullptr; } CBS cbs; CBS_init(&cbs, *inp, len < 0 ? 0 : (size_t)len); switch (type) { case EVP_PKEY_RSA: { bssl::UniquePtr rsa(RSA_parse_public_key(&cbs)); if (rsa == nullptr) { return nullptr; } EVP_PKEY_assign_RSA(ret.get(), rsa.release()); break; } // Unlike OpenSSL, we do not support EC keys with this API. The raw EC // public key serialization requires knowing the group. 
In OpenSSL, calling // this function with |EVP_PKEY_EC| and setting |out| to nullptr does not // work. It requires |*out| to include a partially-initialized |EVP_PKEY| to // extract the group. default: OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_PUBLIC_KEY_TYPE); return nullptr; } *inp = CBS_data(&cbs); if (out != nullptr) { EVP_PKEY_free(*out); *out = ret.get(); } return ret.release(); } EVP_PKEY *d2i_PUBKEY(EVP_PKEY **out, const uint8_t **inp, long len) { if (len < 0) { return nullptr; } CBS cbs; CBS_init(&cbs, *inp, (size_t)len); bssl::UniquePtr ret(EVP_parse_public_key(&cbs)); if (ret == nullptr) { return nullptr; } if (out != nullptr) { EVP_PKEY_free(*out); *out = ret.get(); } *inp = CBS_data(&cbs); return ret.release(); } int i2d_PUBKEY(const EVP_PKEY *pkey, uint8_t **outp) { if (pkey == NULL) { return 0; } CBB cbb; if (!CBB_init(&cbb, 128) || !EVP_marshal_public_key(&cbb, pkey)) { CBB_cleanup(&cbb); return -1; } return CBB_finish_i2d(&cbb, outp); } RSA *d2i_RSA_PUBKEY(RSA **out, const uint8_t **inp, long len) { if (len < 0) { return nullptr; } CBS cbs; CBS_init(&cbs, *inp, (size_t)len); bssl::UniquePtr pkey(EVP_parse_public_key(&cbs)); if (pkey == nullptr) { return nullptr; } bssl::UniquePtr rsa(EVP_PKEY_get1_RSA(pkey.get())); if (rsa == nullptr) { return nullptr; } if (out != nullptr) { RSA_free(*out); *out = rsa.get(); } *inp = CBS_data(&cbs); return rsa.release(); } int i2d_RSA_PUBKEY(const RSA *rsa, uint8_t **outp) { if (rsa == nullptr) { return 0; } bssl::UniquePtr pkey(EVP_PKEY_new()); if (pkey == nullptr || !EVP_PKEY_set1_RSA(pkey.get(), const_cast(rsa))) { return -1; } return i2d_PUBKEY(pkey.get(), outp); } DSA *d2i_DSA_PUBKEY(DSA **out, const uint8_t **inp, long len) { if (len < 0) { return nullptr; } CBS cbs; CBS_init(&cbs, *inp, (size_t)len); bssl::UniquePtr pkey(EVP_parse_public_key(&cbs)); if (pkey == nullptr) { return nullptr; } bssl::UniquePtr dsa(EVP_PKEY_get1_DSA(pkey.get())); if (dsa == nullptr) { return nullptr; } if (out != nullptr) { 
DSA_free(*out); *out = dsa.get(); } *inp = CBS_data(&cbs); return dsa.release(); } int i2d_DSA_PUBKEY(const DSA *dsa, uint8_t **outp) { if (dsa == nullptr) { return 0; } bssl::UniquePtr pkey(EVP_PKEY_new()); if (pkey == nullptr || !EVP_PKEY_set1_DSA(pkey.get(), const_cast(dsa))) { return -1; } return i2d_PUBKEY(pkey.get(), outp); } EC_KEY *d2i_EC_PUBKEY(EC_KEY **out, const uint8_t **inp, long len) { if (len < 0) { return NULL; } CBS cbs; CBS_init(&cbs, *inp, (size_t)len); EVP_PKEY *pkey = EVP_parse_public_key(&cbs); if (pkey == NULL) { return NULL; } EC_KEY *ec_key = EVP_PKEY_get1_EC_KEY(pkey); EVP_PKEY_free(pkey); if (ec_key == NULL) { return NULL; } if (out != NULL) { EC_KEY_free(*out); *out = ec_key; } *inp = CBS_data(&cbs); return ec_key; } int i2d_EC_PUBKEY(const EC_KEY *ec_key, uint8_t **outp) { if (ec_key == NULL) { return 0; } bssl::UniquePtr pkey(EVP_PKEY_new()); if (pkey == nullptr || !EVP_PKEY_set1_EC_KEY(pkey.get(), const_cast(ec_key))) { return -1; } return i2d_PUBKEY(pkey.get(), outp); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/evp_ctx.cc ================================================ /* * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../internal.h" #include "internal.h" static const EVP_PKEY_METHOD *const evp_methods[] = { &rsa_pkey_meth, &ec_pkey_meth, &ed25519_pkey_meth, &x25519_pkey_meth, &hkdf_pkey_meth, }; static const EVP_PKEY_METHOD *evp_pkey_meth_find(int type) { for (size_t i = 0; i < sizeof(evp_methods) / sizeof(EVP_PKEY_METHOD *); i++) { if (evp_methods[i]->pkey_id == type) { return evp_methods[i]; } } return NULL; } static EVP_PKEY_CTX *evp_pkey_ctx_new(EVP_PKEY *pkey, ENGINE *e, const EVP_PKEY_METHOD *pmeth) { EVP_PKEY_CTX *ret = reinterpret_cast(OPENSSL_zalloc(sizeof(EVP_PKEY_CTX))); if (!ret) { return NULL; } ret->engine = e; ret->pmeth = pmeth; ret->operation = EVP_PKEY_OP_UNDEFINED; if (pkey) { EVP_PKEY_up_ref(pkey); ret->pkey = pkey; } if (pmeth->init) { if (pmeth->init(ret) <= 0) { EVP_PKEY_free(ret->pkey); OPENSSL_free(ret); return NULL; } } return ret; } EVP_PKEY_CTX *EVP_PKEY_CTX_new(EVP_PKEY *pkey, ENGINE *e) { if (pkey == NULL || pkey->ameth == NULL) { OPENSSL_PUT_ERROR(EVP, ERR_R_PASSED_NULL_PARAMETER); return NULL; } const EVP_PKEY_METHOD *pkey_method = pkey->ameth->pkey_method; if (pkey_method == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); ERR_add_error_dataf("algorithm %d", pkey->ameth->pkey_id); return NULL; } return evp_pkey_ctx_new(pkey, e, pkey_method); } EVP_PKEY_CTX *EVP_PKEY_CTX_new_id(int id, ENGINE *e) { const EVP_PKEY_METHOD *pkey_method = evp_pkey_meth_find(id); if (pkey_method == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); ERR_add_error_dataf("algorithm %d", id); return NULL; } return evp_pkey_ctx_new(NULL, e, pkey_method); } void EVP_PKEY_CTX_free(EVP_PKEY_CTX *ctx) { if (ctx == NULL) { return; } if (ctx->pmeth && ctx->pmeth->cleanup) { ctx->pmeth->cleanup(ctx); } EVP_PKEY_free(ctx->pkey); EVP_PKEY_free(ctx->peerkey); OPENSSL_free(ctx); } 
EVP_PKEY_CTX *EVP_PKEY_CTX_dup(EVP_PKEY_CTX *ctx) { if (!ctx->pmeth || !ctx->pmeth->copy) { return NULL; } EVP_PKEY_CTX *ret = reinterpret_cast(OPENSSL_zalloc(sizeof(EVP_PKEY_CTX))); if (!ret) { return NULL; } ret->pmeth = ctx->pmeth; ret->engine = ctx->engine; ret->operation = ctx->operation; if (ctx->pkey != NULL) { EVP_PKEY_up_ref(ctx->pkey); ret->pkey = ctx->pkey; } if (ctx->peerkey != NULL) { EVP_PKEY_up_ref(ctx->peerkey); ret->peerkey = ctx->peerkey; } if (ctx->pmeth->copy(ret, ctx) <= 0) { ret->pmeth = NULL; EVP_PKEY_CTX_free(ret); OPENSSL_PUT_ERROR(EVP, ERR_LIB_EVP); return NULL; } return ret; } EVP_PKEY *EVP_PKEY_CTX_get0_pkey(EVP_PKEY_CTX *ctx) { return ctx->pkey; } int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, int cmd, int p1, void *p2) { if (!ctx || !ctx->pmeth || !ctx->pmeth->ctrl) { OPENSSL_PUT_ERROR(EVP, EVP_R_COMMAND_NOT_SUPPORTED); return 0; } if (keytype != -1 && ctx->pmeth->pkey_id != keytype) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } if (ctx->operation == EVP_PKEY_OP_UNDEFINED) { OPENSSL_PUT_ERROR(EVP, EVP_R_NO_OPERATION_SET); return 0; } if (optype != -1 && !(ctx->operation & optype)) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_OPERATION); return 0; } return ctx->pmeth->ctrl(ctx, cmd, p1, p2); } int EVP_PKEY_sign_init(EVP_PKEY_CTX *ctx) { if (ctx == NULL || ctx->pmeth == NULL || (ctx->pmeth->sign == NULL && ctx->pmeth->sign_message == NULL)) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } ctx->operation = EVP_PKEY_OP_SIGN; return 1; } int EVP_PKEY_sign(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *sig_len, const uint8_t *digest, size_t digest_len) { if (!ctx || !ctx->pmeth || !ctx->pmeth->sign) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } if (ctx->operation != EVP_PKEY_OP_SIGN) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATON_NOT_INITIALIZED); return 0; } return ctx->pmeth->sign(ctx, sig, sig_len, digest, digest_len); } 
int EVP_PKEY_verify_init(EVP_PKEY_CTX *ctx) { if (ctx == NULL || ctx->pmeth == NULL || (ctx->pmeth->verify == NULL && ctx->pmeth->verify_message == NULL)) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } ctx->operation = EVP_PKEY_OP_VERIFY; return 1; } int EVP_PKEY_verify(EVP_PKEY_CTX *ctx, const uint8_t *sig, size_t sig_len, const uint8_t *digest, size_t digest_len) { if (!ctx || !ctx->pmeth || !ctx->pmeth->verify) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } if (ctx->operation != EVP_PKEY_OP_VERIFY) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATON_NOT_INITIALIZED); return 0; } return ctx->pmeth->verify(ctx, sig, sig_len, digest, digest_len); } int EVP_PKEY_encrypt_init(EVP_PKEY_CTX *ctx) { if (!ctx || !ctx->pmeth || !ctx->pmeth->encrypt) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } ctx->operation = EVP_PKEY_OP_ENCRYPT; return 1; } int EVP_PKEY_encrypt(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *outlen, const uint8_t *in, size_t inlen) { if (!ctx || !ctx->pmeth || !ctx->pmeth->encrypt) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } if (ctx->operation != EVP_PKEY_OP_ENCRYPT) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATON_NOT_INITIALIZED); return 0; } return ctx->pmeth->encrypt(ctx, out, outlen, in, inlen); } int EVP_PKEY_decrypt_init(EVP_PKEY_CTX *ctx) { if (!ctx || !ctx->pmeth || !ctx->pmeth->decrypt) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } ctx->operation = EVP_PKEY_OP_DECRYPT; return 1; } int EVP_PKEY_decrypt(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *outlen, const uint8_t *in, size_t inlen) { if (!ctx || !ctx->pmeth || !ctx->pmeth->decrypt) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } if (ctx->operation != EVP_PKEY_OP_DECRYPT) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATON_NOT_INITIALIZED); return 0; } return ctx->pmeth->decrypt(ctx, out, outlen, 
in, inlen); } int EVP_PKEY_verify_recover_init(EVP_PKEY_CTX *ctx) { if (!ctx || !ctx->pmeth || !ctx->pmeth->verify_recover) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } ctx->operation = EVP_PKEY_OP_VERIFYRECOVER; return 1; } int EVP_PKEY_verify_recover(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *out_len, const uint8_t *sig, size_t sig_len) { if (!ctx || !ctx->pmeth || !ctx->pmeth->verify_recover) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } if (ctx->operation != EVP_PKEY_OP_VERIFYRECOVER) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATON_NOT_INITIALIZED); return 0; } return ctx->pmeth->verify_recover(ctx, out, out_len, sig, sig_len); } int EVP_PKEY_derive_init(EVP_PKEY_CTX *ctx) { if (!ctx || !ctx->pmeth || !ctx->pmeth->derive) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } ctx->operation = EVP_PKEY_OP_DERIVE; return 1; } int EVP_PKEY_derive_set_peer(EVP_PKEY_CTX *ctx, EVP_PKEY *peer) { int ret; if (!ctx || !ctx->pmeth || !(ctx->pmeth->derive || ctx->pmeth->encrypt || ctx->pmeth->decrypt) || !ctx->pmeth->ctrl) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } if (ctx->operation != EVP_PKEY_OP_DERIVE && ctx->operation != EVP_PKEY_OP_ENCRYPT && ctx->operation != EVP_PKEY_OP_DECRYPT) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATON_NOT_INITIALIZED); return 0; } ret = ctx->pmeth->ctrl(ctx, EVP_PKEY_CTRL_PEER_KEY, 0, peer); if (ret <= 0) { return 0; } if (ret == 2) { return 1; } if (!ctx->pkey) { OPENSSL_PUT_ERROR(EVP, EVP_R_NO_KEY_SET); return 0; } if (ctx->pkey->type != peer->type) { OPENSSL_PUT_ERROR(EVP, EVP_R_DIFFERENT_KEY_TYPES); return 0; } // ran@cryptocom.ru: For clarity. The error is if parameters in peer are // present (!missing) but don't match. EVP_PKEY_cmp_parameters may return // 1 (match), 0 (don't match) and -2 (comparison is not defined). -1 // (different key types) is impossible here because it is checked earlier. 
// -2 is OK for us here, as well as 1, so we can check for 0 only. if (!EVP_PKEY_missing_parameters(peer) && !EVP_PKEY_cmp_parameters(ctx->pkey, peer)) { OPENSSL_PUT_ERROR(EVP, EVP_R_DIFFERENT_PARAMETERS); return 0; } EVP_PKEY_free(ctx->peerkey); ctx->peerkey = peer; ret = ctx->pmeth->ctrl(ctx, EVP_PKEY_CTRL_PEER_KEY, 1, peer); if (ret <= 0) { ctx->peerkey = NULL; return 0; } EVP_PKEY_up_ref(peer); return 1; } int EVP_PKEY_derive(EVP_PKEY_CTX *ctx, uint8_t *key, size_t *out_key_len) { if (!ctx || !ctx->pmeth || !ctx->pmeth->derive) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } if (ctx->operation != EVP_PKEY_OP_DERIVE) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATON_NOT_INITIALIZED); return 0; } return ctx->pmeth->derive(ctx, key, out_key_len); } int EVP_PKEY_keygen_init(EVP_PKEY_CTX *ctx) { if (!ctx || !ctx->pmeth || !ctx->pmeth->keygen) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } ctx->operation = EVP_PKEY_OP_KEYGEN; return 1; } int EVP_PKEY_keygen(EVP_PKEY_CTX *ctx, EVP_PKEY **out_pkey) { if (!ctx || !ctx->pmeth || !ctx->pmeth->keygen) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } if (ctx->operation != EVP_PKEY_OP_KEYGEN) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATON_NOT_INITIALIZED); return 0; } if (!out_pkey) { return 0; } if (!*out_pkey) { *out_pkey = EVP_PKEY_new(); if (!*out_pkey) { OPENSSL_PUT_ERROR(EVP, ERR_LIB_EVP); return 0; } } if (!ctx->pmeth->keygen(ctx, *out_pkey)) { EVP_PKEY_free(*out_pkey); *out_pkey = NULL; return 0; } return 1; } int EVP_PKEY_paramgen_init(EVP_PKEY_CTX *ctx) { if (!ctx || !ctx->pmeth || !ctx->pmeth->paramgen) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } ctx->operation = EVP_PKEY_OP_PARAMGEN; return 1; } int EVP_PKEY_paramgen(EVP_PKEY_CTX *ctx, EVP_PKEY **out_pkey) { if (!ctx || !ctx->pmeth || !ctx->pmeth->paramgen) { OPENSSL_PUT_ERROR(EVP, 
EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } if (ctx->operation != EVP_PKEY_OP_PARAMGEN) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATON_NOT_INITIALIZED); return 0; } if (!out_pkey) { return 0; } if (!*out_pkey) { *out_pkey = EVP_PKEY_new(); if (!*out_pkey) { OPENSSL_PUT_ERROR(EVP, ERR_LIB_EVP); return 0; } } if (!ctx->pmeth->paramgen(ctx, *out_pkey)) { EVP_PKEY_free(*out_pkey); *out_pkey = NULL; return 0; } return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/internal.h ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_EVP_INTERNAL_H #define OPENSSL_HEADER_EVP_INTERNAL_H #include #include #if defined(__cplusplus) extern "C" { #endif typedef struct evp_pkey_asn1_method_st EVP_PKEY_ASN1_METHOD; typedef struct evp_pkey_method_st EVP_PKEY_METHOD; struct evp_pkey_asn1_method_st { int pkey_id; uint8_t oid[9]; uint8_t oid_len; const EVP_PKEY_METHOD *pkey_method; // pub_decode decodes |params| and |key| as a SubjectPublicKeyInfo // and writes the result into |out|. It returns one on success and zero on // error. |params| is the AlgorithmIdentifier after the OBJECT IDENTIFIER // type field, and |key| is the contents of the subjectPublicKey with the // leading padding byte checked and removed. Although X.509 uses BIT STRINGs // to represent SubjectPublicKeyInfo, every key type defined encodes the key // as a byte string with the same conversion to BIT STRING. int (*pub_decode)(EVP_PKEY *out, CBS *params, CBS *key); // pub_encode encodes |key| as a SubjectPublicKeyInfo and appends the result // to |out|. It returns one on success and zero on error. 
int (*pub_encode)(CBB *out, const EVP_PKEY *key); int (*pub_cmp)(const EVP_PKEY *a, const EVP_PKEY *b); // priv_decode decodes |params| and |key| as a PrivateKeyInfo and writes the // result into |out|. It returns one on success and zero on error. |params| is // the AlgorithmIdentifier after the OBJECT IDENTIFIER type field, and |key| // is the contents of the OCTET STRING privateKey field. int (*priv_decode)(EVP_PKEY *out, CBS *params, CBS *key); // priv_encode encodes |key| as a PrivateKeyInfo and appends the result to // |out|. It returns one on success and zero on error. int (*priv_encode)(CBB *out, const EVP_PKEY *key); int (*set_priv_raw)(EVP_PKEY *pkey, const uint8_t *in, size_t len); int (*set_pub_raw)(EVP_PKEY *pkey, const uint8_t *in, size_t len); int (*get_priv_raw)(const EVP_PKEY *pkey, uint8_t *out, size_t *out_len); int (*get_pub_raw)(const EVP_PKEY *pkey, uint8_t *out, size_t *out_len); // TODO(davidben): Can these be merged with the functions above? OpenSSL does // not implement |EVP_PKEY_get_raw_public_key|, etc., for |EVP_PKEY_EC|, but // the distinction seems unimportant. OpenSSL 3.0 has since renamed // |EVP_PKEY_get1_tls_encodedpoint| to |EVP_PKEY_get1_encoded_public_key|, and // what is the difference between "raw" and an "encoded" public key. // // One nuisance is the notion of "raw" is slightly ambiguous for EC keys. Is // it a DER ECPrivateKey or just the scalar? int (*set1_tls_encodedpoint)(EVP_PKEY *pkey, const uint8_t *in, size_t len); size_t (*get1_tls_encodedpoint)(const EVP_PKEY *pkey, uint8_t **out_ptr); // pkey_opaque returns 1 if the |pk| is opaque. Opaque keys are backed by // custom implementations which do not expose key material and parameters. 
int (*pkey_opaque)(const EVP_PKEY *pk); int (*pkey_size)(const EVP_PKEY *pk); int (*pkey_bits)(const EVP_PKEY *pk); int (*param_missing)(const EVP_PKEY *pk); int (*param_copy)(EVP_PKEY *to, const EVP_PKEY *from); int (*param_cmp)(const EVP_PKEY *a, const EVP_PKEY *b); void (*pkey_free)(EVP_PKEY *pkey); } /* EVP_PKEY_ASN1_METHOD */; struct evp_pkey_st { CRYPTO_refcount_t references; // type contains one of the EVP_PKEY_* values or NID_undef and determines // the type of |pkey|. int type; // pkey contains a pointer to a structure dependent on |type|. void *pkey; // ameth contains a pointer to a method table that contains many ASN.1 // methods for the key type. const EVP_PKEY_ASN1_METHOD *ameth; } /* EVP_PKEY */; #define EVP_PKEY_OP_UNDEFINED 0 #define EVP_PKEY_OP_KEYGEN (1 << 2) #define EVP_PKEY_OP_SIGN (1 << 3) #define EVP_PKEY_OP_VERIFY (1 << 4) #define EVP_PKEY_OP_VERIFYRECOVER (1 << 5) #define EVP_PKEY_OP_ENCRYPT (1 << 6) #define EVP_PKEY_OP_DECRYPT (1 << 7) #define EVP_PKEY_OP_DERIVE (1 << 8) #define EVP_PKEY_OP_PARAMGEN (1 << 9) #define EVP_PKEY_OP_TYPE_SIG \ (EVP_PKEY_OP_SIGN | EVP_PKEY_OP_VERIFY | EVP_PKEY_OP_VERIFYRECOVER) #define EVP_PKEY_OP_TYPE_CRYPT (EVP_PKEY_OP_ENCRYPT | EVP_PKEY_OP_DECRYPT) #define EVP_PKEY_OP_TYPE_NOGEN \ (EVP_PKEY_OP_SIG | EVP_PKEY_OP_CRYPT | EVP_PKEY_OP_DERIVE) #define EVP_PKEY_OP_TYPE_GEN (EVP_PKEY_OP_KEYGEN | EVP_PKEY_OP_PARAMGEN) // EVP_PKEY_CTX_ctrl performs |cmd| on |ctx|. The |keytype| and |optype| // arguments can be -1 to specify that any type and operation are acceptable, // otherwise |keytype| must match the type of |ctx| and the bits of |optype| // must intersect the operation flags set on |ctx|. // // The |p1| and |p2| arguments depend on the value of |cmd|. // // It returns one on success and zero on error. 
OPENSSL_EXPORT int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, int cmd, int p1, void *p2); #define EVP_PKEY_CTRL_MD 1 #define EVP_PKEY_CTRL_GET_MD 2 // EVP_PKEY_CTRL_PEER_KEY is called with different values of |p1|: // 0: Is called from |EVP_PKEY_derive_set_peer| and |p2| contains a peer key. // If the return value is <= 0, the key is rejected. // 1: Is called at the end of |EVP_PKEY_derive_set_peer| and |p2| contains a // peer key. If the return value is <= 0, the key is rejected. // 2: Is called with |p2| == NULL to test whether the peer's key was used. // (EC)DH always return one in this case. // 3: Is called with |p2| == NULL to set whether the peer's key was used. // (EC)DH always return one in this case. This was only used for GOST. #define EVP_PKEY_CTRL_PEER_KEY 3 // EVP_PKEY_ALG_CTRL is the base value from which key-type specific ctrl // commands are numbered. #define EVP_PKEY_ALG_CTRL 0x1000 #define EVP_PKEY_CTRL_RSA_PADDING (EVP_PKEY_ALG_CTRL + 1) #define EVP_PKEY_CTRL_GET_RSA_PADDING (EVP_PKEY_ALG_CTRL + 2) #define EVP_PKEY_CTRL_RSA_PSS_SALTLEN (EVP_PKEY_ALG_CTRL + 3) #define EVP_PKEY_CTRL_GET_RSA_PSS_SALTLEN (EVP_PKEY_ALG_CTRL + 4) #define EVP_PKEY_CTRL_RSA_KEYGEN_BITS (EVP_PKEY_ALG_CTRL + 5) #define EVP_PKEY_CTRL_RSA_KEYGEN_PUBEXP (EVP_PKEY_ALG_CTRL + 6) #define EVP_PKEY_CTRL_RSA_OAEP_MD (EVP_PKEY_ALG_CTRL + 7) #define EVP_PKEY_CTRL_GET_RSA_OAEP_MD (EVP_PKEY_ALG_CTRL + 8) #define EVP_PKEY_CTRL_RSA_MGF1_MD (EVP_PKEY_ALG_CTRL + 9) #define EVP_PKEY_CTRL_GET_RSA_MGF1_MD (EVP_PKEY_ALG_CTRL + 10) #define EVP_PKEY_CTRL_RSA_OAEP_LABEL (EVP_PKEY_ALG_CTRL + 11) #define EVP_PKEY_CTRL_GET_RSA_OAEP_LABEL (EVP_PKEY_ALG_CTRL + 12) #define EVP_PKEY_CTRL_EC_PARAMGEN_CURVE_NID (EVP_PKEY_ALG_CTRL + 13) #define EVP_PKEY_CTRL_HKDF_MODE (EVP_PKEY_ALG_CTRL + 14) #define EVP_PKEY_CTRL_HKDF_MD (EVP_PKEY_ALG_CTRL + 15) #define EVP_PKEY_CTRL_HKDF_KEY (EVP_PKEY_ALG_CTRL + 16) #define EVP_PKEY_CTRL_HKDF_SALT (EVP_PKEY_ALG_CTRL + 17) #define 
EVP_PKEY_CTRL_HKDF_INFO (EVP_PKEY_ALG_CTRL + 18) #define EVP_PKEY_CTRL_DH_PAD (EVP_PKEY_ALG_CTRL + 19) struct evp_pkey_ctx_st { // Method associated with this operation const EVP_PKEY_METHOD *pmeth; // Engine that implements this method or NULL if builtin ENGINE *engine; // Key: may be NULL EVP_PKEY *pkey; // Peer key for key agreement, may be NULL EVP_PKEY *peerkey; // operation contains one of the |EVP_PKEY_OP_*| values. int operation; // Algorithm specific data void *data; } /* EVP_PKEY_CTX */; struct evp_pkey_method_st { int pkey_id; int (*init)(EVP_PKEY_CTX *ctx); int (*copy)(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src); void (*cleanup)(EVP_PKEY_CTX *ctx); int (*keygen)(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey); int (*sign)(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *siglen, const uint8_t *tbs, size_t tbslen); int (*sign_message)(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *siglen, const uint8_t *tbs, size_t tbslen); int (*verify)(EVP_PKEY_CTX *ctx, const uint8_t *sig, size_t siglen, const uint8_t *tbs, size_t tbslen); int (*verify_message)(EVP_PKEY_CTX *ctx, const uint8_t *sig, size_t siglen, const uint8_t *tbs, size_t tbslen); int (*verify_recover)(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *out_len, const uint8_t *sig, size_t sig_len); int (*encrypt)(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *outlen, const uint8_t *in, size_t inlen); int (*decrypt)(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *outlen, const uint8_t *in, size_t inlen); int (*derive)(EVP_PKEY_CTX *ctx, uint8_t *key, size_t *keylen); int (*paramgen)(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey); int (*ctrl)(EVP_PKEY_CTX *ctx, int type, int p1, void *p2); } /* EVP_PKEY_METHOD */; typedef struct { // key is the concatenation of the private seed and public key. It is stored // as a single 64-bit array to allow passing to |ED25519_sign|. If // |has_private| is false, the first 32 bytes are uninitialized and the public // key is in the last 32 bytes. 
uint8_t key[64]; char has_private; } ED25519_KEY; #define ED25519_PUBLIC_KEY_OFFSET 32 typedef struct { uint8_t pub[32]; uint8_t priv[32]; char has_private; } X25519_KEY; extern const EVP_PKEY_ASN1_METHOD dsa_asn1_meth; extern const EVP_PKEY_ASN1_METHOD ec_asn1_meth; extern const EVP_PKEY_ASN1_METHOD rsa_asn1_meth; extern const EVP_PKEY_ASN1_METHOD ed25519_asn1_meth; extern const EVP_PKEY_ASN1_METHOD x25519_asn1_meth; extern const EVP_PKEY_ASN1_METHOD dh_asn1_meth; extern const EVP_PKEY_METHOD rsa_pkey_meth; extern const EVP_PKEY_METHOD ec_pkey_meth; extern const EVP_PKEY_METHOD ed25519_pkey_meth; extern const EVP_PKEY_METHOD x25519_pkey_meth; extern const EVP_PKEY_METHOD hkdf_pkey_meth; extern const EVP_PKEY_METHOD dh_pkey_meth; // evp_pkey_set_method behaves like |EVP_PKEY_set_type|, but takes a pointer to // a method table. This avoids depending on every |EVP_PKEY_ASN1_METHOD|. void evp_pkey_set_method(EVP_PKEY *pkey, const EVP_PKEY_ASN1_METHOD *method); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_EVP_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/p_dh.cc ================================================ /* * Copyright 2006-2019 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "internal.h" namespace { typedef struct dh_pkey_ctx_st { int pad; } DH_PKEY_CTX; } // namespace static int pkey_dh_init(EVP_PKEY_CTX *ctx) { DH_PKEY_CTX *dctx = reinterpret_cast(OPENSSL_zalloc(sizeof(DH_PKEY_CTX))); if (dctx == NULL) { return 0; } ctx->data = dctx; return 1; } static int pkey_dh_copy(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src) { if (!pkey_dh_init(dst)) { return 0; } const DH_PKEY_CTX *sctx = reinterpret_cast(src->data); DH_PKEY_CTX *dctx = reinterpret_cast(dst->data); dctx->pad = sctx->pad; return 1; } static void pkey_dh_cleanup(EVP_PKEY_CTX *ctx) { OPENSSL_free(ctx->data); ctx->data = NULL; } static int pkey_dh_keygen(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey) { DH *dh = DH_new(); if (dh == NULL || !EVP_PKEY_assign_DH(pkey, dh)) { DH_free(dh); return 0; } if (ctx->pkey != NULL && !EVP_PKEY_copy_parameters(pkey, ctx->pkey)) { return 0; } return DH_generate_key(dh); } static int pkey_dh_derive(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *out_len) { DH_PKEY_CTX *dctx = reinterpret_cast(ctx->data); if (ctx->pkey == NULL || ctx->peerkey == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_KEYS_NOT_SET); return 0; } DH *our_key = reinterpret_cast(ctx->pkey->pkey); DH *peer_key = reinterpret_cast(ctx->peerkey->pkey); if (our_key == NULL || peer_key == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_KEYS_NOT_SET); return 0; } const BIGNUM *pub_key = DH_get0_pub_key(peer_key); if (pub_key == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_KEYS_NOT_SET); return 0; } if (out == NULL) { *out_len = DH_size(our_key); return 1; } if (*out_len < (size_t)DH_size(our_key)) { OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL); return 0; } int ret = dctx->pad ? 
DH_compute_key_padded(out, pub_key, our_key) : DH_compute_key(out, pub_key, our_key); if (ret < 0) { return 0; } assert(ret <= DH_size(our_key)); *out_len = (size_t)ret; return 1; } static int pkey_dh_ctrl(EVP_PKEY_CTX *ctx, int type, int p1, void *p2) { DH_PKEY_CTX *dctx = reinterpret_cast(ctx->data); switch (type) { case EVP_PKEY_CTRL_PEER_KEY: // |EVP_PKEY_derive_set_peer| requires the key implement this command, // even if it is a no-op. return 1; case EVP_PKEY_CTRL_DH_PAD: dctx->pad = p1; return 1; default: OPENSSL_PUT_ERROR(EVP, EVP_R_COMMAND_NOT_SUPPORTED); return 0; } } const EVP_PKEY_METHOD dh_pkey_meth = { /*pkey_id=*/EVP_PKEY_DH, /*init=*/pkey_dh_init, /*copy=*/pkey_dh_copy, /*cleanup=*/pkey_dh_cleanup, /*keygen=*/pkey_dh_keygen, /*sign=*/nullptr, /*sign_message=*/nullptr, /*verify=*/nullptr, /*verify_message=*/nullptr, /*verify_recover=*/nullptr, /*encrypt=*/nullptr, /*decrypt=*/nullptr, /*derive=*/pkey_dh_derive, /*paramgen=*/nullptr, /*ctrl=*/pkey_dh_ctrl, }; int EVP_PKEY_CTX_set_dh_pad(EVP_PKEY_CTX *ctx, int pad) { return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DH, EVP_PKEY_OP_DERIVE, EVP_PKEY_CTRL_DH_PAD, pad, NULL); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/p_dh_asn1.cc ================================================ /* * Copyright 2006-2021 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "../internal.h" #include "internal.h" static void dh_free(EVP_PKEY *pkey) { DH_free(reinterpret_cast(pkey->pkey)); pkey->pkey = NULL; } static int dh_size(const EVP_PKEY *pkey) { return DH_size(reinterpret_cast(pkey->pkey)); } static int dh_bits(const EVP_PKEY *pkey) { return DH_bits(reinterpret_cast(pkey->pkey)); } static int dh_param_missing(const EVP_PKEY *pkey) { const DH *dh = reinterpret_cast(pkey->pkey); return dh == NULL || DH_get0_p(dh) == NULL || DH_get0_g(dh) == NULL; } static int dh_param_copy(EVP_PKEY *to, const EVP_PKEY *from) { if (dh_param_missing(from)) { OPENSSL_PUT_ERROR(EVP, EVP_R_MISSING_PARAMETERS); return 0; } const DH *dh = reinterpret_cast(from->pkey); const BIGNUM *q_old = DH_get0_q(dh); BIGNUM *p = BN_dup(DH_get0_p(dh)); BIGNUM *q = q_old == NULL ? NULL : BN_dup(q_old); BIGNUM *g = BN_dup(DH_get0_g(dh)); if (p == NULL || (q_old != NULL && q == NULL) || g == NULL || !DH_set0_pqg(reinterpret_cast(to->pkey), p, q, g)) { BN_free(p); BN_free(q); BN_free(g); return 0; } // |DH_set0_pqg| took ownership of |p|, |q|, and |g|. return 1; } static int dh_param_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { if (dh_param_missing(a) || dh_param_missing(b)) { return -2; } // Matching OpenSSL, only compare p and g for PKCS#3-style Diffie-Hellman. // OpenSSL only checks q in X9.42-style Diffie-Hellman ("DHX"). 
const DH *a_dh = reinterpret_cast(a->pkey); const DH *b_dh = reinterpret_cast(b->pkey); return BN_cmp(DH_get0_p(a_dh), DH_get0_p(b_dh)) == 0 && BN_cmp(DH_get0_g(a_dh), DH_get0_g(b_dh)) == 0; } static int dh_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { if (dh_param_cmp(a, b) <= 0) { return 0; } const DH *a_dh = reinterpret_cast(a->pkey); const DH *b_dh = reinterpret_cast(b->pkey); return BN_cmp(DH_get0_pub_key(a_dh), DH_get0_pub_key(b_dh)) == 0; } const EVP_PKEY_ASN1_METHOD dh_asn1_meth = { /*pkey_id=*/EVP_PKEY_DH, /*oid=*/{0}, /*oid_len=*/0, /*pkey_method=*/&dh_pkey_meth, /*pub_decode=*/nullptr, /*pub_encode=*/nullptr, /*pub_cmp=*/dh_pub_cmp, /*priv_decode=*/nullptr, /*priv_encode=*/nullptr, /*set_priv_raw=*/nullptr, /*set_pub_raw=*/nullptr, /*get_priv_raw=*/nullptr, /*get_pub_raw=*/nullptr, /*set1_tls_encodedpoint=*/nullptr, /*get1_tls_encodedpoint=*/nullptr, /*pkey_opaque=*/nullptr, /*pkey_size=*/dh_size, /*pkey_bits=*/dh_bits, /*param_missing=*/dh_param_missing, /*param_copy=*/dh_param_copy, /*param_cmp=*/dh_param_cmp, /*pkey_free=*/dh_free, }; int EVP_PKEY_set1_DH(EVP_PKEY *pkey, DH *key) { if (EVP_PKEY_assign_DH(pkey, key)) { DH_up_ref(key); return 1; } return 0; } int EVP_PKEY_assign_DH(EVP_PKEY *pkey, DH *key) { evp_pkey_set_method(pkey, &dh_asn1_meth); pkey->pkey = key; return key != NULL; } DH *EVP_PKEY_get0_DH(const EVP_PKEY *pkey) { if (pkey->type != EVP_PKEY_DH) { OPENSSL_PUT_ERROR(EVP, EVP_R_EXPECTING_A_DH_KEY); return NULL; } return reinterpret_cast(const_cast(pkey)->pkey); } DH *EVP_PKEY_get1_DH(const EVP_PKEY *pkey) { DH *dh = EVP_PKEY_get0_DH(pkey); if (dh != NULL) { DH_up_ref(dh); } return dh; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/p_dsa_asn1.cc ================================================ /* * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "../dsa/internal.h" #include "internal.h" static int dsa_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { // See RFC 3279, section 2.3.2. // Parameters may or may not be present. bssl::UniquePtr dsa; if (CBS_len(params) == 0) { dsa.reset(DSA_new()); if (dsa == nullptr) { return 0; } } else { dsa.reset(DSA_parse_parameters(params)); if (dsa == nullptr || CBS_len(params) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } } dsa->pub_key = BN_new(); if (dsa->pub_key == nullptr) { return 0; } if (!BN_parse_asn1_unsigned(key, dsa->pub_key) || CBS_len(key) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } EVP_PKEY_assign_DSA(out, dsa.release()); return 1; } static int dsa_pub_encode(CBB *out, const EVP_PKEY *key) { const DSA *dsa = reinterpret_cast(key->pkey); const int has_params = dsa->p != nullptr && dsa->q != nullptr && dsa->g != nullptr; // See RFC 5480, section 2. CBB spki, algorithm, oid, key_bitstring; if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) || !CBB_add_bytes(&oid, dsa_asn1_meth.oid, dsa_asn1_meth.oid_len) || (has_params && !DSA_marshal_parameters(&algorithm, dsa)) || !CBB_add_asn1(&spki, &key_bitstring, CBS_ASN1_BITSTRING) || !CBB_add_u8(&key_bitstring, 0 /* padding */) || !BN_marshal_asn1(&key_bitstring, dsa->pub_key) || !CBB_flush(out)) { OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR); return 0; } return 1; } static int dsa_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { // See PKCS#11, v2.40, section 2.5. // Decode parameters. 
bssl::UniquePtr dsa(DSA_parse_parameters(params)); if (dsa == nullptr || CBS_len(params) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } dsa->priv_key = BN_new(); if (dsa->priv_key == nullptr) { return 0; } if (!BN_parse_asn1_unsigned(key, dsa->priv_key) || CBS_len(key) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } // To avoid DoS attacks when importing private keys, check bounds on |dsa|. // This bounds |dsa->priv_key| against |dsa->q| and bounds |dsa->q|'s bit // width. if (!dsa_check_key(dsa.get())) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } // Calculate the public key. bssl::UniquePtr ctx(BN_CTX_new()); dsa->pub_key = BN_new(); if (ctx == nullptr || dsa->pub_key == nullptr || !BN_mod_exp_mont_consttime(dsa->pub_key, dsa->g, dsa->priv_key, dsa->p, ctx.get(), nullptr)) { return 0; } EVP_PKEY_assign_DSA(out, dsa.release()); return 1; } static int dsa_priv_encode(CBB *out, const EVP_PKEY *key) { const DSA *dsa = reinterpret_cast(key->pkey); if (dsa == nullptr || dsa->priv_key == nullptr) { OPENSSL_PUT_ERROR(EVP, EVP_R_MISSING_PARAMETERS); return 0; } // See PKCS#11, v2.40, section 2.5. 
CBB pkcs8, algorithm, oid, private_key; if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) || !CBB_add_asn1_uint64(&pkcs8, 0 /* version */) || !CBB_add_asn1(&pkcs8, &algorithm, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) || !CBB_add_bytes(&oid, dsa_asn1_meth.oid, dsa_asn1_meth.oid_len) || !DSA_marshal_parameters(&algorithm, dsa) || !CBB_add_asn1(&pkcs8, &private_key, CBS_ASN1_OCTETSTRING) || !BN_marshal_asn1(&private_key, dsa->priv_key) || !CBB_flush(out)) { OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR); return 0; } return 1; } static int int_dsa_size(const EVP_PKEY *pkey) { const DSA *dsa = reinterpret_cast(pkey->pkey); return DSA_size(dsa); } static int dsa_bits(const EVP_PKEY *pkey) { const DSA *dsa = reinterpret_cast(pkey->pkey); return BN_num_bits(DSA_get0_p(dsa)); } static int dsa_missing_parameters(const EVP_PKEY *pkey) { const DSA *dsa = reinterpret_cast(pkey->pkey); if (DSA_get0_p(dsa) == nullptr || DSA_get0_q(dsa) == nullptr || DSA_get0_g(dsa) == nullptr) { return 1; } return 0; } static int dup_bn_into(BIGNUM **out, BIGNUM *src) { bssl::UniquePtr a(BN_dup(src)); if (a == nullptr) { return 0; } BN_free(*out); *out = a.release(); return 1; } static int dsa_copy_parameters(EVP_PKEY *to, const EVP_PKEY *from) { DSA *to_dsa = reinterpret_cast(to->pkey); const DSA *from_dsa = reinterpret_cast(from->pkey); if (!dup_bn_into(&to_dsa->p, from_dsa->p) || !dup_bn_into(&to_dsa->q, from_dsa->q) || !dup_bn_into(&to_dsa->g, from_dsa->g)) { return 0; } return 1; } static int dsa_cmp_parameters(const EVP_PKEY *a, const EVP_PKEY *b) { const DSA *a_dsa = reinterpret_cast(a->pkey); const DSA *b_dsa = reinterpret_cast(b->pkey); return BN_cmp(DSA_get0_p(a_dsa), DSA_get0_p(b_dsa)) == 0 && BN_cmp(DSA_get0_q(a_dsa), DSA_get0_q(b_dsa)) == 0 && BN_cmp(DSA_get0_g(a_dsa), DSA_get0_g(b_dsa)) == 0; } static int dsa_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { const DSA *a_dsa = reinterpret_cast(a->pkey); const DSA *b_dsa = reinterpret_cast(b->pkey); return 
BN_cmp(DSA_get0_pub_key(b_dsa), DSA_get0_pub_key(a_dsa)) == 0; } static void int_dsa_free(EVP_PKEY *pkey) { DSA_free(reinterpret_cast(pkey->pkey)); pkey->pkey = nullptr; } const EVP_PKEY_ASN1_METHOD dsa_asn1_meth = { EVP_PKEY_DSA, // 1.2.840.10040.4.1 {0x2a, 0x86, 0x48, 0xce, 0x38, 0x04, 0x01}, 7, /*pkey_method=*/nullptr, dsa_pub_decode, dsa_pub_encode, dsa_pub_cmp, dsa_priv_decode, dsa_priv_encode, /*set_priv_raw=*/nullptr, /*set_pub_raw=*/nullptr, /*get_priv_raw=*/nullptr, /*get_pub_raw=*/nullptr, /*set1_tls_encodedpoint=*/nullptr, /*get1_tls_encodedpoint=*/nullptr, /*pkey_opaque=*/nullptr, int_dsa_size, dsa_bits, dsa_missing_parameters, dsa_copy_parameters, dsa_cmp_parameters, int_dsa_free, }; int EVP_PKEY_CTX_set_dsa_paramgen_bits(EVP_PKEY_CTX *ctx, int nbits) { // BoringSSL does not support DSA in |EVP_PKEY_CTX|. OPENSSL_PUT_ERROR(EVP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } int EVP_PKEY_CTX_set_dsa_paramgen_q_bits(EVP_PKEY_CTX *ctx, int qbits) { // BoringSSL does not support DSA in |EVP_PKEY_CTX|. OPENSSL_PUT_ERROR(EVP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } int EVP_PKEY_set1_DSA(EVP_PKEY *pkey, DSA *key) { if (EVP_PKEY_assign_DSA(pkey, key)) { DSA_up_ref(key); return 1; } return 0; } int EVP_PKEY_assign_DSA(EVP_PKEY *pkey, DSA *key) { evp_pkey_set_method(pkey, &dsa_asn1_meth); pkey->pkey = key; return key != nullptr; } DSA *EVP_PKEY_get0_DSA(const EVP_PKEY *pkey) { if (pkey->type != EVP_PKEY_DSA) { OPENSSL_PUT_ERROR(EVP, EVP_R_EXPECTING_A_DSA_KEY); return nullptr; } return reinterpret_cast(pkey->pkey); } DSA *EVP_PKEY_get1_DSA(const EVP_PKEY *pkey) { DSA *dsa = EVP_PKEY_get0_DSA(pkey); if (dsa != nullptr) { DSA_up_ref(dsa); } return dsa; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/p_ec.cc ================================================ /* * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). 
You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include #include "../fipsmodule/ec/internal.h" #include "../internal.h" #include "internal.h" typedef struct { // message digest const EVP_MD *md; const EC_GROUP *gen_group; } EC_PKEY_CTX; static int pkey_ec_init(EVP_PKEY_CTX *ctx) { EC_PKEY_CTX *dctx = reinterpret_cast(OPENSSL_zalloc(sizeof(EC_PKEY_CTX))); if (!dctx) { return 0; } ctx->data = dctx; return 1; } static int pkey_ec_copy(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src) { if (!pkey_ec_init(dst)) { return 0; } const EC_PKEY_CTX *sctx = reinterpret_cast(src->data); EC_PKEY_CTX *dctx = reinterpret_cast(dst->data); dctx->md = sctx->md; dctx->gen_group = sctx->gen_group; return 1; } static void pkey_ec_cleanup(EVP_PKEY_CTX *ctx) { EC_PKEY_CTX *dctx = reinterpret_cast(ctx->data); if (!dctx) { return; } OPENSSL_free(dctx); } static int pkey_ec_sign(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *siglen, const uint8_t *tbs, size_t tbslen) { const EC_KEY *ec = reinterpret_cast(ctx->pkey->pkey); if (!sig) { *siglen = ECDSA_size(ec); return 1; } else if (*siglen < (size_t)ECDSA_size(ec)) { OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL); return 0; } unsigned int sltmp; if (!ECDSA_sign(0, tbs, tbslen, sig, &sltmp, ec)) { return 0; } *siglen = (size_t)sltmp; return 1; } static int pkey_ec_verify(EVP_PKEY_CTX *ctx, const uint8_t *sig, size_t siglen, const uint8_t *tbs, size_t tbslen) { const EC_KEY *ec_key = reinterpret_cast(ctx->pkey->pkey); return ECDSA_verify(0, tbs, tbslen, sig, siglen, ec_key); } static int pkey_ec_derive(EVP_PKEY_CTX *ctx, uint8_t *key, size_t *keylen) { if (!ctx->pkey || !ctx->peerkey) { OPENSSL_PUT_ERROR(EVP, EVP_R_KEYS_NOT_SET); return 0; } const EC_KEY *eckey = reinterpret_cast(ctx->pkey->pkey); if (!key) { const EC_GROUP *group; 
group = EC_KEY_get0_group(eckey); *keylen = (EC_GROUP_get_degree(group) + 7) / 8; return 1; } const EC_KEY *eckey_peer = reinterpret_cast(ctx->peerkey->pkey); const EC_POINT *pubkey = EC_KEY_get0_public_key(eckey_peer); // NB: unlike PKCS#3 DH, if *outlen is less than maximum size this is // not an error, the result is truncated. size_t outlen = *keylen; int ret = ECDH_compute_key(key, outlen, pubkey, eckey, 0); if (ret < 0) { return 0; } *keylen = ret; return 1; } static int pkey_ec_ctrl(EVP_PKEY_CTX *ctx, int type, int p1, void *p2) { EC_PKEY_CTX *dctx = reinterpret_cast(ctx->data); switch (type) { case EVP_PKEY_CTRL_MD: { const EVP_MD *md = reinterpret_cast(p2); int md_type = EVP_MD_type(md); if (md_type != NID_sha1 && md_type != NID_sha224 && md_type != NID_sha256 && md_type != NID_sha384 && md_type != NID_sha512) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_DIGEST_TYPE); return 0; } dctx->md = md; return 1; } case EVP_PKEY_CTRL_GET_MD: *(const EVP_MD **)p2 = dctx->md; return 1; case EVP_PKEY_CTRL_PEER_KEY: // Default behaviour is OK return 1; case EVP_PKEY_CTRL_EC_PARAMGEN_CURVE_NID: { const EC_GROUP *group = EC_GROUP_new_by_curve_name(p1); if (group == NULL) { return 0; } dctx->gen_group = group; return 1; } default: OPENSSL_PUT_ERROR(EVP, EVP_R_COMMAND_NOT_SUPPORTED); return 0; } } static int pkey_ec_keygen(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey) { EC_PKEY_CTX *dctx = reinterpret_cast(ctx->data); const EC_GROUP *group = dctx->gen_group; if (group == NULL) { if (ctx->pkey == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_NO_PARAMETERS_SET); return 0; } group = EC_KEY_get0_group(reinterpret_cast(ctx->pkey->pkey)); } EC_KEY *ec = EC_KEY_new(); if (ec == NULL || !EC_KEY_set_group(ec, group) || !EC_KEY_generate_key(ec)) { EC_KEY_free(ec); return 0; } EVP_PKEY_assign_EC_KEY(pkey, ec); return 1; } static int pkey_ec_paramgen(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey) { EC_PKEY_CTX *dctx = reinterpret_cast(ctx->data); if (dctx->gen_group == NULL) { OPENSSL_PUT_ERROR(EVP, 
EVP_R_NO_PARAMETERS_SET); return 0; } EC_KEY *ec = EC_KEY_new(); if (ec == NULL || !EC_KEY_set_group(ec, dctx->gen_group)) { EC_KEY_free(ec); return 0; } EVP_PKEY_assign_EC_KEY(pkey, ec); return 1; } const EVP_PKEY_METHOD ec_pkey_meth = { EVP_PKEY_EC, pkey_ec_init, pkey_ec_copy, pkey_ec_cleanup, pkey_ec_keygen, pkey_ec_sign, NULL /* sign_message */, pkey_ec_verify, NULL /* verify_message */, NULL /* verify_recover */, NULL /* encrypt */, NULL /* decrypt */, pkey_ec_derive, pkey_ec_paramgen, pkey_ec_ctrl, }; int EVP_PKEY_CTX_set_ec_paramgen_curve_nid(EVP_PKEY_CTX *ctx, int nid) { return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, EVP_PKEY_OP_TYPE_GEN, EVP_PKEY_CTRL_EC_PARAMGEN_CURVE_NID, nid, NULL); } int EVP_PKEY_CTX_set_ec_param_enc(EVP_PKEY_CTX *ctx, int encoding) { // BoringSSL only supports named curve syntax. if (encoding != OPENSSL_EC_NAMED_CURVE) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PARAMETERS); return 0; } return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/p_ec_asn1.cc ================================================ /* * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include "internal.h" static int eckey_pub_encode(CBB *out, const EVP_PKEY *key) { const EC_KEY *ec_key = reinterpret_cast(key->pkey); const EC_GROUP *group = EC_KEY_get0_group(ec_key); const EC_POINT *public_key = EC_KEY_get0_public_key(ec_key); // See RFC 5480, section 2. 
CBB spki, algorithm, oid, key_bitstring; if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) || !CBB_add_bytes(&oid, ec_asn1_meth.oid, ec_asn1_meth.oid_len) || !EC_KEY_marshal_curve_name(&algorithm, group) || !CBB_add_asn1(&spki, &key_bitstring, CBS_ASN1_BITSTRING) || !CBB_add_u8(&key_bitstring, 0 /* padding */) || !EC_POINT_point2cbb(&key_bitstring, group, public_key, POINT_CONVERSION_UNCOMPRESSED, NULL) || !CBB_flush(out)) { OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR); return 0; } return 1; } static int eckey_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { // See RFC 5480, section 2. // The parameters are a named curve. const EC_GROUP *group = EC_KEY_parse_curve_name(params); if (group == NULL || CBS_len(params) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } bssl::UniquePtr eckey(EC_KEY_new()); if (eckey == nullptr || // !EC_KEY_set_group(eckey.get(), group) || !EC_KEY_oct2key(eckey.get(), CBS_data(key), CBS_len(key), nullptr)) { return 0; } EVP_PKEY_assign_EC_KEY(out, eckey.release()); return 1; } static int eckey_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { const EC_KEY *a_ec = reinterpret_cast(a->pkey); const EC_KEY *b_ec = reinterpret_cast(b->pkey); const EC_GROUP *group = EC_KEY_get0_group(b_ec); const EC_POINT *pa = EC_KEY_get0_public_key(a_ec), *pb = EC_KEY_get0_public_key(b_ec); int r = EC_POINT_cmp(group, pa, pb, NULL); if (r == 0) { return 1; } else if (r == 1) { return 0; } else { return -2; } } static int eckey_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { // See RFC 5915. 
const EC_GROUP *group = EC_KEY_parse_parameters(params); if (group == NULL || CBS_len(params) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } EC_KEY *ec_key = EC_KEY_parse_private_key(key, group); if (ec_key == NULL || CBS_len(key) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); EC_KEY_free(ec_key); return 0; } EVP_PKEY_assign_EC_KEY(out, ec_key); return 1; } static int eckey_priv_encode(CBB *out, const EVP_PKEY *key) { const EC_KEY *ec_key = reinterpret_cast(key->pkey); // Omit the redundant copy of the curve name. This contradicts RFC 5915 but // aligns with PKCS #11. SEC 1 only says they may be omitted if known by other // means. Both OpenSSL and NSS omit the redundant parameters, so we omit them // as well. unsigned enc_flags = EC_KEY_get_enc_flags(ec_key) | EC_PKEY_NO_PARAMETERS; // See RFC 5915. CBB pkcs8, algorithm, oid, private_key; if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) || !CBB_add_asn1_uint64(&pkcs8, 0 /* version */) || !CBB_add_asn1(&pkcs8, &algorithm, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) || !CBB_add_bytes(&oid, ec_asn1_meth.oid, ec_asn1_meth.oid_len) || !EC_KEY_marshal_curve_name(&algorithm, EC_KEY_get0_group(ec_key)) || !CBB_add_asn1(&pkcs8, &private_key, CBS_ASN1_OCTETSTRING) || !EC_KEY_marshal_private_key(&private_key, ec_key, enc_flags) || !CBB_flush(out)) { OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR); return 0; } return 1; } static int eckey_set1_tls_encodedpoint(EVP_PKEY *pkey, const uint8_t *in, size_t len) { EC_KEY *ec_key = reinterpret_cast(pkey->pkey); if (ec_key == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_NO_KEY_SET); return 0; } return EC_KEY_oct2key(ec_key, in, len, NULL); } static size_t eckey_get1_tls_encodedpoint(const EVP_PKEY *pkey, uint8_t **out_ptr) { const EC_KEY *ec_key = reinterpret_cast(pkey->pkey); if (ec_key == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_NO_KEY_SET); return 0; } return EC_KEY_key2buf(ec_key, POINT_CONVERSION_UNCOMPRESSED, out_ptr, NULL); } static int 
int_ec_size(const EVP_PKEY *pkey) { const EC_KEY *ec_key = reinterpret_cast(pkey->pkey); return ECDSA_size(ec_key); } static int ec_bits(const EVP_PKEY *pkey) { const EC_KEY *ec_key = reinterpret_cast(pkey->pkey); const EC_GROUP *group = EC_KEY_get0_group(ec_key); if (group == NULL) { ERR_clear_error(); return 0; } return EC_GROUP_order_bits(group); } static int ec_missing_parameters(const EVP_PKEY *pkey) { const EC_KEY *ec_key = reinterpret_cast(pkey->pkey); return ec_key == NULL || EC_KEY_get0_group(ec_key) == NULL; } static int ec_copy_parameters(EVP_PKEY *to, const EVP_PKEY *from) { const EC_KEY *from_key = reinterpret_cast(from->pkey); if (from_key == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_NO_KEY_SET); return 0; } const EC_GROUP *group = EC_KEY_get0_group(from_key); if (group == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_MISSING_PARAMETERS); return 0; } if (to->pkey == NULL) { to->pkey = EC_KEY_new(); if (to->pkey == NULL) { return 0; } } return EC_KEY_set_group(reinterpret_cast(to->pkey), group); } static int ec_cmp_parameters(const EVP_PKEY *a, const EVP_PKEY *b) { const EC_KEY *a_ec = reinterpret_cast(a->pkey); const EC_KEY *b_ec = reinterpret_cast(b->pkey); if (a_ec == NULL || b_ec == NULL) { return -2; } const EC_GROUP *group_a = EC_KEY_get0_group(a_ec), *group_b = EC_KEY_get0_group(b_ec); if (group_a == NULL || group_b == NULL) { return -2; } if (EC_GROUP_cmp(group_a, group_b, NULL) != 0) { // mismatch return 0; } return 1; } static void int_ec_free(EVP_PKEY *pkey) { EC_KEY_free(reinterpret_cast(pkey->pkey)); pkey->pkey = NULL; } static int eckey_opaque(const EVP_PKEY *pkey) { const EC_KEY *ec_key = reinterpret_cast(pkey->pkey); return EC_KEY_is_opaque(ec_key); } const EVP_PKEY_ASN1_METHOD ec_asn1_meth = { EVP_PKEY_EC, // 1.2.840.10045.2.1 {0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01}, 7, &ec_pkey_meth, eckey_pub_decode, eckey_pub_encode, eckey_pub_cmp, eckey_priv_decode, eckey_priv_encode, /*set_priv_raw=*/NULL, /*set_pub_raw=*/NULL, /*get_priv_raw=*/NULL, 
/*get_pub_raw=*/NULL, eckey_set1_tls_encodedpoint, eckey_get1_tls_encodedpoint, eckey_opaque, int_ec_size, ec_bits, ec_missing_parameters, ec_copy_parameters, ec_cmp_parameters, int_ec_free, }; int EVP_PKEY_set1_EC_KEY(EVP_PKEY *pkey, EC_KEY *key) { if (EVP_PKEY_assign_EC_KEY(pkey, key)) { EC_KEY_up_ref(key); return 1; } return 0; } int EVP_PKEY_assign_EC_KEY(EVP_PKEY *pkey, EC_KEY *key) { evp_pkey_set_method(pkey, &ec_asn1_meth); pkey->pkey = key; return key != NULL; } EC_KEY *EVP_PKEY_get0_EC_KEY(const EVP_PKEY *pkey) { if (pkey->type != EVP_PKEY_EC) { OPENSSL_PUT_ERROR(EVP, EVP_R_EXPECTING_AN_EC_KEY_KEY); return NULL; } return reinterpret_cast(pkey->pkey); } EC_KEY *EVP_PKEY_get1_EC_KEY(const EVP_PKEY *pkey) { EC_KEY *ec_key = EVP_PKEY_get0_EC_KEY(pkey); if (ec_key != NULL) { EC_KEY_up_ref(ec_key); } return ec_key; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/p_ed25519.cc ================================================ /* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include "internal.h" // Ed25519 has no parameters to copy. 
static int pkey_ed25519_copy(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src) { return 1; } static int pkey_ed25519_keygen(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey) { ED25519_KEY *key = reinterpret_cast(OPENSSL_malloc(sizeof(ED25519_KEY))); if (key == NULL) { return 0; } evp_pkey_set_method(pkey, &ed25519_asn1_meth); uint8_t pubkey_unused[32]; ED25519_keypair(pubkey_unused, key->key); key->has_private = 1; OPENSSL_free(pkey->pkey); pkey->pkey = key; return 1; } static int pkey_ed25519_sign_message(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *siglen, const uint8_t *tbs, size_t tbslen) { const ED25519_KEY *key = reinterpret_cast(ctx->pkey->pkey); if (!key->has_private) { OPENSSL_PUT_ERROR(EVP, EVP_R_NOT_A_PRIVATE_KEY); return 0; } if (sig == NULL) { *siglen = 64; return 1; } if (*siglen < 64) { OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL); return 0; } if (!ED25519_sign(sig, tbs, tbslen, key->key)) { return 0; } *siglen = 64; return 1; } static int pkey_ed25519_verify_message(EVP_PKEY_CTX *ctx, const uint8_t *sig, size_t siglen, const uint8_t *tbs, size_t tbslen) { const ED25519_KEY *key = reinterpret_cast(ctx->pkey->pkey); if (siglen != 64 || !ED25519_verify(tbs, tbslen, sig, key->key + ED25519_PUBLIC_KEY_OFFSET)) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_SIGNATURE); return 0; } return 1; } const EVP_PKEY_METHOD ed25519_pkey_meth = { /*pkey_id=*/EVP_PKEY_ED25519, /*init=*/nullptr, /*copy=*/pkey_ed25519_copy, /*cleanup=*/nullptr, /*keygen=*/pkey_ed25519_keygen, /*sign=*/nullptr, /*sign_message=*/pkey_ed25519_sign_message, /*verify=*/nullptr, /*verify_message=*/pkey_ed25519_verify_message, /*verify_recover=*/nullptr, /*encrypt=*/nullptr, /*decrypt=*/nullptr, /*derive=*/nullptr, /*paramgen=*/nullptr, /*ctrl=*/nullptr, }; ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/p_ed25519_asn1.cc ================================================ /* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * 
purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include "../internal.h" #include "internal.h" static void ed25519_free(EVP_PKEY *pkey) { OPENSSL_free(pkey->pkey); pkey->pkey = NULL; } static int ed25519_set_priv_raw(EVP_PKEY *pkey, const uint8_t *in, size_t len) { if (len != 32) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } ED25519_KEY *key = reinterpret_cast(OPENSSL_malloc(sizeof(ED25519_KEY))); if (key == NULL) { return 0; } // The RFC 8032 encoding stores only the 32-byte seed, so we must recover the // full representation which we use from it. 
uint8_t pubkey_unused[32]; ED25519_keypair_from_seed(pubkey_unused, key->key, in); key->has_private = 1; ed25519_free(pkey); pkey->pkey = key; return 1; } static int ed25519_set_pub_raw(EVP_PKEY *pkey, const uint8_t *in, size_t len) { if (len != 32) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } ED25519_KEY *key = reinterpret_cast(OPENSSL_malloc(sizeof(ED25519_KEY))); if (key == NULL) { return 0; } OPENSSL_memcpy(key->key + ED25519_PUBLIC_KEY_OFFSET, in, 32); key->has_private = 0; ed25519_free(pkey); pkey->pkey = key; return 1; } static int ed25519_get_priv_raw(const EVP_PKEY *pkey, uint8_t *out, size_t *out_len) { const ED25519_KEY *key = reinterpret_cast(pkey->pkey); if (!key->has_private) { OPENSSL_PUT_ERROR(EVP, EVP_R_NOT_A_PRIVATE_KEY); return 0; } if (out == NULL) { *out_len = 32; return 1; } if (*out_len < 32) { OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL); return 0; } // The raw private key format is the first 32 bytes of the private key. OPENSSL_memcpy(out, key->key, 32); *out_len = 32; return 1; } static int ed25519_get_pub_raw(const EVP_PKEY *pkey, uint8_t *out, size_t *out_len) { const ED25519_KEY *key = reinterpret_cast(pkey->pkey); if (out == NULL) { *out_len = 32; return 1; } if (*out_len < 32) { OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL); return 0; } OPENSSL_memcpy(out, key->key + ED25519_PUBLIC_KEY_OFFSET, 32); *out_len = 32; return 1; } static int ed25519_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { // See RFC 8410, section 4. // The parameters must be omitted. Public keys have length 32. if (CBS_len(params) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } return ed25519_set_pub_raw(out, CBS_data(key), CBS_len(key)); } static int ed25519_pub_encode(CBB *out, const EVP_PKEY *pkey) { const ED25519_KEY *key = reinterpret_cast(pkey->pkey); // See RFC 8410, section 4. 
CBB spki, algorithm, oid, key_bitstring; if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) || !CBB_add_bytes(&oid, ed25519_asn1_meth.oid, ed25519_asn1_meth.oid_len) || !CBB_add_asn1(&spki, &key_bitstring, CBS_ASN1_BITSTRING) || !CBB_add_u8(&key_bitstring, 0 /* padding */) || !CBB_add_bytes(&key_bitstring, key->key + ED25519_PUBLIC_KEY_OFFSET, 32) || !CBB_flush(out)) { OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR); return 0; } return 1; } static int ed25519_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { const ED25519_KEY *a_key = reinterpret_cast(a->pkey); const ED25519_KEY *b_key = reinterpret_cast(b->pkey); return OPENSSL_memcmp(a_key->key + ED25519_PUBLIC_KEY_OFFSET, b_key->key + ED25519_PUBLIC_KEY_OFFSET, 32) == 0; } static int ed25519_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { // See RFC 8410, section 7. // Parameters must be empty. The key is a 32-byte value wrapped in an extra // OCTET STRING layer. CBS inner; if (CBS_len(params) != 0 || !CBS_get_asn1(key, &inner, CBS_ASN1_OCTETSTRING) || CBS_len(key) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } return ed25519_set_priv_raw(out, CBS_data(&inner), CBS_len(&inner)); } static int ed25519_priv_encode(CBB *out, const EVP_PKEY *pkey) { const ED25519_KEY *key = reinterpret_cast(pkey->pkey); if (!key->has_private) { OPENSSL_PUT_ERROR(EVP, EVP_R_NOT_A_PRIVATE_KEY); return 0; } // See RFC 8410, section 7. 
CBB pkcs8, algorithm, oid, private_key, inner; if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) || !CBB_add_asn1_uint64(&pkcs8, 0 /* version */) || !CBB_add_asn1(&pkcs8, &algorithm, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) || !CBB_add_bytes(&oid, ed25519_asn1_meth.oid, ed25519_asn1_meth.oid_len) || !CBB_add_asn1(&pkcs8, &private_key, CBS_ASN1_OCTETSTRING) || !CBB_add_asn1(&private_key, &inner, CBS_ASN1_OCTETSTRING) || // The PKCS#8 encoding stores only the 32-byte seed which is the first 32 // bytes of the private key. !CBB_add_bytes(&inner, key->key, 32) || // !CBB_flush(out)) { OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR); return 0; } return 1; } static int ed25519_size(const EVP_PKEY *pkey) { return 64; } static int ed25519_bits(const EVP_PKEY *pkey) { return 253; } const EVP_PKEY_ASN1_METHOD ed25519_asn1_meth = { EVP_PKEY_ED25519, {0x2b, 0x65, 0x70}, 3, &ed25519_pkey_meth, ed25519_pub_decode, ed25519_pub_encode, ed25519_pub_cmp, ed25519_priv_decode, ed25519_priv_encode, ed25519_set_priv_raw, ed25519_set_pub_raw, ed25519_get_priv_raw, ed25519_get_pub_raw, /*set1_tls_encodedpoint=*/NULL, /*get1_tls_encodedpoint=*/NULL, /*pkey_opaque=*/NULL, ed25519_size, ed25519_bits, /*param_missing=*/NULL, /*param_copy=*/NULL, /*param_cmp=*/NULL, ed25519_free, }; ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/p_hkdf.cc ================================================ /* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): in this vendored chunk every <angle-bracketed> token was
// stripped by text extraction: the header name after each bare `#include`
// below and the `<T *>` template argument of every `reinterpret_cast` are
// missing. Restore them from upstream BoringSSL crypto/evp/p_hkdf.cc; each
// cast's target is the declared type of the variable being initialized.
#include
#include
#include
#include
#include
#include

#include "../internal.h"
#include "internal.h"


// Per-context state for the HKDF EVP_PKEY method. |mode| is one of the
// EVP_PKEY_HKDEF_MODE_* constants (zero-initialized, i.e. extract-and-expand).
// |key| and |salt| are owned copies of the caller's buffers; |info| is an
// append-only accumulator for the HKDF info string.
typedef struct {
  int mode;
  const EVP_MD *md;
  uint8_t *key;
  size_t key_len;
  uint8_t *salt;
  size_t salt_len;
  CBB info;
} HKDF_PKEY_CTX;

// pkey_hkdf_init allocates zeroed HKDF state and installs it on |ctx->data|.
// Returns one on success, zero on allocation failure.
static int pkey_hkdf_init(EVP_PKEY_CTX *ctx) {
  HKDF_PKEY_CTX *hctx =
      reinterpret_cast(OPENSSL_zalloc(sizeof(HKDF_PKEY_CTX)));
  if (hctx == NULL) {
    return 0;
  }
  if (!CBB_init(&hctx->info, 0)) {
    OPENSSL_free(hctx);
    return 0;
  }
  ctx->data = hctx;
  return 1;
}

// pkey_hkdf_copy deep-copies |src|'s HKDF state into |dst|: key, salt, and
// the accumulated info bytes are duplicated rather than shared, so the two
// contexts are independent afterwards.
static int pkey_hkdf_copy(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src) {
  if (!pkey_hkdf_init(dst)) {
    return 0;
  }
  HKDF_PKEY_CTX *hctx_dst = reinterpret_cast(dst->data);
  const HKDF_PKEY_CTX *hctx_src = reinterpret_cast(src->data);
  hctx_dst->mode = hctx_src->mode;
  hctx_dst->md = hctx_src->md;
  if (hctx_src->key_len != 0) {
    hctx_dst->key = reinterpret_cast(
        OPENSSL_memdup(hctx_src->key, hctx_src->key_len));
    if (hctx_dst->key == NULL) {
      return 0;
    }
    hctx_dst->key_len = hctx_src->key_len;
  }
  if (hctx_src->salt_len != 0) {
    hctx_dst->salt = reinterpret_cast(
        OPENSSL_memdup(hctx_src->salt, hctx_src->salt_len));
    if (hctx_dst->salt == NULL) {
      return 0;
    }
    hctx_dst->salt_len = hctx_src->salt_len;
  }
  if (!CBB_add_bytes(&hctx_dst->info, CBB_data(&hctx_src->info),
                     CBB_len(&hctx_src->info))) {
    return 0;
  }
  return 1;
}

// pkey_hkdf_cleanup releases all HKDF state. Tolerates |ctx->data| == NULL so
// it is safe to call on a partially-initialized context.
static void pkey_hkdf_cleanup(EVP_PKEY_CTX *ctx) {
  HKDF_PKEY_CTX *hctx = reinterpret_cast(ctx->data);
  if (hctx != NULL) {
    OPENSSL_free(hctx->key);
    OPENSSL_free(hctx->salt);
    CBB_cleanup(&hctx->info);
    OPENSSL_free(hctx);
    ctx->data = NULL;
  }
}

// pkey_hkdf_derive runs the configured HKDF stage(s) into |out|. A digest and
// a key must have been set. When |out| is NULL this is a length query:
// extract-only mode reports the digest size; the expand modes leave |*out_len|
// untouched because HKDF-Expand output length is caller-chosen.
static int pkey_hkdf_derive(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *out_len) {
  HKDF_PKEY_CTX *hctx = reinterpret_cast(ctx->data);
  if (hctx->md == NULL) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_MISSING_PARAMETERS);
    return 0;
  }
  if (hctx->key_len == 0) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_NO_KEY_SET);
    return 0;
  }
  if (out == NULL) {
    if (hctx->mode == EVP_PKEY_HKDEF_MODE_EXTRACT_ONLY) {
      *out_len = EVP_MD_size(hctx->md);
    }
    // HKDF-Expand is variable-length and returns |*out_len| bytes. "Output" the
    // input length by leaving it alone.
    return 1;
  }
  switch (hctx->mode) {
    case EVP_PKEY_HKDEF_MODE_EXTRACT_AND_EXPAND:
      return HKDF(out, *out_len, hctx->md, hctx->key, hctx->key_len, hctx->salt,
                  hctx->salt_len, CBB_data(&hctx->info), CBB_len(&hctx->info));
    case EVP_PKEY_HKDEF_MODE_EXTRACT_ONLY:
      if (*out_len < EVP_MD_size(hctx->md)) {
        OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL);
        return 0;
      }
      return HKDF_extract(out, out_len, hctx->md, hctx->key, hctx->key_len,
                          hctx->salt, hctx->salt_len);
    case EVP_PKEY_HKDEF_MODE_EXPAND_ONLY:
      return HKDF_expand(out, *out_len, hctx->md, hctx->key, hctx->key_len,
                         CBB_data(&hctx->info), CBB_len(&hctx->info));
  }
  // Unreachable if |mode| was validated by the ctrl handler below.
  OPENSSL_PUT_ERROR(EVP, ERR_R_INTERNAL_ERROR);
  return 0;
}

// pkey_hkdf_ctrl dispatches HKDF-specific control commands. Key, salt, and
// info payloads arrive as |const CBS *| via |p2|; the mode arrives in |p1|.
static int pkey_hkdf_ctrl(EVP_PKEY_CTX *ctx, int type, int p1, void *p2) {
  HKDF_PKEY_CTX *hctx = reinterpret_cast(ctx->data);
  switch (type) {
    case EVP_PKEY_CTRL_HKDF_MODE:
      // Reject anything but the three defined modes up front so derive can
      // treat an unknown mode as an internal error.
      if (p1 != EVP_PKEY_HKDEF_MODE_EXTRACT_AND_EXPAND &&
          p1 != EVP_PKEY_HKDEF_MODE_EXTRACT_ONLY &&
          p1 != EVP_PKEY_HKDEF_MODE_EXPAND_ONLY) {
        OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_OPERATION);
        return 0;
      }
      hctx->mode = p1;
      return 1;
    case EVP_PKEY_CTRL_HKDF_MD:
      hctx->md = reinterpret_cast(p2);
      return 1;
    case EVP_PKEY_CTRL_HKDF_KEY: {
      const CBS *key = reinterpret_cast(p2);
      // CBS_stow replaces any previously-stowed key.
      if (!CBS_stow(key, &hctx->key, &hctx->key_len)) {
        return 0;
      }
      return 1;
    }
    case EVP_PKEY_CTRL_HKDF_SALT: {
      const CBS *salt = reinterpret_cast(p2);
      if (!CBS_stow(salt, &hctx->salt, &hctx->salt_len)) {
        return 0;
      }
      return 1;
    }
    case EVP_PKEY_CTRL_HKDF_INFO: {
      const CBS *info = reinterpret_cast(p2);
      // |EVP_PKEY_CTX_add1_hkdf_info| appends to the info string, rather than
      // replacing it.
      if (!CBB_add_bytes(&hctx->info, CBS_data(info), CBS_len(info))) {
        return 0;
      }
      return 1;
    }
    default:
      OPENSSL_PUT_ERROR(EVP, EVP_R_COMMAND_NOT_SUPPORTED);
      return 0;
  }
}

// HKDF only implements derive; every signing/encryption slot is NULL.
const EVP_PKEY_METHOD hkdf_pkey_meth = {
    /*pkey_id=*/EVP_PKEY_HKDF,
    pkey_hkdf_init,
    pkey_hkdf_copy,
    pkey_hkdf_cleanup,
    /*keygen=*/NULL,
    /*sign=*/NULL,
    /*sign_message=*/NULL,
    /*verify=*/NULL,
    /*verify_message=*/NULL,
    /*verify_recover=*/NULL,
    /*encrypt=*/NULL,
    /*decrypt=*/NULL,
    pkey_hkdf_derive,
    /*paramgen=*/NULL,
    pkey_hkdf_ctrl,
};

// Public API wrappers: each routes one HKDF parameter through
// EVP_PKEY_CTX_ctrl, restricted to the derive operation.

int EVP_PKEY_CTX_hkdf_mode(EVP_PKEY_CTX *ctx, int mode) {
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_HKDF, EVP_PKEY_OP_DERIVE,
                           EVP_PKEY_CTRL_HKDF_MODE, mode, NULL);
}

int EVP_PKEY_CTX_set_hkdf_md(EVP_PKEY_CTX *ctx, const EVP_MD *md) {
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_HKDF, EVP_PKEY_OP_DERIVE,
                           EVP_PKEY_CTRL_HKDF_MD, 0, (void *)md);
}

int EVP_PKEY_CTX_set1_hkdf_key(EVP_PKEY_CTX *ctx, const uint8_t *key,
                               size_t key_len) {
  // The CBS is only read during the ctrl call (the key is copied), so a
  // stack-local view is safe.
  CBS cbs;
  CBS_init(&cbs, key, key_len);
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_HKDF, EVP_PKEY_OP_DERIVE,
                           EVP_PKEY_CTRL_HKDF_KEY, 0, &cbs);
}

int EVP_PKEY_CTX_set1_hkdf_salt(EVP_PKEY_CTX *ctx, const uint8_t *salt,
                                size_t salt_len) {
  CBS cbs;
  CBS_init(&cbs, salt, salt_len);
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_HKDF, EVP_PKEY_OP_DERIVE,
                           EVP_PKEY_CTRL_HKDF_SALT, 0, &cbs);
}

int EVP_PKEY_CTX_add1_hkdf_info(EVP_PKEY_CTX *ctx, const uint8_t *info,
                                size_t info_len) {
  CBS cbs;
  CBS_init(&cbs, info, info_len);
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_HKDF, EVP_PKEY_OP_DERIVE,
                           EVP_PKEY_CTRL_HKDF_INFO, 0, &cbs);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/evp/p_rsa.cc
================================================
/*
 * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html */

// NOTE(review): bare `#include` lines and bare `reinterpret_cast` calls in
// this file lost their <angle-bracketed> content to text extraction; restore
// from upstream BoringSSL crypto/evp/p_rsa.cc. Each cast target is the type of
// the variable being initialized.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../internal.h"
#include "../rsa/internal.h"
#include "internal.h"


// Per-context state for the RSA EVP_PKEY method.
typedef struct {
  // Key gen parameters
  int nbits;
  BIGNUM *pub_exp;
  // RSA padding mode
  int pad_mode;
  // message digest
  const EVP_MD *md;
  // message digest for MGF1
  const EVP_MD *mgf1md;
  // PSS salt length
  int saltlen;
  // tbuf is a buffer which is either NULL, or is the size of the RSA modulus.
  // It's used to store the output of RSA operations.
  uint8_t *tbuf;
  // OAEP label
  uint8_t *oaep_label;
  size_t oaep_labellen;
} RSA_PKEY_CTX;

// Carrier for the (pointer, length) pair passed through the OAEP-label ctrl.
typedef struct {
  uint8_t *data;
  size_t len;
} RSA_OAEP_LABEL_PARAMS;

// pkey_rsa_init allocates zeroed RSA state with the defaults: 2048-bit
// keygen, PKCS#1 v1.5 padding, and saltlen -2 (PSS sentinel).
static int pkey_rsa_init(EVP_PKEY_CTX *ctx) {
  RSA_PKEY_CTX *rctx =
      reinterpret_cast(OPENSSL_zalloc(sizeof(RSA_PKEY_CTX)));
  if (!rctx) {
    return 0;
  }
  rctx->nbits = 2048;
  rctx->pad_mode = RSA_PKCS1_PADDING;
  rctx->saltlen = -2;
  ctx->data = rctx;
  return 1;
}

// pkey_rsa_copy duplicates |src|'s RSA state into |dst|. The public exponent
// and OAEP label are deep-copied; |tbuf| is intentionally not copied (it is a
// lazily-allocated scratch buffer, recreated on demand).
static int pkey_rsa_copy(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src) {
  RSA_PKEY_CTX *dctx, *sctx;
  if (!pkey_rsa_init(dst)) {
    return 0;
  }
  sctx = reinterpret_cast(src->data);
  dctx = reinterpret_cast(dst->data);
  dctx->nbits = sctx->nbits;
  if (sctx->pub_exp) {
    dctx->pub_exp = BN_dup(sctx->pub_exp);
    if (!dctx->pub_exp) {
      return 0;
    }
  }
  dctx->pad_mode = sctx->pad_mode;
  dctx->md = sctx->md;
  dctx->mgf1md = sctx->mgf1md;
  dctx->saltlen = sctx->saltlen;
  if (sctx->oaep_label) {
    OPENSSL_free(dctx->oaep_label);
    dctx->oaep_label = reinterpret_cast(
        OPENSSL_memdup(sctx->oaep_label, sctx->oaep_labellen));
    if (!dctx->oaep_label) {
      return 0;
    }
    dctx->oaep_labellen = sctx->oaep_labellen;
  }
  return 1;
}

// pkey_rsa_cleanup frees all owned RSA state; safe when |ctx->data| is NULL.
static void pkey_rsa_cleanup(EVP_PKEY_CTX *ctx) {
  RSA_PKEY_CTX *rctx = reinterpret_cast(ctx->data);
  if (rctx == NULL) {
    return;
  }
  BN_free(rctx->pub_exp);
  OPENSSL_free(rctx->tbuf);
  OPENSSL_free(rctx->oaep_label);
  OPENSSL_free(rctx);
}

// setup_tbuf lazily allocates the modulus-sized scratch buffer used by the
// raw verify/encrypt/decrypt paths. Idempotent once allocated.
static int setup_tbuf(RSA_PKEY_CTX *ctx, EVP_PKEY_CTX *pk) {
  if (ctx->tbuf) {
    return 1;
  }
  ctx->tbuf =
      reinterpret_cast(OPENSSL_malloc(EVP_PKEY_size(pk->pkey)));
  if (!ctx->tbuf) {
    return 0;
  }
  return 1;
}

// pkey_rsa_sign signs |tbs| into |sig|. With |sig| == NULL it is a length
// query. When a digest is configured, |tbs| is a digest and PKCS#1 v1.5 or
// PSS is applied; otherwise the raw padding path is used. Unknown padding
// modes fail.
static int pkey_rsa_sign(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *siglen,
                         const uint8_t *tbs, size_t tbslen) {
  RSA_PKEY_CTX *rctx = reinterpret_cast(ctx->data);
  RSA *rsa = reinterpret_cast(ctx->pkey->pkey);
  const size_t key_len = EVP_PKEY_size(ctx->pkey);
  if (!sig) {
    *siglen = key_len;
    return 1;
  }
  if (*siglen < key_len) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL);
    return 0;
  }
  if (rctx->md) {
    // RSA_sign reports its output length as an unsigned, hence the local.
    unsigned out_len;
    switch (rctx->pad_mode) {
      case RSA_PKCS1_PADDING:
        if (!RSA_sign(EVP_MD_type(rctx->md), tbs, tbslen, sig, &out_len,
                      rsa)) {
          return 0;
        }
        *siglen = out_len;
        return 1;
      case RSA_PKCS1_PSS_PADDING:
        return RSA_sign_pss_mgf1(rsa, siglen, sig, *siglen, tbs, tbslen,
                                 rctx->md, rctx->mgf1md, rctx->saltlen);
      default:
        return 0;
    }
  }
  return RSA_sign_raw(rsa, siglen, sig, *siglen, tbs, tbslen, rctx->pad_mode);
}

// pkey_rsa_verify checks |sig| over the digest |tbs|. Without a configured
// digest it decrypts the signature raw and compares against |tbs| in constant
// time.
static int pkey_rsa_verify(EVP_PKEY_CTX *ctx, const uint8_t *sig,
                           size_t siglen, const uint8_t *tbs, size_t tbslen) {
  RSA_PKEY_CTX *rctx = reinterpret_cast(ctx->data);
  RSA *rsa = reinterpret_cast(ctx->pkey->pkey);
  if (rctx->md) {
    switch (rctx->pad_mode) {
      case RSA_PKCS1_PADDING:
        return RSA_verify(EVP_MD_type(rctx->md), tbs, tbslen, sig, siglen,
                          rsa);
      case RSA_PKCS1_PSS_PADDING:
        return RSA_verify_pss_mgf1(rsa, tbs, tbslen, rctx->md, rctx->mgf1md,
                                   rctx->saltlen, sig, siglen);
      default:
        return 0;
    }
  }
  size_t rslen;
  const size_t key_len = EVP_PKEY_size(ctx->pkey);
  if (!setup_tbuf(rctx, ctx) ||
      !RSA_verify_raw(rsa, &rslen, rctx->tbuf, key_len, sig, siglen,
                      rctx->pad_mode)) {
    return 0;
  }
  if (rslen != tbslen || CRYPTO_memcmp(tbs, rctx->tbuf, rslen) != 0) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_SIGNATURE);
    return 0;
  }
  return 1;
}

// pkey_rsa_verify_recover recovers the signed payload from |sig|. With no
// digest configured it returns the raw unpadded payload. With a digest, it
// rebuilds the expected DigestInfo prefix (with a dummy hash), compares the
// prefix, and returns just the hash portion. Only PKCS#1 v1.5 supports
// recovery.
static int pkey_rsa_verify_recover(EVP_PKEY_CTX *ctx, uint8_t *out,
                                   size_t *out_len, const uint8_t *sig,
                                   size_t sig_len) {
  RSA_PKEY_CTX *rctx = reinterpret_cast(ctx->data);
  RSA *rsa = reinterpret_cast(ctx->pkey->pkey);
  const size_t key_len = EVP_PKEY_size(ctx->pkey);
  if (out == NULL) {
    *out_len = key_len;
    return 1;
  }
  if (*out_len < key_len) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL);
    return 0;
  }
  if (rctx->md == NULL) {
    return RSA_verify_raw(rsa, out_len, out, *out_len, sig, sig_len,
                          rctx->pad_mode);
  }
  if (rctx->pad_mode != RSA_PKCS1_PADDING) {
    return 0;
  }
  // Assemble the encoded hash, using a placeholder hash value.
  static const uint8_t kDummyHash[EVP_MAX_MD_SIZE] = {0};
  const size_t hash_len = EVP_MD_size(rctx->md);
  uint8_t *asn1_prefix;
  size_t asn1_prefix_len;
  int asn1_prefix_allocated;
  if (!setup_tbuf(rctx, ctx) ||
      !RSA_add_pkcs1_prefix(&asn1_prefix, &asn1_prefix_len,
                            &asn1_prefix_allocated, EVP_MD_type(rctx->md),
                            kDummyHash, hash_len)) {
    return 0;
  }
  size_t rslen;
  int ok = 1;
  if (!RSA_verify_raw(rsa, &rslen, rctx->tbuf, key_len, sig, sig_len,
                      RSA_PKCS1_PADDING) ||
      rslen != asn1_prefix_len ||
      // Compare all but the hash suffix.
      CRYPTO_memcmp(rctx->tbuf, asn1_prefix, asn1_prefix_len - hash_len) !=
          0) {
    ok = 0;
  }
  if (asn1_prefix_allocated) {
    OPENSSL_free(asn1_prefix);
  }
  if (!ok) {
    return 0;
  }
  if (out != NULL) {
    OPENSSL_memcpy(out, rctx->tbuf + rslen - hash_len, hash_len);
  }
  *out_len = hash_len;
  return 1;
}

// pkey_rsa_encrypt encrypts |in| into |out|. OAEP padding is applied manually
// via the scratch buffer (then raw-encrypted); other modes go straight to
// RSA_encrypt. NULL |out| is a length query.
static int pkey_rsa_encrypt(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *outlen,
                            const uint8_t *in, size_t inlen) {
  RSA_PKEY_CTX *rctx = reinterpret_cast(ctx->data);
  RSA *rsa = reinterpret_cast(ctx->pkey->pkey);
  const size_t key_len = EVP_PKEY_size(ctx->pkey);
  if (!out) {
    *outlen = key_len;
    return 1;
  }
  if (*outlen < key_len) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL);
    return 0;
  }
  if (rctx->pad_mode == RSA_PKCS1_OAEP_PADDING) {
    if (!setup_tbuf(rctx, ctx) ||
        !RSA_padding_add_PKCS1_OAEP_mgf1(rctx->tbuf, key_len, in, inlen,
                                         rctx->oaep_label, rctx->oaep_labellen,
                                         rctx->md, rctx->mgf1md) ||
        !RSA_encrypt(rsa, outlen, out, *outlen, rctx->tbuf, key_len,
                     RSA_NO_PADDING)) {
      return 0;
    }
    return 1;
  }
  return RSA_encrypt(rsa, outlen, out, *outlen, in, inlen, rctx->pad_mode);
}

// pkey_rsa_decrypt decrypts |in| into |out|; the mirror of pkey_rsa_encrypt
// (raw-decrypt into the scratch buffer, then strip OAEP padding manually).
static int pkey_rsa_decrypt(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *outlen,
                            const uint8_t *in, size_t inlen) {
  RSA_PKEY_CTX *rctx = reinterpret_cast(ctx->data);
  RSA *rsa = reinterpret_cast(ctx->pkey->pkey);
  const size_t key_len = EVP_PKEY_size(ctx->pkey);
  if (!out) {
    *outlen = key_len;
    return 1;
  }
  if (*outlen < key_len) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL);
    return 0;
  }
  if (rctx->pad_mode == RSA_PKCS1_OAEP_PADDING) {
    size_t padded_len;
    if (!setup_tbuf(rctx, ctx) ||
        !RSA_decrypt(rsa, &padded_len, rctx->tbuf, key_len, in, inlen,
                     RSA_NO_PADDING) ||
        !RSA_padding_check_PKCS1_OAEP_mgf1(
            out, outlen, key_len, rctx->tbuf, padded_len, rctx->oaep_label,
            rctx->oaep_labellen, rctx->md, rctx->mgf1md)) {
      return 0;
    }
    return 1;
  }
  return RSA_decrypt(rsa, outlen, out, key_len, in, inlen, rctx->pad_mode);
}

// check_padding_md rejects combining a digest with RSA_NO_PADDING. A NULL
// digest is always acceptable.
static int check_padding_md(const EVP_MD *md, int padding) {
  if (!md) {
    return 1;
  }
  if (padding == RSA_NO_PADDING) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PADDING_MODE);
    return 0;
  }
  return 1;
}

// is_known_padding reports whether |padding_mode| is one of the four modes
// this method implements.
static int is_known_padding(int padding_mode) {
  switch (padding_mode) {
    case RSA_PKCS1_PADDING:
    case RSA_NO_PADDING:
    case RSA_PKCS1_OAEP_PADDING:
    case RSA_PKCS1_PSS_PADDING:
      return 1;
    default:
      return 0;
  }
}

// pkey_rsa_ctrl dispatches RSA-specific control commands: padding mode, PSS
// salt length, keygen bits/exponent, digests, and the OAEP label. Several
// commands are only valid under a particular padding mode and fail otherwise.
static int pkey_rsa_ctrl(EVP_PKEY_CTX *ctx, int type, int p1, void *p2) {
  RSA_PKEY_CTX *rctx = reinterpret_cast(ctx->data);
  switch (type) {
    case EVP_PKEY_CTRL_RSA_PADDING:
      // PSS is only meaningful for sign/verify, OAEP only for
      // encrypt/decrypt; enforce that the operation matches.
      if (!is_known_padding(p1) || !check_padding_md(rctx->md, p1) ||
          (p1 == RSA_PKCS1_PSS_PADDING &&
           0 == (ctx->operation & (EVP_PKEY_OP_SIGN | EVP_PKEY_OP_VERIFY))) ||
          (p1 == RSA_PKCS1_OAEP_PADDING &&
           0 == (ctx->operation & EVP_PKEY_OP_TYPE_CRYPT))) {
        OPENSSL_PUT_ERROR(EVP, EVP_R_ILLEGAL_OR_UNSUPPORTED_PADDING_MODE);
        return 0;
      }
      if ((p1 == RSA_PKCS1_PSS_PADDING || p1 == RSA_PKCS1_OAEP_PADDING) &&
          rctx->md == NULL) {
        // Default to SHA-1 when none was chosen, matching OpenSSL.
        rctx->md = EVP_sha1();
      }
      rctx->pad_mode = p1;
      return 1;
    case EVP_PKEY_CTRL_GET_RSA_PADDING:
      *(int *)p2 = rctx->pad_mode;
      return 1;
    case EVP_PKEY_CTRL_RSA_PSS_SALTLEN:
    case EVP_PKEY_CTRL_GET_RSA_PSS_SALTLEN:
      if (rctx->pad_mode != RSA_PKCS1_PSS_PADDING) {
        OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PSS_SALTLEN);
        return 0;
      }
      if (type == EVP_PKEY_CTRL_GET_RSA_PSS_SALTLEN) {
        *(int *)p2 = rctx->saltlen;
      } else {
        // -1 and -2 are sentinel values; anything below is invalid.
        if (p1 < -2) {
          return 0;
        }
        rctx->saltlen = p1;
      }
      return 1;
    case EVP_PKEY_CTRL_RSA_KEYGEN_BITS:
      if (p1 < 256) {
        OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_KEYBITS);
        return 0;
      }
      rctx->nbits = p1;
      return 1;
    case EVP_PKEY_CTRL_RSA_KEYGEN_PUBEXP:
      if (!p2) {
        return 0;
      }
      // Takes ownership of the caller's BIGNUM.
      BN_free(rctx->pub_exp);
      rctx->pub_exp = reinterpret_cast(p2);
      return 1;
    case EVP_PKEY_CTRL_RSA_OAEP_MD:
    case EVP_PKEY_CTRL_GET_RSA_OAEP_MD:
      if (rctx->pad_mode != RSA_PKCS1_OAEP_PADDING) {
        OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PADDING_MODE);
        return 0;
      }
      if (type == EVP_PKEY_CTRL_GET_RSA_OAEP_MD) {
        *(const EVP_MD **)p2 = rctx->md;
      } else {
        rctx->md = reinterpret_cast(p2);
      }
      return 1;
    case EVP_PKEY_CTRL_MD:
      if (!check_padding_md(reinterpret_cast(p2),
                            rctx->pad_mode)) {
        return 0;
      }
      rctx->md = reinterpret_cast(p2);
      return 1;
    case EVP_PKEY_CTRL_GET_MD:
      *(const EVP_MD **)p2 = rctx->md;
      return 1;
    case EVP_PKEY_CTRL_RSA_MGF1_MD:
    case EVP_PKEY_CTRL_GET_RSA_MGF1_MD:
      if (rctx->pad_mode != RSA_PKCS1_PSS_PADDING &&
          rctx->pad_mode != RSA_PKCS1_OAEP_PADDING) {
        OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_MGF1_MD);
        return 0;
      }
      if (type == EVP_PKEY_CTRL_GET_RSA_MGF1_MD) {
        // When no explicit MGF1 digest is set, the main digest is used.
        if (rctx->mgf1md) {
          *(const EVP_MD **)p2 = rctx->mgf1md;
        } else {
          *(const EVP_MD **)p2 = rctx->md;
        }
      } else {
        rctx->mgf1md = reinterpret_cast(p2);
      }
      return 1;
    case EVP_PKEY_CTRL_RSA_OAEP_LABEL: {
      if (rctx->pad_mode != RSA_PKCS1_OAEP_PADDING) {
        OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PADDING_MODE);
        return 0;
      }
      // set0 semantics: take ownership of the label buffer.
      OPENSSL_free(rctx->oaep_label);
      RSA_OAEP_LABEL_PARAMS *params =
          reinterpret_cast(p2);
      rctx->oaep_label = params->data;
      rctx->oaep_labellen = params->len;
      return 1;
    }
    case EVP_PKEY_CTRL_GET_RSA_OAEP_LABEL:
      if (rctx->pad_mode != RSA_PKCS1_OAEP_PADDING) {
        OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PADDING_MODE);
        return 0;
      }
      CBS_init((CBS *)p2, rctx->oaep_label, rctx->oaep_labellen);
      return 1;
    default:
      OPENSSL_PUT_ERROR(EVP, EVP_R_COMMAND_NOT_SUPPORTED);
      return 0;
  }
}

// pkey_rsa_keygen generates an |nbits| RSA key with the configured public
// exponent (defaulting to F4 = 65537) and assigns it to |pkey|.
static int pkey_rsa_keygen(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey) {
  RSA *rsa = NULL;
  RSA_PKEY_CTX *rctx = reinterpret_cast(ctx->data);
  if (!rctx->pub_exp) {
    rctx->pub_exp = BN_new();
    if (!rctx->pub_exp || !BN_set_word(rctx->pub_exp, RSA_F4)) {
      return 0;
    }
  }
  rsa = RSA_new();
  if (!rsa) {
    return 0;
  }
  if (!RSA_generate_key_ex(rsa, rctx->nbits, rctx->pub_exp, NULL)) {
    RSA_free(rsa);
    return 0;
  }
  EVP_PKEY_assign_RSA(pkey, rsa);
  return 1;
}

const EVP_PKEY_METHOD rsa_pkey_meth = {
    EVP_PKEY_RSA,
    pkey_rsa_init,
    pkey_rsa_copy,
    pkey_rsa_cleanup,
    pkey_rsa_keygen,
    pkey_rsa_sign,
    NULL /* sign_message */,
    pkey_rsa_verify,
    NULL /* verify_message */,
    pkey_rsa_verify_recover,
    pkey_rsa_encrypt,
    pkey_rsa_decrypt,
    NULL /* derive */,
    NULL /* paramgen */,
    pkey_rsa_ctrl,
};

// Public API wrappers over EVP_PKEY_CTX_ctrl. Operation -1 means "any".

int EVP_PKEY_CTX_set_rsa_padding(EVP_PKEY_CTX *ctx, int padding) {
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, -1, EVP_PKEY_CTRL_RSA_PADDING,
                           padding, NULL);
}

int EVP_PKEY_CTX_get_rsa_padding(EVP_PKEY_CTX *ctx, int *out_padding) {
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, -1,
                           EVP_PKEY_CTRL_GET_RSA_PADDING, 0, out_padding);
}

// The three RSA-PSS keygen parameter setters are unimplemented stubs: they
// unconditionally fail (RSA-PSS-specific key generation is not supported).
int EVP_PKEY_CTX_set_rsa_pss_keygen_md(EVP_PKEY_CTX *ctx, const EVP_MD *md) {
  return 0;
}

int EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen(EVP_PKEY_CTX *ctx, int salt_len) {
  return 0;
}

int EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md(EVP_PKEY_CTX *ctx,
                                            const EVP_MD *md) {
  return 0;
}

int EVP_PKEY_CTX_set_rsa_pss_saltlen(EVP_PKEY_CTX *ctx, int salt_len) {
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA,
                           (EVP_PKEY_OP_SIGN | EVP_PKEY_OP_VERIFY),
                           EVP_PKEY_CTRL_RSA_PSS_SALTLEN, salt_len, NULL);
}

int EVP_PKEY_CTX_get_rsa_pss_saltlen(EVP_PKEY_CTX *ctx, int *out_salt_len) {
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA,
                           (EVP_PKEY_OP_SIGN | EVP_PKEY_OP_VERIFY),
                           EVP_PKEY_CTRL_GET_RSA_PSS_SALTLEN, 0, out_salt_len);
}

int EVP_PKEY_CTX_set_rsa_keygen_bits(EVP_PKEY_CTX *ctx, int bits) {
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_KEYGEN,
                           EVP_PKEY_CTRL_RSA_KEYGEN_BITS, bits, NULL);
}

int EVP_PKEY_CTX_set_rsa_keygen_pubexp(EVP_PKEY_CTX *ctx, BIGNUM *e) {
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_KEYGEN,
                           EVP_PKEY_CTRL_RSA_KEYGEN_PUBEXP, 0, e);
}

int EVP_PKEY_CTX_set_rsa_oaep_md(EVP_PKEY_CTX *ctx, const EVP_MD *md) {
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_TYPE_CRYPT,
                           EVP_PKEY_CTRL_RSA_OAEP_MD, 0, (void *)md);
}

int EVP_PKEY_CTX_get_rsa_oaep_md(EVP_PKEY_CTX *ctx, const EVP_MD **out_md) {
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_TYPE_CRYPT,
                           EVP_PKEY_CTRL_GET_RSA_OAEP_MD, 0, (void *)out_md);
}

int EVP_PKEY_CTX_set_rsa_mgf1_md(EVP_PKEY_CTX *ctx, const EVP_MD *md) {
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA,
                           EVP_PKEY_OP_TYPE_SIG | EVP_PKEY_OP_TYPE_CRYPT,
                           EVP_PKEY_CTRL_RSA_MGF1_MD, 0, (void *)md);
}

int EVP_PKEY_CTX_get_rsa_mgf1_md(EVP_PKEY_CTX *ctx, const EVP_MD **out_md) {
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA,
                           EVP_PKEY_OP_TYPE_SIG | EVP_PKEY_OP_TYPE_CRYPT,
                           EVP_PKEY_CTRL_GET_RSA_MGF1_MD, 0, (void *)out_md);
}

int EVP_PKEY_CTX_set0_rsa_oaep_label(EVP_PKEY_CTX *ctx, uint8_t *label,
                                     size_t label_len) {
  RSA_OAEP_LABEL_PARAMS params = {label, label_len};
  // NOTE(review): the extracted text read "¶ms" here — an HTML-entity
  // mojibake of "&params"; restored.
  return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_TYPE_CRYPT,
                           EVP_PKEY_CTRL_RSA_OAEP_LABEL, 0, &params);
}

int EVP_PKEY_CTX_get0_rsa_oaep_label(EVP_PKEY_CTX *ctx,
                                     const uint8_t **out_label) {
  CBS label;
  if (!EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_TYPE_CRYPT,
                         EVP_PKEY_CTRL_GET_RSA_OAEP_LABEL, 0, &label)) {
    return -1;
  }
  if (CBS_len(&label) > INT_MAX) {
    OPENSSL_PUT_ERROR(EVP, ERR_R_OVERFLOW);
    return -1;
  }
  *out_label = CBS_data(&label);
  return (int)CBS_len(&label);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/evp/p_rsa_asn1.cc
================================================
/*
 * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): header names after the bare `#include` lines were stripped by
// text extraction; restore from upstream BoringSSL crypto/evp/p_rsa_asn1.cc.
#include
#include
#include
#include
#include
#include
#include

#include "../fipsmodule/rsa/internal.h"
#include "internal.h"


// rsa_pub_encode writes |key| as a SubjectPublicKeyInfo structure.
static int rsa_pub_encode(CBB *out, const EVP_PKEY *key) {
  // See RFC 3279, section 2.3.1.
  const RSA *rsa = reinterpret_cast(key->pkey);
  CBB spki, algorithm, oid, null, key_bitstring;
  if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&oid, rsa_asn1_meth.oid, rsa_asn1_meth.oid_len) ||
      !CBB_add_asn1(&algorithm, &null, CBS_ASN1_NULL) ||
      !CBB_add_asn1(&spki, &key_bitstring, CBS_ASN1_BITSTRING) ||
      !CBB_add_u8(&key_bitstring, 0 /* padding */) ||
      !RSA_marshal_public_key(&key_bitstring, rsa) ||  //
      !CBB_flush(out)) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR);
    return 0;
  }
  return 1;
}

// rsa_pub_decode parses an RSA public key out of a SubjectPublicKeyInfo.
// Both the parameters and the key body must be fully consumed.
static int rsa_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) {
  // See RFC 3279, section 2.3.1.
  // The parameters must be NULL.
  CBS null;
  if (!CBS_get_asn1(params, &null, CBS_ASN1_NULL) ||  //
      CBS_len(&null) != 0 ||                          //
      CBS_len(params) != 0) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
    return 0;
  }
  RSA *rsa = RSA_parse_public_key(key);
  if (rsa == NULL || CBS_len(key) != 0) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
    RSA_free(rsa);
    return 0;
  }
  EVP_PKEY_assign_RSA(out, rsa);
  return 1;
}

// rsa_pub_cmp returns one iff the two keys have equal modulus and exponent.
static int rsa_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b) {
  const RSA *a_rsa = reinterpret_cast(a->pkey);
  const RSA *b_rsa = reinterpret_cast(b->pkey);
  return BN_cmp(RSA_get0_n(b_rsa), RSA_get0_n(a_rsa)) == 0 &&
         BN_cmp(RSA_get0_e(b_rsa), RSA_get0_e(a_rsa)) == 0;
}

// rsa_priv_encode writes |key| as a PKCS#8 PrivateKeyInfo structure.
static int rsa_priv_encode(CBB *out, const EVP_PKEY *key) {
  const RSA *rsa = reinterpret_cast(key->pkey);
  CBB pkcs8, algorithm, oid, null, private_key;
  if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1_uint64(&pkcs8, 0 /* version */) ||
      !CBB_add_asn1(&pkcs8, &algorithm, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&oid, rsa_asn1_meth.oid, rsa_asn1_meth.oid_len) ||
      !CBB_add_asn1(&algorithm, &null, CBS_ASN1_NULL) ||
      !CBB_add_asn1(&pkcs8, &private_key, CBS_ASN1_OCTETSTRING) ||
      !RSA_marshal_private_key(&private_key, rsa) ||  //
      !CBB_flush(out)) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR);
    return 0;
  }
  return 1;
}

// rsa_priv_decode parses an RSA private key out of a PKCS#8 PrivateKeyInfo.
static int rsa_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) {
  // Per RFC 3447, A.1, the parameters have type NULL.
  CBS null;
  if (!CBS_get_asn1(params, &null, CBS_ASN1_NULL) ||  //
      CBS_len(&null) != 0 ||                          //
      CBS_len(params) != 0) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
    return 0;
  }
  RSA *rsa = RSA_parse_private_key(key);
  if (rsa == NULL || CBS_len(key) != 0) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
    RSA_free(rsa);
    return 0;
  }
  EVP_PKEY_assign_RSA(out, rsa);
  return 1;
}

// rsa_opaque reports whether the underlying RSA key is opaque (e.g. backed by
// hardware).
static int rsa_opaque(const EVP_PKEY *pkey) {
  const RSA *rsa = reinterpret_cast(pkey->pkey);
  return RSA_is_opaque(rsa);
}

// int_rsa_size returns the modulus size in bytes.
static int int_rsa_size(const EVP_PKEY *pkey) {
  const RSA *rsa = reinterpret_cast(pkey->pkey);
  return RSA_size(rsa);
}

// rsa_bits returns the modulus size in bits.
static int rsa_bits(const EVP_PKEY *pkey) {
  const RSA *rsa = reinterpret_cast(pkey->pkey);
  return RSA_bits(rsa);
}

// int_rsa_free drops the pkey's reference on the RSA key.
static void int_rsa_free(EVP_PKEY *pkey) {
  RSA_free(reinterpret_cast(pkey->pkey));
  pkey->pkey = NULL;
}

const EVP_PKEY_ASN1_METHOD rsa_asn1_meth = {
    EVP_PKEY_RSA,
    // 1.2.840.113549.1.1.1
    {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01},
    9,
    &rsa_pkey_meth,
    rsa_pub_decode,
    rsa_pub_encode,
    rsa_pub_cmp,
    rsa_priv_decode,
    rsa_priv_encode,
    /*set_priv_raw=*/NULL,
    /*set_pub_raw=*/NULL,
    /*get_priv_raw=*/NULL,
    /*get_pub_raw=*/NULL,
    /*set1_tls_encodedpoint=*/NULL,
    /*get1_tls_encodedpoint=*/NULL,
    rsa_opaque,
    int_rsa_size,
    rsa_bits,
    0,
    0,
    0,
    int_rsa_free,
};

// EVP_PKEY_set1_RSA assigns |key| to |pkey| and takes an additional
// reference on success.
int EVP_PKEY_set1_RSA(EVP_PKEY *pkey, RSA *key) {
  if (EVP_PKEY_assign_RSA(pkey, key)) {
    RSA_up_ref(key);
    return 1;
  }
  return 0;
}

// EVP_PKEY_assign_RSA installs |key| (taking ownership of the caller's
// reference) and switches |pkey| to the RSA method. Fails iff |key| is NULL.
int EVP_PKEY_assign_RSA(EVP_PKEY *pkey, RSA *key) {
  evp_pkey_set_method(pkey, &rsa_asn1_meth);
  pkey->pkey = key;
  return key != NULL;
}

// EVP_PKEY_get0_RSA returns the underlying RSA key without adding a
// reference, or NULL (with an error) if |pkey| is not an RSA key.
RSA *EVP_PKEY_get0_RSA(const EVP_PKEY *pkey) {
  if (pkey->type != EVP_PKEY_RSA) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_EXPECTING_AN_RSA_KEY);
    return NULL;
  }
  return reinterpret_cast(pkey->pkey);
}

// EVP_PKEY_get1_RSA is get0 plus a reference for the caller.
RSA *EVP_PKEY_get1_RSA(const EVP_PKEY *pkey) {
  RSA *rsa = EVP_PKEY_get0_RSA(pkey);
  if (rsa != NULL) {
    RSA_up_ref(rsa);
  }
  return rsa;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/evp/p_x25519.cc
================================================
/* Copyright 2019 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): header names after the bare `#include` lines were stripped by
// text extraction; restore from upstream BoringSSL crypto/evp/p_x25519.cc.
#include
#include
#include
#include

#include "internal.h"


// X25519 has no parameters to copy.
// pkey_x25519_copy is a no-op: there is no per-context state to duplicate.
static int pkey_x25519_copy(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src) { return 1; }

// pkey_x25519_keygen generates a fresh X25519 keypair, switches |pkey| to
// the X25519 method, and installs the key (freeing any previous key blob).
static int pkey_x25519_keygen(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey) {
  // NOTE(review): the <X25519_KEY *> template argument of this (and every
  // other) reinterpret_cast in this chunk was stripped by text extraction;
  // restore from upstream BoringSSL.
  X25519_KEY *key =
      reinterpret_cast(OPENSSL_malloc(sizeof(X25519_KEY)));
  if (key == NULL) {
    return 0;
  }
  evp_pkey_set_method(pkey, &x25519_asn1_meth);
  X25519_keypair(key->pub, key->priv);
  key->has_private = 1;
  OPENSSL_free(pkey->pkey);
  pkey->pkey = key;
  return 1;
}

// pkey_x25519_derive computes the 32-byte X25519 shared secret between our
// private key and the peer's public key. With |out| == NULL this is a length
// query. Fails if either key is missing, our key has no private half, or the
// peer key is invalid (all-zero shared secret).
static int pkey_x25519_derive(EVP_PKEY_CTX *ctx, uint8_t *out,
                              size_t *out_len) {
  if (ctx->pkey == NULL || ctx->peerkey == NULL) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_KEYS_NOT_SET);
    return 0;
  }
  const X25519_KEY *our_key = reinterpret_cast(ctx->pkey->pkey);
  const X25519_KEY *peer_key = reinterpret_cast(ctx->peerkey->pkey);
  if (our_key == NULL || peer_key == NULL) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_KEYS_NOT_SET);
    return 0;
  }
  if (!our_key->has_private) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_NOT_A_PRIVATE_KEY);
    return 0;
  }
  if (out != NULL) {
    if (*out_len < 32) {
      OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL);
      return 0;
    }
    if (!X25519(out, our_key->priv, peer_key->pub)) {
      OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY);
      return 0;
    }
  }
  *out_len = 32;
  return 1;
}

// pkey_x25519_ctrl only needs to acknowledge the peer-key command.
static int pkey_x25519_ctrl(EVP_PKEY_CTX *ctx, int type, int p1, void *p2) {
  switch (type) {
    case EVP_PKEY_CTRL_PEER_KEY:
      // |EVP_PKEY_derive_set_peer| requires the key implement this command,
      // even if it is a no-op.
      return 1;
    default:
      OPENSSL_PUT_ERROR(EVP, EVP_R_COMMAND_NOT_SUPPORTED);
      return 0;
  }
}

// X25519 is key-agreement only: keygen + derive, everything else NULL.
const EVP_PKEY_METHOD x25519_pkey_meth = {
    /*pkey_id=*/EVP_PKEY_X25519,
    /*init=*/NULL,
    /*copy=*/pkey_x25519_copy,
    /*cleanup=*/NULL,
    /*keygen=*/pkey_x25519_keygen,
    /*sign=*/NULL,
    /*sign_message=*/NULL,
    /*verify=*/NULL,
    /*verify_message=*/NULL,
    /*verify_recover=*/NULL,
    /*encrypt=*/NULL,
    /*decrypt=*/NULL,
    /*derive=*/pkey_x25519_derive,
    /*paramgen=*/NULL,
    /*ctrl=*/pkey_x25519_ctrl,
};


================================================
FILE: Sources/CNIOBoringSSL/crypto/evp/p_x25519_asn1.cc
================================================
/* Copyright 2019 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

// NOTE(review): header names after the bare `#include` lines and the `<T *>`
// template argument of every `reinterpret_cast` below were stripped by text
// extraction; restore from upstream BoringSSL crypto/evp/p_x25519_asn1.cc.
#include
#include
#include
#include
#include

#include "../internal.h"
#include "internal.h"


// x25519_free releases the key blob attached to |pkey|, if any.
static void x25519_free(EVP_PKEY *pkey) {
  OPENSSL_free(pkey->pkey);
  pkey->pkey = NULL;
}

// x25519_set_priv_raw installs a 32-byte private key, deriving the matching
// public key. Replaces any existing key blob.
static int x25519_set_priv_raw(EVP_PKEY *pkey, const uint8_t *in, size_t len) {
  if (len != 32) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
    return 0;
  }
  X25519_KEY *key =
      reinterpret_cast(OPENSSL_malloc(sizeof(X25519_KEY)));
  if (key == NULL) {
    return 0;
  }
  OPENSSL_memcpy(key->priv, in, 32);
  X25519_public_from_private(key->pub, key->priv);
  key->has_private = 1;
  x25519_free(pkey);
  pkey->pkey = key;
  return 1;
}

// x25519_set_pub_raw installs a 32-byte public key (no private half).
static int x25519_set_pub_raw(EVP_PKEY *pkey, const uint8_t *in, size_t len) {
  if (len != 32) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
    return 0;
  }
  X25519_KEY *key =
      reinterpret_cast(OPENSSL_malloc(sizeof(X25519_KEY)));
  if (key == NULL) {
    return 0;
  }
  OPENSSL_memcpy(key->pub, in, 32);
  key->has_private = 0;
  x25519_free(pkey);
  pkey->pkey = key;
  return 1;
}

// x25519_get_priv_raw copies out the 32-byte private key. NULL |out| is a
// length query; fails if the key has no private half.
static int x25519_get_priv_raw(const EVP_PKEY *pkey, uint8_t *out,
                               size_t *out_len) {
  const X25519_KEY *key = reinterpret_cast(pkey->pkey);
  if (!key->has_private) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_NOT_A_PRIVATE_KEY);
    return 0;
  }
  if (out == NULL) {
    *out_len = 32;
    return 1;
  }
  if (*out_len < 32) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL);
    return 0;
  }
  OPENSSL_memcpy(out, key->priv, 32);
  *out_len = 32;
  return 1;
}

// x25519_get_pub_raw copies out the 32-byte public key; NULL |out| is a
// length query.
static int x25519_get_pub_raw(const EVP_PKEY *pkey, uint8_t *out,
                              size_t *out_len) {
  const X25519_KEY *key = reinterpret_cast(pkey->pkey);
  if (out == NULL) {
    *out_len = 32;
    return 1;
  }
  if (*out_len < 32) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_BUFFER_TOO_SMALL);
    return 0;
  }
  OPENSSL_memcpy(out, key->pub, 32);
  *out_len = 32;
  return 1;
}

// The TLS "encoded point" for X25519 is just the raw 32-byte public key.
static int x25519_set1_tls_encodedpoint(EVP_PKEY *pkey, const uint8_t *in,
                                        size_t len) {
  return x25519_set_pub_raw(pkey, in, len);
}

// x25519_get1_tls_encodedpoint returns a freshly-allocated copy of the
// 32-byte public key in |*out_ptr| and its length, or zero on failure.
static size_t x25519_get1_tls_encodedpoint(const EVP_PKEY *pkey,
                                           uint8_t **out_ptr) {
  const X25519_KEY *key = reinterpret_cast(pkey->pkey);
  if (key == NULL) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_NO_KEY_SET);
    return 0;
  }
  *out_ptr = reinterpret_cast(OPENSSL_memdup(key->pub, 32));
  return *out_ptr == NULL ? 0 : 32;
}

// x25519_pub_decode parses an X25519 public key out of an SPKI body.
static int x25519_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) {
  // See RFC 8410, section 4.
  // The parameters must be omitted. Public keys have length 32.
  if (CBS_len(params) != 0) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
    return 0;
  }
  return x25519_set_pub_raw(out, CBS_data(key), CBS_len(key));
}

// x25519_pub_encode writes the key as a SubjectPublicKeyInfo structure.
static int x25519_pub_encode(CBB *out, const EVP_PKEY *pkey) {
  const X25519_KEY *key = reinterpret_cast(pkey->pkey);
  // See RFC 8410, section 4.
  CBB spki, algorithm, oid, key_bitstring;
  if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&oid, x25519_asn1_meth.oid, x25519_asn1_meth.oid_len) ||
      !CBB_add_asn1(&spki, &key_bitstring, CBS_ASN1_BITSTRING) ||
      !CBB_add_u8(&key_bitstring, 0 /* padding */) ||
      !CBB_add_bytes(&key_bitstring, key->pub, 32) ||  //
      !CBB_flush(out)) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR);
    return 0;
  }
  return 1;
}

// x25519_pub_cmp compares only the public halves.
static int x25519_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b) {
  const X25519_KEY *a_key = reinterpret_cast(a->pkey);
  const X25519_KEY *b_key = reinterpret_cast(b->pkey);
  return OPENSSL_memcmp(a_key->pub, b_key->pub, 32) == 0;
}

// x25519_priv_decode parses a PKCS#8-wrapped X25519 private key.
static int x25519_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) {
  // See RFC 8410, section 7.
  // Parameters must be empty. The key is a 32-byte value wrapped in an extra
  // OCTET STRING layer.
  CBS inner;
  if (CBS_len(params) != 0 ||
      !CBS_get_asn1(key, &inner, CBS_ASN1_OCTETSTRING) ||  //
      CBS_len(key) != 0) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
    return 0;
  }
  return x25519_set_priv_raw(out, CBS_data(&inner), CBS_len(&inner));
}

// x25519_priv_encode writes the private key as a PKCS#8 PrivateKeyInfo.
static int x25519_priv_encode(CBB *out, const EVP_PKEY *pkey) {
  const X25519_KEY *key = reinterpret_cast(pkey->pkey);
  if (!key->has_private) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_NOT_A_PRIVATE_KEY);
    return 0;
  }
  // See RFC 8410, section 7.
  CBB pkcs8, algorithm, oid, private_key, inner;
  if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1_uint64(&pkcs8, 0 /* version */) ||
      !CBB_add_asn1(&pkcs8, &algorithm, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&oid, x25519_asn1_meth.oid, x25519_asn1_meth.oid_len) ||
      !CBB_add_asn1(&pkcs8, &private_key, CBS_ASN1_OCTETSTRING) ||
      !CBB_add_asn1(&private_key, &inner, CBS_ASN1_OCTETSTRING) ||
      // The PKCS#8 encoding stores only the 32-byte seed which is the first 32
      // bytes of the private key.
      !CBB_add_bytes(&inner, key->priv, 32) ||  //
      !CBB_flush(out)) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR);
    return 0;
  }
  return 1;
}

static int x25519_size(const EVP_PKEY *pkey) { return 32; }

static int x25519_bits(const EVP_PKEY *pkey) { return 253; }

const EVP_PKEY_ASN1_METHOD x25519_asn1_meth = {
    EVP_PKEY_X25519,
    // OID 1.3.101.110 (id-X25519)
    {0x2b, 0x65, 0x6e},
    3,
    &x25519_pkey_meth,
    x25519_pub_decode,
    x25519_pub_encode,
    x25519_pub_cmp,
    x25519_priv_decode,
    x25519_priv_encode,
    x25519_set_priv_raw,
    x25519_set_pub_raw,
    x25519_get_priv_raw,
    x25519_get_pub_raw,
    x25519_set1_tls_encodedpoint,
    x25519_get1_tls_encodedpoint,
    /*pkey_opaque=*/NULL,
    x25519_size,
    x25519_bits,
    /*param_missing=*/NULL,
    /*param_copy=*/NULL,
    /*param_cmp=*/NULL,
    x25519_free,
};


================================================
FILE: Sources/CNIOBoringSSL/crypto/evp/pbkdf.cc
================================================
/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): header names after the bare `#include` lines (here and in the
// print.cc section below) and the `<T *>` argument of the reinterpret_cast in
// bn_print were stripped by text extraction; restore from upstream BoringSSL.
#include
#include
#include

#include "../internal.h"


// PKCS5_PBKDF2_HMAC derives |key_len| bytes into |out_key| using PBKDF2 with
// HMAC-|digest|, per RFC 8018 section 5.2. Returns one on success, zero on
// HMAC failure or when |iterations| is zero (see the note at the bottom).
int PKCS5_PBKDF2_HMAC(const char *password, size_t password_len,
                      const uint8_t *salt, size_t salt_len,
                      uint32_t iterations, const EVP_MD *digest,
                      size_t key_len, uint8_t *out_key) {
  // See RFC 8018, section 5.2.
  bssl::ScopedHMAC_CTX hctx;
  if (!HMAC_Init_ex(hctx.get(), password, password_len, digest, NULL)) {
    return 0;
  }
  // |i| is the 1-based block index, serialized big-endian per the RFC.
  uint32_t i = 1;
  size_t md_len = EVP_MD_size(digest);
  while (key_len > 0) {
    size_t todo = md_len;
    if (todo > key_len) {
      todo = key_len;
    }
    uint8_t i_buf[4];
    i_buf[0] = (uint8_t)((i >> 24) & 0xff);
    i_buf[1] = (uint8_t)((i >> 16) & 0xff);
    i_buf[2] = (uint8_t)((i >> 8) & 0xff);
    i_buf[3] = (uint8_t)(i & 0xff);
    // Compute U_1.
    uint8_t digest_tmp[EVP_MAX_MD_SIZE];
    if (!HMAC_Init_ex(hctx.get(), NULL, 0, NULL, NULL) ||
        !HMAC_Update(hctx.get(), salt, salt_len) ||
        !HMAC_Update(hctx.get(), i_buf, 4) ||
        !HMAC_Final(hctx.get(), digest_tmp, NULL)) {
      return 0;
    }
    OPENSSL_memcpy(out_key, digest_tmp, todo);
    for (uint32_t j = 1; j < iterations; j++) {
      // Compute the remaining U_* values and XOR.
      if (!HMAC_Init_ex(hctx.get(), NULL, 0, NULL, NULL) ||
          !HMAC_Update(hctx.get(), digest_tmp, md_len) ||
          !HMAC_Final(hctx.get(), digest_tmp, NULL)) {
        return 0;
      }
      for (size_t k = 0; k < todo; k++) {
        out_key[k] ^= digest_tmp[k];
      }
    }
    key_len -= todo;
    out_key += todo;
    i++;
  }
  // RFC 8018 describes iterations (c) as being a "positive integer", so a
  // value of 0 is an error.
  //
  // Unfortunately not all consumers of PKCS5_PBKDF2_HMAC() check their return
  // value, expecting it to succeed and unconditionally using |out_key|. As a
  // precaution for such callsites in external code, the old behavior of
  // iterations < 1 being treated as iterations == 1 is preserved, but
  // additionally an error result is returned.
  //
  // TODO(eroman): Figure out how to remove this compatibility hack, or change
  // the default to something more sensible like 2048.
  if (iterations == 0) {
    return 0;
  }
  return 1;
}

// PKCS5_PBKDF2_HMAC_SHA1 is PKCS5_PBKDF2_HMAC fixed to HMAC-SHA1.
int PKCS5_PBKDF2_HMAC_SHA1(const char *password, size_t password_len,
                           const uint8_t *salt, size_t salt_len,
                           uint32_t iterations, size_t key_len,
                           uint8_t *out_key) {
  return PKCS5_PBKDF2_HMAC(password, password_len, salt, salt_len, iterations,
                           EVP_sha1(), key_len, out_key);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/evp/print.cc
================================================
/*
 * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../fipsmodule/rsa/internal.h"
#include "../internal.h"


// print_hex writes |len| bytes as colon-separated hex, wrapped every 15
// bytes, each line indented by |off| + 4 columns.
static int print_hex(BIO *bp, const uint8_t *data, size_t len, int off) {
  for (size_t i = 0; i < len; i++) {
    if ((i % 15) == 0) {
      if (BIO_puts(bp, "\n") <= 0 ||  //
          !BIO_indent(bp, off + 4, 128)) {
        return 0;
      }
    }
    if (BIO_printf(bp, "%02x%s", data[i], (i + 1 == len) ? "" : ":") <= 0) {
      return 0;
    }
  }
  if (BIO_write(bp, "\n", 1) <= 0) {
    return 0;
  }
  return 1;
}

// bn_print writes "|name| <value>" at indent |off|. Small values (fitting a
// uint64_t) are printed in decimal and hex on one line; larger values are
// hex-dumped via print_hex. A NULL |num| prints nothing and succeeds.
static int bn_print(BIO *bp, const char *name, const BIGNUM *num, int off) {
  if (num == NULL) {
    return 1;
  }
  if (!BIO_indent(bp, off, 128)) {
    return 0;
  }
  if (BN_is_zero(num)) {
    if (BIO_printf(bp, "%s 0\n", name) <= 0) {
      return 0;
    }
    return 1;
  }
  uint64_t u64;
  if (BN_get_u64(num, &u64)) {
    const char *neg = BN_is_negative(num) ? "-" : "";
    return BIO_printf(bp, "%s %s%" PRIu64 " (%s0x%" PRIx64 ")\n", name, neg,
                      u64, neg, u64) > 0;
  }
  if (BIO_printf(bp, "%s%s", name,
                 (BN_is_negative(num)) ? " (Negative)" : "") <= 0) {
    return 0;
  }
  // Print |num| in hex, adding a leading zero, as in ASN.1, if the high bit
  // is set.
  //
  // TODO(davidben): Do we need to do this? We already print "(Negative)" above
  // and negative values are never valid in keys anyway.
  size_t len = BN_num_bytes(num);
  uint8_t *buf = reinterpret_cast(OPENSSL_malloc(len + 1));
  if (buf == NULL) {
    return 0;
  }
  buf[0] = 0;
  BN_bn2bin(num, buf + 1);
  int ret;
  if (len > 0 && (buf[1] & 0x80) != 0) {
    // Print the whole buffer.
    ret = print_hex(bp, buf, len + 1, off);
  } else {
    // Skip the leading zero.
    ret = print_hex(bp, buf + 1, len, off);
  }
  OPENSSL_free(buf);
  return ret;
}

// RSA keys.

// do_rsa_print dumps |rsa| to |out| at indent |off|. With |include_private|
// and a private exponent present, the full CRT parameters are printed;
// otherwise only the public modulus and exponent.
static int do_rsa_print(BIO *out, const RSA *rsa, int off,
                        int include_private) {
  int mod_len = 0;
  if (rsa->n != NULL) {
    mod_len = BN_num_bits(rsa->n);
  }
  if (!BIO_indent(out, off, 128)) {
    return 0;
  }
  const char *s, *str;
  if (include_private && rsa->d) {
    if (BIO_printf(out, "Private-Key: (%d bit)\n", mod_len) <= 0) {
      return 0;
    }
    str = "modulus:";
    s = "publicExponent:";
  } else {
    if (BIO_printf(out, "Public-Key: (%d bit)\n", mod_len) <= 0) {
      return 0;
    }
    str = "Modulus:";
    s = "Exponent:";
  }
  if (!bn_print(out, str, rsa->n, off) ||  //
      !bn_print(out, s, rsa->e, off)) {
    return 0;
  }
  if (include_private) {
    if (!bn_print(out, "privateExponent:", rsa->d, off) ||
        !bn_print(out, "prime1:", rsa->p, off) ||
        !bn_print(out, "prime2:", rsa->q, off) ||
        !bn_print(out, "exponent1:", rsa->dmp1, off) ||
        !bn_print(out, "exponent2:", rsa->dmq1, off) ||
        !bn_print(out, "coefficient:", rsa->iqmp, off)) {
      return 0;
    }
  }
  return 1;
}

static int rsa_pub_print(BIO *bp, const EVP_PKEY *pkey, int indent) {
  return do_rsa_print(bp, EVP_PKEY_get0_RSA(pkey), indent, 0);
}

static int rsa_priv_print(BIO *bp, const EVP_PKEY *pkey, int indent) {
  return do_rsa_print(bp, EVP_PKEY_get0_RSA(pkey), indent, 1);
}
// DSA keys.

// do_dsa_print writes a textual dump of the DSA key |x| to |bp|. |ptype|
// selects what is shown: 0 = parameters only, 1 = public key, 2 = private
// key. Returns one on success, zero on write failure.
static int do_dsa_print(BIO *bp, const DSA *x, int off, int ptype) {
  const BIGNUM *priv_key = NULL;
  if (ptype == 2) {
    priv_key = DSA_get0_priv_key(x);
  }

  const BIGNUM *pub_key = NULL;
  if (ptype > 0) {
    pub_key = DSA_get0_pub_key(x);
  }

  const char *ktype = "DSA-Parameters";
  if (ptype == 2) {
    ktype = "Private-Key";
  } else if (ptype == 1) {
    ktype = "Public-Key";
  }

  if (!BIO_indent(bp, off, 128) ||
      BIO_printf(bp, "%s: (%u bit)\n", ktype, BN_num_bits(DSA_get0_p(x))) <=
          0 ||
      // |priv_key| and |pub_key| may be NULL, in which case |bn_print| will
      // silently skip them.
      !bn_print(bp, "priv:", priv_key, off) ||
      !bn_print(bp, "pub:", pub_key, off) ||
      !bn_print(bp, "P:", DSA_get0_p(x), off) ||
      !bn_print(bp, "Q:", DSA_get0_q(x), off) ||
      !bn_print(bp, "G:", DSA_get0_g(x), off)) {
    return 0;
  }
  return 1;
}

// dsa_param_print prints only the DSA domain parameters of |pkey|.
static int dsa_param_print(BIO *bp, const EVP_PKEY *pkey, int indent) {
  return do_dsa_print(bp, EVP_PKEY_get0_DSA(pkey), indent, 0);
}

// dsa_pub_print prints the public half of |pkey|'s DSA key.
static int dsa_pub_print(BIO *bp, const EVP_PKEY *pkey, int indent) {
  return do_dsa_print(bp, EVP_PKEY_get0_DSA(pkey), indent, 1);
}

// dsa_priv_print prints the full private DSA key held by |pkey|.
static int dsa_priv_print(BIO *bp, const EVP_PKEY *pkey, int indent) {
  return do_dsa_print(bp, EVP_PKEY_get0_DSA(pkey), indent, 2);
}


// EC keys.

// do_EC_KEY_print writes a textual dump of the EC key |x| to |bp|. |ktype|
// selects what is shown: 0 = curve parameters only, 1 = public key,
// 2 = private key. Returns one on success, zero on failure (including a NULL
// key or a key without a group).
static int do_EC_KEY_print(BIO *bp, const EC_KEY *x, int off, int ktype) {
  const EC_GROUP *group;
  if (x == NULL || (group = EC_KEY_get0_group(x)) == NULL) {
    OPENSSL_PUT_ERROR(EVP, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }

  const char *ecstr;
  if (ktype == 2) {
    ecstr = "Private-Key";
  } else if (ktype == 1) {
    ecstr = "Public-Key";
  } else {
    ecstr = "ECDSA-Parameters";
  }

  if (!BIO_indent(bp, off, 128)) {
    return 0;
  }
  int curve_name = EC_GROUP_get_curve_name(group);
  if (BIO_printf(bp, "%s: (%s)\n", ecstr,
                 curve_name == NID_undef
                     ? "unknown curve"
                     : EC_curve_nid2nist(curve_name)) <= 0) {
    return 0;
  }

  if (ktype == 2) {
    const BIGNUM *priv_key = EC_KEY_get0_private_key(x);
    if (priv_key != NULL &&  //
        !bn_print(bp, "priv:", priv_key, off)) {
      return 0;
    }
  }

  if (ktype > 0 && EC_KEY_get0_public_key(x) != NULL) {
    uint8_t *pub = NULL;
    // |EC_KEY_key2buf| allocates |pub|; freed below after printing.
    size_t pub_len = EC_KEY_key2buf(x, EC_KEY_get_conv_form(x), &pub, NULL);
    if (pub_len == 0) {
      return 0;
    }
    int ret = BIO_indent(bp, off, 128) &&  //
              BIO_puts(bp, "pub:") > 0 &&  //
              print_hex(bp, pub, pub_len, off);
    OPENSSL_free(pub);
    if (!ret) {
      return 0;
    }
  }

  return 1;
}

// eckey_param_print prints only the curve parameters of |pkey|'s EC key.
static int eckey_param_print(BIO *bp, const EVP_PKEY *pkey, int indent) {
  return do_EC_KEY_print(bp, EVP_PKEY_get0_EC_KEY(pkey), indent, 0);
}

// eckey_pub_print prints the public half of |pkey|'s EC key.
static int eckey_pub_print(BIO *bp, const EVP_PKEY *pkey, int indent) {
  return do_EC_KEY_print(bp, EVP_PKEY_get0_EC_KEY(pkey), indent, 1);
}

// eckey_priv_print prints the full private EC key held by |pkey|.
static int eckey_priv_print(BIO *bp, const EVP_PKEY *pkey, int indent) {
  return do_EC_KEY_print(bp, EVP_PKEY_get0_EC_KEY(pkey), indent, 2);
}


// EVP_PKEY_PRINT_METHOD maps an EVP_PKEY type to its printers; any entry may
// be NULL when the corresponding form is not supported.
typedef struct {
  int type;
  int (*pub_print)(BIO *out, const EVP_PKEY *pkey, int indent);
  int (*priv_print)(BIO *out, const EVP_PKEY *pkey, int indent);
  int (*param_print)(BIO *out, const EVP_PKEY *pkey, int indent);
} EVP_PKEY_PRINT_METHOD;

static EVP_PKEY_PRINT_METHOD kPrintMethods[] = {
    {
        EVP_PKEY_RSA,
        rsa_pub_print,
        rsa_priv_print,
        NULL /* param_print */,
    },
    {
        EVP_PKEY_DSA,
        dsa_pub_print,
        dsa_priv_print,
        dsa_param_print,
    },
    {
        EVP_PKEY_EC,
        eckey_pub_print,
        eckey_priv_print,
        eckey_param_print,
    },
};

static size_t kPrintMethodsLen = OPENSSL_ARRAY_SIZE(kPrintMethods);

// find_method returns the print method for |type|, or NULL if none is
// registered in |kPrintMethods|.
static EVP_PKEY_PRINT_METHOD *find_method(int type) {
  for (size_t i = 0; i < kPrintMethodsLen; i++) {
    if (kPrintMethods[i].type == type) {
      return &kPrintMethods[i];
    }
  }
  return NULL;
}

// print_unsupported emits a placeholder line for a key type with no printer.
// Note it reports success (one) so callers treat the output as complete.
static int print_unsupported(BIO *out, const EVP_PKEY *pkey, int indent,
                             const char *kstr) {
  BIO_indent(out, indent, 128);
  BIO_printf(out, "%s algorithm unsupported\n", kstr);
  return 1;
}

// EVP_PKEY_print_public dumps the public portion of |pkey| to |out|. |pctx|
// is unused.
int EVP_PKEY_print_public(BIO *out, const EVP_PKEY *pkey, int indent,
                          ASN1_PCTX *pctx) {
  EVP_PKEY_PRINT_METHOD *method = find_method(EVP_PKEY_id(pkey));
  if (method != NULL && method->pub_print != NULL) {
    return method->pub_print(out, pkey, indent);
  }
  return print_unsupported(out, pkey, indent, "Public Key");
}

// EVP_PKEY_print_private dumps the private portion of |pkey| to |out|. |pctx|
// is unused.
int EVP_PKEY_print_private(BIO *out, const EVP_PKEY *pkey, int indent,
                           ASN1_PCTX *pctx) {
  EVP_PKEY_PRINT_METHOD *method = find_method(EVP_PKEY_id(pkey));
  if (method != NULL && method->priv_print != NULL) {
    return method->priv_print(out, pkey, indent);
  }
  return print_unsupported(out, pkey, indent, "Private Key");
}

// EVP_PKEY_print_params dumps the domain parameters of |pkey| to |out|.
// |pctx| is unused.
int EVP_PKEY_print_params(BIO *out, const EVP_PKEY *pkey, int indent,
                          ASN1_PCTX *pctx) {
  EVP_PKEY_PRINT_METHOD *method = find_method(EVP_PKEY_id(pkey));
  if (method != NULL && method->param_print != NULL) {
    return method->param_print(out, pkey, indent);
  }
  return print_unsupported(out, pkey, indent, "Parameters");
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/evp/scrypt.cc
================================================
/*
 * Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the bracketed #include targets were stripped by extraction.
#include #include #include #include

#include "../internal.h"


// This file implements scrypt, described in RFC 7914.
//
// Note scrypt refers to both "blocks" and a "block size" parameter, r. These
// are two different notions of blocks. A Salsa20 block is 64 bytes long,
// represented in this implementation by 16 |uint32_t|s. |r| determines the
// number of 64-byte Salsa20 blocks in a scryptBlockMix block, which is 2 * |r|
// Salsa20 blocks. This implementation refers to them as Salsa20 blocks and
// scrypt blocks, respectively.

// A block_t is a Salsa20 block.
typedef struct { uint32_t words[16]; } block_t; static_assert(sizeof(block_t) == 64, "block_t has padding"); // salsa208_word_specification implements the Salsa20/8 core function, also // described in RFC 7914, section 3. It modifies the block at |inout| // in-place. static void salsa208_word_specification(block_t *inout) { block_t x; OPENSSL_memcpy(&x, inout, sizeof(x)); for (int i = 8; i > 0; i -= 2) { x.words[4] ^= CRYPTO_rotl_u32(x.words[0] + x.words[12], 7); x.words[8] ^= CRYPTO_rotl_u32(x.words[4] + x.words[0], 9); x.words[12] ^= CRYPTO_rotl_u32(x.words[8] + x.words[4], 13); x.words[0] ^= CRYPTO_rotl_u32(x.words[12] + x.words[8], 18); x.words[9] ^= CRYPTO_rotl_u32(x.words[5] + x.words[1], 7); x.words[13] ^= CRYPTO_rotl_u32(x.words[9] + x.words[5], 9); x.words[1] ^= CRYPTO_rotl_u32(x.words[13] + x.words[9], 13); x.words[5] ^= CRYPTO_rotl_u32(x.words[1] + x.words[13], 18); x.words[14] ^= CRYPTO_rotl_u32(x.words[10] + x.words[6], 7); x.words[2] ^= CRYPTO_rotl_u32(x.words[14] + x.words[10], 9); x.words[6] ^= CRYPTO_rotl_u32(x.words[2] + x.words[14], 13); x.words[10] ^= CRYPTO_rotl_u32(x.words[6] + x.words[2], 18); x.words[3] ^= CRYPTO_rotl_u32(x.words[15] + x.words[11], 7); x.words[7] ^= CRYPTO_rotl_u32(x.words[3] + x.words[15], 9); x.words[11] ^= CRYPTO_rotl_u32(x.words[7] + x.words[3], 13); x.words[15] ^= CRYPTO_rotl_u32(x.words[11] + x.words[7], 18); x.words[1] ^= CRYPTO_rotl_u32(x.words[0] + x.words[3], 7); x.words[2] ^= CRYPTO_rotl_u32(x.words[1] + x.words[0], 9); x.words[3] ^= CRYPTO_rotl_u32(x.words[2] + x.words[1], 13); x.words[0] ^= CRYPTO_rotl_u32(x.words[3] + x.words[2], 18); x.words[6] ^= CRYPTO_rotl_u32(x.words[5] + x.words[4], 7); x.words[7] ^= CRYPTO_rotl_u32(x.words[6] + x.words[5], 9); x.words[4] ^= CRYPTO_rotl_u32(x.words[7] + x.words[6], 13); x.words[5] ^= CRYPTO_rotl_u32(x.words[4] + x.words[7], 18); x.words[11] ^= CRYPTO_rotl_u32(x.words[10] + x.words[9], 7); x.words[8] ^= CRYPTO_rotl_u32(x.words[11] + x.words[10], 9); x.words[9] ^= 
CRYPTO_rotl_u32(x.words[8] + x.words[11], 13); x.words[10] ^= CRYPTO_rotl_u32(x.words[9] + x.words[8], 18); x.words[12] ^= CRYPTO_rotl_u32(x.words[15] + x.words[14], 7); x.words[13] ^= CRYPTO_rotl_u32(x.words[12] + x.words[15], 9); x.words[14] ^= CRYPTO_rotl_u32(x.words[13] + x.words[12], 13); x.words[15] ^= CRYPTO_rotl_u32(x.words[14] + x.words[13], 18); } for (int i = 0; i < 16; ++i) { inout->words[i] += x.words[i]; } } // xor_block sets |*out| to be |*a| XOR |*b|. static void xor_block(block_t *out, const block_t *a, const block_t *b) { for (size_t i = 0; i < 16; i++) { out->words[i] = a->words[i] ^ b->words[i]; } } // scryptBlockMix implements the function described in RFC 7914, section 4. B' // is written to |out|. |out| and |B| may not alias and must be each one scrypt // block (2 * |r| Salsa20 blocks) long. static void scryptBlockMix(block_t *out, const block_t *B, uint64_t r) { assert(out != B); block_t X; OPENSSL_memcpy(&X, &B[r * 2 - 1], sizeof(X)); for (uint64_t i = 0; i < r * 2; i++) { xor_block(&X, &X, &B[i]); salsa208_word_specification(&X); // This implements the permutation in step 3. OPENSSL_memcpy(&out[i / 2 + (i & 1) * r], &X, sizeof(X)); } } // scryptROMix implements the function described in RFC 7914, section 5. |B| is // an scrypt block (2 * |r| Salsa20 blocks) and is modified in-place. |T| and // |V| are scratch space allocated by the caller. |T| must have space for one // scrypt block (2 * |r| Salsa20 blocks). |V| must have space for |N| scrypt // blocks (2 * |r| * |N| Salsa20 blocks). static void scryptROMix(block_t *B, uint64_t r, uint64_t N, block_t *T, block_t *V) { // Steps 1 and 2. OPENSSL_memcpy(V, B, 2 * r * sizeof(block_t)); for (uint64_t i = 1; i < N; i++) { scryptBlockMix(&V[2 * r * i /* scrypt block i */], &V[2 * r * (i - 1) /* scrypt block i-1 */], r); } scryptBlockMix(B, &V[2 * r * (N - 1) /* scrypt block N-1 */], r); // Step 3. for (uint64_t i = 0; i < N; i++) { // Note this assumes |N| <= 2^32 and is a power of 2. 
uint32_t j = B[2 * r - 1].words[0] & (N - 1); for (size_t k = 0; k < 2 * r; k++) { xor_block(&T[k], &B[k], &V[2 * r * j + k]); } scryptBlockMix(B, T, r); } } // SCRYPT_PR_MAX is the maximum value of p * r. This is equivalent to the // bounds on p in section 6: // // p <= ((2^32-1) * hLen) / MFLen iff // p <= ((2^32-1) * 32) / (128 * r) iff // p * r <= (2^30-1) #define SCRYPT_PR_MAX ((1 << 30) - 1) // SCRYPT_MAX_MEM is the default maximum memory that may be allocated by // |EVP_PBE_scrypt|. #define SCRYPT_MAX_MEM (1024 * 1024 * 65) int EVP_PBE_scrypt(const char *password, size_t password_len, const uint8_t *salt, size_t salt_len, uint64_t N, uint64_t r, uint64_t p, size_t max_mem, uint8_t *out_key, size_t key_len) { if (r == 0 || p == 0 || p > SCRYPT_PR_MAX / r || // |N| must be a power of two. N < 2 || (N & (N - 1)) || // We only support |N| <= 2^32 in |scryptROMix|. N > UINT64_C(1) << 32 || // Check that |N| < 2^(128×r / 8). (16 * r <= 63 && N >= UINT64_C(1) << (16 * r))) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PARAMETERS); return 0; } // Determine the amount of memory needed. B, T, and V are |p|, 1, and |N| // scrypt blocks, respectively. Each scrypt block is 2*|r| |block_t|s. if (max_mem == 0) { max_mem = SCRYPT_MAX_MEM; } size_t max_scrypt_blocks = max_mem / (2 * r * sizeof(block_t)); if (max_scrypt_blocks < p + 1 || max_scrypt_blocks - p - 1 < N) { OPENSSL_PUT_ERROR(EVP, EVP_R_MEMORY_LIMIT_EXCEEDED); return 0; } // Allocate and divide up the scratch space. |max_mem| fits in a size_t, which // is no bigger than uint64_t, so none of these operations may overflow. 
static_assert(UINT64_MAX >= SIZE_MAX, "size_t exceeds uint64_t"); size_t B_blocks = p * 2 * r; size_t B_bytes = B_blocks * sizeof(block_t); size_t T_blocks = 2 * r; size_t V_blocks = N * 2 * r; block_t *B = reinterpret_cast( OPENSSL_calloc(B_blocks + T_blocks + V_blocks, sizeof(block_t))); if (B == NULL) { return 0; } int ret = 0; block_t *T = B + B_blocks; block_t *V = T + T_blocks; // NOTE: PKCS5_PBKDF2_HMAC can only fail due to allocation failure // or |iterations| of 0 (we pass 1 here). This is consistent with // the documented failure conditions of EVP_PBE_scrypt. if (!PKCS5_PBKDF2_HMAC(password, password_len, salt, salt_len, 1, EVP_sha256(), B_bytes, (uint8_t *)B)) { goto err; } for (uint64_t i = 0; i < p; i++) { scryptROMix(B + 2 * r * i, r, N, T, V); } if (!PKCS5_PBKDF2_HMAC(password, password_len, (const uint8_t *)B, B_bytes, 1, EVP_sha256(), key_len, out_key)) { goto err; } ret = 1; err: OPENSSL_free(B); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/evp/sign.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "internal.h" int EVP_SignInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl) { return EVP_DigestInit_ex(ctx, type, impl); } int EVP_SignInit(EVP_MD_CTX *ctx, const EVP_MD *type) { return EVP_DigestInit(ctx, type); } int EVP_SignUpdate(EVP_MD_CTX *ctx, const void *data, size_t len) { return EVP_DigestUpdate(ctx, data, len); } int EVP_SignFinal(const EVP_MD_CTX *ctx, uint8_t *sig, unsigned *out_sig_len, EVP_PKEY *pkey) { uint8_t m[EVP_MAX_MD_SIZE]; unsigned m_len; int ret = 0; EVP_MD_CTX tmp_ctx; EVP_PKEY_CTX *pkctx = NULL; size_t sig_len = EVP_PKEY_size(pkey); // Ensure the final result will fit in |unsigned|. if (sig_len > UINT_MAX) { sig_len = UINT_MAX; } *out_sig_len = 0; EVP_MD_CTX_init(&tmp_ctx); if (!EVP_MD_CTX_copy_ex(&tmp_ctx, ctx) || !EVP_DigestFinal_ex(&tmp_ctx, m, &m_len)) { goto out; } EVP_MD_CTX_cleanup(&tmp_ctx); pkctx = EVP_PKEY_CTX_new(pkey, NULL); if (!pkctx || // !EVP_PKEY_sign_init(pkctx) || !EVP_PKEY_CTX_set_signature_md(pkctx, ctx->digest) || !EVP_PKEY_sign(pkctx, sig, &sig_len, m, m_len)) { goto out; } *out_sig_len = (unsigned)sig_len; ret = 1; out: EVP_PKEY_CTX_free(pkctx); return ret; } int EVP_VerifyInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl) { return EVP_DigestInit_ex(ctx, type, impl); } int EVP_VerifyInit(EVP_MD_CTX *ctx, const EVP_MD *type) { return EVP_DigestInit(ctx, type); } int EVP_VerifyUpdate(EVP_MD_CTX *ctx, const void *data, size_t len) { return EVP_DigestUpdate(ctx, data, len); } int EVP_VerifyFinal(EVP_MD_CTX *ctx, const uint8_t *sig, size_t sig_len, EVP_PKEY *pkey) { uint8_t m[EVP_MAX_MD_SIZE]; unsigned m_len; int ret = 0; EVP_MD_CTX tmp_ctx; EVP_PKEY_CTX *pkctx = NULL; EVP_MD_CTX_init(&tmp_ctx); if (!EVP_MD_CTX_copy_ex(&tmp_ctx, ctx) || !EVP_DigestFinal_ex(&tmp_ctx, m, &m_len)) { EVP_MD_CTX_cleanup(&tmp_ctx); goto out; } 
EVP_MD_CTX_cleanup(&tmp_ctx); pkctx = EVP_PKEY_CTX_new(pkey, NULL); if (!pkctx || !EVP_PKEY_verify_init(pkctx) || !EVP_PKEY_CTX_set_signature_md(pkctx, ctx->digest)) { goto out; } ret = EVP_PKEY_verify(pkctx, sig, sig_len, m, m_len); out: EVP_PKEY_CTX_free(pkctx); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/ex_data.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include "internal.h" DEFINE_STACK_OF(CRYPTO_EX_DATA_FUNCS) struct crypto_ex_data_func_st { long argl; // Arbitary long void *argp; // Arbitary void pointer CRYPTO_EX_free *free_func; // next points to the next |CRYPTO_EX_DATA_FUNCS| or NULL if this is the last // one. It may only be read if synchronized with a read from |num_funcs|. CRYPTO_EX_DATA_FUNCS *next; }; int CRYPTO_get_ex_new_index_ex(CRYPTO_EX_DATA_CLASS *ex_data_class, long argl, void *argp, CRYPTO_EX_free *free_func) { CRYPTO_EX_DATA_FUNCS *funcs = reinterpret_cast( OPENSSL_malloc(sizeof(CRYPTO_EX_DATA_FUNCS))); if (funcs == NULL) { return -1; } funcs->argl = argl; funcs->argp = argp; funcs->free_func = free_func; funcs->next = NULL; CRYPTO_MUTEX_lock_write(&ex_data_class->lock); uint32_t num_funcs = CRYPTO_atomic_load_u32(&ex_data_class->num_funcs); // The index must fit in |int|. if (num_funcs > (size_t)(INT_MAX - ex_data_class->num_reserved)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_OVERFLOW); CRYPTO_MUTEX_unlock_write(&ex_data_class->lock); return -1; } // Append |funcs| to the linked list. 
if (ex_data_class->last == NULL) { assert(num_funcs == 0); ex_data_class->funcs = funcs; ex_data_class->last = funcs; } else { ex_data_class->last->next = funcs; ex_data_class->last = funcs; } CRYPTO_atomic_store_u32(&ex_data_class->num_funcs, num_funcs + 1); CRYPTO_MUTEX_unlock_write(&ex_data_class->lock); return (int)num_funcs + ex_data_class->num_reserved; } int CRYPTO_set_ex_data(CRYPTO_EX_DATA *ad, int index, void *val) { if (index < 0) { // A caller that can accidentally pass in an invalid index into this // function will hit an memory error if |index| happened to be valid, and // expected |val| to be of a different type. abort(); } if (ad->sk == NULL) { ad->sk = sk_void_new_null(); if (ad->sk == NULL) { return 0; } } // Add NULL values until the stack is long enough. for (size_t i = sk_void_num(ad->sk); i <= (size_t)index; i++) { if (!sk_void_push(ad->sk, NULL)) { return 0; } } sk_void_set(ad->sk, (size_t)index, val); return 1; } void *CRYPTO_get_ex_data(const CRYPTO_EX_DATA *ad, int idx) { if (ad->sk == NULL || idx < 0 || (size_t)idx >= sk_void_num(ad->sk)) { return NULL; } return sk_void_value(ad->sk, idx); } void CRYPTO_new_ex_data(CRYPTO_EX_DATA *ad) { ad->sk = NULL; } void CRYPTO_free_ex_data(CRYPTO_EX_DATA_CLASS *ex_data_class, void *obj, CRYPTO_EX_DATA *ad) { if (ad->sk == NULL) { // Nothing to do. return; } uint32_t num_funcs = CRYPTO_atomic_load_u32(&ex_data_class->num_funcs); // |CRYPTO_get_ex_new_index_ex| will not allocate indices beyond |INT_MAX|. assert(num_funcs <= (size_t)(INT_MAX - ex_data_class->num_reserved)); // Defer dereferencing |ex_data_class->funcs| and |funcs->next|. It must come // after the |num_funcs| comparison to be correctly synchronized. 
CRYPTO_EX_DATA_FUNCS *const *funcs = &ex_data_class->funcs; for (uint32_t i = 0; i < num_funcs; i++) { if ((*funcs)->free_func != NULL) { int index = (int)i + ex_data_class->num_reserved; void *ptr = CRYPTO_get_ex_data(ad, index); (*funcs)->free_func(obj, ptr, ad, index, (*funcs)->argl, (*funcs)->argp); } funcs = &(*funcs)->next; } sk_void_free(ad->sk); ad->sk = NULL; } void CRYPTO_cleanup_all_ex_data(void) {} ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/aes/aes.cc.inc ================================================ /* * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "internal.h" // Be aware that different sets of AES functions use incompatible key // representations, varying in format of the key schedule, the |AES_KEY.rounds| // value, or both. Therefore they cannot mix. Also, on AArch64, the plain-C // code, above, is incompatible with the |aes_hw_*| functions. 
// AES_encrypt encrypts one 16-byte block from |in| to |out| with |key|,
// dispatching to the fastest implementation the CPU supports.
void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
  if (hwaes_capable()) {
    aes_hw_encrypt(in, out, key);
  } else if (vpaes_capable()) {
    vpaes_encrypt(in, out, key);
  } else {
    aes_nohw_encrypt(in, out, key);
  }
}

// AES_decrypt decrypts one 16-byte block from |in| to |out| with |key|,
// dispatching like |AES_encrypt|. |key| must have been prepared with the
// matching set_decrypt_key path (key schedules are not interchangeable).
void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
  if (hwaes_capable()) {
    aes_hw_decrypt(in, out, key);
  } else if (vpaes_capable()) {
    vpaes_decrypt(in, out, key);
  } else {
    aes_nohw_decrypt(in, out, key);
  }
}

// AES_set_encrypt_key expands |key| (|bits| must be 128, 192, or 256) into an
// encryption key schedule in |aeskey|. Returns -2 for an invalid |bits| and
// otherwise the underlying implementation's result.
int AES_set_encrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) {
  if (bits != 128 && bits != 192 && bits != 256) {
    return -2;
  }
  if (hwaes_capable()) {
    return aes_hw_set_encrypt_key(key, bits, aeskey);
  } else if (vpaes_capable()) {
    return vpaes_set_encrypt_key(key, bits, aeskey);
  } else {
    return aes_nohw_set_encrypt_key(key, bits, aeskey);
  }
}

// AES_set_decrypt_key expands |key| (|bits| must be 128, 192, or 256) into a
// decryption key schedule in |aeskey|. Returns -2 for an invalid |bits| and
// otherwise the underlying implementation's result.
int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) {
  if (bits != 128 && bits != 192 && bits != 256) {
    return -2;
  }
  if (hwaes_capable()) {
    return aes_hw_set_decrypt_key(key, bits, aeskey);
  } else if (vpaes_capable()) {
    return vpaes_set_decrypt_key(key, bits, aeskey);
  } else {
    return aes_nohw_set_decrypt_key(key, bits, aeskey);
  }
}

#if defined(HWAES) && (defined(OPENSSL_X86) || defined(OPENSSL_X86_64))
// On x86 and x86_64, |aes_hw_set_decrypt_key|, we implement
// |aes_hw_encrypt_key_to_decrypt_key| in assembly and rely on C code to
// combine the operations.
int aes_hw_set_decrypt_key(const uint8_t *user_key, int bits, AES_KEY *key) {
  // Build the encryption schedule first, then invert it in-place.
  int ret = aes_hw_set_encrypt_key(user_key, bits, key);
  if (ret == 0) {
    aes_hw_encrypt_key_to_decrypt_key(key);
  }
  return ret;
}

// aes_hw_set_encrypt_key selects between the two assembly key-expansion
// variants at runtime.
int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits, AES_KEY *key) {
  if (aes_hw_set_encrypt_key_alt_preferred()) {
    return aes_hw_set_encrypt_key_alt(user_key, bits, key);
  } else {
    return aes_hw_set_encrypt_key_base(user_key, bits, key);
  }
}
#endif

#if defined(VPAES) && defined(OPENSSL_X86)
// On x86, there is no |vpaes_ctr32_encrypt_blocks|, so we implement it
// ourselves.
// This avoids all callers needing to account for a missing function.
// vpaes_ctr32_encrypt_blocks encrypts |blocks| 16-byte blocks in CTR mode
// with |key|, using the big-endian 32-bit counter in the last four bytes of
// |iv|. The first 12 bytes of the IV are fixed for the whole run.
void vpaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t blocks,
                                const AES_KEY *key, const uint8_t iv[16]) {
  uint32_t ctr = CRYPTO_load_u32_be(iv + 12);
  uint8_t iv_buf[16], enc[16];
  OPENSSL_memcpy(iv_buf, iv, 12);
  for (size_t i = 0; i < blocks; i++) {
    CRYPTO_store_u32_be(iv_buf + 12, ctr);
    vpaes_encrypt(iv_buf, enc, key);
    // Keystream XOR: out = in ^ E(counter block).
    CRYPTO_xor16(out, in, enc);
    ctr++;
    in += 16;
    out += 16;
  }
}
#endif

#if defined(BSAES)
// vpaes_ctr32_encrypt_blocks_with_bsaes does CTR encryption with a vpaes key
// schedule, converting to bsaes for large inputs where its batching wins.
void vpaes_ctr32_encrypt_blocks_with_bsaes(const uint8_t *in, uint8_t *out,
                                           size_t blocks, const AES_KEY *key,
                                           const uint8_t ivec[16]) {
  // |bsaes_ctr32_encrypt_blocks| is faster than |vpaes_ctr32_encrypt_blocks|,
  // but it takes at least one full 8-block batch to amortize the conversion.
  if (blocks < 8) {
    vpaes_ctr32_encrypt_blocks(in, out, blocks, key, ivec);
    return;
  }

  size_t bsaes_blocks = blocks;
  if (bsaes_blocks % 8 < 6) {
    // |bsaes_ctr32_encrypt_blocks| internally works in 8-block batches. If the
    // final batch is too small (under six blocks), it is faster to loop over
    // |vpaes_encrypt|. Round |bsaes_blocks| down to a multiple of 8.
    bsaes_blocks -= bsaes_blocks % 8;
  }

  // Convert the key schedule, run the bulk of the input through bsaes, and
  // scrub the converted schedule from the stack afterwards.
  AES_KEY bsaes;
  vpaes_encrypt_key_to_bsaes(&bsaes, key);
  bsaes_ctr32_encrypt_blocks(in, out, bsaes_blocks, &bsaes, ivec);
  OPENSSL_cleanse(&bsaes, sizeof(bsaes));

  in += 16 * bsaes_blocks;
  out += 16 * bsaes_blocks;
  blocks -= bsaes_blocks;

  // Advance the counter past the blocks bsaes already processed.
  uint8_t new_ivec[16];
  memcpy(new_ivec, ivec, 12);
  uint32_t ctr = CRYPTO_load_u32_be(ivec + 12) + bsaes_blocks;
  CRYPTO_store_u32_be(new_ivec + 12, ctr);

  // Finish any remaining blocks with |vpaes_ctr32_encrypt_blocks|.
  vpaes_ctr32_encrypt_blocks(in, out, blocks, key, new_ivec);
}
#endif  // BSAES

// aes_ctr_set_key expands |key| into |aes_key| and returns the CTR-mode
// encryption function matching the chosen implementation. |out_is_hwaes| and
// |out_block|, when non-NULL, receive whether hardware AES was selected and
// the single-block encrypt function, respectively.
ctr128_f aes_ctr_set_key(AES_KEY *aes_key, int *out_is_hwaes,
                         block128_f *out_block, const uint8_t *key,
                         size_t key_bytes) {
  // This function assumes the key length was previously validated.
  assert(key_bytes == 128 / 8 || key_bytes == 192 / 8 || key_bytes == 256 / 8);
  if (hwaes_capable()) {
    aes_hw_set_encrypt_key(key, (int)key_bytes * 8, aes_key);
    if (out_is_hwaes) {
      *out_is_hwaes = 1;
    }
    if (out_block) {
      *out_block = aes_hw_encrypt;
    }
    return aes_hw_ctr32_encrypt_blocks;
  }

  if (vpaes_capable()) {
    vpaes_set_encrypt_key(key, (int)key_bytes * 8, aes_key);
    if (out_block) {
      *out_block = vpaes_encrypt;
    }
    if (out_is_hwaes) {
      *out_is_hwaes = 0;
    }
#if defined(BSAES)
    assert(bsaes_capable());
    return vpaes_ctr32_encrypt_blocks_with_bsaes;
#else
    return vpaes_ctr32_encrypt_blocks;
#endif
  }

  aes_nohw_set_encrypt_key(key, (int)key_bytes * 8, aes_key);
  if (out_is_hwaes) {
    *out_is_hwaes = 0;
  }
  if (out_block) {
    *out_block = aes_nohw_encrypt;
  }
  return aes_nohw_ctr32_encrypt_blocks;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/aes/aes_nohw.cc.inc
================================================
/* Copyright 2019 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ #include #include #include #include "../../internal.h" #include "internal.h" #if defined(OPENSSL_SSE2) #include #endif // This file contains a constant-time implementation of AES, bitsliced with // 32-bit, 64-bit, or 128-bit words, operating on two-, four-, and eight-block // batches, respectively. The 128-bit implementation requires SSE2 intrinsics. // // This implementation is based on the algorithms described in the following // references: // - https://bearssl.org/constanttime.html#aes // - https://eprint.iacr.org/2009/129.pdf // - https://eprint.iacr.org/2009/191.pdf // Word operations. // // An aes_word_t is the word used for this AES implementation. Throughout this // file, bits and bytes are ordered little-endian, though "left" and "right" // shifts match the operations themselves, which makes them reversed in a // little-endian, left-to-right reading. // // Eight |aes_word_t|s contain |AES_NOHW_BATCH_SIZE| blocks. The bits in an // |aes_word_t| are divided into 16 consecutive groups of |AES_NOHW_BATCH_SIZE| // bits each, each corresponding to a byte in an AES block in column-major // order (AES's byte order). We refer to these as "logical bytes". Note, in the // 32-bit and 64-bit implementations, they are smaller than a byte. (The // contents of a logical byte will be described later.) // // MSVC does not support C bit operators on |__m128i|, so the wrapper functions // |aes_nohw_and|, etc., should be used instead. Note |aes_nohw_shift_left| and // |aes_nohw_shift_right| measure the shift in logical bytes. That is, the shift // value ranges from 0 to 15 independent of |aes_word_t| and // |AES_NOHW_BATCH_SIZE|. // // This ordering is different from https://eprint.iacr.org/2009/129.pdf, which // uses row-major order. Matching the AES order was easier to reason about, and // we do not have PSHUFB available to arbitrarily permute bytes. #if defined(OPENSSL_SSE2) typedef __m128i aes_word_t; // AES_NOHW_WORD_SIZE is sizeof(aes_word_t). 
// alignas(sizeof(T)) does not work in MSVC, so we define a constant.
#define AES_NOHW_WORD_SIZE 16
#define AES_NOHW_BATCH_SIZE 8
#define AES_NOHW_ROW0_MASK \
  _mm_set_epi32(0x000000ff, 0x000000ff, 0x000000ff, 0x000000ff)
#define AES_NOHW_ROW1_MASK \
  _mm_set_epi32(0x0000ff00, 0x0000ff00, 0x0000ff00, 0x0000ff00)
#define AES_NOHW_ROW2_MASK \
  _mm_set_epi32(0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000)
#define AES_NOHW_ROW3_MASK \
  _mm_set_epi32(0xff000000, 0xff000000, 0xff000000, 0xff000000)
#define AES_NOHW_COL01_MASK \
  _mm_set_epi32(0x00000000, 0x00000000, 0xffffffff, 0xffffffff)
#define AES_NOHW_COL2_MASK \
  _mm_set_epi32(0x00000000, 0xffffffff, 0x00000000, 0x00000000)
#define AES_NOHW_COL3_MASK \
  _mm_set_epi32(0xffffffff, 0x00000000, 0x00000000, 0x00000000)

// aes_nohw_and returns the bitwise AND of |a| and |b|.
static inline aes_word_t aes_nohw_and(aes_word_t a, aes_word_t b) {
  return _mm_and_si128(a, b);
}

// aes_nohw_or returns the bitwise OR of |a| and |b|.
static inline aes_word_t aes_nohw_or(aes_word_t a, aes_word_t b) {
  return _mm_or_si128(a, b);
}

// aes_nohw_xor returns the bitwise XOR of |a| and |b|.
static inline aes_word_t aes_nohw_xor(aes_word_t a, aes_word_t b) {
  return _mm_xor_si128(a, b);
}

// aes_nohw_not returns the bitwise complement of |a|. (There is no SSE2 NOT
// instruction, so this XORs with all-ones.)
static inline aes_word_t aes_nohw_not(aes_word_t a) {
  return _mm_xor_si128(
      a, _mm_set_epi32(0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff));
}

// These are macros because parameters to |_mm_slli_si128| and |_mm_srli_si128|
// must be constants.
#define aes_nohw_shift_left(/* aes_word_t */ a, /* const */ i) \
  _mm_slli_si128((a), (i))
#define aes_nohw_shift_right(/* aes_word_t */ a, /* const */ i) \
  _mm_srli_si128((a), (i))

#else  // !OPENSSL_SSE2

#if defined(OPENSSL_64_BIT)
typedef uint64_t aes_word_t;
#define AES_NOHW_WORD_SIZE 8
#define AES_NOHW_BATCH_SIZE 4
#define AES_NOHW_ROW0_MASK UINT64_C(0x000f000f000f000f)
#define AES_NOHW_ROW1_MASK UINT64_C(0x00f000f000f000f0)
#define AES_NOHW_ROW2_MASK UINT64_C(0x0f000f000f000f00)
#define AES_NOHW_ROW3_MASK UINT64_C(0xf000f000f000f000)
#define AES_NOHW_COL01_MASK UINT64_C(0x00000000ffffffff)
#define AES_NOHW_COL2_MASK UINT64_C(0x0000ffff00000000)
#define AES_NOHW_COL3_MASK UINT64_C(0xffff000000000000)
#else  // !OPENSSL_64_BIT
typedef uint32_t aes_word_t;
#define AES_NOHW_WORD_SIZE 4
#define AES_NOHW_BATCH_SIZE 2
#define AES_NOHW_ROW0_MASK 0x03030303
#define AES_NOHW_ROW1_MASK 0x0c0c0c0c
#define AES_NOHW_ROW2_MASK 0x30303030
#define AES_NOHW_ROW3_MASK 0xc0c0c0c0
#define AES_NOHW_COL01_MASK 0x0000ffff
#define AES_NOHW_COL2_MASK 0x00ff0000
#define AES_NOHW_COL3_MASK 0xff000000
#endif  // OPENSSL_64_BIT

// aes_nohw_and returns the bitwise AND of |a| and |b|.
static inline aes_word_t aes_nohw_and(aes_word_t a, aes_word_t b) {
  return a & b;
}

// aes_nohw_or returns the bitwise OR of |a| and |b|.
static inline aes_word_t aes_nohw_or(aes_word_t a, aes_word_t b) {
  return a | b;
}

// aes_nohw_xor returns the bitwise XOR of |a| and |b|.
static inline aes_word_t aes_nohw_xor(aes_word_t a, aes_word_t b) {
  return a ^ b;
}

// aes_nohw_not returns the bitwise complement of |a|.
static inline aes_word_t aes_nohw_not(aes_word_t a) { return ~a; }

// aes_nohw_shift_left shifts |a| left by |i| logical bytes. Each logical byte
// is |AES_NOHW_BATCH_SIZE| bits wide, so the bit shift is scaled accordingly.
static inline aes_word_t aes_nohw_shift_left(aes_word_t a, aes_word_t i) {
  return a << (i * AES_NOHW_BATCH_SIZE);
}

// aes_nohw_shift_right shifts |a| right by |i| logical bytes.
static inline aes_word_t aes_nohw_shift_right(aes_word_t a, aes_word_t i) {
  return a >> (i * AES_NOHW_BATCH_SIZE);
}
#endif  // OPENSSL_SSE2

static_assert(AES_NOHW_BATCH_SIZE * 128 == 8 * 8 * sizeof(aes_word_t),
              "batch size does not match word size");
static_assert(AES_NOHW_WORD_SIZE == sizeof(aes_word_t),
              "AES_NOHW_WORD_SIZE is incorrect");


// Block representations.
//
// This implementation uses three representations for AES blocks.
// First, the public API represents blocks as uint8_t[16] in the usual way.
// Second, most AES steps are evaluated in bitsliced form, stored in an
// |AES_NOHW_BATCH|. This stores |AES_NOHW_BATCH_SIZE| blocks in bitsliced
// order. For 64-bit words containing bitsliced blocks a, b, c, d, this would
// be as follows (vertical bars divide logical bytes):
//
//   batch.w[0] = a0 b0 c0 d0 |  a8  b8  c8  d8 | a16 b16 c16 d16 ...
//   batch.w[1] = a1 b1 c1 d1 |  a9  b9  c9  d9 | a17 b17 c17 d17 ...
//   batch.w[2] = a2 b2 c2 d2 | a10 b10 c10 d10 | a18 b18 c18 d18 ...
//   batch.w[3] = a3 b3 c3 d3 | a11 b11 c11 d11 | a19 b19 c19 d19 ...
//   ...
//
// Finally, an individual block may be stored as an intermediate form in an
// aes_word_t[AES_NOHW_BLOCK_WORDS]. In this form, we permute the bits in each
// block, so that block[0]'s ith logical byte contains least-significant
// |AES_NOHW_BATCH_SIZE| bits of byte i, block[1] contains the next group of
// |AES_NOHW_BATCH_SIZE| bits, and so on. We refer to this transformation as
// "compacting" the block. Note this is no-op with 128-bit words because then
// |AES_NOHW_BLOCK_WORDS| is one and |AES_NOHW_BATCH_SIZE| is eight. For 64-bit
// words, one block would be stored in two words:
//
//   block[0] = a0 a1 a2 a3 |  a8  a9 a10 a11 | a16 a17 a18 a19 ...
//   block[1] = a4 a5 a6 a7 | a12 a13 a14 a15 | a20 a21 a22 a23 ...
//
// Observe that the distances between corresponding bits in bitsliced and
// compact bit orders match. If we line up corresponding words of each block,
// the bitsliced and compact representations may be converted by tranposing bits
// in corresponding logical bytes. Continuing the 64-bit example:
//
//   block_a[0] = a0 a1 a2 a3 |  a8  a9 a10 a11 | a16 a17 a18 a19 ...
//   block_b[0] = b0 b1 b2 b3 |  b8  b9 b10 b11 | b16 b17 b18 b19 ...
//   block_c[0] = c0 c1 c2 c3 |  c8  c9 c10 c11 | c16 c17 c18 c19 ...
//   block_d[0] = d0 d1 d2 d3 |  d8  d9 d10 d11 | d16 d17 d18 d19 ...
//
//   batch.w[0] = a0 b0 c0 d0 |  a8  b8  c8  d8 | a16 b16 c16 d16 ...
//   batch.w[1] = a1 b1 c1 d1 |  a9  b9  c9  d9 | a17 b17 c17 d17 ...
//   batch.w[2] = a2 b2 c2 d2 | a10 b10 c10 d10 | a18 b18 c18 d18 ...
//   batch.w[3] = a3 b3 c3 d3 | a11 b11 c11 d11 | a19 b19 c19 d19 ...
//
// Note also that bitwise operations and (logical) byte permutations on an
// |aes_word_t| work equally for the bitsliced and compact words.
//
// We use the compact form in the |AES_KEY| representation to save work
// inflating round keys into |AES_NOHW_BATCH|. The compact form also exists
// temporarily while moving blocks in or out of an |AES_NOHW_BATCH|, immediately
// before or after |aes_nohw_transpose|.

// AES_NOHW_BLOCK_WORDS is the number of |aes_word_t|s needed to hold one
// 16-byte AES block in compact form.
#define AES_NOHW_BLOCK_WORDS (16 / sizeof(aes_word_t))

// An AES_NOHW_BATCH stores |AES_NOHW_BATCH_SIZE| blocks. Unless otherwise
// specified, it is in bitsliced form.
typedef struct {
  aes_word_t w[8];
} AES_NOHW_BATCH;

// An AES_NOHW_SCHEDULE is an expanded bitsliced AES key schedule. It is
// suitable for encryption or decryption. It is as large as |AES_NOHW_BATCH|
// |AES_KEY|s so it should not be used as a long-term key representation.
typedef struct {
  // keys is an array of batches, one for each round key. Each batch stores
  // |AES_NOHW_BATCH_SIZE| copies of the round key in bitsliced form.
  AES_NOHW_BATCH keys[AES_MAXNR + 1];
} AES_NOHW_SCHEDULE;

// aes_nohw_batch_set sets the |i|th block of |batch| to |in|. |batch| is in
// compact form.
static inline void aes_nohw_batch_set(AES_NOHW_BATCH *batch,
                                      const aes_word_t in[AES_NOHW_BLOCK_WORDS],
                                      size_t i) {
  // Note the words are interleaved. The order comes from |aes_nohw_transpose|.
  // If |i| is zero and this is the 64-bit implementation, in[0] contains bits
  // 0-3 and in[1] contains bits 4-7. We place in[0] at w[0] and in[1] at
  // w[4] so that bits 0 and 4 are in the correct position. (In general, bits
  // along diagonals of |AES_NOHW_BATCH_SIZE| by |AES_NOHW_BATCH_SIZE| squares
  // will be correctly placed.)
  assert(i < AES_NOHW_BATCH_SIZE);
#if defined(OPENSSL_SSE2)
  batch->w[i] = in[0];
#elif defined(OPENSSL_64_BIT)
  batch->w[i] = in[0];
  batch->w[i + 4] = in[1];
#else
  batch->w[i] = in[0];
  batch->w[i + 2] = in[1];
  batch->w[i + 4] = in[2];
  batch->w[i + 6] = in[3];
#endif
}

// aes_nohw_batch_get writes the |i|th block of |batch| to |out|. |batch| is in
// compact form.
static inline void aes_nohw_batch_get(const AES_NOHW_BATCH *batch,
                                      aes_word_t out[AES_NOHW_BLOCK_WORDS],
                                      size_t i) {
  assert(i < AES_NOHW_BATCH_SIZE);
#if defined(OPENSSL_SSE2)
  out[0] = batch->w[i];
#elif defined(OPENSSL_64_BIT)
  out[0] = batch->w[i];
  out[1] = batch->w[i + 4];
#else
  out[0] = batch->w[i];
  out[1] = batch->w[i + 2];
  out[2] = batch->w[i + 4];
  out[3] = batch->w[i + 6];
#endif
}

#if !defined(OPENSSL_SSE2)
// aes_nohw_delta_swap returns |a| with bits |a & mask| and
// |a & (mask << shift)| swapped. |mask| and |mask << shift| may not overlap.
static inline aes_word_t aes_nohw_delta_swap(aes_word_t a, aes_word_t mask,
                                             aes_word_t shift) {
  // See
  // https://reflectionsonsecurity.wordpress.com/2014/05/11/efficient-bit-permutation-using-delta-swaps/
  aes_word_t b = (a ^ (a >> shift)) & mask;
  return a ^ b ^ (b << shift);
}

// In the 32-bit and 64-bit implementations, a block spans multiple words.
// |aes_nohw_compact_block| must permute bits across different words. First we
// implement |aes_nohw_compact_word| which performs a smaller version of the
// transformation which stays within a single word.
//
// These transformations are generalizations of the output of
// http://programming.sirrida.de/calcperm.php on smaller inputs.
#if defined(OPENSSL_64_BIT)
static inline uint64_t aes_nohw_compact_word(uint64_t a) {
  // Numbering the 64/2 = 16 4-bit chunks, least to most significant, we swap
  // quartets of those chunks:
  //   0 1 2 3 | 4 5 6 7 | 8  9 10 11 | 12 13 14 15 =>
  //   0 2 1 3 | 4 6 5 7 | 8 10  9 11 | 12 14 13 15
  a = aes_nohw_delta_swap(a, UINT64_C(0x00f000f000f000f0), 4);
  // Swap quartets of 8-bit chunks (still numbering by 4-bit chunks):
  //   0 2 1 3 | 4 6 5 7 | 8 10  9 11 | 12 14 13 15 =>
  //   0 2 4 6 | 1 3 5 7 | 8 10 12 14 |  9 11 13 15
  a = aes_nohw_delta_swap(a, UINT64_C(0x0000ff000000ff00), 8);
  // Swap quartets of 16-bit chunks (still numbering by 4-bit chunks):
  //   0 2 4 6 | 1  3  5  7 | 8 10 12 14 | 9 11 13 15 =>
  //   0 2 4 6 | 8 10 12 14 | 1  3  5  7 | 9 11 13 15
  a = aes_nohw_delta_swap(a, UINT64_C(0x00000000ffff0000), 16);
  return a;
}

static inline uint64_t aes_nohw_uncompact_word(uint64_t a) {
  // Reverse the steps of |aes_nohw_compact_word|.
  a = aes_nohw_delta_swap(a, UINT64_C(0x00000000ffff0000), 16);
  a = aes_nohw_delta_swap(a, UINT64_C(0x0000ff000000ff00), 8);
  a = aes_nohw_delta_swap(a, UINT64_C(0x00f000f000f000f0), 4);
  return a;
}
#else   // !OPENSSL_64_BIT
static inline uint32_t aes_nohw_compact_word(uint32_t a) {
  // Numbering the 32/2 = 16 pairs of bits, least to most significant, we swap:
  //   0 1 2 3 | 4 5 6 7 | 8  9 10 11 | 12 13 14 15 =>
  //   0 4 2 6 | 1 5 3 7 | 8 12 10 14 |  9 13 11 15
  // Note:  0x00cc = 0b0000_0000_1100_1100
  //   0x00cc << 6 = 0b0011_0011_0000_0000
  a = aes_nohw_delta_swap(a, 0x00cc00cc, 6);
  // Now we swap groups of four bits (still numbering by pairs):
  //   0 4 2  6 | 1 5 3  7 | 8 12 10 14 | 9 13 11 15 =>
  //   0 4 8 12 | 1 5 9 13 | 2  6 10 14 | 3  7 11 15
  // Note: 0x0000_f0f0 << 12 = 0x0f0f_0000
  a = aes_nohw_delta_swap(a, 0x0000f0f0, 12);
  return a;
}

static inline uint32_t aes_nohw_uncompact_word(uint32_t a) {
  // Reverse the steps of |aes_nohw_compact_word|.
  a = aes_nohw_delta_swap(a, 0x0000f0f0, 12);
  a = aes_nohw_delta_swap(a, 0x00cc00cc, 6);
  return a;
}

// aes_nohw_word_from_bytes assembles a little-endian |uint32_t| from the four
// bytes |a0| (least significant) through |a3| (most significant).
static inline uint32_t aes_nohw_word_from_bytes(uint8_t a0, uint8_t a1,
                                                uint8_t a2, uint8_t a3) {
  return (uint32_t)a0 | ((uint32_t)a1 << 8) | ((uint32_t)a2 << 16) |
         ((uint32_t)a3 << 24);
}
#endif  // OPENSSL_64_BIT
#endif  // !OPENSSL_SSE2

// aes_nohw_compact_block converts the 16-byte block |in| into compact form in
// |out|. With 128-bit words this is a plain copy.
static inline void aes_nohw_compact_block(aes_word_t out[AES_NOHW_BLOCK_WORDS],
                                          const uint8_t in[16]) {
  memcpy(out, in, 16);
#if defined(OPENSSL_SSE2)
  // No conversions needed.
#elif defined(OPENSSL_64_BIT)
  uint64_t a0 = aes_nohw_compact_word(out[0]);
  uint64_t a1 = aes_nohw_compact_word(out[1]);
  out[0] = (a0 & UINT64_C(0x00000000ffffffff)) | (a1 << 32);
  out[1] = (a1 & UINT64_C(0xffffffff00000000)) | (a0 >> 32);
#else
  uint32_t a0 = aes_nohw_compact_word(out[0]);
  uint32_t a1 = aes_nohw_compact_word(out[1]);
  uint32_t a2 = aes_nohw_compact_word(out[2]);
  uint32_t a3 = aes_nohw_compact_word(out[3]);
  // Note clang, when building for ARM Thumb2, will sometimes miscompile
  // expressions such as (a0 & 0x0000ff00) << 8, particularly when building
  // without optimizations. This bug was introduced in
  // https://reviews.llvm.org/rL340261 and fixed in
  // https://reviews.llvm.org/rL351310. The following is written to avoid this.
  out[0] = aes_nohw_word_from_bytes(a0, a1, a2, a3);
  out[1] = aes_nohw_word_from_bytes(a0 >> 8, a1 >> 8, a2 >> 8, a3 >> 8);
  out[2] = aes_nohw_word_from_bytes(a0 >> 16, a1 >> 16, a2 >> 16, a3 >> 16);
  out[3] = aes_nohw_word_from_bytes(a0 >> 24, a1 >> 24, a2 >> 24, a3 >> 24);
#endif
}

// aes_nohw_uncompact_block reverses |aes_nohw_compact_block|, writing the
// 16-byte block |out| from the compact-form |in|.
static inline void aes_nohw_uncompact_block(
    uint8_t out[16], const aes_word_t in[AES_NOHW_BLOCK_WORDS]) {
#if defined(OPENSSL_SSE2)
  memcpy(out, in, 16);  // No conversions needed.
#elif defined(OPENSSL_64_BIT)
  uint64_t a0 = in[0];
  uint64_t a1 = in[1];
  uint64_t b0 =
      aes_nohw_uncompact_word((a0 & UINT64_C(0x00000000ffffffff)) | (a1 << 32));
  uint64_t b1 =
      aes_nohw_uncompact_word((a1 & UINT64_C(0xffffffff00000000)) | (a0 >> 32));
  memcpy(out, &b0, 8);
  memcpy(out + 8, &b1, 8);
#else
  uint32_t a0 = in[0];
  uint32_t a1 = in[1];
  uint32_t a2 = in[2];
  uint32_t a3 = in[3];
  // Note clang, when building for ARM Thumb2, will sometimes miscompile
  // expressions such as (a0 & 0x0000ff00) << 8, particularly when building
  // without optimizations. This bug was introduced in
  // https://reviews.llvm.org/rL340261 and fixed in
  // https://reviews.llvm.org/rL351310. The following is written to avoid this.
  uint32_t b0 = aes_nohw_word_from_bytes(a0, a1, a2, a3);
  uint32_t b1 = aes_nohw_word_from_bytes(a0 >> 8, a1 >> 8, a2 >> 8, a3 >> 8);
  uint32_t b2 =
      aes_nohw_word_from_bytes(a0 >> 16, a1 >> 16, a2 >> 16, a3 >> 16);
  uint32_t b3 =
      aes_nohw_word_from_bytes(a0 >> 24, a1 >> 24, a2 >> 24, a3 >> 24);
  b0 = aes_nohw_uncompact_word(b0);
  b1 = aes_nohw_uncompact_word(b1);
  b2 = aes_nohw_uncompact_word(b2);
  b3 = aes_nohw_uncompact_word(b3);
  memcpy(out, &b0, 4);
  memcpy(out + 4, &b1, 4);
  memcpy(out + 8, &b2, 4);
  memcpy(out + 12, &b3, 4);
#endif
}

// aes_nohw_swap_bits is a variation on a delta swap. It swaps the bits in
// |*a & (mask << shift)| with the bits in |*b & mask|. |mask| and
// |mask << shift| must not overlap. |mask| is specified as a |uint32_t|, but it
// is repeated to the full width of |aes_word_t|.
#if defined(OPENSSL_SSE2)
// This must be a macro because |_mm_srli_epi32| and |_mm_slli_epi32| require
// constant shift values.
#define aes_nohw_swap_bits(/*__m128i* */ a, /*__m128i* */ b,              \
                           /* uint32_t */ mask, /* const */ shift)        \
  do {                                                                    \
    __m128i swap =                                                        \
        _mm_and_si128(_mm_xor_si128(_mm_srli_epi32(*(a), (shift)), *(b)), \
                      _mm_set_epi32((mask), (mask), (mask), (mask)));     \
    *(a) = _mm_xor_si128(*(a), _mm_slli_epi32(swap, (shift)));            \
    *(b) = _mm_xor_si128(*(b), swap);                                     \
  } while (0)
#else
static inline void aes_nohw_swap_bits(aes_word_t *a, aes_word_t *b,
                                      uint32_t mask, aes_word_t shift) {
#if defined(OPENSSL_64_BIT)
  aes_word_t mask_w = (((uint64_t)mask) << 32) | mask;
#else
  aes_word_t mask_w = mask;
#endif
  // This is a variation on a delta swap.
  aes_word_t swap = ((*a >> shift) ^ *b) & mask_w;
  *a ^= swap << shift;
  *b ^= swap;
}
#endif  // OPENSSL_SSE2

// aes_nohw_transpose converts |batch| to and from bitsliced form. It divides
// the 8 × word_size bits into AES_NOHW_BATCH_SIZE × AES_NOHW_BATCH_SIZE squares
// and transposes each square.
static void aes_nohw_transpose(AES_NOHW_BATCH *batch) {
  // Swap bits with index 0 and 1 mod 2 (0x55 = 0b01010101).
  aes_nohw_swap_bits(&batch->w[0], &batch->w[1], 0x55555555, 1);
  aes_nohw_swap_bits(&batch->w[2], &batch->w[3], 0x55555555, 1);
  aes_nohw_swap_bits(&batch->w[4], &batch->w[5], 0x55555555, 1);
  aes_nohw_swap_bits(&batch->w[6], &batch->w[7], 0x55555555, 1);

#if AES_NOHW_BATCH_SIZE >= 4
  // Swap bits with index 0-1 and 2-3 mod 4 (0x33 = 0b00110011).
  aes_nohw_swap_bits(&batch->w[0], &batch->w[2], 0x33333333, 2);
  aes_nohw_swap_bits(&batch->w[1], &batch->w[3], 0x33333333, 2);
  aes_nohw_swap_bits(&batch->w[4], &batch->w[6], 0x33333333, 2);
  aes_nohw_swap_bits(&batch->w[5], &batch->w[7], 0x33333333, 2);
#endif

#if AES_NOHW_BATCH_SIZE >= 8
  // Swap bits with index 0-3 and 4-7 mod 8 (0x0f = 0b00001111).
  aes_nohw_swap_bits(&batch->w[0], &batch->w[4], 0x0f0f0f0f, 4);
  aes_nohw_swap_bits(&batch->w[1], &batch->w[5], 0x0f0f0f0f, 4);
  aes_nohw_swap_bits(&batch->w[2], &batch->w[6], 0x0f0f0f0f, 4);
  aes_nohw_swap_bits(&batch->w[3], &batch->w[7], 0x0f0f0f0f, 4);
#endif
}

// aes_nohw_to_batch initializes |out| with the |num_blocks| blocks from |in|.
// |num_blocks| must be at most |AES_NOHW_BATCH|.
static void aes_nohw_to_batch(AES_NOHW_BATCH *out, const uint8_t *in,
                              size_t num_blocks) {
  // Don't leave unused blocks uninitialized.
  memset(out, 0, sizeof(AES_NOHW_BATCH));
  assert(num_blocks <= AES_NOHW_BATCH_SIZE);
  for (size_t i = 0; i < num_blocks; i++) {
    aes_word_t block[AES_NOHW_BLOCK_WORDS];
    aes_nohw_compact_block(block, in + 16 * i);
    aes_nohw_batch_set(out, block, i);
  }

  aes_nohw_transpose(out);
}

// aes_nohw_from_batch writes the first |num_blocks| blocks in |batch| to
// |out|. |num_blocks| must be at most |AES_NOHW_BATCH|.
static void aes_nohw_from_batch(uint8_t *out, size_t num_blocks,
                                const AES_NOHW_BATCH *batch) {
  // Work on a copy so |batch| itself is left in bitsliced form.
  AES_NOHW_BATCH copy = *batch;
  aes_nohw_transpose(&copy);

  assert(num_blocks <= AES_NOHW_BATCH_SIZE);
  for (size_t i = 0; i < num_blocks; i++) {
    aes_word_t block[AES_NOHW_BLOCK_WORDS];
    aes_nohw_batch_get(&copy, block, i);
    aes_nohw_uncompact_block(out + 16 * i, block);
  }
}


// AES round steps.

// aes_nohw_add_round_key XORs the bitsliced round key |key| into |batch|.
static void aes_nohw_add_round_key(AES_NOHW_BATCH *batch,
                                   const AES_NOHW_BATCH *key) {
  for (size_t i = 0; i < 8; i++) {
    batch->w[i] = aes_nohw_xor(batch->w[i], key->w[i]);
  }
}

// aes_nohw_sub_bytes applies the AES S-box to every byte of every block in
// |batch|, evaluated as a boolean circuit so it runs in constant time.
static void aes_nohw_sub_bytes(AES_NOHW_BATCH *batch) {
  // See https://eprint.iacr.org/2009/191.pdf, Appendix C.
  aes_word_t x0 = batch->w[7];
  aes_word_t x1 = batch->w[6];
  aes_word_t x2 = batch->w[5];
  aes_word_t x3 = batch->w[4];
  aes_word_t x4 = batch->w[3];
  aes_word_t x5 = batch->w[2];
  aes_word_t x6 = batch->w[1];
  aes_word_t x7 = batch->w[0];

  // Figure 2, the top linear transformation.
  aes_word_t y14 = aes_nohw_xor(x3, x5);
  aes_word_t y13 = aes_nohw_xor(x0, x6);
  aes_word_t y9 = aes_nohw_xor(x0, x3);
  aes_word_t y8 = aes_nohw_xor(x0, x5);
  aes_word_t t0 = aes_nohw_xor(x1, x2);
  aes_word_t y1 = aes_nohw_xor(t0, x7);
  aes_word_t y4 = aes_nohw_xor(y1, x3);
  aes_word_t y12 = aes_nohw_xor(y13, y14);
  aes_word_t y2 = aes_nohw_xor(y1, x0);
  aes_word_t y5 = aes_nohw_xor(y1, x6);
  aes_word_t y3 = aes_nohw_xor(y5, y8);
  aes_word_t t1 = aes_nohw_xor(x4, y12);
  aes_word_t y15 = aes_nohw_xor(t1, x5);
  aes_word_t y20 = aes_nohw_xor(t1, x1);
  aes_word_t y6 = aes_nohw_xor(y15, x7);
  aes_word_t y10 = aes_nohw_xor(y15, t0);
  aes_word_t y11 = aes_nohw_xor(y20, y9);
  aes_word_t y7 = aes_nohw_xor(x7, y11);
  aes_word_t y17 = aes_nohw_xor(y10, y11);
  aes_word_t y19 = aes_nohw_xor(y10, y8);
  aes_word_t y16 = aes_nohw_xor(t0, y11);
  aes_word_t y21 = aes_nohw_xor(y13, y16);
  aes_word_t y18 = aes_nohw_xor(x0, y16);

  // Figure 3, the middle non-linear section.
  aes_word_t t2 = aes_nohw_and(y12, y15);
  aes_word_t t3 = aes_nohw_and(y3, y6);
  aes_word_t t4 = aes_nohw_xor(t3, t2);
  aes_word_t t5 = aes_nohw_and(y4, x7);
  aes_word_t t6 = aes_nohw_xor(t5, t2);
  aes_word_t t7 = aes_nohw_and(y13, y16);
  aes_word_t t8 = aes_nohw_and(y5, y1);
  aes_word_t t9 = aes_nohw_xor(t8, t7);
  aes_word_t t10 = aes_nohw_and(y2, y7);
  aes_word_t t11 = aes_nohw_xor(t10, t7);
  aes_word_t t12 = aes_nohw_and(y9, y11);
  aes_word_t t13 = aes_nohw_and(y14, y17);
  aes_word_t t14 = aes_nohw_xor(t13, t12);
  aes_word_t t15 = aes_nohw_and(y8, y10);
  aes_word_t t16 = aes_nohw_xor(t15, t12);
  aes_word_t t17 = aes_nohw_xor(t4, t14);
  aes_word_t t18 = aes_nohw_xor(t6, t16);
  aes_word_t t19 = aes_nohw_xor(t9, t14);
  aes_word_t t20 = aes_nohw_xor(t11, t16);
  aes_word_t t21 = aes_nohw_xor(t17, y20);
  aes_word_t t22 = aes_nohw_xor(t18, y19);
  aes_word_t t23 = aes_nohw_xor(t19, y21);
  aes_word_t t24 = aes_nohw_xor(t20, y18);
  aes_word_t t25 = aes_nohw_xor(t21, t22);
  aes_word_t t26 = aes_nohw_and(t21, t23);
  aes_word_t t27 = aes_nohw_xor(t24, t26);
  aes_word_t t28 = aes_nohw_and(t25, t27);
  aes_word_t t29 = aes_nohw_xor(t28, t22);
  aes_word_t t30 = aes_nohw_xor(t23, t24);
  aes_word_t t31 = aes_nohw_xor(t22, t26);
  aes_word_t t32 = aes_nohw_and(t31, t30);
  aes_word_t t33 = aes_nohw_xor(t32, t24);
  aes_word_t t34 = aes_nohw_xor(t23, t33);
  aes_word_t t35 = aes_nohw_xor(t27, t33);
  aes_word_t t36 = aes_nohw_and(t24, t35);
  aes_word_t t37 = aes_nohw_xor(t36, t34);
  aes_word_t t38 = aes_nohw_xor(t27, t36);
  aes_word_t t39 = aes_nohw_and(t29, t38);
  aes_word_t t40 = aes_nohw_xor(t25, t39);
  aes_word_t t41 = aes_nohw_xor(t40, t37);
  aes_word_t t42 = aes_nohw_xor(t29, t33);
  aes_word_t t43 = aes_nohw_xor(t29, t40);
  aes_word_t t44 = aes_nohw_xor(t33, t37);
  aes_word_t t45 = aes_nohw_xor(t42, t41);
  aes_word_t z0 = aes_nohw_and(t44, y15);
  aes_word_t z1 = aes_nohw_and(t37, y6);
  aes_word_t z2 = aes_nohw_and(t33, x7);
  aes_word_t z3 = aes_nohw_and(t43, y16);
  aes_word_t z4 = aes_nohw_and(t40, y1);
  aes_word_t z5 = aes_nohw_and(t29, y7);
  aes_word_t z6 = aes_nohw_and(t42, y11);
  aes_word_t z7 = aes_nohw_and(t45, y17);
  aes_word_t z8 = aes_nohw_and(t41, y10);
  aes_word_t z9 = aes_nohw_and(t44, y12);
  aes_word_t z10 = aes_nohw_and(t37, y3);
  aes_word_t z11 = aes_nohw_and(t33, y4);
  aes_word_t z12 = aes_nohw_and(t43, y13);
  aes_word_t z13 = aes_nohw_and(t40, y5);
  aes_word_t z14 = aes_nohw_and(t29, y2);
  aes_word_t z15 = aes_nohw_and(t42, y9);
  aes_word_t z16 = aes_nohw_and(t45, y14);
  aes_word_t z17 = aes_nohw_and(t41, y8);

  // Figure 4, bottom linear transformation.
  aes_word_t t46 = aes_nohw_xor(z15, z16);
  aes_word_t t47 = aes_nohw_xor(z10, z11);
  aes_word_t t48 = aes_nohw_xor(z5, z13);
  aes_word_t t49 = aes_nohw_xor(z9, z10);
  aes_word_t t50 = aes_nohw_xor(z2, z12);
  aes_word_t t51 = aes_nohw_xor(z2, z5);
  aes_word_t t52 = aes_nohw_xor(z7, z8);
  aes_word_t t53 = aes_nohw_xor(z0, z3);
  aes_word_t t54 = aes_nohw_xor(z6, z7);
  aes_word_t t55 = aes_nohw_xor(z16, z17);
  aes_word_t t56 = aes_nohw_xor(z12, t48);
  aes_word_t t57 = aes_nohw_xor(t50, t53);
  aes_word_t t58 = aes_nohw_xor(z4, t46);
  aes_word_t t59 = aes_nohw_xor(z3, t54);
  aes_word_t t60 = aes_nohw_xor(t46, t57);
  aes_word_t t61 = aes_nohw_xor(z14, t57);
  aes_word_t t62 = aes_nohw_xor(t52, t58);
  aes_word_t t63 = aes_nohw_xor(t49, t58);
  aes_word_t t64 = aes_nohw_xor(z4, t59);
  aes_word_t t65 = aes_nohw_xor(t61, t62);
  aes_word_t t66 = aes_nohw_xor(z1, t63);
  aes_word_t s0 = aes_nohw_xor(t59, t63);
  aes_word_t s6 = aes_nohw_xor(t56, aes_nohw_not(t62));
  aes_word_t s7 = aes_nohw_xor(t48, aes_nohw_not(t60));
  aes_word_t t67 = aes_nohw_xor(t64, t65);
  aes_word_t s3 = aes_nohw_xor(t53, t66);
  aes_word_t s4 = aes_nohw_xor(t51, t66);
  aes_word_t s5 = aes_nohw_xor(t47, t65);
  aes_word_t s1 = aes_nohw_xor(t64, aes_nohw_not(s3));
  aes_word_t s2 = aes_nohw_xor(t55, aes_nohw_not(t67));

  batch->w[0] = s7;
  batch->w[1] = s6;
  batch->w[2] = s5;
  batch->w[3] = s4;
  batch->w[4] = s3;
  batch->w[5] = s2;
  batch->w[6] = s1;
  batch->w[7] = s0;
}

// aes_nohw_sub_bytes_inv_affine inverts the affine transform portion of the AES
// S-box, defined in FIPS PUB 197, section 5.1.1, step 2.
static void aes_nohw_sub_bytes_inv_affine(AES_NOHW_BATCH *batch) {
  aes_word_t a0 = batch->w[0];
  aes_word_t a1 = batch->w[1];
  aes_word_t a2 = batch->w[2];
  aes_word_t a3 = batch->w[3];
  aes_word_t a4 = batch->w[4];
  aes_word_t a5 = batch->w[5];
  aes_word_t a6 = batch->w[6];
  aes_word_t a7 = batch->w[7];

  // Apply the circulant [0 0 1 0 0 1 0 1]. This is the inverse of the circulant
  // [1 0 0 0 1 1 1 1].
  aes_word_t b0 = aes_nohw_xor(a2, aes_nohw_xor(a5, a7));
  aes_word_t b1 = aes_nohw_xor(a3, aes_nohw_xor(a6, a0));
  aes_word_t b2 = aes_nohw_xor(a4, aes_nohw_xor(a7, a1));
  aes_word_t b3 = aes_nohw_xor(a5, aes_nohw_xor(a0, a2));
  aes_word_t b4 = aes_nohw_xor(a6, aes_nohw_xor(a1, a3));
  aes_word_t b5 = aes_nohw_xor(a7, aes_nohw_xor(a2, a4));
  aes_word_t b6 = aes_nohw_xor(a0, aes_nohw_xor(a3, a5));
  aes_word_t b7 = aes_nohw_xor(a1, aes_nohw_xor(a4, a6));

  // XOR 0x05. Equivalently, we could XOR 0x63 before applying the circulant,
  // but 0x05 has lower Hamming weight. (0x05 is the circulant applied to 0x63.)
  batch->w[0] = aes_nohw_not(b0);
  batch->w[1] = b1;
  batch->w[2] = aes_nohw_not(b2);
  batch->w[3] = b3;
  batch->w[4] = b4;
  batch->w[5] = b5;
  batch->w[6] = b6;
  batch->w[7] = b7;
}

static void aes_nohw_inv_sub_bytes(AES_NOHW_BATCH *batch) {
  // We implement the inverse S-box using the forwards implementation with the
  // technique described in https://www.bearssl.org/constanttime.html#aes.
  //
  // The forwards S-box inverts its input and applies an affine transformation:
  // S(x) = A(Inv(x)). Thus Inv(x) = InvA(S(x)). The inverse S-box is then:
  //
  //   InvS(x) = Inv(InvA(x)).
  //           = InvA(S(InvA(x)))
  aes_nohw_sub_bytes_inv_affine(batch);
  aes_nohw_sub_bytes(batch);
  aes_nohw_sub_bytes_inv_affine(batch);
}

// aes_nohw_rotate_cols_right returns |v| with the columns in each row rotated
// to the right by |n|. This is a macro because |aes_nohw_shift_*| require
// constant shift counts in the SSE2 implementation.
#define aes_nohw_rotate_cols_right(/* aes_word_t */ v, /* const */ n) \
  (aes_nohw_or(aes_nohw_shift_right((v), (n)*4),                      \
               aes_nohw_shift_left((v), 16 - (n)*4)))

// aes_nohw_shift_rows applies the AES ShiftRows step: row r of each block is
// rotated left by r columns (implemented as a right rotation by r in this
// column-major byte order).
static void aes_nohw_shift_rows(AES_NOHW_BATCH *batch) {
  for (size_t i = 0; i < 8; i++) {
    aes_word_t row0 = aes_nohw_and(batch->w[i], AES_NOHW_ROW0_MASK);
    aes_word_t row1 = aes_nohw_and(batch->w[i], AES_NOHW_ROW1_MASK);
    aes_word_t row2 = aes_nohw_and(batch->w[i], AES_NOHW_ROW2_MASK);
    aes_word_t row3 = aes_nohw_and(batch->w[i], AES_NOHW_ROW3_MASK);
    row1 = aes_nohw_rotate_cols_right(row1, 1);
    row2 = aes_nohw_rotate_cols_right(row2, 2);
    row3 = aes_nohw_rotate_cols_right(row3, 3);
    batch->w[i] = aes_nohw_or(aes_nohw_or(row0, row1), aes_nohw_or(row2, row3));
  }
}

// aes_nohw_inv_shift_rows applies the inverse of |aes_nohw_shift_rows|, using
// the complementary rotation counts (3, 2, 1).
static void aes_nohw_inv_shift_rows(AES_NOHW_BATCH *batch) {
  for (size_t i = 0; i < 8; i++) {
    aes_word_t row0 = aes_nohw_and(batch->w[i], AES_NOHW_ROW0_MASK);
    aes_word_t row1 = aes_nohw_and(batch->w[i], AES_NOHW_ROW1_MASK);
    aes_word_t row2 = aes_nohw_and(batch->w[i], AES_NOHW_ROW2_MASK);
    aes_word_t row3 = aes_nohw_and(batch->w[i], AES_NOHW_ROW3_MASK);
    row1 = aes_nohw_rotate_cols_right(row1, 3);
    row2 = aes_nohw_rotate_cols_right(row2, 2);
    row3 = aes_nohw_rotate_cols_right(row3, 1);
    batch->w[i] = aes_nohw_or(aes_nohw_or(row0, row1), aes_nohw_or(row2, row3));
  }
}

// aes_nohw_rotate_rows_down returns |v| with the rows in each column rotated
// down by one.
static inline aes_word_t aes_nohw_rotate_rows_down(aes_word_t v) {
#if defined(OPENSSL_SSE2)
  return _mm_or_si128(_mm_srli_epi32(v, 8), _mm_slli_epi32(v, 24));
#elif defined(OPENSSL_64_BIT)
  return ((v >> 4) & UINT64_C(0x0fff0fff0fff0fff)) |
         ((v << 12) & UINT64_C(0xf000f000f000f000));
#else
  return ((v >> 2) & 0x3f3f3f3f) | ((v << 6) & 0xc0c0c0c0);
#endif
}

// aes_nohw_rotate_rows_twice returns |v| with the rows in each column rotated
// by two.
static inline aes_word_t aes_nohw_rotate_rows_twice(aes_word_t v) {
#if defined(OPENSSL_SSE2)
  return _mm_or_si128(_mm_srli_epi32(v, 16), _mm_slli_epi32(v, 16));
#elif defined(OPENSSL_64_BIT)
  return ((v >> 8) & UINT64_C(0x00ff00ff00ff00ff)) |
         ((v << 8) & UINT64_C(0xff00ff00ff00ff00));
#else
  return ((v >> 4) & 0x0f0f0f0f) | ((v << 4) & 0xf0f0f0f0);
#endif
}

// aes_nohw_mix_columns applies the AES MixColumns step to every block in
// |batch|, evaluated bitwise on the bitsliced representation.
static void aes_nohw_mix_columns(AES_NOHW_BATCH *batch) {
  // See https://eprint.iacr.org/2009/129.pdf, section 4.4 and appendix A.
  aes_word_t a0 = batch->w[0];
  aes_word_t a1 = batch->w[1];
  aes_word_t a2 = batch->w[2];
  aes_word_t a3 = batch->w[3];
  aes_word_t a4 = batch->w[4];
  aes_word_t a5 = batch->w[5];
  aes_word_t a6 = batch->w[6];
  aes_word_t a7 = batch->w[7];

  aes_word_t r0 = aes_nohw_rotate_rows_down(a0);
  aes_word_t a0_r0 = aes_nohw_xor(a0, r0);
  aes_word_t r1 = aes_nohw_rotate_rows_down(a1);
  aes_word_t a1_r1 = aes_nohw_xor(a1, r1);
  aes_word_t r2 = aes_nohw_rotate_rows_down(a2);
  aes_word_t a2_r2 = aes_nohw_xor(a2, r2);
  aes_word_t r3 = aes_nohw_rotate_rows_down(a3);
  aes_word_t a3_r3 = aes_nohw_xor(a3, r3);
  aes_word_t r4 = aes_nohw_rotate_rows_down(a4);
  aes_word_t a4_r4 = aes_nohw_xor(a4, r4);
  aes_word_t r5 = aes_nohw_rotate_rows_down(a5);
  aes_word_t a5_r5 = aes_nohw_xor(a5, r5);
  aes_word_t r6 = aes_nohw_rotate_rows_down(a6);
  aes_word_t a6_r6 = aes_nohw_xor(a6, r6);
  aes_word_t r7 = aes_nohw_rotate_rows_down(a7);
  aes_word_t a7_r7 = aes_nohw_xor(a7, r7);

  batch->w[0] =
      aes_nohw_xor(aes_nohw_xor(a7_r7, r0), aes_nohw_rotate_rows_twice(a0_r0));
  batch->w[1] =
      aes_nohw_xor(aes_nohw_xor(a0_r0, a7_r7),
                   aes_nohw_xor(r1, aes_nohw_rotate_rows_twice(a1_r1)));
  batch->w[2] =
      aes_nohw_xor(aes_nohw_xor(a1_r1, r2), aes_nohw_rotate_rows_twice(a2_r2));
  batch->w[3] =
      aes_nohw_xor(aes_nohw_xor(a2_r2, a7_r7),
                   aes_nohw_xor(r3, aes_nohw_rotate_rows_twice(a3_r3)));
  batch->w[4] =
      aes_nohw_xor(aes_nohw_xor(a3_r3, a7_r7),
                   aes_nohw_xor(r4, aes_nohw_rotate_rows_twice(a4_r4)));
  batch->w[5] =
      aes_nohw_xor(aes_nohw_xor(a4_r4, r5), aes_nohw_rotate_rows_twice(a5_r5));
  batch->w[6] =
      aes_nohw_xor(aes_nohw_xor(a5_r5, r6), aes_nohw_rotate_rows_twice(a6_r6));
  batch->w[7] =
      aes_nohw_xor(aes_nohw_xor(a6_r6, r7), aes_nohw_rotate_rows_twice(a7_r7));
}

// aes_nohw_inv_mix_columns applies the inverse MixColumns step, decomposed as
// a simpler matrix multiplication followed by forwards MixColumns.
static void aes_nohw_inv_mix_columns(AES_NOHW_BATCH *batch) {
  aes_word_t a0 = batch->w[0];
  aes_word_t a1 = batch->w[1];
  aes_word_t a2 = batch->w[2];
  aes_word_t a3 = batch->w[3];
  aes_word_t a4 = batch->w[4];
  aes_word_t a5 = batch->w[5];
  aes_word_t a6 = batch->w[6];
  aes_word_t a7 = batch->w[7];

  // bsaes-x86_64.pl describes the following decomposition of the inverse
  // MixColumns matrix, credited to Jussi Kivilinna. This gives a much simpler
  // multiplication.
  //
  // | 0e 0b 0d 09 |   | 02 03 01 01 |   | 05 00 04 00 |
  // | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
  // | 0d 09 0e 0b |   | 01 01 02 03 |   | 04 00 05 00 |
  // | 0b 0d 09 0e |   | 03 01 01 02 |   | 00 04 00 05 |
  //
  // First, apply the [5 0 4 0] matrix. Multiplying by 4 in F_(2^8) is described
  // by the following bit equations:
  //
  //   b0 = a6
  //   b1 = a6 ^ a7
  //   b2 = a0 ^ a7
  //   b3 = a1 ^ a6
  //   b4 = a2 ^ a6 ^ a7
  //   b5 = a3 ^ a7
  //   b6 = a4
  //   b7 = a5
  //
  // Each coefficient is given by:
  //
  //   b_ij = 05·a_ij ⊕ 04·a_i(j+2) = 04·(a_ij ⊕ a_i(j+2)) ⊕ a_ij
  //
  // We combine the two equations below. Note a_i(j+2) is a row rotation.
  aes_word_t a0_r0 = aes_nohw_xor(a0, aes_nohw_rotate_rows_twice(a0));
  aes_word_t a1_r1 = aes_nohw_xor(a1, aes_nohw_rotate_rows_twice(a1));
  aes_word_t a2_r2 = aes_nohw_xor(a2, aes_nohw_rotate_rows_twice(a2));
  aes_word_t a3_r3 = aes_nohw_xor(a3, aes_nohw_rotate_rows_twice(a3));
  aes_word_t a4_r4 = aes_nohw_xor(a4, aes_nohw_rotate_rows_twice(a4));
  aes_word_t a5_r5 = aes_nohw_xor(a5, aes_nohw_rotate_rows_twice(a5));
  aes_word_t a6_r6 = aes_nohw_xor(a6, aes_nohw_rotate_rows_twice(a6));
  aes_word_t a7_r7 = aes_nohw_xor(a7, aes_nohw_rotate_rows_twice(a7));

  batch->w[0] = aes_nohw_xor(a0, a6_r6);
  batch->w[1] = aes_nohw_xor(a1, aes_nohw_xor(a6_r6, a7_r7));
  batch->w[2] = aes_nohw_xor(a2, aes_nohw_xor(a0_r0, a7_r7));
  batch->w[3] = aes_nohw_xor(a3, aes_nohw_xor(a1_r1, a6_r6));
  batch->w[4] =
      aes_nohw_xor(aes_nohw_xor(a4, a2_r2), aes_nohw_xor(a6_r6, a7_r7));
  batch->w[5] = aes_nohw_xor(a5, aes_nohw_xor(a3_r3, a7_r7));
  batch->w[6] = aes_nohw_xor(a6, a4_r4);
  batch->w[7] = aes_nohw_xor(a7, a5_r5);

  // Apply the [02 03 01 01] matrix, which is just MixColumns.
  aes_nohw_mix_columns(batch);
}

// aes_nohw_encrypt_batch encrypts |batch| in place with the expanded schedule
// |key|, running |num_rounds| AES rounds (the final round omits MixColumns).
static void aes_nohw_encrypt_batch(const AES_NOHW_SCHEDULE *key,
                                   size_t num_rounds, AES_NOHW_BATCH *batch) {
  aes_nohw_add_round_key(batch, &key->keys[0]);
  for (size_t i = 1; i < num_rounds; i++) {
    aes_nohw_sub_bytes(batch);
    aes_nohw_shift_rows(batch);
    aes_nohw_mix_columns(batch);
    aes_nohw_add_round_key(batch, &key->keys[i]);
  }
  aes_nohw_sub_bytes(batch);
  aes_nohw_shift_rows(batch);
  aes_nohw_add_round_key(batch, &key->keys[num_rounds]);
}

// aes_nohw_decrypt_batch decrypts |batch| in place, applying the inverse round
// steps in reverse order.
static void aes_nohw_decrypt_batch(const AES_NOHW_SCHEDULE *key,
                                   size_t num_rounds, AES_NOHW_BATCH *batch) {
  aes_nohw_add_round_key(batch, &key->keys[num_rounds]);
  aes_nohw_inv_shift_rows(batch);
  aes_nohw_inv_sub_bytes(batch);
  for (size_t i = num_rounds - 1; i > 0; i--) {
    aes_nohw_add_round_key(batch, &key->keys[i]);
    aes_nohw_inv_mix_columns(batch);
    aes_nohw_inv_shift_rows(batch);
    aes_nohw_inv_sub_bytes(batch);
  }
  aes_nohw_add_round_key(batch, &key->keys[0]);
}


// Key schedule.

// aes_nohw_expand_round_keys inflates the compact round keys in |key| into the
// bitsliced schedule |out|, duplicating each round key across the batch.
static void aes_nohw_expand_round_keys(AES_NOHW_SCHEDULE *out,
                                       const AES_KEY *key) {
  for (size_t i = 0; i <= key->rounds; i++) {
    // Copy the round key into each block in the batch.
    for (size_t j = 0; j < AES_NOHW_BATCH_SIZE; j++) {
      aes_word_t tmp[AES_NOHW_BLOCK_WORDS];
      memcpy(tmp, key->rd_key + 4 * i, 16);
      aes_nohw_batch_set(&out->keys[i], tmp, j);
    }
    aes_nohw_transpose(&out->keys[i]);
  }
}

// aes_nohw_rcon is the AES round constants, Rcon[1] through Rcon[10].
static const uint8_t aes_nohw_rcon[10] = {0x01, 0x02, 0x04, 0x08, 0x10,
                                          0x20, 0x40, 0x80, 0x1b, 0x36};

// aes_nohw_rcon_slice returns the |i|th group of |AES_NOHW_BATCH_SIZE| bits in
// |rcon|, stored in a |aes_word_t|.
static inline aes_word_t aes_nohw_rcon_slice(uint8_t rcon, size_t i) {
  rcon = (rcon >> (i * AES_NOHW_BATCH_SIZE)) & ((1 << AES_NOHW_BATCH_SIZE) - 1);
#if defined(OPENSSL_SSE2)
  return _mm_set_epi32(0, 0, 0, rcon);
#else
  return ((aes_word_t)rcon);
#endif
}

// aes_nohw_sub_block applies the AES S-box to the single compact-form block
// |in|, writing the result to |out|. It round-trips through bitsliced form
// with a single-block batch.
static void aes_nohw_sub_block(aes_word_t out[AES_NOHW_BLOCK_WORDS],
                               const aes_word_t in[AES_NOHW_BLOCK_WORDS]) {
  AES_NOHW_BATCH batch;
  memset(&batch, 0, sizeof(batch));
  aes_nohw_batch_set(&batch, in, 0);
  aes_nohw_transpose(&batch);
  aes_nohw_sub_bytes(&batch);
  aes_nohw_transpose(&batch);
  aes_nohw_batch_get(&batch, out, 0);
}

// aes_nohw_setup_key_128 expands the 16-byte AES-128 key |in| into |key|,
// storing each round key in compact form.
static void aes_nohw_setup_key_128(AES_KEY *key, const uint8_t in[16]) {
  key->rounds = 10;

  aes_word_t block[AES_NOHW_BLOCK_WORDS];
  aes_nohw_compact_block(block, in);
  memcpy(key->rd_key, block, 16);

  for (size_t i = 1; i <= 10; i++) {
    aes_word_t sub[AES_NOHW_BLOCK_WORDS];
    aes_nohw_sub_block(sub, block);
    uint8_t rcon = aes_nohw_rcon[i - 1];
    for (size_t j = 0; j < AES_NOHW_BLOCK_WORDS; j++) {
      // Incorporate |rcon| and the transformed word into the first word.
      block[j] = aes_nohw_xor(block[j], aes_nohw_rcon_slice(rcon, j));
      block[j] = aes_nohw_xor(
          block[j],
          aes_nohw_shift_right(aes_nohw_rotate_rows_down(sub[j]), 12));
      // Propagate to the remaining words. Note this is reordered from the usual
      // formulation to avoid needing masks.
      aes_word_t v = block[j];
      block[j] = aes_nohw_xor(block[j], aes_nohw_shift_left(v, 4));
      block[j] = aes_nohw_xor(block[j], aes_nohw_shift_left(v, 8));
      block[j] = aes_nohw_xor(block[j], aes_nohw_shift_left(v, 12));
    }
    memcpy(key->rd_key + 4 * i, block, 16);
  }
}

static void aes_nohw_setup_key_192(AES_KEY *key, const uint8_t in[24]) {
  key->rounds = 12;

  aes_word_t storage1[AES_NOHW_BLOCK_WORDS], storage2[AES_NOHW_BLOCK_WORDS];
  aes_word_t *block1 = storage1, *block2 = storage2;

  // AES-192's key schedule is complex because each key schedule iteration
  // produces six words, but we compute on blocks and each block is four words.
// We maintain a sliding window of two blocks, filled to 1.5 blocks at a time. // We loop below every three blocks or two key schedule iterations. // // On entry to the loop, |block1| and the first half of |block2| contain the // previous key schedule iteration. |block1| has been written to |key|, but // |block2| has not as it is incomplete. aes_nohw_compact_block(block1, in); memcpy(key->rd_key, block1, 16); uint8_t half_block[16] = {0}; memcpy(half_block, in + 16, 8); aes_nohw_compact_block(block2, half_block); for (size_t i = 0; i < 4; i++) { aes_word_t sub[AES_NOHW_BLOCK_WORDS]; aes_nohw_sub_block(sub, block2); uint8_t rcon = aes_nohw_rcon[2 * i]; for (size_t j = 0; j < AES_NOHW_BLOCK_WORDS; j++) { // Compute the first two words of the next key schedule iteration, which // go in the second half of |block2|. The first two words of the previous // iteration are in the first half of |block1|. Apply |rcon| here too // because the shifts match. block2[j] = aes_nohw_or( block2[j], aes_nohw_shift_left( aes_nohw_xor(block1[j], aes_nohw_rcon_slice(rcon, j)), 8)); // Incorporate the transformed word and propagate. Note the last word of // the previous iteration corresponds to the second word of |copy|. This // is incorporated into the first word of the next iteration, or the third // word of |block2|. block2[j] = aes_nohw_xor( block2[j], aes_nohw_and(aes_nohw_shift_left( aes_nohw_rotate_rows_down(sub[j]), 4), AES_NOHW_COL2_MASK)); block2[j] = aes_nohw_xor( block2[j], aes_nohw_and(aes_nohw_shift_left(block2[j], 4), AES_NOHW_COL3_MASK)); // Compute the remaining four words, which fill |block1|. Begin by moving // the corresponding words of the previous iteration: the second half of // |block1| and the first half of |block2|. block1[j] = aes_nohw_shift_right(block1[j], 8); block1[j] = aes_nohw_or(block1[j], aes_nohw_shift_left(block2[j], 8)); // Incorporate the second word, computed previously in |block2|, and // propagate. 
block1[j] = aes_nohw_xor(block1[j], aes_nohw_shift_right(block2[j], 12)); aes_word_t v = block1[j]; block1[j] = aes_nohw_xor(block1[j], aes_nohw_shift_left(v, 4)); block1[j] = aes_nohw_xor(block1[j], aes_nohw_shift_left(v, 8)); block1[j] = aes_nohw_xor(block1[j], aes_nohw_shift_left(v, 12)); } // This completes two round keys. Note half of |block2| was computed in the // previous loop iteration but was not yet output. memcpy(key->rd_key + 4 * (3 * i + 1), block2, 16); memcpy(key->rd_key + 4 * (3 * i + 2), block1, 16); aes_nohw_sub_block(sub, block1); rcon = aes_nohw_rcon[2 * i + 1]; for (size_t j = 0; j < AES_NOHW_BLOCK_WORDS; j++) { // Compute the first four words of the next key schedule iteration in // |block2|. Begin by moving the corresponding words of the previous // iteration: the second half of |block2| and the first half of |block1|. block2[j] = aes_nohw_shift_right(block2[j], 8); block2[j] = aes_nohw_or(block2[j], aes_nohw_shift_left(block1[j], 8)); // Incorporate rcon and the transformed word. Note the last word of the // previous iteration corresponds to the last word of |copy|. block2[j] = aes_nohw_xor(block2[j], aes_nohw_rcon_slice(rcon, j)); block2[j] = aes_nohw_xor( block2[j], aes_nohw_shift_right(aes_nohw_rotate_rows_down(sub[j]), 12)); // Propagate to the remaining words. aes_word_t v = block2[j]; block2[j] = aes_nohw_xor(block2[j], aes_nohw_shift_left(v, 4)); block2[j] = aes_nohw_xor(block2[j], aes_nohw_shift_left(v, 8)); block2[j] = aes_nohw_xor(block2[j], aes_nohw_shift_left(v, 12)); // Compute the last two words, which go in the first half of |block1|. The // last two words of the previous iteration are in the second half of // |block1|. block1[j] = aes_nohw_shift_right(block1[j], 8); // Propagate blocks and mask off the excess. 
block1[j] = aes_nohw_xor(block1[j], aes_nohw_shift_right(block2[j], 12)); block1[j] = aes_nohw_xor(block1[j], aes_nohw_shift_left(block1[j], 4)); block1[j] = aes_nohw_and(block1[j], AES_NOHW_COL01_MASK); } // |block2| has a complete round key. |block1| will be completed in the next // iteration. memcpy(key->rd_key + 4 * (3 * i + 3), block2, 16); // Swap blocks to restore the invariant. aes_word_t *tmp = block1; block1 = block2; block2 = tmp; } } static void aes_nohw_setup_key_256(AES_KEY *key, const uint8_t in[32]) { key->rounds = 14; // Each key schedule iteration produces two round keys. aes_word_t block1[AES_NOHW_BLOCK_WORDS], block2[AES_NOHW_BLOCK_WORDS]; aes_nohw_compact_block(block1, in); memcpy(key->rd_key, block1, 16); aes_nohw_compact_block(block2, in + 16); memcpy(key->rd_key + 4, block2, 16); for (size_t i = 2; i <= 14; i += 2) { aes_word_t sub[AES_NOHW_BLOCK_WORDS]; aes_nohw_sub_block(sub, block2); uint8_t rcon = aes_nohw_rcon[i / 2 - 1]; for (size_t j = 0; j < AES_NOHW_BLOCK_WORDS; j++) { // Incorporate |rcon| and the transformed word into the first word. block1[j] = aes_nohw_xor(block1[j], aes_nohw_rcon_slice(rcon, j)); block1[j] = aes_nohw_xor( block1[j], aes_nohw_shift_right(aes_nohw_rotate_rows_down(sub[j]), 12)); // Propagate to the remaining words. aes_word_t v = block1[j]; block1[j] = aes_nohw_xor(block1[j], aes_nohw_shift_left(v, 4)); block1[j] = aes_nohw_xor(block1[j], aes_nohw_shift_left(v, 8)); block1[j] = aes_nohw_xor(block1[j], aes_nohw_shift_left(v, 12)); } memcpy(key->rd_key + 4 * i, block1, 16); if (i == 14) { break; } aes_nohw_sub_block(sub, block1); for (size_t j = 0; j < AES_NOHW_BLOCK_WORDS; j++) { // Incorporate the transformed word into the first word. block2[j] = aes_nohw_xor(block2[j], aes_nohw_shift_right(sub[j], 12)); // Propagate to the remaining words. 
      aes_word_t v = block2[j];
      block2[j] = aes_nohw_xor(block2[j], aes_nohw_shift_left(v, 4));
      block2[j] = aes_nohw_xor(block2[j], aes_nohw_shift_left(v, 8));
      block2[j] = aes_nohw_xor(block2[j], aes_nohw_shift_left(v, 12));
    }
    memcpy(key->rd_key + 4 * (i + 1), block2, 16);
  }
}

// External API.

// aes_nohw_set_encrypt_key expands |key| into |aeskey|. Returns 0 on success
// and 1 if |bits| is not one of the supported AES key sizes.
int aes_nohw_set_encrypt_key(const uint8_t *key, unsigned bits,
                             AES_KEY *aeskey) {
  switch (bits) {
    case 128:
      aes_nohw_setup_key_128(aeskey, key);
      return 0;
    case 192:
      aes_nohw_setup_key_192(aeskey, key);
      return 0;
    case 256:
      aes_nohw_setup_key_256(aeskey, key);
      return 0;
  }
  return 1;
}

// aes_nohw_set_decrypt_key is identical to the encrypt variant: this
// implementation uses the same (un-inverted) schedule for both directions and
// applies the inverse rounds in aes_nohw_decrypt_batch instead.
int aes_nohw_set_decrypt_key(const uint8_t *key, unsigned bits,
                             AES_KEY *aeskey) {
  return aes_nohw_set_encrypt_key(key, bits, aeskey);
}

// aes_nohw_encrypt encrypts one 16-byte block, re-deriving the transposed
// schedule from |key| on every call.
void aes_nohw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
  AES_NOHW_SCHEDULE sched;
  aes_nohw_expand_round_keys(&sched, key);
  AES_NOHW_BATCH batch;
  aes_nohw_to_batch(&batch, in, /*num_blocks=*/1);
  aes_nohw_encrypt_batch(&sched, key->rounds, &batch);
  aes_nohw_from_batch(out, /*num_blocks=*/1, &batch);
}

// aes_nohw_decrypt decrypts one 16-byte block; see aes_nohw_encrypt.
void aes_nohw_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
  AES_NOHW_SCHEDULE sched;
  aes_nohw_expand_round_keys(&sched, key);
  AES_NOHW_BATCH batch;
  aes_nohw_to_batch(&batch, in, /*num_blocks=*/1);
  aes_nohw_decrypt_batch(&sched, key->rounds, &batch);
  aes_nohw_from_batch(out, /*num_blocks=*/1, &batch);
}

// aes_nohw_xor_block sets |out| to |a| XOR |b|, one |aes_word_t| at a time.
// The |memcpy|s avoid alignment requirements on the byte pointers.
static inline void aes_nohw_xor_block(uint8_t out[16], const uint8_t a[16],
                                      const uint8_t b[16]) {
  for (size_t i = 0; i < 16; i += sizeof(aes_word_t)) {
    aes_word_t x, y;
    memcpy(&x, a + i, sizeof(aes_word_t));
    memcpy(&y, b + i, sizeof(aes_word_t));
    x = aes_nohw_xor(x, y);
    memcpy(out + i, &x, sizeof(aes_word_t));
  }
}

void aes_nohw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
                                   size_t blocks, const AES_KEY *key,
                                   const uint8_t ivec[16]) {
  if (blocks == 0) {
    return;
  }

  AES_NOHW_SCHEDULE sched;
  aes_nohw_expand_round_keys(&sched, key);

  // Make |AES_NOHW_BATCH_SIZE| copies of |ivec|.
  alignas(AES_NOHW_WORD_SIZE) uint8_t ivs[AES_NOHW_BATCH_SIZE * 16];
  alignas(AES_NOHW_WORD_SIZE) uint8_t enc_ivs[AES_NOHW_BATCH_SIZE * 16];
  for (size_t i = 0; i < AES_NOHW_BATCH_SIZE; i++) {
    memcpy(ivs + 16 * i, ivec, 16);
  }

  // The 32-bit big-endian counter lives in the last four bytes of the IV.
  uint32_t ctr = CRYPTO_load_u32_be(ivs + 12);
  for (;;) {
    // Update counters. Each block in the batch gets a consecutive counter
    // value.
    for (size_t i = 0; i < AES_NOHW_BATCH_SIZE; i++) {
      CRYPTO_store_u32_be(ivs + 16 * i + 12, ctr + (uint32_t)i);
    }

    size_t todo = blocks >= AES_NOHW_BATCH_SIZE ? AES_NOHW_BATCH_SIZE : blocks;
    AES_NOHW_BATCH batch;
    aes_nohw_to_batch(&batch, ivs, todo);
    aes_nohw_encrypt_batch(&sched, key->rounds, &batch);
    aes_nohw_from_batch(enc_ivs, todo, &batch);

    // XOR the keystream blocks into the plaintext.
    for (size_t i = 0; i < todo; i++) {
      aes_nohw_xor_block(out + 16 * i, in + 16 * i, enc_ivs + 16 * i);
    }

    blocks -= todo;
    if (blocks == 0) {
      break;
    }

    in += 16 * AES_NOHW_BATCH_SIZE;
    out += 16 * AES_NOHW_BATCH_SIZE;
    ctr += AES_NOHW_BATCH_SIZE;
  }
}

// aes_nohw_cbc_encrypt performs CBC encryption or decryption of |len| bytes
// (|len| must be a multiple of 16), updating |ivec| with the final IV.
void aes_nohw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                          const AES_KEY *key, uint8_t *ivec, const int enc) {
  assert(len % 16 == 0);
  size_t blocks = len / 16;
  if (blocks == 0) {
    return;
  }

  AES_NOHW_SCHEDULE sched;
  aes_nohw_expand_round_keys(&sched, key);

  alignas(AES_NOHW_WORD_SIZE) uint8_t iv[16];
  memcpy(iv, ivec, 16);

  if (enc) {
    // CBC encryption is not parallelizable: each block's input depends on the
    // previous ciphertext, so blocks are processed one at a time.
    while (blocks > 0) {
      aes_nohw_xor_block(iv, iv, in);
      AES_NOHW_BATCH batch;
      aes_nohw_to_batch(&batch, iv, /*num_blocks=*/1);
      aes_nohw_encrypt_batch(&sched, key->rounds, &batch);
      aes_nohw_from_batch(out, /*num_blocks=*/1, &batch);

      memcpy(iv, out, 16);
      in += 16;
      out += 16;
      blocks--;
    }
    memcpy(ivec, iv, 16);
    return;
  }

  // CBC decryption is parallelizable, so decrypt a full batch at a time.
  for (;;) {
    size_t todo = blocks >= AES_NOHW_BATCH_SIZE ? AES_NOHW_BATCH_SIZE : blocks;
    // Make a copy of the input so we can decrypt in-place.
alignas(AES_NOHW_WORD_SIZE) uint8_t copy[AES_NOHW_BATCH_SIZE * 16]; memcpy(copy, in, todo * 16); AES_NOHW_BATCH batch; aes_nohw_to_batch(&batch, in, todo); aes_nohw_decrypt_batch(&sched, key->rounds, &batch); aes_nohw_from_batch(out, todo, &batch); aes_nohw_xor_block(out, out, iv); for (size_t i = 1; i < todo; i++) { aes_nohw_xor_block(out + 16 * i, out + 16 * i, copy + 16 * (i - 1)); } // Save the last block as the IV. memcpy(iv, copy + 16 * (todo - 1), 16); blocks -= todo; if (blocks == 0) { break; } in += 16 * AES_NOHW_BATCH_SIZE; out += 16 * AES_NOHW_BATCH_SIZE; } memcpy(ivec, iv, 16); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/aes/cbc.cc.inc ================================================ /* * Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "internal.h" #include "../../internal.h" void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], block128_f block) { assert(key != NULL && ivec != NULL); if (len == 0) { // Avoid |ivec| == |iv| in the |memcpy| below, which is not legal in C. 
return; } assert(in != NULL && out != NULL); size_t n; const uint8_t *iv = ivec; while (len >= 16) { CRYPTO_xor16(out, in, iv); (*block)(out, out, key); iv = out; len -= 16; in += 16; out += 16; } while (len) { for (n = 0; n < 16 && n < len; ++n) { out[n] = in[n] ^ iv[n]; } for (; n < 16; ++n) { out[n] = iv[n]; } (*block)(out, out, key); iv = out; if (len <= 16) { break; } len -= 16; in += 16; out += 16; } OPENSSL_memcpy(ivec, iv, 16); } void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], block128_f block) { assert(key != NULL && ivec != NULL); if (len == 0) { // Avoid |ivec| == |iv| in the |memcpy| below, which is not legal in C. return; } assert(in != NULL && out != NULL); const uintptr_t inptr = (uintptr_t) in; const uintptr_t outptr = (uintptr_t) out; // If |in| and |out| alias, |in| must be ahead. assert(inptr >= outptr || inptr + len <= outptr); size_t n; alignas(16) uint8_t tmp[16]; if ((inptr >= 32 && outptr <= inptr - 32) || inptr < outptr) { // If |out| is at least two blocks behind |in| or completely disjoint, there // is no need to decrypt to a temporary block. 
const uint8_t *iv = ivec; while (len >= 16) { (*block)(in, out, key); CRYPTO_xor16(out, out, iv); iv = in; len -= 16; in += 16; out += 16; } OPENSSL_memcpy(ivec, iv, 16); } else { static_assert(16 % sizeof(crypto_word_t) == 0, "block cannot be evenly divided into words"); while (len >= 16) { (*block)(in, tmp, key); for (n = 0; n < 16; n += sizeof(crypto_word_t)) { crypto_word_t c = CRYPTO_load_word_le(in + n); CRYPTO_store_word_le(out + n, CRYPTO_load_word_le(tmp + n) ^ CRYPTO_load_word_le(ivec + n)); CRYPTO_store_word_le(ivec + n, c); } len -= 16; in += 16; out += 16; } } while (len) { uint8_t c; (*block)(in, tmp, key); for (n = 0; n < 16 && n < len; ++n) { c = in[n]; out[n] = tmp[n] ^ ivec[n]; ivec[n] = c; } if (len <= 16) { for (; n < 16; ++n) { ivec[n] = in[n]; } break; } len -= 16; in += 16; out += 16; } } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/aes/cfb.cc.inc ================================================ /* * Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "internal.h" static_assert(16 % sizeof(size_t) == 0, "block cannot be divided into size_t"); void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], unsigned *num, int enc, block128_f block) { assert(in && out && key && ivec && num); unsigned n = *num; if (enc) { while (n && len) { *(out++) = ivec[n] ^= *(in++); --len; n = (n + 1) % 16; } while (len >= 16) { (*block)(ivec, ivec, key); for (; n < 16; n += sizeof(crypto_word_t)) { crypto_word_t tmp = CRYPTO_load_word_le(ivec + n) ^ CRYPTO_load_word_le(in + n); CRYPTO_store_word_le(ivec + n, tmp); CRYPTO_store_word_le(out + n, tmp); } len -= 16; out += 16; in += 16; n = 0; } if (len) { (*block)(ivec, ivec, key); while (len--) { out[n] = ivec[n] ^= in[n]; ++n; } } *num = n; return; } else { while (n && len) { uint8_t c; *(out++) = ivec[n] ^ (c = *(in++)); ivec[n] = c; --len; n = (n + 1) % 16; } while (len >= 16) { (*block)(ivec, ivec, key); for (; n < 16; n += sizeof(crypto_word_t)) { crypto_word_t t = CRYPTO_load_word_le(in + n); CRYPTO_store_word_le(out + n, CRYPTO_load_word_le(ivec + n) ^ t); CRYPTO_store_word_le(ivec + n, t); } len -= 16; out += 16; in += 16; n = 0; } if (len) { (*block)(ivec, ivec, key); while (len--) { uint8_t c; out[n] = ivec[n] ^ (c = in[n]); ivec[n] = c; ++n; } } *num = n; return; } } /* This expects a single block of size nbits for both in and out. 
Note that it corrupts any extra bits in the last byte of out */ static void cfbr_encrypt_block(const uint8_t *in, uint8_t *out, unsigned nbits, const AES_KEY *key, uint8_t ivec[16], int enc, block128_f block) { int n, rem, num; uint8_t ovec[16 * 2 + 1]; /* +1 because we dererefence (but don't use) one byte off the end */ if (nbits <= 0 || nbits > 128) { return; } // fill in the first half of the new IV with the current IV OPENSSL_memcpy(ovec, ivec, 16); // construct the new IV (*block)(ivec, ivec, key); num = (nbits + 7) / 8; if (enc) { // encrypt the input for (n = 0; n < num; ++n) { out[n] = (ovec[16 + n] = in[n] ^ ivec[n]); } } else { // decrypt the input for (n = 0; n < num; ++n) { out[n] = (ovec[16 + n] = in[n]) ^ ivec[n]; } } // shift ovec left... rem = nbits % 8; num = nbits / 8; if (rem == 0) { OPENSSL_memcpy(ivec, ovec + num, 16); } else { for (n = 0; n < 16; ++n) { ivec[n] = ovec[n + num] << rem | ovec[n + num + 1] >> (8 - rem); } } // it is not necessary to cleanse ovec, since the IV is not secret } // N.B. This expects the input to be packed, MS bit first void CRYPTO_cfb128_1_encrypt(const uint8_t *in, uint8_t *out, size_t bits, const AES_KEY *key, uint8_t ivec[16], unsigned *num, int enc, block128_f block) { size_t n; uint8_t c[1], d[1]; assert(in && out && key && ivec && num); assert(*num == 0); for (n = 0; n < bits; ++n) { c[0] = (in[n / 8] & (1 << (7 - n % 8))) ? 
0x80 : 0; cfbr_encrypt_block(c, d, 1, key, ivec, enc, block); out[n / 8] = (out[n / 8] & ~(1 << (unsigned int)(7 - n % 8))) | ((d[0] & 0x80) >> (unsigned int)(n % 8)); } } void CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out, size_t length, const AES_KEY *key, unsigned char ivec[16], unsigned *num, int enc, block128_f block) { size_t n; assert(in && out && key && ivec && num); assert(*num == 0); for (n = 0; n < length; ++n) { cfbr_encrypt_block(&in[n], &out[n], 8, key, ivec, enc, block); } } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/aes/ctr.cc.inc ================================================ /* * Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "internal.h" #include "../../internal.h" static_assert(16 % sizeof(crypto_word_t) == 0, "block cannot be divided into crypto_word_t"); // increment upper 96 bits of 128-bit counter by 1 static void ctr96_inc(uint8_t *counter) { uint32_t n = 12, c = 1; do { --n; c += counter[n]; counter[n] = (uint8_t) c; c >>= 8; } while (n); } void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], uint8_t ecount_buf[16], unsigned int *num, ctr128_f func) { unsigned int n, ctr32; assert(key && ecount_buf && num); assert(len == 0 || (in && out)); assert(*num < 16); n = *num; while (n && len) { *(out++) = *(in++) ^ ecount_buf[n]; --len; n = (n + 1) % 16; } ctr32 = CRYPTO_load_u32_be(ivec + 12); while (len >= 16) { size_t blocks = len / 16; // 1<<28 is just a not-so-small yet not-so-large number... // Below condition is practically never met, but it has to // be checked for code correctness. 
    if (sizeof(size_t) > sizeof(unsigned int) && blocks > (1U << 28)) {
      blocks = (1U << 28);
    }
    // As (*func) operates on 32-bit counter, caller
    // has to handle overflow. 'if' below detects the
    // overflow, which is then handled by limiting the
    // amount of blocks to the exact overflow point...
    ctr32 += (uint32_t)blocks;
    if (ctr32 < blocks) {
      blocks -= ctr32;
      ctr32 = 0;
    }
    (*func)(in, out, blocks, key, ivec);
    // (*func) does not update ivec, caller does:
    CRYPTO_store_u32_be(ivec + 12, ctr32);
    // ... overflow was detected, propagate carry into the upper 96 bits.
    if (ctr32 == 0) {
      ctr96_inc(ivec);
    }
    blocks *= 16;
    len -= blocks;
    out += blocks;
    in += blocks;
  }
  // Handle a final partial block: encrypt one counter block into |ecount_buf|
  // and XOR only the bytes needed. The leftover keystream stays in
  // |ecount_buf| for the next call, tracked via |*num|.
  if (len) {
    OPENSSL_memset(ecount_buf, 0, 16);
    (*func)(ecount_buf, ecount_buf, 1, key, ivec);
    ++ctr32;
    CRYPTO_store_u32_be(ivec + 12, ctr32);
    if (ctr32 == 0) {
      ctr96_inc(ivec);
    }
    while (len--) {
      out[n] = in[n] ^ ecount_buf[n];
      ++n;
    }
  }
  *num = n;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/aes/gcm.cc.inc
================================================
/*
 * Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include
#include
#include

#include "../../internal.h"
#include "../aes/internal.h"
#include "internal.h"


// kSizeTWithoutLower4Bits is a mask that can be used to zero the lower four
// bits of a |size_t|.
static const size_t kSizeTWithoutLower4Bits = (size_t) -16;

// GCM_MUL and GHASH default to the portable (nohw) GHASH implementations;
// when GCM_FUNCREF is defined they are re-defined below to go through
// function pointers selected at key-setup time.
#define GCM_MUL(key, ctx, Xi) gcm_gmult_nohw((ctx)->Xi, (key)->Htable)
#define GHASH(key, ctx, in, len) \
  gcm_ghash_nohw((ctx)->Xi, (key)->Htable, in, len)
// GHASH_CHUNK is "stride parameter" missioned to mitigate cache
// trashing effect. In other words idea is to hash data while it's
// still in L1 cache after encryption pass...
#define GHASH_CHUNK (3 * 1024) #if defined(GHASH_ASM_X86_64) || defined(GHASH_ASM_X86) static inline void gcm_reduce_1bit(u128 *V) { if (sizeof(crypto_word_t) == 8) { uint64_t T = UINT64_C(0xe100000000000000) & (0 - (V->hi & 1)); V->hi = (V->lo << 63) | (V->hi >> 1); V->lo = (V->lo >> 1) ^ T; } else { uint32_t T = 0xe1000000U & (0 - (uint32_t)(V->hi & 1)); V->hi = (V->lo << 63) | (V->hi >> 1); V->lo = (V->lo >> 1) ^ ((uint64_t)T << 32); } } void gcm_init_ssse3(u128 Htable[16], const uint64_t H[2]) { Htable[0].hi = 0; Htable[0].lo = 0; u128 V; V.hi = H[1]; V.lo = H[0]; Htable[8] = V; gcm_reduce_1bit(&V); Htable[4] = V; gcm_reduce_1bit(&V); Htable[2] = V; gcm_reduce_1bit(&V); Htable[1] = V; Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo; V = Htable[4]; Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo; Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo; Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo; V = Htable[8]; Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo; Htable[10].hi = V.hi ^ Htable[2].hi, Htable[10].lo = V.lo ^ Htable[2].lo; Htable[11].hi = V.hi ^ Htable[3].hi, Htable[11].lo = V.lo ^ Htable[3].lo; Htable[12].hi = V.hi ^ Htable[4].hi, Htable[12].lo = V.lo ^ Htable[4].lo; Htable[13].hi = V.hi ^ Htable[5].hi, Htable[13].lo = V.lo ^ Htable[5].lo; Htable[14].hi = V.hi ^ Htable[6].hi, Htable[14].lo = V.lo ^ Htable[6].lo; Htable[15].hi = V.hi ^ Htable[7].hi, Htable[15].lo = V.lo ^ Htable[7].lo; // Treat |Htable| as a 16x16 byte table and transpose it. Thus, Htable[i] // contains the i'th byte of j*H for all j. 
uint8_t *Hbytes = (uint8_t *)Htable; for (int i = 0; i < 16; i++) { for (int j = 0; j < i; j++) { uint8_t tmp = Hbytes[16*i + j]; Hbytes[16*i + j] = Hbytes[16*j + i]; Hbytes[16*j + i] = tmp; } } } #endif // GHASH_ASM_X86_64 || GHASH_ASM_X86 #ifdef GCM_FUNCREF #undef GCM_MUL #define GCM_MUL(key, ctx, Xi) (*gcm_gmult_p)((ctx)->Xi, (key)->Htable) #undef GHASH #define GHASH(key, ctx, in, len) \ (*gcm_ghash_p)((ctx)->Xi, (key)->Htable, in, len) #endif // GCM_FUNCREF #if defined(HW_GCM) && defined(OPENSSL_X86_64) static size_t hw_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], uint8_t Xi[16], const u128 Htable[16], enum gcm_impl_t impl) { switch (impl) { case gcm_x86_vaes_avx2: len &= kSizeTWithoutLower4Bits; aes_gcm_enc_update_vaes_avx2(in, out, len, key, ivec, Htable, Xi); CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16); return len; case gcm_x86_vaes_avx10_512: len &= kSizeTWithoutLower4Bits; aes_gcm_enc_update_vaes_avx10_512(in, out, len, key, ivec, Htable, Xi); CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16); return len; default: return aesni_gcm_encrypt(in, out, len, key, ivec, Htable, Xi); } } static size_t hw_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], uint8_t Xi[16], const u128 Htable[16], enum gcm_impl_t impl) { switch (impl) { case gcm_x86_vaes_avx2: len &= kSizeTWithoutLower4Bits; aes_gcm_dec_update_vaes_avx2(in, out, len, key, ivec, Htable, Xi); CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16); return len; case gcm_x86_vaes_avx10_512: len &= kSizeTWithoutLower4Bits; aes_gcm_dec_update_vaes_avx10_512(in, out, len, key, ivec, Htable, Xi); CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16); return len; default: return aesni_gcm_decrypt(in, out, len, key, ivec, Htable, Xi); } } #endif // HW_GCM && X86_64 #if defined(HW_GCM) && defined(OPENSSL_AARCH64) static size_t 
hw_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], uint8_t Xi[16], const u128 Htable[16], enum gcm_impl_t impl) { const size_t len_blocks = len & kSizeTWithoutLower4Bits; if (!len_blocks) { return 0; } aes_gcm_enc_kernel(in, len_blocks * 8, out, Xi, ivec, key, Htable); return len_blocks; } static size_t hw_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], uint8_t Xi[16], const u128 Htable[16], enum gcm_impl_t impl) { const size_t len_blocks = len & kSizeTWithoutLower4Bits; if (!len_blocks) { return 0; } aes_gcm_dec_kernel(in, len_blocks * 8, out, Xi, ivec, key, Htable); return len_blocks; } #endif // HW_GCM && AARCH64 void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash, u128 out_table[16], const uint8_t gcm_key[16]) { // H is passed to |gcm_init_*| as a pair of byte-swapped, 64-bit values. uint64_t H[2] = {CRYPTO_load_u64_be(gcm_key), CRYPTO_load_u64_be(gcm_key + 8)}; #if defined(GHASH_ASM_X86_64) if (crypto_gcm_clmul_enabled()) { if (CRYPTO_is_VPCLMULQDQ_capable() && CRYPTO_is_AVX2_capable()) { if (CRYPTO_is_AVX512BW_capable() && CRYPTO_is_AVX512VL_capable() && CRYPTO_is_BMI2_capable() && !CRYPTO_cpu_avoid_zmm_registers()) { gcm_init_vpclmulqdq_avx10_512(out_table, H); *out_mult = gcm_gmult_vpclmulqdq_avx10; *out_hash = gcm_ghash_vpclmulqdq_avx10_512; return; } gcm_init_vpclmulqdq_avx2(out_table, H); *out_mult = gcm_gmult_vpclmulqdq_avx2; *out_hash = gcm_ghash_vpclmulqdq_avx2; return; } if (CRYPTO_is_AVX_capable() && CRYPTO_is_MOVBE_capable()) { gcm_init_avx(out_table, H); *out_mult = gcm_gmult_avx; *out_hash = gcm_ghash_avx; return; } gcm_init_clmul(out_table, H); *out_mult = gcm_gmult_clmul; *out_hash = gcm_ghash_clmul; return; } if (CRYPTO_is_SSSE3_capable()) { gcm_init_ssse3(out_table, H); *out_mult = gcm_gmult_ssse3; *out_hash = gcm_ghash_ssse3; return; } #elif defined(GHASH_ASM_X86) if (crypto_gcm_clmul_enabled()) { gcm_init_clmul(out_table, H); 
*out_mult = gcm_gmult_clmul; *out_hash = gcm_ghash_clmul; return; } if (CRYPTO_is_SSSE3_capable()) { gcm_init_ssse3(out_table, H); *out_mult = gcm_gmult_ssse3; *out_hash = gcm_ghash_ssse3; return; } #elif defined(GHASH_ASM_ARM) if (gcm_pmull_capable()) { gcm_init_v8(out_table, H); *out_mult = gcm_gmult_v8; *out_hash = gcm_ghash_v8; return; } if (gcm_neon_capable()) { gcm_init_neon(out_table, H); *out_mult = gcm_gmult_neon; *out_hash = gcm_ghash_neon; return; } #endif gcm_init_nohw(out_table, H); *out_mult = gcm_gmult_nohw; *out_hash = gcm_ghash_nohw; } void CRYPTO_gcm128_init_aes_key(GCM128_KEY *gcm_key, const uint8_t *key, size_t key_bytes) { switch (key_bytes) { case 16: boringssl_fips_inc_counter(fips_counter_evp_aes_128_gcm); break; case 32: boringssl_fips_inc_counter(fips_counter_evp_aes_256_gcm); break; } OPENSSL_memset(gcm_key, 0, sizeof(*gcm_key)); int is_hwaes; gcm_key->ctr = aes_ctr_set_key(&gcm_key->aes, &is_hwaes, &gcm_key->block, key, key_bytes); uint8_t ghash_key[16]; OPENSSL_memset(ghash_key, 0, sizeof(ghash_key)); gcm_key->block(ghash_key, ghash_key, &gcm_key->aes); CRYPTO_ghash_init(&gcm_key->gmult, &gcm_key->ghash, gcm_key->Htable, ghash_key); #if !defined(OPENSSL_NO_ASM) #if defined(OPENSSL_X86_64) if (gcm_key->ghash == gcm_ghash_vpclmulqdq_avx10_512 && CRYPTO_is_VAES_capable()) { gcm_key->impl = gcm_x86_vaes_avx10_512; } else if (gcm_key->ghash == gcm_ghash_vpclmulqdq_avx2 && CRYPTO_is_VAES_capable()) { gcm_key->impl = gcm_x86_vaes_avx2; } else if (gcm_key->ghash == gcm_ghash_avx && is_hwaes) { gcm_key->impl = gcm_x86_aesni; } #elif defined(OPENSSL_AARCH64) if (gcm_pmull_capable() && is_hwaes) { gcm_key->impl = gcm_arm64_aes; } #endif #endif } void CRYPTO_gcm128_init_ctx(const GCM128_KEY *key, GCM128_CONTEXT *ctx, const uint8_t *iv, size_t iv_len) { #ifdef GCM_FUNCREF void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult; #endif OPENSSL_memset(&ctx->Yi, 0, sizeof(ctx->Yi)); OPENSSL_memset(&ctx->Xi, 0, sizeof(ctx->Xi)); 
  ctx->len.aad = 0;
  ctx->len.msg = 0;
  ctx->ares = 0;
  ctx->mres = 0;

  uint32_t ctr;
  if (iv_len == 12) {
    // The common 96-bit IV case: Yi = IV || 0^31 || 1.
    OPENSSL_memcpy(ctx->Yi, iv, 12);
    ctx->Yi[15] = 1;
    ctr = 1;
  } else {
    // Otherwise the initial counter is GHASH(IV || pad || len(IV) in bits).
    uint64_t len0 = iv_len;
    while (iv_len >= 16) {
      CRYPTO_xor16(ctx->Yi, ctx->Yi, iv);
      GCM_MUL(key, ctx, Yi);
      iv += 16;
      iv_len -= 16;
    }
    if (iv_len) {
      for (size_t i = 0; i < iv_len; ++i) {
        ctx->Yi[i] ^= iv[i];
      }
      GCM_MUL(key, ctx, Yi);
    }
    uint8_t len_block[16];
    OPENSSL_memset(len_block, 0, 8);
    // IV length in bits, as a 64-bit big-endian value.
    CRYPTO_store_u64_be(len_block + 8, len0 << 3);
    CRYPTO_xor16(ctx->Yi, ctx->Yi, len_block);
    GCM_MUL(key, ctx, Yi);
    ctr = CRYPTO_load_u32_be(ctx->Yi + 12);
  }

  // Precompute EK0 = E(K, Y0), used to mask the final tag, then advance the
  // counter so encryption starts at Y1.
  key->block(ctx->Yi, ctx->EK0, &key->aes);
  ++ctr;
  CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
}

// CRYPTO_gcm128_aad absorbs |aad_len| bytes of additional authenticated data
// into the GHASH state. Returns 1 on success and 0 if message data has
// already been processed or the total AAD length limit is exceeded.
int CRYPTO_gcm128_aad(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                      const uint8_t *aad, size_t aad_len) {
#ifdef GCM_FUNCREF
  void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult;
  void (*gcm_ghash_p)(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = key->ghash;
#endif

  if (ctx->len.msg != 0) {
    // The caller must have finished the AAD before providing other input.
    return 0;
  }

  // Reject AAD beyond 2^61 bytes, and (on 64-bit) overflow of the running
  // total.
  uint64_t alen = ctx->len.aad + aad_len;
  if (alen > (UINT64_C(1) << 61) || (sizeof(aad_len) == 8 && alen < aad_len)) {
    return 0;
  }
  ctx->len.aad = alen;

  // First fill any partially-consumed block left over from a previous call.
  unsigned n = ctx->ares;
  if (n) {
    while (n && aad_len) {
      ctx->Xi[n] ^= *(aad++);
      --aad_len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(key, ctx, Xi);
    } else {
      ctx->ares = n;
      return 1;
    }
  }

  // Process a whole number of blocks.
  size_t len_blocks = aad_len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    GHASH(key, ctx, aad, len_blocks);
    aad += len_blocks;
    aad_len -= len_blocks;
  }

  // Process the remainder.
if (aad_len != 0) { n = (unsigned int)aad_len; for (size_t i = 0; i < aad_len; ++i) { ctx->Xi[i] ^= aad[i]; } } ctx->ares = n; return 1; } int CRYPTO_gcm128_encrypt(const GCM128_KEY *key, GCM128_CONTEXT *ctx, const uint8_t *in, uint8_t *out, size_t len) { #ifdef GCM_FUNCREF void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult; void (*gcm_ghash_p)(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp, size_t len) = key->ghash; #endif uint64_t mlen = ctx->len.msg + len; if (mlen > ((UINT64_C(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len)) { return 0; } ctx->len.msg = mlen; if (ctx->ares) { // First call to encrypt finalizes GHASH(AAD) GCM_MUL(key, ctx, Xi); ctx->ares = 0; } unsigned n = ctx->mres; if (n) { while (n && len) { ctx->Xi[n] ^= *(out++) = *(in++) ^ ctx->EKi[n]; --len; n = (n + 1) % 16; } if (n == 0) { GCM_MUL(key, ctx, Xi); } else { ctx->mres = n; return 1; } } #if defined(HW_GCM) // Check |len| to work around a C language bug. See https://crbug.com/1019588. if (key->impl != gcm_separate && len > 0) { // |hw_gcm_encrypt| may not process all the input given to it. It may // not process *any* of its input if it is deemed too small. 
size_t bulk = hw_gcm_encrypt(in, out, len, &key->aes, ctx->Yi, ctx->Xi, key->Htable, key->impl); in += bulk; out += bulk; len -= bulk; } #endif uint32_t ctr = CRYPTO_load_u32_be(ctx->Yi + 12); ctr128_f stream = key->ctr; while (len >= GHASH_CHUNK) { (*stream)(in, out, GHASH_CHUNK / 16, &key->aes, ctx->Yi); ctr += GHASH_CHUNK / 16; CRYPTO_store_u32_be(ctx->Yi + 12, ctr); GHASH(key, ctx, out, GHASH_CHUNK); out += GHASH_CHUNK; in += GHASH_CHUNK; len -= GHASH_CHUNK; } size_t len_blocks = len & kSizeTWithoutLower4Bits; if (len_blocks != 0) { size_t j = len_blocks / 16; (*stream)(in, out, j, &key->aes, ctx->Yi); ctr += (uint32_t)j; CRYPTO_store_u32_be(ctx->Yi + 12, ctr); in += len_blocks; len -= len_blocks; GHASH(key, ctx, out, len_blocks); out += len_blocks; } if (len) { key->block(ctx->Yi, ctx->EKi, &key->aes); ++ctr; CRYPTO_store_u32_be(ctx->Yi + 12, ctr); while (len--) { ctx->Xi[n] ^= out[n] = in[n] ^ ctx->EKi[n]; ++n; } } ctx->mres = n; return 1; } int CRYPTO_gcm128_decrypt(const GCM128_KEY *key, GCM128_CONTEXT *ctx, const uint8_t *in, uint8_t *out, size_t len) { #ifdef GCM_FUNCREF void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult; void (*gcm_ghash_p)(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp, size_t len) = key->ghash; #endif uint64_t mlen = ctx->len.msg + len; if (mlen > ((UINT64_C(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len)) { return 0; } ctx->len.msg = mlen; if (ctx->ares) { // First call to decrypt finalizes GHASH(AAD) GCM_MUL(key, ctx, Xi); ctx->ares = 0; } unsigned n = ctx->mres; if (n) { while (n && len) { uint8_t c = *(in++); *(out++) = c ^ ctx->EKi[n]; ctx->Xi[n] ^= c; --len; n = (n + 1) % 16; } if (n == 0) { GCM_MUL(key, ctx, Xi); } else { ctx->mres = n; return 1; } } #if defined(HW_GCM) // Check |len| to work around a C language bug. See https://crbug.com/1019588. if (key->impl != gcm_separate && len > 0) { // |hw_gcm_decrypt| may not process all the input given to it. 
It may // not process *any* of its input if it is deemed too small. size_t bulk = hw_gcm_decrypt(in, out, len, &key->aes, ctx->Yi, ctx->Xi, key->Htable, key->impl); in += bulk; out += bulk; len -= bulk; } #endif uint32_t ctr = CRYPTO_load_u32_be(ctx->Yi + 12); ctr128_f stream = key->ctr; while (len >= GHASH_CHUNK) { GHASH(key, ctx, in, GHASH_CHUNK); (*stream)(in, out, GHASH_CHUNK / 16, &key->aes, ctx->Yi); ctr += GHASH_CHUNK / 16; CRYPTO_store_u32_be(ctx->Yi + 12, ctr); out += GHASH_CHUNK; in += GHASH_CHUNK; len -= GHASH_CHUNK; } size_t len_blocks = len & kSizeTWithoutLower4Bits; if (len_blocks != 0) { size_t j = len_blocks / 16; GHASH(key, ctx, in, len_blocks); (*stream)(in, out, j, &key->aes, ctx->Yi); ctr += (uint32_t)j; CRYPTO_store_u32_be(ctx->Yi + 12, ctr); out += len_blocks; in += len_blocks; len -= len_blocks; } if (len) { key->block(ctx->Yi, ctx->EKi, &key->aes); ++ctr; CRYPTO_store_u32_be(ctx->Yi + 12, ctr); while (len--) { uint8_t c = in[n]; ctx->Xi[n] ^= c; out[n] = c ^ ctx->EKi[n]; ++n; } } ctx->mres = n; return 1; } int CRYPTO_gcm128_finish(const GCM128_KEY *key, GCM128_CONTEXT *ctx, const uint8_t *tag, size_t len) { #ifdef GCM_FUNCREF void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult; #endif if (ctx->mres || ctx->ares) { GCM_MUL(key, ctx, Xi); } uint8_t len_block[16]; CRYPTO_store_u64_be(len_block, ctx->len.aad << 3); CRYPTO_store_u64_be(len_block + 8, ctx->len.msg << 3); CRYPTO_xor16(ctx->Xi, ctx->Xi, len_block); GCM_MUL(key, ctx, Xi); CRYPTO_xor16(ctx->Xi, ctx->Xi, ctx->EK0); if (tag && len <= sizeof(ctx->Xi)) { return CRYPTO_memcmp(ctx->Xi, tag, len) == 0; } else { return 0; } } void CRYPTO_gcm128_tag(const GCM128_KEY *key, GCM128_CONTEXT *ctx, uint8_t *tag, size_t len) { CRYPTO_gcm128_finish(key, ctx, NULL, 0); OPENSSL_memcpy(tag, ctx->Xi, len <= sizeof(ctx->Xi) ? 
len : sizeof(ctx->Xi)); } #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) int crypto_gcm_clmul_enabled(void) { #if defined(GHASH_ASM_X86) || defined(GHASH_ASM_X86_64) return CRYPTO_is_FXSR_capable() && CRYPTO_is_PCLMUL_capable(); #else return 0; #endif } #endif ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/aes/gcm_nohw.cc.inc ================================================ /* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include "../../internal.h" #include "internal.h" #if !defined(BORINGSSL_HAS_UINT128) && defined(OPENSSL_SSE2) #include #endif // This file contains a constant-time implementation of GHASH based on the notes // in https://bearssl.org/constanttime.html#ghash-for-gcm and the reduction // algorithm described in // https://crypto.stanford.edu/RealWorldCrypto/slides/gueron.pdf. // // Unlike the BearSSL notes, we use uint128_t in the 64-bit implementation. Our // primary compilers (clang, clang-cl, and gcc) all support it. MSVC will run // the 32-bit implementation, but we can use its intrinsics if necessary. 
#if defined(BORINGSSL_HAS_UINT128)

// gcm_mul64_nohw multiplies |a| and |b| as polynomials over GF(2) and writes
// the 128-bit product to |*out_lo| (low 64 bits) and |*out_hi| (high 64 bits).
// It uses only integer multiplications and masks, with no input-dependent
// branches or memory accesses.
static void gcm_mul64_nohw(uint64_t *out_lo, uint64_t *out_hi, uint64_t a,
                           uint64_t b) {
  // One term every four bits means the largest term is 64/4 = 16, which barely
  // overflows into the next term. Using one term every five bits would cost 25
  // multiplications instead of 16. It is faster to mask off the bottom four
  // bits of |a|, giving a largest term of 60/4 = 15, and apply the bottom bits
  // separately.
  uint64_t a0 = a & UINT64_C(0x1111111111111110);
  uint64_t a1 = a & UINT64_C(0x2222222222222220);
  uint64_t a2 = a & UINT64_C(0x4444444444444440);
  uint64_t a3 = a & UINT64_C(0x8888888888888880);

  uint64_t b0 = b & UINT64_C(0x1111111111111111);
  uint64_t b1 = b & UINT64_C(0x2222222222222222);
  uint64_t b2 = b & UINT64_C(0x4444444444444444);
  uint64_t b3 = b & UINT64_C(0x8888888888888888);

  // |ci| accumulates the terms whose degrees are congruent to i mod 4. The
  // masking above guarantees carries cannot cross between residue classes.
  uint128_t c0 = (a0 * (uint128_t)b0) ^ (a1 * (uint128_t)b3) ^
                 (a2 * (uint128_t)b2) ^ (a3 * (uint128_t)b1);
  uint128_t c1 = (a0 * (uint128_t)b1) ^ (a1 * (uint128_t)b0) ^
                 (a2 * (uint128_t)b3) ^ (a3 * (uint128_t)b2);
  uint128_t c2 = (a0 * (uint128_t)b2) ^ (a1 * (uint128_t)b1) ^
                 (a2 * (uint128_t)b0) ^ (a3 * (uint128_t)b3);
  uint128_t c3 = (a0 * (uint128_t)b3) ^ (a1 * (uint128_t)b2) ^
                 (a2 * (uint128_t)b1) ^ (a3 * (uint128_t)b0);

  // Multiply the bottom four bits of |a| with |b|.
  uint64_t a0_mask = UINT64_C(0) - (a & 1);
  uint64_t a1_mask = UINT64_C(0) - ((a >> 1) & 1);
  uint64_t a2_mask = UINT64_C(0) - ((a >> 2) & 1);
  uint64_t a3_mask = UINT64_C(0) - ((a >> 3) & 1);
  uint128_t extra = (a0_mask & b) ^ ((uint128_t)(a1_mask & b) << 1) ^
                    ((uint128_t)(a2_mask & b) << 2) ^
                    ((uint128_t)(a3_mask & b) << 3);

  *out_lo = (((uint64_t)c0) & UINT64_C(0x1111111111111111)) ^
            (((uint64_t)c1) & UINT64_C(0x2222222222222222)) ^
            (((uint64_t)c2) & UINT64_C(0x4444444444444444)) ^
            (((uint64_t)c3) & UINT64_C(0x8888888888888888)) ^ ((uint64_t)extra);
  *out_hi = (((uint64_t)(c0 >> 64)) & UINT64_C(0x1111111111111111)) ^
            (((uint64_t)(c1 >> 64)) & UINT64_C(0x2222222222222222)) ^
            (((uint64_t)(c2 >> 64)) & UINT64_C(0x4444444444444444)) ^
            (((uint64_t)(c3 >> 64)) & UINT64_C(0x8888888888888888)) ^
            ((uint64_t)(extra >> 64));
}

#elif defined(OPENSSL_SSE2)

// gcm_mul32_nohw multiplies |a| and |b| as polynomials over GF(2) and returns
// the 64-bit product in the low half of the result, via SSE2 integer
// multiplies (|_mm_mul_epu32|) on masked residue classes.
static __m128i gcm_mul32_nohw(uint32_t a, uint32_t b) {
  // One term every four bits means the largest term is 32/4 = 8, which does not
  // overflow into the next term.
  __m128i aa = _mm_setr_epi32(a, 0, a, 0);
  __m128i bb = _mm_setr_epi32(b, 0, b, 0);

  __m128i a0a0 =
      _mm_and_si128(aa, _mm_setr_epi32(0x11111111, 0, 0x11111111, 0));
  __m128i a2a2 =
      _mm_and_si128(aa, _mm_setr_epi32(0x44444444, 0, 0x44444444, 0));
  __m128i b0b1 =
      _mm_and_si128(bb, _mm_setr_epi32(0x11111111, 0, 0x22222222, 0));
  __m128i b2b3 =
      _mm_and_si128(bb, _mm_setr_epi32(0x44444444, 0, 0x88888888, 0));

  __m128i c0c1 =
      _mm_xor_si128(_mm_mul_epu32(a0a0, b0b1), _mm_mul_epu32(a2a2, b2b3));
  __m128i c2c3 =
      _mm_xor_si128(_mm_mul_epu32(a2a2, b0b1), _mm_mul_epu32(a0a0, b2b3));

  __m128i a1a1 =
      _mm_and_si128(aa, _mm_setr_epi32(0x22222222, 0, 0x22222222, 0));
  __m128i a3a3 =
      _mm_and_si128(aa, _mm_setr_epi32(0x88888888, 0, 0x88888888, 0));
  __m128i b3b0 =
      _mm_and_si128(bb, _mm_setr_epi32(0x88888888, 0, 0x11111111, 0));
  __m128i b1b2 =
      _mm_and_si128(bb, _mm_setr_epi32(0x22222222, 0, 0x44444444, 0));

  c0c1 = _mm_xor_si128(c0c1, _mm_mul_epu32(a1a1, b3b0));
  c0c1 = _mm_xor_si128(c0c1, _mm_mul_epu32(a3a3, b1b2));
  c2c3 = _mm_xor_si128(c2c3, _mm_mul_epu32(a3a3, b3b0));
  c2c3 = _mm_xor_si128(c2c3, _mm_mul_epu32(a1a1, b1b2));

  // Keep only the bits belonging to each residue class, then fold the two
  // 64-bit halves together.
  c0c1 = _mm_and_si128(
      c0c1, _mm_setr_epi32(0x11111111, 0x11111111, 0x22222222, 0x22222222));
  c2c3 = _mm_and_si128(
      c2c3, _mm_setr_epi32(0x44444444, 0x44444444, 0x88888888, 0x88888888));

  c0c1 = _mm_xor_si128(c0c1, c2c3);
  // c0 ^= c1
  c0c1 = _mm_xor_si128(c0c1, _mm_srli_si128(c0c1, 8));
  return c0c1;
}

// gcm_mul64_nohw multiplies |a| and |b| as polynomials over GF(2) and writes
// the 128-bit product to |*out_lo| and |*out_hi|, built from three 32-bit
// multiplies via Karatsuba.
static void gcm_mul64_nohw(uint64_t *out_lo, uint64_t *out_hi, uint64_t a,
                           uint64_t b) {
  uint32_t a0 = a & 0xffffffff;
  uint32_t a1 = a >> 32;
  uint32_t b0 = b & 0xffffffff;
  uint32_t b1 = b >> 32;
  // Karatsuba multiplication.
  __m128i lo = gcm_mul32_nohw(a0, b0);
  __m128i hi = gcm_mul32_nohw(a1, b1);
  __m128i mid = gcm_mul32_nohw(a0 ^ a1, b0 ^ b1);
  mid = _mm_xor_si128(mid, lo);
  mid = _mm_xor_si128(mid, hi);

  // XOR the middle term into the product, shifted into position (32 bits up).
  __m128i ret = _mm_unpacklo_epi64(lo, hi);
  mid = _mm_slli_si128(mid, 4);
  mid = _mm_and_si128(mid, _mm_setr_epi32(0, 0xffffffff, 0xffffffff, 0));
  ret = _mm_xor_si128(ret, mid);
  memcpy(out_lo, &ret, 8);
  memcpy(out_hi, ((char*)&ret) + 8, 8);
}

#else  // !BORINGSSL_HAS_UINT128 && !OPENSSL_SSE2

// gcm_mul32_nohw multiplies |a| and |b| as polynomials over GF(2) and returns
// the 64-bit product, using only 32x32->64 integer multiplies.
static uint64_t gcm_mul32_nohw(uint32_t a, uint32_t b) {
  // One term every four bits means the largest term is 32/4 = 8, which does not
  // overflow into the next term.
  uint32_t a0 = a & 0x11111111;
  uint32_t a1 = a & 0x22222222;
  uint32_t a2 = a & 0x44444444;
  uint32_t a3 = a & 0x88888888;

  uint32_t b0 = b & 0x11111111;
  uint32_t b1 = b & 0x22222222;
  uint32_t b2 = b & 0x44444444;
  uint32_t b3 = b & 0x88888888;

  uint64_t c0 = (a0 * (uint64_t)b0) ^ (a1 * (uint64_t)b3) ^
                (a2 * (uint64_t)b2) ^ (a3 * (uint64_t)b1);
  uint64_t c1 = (a0 * (uint64_t)b1) ^ (a1 * (uint64_t)b0) ^
                (a2 * (uint64_t)b3) ^ (a3 * (uint64_t)b2);
  uint64_t c2 = (a0 * (uint64_t)b2) ^ (a1 * (uint64_t)b1) ^
                (a2 * (uint64_t)b0) ^ (a3 * (uint64_t)b3);
  uint64_t c3 = (a0 * (uint64_t)b3) ^ (a1 * (uint64_t)b2) ^
                (a2 * (uint64_t)b1) ^ (a3 * (uint64_t)b0);

  // The residue-class masks below are disjoint, so | and ^ are equivalent
  // here.
  return (c0 & UINT64_C(0x1111111111111111)) |
         (c1 & UINT64_C(0x2222222222222222)) |
         (c2 & UINT64_C(0x4444444444444444)) |
         (c3 & UINT64_C(0x8888888888888888));
}

// gcm_mul64_nohw multiplies |a| and |b| as polynomials over GF(2) and writes
// the 128-bit product to |*out_lo| and |*out_hi| (Karatsuba on 32-bit halves).
static void gcm_mul64_nohw(uint64_t *out_lo, uint64_t *out_hi, uint64_t a,
                           uint64_t b) {
  uint32_t a0 = a & 0xffffffff;
  uint32_t a1 = a >> 32;
  uint32_t b0 = b & 0xffffffff;
  uint32_t b1 = b >> 32;
  // Karatsuba multiplication.
  uint64_t lo = gcm_mul32_nohw(a0, b0);
  uint64_t hi = gcm_mul32_nohw(a1, b1);
  uint64_t mid = gcm_mul32_nohw(a0 ^ a1, b0 ^ b1) ^ lo ^ hi;
  *out_lo = lo ^ (mid << 32);
  *out_hi = hi ^ (mid >> 32);
}

#endif  // BORINGSSL_HAS_UINT128

void gcm_init_nohw(u128 Htable[16], const uint64_t Xi[2]) {
  // We implement GHASH in terms of POLYVAL, as described in RFC 8452. This
  // avoids a shift by 1 in the multiplication, needed to account for bit
  // reversal losing a bit after multiplication, that is,
  // rev128(X) * rev128(Y) = rev255(X*Y).
  //
  // Per Appendix A, we run mulX_POLYVAL. Note this is the same transformation
  // applied by |gcm_init_clmul|, etc. Note |Xi| has already been byteswapped.
  //
  // See also slide 16 of
  // https://crypto.stanford.edu/RealWorldCrypto/slides/gueron.pdf
  Htable[0].lo = Xi[1];
  Htable[0].hi = Xi[0];

  // |carry| is all-ones if the top bit was set and zero otherwise, so the
  // conditional reduction below is branchless.
  uint64_t carry = Htable[0].hi >> 63;
  carry = 0u - carry;

  Htable[0].hi <<= 1;
  Htable[0].hi |= Htable[0].lo >> 63;
  Htable[0].lo <<= 1;

  // The irreducible polynomial is 1 + x^121 + x^126 + x^127 + x^128, so we
  // conditionally add 0xc200...0001.
  Htable[0].lo ^= carry & 1;
  Htable[0].hi ^= carry & UINT64_C(0xc200000000000000);

  // This implementation does not use the rest of |Htable|.
}

// gcm_polyval_nohw multiplies the accumulator |Xi| by |H| in the POLYVAL
// field and reduces, writing the result back to |Xi|.
static void gcm_polyval_nohw(uint64_t Xi[2], const u128 *H) {
  // Karatsuba multiplication. The product of |Xi| and |H| is stored in |r0|
  // through |r3|. Note there is no byte or bit reversal because we are
  // evaluating POLYVAL.
  uint64_t r0, r1;
  gcm_mul64_nohw(&r0, &r1, Xi[0], H->lo);
  uint64_t r2, r3;
  gcm_mul64_nohw(&r2, &r3, Xi[1], H->hi);
  uint64_t mid0, mid1;
  gcm_mul64_nohw(&mid0, &mid1, Xi[0] ^ Xi[1], H->hi ^ H->lo);
  mid0 ^= r0 ^ r2;
  mid1 ^= r1 ^ r3;
  r2 ^= mid1;
  r1 ^= mid0;

  // Now we multiply our 256-bit result by x^-128 and reduce. |r2| and
  // |r3| shifts into position and we must multiply |r0| and |r1| by x^-128. We
  // have:
  //
  //       1 = x^121 + x^126 + x^127 + x^128
  //  x^-128 = x^-7 + x^-2 + x^-1 + 1
  //
  // This is the GHASH reduction step, but with bits flowing in reverse.

  // The x^-7, x^-2, and x^-1 terms shift bits past x^0, which would require
  // another reduction step. Instead, we gather the excess bits, incorporate
  // them into |r0| and |r1| and reduce once. See slides 17-19
  // of https://crypto.stanford.edu/RealWorldCrypto/slides/gueron.pdf.
  r1 ^= (r0 << 63) ^ (r0 << 62) ^ (r0 << 57);

  // 1
  r2 ^= r0;
  r3 ^= r1;

  // x^-1
  r2 ^= r0 >> 1;
  r2 ^= r1 << 63;
  r3 ^= r1 >> 1;

  // x^-2
  r2 ^= r0 >> 2;
  r2 ^= r1 << 62;
  r3 ^= r1 >> 2;

  // x^-7
  r2 ^= r0 >> 7;
  r2 ^= r1 << 57;
  r3 ^= r1 >> 7;

  Xi[0] = r2;
  Xi[1] = r3;
}

void gcm_gmult_nohw(uint8_t Xi[16], const u128 Htable[16]) {
  // |Xi| is in GHASH's serialized big-endian byte order; loading the two
  // halves swapped converts it to the order |gcm_polyval_nohw| expects, and
  // the stores below convert back.
  uint64_t swapped[2];
  swapped[0] = CRYPTO_load_u64_be(Xi + 8);
  swapped[1] = CRYPTO_load_u64_be(Xi);
  gcm_polyval_nohw(swapped, &Htable[0]);
  CRYPTO_store_u64_be(Xi, swapped[1]);
  CRYPTO_store_u64_be(Xi + 8, swapped[0]);
}

void gcm_ghash_nohw(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp,
                    size_t len) {
  uint64_t swapped[2];
  swapped[0] = CRYPTO_load_u64_be(Xi + 8);
  swapped[1] = CRYPTO_load_u64_be(Xi);

  // Absorb one 16-byte block at a time: XOR the block into the accumulator,
  // then multiply by H. Any trailing partial block (len % 16) is ignored;
  // callers must pass a multiple of 16.
  while (len >= 16) {
    swapped[0] ^= CRYPTO_load_u64_be(inp + 8);
    swapped[1] ^= CRYPTO_load_u64_be(inp);
    gcm_polyval_nohw(swapped, &Htable[0]);
    inp += 16;
    len -= 16;
  }

  CRYPTO_store_u64_be(Xi, swapped[1]);
  CRYPTO_store_u64_be(Xi + 8, swapped[0]);
}



================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/aes/internal.h
================================================
/* Copyright 2017 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#ifndef OPENSSL_HEADER_AES_INTERNAL_H
#define OPENSSL_HEADER_AES_INTERNAL_H

// NOTE(review): the arguments of the next two #include directives were lost in
// extraction; restore them from the upstream header before compiling.
#include
#include

#include "../../internal.h"

extern "C" {


// block128_f is the type of an AES block cipher implementation.
//
// Unlike upstream OpenSSL, it and the other functions in this file hard-code
// |AES_KEY|. It is undefined in C to call a function pointer with anything
// other than the original type. Thus we either must match |block128_f| to the
// type signature of |AES_encrypt| and friends or pass in |void*| wrapper
// functions.
//
// These functions are called exclusively with AES, so we use the former.
typedef void (*block128_f)(const uint8_t in[16], uint8_t out[16],
                           const AES_KEY *key);

// ctr128_f is the type of a function that performs CTR-mode encryption.
typedef void (*ctr128_f)(const uint8_t *in, uint8_t *out, size_t blocks,
                         const AES_KEY *key, const uint8_t ivec[16]);

// aes_ctr_set_key initialises |*aes_key| using |key_bytes| bytes from |key|,
// where |key_bytes| must either be 16, 24 or 32. If not NULL, |*out_block| is
// set to a function that encrypts single blocks. If not NULL, |*out_is_hwaes|
// is set to whether the hardware AES implementation was used. It returns a
// function for optimised CTR-mode.
ctr128_f aes_ctr_set_key(AES_KEY *aes_key, int *out_is_hwaes,
                         block128_f *out_block, const uint8_t *key,
                         size_t key_bytes);


// AES implementations.
//
// Each per-platform section defines feature macros (HWAES, VPAES, BSAES, ...)
// and a matching *_capable() runtime check; platforms without a given
// implementation get stub definitions further below.

#if !defined(OPENSSL_NO_ASM)

#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
#define HWAES
#define HWAES_ECB

inline int hwaes_capable(void) { return CRYPTO_is_AESNI_capable(); }

#define VPAES
#define VPAES_CBC
inline int vpaes_capable(void) { return CRYPTO_is_SSSE3_capable(); }

#elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
#define HWAES
inline int hwaes_capable(void) { return CRYPTO_is_ARMv8_AES_capable(); }

#if defined(OPENSSL_ARM)
#define BSAES
#define VPAES
inline int bsaes_capable(void) { return CRYPTO_is_NEON_capable(); }
inline int vpaes_capable(void) { return CRYPTO_is_NEON_capable(); }
#endif

#if defined(OPENSSL_AARCH64)
#define VPAES
#define VPAES_CBC
inline int vpaes_capable(void) { return CRYPTO_is_NEON_capable(); }
#endif

#endif

#endif  // !NO_ASM

#if defined(HWAES)

int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits, AES_KEY *key);
int aes_hw_set_decrypt_key(const uint8_t *user_key, int bits, AES_KEY *key);
void aes_hw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void aes_hw_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void aes_hw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                        const AES_KEY *key, uint8_t *ivec, int enc);
void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
                                 const AES_KEY *key, const uint8_t ivec[16]);

#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
// On x86 and x86_64, |aes_hw_set_decrypt_key| is implemented in terms of
// |aes_hw_set_encrypt_key| and a conversion function.
void aes_hw_encrypt_key_to_decrypt_key(AES_KEY *key);

// There are two variants of this function, one which uses aeskeygenassist
// ("base") and one which uses aesenclast + pshufb ("alt"). aesenclast is
// overall faster but is slower on some older processors. It doesn't use AVX,
// but AVX is used as a proxy to detecting this. See
// https://groups.google.com/g/mailing.openssl.dev/c/OuFXwW4NfO8/m/7d2ZXVjkxVkJ
//
// TODO(davidben): It is unclear if the aeskeygenassist version is still
// worthwhile. However, the aesenclast version requires SSSE3. SSSE3 long
// predates AES-NI, but it's not clear if AES-NI implies SSSE3. In OpenSSL, the
// CCM AES-NI assembly seems to assume it does.
inline int aes_hw_set_encrypt_key_alt_capable(void) {
  return hwaes_capable() && CRYPTO_is_SSSE3_capable();
}
inline int aes_hw_set_encrypt_key_alt_preferred(void) {
  return hwaes_capable() && CRYPTO_is_AVX_capable();
}
int aes_hw_set_encrypt_key_base(const uint8_t *user_key, int bits,
                                AES_KEY *key);
int aes_hw_set_encrypt_key_alt(const uint8_t *user_key, int bits, AES_KEY *key);
#endif  // OPENSSL_X86 || OPENSSL_X86_64

#else

// If HWAES isn't defined then we provide dummy functions for each of the hwaes
// functions.
inline int hwaes_capable(void) { return 0; }

inline int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits,
                                  AES_KEY *key) {
  abort();
}

inline int aes_hw_set_decrypt_key(const uint8_t *user_key, int bits,
                                  AES_KEY *key) {
  abort();
}

inline void aes_hw_encrypt(const uint8_t *in, uint8_t *out,
                           const AES_KEY *key) {
  abort();
}

inline void aes_hw_decrypt(const uint8_t *in, uint8_t *out,
                           const AES_KEY *key) {
  abort();
}

inline void aes_hw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                               const AES_KEY *key, uint8_t *ivec, int enc) {
  abort();
}

inline void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
                                        size_t len, const AES_KEY *key,
                                        const uint8_t ivec[16]) {
  abort();
}

#endif  // !HWAES

#if defined(HWAES_ECB)
void aes_hw_ecb_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                        const AES_KEY *key, int enc);
#endif  // HWAES_ECB

#if defined(BSAES)
// Note |bsaes_cbc_encrypt| requires |enc| to be zero.
void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                       const AES_KEY *key, uint8_t ivec[16], int enc);
void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
                                const AES_KEY *key, const uint8_t ivec[16]);

// VPAES to BSAES conversions are available on all BSAES platforms.
void vpaes_encrypt_key_to_bsaes(AES_KEY *out_bsaes, const AES_KEY *vpaes);
void vpaes_decrypt_key_to_bsaes(AES_KEY *out_bsaes, const AES_KEY *vpaes);
void vpaes_ctr32_encrypt_blocks_with_bsaes(const uint8_t *in, uint8_t *out,
                                           size_t blocks, const AES_KEY *key,
                                           const uint8_t ivec[16]);
#else
inline int bsaes_capable(void) { return 0; }

// On other platforms, bsaes_capable() will always return false and so the
// following will never be called.
inline void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                              const AES_KEY *key, uint8_t ivec[16], int enc) {
  abort();
}

inline void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
                                       size_t len, const AES_KEY *key,
                                       const uint8_t ivec[16]) {
  abort();
}

inline void vpaes_encrypt_key_to_bsaes(AES_KEY *out_bsaes,
                                       const AES_KEY *vpaes) {
  abort();
}

inline void vpaes_decrypt_key_to_bsaes(AES_KEY *out_bsaes,
                                       const AES_KEY *vpaes) {
  abort();
}
#endif  // !BSAES

#if defined(VPAES)
// On platforms where VPAES gets defined (just above), then these functions are
// provided by asm.
int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);
int vpaes_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);

void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void vpaes_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);

#if defined(VPAES_CBC)
void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                       const AES_KEY *key, uint8_t *ivec, int enc);
#endif
void vpaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
                                const AES_KEY *key, const uint8_t ivec[16]);
#else
inline int vpaes_capable(void) { return 0; }

// On other platforms, vpaes_capable() will always return false and so the
// following will never be called.
inline int vpaes_set_encrypt_key(const uint8_t *userKey, int bits,
                                 AES_KEY *key) {
  abort();
}

inline int vpaes_set_decrypt_key(const uint8_t *userKey, int bits,
                                 AES_KEY *key) {
  abort();
}

inline void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
  abort();
}

inline void vpaes_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
  abort();
}

inline void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                              const AES_KEY *key, uint8_t *ivec, int enc) {
  abort();
}

inline void vpaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
                                       size_t len, const AES_KEY *key,
                                       const uint8_t ivec[16]) {
  abort();
}
#endif  // !VPAES

int aes_nohw_set_encrypt_key(const uint8_t *key, unsigned bits,
                             AES_KEY *aeskey);
int aes_nohw_set_decrypt_key(const uint8_t *key, unsigned bits,
                             AES_KEY *aeskey);
void aes_nohw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void aes_nohw_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void aes_nohw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
                                   size_t blocks, const AES_KEY *key,
                                   const uint8_t ivec[16]);
void aes_nohw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                          const AES_KEY *key, uint8_t *ivec, int enc);


// Modes

// CRYPTO_xor16 writes the XOR of the 16-byte blocks |a| and |b| to |out|,
// one |crypto_word_t| at a time.
inline void CRYPTO_xor16(uint8_t out[16], const uint8_t a[16],
                         const uint8_t b[16]) {
  // TODO(davidben): Ideally we'd leave this to the compiler, which could use
  // vector registers, etc. But the compiler doesn't know that |in| and |out|
  // cannot partially alias. |restrict| is slightly too strict (we allow exact
  // aliasing), but perhaps in-place could be a separate function?
  static_assert(16 % sizeof(crypto_word_t) == 0,
                "block cannot be evenly divided into words");
  for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) {
    CRYPTO_store_word_le(
        out + i, CRYPTO_load_word_le(a + i) ^ CRYPTO_load_word_le(b + i));
  }
}


// CTR.

// CRYPTO_ctr128_encrypt_ctr32 encrypts (or decrypts, it's the same in CTR mode)
// |len| bytes from |in| to |out| using |block| in counter mode. There's no
// requirement that |len| be a multiple of any value and any partial blocks are
// stored in |ecount_buf| and |*num|, which must be zeroed before the initial
// call. The counter is a 128-bit, big-endian value in |ivec| and is
// incremented by this function. If the counter overflows, it wraps around.
// |ctr| must be a function that performs CTR mode but only deals with the lower
// 32 bits of the counter.
void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len,
                                 const AES_KEY *key, uint8_t ivec[16],
                                 uint8_t ecount_buf[16], unsigned *num,
                                 ctr128_f ctr);


// GCM.
//
// This API differs from the upstream API slightly. The |GCM128_CONTEXT| does
// not have a |key| pointer that points to the key as upstream's version does.
// Instead, every function takes a |key| parameter. This way |GCM128_CONTEXT|
// can be safely copied. Additionally, |gcm_key| is split into a separate
// struct.

// gcm_impl_t specifies an assembly implementation of AES-GCM.
enum gcm_impl_t {
  gcm_separate = 0,  // No combined AES-GCM, but may have AES-CTR and GHASH.
  gcm_x86_aesni,
  gcm_x86_vaes_avx2,
  gcm_x86_vaes_avx10_512,
  gcm_arm64_aes,
};

typedef struct {
  uint64_t hi, lo;
} u128;

// gmult_func multiplies |Xi| by the GCM key and writes the result back to
// |Xi|.
typedef void (*gmult_func)(uint8_t Xi[16], const u128 Htable[16]);

// ghash_func repeatedly multiplies |Xi| by the GCM key and adds in blocks from
// |inp|. The result is written back to |Xi| and the |len| argument must be a
// multiple of 16.
typedef void (*ghash_func)(uint8_t Xi[16], const u128 Htable[16],
                           const uint8_t *inp, size_t len);

typedef struct gcm128_key_st {
  u128 Htable[16];
  gmult_func gmult;
  ghash_func ghash;

  AES_KEY aes;
  ctr128_f ctr;
  block128_f block;
  enum gcm_impl_t impl;
} GCM128_KEY;

// GCM128_CONTEXT contains state for a single GCM operation. The structure
// should be zero-initialized before use.
typedef struct {
  // The following 5 names follow names in GCM specification
  uint8_t Yi[16];
  uint8_t EKi[16];
  uint8_t EK0[16];
  struct {
    uint64_t aad;
    uint64_t msg;
  } len;
  uint8_t Xi[16];

  unsigned mres, ares;
} GCM128_CONTEXT;

#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
// crypto_gcm_clmul_enabled returns one if the CLMUL implementation of GCM is
// used.
int crypto_gcm_clmul_enabled(void);
#endif

// CRYPTO_ghash_init writes a precomputed table of powers of |gcm_key| to
// |out_table| and sets |*out_mult| and |*out_hash| to (potentially hardware
// accelerated) functions for performing operations in the GHASH field.
void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash,
                       u128 out_table[16], const uint8_t gcm_key[16]);

// CRYPTO_gcm128_init_aes_key initialises |gcm_key| with the AES key |key|.
void CRYPTO_gcm128_init_aes_key(GCM128_KEY *gcm_key, const uint8_t *key,
                                size_t key_bytes);

// CRYPTO_gcm128_init_ctx initializes |ctx| to encrypt with |key| and |iv|.
void CRYPTO_gcm128_init_ctx(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                            const uint8_t *iv, size_t iv_len);

// CRYPTO_gcm128_aad adds to the authenticated data for an instance of GCM.
// This must be called before any data is encrypted. |key| must be the same
// value that was passed to |CRYPTO_gcm128_init_ctx|. It returns one on success
// and zero otherwise.
int CRYPTO_gcm128_aad(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                      const uint8_t *aad, size_t aad_len);

// CRYPTO_gcm128_encrypt encrypts |len| bytes from |in| to |out|. |key| must be
// the same value that was passed to |CRYPTO_gcm128_init_ctx|. It returns one on
// success and zero otherwise.
int CRYPTO_gcm128_encrypt(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                          const uint8_t *in, uint8_t *out, size_t len);

// CRYPTO_gcm128_decrypt decrypts |len| bytes from |in| to |out|. |key| must be
// the same value that was passed to |CRYPTO_gcm128_init_ctx|. It returns one on
// success and zero otherwise.
int CRYPTO_gcm128_decrypt(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                          const uint8_t *in, uint8_t *out, size_t len);

// CRYPTO_gcm128_finish calculates the authenticator and compares it against
// |len| bytes of |tag|. |key| must be the same value that was passed to
// |CRYPTO_gcm128_init_ctx|. It returns one on success and zero otherwise.
int CRYPTO_gcm128_finish(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                         const uint8_t *tag, size_t len);

// CRYPTO_gcm128_tag calculates the authenticator and copies it into |tag|.
// The minimum of |len| and 16 bytes are copied into |tag|. |key| must be the
// same value that was passed to |CRYPTO_gcm128_init_ctx|.
void CRYPTO_gcm128_tag(const GCM128_KEY *key, GCM128_CONTEXT *ctx, uint8_t *tag,
                       size_t len);


// GCM assembly.

void gcm_init_nohw(u128 Htable[16], const uint64_t H[2]);
void gcm_gmult_nohw(uint8_t Xi[16], const u128 Htable[16]);
void gcm_ghash_nohw(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp,
                    size_t len);

#if !defined(OPENSSL_NO_ASM)

#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
#define GCM_FUNCREF
void gcm_init_clmul(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_clmul(uint8_t Xi[16], const u128 Htable[16]);
void gcm_ghash_clmul(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp,
                     size_t len);

void gcm_init_ssse3(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_ssse3(uint8_t Xi[16], const u128 Htable[16]);
void gcm_ghash_ssse3(uint8_t Xi[16], const u128 Htable[16], const uint8_t *in,
                     size_t len);

#if defined(OPENSSL_X86_64)
#define GHASH_ASM_X86_64
void gcm_init_avx(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_avx(uint8_t Xi[16], const u128 Htable[16]);
void gcm_ghash_avx(uint8_t Xi[16], const u128 Htable[16], const uint8_t *in,
                   size_t len);

#define HW_GCM
size_t aesni_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                         const AES_KEY *key, uint8_t ivec[16],
                         const u128 Htable[16], uint8_t Xi[16]);
size_t aesni_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len,
                         const AES_KEY *key, uint8_t ivec[16],
                         const u128 Htable[16], uint8_t Xi[16]);

void gcm_init_vpclmulqdq_avx2(u128 Htable[16], const uint64_t H[2]);
void gcm_gmult_vpclmulqdq_avx2(uint8_t Xi[16], const u128 Htable[16]);
void gcm_ghash_vpclmulqdq_avx2(uint8_t Xi[16], const u128 Htable[16],
                               const uint8_t *in, size_t len);
void aes_gcm_enc_update_vaes_avx2(const uint8_t *in, uint8_t *out, size_t len,
                                  const AES_KEY *key, const uint8_t ivec[16],
                                  const u128 Htable[16], uint8_t Xi[16]);
void aes_gcm_dec_update_vaes_avx2(const uint8_t *in, uint8_t *out, size_t len,
                                  const AES_KEY *key, const uint8_t ivec[16],
                                  const u128 Htable[16], uint8_t Xi[16]);

void gcm_init_vpclmulqdq_avx10_512(u128 Htable[16], const uint64_t H[2]);
void gcm_gmult_vpclmulqdq_avx10(uint8_t Xi[16], const u128 Htable[16]);
void gcm_ghash_vpclmulqdq_avx10_512(uint8_t Xi[16], const u128 Htable[16],
                                    const uint8_t *in, size_t len);
void aes_gcm_enc_update_vaes_avx10_512(const uint8_t *in, uint8_t *out,
                                       size_t len, const AES_KEY *key,
                                       const uint8_t ivec[16],
                                       const u128 Htable[16], uint8_t Xi[16]);
void aes_gcm_dec_update_vaes_avx10_512(const uint8_t *in, uint8_t *out,
                                       size_t len, const AES_KEY *key,
                                       const uint8_t ivec[16],
                                       const u128 Htable[16], uint8_t Xi[16]);

#endif  // OPENSSL_X86_64

#if defined(OPENSSL_X86)
#define GHASH_ASM_X86
#endif  // OPENSSL_X86

#elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
#define GHASH_ASM_ARM
#define GCM_FUNCREF

inline int gcm_pmull_capable(void) { return CRYPTO_is_ARMv8_PMULL_capable(); }

void gcm_init_v8(u128 Htable[16], const uint64_t H[2]);
void gcm_gmult_v8(uint8_t Xi[16], const u128 Htable[16]);
void gcm_ghash_v8(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp,
                  size_t len);

inline int gcm_neon_capable(void) { return CRYPTO_is_NEON_capable(); }

void gcm_init_neon(u128 Htable[16], const uint64_t H[2]);
void gcm_gmult_neon(uint8_t Xi[16], const u128 Htable[16]);
void gcm_ghash_neon(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp,
                    size_t len);

#if defined(OPENSSL_AARCH64)
#define HW_GCM
// These functions are defined in aesv8-gcm-armv8.pl.
void aes_gcm_enc_kernel(const uint8_t *in, uint64_t in_bits, void *out,
                        void *Xi, uint8_t *ivec, const AES_KEY *key,
                        const u128 Htable[16]);
void aes_gcm_dec_kernel(const uint8_t *in, uint64_t in_bits, void *out,
                        void *Xi, uint8_t *ivec, const AES_KEY *key,
                        const u128 Htable[16]);
#endif

#endif
#endif  // OPENSSL_NO_ASM


// CBC.

// cbc128_f is the type of a function that performs CBC-mode encryption.
typedef void (*cbc128_f)(const uint8_t *in, uint8_t *out, size_t len,
                         const AES_KEY *key, uint8_t ivec[16], int enc);

// CRYPTO_cbc128_encrypt encrypts |len| bytes from |in| to |out| using the
// given IV and block cipher in CBC mode. The input need not be a multiple of
// 128 bits long, but the output will round up to the nearest 128 bit multiple,
// zero padding the input if needed. The IV will be updated on return.
void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                           const AES_KEY *key, uint8_t ivec[16],
                           block128_f block);

// CRYPTO_cbc128_decrypt decrypts |len| bytes from |in| to |out| using the
// given IV and block cipher in CBC mode. If |len| is not a multiple of 128
// bits then only that many bytes will be written, but a multiple of 128 bits
// is always read from |in|. The IV will be updated on return.
void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len,
                           const AES_KEY *key, uint8_t ivec[16],
                           block128_f block);


// OFB.

// CRYPTO_ofb128_encrypt encrypts (or decrypts, it's the same with OFB mode)
// |len| bytes from |in| to |out| using |block| in OFB mode. There's no
// requirement that |len| be a multiple of any value and any partial blocks are
// stored in |ivec| and |*num|, the latter must be zero before the initial
// call.
void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                           const AES_KEY *key, uint8_t ivec[16], unsigned *num,
                           block128_f block);


// CFB.

// CRYPTO_cfb128_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
// from |in| to |out| using |block| in CFB mode. There's no requirement that
// |len| be a multiple of any value and any partial blocks are stored in |ivec|
// and |*num|, the latter must be zero before the initial call.
void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                           const AES_KEY *key, uint8_t ivec[16], unsigned *num,
                           int enc, block128_f block);

// CRYPTO_cfb128_8_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
// from |in| to |out| using |block| in CFB-8 mode. Prior to the first call
// |num| should be set to zero.
void CRYPTO_cfb128_8_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                             const AES_KEY *key, uint8_t ivec[16],
                             unsigned *num, int enc, block128_f block);

// CRYPTO_cfb128_1_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes
// from |in| to |out| using |block| in CFB-1 mode. Prior to the first call
// |num| should be set to zero.
void CRYPTO_cfb128_1_encrypt(const uint8_t *in, uint8_t *out, size_t bits,
                             const AES_KEY *key, uint8_t ivec[16],
                             unsigned *num, int enc, block128_f block);

size_t CRYPTO_cts128_encrypt_block(const uint8_t *in, uint8_t *out, size_t len,
                                   const AES_KEY *key, uint8_t ivec[16],
                                   block128_f block);


// POLYVAL.
//
// POLYVAL is a polynomial authenticator that operates over a field very
// similar to the one that GHASH uses. See
// https://www.rfc-editor.org/rfc/rfc8452.html#section-3.

struct polyval_ctx {
  uint8_t S[16];
  u128 Htable[16];
  gmult_func gmult;
  ghash_func ghash;
};

// CRYPTO_POLYVAL_init initialises |ctx| using |key|.
void CRYPTO_POLYVAL_init(struct polyval_ctx *ctx, const uint8_t key[16]);

// CRYPTO_POLYVAL_update_blocks updates the accumulator in |ctx| given the
// blocks from |in|. Only a whole number of blocks can be processed so |in_len|
// must be a multiple of 16.
void CRYPTO_POLYVAL_update_blocks(struct polyval_ctx *ctx, const uint8_t *in,
                                  size_t in_len);

// CRYPTO_POLYVAL_finish writes the accumulator from |ctx| to |out|.
void CRYPTO_POLYVAL_finish(const struct polyval_ctx *ctx, uint8_t out[16]);

}  // extern C

#endif  // OPENSSL_HEADER_AES_INTERNAL_H



================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/aes/key_wrap.cc.inc
================================================
/*
 * Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../../internal.h" #include "../service_indicator/internal.h" // kDefaultIV is the default IV value given in RFC 3394, 2.2.3.1. static const uint8_t kDefaultIV[] = { 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, }; static const unsigned kBound = 6; int AES_wrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out, const uint8_t *in, size_t in_len) { // See RFC 3394, section 2.2.1. Additionally, note that section 2 requires the // plaintext be at least two 8-byte blocks. if (in_len > INT_MAX - 8 || in_len < 16 || in_len % 8 != 0) { return -1; } if (iv == NULL) { iv = kDefaultIV; } OPENSSL_memmove(out + 8, in, in_len); uint8_t A[AES_BLOCK_SIZE]; OPENSSL_memcpy(A, iv, 8); size_t n = in_len / 8; for (unsigned j = 0; j < kBound; j++) { for (size_t i = 1; i <= n; i++) { OPENSSL_memcpy(A + 8, out + 8 * i, 8); AES_encrypt(A, A, key); uint32_t t = (uint32_t)(n * j + i); A[7] ^= t & 0xff; A[6] ^= (t >> 8) & 0xff; A[5] ^= (t >> 16) & 0xff; A[4] ^= (t >> 24) & 0xff; OPENSSL_memcpy(out + 8 * i, A + 8, 8); } } OPENSSL_memcpy(out, A, 8); FIPS_service_indicator_update_state(); return (int)in_len + 8; } // aes_unwrap_key_inner performs steps one and two from // https://tools.ietf.org/html/rfc3394#section-2.2.2 static int aes_unwrap_key_inner(const AES_KEY *key, uint8_t *out, uint8_t out_iv[8], const uint8_t *in, size_t in_len) { // See RFC 3394, section 2.2.2. Additionally, note that section 2 requires the // plaintext be at least two 8-byte blocks, so the ciphertext must be at least // three blocks. 
if (in_len > INT_MAX || in_len < 24 || in_len % 8 != 0) { return 0; } uint8_t A[AES_BLOCK_SIZE]; OPENSSL_memcpy(A, in, 8); OPENSSL_memmove(out, in + 8, in_len - 8); size_t n = (in_len / 8) - 1; for (unsigned j = kBound - 1; j < kBound; j--) { for (size_t i = n; i > 0; i--) { uint32_t t = (uint32_t)(n * j + i); A[7] ^= t & 0xff; A[6] ^= (t >> 8) & 0xff; A[5] ^= (t >> 16) & 0xff; A[4] ^= (t >> 24) & 0xff; OPENSSL_memcpy(A + 8, out + 8 * (i - 1), 8); AES_decrypt(A, A, key); OPENSSL_memcpy(out + 8 * (i - 1), A + 8, 8); } } memcpy(out_iv, A, 8); return 1; } int AES_unwrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out, const uint8_t *in, size_t in_len) { uint8_t calculated_iv[8]; if (!aes_unwrap_key_inner(key, out, calculated_iv, in, in_len)) { return -1; } if (iv == NULL) { iv = kDefaultIV; } if (CRYPTO_memcmp(calculated_iv, iv, 8) != 0) { return -1; } FIPS_service_indicator_update_state(); return (int)in_len - 8; } // kPaddingConstant is used in Key Wrap with Padding. See // https://tools.ietf.org/html/rfc5649#section-3 static const uint8_t kPaddingConstant[4] = {0xa6, 0x59, 0x59, 0xa6}; int AES_wrap_key_padded(const AES_KEY *key, uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *in, size_t in_len) { // See https://tools.ietf.org/html/rfc5649#section-4.1 const uint64_t in_len64 = in_len; const size_t padded_len = (in_len + 7) & ~7; *out_len = 0; if (in_len == 0 || in_len64 > 0xffffffffu || in_len + 7 < in_len || padded_len + 8 < padded_len || max_out < padded_len + 8) { return 0; } uint8_t block[AES_BLOCK_SIZE]; memcpy(block, kPaddingConstant, sizeof(kPaddingConstant)); CRYPTO_store_u32_be(block + 4, (uint32_t)in_len); if (in_len <= 8) { memset(block + 8, 0, 8); memcpy(block + 8, in, in_len); AES_encrypt(block, out, key); *out_len = AES_BLOCK_SIZE; return 1; } uint8_t *padded_in = reinterpret_cast(OPENSSL_malloc(padded_len)); if (padded_in == NULL) { return 0; } assert(padded_len >= 8); memset(padded_in + padded_len - 8, 0, 8); memcpy(padded_in, 
in, in_len); FIPS_service_indicator_lock_state(); const int ret = AES_wrap_key(key, block, out, padded_in, padded_len); FIPS_service_indicator_unlock_state(); OPENSSL_free(padded_in); if (ret < 0) { return 0; } *out_len = ret; FIPS_service_indicator_update_state(); return 1; } int AES_unwrap_key_padded(const AES_KEY *key, uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *in, size_t in_len) { *out_len = 0; if (in_len < AES_BLOCK_SIZE || max_out < in_len - 8) { return 0; } uint8_t iv[8]; if (in_len == AES_BLOCK_SIZE) { uint8_t block[AES_BLOCK_SIZE]; AES_decrypt(in, block, key); memcpy(iv, block, sizeof(iv)); memcpy(out, block + 8, 8); } else if (!aes_unwrap_key_inner(key, out, iv, in, in_len)) { return 0; } assert(in_len % 8 == 0); crypto_word_t ok = constant_time_eq_int( CRYPTO_memcmp(iv, kPaddingConstant, sizeof(kPaddingConstant)), 0); const size_t claimed_len = CRYPTO_load_u32_be(iv + 4); ok &= ~constant_time_is_zero_w(claimed_len); ok &= constant_time_eq_w((claimed_len - 1) >> 3, (in_len - 9) >> 3); // Check that padding bytes are all zero. for (size_t i = in_len - 15; i < in_len - 8; i++) { ok &= constant_time_is_zero_w(constant_time_ge_8(i, claimed_len) & out[i]); } *out_len = constant_time_select_w(ok, claimed_len, 0); const int ret = ok & 1; if (ret) { FIPS_service_indicator_update_state(); } return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/aes/mode_wrappers.cc.inc ================================================ /* * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "../aes/internal.h" #include "../service_indicator/internal.h" void AES_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[AES_BLOCK_SIZE], uint8_t ecount_buf[AES_BLOCK_SIZE], unsigned int *num) { if (hwaes_capable()) { CRYPTO_ctr128_encrypt_ctr32(in, out, len, key, ivec, ecount_buf, num, aes_hw_ctr32_encrypt_blocks); } else if (vpaes_capable()) { // TODO(davidben): On ARM, where |BSAES| is additionally defined, this could // use |vpaes_ctr32_encrypt_blocks_with_bsaes|. CRYPTO_ctr128_encrypt_ctr32(in, out, len, key, ivec, ecount_buf, num, vpaes_ctr32_encrypt_blocks); } else { CRYPTO_ctr128_encrypt_ctr32(in, out, len, key, ivec, ecount_buf, num, aes_nohw_ctr32_encrypt_blocks); } FIPS_service_indicator_update_state(); } void AES_ecb_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key, const int enc) { assert(in && out && key); assert((AES_ENCRYPT == enc) || (AES_DECRYPT == enc)); if (AES_ENCRYPT == enc) { AES_encrypt(in, out, key); } else { AES_decrypt(in, out, key); } FIPS_service_indicator_update_state(); } void AES_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t *ivec, const int enc) { if (hwaes_capable()) { aes_hw_cbc_encrypt(in, out, len, key, ivec, enc); } else if (!vpaes_capable()) { aes_nohw_cbc_encrypt(in, out, len, key, ivec, enc); } else if (enc) { CRYPTO_cbc128_encrypt(in, out, len, key, ivec, AES_encrypt); } else { CRYPTO_cbc128_decrypt(in, out, len, key, ivec, AES_decrypt); } FIPS_service_indicator_update_state(); } void AES_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t length, const AES_KEY *key, uint8_t *ivec, int *num) { unsigned num_u = (unsigned)(*num); CRYPTO_ofb128_encrypt(in, out, length, key, ivec, &num_u, AES_encrypt); *num = (int)num_u; } void AES_cfb128_encrypt(const uint8_t *in, uint8_t *out, 
size_t length, const AES_KEY *key, uint8_t *ivec, int *num, int enc) { unsigned num_u = (unsigned)(*num); CRYPTO_cfb128_encrypt(in, out, length, key, ivec, &num_u, enc, AES_encrypt); *num = (int)num_u; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/aes/ofb.cc.inc ================================================ /* * Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "internal.h" static_assert(16 % sizeof(size_t) == 0, "block cannot be divided into size_t"); void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], unsigned *num, block128_f block) { assert(key != NULL && ivec != NULL && num != NULL); assert(len == 0 || (in != NULL && out != NULL)); unsigned n = *num; while (n && len) { *(out++) = *(in++) ^ ivec[n]; --len; n = (n + 1) % 16; } while (len >= 16) { (*block)(ivec, ivec, key); CRYPTO_xor16(out, in, ivec); len -= 16; out += 16; in += 16; n = 0; } if (len) { (*block)(ivec, ivec, key); while (len--) { out[n] = in[n] ^ ivec[n]; ++n; } } *num = n; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/aes/polyval.cc.inc ================================================ /* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include "internal.h" #include "../../internal.h" // byte_reverse reverses the order of the bytes in |b->c|. static void byte_reverse(uint8_t b[16]) { uint64_t hi = CRYPTO_load_u64_le(b); uint64_t lo = CRYPTO_load_u64_le(b + 8); CRYPTO_store_u64_le(b, CRYPTO_bswap8(lo)); CRYPTO_store_u64_le(b + 8, CRYPTO_bswap8(hi)); } // reverse_and_mulX_ghash interprets |b| as a reversed element of the GHASH // field, multiplies that by 'x' and serialises the result back into |b|, but // with GHASH's backwards bit ordering. static void reverse_and_mulX_ghash(uint8_t b[16]) { uint64_t hi = CRYPTO_load_u64_le(b); uint64_t lo = CRYPTO_load_u64_le(b + 8); const crypto_word_t carry = constant_time_eq_w(hi & 1, 1); hi >>= 1; hi |= lo << 63; lo >>= 1; lo ^= ((uint64_t) constant_time_select_w(carry, 0xe1, 0)) << 56; CRYPTO_store_u64_le(b, CRYPTO_bswap8(lo)); CRYPTO_store_u64_le(b + 8, CRYPTO_bswap8(hi)); } // POLYVAL(H, X_1, ..., X_n) = // ByteReverse(GHASH(mulX_GHASH(ByteReverse(H)), ByteReverse(X_1), ..., // ByteReverse(X_n))). // // See https://www.rfc-editor.org/rfc/rfc8452.html#appendix-A. 
// CRYPTO_POLYVAL_init initialises |ctx| using |key|. POLYVAL is GHASH over
// byte-reversed inputs (see RFC 8452, appendix A), so the key is byte-reversed
// and multiplied by 'x' before being handed to the shared GHASH setup code,
// which also selects the |gmult|/|ghash| implementations.
void CRYPTO_POLYVAL_init(struct polyval_ctx *ctx, const uint8_t key[16]) {
  alignas(8) uint8_t H[16];
  OPENSSL_memcpy(H, key, 16);
  reverse_and_mulX_ghash(H);
  CRYPTO_ghash_init(&ctx->gmult, &ctx->ghash, ctx->Htable, H);
  // The accumulator, |S|, starts at zero.
  OPENSSL_memset(&ctx->S, 0, sizeof(ctx->S));
}

// CRYPTO_POLYVAL_update_blocks folds |in_len| bytes (which must be a multiple
// of 16) from |in| into the accumulator. The input is processed in chunks of
// at most |sizeof(buf)| bytes: each 16-byte block is byte-reversed in the
// local buffer (leaving the caller's data untouched) and then passed to the
// GHASH implementation chosen at init time.
void CRYPTO_POLYVAL_update_blocks(struct polyval_ctx *ctx, const uint8_t *in,
                                  size_t in_len) {
  assert((in_len & 15) == 0);
  alignas(8) uint8_t buf[32 * 16];

  while (in_len > 0) {
    size_t todo = in_len;
    if (todo > sizeof(buf)) {
      todo = sizeof(buf);
    }
    OPENSSL_memcpy(buf, in, todo);
    in += todo;
    in_len -= todo;

    size_t blocks = todo / 16;
    for (size_t i = 0; i < blocks; i++) {
      byte_reverse(buf + 16 * i);
    }

    ctx->ghash(ctx->S, ctx->Htable, buf, todo);
  }
}

// CRYPTO_POLYVAL_finish writes the accumulator from |ctx| to |out|, undoing
// the GHASH byte ordering on the way out.
void CRYPTO_POLYVAL_finish(const struct polyval_ctx *ctx, uint8_t out[16]) {
  OPENSSL_memcpy(out, &ctx->S, 16);
  byte_reverse(out);
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bcm.cc
================================================
/* Copyright 2017 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#if !defined(_GNU_SOURCE)
#define _GNU_SOURCE  // needed for syscall() on Linux.
#endif #include #include #if defined(BORINGSSL_FIPS) #include #include #endif #include #include #include #include "../bcm_support.h" #include "../internal.h" #include "bcm_interface.h" // TODO(crbug.com/362530616): When delocate is removed, build these files as // separate compilation units again. #include "aes/aes.cc.inc" #include "aes/aes_nohw.cc.inc" #include "aes/cbc.cc.inc" #include "aes/cfb.cc.inc" #include "aes/ctr.cc.inc" #include "aes/gcm.cc.inc" #include "aes/gcm_nohw.cc.inc" #include "aes/key_wrap.cc.inc" #include "aes/mode_wrappers.cc.inc" #include "aes/ofb.cc.inc" #include "aes/polyval.cc.inc" #include "bn/add.cc.inc" #include "bn/asm/x86_64-gcc.cc.inc" #include "bn/bn.cc.inc" #include "bn/bytes.cc.inc" #include "bn/cmp.cc.inc" #include "bn/ctx.cc.inc" #include "bn/div.cc.inc" #include "bn/div_extra.cc.inc" #include "bn/exponentiation.cc.inc" #include "bn/gcd.cc.inc" #include "bn/gcd_extra.cc.inc" #include "bn/generic.cc.inc" #include "bn/jacobi.cc.inc" #include "bn/montgomery.cc.inc" #include "bn/montgomery_inv.cc.inc" #include "bn/mul.cc.inc" #include "bn/prime.cc.inc" #include "bn/random.cc.inc" #include "bn/rsaz_exp.cc.inc" #include "bn/shift.cc.inc" #include "bn/sqrt.cc.inc" #include "cipher/aead.cc.inc" #include "cipher/cipher.cc.inc" #include "cipher/e_aes.cc.inc" #include "cipher/e_aesccm.cc.inc" #include "cmac/cmac.cc.inc" #include "dh/check.cc.inc" #include "dh/dh.cc.inc" #include "digest/digest.cc.inc" #include "digest/digests.cc.inc" #include "digestsign/digestsign.cc.inc" #include "ec/ec.cc.inc" #include "ec/ec_key.cc.inc" #include "ec/ec_montgomery.cc.inc" #include "ec/felem.cc.inc" #include "ec/oct.cc.inc" #include "ec/p224-64.cc.inc" #include "ec/p256-nistz.cc.inc" #include "ec/p256.cc.inc" #include "ec/scalar.cc.inc" #include "ec/simple.cc.inc" #include "ec/simple_mul.cc.inc" #include "ec/util.cc.inc" #include "ec/wnaf.cc.inc" #include "ecdh/ecdh.cc.inc" #include "ecdsa/ecdsa.cc.inc" #include "hkdf/hkdf.cc.inc" #include 
"hmac/hmac.cc.inc" #include "keccak/keccak.cc.inc" #include "mldsa/mldsa.cc.inc" #include "mlkem/mlkem.cc.inc" #include "rand/ctrdrbg.cc.inc" #include "rand/rand.cc.inc" #include "rsa/blinding.cc.inc" #include "rsa/padding.cc.inc" #include "rsa/rsa.cc.inc" #include "rsa/rsa_impl.cc.inc" #include "self_check/fips.cc.inc" #include "self_check/self_check.cc.inc" #include "service_indicator/service_indicator.cc.inc" #include "sha/sha1.cc.inc" #include "sha/sha256.cc.inc" #include "sha/sha512.cc.inc" #include "slhdsa/fors.cc.inc" #include "slhdsa/merkle.cc.inc" #include "slhdsa/slhdsa.cc.inc" #include "slhdsa/thash.cc.inc" #include "slhdsa/wots.cc.inc" #include "tls/kdf.cc.inc" #if defined(BORINGSSL_FIPS) #if !defined(OPENSSL_ASAN) // These symbols are filled in by delocate.go (in static builds) or a linker // script (in shared builds). They point to the start and end of the module, and // the location of the integrity hash, respectively. extern const uint8_t BORINGSSL_bcm_text_start[]; extern const uint8_t BORINGSSL_bcm_text_end[]; extern const uint8_t BORINGSSL_bcm_text_hash[]; #if defined(BORINGSSL_SHARED_LIBRARY) extern const uint8_t BORINGSSL_bcm_rodata_start[]; extern const uint8_t BORINGSSL_bcm_rodata_end[]; #endif // assert_within is used to sanity check that certain symbols are within the // bounds of the integrity check. It checks that start <= symbol < end and // aborts otherwise. static void assert_within(const void *start, const void *symbol, const void *end) { const uintptr_t start_val = (uintptr_t)start; const uintptr_t symbol_val = (uintptr_t)symbol; const uintptr_t end_val = (uintptr_t)end; if (start_val <= symbol_val && symbol_val < end_val) { return; } fprintf(CRYPTO_get_stderr(), "FIPS module doesn't span expected symbol. 
Expected %p <= %p < %p\n", start, symbol, end); BORINGSSL_FIPS_abort(); } #if defined(OPENSSL_ANDROID) && defined(OPENSSL_AARCH64) static void BORINGSSL_maybe_set_module_text_permissions(int permission) { // Android may be compiled in execute-only-memory mode, in which case the // .text segment cannot be read. That conflicts with the need for a FIPS // module to hash its own contents, therefore |mprotect| is used to make // the module's .text readable for the duration of the hashing process. In // other build configurations this is a no-op. const uintptr_t page_size = getpagesize(); const uintptr_t page_start = ((uintptr_t)BORINGSSL_bcm_text_start) & ~(page_size - 1); if (mprotect((void *)page_start, ((uintptr_t)BORINGSSL_bcm_text_end) - page_start, permission) != 0) { perror("BoringSSL: mprotect"); } } #else static void BORINGSSL_maybe_set_module_text_permissions(int permission) {} #endif // !ANDROID #endif // !ASAN static void __attribute__((constructor)) BORINGSSL_bcm_power_on_self_test(void) { #if !defined(OPENSSL_ASAN) // Integrity tests cannot run under ASAN because it involves reading the full // .text section, which triggers the global-buffer overflow detection. 
if (!BORINGSSL_integrity_test()) { goto err; } #endif // OPENSSL_ASAN if (!boringssl_self_test_startup()) { goto err; } return; err: BORINGSSL_FIPS_abort(); } #if !defined(OPENSSL_ASAN) int BORINGSSL_integrity_test(void) { const uint8_t *const start = BORINGSSL_bcm_text_start; const uint8_t *const end = BORINGSSL_bcm_text_end; assert_within(start, reinterpret_cast(AES_encrypt), end); assert_within(start, reinterpret_cast(RSA_sign), end); assert_within(start, reinterpret_cast(BCM_rand_bytes), end); assert_within(start, reinterpret_cast(EC_GROUP_cmp), end); assert_within(start, reinterpret_cast(BCM_sha256_update), end); assert_within(start, reinterpret_cast(ecdsa_verify_fixed), end); assert_within(start, reinterpret_cast(EVP_AEAD_CTX_seal), end); #if defined(BORINGSSL_SHARED_LIBRARY) const uint8_t *const rodata_start = BORINGSSL_bcm_rodata_start; const uint8_t *const rodata_end = BORINGSSL_bcm_rodata_end; #else // In the static build, read-only data is placed within the .text segment. const uint8_t *const rodata_start = BORINGSSL_bcm_text_start; const uint8_t *const rodata_end = BORINGSSL_bcm_text_end; #endif assert_within(rodata_start, kPrimes, rodata_end); assert_within(rodata_start, kP256Field, rodata_end); assert_within(rodata_start, kPKCS1SigPrefixes, rodata_end); uint8_t result[SHA256_DIGEST_LENGTH]; const EVP_MD *const kHashFunction = EVP_sha256(); if (!boringssl_self_test_sha256() || !boringssl_self_test_hmac_sha256()) { return 0; } static const uint8_t kHMACKey[64] = {0}; unsigned result_len; HMAC_CTX hmac_ctx; HMAC_CTX_init(&hmac_ctx); if (!HMAC_Init_ex(&hmac_ctx, kHMACKey, sizeof(kHMACKey), kHashFunction, NULL /* no ENGINE */)) { fprintf(CRYPTO_get_stderr(), "HMAC_Init_ex failed.\n"); return 0; } BORINGSSL_maybe_set_module_text_permissions(PROT_READ | PROT_EXEC); #if defined(BORINGSSL_SHARED_LIBRARY) uint64_t length = end - start; HMAC_Update(&hmac_ctx, (const uint8_t *)&length, sizeof(length)); HMAC_Update(&hmac_ctx, start, length); length = rodata_end - 
rodata_start; HMAC_Update(&hmac_ctx, (const uint8_t *)&length, sizeof(length)); HMAC_Update(&hmac_ctx, rodata_start, length); #else HMAC_Update(&hmac_ctx, start, end - start); #endif BORINGSSL_maybe_set_module_text_permissions(PROT_EXEC); if (!HMAC_Final(&hmac_ctx, result, &result_len) || result_len != sizeof(result)) { fprintf(CRYPTO_get_stderr(), "HMAC failed.\n"); return 0; } HMAC_CTX_cleanse(&hmac_ctx); // FIPS 140-3, AS05.10. const uint8_t *expected = BORINGSSL_bcm_text_hash; if (!check_test(expected, result, sizeof(result), "FIPS integrity test")) { #if !defined(BORINGSSL_FIPS_BREAK_TESTS) return 0; #endif } OPENSSL_cleanse(result, sizeof(result)); // FIPS 140-3, AS05.10. return 1; } const uint8_t *FIPS_module_hash(void) { return BORINGSSL_bcm_text_hash; } #endif // OPENSSL_ASAN void BORINGSSL_FIPS_abort(void) { for (;;) { abort(); exit(1); } } #endif // BORINGSSL_FIPS ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bcm_interface.h ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef OPENSSL_HEADER_CRYPTO_BCM_INTERFACE_H #define OPENSSL_HEADER_CRYPTO_BCM_INTERFACE_H #include // This header will eventually become the interface between BCM and the // rest of libcrypto. More cleanly separating the two is still a work in // progress (see https://crbug.com/boringssl/722) so, at the moment, we // consider this no different from any other header in BCM. // // Over time, calls from libcrypto to BCM will all move to this header // and the separation will become more meaningful. #if defined(__cplusplus) extern "C" { #endif // Enumerated types for return values from bcm functions, both infallible // and fallible functions. Two success values are used to correspond to the // FIPS service indicator. For the moment, the official service indicator // remains the counter, not these values. Once we fully transition to // these return values from bcm we will change that. enum class bcm_infallible_t { approved, not_approved, }; enum class bcm_status_t { approved, not_approved, failure, }; typedef enum bcm_status_t bcm_status; typedef enum bcm_infallible_t bcm_infallible; inline int bcm_success(bcm_status status) { return status == bcm_status::approved || status == bcm_status::not_approved; } inline bcm_status_t bcm_as_approved_status(int result) { return result ? bcm_status::approved : bcm_status::failure; } // Random number generator. #if defined(BORINGSSL_FIPS) // We overread from /dev/urandom or RDRAND by a factor of 10 and XOR to whiten. // TODO(bbe): disentangle this value which is used to calculate the size of the // stack buffer in RAND_need entropy based on a calculation. #define BORINGSSL_FIPS_OVERREAD 10 #endif // BORINGSSL_FIPS // BCM_rand_load_entropy supplies |entropy_len| bytes of entropy to the BCM // module. The |want_additional_input| parameter is true iff the entropy was // obtained from a source other than the system, e.g. directly from the CPU. 
bcm_infallible BCM_rand_load_entropy(const uint8_t *entropy, size_t entropy_len, int want_additional_input); // BCM_rand_bytes is the same as the public |RAND_bytes| function, other // than returning a bcm_infallible status indicator. OPENSSL_EXPORT bcm_infallible BCM_rand_bytes(uint8_t *out, size_t out_len); // BCM_rand_bytes_hwrng attempts to fill |out| with |len| bytes of entropy from // the CPU hardware random number generator if one is present. // bcm_status_approved is returned on success, and a failure status is // returned otherwise. bcm_status BCM_rand_bytes_hwrng(uint8_t *out, size_t len); // BCM_rand_bytes_with_additional_data samples from the RNG after mixing 32 // bytes from |user_additional_data| in. bcm_infallible BCM_rand_bytes_with_additional_data( uint8_t *out, size_t out_len, const uint8_t user_additional_data[32]); // SHA-1 // BCM_SHA_DIGEST_LENGTH is the length of a SHA-1 digest. #define BCM_SHA_DIGEST_LENGTH 20 // BCM_sha1_init initialises |sha|. bcm_infallible BCM_sha1_init(SHA_CTX *sha); // BCM_SHA1_transform is a low-level function that performs a single, SHA-1 // block transformation using the state from |sha| and |SHA_CBLOCK| bytes from // |block|. bcm_infallible BCM_sha1_transform(SHA_CTX *c, const uint8_t data[BCM_SHA_CBLOCK]); // BCM_sha1_update adds |len| bytes from |data| to |sha|. bcm_infallible BCM_sha1_update(SHA_CTX *c, const void *data, size_t len); // BCM_sha1_final adds the final padding to |sha| and writes the resulting // digest to |out|, which must have at least |SHA_DIGEST_LENGTH| bytes of space. bcm_infallible BCM_sha1_final(uint8_t out[BCM_SHA_DIGEST_LENGTH], SHA_CTX *c); // BCM_fips_186_2_prf derives |out_len| bytes from |xkey| using the PRF // defined in FIPS 186-2, Appendix 3.1, with change notice 1 applied. The b // parameter is 160 and seed, XKEY, is also 160 bits. The optional XSEED user // input is all zeros. // // The PRF generates a sequence of 320-bit numbers. 
Each number is encoded as a // 40-byte string in big-endian and then concatenated to form |out|. If // |out_len| is not a multiple of 40, the result is truncated. This matches the // construction used in Section 7 of RFC 4186 and Section 7 of RFC 4187. // // This PRF is based on SHA-1, a weak hash function, and should not be used // in new protocols. It is provided for compatibility with some legacy EAP // methods. bcm_infallible BCM_fips_186_2_prf(uint8_t *out, size_t out_len, const uint8_t xkey[BCM_SHA_DIGEST_LENGTH]); // SHA-224 // SHA224_DIGEST_LENGTH is the length of a SHA-224 digest. #define BCM_SHA224_DIGEST_LENGTH 28 // BCM_sha224_init initialises |sha|. bcm_infallible BCM_sha224_init(SHA256_CTX *sha); // BCM_sha224_update adds |len| bytes from |data| to |sha|. bcm_infallible BCM_sha224_update(SHA256_CTX *sha, const void *data, size_t len); // BCM_sha224_final adds the final padding to |sha| and writes the resulting // digest to |out|, which must have at least |SHA224_DIGEST_LENGTH| bytes of // space. It aborts on programmer error. bcm_infallible BCM_sha224_final(uint8_t out[BCM_SHA224_DIGEST_LENGTH], SHA256_CTX *sha); // SHA-256 // BCM_SHA256_DIGEST_LENGTH is the length of a SHA-256 digest. #define BCM_SHA256_DIGEST_LENGTH 32 // BCM_sha256_init initialises |sha|. bcm_infallible BCM_sha256_init(SHA256_CTX *sha); // BCM_sha256_update adds |len| bytes from |data| to |sha|. bcm_infallible BCM_sha256_update(SHA256_CTX *sha, const void *data, size_t len); // BCM_sha256_final adds the final padding to |sha| and writes the resulting // digest to |out|, which must have at least |BCM_SHA256_DIGEST_LENGTH| bytes of // space. It aborts on programmer error. bcm_infallible BCM_sha256_final(uint8_t out[BCM_SHA256_DIGEST_LENGTH], SHA256_CTX *sha); // BCM_sha256_transform is a low-level function that performs a single, SHA-256 // block transformation using the state from |sha| and |BCM_SHA256_CBLOCK| bytes // from |block|. 
bcm_infallible BCM_sha256_transform(SHA256_CTX *sha, const uint8_t block[BCM_SHA256_CBLOCK]); // BCM_sha256_transform_blocks is a low-level function that takes |num_blocks| * // |BCM_SHA256_CBLOCK| bytes of data and performs SHA-256 transforms on it to // update |state|. bcm_infallible BCM_sha256_transform_blocks(uint32_t state[8], const uint8_t *data, size_t num_blocks); // SHA-384. // BCM_SHA384_DIGEST_LENGTH is the length of a SHA-384 digest. #define BCM_SHA384_DIGEST_LENGTH 48 // BCM_sha384_init initialises |sha|. bcm_infallible BCM_sha384_init(SHA512_CTX *sha); // BCM_sha384_update adds |len| bytes from |data| to |sha|. bcm_infallible BCM_sha384_update(SHA512_CTX *sha, const void *data, size_t len); // BCM_sha384_final adds the final padding to |sha| and writes the resulting // digest to |out|, which must have at least |BCM_sha384_DIGEST_LENGTH| bytes of // space. It may abort on programmer error. bcm_infallible BCM_sha384_final(uint8_t out[BCM_SHA384_DIGEST_LENGTH], SHA512_CTX *sha); // SHA-512. // BCM_SHA512_DIGEST_LENGTH is the length of a SHA-512 digest. #define BCM_SHA512_DIGEST_LENGTH 64 // BCM_sha512_init initialises |sha|. bcm_infallible BCM_sha512_init(SHA512_CTX *sha); // BCM_sha512_update adds |len| bytes from |data| to |sha|. bcm_infallible BCM_sha512_update(SHA512_CTX *sha, const void *data, size_t len); // BCM_sha512_final adds the final padding to |sha| and writes the resulting // digest to |out|, which must have at least |BCM_sha512_DIGEST_LENGTH| bytes of // space. bcm_infallible BCM_sha512_final(uint8_t out[BCM_SHA512_DIGEST_LENGTH], SHA512_CTX *sha); // BCM_sha512_transform is a low-level function that performs a single, SHA-512 // block transformation using the state from |sha| and |BCM_sha512_CBLOCK| bytes // from |block|. 
bcm_infallible BCM_sha512_transform(SHA512_CTX *sha, const uint8_t block[BCM_SHA512_CBLOCK]); // SHA-512-256 // // See https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf section 5.3.6 #define BCM_SHA512_256_DIGEST_LENGTH 32 // BCM_sha512_256_init initialises |sha|. bcm_infallible BCM_sha512_256_init(SHA512_CTX *sha); // BCM_sha512_256_update adds |len| bytes from |data| to |sha|. bcm_infallible BCM_sha512_256_update(SHA512_CTX *sha, const void *data, size_t len); // BCM_sha512_256_final adds the final padding to |sha| and writes the resulting // digest to |out|, which must have at least |BCM_sha512_256_DIGEST_LENGTH| // bytes of space. It may abort on programmer error. bcm_infallible BCM_sha512_256_final(uint8_t out[BCM_SHA512_256_DIGEST_LENGTH], SHA512_CTX *sha); // ML-DSA // // Where not commented, these functions have the same signature as the // corresponding public function. // BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES is the number of bytes of uniformly // random entropy necessary to generate a signature in randomized mode. #define BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES 32 // BCM_MLDSA_SEED_BYTES is the number of bytes in an ML-DSA seed value. #define BCM_MLDSA_SEED_BYTES 32 // BCM_MLDSA65_PRIVATE_KEY_BYTES is the number of bytes in an encoded ML-DSA-65 // private key. #define BCM_MLDSA65_PRIVATE_KEY_BYTES 4032 // BCM_MLDSA65_PUBLIC_KEY_BYTES is the number of bytes in an encoded ML-DSA-65 // public key. #define BCM_MLDSA65_PUBLIC_KEY_BYTES 1952 // BCM_MLDSA65_SIGNATURE_BYTES is the number of bytes in an encoded ML-DSA-65 // signature. 
#define BCM_MLDSA65_SIGNATURE_BYTES 3309

struct BCM_mldsa65_private_key {
  union {
    uint8_t bytes[32 + 32 + 64 + 256 * 4 * (5 + 6 + 6)];
    uint32_t alignment;
  } opaque;
};

struct BCM_mldsa65_public_key {
  union {
    uint8_t bytes[32 + 64 + 256 * 4 * 6];
    uint32_t alignment;
  } opaque;
};

OPENSSL_EXPORT bcm_status BCM_mldsa65_generate_key(
    uint8_t out_encoded_public_key[BCM_MLDSA65_PUBLIC_KEY_BYTES],
    uint8_t out_seed[BCM_MLDSA_SEED_BYTES],
    struct BCM_mldsa65_private_key *out_private_key);

OPENSSL_EXPORT bcm_status BCM_mldsa65_private_key_from_seed(
    struct BCM_mldsa65_private_key *out_private_key,
    const uint8_t seed[BCM_MLDSA_SEED_BYTES]);

OPENSSL_EXPORT bcm_status BCM_mldsa65_public_from_private(
    struct BCM_mldsa65_public_key *out_public_key,
    const struct BCM_mldsa65_private_key *private_key);

OPENSSL_EXPORT bcm_status BCM_mldsa65_sign(
    uint8_t out_encoded_signature[BCM_MLDSA65_SIGNATURE_BYTES],
    const struct BCM_mldsa65_private_key *private_key, const uint8_t *msg,
    size_t msg_len, const uint8_t *context, size_t context_len);

OPENSSL_EXPORT bcm_status BCM_mldsa65_verify(
    const struct BCM_mldsa65_public_key *public_key,
    const uint8_t signature[BCM_MLDSA65_SIGNATURE_BYTES], const uint8_t *msg,
    size_t msg_len, const uint8_t *context, size_t context_len);

OPENSSL_EXPORT bcm_status BCM_mldsa65_marshal_public_key(
    CBB *out, const struct BCM_mldsa65_public_key *public_key);

OPENSSL_EXPORT bcm_status BCM_mldsa65_parse_public_key(
    struct BCM_mldsa65_public_key *public_key, CBS *in);

OPENSSL_EXPORT bcm_status BCM_mldsa65_parse_private_key(
    struct BCM_mldsa65_private_key *private_key, CBS *in);

// BCM_mldsa65_generate_key_external_entropy generates a public/private key pair
// using the given seed, writes the encoded public key to
// |out_encoded_public_key| and sets |out_private_key| to the private key.
OPENSSL_EXPORT bcm_status BCM_mldsa65_generate_key_external_entropy(
    uint8_t out_encoded_public_key[BCM_MLDSA65_PUBLIC_KEY_BYTES],
    struct BCM_mldsa65_private_key *out_private_key,
    const uint8_t entropy[BCM_MLDSA_SEED_BYTES]);

// BCM_mldsa65_sign_internal signs |msg| using |private_key| and writes the
// signature to |out_encoded_signature|. The |context_prefix| and |context| are
// prefixed to the message, in that order, before signing. The |randomizer|
// value can be set to zero bytes in order to make a deterministic signature, or
// else filled with entropy for the usual |MLDSA_sign| behavior.
OPENSSL_EXPORT bcm_status BCM_mldsa65_sign_internal(
    uint8_t out_encoded_signature[BCM_MLDSA65_SIGNATURE_BYTES],
    const struct BCM_mldsa65_private_key *private_key, const uint8_t *msg,
    size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len,
    const uint8_t *context, size_t context_len,
    const uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES]);

// BCM_mldsa65_verify_internal verifies that |encoded_signature| is a valid
// signature of |msg| by |public_key|. The |context_prefix| and |context| are
// prefixed to the message before verification, in that order.
OPENSSL_EXPORT bcm_status BCM_mldsa65_verify_internal(
    const struct BCM_mldsa65_public_key *public_key,
    const uint8_t encoded_signature[BCM_MLDSA65_SIGNATURE_BYTES],
    const uint8_t *msg, size_t msg_len, const uint8_t *context_prefix,
    size_t context_prefix_len, const uint8_t *context, size_t context_len);

// BCM_mldsa65_marshal_private_key serializes |private_key| to |out| in the
// NIST format for ML-DSA-65 private keys.
OPENSSL_EXPORT bcm_status BCM_mldsa65_marshal_private_key(
    CBB *out, const struct BCM_mldsa65_private_key *private_key);

// BCM_MLDSA87_PRIVATE_KEY_BYTES is the number of bytes in an encoded ML-DSA-87
// private key.
#define BCM_MLDSA87_PRIVATE_KEY_BYTES 4896

// BCM_MLDSA87_PUBLIC_KEY_BYTES is the number of bytes in an encoded ML-DSA-87
// public key.
#define BCM_MLDSA87_PUBLIC_KEY_BYTES 2592 // BCM_MLDSA87_SIGNATURE_BYTES is the number of bytes in an encoded ML-DSA-87 // signature. #define BCM_MLDSA87_SIGNATURE_BYTES 4627 struct BCM_mldsa87_private_key { union { uint8_t bytes[32 + 32 + 64 + 256 * 4 * (7 + 8 + 8)]; uint32_t alignment; } opaque; }; struct BCM_mldsa87_public_key { union { uint8_t bytes[32 + 64 + 256 * 4 * 8]; uint32_t alignment; } opaque; }; OPENSSL_EXPORT bcm_status BCM_mldsa87_generate_key( uint8_t out_encoded_public_key[BCM_MLDSA87_PUBLIC_KEY_BYTES], uint8_t out_seed[BCM_MLDSA_SEED_BYTES], struct BCM_mldsa87_private_key *out_private_key); OPENSSL_EXPORT bcm_status BCM_mldsa87_private_key_from_seed( struct BCM_mldsa87_private_key *out_private_key, const uint8_t seed[BCM_MLDSA_SEED_BYTES]); OPENSSL_EXPORT bcm_status BCM_mldsa87_public_from_private( struct BCM_mldsa87_public_key *out_public_key, const struct BCM_mldsa87_private_key *private_key); OPENSSL_EXPORT bcm_status BCM_mldsa87_sign( uint8_t out_encoded_signature[BCM_MLDSA87_SIGNATURE_BYTES], const struct BCM_mldsa87_private_key *private_key, const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t context_len); OPENSSL_EXPORT bcm_status BCM_mldsa87_verify(const struct BCM_mldsa87_public_key *public_key, const uint8_t *signature, const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t context_len); OPENSSL_EXPORT bcm_status BCM_mldsa87_marshal_public_key( CBB *out, const struct BCM_mldsa87_public_key *public_key); OPENSSL_EXPORT bcm_status BCM_mldsa87_parse_public_key( struct BCM_mldsa87_public_key *public_key, CBS *in); OPENSSL_EXPORT bcm_status BCM_mldsa87_parse_private_key( struct BCM_mldsa87_private_key *private_key, CBS *in); // BCM_mldsa87_generate_key_external_entropy generates a public/private key pair // using the given seed, writes the encoded public key to // |out_encoded_public_key| and sets |out_private_key| to the private key. 
OPENSSL_EXPORT bcm_status BCM_mldsa87_generate_key_external_entropy( uint8_t out_encoded_public_key[BCM_MLDSA87_PUBLIC_KEY_BYTES], struct BCM_mldsa87_private_key *out_private_key, const uint8_t entropy[BCM_MLDSA_SEED_BYTES]); // BCM_mldsa87_sign_internal signs |msg| using |private_key| and writes the // signature to |out_encoded_signature|. The |context_prefix| and |context| are // prefixed to the message, in that order, before signing. The |randomizer| // value can be set to zero bytes in order to make a deterministic signature, or // else filled with entropy for the usual |MLDSA_sign| behavior. OPENSSL_EXPORT bcm_status BCM_mldsa87_sign_internal( uint8_t out_encoded_signature[BCM_MLDSA87_SIGNATURE_BYTES], const struct BCM_mldsa87_private_key *private_key, const uint8_t *msg, size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len, const uint8_t *context, size_t context_len, const uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES]); // BCM_mldsa87_verify_internal verifies that |encoded_signature| is a valid // signature of |msg| by |public_key|. The |context_prefix| and |context| are // prefixed to the message before verification, in that order. OPENSSL_EXPORT bcm_status BCM_mldsa87_verify_internal( const struct BCM_mldsa87_public_key *public_key, const uint8_t encoded_signature[BCM_MLDSA87_SIGNATURE_BYTES], const uint8_t *msg, size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len, const uint8_t *context, size_t context_len); // BCM_mldsa87_marshal_private_key serializes |private_key| to |out| in the // NIST format for ML-DSA-87 private keys. OPENSSL_EXPORT bcm_status BCM_mldsa87_marshal_private_key( CBB *out, const struct BCM_mldsa87_private_key *private_key); // ML-KEM // // Where not commented, these functions have the same signature as the // corresponding public function. // BCM_MLKEM_ENCAP_ENTROPY is the number of bytes of uniformly random entropy // necessary to encapsulate a secret. 
// The entropy will be leaked to the decapsulating party.
#define BCM_MLKEM_ENCAP_ENTROPY 32

// BCM_MLKEM768_PUBLIC_KEY_BYTES is the number of bytes in an encoded ML-KEM-768
// public key.
#define BCM_MLKEM768_PUBLIC_KEY_BYTES 1184

// BCM_MLKEM1024_PUBLIC_KEY_BYTES is the number of bytes in an encoded
// ML-KEM-1024 public key.
#define BCM_MLKEM1024_PUBLIC_KEY_BYTES 1568

// BCM_MLKEM768_CIPHERTEXT_BYTES is the number of bytes in the ML-KEM-768
// ciphertext.
#define BCM_MLKEM768_CIPHERTEXT_BYTES 1088

// BCM_MLKEM1024_CIPHERTEXT_BYTES is the number of bytes in the ML-KEM-1024
// ciphertext.
#define BCM_MLKEM1024_CIPHERTEXT_BYTES 1568

// BCM_MLKEM768_PRIVATE_KEY_BYTES is the length of the data produced by
// |BCM_mlkem768_marshal_private_key|.
#define BCM_MLKEM768_PRIVATE_KEY_BYTES 2400

// BCM_MLKEM1024_PRIVATE_KEY_BYTES is the length of the data produced by
// |BCM_mlkem1024_marshal_private_key|.
#define BCM_MLKEM1024_PRIVATE_KEY_BYTES 3168

// BCM_MLKEM_SEED_BYTES is the number of bytes in an ML-KEM seed.
#define BCM_MLKEM_SEED_BYTES 64

// BCM_MLKEM_SHARED_SECRET_BYTES is the number of bytes in an ML-KEM shared
// secret.
#define BCM_MLKEM_SHARED_SECRET_BYTES 32 struct BCM_mlkem768_public_key { union { uint8_t bytes[512 * (3 + 9) + 32 + 32]; uint16_t alignment; } opaque; }; struct BCM_mlkem768_private_key { union { uint8_t bytes[512 * (3 + 3 + 9) + 32 + 32 + 32]; uint16_t alignment; } opaque; }; OPENSSL_EXPORT bcm_infallible BCM_mlkem768_generate_key( uint8_t out_encoded_public_key[BCM_MLKEM768_PUBLIC_KEY_BYTES], uint8_t optional_out_seed[BCM_MLKEM_SEED_BYTES], struct BCM_mlkem768_private_key *out_private_key); OPENSSL_EXPORT bcm_status BCM_mlkem768_private_key_from_seed( struct BCM_mlkem768_private_key *out_private_key, const uint8_t *seed, size_t seed_len); OPENSSL_EXPORT bcm_infallible BCM_mlkem768_public_from_private( struct BCM_mlkem768_public_key *out_public_key, const struct BCM_mlkem768_private_key *private_key); OPENSSL_EXPORT bcm_infallible BCM_mlkem768_encap(uint8_t out_ciphertext[BCM_MLKEM768_CIPHERTEXT_BYTES], uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES], const struct BCM_mlkem768_public_key *public_key); OPENSSL_EXPORT bcm_status BCM_mlkem768_decap(uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES], const uint8_t *ciphertext, size_t ciphertext_len, const struct BCM_mlkem768_private_key *private_key); OPENSSL_EXPORT bcm_status BCM_mlkem768_marshal_public_key( CBB *out, const struct BCM_mlkem768_public_key *public_key); OPENSSL_EXPORT bcm_status BCM_mlkem768_parse_public_key( struct BCM_mlkem768_public_key *out_public_key, CBS *in); // BCM_mlkem768_parse_private_key parses a private key, in NIST's format for // private keys, from |in| and writes the result to |out_private_key|. It // returns one on success or zero on parse error or if there are trailing bytes // in |in|. This format is verbose and should be avoided. Private keys should be // stored as seeds and parsed using |BCM_mlkem768_private_key_from_seed|. 
OPENSSL_EXPORT bcm_status BCM_mlkem768_parse_private_key( struct BCM_mlkem768_private_key *out_private_key, CBS *in); // BCM_mlkem768_generate_key_external_seed is a deterministic function to create // a pair of ML-KEM-768 keys, using the supplied seed. The seed needs to be // uniformly random. This function should only be used for tests; regular // callers should use the non-deterministic |BCM_mlkem768_generate_key| // directly. OPENSSL_EXPORT bcm_infallible BCM_mlkem768_generate_key_external_seed( uint8_t out_encoded_public_key[BCM_MLKEM768_PUBLIC_KEY_BYTES], struct BCM_mlkem768_private_key *out_private_key, const uint8_t seed[BCM_MLKEM_SEED_BYTES]); // BCM_mlkem768_encap_external_entropy behaves like |MLKEM768_encap|, but uses // |MLKEM_ENCAP_ENTROPY| bytes of |entropy| for randomization. The decapsulating // side will be able to recover |entropy| in full. This function should only be // used for tests, regular callers should use the non-deterministic // |BCM_mlkem768_encap| directly. OPENSSL_EXPORT bcm_infallible BCM_mlkem768_encap_external_entropy( uint8_t out_ciphertext[BCM_MLKEM768_CIPHERTEXT_BYTES], uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES], const struct BCM_mlkem768_public_key *public_key, const uint8_t entropy[BCM_MLKEM_ENCAP_ENTROPY]); // BCM_mlkem768_marshal_private_key serializes |private_key| to |out| in the // NIST format for ML-KEM-768 private keys. (Note that one can also save just // the seed value produced by |BCM_mlkem768_generate_key|, which is // significantly smaller.) 
OPENSSL_EXPORT bcm_status BCM_mlkem768_marshal_private_key( CBB *out, const struct BCM_mlkem768_private_key *private_key); struct BCM_mlkem1024_public_key { union { uint8_t bytes[512 * (4 + 16) + 32 + 32]; uint16_t alignment; } opaque; }; struct BCM_mlkem1024_private_key { union { uint8_t bytes[512 * (4 + 4 + 16) + 32 + 32 + 32]; uint16_t alignment; } opaque; }; OPENSSL_EXPORT bcm_infallible BCM_mlkem1024_generate_key( uint8_t out_encoded_public_key[BCM_MLKEM1024_PUBLIC_KEY_BYTES], uint8_t optional_out_seed[BCM_MLKEM_SEED_BYTES], struct BCM_mlkem1024_private_key *out_private_key); OPENSSL_EXPORT bcm_status BCM_mlkem1024_private_key_from_seed( struct BCM_mlkem1024_private_key *out_private_key, const uint8_t *seed, size_t seed_len); OPENSSL_EXPORT bcm_infallible BCM_mlkem1024_public_from_private( struct BCM_mlkem1024_public_key *out_public_key, const struct BCM_mlkem1024_private_key *private_key); OPENSSL_EXPORT bcm_infallible BCM_mlkem1024_encap(uint8_t out_ciphertext[BCM_MLKEM1024_CIPHERTEXT_BYTES], uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES], const struct BCM_mlkem1024_public_key *public_key); OPENSSL_EXPORT bcm_status BCM_mlkem1024_decap(uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES], const uint8_t *ciphertext, size_t ciphertext_len, const struct BCM_mlkem1024_private_key *private_key); OPENSSL_EXPORT bcm_status BCM_mlkem1024_marshal_public_key( CBB *out, const struct BCM_mlkem1024_public_key *public_key); OPENSSL_EXPORT bcm_status BCM_mlkem1024_parse_public_key( struct BCM_mlkem1024_public_key *out_public_key, CBS *in); // BCM_mlkem1024_parse_private_key parses a private key, in NIST's format for // private keys, from |in| and writes the result to |out_private_key|. It // returns one on success or zero on parse error or if there are trailing bytes // in |in|. This format is verbose and should be avoided. Private keys should be // stored as seeds and parsed using |BCM_mlkem1024_private_key_from_seed|. 
OPENSSL_EXPORT bcm_status BCM_mlkem1024_parse_private_key(
    struct BCM_mlkem1024_private_key *out_private_key, CBS *in);

// BCM_mlkem1024_generate_key_external_seed is a deterministic function to
// create a pair of ML-KEM-1024 keys, using the supplied seed. The seed needs to
// be uniformly random. This function should only be used for tests, regular
// callers should use the non-deterministic |BCM_mlkem1024_generate_key|
// directly.
OPENSSL_EXPORT bcm_infallible BCM_mlkem1024_generate_key_external_seed(
    uint8_t out_encoded_public_key[BCM_MLKEM1024_PUBLIC_KEY_BYTES],
    struct BCM_mlkem1024_private_key *out_private_key,
    const uint8_t seed[BCM_MLKEM_SEED_BYTES]);

// BCM_mlkem1024_encap_external_entropy behaves like |MLKEM1024_encap|, but uses
// |MLKEM_ENCAP_ENTROPY| bytes of |entropy| for randomization. The
// decapsulating side will be able to recover |entropy| in full. This function
// should only be used for tests, regular callers should use the
// non-deterministic |BCM_mlkem1024_encap| directly.
OPENSSL_EXPORT bcm_infallible BCM_mlkem1024_encap_external_entropy(
    uint8_t out_ciphertext[BCM_MLKEM1024_CIPHERTEXT_BYTES],
    uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES],
    const struct BCM_mlkem1024_public_key *public_key,
    const uint8_t entropy[BCM_MLKEM_ENCAP_ENTROPY]);

// BCM_mlkem1024_marshal_private_key serializes |private_key| to |out| in the
// NIST format for ML-KEM-1024 private keys. (Note that one can also save just
// the seed value produced by |BCM_mlkem1024_generate_key|, which is
// significantly smaller.)
OPENSSL_EXPORT bcm_status BCM_mlkem1024_marshal_private_key(
    CBB *out, const struct BCM_mlkem1024_private_key *private_key);


// SLH-DSA

// BCM_SLHDSA_SHA2_128S_N is the output length of the hash function.
#define BCM_SLHDSA_SHA2_128S_N 16

// BCM_SLHDSA_M_PRIME_HEADER_LEN is the number of bytes at the beginning of M',
// the augmented message, before the context.
#define BCM_SLHDSA_M_PRIME_HEADER_LEN 2

// BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES is the number of bytes in an
// SLH-DSA-SHA2-128s public key.
#define BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES 32

// BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES is the number of bytes in an
// SLH-DSA-SHA2-128s private key.
#define BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES 64

// BCM_SLHDSA_SHA2_128S_SIGNATURE_BYTES is the number of bytes in an
// SLH-DSA-SHA2-128s signature.
#define BCM_SLHDSA_SHA2_128S_SIGNATURE_BYTES 7856

// BCM_slhdsa_sha2_128s_generate_key_from_seed generates an SLH-DSA-SHA2-128s
// key pair from a 48-byte seed and writes the result to |out_public_key| and
// |out_secret_key|.
OPENSSL_EXPORT bcm_infallible BCM_slhdsa_sha2_128s_generate_key_from_seed(
    uint8_t out_public_key[BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES],
    uint8_t out_secret_key[BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES],
    const uint8_t seed[3 * BCM_SLHDSA_SHA2_128S_N]);

// BCM_slhdsa_sha2_128s_sign_internal acts like |SLHDSA_SHA2_128S_sign| but
// accepts an explicit entropy input, which can be PK.seed (bytes 32..48 of
// the private key) to generate deterministic signatures. It also takes the
// input message in three parts so that the "internal" version of the signing
// function, from section 9.2, can be implemented. The |header| argument may be
// NULL to omit it.
OPENSSL_EXPORT bcm_infallible BCM_slhdsa_sha2_128s_sign_internal(
    uint8_t out_signature[BCM_SLHDSA_SHA2_128S_SIGNATURE_BYTES],
    const uint8_t secret_key[BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES],
    const uint8_t header[BCM_SLHDSA_M_PRIME_HEADER_LEN], const uint8_t *context,
    size_t context_len, const uint8_t *msg, size_t msg_len,
    const uint8_t entropy[BCM_SLHDSA_SHA2_128S_N]);

// BCM_slhdsa_sha2_128s_verify_internal acts like |SLHDSA_SHA2_128S_verify| but
// takes the input message in three parts so that the "internal" version of the
// verification function, from section 9.3, can be implemented. The |header|
// argument may be NULL to omit it.
OPENSSL_EXPORT bcm_status BCM_slhdsa_sha2_128s_verify_internal( const uint8_t *signature, size_t signature_len, const uint8_t public_key[BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], const uint8_t header[BCM_SLHDSA_M_PRIME_HEADER_LEN], const uint8_t *context, size_t context_len, const uint8_t *msg, size_t msg_len); OPENSSL_EXPORT bcm_infallible BCM_slhdsa_sha2_128s_generate_key( uint8_t out_public_key[BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], uint8_t out_private_key[BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES]); OPENSSL_EXPORT bcm_infallible BCM_slhdsa_sha2_128s_public_from_private( uint8_t out_public_key[BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], const uint8_t private_key[BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES]); OPENSSL_EXPORT bcm_status BCM_slhdsa_sha2_128s_sign( uint8_t out_signature[BCM_SLHDSA_SHA2_128S_SIGNATURE_BYTES], const uint8_t private_key[BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES], const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t context_len); OPENSSL_EXPORT bcm_status BCM_slhdsa_sha2_128s_verify( const uint8_t *signature, size_t signature_len, const uint8_t public_key[BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t context_len); OPENSSL_EXPORT bcm_status BCM_slhdsa_sha2_128s_prehash_sign( uint8_t out_signature[BCM_SLHDSA_SHA2_128S_SIGNATURE_BYTES], const uint8_t private_key[BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES], const uint8_t *hashed_msg, size_t hashed_msg_len, int hash_nid, const uint8_t *context, size_t context_len); OPENSSL_EXPORT bcm_status BCM_slhdsa_sha2_128s_prehash_verify( const uint8_t *signature, size_t signature_len, const uint8_t public_key[BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], const uint8_t *hashed_msg, size_t hashed_msg_len, int hash_nid, const uint8_t *context, size_t context_len); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_CRYPTO_BCM_INTERFACE_H ================================================ FILE: 
Sources/CNIOBoringSSL/crypto/fipsmodule/bn/add.cc.inc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the angle-bracket header names below were lost during text
// extraction — the original file includes the OpenSSL/BoringSSL public bn
// headers here. Restore them from upstream before compiling.
#include
#include
#include
#include
#include "../../internal.h"
#include "internal.h"


// BN_add sets |r| to |a| + |b|, taking the signs of the operands into
// account. Returns one on success and zero on error.
int BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
  const BIGNUM *tmp;
  int a_neg = a->neg, ret;

  // Sign analysis:
  //  a +  b   a+b
  //  a + -b   a-b
  // -a +  b   b-a
  // -a + -b   -(a+b)
  if (a_neg ^ b->neg) {
    // only one is negative; swap so that the negative operand is |b| and the
    // computation becomes an unsigned subtraction.
    if (a_neg) {
      tmp = a;
      a = b;
      b = tmp;
    }

    // we are now a - b; pick the order that keeps the magnitude non-negative
    // and set the sign of |r| accordingly.
    if (BN_ucmp(a, b) < 0) {
      if (!BN_usub(r, b, a)) {
        return 0;
      }
      r->neg = 1;
    } else {
      if (!BN_usub(r, a, b)) {
        return 0;
      }
      r->neg = 0;
    }
    return 1;
  }

  // Same sign: add magnitudes and keep the common sign.
  ret = BN_uadd(r, a, b);
  r->neg = a_neg;
  return ret;
}

// bn_uadd_consttime computes |r| = |a| + |b|, ignoring signs. Widths are
// treated as public; the word values are processed without data-dependent
// branches. The result is always max(a->width, b->width) + 1 words wide.
// Returns one on success and zero on allocation failure.
int bn_uadd_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
  // Widths are public, so we normalize to make |a| the larger one.
  if (a->width < b->width) {
    const BIGNUM *tmp = a;
    a = b;
    b = tmp;
  }

  int max = a->width;
  int min = b->width;
  if (!bn_wexpand(r, max + 1)) {
    return 0;
  }
  r->width = max + 1;

  // Add the overlapping words, then propagate the carry through the rest of
  // |a| and store the final carry in the extra top word.
  BN_ULONG carry = bn_add_words(r->d, a->d, b->d, min);
  for (int i = min; i < max; i++) {
    r->d[i] = CRYPTO_addc_w(a->d[i], 0, carry, &carry);
  }
  r->d[max] = carry;
  return 1;
}

// BN_uadd sets |r| to |a| + |b|, ignoring signs, and then normalizes |r| to
// its minimal width. Returns one on success and zero on error.
int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
  if (!bn_uadd_consttime(r, a, b)) {
    return 0;
  }
  bn_set_minimal_width(r);
  return 1;
}

// BN_add_word adds the single word |w| to |a| in place, handling the sign of
// |a|. Returns one on success and zero on error.
int BN_add_word(BIGNUM *a, BN_ULONG w) {
  BN_ULONG l;
  int i;

  // degenerate case: w is zero
  if (!w) {
    return 1;
  }

  // degenerate case: a is zero
  if (BN_is_zero(a)) {
    return BN_set_word(a, w);
  }

  // handle 'a' when negative: -|a| + w == -(|a| - w), delegated to
  // BN_sub_word on the magnitude.
  if (a->neg) {
    a->neg = 0;
    i = BN_sub_word(a, w);
    if (!BN_is_zero(a)) {
      a->neg = !(a->neg);
    }
    return i;
  }

  // Schoolbook word-wise add: |w| carries into the next word whenever the sum
  // wraps (detected by w > l after the unsigned addition).
  for (i = 0; w != 0 && i < a->width; i++) {
    a->d[i] = l = a->d[i] + w;
    w = (w > l) ? 1 : 0;
  }

  // A carry out of the top word needs one extra word of storage.
  if (w && i == a->width) {
    if (!bn_wexpand(a, a->width + 1)) {
      return 0;
    }
    a->width++;
    a->d[i] = w;
  }

  return 1;
}

// BN_sub sets |r| to |a| - |b|, taking the signs of the operands into
// account. Returns one on success and zero on error.
int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
  int add = 0, neg = 0;
  const BIGNUM *tmp;

  // Sign analysis:
  //  a -  b   a-b
  //  a - -b   a+b
  // -a -  b   -(a+b)
  // -a - -b   b-a
  if (a->neg) {
    if (b->neg) {
      tmp = a;
      a = b;
      b = tmp;
    } else {
      add = 1;
      neg = 1;
    }
  } else {
    if (b->neg) {
      add = 1;
      neg = 0;
    }
  }

  if (add) {
    if (!BN_uadd(r, a, b)) {
      return 0;
    }
    r->neg = neg;
    return 1;
  }

  // Same-sign case: subtract the smaller magnitude from the larger and set
  // the sign of |r| from the comparison.
  if (BN_ucmp(a, b) < 0) {
    if (!BN_usub(r, b, a)) {
      return 0;
    }
    r->neg = 1;
  } else {
    if (!BN_usub(r, a, b)) {
      return 0;
    }
    r->neg = 0;
  }
  return 1;
}

// bn_usub_consttime computes |r| = |a| - |b|, ignoring signs, requiring
// |a| >= |b|. Widths are treated as public. Returns one on success and zero
// on error (allocation failure or |b| > |a|).
int bn_usub_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
  // |b| may have more words than |a| given non-minimal inputs, but all words
  // beyond |a->width| must then be zero.
  int b_width = b->width;
  if (b_width > a->width) {
    if (!bn_fits_in_words(b, a->width)) {
      OPENSSL_PUT_ERROR(BN, BN_R_ARG2_LT_ARG3);
      return 0;
    }
    b_width = a->width;
  }

  if (!bn_wexpand(r, a->width)) {
    return 0;
  }

  // Subtract the overlapping words, then propagate the borrow through the
  // remaining words of |a|.
  BN_ULONG borrow = bn_sub_words(r->d, a->d, b->d, b_width);
  for (int i = b_width; i < a->width; i++) {
    r->d[i] = CRYPTO_subc_w(a->d[i], 0, borrow, &borrow);
  }

  // A final borrow means |b| was larger than |a|, which is a caller error.
  if (borrow) {
    OPENSSL_PUT_ERROR(BN, BN_R_ARG2_LT_ARG3);
    return 0;
  }

  r->width = a->width;
  r->neg = 0;
  return 1;
}

// BN_usub sets |r| to |a| - |b|, ignoring signs, and then normalizes |r| to
// its minimal width. Returns one on success and zero on error.
int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) {
  if (!bn_usub_consttime(r, a, b)) {
    return 0;
  }
  bn_set_minimal_width(r);
  return 1;
}

// BN_sub_word subtracts the single word |w| from |a| in place, handling the
// sign of |a|. Returns one on success and zero on error.
int BN_sub_word(BIGNUM *a, BN_ULONG w) {
  int i;

  // degenerate case: w is zero
  if (!w) {
    return 1;
  }

  // degenerate case: a is zero
  if (BN_is_zero(a)) {
    i = BN_set_word(a, w);
    if (i != 0) {
      BN_set_negative(a, 1);
    }
    return i;
  }

  // handle 'a' when negative: -|a| - w == -(|a| + w).
  if (a->neg) {
    a->neg = 0;
    i = BN_add_word(a, w);
    a->neg = 1;
    return i;
  }

  // Single-word |a| smaller than |w|: the result flips sign.
  if ((bn_minimal_width(a) == 1) && (a->d[0] < w)) {
    a->d[0] = w - a->d[0];
    a->neg = 1;
    return 1;
  }

  // Word-wise subtract with borrow propagation; the unsigned wrap of
  // a->d[i] -= w is intentional when borrowing.
  i = 0;
  for (;;) {
    if (a->d[i] >= w) {
      a->d[i] -= w;
      break;
    } else {
      a->d[i] -= w;
      i++;
      w = 1;
    }
  }

  // Drop the top word if it became zero.
  if ((a->d[i] == 0) && (i == (a->width - 1))) {
    a->width--;
  }

  return 1;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/asm/x86_64-gcc.cc.inc
================================================
/*
 * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/* x86_64 BIGNUM accelerator version 0.1, December 2002.
 *
 * Implemented by Andy Polyakov for the OpenSSL
 * project.
 *
 * Rights for redistribution and usage in source and binary forms are
 * granted according to the OpenSSL license. Warranty of any kind is
 * disclaimed.
 *
 * Q. Version 0.1?
It doesn't sound like Andy, he used to assign real * versions, like 1.0... * A. Well, that's because this code is basically a quick-n-dirty * proof-of-concept hack. As you can see it's implemented with * inline assembler, which means that you're bound to GCC and that * there might be enough room for further improvement. * * Q. Why inline assembler? * A. x86_64 features own ABI which I'm not familiar with. This is * why I decided to let the compiler take care of subroutine * prologue/epilogue as well as register allocation. For reference. * Win64 implements different ABI for AMD64, different from Linux. * * Q. How much faster does it get? * A. 'apps/openssl speed rsa dsa' output with no-asm: * * sign verify sign/s verify/s * rsa 512 bits 0.0006s 0.0001s 1683.8 18456.2 * rsa 1024 bits 0.0028s 0.0002s 356.0 6407.0 * rsa 2048 bits 0.0172s 0.0005s 58.0 1957.8 * rsa 4096 bits 0.1155s 0.0018s 8.7 555.6 * sign verify sign/s verify/s * dsa 512 bits 0.0005s 0.0006s 2100.8 1768.3 * dsa 1024 bits 0.0014s 0.0018s 692.3 559.2 * dsa 2048 bits 0.0049s 0.0061s 204.7 165.0 * * 'apps/openssl speed rsa dsa' output with this module: * * sign verify sign/s verify/s * rsa 512 bits 0.0004s 0.0000s 2767.1 33297.9 * rsa 1024 bits 0.0012s 0.0001s 867.4 14674.7 * rsa 2048 bits 0.0061s 0.0002s 164.0 5270.0 * rsa 4096 bits 0.0384s 0.0006s 26.1 1650.8 * sign verify sign/s verify/s * dsa 512 bits 0.0002s 0.0003s 4442.2 3786.3 * dsa 1024 bits 0.0005s 0.0007s 1835.1 1497.4 * dsa 2048 bits 0.0016s 0.0020s 620.4 504.6 * * For the reference. IA-32 assembler implementation performs * very much like 64-bit code compiled with no-asm on the same * machine. */ #include // TODO(davidben): Get this file working on MSVC x64. 
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \
    (defined(__GNUC__) || defined(__clang__))

#include "../internal.h"

#undef mul
#undef mul_add

// "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
// "g"(0) let the compiler to decide where does it
// want to keep the value of zero;

// mul_add(r, a, word, carry): r += a * word + carry; carry receives the new
// high word.
#define mul_add(r, a, word, carry)                                           \
  do {                                                                       \
    BN_ULONG high, low;                                                      \
    __asm__("mulq %3" : "=a"(low), "=d"(high) : "a"(word), "m"(a) : "cc");   \
    __asm__("addq %2,%0; adcq %3,%1"                                         \
            : "+r"(carry), "+d"(high)                                        \
            : "a"(low), "g"(0)                                               \
            : "cc");                                                         \
    __asm__("addq %2,%0; adcq %3,%1"                                         \
            : "+m"(r), "+d"(high)                                            \
            : "r"(carry), "g"(0)                                             \
            : "cc");                                                         \
    (carry) = high;                                                          \
  } while (0)

// mul(r, a, word, carry): r = low(a * word + carry); carry receives the new
// high word.
#define mul(r, a, word, carry)                                               \
  do {                                                                       \
    BN_ULONG high, low;                                                      \
    __asm__("mulq %3" : "=a"(low), "=d"(high) : "a"(word), "g"(a) : "cc");   \
    __asm__("addq %2,%0; adcq %3,%1"                                         \
            : "+r"(carry), "+d"(high)                                        \
            : "a"(low), "g"(0)                                               \
            : "cc");                                                         \
    (r) = (carry);                                                           \
    (carry) = high;                                                          \
  } while (0)

#undef sqr
// sqr(r0, r1, a): (r1, r0) = a * a.
#define sqr(r0, r1, a) __asm__("mulq %2" : "=a"(r0), "=d"(r1) : "a"(a) : "cc");

// bn_mul_add_words computes rp[i] += ap[i] * w for |num| words, unrolled four
// at a time, and returns the final carry word.
BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, size_t num,
                          BN_ULONG w) {
  BN_ULONG c1 = 0;

  if (num == 0) {
    return (c1);
  }

  while (num & ~3) {
    mul_add(rp[0], ap[0], w, c1);
    mul_add(rp[1], ap[1], w, c1);
    mul_add(rp[2], ap[2], w, c1);
    mul_add(rp[3], ap[3], w, c1);
    ap += 4;
    rp += 4;
    num -= 4;
  }
  // Handle the remaining 1-3 words.
  if (num) {
    mul_add(rp[0], ap[0], w, c1);
    if (--num == 0) {
      return c1;
    }
    mul_add(rp[1], ap[1], w, c1);
    if (--num == 0) {
      return c1;
    }
    mul_add(rp[2], ap[2], w, c1);
    return c1;
  }

  return c1;
}

// bn_mul_words computes rp[i] = ap[i] * w for |num| words, unrolled four at a
// time, and returns the final carry word.
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, size_t num,
                      BN_ULONG w) {
  BN_ULONG c1 = 0;

  if (num == 0) {
    return c1;
  }

  while (num & ~3) {
    mul(rp[0], ap[0], w, c1);
    mul(rp[1], ap[1], w, c1);
    mul(rp[2], ap[2], w, c1);
    mul(rp[3], ap[3], w, c1);
    ap += 4;
    rp += 4;
    num -= 4;
  }
  // Handle the remaining 1-3 words.
  if (num) {
    mul(rp[0], ap[0], w, c1);
    if (--num == 0) {
      return c1;
    }
    mul(rp[1], ap[1], w, c1);
    if (--num == 0) {
      return c1;
    }
    mul(rp[2], ap[2], w, c1);
  }
  return c1;
}

// bn_sqr_words computes (r[2i+1], r[2i]) = a[i]^2 for |n| input words,
// unrolled four at a time.
void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, size_t n) {
  if (n == 0) {
    return;
  }

  while (n & ~3) {
    sqr(r[0], r[1], a[0]);
    sqr(r[2], r[3], a[1]);
    sqr(r[4], r[5], a[2]);
    sqr(r[6], r[7], a[3]);
    a += 4;
    r += 8;
    n -= 4;
  }
  // Handle the remaining 1-3 words.
  if (n) {
    sqr(r[0], r[1], a[0]);
    if (--n == 0) {
      return;
    }
    sqr(r[2], r[3], a[1]);
    if (--n == 0) {
      return;
    }
    sqr(r[4], r[5], a[2]);
  }
}

// bn_add_words computes rp[i] = ap[i] + bp[i] for |n| words with carry
// propagation and returns the final carry (0 or 1).
BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                      size_t n) {
  BN_ULONG ret;
  size_t i = 0;

  if (n == 0) {
    return 0;
  }

  __asm__ volatile(
      " subq %0,%0 \n"  // clear carry
      " jmp 1f \n"
      ".p2align 4 \n"
      "1:"
      " movq (%4,%2,8),%0 \n"
      " adcq (%5,%2,8),%0 \n"
      " movq %0,(%3,%2,8) \n"
      " lea 1(%2),%2 \n"
      " dec %1 \n"
      " jnz 1b \n"
      " sbbq %0,%0 \n"
      : "=&r"(ret), "+c"(n), "+r"(i)
      : "r"(rp), "r"(ap), "r"(bp)
      : "cc", "memory");

  return ret & 1;
}

// bn_sub_words computes rp[i] = ap[i] - bp[i] for |n| words with borrow
// propagation and returns the final borrow (0 or 1).
BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                      size_t n) {
  BN_ULONG ret;
  size_t i = 0;

  if (n == 0) {
    return 0;
  }

  __asm__ volatile(
      " subq %0,%0 \n"  // clear borrow
      " jmp 1f \n"
      ".p2align 4 \n"
      "1:"
      " movq (%4,%2,8),%0 \n"
      " sbbq (%5,%2,8),%0 \n"
      " movq %0,(%3,%2,8) \n"
      " lea 1(%2),%2 \n"
      " dec %1 \n"
      " jnz 1b \n"
      " sbbq %0,%0 \n"
      : "=&r"(ret), "+c"(n), "+r"(i)
      : "r"(rp), "r"(ap), "r"(bp)
      : "cc", "memory");

  return ret & 1;
}

// mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0)
// mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0)
// sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0)
// sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0)
//
// Keep in mind that carrying into high part of multiplication result can not
// overflow, because it cannot be all-ones.
// mul_add_c(a, b, c0, c1, c2) adds the double-word product a*b into the
// three-word accumulator (c2, c1, c0), propagating carries upward.
#define mul_add_c(a, b, c0, c1, c2)                                   \
  do {                                                                \
    BN_ULONG t1, t2;                                                  \
    __asm__("mulq %3" : "=a"(t1), "=d"(t2) : "a"(a), "m"(b) : "cc");  \
    __asm__("addq %3,%0; adcq %4,%1; adcq %5,%2"                      \
            : "+r"(c0), "+r"(c1), "+r"(c2)                            \
            : "r"(t1), "r"(t2), "g"(0)                                \
            : "cc");                                                  \
  } while (0)

// sqr_add_c(a, i, c0, c1, c2) adds a[i]^2 into the three-word accumulator
// (c2, c1, c0).
#define sqr_add_c(a, i, c0, c1, c2)                                   \
  do {                                                                \
    BN_ULONG t1, t2;                                                  \
    __asm__("mulq %2" : "=a"(t1), "=d"(t2) : "a"((a)[i]) : "cc");     \
    __asm__("addq %3,%0; adcq %4,%1; adcq %5,%2"                      \
            : "+r"(c0), "+r"(c1), "+r"(c2)                            \
            : "r"(t1), "r"(t2), "g"(0)                                \
            : "cc");                                                  \
  } while (0)

// mul_add_c2(a, b, c0, c1, c2) adds 2*a*b into the three-word accumulator
// (c2, c1, c0), by adding the product in twice.
#define mul_add_c2(a, b, c0, c1, c2)                                  \
  do {                                                                \
    BN_ULONG t1, t2;                                                  \
    __asm__("mulq %3" : "=a"(t1), "=d"(t2) : "a"(a), "m"(b) : "cc");  \
    __asm__("addq %3,%0; adcq %4,%1; adcq %5,%2"                      \
            : "+r"(c0), "+r"(c1), "+r"(c2)                            \
            : "r"(t1), "r"(t2), "g"(0)                                \
            : "cc");                                                  \
    __asm__("addq %3,%0; adcq %4,%1; adcq %5,%2"                      \
            : "+r"(c0), "+r"(c1), "+r"(c2)                            \
            : "r"(t1), "r"(t2), "g"(0)                                \
            : "cc");                                                  \
  } while (0)

// sqr_add_c2(a, i, j, c0, c1, c2) adds 2*a[i]*a[j] into (c2, c1, c0).
#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)

// bn_mul_comba8 computes r = a * b for 8-word inputs with a fully-unrolled
// comba (column-wise) schoolbook multiplication. Output column k accumulates
// every product a[i]*b[j] with i + j == k in a rotating three-word
// accumulator; the just-finished low word is stored to r[k] and then reused
// as the new high word of the accumulator.
void bn_mul_comba8(BN_ULONG r[16], const BN_ULONG a[8], const BN_ULONG b[8]) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  mul_add_c(a[0], b[0], c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  mul_add_c(a[0], b[1], c2, c3, c1);
  mul_add_c(a[1], b[0], c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  mul_add_c(a[2], b[0], c3, c1, c2);
  mul_add_c(a[1], b[1], c3, c1, c2);
  mul_add_c(a[0], b[2], c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  mul_add_c(a[0], b[3], c1, c2, c3);
  mul_add_c(a[1], b[2], c1, c2, c3);
  mul_add_c(a[2], b[1], c1, c2, c3);
  mul_add_c(a[3], b[0], c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  mul_add_c(a[4], b[0], c2, c3, c1);
  mul_add_c(a[3], b[1], c2, c3, c1);
  mul_add_c(a[2], b[2], c2, c3, c1);
  mul_add_c(a[1], b[3], c2, c3, c1);
  mul_add_c(a[0], b[4], c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  mul_add_c(a[0], b[5], c3, c1, c2);
  mul_add_c(a[1], b[4], c3, c1, c2);
  mul_add_c(a[2], b[3], c3, c1, c2);
  mul_add_c(a[3], b[2], c3, c1, c2);
  mul_add_c(a[4], b[1], c3, c1, c2);
  mul_add_c(a[5], b[0], c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  mul_add_c(a[6], b[0], c1, c2, c3);
  mul_add_c(a[5], b[1], c1, c2, c3);
  mul_add_c(a[4], b[2], c1, c2, c3);
  mul_add_c(a[3], b[3], c1, c2, c3);
  mul_add_c(a[2], b[4], c1, c2, c3);
  mul_add_c(a[1], b[5], c1, c2, c3);
  mul_add_c(a[0], b[6], c1, c2, c3);
  r[6] = c1;
  c1 = 0;
  mul_add_c(a[0], b[7], c2, c3, c1);
  mul_add_c(a[1], b[6], c2, c3, c1);
  mul_add_c(a[2], b[5], c2, c3, c1);
  mul_add_c(a[3], b[4], c2, c3, c1);
  mul_add_c(a[4], b[3], c2, c3, c1);
  mul_add_c(a[5], b[2], c2, c3, c1);
  mul_add_c(a[6], b[1], c2, c3, c1);
  mul_add_c(a[7], b[0], c2, c3, c1);
  r[7] = c2;
  c2 = 0;
  mul_add_c(a[7], b[1], c3, c1, c2);
  mul_add_c(a[6], b[2], c3, c1, c2);
  mul_add_c(a[5], b[3], c3, c1, c2);
  mul_add_c(a[4], b[4], c3, c1, c2);
  mul_add_c(a[3], b[5], c3, c1, c2);
  mul_add_c(a[2], b[6], c3, c1, c2);
  mul_add_c(a[1], b[7], c3, c1, c2);
  r[8] = c3;
  c3 = 0;
  mul_add_c(a[2], b[7], c1, c2, c3);
  mul_add_c(a[3], b[6], c1, c2, c3);
  mul_add_c(a[4], b[5], c1, c2, c3);
  mul_add_c(a[5], b[4], c1, c2, c3);
  mul_add_c(a[6], b[3], c1, c2, c3);
  mul_add_c(a[7], b[2], c1, c2, c3);
  r[9] = c1;
  c1 = 0;
  mul_add_c(a[7], b[3], c2, c3, c1);
  mul_add_c(a[6], b[4], c2, c3, c1);
  mul_add_c(a[5], b[5], c2, c3, c1);
  mul_add_c(a[4], b[6], c2, c3, c1);
  mul_add_c(a[3], b[7], c2, c3, c1);
  r[10] = c2;
  c2 = 0;
  mul_add_c(a[4], b[7], c3, c1, c2);
  mul_add_c(a[5], b[6], c3, c1, c2);
  mul_add_c(a[6], b[5], c3, c1, c2);
  mul_add_c(a[7], b[4], c3, c1, c2);
  r[11] = c3;
  c3 = 0;
  mul_add_c(a[7], b[5], c1, c2, c3);
  mul_add_c(a[6], b[6], c1, c2, c3);
  mul_add_c(a[5], b[7], c1, c2, c3);
  r[12] = c1;
  c1 = 0;
  mul_add_c(a[6], b[7], c2, c3, c1);
  mul_add_c(a[7], b[6], c2, c3, c1);
  r[13] = c2;
  c2 = 0;
  mul_add_c(a[7], b[7], c3, c1, c2);
  r[14] = c3;
  r[15] = c1;
}

// bn_mul_comba4 computes r = a * b for 4-word inputs; same column-wise
// accumulation scheme as |bn_mul_comba8|.
void bn_mul_comba4(BN_ULONG r[8], const BN_ULONG a[4], const BN_ULONG b[4]) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  mul_add_c(a[0], b[0], c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  mul_add_c(a[0], b[1], c2, c3, c1);
  mul_add_c(a[1], b[0], c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  mul_add_c(a[2], b[0], c3, c1, c2);
  mul_add_c(a[1], b[1], c3, c1, c2);
  mul_add_c(a[0], b[2], c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  mul_add_c(a[0], b[3], c1, c2, c3);
  mul_add_c(a[1], b[2], c1, c2, c3);
  mul_add_c(a[2], b[1], c1, c2, c3);
  mul_add_c(a[3], b[0], c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  mul_add_c(a[3], b[1], c2, c3, c1);
  mul_add_c(a[2], b[2], c2, c3, c1);
  mul_add_c(a[1], b[3], c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  mul_add_c(a[2], b[3], c3, c1, c2);
  mul_add_c(a[3], b[2], c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  mul_add_c(a[3], b[3], c1, c2, c3);
  r[6] = c1;
  r[7] = c2;
}

// bn_sqr_comba8 computes r = a^2 for an 8-word input. Off-diagonal products
// a[i]*a[j] (i != j) appear twice in the square, hence |sqr_add_c2|; the
// diagonal terms a[i]^2 appear once, via |sqr_add_c|.
void bn_sqr_comba8(BN_ULONG r[16], const BN_ULONG a[8]) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  sqr_add_c(a, 0, c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  sqr_add_c2(a, 1, 0, c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  sqr_add_c(a, 1, c3, c1, c2);
  sqr_add_c2(a, 2, 0, c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  sqr_add_c2(a, 3, 0, c1, c2, c3);
  sqr_add_c2(a, 2, 1, c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  sqr_add_c(a, 2, c2, c3, c1);
  sqr_add_c2(a, 3, 1, c2, c3, c1);
  sqr_add_c2(a, 4, 0, c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  sqr_add_c2(a, 5, 0, c3, c1, c2);
  sqr_add_c2(a, 4, 1, c3, c1, c2);
  sqr_add_c2(a, 3, 2, c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  sqr_add_c(a, 3, c1, c2, c3);
  sqr_add_c2(a, 4, 2, c1, c2, c3);
  sqr_add_c2(a, 5, 1, c1, c2, c3);
  sqr_add_c2(a, 6, 0, c1, c2, c3);
  r[6] = c1;
  c1 = 0;
  sqr_add_c2(a, 7, 0, c2, c3, c1);
  sqr_add_c2(a, 6, 1, c2, c3, c1);
  sqr_add_c2(a, 5, 2, c2, c3, c1);
  sqr_add_c2(a, 4, 3, c2, c3, c1);
  r[7] = c2;
  c2 = 0;
  sqr_add_c(a, 4, c3, c1, c2);
  sqr_add_c2(a, 5, 3, c3, c1, c2);
  sqr_add_c2(a, 6, 2, c3, c1, c2);
  sqr_add_c2(a, 7, 1, c3, c1, c2);
  r[8] = c3;
  c3 = 0;
  sqr_add_c2(a, 7, 2, c1, c2, c3);
  sqr_add_c2(a, 6, 3, c1, c2, c3);
  sqr_add_c2(a, 5, 4, c1, c2, c3);
  r[9] = c1;
  c1 = 0;
  sqr_add_c(a, 5, c2, c3, c1);
  sqr_add_c2(a, 6, 4, c2, c3, c1);
  sqr_add_c2(a, 7, 3, c2, c3, c1);
  r[10] = c2;
  c2 = 0;
  sqr_add_c2(a, 7, 4, c3, c1, c2);
  sqr_add_c2(a, 6, 5, c3, c1, c2);
  r[11] = c3;
  c3 = 0;
  sqr_add_c(a, 6, c1, c2, c3);
  sqr_add_c2(a, 7, 5, c1, c2, c3);
  r[12] = c1;
  c1 = 0;
  sqr_add_c2(a, 7, 6, c2, c3, c1);
  r[13] = c2;
  c2 = 0;
  sqr_add_c(a, 7, c3, c1, c2);
  r[14] = c3;
  r[15] = c1;
}

// bn_sqr_comba4 computes r = a^2 for a 4-word input; same scheme as
// |bn_sqr_comba8|.
void bn_sqr_comba4(BN_ULONG r[8], const BN_ULONG a[4]) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  sqr_add_c(a, 0, c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  sqr_add_c2(a, 1, 0, c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  sqr_add_c(a, 1, c3, c1, c2);
  sqr_add_c2(a, 2, 0, c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  sqr_add_c2(a, 3, 0, c1, c2, c3);
  sqr_add_c2(a, 2, 1, c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  sqr_add_c(a, 2, c2, c3, c1);
  sqr_add_c2(a, 3, 1, c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  sqr_add_c2(a, 3, 2, c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  sqr_add_c(a, 3, c1, c2, c3);
  r[6] = c1;
  r[7] = c2;
}

#undef mul_add
#undef mul
#undef sqr
#undef mul_add_c
#undef sqr_add_c
#undef mul_add_c2
#undef sqr_add_c2

#endif  // !NO_ASM && X86_64 && (__GNUC__ || __clang__)


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/bn.cc.inc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the <...> header names on the following includes appear to
// have been stripped by the text extraction; verify against upstream.
#include
#include
#include
#include
#include
#include

#include "../delocate.h"
#include "internal.h"


// BN_MAX_WORDS is the maximum number of words allowed in a |BIGNUM|. It is
// sized so byte and bit counts of a |BIGNUM| always fit in |int|, with room to
// spare.
#define BN_MAX_WORDS (INT_MAX / (4 * BN_BITS2))

// BN_new allocates a zero-initialised |BIGNUM| on the heap, flagged so that
// |BN_free| releases the structure itself. Returns NULL on allocation failure.
BIGNUM *BN_new(void) {
  // NOTE(review): the template argument of this reinterpret_cast appears to
  // have been stripped by the text extraction; verify against upstream.
  BIGNUM *bn = reinterpret_cast(OPENSSL_malloc(sizeof(BIGNUM)));

  if (bn == NULL) {
    return NULL;
  }

  OPENSSL_memset(bn, 0, sizeof(BIGNUM));
  bn->flags = BN_FLG_MALLOCED;
  return bn;
}

// There is no separate secure heap here; BN_secure_new is an alias of BN_new.
BIGNUM *BN_secure_new(void) { return BN_new(); }

// BN_init zero-initialises a caller-allocated |BIGNUM| in place.
void BN_init(BIGNUM *bn) { OPENSSL_memset(bn, 0, sizeof(BIGNUM)); }

// BN_free releases |bn|'s word array unless it is static data, and releases
// the structure itself only if it was heap-allocated by |BN_new|.
void BN_free(BIGNUM *bn) {
  if (bn == NULL) {
    return;
  }

  if ((bn->flags & BN_FLG_STATIC_DATA) == 0) {
    OPENSSL_free(bn->d);
  }

  if (bn->flags & BN_FLG_MALLOCED) {
    OPENSSL_free(bn);
  } else {
    bn->d = NULL;
  }
}

// BN_clear_free is an alias of BN_free here.
void BN_clear_free(BIGNUM *bn) { BN_free(bn); }

// BN_dup returns a newly allocated copy of |src|, or NULL on failure or if
// |src| is NULL.
BIGNUM *BN_dup(const BIGNUM *src) {
  BIGNUM *copy;

  if (src == NULL) {
    return NULL;
  }

  copy = BN_new();
  if (copy == NULL) {
    return NULL;
  }

  if (!BN_copy(copy, src)) {
    BN_free(copy);
    return NULL;
  }

  return copy;
}

// BN_copy copies |src|'s words, width, and sign into |dest|, growing |dest|
// as needed. Returns |dest| on success and NULL on allocation failure.
BIGNUM *BN_copy(BIGNUM *dest, const BIGNUM *src) {
  if (src == dest) {
    return dest;
  }

  if (!bn_wexpand(dest, src->width)) {
    return NULL;
  }

  OPENSSL_memcpy(dest->d, src->d, sizeof(src->d[0]) * src->width);

  dest->width = src->width;
  dest->neg = src->neg;
  return dest;
}

// BN_clear zeroes the entire allocated word array (all |dmax| words, not just
// |width|) and resets the value to zero.
void BN_clear(BIGNUM *bn) {
  if (bn->d != NULL) {
    OPENSSL_memset(bn->d, 0, bn->dmax * sizeof(bn->d[0]));
  }

  bn->width = 0;
  bn->neg = 0;
}

// BN_value_one returns a static |BIGNUM| with value one; its word array is
// read-only static data.
DEFINE_METHOD_FUNCTION(BIGNUM, BN_value_one) {
  static const BN_ULONG kOneLimbs[1] = {1};
  out->d = (BN_ULONG *)kOneLimbs;
  out->width = 1;
  out->dmax = 1;
  out->neg = 0;
  out->flags = BN_FLG_STATIC_DATA;
}

// BN_num_bits_word returns the minimum number of bits needed to represent the
// value in |l|.
unsigned BN_num_bits_word(BN_ULONG l) {
  // |BN_num_bits| is often called on RSA prime factors. These have public bit
  // lengths, but all bits beyond the high bit are secret, so count bits in
  // constant time.
  BN_ULONG x, mask;
  int bits = (l != 0);

#if BN_BITS2 > 32
  // Look at the upper half of |x|. |x| is at most 64 bits long.
  x = l >> 32;
  // Set |mask| to all ones if |x| (the top 32 bits of |l|) is non-zero and all
  // all zeros otherwise.
  mask = 0u - x;
  mask = (0u - (mask >> (BN_BITS2 - 1)));
  // If |x| is non-zero, the lower half is included in the bit count in full,
  // and we count the upper half. Otherwise, we count the lower half.
  bits += 32 & mask;
  l ^= (x ^ l) & mask;  // |l| is |x| if |mask| and remains |l| otherwise.
#endif

  // The remaining blocks are analogous iterations at lower powers of two.
  x = l >> 16;
  mask = 0u - x;
  mask = (0u - (mask >> (BN_BITS2 - 1)));
  bits += 16 & mask;
  l ^= (x ^ l) & mask;

  x = l >> 8;
  mask = 0u - x;
  mask = (0u - (mask >> (BN_BITS2 - 1)));
  bits += 8 & mask;
  l ^= (x ^ l) & mask;

  x = l >> 4;
  mask = 0u - x;
  mask = (0u - (mask >> (BN_BITS2 - 1)));
  bits += 4 & mask;
  l ^= (x ^ l) & mask;

  x = l >> 2;
  mask = 0u - x;
  mask = (0u - (mask >> (BN_BITS2 - 1)));
  bits += 2 & mask;
  l ^= (x ^ l) & mask;

  x = l >> 1;
  mask = 0u - x;
  mask = (0u - (mask >> (BN_BITS2 - 1)));
  bits += 1 & mask;

  return bits;
}

// BN_num_bits returns the number of significant bits in |bn|; zero for zero.
unsigned BN_num_bits(const BIGNUM *bn) {
  const int width = bn_minimal_width(bn);
  if (width == 0) {
    return 0;
  }

  return (width - 1) * BN_BITS2 + BN_num_bits_word(bn->d[width - 1]);
}

// BN_num_bytes returns the number of bytes needed to serialise |bn|.
unsigned BN_num_bytes(const BIGNUM *bn) { return (BN_num_bits(bn) + 7) / 8; }

// BN_zero sets |bn| to zero without touching the allocation.
void BN_zero(BIGNUM *bn) { bn->width = bn->neg = 0; }

// BN_one sets |bn| to one. Returns one on success, zero on failure.
int BN_one(BIGNUM *bn) { return BN_set_word(bn, 1); }

// BN_set_word sets |bn| to the single-word non-negative value |value|.
int BN_set_word(BIGNUM *bn, BN_ULONG value) {
  if (value == 0) {
    BN_zero(bn);
    return 1;
  }

  if (!bn_wexpand(bn, 1)) {
    return 0;
  }

  bn->neg = 0;
  bn->d[0] = value;
  bn->width = 1;
  return 1;
}

// BN_set_u64 sets |bn| to |value|, using one word on 64-bit platforms and up
// to two words on 32-bit platforms.
int BN_set_u64(BIGNUM *bn, uint64_t value) {
#if BN_BITS2 == 64
  return BN_set_word(bn, value);
#elif BN_BITS2 == 32
  if (value <= BN_MASK2) {
    return BN_set_word(bn, (BN_ULONG)value);
  }

  if (!bn_wexpand(bn, 2)) {
    return 0;
  }

  bn->neg = 0;
  bn->d[0] = (BN_ULONG)value;
  bn->d[1] = (BN_ULONG)(value >> 32);
  bn->width = 2;
  return 1;
#else
#error "BN_BITS2 must be 32 or 64."
#endif
}

// bn_set_words sets |bn| to the little-endian word array |words| of length
// |num|, non-negative. Returns one on success, zero on failure.
int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num) {
  if (!bn_wexpand(bn, num)) {
    return 0;
  }
  OPENSSL_memmove(bn->d, words, num * sizeof(BN_ULONG));
  // |bn_wexpand| verified that |num| isn't too large.
  bn->width = (int)num;
  bn->neg = 0;
  return 1;
}

// bn_set_static_words points |bn| at the caller-owned static word array
// |words|, freeing any previously owned storage and marking |bn| static.
void bn_set_static_words(BIGNUM *bn, const BN_ULONG *words, size_t num) {
  if ((bn->flags & BN_FLG_STATIC_DATA) == 0) {
    OPENSSL_free(bn->d);
  }
  bn->d = (BN_ULONG *)words;

  assert(num <= BN_MAX_WORDS);
  bn->width = (int)num;
  bn->dmax = (int)num;
  bn->neg = 0;
  bn->flags |= BN_FLG_STATIC_DATA;
}

// bn_fits_in_words returns one if |bn|'s value fits in |num| words. The check
// ORs the excess words together rather than branching per word.
int bn_fits_in_words(const BIGNUM *bn, size_t num) {
  // All words beyond |num| must be zero.
  BN_ULONG mask = 0;
  for (size_t i = num; i < (size_t)bn->width; i++) {
    mask |= bn->d[i];
  }
  return mask == 0;
}

// bn_copy_words writes |bn|'s value into exactly |num| words at |out|,
// zero-padded. Fails for negative inputs or values wider than |num| words.
int bn_copy_words(BN_ULONG *out, size_t num, const BIGNUM *bn) {
  if (bn->neg) {
    OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER);
    return 0;
  }

  size_t width = (size_t)bn->width;
  if (width > num) {
    if (!bn_fits_in_words(bn, num)) {
      OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG);
      return 0;
    }
    width = num;
  }

  OPENSSL_memset(out, 0, sizeof(BN_ULONG) * num);
  OPENSSL_memcpy(out, bn->d, sizeof(BN_ULONG) * width);
  return 1;
}

int BN_is_negative(const BIGNUM *bn) { return bn->neg != 0; }

// BN_set_negative sets the sign of |bn|; zero is always non-negative.
void BN_set_negative(BIGNUM *bn, int sign) {
  if (sign && !BN_is_zero(bn)) {
    bn->neg = 1;
  } else {
    bn->neg = 0;
  }
}

// bn_wexpand grows |bn|'s allocation to at least |words| words, preserving
// its value. Fails for static data or when |words| exceeds BN_MAX_WORDS.
int bn_wexpand(BIGNUM *bn, size_t words) {
  BN_ULONG *a;

  if (words <= (size_t)bn->dmax) {
    return 1;
  }

  if (words > BN_MAX_WORDS) {
    OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG);
    return 0;
  }

  if (bn->flags & BN_FLG_STATIC_DATA) {
    OPENSSL_PUT_ERROR(BN, BN_R_EXPAND_ON_STATIC_BIGNUM_DATA);
    return 0;
  }

  // NOTE(review): the template argument of this reinterpret_cast appears to
  // have been stripped by the text extraction; verify against upstream.
  a = reinterpret_cast(OPENSSL_calloc(words, sizeof(BN_ULONG)));
  if (a == NULL) {
    return 0;
  }

  OPENSSL_memcpy(a, bn->d, sizeof(BN_ULONG) * bn->width);

  OPENSSL_free(bn->d);
  bn->d = a;
  bn->dmax = (int)words;

  return 1;
}

// bn_expand grows |bn| to hold at least |bits| bits, guarding the rounding
// addition against overflow.
int bn_expand(BIGNUM *bn, size_t bits) {
  if (bits + BN_BITS2 - 1 < bits) {
    OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG);
    return 0;
  }
  return bn_wexpand(bn, (bits + BN_BITS2 - 1) / BN_BITS2);
}

// bn_resize_words forces |bn|'s width to exactly |words|, zero-extending when
// growing and failing if truncation would drop non-zero words.
int bn_resize_words(BIGNUM *bn, size_t words) {
  if ((size_t)bn->width <= words) {
    if (!bn_wexpand(bn, words)) {
      return 0;
    }
    OPENSSL_memset(bn->d + bn->width, 0,
                   (words - bn->width) * sizeof(BN_ULONG));
    bn->width = (int)words;
    return 1;
  }

  // All words beyond the new width must be zero.
  if (!bn_fits_in_words(bn, words)) {
    OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG);
    return 0;
  }
  bn->width = (int)words;
  return 1;
}

// bn_select_words sets r[i] to a[i] when |mask| is all-ones and b[i] when it
// is zero, without branching on |mask|.
void bn_select_words(BN_ULONG *r, BN_ULONG mask, const BN_ULONG *a,
                     const BN_ULONG *b, size_t num) {
  for (size_t i = 0; i < num; i++) {
    static_assert(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
                  "crypto_word_t is too small");
    r[i] = constant_time_select_w(mask, a[i], b[i]);
  }
}

// bn_minimal_width returns |bn|'s width with high zero words trimmed. Note
// this leaks the position of the top non-zero word via timing.
int bn_minimal_width(const BIGNUM *bn) {
  int ret = bn->width;
  while (ret > 0 && bn->d[ret - 1] == 0) {
    ret--;
  }
  return ret;
}

// bn_set_minimal_width shrinks |bn|'s width in place and normalises the sign
// of zero.
void bn_set_minimal_width(BIGNUM *bn) {
  bn->width = bn_minimal_width(bn);
  if (bn->width == 0) {
    bn->neg = 0;
  }
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/bytes.cc.inc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the <...> header names on the following includes appear to
// have been stripped by the text extraction; verify against upstream.
#include
#include
#include

#include "internal.h"


// bn_big_endian_to_words decodes the big-endian byte string |in| of length
// |in_len| into the little-endian word array |out| of |out_len| words,
// zero-padding the high words.
void bn_big_endian_to_words(BN_ULONG *out, size_t out_len, const uint8_t *in,
                            size_t in_len) {
  // The caller should have sized |out| to fit |in| without truncating. This
  // condition ensures we do not overflow |out|, so use a runtime check.
  BSSL_CHECK(in_len <= out_len * sizeof(BN_ULONG));

  // Load whole words.
  while (in_len >= sizeof(BN_ULONG)) {
    in_len -= sizeof(BN_ULONG);
    out[0] = CRYPTO_load_word_be(in + in_len);
    out++;
    out_len--;
  }

  // Load the last partial word.
  if (in_len != 0) {
    BN_ULONG word = 0;
    for (size_t i = 0; i < in_len; i++) {
      word = (word << 8) | in[i];
    }
    out[0] = word;
    out++;
    out_len--;
  }

  // Fill the remainder with zeros.
  OPENSSL_memset(out, 0, out_len * sizeof(BN_ULONG));
}

// BN_bin2bn parses the big-endian byte string |in| into |ret| (allocating a
// new |BIGNUM| if |ret| is NULL). Returns the result or NULL on failure.
BIGNUM *BN_bin2bn(const uint8_t *in, size_t len, BIGNUM *ret) {
  BIGNUM *bn = NULL;
  if (ret == NULL) {
    bn = BN_new();
    if (bn == NULL) {
      return NULL;
    }
    ret = bn;
  }

  if (len == 0) {
    ret->width = 0;
    return ret;
  }

  size_t num_words = ((len - 1) / BN_BYTES) + 1;
  if (!bn_wexpand(ret, num_words)) {
    BN_free(bn);
    return NULL;
  }

  // |bn_wexpand| must check bounds on |num_words| to write it into
  // |ret->dmax|.
  assert(num_words <= INT_MAX);
  ret->width = (int)num_words;
  ret->neg = 0;

  bn_big_endian_to_words(ret->d, ret->width, in, len);
  return ret;
}

// BN_lebin2bn parses the little-endian byte string |in| into |ret|
// (allocating a new |BIGNUM| if |ret| is NULL).
BIGNUM *BN_lebin2bn(const uint8_t *in, size_t len, BIGNUM *ret) {
  BIGNUM *bn = NULL;
  if (ret == NULL) {
    bn = BN_new();
    if (bn == NULL) {
      return NULL;
    }
    ret = bn;
  }

  if (len == 0) {
    ret->width = 0;
    ret->neg = 0;
    return ret;
  }

  // Reserve enough space in |ret|.
  size_t num_words = ((len - 1) / BN_BYTES) + 1;
  if (!bn_wexpand(ret, num_words)) {
    BN_free(bn);
    return NULL;
  }
  ret->width = (int)num_words;

  // Make sure the top bytes will be zeroed.
  ret->d[num_words - 1] = 0;

  // We only support little-endian platforms, so we can simply memcpy the
  // internal representation.
  OPENSSL_memcpy(ret->d, in, len);
  return ret;
}

// BN_le2bn is an alias of BN_lebin2bn.
BIGNUM *BN_le2bn(const uint8_t *in, size_t len, BIGNUM *ret) {
  return BN_lebin2bn(in, len, ret);
}

// fits_in_bytes returns one if the |num_words| words in |words| can be
// represented in |num_bytes| bytes.
// Checks, without branching per byte, that every byte past |num_bytes| in the
// little-endian image of |words| is zero.
static int fits_in_bytes(const BN_ULONG *words, size_t num_words,
                         size_t num_bytes) {
  const uint8_t *bytes = (const uint8_t *)words;
  size_t tot_bytes = num_words * sizeof(BN_ULONG);
  uint8_t mask = 0;
  for (size_t i = num_bytes; i < tot_bytes; i++) {
    mask |= bytes[i];
  }
  return mask == 0;
}

// bn_assert_fits_in_bytes asserts (debug builds only) that |bn| fits in |num|
// bytes, declassifying the examined high bytes for constant-time checkers.
void bn_assert_fits_in_bytes(const BIGNUM *bn, size_t num) {
  const uint8_t *bytes = (const uint8_t *)bn->d;
  size_t tot_bytes = bn->width * sizeof(BN_ULONG);
  if (tot_bytes > num) {
    CONSTTIME_DECLASSIFY(bytes + num, tot_bytes - num);
    for (size_t i = num; i < tot_bytes; i++) {
      assert(bytes[i] == 0);
    }
    (void)bytes;
  }
}

// bn_words_to_big_endian encodes the little-endian word array |in| as a
// big-endian byte string of exactly |out_len| bytes, zero-padded at the top.
void bn_words_to_big_endian(uint8_t *out, size_t out_len, const BN_ULONG *in,
                            size_t in_len) {
  // The caller should have selected an output length without truncation.
  declassify_assert(fits_in_bytes(in, in_len, out_len));

  // We only support little-endian platforms, so the internal representation is
  // also little-endian as bytes. We can simply copy it in reverse.
  const uint8_t *bytes = (const uint8_t *)in;
  size_t num_bytes = in_len * sizeof(BN_ULONG);
  if (out_len < num_bytes) {
    num_bytes = out_len;
  }

  for (size_t i = 0; i < num_bytes; i++) {
    out[out_len - i - 1] = bytes[i];
  }
  // Pad out the rest of the buffer with zeroes.
  OPENSSL_memset(out, 0, out_len - num_bytes);
}

// BN_bn2bin writes |in| as a minimal-length big-endian byte string to |out|
// and returns the number of bytes written.
size_t BN_bn2bin(const BIGNUM *in, uint8_t *out) {
  size_t n = BN_num_bytes(in);
  bn_words_to_big_endian(out, n, in->d, in->width);
  return n;
}

// BN_bn2le_padded writes |in| as a little-endian byte string of exactly |len|
// bytes. Returns zero if |in| does not fit.
int BN_bn2le_padded(uint8_t *out, size_t len, const BIGNUM *in) {
  if (!fits_in_bytes(in->d, in->width, len)) {
    return 0;
  }

  // We only support little-endian platforms, so we can simply memcpy into the
  // internal representation.
  const uint8_t *bytes = (const uint8_t *)in->d;
  size_t num_bytes = in->width * BN_BYTES;
  if (len < num_bytes) {
    num_bytes = len;
  }

  OPENSSL_memcpy(out, bytes, num_bytes);
  // Pad out the rest of the buffer with zeroes.
  OPENSSL_memset(out + num_bytes, 0, len - num_bytes);
  return 1;
}

// BN_bn2bin_padded writes |in| as a big-endian byte string of exactly |len|
// bytes. Returns zero if |in| does not fit.
int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in) {
  if (!fits_in_bytes(in->d, in->width, len)) {
    return 0;
  }
  bn_words_to_big_endian(out, len, in->d, in->width);
  return 1;
}

// BN_get_word returns |bn|'s value if it fits in one word, and BN_MASK2
// (all-ones) otherwise.
BN_ULONG BN_get_word(const BIGNUM *bn) {
  switch (bn_minimal_width(bn)) {
    case 0:
      return 0;
    case 1:
      return bn->d[0];
    default:
      return BN_MASK2;
  }
}

// BN_get_u64 writes |bn|'s value to |out| if it fits in 64 bits, returning
// one on success and zero otherwise.
int BN_get_u64(const BIGNUM *bn, uint64_t *out) {
  switch (bn_minimal_width(bn)) {
    case 0:
      *out = 0;
      return 1;
    case 1:
      *out = bn->d[0];
      return 1;
#if defined(OPENSSL_32_BIT)
    case 2:
      *out = (uint64_t) bn->d[0] | (((uint64_t) bn->d[1]) << 32);
      return 1;
#endif
    default:
      return 0;
  }
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/cmp.cc.inc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the <...> header names on the following includes appear to
// have been stripped by the text extraction; verify against upstream.
#include
#include
#include

#include "internal.h"
#include "../../internal.h"


// Compares the word arrays |a| (length |a_len|) and |b| (length |b_len|) as
// unsigned little-endian numbers, returning -1, 0, or 1, in time independent
// of the word values (the lengths are treated as public).
static int bn_cmp_words_consttime(const BN_ULONG *a, size_t a_len,
                                  const BN_ULONG *b, size_t b_len) {
  static_assert(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
                "crypto_word_t is too small");
  int ret = 0;
  // Process the common words in little-endian order. Every iteration
  // conditionally overwrites |ret|, so the final value reflects the most
  // significant differing word.
  size_t min = a_len < b_len ? a_len : b_len;
  for (size_t i = 0; i < min; i++) {
    crypto_word_t eq = constant_time_eq_w(a[i], b[i]);
    crypto_word_t lt = constant_time_lt_w(a[i], b[i]);
    ret =
        constant_time_select_int(eq, ret, constant_time_select_int(lt, -1, 1));
  }

  // If |a| or |b| has non-zero words beyond |min|, they take precedence.
  if (a_len < b_len) {
    crypto_word_t mask = 0;
    for (size_t i = a_len; i < b_len; i++) {
      mask |= b[i];
    }
    ret = constant_time_select_int(constant_time_is_zero_w(mask), ret, -1);
  } else if (b_len < a_len) {
    crypto_word_t mask = 0;
    for (size_t i = b_len; i < a_len; i++) {
      mask |= a[i];
    }
    ret = constant_time_select_int(constant_time_is_zero_w(mask), ret, 1);
  }

  return ret;
}

// BN_ucmp compares the absolute values of |a| and |b|.
int BN_ucmp(const BIGNUM *a, const BIGNUM *b) {
  return bn_cmp_words_consttime(a->d, a->width, b->d, b->width);
}

// BN_cmp compares |a| and |b| as signed numbers; NULL sorts below any
// non-NULL value.
int BN_cmp(const BIGNUM *a, const BIGNUM *b) {
  if ((a == NULL) || (b == NULL)) {
    if (a != NULL) {
      return -1;
    } else if (b != NULL) {
      return 1;
    } else {
      return 0;
    }
  }

  // We do not attempt to process the sign bit in constant time. Negative
  // |BIGNUM|s should never occur in crypto, only calculators.
  if (a->neg != b->neg) {
    if (a->neg) {
      return -1;
    }
    return 1;
  }

  int ret = BN_ucmp(a, b);
  return a->neg ? -ret : ret;
}

// bn_less_than_words returns one if |a| < |b| over |len| common words.
int bn_less_than_words(const BN_ULONG *a, const BN_ULONG *b, size_t len) {
  return bn_cmp_words_consttime(a, len, b, len) < 0;
}

// BN_abs_is_word returns one if |bn|'s absolute value equals the word |w|.
int BN_abs_is_word(const BIGNUM *bn, BN_ULONG w) {
  if (bn->width == 0) {
    return w == 0;
  }
  BN_ULONG mask = bn->d[0] ^ w;
  for (int i = 1; i < bn->width; i++) {
    mask |= bn->d[i];
  }
  return mask == 0;
}

// BN_cmp_word compares |a| to the single non-negative word |b| by wrapping
// |b| in a stack-allocated static |BIGNUM|.
int BN_cmp_word(const BIGNUM *a, BN_ULONG b) {
  BIGNUM b_bn;
  BN_init(&b_bn);

  b_bn.d = &b;
  b_bn.width = b > 0;  // width zero represents the value zero
  b_bn.dmax = 1;
  b_bn.flags = BN_FLG_STATIC_DATA;
  return BN_cmp(a, &b_bn);
}

int BN_is_zero(const BIGNUM *bn) { return bn_fits_in_words(bn, 0); }

int BN_is_one(const BIGNUM *bn) {
  return bn->neg == 0 && BN_abs_is_word(bn, 1);
}

// BN_is_word returns one if |bn| equals |w|; a non-zero |w| requires a
// non-negative |bn|.
int BN_is_word(const BIGNUM *bn, BN_ULONG w) {
  return BN_abs_is_word(bn, w) && (w == 0 || bn->neg == 0);
}

int BN_is_odd(const BIGNUM *bn) {
  return bn->width > 0 && (bn->d[0] & 1) == 1;
}

// BN_is_pow2 returns one if |bn| is a positive power of two: all lower words
// zero and exactly one bit set in the top word.
int BN_is_pow2(const BIGNUM *bn) {
  int width = bn_minimal_width(bn);
  if (width == 0 || bn->neg) {
    return 0;
  }

  for (int i = 0; i < width - 1; i++) {
    if (bn->d[i] != 0) {
      return 0;
    }
  }

  // x & (x - 1) clears the lowest set bit; zero here means one bit was set.
  return 0 == (bn->d[width-1] & (bn->d[width-1] - 1));
}

// BN_equal_consttime returns one if |a| and |b| are equal (value and sign),
// in time independent of the word values.
int BN_equal_consttime(const BIGNUM *a, const BIGNUM *b) {
  BN_ULONG mask = 0;
  // If |a| or |b| has more words than the other, all those words must be zero.
  for (int i = a->width; i < b->width; i++) {
    mask |= b->d[i];
  }
  for (int i = b->width; i < a->width; i++) {
    mask |= a->d[i];
  }

  // Common words must match.
  int min = a->width < b->width ? a->width : b->width;
  for (int i = 0; i < min; i++) {
    mask |= (a->d[i] ^ b->d[i]);
  }

  // The sign bit must match.
  mask |= (a->neg ^ b->neg);
  return mask == 0;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/ctx.cc.inc
================================================
/*
 * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the <...> header names on the following includes appear to
// have been stripped by the text extraction; verify against upstream.
#include
#include
#include
#include
#include

#include "../../internal.h"


// The frame-index array grows on demand; this is its initial allocation size.
#define BN_CTX_START_FRAMES 32

// BN_STACK

// A |BN_STACK| is a stack of |size_t| values.
typedef struct {
  // Array of indexes into |ctx->bignums|.
  size_t *indexes;
  // Number of stack frames, and the size of the allocated array
  size_t depth, size;
} BN_STACK;

static void BN_STACK_init(BN_STACK *);
static void BN_STACK_cleanup(BN_STACK *);
static int BN_STACK_push(BN_STACK *, size_t idx);
static size_t BN_STACK_pop(BN_STACK *);


// BN_CTX

DEFINE_STACK_OF(BIGNUM)

// The opaque BN_CTX type
struct bignum_ctx {
  // bignums is the stack of |BIGNUM|s managed by this |BN_CTX|.
  STACK_OF(BIGNUM) *bignums;
  // stack is the stack of |BN_CTX_start| frames. It is the value of |used| at
  // the time |BN_CTX_start| was called.
  BN_STACK stack;
  // used is the number of |BIGNUM|s from |bignums| that have been used.
  size_t used;
  // error is one if any operation on this |BN_CTX| failed. All subsequent
  // operations will fail.
  char error;
  // defer_error is one if an operation on this |BN_CTX| has failed, but no
  // error has been pushed to the queue yet. This is used to defer errors from
  // |BN_CTX_start| to |BN_CTX_get|.
  char defer_error;
};

// BN_CTX_new allocates an empty |BN_CTX|; |BIGNUM|s are created lazily by
// |BN_CTX_get|. Returns NULL on allocation failure.
BN_CTX *BN_CTX_new(void) {
  // NOTE(review): the template argument of this reinterpret_cast appears to
  // have been stripped by the text extraction; verify against upstream.
  BN_CTX *ret = reinterpret_cast(OPENSSL_malloc(sizeof(BN_CTX)));
  if (!ret) {
    return NULL;
  }

  // Initialise the structure
  ret->bignums = NULL;
  BN_STACK_init(&ret->stack);
  ret->used = 0;
  ret->error = 0;
  ret->defer_error = 0;
  return ret;
}

// BN_CTX_free releases |ctx| and every |BIGNUM| it owns.
void BN_CTX_free(BN_CTX *ctx) {
  // All |BN_CTX_start| calls must be matched with |BN_CTX_end|, otherwise the
  // function may use more memory than expected, potentially without bound if
  // done in a loop. Assert that all |BIGNUM|s have been released.
  if (ctx == nullptr) {
    return;
  }
  assert(ctx->used == 0 || ctx->error);
  sk_BIGNUM_pop_free(ctx->bignums, BN_free);
  BN_STACK_cleanup(&ctx->stack);
  OPENSSL_free(ctx);
}

// BN_CTX_start opens a new frame; temporaries obtained with |BN_CTX_get| are
// released by the matching |BN_CTX_end|.
void BN_CTX_start(BN_CTX *ctx) {
  if (ctx->error) {
    // Once an operation has failed, |ctx->stack| no longer matches the number
    // of |BN_CTX_end| calls to come. Do nothing.
    return;
  }

  if (!BN_STACK_push(&ctx->stack, ctx->used)) {
    ctx->error = 1;
    // |BN_CTX_start| cannot fail, so defer the error to |BN_CTX_get|.
    ctx->defer_error = 1;
  }
}

// BN_CTX_get returns a zeroed temporary |BIGNUM| owned by |ctx|, or NULL on
// failure. After the first NULL, all further calls also return NULL.
BIGNUM *BN_CTX_get(BN_CTX *ctx) {
  // Once any operation has failed, they all do.
  if (ctx->error) {
    if (ctx->defer_error) {
      OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_TEMPORARY_VARIABLES);
      ctx->defer_error = 0;
    }
    return NULL;
  }

  if (ctx->bignums == NULL) {
    ctx->bignums = sk_BIGNUM_new_null();
    if (ctx->bignums == NULL) {
      ctx->error = 1;
      return NULL;
    }
  }

  if (ctx->used == sk_BIGNUM_num(ctx->bignums)) {
    BIGNUM *bn = BN_new();
    if (bn == NULL || !sk_BIGNUM_push(ctx->bignums, bn)) {
      OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_TEMPORARY_VARIABLES);
      BN_free(bn);
      ctx->error = 1;
      return NULL;
    }
  }

  BIGNUM *ret = sk_BIGNUM_value(ctx->bignums, ctx->used);
  BN_zero(ret);

  // This is bounded by |sk_BIGNUM_num|, so it cannot overflow.
  ctx->used++;
  return ret;
}

// BN_CTX_end closes the innermost frame, releasing its temporaries for reuse.
void BN_CTX_end(BN_CTX *ctx) {
  if (ctx->error) {
    // Once an operation has failed, |ctx->stack| no longer matches the number
    // of |BN_CTX_end| calls to come. Do nothing.
    return;
  }

  ctx->used = BN_STACK_pop(&ctx->stack);
}


// BN_STACK

static void BN_STACK_init(BN_STACK *st) {
  st->indexes = NULL;
  st->depth = st->size = 0;
}

static void BN_STACK_cleanup(BN_STACK *st) { OPENSSL_free(st->indexes); }

// BN_STACK_push appends |idx|, growing the array by 3/2 (with overflow
// checks) when full. Returns one on success and zero on failure.
static int BN_STACK_push(BN_STACK *st, size_t idx) {
  if (st->depth == st->size) {
    // This function intentionally does not push to the error queue on error.
    // Error-reporting is deferred to |BN_CTX_get|.
    size_t new_size = st->size != 0 ? st->size * 3 / 2 : BN_CTX_START_FRAMES;
    if (new_size <= st->size || new_size > SIZE_MAX / sizeof(size_t)) {
      return 0;
    }
    // NOTE(review): the template argument of this reinterpret_cast appears to
    // have been stripped by the text extraction; verify against upstream.
    size_t *new_indexes = reinterpret_cast(
        OPENSSL_realloc(st->indexes, new_size * sizeof(size_t)));
    if (new_indexes == NULL) {
      return 0;
    }
    st->indexes = new_indexes;
    st->size = new_size;
  }

  st->indexes[st->depth] = idx;
  st->depth++;
  return 1;
}

// BN_STACK_pop removes and returns the most recently pushed index. The stack
// must be non-empty.
static size_t BN_STACK_pop(BN_STACK *st) {
  assert(st->depth > 0);
  st->depth--;
  return st->indexes[st->depth];
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/div.cc.inc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the <...> header names on the following includes appear to
// have been stripped by the text extraction; verify against upstream.
#include
#include
#include
#include

#include "internal.h"


// bn_div_words divides a double-width |h|,|l| by |d| and returns the result,
// which must fit in a |BN_ULONG|.
// bn_div_words divides the double-width value |h|:|l| by |d| via schoolbook
// division on half-words, returning the single-word quotient. Used only when
// neither inline assembly nor |BN_ULLONG| division is available.
static inline BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) {
  BN_ULONG dh, dl, q, ret = 0, th, tl, t;
  int i, count = 2;

  if (d == 0) {
    return BN_MASK2;
  }

  i = BN_num_bits_word(d);
  assert((i == BN_BITS2) || (h <= (BN_ULONG)1 << i));

  i = BN_BITS2 - i;
  if (h >= d) {
    h -= d;
  }

  if (i) {
    // Normalize so the divisor's most significant bit is set.
    d <<= i;
    h = (h << i) | (l >> (BN_BITS2 - i));
    l <<= i;
  }
  dh = (d & BN_MASK2h) >> BN_BITS4;
  dl = (d & BN_MASK2l);
  for (;;) {
    if ((h >> BN_BITS4) == dh) {
      q = BN_MASK2l;
    } else {
      q = h / dh;
    }

    th = q * dh;
    tl = dl * q;
    for (;;) {
      // Correct an over-estimated half-word quotient digit.
      t = h - th;
      if ((t & BN_MASK2h) ||
          ((tl) <= ((t << BN_BITS4) | ((l & BN_MASK2h) >> BN_BITS4)))) {
        break;
      }
      q--;
      th -= dh;
      tl -= dl;
    }

    t = (tl >> BN_BITS4);
    tl = (tl << BN_BITS4) & BN_MASK2h;
    th += t;

    if (l < tl) {
      th++;
    }
    l -= tl;
    if (h < th) {
      h += d;
      q--;
    }
    h -= th;

    if (--count == 0) {
      break;
    }

    ret = q << BN_BITS4;
    h = (h << BN_BITS4) | (l >> BN_BITS4);
    l = (l & BN_MASK2l) << BN_BITS4;
  }

  ret |= q;
  return ret;
}

// bn_div_rem_words computes the quotient and remainder of the double-width
// value |n0|:|n1| divided by |d0|, writing them to |*quotient_out| and
// |*rem_out|. The quotient must fit in one word.
static inline void bn_div_rem_words(BN_ULONG *quotient_out, BN_ULONG *rem_out,
                                    BN_ULONG n0, BN_ULONG n1, BN_ULONG d0) {
  // GCC and Clang generate function calls to |__udivdi3| and |__umoddi3| when
  // the |BN_ULLONG|-based C code is used.
  //
  // GCC bugs:
  //   * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=14224
  //   * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43721
  //   * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54183
  //   * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58897
  //   * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65668
  //
  // Clang bugs:
  //   * https://github.com/llvm/llvm-project/issues/6769
  //   * https://github.com/llvm/llvm-project/issues/12790
  //
  // This is specific to x86 and x86_64; Arm and RISC-V do not have double-wide
  // division instructions.
#if defined(BN_CAN_USE_INLINE_ASM) && defined(OPENSSL_X86)
  __asm__ volatile("divl %4"
                   : "=a"(*quotient_out), "=d"(*rem_out)
                   : "a"(n1), "d"(n0), "rm"(d0)
                   : "cc");
#elif defined(BN_CAN_USE_INLINE_ASM) && defined(OPENSSL_X86_64)
  __asm__ volatile("divq %4"
                   : "=a"(*quotient_out), "=d"(*rem_out)
                   : "a"(n1), "d"(n0), "rm"(d0)
                   : "cc");
#else
#if defined(BN_CAN_DIVIDE_ULLONG)
  BN_ULLONG n = (((BN_ULLONG)n0) << BN_BITS2) | n1;
  *quotient_out = (BN_ULONG)(n / d0);
#else
  *quotient_out = bn_div_words(n0, n1, d0);
#endif
  *rem_out = n1 - (*quotient_out * d0);
#endif
}

int BN_div(BIGNUM *quotient, BIGNUM *rem, const BIGNUM *numerator,
           const BIGNUM *divisor, BN_CTX *ctx) {
  // This function implements long division, per Knuth, The Art of Computer
  // Programming, Volume 2, Chapter 4.3.1, Algorithm D. This algorithm only
  // divides non-negative integers, but we round towards zero, so we divide
  // absolute values and adjust the signs separately.
  //
  // Inputs to this function are assumed public and may be leaked by timing and
  // cache side channels. Division with secret inputs should use other
  // implementation strategies such as Montgomery reduction.
  if (BN_is_zero(divisor)) {
    OPENSSL_PUT_ERROR(BN, BN_R_DIV_BY_ZERO);
    return 0;
  }

  BN_CTX_start(ctx);
  BIGNUM *tmp = BN_CTX_get(ctx);
  BIGNUM *snum = BN_CTX_get(ctx);
  BIGNUM *sdiv = BN_CTX_get(ctx);
  BIGNUM *res = quotient == NULL ? BN_CTX_get(ctx) : quotient;
  int norm_shift, num_n, loop, div_n;
  BN_ULONG d0, d1;
  if (tmp == NULL || snum == NULL || sdiv == NULL || res == NULL) {
    goto err;
  }

  // Knuth step D1: Normalise the numbers such that the divisor's MSB is set.
  // This ensures, in Knuth's terminology, that v1 >= b/2, needed for the
  // quotient estimation step.
  norm_shift = BN_BITS2 - (BN_num_bits(divisor) % BN_BITS2);
  if (!BN_lshift(sdiv, divisor, norm_shift) ||
      !BN_lshift(snum, numerator, norm_shift)) {
    goto err;
  }

  // This algorithm relies on |sdiv| being minimal width. We do not use this
  // function on secret inputs, so leaking this is fine. Also minimize |snum|
  // to avoid looping on leading zeros, as we're not trying to be leak-free.
  bn_set_minimal_width(sdiv);
  bn_set_minimal_width(snum);
  div_n = sdiv->width;
  d0 = sdiv->d[div_n - 1];
  d1 = (div_n == 1) ? 0 : sdiv->d[div_n - 2];
  assert(d0 & (((BN_ULONG)1) << (BN_BITS2 - 1)));

  // Extend |snum| with zeros to satisfy the long division invariants:
  // - |snum| must have at least |div_n| + 1 words.
  // - |snum|'s most significant word must be zero to guarantee the first loop
  //   iteration works with a prefix greater than |sdiv|. (This is the extra u0
  //   digit in Knuth step D1.)
  num_n = snum->width <= div_n ? div_n + 1 : snum->width + 1;
  if (!bn_resize_words(snum, num_n)) {
    goto err;
  }

  // Knuth step D2: The quotient's width is the difference between numerator
  // and denominator. Also set up its sign and size a temporary for the loop.
  loop = num_n - div_n;
  res->neg = snum->neg ^ sdiv->neg;
  if (!bn_wexpand(res, loop) ||  //
      !bn_wexpand(tmp, div_n + 1)) {
    goto err;
  }
  res->width = loop;

  // Knuth steps D2 through D7: Compute the quotient with a word-by-word long
  // division. Note that Knuth indexes words from most to least significant, so
  // our index is reversed. Each loop iteration computes res->d[i] of the
  // quotient and updates snum with the running remainder. Before each loop
  // iteration, the div_n words beginning at snum->d[i+1] must be less than
  // snum.
  for (int i = loop - 1; i >= 0; i--) {
    // The next word of the quotient, q, is floor(wnum / sdiv), where wnum is
    // the div_n + 1 words beginning at snum->d[i]. i starts at
    // num_n - div_n - 1, so there are at least div_n + 1 words available.
    //
    // Knuth step D3: Compute q', an estimate of q by looking at the top words
    // of wnum and sdiv. We must estimate such that q' = q or q' = q + 1.
    BN_ULONG q, rm = 0;
    BN_ULONG *wnum = snum->d + i;
    BN_ULONG n0 = wnum[div_n];
    BN_ULONG n1 = wnum[div_n - 1];
    if (n0 == d0) {
      // Estimate q' = b - 1, where b is the base.
      q = BN_MASK2;

      // Knuth also runs the fixup routine in this case, but this would require
      // computing rm and is unnecessary. q' is already close enough. That is,
      // the true quotient, q is either b - 1 or b - 2.
      //
      // By the loop invariant, q <= b - 1, so we must show that q >= b - 2. We
      // do this by showing wnum / sdiv >= b - 2. Suppose wnum / sdiv < b - 2.
      // wnum and sdiv have the same most significant word, so:
      //
      //   wnum >= n0 * b^div_n
      //   sdiv < (n0 + 1) * b^(d_div - 1)
      //
      // Thus:
      //
      //   b - 2 > wnum / sdiv
      //         > (n0 * b^div_n) / (n0 + 1) * b^(div_n - 1)
      //         = (n0 * b) / (n0 + 1)
      //
      //   (n0 + 1) * (b - 2) > n0 * b
      //   n0 * b + b - 2 * n0 - 2 > n0 * b
      //   b - 2 > 2 * n0
      //   b/2 - 1 > n0
      //
      // This contradicts the normalization condition, so q >= b - 2 and our
      // estimate is close enough.
    } else {
      // Estimate q' = floor(n0n1 / d0). Per Theorem B, q' - 2 <= q <= q',
      // which is slightly outside of our bounds.
      assert(n0 < d0);
      bn_div_rem_words(&q, &rm, n0, n1, d0);

      // Fix the estimate by examining one more word and adjusting q' as
      // needed. This is the second half of step D3 and is sufficient per
      // exercises 19, 20, and 21. Although only one iteration is needed to
      // correct q + 2 to q + 1, Knuth uses a loop. A loop will often also
      // correct q + 1 to q, saving the slightly more expensive underflow
      // handling below.
      if (div_n > 1) {
        BN_ULONG n2 = wnum[div_n - 2];
#ifdef BN_ULLONG
        BN_ULLONG t2 = (BN_ULLONG)d1 * q;
        for (;;) {
          if (t2 <= ((((BN_ULLONG)rm) << BN_BITS2) | n2)) {
            break;
          }
          q--;
          rm += d0;
          if (rm < d0) {
            // If rm overflows, the true value exceeds BN_ULONG and the next
            // t2 comparison should exit the loop.
            break;
          }
          t2 -= d1;
        }
#else   // !BN_ULLONG
        BN_ULONG t2l, t2h;
        BN_UMULT_LOHI(t2l, t2h, d1, q);
        for (;;) {
          if (t2h < rm || (t2h == rm && t2l <= n2)) {
            break;
          }
          q--;
          rm += d0;
          if (rm < d0) {
            // If rm overflows, the true value exceeds BN_ULONG and the next
            // t2 comparison should exit the loop.
            break;
          }
          if (t2l < d1) {
            t2h--;
          }
          t2l -= d1;
        }
#endif  // !BN_ULLONG
      }
    }

    // Knuth step D4 through D6: Now q' = q or q' = q + 1, and
    // -sdiv < wnum - sdiv * q < sdiv. If q' = q + 1, the subtraction will
    // underflow, and we fix it up below.
    tmp->d[div_n] = bn_mul_words(tmp->d, sdiv->d, div_n, q);
    if (bn_sub_words(wnum, wnum, tmp->d, div_n + 1)) {
      q--;
      // The final addition is expected to overflow, canceling the underflow.
      wnum[div_n] += bn_add_words(wnum, wnum, sdiv->d, div_n);
    }

    // q is now correct, and wnum has been updated to the running remainder.
    res->d[i] = q;
  }

  // Trim leading zeros and correct any negative zeros.
  bn_set_minimal_width(snum);
  bn_set_minimal_width(res);

  // Knuth step D8: Unnormalize. snum now contains the remainder.
  if (rem != NULL && !BN_rshift(rem, snum, norm_shift)) {
    goto err;
  }

  BN_CTX_end(ctx);
  return 1;

err:
  BN_CTX_end(ctx);
  return 0;
}

// BN_nnmod computes |r| = |m| mod |d| with a non-negative result, correcting
// the |BN_mod| result by one addition of |d| when it comes back negative.
int BN_nnmod(BIGNUM *r, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx) {
  if (!(BN_mod(r, m, d, ctx))) {
    return 0;
  }
  if (!r->neg) {
    return 1;
  }

  // now -d < r < 0, so we have to set r := r + d. Ignoring the sign bits, this
  // is r = d - r.
  return BN_usub(r, d, r);
}

BN_ULONG bn_reduce_once(BN_ULONG *r, const BN_ULONG *a, BN_ULONG carry,
                        const BN_ULONG *m, size_t num) {
  assert(r != a);
  // |r| = |a| - |m|. |bn_sub_words| performs the bulk of the subtraction, and
  // then we apply the borrow to |carry|.
  carry -= bn_sub_words(r, a, m, num);
  // We know 0 <= |a| < 2*|m|, so -|m| <= |r| < |m|.
  //
  // If 0 <= |r| < |m|, |r| fits in |num| words and |carry| is zero. We then
  // wish to select |r| as the answer. Otherwise -m <= r < 0 and we wish to
  // return |r| + |m|, or |a|. |carry| must then be -1 or all ones. In both
  // cases, |carry| is a suitable input to |bn_select_words|.
  //
  // Although |carry| may be one if it was one on input and |bn_sub_words|
  // returns zero, this would give |r| > |m|, violating our input assumptions.
  declassify_assert(carry + 1 <= 1);
  bn_select_words(r, carry, a /* r < 0 */, r /* r >= 0 */, num);
  return carry;
}

BN_ULONG bn_reduce_once_in_place(BN_ULONG *r, BN_ULONG carry, const BN_ULONG *m,
                                 BN_ULONG *tmp, size_t num) {
  // See |bn_reduce_once| for why this logic works.
  carry -= bn_sub_words(tmp, r, m, num);
  declassify_assert(carry + 1 <= 1);
  bn_select_words(r, carry, r /* tmp < 0 */, tmp /* tmp >= 0 */, num);
  return carry;
}

// bn_mod_sub_words computes |r| = |a| - |b| (mod |m|) on |num|-word arrays,
// using |tmp| as scratch and a constant-time select to apply the correction.
void bn_mod_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      const BN_ULONG *m, BN_ULONG *tmp, size_t num) {
  // r = a - b
  BN_ULONG borrow = bn_sub_words(r, a, b, num);
  // tmp = a - b + m
  bn_add_words(tmp, r, m, num);
  bn_select_words(r, 0 - borrow, tmp /* r < 0 */, r /* r >= 0 */, num);
}

// bn_mod_add_words computes |r| = |a| + |b| (mod |m|) on |num|-word arrays,
// reducing the carried sum once with |bn_reduce_once_in_place|.
void bn_mod_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      const BN_ULONG *m, BN_ULONG *tmp, size_t num) {
  BN_ULONG carry = bn_add_words(r, a, b, num);
  bn_reduce_once_in_place(r, carry, m, tmp, num);
}

int bn_div_consttime(BIGNUM *quotient, BIGNUM *remainder,
                     const BIGNUM *numerator, const BIGNUM *divisor,
                     unsigned divisor_min_bits, BN_CTX *ctx) {
  if (BN_is_negative(numerator) || BN_is_negative(divisor)) {
    OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER);
    return 0;
  }
  if (BN_is_zero(divisor)) {
    OPENSSL_PUT_ERROR(BN, BN_R_DIV_BY_ZERO);
    return 0;
  }

  // This function implements long division in binary. It is not very
  // efficient, but it is simple, easy to make constant-time, and performant
  // enough for RSA key generation.

  int ret = 0;
  BN_CTX_start(ctx);
  BIGNUM *q = quotient, *r = remainder;
  // Use scratch outputs when the caller's outputs alias an input (or are
  // absent), so the loop below may write freely.
  if (quotient == NULL || quotient == numerator || quotient == divisor) {
    q = BN_CTX_get(ctx);
  }
  if (remainder == NULL || remainder == numerator || remainder == divisor) {
    r = BN_CTX_get(ctx);
  }
  BIGNUM *tmp = BN_CTX_get(ctx);
  int initial_words;
  if (q == NULL || r == NULL || tmp == NULL ||
      !bn_wexpand(q, numerator->width) || !bn_wexpand(r, divisor->width) ||
      !bn_wexpand(tmp, divisor->width)) {
    goto err;
  }

  OPENSSL_memset(q->d, 0, numerator->width * sizeof(BN_ULONG));
  q->width = numerator->width;
  q->neg = 0;
  OPENSSL_memset(r->d, 0, divisor->width * sizeof(BN_ULONG));
  r->width = divisor->width;
  r->neg = 0;

  // Incorporate |numerator| into |r|, one bit at a time, reducing after each
  // step. We maintain the invariant that |0 <= r < divisor| and
  // |q * divisor + r = n| where |n| is the portion of |numerator| incorporated
  // so far.
  //
  // First, we short-circuit the loop: if we know |divisor| has at least
  // |divisor_min_bits| bits, the top |divisor_min_bits - 1| can be
  // incorporated without reductions. This significantly speeds up
  // |RSA_check_key|. For simplicity, we round down to a whole number of words.
  declassify_assert(divisor_min_bits <= BN_num_bits(divisor));
  initial_words = 0;
  if (divisor_min_bits > 0) {
    initial_words = (divisor_min_bits - 1) / BN_BITS2;
    if (initial_words > numerator->width) {
      initial_words = numerator->width;
    }
    OPENSSL_memcpy(r->d, numerator->d + numerator->width - initial_words,
                   initial_words * sizeof(BN_ULONG));
  }

  for (int i = numerator->width - initial_words - 1; i >= 0; i--) {
    for (int bit = BN_BITS2 - 1; bit >= 0; bit--) {
      // Incorporate the next bit of the numerator, by computing
      // r = 2*r or 2*r + 1. Note the result fits in one more word. We store
      // the extra word in |carry|.
      BN_ULONG carry = bn_add_words(r->d, r->d, r->d, divisor->width);
      r->d[0] |= (numerator->d[i] >> bit) & 1;
      // |r| was previously fully-reduced, so we know:
      //      2*0 <= r <= 2*(divisor-1) + 1
      //         0 <= r <= 2*divisor - 1 < 2*divisor.
      // Thus |r| satisfies the preconditions for |bn_reduce_once_in_place|.
      BN_ULONG subtracted = bn_reduce_once_in_place(r->d, carry, divisor->d,
                                                    tmp->d, divisor->width);
      // The corresponding bit of the quotient is set iff we needed to
      // subtract.
      q->d[i] |= (~subtracted & 1) << bit;
    }
  }

  if ((quotient != NULL && !BN_copy(quotient, q)) ||
      (remainder != NULL && !BN_copy(remainder, r))) {
    goto err;
  }

  ret = 1;

err:
  BN_CTX_end(ctx);
  return ret;
}

// bn_scratch_space_from_ctx returns a |BIGNUM| from |ctx| expanded to |width|
// words, with its sign cleared and width set, or NULL on error.
static BIGNUM *bn_scratch_space_from_ctx(size_t width, BN_CTX *ctx) {
  BIGNUM *ret = BN_CTX_get(ctx);
  if (ret == NULL || !bn_wexpand(ret, width)) {
    return NULL;
  }
  ret->neg = 0;
  ret->width = (int)width;
  return ret;
}

// bn_resized_from_ctx returns |bn| with width at least |width| or NULL on
// error. This is so it may be used with low-level "words" functions. If
// necessary, it allocates a new |BIGNUM| with a lifetime of the current scope
// in |ctx|, so the caller does not need to explicitly free it. |bn| must fit
// in |width| words.
static const BIGNUM *bn_resized_from_ctx(const BIGNUM *bn, size_t width,
                                         BN_CTX *ctx) {
  if ((size_t)bn->width >= width) {
    // Any excess words must be zero.
    assert(bn_fits_in_words(bn, width));
    return bn;
  }
  BIGNUM *ret = bn_scratch_space_from_ctx(width, ctx);
  if (ret == NULL || !BN_copy(ret, bn) || !bn_resize_words(ret, width)) {
    return NULL;
  }
  return ret;
}

// BN_mod_add computes |r| = |a| + |b| (mod |m|). Not constant-time.
int BN_mod_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m,
               BN_CTX *ctx) {
  if (!BN_add(r, a, b)) {
    return 0;
  }
  return BN_nnmod(r, r, m, ctx);
}

// BN_mod_add_quick delegates to |bn_mod_add_consttime| with a temporary
// |BN_CTX|.
int BN_mod_add_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                     const BIGNUM *m) {
  BN_CTX *ctx = BN_CTX_new();
  int ok = ctx != NULL && bn_mod_add_consttime(r, a, b, m, ctx);
  BN_CTX_free(ctx);
  return ok;
}

// bn_mod_add_consttime computes |r| = |a| + |b| (mod |m|) in constant time,
// resizing the inputs to |m|'s width and using word-level helpers.
int bn_mod_add_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                         const BIGNUM *m, BN_CTX *ctx) {
  BN_CTX_start(ctx);
  a = bn_resized_from_ctx(a, m->width, ctx);
  b = bn_resized_from_ctx(b, m->width, ctx);
  BIGNUM *tmp = bn_scratch_space_from_ctx(m->width, ctx);
  int ok = a != NULL && b != NULL && tmp != NULL && bn_wexpand(r, m->width);
  if (ok) {
    bn_mod_add_words(r->d, a->d, b->d, m->d, tmp->d, m->width);
    r->width = m->width;
    r->neg = 0;
  }
  BN_CTX_end(ctx);
  return ok;
}

// BN_mod_sub computes |r| = |a| - |b| (mod |m|). Not constant-time.
int BN_mod_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m,
               BN_CTX *ctx) {
  if (!BN_sub(r, a, b)) {
    return 0;
  }
  return BN_nnmod(r, r, m, ctx);
}

// bn_mod_sub_consttime computes |r| = |a| - |b| (mod |m|) in constant time,
// mirroring |bn_mod_add_consttime|.
int bn_mod_sub_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                         const BIGNUM *m, BN_CTX *ctx) {
  BN_CTX_start(ctx);
  a = bn_resized_from_ctx(a, m->width, ctx);
  b = bn_resized_from_ctx(b, m->width, ctx);
  BIGNUM *tmp = bn_scratch_space_from_ctx(m->width, ctx);
  int ok = a != NULL && b != NULL && tmp != NULL && bn_wexpand(r, m->width);
  if (ok) {
    bn_mod_sub_words(r->d, a->d, b->d, m->d, tmp->d, m->width);
    r->width = m->width;
    r->neg = 0;
  }
  BN_CTX_end(ctx);
  return ok;
}

// BN_mod_sub_quick delegates to |bn_mod_sub_consttime| with a temporary
// |BN_CTX|.
int BN_mod_sub_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                     const BIGNUM *m) {
  BN_CTX *ctx = BN_CTX_new();
  int ok = ctx != NULL && bn_mod_sub_consttime(r, a, b, m, ctx);
  BN_CTX_free(ctx);
  return ok;
}

// BN_mod_mul computes |r| = |a| * |b| (mod |m|), squaring when |a| == |b|.
// Not constant-time.
int BN_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m,
               BN_CTX *ctx) {
  BIGNUM *t;
  int ret = 0;

  BN_CTX_start(ctx);
  t = BN_CTX_get(ctx);
  if (t == NULL) {
    goto err;
  }

  if (a == b) {
    if (!BN_sqr(t, a, ctx)) {
      goto err;
    }
  } else {
    if (!BN_mul(t, a, b, ctx)) {
      goto err;
    }
  }

  if (!BN_nnmod(r, t, m, ctx)) {
    goto err;
  }

  ret = 1;

err:
  BN_CTX_end(ctx);
  return ret;
}

// BN_mod_sqr computes |r| = |a|^2 (mod |m|). Not constant-time.
int BN_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx) {
  if (!BN_sqr(r, a, ctx)) {
    return 0;
  }

  // r->neg == 0, thus we don't need BN_nnmod
  return BN_mod(r, r, m, ctx);
}

// BN_mod_lshift computes |r| = |a| << n (mod |m|), tolerating a negative |m|
// by duplicating it with the sign cleared.
int BN_mod_lshift(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m,
                  BN_CTX *ctx) {
  BIGNUM *abs_m = NULL;
  int ret;

  if (!BN_nnmod(r, a, m, ctx)) {
    return 0;
  }

  if (m->neg) {
    abs_m = BN_dup(m);
    if (abs_m == NULL) {
      return 0;
    }
    abs_m->neg = 0;
  }

  ret = bn_mod_lshift_consttime(r, r, n, (abs_m ? abs_m : m), ctx);

  BN_free(abs_m);
  return ret;
}

// bn_mod_lshift_consttime computes |r| = |a| << n (mod |m|) by |n| repeated
// constant-time modular doublings.
int bn_mod_lshift_consttime(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m,
                            BN_CTX *ctx) {
  if (!BN_copy(r, a) || !bn_resize_words(r, m->width)) {
    return 0;
  }
  BN_CTX_start(ctx);
  BIGNUM *tmp = bn_scratch_space_from_ctx(m->width, ctx);
  int ok = tmp != NULL;
  if (ok) {
    for (int i = 0; i < n; i++) {
      bn_mod_add_words(r->d, r->d, r->d, m->d, tmp->d, m->width);
    }
    r->neg = 0;
  }
  BN_CTX_end(ctx);
  return ok;
}

// BN_mod_lshift_quick delegates to |bn_mod_lshift_consttime| with a temporary
// |BN_CTX|.
int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m) {
  BN_CTX *ctx = BN_CTX_new();
  int ok = ctx != NULL && bn_mod_lshift_consttime(r, a, n, m, ctx);
  BN_CTX_free(ctx);
  return ok;
}

// BN_mod_lshift1 computes |r| = 2 * |a| (mod |m|). Not constant-time.
int BN_mod_lshift1(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx) {
  if (!BN_lshift1(r, a)) {
    return 0;
  }

  return BN_nnmod(r, r, m, ctx);
}

// bn_mod_lshift1_consttime doubles |a| (mod |m|) via one constant-time
// modular addition.
int bn_mod_lshift1_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *m,
                             BN_CTX *ctx) {
  return bn_mod_add_consttime(r, a, a, m, ctx);
}

// BN_mod_lshift1_quick delegates to |bn_mod_lshift1_consttime| with a
// temporary |BN_CTX|.
int BN_mod_lshift1_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *m) {
  BN_CTX *ctx = BN_CTX_new();
  int ok = ctx != NULL && bn_mod_lshift1_consttime(r, a, m, ctx);
  BN_CTX_free(ctx);
  return ok;
}

// BN_div_word divides |a| in place by the single word |w| and returns the
// remainder, or (BN_ULONG)-1 on error.
BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w) {
  BN_ULONG ret = 0;
  int i, j;

  if (!w) {
    // actually this is an error (division by zero)
    return (BN_ULONG)-1;
  }

  if (a->width == 0) {
    return 0;
  }

  // normalize input for |bn_div_rem_words|.
  j = BN_BITS2 - BN_num_bits_word(w);
  w <<= j;
  if (!BN_lshift(a, a, j)) {
    return (BN_ULONG)-1;
  }

  for (i = a->width - 1; i >= 0; i--) {
    BN_ULONG l = a->d[i];
    BN_ULONG d;
    BN_ULONG unused_rem;
    bn_div_rem_words(&d, &unused_rem, ret, l, w);
    ret = l - (d * w);
    a->d[i] = d;
  }

  bn_set_minimal_width(a);
  // Undo the normalization shift on the remainder.
  ret >>= j;
  return ret;
}

// BN_mod_word returns |a| mod |w|, or (BN_ULONG)-1 on error.
BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w) {
#ifndef BN_CAN_DIVIDE_ULLONG
  BN_ULONG ret = 0;
#else
  BN_ULLONG ret = 0;
#endif
  int i;

  if (w == 0) {
    return (BN_ULONG)-1;
  }

#ifndef BN_CAN_DIVIDE_ULLONG
  // If |w| is too long and we don't have |BN_ULLONG| division then we need to
  // fall back to using |BN_div_word|.
  if (w > ((BN_ULONG)1 << BN_BITS4)) {
    BIGNUM *tmp = BN_dup(a);
    if (tmp == NULL) {
      return (BN_ULONG)-1;
    }
    ret = BN_div_word(tmp, w);
    BN_free(tmp);
    return ret;
  }
#endif

  for (i = a->width - 1; i >= 0; i--) {
#ifndef BN_CAN_DIVIDE_ULLONG
    ret = ((ret << BN_BITS4) | ((a->d[i] >> BN_BITS4) & BN_MASK2l)) % w;
    ret = ((ret << BN_BITS4) | (a->d[i] & BN_MASK2l)) % w;
#else
    ret = (BN_ULLONG)(((ret << (BN_ULLONG)BN_BITS2) | a->d[i]) % (BN_ULLONG)w);
#endif
  }
  return (BN_ULONG)ret;
}

// BN_mod_pow2 computes |r| = |a| mod 2^|e| by truncating |a| to |e| bits. The
// sign of |a| is preserved.
int BN_mod_pow2(BIGNUM *r, const BIGNUM *a, size_t e) {
  if (e == 0 || a->width == 0) {
    BN_zero(r);
    return 1;
  }

  size_t num_words = 1 + ((e - 1) / BN_BITS2);

  // If |a| definitely has less than |e| bits, just BN_copy.
  if ((size_t)a->width < num_words) {
    return BN_copy(r, a) != NULL;
  }

  // Otherwise, first make sure we have enough space in |r|.
  // Note that this will fail if num_words > INT_MAX.
  if (!bn_wexpand(r, num_words)) {
    return 0;
  }

  // Copy the content of |a| into |r|.
  OPENSSL_memcpy(r->d, a->d, num_words * sizeof(BN_ULONG));

  // If |e| isn't word-aligned, we have to mask off some of our bits.
  size_t top_word_exponent = e % (sizeof(BN_ULONG) * 8);
  if (top_word_exponent != 0) {
    r->d[num_words - 1] &= (((BN_ULONG)1) << top_word_exponent) - 1;
  }

  // Fill in the remaining fields of |r|.
  r->neg = a->neg;
  r->width = (int)num_words;
  bn_set_minimal_width(r);
  return 1;
}

// BN_nnmod_pow2 computes |r| = |a| mod 2^|e| with a non-negative result,
// using the twos-complement identity when |BN_mod_pow2| yields a negative
// value.
int BN_nnmod_pow2(BIGNUM *r, const BIGNUM *a, size_t e) {
  if (!BN_mod_pow2(r, a, e)) {
    return 0;
  }

  // If the returned value was non-negative, we're done.
  if (BN_is_zero(r) || !r->neg) {
    return 1;
  }

  size_t num_words = 1 + (e - 1) / BN_BITS2;

  // Expand |r| to the size of our modulus.
  if (!bn_wexpand(r, num_words)) {
    return 0;
  }

  // Clear the upper words of |r|.
  OPENSSL_memset(&r->d[r->width], 0, (num_words - r->width) * BN_BYTES);

  // Set parameters of |r|.
  r->neg = 0;
  r->width = (int)num_words;

  // Now, invert every word. The idea here is that we want to compute 2^e-|x|,
  // which is actually equivalent to the twos-complement representation of |x|
  // in |e| bits, which is -x = ~x + 1.
  for (int i = 0; i < r->width; i++) {
    r->d[i] = ~r->d[i];
  }

  // If our exponent doesn't span the top word, we have to mask the rest.
  size_t top_word_exponent = e % BN_BITS2;
  if (top_word_exponent != 0) {
    r->d[r->width - 1] &= (((BN_ULONG)1) << top_word_exponent) - 1;
  }

  // Keep the minimal-width invariant for |BIGNUM|.
  bn_set_minimal_width(r);

  // Finally, add one, for the reason described above.
  return BN_add(r, r, BN_value_one());
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/div_extra.cc.inc
================================================
/* Copyright 2018 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the angle-bracket arguments of the following #include
// directives appear to have been stripped during extraction — confirm against
// upstream (likely <assert.h> and <stdlib.h> or similar).
#include
#include

#include "internal.h"


// The following functions use a Barrett reduction variant to avoid leaking
// the numerator. See
// http://ridiculousfish.com/blog/posts/labor-of-division-episode-i.html
//
// We use 32-bit numerator and 16-bit divisor for simplicity. This allows
// computing |m| and |q| without architecture-specific code.

// mod_u16 returns |n| mod |d|. |p| and |m| are the "magic numbers" for |d|
// (see reference). For proof of correctness in Coq, see
// https://github.com/davidben/fiat-crypto/blob/barrett/src/Arithmetic/BarrettReduction/RidiculousFish.v
// Note the Coq version of |mod_u16| additionally includes the computation of
// |p| and |m| from |bn_mod_u16_consttime| below.
static uint16_t mod_u16(uint32_t n, uint16_t d, uint32_t p, uint32_t m) {
  // Compute floor(n/d) per steps 3 through 5.
  uint32_t q = ((uint64_t)m * n) >> 32;
  // Note there is a typo in the reference. We right-shift by one, not two.
  uint32_t t = ((n - q) >> 1) + q;
  t = t >> (p - 1);

  // Multiply and subtract to get the remainder.
  n -= d * t;
  declassify_assert(n < d);
  return n;
}

// shift_and_add_mod_u16 returns |r| * 2^32 + |a| mod |d|. |p| and |m| are the
// "magic numbers" for |d| (see reference).
static uint16_t shift_and_add_mod_u16(uint16_t r, uint32_t a, uint16_t d,
                                      uint32_t p, uint32_t m) {
  // Incorporate |a| in two 16-bit chunks.
  uint32_t t = r;
  t <<= 16;
  t |= a >> 16;
  t = mod_u16(t, d, p, m);

  t <<= 16;
  t |= a & 0xffff;
  t = mod_u16(t, d, p, m);
  return t;
}

// bn_mod_u16_consttime returns |bn| mod |d| without leaking |bn|'s words
// through timing. |d| and the derived magic numbers are treated as public.
uint16_t bn_mod_u16_consttime(const BIGNUM *bn, uint16_t d) {
  if (d <= 1) {
    return 0;
  }

  // Compute the "magic numbers" for |d|. See steps 1 and 2.
  // This computes p = ceil(log_2(d)).
  uint32_t p = BN_num_bits_word(d - 1);

  // This operation is not constant-time, but |p| and |d| are public values.
  // Note that |p| is at most 16, so the computation fits in |uint64_t|.
  assert(p <= 16);
  uint32_t m = (uint32_t)(((UINT64_C(1) << (32 + p)) + d - 1) / d);

  uint16_t ret = 0;
  for (int i = bn->width - 1; i >= 0; i--) {
#if BN_BITS2 == 32
    ret = shift_and_add_mod_u16(ret, bn->d[i], d, p, m);
#elif BN_BITS2 == 64
    ret = shift_and_add_mod_u16(ret, bn->d[i] >> 32, d, p, m);
    ret = shift_and_add_mod_u16(ret, bn->d[i] & 0xffffffff, d, p, m);
#else
#error "Unknown BN_ULONG size"
#endif
  }
  return ret;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/exponentiation.cc.inc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the angle-bracket arguments of the following #include
// directives appear to have been stripped during extraction — confirm against
// upstream.
#include
#include
#include
#include
#include
#include
#include
#include "internal.h"
#include "rsaz_exp.h"


#if defined(OPENSSL_BN_ASM_MONT5)

// bn_mul_mont_gather5 multiples loads index |power| of |table|, multiplies it
// by |ap| modulo |np|, and stores the result in |rp|. The values are |num|
// words long and represented in Montgomery form. |n0| is a pointer to the
// corresponding field in |BN_MONT_CTX|. |table| must be aligned to at least
// 16 bytes. |power| must be less than 32 and is treated as secret.
//
// WARNING: This function implements Almost Montgomery Multiplication from
// https://eprint.iacr.org/2011/239. The inputs do not need to be fully
// reduced. However, even if they are fully reduced, the output may not be.
// Dispatches to the fastest available assembly implementation (MULX-based,
// 4x-unrolled, or generic) based on CPU capability and |num|.
static void bn_mul_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap,
                                const BN_ULONG *table, const BN_ULONG *np,
                                const BN_ULONG *n0, int num, int power) {
  if (bn_mulx4x_mont_gather5_capable(num)) {
    bn_mulx4x_mont_gather5(rp, ap, table, np, n0, num, power);
  } else if (bn_mul4x_mont_gather5_capable(num)) {
    bn_mul4x_mont_gather5(rp, ap, table, np, n0, num, power);
  } else {
    bn_mul_mont_gather5_nohw(rp, ap, table, np, n0, num, power);
  }
}

// bn_power5 squares |ap| five times and multiplies it by the value stored at
// index |power| of |table|, modulo |np|. It stores the result in |rp|. The
// values are |num| words long and represented in Montgomery form. |n0| is a
// pointer to the corresponding field in |BN_MONT_CTX|. |num| must be divisible
// by 8. |power| must be less than 32 and is treated as secret.
//
// WARNING: This function implements Almost Montgomery Multiplication from
// https://eprint.iacr.org/2011/239. The inputs do not need to be fully
// reduced. However, even if they are fully reduced, the output may not be.
// Dispatches between the MULX-based and generic assembly implementations of
// the fused five-squarings-plus-gathered-multiply primitive.
static void bn_power5(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *table,
                      const BN_ULONG *np, const BN_ULONG *n0, int num,
                      int power) {
  assert(bn_power5_capable(num));
  if (bn_powerx5_capable(num)) {
    bn_powerx5(rp, ap, table, np, n0, num, power);
  } else {
    bn_power5_nohw(rp, ap, table, np, n0, num, power);
  }
}

#endif  // defined(OPENSSL_BN_ASM_MONT5)

// BN_exp computes |r| = |a|^|p| (no modulus) by square-and-multiply over the
// bits of |p|. Not constant-time; inputs are treated as public.
int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) {
  int i, bits, ret = 0;
  BIGNUM *v, *rr;

  BN_CTX_start(ctx);
  // Use a scratch result when |r| aliases an input.
  if (r == a || r == p) {
    rr = BN_CTX_get(ctx);
  } else {
    rr = r;
  }
  v = BN_CTX_get(ctx);
  if (rr == NULL || v == NULL) {
    goto err;
  }

  if (BN_copy(v, a) == NULL) {
    goto err;
  }
  bits = BN_num_bits(p);

  if (BN_is_odd(p)) {
    if (BN_copy(rr, a) == NULL) {
      goto err;
    }
  } else {
    if (!BN_one(rr)) {
      goto err;
    }
  }

  for (i = 1; i < bits; i++) {
    if (!BN_sqr(v, v, ctx)) {
      goto err;
    }
    if (BN_is_bit_set(p, i)) {
      if (!BN_mul(rr, rr, v, ctx)) {
        goto err;
      }
    }
  }

  if (r != rr && !BN_copy(r, rr)) {
    goto err;
  }
  ret = 1;

err:
  BN_CTX_end(ctx);
  return ret;
}

namespace {
// BN_RECP_CTX caches the reciprocal of a divisor |N| for repeated reductions
// by |N| (see |BN_div_recp| below).
typedef struct bn_recp_ctx_st {
  BIGNUM N;   // the divisor
  BIGNUM Nr;  // the reciprocal
  int num_bits;
  int shift;
  int flags;
} BN_RECP_CTX;
}  // namespace

// BN_RECP_CTX_init zero-initializes |recp|.
static void BN_RECP_CTX_init(BN_RECP_CTX *recp) {
  BN_init(&recp->N);
  BN_init(&recp->Nr);
  recp->num_bits = 0;
  recp->shift = 0;
  recp->flags = 0;
}

// BN_RECP_CTX_free releases the BIGNUMs owned by |recp|. A NULL |recp| is a
// no-op.
static void BN_RECP_CTX_free(BN_RECP_CTX *recp) {
  if (recp == nullptr) {
    return;
  }

  BN_free(&recp->N);
  BN_free(&recp->Nr);
}

// BN_RECP_CTX_set records |d| as the divisor in |recp| and resets the cached
// reciprocal, to be computed lazily by |BN_div_recp|.
static int BN_RECP_CTX_set(BN_RECP_CTX *recp, const BIGNUM *d, BN_CTX *ctx) {
  if (!BN_copy(&(recp->N), d)) {
    return 0;
  }
  BN_zero(&recp->Nr);
  recp->num_bits = BN_num_bits(d);
  recp->shift = 0;
  return 1;
}

// len is the expected size of the result. We actually calculate with an extra
// word of precision, so we can do faster division if the remainder is not
// required.
// r := 2^len / m
static int BN_reciprocal(BIGNUM *r, const BIGNUM *m, int len, BN_CTX *ctx) {
  int ret = -1;
  BIGNUM *t;

  BN_CTX_start(ctx);
  t = BN_CTX_get(ctx);
  if (t == NULL) {
    goto err;
  }

  if (!BN_set_bit(t, len)) {
    goto err;
  }

  if (!BN_div(r, NULL, t, m, ctx)) {
    goto err;
  }

  ret = len;

err:
  BN_CTX_end(ctx);
  return ret;
}

// BN_div_recp computes |dv| = |m| / N and |rem| = |m| mod N, where N is the
// divisor cached in |recp|, using the cached (lazily computed) reciprocal
// instead of a full division. Either output may be NULL.
static int BN_div_recp(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m,
                       BN_RECP_CTX *recp, BN_CTX *ctx) {
  int i, j, ret = 0;
  BIGNUM *a, *b, *d, *r;

  BN_CTX_start(ctx);
  a = BN_CTX_get(ctx);
  b = BN_CTX_get(ctx);
  if (dv != NULL) {
    d = dv;
  } else {
    d = BN_CTX_get(ctx);
  }

  if (rem != NULL) {
    r = rem;
  } else {
    r = BN_CTX_get(ctx);
  }

  if (a == NULL || b == NULL || d == NULL || r == NULL) {
    goto err;
  }

  if (BN_ucmp(m, &recp->N) < 0) {
    // |m| is already reduced; quotient is zero.
    BN_zero(d);
    if (!BN_copy(r, m)) {
      goto err;
    }
    BN_CTX_end(ctx);
    return 1;
  }

  // We want the remainder
  // Given input of ABCDEF / ab
  // we need multiply ABCDEF by 3 digests of the reciprocal of ab

  // i := max(BN_num_bits(m), 2*BN_num_bits(N))
  i = BN_num_bits(m);
  j = recp->num_bits << 1;
  if (j > i) {
    i = j;
  }

  // Nr := round(2^i / N)
  if (i != recp->shift) {
    recp->shift = BN_reciprocal(&(recp->Nr), &(recp->N), i, ctx);
    // BN_reciprocal returns i, or -1 for an error
  }

  if (recp->shift == -1) {
    goto err;
  }

  // d := |round(round(m / 2^BN_num_bits(N)) * recp->Nr / 2^(i -
  //      BN_num_bits(N)))|
  //    = |round(round(m / 2^BN_num_bits(N)) * round(2^i / N) / 2^(i -
  //      BN_num_bits(N)))|
  //   <= |(m / 2^BN_num_bits(N)) * (2^i / N) * (2^BN_num_bits(N) / 2^i)|
  //    = |m/N|
  if (!BN_rshift(a, m, recp->num_bits)) {
    goto err;
  }
  if (!BN_mul(b, a, &(recp->Nr), ctx)) {
    goto err;
  }
  if (!BN_rshift(d, b, i - recp->num_bits)) {
    goto err;
  }
  d->neg = 0;

  if (!BN_mul(b, &(recp->N), d, ctx)) {
    goto err;
  }
  if (!BN_usub(r, m, b)) {
    goto err;
  }
  r->neg = 0;

  // The estimate may be off by a small amount; correct with at most a few
  // subtractions, and flag a bad reciprocal if it takes too many.
  j = 0;
  while (BN_ucmp(r, &(recp->N)) >= 0) {
    if (j++ > 2) {
      OPENSSL_PUT_ERROR(BN, BN_R_BAD_RECIPROCAL);
      goto err;
    }
    if (!BN_usub(r, r, &(recp->N))) {
      goto err;
    }
    if (!BN_add_word(d, 1)) {
      goto err;
    }
  }

  r->neg = BN_is_zero(r) ? 0 : m->neg;
  d->neg = m->neg ^ recp->N.neg;
  ret = 1;

err:
  BN_CTX_end(ctx);
  return ret;
}

// BN_mod_mul_reciprocal computes |r| = |x| * |y| mod N (the divisor cached in
// |recp|), or |x| mod N when |y| is NULL.
static int BN_mod_mul_reciprocal(BIGNUM *r, const BIGNUM *x, const BIGNUM *y,
                                 BN_RECP_CTX *recp, BN_CTX *ctx) {
  int ret = 0;
  BIGNUM *a;
  const BIGNUM *ca;

  BN_CTX_start(ctx);
  a = BN_CTX_get(ctx);
  if (a == NULL) {
    goto err;
  }

  if (y != NULL) {
    if (x == y) {
      if (!BN_sqr(a, x, ctx)) {
        goto err;
      }
    } else {
      if (!BN_mul(a, x, y, ctx)) {
        goto err;
      }
    }
    ca = a;
  } else {
    ca = x;  // Just do the mod
  }

  ret = BN_div_recp(NULL, r, ca, recp, ctx);

err:
  BN_CTX_end(ctx);
  return ret;
}

// BN_window_bits_for_exponent_size returns sliding window size for mod_exp
// with a |b| bit exponent.
//
// For window size 'w' (w >= 2) and a random 'b' bits exponent, the number of
// multiplications is a constant plus on average
//
//    2^(w-1) + (b-w)/(w+1);
//
// here 2^(w-1) is for precomputing the table (we actually need entries only
// for windows that have the lowest bit set), and (b-w)/(w+1) is an
// approximation for the expected number of w-bit windows, not counting the
// first one.
//
// Thus we should use
//
//    w >= 6 if        b > 671
//     w = 5 if  671 > b > 239
//     w = 4 if  239 > b >  79
//     w = 3 if   79 > b >  23
//    w <= 2 if   23 > b
//
// (with draws in between). Very small exponents are often selected
// with low Hamming weight, so we use w = 1 for b <= 23.
static int BN_window_bits_for_exponent_size(size_t b) {
  if (b > 671) {
    return 6;
  }
  if (b > 239) {
    return 5;
  }
  if (b > 79) {
    return 4;
  }
  if (b > 23) {
    return 3;
  }
  return 1;
}

// TABLE_SIZE is the maximum precomputation table size for *variable* sliding
// windows. This must be 2^(max_window - 1), where max_window is the largest
// value returned from |BN_window_bits_for_exponent_size|.
#define TABLE_SIZE 32

// TABLE_BITS_SMALL is the smallest value returned from
// |BN_window_bits_for_exponent_size| when |b| is at most |BN_BITS2| *
// |BN_SMALL_MAX_WORDS| words.
#define TABLE_BITS_SMALL 5

// TABLE_SIZE_SMALL is the same as |TABLE_SIZE|, but when |b| is at most
// |BN_BITS2| * |BN_SMALL_MAX_WORDS|.
#define TABLE_SIZE_SMALL (1 << (TABLE_BITS_SMALL - 1))

// mod_exp_recp computes |r| = |a|^|p| mod |m| for an even modulus |m| using
// reciprocal-based reduction (|BN_div_recp|) and a sliding window over the
// exponent. Returns one on success and zero on error. It is reached via
// |BN_mod_exp| below, which dispatches odd moduli to the Montgomery path.
static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
                        const BIGNUM *m, BN_CTX *ctx) {
  int i, j, ret = 0, wstart, window;
  int start = 1;
  BIGNUM *aa;
  // Table of variables obtained from 'ctx'
  BIGNUM *val[TABLE_SIZE];
  BN_RECP_CTX recp;

  // This function is only called on even moduli.
  assert(!BN_is_odd(m));

  int bits = BN_num_bits(p);
  if (bits == 0) {
    return BN_one(r);
  }

  BN_RECP_CTX_init(&recp);
  BN_CTX_start(ctx);
  aa = BN_CTX_get(ctx);
  val[0] = BN_CTX_get(ctx);
  if (!aa || !val[0]) {
    goto err;
  }

  if (m->neg) {
    // ignore sign of 'm'
    if (!BN_copy(aa, m)) {
      goto err;
    }
    aa->neg = 0;
    if (BN_RECP_CTX_set(&recp, aa, ctx) <= 0) {
      goto err;
    }
  } else {
    if (BN_RECP_CTX_set(&recp, m, ctx) <= 0) {
      goto err;
    }
  }

  if (!BN_nnmod(val[0], a, m, ctx)) {
    goto err;  // 1
  }
  if (BN_is_zero(val[0])) {
    BN_zero(r);
    ret = 1;
    goto err;
  }

  window = BN_window_bits_for_exponent_size(bits);
  if (window > 1) {
    // Precompute the odd powers val[i] = a^(2*i + 1) mod m.
    if (!BN_mod_mul_reciprocal(aa, val[0], val[0], &recp, ctx)) {
      goto err;  // 2
    }
    j = 1 << (window - 1);
    for (i = 1; i < j; i++) {
      if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
          !BN_mod_mul_reciprocal(val[i], val[i - 1], aa, &recp, ctx)) {
        goto err;
      }
    }
  }

  start = 1;  // This is used to avoid multiplication etc
              // when there is only the value '1' in the
              // buffer.
  wstart = bits - 1;  // The top bit of the window

  if (!BN_one(r)) {
    goto err;
  }

  for (;;) {
    int wvalue;  // The 'value' of the window
    int wend;    // The bottom bit of the window

    if (!BN_is_bit_set(p, wstart)) {
      if (!start) {
        if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx)) {
          goto err;
        }
      }
      if (wstart == 0) {
        break;
      }
      wstart--;
      continue;
    }

    // We now have wstart on a 'set' bit, we now need to work out
    // how big a window to do. To do this we need to scan
    // forward until the last set bit before the end of the
    // window
    wvalue = 1;
    wend = 0;
    for (i = 1; i < window; i++) {
      if (wstart - i < 0) {
        break;
      }
      if (BN_is_bit_set(p, wstart - i)) {
        wvalue <<= (i - wend);
        wvalue |= 1;
        wend = i;
      }
    }

    // wend is the size of the current window
    j = wend + 1;
    // add the 'bytes above'
    if (!start) {
      for (i = 0; i < j; i++) {
        if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx)) {
          goto err;
        }
      }
    }

    // wvalue will be an odd number < 2^window
    if (!BN_mod_mul_reciprocal(r, r, val[wvalue >> 1], &recp, ctx)) {
      goto err;
    }

    // move the 'window' down further
    wstart -= wend + 1;
    start = 0;
    if (wstart < 0) {
      break;
    }
  }
  ret = 1;

err:
  BN_CTX_end(ctx);
  BN_RECP_CTX_free(&recp);
  return ret;
}

// BN_mod_exp computes |r| = |a|^|p| mod |m|, reducing |a| first if necessary.
// Odd moduli use the Montgomery implementation; even moduli fall back to the
// reciprocal-based one. Returns one on success and zero on error.
int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m,
               BN_CTX *ctx) {
  if (m->neg) {
    OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER);
    return 0;
  }
  if (a->neg || BN_ucmp(a, m) >= 0) {
    if (!BN_nnmod(r, a, m, ctx)) {
      return 0;
    }
    a = r;
  }

  if (BN_is_odd(m)) {
    return BN_mod_exp_mont(r, a, p, m, ctx, NULL);
  }

  return mod_exp_recp(r, a, p, m, ctx);
}

int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
                    const BIGNUM *m, BN_CTX *ctx, const BN_MONT_CTX *mont) {
  if (!BN_is_odd(m)) {
    OPENSSL_PUT_ERROR(BN, BN_R_CALLED_WITH_EVEN_MODULUS);
    return 0;
  }
  if (m->neg) {
    OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER);
    return 0;
  }
  // |a| is secret, but |a < m| is not.
  if (a->neg || constant_time_declassify_int(BN_ucmp(a, m)) >= 0) {
    OPENSSL_PUT_ERROR(BN, BN_R_INPUT_NOT_REDUCED);
    return 0;
  }

  int bits = BN_num_bits(p);
  if (bits == 0) {
    // x**0 mod 1 is still zero.
    if (BN_abs_is_word(m, 1)) {
      BN_zero(rr);
      return 1;
    }
    return BN_one(rr);
  }

  int ret = 0;
  BIGNUM *val[TABLE_SIZE];
  BN_MONT_CTX *new_mont = NULL;

  BN_CTX_start(ctx);
  BIGNUM *r = BN_CTX_get(ctx);
  val[0] = BN_CTX_get(ctx);
  int window, r_is_one, wstart;
  if (r == NULL || val[0] == NULL) {
    goto err;
  }

  // Allocate a montgomery context if it was not supplied by the caller.
  if (mont == NULL) {
    new_mont = BN_MONT_CTX_new_consttime(m, ctx);
    if (new_mont == NULL) {
      goto err;
    }
    mont = new_mont;
  }

  // We exponentiate by looking at sliding windows of the exponent and
  // precomputing powers of |a|. Windows may be shifted so they always end on a
  // set bit, so only precompute odd powers. We compute val[i] = a^(2*i + 1)
  // for i = 0 to 2^(window-1), all in Montgomery form.
  window = BN_window_bits_for_exponent_size(bits);
  if (!BN_to_montgomery(val[0], a, mont, ctx)) {
    goto err;
  }
  if (window > 1) {
    BIGNUM *d = BN_CTX_get(ctx);
    if (d == NULL || !BN_mod_mul_montgomery(d, val[0], val[0], mont, ctx)) {
      goto err;
    }
    for (int i = 1; i < 1 << (window - 1); i++) {
      val[i] = BN_CTX_get(ctx);
      if (val[i] == NULL ||
          !BN_mod_mul_montgomery(val[i], val[i - 1], d, mont, ctx)) {
        goto err;
      }
    }
  }

  // |p| is non-zero, so at least one window is non-zero. To save some
  // multiplications, defer initializing |r| until then.
  r_is_one = 1;
  wstart = bits - 1;  // The top bit of the window.
  for (;;) {
    if (!BN_is_bit_set(p, wstart)) {
      if (!r_is_one && !BN_mod_mul_montgomery(r, r, r, mont, ctx)) {
        goto err;
      }
      if (wstart == 0) {
        break;
      }
      wstart--;
      continue;
    }

    // We now have wstart on a set bit. Find the largest window we can use.
    int wvalue = 1;
    int wsize = 0;
    for (int i = 1; i < window && i <= wstart; i++) {
      if (BN_is_bit_set(p, wstart - i)) {
        wvalue <<= (i - wsize);
        wvalue |= 1;
        wsize = i;
      }
    }

    // Shift |r| to the end of the window.
    if (!r_is_one) {
      for (int i = 0; i < wsize + 1; i++) {
        if (!BN_mod_mul_montgomery(r, r, r, mont, ctx)) {
          goto err;
        }
      }
    }

    assert(wvalue & 1);
    assert(wvalue < (1 << window));
    if (r_is_one) {
      if (!BN_copy(r, val[wvalue >> 1])) {
        goto err;
      }
    } else if (!BN_mod_mul_montgomery(r, r, val[wvalue >> 1], mont, ctx)) {
      goto err;
    }

    r_is_one = 0;
    if (wstart == wsize) {
      break;
    }
    wstart -= wsize + 1;
  }

  // |p| is non-zero, so |r_is_one| must be cleared at some point.
  assert(!r_is_one);
  if (!BN_from_montgomery(rr, r, mont, ctx)) {
    goto err;
  }
  ret = 1;

err:
  BN_MONT_CTX_free(new_mont);
  BN_CTX_end(ctx);
  return ret;
}

// bn_mod_exp_mont_small computes |r| = |a|^|p| mod |mont->N| over fixed-width
// word arrays of |num| words, all values in Montgomery form. Aborts if the
// widths are inconsistent or exceed |BN_SMALL_MAX_WORDS|.
void bn_mod_exp_mont_small(BN_ULONG *r, const BN_ULONG *a, size_t num,
                           const BN_ULONG *p, size_t num_p,
                           const BN_MONT_CTX *mont) {
  if (num != (size_t)mont->N.width || num > BN_SMALL_MAX_WORDS ||
      num_p > SIZE_MAX / BN_BITS2) {
    abort();
  }
  assert(BN_is_odd(&mont->N));

  // Count the number of bits in |p|, skipping leading zeros. Note this
  // function treats |p| as public.
  while (num_p != 0 && p[num_p - 1] == 0) {
    num_p--;
  }
  if (num_p == 0) {
    // a^0 = 1, i.e. R in Montgomery form, which is RR / R.
    bn_from_montgomery_small(r, num, mont->RR.d, num, mont);
    return;
  }
  size_t bits = BN_num_bits_word(p[num_p - 1]) + (num_p - 1) * BN_BITS2;
  assert(bits != 0);

  // We exponentiate by looking at sliding windows of the exponent and
  // precomputing powers of |a|. Windows may be shifted so they always end on a
  // set bit, so only precompute odd powers. We compute val[i] = a^(2*i + 1)
  // for i = 0 to 2^(window-1), all in Montgomery form.
  unsigned window = BN_window_bits_for_exponent_size(bits);
  if (window > TABLE_BITS_SMALL) {
    window = TABLE_BITS_SMALL;  // Tolerate excessively large |p|.
  }
  BN_ULONG val[TABLE_SIZE_SMALL][BN_SMALL_MAX_WORDS];
  OPENSSL_memcpy(val[0], a, num * sizeof(BN_ULONG));
  if (window > 1) {
    BN_ULONG d[BN_SMALL_MAX_WORDS];
    bn_mod_mul_montgomery_small(d, val[0], val[0], num, mont);
    for (unsigned i = 1; i < 1u << (window - 1); i++) {
      bn_mod_mul_montgomery_small(val[i], val[i - 1], d, num, mont);
    }
  }

  // |p| is non-zero, so at least one window is non-zero. To save some
  // multiplications, defer initializing |r| until then.
  int r_is_one = 1;
  size_t wstart = bits - 1;  // The top bit of the window.
  for (;;) {
    if (!bn_is_bit_set_words(p, num_p, wstart)) {
      if (!r_is_one) {
        bn_mod_mul_montgomery_small(r, r, r, num, mont);
      }
      if (wstart == 0) {
        break;
      }
      wstart--;
      continue;
    }

    // We now have wstart on a set bit. Find the largest window we can use.
    unsigned wvalue = 1;
    unsigned wsize = 0;
    for (unsigned i = 1; i < window && i <= wstart; i++) {
      if (bn_is_bit_set_words(p, num_p, wstart - i)) {
        wvalue <<= (i - wsize);
        wvalue |= 1;
        wsize = i;
      }
    }

    // Shift |r| to the end of the window.
    if (!r_is_one) {
      for (unsigned i = 0; i < wsize + 1; i++) {
        bn_mod_mul_montgomery_small(r, r, r, num, mont);
      }
    }

    assert(wvalue & 1);
    assert(wvalue < (1u << window));
    if (r_is_one) {
      OPENSSL_memcpy(r, val[wvalue >> 1], num * sizeof(BN_ULONG));
    } else {
      bn_mod_mul_montgomery_small(r, r, val[wvalue >> 1], num, mont);
    }
    r_is_one = 0;
    if (wstart == wsize) {
      break;
    }
    wstart -= wsize + 1;
  }
  // |p| is non-zero, so |r_is_one| must be cleared at some point.
  assert(!r_is_one);
  OPENSSL_cleanse(val, sizeof(val));
}

// bn_mod_inverse0_prime_mont_small computes |r| = |a|^-1 mod |mont->N| over
// |num|-word arrays in Montgomery form, assuming the modulus is prime.
void bn_mod_inverse0_prime_mont_small(BN_ULONG *r, const BN_ULONG *a,
                                      size_t num, const BN_MONT_CTX *mont) {
  if (num != (size_t)mont->N.width || num > BN_SMALL_MAX_WORDS) {
    abort();
  }

  // Per Fermat's Little Theorem, a^-1 = a^(p-2) (mod p) for p prime.
  BN_ULONG p_minus_two[BN_SMALL_MAX_WORDS];
  const BN_ULONG *p = mont->N.d;
  OPENSSL_memcpy(p_minus_two, p, num * sizeof(BN_ULONG));
  if (p_minus_two[0] >= 2) {
    p_minus_two[0] -= 2;
  } else {
    // Subtracting 2 from the low word wraps; propagate the borrow through the
    // higher words (stop at the first word that was non-zero).
    p_minus_two[0] -= 2;
    for (size_t i = 1; i < num; i++) {
      if (p_minus_two[i]-- != 0) {
        break;
      }
    }
  }

  bn_mod_exp_mont_small(r, a, num, p_minus_two, num, mont);
}

// copy_to_prebuf stores |b| as entry |idx| of the precomputation table
// |table|, where each entry is |top| words wide.
static void copy_to_prebuf(const BIGNUM *b, int top, BN_ULONG *table, int idx,
                           int window) {
  int ret = bn_copy_words(table + idx * top, top, b);
  assert(ret);  // |b| is guaranteed to fit.
  (void)ret;
}

// copy_from_prebuf loads entry |idx| of |table| into |b|. It scans every one
// of the 2^|window| entries so the memory access pattern does not depend on
// the secret index |idx|.
static int copy_from_prebuf(BIGNUM *b, int top, const BN_ULONG *table, int idx,
                            int window) {
  if (!bn_wexpand(b, top)) {
    return 0;
  }

  OPENSSL_memset(b->d, 0, sizeof(BN_ULONG) * top);
  const int width = 1 << window;
  for (int i = 0; i < width; i++, table += top) {
    // Use a value barrier to prevent Clang from adding a branch when |i != idx|
    // and making this copy not constant time.
    // Clang is still allowed to learn
    // that |mask| is constant across the inner loop, so this won't inhibit any
    // vectorization it might do.
    BN_ULONG mask = value_barrier_w(constant_time_eq_int(i, idx));
    for (int j = 0; j < top; j++) {
      b->d[j] |= table[j] & mask;
    }
  }

  b->width = top;
  return 1;
}

// Window sizes optimized for fixed window size modular exponentiation
// algorithm (BN_mod_exp_mont_consttime).
//
// TODO(davidben): These window sizes were originally set for 64-byte cache
// lines with a cache-line-dependent constant-time mitigation. They can
// probably be revised now that our implementation is no longer
// cache-time-dependent.
#define BN_window_bits_for_ctime_exponent_size(b) \
  ((b) > 937 ? 6 : (b) > 306 ? 5 : (b) > 89 ? 4 : (b) > 22 ? 3 : 1)
#define BN_MAX_MOD_EXP_CTIME_WINDOW (6)

// This variant of |BN_mod_exp_mont| uses fixed windows and fixed memory access
// patterns to protect secret exponents (cf. the hyper-threading timing attacks
// pointed out by Colin Percival,
// http://www.daemonology.net/hyperthreading-considered-harmful/)
int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
                              const BIGNUM *m, BN_CTX *ctx,
                              const BN_MONT_CTX *mont) {
  int i, ret = 0, wvalue;
  BN_MONT_CTX *new_mont = NULL;

  void *powerbuf_free = NULL;
  size_t powerbuf_len = 0;
  BN_ULONG *powerbuf = NULL;

  if (!BN_is_odd(m)) {
    OPENSSL_PUT_ERROR(BN, BN_R_CALLED_WITH_EVEN_MODULUS);
    return 0;
  }
  if (m->neg) {
    OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER);
    return 0;
  }
  // |a| is secret, but it is required to be in range, so these comparisons may
  // be leaked.
  if (a->neg || constant_time_declassify_int(BN_ucmp(a, m) >= 0)) {
    OPENSSL_PUT_ERROR(BN, BN_R_INPUT_NOT_REDUCED);
    return 0;
  }

  // Use all bits stored in |p|, rather than |BN_num_bits|, so we do not leak
  // whether the top bits are zero.
  int max_bits = p->width * BN_BITS2;
  int bits = max_bits;
  if (bits == 0) {
    // x**0 mod 1 is still zero.
    if (BN_abs_is_word(m, 1)) {
      BN_zero(rr);
      return 1;
    }
    return BN_one(rr);
  }

  // Allocate a montgomery context if it was not supplied by the caller.
  int top, num_powers, window;
  if (mont == NULL) {
    new_mont = BN_MONT_CTX_new_consttime(m, ctx);
    if (new_mont == NULL) {
      goto err;
    }
    mont = new_mont;
  }

  // Use the width in |mont->N|, rather than the copy in |m|. The assembly
  // implementation assumes it can use |top| to size R.
  top = mont->N.width;

#if defined(OPENSSL_BN_ASM_MONT5) || defined(RSAZ_ENABLED)
  // Share one large stack-allocated buffer between the RSAZ and non-RSAZ code
  // paths. If we were to use separate static buffers for each then there is
  // some chance that both large buffers would be allocated on the stack,
  // causing the stack space requirement to be truly huge (~10KB).
  alignas(MOD_EXP_CTIME_ALIGN) BN_ULONG storage[MOD_EXP_CTIME_STORAGE_LEN];
#endif
#if defined(RSAZ_ENABLED)
  // If the size of the operands allow it, perform the optimized RSAZ
  // exponentiation. For further information see
  // crypto/fipsmodule/bn/rsaz_exp.c and accompanying assembly modules.
  if (a->width == 16 && p->width == 16 && BN_num_bits(m) == 1024 &&
      rsaz_avx2_preferred()) {
    if (!bn_wexpand(rr, 16)) {
      goto err;
    }
    RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d, mont->n0[0],
                           storage);
    rr->width = 16;
    rr->neg = 0;
    ret = 1;
    goto err;
  }
#endif

  // Get the window size to use with size of p.
  window = BN_window_bits_for_ctime_exponent_size(bits);
  assert(window <= BN_MAX_MOD_EXP_CTIME_WINDOW);

  // Calculating |powerbuf_len| below cannot overflow because of the bound on
  // Montgomery reduction.
  assert((size_t)top <= BN_MONTGOMERY_MAX_WORDS);
  static_assert(
      BN_MONTGOMERY_MAX_WORDS <=
          INT_MAX / sizeof(BN_ULONG) / ((1 << BN_MAX_MOD_EXP_CTIME_WINDOW) + 3),
      "powerbuf_len may overflow");

#if defined(OPENSSL_BN_ASM_MONT5)
  if (window >= 5) {
    window = 5;  // ~5% improvement for RSA2048 sign, and even for RSA4096
    // Reserve space for the |mont->N| copy.
    powerbuf_len += top * sizeof(mont->N.d[0]);
  }
#endif

  // Allocate a buffer large enough to hold all of the pre-computed
  // powers of |am|, |am| itself, and |tmp|.
  num_powers = 1 << window;
  powerbuf_len += sizeof(m->d[0]) * top * (num_powers + 2);

#if defined(OPENSSL_BN_ASM_MONT5)
  if (powerbuf_len <= sizeof(storage)) {
    powerbuf = storage;
  }
  // |storage| is more than large enough to handle 1024-bit inputs.
  assert(powerbuf != NULL || top * BN_BITS2 > 1024);
#endif
  if (powerbuf == NULL) {
    powerbuf_free = OPENSSL_malloc(powerbuf_len + MOD_EXP_CTIME_ALIGN);
    if (powerbuf_free == NULL) {
      goto err;
    }
    // NOTE(review): the extraction appears to have stripped the template
    // argument from this cast (upstream is reinterpret_cast<BN_ULONG *>);
    // confirm against upstream before building.
    powerbuf = reinterpret_cast(
        align_pointer(powerbuf_free, MOD_EXP_CTIME_ALIGN));
  }
  OPENSSL_memset(powerbuf, 0, powerbuf_len);

  // Place |tmp| and |am| right after powers table.
  BIGNUM tmp, am;
  tmp.d = powerbuf + top * num_powers;
  am.d = tmp.d + top;
  tmp.width = am.width = 0;
  tmp.dmax = am.dmax = top;
  tmp.neg = am.neg = 0;
  tmp.flags = am.flags = BN_FLG_STATIC_DATA;

  if (!bn_one_to_montgomery(&tmp, mont, ctx) || !bn_resize_words(&tmp, top)) {
    goto err;
  }

  // Prepare a^1 in the Montgomery domain.
  assert(!a->neg);
  declassify_assert(BN_ucmp(a, m) < 0);
  if (!BN_to_montgomery(&am, a, mont, ctx) || !bn_resize_words(&am, top)) {
    goto err;
  }

#if defined(OPENSSL_BN_ASM_MONT5)
  // This optimization uses ideas from https://eprint.iacr.org/2011/239,
  // specifically optimization of cache-timing attack countermeasures,
  // pre-computation optimization, and Almost Montgomery Multiplication.
  //
  // The paper discusses a 4-bit window to optimize 512-bit modular
  // exponentiation, used in RSA-1024 with CRT, but RSA-1024 is no longer
  // important.
  //
  // |bn_mul_mont_gather5| and |bn_power5| implement the "almost" reduction
  // variant, so the values here may not be fully reduced. They are bounded by
  // R (i.e. they fit in |top| words), not |m|. Additionally, we pass these
  // "almost" reduced inputs into |bn_mul_mont|, which implements the normal
  // reduction variant. Given those inputs, |bn_mul_mont| may not give reduced
  // output, but it will still produce "almost" reduced output.
  //
  // TODO(davidben): Using "almost" reduction complicates analysis of this
  // code, and its interaction with other parts of the project. Determine
  // whether this is actually necessary for performance.
  if (window == 5 && top > 1) {
    // Copy |mont->N| to improve cache locality.
    BN_ULONG *np = am.d + top;
    for (i = 0; i < top; i++) {
      np[i] = mont->N.d[i];
    }

    // Fill |powerbuf| with the first 32 powers of |am|.
    const BN_ULONG *n0 = mont->n0;
    bn_scatter5(tmp.d, top, powerbuf, 0);
    bn_scatter5(am.d, am.width, powerbuf, 1);
    bn_mul_mont(tmp.d, am.d, am.d, np, n0, top);
    bn_scatter5(tmp.d, top, powerbuf, 2);

    // Square to compute powers of two.
    for (i = 4; i < 32; i *= 2) {
      bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
      bn_scatter5(tmp.d, top, powerbuf, i);
    }
    // Compute odd powers |i| based on |i - 1|, then all powers |i * 2^j|.
    for (i = 3; i < 32; i += 2) {
      bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
      bn_scatter5(tmp.d, top, powerbuf, i);
      for (int j = 2 * i; j < 32; j *= 2) {
        bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
        bn_scatter5(tmp.d, top, powerbuf, j);
      }
    }

    bits--;
    for (wvalue = 0, i = bits % 5; i >= 0; i--, bits--) {
      wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
    }
    bn_gather5(tmp.d, top, powerbuf, wvalue);

    // At this point |bits| is 4 mod 5 and at least -1. (|bits| is the first
    // bit that has not been read yet.)
    assert(bits >= -1 && (bits == -1 || bits % 5 == 4));

    // Scan the exponent one window at a time starting from the most
    // significant bits.
    if (!bn_power5_capable(top)) {
      while (bits >= 0) {
        for (wvalue = 0, i = 0; i < 5; i++, bits--) {
          wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
        }

        bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
        bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
        bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
        bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
        bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
        bn_mul_mont_gather5(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue);
      }
    } else {
      const uint8_t *p_bytes = (const uint8_t *)p->d;
      assert(bits < max_bits);
      // |p = 0| has been handled as a special case, so |max_bits| is at least
      // one word.
      assert(max_bits >= 64);

      // If the first bit to be read lands in the last byte, unroll the first
      // iteration to avoid reading past the bounds of |p->d|. (After the
      // first iteration, we are guaranteed to be past the last byte.) Note
      // |bits| here is the top bit, inclusive.
      if (bits - 4 >= max_bits - 8) {
        // Read five bits from |bits-4| through |bits|, inclusive.
        wvalue = p_bytes[p->width * BN_BYTES - 1];
        wvalue >>= (bits - 4) & 7;
        wvalue &= 0x1f;
        bits -= 5;
        bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue);
      }
      while (bits >= 0) {
        // Read five bits from |bits-4| through |bits|, inclusive.
        int first_bit = bits - 4;
        uint16_t val;
        OPENSSL_memcpy(&val, p_bytes + (first_bit >> 3), sizeof(val));
        val >>= first_bit & 7;
        val &= 0x1f;
        bits -= 5;
        bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top, val);
      }
    }

    // The result is now in |tmp| in Montgomery form, but it may not be fully
    // reduced. This is within bounds for |BN_from_montgomery| (tmp < R <= m*R)
    // so it will, when converting from Montgomery form, produce a fully
    // reduced result.
    //
    // This differs from Figure 2 of the paper, which uses AMM(h, 1) to convert
    // from Montgomery form with unreduced output, followed by an extra
    // reduction step. In the paper's terminology, we replace steps 9 and 10
    // with MM(h, 1).
  } else
#endif
  {
    copy_to_prebuf(&tmp, top, powerbuf, 0, window);
    copy_to_prebuf(&am, top, powerbuf, 1, window);

    // If the window size is greater than 1, then calculate
    // val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1)
    // (even powers could instead be computed as (a^(i/2))^2
    // to use the slight performance advantage of sqr over mul).
    if (window > 1) {
      if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx)) {
        goto err;
      }
      copy_to_prebuf(&tmp, top, powerbuf, 2, window);
      for (i = 3; i < num_powers; i++) {
        // Calculate a^i = a^(i-1) * a
        if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx)) {
          goto err;
        }
        copy_to_prebuf(&tmp, top, powerbuf, i, window);
      }
    }

    bits--;
    for (wvalue = 0, i = bits % window; i >= 0; i--, bits--) {
      wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
    }
    if (!copy_from_prebuf(&tmp, top, powerbuf, wvalue, window)) {
      goto err;
    }

    // Scan the exponent one window at a time starting from the most
    // significant bits.
    while (bits >= 0) {
      wvalue = 0;  // The 'value' of the window

      // Scan the window, squaring the result as we go
      for (i = 0; i < window; i++, bits--) {
        if (!BN_mod_mul_montgomery(&tmp, &tmp, &tmp, mont, ctx)) {
          goto err;
        }
        wvalue = (wvalue << 1) + BN_is_bit_set(p, bits);
      }

      // Fetch the appropriate pre-computed value from the pre-buf
      if (!copy_from_prebuf(&am, top, powerbuf, wvalue, window)) {
        goto err;
      }

      // Multiply the result into the intermediate result
      if (!BN_mod_mul_montgomery(&tmp, &tmp, &am, mont, ctx)) {
        goto err;
      }
    }
  }

  // Convert the final result from Montgomery to standard format. If we used
  // the |OPENSSL_BN_ASM_MONT5| codepath, |tmp| may not be fully reduced. It is
  // only bounded by R rather than |m|. However, that is still within bounds
  // for |BN_from_montgomery|, which implements full Montgomery reduction, not
  // "almost" Montgomery reduction.
  if (!BN_from_montgomery(rr, &tmp, mont, ctx)) {
    goto err;
  }
  ret = 1;

err:
  BN_MONT_CTX_free(new_mont);
  if (powerbuf != NULL && powerbuf_free == NULL) {
    OPENSSL_cleanse(powerbuf, powerbuf_len);
  }
  OPENSSL_free(powerbuf_free);
  return ret;
}

// BN_mod_exp_mont_word computes |rr| = |a|^|p| mod |m| for a single-word
// base, reducing |a| first when |m| is one word wide so inputs meet
// |BN_mod_exp_mont|'s reduced-input requirement.
int BN_mod_exp_mont_word(BIGNUM *rr, BN_ULONG a, const BIGNUM *p,
                         const BIGNUM *m, BN_CTX *ctx,
                         const BN_MONT_CTX *mont) {
  BIGNUM a_bignum;
  BN_init(&a_bignum);

  int ret = 0;

  // BN_mod_exp_mont requires reduced inputs.
  if (bn_minimal_width(m) == 1) {
    a %= m->d[0];
  }

  if (!BN_set_word(&a_bignum, a)) {
    OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR);
    goto err;
  }

  ret = BN_mod_exp_mont(rr, &a_bignum, p, m, ctx, mont);

err:
  BN_free(&a_bignum);
  return ret;
}

#define TABLE_SIZE 32

// BN_mod_exp2_mont computes |rr| = |a1|^|p1| * |a2|^|p2| mod |m|.
int BN_mod_exp2_mont(BIGNUM *rr, const BIGNUM *a1, const BIGNUM *p1,
                     const BIGNUM *a2, const BIGNUM *p2, const BIGNUM *m,
                     BN_CTX *ctx, const BN_MONT_CTX *mont) {
  BIGNUM tmp;
  BN_init(&tmp);

  int ret = 0;
  BN_MONT_CTX *new_mont = NULL;

  // Allocate a montgomery context if it was not supplied by the caller.
  if (mont == NULL) {
    new_mont = BN_MONT_CTX_new_for_modulus(m, ctx);
    if (new_mont == NULL) {
      goto err;
    }
    mont = new_mont;
  }

  // BN_mod_mul_montgomery removes one Montgomery factor, so passing one
  // Montgomery-encoded and one non-Montgomery-encoded value gives a
  // non-Montgomery-encoded result.
  if (!BN_mod_exp_mont(rr, a1, p1, m, ctx, mont) ||
      !BN_mod_exp_mont(&tmp, a2, p2, m, ctx, mont) ||
      !BN_to_montgomery(rr, rr, mont, ctx) ||
      !BN_mod_mul_montgomery(rr, rr, &tmp, mont, ctx)) {
    goto err;
  }

  ret = 1;

err:
  BN_MONT_CTX_free(new_mont);
  BN_free(&tmp);
  return ret;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/gcd.cc.inc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
 * You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html */

// NOTE(review): the extraction stripped the <...> targets from the next two
// includes (likely <openssl/bn.h> and <openssl/err.h>); confirm against
// upstream before building.
#include
#include

#include "internal.h"

// BN_mod_inverse_odd computes |out| = |a|^-1 mod |n| for odd |n| using the
// binary extended Euclidean algorithm. On success it returns one. If |a| has
// no inverse, it sets |*out_no_inverse| and fails with BN_R_NO_INVERSE. This
// implementation is not constant-time.
int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a,
                       const BIGNUM *n, BN_CTX *ctx) {
  *out_no_inverse = 0;

  if (!BN_is_odd(n)) {
    OPENSSL_PUT_ERROR(BN, BN_R_CALLED_WITH_EVEN_MODULUS);
    return 0;
  }

  if (BN_is_negative(a) || BN_cmp(a, n) >= 0) {
    OPENSSL_PUT_ERROR(BN, BN_R_INPUT_NOT_REDUCED);
    return 0;
  }

  BIGNUM *A, *B, *X, *Y;
  int ret = 0;
  int sign;

  BN_CTX_start(ctx);
  A = BN_CTX_get(ctx);
  B = BN_CTX_get(ctx);
  X = BN_CTX_get(ctx);
  Y = BN_CTX_get(ctx);
  BIGNUM *R = out;
  if (Y == NULL) {
    goto err;
  }

  BN_zero(Y);
  if (!BN_one(X) || BN_copy(B, a) == NULL || BN_copy(A, n) == NULL) {
    goto err;
  }
  A->neg = 0;
  sign = -1;
  // From  B = a mod |n|,  A = |n|  it follows that
  //
  //      0 <= B < A,
  //     -sign*X*a  ==  B   (mod |n|),
  //      sign*Y*a  ==  A   (mod |n|).

  // Binary inversion algorithm; requires odd modulus. This is faster than the
  // general algorithm if the modulus is sufficiently small (about 400 .. 500
  // bits on 32-bit systems, but much more on 64-bit systems)
  int shift;

  while (!BN_is_zero(B)) {
    //      0 < B < |n|,
    //      0 < A <= |n|,
    // (1) -sign*X*a  ==  B   (mod |n|),
    // (2)  sign*Y*a  ==  A   (mod |n|)

    // Now divide B by the maximum possible power of two in the
    // integers, and divide X by the same value mod |n|. When we're
    // done, (1) still holds.
    shift = 0;
    while (!BN_is_bit_set(B, shift)) {
      // note that 0 < B
      shift++;
      if (BN_is_odd(X)) {
        if (!BN_uadd(X, X, n)) {
          goto err;
        }
      }

      // now X is even, so we can easily divide it by two
      if (!BN_rshift1(X, X)) {
        goto err;
      }
    }
    if (shift > 0) {
      if (!BN_rshift(B, B, shift)) {
        goto err;
      }
    }

    // Same for A and Y. Afterwards, (2) still holds.
    shift = 0;
    while (!BN_is_bit_set(A, shift)) {
      // note that 0 < A
      shift++;
      if (BN_is_odd(Y)) {
        if (!BN_uadd(Y, Y, n)) {
          goto err;
        }
      }

      // now Y is even
      if (!BN_rshift1(Y, Y)) {
        goto err;
      }
    }
    if (shift > 0) {
      if (!BN_rshift(A, A, shift)) {
        goto err;
      }
    }

    // We still have (1) and (2).
    // Both A and B are odd.
    // The following computations ensure that
    //
    //      0 <= B < |n|,
    //       0 < A < |n|,
    // (1) -sign*X*a  ==  B   (mod |n|),
    // (2)  sign*Y*a  ==  A   (mod |n|),
    //
    // and that either A or B is even in the next iteration.
    if (BN_ucmp(B, A) >= 0) {
      // -sign*(X + Y)*a == B - A  (mod |n|)
      if (!BN_uadd(X, X, Y)) {
        goto err;
      }
      // NB: we could use BN_mod_add_quick(X, X, Y, n), but that
      // actually makes the algorithm slower
      if (!BN_usub(B, B, A)) {
        goto err;
      }
    } else {
      //  sign*(X + Y)*a == A - B  (mod |n|)
      if (!BN_uadd(Y, Y, X)) {
        goto err;
      }
      // as above, BN_mod_add_quick(Y, Y, X, n) would slow things down
      if (!BN_usub(A, A, B)) {
        goto err;
      }
    }
  }

  if (!BN_is_one(A)) {
    *out_no_inverse = 1;
    OPENSSL_PUT_ERROR(BN, BN_R_NO_INVERSE);
    goto err;
  }

  // The while loop (Euclid's algorithm) ends when
  //      A == gcd(a,n);
  // we have
  //       sign*Y*a  ==  A  (mod |n|),
  // where Y is non-negative.

  if (sign < 0) {
    if (!BN_sub(Y, n, Y)) {
      goto err;
    }
  }
  // Now  Y*a  ==  A  (mod |n|).
  //  Y*a == 1  (mod |n|)

  if (Y->neg || BN_ucmp(Y, n) >= 0) {
    if (!BN_nnmod(Y, Y, n, ctx)) {
      goto err;
    }
  }
  if (!BN_copy(R, Y)) {
    goto err;
  }

  ret = 1;

err:
  BN_CTX_end(ctx);
  return ret;
}

// BN_mod_inverse computes |a|^-1 mod |n|, reducing |a| first if needed and
// allocating a result BIGNUM when |out| is NULL. Returns the result on
// success or NULL on failure (including when no inverse exists).
BIGNUM *BN_mod_inverse(BIGNUM *out, const BIGNUM *a, const BIGNUM *n,
                       BN_CTX *ctx) {
  BIGNUM *new_out = NULL;
  if (out == NULL) {
    new_out = BN_new();
    if (new_out == NULL) {
      return NULL;
    }
    out = new_out;
  }

  int ok = 0;
  BIGNUM *a_reduced = NULL;
  if (a->neg || BN_ucmp(a, n) >= 0) {
    a_reduced = BN_dup(a);
    if (a_reduced == NULL) {
      goto err;
    }
    if (!BN_nnmod(a_reduced, a_reduced, n, ctx)) {
      goto err;
    }
    a = a_reduced;
  }

  int no_inverse;
  if (!BN_is_odd(n)) {
    // Even moduli take the constant-time path; odd moduli the leaky one.
    if (!bn_mod_inverse_consttime(out, &no_inverse, a, n, ctx)) {
      goto err;
    }
  } else if (!BN_mod_inverse_odd(out, &no_inverse, a, n, ctx)) {
    goto err;
  }

  ok = 1;

err:
  if (!ok) {
    BN_free(new_out);
    out = NULL;
  }
  BN_free(a_reduced);
  return out;
}

// BN_mod_inverse_blinded computes |out| = |a|^-1 mod |mont->N| for a secret
// |a| by multiplying in a random blinding factor, inverting with the leaky
// |BN_mod_inverse_odd|, and unblinding.
int BN_mod_inverse_blinded(BIGNUM *out, int *out_no_inverse, const BIGNUM *a,
                           const BN_MONT_CTX *mont, BN_CTX *ctx) {
  *out_no_inverse = 0;

  // |a| is secret, but it is required to be in range, so these comparisons may
  // be leaked.
  if (BN_is_negative(a) ||
      constant_time_declassify_int(BN_cmp(a, &mont->N) >= 0)) {
    OPENSSL_PUT_ERROR(BN, BN_R_INPUT_NOT_REDUCED);
    return 0;
  }

  int ret = 0;
  BIGNUM blinding_factor;
  BN_init(&blinding_factor);

  // |BN_mod_inverse_odd| is leaky, so generate a secret blinding factor and
  // blind |a|. This works because (ar)^-1 * r = a^-1, supposing r is
  // invertible. If r is not invertible, this function will fail. However, we
  // only use this in RSA, where stumbling on an uninvertible element means
  // stumbling on the key's factorization. That is, if this function fails, the
  // RSA key was not actually a product of two large primes.
  //
  // TODO(crbug.com/boringssl/677): When the PRNG output is marked secret by
  // default, the explicit |bn_secret| call can be removed.
  if (!BN_rand_range_ex(&blinding_factor, 1, &mont->N)) {
    goto err;
  }
  bn_secret(&blinding_factor);
  if (!BN_mod_mul_montgomery(out, &blinding_factor, a, mont, ctx)) {
    goto err;
  }

  // Once blinded, |out| is no longer secret, so it may be passed to a leaky
  // mod inverse function. Note |blinding_factor| is secret, so |out| will be
  // secret again after multiplying.
  bn_declassify(out);
  if (!BN_mod_inverse_odd(out, out_no_inverse, out, &mont->N, ctx) ||
      !BN_mod_mul_montgomery(out, &blinding_factor, out, mont, ctx)) {
    goto err;
  }

  ret = 1;

err:
  BN_free(&blinding_factor);
  return ret;
}

// bn_mod_inverse_prime computes |out| = |a|^-1 mod |p| for prime |p| via
// Fermat's Little Theorem (a^(p-2) mod p), with a public exponentiation.
int bn_mod_inverse_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p,
                         BN_CTX *ctx, const BN_MONT_CTX *mont_p) {
  BN_CTX_start(ctx);
  BIGNUM *p_minus_2 = BN_CTX_get(ctx);
  int ok = p_minus_2 != NULL && BN_copy(p_minus_2, p) &&
           BN_sub_word(p_minus_2, 2) &&
           BN_mod_exp_mont(out, a, p_minus_2, p, ctx, mont_p);
  BN_CTX_end(ctx);
  return ok;
}

// bn_mod_inverse_secret_prime is |bn_mod_inverse_prime| but uses the
// constant-time exponentiation, for secret inputs.
int bn_mod_inverse_secret_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p,
                                BN_CTX *ctx, const BN_MONT_CTX *mont_p) {
  BN_CTX_start(ctx);
  BIGNUM *p_minus_2 = BN_CTX_get(ctx);
  int ok = p_minus_2 != NULL && BN_copy(p_minus_2, p) &&
           BN_sub_word(p_minus_2, 2) &&
           BN_mod_exp_mont_consttime(out, a, p_minus_2, p, ctx, mont_p);
  BN_CTX_end(ctx);
  return ok;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/gcd_extra.cc.inc
================================================
/* Copyright 2018 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the extraction stripped the <...> targets from the next three
// includes; confirm against upstream before building.
#include
#include
#include

#include "internal.h"

// word_is_odd_mask returns all-ones if |a| is odd and zero otherwise.
static BN_ULONG word_is_odd_mask(BN_ULONG a) { return (BN_ULONG)0 - (a & 1); }

// maybe_rshift1_words halves |a| in place if |mask| is all-ones and leaves it
// unchanged if |mask| is zero, without branching on |mask|. |tmp| is scratch
// space of |num| words.
static void maybe_rshift1_words(BN_ULONG *a, BN_ULONG mask, BN_ULONG *tmp,
                                size_t num) {
  bn_rshift1_words(tmp, a, num);
  bn_select_words(a, mask, tmp, a, num);
}

// maybe_rshift1_words_carry is |maybe_rshift1_words|, but additionally shifts
// in |carry| (0 or 1) as the new top bit when the shift is selected.
static void maybe_rshift1_words_carry(BN_ULONG *a, BN_ULONG carry,
                                      BN_ULONG mask, BN_ULONG *tmp,
                                      size_t num) {
  maybe_rshift1_words(a, mask, tmp, num);
  if (num != 0) {
    carry &= mask;
    a[num - 1] |= carry << (BN_BITS2 - 1);
  }
}

// maybe_add_words adds |b| to |a| in place if |mask| is all-ones and leaves
// |a| unchanged if |mask| is zero, without branching on |mask|. It returns the
// carry out of the selected operation.
static BN_ULONG maybe_add_words(BN_ULONG *a, BN_ULONG mask, const BN_ULONG *b,
                                BN_ULONG *tmp, size_t num) {
  BN_ULONG carry = bn_add_words(tmp, a, b, num);
  bn_select_words(a, mask, tmp, a, num);
  return carry & mask;
}

// bn_gcd_consttime sets |r| and |*out_shift| such that
// 2^|*out_shift| * |r| = gcd(x, y), in constant time with respect to the
// values (though not the widths) of |x| and |y|.
static int bn_gcd_consttime(BIGNUM *r, unsigned *out_shift, const BIGNUM *x,
                            const BIGNUM *y, BN_CTX *ctx) {
  size_t width = x->width > y->width ? x->width : y->width;
  if (width == 0) {
    *out_shift = 0;
    BN_zero(r);
    return 1;
  }

  // This is a constant-time implementation of Stein's algorithm (binary GCD).
  int ret = 0;
  BN_CTX_start(ctx);
  BIGNUM *u = BN_CTX_get(ctx);
  BIGNUM *v = BN_CTX_get(ctx);
  BIGNUM *tmp = BN_CTX_get(ctx);
  unsigned x_bits, y_bits, num_iters, shift;
  if (u == NULL || v == NULL || tmp == NULL ||  //
      !BN_copy(u, x) ||                         //
      !BN_copy(v, y) ||                         //
      !bn_resize_words(u, width) ||             //
      !bn_resize_words(v, width) ||             //
      !bn_resize_words(tmp, width)) {
    goto err;
  }

  // Each loop iteration halves at least one of |u| and |v|. Thus we need at
  // most the combined bit width of inputs for at least one value to be zero.
  x_bits = x->width * BN_BITS2;
  y_bits = y->width * BN_BITS2;
  num_iters = x_bits + y_bits;
  if (num_iters < x_bits) {
    OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG);
    goto err;
  }

  shift = 0;
  for (unsigned i = 0; i < num_iters; i++) {
    BN_ULONG both_odd = word_is_odd_mask(u->d[0]) & word_is_odd_mask(v->d[0]);

    // If both |u| and |v| are odd, subtract the smaller from the larger.
    BN_ULONG u_less_than_v =
        (BN_ULONG)0 - bn_sub_words(tmp->d, u->d, v->d, width);
    bn_select_words(u->d, both_odd & ~u_less_than_v, tmp->d, u->d, width);
    bn_sub_words(tmp->d, v->d, u->d, width);
    bn_select_words(v->d, both_odd & u_less_than_v, tmp->d, v->d, width);

    // At least one of |u| and |v| is now even.
    BN_ULONG u_is_odd = word_is_odd_mask(u->d[0]);
    BN_ULONG v_is_odd = word_is_odd_mask(v->d[0]);
    declassify_assert(!(u_is_odd & v_is_odd));

    // If both are even, the final GCD gains a factor of two.
    shift += 1 & (~u_is_odd & ~v_is_odd);

    // Halve any which are even.
    maybe_rshift1_words(u->d, ~u_is_odd, tmp->d, width);
    maybe_rshift1_words(v->d, ~v_is_odd, tmp->d, width);
  }

  // One of |u| or |v| is zero at this point. The algorithm usually makes |u|
  // zero, unless |y| was already zero on input. Fix this by combining the
  // values.
  declassify_assert(BN_is_zero(u) | BN_is_zero(v));
  for (size_t i = 0; i < width; i++) {
    v->d[i] |= u->d[i];
  }

  *out_shift = shift;
  ret = bn_set_words(r, v->d, width);

err:
  BN_CTX_end(ctx);
  return ret;
}

int BN_gcd(BIGNUM *r, const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx) {
  unsigned shift;
  return bn_gcd_consttime(r, &shift, x, y, ctx) &&
         BN_lshift(r, r, shift);
}

// bn_is_relatively_prime sets |*out_relatively_prime| to one if
// gcd(x, y) == 1 and zero otherwise, computed in constant time.
int bn_is_relatively_prime(int *out_relatively_prime, const BIGNUM *x,
                           const BIGNUM *y, BN_CTX *ctx) {
  int ret = 0;
  BN_CTX_start(ctx);
  unsigned shift;
  BIGNUM *gcd = BN_CTX_get(ctx);
  if (gcd == NULL ||
      !bn_gcd_consttime(gcd, &shift, x, y, ctx)) {
    goto err;
  }

  // Check that 2^|shift| * |gcd| is one.
  if (gcd->width == 0) {
    *out_relatively_prime = 0;
  } else {
    // The gcd is one iff no power of two was factored out (shift == 0), the
    // low word is exactly one, and all higher words are zero.
    BN_ULONG mask = shift | (gcd->d[0] ^ 1);
    for (int i = 1; i < gcd->width; i++) {
      mask |= gcd->d[i];
    }
    *out_relatively_prime = mask == 0;
  }
  ret = 1;

err:
  BN_CTX_end(ctx);
  return ret;
}

// bn_lcm_consttime computes |r| = lcm(a, b) = a*b / gcd(a, b) in constant
// time.
int bn_lcm_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) {
  BN_CTX_start(ctx);
  unsigned shift;
  BIGNUM *gcd = BN_CTX_get(ctx);
  int ret = gcd != NULL &&                              //
            bn_mul_consttime(r, a, b, ctx) &&           //
            bn_gcd_consttime(gcd, &shift, a, b, ctx) &&
            // |gcd| has a secret bit width.
            bn_div_consttime(r, NULL, r, gcd, /*divisor_min_bits=*/0, ctx) &&
            bn_rshift_secret_shift(r, r, shift, ctx);
  BN_CTX_end(ctx);
  return ret;
}

int bn_mod_inverse_consttime(BIGNUM *r, int *out_no_inverse, const BIGNUM *a,
                             const BIGNUM *n, BN_CTX *ctx) {
  *out_no_inverse = 0;
  if (BN_is_negative(a) || BN_ucmp(a, n) >= 0) {
    OPENSSL_PUT_ERROR(BN, BN_R_INPUT_NOT_REDUCED);
    return 0;
  }
  if (BN_is_zero(a)) {
    if (BN_is_one(n)) {
      BN_zero(r);
      return 1;
    }
    *out_no_inverse = 1;
    OPENSSL_PUT_ERROR(BN, BN_R_NO_INVERSE);
    return 0;
  }

  // This is a constant-time implementation of the extended binary GCD
  // algorithm. It is adapted from the Handbook of Applied Cryptography,
  // section 14.4.3, algorithm 14.51, and modified to bound coefficients and
  // avoid negative numbers.
  //
  // For more details and proof of correctness, see
  // https://github.com/mit-plv/fiat-crypto/pull/333. In particular, see |step|
  // and |mod_inverse_consttime| for the algorithm in Gallina and see
  // |mod_inverse_consttime_spec| for the correctness result.

  if (!BN_is_odd(a) && !BN_is_odd(n)) {
    // Two even numbers share a factor of two, so no inverse exists.
    *out_no_inverse = 1;
    OPENSSL_PUT_ERROR(BN, BN_R_NO_INVERSE);
    return 0;
  }

  // This function exists to compute the RSA private exponent, where |a| is one
  // word. We'll thus use |a_width| when available.
size_t n_width = n->width, a_width = a->width; if (a_width > n_width) { a_width = n_width; } int ret = 0; BN_CTX_start(ctx); BIGNUM *u = BN_CTX_get(ctx); BIGNUM *v = BN_CTX_get(ctx); BIGNUM *A = BN_CTX_get(ctx); BIGNUM *B = BN_CTX_get(ctx); BIGNUM *C = BN_CTX_get(ctx); BIGNUM *D = BN_CTX_get(ctx); BIGNUM *tmp = BN_CTX_get(ctx); BIGNUM *tmp2 = BN_CTX_get(ctx); size_t a_bits, num_iters, n_bits; if (u == NULL || // v == NULL || // A == NULL || // B == NULL || // C == NULL || // D == NULL || // tmp == NULL || // tmp2 == NULL || // !BN_copy(u, a) || // !BN_copy(v, n) || // !BN_one(A) || // !BN_one(D) || // For convenience, size |u| and |v| equivalently. !bn_resize_words(u, n_width) || // !bn_resize_words(v, n_width) || // |A| and |C| are bounded by |m|. !bn_resize_words(A, n_width) || // !bn_resize_words(C, n_width) || // |B| and |D| are bounded by |a|. !bn_resize_words(B, a_width) || // !bn_resize_words(D, a_width) || // |tmp| and |tmp2| may be used at either size. !bn_resize_words(tmp, n_width) || // !bn_resize_words(tmp2, n_width)) { goto err; } // Each loop iteration halves at least one of |u| and |v|. Thus we need at // most the combined bit width of inputs for at least one value to be zero. // |a_bits| and |n_bits| cannot overflow because |bn_wexpand| ensures bit // counts fit in even |int|. a_bits = a_width * BN_BITS2; n_bits = n_width * BN_BITS2; num_iters = a_bits + n_bits; if (num_iters < a_bits) { OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG); goto err; } // Before and after each loop iteration, the following hold: // // u = A*a - B*n // v = D*n - C*a // 0 < u <= a // 0 <= v <= n // 0 <= A < n // 0 <= B <= a // 0 <= C < n // 0 <= D <= a // // After each loop iteration, u and v only get smaller, and at least one of // them shrinks by at least a factor of two. for (size_t i = 0; i < num_iters; i++) { BN_ULONG both_odd = word_is_odd_mask(u->d[0]) & word_is_odd_mask(v->d[0]); // If both |u| and |v| are odd, subtract the smaller from the larger. 
BN_ULONG v_less_than_u = (BN_ULONG)0 - bn_sub_words(tmp->d, v->d, u->d, n_width); bn_select_words(v->d, both_odd & ~v_less_than_u, tmp->d, v->d, n_width); bn_sub_words(tmp->d, u->d, v->d, n_width); bn_select_words(u->d, both_odd & v_less_than_u, tmp->d, u->d, n_width); // If we updated one of the values, update the corresponding coefficient. BN_ULONG carry = bn_add_words(tmp->d, A->d, C->d, n_width); carry -= bn_sub_words(tmp2->d, tmp->d, n->d, n_width); bn_select_words(tmp->d, carry, tmp->d, tmp2->d, n_width); bn_select_words(A->d, both_odd & v_less_than_u, tmp->d, A->d, n_width); bn_select_words(C->d, both_odd & ~v_less_than_u, tmp->d, C->d, n_width); bn_add_words(tmp->d, B->d, D->d, a_width); bn_sub_words(tmp2->d, tmp->d, a->d, a_width); bn_select_words(tmp->d, carry, tmp->d, tmp2->d, a_width); bn_select_words(B->d, both_odd & v_less_than_u, tmp->d, B->d, a_width); bn_select_words(D->d, both_odd & ~v_less_than_u, tmp->d, D->d, a_width); // Our loop invariants hold at this point. Additionally, exactly one of |u| // and |v| is now even. BN_ULONG u_is_even = ~word_is_odd_mask(u->d[0]); BN_ULONG v_is_even = ~word_is_odd_mask(v->d[0]); declassify_assert(u_is_even != v_is_even); // Halve the even one and adjust the corresponding coefficient. 
maybe_rshift1_words(u->d, u_is_even, tmp->d, n_width); BN_ULONG A_or_B_is_odd = word_is_odd_mask(A->d[0]) | word_is_odd_mask(B->d[0]); BN_ULONG A_carry = maybe_add_words(A->d, A_or_B_is_odd & u_is_even, n->d, tmp->d, n_width); BN_ULONG B_carry = maybe_add_words(B->d, A_or_B_is_odd & u_is_even, a->d, tmp->d, a_width); maybe_rshift1_words_carry(A->d, A_carry, u_is_even, tmp->d, n_width); maybe_rshift1_words_carry(B->d, B_carry, u_is_even, tmp->d, a_width); maybe_rshift1_words(v->d, v_is_even, tmp->d, n_width); BN_ULONG C_or_D_is_odd = word_is_odd_mask(C->d[0]) | word_is_odd_mask(D->d[0]); BN_ULONG C_carry = maybe_add_words(C->d, C_or_D_is_odd & v_is_even, n->d, tmp->d, n_width); BN_ULONG D_carry = maybe_add_words(D->d, C_or_D_is_odd & v_is_even, a->d, tmp->d, a_width); maybe_rshift1_words_carry(C->d, C_carry, v_is_even, tmp->d, n_width); maybe_rshift1_words_carry(D->d, D_carry, v_is_even, tmp->d, a_width); } declassify_assert(BN_is_zero(v)); // While the inputs and output are secret, this function considers whether the // input was invertible to be public. It is used as part of RSA key // generation, where inputs are chosen to already be invertible. if (constant_time_declassify_int(!BN_is_one(u))) { *out_no_inverse = 1; OPENSSL_PUT_ERROR(BN, BN_R_NO_INVERSE); goto err; } ret = BN_copy(r, A) != NULL; err: BN_CTX_end(ctx); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/generic.cc.inc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "internal.h" #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) // See asm/bn-586.pl. 
#define BN_ADD_ASM
#define BN_MUL_ASM
#endif

#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \
    (defined(__GNUC__) || defined(__clang__))
// See asm/x86_64-gcc.c
#define BN_ADD_ASM
#define BN_MUL_ASM
#endif

#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64)
// See asm/bn-armv8.pl.
#define BN_ADD_ASM
#endif

#if !defined(BN_MUL_ASM)

#ifdef BN_ULLONG

// Double-word arithmetic is available: compute products in |BN_ULLONG| and
// split them with |Lw|/|Hw|.

// mul_add computes r = a*w + r + c, leaving the low word in |r| and the high
// word (the carry out) in |c|.
#define mul_add(r, a, w, c)               \
  do {                                    \
    BN_ULLONG t;                          \
    t = (BN_ULLONG)(w) * (a) + (r) + (c); \
    (r) = Lw(t);                          \
    (c) = Hw(t);                          \
  } while (0)

// mul computes r = a*w + c, leaving the low word in |r| and the carry in |c|.
#define mul(r, a, w, c)             \
  do {                              \
    BN_ULLONG t;                    \
    t = (BN_ULLONG)(w) * (a) + (c); \
    (r) = Lw(t);                    \
    (c) = Hw(t);                    \
  } while (0)

// sqr computes the double-word square of |a| into (r1, r0).
#define sqr(r0, r1, a)        \
  do {                        \
    BN_ULLONG t;              \
    t = (BN_ULLONG)(a) * (a); \
    (r0) = Lw(t);             \
    (r1) = Hw(t);             \
  } while (0)

#else

// No double-word type: use |BN_UMULT_LOHI| and propagate carries manually.

#define mul_add(r, a, w, c)             \
  do {                                  \
    BN_ULONG high, low, ret, tmp = (a); \
    ret = (r);                          \
    BN_UMULT_LOHI(low, high, w, tmp);   \
    ret += (c);                         \
    (c) = (ret < (c)) ? 1 : 0;          \
    (c) += high;                        \
    ret += low;                         \
    (c) += (ret < low) ? 1 : 0;         \
    (r) = ret;                          \
  } while (0)

#define mul(r, a, w, c)                \
  do {                                 \
    BN_ULONG high, low, ret, ta = (a); \
    BN_UMULT_LOHI(low, high, w, ta);   \
    ret = low + (c);                   \
    (c) = high;                        \
    (c) += (ret < low) ? 1 : 0;        \
    (r) = ret;                         \
  } while (0)

#define sqr(r0, r1, a)               \
  do {                               \
    BN_ULONG tmp = (a);              \
    BN_UMULT_LOHI(r0, r1, tmp, tmp); \
  } while (0)

#endif  // !BN_ULLONG

BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, size_t num,
                          BN_ULONG w) {
  BN_ULONG c1 = 0;

  if (num == 0) {
    return c1;
  }

  // Process four words at a time, then the remainder one at a time.
  while (num & ~3) {
    mul_add(rp[0], ap[0], w, c1);
    mul_add(rp[1], ap[1], w, c1);
    mul_add(rp[2], ap[2], w, c1);
    mul_add(rp[3], ap[3], w, c1);
    ap += 4;
    rp += 4;
    num -= 4;
  }
  while (num) {
    mul_add(rp[0], ap[0], w, c1);
    ap++;
    rp++;
    num--;
  }

  return c1;
}

BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, size_t num,
                      BN_ULONG w) {
  BN_ULONG c1 = 0;

  if (num == 0) {
    return c1;
  }

  // Process four words at a time, then the remainder one at a time.
  while (num & ~3) {
    mul(rp[0], ap[0], w, c1);
    mul(rp[1], ap[1], w, c1);
    mul(rp[2], ap[2], w, c1);
    mul(rp[3], ap[3], w, c1);
    ap += 4;
    rp += 4;
    num -= 4;
  }
  while (num) {
    mul(rp[0], ap[0], w, c1);
    ap++;
    rp++;
    num--;
  }
  return c1;
}

void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, size_t n) {
  if (n == 0) {
    return;
  }

  // Each input word yields two output words, so |r| advances twice as fast.
  while (n & ~3) {
    sqr(r[0], r[1], a[0]);
    sqr(r[2], r[3], a[1]);
    sqr(r[4], r[5], a[2]);
    sqr(r[6], r[7], a[3]);
    a += 4;
    r += 8;
    n -= 4;
  }
  while (n) {
    sqr(r[0], r[1], a[0]);
    a++;
    r += 2;
    n--;
  }
}

// mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0)
// mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0)
// sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0)
// sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0)

#ifdef BN_ULLONG

// Keep in mind that additions to multiplication result can not overflow,
// because its high half cannot be all-ones.
#define mul_add_c(a, b, c0, c1, c2)     \
  do {                                  \
    BN_ULONG hi;                        \
    BN_ULLONG t = (BN_ULLONG)(a) * (b); \
    t += (c0); /* no carry */           \
    (c0) = (BN_ULONG)Lw(t);             \
    hi = (BN_ULONG)Hw(t);               \
    (c1) += (hi);                       \
    (c2) += (c1) < hi;                  \
  } while (0)

#define mul_add_c2(a, b, c0, c1, c2)        \
  do {                                      \
    BN_ULONG hi;                            \
    BN_ULLONG t = (BN_ULLONG)(a) * (b);     \
    BN_ULLONG tt = t + (c0); /* no carry */ \
    (c0) = (BN_ULONG)Lw(tt);                \
    hi = (BN_ULONG)Hw(tt);                  \
    (c1) += hi;                             \
    (c2) += (c1) < hi;                      \
    t += (c0); /* no carry */               \
    (c0) = (BN_ULONG)Lw(t);                 \
    hi = (BN_ULONG)Hw(t);                   \
    (c1) += hi;                             \
    (c2) += (c1) < hi;                      \
  } while (0)

#define sqr_add_c(a, i, c0, c1, c2)           \
  do {                                        \
    BN_ULONG hi;                              \
    BN_ULLONG t = (BN_ULLONG)(a)[i] * (a)[i]; \
    t += (c0); /* no carry */                 \
    (c0) = (BN_ULONG)Lw(t);                   \
    hi = (BN_ULONG)Hw(t);                     \
    (c1) += hi;                               \
    (c2) += (c1) < hi;                        \
  } while (0)

#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)

#else

// Keep in mind that additions to hi can not overflow, because the high word of
// a multiplication result cannot be all-ones.
#define mul_add_c(a, b, c0, c1, c2) \
  do {                              \
    BN_ULONG ta = (a), tb = (b);    \
    BN_ULONG lo, hi;                \
    BN_UMULT_LOHI(lo, hi, ta, tb);  \
    (c0) += lo;                     \
    hi += ((c0) < lo) ? 1 : 0;      \
    (c1) += hi;                     \
    (c2) += ((c1) < hi) ? 1 : 0;    \
  } while (0)

#define mul_add_c2(a, b, c0, c1, c2) \
  do {                               \
    BN_ULONG ta = (a), tb = (b);     \
    BN_ULONG lo, hi, tt;             \
    BN_UMULT_LOHI(lo, hi, ta, tb);   \
    (c0) += lo;                      \
    tt = hi + (((c0) < lo) ? 1 : 0); \
    (c1) += tt;                      \
    (c2) += ((c1) < tt) ? 1 : 0;     \
    (c0) += lo;                      \
    hi += (c0 < lo) ? 1 : 0;         \
    (c1) += hi;                      \
    (c2) += ((c1) < hi) ? 1 : 0;     \
  } while (0)

#define sqr_add_c(a, i, c0, c1, c2) \
  do {                              \
    BN_ULONG ta = (a)[i];           \
    BN_ULONG lo, hi;                \
    BN_UMULT_LOHI(lo, hi, ta, ta);  \
    (c0) += lo;                     \
    hi += (c0 < lo) ? 1 : 0;        \
    (c1) += hi;                     \
    (c2) += ((c1) < hi) ? 1 : 0;    \
  } while (0)

#define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2)

#endif  // !BN_ULLONG

// Fully-unrolled comba (column-wise) multiplication: each group of mul_add_c
// calls accumulates one output column r[k] = sum of a[i]*b[j] with i+j == k,
// carrying into the next two columns via the rotating (c1, c2, c3) trio.
void bn_mul_comba8(BN_ULONG r[16], const BN_ULONG a[8], const BN_ULONG b[8]) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  mul_add_c(a[0], b[0], c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  mul_add_c(a[0], b[1], c2, c3, c1);
  mul_add_c(a[1], b[0], c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  mul_add_c(a[2], b[0], c3, c1, c2);
  mul_add_c(a[1], b[1], c3, c1, c2);
  mul_add_c(a[0], b[2], c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  mul_add_c(a[0], b[3], c1, c2, c3);
  mul_add_c(a[1], b[2], c1, c2, c3);
  mul_add_c(a[2], b[1], c1, c2, c3);
  mul_add_c(a[3], b[0], c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  mul_add_c(a[4], b[0], c2, c3, c1);
  mul_add_c(a[3], b[1], c2, c3, c1);
  mul_add_c(a[2], b[2], c2, c3, c1);
  mul_add_c(a[1], b[3], c2, c3, c1);
  mul_add_c(a[0], b[4], c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  mul_add_c(a[0], b[5], c3, c1, c2);
  mul_add_c(a[1], b[4], c3, c1, c2);
  mul_add_c(a[2], b[3], c3, c1, c2);
  mul_add_c(a[3], b[2], c3, c1, c2);
  mul_add_c(a[4], b[1], c3, c1, c2);
  mul_add_c(a[5], b[0], c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  mul_add_c(a[6], b[0], c1, c2, c3);
  mul_add_c(a[5], b[1], c1, c2, c3);
  mul_add_c(a[4], b[2], c1, c2, c3);
  mul_add_c(a[3], b[3], c1, c2, c3);
  mul_add_c(a[2], b[4], c1, c2, c3);
  mul_add_c(a[1], b[5], c1, c2, c3);
  mul_add_c(a[0], b[6], c1, c2, c3);
  r[6] = c1;
  c1 = 0;
  mul_add_c(a[0], b[7], c2, c3, c1);
  mul_add_c(a[1], b[6], c2, c3, c1);
  mul_add_c(a[2], b[5], c2, c3, c1);
  mul_add_c(a[3], b[4], c2, c3, c1);
  mul_add_c(a[4], b[3], c2, c3, c1);
  mul_add_c(a[5], b[2], c2, c3, c1);
  mul_add_c(a[6], b[1], c2, c3, c1);
  mul_add_c(a[7], b[0], c2, c3, c1);
  r[7] = c2;
  c2 = 0;
  mul_add_c(a[7], b[1], c3, c1, c2);
  mul_add_c(a[6], b[2], c3, c1, c2);
  mul_add_c(a[5], b[3], c3, c1, c2);
  mul_add_c(a[4], b[4], c3, c1, c2);
  mul_add_c(a[3], b[5], c3, c1, c2);
  mul_add_c(a[2], b[6], c3, c1, c2);
  mul_add_c(a[1], b[7], c3, c1, c2);
  r[8] = c3;
  c3 = 0;
  mul_add_c(a[2], b[7], c1, c2, c3);
  mul_add_c(a[3], b[6], c1, c2, c3);
  mul_add_c(a[4], b[5], c1, c2, c3);
  mul_add_c(a[5], b[4], c1, c2, c3);
  mul_add_c(a[6], b[3], c1, c2, c3);
  mul_add_c(a[7], b[2], c1, c2, c3);
  r[9] = c1;
  c1 = 0;
  mul_add_c(a[7], b[3], c2, c3, c1);
  mul_add_c(a[6], b[4], c2, c3, c1);
  mul_add_c(a[5], b[5], c2, c3, c1);
  mul_add_c(a[4], b[6], c2, c3, c1);
  mul_add_c(a[3], b[7], c2, c3, c1);
  r[10] = c2;
  c2 = 0;
  mul_add_c(a[4], b[7], c3, c1, c2);
  mul_add_c(a[5], b[6], c3, c1, c2);
  mul_add_c(a[6], b[5], c3, c1, c2);
  mul_add_c(a[7], b[4], c3, c1, c2);
  r[11] = c3;
  c3 = 0;
  mul_add_c(a[7], b[5], c1, c2, c3);
  mul_add_c(a[6], b[6], c1, c2, c3);
  mul_add_c(a[5], b[7], c1, c2, c3);
  r[12] = c1;
  c1 = 0;
  mul_add_c(a[6], b[7], c2, c3, c1);
  mul_add_c(a[7], b[6], c2, c3, c1);
  r[13] = c2;
  c2 = 0;
  mul_add_c(a[7], b[7], c3, c1, c2);
  r[14] = c3;
  r[15] = c1;
}

void bn_mul_comba4(BN_ULONG r[8], const BN_ULONG a[4], const BN_ULONG b[4]) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  mul_add_c(a[0], b[0], c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  mul_add_c(a[0], b[1], c2, c3, c1);
  mul_add_c(a[1], b[0], c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  mul_add_c(a[2], b[0], c3, c1, c2);
  mul_add_c(a[1], b[1], c3, c1, c2);
  mul_add_c(a[0], b[2], c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  mul_add_c(a[0], b[3], c1, c2, c3);
  mul_add_c(a[1], b[2], c1, c2, c3);
  mul_add_c(a[2], b[1], c1, c2, c3);
  mul_add_c(a[3], b[0], c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  mul_add_c(a[3], b[1], c2, c3, c1);
  mul_add_c(a[2], b[2], c2, c3, c1);
  mul_add_c(a[1], b[3], c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  mul_add_c(a[2], b[3], c3, c1, c2);
  mul_add_c(a[3], b[2], c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  mul_add_c(a[3], b[3], c1, c2, c3);
  r[6] = c1;
  r[7] = c2;
}

// Comba squaring: off-diagonal products a[i]*a[j] (i != j) are counted twice
// via sqr_add_c2; diagonal squares once via sqr_add_c.
void bn_sqr_comba8(BN_ULONG r[16], const BN_ULONG a[8]) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  sqr_add_c(a, 0, c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  sqr_add_c2(a, 1, 0, c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  sqr_add_c(a, 1, c3, c1, c2);
  sqr_add_c2(a, 2, 0, c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  sqr_add_c2(a, 3, 0, c1, c2, c3);
  sqr_add_c2(a, 2, 1, c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  sqr_add_c(a, 2, c2, c3, c1);
  sqr_add_c2(a, 3, 1, c2, c3, c1);
  sqr_add_c2(a, 4, 0, c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  sqr_add_c2(a, 5, 0, c3, c1, c2);
  sqr_add_c2(a, 4, 1, c3, c1, c2);
  sqr_add_c2(a, 3, 2, c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  sqr_add_c(a, 3, c1, c2, c3);
  sqr_add_c2(a, 4, 2, c1, c2, c3);
  sqr_add_c2(a, 5, 1, c1, c2, c3);
  sqr_add_c2(a, 6, 0, c1, c2, c3);
  r[6] = c1;
  c1 = 0;
  sqr_add_c2(a, 7, 0, c2, c3, c1);
  sqr_add_c2(a, 6, 1, c2, c3, c1);
  sqr_add_c2(a, 5, 2, c2, c3, c1);
  sqr_add_c2(a, 4, 3, c2, c3, c1);
  r[7] = c2;
  c2 = 0;
  sqr_add_c(a, 4, c3, c1, c2);
  sqr_add_c2(a, 5, 3, c3, c1, c2);
  sqr_add_c2(a, 6, 2, c3, c1, c2);
  sqr_add_c2(a, 7, 1, c3, c1, c2);
  r[8] = c3;
  c3 = 0;
  sqr_add_c2(a, 7, 2, c1, c2, c3);
  sqr_add_c2(a, 6, 3, c1, c2, c3);
  sqr_add_c2(a, 5, 4, c1, c2, c3);
  r[9] = c1;
  c1 = 0;
  sqr_add_c(a, 5, c2, c3, c1);
  sqr_add_c2(a, 6, 4, c2, c3, c1);
  sqr_add_c2(a, 7, 3, c2, c3, c1);
  r[10] = c2;
  c2 = 0;
  sqr_add_c2(a, 7, 4, c3, c1, c2);
  sqr_add_c2(a, 6, 5, c3, c1, c2);
  r[11] = c3;
  c3 = 0;
  sqr_add_c(a, 6, c1, c2, c3);
  sqr_add_c2(a, 7, 5, c1, c2, c3);
  r[12] = c1;
  c1 = 0;
  sqr_add_c2(a, 7, 6, c2, c3, c1);
  r[13] = c2;
  c2 = 0;
  sqr_add_c(a, 7, c3, c1, c2);
  r[14] = c3;
  r[15] = c1;
}

void bn_sqr_comba4(BN_ULONG r[8], const BN_ULONG a[4]) {
  BN_ULONG c1, c2, c3;

  c1 = 0;
  c2 = 0;
  c3 = 0;
  sqr_add_c(a, 0, c1, c2, c3);
  r[0] = c1;
  c1 = 0;
  sqr_add_c2(a, 1, 0, c2, c3, c1);
  r[1] = c2;
  c2 = 0;
  sqr_add_c(a, 1, c3, c1, c2);
  sqr_add_c2(a, 2, 0, c3, c1, c2);
  r[2] = c3;
  c3 = 0;
  sqr_add_c2(a, 3, 0, c1, c2, c3);
  sqr_add_c2(a, 2, 1, c1, c2, c3);
  r[3] = c1;
  c1 = 0;
  sqr_add_c(a, 2, c2, c3, c1);
  sqr_add_c2(a, 3, 1, c2, c3, c1);
  r[4] = c2;
  c2 = 0;
  sqr_add_c2(a, 3, 2, c3, c1, c2);
  r[5] = c3;
  c3 = 0;
  sqr_add_c(a, 3, c1, c2, c3);
  r[6] = c1;
  r[7] = c2;
}

#undef mul_add
#undef mul
#undef sqr
#undef mul_add_c
#undef mul_add_c2
#undef sqr_add_c
#undef sqr_add_c2

#endif  // !BN_MUL_ASM

#if !defined(BN_ADD_ASM)

BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      size_t n) {
  if (n == 0) {
    return 0;
  }

  // Four-way unrolled ripple-carry addition; the remainder runs one word at a
  // time.
  BN_ULONG carry = 0;
  while (n & ~3) {
    r[0] = CRYPTO_addc_w(a[0], b[0], carry, &carry);
    r[1] = CRYPTO_addc_w(a[1], b[1], carry, &carry);
    r[2] = CRYPTO_addc_w(a[2], b[2], carry, &carry);
    r[3] = CRYPTO_addc_w(a[3], b[3], carry, &carry);
    a += 4;
    b += 4;
    r += 4;
    n -= 4;
  }
  while (n) {
    r[0] = CRYPTO_addc_w(a[0], b[0], carry, &carry);
    a++;
    b++;
    r++;
    n--;
  }
  return carry;
}

BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      size_t n) {
  if (n == 0) {
    return (BN_ULONG)0;
  }

  // Four-way unrolled ripple-borrow subtraction; the remainder runs one word
  // at a time.
  BN_ULONG borrow = 0;
  while (n & ~3) {
    r[0] = CRYPTO_subc_w(a[0], b[0], borrow, &borrow);
    r[1] = CRYPTO_subc_w(a[1], b[1], borrow, &borrow);
    r[2] = CRYPTO_subc_w(a[2], b[2], borrow, &borrow);
    r[3] = CRYPTO_subc_w(a[3], b[3], borrow, &borrow);
    a += 4;
    b += 4;
    r += 4;
    n -= 4;
  }
  while (n) {
    r[0] = CRYPTO_subc_w(a[0], b[0], borrow, &borrow);
    a++;
    b++;
    r++;
    n--;
  }
  return borrow;
}

#endif  // !BN_ADD_ASM



================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/internal.h
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OPENSSL_HEADER_BN_INTERNAL_H
#define OPENSSL_HEADER_BN_INTERNAL_H

// NOTE(review): the targets of the angle-bracket #includes below appear to
// have been stripped during extraction — restore from upstream BoringSSL.
#include

#if defined(OPENSSL_X86_64) && defined(_MSC_VER)
OPENSSL_MSVC_PRAGMA(warning(push, 3))
#include
OPENSSL_MSVC_PRAGMA(warning(pop))
#pragma intrinsic(__umulh, _umul128)
#endif

#include "../../internal.h"

#if defined(__cplusplus)
extern "C" {
#endif

#if defined(OPENSSL_64_BIT)

#if defined(BORINGSSL_HAS_UINT128)
// MSVC doesn't support two-word integers on 64-bit.
#define BN_ULLONG uint128_t #if defined(BORINGSSL_CAN_DIVIDE_UINT128) #define BN_CAN_DIVIDE_ULLONG #endif #endif #define BN_BITS2 64 #define BN_BITS2_LG 6 #define BN_BYTES 8 #define BN_BITS4 32 #define BN_MASK2 (0xffffffffffffffffUL) #define BN_MASK2l (0xffffffffUL) #define BN_MASK2h (0xffffffff00000000UL) #define BN_MASK2h1 (0xffffffff80000000UL) #define BN_MONT_CTX_N0_LIMBS 1 #define BN_DEC_CONV (10000000000000000000UL) #define BN_DEC_NUM 19 #define TOBN(hi, lo) ((BN_ULONG)(hi) << 32 | (lo)) #elif defined(OPENSSL_32_BIT) #define BN_ULLONG uint64_t #define BN_CAN_DIVIDE_ULLONG #define BN_BITS2 32 #define BN_BITS2_LG 5 #define BN_BYTES 4 #define BN_BITS4 16 #define BN_MASK2 (0xffffffffUL) #define BN_MASK2l (0xffffUL) #define BN_MASK2h1 (0xffff8000UL) #define BN_MASK2h (0xffff0000UL) // On some 32-bit platforms, Montgomery multiplication is done using 64-bit // arithmetic with SIMD instructions. On such platforms, |BN_MONT_CTX::n0| // needs to be two words long. Only certain 32-bit platforms actually make use // of n0[1] and shorter R value would suffice for the others. However, // currently only the assembly files know which is which. #define BN_MONT_CTX_N0_LIMBS 2 #define BN_DEC_CONV (1000000000UL) #define BN_DEC_NUM 9 #define TOBN(hi, lo) (lo), (hi) #else #error "Must define either OPENSSL_32_BIT or OPENSSL_64_BIT" #endif #if !defined(OPENSSL_NO_ASM) && (defined(__GNUC__) || defined(__clang__)) #define BN_CAN_USE_INLINE_ASM #endif // MOD_EXP_CTIME_ALIGN is the alignment needed for |BN_mod_exp_mont_consttime|'s // tables. // // TODO(davidben): Historically, this alignment came from cache line // assumptions, which we've since removed. Is 64-byte alignment still necessary // or ideal? The true alignment requirement seems to now be 32 bytes, coming // from RSAZ's use of VMOVDQA to a YMM register. Non-x86_64 has even fewer // requirements. 
#define MOD_EXP_CTIME_ALIGN 64 // MOD_EXP_CTIME_STORAGE_LEN is the number of |BN_ULONG|s needed for the // |BN_mod_exp_mont_consttime| stack-allocated storage buffer. The buffer is // just the right size for the RSAZ and is about ~1KB larger than what's // necessary (4480 bytes) for 1024-bit inputs. #define MOD_EXP_CTIME_STORAGE_LEN \ (((320u * 3u) + (32u * 9u * 16u)) / sizeof(BN_ULONG)) #define STATIC_BIGNUM(x) \ { \ (BN_ULONG *)(x), sizeof(x) / sizeof(BN_ULONG), \ sizeof(x) / sizeof(BN_ULONG), 0, BN_FLG_STATIC_DATA \ } #if defined(BN_ULLONG) #define Lw(t) ((BN_ULONG)(t)) #define Hw(t) ((BN_ULONG)((t) >> BN_BITS2)) #endif // bn_minimal_width returns the minimal number of words needed to represent // |bn|. int bn_minimal_width(const BIGNUM *bn); // bn_set_minimal_width sets |bn->width| to |bn_minimal_width(bn)|. If |bn| is // zero, |bn->neg| is set to zero. void bn_set_minimal_width(BIGNUM *bn); // bn_wexpand ensures that |bn| has at least |words| works of space without // altering its value. It returns one on success or zero on allocation // failure. int bn_wexpand(BIGNUM *bn, size_t words); // bn_expand acts the same as |bn_wexpand|, but takes a number of bits rather // than a number of words. int bn_expand(BIGNUM *bn, size_t bits); // bn_resize_words adjusts |bn->width| to be |words|. It returns one on success // and zero on allocation error or if |bn|'s value is too large. OPENSSL_EXPORT int bn_resize_words(BIGNUM *bn, size_t words); // bn_select_words sets |r| to |a| if |mask| is all ones or |b| if |mask| is // all zeros. void bn_select_words(BN_ULONG *r, BN_ULONG mask, const BN_ULONG *a, const BN_ULONG *b, size_t num); // bn_set_words sets |bn| to the value encoded in the |num| words in |words|, // least significant word first. int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num); // bn_set_static_words acts like |bn_set_words|, but doesn't copy the data. A // flag is set on |bn| so that |BN_free| won't attempt to free the data. 
// // The |STATIC_BIGNUM| macro is probably a better solution for this outside of // the FIPS module. Inside of the FIPS module that macro generates rel.ro data, // which doesn't work with FIPS requirements. void bn_set_static_words(BIGNUM *bn, const BN_ULONG *words, size_t num); // bn_fits_in_words returns one if |bn| may be represented in |num| words, plus // a sign bit, and zero otherwise. int bn_fits_in_words(const BIGNUM *bn, size_t num); // bn_copy_words copies the value of |bn| to |out| and returns one if the value // is representable in |num| words. Otherwise, it returns zero. int bn_copy_words(BN_ULONG *out, size_t num, const BIGNUM *bn); // bn_assert_fits_in_bytes asserts that |bn| fits in |num| bytes. This is a // no-op in release builds, but triggers an assert in debug builds, and // declassifies all bytes which are therefore known to be zero in constant-time // validation. void bn_assert_fits_in_bytes(const BIGNUM *bn, size_t num); // bn_secret marks |bn|'s contents, but not its width or sign, as secret. See // |CONSTTIME_SECRET| for details. inline void bn_secret(BIGNUM *bn) { CONSTTIME_SECRET(bn->d, bn->width * sizeof(BN_ULONG)); } // bn_declassify marks |bn|'s value as public. See |CONSTTIME_DECLASSIFY| for // details. inline void bn_declassify(BIGNUM *bn) { CONSTTIME_DECLASSIFY(bn->d, bn->width * sizeof(BN_ULONG)); } // bn_mul_add_words multiples |ap| by |w|, adds the result to |rp|, and places // the result in |rp|. |ap| and |rp| must both be |num| words long. It returns // the carry word of the operation. |ap| and |rp| may be equal but otherwise may // not alias. BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, size_t num, BN_ULONG w); // bn_mul_words multiples |ap| by |w| and places the result in |rp|. |ap| and // |rp| must both be |num| words long. It returns the carry word of the // operation. |ap| and |rp| may be equal but otherwise may not alias. 
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, size_t num, BN_ULONG w); // bn_sqr_words sets |rp[2*i]| and |rp[2*i+1]| to |ap[i]|'s square, for all |i| // up to |num|. |ap| is an array of |num| words and |rp| an array of |2*num| // words. |ap| and |rp| may not alias. // // This gives the contribution of the |ap[i]*ap[i]| terms when squaring |ap|. void bn_sqr_words(BN_ULONG *rp, const BN_ULONG *ap, size_t num); // bn_add_words adds |ap| to |bp| and places the result in |rp|, each of which // are |num| words long. It returns the carry bit, which is one if the operation // overflowed and zero otherwise. Any pair of |ap|, |bp|, and |rp| may be equal // to each other but otherwise may not alias. BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, size_t num); // bn_sub_words subtracts |bp| from |ap| and places the result in |rp|. It // returns the borrow bit, which is one if the computation underflowed and zero // otherwise. Any pair of |ap|, |bp|, and |rp| may be equal to each other but // otherwise may not alias. BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, size_t num); // bn_mul_comba4 sets |r| to the product of |a| and |b|. void bn_mul_comba4(BN_ULONG r[8], const BN_ULONG a[4], const BN_ULONG b[4]); // bn_mul_comba8 sets |r| to the product of |a| and |b|. void bn_mul_comba8(BN_ULONG r[16], const BN_ULONG a[8], const BN_ULONG b[8]); // bn_sqr_comba8 sets |r| to |a|^2. void bn_sqr_comba8(BN_ULONG r[16], const BN_ULONG a[8]); // bn_sqr_comba4 sets |r| to |a|^2. void bn_sqr_comba4(BN_ULONG r[8], const BN_ULONG a[4]); // bn_less_than_words returns one if |a| < |b| and zero otherwise, where |a| // and |b| both are |len| words long. It runs in constant time. int bn_less_than_words(const BN_ULONG *a, const BN_ULONG *b, size_t len); // bn_in_range_words returns one if |min_inclusive| <= |a| < |max_exclusive|, // where |a| and |max_exclusive| both are |len| words long. 
|a| and // |max_exclusive| are treated as secret. int bn_in_range_words(const BN_ULONG *a, BN_ULONG min_inclusive, const BN_ULONG *max_exclusive, size_t len); // bn_rand_range_words sets |out| to a uniformly distributed random number from // |min_inclusive| to |max_exclusive|. Both |out| and |max_exclusive| are |len| // words long. // // This function runs in time independent of the result, but |min_inclusive| and // |max_exclusive| are public data. (Information about the range is unavoidably // leaked by how many iterations it took to select a number.) int bn_rand_range_words(BN_ULONG *out, BN_ULONG min_inclusive, const BN_ULONG *max_exclusive, size_t len, const uint8_t additional_data[32]); // bn_range_secret_range behaves like |BN_rand_range_ex|, but treats // |max_exclusive| as secret. Because of this constraint, the distribution of // values returned is more complex. // // Rather than repeatedly generating values until one is in range, which would // leak information, it generates one value. If the value is in range, it sets // |*out_is_uniform| to one. Otherwise, it sets |*out_is_uniform| to zero, // fixing up the value to force it in range. // // The subset of calls to |bn_rand_secret_range| which set |*out_is_uniform| to // one are uniformly distributed in the target range. Calls overall are not. // This function is intended for use in situations where the extra values are // still usable and where the number of iterations needed to reach the target // number of uniform outputs may be blinded for negligible probabilities of // timing leaks. // // Although this function treats |max_exclusive| as secret, it treats the number // of bits in |max_exclusive| as public. int bn_rand_secret_range(BIGNUM *r, int *out_is_uniform, BN_ULONG min_inclusive, const BIGNUM *max_exclusive); // BN_MONTGOMERY_MAX_WORDS is the maximum numer of words allowed in a |BIGNUM| // used with Montgomery reduction. 
Ideally this limit would be applied to all // |BIGNUM|s, in |bn_wexpand|, but the exactfloat library needs to create 8 MiB // values for other operations. #define BN_MONTGOMERY_MAX_WORDS (8 * 1024 / sizeof(BN_ULONG)) #if !defined(OPENSSL_NO_ASM) && \ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)) #define OPENSSL_BN_ASM_MONT // bn_mul_mont writes |ap| * |bp| mod |np| to |rp|, each |num| words // long. Inputs and outputs are in Montgomery form. |n0| is a pointer to the // corresponding field in |BN_MONT_CTX|. It returns one if |bn_mul_mont| handles // inputs of this size and zero otherwise. // // If at least one of |ap| or |bp| is fully reduced, |rp| will be fully reduced. // If neither is fully-reduced, the output may not be either. // // This function allocates |num| words on the stack, so |num| should be at most // |BN_MONTGOMERY_MAX_WORDS|. // // TODO(davidben): The x86_64 implementation expects a 32-bit input and masks // off upper bits. The aarch64 implementation expects a 64-bit input and does // not. |size_t| is the safer option but not strictly correct for x86_64. But // the |BN_MONTGOMERY_MAX_WORDS| bound makes this moot. // // See also discussion in |ToWord| in abi_test.h for notes on smaller-than-word // inputs. int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, size_t num); #if defined(OPENSSL_X86_64) inline int bn_mulx_adx_capable(void) { // MULX is in BMI2. 
return CRYPTO_is_BMI2_capable() && CRYPTO_is_ADX_capable(); } int bn_mul_mont_nohw(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, size_t num); inline int bn_mul4x_mont_capable(size_t num) { return num >= 8 && (num & 3) == 0; } int bn_mul4x_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, size_t num); inline int bn_mulx4x_mont_capable(size_t num) { return bn_mul4x_mont_capable(num) && bn_mulx_adx_capable(); } int bn_mulx4x_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, size_t num); inline int bn_sqr8x_mont_capable(size_t num) { return num >= 8 && (num & 7) == 0; } int bn_sqr8x_mont(BN_ULONG *rp, const BN_ULONG *ap, BN_ULONG mulx_adx_capable, const BN_ULONG *np, const BN_ULONG *n0, size_t num); #elif defined(OPENSSL_ARM) inline int bn_mul8x_mont_neon_capable(size_t num) { return (num & 7) == 0 && CRYPTO_is_NEON_capable(); } int bn_mul8x_mont_neon(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, size_t num); int bn_mul_mont_nohw(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np, const BN_ULONG *n0, size_t num); #endif #endif // OPENSSL_BN_ASM_MONT #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) #define OPENSSL_BN_ASM_MONT5 // The following functions implement |bn_mul_mont_gather5|. See // |bn_mul_mont_gather5| for details. 
inline int bn_mul4x_mont_gather5_capable(int num) { return (num & 7) == 0; } void bn_mul4x_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *table, const BN_ULONG *np, const BN_ULONG *n0, int num, int power); inline int bn_mulx4x_mont_gather5_capable(int num) { return bn_mul4x_mont_gather5_capable(num) && CRYPTO_is_ADX_capable() && CRYPTO_is_BMI1_capable() && CRYPTO_is_BMI2_capable(); } void bn_mulx4x_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *table, const BN_ULONG *np, const BN_ULONG *n0, int num, int power); void bn_mul_mont_gather5_nohw(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *table, const BN_ULONG *np, const BN_ULONG *n0, int num, int power); // bn_scatter5 stores |inp| to index |power| of |table|. |inp| and each entry of // |table| are |num| words long. |power| must be less than 32 and is treated as // public. |table| must be 32*|num| words long. |table| must be aligned to at // least 16 bytes. void bn_scatter5(const BN_ULONG *inp, size_t num, BN_ULONG *table, size_t power); // bn_gather5 loads index |power| of |table| and stores it in |out|. |out| and // each entry of |table| are |num| words long. |power| must be less than 32 and // is treated as secret. |table| must be aligned to at least 16 bytes. void bn_gather5(BN_ULONG *out, size_t num, const BN_ULONG *table, size_t power); // The following functions implement |bn_power5|. See |bn_power5| for details. 
// bn_power5_nohw is the generic (no CPU-feature) implementation of
// |bn_power5|.
void bn_power5_nohw(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *table,
                    const BN_ULONG *np, const BN_ULONG *n0, int num, int power);

// bn_power5_capable returns one if |bn_power5_nohw| may be used with |num|
// limbs and zero otherwise.
inline int bn_power5_capable(int num) { return (num & 7) == 0; }

// bn_powerx5_capable returns one if |bn_powerx5| may be used with |num| limbs
// on this CPU (requires ADX, BMI1, and BMI2).
inline int bn_powerx5_capable(int num) {
  return bn_power5_capable(num) && CRYPTO_is_ADX_capable() &&
         CRYPTO_is_BMI1_capable() && CRYPTO_is_BMI2_capable();
}
void bn_powerx5(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *table,
                const BN_ULONG *np, const BN_ULONG *n0, int num, int power);

#endif  // !OPENSSL_NO_ASM && OPENSSL_X86_64

// bn_mont_n0 computes the Montgomery constant n0 = -1/n mod r for modulus |n|,
// where r = 2^(BN_MONT_CTX_N0_LIMBS * BN_BITS2). (See montgomery_inv.cc.)
uint64_t bn_mont_n0(const BIGNUM *n);

// bn_mont_ctx_set_RR_consttime initializes |mont->RR|. It returns one on
// success and zero on error. |mont->N| and |mont->n0| must have been
// initialized already. The bit width of |mont->N| is assumed public, but
// |mont->N| is otherwise treated as secret.
int bn_mont_ctx_set_RR_consttime(BN_MONT_CTX *mont, BN_CTX *ctx);

#if defined(_MSC_VER)
#if defined(OPENSSL_X86_64)
// MSVC x86-64: use the _umul128 intrinsic for a full 64x64->128 multiply.
#define BN_UMULT_LOHI(low, high, a, b) ((low) = _umul128((a), (b), &(high)))
#elif defined(OPENSSL_AARCH64)
// MSVC AArch64: combine a plain multiply with __umulh for the high half.
#define BN_UMULT_LOHI(low, high, a, b) \
  do {                                 \
    const BN_ULONG _a = (a);           \
    const BN_ULONG _b = (b);           \
    (low) = _a * _b;                   \
    (high) = __umulh(_a, _b);          \
  } while (0)
#endif
#endif  // _MSC_VER

#if !defined(BN_ULLONG) && !defined(BN_UMULT_LOHI)
#error "Either BN_ULLONG or BN_UMULT_LOHI must be defined on every platform."
#endif

// bn_jacobi returns the Jacobi symbol of |a| and |b| (which is -1, 0 or 1), or
// -2 on error.
int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);

// bn_is_bit_set_words returns one if bit |bit| is set in |a| and zero
// otherwise.
int bn_is_bit_set_words(const BN_ULONG *a, size_t num, size_t bit);

// bn_one_to_montgomery sets |r| to one in Montgomery form. It returns one on
// success and zero on error. This function treats the bit width of the modulus
// as public.
int bn_one_to_montgomery(BIGNUM *r, const BN_MONT_CTX *mont, BN_CTX *ctx);

// bn_less_than_montgomery_R returns one if |bn| is less than the Montgomery R
// value for |mont| and zero otherwise.
int bn_less_than_montgomery_R(const BIGNUM *bn, const BN_MONT_CTX *mont);

// bn_mod_u16_consttime returns |bn| mod |d|, ignoring |bn|'s sign bit. It runs
// in time independent of the value of |bn|, but it treats |d| as public.
OPENSSL_EXPORT uint16_t bn_mod_u16_consttime(const BIGNUM *bn, uint16_t d);

// bn_odd_number_is_obviously_composite returns one if |bn| is divisible by one
// of the first several odd primes and zero otherwise.
int bn_odd_number_is_obviously_composite(const BIGNUM *bn);

// A BN_MILLER_RABIN stores state common to each Miller-Rabin iteration. It is
// initialized within an existing |BN_CTX| scope and may not be used after
// that scope is released with |BN_CTX_end|. Field names match those in FIPS
// 186-4, section C.3.1.
typedef struct {
  // w1 is w-1.
  BIGNUM *w1;
  // m is (w-1)/2^a.
  BIGNUM *m;
  // one_mont is 1 (mod w) in Montgomery form.
  BIGNUM *one_mont;
  // w1_mont is w-1 (mod w) in Montgomery form.
  BIGNUM *w1_mont;
  // w_bits is BN_num_bits(w).
  int w_bits;
  // a is the largest integer such that 2^a divides w-1.
  int a;
} BN_MILLER_RABIN;

// bn_miller_rabin_init initializes |miller_rabin| for testing if |mont->N| is
// prime. It returns one on success and zero on error.
OPENSSL_EXPORT int bn_miller_rabin_init(BN_MILLER_RABIN *miller_rabin,
                                        const BN_MONT_CTX *mont, BN_CTX *ctx);

// bn_miller_rabin_iteration performs one Miller-Rabin iteration, checking if
// |b| is a composite witness for |mont->N|. |miller_rabin| must have been
// initialized with |bn_miller_rabin_init|. On success, it returns one and sets
// |*out_is_possibly_prime| to one if |mont->N| may still be prime or zero if
// |b| shows it is composite. On allocation or internal failure, it returns
// zero.
OPENSSL_EXPORT int bn_miller_rabin_iteration( const BN_MILLER_RABIN *miller_rabin, int *out_is_possibly_prime, const BIGNUM *b, const BN_MONT_CTX *mont, BN_CTX *ctx); // bn_rshift1_words sets |r| to |a| >> 1, where both arrays are |num| bits wide. void bn_rshift1_words(BN_ULONG *r, const BN_ULONG *a, size_t num); // bn_rshift_words sets |r| to |a| >> |shift|, where both arrays are |num| bits // wide. void bn_rshift_words(BN_ULONG *r, const BN_ULONG *a, unsigned shift, size_t num); // bn_rshift_secret_shift behaves like |BN_rshift| but runs in time independent // of both |a| and |n|. OPENSSL_EXPORT int bn_rshift_secret_shift(BIGNUM *r, const BIGNUM *a, unsigned n, BN_CTX *ctx); // bn_reduce_once sets |r| to |a| mod |m| where 0 <= |a| < 2*|m|. It returns // zero if |a| < |m| and a mask of all ones if |a| >= |m|. Each array is |num| // words long, but |a| has an additional word specified by |carry|. |carry| must // be zero or one, as implied by the bounds on |a|. // // |r|, |a|, and |m| may not alias. Use |bn_reduce_once_in_place| if |r| and |a| // must alias. BN_ULONG bn_reduce_once(BN_ULONG *r, const BN_ULONG *a, BN_ULONG carry, const BN_ULONG *m, size_t num); // bn_reduce_once_in_place behaves like |bn_reduce_once| but acts in-place on // |r|, using |tmp| as scratch space. |r|, |tmp|, and |m| may not alias. BN_ULONG bn_reduce_once_in_place(BN_ULONG *r, BN_ULONG carry, const BN_ULONG *m, BN_ULONG *tmp, size_t num); // Constant-time non-modular arithmetic. // // The following functions implement non-modular arithmetic in constant-time // and pessimally set |r->width| to the largest possible word size. // // Note this means that, e.g., repeatedly multiplying by one will cause widths // to increase without bound. The corresponding public API functions minimize // their outputs to avoid regressing calculator consumers. // bn_uadd_consttime behaves like |BN_uadd|, but it pessimally sets // |r->width| = |a->width| + |b->width| + 1. 
int bn_uadd_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b);

// bn_usub_consttime behaves like |BN_usub|, but it pessimally sets
// |r->width| = |a->width|.
int bn_usub_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b);

// bn_abs_sub_consttime sets |r| to the absolute value of |a| - |b|, treating
// both inputs as secret. It returns one on success and zero on error.
OPENSSL_EXPORT int bn_abs_sub_consttime(BIGNUM *r, const BIGNUM *a,
                                        const BIGNUM *b, BN_CTX *ctx);

// bn_mul_consttime behaves like |BN_mul|, but it rejects negative inputs and
// pessimally sets |r->width| to |a->width| + |b->width|, to avoid leaking
// information about |a| and |b|.
int bn_mul_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);

// bn_sqr_consttime behaves like |BN_sqr|, but it pessimally sets |r->width|
// to 2*|a->width|, to avoid leaking information about |a|.
int bn_sqr_consttime(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx);

// bn_div_consttime behaves like |BN_div|, but it rejects negative inputs and
// treats both inputs, including their magnitudes, as secret. It is, as a
// result, much slower than |BN_div| and should only be used for rare operations
// where Montgomery reduction is not available. |divisor_min_bits| is a
// public lower bound for |BN_num_bits(divisor)|. When |divisor|'s bit width is
// public, this can speed up the operation.
//
// Note that |quotient->width| will be set pessimally to |numerator->width|.
OPENSSL_EXPORT int bn_div_consttime(BIGNUM *quotient, BIGNUM *remainder,
                                    const BIGNUM *numerator,
                                    const BIGNUM *divisor,
                                    unsigned divisor_min_bits, BN_CTX *ctx);

// bn_is_relatively_prime checks whether GCD(|x|, |y|) is one. On success, it
// returns one and sets |*out_relatively_prime| to one if the GCD was one and
// zero otherwise. On error, it returns zero.
OPENSSL_EXPORT int bn_is_relatively_prime(int *out_relatively_prime,
                                          const BIGNUM *x, const BIGNUM *y,
                                          BN_CTX *ctx);

// bn_lcm_consttime sets |r| to LCM(|a|, |b|).
// It returns one on success and zero on error. |a| and |b| are both treated
// as secret.
OPENSSL_EXPORT int bn_lcm_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                                    BN_CTX *ctx);

// bn_mont_ctx_init zero-initializes |mont|.
void bn_mont_ctx_init(BN_MONT_CTX *mont);

// bn_mont_ctx_cleanup releases memory associated with |mont|, without freeing
// |mont| itself.
void bn_mont_ctx_cleanup(BN_MONT_CTX *mont);


// Constant-time modular arithmetic.
//
// The following functions implement basic constant-time modular arithmetic.

// bn_mod_add_words sets |r| to |a| + |b| (mod |m|), using |tmp| as scratch
// space. Each array is |num| words long. |a| and |b| must be < |m|. Any pair of
// |r|, |a|, and |b| may alias.
void bn_mod_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      const BN_ULONG *m, BN_ULONG *tmp, size_t num);

// bn_mod_add_consttime acts like |BN_mod_add_quick| but takes a |BN_CTX|.
int bn_mod_add_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                         const BIGNUM *m, BN_CTX *ctx);

// bn_mod_sub_words sets |r| to |a| - |b| (mod |m|), using |tmp| as scratch
// space. Each array is |num| words long. |a| and |b| must be < |m|. Any pair of
// |r|, |a|, and |b| may alias.
void bn_mod_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                      const BN_ULONG *m, BN_ULONG *tmp, size_t num);

// bn_mod_sub_consttime acts like |BN_mod_sub_quick| but takes a |BN_CTX|.
int bn_mod_sub_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                         const BIGNUM *m, BN_CTX *ctx);

// bn_mod_lshift1_consttime acts like |BN_mod_lshift1_quick| but takes a
// |BN_CTX|.
int bn_mod_lshift1_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *m,
                             BN_CTX *ctx);

// bn_mod_lshift_consttime acts like |BN_mod_lshift_quick| but takes a |BN_CTX|.
int bn_mod_lshift_consttime(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m,
                            BN_CTX *ctx);

// bn_mod_inverse_consttime sets |r| to |a|^-1, mod |n|. |a| must be non-
// negative and less than |n|. It returns one on success and zero on error.
On // failure, if the failure was caused by |a| having no inverse mod |n| then // |*out_no_inverse| will be set to one; otherwise it will be set to zero. // // This function treats both |a| and |n| as secret, provided they are both non- // zero and the inverse exists. It should only be used for even moduli where // none of the less general implementations are applicable. OPENSSL_EXPORT int bn_mod_inverse_consttime(BIGNUM *r, int *out_no_inverse, const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx); // bn_mod_inverse_prime sets |out| to the modular inverse of |a| modulo |p|, // computed with Fermat's Little Theorem. It returns one on success and zero on // error. If |mont_p| is NULL, one will be computed temporarily. int bn_mod_inverse_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx, const BN_MONT_CTX *mont_p); // bn_mod_inverse_secret_prime behaves like |bn_mod_inverse_prime| but uses // |BN_mod_exp_mont_consttime| instead of |BN_mod_exp_mont| in hopes of // protecting the exponent. int bn_mod_inverse_secret_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx, const BN_MONT_CTX *mont_p); // BN_MONT_CTX_set_locked takes |lock| and checks whether |*pmont| is NULL. If // so, it creates a new |BN_MONT_CTX| and sets the modulus for it to |mod|. It // then stores it as |*pmont|. It returns one on success and zero on error. Note // this function assumes |mod| is public. // // If |*pmont| is already non-NULL then it does nothing and returns one. int BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, CRYPTO_MUTEX *lock, const BIGNUM *mod, BN_CTX *bn_ctx); // Low-level operations for small numbers. // // The following functions implement algorithms suitable for use with scalars // and field elements in elliptic curves. They rely on the number being small // both to stack-allocate various temporaries and because they do not implement // optimizations useful for the larger values used in RSA. 
// BN_SMALL_MAX_WORDS is the largest size input these functions handle. This // limit allows temporaries to be more easily stack-allocated. This limit is set // to accommodate P-521. #if defined(OPENSSL_32_BIT) #define BN_SMALL_MAX_WORDS 17 #else #define BN_SMALL_MAX_WORDS 9 #endif // bn_mul_small sets |r| to |a|*|b|. |num_r| must be |num_a| + |num_b|. |r| may // not alias with |a| or |b|. void bn_mul_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, size_t num_a, const BN_ULONG *b, size_t num_b); // bn_sqr_small sets |r| to |a|^2. |num_a| must be at most |BN_SMALL_MAX_WORDS|. // |num_r| must be |num_a|*2. |r| and |a| may not alias. void bn_sqr_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, size_t num_a); // In the following functions, the modulus must be at most |BN_SMALL_MAX_WORDS| // words long. // bn_to_montgomery_small sets |r| to |a| translated to the Montgomery domain. // |r| and |a| are |num| words long, which must be |mont->N.width|. |a| must be // fully reduced and may alias |r|. void bn_to_montgomery_small(BN_ULONG *r, const BN_ULONG *a, size_t num, const BN_MONT_CTX *mont); // bn_from_montgomery_small sets |r| to |a| translated out of the Montgomery // domain. |r| and |a| are |num_r| and |num_a| words long, respectively. |num_r| // must be |mont->N.width|. |a| must be at most |mont->N|^2 and may alias |r|. // // Unlike most of these functions, only |num_r| is bounded by // |BN_SMALL_MAX_WORDS|. |num_a| may exceed it, but must be at most 2 * |num_r|. void bn_from_montgomery_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, size_t num_a, const BN_MONT_CTX *mont); // bn_mod_mul_montgomery_small sets |r| to |a| * |b| mod |mont->N|. Both inputs // and outputs are in the Montgomery domain. Each array is |num| words long, // which must be |mont->N.width|. Any two of |r|, |a|, and |b| may alias. |a| // and |b| must be reduced on input. 
void bn_mod_mul_montgomery_small(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, size_t num, const BN_MONT_CTX *mont); // bn_mod_exp_mont_small sets |r| to |a|^|p| mod |mont->N|. It returns one on // success and zero on programmer or internal error. Both inputs and outputs are // in the Montgomery domain. |r| and |a| are |num| words long, which must be // |mont->N.width| and at most |BN_SMALL_MAX_WORDS|. |num_p|, measured in bits, // must fit in |size_t|. |a| must be fully-reduced. This function runs in time // independent of |a|, but |p| and |mont->N| are public values. |a| must be // fully-reduced and may alias with |r|. // // Note this function differs from |BN_mod_exp_mont| which uses Montgomery // reduction but takes input and output outside the Montgomery domain. Combine // this function with |bn_from_montgomery_small| and |bn_to_montgomery_small| // if necessary. void bn_mod_exp_mont_small(BN_ULONG *r, const BN_ULONG *a, size_t num, const BN_ULONG *p, size_t num_p, const BN_MONT_CTX *mont); // bn_mod_inverse0_prime_mont_small sets |r| to |a|^-1 mod |mont->N|. If |a| is // zero, |r| is set to zero. |mont->N| must be a prime. |r| and |a| are |num| // words long, which must be |mont->N.width| and at most |BN_SMALL_MAX_WORDS|. // |a| must be fully-reduced and may alias |r|. This function runs in time // independent of |a|, but |mont->N| is a public value. void bn_mod_inverse0_prime_mont_small(BN_ULONG *r, const BN_ULONG *a, size_t num, const BN_MONT_CTX *mont); // Word-based byte conversion functions. // bn_big_endian_to_words interprets |in_len| bytes from |in| as a big-endian, // unsigned integer and writes the result to |out_len| words in |out|. |out_len| // must be large enough to represent any |in_len|-byte value. That is, |in_len| // must be at most |BN_BYTES * out_len|. 
void bn_big_endian_to_words(BN_ULONG *out, size_t out_len, const uint8_t *in,
                            size_t in_len);

// bn_words_to_big_endian represents |in_len| words from |in| as a big-endian,
// unsigned integer in |out_len| bytes. It writes the result to |out|. |out_len|
// must be large enough to represent |in| without truncation.
//
// Note |out_len| may be less than |BN_BYTES * in_len| if |in| is known to have
// leading zeros.
void bn_words_to_big_endian(uint8_t *out, size_t out_len, const BN_ULONG *in,
                            size_t in_len);


#if defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_BN_INTERNAL_H


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/jacobi.cc.inc
================================================
/*
 * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the extraction dropped the <...> operands of the two
// #include directives below — restore the header names from the upstream
// file before building.
#include
#include

#include "internal.h"


// BN_lsw returns the least significant word of |n|, or zero if |n| has no
// words.
#define BN_lsw(n) (((n)->width == 0) ? (BN_ULONG) 0 : (n)->d[0])

// bn_jacobi computes the Jacobi symbol (a/b) with the binary algorithm, using
// only parity tests, right shifts, and modular reduction. |b| must be odd and
// positive. Returns -1, 0, or 1 on success and -2 on error.
int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) {
  // In 'tab', only odd-indexed entries are relevant:
  // For any odd BIGNUM n,
  //     tab[BN_lsw(n) & 7]
  // is $(-1)^{(n^2-1)/8}$ (using TeX notation).
  // Note that the sign of n does not matter.
  static const int tab[8] = {0, 1, 0, -1, 0, -1, 0, 1};

  // The Jacobi symbol is only defined for odd modulus.
  if (!BN_is_odd(b)) {
    OPENSSL_PUT_ERROR(BN, BN_R_CALLED_WITH_EVEN_MODULUS);
    return -2;
  }
  // Require b be positive.
  if (BN_is_negative(b)) {
    OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER);
    return -2;
  }

  int ret = -2;
  BN_CTX_start(ctx);
  BIGNUM *A = BN_CTX_get(ctx);
  BIGNUM *B = BN_CTX_get(ctx);
  // Checking the last |BN_CTX_get| suffices; an earlier failure makes all
  // later calls fail too.
  if (B == NULL) {
    goto end;
  }

  if (!BN_copy(A, a) ||
      !BN_copy(B, b)) {
    goto end;
  }

  // Adapted from logic to compute the Kronecker symbol, originally implemented
  // according to Henri Cohen, "A Course in Computational Algebraic Number
  // Theory" (algorithm 1.4.10).
  ret = 1;

  while (1) {
    // Cohen's step 3:

    // B is positive and odd
    if (BN_is_zero(A)) {
      ret = BN_is_one(B) ? ret : 0;
      goto end;
    }

    // now A is non-zero
    int i = 0;
    while (!BN_is_bit_set(A, i)) {
      i++;
    }
    if (!BN_rshift(A, A, i)) {
      ret = -2;
      goto end;
    }
    if (i & 1) {
      // i is odd
      // multiply 'ret' by $(-1)^{(B^2-1)/8}$
      ret = ret * tab[BN_lsw(B) & 7];
    }

    // Cohen's step 4:
    // multiply 'ret' by $(-1)^{(A-1)(B-1)/4}$
    if ((A->neg ? ~BN_lsw(A) : BN_lsw(A)) & BN_lsw(B) & 2) {
      ret = -ret;
    }

    // (A, B) := (B mod |A|, |A|)
    if (!BN_nnmod(B, B, A, ctx)) {
      ret = -2;
      goto end;
    }
    // Swap A and B by pointer; clear the sign of the new B so it stays
    // positive for the next iteration.
    BIGNUM *tmp = A;
    A = B;
    B = tmp;
    tmp->neg = 0;
  }

end:
  BN_CTX_end(ctx);
  return ret;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/montgomery.cc.inc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../../internal.h" #include "internal.h" void bn_mont_ctx_init(BN_MONT_CTX *mont) { OPENSSL_memset(mont, 0, sizeof(BN_MONT_CTX)); BN_init(&mont->RR); BN_init(&mont->N); } void bn_mont_ctx_cleanup(BN_MONT_CTX *mont) { BN_free(&mont->RR); BN_free(&mont->N); } BN_MONT_CTX *BN_MONT_CTX_new(void) { BN_MONT_CTX *ret = reinterpret_cast(OPENSSL_malloc(sizeof(BN_MONT_CTX))); if (ret == NULL) { return NULL; } bn_mont_ctx_init(ret); return ret; } void BN_MONT_CTX_free(BN_MONT_CTX *mont) { if (mont == nullptr) { return; } bn_mont_ctx_cleanup(mont); OPENSSL_free(mont); } BN_MONT_CTX *BN_MONT_CTX_copy(BN_MONT_CTX *to, const BN_MONT_CTX *from) { if (to == from) { return to; } if (!BN_copy(&to->RR, &from->RR) || !BN_copy(&to->N, &from->N)) { return NULL; } to->n0[0] = from->n0[0]; to->n0[1] = from->n0[1]; return to; } static int bn_mont_ctx_set_N_and_n0(BN_MONT_CTX *mont, const BIGNUM *mod) { if (BN_is_zero(mod)) { OPENSSL_PUT_ERROR(BN, BN_R_DIV_BY_ZERO); return 0; } if (!BN_is_odd(mod)) { OPENSSL_PUT_ERROR(BN, BN_R_CALLED_WITH_EVEN_MODULUS); return 0; } if (BN_is_negative(mod)) { OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER); return 0; } if (!bn_fits_in_words(mod, BN_MONTGOMERY_MAX_WORDS)) { OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG); return 0; } // Save the modulus. if (!BN_copy(&mont->N, mod)) { OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR); return 0; } // |mont->N| is always stored minimally. Computing RR efficiently leaks the // size of the modulus. While the modulus may be private in RSA (one of the // primes), their sizes are public, so this is fine. bn_set_minimal_width(&mont->N); // Find n0 such that n0 * N == -1 (mod r). // // Only certain BN_BITS2<=32 platforms actually make use of n0[1]. 
For the // others, we could use a shorter R value and use faster |BN_ULONG|-based // math instead of |uint64_t|-based math, which would be double-precision. // However, currently only the assembler files know which is which. static_assert(BN_MONT_CTX_N0_LIMBS == 1 || BN_MONT_CTX_N0_LIMBS == 2, "BN_MONT_CTX_N0_LIMBS value is invalid"); static_assert(sizeof(BN_ULONG) * BN_MONT_CTX_N0_LIMBS == sizeof(uint64_t), "uint64_t is insufficient precision for n0"); uint64_t n0 = bn_mont_n0(&mont->N); mont->n0[0] = (BN_ULONG)n0; #if BN_MONT_CTX_N0_LIMBS == 2 mont->n0[1] = (BN_ULONG)(n0 >> BN_BITS2); #else mont->n0[1] = 0; #endif return 1; } int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx) { if (!bn_mont_ctx_set_N_and_n0(mont, mod)) { return 0; } BN_CTX *new_ctx = NULL; if (ctx == NULL) { new_ctx = BN_CTX_new(); if (new_ctx == NULL) { return 0; } ctx = new_ctx; } // Save RR = R**2 (mod N). R is the smallest power of 2**BN_BITS2 such that R // > mod. Even though the assembly on some 32-bit platforms works with 64-bit // values, using |BN_BITS2| here, rather than |BN_MONT_CTX_N0_LIMBS * // BN_BITS2|, is correct because R**2 will still be a multiple of the latter // as |BN_MONT_CTX_N0_LIMBS| is either one or two. 
unsigned lgBigR = mont->N.width * BN_BITS2; BN_zero(&mont->RR); int ok = BN_set_bit(&mont->RR, lgBigR * 2) && BN_mod(&mont->RR, &mont->RR, &mont->N, ctx) && bn_resize_words(&mont->RR, mont->N.width); BN_CTX_free(new_ctx); return ok; } BN_MONT_CTX *BN_MONT_CTX_new_for_modulus(const BIGNUM *mod, BN_CTX *ctx) { BN_MONT_CTX *mont = BN_MONT_CTX_new(); if (mont == NULL || !BN_MONT_CTX_set(mont, mod, ctx)) { BN_MONT_CTX_free(mont); return NULL; } return mont; } BN_MONT_CTX *BN_MONT_CTX_new_consttime(const BIGNUM *mod, BN_CTX *ctx) { BN_MONT_CTX *mont = BN_MONT_CTX_new(); if (mont == NULL || !bn_mont_ctx_set_N_and_n0(mont, mod) || !bn_mont_ctx_set_RR_consttime(mont, ctx)) { BN_MONT_CTX_free(mont); return NULL; } return mont; } int BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, CRYPTO_MUTEX *lock, const BIGNUM *mod, BN_CTX *bn_ctx) { CRYPTO_MUTEX_lock_read(lock); BN_MONT_CTX *ctx = *pmont; CRYPTO_MUTEX_unlock_read(lock); if (ctx) { return 1; } CRYPTO_MUTEX_lock_write(lock); if (*pmont == NULL) { *pmont = BN_MONT_CTX_new_for_modulus(mod, bn_ctx); } const int ok = *pmont != NULL; CRYPTO_MUTEX_unlock_write(lock); return ok; } int BN_to_montgomery(BIGNUM *ret, const BIGNUM *a, const BN_MONT_CTX *mont, BN_CTX *ctx) { return BN_mod_mul_montgomery(ret, a, &mont->RR, mont, ctx); } static int bn_from_montgomery_in_place(BN_ULONG *r, size_t num_r, BN_ULONG *a, size_t num_a, const BN_MONT_CTX *mont) { const BN_ULONG *n = mont->N.d; size_t num_n = mont->N.width; if (num_r != num_n || num_a != 2 * num_n) { OPENSSL_PUT_ERROR(BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } // Add multiples of |n| to |r| until R = 2^(nl * BN_BITS2) divides it. On // input, we had |r| < |n| * R, so now |r| < 2 * |n| * R. Note that |r| // includes |carry| which is stored separately. 
  BN_ULONG n0 = mont->n0[0];
  BN_ULONG carry = 0;
  for (size_t i = 0; i < num_n; i++) {
    // Add a[i] * n0 * n so the lowest remaining word of |a| becomes zero.
    BN_ULONG v = bn_mul_add_words(a + i, n, num_n, a[i] * n0);
    v += carry + a[i + num_n];
    // Constant-time carry-out computation for the addition above.
    carry |= (v != a[i + num_n]);
    carry &= (v <= a[i + num_n]);
    a[i + num_n] = v;
  }

  // Shift |num_n| words to divide by R. We have |a| < 2 * |n|. Note that |a|
  // includes |carry| which is stored separately.
  a += num_n;

  // |a| thus requires at most one additional subtraction |n| to be reduced.
  bn_reduce_once(r, a, carry, n, num_n);
  return 1;
}

// BN_from_montgomery_word Montgomery-reduces |r| into |ret| via
// |bn_from_montgomery_in_place|, first widening |r| to 2*|mont->N.width|
// words. |r| must be non-negative; it is clobbered.
static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r,
                                   const BN_MONT_CTX *mont) {
  if (r->neg) {
    OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER);
    return 0;
  }

  const BIGNUM *n = &mont->N;
  if (n->width == 0) {
    ret->width = 0;
    return 1;
  }

  int max = 2 * n->width;  // carry is stored separately
  if (!bn_resize_words(r, max) ||
      !bn_wexpand(ret, n->width)) {
    return 0;
  }

  ret->width = n->width;
  ret->neg = 0;
  return bn_from_montgomery_in_place(ret->d, ret->width, r->d, r->width, mont);
}

int BN_from_montgomery(BIGNUM *r, const BIGNUM *a, const BN_MONT_CTX *mont,
                       BN_CTX *ctx) {
  int ret = 0;
  BIGNUM *t;

  BN_CTX_start(ctx);
  // Work on a copy because |BN_from_montgomery_word| clobbers its input.
  t = BN_CTX_get(ctx);
  if (t == NULL ||
      !BN_copy(t, a)) {
    goto err;
  }

  ret = BN_from_montgomery_word(r, t, mont);

err:
  BN_CTX_end(ctx);
  return ret;
}

int bn_one_to_montgomery(BIGNUM *r, const BN_MONT_CTX *mont, BN_CTX *ctx) {
  // If the high bit of |n| is set, R = 2^(width*BN_BITS2) < 2 * |n|, so we
  // compute R - |n| rather than perform Montgomery reduction.
  const BIGNUM *n = &mont->N;
  if (n->width > 0 && (n->d[n->width - 1] >> (BN_BITS2 - 1)) != 0) {
    if (!bn_wexpand(r, n->width)) {
      return 0;
    }
    // R - |n| computed word-by-word: two's-complement negate the low word and
    // take the one's complement of the rest.
    r->d[0] = 0 - n->d[0];
    for (int i = 1; i < n->width; i++) {
      r->d[i] = ~n->d[i];
    }
    r->width = n->width;
    r->neg = 0;
    return 1;
  }

  // Otherwise reduce RR, which equals R * R mod N, down to R mod N.
  return BN_from_montgomery(r, &mont->RR, mont, ctx);
}

// bn_mod_mul_montgomery_fallback is the generic constant-time path for
// |BN_mod_mul_montgomery|: compute the full product (or square), then
// Montgomery-reduce it.
static int bn_mod_mul_montgomery_fallback(BIGNUM *r, const BIGNUM *a,
                                          const BIGNUM *b,
                                          const BN_MONT_CTX *mont,
                                          BN_CTX *ctx) {
  int ret = 0;

  BN_CTX_start(ctx);
  BIGNUM *tmp = BN_CTX_get(ctx);
  if (tmp == NULL) {
    goto err;
  }

  if (a == b) {
    if (!bn_sqr_consttime(tmp, a, ctx)) {
      goto err;
    }
  } else {
    if (!bn_mul_consttime(tmp, a, b, ctx)) {
      goto err;
    }
  }

  // reduce from aRR to aR
  if (!BN_from_montgomery_word(r, tmp, mont)) {
    goto err;
  }

  ret = 1;

err:
  BN_CTX_end(ctx);
  return ret;
}

int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                          const BN_MONT_CTX *mont, BN_CTX *ctx) {
  if (a->neg || b->neg) {
    OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER);
    return 0;
  }

#if defined(OPENSSL_BN_ASM_MONT)
  // |bn_mul_mont| requires at least 128 bits of limbs, at least for x86.
  int num = mont->N.width;
  if (num >= (128 / BN_BITS2) &&
      a->width == num &&
      b->width == num) {
    if (!bn_wexpand(r, num)) {
      return 0;
    }
    // This bound is implied by |bn_mont_ctx_set_N_and_n0|. |bn_mul_mont|
    // allocates |num| words on the stack, so |num| cannot be too large.
    assert((size_t)num <= BN_MONTGOMERY_MAX_WORDS);
    if (!bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num)) {
      // The check above ensures this won't happen.
      assert(0);
      OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR);
      return 0;
    }
    r->neg = 0;
    r->width = num;
    return 1;
  }
#endif

  return bn_mod_mul_montgomery_fallback(r, a, b, mont, ctx);
}

int bn_less_than_montgomery_R(const BIGNUM *bn, const BN_MONT_CTX *mont) {
  return !BN_is_negative(bn) && bn_fits_in_words(bn, mont->N.width);
}

void bn_to_montgomery_small(BN_ULONG *r, const BN_ULONG *a, size_t num,
                            const BN_MONT_CTX *mont) {
  // Multiplying by RR maps |a| into the Montgomery domain.
  bn_mod_mul_montgomery_small(r, a, mont->RR.d, num, mont);
}

void bn_from_montgomery_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a,
                              size_t num_a, const BN_CTX
                              /* see header for contract */ *mont_unused_doc
                              ) = delete;  // (placeholder removed)
 */

// NOTE(review): the extraction dropped the <...> operands of the two
// #include directives below — restore the header names from the upstream
// file before building.
#include
#include

#include "internal.h"
#include "../../internal.h"


static uint64_t bn_neg_inv_mod_r_u64(uint64_t n);

static_assert(BN_MONT_CTX_N0_LIMBS == 1 || BN_MONT_CTX_N0_LIMBS == 2,
              "BN_MONT_CTX_N0_LIMBS value is invalid");
static_assert(sizeof(BN_ULONG) * BN_MONT_CTX_N0_LIMBS == sizeof(uint64_t),
              "uint64_t is insufficient precision for n0");

// LG_LITTLE_R is log_2(r).
#define LG_LITTLE_R (BN_MONT_CTX_N0_LIMBS * BN_BITS2)

uint64_t bn_mont_n0(const BIGNUM *n) {
  // These conditions are checked by the caller, |BN_MONT_CTX_set| or
  // |BN_MONT_CTX_new_consttime|.
  assert(!BN_is_zero(n));
  assert(!BN_is_negative(n));
  assert(BN_is_odd(n));

  // r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This
  // ensures that we can do integer division by |r| by simply ignoring
  // |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo
  // |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is
  // what makes Montgomery multiplication efficient.
  //
  // As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography
  // with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a
  // multi-limb Montgomery multiplication of |a * b (mod n)|, given the
  // unreduced product |t == a * b|, we repeatedly calculate:
  //
  //    t1 := t % r         |t1| is |t|'s lowest limb (see previous paragraph).
  //    t2 := t1*n0*n
  //    t3 := t + t2
  //    t := t3 / r         copy all limbs of |t3| except the lowest to |t|.
  //
  // In the last step, it would only make sense to ignore the lowest limb of
  // |t3| if it were zero. The middle steps ensure that this is the case:
  //
  //                            t3 ==  0 (mod r)
  //                        t + t2 ==  0 (mod r)
  //                   t + t1*n0*n ==  0 (mod r)
  //                       t1*n0*n == -t (mod r)
  //                        t*n0*n == -t (mod r)
  //                          n0*n == -1 (mod r)
  //                            n0 == -1/n (mod r)
  //
  // Thus, in each iteration of the loop, we multiply by the constant factor
  // |n0|, the negative inverse of n (mod r).

  // n_mod_r = n % r. As explained above, this is done by taking the lowest
  // |BN_MONT_CTX_N0_LIMBS| limbs of |n|.
  uint64_t n_mod_r = n->d[0];
#if BN_MONT_CTX_N0_LIMBS == 2
  if (n->width > 1) {
    n_mod_r |= (uint64_t)n->d[1] << BN_BITS2;
  }
#endif

  return bn_neg_inv_mod_r_u64(n_mod_r);
}

// bn_neg_inv_r_mod_n_u64 calculates the -1/n mod r; i.e. it calculates |v|
// such that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n|
// must be odd.
//
// This is derived from |xbinGCD| in Henry S. Warren, Jr.'s "Montgomery
// Multiplication" (http://www.hackersdelight.org/MontgomeryMultiplication.pdf).
// It is very similar to the MODULAR-INVERSE function in Stephen R. Dussé's and
// Burton S. Kaliski Jr.'s "A Cryptographic Library for the Motorola DSP56000"
// (http://link.springer.com/chapter/10.1007%2F3-540-46877-3_21).
//
// This is inspired by Joppe W. Bos's "Constant Time Modular Inversion"
// (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is
// constant-time with respect to |n|. We assume uint64_t additions,
// subtractions, shifts, and bitwise operations are all constant time, which
// may be a large leap of faith on 32-bit targets. We avoid division and
// multiplication, which tend to be the most problematic in terms of timing
// leaks.
//
// Most GCD implementations return values such that |u*r + v*n == 1|, so the
// caller would have to negate the resultant |v| for the purpose of Montgomery
// multiplication. This implementation does the negation implicitly by doing
// the computations as a difference instead of a sum.
static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) {
  assert(n % 2 == 1);

  // alpha == 2**(lg r - 1) == r / 2.
  static const uint64_t alpha = UINT64_C(1) << (LG_LITTLE_R - 1);

  const uint64_t beta = n;

  uint64_t u = 1;
  uint64_t v = 0;

  // The invariant maintained from here on is:
  // 2**(lg r - i) == u*2*alpha - v*beta.
  for (size_t i = 0; i < LG_LITTLE_R; ++i) {
#if BN_BITS2 == 64 && defined(BN_ULLONG)
    assert((BN_ULLONG)(1) << (LG_LITTLE_R - i) ==
           ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta));
#endif

    // Delete a common factor of 2 in u and v if |u| is even. Otherwise, set
    // |u = (u + beta) / 2| and |v = (v / 2) + alpha|.

    uint64_t u_is_odd = UINT64_C(0) - (u & 1);  // Either 0xff..ff or 0.

    // The addition can overflow, so use Dietz's method for it.
    //
    // Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all
    // (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values
    // (embedded in 64 bits to so that overflow can be ignored):
    //
    // (declare-fun x () (_ BitVec 64))
    // (declare-fun y () (_ BitVec 64))
    // (assert (let (
    //    (one (_ bv1 64))
    //    (thirtyTwo (_ bv32 64)))
    //    (and
    //      (bvult x (bvshl one thirtyTwo))
    //      (bvult y (bvshl one thirtyTwo))
    //      (not (=
    //        (bvadd (bvlshr (bvxor x y) one) (bvand x y))
    //        (bvlshr (bvadd x y) one))))))
    // (check-sat)
    uint64_t beta_if_u_is_odd = beta & u_is_odd;  // Either |beta| or 0.
    u = ((u ^ beta_if_u_is_odd) >> 1) + (u & beta_if_u_is_odd);

    uint64_t alpha_if_u_is_odd = alpha & u_is_odd;  // Either |alpha| or 0.
    v = (v >> 1) + alpha_if_u_is_odd;
  }

  // The invariant now shows that u*r - v*n == 1 since r == 2 * alpha.
#if BN_BITS2 == 64 && defined(BN_ULLONG)
  declassify_assert(1 == ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta));
#endif

  return v;
}

// bn_mont_ctx_set_RR_consttime computes RR = R^2 mod N using only doubling
// and Montgomery squaring, keeping the value of |mont->N| secret (only its
// bit width is treated as public). See the declaration in internal.h.
int bn_mont_ctx_set_RR_consttime(BN_MONT_CTX *mont, BN_CTX *ctx) {
  assert(!BN_is_zero(&mont->N));
  assert(!BN_is_negative(&mont->N));
  assert(BN_is_odd(&mont->N));
  assert(bn_minimal_width(&mont->N) == mont->N.width);

  unsigned n_bits = BN_num_bits(&mont->N);
  assert(n_bits != 0);
  if (n_bits == 1) {
    // N is one; everything reduces to zero.
    BN_zero(&mont->RR);
    return bn_resize_words(&mont->RR, mont->N.width);
  }

  unsigned lgBigR = mont->N.width * BN_BITS2;
  assert(lgBigR >= n_bits);

  // RR is R, or 2^lgBigR, in the Montgomery domain. We can compute 2 in the
  // Montgomery domain, 2R or 2^(lgBigR+1), and then use Montgomery
  // square-and-multiply to exponentiate.
  //
  // The square steps take 2^n R to (2^n)*(2^n) R = 2^2n R. This is the same as
  // doubling 2^n R, n times (doubling any x, n times, computes 2^n * x). When n
  // is below some threshold, doubling is faster; when above, squaring is
  // faster. From benchmarking various 32-bit and 64-bit architectures, the word
  // count seems to work well as a threshold. (Doubling scales linearly and
  // Montgomery reduction scales quadratically, so the threshold should scale
  // roughly linearly.)
  //
  // The multiply steps take 2^n R to 2*2^n R = 2^(n+1) R. It is faster to
  // double the value instead, so the square-and-multiply exponentiation would
  // become square-and-double. However, when using the word count as the
  // threshold, it turns out that no multiply/double steps will be needed at
  // all, because squaring any x, i times, computes x^(2^i):
  //
  //   (2^threshold)^(2^BN_BITS2_LG) R
  //   (2^mont->N.width)^BN_BITS2 R
  // = 2^(mont->N.width*BN_BITS2) R
  // = 2^lgBigR R
  // = RR
  int threshold = mont->N.width;

  // Calculate 2^threshold R = 2^(threshold + lgBigR) by doubling. The
  // first n_bits - 1 doubles can be skipped because we don't need to reduce.
  if (!BN_set_bit(&mont->RR, n_bits - 1) ||
      !bn_mod_lshift_consttime(&mont->RR, &mont->RR,
                               threshold + (lgBigR - (n_bits - 1)),
                               &mont->N, ctx)) {
    return 0;
  }

  // The above steps are the same regardless of the threshold. The steps below
  // need to be modified if the threshold changes.
  assert(threshold == mont->N.width);
  for (unsigned i = 0; i < BN_BITS2_LG; i++) {
    if (!BN_mod_mul_montgomery(&mont->RR, &mont->RR, &mont->RR, mont, ctx)) {
      return 0;
    }
  }

  return bn_resize_words(&mont->RR, mont->N.width);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/mul.cc.inc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors.
All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the six #include directives below lost their <...> targets
// during text extraction; restore them from upstream BoringSSL (the upstream
// file includes e.g. <openssl/bn.h>, <assert.h>, <string.h>, <openssl/err.h>,
// <openssl/mem.h>) — TODO confirm the exact list against upstream.
#include
#include
#include
#include
#include
#include

#include "../../internal.h"
#include "internal.h"


#define BN_MUL_RECURSIVE_SIZE_NORMAL 16
#define BN_SQR_RECURSIVE_SIZE_NORMAL BN_MUL_RECURSIVE_SIZE_NORMAL


// bn_abs_sub_words sets |r| to the absolute value of |a| - |b|, treating both
// as |num|-word unsigned values. |tmp| is |num| words of scratch. The result
// is chosen with a constant-time select keyed on the borrow of a - b.
static void bn_abs_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                             size_t num, BN_ULONG *tmp) {
  // Compute a - b into |tmp| and b - a into |r|, then keep whichever did not
  // underflow.
  BN_ULONG borrow = bn_sub_words(tmp, a, b, num);
  bn_sub_words(r, b, a, num);
  bn_select_words(r, 0 - borrow, r /* tmp < 0 */, tmp /* tmp >= 0 */, num);
}

// bn_mul_normal computes the |na| + |nb| word product of |a| (length |na|)
// and |b| (length |nb|) into |r| by schoolbook multiplication.
static void bn_mul_normal(BN_ULONG *r, const BN_ULONG *a, size_t na,
                          const BN_ULONG *b, size_t nb) {
  if (na < nb) {
    // Swap so |a| is the longer input; each pass below consumes one word of
    // |b|, so fewer passes are needed when |b| is the shorter operand.
    size_t itmp = na;
    na = nb;
    nb = itmp;
    const BN_ULONG *ltmp = a;
    a = b;
    b = ltmp;
  }
  BN_ULONG *rr = &(r[na]);
  if (nb == 0) {
    OPENSSL_memset(r, 0, na * sizeof(BN_ULONG));
    return;
  }
  rr[0] = bn_mul_words(r, a, na, b[0]);

  // Accumulate one row of |b| per step; the loop is unrolled four rows at a
  // time.
  for (;;) {
    if (--nb == 0) {
      return;
    }
    rr[1] = bn_mul_add_words(&(r[1]), a, na, b[1]);
    if (--nb == 0) {
      return;
    }
    rr[2] = bn_mul_add_words(&(r[2]), a, na, b[2]);
    if (--nb == 0) {
      return;
    }
    rr[3] = bn_mul_add_words(&(r[3]), a, na, b[3]);
    if (--nb == 0) {
      return;
    }
    rr[4] = bn_mul_add_words(&(r[4]), a, na, b[4]);
    rr += 4;
    r += 4;
    b += 4;
  }
}

// bn_sub_part_words sets |r| to |a| - |b|. It returns the borrow bit, which is
// one if the operation underflowed and zero otherwise. |cl| is the common
// length, that is, the shorter of len(a) or len(b). |dl| is the delta length,
// that is, len(a) - len(b). |r|'s length matches the larger of |a| and |b|, or
// cl + abs(dl).
//
// TODO(davidben): Make this take |size_t|. The |cl| + |dl| calling convention
// is confusing.
static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a,
                                  const BN_ULONG *b, int cl, int dl) {
  assert(cl >= 0);
  BN_ULONG borrow = bn_sub_words(r, a, b, cl);
  if (dl == 0) {
    return borrow;
  }

  r += cl;
  a += cl;
  b += cl;
  if (dl < 0) {
    // |a| is shorter than |b|. Complete the subtraction as if the excess words
    // in |a| were zeros.
    dl = -dl;
    for (int i = 0; i < dl; i++) {
      r[i] = CRYPTO_subc_w(0, b[i], borrow, &borrow);
    }
  } else {
    // |b| is shorter than |a|. Complete the subtraction as if the excess words
    // in |b| were zeros.
    for (int i = 0; i < dl; i++) {
      r[i] = CRYPTO_subc_w(a[i], 0, borrow, &borrow);
    }
  }

  return borrow;
}

// bn_abs_sub_part_words computes |r| = |a| - |b|, storing the absolute value
// and returning a mask of all ones if the result was negative and all zeros if
// the result was positive. |cl| and |dl| follow the |bn_sub_part_words| calling
// convention.
//
// TODO(davidben): Make this take |size_t|. The |cl| + |dl| calling convention
// is confusing.
static BN_ULONG bn_abs_sub_part_words(BN_ULONG *r, const BN_ULONG *a,
                                      const BN_ULONG *b, int cl, int dl,
                                      BN_ULONG *tmp) {
  // Compute a - b into |tmp| and b - a into |r| (note |dl| is negated for the
  // swapped operands), then select the non-negative difference in constant
  // time based on the borrow of a - b.
  BN_ULONG borrow = bn_sub_part_words(tmp, a, b, cl, dl);
  bn_sub_part_words(r, b, a, cl, -dl);
  int r_len = cl + (dl < 0 ? -dl : dl);
  borrow = 0 - borrow;
  bn_select_words(r, borrow, r /* tmp < 0 */, tmp /* tmp >= 0 */, r_len);
  return borrow;
}

// bn_abs_sub_consttime sets |r| to the absolute value of |a| - |b|, in time
// depending only on the widths of the inputs. Returns one on success and zero
// on allocation failure.
int bn_abs_sub_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                         BN_CTX *ctx) {
  int cl = a->width < b->width ? a->width : b->width;
  int dl = a->width - b->width;
  int r_len = a->width < b->width ? b->width : a->width;
  BN_CTX_start(ctx);
  BIGNUM *tmp = BN_CTX_get(ctx);
  int ok = tmp != NULL && bn_wexpand(r, r_len) && bn_wexpand(tmp, r_len);
  if (ok) {
    bn_abs_sub_part_words(r->d, a->d, b->d, cl, dl, tmp->d);
    r->width = r_len;
  }
  BN_CTX_end(ctx);
  return ok;
}

// Karatsuba recursive multiplication algorithm
// (cf. Knuth, The Art of Computer Programming, Vol. 2)

// bn_mul_recursive sets |r| to |a| * |b|, using |t| as scratch space.
// |r| has
// length 2*|n2|, |a| has length |n2| + |dna|, |b| has length |n2| + |dnb|, and
// |t| has length 4*|n2|. |n2| must be a power of two. Finally, we must have
// -|BN_MUL_RECURSIVE_SIZE_NORMAL|/2 <= |dna| <= 0 and
// -|BN_MUL_RECURSIVE_SIZE_NORMAL|/2 <= |dnb| <= 0.
//
// TODO(davidben): Simplify and |size_t| the calling convention around lengths
// here.
static void bn_mul_recursive(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
                             int n2, int dna, int dnb, BN_ULONG *t) {
  // |n2| is a power of two.
  assert(n2 != 0 && (n2 & (n2 - 1)) == 0);
  // Check |dna| and |dnb| are in range.
  assert(-BN_MUL_RECURSIVE_SIZE_NORMAL / 2 <= dna && dna <= 0);
  assert(-BN_MUL_RECURSIVE_SIZE_NORMAL / 2 <= dnb && dnb <= 0);

  // Only call bn_mul_comba 8 if n2 == 8 and the
  // two arrays are complete [steve]
  if (n2 == 8 && dna == 0 && dnb == 0) {
    bn_mul_comba8(r, a, b);
    return;
  }

  // Else do normal multiply
  if (n2 < BN_MUL_RECURSIVE_SIZE_NORMAL) {
    bn_mul_normal(r, a, n2 + dna, b, n2 + dnb);
    if (dna + dnb < 0) {
      // The product is -(dna + dnb) words short of 2*|n2|; zero the rest.
      OPENSSL_memset(&r[2 * n2 + dna + dnb], 0,
                     sizeof(BN_ULONG) * -(dna + dnb));
    }
    return;
  }

  // Split |a| and |b| into a0,a1 and b0,b1, where a0 and b0 have size |n|.
  // Split |t| into t0,t1,t2,t3, each of size |n|, with the remaining 4*|n| used
  // for recursive calls.
  // Split |r| into r0,r1,r2,r3. We must contribute a0*b0 to r0,r1, the cross
  // term a0*b1 + a1*b0 to r1,r2, and a1*b1 to r2,r3. The middle term we will
  // compute as:
  //
  //   a0*b1 + a1*b0 = (a0 - a1)*(b1 - b0) + a1*b1 + a0*b0
  //
  // (The comment previously stated the middle term as "a0*a1 + b0*b1", which
  // is not what the identity above expands to.)
  //
  // Note that we know |n| >= |BN_MUL_RECURSIVE_SIZE_NORMAL|/2 above, so
  // |tna| and |tnb| are non-negative.
  int n = n2 / 2, tna = n + dna, tnb = n + dnb;

  // t0 = a0 - a1 and t1 = b1 - b0. The result will be multiplied, so we XOR
  // their sign masks, giving the sign of (a0 - a1)*(b1 - b0). t0 and t1
  // themselves store the absolute value.
  BN_ULONG neg = bn_abs_sub_part_words(t, a, &a[n], tna, n - tna, &t[n2]);
  neg ^= bn_abs_sub_part_words(&t[n], &b[n], b, tnb, tnb - n, &t[n2]);

  // Compute:
  // t2,t3 = t0 * t1 = |(a0 - a1)*(b1 - b0)|
  // r0,r1 = a0 * b0
  // r2,r3 = a1 * b1
  if (n == 4 && dna == 0 && dnb == 0) {
    bn_mul_comba4(&t[n2], t, &t[n]);
    bn_mul_comba4(r, a, b);
    bn_mul_comba4(&r[n2], &a[n], &b[n]);
  } else if (n == 8 && dna == 0 && dnb == 0) {
    bn_mul_comba8(&t[n2], t, &t[n]);
    bn_mul_comba8(r, a, b);
    bn_mul_comba8(&r[n2], &a[n], &b[n]);
  } else {
    BN_ULONG *p = &t[n2 * 2];
    bn_mul_recursive(&t[n2], t, &t[n], n, 0, 0, p);
    bn_mul_recursive(r, a, b, n, 0, 0, p);
    bn_mul_recursive(&r[n2], &a[n], &b[n], n, dna, dnb, p);
  }

  // t0,t1,c = r0,r1 + r2,r3 = a0*b0 + a1*b1
  BN_ULONG c = bn_add_words(t, r, &r[n2], n2);

  // t2,t3,c = t0,t1,c + neg*t2,t3 = (a0 - a1)*(b1 - b0) + a1*b1 + a0*b0.
  // The second term is stored as the absolute value, so we do this with a
  // constant-time select.
  BN_ULONG c_neg = c - bn_sub_words(&t[n2 * 2], t, &t[n2], n2);
  BN_ULONG c_pos = c + bn_add_words(&t[n2], t, &t[n2], n2);
  bn_select_words(&t[n2], neg, &t[n2 * 2], &t[n2], n2);
  static_assert(sizeof(BN_ULONG) <= sizeof(crypto_word_t),
                "crypto_word_t is too small");
  c = constant_time_select_w(neg, c_neg, c_pos);

  // We now have our three components. Add them together.
  // r1,r2,c = r1,r2 + t2,t3,c
  c += bn_add_words(&r[n], &r[n], &t[n2], n2);

  // Propagate the carry bit to the end.
  for (int i = n + n2; i < n2 + n2; i++) {
    BN_ULONG old = r[i];
    r[i] = old + c;
    c = r[i] < old;
  }

  // The product should fit without carries.
  declassify_assert(c == 0);
}

// bn_mul_part_recursive sets |r| to |a| * |b|, using |t| as scratch space. |r|
// has length 4*|n|, |a| has length |n| + |tna|, |b| has length |n| + |tnb|, and
// |t| has length 8*|n|. |n| must be a power of two. Additionally, we must have
// 0 <= tna < n and 0 <= tnb < n, and |tna| and |tnb| must differ by at most
// one.
// // TODO(davidben): Make this take |size_t| and perhaps the actual lengths of |a| // and |b|. static void bn_mul_part_recursive(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n, int tna, int tnb, BN_ULONG *t) { // |n| is a power of two. assert(n != 0 && (n & (n - 1)) == 0); // Check |tna| and |tnb| are in range. assert(0 <= tna && tna < n); assert(0 <= tnb && tnb < n); assert(-1 <= tna - tnb && tna - tnb <= 1); int n2 = n * 2; if (n < 8) { bn_mul_normal(r, a, n + tna, b, n + tnb); OPENSSL_memset(r + n2 + tna + tnb, 0, n2 - tna - tnb); return; } // Split |a| and |b| into a0,a1 and b0,b1, where a0 and b0 have size |n|. |a1| // and |b1| have size |tna| and |tnb|, respectively. // Split |t| into t0,t1,t2,t3, each of size |n|, with the remaining 4*|n| used // for recursive calls. // Split |r| into r0,r1,r2,r3. We must contribute a0*b0 to r0,r1, a0*a1+b0*b1 // to r1,r2, and a1*b1 to r2,r3. The middle term we will compute as: // // a0*a1 + b0*b1 = (a0 - a1)*(b1 - b0) + a1*b1 + a0*b0 // t0 = a0 - a1 and t1 = b1 - b0. The result will be multiplied, so we XOR // their sign masks, giving the sign of (a0 - a1)*(b1 - b0). t0 and t1 // themselves store the absolute value. BN_ULONG neg = bn_abs_sub_part_words(t, a, &a[n], tna, n - tna, &t[n2]); neg ^= bn_abs_sub_part_words(&t[n], &b[n], b, tnb, tnb - n, &t[n2]); // Compute: // t2,t3 = t0 * t1 = |(a0 - a1)*(b1 - b0)| // r0,r1 = a0 * b0 // r2,r3 = a1 * b1 if (n == 8) { bn_mul_comba8(&t[n2], t, &t[n]); bn_mul_comba8(r, a, b); bn_mul_normal(&r[n2], &a[n], tna, &b[n], tnb); // |bn_mul_normal| only writes |tna| + |tna| words. Zero the rest. 
OPENSSL_memset(&r[n2 + tna + tnb], 0, sizeof(BN_ULONG) * (n2 - tna - tnb)); } else { BN_ULONG *p = &t[n2 * 2]; bn_mul_recursive(&t[n2], t, &t[n], n, 0, 0, p); bn_mul_recursive(r, a, b, n, 0, 0, p); OPENSSL_memset(&r[n2], 0, sizeof(BN_ULONG) * n2); if (tna < BN_MUL_RECURSIVE_SIZE_NORMAL && tnb < BN_MUL_RECURSIVE_SIZE_NORMAL) { bn_mul_normal(&r[n2], &a[n], tna, &b[n], tnb); } else { int i = n; for (;;) { i /= 2; if (i < tna || i < tnb) { // E.g., n == 16, i == 8 and tna == 11. |tna| and |tnb| are within one // of each other, so if |tna| is larger and tna > i, then we know // tnb >= i, and this call is valid. bn_mul_part_recursive(&r[n2], &a[n], &b[n], i, tna - i, tnb - i, p); break; } if (i == tna || i == tnb) { // If there is only a bottom half to the number, just do it. We know // the larger of |tna - i| and |tnb - i| is zero. The other is zero or // -1 by because of |tna| and |tnb| differ by at most one. bn_mul_recursive(&r[n2], &a[n], &b[n], i, tna - i, tnb - i, p); break; } // This loop will eventually terminate when |i| falls below // |BN_MUL_RECURSIVE_SIZE_NORMAL| because we know one of |tna| and |tnb| // exceeds that. } } } // t0,t1,c = r0,r1 + r2,r3 = a0*b0 + a1*b1 BN_ULONG c = bn_add_words(t, r, &r[n2], n2); // t2,t3,c = t0,t1,c + neg*t2,t3 = (a0 - a1)*(b1 - b0) + a1*b1 + a0*b0. // The second term is stored as the absolute value, so we do this with a // constant-time select. BN_ULONG c_neg = c - bn_sub_words(&t[n2 * 2], t, &t[n2], n2); BN_ULONG c_pos = c + bn_add_words(&t[n2], t, &t[n2], n2); bn_select_words(&t[n2], neg, &t[n2 * 2], &t[n2], n2); static_assert(sizeof(BN_ULONG) <= sizeof(crypto_word_t), "crypto_word_t is too small"); c = constant_time_select_w(neg, c_neg, c_pos); // We now have our three components. Add them together. // r1,r2,c = r1,r2 + t2,t3,c c += bn_add_words(&r[n], &r[n], &t[n2], n2); // Propagate the carry bit to the end. 
for (int i = n + n2; i < n2 + n2; i++) { BN_ULONG old = r[i]; r[i] = old + c; c = r[i] < old; } // The product should fit without carries. declassify_assert(c == 0); } // bn_mul_impl implements |BN_mul| and |bn_mul_consttime|. Note this function // breaks |BIGNUM| invariants and may return a negative zero. This is handled by // the callers. static int bn_mul_impl(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { int al = a->width; int bl = b->width; if (al == 0 || bl == 0) { BN_zero(r); return 1; } int ret = 0, i, top; BIGNUM *rr; BN_CTX_start(ctx); if (r == a || r == b) { rr = BN_CTX_get(ctx); if (rr == NULL) { goto err; } } else { rr = r; } rr->neg = a->neg ^ b->neg; i = al - bl; if (i == 0) { if (al == 8) { if (!bn_wexpand(rr, 16)) { goto err; } rr->width = 16; bn_mul_comba8(rr->d, a->d, b->d); goto end; } } top = al + bl; static const int kMulNormalSize = 16; if (al >= kMulNormalSize && bl >= kMulNormalSize) { if (-1 <= i && i <= 1) { // Find the largest power of two less than or equal to the larger length. int j; if (i >= 0) { j = BN_num_bits_word((BN_ULONG)al); } else { j = BN_num_bits_word((BN_ULONG)bl); } j = 1 << (j - 1); assert(j <= al || j <= bl); BIGNUM *t = BN_CTX_get(ctx); if (t == NULL) { goto err; } if (al > j || bl > j) { // We know |al| and |bl| are at most one from each other, so if al > j, // bl >= j, and vice versa. Thus we can use |bn_mul_part_recursive|. // // TODO(davidben): This codepath is almost unused in standard // algorithms. Is this optimization necessary? See notes in // https://boringssl-review.googlesource.com/q/I0bd604e2cd6a75c266f64476c23a730ca1721ea6 assert(al >= j && bl >= j); if (!bn_wexpand(t, j * 8) || !bn_wexpand(rr, j * 4)) { goto err; } bn_mul_part_recursive(rr->d, a->d, b->d, j, al - j, bl - j, t->d); } else { // al <= j && bl <= j. Additionally, we know j <= al or j <= bl, so one // of al - j or bl - j is zero. The other, by the bound on |i| above, is // zero or -1. Thus, we can use |bn_mul_recursive|. 
if (!bn_wexpand(t, j * 4) || !bn_wexpand(rr, j * 2)) { goto err; } bn_mul_recursive(rr->d, a->d, b->d, j, al - j, bl - j, t->d); } rr->width = top; goto end; } } if (!bn_wexpand(rr, top)) { goto err; } rr->width = top; bn_mul_normal(rr->d, a->d, al, b->d, bl); end: if (r != rr && !BN_copy(r, rr)) { goto err; } ret = 1; err: BN_CTX_end(ctx); return ret; } int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { if (!bn_mul_impl(r, a, b, ctx)) { return 0; } // This additionally fixes any negative zeros created by |bn_mul_impl|. bn_set_minimal_width(r); return 1; } int bn_mul_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { // Prevent negative zeros. if (a->neg || b->neg) { OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER); return 0; } return bn_mul_impl(r, a, b, ctx); } void bn_mul_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, size_t num_a, const BN_ULONG *b, size_t num_b) { if (num_r != num_a + num_b) { abort(); } // TODO(davidben): Should this call |bn_mul_comba4| too? |BN_mul| does not // hit that code. if (num_a == 8 && num_b == 8) { bn_mul_comba8(r, a, b); } else { bn_mul_normal(r, a, num_a, b, num_b); } } // tmp must have 2*n words static void bn_sqr_normal(BN_ULONG *r, const BN_ULONG *a, size_t n, BN_ULONG *tmp) { if (n == 0) { return; } size_t max = n * 2; const BN_ULONG *ap = a; BN_ULONG *rp = r; rp[0] = rp[max - 1] = 0; rp++; // Compute the contribution of a[i] * a[j] for all i < j. if (n > 1) { ap++; rp[n - 1] = bn_mul_words(rp, ap, n - 1, ap[-1]); rp += 2; } if (n > 2) { for (size_t i = n - 2; i > 0; i--) { ap++; rp[i] = bn_mul_add_words(rp, ap, i, ap[-1]); rp += 2; } } // The final result fits in |max| words, so none of the following operations // will overflow. // Double |r|, giving the contribution of a[i] * a[j] for all i != j. bn_add_words(r, r, r, max); // Add in the contribution of a[i] * a[i] for all i. 
bn_sqr_words(tmp, a, n); bn_add_words(r, r, tmp, max); } // bn_sqr_recursive sets |r| to |a|^2, using |t| as scratch space. |r| has // length 2*|n2|, |a| has length |n2|, and |t| has length 4*|n2|. |n2| must be // a power of two. static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, size_t n2, BN_ULONG *t) { // |n2| is a power of two. assert(n2 != 0 && (n2 & (n2 - 1)) == 0); if (n2 == 4) { bn_sqr_comba4(r, a); return; } if (n2 == 8) { bn_sqr_comba8(r, a); return; } if (n2 < BN_SQR_RECURSIVE_SIZE_NORMAL) { bn_sqr_normal(r, a, n2, t); return; } // Split |a| into a0,a1, each of size |n|. // Split |t| into t0,t1,t2,t3, each of size |n|, with the remaining 4*|n| used // for recursive calls. // Split |r| into r0,r1,r2,r3. We must contribute a0^2 to r0,r1, 2*a0*a1 to // r1,r2, and a1^2 to r2,r3. size_t n = n2 / 2; BN_ULONG *t_recursive = &t[n2 * 2]; // t0 = |a0 - a1|. bn_abs_sub_words(t, a, &a[n], n, &t[n]); // t2,t3 = t0^2 = |a0 - a1|^2 = a0^2 - 2*a0*a1 + a1^2 bn_sqr_recursive(&t[n2], t, n, t_recursive); // r0,r1 = a0^2 bn_sqr_recursive(r, a, n, t_recursive); // r2,r3 = a1^2 bn_sqr_recursive(&r[n2], &a[n], n, t_recursive); // t0,t1,c = r0,r1 + r2,r3 = a0^2 + a1^2 BN_ULONG c = bn_add_words(t, r, &r[n2], n2); // t2,t3,c = t0,t1,c - t2,t3 = 2*a0*a1 c -= bn_sub_words(&t[n2], t, &t[n2], n2); // We now have our three components. Add them together. // r1,r2,c = r1,r2 + t2,t3,c c += bn_add_words(&r[n], &r[n], &t[n2], n2); // Propagate the carry bit to the end. for (size_t i = n + n2; i < n2 + n2; i++) { BN_ULONG old = r[i]; r[i] = old + c; c = r[i] < old; } // The square should fit without carries. 
assert(c == 0); } int BN_mul_word(BIGNUM *bn, BN_ULONG w) { if (!bn->width) { return 1; } if (w == 0) { BN_zero(bn); return 1; } BN_ULONG ll = bn_mul_words(bn->d, bn->d, bn->width, w); if (ll) { if (!bn_wexpand(bn, bn->width + 1)) { return 0; } bn->d[bn->width++] = ll; } return 1; } int bn_sqr_consttime(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx) { int al = a->width; if (al <= 0) { r->width = 0; r->neg = 0; return 1; } int ret = 0, max; BN_CTX_start(ctx); BIGNUM *rr = (a != r) ? r : BN_CTX_get(ctx); BIGNUM *tmp = BN_CTX_get(ctx); if (!rr || !tmp) { goto err; } max = 2 * al; // Non-zero (from above) if (!bn_wexpand(rr, max)) { goto err; } if (al == 4) { bn_sqr_comba4(rr->d, a->d); } else if (al == 8) { bn_sqr_comba8(rr->d, a->d); } else { if (al < BN_SQR_RECURSIVE_SIZE_NORMAL) { BN_ULONG t[BN_SQR_RECURSIVE_SIZE_NORMAL * 2]; bn_sqr_normal(rr->d, a->d, al, t); } else { // If |al| is a power of two, we can use |bn_sqr_recursive|. if (al != 0 && (al & (al - 1)) == 0) { if (!bn_wexpand(tmp, al * 4)) { goto err; } bn_sqr_recursive(rr->d, a->d, al, tmp->d); } else { if (!bn_wexpand(tmp, max)) { goto err; } bn_sqr_normal(rr->d, a->d, al, tmp->d); } } } rr->neg = 0; rr->width = max; if (rr != r && !BN_copy(r, rr)) { goto err; } ret = 1; err: BN_CTX_end(ctx); return ret; } int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx) { if (!bn_sqr_consttime(r, a, ctx)) { return 0; } bn_set_minimal_width(r); return 1; } void bn_sqr_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, size_t num_a) { if (num_r != 2 * num_a || num_a > BN_SMALL_MAX_WORDS) { abort(); } if (num_a == 4) { bn_sqr_comba4(r, a); } else if (num_a == 8) { bn_sqr_comba8(r, a); } else { BN_ULONG tmp[2 * BN_SMALL_MAX_WORDS]; bn_sqr_normal(r, a, num_a, tmp); OPENSSL_cleanse(tmp, 2 * num_a * sizeof(BN_ULONG)); } } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/prime.cc.inc ================================================ /* * Copyright 1995-2016 The OpenSSL Project 
Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the three #include directives below lost their <...> targets
// during text extraction; restore them from upstream BoringSSL (likely
// <openssl/bn.h>, <openssl/err.h>, <openssl/mem.h>) — TODO confirm.
#include
#include
#include

#include "../../internal.h"
#include "internal.h"


// kPrimes contains the first 1024 primes (2 through 8161), used for trial
// division of prime candidates.
static const uint16_t kPrimes[] = {
    2,    3,    5,    7,    11,   13,   17,   19,   23,   29,   31,   37,
    41,   43,   47,   53,   59,   61,   67,   71,   73,   79,   83,   89,
    97,   101,  103,  107,  109,  113,  127,  131,  137,  139,  149,  151,
    157,  163,  167,  173,  179,  181,  191,  193,  197,  199,  211,  223,
    227,  229,  233,  239,  241,  251,  257,  263,  269,  271,  277,  281,
    283,  293,  307,  311,  313,  317,  331,  337,  347,  349,  353,  359,
    367,  373,  379,  383,  389,  397,  401,  409,  419,  421,  431,  433,
    439,  443,  449,  457,  461,  463,  467,  479,  487,  491,  499,  503,
    509,  521,  523,  541,  547,  557,  563,  569,  571,  577,  587,  593,
    599,  601,  607,  613,  617,  619,  631,  641,  643,  647,  653,  659,
    661,  673,  677,  683,  691,  701,  709,  719,  727,  733,  739,  743,
    751,  757,  761,  769,  773,  787,  797,  809,  811,  821,  823,  827,
    829,  839,  853,  857,  859,  863,  877,  881,  883,  887,  907,  911,
    919,  929,  937,  941,  947,  953,  967,  971,  977,  983,  991,  997,
    1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069,
    1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163,
    1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249,
    1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321,
    1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439,
    1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511,
    1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601,
    1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693,
    1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783,
    1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877,
    1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987,
    1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069,
    2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143,
    2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267,
    2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347,
    2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423,
    2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543,
    2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657,
    2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713,
    2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801,
    2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903,
    2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011,
    3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119,
    3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221,
    3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323,
    3329, 3331, 3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413,
    3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, 3517, 3527,
    3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607,
    3613, 3617, 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697,
    3701, 3709, 3719, 3727, 3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797,
    3803, 3821, 3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907,
    3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989, 4001, 4003,
    4007, 4013, 4019, 4021, 4027, 4049, 4051, 4057, 4073, 4079, 4091, 4093,
    4099, 4111, 4127, 4129, 4133, 4139, 4153, 4157, 4159, 4177, 4201, 4211,
    4217, 4219, 4229, 4231, 4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283,
    4289, 4297, 4327, 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409,
    4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481, 4483, 4493, 4507, 4513,
    4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, 4591, 4597, 4603, 4621,
    4637, 4639, 4643, 4649, 4651, 4657, 4663, 4673, 4679, 4691, 4703, 4721,
    4723, 4729, 4733, 4751, 4759, 4783, 4787, 4789, 4793, 4799, 4801, 4813,
    4817, 4831, 4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933, 4937,
    4943, 4951, 4957, 4967, 4969, 4973, 4987, 4993, 4999, 5003, 5009, 5011,
    5021, 5023, 5039, 5051, 5059, 5077, 5081, 5087, 5099, 5101, 5107, 5113,
    5119, 5147, 5153, 5167, 5171, 5179, 5189, 5197, 5209, 5227, 5231, 5233,
    5237, 5261, 5273, 5279, 5281, 5297, 5303, 5309, 5323, 5333, 5347, 5351,
    5381, 5387, 5393, 5399, 5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443,
    5449, 5471, 5477, 5479, 5483, 5501, 5503, 5507, 5519, 5521, 5527, 5531,
    5557, 5563, 5569, 5573, 5581, 5591, 5623, 5639, 5641, 5647, 5651, 5653,
    5657, 5659, 5669, 5683, 5689, 5693, 5701, 5711, 5717, 5737, 5741, 5743,
    5749, 5779, 5783, 5791, 5801, 5807, 5813, 5821, 5827, 5839, 5843, 5849,
    5851, 5857, 5861, 5867, 5869, 5879, 5881, 5897, 5903, 5923, 5927, 5939,
    5953, 5981, 5987, 6007, 6011, 6029, 6037, 6043, 6047, 6053, 6067, 6073,
    6079, 6089, 6091, 6101, 6113, 6121, 6131, 6133, 6143, 6151, 6163, 6173,
    6197, 6199, 6203, 6211, 6217, 6221, 6229, 6247, 6257, 6263, 6269, 6271,
    6277, 6287, 6299, 6301, 6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359,
    6361, 6367, 6373, 6379, 6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473,
    6481, 6491, 6521, 6529, 6547, 6551, 6553, 6563, 6569, 6571, 6577, 6581,
    6599, 6607, 6619, 6637, 6653, 6659, 6661, 6673, 6679, 6689, 6691, 6701,
    6703, 6709, 6719, 6733, 6737, 6761, 6763, 6779, 6781, 6791, 6793, 6803,
    6823, 6827, 6829, 6833, 6841, 6857, 6863, 6869, 6871, 6883, 6899, 6907,
    6911, 6917, 6947, 6949, 6959, 6961, 6967, 6971, 6977, 6983, 6991, 6997,
    7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079, 7103, 7109, 7121,
    7127, 7129, 7151, 7159, 7177, 7187, 7193, 7207, 7211, 7213, 7219, 7229,
    7237, 7243, 7247, 7253, 7283, 7297, 7307, 7309, 7321, 7331, 7333, 7349,
    7351, 7369, 7393, 7411, 7417, 7433, 7451, 7457, 7459, 7477, 7481, 7487,
    7489, 7499, 7507, 7517, 7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561,
    7573, 7577, 7583, 7589, 7591, 7603, 7607, 7621, 7639, 7643, 7649, 7669,
    7673, 7681, 7687, 7691, 7699, 7703, 7717, 7723, 7727, 7741, 7753, 7757,
    7759, 7789, 7793, 7817, 7823, 7829, 7841, 7853, 7867, 7873, 7877, 7879,
    7883, 7901, 7907, 7919, 7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009,
    8011, 8017, 8039, 8053, 8059, 8069, 8081, 8087, 8089, 8093, 8101, 8111,
    8117, 8123, 8147, 8161,
};

// BN_prime_checks_for_size returns the number of Miller-Rabin iterations
// necessary for generating a 'bits'-bit candidate prime.
//
//
// This table is generated using the algorithm of FIPS PUB 186-4
// Digital Signature Standard (DSS), section F.1, page 117.
// (https://doi.org/10.6028/NIST.FIPS.186-4)
// The following magma script was used to generate the output:
// securitybits:=125;
// k:=1024;
// for t:=1 to 65 do
//   for M:=3 to Floor(2*Sqrt(k-1)-1) do
//     S:=0;
//     // Sum over m
//     for m:=3 to M do
//       s:=0;
//       // Sum over j
//       for j:=2 to m do
//         s+:=(RealField(32)!2)^-(j+(k-1)/j);
//       end for;
//       S+:=2^(m-(m-1)*t)*s;
//     end for;
//     A:=2^(k-2-M*t);
//     B:=8*(Pi(RealField(32))^2-6)/3*2^(k-2)*S;
//     pkt:=2.00743*Log(2)*k*2^-k*(A+B);
//     seclevel:=Floor(-Log(2,pkt));
//     if seclevel ge securitybits then
//       printf "k: %5o, security: %o bits (t: %o, M: %o)\n",k,seclevel,t,M;
//       break;
//     end if;
//   end for;
//   if seclevel ge securitybits then break; end if;
// end for;
//
// It can be run online at: http://magma.maths.usyd.edu.au/calc
// And will output:
// k: 1024, security: 129 bits (t: 6, M: 23)
// k is the number of bits of the prime, securitybits is the level we want to
// reach.
// prime length | RSA key size | # MR tests | security level // -------------+--------------|------------+--------------- // (b) >= 6394 | >= 12788 | 3 | 256 bit // (b) >= 3747 | >= 7494 | 3 | 192 bit // (b) >= 1345 | >= 2690 | 4 | 128 bit // (b) >= 1080 | >= 2160 | 5 | 128 bit // (b) >= 852 | >= 1704 | 5 | 112 bit // (b) >= 476 | >= 952 | 5 | 80 bit // (b) >= 400 | >= 800 | 6 | 80 bit // (b) >= 347 | >= 694 | 7 | 80 bit // (b) >= 308 | >= 616 | 8 | 80 bit // (b) >= 55 | >= 110 | 27 | 64 bit // (b) >= 6 | >= 12 | 34 | 64 bit static int BN_prime_checks_for_size(int bits) { if (bits >= 3747) { return 3; } if (bits >= 1345) { return 4; } if (bits >= 476) { return 5; } if (bits >= 400) { return 6; } if (bits >= 347) { return 7; } if (bits >= 308) { return 8; } if (bits >= 55) { return 27; } return 34; } // num_trial_division_primes returns the number of primes to try with trial // division before using more expensive checks. For larger numbers, the value // of excluding a candidate with trial division is larger. static size_t num_trial_division_primes(const BIGNUM *n) { if (n->width * BN_BITS2 > 1024) { return OPENSSL_ARRAY_SIZE(kPrimes); } return OPENSSL_ARRAY_SIZE(kPrimes) / 2; } // BN_PRIME_CHECKS_BLINDED is the iteration count for blinding the constant-time // primality test. See |BN_primality_test| for details. This number is selected // so that, for a candidate N-bit RSA prime, picking |BN_PRIME_CHECKS_BLINDED| // random N-bit numbers will have at least |BN_prime_checks_for_size(N)| values // in range with high probability. // // The following Python script computes the blinding factor needed for the // corresponding iteration count. /* import math # We choose candidate RSA primes between sqrt(2)/2 * 2^N and 2^N and select # witnesses by generating random N-bit numbers. Thus the probability of # selecting one in range is at least sqrt(2)/2. 
p = math.sqrt(2) / 2 # Target around 2^-8 probability of the blinding being insufficient given that # key generation is a one-time, noisy operation. epsilon = 2**-8 def choose(a, b): r = 1 for i in xrange(b): r *= a - i r /= (i + 1) return r def failure_rate(min_uniform, iterations): """ Returns the probability that, for |iterations| candidate witnesses, fewer than |min_uniform| of them will be uniform. """ prob = 0.0 for i in xrange(min_uniform): prob += (choose(iterations, i) * p**i * (1-p)**(iterations - i)) return prob for min_uniform in (3, 4, 5, 6, 8, 13, 19, 28): # Find the smallest number of iterations under the target failure rate. iterations = min_uniform while True: prob = failure_rate(min_uniform, iterations) if prob < epsilon: print min_uniform, iterations, prob break iterations += 1 Output: 3 9 0.00368894873911 4 11 0.00363319494662 5 13 0.00336215573898 6 15 0.00300145783158 8 19 0.00225214119331 13 27 0.00385610026955 19 38 0.0021410539126 28 52 0.00325405801769 16 iterations suffices for 400-bit primes and larger (6 uniform samples needed), which is already well below the minimum acceptable key size for RSA. 
*/ #define BN_PRIME_CHECKS_BLINDED 16 static int probable_prime(BIGNUM *rnd, int bits); static int probable_prime_dh(BIGNUM *rnd, int bits, const BIGNUM *add, const BIGNUM *rem, BN_CTX *ctx); static int probable_prime_dh_safe(BIGNUM *rnd, int bits, const BIGNUM *add, const BIGNUM *rem, BN_CTX *ctx); BN_GENCB *BN_GENCB_new(void) { return reinterpret_cast(OPENSSL_zalloc(sizeof(BN_GENCB))); } void BN_GENCB_free(BN_GENCB *callback) { OPENSSL_free(callback); } void BN_GENCB_set(BN_GENCB *callback, int (*f)(int event, int n, struct bn_gencb_st *), void *arg) { callback->callback = f; callback->arg = arg; } int BN_GENCB_call(BN_GENCB *callback, int event, int n) { if (!callback) { return 1; } return callback->callback(event, n, callback); } void *BN_GENCB_get_arg(const BN_GENCB *callback) { return callback->arg; } int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add, const BIGNUM *rem, BN_GENCB *cb) { BIGNUM *t; int found = 0; int i, j, c1 = 0; BN_CTX *ctx; int checks = BN_prime_checks_for_size(bits); if (bits < 2) { // There are no prime numbers this small. OPENSSL_PUT_ERROR(BN, BN_R_BITS_TOO_SMALL); return 0; } else if (bits == 2 && safe) { // The smallest safe prime (7) is three bits. OPENSSL_PUT_ERROR(BN, BN_R_BITS_TOO_SMALL); return 0; } ctx = BN_CTX_new(); if (ctx == NULL) { goto err; } BN_CTX_start(ctx); t = BN_CTX_get(ctx); if (!t) { goto err; } loop: // make a random number and set the top and bottom bits if (add == NULL) { if (!probable_prime(ret, bits)) { goto err; } } else { if (safe) { if (!probable_prime_dh_safe(ret, bits, add, rem, ctx)) { goto err; } } else { if (!probable_prime_dh(ret, bits, add, rem, ctx)) { goto err; } } } if (!BN_GENCB_call(cb, BN_GENCB_GENERATED, c1++)) { // aborted goto err; } if (!safe) { i = BN_is_prime_fasttest_ex(ret, checks, ctx, 0, cb); if (i == -1) { goto err; } else if (i == 0) { goto loop; } } else { // for "safe prime" generation, check that (p-1)/2 is prime. 
Since a prime // is odd, We just need to divide by 2 if (!BN_rshift1(t, ret)) { goto err; } // Interleave |ret| and |t|'s primality tests to avoid paying the full // iteration count on |ret| only to quickly discover |t| is composite. // // TODO(davidben): This doesn't quite work because an iteration count of 1 // still runs the blinding mechanism. for (i = 0; i < checks; i++) { j = BN_is_prime_fasttest_ex(ret, 1, ctx, 0, NULL); if (j == -1) { goto err; } else if (j == 0) { goto loop; } j = BN_is_prime_fasttest_ex(t, 1, ctx, 0, NULL); if (j == -1) { goto err; } else if (j == 0) { goto loop; } if (!BN_GENCB_call(cb, BN_GENCB_PRIME_TEST, i)) { goto err; } // We have a safe prime test pass } } // we have a prime :-) found = 1; err: if (ctx != NULL) { BN_CTX_end(ctx); BN_CTX_free(ctx); } return found; } static int bn_trial_division(uint16_t *out, const BIGNUM *bn) { const size_t num_primes = num_trial_division_primes(bn); for (size_t i = 1; i < num_primes; i++) { // During RSA key generation, |bn| may be secret, but only if |bn| was // prime, so it is safe to leak failed trial divisions. if (constant_time_declassify_int(bn_mod_u16_consttime(bn, kPrimes[i]) == 0)) { *out = kPrimes[i]; return 1; } } return 0; } int bn_odd_number_is_obviously_composite(const BIGNUM *bn) { uint16_t prime; return bn_trial_division(&prime, bn) && !BN_is_word(bn, prime); } int bn_miller_rabin_init(BN_MILLER_RABIN *miller_rabin, const BN_MONT_CTX *mont, BN_CTX *ctx) { // This function corresponds to steps 1 through 3 of FIPS 186-4, C.3.1. const BIGNUM *w = &mont->N; // Note we do not call |BN_CTX_start| in this function. We intentionally // allocate values in the containing scope so they outlive this function. 
miller_rabin->w1 = BN_CTX_get(ctx); miller_rabin->m = BN_CTX_get(ctx); miller_rabin->one_mont = BN_CTX_get(ctx); miller_rabin->w1_mont = BN_CTX_get(ctx); if (miller_rabin->w1 == NULL || // miller_rabin->m == NULL || // miller_rabin->one_mont == NULL || // miller_rabin->w1_mont == NULL) { return 0; } // See FIPS 186-4, C.3.1, steps 1 through 3. if (!bn_usub_consttime(miller_rabin->w1, w, BN_value_one())) { return 0; } miller_rabin->a = BN_count_low_zero_bits(miller_rabin->w1); if (!bn_rshift_secret_shift(miller_rabin->m, miller_rabin->w1, miller_rabin->a, ctx)) { return 0; } miller_rabin->w_bits = BN_num_bits(w); // Precompute some values in Montgomery form. if (!bn_one_to_montgomery(miller_rabin->one_mont, mont, ctx) || // w - 1 is -1 mod w, so we can compute it in the Montgomery domain, -R, // with a subtraction. (|one_mont| cannot be zero.) !bn_usub_consttime(miller_rabin->w1_mont, w, miller_rabin->one_mont)) { return 0; } return 1; } int bn_miller_rabin_iteration(const BN_MILLER_RABIN *miller_rabin, int *out_is_possibly_prime, const BIGNUM *b, const BN_MONT_CTX *mont, BN_CTX *ctx) { // This function corresponds to steps 4.3 through 4.5 of FIPS 186-4, C.3.1. int ret = 0; BN_CTX_start(ctx); // Step 4.3. We use Montgomery-encoding for better performance and to avoid // timing leaks. const BIGNUM *w = &mont->N; BIGNUM *z = BN_CTX_get(ctx); crypto_word_t is_possibly_prime; if (z == NULL || !BN_mod_exp_mont_consttime(z, b, miller_rabin->m, w, ctx, mont) || !BN_to_montgomery(z, z, mont, ctx)) { goto err; } // is_possibly_prime is all ones if we have determined |b| is not a composite // witness for |w|. This is equivalent to going to step 4.7 in the original // algorithm. To avoid timing leaks, we run the algorithm to the end for prime // inputs. is_possibly_prime = 0; // Step 4.4. If z = 1 or z = w-1, b is not a composite witness and w is still // possibly prime. 
is_possibly_prime = BN_equal_consttime(z, miller_rabin->one_mont) | BN_equal_consttime(z, miller_rabin->w1_mont); is_possibly_prime = 0 - is_possibly_prime; // Make it all zeros or all ones. // Step 4.5. // // To avoid leaking |a|, we run the loop to |w_bits| and mask off all // iterations once |j| = |a|. for (int j = 1; j < miller_rabin->w_bits; j++) { if (constant_time_declassify_w(constant_time_eq_int(j, miller_rabin->a) & ~is_possibly_prime)) { // If the loop is done and we haven't seen z = 1 or z = w-1 yet, the // value is composite and we can break in variable time. break; } // Step 4.5.1. if (!BN_mod_mul_montgomery(z, z, z, mont, ctx)) { goto err; } // Step 4.5.2. If z = w-1 and the loop is not done, this is not a composite // witness. crypto_word_t z_is_w1_mont = BN_equal_consttime(z, miller_rabin->w1_mont); z_is_w1_mont = 0 - z_is_w1_mont; // Make it all zeros or all ones. is_possibly_prime |= z_is_w1_mont; // Go to step 4.7 if |z_is_w1_mont|. // Step 4.5.3. If z = 1 and the loop is not done, the previous value of z // was not -1. There are no non-trivial square roots of 1 modulo a prime, so // w is composite and we may exit in variable time. if (constant_time_declassify_w( BN_equal_consttime(z, miller_rabin->one_mont) & ~is_possibly_prime)) { break; } } *out_is_possibly_prime = constant_time_declassify_w(is_possibly_prime) & 1; ret = 1; err: BN_CTX_end(ctx); return ret; } int BN_primality_test(int *out_is_probably_prime, const BIGNUM *w, int checks, BN_CTX *ctx, int do_trial_division, BN_GENCB *cb) { // This function's secrecy and performance requirements come from RSA key // generation. We generate RSA keys by selecting two large, secret primes with // rejection sampling. // // We thus treat |w| as secret if turns out to be a large prime. However, if // |w| is composite, we treat this and |w| itself as public. (Conversely, if // |w| is prime, that it is prime is public. Only the value is secret.) 
This // is fine for RSA key generation, but note it is important that we use // rejection sampling, with each candidate prime chosen independently. This // would not work for, e.g., an algorithm which looked for primes in // consecutive integers. These assumptions allow us to discard composites // quickly. We additionally treat |w| as public when it is a small prime to // simplify trial decryption and some edge cases. // // One RSA key generation will call this function on exactly two primes and // many more composites. The overall cost is a combination of several factors: // // 1. Checking if |w| is divisible by a small prime is much faster than // learning it is composite by Miller-Rabin (see below for details on that // cost). Trial division by p saves 1/p of Miller-Rabin calls, so this is // worthwhile until p exceeds the ratio of the two costs. // // 2. For a random (i.e. non-adversarial) candidate large prime and candidate // witness, the probability of false witness is very low. (This is why FIPS // 186-4 only requires a few iterations.) Thus composites not discarded by // trial decryption, in practice, cost one Miller-Rabin iteration. Only the // two actual primes cost the full iteration count. // // 3. A Miller-Rabin iteration is a modular exponentiation plus |a| additional // modular squares, where |a| is the number of factors of two in |w-1|. |a| // is likely small (the distribution falls exponentially), but it is also // potentially secret, so we loop up to its log(w) upper bound when |w| is // prime. When |w| is composite, we break early, so only two calls pay this // cost. (Note that all calls pay the modular exponentiation which is, // itself, log(w) modular multiplications and squares.) // // 4. While there are only two prime calls, they multiplicatively pay the full // costs of (2) and (3). // // 5. After the primes are chosen, RSA keys derive some values from the // primes, but this cost is negligible in comparison. 
*out_is_probably_prime = 0; if (BN_cmp(w, BN_value_one()) <= 0) { return 1; } if (!BN_is_odd(w)) { // The only even prime is two. *out_is_probably_prime = BN_is_word(w, 2); return 1; } // Miller-Rabin does not work for three. if (BN_is_word(w, 3)) { *out_is_probably_prime = 1; return 1; } if (do_trial_division) { // Perform additional trial division checks to discard small primes. uint16_t prime; if (bn_trial_division(&prime, w)) { *out_is_probably_prime = BN_is_word(w, prime); return 1; } if (!BN_GENCB_call(cb, BN_GENCB_PRIME_TEST, -1)) { return 0; } } if (checks == BN_prime_checks_for_generation) { checks = BN_prime_checks_for_size(BN_num_bits(w)); } BN_CTX *new_ctx = NULL; if (ctx == NULL) { new_ctx = BN_CTX_new(); if (new_ctx == NULL) { return 0; } ctx = new_ctx; } // See C.3.1 from FIPS 186-4. int ret = 0; BN_CTX_start(ctx); BIGNUM *b = BN_CTX_get(ctx); BN_MONT_CTX *mont = BN_MONT_CTX_new_consttime(w, ctx); BN_MILLER_RABIN miller_rabin; crypto_word_t uniform_iterations = 0; if (b == NULL || mont == NULL || // Steps 1-3. !bn_miller_rabin_init(&miller_rabin, mont, ctx)) { goto err; } // The following loop performs in inner iteration of the Miller-Rabin // Primality test (Step 4). // // The algorithm as specified in FIPS 186-4 leaks information on |w|, the RSA // private key. Instead, we run through each iteration unconditionally, // performing modular multiplications, masking off any effects to behave // equivalently to the specified algorithm. // // We also blind the number of values of |b| we try. Steps 4.1–4.2 say to // discard out-of-range values. To avoid leaking information on |w|, we use // |bn_rand_secret_range| which, rather than discarding bad values, adjusts // them to be in range. Though not uniformly selected, these adjusted values // are still usable as Miller-Rabin checks. // // Miller-Rabin is already probabilistic, so we could reach the desired // confidence levels by just suitably increasing the iteration count. 
However, // to align with FIPS 186-4, we use a more pessimal analysis: we do not count // the non-uniform values towards the iteration count. As a result, this // function is more complex and has more timing risk than necessary. // // We count both total iterations and uniform ones and iterate until we've // reached at least |BN_PRIME_CHECKS_BLINDED| and |iterations|, respectively. // If the latter is large enough, it will be the limiting factor with high // probability and we won't leak information. // // Note this blinding does not impact most calls when picking primes because // composites are rejected early. Only the two secret primes see extra work. // Using |constant_time_lt_w| seems to prevent the compiler from optimizing // this into two jumps. for (int i = 1; constant_time_declassify_w( (i <= BN_PRIME_CHECKS_BLINDED) | constant_time_lt_w(uniform_iterations, checks)); i++) { // Step 4.1-4.2 int is_uniform; if (!bn_rand_secret_range(b, &is_uniform, 2, miller_rabin.w1)) { goto err; } uniform_iterations += is_uniform; // Steps 4.3-4.5 int is_possibly_prime = 0; if (!bn_miller_rabin_iteration(&miller_rabin, &is_possibly_prime, b, mont, ctx)) { goto err; } if (!is_possibly_prime) { // Step 4.6. We did not see z = w-1 before z = 1, so w must be composite. 
*out_is_probably_prime = 0; ret = 1; goto err; } // Step 4.7 if (!BN_GENCB_call(cb, BN_GENCB_PRIME_TEST, i - 1)) { goto err; } } declassify_assert(uniform_iterations >= (crypto_word_t)checks); *out_is_probably_prime = 1; ret = 1; err: BN_MONT_CTX_free(mont); BN_CTX_end(ctx); BN_CTX_free(new_ctx); return ret; } int BN_is_prime_ex(const BIGNUM *candidate, int checks, BN_CTX *ctx, BN_GENCB *cb) { return BN_is_prime_fasttest_ex(candidate, checks, ctx, 0, cb); } int BN_is_prime_fasttest_ex(const BIGNUM *a, int checks, BN_CTX *ctx, int do_trial_division, BN_GENCB *cb) { int is_probably_prime; if (!BN_primality_test(&is_probably_prime, a, checks, ctx, do_trial_division, cb)) { return -1; } return is_probably_prime; } int BN_enhanced_miller_rabin_primality_test( enum bn_primality_result_t *out_result, const BIGNUM *w, int checks, BN_CTX *ctx, BN_GENCB *cb) { // Enhanced Miller-Rabin is only valid on odd integers greater than 3. if (!BN_is_odd(w) || BN_cmp_word(w, 3) <= 0) { OPENSSL_PUT_ERROR(BN, BN_R_INVALID_INPUT); return 0; } if (checks == BN_prime_checks_for_generation) { checks = BN_prime_checks_for_size(BN_num_bits(w)); } int ret = 0; BN_MONT_CTX *mont = NULL; BN_CTX_start(ctx); BIGNUM *w1 = BN_CTX_get(ctx); BIGNUM *b, *g, *z, *x, *x1, *m; int a; if (w1 == NULL || !BN_copy(w1, w) || !BN_sub_word(w1, 1)) { goto err; } // Write w1 as m*2^a (Steps 1 and 2). a = 0; while (!BN_is_bit_set(w1, a)) { a++; } m = BN_CTX_get(ctx); if (m == NULL || !BN_rshift(m, w1, a)) { goto err; } b = BN_CTX_get(ctx); g = BN_CTX_get(ctx); z = BN_CTX_get(ctx); x = BN_CTX_get(ctx); x1 = BN_CTX_get(ctx); if (b == NULL || g == NULL || z == NULL || x == NULL || x1 == NULL) { goto err; } // Montgomery setup for computations mod w mont = BN_MONT_CTX_new_for_modulus(w, ctx); if (mont == NULL) { goto err; } // The following loop performs in inner iteration of the Enhanced Miller-Rabin // Primality test (Step 4). 
for (int i = 1; i <= checks; i++) { // Step 4.1-4.2 if (!BN_rand_range_ex(b, 2, w1)) { goto err; } // Step 4.3-4.4 if (!BN_gcd(g, b, w, ctx)) { goto err; } if (BN_cmp_word(g, 1) > 0) { *out_result = bn_composite; ret = 1; goto err; } // Step 4.5 if (!BN_mod_exp_mont(z, b, m, w, ctx, mont)) { goto err; } // Step 4.6 if (BN_is_one(z) || BN_cmp(z, w1) == 0) { goto loop; } // Step 4.7 for (int j = 1; j < a; j++) { if (!BN_copy(x, z) || !BN_mod_mul(z, x, x, w, ctx)) { goto err; } if (BN_cmp(z, w1) == 0) { goto loop; } if (BN_is_one(z)) { goto composite; } } // Step 4.8-4.9 if (!BN_copy(x, z) || !BN_mod_mul(z, x, x, w, ctx)) { goto err; } // Step 4.10-4.11 if (!BN_is_one(z) && !BN_copy(x, z)) { goto err; } composite: // Step 4.12-4.14 if (!BN_copy(x1, x) || !BN_sub_word(x1, 1) || !BN_gcd(g, x1, w, ctx)) { goto err; } if (BN_cmp_word(g, 1) > 0) { *out_result = bn_composite; } else { *out_result = bn_non_prime_power_composite; } ret = 1; goto err; loop: // Step 4.15 if (!BN_GENCB_call(cb, BN_GENCB_PRIME_TEST, i - 1)) { goto err; } } *out_result = bn_probably_prime; ret = 1; err: BN_MONT_CTX_free(mont); BN_CTX_end(ctx); return ret; } static int probable_prime(BIGNUM *rnd, int bits) { do { if (!BN_rand(rnd, bits, BN_RAND_TOP_TWO, BN_RAND_BOTTOM_ODD)) { return 0; } } while (bn_odd_number_is_obviously_composite(rnd)); return 1; } static int probable_prime_dh(BIGNUM *rnd, int bits, const BIGNUM *add, const BIGNUM *rem, BN_CTX *ctx) { int ret = 0; BIGNUM *t1; BN_CTX_start(ctx); size_t num_primes; if ((t1 = BN_CTX_get(ctx)) == NULL) { goto err; } if (!BN_rand(rnd, bits, BN_RAND_TOP_ONE, BN_RAND_BOTTOM_ODD)) { goto err; } // we need ((rnd-rem) % add) == 0 if (!BN_mod(t1, rnd, add, ctx)) { goto err; } if (!BN_sub(rnd, rnd, t1)) { goto err; } if (rem == NULL) { if (!BN_add_word(rnd, 1)) { goto err; } } else { if (!BN_add(rnd, rnd, rem)) { goto err; } } // we now have a random number 'rand' to test. 
num_primes = num_trial_division_primes(rnd); loop: for (size_t i = 1; i < num_primes; i++) { // check that rnd is a prime if (bn_mod_u16_consttime(rnd, kPrimes[i]) <= 1) { if (!BN_add(rnd, rnd, add)) { goto err; } goto loop; } } ret = 1; err: BN_CTX_end(ctx); return ret; } static int probable_prime_dh_safe(BIGNUM *p, int bits, const BIGNUM *padd, const BIGNUM *rem, BN_CTX *ctx) { int ret = 0; BIGNUM *t1, *qadd, *q; bits--; BN_CTX_start(ctx); t1 = BN_CTX_get(ctx); q = BN_CTX_get(ctx); qadd = BN_CTX_get(ctx); size_t num_primes; if (qadd == NULL) { goto err; } if (!BN_rshift1(qadd, padd)) { goto err; } if (!BN_rand(q, bits, BN_RAND_TOP_ONE, BN_RAND_BOTTOM_ODD)) { goto err; } // we need ((rnd-rem) % add) == 0 if (!BN_mod(t1, q, qadd, ctx)) { goto err; } if (!BN_sub(q, q, t1)) { goto err; } if (rem == NULL) { if (!BN_add_word(q, 1)) { goto err; } } else { if (!BN_rshift1(t1, rem)) { goto err; } if (!BN_add(q, q, t1)) { goto err; } } // we now have a random number 'rand' to test. if (!BN_lshift1(p, q)) { goto err; } if (!BN_add_word(p, 1)) { goto err; } num_primes = num_trial_division_primes(p); loop: for (size_t i = 1; i < num_primes; i++) { // check that p and q are prime // check that for p and q // gcd(p-1,primes) == 1 (except for 2) if (bn_mod_u16_consttime(p, kPrimes[i]) == 0 || bn_mod_u16_consttime(q, kPrimes[i]) == 0) { if (!BN_add(p, p, padd)) { goto err; } if (!BN_add(q, q, qadd)) { goto err; } goto loop; } } ret = 1; err: BN_CTX_end(ctx); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/random.cc.inc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../../internal.h" #include "../bcm_interface.h" #include "../service_indicator/internal.h" #include "internal.h" int BN_rand(BIGNUM *rnd, int bits, int top, int bottom) { if (rnd == NULL) { return 0; } if (top != BN_RAND_TOP_ANY && top != BN_RAND_TOP_ONE && top != BN_RAND_TOP_TWO) { OPENSSL_PUT_ERROR(BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } if (bottom != BN_RAND_BOTTOM_ANY && bottom != BN_RAND_BOTTOM_ODD) { OPENSSL_PUT_ERROR(BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } if (bits == 0) { BN_zero(rnd); return 1; } if (bits > INT_MAX - (BN_BITS2 - 1)) { OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG); return 0; } int words = (bits + BN_BITS2 - 1) / BN_BITS2; int bit = (bits - 1) % BN_BITS2; const BN_ULONG kOne = 1; const BN_ULONG kThree = 3; BN_ULONG mask = bit < BN_BITS2 - 1 ? (kOne << (bit + 1)) - 1 : BN_MASK2; if (!bn_wexpand(rnd, words)) { return 0; } FIPS_service_indicator_lock_state(); BCM_rand_bytes((uint8_t *)rnd->d, words * sizeof(BN_ULONG)); FIPS_service_indicator_unlock_state(); rnd->d[words - 1] &= mask; if (top != BN_RAND_TOP_ANY) { if (top == BN_RAND_TOP_TWO && bits > 1) { if (bit == 0) { rnd->d[words - 1] |= 1; rnd->d[words - 2] |= kOne << (BN_BITS2 - 1); } else { rnd->d[words - 1] |= kThree << (bit - 1); } } else { rnd->d[words - 1] |= kOne << bit; } } if (bottom == BN_RAND_BOTTOM_ODD) { rnd->d[0] |= 1; } rnd->neg = 0; rnd->width = words; return 1; } int BN_pseudo_rand(BIGNUM *rnd, int bits, int top, int bottom) { return BN_rand(rnd, bits, top, bottom); } // bn_less_than_word_mask returns a mask of all ones if the number represented // by |len| words at |a| is less than |b| and zero otherwise. It performs this // computation in time independent of the value of |a|. |b| is assumed public. 
static crypto_word_t bn_less_than_word_mask(const BN_ULONG *a, size_t len, BN_ULONG b) { if (b == 0) { return CONSTTIME_FALSE_W; } if (len == 0) { return CONSTTIME_TRUE_W; } // |a| < |b| iff a[1..len-1] are all zero and a[0] < b. static_assert(sizeof(BN_ULONG) <= sizeof(crypto_word_t), "crypto_word_t is too small"); crypto_word_t mask = 0; for (size_t i = 1; i < len; i++) { mask |= a[i]; } // |mask| is now zero iff a[1..len-1] are all zero. mask = constant_time_is_zero_w(mask); mask &= constant_time_lt_w(a[0], b); return mask; } int bn_in_range_words(const BN_ULONG *a, BN_ULONG min_inclusive, const BN_ULONG *max_exclusive, size_t len) { crypto_word_t mask = ~bn_less_than_word_mask(a, len, min_inclusive); return mask & bn_less_than_words(a, max_exclusive, len); } static int bn_range_to_mask(size_t *out_words, BN_ULONG *out_mask, size_t min_inclusive, const BN_ULONG *max_exclusive, size_t len) { // The magnitude of |max_exclusive| is assumed public. size_t words = len; while (words > 0 && max_exclusive[words - 1] == 0) { words--; } if (words == 0 || (words == 1 && max_exclusive[0] <= min_inclusive)) { OPENSSL_PUT_ERROR(BN, BN_R_INVALID_RANGE); return 0; } BN_ULONG mask = max_exclusive[words - 1]; // This sets all bits in |mask| below the most significant bit. mask |= mask >> 1; mask |= mask >> 2; mask |= mask >> 4; mask |= mask >> 8; mask |= mask >> 16; #if defined(OPENSSL_64_BIT) mask |= mask >> 32; #endif *out_words = words; *out_mask = mask; return 1; } int bn_rand_range_words(BN_ULONG *out, BN_ULONG min_inclusive, const BN_ULONG *max_exclusive, size_t len, const uint8_t additional_data[32]) { // This function implements the equivalent of steps 4 through 7 of FIPS 186-4 // appendices B.4.2 and B.5.2. When called in those contexts, |max_exclusive| // is n and |min_inclusive| is one. // Compute the bit length of |max_exclusive| (step 1), in terms of a number of // |words| worth of entropy to fill and a mask of bits to clear in the top // word. 
size_t words; BN_ULONG mask; if (!bn_range_to_mask(&words, &mask, min_inclusive, max_exclusive, len)) { return 0; } // Fill any unused words with zero. OPENSSL_memset(out + words, 0, (len - words) * sizeof(BN_ULONG)); unsigned count = 100; do { if (!--count) { OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_ITERATIONS); return 0; } // Steps 4 and 5. Use |words| and |mask| together to obtain a string of N // bits, where N is the bit length of |max_exclusive|. FIPS_service_indicator_lock_state(); BCM_rand_bytes_with_additional_data( (uint8_t *)out, words * sizeof(BN_ULONG), additional_data); FIPS_service_indicator_unlock_state(); out[words - 1] &= mask; // If out >= max_exclusive or out < min_inclusive, retry. This implements // the equivalent of steps 6 and 7 without leaking the value of |out|. The // result of this comparison may be treated as public. It only reveals how // many attempts were needed before we found a value in range. This is // independent of the final secret output, and has a distribution that // depends only on |min_inclusive| and |max_exclusive|, both of which are // public. } while (!constant_time_declassify_int( bn_in_range_words(out, min_inclusive, max_exclusive, words))); return 1; } int BN_rand_range_ex(BIGNUM *r, BN_ULONG min_inclusive, const BIGNUM *max_exclusive) { static const uint8_t kDefaultAdditionalData[32] = {0}; if (!bn_wexpand(r, max_exclusive->width) || !bn_rand_range_words(r->d, min_inclusive, max_exclusive->d, max_exclusive->width, kDefaultAdditionalData)) { return 0; } r->neg = 0; r->width = max_exclusive->width; return 1; } int bn_rand_secret_range(BIGNUM *r, int *out_is_uniform, BN_ULONG min_inclusive, const BIGNUM *max_exclusive) { size_t words; BN_ULONG mask; if (!bn_range_to_mask(&words, &mask, min_inclusive, max_exclusive->d, max_exclusive->width) || !bn_wexpand(r, words)) { return 0; } assert(words > 0); assert(mask != 0); // The range must be large enough for bit tricks to fix invalid values. 
if (words == 1 && min_inclusive > mask >> 1) { OPENSSL_PUT_ERROR(BN, BN_R_INVALID_RANGE); return 0; } // Select a uniform random number with num_bits(max_exclusive) bits. FIPS_service_indicator_lock_state(); BCM_rand_bytes((uint8_t *)r->d, words * sizeof(BN_ULONG)); FIPS_service_indicator_unlock_state(); r->d[words - 1] &= mask; // Check, in constant-time, if the value is in range. *out_is_uniform = bn_in_range_words(r->d, min_inclusive, max_exclusive->d, words); crypto_word_t in_range = *out_is_uniform; in_range = 0 - in_range; // If the value is not in range, force it to be in range. r->d[0] |= constant_time_select_w(in_range, 0, min_inclusive); r->d[words - 1] &= constant_time_select_w(in_range, BN_MASK2, mask >> 1); declassify_assert( bn_in_range_words(r->d, min_inclusive, max_exclusive->d, words)); r->neg = 0; r->width = (int)words; return 1; } int BN_rand_range(BIGNUM *r, const BIGNUM *range) { return BN_rand_range_ex(r, 0, range); } int BN_pseudo_rand_range(BIGNUM *r, const BIGNUM *range) { return BN_rand_range(r, range); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/rsaz_exp.cc.inc ================================================ /* * Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2012, Intel Corporation. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html * * Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1) * (1) Intel Corporation, Israel Development Center, Haifa, Israel * (2) University of Haifa, Israel */ #include "rsaz_exp.h" #if defined(RSAZ_ENABLED) #include #include #include "internal.h" #include "../../internal.h" // rsaz_one is 1 in RSAZ's representation. 
alignas(64) static const BN_ULONG rsaz_one[40] = { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; // rsaz_two80 is 2^80 in RSAZ's representation. Note RSAZ uses base 2^29, so this is // 2^(29*2 + 22) = 2^80, not 2^(64*2 + 22). alignas(64) static const BN_ULONG rsaz_two80[40] = { 0, 0, 1 << 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; void RSAZ_1024_mod_exp_avx2(BN_ULONG result_norm[16], const BN_ULONG base_norm[16], const BN_ULONG exponent[16], const BN_ULONG m_norm[16], const BN_ULONG RR[16], BN_ULONG k0, BN_ULONG storage[MOD_EXP_CTIME_STORAGE_LEN]) { static_assert(MOD_EXP_CTIME_ALIGN % 64 == 0, "MOD_EXP_CTIME_ALIGN is too small"); assert((uintptr_t)storage % 64 == 0); BN_ULONG *a_inv, *m, *result, *table_s = storage + 40 * 3, *R2 = table_s; // Note |R2| aliases |table_s|. if (((((uintptr_t)storage & 4095) + 320) >> 12) != 0) { result = storage; a_inv = storage + 40; m = storage + 40 * 2; // should not cross page } else { m = storage; // should not cross page result = storage + 40; a_inv = storage + 40 * 2; } rsaz_1024_norm2red_avx2(m, m_norm); rsaz_1024_norm2red_avx2(a_inv, base_norm); rsaz_1024_norm2red_avx2(R2, RR); // Convert |R2| from the usual radix, giving R = 2^1024, to RSAZ's radix, // giving R = 2^(36*29) = 2^1044. 
rsaz_1024_mul_avx2(R2, R2, R2, m, k0); // R2 = 2^2048 * 2^2048 / 2^1044 = 2^3052 rsaz_1024_mul_avx2(R2, R2, rsaz_two80, m, k0); // R2 = 2^3052 * 2^80 / 2^1044 = 2^2088 = (2^1044)^2 // table[0] = 1 // table[1] = a_inv^1 rsaz_1024_mul_avx2(result, R2, rsaz_one, m, k0); rsaz_1024_mul_avx2(a_inv, a_inv, R2, m, k0); rsaz_1024_scatter5_avx2(table_s, result, 0); rsaz_1024_scatter5_avx2(table_s, a_inv, 1); // table[2] = a_inv^2 rsaz_1024_sqr_avx2(result, a_inv, m, k0, 1); rsaz_1024_scatter5_avx2(table_s, result, 2); // table[4] = a_inv^4 rsaz_1024_sqr_avx2(result, result, m, k0, 1); rsaz_1024_scatter5_avx2(table_s, result, 4); // table[8] = a_inv^8 rsaz_1024_sqr_avx2(result, result, m, k0, 1); rsaz_1024_scatter5_avx2(table_s, result, 8); // table[16] = a_inv^16 rsaz_1024_sqr_avx2(result, result, m, k0, 1); rsaz_1024_scatter5_avx2(table_s, result, 16); for (int i = 3; i < 32; i += 2) { // table[i] = table[i-1] * a_inv = a_inv^i rsaz_1024_gather5_avx2(result, table_s, i - 1); rsaz_1024_mul_avx2(result, result, a_inv, m, k0); rsaz_1024_scatter5_avx2(table_s, result, i); for (int j = 2 * i; j < 32; j *= 2) { // table[j] = table[j/2]^2 = a_inv^j rsaz_1024_sqr_avx2(result, result, m, k0, 1); rsaz_1024_scatter5_avx2(table_s, result, j); } } // Load the first window. const uint8_t *p_str = (const uint8_t *)exponent; int wvalue = p_str[127] >> 3; rsaz_1024_gather5_avx2(result, table_s, wvalue); int index = 1014; while (index > -1) { // Loop for the remaining 127 windows. rsaz_1024_sqr_avx2(result, result, m, k0, 5); uint16_t wvalue_16; memcpy(&wvalue_16, &p_str[index / 8], sizeof(wvalue_16)); wvalue = wvalue_16; wvalue = (wvalue >> (index % 8)) & 31; index -= 5; rsaz_1024_gather5_avx2(a_inv, table_s, wvalue); // Borrow |a_inv|. rsaz_1024_mul_avx2(result, result, a_inv, m, k0); } // Square four times. rsaz_1024_sqr_avx2(result, result, m, k0, 4); wvalue = p_str[0] & 15; rsaz_1024_gather5_avx2(a_inv, table_s, wvalue); // Borrow |a_inv|. 
rsaz_1024_mul_avx2(result, result, a_inv, m, k0); // Convert from Montgomery. rsaz_1024_mul_avx2(result, result, rsaz_one, m, k0); rsaz_1024_red2norm_avx2(result_norm, result); BN_ULONG scratch[16]; bn_reduce_once_in_place(result_norm, /*carry=*/0, m_norm, scratch, 16); OPENSSL_cleanse(storage, MOD_EXP_CTIME_STORAGE_LEN * sizeof(BN_ULONG)); } #endif // RSAZ_ENABLED ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/rsaz_exp.h ================================================ /* * Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2012, Intel Corporation. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html * * Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1) * (1) Intel Corporation, Israel Development Center, Haifa, Israel * (2) University of Haifa, Israel */ #ifndef OPENSSL_HEADER_BN_RSAZ_EXP_H #define OPENSSL_HEADER_BN_RSAZ_EXP_H #include #include "internal.h" #include "../../internal.h" #if defined(__cplusplus) extern "C" { #endif #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) #define RSAZ_ENABLED // RSAZ_1024_mod_exp_avx2 sets |result| to |base_norm| raised to |exponent| // modulo |m_norm|. |base_norm| must be fully-reduced and |exponent| must have // the high bit set (it is 1024 bits wide). |RR| and |k0| must be |RR| and |n0|, // respectively, extracted from |m_norm|'s |BN_MONT_CTX|. |storage_words| is a // temporary buffer that must be aligned to |MOD_EXP_CTIME_ALIGN| bytes. 
void RSAZ_1024_mod_exp_avx2(BN_ULONG result[16], const BN_ULONG base_norm[16],
                            const BN_ULONG exponent[16],
                            const BN_ULONG m_norm[16], const BN_ULONG RR[16],
                            BN_ULONG k0,
                            BN_ULONG storage_words[MOD_EXP_CTIME_STORAGE_LEN]);

// rsaz_avx2_capable returns one if the CPU supports AVX2, i.e. the RSAZ
// assembly may be used at all.
inline int rsaz_avx2_capable(void) { return CRYPTO_is_AVX2_capable(); }

// rsaz_avx2_preferred returns one if RSAZ should be used for 1024-bit
// modular exponentiation on this CPU, i.e. AVX2 is available and no faster
// code path applies.
inline int rsaz_avx2_preferred(void) {
  if (CRYPTO_is_BMI1_capable() && CRYPTO_is_BMI2_capable() &&
      CRYPTO_is_ADX_capable()) {
    // If BMI1, BMI2, and ADX are available, x86_64-mont5.pl is faster. See the
    // .Lmulx4x_enter and .Lpowerx5_enter branches.
    return 0;
  }
  return CRYPTO_is_AVX2_capable();
}


// Assembly functions.

// RSAZ represents 1024-bit integers using unsaturated 29-bit limbs stored in
// 64-bit integers. This requires 36 limbs but padded up to 40.
//
// See crypto/bn/asm/rsaz-avx2.pl for further details.

// rsaz_1024_norm2red_avx2 converts |norm| from |BIGNUM| to RSAZ representation
// and writes the result to |red|.
void rsaz_1024_norm2red_avx2(BN_ULONG red[40], const BN_ULONG norm[16]);

// rsaz_1024_mul_avx2 computes |a| * |b| mod |n| and writes the result to |ret|.
// Inputs and outputs are in Montgomery form, using RSAZ's representation. |k|
// is -|n|^-1 mod 2^64 or |n0| from |BN_MONT_CTX|.
void rsaz_1024_mul_avx2(BN_ULONG ret[40], const BN_ULONG a[40],
                        const BN_ULONG b[40], const BN_ULONG n[40], BN_ULONG k);

// rsaz_1024_sqr_avx2 computes |a|^(2*|count|) mod |n| and writes the result to
// |ret|. Inputs and outputs are in Montgomery form, using RSAZ's
// representation. |k| is -|n|^-1 mod 2^64 or |n0| from |BN_MONT_CTX|.
void rsaz_1024_sqr_avx2(BN_ULONG ret[40], const BN_ULONG a[40],
                        const BN_ULONG n[40], BN_ULONG k, int count);

// rsaz_1024_scatter5_avx2 stores |val| at index |i| of |tbl|. |i| must be
// non-negative and at most 31 (index 0 is used to store the Montgomery one).
// It is treated as public. Note the table only uses 18 |BN_ULONG|s per entry
// instead of 40. It packs two 29-bit limbs into each |BN_ULONG| and only
// stores 36 limbs rather than the padded 40.
void rsaz_1024_scatter5_avx2(BN_ULONG tbl[32 * 18], const BN_ULONG val[40],
                             int i);

// rsaz_1024_gather5_avx2 loads index |i| of |tbl| and writes it to |val|. |i|
// must be non-negative and at most 31. It is treated as secret. |tbl| must be
// aligned to 32 bytes.
void rsaz_1024_gather5_avx2(BN_ULONG val[40], const BN_ULONG tbl[32 * 18],
                            int i);

// rsaz_1024_red2norm_avx2 converts |red| from RSAZ to |BIGNUM| representation
// and writes the result to |norm|. The result will be <= the modulus.
//
// WARNING: The result of this operation may not be fully reduced. |norm| may be
// the modulus instead of zero. This function should be followed by a call to
// |bn_reduce_once|.
void rsaz_1024_red2norm_avx2(BN_ULONG norm[16], const BN_ULONG red[40]);


#endif  // !OPENSSL_NO_ASM && OPENSSL_X86_64

#if defined(__cplusplus)
}  // extern "C"
#endif

#endif  // OPENSSL_HEADER_BN_RSAZ_EXP_H



================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/shift.cc.inc ================================================

/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "internal.h" int BN_lshift(BIGNUM *r, const BIGNUM *a, int n) { int i, nw, lb, rb; BN_ULONG *t, *f; BN_ULONG l; if (n < 0) { OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER); return 0; } r->neg = a->neg; nw = n / BN_BITS2; if (!bn_wexpand(r, a->width + nw + 1)) { return 0; } lb = n % BN_BITS2; rb = BN_BITS2 - lb; f = a->d; t = r->d; t[a->width + nw] = 0; if (lb == 0) { for (i = a->width - 1; i >= 0; i--) { t[nw + i] = f[i]; } } else { for (i = a->width - 1; i >= 0; i--) { l = f[i]; t[nw + i + 1] |= l >> rb; t[nw + i] = l << lb; } } OPENSSL_memset(t, 0, nw * sizeof(t[0])); r->width = a->width + nw + 1; bn_set_minimal_width(r); return 1; } int BN_lshift1(BIGNUM *r, const BIGNUM *a) { BN_ULONG *ap, *rp, t, c; int i; if (r != a) { r->neg = a->neg; if (!bn_wexpand(r, a->width + 1)) { return 0; } r->width = a->width; } else { if (!bn_wexpand(r, a->width + 1)) { return 0; } } ap = a->d; rp = r->d; c = 0; for (i = 0; i < a->width; i++) { t = *(ap++); *(rp++) = (t << 1) | c; c = t >> (BN_BITS2 - 1); } if (c) { *rp = 1; r->width++; } return 1; } void bn_rshift_words(BN_ULONG *r, const BN_ULONG *a, unsigned shift, size_t num) { unsigned shift_bits = shift % BN_BITS2; size_t shift_words = shift / BN_BITS2; if (shift_words >= num) { OPENSSL_memset(r, 0, num * sizeof(BN_ULONG)); return; } if (shift_bits == 0) { OPENSSL_memmove(r, a + shift_words, (num - shift_words) * sizeof(BN_ULONG)); } else { for (size_t i = shift_words; i < num - 1; i++) { r[i - shift_words] = (a[i] >> shift_bits) | (a[i + 1] << (BN_BITS2 - shift_bits)); } r[num - 1 - shift_words] = a[num - 1] >> shift_bits; } OPENSSL_memset(r + num - shift_words, 0, shift_words * sizeof(BN_ULONG)); } int BN_rshift(BIGNUM *r, const BIGNUM *a, int n) { if (n < 0) { OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER); return 0; } if (!bn_wexpand(r, a->width)) { return 0; 
} bn_rshift_words(r->d, a->d, n, a->width); r->neg = a->neg; r->width = a->width; bn_set_minimal_width(r); return 1; } int bn_rshift_secret_shift(BIGNUM *r, const BIGNUM *a, unsigned n, BN_CTX *ctx) { int ret = 0; BN_CTX_start(ctx); BIGNUM *tmp = BN_CTX_get(ctx); unsigned max_bits; if (tmp == NULL || !BN_copy(r, a) || !bn_wexpand(tmp, r->width)) { goto err; } // Shift conditionally by powers of two. max_bits = BN_BITS2 * r->width; for (unsigned i = 0; (max_bits >> i) != 0; i++) { BN_ULONG mask = (n >> i) & 1; mask = 0 - mask; bn_rshift_words(tmp->d, r->d, 1u << i, r->width); bn_select_words(r->d, mask, tmp->d /* apply shift */, r->d /* ignore shift */, r->width); } ret = 1; err: BN_CTX_end(ctx); return ret; } void bn_rshift1_words(BN_ULONG *r, const BN_ULONG *a, size_t num) { if (num == 0) { return; } for (size_t i = 0; i < num - 1; i++) { r[i] = (a[i] >> 1) | (a[i + 1] << (BN_BITS2 - 1)); } r[num - 1] = a[num - 1] >> 1; } int BN_rshift1(BIGNUM *r, const BIGNUM *a) { if (!bn_wexpand(r, a->width)) { return 0; } bn_rshift1_words(r->d, a->d, a->width); r->width = a->width; r->neg = a->neg; bn_set_minimal_width(r); return 1; } int BN_set_bit(BIGNUM *a, int n) { if (n < 0) { return 0; } int i = n / BN_BITS2; int j = n % BN_BITS2; if (a->width <= i) { if (!bn_wexpand(a, i + 1)) { return 0; } for (int k = a->width; k < i + 1; k++) { a->d[k] = 0; } a->width = i + 1; } a->d[i] |= (((BN_ULONG)1) << j); return 1; } int BN_clear_bit(BIGNUM *a, int n) { int i, j; if (n < 0) { return 0; } i = n / BN_BITS2; j = n % BN_BITS2; if (a->width <= i) { return 0; } a->d[i] &= (~(((BN_ULONG)1) << j)); bn_set_minimal_width(a); return 1; } int bn_is_bit_set_words(const BN_ULONG *a, size_t num, size_t bit) { size_t i = bit / BN_BITS2; size_t j = bit % BN_BITS2; if (i >= num) { return 0; } return (a[i] >> j) & 1; } int BN_is_bit_set(const BIGNUM *a, int n) { if (n < 0) { return 0; } return bn_is_bit_set_words(a->d, a->width, n); } int BN_mask_bits(BIGNUM *a, int n) { if (n < 0) { return 0; } 
int w = n / BN_BITS2; int b = n % BN_BITS2; if (w >= a->width) { return 1; } if (b == 0) { a->width = w; } else { a->width = w + 1; a->d[w] &= ~(BN_MASK2 << b); } bn_set_minimal_width(a); return 1; } static int bn_count_low_zero_bits_word(BN_ULONG l) { static_assert(sizeof(BN_ULONG) <= sizeof(crypto_word_t), "crypto_word_t is too small"); static_assert(sizeof(int) <= sizeof(crypto_word_t), "crypto_word_t is too small"); static_assert(BN_BITS2 == sizeof(BN_ULONG) * 8, "BN_ULONG has padding bits"); // C has very bizarre rules for types smaller than an int. static_assert(sizeof(BN_ULONG) >= sizeof(int), "BN_ULONG gets promoted to int"); crypto_word_t mask; int bits = 0; #if BN_BITS2 > 32 // Check if the lower half of |x| are all zero. mask = constant_time_is_zero_w(l << (BN_BITS2 - 32)); // If the lower half is all zeros, it is included in the bit count and we // count the upper half. Otherwise, we count the lower half. bits += 32 & mask; l = constant_time_select_w(mask, l >> 32, l); #endif // The remaining blocks are analogous iterations at lower powers of two. 
mask = constant_time_is_zero_w(l << (BN_BITS2 - 16)); bits += 16 & mask; l = constant_time_select_w(mask, l >> 16, l); mask = constant_time_is_zero_w(l << (BN_BITS2 - 8)); bits += 8 & mask; l = constant_time_select_w(mask, l >> 8, l); mask = constant_time_is_zero_w(l << (BN_BITS2 - 4)); bits += 4 & mask; l = constant_time_select_w(mask, l >> 4, l); mask = constant_time_is_zero_w(l << (BN_BITS2 - 2)); bits += 2 & mask; l = constant_time_select_w(mask, l >> 2, l); mask = constant_time_is_zero_w(l << (BN_BITS2 - 1)); bits += 1 & mask; return bits; } int BN_count_low_zero_bits(const BIGNUM *bn) { static_assert(sizeof(BN_ULONG) <= sizeof(crypto_word_t), "crypto_word_t is too small"); static_assert(sizeof(int) <= sizeof(crypto_word_t), "crypto_word_t is too small"); int ret = 0; crypto_word_t saw_nonzero = 0; for (int i = 0; i < bn->width; i++) { crypto_word_t nonzero = ~constant_time_is_zero_w(bn->d[i]); crypto_word_t first_nonzero = ~saw_nonzero & nonzero; saw_nonzero |= nonzero; int bits = bn_count_low_zero_bits_word(bn->d[i]); ret |= first_nonzero & (i * BN_BITS2 + bits); } // If got to the end of |bn| and saw no non-zero words, |bn| is zero. |ret| // will then remain zero. return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/bn/sqrt.cc.inc ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "internal.h" BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { // Compute a square root of |a| mod |p| using the Tonelli/Shanks algorithm // (cf. Henri Cohen, "A Course in Algebraic Computational Number Theory", // algorithm 1.5.1). 
|p| is assumed to be a prime. BIGNUM *ret = in; int err = 1; int r; BIGNUM *A, *b, *q, *t, *x, *y; int e, i, j; if (!BN_is_odd(p) || BN_abs_is_word(p, 1)) { if (BN_abs_is_word(p, 2)) { if (ret == NULL) { ret = BN_new(); } if (ret == NULL || !BN_set_word(ret, BN_is_bit_set(a, 0))) { if (ret != in) { BN_free(ret); } return NULL; } return ret; } OPENSSL_PUT_ERROR(BN, BN_R_P_IS_NOT_PRIME); return NULL; } if (BN_is_zero(a) || BN_is_one(a)) { if (ret == NULL) { ret = BN_new(); } if (ret == NULL || !BN_set_word(ret, BN_is_one(a))) { if (ret != in) { BN_free(ret); } return NULL; } return ret; } BN_CTX_start(ctx); A = BN_CTX_get(ctx); b = BN_CTX_get(ctx); q = BN_CTX_get(ctx); t = BN_CTX_get(ctx); x = BN_CTX_get(ctx); y = BN_CTX_get(ctx); if (y == NULL) { goto end; } if (ret == NULL) { ret = BN_new(); } if (ret == NULL) { goto end; } // A = a mod p if (!BN_nnmod(A, a, p, ctx)) { goto end; } // now write |p| - 1 as 2^e*q where q is odd e = 1; while (!BN_is_bit_set(p, e)) { e++; } // we'll set q later (if needed) if (e == 1) { // The easy case: (|p|-1)/2 is odd, so 2 has an inverse // modulo (|p|-1)/2, and square roots can be computed // directly by modular exponentiation. // We have // 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2), // so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1. if (!BN_rshift(q, p, 2)) { goto end; } q->neg = 0; if (!BN_add_word(q, 1) || !BN_mod_exp_mont(ret, A, q, p, ctx, NULL)) { goto end; } err = 0; goto vrfy; } if (e == 2) { // |p| == 5 (mod 8) // // In this case 2 is always a non-square since // Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime. // So if a really is a square, then 2*a is a non-square. // Thus for // b := (2*a)^((|p|-5)/8), // i := (2*a)*b^2 // we have // i^2 = (2*a)^((1 + (|p|-5)/4)*2) // = (2*a)^((p-1)/2) // = -1; // so if we set // x := a*b*(i-1), // then // x^2 = a^2 * b^2 * (i^2 - 2*i + 1) // = a^2 * b^2 * (-2*i) // = a*(-i)*(2*a*b^2) // = a*(-i)*i // = a. // // (This is due to A.O.L. Atkin, // , // November 1992.) 
// t := 2*a if (!bn_mod_lshift1_consttime(t, A, p, ctx)) { goto end; } // b := (2*a)^((|p|-5)/8) if (!BN_rshift(q, p, 3)) { goto end; } q->neg = 0; if (!BN_mod_exp_mont(b, t, q, p, ctx, NULL)) { goto end; } // y := b^2 if (!BN_mod_sqr(y, b, p, ctx)) { goto end; } // t := (2*a)*b^2 - 1 if (!BN_mod_mul(t, t, y, p, ctx) || !BN_sub_word(t, 1)) { goto end; } // x = a*b*t if (!BN_mod_mul(x, A, b, p, ctx) || !BN_mod_mul(x, x, t, p, ctx)) { goto end; } if (!BN_copy(ret, x)) { goto end; } err = 0; goto vrfy; } // e > 2, so we really have to use the Tonelli/Shanks algorithm. // First, find some y that is not a square. if (!BN_copy(q, p)) { goto end; // use 'q' as temp } q->neg = 0; i = 2; do { // For efficiency, try small numbers first; // if this fails, try random numbers. if (i < 22) { if (!BN_set_word(y, i)) { goto end; } } else { if (!BN_pseudo_rand(y, BN_num_bits(p), 0, 0)) { goto end; } if (BN_ucmp(y, p) >= 0) { if (BN_usub(y, y, p)) { goto end; } } // now 0 <= y < |p| if (BN_is_zero(y)) { if (!BN_set_word(y, i)) { goto end; } } } r = bn_jacobi(y, q, ctx); // here 'q' is |p| if (r < -1) { goto end; } if (r == 0) { // m divides p OPENSSL_PUT_ERROR(BN, BN_R_P_IS_NOT_PRIME); goto end; } } while (r == 1 && ++i < 82); if (r != -1) { // Many rounds and still no non-square -- this is more likely // a bug than just bad luck. // Even if p is not prime, we should have found some y // such that r == -1. OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_ITERATIONS); goto end; } // Here's our actual 'q': if (!BN_rshift(q, q, e)) { goto end; } // Now that we have some non-square, we can find an element // of order 2^e by computing its q'th power. if (!BN_mod_exp_mont(y, y, q, p, ctx, NULL)) { goto end; } if (BN_is_one(y)) { OPENSSL_PUT_ERROR(BN, BN_R_P_IS_NOT_PRIME); goto end; } // Now we know that (if p is indeed prime) there is an integer // k, 0 <= k < 2^e, such that // // a^q * y^k == 1 (mod p). // // As a^q is a square and y is not, k must be even. 
// q+1 is even, too, so there is an element // // X := a^((q+1)/2) * y^(k/2), // // and it satisfies // // X^2 = a^q * a * y^k // = a, // // so it is the square root that we are looking for. // t := (q-1)/2 (note that q is odd) if (!BN_rshift1(t, q)) { goto end; } // x := a^((q-1)/2) if (BN_is_zero(t)) { // special case: p = 2^e + 1 if (!BN_nnmod(t, A, p, ctx)) { goto end; } if (BN_is_zero(t)) { // special case: a == 0 (mod p) BN_zero(ret); err = 0; goto end; } else if (!BN_one(x)) { goto end; } } else { if (!BN_mod_exp_mont(x, A, t, p, ctx, NULL)) { goto end; } if (BN_is_zero(x)) { // special case: a == 0 (mod p) BN_zero(ret); err = 0; goto end; } } // b := a*x^2 (= a^q) if (!BN_mod_sqr(b, x, p, ctx) || !BN_mod_mul(b, b, A, p, ctx)) { goto end; } // x := a*x (= a^((q+1)/2)) if (!BN_mod_mul(x, x, A, p, ctx)) { goto end; } while (1) { // Now b is a^q * y^k for some even k (0 <= k < 2^E // where E refers to the original value of e, which we // don't keep in a variable), and x is a^((q+1)/2) * y^(k/2). // // We have a*b = x^2, // y^2^(e-1) = -1, // b^2^(e-1) = 1. if (BN_is_one(b)) { if (!BN_copy(ret, x)) { goto end; } err = 0; goto vrfy; } // Find the smallest i, 0 < i < e, such that b^(2^i) = 1 for (i = 1; i < e; i++) { if (i == 1) { if (!BN_mod_sqr(t, b, p, ctx)) { goto end; } } else { if (!BN_mod_mul(t, t, t, p, ctx)) { goto end; } } if (BN_is_one(t)) { break; } } // If not found, a is not a square or p is not a prime. if (i >= e) { OPENSSL_PUT_ERROR(BN, BN_R_NOT_A_SQUARE); goto end; } // t := y^2^(e - i - 1) if (!BN_copy(t, y)) { goto end; } for (j = e - i - 1; j > 0; j--) { if (!BN_mod_sqr(t, t, p, ctx)) { goto end; } } if (!BN_mod_mul(y, t, t, p, ctx) || !BN_mod_mul(x, x, t, p, ctx) || !BN_mod_mul(b, b, y, p, ctx)) { goto end; } // e decreases each iteration, so this loop will terminate. assert(i < e); e = i; } vrfy: if (!err) { // Verify the result. The input might have been not a square. 
if (!BN_mod_sqr(x, ret, p, ctx)) { err = 1; } if (!err && 0 != BN_cmp(x, A)) { OPENSSL_PUT_ERROR(BN, BN_R_NOT_A_SQUARE); err = 1; } } end: if (err) { if (ret != in) { BN_clear_free(ret); } ret = NULL; } BN_CTX_end(ctx); return ret; } int BN_sqrt(BIGNUM *out_sqrt, const BIGNUM *in, BN_CTX *ctx) { BIGNUM *estimate, *tmp, *delta, *last_delta, *tmp2; int ok = 0, last_delta_valid = 0; if (in->neg) { OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER); return 0; } if (BN_is_zero(in)) { BN_zero(out_sqrt); return 1; } BN_CTX_start(ctx); if (out_sqrt == in) { estimate = BN_CTX_get(ctx); } else { estimate = out_sqrt; } tmp = BN_CTX_get(ctx); last_delta = BN_CTX_get(ctx); delta = BN_CTX_get(ctx); if (estimate == NULL || tmp == NULL || last_delta == NULL || delta == NULL) { goto err; } // We estimate that the square root of an n-bit number is 2^{n/2}. if (!BN_lshift(estimate, BN_value_one(), BN_num_bits(in)/2)) { goto err; } // This is Newton's method for finding a root of the equation |estimate|^2 - // |in| = 0. for (;;) { // |estimate| = 1/2 * (|estimate| + |in|/|estimate|) if (!BN_div(tmp, NULL, in, estimate, ctx) || !BN_add(tmp, tmp, estimate) || !BN_rshift1(estimate, tmp) || // |tmp| = |estimate|^2 !BN_sqr(tmp, estimate, ctx) || // |delta| = |in| - |tmp| !BN_sub(delta, in, tmp)) { OPENSSL_PUT_ERROR(BN, ERR_R_BN_LIB); goto err; } delta->neg = 0; // The difference between |in| and |estimate| squared is required to always // decrease. This ensures that the loop always terminates, but I don't have // a proof that it always finds the square root for a given square. 
if (last_delta_valid && BN_cmp(delta, last_delta) >= 0) { break; } last_delta_valid = 1; tmp2 = last_delta; last_delta = delta; delta = tmp2; } if (BN_cmp(tmp, in) != 0) { OPENSSL_PUT_ERROR(BN, BN_R_NOT_A_SQUARE); goto err; } ok = 1; err: if (ok && out_sqrt == in && !BN_copy(out_sqrt, estimate)) { ok = 0; } BN_CTX_end(ctx); return ok; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/cipher/aead.cc.inc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
 */

// NOTE(review): the angle-bracket header names below appear to have been
// stripped by extraction — restore from upstream BoringSSL aead.cc.
#include
#include
#include
#include
#include
#include

#include "../../internal.h"
#include "internal.h"


// Accessors for the static properties of an |EVP_AEAD|.
size_t EVP_AEAD_key_length(const EVP_AEAD *aead) { return aead->key_len; }

size_t EVP_AEAD_nonce_length(const EVP_AEAD *aead) { return aead->nonce_len; }

size_t EVP_AEAD_max_overhead(const EVP_AEAD *aead) { return aead->overhead; }

size_t EVP_AEAD_max_tag_len(const EVP_AEAD *aead) { return aead->max_tag_len; }

// EVP_AEAD_CTX_zero clears |ctx| so it may be safely passed to
// |EVP_AEAD_CTX_cleanup| without having been initialized.
void EVP_AEAD_CTX_zero(EVP_AEAD_CTX *ctx) {
  OPENSSL_memset(ctx, 0, sizeof(EVP_AEAD_CTX));
}

// EVP_AEAD_CTX_new allocates and initializes a context, or returns NULL on
// allocation or initialization failure.
EVP_AEAD_CTX *EVP_AEAD_CTX_new(const EVP_AEAD *aead, const uint8_t *key,
                               size_t key_len, size_t tag_len) {
  // NOTE(review): the template argument of |reinterpret_cast| appears to have
  // been stripped by extraction; upstream casts to |EVP_AEAD_CTX *|.
  EVP_AEAD_CTX *ctx =
      reinterpret_cast(OPENSSL_malloc(sizeof(EVP_AEAD_CTX)));
  if (!ctx) {
    return NULL;
  }
  EVP_AEAD_CTX_zero(ctx);
  if (EVP_AEAD_CTX_init(ctx, aead, key, key_len, tag_len, NULL)) {
    return ctx;
  }
  EVP_AEAD_CTX_free(ctx);
  return NULL;
}

// EVP_AEAD_CTX_free cleans up and frees |ctx|. A NULL |ctx| is a no-op.
void EVP_AEAD_CTX_free(EVP_AEAD_CTX *ctx) {
  if (ctx == NULL) {
    return;
  }
  EVP_AEAD_CTX_cleanup(ctx);
  OPENSSL_free(ctx);
}

// EVP_AEAD_CTX_init initializes |ctx| for an AEAD whose |init| hook does not
// need a direction. AEADs without an |init| hook require a direction and are
// rejected here; use |EVP_AEAD_CTX_init_with_direction| for those.
int EVP_AEAD_CTX_init(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead,
                      const uint8_t *key, size_t key_len, size_t tag_len,
                      ENGINE *impl) {
  if (!aead->init) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_NO_DIRECTION_SET);
    ctx->aead = NULL;
    return 0;
  }
  // |aead->init| is non-NULL here, so the direction argument is unused.
  return EVP_AEAD_CTX_init_with_direction(ctx, aead, key, key_len, tag_len,
                                          evp_aead_open);
}

// EVP_AEAD_CTX_init_with_direction initializes |ctx|, dispatching to the
// AEAD's direction-less or directional init hook. On failure, |ctx->aead| is
// left NULL so the context reads as uninitialized.
int EVP_AEAD_CTX_init_with_direction(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead,
                                     const uint8_t *key, size_t key_len,
                                     size_t tag_len,
                                     enum evp_aead_direction_t dir) {
  if (key_len != aead->key_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_KEY_SIZE);
    ctx->aead = NULL;
    return 0;
  }

  ctx->aead = aead;

  int ok;
  if (aead->init) {
    ok = aead->init(ctx, key, key_len, tag_len);
  } else {
    ok = aead->init_with_direction(ctx, key, key_len, tag_len, dir);
  }

  if (!ok) {
    ctx->aead = NULL;
  }

  return ok;
}

// EVP_AEAD_CTX_cleanup releases AEAD-specific state. Safe to call on a zeroed
// or already-cleaned context.
void EVP_AEAD_CTX_cleanup(EVP_AEAD_CTX *ctx) {
  if (ctx->aead == NULL) {
    return;
  }
  ctx->aead->cleanup(ctx);
  ctx->aead = NULL;
}

// check_alias returns 1 if |out| is compatible with |in| and 0 otherwise. If
// |in| and |out| alias, we require that |in| == |out|.
static int check_alias(const uint8_t *in, size_t in_len, const uint8_t *out,
                       size_t out_len) {
  if (!buffers_alias(in, in_len, out, out_len)) {
    return 1;
  }
  return in == out;
}

// EVP_AEAD_CTX_seal encrypts and authenticates |in| into |out| (ciphertext
// followed by the tag), implemented on top of the AEAD's |seal_scatter| hook.
// On any failure the output buffer is zeroed.
int EVP_AEAD_CTX_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
                      size_t max_out_len, const uint8_t *nonce,
                      size_t nonce_len, const uint8_t *in, size_t in_len,
                      const uint8_t *ad, size_t ad_len) {
  if (in_len + ctx->aead->overhead < in_len /* overflow */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    goto error;
  }

  if (max_out_len < in_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    goto error;
  }

  if (!check_alias(in, in_len, out, max_out_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT);
    goto error;
  }

  size_t out_tag_len;
  if (ctx->aead->seal_scatter(ctx, out, out + in_len, &out_tag_len,
                              max_out_len - in_len, nonce, nonce_len, in,
                              in_len, NULL, 0, ad, ad_len)) {
    *out_len = in_len + out_tag_len;
    return 1;
  }

error:
  // In the event of an error, clear the output buffer so that a caller
  // that doesn't check the return value doesn't send raw data.
  OPENSSL_memset(out, 0, max_out_len);
  *out_len = 0;
  return 0;
}

// EVP_AEAD_CTX_seal_scatter seals with the ciphertext written to |out| and
// the tag written separately to |out_tag|. On any failure both output buffers
// are zeroed.
int EVP_AEAD_CTX_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
                              uint8_t *out_tag, size_t *out_tag_len,
                              size_t max_out_tag_len, const uint8_t *nonce,
                              size_t nonce_len, const uint8_t *in,
                              size_t in_len, const uint8_t *extra_in,
                              size_t extra_in_len, const uint8_t *ad,
                              size_t ad_len) {
  // |in| and |out| may alias exactly, |out_tag| may not alias.
  if (!check_alias(in, in_len, out, in_len) ||
      buffers_alias(out, in_len, out_tag, max_out_tag_len) ||
      buffers_alias(in, in_len, out_tag, max_out_tag_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT);
    goto error;
  }

  if (!ctx->aead->seal_scatter_supports_extra_in && extra_in_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    goto error;
  }

  if (ctx->aead->seal_scatter(ctx, out, out_tag, out_tag_len, max_out_tag_len,
                              nonce, nonce_len, in, in_len, extra_in,
                              extra_in_len, ad, ad_len)) {
    return 1;
  }

error:
  // In the event of an error, clear the output buffer so that a caller
  // that doesn't check the return value doesn't send raw data.
  OPENSSL_memset(out, 0, in_len);
  OPENSSL_memset(out_tag, 0, max_out_tag_len);
  *out_tag_len = 0;
  return 0;
}

// EVP_AEAD_CTX_open authenticates and decrypts |in| (ciphertext followed by
// tag) into |out|, using the AEAD's |open| hook if present, else splitting the
// input and delegating to |EVP_AEAD_CTX_open_gather|. On any failure the
// output buffer is zeroed.
int EVP_AEAD_CTX_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
                      size_t max_out_len, const uint8_t *nonce,
                      size_t nonce_len, const uint8_t *in, size_t in_len,
                      const uint8_t *ad, size_t ad_len) {
  size_t plaintext_len;
  if (!check_alias(in, in_len, out, max_out_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT);
    goto error;
  }

  if (ctx->aead->open) {
    if (!ctx->aead->open(ctx, out, out_len, max_out_len, nonce, nonce_len, in,
                         in_len, ad, ad_len)) {
      goto error;
    }
    return 1;
  }

  // AEADs that use the default implementation of open() must set |tag_len| at
  // initialization time.
  assert(ctx->tag_len);

  if (in_len < ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    goto error;
  }

  plaintext_len = in_len - ctx->tag_len;
  if (max_out_len < plaintext_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    goto error;
  }
  if (EVP_AEAD_CTX_open_gather(ctx, out, nonce, nonce_len, in, plaintext_len,
                               in + plaintext_len, ctx->tag_len, ad, ad_len)) {
    *out_len = plaintext_len;
    return 1;
  }

error:
  // In the event of an error, clear the output buffer so that a caller
  // that doesn't check the return value doesn't try and process bad
  // data.
  OPENSSL_memset(out, 0, max_out_len);
  *out_len = 0;
  return 0;
}

// EVP_AEAD_CTX_open_gather opens with ciphertext |in| and a detached tag
// |in_tag|. On any failure the output buffer is zeroed.
int EVP_AEAD_CTX_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out,
                             const uint8_t *nonce, size_t nonce_len,
                             const uint8_t *in, size_t in_len,
                             const uint8_t *in_tag, size_t in_tag_len,
                             const uint8_t *ad, size_t ad_len) {
  if (!check_alias(in, in_len, out, in_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT);
    goto error;
  }

  if (!ctx->aead->open_gather) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_NOT_IMPLEMENTED);
    goto error;
  }

  if (ctx->aead->open_gather(ctx, out, nonce, nonce_len, in, in_len, in_tag,
                             in_tag_len, ad, ad_len)) {
    return 1;
  }

error:
  // In the event of an error, clear the output buffer so that a caller
  // that doesn't check the return value doesn't try and process bad
  // data.
  OPENSSL_memset(out, 0, in_len);
  return 0;
}

// EVP_AEAD_CTX_aead returns the |EVP_AEAD| that |ctx| was initialized with,
// or NULL if uninitialized.
const EVP_AEAD *EVP_AEAD_CTX_aead(const EVP_AEAD_CTX *ctx) { return ctx->aead; }

// EVP_AEAD_CTX_get_iv exposes the AEAD's IV when the implementation provides
// a |get_iv| hook; otherwise it is an error to call.
int EVP_AEAD_CTX_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv,
                        size_t *out_len) {
  if (ctx->aead->get_iv == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  return ctx->aead->get_iv(ctx, out_iv, out_len);
}

// EVP_AEAD_CTX_tag_len computes the tag length that sealing |in_len| bytes
// (plus |extra_in_len|) will produce, guarding against overflow.
int EVP_AEAD_CTX_tag_len(const EVP_AEAD_CTX *ctx, size_t *out_tag_len,
                         const size_t in_len, const size_t extra_in_len) {
  assert(ctx->aead->seal_scatter_supports_extra_in || !extra_in_len);

  if (ctx->aead->tag_len) {
    *out_tag_len = ctx->aead->tag_len(ctx, in_len, extra_in_len);
    return 1;
  }

  if (extra_in_len + ctx->tag_len < extra_in_len) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_OVERFLOW);
    *out_tag_len = 0;
    return 0;
  }
  *out_tag_len = extra_in_len + ctx->tag_len;
  return 1;
}



================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/cipher/cipher.cc.inc ================================================

/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include "../../internal.h" #include "../service_indicator/internal.h" #include "internal.h" void EVP_CIPHER_CTX_init(EVP_CIPHER_CTX *ctx) { OPENSSL_memset(ctx, 0, sizeof(EVP_CIPHER_CTX)); } EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void) { EVP_CIPHER_CTX *ctx = reinterpret_cast( OPENSSL_malloc(sizeof(EVP_CIPHER_CTX))); if (ctx) { EVP_CIPHER_CTX_init(ctx); } return ctx; } int EVP_CIPHER_CTX_cleanup(EVP_CIPHER_CTX *c) { if (c->cipher != NULL && c->cipher->cleanup) { c->cipher->cleanup(c); } OPENSSL_free(c->cipher_data); OPENSSL_memset(c, 0, sizeof(EVP_CIPHER_CTX)); return 1; } void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx) { if (ctx) { EVP_CIPHER_CTX_cleanup(ctx); OPENSSL_free(ctx); } } int EVP_CIPHER_CTX_copy(EVP_CIPHER_CTX *out, const EVP_CIPHER_CTX *in) { if (in == NULL || in->cipher == NULL) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INPUT_NOT_INITIALIZED); return 0; } if (in->poisoned) { OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } EVP_CIPHER_CTX_cleanup(out); OPENSSL_memcpy(out, in, sizeof(EVP_CIPHER_CTX)); if (in->cipher_data && in->cipher->ctx_size) { out->cipher_data = OPENSSL_memdup(in->cipher_data, in->cipher->ctx_size); if (!out->cipher_data) { out->cipher = NULL; return 0; } } if (in->cipher->flags & EVP_CIPH_CUSTOM_COPY) { if (!in->cipher->ctrl((EVP_CIPHER_CTX *)in, EVP_CTRL_COPY, 0, out)) { out->cipher = NULL; return 0; } } return 1; } int EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *ctx) { EVP_CIPHER_CTX_cleanup(ctx); EVP_CIPHER_CTX_init(ctx); return 1; } int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *engine, const uint8_t *key, const uint8_t *iv, int enc) { if (enc == -1) { enc = ctx->encrypt; } else { if (enc) { enc = 1; } ctx->encrypt = enc; } if (cipher) { // Ensure a context left from last time is cleared (the previous 
check // attempted to avoid this if the same ENGINE and EVP_CIPHER could be // used). if (ctx->cipher) { EVP_CIPHER_CTX_cleanup(ctx); // Restore encrypt and flags ctx->encrypt = enc; } ctx->cipher = cipher; if (ctx->cipher->ctx_size) { ctx->cipher_data = OPENSSL_malloc(ctx->cipher->ctx_size); if (!ctx->cipher_data) { ctx->cipher = NULL; return 0; } } else { ctx->cipher_data = NULL; } ctx->key_len = cipher->key_len; ctx->flags = 0; if (ctx->cipher->flags & EVP_CIPH_CTRL_INIT) { if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_INIT, 0, NULL)) { ctx->cipher = NULL; OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INITIALIZATION_ERROR); return 0; } } } else if (!ctx->cipher) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_NO_CIPHER_SET); return 0; } // we assume block size is a power of 2 in *cryptUpdate assert(ctx->cipher->block_size == 1 || ctx->cipher->block_size == 8 || ctx->cipher->block_size == 16); if (!(EVP_CIPHER_CTX_flags(ctx) & EVP_CIPH_CUSTOM_IV)) { switch (EVP_CIPHER_CTX_mode(ctx)) { case EVP_CIPH_STREAM_CIPHER: case EVP_CIPH_ECB_MODE: break; case EVP_CIPH_CFB_MODE: ctx->num = 0; [[fallthrough]]; case EVP_CIPH_CBC_MODE: assert(EVP_CIPHER_CTX_iv_length(ctx) <= sizeof(ctx->iv)); if (iv) { OPENSSL_memcpy(ctx->oiv, iv, EVP_CIPHER_CTX_iv_length(ctx)); } OPENSSL_memcpy(ctx->iv, ctx->oiv, EVP_CIPHER_CTX_iv_length(ctx)); break; case EVP_CIPH_CTR_MODE: case EVP_CIPH_OFB_MODE: ctx->num = 0; // Don't reuse IV for CTR mode if (iv) { OPENSSL_memcpy(ctx->iv, iv, EVP_CIPHER_CTX_iv_length(ctx)); } break; default: return 0; } } if (key || (ctx->cipher->flags & EVP_CIPH_ALWAYS_CALL_INIT)) { if (!ctx->cipher->init(ctx, key, iv, enc)) { return 0; } } ctx->buf_len = 0; ctx->final_used = 0; // Clear the poisoned flag to permit re-use of a CTX that previously had a // failed operation. 
  // (Continuation of a function that begins before this chunk.)
  ctx->poisoned = 0;
  return 1;
}

// EVP_EncryptInit_ex configures |ctx| for encryption; thin wrapper over
// |EVP_CipherInit_ex| with |enc| = 1.
int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                       ENGINE *impl, const uint8_t *key, const uint8_t *iv) {
  return EVP_CipherInit_ex(ctx, cipher, impl, key, iv, 1);
}

// EVP_DecryptInit_ex configures |ctx| for decryption; thin wrapper over
// |EVP_CipherInit_ex| with |enc| = 0.
int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                       ENGINE *impl, const uint8_t *key, const uint8_t *iv) {
  return EVP_CipherInit_ex(ctx, cipher, impl, key, iv, 0);
}

// block_remainder returns the number of bytes to remove from |len| to get a
// multiple of |ctx|'s block size.
static int block_remainder(const EVP_CIPHER_CTX *ctx, int len) {
  // |block_size| must be a power of two.
  assert(ctx->cipher->block_size != 0);
  assert((ctx->cipher->block_size & (ctx->cipher->block_size - 1)) == 0);
  return len & (ctx->cipher->block_size - 1);
}

// EVP_EncryptUpdate encrypts |in_len| bytes from |in| to |out|, buffering any
// partial trailing block in |ctx->buf| until a later call (or Final) completes
// it. On success it returns one and sets |*out_len| to the number of bytes
// written; on failure it returns zero and leaves |ctx| poisoned.
int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len,
                      const uint8_t *in, int in_len) {
  if (ctx->poisoned) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }
  // If the first call to |cipher| succeeds and the second fails, |ctx| may be
  // left in an indeterminate state. We set a poison flag on failure to ensure
  // callers do not continue to use the object in that case.
  ctx->poisoned = 1;

  // Ciphers that use blocks may write up to |bl| extra bytes. Ensure the output
  // does not overflow |*out_len|.
  int bl = ctx->cipher->block_size;
  if (bl > 1 && in_len > INT_MAX - bl) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_OVERFLOW);
    return 0;
  }

  if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
    // Custom ciphers return the number of bytes written, or -1 on error.
    int ret = ctx->cipher->cipher(ctx, out, in, in_len);
    if (ret < 0) {
      return 0;
    } else {
      *out_len = ret;
    }
    ctx->poisoned = 0;
    return 1;
  }

  if (in_len <= 0) {
    *out_len = 0;
    if (in_len == 0) {
      ctx->poisoned = 0;
      return 1;
    }
    return 0;
  }

  // Fast path: no buffered bytes and the input is block-aligned, so the whole
  // input can be encrypted in one call.
  if (ctx->buf_len == 0 && block_remainder(ctx, in_len) == 0) {
    if (ctx->cipher->cipher(ctx, out, in, in_len)) {
      *out_len = in_len;
      ctx->poisoned = 0;
      return 1;
    } else {
      *out_len = 0;
      return 0;
    }
  }

  int i = ctx->buf_len;
  assert(bl <= (int)sizeof(ctx->buf));
  if (i != 0) {
    if (bl - i > in_len) {
      // Not enough input to complete the buffered block; just accumulate.
      OPENSSL_memcpy(&ctx->buf[i], in, in_len);
      ctx->buf_len += in_len;
      *out_len = 0;
      ctx->poisoned = 0;
      return 1;
    } else {
      // Complete and flush the buffered block first.
      int j = bl - i;
      OPENSSL_memcpy(&ctx->buf[i], in, j);
      if (!ctx->cipher->cipher(ctx, out, ctx->buf, bl)) {
        return 0;
      }
      in_len -= j;
      in += j;
      out += bl;
      *out_len = bl;
    }
  } else {
    *out_len = 0;
  }

  // Encrypt the block-aligned portion of the remaining input and stash the
  // remainder in |ctx->buf| for the next call.
  i = block_remainder(ctx, in_len);
  in_len -= i;
  if (in_len > 0) {
    if (!ctx->cipher->cipher(ctx, out, in, in_len)) {
      return 0;
    }
    *out_len += in_len;
  }

  if (i != 0) {
    OPENSSL_memcpy(ctx->buf, &in[in_len], i);
  }
  ctx->buf_len = i;
  ctx->poisoned = 0;
  return 1;
}

// EVP_EncryptFinal_ex finishes an encryption, applying PKCS#7-style padding
// (value n repeated n times) unless |EVP_CIPH_NO_PADDING| is set, in which
// case any buffered partial block is an error.
int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) {
  int n;
  unsigned int i, b, bl;

  if (ctx->poisoned) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
    // When EVP_CIPH_FLAG_CUSTOM_CIPHER is set, the return value of |cipher| is
    // the number of bytes written, or -1 on error. Otherwise the return value
    // is one on success and zero on error.
    const int num_bytes = ctx->cipher->cipher(ctx, out, NULL, 0);
    if (num_bytes < 0) {
      return 0;
    }
    *out_len = num_bytes;
    goto out;
  }

  b = ctx->cipher->block_size;
  assert(b <= sizeof(ctx->buf));
  if (b == 1) {
    // Stream ciphers have no final block.
    *out_len = 0;
    goto out;
  }

  bl = ctx->buf_len;
  if (ctx->flags & EVP_CIPH_NO_PADDING) {
    if (bl) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH);
      return 0;
    }
    *out_len = 0;
    goto out;
  }

  // Pad the final block with |n| bytes of value |n|.
  n = b - bl;
  for (i = bl; i < b; i++) {
    ctx->buf[i] = n;
  }
  if (!ctx->cipher->cipher(ctx, out, ctx->buf, b)) {
    return 0;
  }
  *out_len = b;

out:
  EVP_Cipher_verify_service_indicator(ctx);
  return 1;
}

// EVP_DecryptUpdate decrypts |in_len| bytes from |in| to |out|. When padding
// is enabled it withholds the last full block (saved in |ctx->final|) so that
// |EVP_DecryptFinal_ex| can strip the padding from it.
int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len,
                      const uint8_t *in, int in_len) {
  if (ctx->poisoned) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }
  // Ciphers that use blocks may write up to |bl| extra bytes. Ensure the output
  // does not overflow |*out_len|.
  unsigned int b = ctx->cipher->block_size;
  if (b > 1 && in_len > INT_MAX - (int)b) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_OVERFLOW);
    return 0;
  }

  if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
    int r = ctx->cipher->cipher(ctx, out, in, in_len);
    if (r < 0) {
      *out_len = 0;
      return 0;
    } else {
      *out_len = r;
    }
    return 1;
  }

  if (in_len <= 0) {
    *out_len = 0;
    return in_len == 0;
  }

  if (ctx->flags & EVP_CIPH_NO_PADDING) {
    // Without padding, decryption is structurally identical to encryption:
    // block-aligned pass-through with buffering.
    return EVP_EncryptUpdate(ctx, out, out_len, in, in_len);
  }

  assert(b <= sizeof(ctx->final));
  int fix_len = 0;
  if (ctx->final_used) {
    // Emit the block withheld by the previous call before the new output.
    OPENSSL_memcpy(out, ctx->final, b);
    out += b;
    fix_len = 1;
  }

  if (!EVP_EncryptUpdate(ctx, out, out_len, in, in_len)) {
    return 0;
  }

  // if we have 'decrypted' a multiple of block size, make sure
  // we have a copy of this last block
  if (b > 1 && !ctx->buf_len) {
    *out_len -= b;
    ctx->final_used = 1;
    OPENSSL_memcpy(ctx->final, &out[*out_len], b);
  } else {
    ctx->final_used = 0;
  }

  if (fix_len) {
    *out_len += b;
  }

  return 1;
}

// EVP_DecryptFinal_ex finishes a decryption, validating and stripping the
// padding from the withheld final block when padding is enabled.
int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len) {
  int i, n;
  unsigned int b;
  *out_len = 0;

  if (ctx->poisoned) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) {
    i = ctx->cipher->cipher(ctx, out, NULL, 0);
    if (i < 0) {
      return 0;
    } else {
      *out_len = i;
    }
    goto out;
  }

  b = ctx->cipher->block_size;
  if (ctx->flags & EVP_CIPH_NO_PADDING) {
    if (ctx->buf_len) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH);
      return 0;
    }
    *out_len = 0;
    goto out;
  }

  if (b > 1) {
    if (ctx->buf_len || !ctx->final_used) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_WRONG_FINAL_BLOCK_LENGTH);
      return 0;
    }
    assert(b <= sizeof(ctx->final));
    // The following assumes that the ciphertext has been authenticated.
    // Otherwise it provides a padding oracle.
    n = ctx->final[b - 1];
    if (n == 0 || n > (int)b) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    // Every padding byte must equal the padding length |n|.
    for (i = 0; i < n; i++) {
      if (ctx->final[--b] != n) {
        OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
        return 0;
      }
    }
    n = ctx->cipher->block_size - n;
    for (i = 0; i < n; i++) {
      out[i] = ctx->final[i];
    }
    *out_len = n;
  } else {
    *out_len = 0;
  }

out:
  EVP_Cipher_verify_service_indicator(ctx);
  return 1;
}

// EVP_Cipher invokes the underlying cipher callback directly, bypassing the
// buffering in the Update/Final functions.
int EVP_Cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
               size_t in_len) {
  const int ret = ctx->cipher->cipher(ctx, out, in, in_len);

  // |EVP_CIPH_FLAG_CUSTOM_CIPHER| never sets the FIPS indicator via
  // |EVP_Cipher| because it's complicated whether the operation has completed
  // or not. E.g. AES-GCM with a non-NULL |in| argument hasn't completed an
  // operation. Callers should use the |EVP_AEAD| API or, at least,
  // |EVP_CipherUpdate| etc.
  //
  // This call can't be pushed into |EVP_Cipher_verify_service_indicator|
  // because whether |ret| indicates success or not depends on whether
  // |EVP_CIPH_FLAG_CUSTOM_CIPHER| is set. (This unreasonable, but matches
  // OpenSSL.)
  if (!(ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) && ret) {
    EVP_Cipher_verify_service_indicator(ctx);
  }

  return ret;
}

// EVP_CipherUpdate dispatches to the encrypt or decrypt Update based on the
// direction recorded in |ctx->encrypt|.
int EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len,
                     const uint8_t *in, int in_len) {
  if (ctx->encrypt) {
    return EVP_EncryptUpdate(ctx, out, out_len, in, in_len);
  } else {
    return EVP_DecryptUpdate(ctx, out, out_len, in, in_len);
  }
}

// EVP_CipherFinal_ex dispatches to the encrypt or decrypt Final based on the
// direction recorded in |ctx->encrypt|.
int EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) {
  if (ctx->encrypt) {
    return EVP_EncryptFinal_ex(ctx, out, out_len);
  } else {
    return EVP_DecryptFinal_ex(ctx, out, out_len);
  }
}

// Simple accessors over |ctx| and its configured cipher.
const EVP_CIPHER *EVP_CIPHER_CTX_cipher(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher;
}

int EVP_CIPHER_CTX_nid(const EVP_CIPHER_CTX *ctx) { return ctx->cipher->nid; }

int EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx) {
  return ctx->encrypt;
}

unsigned EVP_CIPHER_CTX_block_size(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher->block_size;
}

unsigned EVP_CIPHER_CTX_key_length(const EVP_CIPHER_CTX *ctx) {
  return ctx->key_len;
}

// EVP_CIPHER_CTX_iv_length returns the configured IV length. For GCM the
// per-context value is queried via ctrl because it may have been changed with
// |EVP_CTRL_AEAD_SET_IVLEN|.
unsigned EVP_CIPHER_CTX_iv_length(const EVP_CIPHER_CTX *ctx) {
  if (EVP_CIPHER_mode(ctx->cipher) == EVP_CIPH_GCM_MODE) {
    int length;
    int res = EVP_CIPHER_CTX_ctrl((EVP_CIPHER_CTX *)ctx, EVP_CTRL_GET_IVLEN, 0,
                                  &length);
    // EVP_CIPHER_CTX_ctrl returning an error should be impossible under this
    // circumstance. If it somehow did, fallback to the static cipher iv_len.
    if (res == 1) {
      return length;
    }
  }
  return ctx->cipher->iv_len;
}

void *EVP_CIPHER_CTX_get_app_data(const EVP_CIPHER_CTX *ctx) {
  return ctx->app_data;
}

void EVP_CIPHER_CTX_set_app_data(EVP_CIPHER_CTX *ctx, void *data) {
  ctx->app_data = data;
}

uint32_t EVP_CIPHER_CTX_flags(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher->flags & ~EVP_CIPH_MODE_MASK;
}

uint32_t EVP_CIPHER_CTX_mode(const EVP_CIPHER_CTX *ctx) {
  return ctx->cipher->flags & EVP_CIPH_MODE_MASK;
}

// EVP_CIPHER_CTX_ctrl forwards |command| to the cipher's ctrl hook, mapping a
// missing hook or a -1 ("not implemented") result to an error return of zero.
int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int command, int arg, void *ptr) {
  int ret;
  if (!ctx->cipher) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_NO_CIPHER_SET);
    return 0;
  }

  if (!ctx->cipher->ctrl) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_NOT_IMPLEMENTED);
    return 0;
  }

  ret = ctx->cipher->ctrl(ctx, command, arg, ptr);
  if (ret == -1) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_OPERATION_NOT_IMPLEMENTED);
    return 0;
  }

  return ret;
}

// EVP_CIPHER_CTX_set_padding toggles the |EVP_CIPH_NO_PADDING| flag. Always
// returns one.
int EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int pad) {
  if (pad) {
    ctx->flags &= ~EVP_CIPH_NO_PADDING;
  } else {
    ctx->flags |= EVP_CIPH_NO_PADDING;
  }
  return 1;
}

// EVP_CIPHER_CTX_set_key_length changes the key length, which is only allowed
// for ciphers flagged |EVP_CIPH_VARIABLE_LENGTH|.
int EVP_CIPHER_CTX_set_key_length(EVP_CIPHER_CTX *c, unsigned key_len) {
  if (c->key_len == key_len) {
    return 1;
  }

  if (key_len == 0 || !(c->cipher->flags & EVP_CIPH_VARIABLE_LENGTH)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_KEY_LENGTH);
    return 0;
  }

  c->key_len = key_len;
  return 1;
}

// Simple accessors over an |EVP_CIPHER|.
int EVP_CIPHER_nid(const EVP_CIPHER *cipher) { return cipher->nid; }

unsigned EVP_CIPHER_block_size(const EVP_CIPHER *cipher) {
  return cipher->block_size;
}

unsigned EVP_CIPHER_key_length(const EVP_CIPHER *cipher) {
  return cipher->key_len;
}

unsigned EVP_CIPHER_iv_length(const EVP_CIPHER *cipher) {
  return cipher->iv_len;
}

uint32_t EVP_CIPHER_flags(const EVP_CIPHER *cipher) {
  return cipher->flags & ~EVP_CIPH_MODE_MASK;
}

uint32_t EVP_CIPHER_mode(const EVP_CIPHER *cipher) {
  return cipher->flags & EVP_CIPH_MODE_MASK;
}

// EVP_CipherInit behaves like |EVP_CipherInit_ex| but, when a cipher is
// supplied, reinitialises |ctx| first (legacy OpenSSL semantics).
int EVP_CipherInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                   const uint8_t *key, const uint8_t *iv, int enc) {
  if (cipher) {
    EVP_CIPHER_CTX_init(ctx);
  }
  return EVP_CipherInit_ex(ctx, cipher, NULL, key, iv, enc);
}

int EVP_EncryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                    const uint8_t *key, const uint8_t *iv) {
  return EVP_CipherInit(ctx, cipher, key, iv, 1);
}

int EVP_DecryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                    const uint8_t *key, const uint8_t *iv) {
  return EVP_CipherInit(ctx, cipher, key, iv, 0);
}

// Non-_ex aliases for the Final functions.
int EVP_CipherFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) {
  return EVP_CipherFinal_ex(ctx, out, out_len);
}

int EVP_EncryptFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) {
  return EVP_EncryptFinal_ex(ctx, out, out_len);
}

int EVP_DecryptFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) {
  return EVP_DecryptFinal_ex(ctx, out, out_len);
}

// EVP_add_cipher_alias registers nothing and reports success; provided for
// OpenSSL API compatibility.
int EVP_add_cipher_alias(const char *a, const char *b) { return 1; }

// EVP_CIPHER_CTX_set_flags is a deliberate no-op, kept for OpenSSL API
// compatibility.
void EVP_CIPHER_CTX_set_flags(const EVP_CIPHER_CTX *ctx, uint32_t flags) {}



================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/cipher/e_aes.cc.inc
================================================
/*
 * Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the targets of the bare #include directives below appear to
// have been stripped by the text extraction (angle-bracket contents lost);
// confirm against the upstream BoringSSL e_aes.cc before relying on this copy.
#include #include #include #include #include #include #include #include #include #include "../../internal.h" #include "../aes/internal.h" #include "../bcm_interface.h" #include "../delocate.h" #include "../service_indicator/internal.h"
#include "internal.h"

OPENSSL_MSVC_PRAGMA(warning(push))
OPENSSL_MSVC_PRAGMA(warning(disable : 4702))  // Unreachable code.
#define AES_GCM_NONCE_LENGTH 12 typedef struct { union { double align; AES_KEY ks; } ks; block128_f block; union { cbc128_f cbc; ctr128_f ctr; } stream; } EVP_AES_KEY; typedef struct { GCM128_KEY key; GCM128_CONTEXT gcm; int key_set; // Set if key initialised int iv_set; // Set if an iv is set uint8_t *iv; // Temporary IV store int ivlen; // IV length int taglen; int iv_gen; // It is OK to generate IVs ctr128_f ctr; } EVP_AES_GCM_CTX; static int aes_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv, int enc) { int ret; EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; const int mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK; if (mode == EVP_CIPH_CTR_MODE) { switch (ctx->key_len) { case 16: boringssl_fips_inc_counter(fips_counter_evp_aes_128_ctr); break; case 32: boringssl_fips_inc_counter(fips_counter_evp_aes_256_ctr); break; } } if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) { if (hwaes_capable()) { ret = aes_hw_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); dat->block = aes_hw_decrypt; dat->stream.cbc = NULL; if (mode == EVP_CIPH_CBC_MODE) { dat->stream.cbc = aes_hw_cbc_encrypt; } } else if (bsaes_capable() && mode == EVP_CIPH_CBC_MODE) { assert(vpaes_capable()); ret = vpaes_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); if (ret == 0) { vpaes_decrypt_key_to_bsaes(&dat->ks.ks, &dat->ks.ks); } // If |dat->stream.cbc| is provided, |dat->block| is never used. 
dat->block = NULL; dat->stream.cbc = bsaes_cbc_encrypt; } else if (vpaes_capable()) { ret = vpaes_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); dat->block = vpaes_decrypt; dat->stream.cbc = NULL; #if defined(VPAES_CBC) if (mode == EVP_CIPH_CBC_MODE) { dat->stream.cbc = vpaes_cbc_encrypt; } #endif } else { ret = aes_nohw_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); dat->block = aes_nohw_decrypt; dat->stream.cbc = NULL; if (mode == EVP_CIPH_CBC_MODE) { dat->stream.cbc = aes_nohw_cbc_encrypt; } } } else if (hwaes_capable()) { ret = aes_hw_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks); dat->block = aes_hw_encrypt; dat->stream.cbc = NULL; if (mode == EVP_CIPH_CBC_MODE) { dat->stream.cbc = aes_hw_cbc_encrypt; } else if (mode == EVP_CIPH_CTR_MODE) { dat->stream.ctr = aes_hw_ctr32_encrypt_blocks; } } else if (vpaes_capable()) { ret = vpaes_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks); dat->block = vpaes_encrypt; dat->stream.cbc = NULL; #if defined(VPAES_CBC) if (mode == EVP_CIPH_CBC_MODE) { dat->stream.cbc = vpaes_cbc_encrypt; } #endif if (mode == EVP_CIPH_CTR_MODE) { #if defined(BSAES) assert(bsaes_capable()); dat->stream.ctr = vpaes_ctr32_encrypt_blocks_with_bsaes; #else dat->stream.ctr = vpaes_ctr32_encrypt_blocks; #endif } } else { ret = aes_nohw_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks); dat->block = aes_nohw_encrypt; dat->stream.cbc = NULL; if (mode == EVP_CIPH_CBC_MODE) { dat->stream.cbc = aes_nohw_cbc_encrypt; } else if (mode == EVP_CIPH_CTR_MODE) { dat->stream.ctr = aes_nohw_ctr32_encrypt_blocks; } } if (ret < 0) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_AES_KEY_SETUP_FAILED); return 0; } return 1; } static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t len) { EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; if (dat->stream.cbc) { (*dat->stream.cbc)(in, out, len, &dat->ks.ks, ctx->iv, ctx->encrypt); } else if (ctx->encrypt) { CRYPTO_cbc128_encrypt(in, out, len, &dat->ks.ks, ctx->iv, dat->block); 
} else { CRYPTO_cbc128_decrypt(in, out, len, &dat->ks.ks, ctx->iv, dat->block); } return 1; } static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t len) { size_t bl = ctx->cipher->block_size; EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; if (len < bl) { return 1; } len -= bl; for (size_t i = 0; i <= len; i += bl) { (*dat->block)(in + i, out + i, &dat->ks.ks); } return 1; } static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t len) { EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks.ks, ctx->iv, ctx->buf, &ctx->num, dat->stream.ctr); return 1; } static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t len) { EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; CRYPTO_ofb128_encrypt(in, out, len, &dat->ks.ks, ctx->iv, &ctx->num, dat->block); return 1; } static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv, int enc) { EVP_AES_GCM_CTX *gctx = reinterpret_cast(ctx->cipher_data); if (!iv && !key) { return 1; } // We must configure first the key, then the IV, but the caller may pass both // together, or separately in either order. if (key) { OPENSSL_memset(&gctx->gcm, 0, sizeof(gctx->gcm)); CRYPTO_gcm128_init_aes_key(&gctx->key, key, ctx->key_len); // Use the IV if specified. Otherwise, use the saved IV, if any. if (iv == NULL && gctx->iv_set) { iv = gctx->iv; } if (iv) { CRYPTO_gcm128_init_ctx(&gctx->key, &gctx->gcm, iv, gctx->ivlen); gctx->iv_set = 1; } gctx->key_set = 1; } else { if (gctx->key_set) { CRYPTO_gcm128_init_ctx(&gctx->key, &gctx->gcm, iv, gctx->ivlen); } else { // The caller specified the IV before the key. Save the IV for later. 
OPENSSL_memcpy(gctx->iv, iv, gctx->ivlen); } gctx->iv_set = 1; gctx->iv_gen = 0; } return 1; } static void aes_gcm_cleanup(EVP_CIPHER_CTX *c) { EVP_AES_GCM_CTX *gctx = reinterpret_cast(c->cipher_data); OPENSSL_cleanse(&gctx->key, sizeof(gctx->key)); OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm)); if (gctx->iv != c->iv) { OPENSSL_free(gctx->iv); } } static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) { EVP_AES_GCM_CTX *gctx = reinterpret_cast(c->cipher_data); switch (type) { case EVP_CTRL_INIT: gctx->key_set = 0; gctx->iv_set = 0; gctx->ivlen = c->cipher->iv_len; gctx->iv = c->iv; gctx->taglen = -1; gctx->iv_gen = 0; return 1; case EVP_CTRL_AEAD_SET_IVLEN: if (arg <= 0) { return 0; } // Allocate memory for IV if needed if (arg > EVP_MAX_IV_LENGTH && arg > gctx->ivlen) { if (gctx->iv != c->iv) { OPENSSL_free(gctx->iv); } gctx->iv = reinterpret_cast(OPENSSL_malloc(arg)); if (!gctx->iv) { return 0; } } gctx->ivlen = arg; return 1; case EVP_CTRL_GET_IVLEN: *(int *)ptr = gctx->ivlen; return 1; case EVP_CTRL_AEAD_SET_TAG: if (arg <= 0 || arg > 16 || c->encrypt) { return 0; } OPENSSL_memcpy(c->buf, ptr, arg); gctx->taglen = arg; return 1; case EVP_CTRL_AEAD_GET_TAG: if (arg <= 0 || arg > 16 || !c->encrypt || gctx->taglen < 0) { return 0; } OPENSSL_memcpy(ptr, c->buf, arg); return 1; case EVP_CTRL_AEAD_SET_IV_FIXED: // Special case: -1 length restores whole IV if (arg == -1) { OPENSSL_memcpy(gctx->iv, ptr, gctx->ivlen); gctx->iv_gen = 1; return 1; } // Fixed field must be at least 4 bytes and invocation field // at least 8. if (arg < 4 || (gctx->ivlen - arg) < 8) { return 0; } OPENSSL_memcpy(gctx->iv, ptr, arg); if (c->encrypt) { // |BCM_rand_bytes| calls within the fipsmodule should be wrapped with // state lock functions to avoid updating the service indicator with the // DRBG functions. 
FIPS_service_indicator_lock_state(); BCM_rand_bytes(gctx->iv + arg, gctx->ivlen - arg); FIPS_service_indicator_unlock_state(); } gctx->iv_gen = 1; return 1; case EVP_CTRL_GCM_IV_GEN: { if (gctx->iv_gen == 0 || gctx->key_set == 0) { return 0; } CRYPTO_gcm128_init_ctx(&gctx->key, &gctx->gcm, gctx->iv, gctx->ivlen); if (arg <= 0 || arg > gctx->ivlen) { arg = gctx->ivlen; } OPENSSL_memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg); // Invocation field will be at least 8 bytes in size, so no need to check // wrap around or increment more than last 8 bytes. uint8_t *ctr = gctx->iv + gctx->ivlen - 8; CRYPTO_store_u64_be(ctr, CRYPTO_load_u64_be(ctr) + 1); gctx->iv_set = 1; return 1; } case EVP_CTRL_GCM_SET_IV_INV: if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt) { return 0; } OPENSSL_memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg); CRYPTO_gcm128_init_ctx(&gctx->key, &gctx->gcm, gctx->iv, gctx->ivlen); gctx->iv_set = 1; return 1; case EVP_CTRL_COPY: { EVP_CIPHER_CTX *out = reinterpret_cast(ptr); EVP_AES_GCM_CTX *gctx_out = reinterpret_cast(out->cipher_data); if (gctx->iv == c->iv) { gctx_out->iv = out->iv; } else { gctx_out->iv = reinterpret_cast(OPENSSL_memdup(gctx->iv, gctx->ivlen)); if (!gctx_out->iv) { return 0; } } return 1; } default: return -1; } } static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t len) { EVP_AES_GCM_CTX *gctx = reinterpret_cast(ctx->cipher_data); // If not set up, return error if (!gctx->key_set) { return -1; } if (!gctx->iv_set) { return -1; } if (len > INT_MAX) { // This function signature can only express up to |INT_MAX| bytes encrypted. // // TODO(https://crbug.com/boringssl/494): Make the internal |EVP_CIPHER| // calling convention |size_t|-clean. 
return -1; } if (in) { if (out == NULL) { if (!CRYPTO_gcm128_aad(&gctx->key, &gctx->gcm, in, len)) { return -1; } } else if (ctx->encrypt) { if (!CRYPTO_gcm128_encrypt(&gctx->key, &gctx->gcm, in, out, len)) { return -1; } } else { if (!CRYPTO_gcm128_decrypt(&gctx->key, &gctx->gcm, in, out, len)) { return -1; } } return (int)len; } else { if (!ctx->encrypt) { if (gctx->taglen < 0 || !CRYPTO_gcm128_finish(&gctx->key, &gctx->gcm, ctx->buf, gctx->taglen)) { return -1; } gctx->iv_set = 0; return 0; } CRYPTO_gcm128_tag(&gctx->key, &gctx->gcm, ctx->buf, 16); gctx->taglen = 16; // Don't reuse the IV gctx->iv_set = 0; return 0; } } DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_128_cbc) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_128_cbc; out->block_size = 16; out->key_len = 16; out->iv_len = 16; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_CBC_MODE; out->init = aes_init_key; out->cipher = aes_cbc_cipher; } DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_128_ctr) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_128_ctr; out->block_size = 1; out->key_len = 16; out->iv_len = 16; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_CTR_MODE; out->init = aes_init_key; out->cipher = aes_ctr_cipher; } DEFINE_LOCAL_DATA(EVP_CIPHER, aes_128_ecb_generic) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_128_ecb; out->block_size = 16; out->key_len = 16; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_ECB_MODE; out->init = aes_init_key; out->cipher = aes_ecb_cipher; } DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_128_ofb) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_128_ofb128; out->block_size = 1; out->key_len = 16; out->iv_len = 16; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_OFB_MODE; out->init = aes_init_key; out->cipher = aes_ofb_cipher; } DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_128_gcm) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_128_gcm; out->block_size = 1; out->key_len = 16; out->iv_len 
= AES_GCM_NONCE_LENGTH; out->ctx_size = sizeof(EVP_AES_GCM_CTX); out->flags = EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | EVP_CIPH_FLAG_AEAD_CIPHER; out->init = aes_gcm_init_key; out->cipher = aes_gcm_cipher; out->cleanup = aes_gcm_cleanup; out->ctrl = aes_gcm_ctrl; } DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_192_cbc) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_192_cbc; out->block_size = 16; out->key_len = 24; out->iv_len = 16; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_CBC_MODE; out->init = aes_init_key; out->cipher = aes_cbc_cipher; } DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_192_ctr) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_192_ctr; out->block_size = 1; out->key_len = 24; out->iv_len = 16; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_CTR_MODE; out->init = aes_init_key; out->cipher = aes_ctr_cipher; } DEFINE_LOCAL_DATA(EVP_CIPHER, aes_192_ecb_generic) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_192_ecb; out->block_size = 16; out->key_len = 24; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_ECB_MODE; out->init = aes_init_key; out->cipher = aes_ecb_cipher; } DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_192_ofb) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_192_ofb128; out->block_size = 1; out->key_len = 24; out->iv_len = 16; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_OFB_MODE; out->init = aes_init_key; out->cipher = aes_ofb_cipher; } DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_192_gcm) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_192_gcm; out->block_size = 1; out->key_len = 24; out->iv_len = AES_GCM_NONCE_LENGTH; out->ctx_size = sizeof(EVP_AES_GCM_CTX); out->flags = EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | EVP_CIPH_FLAG_AEAD_CIPHER; out->init = 
aes_gcm_init_key; out->cipher = aes_gcm_cipher; out->cleanup = aes_gcm_cleanup; out->ctrl = aes_gcm_ctrl; } DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_256_cbc) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_256_cbc; out->block_size = 16; out->key_len = 32; out->iv_len = 16; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_CBC_MODE; out->init = aes_init_key; out->cipher = aes_cbc_cipher; } DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_256_ctr) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_256_ctr; out->block_size = 1; out->key_len = 32; out->iv_len = 16; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_CTR_MODE; out->init = aes_init_key; out->cipher = aes_ctr_cipher; } DEFINE_LOCAL_DATA(EVP_CIPHER, aes_256_ecb_generic) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_256_ecb; out->block_size = 16; out->key_len = 32; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_ECB_MODE; out->init = aes_init_key; out->cipher = aes_ecb_cipher; } DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_256_ofb) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_256_ofb128; out->block_size = 1; out->key_len = 32; out->iv_len = 16; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_OFB_MODE; out->init = aes_init_key; out->cipher = aes_ofb_cipher; } DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_aes_256_gcm) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_256_gcm; out->block_size = 1; out->key_len = 32; out->iv_len = AES_GCM_NONCE_LENGTH; out->ctx_size = sizeof(EVP_AES_GCM_CTX); out->flags = EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | EVP_CIPH_FLAG_AEAD_CIPHER; out->init = aes_gcm_init_key; out->cipher = aes_gcm_cipher; out->cleanup = aes_gcm_cleanup; out->ctrl = aes_gcm_ctrl; } #if defined(HWAES_ECB) static int aes_hw_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t len) { size_t bl = 
ctx->cipher->block_size; if (len < bl) { return 1; } aes_hw_ecb_encrypt(in, out, len, reinterpret_cast(ctx->cipher_data), ctx->encrypt); return 1; } DEFINE_LOCAL_DATA(EVP_CIPHER, aes_hw_128_ecb) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_128_ecb; out->block_size = 16; out->key_len = 16; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_ECB_MODE; out->init = aes_init_key; out->cipher = aes_hw_ecb_cipher; } DEFINE_LOCAL_DATA(EVP_CIPHER, aes_hw_192_ecb) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_192_ecb; out->block_size = 16; out->key_len = 24; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_ECB_MODE; out->init = aes_init_key; out->cipher = aes_hw_ecb_cipher; } DEFINE_LOCAL_DATA(EVP_CIPHER, aes_hw_256_ecb) { memset(out, 0, sizeof(EVP_CIPHER)); out->nid = NID_aes_256_ecb; out->block_size = 16; out->key_len = 32; out->ctx_size = sizeof(EVP_AES_KEY); out->flags = EVP_CIPH_ECB_MODE; out->init = aes_init_key; out->cipher = aes_hw_ecb_cipher; } #define EVP_ECB_CIPHER_FUNCTION(keybits) \ const EVP_CIPHER *EVP_aes_##keybits##_ecb(void) { \ if (hwaes_capable()) { \ return aes_hw_##keybits##_ecb(); \ } \ return aes_##keybits##_ecb_generic(); \ } #else #define EVP_ECB_CIPHER_FUNCTION(keybits) \ const EVP_CIPHER *EVP_aes_##keybits##_ecb(void) { \ return aes_##keybits##_ecb_generic(); \ } #endif // HWAES_ECB EVP_ECB_CIPHER_FUNCTION(128) EVP_ECB_CIPHER_FUNCTION(192) EVP_ECB_CIPHER_FUNCTION(256) #define EVP_AEAD_AES_GCM_TAG_LEN 16 namespace { struct aead_aes_gcm_ctx { GCM128_KEY key; }; } // namespace static int aead_aes_gcm_init_impl(struct aead_aes_gcm_ctx *gcm_ctx, size_t *out_tag_len, const uint8_t *key, size_t key_len, size_t tag_len) { const size_t key_bits = key_len * 8; if (key_bits != 128 && key_bits != 192 && key_bits != 256) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); return 0; // EVP_AEAD_CTX_init should catch this. 
} if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { tag_len = EVP_AEAD_AES_GCM_TAG_LEN; } if (tag_len > EVP_AEAD_AES_GCM_TAG_LEN) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE); return 0; } CRYPTO_gcm128_init_aes_key(&gcm_ctx->key, key, key_len); *out_tag_len = tag_len; return 1; } static_assert(sizeof(((EVP_AEAD_CTX *)NULL)->state) >= sizeof(struct aead_aes_gcm_ctx), "AEAD state is too small"); static_assert(alignof(union evp_aead_ctx_st_state) >= alignof(struct aead_aes_gcm_ctx), "AEAD state has insufficient alignment"); static int aead_aes_gcm_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t requested_tag_len) { struct aead_aes_gcm_ctx *gcm_ctx = (struct aead_aes_gcm_ctx *)&ctx->state; size_t actual_tag_len; if (!aead_aes_gcm_init_impl(gcm_ctx, &actual_tag_len, key, key_len, requested_tag_len)) { return 0; } ctx->tag_len = actual_tag_len; return 1; } static void aead_aes_gcm_cleanup(EVP_AEAD_CTX *ctx) {} static int aead_aes_gcm_seal_scatter_impl( const struct aead_aes_gcm_ctx *gcm_ctx, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, size_t extra_in_len, const uint8_t *ad, size_t ad_len, size_t tag_len) { if (extra_in_len + tag_len < tag_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } if (max_out_tag_len < extra_in_len + tag_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } if (nonce_len == 0) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE); return 0; } const GCM128_KEY *key = &gcm_ctx->key; GCM128_CONTEXT gcm; CRYPTO_gcm128_init_ctx(key, &gcm, nonce, nonce_len); if (ad_len > 0 && !CRYPTO_gcm128_aad(key, &gcm, ad, ad_len)) { return 0; } if (!CRYPTO_gcm128_encrypt(key, &gcm, in, out, in_len)) { return 0; } if (extra_in_len > 0 && !CRYPTO_gcm128_encrypt(key, &gcm, extra_in, out_tag, extra_in_len)) { return 0; } CRYPTO_gcm128_tag(key, &gcm, out_tag + extra_in_len, 
tag_len); *out_tag_len = tag_len + extra_in_len; return 1; } static int aead_aes_gcm_seal_scatter( const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, size_t extra_in_len, const uint8_t *ad, size_t ad_len) { const struct aead_aes_gcm_ctx *gcm_ctx = (const struct aead_aes_gcm_ctx *)&ctx->state; return aead_aes_gcm_seal_scatter_impl( gcm_ctx, out, out_tag, out_tag_len, max_out_tag_len, nonce, nonce_len, in, in_len, extra_in, extra_in_len, ad, ad_len, ctx->tag_len); } static int aead_aes_gcm_open_gather_impl(const struct aead_aes_gcm_ctx *gcm_ctx, uint8_t *out, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag, size_t in_tag_len, const uint8_t *ad, size_t ad_len, size_t tag_len) { uint8_t tag[EVP_AEAD_AES_GCM_TAG_LEN]; if (nonce_len == 0) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE); return 0; } if (in_tag_len != tag_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } const GCM128_KEY *key = &gcm_ctx->key; GCM128_CONTEXT gcm; CRYPTO_gcm128_init_ctx(key, &gcm, nonce, nonce_len); if (!CRYPTO_gcm128_aad(key, &gcm, ad, ad_len)) { return 0; } if (!CRYPTO_gcm128_decrypt(key, &gcm, in, out, in_len)) { return 0; } CRYPTO_gcm128_tag(key, &gcm, tag, tag_len); if (CRYPTO_memcmp(tag, in_tag, tag_len) != 0) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } return 1; } static int aead_aes_gcm_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag, size_t in_tag_len, const uint8_t *ad, size_t ad_len) { struct aead_aes_gcm_ctx *gcm_ctx = (struct aead_aes_gcm_ctx *)&ctx->state; if (!aead_aes_gcm_open_gather_impl(gcm_ctx, out, nonce, nonce_len, in, in_len, in_tag, in_tag_len, ad, ad_len, ctx->tag_len)) { return 0; } AEAD_GCM_verify_service_indicator(ctx); return 
1; } DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_gcm) { memset(out, 0, sizeof(EVP_AEAD)); out->key_len = 16; out->nonce_len = AES_GCM_NONCE_LENGTH; out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; out->seal_scatter_supports_extra_in = 1; out->init = aead_aes_gcm_init; out->cleanup = aead_aes_gcm_cleanup; out->seal_scatter = aead_aes_gcm_seal_scatter; out->open_gather = aead_aes_gcm_open_gather; } DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_192_gcm) { memset(out, 0, sizeof(EVP_AEAD)); out->key_len = 24; out->nonce_len = AES_GCM_NONCE_LENGTH; out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; out->seal_scatter_supports_extra_in = 1; out->init = aead_aes_gcm_init; out->cleanup = aead_aes_gcm_cleanup; out->seal_scatter = aead_aes_gcm_seal_scatter; out->open_gather = aead_aes_gcm_open_gather; } DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_256_gcm) { memset(out, 0, sizeof(EVP_AEAD)); out->key_len = 32; out->nonce_len = AES_GCM_NONCE_LENGTH; out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; out->seal_scatter_supports_extra_in = 1; out->init = aead_aes_gcm_init; out->cleanup = aead_aes_gcm_cleanup; out->seal_scatter = aead_aes_gcm_seal_scatter; out->open_gather = aead_aes_gcm_open_gather; } static int aead_aes_gcm_init_randnonce(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t requested_tag_len) { if (requested_tag_len != EVP_AEAD_DEFAULT_TAG_LENGTH) { if (requested_tag_len < AES_GCM_NONCE_LENGTH) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } requested_tag_len -= AES_GCM_NONCE_LENGTH; } if (!aead_aes_gcm_init(ctx, key, key_len, requested_tag_len)) { return 0; } ctx->tag_len += AES_GCM_NONCE_LENGTH; return 1; } static int aead_aes_gcm_seal_scatter_randnonce( const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *external_nonce, size_t external_nonce_len, const 
// NOTE(review): the enclosing function's signature begins before this chunk.
// This is the tail of the randnonce seal path, which generates a random
// 12-byte nonce internally and appends it to the tag.
uint8_t *in, size_t in_len, const uint8_t *extra_in, size_t extra_in_len,
    const uint8_t *ad, size_t ad_len) {
  // Callers must not supply a nonce; it is generated below.
  if (external_nonce_len != 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  uint8_t nonce[AES_GCM_NONCE_LENGTH];
  // The nonce is appended after the tag, so the tag buffer must be able to
  // hold it in addition to the tag itself.
  if (max_out_tag_len < sizeof(nonce)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  // |BCM_rand_bytes| calls within the fipsmodule should be wrapped with state
  // lock functions to avoid updating the service indicator with the DRBG
  // functions.
  FIPS_service_indicator_lock_state();
  BCM_rand_bytes(nonce, sizeof(nonce));
  FIPS_service_indicator_unlock_state();

  const struct aead_aes_gcm_ctx *gcm_ctx =
      (const struct aead_aes_gcm_ctx *)&ctx->state;
  // |ctx->tag_len| includes the trailing nonce for this AEAD, so subtract it
  // back out when calling the generic GCM implementation.
  if (!aead_aes_gcm_seal_scatter_impl(gcm_ctx, out, out_tag, out_tag_len,
                                      max_out_tag_len - AES_GCM_NONCE_LENGTH,
                                      nonce, sizeof(nonce), in, in_len,
                                      extra_in, extra_in_len, ad, ad_len,
                                      ctx->tag_len - AES_GCM_NONCE_LENGTH)) {
    return 0;
  }

  // Append the nonce after the tag so the opener can recover it.
  assert(*out_tag_len + sizeof(nonce) <= max_out_tag_len);
  memcpy(out_tag + *out_tag_len, nonce, sizeof(nonce));
  *out_tag_len += sizeof(nonce);

  AEAD_GCM_verify_service_indicator(ctx);
  return 1;
}

// aead_aes_gcm_open_gather_randnonce opens a ciphertext sealed by the
// randnonce seal path above. The nonce travels as the suffix of |in_tag|
// rather than being supplied by the caller.
static int aead_aes_gcm_open_gather_randnonce(
    const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *external_nonce,
    size_t external_nonce_len, const uint8_t *in, size_t in_len,
    const uint8_t *in_tag, size_t in_tag_len, const uint8_t *ad,
    size_t ad_len) {
  // As with sealing, an explicit nonce is rejected.
  if (external_nonce_len != 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  // The tag must be long enough to carry the appended nonce.
  if (in_tag_len < AES_GCM_NONCE_LENGTH) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }
  const uint8_t *nonce = in_tag + in_tag_len - AES_GCM_NONCE_LENGTH;

  const struct aead_aes_gcm_ctx *gcm_ctx =
      (const struct aead_aes_gcm_ctx *)&ctx->state;
  if (!aead_aes_gcm_open_gather_impl(
          gcm_ctx, out, nonce, AES_GCM_NONCE_LENGTH, in, in_len, in_tag,
          in_tag_len - AES_GCM_NONCE_LENGTH, ad, ad_len,
          ctx->tag_len - AES_GCM_NONCE_LENGTH)) {
    return 0;
  }

  AEAD_GCM_verify_service_indicator(ctx);
  return 1;
}

// AES-128-GCM with an internally generated random nonce. |nonce_len| is zero
// because the caller never supplies one; overhead covers tag plus nonce.
DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_gcm_randnonce) {
  memset(out, 0, sizeof(EVP_AEAD));

  out->key_len = 16;
  out->nonce_len = 0;
  out->overhead = EVP_AEAD_AES_GCM_TAG_LEN + AES_GCM_NONCE_LENGTH;
  out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN + AES_GCM_NONCE_LENGTH;
  out->seal_scatter_supports_extra_in = 1;

  out->init = aead_aes_gcm_init_randnonce;
  out->cleanup = aead_aes_gcm_cleanup;
  out->seal_scatter = aead_aes_gcm_seal_scatter_randnonce;
  out->open_gather = aead_aes_gcm_open_gather_randnonce;
}

// AES-256-GCM variant of the randnonce AEAD above.
DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_256_gcm_randnonce) {
  memset(out, 0, sizeof(EVP_AEAD));

  out->key_len = 32;
  out->nonce_len = 0;
  out->overhead = EVP_AEAD_AES_GCM_TAG_LEN + AES_GCM_NONCE_LENGTH;
  out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN + AES_GCM_NONCE_LENGTH;
  out->seal_scatter_supports_extra_in = 1;

  out->init = aead_aes_gcm_init_randnonce;
  out->cleanup = aead_aes_gcm_cleanup;
  out->seal_scatter = aead_aes_gcm_seal_scatter_randnonce;
  out->open_gather = aead_aes_gcm_open_gather_randnonce;
}

namespace {
// TLS 1.2 AEAD state: the generic GCM context plus the lowest nonce counter
// value that the next seal call is allowed to use.
struct aead_aes_gcm_tls12_ctx {
  struct aead_aes_gcm_ctx gcm_ctx;
  uint64_t min_next_nonce;
};
}  // namespace

static_assert(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
                  sizeof(struct aead_aes_gcm_tls12_ctx),
              "AEAD state is too small");
static_assert(alignof(union evp_aead_ctx_st_state) >=
                  alignof(struct aead_aes_gcm_tls12_ctx),
              "AEAD state has insufficient alignment");

// Initialises the TLS 1.2 GCM AEAD: zeroes the nonce-monotonicity counter and
// delegates key setup to the shared GCM init.
static int aead_aes_gcm_tls12_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                   size_t key_len, size_t requested_tag_len) {
  struct aead_aes_gcm_tls12_ctx *gcm_ctx =
      (struct aead_aes_gcm_tls12_ctx *)&ctx->state;

  gcm_ctx->min_next_nonce = 0;

  size_t actual_tag_len;
  if (!aead_aes_gcm_init_impl(&gcm_ctx->gcm_ctx, &actual_tag_len, key, key_len,
                              requested_tag_len)) {
    return 0;
  }

  ctx->tag_len = actual_tag_len;
  return 1;
}

// Seals as standard GCM but additionally enforces that the trailing 64 bits of
// each nonce are strictly increasing across calls (TLS 1.2 record sequence).
static int aead_aes_gcm_tls12_seal_scatter(
    const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
    size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len) {
  struct aead_aes_gcm_tls12_ctx *gcm_ctx =
      (struct aead_aes_gcm_tls12_ctx *)&ctx->state;

  if (nonce_len != AES_GCM_NONCE_LENGTH) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  // The given nonces must be strictly monotonically increasing.
  uint64_t given_counter =
      CRYPTO_load_u64_be(nonce + nonce_len - sizeof(uint64_t));
  // UINT64_MAX is rejected so that |min_next_nonce| below cannot wrap.
  if (given_counter == UINT64_MAX || given_counter < gcm_ctx->min_next_nonce) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE);
    return 0;
  }

  gcm_ctx->min_next_nonce = given_counter + 1;

  if (!aead_aes_gcm_seal_scatter(ctx, out, out_tag, out_tag_len,
                                 max_out_tag_len, nonce, nonce_len, in, in_len,
                                 extra_in, extra_in_len, ad, ad_len)) {
    return 0;
  }

  AEAD_GCM_verify_service_indicator(ctx);
  return 1;
}

// AES-128-GCM for TLS 1.2, with nonce-monotonicity enforcement.
DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_gcm_tls12) {
  memset(out, 0, sizeof(EVP_AEAD));

  out->key_len = 16;
  out->nonce_len = AES_GCM_NONCE_LENGTH;
  out->overhead = EVP_AEAD_AES_GCM_TAG_LEN;
  out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN;
  out->seal_scatter_supports_extra_in = 1;

  out->init = aead_aes_gcm_tls12_init;
  out->cleanup = aead_aes_gcm_cleanup;
  out->seal_scatter = aead_aes_gcm_tls12_seal_scatter;
  out->open_gather = aead_aes_gcm_open_gather;
}

// AES-256-GCM for TLS 1.2.
DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_256_gcm_tls12) {
  memset(out, 0, sizeof(EVP_AEAD));

  out->key_len = 32;
  out->nonce_len = AES_GCM_NONCE_LENGTH;
  out->overhead = EVP_AEAD_AES_GCM_TAG_LEN;
  out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN;
  out->seal_scatter_supports_extra_in = 1;

  out->init = aead_aes_gcm_tls12_init;
  out->cleanup = aead_aes_gcm_cleanup;
  out->seal_scatter = aead_aes_gcm_tls12_seal_scatter;
  out->open_gather = aead_aes_gcm_open_gather;
}

namespace {
// TLS 1.3 AEAD state. TLS 1.3 XORs the record sequence number into the nonce,
// so in addition to the monotonicity counter we track the XOR |mask| learned
// from the first nonce (sequence number zero) and a |first|-call flag.
struct aead_aes_gcm_tls13_ctx {
  struct aead_aes_gcm_ctx gcm_ctx;
  uint64_t min_next_nonce;
  uint64_t mask;
  uint8_t first;
};
}  // namespace

static_assert(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
                  sizeof(struct aead_aes_gcm_tls13_ctx),
              "AEAD state is too small");
static_assert(alignof(union evp_aead_ctx_st_state) >=
                  alignof(struct aead_aes_gcm_tls13_ctx),
              "AEAD state has insufficient alignment");

// Initialises the TLS 1.3 GCM AEAD; |mask| is captured lazily on the first
// seal call rather than here.
static int aead_aes_gcm_tls13_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                   size_t key_len, size_t requested_tag_len) {
  struct aead_aes_gcm_tls13_ctx *gcm_ctx =
      (struct aead_aes_gcm_tls13_ctx *)&ctx->state;

  gcm_ctx->min_next_nonce = 0;
  gcm_ctx->first = 1;

  size_t actual_tag_len;
  if (!aead_aes_gcm_init_impl(&gcm_ctx->gcm_ctx, &actual_tag_len, key, key_len,
                              requested_tag_len)) {
    return 0;
  }

  ctx->tag_len = actual_tag_len;
  return 1;
}

// Seals as standard GCM while enforcing the TLS 1.3 masked-sequence-number
// nonce schedule: after removing the per-connection mask the counters must be
// strictly increasing.
static int aead_aes_gcm_tls13_seal_scatter(
    const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
    size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len) {
  struct aead_aes_gcm_tls13_ctx *gcm_ctx =
      (struct aead_aes_gcm_tls13_ctx *)&ctx->state;

  if (nonce_len != AES_GCM_NONCE_LENGTH) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
    return 0;
  }

  // The given nonces must be strictly monotonically increasing. See
  // https://tools.ietf.org/html/rfc8446#section-5.3 for details of the TLS 1.3
  // nonce construction.
  uint64_t given_counter =
      CRYPTO_load_u64_be(nonce + nonce_len - sizeof(uint64_t));

  if (gcm_ctx->first) {
    // In the first call the sequence number will be zero and therefore the
    // given nonce will be 0 ^ mask = mask.
    gcm_ctx->mask = given_counter;
    gcm_ctx->first = 0;
  }
  given_counter ^= gcm_ctx->mask;

  if (given_counter == UINT64_MAX || given_counter < gcm_ctx->min_next_nonce) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE);
    return 0;
  }

  gcm_ctx->min_next_nonce = given_counter + 1;

  if (!aead_aes_gcm_seal_scatter(ctx, out, out_tag, out_tag_len,
                                 max_out_tag_len, nonce, nonce_len, in, in_len,
                                 extra_in, extra_in_len, ad, ad_len)) {
    return 0;
  }

  AEAD_GCM_verify_service_indicator(ctx);
  return 1;
}

// AES-128-GCM for TLS 1.3.
DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_gcm_tls13) {
  memset(out, 0, sizeof(EVP_AEAD));

  out->key_len = 16;
  out->nonce_len = AES_GCM_NONCE_LENGTH;
  out->overhead = EVP_AEAD_AES_GCM_TAG_LEN;
  out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN;
  out->seal_scatter_supports_extra_in = 1;

  out->init = aead_aes_gcm_tls13_init;
  out->cleanup = aead_aes_gcm_cleanup;
  out->seal_scatter = aead_aes_gcm_tls13_seal_scatter;
  out->open_gather = aead_aes_gcm_open_gather;
}

// AES-256-GCM for TLS 1.3.
DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_256_gcm_tls13) {
  memset(out, 0, sizeof(EVP_AEAD));

  out->key_len = 32;
  out->nonce_len = AES_GCM_NONCE_LENGTH;
  out->overhead = EVP_AEAD_AES_GCM_TAG_LEN;
  out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN;
  out->seal_scatter_supports_extra_in = 1;

  out->init = aead_aes_gcm_tls13_init;
  out->cleanup = aead_aes_gcm_cleanup;
  out->seal_scatter = aead_aes_gcm_tls13_seal_scatter;
  out->open_gather = aead_aes_gcm_open_gather;
}

// Reports whether this platform has hardware AES support usable by this
// module: AES-NI plus CLMUL on x86/x86-64, the crypto extensions plus PMULL
// on ARM/AArch64, and nothing elsewhere.
int EVP_has_aes_hardware(void) {
#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
  return hwaes_capable() && crypto_gcm_clmul_enabled();
#elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
  return hwaes_capable() && CRYPTO_is_ARMv8_PMULL_capable();
#else
  return 0;
#endif
}

OPENSSL_MSVC_PRAGMA(warning(pop))

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/cipher/e_aesccm.cc.inc
================================================
/*
 * Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the angle-bracket header names of the following includes
// appear to have been stripped by extraction; restore from upstream BoringSSL.
#include
#include
#include
#include
#include

#include "../aes/internal.h"
#include "../delocate.h"
#include "../service_indicator/internal.h"
#include "internal.h"


// ccm128_context holds the per-key CCM configuration: the raw block cipher,
// an optional counter-mode acceleration function, the tag length |M| and the
// length-field width |L| (which fixes the nonce length as 15 - L).
struct ccm128_context {
  block128_f block;
  ctr128_f ctr;
  unsigned M, L;
};

// ccm128_state is the per-operation state: the running counter/nonce block
// and the CBC-MAC accumulator.
struct ccm128_state {
  alignas(16) uint8_t nonce[16];
  alignas(16) uint8_t cmac[16];
};

// CRYPTO_ccm128_init validates the CCM parameters (per RFC 3610: even tag
// length 4..16, length field 2..8) and records them in |ctx|.
static int CRYPTO_ccm128_init(struct ccm128_context *ctx, const AES_KEY *key,
                              block128_f block, ctr128_f ctr, unsigned M,
                              unsigned L) {
  if (M < 4 || M > 16 || (M & 1) != 0 || L < 2 || L > 8) {
    return 0;
  }
  ctx->block = block;
  ctx->ctr = ctr;
  ctx->M = M;
  ctx->L = L;
  return 1;
}

// CRYPTO_ccm128_max_input returns the largest plaintext length representable
// in |ctx->L| bytes, i.e. 2^(8L) - 1, clamped to SIZE_MAX.
static size_t CRYPTO_ccm128_max_input(const struct ccm128_context *ctx) {
  return ctx->L >= sizeof(size_t) ? SIZE_MAX
                                  : (((size_t)1) << (ctx->L * 8)) - 1;
}

// ccm128_init_state assembles block B_0 and absorbs the AAD into the CBC-MAC,
// leaving |state| ready for ccm128_encrypt / ccm128_compute_mac.
static int ccm128_init_state(const struct ccm128_context *ctx,
                             struct ccm128_state *state, const AES_KEY *key,
                             const uint8_t *nonce, size_t nonce_len,
                             const uint8_t *aad, size_t aad_len,
                             size_t plaintext_len) {
  const block128_f block = ctx->block;
  const unsigned M = ctx->M;
  const unsigned L = ctx->L;

  // |L| determines the expected |nonce_len| and the limit for |plaintext_len|.
  if (plaintext_len > CRYPTO_ccm128_max_input(ctx)  //
      || nonce_len != 15 - L) {
    return 0;
  }

  // Assemble the first block for computing the MAC.
  OPENSSL_memset(state, 0, sizeof(*state));
  // Flags byte: length-field size in the low bits, encoded tag length in
  // bits 3..5.
  state->nonce[0] = (uint8_t)((L - 1) | ((M - 2) / 2) << 3);
  if (aad_len != 0) {
    state->nonce[0] |= 0x40;  // Set AAD Flag
  }
  OPENSSL_memcpy(&state->nonce[1], nonce, nonce_len);
  // The message length occupies the bottom |L| bytes, big-endian.
  for (unsigned i = 0; i < L; i++) {
    state->nonce[15 - i] = (uint8_t)(plaintext_len >> (8 * i));
  }

  (*block)(state->nonce, state->cmac, key);
  size_t blocks = 1;

  if (aad_len != 0) {
    unsigned i;
    // The AAD is prefixed with its length, using the variable-width encoding
    // of RFC 3610, section 2.2, and then XORed into the CBC-MAC.
    // Cast to u64 to avoid the compiler complaining about invalid shifts.
    uint64_t aad_len_u64 = aad_len;
    if (aad_len_u64 < 0x10000 - 0x100) {
      state->cmac[0] ^= (uint8_t)(aad_len_u64 >> 8);
      state->cmac[1] ^= (uint8_t)aad_len_u64;
      i = 2;
    } else if (aad_len_u64 <= 0xffffffff) {
      state->cmac[0] ^= 0xff;
      state->cmac[1] ^= 0xfe;
      state->cmac[2] ^= (uint8_t)(aad_len_u64 >> 24);
      state->cmac[3] ^= (uint8_t)(aad_len_u64 >> 16);
      state->cmac[4] ^= (uint8_t)(aad_len_u64 >> 8);
      state->cmac[5] ^= (uint8_t)aad_len_u64;
      i = 6;
    } else {
      state->cmac[0] ^= 0xff;
      state->cmac[1] ^= 0xff;
      state->cmac[2] ^= (uint8_t)(aad_len_u64 >> 56);
      state->cmac[3] ^= (uint8_t)(aad_len_u64 >> 48);
      state->cmac[4] ^= (uint8_t)(aad_len_u64 >> 40);
      state->cmac[5] ^= (uint8_t)(aad_len_u64 >> 32);
      state->cmac[6] ^= (uint8_t)(aad_len_u64 >> 24);
      state->cmac[7] ^= (uint8_t)(aad_len_u64 >> 16);
      state->cmac[8] ^= (uint8_t)(aad_len_u64 >> 8);
      state->cmac[9] ^= (uint8_t)aad_len_u64;
      i = 10;
    }

    do {
      for (; i < 16 && aad_len != 0; i++) {
        state->cmac[i] ^= *aad;
        aad++;
        aad_len--;
      }
      (*block)(state->cmac, state->cmac, key);
      blocks++;
      i = 0;
    } while (aad_len != 0);
  }

  // Per RFC 3610, section 2.6, the total number of block cipher operations done
  // must not exceed 2^61. There are two block cipher operations remaining per
  // message block, plus one block at the end to encrypt the MAC.
  size_t remaining_blocks = 2 * ((plaintext_len + 15) / 16) + 1;
  if (plaintext_len + 15 < plaintext_len ||
      remaining_blocks + blocks < blocks ||
      (uint64_t)remaining_blocks + blocks > UINT64_C(1) << 61) {
    return 0;
  }

  // Assemble the first block for encrypting and decrypting. The bottom |L|
  // bytes are replaced with a counter and all but the encoding of |L| is
  // cleared in the first byte.
  state->nonce[0] &= 7;
  return 1;
}

// ccm128_encrypt runs the CTR portion of CCM over |in|, starting the counter
// at one (counter zero is reserved for encrypting the MAC).
static int ccm128_encrypt(const struct ccm128_context *ctx,
                          struct ccm128_state *state, const AES_KEY *key,
                          uint8_t *out, const uint8_t *in, size_t len) {
  // The counter for encryption begins at one.
  for (unsigned i = 0; i < ctx->L; i++) {
    state->nonce[15 - i] = 0;
  }
  state->nonce[15] = 1;

  uint8_t partial_buf[16];
  unsigned num = 0;
  CRYPTO_ctr128_encrypt_ctr32(in, out, len, key, state->nonce, partial_buf,
                              &num, ctx->ctr);
  return 1;
}

// ccm128_compute_mac folds |in| into the CBC-MAC accumulated in |state| and
// writes the final tag, encrypted with counter zero, to |out_tag|.
static int ccm128_compute_mac(const struct ccm128_context *ctx,
                              struct ccm128_state *state, const AES_KEY *key,
                              uint8_t *out_tag, size_t tag_len,
                              const uint8_t *in, size_t len) {
  block128_f block = ctx->block;
  if (tag_len != ctx->M) {
    return 0;
  }

  // Incorporate |in| into the MAC.
  while (len >= 16) {
    CRYPTO_xor16(state->cmac, state->cmac, in);
    (*block)(state->cmac, state->cmac, key);
    in += 16;
    len -= 16;
  }
  if (len > 0) {
    for (size_t i = 0; i < len; i++) {
      state->cmac[i] ^= in[i];
    }
    (*block)(state->cmac, state->cmac, key);
  }

  // Encrypt the MAC with counter zero.
  for (unsigned i = 0; i < ctx->L; i++) {
    state->nonce[15 - i] = 0;
  }
  alignas(16) uint8_t tmp[16];
  (*block)(state->nonce, tmp, key);
  CRYPTO_xor16(state->cmac, state->cmac, tmp);

  OPENSSL_memcpy(out_tag, state->cmac, tag_len);
  return 1;
}

// CRYPTO_ccm128_encrypt seals: MAC is computed over the plaintext, then the
// plaintext is encrypted.
static int CRYPTO_ccm128_encrypt(const struct ccm128_context *ctx,
                                 const AES_KEY *key, uint8_t *out,
                                 uint8_t *out_tag, size_t tag_len,
                                 const uint8_t *nonce, size_t nonce_len,
                                 const uint8_t *in, size_t len,
                                 const uint8_t *aad, size_t aad_len) {
  struct ccm128_state state;
  return ccm128_init_state(ctx, &state, key, nonce, nonce_len, aad, aad_len,
                           len) &&
         ccm128_compute_mac(ctx, &state, key, out_tag, tag_len, in, len) &&
         ccm128_encrypt(ctx, &state, key, out, in, len);
}

// CRYPTO_ccm128_decrypt opens: the ciphertext is decrypted first, then the
// MAC is computed over the recovered plaintext (|out|).
static int CRYPTO_ccm128_decrypt(const struct ccm128_context *ctx,
                                 const AES_KEY *key, uint8_t *out,
                                 uint8_t *out_tag, size_t tag_len,
                                 const uint8_t *nonce, size_t nonce_len,
                                 const uint8_t *in, size_t len,
                                 const uint8_t *aad, size_t aad_len) {
  struct ccm128_state state;
  return ccm128_init_state(ctx, &state, key, nonce, nonce_len, aad, aad_len,
                           len) &&
         ccm128_encrypt(ctx, &state, key, out, in, len) &&
         ccm128_compute_mac(ctx, &state, key, out_tag, tag_len, out, len);
}

#define EVP_AEAD_AES_CCM_MAX_TAG_LEN 16

namespace {
// Per-key AEAD state for the AES-CCM AEADs: the AES key schedule (union for
// alignment) plus the CCM configuration.
struct aead_aes_ccm_ctx {
  union {
    double align;
    AES_KEY ks;
  } ks;
  struct ccm128_context ccm;
};
}  // namespace

static_assert(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
                  sizeof(struct aead_aes_ccm_ctx),
              "AEAD state is too small");
static_assert(alignof(union evp_aead_ctx_st_state) >=
                  alignof(struct aead_aes_ccm_ctx),
              "AEAD state has insufficient alignment");

// Shared init for all AES-CCM AEAD variants; |M| and |L| are fixed by each
// variant's wrapper and must agree with the method table's advertised
// overhead, tag length and nonce length (checked by the asserts).
static int aead_aes_ccm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                             size_t key_len, size_t tag_len, unsigned M,
                             unsigned L) {
  assert(M == EVP_AEAD_max_overhead(ctx->aead));
  assert(M == EVP_AEAD_max_tag_len(ctx->aead));
  assert(15 - L == EVP_AEAD_nonce_length(ctx->aead));

  if (key_len != EVP_AEAD_key_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;  // EVP_AEAD_CTX_init should catch this.
  }

  if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
    tag_len = M;
  }
  if (tag_len != M) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
    return 0;
  }

  struct aead_aes_ccm_ctx *ccm_ctx = (struct aead_aes_ccm_ctx *)&ctx->state;

  block128_f block;
  ctr128_f ctr = aes_ctr_set_key(&ccm_ctx->ks.ks, NULL, &block, key, key_len);
  ctx->tag_len = tag_len;
  if (!CRYPTO_ccm128_init(&ccm_ctx->ccm, &ccm_ctx->ks.ks, block, ctr, M, L)) {
    OPENSSL_PUT_ERROR(CIPHER, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  return 1;
}

// No per-key resources to release; the state lives inline in |ctx->state|.
static void aead_aes_ccm_cleanup(EVP_AEAD_CTX *ctx) {}

// Seal entry point for the AES-CCM AEADs.
static int aead_aes_ccm_seal_scatter(
    const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
    size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
    size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in,
    size_t extra_in_len, const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_ccm_ctx *ccm_ctx =
      (struct aead_aes_ccm_ctx *)&ctx->state;
  if (in_len > CRYPTO_ccm128_max_input(&ccm_ctx->ccm)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_tag_len < ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (!CRYPTO_ccm128_encrypt(&ccm_ctx->ccm, &ccm_ctx->ks.ks, out, out_tag,
                             ctx->tag_len, nonce, nonce_len, in, in_len, ad,
                             ad_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  *out_tag_len = ctx->tag_len;
  AEAD_CCM_verify_service_indicator(ctx);
  return 1;
}

// Open entry point for the AES-CCM AEADs. The tag comparison uses
// |CRYPTO_memcmp| so it is constant-time.
static int aead_aes_ccm_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                    const uint8_t *nonce, size_t nonce_len,
                                    const uint8_t *in, size_t in_len,
                                    const uint8_t *in_tag, size_t in_tag_len,
                                    const uint8_t *ad, size_t ad_len) {
  const struct aead_aes_ccm_ctx *ccm_ctx =
      (struct aead_aes_ccm_ctx *)&ctx->state;

  if (in_len > CRYPTO_ccm128_max_input(&ccm_ctx->ccm)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (in_tag_len != ctx->tag_len) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  uint8_t tag[EVP_AEAD_AES_CCM_MAX_TAG_LEN];
  assert(ctx->tag_len <= EVP_AEAD_AES_CCM_MAX_TAG_LEN);
  if (!CRYPTO_ccm128_decrypt(&ccm_ctx->ccm, &ccm_ctx->ks.ks, out, tag,
                             ctx->tag_len, nonce, nonce_len, in, in_len, ad,
                             ad_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (CRYPTO_memcmp(tag, in_tag, ctx->tag_len) != 0) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  AEAD_CCM_verify_service_indicator(ctx);
  return 1;
}

// AES-128-CCM with M=4, L=2 (13-byte nonce, 4-byte tag), as used in Bluetooth.
static int aead_aes_ccm_bluetooth_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                       size_t key_len, size_t tag_len) {
  return aead_aes_ccm_init(ctx, key, key_len, tag_len, 4, 2);
}

DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_ccm_bluetooth) {
  memset(out, 0, sizeof(EVP_AEAD));

  out->key_len = 16;
  out->nonce_len = 13;
  out->overhead = 4;
  out->max_tag_len = 4;

  out->init = aead_aes_ccm_bluetooth_init;
  out->cleanup = aead_aes_ccm_cleanup;
  out->seal_scatter = aead_aes_ccm_seal_scatter;
  out->open_gather = aead_aes_ccm_open_gather;
}

// AES-128-CCM with M=8, L=2 (13-byte nonce, 8-byte tag).
static int aead_aes_ccm_bluetooth_8_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                         size_t key_len, size_t tag_len) {
  return aead_aes_ccm_init(ctx, key, key_len, tag_len, 8, 2);
}

DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_ccm_bluetooth_8) {
  memset(out, 0, sizeof(EVP_AEAD));

  out->key_len = 16;
  out->nonce_len = 13;
  out->overhead = 8;
  out->max_tag_len = 8;

  out->init = aead_aes_ccm_bluetooth_8_init;
  out->cleanup = aead_aes_ccm_cleanup;
  out->seal_scatter = aead_aes_ccm_seal_scatter;
  out->open_gather = aead_aes_ccm_open_gather;
}

// AES-128-CCM with M=16, L=2 (13-byte nonce, 16-byte tag), as used in Matter.
static int aead_aes_ccm_matter_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                    size_t key_len, size_t tag_len) {
  return aead_aes_ccm_init(ctx, key, key_len, tag_len, 16, 2);
}

DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_ccm_matter) {
  memset(out, 0, sizeof(EVP_AEAD));

  out->key_len = 16;
  out->nonce_len = 13;
  out->overhead = 16;
  out->max_tag_len = 16;

  out->init = aead_aes_ccm_matter_init;
  out->cleanup = aead_aes_ccm_cleanup;
  out->seal_scatter = aead_aes_ccm_seal_scatter;
  out->open_gather = aead_aes_ccm_open_gather;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/cipher/internal.h
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OPENSSL_HEADER_CIPHER_INTERNAL_H
#define OPENSSL_HEADER_CIPHER_INTERNAL_H

// NOTE(review): the angle-bracket header names of the following includes
// appear to have been stripped by extraction; restore from upstream BoringSSL.
#include
#include
#include

#include "../../internal.h"
#include "../aes/internal.h"

#if defined(__cplusplus)
extern "C" {
#endif


// EVP_CIPH_MODE_MASK contains the bits of |flags| that represent the mode.
#define EVP_CIPH_MODE_MASK 0x3f


// EVP_AEAD represents a specific AEAD algorithm.
struct evp_aead_st {
  uint8_t key_len;
  uint8_t nonce_len;
  uint8_t overhead;
  uint8_t max_tag_len;
  int seal_scatter_supports_extra_in;

  // init initialises an |EVP_AEAD_CTX|. If this call returns zero then
  // |cleanup| will not be called for that context.
  int (*init)(EVP_AEAD_CTX *, const uint8_t *key, size_t key_len,
              size_t tag_len);
  // init_with_direction is an alternative initialiser for AEADs whose key
  // schedule differs between sealing and opening.
  int (*init_with_direction)(EVP_AEAD_CTX *, const uint8_t *key,
                             size_t key_len, size_t tag_len,
                             enum evp_aead_direction_t dir);
  void (*cleanup)(EVP_AEAD_CTX *);

  // open decrypts and authenticates a combined ciphertext-and-tag input.
  int (*open)(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
              size_t max_out_len, const uint8_t *nonce, size_t nonce_len,
              const uint8_t *in, size_t in_len, const uint8_t *ad,
              size_t ad_len);

  // seal_scatter encrypts |in| to |out| and writes the tag (and any sealed
  // |extra_in|) separately to |out_tag|.
  int (*seal_scatter)(const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
                      size_t *out_tag_len, size_t max_out_tag_len,
                      const uint8_t *nonce, size_t nonce_len, const uint8_t *in,
                      size_t in_len, const uint8_t *extra_in,
                      size_t extra_in_len, const uint8_t *ad, size_t ad_len);

  // open_gather decrypts |in| to |out|, taking the tag as a separate input.
  int (*open_gather)(const EVP_AEAD_CTX *ctx, uint8_t *out,
                     const uint8_t *nonce, size_t nonce_len, const uint8_t *in,
                     size_t in_len, const uint8_t *in_tag, size_t in_tag_len,
                     const uint8_t *ad, size_t ad_len);

  int (*get_iv)(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv,
                size_t *out_len);

  size_t (*tag_len)(const EVP_AEAD_CTX *ctx, size_t in_Len,
                    size_t extra_in_len);
};


struct evp_cipher_st {
  // type contains a NID identifying the cipher. (e.g. NID_aes_128_gcm.)
  int nid;

  // block_size contains the block size, in bytes, of the cipher, or 1 for a
  // stream cipher.
  unsigned block_size;

  // key_len contains the key size, in bytes, for the cipher. If the cipher
  // takes a variable key size then this contains the default size.
  unsigned key_len;

  // iv_len contains the IV size, in bytes, or zero if inapplicable.
  unsigned iv_len;

  // ctx_size contains the size, in bytes, of the per-key context for this
  // cipher.
  unsigned ctx_size;

  // flags contains the OR of a number of flags. See |EVP_CIPH_*|.
  uint32_t flags;

  int (*init)(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv,
              int enc);

  int (*cipher)(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                size_t inl);

  // cleanup, if non-NULL, releases memory associated with the context. It is
  // called if |EVP_CTRL_INIT| succeeds. Note that |init| may not have been
  // called at this point.
  void (*cleanup)(EVP_CIPHER_CTX *);

  int (*ctrl)(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
};


#if defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_CIPHER_INTERNAL_H

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/cmac/cmac.cc.inc
================================================
/*
 * Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the angle-bracket header names of the following includes
// appear to have been stripped by extraction; restore from upstream BoringSSL.
#include
#include
#include
#include
#include
#include
#include

#include "../../internal.h"
#include "../service_indicator/internal.h"


// CMAC state: the underlying CBC cipher context, the two derived subkeys, and
// a buffer holding the last (possibly partial) input block.
struct cmac_ctx_st {
  EVP_CIPHER_CTX cipher_ctx;
  // k1 and k2 are the CMAC subkeys. See
  // https://tools.ietf.org/html/rfc4493#section-2.3
  uint8_t k1[AES_BLOCK_SIZE];
  uint8_t k2[AES_BLOCK_SIZE];
  // Last (possibly partial) scratch block.
  uint8_t block[AES_BLOCK_SIZE];
  // block_used contains the number of valid bytes in |block|.
  unsigned block_used;
};

static void CMAC_CTX_init(CMAC_CTX *ctx) {
  EVP_CIPHER_CTX_init(&ctx->cipher_ctx);
}

// Clears key-derived material as well as releasing the cipher context.
static void CMAC_CTX_cleanup(CMAC_CTX *ctx) {
  EVP_CIPHER_CTX_cleanup(&ctx->cipher_ctx);
  OPENSSL_cleanse(ctx->k1, sizeof(ctx->k1));
  OPENSSL_cleanse(ctx->k2, sizeof(ctx->k2));
  OPENSSL_cleanse(ctx->block, sizeof(ctx->block));
}

// AES_CMAC computes the one-shot AES-CMAC of |in| with a 128- or 256-bit key.
int AES_CMAC(uint8_t out[16], const uint8_t *key, size_t key_len,
             const uint8_t *in, size_t in_len) {
  const EVP_CIPHER *cipher;
  switch (key_len) {
    // WARNING: this code assumes that all supported key sizes are FIPS
    // Approved.
    case 16:
      cipher = EVP_aes_128_cbc();
      break;
    case 32:
      cipher = EVP_aes_256_cbc();
      break;
    default:
      return 0;
  }

  size_t scratch_out_len;
  CMAC_CTX ctx;
  CMAC_CTX_init(&ctx);

  // We have to verify that all the CMAC services actually succeed before
  // updating the indicator state, so we lock the state here.
  FIPS_service_indicator_lock_state();
  const int ok = CMAC_Init(&ctx, key, key_len, cipher, NULL /* engine */) &&
                 CMAC_Update(&ctx, in, in_len) &&
                 CMAC_Final(&ctx, out, &scratch_out_len);
  FIPS_service_indicator_unlock_state();
  if (ok) {
    FIPS_service_indicator_update_state();
  }
  CMAC_CTX_cleanup(&ctx);
  return ok;
}

CMAC_CTX *CMAC_CTX_new(void) {
  // NOTE(review): the template argument of this cast (and the one in
  // |CMAC_Init| below) appears to have been stripped by extraction; restore
  // from upstream BoringSSL.
  CMAC_CTX *ctx = reinterpret_cast(OPENSSL_malloc(sizeof(*ctx)));
  if (ctx != NULL) {
    CMAC_CTX_init(ctx);
  }
  return ctx;
}

void CMAC_CTX_free(CMAC_CTX *ctx) {
  if (ctx == NULL) {
    return;
  }

  CMAC_CTX_cleanup(ctx);
  OPENSSL_free(ctx);
}

// Copies the full CMAC state, including subkeys and the buffered block, so
// |out| can continue independently from |in|'s current position.
int CMAC_CTX_copy(CMAC_CTX *out, const CMAC_CTX *in) {
  if (!EVP_CIPHER_CTX_copy(&out->cipher_ctx, &in->cipher_ctx)) {
    return 0;
  }
  OPENSSL_memcpy(out->k1, in->k1, AES_BLOCK_SIZE);
  OPENSSL_memcpy(out->k2, in->k2, AES_BLOCK_SIZE);
  OPENSSL_memcpy(out->block, in->block, AES_BLOCK_SIZE);
  out->block_used = in->block_used;
  return 1;
}

// binary_field_mul_x_128 treats the 128 bits at |in| as an element of GF(2¹²⁸)
// with a hard-coded reduction polynomial and sets |out| as x times the input.
//
// See https://tools.ietf.org/html/rfc4493#section-2.3
static void binary_field_mul_x_128(uint8_t out[16], const uint8_t in[16]) {
  unsigned i;

  // Shift |in| to left, including carry.
  for (i = 0; i < 15; i++) {
    out[i] = (in[i] << 1) | (in[i + 1] >> 7);
  }

  // If MSB set fixup with R.
  const uint8_t carry = in[0] >> 7;
  out[i] = (in[i] << 1) ^ ((0 - carry) & 0x87);
}

// binary_field_mul_x_64 behaves like |binary_field_mul_x_128| but acts on an
// element of GF(2⁶⁴).
//
// See https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38b.pdf
static void binary_field_mul_x_64(uint8_t out[8], const uint8_t in[8]) {
  unsigned i;

  // Shift |in| to left, including carry.
  for (i = 0; i < 7; i++) {
    out[i] = (in[i] << 1) | (in[i + 1] >> 7);
  }

  // If MSB set fixup with R.
  const uint8_t carry = in[0] >> 7;
  out[i] = (in[i] << 1) ^ ((0 - carry) & 0x1b);
}

static const uint8_t kZeroIV[AES_BLOCK_SIZE] = {0};

// CMAC_Init keys the underlying CBC cipher, derives the subkeys k1 and k2 by
// encrypting the all-zero block and doubling in the binary field, and resets
// the context ready for data. |engine| is ignored.
int CMAC_Init(CMAC_CTX *ctx, const void *key, size_t key_len,
              const EVP_CIPHER *cipher, ENGINE *engine) {
  int ret = 0;
  uint8_t scratch[AES_BLOCK_SIZE];

  // We have to avoid the underlying AES-CBC |EVP_CIPHER| services updating the
  // indicator state, so we lock the state here.
  FIPS_service_indicator_lock_state();
  size_t block_size = EVP_CIPHER_block_size(cipher);
  // NOTE(review): the template argument of the cast below appears to have
  // been stripped by extraction; restore from upstream BoringSSL.
  if ((block_size != AES_BLOCK_SIZE && block_size != 8 /* 3-DES */) ||
      EVP_CIPHER_key_length(cipher) != key_len ||
      !EVP_EncryptInit_ex(&ctx->cipher_ctx, cipher, NULL,
                          reinterpret_cast(key), kZeroIV) ||
      !EVP_Cipher(&ctx->cipher_ctx, scratch, kZeroIV, block_size) ||
      // Reset context again ready for first data.
      !EVP_EncryptInit_ex(&ctx->cipher_ctx, NULL, NULL, NULL, kZeroIV)) {
    goto out;
  }

  if (block_size == AES_BLOCK_SIZE) {
    binary_field_mul_x_128(ctx->k1, scratch);
    binary_field_mul_x_128(ctx->k2, ctx->k1);
  } else {
    binary_field_mul_x_64(ctx->k1, scratch);
    binary_field_mul_x_64(ctx->k2, ctx->k1);
  }
  ctx->block_used = 0;
  ret = 1;

out:
  FIPS_service_indicator_unlock_state();
  return ret;
}

// Discards any buffered input and restarts the CBC chain with a zero IV; the
// key and subkeys are retained.
int CMAC_Reset(CMAC_CTX *ctx) {
  ctx->block_used = 0;
  return EVP_EncryptInit_ex(&ctx->cipher_ctx, NULL, NULL, NULL, kZeroIV);
}

// Streams |in| into the MAC, always holding back at least one block of data
// because the final block is masked specially in |CMAC_Final|.
int CMAC_Update(CMAC_CTX *ctx, const uint8_t *in, size_t in_len) {
  int ret = 0;
  // We have to avoid the underlying AES-CBC |EVP_Cipher| services updating the
  // indicator state, so we lock the state here.
  FIPS_service_indicator_lock_state();
  size_t block_size = EVP_CIPHER_CTX_block_size(&ctx->cipher_ctx);
  assert(block_size <= AES_BLOCK_SIZE);
  uint8_t scratch[AES_BLOCK_SIZE];

  if (ctx->block_used > 0) {
    size_t todo = block_size - ctx->block_used;
    if (in_len < todo) {
      todo = in_len;
    }

    OPENSSL_memcpy(ctx->block + ctx->block_used, in, todo);
    in += todo;
    in_len -= todo;
    ctx->block_used += todo;

    // If |in_len| is zero then either |ctx->block_used| is less than
    // |block_size|, in which case we can stop here, or |ctx->block_used| is
    // exactly |block_size| but there's no more data to process. In the latter
    // case we don't want to process this block now because it might be the
    // last block and that block is treated specially.
    if (in_len == 0) {
      ret = 1;
      goto out;
    }

    assert(ctx->block_used == block_size);

    if (!EVP_Cipher(&ctx->cipher_ctx, scratch, ctx->block, block_size)) {
      goto out;
    }
  }

  // Encrypt all but one of the remaining blocks.
  while (in_len > block_size) {
    if (!EVP_Cipher(&ctx->cipher_ctx, scratch, in, block_size)) {
      goto out;
    }
    in += block_size;
    in_len -= block_size;
  }

  OPENSSL_memcpy(ctx->block, in, in_len);
  // |in_len| is bounded by |block_size|, which fits in |unsigned|.
  static_assert(EVP_MAX_BLOCK_LENGTH < UINT_MAX,
                "EVP_MAX_BLOCK_LENGTH is too large");
  ctx->block_used = (unsigned)in_len;
  ret = 1;

out:
  FIPS_service_indicator_unlock_state();
  return ret;
}

// Finalises the MAC: masks the buffered final block with k1 (complete block)
// or pads it and masks with k2 (partial block), then encrypts it into |out|.
// If |out| is NULL only |*out_len| is reported.
int CMAC_Final(CMAC_CTX *ctx, uint8_t *out, size_t *out_len) {
  int ret = 0;
  size_t block_size = EVP_CIPHER_CTX_block_size(&ctx->cipher_ctx);
  assert(block_size <= AES_BLOCK_SIZE);

  // We have to avoid the underlying AES-CBC |EVP_Cipher| services updating the
  // indicator state, so we lock the state here.
  FIPS_service_indicator_lock_state();

  *out_len = block_size;
  const uint8_t *mask = ctx->k1;

  if (out == NULL) {
    ret = 1;
    goto out;
  }

  if (ctx->block_used != block_size) {
    // If the last block is incomplete, terminate it with a single 'one' bit
    // followed by zeros.
    ctx->block[ctx->block_used] = 0x80;
    OPENSSL_memset(ctx->block + ctx->block_used + 1, 0,
                   block_size - (ctx->block_used + 1));

    mask = ctx->k2;
  }

  for (unsigned i = 0; i < block_size; i++) {
    out[i] = ctx->block[i] ^ mask[i];
  }

  ret = EVP_Cipher(&ctx->cipher_ctx, out, out, block_size);

out:
  FIPS_service_indicator_unlock_state();
  if (ret) {
    FIPS_service_indicator_update_state();
  }
  return ret;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/delocate.h
================================================
/* Copyright 2017 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#ifndef OPENSSL_HEADER_FIPSMODULE_DELOCATE_H
#define OPENSSL_HEADER_FIPSMODULE_DELOCATE_H

// NOTE(review): the angle-bracket header name of the following include
// appears to have been stripped by extraction; restore from upstream.
#include

#include "../internal.h"


#if !defined(BORINGSSL_SHARED_LIBRARY) && defined(BORINGSSL_FIPS) && \
    !defined(OPENSSL_ASAN) && !defined(OPENSSL_MSAN)
// In the FIPS build the storage lives in the BSS (outside the hashed module)
// and is reached through an unhashed accessor function.
#define DEFINE_BSS_GET(type, name, init_value)         \
  static type name __attribute__((used)) = init_value; \
  extern "C" {                                         \
  type *name##_bss_get(void) __attribute__((const));   \
  }
// For FIPS builds we require that CRYPTO_ONCE_INIT be zero.
#define DEFINE_STATIC_ONCE(name) \
  DEFINE_BSS_GET(CRYPTO_once_t, name, CRYPTO_ONCE_INIT)
// For FIPS builds we require that CRYPTO_MUTEX_INIT be zero.
#define DEFINE_STATIC_MUTEX(name) \ DEFINE_BSS_GET(CRYPTO_MUTEX, name, CRYPTO_MUTEX_INIT) // For FIPS builds we require that CRYPTO_EX_DATA_CLASS_INIT be zero. #define DEFINE_STATIC_EX_DATA_CLASS(name) \ DEFINE_BSS_GET(CRYPTO_EX_DATA_CLASS, name, CRYPTO_EX_DATA_CLASS_INIT) #else #define DEFINE_BSS_GET(type, name, init_value) \ static type name = init_value; \ static type *name##_bss_get(void) { return &name; } #define DEFINE_STATIC_ONCE(name) \ static CRYPTO_once_t name = CRYPTO_ONCE_INIT; \ static CRYPTO_once_t *name##_bss_get(void) { return &name; } #define DEFINE_STATIC_MUTEX(name) \ static CRYPTO_MUTEX name = CRYPTO_MUTEX_INIT; \ static CRYPTO_MUTEX *name##_bss_get(void) { return &name; } #define DEFINE_STATIC_EX_DATA_CLASS(name) \ static CRYPTO_EX_DATA_CLASS name = CRYPTO_EX_DATA_CLASS_INIT; \ static CRYPTO_EX_DATA_CLASS *name##_bss_get(void) { return &name; } #endif #define DEFINE_DATA(type, name, accessor_decorations) \ DEFINE_BSS_GET(type, name##_storage, {}) \ DEFINE_STATIC_ONCE(name##_once) \ static void name##_do_init(type *out); \ static void name##_init(void) { name##_do_init(name##_storage_bss_get()); } \ accessor_decorations type *name(void) { \ CRYPTO_once(name##_once_bss_get(), name##_init); \ /* See http://c-faq.com/ansi/constmismatch.html for why the following \ * cast is needed. */ \ return (const type *)name##_storage_bss_get(); \ } \ static void name##_do_init(type *out) // DEFINE_METHOD_FUNCTION defines a function named |name| which returns a // method table of type const |type|*. In FIPS mode, to avoid rel.ro data, it // is split into a CRYPTO_once_t-guarded initializer in the module and // unhashed, non-module accessor functions to space reserved in the BSS. The // method table is initialized by a caller-supplied function which takes a // parameter named |out| of type |type|*. 
The caller should follow the macro // invocation with the body of this function: // // DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md4) { // out->type = NID_md4; // out->md_size = MD4_DIGEST_LENGTH; // out->flags = 0; // out->init = md4_init; // out->update = md4_update; // out->final = md4_final; // out->block_size = 64; // out->ctx_size = sizeof(MD4_CTX); // } // // This mechanism does not use a static initializer because their execution // order is undefined. See FIPS.md for more details. #define DEFINE_METHOD_FUNCTION(type, name) DEFINE_DATA(type, name, const) #define DEFINE_LOCAL_DATA(type, name) DEFINE_DATA(type, name, static const) #endif // OPENSSL_HEADER_FIPSMODULE_DELOCATE_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/dh/check.cc.inc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include "internal.h" int dh_check_params_fast(const DH *dh) { // Most operations scale with p and q. if (BN_is_negative(dh->p) || !BN_is_odd(dh->p) || BN_num_bits(dh->p) > OPENSSL_DH_MAX_MODULUS_BITS) { OPENSSL_PUT_ERROR(DH, DH_R_INVALID_PARAMETERS); return 0; } // q must be bounded by p. if (dh->q != NULL && (BN_is_negative(dh->q) || BN_ucmp(dh->q, dh->p) > 0)) { OPENSSL_PUT_ERROR(DH, DH_R_INVALID_PARAMETERS); return 0; } // g must be an element of p's multiplicative group. 
  if (BN_is_negative(dh->g) || BN_is_zero(dh->g) ||
      BN_ucmp(dh->g, dh->p) >= 0) {
    OPENSSL_PUT_ERROR(DH, DH_R_INVALID_PARAMETERS);
    return 0;
  }

  return 1;
}

// DH_check_pub_key checks a peer's public key against |dh|'s parameters. It
// returns one if the checks could be run (allocation and bignum operations
// succeeded) and zero otherwise; the actual validation findings are reported
// as DH_CHECK_PUBKEY_* bits OR'd into |*out_flags|, which is zeroed first.
// Note a return of one does NOT mean the key is valid — callers must also
// check that |*out_flags| is zero.
int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *out_flags) {
  *out_flags = 0;

  // Bail out early on parameters outside DoS bounds; no flags are set in this
  // case, only the zero return.
  if (!dh_check_params_fast(dh)) {
    return 0;
  }

  BN_CTX *ctx = BN_CTX_new();
  if (ctx == NULL) {
    return 0;
  }
  BN_CTX_start(ctx);

  int ok = 0;

  // Check |pub_key| is greater than 1.
  if (BN_cmp(pub_key, BN_value_one()) <= 0) {
    *out_flags |= DH_CHECK_PUBKEY_TOO_SMALL;
  }

  // Check |pub_key| is less than |dh->p| - 1.
  BIGNUM *tmp = BN_CTX_get(ctx);
  if (tmp == NULL || !BN_copy(tmp, dh->p) || !BN_sub_word(tmp, 1)) {
    goto err;
  }
  if (BN_cmp(pub_key, tmp) >= 0) {
    *out_flags |= DH_CHECK_PUBKEY_TOO_LARGE;
  }

  if (dh->q != NULL) {
    // Check |pub_key|^|dh->q| is 1 mod |dh->p|. This is necessary for RFC 5114
    // groups which are not safe primes but pick a generator on a prime-order
    // subgroup of size |dh->q|.
    if (!BN_mod_exp_mont(tmp, pub_key, dh->q, dh->p, ctx, NULL)) {
      goto err;
    }
    if (!BN_is_one(tmp)) {
      *out_flags |= DH_CHECK_PUBKEY_INVALID;
    }
  }

  ok = 1;

err:
  BN_CTX_end(ctx);
  BN_CTX_free(ctx);
  return ok;
}

// DH_check validates |dh|'s domain parameters (primality of p and, if
// present, q; suitability of the generator g). Like |DH_check_pub_key|, the
// return value only reports whether the checks could be run; findings are
// accumulated as DH_CHECK_* bits in |*out_flags|.
int DH_check(const DH *dh, int *out_flags) {
  *out_flags = 0;

  if (!dh_check_params_fast(dh)) {
    return 0;
  }

  // Check that p is a safe prime and if g is 2, 3 or 5, check that it is a
  // suitable generator where:
  //   for 2, p mod 24 == 11
  //   for 3, p mod 12 == 5
  //   for 5, p mod 10 == 3 or 7
  // should hold.
  int ok = 0, r;
  BN_CTX *ctx = NULL;
  BN_ULONG l;
  BIGNUM *t1 = NULL, *t2 = NULL;

  ctx = BN_CTX_new();
  if (ctx == NULL) {
    goto err;
  }
  BN_CTX_start(ctx);
  t1 = BN_CTX_get(ctx);
  if (t1 == NULL) {
    goto err;
  }
  t2 = BN_CTX_get(ctx);
  if (t2 == NULL) {
    goto err;
  }

  if (dh->q) {
    // With an explicit subgroup order q, g must lie in (1, p) and generate a
    // subgroup of order dividing q.
    if (BN_cmp(dh->g, BN_value_one()) <= 0) {
      *out_flags |= DH_CHECK_NOT_SUITABLE_GENERATOR;
    } else if (BN_cmp(dh->g, dh->p) >= 0) {
      *out_flags |= DH_CHECK_NOT_SUITABLE_GENERATOR;
    } else {
      // Check g^q == 1 mod p.
      if (!BN_mod_exp_mont(t1, dh->g, dh->q, dh->p, ctx, NULL)) {
        goto err;
      }
      if (!BN_is_one(t1)) {
        *out_flags |= DH_CHECK_NOT_SUITABLE_GENERATOR;
      }
    }
    r = BN_is_prime_ex(dh->q, BN_prime_checks_for_validation, ctx, NULL);
    if (r < 0) {
      goto err;
    }
    if (!r) {
      *out_flags |= DH_CHECK_Q_NOT_PRIME;
    }
    // Check p == 1 mod q, i.e. q divides p - 1.
    if (!BN_div(t1, t2, dh->p, dh->q, ctx)) {
      goto err;
    }
    if (!BN_is_one(t2)) {
      *out_flags |= DH_CHECK_INVALID_Q_VALUE;
    }
  } else if (BN_is_word(dh->g, DH_GENERATOR_2)) {
    // For g == 2, p mod 24 == 11 must hold (see comment above).
    l = BN_mod_word(dh->p, 24);
    if (l == (BN_ULONG)-1) {
      goto err;
    }
    if (l != 11) {
      *out_flags |= DH_CHECK_NOT_SUITABLE_GENERATOR;
    }
  } else if (BN_is_word(dh->g, DH_GENERATOR_5)) {
    // For g == 5, p mod 10 must be 3 or 7.
    l = BN_mod_word(dh->p, 10);
    if (l == (BN_ULONG)-1) {
      goto err;
    }
    if (l != 3 && l != 7) {
      *out_flags |= DH_CHECK_NOT_SUITABLE_GENERATOR;
    }
  } else {
    // Any other generator cannot be checked by this shortcut.
    *out_flags |= DH_CHECK_UNABLE_TO_CHECK_GENERATOR;
  }

  r = BN_is_prime_ex(dh->p, BN_prime_checks_for_validation, ctx, NULL);
  if (r < 0) {
    goto err;
  }
  if (!r) {
    *out_flags |= DH_CHECK_P_NOT_PRIME;
  } else if (!dh->q) {
    // Without q, require p to be a safe prime: (p - 1) / 2 must also be prime.
    if (!BN_rshift1(t1, dh->p)) {
      goto err;
    }
    r = BN_is_prime_ex(t1, BN_prime_checks_for_validation, ctx, NULL);
    if (r < 0) {
      goto err;
    }
    if (!r) {
      *out_flags |= DH_CHECK_P_NOT_SAFE_PRIME;
    }
  }

  ok = 1;

err:
  if (ctx != NULL) {
    BN_CTX_end(ctx);
    BN_CTX_free(ctx);
  }
  return ok;
}



================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/dh/dh.cc.inc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors.
All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include "../../internal.h" #include "../bn/internal.h" #include "../service_indicator/internal.h" #include "internal.h" DH *DH_new(void) { DH *dh = reinterpret_cast(OPENSSL_zalloc(sizeof(DH))); if (dh == NULL) { return NULL; } CRYPTO_MUTEX_init(&dh->method_mont_p_lock); dh->references = 1; return dh; } void DH_free(DH *dh) { if (dh == NULL) { return; } if (!CRYPTO_refcount_dec_and_test_zero(&dh->references)) { return; } BN_MONT_CTX_free(dh->method_mont_p); BN_clear_free(dh->p); BN_clear_free(dh->g); BN_clear_free(dh->q); BN_clear_free(dh->pub_key); BN_clear_free(dh->priv_key); CRYPTO_MUTEX_cleanup(&dh->method_mont_p_lock); OPENSSL_free(dh); } unsigned DH_bits(const DH *dh) { return BN_num_bits(dh->p); } const BIGNUM *DH_get0_pub_key(const DH *dh) { return dh->pub_key; } const BIGNUM *DH_get0_priv_key(const DH *dh) { return dh->priv_key; } const BIGNUM *DH_get0_p(const DH *dh) { return dh->p; } const BIGNUM *DH_get0_q(const DH *dh) { return dh->q; } const BIGNUM *DH_get0_g(const DH *dh) { return dh->g; } void DH_get0_key(const DH *dh, const BIGNUM **out_pub_key, const BIGNUM **out_priv_key) { if (out_pub_key != NULL) { *out_pub_key = dh->pub_key; } if (out_priv_key != NULL) { *out_priv_key = dh->priv_key; } } int DH_set0_key(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key) { if (pub_key != NULL) { BN_free(dh->pub_key); dh->pub_key = pub_key; } if (priv_key != NULL) { BN_free(dh->priv_key); dh->priv_key = priv_key; } return 1; } void DH_get0_pqg(const DH *dh, const BIGNUM **out_p, const BIGNUM **out_q, const BIGNUM **out_g) { if (out_p != NULL) { *out_p = dh->p; } if (out_q != NULL) { *out_q = dh->q; } if (out_g != NULL) { *out_g = dh->g; } } 
int DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g) { if ((dh->p == NULL && p == NULL) || (dh->g == NULL && g == NULL)) { return 0; } if (p != NULL) { BN_free(dh->p); dh->p = p; } if (q != NULL) { BN_free(dh->q); dh->q = q; } if (g != NULL) { BN_free(dh->g); dh->g = g; } // Invalidate the cached Montgomery parameters. BN_MONT_CTX_free(dh->method_mont_p); dh->method_mont_p = NULL; return 1; } int DH_set_length(DH *dh, unsigned priv_length) { dh->priv_length = priv_length; return 1; } int DH_generate_key(DH *dh) { boringssl_ensure_ffdh_self_test(); if (!dh_check_params_fast(dh)) { return 0; } int ok = 0; int generate_new_key = 0; BN_CTX *ctx = NULL; BIGNUM *pub_key = NULL, *priv_key = NULL, *priv_key_limit = NULL; ctx = BN_CTX_new(); if (ctx == NULL) { goto err; } if (dh->priv_key == NULL) { priv_key = BN_new(); if (priv_key == NULL) { goto err; } generate_new_key = 1; } else { priv_key = dh->priv_key; } if (dh->pub_key == NULL) { pub_key = BN_new(); if (pub_key == NULL) { goto err; } } else { pub_key = dh->pub_key; } if (!BN_MONT_CTX_set_locked(&dh->method_mont_p, &dh->method_mont_p_lock, dh->p, ctx)) { goto err; } if (generate_new_key) { if (dh->q) { // Section 5.6.1.1.4 of SP 800-56A Rev3 generates a private key uniformly // from [1, min(2^N-1, q-1)]. // // Although SP 800-56A Rev3 now permits a private key length N, // |dh->priv_length| historically was ignored when q is available. We // continue to ignore it and interpret such a configuration as N = len(q). if (!BN_rand_range_ex(priv_key, 1, dh->q)) { goto err; } } else { // If q is unspecified, we expect p to be a safe prime, with g generating // the (p-1)/2 subgroup. So, we use q = (p-1)/2. (If g generates a smaller // prime-order subgroup, q will still divide (p-1)/2.) // // We set N from |dh->priv_length|. Section 5.6.1.1.4 of SP 800-56A Rev3 // says to reject N > len(q), or N > num_bits(p) - 1. However, this logic // originally aligned with PKCS#3, which allows num_bits(p). 
Instead, we // clamp |dh->priv_length| before invoking the algorithm. // Compute M = min(2^N, q). priv_key_limit = BN_new(); if (priv_key_limit == NULL) { goto err; } if (dh->priv_length == 0 || dh->priv_length >= BN_num_bits(dh->p) - 1) { // M = q = (p - 1) / 2. if (!BN_rshift1(priv_key_limit, dh->p)) { goto err; } } else { // M = 2^N. if (!BN_set_bit(priv_key_limit, dh->priv_length)) { goto err; } } // Choose a private key uniformly from [1, M-1]. if (!BN_rand_range_ex(priv_key, 1, priv_key_limit)) { goto err; } } } if (!BN_mod_exp_mont_consttime(pub_key, dh->g, priv_key, dh->p, ctx, dh->method_mont_p)) { goto err; } dh->pub_key = pub_key; dh->priv_key = priv_key; ok = 1; err: if (ok != 1) { OPENSSL_PUT_ERROR(DH, ERR_R_BN_LIB); } if (dh->pub_key == NULL) { BN_free(pub_key); } if (dh->priv_key == NULL) { BN_free(priv_key); } BN_free(priv_key_limit); BN_CTX_free(ctx); return ok; } static int dh_compute_key(DH *dh, BIGNUM *out_shared_key, const BIGNUM *peers_key, BN_CTX *ctx) { if (!dh_check_params_fast(dh)) { return 0; } if (dh->priv_key == NULL) { OPENSSL_PUT_ERROR(DH, DH_R_NO_PRIVATE_VALUE); return 0; } int check_result; if (!DH_check_pub_key(dh, peers_key, &check_result) || check_result) { OPENSSL_PUT_ERROR(DH, DH_R_INVALID_PUBKEY); return 0; } int ret = 0; BN_CTX_start(ctx); BIGNUM *p_minus_1 = BN_CTX_get(ctx); if (!p_minus_1 || !BN_MONT_CTX_set_locked(&dh->method_mont_p, &dh->method_mont_p_lock, dh->p, ctx)) { goto err; } if (!BN_mod_exp_mont_consttime(out_shared_key, peers_key, dh->priv_key, dh->p, ctx, dh->method_mont_p) || !BN_copy(p_minus_1, dh->p) || !BN_sub_word(p_minus_1, 1)) { OPENSSL_PUT_ERROR(DH, ERR_R_BN_LIB); goto err; } // This performs the check required by SP 800-56Ar3 section 5.7.1.1 step two. 
if (BN_cmp_word(out_shared_key, 1) <= 0 || BN_cmp(out_shared_key, p_minus_1) == 0) { OPENSSL_PUT_ERROR(DH, DH_R_INVALID_PUBKEY); goto err; } ret = 1; err: BN_CTX_end(ctx); return ret; } int dh_compute_key_padded_no_self_test(unsigned char *out, const BIGNUM *peers_key, DH *dh) { BN_CTX *ctx = BN_CTX_new(); if (ctx == NULL) { return -1; } BN_CTX_start(ctx); int dh_size = DH_size(dh); int ret = -1; BIGNUM *shared_key = BN_CTX_get(ctx); if (shared_key && dh_compute_key(dh, shared_key, peers_key, ctx) && BN_bn2bin_padded(out, dh_size, shared_key)) { ret = dh_size; } BN_CTX_end(ctx); BN_CTX_free(ctx); return ret; } int DH_compute_key_padded(unsigned char *out, const BIGNUM *peers_key, DH *dh) { boringssl_ensure_ffdh_self_test(); return dh_compute_key_padded_no_self_test(out, peers_key, dh); } int DH_compute_key(unsigned char *out, const BIGNUM *peers_key, DH *dh) { boringssl_ensure_ffdh_self_test(); BN_CTX *ctx = BN_CTX_new(); if (ctx == NULL) { return -1; } BN_CTX_start(ctx); int ret = -1; BIGNUM *shared_key = BN_CTX_get(ctx); if (shared_key && dh_compute_key(dh, shared_key, peers_key, ctx)) { // A |BIGNUM|'s byte count fits in |int|. ret = (int)BN_bn2bin(shared_key, out); } BN_CTX_end(ctx); BN_CTX_free(ctx); return ret; } int DH_compute_key_hashed(DH *dh, uint8_t *out, size_t *out_len, size_t max_out_len, const BIGNUM *peers_key, const EVP_MD *digest) { *out_len = SIZE_MAX; const size_t digest_len = EVP_MD_size(digest); if (digest_len > max_out_len) { return 0; } FIPS_service_indicator_lock_state(); int ret = 0; const size_t dh_len = DH_size(dh); uint8_t *shared_bytes = reinterpret_cast(OPENSSL_malloc(dh_len)); unsigned out_len_unsigned; if (!shared_bytes || // SP 800-56A is ambiguous about whether the output should be padded prior // to revision three. But revision three, section C.1, awkwardly specifies // padding to the length of p. // // Also, padded output avoids side-channels, so is always strongly // advisable. 
DH_compute_key_padded(shared_bytes, peers_key, dh) != (int)dh_len || !EVP_Digest(shared_bytes, dh_len, out, &out_len_unsigned, digest, NULL) || out_len_unsigned != digest_len) { goto err; } *out_len = digest_len; ret = 1; err: FIPS_service_indicator_unlock_state(); OPENSSL_free(shared_bytes); return ret; } int DH_size(const DH *dh) { return BN_num_bytes(dh->p); } unsigned DH_num_bits(const DH *dh) { return BN_num_bits(dh->p); } int DH_up_ref(DH *dh) { CRYPTO_refcount_inc(&dh->references); return 1; } DH *DH_get_rfc7919_2048(void) { // This is the prime from https://tools.ietf.org/html/rfc7919#appendix-A.1, // which is specifically approved for FIPS in appendix D of SP 800-56Ar3. static const BN_ULONG kFFDHE2048Data[] = { TOBN(0xffffffff, 0xffffffff), TOBN(0x886b4238, 0x61285c97), TOBN(0xc6f34a26, 0xc1b2effa), TOBN(0xc58ef183, 0x7d1683b2), TOBN(0x3bb5fcbc, 0x2ec22005), TOBN(0xc3fe3b1b, 0x4c6fad73), TOBN(0x8e4f1232, 0xeef28183), TOBN(0x9172fe9c, 0xe98583ff), TOBN(0xc03404cd, 0x28342f61), TOBN(0x9e02fce1, 0xcdf7e2ec), TOBN(0x0b07a7c8, 0xee0a6d70), TOBN(0xae56ede7, 0x6372bb19), TOBN(0x1d4f42a3, 0xde394df4), TOBN(0xb96adab7, 0x60d7f468), TOBN(0xd108a94b, 0xb2c8e3fb), TOBN(0xbc0ab182, 0xb324fb61), TOBN(0x30acca4f, 0x483a797a), TOBN(0x1df158a1, 0x36ade735), TOBN(0xe2a689da, 0xf3efe872), TOBN(0x984f0c70, 0xe0e68b77), TOBN(0xb557135e, 0x7f57c935), TOBN(0x85636555, 0x3ded1af3), TOBN(0x2433f51f, 0x5f066ed0), TOBN(0xd3df1ed5, 0xd5fd6561), TOBN(0xf681b202, 0xaec4617a), TOBN(0x7d2fe363, 0x630c75d8), TOBN(0xcc939dce, 0x249b3ef9), TOBN(0xa9e13641, 0x146433fb), TOBN(0xd8b9c583, 0xce2d3695), TOBN(0xafdc5620, 0x273d3cf1), TOBN(0xadf85458, 0xa2bb4a9a), TOBN(0xffffffff, 0xffffffff), }; BIGNUM *const ffdhe2048_p = BN_new(); BIGNUM *const ffdhe2048_q = BN_new(); BIGNUM *const ffdhe2048_g = BN_new(); DH *const dh = DH_new(); if (!ffdhe2048_p || !ffdhe2048_q || !ffdhe2048_g || !dh) { goto err; } bn_set_static_words(ffdhe2048_p, kFFDHE2048Data, OPENSSL_ARRAY_SIZE(kFFDHE2048Data)); if 
(!BN_rshift1(ffdhe2048_q, ffdhe2048_p) || !BN_set_word(ffdhe2048_g, 2) || !DH_set0_pqg(dh, ffdhe2048_p, ffdhe2048_q, ffdhe2048_g)) { goto err; } return dh; err: BN_free(ffdhe2048_p); BN_free(ffdhe2048_q); BN_free(ffdhe2048_g); DH_free(dh); return NULL; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/dh/internal.h ================================================ /* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CRYPTO_FIPSMODULE_DH_INTERNAL_H #define OPENSSL_HEADER_CRYPTO_FIPSMODULE_DH_INTERNAL_H #include #include #include "../../internal.h" #if defined(__cplusplus) extern "C" { #endif struct dh_st { BIGNUM *p; BIGNUM *g; BIGNUM *q; BIGNUM *pub_key; // g^x mod p BIGNUM *priv_key; // x // priv_length contains the length, in bits, of the private value. If zero, // the private value will be the same length as |p|. unsigned priv_length; CRYPTO_MUTEX method_mont_p_lock; BN_MONT_CTX *method_mont_p; int flags; CRYPTO_refcount_t references; }; // dh_check_params_fast checks basic invariants on |dh|'s domain parameters. It // does not check that |dh| forms a valid group, only that the sizes are within // DoS bounds. 
int dh_check_params_fast(const DH *dh); // dh_compute_key_padded_no_self_test does the same as |DH_compute_key_padded|, // but doesn't try to run the self-test first. This is for use in the self tests // themselves, to prevent an infinite loop. int dh_compute_key_padded_no_self_test(unsigned char *out, const BIGNUM *peers_key, DH *dh); #if defined(__cplusplus) } #endif #endif // OPENSSL_HEADER_CRYPTO_FIPSMODULE_DH_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/digest/digest.cc.inc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../../internal.h" #include "internal.h" int EVP_MD_type(const EVP_MD *md) { return md->type; } int EVP_MD_nid(const EVP_MD *md) { return EVP_MD_type(md); } uint32_t EVP_MD_flags(const EVP_MD *md) { return md->flags; } size_t EVP_MD_size(const EVP_MD *md) { return md->md_size; } size_t EVP_MD_block_size(const EVP_MD *md) { return md->block_size; } void EVP_MD_CTX_init(EVP_MD_CTX *ctx) { OPENSSL_memset(ctx, 0, sizeof(EVP_MD_CTX)); } EVP_MD_CTX *EVP_MD_CTX_new(void) { EVP_MD_CTX *ctx = reinterpret_cast(OPENSSL_malloc(sizeof(EVP_MD_CTX))); if (ctx) { EVP_MD_CTX_init(ctx); } return ctx; } EVP_MD_CTX *EVP_MD_CTX_create(void) { return EVP_MD_CTX_new(); } int EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx) { OPENSSL_free(ctx->md_data); assert(ctx->pctx == NULL || ctx->pctx_ops != NULL); if (ctx->pctx_ops) { ctx->pctx_ops->free(ctx->pctx); } EVP_MD_CTX_init(ctx); return 1; } void EVP_MD_CTX_cleanse(EVP_MD_CTX *ctx) { OPENSSL_cleanse(ctx->md_data, ctx->digest->ctx_size); EVP_MD_CTX_cleanup(ctx); } void EVP_MD_CTX_free(EVP_MD_CTX 
*ctx) { if (!ctx) { return; } EVP_MD_CTX_cleanup(ctx); OPENSSL_free(ctx); } void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx) { EVP_MD_CTX_free(ctx); } int EVP_DigestFinalXOF(EVP_MD_CTX *ctx, uint8_t *out, size_t len) { OPENSSL_PUT_ERROR(DIGEST, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } uint32_t EVP_MD_meth_get_flags(const EVP_MD *md) { return EVP_MD_flags(md); } void EVP_MD_CTX_set_flags(EVP_MD_CTX *ctx, int flags) {} int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in) { // |in->digest| may be NULL if this is a signing |EVP_MD_CTX| for, e.g., // Ed25519 which does not hash with |EVP_MD_CTX|. if (in == NULL || (in->pctx == NULL && in->digest == NULL)) { OPENSSL_PUT_ERROR(DIGEST, DIGEST_R_INPUT_NOT_INITIALIZED); return 0; } EVP_PKEY_CTX *pctx = NULL; assert(in->pctx == NULL || in->pctx_ops != NULL); if (in->pctx) { pctx = in->pctx_ops->dup(in->pctx); if (!pctx) { return 0; } } uint8_t *tmp_buf = NULL; if (in->digest != NULL) { if (out->digest != in->digest) { assert(in->digest->ctx_size != 0); tmp_buf = reinterpret_cast(OPENSSL_malloc(in->digest->ctx_size)); if (tmp_buf == NULL) { if (pctx) { in->pctx_ops->free(pctx); } return 0; } } else { // |md_data| will be the correct size in this case. It's removed from // |out| so that |EVP_MD_CTX_cleanup| doesn't free it, and then it's // reused. tmp_buf = reinterpret_cast(out->md_data); out->md_data = NULL; } } EVP_MD_CTX_cleanup(out); out->digest = in->digest; out->md_data = tmp_buf; if (in->digest != NULL) { OPENSSL_memcpy(out->md_data, in->md_data, in->digest->ctx_size); } out->pctx = pctx; out->pctx_ops = in->pctx_ops; assert(out->pctx == NULL || out->pctx_ops != NULL); return 1; } void EVP_MD_CTX_move(EVP_MD_CTX *out, EVP_MD_CTX *in) { EVP_MD_CTX_cleanup(out); // While not guaranteed, |EVP_MD_CTX| is currently safe to move with |memcpy|. // bssl-crypto currently relies on this, however, so if we change this, we // need to box the |HMAC_CTX|. 
// (Relying on this is only fine because we assume
// BoringSSL and bssl-crypto will always be updated atomically. We do not
// allow any version skew between the two.)
  OPENSSL_memcpy(out, in, sizeof(EVP_MD_CTX));
  EVP_MD_CTX_init(in);
}

// EVP_MD_CTX_copy initialises |out| and then copies |in| into it. Unlike
// |EVP_MD_CTX_copy_ex|, |out| need not have been initialised first (and any
// previous contents of |out| are NOT freed — callers must not pass an
// initialised context holding resources).
int EVP_MD_CTX_copy(EVP_MD_CTX *out, const EVP_MD_CTX *in) {
  EVP_MD_CTX_init(out);
  return EVP_MD_CTX_copy_ex(out, in);
}

// EVP_MD_CTX_reset frees |ctx|'s resources and returns it to the
// freshly-initialised state. Always returns one.
int EVP_MD_CTX_reset(EVP_MD_CTX *ctx) {
  EVP_MD_CTX_cleanup(ctx);
  EVP_MD_CTX_init(ctx);
  return 1;
}

// EVP_DigestInit_ex configures |ctx| to hash with |type|. If the context was
// previously used with a different digest, its |md_data| is reallocated to
// |type->ctx_size|. |engine| is ignored. Returns one on success and zero on
// allocation failure.
int EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *engine) {
  if (ctx->digest != type) {
    assert(type->ctx_size != 0);
    uint8_t *md_data =
        reinterpret_cast(OPENSSL_malloc(type->ctx_size));
    if (md_data == NULL) {
      return 0;
    }

    OPENSSL_free(ctx->md_data);
    ctx->md_data = md_data;
    ctx->digest = type;
  }

  assert(ctx->pctx == NULL || ctx->pctx_ops != NULL);

  ctx->digest->init(ctx);
  return 1;
}

// EVP_DigestInit acts like |EVP_DigestInit_ex| but first initialises |ctx|
// (so any previous contents are NOT freed).
int EVP_DigestInit(EVP_MD_CTX *ctx, const EVP_MD *type) {
  EVP_MD_CTX_init(ctx);
  return EVP_DigestInit_ex(ctx, type, NULL);
}

// EVP_DigestUpdate hashes |len| bytes from |data| into the digest state.
// Always returns one; |ctx| must have been initialised with a digest.
int EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *data, size_t len) {
  ctx->digest->update(ctx, data, len);
  return 1;
}

// EVP_DigestFinal_ex writes the digest to |md_out| and, if |size| is not
// NULL, stores the digest length there. The internal hash state is cleansed
// but |ctx| remains initialised with the same digest.
int EVP_DigestFinal_ex(EVP_MD_CTX *ctx, uint8_t *md_out, unsigned int *size) {
  assert(ctx->digest->md_size <= EVP_MAX_MD_SIZE);
  ctx->digest->final(ctx, md_out);
  if (size != NULL) {
    *size = ctx->digest->md_size;
  }
  OPENSSL_cleanse(ctx->md_data, ctx->digest->ctx_size);
  return 1;
}

// EVP_DigestFinal acts like |EVP_DigestFinal_ex| but additionally cleans up
// |ctx|, which must be re-initialised before reuse.
int EVP_DigestFinal(EVP_MD_CTX *ctx, uint8_t *md, unsigned int *size) {
  (void)EVP_DigestFinal_ex(ctx, md, size);
  EVP_MD_CTX_cleanup(ctx);
  return 1;
}

// EVP_Digest is a one-shot convenience: it hashes |count| bytes from |data|
// with |type| into |out_md| (and |*out_size|, if non-NULL). Returns one on
// success and zero on initialisation failure.
int EVP_Digest(const void *data, size_t count, uint8_t *out_md,
               unsigned int *out_size, const EVP_MD *type, ENGINE *impl) {
  EVP_MD_CTX ctx;
  int ret;

  EVP_MD_CTX_init(&ctx);
  ret = EVP_DigestInit_ex(&ctx, type, impl) &&
        EVP_DigestUpdate(&ctx, data, count) &&
        EVP_DigestFinal_ex(&ctx, out_md, out_size);
  EVP_MD_CTX_cleanup(&ctx);

  return ret;
}

// EVP_MD_CTX_get0_md returns the digest configured on |ctx|, or NULL if |ctx|
// is NULL or has no digest.
const EVP_MD *EVP_MD_CTX_get0_md(const EVP_MD_CTX *ctx) {
  if (ctx == NULL) {
    return NULL;
  }
  return ctx->digest;
}

const EVP_MD
*EVP_MD_CTX_md(const EVP_MD_CTX *ctx) { return EVP_MD_CTX_get0_md(ctx); } size_t EVP_MD_CTX_size(const EVP_MD_CTX *ctx) { return EVP_MD_size(EVP_MD_CTX_get0_md(ctx)); } size_t EVP_MD_CTX_block_size(const EVP_MD_CTX *ctx) { return EVP_MD_block_size(EVP_MD_CTX_get0_md(ctx)); } int EVP_MD_CTX_type(const EVP_MD_CTX *ctx) { return EVP_MD_type(EVP_MD_CTX_get0_md(ctx)); } int EVP_add_digest(const EVP_MD *digest) { return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/digest/digests.cc.inc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "../../internal.h" #include "../bcm_interface.h" #include "../delocate.h" #include "internal.h" #if defined(NDEBUG) #define CHECK(x) (void)(x) #else #define CHECK(x) assert(x) #endif static void sha1_init(EVP_MD_CTX *ctx) { BCM_sha1_init(reinterpret_cast(ctx->md_data)); } static void sha1_update(EVP_MD_CTX *ctx, const void *data, size_t count) { BCM_sha1_update(reinterpret_cast(ctx->md_data), data, count); } static void sha1_final(EVP_MD_CTX *ctx, uint8_t *md) { BCM_sha1_final(md, reinterpret_cast(ctx->md_data)); } DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha1) { out->type = NID_sha1; out->md_size = BCM_SHA_DIGEST_LENGTH; out->flags = 0; out->init = sha1_init; out->update = sha1_update; out->final = sha1_final; out->block_size = 64; out->ctx_size = sizeof(SHA_CTX); } static void sha224_init(EVP_MD_CTX *ctx) { BCM_sha224_init(reinterpret_cast(ctx->md_data)); } static void sha224_update(EVP_MD_CTX *ctx, const void *data, size_t count) { BCM_sha224_update(reinterpret_cast(ctx->md_data), data, count); } static void 
sha224_final(EVP_MD_CTX *ctx, uint8_t *md) { BCM_sha224_final(md, reinterpret_cast(ctx->md_data)); } DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha224) { out->type = NID_sha224; out->md_size = BCM_SHA224_DIGEST_LENGTH; out->flags = 0; out->init = sha224_init; out->update = sha224_update; out->final = sha224_final; out->block_size = 64; out->ctx_size = sizeof(SHA256_CTX); } static void sha256_init(EVP_MD_CTX *ctx) { BCM_sha256_init(reinterpret_cast(ctx->md_data)); } static void sha256_update(EVP_MD_CTX *ctx, const void *data, size_t count) { BCM_sha256_update(reinterpret_cast(ctx->md_data), data, count); } static void sha256_final(EVP_MD_CTX *ctx, uint8_t *md) { BCM_sha256_final(md, reinterpret_cast(ctx->md_data)); } DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha256) { out->type = NID_sha256; out->md_size = BCM_SHA256_DIGEST_LENGTH; out->flags = 0; out->init = sha256_init; out->update = sha256_update; out->final = sha256_final; out->block_size = 64; out->ctx_size = sizeof(SHA256_CTX); } static void sha384_init(EVP_MD_CTX *ctx) { BCM_sha384_init(reinterpret_cast(ctx->md_data)); } static void sha384_update(EVP_MD_CTX *ctx, const void *data, size_t count) { BCM_sha384_update(reinterpret_cast(ctx->md_data), data, count); } static void sha384_final(EVP_MD_CTX *ctx, uint8_t *md) { BCM_sha384_final(md, reinterpret_cast(ctx->md_data)); } DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha384) { out->type = NID_sha384; out->md_size = BCM_SHA384_DIGEST_LENGTH; out->flags = 0; out->init = sha384_init; out->update = sha384_update; out->final = sha384_final; out->block_size = 128; out->ctx_size = sizeof(SHA512_CTX); } static void sha512_init(EVP_MD_CTX *ctx) { BCM_sha512_init(reinterpret_cast(ctx->md_data)); } static void sha512_update(EVP_MD_CTX *ctx, const void *data, size_t count) { BCM_sha512_update(reinterpret_cast(ctx->md_data), data, count); } static void sha512_final(EVP_MD_CTX *ctx, uint8_t *md) { BCM_sha512_final(md, reinterpret_cast(ctx->md_data)); } DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha512) { 
out->type = NID_sha512; out->md_size = BCM_SHA512_DIGEST_LENGTH; out->flags = 0; out->init = sha512_init; out->update = sha512_update; out->final = sha512_final; out->block_size = 128; out->ctx_size = sizeof(SHA512_CTX); } static void sha512_256_init(EVP_MD_CTX *ctx) { BCM_sha512_256_init(reinterpret_cast(ctx->md_data)); } static void sha512_256_update(EVP_MD_CTX *ctx, const void *data, size_t count) { BCM_sha512_256_update(reinterpret_cast(ctx->md_data), data, count); } static void sha512_256_final(EVP_MD_CTX *ctx, uint8_t *md) { BCM_sha512_256_final(md, reinterpret_cast(ctx->md_data)); } DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha512_256) { out->type = NID_sha512_256; out->md_size = BCM_SHA512_256_DIGEST_LENGTH; out->flags = 0; out->init = sha512_256_init; out->update = sha512_256_update; out->final = sha512_256_final; out->block_size = 128; out->ctx_size = sizeof(SHA512_CTX); } #undef CHECK ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/digest/internal.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_DIGEST_INTERNAL_H #define OPENSSL_HEADER_DIGEST_INTERNAL_H #include #if defined(__cplusplus) extern "C" { #endif struct env_md_st { // type contains a NID identifing the digest function. (For example, // NID_md5.) int type; // md_size contains the size, in bytes, of the resulting digest. unsigned md_size; // flags contains the OR of |EVP_MD_FLAG_*| values. uint32_t flags; // init initialises the state in |ctx->md_data|. void (*init)(EVP_MD_CTX *ctx); // update hashes |len| bytes of |data| into the state in |ctx->md_data|. 
void (*update)(EVP_MD_CTX *ctx, const void *data, size_t count); // final completes the hash and writes |md_size| bytes of digest to |out|. void (*final)(EVP_MD_CTX *ctx, uint8_t *out); // block_size contains the hash's native block size. unsigned block_size; // ctx_size contains the size, in bytes, of the state of the hash function. unsigned ctx_size; }; // evp_md_pctx_ops contains function pointers to allow the |pctx| member of // |EVP_MD_CTX| to be manipulated without breaking layering by calling EVP // functions. struct evp_md_pctx_ops { // free is called when an |EVP_MD_CTX| is being freed and the |pctx| also // needs to be freed. void (*free) (EVP_PKEY_CTX *pctx); // dup is called when an |EVP_MD_CTX| is copied and so the |pctx| also needs // to be copied. EVP_PKEY_CTX* (*dup) (EVP_PKEY_CTX *pctx); }; #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_DIGEST_INTERNAL ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/digest/md32_common.h ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_DIGEST_MD32_COMMON_H #define OPENSSL_HEADER_DIGEST_MD32_COMMON_H #include #include #include "../../internal.h" #if defined(__cplusplus) extern "C" { #endif // This is a generic 32-bit "collector" for message digest algorithms. It // collects input character stream into chunks of 32-bit values and invokes the // block function that performs the actual hash calculations. // // To make use of this mechanism, the hash context should be defined with the // following parameters. 
// // typedef struct _state_st { // uint32_t h[ / sizeof(uint32_t)]; // uint32_t Nl, Nh; // uint8_t data[]; // unsigned num; // ... // } _CTX; // // is the output length of the hash in bytes, before // any truncation (e.g. 64 for SHA-224 and SHA-256, 128 for SHA-384 and // SHA-512). // // |h| is the hash state and is updated by a function of type // |crypto_md32_block_func|. |data| is the partial unprocessed block and has // |num| bytes. |Nl| and |Nh| maintain the number of bits processed so far. // A crypto_md32_block_func should incorporate |num_blocks| of input from |data| // into |state|. It is assumed the caller has sized |state| and |data| for the // hash function. typedef void (*crypto_md32_block_func)(uint32_t *state, const uint8_t *data, size_t num_blocks); // crypto_md32_update adds |len| bytes from |in| to the digest. |data| must be a // buffer of length |block_size| with the first |*num| bytes containing a // partial block. This function combines the partial block with |in| and // incorporates any complete blocks into the digest state |h|. It then updates // |data| and |*num| with the new partial block and updates |*Nh| and |*Nl| with // the data consumed. static inline void crypto_md32_update(crypto_md32_block_func block_func, uint32_t *h, uint8_t *data, size_t block_size, unsigned *num, uint32_t *Nh, uint32_t *Nl, const uint8_t *in, size_t len) { if (len == 0) { return; } uint32_t l = *Nl + (((uint32_t)len) << 3); if (l < *Nl) { // Handle carries. (*Nh)++; } *Nh += (uint32_t)(len >> 29); *Nl = l; size_t n = *num; if (n != 0) { if (len >= block_size || len + n >= block_size) { OPENSSL_memcpy(data + n, in, block_size - n); block_func(h, data, 1); n = block_size - n; in += n; len -= n; *num = 0; // Keep |data| zeroed when unused. 
OPENSSL_memset(data, 0, block_size); } else { OPENSSL_memcpy(data + n, in, len); *num += (unsigned)len; return; } } n = len / block_size; if (n > 0) { block_func(h, in, n); n *= block_size; in += n; len -= n; } if (len != 0) { *num = (unsigned)len; OPENSSL_memcpy(data, in, len); } } // crypto_md32_final incorporates the partial block and trailing length into the // digest state |h|. The trailing length is encoded in little-endian if // |is_big_endian| is zero and big-endian otherwise. |data| must be a buffer of // length |block_size| with the first |*num| bytes containing a partial block. // |Nh| and |Nl| contain the total number of bits processed. On return, this // function clears the partial block in |data| and // |*num|. // // This function does not serialize |h| into a final digest. This is the // responsibility of the caller. static inline void crypto_md32_final(crypto_md32_block_func block_func, uint32_t *h, uint8_t *data, size_t block_size, unsigned *num, uint32_t Nh, uint32_t Nl, int is_big_endian) { // |data| always has room for at least one byte. A full block would have // been consumed. size_t n = *num; assert(n < block_size); data[n] = 0x80; n++; // Fill the block with zeros if there isn't room for a 64-bit length. if (n > block_size - 8) { OPENSSL_memset(data + n, 0, block_size - n); n = 0; block_func(h, data, 1); } OPENSSL_memset(data + n, 0, block_size - 8 - n); // Append a 64-bit length to the block and process it. 
if (is_big_endian) { CRYPTO_store_u32_be(data + block_size - 8, Nh); CRYPTO_store_u32_be(data + block_size - 4, Nl); } else { CRYPTO_store_u32_le(data + block_size - 8, Nl); CRYPTO_store_u32_le(data + block_size - 4, Nh); } block_func(h, data, 1); *num = 0; OPENSSL_memset(data, 0, block_size); } #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_DIGEST_MD32_COMMON_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/digestsign/digestsign.cc.inc ================================================ /* * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "../../evp/internal.h" #include "../delocate.h" #include "../digest/internal.h" #include "../service_indicator/internal.h" enum evp_sign_verify_t { evp_sign, evp_verify, }; DEFINE_LOCAL_DATA(struct evp_md_pctx_ops, md_pctx_ops) { out->free = EVP_PKEY_CTX_free; out->dup = EVP_PKEY_CTX_dup; } static int uses_prehash(EVP_MD_CTX *ctx, enum evp_sign_verify_t op) { return (op == evp_sign) ? 
(ctx->pctx->pmeth->sign != NULL) : (ctx->pctx->pmeth->verify != NULL); } static int do_sigver_init(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey, enum evp_sign_verify_t op) { if (ctx->pctx == NULL) { ctx->pctx = EVP_PKEY_CTX_new(pkey, e); } if (ctx->pctx == NULL) { return 0; } ctx->pctx_ops = md_pctx_ops(); if (op == evp_verify) { if (!EVP_PKEY_verify_init(ctx->pctx)) { return 0; } } else { if (!EVP_PKEY_sign_init(ctx->pctx)) { return 0; } } if (type != NULL && !EVP_PKEY_CTX_set_signature_md(ctx->pctx, type)) { return 0; } if (uses_prehash(ctx, op)) { if (type == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_NO_DEFAULT_DIGEST); return 0; } if (!EVP_DigestInit_ex(ctx, type, e)) { return 0; } } if (pctx) { *pctx = ctx->pctx; } return 1; } int EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey) { return do_sigver_init(ctx, pctx, type, e, pkey, evp_sign); } int EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey) { return do_sigver_init(ctx, pctx, type, e, pkey, evp_verify); } int EVP_DigestSignUpdate(EVP_MD_CTX *ctx, const void *data, size_t len) { if (!uses_prehash(ctx, evp_sign)) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } return EVP_DigestUpdate(ctx, data, len); } int EVP_DigestVerifyUpdate(EVP_MD_CTX *ctx, const void *data, size_t len) { if (!uses_prehash(ctx, evp_verify)) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } return EVP_DigestUpdate(ctx, data, len); } int EVP_DigestSignFinal(EVP_MD_CTX *ctx, uint8_t *out_sig, size_t *out_sig_len) { if (!uses_prehash(ctx, evp_sign)) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } if (out_sig) { EVP_MD_CTX tmp_ctx; int ret; uint8_t md[EVP_MAX_MD_SIZE]; unsigned int mdlen; FIPS_service_indicator_lock_state(); EVP_MD_CTX_init(&tmp_ctx); ret = EVP_MD_CTX_copy_ex(&tmp_ctx, ctx) 
&& EVP_DigestFinal_ex(&tmp_ctx, md, &mdlen) && EVP_PKEY_sign(ctx->pctx, out_sig, out_sig_len, md, mdlen); EVP_MD_CTX_cleanup(&tmp_ctx); FIPS_service_indicator_unlock_state(); if (ret) { EVP_DigestSign_verify_service_indicator(ctx); } return ret; } else { size_t s = EVP_MD_size(ctx->digest); return EVP_PKEY_sign(ctx->pctx, out_sig, out_sig_len, NULL, s); } } int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const uint8_t *sig, size_t sig_len) { if (!uses_prehash(ctx, evp_verify)) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } EVP_MD_CTX tmp_ctx; int ret; uint8_t md[EVP_MAX_MD_SIZE]; unsigned int mdlen; FIPS_service_indicator_lock_state(); EVP_MD_CTX_init(&tmp_ctx); ret = EVP_MD_CTX_copy_ex(&tmp_ctx, ctx) && EVP_DigestFinal_ex(&tmp_ctx, md, &mdlen) && EVP_PKEY_verify(ctx->pctx, sig, sig_len, md, mdlen); FIPS_service_indicator_unlock_state(); EVP_MD_CTX_cleanup(&tmp_ctx); if (ret) { EVP_DigestVerify_verify_service_indicator(ctx); } return ret; } int EVP_DigestSign(EVP_MD_CTX *ctx, uint8_t *out_sig, size_t *out_sig_len, const uint8_t *data, size_t data_len) { FIPS_service_indicator_lock_state(); int ret = 0; if (uses_prehash(ctx, evp_sign)) { // If |out_sig| is NULL, the caller is only querying the maximum output // length. |data| should only be incorporated in the final call. 
if (out_sig != NULL && !EVP_DigestSignUpdate(ctx, data, data_len)) { goto end; } ret = EVP_DigestSignFinal(ctx, out_sig, out_sig_len); goto end; } if (ctx->pctx->pmeth->sign_message == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); goto end; } ret = ctx->pctx->pmeth->sign_message(ctx->pctx, out_sig, out_sig_len, data, data_len); end: FIPS_service_indicator_unlock_state(); if (ret) { EVP_DigestSign_verify_service_indicator(ctx); } return ret; } int EVP_DigestVerify(EVP_MD_CTX *ctx, const uint8_t *sig, size_t sig_len, const uint8_t *data, size_t len) { FIPS_service_indicator_lock_state(); int ret = 0; if (uses_prehash(ctx, evp_verify)) { ret = EVP_DigestVerifyUpdate(ctx, data, len) && EVP_DigestVerifyFinal(ctx, sig, sig_len); goto end; } if (ctx->pctx->pmeth->verify_message == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); goto end; } ret = ctx->pctx->pmeth->verify_message(ctx->pctx, sig, sig_len, data, len); end: FIPS_service_indicator_unlock_state(); if (ret) { EVP_DigestVerify_verify_service_indicator(ctx); } return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/builtin_curves.h ================================================ /* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ // This file is generated by make_tables.go. // P-224 [[maybe_unused]] static const uint64_t kP224FieldN0 = 0xffffffffffffffff; [[maybe_unused]] static const uint64_t kP224OrderN0 = 0xd6e242706a1fc2eb; #if defined(OPENSSL_64_BIT) [[maybe_unused]] static const uint64_t kP224Field[] = { 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000ffffffff}; [[maybe_unused]] static const uint64_t kP224Order[] = { 0x13dd29455c5c2a3d, 0xffff16a2e0b8f03e, 0xffffffffffffffff, 0x00000000ffffffff}; [[maybe_unused]] static const uint64_t kP224B[] = { 0x270b39432355ffb4, 0x5044b0b7d7bfd8ba, 0x0c04b3abf5413256, 0x00000000b4050a85}; [[maybe_unused]] static const uint64_t kP224GX[] = { 0x343280d6115c1d21, 0x4a03c1d356c21122, 0x6bb4bf7f321390b9, 0x00000000b70e0cbd}; [[maybe_unused]] static const uint64_t kP224GY[] = { 0x44d5819985007e34, 0xcd4375a05a074764, 0xb5f723fb4c22dfe6, 0x00000000bd376388}; [[maybe_unused]] static const uint64_t kP224FieldR[] = { 0xffffffff00000000, 0xffffffffffffffff, 0x0000000000000000, 0x0000000000000000}; [[maybe_unused]] static const uint64_t kP224FieldRR[] = { 0xffffffff00000001, 0xffffffff00000000, 0xfffffffe00000000, 0x00000000ffffffff}; [[maybe_unused]] static const uint64_t kP224OrderRR[] = { 0x29947a695f517d15, 0xabc8ff5931d63f4b, 0x6ad15f7cd9714856, 0x00000000b1e97961}; [[maybe_unused]] static const uint64_t kP224MontB[] = { 0xe768cdf663c059cd, 0x107ac2f3ccf01310, 0x3dceba98c8528151, 0x000000007fc02f93}; [[maybe_unused]] static const uint64_t kP224MontGX[] = { 0xbc9052266d0a4aea, 0x852597366018bfaa, 0x6dd3af9bf96bec05, 0x00000000a21b5e60}; [[maybe_unused]] static const uint64_t kP224MontGY[] = { 
0x2edca1e5eff3ede8, 0xf8cd672b05335a6b, 0xaea9c5ae03dfe878, 0x00000000614786f1}; #elif defined(OPENSSL_32_BIT) [[maybe_unused]] static const uint32_t kP224Field[] = { 0x00000001, 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff}; [[maybe_unused]] static const uint32_t kP224Order[] = { 0x5c5c2a3d, 0x13dd2945, 0xe0b8f03e, 0xffff16a2, 0xffffffff, 0xffffffff, 0xffffffff}; [[maybe_unused]] static const uint32_t kP224B[] = { 0x2355ffb4, 0x270b3943, 0xd7bfd8ba, 0x5044b0b7, 0xf5413256, 0x0c04b3ab, 0xb4050a85}; [[maybe_unused]] static const uint32_t kP224GX[] = { 0x115c1d21, 0x343280d6, 0x56c21122, 0x4a03c1d3, 0x321390b9, 0x6bb4bf7f, 0xb70e0cbd}; [[maybe_unused]] static const uint32_t kP224GY[] = { 0x85007e34, 0x44d58199, 0x5a074764, 0xcd4375a0, 0x4c22dfe6, 0xb5f723fb, 0xbd376388}; [[maybe_unused]] static const uint32_t kP224FieldR[] = { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000}; [[maybe_unused]] static const uint32_t kP224FieldRR[] = { 0x00000001, 0x00000000, 0x00000000, 0xfffffffe, 0xffffffff, 0xffffffff, 0x00000000}; [[maybe_unused]] static const uint32_t kP224OrderRR[] = { 0x3ad01289, 0x6bdaae6c, 0x97a54552, 0x6ad09d91, 0xb1e97961, 0x1822bc47, 0xd4baa4cf}; [[maybe_unused]] static const uint32_t kP224MontB[] = { 0xe768cdf7, 0xccf01310, 0x743b1cc0, 0xc8528150, 0x3dceba98, 0x7fc02f93, 0x9c3fa633}; [[maybe_unused]] static const uint32_t kP224MontGX[] = { 0xbc905227, 0x6018bfaa, 0xf22fe220, 0xf96bec04, 0x6dd3af9b, 0xa21b5e60, 0x92f5b516}; [[maybe_unused]] static const uint32_t kP224MontGY[] = { 0x2edca1e6, 0x05335a6b, 0xe8c15513, 0x03dfe878, 0xaea9c5ae, 0x614786f1, 0x100c1218}; #else #error "unknown word size" #endif // P-256 [[maybe_unused]] static const uint64_t kP256FieldN0 = 0x0000000000000001; [[maybe_unused]] static const uint64_t kP256OrderN0 = 0xccd1c8aaee00bc4f; #if defined(OPENSSL_64_BIT) [[maybe_unused]] static const uint64_t kP256Field[] = { 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 
0xffffffff00000001}; [[maybe_unused]] static const uint64_t kP256Order[] = { 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000}; [[maybe_unused]] static const uint64_t kP256FieldR[] = { 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe}; [[maybe_unused]] static const uint64_t kP256FieldRR[] = { 0x0000000000000003, 0xfffffffbffffffff, 0xfffffffffffffffe, 0x00000004fffffffd}; [[maybe_unused]] static const uint64_t kP256OrderRR[] = { 0x83244c95be79eea2, 0x4699799c49bd6fa6, 0x2845b2392b6bec59, 0x66e12d94f3d95620}; [[maybe_unused]] static const uint64_t kP256MontB[] = { 0xd89cdf6229c4bddf, 0xacf005cd78843090, 0xe5a220abf7212ed6, 0xdc30061d04874834}; [[maybe_unused]] static const uint64_t kP256MontGX[] = { 0x79e730d418a9143c, 0x75ba95fc5fedb601, 0x79fb732b77622510, 0x18905f76a53755c6}; [[maybe_unused]] static const uint64_t kP256MontGY[] = { 0xddf25357ce95560a, 0x8b4ab8e4ba19e45c, 0xd2e88688dd21f325, 0x8571ff1825885d85}; #elif defined(OPENSSL_32_BIT) [[maybe_unused]] static const uint32_t kP256Field[] = { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0xffffffff}; [[maybe_unused]] static const uint32_t kP256Order[] = { 0xfc632551, 0xf3b9cac2, 0xa7179e84, 0xbce6faad, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff}; [[maybe_unused]] static const uint32_t kP256FieldR[] = { 0x00000001, 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0x00000000}; [[maybe_unused]] static const uint32_t kP256FieldRR[] = { 0x00000003, 0x00000000, 0xffffffff, 0xfffffffb, 0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004}; [[maybe_unused]] static const uint32_t kP256OrderRR[] = { 0xbe79eea2, 0x83244c95, 0x49bd6fa6, 0x4699799c, 0x2b6bec59, 0x2845b239, 0xf3d95620, 0x66e12d94}; [[maybe_unused]] static const uint32_t kP256MontB[] = { 0x29c4bddf, 0xd89cdf62, 0x78843090, 0xacf005cd, 0xf7212ed6, 0xe5a220ab, 0x04874834, 0xdc30061d}; [[maybe_unused]] static const uint32_t kP256MontGX[] = { 
0x18a9143c, 0x79e730d4, 0x5fedb601, 0x75ba95fc, 0x77622510, 0x79fb732b, 0xa53755c6, 0x18905f76}; [[maybe_unused]] static const uint32_t kP256MontGY[] = { 0xce95560a, 0xddf25357, 0xba19e45c, 0x8b4ab8e4, 0xdd21f325, 0xd2e88688, 0x25885d85, 0x8571ff18}; #else #error "unknown word size" #endif // P-384 [[maybe_unused]] static const uint64_t kP384FieldN0 = 0x0000000100000001; [[maybe_unused]] static const uint64_t kP384OrderN0 = 0x6ed46089e88fdc45; #if defined(OPENSSL_64_BIT) [[maybe_unused]] static const uint64_t kP384Field[] = { 0x00000000ffffffff, 0xffffffff00000000, 0xfffffffffffffffe, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}; [[maybe_unused]] static const uint64_t kP384Order[] = { 0xecec196accc52973, 0x581a0db248b0a77a, 0xc7634d81f4372ddf, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}; [[maybe_unused]] static const uint64_t kP384FieldR[] = { 0xffffffff00000001, 0x00000000ffffffff, 0x0000000000000001, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}; [[maybe_unused]] static const uint64_t kP384FieldRR[] = { 0xfffffffe00000001, 0x0000000200000000, 0xfffffffe00000000, 0x0000000200000000, 0x0000000000000001, 0x0000000000000000}; [[maybe_unused]] static const uint64_t kP384OrderRR[] = { 0x2d319b2419b409a9, 0xff3d81e5df1aa419, 0xbc3e483afcb82947, 0xd40d49174aab1cc5, 0x3fb05b7a28266895, 0x0c84ee012b39bf21}; [[maybe_unused]] static const uint64_t kP384MontB[] = { 0x081188719d412dcc, 0xf729add87a4c32ec, 0x77f2209b1920022e, 0xe3374bee94938ae2, 0xb62b21f41f022094, 0xcd08114b604fbff9}; [[maybe_unused]] static const uint64_t kP384MontGX[] = { 0x3dd0756649c0b528, 0x20e378e2a0d6ce38, 0x879c3afc541b4d6e, 0x6454868459a30eff, 0x812ff723614ede2b, 0x4d3aadc2299e1513}; [[maybe_unused]] static const uint64_t kP384MontGY[] = { 0x23043dad4b03a4fe, 0xa1bfa8bf7bb4a9ac, 0x8bade7562e83b050, 0xc6c3521968f4ffd9, 0xdd8002263969a840, 0x2b78abc25a15c5e9}; #elif defined(OPENSSL_32_BIT) [[maybe_unused]] static const uint32_t kP384Field[] = { 0xffffffff, 
0x00000000, 0x00000000, 0xffffffff, 0xfffffffe, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff}; [[maybe_unused]] static const uint32_t kP384Order[] = { 0xccc52973, 0xecec196a, 0x48b0a77a, 0x581a0db2, 0xf4372ddf, 0xc7634d81, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff}; [[maybe_unused]] static const uint32_t kP384FieldR[] = { 0x00000001, 0xffffffff, 0xffffffff, 0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}; [[maybe_unused]] static const uint32_t kP384FieldRR[] = { 0x00000001, 0xfffffffe, 0x00000000, 0x00000002, 0x00000000, 0xfffffffe, 0x00000000, 0x00000002, 0x00000001, 0x00000000, 0x00000000, 0x00000000}; [[maybe_unused]] static const uint32_t kP384OrderRR[] = { 0x19b409a9, 0x2d319b24, 0xdf1aa419, 0xff3d81e5, 0xfcb82947, 0xbc3e483a, 0x4aab1cc5, 0xd40d4917, 0x28266895, 0x3fb05b7a, 0x2b39bf21, 0x0c84ee01}; [[maybe_unused]] static const uint32_t kP384MontB[] = { 0x9d412dcc, 0x08118871, 0x7a4c32ec, 0xf729add8, 0x1920022e, 0x77f2209b, 0x94938ae2, 0xe3374bee, 0x1f022094, 0xb62b21f4, 0x604fbff9, 0xcd08114b}; [[maybe_unused]] static const uint32_t kP384MontGX[] = { 0x49c0b528, 0x3dd07566, 0xa0d6ce38, 0x20e378e2, 0x541b4d6e, 0x879c3afc, 0x59a30eff, 0x64548684, 0x614ede2b, 0x812ff723, 0x299e1513, 0x4d3aadc2}; [[maybe_unused]] static const uint32_t kP384MontGY[] = { 0x4b03a4fe, 0x23043dad, 0x7bb4a9ac, 0xa1bfa8bf, 0x2e83b050, 0x8bade756, 0x68f4ffd9, 0xc6c35219, 0x3969a840, 0xdd800226, 0x5a15c5e9, 0x2b78abc2}; #else #error "unknown word size" #endif // P-521 [[maybe_unused]] static const uint64_t kP521FieldN0 = 0x0000000000000001; [[maybe_unused]] static const uint64_t kP521OrderN0 = 0x1d2f5ccd79a995c7; #if defined(OPENSSL_64_BIT) [[maybe_unused]] static const uint64_t kP521Field[] = { 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 
0x00000000000001ff}; [[maybe_unused]] static const uint64_t kP521Order[] = { 0xbb6fb71e91386409, 0x3bb5c9b8899c47ae, 0x7fcc0148f709a5d0, 0x51868783bf2f966b, 0xfffffffffffffffa, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x00000000000001ff}; [[maybe_unused]] static const uint64_t kP521FieldR[] = { 0x0080000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}; [[maybe_unused]] static const uint64_t kP521FieldRR[] = { 0x0000000000000000, 0x0000400000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}; [[maybe_unused]] static const uint64_t kP521OrderRR[] = { 0x137cd04dcf15dd04, 0xf707badce5547ea3, 0x12a78d38794573ff, 0xd3721ef557f75e06, 0xdd6e23d82e49c7db, 0xcff3d142b7756e3e, 0x5bcc6d61a8e567bc, 0x2d8e03d1492d0d45, 0x000000000000003d}; [[maybe_unused]] static const uint64_t kP521MontB[] = { 0x8014654fae586387, 0x78f7a28fea35a81f, 0x839ab9efc41e961a, 0xbd8b29605e9dd8df, 0xf0ab0c9ca8f63f49, 0xf9dc5a44c8c77884, 0x77516d392dccd98a, 0x0fc94d10d05b42a0, 0x000000000000004d}; [[maybe_unused]] static const uint64_t kP521MontGX[] = { 0xb331a16381adc101, 0x4dfcbf3f18e172de, 0x6f19a459e0c2b521, 0x947f0ee093d17fd4, 0xdd50a5af3bf7f3ac, 0x90fc1457b035a69e, 0x214e32409c829fda, 0xe6cf1f65b311cada, 0x0000000000000074}; [[maybe_unused]] static const uint64_t kP521MontGY[] = { 0x28460e4a5a9e268e, 0x20445f4a3b4fe8b3, 0xb09a9e3843513961, 0x2062a85c809fd683, 0x164bf7394caf7a13, 0x340bd7de8b939f33, 0xeccc7aa224abcda2, 0x022e452fda163e8d, 0x00000000000001e0}; #elif defined(OPENSSL_32_BIT) [[maybe_unused]] static const uint32_t kP521Field[] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x000001ff}; [[maybe_unused]] 
static const uint32_t kP521Order[] = { 0x91386409, 0xbb6fb71e, 0x899c47ae, 0x3bb5c9b8, 0xf709a5d0, 0x7fcc0148, 0xbf2f966b, 0x51868783, 0xfffffffa, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x000001ff}; [[maybe_unused]] static const uint32_t kP521FieldR[] = { 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}; [[maybe_unused]] static const uint32_t kP521FieldRR[] = { 0x00000000, 0x00004000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000}; [[maybe_unused]] static const uint32_t kP521OrderRR[] = { 0x61c64ca7, 0x1163115a, 0x4374a642, 0x18354a56, 0x0791d9dc, 0x5d4dd6d3, 0xd3402705, 0x4fb35b72, 0xb7756e3a, 0xcff3d142, 0xa8e567bc, 0x5bcc6d61, 0x492d0d45, 0x2d8e03d1, 0x8c44383d, 0x5b5a3afe, 0x0000019a}; [[maybe_unused]] static const uint32_t kP521MontB[] = { 0x8014654f, 0xea35a81f, 0x78f7a28f, 0xc41e961a, 0x839ab9ef, 0x5e9dd8df, 0xbd8b2960, 0xa8f63f49, 0xf0ab0c9c, 0xc8c77884, 0xf9dc5a44, 0x2dccd98a, 0x77516d39, 0xd05b42a0, 0x0fc94d10, 0xb0c70e4d, 0x0000015c}; [[maybe_unused]] static const uint32_t kP521MontGX[] = { 0xb331a163, 0x18e172de, 0x4dfcbf3f, 0xe0c2b521, 0x6f19a459, 0x93d17fd4, 0x947f0ee0, 0x3bf7f3ac, 0xdd50a5af, 0xb035a69e, 0x90fc1457, 0x9c829fda, 0x214e3240, 0xb311cada, 0xe6cf1f65, 0x5b820274, 0x00000103}; [[maybe_unused]] static const uint32_t kP521MontGY[] = { 0x28460e4a, 0x3b4fe8b3, 0x20445f4a, 0x43513961, 0xb09a9e38, 0x809fd683, 0x2062a85c, 0x4caf7a13, 0x164bf739, 0x8b939f33, 0x340bd7de, 0x24abcda2, 0xeccc7aa2, 0xda163e8d, 0x022e452f, 0x3c4d1de0, 0x000000b5}; #else #error "unknown word size" #endif ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/ec.cc.inc 
================================================ /* * Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include "../../internal.h" #include "../bn/internal.h" #include "../delocate.h" #include "internal.h" #include "builtin_curves.h" static void ec_point_free(EC_POINT *point, int free_group); static void ec_group_init_static_mont(BN_MONT_CTX *mont, size_t num_words, const BN_ULONG *modulus, const BN_ULONG *rr, uint64_t n0) { bn_set_static_words(&mont->N, modulus, num_words); bn_set_static_words(&mont->RR, rr, num_words); #if defined(OPENSSL_64_BIT) mont->n0[0] = n0; #elif defined(OPENSSL_32_BIT) mont->n0[0] = (uint32_t)n0; mont->n0[1] = (uint32_t)(n0 >> 32); #else #error "unknown word length" #endif } static void ec_group_set_a_minus3(EC_GROUP *group) { const EC_FELEM *one = ec_felem_one(group); group->a_is_minus3 = 1; ec_felem_neg(group, &group->a, one); ec_felem_sub(group, &group->a, &group->a, one); ec_felem_sub(group, &group->a, &group->a, one); } DEFINE_METHOD_FUNCTION(EC_GROUP, EC_group_p224) { out->curve_name = NID_secp224r1; out->comment = "NIST P-224"; // 1.3.132.0.33 static const uint8_t kOIDP224[] = {0x2b, 0x81, 0x04, 0x00, 0x21}; OPENSSL_memcpy(out->oid, kOIDP224, sizeof(kOIDP224)); out->oid_len = sizeof(kOIDP224); ec_group_init_static_mont(&out->field, OPENSSL_ARRAY_SIZE(kP224Field), kP224Field, kP224FieldRR, kP224FieldN0); ec_group_init_static_mont(&out->order, OPENSSL_ARRAY_SIZE(kP224Order), kP224Order, kP224OrderRR, kP224OrderN0); #if defined(BORINGSSL_HAS_UINT128) && !defined(OPENSSL_SMALL) out->meth = EC_GFp_nistp224_method(); 
OPENSSL_memcpy(out->generator.raw.X.words, kP224GX, sizeof(kP224GX)); OPENSSL_memcpy(out->generator.raw.Y.words, kP224GY, sizeof(kP224GY)); out->generator.raw.Z.words[0] = 1; OPENSSL_memcpy(out->b.words, kP224B, sizeof(kP224B)); #else out->meth = EC_GFp_mont_method(); OPENSSL_memcpy(out->generator.raw.X.words, kP224MontGX, sizeof(kP224MontGX)); OPENSSL_memcpy(out->generator.raw.Y.words, kP224MontGY, sizeof(kP224MontGY)); OPENSSL_memcpy(out->generator.raw.Z.words, kP224FieldR, sizeof(kP224FieldR)); OPENSSL_memcpy(out->b.words, kP224MontB, sizeof(kP224MontB)); #endif out->generator.group = out; ec_group_set_a_minus3(out); out->has_order = 1; out->field_greater_than_order = 1; } DEFINE_METHOD_FUNCTION(EC_GROUP, EC_group_p256) { out->curve_name = NID_X9_62_prime256v1; out->comment = "NIST P-256"; // 1.2.840.10045.3.1.7 static const uint8_t kOIDP256[] = {0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07}; OPENSSL_memcpy(out->oid, kOIDP256, sizeof(kOIDP256)); out->oid_len = sizeof(kOIDP256); ec_group_init_static_mont(&out->field, OPENSSL_ARRAY_SIZE(kP256Field), kP256Field, kP256FieldRR, kP256FieldN0); ec_group_init_static_mont(&out->order, OPENSSL_ARRAY_SIZE(kP256Order), kP256Order, kP256OrderRR, kP256OrderN0); #if !defined(OPENSSL_NO_ASM) && \ (defined(OPENSSL_X86_64) || defined(OPENSSL_AARCH64)) && \ !defined(OPENSSL_SMALL) out->meth = EC_GFp_nistz256_method(); #else out->meth = EC_GFp_nistp256_method(); #endif out->generator.group = out; OPENSSL_memcpy(out->generator.raw.X.words, kP256MontGX, sizeof(kP256MontGX)); OPENSSL_memcpy(out->generator.raw.Y.words, kP256MontGY, sizeof(kP256MontGY)); OPENSSL_memcpy(out->generator.raw.Z.words, kP256FieldR, sizeof(kP256FieldR)); OPENSSL_memcpy(out->b.words, kP256MontB, sizeof(kP256MontB)); ec_group_set_a_minus3(out); out->has_order = 1; out->field_greater_than_order = 1; } DEFINE_METHOD_FUNCTION(EC_GROUP, EC_group_p384) { out->curve_name = NID_secp384r1; out->comment = "NIST P-384"; // 1.3.132.0.34 static const uint8_t kOIDP384[] = 
{0x2b, 0x81, 0x04, 0x00, 0x22}; OPENSSL_memcpy(out->oid, kOIDP384, sizeof(kOIDP384)); out->oid_len = sizeof(kOIDP384); ec_group_init_static_mont(&out->field, OPENSSL_ARRAY_SIZE(kP384Field), kP384Field, kP384FieldRR, kP384FieldN0); ec_group_init_static_mont(&out->order, OPENSSL_ARRAY_SIZE(kP384Order), kP384Order, kP384OrderRR, kP384OrderN0); out->meth = EC_GFp_mont_method(); out->generator.group = out; OPENSSL_memcpy(out->generator.raw.X.words, kP384MontGX, sizeof(kP384MontGX)); OPENSSL_memcpy(out->generator.raw.Y.words, kP384MontGY, sizeof(kP384MontGY)); OPENSSL_memcpy(out->generator.raw.Z.words, kP384FieldR, sizeof(kP384FieldR)); OPENSSL_memcpy(out->b.words, kP384MontB, sizeof(kP384MontB)); ec_group_set_a_minus3(out); out->has_order = 1; out->field_greater_than_order = 1; } DEFINE_METHOD_FUNCTION(EC_GROUP, EC_group_p521) { out->curve_name = NID_secp521r1; out->comment = "NIST P-521"; // 1.3.132.0.35 static const uint8_t kOIDP521[] = {0x2b, 0x81, 0x04, 0x00, 0x23}; OPENSSL_memcpy(out->oid, kOIDP521, sizeof(kOIDP521)); out->oid_len = sizeof(kOIDP521); ec_group_init_static_mont(&out->field, OPENSSL_ARRAY_SIZE(kP521Field), kP521Field, kP521FieldRR, kP521FieldN0); ec_group_init_static_mont(&out->order, OPENSSL_ARRAY_SIZE(kP521Order), kP521Order, kP521OrderRR, kP521OrderN0); out->meth = EC_GFp_mont_method(); out->generator.group = out; OPENSSL_memcpy(out->generator.raw.X.words, kP521MontGX, sizeof(kP521MontGX)); OPENSSL_memcpy(out->generator.raw.Y.words, kP521MontGY, sizeof(kP521MontGY)); OPENSSL_memcpy(out->generator.raw.Z.words, kP521FieldR, sizeof(kP521FieldR)); OPENSSL_memcpy(out->b.words, kP521MontB, sizeof(kP521MontB)); ec_group_set_a_minus3(out); out->has_order = 1; out->field_greater_than_order = 1; } EC_GROUP *EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { if (BN_num_bytes(p) > EC_MAX_BYTES) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_FIELD); return NULL; } BN_CTX *new_ctx = NULL; if (ctx == NULL) { ctx = new_ctx = 
BN_CTX_new(); if (ctx == NULL) { return NULL; } } // Historically, |a| and |b| were not required to be fully reduced. // TODO(davidben): Can this be removed? EC_GROUP *ret = NULL; BN_CTX_start(ctx); BIGNUM *a_reduced = BN_CTX_get(ctx); BIGNUM *b_reduced = BN_CTX_get(ctx); if (a_reduced == NULL || b_reduced == NULL || !BN_nnmod(a_reduced, a, p, ctx) || !BN_nnmod(b_reduced, b, p, ctx)) { goto err; } ret = reinterpret_cast(OPENSSL_zalloc(sizeof(EC_GROUP))); if (ret == NULL) { return NULL; } ret->references = 1; ret->meth = EC_GFp_mont_method(); bn_mont_ctx_init(&ret->field); bn_mont_ctx_init(&ret->order); ret->generator.group = ret; if (!ec_GFp_simple_group_set_curve(ret, p, a_reduced, b_reduced, ctx)) { EC_GROUP_free(ret); ret = NULL; goto err; } err: BN_CTX_end(ctx); BN_CTX_free(new_ctx); return ret; } int EC_GROUP_set_generator(EC_GROUP *group, const EC_POINT *generator, const BIGNUM *order, const BIGNUM *cofactor) { if (group->curve_name != NID_undef || group->has_order || generator->group != group) { // |EC_GROUP_set_generator| may only be used with |EC_GROUP|s returned by // |EC_GROUP_new_curve_GFp| and may only used once on each group. // |generator| must have been created from |EC_GROUP_new_curve_GFp|, not a // copy, so that |generator->group->generator| is set correctly. OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } if (BN_num_bytes(order) > EC_MAX_BYTES) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_GROUP_ORDER); return 0; } // Require a cofactor of one for custom curves, which implies prime order. if (!BN_is_one(cofactor)) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_COFACTOR); return 0; } // Require that p < 2×order. This simplifies some ECDSA operations. // // Note any curve which did not satisfy this must have been invalid or use a // tiny prime (less than 17). See the proof in |field_element_to_scalar| in // the ECDSA implementation. 
int ret = 0; BIGNUM *tmp = BN_new(); if (tmp == NULL || !BN_lshift1(tmp, order)) { goto err; } if (BN_cmp(tmp, &group->field.N) <= 0) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_GROUP_ORDER); goto err; } EC_AFFINE affine; if (!ec_jacobian_to_affine(group, &affine, &generator->raw) || !BN_MONT_CTX_set(&group->order, order, NULL)) { goto err; } group->field_greater_than_order = BN_cmp(&group->field.N, order) > 0; group->generator.raw.X = affine.X; group->generator.raw.Y = affine.Y; // |raw.Z| was set to 1 by |EC_GROUP_new_curve_GFp|. group->has_order = 1; ret = 1; err: BN_free(tmp); return ret; } EC_GROUP *EC_GROUP_new_by_curve_name(int nid) { switch (nid) { case NID_secp224r1: return (EC_GROUP *)EC_group_p224(); case NID_X9_62_prime256v1: return (EC_GROUP *)EC_group_p256(); case NID_secp384r1: return (EC_GROUP *)EC_group_p384(); case NID_secp521r1: return (EC_GROUP *)EC_group_p521(); default: OPENSSL_PUT_ERROR(EC, EC_R_UNKNOWN_GROUP); return NULL; } } void EC_GROUP_free(EC_GROUP *group) { if (group == NULL || // Built-in curves are static. group->curve_name != NID_undef || !CRYPTO_refcount_dec_and_test_zero(&group->references)) { return; } bn_mont_ctx_cleanup(&group->order); bn_mont_ctx_cleanup(&group->field); OPENSSL_free(group); } EC_GROUP *EC_GROUP_dup(const EC_GROUP *a) { if (a == NULL || // Built-in curves are static. a->curve_name != NID_undef) { return (EC_GROUP *)a; } // Groups are logically immutable (but for |EC_GROUP_set_generator| which must // be called early on), so we simply take a reference. EC_GROUP *group = (EC_GROUP *)a; CRYPTO_refcount_inc(&group->references); return group; } int EC_GROUP_cmp(const EC_GROUP *a, const EC_GROUP *b, BN_CTX *ignored) { // Note this function returns 0 if equal and non-zero otherwise. if (a == b) { return 0; } if (a->curve_name != b->curve_name) { return 1; } if (a->curve_name != NID_undef) { // Built-in curves may be compared by curve name alone. return 0; } // |a| and |b| are both custom curves. 
We compare the entire curve // structure. If |a| or |b| is incomplete (due to legacy OpenSSL mistakes, // custom curve construction is sadly done in two parts) but otherwise not the // same object, we consider them always unequal. return a->meth != b->meth || // !a->has_order || !b->has_order || BN_cmp(&a->order.N, &b->order.N) != 0 || BN_cmp(&a->field.N, &b->field.N) != 0 || !ec_felem_equal(a, &a->a, &b->a) || // !ec_felem_equal(a, &a->b, &b->b) || !ec_GFp_simple_points_equal(a, &a->generator.raw, &b->generator.raw); } const EC_POINT *EC_GROUP_get0_generator(const EC_GROUP *group) { return group->has_order ? &group->generator : NULL; } const BIGNUM *EC_GROUP_get0_order(const EC_GROUP *group) { assert(group->has_order); return &group->order.N; } int EC_GROUP_get_order(const EC_GROUP *group, BIGNUM *order, BN_CTX *ctx) { if (BN_copy(order, EC_GROUP_get0_order(group)) == NULL) { return 0; } return 1; } int EC_GROUP_order_bits(const EC_GROUP *group) { return BN_num_bits(&group->order.N); } int EC_GROUP_get_cofactor(const EC_GROUP *group, BIGNUM *cofactor, BN_CTX *ctx) { // All |EC_GROUP|s have cofactor 1. 
return BN_set_word(cofactor, 1);
}

// Reports the curve parameters p, a, b. Any output may be NULL to skip it.
int EC_GROUP_get_curve_GFp(const EC_GROUP *group, BIGNUM *out_p, BIGNUM *out_a,
                           BIGNUM *out_b, BN_CTX *ctx) {
  return ec_GFp_simple_group_get_curve(group, out_p, out_a, out_b);
}

// Returns the curve NID, or NID_undef for custom curves.
int EC_GROUP_get_curve_name(const EC_GROUP *group) { return group->curve_name; }

// Returns the bit length of the field prime p.
unsigned EC_GROUP_get_degree(const EC_GROUP *group) {
  return BN_num_bits(&group->field.N);
}

// Maps a supported curve NID to its NIST name, or NULL if not a NIST curve.
const char *EC_curve_nid2nist(int nid) {
  switch (nid) {
    case NID_secp224r1:
      return "P-224";
    case NID_X9_62_prime256v1:
      return "P-256";
    case NID_secp384r1:
      return "P-384";
    case NID_secp521r1:
      return "P-521";
  }
  return NULL;
}

// Maps a NIST curve name ("P-224" ... "P-521") to its NID, or NID_undef.
int EC_curve_nist2nid(const char *name) {
  if (strcmp(name, "P-224") == 0) {
    return NID_secp224r1;
  }
  if (strcmp(name, "P-256") == 0) {
    return NID_X9_62_prime256v1;
  }
  if (strcmp(name, "P-384") == 0) {
    return NID_secp384r1;
  }
  if (strcmp(name, "P-521") == 0) {
    return NID_secp521r1;
  }
  return NID_undef;
}

// Allocates a new point on |group|, initialized to a safe default value.
// The point keeps its own reference to |group|.
EC_POINT *EC_POINT_new(const EC_GROUP *group) {
  if (group == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return NULL;
  }

  EC_POINT *ret = reinterpret_cast(OPENSSL_malloc(sizeof *ret));
  if (ret == NULL) {
    return NULL;
  }

  ret->group = EC_GROUP_dup(group);
  ec_GFp_simple_point_init(&ret->raw);
  return ret;
}

// Frees |point|; releases the group reference only when |free_group| is set.
static void ec_point_free(EC_POINT *point, int free_group) {
  if (!point) {
    return;
  }
  if (free_group) {
    EC_GROUP_free(point->group);
  }
  OPENSSL_free(point);
}

void EC_POINT_free(EC_POINT *point) {
  ec_point_free(point, 1 /* free group */);
}

// Points hold no secret-bearing heap state beyond the struct itself, so
// clear-free is the same as free.
void EC_POINT_clear_free(EC_POINT *point) { EC_POINT_free(point); }

// Copies |src| into |dest|. Both points must belong to the same group.
int EC_POINT_copy(EC_POINT *dest, const EC_POINT *src) {
  if (EC_GROUP_cmp(dest->group, src->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }
  if (dest == src) {
    return 1;
  }
  ec_GFp_simple_point_copy(&dest->raw, &src->raw);
  return 1;
}

// Returns a newly allocated copy of |a| on |group|, or NULL on error.
EC_POINT *EC_POINT_dup(const EC_POINT *a, const EC_GROUP *group) {
  if (a == NULL) {
    return NULL;
  }

  EC_POINT *ret = EC_POINT_new(group);
  if (ret == NULL ||
      !EC_POINT_copy(ret, a)) {
    EC_POINT_free(ret);
    return NULL;
  }

  return ret;
}

// Sets |point| to the point at infinity (the group identity).
int EC_POINT_set_to_infinity(const EC_GROUP *group, EC_POINT *point) {
  if (EC_GROUP_cmp(group, point->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }
  ec_GFp_simple_point_set_to_infinity(group, &point->raw);
  return 1;
}

// Returns one if |point| is the point at infinity, zero otherwise (including
// on group mismatch).
int EC_POINT_is_at_infinity(const EC_GROUP *group, const EC_POINT *point) {
  if (EC_GROUP_cmp(group, point->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }
  return ec_GFp_simple_is_at_infinity(group, &point->raw);
}

// Returns one if |point| satisfies the curve equation.
int EC_POINT_is_on_curve(const EC_GROUP *group, const EC_POINT *point,
                         BN_CTX *ctx) {
  if (EC_GROUP_cmp(group, point->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }
  return ec_GFp_simple_is_on_curve(group, &point->raw);
}

// Compares two points. Returns 0 if equal, 1 if unequal, -1 on error.
int EC_POINT_cmp(const EC_GROUP *group, const EC_POINT *a, const EC_POINT *b,
                 BN_CTX *ctx) {
  if (EC_GROUP_cmp(group, a->group, NULL) != 0 ||
      EC_GROUP_cmp(group, b->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return -1;
  }

  // Note |EC_POINT_cmp| returns zero for equality and non-zero for inequality.
  return ec_GFp_simple_points_equal(group, &a->raw, &b->raw) ? 0 : 1;
}

// Writes the affine coordinates of |point| into |x| and/or |y| (either may be
// NULL to skip it). Fails on the point at infinity.
int EC_POINT_get_affine_coordinates_GFp(const EC_GROUP *group,
                                        const EC_POINT *point, BIGNUM *x,
                                        BIGNUM *y, BN_CTX *ctx) {
  if (group->meth->point_get_affine_coordinates == 0) {
    OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }
  if (EC_GROUP_cmp(group, point->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }
  EC_FELEM x_felem, y_felem;
  if (!group->meth->point_get_affine_coordinates(group, &point->raw,
                                                 x == NULL ? NULL : &x_felem,
                                                 y == NULL ? NULL : &y_felem) ||
      (x != NULL && !ec_felem_to_bignum(group, x, &x_felem)) ||
      (y != NULL && !ec_felem_to_bignum(group, y, &y_felem))) {
    return 0;
  }
  return 1;
}

int EC_POINT_get_affine_coordinates(const EC_GROUP *group,
                                    const EC_POINT *point, BIGNUM *x, BIGNUM *y,
                                    BN_CTX *ctx) {
  return EC_POINT_get_affine_coordinates_GFp(group, point, x, y, ctx);
}

// Lifts an affine point to Jacobian form with Z = 1.
void ec_affine_to_jacobian(const EC_GROUP *group, EC_JACOBIAN *out,
                           const EC_AFFINE *p) {
  out->X = p->X;
  out->Y = p->Y;
  out->Z = *ec_felem_one(group);
}

// Converts a Jacobian point to affine form; fails on the point at infinity.
int ec_jacobian_to_affine(const EC_GROUP *group, EC_AFFINE *out,
                          const EC_JACOBIAN *p) {
  return group->meth->point_get_affine_coordinates(group, p, &out->X, &out->Y);
}

// Batch Jacobian-to-affine conversion; requires method support.
int ec_jacobian_to_affine_batch(const EC_GROUP *group, EC_AFFINE *out,
                                const EC_JACOBIAN *in, size_t num) {
  if (group->meth->jacobian_to_affine_batch == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }
  return group->meth->jacobian_to_affine_batch(group, out, in, num);
}

// Validates that (x, y) lies on the curve and, if so, writes it to |out|.
int ec_point_set_affine_coordinates(const EC_GROUP *group, EC_AFFINE *out,
                                    const EC_FELEM *x, const EC_FELEM *y) {
  void (*const felem_mul)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a,
                          const EC_FELEM *b) = group->meth->felem_mul;
  void (*const felem_sqr)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a) =
      group->meth->felem_sqr;

  // Check if the point is on the curve.
  EC_FELEM lhs, rhs;
  felem_sqr(group, &lhs, y);                   // lhs = y^2
  felem_sqr(group, &rhs, x);                   // rhs = x^2
  ec_felem_add(group, &rhs, &rhs, &group->a);  // rhs = x^2 + a
  felem_mul(group, &rhs, &rhs, x);             // rhs = x^3 + ax
  ec_felem_add(group, &rhs, &rhs, &group->b);  // rhs = x^3 + ax + b
  if (!ec_felem_equal(group, &lhs, &rhs)) {
    OPENSSL_PUT_ERROR(EC, EC_R_POINT_IS_NOT_ON_CURVE);
    // In the event of an error, defend against the caller not checking the
    // return value by setting a known safe value. Note this may not be possible
    // if the caller is in the process of constructing an arbitrary group and
    // the generator is missing.
    if (group->has_order) {
      out->X = group->generator.raw.X;
      out->Y = group->generator.raw.Y;
    }
    return 0;
  }

  out->X = *x;
  out->Y = *y;
  return 1;
}

// Sets |point| from BIGNUM affine coordinates, validating curve membership.
// On failure, |point| is reset to a known safe value.
int EC_POINT_set_affine_coordinates_GFp(const EC_GROUP *group, EC_POINT *point,
                                        const BIGNUM *x, const BIGNUM *y,
                                        BN_CTX *ctx) {
  if (EC_GROUP_cmp(group, point->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }

  if (x == NULL || y == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }

  EC_FELEM x_felem, y_felem;
  EC_AFFINE affine;
  if (!ec_bignum_to_felem(group, &x_felem, x) ||
      !ec_bignum_to_felem(group, &y_felem, y) ||
      !ec_point_set_affine_coordinates(group, &affine, &x_felem, &y_felem)) {
    // In the event of an error, defend against the caller not checking the
    // return value by setting a known safe value.
    ec_set_to_safe_point(group, &point->raw);
    return 0;
  }

  ec_affine_to_jacobian(group, &point->raw, &affine);
  return 1;
}

int EC_POINT_set_affine_coordinates(const EC_GROUP *group, EC_POINT *point,
                                    const BIGNUM *x, const BIGNUM *y,
                                    BN_CTX *ctx) {
  return EC_POINT_set_affine_coordinates_GFp(group, point, x, y, ctx);
}

// r = a + b. All three points must belong to |group|.
int EC_POINT_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a,
                 const EC_POINT *b, BN_CTX *ctx) {
  if (EC_GROUP_cmp(group, r->group, NULL) != 0 ||
      EC_GROUP_cmp(group, a->group, NULL) != 0 ||
      EC_GROUP_cmp(group, b->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }
  group->meth->add(group, &r->raw, &a->raw, &b->raw);
  return 1;
}

// r = 2a.
int EC_POINT_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a,
                 BN_CTX *ctx) {
  if (EC_GROUP_cmp(group, r->group, NULL) != 0 ||
      EC_GROUP_cmp(group, a->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }
  group->meth->dbl(group, &r->raw, &a->raw);
  return 1;
}

// Negates |a| in place.
int EC_POINT_invert(const EC_GROUP *group, EC_POINT *a, BN_CTX *ctx) {
  if (EC_GROUP_cmp(group, a->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }
  ec_GFp_simple_invert(group,
&a->raw);
  return 1;
}

// Reduces an arbitrary |BIGNUM| into a scalar mod the group order. The fast
// path handles already-reduced inputs; out-of-range inputs fall back to a
// (non-constant-time) modular reduction.
static int arbitrary_bignum_to_scalar(const EC_GROUP *group, EC_SCALAR *out,
                                      const BIGNUM *in, BN_CTX *ctx) {
  if (ec_bignum_to_scalar(group, out, in)) {
    return 1;
  }
  ERR_clear_error();

  // This is an unusual input, so we do not guarantee constant-time processing.
  BN_CTX_start(ctx);
  BIGNUM *tmp = BN_CTX_get(ctx);
  int ok = tmp != NULL &&
           BN_nnmod(tmp, in, EC_GROUP_get0_order(group), ctx) &&
           ec_bignum_to_scalar(group, out, tmp);
  BN_CTX_end(ctx);
  return ok;
}

// Computes r = g_scalar*G + p_scalar*p without running the ECC self-test
// first. At least one scalar must be non-NULL; |p| and |p_scalar| must be
// both present or both absent.
int ec_point_mul_no_self_test(const EC_GROUP *group, EC_POINT *r,
                              const BIGNUM *g_scalar, const EC_POINT *p,
                              const BIGNUM *p_scalar, BN_CTX *ctx) {
  // Previously, this function set |r| to the point at infinity if there was
  // nothing to multiply. But, nobody should be calling this function with
  // nothing to multiply in the first place.
  if ((g_scalar == NULL && p_scalar == NULL) ||
      (p == NULL) != (p_scalar == NULL)) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }

  if (EC_GROUP_cmp(group, r->group, NULL) != 0 ||
      (p != NULL && EC_GROUP_cmp(group, p->group, NULL) != 0)) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }

  int ret = 0;
  BN_CTX *new_ctx = NULL;
  if (ctx == NULL) {
    new_ctx = BN_CTX_new();
    if (new_ctx == NULL) {
      goto err;
    }
    ctx = new_ctx;
  }

  // If both |g_scalar| and |p_scalar| are non-NULL,
  // |ec_point_mul_scalar_public| would share the doublings between the two
  // products, which would be more efficient. However, we conservatively assume
  // the caller needs a constant-time operation. (ECDSA verification does not
  // use this function.)
  //
  // Previously, the low-level constant-time multiplication function aligned
  // with this function's calling convention, but this was misleading. Curves
  // which combined the two multiplications did not avoid the doubling case
  // in the incomplete addition formula and were not constant-time.
  if (g_scalar != NULL) {
    EC_SCALAR scalar;
    if (!arbitrary_bignum_to_scalar(group, &scalar, g_scalar, ctx) ||
        !ec_point_mul_scalar_base(group, &r->raw, &scalar)) {
      goto err;
    }
  }

  if (p_scalar != NULL) {
    EC_SCALAR scalar;
    EC_JACOBIAN tmp;
    if (!arbitrary_bignum_to_scalar(group, &scalar, p_scalar, ctx) ||
        !ec_point_mul_scalar(group, &tmp, &p->raw, &scalar)) {
      goto err;
    }
    if (g_scalar == NULL) {
      OPENSSL_memcpy(&r->raw, &tmp, sizeof(EC_JACOBIAN));
    } else {
      group->meth->add(group, &r->raw, &r->raw, &tmp);
    }
  }

  ret = 1;

err:
  BN_CTX_free(new_ctx);
  return ret;
}

// Public entry point for point multiplication; runs the ECC self-test first.
int EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar,
                 const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx) {
  boringssl_ensure_ecc_self_test();

  return ec_point_mul_no_self_test(group, r, g_scalar, p, p_scalar, ctx);
}

// Variable-time r = g_scalar*G + p_scalar*p, for public inputs only (e.g.
// ECDSA verification).
int ec_point_mul_scalar_public(const EC_GROUP *group, EC_JACOBIAN *r,
                               const EC_SCALAR *g_scalar, const EC_JACOBIAN *p,
                               const EC_SCALAR *p_scalar) {
  if (g_scalar == NULL || p_scalar == NULL || p == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }
  if (group->meth->mul_public == NULL) {
    return group->meth->mul_public_batch(group, r, g_scalar, p, p_scalar, 1);
  }

  group->meth->mul_public(group, r, g_scalar, p, p_scalar);
  return 1;
}

// Variable-time batch form of |ec_point_mul_scalar_public|.
int ec_point_mul_scalar_public_batch(const EC_GROUP *group, EC_JACOBIAN *r,
                                     const EC_SCALAR *g_scalar,
                                     const EC_JACOBIAN *points,
                                     const EC_SCALAR *scalars, size_t num) {
  if (group->meth->mul_public_batch == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  return group->meth->mul_public_batch(group, r, g_scalar, points, scalars,
                                       num);
}

// Constant-time r = p*scalar, with an on-curve check of the result.
int ec_point_mul_scalar(const EC_GROUP *group, EC_JACOBIAN *r,
                        const EC_JACOBIAN *p, const EC_SCALAR *scalar) {
  if (p == NULL || scalar == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }

  group->meth->mul(group, r, p, scalar);

  // Check the result is on the curve to defend against fault attacks or bugs.
  // This has negligible cost compared to the multiplication.
  if (!ec_GFp_simple_is_on_curve(group, r)) {
    OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  return 1;
}

// Constant-time r = G*scalar (base-point multiplication), with an on-curve
// check of the result.
int ec_point_mul_scalar_base(const EC_GROUP *group, EC_JACOBIAN *r,
                             const EC_SCALAR *scalar) {
  if (scalar == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }

  group->meth->mul_base(group, r, scalar);

  // Check the result is on the curve to defend against fault attacks or bugs.
  // This has negligible cost compared to the multiplication. This can only
  // happen on bug or CPU fault, so it okay to leak this. The alternative would
  // be to proceed with bad data.
  if (!constant_time_declassify_int(ec_GFp_simple_is_on_curve(group, r))) {
    OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  return 1;
}

// r = p0*scalar0 + p1*scalar1 + p2*scalar2; requires method support.
int ec_point_mul_scalar_batch(const EC_GROUP *group, EC_JACOBIAN *r,
                              const EC_JACOBIAN *p0, const EC_SCALAR *scalar0,
                              const EC_JACOBIAN *p1, const EC_SCALAR *scalar1,
                              const EC_JACOBIAN *p2, const EC_SCALAR *scalar2) {
  if (group->meth->mul_batch == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  group->meth->mul_batch(group, r, p0, scalar0, p1, scalar1, p2, scalar2);

  // Check the result is on the curve to defend against fault attacks or bugs.
  // This has negligible cost compared to the multiplication.
  if (!ec_GFp_simple_is_on_curve(group, r)) {
    OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  return 1;
}

// Precomputes a multiplication table for |p|; requires method support.
int ec_init_precomp(const EC_GROUP *group, EC_PRECOMP *out,
                    const EC_JACOBIAN *p) {
  if (group->meth->init_precomp == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  return group->meth->init_precomp(group, out, p);
}

// Multi-scalar multiplication over precomputed tables, with an on-curve check.
int ec_point_mul_scalar_precomp(const EC_GROUP *group, EC_JACOBIAN *r,
                                const EC_PRECOMP *p0, const EC_SCALAR *scalar0,
                                const EC_PRECOMP *p1, const EC_SCALAR *scalar1,
                                const EC_PRECOMP *p2,
                                const EC_SCALAR *scalar2) {
  if (group->meth->mul_precomp == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  group->meth->mul_precomp(group, r, p0, scalar0, p1, scalar1, p2, scalar2);

  // Check the result is on the curve to defend against fault attacks or bugs.
  // This has negligible cost compared to the multiplication.
  if (!ec_GFp_simple_is_on_curve(group, r)) {
    OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  return 1;
}

// Constant-time select: out = mask ? a : b, coordinate by coordinate.
void ec_point_select(const EC_GROUP *group, EC_JACOBIAN *out, BN_ULONG mask,
                     const EC_JACOBIAN *a, const EC_JACOBIAN *b) {
  ec_felem_select(group, &out->X, mask, &a->X, &b->X);
  ec_felem_select(group, &out->Y, mask, &a->Y, &b->Y);
  ec_felem_select(group, &out->Z, mask, &a->Z, &b->Z);
}

// Constant-time select for affine points.
void ec_affine_select(const EC_GROUP *group, EC_AFFINE *out, BN_ULONG mask,
                      const EC_AFFINE *a, const EC_AFFINE *b) {
  ec_felem_select(group, &out->X, mask, &a->X, &b->X);
  ec_felem_select(group, &out->Y, mask, &a->Y, &b->Y);
}

// Constant-time select for whole precomputation tables.
void ec_precomp_select(const EC_GROUP *group, EC_PRECOMP *out, BN_ULONG mask,
                       const EC_PRECOMP *a, const EC_PRECOMP *b) {
  static_assert(sizeof(out->comb) == sizeof(*out),
                "out->comb does not span the entire structure");
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(out->comb); i++) {
    ec_affine_select(group, &out->comb[i], mask, &a->comb[i], &b->comb[i]);
  }
}

// Compares the x-coordinate of |p| against scalar |r| (used by ECDSA verify).
int ec_cmp_x_coordinate(const EC_GROUP *group, const EC_JACOBIAN *p,
                        const EC_SCALAR *r) {
  return group->meth->cmp_x_coordinate(group, p, r);
}

// Extracts the affine x-coordinate of |p| and reduces it into a scalar mod
// the group order.
int ec_get_x_coordinate_as_scalar(const EC_GROUP *group, EC_SCALAR *out,
                                  const EC_JACOBIAN *p) {
  uint8_t bytes[EC_MAX_BYTES];
  size_t len;
  if (!ec_get_x_coordinate_as_bytes(group, bytes, &len, sizeof(bytes), p)) {
    return 0;
  }

  // The x-coordinate is bounded by p, but we need a scalar, bounded by the
  // order. These may not have the same size. However, we must have p < 2×order,
  // assuming p is not tiny (p >= 17).
  //
  // Thus |bytes| will fit in |order.width + 1| words, and we can reduce by
  // performing at most one subtraction.
  //
  // Proof: We only work with prime order curves, so the number of points on
  // the curve is the order. Thus Hasse's theorem gives:
  //
  //     |order - (p + 1)| <= 2×sqrt(p)
  //         p + 1 - order <= 2×sqrt(p)
  //     p + 1 - 2×sqrt(p) <= order
  //       p + 1 - 2×(p/4)  < order       (p/4 > sqrt(p) for p >= 17)
  //         p/2 < p/2 + 1  < order
  //                     p  < 2×order
  //
  // Additionally, one can manually check this property for built-in curves. It
  // is enforced for legacy custom curves in |EC_GROUP_set_generator|.
const BIGNUM *order = EC_GROUP_get0_order(group);
  BN_ULONG words[EC_MAX_WORDS + 1] = {0};
  bn_big_endian_to_words(words, order->width + 1, bytes, len);
  bn_reduce_once(out->words, words, /*carry=*/words[order->width], order->d,
                 order->width);
  return 1;
}

// Serializes the affine x-coordinate of |p| as big-endian bytes, padded to the
// byte length of the field prime.
int ec_get_x_coordinate_as_bytes(const EC_GROUP *group, uint8_t *out,
                                 size_t *out_len, size_t max_out,
                                 const EC_JACOBIAN *p) {
  size_t len = BN_num_bytes(&group->field.N);
  assert(len <= EC_MAX_BYTES);
  if (max_out < len) {
    OPENSSL_PUT_ERROR(EC, EC_R_BUFFER_TOO_SMALL);
    return 0;
  }

  EC_FELEM x;
  if (!group->meth->point_get_affine_coordinates(group, p, &x, NULL)) {
    return 0;
  }

  ec_felem_to_bytes(group, out, out_len, &x);
  *out_len = len;
  return 1;
}

// Writes a known-safe fallback point to |out| (the generator when available,
// otherwise the point at infinity).
void ec_set_to_safe_point(const EC_GROUP *group, EC_JACOBIAN *out) {
  if (group->has_order) {
    ec_GFp_simple_point_copy(out, &group->generator.raw);
  } else {
    // The generator can be missing if the caller is in the process of
    // constructing an arbitrary group. In this case, we give up and use the
    // point at infinity.
    ec_GFp_simple_point_set_to_infinity(group, out);
  }
}

// No-op compatibility shim; BoringSSL only supports named-curve encoding.
void EC_GROUP_set_asn1_flag(EC_GROUP *group, int flag) {}

int EC_GROUP_get_asn1_flag(const EC_GROUP *group) {
  return OPENSSL_EC_NAMED_CURVE;
}

const EC_METHOD *EC_GROUP_method_of(const EC_GROUP *group) {
  // This function exists purely to give callers a way to call
  // |EC_METHOD_get_field_type|. cryptography.io crashes if |EC_GROUP_method_of|
  // returns NULL, so return some other garbage pointer.
  return (const EC_METHOD *)0x12340000;
}

int EC_METHOD_get_field_type(const EC_METHOD *meth) {
  return NID_X9_62_prime_field;
}

// Only uncompressed form is supported; any other request aborts.
void EC_GROUP_set_point_conversion_form(EC_GROUP *group,
                                        point_conversion_form_t form) {
  if (form != POINT_CONVERSION_UNCOMPRESSED) {
    abort();
  }
}



================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/ec_key.cc.inc
================================================
/*
 * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved.
* Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../../internal.h"
#include "../bcm_interface.h"
#include "../delocate.h"
#include "../ecdsa/internal.h"
#include "../service_indicator/internal.h"
#include "internal.h"


DEFINE_STATIC_EX_DATA_CLASS(g_ec_ex_data_class)

// Allocates a zeroed private-key wrapper whose |bignum| view aliases the
// scalar's words, sized to the group order.
static EC_WRAPPED_SCALAR *ec_wrapped_scalar_new(const EC_GROUP *group) {
  EC_WRAPPED_SCALAR *wrapped = reinterpret_cast(
      OPENSSL_zalloc(sizeof(EC_WRAPPED_SCALAR)));
  if (wrapped == NULL) {
    return NULL;
  }

  wrapped->bignum.d = wrapped->scalar.words;
  wrapped->bignum.width = group->order.N.width;
  wrapped->bignum.dmax = group->order.N.width;
  wrapped->bignum.flags = BN_FLG_STATIC_DATA;
  return wrapped;
}

static void ec_wrapped_scalar_free(EC_WRAPPED_SCALAR *scalar) {
  OPENSSL_free(scalar);
}

EC_KEY *EC_KEY_new(void) { return EC_KEY_new_method(NULL); }

// Allocates a new EC_KEY, optionally bound to an ENGINE-provided ECDSA method.
EC_KEY *EC_KEY_new_method(const ENGINE *engine) {
  EC_KEY *ret = reinterpret_cast(OPENSSL_zalloc(sizeof(EC_KEY)));
  if (ret == NULL) {
    return NULL;
  }

  if (engine) {
    ret->ecdsa_meth = ENGINE_get_ECDSA_method(engine);
  }
  if (ret->ecdsa_meth) {
    METHOD_ref(ret->ecdsa_meth);
  }

  ret->conv_form = POINT_CONVERSION_UNCOMPRESSED;
  ret->references = 1;

  CRYPTO_new_ex_data(&ret->ex_data);

  if (ret->ecdsa_meth && ret->ecdsa_meth->init && !ret->ecdsa_meth->init(ret)) {
    CRYPTO_free_ex_data(g_ec_ex_data_class_bss_get(), ret, &ret->ex_data);
    if (ret->ecdsa_meth) {
      METHOD_unref(ret->ecdsa_meth);
    }
    OPENSSL_free(ret);
    return NULL;
  }

  return ret;
}

// Allocates an EC_KEY pre-bound to the named curve |nid|.
EC_KEY *EC_KEY_new_by_curve_name(int nid) {
  EC_KEY *ret = EC_KEY_new();
  if (ret == NULL) {
    return NULL;
  }
  ret->group = EC_GROUP_new_by_curve_name(nid);
  if (ret->group == NULL) {
    EC_KEY_free(ret);
    return NULL;
  }
  return ret;
}

// Releases one reference to |r|, freeing it (and its group/point/scalar) when
// the count reaches zero.
void EC_KEY_free(EC_KEY *r) {
  if (r == NULL) {
    return;
  }

  if (!CRYPTO_refcount_dec_and_test_zero(&r->references)) {
    return;
  }

  if (r->ecdsa_meth) {
    if (r->ecdsa_meth->finish) {
      r->ecdsa_meth->finish(r);
    }
    METHOD_unref(r->ecdsa_meth);
  }

  CRYPTO_free_ex_data(g_ec_ex_data_class_bss_get(), r, &r->ex_data);

  EC_GROUP_free(r->group);
  EC_POINT_free(r->pub_key);
  ec_wrapped_scalar_free(r->priv_key);

  OPENSSL_free(r);
}

// Deep-copies group, public key, and private key (flags copied too). Note this
// does not copy the ex_data or engine method.
EC_KEY *EC_KEY_dup(const EC_KEY *src) {
  if (src == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return NULL;
  }

  EC_KEY *ret = EC_KEY_new();
  if (ret == NULL) {
    return NULL;
  }

  if ((src->group != NULL &&
       !EC_KEY_set_group(ret, src->group)) ||
      (src->pub_key != NULL &&
       !EC_KEY_set_public_key(ret, src->pub_key)) ||
      (src->priv_key != NULL &&
       !EC_KEY_set_private_key(ret, EC_KEY_get0_private_key(src)))) {
    EC_KEY_free(ret);
    return NULL;
  }

  ret->enc_flag = src->enc_flag;
  ret->conv_form = src->conv_form;
  return ret;
}

int EC_KEY_up_ref(EC_KEY *r) {
  CRYPTO_refcount_inc(&r->references);
  return 1;
}

// An opaque key delegates signing to an engine and exposes no key material.
int EC_KEY_is_opaque(const EC_KEY *key) {
  return key->ecdsa_meth && (key->ecdsa_meth->flags & ECDSA_FLAG_OPAQUE);
}

const EC_GROUP *EC_KEY_get0_group(const EC_KEY *key) { return key->group; }

// Binds |key| to |group|. Once set, the group may not be changed to a
// different one.
int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group) {
  // If |key| already has a group, it is an error to switch to another one.
  if (key->group != NULL) {
    if (EC_GROUP_cmp(key->group, group, NULL) != 0) {
      OPENSSL_PUT_ERROR(EC, EC_R_GROUP_MISMATCH);
      return 0;
    }
    return 1;
  }

  assert(key->priv_key == NULL);
  assert(key->pub_key == NULL);

  EC_GROUP_free(key->group);
  key->group = EC_GROUP_dup(group);
  return key->group != NULL;
}

// Returns the private scalar as a BIGNUM view, or NULL if no private key is
// set.
const BIGNUM *EC_KEY_get0_private_key(const EC_KEY *key) {
  return key->priv_key != NULL ? &key->priv_key->bignum : NULL;
}

// Sets the private key from |priv_key|, rejecting zero and out-of-range
// values. The group must already be set.
int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *priv_key) {
  if (key->group == NULL) {
    OPENSSL_PUT_ERROR(EC, EC_R_MISSING_PARAMETERS);
    return 0;
  }

  EC_WRAPPED_SCALAR *scalar = ec_wrapped_scalar_new(key->group);
  if (scalar == NULL) {
    return 0;
  }
  if (!ec_bignum_to_scalar(key->group, &scalar->scalar, priv_key) ||
      // Zero is not a valid private key, so it is safe to leak the result of
      // this comparison.
      constant_time_declassify_int(
          ec_scalar_is_zero(key->group, &scalar->scalar))) {
    OPENSSL_PUT_ERROR(EC, EC_R_INVALID_PRIVATE_KEY);
    ec_wrapped_scalar_free(scalar);
    return 0;
  }
  ec_wrapped_scalar_free(key->priv_key);
  key->priv_key = scalar;
  return 1;
}

const EC_POINT *EC_KEY_get0_public_key(const EC_KEY *key) {
  return key->pub_key;
}

// Replaces the public key with a copy of |pub_key| (which must match the
// key's group). NULL clears the public key.
int EC_KEY_set_public_key(EC_KEY *key, const EC_POINT *pub_key) {
  if (key->group == NULL) {
    OPENSSL_PUT_ERROR(EC, EC_R_MISSING_PARAMETERS);
    return 0;
  }

  if (pub_key != NULL && EC_GROUP_cmp(key->group, pub_key->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_GROUP_MISMATCH);
    return 0;
  }

  EC_POINT_free(key->pub_key);
  key->pub_key = EC_POINT_dup(pub_key, key->group);
  return (key->pub_key == NULL) ? 0 : 1;
}

unsigned int EC_KEY_get_enc_flags(const EC_KEY *key) { return key->enc_flag; }

void EC_KEY_set_enc_flags(EC_KEY *key, unsigned int flags) {
  key->enc_flag = flags;
}

point_conversion_form_t EC_KEY_get_conv_form(const EC_KEY *key) {
  return key->conv_form;
}

void EC_KEY_set_conv_form(EC_KEY *key, point_conversion_form_t cform) {
  key->conv_form = cform;
}

// Validates the key: public key present, not infinity, on the curve, and (if
// a private key is set) consistent with the public key.
int EC_KEY_check_key(const EC_KEY *eckey) {
  if (!eckey || !eckey->group || !eckey->pub_key) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }

  if (EC_POINT_is_at_infinity(eckey->group, eckey->pub_key)) {
    OPENSSL_PUT_ERROR(EC, EC_R_POINT_AT_INFINITY);
    return 0;
  }

  // Test whether the public key is on the elliptic curve.
if (!EC_POINT_is_on_curve(eckey->group, eckey->pub_key, NULL)) {
    OPENSSL_PUT_ERROR(EC, EC_R_POINT_IS_NOT_ON_CURVE);
    return 0;
  }

  // Check the public and private keys match.
  //
  // NOTE: this is a FIPS pair-wise consistency check for the ECDH case. See SP
  // 800-56Ar3, page 36.
  if (eckey->priv_key != NULL) {
    EC_JACOBIAN point;
    if (!ec_point_mul_scalar_base(eckey->group, &point,
                                  &eckey->priv_key->scalar)) {
      OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB);
      return 0;
    }
    // Leaking this comparison only leaks whether |eckey|'s public key was
    // correct.
    if (!constant_time_declassify_int(ec_GFp_simple_points_equal(
            eckey->group, &point, &eckey->pub_key->raw))) {
      OPENSSL_PUT_ERROR(EC, EC_R_INVALID_PRIVATE_KEY);
      return 0;
    }
  }

  return 1;
}

// FIPS variant of |EC_KEY_check_key|: additionally runs an ECDSA sign/verify
// pair-wise consistency test when a private key is present, and reports to
// the FIPS service indicator.
int EC_KEY_check_fips(const EC_KEY *key) {
  int ret = 0;
  FIPS_service_indicator_lock_state();

  if (!EC_KEY_check_key(key)) {
    goto end;
  }

  if (key->priv_key) {
    uint8_t digest[BCM_SHA256_DIGEST_LENGTH] = {0};
    uint8_t sig[ECDSA_MAX_FIXED_LEN];
    size_t sig_len;
    if (!ecdsa_sign_fixed(digest, sizeof(digest), sig, &sig_len, sizeof(sig),
                          key)) {
      goto end;
    }
    // Intentionally corrupt the digest when the FIPS break test is armed.
    if (boringssl_fips_break_test("ECDSA_PWCT")) {
      digest[0] = ~digest[0];
    }
    if (!ecdsa_verify_fixed(digest, sizeof(digest), sig, sig_len, key)) {
      OPENSSL_PUT_ERROR(EC, EC_R_PUBLIC_KEY_VALIDATION_FAILED);
      goto end;
    }
  }

  ret = 1;

end:
  FIPS_service_indicator_unlock_state();
  if (ret) {
    EC_KEY_keygen_verify_service_indicator(key);
  }
  return ret;
}

// Sets the public key from affine coordinates, then validates the whole key.
int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, const BIGNUM *x,
                                             const BIGNUM *y) {
  EC_POINT *point = NULL;
  int ok = 0;

  if (!key || !key->group || !x || !y) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }

  point = EC_POINT_new(key->group);
  if (point == NULL ||
      !EC_POINT_set_affine_coordinates_GFp(key->group, point, x, y, NULL) ||
      !EC_KEY_set_public_key(key, point) ||
      !EC_KEY_check_key(key)) {
    goto err;
  }

  ok = 1;

err:
  EC_POINT_free(point);
  return ok;
}

// Parses an octet-encoded point and installs it as the public key.
int EC_KEY_oct2key(EC_KEY *key, const uint8_t *in, size_t len, BN_CTX *ctx) {
  if (key->group == NULL) {
    OPENSSL_PUT_ERROR(EC, EC_R_MISSING_PARAMETERS);
    return 0;
  }

  EC_POINT *point = EC_POINT_new(key->group);
  int ok = point != NULL &&
           EC_POINT_oct2point(key->group, point, in, len, ctx) &&
           EC_KEY_set_public_key(key, point);
  EC_POINT_free(point);
  return ok;
}

// Serializes the public key into a freshly allocated buffer. Returns the
// length, or zero on error.
size_t EC_KEY_key2buf(const EC_KEY *key, point_conversion_form_t form,
                      uint8_t **out_buf, BN_CTX *ctx) {
  if (key == NULL || key->pub_key == NULL || key->group == NULL) {
    OPENSSL_PUT_ERROR(EC, EC_R_MISSING_PARAMETERS);
    return 0;
  }

  return EC_POINT_point2buf(key->group, key->pub_key, form, out_buf, ctx);
}

// Parses a fixed-width big-endian private key. The input must be exactly the
// byte length of the group order.
int EC_KEY_oct2priv(EC_KEY *key, const uint8_t *in, size_t len) {
  if (key->group == NULL) {
    OPENSSL_PUT_ERROR(EC, EC_R_MISSING_PARAMETERS);
    return 0;
  }

  if (len != BN_num_bytes(EC_GROUP_get0_order(key->group))) {
    OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
    return 0;
  }

  BIGNUM *priv_key = BN_bin2bn(in, len, NULL);
  int ok = priv_key != NULL &&  //
           EC_KEY_set_private_key(key, priv_key);
  BN_free(priv_key);
  return ok;
}

// Serializes the private key as fixed-width big-endian bytes. With |out| NULL,
// returns the required length.
size_t EC_KEY_priv2oct(const EC_KEY *key, uint8_t *out, size_t max_out) {
  if (key->group == NULL || key->priv_key == NULL) {
    OPENSSL_PUT_ERROR(EC, EC_R_MISSING_PARAMETERS);
    return 0;
  }

  size_t len = BN_num_bytes(EC_GROUP_get0_order(key->group));
  if (out == NULL) {
    return len;
  }

  if (max_out < len) {
    OPENSSL_PUT_ERROR(EC, EC_R_BUFFER_TOO_SMALL);
    return 0;
  }

  size_t bytes_written;
  ec_scalar_to_bytes(key->group, out, &bytes_written, &key->priv_key->scalar);
  assert(bytes_written == len);
  return len;
}

// Serializes the private key into a freshly allocated buffer; caller frees.
size_t EC_KEY_priv2buf(const EC_KEY *key, uint8_t **out_buf) {
  *out_buf = NULL;
  size_t len = EC_KEY_priv2oct(key, NULL, 0);
  if (len == 0) {
    return 0;
  }

  uint8_t *buf = reinterpret_cast(OPENSSL_malloc(len));
  if (buf == NULL) {
    return 0;
  }

  len = EC_KEY_priv2oct(key, buf, len);
  if (len == 0) {
    OPENSSL_free(buf);
    return 0;
  }

  *out_buf = buf;
  return len;
}

// Generates a fresh keypair on the key's group (FIPS 186-4 B.4.2 candidate
// testing). Replaces any existing key material on success.
int EC_KEY_generate_key(EC_KEY *key) {
  if (key == NULL || key->group == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }

  // Check that the group order is FIPS compliant (FIPS 186-4 B.4.2).
  if (EC_GROUP_order_bits(key->group) < 160) {
    OPENSSL_PUT_ERROR(EC, EC_R_INVALID_GROUP_ORDER);
    return 0;
  }

  static const uint8_t kDefaultAdditionalData[32] = {0};
  EC_WRAPPED_SCALAR *priv_key = ec_wrapped_scalar_new(key->group);
  EC_POINT *pub_key = EC_POINT_new(key->group);
  if (priv_key == NULL || pub_key == NULL ||
      // Generate the private key by testing candidates (FIPS 186-4 B.4.2).
      !ec_random_nonzero_scalar(key->group, &priv_key->scalar,
                                kDefaultAdditionalData) ||
      !ec_point_mul_scalar_base(key->group, &pub_key->raw, &priv_key->scalar)) {
    EC_POINT_free(pub_key);
    ec_wrapped_scalar_free(priv_key);
    return 0;
  }

  // The public key is derived from the private key, but it is public.
  //
  // TODO(crbug.com/boringssl/677): This isn't quite right. While |pub_key|
  // represents a public point, it is still in Jacobian form and the exact
  // Jacobian representation is secret. We need to make it affine first. See
  // discussion in the bug.
  CONSTTIME_DECLASSIFY(&pub_key->raw, sizeof(pub_key->raw));

  ec_wrapped_scalar_free(key->priv_key);
  key->priv_key = priv_key;
  EC_POINT_free(key->pub_key);
  key->pub_key = pub_key;
  return 1;
}

// FIPS key generation: generate, then run the pair-wise consistency checks.
// On failure, any partially generated key material is cleared.
int EC_KEY_generate_key_fips(EC_KEY *eckey) {
  if (eckey == NULL || eckey->group == NULL) {
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }

  boringssl_ensure_ecc_self_test();

  if (EC_KEY_generate_key(eckey) && EC_KEY_check_fips(eckey)) {
    return 1;
  }

  EC_POINT_free(eckey->pub_key);
  ec_wrapped_scalar_free(eckey->priv_key);
  eckey->pub_key = NULL;
  eckey->priv_key = NULL;
  return 0;
}

int EC_KEY_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused,
                            CRYPTO_EX_dup *dup_unused,
                            CRYPTO_EX_free *free_func) {
  return CRYPTO_get_ex_new_index_ex(g_ec_ex_data_class_bss_get(), argl, argp,
                                    free_func);
}

int EC_KEY_set_ex_data(EC_KEY *d, int idx, void *arg) {
  return CRYPTO_set_ex_data(&d->ex_data, idx, arg);
}

void *EC_KEY_get_ex_data(const EC_KEY *d, int idx) {
  return CRYPTO_get_ex_data(&d->ex_data, idx);
}

// No-op compatibility shim; BoringSSL only supports named-curve encoding.
void
EC_KEY_set_asn1_flag(EC_KEY *key, int flag) {} ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/ec_montgomery.cc.inc ================================================ /* * Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "../bn/internal.h" #include "../delocate.h" #include "internal.h" static void ec_GFp_mont_felem_to_montgomery(const EC_GROUP *group, EC_FELEM *out, const EC_FELEM *in) { bn_to_montgomery_small(out->words, in->words, group->field.N.width, &group->field); } static void ec_GFp_mont_felem_from_montgomery(const EC_GROUP *group, EC_FELEM *out, const EC_FELEM *in) { bn_from_montgomery_small(out->words, group->field.N.width, in->words, group->field.N.width, &group->field); } static void ec_GFp_mont_felem_inv0(const EC_GROUP *group, EC_FELEM *out, const EC_FELEM *a) { bn_mod_inverse0_prime_mont_small(out->words, a->words, group->field.N.width, &group->field); } void ec_GFp_mont_felem_mul(const EC_GROUP *group, EC_FELEM *r, const EC_FELEM *a, const EC_FELEM *b) { bn_mod_mul_montgomery_small(r->words, a->words, b->words, group->field.N.width, &group->field); } void ec_GFp_mont_felem_sqr(const EC_GROUP *group, EC_FELEM *r, const EC_FELEM *a) { bn_mod_mul_montgomery_small(r->words, a->words, a->words, group->field.N.width, &group->field); } void ec_GFp_mont_felem_to_bytes(const EC_GROUP *group, uint8_t *out, size_t *out_len, const EC_FELEM *in) { EC_FELEM tmp; ec_GFp_mont_felem_from_montgomery(group, &tmp, in); ec_GFp_simple_felem_to_bytes(group, out, out_len, &tmp); } int ec_GFp_mont_felem_from_bytes(const EC_GROUP *group, EC_FELEM 
*out, const uint8_t *in, size_t len) { if (!ec_GFp_simple_felem_from_bytes(group, out, in, len)) { return 0; } ec_GFp_mont_felem_to_montgomery(group, out, out); return 1; } void ec_GFp_mont_felem_reduce(const EC_GROUP *group, EC_FELEM *out, const BN_ULONG *words, size_t num) { // Convert "from" Montgomery form so the value is reduced mod p. bn_from_montgomery_small(out->words, group->field.N.width, words, num, &group->field); // Convert "to" Montgomery form to remove the R^-1 factor added. ec_GFp_mont_felem_to_montgomery(group, out, out); // Convert to Montgomery form to match this implementation's representation. ec_GFp_mont_felem_to_montgomery(group, out, out); } void ec_GFp_mont_felem_exp(const EC_GROUP *group, EC_FELEM *out, const EC_FELEM *a, const BN_ULONG *exp, size_t num_exp) { bn_mod_exp_mont_small(out->words, a->words, group->field.N.width, exp, num_exp, &group->field); } static int ec_GFp_mont_point_get_affine_coordinates(const EC_GROUP *group, const EC_JACOBIAN *point, EC_FELEM *x, EC_FELEM *y) { if (constant_time_declassify_int( ec_GFp_simple_is_at_infinity(group, point))) { OPENSSL_PUT_ERROR(EC, EC_R_POINT_AT_INFINITY); return 0; } // Transform (X, Y, Z) into (x, y) := (X/Z^2, Y/Z^3). Note the check above // ensures |point->Z| is non-zero, so the inverse always exists. EC_FELEM z1, z2; ec_GFp_mont_felem_inv0(group, &z2, &point->Z); ec_GFp_mont_felem_sqr(group, &z1, &z2); if (x != NULL) { ec_GFp_mont_felem_mul(group, x, &point->X, &z1); } if (y != NULL) { ec_GFp_mont_felem_mul(group, &z1, &z1, &z2); ec_GFp_mont_felem_mul(group, y, &point->Y, &z1); } return 1; } static int ec_GFp_mont_jacobian_to_affine_batch(const EC_GROUP *group, EC_AFFINE *out, const EC_JACOBIAN *in, size_t num) { if (num == 0) { return 1; } // Compute prefix products of all Zs. Use |out[i].X| as scratch space // to store these values. 
out[0].X = in[0].Z; for (size_t i = 1; i < num; i++) { ec_GFp_mont_felem_mul(group, &out[i].X, &out[i - 1].X, &in[i].Z); } // Some input was infinity iff the product of all Zs is zero. if (ec_felem_non_zero_mask(group, &out[num - 1].X) == 0) { OPENSSL_PUT_ERROR(EC, EC_R_POINT_AT_INFINITY); return 0; } // Invert the product of all Zs. EC_FELEM zinvprod; ec_GFp_mont_felem_inv0(group, &zinvprod, &out[num - 1].X); for (size_t i = num - 1; i < num; i--) { // Our loop invariant is that |zinvprod| is Z0^-1 * Z1^-1 * ... * Zi^-1. // Recover Zi^-1 by multiplying by the previous product. EC_FELEM zinv, zinv2; if (i == 0) { zinv = zinvprod; } else { ec_GFp_mont_felem_mul(group, &zinv, &zinvprod, &out[i - 1].X); // Maintain the loop invariant for the next iteration. ec_GFp_mont_felem_mul(group, &zinvprod, &zinvprod, &in[i].Z); } // Compute affine coordinates: x = X * Z^-2 and y = Y * Z^-3. ec_GFp_mont_felem_sqr(group, &zinv2, &zinv); ec_GFp_mont_felem_mul(group, &out[i].X, &in[i].X, &zinv2); ec_GFp_mont_felem_mul(group, &out[i].Y, &in[i].Y, &zinv2); ec_GFp_mont_felem_mul(group, &out[i].Y, &out[i].Y, &zinv); } return 1; } void ec_GFp_mont_add(const EC_GROUP *group, EC_JACOBIAN *out, const EC_JACOBIAN *a, const EC_JACOBIAN *b) { if (a == b) { ec_GFp_mont_dbl(group, out, a); return; } // The method is taken from: // http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#addition-add-2007-bl // // Coq transcription and correctness proof: // // EC_FELEM x_out, y_out, z_out; BN_ULONG z1nz = ec_felem_non_zero_mask(group, &a->Z); BN_ULONG z2nz = ec_felem_non_zero_mask(group, &b->Z); // z1z1 = z1z1 = z1**2 EC_FELEM z1z1; ec_GFp_mont_felem_sqr(group, &z1z1, &a->Z); // z2z2 = z2**2 EC_FELEM z2z2; ec_GFp_mont_felem_sqr(group, &z2z2, &b->Z); // u1 = x1*z2z2 EC_FELEM u1; ec_GFp_mont_felem_mul(group, &u1, &a->X, &z2z2); // two_z1z2 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2 EC_FELEM two_z1z2; ec_felem_add(group, &two_z1z2, &a->Z, &b->Z); ec_GFp_mont_felem_sqr(group, &two_z1z2, &two_z1z2); 
ec_felem_sub(group, &two_z1z2, &two_z1z2, &z1z1); ec_felem_sub(group, &two_z1z2, &two_z1z2, &z2z2); // s1 = y1 * z2**3 EC_FELEM s1; ec_GFp_mont_felem_mul(group, &s1, &b->Z, &z2z2); ec_GFp_mont_felem_mul(group, &s1, &s1, &a->Y); // u2 = x2*z1z1 EC_FELEM u2; ec_GFp_mont_felem_mul(group, &u2, &b->X, &z1z1); // h = u2 - u1 EC_FELEM h; ec_felem_sub(group, &h, &u2, &u1); BN_ULONG xneq = ec_felem_non_zero_mask(group, &h); // z_out = two_z1z2 * h ec_GFp_mont_felem_mul(group, &z_out, &h, &two_z1z2); // z1z1z1 = z1 * z1z1 EC_FELEM z1z1z1; ec_GFp_mont_felem_mul(group, &z1z1z1, &a->Z, &z1z1); // s2 = y2 * z1**3 EC_FELEM s2; ec_GFp_mont_felem_mul(group, &s2, &b->Y, &z1z1z1); // r = (s2 - s1)*2 EC_FELEM r; ec_felem_sub(group, &r, &s2, &s1); ec_felem_add(group, &r, &r, &r); BN_ULONG yneq = ec_felem_non_zero_mask(group, &r); // This case will never occur in the constant-time |ec_GFp_mont_mul|. BN_ULONG is_nontrivial_double = ~xneq & ~yneq & z1nz & z2nz; if (constant_time_declassify_w(is_nontrivial_double)) { ec_GFp_mont_dbl(group, out, a); return; } // I = (2h)**2 EC_FELEM i; ec_felem_add(group, &i, &h, &h); ec_GFp_mont_felem_sqr(group, &i, &i); // J = h * I EC_FELEM j; ec_GFp_mont_felem_mul(group, &j, &h, &i); // V = U1 * I EC_FELEM v; ec_GFp_mont_felem_mul(group, &v, &u1, &i); // x_out = r**2 - J - 2V ec_GFp_mont_felem_sqr(group, &x_out, &r); ec_felem_sub(group, &x_out, &x_out, &j); ec_felem_sub(group, &x_out, &x_out, &v); ec_felem_sub(group, &x_out, &x_out, &v); // y_out = r(V-x_out) - 2 * s1 * J ec_felem_sub(group, &y_out, &v, &x_out); ec_GFp_mont_felem_mul(group, &y_out, &y_out, &r); EC_FELEM s1j; ec_GFp_mont_felem_mul(group, &s1j, &s1, &j); ec_felem_sub(group, &y_out, &y_out, &s1j); ec_felem_sub(group, &y_out, &y_out, &s1j); ec_felem_select(group, &x_out, z1nz, &x_out, &b->X); ec_felem_select(group, &out->X, z2nz, &x_out, &a->X); ec_felem_select(group, &y_out, z1nz, &y_out, &b->Y); ec_felem_select(group, &out->Y, z2nz, &y_out, &a->Y); ec_felem_select(group, &z_out, z1nz, 
&z_out, &b->Z); ec_felem_select(group, &out->Z, z2nz, &z_out, &a->Z); } void ec_GFp_mont_dbl(const EC_GROUP *group, EC_JACOBIAN *r, const EC_JACOBIAN *a) { if (group->a_is_minus3) { // The method is taken from: // http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b // // Coq transcription and correctness proof: // // EC_FELEM delta, gamma, beta, ftmp, ftmp2, tmptmp, alpha, fourbeta; // delta = z^2 ec_GFp_mont_felem_sqr(group, &delta, &a->Z); // gamma = y^2 ec_GFp_mont_felem_sqr(group, &gamma, &a->Y); // beta = x*gamma ec_GFp_mont_felem_mul(group, &beta, &a->X, &gamma); // alpha = 3*(x-delta)*(x+delta) ec_felem_sub(group, &ftmp, &a->X, &delta); ec_felem_add(group, &ftmp2, &a->X, &delta); ec_felem_add(group, &tmptmp, &ftmp2, &ftmp2); ec_felem_add(group, &ftmp2, &ftmp2, &tmptmp); ec_GFp_mont_felem_mul(group, &alpha, &ftmp, &ftmp2); // x' = alpha^2 - 8*beta ec_GFp_mont_felem_sqr(group, &r->X, &alpha); ec_felem_add(group, &fourbeta, &beta, &beta); ec_felem_add(group, &fourbeta, &fourbeta, &fourbeta); ec_felem_add(group, &tmptmp, &fourbeta, &fourbeta); ec_felem_sub(group, &r->X, &r->X, &tmptmp); // z' = (y + z)^2 - gamma - delta ec_felem_add(group, &delta, &gamma, &delta); ec_felem_add(group, &ftmp, &a->Y, &a->Z); ec_GFp_mont_felem_sqr(group, &r->Z, &ftmp); ec_felem_sub(group, &r->Z, &r->Z, &delta); // y' = alpha*(4*beta - x') - 8*gamma^2 ec_felem_sub(group, &r->Y, &fourbeta, &r->X); ec_felem_add(group, &gamma, &gamma, &gamma); ec_GFp_mont_felem_sqr(group, &gamma, &gamma); ec_GFp_mont_felem_mul(group, &r->Y, &alpha, &r->Y); ec_felem_add(group, &gamma, &gamma, &gamma); ec_felem_sub(group, &r->Y, &r->Y, &gamma); } else { // The method is taken from: // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-dbl-2007-bl // // Coq transcription and correctness proof: // // EC_FELEM xx, yy, yyyy, zz; ec_GFp_mont_felem_sqr(group, &xx, &a->X); ec_GFp_mont_felem_sqr(group, &yy, &a->Y); ec_GFp_mont_felem_sqr(group, &yyyy, &yy); 
ec_GFp_mont_felem_sqr(group, &zz, &a->Z); // s = 2*((x_in + yy)^2 - xx - yyyy) EC_FELEM s; ec_felem_add(group, &s, &a->X, &yy); ec_GFp_mont_felem_sqr(group, &s, &s); ec_felem_sub(group, &s, &s, &xx); ec_felem_sub(group, &s, &s, &yyyy); ec_felem_add(group, &s, &s, &s); // m = 3*xx + a*zz^2 EC_FELEM m; ec_GFp_mont_felem_sqr(group, &m, &zz); ec_GFp_mont_felem_mul(group, &m, &group->a, &m); ec_felem_add(group, &m, &m, &xx); ec_felem_add(group, &m, &m, &xx); ec_felem_add(group, &m, &m, &xx); // x_out = m^2 - 2*s ec_GFp_mont_felem_sqr(group, &r->X, &m); ec_felem_sub(group, &r->X, &r->X, &s); ec_felem_sub(group, &r->X, &r->X, &s); // z_out = (y_in + z_in)^2 - yy - zz ec_felem_add(group, &r->Z, &a->Y, &a->Z); ec_GFp_mont_felem_sqr(group, &r->Z, &r->Z); ec_felem_sub(group, &r->Z, &r->Z, &yy); ec_felem_sub(group, &r->Z, &r->Z, &zz); // y_out = m*(s-x_out) - 8*yyyy ec_felem_add(group, &yyyy, &yyyy, &yyyy); ec_felem_add(group, &yyyy, &yyyy, &yyyy); ec_felem_add(group, &yyyy, &yyyy, &yyyy); ec_felem_sub(group, &r->Y, &s, &r->X); ec_GFp_mont_felem_mul(group, &r->Y, &r->Y, &m); ec_felem_sub(group, &r->Y, &r->Y, &yyyy); } } static int ec_GFp_mont_cmp_x_coordinate(const EC_GROUP *group, const EC_JACOBIAN *p, const EC_SCALAR *r) { if (!group->field_greater_than_order || group->field.N.width != group->order.N.width) { // Do not bother optimizing this case. p > order in all commonly-used // curves. return ec_GFp_simple_cmp_x_coordinate(group, p, r); } if (ec_GFp_simple_is_at_infinity(group, p)) { return 0; } // We wish to compare X/Z^2 with r. This is equivalent to comparing X with // r*Z^2. Note that X and Z are represented in Montgomery form, while r is // not. EC_FELEM r_Z2, Z2_mont, X; ec_GFp_mont_felem_mul(group, &Z2_mont, &p->Z, &p->Z); // r < order < p, so this is valid. 
OPENSSL_memcpy(r_Z2.words, r->words, group->field.N.width * sizeof(BN_ULONG)); ec_GFp_mont_felem_mul(group, &r_Z2, &r_Z2, &Z2_mont); ec_GFp_mont_felem_from_montgomery(group, &X, &p->X); if (ec_felem_equal(group, &r_Z2, &X)) { return 1; } // During signing the x coefficient is reduced modulo the group order. // Therefore there is a small possibility, less than 1/2^128, that group_order // < p.x < P. in that case we need not only to compare against |r| but also to // compare against r+group_order. BN_ULONG carry = bn_add_words(r_Z2.words, r->words, group->order.N.d, group->field.N.width); if (carry == 0 && bn_less_than_words(r_Z2.words, group->field.N.d, group->field.N.width)) { // r + group_order < p, so compare (r + group_order) * Z^2 against X. ec_GFp_mont_felem_mul(group, &r_Z2, &r_Z2, &Z2_mont); if (ec_felem_equal(group, &r_Z2, &X)) { return 1; } } return 0; } DEFINE_METHOD_FUNCTION(EC_METHOD, EC_GFp_mont_method) { out->point_get_affine_coordinates = ec_GFp_mont_point_get_affine_coordinates; out->jacobian_to_affine_batch = ec_GFp_mont_jacobian_to_affine_batch; out->add = ec_GFp_mont_add; out->dbl = ec_GFp_mont_dbl; out->mul = ec_GFp_mont_mul; out->mul_base = ec_GFp_mont_mul_base; out->mul_batch = ec_GFp_mont_mul_batch; out->mul_public_batch = ec_GFp_mont_mul_public_batch; out->init_precomp = ec_GFp_mont_init_precomp; out->mul_precomp = ec_GFp_mont_mul_precomp; out->felem_mul = ec_GFp_mont_felem_mul; out->felem_sqr = ec_GFp_mont_felem_sqr; out->felem_to_bytes = ec_GFp_mont_felem_to_bytes; out->felem_from_bytes = ec_GFp_mont_felem_from_bytes; out->felem_reduce = ec_GFp_mont_felem_reduce; out->felem_exp = ec_GFp_mont_felem_exp; out->scalar_inv0_montgomery = ec_simple_scalar_inv0_montgomery; out->scalar_to_montgomery_inv_vartime = ec_simple_scalar_to_montgomery_inv_vartime; out->cmp_x_coordinate = ec_GFp_mont_cmp_x_coordinate; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/felem.cc.inc 
================================================
/* Copyright 2018 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the <...> targets of the four includes below were lost when
// this file was extracted; restore them from upstream before building.
#include
#include
#include
#include

#include "internal.h"
#include "../bn/internal.h"
#include "../../internal.h"


const EC_FELEM *ec_felem_one(const EC_GROUP *group) {
  // We reuse generator.Z as a cache for 1 in the field.
  return &group->generator.raw.Z;
}

// Converts |in| to an |EC_FELEM|, rejecting negative values and values >= p,
// so the result is always a fully reduced field element.
int ec_bignum_to_felem(const EC_GROUP *group, EC_FELEM *out, const BIGNUM *in) {
  uint8_t bytes[EC_MAX_BYTES];
  size_t len = BN_num_bytes(&group->field.N);
  assert(sizeof(bytes) >= len);
  if (BN_is_negative(in) || BN_cmp(in, &group->field.N) >= 0 ||
      !BN_bn2bin_padded(bytes, len, in)) {
    OPENSSL_PUT_ERROR(EC, EC_R_COORDINATES_OUT_OF_RANGE);
    return 0;
  }
  return ec_felem_from_bytes(group, out, bytes, len);
}

// Converts |in| to a |BIGNUM| via the serialized big-endian form.
int ec_felem_to_bignum(const EC_GROUP *group, BIGNUM *out, const EC_FELEM *in) {
  uint8_t bytes[EC_MAX_BYTES];
  size_t len;
  ec_felem_to_bytes(group, bytes, &len, in);
  return BN_bin2bn(bytes, len, out) != NULL;
}

// Serialization is delegated to the group's method table because the
// in-memory representation (e.g. Montgomery form) varies per |EC_METHOD|.
void ec_felem_to_bytes(const EC_GROUP *group, uint8_t *out, size_t *out_len,
                       const EC_FELEM *in) {
  group->meth->felem_to_bytes(group, out, out_len, in);
}

int ec_felem_from_bytes(const EC_GROUP *group, EC_FELEM *out, const uint8_t *in,
                        size_t len) {
  return group->meth->felem_from_bytes(group, out, in, len);
}

void ec_felem_neg(const EC_GROUP *group, EC_FELEM *out, const EC_FELEM *a) {
  // -a is zero if a is zero and p-a otherwise.
  BN_ULONG mask = ec_felem_non_zero_mask(group, a);
  // p - a cannot borrow because a < p.
  BN_ULONG borrow = bn_sub_words(out->words, group->field.N.d, a->words,
                                 group->field.N.width);
  assert(borrow == 0);
  (void)borrow;
  // Constant-time: zero the result when a was zero instead of branching.
  for (int i = 0; i < group->field.N.width; i++) {
    out->words[i] &= mask;
  }
}

void ec_felem_add(const EC_GROUP *group, EC_FELEM *out, const EC_FELEM *a,
                  const EC_FELEM *b) {
  // |tmp| is scratch space required by the low-level modular add.
  EC_FELEM tmp;
  bn_mod_add_words(out->words, a->words, b->words, group->field.N.d, tmp.words,
                   group->field.N.width);
}

void ec_felem_sub(const EC_GROUP *group, EC_FELEM *out, const EC_FELEM *a,
                  const EC_FELEM *b) {
  // |tmp| is scratch space required by the low-level modular subtract.
  EC_FELEM tmp;
  bn_mod_sub_words(out->words, a->words, b->words, group->field.N.d, tmp.words,
                   group->field.N.width);
}

BN_ULONG ec_felem_non_zero_mask(const EC_GROUP *group, const EC_FELEM *a) {
  // OR all limbs together so the zero test sees the whole element, then turn
  // the result into an all-ones/all-zeros mask in constant time.
  BN_ULONG mask = 0;
  for (int i = 0; i < group->field.N.width; i++) {
    mask |= a->words[i];
  }
  return ~constant_time_is_zero_w(mask);
}

void ec_felem_select(const EC_GROUP *group, EC_FELEM *out, BN_ULONG mask,
                     const EC_FELEM *a, const EC_FELEM *b) {
  bn_select_words(out->words, mask, a->words, b->words, group->field.N.width);
}

int ec_felem_equal(const EC_GROUP *group, const EC_FELEM *a,
                   const EC_FELEM *b) {
  // |CRYPTO_memcmp| keeps the comparison constant-time.
  return CRYPTO_memcmp(a->words, b->words,
                       group->field.N.width * sizeof(BN_ULONG)) == 0;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/internal.h
================================================
/*
 * Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
 * You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OPENSSL_HEADER_EC_INTERNAL_H
#define OPENSSL_HEADER_EC_INTERNAL_H

// NOTE(review): the <...> targets of the five includes below were lost when
// this file was extracted; restore them from upstream before building.
#include
#include
#include
#include
#include

#include "../bn/internal.h"

#if defined(__cplusplus)
extern "C" {
#endif


// EC internals.


// Cap the size of all field elements and scalars, including custom curves, to
// 66 bytes, large enough to fit secp521r1 and brainpoolP512r1, which appear to
// be the largest fields anyone plausibly uses.
#define EC_MAX_BYTES 66
#define EC_MAX_WORDS ((EC_MAX_BYTES + BN_BYTES - 1) / BN_BYTES)
#define EC_MAX_COMPRESSED (EC_MAX_BYTES + 1)
#define EC_MAX_UNCOMPRESSED (2 * EC_MAX_BYTES + 1)

static_assert(EC_MAX_WORDS <= BN_SMALL_MAX_WORDS,
              "bn_*_small functions not usable");


// Scalars.

// An EC_SCALAR is an integer fully reduced modulo the order. Only the first
// |order->width| words are used. An |EC_SCALAR| is specific to an |EC_GROUP|
// and must not be mixed between groups.
typedef struct {
  BN_ULONG words[EC_MAX_WORDS];
} EC_SCALAR;

// ec_bignum_to_scalar converts |in| to an |EC_SCALAR| and writes it to
// |*out|. It returns one on success and zero if |in| is out of range.
OPENSSL_EXPORT int ec_bignum_to_scalar(const EC_GROUP *group, EC_SCALAR *out,
                                       const BIGNUM *in);

// ec_scalar_to_bytes serializes |in| as a big-endian bytestring to |out| and
// sets |*out_len| to the number of bytes written. The number of bytes written
// is |BN_num_bytes(&group->order)|, which is at most |EC_MAX_BYTES|.
OPENSSL_EXPORT void ec_scalar_to_bytes(const EC_GROUP *group, uint8_t *out,
                                       size_t *out_len, const EC_SCALAR *in);

// ec_scalar_from_bytes deserializes |in| and stores the resulting scalar over
// group |group| to |out|. It returns one on success and zero if |in| is
// invalid.
OPENSSL_EXPORT int ec_scalar_from_bytes(const EC_GROUP *group, EC_SCALAR *out,
                                        const uint8_t *in, size_t len);

// ec_scalar_reduce sets |out| to |words|, reduced modulo the group order.
// |words| must be less than order^2. |num| must be at most twice the width of
// group order. This function treats |words| as secret.
void ec_scalar_reduce(const EC_GROUP *group, EC_SCALAR *out,
                      const BN_ULONG *words, size_t num);

// ec_random_scalar sets |out| to a uniformly selected random value from
// zero to |group->order| - 1. It returns one on success and zero on error.
int ec_random_scalar(const EC_GROUP *group, EC_SCALAR *out,
                     const uint8_t additional_data[32]);

// ec_random_nonzero_scalar sets |out| to a uniformly selected random value from
// 1 to |group->order| - 1. It returns one on success and zero on error.
int ec_random_nonzero_scalar(const EC_GROUP *group, EC_SCALAR *out,
                             const uint8_t additional_data[32]);

// ec_scalar_equal_vartime returns one if |a| and |b| are equal and zero
// otherwise. Both values are treated as public.
int ec_scalar_equal_vartime(const EC_GROUP *group, const EC_SCALAR *a,
                            const EC_SCALAR *b);

// ec_scalar_is_zero returns one if |a| is zero and zero otherwise.
int ec_scalar_is_zero(const EC_GROUP *group, const EC_SCALAR *a);

// ec_scalar_add sets |r| to |a| + |b|.
void ec_scalar_add(const EC_GROUP *group, EC_SCALAR *r, const EC_SCALAR *a,
                   const EC_SCALAR *b);

// ec_scalar_sub sets |r| to |a| - |b|.
void ec_scalar_sub(const EC_GROUP *group, EC_SCALAR *r, const EC_SCALAR *a,
                   const EC_SCALAR *b);

// ec_scalar_neg sets |r| to -|a|.
void ec_scalar_neg(const EC_GROUP *group, EC_SCALAR *r, const EC_SCALAR *a);

// ec_scalar_to_montgomery sets |r| to |a| in Montgomery form.
void ec_scalar_to_montgomery(const EC_GROUP *group, EC_SCALAR *r,
                             const EC_SCALAR *a);

// ec_scalar_from_montgomery sets |r| to |a| converted from Montgomery form.
void ec_scalar_from_montgomery(const EC_GROUP *group, EC_SCALAR *r,
                               const EC_SCALAR *a);

// ec_scalar_mul_montgomery sets |r| to |a| * |b| where inputs and outputs are
// in Montgomery form.
void ec_scalar_mul_montgomery(const EC_GROUP *group, EC_SCALAR *r,
                              const EC_SCALAR *a, const EC_SCALAR *b);

// ec_scalar_inv0_montgomery sets |r| to |a|^-1 where inputs and outputs are in
// Montgomery form. If |a| is zero, |r| is set to zero.
void ec_scalar_inv0_montgomery(const EC_GROUP *group, EC_SCALAR *r,
                               const EC_SCALAR *a);

// ec_scalar_to_montgomery_inv_vartime sets |r| to |a|^-1 R. That is, it takes
// in |a| not in Montgomery form and computes the inverse in Montgomery form. It
// returns one on success and zero if |a| has no inverse. This function assumes
// |a| is public and may leak information about it via timing.
//
// Note this is not the same operation as |ec_scalar_inv0_montgomery|.
int ec_scalar_to_montgomery_inv_vartime(const EC_GROUP *group, EC_SCALAR *r,
                                        const EC_SCALAR *a);

// ec_scalar_select, in constant time, sets |out| to |a| if |mask| is all ones
// and |b| if |mask| is all zeros.
void ec_scalar_select(const EC_GROUP *group, EC_SCALAR *out, BN_ULONG mask,
                      const EC_SCALAR *a, const EC_SCALAR *b);


// Field elements.

// An EC_FELEM represents a field element. Only the first |field->width| words
// are used. An |EC_FELEM| is specific to an |EC_GROUP| and must not be mixed
// between groups. Additionally, the representation (whether or not elements are
// represented in Montgomery-form) may vary between |EC_METHOD|s.
typedef struct {
  BN_ULONG words[EC_MAX_WORDS];
} EC_FELEM;

// ec_felem_one returns one in |group|'s field.
const EC_FELEM *ec_felem_one(const EC_GROUP *group);

// ec_bignum_to_felem converts |in| to an |EC_FELEM|. It returns one on success
// and zero if |in| is out of range.
int ec_bignum_to_felem(const EC_GROUP *group, EC_FELEM *out, const BIGNUM *in);

// ec_felem_to_bignum converts |in| to a |BIGNUM|. It returns one on success and
// zero on allocation failure.
int ec_felem_to_bignum(const EC_GROUP *group, BIGNUM *out, const EC_FELEM *in);

// ec_felem_to_bytes serializes |in| as a big-endian bytestring to |out| and
// sets |*out_len| to the number of bytes written. The number of bytes written
// is the byte length of the field prime, which is at most |EC_MAX_BYTES|.
void ec_felem_to_bytes(const EC_GROUP *group, uint8_t *out, size_t *out_len,
                       const EC_FELEM *in);

// ec_felem_from_bytes deserializes |in| and stores the resulting field element
// to |out|. It returns one on success and zero if |in| is invalid.
int ec_felem_from_bytes(const EC_GROUP *group, EC_FELEM *out, const uint8_t *in,
                        size_t len);

// ec_felem_neg sets |out| to -|a|.
void ec_felem_neg(const EC_GROUP *group, EC_FELEM *out, const EC_FELEM *a);

// ec_felem_add sets |out| to |a| + |b|.
void ec_felem_add(const EC_GROUP *group, EC_FELEM *out, const EC_FELEM *a,
                  const EC_FELEM *b);

// ec_felem_sub sets |out| to |a| - |b|.
void ec_felem_sub(const EC_GROUP *group, EC_FELEM *out, const EC_FELEM *a,
                  const EC_FELEM *b);

// ec_felem_non_zero_mask returns all ones if |a| is non-zero and all zeros
// otherwise.
BN_ULONG ec_felem_non_zero_mask(const EC_GROUP *group, const EC_FELEM *a);

// ec_felem_select, in constant time, sets |out| to |a| if |mask| is all ones
// and |b| if |mask| is all zeros.
void ec_felem_select(const EC_GROUP *group, EC_FELEM *out, BN_ULONG mask,
                     const EC_FELEM *a, const EC_FELEM *b);

// ec_felem_equal returns one if |a| and |b| are equal and zero otherwise.
int ec_felem_equal(const EC_GROUP *group, const EC_FELEM *a, const EC_FELEM *b);


// Points.
//
// Points may be represented in affine coordinates as |EC_AFFINE| or Jacobian
// coordinates as |EC_JACOBIAN|. Affine coordinates directly represent a
// point on the curve, but point addition over affine coordinates requires
// costly field inversions, so arithmetic is done in Jacobian coordinates.
// Converting from affine to Jacobian is cheap, while converting from Jacobian
// to affine costs a field inversion. (Jacobian coordinates amortize the field
// inversions needed in a sequence of point operations.)

// An EC_JACOBIAN represents an elliptic curve point in Jacobian coordinates.
// Unlike |EC_POINT|, it is a plain struct which can be stack-allocated and
// needs no cleanup. It is specific to an |EC_GROUP| and must not be mixed
// between groups.
typedef struct {
  // X, Y, and Z are Jacobian projective coordinates. They represent
  // (X/Z^2, Y/Z^3) if Z != 0 and the point at infinity otherwise.
  EC_FELEM X, Y, Z;
} EC_JACOBIAN;

// An EC_AFFINE represents an elliptic curve point in affine coordinates.
// Note the point at infinity cannot be represented in affine coordinates.
typedef struct {
  EC_FELEM X, Y;
} EC_AFFINE;

// ec_affine_to_jacobian converts |p| to Jacobian form and writes the result to
// |*out|. This operation is very cheap and only costs a few copies.
void ec_affine_to_jacobian(const EC_GROUP *group, EC_JACOBIAN *out,
                           const EC_AFFINE *p);

// ec_jacobian_to_affine converts |p| to affine form and writes the result to
// |*out|. It returns one on success and zero if |p| was the point at infinity.
// This operation performs a field inversion and should only be done once per
// point.
//
// If only extracting the x-coordinate, use |ec_get_x_coordinate_*| which is
// slightly faster.
OPENSSL_EXPORT int ec_jacobian_to_affine(const EC_GROUP *group, EC_AFFINE *out,
                                         const EC_JACOBIAN *p);

// ec_jacobian_to_affine_batch converts |num| points in |in| from Jacobian
// coordinates to affine coordinates and writes the results to |out|. It returns
// one on success and zero if any of the input points were infinity.
//
// This function is not implemented for all curves. Add implementations as
// needed.
int ec_jacobian_to_affine_batch(const EC_GROUP *group, EC_AFFINE *out,
                                const EC_JACOBIAN *in, size_t num);

// ec_point_set_affine_coordinates sets |out| to a point with affine
// coordinates |x| and |y|. It returns one if the point is on the curve and
// zero otherwise. If the point is not on the curve, the value of |out| is
// undefined.
int ec_point_set_affine_coordinates(const EC_GROUP *group, EC_AFFINE *out,
                                    const EC_FELEM *x, const EC_FELEM *y);

// ec_point_mul_no_self_test does the same as |EC_POINT_mul|, but doesn't try to
// run the self-test first. This is for use in the self tests themselves, to
// prevent an infinite loop.
int ec_point_mul_no_self_test(const EC_GROUP *group, EC_POINT *r,
                              const BIGNUM *g_scalar, const EC_POINT *p,
                              const BIGNUM *p_scalar, BN_CTX *ctx);

// ec_point_mul_scalar sets |r| to |p| * |scalar|. Both inputs are considered
// secret.
int ec_point_mul_scalar(const EC_GROUP *group, EC_JACOBIAN *r,
                        const EC_JACOBIAN *p, const EC_SCALAR *scalar);

// ec_point_mul_scalar_base sets |r| to generator * |scalar|. |scalar| is
// treated as secret.
int ec_point_mul_scalar_base(const EC_GROUP *group, EC_JACOBIAN *r,
                             const EC_SCALAR *scalar);

// ec_point_mul_scalar_batch sets |r| to |p0| * |scalar0| + |p1| * |scalar1| +
// |p2| * |scalar2|. |p2| may be NULL to skip that term.
//
// The inputs are treated as secret, however, this function leaks information
// about whether intermediate computations add a point to itself. Callers must
// ensure that discrete logs between |p0|, |p1|, and |p2| are uniformly
// distributed and independent of the scalars, which should be uniformly
// selected and not under the attackers control. This ensures the doubling case
// will occur with negligible probability.
//
// This function is not implemented for all curves. Add implementations as
// needed.
//
// TODO(davidben): This function does not use base point tables. For now, it is
// only used with the generic |EC_GFp_mont_method| implementation which has
// none. If generalizing to tuned curves, this may be useful. However, we still
// must double up to the least efficient input, so precomputed tables can only
// save table setup and allow a wider window size.
int ec_point_mul_scalar_batch(const EC_GROUP *group, EC_JACOBIAN *r,
                              const EC_JACOBIAN *p0, const EC_SCALAR *scalar0,
                              const EC_JACOBIAN *p1, const EC_SCALAR *scalar1,
                              const EC_JACOBIAN *p2, const EC_SCALAR *scalar2);

#define EC_MONT_PRECOMP_COMB_SIZE 5

// An |EC_PRECOMP| stores precomputed information about a point, to optimize
// repeated multiplications involving it. It is a union so different
// |EC_METHOD|s can store different information in it.
typedef union {
  EC_AFFINE comb[(1 << EC_MONT_PRECOMP_COMB_SIZE) - 1];
} EC_PRECOMP;

// ec_init_precomp precomputes multiples of |p| and writes the result to |out|.
// It returns one on success and zero on error. The resulting table may be used
// with |ec_point_mul_scalar_precomp|. This function will fail if |p| is the
// point at infinity.
//
// This function is not implemented for all curves. Add implementations as
// needed.
int ec_init_precomp(const EC_GROUP *group, EC_PRECOMP *out,
                    const EC_JACOBIAN *p);

// ec_point_mul_scalar_precomp sets |r| to |p0| * |scalar0| + |p1| * |scalar1| +
// |p2| * |scalar2|. |p1| or |p2| may be NULL to skip the corresponding term.
// The points are represented as |EC_PRECOMP| and must be initialized with
// |ec_init_precomp|. This function runs faster than |ec_point_mul_scalar_batch|
// but requires setup work per input point, so it is only appropriate for points
// which are used frequently.
//
// The inputs are treated as secret, however, this function leaks information
// about whether intermediate computations add a point to itself. Callers must
// ensure that discrete logs between |p0|, |p1|, and |p2| are uniformly
// distributed and independent of the scalars, which should be uniformly
// selected and not under the attackers control. This ensures the doubling case
// will occur with negligible probability.
//
// This function is not implemented for all curves. Add implementations as
// needed.
//
// TODO(davidben): This function does not use base point tables. For now, it is
// only used with the generic |EC_GFp_mont_method| implementation which has
// none. If generalizing to tuned curves, we should add a parameter for the base
// point and arrange for the generic implementation to have base point tables
// available.
int ec_point_mul_scalar_precomp(const EC_GROUP *group, EC_JACOBIAN *r,
                                const EC_PRECOMP *p0, const EC_SCALAR *scalar0,
                                const EC_PRECOMP *p1, const EC_SCALAR *scalar1,
                                const EC_PRECOMP *p2, const EC_SCALAR *scalar2);

// ec_point_mul_scalar_public sets |r| to
// generator * |g_scalar| + |p| * |p_scalar|. It assumes that the inputs are
// public so there is no concern about leaking their values through timing.
OPENSSL_EXPORT int ec_point_mul_scalar_public(const EC_GROUP *group,
                                              EC_JACOBIAN *r,
                                              const EC_SCALAR *g_scalar,
                                              const EC_JACOBIAN *p,
                                              const EC_SCALAR *p_scalar);

// ec_point_mul_scalar_public_batch sets |r| to the sum of generator *
// |g_scalar| and |points[i]| * |scalars[i]| where |points| and |scalars| have
// |num| elements. It assumes that the inputs are public so there is no concern
// about leaking their values through timing. |g_scalar| may be NULL to skip
// that term.
//
// This function is not implemented for all curves. Add implementations as
// needed.
int ec_point_mul_scalar_public_batch(const EC_GROUP *group, EC_JACOBIAN *r,
                                     const EC_SCALAR *g_scalar,
                                     const EC_JACOBIAN *points,
                                     const EC_SCALAR *scalars, size_t num);

// ec_point_select, in constant time, sets |out| to |a| if |mask| is all ones
// and |b| if |mask| is all zeros.
void ec_point_select(const EC_GROUP *group, EC_JACOBIAN *out, BN_ULONG mask,
                     const EC_JACOBIAN *a, const EC_JACOBIAN *b);

// ec_affine_select behaves like |ec_point_select| but acts on affine points.
void ec_affine_select(const EC_GROUP *group, EC_AFFINE *out, BN_ULONG mask,
                      const EC_AFFINE *a, const EC_AFFINE *b);

// ec_precomp_select behaves like |ec_point_select| but acts on |EC_PRECOMP|.
void ec_precomp_select(const EC_GROUP *group, EC_PRECOMP *out, BN_ULONG mask,
                       const EC_PRECOMP *a, const EC_PRECOMP *b);

// ec_cmp_x_coordinate compares the x (affine) coordinate of |p|, mod the group
// order, with |r|. It returns one if the values match and zero if |p| is the
// point at infinity or the values do not match. |p| is treated as public.
int ec_cmp_x_coordinate(const EC_GROUP *group, const EC_JACOBIAN *p,
                        const EC_SCALAR *r);

// ec_get_x_coordinate_as_scalar sets |*out| to |p|'s x-coordinate, modulo
// |group->order|. It returns one on success and zero if |p| is the point at
// infinity.
int ec_get_x_coordinate_as_scalar(const EC_GROUP *group, EC_SCALAR *out,
                                  const EC_JACOBIAN *p);

// ec_get_x_coordinate_as_bytes writes |p|'s affine x-coordinate to |out|, which
// must have at most |max_out| bytes. It sets |*out_len| to the number of bytes
// written. The value is written big-endian and zero-padded to the size of the
// field. This function returns one on success and zero on failure.
int ec_get_x_coordinate_as_bytes(const EC_GROUP *group, uint8_t *out,
                                 size_t *out_len, size_t max_out,
                                 const EC_JACOBIAN *p);

// ec_point_byte_len returns the number of bytes in the byte representation of
// a non-infinity point in |group|, encoded according to |form|, or zero if
// |form| is invalid.
size_t ec_point_byte_len(const EC_GROUP *group, point_conversion_form_t form);

// ec_point_to_bytes encodes |point| according to |form| and writes the result
// to |buf|. It returns the size of the output on success or zero on error. At
// most |max_out| bytes will be written. The buffer should be at least
// |ec_point_byte_len| long to guarantee success.
size_t ec_point_to_bytes(const EC_GROUP *group, const EC_AFFINE *point,
                         point_conversion_form_t form, uint8_t *buf,
                         size_t max_out);

// ec_point_from_uncompressed parses |in| as a point in uncompressed form and
// sets the result to |out|. It returns one on success and zero if the input was
// invalid.
int ec_point_from_uncompressed(const EC_GROUP *group, EC_AFFINE *out,
                               const uint8_t *in, size_t len);

// ec_set_to_safe_point sets |out| to an arbitrary point on |group|, either the
// generator or the point at infinity. This is used to guard against callers of
// external APIs not checking the return value.
void ec_set_to_safe_point(const EC_GROUP *group, EC_JACOBIAN *out);

// ec_affine_jacobian_equal returns one if |a| and |b| represent the same point
// and zero otherwise. It treats both inputs as secret.
int ec_affine_jacobian_equal(const EC_GROUP *group, const EC_AFFINE *a,
                             const EC_JACOBIAN *b);


// Implementation details.

struct ec_method_st {
  // point_get_affine_coordinates sets |*x| and |*y| to the affine coordinates
  // of |p|. Either |x| or |y| may be NULL to omit it. It returns one on success
  // and zero if |p| is the point at infinity. It leaks whether |p| was the
  // point at infinity, but otherwise treats |p| as secret.
  int (*point_get_affine_coordinates)(const EC_GROUP *, const EC_JACOBIAN *p,
                                      EC_FELEM *x, EC_FELEM *y);

  // jacobian_to_affine_batch implements |ec_jacobian_to_affine_batch|.
  int (*jacobian_to_affine_batch)(const EC_GROUP *group, EC_AFFINE *out,
                                  const EC_JACOBIAN *in, size_t num);

  // add sets |r| to |a| + |b|.
  void (*add)(const EC_GROUP *group, EC_JACOBIAN *r, const EC_JACOBIAN *a,
              const EC_JACOBIAN *b);

  // dbl sets |r| to |a| + |a|.
  void (*dbl)(const EC_GROUP *group, EC_JACOBIAN *r, const EC_JACOBIAN *a);

  // mul sets |r| to |scalar|*|p|.
void (*mul)(const EC_GROUP *group, EC_JACOBIAN *r, const EC_JACOBIAN *p, const EC_SCALAR *scalar); // mul_base sets |r| to |scalar|*generator. void (*mul_base)(const EC_GROUP *group, EC_JACOBIAN *r, const EC_SCALAR *scalar); // mul_batch implements |ec_mul_scalar_batch|. void (*mul_batch)(const EC_GROUP *group, EC_JACOBIAN *r, const EC_JACOBIAN *p0, const EC_SCALAR *scalar0, const EC_JACOBIAN *p1, const EC_SCALAR *scalar1, const EC_JACOBIAN *p2, const EC_SCALAR *scalar2); // mul_public sets |r| to |g_scalar|*generator + |p_scalar|*|p|. It assumes // that the inputs are public so there is no concern about leaking their // values through timing. // // This function may be omitted if |mul_public_batch| is provided. void (*mul_public)(const EC_GROUP *group, EC_JACOBIAN *r, const EC_SCALAR *g_scalar, const EC_JACOBIAN *p, const EC_SCALAR *p_scalar); // mul_public_batch implements |ec_point_mul_scalar_public_batch|. int (*mul_public_batch)(const EC_GROUP *group, EC_JACOBIAN *r, const EC_SCALAR *g_scalar, const EC_JACOBIAN *points, const EC_SCALAR *scalars, size_t num); // init_precomp implements |ec_init_precomp|. int (*init_precomp)(const EC_GROUP *group, EC_PRECOMP *out, const EC_JACOBIAN *p); // mul_precomp implements |ec_point_mul_scalar_precomp|. void (*mul_precomp)(const EC_GROUP *group, EC_JACOBIAN *r, const EC_PRECOMP *p0, const EC_SCALAR *scalar0, const EC_PRECOMP *p1, const EC_SCALAR *scalar1, const EC_PRECOMP *p2, const EC_SCALAR *scalar2); // felem_mul and felem_sqr implement multiplication and squaring, // respectively, so that the generic |EC_POINT_add| and |EC_POINT_dbl| // implementations can work both with |EC_GFp_mont_method| and the tuned // operations. // // TODO(davidben): This constrains |EC_FELEM|'s internal representation, adds // many indirect calls in the middle of the generic code, and a bunch of // conversions. If p224-64.c were easily convertable to Montgomery form, we // could say |EC_FELEM| is always in Montgomery form. 
If we routed the rest of // simple.c to |EC_METHOD|, we could give |EC_POINT| an |EC_METHOD|-specific // representation and say |EC_FELEM| is purely a |EC_GFp_mont_method| type. void (*felem_mul)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a, const EC_FELEM *b); void (*felem_sqr)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a); void (*felem_to_bytes)(const EC_GROUP *group, uint8_t *out, size_t *out_len, const EC_FELEM *in); int (*felem_from_bytes)(const EC_GROUP *group, EC_FELEM *out, const uint8_t *in, size_t len); // felem_reduce sets |out| to |words|, reduced modulo the field size, p. // |words| must be less than p^2. |num| must be at most twice the width of p. // This function treats |words| as secret. // // This function is only used in hash-to-curve and may be omitted in curves // that do not support it. void (*felem_reduce)(const EC_GROUP *group, EC_FELEM *out, const BN_ULONG *words, size_t num); // felem_exp sets |out| to |a|^|exp|. It treats |a| is secret but |exp| as // public. // // This function is used in hash-to-curve and may be NULL in curves not used // with hash-to-curve. // // TODO(https://crbug.com/boringssl/567): hash-to-curve uses this as part of // computing a square root, which is what compressed coordinates ultimately // needs to avoid |BIGNUM|. Can we unify this a bit? By generalizing to // arbitrary exponentiation, we also miss an opportunity to use a specialized // addition chain. void (*felem_exp)(const EC_GROUP *group, EC_FELEM *out, const EC_FELEM *a, const BN_ULONG *exp, size_t num_exp); // scalar_inv0_montgomery implements |ec_scalar_inv0_montgomery|. void (*scalar_inv0_montgomery)(const EC_GROUP *group, EC_SCALAR *out, const EC_SCALAR *in); // scalar_to_montgomery_inv_vartime implements // |ec_scalar_to_montgomery_inv_vartime|. int (*scalar_to_montgomery_inv_vartime)(const EC_GROUP *group, EC_SCALAR *out, const EC_SCALAR *in); // cmp_x_coordinate compares the x (affine) coordinate of |p|, mod the group // order, with |r|. 
// It returns one if the values match and zero if |p| is the point at
// infinity or the values do not match.
  int (*cmp_x_coordinate)(const EC_GROUP *group, const EC_JACOBIAN *p,
                          const EC_SCALAR *r);
} /* EC_METHOD */;

const EC_METHOD *EC_GFp_mont_method(void);

struct ec_point_st {
  // group is an owning reference to |group|, unless this is
  // |group->generator|.
  EC_GROUP *group;
  // raw is the group-specific point data. Functions that take |EC_POINT|
  // typically check consistency with |EC_GROUP| while functions that take
  // |EC_JACOBIAN| do not. Thus accesses to this field should be externally
  // checked for consistency.
  EC_JACOBIAN raw;
} /* EC_POINT */;

struct ec_group_st {
  // meth is the backing implementation for this group's arithmetic.
  const EC_METHOD *meth;

  // Unlike all other |EC_POINT|s, |generator| does not own |generator->group|
  // to avoid a reference cycle. Additionally, Z is guaranteed to be one, so X
  // and Y are suitable for use as an |EC_AFFINE|. Before |has_order| is set, Z
  // is one, but X and Y are uninitialized.
  EC_POINT generator;

  // order and field are Montgomery contexts for the group order and the
  // underlying prime field, respectively.
  BN_MONT_CTX order;
  BN_MONT_CTX field;

  EC_FELEM a, b;  // Curve coefficients.

  // comment is a human-readable string describing the curve.
  const char *comment;

  int curve_name;  // optional NID for named curve
  uint8_t oid[9];
  uint8_t oid_len;

  // a_is_minus3 is one if |a| is -3 mod |field| and zero otherwise. Point
  // arithmetic is optimized for -3.
  int a_is_minus3;

  // has_order is one if |generator| and |order| have been initialized.
  int has_order;

  // field_greater_than_order is one if |field| is greater than |order| and
  // zero otherwise.
  int field_greater_than_order;

  CRYPTO_refcount_t references;
} /* EC_GROUP */;

EC_GROUP *ec_group_new(const EC_METHOD *meth, const BIGNUM *p, const BIGNUM *a,
                       const BIGNUM *b, BN_CTX *ctx);

// Generic Montgomery-method implementations of the |EC_METHOD| hooks above.
void ec_GFp_mont_mul(const EC_GROUP *group, EC_JACOBIAN *r,
                     const EC_JACOBIAN *p, const EC_SCALAR *scalar);
void ec_GFp_mont_mul_base(const EC_GROUP *group, EC_JACOBIAN *r,
                          const EC_SCALAR *scalar);
void ec_GFp_mont_mul_batch(const EC_GROUP *group, EC_JACOBIAN *r,
                           const EC_JACOBIAN *p0, const EC_SCALAR *scalar0,
                           const EC_JACOBIAN *p1, const EC_SCALAR *scalar1,
                           const EC_JACOBIAN *p2, const EC_SCALAR *scalar2);
int ec_GFp_mont_init_precomp(const EC_GROUP *group, EC_PRECOMP *out,
                             const EC_JACOBIAN *p);
void ec_GFp_mont_mul_precomp(const EC_GROUP *group, EC_JACOBIAN *r,
                             const EC_PRECOMP *p0, const EC_SCALAR *scalar0,
                             const EC_PRECOMP *p1, const EC_SCALAR *scalar1,
                             const EC_PRECOMP *p2, const EC_SCALAR *scalar2);
void ec_GFp_mont_felem_reduce(const EC_GROUP *group, EC_FELEM *out,
                              const BN_ULONG *words, size_t num);
void ec_GFp_mont_felem_exp(const EC_GROUP *group, EC_FELEM *out,
                           const EC_FELEM *a, const BN_ULONG *exp,
                           size_t num_exp);

// ec_compute_wNAF writes the modified width-(w+1) Non-Adjacent Form (wNAF) of
// |scalar| to |out|. |out| must have room for |bits| + 1 elements, each of
// which will be either zero or odd with an absolute value less than 2^w
// satisfying
//     scalar = \sum_j out[j]*2^j
// where at most one of any w+1 consecutive digits is non-zero
// with the exception that the most significant digit may be only
// w-1 zeros away from that next non-zero digit.
void ec_compute_wNAF(const EC_GROUP *group, int8_t *out,
                     const EC_SCALAR *scalar, size_t bits, int w);

int ec_GFp_mont_mul_public_batch(const EC_GROUP *group, EC_JACOBIAN *r,
                                 const EC_SCALAR *g_scalar,
                                 const EC_JACOBIAN *points,
                                 const EC_SCALAR *scalars, size_t num);

// method functions in simple.c
int ec_GFp_simple_group_set_curve(EC_GROUP *, const BIGNUM *p, const BIGNUM *a,
                                  const BIGNUM *b, BN_CTX *);
int ec_GFp_simple_group_get_curve(const EC_GROUP *, BIGNUM *p, BIGNUM *a,
                                  BIGNUM *b);
void ec_GFp_simple_point_init(EC_JACOBIAN *);
void ec_GFp_simple_point_copy(EC_JACOBIAN *, const EC_JACOBIAN *);
void ec_GFp_simple_point_set_to_infinity(const EC_GROUP *, EC_JACOBIAN *);
void ec_GFp_mont_add(const EC_GROUP *, EC_JACOBIAN *r, const EC_JACOBIAN *a,
                     const EC_JACOBIAN *b);
void ec_GFp_mont_dbl(const EC_GROUP *, EC_JACOBIAN *r, const EC_JACOBIAN *a);
void ec_GFp_simple_invert(const EC_GROUP *, EC_JACOBIAN *);
int ec_GFp_simple_is_at_infinity(const EC_GROUP *, const EC_JACOBIAN *);
int ec_GFp_simple_is_on_curve(const EC_GROUP *, const EC_JACOBIAN *);
int ec_GFp_simple_points_equal(const EC_GROUP *, const EC_JACOBIAN *a,
                               const EC_JACOBIAN *b);
void ec_simple_scalar_inv0_montgomery(const EC_GROUP *group, EC_SCALAR *r,
                                      const EC_SCALAR *a);
int ec_simple_scalar_to_montgomery_inv_vartime(const EC_GROUP *group,
                                               EC_SCALAR *r,
                                               const EC_SCALAR *a);
int ec_GFp_simple_cmp_x_coordinate(const EC_GROUP *group, const EC_JACOBIAN *p,
                                   const EC_SCALAR *r);
void ec_GFp_simple_felem_to_bytes(const EC_GROUP *group, uint8_t *out,
                                  size_t *out_len, const EC_FELEM *in);
int ec_GFp_simple_felem_from_bytes(const EC_GROUP *group, EC_FELEM *out,
                                   const uint8_t *in, size_t len);

// method functions in montgomery.c
void ec_GFp_mont_felem_mul(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a,
                           const EC_FELEM *b);
void ec_GFp_mont_felem_sqr(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a);
void ec_GFp_mont_felem_to_bytes(const EC_GROUP *group, uint8_t *out,
                                size_t *out_len, const EC_FELEM *in);
int ec_GFp_mont_felem_from_bytes(const EC_GROUP *group, EC_FELEM *out,
                                 const uint8_t *in, size_t len);

void ec_GFp_nistp_recode_scalar_bits(crypto_word_t *sign, crypto_word_t *digit,
                                     crypto_word_t in);

const EC_METHOD *EC_GFp_nistp224_method(void);
const EC_METHOD *EC_GFp_nistp256_method(void);

// EC_GFp_nistz256_method is a GFp method using montgomery multiplication, with
// x86-64 optimized P256. See http://eprint.iacr.org/2013/816.
const EC_METHOD *EC_GFp_nistz256_method(void);

// An EC_WRAPPED_SCALAR is an |EC_SCALAR| with a parallel |BIGNUM|
// representation. It exists to support the |EC_KEY_get0_private_key| API.
typedef struct {
  BIGNUM bignum;
  EC_SCALAR scalar;
} EC_WRAPPED_SCALAR;

struct ec_key_st {
  EC_GROUP *group;

  // Ideally |pub_key| would be an |EC_AFFINE| so serializing it does not pay an
  // inversion each time, but the |EC_KEY_get0_public_key| API implies public
  // keys are stored in an |EC_POINT|-compatible form.
  EC_POINT *pub_key;
  EC_WRAPPED_SCALAR *priv_key;

  unsigned int enc_flag;
  point_conversion_form_t conv_form;

  CRYPTO_refcount_t references;

  ECDSA_METHOD *ecdsa_meth;

  CRYPTO_EX_DATA ex_data;
} /* EC_KEY */;

#if defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_EC_INTERNAL_H


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/oct.cc.inc
================================================
/*
 * Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the extraction has stripped the angle-bracketed header names
// from the following #include directives; the tokens are preserved as found.
#include
#include
#include

#include "internal.h"


// ec_point_byte_len returns the number of bytes a point encoded with |form|
// occupies, or zero (with EC_R_INVALID_FORM queued) if |form| is not one of
// the two supported conversion forms.
size_t ec_point_byte_len(const EC_GROUP *group, point_conversion_form_t form) {
  if (form != POINT_CONVERSION_COMPRESSED &&
      form != POINT_CONVERSION_UNCOMPRESSED) {
    OPENSSL_PUT_ERROR(EC, EC_R_INVALID_FORM);
    return 0;
  }

  // Both forms are a one-byte type prefix followed by the x-coordinate.
  const size_t field_len = BN_num_bytes(&group->field.N);
  size_t output_len = 1 /* type byte */ + field_len;
  if (form == POINT_CONVERSION_UNCOMPRESSED) {
    // Uncompressed points have a second coordinate.
    output_len += field_len;
  }
  return output_len;
}

// ec_point_to_bytes encodes the affine point |point| into |buf| according to
// |form| and returns the number of bytes written, or zero on error (invalid
// |form| or |max_out| too small).
size_t ec_point_to_bytes(const EC_GROUP *group, const EC_AFFINE *point,
                         point_conversion_form_t form, uint8_t *buf,
                         size_t max_out) {
  size_t output_len = ec_point_byte_len(group, form);
  if (max_out < output_len) {
    OPENSSL_PUT_ERROR(EC, EC_R_BUFFER_TOO_SMALL);
    return 0;
  }

  size_t field_len;
  ec_felem_to_bytes(group, buf + 1, &field_len, &point->X);
  assert(field_len == BN_num_bytes(&group->field.N));

  if (form == POINT_CONVERSION_UNCOMPRESSED) {
    ec_felem_to_bytes(group, buf + 1 + field_len, &field_len, &point->Y);
    assert(field_len == BN_num_bytes(&group->field.N));
    buf[0] = form;
  } else {
    // The compressed form stores only the parity of y, folded into the type
    // byte. |ec_felem_to_bytes| writes big-endian, so the low bit of y is the
    // last byte written.
    uint8_t y_buf[EC_MAX_BYTES];
    ec_felem_to_bytes(group, y_buf, &field_len, &point->Y);
    buf[0] = form + (y_buf[field_len - 1] & 1);
  }

  return output_len;
}

// ec_point_from_uncompressed parses the uncompressed encoding in |in| and, on
// success, writes the validated affine point to |out|. It returns one on
// success and zero if the length, type byte, coordinate ranges, or curve
// equation check fail.
int ec_point_from_uncompressed(const EC_GROUP *group, EC_AFFINE *out,
                               const uint8_t *in, size_t len) {
  const size_t field_len = BN_num_bytes(&group->field.N);
  if (len != 1 + 2 * field_len || in[0] != POINT_CONVERSION_UNCOMPRESSED) {
    OPENSSL_PUT_ERROR(EC, EC_R_INVALID_ENCODING);
    return 0;
  }

  EC_FELEM x, y;
  if (!ec_felem_from_bytes(group, &x, in + 1, field_len) ||
      !ec_felem_from_bytes(group, &y, in + 1 + field_len, field_len) ||
      !ec_point_set_affine_coordinates(group, out, &x, &y)) {
    return 0;
  }

  return 1;
}

// ec_GFp_simple_oct2point decodes either an uncompressed or a compressed
// point encoding from |buf| into |point|. |ctx| may be NULL, in which case a
// temporary BN_CTX is allocated for the compressed path.
static int ec_GFp_simple_oct2point(const EC_GROUP *group, EC_POINT *point,
                                   const uint8_t *buf, size_t len,
                                   BN_CTX *ctx) {
  if (len == 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_BUFFER_TOO_SMALL);
    return 0;
  }

  // NOTE(review): the extraction has stripped the template arguments from the
  // two static_casts below (presumably the type-byte integer type); the
  // tokens are preserved as found.
  uint8_t form = buf[0];
  if (form == static_cast(POINT_CONVERSION_UNCOMPRESSED)) {
    EC_AFFINE affine;
    if (!ec_point_from_uncompressed(group, &affine, buf, len)) {
      // In the event of an error, defend against the caller not checking the
      // return value by setting a known safe value.
      ec_set_to_safe_point(group, &point->raw);
      return 0;
    }
    ec_affine_to_jacobian(group, &point->raw, &affine);
    return 1;
  }

  // The compressed type byte carries the parity of y in its low bit.
  const int y_bit = form & 1;
  const size_t field_len = BN_num_bytes(&group->field.N);
  form = form & ~1u;
  if (form != static_cast(POINT_CONVERSION_COMPRESSED) ||
      len != 1 /* type byte */ + field_len) {
    OPENSSL_PUT_ERROR(EC, EC_R_INVALID_ENCODING);
    return 0;
  }

  // TODO(davidben): Integrate compressed coordinates with the lower-level EC
  // abstractions. This requires a way to compute square roots, which is tricky
  // for primes which are not 3 (mod 4), namely P-224 and custom curves. P-224's
  // prime is particularly inconvenient for compressed coordinates. See
  // https://cr.yp.to/papers/sqroot.pdf
  BN_CTX *new_ctx = NULL;
  if (ctx == NULL) {
    ctx = new_ctx = BN_CTX_new();
    if (ctx == NULL) {
      return 0;
    }
  }

  int ret = 0;
  BN_CTX_start(ctx);
  BIGNUM *x = BN_CTX_get(ctx);
  if (x == NULL || !BN_bin2bn(buf + 1, field_len, x)) {
    goto err;
  }
  // Reject x-coordinates outside [0, p).
  if (BN_ucmp(x, &group->field.N) >= 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INVALID_ENCODING);
    goto err;
  }

  if (!EC_POINT_set_compressed_coordinates_GFp(group, point, x, y_bit, ctx)) {
    goto err;
  }

  ret = 1;

err:
  BN_CTX_end(ctx);
  BN_CTX_free(new_ctx);
  return ret;
}

// EC_POINT_oct2point checks that |point| belongs to |group| and then defers
// to |ec_GFp_simple_oct2point| for the actual decoding.
int EC_POINT_oct2point(const EC_GROUP *group, EC_POINT *point,
                       const uint8_t *buf, size_t len, BN_CTX *ctx) {
  if (EC_GROUP_cmp(group, point->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }
  return ec_GFp_simple_oct2point(group, point, buf, len, ctx);
}

// EC_POINT_point2oct encodes |point| into |buf| according to |form|. When
// |buf| is NULL it returns the required length without converting the point.
size_t EC_POINT_point2oct(const EC_GROUP *group, const EC_POINT *point,
                          point_conversion_form_t form, uint8_t *buf,
                          size_t max_out, BN_CTX *ctx) {
  if (EC_GROUP_cmp(group, point->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }

  if (buf == NULL) {
    // When |buf| is NULL, just return the number of bytes that would be
    // written, without doing an expensive Jacobian-to-affine conversion.
    if (ec_GFp_simple_is_at_infinity(group, &point->raw)) {
      OPENSSL_PUT_ERROR(EC, EC_R_POINT_AT_INFINITY);
      return 0;
    }
    return ec_point_byte_len(group, form);
  }

  EC_AFFINE affine;
  if (!ec_jacobian_to_affine(group, &affine, &point->raw)) {
    return 0;
  }
  return ec_point_to_bytes(group, &affine, form, buf, max_out);
}

// EC_POINT_point2buf encodes |point| into a freshly allocated buffer, stored
// in |*out_buf| (NULL on failure), and returns its length or zero on error.
// The caller owns the returned buffer.
size_t EC_POINT_point2buf(const EC_GROUP *group, const EC_POINT *point,
                          point_conversion_form_t form, uint8_t **out_buf,
                          BN_CTX *ctx) {
  *out_buf = NULL;
  // First measure, then allocate and encode.
  size_t len = EC_POINT_point2oct(group, point, form, NULL, 0, ctx);
  if (len == 0) {
    return 0;
  }
  // NOTE(review): the extraction has stripped the template argument from this
  // reinterpret_cast (presumably uint8_t*); tokens preserved as found.
  uint8_t *buf = reinterpret_cast(OPENSSL_malloc(len));
  if (buf == NULL) {
    return 0;
  }
  len = EC_POINT_point2oct(group, point, form, buf, len, ctx);
  if (len == 0) {
    OPENSSL_free(buf);
    return 0;
  }
  *out_buf = buf;
  return len;
}

// EC_POINT_set_compressed_coordinates_GFp recovers the y-coordinate with
// parity |y_bit| for the given |x| and sets |point| to the result. It returns
// one on success and zero if |x| is out of range, x^3 + a*x + b is not a
// square, or the recovered point is otherwise invalid.
int EC_POINT_set_compressed_coordinates_GFp(const EC_GROUP *group,
                                            EC_POINT *point, const BIGNUM *x,
                                            int y_bit, BN_CTX *ctx) {
  if (EC_GROUP_cmp(group, point->group, NULL) != 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
    return 0;
  }

  const BIGNUM *field = &group->field.N;
  if (BN_is_negative(x) || BN_cmp(x, field) >= 0) {
    OPENSSL_PUT_ERROR(EC, EC_R_INVALID_COMPRESSED_POINT);
    return 0;
  }

  BN_CTX *new_ctx = NULL;
  int ret = 0;

  // Clear the error queue so the BN_R_NOT_A_SQUARE check below inspects only
  // errors produced by this call.
  ERR_clear_error();

  if (ctx == NULL) {
    ctx = new_ctx = BN_CTX_new();
    if (ctx == NULL) {
      return 0;
    }
  }

  // Normalize |y_bit| to zero or one.
  y_bit = (y_bit != 0);

  BN_CTX_start(ctx);
  BIGNUM *tmp1 = BN_CTX_get(ctx);
  BIGNUM *tmp2 = BN_CTX_get(ctx);
  BIGNUM *a = BN_CTX_get(ctx);
  BIGNUM *b = BN_CTX_get(ctx);
  BIGNUM *y = BN_CTX_get(ctx);
  if (y == NULL || !EC_GROUP_get_curve_GFp(group, NULL, a, b, ctx)) {
    goto err;
  }

  // Recover y. We have a Weierstrass equation
  //     y^2 = x^3 + a*x + b,
  // so y is one of the square roots of x^3 + a*x + b.

  // tmp1 := x^3
  if (!BN_mod_sqr(tmp2, x, field, ctx) ||
      !BN_mod_mul(tmp1, tmp2, x, field, ctx)) {
    goto err;
  }

  // tmp1 := tmp1 + a*x
  if (group->a_is_minus3) {
    // a = -3, so a*x = -(2x + x) is computed with shifts and additions.
    if (!bn_mod_lshift1_consttime(tmp2, x, field, ctx) ||
        !bn_mod_add_consttime(tmp2, tmp2, x, field, ctx) ||
        !bn_mod_sub_consttime(tmp1, tmp1, tmp2, field, ctx)) {
      goto err;
    }
  } else {
    if (!BN_mod_mul(tmp2, a, x, field, ctx) ||
        !bn_mod_add_consttime(tmp1, tmp1, tmp2, field, ctx)) {
      goto err;
    }
  }

  // tmp1 := tmp1 + b
  if (!bn_mod_add_consttime(tmp1, tmp1, b, field, ctx)) {
    goto err;
  }

  if (!BN_mod_sqrt(y, tmp1, field, ctx)) {
    // Distinguish "not a quadratic residue" (an invalid encoding) from other
    // BN-level failures.
    uint32_t err = ERR_peek_last_error();
    if (ERR_GET_LIB(err) == ERR_LIB_BN &&
        ERR_GET_REASON(err) == BN_R_NOT_A_SQUARE) {
      ERR_clear_error();
      OPENSSL_PUT_ERROR(EC, EC_R_INVALID_COMPRESSED_POINT);
    } else {
      OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB);
    }
    goto err;
  }

  if (y_bit != BN_is_odd(y)) {
    // y = 0 has no second root of the other parity; otherwise take p - y.
    if (BN_is_zero(y)) {
      OPENSSL_PUT_ERROR(EC, EC_R_INVALID_COMPRESSION_BIT);
      goto err;
    }
    if (!BN_usub(y, field, y)) {
      goto err;
    }
  }
  if (y_bit != BN_is_odd(y)) {
    OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR);
    goto err;
  }

  if (!EC_POINT_set_affine_coordinates_GFp(group, point, x, y, ctx)) {
    goto err;
  }

  ret = 1;

err:
  BN_CTX_end(ctx);
  BN_CTX_free(new_ctx);
  return ret;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/p224-64.cc.inc
================================================
/* Copyright 2015 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ // A 64-bit implementation of the NIST P-224 elliptic curve point multiplication // // Inspired by Daniel J. Bernstein's public domain nistp224 implementation // and Adam Langley's public domain 64-bit C implementation of curve25519. #include #include #include #include #include #include #include #include "internal.h" #include "../delocate.h" #include "../../internal.h" #if defined(BORINGSSL_HAS_UINT128) && !defined(OPENSSL_SMALL) // Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2 + 2^168*a_3 // using 64-bit coefficients called 'limbs', and sometimes (for multiplication // results) as b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4 + 2^280*b_5 + // 2^336*b_6 using 128-bit coefficients called 'widelimbs'. A 4-p224_limb // representation is an 'p224_felem'; a 7-p224_widelimb representation is a // 'p224_widefelem'. Even within felems, bits of adjacent limbs overlap, and we // don't always reduce the representations: we ensure that inputs to each // p224_felem multiplication satisfy a_i < 2^60, so outputs satisfy b_i < // 4*2^60*2^60, and fit into a 128-bit word without overflow. The coefficients // are then again partially reduced to obtain an p224_felem satisfying a_i < // 2^57. We only reduce to the unique minimal representation at the end of the // computation. typedef uint64_t p224_limb; typedef uint128_t p224_widelimb; typedef p224_limb p224_felem[4]; typedef p224_widelimb p224_widefelem[7]; // Precomputed multiples of the standard generator // Points are given in coordinates (X, Y, Z) where Z normally is 1 // (0 for the point at infinity). // For each field element, slice a_0 is word 0, etc. 
// // The table has 2 * 16 elements, starting with the following: // index | bits | point // ------+---------+------------------------------ // 0 | 0 0 0 0 | 0G // 1 | 0 0 0 1 | 1G // 2 | 0 0 1 0 | 2^56G // 3 | 0 0 1 1 | (2^56 + 1)G // 4 | 0 1 0 0 | 2^112G // 5 | 0 1 0 1 | (2^112 + 1)G // 6 | 0 1 1 0 | (2^112 + 2^56)G // 7 | 0 1 1 1 | (2^112 + 2^56 + 1)G // 8 | 1 0 0 0 | 2^168G // 9 | 1 0 0 1 | (2^168 + 1)G // 10 | 1 0 1 0 | (2^168 + 2^56)G // 11 | 1 0 1 1 | (2^168 + 2^56 + 1)G // 12 | 1 1 0 0 | (2^168 + 2^112)G // 13 | 1 1 0 1 | (2^168 + 2^112 + 1)G // 14 | 1 1 1 0 | (2^168 + 2^112 + 2^56)G // 15 | 1 1 1 1 | (2^168 + 2^112 + 2^56 + 1)G // followed by a copy of this with each element multiplied by 2^28. // // The reason for this is so that we can clock bits into four different // locations when doing simple scalar multiplies against the base point, // and then another four locations using the second 16 elements. static const p224_felem g_p224_pre_comp[2][16][3] = { {{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0x3280d6115c1d21, 0xc1d356c2112234, 0x7f321390b94a03, 0xb70e0cbd6bb4bf}, {0xd5819985007e34, 0x75a05a07476444, 0xfb4c22dfe6cd43, 0xbd376388b5f723}, {1, 0, 0, 0}}, {{0xfd9675666ebbe9, 0xbca7664d40ce5e, 0x2242df8d8a2a43, 0x1f49bbb0f99bc5}, {0x29e0b892dc9c43, 0xece8608436e662, 0xdc858f185310d0, 0x9812dd4eb8d321}, {1, 0, 0, 0}}, {{0x6d3e678d5d8eb8, 0x559eed1cb362f1, 0x16e9a3bbce8a3f, 0xeedcccd8c2a748}, {0xf19f90ed50266d, 0xabf2b4bf65f9df, 0x313865468fafec, 0x5cb379ba910a17}, {1, 0, 0, 0}}, {{0x0641966cab26e3, 0x91fb2991fab0a0, 0xefec27a4e13a0b, 0x0499aa8a5f8ebe}, {0x7510407766af5d, 0x84d929610d5450, 0x81d77aae82f706, 0x6916f6d4338c5b}, {1, 0, 0, 0}}, {{0xea95ac3b1f15c6, 0x086000905e82d4, 0xdd323ae4d1c8b1, 0x932b56be7685a3}, {0x9ef93dea25dbbf, 0x41665960f390f0, 0xfdec76dbe2a8a7, 0x523e80f019062a}, {1, 0, 0, 0}}, {{0x822fdd26732c73, 0xa01c83531b5d0f, 0x363f37347c1ba4, 0xc391b45c84725c}, {0xbbd5e1b2d6ad24, 0xddfbcde19dfaec, 0xc393da7e222a7f, 0x1efb7890ede244}, {1, 
0, 0, 0}}, {{0x4c9e90ca217da1, 0xd11beca79159bb, 0xff8d33c2c98b7c, 0x2610b39409f849}, {0x44d1352ac64da0, 0xcdbb7b2c46b4fb, 0x966c079b753c89, 0xfe67e4e820b112}, {1, 0, 0, 0}}, {{0xe28cae2df5312d, 0xc71b61d16f5c6e, 0x79b7619a3e7c4c, 0x05c73240899b47}, {0x9f7f6382c73e3a, 0x18615165c56bda, 0x641fab2116fd56, 0x72855882b08394}, {1, 0, 0, 0}}, {{0x0469182f161c09, 0x74a98ca8d00fb5, 0xb89da93489a3e0, 0x41c98768fb0c1d}, {0xe5ea05fb32da81, 0x3dce9ffbca6855, 0x1cfe2d3fbf59e6, 0x0e5e03408738a7}, {1, 0, 0, 0}}, {{0xdab22b2333e87f, 0x4430137a5dd2f6, 0xe03ab9f738beb8, 0xcb0c5d0dc34f24}, {0x764a7df0c8fda5, 0x185ba5c3fa2044, 0x9281d688bcbe50, 0xc40331df893881}, {1, 0, 0, 0}}, {{0xb89530796f0f60, 0xade92bd26909a3, 0x1a0c83fb4884da, 0x1765bf22a5a984}, {0x772a9ee75db09e, 0x23bc6c67cec16f, 0x4c1edba8b14e2f, 0xe2a215d9611369}, {1, 0, 0, 0}}, {{0x571e509fb5efb3, 0xade88696410552, 0xc8ae85fada74fe, 0x6c7e4be83bbde3}, {0xff9f51160f4652, 0xb47ce2495a6539, 0xa2946c53b582f4, 0x286d2db3ee9a60}, {1, 0, 0, 0}}, {{0x40bbd5081a44af, 0x0995183b13926c, 0xbcefba6f47f6d0, 0x215619e9cc0057}, {0x8bc94d3b0df45e, 0xf11c54a3694f6f, 0x8631b93cdfe8b5, 0xe7e3f4b0982db9}, {1, 0, 0, 0}}, {{0xb17048ab3e1c7b, 0xac38f36ff8a1d8, 0x1c29819435d2c6, 0xc813132f4c07e9}, {0x2891425503b11f, 0x08781030579fea, 0xf5426ba5cc9674, 0x1e28ebf18562bc}, {1, 0, 0, 0}}, {{0x9f31997cc864eb, 0x06cd91d28b5e4c, 0xff17036691a973, 0xf1aef351497c58}, {0xdd1f2d600564ff, 0xdead073b1402db, 0x74a684435bd693, 0xeea7471f962558}, {1, 0, 0, 0}}}, {{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0x9665266dddf554, 0x9613d78b60ef2d, 0xce27a34cdba417, 0xd35ab74d6afc31}, {0x85ccdd22deb15e, 0x2137e5783a6aab, 0xa141cffd8c93c6, 0x355a1830e90f2d}, {1, 0, 0, 0}}, {{0x1a494eadaade65, 0xd6da4da77fe53c, 0xe7992996abec86, 0x65c3553c6090e3}, {0xfa610b1fb09346, 0xf1c6540b8a4aaf, 0xc51a13ccd3cbab, 0x02995b1b18c28a}, {1, 0, 0, 0}}, {{0x7874568e7295ef, 0x86b419fbe38d04, 0xdc0690a7550d9a, 0xd3966a44beac33}, {0x2b7280ec29132f, 0xbeaa3b6a032df3, 0xdc7dd88ae41200, 
0xd25e2513e3a100}, {1, 0, 0, 0}}, {{0x924857eb2efafd, 0xac2bce41223190, 0x8edaa1445553fc, 0x825800fd3562d5}, {0x8d79148ea96621, 0x23a01c3dd9ed8d, 0xaf8b219f9416b5, 0xd8db0cc277daea}, {1, 0, 0, 0}}, {{0x76a9c3b1a700f0, 0xe9acd29bc7e691, 0x69212d1a6b0327, 0x6322e97fe154be}, {0x469fc5465d62aa, 0x8d41ed18883b05, 0x1f8eae66c52b88, 0xe4fcbe9325be51}, {1, 0, 0, 0}}, {{0x825fdf583cac16, 0x020b857c7b023a, 0x683c17744b0165, 0x14ffd0a2daf2f1}, {0x323b36184218f9, 0x4944ec4e3b47d4, 0xc15b3080841acf, 0x0bced4b01a28bb}, {1, 0, 0, 0}}, {{0x92ac22230df5c4, 0x52f33b4063eda8, 0xcb3f19870c0c93, 0x40064f2ba65233}, {0xfe16f0924f8992, 0x012da25af5b517, 0x1a57bb24f723a6, 0x06f8bc76760def}, {1, 0, 0, 0}}, {{0x4a7084f7817cb9, 0xbcab0738ee9a78, 0x3ec11e11d9c326, 0xdc0fe90e0f1aae}, {0xcf639ea5f98390, 0x5c350aa22ffb74, 0x9afae98a4047b7, 0x956ec2d617fc45}, {1, 0, 0, 0}}, {{0x4306d648c1be6a, 0x9247cd8bc9a462, 0xf5595e377d2f2e, 0xbd1c3caff1a52e}, {0x045e14472409d0, 0x29f3e17078f773, 0x745a602b2d4f7d, 0x191837685cdfbb}, {1, 0, 0, 0}}, {{0x5b6ee254a8cb79, 0x4953433f5e7026, 0xe21faeb1d1def4, 0xc4c225785c09de}, {0x307ce7bba1e518, 0x31b125b1036db8, 0x47e91868839e8f, 0xc765866e33b9f3}, {1, 0, 0, 0}}, {{0x3bfece24f96906, 0x4794da641e5093, 0xde5df64f95db26, 0x297ecd89714b05}, {0x701bd3ebb2c3aa, 0x7073b4f53cb1d5, 0x13c5665658af16, 0x9895089d66fe58}, {1, 0, 0, 0}}, {{0x0fef05f78c4790, 0x2d773633b05d2e, 0x94229c3a951c94, 0xbbbd70df4911bb}, {0xb2c6963d2c1168, 0x105f47a72b0d73, 0x9fdf6111614080, 0x7b7e94b39e67b0}, {1, 0, 0, 0}}, {{0xad1a7d6efbe2b3, 0xf012482c0da69d, 0x6b3bdf12438345, 0x40d7558d7aa4d9}, {0x8a09fffb5c6d3d, 0x9a356e5d9ffd38, 0x5973f15f4f9b1c, 0xdcd5f59f63c3ea}, {1, 0, 0, 0}}, {{0xacf39f4c5ca7ab, 0x4c8071cc5fd737, 0xc64e3602cd1184, 0x0acd4644c9abba}, {0x6c011a36d8bf6e, 0xfecd87ba24e32a, 0x19f6f56574fad8, 0x050b204ced9405}, {1, 0, 0, 0}}, {{0xed4f1cae7d9a96, 0x5ceef7ad94c40a, 0x778e4a3bf3ef9b, 0x7405783dc3b55e}, {0x32477c61b6e8c6, 0xb46a97570f018b, 0x91176d0a7e95d1, 0x3df90fbc4c7d0e}, {1, 0, 0, 
0}}}};

// Helper functions to convert field elements to/from internal representation

// p224_generic_to_felem repacks the four 64-bit |EC_FELEM| words of |in| into
// the four 56-bit limbs of a |p224_felem|.
static void p224_generic_to_felem(p224_felem out, const EC_FELEM *in) {
  // |p224_felem|'s minimal representation uses four 56-bit words. |EC_FELEM|
  // uses four 64-bit words. (The top-most word only has 32 bits.)
  out[0] = in->words[0] & 0x00ffffffffffffff;
  out[1] = ((in->words[0] >> 56) | (in->words[1] << 8)) & 0x00ffffffffffffff;
  out[2] = ((in->words[1] >> 48) | (in->words[2] << 16)) & 0x00ffffffffffffff;
  out[3] = ((in->words[2] >> 40) | (in->words[3] << 24)) & 0x00ffffffffffffff;
}

// p224_felem_to_generic reduces |in| to the unique minimal representation
// (below p) using only constant-time masked arithmetic and repacks it into
// |out|'s 64-bit words.
// Requires 0 <= in < 2*p (always call p224_felem_reduce first)
static void p224_felem_to_generic(EC_FELEM *out, const p224_felem in) {
  // Reduce to unique minimal representation.
  static const int64_t two56 = ((p224_limb)1) << 56;
  // 0 <= in < 2*p, p = 2^224 - 2^96 + 1
  // if in > p , reduce in = in - 2^224 + 2^96 - 1
  int64_t tmp[4], a;
  tmp[0] = in[0];
  tmp[1] = in[1];
  tmp[2] = in[2];
  tmp[3] = in[3];
  // Case 1: a = 1 iff in >= 2^224
  a = (in[3] >> 56);
  tmp[0] -= a;
  tmp[1] += a << 40;
  tmp[3] &= 0x00ffffffffffffff;
  // Case 2: a = 0 iff p <= in < 2^224, i.e., the high 128 bits are all 1 and
  // the lower part is non-zero
  a = ((in[3] & in[2] & (in[1] | 0x000000ffffffffff)) + 1) |
      (((int64_t)(in[0] + (in[1] & 0x000000ffffffffff)) - 1) >> 63);
  a &= 0x00ffffffffffffff;
  // turn a into an all-one mask (if a = 0) or an all-zero mask
  a = (a - 1) >> 63;
  // subtract 2^224 - 2^96 + 1 if a is all-one
  tmp[3] &= a ^ 0xffffffffffffffff;
  tmp[2] &= a ^ 0xffffffffffffffff;
  tmp[1] &= (a ^ 0xffffffffffffffff) | 0x000000ffffffffff;
  tmp[0] -= 1 & a;

  // eliminate negative coefficients: if tmp[0] is negative, tmp[1] must
  // be non-zero, so we only need one step
  a = tmp[0] >> 63;
  tmp[0] += two56 & a;
  tmp[1] -= 1 & a;

  // carry 1 -> 2 -> 3
  tmp[2] += tmp[1] >> 56;
  tmp[1] &= 0x00ffffffffffffff;
  tmp[3] += tmp[2] >> 56;
  tmp[2] &= 0x00ffffffffffffff;

  // Now 0 <= tmp < p
  p224_felem tmp2;
  tmp2[0] = tmp[0];
  tmp2[1] = tmp[1];
  tmp2[2] = tmp[2];
  tmp2[3] = tmp[3];

  // |p224_felem|'s minimal representation uses four 56-bit words. |EC_FELEM|
  // uses four 64-bit words. (The top-most word only has 32 bits.)
  out->words[0] = tmp2[0] | (tmp2[1] << 56);
  out->words[1] = (tmp2[1] >> 8) | (tmp2[2] << 48);
  out->words[2] = (tmp2[2] >> 16) | (tmp2[3] << 40);
  out->words[3] = tmp2[3] >> 24;
}

// Field operations, using the internal representation of field elements.
// NB! These operations are specific to our point multiplication and cannot be
// expected to be correct in general - e.g., multiplication with a large scalar
// will cause an overflow.

// Copy a field element: out = in
static void p224_felem_assign(p224_felem out, const p224_felem in) {
  out[0] = in[0];
  out[1] = in[1];
  out[2] = in[2];
  out[3] = in[3];
}

// Sum two field elements: out += in
static void p224_felem_sum(p224_felem out, const p224_felem in) {
  out[0] += in[0];
  out[1] += in[1];
  out[2] += in[2];
  out[3] += in[3];
}

// Subtract field elements: out -= in
// Assumes in[i] < 2^57
static void p224_felem_diff(p224_felem out, const p224_felem in) {
  // These constants sum to a multiple of p, shaped so each limb of the offset
  // dominates the corresponding limb of |in| and the result stays positive.
  static const p224_limb two58p2 =
      (((p224_limb)1) << 58) + (((p224_limb)1) << 2);
  static const p224_limb two58m2 =
      (((p224_limb)1) << 58) - (((p224_limb)1) << 2);
  static const p224_limb two58m42m2 =
      (((p224_limb)1) << 58) - (((p224_limb)1) << 42) - (((p224_limb)1) << 2);

  // Add 0 mod 2^224-2^96+1 to ensure out > in
  out[0] += two58p2;
  out[1] += two58m42m2;
  out[2] += two58m2;
  out[3] += two58m2;

  out[0] -= in[0];
  out[1] -= in[1];
  out[2] -= in[2];
  out[3] -= in[3];
}

// Subtract in unreduced 128-bit mode: out -= in
// Assumes in[i] < 2^119
static void p224_widefelem_diff(p224_widefelem out, const p224_widefelem in) {
  static const p224_widelimb two120 = ((p224_widelimb)1) << 120;
  static const p224_widelimb two120m64 =
      (((p224_widelimb)1) << 120) - (((p224_widelimb)1) << 64);
  static const p224_widelimb two120m104m64 = (((p224_widelimb)1) << 120) -
                                             (((p224_widelimb)1) << 104) -
                                             (((p224_widelimb)1) << 64);

  // Add 0 mod 2^224-2^96+1 to ensure out > in
  out[0] += two120;
  out[1] += two120m64;
  out[2] += two120m64;
  out[3] += two120;
  out[4] += two120m104m64;
  out[5] += two120m64;
  out[6] += two120m64;

  out[0] -= in[0];
  out[1] -= in[1];
  out[2] -= in[2];
  out[3] -= in[3];
  out[4] -= in[4];
  out[5] -= in[5];
  out[6] -= in[6];
}

// Subtract in mixed mode: out128 -= in64
// in[i] < 2^63
static void p224_felem_diff_128_64(p224_widefelem out, const p224_felem in) {
  static const p224_widelimb two64p8 =
      (((p224_widelimb)1) << 64) + (((p224_widelimb)1) << 8);
  static const p224_widelimb two64m8 =
      (((p224_widelimb)1) << 64) - (((p224_widelimb)1) << 8);
  static const p224_widelimb two64m48m8 = (((p224_widelimb)1) << 64) -
                                          (((p224_widelimb)1) << 48) -
                                          (((p224_widelimb)1) << 8);

  // Add 0 mod 2^224-2^96+1 to ensure out > in
  out[0] += two64p8;
  out[1] += two64m48m8;
  out[2] += two64m8;
  out[3] += two64m8;

  out[0] -= in[0];
  out[1] -= in[1];
  out[2] -= in[2];
  out[3] -= in[3];
}

// Multiply a field element by a scalar: out = out * scalar
// The scalars we actually use are small, so results fit without overflow
static void p224_felem_scalar(p224_felem out, const p224_limb scalar) {
  out[0] *= scalar;
  out[1] *= scalar;
  out[2] *= scalar;
  out[3] *= scalar;
}

// Multiply an unreduced field element by a scalar: out = out * scalar
// The scalars we actually use are small, so results fit without overflow
static void p224_widefelem_scalar(p224_widefelem out,
                                  const p224_widelimb scalar) {
  out[0] *= scalar;
  out[1] *= scalar;
  out[2] *= scalar;
  out[3] *= scalar;
  out[4] *= scalar;
  out[5] *= scalar;
  out[6] *= scalar;
}

// Square a field element: out = in^2
static void p224_felem_square(p224_widefelem out, const p224_felem in) {
  p224_limb tmp0, tmp1, tmp2;
  tmp0 = 2 * in[0];
  tmp1 = 2 * in[1];
  tmp2 = 2 * in[2];
  out[0] = ((p224_widelimb)in[0]) * in[0];
  out[1] = ((p224_widelimb)in[0]) * tmp1;
  out[2] = ((p224_widelimb)in[0]) * tmp2 + ((p224_widelimb)in[1]) * in[1];
  out[3] = ((p224_widelimb)in[3]) * tmp0 + ((p224_widelimb)in[1]) * tmp2;
  out[4] = ((p224_widelimb)in[3]) * tmp1 +
((p224_widelimb)in[2]) * in[2]; out[5] = ((p224_widelimb)in[3]) * tmp2; out[6] = ((p224_widelimb)in[3]) * in[3]; } // Multiply two field elements: out = in1 * in2 static void p224_felem_mul(p224_widefelem out, const p224_felem in1, const p224_felem in2) { out[0] = ((p224_widelimb)in1[0]) * in2[0]; out[1] = ((p224_widelimb)in1[0]) * in2[1] + ((p224_widelimb)in1[1]) * in2[0]; out[2] = ((p224_widelimb)in1[0]) * in2[2] + ((p224_widelimb)in1[1]) * in2[1] + ((p224_widelimb)in1[2]) * in2[0]; out[3] = ((p224_widelimb)in1[0]) * in2[3] + ((p224_widelimb)in1[1]) * in2[2] + ((p224_widelimb)in1[2]) * in2[1] + ((p224_widelimb)in1[3]) * in2[0]; out[4] = ((p224_widelimb)in1[1]) * in2[3] + ((p224_widelimb)in1[2]) * in2[2] + ((p224_widelimb)in1[3]) * in2[1]; out[5] = ((p224_widelimb)in1[2]) * in2[3] + ((p224_widelimb)in1[3]) * in2[2]; out[6] = ((p224_widelimb)in1[3]) * in2[3]; } // Reduce seven 128-bit coefficients to four 64-bit coefficients. // Requires in[i] < 2^126, // ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 static void p224_felem_reduce(p224_felem out, const p224_widefelem in) { static const p224_widelimb two127p15 = (((p224_widelimb)1) << 127) + (((p224_widelimb)1) << 15); static const p224_widelimb two127m71 = (((p224_widelimb)1) << 127) - (((p224_widelimb)1) << 71); static const p224_widelimb two127m71m55 = (((p224_widelimb)1) << 127) - (((p224_widelimb)1) << 71) - (((p224_widelimb)1) << 55); p224_widelimb output[5]; // Add 0 mod 2^224-2^96+1 to ensure all differences are positive output[0] = in[0] + two127p15; output[1] = in[1] + two127m71m55; output[2] = in[2] + two127m71; output[3] = in[3]; output[4] = in[4]; // Eliminate in[4], in[5], in[6] output[4] += in[6] >> 16; output[3] += (in[6] & 0xffff) << 40; output[2] -= in[6]; output[3] += in[5] >> 16; output[2] += (in[5] & 0xffff) << 40; output[1] -= in[5]; output[2] += output[4] >> 16; output[1] += (output[4] & 0xffff) << 40; output[0] -= output[4]; // Carry 2 -> 3 -> 4 output[3] += 
output[2] >> 56; output[2] &= 0x00ffffffffffffff; output[4] = output[3] >> 56; output[3] &= 0x00ffffffffffffff; // Now output[2] < 2^56, output[3] < 2^56, output[4] < 2^72 // Eliminate output[4] output[2] += output[4] >> 16; // output[2] < 2^56 + 2^56 = 2^57 output[1] += (output[4] & 0xffff) << 40; output[0] -= output[4]; // Carry 0 -> 1 -> 2 -> 3 output[1] += output[0] >> 56; out[0] = output[0] & 0x00ffffffffffffff; output[2] += output[1] >> 56; // output[2] < 2^57 + 2^72 out[1] = output[1] & 0x00ffffffffffffff; output[3] += output[2] >> 56; // output[3] <= 2^56 + 2^16 out[2] = output[2] & 0x00ffffffffffffff; // out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, // out[3] <= 2^56 + 2^16 (due to final carry), // so out < 2*p out[3] = output[3]; } // Get negative value: out = -in // Requires in[i] < 2^63, // ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 static void p224_felem_neg(p224_felem out, const p224_felem in) { p224_widefelem tmp = {0}; p224_felem_diff_128_64(tmp, in); p224_felem_reduce(out, tmp); } // Zero-check: returns 1 if input is 0, and 0 otherwise. 
We know that field // elements are reduced to in < 2^225, so we only need to check three cases: 0, // 2^224 - 2^96 + 1, and 2^225 - 2^97 + 2 static p224_limb p224_felem_is_zero(const p224_felem in) { p224_limb zero = in[0] | in[1] | in[2] | in[3]; zero = (((int64_t)(zero)-1) >> 63) & 1; p224_limb two224m96p1 = (in[0] ^ 1) | (in[1] ^ 0x00ffff0000000000) | (in[2] ^ 0x00ffffffffffffff) | (in[3] ^ 0x00ffffffffffffff); two224m96p1 = (((int64_t)(two224m96p1)-1) >> 63) & 1; p224_limb two225m97p2 = (in[0] ^ 2) | (in[1] ^ 0x00fffe0000000000) | (in[2] ^ 0x00ffffffffffffff) | (in[3] ^ 0x01ffffffffffffff); two225m97p2 = (((int64_t)(two225m97p2)-1) >> 63) & 1; return (zero | two224m96p1 | two225m97p2); } // Invert a field element // Computation chain copied from djb's code static void p224_felem_inv(p224_felem out, const p224_felem in) { p224_felem ftmp, ftmp2, ftmp3, ftmp4; p224_widefelem tmp; p224_felem_square(tmp, in); p224_felem_reduce(ftmp, tmp); // 2 p224_felem_mul(tmp, in, ftmp); p224_felem_reduce(ftmp, tmp); // 2^2 - 1 p224_felem_square(tmp, ftmp); p224_felem_reduce(ftmp, tmp); // 2^3 - 2 p224_felem_mul(tmp, in, ftmp); p224_felem_reduce(ftmp, tmp); // 2^3 - 1 p224_felem_square(tmp, ftmp); p224_felem_reduce(ftmp2, tmp); // 2^4 - 2 p224_felem_square(tmp, ftmp2); p224_felem_reduce(ftmp2, tmp); // 2^5 - 4 p224_felem_square(tmp, ftmp2); p224_felem_reduce(ftmp2, tmp); // 2^6 - 8 p224_felem_mul(tmp, ftmp2, ftmp); p224_felem_reduce(ftmp, tmp); // 2^6 - 1 p224_felem_square(tmp, ftmp); p224_felem_reduce(ftmp2, tmp); // 2^7 - 2 for (size_t i = 0; i < 5; ++i) { // 2^12 - 2^6 p224_felem_square(tmp, ftmp2); p224_felem_reduce(ftmp2, tmp); } p224_felem_mul(tmp, ftmp2, ftmp); p224_felem_reduce(ftmp2, tmp); // 2^12 - 1 p224_felem_square(tmp, ftmp2); p224_felem_reduce(ftmp3, tmp); // 2^13 - 2 for (size_t i = 0; i < 11; ++i) { // 2^24 - 2^12 p224_felem_square(tmp, ftmp3); p224_felem_reduce(ftmp3, tmp); } p224_felem_mul(tmp, ftmp3, ftmp2); p224_felem_reduce(ftmp2, tmp); // 2^24 - 1 
p224_felem_square(tmp, ftmp2); p224_felem_reduce(ftmp3, tmp); // 2^25 - 2 for (size_t i = 0; i < 23; ++i) { // 2^48 - 2^24 p224_felem_square(tmp, ftmp3); p224_felem_reduce(ftmp3, tmp); } p224_felem_mul(tmp, ftmp3, ftmp2); p224_felem_reduce(ftmp3, tmp); // 2^48 - 1 p224_felem_square(tmp, ftmp3); p224_felem_reduce(ftmp4, tmp); // 2^49 - 2 for (size_t i = 0; i < 47; ++i) { // 2^96 - 2^48 p224_felem_square(tmp, ftmp4); p224_felem_reduce(ftmp4, tmp); } p224_felem_mul(tmp, ftmp3, ftmp4); p224_felem_reduce(ftmp3, tmp); // 2^96 - 1 p224_felem_square(tmp, ftmp3); p224_felem_reduce(ftmp4, tmp); // 2^97 - 2 for (size_t i = 0; i < 23; ++i) { // 2^120 - 2^24 p224_felem_square(tmp, ftmp4); p224_felem_reduce(ftmp4, tmp); } p224_felem_mul(tmp, ftmp2, ftmp4); p224_felem_reduce(ftmp2, tmp); // 2^120 - 1 for (size_t i = 0; i < 6; ++i) { // 2^126 - 2^6 p224_felem_square(tmp, ftmp2); p224_felem_reduce(ftmp2, tmp); } p224_felem_mul(tmp, ftmp2, ftmp); p224_felem_reduce(ftmp, tmp); // 2^126 - 1 p224_felem_square(tmp, ftmp); p224_felem_reduce(ftmp, tmp); // 2^127 - 2 p224_felem_mul(tmp, ftmp, in); p224_felem_reduce(ftmp, tmp); // 2^127 - 1 for (size_t i = 0; i < 97; ++i) { // 2^224 - 2^97 p224_felem_square(tmp, ftmp); p224_felem_reduce(ftmp, tmp); } p224_felem_mul(tmp, ftmp, ftmp3); p224_felem_reduce(out, tmp); // 2^224 - 2^96 - 1 } // Copy in constant time: // if icopy == 1, copy in to out, // if icopy == 0, copy out to itself. static void p224_copy_conditional(p224_felem out, const p224_felem in, p224_limb icopy) { // icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one const p224_limb copy = -icopy; for (size_t i = 0; i < 4; ++i) { const p224_limb tmp = copy & (in[i] ^ out[i]); out[i] ^= tmp; } } // ELLIPTIC CURVE POINT OPERATIONS // // Points are represented in Jacobian projective coordinates: // (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3), // or to the point at infinity if Z == 0. 
// Double an elliptic curve point: // (X', Y', Z') = 2 * (X, Y, Z), where // X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2 // Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^2 // Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z // Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed, // while x_out == y_in is not (maybe this works, but it's not tested). static void p224_point_double(p224_felem x_out, p224_felem y_out, p224_felem z_out, const p224_felem x_in, const p224_felem y_in, const p224_felem z_in) { p224_widefelem tmp, tmp2; p224_felem delta, gamma, beta, alpha, ftmp, ftmp2; p224_felem_assign(ftmp, x_in); p224_felem_assign(ftmp2, x_in); // delta = z^2 p224_felem_square(tmp, z_in); p224_felem_reduce(delta, tmp); // gamma = y^2 p224_felem_square(tmp, y_in); p224_felem_reduce(gamma, tmp); // beta = x*gamma p224_felem_mul(tmp, x_in, gamma); p224_felem_reduce(beta, tmp); // alpha = 3*(x-delta)*(x+delta) p224_felem_diff(ftmp, delta); // ftmp[i] < 2^57 + 2^58 + 2 < 2^59 p224_felem_sum(ftmp2, delta); // ftmp2[i] < 2^57 + 2^57 = 2^58 p224_felem_scalar(ftmp2, 3); // ftmp2[i] < 3 * 2^58 < 2^60 p224_felem_mul(tmp, ftmp, ftmp2); // tmp[i] < 2^60 * 2^59 * 4 = 2^121 p224_felem_reduce(alpha, tmp); // x' = alpha^2 - 8*beta p224_felem_square(tmp, alpha); // tmp[i] < 4 * 2^57 * 2^57 = 2^116 p224_felem_assign(ftmp, beta); p224_felem_scalar(ftmp, 8); // ftmp[i] < 8 * 2^57 = 2^60 p224_felem_diff_128_64(tmp, ftmp); // tmp[i] < 2^116 + 2^64 + 8 < 2^117 p224_felem_reduce(x_out, tmp); // z' = (y + z)^2 - gamma - delta p224_felem_sum(delta, gamma); // delta[i] < 2^57 + 2^57 = 2^58 p224_felem_assign(ftmp, y_in); p224_felem_sum(ftmp, z_in); // ftmp[i] < 2^57 + 2^57 = 2^58 p224_felem_square(tmp, ftmp); // tmp[i] < 4 * 2^58 * 2^58 = 2^118 p224_felem_diff_128_64(tmp, delta); // tmp[i] < 2^118 + 2^64 + 8 < 2^119 p224_felem_reduce(z_out, tmp); // y' = alpha*(4*beta - x') - 8*gamma^2 p224_felem_scalar(beta, 4); // beta[i] < 4 * 2^57 = 2^59 p224_felem_diff(beta, x_out); 
// beta[i] < 2^59 + 2^58 + 2 < 2^60 p224_felem_mul(tmp, alpha, beta); // tmp[i] < 4 * 2^57 * 2^60 = 2^119 p224_felem_square(tmp2, gamma); // tmp2[i] < 4 * 2^57 * 2^57 = 2^116 p224_widefelem_scalar(tmp2, 8); // tmp2[i] < 8 * 2^116 = 2^119 p224_widefelem_diff(tmp, tmp2); // tmp[i] < 2^119 + 2^120 < 2^121 p224_felem_reduce(y_out, tmp); } // Add two elliptic curve points: // (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where // X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 - // 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2 // Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * // X_1)^2 - X_3) - // Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3 // Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2) // // This runs faster if 'mixed' is set, which requires Z_2 = 1 or Z_2 = 0. // This function is not entirely constant-time: it includes a branch for // checking whether the two input points are equal, (while not equal to the // point at infinity). This case never happens during single point // multiplication, so there is no timing leak for ECDH or ECDSA signing. 
static void p224_point_add(p224_felem x3, p224_felem y3, p224_felem z3,
                           const p224_felem x1, const p224_felem y1,
                           const p224_felem z1, const int mixed,
                           const p224_felem x2, const p224_felem y2,
                           const p224_felem z2) {
  p224_felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, x_out, y_out, z_out;
  p224_widefelem tmp, tmp2;
  p224_limb z1_is_zero, z2_is_zero, x_equal, y_equal;

  if (!mixed) {
    // ftmp2 = z2^2
    p224_felem_square(tmp, z2);
    p224_felem_reduce(ftmp2, tmp);

    // ftmp4 = z2^3
    p224_felem_mul(tmp, ftmp2, z2);
    p224_felem_reduce(ftmp4, tmp);

    // ftmp4 = z2^3*y1
    p224_felem_mul(tmp2, ftmp4, y1);
    p224_felem_reduce(ftmp4, tmp2);

    // ftmp2 = z2^2*x1
    p224_felem_mul(tmp2, ftmp2, x1);
    p224_felem_reduce(ftmp2, tmp2);
  } else {
    // We'll assume z2 = 1 (special case z2 = 0 is handled later)

    // ftmp4 = z2^3*y1
    p224_felem_assign(ftmp4, y1);

    // ftmp2 = z2^2*x1
    p224_felem_assign(ftmp2, x1);
  }

  // ftmp = z1^2
  p224_felem_square(tmp, z1);
  p224_felem_reduce(ftmp, tmp);

  // ftmp3 = z1^3
  p224_felem_mul(tmp, ftmp, z1);
  p224_felem_reduce(ftmp3, tmp);

  // tmp = z1^3*y2
  p224_felem_mul(tmp, ftmp3, y2);
  // tmp[i] < 4 * 2^57 * 2^57 = 2^116

  // ftmp3 = z1^3*y2 - z2^3*y1
  p224_felem_diff_128_64(tmp, ftmp4);
  // tmp[i] < 2^116 + 2^64 + 8 < 2^117
  p224_felem_reduce(ftmp3, tmp);

  // tmp = z1^2*x2
  p224_felem_mul(tmp, ftmp, x2);
  // tmp[i] < 4 * 2^57 * 2^57 = 2^116

  // ftmp = z1^2*x2 - z2^2*x1
  p224_felem_diff_128_64(tmp, ftmp2);
  // tmp[i] < 2^116 + 2^64 + 8 < 2^117
  p224_felem_reduce(ftmp, tmp);

  // The formulae are incorrect if the points are equal, so we check for this
  // and do doubling if this happens.
  x_equal = p224_felem_is_zero(ftmp);
  y_equal = p224_felem_is_zero(ftmp3);
  z1_is_zero = p224_felem_is_zero(z1);
  z2_is_zero = p224_felem_is_zero(z2);

  // In affine coordinates, (X_1, Y_1) == (X_2, Y_2)
  p224_limb is_nontrivial_double =
      x_equal & y_equal & (1 - z1_is_zero) & (1 - z2_is_zero);
  if (constant_time_declassify_w(is_nontrivial_double)) {
    p224_point_double(x3, y3, z3, x1, y1, z1);
    return;
  }

  // ftmp5 = z1*z2
  if (!mixed) {
    p224_felem_mul(tmp, z1, z2);
    p224_felem_reduce(ftmp5, tmp);
  } else {
    // special case z2 = 0 is handled later
    p224_felem_assign(ftmp5, z1);
  }

  // z_out = (z1^2*x2 - z2^2*x1)*(z1*z2)
  p224_felem_mul(tmp, ftmp, ftmp5);
  p224_felem_reduce(z_out, tmp);

  // ftmp = (z1^2*x2 - z2^2*x1)^2
  p224_felem_assign(ftmp5, ftmp);
  p224_felem_square(tmp, ftmp);
  p224_felem_reduce(ftmp, tmp);

  // ftmp5 = (z1^2*x2 - z2^2*x1)^3
  p224_felem_mul(tmp, ftmp, ftmp5);
  p224_felem_reduce(ftmp5, tmp);

  // ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2
  p224_felem_mul(tmp, ftmp2, ftmp);
  p224_felem_reduce(ftmp2, tmp);

  // tmp = z2^3*y1*(z1^2*x2 - z2^2*x1)^3
  p224_felem_mul(tmp, ftmp4, ftmp5);
  // tmp[i] < 4 * 2^57 * 2^57 = 2^116

  // tmp2 = (z1^3*y2 - z2^3*y1)^2
  p224_felem_square(tmp2, ftmp3);
  // tmp2[i] < 4 * 2^57 * 2^57 < 2^116

  // tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3
  p224_felem_diff_128_64(tmp2, ftmp5);
  // tmp2[i] < 2^116 + 2^64 + 8 < 2^117

  // ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2
  p224_felem_assign(ftmp5, ftmp2);
  p224_felem_scalar(ftmp5, 2);
  // ftmp5[i] < 2 * 2^57 = 2^58

  /* x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
     2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
  p224_felem_diff_128_64(tmp2, ftmp5);
  // tmp2[i] < 2^117 + 2^64 + 8 < 2^118
  p224_felem_reduce(x_out, tmp2);

  // ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out
  p224_felem_diff(ftmp2, x_out);
  // ftmp2[i] < 2^57 + 2^58 + 2 < 2^59

  // tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out)
  p224_felem_mul(tmp2, ftmp3, ftmp2);
  // tmp2[i] < 4 * 2^57 * 2^59 = 2^118

  /* y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) -
     z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
  p224_widefelem_diff(tmp2, tmp);
  // tmp2[i] < 2^118 + 2^120 < 2^121
  p224_felem_reduce(y_out, tmp2);

  // the result (x_out, y_out, z_out) is incorrect if one of the inputs is
  // the point at infinity, so we need to check for this separately

  // if point 1 is at infinity, copy point 2 to output, and vice versa
  // (constant-time selects; covers the mixed-mode z2 = 0 special case too)
  p224_copy_conditional(x_out, x2, z1_is_zero);
  p224_copy_conditional(x_out, x1, z2_is_zero);
  p224_copy_conditional(y_out, y2, z1_is_zero);
  p224_copy_conditional(y_out, y1, z2_is_zero);
  p224_copy_conditional(z_out, z2, z1_is_zero);
  p224_copy_conditional(z_out, z1, z2_is_zero);
  p224_felem_assign(x3, x_out);
  p224_felem_assign(y3, y_out);
  p224_felem_assign(z3, z_out);
}

// p224_select_point selects the |idx|th point from a precomputation table and
// copies it to out, scanning the whole table so the access pattern is
// independent of |idx|.
static void p224_select_point(const uint64_t idx, size_t size,
                              const p224_felem pre_comp[/*size*/][3],
                              p224_felem out[3]) {
  p224_limb *outlimbs = &out[0][0];
  OPENSSL_memset(outlimbs, 0, 3 * sizeof(p224_felem));

  for (size_t i = 0; i < size; i++) {
    const p224_limb *inlimbs = &pre_comp[i][0][0];
    static_assert(sizeof(uint64_t) <= sizeof(crypto_word_t),
                  "crypto_word_t too small");
    static_assert(sizeof(size_t) <= sizeof(crypto_word_t),
                  "crypto_word_t too small");
    // Without a value barrier, Clang adds a branch here.
    uint64_t mask = value_barrier_w(constant_time_eq_w(i, idx));
    for (size_t j = 0; j < 4 * 3; j++) {
      outlimbs[j] |= inlimbs[j] & mask;
    }
  }
}

// p224_get_bit returns the |i|th bit in |in|.
// Returns 0 for any out-of-range index (i >= 224), which the point
// multiplication loops below rely on when they read past either end of the
// scalar.
static crypto_word_t p224_get_bit(const EC_SCALAR *in, size_t i) {
  if (i >= 224) {
    return 0;
  }
  static_assert(sizeof(in->words[0]) == 8, "BN_ULONG is not 64-bit");
  return (in->words[i >> 6] >> (i & 63)) & 1;
}

// Takes the Jacobian coordinates (X, Y, Z) of a point and returns
// (X', Y') = (X/Z^2, Y/Z^3)
static int ec_GFp_nistp224_point_get_affine_coordinates(
    const EC_GROUP *group, const EC_JACOBIAN *point, EC_FELEM *x, EC_FELEM *y) {
  if (constant_time_declassify_int(
          ec_GFp_simple_is_at_infinity(group, point))) {
    OPENSSL_PUT_ERROR(EC, EC_R_POINT_AT_INFINITY);
    return 0;
  }

  // z1 = 1/Z^2, and z2 holds 1/Z for the later 1/Z^3 computation.
  p224_felem z1, z2;
  p224_widefelem tmp;
  p224_generic_to_felem(z1, &point->Z);
  p224_felem_inv(z2, z1);
  p224_felem_square(tmp, z2);
  p224_felem_reduce(z1, tmp);

  if (x != NULL) {
    p224_felem x_in, x_out;
    p224_generic_to_felem(x_in, &point->X);
    p224_felem_mul(tmp, x_in, z1);
    p224_felem_reduce(x_out, tmp);
    p224_felem_to_generic(x, x_out);
  }

  if (y != NULL) {
    p224_felem y_in, y_out;
    p224_generic_to_felem(y_in, &point->Y);
    // z1 = (1/Z^2) * (1/Z) = 1/Z^3
    p224_felem_mul(tmp, z1, z2);
    p224_felem_reduce(z1, tmp);
    p224_felem_mul(tmp, y_in, z1);
    p224_felem_reduce(y_out, tmp);
    p224_felem_to_generic(y, y_out);
  }

  return 1;
}

static void ec_GFp_nistp224_add(const EC_GROUP *group, EC_JACOBIAN *r,
                                const EC_JACOBIAN *a, const EC_JACOBIAN *b) {
  p224_felem x1, y1, z1, x2, y2, z2;
  p224_generic_to_felem(x1, &a->X);
  p224_generic_to_felem(y1, &a->Y);
  p224_generic_to_felem(z1, &a->Z);
  p224_generic_to_felem(x2, &b->X);
  p224_generic_to_felem(y2, &b->Y);
  p224_generic_to_felem(z2, &b->Z);
  p224_point_add(x1, y1, z1, x1, y1, z1, 0 /* both Jacobian */, x2, y2, z2);
  // The outputs are already reduced, but still need to be contracted.
  p224_felem_to_generic(&r->X, x1);
  p224_felem_to_generic(&r->Y, y1);
  p224_felem_to_generic(&r->Z, z1);
}

static void ec_GFp_nistp224_dbl(const EC_GROUP *group, EC_JACOBIAN *r,
                                const EC_JACOBIAN *a) {
  p224_felem x, y, z;
  p224_generic_to_felem(x, &a->X);
  p224_generic_to_felem(y, &a->Y);
  p224_generic_to_felem(z, &a->Z);
  p224_point_double(x, y, z, x, y, z);
  // The outputs are already reduced, but still need to be contracted.
  p224_felem_to_generic(&r->X, x);
  p224_felem_to_generic(&r->Y, y);
  p224_felem_to_generic(&r->Z, z);
}

// Fills |out| with small odd and even multiples of |p|: out[i] = [i]*p for
// i = 0..16, with out[0] set to the point at infinity (all-zero).
static void ec_GFp_nistp224_make_precomp(p224_felem out[17][3],
                                         const EC_JACOBIAN *p) {
  OPENSSL_memset(out[0], 0, sizeof(p224_felem) * 3);

  p224_generic_to_felem(out[1][0], &p->X);
  p224_generic_to_felem(out[1][1], &p->Y);
  p224_generic_to_felem(out[1][2], &p->Z);

  for (size_t j = 2; j <= 16; ++j) {
    if (j & 1) {
      // odd j: [j]*p = p + [j-1]*p
      p224_point_add(out[j][0], out[j][1], out[j][2], out[1][0], out[1][1],
                     out[1][2], 0, out[j - 1][0], out[j - 1][1],
                     out[j - 1][2]);
    } else {
      // even j: [j]*p = 2 * [j/2]*p
      p224_point_double(out[j][0], out[j][1], out[j][2], out[j / 2][0],
                        out[j / 2][1], out[j / 2][2]);
    }
  }
}

static void ec_GFp_nistp224_point_mul(const EC_GROUP *group, EC_JACOBIAN *r,
                                      const EC_JACOBIAN *p,
                                      const EC_SCALAR *scalar) {
  p224_felem p_pre_comp[17][3];
  ec_GFp_nistp224_make_precomp(p_pre_comp, p);

  // Set nq to the point at infinity.
  p224_felem nq[3], tmp[4];
  OPENSSL_memset(nq, 0, 3 * sizeof(p224_felem));

  int skip = 1;  // Save two point operations in the first round.
  // i counts down from 220; since i is unsigned, the loop terminates when
  // i == 0 decrements and wraps, making i < 221 false.
  for (size_t i = 220; i < 221; i--) {
    if (!skip) {
      p224_point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
    }

    // Add every 5 doublings.
    if (i % 5 == 0) {
      // At i == 0, i - 1 wraps; p224_get_bit returns 0 for such indices.
      crypto_word_t bits = p224_get_bit(scalar, i + 4) << 5;
      bits |= p224_get_bit(scalar, i + 3) << 4;
      bits |= p224_get_bit(scalar, i + 2) << 3;
      bits |= p224_get_bit(scalar, i + 1) << 2;
      bits |= p224_get_bit(scalar, i) << 1;
      bits |= p224_get_bit(scalar, i - 1);
      crypto_word_t sign, digit;
      ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);

      // Select the point to add or subtract.
      p224_select_point(digit, 17, (const p224_felem(*)[3])p_pre_comp, tmp);
      p224_felem_neg(tmp[3], tmp[1]);  // (X, -Y, Z) is the negative point
      p224_copy_conditional(tmp[1], tmp[3], sign);

      if (!skip) {
        p224_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2],
                       0 /* not mixed */, tmp[0], tmp[1], tmp[2]);
      } else {
        OPENSSL_memcpy(nq, tmp, 3 * sizeof(p224_felem));
        skip = 0;
      }
    }
  }

  // Reduce the output to its unique minimal representation.
  p224_felem_to_generic(&r->X, nq[0]);
  p224_felem_to_generic(&r->Y, nq[1]);
  p224_felem_to_generic(&r->Z, nq[2]);
}

static void ec_GFp_nistp224_point_mul_base(const EC_GROUP *group,
                                           EC_JACOBIAN *r,
                                           const EC_SCALAR *scalar) {
  // Set nq to the point at infinity.
  p224_felem nq[3], tmp[3];
  OPENSSL_memset(nq, 0, 3 * sizeof(p224_felem));

  int skip = 1;  // Save two point operations in the first round.
  // i counts down from 27; the unsigned wrap after i == 0 ends the loop.
  for (size_t i = 27; i < 28; i--) {
    // double
    if (!skip) {
      p224_point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
    }

    // First, look 28 bits upwards.
    crypto_word_t bits = p224_get_bit(scalar, i + 196) << 3;
    bits |= p224_get_bit(scalar, i + 140) << 2;
    bits |= p224_get_bit(scalar, i + 84) << 1;
    bits |= p224_get_bit(scalar, i + 28);
    // Select the point to add, in constant time.
    p224_select_point(bits, 16, g_p224_pre_comp[1], tmp);

    if (!skip) {
      p224_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */,
                     tmp[0], tmp[1], tmp[2]);
    } else {
      OPENSSL_memcpy(nq, tmp, 3 * sizeof(p224_felem));
      skip = 0;
    }

    // Second, look at the current position.
    bits = p224_get_bit(scalar, i + 168) << 3;
    bits |= p224_get_bit(scalar, i + 112) << 2;
    bits |= p224_get_bit(scalar, i + 56) << 1;
    bits |= p224_get_bit(scalar, i);
    // Select the point to add, in constant time.
    p224_select_point(bits, 16, g_p224_pre_comp[0], tmp);
    p224_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */,
                   tmp[0], tmp[1], tmp[2]);
  }

  // Reduce the output to its unique minimal representation.
  p224_felem_to_generic(&r->X, nq[0]);
  p224_felem_to_generic(&r->Y, nq[1]);
  p224_felem_to_generic(&r->Z, nq[2]);
}

// Computes r = g_scalar*G + p_scalar*p. Both scalars are public (this is the
// ECDSA-verify path), so table lookups here need not be constant time.
static void ec_GFp_nistp224_point_mul_public(const EC_GROUP *group,
                                             EC_JACOBIAN *r,
                                             const EC_SCALAR *g_scalar,
                                             const EC_JACOBIAN *p,
                                             const EC_SCALAR *p_scalar) {
  // TODO(davidben): If P-224 ECDSA verify performance ever matters, using
  // |ec_compute_wNAF| for |p_scalar| would likely be an easy improvement.
  p224_felem p_pre_comp[17][3];
  ec_GFp_nistp224_make_precomp(p_pre_comp, p);

  // Set nq to the point at infinity.
  p224_felem nq[3], tmp[3];
  OPENSSL_memset(nq, 0, 3 * sizeof(p224_felem));

  // Loop over both scalars msb-to-lsb, interleaving additions of multiples of
  // the generator (two in each of the last 28 rounds) and additions of p
  // (every 5th round).
  int skip = 1;  // Save two point operations in the first round.
  for (size_t i = 220; i < 221; i--) {
    if (!skip) {
      p224_point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
    }

    // Add multiples of the generator.
    if (i <= 27) {
      // First, look 28 bits upwards.
      crypto_word_t bits = p224_get_bit(g_scalar, i + 196) << 3;
      bits |= p224_get_bit(g_scalar, i + 140) << 2;
      bits |= p224_get_bit(g_scalar, i + 84) << 1;
      bits |= p224_get_bit(g_scalar, i + 28);

      // g_scalar is public, so the table index need not be hidden.
      size_t index = (size_t)bits;
      p224_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */,
                     g_p224_pre_comp[1][index][0], g_p224_pre_comp[1][index][1],
                     g_p224_pre_comp[1][index][2]);
      assert(!skip);

      // Second, look at the current position.
      bits = p224_get_bit(g_scalar, i + 168) << 3;
      bits |= p224_get_bit(g_scalar, i + 112) << 2;
      bits |= p224_get_bit(g_scalar, i + 56) << 1;
      bits |= p224_get_bit(g_scalar, i);
      index = (size_t)bits;
      p224_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */,
                     g_p224_pre_comp[0][index][0], g_p224_pre_comp[0][index][1],
                     g_p224_pre_comp[0][index][2]);
    }

    // Incorporate |p_scalar| every 5 doublings.
    if (i % 5 == 0) {
      // At i == 0, i - 1 wraps; p224_get_bit returns 0 for such indices.
      crypto_word_t bits = p224_get_bit(p_scalar, i + 4) << 5;
      bits |= p224_get_bit(p_scalar, i + 3) << 4;
      bits |= p224_get_bit(p_scalar, i + 2) << 3;
      bits |= p224_get_bit(p_scalar, i + 1) << 2;
      bits |= p224_get_bit(p_scalar, i) << 1;
      bits |= p224_get_bit(p_scalar, i - 1);
      crypto_word_t sign, digit;
      ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);

      // Select the point to add or subtract.
      OPENSSL_memcpy(tmp, p_pre_comp[digit], 3 * sizeof(p224_felem));
      if (sign) {
        p224_felem_neg(tmp[1], tmp[1]);  // (X, -Y, Z) is the negative point
      }

      if (!skip) {
        p224_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2],
                       0 /* not mixed */, tmp[0], tmp[1], tmp[2]);
      } else {
        OPENSSL_memcpy(nq, tmp, 3 * sizeof(p224_felem));
        skip = 0;
      }
    }
  }

  // Reduce the output to its unique minimal representation.
  p224_felem_to_generic(&r->X, nq[0]);
  p224_felem_to_generic(&r->Y, nq[1]);
  p224_felem_to_generic(&r->Z, nq[2]);
}

// EC_METHOD hook: r = a * b in the field, on generic |EC_FELEM|s.
static void ec_GFp_nistp224_felem_mul(const EC_GROUP *group, EC_FELEM *r,
                                      const EC_FELEM *a, const EC_FELEM *b) {
  p224_felem felem1, felem2;
  p224_widefelem wide;
  p224_generic_to_felem(felem1, a);
  p224_generic_to_felem(felem2, b);
  p224_felem_mul(wide, felem1, felem2);
  p224_felem_reduce(felem1, wide);
  p224_felem_to_generic(r, felem1);
}

// EC_METHOD hook: r = a^2 in the field, on generic |EC_FELEM|s.
static void ec_GFp_nistp224_felem_sqr(const EC_GROUP *group, EC_FELEM *r,
                                      const EC_FELEM *a) {
  p224_felem felem;
  p224_generic_to_felem(felem, a);
  p224_widefelem wide;
  p224_felem_square(wide, felem);
  p224_felem_reduce(felem, wide);
  p224_felem_to_generic(r, felem);
}

DEFINE_METHOD_FUNCTION(EC_METHOD, EC_GFp_nistp224_method) {
  out->point_get_affine_coordinates =
      ec_GFp_nistp224_point_get_affine_coordinates;
  out->add = ec_GFp_nistp224_add;
  out->dbl = ec_GFp_nistp224_dbl;
  out->mul = ec_GFp_nistp224_point_mul;
  out->mul_base = ec_GFp_nistp224_point_mul_base;
  out->mul_public = ec_GFp_nistp224_point_mul_public;
  out->felem_mul = ec_GFp_nistp224_felem_mul;
  out->felem_sqr = ec_GFp_nistp224_felem_sqr;
  out->felem_to_bytes = ec_GFp_simple_felem_to_bytes;
  out->felem_from_bytes = ec_GFp_simple_felem_from_bytes;
  out->scalar_inv0_montgomery = ec_simple_scalar_inv0_montgomery;
  out->scalar_to_montgomery_inv_vartime =
      ec_simple_scalar_to_montgomery_inv_vartime;
  out->cmp_x_coordinate = ec_GFp_simple_cmp_x_coordinate;
}

#endif  // BORINGSSL_HAS_UINT128 && !SMALL


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/p256-nistz-table.h
================================================

/*
 * Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2015, Intel Inc.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// This is the precomputed constant time access table for the code in
// p256-nistz.c, for the default generator. The table consists of 37
// subtables, each subtable contains 64 affine points. The affine points are
// encoded as eight uint64's, four for the x coordinate and four for the y.
// Both values are in little-endian order. There are 37 tables because a
// signed, 6-bit wNAF form of the scalar is used and ceil(256/(6 + 1)) = 37.
// Within each table there are 64 values because the 6-bit wNAF value can take
// 64 values, ignoring the sign bit, which is implemented by performing a
// negation of the affine point when required. We would like to align it to 2MB
// in order to increase the chances of using a large page but that appears to
// lead to invalid ELF files being produced.

// This file is generated by make_tables.go.
alignas(4096) static const PRECOMP256_ROW ecp_nistz256_precomputed[37] = { {{{TOBN(0x79e730d4, 0x18a9143c), TOBN(0x75ba95fc, 0x5fedb601), TOBN(0x79fb732b, 0x77622510), TOBN(0x18905f76, 0xa53755c6)}, {TOBN(0xddf25357, 0xce95560a), TOBN(0x8b4ab8e4, 0xba19e45c), TOBN(0xd2e88688, 0xdd21f325), TOBN(0x8571ff18, 0x25885d85)}}, {{TOBN(0x850046d4, 0x10ddd64d), TOBN(0xaa6ae3c1, 0xa433827d), TOBN(0x73220503, 0x8d1490d9), TOBN(0xf6bb32e4, 0x3dcf3a3b)}, {TOBN(0x2f3648d3, 0x61bee1a5), TOBN(0x152cd7cb, 0xeb236ff8), TOBN(0x19a8fb0e, 0x92042dbe), TOBN(0x78c57751, 0x0a5b8a3b)}}, {{TOBN(0xffac3f90, 0x4eebc127), TOBN(0xb027f84a, 0x087d81fb), TOBN(0x66ad77dd, 0x87cbbc98), TOBN(0x26936a3f, 0xb6ff747e)}, {TOBN(0xb04c5c1f, 0xc983a7eb), TOBN(0x583e47ad, 0x0861fe1a), TOBN(0x78820831, 0x1a2ee98e), TOBN(0xd5f06a29, 0xe587cc07)}}, {{TOBN(0x74b0b50d, 0x46918dcc), TOBN(0x4650a6ed, 0xc623c173), TOBN(0x0cdaacac, 0xe8100af2), TOBN(0x577362f5, 0x41b0176b)}, {TOBN(0x2d96f24c, 0xe4cbaba6), TOBN(0x17628471, 0xfad6f447), TOBN(0x6b6c36de, 0xe5ddd22e), TOBN(0x84b14c39, 0x4c5ab863)}}, {{TOBN(0xbe1b8aae, 0xc45c61f5), TOBN(0x90ec649a, 0x94b9537d), TOBN(0x941cb5aa, 0xd076c20c), TOBN(0xc9079605, 0x890523c8)}, {TOBN(0xeb309b4a, 0xe7ba4f10), TOBN(0x73c568ef, 0xe5eb882b), TOBN(0x3540a987, 0x7e7a1f68), TOBN(0x73a076bb, 0x2dd1e916)}}, {{TOBN(0x40394737, 0x3e77664a), TOBN(0x55ae744f, 0x346cee3e), TOBN(0xd50a961a, 0x5b17a3ad), TOBN(0x13074b59, 0x54213673)}, {TOBN(0x93d36220, 0xd377e44b), TOBN(0x299c2b53, 0xadff14b5), TOBN(0xf424d44c, 0xef639f11), TOBN(0xa4c9916d, 0x4a07f75f)}}, {{TOBN(0x0746354e, 0xa0173b4f), TOBN(0x2bd20213, 0xd23c00f7), TOBN(0xf43eaab5, 0x0c23bb08), TOBN(0x13ba5119, 0xc3123e03)}, {TOBN(0x2847d030, 0x3f5b9d4d), TOBN(0x6742f2f2, 0x5da67bdd), TOBN(0xef933bdc, 0x77c94195), TOBN(0xeaedd915, 0x6e240867)}}, {{TOBN(0x27f14cd1, 0x9499a78f), TOBN(0x462ab5c5, 0x6f9b3455), TOBN(0x8f90f02a, 0xf02cfc6b), TOBN(0xb763891e, 0xb265230d)}, {TOBN(0xf59da3a9, 0x532d4977), TOBN(0x21e3327d, 0xcf9eba15), TOBN(0x123c7b84, 
0xbe60bbf0), TOBN(0x56ec12f2, 0x7706df76)}}, {{TOBN(0x75c96e8f, 0x264e20e8), TOBN(0xabe6bfed, 0x59a7a841), TOBN(0x2cc09c04, 0x44c8eb00), TOBN(0xe05b3080, 0xf0c4e16b)}, {TOBN(0x1eb7777a, 0xa45f3314), TOBN(0x56af7bed, 0xce5d45e3), TOBN(0x2b6e019a, 0x88b12f1a), TOBN(0x086659cd, 0xfd835f9b)}}, {{TOBN(0x2c18dbd1, 0x9dc21ec8), TOBN(0x98f9868a, 0x0fcf8139), TOBN(0x737d2cd6, 0x48250b49), TOBN(0xcc61c947, 0x24b3428f)}, {TOBN(0x0c2b4078, 0x80dd9e76), TOBN(0xc43a8991, 0x383fbe08), TOBN(0x5f7d2d65, 0x779be5d2), TOBN(0x78719a54, 0xeb3b4ab5)}}, {{TOBN(0xea7d260a, 0x6245e404), TOBN(0x9de40795, 0x6e7fdfe0), TOBN(0x1ff3a415, 0x8dac1ab5), TOBN(0x3e7090f1, 0x649c9073)}, {TOBN(0x1a768561, 0x2b944e88), TOBN(0x250f939e, 0xe57f61c8), TOBN(0x0c0daa89, 0x1ead643d), TOBN(0x68930023, 0xe125b88e)}}, {{TOBN(0x04b71aa7, 0xd2697768), TOBN(0xabdedef5, 0xca345a33), TOBN(0x2409d29d, 0xee37385e), TOBN(0x4ee1df77, 0xcb83e156)}, {TOBN(0x0cac12d9, 0x1cbb5b43), TOBN(0x170ed2f6, 0xca895637), TOBN(0x28228cfa, 0x8ade6d66), TOBN(0x7ff57c95, 0x53238aca)}}, {{TOBN(0xccc42563, 0x4b2ed709), TOBN(0x0e356769, 0x856fd30d), TOBN(0xbcbcd43f, 0x559e9811), TOBN(0x738477ac, 0x5395b759)}, {TOBN(0x35752b90, 0xc00ee17f), TOBN(0x68748390, 0x742ed2e3), TOBN(0x7cd06422, 0xbd1f5bc1), TOBN(0xfbc08769, 0xc9e7b797)}}, {{TOBN(0xa242a35b, 0xb0cf664a), TOBN(0x126e48f7, 0x7f9707e3), TOBN(0x1717bf54, 0xc6832660), TOBN(0xfaae7332, 0xfd12c72e)}, {TOBN(0x27b52db7, 0x995d586b), TOBN(0xbe29569e, 0x832237c2), TOBN(0xe8e4193e, 0x2a65e7db), TOBN(0x152706dc, 0x2eaa1bbb)}}, {{TOBN(0x72bcd8b7, 0xbc60055b), TOBN(0x03cc23ee, 0x56e27e4b), TOBN(0xee337424, 0xe4819370), TOBN(0xe2aa0e43, 0x0ad3da09)}, {TOBN(0x40b8524f, 0x6383c45d), TOBN(0xd7663554, 0x42a41b25), TOBN(0x64efa6de, 0x778a4797), TOBN(0x2042170a, 0x7079adf4)}}, {{TOBN(0x808b0b65, 0x0bc6fb80), TOBN(0x5882e075, 0x3ffe2e6b), TOBN(0xd5ef2f7c, 0x2c83f549), TOBN(0x54d63c80, 0x9103b723)}, {TOBN(0xf2f11bd6, 0x52a23f9b), TOBN(0x3670c319, 0x4b0b6587), TOBN(0x55c4623b, 0xb1580e9e), TOBN(0x64edf7b2, 
0x01efe220)}}, {{TOBN(0x97091dcb, 0xd53c5c9d), TOBN(0xf17624b6, 0xac0a177b), TOBN(0xb0f13975, 0x2cfe2dff), TOBN(0xc1a35c0a, 0x6c7a574e)}, {TOBN(0x227d3146, 0x93e79987), TOBN(0x0575bf30, 0xe89cb80e), TOBN(0x2f4e247f, 0x0d1883bb), TOBN(0xebd51226, 0x3274c3d0)}}, {{TOBN(0x5f3e51c8, 0x56ada97a), TOBN(0x4afc964d, 0x8f8b403e), TOBN(0xa6f247ab, 0x412e2979), TOBN(0x675abd1b, 0x6f80ebda)}, {TOBN(0x66a2bd72, 0x5e485a1d), TOBN(0x4b2a5caf, 0x8f4f0b3c), TOBN(0x2626927f, 0x1b847bba), TOBN(0x6c6fc7d9, 0x0502394d)}}, {{TOBN(0xfea912ba, 0xa5659ae8), TOBN(0x68363aba, 0x25e1a16e), TOBN(0xb8842277, 0x752c41ac), TOBN(0xfe545c28, 0x2897c3fc)}, {TOBN(0x2d36e9e7, 0xdc4c696b), TOBN(0x5806244a, 0xfba977c5), TOBN(0x85665e9b, 0xe39508c1), TOBN(0xf720ee25, 0x6d12597b)}}, {{TOBN(0x8a979129, 0xd2337a31), TOBN(0x5916868f, 0x0f862bdc), TOBN(0x048099d9, 0x5dd283ba), TOBN(0xe2d1eeb6, 0xfe5bfb4e)}, {TOBN(0x82ef1c41, 0x7884005d), TOBN(0xa2d4ec17, 0xffffcbae), TOBN(0x9161c53f, 0x8aa95e66), TOBN(0x5ee104e1, 0xc5fee0d0)}}, {{TOBN(0x562e4cec, 0xc135b208), TOBN(0x74e1b265, 0x4783f47d), TOBN(0x6d2a506c, 0x5a3f3b30), TOBN(0xecead9f4, 0xc16762fc)}, {TOBN(0xf29dd4b2, 0xe286e5b9), TOBN(0x1b0fadc0, 0x83bb3c61), TOBN(0x7a75023e, 0x7fac29a4), TOBN(0xc086d5f1, 0xc9477fa3)}}, {{TOBN(0x0fc61135, 0x2f6f3076), TOBN(0xc99ffa23, 0xe3912a9a), TOBN(0x6a0b0685, 0xd2f8ba3d), TOBN(0xfdc777e8, 0xe93358a4)}, {TOBN(0x94a787bb, 0x35415f04), TOBN(0x640c2d6a, 0x4d23fea4), TOBN(0x9de917da, 0x153a35b5), TOBN(0x793e8d07, 0x5d5cd074)}}, {{TOBN(0xf4f87653, 0x2de45068), TOBN(0x37c7a7e8, 0x9e2e1f6e), TOBN(0xd0825fa2, 0xa3584069), TOBN(0xaf2cea7c, 0x1727bf42)}, {TOBN(0x0360a4fb, 0x9e4785a9), TOBN(0xe5fda49c, 0x27299f4a), TOBN(0x48068e13, 0x71ac2f71), TOBN(0x83d0687b, 0x9077666f)}}, {{TOBN(0x6d3883b2, 0x15d02819), TOBN(0x6d0d7550, 0x40dd9a35), TOBN(0x61d7cbf9, 0x1d2b469f), TOBN(0xf97b232f, 0x2efc3115)}, {TOBN(0xa551d750, 0xb24bcbc7), TOBN(0x11ea4949, 0x88a1e356), TOBN(0x7669f031, 0x93cb7501), TOBN(0x595dc55e, 0xca737b8a)}}, 
{{TOBN(0xa4a319ac, 0xd837879f), TOBN(0x6fc1b49e, 0xed6b67b0), TOBN(0xe3959933, 0x32f1f3af), TOBN(0x966742eb, 0x65432a2e)}, {TOBN(0x4b8dc9fe, 0xb4966228), TOBN(0x96cc6312, 0x43f43950), TOBN(0x12068859, 0xc9b731ee), TOBN(0x7b948dc3, 0x56f79968)}}, {{TOBN(0x61e4ad32, 0xed1f8008), TOBN(0xe6c9267a, 0xd8b17538), TOBN(0x1ac7c5eb, 0x857ff6fb), TOBN(0x994baaa8, 0x55f2fb10)}, {TOBN(0x84cf14e1, 0x1d248018), TOBN(0x5a39898b, 0x628ac508), TOBN(0x14fde97b, 0x5fa944f5), TOBN(0xed178030, 0xd12e5ac7)}}, {{TOBN(0x042c2af4, 0x97e2feb4), TOBN(0xd36a42d7, 0xaebf7313), TOBN(0x49d2c9eb, 0x084ffdd7), TOBN(0x9f8aa54b, 0x2ef7c76a)}, {TOBN(0x9200b7ba, 0x09895e70), TOBN(0x3bd0c66f, 0xddb7fb58), TOBN(0x2d97d108, 0x78eb4cbb), TOBN(0x2d431068, 0xd84bde31)}}, {{TOBN(0x4b523eb7, 0x172ccd1f), TOBN(0x7323cb28, 0x30a6a892), TOBN(0x97082ec0, 0xcfe153eb), TOBN(0xe97f6b6a, 0xf2aadb97)}, {TOBN(0x1d3d393e, 0xd1a83da1), TOBN(0xa6a7f9c7, 0x804b2a68), TOBN(0x4a688b48, 0x2d0cb71e), TOBN(0xa9b4cc5f, 0x40585278)}}, {{TOBN(0x5e5db46a, 0xcb66e132), TOBN(0xf1be963a, 0x0d925880), TOBN(0x944a7027, 0x0317b9e2), TOBN(0xe266f959, 0x48603d48)}, {TOBN(0x98db6673, 0x5c208899), TOBN(0x90472447, 0xa2fb18a3), TOBN(0x8a966939, 0x777c619f), TOBN(0x3798142a, 0x2a3be21b)}}, {{TOBN(0xb4241cb1, 0x3298b343), TOBN(0xa3a14e49, 0xb44f65a1), TOBN(0xc5f4d6cd, 0x3ac77acd), TOBN(0xd0288cb5, 0x52b6fc3c)}, {TOBN(0xd5cc8c2f, 0x1c040abc), TOBN(0xb675511e, 0x06bf9b4a), TOBN(0xd667da37, 0x9b3aa441), TOBN(0x460d45ce, 0x51601f72)}}, {{TOBN(0xe2f73c69, 0x6755ff89), TOBN(0xdd3cf7e7, 0x473017e6), TOBN(0x8ef5689d, 0x3cf7600d), TOBN(0x948dc4f8, 0xb1fc87b4)}, {TOBN(0xd9e9fe81, 0x4ea53299), TOBN(0x2d921ca2, 0x98eb6028), TOBN(0xfaecedfd, 0x0c9803fc), TOBN(0xf38ae891, 0x4d7b4745)}}, {{TOBN(0xd8c5fccf, 0xc5e3a3d8), TOBN(0xbefd904c, 0x4079dfbf), TOBN(0xbc6d6a58, 0xfead0197), TOBN(0x39227077, 0x695532a4)}, {TOBN(0x09e23e6d, 0xdbef42f5), TOBN(0x7e449b64, 0x480a9908), TOBN(0x7b969c1a, 0xad9a2e40), TOBN(0x6231d792, 0x9591c2a4)}}, {{TOBN(0x87151456, 0x0f664534), 
TOBN(0x85ceae7c, 0x4b68f103), TOBN(0xac09c4ae, 0x65578ab9), TOBN(0x33ec6868, 0xf044b10c)}, {TOBN(0x6ac4832b, 0x3a8ec1f1), TOBN(0x5509d128, 0x5847d5ef), TOBN(0xf909604f, 0x763f1574), TOBN(0xb16c4303, 0xc32f63c4)}}, {{TOBN(0xb6ab2014, 0x7ca23cd3), TOBN(0xcaa7a5c6, 0xa391849d), TOBN(0x5b0673a3, 0x75678d94), TOBN(0xc982ddd4, 0xdd303e64)}, {TOBN(0xfd7b000b, 0x5db6f971), TOBN(0xbba2cb1f, 0x6f876f92), TOBN(0xc77332a3, 0x3c569426), TOBN(0xa159100c, 0x570d74f8)}}, {{TOBN(0xfd16847f, 0xdec67ef5), TOBN(0x742ee464, 0x233e76b7), TOBN(0x0b8e4134, 0xefc2b4c8), TOBN(0xca640b86, 0x42a3e521)}, {TOBN(0x653a0190, 0x8ceb6aa9), TOBN(0x313c300c, 0x547852d5), TOBN(0x24e4ab12, 0x6b237af7), TOBN(0x2ba90162, 0x8bb47af8)}}, {{TOBN(0x3d5e58d6, 0xa8219bb7), TOBN(0xc691d0bd, 0x1b06c57f), TOBN(0x0ae4cb10, 0xd257576e), TOBN(0x3569656c, 0xd54a3dc3)}, {TOBN(0xe5ebaebd, 0x94cda03a), TOBN(0x934e82d3, 0x162bfe13), TOBN(0x450ac0ba, 0xe251a0c6), TOBN(0x480b9e11, 0xdd6da526)}}, {{TOBN(0x00467bc5, 0x8cce08b5), TOBN(0xb636458c, 0x7f178d55), TOBN(0xc5748bae, 0xa677d806), TOBN(0x2763a387, 0xdfa394eb)}, {TOBN(0xa12b448a, 0x7d3cebb6), TOBN(0xe7adda3e, 0x6f20d850), TOBN(0xf63ebce5, 0x1558462c), TOBN(0x58b36143, 0x620088a8)}}, {{TOBN(0x8a2cc3ca, 0x4d63c0ee), TOBN(0x51233117, 0x0fe948ce), TOBN(0x7463fd85, 0x222ef33b), TOBN(0xadf0c7dc, 0x7c603d6c)}, {TOBN(0x0ec32d3b, 0xfe7765e5), TOBN(0xccaab359, 0xbf380409), TOBN(0xbdaa84d6, 0x8e59319c), TOBN(0xd9a4c280, 0x9c80c34d)}}, {{TOBN(0xa9d89488, 0xa059c142), TOBN(0x6f5ae714, 0xff0b9346), TOBN(0x068f237d, 0x16fb3664), TOBN(0x5853e4c4, 0x363186ac)}, {TOBN(0xe2d87d23, 0x63c52f98), TOBN(0x2ec4a766, 0x81828876), TOBN(0x47b864fa, 0xe14e7b1c), TOBN(0x0c0bc0e5, 0x69192408)}}, {{TOBN(0xe4d7681d, 0xb82e9f3e), TOBN(0x83200f0b, 0xdf25e13c), TOBN(0x8909984c, 0x66f27280), TOBN(0x462d7b00, 0x75f73227)}, {TOBN(0xd90ba188, 0xf2651798), TOBN(0x74c6e18c, 0x36ab1c34), TOBN(0xab256ea3, 0x5ef54359), TOBN(0x03466612, 0xd1aa702f)}}, {{TOBN(0x624d6049, 0x2ed22e91), TOBN(0x6fdfe0b5, 0x6f072822), 
TOBN(0xeeca1115, 0x39ce2271), TOBN(0x98100a4f, 0xdb01614f)}, {TOBN(0xb6b0daa2, 0xa35c628f), TOBN(0xb6f94d2e, 0xc87e9a47), TOBN(0xc6773259, 0x1d57d9ce), TOBN(0xf70bfeec, 0x03884a7b)}}, {{TOBN(0x5fb35ccf, 0xed2bad01), TOBN(0xa155cbe3, 0x1da6a5c7), TOBN(0xc2e2594c, 0x30a92f8f), TOBN(0x649c89ce, 0x5bfafe43)}, {TOBN(0xd158667d, 0xe9ff257a), TOBN(0x9b359611, 0xf32c50ae), TOBN(0x4b00b20b, 0x906014cf), TOBN(0xf3a8cfe3, 0x89bc7d3d)}}, {{TOBN(0x4ff23ffd, 0x248a7d06), TOBN(0x80c5bfb4, 0x878873fa), TOBN(0xb7d9ad90, 0x05745981), TOBN(0x179c85db, 0x3db01994)}, {TOBN(0xba41b062, 0x61a6966c), TOBN(0x4d82d052, 0xeadce5a8), TOBN(0x9e91cd3b, 0xa5e6a318), TOBN(0x47795f4f, 0x95b2dda0)}}, {{TOBN(0xecfd7c1f, 0xd55a897c), TOBN(0x009194ab, 0xb29110fb), TOBN(0x5f0e2046, 0xe381d3b0), TOBN(0x5f3425f6, 0xa98dd291)}, {TOBN(0xbfa06687, 0x730d50da), TOBN(0x0423446c, 0x4b083b7f), TOBN(0x397a247d, 0xd69d3417), TOBN(0xeb629f90, 0x387ba42a)}}, {{TOBN(0x1ee426cc, 0xd5cd79bf), TOBN(0x0032940b, 0x946c6e18), TOBN(0x1b1e8ae0, 0x57477f58), TOBN(0xe94f7d34, 0x6d823278)}, {TOBN(0xc747cb96, 0x782ba21a), TOBN(0xc5254469, 0xf72b33a5), TOBN(0x772ef6de, 0xc7f80c81), TOBN(0xd73acbfe, 0x2cd9e6b5)}}, {{TOBN(0x4075b5b1, 0x49ee90d9), TOBN(0x785c339a, 0xa06e9eba), TOBN(0xa1030d5b, 0xabf825e0), TOBN(0xcec684c3, 0xa42931dc)}, {TOBN(0x42ab62c9, 0xc1586e63), TOBN(0x45431d66, 0x5ab43f2b), TOBN(0x57c8b2c0, 0x55f7835d), TOBN(0x033da338, 0xc1b7f865)}}, {{TOBN(0x283c7513, 0xcaa76097), TOBN(0x0a624fa9, 0x36c83906), TOBN(0x6b20afec, 0x715af2c7), TOBN(0x4b969974, 0xeba78bfd)}, {TOBN(0x220755cc, 0xd921d60e), TOBN(0x9b944e10, 0x7baeca13), TOBN(0x04819d51, 0x5ded93d4), TOBN(0x9bbff86e, 0x6dddfd27)}}, {{TOBN(0x6b344130, 0x77adc612), TOBN(0xa7496529, 0xbbd803a0), TOBN(0x1a1baaa7, 0x6d8805bd), TOBN(0xc8403902, 0x470343ad)}, {TOBN(0x39f59f66, 0x175adff1), TOBN(0x0b26d7fb, 0xb7d8c5b7), TOBN(0xa875f5ce, 0x529d75e3), TOBN(0x85efc7e9, 0x41325cc2)}}, {{TOBN(0x21950b42, 0x1ff6acd3), TOBN(0xffe70484, 0x53dc6909), TOBN(0xff4cd0b2, 0x28766127), 
TOBN(0xabdbe608, 0x4fb7db2b)}, {TOBN(0x837c9228, 0x5e1109e8), TOBN(0x26147d27, 0xf4645b5a), TOBN(0x4d78f592, 0xf7818ed8), TOBN(0xd394077e, 0xf247fa36)}}, {{TOBN(0x0fb9c2d0, 0x488c171a), TOBN(0xa78bfbaa, 0x13685278), TOBN(0xedfbe268, 0xd5b1fa6a), TOBN(0x0dceb8db, 0x2b7eaba7)}, {TOBN(0xbf9e8089, 0x9ae2b710), TOBN(0xefde7ae6, 0xa4449c96), TOBN(0x43b7716b, 0xcc143a46), TOBN(0xd7d34194, 0xc3628c13)}}, {{TOBN(0x508cec1c, 0x3b3f64c9), TOBN(0xe20bc0ba, 0x1e5edf3f), TOBN(0xda1deb85, 0x2f4318d4), TOBN(0xd20ebe0d, 0x5c3fa443)}, {TOBN(0x370b4ea7, 0x73241ea3), TOBN(0x61f1511c, 0x5e1a5f65), TOBN(0x99a5e23d, 0x82681c62), TOBN(0xd731e383, 0xa2f54c2d)}}, {{TOBN(0x2692f36e, 0x83445904), TOBN(0x2e0ec469, 0xaf45f9c0), TOBN(0x905a3201, 0xc67528b7), TOBN(0x88f77f34, 0xd0e5e542)}, {TOBN(0xf67a8d29, 0x5864687c), TOBN(0x23b92eae, 0x22df3562), TOBN(0x5c27014b, 0x9bbec39e), TOBN(0x7ef2f226, 0x9c0f0f8d)}}, {{TOBN(0x97359638, 0x546c4d8d), TOBN(0x5f9c3fc4, 0x92f24679), TOBN(0x912e8bed, 0xa8c8acd9), TOBN(0xec3a318d, 0x306634b0)}, {TOBN(0x80167f41, 0xc31cb264), TOBN(0x3db82f6f, 0x522113f2), TOBN(0xb155bcd2, 0xdcafe197), TOBN(0xfba1da59, 0x43465283)}}, {{TOBN(0xa0425b8e, 0xb212cf53), TOBN(0x4f2e512e, 0xf8557c5f), TOBN(0xc1286ff9, 0x25c4d56c), TOBN(0xbb8a0fea, 0xee26c851)}, {TOBN(0xc28f70d2, 0xe7d6107e), TOBN(0x7ee0c444, 0xe76265aa), TOBN(0x3df277a4, 0x1d1936b1), TOBN(0x1a556e3f, 0xea9595eb)}}, {{TOBN(0x258bbbf9, 0xe7305683), TOBN(0x31eea5bf, 0x07ef5be6), TOBN(0x0deb0e4a, 0x46c814c1), TOBN(0x5cee8449, 0xa7b730dd)}, {TOBN(0xeab495c5, 0xa0182bde), TOBN(0xee759f87, 0x9e27a6b4), TOBN(0xc2cf6a68, 0x80e518ca), TOBN(0x25e8013f, 0xf14cf3f4)}}, {{TOBN(0x8fc44140, 0x7e8d7a14), TOBN(0xbb1ff3ca, 0x9556f36a), TOBN(0x6a844385, 0x14600044), TOBN(0xba3f0c4a, 0x7451ae63)}, {TOBN(0xdfcac25b, 0x1f9af32a), TOBN(0x01e0db86, 0xb1f2214b), TOBN(0x4e9a5bc2, 0xa4b596ac), TOBN(0x83927681, 0x026c2c08)}}, {{TOBN(0x3ec832e7, 0x7acaca28), TOBN(0x1bfeea57, 0xc7385b29), TOBN(0x068212e3, 0xfd1eaf38), TOBN(0xc1329830, 0x6acf8ccc)}, 
{TOBN(0xb909f2db, 0x2aac9e59), TOBN(0x5748060d, 0xb661782a), TOBN(0xc5ab2632, 0xc79b7a01), TOBN(0xda44c6c6, 0x00017626)}}, {{TOBN(0xf26c00e8, 0xa7ea82f0), TOBN(0x99cac80d, 0xe4299aaf), TOBN(0xd66fe3b6, 0x7ed78be1), TOBN(0x305f725f, 0x648d02cd)}, {TOBN(0x33ed1bc4, 0x623fb21b), TOBN(0xfa70533e, 0x7a6319ad), TOBN(0x17ab562d, 0xbe5ffb3e), TOBN(0x06374994, 0x56674741)}}, {{TOBN(0x69d44ed6, 0x5c46aa8e), TOBN(0x2100d5d3, 0xa8d063d1), TOBN(0xcb9727ea, 0xa2d17c36), TOBN(0x4c2bab1b, 0x8add53b7)}, {TOBN(0xa084e90c, 0x15426704), TOBN(0x778afcd3, 0xa837ebea), TOBN(0x6651f701, 0x7ce477f8), TOBN(0xa0624998, 0x46fb7a8b)}}, {{TOBN(0xdc1e6828, 0xed8a6e19), TOBN(0x33fc2336, 0x4189d9c7), TOBN(0x026f8fe2, 0x671c39bc), TOBN(0xd40c4ccd, 0xbc6f9915)}, {TOBN(0xafa135bb, 0xf80e75ca), TOBN(0x12c651a0, 0x22adff2c), TOBN(0xc40a04bd, 0x4f51ad96), TOBN(0x04820109, 0xbbe4e832)}}, {{TOBN(0x3667eb1a, 0x7f4c04cc), TOBN(0x59556621, 0xa9404f84), TOBN(0x71cdf653, 0x7eceb50a), TOBN(0x994a44a6, 0x9b8335fa)}, {TOBN(0xd7faf819, 0xdbeb9b69), TOBN(0x473c5680, 0xeed4350d), TOBN(0xb6658466, 0xda44bba2), TOBN(0x0d1bc780, 0x872bdbf3)}}, {{TOBN(0xe535f175, 0xa1962f91), TOBN(0x6ed7e061, 0xed58f5a7), TOBN(0x177aa4c0, 0x2089a233), TOBN(0x0dbcb03a, 0xe539b413)}, {TOBN(0xe3dc424e, 0xbb32e38e), TOBN(0x6472e5ef, 0x6806701e), TOBN(0xdd47ff98, 0x814be9ee), TOBN(0x6b60cfff, 0x35ace009)}}, {{TOBN(0xb8d3d931, 0x9ff91fe5), TOBN(0x039c4800, 0xf0518eed), TOBN(0x95c37632, 0x9182cb26), TOBN(0x0763a434, 0x82fc568d)}, {TOBN(0x707c04d5, 0x383e76ba), TOBN(0xac98b930, 0x824e8197), TOBN(0x92bf7c8f, 0x91230de0), TOBN(0x90876a01, 0x40959b70)}}, {{TOBN(0xdb6d96f3, 0x05968b80), TOBN(0x380a0913, 0x089f73b9), TOBN(0x7da70b83, 0xc2c61e01), TOBN(0x95fb8394, 0x569b38c7)}, {TOBN(0x9a3c6512, 0x80edfe2f), TOBN(0x8f726bb9, 0x8faeaf82), TOBN(0x8010a4a0, 0x78424bf8), TOBN(0x29672044, 0x0e844970)}}}, {{{TOBN(0x63c5cb81, 0x7a2ad62a), TOBN(0x7ef2b6b9, 0xac62ff54), TOBN(0x3749bba4, 0xb3ad9db5), TOBN(0xad311f2c, 0x46d5a617)}, {TOBN(0xb77a8087, 
0xc2ff3b6d), TOBN(0xb46feaf3, 0x367834ff), TOBN(0xf8aa266d, 0x75d6b138), TOBN(0xfa38d320, 0xec008188)}}, {{TOBN(0x486d8ffa, 0x696946fc), TOBN(0x50fbc6d8, 0xb9cba56d), TOBN(0x7e3d423e, 0x90f35a15), TOBN(0x7c3da195, 0xc0dd962c)}, {TOBN(0xe673fdb0, 0x3cfd5d8b), TOBN(0x0704b7c2, 0x889dfca5), TOBN(0xf6ce581f, 0xf52305aa), TOBN(0x399d49eb, 0x914d5e53)}}, {{TOBN(0x380a496d, 0x6ec293cd), TOBN(0x733dbda7, 0x8e7051f5), TOBN(0x037e388d, 0xb849140a), TOBN(0xee4b32b0, 0x5946dbf6)}, {TOBN(0xb1c4fda9, 0xcae368d1), TOBN(0x5001a7b0, 0xfdb0b2f3), TOBN(0x6df59374, 0x2e3ac46e), TOBN(0x4af675f2, 0x39b3e656)}}, {{TOBN(0x44e38110, 0x39949296), TOBN(0x5b63827b, 0x361db1b5), TOBN(0x3e5323ed, 0x206eaff5), TOBN(0x942370d2, 0xc21f4290)}, {TOBN(0xf2caaf2e, 0xe0d985a1), TOBN(0x192cc64b, 0x7239846d), TOBN(0x7c0b8f47, 0xae6312f8), TOBN(0x7dc61f91, 0x96620108)}}, {{TOBN(0xb830fb5b, 0xc2da7de9), TOBN(0xd0e643df, 0x0ff8d3be), TOBN(0x31ee77ba, 0x188a9641), TOBN(0x4e8aa3aa, 0xbcf6d502)}, {TOBN(0xf9fb6532, 0x9a49110f), TOBN(0xd18317f6, 0x2dd6b220), TOBN(0x7e3ced41, 0x52c3ea5a), TOBN(0x0d296a14, 0x7d579c4a)}}, {{TOBN(0x35d6a53e, 0xed4c3717), TOBN(0x9f8240cf, 0x3d0ed2a3), TOBN(0x8c0d4d05, 0xe5543aa5), TOBN(0x45d5bbfb, 0xdd33b4b4)}, {TOBN(0xfa04cc73, 0x137fd28e), TOBN(0x862ac6ef, 0xc73b3ffd), TOBN(0x403ff9f5, 0x31f51ef2), TOBN(0x34d5e0fc, 0xbc73f5a2)}}, {{TOBN(0xf2526820, 0x08913f4f), TOBN(0xea20ed61, 0xeac93d95), TOBN(0x51ed38b4, 0x6ca6b26c), TOBN(0x8662dcbc, 0xea4327b0)}, {TOBN(0x6daf295c, 0x725d2aaa), TOBN(0xbad2752f, 0x8e52dcda), TOBN(0x2210e721, 0x0b17dacc), TOBN(0xa37f7912, 0xd51e8232)}}, {{TOBN(0x4f7081e1, 0x44cc3add), TOBN(0xd5ffa1d6, 0x87be82cf), TOBN(0x89890b6c, 0x0edd6472), TOBN(0xada26e1a, 0x3ed17863)}, {TOBN(0x276f2715, 0x63483caa), TOBN(0xe6924cd9, 0x2f6077fd), TOBN(0x05a7fe98, 0x0a466e3c), TOBN(0xf1c794b0, 0xb1902d1f)}}, {{TOBN(0xe5213688, 0x82a8042c), TOBN(0xd931cfaf, 0xcd278298), TOBN(0x069a0ae0, 0xf597a740), TOBN(0x0adbb3f3, 0xeb59107c)}, {TOBN(0x983e951e, 0x5eaa8eb8), TOBN(0xe663a8b5, 
0x11b48e78), TOBN(0x1631cc0d, 0x8a03f2c5), TOBN(0x7577c11e, 0x11e271e2)}}, {{TOBN(0x33b2385c, 0x08369a90), TOBN(0x2990c59b, 0x190eb4f8), TOBN(0x819a6145, 0xc68eac80), TOBN(0x7a786d62, 0x2ec4a014)}, {TOBN(0x33faadbe, 0x20ac3a8d), TOBN(0x31a21781, 0x5aba2d30), TOBN(0x209d2742, 0xdba4f565), TOBN(0xdb2ce9e3, 0x55aa0fbb)}}, {{TOBN(0x8cef334b, 0x168984df), TOBN(0xe81dce17, 0x33879638), TOBN(0xf6e6949c, 0x263720f0), TOBN(0x5c56feaf, 0xf593cbec)}, {TOBN(0x8bff5601, 0xfde58c84), TOBN(0x74e24117, 0x2eccb314), TOBN(0xbcf01b61, 0x4c9a8a78), TOBN(0xa233e35e, 0x544c9868)}}, {{TOBN(0xb3156bf3, 0x8bd7aff1), TOBN(0x1b5ee4cb, 0x1d81b146), TOBN(0x7ba1ac41, 0xd628a915), TOBN(0x8f3a8f9c, 0xfd89699e)}, {TOBN(0x7329b9c9, 0xa0748be7), TOBN(0x1d391c95, 0xa92e621f), TOBN(0xe51e6b21, 0x4d10a837), TOBN(0xd255f53a, 0x4947b435)}}, {{TOBN(0x07669e04, 0xf1788ee3), TOBN(0xc14f27af, 0xa86938a2), TOBN(0x8b47a334, 0xe93a01c0), TOBN(0xff627438, 0xd9366808)}, {TOBN(0x7a0985d8, 0xca2a5965), TOBN(0x3d9a5542, 0xd6e9b9b3), TOBN(0xc23eb80b, 0x4cf972e8), TOBN(0x5c1c33bb, 0x4fdf72fd)}}, {{TOBN(0x0c4a58d4, 0x74a86108), TOBN(0xf8048a8f, 0xee4c5d90), TOBN(0xe3c7c924, 0xe86d4c80), TOBN(0x28c889de, 0x056a1e60)}, {TOBN(0x57e2662e, 0xb214a040), TOBN(0xe8c48e98, 0x37e10347), TOBN(0x87742862, 0x80ac748a), TOBN(0xf1c24022, 0x186b06f2)}}, {{TOBN(0xac2dd4c3, 0x5f74040a), TOBN(0x409aeb71, 0xfceac957), TOBN(0x4fbad782, 0x55c4ec23), TOBN(0xb359ed61, 0x8a7b76ec)}, {TOBN(0x12744926, 0xed6f4a60), TOBN(0xe21e8d7f, 0x4b912de3), TOBN(0xe2575a59, 0xfc705a59), TOBN(0x72f1d4de, 0xed2dbc0e)}}, {{TOBN(0x3d2b24b9, 0xeb7926b8), TOBN(0xbff88cb3, 0xcdbe5509), TOBN(0xd0f399af, 0xe4dd640b), TOBN(0x3c5fe130, 0x2f76ed45)}, {TOBN(0x6f3562f4, 0x3764fb3d), TOBN(0x7b5af318, 0x3151b62d), TOBN(0xd5bd0bc7, 0xd79ce5f3), TOBN(0xfdaf6b20, 0xec66890f)}}, {{TOBN(0x735c67ec, 0x6063540c), TOBN(0x50b259c2, 0xe5f9cb8f), TOBN(0xb8734f9a, 0x3f99c6ab), TOBN(0xf8cc13d5, 0xa3a7bc85)}, {TOBN(0x80c1b305, 0xc5217659), TOBN(0xfe5364d4, 0x4ec12a54), TOBN(0xbd87045e, 
0x681345fe), TOBN(0x7f8efeb1, 0x582f897f)}}, {{TOBN(0xe8cbf1e5, 0xd5923359), TOBN(0xdb0cea9d, 0x539b9fb0), TOBN(0x0c5b34cf, 0x49859b98), TOBN(0x5e583c56, 0xa4403cc6)}, {TOBN(0x11fc1a2d, 0xd48185b7), TOBN(0xc93fbc7e, 0x6e521787), TOBN(0x47e7a058, 0x05105b8b), TOBN(0x7b4d4d58, 0xdb8260c8)}}, {{TOBN(0xe33930b0, 0x46eb842a), TOBN(0x8e844a9a, 0x7bdae56d), TOBN(0x34ef3a9e, 0x13f7fdfc), TOBN(0xb3768f82, 0x636ca176)}, {TOBN(0x2821f4e0, 0x4e09e61c), TOBN(0x414dc3a1, 0xa0c7cddc), TOBN(0xd5379437, 0x54945fcd), TOBN(0x151b6eef, 0xb3555ff1)}}, {{TOBN(0xb31bd613, 0x6339c083), TOBN(0x39ff8155, 0xdfb64701), TOBN(0x7c3388d2, 0xe29604ab), TOBN(0x1e19084b, 0xa6b10442)}, {TOBN(0x17cf54c0, 0xeccd47ef), TOBN(0x89693385, 0x4a5dfb30), TOBN(0x69d023fb, 0x47daf9f6), TOBN(0x9222840b, 0x7d91d959)}}, {{TOBN(0x439108f5, 0x803bac62), TOBN(0x0b7dd91d, 0x379bd45f), TOBN(0xd651e827, 0xca63c581), TOBN(0x5c5d75f6, 0x509c104f)}, {TOBN(0x7d5fc738, 0x1f2dc308), TOBN(0x20faa7bf, 0xd98454be), TOBN(0x95374bee, 0xa517b031), TOBN(0xf036b9b1, 0x642692ac)}}, {{TOBN(0xc5106109, 0x39842194), TOBN(0xb7e2353e, 0x49d05295), TOBN(0xfc8c1d5c, 0xefb42ee0), TOBN(0xe04884eb, 0x08ce811c)}, {TOBN(0xf1f75d81, 0x7419f40e), TOBN(0x5b0ac162, 0xa995c241), TOBN(0x120921bb, 0xc4c55646), TOBN(0x713520c2, 0x8d33cf97)}}, {{TOBN(0xb4a65a5c, 0xe98c5100), TOBN(0x6cec871d, 0x2ddd0f5a), TOBN(0x251f0b7f, 0x9ba2e78b), TOBN(0x224a8434, 0xce3a2a5f)}, {TOBN(0x26827f61, 0x25f5c46f), TOBN(0x6a22bedc, 0x48545ec0), TOBN(0x25ae5fa0, 0xb1bb5cdc), TOBN(0xd693682f, 0xfcb9b98f)}}, {{TOBN(0x32027fe8, 0x91e5d7d3), TOBN(0xf14b7d17, 0x73a07678), TOBN(0xf88497b3, 0xc0dfdd61), TOBN(0xf7c2eec0, 0x2a8c4f48)}, {TOBN(0xaa5573f4, 0x3756e621), TOBN(0xc013a240, 0x1825b948), TOBN(0x1c03b345, 0x63878572), TOBN(0xa0472bea, 0x653a4184)}}, {{TOBN(0xf4222e27, 0x0ac69a80), TOBN(0x34096d25, 0xf51e54f6), TOBN(0x00a648cb, 0x8fffa591), TOBN(0x4e87acdc, 0x69b6527f)}, {TOBN(0x0575e037, 0xe285ccb4), TOBN(0x188089e4, 0x50ddcf52), TOBN(0xaa96c9a8, 0x870ff719), TOBN(0x74a56cd8, 
0x1fc7e369)}}, {{TOBN(0x41d04ee2, 0x1726931a), TOBN(0x0bbbb2c8, 0x3660ecfd), TOBN(0xa6ef6de5, 0x24818e18), TOBN(0xe421cc51, 0xe7d57887)}, {TOBN(0xf127d208, 0xbea87be6), TOBN(0x16a475d3, 0xb1cdd682), TOBN(0x9db1b684, 0x439b63f7), TOBN(0x5359b3db, 0xf0f113b6)}}, {{TOBN(0xdfccf1de, 0x8bf06e31), TOBN(0x1fdf8f44, 0xdd383901), TOBN(0x10775cad, 0x5017e7d2), TOBN(0xdfc3a597, 0x58d11eef)}, {TOBN(0x6ec9c8a0, 0xb1ecff10), TOBN(0xee6ed6cc, 0x28400549), TOBN(0xb5ad7bae, 0x1b4f8d73), TOBN(0x61b4f11d, 0xe00aaab9)}}, {{TOBN(0x7b32d69b, 0xd4eff2d7), TOBN(0x88ae6771, 0x4288b60f), TOBN(0x159461b4, 0x37a1e723), TOBN(0x1f3d4789, 0x570aae8c)}, {TOBN(0x869118c0, 0x7f9871da), TOBN(0x35fbda78, 0xf635e278), TOBN(0x738f3641, 0xe1541dac), TOBN(0x6794b13a, 0xc0dae45f)}}, {{TOBN(0x065064ac, 0x09cc0917), TOBN(0x27c53729, 0xc68540fd), TOBN(0x0d2d4c8e, 0xef227671), TOBN(0xd23a9f80, 0xa1785a04)}, {TOBN(0x98c59528, 0x52650359), TOBN(0xfa09ad01, 0x74a1acad), TOBN(0x082d5a29, 0x0b55bf5c), TOBN(0xa40f1c67, 0x419b8084)}}, {{TOBN(0x3a5c752e, 0xdcc18770), TOBN(0x4baf1f2f, 0x8825c3a5), TOBN(0xebd63f74, 0x21b153ed), TOBN(0xa2383e47, 0xb2f64723)}, {TOBN(0xe7bf620a, 0x2646d19a), TOBN(0x56cb44ec, 0x03c83ffd), TOBN(0xaf7267c9, 0x4f6be9f1), TOBN(0x8b2dfd7b, 0xc06bb5e9)}}, {{TOBN(0xb87072f2, 0xa672c5c7), TOBN(0xeacb11c8, 0x0d53c5e2), TOBN(0x22dac29d, 0xff435932), TOBN(0x37bdb99d, 0x4408693c)}, {TOBN(0xf6e62fb6, 0x2899c20f), TOBN(0x3535d512, 0x447ece24), TOBN(0xfbdc6b88, 0xff577ce3), TOBN(0x726693bd, 0x190575f2)}}, {{TOBN(0x6772b0e5, 0xab4b35a2), TOBN(0x1d8b6001, 0xf5eeaacf), TOBN(0x728f7ce4, 0x795b9580), TOBN(0x4a20ed2a, 0x41fb81da)}, {TOBN(0x9f685cd4, 0x4fec01e6), TOBN(0x3ed7ddcc, 0xa7ff50ad), TOBN(0x460fd264, 0x0c2d97fd), TOBN(0x3a241426, 0xeb82f4f9)}}, {{TOBN(0x17d1df2c, 0x6a8ea820), TOBN(0xb2b50d3b, 0xf22cc254), TOBN(0x03856cba, 0xb7291426), TOBN(0x87fd26ae, 0x04f5ee39)}, {TOBN(0x9cb696cc, 0x02bee4ba), TOBN(0x53121804, 0x06820fd6), TOBN(0xa5dfc269, 0x0212e985), TOBN(0x666f7ffa, 0x160f9a09)}}, 
{{TOBN(0xc503cd33, 0xbccd9617), TOBN(0x365dede4, 0xba7730a3), TOBN(0x798c6355, 0x5ddb0786), TOBN(0xa6c3200e, 0xfc9cd3bc)}, {TOBN(0x060ffb2c, 0xe5e35efd), TOBN(0x99a4e25b, 0x5555a1c1), TOBN(0x11d95375, 0xf70b3751), TOBN(0x0a57354a, 0x160e1bf6)}}, {{TOBN(0xecb3ae4b, 0xf8e4b065), TOBN(0x07a834c4, 0x2e53022b), TOBN(0x1cd300b3, 0x8692ed96), TOBN(0x16a6f792, 0x61ee14ec)}, {TOBN(0x8f1063c6, 0x6a8649ed), TOBN(0xfbcdfcfe, 0x869f3e14), TOBN(0x2cfb97c1, 0x00a7b3ec), TOBN(0xcea49b3c, 0x7130c2f1)}}, {{TOBN(0x462d044f, 0xe9d96488), TOBN(0x4b53d52e, 0x8182a0c1), TOBN(0x84b6ddd3, 0x0391e9e9), TOBN(0x80ab7b48, 0xb1741a09)}, {TOBN(0xec0e15d4, 0x27d3317f), TOBN(0x8dfc1ddb, 0x1a64671e), TOBN(0x93cc5d5f, 0xd49c5b92), TOBN(0xc995d53d, 0x3674a331)}}, {{TOBN(0x302e41ec, 0x090090ae), TOBN(0x2278a0cc, 0xedb06830), TOBN(0x1d025932, 0xfbc99690), TOBN(0x0c32fbd2, 0xb80d68da)}, {TOBN(0xd79146da, 0xf341a6c1), TOBN(0xae0ba139, 0x1bef68a0), TOBN(0xc6b8a563, 0x8d774b3a), TOBN(0x1cf307bd, 0x880ba4d7)}}, {{TOBN(0xc033bdc7, 0x19803511), TOBN(0xa9f97b3b, 0x8888c3be), TOBN(0x3d68aebc, 0x85c6d05e), TOBN(0xc3b88a9d, 0x193919eb)}, {TOBN(0x2d300748, 0xc48b0ee3), TOBN(0x7506bc7c, 0x07a746c1), TOBN(0xfc48437c, 0x6e6d57f3), TOBN(0x5bd71587, 0xcfeaa91a)}}, {{TOBN(0xa4ed0408, 0xc1bc5225), TOBN(0xd0b946db, 0x2719226d), TOBN(0x109ecd62, 0x758d2d43), TOBN(0x75c8485a, 0x2751759b)}, {TOBN(0xb0b75f49, 0x9ce4177a), TOBN(0x4fa61a1e, 0x79c10c3d), TOBN(0xc062d300, 0xa167fcd7), TOBN(0x4df3874c, 0x750f0fa8)}}, {{TOBN(0x29ae2cf9, 0x83dfedc9), TOBN(0xf8437134, 0x8d87631a), TOBN(0xaf571711, 0x7429c8d2), TOBN(0x18d15867, 0x146d9272)}, {TOBN(0x83053ecf, 0x69769bb7), TOBN(0xc55eb856, 0xc479ab82), TOBN(0x5ef7791c, 0x21b0f4b2), TOBN(0xaa5956ba, 0x3d491525)}}, {{TOBN(0x407a96c2, 0x9fe20eba), TOBN(0xf27168bb, 0xe52a5ad3), TOBN(0x43b60ab3, 0xbf1d9d89), TOBN(0xe45c51ef, 0x710e727a)}, {TOBN(0xdfca5276, 0x099b4221), TOBN(0x8dc6407c, 0x2557a159), TOBN(0x0ead8335, 0x91035895), TOBN(0x0a9db957, 0x9c55dc32)}}, {{TOBN(0xe40736d3, 0xdf61bc76), 
TOBN(0x13a619c0, 0x3f778cdb), TOBN(0x6dd921a4, 0xc56ea28f), TOBN(0x76a52433, 0x2fa647b4)}, {TOBN(0x23591891, 0xac5bdc5d), TOBN(0xff4a1a72, 0xbac7dc01), TOBN(0x9905e261, 0x62df8453), TOBN(0x3ac045df, 0xe63b265f)}}, {{TOBN(0x8a3f341b, 0xad53dba7), TOBN(0x8ec269cc, 0x837b625a), TOBN(0xd71a2782, 0x3ae31189), TOBN(0x8fb4f9a3, 0x55e96120)}, {TOBN(0x804af823, 0xff9875cf), TOBN(0x23224f57, 0x5d442a9b), TOBN(0x1c4d3b9e, 0xecc62679), TOBN(0x91da22fb, 0xa0e7ddb1)}}, {{TOBN(0xa370324d, 0x6c04a661), TOBN(0x9710d3b6, 0x5e376d17), TOBN(0xed8c98f0, 0x3044e357), TOBN(0xc364ebbe, 0x6422701c)}, {TOBN(0x347f5d51, 0x7733d61c), TOBN(0xd55644b9, 0xcea826c3), TOBN(0x80c6e0ad, 0x55a25548), TOBN(0x0aa7641d, 0x844220a7)}}, {{TOBN(0x1438ec81, 0x31810660), TOBN(0x9dfa6507, 0xde4b4043), TOBN(0x10b515d8, 0xcc3e0273), TOBN(0x1b6066dd, 0x28d8cfb2)}, {TOBN(0xd3b04591, 0x9c9efebd), TOBN(0x425d4bdf, 0xa21c1ff4), TOBN(0x5fe5af19, 0xd57607d3), TOBN(0xbbf773f7, 0x54481084)}}, {{TOBN(0x8435bd69, 0x94b03ed1), TOBN(0xd9ad1de3, 0x634cc546), TOBN(0x2cf423fc, 0x00e420ca), TOBN(0xeed26d80, 0xa03096dd)}, {TOBN(0xd7f60be7, 0xa4db09d2), TOBN(0xf47f569d, 0x960622f7), TOBN(0xe5925fd7, 0x7296c729), TOBN(0xeff2db26, 0x26ca2715)}}, {{TOBN(0xa6fcd014, 0xb913e759), TOBN(0x53da4786, 0x8ff4de93), TOBN(0x14616d79, 0xc32068e1), TOBN(0xb187d664, 0xccdf352e)}, {TOBN(0xf7afb650, 0x1dc90b59), TOBN(0x8170e943, 0x7daa1b26), TOBN(0xc8e3bdd8, 0x700c0a84), TOBN(0x6e8d345f, 0x6482bdfa)}}, {{TOBN(0x84cfbfa1, 0xc5c5ea50), TOBN(0xd3baf14c, 0x67960681), TOBN(0x26398403, 0x0dd50942), TOBN(0xe4b7839c, 0x4716a663)}, {TOBN(0xd5f1f794, 0xe7de6dc0), TOBN(0x5cd0f4d4, 0x622aa7ce), TOBN(0x5295f3f1, 0x59acfeec), TOBN(0x8d933552, 0x953e0607)}}, {{TOBN(0xc7db8ec5, 0x776c5722), TOBN(0xdc467e62, 0x2b5f290c), TOBN(0xd4297e70, 0x4ff425a9), TOBN(0x4be924c1, 0x0cf7bb72)}, {TOBN(0x0d5dc5ae, 0xa1892131), TOBN(0x8bf8a8e3, 0xa705c992), TOBN(0x73a0b064, 0x7a305ac5), TOBN(0x00c9ca4e, 0x9a8c77a8)}}, {{TOBN(0x5dfee80f, 0x83774bdd), TOBN(0x63131602, 0x85734485), 
TOBN(0xa1b524ae, 0x914a69a9), TOBN(0xebc2ffaf, 0xd4e300d7)}, {TOBN(0x52c93db7, 0x7cfa46a5), TOBN(0x71e6161f, 0x21653b50), TOBN(0x3574fc57, 0xa4bc580a), TOBN(0xc09015dd, 0xe1bc1253)}}, {{TOBN(0x4b7b47b2, 0xd174d7aa), TOBN(0x4072d8e8, 0xf3a15d04), TOBN(0xeeb7d47f, 0xd6fa07ed), TOBN(0x6f2b9ff9, 0xedbdafb1)}, {TOBN(0x18c51615, 0x3760fe8a), TOBN(0x7a96e6bf, 0xf06c6c13), TOBN(0x4d7a0410, 0x0ea2d071), TOBN(0xa1914e9b, 0x0be2a5ce)}}, {{TOBN(0x5726e357, 0xd8a3c5cf), TOBN(0x1197ecc3, 0x2abb2b13), TOBN(0x6c0d7f7f, 0x31ae88dd), TOBN(0x15b20d1a, 0xfdbb3efe)}, {TOBN(0xcd06aa26, 0x70584039), TOBN(0x2277c969, 0xa7dc9747), TOBN(0xbca69587, 0x7855d815), TOBN(0x899ea238, 0x5188b32a)}}, {{TOBN(0x37d9228b, 0x760c1c9d), TOBN(0xc7efbb11, 0x9b5c18da), TOBN(0x7f0d1bc8, 0x19f6dbc5), TOBN(0x4875384b, 0x07e6905b)}, {TOBN(0xc7c50baa, 0x3ba8cd86), TOBN(0xb0ce40fb, 0xc2905de0), TOBN(0x70840673, 0x7a231952), TOBN(0xa912a262, 0xcf43de26)}}, {{TOBN(0x9c38ddcc, 0xeb5b76c1), TOBN(0x746f5285, 0x26fc0ab4), TOBN(0x52a63a50, 0xd62c269f), TOBN(0x60049c55, 0x99458621)}, {TOBN(0xe7f48f82, 0x3c2f7c9e), TOBN(0x6bd99043, 0x917d5cf3), TOBN(0xeb1317a8, 0x8701f469), TOBN(0xbd3fe2ed, 0x9a449fe0)}}, {{TOBN(0x421e79ca, 0x12ef3d36), TOBN(0x9ee3c36c, 0x3e7ea5de), TOBN(0xe48198b5, 0xcdff36f7), TOBN(0xaff4f967, 0xc6b82228)}, {TOBN(0x15e19dd0, 0xc47adb7e), TOBN(0x45699b23, 0x032e7dfa), TOBN(0x40680c8b, 0x1fae026a), TOBN(0x5a347a48, 0x550dbf4d)}}, {{TOBN(0xe652533b, 0x3cef0d7d), TOBN(0xd94f7b18, 0x2bbb4381), TOBN(0x838752be, 0x0e80f500), TOBN(0x8e6e2488, 0x9e9c9bfb)}, {TOBN(0xc9751697, 0x16caca6a), TOBN(0x866c49d8, 0x38531ad9), TOBN(0xc917e239, 0x7151ade1), TOBN(0x2d016ec1, 0x6037c407)}}, {{TOBN(0xa407ccc9, 0x00eac3f9), TOBN(0x835f6280, 0xe2ed4748), TOBN(0xcc54c347, 0x1cc98e0d), TOBN(0x0e969937, 0xdcb572eb)}, {TOBN(0x1b16c8e8, 0x8f30c9cb), TOBN(0xa606ae75, 0x373c4661), TOBN(0x47aa689b, 0x35502cab), TOBN(0xf89014ae, 0x4d9bb64f)}}, {{TOBN(0x202f6a9c, 0x31c71f7b), TOBN(0x01f95aa3, 0x296ffe5c), TOBN(0x5fc06014, 0x53cec3a3), 
TOBN(0xeb991237, 0x5f498a45)}, {TOBN(0xae9a935e, 0x5d91ba87), TOBN(0xc6ac6281, 0x0b564a19), TOBN(0x8a8fe81c, 0x3bd44e69), TOBN(0x7c8b467f, 0x9dd11d45)}}, {{TOBN(0xf772251f, 0xea5b8e69), TOBN(0xaeecb3bd, 0xc5b75fbc), TOBN(0x1aca3331, 0x887ff0e5), TOBN(0xbe5d49ff, 0x19f0a131)}, {TOBN(0x582c13aa, 0xe5c8646f), TOBN(0xdbaa12e8, 0x20e19980), TOBN(0x8f40f31a, 0xf7abbd94), TOBN(0x1f13f5a8, 0x1dfc7663)}}, {{TOBN(0x5d81f1ee, 0xaceb4fc0), TOBN(0x36256002, 0x5e6f0f42), TOBN(0x4b67d6d7, 0x751370c8), TOBN(0x2608b698, 0x03e80589)}, {TOBN(0xcfc0d2fc, 0x05268301), TOBN(0xa6943d39, 0x40309212), TOBN(0x192a90c2, 0x1fd0e1c2), TOBN(0xb209f113, 0x37f1dc76)}}, {{TOBN(0xefcc5e06, 0x97bf1298), TOBN(0xcbdb6730, 0x219d639e), TOBN(0xd009c116, 0xb81e8c6f), TOBN(0xa3ffdde3, 0x1a7ce2e5)}, {TOBN(0xc53fbaaa, 0xa914d3ba), TOBN(0x836d500f, 0x88df85ee), TOBN(0xd98dc71b, 0x66ee0751), TOBN(0x5a3d7005, 0x714516fd)}}, {{TOBN(0x21d3634d, 0x39eedbba), TOBN(0x35cd2e68, 0x0455a46d), TOBN(0xc8cafe65, 0xf9d7eb0c), TOBN(0xbda3ce9e, 0x00cefb3e)}, {TOBN(0xddc17a60, 0x2c9cf7a4), TOBN(0x01572ee4, 0x7bcb8773), TOBN(0xa92b2b01, 0x8c7548df), TOBN(0x732fd309, 0xa84600e3)}}, {{TOBN(0xe22109c7, 0x16543a40), TOBN(0x9acafd36, 0xfede3c6c), TOBN(0xfb206852, 0x6824e614), TOBN(0x2a4544a9, 0xda25dca0)}, {TOBN(0x25985262, 0x91d60b06), TOBN(0x281b7be9, 0x28753545), TOBN(0xec667b1a, 0x90f13b27), TOBN(0x33a83aff, 0x940e2eb4)}}, {{TOBN(0x80009862, 0xd5d721d5), TOBN(0x0c3357a3, 0x5bd3a182), TOBN(0x27f3a83b, 0x7aa2cda4), TOBN(0xb58ae74e, 0xf6f83085)}, {TOBN(0x2a911a81, 0x2e6dad6b), TOBN(0xde286051, 0xf43d6c5b), TOBN(0x4bdccc41, 0xf996c4d8), TOBN(0xe7312ec0, 0x0ae1e24e)}}}, {{{TOBN(0xf8d112e7, 0x6e6485b3), TOBN(0x4d3e24db, 0x771c52f8), TOBN(0x48e3ee41, 0x684a2f6d), TOBN(0x7161957d, 0x21d95551)}, {TOBN(0x19631283, 0xcdb12a6c), TOBN(0xbf3fa882, 0x2e50e164), TOBN(0xf6254b63, 0x3166cc73), TOBN(0x3aefa7ae, 0xaee8cc38)}}, {{TOBN(0x79b0fe62, 0x3b36f9fd), TOBN(0x26543b23, 0xfde19fc0), TOBN(0x136e64a0, 0x958482ef), TOBN(0x23f63771, 
0x9b095825)}, {TOBN(0x14cfd596, 0xb6a1142e), TOBN(0x5ea6aac6, 0x335aac0b), TOBN(0x86a0e8bd, 0xf3081dd5), TOBN(0x5fb89d79, 0x003dc12a)}}, {{TOBN(0xf615c33a, 0xf72e34d4), TOBN(0x0bd9ea40, 0x110eec35), TOBN(0x1c12bc5b, 0xc1dea34e), TOBN(0x686584c9, 0x49ae4699)}, {TOBN(0x13ad95d3, 0x8c97b942), TOBN(0x4609561a, 0x4e5c7562), TOBN(0x9e94a4ae, 0xf2737f89), TOBN(0xf57594c6, 0x371c78b6)}}, {{TOBN(0x0f0165fc, 0xe3779ee3), TOBN(0xe00e7f9d, 0xbd495d9e), TOBN(0x1fa4efa2, 0x20284e7a), TOBN(0x4564bade, 0x47ac6219)}, {TOBN(0x90e6312a, 0xc4708e8e), TOBN(0x4f5725fb, 0xa71e9adf), TOBN(0xe95f55ae, 0x3d684b9f), TOBN(0x47f7ccb1, 0x1e94b415)}}, {{TOBN(0x7322851b, 0x8d946581), TOBN(0xf0d13133, 0xbdf4a012), TOBN(0xa3510f69, 0x6584dae0), TOBN(0x03a7c171, 0x3c9f6c6d)}, {TOBN(0x5be97f38, 0xe475381a), TOBN(0xca1ba422, 0x85823334), TOBN(0xf83cc5c7, 0x0be17dda), TOBN(0x158b1494, 0x0b918c0f)}}, {{TOBN(0xda3a77e5, 0x522e6b69), TOBN(0x69c908c3, 0xbbcd6c18), TOBN(0x1f1b9e48, 0xd924fd56), TOBN(0x37c64e36, 0xaa4bb3f7)}, {TOBN(0x5a4fdbdf, 0xee478d7d), TOBN(0xba75c8bc, 0x0193f7a0), TOBN(0x84bc1e84, 0x56cd16df), TOBN(0x1fb08f08, 0x46fad151)}}, {{TOBN(0x8a7cabf9, 0x842e9f30), TOBN(0xa331d4bf, 0x5eab83af), TOBN(0xd272cfba, 0x017f2a6a), TOBN(0x27560abc, 0x83aba0e3)}, {TOBN(0x94b83387, 0x0e3a6b75), TOBN(0x25c6aea2, 0x6b9f50f5), TOBN(0x803d691d, 0xb5fdf6d0), TOBN(0x03b77509, 0xe6333514)}}, {{TOBN(0x36178903, 0x61a341c1), TOBN(0x3604dc60, 0x0cfd6142), TOBN(0x022295eb, 0x8533316c), TOBN(0x3dbde4ac, 0x44af2922)}, {TOBN(0x898afc5d, 0x1c7eef69), TOBN(0x58896805, 0xd14f4fa1), TOBN(0x05002160, 0x203c21ca), TOBN(0x6f0d1f30, 0x40ef730b)}}, {{TOBN(0x8e8c44d4, 0x196224f8), TOBN(0x75a4ab95, 0x374d079d), TOBN(0x79085ecc, 0x7d48f123), TOBN(0x56f04d31, 0x1bf65ad8)}, {TOBN(0xe220bf1c, 0xbda602b2), TOBN(0x73ee1742, 0xf9612c69), TOBN(0x76008fc8, 0x084fd06b), TOBN(0x4000ef9f, 0xf11380d1)}}, {{TOBN(0x48201b4b, 0x12cfe297), TOBN(0x3eee129c, 0x292f74e5), TOBN(0xe1fe114e, 0xc9e874e8), TOBN(0x899b055c, 0x92c5fc41)}, {TOBN(0x4e477a64, 
0x3a39c8cf), TOBN(0x82f09efe, 0x78963cc9), TOBN(0x6fd3fd8f, 0xd333f863), TOBN(0x85132b2a, 0xdc949c63)}}, {{TOBN(0x7e06a3ab, 0x516eb17b), TOBN(0x73bec06f, 0xd2c7372b), TOBN(0xe4f74f55, 0xba896da6), TOBN(0xbb4afef8, 0x8e9eb40f)}, {TOBN(0x2d75bec8, 0xe61d66b0), TOBN(0x02bda4b4, 0xef29300b), TOBN(0x8bbaa8de, 0x026baa5a), TOBN(0xff54befd, 0xa07f4440)}}, {{TOBN(0xbd9b8b1d, 0xbe7a2af3), TOBN(0xec51caa9, 0x4fb74a72), TOBN(0xb9937a4b, 0x63879697), TOBN(0x7c9a9d20, 0xec2687d5)}, {TOBN(0x1773e44f, 0x6ef5f014), TOBN(0x8abcf412, 0xe90c6900), TOBN(0x387bd022, 0x8142161e), TOBN(0x50393755, 0xfcb6ff2a)}}, {{TOBN(0x9813fd56, 0xed6def63), TOBN(0x53cf6482, 0x7d53106c), TOBN(0x991a35bd, 0x431f7ac1), TOBN(0xf1e274dd, 0x63e65faf)}, {TOBN(0xf63ffa3c, 0x44cc7880), TOBN(0x411a426b, 0x7c256981), TOBN(0xb698b9fd, 0x93a420e0), TOBN(0x89fdddc0, 0xae53f8fe)}}, {{TOBN(0x766e0722, 0x32398baa), TOBN(0x205fee42, 0x5cfca031), TOBN(0xa49f5341, 0x7a029cf2), TOBN(0xa88c68b8, 0x4023890d)}, {TOBN(0xbc275041, 0x7337aaa8), TOBN(0x9ed364ad, 0x0eb384f4), TOBN(0xe0816f85, 0x29aba92f), TOBN(0x2e9e1941, 0x04e38a88)}}, {{TOBN(0x57eef44a, 0x3dafd2d5), TOBN(0x35d1fae5, 0x97ed98d8), TOBN(0x50628c09, 0x2307f9b1), TOBN(0x09d84aae, 0xd6cba5c6)}, {TOBN(0x67071bc7, 0x88aaa691), TOBN(0x2dea57a9, 0xafe6cb03), TOBN(0xdfe11bb4, 0x3d78ac01), TOBN(0x7286418c, 0x7fd7aa51)}}, {{TOBN(0xfabf7709, 0x77f7195a), TOBN(0x8ec86167, 0xadeb838f), TOBN(0xea1285a8, 0xbb4f012d), TOBN(0xd6883503, 0x9a3eab3f)}, {TOBN(0xee5d24f8, 0x309004c2), TOBN(0xa96e4b76, 0x13ffe95e), TOBN(0x0cdffe12, 0xbd223ea4), TOBN(0x8f5c2ee5, 0xb6739a53)}}, {{TOBN(0x5cb4aaa5, 0xdd968198), TOBN(0xfa131c52, 0x72413a6c), TOBN(0x53d46a90, 0x9536d903), TOBN(0xb270f0d3, 0x48606d8e)}, {TOBN(0x518c7564, 0xa053a3bc), TOBN(0x088254b7, 0x1a86caef), TOBN(0xb3ba8cb4, 0x0ab5efd0), TOBN(0x5c59900e, 0x4605945d)}}, {{TOBN(0xecace1dd, 0xa1887395), TOBN(0x40960f36, 0x932a65de), TOBN(0x9611ff5c, 0x3aa95529), TOBN(0xc58215b0, 0x7c1e5a36)}, {TOBN(0xd48c9b58, 0xf0e1a524), TOBN(0xb406856b, 
0xf590dfb8), TOBN(0xc7605e04, 0x9cd95662), TOBN(0x0dd036ee, 0xa33ecf82)}}, {{TOBN(0xa50171ac, 0xc33156b3), TOBN(0xf09d24ea, 0x4a80172e), TOBN(0x4e1f72c6, 0x76dc8eef), TOBN(0xe60caadc, 0x5e3d44ee)}, {TOBN(0x006ef8a6, 0x979b1d8f), TOBN(0x60908a1c, 0x97788d26), TOBN(0x6e08f95b, 0x266feec0), TOBN(0x618427c2, 0x22e8c94e)}}, {{TOBN(0x3d613339, 0x59145a65), TOBN(0xcd9bc368, 0xfa406337), TOBN(0x82d11be3, 0x2d8a52a0), TOBN(0xf6877b27, 0x97a1c590)}, {TOBN(0x837a819b, 0xf5cbdb25), TOBN(0x2a4fd1d8, 0xde090249), TOBN(0x622a7de7, 0x74990e5f), TOBN(0x840fa5a0, 0x7945511b)}}, {{TOBN(0x30b974be, 0x6558842d), TOBN(0x70df8c64, 0x17f3d0a6), TOBN(0x7c803520, 0x7542e46d), TOBN(0x7251fe7f, 0xe4ecc823)}, {TOBN(0xe59134cb, 0x5e9aac9a), TOBN(0x11bb0934, 0xf0045d71), TOBN(0x53e5d9b5, 0xdbcb1d4e), TOBN(0x8d97a905, 0x92defc91)}}, {{TOBN(0xfe289327, 0x7946d3f9), TOBN(0xe132bd24, 0x07472273), TOBN(0xeeeb510c, 0x1eb6ae86), TOBN(0x777708c5, 0xf0595067)}, {TOBN(0x18e2c8cd, 0x1297029e), TOBN(0x2c61095c, 0xbbf9305e), TOBN(0xe466c258, 0x6b85d6d9), TOBN(0x8ac06c36, 0xda1ea530)}}, {{TOBN(0xa365dc39, 0xa1304668), TOBN(0xe4a9c885, 0x07f89606), TOBN(0x65a4898f, 0xacc7228d), TOBN(0x3e2347ff, 0x84ca8303)}, {TOBN(0xa5f6fb77, 0xea7d23a3), TOBN(0x2fac257d, 0x672a71cd), TOBN(0x6908bef8, 0x7e6a44d3), TOBN(0x8ff87566, 0x891d3d7a)}}, {{TOBN(0xe58e90b3, 0x6b0cf82e), TOBN(0x6438d246, 0x2615b5e7), TOBN(0x07b1f8fc, 0x669c145a), TOBN(0xb0d8b2da, 0x36f1e1cb)}, {TOBN(0x54d5dadb, 0xd9184c4d), TOBN(0x3dbb18d5, 0xf93d9976), TOBN(0x0a3e0f56, 0xd1147d47), TOBN(0x2afa8c8d, 0xa0a48609)}}, {{TOBN(0x275353e8, 0xbc36742c), TOBN(0x898f427e, 0xeea0ed90), TOBN(0x26f4947e, 0x3e477b00), TOBN(0x8ad8848a, 0x308741e3)}, {TOBN(0x6c703c38, 0xd74a2a46), TOBN(0x5e3e05a9, 0x9ba17ba2), TOBN(0xc1fa6f66, 0x4ab9a9e4), TOBN(0x474a2d9a, 0x3841d6ec)}}, {{TOBN(0x871239ad, 0x653ae326), TOBN(0x14bcf72a, 0xa74cbb43), TOBN(0x8737650e, 0x20d4c083), TOBN(0x3df86536, 0x110ed4af)}, {TOBN(0xd2d86fe7, 0xb53ca555), TOBN(0x688cb00d, 0xabd5d538), TOBN(0xcf81bda3, 
0x1ad38468), TOBN(0x7ccfe3cc, 0xf01167b6)}}, {{TOBN(0xcf4f47e0, 0x6c4c1fe6), TOBN(0x557e1f1a, 0x298bbb79), TOBN(0xf93b974f, 0x30d45a14), TOBN(0x174a1d2d, 0x0baf97c4)}, {TOBN(0x7a003b30, 0xc51fbf53), TOBN(0xd8940991, 0xee68b225), TOBN(0x5b0aa7b7, 0x1c0f4173), TOBN(0x975797c9, 0xa20a7153)}}, {{TOBN(0x26e08c07, 0xe3533d77), TOBN(0xd7222e6a, 0x2e341c99), TOBN(0x9d60ec3d, 0x8d2dc4ed), TOBN(0xbdfe0d8f, 0x7c476cf8)}, {TOBN(0x1fe59ab6, 0x1d056605), TOBN(0xa9ea9df6, 0x86a8551f), TOBN(0x8489941e, 0x47fb8d8c), TOBN(0xfeb874eb, 0x4a7f1b10)}}, {{TOBN(0xfe5fea86, 0x7ee0d98f), TOBN(0x201ad34b, 0xdbf61864), TOBN(0x45d8fe47, 0x37c031d4), TOBN(0xd5f49fae, 0x795f0822)}, {TOBN(0xdb0fb291, 0xc7f4a40c), TOBN(0x2e69d9c1, 0x730ddd92), TOBN(0x754e1054, 0x49d76987), TOBN(0x8a24911d, 0x7662db87)}}, {{TOBN(0x61fc1810, 0x60a71676), TOBN(0xe852d1a8, 0xf66a8ad1), TOBN(0x172bbd65, 0x6417231e), TOBN(0x0d6de7bd, 0x3babb11f)}, {TOBN(0x6fde6f88, 0xc8e347f8), TOBN(0x1c587547, 0x9bd99cc3), TOBN(0x78e54ed0, 0x34076950), TOBN(0x97f0f334, 0x796e83ba)}}, {{TOBN(0xe4dbe1ce, 0x4924867a), TOBN(0xbd5f51b0, 0x60b84917), TOBN(0x37530040, 0x3cb09a79), TOBN(0xdb3fe0f8, 0xff1743d8)}, {TOBN(0xed7894d8, 0x556fa9db), TOBN(0xfa262169, 0x23412fbf), TOBN(0x563be0db, 0xba7b9291), TOBN(0x6ca8b8c0, 0x0c9fb234)}}, {{TOBN(0xed406aa9, 0xbd763802), TOBN(0xc21486a0, 0x65303da1), TOBN(0x61ae291e, 0xc7e62ec4), TOBN(0x622a0492, 0xdf99333e)}, {TOBN(0x7fd80c9d, 0xbb7a8ee0), TOBN(0xdc2ed3bc, 0x6c01aedb), TOBN(0x35c35a12, 0x08be74ec), TOBN(0xd540cb1a, 0x469f671f)}}, {{TOBN(0xd16ced4e, 0xcf84f6c7), TOBN(0x8561fb9c, 0x2d090f43), TOBN(0x7e693d79, 0x6f239db4), TOBN(0xa736f928, 0x77bd0d94)}, {TOBN(0x07b4d929, 0x2c1950ee), TOBN(0xda177543, 0x56dc11b3), TOBN(0xa5dfbbaa, 0x7a6a878e), TOBN(0x1c70cb29, 0x4decb08a)}}, {{TOBN(0xfba28c8b, 0x6f0f7c50), TOBN(0xa8eba2b8, 0x854dcc6d), TOBN(0x5ff8e89a, 0x36b78642), TOBN(0x070c1c8e, 0xf6873adf)}, {TOBN(0xbbd3c371, 0x6484d2e4), TOBN(0xfb78318f, 0x0d414129), TOBN(0x2621a39c, 0x6ad93b0b), TOBN(0x979d74c2, 
0xa9e917f7)}}, {{TOBN(0xfc195647, 0x61fb0428), TOBN(0x4d78954a, 0xbee624d4), TOBN(0xb94896e0, 0xb8ae86fd), TOBN(0x6667ac0c, 0xc91c8b13)}, {TOBN(0x9f180512, 0x43bcf832), TOBN(0xfbadf8b7, 0xa0010137), TOBN(0xc69b4089, 0xb3ba8aa7), TOBN(0xfac4bacd, 0xe687ce85)}}, {{TOBN(0x9164088d, 0x977eab40), TOBN(0x51f4c5b6, 0x2760b390), TOBN(0xd238238f, 0x340dd553), TOBN(0x358566c3, 0xdb1d31c9)}, {TOBN(0x3a5ad69e, 0x5068f5ff), TOBN(0xf31435fc, 0xdaff6b06), TOBN(0xae549a5b, 0xd6debff0), TOBN(0x59e5f0b7, 0x75e01331)}}, {{TOBN(0x5d492fb8, 0x98559acf), TOBN(0x96018c2e, 0x4db79b50), TOBN(0x55f4a48f, 0x609f66aa), TOBN(0x1943b3af, 0x4900a14f)}, {TOBN(0xc22496df, 0x15a40d39), TOBN(0xb2a44684, 0x4c20f7c5), TOBN(0x76a35afa, 0x3b98404c), TOBN(0xbec75725, 0xff5d1b77)}}, {{TOBN(0xb67aa163, 0xbea06444), TOBN(0x27e95bb2, 0xf724b6f2), TOBN(0x3c20e3e9, 0xd238c8ab), TOBN(0x1213754e, 0xddd6ae17)}, {TOBN(0x8c431020, 0x716e0f74), TOBN(0x6679c82e, 0xffc095c2), TOBN(0x2eb3adf4, 0xd0ac2932), TOBN(0x2cc970d3, 0x01bb7a76)}}, {{TOBN(0x70c71f2f, 0x740f0e66), TOBN(0x545c616b, 0x2b6b23cc), TOBN(0x4528cfcb, 0xb40a8bd7), TOBN(0xff839633, 0x2ab27722)}, {TOBN(0x049127d9, 0x025ac99a), TOBN(0xd314d4a0, 0x2b63e33b), TOBN(0xc8c310e7, 0x28d84519), TOBN(0x0fcb8983, 0xb3bc84ba)}}, {{TOBN(0x2cc52261, 0x38634818), TOBN(0x501814f4, 0xb44c2e0b), TOBN(0xf7e181aa, 0x54dfdba3), TOBN(0xcfd58ff0, 0xe759718c)}, {TOBN(0xf90cdb14, 0xd3b507a8), TOBN(0x57bd478e, 0xc50bdad8), TOBN(0x29c197e2, 0x50e5f9aa), TOBN(0x4db6eef8, 0xe40bc855)}}, {{TOBN(0x2cc8f21a, 0xd1fc0654), TOBN(0xc71cc963, 0x81269d73), TOBN(0xecfbb204, 0x077f49f9), TOBN(0xdde92571, 0xca56b793)}, {TOBN(0x9abed6a3, 0xf97ad8f7), TOBN(0xe6c19d3f, 0x924de3bd), TOBN(0x8dce92f4, 0xa140a800), TOBN(0x85f44d1e, 0x1337af07)}}, {{TOBN(0x5953c08b, 0x09d64c52), TOBN(0xa1b5e49f, 0xf5df9749), TOBN(0x336a8fb8, 0x52735f7d), TOBN(0xb332b6db, 0x9add676b)}, {TOBN(0x558b88a0, 0xb4511aa4), TOBN(0x09788752, 0xdbd5cc55), TOBN(0x16b43b9c, 0xd8cd52bd), TOBN(0x7f0bc5a0, 0xc2a2696b)}}, 
{{TOBN(0x146e12d4, 0xc11f61ef), TOBN(0x9ce10754, 0x3a83e79e), TOBN(0x08ec73d9, 0x6cbfca15), TOBN(0x09ff29ad, 0x5b49653f)}, {TOBN(0xe31b72bd, 0xe7da946e), TOBN(0xebf9eb3b, 0xee80a4f2), TOBN(0xd1aabd08, 0x17598ce4), TOBN(0x18b5fef4, 0x53f37e80)}}, {{TOBN(0xd5d5cdd3, 0x5958cd79), TOBN(0x3580a1b5, 0x1d373114), TOBN(0xa36e4c91, 0xfa935726), TOBN(0xa38c534d, 0xef20d760)}, {TOBN(0x7088e40a, 0x2ff5845b), TOBN(0xe5bb40bd, 0xbd78177f), TOBN(0x4f06a7a8, 0x857f9920), TOBN(0xe3cc3e50, 0xe968f05d)}}, {{TOBN(0x1d68b7fe, 0xe5682d26), TOBN(0x5206f76f, 0xaec7f87c), TOBN(0x41110530, 0x041951ab), TOBN(0x58ec52c1, 0xd4b5a71a)}, {TOBN(0xf3488f99, 0x0f75cf9a), TOBN(0xf411951f, 0xba82d0d5), TOBN(0x27ee75be, 0x618895ab), TOBN(0xeae060d4, 0x6d8aab14)}}, {{TOBN(0x9ae1df73, 0x7fb54dc2), TOBN(0x1f3e391b, 0x25963649), TOBN(0x242ec32a, 0xfe055081), TOBN(0x5bd450ef, 0x8491c9bd)}, {TOBN(0x367efc67, 0x981eb389), TOBN(0xed7e1928, 0x3a0550d5), TOBN(0x362e776b, 0xab3ce75c), TOBN(0xe890e308, 0x1f24c523)}}, {{TOBN(0xb961b682, 0xfeccef76), TOBN(0x8b8e11f5, 0x8bba6d92), TOBN(0x8f2ccc4c, 0x2b2375c4), TOBN(0x0d7f7a52, 0xe2f86cfa)}, {TOBN(0xfd94d30a, 0x9efe5633), TOBN(0x2d8d246b, 0x5451f934), TOBN(0x2234c6e3, 0x244e6a00), TOBN(0xde2b5b0d, 0xddec8c50)}}, {{TOBN(0x2ce53c5a, 0xbf776f5b), TOBN(0x6f724071, 0x60357b05), TOBN(0xb2593717, 0x71bf3f7a), TOBN(0x87d2501c, 0x440c4a9f)}, {TOBN(0x440552e1, 0x87b05340), TOBN(0xb7bf7cc8, 0x21624c32), TOBN(0x4155a6ce, 0x22facddb), TOBN(0x5a4228cb, 0x889837ef)}}, {{TOBN(0xef87d6d6, 0xfd4fd671), TOBN(0xa233687e, 0xc2daa10e), TOBN(0x75622244, 0x03c0eb96), TOBN(0x7632d184, 0x8bf19be6)}, {TOBN(0x05d0f8e9, 0x40735ff4), TOBN(0x3a3e6e13, 0xc00931f1), TOBN(0x31ccde6a, 0xdafe3f18), TOBN(0xf381366a, 0xcfe51207)}}, {{TOBN(0x24c222a9, 0x60167d92), TOBN(0x62f9d6f8, 0x7529f18c), TOBN(0x412397c0, 0x0353b114), TOBN(0x334d89dc, 0xef808043)}, {TOBN(0xd9ec63ba, 0x2a4383ce), TOBN(0xcec8e937, 0x5cf92ba0), TOBN(0xfb8b4288, 0xc8be74c0), TOBN(0x67d6912f, 0x105d4391)}}, {{TOBN(0x7b996c46, 0x1b913149), 
TOBN(0x36aae2ef, 0x3a4e02da), TOBN(0xb68aa003, 0x972de594), TOBN(0x284ec70d, 0x4ec6d545)}, {TOBN(0xf3d2b2d0, 0x61391d54), TOBN(0x69c5d5d6, 0xfe114e92), TOBN(0xbe0f00b5, 0xb4482dff), TOBN(0xe1596fa5, 0xf5bf33c5)}}, {{TOBN(0x10595b56, 0x96a71cba), TOBN(0x944938b2, 0xfdcadeb7), TOBN(0xa282da4c, 0xfccd8471), TOBN(0x98ec05f3, 0x0d37bfe1)}, {TOBN(0xe171ce1b, 0x0698304a), TOBN(0x2d691444, 0x21bdf79b), TOBN(0xd0cd3b74, 0x1b21dec1), TOBN(0x712ecd8b, 0x16a15f71)}}, {{TOBN(0x8d4c00a7, 0x00fd56e1), TOBN(0x02ec9692, 0xf9527c18), TOBN(0x21c44937, 0x4a3e42e1), TOBN(0x9176fbab, 0x1392ae0a)}, {TOBN(0x8726f1ba, 0x44b7b618), TOBN(0xb4d7aae9, 0xf1de491c), TOBN(0xf91df7b9, 0x07b582c0), TOBN(0x7e116c30, 0xef60aa3a)}}, {{TOBN(0x99270f81, 0x466265d7), TOBN(0xb15b6fe2, 0x4df7adf0), TOBN(0xfe33b2d3, 0xf9738f7f), TOBN(0x48553ab9, 0xd6d70f95)}, {TOBN(0x2cc72ac8, 0xc21e94db), TOBN(0x795ac38d, 0xbdc0bbee), TOBN(0x0a1be449, 0x2e40478f), TOBN(0x81bd3394, 0x052bde55)}}, {{TOBN(0x63c8dbe9, 0x56b3c4f2), TOBN(0x017a99cf, 0x904177cc), TOBN(0x947bbddb, 0x4d010fc1), TOBN(0xacf9b00b, 0xbb2c9b21)}, {TOBN(0x2970bc8d, 0x47173611), TOBN(0x1a4cbe08, 0xac7d756f), TOBN(0x06d9f4aa, 0x67d541a2), TOBN(0xa3e8b689, 0x59c2cf44)}}, {{TOBN(0xaad066da, 0x4d88f1dd), TOBN(0xc604f165, 0x7ad35dea), TOBN(0x7edc0720, 0x4478ca67), TOBN(0xa10dfae0, 0xba02ce06)}, {TOBN(0xeceb1c76, 0xaf36f4e4), TOBN(0x994b2292, 0xaf3f8f48), TOBN(0xbf9ed77b, 0x77c8a68c), TOBN(0x74f544ea, 0x51744c9d)}}, {{TOBN(0x82d05bb9, 0x8113a757), TOBN(0x4ef2d2b4, 0x8a9885e4), TOBN(0x1e332be5, 0x1aa7865f), TOBN(0x22b76b18, 0x290d1a52)}, {TOBN(0x308a2310, 0x44351683), TOBN(0x9d861896, 0xa3f22840), TOBN(0x5959ddcd, 0x841ed947), TOBN(0x0def0c94, 0x154b73bf)}}, {{TOBN(0xf0105417, 0x4c7c15e0), TOBN(0x539bfb02, 0x3a277c32), TOBN(0xe699268e, 0xf9dccf5f), TOBN(0x9f5796a5, 0x0247a3bd)}, {TOBN(0x8b839de8, 0x4f157269), TOBN(0xc825c1e5, 0x7a30196b), TOBN(0x6ef0aabc, 0xdc8a5a91), TOBN(0xf4a8ce6c, 0x498b7fe6)}}, {{TOBN(0x1cce35a7, 0x70cbac78), TOBN(0x83488e9b, 0xf6b23958), 
TOBN(0x0341a070, 0xd76cb011), TOBN(0xda6c9d06, 0xae1b2658)}, {TOBN(0xb701fb30, 0xdd648c52), TOBN(0x994ca02c, 0x52fb9fd1), TOBN(0x06933117, 0x6f563086), TOBN(0x3d2b8100, 0x17856bab)}}, {{TOBN(0xe89f48c8, 0x5963a46e), TOBN(0x658ab875, 0xa99e61c7), TOBN(0x6e296f87, 0x4b8517b4), TOBN(0x36c4fcdc, 0xfc1bc656)}, {TOBN(0xde5227a1, 0xa3906def), TOBN(0x9fe95f57, 0x62418945), TOBN(0x20c91e81, 0xfdd96cde), TOBN(0x5adbe47e, 0xda4480de)}}, {{TOBN(0xa009370f, 0x396de2b6), TOBN(0x98583d4b, 0xf0ecc7bd), TOBN(0xf44f6b57, 0xe51d0672), TOBN(0x03d6b078, 0x556b1984)}, {TOBN(0x27dbdd93, 0xb0b64912), TOBN(0x9b3a3434, 0x15687b09), TOBN(0x0dba6461, 0x51ec20a9), TOBN(0xec93db7f, 0xff28187c)}}, {{TOBN(0x00ff8c24, 0x66e48bdd), TOBN(0x2514f2f9, 0x11ccd78e), TOBN(0xeba11f4f, 0xe1250603), TOBN(0x8a22cd41, 0x243fa156)}, {TOBN(0xa4e58df4, 0xb283e4c6), TOBN(0x78c29859, 0x8b39783f), TOBN(0x5235aee2, 0xa5259809), TOBN(0xc16284b5, 0x0e0227dd)}}, {{TOBN(0xa5f57916, 0x1338830d), TOBN(0x6d4b8a6b, 0xd2123fca), TOBN(0x236ea68a, 0xf9c546f8), TOBN(0xc1d36873, 0xfa608d36)}, {TOBN(0xcd76e495, 0x8d436d13), TOBN(0xd4d9c221, 0x8fb080af), TOBN(0x665c1728, 0xe8ad3fb5), TOBN(0xcf1ebe4d, 0xb3d572e0)}}, {{TOBN(0xa7a8746a, 0x584c5e20), TOBN(0x267e4ea1, 0xb9dc7035), TOBN(0x593a15cf, 0xb9548c9b), TOBN(0x5e6e2135, 0x4bd012f3)}, {TOBN(0xdf31cc6a, 0x8c8f936e), TOBN(0x8af84d04, 0xb5c241dc), TOBN(0x63990a6f, 0x345efb86), TOBN(0x6fef4e61, 0xb9b962cb)}}}, {{{TOBN(0xf6368f09, 0x25722608), TOBN(0x131260db, 0x131cf5c6), TOBN(0x40eb353b, 0xfab4f7ac), TOBN(0x85c78880, 0x37eee829)}, {TOBN(0x4c1581ff, 0xc3bdf24e), TOBN(0x5bff75cb, 0xf5c3c5a8), TOBN(0x35e8c83f, 0xa14e6f40), TOBN(0xb81d1c0f, 0x0295e0ca)}}, {{TOBN(0xfcde7cc8, 0xf43a730f), TOBN(0xe89b6f3c, 0x33ab590e), TOBN(0xc823f529, 0xad03240b), TOBN(0x82b79afe, 0x98bea5db)}, {TOBN(0x568f2856, 0x962fe5de), TOBN(0x0c590adb, 0x60c591f3), TOBN(0x1fc74a14, 0x4a28a858), TOBN(0x3b662498, 0xb3203f4c)}}, {{TOBN(0x91e3cf0d, 0x6c39765a), TOBN(0xa2db3acd, 0xac3cca0b), TOBN(0x288f2f08, 0xcb953b50), 
TOBN(0x2414582c, 0xcf43cf1a)}, {TOBN(0x8dec8bbc, 0x60eee9a8), TOBN(0x54c79f02, 0x729aa042), TOBN(0xd81cd5ec, 0x6532f5d5), TOBN(0xa672303a, 0xcf82e15f)}}, {{TOBN(0x376aafa8, 0x719c0563), TOBN(0xcd8ad2dc, 0xbc5fc79f), TOBN(0x303fdb9f, 0xcb750cd3), TOBN(0x14ff052f, 0x4418b08e)}, {TOBN(0xf75084cf, 0x3e2d6520), TOBN(0x7ebdf0f8, 0x144ed509), TOBN(0xf43bf0f2, 0xd3f25b98), TOBN(0x86ad71cf, 0xa354d837)}}, {{TOBN(0xb827fe92, 0x26f43572), TOBN(0xdfd3ab5b, 0x5d824758), TOBN(0x315dd23a, 0x539094c1), TOBN(0x85c0e37a, 0x66623d68)}, {TOBN(0x575c7972, 0x7be19ae0), TOBN(0x616a3396, 0xdf0d36b5), TOBN(0xa1ebb3c8, 0x26b1ff7e), TOBN(0x635b9485, 0x140ad453)}}, {{TOBN(0x92bf3cda, 0xda430c0b), TOBN(0x4702850e, 0x3a96dac6), TOBN(0xc91cf0a5, 0x15ac326a), TOBN(0x95de4f49, 0xab8c25e4)}, {TOBN(0xb01bad09, 0xe265c17c), TOBN(0x24e45464, 0x087b3881), TOBN(0xd43e583c, 0xe1fac5ca), TOBN(0xe17cb318, 0x6ead97a6)}}, {{TOBN(0x6cc39243, 0x74dcec46), TOBN(0x33cfc02d, 0x54c2b73f), TOBN(0x82917844, 0xf26cd99c), TOBN(0x8819dd95, 0xd1773f89)}, {TOBN(0x09572aa6, 0x0871f427), TOBN(0x8e0cf365, 0xf6f01c34), TOBN(0x7fa52988, 0xbff1f5af), TOBN(0x4eb357ea, 0xe75e8e50)}}, {{TOBN(0xd9d0c8c4, 0x868af75d), TOBN(0xd7325cff, 0x45c8c7ea), TOBN(0xab471996, 0xcc81ecb0), TOBN(0xff5d55f3, 0x611824ed)}, {TOBN(0xbe314541, 0x1977a0ee), TOBN(0x5085c4c5, 0x722038c6), TOBN(0x2d5335bf, 0xf94bb495), TOBN(0x894ad8a6, 0xc8e2a082)}}, {{TOBN(0x5c3e2341, 0xada35438), TOBN(0xf4a9fc89, 0x049b8c4e), TOBN(0xbeeb355a, 0x9f17cf34), TOBN(0x3f311e0e, 0x6c91fe10)}, {TOBN(0xc2d20038, 0x92ab9891), TOBN(0x257bdcc1, 0x3e8ce9a9), TOBN(0x1b2d9789, 0x88c53bee), TOBN(0x927ce89a, 0xcdba143a)}}, {{TOBN(0xb0a32cca, 0x523db280), TOBN(0x5c889f8a, 0x50d43783), TOBN(0x503e04b3, 0x4897d16f), TOBN(0x8cdb6e78, 0x08f5f2e8)}, {TOBN(0x6ab91cf0, 0x179c8e74), TOBN(0xd8874e52, 0x48211d60), TOBN(0xf948d4d5, 0xea851200), TOBN(0x4076d41e, 0xe6f9840a)}}, {{TOBN(0xc20e263c, 0x47b517ea), TOBN(0x79a448fd, 0x30685e5e), TOBN(0xe55f6f78, 0xf90631a0), TOBN(0x88a790b1, 0xa79e6346)}, 
{TOBN(0x62160c7d, 0x80969fe8), TOBN(0x54f92fd4, 0x41491bb9), TOBN(0xa6645c23, 0x5c957526), TOBN(0xf44cc5ae, 0xbea3ce7b)}}, {{TOBN(0xf7628327, 0x8b1e68b7), TOBN(0xc731ad7a, 0x303f29d3), TOBN(0xfe5a9ca9, 0x57d03ecb), TOBN(0x96c0d50c, 0x41bc97a7)}, {TOBN(0xc4669fe7, 0x9b4f7f24), TOBN(0xfdd781d8, 0x3d9967ef), TOBN(0x7892c7c3, 0x5d2c208d), TOBN(0x8bf64f7c, 0xae545cb3)}}, {{TOBN(0xc01f862c, 0x467be912), TOBN(0xf4c85ee9, 0xc73d30cc), TOBN(0x1fa6f4be, 0x6ab83ec7), TOBN(0xa07a3c1c, 0x4e3e3cf9)}, {TOBN(0x87f8ef45, 0x0c00beb3), TOBN(0x30e2c2b3, 0x000d4c3e), TOBN(0x1aa00b94, 0xfe08bf5b), TOBN(0x32c133aa, 0x9224ef52)}}, {{TOBN(0x38df16bb, 0x32e5685d), TOBN(0x68a9e069, 0x58e6f544), TOBN(0x495aaff7, 0xcdc5ebc6), TOBN(0xf894a645, 0x378b135f)}, {TOBN(0xf316350a, 0x09e27ecf), TOBN(0xeced201e, 0x58f7179d), TOBN(0x2eec273c, 0xe97861ba), TOBN(0x47ec2cae, 0xd693be2e)}}, {{TOBN(0xfa4c97c4, 0xf68367ce), TOBN(0xe4f47d0b, 0xbe5a5755), TOBN(0x17de815d, 0xb298a979), TOBN(0xd7eca659, 0xc177dc7d)}, {TOBN(0x20fdbb71, 0x49ded0a3), TOBN(0x4cb2aad4, 0xfb34d3c5), TOBN(0x2cf31d28, 0x60858a33), TOBN(0x3b6873ef, 0xa24aa40f)}}, {{TOBN(0x540234b2, 0x2c11bb37), TOBN(0x2d0366dd, 0xed4c74a3), TOBN(0xf9a968da, 0xeec5f25d), TOBN(0x36601068, 0x67b63142)}, {TOBN(0x07cd6d2c, 0x68d7b6d4), TOBN(0xa8f74f09, 0x0c842942), TOBN(0xe2751404, 0x7768b1ee), TOBN(0x4b5f7e89, 0xfe62aee4)}}, {{TOBN(0xc6a77177, 0x89070d26), TOBN(0xa1f28e4e, 0xdd1c8bc7), TOBN(0xea5f4f06, 0x469e1f17), TOBN(0x78fc242a, 0xfbdb78e0)}, {TOBN(0xc9c7c592, 0x8b0588f1), TOBN(0xb6b7a0fd, 0x1535921e), TOBN(0xcc5bdb91, 0xbde5ae35), TOBN(0xb42c485e, 0x12ff1864)}}, {{TOBN(0xa1113e13, 0xdbab98aa), TOBN(0xde9d469b, 0xa17b1024), TOBN(0x23f48b37, 0xc0462d3a), TOBN(0x3752e537, 0x7c5c078d)}, {TOBN(0xe3a86add, 0x15544eb9), TOBN(0xf013aea7, 0x80fba279), TOBN(0x8b5bb76c, 0xf22001b5), TOBN(0xe617ba14, 0xf02891ab)}}, {{TOBN(0xd39182a6, 0x936219d3), TOBN(0x5ce1f194, 0xae51cb19), TOBN(0xc78f8598, 0xbf07a74c), TOBN(0x6d7158f2, 0x22cbf1bc)}, {TOBN(0x3b846b21, 0xe300ce18), 
TOBN(0x35fba630, 0x2d11275d), TOBN(0x5fe25c36, 0xa0239b9b), TOBN(0xd8beb35d, 0xdf05d940)}}, {{TOBN(0x4db02bb0, 0x1f7e320d), TOBN(0x0641c364, 0x6da320ea), TOBN(0x6d95fa5d, 0x821389a3), TOBN(0x92699748, 0x8fcd8e3d)}, {TOBN(0x316fef17, 0xceb6c143), TOBN(0x67fcb841, 0xd933762b), TOBN(0xbb837e35, 0x118b17f8), TOBN(0x4b92552f, 0x9fd24821)}}, {{TOBN(0xae6bc70e, 0x46aca793), TOBN(0x1cf0b0e4, 0xe579311b), TOBN(0x8dc631be, 0x5802f716), TOBN(0x099bdc6f, 0xbddbee4d)}, {TOBN(0xcc352bb2, 0x0caf8b05), TOBN(0xf74d505a, 0x72d63df2), TOBN(0xb9876d4b, 0x91c4f408), TOBN(0x1ce18473, 0x9e229b2d)}}, {{TOBN(0x49507597, 0x83abdb4a), TOBN(0x850fbcb6, 0xdee84b18), TOBN(0x6325236e, 0x609e67dc), TOBN(0x04d831d9, 0x9336c6d8)}, {TOBN(0x8deaae3b, 0xfa12d45d), TOBN(0xe425f8ce, 0x4746e246), TOBN(0x8004c175, 0x24f5f31e), TOBN(0xaca16d8f, 0xad62c3b7)}}, {{TOBN(0x0dc15a6a, 0x9152f934), TOBN(0xf1235e5d, 0xed0e12c1), TOBN(0xc33c06ec, 0xda477dac), TOBN(0x76be8732, 0xb2ea0006)}, {TOBN(0xcf3f7831, 0x0c0cd313), TOBN(0x3c524553, 0xa614260d), TOBN(0x31a756f8, 0xcab22d15), TOBN(0x03ee10d1, 0x77827a20)}}, {{TOBN(0xd1e059b2, 0x1994ef20), TOBN(0x2a653b69, 0x638ae318), TOBN(0x70d5eb58, 0x2f699010), TOBN(0x279739f7, 0x09f5f84a)}, {TOBN(0x5da4663c, 0x8b799336), TOBN(0xfdfdf14d, 0x203c37eb), TOBN(0x32d8a9dc, 0xa1dbfb2d), TOBN(0xab40cff0, 0x77d48f9b)}}, {{TOBN(0xc018b383, 0xd20b42d5), TOBN(0xf9a810ef, 0x9f78845f), TOBN(0x40af3753, 0xbdba9df0), TOBN(0xb90bdcfc, 0x131dfdf9)}, {TOBN(0x18720591, 0xf01ab782), TOBN(0xc823f211, 0x6af12a88), TOBN(0xa51b80f3, 0x0dc14401), TOBN(0xde248f77, 0xfb2dfbe3)}}, {{TOBN(0xef5a44e5, 0x0cafe751), TOBN(0x73997c9c, 0xd4dcd221), TOBN(0x32fd86d1, 0xde854024), TOBN(0xd5b53adc, 0xa09b84bb)}, {TOBN(0x008d7a11, 0xdcedd8d1), TOBN(0x406bd1c8, 0x74b32c84), TOBN(0x5d4472ff, 0x05dde8b1), TOBN(0x2e25f2cd, 0xfce2b32f)}}, {{TOBN(0xbec0dd5e, 0x29dfc254), TOBN(0x4455fcf6, 0x2b98b267), TOBN(0x0b4d43a5, 0xc72df2ad), TOBN(0xea70e6be, 0x48a75397)}, {TOBN(0x2aad6169, 0x5820f3bf), TOBN(0xf410d2dd, 0x9e37f68f), 
TOBN(0x70fb7dba, 0x7be5ac83), TOBN(0x636bb645, 0x36ec3eec)}}, {{TOBN(0x27104ea3, 0x9754e21c), TOBN(0xbc87a3e6, 0x8d63c373), TOBN(0x483351d7, 0x4109db9a), TOBN(0x0fa724e3, 0x60134da7)}, {TOBN(0x9ff44c29, 0xb0720b16), TOBN(0x2dd0cf13, 0x06aceead), TOBN(0x5942758c, 0xe26929a6), TOBN(0x96c5db92, 0xb766a92b)}}, {{TOBN(0xcec7d4c0, 0x5f18395e), TOBN(0xd3f22744, 0x1f80d032), TOBN(0x7a68b37a, 0xcb86075b), TOBN(0x074764dd, 0xafef92db)}, {TOBN(0xded1e950, 0x7bc7f389), TOBN(0xc580c850, 0xb9756460), TOBN(0xaeeec2a4, 0x7da48157), TOBN(0x3f0b4e7f, 0x82c587b3)}}, {{TOBN(0x231c6de8, 0xa9f19c53), TOBN(0x5717bd73, 0x6974e34e), TOBN(0xd9e1d216, 0xf1508fa9), TOBN(0x9f112361, 0xdadaa124)}, {TOBN(0x80145e31, 0x823b7348), TOBN(0x4dd8f0d5, 0xac634069), TOBN(0xe3d82fc7, 0x2297c258), TOBN(0x276fcfee, 0x9cee7431)}}, {{TOBN(0x8eb61b5e, 0x2bc0aea9), TOBN(0x4f668fd5, 0xde329431), TOBN(0x03a32ab1, 0x38e4b87e), TOBN(0xe1374517, 0x73d0ef0b)}, {TOBN(0x1a46f7e6, 0x853ac983), TOBN(0xc3bdf42e, 0x68e78a57), TOBN(0xacf20785, 0x2ea96dd1), TOBN(0xa10649b9, 0xf1638460)}}, {{TOBN(0xf2369f0b, 0x879fbbed), TOBN(0x0ff0ae86, 0xda9d1869), TOBN(0x5251d759, 0x56766f45), TOBN(0x4984d8c0, 0x2be8d0fc)}, {TOBN(0x7ecc95a6, 0xd21008f0), TOBN(0x29bd54a0, 0x3a1a1c49), TOBN(0xab9828c5, 0xd26c50f3), TOBN(0x32c0087c, 0x51d0d251)}}, {{TOBN(0x9bac3ce6, 0x0c1cdb26), TOBN(0xcd94d947, 0x557ca205), TOBN(0x1b1bd598, 0x9db1fdcd), TOBN(0x0eda0108, 0xa3d8b149)}, {TOBN(0x95066610, 0x56152fcc), TOBN(0xc2f037e6, 0xe7192b33), TOBN(0xdeffb41a, 0xc92e05a4), TOBN(0x1105f6c2, 0xc2f6c62e)}}, {{TOBN(0x68e73500, 0x8733913c), TOBN(0xcce86163, 0x3f3adc40), TOBN(0xf407a942, 0x38a278e9), TOBN(0xd13c1b9d, 0x2ab21292)}, {TOBN(0x93ed7ec7, 0x1c74cf5c), TOBN(0x8887dc48, 0xf1a4c1b4), TOBN(0x3830ff30, 0x4b3a11f1), TOBN(0x358c5a3c, 0x58937cb6)}}, {{TOBN(0x027dc404, 0x89022829), TOBN(0x40e93977, 0x3b798f79), TOBN(0x90ad3337, 0x38be6ead), TOBN(0x9c23f6bc, 0xf34c0a5d)}, {TOBN(0xd1711a35, 0xfbffd8bb), TOBN(0x60fcfb49, 0x1949d3dd), TOBN(0x09c8ef4b, 0x7825d93a), 
TOBN(0x24233cff, 0xa0a8c968)}}, {{TOBN(0x67ade46c, 0xe6d982af), TOBN(0xebb6bf3e, 0xe7544d7c), TOBN(0xd6b9ba76, 0x3d8bd087), TOBN(0x46fe382d, 0x4dc61280)}, {TOBN(0xbd39a7e8, 0xb5bdbd75), TOBN(0xab381331, 0xb8f228fe), TOBN(0x0709a77c, 0xce1c4300), TOBN(0x6a247e56, 0xf337ceac)}}, {{TOBN(0x8f34f21b, 0x636288be), TOBN(0x9dfdca74, 0xc8a7c305), TOBN(0x6decfd1b, 0xea919e04), TOBN(0xcdf2688d, 0x8e1991f8)}, {TOBN(0xe607df44, 0xd0f8a67e), TOBN(0xd985df4b, 0x0b58d010), TOBN(0x57f834c5, 0x0c24f8f4), TOBN(0xe976ef56, 0xa0bf01ae)}}, {{TOBN(0x536395ac, 0xa1c32373), TOBN(0x351027aa, 0x734c0a13), TOBN(0xd2f1b5d6, 0x5e6bd5bc), TOBN(0x2b539e24, 0x223debed)}, {TOBN(0xd4994cec, 0x0eaa1d71), TOBN(0x2a83381d, 0x661dcf65), TOBN(0x5f1aed2f, 0x7b54c740), TOBN(0x0bea3fa5, 0xd6dda5ee)}}, {{TOBN(0x9d4fb684, 0x36cc6134), TOBN(0x8eb9bbf3, 0xc0a443dd), TOBN(0xfc500e2e, 0x383b7d2a), TOBN(0x7aad621c, 0x5b775257)}, {TOBN(0x69284d74, 0x0a8f7cc0), TOBN(0xe820c2ce, 0x07562d65), TOBN(0xbf9531b9, 0x499758ee), TOBN(0x73e95ca5, 0x6ee0cc2d)}}, {{TOBN(0xf61790ab, 0xfbaf50a5), TOBN(0xdf55e76b, 0x684e0750), TOBN(0xec516da7, 0xf176b005), TOBN(0x575553bb, 0x7a2dddc7)}, {TOBN(0x37c87ca3, 0x553afa73), TOBN(0x315f3ffc, 0x4d55c251), TOBN(0xe846442a, 0xaf3e5d35), TOBN(0x61b91149, 0x6495ff28)}}, {{TOBN(0x23cc95d3, 0xfa326dc3), TOBN(0x1df4da1f, 0x18fc2cea), TOBN(0x24bf9adc, 0xd0a37d59), TOBN(0xb6710053, 0x320d6e1e)}, {TOBN(0x96f9667e, 0x618344d1), TOBN(0xcc7ce042, 0xa06445af), TOBN(0xa02d8514, 0xd68dbc3a), TOBN(0x4ea109e4, 0x280b5a5b)}}, {{TOBN(0x5741a7ac, 0xb40961bf), TOBN(0x4ada5937, 0x6aa56bfa), TOBN(0x7feb9145, 0x02b765d1), TOBN(0x561e97be, 0xe6ad1582)}, {TOBN(0xbbc4a5b6, 0xda3982f5), TOBN(0x0c2659ed, 0xb546f468), TOBN(0xb8e7e6aa, 0x59612d20), TOBN(0xd83dfe20, 0xac19e8e0)}}, {{TOBN(0x8530c45f, 0xb835398c), TOBN(0x6106a8bf, 0xb38a41c2), TOBN(0x21e8f9a6, 0x35f5dcdb), TOBN(0x39707137, 0xcae498ed)}, {TOBN(0x70c23834, 0xd8249f00), TOBN(0x9f14b58f, 0xab2537a0), TOBN(0xd043c365, 0x5f61c0c2), TOBN(0xdc5926d6, 0x09a194a7)}}, 
{{TOBN(0xddec0339, 0x8e77738a), TOBN(0xd07a63ef, 0xfba46426), TOBN(0x2e58e79c, 0xee7f6e86), TOBN(0xe59b0459, 0xff32d241)}, {TOBN(0xc5ec84e5, 0x20fa0338), TOBN(0x97939ac8, 0xeaff5ace), TOBN(0x0310a4e3, 0xb4a38313), TOBN(0x9115fba2, 0x8f9d9885)}}, {{TOBN(0x8dd710c2, 0x5fadf8c3), TOBN(0x66be38a2, 0xce19c0e2), TOBN(0xd42a279c, 0x4cfe5022), TOBN(0x597bb530, 0x0e24e1b8)}, {TOBN(0x3cde86b7, 0xc153ca7f), TOBN(0xa8d30fb3, 0x707d63bd), TOBN(0xac905f92, 0xbd60d21e), TOBN(0x98e7ffb6, 0x7b9a54ab)}}, {{TOBN(0xd7147df8, 0xe9726a30), TOBN(0xb5e216ff, 0xafce3533), TOBN(0xb550b799, 0x2ff1ec40), TOBN(0x6b613b87, 0xa1e953fd)}, {TOBN(0x87b88dba, 0x792d5610), TOBN(0x2ee1270a, 0xa190fbe1), TOBN(0x02f4e2dc, 0x2ef581da), TOBN(0x016530e4, 0xeff82a95)}}, {{TOBN(0xcbb93dfd, 0x8fd6ee89), TOBN(0x16d3d986, 0x46848fff), TOBN(0x600eff24, 0x1da47adf), TOBN(0x1b9754a0, 0x0ad47a71)}, {TOBN(0x8f9266df, 0x70c33b98), TOBN(0xaadc87ae, 0xdf34186e), TOBN(0x0d2ce8e1, 0x4ad24132), TOBN(0x8a47cbfc, 0x19946eba)}}, {{TOBN(0x47feeb66, 0x62b5f3af), TOBN(0xcefab561, 0x0abb3734), TOBN(0x449de60e, 0x19f35cb1), TOBN(0x39f8db14, 0x157f0eb9)}, {TOBN(0xffaecc5b, 0x3c61bfd6), TOBN(0xa5a4d41d, 0x41216703), TOBN(0x7f8fabed, 0x224e1cc2), TOBN(0x0d5a8186, 0x871ad953)}}, {{TOBN(0xf10774f7, 0xd22da9a9), TOBN(0x45b8a678, 0xcc8a9b0d), TOBN(0xd9c2e722, 0xbdc32cff), TOBN(0xbf71b5f5, 0x337202a5)}, {TOBN(0x95c57f2f, 0x69fc4db9), TOBN(0xb6dad34c, 0x765d01e1), TOBN(0x7e0bd13f, 0xcb904635), TOBN(0x61751253, 0x763a588c)}}, {{TOBN(0xd85c2997, 0x81af2c2d), TOBN(0xc0f7d9c4, 0x81b9d7da), TOBN(0x838a34ae, 0x08533e8d), TOBN(0x15c4cb08, 0x311d8311)}, {TOBN(0x97f83285, 0x8e121e14), TOBN(0xeea7dc1e, 0x85000a5f), TOBN(0x0c6059b6, 0x5d256274), TOBN(0xec9beace, 0xb95075c0)}}, {{TOBN(0x173daad7, 0x1df97828), TOBN(0xbf851cb5, 0xa8937877), TOBN(0xb083c594, 0x01646f3c), TOBN(0x3bad30cf, 0x50c6d352)}, {TOBN(0xfeb2b202, 0x496bbcea), TOBN(0x3cf9fd4f, 0x18a1e8ba), TOBN(0xd26de7ff, 0x1c066029), TOBN(0x39c81e9e, 0x4e9ed4f8)}}, {{TOBN(0xd8be0cb9, 0x7b390d35), 
TOBN(0x01df2bbd, 0x964aab27), TOBN(0x3e8c1a65, 0xc3ef64f8), TOBN(0x567291d1, 0x716ed1dd)}, {TOBN(0x95499c6c, 0x5f5406d3), TOBN(0x71fdda39, 0x5ba8e23f), TOBN(0xcfeb320e, 0xd5096ece), TOBN(0xbe7ba92b, 0xca66dd16)}}, {{TOBN(0x4608d36b, 0xc6fb5a7d), TOBN(0xe3eea15a, 0x6d2dd0e0), TOBN(0x75b0a3eb, 0x8f97a36a), TOBN(0xf59814cc, 0x1c83de1e)}, {TOBN(0x56c9c5b0, 0x1c33c23f), TOBN(0xa96c1da4, 0x6faa4136), TOBN(0x46bf2074, 0xde316551), TOBN(0x3b866e7b, 0x1f756c8f)}}, {{TOBN(0x727727d8, 0x1495ed6b), TOBN(0xb2394243, 0xb682dce7), TOBN(0x8ab8454e, 0x758610f3), TOBN(0xc243ce84, 0x857d72a4)}, {TOBN(0x7b320d71, 0xdbbf370f), TOBN(0xff9afa37, 0x78e0f7ca), TOBN(0x0119d1e0, 0xea7b523f), TOBN(0xb997f8cb, 0x058c7d42)}}, {{TOBN(0x285bcd2a, 0x37bbb184), TOBN(0x51dcec49, 0xa45d1fa6), TOBN(0x6ade3b64, 0xe29634cb), TOBN(0x080c94a7, 0x26b86ef1)}, {TOBN(0xba583db1, 0x2283fbe3), TOBN(0x902bddc8, 0x5a9315ed), TOBN(0x07c1ccb3, 0x86964bec), TOBN(0x78f4eacf, 0xb6258301)}}, {{TOBN(0x4bdf3a49, 0x56f90823), TOBN(0xba0f5080, 0x741d777b), TOBN(0x091d71c3, 0xf38bf760), TOBN(0x9633d50f, 0x9b625b02)}, {TOBN(0x03ecb743, 0xb8c9de61), TOBN(0xb4751254, 0x5de74720), TOBN(0x9f9defc9, 0x74ce1cb2), TOBN(0x774a4f6a, 0x00bd32ef)}}, {{TOBN(0xaca385f7, 0x73848f22), TOBN(0x53dad716, 0xf3f8558e), TOBN(0xab7b34b0, 0x93c471f9), TOBN(0xf530e069, 0x19644bc7)}, {TOBN(0x3d9fb1ff, 0xdd59d31a), TOBN(0x4382e0df, 0x08daa795), TOBN(0x165c6f4b, 0xd5cc88d7), TOBN(0xeaa392d5, 0x4a18c900)}}, {{TOBN(0x94203c67, 0x648024ee), TOBN(0x188763f2, 0x8c2fabcd), TOBN(0xa80f87ac, 0xbbaec835), TOBN(0x632c96e0, 0xf29d8d54)}, {TOBN(0x29b0a60e, 0x4c00a95e), TOBN(0x2ef17f40, 0xe011e9fa), TOBN(0xf6c0e1d1, 0x15b77223), TOBN(0xaaec2c62, 0x14b04e32)}}, {{TOBN(0xd35688d8, 0x3d84e58c), TOBN(0x2af5094c, 0x958571db), TOBN(0x4fff7e19, 0x760682a6), TOBN(0x4cb27077, 0xe39a407c)}, {TOBN(0x0f59c547, 0x4ff0e321), TOBN(0x169f34a6, 0x1b34c8ff), TOBN(0x2bff1096, 0x52bc1ba7), TOBN(0xa25423b7, 0x83583544)}}, {{TOBN(0x5d55d5d5, 0x0ac8b782), TOBN(0xff6622ec, 0x2db3c892), 
TOBN(0x48fce741, 0x6b8bb642), TOBN(0x31d6998c, 0x69d7e3dc)}, {TOBN(0xdbaf8004, 0xcadcaed0), TOBN(0x801b0142, 0xd81d053c), TOBN(0x94b189fc, 0x59630ec6), TOBN(0x120e9934, 0xaf762c8e)}}, {{TOBN(0x53a29aa4, 0xfdc6a404), TOBN(0x19d8e01e, 0xa1909948), TOBN(0x3cfcabf1, 0xd7e89681), TOBN(0x3321a50d, 0x4e132d37)}, {TOBN(0xd0496863, 0xe9a86111), TOBN(0x8c0cde61, 0x06a3bc65), TOBN(0xaf866c49, 0xfc9f8eef), TOBN(0x2066350e, 0xff7f5141)}}, {{TOBN(0x4f8a4689, 0xe56ddfbd), TOBN(0xea1b0c07, 0xfe32983a), TOBN(0x2b317462, 0x873cb8cb), TOBN(0x658deddc, 0x2d93229f)}, {TOBN(0x65efaf4d, 0x0f64ef58), TOBN(0xfe43287d, 0x730cc7a8), TOBN(0xaebc0c72, 0x3d047d70), TOBN(0x92efa539, 0xd92d26c9)}}, {{TOBN(0x06e78457, 0x94b56526), TOBN(0x415cb80f, 0x0961002d), TOBN(0x89e5c565, 0x76dcb10f), TOBN(0x8bbb6982, 0xff9259fe)}, {TOBN(0x4fe8795b, 0x9abc2668), TOBN(0xb5d4f534, 0x1e678fb1), TOBN(0x6601f3be, 0x7b7da2b9), TOBN(0x98da59e2, 0xa13d6805)}}, {{TOBN(0x190d8ea6, 0x01799a52), TOBN(0xa20cec41, 0xb86d2952), TOBN(0x3062ffb2, 0x7fff2a7c), TOBN(0x741b32e5, 0x79f19d37)}, {TOBN(0xf80d8181, 0x4eb57d47), TOBN(0x7a2d0ed4, 0x16aef06b), TOBN(0x09735fb0, 0x1cecb588), TOBN(0x1641caaa, 0xc6061f5b)}}}, {{{TOBN(0x7f99824f, 0x20151427), TOBN(0x206828b6, 0x92430206), TOBN(0xaa9097d7, 0xe1112357), TOBN(0xacf9a2f2, 0x09e414ec)}, {TOBN(0xdbdac9da, 0x27915356), TOBN(0x7e0734b7, 0x001efee3), TOBN(0x54fab5bb, 0xd2b288e2), TOBN(0x4c630fc4, 0xf62dd09c)}}, {{TOBN(0x8537107a, 0x1ac2703b), TOBN(0xb49258d8, 0x6bc857b5), TOBN(0x57df14de, 0xbcdaccd1), TOBN(0x24ab68d7, 0xc4ae8529)}, {TOBN(0x7ed8b5d4, 0x734e59d0), TOBN(0x5f8740c8, 0xc495cc80), TOBN(0x84aedd5a, 0x291db9b3), TOBN(0x80b360f8, 0x4fb995be)}}, {{TOBN(0xae915f5d, 0x5fa067d1), TOBN(0x4134b57f, 0x9668960c), TOBN(0xbd3656d6, 0xa48edaac), TOBN(0xdac1e3e4, 0xfc1d7436)}, {TOBN(0x674ff869, 0xd81fbb26), TOBN(0x449ed3ec, 0xb26c33d4), TOBN(0x85138705, 0xd94203e8), TOBN(0xccde538b, 0xbeeb6f4a)}}, {{TOBN(0x55d5c68d, 0xa61a76fa), TOBN(0x598b441d, 0xca1554dc), TOBN(0xd39923b9, 0x773b279c), 
TOBN(0x33331d3c, 0x36bf9efc)}, {TOBN(0x2d4c848e, 0x298de399), TOBN(0xcfdb8e77, 0xa1a27f56), TOBN(0x94c855ea, 0x57b8ab70), TOBN(0xdcdb9dae, 0x6f7879ba)}}, {{TOBN(0x7bdff8c2, 0x019f2a59), TOBN(0xb3ce5bb3, 0xcb4fbc74), TOBN(0xea907f68, 0x8a9173dd), TOBN(0x6cd3d0d3, 0x95a75439)}, {TOBN(0x92ecc4d6, 0xefed021c), TOBN(0x09a9f9b0, 0x6a77339a), TOBN(0x87ca6b15, 0x7188c64a), TOBN(0x10c29968, 0x44899158)}}, {{TOBN(0x5859a229, 0xed6e82ef), TOBN(0x16f338e3, 0x65ebaf4e), TOBN(0x0cd31387, 0x5ead67ae), TOBN(0x1c73d228, 0x54ef0bb4)}, {TOBN(0x4cb55131, 0x74a5c8c7), TOBN(0x01cd2970, 0x7f69ad6a), TOBN(0xa04d00dd, 0xe966f87e), TOBN(0xd96fe447, 0x0b7b0321)}}, {{TOBN(0x342ac06e, 0x88fbd381), TOBN(0x02cd4a84, 0x5c35a493), TOBN(0xe8fa89de, 0x54f1bbcd), TOBN(0x341d6367, 0x2575ed4c)}, {TOBN(0xebe357fb, 0xd238202b), TOBN(0x600b4d1a, 0xa984ead9), TOBN(0xc35c9f44, 0x52436ea0), TOBN(0x96fe0a39, 0xa370751b)}}, {{TOBN(0x4c4f0736, 0x7f636a38), TOBN(0x9f943fb7, 0x0e76d5cb), TOBN(0xb03510ba, 0xa8b68b8b), TOBN(0xc246780a, 0x9ed07a1f)}, {TOBN(0x3c051415, 0x6d549fc2), TOBN(0xc2953f31, 0x607781ca), TOBN(0x955e2c69, 0xd8d95413), TOBN(0xb300fadc, 0x7bd282e3)}}, {{TOBN(0x81fe7b50, 0x87e9189f), TOBN(0xdb17375c, 0xf42dda27), TOBN(0x22f7d896, 0xcf0a5904), TOBN(0xa0e57c5a, 0xebe348e6)}, {TOBN(0xa61011d3, 0xf40e3c80), TOBN(0xb1189321, 0x8db705c5), TOBN(0x4ed9309e, 0x50fedec3), TOBN(0xdcf14a10, 0x4d6d5c1d)}}, {{TOBN(0x056c265b, 0x55691342), TOBN(0xe8e08504, 0x91049dc7), TOBN(0x131329f5, 0xc9bae20a), TOBN(0x96c8b3e8, 0xd9dccdb4)}, {TOBN(0x8c5ff838, 0xfb4ee6b4), TOBN(0xfc5a9aeb, 0x41e8ccf0), TOBN(0x7417b764, 0xfae050c6), TOBN(0x0953c3d7, 0x00452080)}}, {{TOBN(0x21372682, 0x38dfe7e8), TOBN(0xea417e15, 0x2bb79d4b), TOBN(0x59641f1c, 0x76e7cf2d), TOBN(0x271e3059, 0xea0bcfcc)}, {TOBN(0x624c7dfd, 0x7253ecbd), TOBN(0x2f552e25, 0x4fca6186), TOBN(0xcbf84ecd, 0x4d866e9c), TOBN(0x73967709, 0xf68d4610)}}, {{TOBN(0xa14b1163, 0xc27901b4), TOBN(0xfd9236e0, 0x899b8bf3), TOBN(0x42b091ec, 0xcbc6da0a), TOBN(0xbb1dac6f, 0x5ad1d297)}, 
{TOBN(0x80e61d53, 0xa91cf76e), TOBN(0x4110a412, 0xd31f1ee7), TOBN(0x2d87c3ba, 0x13efcf77), TOBN(0x1f374bb4, 0xdf450d76)}}, {{TOBN(0x5e78e2f2, 0x0d188dab), TOBN(0xe3968ed0, 0xf4b885ef), TOBN(0x46c0568e, 0x7314570f), TOBN(0x31616338, 0x01170521)}, {TOBN(0x18e1e7e2, 0x4f0c8afe), TOBN(0x4caa75ff, 0xdeea78da), TOBN(0x82db67f2, 0x7c5d8a51), TOBN(0x36a44d86, 0x6f505370)}}, {{TOBN(0xd72c5bda, 0x0333974f), TOBN(0x5db516ae, 0x27a70146), TOBN(0x34705281, 0x210ef921), TOBN(0xbff17a8f, 0x0c9c38e5)}, {TOBN(0x78f4814e, 0x12476da1), TOBN(0xc1e16613, 0x33c16980), TOBN(0x9e5b386f, 0x424d4bca), TOBN(0x4c274e87, 0xc85740de)}}, {{TOBN(0xb6a9b88d, 0x6c2f5226), TOBN(0x14d1b944, 0x550d7ca8), TOBN(0x580c85fc, 0x1fc41709), TOBN(0xc1da368b, 0x54c6d519)}, {TOBN(0x2b0785ce, 0xd5113cf7), TOBN(0x0670f633, 0x5a34708f), TOBN(0x46e23767, 0x15cc3f88), TOBN(0x1b480cfa, 0x50c72c8f)}}, {{TOBN(0x20288602, 0x4147519a), TOBN(0xd0981eac, 0x26b372f0), TOBN(0xa9d4a7ca, 0xa785ebc8), TOBN(0xd953c50d, 0xdbdf58e9)}, {TOBN(0x9d6361cc, 0xfd590f8f), TOBN(0x72e9626b, 0x44e6c917), TOBN(0x7fd96110, 0x22eb64cf), TOBN(0x863ebb7e, 0x9eb288f3)}}, {{TOBN(0x6e6ab761, 0x6aca8ee7), TOBN(0x97d10b39, 0xd7b40358), TOBN(0x1687d377, 0x1e5feb0d), TOBN(0xc83e50e4, 0x8265a27a)}, {TOBN(0x8f75a9fe, 0xc954b313), TOBN(0xcc2e8f47, 0x310d1f61), TOBN(0xf5ba81c5, 0x6557d0e0), TOBN(0x25f9680c, 0x3eaf6207)}}, {{TOBN(0xf95c6609, 0x4354080b), TOBN(0x5225bfa5, 0x7bf2fe1c), TOBN(0xc5c004e2, 0x5c7d98fa), TOBN(0x3561bf1c, 0x019aaf60)}, {TOBN(0x5e6f9f17, 0xba151474), TOBN(0xdec2f934, 0xb04f6eca), TOBN(0x64e368a1, 0x269acb1e), TOBN(0x1332d9e4, 0x0cdda493)}}, {{TOBN(0x60d6cf69, 0xdf23de05), TOBN(0x66d17da2, 0x009339a0), TOBN(0x9fcac985, 0x0a693923), TOBN(0xbcf057fc, 0xed7c6a6d)}, {TOBN(0xc3c5c8c5, 0xf0b5662c), TOBN(0x25318dd8, 0xdcba4f24), TOBN(0x60e8cb75, 0x082b69ff), TOBN(0x7c23b3ee, 0x1e728c01)}}, {{TOBN(0x15e10a0a, 0x097e4403), TOBN(0xcb3d0a86, 0x19854665), TOBN(0x88d8e211, 0xd67d4826), TOBN(0xb39af66e, 0x0b9d2839)}, {TOBN(0xa5f94588, 0xbd475ca8), 
TOBN(0xe06b7966, 0xc077b80b), TOBN(0xfedb1485, 0xda27c26c), TOBN(0xd290d33a, 0xfe0fd5e0)}}, {{TOBN(0xa40bcc47, 0xf34fb0fa), TOBN(0xb4760cc8, 0x1fb1ab09), TOBN(0x8fca0993, 0xa273bfe3), TOBN(0x13e4fe07, 0xf70b213c)}, {TOBN(0x3bcdb992, 0xfdb05163), TOBN(0x8c484b11, 0x0c2b19b6), TOBN(0x1acb815f, 0xaaf2e3e2), TOBN(0xc6905935, 0xb89ff1b4)}}, {{TOBN(0xb2ad6f9d, 0x586e74e1), TOBN(0x488883ad, 0x67b80484), TOBN(0x758aa2c7, 0x369c3ddb), TOBN(0x8ab74e69, 0x9f9afd31)}, {TOBN(0x10fc2d28, 0x5e21beb1), TOBN(0x3484518a, 0x318c42f9), TOBN(0x377427dc, 0x53cf40c3), TOBN(0x9de0781a, 0x391bc1d9)}}, {{TOBN(0x8faee858, 0x693807e1), TOBN(0xa3865327, 0x4e81ccc7), TOBN(0x02c30ff2, 0x6f835b84), TOBN(0xb604437b, 0x0d3d38d4)}, {TOBN(0xb3fc8a98, 0x5ca1823d), TOBN(0xb82f7ec9, 0x03be0324), TOBN(0xee36d761, 0xcf684a33), TOBN(0x5a01df0e, 0x9f29bf7d)}}, {{TOBN(0x686202f3, 0x1306583d), TOBN(0x05b10da0, 0x437c622e), TOBN(0xbf9aaa0f, 0x076a7bc8), TOBN(0x25e94efb, 0x8f8f4e43)}, {TOBN(0x8a35c9b7, 0xfa3dc26d), TOBN(0xe0e5fb93, 0x96ff03c5), TOBN(0xa77e3843, 0xebc394ce), TOBN(0xcede6595, 0x8361de60)}}, {{TOBN(0xd27c22f6, 0xa1993545), TOBN(0xab01cc36, 0x24d671ba), TOBN(0x63fa2877, 0xa169c28e), TOBN(0x925ef904, 0x2eb08376)}, {TOBN(0x3b2fa3cf, 0x53aa0b32), TOBN(0xb27beb5b, 0x71c49d7a), TOBN(0xb60e1834, 0xd105e27f), TOBN(0xd6089788, 0x4f68570d)}}, {{TOBN(0x23094ce0, 0xd6fbc2ac), TOBN(0x738037a1, 0x815ff551), TOBN(0xda73b1bb, 0x6bef119c), TOBN(0xdcf6c430, 0xeef506ba)}, {TOBN(0x00e4fe7b, 0xe3ef104a), TOBN(0xebdd9a2c, 0x0a065628), TOBN(0x853a81c3, 0x8792043e), TOBN(0x22ad6ece, 0xb3b59108)}}, {{TOBN(0x9fb813c0, 0x39cd297d), TOBN(0x8ec7e16e, 0x05bda5d9), TOBN(0x2834797c, 0x0d104b96), TOBN(0xcc11a2e7, 0x7c511510)}, {TOBN(0x96ca5a53, 0x96ee6380), TOBN(0x054c8655, 0xcea38742), TOBN(0xb5946852, 0xd54dfa7d), TOBN(0x97c422e7, 0x1f4ab207)}}, {{TOBN(0xbf907509, 0x0c22b540), TOBN(0x2cde42aa, 0xb7c267d4), TOBN(0xba18f9ed, 0x5ab0d693), TOBN(0x3ba62aa6, 0x6e4660d9)}, {TOBN(0xb24bf97b, 0xab9ea96a), TOBN(0x5d039642, 0xe3b60e32), 
TOBN(0x4e6a4506, 0x7c4d9bd5), TOBN(0x666c5b9e, 0x7ed4a6a4)}}, {{TOBN(0xfa3fdcd9, 0x8edbd7cc), TOBN(0x4660bb87, 0xc6ccd753), TOBN(0x9ae90820, 0x21e6b64f), TOBN(0x8a56a713, 0xb36bfb3f)}, {TOBN(0xabfce096, 0x5726d47f), TOBN(0x9eed01b2, 0x0b1a9a7f), TOBN(0x30e9cad4, 0x4eb74a37), TOBN(0x7b2524cc, 0x53e9666d)}}, {{TOBN(0x6a29683b, 0x8f4b002f), TOBN(0xc2200d7a, 0x41f4fc20), TOBN(0xcf3af47a, 0x3a338acc), TOBN(0x6539a4fb, 0xe7128975)}, {TOBN(0xcec31c14, 0xc33c7fcf), TOBN(0x7eb6799b, 0xc7be322b), TOBN(0x119ef4e9, 0x6646f623), TOBN(0x7b7a26a5, 0x54d7299b)}}, {{TOBN(0xcb37f08d, 0x403f46f2), TOBN(0x94b8fc43, 0x1a0ec0c7), TOBN(0xbb8514e3, 0xc332142f), TOBN(0xf3ed2c33, 0xe80d2a7a)}, {TOBN(0x8d2080af, 0xb639126c), TOBN(0xf7b6be60, 0xe3553ade), TOBN(0x3950aa9f, 0x1c7e2b09), TOBN(0x847ff958, 0x6410f02b)}}, {{TOBN(0x877b7cf5, 0x678a31b0), TOBN(0xd50301ae, 0x3998b620), TOBN(0x734257c5, 0xc00fb396), TOBN(0xf9fb18a0, 0x04e672a6)}, {TOBN(0xff8bd8eb, 0xe8758851), TOBN(0x1e64e4c6, 0x5d99ba44), TOBN(0x4b8eaedf, 0x7dfd93b7), TOBN(0xba2f2a98, 0x04e76b8c)}}, {{TOBN(0x7d790cba, 0xe8053433), TOBN(0xc8e725a0, 0x3d2c9585), TOBN(0x58c5c476, 0xcdd8f5ed), TOBN(0xd106b952, 0xefa9fe1d)}, {TOBN(0x3c5c775b, 0x0eff13a9), TOBN(0x242442ba, 0xe057b930), TOBN(0xe9f458d4, 0xc9b70cbd), TOBN(0x69b71448, 0xa3cdb89a)}}, {{TOBN(0x41ee46f6, 0x0e2ed742), TOBN(0x573f1045, 0x40067493), TOBN(0xb1e154ff, 0x9d54c304), TOBN(0x2ad0436a, 0x8d3a7502)}, {TOBN(0xee4aaa2d, 0x431a8121), TOBN(0xcd38b3ab, 0x886f11ed), TOBN(0x57d49ea6, 0x034a0eb7), TOBN(0xd2b773bd, 0xf7e85e58)}}, {{TOBN(0x4a559ac4, 0x9b5c1f14), TOBN(0xc444be1a, 0x3e54df2b), TOBN(0x13aad704, 0xeda41891), TOBN(0xcd927bec, 0x5eb5c788)}, {TOBN(0xeb3c8516, 0xe48c8a34), TOBN(0x1b7ac812, 0x4b546669), TOBN(0x1815f896, 0x594df8ec), TOBN(0x87c6a79c, 0x79227865)}}, {{TOBN(0xae02a2f0, 0x9b56ddbd), TOBN(0x1339b5ac, 0x8a2f1cf3), TOBN(0xf2b569c7, 0x839dff0d), TOBN(0xb0b9e864, 0xfee9a43d)}, {TOBN(0x4ff8ca41, 0x77bb064e), TOBN(0x145a2812, 0xfd249f63), TOBN(0x3ab7beac, 0xf86f689a), 
TOBN(0x9bafec27, 0x01d35f5e)}}, {{TOBN(0x28054c65, 0x4265aa91), TOBN(0xa4b18304, 0x035efe42), TOBN(0x6887b0e6, 0x9639dec7), TOBN(0xf4b8f6ad, 0x3d52aea5)}, {TOBN(0xfb9293cc, 0x971a8a13), TOBN(0x3f159e5d, 0x4c934d07), TOBN(0x2c50e9b1, 0x09acbc29), TOBN(0x08eb65e6, 0x7154d129)}}, {{TOBN(0x4feff589, 0x30b75c3e), TOBN(0x0bb82fe2, 0x94491c93), TOBN(0xd8ac377a, 0x89af62bb), TOBN(0xd7b51490, 0x9685e49f)}, {TOBN(0xabca9a7b, 0x04497f19), TOBN(0x1b35ed0a, 0x1a7ad13f), TOBN(0x6b601e21, 0x3ec86ed6), TOBN(0xda91fcb9, 0xce0c76f1)}}, {{TOBN(0x9e28507b, 0xd7ab27e1), TOBN(0x7c19a555, 0x63945b7b), TOBN(0x6b43f0a1, 0xaafc9827), TOBN(0x443b4fbd, 0x3aa55b91)}, {TOBN(0x962b2e65, 0x6962c88f), TOBN(0x139da8d4, 0xce0db0ca), TOBN(0xb93f05dd, 0x1b8d6c4f), TOBN(0x779cdff7, 0x180b9824)}}, {{TOBN(0xbba23fdd, 0xae57c7b7), TOBN(0x345342f2, 0x1b932522), TOBN(0xfd9c80fe, 0x556d4aa3), TOBN(0xa03907ba, 0x6525bb61)}, {TOBN(0x38b010e1, 0xff218933), TOBN(0xc066b654, 0xaa52117b), TOBN(0x8e141920, 0x94f2e6ea), TOBN(0x66a27dca, 0x0d32f2b2)}}, {{TOBN(0x69c7f993, 0x048b3717), TOBN(0xbf5a989a, 0xb178ae1c), TOBN(0x49fa9058, 0x564f1d6b), TOBN(0x27ec6e15, 0xd31fde4e)}, {TOBN(0x4cce0373, 0x7276e7fc), TOBN(0x64086d79, 0x89d6bf02), TOBN(0x5a72f046, 0x4ccdd979), TOBN(0x909c3566, 0x47775631)}}, {{TOBN(0x1c07bc6b, 0x75dd7125), TOBN(0xb4c6bc97, 0x87a0428d), TOBN(0x507ece52, 0xfdeb6b9d), TOBN(0xfca56512, 0xb2c95432)}, {TOBN(0x15d97181, 0xd0e8bd06), TOBN(0x384dd317, 0xc6bb46ea), TOBN(0x5441ea20, 0x3952b624), TOBN(0xbcf70dee, 0x4e7dc2fb)}}, {{TOBN(0x372b016e, 0x6628e8c3), TOBN(0x07a0d667, 0xb60a7522), TOBN(0xcf05751b, 0x0a344ee2), TOBN(0x0ec09a48, 0x118bdeec)}, {TOBN(0x6e4b3d4e, 0xd83dce46), TOBN(0x43a6316d, 0x99d2fc6e), TOBN(0xa99d8989, 0x56cf044c), TOBN(0x7c7f4454, 0xae3e5fb7)}}, {{TOBN(0xb2e6b121, 0xfbabbe92), TOBN(0x281850fb, 0xe1330076), TOBN(0x093581ec, 0x97890015), TOBN(0x69b1dded, 0x75ff77f5)}, {TOBN(0x7cf0b18f, 0xab105105), TOBN(0x953ced31, 0xa89ccfef), TOBN(0x3151f85f, 0xeb914009), TOBN(0x3c9f1b87, 0x88ed48ad)}}, 
{{TOBN(0xc9aba1a1, 0x4a7eadcb), TOBN(0x928e7501, 0x522e71cf), TOBN(0xeaede727, 0x3a2e4f83), TOBN(0x467e10d1, 0x1ce3bbd3)}, {TOBN(0xf3442ac3, 0xb955dcf0), TOBN(0xba96307d, 0xd3d5e527), TOBN(0xf763a10e, 0xfd77f474), TOBN(0x5d744bd0, 0x6a6e1ff0)}}, {{TOBN(0xd287282a, 0xa777899e), TOBN(0xe20eda8f, 0xd03f3cde), TOBN(0x6a7e75bb, 0x50b07d31), TOBN(0x0b7e2a94, 0x6f379de4)}, {TOBN(0x31cb64ad, 0x19f593cf), TOBN(0x7b1a9e4f, 0x1e76ef1d), TOBN(0xe18c9c9d, 0xb62d609c), TOBN(0x439bad6d, 0xe779a650)}}, {{TOBN(0x219d9066, 0xe032f144), TOBN(0x1db632b8, 0xe8b2ec6a), TOBN(0xff0d0fd4, 0xfda12f78), TOBN(0x56fb4c2d, 0x2a25d265)}, {TOBN(0x5f4e2ee1, 0x255a03f1), TOBN(0x61cd6af2, 0xe96af176), TOBN(0xe0317ba8, 0xd068bc97), TOBN(0x927d6bab, 0x264b988e)}}, {{TOBN(0xa18f07e0, 0xe90fb21e), TOBN(0x00fd2b80, 0xbba7fca1), TOBN(0x20387f27, 0x95cd67b5), TOBN(0x5b89a4e7, 0xd39707f7)}, {TOBN(0x8f83ad3f, 0x894407ce), TOBN(0xa0025b94, 0x6c226132), TOBN(0xc79563c7, 0xf906c13b), TOBN(0x5f548f31, 0x4e7bb025)}}, {{TOBN(0x2b4c6b8f, 0xeac6d113), TOBN(0xa67e3f9c, 0x0e813c76), TOBN(0x3982717c, 0x3fe1f4b9), TOBN(0x58865819, 0x26d8050e)}, {TOBN(0x99f3640c, 0xf7f06f20), TOBN(0xdc610216, 0x2a66ebc2), TOBN(0x52f2c175, 0x767a1e08), TOBN(0x05660e1a, 0x5999871b)}}, {{TOBN(0x6b0f1762, 0x6d3c4693), TOBN(0xf0e7d627, 0x37ed7bea), TOBN(0xc51758c7, 0xb75b226d), TOBN(0x40a88628, 0x1f91613b)}, {TOBN(0x889dbaa7, 0xbbb38ce0), TOBN(0xe0404b65, 0xbddcad81), TOBN(0xfebccd3a, 0x8bc9671f), TOBN(0xfbf9a357, 0xee1f5375)}}, {{TOBN(0x5dc169b0, 0x28f33398), TOBN(0xb07ec11d, 0x72e90f65), TOBN(0xae7f3b4a, 0xfaab1eb1), TOBN(0xd970195e, 0x5f17538a)}, {TOBN(0x52b05cbe, 0x0181e640), TOBN(0xf5debd62, 0x2643313d), TOBN(0x76148154, 0x5df31f82), TOBN(0x23e03b33, 0x3a9e13c5)}}, {{TOBN(0xff758949, 0x4fde0c1f), TOBN(0xbf8a1abe, 0xe5b6ec20), TOBN(0x702278fb, 0x87e1db6c), TOBN(0xc447ad7a, 0x35ed658f)}, {TOBN(0x48d4aa38, 0x03d0ccf2), TOBN(0x80acb338, 0x819a7c03), TOBN(0x9bc7c89e, 0x6e17cecc), TOBN(0x46736b8b, 0x03be1d82)}}, {{TOBN(0xd65d7b60, 0xc0432f96), 
TOBN(0xddebe7a3, 0xdeb5442f), TOBN(0x79a25307, 0x7dff69a2), TOBN(0x37a56d94, 0x02cf3122)}, {TOBN(0x8bab8aed, 0xf2350d0a), TOBN(0x13c3f276, 0x037b0d9a), TOBN(0xc664957c, 0x44c65cae), TOBN(0x88b44089, 0xc2e71a88)}}, {{TOBN(0xdb88e5a3, 0x5cb02664), TOBN(0x5d4c0bf1, 0x8686c72e), TOBN(0xea3d9b62, 0xa682d53e), TOBN(0x9b605ef4, 0x0b2ad431)}, {TOBN(0x71bac202, 0xc69645d0), TOBN(0xa115f03a, 0x6a1b66e7), TOBN(0xfe2c563a, 0x158f4dc4), TOBN(0xf715b3a0, 0x4d12a78c)}}, {{TOBN(0x8f7f0a48, 0xd413213a), TOBN(0x2035806d, 0xc04becdb), TOBN(0xecd34a99, 0x5d8587f5), TOBN(0x4d8c3079, 0x9f6d3a71)}, {TOBN(0x1b2a2a67, 0x8d95a8f6), TOBN(0xc58c9d7d, 0xf2110d0d), TOBN(0xdeee81d5, 0xcf8fba3f), TOBN(0xa42be3c0, 0x0c7cdf68)}}, {{TOBN(0x2126f742, 0xd43b5eaa), TOBN(0x054a0766, 0xdfa59b85), TOBN(0x9d0d5e36, 0x126bfd45), TOBN(0xa1f8fbd7, 0x384f8a8f)}, {TOBN(0x317680f5, 0xd563fccc), TOBN(0x48ca5055, 0xf280a928), TOBN(0xe00b81b2, 0x27b578cf), TOBN(0x10aad918, 0x2994a514)}}, {{TOBN(0xd9e07b62, 0xb7bdc953), TOBN(0x9f0f6ff2, 0x5bc086dd), TOBN(0x09d1ccff, 0x655eee77), TOBN(0x45475f79, 0x5bef7df1)}, {TOBN(0x3faa28fa, 0x86f702cc), TOBN(0x92e60905, 0x0f021f07), TOBN(0xe9e62968, 0x7f8fa8c6), TOBN(0xbd71419a, 0xf036ea2c)}}, {{TOBN(0x171ee1cc, 0x6028da9a), TOBN(0x5352fe1a, 0xc251f573), TOBN(0xf8ff236e, 0x3fa997f4), TOBN(0xd831b6c9, 0xa5749d5f)}, {TOBN(0x7c872e1d, 0xe350e2c2), TOBN(0xc56240d9, 0x1e0ce403), TOBN(0xf9deb077, 0x6974f5cb), TOBN(0x7d50ba87, 0x961c3728)}}, {{TOBN(0xd6f89426, 0x5a3a2518), TOBN(0xcf817799, 0xc6303d43), TOBN(0x510a0471, 0x619e5696), TOBN(0xab049ff6, 0x3a5e307b)}, {TOBN(0xe4cdf9b0, 0xfeb13ec7), TOBN(0xd5e97117, 0x9d8ff90c), TOBN(0xf6f64d06, 0x9afa96af), TOBN(0x00d0bf5e, 0x9d2012a2)}}, {{TOBN(0xe63f301f, 0x358bcdc0), TOBN(0x07689e99, 0x0a9d47f8), TOBN(0x1f689e2f, 0x4f43d43a), TOBN(0x4d542a16, 0x90920904)}, {TOBN(0xaea293d5, 0x9ca0a707), TOBN(0xd061fe45, 0x8ac68065), TOBN(0x1033bf1b, 0x0090008c), TOBN(0x29749558, 0xc08a6db6)}}, {{TOBN(0x74b5fc59, 0xc1d5d034), TOBN(0xf712e9f6, 0x67e215e0), 
TOBN(0xfd520cbd, 0x860200e6), TOBN(0x0229acb4, 0x3ea22588)}, {TOBN(0x9cd1e14c, 0xfff0c82e), TOBN(0x87684b62, 0x59c69e73), TOBN(0xda85e61c, 0x96ccb989), TOBN(0x2d5dbb02, 0xa3d06493)}}, {{TOBN(0xf22ad33a, 0xe86b173c), TOBN(0xe8e41ea5, 0xa79ff0e3), TOBN(0x01d2d725, 0xdd0d0c10), TOBN(0x31f39088, 0x032d28f9)}, {TOBN(0x7b3f71e1, 0x7829839e), TOBN(0x0cf691b4, 0x4502ae58), TOBN(0xef658dbd, 0xbefc6115), TOBN(0xa5cd6ee5, 0xb3ab5314)}}, {{TOBN(0x206c8d7b, 0x5f1d2347), TOBN(0x794645ba, 0x4cc2253a), TOBN(0xd517d8ff, 0x58389e08), TOBN(0x4fa20dee, 0x9f847288)}, {TOBN(0xeba072d8, 0xd797770a), TOBN(0x7360c91d, 0xbf429e26), TOBN(0x7200a3b3, 0x80af8279), TOBN(0x6a1c9150, 0x82dadce3)}}, {{TOBN(0x0ee6d3a7, 0xc35d8794), TOBN(0x042e6558, 0x0356bae5), TOBN(0x9f59698d, 0x643322fd), TOBN(0x9379ae15, 0x50a61967)}, {TOBN(0x64b9ae62, 0xfcc9981e), TOBN(0xaed3d631, 0x6d2934c6), TOBN(0x2454b302, 0x5e4e65eb), TOBN(0xab09f647, 0xf9950428)}}}, {{{TOBN(0xb2083a12, 0x22248acc), TOBN(0x1f6ec0ef, 0x3264e366), TOBN(0x5659b704, 0x5afdee28), TOBN(0x7a823a40, 0xe6430bb5)}, {TOBN(0x24592a04, 0xe1900a79), TOBN(0xcde09d4a, 0xc9ee6576), TOBN(0x52b6463f, 0x4b5ea54a), TOBN(0x1efe9ed3, 0xd3ca65a7)}}, {{TOBN(0xe27a6dbe, 0x305406dd), TOBN(0x8eb7dc7f, 0xdd5d1957), TOBN(0xf54a6876, 0x387d4d8f), TOBN(0x9c479409, 0xc7762de4)}, {TOBN(0xbe4d5b5d, 0x99b30778), TOBN(0x25380c56, 0x6e793682), TOBN(0x602d37f3, 0xdac740e3), TOBN(0x140deabe, 0x1566e4ae)}}, {{TOBN(0x4481d067, 0xafd32acf), TOBN(0xd8f0fcca, 0xe1f71ccf), TOBN(0xd208dd0c, 0xb596f2da), TOBN(0xd049d730, 0x9aad93f9)}, {TOBN(0xc79f263d, 0x42ab580e), TOBN(0x09411bb1, 0x23f707b4), TOBN(0x8cfde1ff, 0x835e0eda), TOBN(0x72707490, 0x90f03402)}}, {{TOBN(0xeaee6126, 0xc49a861e), TOBN(0x024f3b65, 0xe14f0d06), TOBN(0x51a3f1e8, 0xc69bfc17), TOBN(0xc3c3a8e9, 0xa7686381)}, {TOBN(0x3400752c, 0xb103d4c8), TOBN(0x02bc4613, 0x9218b36b), TOBN(0xc67f75eb, 0x7651504a), TOBN(0xd6848b56, 0xd02aebfa)}}, {{TOBN(0xbd9802e6, 0xc30fa92b), TOBN(0x5a70d96d, 0x9a552784), TOBN(0x9085c4ea, 0x3f83169b), 
TOBN(0xfa9423bb, 0x06908228)}, {TOBN(0x2ffebe12, 0xfe97a5b9), TOBN(0x85da6049, 0x71b99118), TOBN(0x9cbc2f7f, 0x63178846), TOBN(0xfd96bc70, 0x9153218e)}}, {{TOBN(0x958381db, 0x1782269b), TOBN(0xae34bf79, 0x2597e550), TOBN(0xbb5c6064, 0x5f385153), TOBN(0x6f0e96af, 0xe3088048)}, {TOBN(0xbf6a0215, 0x77884456), TOBN(0xb3b5688c, 0x69310ea7), TOBN(0x17c94295, 0x04fad2de), TOBN(0xe020f0e5, 0x17896d4d)}}, {{TOBN(0x730ba0ab, 0x0976505f), TOBN(0x567f6813, 0x095e2ec5), TOBN(0x47062010, 0x6331ab71), TOBN(0x72cfa977, 0x41d22b9f)}, {TOBN(0x33e55ead, 0x8a2373da), TOBN(0xa8d0d5f4, 0x7ba45a68), TOBN(0xba1d8f9c, 0x03029d15), TOBN(0x8f34f1cc, 0xfc55b9f3)}}, {{TOBN(0xcca4428d, 0xbbe5a1a9), TOBN(0x8187fd5f, 0x3126bd67), TOBN(0x0036973a, 0x48105826), TOBN(0xa39b6663, 0xb8bd61a0)}, {TOBN(0x6d42deef, 0x2d65a808), TOBN(0x4969044f, 0x94636b19), TOBN(0xf611ee47, 0xdd5d564c), TOBN(0x7b2f3a49, 0xd2873077)}}, {{TOBN(0x94157d45, 0x300eb294), TOBN(0x2b2a656e, 0x169c1494), TOBN(0xc000dd76, 0xd3a47aa9), TOBN(0xa2864e4f, 0xa6243ea4)}, {TOBN(0x82716c47, 0xdb89842e), TOBN(0x12dfd7d7, 0x61479fb7), TOBN(0x3b9a2c56, 0xe0b2f6dc), TOBN(0x46be862a, 0xd7f85d67)}}, {{TOBN(0x03b0d8dd, 0x0f82b214), TOBN(0x460c34f9, 0xf103cbc6), TOBN(0xf32e5c03, 0x18d79e19), TOBN(0x8b8888ba, 0xa84117f8)}, {TOBN(0x8f3c37dc, 0xc0722677), TOBN(0x10d21be9, 0x1c1c0f27), TOBN(0xd47c8468, 0xe0f7a0c6), TOBN(0x9bf02213, 0xadecc0e0)}}, {{TOBN(0x0baa7d12, 0x42b48b99), TOBN(0x1bcb665d, 0x48424096), TOBN(0x8b847cd6, 0xebfb5cfb), TOBN(0x87c2ae56, 0x9ad4d10d)}, {TOBN(0xf1cbb122, 0x0de36726), TOBN(0xe7043c68, 0x3fdfbd21), TOBN(0x4bd0826a, 0x4e79d460), TOBN(0x11f5e598, 0x4bd1a2cb)}}, {{TOBN(0x97554160, 0xb7fe7b6e), TOBN(0x7d16189a, 0x400a3fb2), TOBN(0xd73e9bea, 0xe328ca1e), TOBN(0x0dd04b97, 0xe793d8cc)}, {TOBN(0xa9c83c9b, 0x506db8cc), TOBN(0x5cd47aae, 0xcf38814c), TOBN(0x26fc430d, 0xb64b45e6), TOBN(0x079b5499, 0xd818ea84)}}, {{TOBN(0xebb01102, 0xc1c24a3b), TOBN(0xca24e568, 0x1c161c1a), TOBN(0x103eea69, 0x36f00a4a), TOBN(0x9ad76ee8, 0x76176c7b)}, 
{TOBN(0x97451fc2, 0x538e0ff7), TOBN(0x94f89809, 0x6604b3b0), TOBN(0x6311436e, 0x3249cfd7), TOBN(0x27b4a7bd, 0x41224f69)}}, {{TOBN(0x03b5d21a, 0xe0ac2941), TOBN(0x279b0254, 0xc2d31937), TOBN(0x3307c052, 0xcac992d0), TOBN(0x6aa7cb92, 0xefa8b1f3)}, {TOBN(0x5a182580, 0x0d37c7a5), TOBN(0x13380c37, 0x342d5422), TOBN(0x92ac2d66, 0xd5d2ef92), TOBN(0x035a70c9, 0x030c63c6)}}, {{TOBN(0xc16025dd, 0x4ce4f152), TOBN(0x1f419a71, 0xf9df7c06), TOBN(0x6d5b2214, 0x91e4bb14), TOBN(0xfc43c6cc, 0x839fb4ce)}, {TOBN(0x49f06591, 0x925d6b2d), TOBN(0x4b37d9d3, 0x62186598), TOBN(0x8c54a971, 0xd01b1629), TOBN(0xe1a9c29f, 0x51d50e05)}}, {{TOBN(0x5109b785, 0x71ba1861), TOBN(0x48b22d5c, 0xd0c8f93d), TOBN(0xe8fa84a7, 0x8633bb93), TOBN(0x53fba6ba, 0x5aebbd08)}, {TOBN(0x7ff27df3, 0xe5eea7d8), TOBN(0x521c8796, 0x68ca7158), TOBN(0xb9d5133b, 0xce6f1a05), TOBN(0x2d50cd53, 0xfd0ebee4)}}, {{TOBN(0xc82115d6, 0xc5a3ef16), TOBN(0x993eff9d, 0xba079221), TOBN(0xe4da2c5e, 0x4b5da81c), TOBN(0x9a89dbdb, 0x8033fd85)}, {TOBN(0x60819ebf, 0x2b892891), TOBN(0x53902b21, 0x5d14a4d5), TOBN(0x6ac35051, 0xd7fda421), TOBN(0xcc6ab885, 0x61c83284)}}, {{TOBN(0x14eba133, 0xf74cff17), TOBN(0x240aaa03, 0xecb813f2), TOBN(0xcfbb6540, 0x6f665bee), TOBN(0x084b1fe4, 0xa425ad73)}, {TOBN(0x009d5d16, 0xd081f6a6), TOBN(0x35304fe8, 0xeef82c90), TOBN(0xf20346d5, 0xaa9eaa22), TOBN(0x0ada9f07, 0xac1c91e3)}}, {{TOBN(0xa6e21678, 0x968a6144), TOBN(0x54c1f77c, 0x07b31a1e), TOBN(0xd6bb787e, 0x5781fbe1), TOBN(0x61bd2ee0, 0xe31f1c4a)}, {TOBN(0xf25aa1e9, 0x781105fc), TOBN(0x9cf2971f, 0x7b2f8e80), TOBN(0x26d15412, 0xcdff919b), TOBN(0x01db4ebe, 0x34bc896e)}}, {{TOBN(0x7d9b3e23, 0xb40df1cf), TOBN(0x59337373, 0x94e971b4), TOBN(0xbf57bd14, 0x669cf921), TOBN(0x865daedf, 0x0c1a1064)}, {TOBN(0x3eb70bd3, 0x83279125), TOBN(0xbc3d5b9f, 0x34ecdaab), TOBN(0x91e3ed7e, 0x5f755caf), TOBN(0x49699f54, 0xd41e6f02)}}, {{TOBN(0x185770e1, 0xd4a7a15b), TOBN(0x08f3587a, 0xeaac87e7), TOBN(0x352018db, 0x473133ea), TOBN(0x674ce719, 0x04fd30fc)}, {TOBN(0x7b8d9835, 0x088b3e0e), 
TOBN(0x7a0356a9, 0x5d0d47a1), TOBN(0x9d9e7659, 0x6474a3c4), TOBN(0x61ea48a7, 0xff66966c)}}, {{TOBN(0x30417758, 0x0f3e4834), TOBN(0xfdbb21c2, 0x17a9afcb), TOBN(0x756fa17f, 0x2f9a67b3), TOBN(0x2a6b2421, 0xa245c1a8)}, {TOBN(0x64be2794, 0x4af02291), TOBN(0xade465c6, 0x2a5804fe), TOBN(0x8dffbd39, 0xa6f08fd7), TOBN(0xc4efa84c, 0xaa14403b)}}, {{TOBN(0xa1b91b2a, 0x442b0f5c), TOBN(0xb748e317, 0xcf997736), TOBN(0x8d1b62bf, 0xcee90e16), TOBN(0x907ae271, 0x0b2078c0)}, {TOBN(0xdf31534b, 0x0c9bcddd), TOBN(0x043fb054, 0x39adce83), TOBN(0x99031043, 0xd826846a), TOBN(0x61a9c0d6, 0xb144f393)}}, {{TOBN(0xdab48046, 0x47718427), TOBN(0xdf17ff9b, 0x6e830f8b), TOBN(0x408d7ee8, 0xe49a1347), TOBN(0x6ac71e23, 0x91c1d4ae)}, {TOBN(0xc8cbb9fd, 0x1defd73c), TOBN(0x19840657, 0xbbbbfec5), TOBN(0x39db1cb5, 0x9e7ef8ea), TOBN(0x78aa8296, 0x64105f30)}}, {{TOBN(0xa3d9b7f0, 0xa3738c29), TOBN(0x0a2f235a, 0xbc3250a3), TOBN(0x55e506f6, 0x445e4caf), TOBN(0x0974f73d, 0x33475f7a)}, {TOBN(0xd37dbba3, 0x5ba2f5a8), TOBN(0x542c6e63, 0x6af40066), TOBN(0x26d99b53, 0xc5d73e2c), TOBN(0x06060d7d, 0x6c3ca33e)}}, {{TOBN(0xcdbef1c2, 0x065fef4a), TOBN(0x77e60f7d, 0xfd5b92e3), TOBN(0xd7c549f0, 0x26708350), TOBN(0x201b3ad0, 0x34f121bf)}, {TOBN(0x5fcac2a1, 0x0334fc14), TOBN(0x8a9a9e09, 0x344552f6), TOBN(0x7dd8a1d3, 0x97653082), TOBN(0x5fc0738f, 0x79d4f289)}}, {{TOBN(0x787d244d, 0x17d2d8c3), TOBN(0xeffc6345, 0x70830684), TOBN(0x5ddb96dd, 0xe4f73ae5), TOBN(0x8efb14b1, 0x172549a5)}, {TOBN(0x6eb73eee, 0x2245ae7a), TOBN(0xbca4061e, 0xea11f13e), TOBN(0xb577421d, 0x30b01f5d), TOBN(0xaa688b24, 0x782e152c)}}, {{TOBN(0x67608e71, 0xbd3502ba), TOBN(0x4ef41f24, 0xb4de75a0), TOBN(0xb08dde5e, 0xfd6125e5), TOBN(0xde484825, 0xa409543f)}, {TOBN(0x1f198d98, 0x65cc2295), TOBN(0x428a3771, 0x6e0edfa2), TOBN(0x4f9697a2, 0xadf35fc7), TOBN(0x01a43c79, 0xf7cac3c7)}}, {{TOBN(0xb05d7059, 0x0fd3659a), TOBN(0x8927f30c, 0xbb7f2d9a), TOBN(0x4023d1ac, 0x8cf984d3), TOBN(0x32125ed3, 0x02897a45)}, {TOBN(0xfb572dad, 0x3d414205), TOBN(0x73000ef2, 0xe3fa82a9), 
TOBN(0x4c0868e9, 0xf10a5581), TOBN(0x5b61fc67, 0x6b0b3ca5)}}, {{TOBN(0xc1258d5b, 0x7cae440c), TOBN(0x21c08b41, 0x402b7531), TOBN(0xf61a8955, 0xde932321), TOBN(0x3568faf8, 0x2d1408af)}, {TOBN(0x71b15e99, 0x9ecf965b), TOBN(0xf14ed248, 0xe917276f), TOBN(0xc6f4caa1, 0x820cf9e2), TOBN(0x681b20b2, 0x18d83c7e)}}, {{TOBN(0x6cde738d, 0xc6c01120), TOBN(0x71db0813, 0xae70e0db), TOBN(0x95fc0644, 0x74afe18c), TOBN(0x34619053, 0x129e2be7)}, {TOBN(0x80615cea, 0xdb2a3b15), TOBN(0x0a49a19e, 0xdb4c7073), TOBN(0x0e1b84c8, 0x8fd2d367), TOBN(0xd74bf462, 0x033fb8aa)}}, {{TOBN(0x889f6d65, 0x533ef217), TOBN(0x7158c7e4, 0xc3ca2e87), TOBN(0xfb670dfb, 0xdc2b4167), TOBN(0x75910a01, 0x844c257f)}, {TOBN(0xf336bf07, 0xcf88577d), TOBN(0x22245250, 0xe45e2ace), TOBN(0x2ed92e8d, 0x7ca23d85), TOBN(0x29f8be4c, 0x2b812f58)}}, {{TOBN(0xdd9ebaa7, 0x076fe12b), TOBN(0x3f2400cb, 0xae1537f9), TOBN(0x1aa93528, 0x17bdfb46), TOBN(0xc0f98430, 0x67883b41)}, {TOBN(0x5590ede1, 0x0170911d), TOBN(0x7562f5bb, 0x34d4b17f), TOBN(0xe1fa1df2, 0x1826b8d2), TOBN(0xb40b796a, 0x6bd80d59)}}, {{TOBN(0xd65bf197, 0x3467ba92), TOBN(0x8c9b46db, 0xf70954b0), TOBN(0x97c8a0f3, 0x0e78f15d), TOBN(0xa8f3a69a, 0x85a4c961)}, {TOBN(0x4242660f, 0x61e4ce9b), TOBN(0xbf06aab3, 0x6ea6790c), TOBN(0xc6706f8e, 0xec986416), TOBN(0x9e56dec1, 0x9a9fc225)}}, {{TOBN(0x527c46f4, 0x9a9898d9), TOBN(0xd799e77b, 0x5633cdef), TOBN(0x24eacc16, 0x7d9e4297), TOBN(0xabb61cea, 0x6b1cb734)}, {TOBN(0xbee2e8a7, 0xf778443c), TOBN(0x3bb42bf1, 0x29de2fe6), TOBN(0xcbed86a1, 0x3003bb6f), TOBN(0xd3918e6c, 0xd781cdf6)}}, {{TOBN(0x4bee3271, 0x9a5103f1), TOBN(0x5243efc6, 0xf50eac06), TOBN(0xb8e122cb, 0x6adcc119), TOBN(0x1b7faa84, 0xc0b80a08)}, {TOBN(0x32c3d1bd, 0x6dfcd08c), TOBN(0x129dec4e, 0x0be427de), TOBN(0x98ab679c, 0x1d263c83), TOBN(0xafc83cb7, 0xcef64eff)}}, {{TOBN(0x85eb6088, 0x2fa6be76), TOBN(0x892585fb, 0x1328cbfe), TOBN(0xc154d3ed, 0xcf618dda), TOBN(0xc44f601b, 0x3abaf26e)}, {TOBN(0x7bf57d0b, 0x2be1fdfd), TOBN(0xa833bd2d, 0x21137fee), TOBN(0x9353af36, 0x2db591a8), 
TOBN(0xc76f26dc, 0x5562a056)}}, {{TOBN(0x1d87e47d, 0x3fdf5a51), TOBN(0x7afb5f93, 0x55c9cab0), TOBN(0x91bbf58f, 0x89e0586e), TOBN(0x7c72c018, 0x0d843709)}, {TOBN(0xa9a5aafb, 0x99b5c3dc), TOBN(0xa48a0f1d, 0x3844aeb0), TOBN(0x7178b7dd, 0xb667e482), TOBN(0x453985e9, 0x6e23a59a)}}, {{TOBN(0x4a54c860, 0x01b25dd8), TOBN(0x0dd37f48, 0xfb897c8a), TOBN(0x5f8aa610, 0x0ea90cd9), TOBN(0xc8892c68, 0x16d5830d)}, {TOBN(0xeb4befc0, 0xef514ca5), TOBN(0x478eb679, 0xe72c9ee6), TOBN(0x9bca20da, 0xdbc40d5f), TOBN(0xf015de21, 0xdde4f64a)}}, {{TOBN(0xaa6a4de0, 0xeaf4b8a5), TOBN(0x68cfd9ca, 0x4bc60e32), TOBN(0x668a4b01, 0x7fd15e70), TOBN(0xd9f0694a, 0xf27dc09d)}, {TOBN(0xf6c3cad5, 0xba708bcd), TOBN(0x5cd2ba69, 0x5bb95c2a), TOBN(0xaa28c1d3, 0x33c0a58f), TOBN(0x23e274e3, 0xabc77870)}}, {{TOBN(0x44c3692d, 0xdfd20a4a), TOBN(0x091c5fd3, 0x81a66653), TOBN(0x6c0bb691, 0x09a0757d), TOBN(0x9072e8b9, 0x667343ea)}, {TOBN(0x31d40eb0, 0x80848bec), TOBN(0x95bd480a, 0x79fd36cc), TOBN(0x01a77c61, 0x65ed43f5), TOBN(0xafccd127, 0x2e0d40bf)}}, {{TOBN(0xeccfc82d, 0x1cc1884b), TOBN(0xc85ac201, 0x5d4753b4), TOBN(0xc7a6caac, 0x658e099f), TOBN(0xcf46369e, 0x04b27390)}, {TOBN(0xe2e7d049, 0x506467ea), TOBN(0x481b63a2, 0x37cdeccc), TOBN(0x4029abd8, 0xed80143a), TOBN(0x28bfe3c7, 0xbcb00b88)}}, {{TOBN(0x3bec1009, 0x0643d84a), TOBN(0x885f3668, 0xabd11041), TOBN(0xdb02432c, 0xf83a34d6), TOBN(0x32f7b360, 0x719ceebe)}, {TOBN(0xf06c7837, 0xdad1fe7a), TOBN(0x60a157a9, 0x5441a0b0), TOBN(0x704970e9, 0xe2d47550), TOBN(0xcd2bd553, 0x271b9020)}}, {{TOBN(0xff57f82f, 0x33e24a0b), TOBN(0x9cbee23f, 0xf2565079), TOBN(0x16353427, 0xeb5f5825), TOBN(0x276feec4, 0xe948d662)}, {TOBN(0xd1b62bc6, 0xda10032b), TOBN(0x718351dd, 0xf0e72a53), TOBN(0x93452076, 0x2420e7ba), TOBN(0x96368fff, 0x3a00118d)}}, {{TOBN(0x00ce2d26, 0x150a49e4), TOBN(0x0c28b636, 0x3f04706b), TOBN(0xbad65a46, 0x58b196d0), TOBN(0x6c8455fc, 0xec9f8b7c)}, {TOBN(0xe90c895f, 0x2d71867e), TOBN(0x5c0be31b, 0xedf9f38c), TOBN(0x2a37a15e, 0xd8f6ec04), TOBN(0x239639e7, 0x8cd85251)}}, 
{{TOBN(0xd8975315, 0x9c7c4c6b), TOBN(0x603aa3c0, 0xd7409af7), TOBN(0xb8d53d0c, 0x007132fb), TOBN(0x68d12af7, 0xa6849238)}, {TOBN(0xbe0607e7, 0xbf5d9279), TOBN(0x9aa50055, 0xaada74ce), TOBN(0xe81079cb, 0xba7e8ccb), TOBN(0x610c71d1, 0xa5f4ff5e)}}, {{TOBN(0x9e2ee1a7, 0x5aa07093), TOBN(0xca84004b, 0xa75da47c), TOBN(0x074d3951, 0x3de75401), TOBN(0xf938f756, 0xbb311592)}, {TOBN(0x96197618, 0x00a43421), TOBN(0x39a25362, 0x07bc78c8), TOBN(0x278f710a, 0x0a171276), TOBN(0xb28446ea, 0x8d1a8f08)}}, {{TOBN(0x184781bf, 0xe3b6a661), TOBN(0x7751cb1d, 0xe6d279f7), TOBN(0xf8ff95d6, 0xc59eb662), TOBN(0x186d90b7, 0x58d3dea7)}, {TOBN(0x0e4bb6c1, 0xdfb4f754), TOBN(0x5c5cf56b, 0x2b2801dc), TOBN(0xc561e452, 0x1f54564d), TOBN(0xb4fb8c60, 0xf0dd7f13)}}, {{TOBN(0xf8849630, 0x33ff98c7), TOBN(0x9619fffa, 0xcf17769c), TOBN(0xf8090bf6, 0x1bfdd80a), TOBN(0x14d9a149, 0x422cfe63)}, {TOBN(0xb354c360, 0x6f6df9ea), TOBN(0xdbcf770d, 0x218f17ea), TOBN(0x207db7c8, 0x79eb3480), TOBN(0x213dbda8, 0x559b6a26)}}, {{TOBN(0xac4c200b, 0x29fc81b3), TOBN(0xebc3e09f, 0x171d87c1), TOBN(0x91799530, 0x1481aa9e), TOBN(0x051b92e1, 0x92e114fa)}, {TOBN(0xdf8f92e9, 0xecb5537f), TOBN(0x44b1b2cc, 0x290c7483), TOBN(0xa711455a, 0x2adeb016), TOBN(0x964b6856, 0x81a10c2c)}}, {{TOBN(0x4f159d99, 0xcec03623), TOBN(0x05532225, 0xef3271ea), TOBN(0xb231bea3, 0xc5ee4849), TOBN(0x57a54f50, 0x7094f103)}, {TOBN(0x3e2d421d, 0x9598b352), TOBN(0xe865a49c, 0x67412ab4), TOBN(0xd2998a25, 0x1cc3a912), TOBN(0x5d092808, 0x0c74d65d)}}, {{TOBN(0x73f45908, 0x4088567a), TOBN(0xeb6b280e, 0x1f214a61), TOBN(0x8c9adc34, 0xcaf0c13d), TOBN(0x39d12938, 0xf561fb80)}, {TOBN(0xb2dc3a5e, 0xbc6edfb4), TOBN(0x7485b1b1, 0xfe4d210e), TOBN(0x062e0400, 0xe186ae72), TOBN(0x91e32d5c, 0x6eeb3b88)}}, {{TOBN(0x6df574d7, 0x4be59224), TOBN(0xebc88ccc, 0x716d55f3), TOBN(0x26c2e6d0, 0xcad6ed33), TOBN(0xc6e21e7d, 0x0d3e8b10)}, {TOBN(0x2cc5840e, 0x5bcc36bb), TOBN(0x9292445e, 0x7da74f69), TOBN(0x8be8d321, 0x4e5193a8), TOBN(0x3ec23629, 0x8df06413)}}, {{TOBN(0xc7e9ae85, 0xb134defa), 
TOBN(0x6073b1d0, 0x1bb2d475), TOBN(0xb9ad615e, 0x2863c00d), TOBN(0x9e29493d, 0x525f4ac4)}, {TOBN(0xc32b1dea, 0x4e9acf4f), TOBN(0x3e1f01c8, 0xa50db88d), TOBN(0xb05d70ea, 0x04da916c), TOBN(0x714b0d0a, 0xd865803e)}}, {{TOBN(0x4bd493fc, 0x9920cb5e), TOBN(0x5b44b1f7, 0x92c7a3ac), TOBN(0xa2a77293, 0xbcec9235), TOBN(0x5ee06e87, 0xcd378553)}, {TOBN(0xceff8173, 0xda621607), TOBN(0x2bb03e4c, 0x99f5d290), TOBN(0x2945106a, 0xa6f734ac), TOBN(0xb5056604, 0xd25c4732)}}, {{TOBN(0x5945920c, 0xe079afee), TOBN(0x686e17a0, 0x6789831f), TOBN(0x5966bee8, 0xb74a5ae5), TOBN(0x38a673a2, 0x1e258d46)}, {TOBN(0xbd1cc1f2, 0x83141c95), TOBN(0x3b2ecf4f, 0x0e96e486), TOBN(0xcd3aa896, 0x74e5fc78), TOBN(0x415ec10c, 0x2482fa7a)}}, {{TOBN(0x15234419, 0x80503380), TOBN(0x513d917a, 0xd314b392), TOBN(0xb0b52f4e, 0x63caecae), TOBN(0x07bf22ad, 0x2dc7780b)}, {TOBN(0xe761e8a1, 0xe4306839), TOBN(0x1b3be962, 0x5dd7feaa), TOBN(0x4fe728de, 0x74c778f1), TOBN(0xf1fa0bda, 0x5e0070f6)}}, {{TOBN(0x85205a31, 0x6ec3f510), TOBN(0x2c7e4a14, 0xd2980475), TOBN(0xde3c19c0, 0x6f30ebfd), TOBN(0xdb1c1f38, 0xd4b7e644)}, {TOBN(0xfe291a75, 0x5dce364a), TOBN(0xb7b22a3c, 0x058f5be3), TOBN(0x2cd2c302, 0x37fea38c), TOBN(0x2930967a, 0x2e17be17)}}, {{TOBN(0x87f009de, 0x0c061c65), TOBN(0xcb014aac, 0xedc6ed44), TOBN(0x49bd1cb4, 0x3bafb1eb), TOBN(0x81bd8b5c, 0x282d3688)}, {TOBN(0x1cdab87e, 0xf01a17af), TOBN(0x21f37ac4, 0xe710063b), TOBN(0x5a6c5676, 0x42fc8193), TOBN(0xf4753e70, 0x56a6015c)}}, {{TOBN(0x020f795e, 0xa15b0a44), TOBN(0x8f37c8d7, 0x8958a958), TOBN(0x63b7e89b, 0xa4b675b5), TOBN(0xb4fb0c0c, 0x0fc31aea)}, {TOBN(0xed95e639, 0xa7ff1f2e), TOBN(0x9880f5a3, 0x619614fb), TOBN(0xdeb6ff02, 0x947151ab), TOBN(0x5bc5118c, 0xa868dcdb)}}, {{TOBN(0xd8da2055, 0x4c20cea5), TOBN(0xcac2776e, 0x14c4d69a), TOBN(0xcccb22c1, 0x622d599b), TOBN(0xa4ddb653, 0x68a9bb50)}, {TOBN(0x2c4ff151, 0x1b4941b4), TOBN(0xe1ff19b4, 0x6efba588), TOBN(0x35034363, 0xc48345e0), TOBN(0x45542e3d, 0x1e29dfc4)}}, {{TOBN(0xf197cb91, 0x349f7aed), TOBN(0x3b2b5a00, 0x8fca8420), 
TOBN(0x7c175ee8, 0x23aaf6d8), TOBN(0x54dcf421, 0x35af32b6)}, {TOBN(0x0ba14307, 0x27d6561e), TOBN(0x879d5ee4, 0xd175b1e2), TOBN(0xc7c43673, 0x99807db5), TOBN(0x77a54455, 0x9cd55bcd)}}, {{TOBN(0xe6c2ff13, 0x0105c072), TOBN(0x18f7a99f, 0x8dda7da4), TOBN(0x4c301820, 0x0e2d35c1), TOBN(0x06a53ca0, 0xd9cc6c82)}, {TOBN(0xaa21cc1e, 0xf1aa1d9e), TOBN(0x32414334, 0x4a75b1e8), TOBN(0x2a6d1328, 0x0ebe9fdc), TOBN(0x16bd173f, 0x98a4755a)}}, {{TOBN(0xfbb9b245, 0x2133ffd9), TOBN(0x39a8b2f1, 0x830f1a20), TOBN(0x484bc97d, 0xd5a1f52a), TOBN(0xd6aebf56, 0xa40eddf8)}, {TOBN(0x32257acb, 0x76ccdac6), TOBN(0xaf4d36ec, 0x1586ff27), TOBN(0x8eaa8863, 0xf8de7dd1), TOBN(0x0045d5cf, 0x88647c16)}}}, {{{TOBN(0xa6f3d574, 0xc005979d), TOBN(0xc2072b42, 0x6a40e350), TOBN(0xfca5c156, 0x8de2ecf9), TOBN(0xa8c8bf5b, 0xa515344e)}, {TOBN(0x97aee555, 0x114df14a), TOBN(0xd4374a4d, 0xfdc5ec6b), TOBN(0x754cc28f, 0x2ca85418), TOBN(0x71cb9e27, 0xd3c41f78)}}, {{TOBN(0x89105079, 0x03605c39), TOBN(0xf0843d9e, 0xa142c96c), TOBN(0xf3744934, 0x16923684), TOBN(0x732caa2f, 0xfa0a2893)}, {TOBN(0xb2e8c270, 0x61160170), TOBN(0xc32788cc, 0x437fbaa3), TOBN(0x39cd818e, 0xa6eda3ac), TOBN(0xe2e94239, 0x9e2b2e07)}}, {{TOBN(0x6967d39b, 0x0260e52a), TOBN(0xd42585cc, 0x90653325), TOBN(0x0d9bd605, 0x21ca7954), TOBN(0x4fa20877, 0x81ed57b3)}, {TOBN(0x60c1eff8, 0xe34a0bbe), TOBN(0x56b0040c, 0x84f6ef64), TOBN(0x28be2b24, 0xb1af8483), TOBN(0xb2278163, 0xf5531614)}}, {{TOBN(0x8df27545, 0x5922ac1c), TOBN(0xa7b3ef5c, 0xa52b3f63), TOBN(0x8e77b214, 0x71de57c4), TOBN(0x31682c10, 0x834c008b)}, {TOBN(0xc76824f0, 0x4bd55d31), TOBN(0xb6d1c086, 0x17b61c71), TOBN(0x31db0903, 0xc2a5089d), TOBN(0x9c092172, 0x184e5d3f)}}, {{TOBN(0xdd7ced5b, 0xc00cc638), TOBN(0x1a2015eb, 0x61278fc2), TOBN(0x2e8e5288, 0x6a37f8d6), TOBN(0xc457786f, 0xe79933ad)}, {TOBN(0xb3fe4cce, 0x2c51211a), TOBN(0xad9b10b2, 0x24c20498), TOBN(0x90d87a4f, 0xd28db5e5), TOBN(0x698cd105, 0x3aca2fc3)}}, {{TOBN(0x4f112d07, 0xe91b536d), TOBN(0xceb982f2, 0x9eba09d6), TOBN(0x3c157b2c, 0x197c396f), 
TOBN(0xe23c2d41, 0x7b66eb24)}, {TOBN(0x480c57d9, 0x3f330d37), TOBN(0xb3a4c8a1, 0x79108deb), TOBN(0x702388de, 0xcb199ce5), TOBN(0x0b019211, 0xb944a8d4)}}, {{TOBN(0x24f2a692, 0x840bb336), TOBN(0x7c353bdc, 0xa669fa7b), TOBN(0xda20d6fc, 0xdec9c300), TOBN(0x625fbe2f, 0xa13a4f17)}, {TOBN(0xa2b1b61a, 0xdbc17328), TOBN(0x008965bf, 0xa9515621), TOBN(0x49690939, 0xc620ff46), TOBN(0x182dd27d, 0x8717e91c)}}, {{TOBN(0x5ace5035, 0xea6c3997), TOBN(0x54259aaa, 0xc2610bef), TOBN(0xef18bb3f, 0x3c80dd39), TOBN(0x6910b95b, 0x5fc3fa39)}, {TOBN(0xfce2f510, 0x43e09aee), TOBN(0xced56c9f, 0xa7675665), TOBN(0x10e265ac, 0xd872db61), TOBN(0x6982812e, 0xae9fce69)}}, {{TOBN(0x29be11c6, 0xce800998), TOBN(0x72bb1752, 0xb90360d9), TOBN(0x2c193197, 0x5a4ad590), TOBN(0x2ba2f548, 0x9fc1dbc0)}, {TOBN(0x7fe4eebb, 0xe490ebe0), TOBN(0x12a0a4cd, 0x7fae11c0), TOBN(0x7197cf81, 0xe903ba37), TOBN(0xcf7d4aa8, 0xde1c6dd8)}}, {{TOBN(0x92af6bf4, 0x3fd5684c), TOBN(0x2b26eecf, 0x80360aa1), TOBN(0xbd960f30, 0x00546a82), TOBN(0x407b3c43, 0xf59ad8fe)}, {TOBN(0x86cae5fe, 0x249c82ba), TOBN(0x9e0faec7, 0x2463744c), TOBN(0x87f551e8, 0x94916272), TOBN(0x033f9344, 0x6ceb0615)}}, {{TOBN(0x1e5eb0d1, 0x8be82e84), TOBN(0x89967f0e, 0x7a582fef), TOBN(0xbcf687d5, 0xa6e921fa), TOBN(0xdfee4cf3, 0xd37a09ba)}, {TOBN(0x94f06965, 0xb493c465), TOBN(0x638b9a1c, 0x7635c030), TOBN(0x76667864, 0x66f05e9f), TOBN(0xccaf6808, 0xc04da725)}}, {{TOBN(0xca2eb690, 0x768fccfc), TOBN(0xf402d37d, 0xb835b362), TOBN(0x0efac0d0, 0xe2fdfcce), TOBN(0xefc9cdef, 0xb638d990)}, {TOBN(0x2af12b72, 0xd1669a8b), TOBN(0x33c536bc, 0x5774ccbd), TOBN(0x30b21909, 0xfb34870e), TOBN(0xc38fa2f7, 0x7df25aca)}}, {{TOBN(0x74c5f02b, 0xbf81f3f5), TOBN(0x0525a5ae, 0xaf7e4581), TOBN(0x88d2aaba, 0x433c54ae), TOBN(0xed9775db, 0x806a56c5)}, {TOBN(0xd320738a, 0xc0edb37d), TOBN(0x25fdb6ee, 0x66cc1f51), TOBN(0xac661d17, 0x10600d76), TOBN(0x931ec1f3, 0xbdd1ed76)}}, {{TOBN(0x65c11d62, 0x19ee43f1), TOBN(0x5cd57c3e, 0x60829d97), TOBN(0xd26c91a3, 0x984be6e8), TOBN(0xf08d9309, 0x8b0c53bd)}, 
{TOBN(0x94bc9e5b, 0xc016e4ea), TOBN(0xd3916839, 0x11d43d2b), TOBN(0x886c5ad7, 0x73701155), TOBN(0xe0377626, 0x20b00715)}}, {{TOBN(0x7f01c9ec, 0xaa80ba59), TOBN(0x3083411a, 0x68538e51), TOBN(0x970370f1, 0xe88128af), TOBN(0x625cc3db, 0x91dec14b)}, {TOBN(0xfef9666c, 0x01ac3107), TOBN(0xb2a8d577, 0xd5057ac3), TOBN(0xb0f26299, 0x92be5df7), TOBN(0xf579c8e5, 0x00353924)}}, {{TOBN(0xb8fa3d93, 0x1341ed7a), TOBN(0x4223272c, 0xa7b59d49), TOBN(0x3dcb1947, 0x83b8c4a4), TOBN(0x4e413c01, 0xed1302e4)}, {TOBN(0x6d999127, 0xe17e44ce), TOBN(0xee86bf75, 0x33b3adfb), TOBN(0xf6902fe6, 0x25aa96ca), TOBN(0xb73540e4, 0xe5aae47d)}}, {{TOBN(0x32801d7b, 0x1b4a158c), TOBN(0xe571c99e, 0x27e2a369), TOBN(0x40cb76c0, 0x10d9f197), TOBN(0xc308c289, 0x3167c0ae)}, {TOBN(0xa6ef9dd3, 0xeb7958f2), TOBN(0xa7226dfc, 0x300879b1), TOBN(0x6cd0b362, 0x7edf0636), TOBN(0x4efbce6c, 0x7bc37eed)}}, {{TOBN(0x75f92a05, 0x8d699021), TOBN(0x586d4c79, 0x772566e3), TOBN(0x378ca5f1, 0x761ad23a), TOBN(0x650d86fc, 0x1465a8ac)}, {TOBN(0x7a4ed457, 0x842ba251), TOBN(0x6b65e3e6, 0x42234933), TOBN(0xaf1543b7, 0x31aad657), TOBN(0xa4cefe98, 0xcbfec369)}}, {{TOBN(0xb587da90, 0x9f47befb), TOBN(0x6562e9fb, 0x41312d13), TOBN(0xa691ea59, 0xeff1cefe), TOBN(0xcc30477a, 0x05fc4cf6)}, {TOBN(0xa1632461, 0x0b0ffd3d), TOBN(0xa1f16f3b, 0x5b355956), TOBN(0x5b148d53, 0x4224ec24), TOBN(0xdc834e7b, 0xf977012a)}}, {{TOBN(0x7bfc5e75, 0xb2c69dbc), TOBN(0x3aa77a29, 0x03c3da6c), TOBN(0xde0df03c, 0xca910271), TOBN(0xcbd5ca4a, 0x7806dc55)}, {TOBN(0xe1ca5807, 0x6db476cb), TOBN(0xfde15d62, 0x5f37a31e), TOBN(0xf49af520, 0xf41af416), TOBN(0x96c5c5b1, 0x7d342db5)}}, {{TOBN(0x155c43b7, 0xeb4ceb9b), TOBN(0x2e993010, 0x4e77371a), TOBN(0x1d2987da, 0x675d43af), TOBN(0xef2bc1c0, 0x8599fd72)}, {TOBN(0x96894b7b, 0x9342f6b2), TOBN(0x201eadf2, 0x7c8e71f0), TOBN(0xf3479d9f, 0x4a1f3efc), TOBN(0xe0f8a742, 0x702a9704)}}, {{TOBN(0xeafd44b6, 0xb3eba40c), TOBN(0xf9739f29, 0xc1c1e0d0), TOBN(0x0091471a, 0x619d505e), TOBN(0xc15f9c96, 0x9d7c263e)}, {TOBN(0x5be47285, 0x83afbe33), 
TOBN(0xa3b6d6af, 0x04f1e092), TOBN(0xe76526b9, 0x751a9d11), TOBN(0x2ec5b26d, 0x9a4ae4d2)}}, {{TOBN(0xeb66f4d9, 0x02f6fb8d), TOBN(0x4063c561, 0x96912164), TOBN(0xeb7050c1, 0x80ef3000), TOBN(0x288d1c33, 0xeaa5b3f0)}, {TOBN(0xe87c68d6, 0x07806fd8), TOBN(0xb2f7f9d5, 0x4bbbf50f), TOBN(0x25972f3a, 0xac8d6627), TOBN(0xf8547774, 0x10e8c13b)}}, {{TOBN(0xcc50ef6c, 0x872b4a60), TOBN(0xab2a34a4, 0x4613521b), TOBN(0x39c5c190, 0x983e15d1), TOBN(0x61dde5df, 0x59905512)}, {TOBN(0xe417f621, 0x9f2275f3), TOBN(0x0750c8b6, 0x451d894b), TOBN(0x75b04ab9, 0x78b0bdaa), TOBN(0x3bfd9fd4, 0x458589bd)}}, {{TOBN(0xf1013e30, 0xee9120b6), TOBN(0x2b51af93, 0x23a4743e), TOBN(0xea96ffae, 0x48d14d9e), TOBN(0x71dc0dbe, 0x698a1d32)}, {TOBN(0x914962d2, 0x0180cca4), TOBN(0x1ae60677, 0xc3568963), TOBN(0x8cf227b1, 0x437bc444), TOBN(0xc650c83b, 0xc9962c7a)}}, {{TOBN(0x23c2c7dd, 0xfe7ccfc4), TOBN(0xf925c89d, 0x1b929d48), TOBN(0x4460f74b, 0x06783c33), TOBN(0xac2c8d49, 0xa590475a)}, {TOBN(0xfb40b407, 0xb807bba0), TOBN(0x9d1e362d, 0x69ff8f3a), TOBN(0xa33e9681, 0xcbef64a4), TOBN(0x67ece5fa, 0x332fb4b2)}}, {{TOBN(0x6900a99b, 0x739f10e3), TOBN(0xc3341ca9, 0xff525925), TOBN(0xee18a626, 0xa9e2d041), TOBN(0xa5a83685, 0x29580ddd)}, {TOBN(0xf3470c81, 0x9d7de3cd), TOBN(0xedf02586, 0x2062cf9c), TOBN(0xf43522fa, 0xc010edb0), TOBN(0x30314135, 0x13a4b1ae)}}, {{TOBN(0xc792e02a, 0xdb22b94b), TOBN(0x993d8ae9, 0xa1eaa45b), TOBN(0x8aad6cd3, 0xcd1e1c63), TOBN(0x89529ca7, 0xc5ce688a)}, {TOBN(0x2ccee3aa, 0xe572a253), TOBN(0xe02b6438, 0x02a21efb), TOBN(0xa7091b6e, 0xc9430358), TOBN(0x06d1b1fa, 0x9d7db504)}}, {{TOBN(0x58846d32, 0xc4744733), TOBN(0x40517c71, 0x379f9e34), TOBN(0x2f65655f, 0x130ef6ca), TOBN(0x526e4488, 0xf1f3503f)}, {TOBN(0x8467bd17, 0x7ee4a976), TOBN(0x1d9dc913, 0x921363d1), TOBN(0xd8d24c33, 0xb069e041), TOBN(0x5eb5da0a, 0x2cdf7f51)}}, {{TOBN(0x1c0f3cb1, 0x197b994f), TOBN(0x3c95a6c5, 0x2843eae9), TOBN(0x7766ffc9, 0xa6097ea5), TOBN(0x7bea4093, 0xd723b867)}, {TOBN(0xb48e1f73, 0x4db378f9), TOBN(0x70025b00, 0xe37b77ac), 
TOBN(0x943dc8e7, 0xaf24ad46), TOBN(0xb98a15ac, 0x16d00a85)}}, {{TOBN(0x3adc38ba, 0x2743b004), TOBN(0xb1c7f4f7, 0x334415ee), TOBN(0xea43df8f, 0x1e62d05a), TOBN(0x32618905, 0x9d76a3b6)}, {TOBN(0x2fbd0bb5, 0xa23a0f46), TOBN(0x5bc971db, 0x6a01918c), TOBN(0x7801d94a, 0xb4743f94), TOBN(0xb94df65e, 0x676ae22b)}}, {{TOBN(0xaafcbfab, 0xaf95894c), TOBN(0x7b9bdc07, 0x276b2241), TOBN(0xeaf98362, 0x5bdda48b), TOBN(0x5977faf2, 0xa3fcb4df)}, {TOBN(0xbed042ef, 0x052c4b5b), TOBN(0x9fe87f71, 0x067591f0), TOBN(0xc89c73ca, 0x22f24ec7), TOBN(0x7d37fa9e, 0xe64a9f1b)}}, {{TOBN(0x2710841a, 0x15562627), TOBN(0x2c01a613, 0xc243b034), TOBN(0x1d135c56, 0x2bc68609), TOBN(0xc2ca1715, 0x8b03f1f6)}, {TOBN(0xc9966c2d, 0x3eb81d82), TOBN(0xc02abf4a, 0x8f6df13e), TOBN(0x77b34bd7, 0x8f72b43b), TOBN(0xaff6218f, 0x360c82b0)}}, {{TOBN(0x0aa5726c, 0x8d55b9d2), TOBN(0xdc0adbe9, 0x99e9bffb), TOBN(0x9097549c, 0xefb9e72a), TOBN(0x16755712, 0x9dfb3111)}, {TOBN(0xdd8bf984, 0xf26847f9), TOBN(0xbcb8e387, 0xdfb30cb7), TOBN(0xc1fd32a7, 0x5171ef9c), TOBN(0x977f3fc7, 0x389b363f)}}, {{TOBN(0x116eaf2b, 0xf4babda0), TOBN(0xfeab68bd, 0xf7113c8e), TOBN(0xd1e3f064, 0xb7def526), TOBN(0x1ac30885, 0xe0b3fa02)}, {TOBN(0x1c5a6e7b, 0x40142d9d), TOBN(0x839b5603, 0x30921c0b), TOBN(0x48f301fa, 0x36a116a3), TOBN(0x380e1107, 0xcfd9ee6d)}}, {{TOBN(0x7945ead8, 0x58854be1), TOBN(0x4111c12e, 0xcbd4d49d), TOBN(0xece3b1ec, 0x3a29c2ef), TOBN(0x6356d404, 0x8d3616f5)}, {TOBN(0x9f0d6a8f, 0x594d320e), TOBN(0x0989316d, 0xf651ccd2), TOBN(0x6c32117a, 0x0f8fdde4), TOBN(0x9abe5cc5, 0xa26a9bbc)}}, {{TOBN(0xcff560fb, 0x9723f671), TOBN(0x21b2a12d, 0x7f3d593c), TOBN(0xe4cb18da, 0x24ba0696), TOBN(0x186e2220, 0xc3543384)}, {TOBN(0x722f64e0, 0x88312c29), TOBN(0x94282a99, 0x17dc7752), TOBN(0x62467bbf, 0x5a85ee89), TOBN(0xf435c650, 0xf10076a0)}}, {{TOBN(0xc9ff1539, 0x43b3a50b), TOBN(0x7132130c, 0x1a53efbc), TOBN(0x31bfe063, 0xf7b0c5b7), TOBN(0xb0179a7d, 0x4ea994cc)}, {TOBN(0x12d064b3, 0xc85f455b), TOBN(0x47259328, 0x8f6e0062), TOBN(0xf64e590b, 0xb875d6d9), 
TOBN(0x22dd6225, 0xad92bcc7)}}, {{TOBN(0xb658038e, 0xb9c3bd6d), TOBN(0x00cdb0d6, 0xfbba27c8), TOBN(0x0c681337, 0x1062c45d), TOBN(0xd8515b8c, 0x2d33407d)}, {TOBN(0xcb8f699e, 0x8cbb5ecf), TOBN(0x8c4347f8, 0xc608d7d8), TOBN(0x2c11850a, 0xbb3e00db), TOBN(0x20a8dafd, 0xecb49d19)}}, {{TOBN(0xbd781480, 0x45ee2f40), TOBN(0x75e354af, 0x416b60cf), TOBN(0xde0b58a1, 0x8d49a8c4), TOBN(0xe40e94e2, 0xfa359536)}, {TOBN(0xbd4fa59f, 0x62accd76), TOBN(0x05cf466a, 0x8c762837), TOBN(0xb5abda99, 0x448c277b), TOBN(0x5a9e01bf, 0x48b13740)}}, {{TOBN(0x9d457798, 0x326aad8d), TOBN(0xbdef4954, 0xc396f7e7), TOBN(0x6fb274a2, 0xc253e292), TOBN(0x2800bf0a, 0x1cfe53e7)}, {TOBN(0x22426d31, 0x44438fd4), TOBN(0xef233923, 0x5e259f9a), TOBN(0x4188503c, 0x03f66264), TOBN(0x9e5e7f13, 0x7f9fdfab)}}, {{TOBN(0x565eb76c, 0x5fcc1aba), TOBN(0xea632548, 0x59b5bff8), TOBN(0x5587c087, 0xaab6d3fa), TOBN(0x92b639ea, 0x6ce39c1b)}, {TOBN(0x0706e782, 0x953b135c), TOBN(0x7308912e, 0x425268ef), TOBN(0x599e92c7, 0x090e7469), TOBN(0x83b90f52, 0x9bc35e75)}}, {{TOBN(0x4750b3d0, 0x244975b3), TOBN(0xf3a44358, 0x11965d72), TOBN(0x179c6774, 0x9c8dc751), TOBN(0xff18cdfe, 0xd23d9ff0)}, {TOBN(0xc4013833, 0x2028e247), TOBN(0x96e280e2, 0xf3bfbc79), TOBN(0xf60417bd, 0xd0880a84), TOBN(0x263c9f3d, 0x2a568151)}}, {{TOBN(0x36be15b3, 0x2d2ce811), TOBN(0x846dc0c2, 0xf8291d21), TOBN(0x5cfa0ecb, 0x789fcfdb), TOBN(0x45a0beed, 0xd7535b9a)}, {TOBN(0xec8e9f07, 0x96d69af1), TOBN(0x31a7c5b8, 0x599ab6dc), TOBN(0xd36d45ef, 0xf9e2e09f), TOBN(0x3cf49ef1, 0xdcee954b)}}, {{TOBN(0x6be34cf3, 0x086cff9b), TOBN(0x88dbd491, 0x39a3360f), TOBN(0x1e96b8cc, 0x0dbfbd1d), TOBN(0xc1e5f7bf, 0xcb7e2552)}, {TOBN(0x0547b214, 0x28819d98), TOBN(0xc770dd9c, 0x7aea9dcb), TOBN(0xaef0d4c7, 0x041d68c8), TOBN(0xcc2b9818, 0x13cb9ba8)}}, {{TOBN(0x7fc7bc76, 0xfe86c607), TOBN(0x6b7b9337, 0x502a9a95), TOBN(0x1948dc27, 0xd14dab63), TOBN(0x249dd198, 0xdae047be)}, {TOBN(0xe8356584, 0xa981a202), TOBN(0x3531dd18, 0x3a893387), TOBN(0x1be11f90, 0xc85c7209), TOBN(0x93d2fe1e, 0xe2a52b5a)}}, 
{{TOBN(0x8225bfe2, 0xec6d6b97), TOBN(0x9cf6d6f4, 0xbd0aa5de), TOBN(0x911459cb, 0x54779f5f), TOBN(0x5649cddb, 0x86aeb1f3)}, {TOBN(0x32133579, 0x3f26ce5a), TOBN(0xc289a102, 0x550f431e), TOBN(0x559dcfda, 0x73b84c6f), TOBN(0x84973819, 0xee3ac4d7)}}, {{TOBN(0xb51e55e6, 0xf2606a82), TOBN(0xe25f7061, 0x90f2fb57), TOBN(0xacef6c2a, 0xb1a4e37c), TOBN(0x864e359d, 0x5dcf2706)}, {TOBN(0x479e6b18, 0x7ce57316), TOBN(0x2cab2500, 0x3a96b23d), TOBN(0xed489862, 0x8ef16df7), TOBN(0x2056538c, 0xef3758b5)}}, {{TOBN(0xa7df865e, 0xf15d3101), TOBN(0x80c5533a, 0x61b553d7), TOBN(0x366e1997, 0x4ed14294), TOBN(0x6620741f, 0xb3c0bcd6)}, {TOBN(0x21d1d9c4, 0xedc45418), TOBN(0x005b859e, 0xc1cc4a9d), TOBN(0xdf01f630, 0xa1c462f0), TOBN(0x15d06cf3, 0xf26820c7)}}, {{TOBN(0x9f7f24ee, 0x3484be47), TOBN(0x2ff33e96, 0x4a0c902f), TOBN(0x00bdf457, 0x5a0bc453), TOBN(0x2378dfaf, 0x1aa238db)}, {TOBN(0x272420ec, 0x856720f2), TOBN(0x2ad9d95b, 0x96797291), TOBN(0xd1242cc6, 0x768a1558), TOBN(0x2e287f8b, 0x5cc86aa8)}}, {{TOBN(0x796873d0, 0x990cecaa), TOBN(0xade55f81, 0x675d4080), TOBN(0x2645eea3, 0x21f0cd84), TOBN(0x7a1efa0f, 0xb4e17d02)}, {TOBN(0xf6858420, 0x037cc061), TOBN(0x682e05f0, 0xd5d43e12), TOBN(0x59c36994, 0x27218710), TOBN(0x85cbba4d, 0x3f7cd2fc)}}, {{TOBN(0x726f9729, 0x7a3cd22a), TOBN(0x9f8cd5dc, 0x4a628397), TOBN(0x17b93ab9, 0xc23165ed), TOBN(0xff5f5dbf, 0x122823d4)}, {TOBN(0xc1e4e4b5, 0x654a446d), TOBN(0xd1a9496f, 0x677257ba), TOBN(0x6387ba94, 0xde766a56), TOBN(0x23608bc8, 0x521ec74a)}}, {{TOBN(0x16a522d7, 0x6688c4d4), TOBN(0x9d6b4282, 0x07373abd), TOBN(0xa62f07ac, 0xb42efaa3), TOBN(0xf73e00f7, 0xe3b90180)}, {TOBN(0x36175fec, 0x49421c3e), TOBN(0xc4e44f9b, 0x3dcf2678), TOBN(0x76df436b, 0x7220f09f), TOBN(0x172755fb, 0x3aa8b6cf)}}, {{TOBN(0xbab89d57, 0x446139cc), TOBN(0x0a0a6e02, 0x5fe0208f), TOBN(0xcdbb63e2, 0x11e5d399), TOBN(0x33ecaa12, 0xa8977f0b)}, {TOBN(0x59598b21, 0xf7c42664), TOBN(0xb3e91b32, 0xab65d08a), TOBN(0x035822ee, 0xf4502526), TOBN(0x1dcf0176, 0x720a82a9)}}, {{TOBN(0x50f8598f, 0x3d589e02), 
TOBN(0xdf0478ff, 0xb1d63d2c), TOBN(0x8b8068bd, 0x1571cd07), TOBN(0x30c3aa4f, 0xd79670cd)}, {TOBN(0x25e8fd4b, 0x941ade7f), TOBN(0x3d1debdc, 0x32790011), TOBN(0x65b6dcbd, 0x3a3f9ff0), TOBN(0x282736a4, 0x793de69c)}}, {{TOBN(0xef69a0c3, 0xd41d3bd3), TOBN(0xb533b8c9, 0x07a26bde), TOBN(0xe2801d97, 0xdb2edf9f), TOBN(0xdc4a8269, 0xe1877af0)}, {TOBN(0x6c1c5851, 0x3d590dbe), TOBN(0x84632f6b, 0xee4e9357), TOBN(0xd36d36b7, 0x79b33374), TOBN(0xb46833e3, 0x9bbca2e6)}}, {{TOBN(0x37893913, 0xf7fc0586), TOBN(0x385315f7, 0x66bf4719), TOBN(0x72c56293, 0xb31855dc), TOBN(0xd1416d4e, 0x849061fe)}, {TOBN(0xbeb3ab78, 0x51047213), TOBN(0x447f6e61, 0xf040c996), TOBN(0xd06d310d, 0x638b1d0c), TOBN(0xe28a413f, 0xbad1522e)}}, {{TOBN(0x685a76cb, 0x82003f86), TOBN(0x610d07f7, 0x0bcdbca3), TOBN(0x6ff66021, 0x9ca4c455), TOBN(0x7df39b87, 0xcea10eec)}, {TOBN(0xb9255f96, 0xe22db218), TOBN(0x8cc6d9eb, 0x08a34c44), TOBN(0xcd4ffb86, 0x859f9276), TOBN(0x8fa15eb2, 0x50d07335)}}, {{TOBN(0xdf553845, 0xcf2c24b5), TOBN(0x89f66a9f, 0x52f9c3ba), TOBN(0x8f22b5b9, 0xe4a7ceb3), TOBN(0xaffef809, 0x0e134686)}, {TOBN(0x3e53e1c6, 0x8eb8fac2), TOBN(0x93c1e4eb, 0x28aec98e), TOBN(0xb6b91ec5, 0x32a43bcb), TOBN(0x2dbfa947, 0xb2d74a51)}}, {{TOBN(0xe065d190, 0xca84bad7), TOBN(0xfb13919f, 0xad58e65c), TOBN(0x3c41718b, 0xf1cb6e31), TOBN(0x688969f0, 0x06d05c3f)}, {TOBN(0xd4f94ce7, 0x21264d45), TOBN(0xfdfb65e9, 0x7367532b), TOBN(0x5b1be8b1, 0x0945a39d), TOBN(0x229f789c, 0x2b8baf3b)}}, {{TOBN(0xd8f41f3e, 0x6f49f15d), TOBN(0x678ce828, 0x907f0792), TOBN(0xc69ace82, 0xfca6e867), TOBN(0x106451ae, 0xd01dcc89)}, {TOBN(0x1bb4f7f0, 0x19fc32d2), TOBN(0x64633dfc, 0xb00c52d2), TOBN(0x8f13549a, 0xad9ea445), TOBN(0x99a3bf50, 0xfb323705)}}, {{TOBN(0x0c9625a2, 0x534d4dbc), TOBN(0x45b8f1d1, 0xc2a2fea3), TOBN(0x76ec21a1, 0xa530fc1a), TOBN(0x4bac9c2a, 0x9e5bd734)}, {TOBN(0x5996d76a, 0x7b4e3587), TOBN(0x0045cdee, 0x1182d9e3), TOBN(0x1aee24b9, 0x1207f13d), TOBN(0x66452e97, 0x97345a41)}}, {{TOBN(0x16e5b054, 0x9f950cd0), TOBN(0x9cc72fb1, 0xd7fdd075), 
TOBN(0x6edd61e7, 0x66249663), TOBN(0xde4caa4d, 0xf043cccb)}, {TOBN(0x11b1f57a, 0x55c7ac17), TOBN(0x779cbd44, 0x1a85e24d), TOBN(0x78030f86, 0xe46081e7), TOBN(0xfd4a6032, 0x8e20f643)}}, {{TOBN(0xcc7a6488, 0x0a750c0f), TOBN(0x39bacfe3, 0x4e548e83), TOBN(0x3d418c76, 0x0c110f05), TOBN(0x3e4daa4c, 0xb1f11588)}, {TOBN(0x2733e7b5, 0x5ffc69ff), TOBN(0x46f147bc, 0x92053127), TOBN(0x885b2434, 0xd722df94), TOBN(0x6a444f65, 0xe6fc6b7c)}}}, {{{TOBN(0x7a1a465a, 0xc3f16ea8), TOBN(0x115a461d, 0xb2f1d11c), TOBN(0x4767dd95, 0x6c68a172), TOBN(0x3392f2eb, 0xd13a4698)}, {TOBN(0xc7a99ccd, 0xe526cdc7), TOBN(0x8e537fdc, 0x22292b81), TOBN(0x76d8cf69, 0xa6d39198), TOBN(0xffc5ff43, 0x2446852d)}}, {{TOBN(0x97b14f7e, 0xa90567e6), TOBN(0x513257b7, 0xb6ae5cb7), TOBN(0x85454a3c, 0x9f10903d), TOBN(0xd8d2c9ad, 0x69bc3724)}, {TOBN(0x38da9324, 0x6b29cb44), TOBN(0xb540a21d, 0x77c8cbac), TOBN(0x9bbfe435, 0x01918e42), TOBN(0xfffa707a, 0x56c3614e)}}, {{TOBN(0x0ce4e3f1, 0xd4e353b7), TOBN(0x062d8a14, 0xef46b0a0), TOBN(0x6408d5ab, 0x574b73fd), TOBN(0xbc41d1c9, 0xd3273ffd)}, {TOBN(0x3538e1e7, 0x6be77800), TOBN(0x71fe8b37, 0xc5655031), TOBN(0x1cd91621, 0x6b9b331a), TOBN(0xad825d0b, 0xbb388f73)}}, {{TOBN(0x56c2e05b, 0x1cb76219), TOBN(0x0ec0bf91, 0x71567e7e), TOBN(0xe7076f86, 0x61c4c910), TOBN(0xd67b085b, 0xbabc04d9)}, {TOBN(0x9fb90459, 0x5e93a96a), TOBN(0x7526c1ea, 0xfbdc249a), TOBN(0x0d44d367, 0xecdd0bb7), TOBN(0x95399917, 0x9dc0d695)}}, {{TOBN(0x61360ee9, 0x9e240d18), TOBN(0x057cdcac, 0xb4b94466), TOBN(0xe7667cd1, 0x2fe5325c), TOBN(0x1fa297b5, 0x21974e3b)}, {TOBN(0xfa4081e7, 0xdb083d76), TOBN(0x31993be6, 0xf206bd15), TOBN(0x8949269b, 0x14c19f8c), TOBN(0x21468d72, 0xa9d92357)}}, {{TOBN(0x2ccbc583, 0xa4c506ec), TOBN(0x957ed188, 0xd1acfe97), TOBN(0x8baed833, 0x12f1aea2), TOBN(0xef2a6cb4, 0x8325362d)}, {TOBN(0x130dde42, 0x8e195c43), TOBN(0xc842025a, 0x0e6050c6), TOBN(0x2da972a7, 0x08686a5d), TOBN(0xb52999a1, 0xe508b4a8)}}, {{TOBN(0xd9f090b9, 0x10a5a8bd), TOBN(0xca91d249, 0x096864da), TOBN(0x8e6a93be, 0x3f67dbc1), 
TOBN(0xacae6fba, 0xf5f4764c)}, {TOBN(0x1563c6e0, 0xd21411a0), TOBN(0x28fa787f, 0xda0a4ad8), TOBN(0xd524491c, 0x908c8030), TOBN(0x1257ba0e, 0x4c795f07)}}, {{TOBN(0x83f49167, 0xceca9754), TOBN(0x426d2cf6, 0x4b7939a0), TOBN(0x2555e355, 0x723fd0bf), TOBN(0xa96e6d06, 0xc4f144e2)}, {TOBN(0x4768a8dd, 0x87880e61), TOBN(0x15543815, 0xe508e4d5), TOBN(0x09d7e772, 0xb1b65e15), TOBN(0x63439dd6, 0xac302fa0)}}, {{TOBN(0xb93f802f, 0xc14e35c2), TOBN(0x71735b7c, 0x4341333c), TOBN(0x03a25104, 0x16d4f362), TOBN(0x3f4d069b, 0xbf433c8e)}, {TOBN(0x0d83ae01, 0xf78f5a7c), TOBN(0x50a8ffbe, 0x7c4eed07), TOBN(0xc74f8906, 0x76e10f83), TOBN(0x7d080966, 0x9ddaf8e1)}}, {{TOBN(0xb11df8e1, 0x698e04cc), TOBN(0x877be203, 0x169005c8), TOBN(0x32749e8c, 0x4f3c6179), TOBN(0x2dbc9d0a, 0x7853fc05)}, {TOBN(0x187d4f93, 0x9454d937), TOBN(0xe682ce9d, 0xb4800e1b), TOBN(0xa9129ad8, 0x165e68e8), TOBN(0x0fe29735, 0xbe7f785b)}}, {{TOBN(0x5303f40c, 0x5b9e02b7), TOBN(0xa37c9692, 0x35ee04e8), TOBN(0x5f46cc20, 0x34d6632b), TOBN(0x55ef72b2, 0x96ac545b)}, {TOBN(0xabec5c1f, 0x7b91b062), TOBN(0x0a79e1c7, 0xbb33e821), TOBN(0xbb04b428, 0x3a9f4117), TOBN(0x0de1f28f, 0xfd2a475a)}}, {{TOBN(0x31019ccf, 0x3a4434b4), TOBN(0xa3458111, 0x1a7954dc), TOBN(0xa9dac80d, 0xe34972a7), TOBN(0xb043d054, 0x74f6b8dd)}, {TOBN(0x021c319e, 0x11137b1a), TOBN(0x00a754ce, 0xed5cc03f), TOBN(0x0aa2c794, 0xcbea5ad4), TOBN(0x093e67f4, 0x70c015b6)}}, {{TOBN(0x72cdfee9, 0xc97e3f6b), TOBN(0xc10bcab4, 0xb6da7461), TOBN(0x3b02d2fc, 0xb59806b9), TOBN(0x85185e89, 0xa1de6f47)}, {TOBN(0x39e6931f, 0x0eb6c4d4), TOBN(0x4d4440bd, 0xd4fa5b04), TOBN(0x5418786e, 0x34be7eb8), TOBN(0x6380e521, 0x9d7259bc)}}, {{TOBN(0x20ac0351, 0xd598d710), TOBN(0x272c4166, 0xcb3a4da4), TOBN(0xdb82fe1a, 0xca71de1f), TOBN(0x746e79f2, 0xd8f54b0f)}, {TOBN(0x6e7fc736, 0x4b573e9b), TOBN(0x75d03f46, 0xfd4b5040), TOBN(0x5c1cc36d, 0x0b98d87b), TOBN(0x513ba3f1, 0x1f472da1)}}, {{TOBN(0x79d0af26, 0xabb177dd), TOBN(0xf82ab568, 0x7891d564), TOBN(0x2b6768a9, 0x72232173), TOBN(0xefbb3bb0, 0x8c1f6619)}, 
{TOBN(0xb29c11db, 0xa6d18358), TOBN(0x519e2797, 0xb0916d3a), TOBN(0xd4dc18f0, 0x9188e290), TOBN(0x648e86e3, 0x98b0ca7f)}}, {{TOBN(0x859d3145, 0x983c38b5), TOBN(0xb14f176c, 0x637abc8b), TOBN(0x2793fb9d, 0xcaff7be6), TOBN(0xebe5a55f, 0x35a66a5a)}, {TOBN(0x7cec1dcd, 0x9f87dc59), TOBN(0x7c595cd3, 0xfbdbf560), TOBN(0x5b543b22, 0x26eb3257), TOBN(0x69080646, 0xc4c935fd)}}, {{TOBN(0x7f2e4403, 0x81e9ede3), TOBN(0x243c3894, 0xcaf6df0a), TOBN(0x7c605bb1, 0x1c073b11), TOBN(0xcd06a541, 0xba6a4a62)}, {TOBN(0x29168949, 0x49d4e2e5), TOBN(0x33649d07, 0x4af66880), TOBN(0xbfc0c885, 0xe9a85035), TOBN(0xb4e52113, 0xfc410f4b)}}, {{TOBN(0xdca3b706, 0x78a6513b), TOBN(0x92ea4a2a, 0x9edb1943), TOBN(0x02642216, 0xdb6e2dd8), TOBN(0x9b45d0b4, 0x9fd57894)}, {TOBN(0x114e70db, 0xc69d11ae), TOBN(0x1477dd19, 0x4c57595f), TOBN(0xbc2208b4, 0xec77c272), TOBN(0x95c5b4d7, 0xdb68f59c)}}, {{TOBN(0xb8c4fc63, 0x42e532b7), TOBN(0x386ba422, 0x9ae35290), TOBN(0xfb5dda42, 0xd201ecbc), TOBN(0x2353dc8b, 0xa0e38fd6)}, {TOBN(0x9a0b85ea, 0x68f7e978), TOBN(0x96ec5682, 0x2ad6d11f), TOBN(0x5e279d6c, 0xe5f6886d), TOBN(0xd3fe03cd, 0x3cb1914d)}}, {{TOBN(0xfe541fa4, 0x7ea67c77), TOBN(0x952bd2af, 0xe3ea810c), TOBN(0x791fef56, 0x8d01d374), TOBN(0xa3a1c621, 0x0f11336e)}, {TOBN(0x5ad0d5a9, 0xc7ec6d79), TOBN(0xff7038af, 0x3225c342), TOBN(0x003c6689, 0xbc69601b), TOBN(0x25059bc7, 0x45e8747d)}}, {{TOBN(0xfa4965b2, 0xf2086fbf), TOBN(0xf6840ea6, 0x86916078), TOBN(0xd7ac7620, 0x70081d6c), TOBN(0xe600da31, 0xb5328645)}, {TOBN(0x01916f63, 0x529b8a80), TOBN(0xe80e4858, 0x2d7d6f3e), TOBN(0x29eb0fe8, 0xd664ca7c), TOBN(0xf017637b, 0xe7b43b0c)}}, {{TOBN(0x9a75c806, 0x76cb2566), TOBN(0x8f76acb1, 0xb24892d9), TOBN(0x7ae7b9cc, 0x1f08fe45), TOBN(0x19ef7329, 0x6a4907d8)}, {TOBN(0x2db4ab71, 0x5f228bf0), TOBN(0xf3cdea39, 0x817032d7), TOBN(0x0b1f482e, 0xdcabe3c0), TOBN(0x3baf76b4, 0xbb86325c)}}, {{TOBN(0xd49065e0, 0x10089465), TOBN(0x3bab5d29, 0x8e77c596), TOBN(0x7636c3a6, 0x193dbd95), TOBN(0xdef5d294, 0xb246e499)}, {TOBN(0xb22c58b9, 0x286b2475), 
TOBN(0xa0b93939, 0xcd80862b), TOBN(0x3002c83a, 0xf0992388), TOBN(0x6de01f9b, 0xeacbe14c)}}, {{TOBN(0x6aac688e, 0xadd70482), TOBN(0x708de92a, 0x7b4a4e8a), TOBN(0x75b6dd73, 0x758a6eef), TOBN(0xea4bf352, 0x725b3c43)}, {TOBN(0x10041f2c, 0x87912868), TOBN(0xb1b1be95, 0xef09297a), TOBN(0x19ae23c5, 0xa9f3860a), TOBN(0xc4f0f839, 0x515dcf4b)}}, {{TOBN(0x3c7ecca3, 0x97f6306a), TOBN(0x744c44ae, 0x68a3a4b0), TOBN(0x69cd13a0, 0xb3a1d8a2), TOBN(0x7cad0a1e, 0x5256b578)}, {TOBN(0xea653fcd, 0x33791d9e), TOBN(0x9cc2a05d, 0x74b2e05f), TOBN(0x73b391dc, 0xfd7affa2), TOBN(0xddb7091e, 0xb6b05442)}}, {{TOBN(0xc71e27bf, 0x8538a5c6), TOBN(0x195c63dd, 0x89abff17), TOBN(0xfd315285, 0x1b71e3da), TOBN(0x9cbdfda7, 0xfa680fa0)}, {TOBN(0x9db876ca, 0x849d7eab), TOBN(0xebe2764b, 0x3c273271), TOBN(0x663357e3, 0xf208dcea), TOBN(0x8c5bd833, 0x565b1b70)}}, {{TOBN(0xccc3b4f5, 0x9837fc0d), TOBN(0x9b641ba8, 0xa79cf00f), TOBN(0x7428243d, 0xdfdf3990), TOBN(0x83a594c4, 0x020786b1)}, {TOBN(0xb712451a, 0x526c4502), TOBN(0x9d39438e, 0x6adb3f93), TOBN(0xfdb261e3, 0xe9ff0ccd), TOBN(0x80344e3c, 0xe07af4c3)}}, {{TOBN(0x75900d7c, 0x2fa4f126), TOBN(0x08a3b865, 0x5c99a232), TOBN(0x2478b6bf, 0xdb25e0c3), TOBN(0x482cc2c2, 0x71db2edf)}, {TOBN(0x37df7e64, 0x5f321bb8), TOBN(0x8a93821b, 0x9a8005b4), TOBN(0x3fa2f10c, 0xcc8c1958), TOBN(0x0d332218, 0x2c269d0a)}}, {{TOBN(0x20ab8119, 0xe246b0e6), TOBN(0xb39781e4, 0xd349fd17), TOBN(0xd293231e, 0xb31aa100), TOBN(0x4b779c97, 0xbb032168)}, {TOBN(0x4b3f19e1, 0xc8470500), TOBN(0x45b7efe9, 0x0c4c869d), TOBN(0xdb84f38a, 0xa1a6bbcc), TOBN(0x3b59cb15, 0xb2fddbc1)}}, {{TOBN(0xba5514df, 0x3fd165e8), TOBN(0x499fd6a9, 0x061f8811), TOBN(0x72cd1fe0, 0xbfef9f00), TOBN(0x120a4bb9, 0x79ad7e8a)}, {TOBN(0xf2ffd095, 0x5f4a5ac5), TOBN(0xcfd174f1, 0x95a7a2f0), TOBN(0xd42301ba, 0x9d17baf1), TOBN(0xd2fa487a, 0x77f22089)}}, {{TOBN(0x9cb09efe, 0xb1dc77e1), TOBN(0xe9566939, 0x21c99682), TOBN(0x8c546901, 0x6c6067bb), TOBN(0xfd378574, 0x61c24456)}, {TOBN(0x2b6a6cbe, 0x81796b33), TOBN(0x62d550f6, 0x58e87f8b), 
TOBN(0x1b763e1c, 0x7f1b01b4), TOBN(0x4b93cfea, 0x1b1b5e12)}}, {{TOBN(0xb9345238, 0x1d531696), TOBN(0x57201c00, 0x88cdde69), TOBN(0xdde92251, 0x9a86afc7), TOBN(0xe3043895, 0xbd35cea8)}, {TOBN(0x7608c1e1, 0x8555970d), TOBN(0x8267dfa9, 0x2535935e), TOBN(0xd4c60a57, 0x322ea38b), TOBN(0xe0bf7977, 0x804ef8b5)}}, {{TOBN(0x1a0dab28, 0xc06fece4), TOBN(0xd405991e, 0x94e7b49d), TOBN(0xc542b6d2, 0x706dab28), TOBN(0xcb228da3, 0xa91618fb)}, {TOBN(0x224e4164, 0x107d1cea), TOBN(0xeb9fdab3, 0xd0f5d8f1), TOBN(0xc02ba386, 0x0d6e41cd), TOBN(0x676a72c5, 0x9b1f7146)}}, {{TOBN(0xffd6dd98, 0x4d6cb00b), TOBN(0xcef9c5ca, 0xde2e8d7c), TOBN(0xa1bbf5d7, 0x641c7936), TOBN(0x1b95b230, 0xee8f772e)}, {TOBN(0xf765a92e, 0xe8ac25b1), TOBN(0xceb04cfc, 0x3a18b7c6), TOBN(0x27944cef, 0x0acc8966), TOBN(0xcbb3c957, 0x434c1004)}}, {{TOBN(0x9c9971a1, 0xa43ff93c), TOBN(0x5bc2db17, 0xa1e358a9), TOBN(0x45b4862e, 0xa8d9bc82), TOBN(0x70ebfbfb, 0x2201e052)}, {TOBN(0xafdf64c7, 0x92871591), TOBN(0xea5bcae6, 0xb42d0219), TOBN(0xde536c55, 0x2ad8f03c), TOBN(0xcd6c3f4d, 0xa76aa33c)}}, {{TOBN(0xbeb5f623, 0x0bca6de3), TOBN(0xdd20dd99, 0xb1e706fd), TOBN(0x90b3ff9d, 0xac9059d4), TOBN(0x2d7b2902, 0x7ccccc4e)}, {TOBN(0x8a090a59, 0xce98840f), TOBN(0xa5d947e0, 0x8410680a), TOBN(0x49ae346a, 0x923379a5), TOBN(0x7dbc84f9, 0xb28a3156)}}, {{TOBN(0xfd40d916, 0x54a1aff2), TOBN(0xabf318ba, 0x3a78fb9b), TOBN(0x50152ed8, 0x3029f95e), TOBN(0x9fc1dd77, 0xc58ad7fa)}, {TOBN(0x5fa57915, 0x13595c17), TOBN(0xb9504668, 0x8f62b3a9), TOBN(0x907b5b24, 0xff3055b0), TOBN(0x2e995e35, 0x9a84f125)}}, {{TOBN(0x87dacf69, 0x7e9bbcfb), TOBN(0x95d0c1d6, 0xe86d96e3), TOBN(0x65726e3c, 0x2d95a75c), TOBN(0x2c3c9001, 0xacd27f21)}, {TOBN(0x1deab561, 0x6c973f57), TOBN(0x108b7e2c, 0xa5221643), TOBN(0x5fee9859, 0xc4ef79d4), TOBN(0xbd62b88a, 0x40d4b8c6)}}, {{TOBN(0xb4dd29c4, 0x197c75d6), TOBN(0x266a6df2, 0xb7076feb), TOBN(0x9512d0ea, 0x4bf2df11), TOBN(0x1320c24f, 0x6b0cc9ec)}, {TOBN(0x6bb1e0e1, 0x01a59596), TOBN(0x8317c5bb, 0xeff9aaac), TOBN(0x65bb405e, 0x385aa6c9), 
TOBN(0x613439c1, 0x8f07988f)}}, {{TOBN(0xd730049f, 0x16a66e91), TOBN(0xe97f2820, 0xfa1b0e0d), TOBN(0x4131e003, 0x304c28ea), TOBN(0x820ab732, 0x526bac62)}, {TOBN(0xb2ac9ef9, 0x28714423), TOBN(0x54ecfffa, 0xadb10cb2), TOBN(0x8781476e, 0xf886a4cc), TOBN(0x4b2c87b5, 0xdb2f8d49)}}, {{TOBN(0xe857cd20, 0x0a44295d), TOBN(0x707d7d21, 0x58c6b044), TOBN(0xae8521f9, 0xf596757c), TOBN(0x87448f03, 0x67b2b714)}, {TOBN(0x13a9bc45, 0x5ebcd58d), TOBN(0x79bcced9, 0x9122d3c1), TOBN(0x3c644247, 0x9e076642), TOBN(0x0cf22778, 0x2df4767d)}}, {{TOBN(0x5e61aee4, 0x71d444b6), TOBN(0x211236bf, 0xc5084a1d), TOBN(0x7e15bc9a, 0x4fd3eaf6), TOBN(0x68df2c34, 0xab622bf5)}, {TOBN(0x9e674f0f, 0x59bf4f36), TOBN(0xf883669b, 0xd7f34d73), TOBN(0xc48ac1b8, 0x31497b1d), TOBN(0x323b925d, 0x5106703b)}}, {{TOBN(0x22156f42, 0x74082008), TOBN(0xeffc521a, 0xc8482bcb), TOBN(0x5c6831bf, 0x12173479), TOBN(0xcaa2528f, 0xc4739490)}, {TOBN(0x84d2102a, 0x8f1b3c4d), TOBN(0xcf64dfc1, 0x2d9bec0d), TOBN(0x433febad, 0x78a546ef), TOBN(0x1f621ec3, 0x7b73cef1)}}, {{TOBN(0x6aecd627, 0x37338615), TOBN(0x162082ab, 0x01d8edf6), TOBN(0x833a8119, 0x19e86b66), TOBN(0x6023a251, 0xd299b5db)}, {TOBN(0xf5bb0c3a, 0xbbf04b89), TOBN(0x6735eb69, 0xae749a44), TOBN(0xd0e058c5, 0x4713de3b), TOBN(0xfdf2593e, 0x2c3d4ccd)}}, {{TOBN(0x1b8f414e, 0xfdd23667), TOBN(0xdd52aaca, 0xfa2015ee), TOBN(0x3e31b517, 0xbd9625ff), TOBN(0x5ec9322d, 0x8db5918c)}, {TOBN(0xbc73ac85, 0xa96f5294), TOBN(0x82aa5bf3, 0x61a0666a), TOBN(0x49755810, 0xbf08ac42), TOBN(0xd21cdfd5, 0x891cedfc)}}, {{TOBN(0x918cb57b, 0x67f8be10), TOBN(0x365d1a7c, 0x56ffa726), TOBN(0x2435c504, 0x6532de93), TOBN(0xc0fc5e10, 0x2674cd02)}, {TOBN(0x6e51fcf8, 0x9cbbb142), TOBN(0x1d436e5a, 0xafc50692), TOBN(0x766bffff, 0x3fbcae22), TOBN(0x3148c2fd, 0xfd55d3b8)}}, {{TOBN(0x52c7fdc9, 0x233222fa), TOBN(0x89ff1092, 0xe419fb6b), TOBN(0x3cd6db99, 0x25254977), TOBN(0x2e85a161, 0x1cf12ca7)}, {TOBN(0xadd2547c, 0xdc810bc9), TOBN(0xea3f458f, 0x9d257c22), TOBN(0x642c1fbe, 0x27d6b19b), TOBN(0xed07e6b5, 0x140481a6)}}, 
{{TOBN(0x6ada1d42, 0x86d2e0f8), TOBN(0xe5920122, 0x0e8a9fd5), TOBN(0x02c936af, 0x708c1b49), TOBN(0x60f30fee, 0x2b4bfaff)}, {TOBN(0x6637ad06, 0x858e6a61), TOBN(0xce4c7767, 0x3fd374d0), TOBN(0x39d54b2d, 0x7188defb), TOBN(0xa8c9d250, 0xf56a6b66)}}, {{TOBN(0x58fc0f5e, 0xb24fe1dc), TOBN(0x9eaf9dee, 0x6b73f24c), TOBN(0xa90d588b, 0x33650705), TOBN(0xde5b62c5, 0xaf2ec729)}, {TOBN(0x5c72cfae, 0xd3c2b36e), TOBN(0x868c19d5, 0x034435da), TOBN(0x88605f93, 0xe17ee145), TOBN(0xaa60c4ee, 0x77a5d5b1)}}, {{TOBN(0xbcf5bfd2, 0x3b60c472), TOBN(0xaf4ef13c, 0xeb1d3049), TOBN(0x373f44fc, 0xe13895c9), TOBN(0xf29b382f, 0x0cbc9822)}, {TOBN(0x1bfcb853, 0x73efaef6), TOBN(0xcf56ac9c, 0xa8c96f40), TOBN(0xd7adf109, 0x7a191e24), TOBN(0x98035f44, 0xbf8a8dc2)}}, {{TOBN(0xf40a71b9, 0x1e750c84), TOBN(0xc57f7b0c, 0x5dc6c469), TOBN(0x49a0e79c, 0x6fbc19c1), TOBN(0x6b0f5889, 0xa48ebdb8)}, {TOBN(0x5d3fd084, 0xa07c4e9f), TOBN(0xc3830111, 0xab27de14), TOBN(0x0e4929fe, 0x33e08dcc), TOBN(0xf4a5ad24, 0x40bb73a3)}}, {{TOBN(0xde86c2bf, 0x490f97ca), TOBN(0x288f09c6, 0x67a1ce18), TOBN(0x364bb886, 0x1844478d), TOBN(0x7840fa42, 0xceedb040)}, {TOBN(0x1269fdd2, 0x5a631b37), TOBN(0x94761f1e, 0xa47c8b7d), TOBN(0xfc0c2e17, 0x481c6266), TOBN(0x85e16ea2, 0x3daa5fa7)}}, {{TOBN(0xccd86033, 0x92491048), TOBN(0x0c2f6963, 0xf4d402d7), TOBN(0x6336f7df, 0xdf6a865c), TOBN(0x0a2a463c, 0xb5c02a87)}, {TOBN(0xb0e29be7, 0xbf2f12ee), TOBN(0xf0a22002, 0x66bad988), TOBN(0x27f87e03, 0x9123c1d7), TOBN(0x21669c55, 0x328a8c98)}}, {{TOBN(0x186b9803, 0x92f14529), TOBN(0xd3d056cc, 0x63954df3), TOBN(0x2f03fd58, 0x175a46f6), TOBN(0x63e34ebe, 0x11558558)}, {TOBN(0xe13fedee, 0x5b80cfa5), TOBN(0xe872a120, 0xd401dbd1), TOBN(0x52657616, 0xe8a9d667), TOBN(0xbc8da4b6, 0xe08d6693)}}, {{TOBN(0x370fb9bb, 0x1b703e75), TOBN(0x6773b186, 0xd4338363), TOBN(0x18dad378, 0xecef7bff), TOBN(0xaac787ed, 0x995677da)}, {TOBN(0x4801ea8b, 0x0437164b), TOBN(0xf430ad20, 0x73fe795e), TOBN(0xb164154d, 0x8ee5eb73), TOBN(0x0884ecd8, 0x108f7c0e)}}, {{TOBN(0x0e6ec096, 0x5f520698), 
TOBN(0x640631fe, 0x44f7b8d9), TOBN(0x92fd34fc, 0xa35a68b9), TOBN(0x9c5a4b66, 0x4d40cf4e)}, {TOBN(0x949454bf, 0x80b6783d), TOBN(0x80e701fe, 0x3a320a10), TOBN(0x8d1a564a, 0x1a0a39b2), TOBN(0x1436d53d, 0x320587db)}}, {{TOBN(0xf5096e6d, 0x6556c362), TOBN(0xbc23a3c0, 0xe2455d7e), TOBN(0x3a7aee54, 0x807230f9), TOBN(0x9ba1cfa6, 0x22ae82fd)}, {TOBN(0x833a057a, 0x99c5d706), TOBN(0x8be85f4b, 0x842315c9), TOBN(0xd083179a, 0x66a72f12), TOBN(0x2fc77d5d, 0xcdcc73cd)}}, {{TOBN(0x22b88a80, 0x5616ee30), TOBN(0xfb09548f, 0xe7ab1083), TOBN(0x8ad6ab0d, 0x511270cd), TOBN(0x61f6c57a, 0x6924d9ab)}, {TOBN(0xa0f7bf72, 0x90aecb08), TOBN(0x849f87c9, 0x0df784a4), TOBN(0x27c79c15, 0xcfaf1d03), TOBN(0xbbf9f675, 0xc463face)}}, {{TOBN(0x91502c65, 0x765ba543), TOBN(0x18ce3cac, 0x42ea60dd), TOBN(0xe5cee6ac, 0x6e43ecb3), TOBN(0x63e4e910, 0x68f2aeeb)}, {TOBN(0x26234fa3, 0xc85932ee), TOBN(0x96883e8b, 0x4c90c44d), TOBN(0x29b9e738, 0xa18a50f6), TOBN(0xbfc62b2a, 0x3f0420df)}}, {{TOBN(0xd22a7d90, 0x6d3e1fa9), TOBN(0x17115618, 0xfe05b8a3), TOBN(0x2a0c9926, 0xbb2b9c01), TOBN(0xc739fcc6, 0xe07e76a2)}, {TOBN(0x540e9157, 0x165e439a), TOBN(0x06353a62, 0x6a9063d8), TOBN(0x84d95594, 0x61e927a3), TOBN(0x013b9b26, 0xe2e0be7f)}}, {{TOBN(0x4feaec3b, 0x973497f1), TOBN(0x15c0f94e, 0x093ebc2d), TOBN(0x6af5f227, 0x33af0583), TOBN(0x0c2af206, 0xc61f3340)}, {TOBN(0xd25dbdf1, 0x4457397c), TOBN(0x2e8ed017, 0xcabcbae0), TOBN(0xe3010938, 0xc2815306), TOBN(0xbaa99337, 0xe8c6cd68)}}, {{TOBN(0x08513182, 0x3b0ec7de), TOBN(0x1e1b822b, 0x58df05df), TOBN(0x5c14842f, 0xa5c3b683), TOBN(0x98fe977e, 0x3eba34ce)}, {TOBN(0xfd2316c2, 0x0d5e8873), TOBN(0xe48d839a, 0xbd0d427d), TOBN(0x495b2218, 0x623fc961), TOBN(0x24ee56e7, 0xb46fba5e)}}, {{TOBN(0x9184a55b, 0x91e4de58), TOBN(0xa7488ca5, 0xdfdea288), TOBN(0xa723862e, 0xa8dcc943), TOBN(0x92d762b2, 0x849dc0fc)}, {TOBN(0x3c444a12, 0x091ff4a9), TOBN(0x581113fa, 0x0cada274), TOBN(0xb9de0a45, 0x30d8eae2), TOBN(0x5e0fcd85, 0xdf6b41ea)}}, {{TOBN(0x6233ea68, 0xc094dbb5), TOBN(0xb77d062e, 0xd968d410), 
TOBN(0x3e719bbc, 0x58b3002d), TOBN(0x68e7dd3d, 0x3dc49d58)}, {TOBN(0x8d825740, 0x013a5e58), TOBN(0x21311747, 0x3c9e3c1b), TOBN(0x0cb0a2a7, 0x7c99b6ab), TOBN(0x5c48a3b3, 0xc2f888f2)}}}, {{{TOBN(0xc7913e91, 0x991724f3), TOBN(0x5eda799c, 0x39cbd686), TOBN(0xddb595c7, 0x63d4fc1e), TOBN(0x6b63b80b, 0xac4fed54)}, {TOBN(0x6ea0fc69, 0x7e5fb516), TOBN(0x737708ba, 0xd0f1c964), TOBN(0x9628745f, 0x11a92ca5), TOBN(0x61f37958, 0x9a86967a)}}, {{TOBN(0x9af39b2c, 0xaa665072), TOBN(0x78322fa4, 0xefd324ef), TOBN(0x3d153394, 0xc327bd31), TOBN(0x81d5f271, 0x3129dab0)}, {TOBN(0xc72e0c42, 0xf48027f5), TOBN(0xaa40cdbc, 0x8536e717), TOBN(0xf45a657a, 0x2d369d0f), TOBN(0xb03bbfc4, 0xea7f74e6)}}, {{TOBN(0x46a8c418, 0x0d738ded), TOBN(0x6f1a5bb0, 0xe0de5729), TOBN(0xf10230b9, 0x8ba81675), TOBN(0x32c6f30c, 0x112b33d4)}, {TOBN(0x7559129d, 0xd8fffb62), TOBN(0x6a281b47, 0xb459bf05), TOBN(0x77c1bd3a, 0xfa3b6776), TOBN(0x0709b380, 0x7829973a)}}, {{TOBN(0x8c26b232, 0xa3326505), TOBN(0x38d69272, 0xee1d41bf), TOBN(0x0459453e, 0xffe32afa), TOBN(0xce8143ad, 0x7cb3ea87)}, {TOBN(0x932ec1fa, 0x7e6ab666), TOBN(0x6cd2d230, 0x22286264), TOBN(0x459a46fe, 0x6736f8ed), TOBN(0x50bf0d00, 0x9eca85bb)}}, {{TOBN(0x0b825852, 0x877a21ec), TOBN(0x300414a7, 0x0f537a94), TOBN(0x3f1cba40, 0x21a9a6a2), TOBN(0x50824eee, 0x76943c00)}, {TOBN(0xa0dbfcec, 0xf83cba5d), TOBN(0xf9538148, 0x93b4f3c0), TOBN(0x61744162, 0x48f24dd7), TOBN(0x5322d64d, 0xe4fb09dd)}}, {{TOBN(0x57447384, 0x3d9325f3), TOBN(0xa9bef2d0, 0xf371cb84), TOBN(0x77d2188b, 0xa61e36c5), TOBN(0xbbd6a7d7, 0xc602df72)}, {TOBN(0xba3aa902, 0x8f61bc0b), TOBN(0xf49085ed, 0x6ed0b6a1), TOBN(0x8bc625d6, 0xae6e8298), TOBN(0x832b0b1d, 0xa2e9c01d)}}, {{TOBN(0xa337c447, 0xf1f0ced1), TOBN(0x800cc793, 0x9492dd2b), TOBN(0x4b93151d, 0xbea08efa), TOBN(0x820cf3f8, 0xde0a741e)}, {TOBN(0xff1982dc, 0x1c0f7d13), TOBN(0xef921960, 0x84dde6ca), TOBN(0x1ad7d972, 0x45f96ee3), TOBN(0x319c8dbe, 0x29dea0c7)}}, {{TOBN(0xd3ea3871, 0x7b82b99b), TOBN(0x75922d4d, 0x470eb624), TOBN(0x8f66ec54, 0x3b95d466), 
TOBN(0x66e673cc, 0xbee1e346)}, {TOBN(0x6afe67c4, 0xb5f2b89a), TOBN(0x3de9c1e6, 0x290e5cd3), TOBN(0x8c278bb6, 0x310a2ada), TOBN(0x420fa384, 0x0bdb323b)}}, {{TOBN(0x0ae1d63b, 0x0eb919b0), TOBN(0xd74ee51d, 0xa74b9620), TOBN(0x395458d0, 0xa674290c), TOBN(0x324c930f, 0x4620a510)}, {TOBN(0x2d1f4d19, 0xfbac27d4), TOBN(0x4086e8ca, 0x9bedeeac), TOBN(0x0cdd211b, 0x9b679ab8), TOBN(0x5970167d, 0x7090fec4)}}, {{TOBN(0x3420f2c9, 0xfaf1fc63), TOBN(0x616d333a, 0x328c8bb4), TOBN(0x7d65364c, 0x57f1fe4a), TOBN(0x9343e877, 0x55e5c73a)}, {TOBN(0x5795176b, 0xe970e78c), TOBN(0xa36ccebf, 0x60533627), TOBN(0xfc7c7380, 0x09cdfc1b), TOBN(0xb39a2afe, 0xb3fec326)}}, {{TOBN(0xb7ff1ba1, 0x6224408a), TOBN(0xcc856e92, 0x247cfc5e), TOBN(0x01f102e7, 0xc18bc493), TOBN(0x4613ab74, 0x2091c727)}, {TOBN(0xaa25e89c, 0xc420bf2b), TOBN(0x00a53176, 0x90337ec2), TOBN(0xd2be9f43, 0x7d025fc7), TOBN(0x3316fb85, 0x6e6fe3dc)}}, {{TOBN(0x27520af5, 0x9ac50814), TOBN(0xfdf95e78, 0x9a8e4223), TOBN(0xb7e7df2a, 0x56bec5a0), TOBN(0xf7022f7d, 0xdf159e5d)}, {TOBN(0x93eeeab1, 0xcac1fe8f), TOBN(0x8040188c, 0x37451168), TOBN(0x7ee8aa8a, 0xd967dce6), TOBN(0xfa0e79e7, 0x3abc9299)}}, {{TOBN(0x67332cfc, 0x2064cfd1), TOBN(0x339c31de, 0xb0651934), TOBN(0x719b28d5, 0x2a3bcbea), TOBN(0xee74c82b, 0x9d6ae5c6)}, {TOBN(0x0927d05e, 0xbaf28ee6), TOBN(0x82cecf2c, 0x9d719028), TOBN(0x0b0d353e, 0xddb30289), TOBN(0xfe4bb977, 0xfddb2e29)}}, {{TOBN(0xbb5bb990, 0x640bfd9e), TOBN(0xd226e277, 0x82f62108), TOBN(0x4bf00985, 0x02ffdd56), TOBN(0x7756758a, 0x2ca1b1b5)}, {TOBN(0xc32b62a3, 0x5285fe91), TOBN(0xedbc546a, 0x8c9cd140), TOBN(0x1e47a013, 0xaf5cb008), TOBN(0xbca7e720, 0x073ce8f2)}}, {{TOBN(0xe10b2ab8, 0x17a91cae), TOBN(0xb89aab65, 0x08e27f63), TOBN(0x7b3074a7, 0xdba3ddf9), TOBN(0x1c20ce09, 0x330c2972)}, {TOBN(0x6b9917b4, 0x5fcf7e33), TOBN(0xe6793743, 0x945ceb42), TOBN(0x18fc2215, 0x5c633d19), TOBN(0xad1adb3c, 0xc7485474)}}, {{TOBN(0x646f9679, 0x6424c49b), TOBN(0xf888dfe8, 0x67c241c9), TOBN(0xe12d4b93, 0x24f68b49), TOBN(0x9a6b62d8, 0xa571df20)}, 
{TOBN(0x81b4b26d, 0x179483cb), TOBN(0x666f9632, 0x9511fae2), TOBN(0xd281b3e4, 0xd53aa51f), TOBN(0x7f96a765, 0x7f3dbd16)}}, {{TOBN(0xa7f8b5bf, 0x074a30ce), TOBN(0xd7f52107, 0x005a32e6), TOBN(0x6f9e0907, 0x50237ed4), TOBN(0x2f21da47, 0x8096fa2b)}, {TOBN(0xf3e19cb4, 0xeec863a0), TOBN(0xd18f77fd, 0x9527620a), TOBN(0x9505c81c, 0x407c1cf8), TOBN(0x9998db4e, 0x1b6ec284)}}, {{TOBN(0x7e3389e5, 0xc247d44d), TOBN(0x12507141, 0x3f4f3d80), TOBN(0xd4ba0110, 0x4a78a6c7), TOBN(0x312874a0, 0x767720be)}, {TOBN(0xded059a6, 0x75944370), TOBN(0xd6123d90, 0x3b2c0bdd), TOBN(0xa56b717b, 0x51c108e3), TOBN(0x9bb7940e, 0x070623e9)}}, {{TOBN(0x794e2d59, 0x84ac066c), TOBN(0xf5954a92, 0xe68c69a0), TOBN(0x28c52458, 0x4fd99dcc), TOBN(0x60e639fc, 0xb1012517)}, {TOBN(0xc2e60125, 0x7de79248), TOBN(0xe9ef6404, 0xf12fc6d7), TOBN(0x4c4f2808, 0x2a3b5d32), TOBN(0x865ad32e, 0xc768eb8a)}}, {{TOBN(0xac02331b, 0x13fb70b6), TOBN(0x037b44c1, 0x95599b27), TOBN(0x1a860fc4, 0x60bd082c), TOBN(0xa2e25745, 0xc980cd01)}, {TOBN(0xee3387a8, 0x1da0263e), TOBN(0x931bfb95, 0x2d10f3d6), TOBN(0x5b687270, 0xa1f24a32), TOBN(0xf140e65d, 0xca494b86)}}, {{TOBN(0x4f4ddf91, 0xb2f1ac7a), TOBN(0xf99eaabb, 0x760fee27), TOBN(0x57f4008a, 0x49c228e5), TOBN(0x090be440, 0x1cf713bb)}, {TOBN(0xac91fbe4, 0x5004f022), TOBN(0xd838c2c2, 0x569e1af6), TOBN(0xd6c7d20b, 0x0f1daaa5), TOBN(0xaa063ac1, 0x1bbb02c0)}}, {{TOBN(0x0938a422, 0x59558a78), TOBN(0x5343c669, 0x8435da2f), TOBN(0x96f67b18, 0x034410dc), TOBN(0x7cc1e424, 0x84510804)}, {TOBN(0x86a1543f, 0x16dfbb7d), TOBN(0x921fa942, 0x5b5bd592), TOBN(0x9dcccb6e, 0xb33dd03c), TOBN(0x8581ddd9, 0xb843f51e)}}, {{TOBN(0x54935fcb, 0x81d73c9e), TOBN(0x6d07e979, 0x0a5e97ab), TOBN(0x4dc7b30a, 0xcf3a6bab), TOBN(0x147ab1f3, 0x170bee11)}, {TOBN(0x0aaf8e3d, 0x9fafdee4), TOBN(0xfab3dbcb, 0x538a8b95), TOBN(0x405df4b3, 0x6ef13871), TOBN(0xf1f4e9cb, 0x088d5a49)}}, {{TOBN(0x9bcd24d3, 0x66b33f1d), TOBN(0x3b97b820, 0x5ce445c0), TOBN(0xe2926549, 0xba93ff61), TOBN(0xd9c341ce, 0x4dafe616)}, {TOBN(0xfb30a76e, 0x16efb6f3), 
TOBN(0xdf24b8ca, 0x605b953c), TOBN(0x8bd52afe, 0xc2fffb9f), TOBN(0xbbac5ff7, 0xe19d0b96)}}, {{TOBN(0x43c01b87, 0x459afccd), TOBN(0x6bd45143, 0xb7432652), TOBN(0x84734530, 0x55b5d78e), TOBN(0x81088fdb, 0x1554ba7d)}, {TOBN(0xada0a52c, 0x1e269375), TOBN(0xf9f037c4, 0x2dc5ec10), TOBN(0xc0660607, 0x94bfbc11), TOBN(0xc0a630bb, 0xc9c40d2f)}}, {{TOBN(0x5efc797e, 0xab64c31e), TOBN(0xffdb1dab, 0x74507144), TOBN(0xf6124287, 0x1ca6790c), TOBN(0xe9609d81, 0xe69bf1bf)}, {TOBN(0xdb898595, 0x00d24fc9), TOBN(0x9c750333, 0xe51fb417), TOBN(0x51830a91, 0xfef7bbde), TOBN(0x0ce67dc8, 0x945f585c)}}, {{TOBN(0x9a730ed4, 0x4763eb50), TOBN(0x24a0e221, 0xc1ab0d66), TOBN(0x643b6393, 0x648748f3), TOBN(0x1982daa1, 0x6d3c6291)}, {TOBN(0x6f00a9f7, 0x8bbc5549), TOBN(0x7a1783e1, 0x7f36384e), TOBN(0xe8346323, 0xde977f50), TOBN(0x91ab688d, 0xb245502a)}}, {{TOBN(0x331ab6b5, 0x6d0bdd66), TOBN(0x0a6ef32e, 0x64b71229), TOBN(0x1028150e, 0xfe7c352f), TOBN(0x27e04350, 0xce7b39d3)}, {TOBN(0x2a3c8acd, 0xc1070c82), TOBN(0xfb2034d3, 0x80c9feef), TOBN(0x2d729621, 0x709f3729), TOBN(0x8df290bf, 0x62cb4549)}}, {{TOBN(0x02f99f33, 0xfc2e4326), TOBN(0x3b30076d, 0x5eddf032), TOBN(0xbb21f8cf, 0x0c652fb5), TOBN(0x314fb49e, 0xed91cf7b)}, {TOBN(0xa013eca5, 0x2f700750), TOBN(0x2b9e3c23, 0x712a4575), TOBN(0xe5355557, 0xaf30fbb0), TOBN(0x1ada3516, 0x7c77e771)}}, {{TOBN(0x45f6ecb2, 0x7b135670), TOBN(0xe85d19df, 0x7cfc202e), TOBN(0x0f1b50c7, 0x58d1be9f), TOBN(0x5ebf2c0a, 0xead2e344)}, {TOBN(0x1531fe4e, 0xabc199c9), TOBN(0xc7032592, 0x56bab0ae), TOBN(0x16ab2e48, 0x6c1fec54), TOBN(0x0f87fda8, 0x04280188)}}, {{TOBN(0xdc9f46fc, 0x609e4a74), TOBN(0x2a44a143, 0xba667f91), TOBN(0xbc3d8b95, 0xb4d83436), TOBN(0xa01e4bd0, 0xc7bd2958)}, {TOBN(0x7b182932, 0x73483c90), TOBN(0xa79c6aa1, 0xa7c7b598), TOBN(0xbf3983c6, 0xeaaac07e), TOBN(0x8f18181e, 0x96e0d4e6)}}, {{TOBN(0x8553d37c, 0x051af62b), TOBN(0xe9a998eb, 0x0bf94496), TOBN(0xe0844f9f, 0xb0d59aa1), TOBN(0x983fd558, 0xe6afb813)}, {TOBN(0x9670c0ca, 0x65d69804), TOBN(0x732b22de, 0x6ea5ff2d), 
TOBN(0xd7640ba9, 0x5fd8623b), TOBN(0x9f619163, 0xa6351782)}}, {{TOBN(0x0bfc27ee, 0xacee5043), TOBN(0xae419e73, 0x2eb10f02), TOBN(0x19c028d1, 0x8943fb05), TOBN(0x71f01cf7, 0xff13aa2a)}, {TOBN(0x7790737e, 0x8887a132), TOBN(0x67513309, 0x66318410), TOBN(0x9819e8a3, 0x7ddb795e), TOBN(0xfecb8ef5, 0xdad100b2)}}, {{TOBN(0x59f74a22, 0x3021926a), TOBN(0xb7c28a49, 0x6f9b4c1c), TOBN(0xed1a733f, 0x912ad0ab), TOBN(0x42a910af, 0x01a5659c)}, {TOBN(0x3842c6e0, 0x7bd68cab), TOBN(0x2b57fa38, 0x76d70ac8), TOBN(0x8a6707a8, 0x3c53aaeb), TOBN(0x62c1c510, 0x65b4db18)}}, {{TOBN(0x8de2c1fb, 0xb2d09dc7), TOBN(0xc3dfed12, 0x266bd23b), TOBN(0x927d039b, 0xd5b27db6), TOBN(0x2fb2f0f1, 0x103243da)}, {TOBN(0xf855a07b, 0x80be7399), TOBN(0xed9327ce, 0x1f9f27a8), TOBN(0xa0bd99c7, 0x729bdef7), TOBN(0x2b67125e, 0x28250d88)}}, {{TOBN(0x784b26e8, 0x8670ced7), TOBN(0xe3dfe41f, 0xc31bd3b4), TOBN(0x9e353a06, 0xbcc85cbc), TOBN(0x302e2909, 0x60178a9d)}, {TOBN(0x860abf11, 0xa6eac16e), TOBN(0x76447000, 0xaa2b3aac), TOBN(0x46ff9d19, 0x850afdab), TOBN(0x35bdd6a5, 0xfdb2d4c1)}}, {{TOBN(0xe82594b0, 0x7e5c9ce9), TOBN(0x0f379e53, 0x20af346e), TOBN(0x608b31e3, 0xbc65ad4a), TOBN(0x710c6b12, 0x267c4826)}, {TOBN(0x51c966f9, 0x71954cf1), TOBN(0xb1cec793, 0x0d0aa215), TOBN(0x1f155989, 0x86bd23a8), TOBN(0xae2ff99c, 0xf9452e86)}}, {{TOBN(0xd8dd953c, 0x340ceaa2), TOBN(0x26355275, 0x2e2e9333), TOBN(0x15d4e5f9, 0x8586f06d), TOBN(0xd6bf94a8, 0xf7cab546)}, {TOBN(0x33c59a0a, 0xb76a9af0), TOBN(0x52740ab3, 0xba095af7), TOBN(0xc444de8a, 0x24389ca0), TOBN(0xcc6f9863, 0x706da0cb)}}, {{TOBN(0xb5a741a7, 0x6b2515cf), TOBN(0x71c41601, 0x9585c749), TOBN(0x78350d4f, 0xe683de97), TOBN(0x31d61524, 0x63d0b5f5)}, {TOBN(0x7a0cc5e1, 0xfbce090b), TOBN(0xaac927ed, 0xfbcb2a5b), TOBN(0xe920de49, 0x20d84c35), TOBN(0x8c06a0b6, 0x22b4de26)}}, {{TOBN(0xd34dd58b, 0xafe7ddf3), TOBN(0x55851fed, 0xc1e6e55b), TOBN(0xd1395616, 0x960696e7), TOBN(0x940304b2, 0x5f22705f)}, {TOBN(0x6f43f861, 0xb0a2a860), TOBN(0xcf121282, 0x0e7cc981), TOBN(0x12186212, 0x0ab64a96), 
TOBN(0x09215b9a, 0xb789383c)}}, {{TOBN(0x311eb305, 0x37387c09), TOBN(0xc5832fce, 0xf03ee760), TOBN(0x30358f58, 0x32f7ea19), TOBN(0xe01d3c34, 0x91d53551)}, {TOBN(0x1ca5ee41, 0xda48ea80), TOBN(0x34e71e8e, 0xcf4fa4c1), TOBN(0x312abd25, 0x7af1e1c7), TOBN(0xe3afcdeb, 0x2153f4a5)}}, {{TOBN(0x9d5c84d7, 0x00235e9a), TOBN(0x0308d3f4, 0x8c4c836f), TOBN(0xc0a66b04, 0x89332de5), TOBN(0x610dd399, 0x89e566ef)}, {TOBN(0xf8eea460, 0xd1ac1635), TOBN(0x84cbb3fb, 0x20a2c0df), TOBN(0x40afb488, 0xe74a48c5), TOBN(0x29738198, 0xd326b150)}}, {{TOBN(0x2a17747f, 0xa6d74081), TOBN(0x60ea4c05, 0x55a26214), TOBN(0x53514bb4, 0x1f88c5fe), TOBN(0xedd64567, 0x7e83426c)}, {TOBN(0xd5d6cbec, 0x96460b25), TOBN(0xa12fd0ce, 0x68dc115e), TOBN(0xc5bc3ed2, 0x697840ea), TOBN(0x969876a8, 0xa6331e31)}}, {{TOBN(0x60c36217, 0x472ff580), TOBN(0xf4229705, 0x4ad41393), TOBN(0x4bd99ef0, 0xa03b8b92), TOBN(0x501c7317, 0xc144f4f6)}, {TOBN(0x159009b3, 0x18464945), TOBN(0x6d5e594c, 0x74c5c6be), TOBN(0x2d587011, 0x321a3660), TOBN(0xd1e184b1, 0x3898d022)}}, {{TOBN(0x5ba04752, 0x4c6a7e04), TOBN(0x47fa1e2b, 0x45550b65), TOBN(0x9419daf0, 0x48c0a9a5), TOBN(0x66362953, 0x7c243236)}, {TOBN(0xcd0744b1, 0x5cb12a88), TOBN(0x561b6f9a, 0x2b646188), TOBN(0x599415a5, 0x66c2c0c0), TOBN(0xbe3f0859, 0x0f83f09a)}}, {{TOBN(0x9141c5be, 0xb92041b8), TOBN(0x01ae38c7, 0x26477d0d), TOBN(0xca8b71f3, 0xd12c7a94), TOBN(0xfab5b31f, 0x765c70db)}, {TOBN(0x76ae7492, 0x487443e9), TOBN(0x8595a310, 0x990d1349), TOBN(0xf8dbeda8, 0x7d460a37), TOBN(0x7f7ad082, 0x1e45a38f)}}, {{TOBN(0xed1d4db6, 0x1059705a), TOBN(0xa3dd492a, 0xe6b9c697), TOBN(0x4b92ee3a, 0x6eb38bd5), TOBN(0xbab2609d, 0x67cc0bb7)}, {TOBN(0x7fc4fe89, 0x6e70ee82), TOBN(0xeff2c56e, 0x13e6b7e3), TOBN(0x9b18959e, 0x34d26fca), TOBN(0x2517ab66, 0x889d6b45)}}, {{TOBN(0xf167b4e0, 0xbdefdd4f), TOBN(0x69958465, 0xf366e401), TOBN(0x5aa368ab, 0xa73bbec0), TOBN(0x12148709, 0x7b240c21)}, {TOBN(0x378c3233, 0x18969006), TOBN(0xcb4d73ce, 0xe1fe53d1), TOBN(0x5f50a80e, 0x130c4361), TOBN(0xd67f5951, 0x7ef5212b)}}, 
{{TOBN(0xf145e21e, 0x9e70c72e), TOBN(0xb2e52e29, 0x5566d2fb), TOBN(0x44eaba4a, 0x032397f5), TOBN(0x5e56937b, 0x7e31a7de)}, {TOBN(0x68dcf517, 0x456c61e1), TOBN(0xbc2e954a, 0xa8b0a388), TOBN(0xe3552fa7, 0x60a8b755), TOBN(0x03442dae, 0x73ad0cde)}}, {{TOBN(0x37ffe747, 0xceb26210), TOBN(0x983545e8, 0x787baef9), TOBN(0x8b8c8535, 0x86a3de31), TOBN(0xc621dbcb, 0xfacd46db)}, {TOBN(0x82e442e9, 0x59266fbb), TOBN(0xa3514c37, 0x339d471c), TOBN(0x3a11b771, 0x62cdad96), TOBN(0xf0cb3b3c, 0xecf9bdf0)}}, {{TOBN(0x3fcbdbce, 0x478e2135), TOBN(0x7547b5cf, 0xbda35342), TOBN(0xa97e81f1, 0x8a677af6), TOBN(0xc8c2bf83, 0x28817987)}, {TOBN(0xdf07eaaf, 0x45580985), TOBN(0xc68d1f05, 0xc93b45cb), TOBN(0x106aa2fe, 0xc77b4cac), TOBN(0x4c1d8afc, 0x04a7ae86)}}, {{TOBN(0xdb41c3fd, 0x9eb45ab2), TOBN(0x5b234b5b, 0xd4b22e74), TOBN(0xda253dec, 0xf215958a), TOBN(0x67e0606e, 0xa04edfa0)}, {TOBN(0xabbbf070, 0xef751b11), TOBN(0xf352f175, 0xf6f06dce), TOBN(0xdfc4b6af, 0x6839f6b4), TOBN(0x53ddf9a8, 0x9959848e)}}, {{TOBN(0xda49c379, 0xc21520b0), TOBN(0x90864ff0, 0xdbd5d1b6), TOBN(0x2f055d23, 0x5f49c7f7), TOBN(0xe51e4e6a, 0xa796b2d8)}, {TOBN(0xc361a67f, 0x5c9dc340), TOBN(0x5ad53c37, 0xbca7c620), TOBN(0xda1d6588, 0x32c756d0), TOBN(0xad60d911, 0x8bb67e13)}}, {{TOBN(0xd6c47bdf, 0x0eeec8c6), TOBN(0x4a27fec1, 0x078a1821), TOBN(0x081f7415, 0xc3099524), TOBN(0x8effdf0b, 0x82cd8060)}, {TOBN(0xdb70ec1c, 0x65842df8), TOBN(0x8821b358, 0xd319a901), TOBN(0x72ee56ee, 0xde42b529), TOBN(0x5bb39592, 0x236e4286)}}, {{TOBN(0xd1183316, 0xfd6f7140), TOBN(0xf9fadb5b, 0xbd8e81f7), TOBN(0x701d5e0c, 0x5a02d962), TOBN(0xfdee4dbf, 0x1b601324)}, {TOBN(0xbed17407, 0x35d7620e), TOBN(0x04e3c2c3, 0xf48c0012), TOBN(0x9ee29da7, 0x3455449a), TOBN(0x562cdef4, 0x91a836c4)}}, {{TOBN(0x8f682a5f, 0x47701097), TOBN(0x617125d8, 0xff88d0c2), TOBN(0x948fda24, 0x57bb86dd), TOBN(0x348abb8f, 0x289f7286)}, {TOBN(0xeb10eab5, 0x99d94bbd), TOBN(0xd51ba28e, 0x4684d160), TOBN(0xabe0e51c, 0x30c8f41a), TOBN(0x66588b45, 0x13254f4a)}}, {{TOBN(0x147ebf01, 0xfad097a5), 
TOBN(0x49883ea8, 0x610e815d), TOBN(0xe44d60ba, 0x8a11de56), TOBN(0xa970de6e, 0x827a7a6d)}, {TOBN(0x2be41424, 0x5e17fc19), TOBN(0xd833c657, 0x01214057), TOBN(0x1375813b, 0x363e723f), TOBN(0x6820bb88, 0xe6a52e9b)}}, {{TOBN(0x7e7f6970, 0xd875d56a), TOBN(0xd6a0a9ac, 0x51fbf6bf), TOBN(0x54ba8790, 0xa3083c12), TOBN(0xebaeb23d, 0x6ae7eb64)}, {TOBN(0xa8685c3a, 0xb99a907a), TOBN(0xf1e74550, 0x026bf40b), TOBN(0x7b73a027, 0xc802cd9e), TOBN(0x9a8a927c, 0x4fef4635)}}, {{TOBN(0xe1b6f60c, 0x08191224), TOBN(0xc4126ebb, 0xde4ec091), TOBN(0xe1dff4dc, 0x4ae38d84), TOBN(0xde3f57db, 0x4f2ef985)}, {TOBN(0x34964337, 0xd446a1dd), TOBN(0x7bf217a0, 0x859e77f6), TOBN(0x8ff10527, 0x8e1d13f5), TOBN(0xa304ef03, 0x74eeae27)}}, {{TOBN(0xfc6f5e47, 0xd19dfa5a), TOBN(0xdb007de3, 0x7fad982b), TOBN(0x28205ad1, 0x613715f5), TOBN(0x251e6729, 0x7889529e)}, {TOBN(0x72705184, 0x1ae98e78), TOBN(0xf818537d, 0x271cac32), TOBN(0xc8a15b7e, 0xb7f410f5), TOBN(0xc474356f, 0x81f62393)}}, {{TOBN(0x92dbdc5a, 0xc242316b), TOBN(0xabe060ac, 0xdbf4aff5), TOBN(0x6e8c38fe, 0x909a8ec6), TOBN(0x43e514e5, 0x6116cb94)}, {TOBN(0x2078fa38, 0x07d784f9), TOBN(0x1161a880, 0xf4b5b357), TOBN(0x5283ce79, 0x13adea3d), TOBN(0x0756c3e6, 0xcc6a910b)}}, {{TOBN(0x60bcfe01, 0xaaa79697), TOBN(0x04a73b29, 0x56391db1), TOBN(0xdd8dad47, 0x189b45a0), TOBN(0xbfac0dd0, 0x48d5b8d9)}, {TOBN(0x34ab3af5, 0x7d3d2ec2), TOBN(0x6fa2fc2d, 0x207bd3af), TOBN(0x9ff40092, 0x66550ded), TOBN(0x719b3e87, 0x1fd5b913)}}, {{TOBN(0xa573a496, 0x6d17fbc7), TOBN(0x0cd1a70a, 0x73d2b24e), TOBN(0x34e2c5ca, 0xb2676937), TOBN(0xe7050b06, 0xbf669f21)}, {TOBN(0xfbe948b6, 0x1ede9046), TOBN(0xa0530051, 0x97662659), TOBN(0x58cbd4ed, 0xf10124c5), TOBN(0xde2646e4, 0xdd6c06c8)}}, {{TOBN(0x332f8108, 0x8cad38c0), TOBN(0x471b7e90, 0x6bd68ae2), TOBN(0x56ac3fb2, 0x0d8e27a3), TOBN(0xb54660db, 0x136b4b0d)}, {TOBN(0x123a1e11, 0xa6fd8de4), TOBN(0x44dbffea, 0xa37799ef), TOBN(0x4540b977, 0xce6ac17c), TOBN(0x495173a8, 0xaf60acef)}}}, {{{TOBN(0x9ebb284d, 0x391c2a82), TOBN(0xbcdd4863, 0x158308e8), 
TOBN(0x006f16ec, 0x83f1edca), TOBN(0xa13e2c37, 0x695dc6c8)}, {TOBN(0x2ab756f0, 0x4a057a87), TOBN(0xa8765500, 0xa6b48f98), TOBN(0x4252face, 0x68651c44), TOBN(0xa52b540b, 0xe1765e02)}}, {{TOBN(0x4f922fc5, 0x16a0d2bb), TOBN(0x0d5cc16c, 0x1a623499), TOBN(0x9241cf3a, 0x57c62c8b), TOBN(0x2f5e6961, 0xfd1b667f)}, {TOBN(0x5c15c70b, 0xf5a01797), TOBN(0x3d20b44d, 0x60956192), TOBN(0x04911b37, 0x071fdb52), TOBN(0xf648f916, 0x8d6f0f7b)}}, {{TOBN(0x6dc1acaf, 0xe60b7cf7), TOBN(0x25860a50, 0x84a9d869), TOBN(0x56fc6f09, 0xe7ba8ac4), TOBN(0x828c5bd0, 0x6148d29e)}, {TOBN(0xac6b435e, 0xdc55ae5f), TOBN(0xa527f56c, 0xc0117411), TOBN(0x94d5045e, 0xfd24342c), TOBN(0x2c4c0a35, 0x70b67c0d)}}, {{TOBN(0x027cc8b8, 0xfac61d9a), TOBN(0x7d25e062, 0xe3c6fe8a), TOBN(0xe08805bf, 0xe5bff503), TOBN(0x13271e6c, 0x6ff632f7)}, {TOBN(0x55dca6c0, 0x232f76a5), TOBN(0x8957c32d, 0x701ef426), TOBN(0xee728bcb, 0xa10a5178), TOBN(0x5ea60411, 0xb62c5173)}}, {{TOBN(0xfc4e964e, 0xd0b8892b), TOBN(0x9ea17683, 0x9301bb74), TOBN(0x6265c5ae, 0xfcc48626), TOBN(0xe60cf82e, 0xbb3e9102)}, {TOBN(0x57adf797, 0xd4df5531), TOBN(0x235b59a1, 0x8deeefe2), TOBN(0x60adcf58, 0x3f306eb1), TOBN(0x105c2753, 0x3d09492d)}}, {{TOBN(0x4090914b, 0xb5def996), TOBN(0x1cb69c83, 0x233dd1e7), TOBN(0xc1e9c1d3, 0x9b3d5e76), TOBN(0x1f3338ed, 0xfccf6012)}, {TOBN(0xb1e95d0d, 0x2f5378a8), TOBN(0xacf4c2c7, 0x2f00cd21), TOBN(0x6e984240, 0xeb5fe290), TOBN(0xd66c038d, 0x248088ae)}}, {{TOBN(0x804d264a, 0xf94d70cf), TOBN(0xbdb802ef, 0x7314bf7e), TOBN(0x8fb54de2, 0x4333ed02), TOBN(0x740461e0, 0x285635d9)}, {TOBN(0x4113b2c8, 0x365e9383), TOBN(0xea762c83, 0x3fdef652), TOBN(0x4eec6e2e, 0x47b956c1), TOBN(0xa3d814be, 0x65620fa4)}}, {{TOBN(0x9ad5462b, 0xb4d8bc50), TOBN(0x181c0b16, 0xa9195770), TOBN(0xebd4fe1c, 0x78412a68), TOBN(0xae0341bc, 0xc0dff48c)}, {TOBN(0xb6bc45cf, 0x7003e866), TOBN(0xf11a6dea, 0x8a24a41b), TOBN(0x5407151a, 0xd04c24c2), TOBN(0x62c9d27d, 0xda5b7b68)}}, {{TOBN(0x2e964235, 0x88cceff6), TOBN(0x8594c54f, 0x8b07ed69), TOBN(0x1578e73c, 0xc84d0d0d), 
TOBN(0x7b4e1055, 0xff532868)}, {TOBN(0xa348c0d5, 0xb5ec995a), TOBN(0xbf4b9d55, 0x14289a54), TOBN(0x9ba155a6, 0x58fbd777), TOBN(0x186ed7a8, 0x1a84491d)}}, {{TOBN(0xd4992b30, 0x614c0900), TOBN(0xda98d121, 0xbd00c24b), TOBN(0x7f534dc8, 0x7ec4bfa1), TOBN(0x4a5ff674, 0x37dc34bc)}, {TOBN(0x68c196b8, 0x1d7ea1d7), TOBN(0x38cf2893, 0x80a6d208), TOBN(0xfd56cd09, 0xe3cbbd6e), TOBN(0xec72e27e, 0x4205a5b6)}}, {{TOBN(0x15ea68f5, 0xa44f77f7), TOBN(0x7aa5f9fd, 0xb43c52bc), TOBN(0x86ff676f, 0x94f0e609), TOBN(0xa4cde963, 0x2e2d432b)}, {TOBN(0x8cafa0c0, 0xeee470af), TOBN(0x84137d0e, 0x8a3f5ec8), TOBN(0xebb40411, 0xfaa31231), TOBN(0xa239c13f, 0x6f7f7ccf)}}, {{TOBN(0x32865719, 0xa8afd30b), TOBN(0x86798328, 0x8a826dce), TOBN(0xdf04e891, 0xc4a8fbe0), TOBN(0xbb6b6e1b, 0xebf56ad3)}, {TOBN(0x0a695b11, 0x471f1ff0), TOBN(0xd76c3389, 0xbe15baf0), TOBN(0x018edb95, 0xbe96c43e), TOBN(0xf2beaaf4, 0x90794158)}}, {{TOBN(0x152db09e, 0xc3076a27), TOBN(0x5e82908e, 0xe416545d), TOBN(0xa2c41272, 0x356d6f2e), TOBN(0xdc9c9642, 0x31fd74e1)}, {TOBN(0x66ceb88d, 0x519bf615), TOBN(0xe29ecd76, 0x05a2274e), TOBN(0x3a0473c4, 0xbf5e2fa0), TOBN(0x6b6eb671, 0x64284e67)}}, {{TOBN(0xe8b97932, 0xb88756dd), TOBN(0xed4e8652, 0xf17e3e61), TOBN(0xc2dd1499, 0x3ee1c4a4), TOBN(0xc0aaee17, 0x597f8c0e)}, {TOBN(0x15c4edb9, 0x6c168af3), TOBN(0x6563c7bf, 0xb39ae875), TOBN(0xadfadb6f, 0x20adb436), TOBN(0xad55e8c9, 0x9a042ac0)}}, {{TOBN(0x975a1ed8, 0xb76da1f5), TOBN(0x10dfa466, 0xa58acb94), TOBN(0x8dd7f7e3, 0xac060282), TOBN(0x6813e66a, 0x572a051e)}, {TOBN(0xb4ccae1e, 0x350cb901), TOBN(0xb653d656, 0x50cb7822), TOBN(0x42484710, 0xdfab3b87), TOBN(0xcd7ee537, 0x9b670fd0)}}, {{TOBN(0x0a50b12e, 0x523b8bf6), TOBN(0x8009eb5b, 0x8f910c1b), TOBN(0xf535af82, 0x4a167588), TOBN(0x0f835f9c, 0xfb2a2abd)}, {TOBN(0xf59b2931, 0x2afceb62), TOBN(0xc797df2a, 0x169d383f), TOBN(0xeb3f5fb0, 0x66ac02b0), TOBN(0x029d4c6f, 0xdaa2d0ca)}}, {{TOBN(0xd4059bc1, 0xafab4bc5), TOBN(0x833f5c6f, 0x56783247), TOBN(0xb5346630, 0x8d2d3605), TOBN(0x83387891, 0xd34d8433)}, 
{TOBN(0xd973b30f, 0xadd9419a), TOBN(0xbcca1099, 0xafe3fce8), TOBN(0x08178315, 0x0809aac6), TOBN(0x01b7f21a, 0x540f0f11)}}, {{TOBN(0x65c29219, 0x909523c8), TOBN(0xa62f648f, 0xa3a1c741), TOBN(0x88598d4f, 0x60c9e55a), TOBN(0xbce9141b, 0x0e4f347a)}, {TOBN(0x9af97d84, 0x35f9b988), TOBN(0x0210da62, 0x320475b6), TOBN(0x3c076e22, 0x9191476c), TOBN(0x7520dbd9, 0x44fc7834)}}, {{TOBN(0x6a6b2cfe, 0xc1ab1bbd), TOBN(0xef8a65be, 0xdc650938), TOBN(0x72855540, 0x805d7bc4), TOBN(0xda389396, 0xed11fdfd)}, {TOBN(0xa9d5bd36, 0x74660876), TOBN(0x11d67c54, 0xb45dff35), TOBN(0x6af7d148, 0xa4f5da94), TOBN(0xbb8d4c3f, 0xc0bbeb31)}}, {{TOBN(0x87a7ebd1, 0xe0a1b12a), TOBN(0x1e4ef88d, 0x770ba95f), TOBN(0x8c33345c, 0xdc2ae9cb), TOBN(0xcecf1276, 0x01cc8403)}, {TOBN(0x687c012e, 0x1b39b80f), TOBN(0xfd90d0ad, 0x35c33ba4), TOBN(0xa3ef5a67, 0x5c9661c2), TOBN(0x368fc88e, 0xe017429e)}}, {{TOBN(0xd30c6761, 0x196a2fa2), TOBN(0x931b9817, 0xbd5b312e), TOBN(0xba01000c, 0x72f54a31), TOBN(0xa203d2c8, 0x66eaa541)}, {TOBN(0xf2abdee0, 0x98939db3), TOBN(0xe37d6c2c, 0x3e606c02), TOBN(0xf2921574, 0x521ff643), TOBN(0x2781b3c4, 0xd7e2fca3)}}, {{TOBN(0x664300b0, 0x7850ec06), TOBN(0xac5a38b9, 0x7d3a10cf), TOBN(0x9233188d, 0xe34ab39d), TOBN(0xe77057e4, 0x5072cbb9)}, {TOBN(0xbcf0c042, 0xb59e78df), TOBN(0x4cfc91e8, 0x1d97de52), TOBN(0x4661a26c, 0x3ee0ca4a), TOBN(0x5620a4c1, 0xfb8507bc)}}, {{TOBN(0x4b44d4aa, 0x049f842c), TOBN(0xceabc5d5, 0x1540e82b), TOBN(0x306710fd, 0x15c6f156), TOBN(0xbe5ae52b, 0x63db1d72)}, {TOBN(0x06f1e7e6, 0x334957f1), TOBN(0x57e388f0, 0x31144a70), TOBN(0xfb69bb2f, 0xdf96447b), TOBN(0x0f78ebd3, 0x73e38a12)}}, {{TOBN(0xb8222605, 0x2b7ce542), TOBN(0xe6d4ce99, 0x7472bde1), TOBN(0x53e16ebe, 0x09d2f4da), TOBN(0x180ff42e, 0x53b92b2e)}, {TOBN(0xc59bcc02, 0x2c34a1c6), TOBN(0x3803d6f9, 0x422c46c2), TOBN(0x18aff74f, 0x5c14a8a2), TOBN(0x55aebf80, 0x10a08b28)}}, {{TOBN(0x66097d58, 0x7135593f), TOBN(0x32e6eff7, 0x2be570cd), TOBN(0x584e6a10, 0x2a8c860d), TOBN(0xcd185890, 0xa2eb4163)}, {TOBN(0x7ceae99d, 0x6d97e134), 
TOBN(0xd42c6b70, 0xdd8447ce), TOBN(0x59ddbb4a, 0xb8c50273), TOBN(0x03c612df, 0x3cf34e1e)}}, {{TOBN(0x84b9ca15, 0x04b6c5a0), TOBN(0x35216f39, 0x18f0e3a3), TOBN(0x3ec2d2bc, 0xbd986c00), TOBN(0x8bf546d9, 0xd19228fe)}, {TOBN(0xd1c655a4, 0x4cd623c3), TOBN(0x366ce718, 0x502b8e5a), TOBN(0x2cfc84b4, 0xeea0bfe7), TOBN(0xe01d5cee, 0xcf443e8e)}}, {{TOBN(0x8ec045d9, 0x036520f8), TOBN(0xdfb3c3d1, 0x92d40e98), TOBN(0x0bac4cce, 0xcc559a04), TOBN(0x35eccae5, 0x240ea6b1)}, {TOBN(0x180b32db, 0xf8a5a0ac), TOBN(0x547972a5, 0xeb699700), TOBN(0xa3765801, 0xca26bca0), TOBN(0x57e09d0e, 0xa647f25a)}}, {{TOBN(0xb956970e, 0x2fdd23cc), TOBN(0xb80288bc, 0x5682e971), TOBN(0xe6e6d91e, 0x9ae86ebc), TOBN(0x0564c83f, 0x8c9f1939)}, {TOBN(0x551932a2, 0x39560368), TOBN(0xe893752b, 0x049c28e2), TOBN(0x0b03cee5, 0xa6a158c3), TOBN(0xe12d656b, 0x04964263)}}, {{TOBN(0x4b47554e, 0x63e3bc1d), TOBN(0xc719b6a2, 0x45044ff7), TOBN(0x4f24d30a, 0xe48daa07), TOBN(0xa3f37556, 0xc8c1edc3)}, {TOBN(0x9a47bf76, 0x0700d360), TOBN(0xbb1a1824, 0x822ae4e2), TOBN(0x22e275a3, 0x89f1fb4c), TOBN(0x72b1aa23, 0x9968c5f5)}}, {{TOBN(0xa75feaca, 0xbe063f64), TOBN(0x9b392f43, 0xbce47a09), TOBN(0xd4241509, 0x1ad07aca), TOBN(0x4b0c591b, 0x8d26cd0f)}, {TOBN(0x2d42ddfd, 0x92f1169a), TOBN(0x63aeb1ac, 0x4cbf2392), TOBN(0x1de9e877, 0x0691a2af), TOBN(0xebe79af7, 0xd98021da)}}, {{TOBN(0xcfdf2a4e, 0x40e50acf), TOBN(0xf0a98ad7, 0xaf01d665), TOBN(0xefb640bf, 0x1831be1f), TOBN(0x6fe8bd2f, 0x80e9ada0)}, {TOBN(0x94c103a1, 0x6cafbc91), TOBN(0x170f8759, 0x8308e08c), TOBN(0x5de2d2ab, 0x9780ff4f), TOBN(0x666466bc, 0x45b201f2)}}, {{TOBN(0x58af2010, 0xf5b343bc), TOBN(0x0f2e400a, 0xf2f142fe), TOBN(0x3483bfde, 0xa85f4bdf), TOBN(0xf0b1d093, 0x03bfeaa9)}, {TOBN(0x2ea01b95, 0xc7081603), TOBN(0xe943e4c9, 0x3dba1097), TOBN(0x47be92ad, 0xb438f3a6), TOBN(0x00bb7742, 0xe5bf6636)}}, {{TOBN(0x136b7083, 0x824297b4), TOBN(0x9d0e5580, 0x5584455f), TOBN(0xab48cedc, 0xf1c7d69e), TOBN(0x53a9e481, 0x2a256e76)}, {TOBN(0x0402b0e0, 0x65eb2413), TOBN(0xdadbbb84, 0x8fc407a7), 
TOBN(0xa65cd5a4, 0x8d7f5492), TOBN(0x21d44293, 0x74bae294)}}, {{TOBN(0x66917ce6, 0x3b5f1cc4), TOBN(0x37ae52ea, 0xce872e62), TOBN(0xbb087b72, 0x2905f244), TOBN(0x12077086, 0x1e6af74f)}, {TOBN(0x4b644e49, 0x1058edea), TOBN(0x827510e3, 0xb638ca1d), TOBN(0x8cf2b704, 0x6038591c), TOBN(0xffc8b47a, 0xfe635063)}}, {{TOBN(0x3ae220e6, 0x1b4d5e63), TOBN(0xbd864742, 0x9d961b4b), TOBN(0x610c107e, 0x9bd16bed), TOBN(0x4270352a, 0x1127147b)}, {TOBN(0x7d17ffe6, 0x64cfc50e), TOBN(0x50dee01a, 0x1e36cb42), TOBN(0x068a7622, 0x35dc5f9a), TOBN(0x9a08d536, 0xdf53f62c)}}, {{TOBN(0x4ed71457, 0x6be5f7de), TOBN(0xd93006f8, 0xc2263c9e), TOBN(0xe073694c, 0xcacacb36), TOBN(0x2ff7a5b4, 0x3ae118ab)}, {TOBN(0x3cce53f1, 0xcd871236), TOBN(0xf156a39d, 0xc2aa6d52), TOBN(0x9cc5f271, 0xb198d76d), TOBN(0xbc615b6f, 0x81383d39)}}, {{TOBN(0xa54538e8, 0xde3eee6b), TOBN(0x58c77538, 0xab910d91), TOBN(0x31e5bdbc, 0x58d278bd), TOBN(0x3cde4adf, 0xb963acae)}, {TOBN(0xb1881fd2, 0x5302169c), TOBN(0x8ca60fa0, 0xa989ed8b), TOBN(0xa1999458, 0xff96a0ee), TOBN(0xc1141f03, 0xac6c283d)}}, {{TOBN(0x7677408d, 0x6dfafed3), TOBN(0x33a01653, 0x39661588), TOBN(0x3c9c15ec, 0x0b726fa0), TOBN(0x090cfd93, 0x6c9b56da)}, {TOBN(0xe34f4bae, 0xa3c40af5), TOBN(0x3469eadb, 0xd21129f1), TOBN(0xcc51674a, 0x1e207ce8), TOBN(0x1e293b24, 0xc83b1ef9)}}, {{TOBN(0x17173d13, 0x1e6c0bb4), TOBN(0x19004695, 0x90776d35), TOBN(0xe7980e34, 0x6de6f922), TOBN(0x873554cb, 0xf4dd9a22)}, {TOBN(0x0316c627, 0xcbf18a51), TOBN(0x4d93651b, 0x3032c081), TOBN(0x207f2771, 0x3946834d), TOBN(0x2c08d7b4, 0x30cdbf80)}}, {{TOBN(0x137a4fb4, 0x86df2a61), TOBN(0xa1ed9c07, 0xecf7b4a2), TOBN(0xb2e460e2, 0x7bd042ff), TOBN(0xb7f5e2fa, 0x5f62f5ec)}, {TOBN(0x7aa6ec6b, 0xcc2423b7), TOBN(0x75ce0a7f, 0xba63eea7), TOBN(0x67a45fb1, 0xf250a6e1), TOBN(0x93bc919c, 0xe53cdc9f)}}, {{TOBN(0x9271f56f, 0x871942df), TOBN(0x2372ff6f, 0x7859ad66), TOBN(0x5f4c2b96, 0x33cb1a78), TOBN(0xe3e29101, 0x5838aa83)}, {TOBN(0xa7ed1611, 0xe4e8110c), TOBN(0x2a2d70d5, 0x330198ce), TOBN(0xbdf132e8, 0x6720efe0), 
TOBN(0xe61a8962, 0x66a471bf)}}, {{TOBN(0x796d3a85, 0x825808bd), TOBN(0x51dc3cb7, 0x3fd6e902), TOBN(0x643c768a, 0x916219d1), TOBN(0x36cd7685, 0xa2ad7d32)}, {TOBN(0xe3db9d05, 0xb22922a4), TOBN(0x6494c87e, 0xdba29660), TOBN(0xf0ac91df, 0xbcd2ebc7), TOBN(0x4deb57a0, 0x45107f8d)}}, {{TOBN(0x42271f59, 0xc3d12a73), TOBN(0x5f71687c, 0xa5c2c51d), TOBN(0xcb1f50c6, 0x05797bcb), TOBN(0x29ed0ed9, 0xd6d34eb0)}, {TOBN(0xe5fe5b47, 0x4683c2eb), TOBN(0x4956eeb5, 0x97447c46), TOBN(0x5b163a43, 0x71207167), TOBN(0x93fa2fed, 0x0248c5ef)}}, {{TOBN(0x67930af2, 0x31f63950), TOBN(0xa77797c1, 0x14caa2c9), TOBN(0x526e80ee, 0x27ac7e62), TOBN(0xe1e6e626, 0x58b28aec)}, {TOBN(0x636178b0, 0xb3c9fef0), TOBN(0xaf7752e0, 0x6d5f90be), TOBN(0x94ecaf18, 0xeece51cf), TOBN(0x2864d0ed, 0xca806e1f)}}, {{TOBN(0x6de2e383, 0x97c69134), TOBN(0x5a42c316, 0xeb291293), TOBN(0xc7779219, 0x6a60bae0), TOBN(0xa24de346, 0x6b7599d1)}, {TOBN(0x49d374aa, 0xb75d4941), TOBN(0x98900586, 0x2d501ff0), TOBN(0x9f16d40e, 0xeb7974cf), TOBN(0x1033860b, 0xcdd8c115)}}, {{TOBN(0xb6c69ac8, 0x2094cec3), TOBN(0x9976fb88, 0x403b770c), TOBN(0x1dea026c, 0x4859590d), TOBN(0xb6acbb46, 0x8562d1fd)}, {TOBN(0x7cd6c461, 0x44569d85), TOBN(0xc3190a36, 0x97f0891d), TOBN(0xc6f53195, 0x48d5a17d), TOBN(0x7d919966, 0xd749abc8)}}, {{TOBN(0x65104837, 0xdd1c8a20), TOBN(0x7e5410c8, 0x2f683419), TOBN(0x958c3ca8, 0xbe94022e), TOBN(0x605c3197, 0x6145dac2)}, {TOBN(0x3fc07501, 0x01683d54), TOBN(0x1d7127c5, 0x595b1234), TOBN(0x10b8f87c, 0x9481277f), TOBN(0x677db2a8, 0xe65a1adb)}}, {{TOBN(0xec2fccaa, 0xddce3345), TOBN(0x2a6811b7, 0x012a4350), TOBN(0x96760ff1, 0xac598bdc), TOBN(0x054d652a, 0xd1bf4128)}, {TOBN(0x0a1151d4, 0x92a21005), TOBN(0xad7f3971, 0x33110fdf), TOBN(0x8c95928c, 0x1960100f), TOBN(0x6c91c825, 0x7bf03362)}}, {{TOBN(0xc8c8b2a2, 0xce309f06), TOBN(0xfdb27b59, 0xca27204b), TOBN(0xd223eaa5, 0x0848e32e), TOBN(0xb93e4b2e, 0xe7bfaf1e)}, {TOBN(0xc5308ae6, 0x44aa3ded), TOBN(0x317a666a, 0xc015d573), TOBN(0xc888ce23, 0x1a979707), TOBN(0xf141c1e6, 0x0d5c4958)}}, 
{{TOBN(0xb53b7de5, 0x61906373), TOBN(0x858dbade, 0xeb999595), TOBN(0x8cbb47b2, 0xa59e5c36), TOBN(0x660318b3, 0xdcf4e842)}, {TOBN(0xbd161ccd, 0x12ba4b7a), TOBN(0xf399daab, 0xf8c8282a), TOBN(0x1587633a, 0xeeb2130d), TOBN(0xa465311a, 0xda38dd7d)}}, {{TOBN(0x5f75eec8, 0x64d3779b), TOBN(0x3c5d0476, 0xad64c171), TOBN(0x87410371, 0x2a914428), TOBN(0x8096a891, 0x90e2fc29)}, {TOBN(0xd3d2ae9d, 0x23b3ebc2), TOBN(0x90bdd6db, 0xa580cfd6), TOBN(0x52dbb7f3, 0xc5b01f6c), TOBN(0xe68eded4, 0xe102a2dc)}}, {{TOBN(0x17785b77, 0x99eb6df0), TOBN(0x26c3cc51, 0x7386b779), TOBN(0x345ed988, 0x6417a48e), TOBN(0xe990b4e4, 0x07d6ef31)}, {TOBN(0x0f456b7e, 0x2586abba), TOBN(0x239ca6a5, 0x59c96e9a), TOBN(0xe327459c, 0xe2eb4206), TOBN(0x3a4c3313, 0xa002b90a)}}, {{TOBN(0x2a114806, 0xf6a3f6fb), TOBN(0xad5cad2f, 0x85c251dd), TOBN(0x92c1f613, 0xf5a784d3), TOBN(0xec7bfacf, 0x349766d5)}, {TOBN(0x04b3cd33, 0x3e23cb3b), TOBN(0x3979fe84, 0xc5a64b2d), TOBN(0x192e2720, 0x7e589106), TOBN(0xa60c43d1, 0xa15b527f)}}, {{TOBN(0x2dae9082, 0xbe7cf3a6), TOBN(0xcc86ba92, 0xbc967274), TOBN(0xf28a2ce8, 0xaea0a8a9), TOBN(0x404ca6d9, 0x6ee988b3)}, {TOBN(0xfd7e9c5d, 0x005921b8), TOBN(0xf56297f1, 0x44e79bf9), TOBN(0xa163b460, 0x0d75ddc2), TOBN(0x30b23616, 0xa1f2be87)}}, {{TOBN(0x4b070d21, 0xbfe50e2b), TOBN(0x7ef8cfd0, 0xe1bfede1), TOBN(0xadba0011, 0x2aac4ae0), TOBN(0x2a3e7d01, 0xb9ebd033)}, {TOBN(0x995277ec, 0xe38d9d1c), TOBN(0xb500249e, 0x9c5d2de3), TOBN(0x8912b820, 0xf13ca8c9), TOBN(0xc8798114, 0x877793af)}}, {{TOBN(0x19e6125d, 0xec3f1dec), TOBN(0x07b1f040, 0x911178da), TOBN(0xd93ededa, 0x904a6738), TOBN(0x55187a5a, 0x0bebedcd)}, {TOBN(0xf7d04722, 0xeb329d41), TOBN(0xf449099e, 0xf170b391), TOBN(0xfd317a69, 0xca99f828), TOBN(0x50c3db2b, 0x34a4976d)}}, {{TOBN(0xe9ba7784, 0x3757b392), TOBN(0x326caefd, 0xaa3ca05a), TOBN(0x78e5293b, 0xf1e593d4), TOBN(0x7842a937, 0x0d98fd13)}, {TOBN(0xe694bf96, 0x5f96b10d), TOBN(0x373a9df6, 0x06a8cd05), TOBN(0x997d1e51, 0xe8f0c7fc), TOBN(0x1d019790, 0x63fd972e)}}, {{TOBN(0x0064d858, 0x5499fb32), 
TOBN(0x7b67bad9, 0x77a8aeb7), TOBN(0x1d3eb977, 0x2d08eec5), TOBN(0x5fc047a6, 0xcbabae1d)}, {TOBN(0x0577d159, 0xe54a64bb), TOBN(0x8862201b, 0xc43497e4), TOBN(0xad6b4e28, 0x2ce0608d), TOBN(0x8b687b7d, 0x0b167aac)}}, {{TOBN(0x6ed4d367, 0x8b2ecfa9), TOBN(0x24dfe62d, 0xa90c3c38), TOBN(0xa1862e10, 0x3fe5c42b), TOBN(0x1ca73dca, 0xd5732a9f)}, {TOBN(0x35f038b7, 0x76bb87ad), TOBN(0x674976ab, 0xf242b81f), TOBN(0x4f2bde7e, 0xb0fd90cd), TOBN(0x6efc172e, 0xa7fdf092)}}, {{TOBN(0x3806b69b, 0x92222f1f), TOBN(0x5a2459ca, 0x6cf7ae70), TOBN(0x6789f69c, 0xa85217ee), TOBN(0x5f232b5e, 0xe3dc85ac)}, {TOBN(0x660e3ec5, 0x48e9e516), TOBN(0x124b4e47, 0x3197eb31), TOBN(0x10a0cb13, 0xaafcca23), TOBN(0x7bd63ba4, 0x8213224f)}}, {{TOBN(0xaffad7cc, 0x290a7f4f), TOBN(0x6b409c9e, 0x0286b461), TOBN(0x58ab809f, 0xffa407af), TOBN(0xc3122eed, 0xc68ac073)}, {TOBN(0x17bf9e50, 0x4ef24d7e), TOBN(0x5d929794, 0x3e2a5811), TOBN(0x519bc867, 0x02902e01), TOBN(0x76bba5da, 0x39c8a851)}}, {{TOBN(0xe9f9669c, 0xda94951e), TOBN(0x4b6af58d, 0x66b8d418), TOBN(0xfa321074, 0x17d426a4), TOBN(0xc78e66a9, 0x9dde6027)}, {TOBN(0x0516c083, 0x4a53b964), TOBN(0xfc659d38, 0xff602330), TOBN(0x0ab55e5c, 0x58c5c897), TOBN(0x985099b2, 0x838bc5df)}}, {{TOBN(0x061d9efc, 0xc52fc238), TOBN(0x712b2728, 0x6ac1da3f), TOBN(0xfb658149, 0x9283fe08), TOBN(0x4954ac94, 0xb8aaa2f7)}, {TOBN(0x85c0ada4, 0x7fb2e74f), TOBN(0xee8ba98e, 0xb89926b0), TOBN(0xe4f9d37d, 0x23d1af5b), TOBN(0x14ccdbf9, 0xba9b015e)}}, {{TOBN(0xb674481b, 0x7bfe7178), TOBN(0x4e1debae, 0x65405868), TOBN(0x061b2821, 0xc48c867d), TOBN(0x69c15b35, 0x513b30ea)}, {TOBN(0x3b4a1666, 0x36871088), TOBN(0xe5e29f5d, 0x1220b1ff), TOBN(0x4b82bb35, 0x233d9f4d), TOBN(0x4e076333, 0x18cdc675)}}}, {{{TOBN(0x0d53f5c7, 0xa3e6fced), TOBN(0xe8cbbdd5, 0xf45fbdeb), TOBN(0xf85c01df, 0x13339a70), TOBN(0x0ff71880, 0x142ceb81)}, {TOBN(0x4c4e8774, 0xbd70437a), TOBN(0x5fb32891, 0xba0bda6a), TOBN(0x1cdbebd2, 0xf18bd26e), TOBN(0x2f9526f1, 0x03a9d522)}}, {{TOBN(0x40ce3051, 0x92c4d684), TOBN(0x8b04d725, 0x7612efcd), 
TOBN(0xb9dcda36, 0x6f9cae20), TOBN(0x0edc4d24, 0xf058856c)}, {TOBN(0x64f2e6bf, 0x85427900), TOBN(0x3de81295, 0xdc09dfea), TOBN(0xd41b4487, 0x379bf26c), TOBN(0x50b62c6d, 0x6df135a9)}}, {{TOBN(0xd4f8e3b4, 0xc72dfe67), TOBN(0xc416b0f6, 0x90e19fdf), TOBN(0x18b9098d, 0x4c13bd35), TOBN(0xac11118a, 0x15b8cb9e)}, {TOBN(0xf598a318, 0xf0062841), TOBN(0xbfe0602f, 0x89f356f4), TOBN(0x7ae3637e, 0x30177a0c), TOBN(0x34097747, 0x61136537)}}, {{TOBN(0x0db2fb5e, 0xd005832a), TOBN(0x5f5efd3b, 0x91042e4f), TOBN(0x8c4ffdc6, 0xed70f8ca), TOBN(0xe4645d0b, 0xb52da9cc)}, {TOBN(0x9596f58b, 0xc9001d1f), TOBN(0x52c8f0bc, 0x4e117205), TOBN(0xfd4aa0d2, 0xe398a084), TOBN(0x815bfe3a, 0x104f49de)}}, {{TOBN(0x97e5443f, 0x23885e5f), TOBN(0xf72f8f99, 0xe8433aab), TOBN(0xbd00b154, 0xe4d4e604), TOBN(0xd0b35e6a, 0xe5e173ff)}, {TOBN(0x57b2a048, 0x9164722d), TOBN(0x3e3c665b, 0x88761ec8), TOBN(0x6bdd1397, 0x3da83832), TOBN(0x3c8b1a1e, 0x73dafe3b)}}, {{TOBN(0x4497ace6, 0x54317cac), TOBN(0xbe600ab9, 0x521771b3), TOBN(0xb42e409e, 0xb0dfe8b8), TOBN(0x386a67d7, 0x3942310f)}, {TOBN(0x25548d8d, 0x4431cc28), TOBN(0xa7cff142, 0x985dc524), TOBN(0x4d60f5a1, 0x93c4be32), TOBN(0x83ebd5c8, 0xd071c6e1)}}, {{TOBN(0xba3a80a7, 0xb1fd2b0b), TOBN(0x9b3ad396, 0x5bec33e8), TOBN(0xb3868d61, 0x79743fb3), TOBN(0xcfd169fc, 0xfdb462fa)}, {TOBN(0xd3b499d7, 0x9ce0a6af), TOBN(0x55dc1cf1, 0xe42d3ff8), TOBN(0x04fb9e6c, 0xc6c3e1b2), TOBN(0x47e6961d, 0x6f69a474)}}, {{TOBN(0x54eb3acc, 0xe548b37b), TOBN(0xb38e7542, 0x84d40549), TOBN(0x8c3daa51, 0x7b341b4f), TOBN(0x2f6928ec, 0x690bf7fa)}, {TOBN(0x0496b323, 0x86ce6c41), TOBN(0x01be1c55, 0x10adadcd), TOBN(0xc04e67e7, 0x4bb5faf9), TOBN(0x3cbaf678, 0xe15c9985)}}, {{TOBN(0x8cd12145, 0x50ca4247), TOBN(0xba1aa47a, 0xe7dd30aa), TOBN(0x2f81ddf1, 0xe58fee24), TOBN(0x03452936, 0xeec9b0e8)}, {TOBN(0x8bdc3b81, 0x243aea96), TOBN(0x9a2919af, 0x15c3d0e5), TOBN(0x9ea640ec, 0x10948361), TOBN(0x5ac86d5b, 0x6e0bcccf)}}, {{TOBN(0xf892d918, 0xc36cf440), TOBN(0xaed3e837, 0xc939719c), TOBN(0xb07b08d2, 0xc0218b64), 
TOBN(0x6f1bcbba, 0xce9790dd)}, {TOBN(0x4a84d6ed, 0x60919b8e), TOBN(0xd8900791, 0x8ac1f9eb), TOBN(0xf84941aa, 0x0dd5daef), TOBN(0xb22fe40a, 0x67fd62c5)}}, {{TOBN(0x97e15ba2, 0x157f2db3), TOBN(0xbda2fc8f, 0x8e28ca9c), TOBN(0x5d050da4, 0x37b9f454), TOBN(0x3d57eb57, 0x2379d72e)}, {TOBN(0xe9b5eba2, 0xfb5ee997), TOBN(0x01648ca2, 0xe11538ca), TOBN(0x32bb76f6, 0xf6327974), TOBN(0x338f14b8, 0xff3f4bb7)}}, {{TOBN(0x524d226a, 0xd7ab9a2d), TOBN(0x9c00090d, 0x7dfae958), TOBN(0x0ba5f539, 0x8751d8c2), TOBN(0x8afcbcdd, 0x3ab8262d)}, {TOBN(0x57392729, 0xe99d043b), TOBN(0xef51263b, 0xaebc943a), TOBN(0x9feace93, 0x20862935), TOBN(0x639efc03, 0xb06c817b)}}, {{TOBN(0x1fe054b3, 0x66b4be7a), TOBN(0x3f25a9de, 0x84a37a1e), TOBN(0xf39ef1ad, 0x78d75cd9), TOBN(0xd7b58f49, 0x5062c1b5)}, {TOBN(0x6f74f9a9, 0xff563436), TOBN(0xf718ff29, 0xe8af51e7), TOBN(0x5234d313, 0x15e97fec), TOBN(0xb6a8e2b1, 0x292f1c0a)}}, {{TOBN(0xa7f53aa8, 0x327720c1), TOBN(0x956ca322, 0xba092cc8), TOBN(0x8f03d64a, 0x28746c4d), TOBN(0x51fe1782, 0x66d0d392)}, {TOBN(0xd19b34db, 0x3c832c80), TOBN(0x60dccc5c, 0x6da2e3b4), TOBN(0x245dd62e, 0x0a104ccc), TOBN(0xa7ab1de1, 0x620b21fd)}}, {{TOBN(0xb293ae0b, 0x3893d123), TOBN(0xf7b75783, 0xb15ee71c), TOBN(0x5aa3c614, 0x42a9468b), TOBN(0xd686123c, 0xdb15d744)}, {TOBN(0x8c616891, 0xa7ab4116), TOBN(0x6fcd72c8, 0xa4e6a459), TOBN(0xac219110, 0x77e5fad7), TOBN(0xfb6a20e7, 0x704fa46b)}}, {{TOBN(0xe839be7d, 0x341d81dc), TOBN(0xcddb6889, 0x32148379), TOBN(0xda6211a1, 0xf7026ead), TOBN(0xf3b2575f, 0xf4d1cc5e)}, {TOBN(0x40cfc8f6, 0xa7a73ae6), TOBN(0x83879a5e, 0x61d5b483), TOBN(0xc5acb1ed, 0x41a50ebc), TOBN(0x59a60cc8, 0x3c07d8fa)}}, {{TOBN(0x1b73bdce, 0xb1876262), TOBN(0x2b0d79f0, 0x12af4ee9), TOBN(0x8bcf3b0b, 0xd46e1d07), TOBN(0x17d6af9d, 0xe45d152f)}, {TOBN(0x73520461, 0x6d736451), TOBN(0x43cbbd97, 0x56b0bf5a), TOBN(0xb0833a5b, 0xd5999b9d), TOBN(0x702614f0, 0xeb72e398)}}, {{TOBN(0x0aadf01a, 0x59c3e9f8), TOBN(0x40200e77, 0xce6b3d16), TOBN(0xda22bdd3, 0xdeddafad), TOBN(0x76dedaf4, 0x310d72e1)}, 
{TOBN(0x49ef807c, 0x4bc2e88f), TOBN(0x6ba81291, 0x146dd5a5), TOBN(0xa1a4077a, 0x7d8d59e9), TOBN(0x87b6a2e7, 0x802db349)}}, {{TOBN(0xd5679997, 0x1b4e598e), TOBN(0xf499ef1f, 0x06fe4b1d), TOBN(0x3978d3ae, 0xfcb267c5), TOBN(0xb582b557, 0x235786d0)}, {TOBN(0x32b3b2ca, 0x1715cb07), TOBN(0x4c3de6a2, 0x8480241d), TOBN(0x63b5ffed, 0xcb571ecd), TOBN(0xeaf53900, 0xed2fe9a9)}}, {{TOBN(0xdec98d4a, 0xc3b81990), TOBN(0x1cb83722, 0x9e0cc8fe), TOBN(0xfe0b0491, 0xd2b427b9), TOBN(0x0f2386ac, 0xe983a66c)}, {TOBN(0x930c4d1e, 0xb3291213), TOBN(0xa2f82b2e, 0x59a62ae4), TOBN(0x77233853, 0xf93e89e3), TOBN(0x7f8063ac, 0x11777c7f)}}, {{TOBN(0xff0eb567, 0x59ad2877), TOBN(0x6f454642, 0x9865c754), TOBN(0xe6fe701a, 0x236e9a84), TOBN(0xc586ef16, 0x06e40fc3)}, {TOBN(0x3f62b6e0, 0x24bafad9), TOBN(0xc8b42bd2, 0x64da906a), TOBN(0xc98e1eb4, 0xda3276a0), TOBN(0x30d0e5fc, 0x06cbf852)}}, {{TOBN(0x1b6b2ae1, 0xe8b4dfd4), TOBN(0xd754d5c7, 0x8301cbac), TOBN(0x66097629, 0x112a39ac), TOBN(0xf86b5999, 0x93ba4ab9)}, {TOBN(0x26c9dea7, 0x99f9d581), TOBN(0x0473b1a8, 0xc2fafeaa), TOBN(0x1469af55, 0x3b2505a5), TOBN(0x227d16d7, 0xd6a43323)}}, {{TOBN(0x3316f73c, 0xad3d97f9), TOBN(0x52bf3bb5, 0x1f137455), TOBN(0x953eafeb, 0x09954e7c), TOBN(0xa721dfed, 0xdd732411)}, {TOBN(0xb4929821, 0x141d4579), TOBN(0x3411321c, 0xaa3bd435), TOBN(0xafb355aa, 0x17fa6015), TOBN(0xb4e7ef4a, 0x18e42f0e)}}, {{TOBN(0x604ac97c, 0x59371000), TOBN(0xe1c48c70, 0x7f759c18), TOBN(0x3f62ecc5, 0xa5db6b65), TOBN(0x0a78b173, 0x38a21495)}, {TOBN(0x6be1819d, 0xbcc8ad94), TOBN(0x70dc04f6, 0xd89c3400), TOBN(0x462557b4, 0xa6b4840a), TOBN(0x544c6ade, 0x60bd21c0)}}, {{TOBN(0x6a00f24e, 0x907a544b), TOBN(0xa7520dcb, 0x313da210), TOBN(0xfe939b75, 0x11e4994b), TOBN(0x918b6ba6, 0xbc275d70)}, {TOBN(0xd3e5e0fc, 0x644be892), TOBN(0x707a9816, 0xfdaf6c42), TOBN(0x60145567, 0xf15c13fe), TOBN(0x4818ebaa, 0xe130a54a)}}, {{TOBN(0x28aad3ad, 0x58d2f767), TOBN(0xdc5267fd, 0xd7e7c773), TOBN(0x4919cc88, 0xc3afcc98), TOBN(0xaa2e6ab0, 0x2db8cd4b)}, {TOBN(0xd46fec04, 0xd0c63eaa), 
TOBN(0xa1cb92c5, 0x19ffa832), TOBN(0x678dd178, 0xe43a631f), TOBN(0xfb5ae1cd, 0x3dc788b3)}}, {{TOBN(0x68b4fb90, 0x6e77de04), TOBN(0x7992bcf0, 0xf06dbb97), TOBN(0x896e6a13, 0xc417c01d), TOBN(0x8d96332c, 0xb956be01)}, {TOBN(0x902fc93a, 0x413aa2b9), TOBN(0x99a4d915, 0xfc98c8a5), TOBN(0x52c29407, 0x565f1137), TOBN(0x4072690f, 0x21e4f281)}}, {{TOBN(0x36e607cf, 0x02ff6072), TOBN(0xa47d2ca9, 0x8ad98cdc), TOBN(0xbf471d1e, 0xf5f56609), TOBN(0xbcf86623, 0xf264ada0)}, {TOBN(0xb70c0687, 0xaa9e5cb6), TOBN(0xc98124f2, 0x17401c6c), TOBN(0x8189635f, 0xd4a61435), TOBN(0xd28fb8af, 0xa9d98ea6)}}, {{TOBN(0xb9a67c2a, 0x40c251f8), TOBN(0x88cd5d87, 0xa2da44be), TOBN(0x437deb96, 0xe09b5423), TOBN(0x150467db, 0x64287dc1)}, {TOBN(0xe161debb, 0xcdabb839), TOBN(0xa79e9742, 0xf1839a3e), TOBN(0xbb8dd3c2, 0x652d202b), TOBN(0x7b3e67f7, 0xe9f97d96)}}, {{TOBN(0x5aa5d78f, 0xb1cb6ac9), TOBN(0xffa13e8e, 0xca1d0d45), TOBN(0x369295dd, 0x2ba5bf95), TOBN(0xd68bd1f8, 0x39aff05e)}, {TOBN(0xaf0d86f9, 0x26d783f2), TOBN(0x543a59b3, 0xfc3aafc1), TOBN(0x3fcf81d2, 0x7b7da97c), TOBN(0xc990a056, 0xd25dee46)}}, {{TOBN(0x3e6775b8, 0x519cce2c), TOBN(0xfc9af71f, 0xae13d863), TOBN(0x774a4a6f, 0x47c1605c), TOBN(0x46ba4245, 0x2fd205e8)}, {TOBN(0xa06feea4, 0xd3fd524d), TOBN(0x1e724641, 0x6de1acc2), TOBN(0xf53816f1, 0x334e2b42), TOBN(0x49e5918e, 0x922f0024)}}, {{TOBN(0x439530b6, 0x65c7322d), TOBN(0xcf12cc01, 0xb3c1b3fb), TOBN(0xc70b0186, 0x0172f685), TOBN(0xb915ee22, 0x1b58391d)}, {TOBN(0x9afdf03b, 0xa317db24), TOBN(0x87dec659, 0x17b8ffc4), TOBN(0x7f46597b, 0xe4d3d050), TOBN(0x80a1c1ed, 0x006500e7)}}, {{TOBN(0x84902a96, 0x78bf030e), TOBN(0xfb5e9c9a, 0x50560148), TOBN(0x6dae0a92, 0x63362426), TOBN(0xdcaeecf4, 0xa9e30c40)}, {TOBN(0xc0d887bb, 0x518d0c6b), TOBN(0x99181152, 0xcb985b9d), TOBN(0xad186898, 0xef7bc381), TOBN(0x18168ffb, 0x9ee46201)}}, {{TOBN(0x9a04cdaa, 0x2502753c), TOBN(0xbb279e26, 0x51407c41), TOBN(0xeacb03aa, 0xf23564e5), TOBN(0x18336582, 0x71e61016)}, {TOBN(0x8684b8c4, 0xeb809877), TOBN(0xb336e18d, 0xea0e672e), 
TOBN(0xefb601f0, 0x34ee5867), TOBN(0x2733edbe, 0x1341cfd1)}}, {{TOBN(0xb15e809a, 0x26025c3c), TOBN(0xe6e981a6, 0x9350df88), TOBN(0x92376237, 0x8502fd8e), TOBN(0x4791f216, 0x0c12be9b)}, {TOBN(0xb7256789, 0x25f02425), TOBN(0xec863194, 0x7a974443), TOBN(0x7c0ce882, 0xfb41cc52), TOBN(0xc266ff7e, 0xf25c07f2)}}, {{TOBN(0x3d4da8c3, 0x017025f3), TOBN(0xefcf628c, 0xfb9579b4), TOBN(0x5c4d0016, 0x1f3716ec), TOBN(0x9c27ebc4, 0x6801116e)}, {TOBN(0x5eba0ea1, 0x1da1767e), TOBN(0xfe151452, 0x47004c57), TOBN(0x3ace6df6, 0x8c2373b7), TOBN(0x75c3dffe, 0x5dbc37ac)}}, {{TOBN(0x3dc32a73, 0xddc925fc), TOBN(0xb679c841, 0x2f65ee0b), TOBN(0x715a3295, 0x451cbfeb), TOBN(0xd9889768, 0xf76e9a29)}, {TOBN(0xec20ce7f, 0xb28ad247), TOBN(0xe99146c4, 0x00894d79), TOBN(0x71457d7c, 0x9f5e3ea7), TOBN(0x097b2662, 0x38030031)}}, {{TOBN(0xdb7f6ae6, 0xcf9f82a8), TOBN(0x319decb9, 0x438f473a), TOBN(0xa63ab386, 0x283856c3), TOBN(0x13e3172f, 0xb06a361b)}, {TOBN(0x2959f8dc, 0x7d5a006c), TOBN(0x2dbc27c6, 0x75fba752), TOBN(0xc1227ab2, 0x87c22c9e), TOBN(0x06f61f75, 0x71a268b2)}}, {{TOBN(0x1b6bb971, 0x04779ce2), TOBN(0xaca83812, 0x0aadcb1d), TOBN(0x297ae0bc, 0xaeaab2d5), TOBN(0xa5c14ee7, 0x5bfb9f13)}, {TOBN(0xaa00c583, 0xf17a62c7), TOBN(0x39eb962c, 0x173759f6), TOBN(0x1eeba1d4, 0x86c9a88f), TOBN(0x0ab6c37a, 0xdf016c5e)}}, {{TOBN(0xa2a147db, 0xa28a0749), TOBN(0x246c20d6, 0xee519165), TOBN(0x5068d1b1, 0xd3810715), TOBN(0xb1e7018c, 0x748160b9)}, {TOBN(0x03f5b1fa, 0xf380ff62), TOBN(0xef7fb1dd, 0xf3cb2c1e), TOBN(0xeab539a8, 0xfc91a7da), TOBN(0x83ddb707, 0xf3f9b561)}}, {{TOBN(0xc550e211, 0xfe7df7a4), TOBN(0xa7cd07f2, 0x063f6f40), TOBN(0xb0de3635, 0x2976879c), TOBN(0xb5f83f85, 0xe55741da)}, {TOBN(0x4ea9d25e, 0xf3d8ac3d), TOBN(0x6fe2066f, 0x62819f02), TOBN(0x4ab2b9c2, 0xcef4a564), TOBN(0x1e155d96, 0x5ffa2de3)}}, {{TOBN(0x0eb0a19b, 0xc3a72d00), TOBN(0x4037665b, 0x8513c31b), TOBN(0x2fb2b6bf, 0x04c64637), TOBN(0x45c34d6e, 0x08cdc639)}, {TOBN(0x56f1e10f, 0xf01fd796), TOBN(0x4dfb8101, 0xfe3667b8), TOBN(0xe0eda253, 0x9021d0c0), 
TOBN(0x7a94e9ff, 0x8a06c6ab)}}, {{TOBN(0x2d3bb0d9, 0xbb9aa882), TOBN(0xea20e4e5, 0xec05fd10), TOBN(0xed7eeb5f, 0x1a1ca64e), TOBN(0x2fa6b43c, 0xc6327cbd)}, {TOBN(0xb577e3cf, 0x3aa91121), TOBN(0x8c6bd5ea, 0x3a34079b), TOBN(0xd7e5ba39, 0x60e02fc0), TOBN(0xf16dd2c3, 0x90141bf8)}}, {{TOBN(0xb57276d9, 0x80101b98), TOBN(0x760883fd, 0xb82f0f66), TOBN(0x89d7de75, 0x4bc3eff3), TOBN(0x03b60643, 0x5dc2ab40)}, {TOBN(0xcd6e53df, 0xe05beeac), TOBN(0xf2f1e862, 0xbc3325cd), TOBN(0xdd0f7921, 0x774f03c3), TOBN(0x97ca7221, 0x4552cc1b)}}, {{TOBN(0x5a0d6afe, 0x1cd19f72), TOBN(0xa20915dc, 0xf183fbeb), TOBN(0x9fda4b40, 0x832c403c), TOBN(0x32738edd, 0xbe425442)}, {TOBN(0x469a1df6, 0xb5eccf1a), TOBN(0x4b5aff42, 0x28bbe1f0), TOBN(0x31359d7f, 0x570dfc93), TOBN(0xa18be235, 0xf0088628)}}, {{TOBN(0xa5b30fba, 0xb00ed3a9), TOBN(0x34c61374, 0x73cdf8be), TOBN(0x2c5c5f46, 0xabc56797), TOBN(0x5cecf93d, 0xb82a8ae2)}, {TOBN(0x7d3dbe41, 0xa968fbf0), TOBN(0xd23d4583, 0x1a5c7f3d), TOBN(0xf28f69a0, 0xc087a9c7), TOBN(0xc2d75471, 0x474471ca)}}, {{TOBN(0x36ec9f4a, 0x4eb732ec), TOBN(0x6c943bbd, 0xb1ca6bed), TOBN(0xd64535e1, 0xf2457892), TOBN(0x8b84a8ea, 0xf7e2ac06)}, {TOBN(0xe0936cd3, 0x2499dd5f), TOBN(0x12053d7e, 0x0ed04e57), TOBN(0x4bdd0076, 0xe4305d9d), TOBN(0x34a527b9, 0x1f67f0a2)}}, {{TOBN(0xe79a4af0, 0x9cec46ea), TOBN(0xb15347a1, 0x658b9bc7), TOBN(0x6bd2796f, 0x35af2f75), TOBN(0xac957990, 0x4051c435)}, {TOBN(0x2669dda3, 0xc33a655d), TOBN(0x5d503c2e, 0x88514aa3), TOBN(0xdfa11337, 0x3753dd41), TOBN(0x3f054673, 0x0b754f78)}}, {{TOBN(0xbf185677, 0x496125bd), TOBN(0xfb0023c8, 0x3775006c), TOBN(0xfa0f072f, 0x3a037899), TOBN(0x4222b6eb, 0x0e4aea57)}, {TOBN(0x3dde5e76, 0x7866d25a), TOBN(0xb6eb04f8, 0x4837aa6f), TOBN(0x5315591a, 0x2cf1cdb8), TOBN(0x6dfb4f41, 0x2d4e683c)}}, {{TOBN(0x7e923ea4, 0x48ee1f3a), TOBN(0x9604d9f7, 0x05a2afd5), TOBN(0xbe1d4a33, 0x40ea4948), TOBN(0x5b45f1f4, 0xb44cbd2f)}, {TOBN(0x5faf8376, 0x4acc757e), TOBN(0xa7cf9ab8, 0x63d68ff7), TOBN(0x8ad62f69, 0xdf0e404b), TOBN(0xd65f33c2, 0x12bdafdf)}}, 
{{TOBN(0xc365de15, 0xa377b14e), TOBN(0x6bf5463b, 0x8e39f60c), TOBN(0x62030d2d, 0x2ce68148), TOBN(0xd95867ef, 0xe6f843a8)}, {TOBN(0xd39a0244, 0xef5ab017), TOBN(0x0bd2d8c1, 0x4ab55d12), TOBN(0xc9503db3, 0x41639169), TOBN(0x2d4e25b0, 0xf7660c8a)}}, {{TOBN(0x760cb3b5, 0xe224c5d7), TOBN(0xfa3baf8c, 0x68616919), TOBN(0x9fbca113, 0x8d142552), TOBN(0x1ab18bf1, 0x7669ebf5)}, {TOBN(0x55e6f53e, 0x9bdf25dd), TOBN(0x04cc0bf3, 0xcb6cd154), TOBN(0x595bef49, 0x95e89080), TOBN(0xfe9459a8, 0x104a9ac1)}}, {{TOBN(0xad2d89ca, 0xcce9bb32), TOBN(0xddea65e1, 0xf7de8285), TOBN(0x62ed8c35, 0xb351bd4b), TOBN(0x4150ff36, 0x0c0e19a7)}, {TOBN(0x86e3c801, 0x345f4e47), TOBN(0x3bf21f71, 0x203a266c), TOBN(0x7ae110d4, 0x855b1f13), TOBN(0x5d6aaf6a, 0x07262517)}}, {{TOBN(0x1e0f12e1, 0x813d28f1), TOBN(0x6000e11d, 0x7ad7a523), TOBN(0xc7d8deef, 0xc744a17b), TOBN(0x1e990b48, 0x14c05a00)}, {TOBN(0x68fddaee, 0x93e976d5), TOBN(0x696241d1, 0x46610d63), TOBN(0xb204e7c3, 0x893dda88), TOBN(0x8bccfa65, 0x6a3a6946)}}, {{TOBN(0xb59425b4, 0xc5cd1411), TOBN(0x701b4042, 0xff3658b1), TOBN(0xe3e56bca, 0x4784cf93), TOBN(0x27de5f15, 0x8fe68d60)}, {TOBN(0x4ab9cfce, 0xf8d53f19), TOBN(0xddb10311, 0xa40a730d), TOBN(0x6fa73cd1, 0x4eee0a8a), TOBN(0xfd548748, 0x5249719d)}}, {{TOBN(0x49d66316, 0xa8123ef0), TOBN(0x73c32db4, 0xe7f95438), TOBN(0x2e2ed209, 0x0d9e7854), TOBN(0xf98a9329, 0x9d9f0507)}, {TOBN(0xc5d33cf6, 0x0c6aa20a), TOBN(0x9a32ba14, 0x75279bb2), TOBN(0x7e3202cb, 0x774a7307), TOBN(0x64ed4bc4, 0xe8c42dbd)}}, {{TOBN(0xc20f1a06, 0xd4caed0d), TOBN(0xb8021407, 0x171d22b3), TOBN(0xd426ca04, 0xd13268d7), TOBN(0x92377007, 0x25f4d126)}, {TOBN(0x4204cbc3, 0x71f21a85), TOBN(0x18461b7a, 0xf82369ba), TOBN(0xc0c07d31, 0x3fc858f9), TOBN(0x5deb5a50, 0xe2bab569)}}, {{TOBN(0xd5959d46, 0xd5eea89e), TOBN(0xfdff8424, 0x08437f4b), TOBN(0xf21071e4, 0x3cfe254f), TOBN(0x72417696, 0x95468321)}, {TOBN(0x5d8288b9, 0x102cae3e), TOBN(0x2d143e3d, 0xf1965dff), TOBN(0x00c9a376, 0xa078d847), TOBN(0x6fc0da31, 0x26028731)}}, {{TOBN(0xa2baeadf, 0xe45083a2), 
TOBN(0x66bc7218, 0x5e5b4bcd), TOBN(0x2c826442, 0xd04b8e7f), TOBN(0xc19f5451, 0x6c4b586b)}, {TOBN(0x60182c49, 0x5b7eeed5), TOBN(0xd9954ecd, 0x7aa9dfa1), TOBN(0xa403a8ec, 0xc73884ad), TOBN(0x7fb17de2, 0x9bb39041)}}, {{TOBN(0x694b64c5, 0xabb020e8), TOBN(0x3d18c184, 0x19c4eec7), TOBN(0x9c4673ef, 0x1c4793e5), TOBN(0xc7b8aeb5, 0x056092e6)}, {TOBN(0x3aa1ca43, 0xf0f8c16b), TOBN(0x224ed5ec, 0xd679b2f6), TOBN(0x0d56eeaf, 0x55a205c9), TOBN(0xbfe115ba, 0x4b8e028b)}}, {{TOBN(0x97e60849, 0x3927f4fe), TOBN(0xf91fbf94, 0x759aa7c5), TOBN(0x985af769, 0x6be90a51), TOBN(0xc1277b78, 0x78ccb823)}, {TOBN(0x395b656e, 0xe7a75952), TOBN(0x00df7de0, 0x928da5f5), TOBN(0x09c23175, 0x4ca4454f), TOBN(0x4ec971f4, 0x7aa2d3c1)}}, {{TOBN(0x45c3c507, 0xe75d9ccc), TOBN(0x63b7be8a, 0x3dc90306), TOBN(0x37e09c66, 0x5db44bdc), TOBN(0x50d60da1, 0x6841c6a2)}, {TOBN(0x6f9b65ee, 0x08df1b12), TOBN(0x38734879, 0x7ff089df), TOBN(0x9c331a66, 0x3fe8013d), TOBN(0x017f5de9, 0x5f42fcc8)}}, {{TOBN(0x43077866, 0xe8e57567), TOBN(0xc9f781ce, 0xf9fcdb18), TOBN(0x38131dda, 0x9b12e174), TOBN(0x25d84aa3, 0x8a03752a)}, {TOBN(0x45e09e09, 0x4d0c0ce2), TOBN(0x1564008b, 0x92bebba5), TOBN(0xf7e8ad31, 0xa87284c7), TOBN(0xb7c4b46c, 0x97e7bbaa)}}, {{TOBN(0x3e22a7b3, 0x97acf4ec), TOBN(0x0426c400, 0x5ea8b640), TOBN(0x5e3295a6, 0x4e969285), TOBN(0x22aabc59, 0xa6a45670)}, {TOBN(0xb929714c, 0x5f5942bc), TOBN(0x9a6168bd, 0xfa3182ed), TOBN(0x2216a665, 0x104152ba), TOBN(0x46908d03, 0xb6926368)}}}, {{{TOBN(0xa9f5d874, 0x5a1251fb), TOBN(0x967747a8, 0xc72725c7), TOBN(0x195c33e5, 0x31ffe89e), TOBN(0x609d210f, 0xe964935e)}, {TOBN(0xcafd6ca8, 0x2fe12227), TOBN(0xaf9b5b96, 0x0426469d), TOBN(0x2e9ee04c, 0x5693183c), TOBN(0x1084a333, 0xc8146fef)}}, {{TOBN(0x96649933, 0xaed1d1f7), TOBN(0x566eaff3, 0x50563090), TOBN(0x345057f0, 0xad2e39cf), TOBN(0x148ff65b, 0x1f832124)}, {TOBN(0x042e89d4, 0xcf94cf0d), TOBN(0x319bec84, 0x520c58b3), TOBN(0x2a267626, 0x5361aa0d), TOBN(0xc86fa302, 0x8fbc87ad)}}, {{TOBN(0xfc83d2ab, 0x5c8b06d5), TOBN(0xb1a785a2, 0xfe4eac46), 
TOBN(0xb99315bc, 0x846f7779), TOBN(0xcf31d816, 0xef9ea505)}, {TOBN(0x2391fe6a, 0x15d7dc85), TOBN(0x2f132b04, 0xb4016b33), TOBN(0x29547fe3, 0x181cb4c7), TOBN(0xdb66d8a6, 0x650155a1)}}, {{TOBN(0x6b66d7e1, 0xadc1696f), TOBN(0x98ebe593, 0x0acd72d0), TOBN(0x65f24550, 0xcc1b7435), TOBN(0xce231393, 0xb4b9a5ec)}, {TOBN(0x234a22d4, 0xdb067df9), TOBN(0x98dda095, 0xcaff9b00), TOBN(0x1bbc75a0, 0x6100c9c1), TOBN(0x1560a9c8, 0x939cf695)}}, {{TOBN(0xcf006d3e, 0x99e0925f), TOBN(0x2dd74a96, 0x6322375a), TOBN(0xc58b446a, 0xb56af5ba), TOBN(0x50292683, 0xe0b9b4f1)}, {TOBN(0xe2c34cb4, 0x1aeaffa3), TOBN(0x8b17203f, 0x9b9587c1), TOBN(0x6d559207, 0xead1350c), TOBN(0x2b66a215, 0xfb7f9604)}}, {{TOBN(0x0850325e, 0xfe51bf74), TOBN(0x9c4f579e, 0x5e460094), TOBN(0x5c87b92a, 0x76da2f25), TOBN(0x889de4e0, 0x6febef33)}, {TOBN(0x6900ec06, 0x646083ce), TOBN(0xbe2a0335, 0xbfe12773), TOBN(0xadd1da35, 0xc5344110), TOBN(0x757568b7, 0xb802cd20)}}, {{TOBN(0x75559779, 0x00f7e6c8), TOBN(0x38e8b94f, 0x0facd2f0), TOBN(0xfea1f3af, 0x03fde375), TOBN(0x5e11a1d8, 0x75881dfc)}, {TOBN(0xb3a6b02e, 0xc1e2f2ef), TOBN(0x193d2bbb, 0xc605a6c5), TOBN(0x325ffeee, 0x339a0b2d), TOBN(0x27b6a724, 0x9e0c8846)}}, {{TOBN(0xe4050f1c, 0xf1c367ca), TOBN(0x9bc85a9b, 0xc90fbc7d), TOBN(0xa373c4a2, 0xe1a11032), TOBN(0xb64232b7, 0xad0393a9)}, {TOBN(0xf5577eb0, 0x167dad29), TOBN(0x1604f301, 0x94b78ab2), TOBN(0x0baa94af, 0xe829348b), TOBN(0x77fbd8dd, 0x41654342)}}, {{TOBN(0xdab50ea5, 0xb964e39a), TOBN(0xd4c29e3c, 0xd0d3c76e), TOBN(0x80dae67c, 0x56d11964), TOBN(0x7307a8bf, 0xe5ffcc2f)}, {TOBN(0x65bbc1aa, 0x91708c3b), TOBN(0xa151e62c, 0x28bf0eeb), TOBN(0x6cb53381, 0x6fa34db7), TOBN(0x5139e05c, 0xa29403a8)}}, {{TOBN(0x6ff651b4, 0x94a7cd2e), TOBN(0x5671ffd1, 0x0699336c), TOBN(0x6f5fd2cc, 0x979a896a), TOBN(0x11e893a8, 0xd8148cef)}, {TOBN(0x988906a1, 0x65cf7b10), TOBN(0x81b67178, 0xc50d8485), TOBN(0x7c0deb35, 0x8a35b3de), TOBN(0x423ac855, 0xc1d29799)}}, {{TOBN(0xaf580d87, 0xdac50b74), TOBN(0x28b2b89f, 0x5869734c), TOBN(0x99a3b936, 0x874e28fb), 
TOBN(0xbb2c9190, 0x25f3f73a)}, {TOBN(0x199f6918, 0x84a9d5b7), TOBN(0x7ebe2325, 0x7e770374), TOBN(0xf442e107, 0x0738efe2), TOBN(0xcf9f3f56, 0xcf9082d2)}}, {{TOBN(0x719f69e1, 0x09618708), TOBN(0xcc9e8364, 0xc183f9b1), TOBN(0xec203a95, 0x366a21af), TOBN(0x6aec5d6d, 0x068b141f)}, {TOBN(0xee2df78a, 0x994f04e9), TOBN(0xb39ccae8, 0x271245b0), TOBN(0xb875a4a9, 0x97e43f4f), TOBN(0x507dfe11, 0xdb2cea98)}}, {{TOBN(0x4fbf81cb, 0x489b03e9), TOBN(0xdb86ec5b, 0x6ec414fa), TOBN(0xfad444f9, 0xf51b3ae5), TOBN(0xca7d33d6, 0x1914e3fe)}, {TOBN(0xa9c32f5c, 0x0ae6c4d0), TOBN(0xa9ca1d1e, 0x73969568), TOBN(0x98043c31, 0x1aa7467e), TOBN(0xe832e75c, 0xe21b5ac6)}}, {{TOBN(0x314b7aea, 0x5232123d), TOBN(0x08307c8c, 0x65ae86db), TOBN(0x06e7165c, 0xaa4668ed), TOBN(0xb170458b, 0xb4d3ec39)}, {TOBN(0x4d2e3ec6, 0xc19bb986), TOBN(0xc5f34846, 0xae0304ed), TOBN(0x917695a0, 0x6c9f9722), TOBN(0x6c7f7317, 0x4cab1c0a)}}, {{TOBN(0x6295940e, 0x9d6d2e8b), TOBN(0xd318b8c1, 0x549f7c97), TOBN(0x22453204, 0x97713885), TOBN(0x468d834b, 0xa8a440fe)}, {TOBN(0xd81fe5b2, 0xbfba796e), TOBN(0x152364db, 0x6d71f116), TOBN(0xbb8c7c59, 0xb5b66e53), TOBN(0x0b12c61b, 0x2641a192)}}, {{TOBN(0x31f14802, 0xfcf0a7fd), TOBN(0x42fd0789, 0x5488b01e), TOBN(0x71d78d6d, 0x9952b498), TOBN(0x8eb572d9, 0x07ac5201)}, {TOBN(0xe0a2a44c, 0x4d194a88), TOBN(0xd2b63fd9, 0xba017e66), TOBN(0x78efc6c8, 0xf888aefc), TOBN(0xb76f6bda, 0x4a881a11)}}, {{TOBN(0x187f314b, 0xb46c2397), TOBN(0x004cf566, 0x5ded2819), TOBN(0xa9ea5704, 0x38764d34), TOBN(0xbba45217, 0x78084709)}, {TOBN(0x06474571, 0x1171121e), TOBN(0xad7b7eb1, 0xe7c9b671), TOBN(0xdacfbc40, 0x730f7507), TOBN(0x178cd8c6, 0xc7ad7bd1)}}, {{TOBN(0xbf0be101, 0xb2a67238), TOBN(0x3556d367, 0xaf9c14f2), TOBN(0x104b7831, 0xa5662075), TOBN(0x58ca59bb, 0x79d9e60a)}, {TOBN(0x4bc45392, 0xa569a73b), TOBN(0x517a52e8, 0x5698f6c9), TOBN(0x85643da5, 0xaeadd755), TOBN(0x1aed0cd5, 0x2a581b84)}}, {{TOBN(0xb9b4ff84, 0x80af1372), TOBN(0x244c3113, 0xf1ba5d1f), TOBN(0x2a5dacbe, 0xf5f98d31), TOBN(0x2c3323e8, 0x4375bc2a)}, 
{TOBN(0x17a3ab4a, 0x5594b1dd), TOBN(0xa1928bfb, 0xceb4797e), TOBN(0xe83af245, 0xe4886a19), TOBN(0x8979d546, 0x72b5a74a)}}, {{TOBN(0xa0f726bc, 0x19f9e967), TOBN(0xd9d03152, 0xe8fbbf4e), TOBN(0xcfd6f51d, 0xb7707d40), TOBN(0x633084d9, 0x63f6e6e0)}, {TOBN(0xedcd9cdc, 0x55667eaf), TOBN(0x73b7f92b, 0x2e44d56f), TOBN(0xfb2e39b6, 0x4e962b14), TOBN(0x7d408f6e, 0xf671fcbf)}}, {{TOBN(0xcc634ddc, 0x164a89bb), TOBN(0x74a42bb2, 0x3ef3bd05), TOBN(0x1280dbb2, 0x428decbb), TOBN(0x6103f6bb, 0x402c8596)}, {TOBN(0xfa2bf581, 0x355a5752), TOBN(0x562f96a8, 0x00946674), TOBN(0x4e4ca16d, 0x6da0223b), TOBN(0xfe47819f, 0x28d3aa25)}}, {{TOBN(0x9eea3075, 0xf8dfcf8a), TOBN(0xa284f0aa, 0x95669825), TOBN(0xb3fca250, 0x867d3fd8), TOBN(0x20757b5f, 0x269d691e)}, {TOBN(0xf2c24020, 0x93b8a5de), TOBN(0xd3f93359, 0xebc06da6), TOBN(0x1178293e, 0xb2739c33), TOBN(0xd2a3e770, 0xbcd686e5)}}, {{TOBN(0xa76f49f4, 0xcd941534), TOBN(0x0d37406b, 0xe3c71c0e), TOBN(0x172d9397, 0x3b97f7e3), TOBN(0xec17e239, 0xbd7fd0de)}, {TOBN(0xe3290551, 0x6f496ba2), TOBN(0x6a693172, 0x36ad50e7), TOBN(0xc4e539a2, 0x83e7eff5), TOBN(0x752737e7, 0x18e1b4cf)}}, {{TOBN(0xa2f7932c, 0x68af43ee), TOBN(0x5502468e, 0x703d00bd), TOBN(0xe5dc978f, 0x2fb061f5), TOBN(0xc9a1904a, 0x28c815ad)}, {TOBN(0xd3af538d, 0x470c56a4), TOBN(0x159abc5f, 0x193d8ced), TOBN(0x2a37245f, 0x20108ef3), TOBN(0xfa17081e, 0x223f7178)}}, {{TOBN(0x27b0fb2b, 0x10c8c0f5), TOBN(0x2102c3ea, 0x40650547), TOBN(0x594564df, 0x8ac3bfa7), TOBN(0x98102033, 0x509dad96)}, {TOBN(0x6989643f, 0xf1d18a13), TOBN(0x35eebd91, 0xd7fc5af0), TOBN(0x078d096a, 0xfaeaafd8), TOBN(0xb7a89341, 0xdef3de98)}}, {{TOBN(0x2a206e8d, 0xecf2a73a), TOBN(0x066a6397, 0x8e551994), TOBN(0x3a6a088a, 0xb98d53a2), TOBN(0x0ce7c67c, 0x2d1124aa)}, {TOBN(0x48cec671, 0x759a113c), TOBN(0xe3b373d3, 0x4f6f67fa), TOBN(0x5455d479, 0xfd36727b), TOBN(0xe5a428ee, 0xa13c0d81)}}, {{TOBN(0xb853dbc8, 0x1c86682b), TOBN(0xb78d2727, 0xb8d02b2a), TOBN(0xaaf69bed, 0x8ebc329a), TOBN(0xdb6b40b3, 0x293b2148)}, {TOBN(0xe42ea77d, 0xb8c4961f), 
TOBN(0xb1a12f7c, 0x20e5e0ab), TOBN(0xa0ec5274, 0x79e8b05e), TOBN(0x68027391, 0xfab60a80)}}, {{TOBN(0x6bfeea5f, 0x16b1bd5e), TOBN(0xf957e420, 0x4de30ad3), TOBN(0xcbaf664e, 0x6a353b9e), TOBN(0x5c873312, 0x26d14feb)}, {TOBN(0x4e87f98c, 0xb65f57cb), TOBN(0xdb60a621, 0x5e0cdd41), TOBN(0x67c16865, 0xa6881440), TOBN(0x1093ef1a, 0x46ab52aa)}}, {{TOBN(0xc095afb5, 0x3f4ece64), TOBN(0x6a6bb02e, 0x7604551a), TOBN(0x55d44b4e, 0x0b26b8cd), TOBN(0xe5f9a999, 0xf971268a)}, {TOBN(0xc08ec425, 0x11a7de84), TOBN(0x83568095, 0xfda469dd), TOBN(0x737bfba1, 0x6c6c90a2), TOBN(0x1cb9c4a0, 0xbe229831)}}, {{TOBN(0x93bccbba, 0xbb2eec64), TOBN(0xa0c23b64, 0xda03adbe), TOBN(0x5f7aa00a, 0xe0e86ac4), TOBN(0x470b941e, 0xfc1401e6)}, {TOBN(0x5ad8d679, 0x9df43574), TOBN(0x4ccfb8a9, 0x0f65d810), TOBN(0x1bce80e3, 0xaa7fbd81), TOBN(0x273291ad, 0x9508d20a)}}, {{TOBN(0xf5c4b46b, 0x42a92806), TOBN(0x810684ec, 0xa86ab44a), TOBN(0x4591640b, 0xca0bc9f8), TOBN(0xb5efcdfc, 0x5c4b6054)}, {TOBN(0x16fc8907, 0x6e9edd12), TOBN(0xe29d0b50, 0xd4d792f9), TOBN(0xa45fd01c, 0x9b03116d), TOBN(0x85035235, 0xc81765a4)}}, {{TOBN(0x1fe2a9b2, 0xb4b4b67c), TOBN(0xc1d10df0, 0xe8020604), TOBN(0x9d64abfc, 0xbc8058d8), TOBN(0x8943b9b2, 0x712a0fbb)}, {TOBN(0x90eed914, 0x3b3def04), TOBN(0x85ab3aa2, 0x4ce775ff), TOBN(0x605fd4ca, 0x7bbc9040), TOBN(0x8b34a564, 0xe2c75dfb)}}, {{TOBN(0x41ffc94a, 0x10358560), TOBN(0x2d8a5072, 0x9e5c28aa), TOBN(0xe915a0fc, 0x4cc7eb15), TOBN(0xe9efab05, 0x8f6d0f5d)}, {TOBN(0xdbab47a9, 0xd19e9b91), TOBN(0x8cfed745, 0x0276154c), TOBN(0x154357ae, 0x2cfede0d), TOBN(0x520630df, 0x19f5a4ef)}}, {{TOBN(0x25759f7c, 0xe382360f), TOBN(0xb6db05c9, 0x88bf5857), TOBN(0x2917d61d, 0x6c58d46c), TOBN(0x14f8e491, 0xfd20cb7a)}, {TOBN(0xb68a727a, 0x11c20340), TOBN(0x0386f86f, 0xaf7ccbb6), TOBN(0x5c8bc6cc, 0xfee09a20), TOBN(0x7d76ff4a, 0xbb7eea35)}}, {{TOBN(0xa7bdebe7, 0xdb15be7a), TOBN(0x67a08054, 0xd89f0302), TOBN(0x56bf0ea9, 0xc1193364), TOBN(0xc8244467, 0x62837ebe)}, {TOBN(0x32bd8e8b, 0x20d841b8), TOBN(0x127a0548, 0xdbb8a54f), 
TOBN(0x83dd4ca6, 0x63b20236), TOBN(0x87714718, 0x203491fa)}}, {{TOBN(0x4dabcaaa, 0xaa8a5288), TOBN(0x91cc0c8a, 0xaf23a1c9), TOBN(0x34c72c6a, 0x3f220e0c), TOBN(0xbcc20bdf, 0x1232144a)}, {TOBN(0x6e2f42da, 0xa20ede1b), TOBN(0xc441f00c, 0x74a00515), TOBN(0xbf46a5b6, 0x734b8c4b), TOBN(0x57409503, 0x7b56c9a4)}}, {{TOBN(0x9f735261, 0xe4585d45), TOBN(0x9231faed, 0x6734e642), TOBN(0x1158a176, 0xbe70ee6c), TOBN(0x35f1068d, 0x7c3501bf)}, {TOBN(0x6beef900, 0xa2d26115), TOBN(0x649406f2, 0xef0afee3), TOBN(0x3f43a60a, 0xbc2420a1), TOBN(0x509002a7, 0xd5aee4ac)}}, {{TOBN(0xb46836a5, 0x3ff3571b), TOBN(0x24f98b78, 0x837927c1), TOBN(0x6254256a, 0x4533c716), TOBN(0xf27abb0b, 0xd07ee196)}, {TOBN(0xd7cf64fc, 0x5c6d5bfd), TOBN(0x6915c751, 0xf0cd7a77), TOBN(0xd9f59012, 0x8798f534), TOBN(0x772b0da8, 0xf81d8b5f)}}, {{TOBN(0x1244260c, 0x2e03fa69), TOBN(0x36cf0e3a, 0x3be1a374), TOBN(0x6e7c1633, 0xef06b960), TOBN(0xa71a4c55, 0x671f90f6)}, {TOBN(0x7a941251, 0x33c673db), TOBN(0xc0bea510, 0x73e8c131), TOBN(0x61a8a699, 0xd4f6c734), TOBN(0x25e78c88, 0x341ed001)}}, {{TOBN(0x5c18acf8, 0x8e2f7d90), TOBN(0xfdbf33d7, 0x77be32cd), TOBN(0x0a085cd7, 0xd2eb5ee9), TOBN(0x2d702cfb, 0xb3201115)}, {TOBN(0xb6e0ebdb, 0x85c88ce8), TOBN(0x23a3ce3c, 0x1e01d617), TOBN(0x3041618e, 0x567333ac), TOBN(0x9dd0fd8f, 0x157edb6b)}}, {{TOBN(0x27f74702, 0xb57872b8), TOBN(0x2ef26b4f, 0x657d5fe1), TOBN(0x95426f0a, 0x57cf3d40), TOBN(0x847e2ad1, 0x65a6067a)}, {TOBN(0xd474d9a0, 0x09996a74), TOBN(0x16a56acd, 0x2a26115c), TOBN(0x02a615c3, 0xd16f4d43), TOBN(0xcc3fc965, 0xaadb85b7)}}, {{TOBN(0x386bda73, 0xce07d1b0), TOBN(0xd82910c2, 0x58ad4178), TOBN(0x124f82cf, 0xcd2617f4), TOBN(0xcc2f5e8d, 0xef691770)}, {TOBN(0x82702550, 0xb8c30ccc), TOBN(0x7b856aea, 0x1a8e575a), TOBN(0xbb822fef, 0xb1ab9459), TOBN(0x085928bc, 0xec24e38e)}}, {{TOBN(0x5d0402ec, 0xba8f4b4d), TOBN(0xc07cd4ba, 0x00b4d58b), TOBN(0x5d8dffd5, 0x29227e7a), TOBN(0x61d44d0c, 0x31bf386f)}, {TOBN(0xe486dc2b, 0x135e6f4d), TOBN(0x680962eb, 0xe79410ef), TOBN(0xa61bd343, 0xf10088b5), 
TOBN(0x6aa76076, 0xe2e28686)}}, {{TOBN(0x80463d11, 0x8fb98871), TOBN(0xcb26f5c3, 0xbbc76aff), TOBN(0xd4ab8edd, 0xfbe03614), TOBN(0xc8eb579b, 0xc0cf2dee)}, {TOBN(0xcc004c15, 0xc93bae41), TOBN(0x46fbae5d, 0x3aeca3b2), TOBN(0x671235cf, 0x0f1e9ab1), TOBN(0xadfba934, 0x9ec285c1)}}, {{TOBN(0x88ded013, 0xf216c980), TOBN(0xc8ac4fb8, 0xf79e0bc1), TOBN(0xa29b89c6, 0xfb97a237), TOBN(0xb697b780, 0x9922d8e7)}, {TOBN(0x3142c639, 0xddb945b5), TOBN(0x447b06c7, 0xe094c3a9), TOBN(0xcdcb3642, 0x72266c90), TOBN(0x633aad08, 0xa9385046)}}, {{TOBN(0xa36c936b, 0xb57c6477), TOBN(0x871f8b64, 0xe94dbcc6), TOBN(0x28d0fb62, 0xa591a67b), TOBN(0x9d40e081, 0xc1d926f5)}, {TOBN(0x3111eaf6, 0xf2d84b5a), TOBN(0x228993f9, 0xa565b644), TOBN(0x0ccbf592, 0x2c83188b), TOBN(0xf87b30ab, 0x3df3e197)}}, {{TOBN(0xb8658b31, 0x7642bca8), TOBN(0x1a032d7f, 0x52800f17), TOBN(0x051dcae5, 0x79bf9445), TOBN(0xeba6b8ee, 0x54a2e253)}, {TOBN(0x5c8b9cad, 0xd4485692), TOBN(0x84bda40e, 0x8986e9be), TOBN(0xd16d16a4, 0x2f0db448), TOBN(0x8ec80050, 0xa14d4188)}}, {{TOBN(0xb2b26107, 0x98fa7aaa), TOBN(0x41209ee4, 0xf073aa4e), TOBN(0xf1570359, 0xf2d6b19b), TOBN(0xcbe6868c, 0xfc577caf)}, {TOBN(0x186c4bdc, 0x32c04dd3), TOBN(0xa6c35fae, 0xcfeee397), TOBN(0xb4a1b312, 0xf086c0cf), TOBN(0xe0a5ccc6, 0xd9461fe2)}}, {{TOBN(0xc32278aa, 0x1536189f), TOBN(0x1126c55f, 0xba6df571), TOBN(0x0f71a602, 0xb194560e), TOBN(0x8b2d7405, 0x324bd6e1)}, {TOBN(0x8481939e, 0x3738be71), TOBN(0xb5090b1a, 0x1a4d97a9), TOBN(0x116c65a3, 0xf05ba915), TOBN(0x21863ad3, 0xaae448aa)}}, {{TOBN(0xd24e2679, 0xa7aae5d3), TOBN(0x7076013d, 0x0de5c1c4), TOBN(0x2d50f8ba, 0xbb05b629), TOBN(0x73c1abe2, 0x6e66efbb)}, {TOBN(0xefd4b422, 0xf2488af7), TOBN(0xe4105d02, 0x663ba575), TOBN(0x7eb60a8b, 0x53a69457), TOBN(0x62210008, 0xc945973b)}}, {{TOBN(0xfb255478, 0x77a50ec6), TOBN(0xbf0392f7, 0x0a37a72c), TOBN(0xa0a7a19c, 0x4be18e7a), TOBN(0x90d8ea16, 0x25b1e0af)}, {TOBN(0x7582a293, 0xef953f57), TOBN(0x90a64d05, 0xbdc5465a), TOBN(0xca79c497, 0xe2510717), TOBN(0x560dbb7c, 0x18cb641f)}}, 
{{TOBN(0x1d8e3286, 0x4b66abfb), TOBN(0xd26f52e5, 0x59030900), TOBN(0x1ee3f643, 0x5584941a), TOBN(0x6d3b3730, 0x569f5958)}, {TOBN(0x9ff2a62f, 0x4789dba5), TOBN(0x91fcb815, 0x72b5c9b7), TOBN(0xf446cb7d, 0x6c8f9a0e), TOBN(0x48f625c1, 0x39b7ecb5)}}, {{TOBN(0xbabae801, 0x1c6219b8), TOBN(0xe7a562d9, 0x28ac2f23), TOBN(0xe1b48732, 0x26e20588), TOBN(0x06ee1cad, 0x775af051)}, {TOBN(0xda29ae43, 0xfaff79f7), TOBN(0xc141a412, 0x652ee9e0), TOBN(0x1e127f6f, 0x195f4bd0), TOBN(0x29c6ab4f, 0x072f34f8)}}, {{TOBN(0x7b7c1477, 0x30448112), TOBN(0x82b51af1, 0xe4a38656), TOBN(0x2bf2028a, 0x2f315010), TOBN(0xc9a4a01f, 0x6ea88cd4)}, {TOBN(0xf63e95d8, 0x257e5818), TOBN(0xdd8efa10, 0xb4519b16), TOBN(0xed8973e0, 0x0da910bf), TOBN(0xed49d077, 0x5c0fe4a9)}}, {{TOBN(0xac3aac5e, 0xb7caee1e), TOBN(0x1033898d, 0xa7f4da57), TOBN(0x42145c0e, 0x5c6669b9), TOBN(0x42daa688, 0xc1aa2aa0)}, {TOBN(0x629cc15c, 0x1a1d885a), TOBN(0x25572ec0, 0xf4b76817), TOBN(0x8312e435, 0x9c8f8f28), TOBN(0x8107f8cd, 0x81965490)}}, {{TOBN(0x516ff3a3, 0x6fa6110c), TOBN(0x74fb1eb1, 0xfb93561f), TOBN(0x6c0c9047, 0x8457522b), TOBN(0xcfd32104, 0x6bb8bdc6)}, {TOBN(0x2d6884a2, 0xcc80ad57), TOBN(0x7c27fc35, 0x86a9b637), TOBN(0x3461baed, 0xadf4e8cd), TOBN(0x1d56251a, 0x617242f0)}}, {{TOBN(0x0b80d209, 0xc955bef4), TOBN(0xdf02cad2, 0x06adb047), TOBN(0xf0d7cb91, 0x5ec74fee), TOBN(0xd2503375, 0x1111ba44)}, {TOBN(0x9671755e, 0xdf53cb36), TOBN(0x54dcb612, 0x3368551b), TOBN(0x66d69aac, 0xc8a025a4), TOBN(0x6be946c6, 0xe77ef445)}}, {{TOBN(0x719946d1, 0xa995e094), TOBN(0x65e848f6, 0xe51e04d8), TOBN(0xe62f3300, 0x6a1e3113), TOBN(0x1541c7c1, 0x501de503)}, {TOBN(0x4daac9fa, 0xf4acfade), TOBN(0x0e585897, 0x44cd0b71), TOBN(0x544fd869, 0x0a51cd77), TOBN(0x60fc20ed, 0x0031016d)}}, {{TOBN(0x58b404ec, 0xa4276867), TOBN(0x46f6c3cc, 0x34f34993), TOBN(0x477ca007, 0xc636e5bd), TOBN(0x8018f5e5, 0x7c458b47)}, {TOBN(0xa1202270, 0xe47b668f), TOBN(0xcef48ccd, 0xee14f203), TOBN(0x23f98bae, 0x62ff9b4d), TOBN(0x55acc035, 0xc589eddd)}}, {{TOBN(0x3fe712af, 0x64db4444), 
TOBN(0x19e9d634, 0xbecdd480), TOBN(0xe08bc047, 0xa930978a), TOBN(0x2dbf24ec, 0xa1280733)}, {TOBN(0x3c0ae38c, 0x2cd706b2), TOBN(0x5b012a5b, 0x359017b9), TOBN(0x3943c38c, 0x72e0f5ae), TOBN(0x786167ea, 0x57176fa3)}}, {{TOBN(0xe5f9897d, 0x594881dc), TOBN(0x6b5efad8, 0xcfb820c1), TOBN(0xb2179093, 0xd55018de), TOBN(0x39ad7d32, 0x0bac56ce)}, {TOBN(0xb55122e0, 0x2cfc0e81), TOBN(0x117c4661, 0xf6d89daa), TOBN(0x362d01e1, 0xcb64fa09), TOBN(0x6a309b4e, 0x3e9c4ddd)}}, {{TOBN(0xfa979fb7, 0xabea49b1), TOBN(0xb4b1d27d, 0x10e2c6c5), TOBN(0xbd61c2c4, 0x23afde7a), TOBN(0xeb6614f8, 0x9786d358)}, {TOBN(0x4a5d816b, 0x7f6f7459), TOBN(0xe431a44f, 0x09360e7b), TOBN(0x8c27a032, 0xc309914c), TOBN(0xcea5d68a, 0xcaede3d8)}}, {{TOBN(0x3668f665, 0x3a0a3f95), TOBN(0x89369416, 0x7ceba27b), TOBN(0x89981fad, 0xe4728fe9), TOBN(0x7102c8a0, 0x8a093562)}, {TOBN(0xbb80310e, 0x235d21c8), TOBN(0x505e55d1, 0xbefb7f7b), TOBN(0xa0a90811, 0x12958a67), TOBN(0xd67e106a, 0x4d851fef)}}, {{TOBN(0xb84011a9, 0x431dd80e), TOBN(0xeb7c7cca, 0x73306cd9), TOBN(0x20fadd29, 0xd1b3b730), TOBN(0x83858b5b, 0xfe37b3d3)}, {TOBN(0xbf4cd193, 0xb6251d5c), TOBN(0x1cca1fd3, 0x1352d952), TOBN(0xc66157a4, 0x90fbc051), TOBN(0x7990a638, 0x89b98636)}}}, {{{TOBN(0xe5aa692a, 0x87dec0e1), TOBN(0x010ded8d, 0xf7b39d00), TOBN(0x7b1b80c8, 0x54cfa0b5), TOBN(0x66beb876, 0xa0f8ea28)}, {TOBN(0x50d7f531, 0x3476cd0e), TOBN(0xa63d0e65, 0xb08d3949), TOBN(0x1a09eea9, 0x53479fc6), TOBN(0x82ae9891, 0xf499e742)}}, {{TOBN(0xab58b910, 0x5ca7d866), TOBN(0x582967e2, 0x3adb3b34), TOBN(0x89ae4447, 0xcceac0bc), TOBN(0x919c667c, 0x7bf56af5)}, {TOBN(0x9aec17b1, 0x60f5dcd7), TOBN(0xec697b9f, 0xddcaadbc), TOBN(0x0b98f341, 0x463467f5), TOBN(0xb187f1f7, 0xa967132f)}}, {{TOBN(0x90fe7a1d, 0x214aeb18), TOBN(0x1506af3c, 0x741432f7), TOBN(0xbb5565f9, 0xe591a0c4), TOBN(0x10d41a77, 0xb44f1bc3)}, {TOBN(0xa09d65e4, 0xa84bde96), TOBN(0x42f060d8, 0xf20a6a1c), TOBN(0x652a3bfd, 0xf27f9ce7), TOBN(0xb6bdb65c, 0x3b3d739f)}}, {{TOBN(0xeb5ddcb6, 0xec7fae9f), TOBN(0x995f2714, 0xefb66e5a), 
TOBN(0xdee95d8e, 0x69445d52), TOBN(0x1b6c2d46, 0x09e27620)}, {TOBN(0x32621c31, 0x8129d716), TOBN(0xb03909f1, 0x0958c1aa), TOBN(0x8c468ef9, 0x1af4af63), TOBN(0x162c429f, 0xfba5cdf6)}}, {{TOBN(0x2f682343, 0x753b9371), TOBN(0x29cab45a, 0x5f1f9cd7), TOBN(0x571623ab, 0xb245db96), TOBN(0xc507db09, 0x3fd79999)}, {TOBN(0x4e2ef652, 0xaf036c32), TOBN(0x86f0cc78, 0x05018e5c), TOBN(0xc10a73d4, 0xab8be350), TOBN(0x6519b397, 0x7e826327)}}, {{TOBN(0xe8cb5eef, 0x9c053df7), TOBN(0x8de25b37, 0xb300ea6f), TOBN(0xdb03fa92, 0xc849cffb), TOBN(0x242e43a7, 0xe84169bb)}, {TOBN(0xe4fa51f4, 0xdd6f958e), TOBN(0x6925a77f, 0xf4445a8d), TOBN(0xe6e72a50, 0xe90d8949), TOBN(0xc66648e3, 0x2b1f6390)}}, {{TOBN(0xb2ab1957, 0x173e460c), TOBN(0x1bbbce75, 0x30704590), TOBN(0xc0a90dbd, 0xdb1c7162), TOBN(0x505e399e, 0x15cdd65d)}, {TOBN(0x68434dcb, 0x57797ab7), TOBN(0x60ad35ba, 0x6a2ca8e8), TOBN(0x4bfdb1e0, 0xde3336c1), TOBN(0xbbef99eb, 0xd8b39015)}}, {{TOBN(0x6c3b96f3, 0x1711ebec), TOBN(0x2da40f1f, 0xce98fdc4), TOBN(0xb99774d3, 0x57b4411f), TOBN(0x87c8bdf4, 0x15b65bb6)}, {TOBN(0xda3a89e3, 0xc2eef12d), TOBN(0xde95bb9b, 0x3c7471f3), TOBN(0x600f225b, 0xd812c594), TOBN(0x54907c5d, 0x2b75a56b)}}, {{TOBN(0xa93cc5f0, 0x8db60e35), TOBN(0x743e3cd6, 0xfa833319), TOBN(0x7dad5c41, 0xf81683c9), TOBN(0x70c1e7d9, 0x9c34107e)}, {TOBN(0x0edc4a39, 0xa6be0907), TOBN(0x36d47035, 0x86d0b7d3), TOBN(0x8c76da03, 0x272bfa60), TOBN(0x0b4a07ea, 0x0f08a414)}}, {{TOBN(0x699e4d29, 0x45c1dd53), TOBN(0xcadc5898, 0x231debb5), TOBN(0xdf49fcc7, 0xa77f00e0), TOBN(0x93057bbf, 0xa73e5a0e)}, {TOBN(0x2f8b7ecd, 0x027a4cd1), TOBN(0x114734b3, 0xc614011a), TOBN(0xe7a01db7, 0x67677c68), TOBN(0x89d9be5e, 0x7e273f4f)}}, {{TOBN(0xd225cb2e, 0x089808ef), TOBN(0xf1f7a27d, 0xd59e4107), TOBN(0x53afc761, 0x8211b9c9), TOBN(0x0361bc67, 0xe6819159)}, {TOBN(0x2a865d0b, 0x7f071426), TOBN(0x6a3c1810, 0xe7072567), TOBN(0x3e3bca1e, 0x0d6bcabd), TOBN(0xa1b02bc1, 0x408591bc)}}, {{TOBN(0xe0deee59, 0x31fba239), TOBN(0xf47424d3, 0x98bd91d1), TOBN(0x0f8886f4, 0x071a3c1d), 
TOBN(0x3f7d41e8, 0xa819233b)}, {TOBN(0x708623c2, 0xcf6eb998), TOBN(0x86bb49af, 0x609a287f), TOBN(0x942bb249, 0x63c90762), TOBN(0x0ef6eea5, 0x55a9654b)}}, {{TOBN(0x5f6d2d72, 0x36f5defe), TOBN(0xfa9922dc, 0x56f99176), TOBN(0x6c8c5ece, 0xf78ce0c7), TOBN(0x7b44589d, 0xbe09b55e)}, {TOBN(0xe11b3bca, 0x9ea83770), TOBN(0xd7fa2c7f, 0x2ab71547), TOBN(0x2a3dd6fa, 0x2a1ddcc0), TOBN(0x09acb430, 0x5a7b7707)}}, {{TOBN(0x4add4a2e, 0x649d4e57), TOBN(0xcd53a2b0, 0x1917526e), TOBN(0xc5262330, 0x20b44ac4), TOBN(0x4028746a, 0xbaa2c31d)}, {TOBN(0x51318390, 0x64291d4c), TOBN(0xbf48f151, 0xee5ad909), TOBN(0xcce57f59, 0x7b185681), TOBN(0x7c3ac1b0, 0x4854d442)}}, {{TOBN(0x65587dc3, 0xc093c171), TOBN(0xae7acb24, 0x24f42b65), TOBN(0x5a338adb, 0x955996cb), TOBN(0xc8e65675, 0x6051f91b)}, {TOBN(0x66711fba, 0x28b8d0b1), TOBN(0x15d74137, 0xb6c10a90), TOBN(0x70cdd7eb, 0x3a232a80), TOBN(0xc9e2f07f, 0x6191ed24)}}, {{TOBN(0xa80d1db6, 0xf79588c0), TOBN(0xfa52fc69, 0xb55768cc), TOBN(0x0b4df1ae, 0x7f54438a), TOBN(0x0cadd1a7, 0xf9b46a4f)}, {TOBN(0xb40ea6b3, 0x1803dd6f), TOBN(0x488e4fa5, 0x55eaae35), TOBN(0x9f047d55, 0x382e4e16), TOBN(0xc9b5b7e0, 0x2f6e0c98)}}, {{TOBN(0x6b1bd2d3, 0x95762649), TOBN(0xa9604ee7, 0xc7aea3f6), TOBN(0x3646ff27, 0x6dc6f896), TOBN(0x9bf0e7f5, 0x2860bad1)}, {TOBN(0x2d92c821, 0x7cb44b92), TOBN(0xa2f5ce63, 0xaea9c182), TOBN(0xd0a2afb1, 0x9154a5fd), TOBN(0x482e474c, 0x95801da6)}}, {{TOBN(0xc19972d0, 0xb611c24b), TOBN(0x1d468e65, 0x60a8f351), TOBN(0xeb758069, 0x7bcf6421), TOBN(0xec9dd0ee, 0x88fbc491)}, {TOBN(0x5b59d2bf, 0x956c2e32), TOBN(0x73dc6864, 0xdcddf94e), TOBN(0xfd5e2321, 0xbcee7665), TOBN(0xa7b4f8ef, 0x5e9a06c4)}}, {{TOBN(0xfba918dd, 0x7280f855), TOBN(0xbbaac260, 0x8baec688), TOBN(0xa3b3f00f, 0x33400f42), TOBN(0x3d2dba29, 0x66f2e6e4)}, {TOBN(0xb6f71a94, 0x98509375), TOBN(0x8f33031f, 0xcea423cc), TOBN(0x009b8dd0, 0x4807e6fb), TOBN(0x5163cfe5, 0x5cdb954c)}}, {{TOBN(0x03cc8f17, 0xcf41c6e8), TOBN(0xf1f03c2a, 0x037b925c), TOBN(0xc39c19cc, 0x66d2427c), TOBN(0x823d24ba, 0x7b6c18e4)}, 
{TOBN(0x32ef9013, 0x901f0b4f), TOBN(0x684360f1, 0xf8941c2e), TOBN(0x0ebaff52, 0x2c28092e), TOBN(0x7891e4e3, 0x256c932f)}}, {{TOBN(0x51264319, 0xac445e3d), TOBN(0x553432e7, 0x8ea74381), TOBN(0xe6eeaa69, 0x67e9c50a), TOBN(0x27ced284, 0x62e628c7)}, {TOBN(0x3f96d375, 0x7a4afa57), TOBN(0xde0a14c3, 0xe484c150), TOBN(0x364a24eb, 0x38bd9923), TOBN(0x1df18da0, 0xe5177422)}}, {{TOBN(0x174e8f82, 0xd8d38a9b), TOBN(0x2e97c600, 0xe7de1391), TOBN(0xc5709850, 0xa1c175dd), TOBN(0x969041a0, 0x32ae5035)}, {TOBN(0xcbfd533b, 0x76a2086b), TOBN(0xd6bba71b, 0xd7c2e8fe), TOBN(0xb2d58ee6, 0x099dfb67), TOBN(0x3a8b342d, 0x064a85d9)}}, {{TOBN(0x3bc07649, 0x522f9be3), TOBN(0x690c075b, 0xdf1f49a8), TOBN(0x80e1aee8, 0x3854ec42), TOBN(0x2a7dbf44, 0x17689dc7)}, {TOBN(0xc004fc0e, 0x3faf4078), TOBN(0xb2f02e9e, 0xdf11862c), TOBN(0xf10a5e0f, 0xa0a1b7b3), TOBN(0x30aca623, 0x8936ec80)}}, {{TOBN(0xf83cbf05, 0x02f40d9a), TOBN(0x4681c468, 0x2c318a4d), TOBN(0x98575618, 0x0e9c2674), TOBN(0xbe79d046, 0x1847092e)}, {TOBN(0xaf1e480a, 0x78bd01e0), TOBN(0x6dd359e4, 0x72a51db9), TOBN(0x62ce3821, 0xe3afbab6), TOBN(0xc5cee5b6, 0x17733199)}}, {{TOBN(0xe08b30d4, 0x6ffd9fbb), TOBN(0x6e5bc699, 0x36c610b7), TOBN(0xf343cff2, 0x9ce262cf), TOBN(0xca2e4e35, 0x68b914c1)}, {TOBN(0x011d64c0, 0x16de36c5), TOBN(0xe0b10fdd, 0x42e2b829), TOBN(0x78942981, 0x6685aaf8), TOBN(0xe7511708, 0x230ede97)}}, {{TOBN(0x671ed8fc, 0x3b922bf8), TOBN(0xe4d8c0a0, 0x4c29b133), TOBN(0x87eb1239, 0x3b6e99c4), TOBN(0xaff3974c, 0x8793beba)}, {TOBN(0x03749405, 0x2c18df9b), TOBN(0xc5c3a293, 0x91007139), TOBN(0x6a77234f, 0xe37a0b95), TOBN(0x02c29a21, 0xb661c96b)}}, {{TOBN(0xc3aaf1d6, 0x141ecf61), TOBN(0x9195509e, 0x3bb22f53), TOBN(0x29597404, 0x22d51357), TOBN(0x1b083822, 0x537bed60)}, {TOBN(0xcd7d6e35, 0xe07289f0), TOBN(0x1f94c48c, 0x6dd86eff), TOBN(0xc8bb1f82, 0xeb0f9cfa), TOBN(0x9ee0b7e6, 0x1b2eb97d)}}, {{TOBN(0x5a52fe2e, 0x34d74e31), TOBN(0xa352c310, 0x3bf79ab6), TOBN(0x97ff6c5a, 0xabfeeb8f), TOBN(0xbfbe8fef, 0xf5c97305)}, {TOBN(0xd6081ce6, 0xa7904608), 
TOBN(0x1f812f3a, 0xc4fca249), TOBN(0x9b24bc9a, 0xb9e5e200), TOBN(0x91022c67, 0x38012ee8)}}, {{TOBN(0xe83d9c5d, 0x30a713a1), TOBN(0x4876e3f0, 0x84ef0f93), TOBN(0xc9777029, 0xc1fbf928), TOBN(0xef7a6bb3, 0xbce7d2a4)}, {TOBN(0xb8067228, 0xdfa2a659), TOBN(0xd5cd3398, 0xd877a48f), TOBN(0xbea4fd8f, 0x025d0f3f), TOBN(0xd67d2e35, 0x2eae7c2b)}}, {{TOBN(0x184de7d7, 0xcc5f4394), TOBN(0xb5551b5c, 0x4536e142), TOBN(0x2e89b212, 0xd34aa60a), TOBN(0x14a96fea, 0xf50051d5)}, {TOBN(0x4e21ef74, 0x0d12bb0b), TOBN(0xc522f020, 0x60b9677e), TOBN(0x8b12e467, 0x2df7731d), TOBN(0x39f80382, 0x7b326d31)}}, {{TOBN(0xdfb8630c, 0x39024a94), TOBN(0xaacb96a8, 0x97319452), TOBN(0xd68a3961, 0xeda3867c), TOBN(0x0c58e2b0, 0x77c4ffca)}, {TOBN(0x3d545d63, 0x4da919fa), TOBN(0xef79b69a, 0xf15e2289), TOBN(0x54bc3d3d, 0x808bab10), TOBN(0xc8ab3007, 0x45f82c37)}}, {{TOBN(0xc12738b6, 0x7c4a658a), TOBN(0xb3c47639, 0x40e72182), TOBN(0x3b77be46, 0x8798e44f), TOBN(0xdc047df2, 0x17a7f85f)}, {TOBN(0x2439d4c5, 0x5e59d92d), TOBN(0xcedca475, 0xe8e64d8d), TOBN(0xa724cd0d, 0x87ca9b16), TOBN(0x35e4fd59, 0xa5540dfe)}}, {{TOBN(0xf8c1ff18, 0xe4bcf6b1), TOBN(0x856d6285, 0x295018fa), TOBN(0x433f665c, 0x3263c949), TOBN(0xa6a76dd6, 0xa1f21409)}, {TOBN(0x17d32334, 0xcc7b4f79), TOBN(0xa1d03122, 0x06720e4a), TOBN(0xadb6661d, 0x81d9bed5), TOBN(0xf0d6fb02, 0x11db15d1)}}, {{TOBN(0x7fd11ad5, 0x1fb747d2), TOBN(0xab50f959, 0x3033762b), TOBN(0x2a7e711b, 0xfbefaf5a), TOBN(0xc7393278, 0x3fef2bbf)}, {TOBN(0xe29fa244, 0x0df6f9be), TOBN(0x9092757b, 0x71efd215), TOBN(0xee60e311, 0x4f3d6fd9), TOBN(0x338542d4, 0x0acfb78b)}}, {{TOBN(0x44a23f08, 0x38961a0f), TOBN(0x1426eade, 0x986987ca), TOBN(0x36e6ee2e, 0x4a863cc6), TOBN(0x48059420, 0x628b8b79)}, {TOBN(0x30303ad8, 0x7396e1de), TOBN(0x5c8bdc48, 0x38c5aad1), TOBN(0x3e40e11f, 0x5c8f5066), TOBN(0xabd6e768, 0x8d246bbd)}}, {{TOBN(0x68aa40bb, 0x23330a01), TOBN(0xd23f5ee4, 0xc34eafa0), TOBN(0x3bbee315, 0x5de02c21), TOBN(0x18dd4397, 0xd1d8dd06)}, {TOBN(0x3ba1939a, 0x122d7b44), TOBN(0xe6d3b40a, 0xa33870d6), 
TOBN(0x8e620f70, 0x1c4fe3f8), TOBN(0xf6bba1a5, 0xd3a50cbf)}}, {{TOBN(0x4a78bde5, 0xcfc0aee0), TOBN(0x847edc46, 0xc08c50bd), TOBN(0xbaa2439c, 0xad63c9b2), TOBN(0xceb4a728, 0x10fc2acb)}, {TOBN(0xa419e40e, 0x26da033d), TOBN(0x6cc3889d, 0x03e02683), TOBN(0x1cd28559, 0xfdccf725), TOBN(0x0fd7e0f1, 0x8d13d208)}}, {{TOBN(0x01b9733b, 0x1f0df9d4), TOBN(0x8cc2c5f3, 0xa2b5e4f3), TOBN(0x43053bfa, 0x3a304fd4), TOBN(0x8e87665c, 0x0a9f1aa7)}, {TOBN(0x087f29ec, 0xd73dc965), TOBN(0x15ace455, 0x3e9023db), TOBN(0x2370e309, 0x2bce28b4), TOBN(0xf9723442, 0xb6b1e84a)}}, {{TOBN(0xbeee662e, 0xb72d9f26), TOBN(0xb19396de, 0xf0e47109), TOBN(0x85b1fa73, 0xe13289d0), TOBN(0x436cf77e, 0x54e58e32)}, {TOBN(0x0ec833b3, 0xe990ef77), TOBN(0x7373e3ed, 0x1b11fc25), TOBN(0xbe0eda87, 0x0fc332ce), TOBN(0xced04970, 0x8d7ea856)}}, {{TOBN(0xf85ff785, 0x7e977ca0), TOBN(0xb66ee8da, 0xdfdd5d2b), TOBN(0xf5e37950, 0x905af461), TOBN(0x587b9090, 0x966d487c)}, {TOBN(0x6a198a1b, 0x32ba0127), TOBN(0xa7720e07, 0x141615ac), TOBN(0xa23f3499, 0x996ef2f2), TOBN(0xef5f64b4, 0x470bcb3d)}}, {{TOBN(0xa526a962, 0x92b8c559), TOBN(0x0c14aac0, 0x69740a0f), TOBN(0x0d41a9e3, 0xa6bdc0a5), TOBN(0x97d52106, 0x9c48aef4)}, {TOBN(0xcf16bd30, 0x3e7c253b), TOBN(0xcc834b1a, 0x47fdedc1), TOBN(0x7362c6e5, 0x373aab2e), TOBN(0x264ed85e, 0xc5f590ff)}}, {{TOBN(0x7a46d9c0, 0x66d41870), TOBN(0xa50c20b1, 0x4787ba09), TOBN(0x185e7e51, 0xe3d44635), TOBN(0xb3b3e080, 0x31e2d8dc)}, {TOBN(0xbed1e558, 0xa179e9d9), TOBN(0x2daa3f79, 0x74a76781), TOBN(0x4372baf2, 0x3a40864f), TOBN(0x46900c54, 0x4fe75cb5)}}, {{TOBN(0xb95f171e, 0xf76765d0), TOBN(0x4ad726d2, 0x95c87502), TOBN(0x2ec769da, 0x4d7c99bd), TOBN(0x5e2ddd19, 0xc36cdfa8)}, {TOBN(0xc22117fc, 0xa93e6dea), TOBN(0xe8a2583b, 0x93771123), TOBN(0xbe2f6089, 0xfa08a3a2), TOBN(0x4809d5ed, 0x8f0e1112)}}, {{TOBN(0x3b414aa3, 0xda7a095e), TOBN(0x9049acf1, 0x26f5aadd), TOBN(0x78d46a4d, 0x6be8b84a), TOBN(0xd66b1963, 0xb732b9b3)}, {TOBN(0x5c2ac2a0, 0xde6e9555), TOBN(0xcf52d098, 0xb5bd8770), TOBN(0x15a15fa6, 0x0fd28921), 
TOBN(0x56ccb81e, 0x8b27536d)}}, {{TOBN(0x0f0d8ab8, 0x9f4ccbb8), TOBN(0xed5f44d2, 0xdb221729), TOBN(0x43141988, 0x00bed10c), TOBN(0xc94348a4, 0x1d735b8b)}, {TOBN(0x79f3e9c4, 0x29ef8479), TOBN(0x4c13a4e3, 0x614c693f), TOBN(0x32c9af56, 0x8e143a14), TOBN(0xbc517799, 0xe29ac5c4)}}, {{TOBN(0x05e17992, 0x2774856f), TOBN(0x6e52fb05, 0x6c1bf55f), TOBN(0xaeda4225, 0xe4f19e16), TOBN(0x70f4728a, 0xaf5ccb26)}, {TOBN(0x5d2118d1, 0xb2947f22), TOBN(0xc827ea16, 0x281d6fb9), TOBN(0x8412328d, 0x8cf0eabd), TOBN(0x45ee9fb2, 0x03ef9dcf)}}, {{TOBN(0x8e700421, 0xbb937d63), TOBN(0xdf8ff2d5, 0xcc4b37a6), TOBN(0xa4c0d5b2, 0x5ced7b68), TOBN(0x6537c1ef, 0xc7308f59)}, {TOBN(0x25ce6a26, 0x3b37f8e8), TOBN(0x170e9a9b, 0xdeebc6ce), TOBN(0xdd037952, 0x8728d72c), TOBN(0x445b0e55, 0x850154bc)}}, {{TOBN(0x4b7d0e06, 0x83a7337b), TOBN(0x1e3416d4, 0xffecf249), TOBN(0x24840eff, 0x66a2b71f), TOBN(0xd0d9a50a, 0xb37cc26d)}, {TOBN(0xe2198150, 0x6fe28ef7), TOBN(0x3cc5ef16, 0x23324c7f), TOBN(0x220f3455, 0x769b5263), TOBN(0xe2ade2f1, 0xa10bf475)}}, {{TOBN(0x28cd20fa, 0x458d3671), TOBN(0x1549722c, 0x2dc4847b), TOBN(0x6dd01e55, 0x591941e3), TOBN(0x0e6fbcea, 0x27128ccb)}, {TOBN(0xae1a1e6b, 0x3bef0262), TOBN(0xfa8c472c, 0x8f54e103), TOBN(0x7539c0a8, 0x72c052ec), TOBN(0xd7b27369, 0x5a3490e9)}}, {{TOBN(0x143fe1f1, 0x71684349), TOBN(0x36b4722e, 0x32e19b97), TOBN(0xdc059227, 0x90980aff), TOBN(0x175c9c88, 0x9e13d674)}, {TOBN(0xa7de5b22, 0x6e6bfdb1), TOBN(0x5ea5b7b2, 0xbedb4b46), TOBN(0xd5570191, 0xd34a6e44), TOBN(0xfcf60d2e, 0xa24ff7e6)}}, {{TOBN(0x614a392d, 0x677819e1), TOBN(0x7be74c7e, 0xaa5a29e8), TOBN(0xab50fece, 0x63c85f3f), TOBN(0xaca2e2a9, 0x46cab337)}, {TOBN(0x7f700388, 0x122a6fe3), TOBN(0xdb69f703, 0x882a04a8), TOBN(0x9a77935d, 0xcf7aed57), TOBN(0xdf16207c, 0x8d91c86f)}}, {{TOBN(0x2fca49ab, 0x63ed9998), TOBN(0xa3125c44, 0xa77ddf96), TOBN(0x05dd8a86, 0x24344072), TOBN(0xa023dda2, 0xfec3fb56)}, {TOBN(0x421b41fc, 0x0c743032), TOBN(0x4f2120c1, 0x5e438639), TOBN(0xfb7cae51, 0xc83c1b07), TOBN(0xb2370caa, 0xcac2171a)}}, 
{{TOBN(0x2eb2d962, 0x6cc820fb), TOBN(0x59feee5c, 0xb85a44bf), TOBN(0x94620fca, 0x5b6598f0), TOBN(0x6b922cae, 0x7e314051)}, {TOBN(0xff8745ad, 0x106bed4e), TOBN(0x546e71f5, 0xdfa1e9ab), TOBN(0x935c1e48, 0x1ec29487), TOBN(0x9509216c, 0x4d936530)}}, {{TOBN(0xc7ca3067, 0x85c9a2db), TOBN(0xd6ae5152, 0x6be8606f), TOBN(0x09dbcae6, 0xe14c651d), TOBN(0xc9536e23, 0x9bc32f96)}, {TOBN(0xa90535a9, 0x34521b03), TOBN(0xf39c526c, 0x878756ff), TOBN(0x383172ec, 0x8aedf03c), TOBN(0x20a8075e, 0xefe0c034)}}, {{TOBN(0xf22f9c62, 0x64026422), TOBN(0x8dd10780, 0x24b9d076), TOBN(0x944c742a, 0x3bef2950), TOBN(0x55b9502e, 0x88a2b00b)}, {TOBN(0xa59e14b4, 0x86a09817), TOBN(0xa39dd3ac, 0x47bb4071), TOBN(0x55137f66, 0x3be0592f), TOBN(0x07fcafd4, 0xc9e63f5b)}}, {{TOBN(0x963652ee, 0x346eb226), TOBN(0x7dfab085, 0xec2facb7), TOBN(0x273bf2b8, 0x691add26), TOBN(0x30d74540, 0xf2b46c44)}, {TOBN(0x05e8e73e, 0xf2c2d065), TOBN(0xff9b8a00, 0xd42eeac9), TOBN(0x2fcbd205, 0x97209d22), TOBN(0xeb740ffa, 0xde14ea2c)}}, {{TOBN(0xc71ff913, 0xa8aef518), TOBN(0x7bfc74bb, 0xfff4cfa2), TOBN(0x1716680c, 0xb6b36048), TOBN(0x121b2cce, 0x9ef79af1)}, {TOBN(0xbff3c836, 0xa01eb3d3), TOBN(0x50eb1c6a, 0x5f79077b), TOBN(0xa48c32d6, 0xa004bbcf), TOBN(0x47a59316, 0x7d64f61d)}}, {{TOBN(0x6068147f, 0x93102016), TOBN(0x12c5f654, 0x94d12576), TOBN(0xefb071a7, 0xc9bc6b91), TOBN(0x7c2da0c5, 0x6e23ea95)}, {TOBN(0xf4fd45b6, 0xd4a1dd5d), TOBN(0x3e7ad9b6, 0x9122b13c), TOBN(0x342ca118, 0xe6f57a48), TOBN(0x1c2e94a7, 0x06f8288f)}}, {{TOBN(0x99e68f07, 0x5a97d231), TOBN(0x7c80de97, 0x4d838758), TOBN(0xbce0f5d0, 0x05872727), TOBN(0xbe5d95c2, 0x19c4d016)}, {TOBN(0x921d5cb1, 0x9c2492ee), TOBN(0x42192dc1, 0x404d6fb3), TOBN(0x4c84dcd1, 0x32f988d3), TOBN(0xde26d61f, 0xa17b8e85)}}, {{TOBN(0xc466dcb6, 0x137c7408), TOBN(0x9a38d7b6, 0x36a266da), TOBN(0x7ef5cb06, 0x83bebf1b), TOBN(0xe5cdcbbf, 0x0fd014e3)}, {TOBN(0x30aa376d, 0xf65965a0), TOBN(0x60fe88c2, 0xebb3e95e), TOBN(0x33fd0b61, 0x66ee6f20), TOBN(0x8827dcdb, 0x3f41f0a0)}}, {{TOBN(0xbf8a9d24, 0x0c56c690), 
TOBN(0x40265dad, 0xddb7641d), TOBN(0x522b05bf, 0x3a6b662b), TOBN(0x466d1dfe, 0xb1478c9b)}, {TOBN(0xaa616962, 0x1484469b), TOBN(0x0db60549, 0x02df8f9f), TOBN(0xc37bca02, 0x3cb8bf51), TOBN(0x5effe346, 0x21371ce8)}}, {{TOBN(0xe8f65264, 0xff112c32), TOBN(0x8a9c736d, 0x7b971fb2), TOBN(0xa4f19470, 0x7b75080d), TOBN(0xfc3f2c5a, 0x8839c59b)}, {TOBN(0x1d6c777e, 0x5aeb49c2), TOBN(0xf3db034d, 0xda1addfe), TOBN(0xd76fee5a, 0x5535affc), TOBN(0x0853ac70, 0xb92251fd)}}, {{TOBN(0x37e3d594, 0x8b2a29d5), TOBN(0x28f1f457, 0x4de00ddb), TOBN(0x8083c1b5, 0xf42c328b), TOBN(0xd8ef1d8f, 0xe493c73b)}, {TOBN(0x96fb6260, 0x41dc61bd), TOBN(0xf74e8a9d, 0x27ee2f8a), TOBN(0x7c605a80, 0x2c946a5d), TOBN(0xeed48d65, 0x3839ccfd)}}, {{TOBN(0x9894344f, 0x3a29467a), TOBN(0xde81e949, 0xc51eba6d), TOBN(0xdaea066b, 0xa5e5c2f2), TOBN(0x3fc8a614, 0x08c8c7b3)}, {TOBN(0x7adff88f, 0x06d0de9f), TOBN(0xbbc11cf5, 0x3b75ce0a), TOBN(0x9fbb7acc, 0xfbbc87d5), TOBN(0xa1458e26, 0x7badfde2)}}}, {{{TOBN(0x1cb43668, 0xe039c256), TOBN(0x5f26fb8b, 0x7c17fd5d), TOBN(0xeee426af, 0x79aa062b), TOBN(0x072002d0, 0xd78fbf04)}, {TOBN(0x4c9ca237, 0xe84fb7e3), TOBN(0xb401d8a1, 0x0c82133d), TOBN(0xaaa52592, 0x6d7e4181), TOBN(0xe9430833, 0x73dbb152)}}, {{TOBN(0xf92dda31, 0xbe24319a), TOBN(0x03f7d28b, 0xe095a8e7), TOBN(0xa52fe840, 0x98782185), TOBN(0x276ddafe, 0x29c24dbc)}, {TOBN(0x80cd5496, 0x1d7a64eb), TOBN(0xe4360889, 0x7f1dbe42), TOBN(0x2f81a877, 0x8438d2d5), TOBN(0x7e4d52a8, 0x85169036)}}, {{TOBN(0x19e3d5b1, 0x1d59715d), TOBN(0xc7eaa762, 0xd788983e), TOBN(0xe5a730b0, 0xabf1f248), TOBN(0xfbab8084, 0xfae3fd83)}, {TOBN(0x65e50d21, 0x53765b2f), TOBN(0xbdd4e083, 0xfa127f3d), TOBN(0x9cf3c074, 0x397b1b10), TOBN(0x59f8090c, 0xb1b59fd3)}}, {{TOBN(0x7b15fd9d, 0x615faa8f), TOBN(0x8fa1eb40, 0x968554ed), TOBN(0x7bb4447e, 0x7aa44882), TOBN(0x2bb2d0d1, 0x029fff32)}, {TOBN(0x075e2a64, 0x6caa6d2f), TOBN(0x8eb879de, 0x22e7351b), TOBN(0xbcd5624e, 0x9a506c62), TOBN(0x218eaef0, 0xa87e24dc)}}, {{TOBN(0x37e56847, 0x44ddfa35), TOBN(0x9ccfc5c5, 0xdab3f747), 
TOBN(0x9ac1df3f, 0x1ee96cf4), TOBN(0x0c0571a1, 0x3b480b8f)}, {TOBN(0x2fbeb3d5, 0x4b3a7b3c), TOBN(0x35c03669, 0x5dcdbb99), TOBN(0x52a0f5dc, 0xb2415b3a), TOBN(0xd57759b4, 0x4413ed9a)}}, {{TOBN(0x1fe647d8, 0x3d30a2c5), TOBN(0x0857f77e, 0xf78a81dc), TOBN(0x11d5a334, 0x131a4a9b), TOBN(0xc0a94af9, 0x29d393f5)}, {TOBN(0xbc3a5c0b, 0xdaa6ec1a), TOBN(0xba9fe493, 0x88d2d7ed), TOBN(0xbb4335b4, 0xbb614797), TOBN(0x991c4d68, 0x72f83533)}}, {{TOBN(0x53258c28, 0xd2f01cb3), TOBN(0x93d6eaa3, 0xd75db0b1), TOBN(0x419a2b0d, 0xe87d0db4), TOBN(0xa1e48f03, 0xd8fe8493)}, {TOBN(0xf747faf6, 0xc508b23a), TOBN(0xf137571a, 0x35d53549), TOBN(0x9f5e58e2, 0xfcf9b838), TOBN(0xc7186cee, 0xa7fd3cf5)}}, {{TOBN(0x77b868ce, 0xe978a1d3), TOBN(0xe3a68b33, 0x7ab92d04), TOBN(0x51029794, 0x87a5b862), TOBN(0x5f0606c3, 0x3a61d41d)}, {TOBN(0x2814be27, 0x6f9326f1), TOBN(0x2f521c14, 0xc6fe3c2e), TOBN(0x17464d7d, 0xacdf7351), TOBN(0x10f5f9d3, 0x777f7e44)}}, {{TOBN(0xce8e616b, 0x269fb37d), TOBN(0xaaf73804, 0x7de62de5), TOBN(0xaba11175, 0x4fdd4153), TOBN(0x515759ba, 0x3770b49b)}, {TOBN(0x8b09ebf8, 0xaa423a61), TOBN(0x592245a1, 0xcd41fb92), TOBN(0x1cba8ec1, 0x9b4c8936), TOBN(0xa87e91e3, 0xaf36710e)}}, {{TOBN(0x1fd84ce4, 0x3d34a2e3), TOBN(0xee3759ce, 0xb43b5d61), TOBN(0x895bc78c, 0x619186c7), TOBN(0xf19c3809, 0xcbb9725a)}, {TOBN(0xc0be21aa, 0xde744b1f), TOBN(0xa7d222b0, 0x60f8056b), TOBN(0x74be6157, 0xb23efe11), TOBN(0x6fab2b4f, 0x0cd68253)}}, {{TOBN(0xad33ea5f, 0x4bf1d725), TOBN(0x9c1d8ee2, 0x4f6c950f), TOBN(0x544ee78a, 0xa377af06), TOBN(0x54f489bb, 0x94a113e1)}, {TOBN(0x8f11d634, 0x992fb7e8), TOBN(0x0169a7aa, 0xa2a44347), TOBN(0x1d49d4af, 0x95020e00), TOBN(0x95945722, 0xe08e120b)}}, {{TOBN(0xb6e33878, 0xa4d32282), TOBN(0xe36e029d, 0x48020ae7), TOBN(0xe05847fb, 0x37a9b750), TOBN(0xf876812c, 0xb29e3819)}, {TOBN(0x84ad138e, 0xd23a17f0), TOBN(0x6d7b4480, 0xf0b3950e), TOBN(0xdfa8aef4, 0x2fd67ae0), TOBN(0x8d3eea24, 0x52333af6)}}, {{TOBN(0x0d052075, 0xb15d5acc), TOBN(0xc6d9c79f, 0xbd815bc4), TOBN(0x8dcafd88, 0xdfa36cf2), 
TOBN(0x908ccbe2, 0x38aa9070)}, {TOBN(0x638722c4, 0xba35afce), TOBN(0x5a3da8b0, 0xfd6abf0b), TOBN(0x2dce252c, 0xc9c335c1), TOBN(0x84e7f0de, 0x65aa799b)}}, {{TOBN(0x2101a522, 0xb99a72cb), TOBN(0x06de6e67, 0x87618016), TOBN(0x5ff8c7cd, 0xe6f3653e), TOBN(0x0a821ab5, 0xc7a6754a)}, {TOBN(0x7e3fa52b, 0x7cb0b5a2), TOBN(0xa7fb121c, 0xc9048790), TOBN(0x1a725020, 0x06ce053a), TOBN(0xb490a31f, 0x04e929b0)}}, {{TOBN(0xe17be47d, 0x62dd61ad), TOBN(0x781a961c, 0x6be01371), TOBN(0x1063bfd3, 0xdae3cbba), TOBN(0x35647406, 0x7f73c9ba)}, {TOBN(0xf50e957b, 0x2736a129), TOBN(0xa6313702, 0xed13f256), TOBN(0x9436ee65, 0x3a19fcc5), TOBN(0xcf2bdb29, 0xe7a4c8b6)}}, {{TOBN(0xb06b1244, 0xc5f95cd8), TOBN(0xda8c8af0, 0xf4ab95f4), TOBN(0x1bae59c2, 0xb9e5836d), TOBN(0x07d51e7e, 0x3acffffc)}, {TOBN(0x01e15e6a, 0xc2ccbcda), TOBN(0x3bc1923f, 0x8528c3e0), TOBN(0x43324577, 0xa49fead4), TOBN(0x61a1b884, 0x2aa7a711)}}, {{TOBN(0xf9a86e08, 0x700230ef), TOBN(0x0af585a1, 0xbd19adf8), TOBN(0x7645f361, 0xf55ad8f2), TOBN(0x6e676223, 0x46c3614c)}, {TOBN(0x23cb257c, 0x4e774d3f), TOBN(0x82a38513, 0xac102d1b), TOBN(0x9bcddd88, 0x7b126aa5), TOBN(0xe716998b, 0xeefd3ee4)}}, {{TOBN(0x4239d571, 0xfb167583), TOBN(0xdd011c78, 0xd16c8f8a), TOBN(0x271c2895, 0x69a27519), TOBN(0x9ce0a3b7, 0xd2d64b6a)}, {TOBN(0x8c977289, 0xd5ec6738), TOBN(0xa3b49f9a, 0x8840ef6b), TOBN(0x808c14c9, 0x9a453419), TOBN(0x5c00295b, 0x0cf0a2d5)}}, {{TOBN(0x524414fb, 0x1d4bcc76), TOBN(0xb07691d2, 0x459a88f1), TOBN(0x77f43263, 0xf70d110f), TOBN(0x64ada5e0, 0xb7abf9f3)}, {TOBN(0xafd0f94e, 0x5b544cf5), TOBN(0xb4a13a15, 0xfd2713fe), TOBN(0xb99b7d6e, 0x250c74f4), TOBN(0x097f2f73, 0x20324e45)}}, {{TOBN(0x994b37d8, 0xaffa8208), TOBN(0xc3c31b0b, 0xdc29aafc), TOBN(0x3da74651, 0x7a3a607f), TOBN(0xd8e1b8c1, 0xfe6955d6)}, {TOBN(0x716e1815, 0xc8418682), TOBN(0x541d487f, 0x7dc91d97), TOBN(0x48a04669, 0xc6996982), TOBN(0xf39cab15, 0x83a6502e)}}, {{TOBN(0x025801a0, 0xe68db055), TOBN(0xf3569758, 0xba3338d5), TOBN(0xb0c8c0aa, 0xee2afa84), TOBN(0x4f6985d3, 0xfb6562d1)}, 
{TOBN(0x351f1f15, 0x132ed17a), TOBN(0x510ed0b4, 0xc04365fe), TOBN(0xa3f98138, 0xe5b1f066), TOBN(0xbc9d95d6, 0x32df03dc)}}, {{TOBN(0xa83ccf6e, 0x19abd09e), TOBN(0x0b4097c1, 0x4ff17edb), TOBN(0x58a5c478, 0xd64a06ce), TOBN(0x2ddcc3fd, 0x544a58fd)}, {TOBN(0xd449503d, 0x9e8153b8), TOBN(0x3324fd02, 0x7774179b), TOBN(0xaf5d47c8, 0xdbd9120c), TOBN(0xeb860162, 0x34fa94db)}}, {{TOBN(0x5817bdd1, 0x972f07f4), TOBN(0xe5579e2e, 0xd27bbceb), TOBN(0x86847a1f, 0x5f11e5a6), TOBN(0xb39ed255, 0x7c3cf048)}, {TOBN(0xe1076417, 0xa2f62e55), TOBN(0x6b9ab38f, 0x1bcf82a2), TOBN(0x4bb7c319, 0x7aeb29f9), TOBN(0xf6d17da3, 0x17227a46)}}, {{TOBN(0xab53ddbd, 0x0f968c00), TOBN(0xa03da7ec, 0x000c880b), TOBN(0x7b239624, 0x6a9ad24d), TOBN(0x612c0401, 0x01ec60d0)}, {TOBN(0x70d10493, 0x109f5df1), TOBN(0xfbda4030, 0x80af7550), TOBN(0x30b93f95, 0xc6b9a9b3), TOBN(0x0c74ec71, 0x007d9418)}}, {{TOBN(0x94175564, 0x6edb951f), TOBN(0x5f4a9d78, 0x7f22c282), TOBN(0xb7870895, 0xb38d1196), TOBN(0xbc593df3, 0xa228ce7c)}, {TOBN(0xc78c5bd4, 0x6af3641a), TOBN(0x7802200b, 0x3d9b3dcc), TOBN(0x0dc73f32, 0x8be33304), TOBN(0x847ed87d, 0x61ffb79a)}}, {{TOBN(0xf85c974e, 0x6d671192), TOBN(0x1e14100a, 0xde16f60f), TOBN(0x45cb0d5a, 0x95c38797), TOBN(0x18923bba, 0x9b022da4)}, {TOBN(0xef2be899, 0xbbe7e86e), TOBN(0x4a1510ee, 0x216067bf), TOBN(0xd98c8154, 0x84d5ce3e), TOBN(0x1af777f0, 0xf92a2b90)}}, {{TOBN(0x9fbcb400, 0x4ef65724), TOBN(0x3e04a4c9, 0x3c0ca6fe), TOBN(0xfb3e2cb5, 0x55002994), TOBN(0x1f3a93c5, 0x5363ecab)}, {TOBN(0x1fe00efe, 0x3923555b), TOBN(0x744bedd9, 0x1e1751ea), TOBN(0x3fb2db59, 0x6ab69357), TOBN(0x8dbd7365, 0xf5e6618b)}}, {{TOBN(0x99d53099, 0xdf1ea40e), TOBN(0xb3f24a0b, 0x57d61e64), TOBN(0xd088a198, 0x596eb812), TOBN(0x22c8361b, 0x5762940b)}, {TOBN(0x66f01f97, 0xf9c0d95c), TOBN(0x88461172, 0x8e43cdae), TOBN(0x11599a7f, 0xb72b15c3), TOBN(0x135a7536, 0x420d95cc)}}, {{TOBN(0x2dcdf0f7, 0x5f7ae2f6), TOBN(0x15fc6e1d, 0xd7fa6da2), TOBN(0x81ca829a, 0xd1d441b6), TOBN(0x84c10cf8, 0x04a106b6)}, {TOBN(0xa9b26c95, 0xa73fbbd0), 
TOBN(0x7f24e0cb, 0x4d8f6ee8), TOBN(0x48b45937, 0x1e25a043), TOBN(0xf8a74fca, 0x036f3dfe)}}, {{TOBN(0x1ed46585, 0xc9f84296), TOBN(0x7fbaa8fb, 0x3bc278b0), TOBN(0xa8e96cd4, 0x6c4fcbd0), TOBN(0x940a1202, 0x73b60a5f)}, {TOBN(0x34aae120, 0x55a4aec8), TOBN(0x550e9a74, 0xdbd742f0), TOBN(0x794456d7, 0x228c68ab), TOBN(0x492f8868, 0xa4e25ec6)}}, {{TOBN(0x682915ad, 0xb2d8f398), TOBN(0xf13b51cc, 0x5b84c953), TOBN(0xcda90ab8, 0x5bb917d6), TOBN(0x4b615560, 0x4ea3dee1)}, {TOBN(0x578b4e85, 0x0a52c1c8), TOBN(0xeab1a695, 0x20b75fc4), TOBN(0x60c14f3c, 0xaa0bb3c6), TOBN(0x220f448a, 0xb8216094)}}, {{TOBN(0x4fe7ee31, 0xb0e63d34), TOBN(0xf4600572, 0xa9e54fab), TOBN(0xc0493334, 0xd5e7b5a4), TOBN(0x8589fb92, 0x06d54831)}, {TOBN(0xaa70f5cc, 0x6583553a), TOBN(0x0879094a, 0xe25649e5), TOBN(0xcc904507, 0x10044652), TOBN(0xebb0696d, 0x02541c4f)}}, {{TOBN(0x5a171fde, 0xb9718710), TOBN(0x38f1bed8, 0xf374a9f5), TOBN(0xc8c582e1, 0xba39bdc1), TOBN(0xfc457b0a, 0x908cc0ce)}, {TOBN(0x9a187fd4, 0x883841e2), TOBN(0x8ec25b39, 0x38725381), TOBN(0x2553ed05, 0x96f84395), TOBN(0x095c7661, 0x6f6c6897)}}, {{TOBN(0x917ac85c, 0x4bdc5610), TOBN(0xb2885fe4, 0x179eb301), TOBN(0x5fc65547, 0x8b78bdcc), TOBN(0x4a9fc893, 0xe59e4699)}, {TOBN(0xbb7ff0cd, 0x3ce299af), TOBN(0x195be9b3, 0xadf38b20), TOBN(0x6a929c87, 0xd38ddb8f), TOBN(0x55fcc99c, 0xb21a51b9)}}, {{TOBN(0x2b695b4c, 0x721a4593), TOBN(0xed1e9a15, 0x768eaac2), TOBN(0xfb63d71c, 0x7489f914), TOBN(0xf98ba31c, 0x78118910)}, {TOBN(0x80291373, 0x9b128eb4), TOBN(0x7801214e, 0xd448af4a), TOBN(0xdbd2e22b, 0x55418dd3), TOBN(0xeffb3c0d, 0xd3998242)}}, {{TOBN(0xdfa6077c, 0xc7bf3827), TOBN(0xf2165bcb, 0x47f8238f), TOBN(0xfe37cf68, 0x8564d554), TOBN(0xe5f825c4, 0x0a81fb98)}, {TOBN(0x43cc4f67, 0xffed4d6f), TOBN(0xbc609578, 0xb50a34b0), TOBN(0x8aa8fcf9, 0x5041faf1), TOBN(0x5659f053, 0x651773b6)}}, {{TOBN(0xe87582c3, 0x6044d63b), TOBN(0xa6089409, 0x0cdb0ca0), TOBN(0x8c993e0f, 0xbfb2bcf6), TOBN(0xfc64a719, 0x45985cfc)}, {TOBN(0x15c4da80, 0x83dbedba), TOBN(0x804ae112, 0x2be67df7), 
TOBN(0xda4c9658, 0xa23defde), TOBN(0x12002ddd, 0x5156e0d3)}}, {{TOBN(0xe68eae89, 0x5dd21b96), TOBN(0x8b99f28b, 0xcf44624d), TOBN(0x0ae00808, 0x1ec8897a), TOBN(0xdd0a9303, 0x6712f76e)}, {TOBN(0x96237522, 0x4e233de4), TOBN(0x192445b1, 0x2b36a8a5), TOBN(0xabf9ff74, 0x023993d9), TOBN(0x21f37bf4, 0x2aad4a8f)}}, {{TOBN(0x340a4349, 0xf8bd2bbd), TOBN(0x1d902cd9, 0x4868195d), TOBN(0x3d27bbf1, 0xe5fdb6f1), TOBN(0x7a5ab088, 0x124f9f1c)}, {TOBN(0xc466ab06, 0xf7a09e03), TOBN(0x2f8a1977, 0x31f2c123), TOBN(0xda355dc7, 0x041b6657), TOBN(0xcb840d12, 0x8ece2a7c)}}, {{TOBN(0xb600ad9f, 0x7db32675), TOBN(0x78fea133, 0x07a06f1b), TOBN(0x5d032269, 0xb31f6094), TOBN(0x07753ef5, 0x83ec37aa)}, {TOBN(0x03485aed, 0x9c0bea78), TOBN(0x41bb3989, 0xbc3f4524), TOBN(0x09403761, 0x697f726d), TOBN(0x6109beb3, 0xdf394820)}}, {{TOBN(0x804111ea, 0x3b6d1145), TOBN(0xb6271ea9, 0xa8582654), TOBN(0x619615e6, 0x24e66562), TOBN(0xa2554945, 0xd7b6ad9c)}, {TOBN(0xd9c4985e, 0x99bfe35f), TOBN(0x9770ccc0, 0x7b51cdf6), TOBN(0x7c327013, 0x92881832), TOBN(0x8777d45f, 0x286b26d1)}}, {{TOBN(0x9bbeda22, 0xd847999d), TOBN(0x03aa33b6, 0xc3525d32), TOBN(0x4b7b96d4, 0x28a959a1), TOBN(0xbb3786e5, 0x31e5d234)}, {TOBN(0xaeb5d3ce, 0x6961f247), TOBN(0x20aa85af, 0x02f93d3f), TOBN(0x9cd1ad3d, 0xd7a7ae4f), TOBN(0xbf6688f0, 0x781adaa8)}}, {{TOBN(0xb1b40e86, 0x7469cead), TOBN(0x1904c524, 0x309fca48), TOBN(0x9b7312af, 0x4b54bbc7), TOBN(0xbe24bf8f, 0x593affa2)}, {TOBN(0xbe5e0790, 0xbd98764b), TOBN(0xa0f45f17, 0xa26e299e), TOBN(0x4af0d2c2, 0x6b8fe4c7), TOBN(0xef170db1, 0x8ae8a3e6)}}, {{TOBN(0x0e8d61a0, 0x29e0ccc1), TOBN(0xcd53e87e, 0x60ad36ca), TOBN(0x328c6623, 0xc8173822), TOBN(0x7ee1767d, 0xa496be55)}, {TOBN(0x89f13259, 0x648945af), TOBN(0x9e45a5fd, 0x25c8009c), TOBN(0xaf2febd9, 0x1f61ab8c), TOBN(0x43f6bc86, 0x8a275385)}}, {{TOBN(0x87792348, 0xf2142e79), TOBN(0x17d89259, 0xc6e6238a), TOBN(0x7536d2f6, 0x4a839d9b), TOBN(0x1f428fce, 0x76a1fbdc)}, {TOBN(0x1c109601, 0x0db06dfe), TOBN(0xbfc16bc1, 0x50a3a3cc), TOBN(0xf9cbd9ec, 0x9b30f41b), 
TOBN(0x5b5da0d6, 0x00138cce)}}, {{TOBN(0xec1d0a48, 0x56ef96a7), TOBN(0xb47eb848, 0x982bf842), TOBN(0x66deae32, 0xec3f700d), TOBN(0x4e43c42c, 0xaa1181e0)}, {TOBN(0xa1d72a31, 0xd1a4aa2a), TOBN(0x440d4668, 0xc004f3ce), TOBN(0x0d6a2d3b, 0x45fe8a7a), TOBN(0x820e52e2, 0xfb128365)}}, {{TOBN(0x29ac5fcf, 0x25e51b09), TOBN(0x180cd2bf, 0x2023d159), TOBN(0xa9892171, 0xa1ebf90e), TOBN(0xf97c4c87, 0x7c132181)}, {TOBN(0x9f1dc724, 0xc03dbb7e), TOBN(0xae043765, 0x018cbbe4), TOBN(0xfb0b2a36, 0x0767d153), TOBN(0xa8e2f4d6, 0x249cbaeb)}}, {{TOBN(0x172a5247, 0xd95ea168), TOBN(0x1758fada, 0x2970764a), TOBN(0xac803a51, 0x1d978169), TOBN(0x299cfe2e, 0xde77e01b)}, {TOBN(0x652a1e17, 0xb0a98927), TOBN(0x2e26e1d1, 0x20014495), TOBN(0x7ae0af9f, 0x7175b56a), TOBN(0xc2e22a80, 0xd64b9f95)}}, {{TOBN(0x4d0ff9fb, 0xd90a060a), TOBN(0x496a27db, 0xbaf38085), TOBN(0x32305401, 0xda776bcf), TOBN(0xb8cdcef6, 0x725f209e)}, {TOBN(0x61ba0f37, 0x436a0bba), TOBN(0x263fa108, 0x76860049), TOBN(0x92beb98e, 0xda3542cf), TOBN(0xa2d4d14a, 0xd5849538)}}, {{TOBN(0x989b9d68, 0x12e9a1bc), TOBN(0x61d9075c, 0x5f6e3268), TOBN(0x352c6aa9, 0x99ace638), TOBN(0xde4e4a55, 0x920f43ff)}, {TOBN(0xe5e4144a, 0xd673c017), TOBN(0x667417ae, 0x6f6e05ea), TOBN(0x613416ae, 0xdcd1bd56), TOBN(0x5eb36201, 0x86693711)}}, {{TOBN(0x2d7bc504, 0x3a1aa914), TOBN(0x175a1299, 0x76dc5975), TOBN(0xe900e0f2, 0x3fc8125c), TOBN(0x569ef68c, 0x11198875)}, {TOBN(0x9012db63, 0x63a113b4), TOBN(0xe3bd3f56, 0x98835766), TOBN(0xa5c94a52, 0x76412dea), TOBN(0xad9e2a09, 0xaa735e5c)}}, {{TOBN(0x405a984c, 0x508b65e9), TOBN(0xbde4a1d1, 0x6df1a0d1), TOBN(0x1a9433a1, 0xdfba80da), TOBN(0xe9192ff9, 0x9440ad2e)}, {TOBN(0x9f649696, 0x5099fe92), TOBN(0x25ddb65c, 0x0b27a54a), TOBN(0x178279dd, 0xc590da61), TOBN(0x5479a999, 0xfbde681a)}}, {{TOBN(0xd0e84e05, 0x013fe162), TOBN(0xbe11dc92, 0x632d471b), TOBN(0xdf0b0c45, 0xfc0e089f), TOBN(0x04fb15b0, 0x4c144025)}, {TOBN(0xa61d5fc2, 0x13c99927), TOBN(0xa033e9e0, 0x3de2eb35), TOBN(0xf8185d5c, 0xb8dacbb4), TOBN(0x9a88e265, 0x8644549d)}}, 
{{TOBN(0xf717af62, 0x54671ff6), TOBN(0x4bd4241b, 0x5fa58603), TOBN(0x06fba40b, 0xe67773c0), TOBN(0xc1d933d2, 0x6a2847e9)}, {TOBN(0xf4f5acf3, 0x689e2c70), TOBN(0x92aab0e7, 0x46bafd31), TOBN(0x798d76aa, 0x3473f6e5), TOBN(0xcc6641db, 0x93141934)}}, {{TOBN(0xcae27757, 0xd31e535e), TOBN(0x04cc43b6, 0x87c2ee11), TOBN(0x8d1f9675, 0x2e029ffa), TOBN(0xc2150672, 0xe4cc7a2c)}, {TOBN(0x3b03c1e0, 0x8d68b013), TOBN(0xa9d6816f, 0xedf298f3), TOBN(0x1bfbb529, 0xa2804464), TOBN(0x95a52fae, 0x5db22125)}}, {{TOBN(0x55b32160, 0x0e1cb64e), TOBN(0x004828f6, 0x7e7fc9fe), TOBN(0x13394b82, 0x1bb0fb93), TOBN(0xb6293a2d, 0x35f1a920)}, {TOBN(0xde35ef21, 0xd145d2d9), TOBN(0xbe6225b3, 0xbb8fa603), TOBN(0x00fc8f6b, 0x32cf252d), TOBN(0xa28e52e6, 0x117cf8c2)}}, {{TOBN(0x9d1dc89b, 0x4c371e6d), TOBN(0xcebe0675, 0x36ef0f28), TOBN(0x5de05d09, 0xa4292f81), TOBN(0xa8303593, 0x353e3083)}, {TOBN(0xa1715b0a, 0x7e37a9bb), TOBN(0x8c56f61e, 0x2b8faec3), TOBN(0x52507431, 0x33c9b102), TOBN(0x0130cefc, 0xa44431f0)}}, {{TOBN(0x56039fa0, 0xbd865cfb), TOBN(0x4b03e578, 0xbc5f1dd7), TOBN(0x40edf2e4, 0xbabe7224), TOBN(0xc752496d, 0x3a1988f6)}, {TOBN(0xd1572d3b, 0x564beb6b), TOBN(0x0db1d110, 0x39a1c608), TOBN(0x568d1934, 0x16f60126), TOBN(0x05ae9668, 0xf354af33)}}, {{TOBN(0x19de6d37, 0xc92544f2), TOBN(0xcc084353, 0xa35837d5), TOBN(0xcbb6869c, 0x1a514ece), TOBN(0xb633e728, 0x2e1d1066)}, {TOBN(0xf15dd69f, 0x936c581c), TOBN(0x96e7b8ce, 0x7439c4f9), TOBN(0x5e676f48, 0x2e448a5b), TOBN(0xb2ca7d5b, 0xfd916bbb)}}, {{TOBN(0xd55a2541, 0xf5024025), TOBN(0x47bc5769, 0xe4c2d937), TOBN(0x7d31b92a, 0x0362189f), TOBN(0x83f3086e, 0xef7816f9)}, {TOBN(0xf9f46d94, 0xb587579a), TOBN(0xec2d22d8, 0x30e76c5f), TOBN(0x27d57461, 0xb000ffcf), TOBN(0xbb7e65f9, 0x364ffc2c)}}, {{TOBN(0x7c7c9477, 0x6652a220), TOBN(0x61618f89, 0xd696c981), TOBN(0x5021701d, 0x89effff3), TOBN(0xf2c8ff8e, 0x7c314163)}, {TOBN(0x2da413ad, 0x8efb4d3e), TOBN(0x937b5adf, 0xce176d95), TOBN(0x22867d34, 0x2a67d51c), TOBN(0x262b9b10, 0x18eb3ac9)}}, {{TOBN(0x4e314fe4, 0xc43ff28b), 
TOBN(0x76476627, 0x6a664e7a), TOBN(0x3e90e40b, 0xb7a565c2), TOBN(0x8588993a, 0xc1acf831)}, {TOBN(0xd7b501d6, 0x8f938829), TOBN(0x996627ee, 0x3edd7d4c), TOBN(0x37d44a62, 0x90cd34c7), TOBN(0xa8327499, 0xf3833e8d)}}, {{TOBN(0x2e18917d, 0x4bf50353), TOBN(0x85dd726b, 0x556765fb), TOBN(0x54fe65d6, 0x93d5ab66), TOBN(0x3ddbaced, 0x915c25fe)}, {TOBN(0xa799d9a4, 0x12f22e85), TOBN(0xe2a24867, 0x6d06f6bc), TOBN(0xf4f1ee56, 0x43ca1637), TOBN(0xfda2828b, 0x61ece30a)}}, {{TOBN(0x758c1a3e, 0xa2dee7a6), TOBN(0xdcde2f3c, 0x734b2284), TOBN(0xaba445d2, 0x4eaba6ad), TOBN(0x35aaf668, 0x76cee0a7)}, {TOBN(0x7e0b04a9, 0xe5aa049a), TOBN(0xe74083ad, 0x91103e84), TOBN(0xbeb183ce, 0x40afecc3), TOBN(0x6b89de9f, 0xea043f7a)}}}, {{{TOBN(0x0e299d23, 0xfe67ba66), TOBN(0x91450760, 0x93cf2f34), TOBN(0xf45b5ea9, 0x97fcf913), TOBN(0x5be00843, 0x8bd7ddda)}, {TOBN(0x358c3e05, 0xd53ff04d), TOBN(0xbf7ccdc3, 0x5de91ef7), TOBN(0xad684dbf, 0xb69ec1a0), TOBN(0x367e7cf2, 0x801fd997)}}, {{TOBN(0x0ca1f3b7, 0xb0dc8595), TOBN(0x27de4608, 0x9f1d9f2e), TOBN(0x1af3bf39, 0xbadd82a7), TOBN(0x79356a79, 0x65862448)}, {TOBN(0xc0602345, 0xf5f9a052), TOBN(0x1a8b0f89, 0x139a42f9), TOBN(0xb53eee42, 0x844d40fc), TOBN(0x93b0bfe5, 0x4e5b6368)}}, {{TOBN(0x5434dd02, 0xc024789c), TOBN(0x90dca9ea, 0x41b57bfc), TOBN(0x8aa898e2, 0x243398df), TOBN(0xf607c834, 0x894a94bb)}, {TOBN(0xbb07be97, 0xc2c99b76), TOBN(0x6576ba67, 0x18c29302), TOBN(0x3d79efcc, 0xe703a88c), TOBN(0xf259ced7, 0xb6a0d106)}}, {{TOBN(0x0f893a5d, 0xc8de610b), TOBN(0xe8c515fb, 0x67e223ce), TOBN(0x7774bfa6, 0x4ead6dc5), TOBN(0x89d20f95, 0x925c728f)}, {TOBN(0x7a1e0966, 0x098583ce), TOBN(0xa2eedb94, 0x93f2a7d7), TOBN(0x1b282097, 0x4c304d4a), TOBN(0x0842e3da, 0xc077282d)}}, {{TOBN(0xe4d972a3, 0x3b9e2d7b), TOBN(0x7cc60b27, 0xc48218ff), TOBN(0x8fc70838, 0x84149d91), TOBN(0x5c04346f, 0x2f461ecc)}, {TOBN(0xebe9fdf2, 0x614650a9), TOBN(0x5e35b537, 0xc1f666ac), TOBN(0x645613d1, 0x88babc83), TOBN(0x88cace3a, 0xc5e1c93e)}}, {{TOBN(0x209ca375, 0x3de92e23), TOBN(0xccb03cc8, 0x5fbbb6e3), 
TOBN(0xccb90f03, 0xd7b1487e), TOBN(0xfa9c2a38, 0xc710941f)}, {TOBN(0x756c3823, 0x6724ceed), TOBN(0x3a902258, 0x192d0323), TOBN(0xb150e519, 0xea5e038e), TOBN(0xdcba2865, 0xc7427591)}}, {{TOBN(0xe549237f, 0x78890732), TOBN(0xc443bef9, 0x53fcb4d9), TOBN(0x9884d8a6, 0xeb3480d6), TOBN(0x8a35b6a1, 0x3048b186)}, {TOBN(0xb4e44716, 0x65e9a90a), TOBN(0x45bf380d, 0x653006c0), TOBN(0x8f3f820d, 0x4fe9ae3b), TOBN(0x244a35a0, 0x979a3b71)}}, {{TOBN(0xa1010e9d, 0x74cd06ff), TOBN(0x9c17c7df, 0xaca3eeac), TOBN(0x74c86cd3, 0x8063aa2b), TOBN(0x8595c4b3, 0x734614ff)}, {TOBN(0xa3de00ca, 0x990f62cc), TOBN(0xd9bed213, 0xca0c3be5), TOBN(0x7886078a, 0xdf8ce9f5), TOBN(0xddb27ce3, 0x5cd44444)}}, {{TOBN(0xed374a66, 0x58926ddd), TOBN(0x138b2d49, 0x908015b8), TOBN(0x886c6579, 0xde1f7ab8), TOBN(0x888b9aa0, 0xc3020b7a)}, {TOBN(0xd3ec034e, 0x3a96e355), TOBN(0xba65b0b8, 0xf30fbe9a), TOBN(0x064c8e50, 0xff21367a), TOBN(0x1f508ea4, 0x0b04b46e)}}, {{TOBN(0x98561a49, 0x747c866c), TOBN(0xbbb1e5fe, 0x0518a062), TOBN(0x20ff4e8b, 0xecdc3608), TOBN(0x7f55cded, 0x20184027)}, {TOBN(0x8d73ec95, 0xf38c85f0), TOBN(0x5b589fdf, 0x8bc3b8c3), TOBN(0xbe95dd98, 0x0f12b66f), TOBN(0xf5bd1a09, 0x0e338e01)}}, {{TOBN(0x65163ae5, 0x5e915918), TOBN(0x6158d6d9, 0x86f8a46b), TOBN(0x8466b538, 0xeeebf99c), TOBN(0xca8761f6, 0xbca477ef)}, {TOBN(0xaf3449c2, 0x9ebbc601), TOBN(0xef3b0f41, 0xe0c3ae2f), TOBN(0xaa6c577d, 0x5de63752), TOBN(0xe9166601, 0x64682a51)}}, {{TOBN(0x5a3097be, 0xfc15aa1e), TOBN(0x40d12548, 0xb54b0745), TOBN(0x5bad4706, 0x519a5f12), TOBN(0xed03f717, 0xa439dee6)}, {TOBN(0x0794bb6c, 0x4a02c499), TOBN(0xf725083d, 0xcffe71d2), TOBN(0x2cad7519, 0x0f3adcaf), TOBN(0x7f68ea1c, 0x43729310)}}, {{TOBN(0xe747c8c7, 0xb7ffd977), TOBN(0xec104c35, 0x80761a22), TOBN(0x8395ebaf, 0x5a3ffb83), TOBN(0xfb3261f4, 0xe4b63db7)}, {TOBN(0x53544960, 0xd883e544), TOBN(0x13520d70, 0x8cc2eeb8), TOBN(0x08f6337b, 0xd3d65f99), TOBN(0x83997db2, 0x781cf95b)}}, {{TOBN(0xce6ff106, 0x0dbd2c01), TOBN(0x4f8eea6b, 0x1f9ce934), TOBN(0x546f7c4b, 0x0e993921), 
TOBN(0x6236a324, 0x5e753fc7)}, {TOBN(0x65a41f84, 0xa16022e9), TOBN(0x0c18d878, 0x43d1dbb2), TOBN(0x73c55640, 0x2d4cef9c), TOBN(0xa0428108, 0x70444c74)}}, {{TOBN(0x68e4f15e, 0x9afdfb3c), TOBN(0x49a56143, 0x5bdfb6df), TOBN(0xa9bc1bd4, 0x5f823d97), TOBN(0xbceb5970, 0xea111c2a)}, {TOBN(0x366b455f, 0xb269bbc4), TOBN(0x7cd85e1e, 0xe9bc5d62), TOBN(0xc743c41c, 0x4f18b086), TOBN(0xa4b40990, 0x95294fb9)}}, {{TOBN(0x9c7c581d, 0x26ee8382), TOBN(0xcf17dcc5, 0x359d638e), TOBN(0xee8273ab, 0xb728ae3d), TOBN(0x1d112926, 0xf821f047)}, {TOBN(0x11498477, 0x50491a74), TOBN(0x687fa761, 0xfde0dfb9), TOBN(0x2c258022, 0x7ea435ab), TOBN(0x6b8bdb94, 0x91ce7e3f)}}, {{TOBN(0x4c5b5dc9, 0x3bf834aa), TOBN(0x04371819, 0x4f6c7e4b), TOBN(0xc284e00a, 0x3736bcad), TOBN(0x0d881118, 0x21ae8f8d)}, {TOBN(0xf9cf0f82, 0xf48c8e33), TOBN(0xa11fd075, 0xa1bf40db), TOBN(0xdceab0de, 0xdc2733e5), TOBN(0xc560a8b5, 0x8e986bd7)}}, {{TOBN(0x48dd1fe2, 0x3929d097), TOBN(0x3885b290, 0x92f188f1), TOBN(0x0f2ae613, 0xda6fcdac), TOBN(0x9054303e, 0xb662a46c)}, {TOBN(0xb6871e44, 0x0738042a), TOBN(0x98e6a977, 0xbdaf6449), TOBN(0xd8bc0650, 0xd1c9df1b), TOBN(0xef3d6451, 0x36e098f9)}}, {{TOBN(0x03fbae82, 0xb6d72d28), TOBN(0x77ca9db1, 0xf5d84080), TOBN(0x8a112cff, 0xa58efc1c), TOBN(0x518d761c, 0xc564cb4a)}, {TOBN(0x69b5740e, 0xf0d1b5ce), TOBN(0x717039cc, 0xe9eb1785), TOBN(0x3fe29f90, 0x22f53382), TOBN(0x8e54ba56, 0x6bc7c95c)}}, {{TOBN(0x9c806d8a, 0xf7f91d0f), TOBN(0x3b61b0f1, 0xa82a5728), TOBN(0x4640032d, 0x94d76754), TOBN(0x273eb5de, 0x47d834c6)}, {TOBN(0x2988abf7, 0x7b4e4d53), TOBN(0xb7ce66bf, 0xde401777), TOBN(0x9fba6b32, 0x715071b3), TOBN(0x82413c24, 0xad3a1a98)}}, {{TOBN(0x5b7fc8c4, 0xe0e8ad93), TOBN(0xb5679aee, 0x5fab868d), TOBN(0xb1f9d2fa, 0x2b3946f3), TOBN(0x458897dc, 0x5685b50a)}, {TOBN(0x1e98c930, 0x89d0caf3), TOBN(0x39564c5f, 0x78642e92), TOBN(0x1b77729a, 0x0dbdaf18), TOBN(0xf9170722, 0x579e82e6)}}, {{TOBN(0x680c0317, 0xe4515fa5), TOBN(0xf85cff84, 0xfb0c790f), TOBN(0xc7a82aab, 0x6d2e0765), TOBN(0x7446bca9, 0x35c82b32)}, 
{TOBN(0x5de607aa, 0x6d63184f), TOBN(0x7c1a46a8, 0x262803a6), TOBN(0xd218313d, 0xaebe8035), TOBN(0x92113ffd, 0xc73c51f8)}}, {{TOBN(0x4b38e083, 0x12e7e46c), TOBN(0x69d0a37a, 0x56126bd5), TOBN(0xfb3f324b, 0x73c07e04), TOBN(0xa0c22f67, 0x8fda7267)}, {TOBN(0x8f2c0051, 0x4d2c7d8f), TOBN(0xbc45ced3, 0xcbe2cae5), TOBN(0xe1c6cf07, 0xa8f0f277), TOBN(0xbc392312, 0x1eb99a98)}}, {{TOBN(0x75537b7e, 0x3cc8ac85), TOBN(0x8d725f57, 0xdd02753b), TOBN(0xfd05ff64, 0xb737df2f), TOBN(0x55fe8712, 0xf6d2531d)}, {TOBN(0x57ce04a9, 0x6ab6b01c), TOBN(0x69a02a89, 0x7cd93724), TOBN(0x4f82ac35, 0xcf86699b), TOBN(0x8242d3ad, 0x9cb4b232)}}, {{TOBN(0x713d0f65, 0xd62105e5), TOBN(0xbb222bfa, 0x2d29be61), TOBN(0xf2f9a79e, 0x6cfbef09), TOBN(0xfc24d8d3, 0xd5d6782f)}, {TOBN(0x5db77085, 0xd4129967), TOBN(0xdb81c3cc, 0xdc3c2a43), TOBN(0x9d655fc0, 0x05d8d9a3), TOBN(0x3f5d057a, 0x54298026)}}, {{TOBN(0x1157f56d, 0x88c54694), TOBN(0xb26baba5, 0x9b09573e), TOBN(0x2cab03b0, 0x22adffd1), TOBN(0x60a412c8, 0xdd69f383)}, {TOBN(0xed76e98b, 0x54b25039), TOBN(0xd4ee67d3, 0x687e714d), TOBN(0x87739648, 0x7b00b594), TOBN(0xce419775, 0xc9ef709b)}}, {{TOBN(0x40f76f85, 0x1c203a40), TOBN(0x30d352d6, 0xeafd8f91), TOBN(0xaf196d3d, 0x95578dd2), TOBN(0xea4bb3d7, 0x77cc3f3d)}, {TOBN(0x42a5bd03, 0xb98e782b), TOBN(0xac958c40, 0x0624920d), TOBN(0xb838134c, 0xfc56fcc8), TOBN(0x86ec4ccf, 0x89572e5e)}}, {{TOBN(0x69c43526, 0x9be47be0), TOBN(0x323b7dd8, 0xcb28fea1), TOBN(0xfa5538ba, 0x3a6c67e5), TOBN(0xef921d70, 0x1d378e46)}, {TOBN(0xf92961fc, 0x3c4b880e), TOBN(0x3f6f914e, 0x98940a67), TOBN(0xa990eb0a, 0xfef0ff39), TOBN(0xa6c2920f, 0xf0eeff9c)}}, {{TOBN(0xca804166, 0x51b8d9a3), TOBN(0x42531bc9, 0x0ffb0db1), TOBN(0x72ce4718, 0xaa82e7ce), TOBN(0x6e199913, 0xdf574741)}, {TOBN(0xd5f1b13d, 0xd5d36946), TOBN(0x8255dc65, 0xf68f0194), TOBN(0xdc9df4cd, 0x8710d230), TOBN(0x3453c20f, 0x138c1988)}}, {{TOBN(0x9af98dc0, 0x89a6ef01), TOBN(0x4dbcc3f0, 0x9857df85), TOBN(0x34805601, 0x5c1ad924), TOBN(0x40448da5, 0xd0493046)}, {TOBN(0xf629926d, 0x4ee343e2), 
TOBN(0x6343f1bd, 0x90e8a301), TOBN(0xefc93491, 0x40815b3f), TOBN(0xf882a423, 0xde8f66fb)}}, {{TOBN(0x3a12d5f4, 0xe7db9f57), TOBN(0x7dfba38a, 0x3c384c27), TOBN(0x7a904bfd, 0x6fc660b1), TOBN(0xeb6c5db3, 0x2773b21c)}, {TOBN(0xc350ee66, 0x1cdfe049), TOBN(0x9baac0ce, 0x44540f29), TOBN(0xbc57b6ab, 0xa5ec6aad), TOBN(0x167ce8c3, 0x0a7c1baa)}}, {{TOBN(0xb23a03a5, 0x53fb2b56), TOBN(0x6ce141e7, 0x4e057f78), TOBN(0x796525c3, 0x89e490d9), TOBN(0x0bc95725, 0xa31a7e75)}, {TOBN(0x1ec56791, 0x1220fd06), TOBN(0x716e3a3c, 0x408b0bd6), TOBN(0x31cd6bf7, 0xe8ebeba9), TOBN(0xa7326ca6, 0xbee6b670)}}, {{TOBN(0x3d9f851c, 0xcd090c43), TOBN(0x561e8f13, 0xf12c3988), TOBN(0x50490b6a, 0x904b7be4), TOBN(0x61690ce1, 0x0410737b)}, {TOBN(0x299e9a37, 0x0f009052), TOBN(0x258758f0, 0xf026092e), TOBN(0x9fa255f3, 0xfdfcdc0f), TOBN(0xdbc9fb1f, 0xc0e1bcd2)}}, {{TOBN(0x35f9dd6e, 0x24651840), TOBN(0xdca45a84, 0xa5c59abc), TOBN(0x103d396f, 0xecca4938), TOBN(0x4532da0a, 0xb97b3f29)}, {TOBN(0xc4135ea5, 0x1999a6bf), TOBN(0x3aa9505a, 0x5e6bf2ee), TOBN(0xf77cef06, 0x3f5be093), TOBN(0x97d1a0f8, 0xa943152e)}}, {{TOBN(0x2cb0ebba, 0x2e1c21dd), TOBN(0xf41b29fc, 0x2c6797c4), TOBN(0xc6e17321, 0xb300101f), TOBN(0x4422b0e9, 0xd0d79a89)}, {TOBN(0x49e4901c, 0x92f1bfc4), TOBN(0x06ab1f8f, 0xe1e10ed9), TOBN(0x84d35577, 0xdb2926b8), TOBN(0xca349d39, 0x356e8ec2)}}, {{TOBN(0x70b63d32, 0x343bf1a9), TOBN(0x8fd3bd28, 0x37d1a6b1), TOBN(0x0454879c, 0x316865b4), TOBN(0xee959ff6, 0xc458efa2)}, {TOBN(0x0461dcf8, 0x9706dc3f), TOBN(0x737db0e2, 0x164e4b2e), TOBN(0x09262680, 0x2f8843c8), TOBN(0x54498bbc, 0x7745e6f6)}}, {{TOBN(0x359473fa, 0xa29e24af), TOBN(0xfcc3c454, 0x70aa87a1), TOBN(0xfd2c4bf5, 0x00573ace), TOBN(0xb65b514e, 0x28dd1965)}, {TOBN(0xe46ae7cf, 0x2193e393), TOBN(0x60e9a4e1, 0xf5444d97), TOBN(0xe7594e96, 0x00ff38ed), TOBN(0x43d84d2f, 0x0a0e0f02)}}, {{TOBN(0x8b6db141, 0xee398a21), TOBN(0xb88a56ae, 0xe3bcc5be), TOBN(0x0a1aa52f, 0x373460ea), TOBN(0x20da1a56, 0x160bb19b)}, {TOBN(0xfb54999d, 0x65bf0384), TOBN(0x71a14d24, 0x5d5a180e), 
TOBN(0xbc44db7b, 0x21737b04), TOBN(0xd84fcb18, 0x01dd8e92)}}, {{TOBN(0x80de937b, 0xfa44b479), TOBN(0x53505499, 0x5c98fd4f), TOBN(0x1edb12ab, 0x28f08727), TOBN(0x4c58b582, 0xa5f3ef53)}, {TOBN(0xbfb236d8, 0x8327f246), TOBN(0xc3a3bfaa, 0x4d7df320), TOBN(0xecd96c59, 0xb96024f2), TOBN(0xfc293a53, 0x7f4e0433)}}, {{TOBN(0x5341352b, 0x5acf6e10), TOBN(0xc50343fd, 0xafe652c3), TOBN(0x4af3792d, 0x18577a7f), TOBN(0xe1a4c617, 0xaf16823d)}, {TOBN(0x9b26d0cd, 0x33425d0a), TOBN(0x306399ed, 0x9b7bc47f), TOBN(0x2a792f33, 0x706bb20b), TOBN(0x31219614, 0x98111055)}}, {{TOBN(0x864ec064, 0x87f5d28b), TOBN(0x11392d91, 0x962277fd), TOBN(0xb5aa7942, 0xbb6aed5f), TOBN(0x080094dc, 0x47e799d9)}, {TOBN(0x4afa588c, 0x208ba19b), TOBN(0xd3e7570f, 0x8512f284), TOBN(0xcbae64e6, 0x02f5799a), TOBN(0xdeebe7ef, 0x514b9492)}}, {{TOBN(0x30300f98, 0xe5c298ff), TOBN(0x17f561be, 0x3678361f), TOBN(0xf52ff312, 0x98cb9a16), TOBN(0x6233c3bc, 0x5562d490)}, {TOBN(0x7bfa15a1, 0x92e3a2cb), TOBN(0x961bcfd1, 0xe6365119), TOBN(0x3bdd29bf, 0x2c8c53b1), TOBN(0x739704df, 0x822844ba)}}, {{TOBN(0x7dacfb58, 0x7e7b754b), TOBN(0x23360791, 0xa806c9b9), TOBN(0xe7eb88c9, 0x23504452), TOBN(0x2983e996, 0x852c1783)}, {TOBN(0xdd4ae529, 0x958d881d), TOBN(0x026bae03, 0x262c7b3c), TOBN(0x3a6f9193, 0x960b52d1), TOBN(0xd0980f90, 0x92696cfb)}}, {{TOBN(0x4c1f428c, 0xd5f30851), TOBN(0x94dfed27, 0x2a4f6630), TOBN(0x4df53772, 0xfc5d48a4), TOBN(0xdd2d5a2f, 0x933260ce)}, {TOBN(0x574115bd, 0xd44cc7a5), TOBN(0x4ba6b20d, 0xbd12533a), TOBN(0x30e93cb8, 0x243057c9), TOBN(0x794c486a, 0x14de320e)}}, {{TOBN(0xe925d4ce, 0xf21496e4), TOBN(0xf951d198, 0xec696331), TOBN(0x9810e2de, 0x3e8d812f), TOBN(0xd0a47259, 0x389294ab)}, {TOBN(0x513ba2b5, 0x0e3bab66), TOBN(0x462caff5, 0xabad306f), TOBN(0xe2dc6d59, 0xaf04c49e), TOBN(0x1aeb8750, 0xe0b84b0b)}}, {{TOBN(0xc034f12f, 0x2f7d0ca2), TOBN(0x6d2e8128, 0xe06acf2f), TOBN(0x801f4f83, 0x21facc2f), TOBN(0xa1170c03, 0xf40ef607)}, {TOBN(0xfe0a1d4f, 0x7805a99c), TOBN(0xbde56a36, 0xcc26aba5), TOBN(0x5b1629d0, 0x35531f40), 
TOBN(0xac212c2b, 0x9afa6108)}}, {{TOBN(0x30a06bf3, 0x15697be5), TOBN(0x6f0545dc, 0x2c63c7c1), TOBN(0x5d8cb842, 0x7ccdadaf), TOBN(0xd52e379b, 0xac7015bb)}, {TOBN(0xc4f56147, 0xf462c23e), TOBN(0xd44a4298, 0x46bc24b0), TOBN(0xbc73d23a, 0xe2856d4f), TOBN(0x61cedd8c, 0x0832bcdf)}}, {{TOBN(0x60953556, 0x99f241d7), TOBN(0xee4adbd7, 0x001a349d), TOBN(0x0b35bf6a, 0xaa89e491), TOBN(0x7f0076f4, 0x136f7546)}, {TOBN(0xd19a18ba, 0x9264da3d), TOBN(0x6eb2d2cd, 0x62a7a28b), TOBN(0xcdba941f, 0x8761c971), TOBN(0x1550518b, 0xa3be4a5d)}}, {{TOBN(0xd0e8e2f0, 0x57d0b70c), TOBN(0xeea8612e, 0xcd133ba3), TOBN(0x814670f0, 0x44416aec), TOBN(0x424db6c3, 0x30775061)}, {TOBN(0xd96039d1, 0x16213fd1), TOBN(0xc61e7fa5, 0x18a3478f), TOBN(0xa805bdcc, 0xcb0c5021), TOBN(0xbdd6f3a8, 0x0cc616dd)}}, {{TOBN(0x06009667, 0x5d97f7e2), TOBN(0x31db0fc1, 0xaf0bf4b6), TOBN(0x23680ed4, 0x5491627a), TOBN(0xb99a3c66, 0x7d741fb1)}, {TOBN(0xe9bb5f55, 0x36b1ff92), TOBN(0x29738577, 0x512b388d), TOBN(0xdb8a2ce7, 0x50fcf263), TOBN(0x385346d4, 0x6c4f7b47)}}, {{TOBN(0xbe86c5ef, 0x31631f9e), TOBN(0xbf91da21, 0x03a57a29), TOBN(0xc3b1f796, 0x7b23f821), TOBN(0x0f7d00d2, 0x770db354)}, {TOBN(0x8ffc6c3b, 0xd8fe79da), TOBN(0xcc5e8c40, 0xd525c996), TOBN(0x4640991d, 0xcfff632a), TOBN(0x64d97e8c, 0x67112528)}}, {{TOBN(0xc232d973, 0x02f1cd1e), TOBN(0xce87eacb, 0x1dd212a4), TOBN(0x6e4c8c73, 0xe69802f7), TOBN(0x12ef0290, 0x1fffddbd)}, {TOBN(0x941ec74e, 0x1bcea6e2), TOBN(0xd0b54024, 0x3cb92cbb), TOBN(0x809fb9d4, 0x7e8f9d05), TOBN(0x3bf16159, 0xf2992aae)}}, {{TOBN(0xad40f279, 0xf8a7a838), TOBN(0x11aea631, 0x05615660), TOBN(0xbf52e6f1, 0xa01f6fa1), TOBN(0xef046995, 0x3dc2aec9)}, {TOBN(0x785dbec9, 0xd8080711), TOBN(0xe1aec60a, 0x9fdedf76), TOBN(0xece797b5, 0xfa21c126), TOBN(0xc66e898f, 0x05e52732)}}, {{TOBN(0x39bb69c4, 0x08811fdb), TOBN(0x8bfe1ef8, 0x2fc7f082), TOBN(0xc8e7a393, 0x174f4138), TOBN(0xfba8ad1d, 0xd58d1f98)}, {TOBN(0xbc21d0ce, 0xbfd2fd5b), TOBN(0x0b839a82, 0x6ee60d61), TOBN(0xaacf7658, 0xafd22253), TOBN(0xb526bed8, 0xaae396b3)}}, 
{{TOBN(0xccc1bbc2, 0x38564464), TOBN(0x9e3ff947, 0x8c45bc73), TOBN(0xcde9bca3, 0x58188a78), TOBN(0x138b8ee0, 0xd73bf8f7)}, {TOBN(0x5c7e234c, 0x4123c489), TOBN(0x66e69368, 0xfa643297), TOBN(0x0629eeee, 0x39a15fa3), TOBN(0x95fab881, 0xa9e2a927)}}, {{TOBN(0xb2497007, 0xeafbb1e1), TOBN(0xd75c9ce6, 0xe75b7a93), TOBN(0x3558352d, 0xefb68d78), TOBN(0xa2f26699, 0x223f6396)}, {TOBN(0xeb911ecf, 0xe469b17a), TOBN(0x62545779, 0xe72d3ec2), TOBN(0x8ea47de7, 0x82cb113f), TOBN(0xebe4b086, 0x4e1fa98d)}}, {{TOBN(0xec2d5ed7, 0x8cdfedb1), TOBN(0xa535c077, 0xfe211a74), TOBN(0x9678109b, 0x11d244c5), TOBN(0xf17c8bfb, 0xbe299a76)}, {TOBN(0xb651412e, 0xfb11fbc4), TOBN(0xea0b5482, 0x94ab3f65), TOBN(0xd8dffd95, 0x0cf78243), TOBN(0x2e719e57, 0xce0361d4)}}, {{TOBN(0x9007f085, 0x304ddc5b), TOBN(0x095e8c6d, 0x4daba2ea), TOBN(0x5a33cdb4, 0x3f9d28a9), TOBN(0x85b95cd8, 0xe2283003)}, {TOBN(0xbcd6c819, 0xb9744733), TOBN(0x29c5f538, 0xfc7f5783), TOBN(0x6c49b2fa, 0xd59038e4), TOBN(0x68349cc1, 0x3bbe1018)}}, {{TOBN(0xcc490c1d, 0x21830ee5), TOBN(0x36f9c4ee, 0xe9bfa297), TOBN(0x58fd7294, 0x48de1a94), TOBN(0xaadb13a8, 0x4e8f2cdc)}, {TOBN(0x515eaaa0, 0x81313dba), TOBN(0xc76bb468, 0xc2152dd8), TOBN(0x357f8d75, 0xa653dbf8), TOBN(0xe4d8c4d1, 0xb14ac143)}}, {{TOBN(0xbdb8e675, 0xb055cb40), TOBN(0x898f8e7b, 0x977b5167), TOBN(0xecc65651, 0xb82fb863), TOBN(0x56544814, 0x6d88f01f)}, {TOBN(0xb0928e95, 0x263a75a9), TOBN(0xcfb6836f, 0x1a22fcda), TOBN(0x651d14db, 0x3f3bd37c), TOBN(0x1d3837fb, 0xb6ad4664)}}, {{TOBN(0x7c5fb538, 0xff4f94ab), TOBN(0x7243c712, 0x6d7fb8f2), TOBN(0xef13d60c, 0xa85c5287), TOBN(0x18cfb7c7, 0x4bb8dd1b)}, {TOBN(0x82f9bfe6, 0x72908219), TOBN(0x35c4592b, 0x9d5144ab), TOBN(0x52734f37, 0x9cf4b42f), TOBN(0x6bac55e7, 0x8c60ddc4)}}, {{TOBN(0xb5cd811e, 0x94dea0f6), TOBN(0x259ecae4, 0xe18cc1a3), TOBN(0x6a0e836e, 0x15e660f8), TOBN(0x6c639ea6, 0x0e02bff2)}, {TOBN(0x8721b8cb, 0x7e1026fd), TOBN(0x9e73b50b, 0x63261942), TOBN(0xb8c70974, 0x77f01da3), TOBN(0x1839e6a6, 0x8268f57f)}}, {{TOBN(0x571b9415, 0x5150b805), 
TOBN(0x1892389e, 0xf92c7097), TOBN(0x8d69c18e, 0x4a084b95), TOBN(0x7014c512, 0xbe5b495c)}, {TOBN(0x4780db36, 0x1b07523c), TOBN(0x2f6219ce, 0x2c1c64fa), TOBN(0xc38b81b0, 0x602c105a), TOBN(0xab4f4f20, 0x5dc8e360)}}, {{TOBN(0x20d3c982, 0xcf7d62d2), TOBN(0x1f36e29d, 0x23ba8150), TOBN(0x48ae0bf0, 0x92763f9e), TOBN(0x7a527e6b, 0x1d3a7007)}, {TOBN(0xb4a89097, 0x581a85e3), TOBN(0x1f1a520f, 0xdc158be5), TOBN(0xf98db37d, 0x167d726e), TOBN(0x8802786e, 0x1113e862)}}}, {{{TOBN(0xefb2149e, 0x36f09ab0), TOBN(0x03f163ca, 0x4a10bb5b), TOBN(0xd0297045, 0x06e20998), TOBN(0x56f0af00, 0x1b5a3bab)}, {TOBN(0x7af4cfec, 0x70880e0d), TOBN(0x7332a66f, 0xbe3d913f), TOBN(0x32e6c84a, 0x7eceb4bd), TOBN(0xedc4a79a, 0x9c228f55)}}, {{TOBN(0xc37c7dd0, 0xc55c4496), TOBN(0xa6a96357, 0x25bbabd2), TOBN(0x5b7e63f2, 0xadd7f363), TOBN(0x9dce3782, 0x2e73f1df)}, {TOBN(0xe1e5a16a, 0xb2b91f71), TOBN(0xe4489823, 0x5ba0163c), TOBN(0xf2759c32, 0xf6e515ad), TOBN(0xa5e2f1f8, 0x8615eecf)}}, {{TOBN(0x74519be7, 0xabded551), TOBN(0x03d358b8, 0xc8b74410), TOBN(0x4d00b10b, 0x0e10d9a9), TOBN(0x6392b0b1, 0x28da52b7)}, {TOBN(0x6744a298, 0x0b75c904), TOBN(0xc305b0ae, 0xa8f7f96c), TOBN(0x042e421d, 0x182cf932), TOBN(0xf6fc5d50, 0x9e4636ca)}}, {{TOBN(0x795847c9, 0xd64cc78c), TOBN(0x6c50621b, 0x9b6cb27b), TOBN(0x07099bf8, 0xdf8022ab), TOBN(0x48f862eb, 0xc04eda1d)}, {TOBN(0xd12732ed, 0xe1603c16), TOBN(0x19a80e0f, 0x5c9a9450), TOBN(0xe2257f54, 0xb429b4fc), TOBN(0x66d3b2c6, 0x45460515)}}, {{TOBN(0x6ca4f87e, 0x822e37be), TOBN(0x73f237b4, 0x253bda4e), TOBN(0xf747f3a2, 0x41190aeb), TOBN(0xf06fa36f, 0x804cf284)}, {TOBN(0x0a6bbb6e, 0xfc621c12), TOBN(0x5d624b64, 0x40b80ec6), TOBN(0x4b072425, 0x7ba556f3), TOBN(0x7fa0c354, 0x3e2d20a8)}}, {{TOBN(0xe921fa31, 0xe3229d41), TOBN(0xa929c652, 0x94531bd4), TOBN(0x84156027, 0xa6d38209), TOBN(0xf3d69f73, 0x6bdb97bd)}, {TOBN(0x8906d19a, 0x16833631), TOBN(0x68a34c2e, 0x03d51be3), TOBN(0xcb59583b, 0x0e511cd8), TOBN(0x99ce6bfd, 0xfdc132a8)}}, {{TOBN(0x3facdaaa, 0xffcdb463), TOBN(0x658bbc1a, 0x34a38b08), 
TOBN(0x12a801f8, 0xf1a9078d), TOBN(0x1567bcf9, 0x6ab855de)}, {TOBN(0xe08498e0, 0x3572359b), TOBN(0xcf0353e5, 0x8659e68b), TOBN(0xbb86e9c8, 0x7d23807c), TOBN(0xbc08728d, 0x2198e8a2)}}, {{TOBN(0x8de2b7bc, 0x453cadd6), TOBN(0x203900a7, 0xbc0bc1f8), TOBN(0xbcd86e47, 0xa6abd3af), TOBN(0x911cac12, 0x8502effb)}, {TOBN(0x2d550242, 0xec965469), TOBN(0x0e9f7692, 0x29e0017e), TOBN(0x633f078f, 0x65979885), TOBN(0xfb87d449, 0x4cf751ef)}}, {{TOBN(0xe1790e4b, 0xfc25419a), TOBN(0x36467203, 0x4bff3cfd), TOBN(0xc8db6386, 0x25b6e83f), TOBN(0x6cc69f23, 0x6cad6fd2)}, {TOBN(0x0219e45a, 0x6bc68bb9), TOBN(0xe43d79b6, 0x297f7334), TOBN(0x7d445368, 0x465dc97c), TOBN(0x4b9eea32, 0x2a0b949a)}}, {{TOBN(0x1b96c6ba, 0x6102d021), TOBN(0xeaafac78, 0x2f4461ea), TOBN(0xd4b85c41, 0xc49f19a8), TOBN(0x275c28e4, 0xcf538875)}, {TOBN(0x35451a9d, 0xdd2e54e0), TOBN(0x6991adb5, 0x0605618b), TOBN(0x5b8b4bcd, 0x7b36cd24), TOBN(0x372a4f8c, 0x56f37216)}}, {{TOBN(0xc890bd73, 0xa6a5da60), TOBN(0x6f083da0, 0xdc4c9ff0), TOBN(0xf4e14d94, 0xf0536e57), TOBN(0xf9ee1eda, 0xaaec8243)}, {TOBN(0x571241ec, 0x8bdcf8e7), TOBN(0xa5db8271, 0x0b041e26), TOBN(0x9a0b9a99, 0xe3fff040), TOBN(0xcaaf21dd, 0x7c271202)}}, {{TOBN(0xb4e2b2e1, 0x4f0dd2e8), TOBN(0xe77e7c4f, 0x0a377ac7), TOBN(0x69202c3f, 0x0d7a2198), TOBN(0xf759b7ff, 0x28200eb8)}, {TOBN(0xc87526ed, 0xdcfe314e), TOBN(0xeb84c524, 0x53d5cf99), TOBN(0xb1b52ace, 0x515138b6), TOBN(0x5aa7ff8c, 0x23fca3f4)}}, {{TOBN(0xff0b13c3, 0xb9791a26), TOBN(0x960022da, 0xcdd58b16), TOBN(0xdbd55c92, 0x57aad2de), TOBN(0x3baaaaa3, 0xf30fe619)}, {TOBN(0x9a4b2346, 0x0d881efd), TOBN(0x506416c0, 0x46325e2a), TOBN(0x91381e76, 0x035c18d4), TOBN(0xb3bb68be, 0xf27817b0)}}, {{TOBN(0x15bfb8bf, 0x5116f937), TOBN(0x7c64a586, 0xc1268943), TOBN(0x71e25cc3, 0x8419a2c8), TOBN(0x9fd6b0c4, 0x8335f463)}, {TOBN(0x4bf0ba3c, 0xe8ee0e0e), TOBN(0x6f6fba60, 0x298c21fa), TOBN(0x57d57b39, 0xae66bee0), TOBN(0x292d5130, 0x22672544)}}, {{TOBN(0xf451105d, 0xbab093b3), TOBN(0x012f59b9, 0x02839986), TOBN(0x8a915802, 0x3474a89c), 
TOBN(0x048c919c, 0x2de03e97)}, {TOBN(0xc476a2b5, 0x91071cd5), TOBN(0x791ed89a, 0x034970a5), TOBN(0x89bd9042, 0xe1b7994b), TOBN(0x8eaf5179, 0xa1057ffd)}}, {{TOBN(0x6066e2a2, 0xd551ee10), TOBN(0x87a8f1d8, 0x727e09a6), TOBN(0x00d08bab, 0x2c01148d), TOBN(0x6da8e4f1, 0x424f33fe)}, {TOBN(0x466d17f0, 0xcf9a4e71), TOBN(0xff502010, 0x3bf5cb19), TOBN(0xdccf97d8, 0xd062ecc0), TOBN(0x80c0d9af, 0x81d80ac4)}}, {{TOBN(0xe87771d8, 0x033f2876), TOBN(0xb0186ec6, 0x7d5cc3db), TOBN(0x58e8bb80, 0x3bc9bc1d), TOBN(0x4d1395cc, 0x6f6ef60e)}, {TOBN(0xa73c62d6, 0x186244a0), TOBN(0x918e5f23, 0x110a5b53), TOBN(0xed4878ca, 0x741b7eab), TOBN(0x3038d71a, 0xdbe03e51)}}, {{TOBN(0x840204b7, 0xa93c3246), TOBN(0x21ab6069, 0xa0b9b4cd), TOBN(0xf5fa6e2b, 0xb1d64218), TOBN(0x1de6ad0e, 0xf3d56191)}, {TOBN(0x570aaa88, 0xff1929c7), TOBN(0xc6df4c6b, 0x640e87b5), TOBN(0xde8a74f2, 0xc65f0ccc), TOBN(0x8b972fd5, 0xe6f6cc01)}}, {{TOBN(0x3fff36b6, 0x0b846531), TOBN(0xba7e45e6, 0x10a5e475), TOBN(0x84a1d10e, 0x4145b6c5), TOBN(0xf1f7f91a, 0x5e046d9d)}, {TOBN(0x0317a692, 0x44de90d7), TOBN(0x951a1d4a, 0xf199c15e), TOBN(0x91f78046, 0xc9d73deb), TOBN(0x74c82828, 0xfab8224f)}}, {{TOBN(0xaa6778fc, 0xe7560b90), TOBN(0xb4073e61, 0xa7e824ce), TOBN(0xff0d693c, 0xd642eba8), TOBN(0x7ce2e57a, 0x5dccef38)}, {TOBN(0x89c2c789, 0x1df1ad46), TOBN(0x83a06922, 0x098346fd), TOBN(0x2d715d72, 0xda2fc177), TOBN(0x7b6dd71d, 0x85b6cf1d)}}, {{TOBN(0xc60a6d0a, 0x73fa9cb0), TOBN(0xedd3992e, 0x328bf5a9), TOBN(0xc380ddd0, 0x832c8c82), TOBN(0xd182d410, 0xa2a0bf50)}, {TOBN(0x7d9d7438, 0xd9a528db), TOBN(0xe8b1a0e9, 0xcaf53994), TOBN(0xddd6e5fe, 0x0e19987c), TOBN(0xacb8df03, 0x190b059d)}}, {{TOBN(0x53703a32, 0x8300129f), TOBN(0x1f637662, 0x68c43bfd), TOBN(0xbcbd1913, 0x00e54051), TOBN(0x812fcc62, 0x7bf5a8c5)}, {TOBN(0x3f969d5f, 0x29fb85da), TOBN(0x72f4e00a, 0x694759e8), TOBN(0x426b6e52, 0x790726b7), TOBN(0x617bbc87, 0x3bdbb209)}}, {{TOBN(0x511f8bb9, 0x97aee317), TOBN(0x812a4096, 0xe81536a8), TOBN(0x137dfe59, 0x3ac09b9b), TOBN(0x0682238f, 0xba8c9a7a)}, 
{TOBN(0x7072ead6, 0xaeccb4bd), TOBN(0x6a34e9aa, 0x692ba633), TOBN(0xc82eaec2, 0x6fff9d33), TOBN(0xfb753512, 0x1d4d2b62)}}, {{TOBN(0x1a0445ff, 0x1d7aadab), TOBN(0x65d38260, 0xd5f6a67c), TOBN(0x6e62fb08, 0x91cfb26f), TOBN(0xef1e0fa5, 0x5c7d91d6)}, {TOBN(0x47e7c7ba, 0x33db72cd), TOBN(0x017cbc09, 0xfa7c74b2), TOBN(0x3c931590, 0xf50a503c), TOBN(0xcac54f60, 0x616baa42)}}, {{TOBN(0x9b6cd380, 0xb2369f0f), TOBN(0x97d3a70d, 0x23c76151), TOBN(0x5f9dd6fc, 0x9862a9c6), TOBN(0x044c4ab2, 0x12312f51)}, {TOBN(0x035ea0fd, 0x834a2ddc), TOBN(0x49e6b862, 0xcc7b826d), TOBN(0xb03d6883, 0x62fce490), TOBN(0x62f2497a, 0xb37e36e9)}}, {{TOBN(0x04b005b6, 0xc6458293), TOBN(0x36bb5276, 0xe8d10af7), TOBN(0xacf2dc13, 0x8ee617b8), TOBN(0x470d2d35, 0xb004b3d4)}, {TOBN(0x06790832, 0xfeeb1b77), TOBN(0x2bb75c39, 0x85657f9c), TOBN(0xd70bd4ed, 0xc0f60004), TOBN(0xfe797ecc, 0x219b018b)}}, {{TOBN(0x9b5bec2a, 0x753aebcc), TOBN(0xdaf9f3dc, 0xc939eca5), TOBN(0xd6bc6833, 0xd095ad09), TOBN(0x98abdd51, 0xdaa4d2fc)}, {TOBN(0xd9840a31, 0x8d168be5), TOBN(0xcf7c10e0, 0x2325a23c), TOBN(0xa5c02aa0, 0x7e6ecfaf), TOBN(0x2462e7e6, 0xb5bfdf18)}}, {{TOBN(0xab2d8a8b, 0xa0cc3f12), TOBN(0x68dd485d, 0xbc672a29), TOBN(0x72039752, 0x596f2cd3), TOBN(0x5d3eea67, 0xa0cf3d8d)}, {TOBN(0x810a1a81, 0xe6602671), TOBN(0x8f144a40, 0x14026c0c), TOBN(0xbc753a6d, 0x76b50f85), TOBN(0xc4dc21e8, 0x645cd4a4)}}, {{TOBN(0xc5262dea, 0x521d0378), TOBN(0x802b8e0e, 0x05011c6f), TOBN(0x1ba19cbb, 0x0b4c19ea), TOBN(0x21db64b5, 0xebf0aaec)}, {TOBN(0x1f394ee9, 0x70342f9d), TOBN(0x93a10aee, 0x1bc44a14), TOBN(0xa7eed31b, 0x3efd0baa), TOBN(0x6e7c824e, 0x1d154e65)}}, {{TOBN(0xee23fa81, 0x9966e7ee), TOBN(0x64ec4aa8, 0x05b7920d), TOBN(0x2d44462d, 0x2d90aad4), TOBN(0xf44dd195, 0xdf277ad5)}, {TOBN(0x8d6471f1, 0xbb46b6a1), TOBN(0x1e65d313, 0xfd885090), TOBN(0x33a800f5, 0x13a977b4), TOBN(0xaca9d721, 0x0797e1ef)}}, {{TOBN(0x9a5a85a0, 0xfcff6a17), TOBN(0x9970a3f3, 0x1eca7cee), TOBN(0xbb9f0d6b, 0xc9504be3), TOBN(0xe0c504be, 0xadd24ee2)}, {TOBN(0x7e09d956, 0x77fcc2f4), 
TOBN(0xef1a5227, 0x65bb5fc4), TOBN(0x145d4fb1, 0x8b9286aa), TOBN(0x66fd0c5d, 0x6649028b)}}, {{TOBN(0x98857ceb, 0x1bf4581c), TOBN(0xe635e186, 0xaca7b166), TOBN(0x278ddd22, 0x659722ac), TOBN(0xa0903c4c, 0x1db68007)}, {TOBN(0x366e4589, 0x48f21402), TOBN(0x31b49c14, 0xb96abda2), TOBN(0x329c4b09, 0xe0403190), TOBN(0x97197ca3, 0xd29f43fe)}}, {{TOBN(0x8073dd1e, 0x274983d8), TOBN(0xda1a3bde, 0x55717c8f), TOBN(0xfd3d4da2, 0x0361f9d1), TOBN(0x1332d081, 0x4c7de1ce)}, {TOBN(0x9b7ef7a3, 0xaa6d0e10), TOBN(0x17db2e73, 0xf54f1c4a), TOBN(0xaf3dffae, 0x4cd35567), TOBN(0xaaa2f406, 0xe56f4e71)}}, {{TOBN(0x8966759e, 0x7ace3fc7), TOBN(0x9594eacf, 0x45a8d8c6), TOBN(0x8de3bd8b, 0x91834e0e), TOBN(0xafe4ca53, 0x548c0421)}, {TOBN(0xfdd7e856, 0xe6ee81c6), TOBN(0x8f671beb, 0x6b891a3a), TOBN(0xf7a58f2b, 0xfae63829), TOBN(0x9ab186fb, 0x9c11ac9f)}}, {{TOBN(0x8d6eb369, 0x10b5be76), TOBN(0x046b7739, 0xfb040bcd), TOBN(0xccb4529f, 0xcb73de88), TOBN(0x1df0fefc, 0xcf26be03)}, {TOBN(0xad7757a6, 0xbcfcd027), TOBN(0xa8786c75, 0xbb3165ca), TOBN(0xe9db1e34, 0x7e99a4d9), TOBN(0x99ee86df, 0xb06c504b)}}, {{TOBN(0x5b7c2ddd, 0xc15c9f0a), TOBN(0xdf87a734, 0x4295989e), TOBN(0x59ece47c, 0x03d08fda), TOBN(0xb074d3dd, 0xad5fc702)}, {TOBN(0x20407903, 0x51a03776), TOBN(0x2bb1f77b, 0x2a608007), TOBN(0x25c58f4f, 0xe1153185), TOBN(0xe6df62f6, 0x766e6447)}}, {{TOBN(0xefb3d1be, 0xed51275a), TOBN(0x5de47dc7, 0x2f0f483f), TOBN(0x7932d98e, 0x97c2bedf), TOBN(0xd5c11927, 0x0219f8a1)}, {TOBN(0x9d751200, 0xa73a294e), TOBN(0x5f88434a, 0x9dc20172), TOBN(0xd28d9fd3, 0xa26f506a), TOBN(0xa890cd31, 0x9d1dcd48)}}, {{TOBN(0x0aebaec1, 0x70f4d3b4), TOBN(0xfd1a1369, 0x0ffc8d00), TOBN(0xb9d9c240, 0x57d57838), TOBN(0x45929d26, 0x68bac361)}, {TOBN(0x5a2cd060, 0x25b15ca6), TOBN(0x4b3c83e1, 0x6e474446), TOBN(0x1aac7578, 0xee1e5134), TOBN(0xa418f5d6, 0xc91e2f41)}}, {{TOBN(0x6936fc8a, 0x213ed68b), TOBN(0x860ae7ed, 0x510a5224), TOBN(0x63660335, 0xdef09b53), TOBN(0x641b2897, 0xcd79c98d)}, {TOBN(0x29bd38e1, 0x01110f35), TOBN(0x79c26f42, 0x648b1937), 
TOBN(0x64dae519, 0x9d9164f4), TOBN(0xd85a2310, 0x0265c273)}}, {{TOBN(0x7173dd5d, 0x4b07e2b1), TOBN(0xd144c4cb, 0x8d9ea221), TOBN(0xe8b04ea4, 0x1105ab14), TOBN(0x92dda542, 0xfe80d8f1)}, {TOBN(0xe9982fa8, 0xcf03dce6), TOBN(0x8b5ea965, 0x1a22cffc), TOBN(0xf7f4ea7f, 0x3fad88c4), TOBN(0x62db773e, 0x6a5ba95c)}}, {{TOBN(0xd20f02fb, 0x93f24567), TOBN(0xfd46c69a, 0x315257ca), TOBN(0x0ac74cc7, 0x8bcab987), TOBN(0x46f31c01, 0x5ceca2f5)}, {TOBN(0x40aedb59, 0x888b219e), TOBN(0xe50ecc37, 0xe1fccd02), TOBN(0x1bcd9dad, 0x911f816c), TOBN(0x583cc1ec, 0x8db9b00c)}}, {{TOBN(0xf3cd2e66, 0xa483bf11), TOBN(0xfa08a6f5, 0xb1b2c169), TOBN(0xf375e245, 0x4be9fa28), TOBN(0x99a7ffec, 0x5b6d011f)}, {TOBN(0x6a3ebddb, 0xc4ae62da), TOBN(0x6cea00ae, 0x374aef5d), TOBN(0xab5fb98d, 0x9d4d05bc), TOBN(0x7cba1423, 0xd560f252)}}, {{TOBN(0x49b2cc21, 0x208490de), TOBN(0x1ca66ec3, 0xbcfb2879), TOBN(0x7f1166b7, 0x1b6fb16f), TOBN(0xfff63e08, 0x65fe5db3)}, {TOBN(0xb8345abe, 0x8b2610be), TOBN(0xb732ed80, 0x39de3df4), TOBN(0x0e24ed50, 0x211c32b4), TOBN(0xd10d8a69, 0x848ff27d)}}, {{TOBN(0xc1074398, 0xed4de248), TOBN(0xd7cedace, 0x10488927), TOBN(0xa4aa6bf8, 0x85673e13), TOBN(0xb46bae91, 0x6daf30af)}, {TOBN(0x07088472, 0xfcef7ad8), TOBN(0x61151608, 0xd4b35e97), TOBN(0xbcfe8f26, 0xdde29986), TOBN(0xeb84c4c7, 0xd5a34c79)}}, {{TOBN(0xc1eec55c, 0x164e1214), TOBN(0x891be86d, 0xa147bb03), TOBN(0x9fab4d10, 0x0ba96835), TOBN(0xbf01e9b8, 0xa5c1ae9f)}, {TOBN(0x6b4de139, 0xb186ebc0), TOBN(0xd5c74c26, 0x85b91bca), TOBN(0x5086a99c, 0xc2d93854), TOBN(0xeed62a7b, 0xa7a9dfbc)}}, {{TOBN(0x8778ed6f, 0x76b7618a), TOBN(0xbff750a5, 0x03b66062), TOBN(0x4cb7be22, 0xb65186db), TOBN(0x369dfbf0, 0xcc3a6d13)}, {TOBN(0xc7dab26c, 0x7191a321), TOBN(0x9edac3f9, 0x40ed718e), TOBN(0xbc142b36, 0xd0cfd183), TOBN(0xc8af82f6, 0x7c991693)}}, {{TOBN(0xb3d1e4d8, 0x97ce0b2a), TOBN(0xe6d7c87f, 0xc3a55cdf), TOBN(0x35846b95, 0x68b81afe), TOBN(0x018d12af, 0xd3c239d8)}, {TOBN(0x2b2c6208, 0x01206e15), TOBN(0xe0e42453, 0xa3b882c6), TOBN(0x854470a3, 0xa50162d5), 
TOBN(0x08157478, 0x7017a62a)}}, {{TOBN(0x18bd3fb4, 0x820357c7), TOBN(0x992039ae, 0x6f1458ad), TOBN(0x9a1df3c5, 0x25b44aa1), TOBN(0x2d780357, 0xed3d5281)}, {TOBN(0x58cf7e4d, 0xc77ad4d4), TOBN(0xd49a7998, 0xf9df4fc4), TOBN(0x4465a8b5, 0x1d71205e), TOBN(0xa0ee0ea6, 0x649254aa)}}, {{TOBN(0x4b5eeecf, 0xab7bd771), TOBN(0x6c873073, 0x35c262b9), TOBN(0xdc5bd648, 0x3c9d61e7), TOBN(0x233d6d54, 0x321460d2)}, {TOBN(0xd20c5626, 0xfc195bcc), TOBN(0x25445958, 0x04d78b63), TOBN(0xe03fcb3d, 0x17ec8ef3), TOBN(0x54b690d1, 0x46b8f781)}}, {{TOBN(0x82fa2c8a, 0x21230646), TOBN(0xf51aabb9, 0x084f418c), TOBN(0xff4fbec1, 0x1a30ba43), TOBN(0x6a5acf73, 0x743c9df7)}, {TOBN(0x1da2b357, 0xd635b4d5), TOBN(0xc3de68dd, 0xecd5c1da), TOBN(0xa689080b, 0xd61af0dd), TOBN(0xdea5938a, 0xd665bf99)}}, {{TOBN(0x0231d71a, 0xfe637294), TOBN(0x01968aa6, 0xa5a81cd8), TOBN(0x11252d50, 0x048e63b5), TOBN(0xc446bc52, 0x6ca007e9)}, {TOBN(0xef8c50a6, 0x96d6134b), TOBN(0x9361fbf5, 0x9e09a05c), TOBN(0xf17f85a6, 0xdca3291a), TOBN(0xb178d548, 0xff251a21)}}, {{TOBN(0x87f6374b, 0xa4df3915), TOBN(0x566ce1bf, 0x2fd5d608), TOBN(0x425cba4d, 0x7de35102), TOBN(0x6b745f8f, 0x58c5d5e2)}, {TOBN(0x88402af6, 0x63122edf), TOBN(0x3190f9ed, 0x3b989a89), TOBN(0x4ad3d387, 0xebba3156), TOBN(0xef385ad9, 0xc7c469a5)}}, {{TOBN(0xb08281de, 0x3f642c29), TOBN(0x20be0888, 0x910ffb88), TOBN(0xf353dd4a, 0xd5292546), TOBN(0x3f1627de, 0x8377a262)}, {TOBN(0xa5faa013, 0xeefcd638), TOBN(0x8f3bf626, 0x74cc77c3), TOBN(0x32618f65, 0xa348f55e), TOBN(0x5787c0dc, 0x9fefeb9e)}}, {{TOBN(0xf1673aa2, 0xd9a23e44), TOBN(0x88dfa993, 0x4e10690d), TOBN(0x1ced1b36, 0x2bf91108), TOBN(0x9193ceca, 0x3af48649)}, {TOBN(0xfb34327d, 0x2d738fc5), TOBN(0x6697b037, 0x975fee6c), TOBN(0x2f485da0, 0xc04079a5), TOBN(0x2cdf5735, 0x2feaa1ac)}}, {{TOBN(0x76944420, 0xbd55659e), TOBN(0x7973e32b, 0x4376090c), TOBN(0x86bb4fe1, 0x163b591a), TOBN(0x10441aed, 0xc196f0ca)}, {TOBN(0x3b431f4a, 0x045ad915), TOBN(0x6c11b437, 0xa4afacb1), TOBN(0x30b0c7db, 0x71fdbbd8), TOBN(0xb642931f, 0xeda65acd)}}, 
{{TOBN(0x4baae6e8, 0x9c92b235), TOBN(0xa73bbd0e, 0x6b3993a1), TOBN(0xd06d60ec, 0x693dd031), TOBN(0x03cab91b, 0x7156881c)}, {TOBN(0xd615862f, 0x1db3574b), TOBN(0x485b0185, 0x64bb061a), TOBN(0x27434988, 0xa0181e06), TOBN(0x2cd61ad4, 0xc1c0c757)}}, {{TOBN(0x3effed5a, 0x2ff9f403), TOBN(0x8dc98d8b, 0x62239029), TOBN(0x2206021e, 0x1f17b70d), TOBN(0xafbec0ca, 0xbf510015)}, {TOBN(0x9fed7164, 0x80130dfa), TOBN(0x306dc2b5, 0x8a02dcf5), TOBN(0x48f06620, 0xfeb10fc0), TOBN(0x78d1e1d5, 0x5a57cf51)}}, {{TOBN(0xadef8c5a, 0x192ef710), TOBN(0x88afbd4b, 0x3b7431f9), TOBN(0x7e1f7407, 0x64250c9e), TOBN(0x6e31318d, 0xb58bec07)}, {TOBN(0xfd4fc4b8, 0x24f89b4e), TOBN(0x65a5dd88, 0x48c36a2a), TOBN(0x4f1eccff, 0xf024baa7), TOBN(0x22a21cf2, 0xcba94650)}}, {{TOBN(0x95d29dee, 0x42a554f7), TOBN(0x828983a5, 0x002ec4ba), TOBN(0x8112a1f7, 0x8badb73d), TOBN(0x79ea8897, 0xa27c1839)}, {TOBN(0x8969a5a7, 0xd065fd83), TOBN(0xf49af791, 0xb262a0bc), TOBN(0xfcdea8b6, 0xaf2b5127), TOBN(0x10e913e1, 0x564c2dbc)}}, {{TOBN(0x51239d14, 0xbc21ef51), TOBN(0xe51c3ceb, 0x4ce57292), TOBN(0x795ff068, 0x47bbcc3b), TOBN(0x86b46e1e, 0xbd7e11e6)}, {TOBN(0x0ea6ba23, 0x80041ef4), TOBN(0xd72fe505, 0x6262342e), TOBN(0x8abc6dfd, 0x31d294d4), TOBN(0xbbe017a2, 0x1278c2c9)}}, {{TOBN(0xb1fcfa09, 0xb389328a), TOBN(0x322fbc62, 0xd01771b5), TOBN(0x04c0d063, 0x60b045bf), TOBN(0xdb652edc, 0x10e52d01)}, {TOBN(0x50ef932c, 0x03ec6627), TOBN(0xde1b3b2d, 0xc1ee50e3), TOBN(0x5ab7bdc5, 0xdc37a90d), TOBN(0xfea67213, 0x31e33a96)}}, {{TOBN(0x6482b5cb, 0x4f2999aa), TOBN(0x38476cc6, 0xb8cbf0dd), TOBN(0x93ebfacb, 0x173405bb), TOBN(0x15cdafe7, 0xe52369ec)}, {TOBN(0xd42d5ba4, 0xd935b7db), TOBN(0x648b6004, 0x1c99a4cd), TOBN(0x785101bd, 0xa3b5545b), TOBN(0x4bf2c38a, 0x9dd67faf)}}, {{TOBN(0xb1aadc63, 0x4442449c), TOBN(0xe0e9921a, 0x33ad4fb8), TOBN(0x5c552313, 0xaa686d82), TOBN(0xdee635fa, 0x465d866c)}, {TOBN(0xbc3c224a, 0x18ee6e8a), TOBN(0xeed748a6, 0xed42e02f), TOBN(0xe70f930a, 0xd474cd08), TOBN(0x774ea6ec, 0xfff24adf)}}, {{TOBN(0x03e2de1c, 0xf3480d4a), 
TOBN(0xf0d8edc7, 0xbc8acf1a), TOBN(0xf23e3303, 0x68295a9c), TOBN(0xfadd5f68, 0xc546a97d)}, {TOBN(0x895597ad, 0x96f8acb1), TOBN(0xbddd49d5, 0x671bdae2), TOBN(0x16fcd528, 0x21dd43f4), TOBN(0xa5a45412, 0x6619141a)}}}, {{{TOBN(0x8ce9b6bf, 0xc360e25a), TOBN(0xe6425195, 0x075a1a78), TOBN(0x9dc756a8, 0x481732f4), TOBN(0x83c0440f, 0x5432b57a)}, {TOBN(0xc670b3f1, 0xd720281f), TOBN(0x2205910e, 0xd135e051), TOBN(0xded14b0e, 0xdb052be7), TOBN(0x697b3d27, 0xc568ea39)}}, {{TOBN(0x2e599b9a, 0xfb3ff9ed), TOBN(0x28c2e0ab, 0x17f6515c), TOBN(0x1cbee4fd, 0x474da449), TOBN(0x071279a4, 0x4f364452)}, {TOBN(0x97abff66, 0x01fbe855), TOBN(0x3ee394e8, 0x5fda51c4), TOBN(0x190385f6, 0x67597c0b), TOBN(0x6e9fccc6, 0xa27ee34b)}}, {{TOBN(0x0b89de93, 0x14092ebb), TOBN(0xf17256bd, 0x428e240c), TOBN(0xcf89a7f3, 0x93d2f064), TOBN(0x4f57841e, 0xe1ed3b14)}, {TOBN(0x4ee14405, 0xe708d855), TOBN(0x856aae72, 0x03f1c3d0), TOBN(0xc8e5424f, 0xbdd7eed5), TOBN(0x3333e4ef, 0x73ab4270)}}, {{TOBN(0x3bc77ade, 0xdda492f8), TOBN(0xc11a3aea, 0x78297205), TOBN(0x5e89a3e7, 0x34931b4c), TOBN(0x17512e2e, 0x9f5694bb)}, {TOBN(0x5dc349f3, 0x177bf8b6), TOBN(0x232ea4ba, 0x08c7ff3e), TOBN(0x9c4f9d16, 0xf511145d), TOBN(0xccf109a3, 0x33b379c3)}}, {{TOBN(0xe75e7a88, 0xa1f25897), TOBN(0x7ac6961f, 0xa1b5d4d8), TOBN(0xe3e10773, 0x08f3ed5c), TOBN(0x208a54ec, 0x0a892dfb)}, {TOBN(0xbe826e19, 0x78660710), TOBN(0x0cf70a97, 0x237df2c8), TOBN(0x418a7340, 0xed704da5), TOBN(0xa3eeb9a9, 0x08ca33fd)}}, {{TOBN(0x49d96233, 0x169bca96), TOBN(0x04d286d4, 0x2da6aafb), TOBN(0xc09606ec, 0xa0c2fa94), TOBN(0x8869d0d5, 0x23ff0fb3)}, {TOBN(0xa99937e5, 0xd0150d65), TOBN(0xa92e2503, 0x240c14c9), TOBN(0x656bf945, 0x108e2d49), TOBN(0x152a733a, 0xa2f59e2b)}}, {{TOBN(0xb4323d58, 0x8434a920), TOBN(0xc0af8e93, 0x622103c5), TOBN(0x667518ef, 0x938dbf9a), TOBN(0xa1843073, 0x83a9cdf2)}, {TOBN(0x350a94aa, 0x5447ab80), TOBN(0xe5e5a325, 0xc75a3d61), TOBN(0x74ba507f, 0x68411a9e), TOBN(0x10581fc1, 0x594f70c5)}}, {{TOBN(0x60e28570, 0x80eb24a9), TOBN(0x7bedfb4d, 0x488e0cfd), 
TOBN(0x721ebbd7, 0xc259cdb8), TOBN(0x0b0da855, 0xbc6390a9)}, {TOBN(0x2b4d04db, 0xde314c70), TOBN(0xcdbf1fbc, 0x6c32e846), TOBN(0x33833eab, 0xb162fc9e), TOBN(0x9939b48b, 0xb0dd3ab7)}}, {{TOBN(0x5aaa98a7, 0xcb0c9c8c), TOBN(0x75105f30, 0x81c4375c), TOBN(0xceee5057, 0x5ef1c90f), TOBN(0xb31e065f, 0xc23a17bf)}, {TOBN(0x5364d275, 0xd4b6d45a), TOBN(0xd363f3ad, 0x62ec8996), TOBN(0xb5d21239, 0x4391c65b), TOBN(0x84564765, 0xebb41b47)}}, {{TOBN(0x20d18ecc, 0x37107c78), TOBN(0xacff3b6b, 0x570c2a66), TOBN(0x22f975d9, 0x9bd0d845), TOBN(0xef0a0c46, 0xba178fa0)}, {TOBN(0x1a419651, 0x76b6028e), TOBN(0xc49ec674, 0x248612d4), TOBN(0x5b6ac4f2, 0x7338af55), TOBN(0x06145e62, 0x7bee5a36)}}, {{TOBN(0x33e95d07, 0xe75746b5), TOBN(0x1c1e1f6d, 0xc40c78be), TOBN(0x967833ef, 0x222ff8e2), TOBN(0x4bedcf6a, 0xb49180ad)}, {TOBN(0x6b37e9c1, 0x3d7a4c8a), TOBN(0x2748887c, 0x6ddfe760), TOBN(0xf7055123, 0xaa3a5bbc), TOBN(0x954ff225, 0x7bbb8e74)}}, {{TOBN(0xc42b8ab1, 0x97c3dfb9), TOBN(0x55a549b0, 0xcf168154), TOBN(0xad6748e7, 0xc1b50692), TOBN(0x2775780f, 0x6fc5cbcb)}, {TOBN(0x4eab80b8, 0xe1c9d7c8), TOBN(0x8c69dae1, 0x3fdbcd56), TOBN(0x47e6b4fb, 0x9969eace), TOBN(0x002f1085, 0xa705cb5a)}}, {{TOBN(0x4e23ca44, 0x6d3fea55), TOBN(0xb4ae9c86, 0xf4810568), TOBN(0x47bfb91b, 0x2a62f27d), TOBN(0x60deb4c9, 0xd9bac28c)}, {TOBN(0xa892d894, 0x7de6c34c), TOBN(0x4ee68259, 0x4494587d), TOBN(0x914ee14e, 0x1a3f8a5b), TOBN(0xbb113eaa, 0x28700385)}}, {{TOBN(0x81ca03b9, 0x2115b4c9), TOBN(0x7c163d38, 0x8908cad1), TOBN(0xc912a118, 0xaa18179a), TOBN(0xe09ed750, 0x886e3081)}, {TOBN(0xa676e3fa, 0x26f516ca), TOBN(0x753cacf7, 0x8e732f91), TOBN(0x51592aea, 0x833da8b4), TOBN(0xc626f42f, 0x4cbea8aa)}}, {{TOBN(0xef9dc899, 0xa7b56eaf), TOBN(0x00c0e52c, 0x34ef7316), TOBN(0x5b1e4e24, 0xfe818a86), TOBN(0x9d31e20d, 0xc538be47)}, {TOBN(0x22eb932d, 0x3ed68974), TOBN(0xe44bbc08, 0x7c4e87c4), TOBN(0x4121086e, 0x0dde9aef), TOBN(0x8e6b9cff, 0x134f4345)}}, {{TOBN(0x96892c1f, 0x711b0eb9), TOBN(0xb905f2c8, 0x780ab954), TOBN(0xace26309, 0xa20792db), 
TOBN(0xec8ac9b3, 0x0684e126)}, {TOBN(0x486ad8b6, 0xb40a2447), TOBN(0x60121fc1, 0x9fe3fb24), TOBN(0x5626fccf, 0x1a8e3b3f), TOBN(0x4e568622, 0x6ad1f394)}}, {{TOBN(0xda7aae0d, 0x196aa5a1), TOBN(0xe0df8c77, 0x1041b5fb), TOBN(0x451465d9, 0x26b318b7), TOBN(0xc29b6e55, 0x7ab136e9)}, {TOBN(0x2c2ab48b, 0x71148463), TOBN(0xb5738de3, 0x64454a76), TOBN(0x54ccf9a0, 0x5a03abe4), TOBN(0x377c0296, 0x0427d58e)}}, {{TOBN(0x73f5f0b9, 0x2bb39c1f), TOBN(0x14373f2c, 0xe608d8c5), TOBN(0xdcbfd314, 0x00fbb805), TOBN(0xdf18fb20, 0x83afdcfb)}, {TOBN(0x81a57f42, 0x42b3523f), TOBN(0xe958532d, 0x87f650fb), TOBN(0xaa8dc8b6, 0x8b0a7d7c), TOBN(0x1b75dfb7, 0x150166be)}}, {{TOBN(0x90e4f7c9, 0x2d7d1413), TOBN(0x67e2d6b5, 0x9834f597), TOBN(0x4fd4f4f9, 0xa808c3e8), TOBN(0xaf8237e0, 0xd5281ec1)}, {TOBN(0x25ab5fdc, 0x84687cee), TOBN(0xc5ded6b1, 0xa5b26c09), TOBN(0x8e4a5aec, 0xc8ea7650), TOBN(0x23b73e5c, 0x14cc417f)}}, {{TOBN(0x2bfb4318, 0x3037bf52), TOBN(0xb61e6db5, 0x78c725d7), TOBN(0x8efd4060, 0xbbb3e5d7), TOBN(0x2e014701, 0xdbac488e)}, {TOBN(0xac75cf9a, 0x360aa449), TOBN(0xb70cfd05, 0x79634d08), TOBN(0xa591536d, 0xfffb15ef), TOBN(0xb2c37582, 0xd07c106c)}}, {{TOBN(0xb4293fdc, 0xf50225f9), TOBN(0xc52e175c, 0xb0e12b03), TOBN(0xf649c3ba, 0xd0a8bf64), TOBN(0x745a8fef, 0xeb8ae3c6)}, {TOBN(0x30d7e5a3, 0x58321bc3), TOBN(0xb1732be7, 0x0bc4df48), TOBN(0x1f217993, 0xe9ea5058), TOBN(0xf7a71cde, 0x3e4fd745)}}, {{TOBN(0x86cc533e, 0x894c5bbb), TOBN(0x6915c7d9, 0x69d83082), TOBN(0xa6aa2d05, 0x5815c244), TOBN(0xaeeee592, 0x49b22ce5)}, {TOBN(0x89e39d13, 0x78135486), TOBN(0x3a275c1f, 0x16b76f2f), TOBN(0xdb6bcc1b, 0xe036e8f5), TOBN(0x4df69b21, 0x5e4709f5)}}, {{TOBN(0xa188b250, 0x2d0f39aa), TOBN(0x622118bb, 0x15a85947), TOBN(0x2ebf520f, 0xfde0f4fa), TOBN(0xa40e9f29, 0x4860e539)}, {TOBN(0x7b6a51eb, 0x22b57f0f), TOBN(0x849a33b9, 0x7e80644a), TOBN(0x50e5d16f, 0x1cf095fe), TOBN(0xd754b54e, 0xec55f002)}}, {{TOBN(0x5cfbbb22, 0x236f4a98), TOBN(0x0b0c59e9, 0x066800bb), TOBN(0x4ac69a8f, 0x5a9a7774), TOBN(0x2b33f804, 0xd6bec948)}, 
{TOBN(0xb3729295, 0x32e6c466), TOBN(0x68956d0f, 0x4e599c73), TOBN(0xa47a249f, 0x155c31cc), TOBN(0x24d80f0d, 0xe1ce284e)}}, {{TOBN(0xcd821dfb, 0x988baf01), TOBN(0xe6331a7d, 0xdbb16647), TOBN(0x1eb8ad33, 0x094cb960), TOBN(0x593cca38, 0xc91bbca5)}, {TOBN(0x384aac8d, 0x26567456), TOBN(0x40fa0309, 0xc04b6490), TOBN(0x97834cd6, 0xdab6c8f6), TOBN(0x68a7318d, 0x3f91e55f)}}, {{TOBN(0xa00fd04e, 0xfc4d3157), TOBN(0xb56f8ab2, 0x2bf3bdea), TOBN(0x014f5648, 0x4fa57172), TOBN(0x948c5860, 0x450abdb3)}, {TOBN(0x342b5df0, 0x0ebd4f08), TOBN(0x3e5168cd, 0x0e82938e), TOBN(0x7aedc1ce, 0xb0df5dd0), TOBN(0x6bbbc6d9, 0xe5732516)}}, {{TOBN(0xc7bfd486, 0x605daaa6), TOBN(0x46fd72b7, 0xbb9a6c9e), TOBN(0xe4847fb1, 0xa124fb89), TOBN(0x75959cbd, 0xa2d8ffbc)}, {TOBN(0x42579f65, 0xc8a588ee), TOBN(0x368c92e6, 0xb80b499d), TOBN(0xea4ef6cd, 0x999a5df1), TOBN(0xaa73bb7f, 0x936fe604)}}, {{TOBN(0xf347a70d, 0x6457d188), TOBN(0x86eda86b, 0x8b7a388b), TOBN(0xb7cdff06, 0x0ccd6013), TOBN(0xbeb1b6c7, 0xd0053fb2)}, {TOBN(0x0b022387, 0x99240a9f), TOBN(0x1bbb384f, 0x776189b2), TOBN(0x8695e71e, 0x9066193a), TOBN(0x2eb50097, 0x06ffac7e)}}, {{TOBN(0x0654a9c0, 0x4a7d2caa), TOBN(0x6f3fb3d1, 0xa5aaa290), TOBN(0x835db041, 0xff476e8f), TOBN(0x540b8b0b, 0xc42295e4)}, {TOBN(0xa5c73ac9, 0x05e214f5), TOBN(0x9a74075a, 0x56a0b638), TOBN(0x2e4b1090, 0xce9e680b), TOBN(0x57a5b479, 0x6b8d9afa)}}, {{TOBN(0x0dca48e7, 0x26bfe65c), TOBN(0x097e391c, 0x7290c307), TOBN(0x683c462e, 0x6669e72e), TOBN(0xf505be1e, 0x062559ac)}, {TOBN(0x5fbe3ea1, 0xe3a3035a), TOBN(0x6431ebf6, 0x9cd50da8), TOBN(0xfd169d5c, 0x1f6407f2), TOBN(0x8d838a95, 0x60fce6b8)}}, {{TOBN(0x2a2bfa7f, 0x650006f0), TOBN(0xdfd7dad3, 0x50c0fbb2), TOBN(0x92452495, 0xccf9ad96), TOBN(0x183bf494, 0xd95635f9)}, {TOBN(0x02d5df43, 0x4a7bd989), TOBN(0x505385cc, 0xa5431095), TOBN(0xdd98e67d, 0xfd43f53e), TOBN(0xd61e1a6c, 0x500c34a9)}}, {{TOBN(0x5a4b46c6, 0x4a8a3d62), TOBN(0x8469c4d0, 0x247743d2), TOBN(0x2bb3a13d, 0x88f7e433), TOBN(0x62b23a10, 0x01be5849)}, {TOBN(0xe83596b4, 0xa63d1a4c), 
TOBN(0x454e7fea, 0x7d183f3e), TOBN(0x643fce61, 0x17afb01c), TOBN(0x4e65e5e6, 0x1c4c3638)}}, {{TOBN(0x41d85ea1, 0xef74c45b), TOBN(0x2cfbfa66, 0xae328506), TOBN(0x98b078f5, 0x3ada7da9), TOBN(0xd985fe37, 0xec752fbb)}, {TOBN(0xeece68fe, 0x5a0148b4), TOBN(0x6f9a55c7, 0x2d78136d), TOBN(0x232dccc4, 0xd2b729ce), TOBN(0xa27e0dfd, 0x90aafbc4)}}, {{TOBN(0x96474452, 0x12b4603e), TOBN(0xa876c551, 0x6b706d14), TOBN(0xdf145fcf, 0x69a9d412), TOBN(0xe2ab75b7, 0x2d479c34)}, {TOBN(0x12df9a76, 0x1a23ff97), TOBN(0xc6138992, 0x5d359d10), TOBN(0x6e51c7ae, 0xfa835f22), TOBN(0x69a79cb1, 0xc0fcc4d9)}}, {{TOBN(0xf57f350d, 0x594cc7e1), TOBN(0x3079ca63, 0x3350ab79), TOBN(0x226fb614, 0x9aff594a), TOBN(0x35afec02, 0x6d59a62b)}, {TOBN(0x9bee46f4, 0x06ed2c6e), TOBN(0x58da1735, 0x7d939a57), TOBN(0x44c50402, 0x8fd1797e), TOBN(0xd8853e7c, 0x5ccea6ca)}}, {{TOBN(0x4065508d, 0xa35fcd5f), TOBN(0x8965df8c, 0x495ccaeb), TOBN(0x0f2da850, 0x12e1a962), TOBN(0xee471b94, 0xc1cf1cc4)}, {TOBN(0xcef19bc8, 0x0a08fb75), TOBN(0x704958f5, 0x81de3591), TOBN(0x2867f8b2, 0x3aef4f88), TOBN(0x8d749384, 0xea9f9a5f)}}, {{TOBN(0x1b385537, 0x8c9049f4), TOBN(0x5be948f3, 0x7b92d8b6), TOBN(0xd96f725d, 0xb6e2bd6b), TOBN(0x37a222bc, 0x958c454d)}, {TOBN(0xe7c61abb, 0x8809bf61), TOBN(0x46f07fbc, 0x1346f18d), TOBN(0xfb567a7a, 0xe87c0d1c), TOBN(0x84a461c8, 0x7ef3d07a)}}, {{TOBN(0x0a5adce6, 0xd9278d98), TOBN(0x24d94813, 0x9dfc73e1), TOBN(0x4f3528b6, 0x054321c3), TOBN(0x2e03fdde, 0x692ea706)}, {TOBN(0x10e60619, 0x47b533c0), TOBN(0x1a8bc73f, 0x2ca3c055), TOBN(0xae58d4b2, 0x1bb62b8f), TOBN(0xb2045a73, 0x584a24e3)}}, {{TOBN(0x3ab3d5af, 0xbd76e195), TOBN(0x478dd1ad, 0x6938a810), TOBN(0x6ffab393, 0x6ee3d5cb), TOBN(0xdfb693db, 0x22b361e4)}, {TOBN(0xf9694496, 0x51dbf1a7), TOBN(0xcab4b4ef, 0x08a2e762), TOBN(0xe8c92f25, 0xd39bba9a), TOBN(0x850e61bc, 0xf1464d96)}}, {{TOBN(0xb7e830e3, 0xdc09508b), TOBN(0xfaf6d2cf, 0x74317655), TOBN(0x72606ceb, 0xdf690355), TOBN(0x48bb92b3, 0xd0c3ded6)}, {TOBN(0x65b75484, 0x5c7cf892), TOBN(0xf6cd7ac9, 0xd5d5f01f), 
TOBN(0xc2c30a59, 0x96401d69), TOBN(0x91268650, 0xed921878)}}, {{TOBN(0x380bf913, 0xb78c558f), TOBN(0x43c0baeb, 0xc8afdaa9), TOBN(0x377f61d5, 0x54f169d3), TOBN(0xf8da07e3, 0xae5ff20b)}, {TOBN(0xb676c49d, 0xa8a90ea8), TOBN(0x81c1ff2b, 0x83a29b21), TOBN(0x383297ac, 0x2ad8d276), TOBN(0x3001122f, 0xba89f982)}}, {{TOBN(0xe1d794be, 0x6718e448), TOBN(0x246c1482, 0x7c3e6e13), TOBN(0x56646ef8, 0x5d26b5ef), TOBN(0x80f5091e, 0x88069cdd)}, {TOBN(0xc5992e2f, 0x724bdd38), TOBN(0x02e915b4, 0x8471e8c7), TOBN(0x96ff320a, 0x0d0ff2a9), TOBN(0xbf886487, 0x4384d1a0)}}, {{TOBN(0xbbe1e6a6, 0xc93f72d6), TOBN(0xd5f75d12, 0xcad800ea), TOBN(0xfa40a09f, 0xe7acf117), TOBN(0x32c8cdd5, 0x7581a355)}, {TOBN(0x74221992, 0x7023c499), TOBN(0xa8afe5d7, 0x38ec3901), TOBN(0x5691afcb, 0xa90e83f0), TOBN(0x41bcaa03, 0x0b8f8eac)}}, {{TOBN(0xe38b5ff9, 0x8d2668d5), TOBN(0x0715281a, 0x7ad81965), TOBN(0x1bc8fc7c, 0x03c6ce11), TOBN(0xcbbee6e2, 0x8b650436)}, {TOBN(0x06b00fe8, 0x0cdb9808), TOBN(0x17d6e066, 0xfe3ed315), TOBN(0x2e9d38c6, 0x4d0b5018), TOBN(0xab8bfd56, 0x844dcaef)}}, {{TOBN(0x42894a59, 0x513aed8b), TOBN(0xf77f3b6d, 0x314bd07a), TOBN(0xbbdecb8f, 0x8e42b582), TOBN(0xf10e2fa8, 0xd2390fe6)}, {TOBN(0xefb95022, 0x62a2f201), TOBN(0x4d59ea50, 0x50ee32b0), TOBN(0xd87f7728, 0x6da789a8), TOBN(0xcf98a2cf, 0xf79492c4)}}, {{TOBN(0xf9577239, 0x720943c2), TOBN(0xba044cf5, 0x3990b9d0), TOBN(0x5aa8e823, 0x95f2884a), TOBN(0x834de6ed, 0x0278a0af)}, {TOBN(0xc8e1ee9a, 0x5f25bd12), TOBN(0x9259ceaa, 0x6f7ab271), TOBN(0x7e6d97a2, 0x77d00b76), TOBN(0x5c0c6eea, 0xa437832a)}}, {{TOBN(0x5232c20f, 0x5606b81d), TOBN(0xabd7b375, 0x0d991ee5), TOBN(0x4d2bfe35, 0x8632d951), TOBN(0x78f85146, 0x98ed9364)}, {TOBN(0x951873f0, 0xf30c3282), TOBN(0x0da8ac80, 0xa789230b), TOBN(0x3ac7789c, 0x5398967f), TOBN(0xa69b8f7f, 0xbdda0fb5)}}, {{TOBN(0xe5db7717, 0x6add8545), TOBN(0x1b71cb66, 0x72c49b66), TOBN(0xd8560739, 0x68421d77), TOBN(0x03840fe8, 0x83e3afea)}, {TOBN(0xb391dad5, 0x1ec69977), TOBN(0xae243fb9, 0x307f6726), TOBN(0xc88ac87b, 0xe8ca160c), 
TOBN(0x5174cced, 0x4ce355f4)}}, {{TOBN(0x98a35966, 0xe58ba37d), TOBN(0xfdcc8da2, 0x7817335d), TOBN(0x5b752830, 0x83fbc7bf), TOBN(0x68e419d4, 0xd9c96984)}, {TOBN(0x409a39f4, 0x02a40380), TOBN(0x88940faf, 0x1fe977bc), TOBN(0xc640a94b, 0x8f8edea6), TOBN(0x1e22cd17, 0xed11547d)}}, {{TOBN(0xe28568ce, 0x59ffc3e2), TOBN(0x60aa1b55, 0xc1dee4e7), TOBN(0xc67497c8, 0x837cb363), TOBN(0x06fb438a, 0x105a2bf2)}, {TOBN(0x30357ec4, 0x500d8e20), TOBN(0x1ad9095d, 0x0670db10), TOBN(0x7f589a05, 0xc73b7cfd), TOBN(0xf544607d, 0x880d6d28)}}, {{TOBN(0x17ba93b1, 0xa20ef103), TOBN(0xad859130, 0x6ba6577b), TOBN(0x65c91cf6, 0x6fa214a0), TOBN(0xd7d49c6c, 0x27990da5)}, {TOBN(0xecd9ec8d, 0x20bb569d), TOBN(0xbd4b2502, 0xeeffbc33), TOBN(0x2056ca5a, 0x6bed0467), TOBN(0x7916a1f7, 0x5b63728c)}}, {{TOBN(0xd4f9497d, 0x53a4f566), TOBN(0x89734664, 0x97b56810), TOBN(0xf8e1da74, 0x0494a621), TOBN(0x82546a93, 0x8d011c68)}, {TOBN(0x1f3acb19, 0xc61ac162), TOBN(0x52f8fa9c, 0xabad0d3e), TOBN(0x15356523, 0xb4b7ea43), TOBN(0x5a16ad61, 0xae608125)}}, {{TOBN(0xb0bcb87f, 0x4faed184), TOBN(0x5f236b1d, 0x5029f45f), TOBN(0xd42c7607, 0x0bc6b1fc), TOBN(0xc644324e, 0x68aefce3)}, {TOBN(0x8e191d59, 0x5c5d8446), TOBN(0xc0208077, 0x13ae1979), TOBN(0xadcaee55, 0x3ba59cc7), TOBN(0x20ed6d6b, 0xa2cb81ba)}}, {{TOBN(0x0952ba19, 0xb6efcffc), TOBN(0x60f12d68, 0x97c0b87c), TOBN(0x4ee2c7c4, 0x9caa30bc), TOBN(0x767238b7, 0x97fbff4e)}, {TOBN(0xebc73921, 0x501b5d92), TOBN(0x3279e3df, 0xc2a37737), TOBN(0x9fc12bc8, 0x6d197543), TOBN(0xfa94dc6f, 0x0a40db4e)}}, {{TOBN(0x7392b41a, 0x530ccbbd), TOBN(0x87c82146, 0xea823525), TOBN(0xa52f984c, 0x05d98d0c), TOBN(0x2ae57d73, 0x5ef6974c)}, {TOBN(0x9377f7bf, 0x3042a6dd), TOBN(0xb1a007c0, 0x19647a64), TOBN(0xfaa9079a, 0x0cca9767), TOBN(0x3d81a25b, 0xf68f72d5)}}, {{TOBN(0x752067f8, 0xff81578e), TOBN(0x78622150, 0x9045447d), TOBN(0xc0c22fcf, 0x0505aa6f), TOBN(0x1030f0a6, 0x6bed1c77)}, {TOBN(0x31f29f15, 0x1f0bd739), TOBN(0x2d7989c7, 0xe6debe85), TOBN(0x5c070e72, 0x8e677e98), TOBN(0x0a817bd3, 0x06e81fd5)}}, 
{{TOBN(0xc110d830, 0xb0f2ac95), TOBN(0x48d0995a, 0xab20e64e), TOBN(0x0f3e00e1, 0x7729cd9a), TOBN(0x2a570c20, 0xdd556946)}, {TOBN(0x912dbcfd, 0x4e86214d), TOBN(0x2d014ee2, 0xcf615498), TOBN(0x55e2b1e6, 0x3530d76e), TOBN(0xc5135ae4, 0xfd0fd6d1)}}, {{TOBN(0x0066273a, 0xd4f3049f), TOBN(0xbb8e9893, 0xe7087477), TOBN(0x2dba1ddb, 0x14c6e5fd), TOBN(0xdba37886, 0x51f57e6c)}, {TOBN(0x5aaee0a6, 0x5a72f2cf), TOBN(0x1208bfbf, 0x7bea5642), TOBN(0xf5c6aa3b, 0x67872c37), TOBN(0xd726e083, 0x43f93224)}}, {{TOBN(0x1854daa5, 0x061f1658), TOBN(0xc0016df1, 0xdf0cd2b3), TOBN(0xc2a3f23e, 0x833d50de), TOBN(0x73b681d2, 0xbbbd3017)}, {TOBN(0x2f046dc4, 0x3ac343c0), TOBN(0x9c847e7d, 0x85716421), TOBN(0xe1e13c91, 0x0917eed4), TOBN(0x3fc9eebd, 0x63a1b9c6)}}, {{TOBN(0x0f816a72, 0x7fe02299), TOBN(0x6335ccc2, 0x294f3319), TOBN(0x3820179f, 0x4745c5be), TOBN(0xe647b782, 0x922f066e)}, {TOBN(0xc22e49de, 0x02cafb8a), TOBN(0x299bc2ff, 0xfcc2eccc), TOBN(0x9a8feea2, 0x6e0e8282), TOBN(0xa627278b, 0xfe893205)}}, {{TOBN(0xa7e19733, 0x7933e47b), TOBN(0xf4ff6b13, 0x2e766402), TOBN(0xa4d8be0a, 0x98440d9f), TOBN(0x658f5c2f, 0x38938808)}, {TOBN(0x90b75677, 0xc95b3b3e), TOBN(0xfa044269, 0x3137b6ff), TOBN(0x077b039b, 0x43c47c29), TOBN(0xcca95dd3, 0x8a6445b2)}}, {{TOBN(0x0b498ba4, 0x2333fc4c), TOBN(0x274f8e68, 0xf736a1b1), TOBN(0x6ca348fd, 0x5f1d4b2e), TOBN(0x24d3be78, 0xa8f10199)}, {TOBN(0x8535f858, 0xca14f530), TOBN(0xa6e7f163, 0x5b982e51), TOBN(0x847c8512, 0x36e1bf62), TOBN(0xf6a7c58e, 0x03448418)}}, {{TOBN(0x583f3703, 0xf9374ab6), TOBN(0x864f9195, 0x6e564145), TOBN(0x33bc3f48, 0x22526d50), TOBN(0x9f323c80, 0x1262a496)}, {TOBN(0xaa97a7ae, 0x3f046a9a), TOBN(0x70da183e, 0xdf8a039a), TOBN(0x5b68f71c, 0x52aa0ba6), TOBN(0x9be0fe51, 0x21459c2d)}}, {{TOBN(0xc1e17eb6, 0xcbc613e5), TOBN(0x33131d55, 0x497ea61c), TOBN(0x2f69d39e, 0xaf7eded5), TOBN(0x73c2f434, 0xde6af11b)}, {TOBN(0x4ca52493, 0xa4a375fa), TOBN(0x5f06787c, 0xb833c5c2), TOBN(0x814e091f, 0x3e6e71cf), TOBN(0x76451f57, 0x8b746666)}}}, {{{TOBN(0x80f9bdef, 
0x694db7e0), TOBN(0xedca8787, 0xb9fcddc6), TOBN(0x51981c34, 0x03b8dce1), TOBN(0x4274dcf1, 0x70e10ba1)}, {TOBN(0xf72743b8, 0x6def6d1a), TOBN(0xd25b1670, 0xebdb1866), TOBN(0xc4491e8c, 0x050c6f58), TOBN(0x2be2b2ab, 0x87fbd7f5)}}, {{TOBN(0x3e0e5c9d, 0xd111f8ec), TOBN(0xbcc33f8d, 0xb7c4e760), TOBN(0x702f9a91, 0xbd392a51), TOBN(0x7da4a795, 0xc132e92d)}, {TOBN(0x1a0b0ae3, 0x0bb1151b), TOBN(0x54febac8, 0x02e32251), TOBN(0xea3a5082, 0x694e9e78), TOBN(0xe58ffec1, 0xe4fe40b8)}}, {{TOBN(0xf85592fc, 0xd1e0cf9e), TOBN(0xdea75f0d, 0xc0e7b2e8), TOBN(0xc04215cf, 0xc135584e), TOBN(0x174fc727, 0x2f57092a)}, {TOBN(0xe7277877, 0xeb930bea), TOBN(0x504caccb, 0x5eb02a5a), TOBN(0xf9fe08f7, 0xf5241b9b), TOBN(0xe7fb62f4, 0x8d5ca954)}}, {{TOBN(0xfbb8349d, 0x29c4120b), TOBN(0x9f94391f, 0xc0d0d915), TOBN(0xc4074fa7, 0x5410ba51), TOBN(0xa66adbf6, 0x150a5911)}, {TOBN(0xc164543c, 0x34bfca38), TOBN(0xe0f27560, 0xb9e1ccfc), TOBN(0x99da0f53, 0xe820219c), TOBN(0xe8234498, 0xc6b4997a)}}, {{TOBN(0xcfb88b76, 0x9d4c5423), TOBN(0x9e56eb10, 0xb0521c49), TOBN(0x418e0b5e, 0xbe8700a1), TOBN(0x00cbaad6, 0xf93cb58a)}, {TOBN(0xe923fbde, 0xd92a5e67), TOBN(0xca4979ac, 0x1f347f11), TOBN(0x89162d85, 0x6bc0585b), TOBN(0xdd6254af, 0xac3c70e3)}}, {{TOBN(0x7b23c513, 0x516e19e4), TOBN(0x56e2e847, 0xc5c4d593), TOBN(0x9f727d73, 0x5ce71ef6), TOBN(0x5b6304a6, 0xf79a44c5)}, {TOBN(0x6638a736, 0x3ab7e433), TOBN(0x1adea470, 0xfe742f83), TOBN(0xe054b854, 0x5b7fc19f), TOBN(0xf935381a, 0xba1d0698)}}, {{TOBN(0x546eab2d, 0x799e9a74), TOBN(0x96239e0e, 0xa949f729), TOBN(0xca274c6b, 0x7090055a), TOBN(0x835142c3, 0x9020c9b0)}, {TOBN(0xa405667a, 0xa2e8807f), TOBN(0x29f2c085, 0x1aa3d39e), TOBN(0xcc555d64, 0x42fc72f5), TOBN(0xe856e0e7, 0xfbeacb3c)}}, {{TOBN(0xb5504f9d, 0x918e4936), TOBN(0x65035ef6, 0xb2513982), TOBN(0x0553a0c2, 0x6f4d9cb9), TOBN(0x6cb10d56, 0xbea85509)}, {TOBN(0x48d957b7, 0xa242da11), TOBN(0x16a4d3dd, 0x672b7268), TOBN(0x3d7e637c, 0x8502a96b), TOBN(0x27c7032b, 0x730d463b)}}, {{TOBN(0xbdc02b18, 0xe4136a14), TOBN(0xbacf969d, 
0x678e32bf), TOBN(0xc98d89a3, 0xdd9c3c03), TOBN(0x7b92420a, 0x23becc4f)}, {TOBN(0xd4b41f78, 0xc64d565c), TOBN(0x9f969d00, 0x10f28295), TOBN(0xec7f7f76, 0xb13d051a), TOBN(0x08945e1e, 0xa92da585)}}, {{TOBN(0x55366b7d, 0x5846426f), TOBN(0xe7d09e89, 0x247d441d), TOBN(0x510b404d, 0x736fbf48), TOBN(0x7fa003d0, 0xe784bd7d)}, {TOBN(0x25f7614f, 0x17fd9596), TOBN(0x49e0e0a1, 0x35cb98db), TOBN(0x2c65957b, 0x2e83a76a), TOBN(0x5d40da8d, 0xcddbe0f8)}}, {{TOBN(0xf2b8c405, 0x050bad24), TOBN(0x8918426d, 0xc2aa4823), TOBN(0x2aeab3dd, 0xa38365a7), TOBN(0x72031717, 0x7c91b690)}, {TOBN(0x8b00d699, 0x60a94120), TOBN(0x478a255d, 0xe99eaeec), TOBN(0xbf656a5f, 0x6f60aafd), TOBN(0xdfd7cb75, 0x5dee77b3)}}, {{TOBN(0x37f68bb4, 0xa595939d), TOBN(0x03556479, 0x28740217), TOBN(0x8e740e7c, 0x84ad7612), TOBN(0xd89bc843, 0x9044695f)}, {TOBN(0xf7f3da5d, 0x85a9184d), TOBN(0x562563bb, 0x9fc0b074), TOBN(0x06d2e6aa, 0xf88a888e), TOBN(0x612d8643, 0x161fbe7c)}}, {{TOBN(0x465edba7, 0xf64085e7), TOBN(0xb230f304, 0x29aa8511), TOBN(0x53388426, 0xcda2d188), TOBN(0x90885735, 0x4b666649)}, {TOBN(0x6f02ff9a, 0x652f54f6), TOBN(0x65c82294, 0x5fae2bf0), TOBN(0x7816ade0, 0x62f5eee3), TOBN(0xdcdbdf43, 0xfcc56d70)}}, {{TOBN(0x9fb3bba3, 0x54530bb2), TOBN(0xbde3ef77, 0xcb0869ea), TOBN(0x89bc9046, 0x0b431163), TOBN(0x4d03d7d2, 0xe4819a35)}, {TOBN(0x33ae4f9e, 0x43b6a782), TOBN(0x216db307, 0x9c88a686), TOBN(0x91dd88e0, 0x00ffedd9), TOBN(0xb280da9f, 0x12bd4840)}}, {{TOBN(0x32a7cb8a, 0x1635e741), TOBN(0xfe14008a, 0x78be02a7), TOBN(0x3fafb334, 0x1b7ae030), TOBN(0x7fd508e7, 0x5add0ce9)}, {TOBN(0x72c83219, 0xd607ad51), TOBN(0x0f229c0a, 0x8d40964a), TOBN(0x1be2c336, 0x1c878da2), TOBN(0xe0c96742, 0xeab2ab86)}}, {{TOBN(0x458f8691, 0x3e538cd7), TOBN(0xa7001f6c, 0x8e08ad53), TOBN(0x52b8c6e6, 0xbf5d15ff), TOBN(0x548234a4, 0x011215dd)}, {TOBN(0xff5a9d2d, 0x3d5b4045), TOBN(0xb0ffeeb6, 0x4a904190), TOBN(0x55a3aca4, 0x48607f8b), TOBN(0x8cbd665c, 0x30a0672a)}}, {{TOBN(0x87f834e0, 0x42583068), TOBN(0x02da2aeb, 0xf3f6e683), TOBN(0x6b763e5d, 
0x05c12248), TOBN(0x7230378f, 0x65a8aefc)}, {TOBN(0x93bd80b5, 0x71e8e5ca), TOBN(0x53ab041c, 0xb3b62524), TOBN(0x1b860513, 0x6c9c552e), TOBN(0xe84d402c, 0xd5524e66)}}, {{TOBN(0xa37f3573, 0xf37f5937), TOBN(0xeb0f6c7d, 0xd1e4fca5), TOBN(0x2965a554, 0xac8ab0fc), TOBN(0x17fbf56c, 0x274676ac)}, {TOBN(0x2e2f6bd9, 0xacf7d720), TOBN(0x41fc8f88, 0x10224766), TOBN(0x517a14b3, 0x85d53bef), TOBN(0xdae327a5, 0x7d76a7d1)}}, {{TOBN(0x6ad0a065, 0xc4818267), TOBN(0x33aa189b, 0x37c1bbc1), TOBN(0x64970b52, 0x27392a92), TOBN(0x21699a1c, 0x2d1535ea)}, {TOBN(0xcd20779c, 0xc2d7a7fd), TOBN(0xe3186059, 0x99c83cf2), TOBN(0x9b69440b, 0x72c0b8c7), TOBN(0xa81497d7, 0x7b9e0e4d)}}, {{TOBN(0x515d5c89, 0x1f5f82dc), TOBN(0x9a7f67d7, 0x6361079e), TOBN(0xa8da81e3, 0x11a35330), TOBN(0xe44990c4, 0x4b18be1b)}, {TOBN(0xc7d5ed95, 0xaf103e59), TOBN(0xece8aba7, 0x8dac9261), TOBN(0xbe82b099, 0x9394b8d3), TOBN(0x6830f09a, 0x16adfe83)}}, {{TOBN(0x250a29b4, 0x88172d01), TOBN(0x8b20bd65, 0xcaff9e02), TOBN(0xb8a7661e, 0xe8a6329a), TOBN(0x4520304d, 0xd3fce920)}, {TOBN(0xae45da1f, 0x2b47f7ef), TOBN(0xe07f5288, 0x5bffc540), TOBN(0xf7997009, 0x3464f874), TOBN(0x2244c2cd, 0xa6fa1f38)}}, {{TOBN(0x43c41ac1, 0x94d7d9b1), TOBN(0x5bafdd82, 0xc82e7f17), TOBN(0xdf0614c1, 0x5fda0fca), TOBN(0x74b043a7, 0xa8ae37ad)}, {TOBN(0x3ba6afa1, 0x9e71734c), TOBN(0x15d5437e, 0x9c450f2e), TOBN(0x4a5883fe, 0x67e242b1), TOBN(0x5143bdc2, 0x2c1953c2)}}, {{TOBN(0x542b8b53, 0xfc5e8920), TOBN(0x363bf9a8, 0x9a9cee08), TOBN(0x02375f10, 0xc3486e08), TOBN(0x2037543b, 0x8c5e70d2)}, {TOBN(0x7109bccc, 0x625640b4), TOBN(0xcbc1051e, 0x8bc62c3b), TOBN(0xf8455fed, 0x803f26ea), TOBN(0x6badceab, 0xeb372424)}}, {{TOBN(0xa2a9ce7c, 0x6b53f5f9), TOBN(0x64246595, 0x1b176d99), TOBN(0xb1298d36, 0xb95c081b), TOBN(0x53505bb8, 0x1d9a9ee6)}, {TOBN(0x3f6f9e61, 0xf2ba70b0), TOBN(0xd07e16c9, 0x8afad453), TOBN(0x9f1694bb, 0xe7eb4a6a), TOBN(0xdfebced9, 0x3cb0bc8e)}}, {{TOBN(0x92d3dcdc, 0x53868c8b), TOBN(0x174311a2, 0x386107a6), TOBN(0x4109e07c, 0x689b4e64), TOBN(0x30e4587f, 
0x2df3dcb6)}, {TOBN(0x841aea31, 0x0811b3b2), TOBN(0x6144d41d, 0x0cce43ea), TOBN(0x464c4581, 0x2a9a7803), TOBN(0xd03d371f, 0x3e158930)}}, {{TOBN(0xc676d7f2, 0xb1f3390b), TOBN(0x9f7a1b8c, 0xa5b61272), TOBN(0x4ebebfc9, 0xc2e127a9), TOBN(0x4602500c, 0x5dd997bf)}, {TOBN(0x7f09771c, 0x4711230f), TOBN(0x058eb37c, 0x020f09c1), TOBN(0xab693d4b, 0xfee5e38b), TOBN(0x9289eb1f, 0x4653cbc0)}}, {{TOBN(0xbecf46ab, 0xd51b9cf5), TOBN(0xd2aa9c02, 0x9f0121af), TOBN(0x36aaf7d2, 0xe90dc274), TOBN(0x909e4ea0, 0x48b95a3c)}, {TOBN(0xe6b70496, 0x6f32dbdb), TOBN(0x672188a0, 0x8b030b3e), TOBN(0xeeffe5b3, 0xcfb617e2), TOBN(0x87e947de, 0x7c82709e)}}, {{TOBN(0xa44d2b39, 0x1770f5a7), TOBN(0xe4d4d791, 0x0e44eb82), TOBN(0x42e69d1e, 0x3f69712a), TOBN(0xbf11c4d6, 0xac6a820e)}, {TOBN(0xb5e7f3e5, 0x42c4224c), TOBN(0xd6b4e81c, 0x449d941c), TOBN(0x5d72bd16, 0x5450e878), TOBN(0x6a61e28a, 0xee25ac54)}}, {{TOBN(0x33272094, 0xe6f1cd95), TOBN(0x7512f30d, 0x0d18673f), TOBN(0x32f7a4ca, 0x5afc1464), TOBN(0x2f095656, 0x6bbb977b)}, {TOBN(0x586f47ca, 0xa8226200), TOBN(0x02c868ad, 0x1ac07369), TOBN(0x4ef2b845, 0xc613acbe), TOBN(0x43d7563e, 0x0386054c)}}, {{TOBN(0x54da9dc7, 0xab952578), TOBN(0xb5423df2, 0x26e84d0b), TOBN(0xa8b64eeb, 0x9b872042), TOBN(0xac205782, 0x5990f6df)}, {TOBN(0x4ff696eb, 0x21f4c77a), TOBN(0x1a79c3e4, 0xaab273af), TOBN(0x29bc922e, 0x9436b3f1), TOBN(0xff807ef8, 0xd6d9a27a)}}, {{TOBN(0x82acea3d, 0x778f22a0), TOBN(0xfb10b2e8, 0x5b5e7469), TOBN(0xc0b16980, 0x2818ee7d), TOBN(0x011afff4, 0xc91c1a2f)}, {TOBN(0x95a6d126, 0xad124418), TOBN(0x31c081a5, 0xe72e295f), TOBN(0x36bb283a, 0xf2f4db75), TOBN(0xd115540f, 0x7acef462)}}, {{TOBN(0xc7f3a8f8, 0x33f6746c), TOBN(0x21e46f65, 0xfea990ca), TOBN(0x915fd5c5, 0xcaddb0a9), TOBN(0xbd41f016, 0x78614555)}, {TOBN(0x346f4434, 0x426ffb58), TOBN(0x80559436, 0x14dbc204), TOBN(0xf3dd20fe, 0x5a969b7f), TOBN(0x9d59e956, 0xe899a39a)}}, {{TOBN(0xf1b0971c, 0x8ad4cf4b), TOBN(0x03448860, 0x2ffb8fb8), TOBN(0xf071ac3c, 0x65340ba4), TOBN(0x408d0596, 0xb27fd758)}, {TOBN(0xe7c78ea4, 
0x98c364b0), TOBN(0xa4aac4a5, 0x051e8ab5), TOBN(0xb9e1d560, 0x485d9002), TOBN(0x9acd518a, 0x88844455)}}, {{TOBN(0xe4ca688f, 0xd06f56c0), TOBN(0xa48af70d, 0xdf027972), TOBN(0x691f0f04, 0x5e9a609d), TOBN(0xa9dd82cd, 0xee61270e)}, {TOBN(0x8903ca63, 0xa0ef18d3), TOBN(0x9fb7ee35, 0x3d6ca3bd), TOBN(0xa7b4a09c, 0xabf47d03), TOBN(0x4cdada01, 0x1c67de8e)}}, {{TOBN(0x52003749, 0x9355a244), TOBN(0xe77fd2b6, 0x4f2151a9), TOBN(0x695d6cf6, 0x66b4efcb), TOBN(0xc5a0cacf, 0xda2cfe25)}, {TOBN(0x104efe5c, 0xef811865), TOBN(0xf52813e8, 0x9ea5cc3d), TOBN(0x855683dc, 0x40b58dbc), TOBN(0x0338ecde, 0x175fcb11)}}, {{TOBN(0xf9a05637, 0x74921592), TOBN(0xb4f1261d, 0xb9bb9d31), TOBN(0x551429b7, 0x4e9c5459), TOBN(0xbe182e6f, 0x6ea71f53)}, {TOBN(0xd3a3b07c, 0xdfc50573), TOBN(0x9ba1afda, 0x62be8d44), TOBN(0x9bcfd2cb, 0x52ab65d3), TOBN(0xdf11d547, 0xa9571802)}}, {{TOBN(0x099403ee, 0x02a2404a), TOBN(0x497406f4, 0x21088a71), TOBN(0x99479409, 0x5004ae71), TOBN(0xbdb42078, 0xa812c362)}, {TOBN(0x2b72a30f, 0xd8828442), TOBN(0x283add27, 0xfcb5ed1c), TOBN(0xf7c0e200, 0x66a40015), TOBN(0x3e3be641, 0x08b295ef)}}, {{TOBN(0xac127dc1, 0xe038a675), TOBN(0x729deff3, 0x8c5c6320), TOBN(0xb7df8fd4, 0xa90d2c53), TOBN(0x9b74b0ec, 0x681e7cd3)}, {TOBN(0x5cb5a623, 0xdab407e5), TOBN(0xcdbd3615, 0x76b340c6), TOBN(0xa184415a, 0x7d28392c), TOBN(0xc184c1d8, 0xe96f7830)}}, {{TOBN(0xc3204f19, 0x81d3a80f), TOBN(0xfde0c841, 0xc8e02432), TOBN(0x78203b3e, 0x8149e0c1), TOBN(0x5904bdbb, 0x08053a73)}, {TOBN(0x30fc1dd1, 0x101b6805), TOBN(0x43c223bc, 0x49aa6d49), TOBN(0x9ed67141, 0x7a174087), TOBN(0x311469a0, 0xd5997008)}}, {{TOBN(0xb189b684, 0x5e43fc61), TOBN(0xf3282375, 0xe0d3ab57), TOBN(0x4fa34b67, 0xb1181da8), TOBN(0x621ed0b2, 0x99ee52b8)}, {TOBN(0x9b178de1, 0xad990676), TOBN(0xd51de67b, 0x56d54065), TOBN(0x2a2c27c4, 0x7538c201), TOBN(0x33856ec8, 0x38a40f5c)}}, {{TOBN(0x2522fc15, 0xbe6cdcde), TOBN(0x1e603f33, 0x9f0c6f89), TOBN(0x7994edc3, 0x103e30a6), TOBN(0x033a00db, 0x220c853e)}, {TOBN(0xd3cfa409, 0xf7bb7fd7), TOBN(0x70f8781e, 
0x462d18f6), TOBN(0xbbd82980, 0x687fe295), TOBN(0x6eef4c32, 0x595669f3)}}, {{TOBN(0x86a9303b, 0x2f7e85c3), TOBN(0x5fce4621, 0x71988f9b), TOBN(0x5b935bf6, 0xc138acb5), TOBN(0x30ea7d67, 0x25661212)}, {TOBN(0xef1eb5f4, 0xe51ab9a2), TOBN(0x0587c98a, 0xae067c78), TOBN(0xb3ce1b3c, 0x77ca9ca6), TOBN(0x2a553d4d, 0x54b5f057)}}, {{TOBN(0xc7898236, 0x4da29ec2), TOBN(0xdbdd5d13, 0xb9c57316), TOBN(0xc57d6e6b, 0x2cd80d47), TOBN(0x80b460cf, 0xfe9e7391)}, {TOBN(0x98648cab, 0xf963c31e), TOBN(0x67f9f633, 0xcc4d32fd), TOBN(0x0af42a9d, 0xfdf7c687), TOBN(0x55f292a3, 0x0b015ea7)}}, {{TOBN(0x89e468b2, 0xcd21ab3d), TOBN(0xe504f022, 0xc393d392), TOBN(0xab21e1d4, 0xa5013af9), TOBN(0xe3283f78, 0xc2c28acb)}, {TOBN(0xf38b35f6, 0x226bf99f), TOBN(0xe8354274, 0x0e291e69), TOBN(0x61673a15, 0xb20c162d), TOBN(0xc101dc75, 0xb04fbdbe)}}, {{TOBN(0x8323b4c2, 0x255bd617), TOBN(0x6c969693, 0x6c2a9154), TOBN(0xc6e65860, 0x62679387), TOBN(0x8e01db0c, 0xb8c88e23)}, {TOBN(0x33c42873, 0x893a5559), TOBN(0x7630f04b, 0x47a3e149), TOBN(0xb5d80805, 0xddcf35f8), TOBN(0x582ca080, 0x77dfe732)}}, {{TOBN(0x2c7156e1, 0x0b1894a0), TOBN(0x92034001, 0xd81c68c0), TOBN(0xed225d00, 0xc8b115b5), TOBN(0x237f9c22, 0x83b907f2)}, {TOBN(0x0ea2f32f, 0x4470e2c0), TOBN(0xb725f7c1, 0x58be4e95), TOBN(0x0f1dcafa, 0xb1ae5463), TOBN(0x59ed5187, 0x1ba2fc04)}}, {{TOBN(0xf6e0f316, 0xd0115d4d), TOBN(0x5180b12f, 0xd3691599), TOBN(0x157e32c9, 0x527f0a41), TOBN(0x7b0b081d, 0xa8e0ecc0)}, {TOBN(0x6dbaaa8a, 0xbf4f0dd0), TOBN(0x99b289c7, 0x4d252696), TOBN(0x79b7755e, 0xdbf864fe), TOBN(0x6974e2b1, 0x76cad3ab)}}, {{TOBN(0x35dbbee2, 0x06ddd657), TOBN(0xe7cbdd11, 0x2ff3a96d), TOBN(0x88381968, 0x076be758), TOBN(0x2d737e72, 0x08c91f5d)}, {TOBN(0x5f83ab62, 0x86ec3776), TOBN(0x98aa649d, 0x945fa7a1), TOBN(0xf477ec37, 0x72ef0933), TOBN(0x66f52b1e, 0x098c17b1)}}, {{TOBN(0x9eec58fb, 0xd803738b), TOBN(0x91aaade7, 0xe4e86aa4), TOBN(0x6b1ae617, 0xa5b51492), TOBN(0x63272121, 0xbbc45974)}, {TOBN(0x7e0e28f0, 0x862c5129), TOBN(0x0a8f79a9, 0x3321a4a0), TOBN(0xe26d1664, 
0x5041c88f), TOBN(0x0571b805, 0x53233e3a)}}, {{TOBN(0xd1b0ccde, 0xc9520711), TOBN(0x55a9e4ed, 0x3c8b84bf), TOBN(0x9426bd39, 0xa1fef314), TOBN(0x4f5f638e, 0x6eb93f2b)}, {TOBN(0xba2a1ed3, 0x2bf9341b), TOBN(0xd63c1321, 0x4d42d5a9), TOBN(0xd2964a89, 0x316dc7c5), TOBN(0xd1759606, 0xca511851)}}, {{TOBN(0xd8a9201f, 0xf9e6ed35), TOBN(0xb7b5ee45, 0x6736925a), TOBN(0x0a83fbbc, 0x99581af7), TOBN(0x3076bc40, 0x64eeb051)}, {TOBN(0x5511c98c, 0x02dec312), TOBN(0x270de898, 0x238dcb78), TOBN(0x2cf4cf9c, 0x539c08c9), TOBN(0xa70cb65e, 0x38d3b06e)}}, {{TOBN(0xb12ec10e, 0xcfe57bbd), TOBN(0x82c7b656, 0x35a0c2b5), TOBN(0xddc7d5cd, 0x161c67bd), TOBN(0xe32e8985, 0xae3a32cc)}, {TOBN(0x7aba9444, 0xd11a5529), TOBN(0xe964ed02, 0x2427fa1a), TOBN(0x1528392d, 0x24a1770a), TOBN(0xa152ce2c, 0x12c72fcd)}}, {{TOBN(0x714553a4, 0x8ec07649), TOBN(0x18b4c290, 0x459dd453), TOBN(0xea32b714, 0x7b64b110), TOBN(0xb871bfa5, 0x2e6f07a2)}, {TOBN(0xb67112e5, 0x9e2e3c9b), TOBN(0xfbf250e5, 0x44aa90f6), TOBN(0xf77aedb8, 0xbd539006), TOBN(0x3b0cdf9a, 0xd172a66f)}}, {{TOBN(0xedf69fea, 0xf8c51187), TOBN(0x05bb67ec, 0x741e4da7), TOBN(0x47df0f32, 0x08114345), TOBN(0x56facb07, 0xbb9792b1)}, {TOBN(0xf3e007e9, 0x8f6229e4), TOBN(0x62d103f4, 0x526fba0f), TOBN(0x4f33bef7, 0xb0339d79), TOBN(0x9841357b, 0xb59bfec1)}}, {{TOBN(0xfa8dbb59, 0xc34e6705), TOBN(0xc3c7180b, 0x7fdaa84c), TOBN(0xf95872fc, 0xa4108537), TOBN(0x8750cc3b, 0x932a3e5a)}, {TOBN(0xb61cc69d, 0xb7275d7d), TOBN(0xffa0168b, 0x2e59b2e9), TOBN(0xca032abc, 0x6ecbb493), TOBN(0x1d86dbd3, 0x2c9082d8)}}, {{TOBN(0xae1e0b67, 0xe28ef5ba), TOBN(0x2c9a4699, 0xcb18e169), TOBN(0x0ecd0e33, 0x1e6bbd20), TOBN(0x571b360e, 0xaf5e81d2)}, {TOBN(0xcd9fea58, 0x101c1d45), TOBN(0x6651788e, 0x18880452), TOBN(0xa9972635, 0x1f8dd446), TOBN(0x44bed022, 0xe37281d0)}}, {{TOBN(0x094b2b2d, 0x33da525d), TOBN(0xf193678e, 0x13144fd8), TOBN(0xb8ab5ba4, 0xf4c1061d), TOBN(0x4343b5fa, 0xdccbe0f4)}, {TOBN(0xa8702371, 0x63812713), TOBN(0x47bf6d2d, 0xf7611d93), TOBN(0x46729b8c, 0xbd21e1d7), TOBN(0x7484d4e0, 
0xd629e77d)}}, {{TOBN(0x830e6eea, 0x60dbac1f), TOBN(0x23d8c484, 0xda06a2f7), TOBN(0x896714b0, 0x50ca535b), TOBN(0xdc8d3644, 0xebd97a9b)}, {TOBN(0x106ef9fa, 0xb12177b4), TOBN(0xf79bf464, 0x534d5d9c), TOBN(0x2537a349, 0xa6ab360b), TOBN(0xc7c54253, 0xa00c744f)}}, {{TOBN(0xb3c7a047, 0xe5911a76), TOBN(0x61ffa5c8, 0x647f1ee7), TOBN(0x15aed36f, 0x8f56ab42), TOBN(0x6a0d41b0, 0xa3ff9ac9)}, {TOBN(0x68f469f5, 0xcc30d357), TOBN(0xbe9adf81, 0x6b72be96), TOBN(0x1cd926fe, 0x903ad461), TOBN(0x7e89e38f, 0xcaca441b)}}, {{TOBN(0xf0f82de5, 0xfacf69d4), TOBN(0x363b7e76, 0x4775344c), TOBN(0x6894f312, 0xb2e36d04), TOBN(0x3c6cb4fe, 0x11d1c9a5)}, {TOBN(0x85d9c339, 0x4008e1f2), TOBN(0x5e9a85ea, 0x249f326c), TOBN(0xdc35c60a, 0x678c5e06), TOBN(0xc08b944f, 0x9f86fba9)}}, {{TOBN(0xde40c02c, 0x89f71f0f), TOBN(0xad8f3e31, 0xff3da3c0), TOBN(0x3ea5096b, 0x42125ded), TOBN(0x13879cbf, 0xa7379183)}, {TOBN(0x6f4714a5, 0x6b306a0b), TOBN(0x359c2ea6, 0x67646c5e), TOBN(0xfacf8943, 0x07726368), TOBN(0x07a58935, 0x65ff431e)}}, {{TOBN(0x24d661d1, 0x68754ab0), TOBN(0x801fce1d, 0x6f429a76), TOBN(0xc068a85f, 0xa58ce769), TOBN(0xedc35c54, 0x5d5eca2b)}, {TOBN(0xea31276f, 0xa3f660d1), TOBN(0xa0184ebe, 0xb8fc7167), TOBN(0x0f20f21a, 0x1d8db0ae), TOBN(0xd96d095f, 0x56c35e12)}}, {{TOBN(0xedf402b5, 0xf8c2a25b), TOBN(0x1bb772b9, 0x059204b6), TOBN(0x50cbeae2, 0x19b4e34c), TOBN(0x93109d80, 0x3fa0845a)}, {TOBN(0x54f7ccf7, 0x8ef59fb5), TOBN(0x3b438fe2, 0x88070963), TOBN(0x9e28c659, 0x31f3ba9b), TOBN(0x9cc31b46, 0xead9da92)}}, {{TOBN(0x3c2f0ba9, 0xb733aa5f), TOBN(0xdece47cb, 0xf05af235), TOBN(0xf8e3f715, 0xa2ac82a5), TOBN(0xc97ba641, 0x2203f18a)}, {TOBN(0xc3af5504, 0x09c11060), TOBN(0x56ea2c05, 0x46af512d), TOBN(0xfac28daf, 0xf3f28146), TOBN(0x87fab43a, 0x959ef494)}}}, {{{TOBN(0x09891641, 0xd4c5105f), TOBN(0x1ae80f8e, 0x6d7fbd65), TOBN(0x9d67225f, 0xbee6bdb0), TOBN(0x3b433b59, 0x7fc4d860)}, {TOBN(0x44e66db6, 0x93e85638), TOBN(0xf7b59252, 0xe3e9862f), TOBN(0xdb785157, 0x665c32ec), TOBN(0x702fefd7, 0xae362f50)}}, 
{{TOBN(0x3754475d, 0x0fefb0c3), TOBN(0xd48fb56b, 0x46d7c35d), TOBN(0xa070b633, 0x363798a4), TOBN(0xae89f3d2, 0x8fdb98e6)}, {TOBN(0x970b89c8, 0x6363d14c), TOBN(0x89817521, 0x67abd27d), TOBN(0x9bf7d474, 0x44d5a021), TOBN(0xb3083baf, 0xcac72aee)}}, {{TOBN(0x389741de, 0xbe949a44), TOBN(0x638e9388, 0x546a4fa5), TOBN(0x3fe6419c, 0xa0047bdc), TOBN(0x7047f648, 0xaaea57ca)}, {TOBN(0x54e48a90, 0x41fbab17), TOBN(0xda8e0b28, 0x576bdba2), TOBN(0xe807eebc, 0xc72afddc), TOBN(0x07d3336d, 0xf42577bf)}}, {{TOBN(0x62a8c244, 0xbfe20925), TOBN(0x91c19ac3, 0x8fdce867), TOBN(0x5a96a5d5, 0xdd387063), TOBN(0x61d587d4, 0x21d324f6)}, {TOBN(0xe87673a2, 0xa37173ea), TOBN(0x23848008, 0x53778b65), TOBN(0x10f8441e, 0x05bab43e), TOBN(0xfa11fe12, 0x4621efbe)}}, {{TOBN(0x047b772e, 0x81685d7b), TOBN(0x23f27d81, 0xbf34a976), TOBN(0xc27608e2, 0x915f48ef), TOBN(0x3b0b43fa, 0xa521d5c3)}, {TOBN(0x7613fb26, 0x63ca7284), TOBN(0x7f5729b4, 0x1d4db837), TOBN(0x87b14898, 0x583b526b), TOBN(0x00b732a6, 0xbbadd3d1)}}, {{TOBN(0x8e02f426, 0x2048e396), TOBN(0x436b50b6, 0x383d9de4), TOBN(0xf78d3481, 0x471e85ad), TOBN(0x8b01ea6a, 0xd005c8d6)}, {TOBN(0xd3c7afee, 0x97015c07), TOBN(0x46cdf1a9, 0x4e3ba2ae), TOBN(0x7a42e501, 0x83d3a1d2), TOBN(0xd54b5268, 0xb541dff4)}}, {{TOBN(0x3f24cf30, 0x4e23e9bc), TOBN(0x4387f816, 0x126e3624), TOBN(0x26a46a03, 0x3b0b6d61), TOBN(0xaf1bc845, 0x8b2d777c)}, {TOBN(0x25c401ba, 0x527de79c), TOBN(0x0e1346d4, 0x4261bbb6), TOBN(0x4b96c44b, 0x287b4bc7), TOBN(0x658493c7, 0x5254562f)}}, {{TOBN(0x23f949fe, 0xb8a24a20), TOBN(0x17ebfed1, 0xf52ca53f), TOBN(0x9b691bbe, 0xbcfb4853), TOBN(0x5617ff6b, 0x6278a05d)}, {TOBN(0x241b34c5, 0xe3c99ebd), TOBN(0xfc64242e, 0x1784156a), TOBN(0x4206482f, 0x695d67df), TOBN(0xb967ce0e, 0xee27c011)}}, {{TOBN(0x65db3751, 0x21c80b5d), TOBN(0x2e7a563c, 0xa31ecca0), TOBN(0xe56ffc4e, 0x5238a07e), TOBN(0x3d6c2966, 0x32ced854)}, {TOBN(0xe99d7d1a, 0xaf70b885), TOBN(0xafc3bad9, 0x2d686459), TOBN(0x9c78bf46, 0x0cc8ba5b), TOBN(0x5a439519, 0x18955aa3)}}, {{TOBN(0xf8b517a8, 0x5fe4e314), 
TOBN(0xe60234d0, 0xfcb8906f), TOBN(0xffe542ac, 0xf2061b23), TOBN(0x287e191f, 0x6b4cb59c)}, {TOBN(0x21857ddc, 0x09d877d8), TOBN(0x1c23478c, 0x14678941), TOBN(0xbbf0c056, 0xb6e05ea4), TOBN(0x82da4b53, 0xb01594fe)}}, {{TOBN(0xf7526791, 0xfadb8608), TOBN(0x049e832d, 0x7b74cdf6), TOBN(0xa43581cc, 0xc2b90a34), TOBN(0x73639eb8, 0x9360b10c)}, {TOBN(0x4fba331f, 0xe1e4a71b), TOBN(0x6ffd6b93, 0x8072f919), TOBN(0x6e53271c, 0x65679032), TOBN(0x67206444, 0xf14272ce)}}, {{TOBN(0xc0f734a3, 0xb2335834), TOBN(0x9526205a, 0x90ef6860), TOBN(0xcb8be717, 0x04e2bb0d), TOBN(0x2418871e, 0x02f383fa)}, {TOBN(0xd7177681, 0x4082c157), TOBN(0xcc914ad0, 0x29c20073), TOBN(0xf186c1eb, 0xe587e728), TOBN(0x6fdb3c22, 0x61bcd5fd)}}, {{TOBN(0x30d014a6, 0xf2f9f8e9), TOBN(0x963ece23, 0x4fec49d2), TOBN(0x862025c5, 0x9605a8d9), TOBN(0x39874445, 0x19f8929a)}, {TOBN(0x01b6ff65, 0x12bf476a), TOBN(0x598a64d8, 0x09cf7d91), TOBN(0xd7ec7749, 0x93be56ca), TOBN(0x10899785, 0xcbb33615)}}, {{TOBN(0xb8a092fd, 0x02eee3ad), TOBN(0xa86b3d35, 0x30145270), TOBN(0x323d98c6, 0x8512b675), TOBN(0x4b8bc785, 0x62ebb40f)}, {TOBN(0x7d301f54, 0x413f9cde), TOBN(0xa5e4fb4f, 0x2bab5664), TOBN(0x1d2b252d, 0x1cbfec23), TOBN(0xfcd576bb, 0xe177120d)}}, {{TOBN(0x04427d3e, 0x83731a34), TOBN(0x2bb9028e, 0xed836e8e), TOBN(0xb36acff8, 0xb612ca7c), TOBN(0xb88fe5ef, 0xd3d9c73a)}, {TOBN(0xbe2a6bc6, 0xedea4eb3), TOBN(0x43b93133, 0x488eec77), TOBN(0xf41ff566, 0xb17106e1), TOBN(0x469e9172, 0x654efa32)}}, {{TOBN(0xb4480f04, 0x41c23fa3), TOBN(0xb4712eb0, 0xc1989a2e), TOBN(0x3ccbba0f, 0x93a29ca7), TOBN(0x6e205c14, 0xd619428c)}, {TOBN(0x90db7957, 0xb3641686), TOBN(0x0432691d, 0x45ac8b4e), TOBN(0x07a759ac, 0xf64e0350), TOBN(0x0514d89c, 0x9c972517)}}, {{TOBN(0x1701147f, 0xa8e67fc3), TOBN(0x9e2e0b8b, 0xab2085be), TOBN(0xd5651824, 0xac284e57), TOBN(0x890d4325, 0x74893664)}, {TOBN(0x8a7c5e6e, 0xc55e68a3), TOBN(0xbf12e90b, 0x4339c85a), TOBN(0x31846b85, 0xf922b655), TOBN(0x9a54ce4d, 0x0bf4d700)}}, {{TOBN(0xd7f4e83a, 0xf1a14295), TOBN(0x916f955c, 0xb285d4f9), 
TOBN(0xe57bb0e0, 0x99ffdaba), TOBN(0x28a43034, 0xeab0d152)}, {TOBN(0x0a36ffa2, 0xb8a9cef8), TOBN(0x5517407e, 0xb9ec051a), TOBN(0x9c796096, 0xea68e672), TOBN(0x853db5fb, 0xfb3c77fb)}}, {{TOBN(0x21474ba9, 0xe864a51a), TOBN(0x6c267699, 0x6e8a1b8b), TOBN(0x7c823626, 0x94120a28), TOBN(0xe61e9a48, 0x8383a5db)}, {TOBN(0x7dd75003, 0x9f84216d), TOBN(0xab020d07, 0xad43cd85), TOBN(0x9437ae48, 0xda12c659), TOBN(0x6449c2eb, 0xe65452ad)}}, {{TOBN(0xcc7c4c1c, 0x2cf9d7c1), TOBN(0x1320886a, 0xee95e5ab), TOBN(0xbb7b9056, 0xbeae170c), TOBN(0xc8a5b250, 0xdbc0d662)}, {TOBN(0x4ed81432, 0xc11d2303), TOBN(0x7da66912, 0x1f03769f), TOBN(0x3ac7a5fd, 0x84539828), TOBN(0x14dada94, 0x3bccdd02)}}, {{TOBN(0x8b84c321, 0x7ef6b0d1), TOBN(0x52a9477a, 0x7c933f22), TOBN(0x5ef6728a, 0xfd440b82), TOBN(0x5c3bd859, 0x6ce4bd5e)}, {TOBN(0x918b80f5, 0xf22c2d3e), TOBN(0x368d5040, 0xb7bb6cc5), TOBN(0xb66142a1, 0x2695a11c), TOBN(0x60ac583a, 0xeb19ea70)}}, {{TOBN(0x317cbb98, 0x0eab2437), TOBN(0x8cc08c55, 0x5e2654c8), TOBN(0xfe2d6520, 0xe6d8307f), TOBN(0xe9f147f3, 0x57428993)}, {TOBN(0x5f9c7d14, 0xd2fd6cf1), TOBN(0xa3ecd064, 0x2d4fcbb0), TOBN(0xad83fef0, 0x8e7341f7), TOBN(0x643f23a0, 0x3a63115c)}}, {{TOBN(0xd38a78ab, 0xe65ab743), TOBN(0xbf7c75b1, 0x35edc89c), TOBN(0x3dd8752e, 0x530df568), TOBN(0xf85c4a76, 0xe308c682)}, {TOBN(0x4c9955b2, 0xe68acf37), TOBN(0xa544df3d, 0xab32af85), TOBN(0x4b8ec3f5, 0xa25cf493), TOBN(0x4d8f2764, 0x1a622feb)}}, {{TOBN(0x7bb4f7aa, 0xf0dcbc49), TOBN(0x7de551f9, 0x70bbb45b), TOBN(0xcfd0f3e4, 0x9f2ca2e5), TOBN(0xece58709, 0x1f5c76ef)}, {TOBN(0x32920edd, 0x167d79ae), TOBN(0x039df8a2, 0xfa7d7ec1), TOBN(0xf46206c0, 0xbb30af91), TOBN(0x1ff5e2f5, 0x22676b59)}}, {{TOBN(0x11f4a039, 0x6ea51d66), TOBN(0x506c1445, 0x807d7a26), TOBN(0x60da5705, 0x755a9b24), TOBN(0x8fc8cc32, 0x1f1a319e)}, {TOBN(0x83642d4d, 0x9433d67d), TOBN(0x7fa5cb8f, 0x6a7dd296), TOBN(0x576591db, 0x9b7bde07), TOBN(0x13173d25, 0x419716fb)}}, {{TOBN(0xea30599d, 0xd5b340ff), TOBN(0xfc6b5297, 0xb0fe76c5), TOBN(0x1c6968c8, 0xab8f5adc), 
TOBN(0xf723c7f5, 0x901c928d)}, {TOBN(0x4203c321, 0x9773d402), TOBN(0xdf7c6aa3, 0x1b51dd47), TOBN(0x3d49e37a, 0x552be23c), TOBN(0x57febee8, 0x0b5a6e87)}}, {{TOBN(0xc5ecbee4, 0x7bd8e739), TOBN(0x79d44994, 0xae63bf75), TOBN(0x168bd00f, 0x38fb8923), TOBN(0x75d48ee4, 0xd0533130)}, {TOBN(0x554f77aa, 0xdb5cdf33), TOBN(0x3396e896, 0x3c696769), TOBN(0x2fdddbf2, 0xd3fd674e), TOBN(0xbbb8f6ee, 0x99d0e3e5)}}, {{TOBN(0x51b90651, 0xcbae2f70), TOBN(0xefc4bc05, 0x93aaa8eb), TOBN(0x8ecd8689, 0xdd1df499), TOBN(0x1aee99a8, 0x22f367a5)}, {TOBN(0x95d485b9, 0xae8274c5), TOBN(0x6c14d445, 0x7d30b39c), TOBN(0xbafea90b, 0xbcc1ef81), TOBN(0x7c5f317a, 0xa459a2ed)}}, {{TOBN(0x01211075, 0x4ef44227), TOBN(0xa17bed6e, 0xdc20f496), TOBN(0x0cdfe424, 0x819853cd), TOBN(0x13793298, 0xf71e2ce7)}, {TOBN(0x3c1f3078, 0xdbbe307b), TOBN(0x6dd1c20e, 0x76ee9936), TOBN(0x23ee4b57, 0x423caa20), TOBN(0x4ac3793b, 0x8efb840e)}}, {{TOBN(0x934438eb, 0xed1f8ca0), TOBN(0x3e546658, 0x4ebb25a2), TOBN(0xc415af0e, 0xc069896f), TOBN(0xc13eddb0, 0x9a5aa43d)}, {TOBN(0x7a04204f, 0xd49eb8f6), TOBN(0xd0d5bdfc, 0xd74f1670), TOBN(0x3697e286, 0x56fc0558), TOBN(0x10207371, 0x01cebade)}}, {{TOBN(0x5f87e690, 0x0647a82b), TOBN(0x908e0ed4, 0x8f40054f), TOBN(0xa9f633d4, 0x79853803), TOBN(0x8ed13c9a, 0x4a28b252)}, {TOBN(0x3e2ef676, 0x1f460f64), TOBN(0x53930b9b, 0x36d06336), TOBN(0x347073ac, 0x8fc4979b), TOBN(0x84380e0e, 0x5ecd5597)}}, {{TOBN(0xe3b22c6b, 0xc4fe3c39), TOBN(0xba4a8153, 0x6c7bebdf), TOBN(0xf23ab6b7, 0x25693459), TOBN(0x53bc3770, 0x14922b11)}, {TOBN(0x4645c8ab, 0x5afc60db), TOBN(0xaa022355, 0x20b9f2a3), TOBN(0x52a2954c, 0xce0fc507), TOBN(0x8c2731bb, 0x7ce1c2e7)}}, {{TOBN(0xf39608ab, 0x18a0339d), TOBN(0xac7a658d, 0x3735436c), TOBN(0xb22c2b07, 0xcd992b4f), TOBN(0x4e83daec, 0xf40dcfd4)}, {TOBN(0x8a34c7be, 0x2f39ea3e), TOBN(0xef0c005f, 0xb0a56d2e), TOBN(0x62731f6a, 0x6edd8038), TOBN(0x5721d740, 0x4e3cb075)}}, {{TOBN(0x1ea41511, 0xfbeeee1b), TOBN(0xd1ef5e73, 0xef1d0c05), TOBN(0x42feefd1, 0x73c07d35), TOBN(0xe530a00a, 0x8a329493)}, 
{TOBN(0x5d55b7fe, 0xf15ebfb0), TOBN(0x549de03c, 0xd322491a), TOBN(0xf7b5f602, 0x745b3237), TOBN(0x3632a3a2, 0x1ab6e2b6)}}, {{TOBN(0x0d3bba89, 0x0ef59f78), TOBN(0x0dfc6443, 0xc9e52b9a), TOBN(0x1dc79699, 0x72631447), TOBN(0xef033917, 0xb3be20b1)}, {TOBN(0x0c92735d, 0xb1383948), TOBN(0xc1fc29a2, 0xc0dd7d7d), TOBN(0x6485b697, 0x403ed068), TOBN(0x13bfaab3, 0xaac93bdc)}}, {{TOBN(0x410dc6a9, 0x0deeaf52), TOBN(0xb003fb02, 0x4c641c15), TOBN(0x1384978c, 0x5bc504c4), TOBN(0x37640487, 0x864a6a77)}, {TOBN(0x05991bc6, 0x222a77da), TOBN(0x62260a57, 0x5e47eb11), TOBN(0xc7af6613, 0xf21b432c), TOBN(0x22f3acc9, 0xab4953e9)}}, {{TOBN(0x52934922, 0x8e41d155), TOBN(0x4d024568, 0x3ac059ef), TOBN(0xb0201755, 0x4d884411), TOBN(0xce8055cf, 0xa59a178f)}, {TOBN(0xcd77d1af, 0xf6204549), TOBN(0xa0a00a3e, 0xc7066759), TOBN(0x471071ef, 0x0272c229), TOBN(0x009bcf6b, 0xd3c4b6b0)}}, {{TOBN(0x2a2638a8, 0x22305177), TOBN(0xd51d59df, 0x41645bbf), TOBN(0xa81142fd, 0xc0a7a3c0), TOBN(0xa17eca6d, 0x4c7063ee)}, {TOBN(0x0bb887ed, 0x60d9dcec), TOBN(0xd6d28e51, 0x20ad2455), TOBN(0xebed6308, 0xa67102ba), TOBN(0x042c3114, 0x8bffa408)}}, {{TOBN(0xfd099ac5, 0x8aa68e30), TOBN(0x7a6a3d7c, 0x1483513e), TOBN(0xffcc6b75, 0xba2d8f0c), TOBN(0x54dacf96, 0x1e78b954)}, {TOBN(0xf645696f, 0xa4a9af89), TOBN(0x3a411940, 0x06ac98ec), TOBN(0x41b8b3f6, 0x22a67a20), TOBN(0x2d0b1e0f, 0x99dec626)}}, {{TOBN(0x27c89192, 0x40be34e8), TOBN(0xc7162b37, 0x91907f35), TOBN(0x90188ec1, 0xa956702b), TOBN(0xca132f7d, 0xdf93769c)}, {TOBN(0x3ece44f9, 0x0e2025b4), TOBN(0x67aaec69, 0x0c62f14c), TOBN(0xad741418, 0x22e3cc11), TOBN(0xcf9b75c3, 0x7ff9a50e)}}, {{TOBN(0x02fa2b16, 0x4d348272), TOBN(0xbd99d61a, 0x9959d56d), TOBN(0xbc4f19db, 0x18762916), TOBN(0xcc7cce50, 0x49c1ac80)}, {TOBN(0x4d59ebaa, 0xd846bd83), TOBN(0x8775a9dc, 0xa9202849), TOBN(0x07ec4ae1, 0x6e1f4ca9), TOBN(0x27eb5875, 0xba893f11)}}, {{TOBN(0x00284d51, 0x662cc565), TOBN(0x82353a6b, 0x0db4138d), TOBN(0xd9c7aaaa, 0xaa32a594), TOBN(0xf5528b5e, 0xa5669c47)}, {TOBN(0xf3220231, 0x2f23c5ff), 
TOBN(0xe3e8147a, 0x6affa3a1), TOBN(0xfb423d5c, 0x202ddda0), TOBN(0x3d6414ac, 0x6b871bd4)}}, {{TOBN(0x586f82e1, 0xa51a168a), TOBN(0xb712c671, 0x48ae5448), TOBN(0x9a2e4bd1, 0x76233eb8), TOBN(0x0188223a, 0x78811ca9)}, {TOBN(0x553c5e21, 0xf7c18de1), TOBN(0x7682e451, 0xb27bb286), TOBN(0x3ed036b3, 0x0e51e929), TOBN(0xf487211b, 0xec9cb34f)}}, {{TOBN(0x0d094277, 0x0c24efc8), TOBN(0x0349fd04, 0xbef737a4), TOBN(0x6d1c9dd2, 0x514cdd28), TOBN(0x29c135ff, 0x30da9521)}, {TOBN(0xea6e4508, 0xf78b0b6f), TOBN(0x176f5dd2, 0x678c143c), TOBN(0x08148418, 0x4be21e65), TOBN(0x27f7525c, 0xe7df38c4)}}, {{TOBN(0x1fb70e09, 0x748ab1a4), TOBN(0x9cba50a0, 0x5efe4433), TOBN(0x7846c7a6, 0x15f75af2), TOBN(0x2a7c2c57, 0x5ee73ea8)}, {TOBN(0x42e566a4, 0x3f0a449a), TOBN(0x45474c3b, 0xad90fc3d), TOBN(0x7447be3d, 0x8b61d057), TOBN(0x3e9d1cf1, 0x3a4ec092)}}, {{TOBN(0x1603e453, 0xf380a6e6), TOBN(0x0b86e431, 0x9b1437c2), TOBN(0x7a4173f2, 0xef29610a), TOBN(0x8fa729a7, 0xf03d57f7)}, {TOBN(0x3e186f6e, 0x6c9c217e), TOBN(0xbe1d3079, 0x91919524), TOBN(0x92a62a70, 0x153d4fb1), TOBN(0x32ed3e34, 0xd68c2f71)}}, {{TOBN(0xd785027f, 0x9eb1a8b7), TOBN(0xbc37eb77, 0xc5b22fe8), TOBN(0x466b34f0, 0xb9d6a191), TOBN(0x008a89af, 0x9a05f816)}, {TOBN(0x19b028fb, 0x7d42c10a), TOBN(0x7fe8c92f, 0x49b3f6b8), TOBN(0x58907cc0, 0xa5a0ade3), TOBN(0xb3154f51, 0x559d1a7c)}}, {{TOBN(0x5066efb6, 0xd9790ed6), TOBN(0xa77a0cbc, 0xa6aa793b), TOBN(0x1a915f3c, 0x223e042e), TOBN(0x1c5def04, 0x69c5874b)}, {TOBN(0x0e830078, 0x73b6c1da), TOBN(0x55cf85d2, 0xfcd8557a), TOBN(0x0f7c7c76, 0x0460f3b1), TOBN(0x87052acb, 0x46e58063)}}, {{TOBN(0x09212b80, 0x907eae66), TOBN(0x3cb068e0, 0x4d721c89), TOBN(0xa87941ae, 0xdd45ac1c), TOBN(0xde8d5c0d, 0x0daa0dbb)}, {TOBN(0xda421fdc, 0xe3502e6e), TOBN(0xc8944201, 0x4d89a084), TOBN(0x7307ba5e, 0xf0c24bfb), TOBN(0xda212beb, 0x20bde0ef)}}, {{TOBN(0xea2da24b, 0xf82ce682), TOBN(0x058d3816, 0x07f71fe4), TOBN(0x35a02462, 0x5ffad8de), TOBN(0xcd7b05dc, 0xaadcefab)}, {TOBN(0xd442f8ed, 0x1d9f54ec), TOBN(0x8be3d618, 0xb2d3b5ca), 
TOBN(0xe2220ed0, 0xe06b2ce2), TOBN(0x82699a5f, 0x1b0da4c0)}}, {{TOBN(0x3ff106f5, 0x71c0c3a7), TOBN(0x8f580f5a, 0x0d34180c), TOBN(0x4ebb120e, 0x22d7d375), TOBN(0x5e5782cc, 0xe9513675)}, {TOBN(0x2275580c, 0x99c82a70), TOBN(0xe8359fbf, 0x15ea8c4c), TOBN(0x53b48db8, 0x7b415e70), TOBN(0xaacf2240, 0x100c6014)}}, {{TOBN(0x9faaccf5, 0xe4652f1d), TOBN(0xbd6fdd2a, 0xd56157b2), TOBN(0xa4f4fb1f, 0x6261ec50), TOBN(0x244e55ad, 0x476bcd52)}, {TOBN(0x881c9305, 0x047d320b), TOBN(0x1ca983d5, 0x6181263f), TOBN(0x354e9a44, 0x278fb8ee), TOBN(0xad2dbc0f, 0x396e4964)}}, {{TOBN(0x723f3aa2, 0x9268b3de), TOBN(0x0d1ca29a, 0xe6e0609a), TOBN(0x794866aa, 0x6cf44252), TOBN(0x0b59f3e3, 0x01af87ed)}, {TOBN(0xe234e5ff, 0x7f4a6c51), TOBN(0xa8768fd2, 0x61dc2f7e), TOBN(0xdafc7332, 0x0a94d81f), TOBN(0xd7f84282, 0x06938ce1)}}, {{TOBN(0xae0b3c0e, 0x0546063e), TOBN(0x7fbadcb2, 0x5d61abc6), TOBN(0xd5d7a2c9, 0x369ac400), TOBN(0xa5978d09, 0xae67d10c)}, {TOBN(0x290f211e, 0x4f85eaac), TOBN(0xe61e2ad1, 0xfacac681), TOBN(0xae125225, 0x388384cd), TOBN(0xa7fb68e9, 0xccfde30f)}}, {{TOBN(0x7a59b936, 0x3daed4c2), TOBN(0x80a9aa40, 0x2606f789), TOBN(0xb40c1ea5, 0xf6a6d90a), TOBN(0x948364d3, 0x514d5885)}, {TOBN(0x062ebc60, 0x70985182), TOBN(0xa6db5b0e, 0x33310895), TOBN(0x64a12175, 0xe329c2f5), TOBN(0xc5f25bd2, 0x90ea237e)}}, {{TOBN(0x7915c524, 0x2d0a4c23), TOBN(0xeb5d26e4, 0x6bb3cc52), TOBN(0x369a9116, 0xc09e2c92), TOBN(0x0c527f92, 0xcf182cf8)}, {TOBN(0x9e591938, 0x2aede0ac), TOBN(0xb2922208, 0x6cc34939), TOBN(0x3c9d8962, 0x99a34361), TOBN(0x3c81836d, 0xc1905fe6)}}, {{TOBN(0x4bfeb57f, 0xa001ec5a), TOBN(0xe993f5bb, 0xa0dc5dba), TOBN(0x47884109, 0x724a1380), TOBN(0x8a0369ab, 0x32fe9a04)}, {TOBN(0xea068d60, 0x8c927db8), TOBN(0xbf5f37cf, 0x94655741), TOBN(0x47d402a2, 0x04b6c7ea), TOBN(0x4551c295, 0x6af259cb)}}, {{TOBN(0x698b71e7, 0xed77ee8b), TOBN(0xbddf7bd0, 0xf309d5c7), TOBN(0x6201c22c, 0x34e780ca), TOBN(0xab04f7d8, 0x4c295ef4)}, {TOBN(0x1c947294, 0x4313a8ce), TOBN(0xe532e4ac, 0x92ca4cfe), TOBN(0x89738f80, 0xd0a7a97a), 
TOBN(0xec088c88, 0xa580fd5b)}}, {{TOBN(0x612b1ecc, 0x42ce9e51), TOBN(0x8f9840fd, 0xb25fdd2a), TOBN(0x3cda78c0, 0x01e7f839), TOBN(0x546b3d3a, 0xece05480)}, {TOBN(0x271719a9, 0x80d30916), TOBN(0x45497107, 0x584c20c4), TOBN(0xaf8f9478, 0x5bc78608), TOBN(0x28c7d484, 0x277e2a4c)}}, {{TOBN(0xfce01767, 0x88a2ffe4), TOBN(0xdc506a35, 0x28e169a5), TOBN(0x0ea10861, 0x7af9c93a), TOBN(0x1ed24361, 0x03fa0e08)}, {TOBN(0x96eaaa92, 0xa3d694e7), TOBN(0xc0f43b4d, 0xef50bc74), TOBN(0xce6aa58c, 0x64114db4), TOBN(0x8218e8ea, 0x7c000fd4)}}, {{TOBN(0xac815dfb, 0x185f8844), TOBN(0xcd7e90cb, 0x1557abfb), TOBN(0x23d16655, 0xafbfecdf), TOBN(0x80f3271f, 0x085cac4a)}, {TOBN(0x7fc39aa7, 0xd0e62f47), TOBN(0x88d519d1, 0x460a48e5), TOBN(0x59559ac4, 0xd28f101e), TOBN(0x7981d9e9, 0xca9ae816)}}, {{TOBN(0x5c38652c, 0x9ac38203), TOBN(0x86eaf87f, 0x57657fe5), TOBN(0x568fc472, 0xe21f5416), TOBN(0x2afff39c, 0xe7e597b5)}, {TOBN(0x3adbbb07, 0x256d4eab), TOBN(0x22598692, 0x8285ab89), TOBN(0x35f8112a, 0x041caefe), TOBN(0x95df02e3, 0xa5064c8b)}}, {{TOBN(0x4d63356e, 0xc7004bf3), TOBN(0x230a08f4, 0xdb83c7de), TOBN(0xca27b270, 0x8709a7b7), TOBN(0x0d1c4cc4, 0xcb9abd2d)}, {TOBN(0x8a0bc66e, 0x7550fee8), TOBN(0x369cd4c7, 0x9cf7247e), TOBN(0x75562e84, 0x92b5b7e7), TOBN(0x8fed0da0, 0x5802af7b)}}, {{TOBN(0x6a7091c2, 0xe48fb889), TOBN(0x26882c13, 0x7b8a9d06), TOBN(0xa2498663, 0x1b82a0e2), TOBN(0x844ed736, 0x3518152d)}, {TOBN(0x282f476f, 0xd86e27c7), TOBN(0xa04edaca, 0x04afefdc), TOBN(0x8b256ebc, 0x6119e34d), TOBN(0x56a413e9, 0x0787d78b)}}}, {{{TOBN(0x82ee061d, 0x5a74be50), TOBN(0xe41781c4, 0xdea16ff5), TOBN(0xe0b0c81e, 0x99bfc8a2), TOBN(0x624f4d69, 0x0b547e2d)}, {TOBN(0x3a83545d, 0xbdcc9ae4), TOBN(0x2573dbb6, 0x409b1e8e), TOBN(0x482960c4, 0xa6c93539), TOBN(0xf01059ad, 0x5ae18798)}}, {{TOBN(0x715c9f97, 0x3112795f), TOBN(0xe8244437, 0x984e6ee1), TOBN(0x55cb4858, 0xecb66bcd), TOBN(0x7c136735, 0xabaffbee)}, {TOBN(0x54661595, 0x5dbec38e), TOBN(0x51c0782c, 0x388ad153), TOBN(0x9ba4c53a, 0xc6e0952f), TOBN(0x27e6782a, 
0x1b21dfa8)}}, {{TOBN(0x682f903d, 0x4ed2dbc2), TOBN(0x0eba59c8, 0x7c3b2d83), TOBN(0x8e9dc84d, 0x9c7e9335), TOBN(0x5f9b21b0, 0x0eb226d7)}, {TOBN(0xe33bd394, 0xaf267bae), TOBN(0xaa86cc25, 0xbe2e15ae), TOBN(0x4f0bf67d, 0x6a8ec500), TOBN(0x5846aa44, 0xf9630658)}}, {{TOBN(0xfeb09740, 0xe2c2bf15), TOBN(0x627a2205, 0xa9e99704), TOBN(0xec8d73d0, 0xc2fbc565), TOBN(0x223eed8f, 0xc20c8de8)}, {TOBN(0x1ee32583, 0xa8363b49), TOBN(0x1a0b6cb9, 0xc9c2b0a6), TOBN(0x49f7c3d2, 0x90dbc85c), TOBN(0xa8dfbb97, 0x1ef4c1ac)}}, {{TOBN(0xafb34d4c, 0x65c7c2ab), TOBN(0x1d4610e7, 0xe2c5ea84), TOBN(0x893f6d1b, 0x973c4ab5), TOBN(0xa3cdd7e9, 0x945ba5c4)}, {TOBN(0x60514983, 0x064417ee), TOBN(0x1459b23c, 0xad6bdf2b), TOBN(0x23b2c341, 0x5cf726c3), TOBN(0x3a829635, 0x32d6354a)}}, {{TOBN(0x294f901f, 0xab192c18), TOBN(0xec5fcbfe, 0x7030164f), TOBN(0xe2e2fcb7, 0xe2246ba6), TOBN(0x1e7c88b3, 0x221a1a0c)}, {TOBN(0x72c7dd93, 0xc92d88c5), TOBN(0x41c2148e, 0x1106fb59), TOBN(0x547dd4f5, 0xa0f60f14), TOBN(0xed9b52b2, 0x63960f31)}}, {{TOBN(0x6c8349eb, 0xb0a5b358), TOBN(0xb154c5c2, 0x9e7e2ed6), TOBN(0xcad5eccf, 0xeda462db), TOBN(0xf2d6dbe4, 0x2de66b69)}, {TOBN(0x426aedf3, 0x8665e5b2), TOBN(0x488a8513, 0x7b7f5723), TOBN(0x15cc43b3, 0x8bcbb386), TOBN(0x27ad0af3, 0xd791d879)}}, {{TOBN(0xc16c236e, 0x846e364f), TOBN(0x7f33527c, 0xdea50ca0), TOBN(0xc4810775, 0x0926b86d), TOBN(0x6c2a3609, 0x0598e70c)}, {TOBN(0xa6755e52, 0xf024e924), TOBN(0xe0fa07a4, 0x9db4afca), TOBN(0x15c3ce7d, 0x66831790), TOBN(0x5b4ef350, 0xa6cbb0d6)}}, {{TOBN(0x2c4aafc4, 0xb6205969), TOBN(0x42563f02, 0xf6c7854f), TOBN(0x016aced5, 0x1d983b48), TOBN(0xfeb356d8, 0x99949755)}, {TOBN(0x8c2a2c81, 0xd1a39bd7), TOBN(0x8f44340f, 0xe6934ae9), TOBN(0x148cf91c, 0x447904da), TOBN(0x7340185f, 0x0f51a926)}}, {{TOBN(0x2f8f00fb, 0x7409ab46), TOBN(0x057e78e6, 0x80e289b2), TOBN(0x03e5022c, 0xa888e5d1), TOBN(0x3c87111a, 0x9dede4e2)}, {TOBN(0x5b9b0e1c, 0x7809460b), TOBN(0xe751c852, 0x71c9abc7), TOBN(0x8b944e28, 0xc7cc1dc9), TOBN(0x4f201ffa, 0x1d3cfa08)}}, 
{{TOBN(0x02fc905c, 0x3e6721ce), TOBN(0xd52d70da, 0xd0b3674c), TOBN(0x5dc2e5ca, 0x18810da4), TOBN(0xa984b273, 0x5c69dd99)}, {TOBN(0x63b92527, 0x84de5ca4), TOBN(0x2f1c9872, 0xc852dec4), TOBN(0x18b03593, 0xc2e3de09), TOBN(0x19d70b01, 0x9813dc2f)}}, {{TOBN(0x42806b2d, 0xa6dc1d29), TOBN(0xd3030009, 0xf871e144), TOBN(0xa1feb333, 0xaaf49276), TOBN(0xb5583b9e, 0xc70bc04b)}, {TOBN(0x1db0be78, 0x95695f20), TOBN(0xfc841811, 0x89d012b5), TOBN(0x6409f272, 0x05f61643), TOBN(0x40d34174, 0xd5883128)}}, {{TOBN(0xd79196f5, 0x67419833), TOBN(0x6059e252, 0x863b7b08), TOBN(0x84da1817, 0x1c56700c), TOBN(0x5758ee56, 0xb28d3ec4)}, {TOBN(0x7da2771d, 0x013b0ea6), TOBN(0xfddf524b, 0x54c5e9b9), TOBN(0x7df4faf8, 0x24305d80), TOBN(0x58f5c1bf, 0x3a97763f)}}, {{TOBN(0xa5af37f1, 0x7c696042), TOBN(0xd4cba22c, 0x4a2538de), TOBN(0x211cb995, 0x9ea42600), TOBN(0xcd105f41, 0x7b069889)}, {TOBN(0xb1e1cf19, 0xddb81e74), TOBN(0x472f2d89, 0x5157b8ca), TOBN(0x086fb008, 0xee9db885), TOBN(0x365cd570, 0x0f26d131)}}, {{TOBN(0x284b02bb, 0xa2be7053), TOBN(0xdcbbf7c6, 0x7ab9a6d6), TOBN(0x4425559c, 0x20f7a530), TOBN(0x961f2dfa, 0x188767c8)}, {TOBN(0xe2fd9435, 0x70dc80c4), TOBN(0x104d6b63, 0xf0784120), TOBN(0x7f592bc1, 0x53567122), TOBN(0xf6bc1246, 0xf688ad77)}}, {{TOBN(0x05214c05, 0x0f15dde9), TOBN(0xa47a76a8, 0x0d5f2b82), TOBN(0xbb254d30, 0x62e82b62), TOBN(0x11a05fe0, 0x3ec955ee)}, {TOBN(0x7eaff46e, 0x9d529b36), TOBN(0x55ab1301, 0x8f9e3df6), TOBN(0xc463e371, 0x99317698), TOBN(0xfd251438, 0xccda47ad)}}, {{TOBN(0xca9c3547, 0x23d695ea), TOBN(0x48ce626e, 0x16e589b5), TOBN(0x6b5b64c7, 0xb187d086), TOBN(0xd02e1794, 0xb2207948)}, {TOBN(0x8b58e98f, 0x7198111d), TOBN(0x90ca6305, 0xdcf9c3cc), TOBN(0x5691fe72, 0xf34089b0), TOBN(0x60941af1, 0xfc7c80ff)}}, {{TOBN(0xa09bc0a2, 0x22eb51e5), TOBN(0xc0bb7244, 0xaa9cf09a), TOBN(0x36a8077f, 0x80159f06), TOBN(0x8b5c989e, 0xdddc560e)}, {TOBN(0x19d2f316, 0x512e1f43), TOBN(0x02eac554, 0xad08ff62), TOBN(0x012ab84c, 0x07d20b4e), TOBN(0x37d1e115, 0xd6d4e4e1)}}, {{TOBN(0xb6443e1a, 0xab7b19a8), 
TOBN(0xf08d067e, 0xdef8cd45), TOBN(0x63adf3e9, 0x685e03da), TOBN(0xcf15a10e, 0x4792b916)}, {TOBN(0xf44bcce5, 0xb738a425), TOBN(0xebe131d5, 0x9636b2fd), TOBN(0x94068841, 0x7850d605), TOBN(0x09684eaa, 0xb40d749d)}}, {{TOBN(0x8c3c669c, 0x72ba075b), TOBN(0x89f78b55, 0xba469015), TOBN(0x5706aade, 0x3e9f8ba8), TOBN(0x6d8bd565, 0xb32d7ed7)}, {TOBN(0x25f4e63b, 0x805f08d6), TOBN(0x7f48200d, 0xc3bcc1b5), TOBN(0x4e801968, 0xb025d847), TOBN(0x74afac04, 0x87cbe0a8)}}, {{TOBN(0x43ed2c2b, 0x7e63d690), TOBN(0xefb6bbf0, 0x0223cdb8), TOBN(0x4fec3cae, 0x2884d3fe), TOBN(0x065ecce6, 0xd75e25a4)}, {TOBN(0x6c2294ce, 0x69f79071), TOBN(0x0d9a8e5f, 0x044b8666), TOBN(0x5009f238, 0x17b69d8f), TOBN(0x3c29f8fe, 0xc5dfdaf7)}}, {{TOBN(0x9067528f, 0xebae68c4), TOBN(0x5b385632, 0x30c5ba21), TOBN(0x540df119, 0x1fdd1aec), TOBN(0xcf37825b, 0xcfba4c78)}, {TOBN(0x77eff980, 0xbeb11454), TOBN(0x40a1a991, 0x60c1b066), TOBN(0xe8018980, 0xf889a1c7), TOBN(0xb9c52ae9, 0x76c24be0)}}, {{TOBN(0x05fbbcce, 0x45650ef4), TOBN(0xae000f10, 0x8aa29ac7), TOBN(0x884b7172, 0x4f04c470), TOBN(0x7cd4fde2, 0x19bb5c25)}, {TOBN(0x6477b22a, 0xe8840869), TOBN(0xa8868859, 0x5fbd0686), TOBN(0xf23cc02e, 0x1116dfba), TOBN(0x76cd563f, 0xd87d7776)}}, {{TOBN(0xe2a37598, 0xa9d82abf), TOBN(0x5f188ccb, 0xe6c170f5), TOBN(0x81682200, 0x5066b087), TOBN(0xda22c212, 0xc7155ada)}, {TOBN(0x151e5d3a, 0xfbddb479), TOBN(0x4b606b84, 0x6d715b99), TOBN(0x4a73b54b, 0xf997cb2e), TOBN(0x9a1bfe43, 0x3ecd8b66)}}, {{TOBN(0x1c312809, 0x2a67d48a), TOBN(0xcd6a671e, 0x031fa9e2), TOBN(0xbec3312a, 0x0e43a34a), TOBN(0x1d935639, 0x55ef47d3)}, {TOBN(0x5ea02489, 0x8fea73ea), TOBN(0x8247b364, 0xa035afb2), TOBN(0xb58300a6, 0x5265b54c), TOBN(0x3286662f, 0x722c7148)}}, {{TOBN(0xb77fd76b, 0xb4ec4c20), TOBN(0xf0a12fa7, 0x0f3fe3fd), TOBN(0xf845bbf5, 0x41d8c7e8), TOBN(0xe4d969ca, 0x5ec10aa8)}, {TOBN(0x4c0053b7, 0x43e232a3), TOBN(0xdc7a3fac, 0x37f8a45a), TOBN(0x3c4261c5, 0x20d81c8f), TOBN(0xfd4b3453, 0xb00eab00)}}, {{TOBN(0x76d48f86, 0xd36e3062), TOBN(0x626c5277, 0xa143ff02), 
TOBN(0x538174de, 0xaf76f42e), TOBN(0x2267aa86, 0x6407ceac)}, {TOBN(0xfad76351, 0x72e572d5), TOBN(0xab861af7, 0xba7330eb), TOBN(0xa0a1c8c7, 0x418d8657), TOBN(0x988821cb, 0x20289a52)}}, {{TOBN(0x79732522, 0xcccc18ad), TOBN(0xaadf3f8d, 0xf1a6e027), TOBN(0xf7382c93, 0x17c2354d), TOBN(0x5ce1680c, 0xd818b689)}, {TOBN(0x359ebbfc, 0xd9ecbee9), TOBN(0x4330689c, 0x1cae62ac), TOBN(0xb55ce5b4, 0xc51ac38a), TOBN(0x7921dfea, 0xfe238ee8)}}, {{TOBN(0x3972bef8, 0x271d1ca5), TOBN(0x3e423bc7, 0xe8aabd18), TOBN(0x57b09f3f, 0x44a3e5e3), TOBN(0x5da886ae, 0x7b444d66)}, {TOBN(0x68206634, 0xa9964375), TOBN(0x356a2fa3, 0x699cd0ff), TOBN(0xaf0faa24, 0xdba515e9), TOBN(0x536e1f5c, 0xb321d79a)}}, {{TOBN(0xd3b9913a, 0x5c04e4ea), TOBN(0xd549dcfe, 0xd6f11513), TOBN(0xee227bf5, 0x79fd1d94), TOBN(0x9f35afee, 0xb43f2c67)}, {TOBN(0xd2638d24, 0xf1314f53), TOBN(0x62baf948, 0xcabcd822), TOBN(0x5542de29, 0x4ef48db0), TOBN(0xb3eb6a04, 0xfc5f6bb2)}}, {{TOBN(0x23c110ae, 0x1208e16a), TOBN(0x1a4d15b5, 0xf8363e24), TOBN(0x30716844, 0x164be00b), TOBN(0xa8e24824, 0xf6f4690d)}, {TOBN(0x548773a2, 0x90b170cf), TOBN(0xa1bef331, 0x42f191f4), TOBN(0x70f418d0, 0x9247aa97), TOBN(0xea06028e, 0x48be9147)}}, {{TOBN(0xe13122f3, 0xdbfb894e), TOBN(0xbe9b79f6, 0xce274b18), TOBN(0x85a49de5, 0xca58aadf), TOBN(0x24957758, 0x11487351)}, {TOBN(0x111def61, 0xbb939099), TOBN(0x1d6a974a, 0x26d13694), TOBN(0x4474b4ce, 0xd3fc253b), TOBN(0x3a1485e6, 0x4c5db15e)}}, {{TOBN(0xe79667b4, 0x147c15b4), TOBN(0xe34f553b, 0x7bc61301), TOBN(0x032b80f8, 0x17094381), TOBN(0x55d8bafd, 0x723eaa21)}, {TOBN(0x5a987995, 0xf1c0e74e), TOBN(0x5a9b292e, 0xebba289c), TOBN(0x413cd4b2, 0xeb4c8251), TOBN(0x98b5d243, 0xd162db0a)}}, {{TOBN(0xbb47bf66, 0x68342520), TOBN(0x08d68949, 0xbaa862d1), TOBN(0x11f349c7, 0xe906abcd), TOBN(0x454ce985, 0xed7bf00e)}, {TOBN(0xacab5c9e, 0xb55b803b), TOBN(0xb03468ea, 0x31e3c16d), TOBN(0x5c24213d, 0xd273bf12), TOBN(0x211538eb, 0x71587887)}}, {{TOBN(0x198e4a2f, 0x731dea2d), TOBN(0xd5856cf2, 0x74ed7b2a), TOBN(0x86a632eb, 0x13a664fe), 
TOBN(0x932cd909, 0xbda41291)}, {TOBN(0x850e95d4, 0xc0c4ddc0), TOBN(0xc0f422f8, 0x347fc2c9), TOBN(0xe68cbec4, 0x86076bcb), TOBN(0xf9e7c0c0, 0xcd6cd286)}}, {{TOBN(0x65994ddb, 0x0f5f27ca), TOBN(0xe85461fb, 0xa80d59ff), TOBN(0xff05481a, 0x66601023), TOBN(0xc665427a, 0xfc9ebbfb)}, {TOBN(0xb0571a69, 0x7587fd52), TOBN(0x935289f8, 0x8d49efce), TOBN(0x61becc60, 0xea420688), TOBN(0xb22639d9, 0x13a786af)}}, {{TOBN(0x1a8e6220, 0x361ecf90), TOBN(0x001f23e0, 0x25506463), TOBN(0xe4ae9b5d, 0x0a5c2b79), TOBN(0xebc9cdad, 0xd8149db5)}, {TOBN(0xb33164a1, 0x934aa728), TOBN(0x750eb00e, 0xae9b60f3), TOBN(0x5a91615b, 0x9b9cfbfd), TOBN(0x97015cbf, 0xef45f7f6)}}, {{TOBN(0xb462c4a5, 0xbf5151df), TOBN(0x21adcc41, 0xb07118f2), TOBN(0xd60c545b, 0x043fa42c), TOBN(0xfc21aa54, 0xe96be1ab)}, {TOBN(0xe84bc32f, 0x4e51ea80), TOBN(0x3dae45f0, 0x259b5d8d), TOBN(0xbb73c7eb, 0xc38f1b5e), TOBN(0xe405a74a, 0xe8ae617d)}}, {{TOBN(0xbb1ae9c6, 0x9f1c56bd), TOBN(0x8c176b98, 0x49f196a4), TOBN(0xc448f311, 0x6875092b), TOBN(0xb5afe3de, 0x9f976033)}, {TOBN(0xa8dafd49, 0x145813e5), TOBN(0x687fc4d9, 0xe2b34226), TOBN(0xf2dfc92d, 0x4c7ff57f), TOBN(0x004e3fc1, 0x401f1b46)}}, {{TOBN(0x5afddab6, 0x1430c9ab), TOBN(0x0bdd41d3, 0x2238e997), TOBN(0xf0947430, 0x418042ae), TOBN(0x71f9adda, 0xcdddc4cb)}, {TOBN(0x7090c016, 0xc52dd907), TOBN(0xd9bdf44d, 0x29e2047f), TOBN(0xe6f1fe80, 0x1b1011a6), TOBN(0xb63accbc, 0xd9acdc78)}}, {{TOBN(0xcfc7e235, 0x1272a95b), TOBN(0x0c667717, 0xa6276ac8), TOBN(0x3c0d3709, 0xe2d7eef7), TOBN(0x5add2b06, 0x9a685b3e)}, {TOBN(0x363ad32d, 0x14ea5d65), TOBN(0xf8e01f06, 0x8d7dd506), TOBN(0xc9ea2213, 0x75b4aac6), TOBN(0xed2a2bf9, 0x0d353466)}}, {{TOBN(0x439d79b5, 0xe9d3a7c3), TOBN(0x8e0ee5a6, 0x81b7f34b), TOBN(0xcf3dacf5, 0x1dc4ba75), TOBN(0x1d3d1773, 0xeb3310c7)}, {TOBN(0xa8e67112, 0x7747ae83), TOBN(0x31f43160, 0x197d6b40), TOBN(0x0521ccee, 0xcd961400), TOBN(0x67246f11, 0xf6535768)}}, {{TOBN(0x702fcc5a, 0xef0c3133), TOBN(0x247cc45d, 0x7e16693b), TOBN(0xfd484e49, 0xc729b749), TOBN(0x522cef7d, 0xb218320f)}, 
{TOBN(0xe56ef405, 0x59ab93b3), TOBN(0x225fba11, 0x9f181071), TOBN(0x33bd6595, 0x15330ed0), TOBN(0xc4be69d5, 0x1ddb32f7)}}, {{TOBN(0x264c7668, 0x0448087c), TOBN(0xac30903f, 0x71432dae), TOBN(0x3851b266, 0x00f9bf47), TOBN(0x400ed311, 0x6cdd6d03)}, {TOBN(0x045e79fe, 0xf8fd2424), TOBN(0xfdfd974a, 0xfa6da98b), TOBN(0x45c9f641, 0x0c1e673a), TOBN(0x76f2e733, 0x5b2c5168)}}, {{TOBN(0x1adaebb5, 0x2a601753), TOBN(0xb286514c, 0xc57c2d49), TOBN(0xd8769670, 0x1e0bfd24), TOBN(0x950c547e, 0x04478922)}, {TOBN(0xd1d41969, 0xe5d32bfe), TOBN(0x30bc1472, 0x750d6c3e), TOBN(0x8f3679fe, 0xe0e27f3a), TOBN(0x8f64a7dc, 0xa4a6ee0c)}}, {{TOBN(0x2fe59937, 0x633dfb1f), TOBN(0xea82c395, 0x977f2547), TOBN(0xcbdfdf1a, 0x661ea646), TOBN(0xc7ccc591, 0xb9085451)}, {TOBN(0x82177962, 0x81761e13), TOBN(0xda57596f, 0x9196885c), TOBN(0xbc17e849, 0x28ffbd70), TOBN(0x1e6e0a41, 0x2671d36f)}}, {{TOBN(0x61ae872c, 0x4152fcf5), TOBN(0x441c87b0, 0x9e77e754), TOBN(0xd0799dd5, 0xa34dff09), TOBN(0x766b4e44, 0x88a6b171)}, {TOBN(0xdc06a512, 0x11f1c792), TOBN(0xea02ae93, 0x4be35c3e), TOBN(0xe5ca4d6d, 0xe90c469e), TOBN(0x4df4368e, 0x56e4ff5c)}}, {{TOBN(0x7817acab, 0x4baef62e), TOBN(0x9f5a2202, 0xa85b91e8), TOBN(0x9666ebe6, 0x6ce57610), TOBN(0x32ad31f3, 0xf73bfe03)}, {TOBN(0x628330a4, 0x25bcf4d6), TOBN(0xea950593, 0x515056e6), TOBN(0x59811c89, 0xe1332156), TOBN(0xc89cf1fe, 0x8c11b2d7)}}, {{TOBN(0x75b63913, 0x04e60cc0), TOBN(0xce811e8d, 0x4625d375), TOBN(0x030e43fc, 0x2d26e562), TOBN(0xfbb30b4b, 0x608d36a0)}, {TOBN(0x634ff82c, 0x48528118), TOBN(0x7c6fe085, 0xcd285911), TOBN(0x7f2830c0, 0x99358f28), TOBN(0x2e60a95e, 0x665e6c09)}}, {{TOBN(0x08407d3d, 0x9b785dbf), TOBN(0x530889ab, 0xa759bce7), TOBN(0xf228e0e6, 0x52f61239), TOBN(0x2b6d1461, 0x6879be3c)}, {TOBN(0xe6902c04, 0x51a7bbf7), TOBN(0x30ad99f0, 0x76f24a64), TOBN(0x66d9317a, 0x98bc6da0), TOBN(0xf4f877f3, 0xcb596ac0)}}, {{TOBN(0xb05ff62d, 0x4c44f119), TOBN(0x4555f536, 0xe9b77416), TOBN(0xc7c0d059, 0x8caed63b), TOBN(0x0cd2b7ce, 0xc358b2a9)}, {TOBN(0x3f33287b, 0x46945fa3), 
TOBN(0xf8785b20, 0xd67c8791), TOBN(0xc54a7a61, 0x9637bd08), TOBN(0x54d4598c, 0x18be79d7)}}, {{TOBN(0x889e5acb, 0xc46d7ce1), TOBN(0x9a515bb7, 0x8b085877), TOBN(0xfac1a03d, 0x0b7a5050), TOBN(0x7d3e738a, 0xf2926035)}, {TOBN(0x861cc2ce, 0x2a6cb0eb), TOBN(0x6f2e2955, 0x8f7adc79), TOBN(0x61c4d451, 0x33016376), TOBN(0xd9fd2c80, 0x5ad59090)}}, {{TOBN(0xe5a83738, 0xb2b836a1), TOBN(0x855b41a0, 0x7c0d6622), TOBN(0x186fe317, 0x7cc19af1), TOBN(0x6465c1ff, 0xfdd99acb)}, {TOBN(0x46e5c23f, 0x6974b99e), TOBN(0x75a7cf8b, 0xa2717cbe), TOBN(0x4d2ebc3f, 0x062be658), TOBN(0x094b4447, 0x5f209c98)}}, {{TOBN(0x4af285ed, 0xb940cb5a), TOBN(0x6706d792, 0x7cc82f10), TOBN(0xc8c8776c, 0x030526fa), TOBN(0xfa8e6f76, 0xa0da9140)}, {TOBN(0x77ea9d34, 0x591ee4f0), TOBN(0x5f46e337, 0x40274166), TOBN(0x1bdf98bb, 0xea671457), TOBN(0xd7c08b46, 0x862a1fe2)}}, {{TOBN(0x46cc303c, 0x1c08ad63), TOBN(0x99543440, 0x4c845e7b), TOBN(0x1b8fbdb5, 0x48f36bf7), TOBN(0x5b82c392, 0x8c8273a7)}, {TOBN(0x08f712c4, 0x928435d5), TOBN(0x071cf0f1, 0x79330380), TOBN(0xc74c2d24, 0xa8da054a), TOBN(0xcb0e7201, 0x43c46b5c)}}, {{TOBN(0x0ad7337a, 0xc0b7eff3), TOBN(0x8552225e, 0xc5e48b3c), TOBN(0xe6f78b0c, 0x73f13a5f), TOBN(0x5e70062e, 0x82349cbe)}, {TOBN(0x6b8d5048, 0xe7073969), TOBN(0x392d2a29, 0xc33cb3d2), TOBN(0xee4f727c, 0x4ecaa20f), TOBN(0xa068c99e, 0x2ccde707)}}, {{TOBN(0xfcd5651f, 0xb87a2913), TOBN(0xea3e3c15, 0x3cc252f0), TOBN(0x777d92df, 0x3b6cd3e4), TOBN(0x7a414143, 0xc5a732e7)}, {TOBN(0xa895951a, 0xa71ff493), TOBN(0xfe980c92, 0xbbd37cf6), TOBN(0x45bd5e64, 0xdecfeeff), TOBN(0x910dc2a9, 0xa44c43e9)}}, {{TOBN(0xcb403f26, 0xcca9f54d), TOBN(0x928bbdfb, 0x9303f6db), TOBN(0x3c37951e, 0xa9eee67c), TOBN(0x3bd61a52, 0xf79961c3)}, {TOBN(0x09a238e6, 0x395c9a79), TOBN(0x6940ca2d, 0x61eb352d), TOBN(0x7d1e5c5e, 0xc1875631), TOBN(0x1e19742c, 0x1e1b20d1)}}, {{TOBN(0x4633d908, 0x23fc2e6e), TOBN(0xa76e29a9, 0x08959149), TOBN(0x61069d9c, 0x84ed7da5), TOBN(0x0baa11cf, 0x5dbcad51)}, {TOBN(0xd01eec64, 0x961849da), TOBN(0x93b75f1f, 0xaf3d8c28), 
TOBN(0x57bc4f9f, 0x1ca2ee44), TOBN(0x5a26322d, 0x00e00558)}}, {{TOBN(0x1888d658, 0x61a023ef), TOBN(0x1d72aab4, 0xb9e5246e), TOBN(0xa9a26348, 0xe5563ec0), TOBN(0xa0971963, 0xc3439a43)}, {TOBN(0x567dd54b, 0xadb9b5b7), TOBN(0x73fac1a1, 0xc45a524b), TOBN(0x8fe97ef7, 0xfe38e608), TOBN(0x608748d2, 0x3f384f48)}}, {{TOBN(0xb0571794, 0xc486094f), TOBN(0x869254a3, 0x8bf3a8d6), TOBN(0x148a8dd1, 0x310b0e25), TOBN(0x99ab9f3f, 0x9aa3f7d8)}, {TOBN(0x0927c68a, 0x6706c02e), TOBN(0x22b5e76c, 0x69790e6c), TOBN(0x6c325260, 0x6c71376c), TOBN(0x53a57690, 0x09ef6657)}}, {{TOBN(0x8d63f852, 0xedffcf3a), TOBN(0xb4d2ed04, 0x3c0a6f55), TOBN(0xdb3aa8de, 0x12519b9e), TOBN(0x5d38e9c4, 0x1e0a569a)}, {TOBN(0x871528bf, 0x303747e2), TOBN(0xa208e77c, 0xf5b5c18d), TOBN(0x9d129c88, 0xca6bf923), TOBN(0xbcbf197f, 0xbf02839f)}}, {{TOBN(0x9b9bf030, 0x27323194), TOBN(0x3b055a8b, 0x339ca59d), TOBN(0xb46b2312, 0x0f669520), TOBN(0x19789f1f, 0x497e5f24)}, {TOBN(0x9c499468, 0xaaf01801), TOBN(0x72ee1190, 0x8b69d59c), TOBN(0x8bd39595, 0xacf4c079), TOBN(0x3ee11ece, 0x8e0cd048)}}, {{TOBN(0xebde86ec, 0x1ed66f18), TOBN(0x225d906b, 0xd61fce43), TOBN(0x5cab07d6, 0xe8bed74d), TOBN(0x16e4617f, 0x27855ab7)}, {TOBN(0x6568aadd, 0xb2fbc3dd), TOBN(0xedb5484f, 0x8aeddf5b), TOBN(0x878f20e8, 0x6dcf2fad), TOBN(0x3516497c, 0x615f5699)}}}, {{{TOBN(0xef0a3fec, 0xfa181e69), TOBN(0x9ea02f81, 0x30d69a98), TOBN(0xb2e9cf8e, 0x66eab95d), TOBN(0x520f2beb, 0x24720021)}, {TOBN(0x621c540a, 0x1df84361), TOBN(0x12037721, 0x71fa6d5d), TOBN(0x6e3c7b51, 0x0ff5f6ff), TOBN(0x817a069b, 0xabb2bef3)}}, {{TOBN(0x83572fb6, 0xb294cda6), TOBN(0x6ce9bf75, 0xb9039f34), TOBN(0x20e012f0, 0x095cbb21), TOBN(0xa0aecc1b, 0xd063f0da)}, {TOBN(0x57c21c3a, 0xf02909e5), TOBN(0xc7d59ecf, 0x48ce9cdc), TOBN(0x2732b844, 0x8ae336f8), TOBN(0x056e3723, 0x3f4f85f4)}}, {{TOBN(0x8a10b531, 0x89e800ca), TOBN(0x50fe0c17, 0x145208fd), TOBN(0x9e43c0d3, 0xb714ba37), TOBN(0x427d200e, 0x34189acc)}, {TOBN(0x05dee24f, 0xe616e2c0), TOBN(0x9c25f4c8, 0xee1854c1), TOBN(0x4d3222a5, 0x8f342a73), 
TOBN(0x0807804f, 0xa027c952)}}, {{TOBN(0xc222653a, 0x4f0d56f3), TOBN(0x961e4047, 0xca28b805), TOBN(0x2c03f8b0, 0x4a73434b), TOBN(0x4c966787, 0xab712a19)}, {TOBN(0xcc196c42, 0x864fee42), TOBN(0xc1be93da, 0x5b0ece5c), TOBN(0xa87d9f22, 0xc131c159), TOBN(0x2bb6d593, 0xdce45655)}}, {{TOBN(0x22c49ec9, 0xb809b7ce), TOBN(0x8a41486b, 0xe2c72c2c), TOBN(0x813b9420, 0xfea0bf36), TOBN(0xb3d36ee9, 0xa66dac69)}, {TOBN(0x6fddc08a, 0x328cc987), TOBN(0x0a3bcd2c, 0x3a326461), TOBN(0x7103c49d, 0xd810dbba), TOBN(0xf9d81a28, 0x4b78a4c4)}}, {{TOBN(0x3de865ad, 0xe4d55941), TOBN(0xdedafa5e, 0x30384087), TOBN(0x6f414abb, 0x4ef18b9b), TOBN(0x9ee9ea42, 0xfaee5268)}, {TOBN(0x260faa16, 0x37a55a4a), TOBN(0xeb19a514, 0x015f93b9), TOBN(0x51d7ebd2, 0x9e9c3598), TOBN(0x523fc56d, 0x1932178e)}}, {{TOBN(0x501d070c, 0xb98fe684), TOBN(0xd60fbe9a, 0x124a1458), TOBN(0xa45761c8, 0x92bc6b3f), TOBN(0xf5384858, 0xfe6f27cb)}, {TOBN(0x4b0271f7, 0xb59e763b), TOBN(0x3d4606a9, 0x5b5a8e5e), TOBN(0x1eda5d9b, 0x05a48292), TOBN(0xda7731d0, 0xe6fec446)}}, {{TOBN(0xa3e33693, 0x90d45871), TOBN(0xe9764040, 0x06166d8d), TOBN(0xb5c33682, 0x89a90403), TOBN(0x4bd17983, 0x72f1d637)}, {TOBN(0xa616679e, 0xd5d2c53a), TOBN(0x5ec4bcd8, 0xfdcf3b87), TOBN(0xae6d7613, 0xb66a694e), TOBN(0x7460fc76, 0xe3fc27e5)}}, {{TOBN(0x70469b82, 0x95caabee), TOBN(0xde024ca5, 0x889501e3), TOBN(0x6bdadc06, 0x076ed265), TOBN(0x0cb1236b, 0x5a0ef8b2)}, {TOBN(0x4065ddbf, 0x0972ebf9), TOBN(0xf1dd3875, 0x22aca432), TOBN(0xa88b97cf, 0x744aff76), TOBN(0xd1359afd, 0xfe8e3d24)}}, {{TOBN(0x52a3ba2b, 0x91502cf3), TOBN(0x2c3832a8, 0x084db75d), TOBN(0x04a12ddd, 0xde30b1c9), TOBN(0x7802eabc, 0xe31fd60c)}, {TOBN(0x33707327, 0xa37fddab), TOBN(0x65d6f2ab, 0xfaafa973), TOBN(0x3525c5b8, 0x11e6f91a), TOBN(0x76aeb0c9, 0x5f46530b)}}, {{TOBN(0xe8815ff6, 0x2f93a675), TOBN(0xa6ec9684, 0x05f48679), TOBN(0x6dcbb556, 0x358ae884), TOBN(0x0af61472, 0xe19e3873)}, {TOBN(0x72334372, 0xa5f696be), TOBN(0xc65e57ea, 0x6f22fb70), TOBN(0x268da30c, 0x946cea90), TOBN(0x136a8a87, 0x65681b2a)}}, 
{{TOBN(0xad5e81dc, 0x0f9f44d4), TOBN(0xf09a6960, 0x2c46585a), TOBN(0xd1649164, 0xc447d1b1), TOBN(0x3b4b36c8, 0x879dc8b1)}, {TOBN(0x20d4177b, 0x3b6b234c), TOBN(0x096a2505, 0x1730d9d0), TOBN(0x0611b9b8, 0xef80531d), TOBN(0xba904b3b, 0x64bb495d)}}, {{TOBN(0x1192d9d4, 0x93a3147a), TOBN(0x9f30a5dc, 0x9a565545), TOBN(0x90b1f9cb, 0x6ef07212), TOBN(0x29958546, 0x0d87fc13)}, {TOBN(0xd3323eff, 0xc17db9ba), TOBN(0xcb18548c, 0xcb1644a8), TOBN(0x18a306d4, 0x4f49ffbc), TOBN(0x28d658f1, 0x4c2e8684)}}, {{TOBN(0x44ba60cd, 0xa99f8c71), TOBN(0x67b7abdb, 0x4bf742ff), TOBN(0x66310f9c, 0x914b3f99), TOBN(0xae430a32, 0xf412c161)}, {TOBN(0x1e6776d3, 0x88ace52f), TOBN(0x4bc0fa24, 0x52d7067d), TOBN(0x03c286aa, 0x8f07cd1b), TOBN(0x4cb8f38c, 0xa985b2c1)}}, {{TOBN(0x83ccbe80, 0x8c3bff36), TOBN(0x005a0bd2, 0x5263e575), TOBN(0x460d7dda, 0x259bdcd1), TOBN(0x4a1c5642, 0xfa5cab6b)}, {TOBN(0x2b7bdbb9, 0x9fe4fc88), TOBN(0x09418e28, 0xcc97bbb5), TOBN(0xd8274fb4, 0xa12321ae), TOBN(0xb137007d, 0x5c87b64e)}}, {{TOBN(0x80531fe1, 0xc63c4962), TOBN(0x50541e89, 0x981fdb25), TOBN(0xdc1291a1, 0xfd4c2b6b), TOBN(0xc0693a17, 0xa6df4fca)}, {TOBN(0xb2c4604e, 0x0117f203), TOBN(0x245f1963, 0x0a99b8d0), TOBN(0xaedc20aa, 0xc6212c44), TOBN(0xb1ed4e56, 0x520f52a8)}}, {{TOBN(0xfe48f575, 0xf8547be3), TOBN(0x0a7033cd, 0xa9e45f98), TOBN(0x4b45d3a9, 0x18c50100), TOBN(0xb2a6cd6a, 0xa61d41da)}, {TOBN(0x60bbb4f5, 0x57933c6b), TOBN(0xa7538ebd, 0x2b0d7ffc), TOBN(0x9ea3ab8d, 0x8cd626b6), TOBN(0x8273a484, 0x3601625a)}}, {{TOBN(0x88859845, 0x0168e508), TOBN(0x8cbc9bb2, 0x99a94abd), TOBN(0x713ac792, 0xfab0a671), TOBN(0xa3995b19, 0x6c9ebffc)}, {TOBN(0xe711668e, 0x1239e152), TOBN(0x56892558, 0xbbb8dff4), TOBN(0x8bfc7dab, 0xdbf17963), TOBN(0x5b59fe5a, 0xb3de1253)}}, {{TOBN(0x7e3320eb, 0x34a9f7ae), TOBN(0xe5e8cf72, 0xd751efe4), TOBN(0x7ea003bc, 0xd9be2f37), TOBN(0xc0f551a0, 0xb6c08ef7)}, {TOBN(0x56606268, 0x038f6725), TOBN(0x1dd38e35, 0x6d92d3b6), TOBN(0x07dfce7c, 0xc3cbd686), TOBN(0x4e549e04, 0x651c5da8)}}, {{TOBN(0x4058f93b, 0x08b19340), 
TOBN(0xc2fae6f4, 0xcac6d89d), TOBN(0x4bad8a8c, 0x8f159cc7), TOBN(0x0ddba4b3, 0xcb0b601c)}, {TOBN(0xda4fc7b5, 0x1dd95f8c), TOBN(0x1d163cd7, 0xcea5c255), TOBN(0x30707d06, 0x274a8c4c), TOBN(0x79d9e008, 0x2802e9ce)}}, {{TOBN(0x02a29ebf, 0xe6ddd505), TOBN(0x37064e74, 0xb50bed1a), TOBN(0x3f6bae65, 0xa7327d57), TOBN(0x3846f5f1, 0xf83920bc)}, {TOBN(0x87c37491, 0x60df1b9b), TOBN(0x4cfb2895, 0x2d1da29f), TOBN(0x10a478ca, 0x4ed1743c), TOBN(0x390c6030, 0x3edd47c6)}}, {{TOBN(0x8f3e5312, 0x8c0a78de), TOBN(0xccd02bda, 0x1e85df70), TOBN(0xd6c75c03, 0xa61b6582), TOBN(0x0762921c, 0xfc0eebd1)}, {TOBN(0xd34d0823, 0xd85010c0), TOBN(0xd73aaacb, 0x0044cf1f), TOBN(0xfb4159bb, 0xa3b5e78a), TOBN(0x2287c7f7, 0xe5826f3f)}}, {{TOBN(0x4aeaf742, 0x580b1a01), TOBN(0xf080415d, 0x60423b79), TOBN(0xe12622cd, 0xa7dea144), TOBN(0x49ea4996, 0x59d62472)}, {TOBN(0xb42991ef, 0x571f3913), TOBN(0x0610f214, 0xf5b25a8a), TOBN(0x47adc585, 0x30b79e8f), TOBN(0xf90e3df6, 0x07a065a2)}}, {{TOBN(0x5d0a5deb, 0x43e2e034), TOBN(0x53fb5a34, 0x444024aa), TOBN(0xa8628c68, 0x6b0c9f7f), TOBN(0x9c69c29c, 0xac563656)}, {TOBN(0x5a231feb, 0xbace47b6), TOBN(0xbdce0289, 0x9ea5a2ec), TOBN(0x05da1fac, 0x9463853e), TOBN(0x96812c52, 0x509e78aa)}}, {{TOBN(0xd3fb5771, 0x57151692), TOBN(0xeb2721f8, 0xd98e1c44), TOBN(0xc0506087, 0x32399be1), TOBN(0xda5a5511, 0xd979d8b8)}, {TOBN(0x737ed55d, 0xc6f56780), TOBN(0xe20d3004, 0x0dc7a7f4), TOBN(0x02ce7301, 0xf5941a03), TOBN(0x91ef5215, 0xed30f83a)}}, {{TOBN(0x28727fc1, 0x4092d85f), TOBN(0x72d223c6, 0x5c49e41a), TOBN(0xa7cf30a2, 0xba6a4d81), TOBN(0x7c086209, 0xb030d87d)}, {TOBN(0x04844c7d, 0xfc588b09), TOBN(0x728cd499, 0x5874bbb0), TOBN(0xcc1281ee, 0xe84c0495), TOBN(0x0769b5ba, 0xec31958f)}}, {{TOBN(0x665c228b, 0xf99c2471), TOBN(0xf2d8a11b, 0x191eb110), TOBN(0x4594f494, 0xd36d7024), TOBN(0x482ded8b, 0xcdcb25a1)}, {TOBN(0xc958a9d8, 0xdadd4885), TOBN(0x7004477e, 0xf1d2b547), TOBN(0x0a45f6ef, 0x2a0af550), TOBN(0x4fc739d6, 0x2f8d6351)}}, {{TOBN(0x75cdaf27, 0x786f08a9), TOBN(0x8700bb26, 0x42c2737f), 
TOBN(0x855a7141, 0x1c4e2670), TOBN(0x810188c1, 0x15076fef)}, {TOBN(0xc251d0c9, 0xabcd3297), TOBN(0xae4c8967, 0xf48108eb), TOBN(0xbd146de7, 0x18ceed30), TOBN(0xf9d4f07a, 0xc986bced)}}, {{TOBN(0x5ad98ed5, 0x83fa1e08), TOBN(0x7780d33e, 0xbeabd1fb), TOBN(0xe330513c, 0x903b1196), TOBN(0xba11de9e, 0xa47bc8c4)}, {TOBN(0x684334da, 0x02c2d064), TOBN(0x7ecf360d, 0xa48de23b), TOBN(0x57a1b474, 0x0a9089d8), TOBN(0xf28fa439, 0xff36734c)}}, {{TOBN(0xf2a482cb, 0xea4570b3), TOBN(0xee65d68b, 0xa5ebcee9), TOBN(0x988d0036, 0xb9694cd5), TOBN(0x53edd0e9, 0x37885d32)}, {TOBN(0xe37e3307, 0xbeb9bc6d), TOBN(0xe9abb907, 0x9f5c6768), TOBN(0x4396ccd5, 0x51f2160f), TOBN(0x2500888c, 0x47336da6)}}, {{TOBN(0x383f9ed9, 0x926fce43), TOBN(0x809dd1c7, 0x04da2930), TOBN(0x30f6f596, 0x8a4cb227), TOBN(0x0d700c7f, 0x73a56b38)}, {TOBN(0x1825ea33, 0xab64a065), TOBN(0xaab9b735, 0x1338df80), TOBN(0x1516100d, 0x9b63f57f), TOBN(0x2574395a, 0x27a6a634)}}, {{TOBN(0xb5560fb6, 0x700a1acd), TOBN(0xe823fd73, 0xfd999681), TOBN(0xda915d1f, 0x6cb4e1ba), TOBN(0x0d030118, 0x6ebe00a3)}, {TOBN(0x744fb0c9, 0x89fca8cd), TOBN(0x970d01db, 0xf9da0e0b), TOBN(0x0ad8c564, 0x7931d76f), TOBN(0xb15737bf, 0xf659b96a)}}, {{TOBN(0xdc9933e8, 0xa8b484e7), TOBN(0xb2fdbdf9, 0x7a26dec7), TOBN(0x2349e9a4, 0x9f1f0136), TOBN(0x7860368e, 0x70fddddb)}, {TOBN(0xd93d2c1c, 0xf9ad3e18), TOBN(0x6d6c5f17, 0x689f4e79), TOBN(0x7a544d91, 0xb24ff1b6), TOBN(0x3e12a5eb, 0xfe16cd8c)}}, {{TOBN(0x543574e9, 0xa56b872f), TOBN(0xa1ad550c, 0xfcf68ea2), TOBN(0x689e37d2, 0x3f560ef7), TOBN(0x8c54b9ca, 0xc9d47a8b)}, {TOBN(0x46d40a4a, 0x088ac342), TOBN(0xec450c7c, 0x1576c6d0), TOBN(0xb589e31c, 0x1f9689e9), TOBN(0xdacf2602, 0xb8781718)}}, {{TOBN(0xa89237c6, 0xc8cb6b42), TOBN(0x1326fc93, 0xb96ef381), TOBN(0x55d56c6d, 0xb5f07825), TOBN(0xacba2eea, 0x7449e22d)}, {TOBN(0x74e0887a, 0x633c3000), TOBN(0xcb6cd172, 0xd7cbcf71), TOBN(0x309e81de, 0xc36cf1be), TOBN(0x07a18a6d, 0x60ae399b)}}, {{TOBN(0xb36c2679, 0x9edce57e), TOBN(0x52b892f4, 0xdf001d41), TOBN(0xd884ae5d, 0x16a1f2c6), 
TOBN(0x9b329424, 0xefcc370a)}, {TOBN(0x3120daf2, 0xbd2e21df), TOBN(0x55298d2d, 0x02470a99), TOBN(0x0b78af6c, 0xa05db32e), TOBN(0x5c76a331, 0x601f5636)}}, {{TOBN(0xaae861ff, 0xf8a4f29c), TOBN(0x70dc9240, 0xd68f8d49), TOBN(0x960e649f, 0x81b1321c), TOBN(0x3d2c801b, 0x8792e4ce)}, {TOBN(0xf479f772, 0x42521876), TOBN(0x0bed93bc, 0x416c79b1), TOBN(0xa67fbc05, 0x263e5bc9), TOBN(0x01e8e630, 0x521db049)}}, {{TOBN(0x76f26738, 0xc6f3431e), TOBN(0xe609cb02, 0xe3267541), TOBN(0xb10cff2d, 0x818c877c), TOBN(0x1f0e75ce, 0x786a13cb)}, {TOBN(0xf4fdca64, 0x1158544d), TOBN(0x5d777e89, 0x6cb71ed0), TOBN(0x3c233737, 0xa9aa4755), TOBN(0x7b453192, 0xe527ab40)}}, {{TOBN(0xdb59f688, 0x39f05ffe), TOBN(0x8f4f4be0, 0x6d82574e), TOBN(0xcce3450c, 0xee292d1b), TOBN(0xaa448a12, 0x61ccd086)}, {TOBN(0xabce91b3, 0xf7914967), TOBN(0x4537f09b, 0x1908a5ed), TOBN(0xa812421e, 0xf51042e7), TOBN(0xfaf5cebc, 0xec0b3a34)}}, {{TOBN(0x730ffd87, 0x4ca6b39a), TOBN(0x70fb72ed, 0x02efd342), TOBN(0xeb4735f9, 0xd75c8edb), TOBN(0xc11f2157, 0xc278aa51)}, {TOBN(0xc459f635, 0xbf3bfebf), TOBN(0x3a1ff0b4, 0x6bd9601f), TOBN(0xc9d12823, 0xc420cb73), TOBN(0x3e9af3e2, 0x3c2915a3)}}, {{TOBN(0xe0c82c72, 0xb41c3440), TOBN(0x175239e5, 0xe3039a5f), TOBN(0xe1084b8a, 0x558795a3), TOBN(0x328d0a1d, 0xd01e5c60)}, {TOBN(0x0a495f2e, 0xd3788a04), TOBN(0x25d8ff16, 0x66c11a9f), TOBN(0xf5155f05, 0x9ed692d6), TOBN(0x954fa107, 0x4f425fe4)}}, {{TOBN(0xd16aabf2, 0xe98aaa99), TOBN(0x90cd8ba0, 0x96b0f88a), TOBN(0x957f4782, 0xc154026a), TOBN(0x54ee0734, 0x52af56d2)}, {TOBN(0xbcf89e54, 0x45b4147a), TOBN(0x3d102f21, 0x9a52816c), TOBN(0x6808517e, 0x39b62e77), TOBN(0x92e25421, 0x69169ad8)}}, {{TOBN(0xd721d871, 0xbb608558), TOBN(0x60e4ebae, 0xf6d4ff9b), TOBN(0x0ba10819, 0x41f2763e), TOBN(0xca2e45be, 0x51ee3247)}, {TOBN(0x66d172ec, 0x2bfd7a5f), TOBN(0x528a8f2f, 0x74d0b12d), TOBN(0xe17f1e38, 0xdabe70dc), TOBN(0x1d5d7316, 0x9f93983c)}}, {{TOBN(0x51b2184a, 0xdf423e31), TOBN(0xcb417291, 0xaedb1a10), TOBN(0x2054ca93, 0x625bcab9), TOBN(0x54396860, 0xa98998f0)}, 
{TOBN(0x4e53f6c4, 0xa54ae57e), TOBN(0x0ffeb590, 0xee648e9d), TOBN(0xfbbdaadc, 0x6afaf6bc), TOBN(0xf88ae796, 0xaa3bfb8a)}}, {{TOBN(0x209f1d44, 0xd2359ed9), TOBN(0xac68dd03, 0xf3544ce2), TOBN(0xf378da47, 0xfd51e569), TOBN(0xe1abd860, 0x2cc80097)}, {TOBN(0x23ca18d9, 0x343b6e3a), TOBN(0x480797e8, 0xb40a1bae), TOBN(0xd1f0c717, 0x533f3e67), TOBN(0x44896970, 0x06e6cdfc)}}, {{TOBN(0x8ca21055, 0x52a82e8d), TOBN(0xb2caf785, 0x78460cdc), TOBN(0x4c1b7b62, 0xe9037178), TOBN(0xefc09d2c, 0xdb514b58)}, {TOBN(0x5f2df9ee, 0x9113be5c), TOBN(0x2fbda78f, 0xb3f9271c), TOBN(0xe09a81af, 0x8f83fc54), TOBN(0x06b13866, 0x8afb5141)}}, {{TOBN(0x38f6480f, 0x43e3865d), TOBN(0x72dd77a8, 0x1ddf47d9), TOBN(0xf2a8e971, 0x4c205ff7), TOBN(0x46d449d8, 0x9d088ad8)}, {TOBN(0x926619ea, 0x185d706f), TOBN(0xe47e02eb, 0xc7dd7f62), TOBN(0xe7f120a7, 0x8cbc2031), TOBN(0xc18bef00, 0x998d4ac9)}}, {{TOBN(0x18f37a9c, 0x6bdf22da), TOBN(0xefbc432f, 0x90dc82df), TOBN(0xc52cef8e, 0x5d703651), TOBN(0x82887ba0, 0xd99881a5)}, {TOBN(0x7cec9dda, 0xb920ec1d), TOBN(0xd0d7e8c3, 0xec3e8d3b), TOBN(0x445bc395, 0x4ca88747), TOBN(0xedeaa2e0, 0x9fd53535)}}, {{TOBN(0x461b1d93, 0x6cc87475), TOBN(0xd92a52e2, 0x6d2383bd), TOBN(0xfabccb59, 0xd7903546), TOBN(0x6111a761, 0x3d14b112)}, {TOBN(0x0ae584fe, 0xb3d5f612), TOBN(0x5ea69b8d, 0x60e828ec), TOBN(0x6c078985, 0x54087030), TOBN(0x649cab04, 0xac4821fe)}}, {{TOBN(0x25ecedcf, 0x8bdce214), TOBN(0xb5622f72, 0x86af7361), TOBN(0x0e1227aa, 0x7038b9e2), TOBN(0xd0efb273, 0xac20fa77)}, {TOBN(0x817ff88b, 0x79df975b), TOBN(0x856bf286, 0x1999503e), TOBN(0xb4d5351f, 0x5038ec46), TOBN(0x740a52c5, 0xfc42af6e)}}, {{TOBN(0x2e38bb15, 0x2cbb1a3f), TOBN(0xc3eb99fe, 0x17a83429), TOBN(0xca4fcbf1, 0xdd66bb74), TOBN(0x880784d6, 0xcde5e8fc)}, {TOBN(0xddc84c1c, 0xb4e7a0be), TOBN(0x8780510d, 0xbd15a72f), TOBN(0x44bcf1af, 0x81ec30e1), TOBN(0x141e50a8, 0x0a61073e)}}, {{TOBN(0x0d955718, 0x47be87ae), TOBN(0x68a61417, 0xf76a4372), TOBN(0xf57e7e87, 0xc607c3d3), TOBN(0x043afaf8, 0x5252f332)}, {TOBN(0xcc14e121, 0x1552a4d2), 
TOBN(0xb6dee692, 0xbb4d4ab4), TOBN(0xb6ab74c8, 0xa03816a4), TOBN(0x84001ae4, 0x6f394a29)}}, {{TOBN(0x5bed8344, 0xd795fb45), TOBN(0x57326e7d, 0xb79f55a5), TOBN(0xc9533ce0, 0x4accdffc), TOBN(0x53473caf, 0x3993fa04)}, {TOBN(0x7906eb93, 0xa13df4c8), TOBN(0xa73e51f6, 0x97cbe46f), TOBN(0xd1ab3ae1, 0x0ae4ccf8), TOBN(0x25614508, 0x8a5b3dbc)}}, {{TOBN(0x61eff962, 0x11a71b27), TOBN(0xdf71412b, 0x6bb7fa39), TOBN(0xb31ba6b8, 0x2bd7f3ef), TOBN(0xb0b9c415, 0x69180d29)}, {TOBN(0xeec14552, 0x014cdde5), TOBN(0x702c624b, 0x227b4bbb), TOBN(0x2b15e8c2, 0xd3e988f3), TOBN(0xee3bcc6d, 0xa4f7fd04)}}, {{TOBN(0x9d00822a, 0x42ac6c85), TOBN(0x2db0cea6, 0x1df9f2b7), TOBN(0xd7cad2ab, 0x42de1e58), TOBN(0x346ed526, 0x2d6fbb61)}, {TOBN(0xb3962995, 0x1a2faf09), TOBN(0x2fa8a580, 0x7c25612e), TOBN(0x30ae04da, 0x7cf56490), TOBN(0x75662908, 0x0eea3961)}}, {{TOBN(0x3609f5c5, 0x3d080847), TOBN(0xcb081d39, 0x5241d4f6), TOBN(0xb4fb3810, 0x77961a63), TOBN(0xc20c5984, 0x2abb66fc)}, {TOBN(0x3d40aa7c, 0xf902f245), TOBN(0x9cb12736, 0x4e536b1e), TOBN(0x5eda24da, 0x99b3134f), TOBN(0xafbd9c69, 0x5cd011af)}}, {{TOBN(0x9a16e30a, 0xc7088c7d), TOBN(0x5ab65710, 0x3207389f), TOBN(0x1b09547f, 0xe7407a53), TOBN(0x2322f9d7, 0x4fdc6eab)}, {TOBN(0xc0f2f22d, 0x7430de4d), TOBN(0x19382696, 0xe68ca9a9), TOBN(0x17f1eff1, 0x918e5868), TOBN(0xe3b5b635, 0x586f4204)}}, {{TOBN(0x146ef980, 0x3fbc4341), TOBN(0x359f2c80, 0x5b5eed4e), TOBN(0x9f35744e, 0x7482e41d), TOBN(0x9a9ac3ec, 0xf3b224c2)}, {TOBN(0x9161a6fe, 0x91fc50ae), TOBN(0x89ccc66b, 0xc613fa7c), TOBN(0x89268b14, 0xc732f15a), TOBN(0x7cd6f4e2, 0xb467ed03)}}, {{TOBN(0xfbf79869, 0xce56b40e), TOBN(0xf93e094c, 0xc02dde98), TOBN(0xefe0c3a8, 0xedee2cd7), TOBN(0x90f3ffc0, 0xb268fd42)}, {TOBN(0x81a7fd56, 0x08241aed), TOBN(0x95ab7ad8, 0x00b1afe8), TOBN(0x40127056, 0x3e310d52), TOBN(0xd3ffdeb1, 0x09d9fc43)}}, {{TOBN(0xc8f85c91, 0xd11a8594), TOBN(0x2e74d258, 0x31cf6db8), TOBN(0x829c7ca3, 0x02b5dfd0), TOBN(0xe389cfbe, 0x69143c86)}, {TOBN(0xd01b6405, 0x941768d8), TOBN(0x45103995, 0x03bf825d), 
TOBN(0xcc4ee166, 0x56cd17e2), TOBN(0xbea3c283, 0xba037e79)}}, {{TOBN(0x4e1ac06e, 0xd9a47520), TOBN(0xfbfe18aa, 0xaf852404), TOBN(0x5615f8e2, 0x8087648a), TOBN(0x7301e47e, 0xb9d150d9)}, {TOBN(0x79f9f9dd, 0xb299b977), TOBN(0x76697a7b, 0xa5b78314), TOBN(0x10d67468, 0x7d7c90e7), TOBN(0x7afffe03, 0x937210b5)}}, {{TOBN(0x5aef3e4b, 0x28c22cee), TOBN(0xefb0ecd8, 0x09fd55ae), TOBN(0x4cea7132, 0x0d2a5d6a), TOBN(0x9cfb5fa1, 0x01db6357)}, {TOBN(0x395e0b57, 0xf36e1ac5), TOBN(0x008fa9ad, 0x36cafb7d), TOBN(0x8f6cdf70, 0x5308c4db), TOBN(0x51527a37, 0x95ed2477)}}, {{TOBN(0xba0dee30, 0x5bd21311), TOBN(0x6ed41b22, 0x909c90d7), TOBN(0xc5f6b758, 0x7c8696d3), TOBN(0x0db8eaa8, 0x3ce83a80)}, {TOBN(0xd297fe37, 0xb24b4b6f), TOBN(0xfe58afe8, 0x522d1f0d), TOBN(0x97358736, 0x8c98dbd9), TOBN(0x6bc226ca, 0x9454a527)}}, {{TOBN(0xa12b384e, 0xce53c2d0), TOBN(0x779d897d, 0x5e4606da), TOBN(0xa53e47b0, 0x73ec12b0), TOBN(0x462dbbba, 0x5756f1ad)}, {TOBN(0x69fe09f2, 0xcafe37b6), TOBN(0x273d1ebf, 0xecce2e17), TOBN(0x8ac1d538, 0x3cf607fd), TOBN(0x8035f7ff, 0x12e10c25)}}}, {{{TOBN(0x854d34c7, 0x7e6c5520), TOBN(0xc27df9ef, 0xdcb9ea58), TOBN(0x405f2369, 0xd686666d), TOBN(0x29d1febf, 0x0417aa85)}, {TOBN(0x9846819e, 0x93470afe), TOBN(0x3e6a9669, 0xe2a27f9e), TOBN(0x24d008a2, 0xe31e6504), TOBN(0xdba7cecf, 0x9cb7680a)}}, {{TOBN(0xecaff541, 0x338d6e43), TOBN(0x56f7dd73, 0x4541d5cc), TOBN(0xb5d426de, 0x96bc88ca), TOBN(0x48d94f6b, 0x9ed3a2c3)}, {TOBN(0x6354a3bb, 0x2ef8279c), TOBN(0xd575465b, 0x0b1867f2), TOBN(0xef99b0ff, 0x95225151), TOBN(0xf3e19d88, 0xf94500d8)}}, {{TOBN(0x92a83268, 0xe32dd620), TOBN(0x913ec99f, 0x627849a2), TOBN(0xedd8fdfa, 0x2c378882), TOBN(0xaf96f33e, 0xee6f8cfe)}, {TOBN(0xc06737e5, 0xdc3fa8a5), TOBN(0x236bb531, 0xb0b03a1d), TOBN(0x33e59f29, 0x89f037b0), TOBN(0x13f9b5a7, 0xd9a12a53)}}, {{TOBN(0x0d0df6ce, 0x51efb310), TOBN(0xcb5b2eb4, 0x958df5be), TOBN(0xd6459e29, 0x36158e59), TOBN(0x82aae2b9, 0x1466e336)}, {TOBN(0xfb658a39, 0x411aa636), TOBN(0x7152ecc5, 0xd4c0a933), TOBN(0xf10c758a, 0x49f026b7), 
TOBN(0xf4837f97, 0xcb09311f)}}, {{TOBN(0xddfb02c4, 0xc753c45f), TOBN(0x18ca81b6, 0xf9c840fe), TOBN(0x846fd09a, 0xb0f8a3e6), TOBN(0xb1162add, 0xe7733dbc)}, {TOBN(0x7070ad20, 0x236e3ab6), TOBN(0xf88cdaf5, 0xb2a56326), TOBN(0x05fc8719, 0x997cbc7a), TOBN(0x442cd452, 0x4b665272)}}, {{TOBN(0x7807f364, 0xb71698f5), TOBN(0x6ba418d2, 0x9f7b605e), TOBN(0xfd20b00f, 0xa03b2cbb), TOBN(0x883eca37, 0xda54386f)}, {TOBN(0xff0be43f, 0xf3437f24), TOBN(0xe910b432, 0xa48bb33c), TOBN(0x4963a128, 0x329df765), TOBN(0xac1dd556, 0xbe2fe6f7)}}, {{TOBN(0x557610f9, 0x24a0a3fc), TOBN(0x38e17bf4, 0xe881c3f9), TOBN(0x6ba84faf, 0xed0dac99), TOBN(0xd4a222c3, 0x59eeb918)}, {TOBN(0xc79c1dbe, 0x13f542b6), TOBN(0x1fc65e0d, 0xe425d457), TOBN(0xeffb754f, 0x1debb779), TOBN(0x638d8fd0, 0x9e08af60)}}, {{TOBN(0x994f523a, 0x626332d5), TOBN(0x7bc38833, 0x5561bb44), TOBN(0x005ed4b0, 0x3d845ea2), TOBN(0xd39d3ee1, 0xc2a1f08a)}, {TOBN(0x6561fdd3, 0xe7676b0d), TOBN(0x620e35ff, 0xfb706017), TOBN(0x36ce424f, 0xf264f9a8), TOBN(0xc4c3419f, 0xda2681f7)}}, {{TOBN(0xfb6afd2f, 0x69beb6e8), TOBN(0x3a50b993, 0x6d700d03), TOBN(0xc840b2ad, 0x0c83a14f), TOBN(0x573207be, 0x54085bef)}, {TOBN(0x5af882e3, 0x09fe7e5b), TOBN(0x957678a4, 0x3b40a7e1), TOBN(0x172d4bdd, 0x543056e2), TOBN(0x9c1b26b4, 0x0df13c0a)}}, {{TOBN(0x1c30861c, 0xf405ff06), TOBN(0xebac86bd, 0x486e828b), TOBN(0xe791a971, 0x636933fc), TOBN(0x50e7c2be, 0x7aeee947)}, {TOBN(0xc3d4a095, 0xfa90d767), TOBN(0xae60eb7b, 0xe670ab7b), TOBN(0x17633a64, 0x397b056d), TOBN(0x93a21f33, 0x105012aa)}}, {{TOBN(0x663c370b, 0xabb88643), TOBN(0x91df36d7, 0x22e21599), TOBN(0x183ba835, 0x8b761671), TOBN(0x381eea1d, 0x728f3bf1)}, {TOBN(0xb9b2f1ba, 0x39966e6c), TOBN(0x7c464a28, 0xe7295492), TOBN(0x0fd5f70a, 0x09b26b7f), TOBN(0xa9aba1f9, 0xfbe009df)}}, {{TOBN(0x857c1f22, 0x369b87ad), TOBN(0x3c00e5d9, 0x32fca556), TOBN(0x1ad74cab, 0x90b06466), TOBN(0xa7112386, 0x550faaf2)}, {TOBN(0x7435e198, 0x6d9bd5f5), TOBN(0x2dcc7e38, 0x59c3463f), TOBN(0xdc7df748, 0xca7bd4b2), TOBN(0x13cd4c08, 0x9dec2f31)}}, 
{{TOBN(0x0d3b5df8, 0xe3237710), TOBN(0x0dadb26e, 0xcbd2f7b0), TOBN(0x9f5966ab, 0xe4aa082b), TOBN(0x666ec8de, 0x350e966e)}, {TOBN(0x1bfd1ed5, 0xee524216), TOBN(0xcd93c59b, 0x41dab0b6), TOBN(0x658a8435, 0xd186d6ba), TOBN(0x1b7d34d2, 0x159d1195)}}, {{TOBN(0x5936e460, 0x22caf46b), TOBN(0x6a45dd8f, 0x9a96fe4f), TOBN(0xf7925434, 0xb98f474e), TOBN(0x41410412, 0x0053ef15)}, {TOBN(0x71cf8d12, 0x41de97bf), TOBN(0xb8547b61, 0xbd80bef4), TOBN(0xb47d3970, 0xc4db0037), TOBN(0xf1bcd328, 0xfef20dff)}}, {{TOBN(0x31a92e09, 0x10caad67), TOBN(0x1f591960, 0x5531a1e1), TOBN(0x3bb852e0, 0x5f4fc840), TOBN(0x63e297ca, 0x93a72c6c)}, {TOBN(0x3c2b0b2e, 0x49abad67), TOBN(0x6ec405fc, 0xed3db0d9), TOBN(0xdc14a530, 0x7fef1d40), TOBN(0xccd19846, 0x280896fc)}}, {{TOBN(0x00f83176, 0x9bb81648), TOBN(0xd69eb485, 0x653120d0), TOBN(0xd17d75f4, 0x4ccabc62), TOBN(0x34a07f82, 0xb749fcb1)}, {TOBN(0x2c3af787, 0xbbfb5554), TOBN(0xb06ed4d0, 0x62e283f8), TOBN(0x5722889f, 0xa19213a0), TOBN(0x162b085e, 0xdcf3c7b4)}}, {{TOBN(0xbcaecb31, 0xe0dd3eca), TOBN(0xc6237fbc, 0xe52f13a5), TOBN(0xcc2b6b03, 0x27bac297), TOBN(0x2ae1cac5, 0xb917f54a)}, {TOBN(0x474807d4, 0x7845ae4f), TOBN(0xfec7dd92, 0xce5972e0), TOBN(0xc3bd2541, 0x1d7915bb), TOBN(0x66f85dc4, 0xd94907ca)}}, {{TOBN(0xd981b888, 0xbdbcf0ca), TOBN(0xd75f5da6, 0xdf279e9f), TOBN(0x128bbf24, 0x7054e934), TOBN(0x3c6ff6e5, 0x81db134b)}, {TOBN(0x795b7cf4, 0x047d26e4), TOBN(0xf370f7b8, 0x5049ec37), TOBN(0xc6712d4d, 0xced945af), TOBN(0xdf30b5ec, 0x095642bc)}}, {{TOBN(0x9b034c62, 0x4896246e), TOBN(0x5652c016, 0xee90bbd1), TOBN(0xeb38636f, 0x87fedb73), TOBN(0x5e32f847, 0x0135a613)}, {TOBN(0x0703b312, 0xcf933c83), TOBN(0xd05bb76e, 0x1a7f47e6), TOBN(0x825e4f0c, 0x949c2415), TOBN(0x569e5622, 0x7250d6f8)}}, {{TOBN(0xbbe9eb3a, 0x6568013e), TOBN(0x8dbd203f, 0x22f243fc), TOBN(0x9dbd7694, 0xb342734a), TOBN(0x8f6d12f8, 0x46afa984)}, {TOBN(0xb98610a2, 0xc9eade29), TOBN(0xbab4f323, 0x47dd0f18), TOBN(0x5779737b, 0x671c0d46), TOBN(0x10b6a7c6, 0xd3e0a42a)}}, {{TOBN(0xfb19ddf3, 0x3035b41c), 
TOBN(0xd336343f, 0x99c45895), TOBN(0x61fe4938, 0x54c857e5), TOBN(0xc4d506be, 0xae4e57d5)}, {TOBN(0x3cd8c8cb, 0xbbc33f75), TOBN(0x7281f08a, 0x9262c77d), TOBN(0x083f4ea6, 0xf11a2823), TOBN(0x8895041e, 0x9fba2e33)}}, {{TOBN(0xfcdfea49, 0x9c438edf), TOBN(0x7678dcc3, 0x91edba44), TOBN(0xf07b3b87, 0xe2ba50f0), TOBN(0xc13888ef, 0x43948c1b)}, {TOBN(0xc2135ad4, 0x1140af42), TOBN(0x8e5104f3, 0x926ed1a7), TOBN(0xf24430cb, 0x88f6695f), TOBN(0x0ce0637b, 0x6d73c120)}}, {{TOBN(0xb2db01e6, 0xfe631e8f), TOBN(0x1c5563d7, 0xd7bdd24b), TOBN(0x8daea3ba, 0x369ad44f), TOBN(0x000c81b6, 0x8187a9f9)}, {TOBN(0x5f48a951, 0xaae1fd9a), TOBN(0xe35626c7, 0x8d5aed8a), TOBN(0x20952763, 0x0498c622), TOBN(0x76d17634, 0x773aa504)}}, {{TOBN(0x36d90dda, 0xeb300f7a), TOBN(0x9dcf7dfc, 0xedb5e801), TOBN(0x645cb268, 0x74d5244c), TOBN(0xa127ee79, 0x348e3aa2)}, {TOBN(0x488acc53, 0x575f1dbb), TOBN(0x95037e85, 0x80e6161e), TOBN(0x57e59283, 0x292650d0), TOBN(0xabe67d99, 0x14938216)}}, {{TOBN(0x3c7f944b, 0x3f8e1065), TOBN(0xed908cb6, 0x330e8924), TOBN(0x08ee8fd5, 0x6f530136), TOBN(0x2227b7d5, 0xd7ffc169)}, {TOBN(0x4f55c893, 0xb5cd6dd5), TOBN(0x82225e11, 0xa62796e8), TOBN(0x5c6cead1, 0xcb18e12c), TOBN(0x4381ae0c, 0x84f5a51a)}}, {{TOBN(0x345913d3, 0x7fafa4c8), TOBN(0x3d918082, 0x0491aac0), TOBN(0x9347871f, 0x3e69264c), TOBN(0xbea9dd3c, 0xb4f4f0cd)}, {TOBN(0xbda5d067, 0x3eadd3e7), TOBN(0x0033c1b8, 0x0573bcd8), TOBN(0x25589379, 0x5da2486c), TOBN(0xcb89ee5b, 0x86abbee7)}}, {{TOBN(0x8fe0a8f3, 0x22532e5d), TOBN(0xb6410ff0, 0x727dfc4c), TOBN(0x619b9d58, 0x226726db), TOBN(0x5ec25669, 0x7a2b2dc7)}, {TOBN(0xaf4d2e06, 0x4c3beb01), TOBN(0x852123d0, 0x7acea556), TOBN(0x0e9470fa, 0xf783487a), TOBN(0x75a7ea04, 0x5664b3eb)}}, {{TOBN(0x4ad78f35, 0x6798e4ba), TOBN(0x9214e6e5, 0xc7d0e091), TOBN(0xc420b488, 0xb1290403), TOBN(0x64049e0a, 0xfc295749)}, {TOBN(0x03ef5af1, 0x3ae9841f), TOBN(0xdbe4ca19, 0xb0b662a6), TOBN(0x46845c5f, 0xfa453458), TOBN(0xf8dabf19, 0x10b66722)}}, {{TOBN(0xb650f0aa, 0xcce2793b), TOBN(0x71db851e, 0xc5ec47c1), 
TOBN(0x3eb78f3e, 0x3b234fa9), TOBN(0xb0c60f35, 0xfc0106ce)}, {TOBN(0x05427121, 0x774eadbd), TOBN(0x25367faf, 0xce323863), TOBN(0x7541b5c9, 0xcd086976), TOBN(0x4ff069e2, 0xdc507ad1)}}, {{TOBN(0x74145256, 0x8776e667), TOBN(0x6e76142c, 0xb23c6bb5), TOBN(0xdbf30712, 0x1b3a8a87), TOBN(0x60e7363e, 0x98450836)}, {TOBN(0x5741450e, 0xb7366d80), TOBN(0xe4ee14ca, 0x4837dbdf), TOBN(0xa765eb9b, 0x69d4316f), TOBN(0x04548dca, 0x8ef43825)}}, {{TOBN(0x9c9f4e4c, 0x5ae888eb), TOBN(0x733abb51, 0x56e9ac99), TOBN(0xdaad3c20, 0xba6ac029), TOBN(0x9b8dd3d3, 0x2ba3e38e)}, {TOBN(0xa9bb4c92, 0x0bc5d11a), TOBN(0xf20127a7, 0x9c5f88a3), TOBN(0x4f52b06e, 0x161d3cb8), TOBN(0x26c1ff09, 0x6afaf0a6)}}, {{TOBN(0x32670d2f, 0x7189e71f), TOBN(0xc6438748, 0x5ecf91e7), TOBN(0x15758e57, 0xdb757a21), TOBN(0x427d09f8, 0x290a9ce5)}, {TOBN(0x846a308f, 0x38384a7a), TOBN(0xaac3acb4, 0xb0732b99), TOBN(0x9e941009, 0x17845819), TOBN(0x95cba111, 0xa7ce5e03)}}, {{TOBN(0x6f3d4f7f, 0xb00009c4), TOBN(0xb8396c27, 0x8ff28b5f), TOBN(0xb1a9ae43, 0x1c97975d), TOBN(0x9d7ba8af, 0xe5d9fed5)}, {TOBN(0x338cf09f, 0x34f485b6), TOBN(0xbc0ddacc, 0x64122516), TOBN(0xa450da12, 0x05d471fe), TOBN(0x4c3a6250, 0x628dd8c9)}}, {{TOBN(0x69c7d103, 0xd1295837), TOBN(0xa2893e50, 0x3807eb2f), TOBN(0xd6e1e1de, 0xbdb41491), TOBN(0xc630745b, 0x5e138235)}, {TOBN(0xc892109e, 0x48661ae1), TOBN(0x8d17e7eb, 0xea2b2674), TOBN(0x00ec0f87, 0xc328d6b5), TOBN(0x6d858645, 0xf079ff9e)}}, {{TOBN(0x6cdf243e, 0x19115ead), TOBN(0x1ce1393e, 0x4bac4fcf), TOBN(0x2c960ed0, 0x9c29f25b), TOBN(0x59be4d8e, 0x9d388a05)}, {TOBN(0x0d46e06c, 0xd0def72b), TOBN(0xb923db5d, 0xe0342748), TOBN(0xf7d3aacd, 0x936d4a3d), TOBN(0x558519cc, 0x0b0b099e)}}, {{TOBN(0x3ea8ebf8, 0x827097ef), TOBN(0x259353db, 0xd054f55d), TOBN(0x84c89abc, 0x6d2ed089), TOBN(0x5c548b69, 0x8e096a7c)}, {TOBN(0xd587f616, 0x994b995d), TOBN(0x4d1531f6, 0xa5845601), TOBN(0x792ab31e, 0x451fd9f0), TOBN(0xc8b57bb2, 0x65adf6ca)}}, {{TOBN(0x68440fcb, 0x1cd5ad73), TOBN(0xb9c860e6, 0x6144da4f), TOBN(0x2ab286aa, 0x8462beb8), 
TOBN(0xcc6b8fff, 0xef46797f)}, {TOBN(0xac820da4, 0x20c8a471), TOBN(0x69ae05a1, 0x77ff7faf), TOBN(0xb9163f39, 0xbfb5da77), TOBN(0xbd03e590, 0x2c73ab7a)}}, {{TOBN(0x7e862b5e, 0xb2940d9e), TOBN(0x3c663d86, 0x4b9af564), TOBN(0xd8309031, 0xbde3033d), TOBN(0x298231b2, 0xd42c5bc6)}, {TOBN(0x42090d2c, 0x552ad093), TOBN(0xa4799d1c, 0xff854695), TOBN(0x0a88b5d6, 0xd31f0d00), TOBN(0xf8b40825, 0xa2f26b46)}}, {{TOBN(0xec29b1ed, 0xf1bd7218), TOBN(0xd491c53b, 0x4b24c86e), TOBN(0xd2fe588f, 0x3395ea65), TOBN(0x6f3764f7, 0x4456ef15)}, {TOBN(0xdb43116d, 0xcdc34800), TOBN(0xcdbcd456, 0xc1e33955), TOBN(0xefdb5540, 0x74ab286b), TOBN(0x948c7a51, 0xd18c5d7c)}}, {{TOBN(0xeb81aa37, 0x7378058e), TOBN(0x41c746a1, 0x04411154), TOBN(0xa10c73bc, 0xfb828ac7), TOBN(0x6439be91, 0x9d972b29)}, {TOBN(0x4bf3b4b0, 0x43a2fbad), TOBN(0x39e6dadf, 0x82b5e840), TOBN(0x4f716408, 0x6397bd4c), TOBN(0x0f7de568, 0x7f1eeccb)}}, {{TOBN(0x5865c5a1, 0xd2ffbfc1), TOBN(0xf74211fa, 0x4ccb6451), TOBN(0x66368a88, 0xc0b32558), TOBN(0x5b539dc2, 0x9ad7812e)}, {TOBN(0x579483d0, 0x2f3af6f6), TOBN(0x52132078, 0x99934ece), TOBN(0x50b9650f, 0xdcc9e983), TOBN(0xca989ec9, 0xaee42b8a)}}, {{TOBN(0x6a44c829, 0xd6f62f99), TOBN(0x8f06a309, 0x4c2a7c0c), TOBN(0x4ea2b3a0, 0x98a0cb0a), TOBN(0x5c547b70, 0xbeee8364)}, {TOBN(0x461d40e1, 0x682afe11), TOBN(0x9e0fc77a, 0x7b41c0a8), TOBN(0x79e4aefd, 0xe20d5d36), TOBN(0x2916e520, 0x32dd9f63)}}, {{TOBN(0xf59e52e8, 0x3f883faf), TOBN(0x396f9639, 0x2b868d35), TOBN(0xc902a9df, 0x4ca19881), TOBN(0x0fc96822, 0xdb2401a6)}, {TOBN(0x41237587, 0x66f1c68d), TOBN(0x10fc6de3, 0xfb476c0d), TOBN(0xf8b6b579, 0x841f5d90), TOBN(0x2ba8446c, 0xfa24f44a)}}, {{TOBN(0xa237b920, 0xef4a9975), TOBN(0x60bb6004, 0x2330435f), TOBN(0xd6f4ab5a, 0xcfb7e7b5), TOBN(0xb2ac5097, 0x83435391)}, {TOBN(0xf036ee2f, 0xb0d1ea67), TOBN(0xae779a6a, 0x74c56230), TOBN(0x59bff8c8, 0xab838ae6), TOBN(0xcd83ca99, 0x9b38e6f0)}}, {{TOBN(0xbb27bef5, 0xe33deed3), TOBN(0xe6356f6f, 0x001892a8), TOBN(0xbf3be6cc, 0x7adfbd3e), TOBN(0xaecbc81c, 0x33d1ac9d)}, 
{TOBN(0xe4feb909, 0xe6e861dc), TOBN(0x90a247a4, 0x53f5f801), TOBN(0x01c50acb, 0x27346e57), TOBN(0xce29242e, 0x461acc1b)}}, {{TOBN(0x04dd214a, 0x2f998a91), TOBN(0x271ee9b1, 0xd4baf27b), TOBN(0x7e3027d1, 0xe8c26722), TOBN(0x21d1645c, 0x1820dce5)}, {TOBN(0x086f242c, 0x7501779c), TOBN(0xf0061407, 0xfa0e8009), TOBN(0xf23ce477, 0x60187129), TOBN(0x05bbdedb, 0x0fde9bd0)}}, {{TOBN(0x682f4832, 0x25d98473), TOBN(0xf207fe85, 0x5c658427), TOBN(0xb6fdd7ba, 0x4166ffa1), TOBN(0x0c314056, 0x9eed799d)}, {TOBN(0x0db8048f, 0x4107e28f), TOBN(0x74ed3871, 0x41216840), TOBN(0x74489f8f, 0x56a3c06e), TOBN(0x1e1c005b, 0x12777134)}}, {{TOBN(0xdb332a73, 0xf37ec3c3), TOBN(0xc65259bd, 0xdd59eba0), TOBN(0x2291709c, 0xdb4d3257), TOBN(0x9a793b25, 0xbd389390)}, {TOBN(0xf39fe34b, 0xe43756f0), TOBN(0x2f76bdce, 0x9afb56c9), TOBN(0x9f37867a, 0x61208b27), TOBN(0xea1d4307, 0x089972c3)}}, {{TOBN(0x8c595330, 0x8bdf623a), TOBN(0x5f5accda, 0x8441fb7d), TOBN(0xfafa9418, 0x32ddfd95), TOBN(0x6ad40c5a, 0x0fde9be7)}, {TOBN(0x43faba89, 0xaeca8709), TOBN(0xc64a7cf1, 0x2c248a9d), TOBN(0x16620252, 0x72637a76), TOBN(0xaee1c791, 0x22b8d1bb)}}, {{TOBN(0xf0f798fd, 0x21a843b2), TOBN(0x56e4ed4d, 0x8d005cb1), TOBN(0x355f7780, 0x1f0d8abe), TOBN(0x197b04cf, 0x34522326)}, {TOBN(0x41f9b31f, 0xfd42c13f), TOBN(0x5ef7feb2, 0xb40f933d), TOBN(0x27326f42, 0x5d60bad4), TOBN(0x027ecdb2, 0x8c92cf89)}}, {{TOBN(0x04aae4d1, 0x4e3352fe), TOBN(0x08414d2f, 0x73591b90), TOBN(0x5ed6124e, 0xb7da7d60), TOBN(0xb985b931, 0x4d13d4ec)}, {TOBN(0xa592d3ab, 0x96bf36f9), TOBN(0x012dbed5, 0xbbdf51df), TOBN(0xa57963c0, 0xdf6c177d), TOBN(0x010ec869, 0x87ca29cf)}}, {{TOBN(0xba1700f6, 0xbf926dff), TOBN(0x7c9fdbd1, 0xf4bf6bc2), TOBN(0xdc18dc8f, 0x64da11f5), TOBN(0xa6074b7a, 0xd938ae75)}, {TOBN(0x14270066, 0xe84f44a4), TOBN(0x99998d38, 0xd27b954e), TOBN(0xc1be8ab2, 0xb4f38e9a), TOBN(0x8bb55bbf, 0x15c01016)}}, {{TOBN(0xf73472b4, 0x0ea2ab30), TOBN(0xd365a340, 0xf73d68dd), TOBN(0xc01a7168, 0x19c2e1eb), TOBN(0x32f49e37, 0x34061719)}, {TOBN(0xb73c57f1, 0x01d8b4d6), 
TOBN(0x03c8423c, 0x26b47700), TOBN(0x321d0bc8, 0xa4d8826a), TOBN(0x6004213c, 0x4bc0e638)}}, {{TOBN(0xf78c64a1, 0xc1c06681), TOBN(0x16e0a16f, 0xef018e50), TOBN(0x31cbdf91, 0xdb42b2b3), TOBN(0xf8f4ffce, 0xe0d36f58)}, {TOBN(0xcdcc71cd, 0x4cc5e3e0), TOBN(0xd55c7cfa, 0xa129e3e0), TOBN(0xccdb6ba0, 0x0fb2cbf1), TOBN(0x6aba0005, 0xc4bce3cb)}}, {{TOBN(0x501cdb30, 0xd232cfc4), TOBN(0x9ddcf12e, 0xd58a3cef), TOBN(0x02d2cf9c, 0x87e09149), TOBN(0xdc5d7ec7, 0x2c976257)}, {TOBN(0x6447986e, 0x0b50d7dd), TOBN(0x88fdbaf7, 0x807f112a), TOBN(0x58c9822a, 0xb00ae9f6), TOBN(0x6abfb950, 0x6d3d27e0)}}, {{TOBN(0xd0a74487, 0x8a429f4f), TOBN(0x0649712b, 0xdb516609), TOBN(0xb826ba57, 0xe769b5df), TOBN(0x82335df2, 0x1fc7aaf2)}, {TOBN(0x2389f067, 0x5c93d995), TOBN(0x59ac367a, 0x68677be6), TOBN(0xa77985ff, 0x21d9951b), TOBN(0x038956fb, 0x85011cce)}}, {{TOBN(0x608e48cb, 0xbb734e37), TOBN(0xc08c0bf2, 0x2be5b26f), TOBN(0x17bbdd3b, 0xf9b1a0d9), TOBN(0xeac7d898, 0x10483319)}, {TOBN(0xc95c4baf, 0xbc1a6dea), TOBN(0xfdd0e2bf, 0x172aafdb), TOBN(0x40373cbc, 0x8235c41a), TOBN(0x14303f21, 0xfb6f41d5)}}, {{TOBN(0xba063621, 0x0408f237), TOBN(0xcad3b09a, 0xecd2d1ed), TOBN(0x4667855a, 0x52abb6a2), TOBN(0xba9157dc, 0xaa8b417b)}, {TOBN(0xfe7f3507, 0x4f013efb), TOBN(0x1b112c4b, 0xaa38c4a2), TOBN(0xa1406a60, 0x9ba64345), TOBN(0xe53cba33, 0x6993c80b)}}, {{TOBN(0x45466063, 0xded40d23), TOBN(0x3d5f1f4d, 0x54908e25), TOBN(0x9ebefe62, 0x403c3c31), TOBN(0x274ea0b5, 0x0672a624)}, {TOBN(0xff818d99, 0x451d1b71), TOBN(0x80e82643, 0x8f79cf79), TOBN(0xa165df13, 0x73ce37f5), TOBN(0xa744ef4f, 0xfe3a21fd)}}, {{TOBN(0x73f1e7f5, 0xcf551396), TOBN(0xc616898e, 0x868c676b), TOBN(0x671c28c7, 0x8c442c36), TOBN(0xcfe5e558, 0x5e0a317d)}, {TOBN(0x1242d818, 0x7051f476), TOBN(0x56fad2a6, 0x14f03442), TOBN(0x262068bc, 0x0a44d0f6), TOBN(0xdfa2cd6e, 0xce6edf4e)}}, {{TOBN(0x0f43813a, 0xd15d1517), TOBN(0x61214cb2, 0x377d44f5), TOBN(0xd399aa29, 0xc639b35f), TOBN(0x42136d71, 0x54c51c19)}, {TOBN(0x9774711b, 0x08417221), TOBN(0x0a5546b3, 0x52545a57), 
TOBN(0x80624c41, 0x1150582d), TOBN(0x9ec5c418, 0xfbc555bc)}}, {{TOBN(0x2c87dcad, 0x771849f1), TOBN(0xb0c932c5, 0x01d7bf6f), TOBN(0x6aa5cd3e, 0x89116eb2), TOBN(0xd378c25a, 0x51ca7bd3)}, {TOBN(0xc612a0da, 0x9e6e3e31), TOBN(0x0417a54d, 0xb68ad5d0), TOBN(0x00451e4a, 0x22c6edb8), TOBN(0x9fbfe019, 0xb42827ce)}}, {{TOBN(0x2fa92505, 0xba9384a2), TOBN(0x21b8596e, 0x64ad69c1), TOBN(0x8f4fcc49, 0x983b35a6), TOBN(0xde093760, 0x72754672)}, {TOBN(0x2f14ccc8, 0xf7bffe6d), TOBN(0x27566bff, 0x5d94263d), TOBN(0xb5b4e9c6, 0x2df3ec30), TOBN(0x94f1d7d5, 0x3e6ea6ba)}}, {{TOBN(0x97b7851a, 0xaaca5e9b), TOBN(0x518aa521, 0x56713b97), TOBN(0x3357e8c7, 0x150a61f6), TOBN(0x7842e7e2, 0xec2c2b69)}, {TOBN(0x8dffaf65, 0x6868a548), TOBN(0xd963bd82, 0xe068fc81), TOBN(0x64da5c8b, 0x65917733), TOBN(0x927090ff, 0x7b247328)}}}, {{{TOBN(0x214bc9a7, 0xd298c241), TOBN(0xe3b697ba, 0x56807cfd), TOBN(0xef1c7802, 0x4564eadb), TOBN(0xdde8cdcf, 0xb48149c5)}, {TOBN(0x946bf0a7, 0x5a4d2604), TOBN(0x27154d7f, 0x6c1538af), TOBN(0x95cc9230, 0xde5b1fcc), TOBN(0xd88519e9, 0x66864f82)}}, {{TOBN(0xb828dd1a, 0x7cb1282c), TOBN(0xa08d7626, 0xbe46973a), TOBN(0x6baf8d40, 0xe708d6b2), TOBN(0x72571fa1, 0x4daeb3f3)}, {TOBN(0x85b1732f, 0xf22dfd98), TOBN(0x87ab01a7, 0x0087108d), TOBN(0xaaaafea8, 0x5988207a), TOBN(0xccc832f8, 0x69f00755)}}, {{TOBN(0x964d950e, 0x36ff3bf0), TOBN(0x8ad20f6f, 0xf0b34638), TOBN(0x4d9177b3, 0xb5d7585f), TOBN(0xcf839760, 0xef3f019f)}, {TOBN(0x582fc5b3, 0x8288c545), TOBN(0x2f8e4e9b, 0x13116bd1), TOBN(0xf91e1b2f, 0x332120ef), TOBN(0xcf568724, 0x2a17dd23)}}, {{TOBN(0x488f1185, 0xca8d9d1a), TOBN(0xadf2c77d, 0xd987ded2), TOBN(0x5f3039f0, 0x60c46124), TOBN(0xe5d70b75, 0x71e095f4)}, {TOBN(0x82d58650, 0x6260e70f), TOBN(0x39d75ea7, 0xf750d105), TOBN(0x8cf3d0b1, 0x75bac364), TOBN(0xf3a7564d, 0x21d01329)}}, {{TOBN(0x182f04cd, 0x2f52d2a7), TOBN(0x4fde149a, 0xe2df565a), TOBN(0xb80c5eec, 0xa79fb2f7), TOBN(0xab491d7b, 0x22ddc897)}, {TOBN(0x99d76c18, 0xc6312c7f), TOBN(0xca0d5f3d, 0x6aa41a57), TOBN(0x71207325, 0xd15363a0), 
TOBN(0xe82aa265, 0xbeb252c2)}}, {{TOBN(0x94ab4700, 0xec3128c2), TOBN(0x6c76d862, 0x8e383f49), TOBN(0xdc36b150, 0xc03024eb), TOBN(0xfb439477, 0x53daac69)}, {TOBN(0xfc68764a, 0x8dc79623), TOBN(0x5b86995d, 0xb440fbb2), TOBN(0xd66879bf, 0xccc5ee0d), TOBN(0x05228942, 0x95aa8bd3)}}, {{TOBN(0xb51a40a5, 0x1e6a75c1), TOBN(0x24327c76, 0x0ea7d817), TOBN(0x06630182, 0x07774597), TOBN(0xd6fdbec3, 0x97fa7164)}, {TOBN(0x20c99dfb, 0x13c90f48), TOBN(0xd6ac5273, 0x686ef263), TOBN(0xc6a50bdc, 0xfef64eeb), TOBN(0xcd87b281, 0x86fdfc32)}}, {{TOBN(0xb24aa43e, 0x3fcd3efc), TOBN(0xdd26c034, 0xb8088e9a), TOBN(0xa5ef4dc9, 0xbd3d46ea), TOBN(0xa2f99d58, 0x8a4c6a6f)}, {TOBN(0xddabd355, 0x2f1da46c), TOBN(0x72c3f8ce, 0x1afacdd1), TOBN(0xd90c4eee, 0x92d40578), TOBN(0xd28bb41f, 0xca623b94)}}, {{TOBN(0x50fc0711, 0x745edc11), TOBN(0x9dd9ad7d, 0x3dc87558), TOBN(0xce6931fb, 0xb49d1e64), TOBN(0x6c77a0a2, 0xc98bd0f9)}, {TOBN(0x62b9a629, 0x6baf7cb1), TOBN(0xcf065f91, 0xccf72d22), TOBN(0x7203cce9, 0x79639071), TOBN(0x09ae4885, 0xf9cb732f)}}, {{TOBN(0x5e7c3bec, 0xee8314f3), TOBN(0x1c068aed, 0xdbea298f), TOBN(0x08d381f1, 0x7c80acec), TOBN(0x03b56be8, 0xe330495b)}, {TOBN(0xaeffb8f2, 0x9222882d), TOBN(0x95ff38f6, 0xc4af8bf7), TOBN(0x50e32d35, 0x1fc57d8c), TOBN(0x6635be52, 0x17b444f0)}}, {{TOBN(0x04d15276, 0xa5177900), TOBN(0x4e1dbb47, 0xf6858752), TOBN(0x5b475622, 0xc615796c), TOBN(0xa6fa0387, 0x691867bf)}, {TOBN(0xed7f5d56, 0x2844c6d0), TOBN(0xc633cf9b, 0x03a2477d), TOBN(0xf6be5c40, 0x2d3721d6), TOBN(0xaf312eb7, 0xe9fd68e6)}}, {{TOBN(0x242792d2, 0xe7417ce1), TOBN(0xff42bc71, 0x970ee7f5), TOBN(0x1ff4dc6d, 0x5c67a41e), TOBN(0x77709b7b, 0x20882a58)}, {TOBN(0x3554731d, 0xbe217f2c), TOBN(0x2af2a8cd, 0x5bb72177), TOBN(0x58eee769, 0x591dd059), TOBN(0xbb2930c9, 0x4bba6477)}}, {{TOBN(0x863ee047, 0x7d930cfc), TOBN(0x4c262ad1, 0x396fd1f4), TOBN(0xf4765bc8, 0x039af7e1), TOBN(0x2519834b, 0x5ba104f6)}, {TOBN(0x7cd61b4c, 0xd105f961), TOBN(0xa5415da5, 0xd63bca54), TOBN(0x778280a0, 0x88a1f17c), TOBN(0xc4968949, 0x2329512c)}}, 
{{TOBN(0x174a9126, 0xcecdaa7a), TOBN(0xfc8c7e0e, 0x0b13247b), TOBN(0x29c110d2, 0x3484c1c4), TOBN(0xf8eb8757, 0x831dfc3b)}, {TOBN(0x022f0212, 0xc0067452), TOBN(0x3f6f69ee, 0x7b9b926c), TOBN(0x09032da0, 0xef42daf4), TOBN(0x79f00ade, 0x83f80de4)}}, {{TOBN(0x6210db71, 0x81236c97), TOBN(0x74f7685b, 0x3ee0781f), TOBN(0x4df7da7b, 0xa3e41372), TOBN(0x2aae38b1, 0xb1a1553e)}, {TOBN(0x1688e222, 0xf6dd9d1b), TOBN(0x57695448, 0x5b8b6487), TOBN(0x478d2127, 0x4b2edeaa), TOBN(0xb2818fa5, 0x1e85956a)}}, {{TOBN(0x1e6addda, 0xf176f2c0), TOBN(0x01ca4604, 0xe2572658), TOBN(0x0a404ded, 0x85342ffb), TOBN(0x8cf60f96, 0x441838d6)}, {TOBN(0x9bbc691c, 0xc9071c4a), TOBN(0xfd588744, 0x34442803), TOBN(0x97101c85, 0x809c0d81), TOBN(0xa7fb754c, 0x8c456f7f)}}, {{TOBN(0xc95f3c5c, 0xd51805e1), TOBN(0xab4ccd39, 0xb299dca8), TOBN(0x3e03d20b, 0x47eaf500), TOBN(0xfa3165c1, 0xd7b80893)}, {TOBN(0x005e8b54, 0xe160e552), TOBN(0xdc4972ba, 0x9019d11f), TOBN(0x21a6972e, 0x0c9a4a7a), TOBN(0xa52c258f, 0x37840fd7)}}, {{TOBN(0xf8559ff4, 0xc1e99d81), TOBN(0x08e1a7d6, 0xa3c617c0), TOBN(0xb398fd43, 0x248c6ba7), TOBN(0x6ffedd91, 0xd1283794)}, {TOBN(0x8a6a59d2, 0xd629d208), TOBN(0xa9d141d5, 0x3490530e), TOBN(0x42f6fc18, 0x38505989), TOBN(0x09bf250d, 0x479d94ee)}}, {{TOBN(0x223ad3b1, 0xb3822790), TOBN(0x6c5926c0, 0x93b8971c), TOBN(0x609efc7e, 0x75f7fa62), TOBN(0x45d66a6d, 0x1ec2d989)}, {TOBN(0x4422d663, 0x987d2792), TOBN(0x4a73caad, 0x3eb31d2b), TOBN(0xf06c2ac1, 0xa32cb9e6), TOBN(0xd9445c5f, 0x91aeba84)}}, {{TOBN(0x6af7a1d5, 0xaf71013f), TOBN(0xe68216e5, 0x0bedc946), TOBN(0xf4cba30b, 0xd27370a0), TOBN(0x7981afbf, 0x870421cc)}, {TOBN(0x02496a67, 0x9449f0e1), TOBN(0x86cfc4be, 0x0a47edae), TOBN(0x3073c936, 0xb1feca22), TOBN(0xf5694612, 0x03f8f8fb)}}, {{TOBN(0xd063b723, 0x901515ea), TOBN(0x4c6c77a5, 0x749cf038), TOBN(0x6361e360, 0xab9e5059), TOBN(0x596cf171, 0xa76a37c0)}, {TOBN(0x800f53fa, 0x6530ae7a), TOBN(0x0f5e631e, 0x0792a7a6), TOBN(0x5cc29c24, 0xefdb81c9), TOBN(0xa269e868, 0x3f9c40ba)}}, {{TOBN(0xec14f9e1, 0x2cb7191e), 
TOBN(0x78ea1bd8, 0xe5b08ea6), TOBN(0x3c65aa9b, 0x46332bb9), TOBN(0x84cc22b3, 0xbf80ce25)}, {TOBN(0x0098e9e9, 0xd49d5bf1), TOBN(0xcd4ec1c6, 0x19087da4), TOBN(0x3c9d07c5, 0xaef6e357), TOBN(0x839a0268, 0x9f8f64b8)}}, {{TOBN(0xc5e9eb62, 0xc6d8607f), TOBN(0x759689f5, 0x6aa995e4), TOBN(0x70464669, 0xbbb48317), TOBN(0x921474bf, 0xe402417d)}, {TOBN(0xcabe135b, 0x2a354c8c), TOBN(0xd51e52d2, 0x812fa4b5), TOBN(0xec741096, 0x53311fe8), TOBN(0x4f774535, 0xb864514b)}}, {{TOBN(0xbcadd671, 0x5bde48f8), TOBN(0xc9703873, 0x2189bc7d), TOBN(0x5d45299e, 0xc709ee8a), TOBN(0xd1287ee2, 0x845aaff8)}, {TOBN(0x7d1f8874, 0xdb1dbf1f), TOBN(0xea46588b, 0x990c88d6), TOBN(0x60ba649a, 0x84368313), TOBN(0xd5fdcbce, 0x60d543ae)}}, {{TOBN(0x90b46d43, 0x810d5ab0), TOBN(0x6739d8f9, 0x04d7e5cc), TOBN(0x021c1a58, 0x0d337c33), TOBN(0x00a61162, 0x68e67c40)}, {TOBN(0x95ef413b, 0x379f0a1f), TOBN(0xfe126605, 0xe9e2ab95), TOBN(0x67578b85, 0x2f5f199c), TOBN(0xf5c00329, 0x2cb84913)}}, {{TOBN(0xf7956430, 0x37577dd8), TOBN(0x83b82af4, 0x29c5fe88), TOBN(0x9c1bea26, 0xcdbdc132), TOBN(0x589fa086, 0x9c04339e)}, {TOBN(0x033e9538, 0xb13799df), TOBN(0x85fa8b21, 0xd295d034), TOBN(0xdf17f73f, 0xbd9ddcca), TOBN(0xf32bd122, 0xddb66334)}}, {{TOBN(0x55ef88a7, 0x858b044c), TOBN(0x1f0d69c2, 0x5aa9e397), TOBN(0x55fd9cc3, 0x40d85559), TOBN(0xc774df72, 0x7785ddb2)}, {TOBN(0x5dcce9f6, 0xd3bd2e1c), TOBN(0xeb30da20, 0xa85dfed0), TOBN(0x5ed7f5bb, 0xd3ed09c4), TOBN(0x7d42a35c, 0x82a9c1bd)}}, {{TOBN(0xcf3de995, 0x9890272d), TOBN(0x75f3432a, 0x3e713a10), TOBN(0x5e13479f, 0xe28227b8), TOBN(0xb8561ea9, 0xfefacdc8)}, {TOBN(0xa6a297a0, 0x8332aafd), TOBN(0x9b0d8bb5, 0x73809b62), TOBN(0xd2fa1cfd, 0x0c63036f), TOBN(0x7a16eb55, 0xbd64bda8)}}, {{TOBN(0x3f5cf5f6, 0x78e62ddc), TOBN(0x2267c454, 0x07fd752b), TOBN(0x5e361b6b, 0x5e437bbe), TOBN(0x95c59501, 0x8354e075)}, {TOBN(0xec725f85, 0xf2b254d9), TOBN(0x844b617d, 0x2cb52b4e), TOBN(0xed8554f5, 0xcf425fb5), TOBN(0xab67703e, 0x2af9f312)}}, {{TOBN(0x4cc34ec1, 0x3cf48283), TOBN(0xb09daa25, 0x9c8a705e), 
TOBN(0xd1e9d0d0, 0x5b7d4f84), TOBN(0x4df6ef64, 0xdb38929d)}, {TOBN(0xe16b0763, 0xaa21ba46), TOBN(0xc6b1d178, 0xa293f8fb), TOBN(0x0ff5b602, 0xd520aabf), TOBN(0x94d671bd, 0xc339397a)}}, {{TOBN(0x7c7d98cf, 0x4f5792fa), TOBN(0x7c5e0d67, 0x11215261), TOBN(0x9b19a631, 0xa7c5a6d4), TOBN(0xc8511a62, 0x7a45274d)}, {TOBN(0x0c16621c, 0xa5a60d99), TOBN(0xf7fbab88, 0xcf5e48cb), TOBN(0xab1e6ca2, 0xf7ddee08), TOBN(0x83bd08ce, 0xe7867f3c)}}, {{TOBN(0xf7e48e8a, 0x2ac13e27), TOBN(0x4494f6df, 0x4eb1a9f5), TOBN(0xedbf84eb, 0x981f0a62), TOBN(0x49badc32, 0x536438f0)}, {TOBN(0x50bea541, 0x004f7571), TOBN(0xbac67d10, 0xdf1c94ee), TOBN(0x253d73a1, 0xb727bc31), TOBN(0xb3d01cf2, 0x30686e28)}}, {{TOBN(0x51b77b1b, 0x55fd0b8b), TOBN(0xa099d183, 0xfeec3173), TOBN(0x202b1fb7, 0x670e72b7), TOBN(0xadc88b33, 0xa8e1635f)}, {TOBN(0x34e8216a, 0xf989d905), TOBN(0xc2e68d20, 0x29b58d01), TOBN(0x11f81c92, 0x6fe55a93), TOBN(0x15f1462a, 0x8f296f40)}}, {{TOBN(0x1915d375, 0xea3d62f2), TOBN(0xa17765a3, 0x01c8977d), TOBN(0x7559710a, 0xe47b26f6), TOBN(0xe0bd29c8, 0x535077a5)}, {TOBN(0x615f976d, 0x08d84858), TOBN(0x370dfe85, 0x69ced5c1), TOBN(0xbbc7503c, 0xa734fa56), TOBN(0xfbb9f1ec, 0x91ac4574)}}, {{TOBN(0x95d7ec53, 0x060dd7ef), TOBN(0xeef2dacd, 0x6e657979), TOBN(0x54511af3, 0xe2a08235), TOBN(0x1e324aa4, 0x1f4aea3d)}, {TOBN(0x550e7e71, 0xe6e67671), TOBN(0xbccd5190, 0xbf52faf7), TOBN(0xf880d316, 0x223cc62a), TOBN(0x0d402c7e, 0x2b32eb5d)}}, {{TOBN(0xa40bc039, 0x306a5a3b), TOBN(0x4e0a41fd, 0x96783a1b), TOBN(0xa1e8d39a, 0x0253cdd4), TOBN(0x6480be26, 0xc7388638)}, {TOBN(0xee365e1d, 0x2285f382), TOBN(0x188d8d8f, 0xec0b5c36), TOBN(0x34ef1a48, 0x1f0f4d82), TOBN(0x1a8f43e1, 0xa487d29a)}}, {{TOBN(0x8168226d, 0x77aefb3a), TOBN(0xf69a751e, 0x1e72c253), TOBN(0x8e04359a, 0xe9594df1), TOBN(0x475ffd7d, 0xd14c0467)}, {TOBN(0xb5a2c2b1, 0x3844e95c), TOBN(0x85caf647, 0xdd12ef94), TOBN(0x1ecd2a9f, 0xf1063d00), TOBN(0x1dd2e229, 0x23843311)}}, {{TOBN(0x38f0e09d, 0x73d17244), TOBN(0x3ede7746, 0x8fc653f1), TOBN(0xae4459f5, 0xdc20e21c), 
TOBN(0x00db2ffa, 0x6a8599ea)}, {TOBN(0x11682c39, 0x30cfd905), TOBN(0x4934d074, 0xa5c112a6), TOBN(0xbdf063c5, 0x568bfe95), TOBN(0x779a440a, 0x016c441a)}}, {{TOBN(0x0c23f218, 0x97d6fbdc), TOBN(0xd3a5cd87, 0xe0776aac), TOBN(0xcee37f72, 0xd712e8db), TOBN(0xfb28c70d, 0x26f74e8d)}, {TOBN(0xffe0c728, 0xb61301a0), TOBN(0xa6282168, 0xd3724354), TOBN(0x7ff4cb00, 0x768ffedc), TOBN(0xc51b3088, 0x03b02de9)}}, {{TOBN(0xa5a8147c, 0x3902dda5), TOBN(0x35d2f706, 0xfe6973b4), TOBN(0x5ac2efcf, 0xc257457e), TOBN(0x933f48d4, 0x8700611b)}, {TOBN(0xc365af88, 0x4912beb2), TOBN(0x7f5a4de6, 0x162edf94), TOBN(0xc646ba7c, 0x0c32f34b), TOBN(0x632c6af3, 0xb2091074)}}, {{TOBN(0x58d4f2e3, 0x753e43a9), TOBN(0x70e1d217, 0x24d4e23f), TOBN(0xb24bf729, 0xafede6a6), TOBN(0x7f4a94d8, 0x710c8b60)}, {TOBN(0xaad90a96, 0x8d4faa6a), TOBN(0xd9ed0b32, 0xb066b690), TOBN(0x52fcd37b, 0x78b6dbfd), TOBN(0x0b64615e, 0x8bd2b431)}}, {{TOBN(0x228e2048, 0xcfb9fad5), TOBN(0xbeaa386d, 0x240b76bd), TOBN(0x2d6681c8, 0x90dad7bc), TOBN(0x3e553fc3, 0x06d38f5e)}, {TOBN(0xf27cdb9b, 0x9d5f9750), TOBN(0x3e85c52a, 0xd28c5b0e), TOBN(0x190795af, 0x5247c39b), TOBN(0x547831eb, 0xbddd6828)}}, {{TOBN(0xf327a227, 0x4a82f424), TOBN(0x36919c78, 0x7e47f89d), TOBN(0xe4783919, 0x43c7392c), TOBN(0xf101b9aa, 0x2316fefe)}, {TOBN(0xbcdc9e9c, 0x1c5009d2), TOBN(0xfb55ea13, 0x9cd18345), TOBN(0xf5b5e231, 0xa3ce77c7), TOBN(0xde6b4527, 0xd2f2cb3d)}}, {{TOBN(0x10f6a333, 0x9bb26f5f), TOBN(0x1e85db8e, 0x044d85b6), TOBN(0xc3697a08, 0x94197e54), TOBN(0x65e18cc0, 0xa7cb4ea8)}, {TOBN(0xa38c4f50, 0xa471fe6e), TOBN(0xf031747a, 0x2f13439c), TOBN(0x53c4a6ba, 0xc007318b), TOBN(0xa8da3ee5, 0x1deccb3d)}}, {{TOBN(0x0555b31c, 0x558216b1), TOBN(0x90c7810c, 0x2f79e6c2), TOBN(0x9b669f4d, 0xfe8eed3c), TOBN(0x70398ec8, 0xe0fac126)}, {TOBN(0xa96a449e, 0xf701b235), TOBN(0x0ceecdb3, 0xeb94f395), TOBN(0x285fc368, 0xd0cb7431), TOBN(0x0d37bb52, 0x16a18c64)}}, {{TOBN(0x05110d38, 0xb880d2dd), TOBN(0xa60f177b, 0x65930d57), TOBN(0x7da34a67, 0xf36235f5), TOBN(0x47f5e17c, 0x183816b9)}, 
{TOBN(0xc7664b57, 0xdb394af4), TOBN(0x39ba215d, 0x7036f789), TOBN(0x46d2ca0e, 0x2f27b472), TOBN(0xc42647ee, 0xf73a84b7)}}, {{TOBN(0x44bc7545, 0x64488f1d), TOBN(0xaa922708, 0xf4cf85d5), TOBN(0x721a01d5, 0x53e4df63), TOBN(0x649c0c51, 0x5db46ced)}, {TOBN(0x6bf0d64e, 0x3cffcb6c), TOBN(0xe3bf93fe, 0x50f71d96), TOBN(0x75044558, 0xbcc194a0), TOBN(0x16ae3372, 0x6afdc554)}}, {{TOBN(0xbfc01adf, 0x5ca48f3f), TOBN(0x64352f06, 0xe22a9b84), TOBN(0xcee54da1, 0xc1099e4a), TOBN(0xbbda54e8, 0xfa1b89c0)}, {TOBN(0x166a3df5, 0x6f6e55fb), TOBN(0x1ca44a24, 0x20176f88), TOBN(0x936afd88, 0xdfb7b5ff), TOBN(0xe34c2437, 0x8611d4a0)}}, {{TOBN(0x7effbb75, 0x86142103), TOBN(0x6704ba1b, 0x1f34fc4d), TOBN(0x7c2a468f, 0x10c1b122), TOBN(0x36b3a610, 0x8c6aace9)}, {TOBN(0xabfcc0a7, 0x75a0d050), TOBN(0x066f9197, 0x3ce33e32), TOBN(0xce905ef4, 0x29fe09be), TOBN(0x89ee25ba, 0xa8376351)}}, {{TOBN(0x2a3ede22, 0xfd29dc76), TOBN(0x7fd32ed9, 0x36f17260), TOBN(0x0cadcf68, 0x284b4126), TOBN(0x63422f08, 0xa7951fc8)}, {TOBN(0x562b24f4, 0x0807e199), TOBN(0xfe9ce5d1, 0x22ad4490), TOBN(0xc2f51b10, 0x0db2b1b4), TOBN(0xeb3613ff, 0xe4541d0d)}}, {{TOBN(0xbd2c4a05, 0x2680813b), TOBN(0x527aa55d, 0x561b08d6), TOBN(0xa9f8a40e, 0xa7205558), TOBN(0xe3eea56f, 0x243d0bec)}, {TOBN(0x7b853817, 0xa0ff58b3), TOBN(0xb67d3f65, 0x1a69e627), TOBN(0x0b76bbb9, 0xa869b5d6), TOBN(0xa3afeb82, 0x546723ed)}}, {{TOBN(0x5f24416d, 0x3e554892), TOBN(0x8413b53d, 0x430e2a45), TOBN(0x99c56aee, 0x9032a2a0), TOBN(0x09432bf6, 0xeec367b1)}, {TOBN(0x552850c6, 0xdaf0ecc1), TOBN(0x49ebce55, 0x5bc92048), TOBN(0xdfb66ba6, 0x54811307), TOBN(0x1b84f797, 0x6f298597)}}, {{TOBN(0x79590481, 0x8d1d7a0d), TOBN(0xd9fabe03, 0x3a6fa556), TOBN(0xa40f9c59, 0xba9e5d35), TOBN(0xcb1771c1, 0xf6247577)}, {TOBN(0x542a47ca, 0xe9a6312b), TOBN(0xa34b3560, 0x552dd8c5), TOBN(0xfdf94de0, 0x0d794716), TOBN(0xd46124a9, 0x9c623094)}}, {{TOBN(0x56b7435d, 0x68afe8b4), TOBN(0x27f20540, 0x6c0d8ea1), TOBN(0x12b77e14, 0x73186898), TOBN(0xdbc3dd46, 0x7479490f)}, {TOBN(0x951a9842, 0xc03b0c05), 
TOBN(0x8b1b3bb3, 0x7921bc96), TOBN(0xa573b346, 0x2b202e0a), TOBN(0x77e4665d, 0x47254d56)}}, {{TOBN(0x08b70dfc, 0xd23e3984), TOBN(0xab86e8bc, 0xebd14236), TOBN(0xaa3e07f8, 0x57114ba7), TOBN(0x5ac71689, 0xab0ef4f2)}, {TOBN(0x88fca384, 0x0139d9af), TOBN(0x72733f88, 0x76644af0), TOBN(0xf122f72a, 0x65d74f4a), TOBN(0x13931577, 0xa5626c7a)}}, {{TOBN(0xd5b5d9eb, 0x70f8d5a4), TOBN(0x375adde7, 0xd7bbb228), TOBN(0x31e88b86, 0x0c1c0b32), TOBN(0xd1f568c4, 0x173edbaa)}, {TOBN(0x1592fc83, 0x5459df02), TOBN(0x2beac0fb, 0x0fcd9a7e), TOBN(0xb0a6fdb8, 0x1b473b0a), TOBN(0xe3224c6f, 0x0fe8fc48)}}, {{TOBN(0x680bd00e, 0xe87edf5b), TOBN(0x30385f02, 0x20e77cf5), TOBN(0xe9ab98c0, 0x4d42d1b2), TOBN(0x72d191d2, 0xd3816d77)}, {TOBN(0x1564daca, 0x0917d9e5), TOBN(0x394eab59, 0x1f8fed7f), TOBN(0xa209aa8d, 0x7fbb3896), TOBN(0x5564f3b9, 0xbe6ac98e)}}, {{TOBN(0xead21d05, 0xd73654ef), TOBN(0x68d1a9c4, 0x13d78d74), TOBN(0x61e01708, 0x6d4973a0), TOBN(0x83da3500, 0x46e6d32a)}, {TOBN(0x6a3dfca4, 0x68ae0118), TOBN(0xa1b9a4c9, 0xd02da069), TOBN(0x0b2ff9c7, 0xebab8302), TOBN(0x98af07c3, 0x944ba436)}}, {{TOBN(0x85997326, 0x995f0f9f), TOBN(0x467fade0, 0x71b58bc6), TOBN(0x47e4495a, 0xbd625a2b), TOBN(0xfdd2d01d, 0x33c3b8cd)}, {TOBN(0x2c38ae28, 0xc693f9fa), TOBN(0x48622329, 0x348f7999), TOBN(0x97bf738e, 0x2161f583), TOBN(0x15ee2fa7, 0x565e8cc9)}}, {{TOBN(0xa1a5c845, 0x5777e189), TOBN(0xcc10bee0, 0x456f2829), TOBN(0x8ad95c56, 0xda762bd5), TOBN(0x152e2214, 0xe9d91da8)}, {TOBN(0x975b0e72, 0x7cb23c74), TOBN(0xfd5d7670, 0xa90c66df), TOBN(0xb5b5b8ad, 0x225ffc53), TOBN(0xab6dff73, 0xfaded2ae)}}, {{TOBN(0xebd56781, 0x6f4cbe9d), TOBN(0x0ed8b249, 0x6a574bd7), TOBN(0x41c246fe, 0x81a881fa), TOBN(0x91564805, 0xc3db9c70)}, {TOBN(0xd7c12b08, 0x5b862809), TOBN(0x1facd1f1, 0x55858d7b), TOBN(0x7693747c, 0xaf09e92a), TOBN(0x3b69dcba, 0x189a425f)}}, {{TOBN(0x0be28e9f, 0x967365ef), TOBN(0x57300eb2, 0xe801f5c9), TOBN(0x93b8ac6a, 0xd583352f), TOBN(0xa2cf1f89, 0xcd05b2b7)}, {TOBN(0x7c0c9b74, 0x4dcc40cc), TOBN(0xfee38c45, 0xada523fb), 
TOBN(0xb49a4dec, 0x1099cc4d), TOBN(0x325c377f, 0x69f069c6)}}, {{TOBN(0xe12458ce, 0x476cc9ff), TOBN(0x580e0b6c, 0xc6d4cb63), TOBN(0xd561c8b7, 0x9072289b), TOBN(0x0377f264, 0xa619e6da)}, {TOBN(0x26685362, 0x88e591a5), TOBN(0xa453a7bd, 0x7523ca2b), TOBN(0x8a9536d2, 0xc1df4533), TOBN(0xc8e50f2f, 0xbe972f79)}}, {{TOBN(0xd433e50f, 0x6d3549cf), TOBN(0x6f33696f, 0xfacd665e), TOBN(0x695bfdac, 0xce11fcb4), TOBN(0x810ee252, 0xaf7c9860)}, {TOBN(0x65450fe1, 0x7159bb2c), TOBN(0xf7dfbebe, 0x758b357b), TOBN(0x2b057e74, 0xd69fea72), TOBN(0xd485717a, 0x92731745)}}}, {{{TOBN(0x896c42e8, 0xee36860c), TOBN(0xdaf04dfd, 0x4113c22d), TOBN(0x1adbb7b7, 0x44104213), TOBN(0xe5fd5fa1, 0x1fd394ea)}, {TOBN(0x68235d94, 0x1a4e0551), TOBN(0x6772cfbe, 0x18d10151), TOBN(0x276071e3, 0x09984523), TOBN(0xe4e879de, 0x5a56ba98)}}, {{TOBN(0xaaafafb0, 0x285b9491), TOBN(0x01a0be88, 0x1e4c705e), TOBN(0xff1d4f5d, 0x2ad9caab), TOBN(0x6e349a4a, 0xc37a233f)}, {TOBN(0xcf1c1246, 0x4a1c6a16), TOBN(0xd99e6b66, 0x29383260), TOBN(0xea3d4366, 0x5f6d5471), TOBN(0x36974d04, 0xff8cc89b)}}, {{TOBN(0xc26c49a1, 0xcfe89d80), TOBN(0xb42c026d, 0xda9c8371), TOBN(0xca6c013a, 0xdad066d2), TOBN(0xfb8f7228, 0x56a4f3ee)}, {TOBN(0x08b579ec, 0xd850935b), TOBN(0x34c1a74c, 0xd631e1b3), TOBN(0xcb5fe596, 0xac198534), TOBN(0x39ff21f6, 0xe1f24f25)}}, {{TOBN(0x27f29e14, 0x8f929057), TOBN(0x7a64ae06, 0xc0c853df), TOBN(0x256cd183, 0x58e9c5ce), TOBN(0x9d9cce82, 0xded092a5)}, {TOBN(0xcc6e5979, 0x6e93b7c7), TOBN(0xe1e47092, 0x31bb9e27), TOBN(0xb70b3083, 0xaa9e29a0), TOBN(0xbf181a75, 0x3785e644)}}, {{TOBN(0xf53f2c65, 0x8ead09f7), TOBN(0x1335e1d5, 0x9780d14d), TOBN(0x69cc20e0, 0xcd1b66bc), TOBN(0x9b670a37, 0xbbe0bfc8)}, {TOBN(0xce53dc81, 0x28efbeed), TOBN(0x0c74e77c, 0x8326a6e5), TOBN(0x3604e0d2, 0xb88e9a63), TOBN(0xbab38fca, 0x13dc2248)}}, {{TOBN(0x8ed6e8c8, 0x5c0a3f1e), TOBN(0xbcad2492, 0x7c87c37f), TOBN(0xfdfb62bb, 0x9ee3b78d), TOBN(0xeba8e477, 0xcbceba46)}, {TOBN(0x37d38cb0, 0xeeaede4b), TOBN(0x0bc498e8, 0x7976deb6), TOBN(0xb2944c04, 0x6b6147fb), 
TOBN(0x8b123f35, 0xf71f9609)}}, {{TOBN(0xa155dcc7, 0xde79dc24), TOBN(0xf1168a32, 0x558f69cd), TOBN(0xbac21595, 0x0d1850df), TOBN(0x15c8295b, 0xb204c848)}, {TOBN(0xf661aa36, 0x7d8184ff), TOBN(0xc396228e, 0x30447bdb), TOBN(0x11cd5143, 0xbde4a59e), TOBN(0xe3a26e3b, 0x6beab5e6)}}, {{TOBN(0xd3b3a13f, 0x1402b9d0), TOBN(0x573441c3, 0x2c7bc863), TOBN(0x4b301ec4, 0x578c3e6e), TOBN(0xc26fc9c4, 0x0adaf57e)}, {TOBN(0x96e71bfd, 0x7493cea3), TOBN(0xd05d4b3f, 0x1af81456), TOBN(0xdaca2a8a, 0x6a8c608f), TOBN(0x53ef07f6, 0x0725b276)}}, {{TOBN(0x07a5fbd2, 0x7824fc56), TOBN(0x34675218, 0x13289077), TOBN(0x5bf69fd5, 0xe0c48349), TOBN(0xa613ddd3, 0xb6aa7875)}, {TOBN(0x7f78c19c, 0x5450d866), TOBN(0x46f4409c, 0x8f84a481), TOBN(0x9f1d1928, 0x90fce239), TOBN(0x016c4168, 0xb2ce44b9)}}, {{TOBN(0xbae023f0, 0xc7435978), TOBN(0xb152c888, 0x20e30e19), TOBN(0x9c241645, 0xe3fa6faf), TOBN(0x735d95c1, 0x84823e60)}, {TOBN(0x03197573, 0x03955317), TOBN(0x0b4b02a9, 0xf03b4995), TOBN(0x076bf559, 0x70274600), TOBN(0x32c5cc53, 0xaaf57508)}}, {{TOBN(0xe8af6d1f, 0x60624129), TOBN(0xb7bc5d64, 0x9a5e2b5e), TOBN(0x3814b048, 0x5f082d72), TOBN(0x76f267f2, 0xce19677a)}, {TOBN(0x626c630f, 0xb36eed93), TOBN(0x55230cd7, 0x3bf56803), TOBN(0x78837949, 0xce2736a0), TOBN(0x0d792d60, 0xaa6c55f1)}}, {{TOBN(0x0318dbfd, 0xd5c7c5d2), TOBN(0xb38f8da7, 0x072b342d), TOBN(0x3569bddc, 0x7b8de38a), TOBN(0xf25b5887, 0xa1c94842)}, {TOBN(0xb2d5b284, 0x2946ad60), TOBN(0x854f29ad, 0xe9d1707e), TOBN(0xaa5159dc, 0x2c6a4509), TOBN(0x899f94c0, 0x57189837)}}, {{TOBN(0xcf6adc51, 0xf4a55b03), TOBN(0x261762de, 0x35e3b2d5), TOBN(0x4cc43012, 0x04827b51), TOBN(0xcd22a113, 0xc6021442)}, {TOBN(0xce2fd61a, 0x247c9569), TOBN(0x59a50973, 0xd152beca), TOBN(0x6c835a11, 0x63a716d4), TOBN(0xc26455ed, 0x187dedcf)}}, {{TOBN(0x27f536e0, 0x49ce89e7), TOBN(0x18908539, 0xcc890cb5), TOBN(0x308909ab, 0xd83c2aa1), TOBN(0xecd3142b, 0x1ab73bd3)}, {TOBN(0x6a85bf59, 0xb3f5ab84), TOBN(0x3c320a68, 0xf2bea4c6), TOBN(0xad8dc538, 0x6da4541f), TOBN(0xeaf34eb0, 0xb7c41186)}}, 
{{TOBN(0x1c780129, 0x977c97c4), TOBN(0x5ff9beeb, 0xc57eb9fa), TOBN(0xa24d0524, 0xc822c478), TOBN(0xfd8eec2a, 0x461cd415)}, {TOBN(0xfbde194e, 0xf027458c), TOBN(0xb4ff5319, 0x1d1be115), TOBN(0x63f874d9, 0x4866d6f4), TOBN(0x35c75015, 0xb21ad0c9)}}, {{TOBN(0xa6b5c9d6, 0x46ac49d2), TOBN(0x42c77c0b, 0x83137aa9), TOBN(0x24d000fc, 0x68225a38), TOBN(0x0f63cfc8, 0x2fe1e907)}, {TOBN(0x22d1b01b, 0xc6441f95), TOBN(0x7d38f719, 0xec8e448f), TOBN(0x9b33fa5f, 0x787fb1ba), TOBN(0x94dcfda1, 0x190158df)}}, {{TOBN(0xc47cb339, 0x5f6d4a09), TOBN(0x6b4f355c, 0xee52b826), TOBN(0x3d100f5d, 0xf51b930a), TOBN(0xf4512fac, 0x9f668f69)}, {TOBN(0x546781d5, 0x206c4c74), TOBN(0xd021d4d4, 0xcb4d2e48), TOBN(0x494a54c2, 0xca085c2d), TOBN(0xf1dbaca4, 0x520850a8)}}, {{TOBN(0x63c79326, 0x490a1aca), TOBN(0xcb64dd9c, 0x41526b02), TOBN(0xbb772591, 0xa2979258), TOBN(0x3f582970, 0x48d97846)}, {TOBN(0xd66b70d1, 0x7c213ba7), TOBN(0xc28febb5, 0xe8a0ced4), TOBN(0x6b911831, 0xc10338c1), TOBN(0x0d54e389, 0xbf0126f3)}}, {{TOBN(0x7048d460, 0x4af206ee), TOBN(0x786c88f6, 0x77e97cb9), TOBN(0xd4375ae1, 0xac64802e), TOBN(0x469bcfe1, 0xd53ec11c)}, {TOBN(0xfc9b340d, 0x47062230), TOBN(0xe743bb57, 0xc5b4a3ac), TOBN(0xfe00b4aa, 0x59ef45ac), TOBN(0x29a4ef23, 0x59edf188)}}, {{TOBN(0x40242efe, 0xb483689b), TOBN(0x2575d3f6, 0x513ac262), TOBN(0xf30037c8, 0x0ca6db72), TOBN(0xc9fcce82, 0x98864be2)}, {TOBN(0x84a112ff, 0x0149362d), TOBN(0x95e57582, 0x1c4ae971), TOBN(0x1fa4b1a8, 0x945cf86c), TOBN(0x4525a734, 0x0b024a2f)}}, {{TOBN(0xe76c8b62, 0x8f338360), TOBN(0x483ff593, 0x28edf32b), TOBN(0x67e8e90a, 0x298b1aec), TOBN(0x9caab338, 0x736d9a21)}, {TOBN(0x5c09d2fd, 0x66892709), TOBN(0x2496b4dc, 0xb55a1d41), TOBN(0x93f5fb1a, 0xe24a4394), TOBN(0x08c75049, 0x6fa8f6c1)}}, {{TOBN(0xcaead1c2, 0xc905d85f), TOBN(0xe9d7f790, 0x0733ae57), TOBN(0x24c9a65c, 0xf07cdd94), TOBN(0x7389359c, 0xa4b55931)}, {TOBN(0xf58709b7, 0x367e45f7), TOBN(0x1f203067, 0xcb7e7adc), TOBN(0x82444bff, 0xc7b72818), TOBN(0x07303b35, 0xbaac8033)}}, {{TOBN(0x1e1ee4e4, 0xd13b7ea1), 
TOBN(0xe6489b24, 0xe0e74180), TOBN(0xa5f2c610, 0x7e70ef70), TOBN(0xa1655412, 0xbdd10894)}, {TOBN(0x555ebefb, 0x7af4194e), TOBN(0x533c1c3c, 0x8e89bd9c), TOBN(0x735b9b57, 0x89895856), TOBN(0x15fb3cd2, 0x567f5c15)}}, {{TOBN(0x057fed45, 0x526f09fd), TOBN(0xe8a4f10c, 0x8128240a), TOBN(0x9332efc4, 0xff2bfd8d), TOBN(0x214e77a0, 0xbd35aa31)}, {TOBN(0x32896d73, 0x14faa40e), TOBN(0x767867ec, 0x01e5f186), TOBN(0xc9adf8f1, 0x17a1813e), TOBN(0xcb6cda78, 0x54741795)}}, {{TOBN(0xb7521b6d, 0x349d51aa), TOBN(0xf56b5a9e, 0xe3c7b8e9), TOBN(0xc6f1e5c9, 0x32a096df), TOBN(0x083667c4, 0xa3635024)}, {TOBN(0x365ea135, 0x18087f2f), TOBN(0xf1b8eaac, 0xd136e45d), TOBN(0xc8a0e484, 0x73aec989), TOBN(0xd75a324b, 0x142c9259)}}, {{TOBN(0xb7b4d001, 0x01dae185), TOBN(0x45434e0b, 0x9b7a94bc), TOBN(0xf54339af, 0xfbd8cb0b), TOBN(0xdcc4569e, 0xe98ef49e)}, {TOBN(0x7789318a, 0x09a51299), TOBN(0x81b4d206, 0xb2b025d8), TOBN(0xf64aa418, 0xfae85792), TOBN(0x3e50258f, 0xacd7baf7)}}, {{TOBN(0xdce84cdb, 0x2996864b), TOBN(0xa2e67089, 0x1f485fa4), TOBN(0xb28b2bb6, 0x534c6a5a), TOBN(0x31a7ec6b, 0xc94b9d39)}, {TOBN(0x1d217766, 0xd6bc20da), TOBN(0x4acdb5ec, 0x86761190), TOBN(0x68726328, 0x73701063), TOBN(0x4d24ee7c, 0x2128c29b)}}, {{TOBN(0xc072ebd3, 0xa19fd868), TOBN(0x612e481c, 0xdb8ddd3b), TOBN(0xb4e1d754, 0x1a64d852), TOBN(0x00ef95ac, 0xc4c6c4ab)}, {TOBN(0x1536d2ed, 0xaa0a6c46), TOBN(0x61294086, 0x43774790), TOBN(0x54af25e8, 0x343fda10), TOBN(0x9ff9d98d, 0xfd25d6f2)}}, {{TOBN(0x0746af7c, 0x468b8835), TOBN(0x977a31cb, 0x730ecea7), TOBN(0xa5096b80, 0xc2cf4a81), TOBN(0xaa986833, 0x6458c37a)}, {TOBN(0x6af29bf3, 0xa6bd9d34), TOBN(0x6a62fe9b, 0x33c5d854), TOBN(0x50e6c304, 0xb7133b5e), TOBN(0x04b60159, 0x7d6e6848)}}, {{TOBN(0x4cd296df, 0x5579bea4), TOBN(0x10e35ac8, 0x5ceedaf1), TOBN(0x04c4c5fd, 0xe3bcc5b1), TOBN(0x95f9ee8a, 0x89412cf9)}, {TOBN(0x2c9459ee, 0x82b6eb0f), TOBN(0x2e845765, 0x95c2aadd), TOBN(0x774a84ae, 0xd327fcfe), TOBN(0xd8c93722, 0x0368d476)}}, {{TOBN(0x0dbd5748, 0xf83e8a3b), TOBN(0xa579aa96, 0x8d2495f3), 
TOBN(0x535996a0, 0xae496e9b), TOBN(0x07afbfe9, 0xb7f9bcc2)}, {TOBN(0x3ac1dc6d, 0x5b7bd293), TOBN(0x3b592cff, 0x7022323d), TOBN(0xba0deb98, 0x9c0a3e76), TOBN(0x18e78e9f, 0x4b197acb)}}, {{TOBN(0x211cde10, 0x296c36ef), TOBN(0x7ee89672, 0x82c4da77), TOBN(0xb617d270, 0xa57836da), TOBN(0xf0cd9c31, 0x9cb7560b)}, {TOBN(0x01fdcbf7, 0xe455fe90), TOBN(0x3fb53cbb, 0x7e7334f3), TOBN(0x781e2ea4, 0x4e7de4ec), TOBN(0x8adab3ad, 0x0b384fd0)}}, {{TOBN(0x129eee2f, 0x53d64829), TOBN(0x7a471e17, 0xa261492b), TOBN(0xe4f9adb9, 0xe4cb4a2c), TOBN(0x3d359f6f, 0x97ba2c2d)}, {TOBN(0x346c6786, 0x0aacd697), TOBN(0x92b444c3, 0x75c2f8a8), TOBN(0xc79fa117, 0xd85df44e), TOBN(0x56782372, 0x398ddf31)}}, {{TOBN(0x60e690f2, 0xbbbab3b8), TOBN(0x4851f8ae, 0x8b04816b), TOBN(0xc72046ab, 0x9c92e4d2), TOBN(0x518c74a1, 0x7cf3136b)}, {TOBN(0xff4eb50a, 0xf9877d4c), TOBN(0x14578d90, 0xa919cabb), TOBN(0x8218f8c4, 0xac5eb2b6), TOBN(0xa3ccc547, 0x542016e4)}}, {{TOBN(0x025bf48e, 0x327f8349), TOBN(0xf3e97346, 0xf43cb641), TOBN(0xdc2bafdf, 0x500f1085), TOBN(0x57167876, 0x2f063055)}, {TOBN(0x5bd914b9, 0x411925a6), TOBN(0x7c078d48, 0xa1123de5), TOBN(0xee6bf835, 0x182b165d), TOBN(0xb11b5e5b, 0xba519727)}}, {{TOBN(0xe33ea76c, 0x1eea7b85), TOBN(0x2352b461, 0x92d4f85e), TOBN(0xf101d334, 0xafe115bb), TOBN(0xfabc1294, 0x889175a3)}, {TOBN(0x7f6bcdc0, 0x5233f925), TOBN(0xe0a802db, 0xe77fec55), TOBN(0xbdb47b75, 0x8069b659), TOBN(0x1c5e12de, 0xf98fbd74)}}, {{TOBN(0x869c58c6, 0x4b8457ee), TOBN(0xa5360f69, 0x4f7ea9f7), TOBN(0xe576c09f, 0xf460b38f), TOBN(0x6b70d548, 0x22b7fb36)}, {TOBN(0x3fd237f1, 0x3bfae315), TOBN(0x33797852, 0xcbdff369), TOBN(0x97df25f5, 0x25b516f9), TOBN(0x46f388f2, 0xba38ad2d)}}, {{TOBN(0x656c4658, 0x89d8ddbb), TOBN(0x8830b26e, 0x70f38ee8), TOBN(0x4320fd5c, 0xde1212b0), TOBN(0xc34f30cf, 0xe4a2edb2)}, {TOBN(0xabb131a3, 0x56ab64b8), TOBN(0x7f77f0cc, 0xd99c5d26), TOBN(0x66856a37, 0xbf981d94), TOBN(0x19e76d09, 0x738bd76e)}}, {{TOBN(0xe76c8ac3, 0x96238f39), TOBN(0xc0a482be, 0xa830b366), TOBN(0xb7b8eaff, 0x0b4eb499), 
TOBN(0x8ecd83bc, 0x4bfb4865)}, {TOBN(0x971b2cb7, 0xa2f3776f), TOBN(0xb42176a4, 0xf4b88adf), TOBN(0xb9617df5, 0xbe1fa446), TOBN(0x8b32d508, 0xcd031bd2)}}, {{TOBN(0x1c6bd47d, 0x53b618c0), TOBN(0xc424f46c, 0x6a227923), TOBN(0x7303ffde, 0xdd92d964), TOBN(0xe9712878, 0x71b5abf2)}, {TOBN(0x8f48a632, 0xf815561d), TOBN(0x85f48ff5, 0xd3c055d1), TOBN(0x222a1427, 0x7525684f), TOBN(0xd0d841a0, 0x67360cc3)}}, {{TOBN(0x4245a926, 0x0b9267c6), TOBN(0xc78913f1, 0xcf07f863), TOBN(0xaa844c8e, 0x4d0d9e24), TOBN(0xa42ad522, 0x3d5f9017)}, {TOBN(0xbd371749, 0xa2c989d5), TOBN(0x928292df, 0xe1f5e78e), TOBN(0x493b383e, 0x0a1ea6da), TOBN(0x5136fd8d, 0x13aee529)}}, {{TOBN(0x860c44b1, 0xf2c34a99), TOBN(0x3b00aca4, 0xbf5855ac), TOBN(0xabf6aaa0, 0xfaaf37be), TOBN(0x65f43682, 0x2a53ec08)}, {TOBN(0x1d9a5801, 0xa11b12e1), TOBN(0x78a7ab2c, 0xe20ed475), TOBN(0x0de1067e, 0x9a41e0d5), TOBN(0x30473f5f, 0x305023ea)}}, {{TOBN(0xdd3ae09d, 0x169c7d97), TOBN(0x5cd5baa4, 0xcfaef9cd), TOBN(0x5cd7440b, 0x65a44803), TOBN(0xdc13966a, 0x47f364de)}, {TOBN(0x077b2be8, 0x2b8357c1), TOBN(0x0cb1b4c5, 0xe9d57c2a), TOBN(0x7a4ceb32, 0x05ff363e), TOBN(0xf310fa4d, 0xca35a9ef)}}, {{TOBN(0xdbb7b352, 0xf97f68c6), TOBN(0x0c773b50, 0x0b02cf58), TOBN(0xea2e4821, 0x3c1f96d9), TOBN(0xffb357b0, 0xeee01815)}, {TOBN(0xb9c924cd, 0xe0f28039), TOBN(0x0b36c95a, 0x46a3fbe4), TOBN(0x1faaaea4, 0x5e46db6c), TOBN(0xcae575c3, 0x1928aaff)}}, {{TOBN(0x7f671302, 0xa70dab86), TOBN(0xfcbd12a9, 0x71c58cfc), TOBN(0xcbef9acf, 0xbee0cb92), TOBN(0x573da0b9, 0xf8c1b583)}, {TOBN(0x4752fcfe, 0x0d41d550), TOBN(0xe7eec0e3, 0x2155cffe), TOBN(0x0fc39fcb, 0x545ae248), TOBN(0x522cb8d1, 0x8065f44e)}}, {{TOBN(0x263c962a, 0x70cbb96c), TOBN(0xe034362a, 0xbcd124a9), TOBN(0xf120db28, 0x3c2ae58d), TOBN(0xb9a38d49, 0xfef6d507)}, {TOBN(0xb1fd2a82, 0x1ff140fd), TOBN(0xbd162f30, 0x20aee7e0), TOBN(0x4e17a5d4, 0xcb251949), TOBN(0x2aebcb83, 0x4f7e1c3d)}}, {{TOBN(0x608eb25f, 0x937b0527), TOBN(0xf42e1e47, 0xeb7d9997), TOBN(0xeba699c4, 0xb8a53a29), TOBN(0x1f921c71, 0xe091b536)}, 
{TOBN(0xcce29e7b, 0x5b26bbd5), TOBN(0x7a8ef5ed, 0x3b61a680), TOBN(0xe5ef8043, 0xba1f1c7e), TOBN(0x16ea8217, 0x18158dda)}}, {{TOBN(0x01778a2b, 0x599ff0f9), TOBN(0x68a923d7, 0x8104fc6b), TOBN(0x5bfa44df, 0xda694ff3), TOBN(0x4f7199db, 0xf7667f12)}, {TOBN(0xc06d8ff6, 0xe46f2a79), TOBN(0x08b5dead, 0xe9f8131d), TOBN(0x02519a59, 0xabb4ce7c), TOBN(0xc4f710bc, 0xb42aec3e)}}, {{TOBN(0x3d77b057, 0x78bde41a), TOBN(0x6474bf80, 0xb4186b5a), TOBN(0x048b3f67, 0x88c65741), TOBN(0xc64519de, 0x03c7c154)}, {TOBN(0xdf073846, 0x0edfcc4f), TOBN(0x319aa737, 0x48f1aa6b), TOBN(0x8b9f8a02, 0xca909f77), TOBN(0x90258139, 0x7580bfef)}}, {{TOBN(0xd8bfd3ca, 0xc0c22719), TOBN(0xc60209e4, 0xc9ca151e), TOBN(0x7a744ab5, 0xd9a1a69c), TOBN(0x6de5048b, 0x14937f8f)}, {TOBN(0x171938d8, 0xe115ac04), TOBN(0x7df70940, 0x1c6b16d2), TOBN(0xa6aeb663, 0x7f8e94e7), TOBN(0xc130388e, 0x2a2cf094)}}, {{TOBN(0x1850be84, 0x77f54e6e), TOBN(0x9f258a72, 0x65d60fe5), TOBN(0xff7ff0c0, 0x6c9146d6), TOBN(0x039aaf90, 0xe63a830b)}, {TOBN(0x38f27a73, 0x9460342f), TOBN(0x4703148c, 0x3f795f8a), TOBN(0x1bb5467b, 0x9681a97e), TOBN(0x00931ba5, 0xecaeb594)}}, {{TOBN(0xcdb6719d, 0x786f337c), TOBN(0xd9c01cd2, 0xe704397d), TOBN(0x0f4a3f20, 0x555c2fef), TOBN(0x00452509, 0x7c0af223)}, {TOBN(0x54a58047, 0x84db8e76), TOBN(0x3bacf1aa, 0x93c8aa06), TOBN(0x11ca957c, 0xf7919422), TOBN(0x50641053, 0x78cdaa40)}}, {{TOBN(0x7a303874, 0x9f7144ae), TOBN(0x170c963f, 0x43d4acfd), TOBN(0x5e148149, 0x58ddd3ef), TOBN(0xa7bde582, 0x9e72dba8)}, {TOBN(0x0769da8b, 0x6fa68750), TOBN(0xfa64e532, 0x572e0249), TOBN(0xfcaadf9d, 0x2619ad31), TOBN(0x87882daa, 0xa7b349cd)}}, {{TOBN(0x9f6eb731, 0x6c67a775), TOBN(0xcb10471a, 0xefc5d0b1), TOBN(0xb433750c, 0xe1b806b2), TOBN(0x19c5714d, 0x57b1ae7e)}, {TOBN(0xc0dc8b7b, 0xed03fd3f), TOBN(0xdd03344f, 0x31bc194e), TOBN(0xa66c52a7, 0x8c6320b5), TOBN(0x8bc82ce3, 0xd0b6fd93)}}, {{TOBN(0xf8e13501, 0xb35f1341), TOBN(0xe53156dd, 0x25a43e42), TOBN(0xd3adf27e, 0x4daeb85c), TOBN(0xb81d8379, 0xbbeddeb5)}, {TOBN(0x1b0b546e, 0x2e435867), 
TOBN(0x9020eb94, 0xeba5dd60), TOBN(0x37d91161, 0x8210cb9d), TOBN(0x4c596b31, 0x5c91f1cf)}}, {{TOBN(0xb228a90f, 0x0e0b040d), TOBN(0xbaf02d82, 0x45ff897f), TOBN(0x2aac79e6, 0x00fa6122), TOBN(0x24828817, 0x8e36f557)}, {TOBN(0xb9521d31, 0x113ec356), TOBN(0x9e48861e, 0x15eff1f8), TOBN(0x2aa1d412, 0xe0d41715), TOBN(0x71f86203, 0x53f131b8)}}, {{TOBN(0xf60da8da, 0x3fd19408), TOBN(0x4aa716dc, 0x278d9d99), TOBN(0x394531f7, 0xa8c51c90), TOBN(0xb560b0e8, 0xf59db51c)}, {TOBN(0xa28fc992, 0xfa34bdad), TOBN(0xf024fa14, 0x9cd4f8bd), TOBN(0x5cf530f7, 0x23a9d0d3), TOBN(0x615ca193, 0xe28c9b56)}}, {{TOBN(0x6d2a483d, 0x6f73c51e), TOBN(0xa4cb2412, 0xea0dc2dd), TOBN(0x50663c41, 0x1eb917ff), TOBN(0x3d3a74cf, 0xeade299e)}, {TOBN(0x29b3990f, 0x4a7a9202), TOBN(0xa9bccf59, 0xa7b15c3d), TOBN(0x66a3ccdc, 0xa5df9208), TOBN(0x48027c14, 0x43f2f929)}}, {{TOBN(0xd385377c, 0x40b557f0), TOBN(0xe001c366, 0xcd684660), TOBN(0x1b18ed6b, 0xe2183a27), TOBN(0x879738d8, 0x63210329)}, {TOBN(0xa687c74b, 0xbda94882), TOBN(0xd1bbcc48, 0xa684b299), TOBN(0xaf6f1112, 0x863b3724), TOBN(0x6943d1b4, 0x2c8ce9f8)}}, {{TOBN(0xe044a3bb, 0x098cafb4), TOBN(0x27ed2310, 0x60d48caf), TOBN(0x542b5675, 0x3a31b84d), TOBN(0xcbf3dd50, 0xfcddbed7)}, {TOBN(0x25031f16, 0x41b1d830), TOBN(0xa7ec851d, 0xcb0c1e27), TOBN(0xac1c8fe0, 0xb5ae75db), TOBN(0xb24c7557, 0x08c52120)}}, {{TOBN(0x57f811dc, 0x1d4636c3), TOBN(0xf8436526, 0x681a9939), TOBN(0x1f6bc6d9, 0x9c81adb3), TOBN(0x840f8ac3, 0x5b7d80d4)}, {TOBN(0x731a9811, 0xf4387f1a), TOBN(0x7c501cd3, 0xb5156880), TOBN(0xa5ca4a07, 0xdfe68867), TOBN(0xf123d8f0, 0x5fcea120)}}, {{TOBN(0x1fbb0e71, 0xd607039e), TOBN(0x2b70e215, 0xcd3a4546), TOBN(0x32d2f01d, 0x53324091), TOBN(0xb796ff08, 0x180ab19b)}, {TOBN(0x32d87a86, 0x3c57c4aa), TOBN(0x2aed9caf, 0xb7c49a27), TOBN(0x9fb35eac, 0x31630d98), TOBN(0x338e8cdf, 0x5c3e20a3)}}, {{TOBN(0x80f16182, 0x66cde8db), TOBN(0x4e159980, 0x2d72fd36), TOBN(0xd7b8f13b, 0x9b6e5072), TOBN(0xf5213907, 0x3b7b5dc1)}, {TOBN(0x4d431f1d, 0x8ce4396e), TOBN(0x37a1a680, 0xa7ed2142), 
TOBN(0xbf375696, 0xd01aaf6b), TOBN(0xaa1c0c54, 0xe63aab66)}}, {{TOBN(0x3014368b, 0x4ed80940), TOBN(0x67e6d056, 0x7a6fcedd), TOBN(0x7c208c49, 0xca97579f), TOBN(0xfe3d7a81, 0xa23597f6)}, {TOBN(0x5e203202, 0x7e096ae2), TOBN(0xb1f3e1e7, 0x24b39366), TOBN(0x26da26f3, 0x2fdcdffc), TOBN(0x79422f1d, 0x6097be83)}}}, {{{TOBN(0x263a2cfb, 0x9db3b381), TOBN(0x9c3a2dee, 0xd4df0a4b), TOBN(0x728d06e9, 0x7d04e61f), TOBN(0x8b1adfbc, 0x42449325)}, {TOBN(0x6ec1d939, 0x7e053a1b), TOBN(0xee2be5c7, 0x66daf707), TOBN(0x80ba1e14, 0x810ac7ab), TOBN(0xdd2ae778, 0xf530f174)}}, {{TOBN(0x0435d97a, 0x205b9d8b), TOBN(0x6eb8f064, 0x056756d4), TOBN(0xd5e88a8b, 0xb6f8210e), TOBN(0x070ef12d, 0xec9fd9ea)}, {TOBN(0x4d849505, 0x3bcc876a), TOBN(0x12a75338, 0xa7404ce3), TOBN(0xd22b49e1, 0xb8a1db5e), TOBN(0xec1f2051, 0x14bfa5ad)}}, {{TOBN(0xadbaeb79, 0xb6828f36), TOBN(0x9d7a0258, 0x01bd5b9e), TOBN(0xeda01e0d, 0x1e844b0c), TOBN(0x4b625175, 0x887edfc9)}, {TOBN(0x14109fdd, 0x9669b621), TOBN(0x88a2ca56, 0xf6f87b98), TOBN(0xfe2eb788, 0x170df6bc), TOBN(0x0cea06f4, 0xffa473f9)}}, {{TOBN(0x43ed81b5, 0xc4e83d33), TOBN(0xd9f35879, 0x5efd488b), TOBN(0x164a620f, 0x9deb4d0f), TOBN(0xc6927bdb, 0xac6a7394)}, {TOBN(0x45c28df7, 0x9f9e0f03), TOBN(0x2868661e, 0xfcd7e1a9), TOBN(0x7cf4e8d0, 0xffa348f1), TOBN(0x6bd4c284, 0x398538e0)}}, {{TOBN(0x2618a091, 0x289a8619), TOBN(0xef796e60, 0x6671b173), TOBN(0x664e46e5, 0x9090c632), TOBN(0xa38062d4, 0x1e66f8fb)}, {TOBN(0x6c744a20, 0x0573274e), TOBN(0xd07b67e4, 0xa9271394), TOBN(0x391223b2, 0x6bdc0e20), TOBN(0xbe2d93f1, 0xeb0a05a7)}}, {{TOBN(0xf23e2e53, 0x3f36d141), TOBN(0xe84bb3d4, 0x4dfca442), TOBN(0xb804a48d, 0x6b7c023a), TOBN(0x1e16a8fa, 0x76431c3b)}, {TOBN(0x1b5452ad, 0xddd472e0), TOBN(0x7d405ee7, 0x0d1ee127), TOBN(0x50fc6f1d, 0xffa27599), TOBN(0x351ac53c, 0xbf391b35)}}, {{TOBN(0x7efa14b8, 0x4444896b), TOBN(0x64974d2f, 0xf94027fb), TOBN(0xefdcd0e8, 0xde84487d), TOBN(0x8c45b260, 0x2b48989b)}, {TOBN(0xa8fcbbc2, 0xd8463487), TOBN(0xd1b2b3f7, 0x3fbc476c), TOBN(0x21d005b7, 0xc8f443c0), 
TOBN(0x518f2e67, 0x40c0139c)}}, {{TOBN(0x56036e8c, 0x06d75fc1), TOBN(0x2dcf7bb7, 0x3249a89f), TOBN(0x81dd1d3d, 0xe245e7dd), TOBN(0xf578dc4b, 0xebd6e2a7)}, {TOBN(0x4c028903, 0xdf2ce7a0), TOBN(0xaee36288, 0x9c39afac), TOBN(0xdc847c31, 0x146404ab), TOBN(0x6304c0d8, 0xa4e97818)}}, {{TOBN(0xae51dca2, 0xa91f6791), TOBN(0x2abe4190, 0x9baa9efc), TOBN(0xd9d2e2f4, 0x559c7ac1), TOBN(0xe82f4b51, 0xfc9f773a)}, {TOBN(0xa7713027, 0x4073e81c), TOBN(0xc0276fac, 0xfbb596fc), TOBN(0x1d819fc9, 0xa684f70c), TOBN(0x29b47fdd, 0xc9f7b1e0)}}, {{TOBN(0x358de103, 0x459b1940), TOBN(0xec881c59, 0x5b013e93), TOBN(0x51574c93, 0x49532ad3), TOBN(0x2db1d445, 0xb37b46de)}, {TOBN(0xc6445b87, 0xdf239fd8), TOBN(0xc718af75, 0x151d24ee), TOBN(0xaea1c4a4, 0xf43c6259), TOBN(0x40c0e5d7, 0x70be02f7)}}, {{TOBN(0x6a4590f4, 0x721b33f2), TOBN(0x2124f1fb, 0xfedf04ea), TOBN(0xf8e53cde, 0x9745efe7), TOBN(0xe7e10432, 0x65f046d9)}, {TOBN(0xc3fca28e, 0xe4d0c7e6), TOBN(0x847e339a, 0x87253b1b), TOBN(0x9b595348, 0x3743e643), TOBN(0xcb6a0a0b, 0x4fd12fc5)}}, {{TOBN(0xfb6836c3, 0x27d02dcc), TOBN(0x5ad00982, 0x7a68bcc2), TOBN(0x1b24b44c, 0x005e912d), TOBN(0xcc83d20f, 0x811fdcfe)}, {TOBN(0x36527ec1, 0x666fba0c), TOBN(0x69948197, 0x14754635), TOBN(0xfcdcb1a8, 0x556da9c2), TOBN(0xa5934267, 0x81a732b2)}}, {{TOBN(0xec1214ed, 0xa714181d), TOBN(0x609ac13b, 0x6067b341), TOBN(0xff4b4c97, 0xa545df1f), TOBN(0xa1240501, 0x34d2076b)}, {TOBN(0x6efa0c23, 0x1409ca97), TOBN(0x254cc1a8, 0x20638c43), TOBN(0xd4e363af, 0xdcfb46cd), TOBN(0x62c2adc3, 0x03942a27)}}, {{TOBN(0xc67b9df0, 0x56e46483), TOBN(0xa55abb20, 0x63736356), TOBN(0xab93c098, 0xc551bc52), TOBN(0x382b49f9, 0xb15fe64b)}, {TOBN(0x9ec221ad, 0x4dff8d47), TOBN(0x79caf615, 0x437df4d6), TOBN(0x5f13dc64, 0xbb456509), TOBN(0xe4c589d9, 0x191f0714)}}, {{TOBN(0x27b6a8ab, 0x3fd40e09), TOBN(0xe455842e, 0x77313ea9), TOBN(0x8b51d1e2, 0x1f55988b), TOBN(0x5716dd73, 0x062bbbfc)}, {TOBN(0x633c11e5, 0x4e8bf3de), TOBN(0x9a0e77b6, 0x1b85be3b), TOBN(0x56510729, 0x0911cca6), TOBN(0x27e76495, 0xefa6590f)}}, 
{{TOBN(0xe4ac8b33, 0x070d3aab), TOBN(0x2643672b, 0x9a2cd5e5), TOBN(0x52eff79b, 0x1cfc9173), TOBN(0x665ca49b, 0x90a7c13f)}, {TOBN(0x5a8dda59, 0xb3efb998), TOBN(0x8a5b922d, 0x052f1341), TOBN(0xae9ebbab, 0x3cf9a530), TOBN(0x35986e7b, 0xf56da4d7)}}, {{TOBN(0x3a636b5c, 0xff3513cc), TOBN(0xbb0cf8ba, 0x3198f7dd), TOBN(0xb8d40522, 0x41f16f86), TOBN(0x760575d8, 0xde13a7bf)}, {TOBN(0x36f74e16, 0x9f7aa181), TOBN(0x163a3ecf, 0xf509ed1c), TOBN(0x6aead61f, 0x3c40a491), TOBN(0x158c95fc, 0xdfe8fcaa)}}, {{TOBN(0xa3991b6e, 0x13cda46f), TOBN(0x79482415, 0x342faed0), TOBN(0xf3ba5bde, 0x666b5970), TOBN(0x1d52e6bc, 0xb26ab6dd)}, {TOBN(0x768ba1e7, 0x8608dd3d), TOBN(0x4930db2a, 0xea076586), TOBN(0xd9575714, 0xe7dc1afa), TOBN(0x1fc7bf7d, 0xf7c58817)}}, {{TOBN(0x6b47accd, 0xd9eee96c), TOBN(0x0ca277fb, 0xe58cec37), TOBN(0x113fe413, 0xe702c42a), TOBN(0xdd1764ee, 0xc47cbe51)}, {TOBN(0x041e7cde, 0x7b3ed739), TOBN(0x50cb7459, 0x5ce9e1c0), TOBN(0x35568513, 0x2925b212), TOBN(0x7cff95c4, 0x001b081c)}}, {{TOBN(0x63ee4cbd, 0x8088b454), TOBN(0xdb7f32f7, 0x9a9e0c8a), TOBN(0xb377d418, 0x6b2447cb), TOBN(0xe3e982aa, 0xd370219b)}, {TOBN(0x06ccc1e4, 0xc2a2a593), TOBN(0x72c36865, 0x0773f24f), TOBN(0xa13b4da7, 0x95859423), TOBN(0x8bbf1d33, 0x75040c8f)}}, {{TOBN(0x726f0973, 0xda50c991), TOBN(0x48afcd5b, 0x822d6ee2), TOBN(0xe5fc718b, 0x20fd7771), TOBN(0xb9e8e77d, 0xfd0807a1)}, {TOBN(0x7f5e0f44, 0x99a7703d), TOBN(0x6972930e, 0x618e36f3), TOBN(0x2b7c77b8, 0x23807bbe), TOBN(0xe5b82405, 0xcb27ff50)}}, {{TOBN(0xba8b8be3, 0xbd379062), TOBN(0xd64b7a1d, 0x2dce4a92), TOBN(0x040a73c5, 0xb2952e37), TOBN(0x0a9e252e, 0xd438aeca)}, {TOBN(0xdd43956b, 0xc39d3bcb), TOBN(0x1a31ca00, 0xb32b2d63), TOBN(0xd67133b8, 0x5c417a18), TOBN(0xd08e4790, 0x2ef442c8)}}, {{TOBN(0x98cb1ae9, 0x255c0980), TOBN(0x4bd86381, 0x2b4a739f), TOBN(0x5a5c31e1, 0x1e4a45a1), TOBN(0x1e5d55fe, 0x9cb0db2f)}, {TOBN(0x74661b06, 0x8ff5cc29), TOBN(0x026b389f, 0x0eb8a4f4), TOBN(0x536b21a4, 0x58848c24), TOBN(0x2e5bf8ec, 0x81dc72b0)}}, {{TOBN(0x03c187d0, 0xad886aac), 
TOBN(0x5c16878a, 0xb771b645), TOBN(0xb07dfc6f, 0xc74045ab), TOBN(0x2c6360bf, 0x7800caed)}, {TOBN(0x24295bb5, 0xb9c972a3), TOBN(0xc9e6f88e, 0x7c9a6dba), TOBN(0x90ffbf24, 0x92a79aa6), TOBN(0xde29d50a, 0x41c26ac2)}}, {{TOBN(0x9f0af483, 0xd309cbe6), TOBN(0x5b020d8a, 0xe0bced4f), TOBN(0x606e986d, 0xb38023e3), TOBN(0xad8f2c9d, 0x1abc6933)}, {TOBN(0x19292e1d, 0xe7400e93), TOBN(0xfe3e18a9, 0x52be5e4d), TOBN(0xe8e9771d, 0x2e0680bf), TOBN(0x8c5bec98, 0xc54db063)}}, {{TOBN(0x2af9662a, 0x74a55d1f), TOBN(0xe3fbf28f, 0x046f66d8), TOBN(0xa3a72ab4, 0xd4dc4794), TOBN(0x09779f45, 0x5c7c2dd8)}, {TOBN(0xd893bdaf, 0xc3d19d8d), TOBN(0xd5a75094, 0x57d6a6df), TOBN(0x8cf8fef9, 0x952e6255), TOBN(0x3da67cfb, 0xda9a8aff)}}, {{TOBN(0x4c23f62a, 0x2c160dcd), TOBN(0x34e6c5e3, 0x8f90eaef), TOBN(0x35865519, 0xa9a65d5a), TOBN(0x07c48aae, 0x8fd38a3d)}, {TOBN(0xb7e7aeda, 0x50068527), TOBN(0x2c09ef23, 0x1c90936a), TOBN(0x31ecfeb6, 0xe879324c), TOBN(0xa0871f6b, 0xfb0ec938)}}, {{TOBN(0xb1f0fb68, 0xd84d835d), TOBN(0xc90caf39, 0x861dc1e6), TOBN(0x12e5b046, 0x7594f8d7), TOBN(0x26897ae2, 0x65012b92)}, {TOBN(0xbcf68a08, 0xa4d6755d), TOBN(0x403ee41c, 0x0991fbda), TOBN(0x733e343e, 0x3bbf17e8), TOBN(0xd2c7980d, 0x679b3d65)}}, {{TOBN(0x33056232, 0xd2e11305), TOBN(0x966be492, 0xf3c07a6f), TOBN(0x6a8878ff, 0xbb15509d), TOBN(0xff221101, 0x0a9b59a4)}, {TOBN(0x6c9f564a, 0xabe30129), TOBN(0xc6f2c940, 0x336e64cf), TOBN(0x0fe75262, 0x8b0c8022), TOBN(0xbe0267e9, 0x6ae8db87)}}, {{TOBN(0x22e192f1, 0x93bc042b), TOBN(0xf085b534, 0xb237c458), TOBN(0xa0d192bd, 0x832c4168), TOBN(0x7a76e9e3, 0xbdf6271d)}, {TOBN(0x52a882fa, 0xb88911b5), TOBN(0xc85345e4, 0xb4db0eb5), TOBN(0xa3be02a6, 0x81a7c3ff), TOBN(0x51889c8c, 0xf0ec0469)}}, {{TOBN(0x9d031369, 0xa5e829e5), TOBN(0xcbb4c6fc, 0x1607aa41), TOBN(0x75ac59a6, 0x241d84c1), TOBN(0xc043f2bf, 0x8829e0ee)}, {TOBN(0x82a38f75, 0x8ea5e185), TOBN(0x8bda40b9, 0xd87cbd9f), TOBN(0x9e65e75e, 0x2d8fc601), TOBN(0x3d515f74, 0xa35690b3)}}, {{TOBN(0x534acf4f, 0xda79e5ac), TOBN(0x68b83b3a, 0x8630215f), 
TOBN(0x5c748b2e, 0xd085756e), TOBN(0xb0317258, 0xe5d37cb2)}, {TOBN(0x6735841a, 0xc5ccc2c4), TOBN(0x7d7dc96b, 0x3d9d5069), TOBN(0xa147e410, 0xfd1754bd), TOBN(0x65296e94, 0xd399ddd5)}}, {{TOBN(0xf6b5b2d0, 0xbc8fa5bc), TOBN(0x8a5ead67, 0x500c277b), TOBN(0x214625e6, 0xdfa08a5d), TOBN(0x51fdfedc, 0x959cf047)}, {TOBN(0x6bc9430b, 0x289fca32), TOBN(0xe36ff0cf, 0x9d9bdc3f), TOBN(0x2fe187cb, 0x58ea0ede), TOBN(0xed66af20, 0x5a900b3f)}}, {{TOBN(0x00e0968b, 0x5fa9f4d6), TOBN(0x2d4066ce, 0x37a362e7), TOBN(0xa99a9748, 0xbd07e772), TOBN(0x710989c0, 0x06a4f1d0)}, {TOBN(0xd5dedf35, 0xce40cbd8), TOBN(0xab55c5f0, 0x1743293d), TOBN(0x766f1144, 0x8aa24e2c), TOBN(0x94d874f8, 0x605fbcb4)}}, {{TOBN(0xa365f0e8, 0xa518001b), TOBN(0xee605eb6, 0x9d04ef0f), TOBN(0x5a3915cd, 0xba8d4d25), TOBN(0x44c0e1b8, 0xb5113472)}, {TOBN(0xcbb024e8, 0x8b6740dc), TOBN(0x89087a53, 0xee1d4f0c), TOBN(0xa88fa05c, 0x1fc4e372), TOBN(0x8bf395cb, 0xaf8b3af2)}}, {{TOBN(0x1e71c9a1, 0xdeb8568b), TOBN(0xa35daea0, 0x80fb3d32), TOBN(0xe8b6f266, 0x2cf8fb81), TOBN(0x6d51afe8, 0x9490696a)}, {TOBN(0x81beac6e, 0x51803a19), TOBN(0xe3d24b7f, 0x86219080), TOBN(0x727cfd9d, 0xdf6f463c), TOBN(0x8c6865ca, 0x72284ee8)}}, {{TOBN(0x32c88b7d, 0xb743f4ef), TOBN(0x3793909b, 0xe7d11dce), TOBN(0xd398f922, 0x2ff2ebe8), TOBN(0x2c70ca44, 0xe5e49796)}, {TOBN(0xdf4d9929, 0xcb1131b1), TOBN(0x7826f298, 0x25888e79), TOBN(0x4d3a112c, 0xf1d8740a), TOBN(0x00384cb6, 0x270afa8b)}}, {{TOBN(0xcb64125b, 0x3ab48095), TOBN(0x3451c256, 0x62d05106), TOBN(0xd73d577d, 0xa4955845), TOBN(0x39570c16, 0xbf9f4433)}, {TOBN(0xd7dfaad3, 0xadecf263), TOBN(0xf1c3d8d1, 0xdc76e102), TOBN(0x5e774a58, 0x54c6a836), TOBN(0xdad4b672, 0x3e92d47b)}}, {{TOBN(0xbe7e990f, 0xf0d796a0), TOBN(0x5fc62478, 0xdf0e8b02), TOBN(0x8aae8bf4, 0x030c00ad), TOBN(0x3d2db93b, 0x9004ba0f)}, {TOBN(0xe48c8a79, 0xd85d5ddc), TOBN(0xe907caa7, 0x6bb07f34), TOBN(0x58db343a, 0xa39eaed5), TOBN(0x0ea6e007, 0xadaf5724)}}, {{TOBN(0xe00df169, 0xd23233f3), TOBN(0x3e322796, 0x77cb637f), TOBN(0x1f897c0e, 0x1da0cf6c), 
TOBN(0xa651f5d8, 0x31d6bbdd)}, {TOBN(0xdd61af19, 0x1a230c76), TOBN(0xbd527272, 0xcdaa5e4a), TOBN(0xca753636, 0xd0abcd7e), TOBN(0x78bdd37c, 0x370bd8dc)}}, {{TOBN(0xc23916c2, 0x17cd93fe), TOBN(0x65b97a4d, 0xdadce6e2), TOBN(0xe04ed4eb, 0x174e42f8), TOBN(0x1491ccaa, 0xbb21480a)}, {TOBN(0x145a8280, 0x23196332), TOBN(0x3c3862d7, 0x587b479a), TOBN(0x9f4a88a3, 0x01dcd0ed), TOBN(0x4da2b7ef, 0x3ea12f1f)}}, {{TOBN(0xf8e7ae33, 0xb126e48e), TOBN(0x404a0b32, 0xf494e237), TOBN(0x9beac474, 0xc55acadb), TOBN(0x4ee5cf3b, 0xcbec9fd9)}, {TOBN(0x336b33b9, 0x7df3c8c3), TOBN(0xbd905fe3, 0xb76808fd), TOBN(0x8f436981, 0xaa45c16a), TOBN(0x255c5bfa, 0x3dd27b62)}}, {{TOBN(0x71965cbf, 0xc3dd9b4d), TOBN(0xce23edbf, 0xfc068a87), TOBN(0xb78d4725, 0x745b029b), TOBN(0x74610713, 0xcefdd9bd)}, {TOBN(0x7116f75f, 0x1266bf52), TOBN(0x02046722, 0x18e49bb6), TOBN(0xdf43df9f, 0x3d6f19e3), TOBN(0xef1bc7d0, 0xe685cb2f)}}, {{TOBN(0xcddb27c1, 0x7078c432), TOBN(0xe1961b9c, 0xb77fedb7), TOBN(0x1edc2f5c, 0xc2290570), TOBN(0x2c3fefca, 0x19cbd886)}, {TOBN(0xcf880a36, 0xc2af389a), TOBN(0x96c610fd, 0xbda71cea), TOBN(0xf03977a9, 0x32aa8463), TOBN(0x8eb7763f, 0x8586d90a)}}, {{TOBN(0x3f342454, 0x2a296e77), TOBN(0xc8718683, 0x42837a35), TOBN(0x7dc71090, 0x6a09c731), TOBN(0x54778ffb, 0x51b816db)}, {TOBN(0x6b33bfec, 0xaf06defd), TOBN(0xfe3c105f, 0x8592b70b), TOBN(0xf937fda4, 0x61da6114), TOBN(0x3c13e651, 0x4c266ad7)}}, {{TOBN(0xe363a829, 0x855938e8), TOBN(0x2eeb5d9e, 0x9de54b72), TOBN(0xbeb93b0e, 0x20ccfab9), TOBN(0x3dffbb5f, 0x25e61a25)}, {TOBN(0x7f655e43, 0x1acc093d), TOBN(0x0cb6cc3d, 0x3964ce61), TOBN(0x6ab283a1, 0xe5e9b460), TOBN(0x55d787c5, 0xa1c7e72d)}}, {{TOBN(0x4d2efd47, 0xdeadbf02), TOBN(0x11e80219, 0xac459068), TOBN(0x810c7626, 0x71f311f0), TOBN(0xfa17ef8d, 0x4ab6ef53)}, {TOBN(0xaf47fd25, 0x93e43bff), TOBN(0x5cb5ff3f, 0x0be40632), TOBN(0x54687106, 0x8ee61da3), TOBN(0x7764196e, 0xb08afd0f)}}, {{TOBN(0x831ab3ed, 0xf0290a8f), TOBN(0xcae81966, 0xcb47c387), TOBN(0xaad7dece, 0x184efb4f), TOBN(0xdcfc53b3, 0x4749110e)}, 
{TOBN(0x6698f23c, 0x4cb632f9), TOBN(0xc42a1ad6, 0xb91f8067), TOBN(0xb116a81d, 0x6284180a), TOBN(0xebedf5f8, 0xe901326f)}}, {{TOBN(0xf2274c9f, 0x97e3e044), TOBN(0x42018520, 0x11d09fc9), TOBN(0x56a65f17, 0xd18e6e23), TOBN(0x2ea61e2a, 0x352b683c)}, {TOBN(0x27d291bc, 0x575eaa94), TOBN(0x9e7bc721, 0xb8ff522d), TOBN(0x5f7268bf, 0xa7f04d6f), TOBN(0x5868c73f, 0xaba41748)}}, {{TOBN(0x9f85c2db, 0x7be0eead), TOBN(0x511e7842, 0xff719135), TOBN(0x5a06b1e9, 0xc5ea90d7), TOBN(0x0c19e283, 0x26fab631)}, {TOBN(0x8af8f0cf, 0xe9206c55), TOBN(0x89389cb4, 0x3553c06a), TOBN(0x39dbed97, 0xf65f8004), TOBN(0x0621b037, 0xc508991d)}}, {{TOBN(0x1c52e635, 0x96e78cc4), TOBN(0x5385c8b2, 0x0c06b4a8), TOBN(0xd84ddfdb, 0xb0e87d03), TOBN(0xc49dfb66, 0x934bafad)}, {TOBN(0x7071e170, 0x59f70772), TOBN(0x3a073a84, 0x3a1db56b), TOBN(0x03494903, 0x3b8af190), TOBN(0x7d882de3, 0xd32920f0)}}, {{TOBN(0x91633f0a, 0xb2cf8940), TOBN(0x72b0b178, 0x6f948f51), TOBN(0x2d28dc30, 0x782653c8), TOBN(0x88829849, 0xdb903a05)}, {TOBN(0xb8095d0c, 0x6a19d2bb), TOBN(0x4b9e7f0c, 0x86f782cb), TOBN(0x7af73988, 0x2d907064), TOBN(0xd12be0fe, 0x8b32643c)}}, {{TOBN(0x358ed23d, 0x0e165dc3), TOBN(0x3d47ce62, 0x4e2378ce), TOBN(0x7e2bb0b9, 0xfeb8a087), TOBN(0x3246e8ae, 0xe29e10b9)}, {TOBN(0x459f4ec7, 0x03ce2b4d), TOBN(0xe9b4ca1b, 0xbbc077cf), TOBN(0x2613b4f2, 0x0e9940c1), TOBN(0xfc598bb9, 0x047d1eb1)}}, {{TOBN(0x9744c62b, 0x45036099), TOBN(0xa9dee742, 0x167c65d8), TOBN(0x0c511525, 0xdabe1943), TOBN(0xda110554, 0x93c6c624)}, {TOBN(0xae00a52c, 0x651a3be2), TOBN(0xcda5111d, 0x884449a6), TOBN(0x063c06f4, 0xff33bed1), TOBN(0x73baaf9a, 0x0d3d76b4)}}, {{TOBN(0x52fb0c9d, 0x7fc63668), TOBN(0x6886c9dd, 0x0c039cde), TOBN(0x602bd599, 0x55b22351), TOBN(0xb00cab02, 0x360c7c13)}, {TOBN(0x8cb616bc, 0x81b69442), TOBN(0x41486700, 0xb55c3cee), TOBN(0x71093281, 0xf49ba278), TOBN(0xad956d9c, 0x64a50710)}}, {{TOBN(0x9561f28b, 0x638a7e81), TOBN(0x54155cdf, 0x5980ddc3), TOBN(0xb2db4a96, 0xd26f247a), TOBN(0x9d774e4e, 0x4787d100)}, {TOBN(0x1a9e6e2e, 0x078637d2), 
TOBN(0x1c363e2d, 0x5e0ae06a), TOBN(0x7493483e, 0xe9cfa354), TOBN(0x76843cb3, 0x7f74b98d)}}, {{TOBN(0xbaca6591, 0xd4b66947), TOBN(0xb452ce98, 0x04460a8c), TOBN(0x6830d246, 0x43768f55), TOBN(0xf4197ed8, 0x7dff12df)}, {TOBN(0x6521b472, 0x400dd0f7), TOBN(0x59f5ca8f, 0x4b1e7093), TOBN(0x6feff11b, 0x080338ae), TOBN(0x0ada31f6, 0xa29ca3c6)}}, {{TOBN(0x24794eb6, 0x94a2c215), TOBN(0xd83a43ab, 0x05a57ab4), TOBN(0x264a543a, 0x2a6f89fe), TOBN(0x2c2a3868, 0xdd5ec7c2)}, {TOBN(0xd3373940, 0x8439d9b2), TOBN(0x715ea672, 0x0acd1f11), TOBN(0x42c1d235, 0xe7e6cc19), TOBN(0x81ce6e96, 0xb990585c)}}, {{TOBN(0x04e5dfe0, 0xd809c7bd), TOBN(0xd7b2580c, 0x8f1050ab), TOBN(0x6d91ad78, 0xd8a4176f), TOBN(0x0af556ee, 0x4e2e897c)}, {TOBN(0x162a8b73, 0x921de0ac), TOBN(0x52ac9c22, 0x7ea78400), TOBN(0xee2a4eea, 0xefce2174), TOBN(0xbe61844e, 0x6d637f79)}}, {{TOBN(0x0491f1bc, 0x789a283b), TOBN(0x72d3ac3d, 0x880836f4), TOBN(0xaa1c5ea3, 0x88e5402d), TOBN(0x1b192421, 0xd5cc473d)}, {TOBN(0x5c0b9998, 0x9dc84cac), TOBN(0xb0a8482d, 0x9c6e75b8), TOBN(0x639961d0, 0x3a191ce2), TOBN(0xda3bc865, 0x6d837930)}}, {{TOBN(0xca990653, 0x056e6f8f), TOBN(0x84861c41, 0x64d133a7), TOBN(0x8b403276, 0x746abe40), TOBN(0xb7b4d51a, 0xebf8e303)}, {TOBN(0x05b43211, 0x220a255d), TOBN(0xc997152c, 0x02419e6e), TOBN(0x76ff47b6, 0x630c2fea), TOBN(0x50518677, 0x281fdade)}}, {{TOBN(0x3283b8ba, 0xcf902b0b), TOBN(0x8d4b4eb5, 0x37db303b), TOBN(0xcc89f42d, 0x755011bc), TOBN(0xb43d74bb, 0xdd09d19b)}, {TOBN(0x65746bc9, 0x8adba350), TOBN(0x364eaf8c, 0xb51c1927), TOBN(0x13c76596, 0x10ad72ec), TOBN(0x30045121, 0xf8d40c20)}}, {{TOBN(0x6d2d99b7, 0xea7b979b), TOBN(0xcd78cd74, 0xe6fb3bcd), TOBN(0x11e45a9e, 0x86cffbfe), TOBN(0x78a61cf4, 0x637024f6)}, {TOBN(0xd06bc872, 0x3d502295), TOBN(0xf1376854, 0x458cb288), TOBN(0xb9db26a1, 0x342f8586), TOBN(0xf33effcf, 0x4beee09e)}}, {{TOBN(0xd7e0c4cd, 0xb30cfb3a), TOBN(0x6d09b8c1, 0x6c9db4c8), TOBN(0x40ba1a42, 0x07c8d9df), TOBN(0x6fd495f7, 0x1c52c66d)}, {TOBN(0xfb0e169f, 0x275264da), TOBN(0x80c2b746, 0xe57d8362), 
TOBN(0xedd987f7, 0x49ad7222), TOBN(0xfdc229af, 0x4398ec7b)}}}, {{{TOBN(0xb0d1ed84, 0x52666a58), TOBN(0x4bcb6e00, 0xe6a9c3c2), TOBN(0x3c57411c, 0x26906408), TOBN(0xcfc20755, 0x13556400)}, {TOBN(0xa08b1c50, 0x5294dba3), TOBN(0xa30ba286, 0x8b7dd31e), TOBN(0xd70ba90e, 0x991eca74), TOBN(0x094e142c, 0xe762c2b9)}}, {{TOBN(0xb81d783e, 0x979f3925), TOBN(0x1efd130a, 0xaf4c89a7), TOBN(0x525c2144, 0xfd1bf7fa), TOBN(0x4b296904, 0x1b265a9e)}, {TOBN(0xed8e9634, 0xb9db65b6), TOBN(0x35c82e32, 0x03599d8a), TOBN(0xdaa7a54f, 0x403563f3), TOBN(0x9df088ad, 0x022c38ab)}}, {{TOBN(0xe5cfb066, 0xbb3fd30a), TOBN(0x429169da, 0xeff0354e), TOBN(0x809cf852, 0x3524e36c), TOBN(0x136f4fb3, 0x0155be1d)}, {TOBN(0x4826af01, 0x1fbba712), TOBN(0x6ef0f0b4, 0x506ba1a1), TOBN(0xd9928b31, 0x77aea73e), TOBN(0xe2bf6af2, 0x5eaa244e)}}, {{TOBN(0x8d084f12, 0x4237b64b), TOBN(0x688ebe99, 0xe3ecfd07), TOBN(0x57b8a70c, 0xf6845dd8), TOBN(0x808fc59c, 0x5da4a325)}, {TOBN(0xa9032b2b, 0xa3585862), TOBN(0xb66825d5, 0xedf29386), TOBN(0xb5a5a8db, 0x431ec29b), TOBN(0xbb143a98, 0x3a1e8dc8)}}, {{TOBN(0x35ee94ce, 0x12ae381b), TOBN(0x3a7f176c, 0x86ccda90), TOBN(0xc63a657e, 0x4606eaca), TOBN(0x9ae5a380, 0x43cd04df)}, {TOBN(0x9bec8d15, 0xed251b46), TOBN(0x1f5d6d30, 0xcaca5e64), TOBN(0x347b3b35, 0x9ff20f07), TOBN(0x4d65f034, 0xf7e4b286)}}, {{TOBN(0x9e93ba24, 0xf111661e), TOBN(0xedced484, 0xb105eb04), TOBN(0x96dc9ba1, 0xf424b578), TOBN(0xbf8f66b7, 0xe83e9069)}, {TOBN(0x872d4df4, 0xd7ed8216), TOBN(0xbf07f377, 0x8e2cbecf), TOBN(0x4281d899, 0x98e73754), TOBN(0xfec85fbb, 0x8aab8708)}}, {{TOBN(0x9a3c0dee, 0xa5ba5b0b), TOBN(0xe6a116ce, 0x42d05299), TOBN(0xae9775fe, 0xe9b02d42), TOBN(0x72b05200, 0xa1545cb6)}, {TOBN(0xbc506f7d, 0x31a3b4ea), TOBN(0xe5893078, 0x8bbd9b32), TOBN(0xc8bc5f37, 0xe4b12a97), TOBN(0x6b000c06, 0x4a73b671)}}, {{TOBN(0x13b5bf22, 0x765fa7d0), TOBN(0x59805bf0, 0x1d6a5370), TOBN(0x67a5e29d, 0x4280db98), TOBN(0x4f53916f, 0x776b1ce3)}, {TOBN(0x714ff61f, 0x33ddf626), TOBN(0x4206238e, 0xa085d103), TOBN(0x1c50d4b7, 0xe5809ee3), 
TOBN(0x999f450d, 0x85f8eb1d)}}, {{TOBN(0x658a6051, 0xe4c79e9b), TOBN(0x1394cb73, 0xc66a9fea), TOBN(0x27f31ed5, 0xc6be7b23), TOBN(0xf4c88f36, 0x5aa6f8fe)}, {TOBN(0x0fb0721f, 0x4aaa499e), TOBN(0x68b3a7d5, 0xe3fb2a6b), TOBN(0xa788097d, 0x3a92851d), TOBN(0x060e7f8a, 0xe96f4913)}}, {{TOBN(0x82eebe73, 0x1a3a93bc), TOBN(0x42bbf465, 0xa21adc1a), TOBN(0xc10b6fa4, 0xef030efd), TOBN(0x247aa4c7, 0x87b097bb)}, {TOBN(0x8b8dc632, 0xf60c77da), TOBN(0x6ffbc26a, 0xc223523e), TOBN(0xa4f6ff11, 0x344579cf), TOBN(0x5825653c, 0x980250f6)}}, {{TOBN(0xb2dd097e, 0xbc1aa2b9), TOBN(0x07889393, 0x37a0333a), TOBN(0x1cf55e71, 0x37a0db38), TOBN(0x2648487f, 0x792c1613)}, {TOBN(0xdad01336, 0x3fcef261), TOBN(0x6239c81d, 0x0eabf129), TOBN(0x8ee761de, 0x9d276be2), TOBN(0x406a7a34, 0x1eda6ad3)}}, {{TOBN(0x4bf367ba, 0x4a493b31), TOBN(0x54f20a52, 0x9bf7f026), TOBN(0xb696e062, 0x9795914b), TOBN(0xcddab96d, 0x8bf236ac)}, {TOBN(0x4ff2c70a, 0xed25ea13), TOBN(0xfa1d09eb, 0x81cbbbe7), TOBN(0x88fc8c87, 0x468544c5), TOBN(0x847a670d, 0x696b3317)}}, {{TOBN(0xf133421e, 0x64bcb626), TOBN(0xaea638c8, 0x26dee0b5), TOBN(0xd6e7680b, 0xb310346c), TOBN(0xe06f4097, 0xd5d4ced3)}, {TOBN(0x09961452, 0x7512a30b), TOBN(0xf3d867fd, 0xe589a59a), TOBN(0x2e73254f, 0x52d0c180), TOBN(0x9063d8a3, 0x333c74ac)}}, {{TOBN(0xeda6c595, 0xd314e7bc), TOBN(0x2ee7464b, 0x467899ed), TOBN(0x1cef423c, 0x0a1ed5d3), TOBN(0x217e76ea, 0x69cc7613)}, {TOBN(0x27ccce1f, 0xe7cda917), TOBN(0x12d8016b, 0x8a893f16), TOBN(0xbcd6de84, 0x9fc74f6b), TOBN(0xfa5817e2, 0xf3144e61)}}, {{TOBN(0x1f354164, 0x0821ee4c), TOBN(0x1583eab4, 0x0bc61992), TOBN(0x7490caf6, 0x1d72879f), TOBN(0x998ad9f3, 0xf76ae7b2)}, {TOBN(0x1e181950, 0xa41157f7), TOBN(0xa9d7e1e6, 0xe8da3a7e), TOBN(0x963784eb, 0x8426b95f), TOBN(0x0ee4ed6e, 0x542e2a10)}}, {{TOBN(0xb79d4cc5, 0xac751e7b), TOBN(0x93f96472, 0xfd4211bd), TOBN(0x8c72d3d2, 0xc8de4fc6), TOBN(0x7b69cbf5, 0xdf44f064)}, {TOBN(0x3da90ca2, 0xf4bf94e1), TOBN(0x1a5325f8, 0xf12894e2), TOBN(0x0a437f6c, 0x7917d60b), TOBN(0x9be70486, 0x96c9cb5d)}}, 
{{TOBN(0xb4d880bf, 0xe1dc5c05), TOBN(0xd738adda, 0xeebeeb57), TOBN(0x6f0119d3, 0xdf0fe6a3), TOBN(0x5c686e55, 0x66eaaf5a)}, {TOBN(0x9cb10b50, 0xdfd0b7ec), TOBN(0xbdd0264b, 0x6a497c21), TOBN(0xfc093514, 0x8c546c96), TOBN(0x58a947fa, 0x79dbf42a)}}, {{TOBN(0xc0b48d4e, 0x49ccd6d7), TOBN(0xff8fb02c, 0x88bd5580), TOBN(0xc75235e9, 0x07d473b2), TOBN(0x4fab1ac5, 0xa2188af3)}, {TOBN(0x030fa3bc, 0x97576ec0), TOBN(0xe8c946e8, 0x0b7e7d2f), TOBN(0x40a5c9cc, 0x70305600), TOBN(0x6d8260a9, 0xc8b013b4)}}, {{TOBN(0x0368304f, 0x70bba85c), TOBN(0xad090da1, 0xa4a0d311), TOBN(0x7170e870, 0x2415eec1), TOBN(0xbfba35fe, 0x8461ea47)}, {TOBN(0x6279019a, 0xc1e91938), TOBN(0xa47638f3, 0x1afc415f), TOBN(0x36c65cbb, 0xbcba0e0f), TOBN(0x02160efb, 0x034e2c48)}}, {{TOBN(0xe6c51073, 0x615cd9e4), TOBN(0x498ec047, 0xf1243c06), TOBN(0x3e5a8809, 0xb17b3d8c), TOBN(0x5cd99e61, 0x0cc565f1)}, {TOBN(0x81e312df, 0x7851dafe), TOBN(0xf156f5ba, 0xa79061e2), TOBN(0x80d62b71, 0x880c590e), TOBN(0xbec9746f, 0x0a39faa1)}}, {{TOBN(0x1d98a9c1, 0xc8ed1f7a), TOBN(0x09e43bb5, 0xa81d5ff2), TOBN(0xd5f00f68, 0x0da0794a), TOBN(0x412050d9, 0x661aa836)}, {TOBN(0xa89f7c4e, 0x90747e40), TOBN(0x6dc05ebb, 0xb62a3686), TOBN(0xdf4de847, 0x308e3353), TOBN(0x53868fbb, 0x9fb53bb9)}}, {{TOBN(0x2b09d2c3, 0xcfdcf7dd), TOBN(0x41a9fce3, 0x723fcab4), TOBN(0x73d905f7, 0x07f57ca3), TOBN(0x080f9fb1, 0xac8e1555)}, {TOBN(0x7c088e84, 0x9ba7a531), TOBN(0x07d35586, 0xed9a147f), TOBN(0x602846ab, 0xaf48c336), TOBN(0x7320fd32, 0x0ccf0e79)}}, {{TOBN(0xaa780798, 0xb18bd1ff), TOBN(0x52c2e300, 0xafdd2905), TOBN(0xf27ea3d6, 0x434267cd), TOBN(0x8b96d16d, 0x15605b5f)}, {TOBN(0x7bb31049, 0x4b45706b), TOBN(0xe7f58b8e, 0x743d25f8), TOBN(0xe9b5e45b, 0x87f30076), TOBN(0xd19448d6, 0x5d053d5a)}}, {{TOBN(0x1ecc8cb9, 0xd3210a04), TOBN(0x6bc7d463, 0xdafb5269), TOBN(0x3e59b10a, 0x67c3489f), TOBN(0x1769788c, 0x65641e1b)}, {TOBN(0x8a53b82d, 0xbd6cb838), TOBN(0x7066d6e6, 0x236d5f22), TOBN(0x03aa1c61, 0x6908536e), TOBN(0xc971da0d, 0x66ae9809)}}, {{TOBN(0x01b3a86b, 0xc49a2fac), 
TOBN(0x3b8420c0, 0x3092e77a), TOBN(0x02057300, 0x7d6fb556), TOBN(0x6941b2a1, 0xbff40a87)}, {TOBN(0x140b6308, 0x0658ff2a), TOBN(0x87804363, 0x3424ab36), TOBN(0x0253bd51, 0x5751e299), TOBN(0xc75bcd76, 0x449c3e3a)}}, {{TOBN(0x92eb4090, 0x7f8f875d), TOBN(0x9c9d754e, 0x56c26bbf), TOBN(0x158cea61, 0x8110bbe7), TOBN(0x62a6b802, 0x745f91ea)}, {TOBN(0xa79c41aa, 0xc6e7394b), TOBN(0x445b6a83, 0xad57ef10), TOBN(0x0c5277eb, 0x6ea6f40c), TOBN(0x319fe96b, 0x88633365)}}, {{TOBN(0x0b0fc61f, 0x385f63cb), TOBN(0x41250c84, 0x22bdd127), TOBN(0x67d153f1, 0x09e942c2), TOBN(0x60920d08, 0xc021ad5d)}, {TOBN(0x229f5746, 0x724d81a5), TOBN(0xb7ffb892, 0x5bba3299), TOBN(0x518c51a1, 0xde413032), TOBN(0x2a9bfe77, 0x3c2fd94c)}}, {{TOBN(0xcbcde239, 0x3191f4fd), TOBN(0x43093e16, 0xd3d6ada1), TOBN(0x184579f3, 0x58769606), TOBN(0x2c94a8b3, 0xd236625c)}, {TOBN(0x6922b9c0, 0x5c437d8e), TOBN(0x3d4ae423, 0xd8d9f3c8), TOBN(0xf72c31c1, 0x2e7090a2), TOBN(0x4ac3f5f3, 0xd76a55bd)}}, {{TOBN(0x342508fc, 0x6b6af991), TOBN(0x0d527100, 0x1b5cebbd), TOBN(0xb84740d0, 0xdd440dd7), TOBN(0x748ef841, 0x780162fd)}, {TOBN(0xa8dbfe0e, 0xdfc6fafb), TOBN(0xeadfdf05, 0xf7300f27), TOBN(0x7d06555f, 0xfeba4ec9), TOBN(0x12c56f83, 0x9e25fa97)}}, {{TOBN(0x77f84203, 0xd39b8c34), TOBN(0xed8b1be6, 0x3125eddb), TOBN(0x5bbf2441, 0xf6e39dc5), TOBN(0xb00f6ee6, 0x6a5d678a)}, {TOBN(0xba456ecf, 0x57d0ea99), TOBN(0xdcae0f58, 0x17e06c43), TOBN(0x01643de4, 0x0f5b4baa), TOBN(0x2c324341, 0xd161b9be)}}, {{TOBN(0x80177f55, 0xe126d468), TOBN(0xed325f1f, 0x76748e09), TOBN(0x6116004a, 0xcfa9bdc2), TOBN(0x2d8607e6, 0x3a9fb468)}, {TOBN(0x0e573e27, 0x6009d660), TOBN(0x3a525d2e, 0x8d10c5a1), TOBN(0xd26cb45c, 0x3b9009a0), TOBN(0xb6b0cdc0, 0xde9d7448)}}, {{TOBN(0x949c9976, 0xe1337c26), TOBN(0x6faadebd, 0xd73d68e5), TOBN(0x9e158614, 0xf1b768d9), TOBN(0x22dfa557, 0x9cc4f069)}, {TOBN(0xccd6da17, 0xbe93c6d6), TOBN(0x24866c61, 0xa504f5b9), TOBN(0x2121353c, 0x8d694da1), TOBN(0x1c6ca580, 0x0140b8c6)}}, {{TOBN(0xc245ad8c, 0xe964021e), TOBN(0xb83bffba, 0x032b82b3), 
TOBN(0xfaa220c6, 0x47ef9898), TOBN(0x7e8d3ac6, 0x982c948a)}, {TOBN(0x1faa2091, 0xbc2d124a), TOBN(0xbd54c3dd, 0x05b15ff4), TOBN(0x386bf3ab, 0xc87c6fb7), TOBN(0xfb2b0563, 0xfdeb6f66)}}, {{TOBN(0x4e77c557, 0x5b45afb4), TOBN(0xe9ded649, 0xefb8912d), TOBN(0x7ec9bbf5, 0x42f6e557), TOBN(0x2570dfff, 0x62671f00)}, {TOBN(0x2b3bfb78, 0x88e084bd), TOBN(0xa024b238, 0xf37fe5b4), TOBN(0x44e7dc04, 0x95649aee), TOBN(0x498ca255, 0x5e7ec1d8)}}, {{TOBN(0x3bc766ea, 0xaaa07e86), TOBN(0x0db6facb, 0xf3608586), TOBN(0xbadd2549, 0xbdc259c8), TOBN(0x95af3c6e, 0x041c649f)}, {TOBN(0xb36a928c, 0x02e30afb), TOBN(0x9b5356ad, 0x008a88b8), TOBN(0x4b67a5f1, 0xcf1d9e9d), TOBN(0xc6542e47, 0xa5d8d8ce)}}, {{TOBN(0x73061fe8, 0x7adfb6cc), TOBN(0xcc826fd3, 0x98678141), TOBN(0x00e758b1, 0x3c80515a), TOBN(0x6afe3247, 0x41485083)}, {TOBN(0x0fcb08b9, 0xb6ae8a75), TOBN(0xb8cf388d, 0x4acf51e1), TOBN(0x344a5560, 0x6961b9d6), TOBN(0x1a6778b8, 0x6a97fd0c)}}, {{TOBN(0xd840fdc1, 0xecc4c7e3), TOBN(0xde9fe47d, 0x16db68cc), TOBN(0xe95f89de, 0xa3e216aa), TOBN(0x84f1a6a4, 0x9594a8be)}, {TOBN(0x7ddc7d72, 0x5a7b162b), TOBN(0xc5cfda19, 0xadc817a3), TOBN(0x80a5d350, 0x78b58d46), TOBN(0x93365b13, 0x82978f19)}}, {{TOBN(0x2e44d225, 0x26a1fc90), TOBN(0x0d6d10d2, 0x4d70705d), TOBN(0xd94b6b10, 0xd70c45f4), TOBN(0x0f201022, 0xb216c079)}, {TOBN(0xcec966c5, 0x658fde41), TOBN(0xa8d2bc7d, 0x7e27601d), TOBN(0xbfcce3e1, 0xff230be7), TOBN(0x3394ff6b, 0x0033ffb5)}}, {{TOBN(0xd890c509, 0x8132c9af), TOBN(0xaac4b0eb, 0x361e7868), TOBN(0x5194ded3, 0xe82d15aa), TOBN(0x4550bd2e, 0x23ae6b7d)}, {TOBN(0x3fda318e, 0xea5399d4), TOBN(0xd989bffa, 0x91638b80), TOBN(0x5ea124d0, 0xa14aa12d), TOBN(0x1fb1b899, 0x3667b944)}}, {{TOBN(0x95ec7969, 0x44c44d6a), TOBN(0x91df144a, 0x57e86137), TOBN(0x915fd620, 0x73adac44), TOBN(0x8f01732d, 0x59a83801)}, {TOBN(0xec579d25, 0x3aa0a633), TOBN(0x06de5e7c, 0xc9d6d59c), TOBN(0xc132f958, 0xb1ef8010), TOBN(0x29476f96, 0xe65c1a02)}}, {{TOBN(0x336a77c0, 0xd34c3565), TOBN(0xef1105b2, 0x1b9f1e9e), TOBN(0x63e6d08b, 0xf9e08002), 
TOBN(0x9aff2f21, 0xc613809e)}, {TOBN(0xb5754f85, 0x3a80e75d), TOBN(0xde71853e, 0x6bbda681), TOBN(0x86f041df, 0x8197fd7a), TOBN(0x8b332e08, 0x127817fa)}}, {{TOBN(0x05d99be8, 0xb9c20cda), TOBN(0x89f7aad5, 0xd5cd0c98), TOBN(0x7ef936fe, 0x5bb94183), TOBN(0x92ca0753, 0xb05cd7f2)}, {TOBN(0x9d65db11, 0x74a1e035), TOBN(0x02628cc8, 0x13eaea92), TOBN(0xf2d9e242, 0x49e4fbf2), TOBN(0x94fdfd9b, 0xe384f8b7)}}, {{TOBN(0x65f56054, 0x63428c6b), TOBN(0x2f7205b2, 0x90b409a5), TOBN(0xf778bb78, 0xff45ae11), TOBN(0xa13045be, 0xc5ee53b2)}, {TOBN(0xe00a14ff, 0x03ef77fe), TOBN(0x689cd59f, 0xffef8bef), TOBN(0x3578f0ed, 0x1e9ade22), TOBN(0xe99f3ec0, 0x6268b6a8)}}, {{TOBN(0xa2057d91, 0xea1b3c3e), TOBN(0x2d1a7053, 0xb8823a4a), TOBN(0xabbb336a, 0x2cca451e), TOBN(0xcd2466e3, 0x2218bb5d)}, {TOBN(0x3ac1f42f, 0xc8cb762d), TOBN(0x7e312aae, 0x7690211f), TOBN(0xebb9bd73, 0x45d07450), TOBN(0x207c4b82, 0x46c2213f)}}, {{TOBN(0x99d425c1, 0x375913ec), TOBN(0x94e45e96, 0x67908220), TOBN(0xc08f3087, 0xcd67dbf6), TOBN(0xa5670fbe, 0xc0887056)}, {TOBN(0x6717b64a, 0x66f5b8fc), TOBN(0xd5a56aea, 0x786fec28), TOBN(0xa8c3f55f, 0xc0ff4952), TOBN(0xa77fefae, 0x457ac49b)}}, {{TOBN(0x29882d7c, 0x98379d44), TOBN(0xd000bdfb, 0x509edc8a), TOBN(0xc6f95979, 0xe66fe464), TOBN(0x504a6115, 0xfa61bde0)}, {TOBN(0x56b3b871, 0xeffea31a), TOBN(0x2d3de26d, 0xf0c21a54), TOBN(0x21dbff31, 0x834753bf), TOBN(0xe67ecf49, 0x69269d86)}}, {{TOBN(0x7a176952, 0x151fe690), TOBN(0x03515804, 0x7f2adb5f), TOBN(0xee794b15, 0xd1b62a8d), TOBN(0xf004ceec, 0xaae454e6)}, {TOBN(0x0897ea7c, 0xf0386fac), TOBN(0x3b62ff12, 0xd1fca751), TOBN(0x154181df, 0x1b7a04ec), TOBN(0x2008e04a, 0xfb5847ec)}}, {{TOBN(0xd147148e, 0x41dbd772), TOBN(0x2b419f73, 0x22942654), TOBN(0x669f30d3, 0xe9c544f7), TOBN(0x52a2c223, 0xc8540149)}, {TOBN(0x5da9ee14, 0x634dfb02), TOBN(0x5f074ff0, 0xf47869f3), TOBN(0x74ee878d, 0xa3933acc), TOBN(0xe6510651, 0x4fe35ed1)}}, {{TOBN(0xb3eb9482, 0xf1012e7a), TOBN(0x51013cc0, 0xa8a566ae), TOBN(0xdd5e9243, 0x47c00d3b), TOBN(0x7fde089d, 0x946bb0e5)}, 
{TOBN(0x030754fe, 0xc731b4b3), TOBN(0x12a136a4, 0x99fda062), TOBN(0x7c1064b8, 0x5a1a35bc), TOBN(0xbf1f5763, 0x446c84ef)}}, {{TOBN(0xed29a56d, 0xa16d4b34), TOBN(0x7fba9d09, 0xdca21c4f), TOBN(0x66d7ac00, 0x6d8de486), TOBN(0x60061987, 0x73a2a5e1)}, {TOBN(0x8b400f86, 0x9da28ff0), TOBN(0x3133f708, 0x43c4599c), TOBN(0x9911c9b8, 0xee28cb0d), TOBN(0xcd7e2874, 0x8e0af61d)}}, {{TOBN(0x5a85f0f2, 0x72ed91fc), TOBN(0x85214f31, 0x9cd4a373), TOBN(0x881fe5be, 0x1925253c), TOBN(0xd8dc98e0, 0x91e8bc76)}, {TOBN(0x7120affe, 0x585cc3a2), TOBN(0x724952ed, 0x735bf97a), TOBN(0x5581e7dc, 0x3eb34581), TOBN(0x5cbff4f2, 0xe52ee57d)}}, {{TOBN(0x8d320a0e, 0x87d8cc7b), TOBN(0x9beaa7f3, 0xf1d280d0), TOBN(0x7a0b9571, 0x9beec704), TOBN(0x9126332e, 0x5b7f0057)}, {TOBN(0x01fbc1b4, 0x8ed3bd6d), TOBN(0x35bb2c12, 0xd945eb24), TOBN(0x6404694e, 0x9a8ae255), TOBN(0xb6092eec, 0x8d6abfb3)}}, {{TOBN(0x4d76143f, 0xcc058865), TOBN(0x7b0a5af2, 0x6e249922), TOBN(0x8aef9440, 0x6a50d353), TOBN(0xe11e4bcc, 0x64f0e07a)}, {TOBN(0x4472993a, 0xa14a90fa), TOBN(0x7706e20c, 0xba0c51d4), TOBN(0xf403292f, 0x1532672d), TOBN(0x52573bfa, 0x21829382)}}, {{TOBN(0x6a7bb6a9, 0x3b5bdb83), TOBN(0x08da65c0, 0xa4a72318), TOBN(0xc58d22aa, 0x63eb065f), TOBN(0x1717596c, 0x1b15d685)}, {TOBN(0x112df0d0, 0xb266d88b), TOBN(0xf688ae97, 0x5941945a), TOBN(0x487386e3, 0x7c292cac), TOBN(0x42f3b50d, 0x57d6985c)}}, {{TOBN(0x6da4f998, 0x6a90fc34), TOBN(0xc8f257d3, 0x65ca8a8d), TOBN(0xc2feabca, 0x6951f762), TOBN(0xe1bc81d0, 0x74c323ac)}, {TOBN(0x1bc68f67, 0x251a2a12), TOBN(0x10d86587, 0xbe8a70dc), TOBN(0xd648af7f, 0xf0f84d2e), TOBN(0xf0aa9ebc, 0x6a43ac92)}}, {{TOBN(0x69e3be04, 0x27596893), TOBN(0xb6bb02a6, 0x45bf452b), TOBN(0x0875c11a, 0xf4c698c8), TOBN(0x6652b5c7, 0xbece3794)}, {TOBN(0x7b3755fd, 0x4f5c0499), TOBN(0x6ea16558, 0xb5532b38), TOBN(0xd1c69889, 0xa2e96ef7), TOBN(0x9c773c3a, 0x61ed8f48)}}, {{TOBN(0x2b653a40, 0x9b323abc), TOBN(0xe26605e1, 0xf0e1d791), TOBN(0x45d41064, 0x4a87157a), TOBN(0x8f9a78b7, 0xcbbce616)}, {TOBN(0xcf1e44aa, 0xc407eddd), 
TOBN(0x81ddd1d8, 0xa35b964f), TOBN(0x473e339e, 0xfd083999), TOBN(0x6c94bdde, 0x8e796802)}}, {{TOBN(0x5a304ada, 0x8545d185), TOBN(0x82ae44ea, 0x738bb8cb), TOBN(0x628a35e3, 0xdf87e10e), TOBN(0xd3624f3d, 0xa15b9fe3)}, {TOBN(0xcc44209b, 0x14be4254), TOBN(0x7d0efcbc, 0xbdbc2ea5), TOBN(0x1f603362, 0x04c37bbe), TOBN(0x21f363f5, 0x56a5852c)}}, {{TOBN(0xa1503d1c, 0xa8501550), TOBN(0x2251e0e1, 0xd8ab10bb), TOBN(0xde129c96, 0x6961c51c), TOBN(0x1f7246a4, 0x81910f68)}, {TOBN(0x2eb744ee, 0x5f2591f2), TOBN(0x3c47d33f, 0x5e627157), TOBN(0x4d6d62c9, 0x22f3bd68), TOBN(0x6120a64b, 0xcb8df856)}}, {{TOBN(0x3a9ac6c0, 0x7b5d07df), TOBN(0xa92b9558, 0x7ef39783), TOBN(0xe128a134, 0xab3a9b4f), TOBN(0x41c18807, 0xb1252f05)}, {TOBN(0xfc7ed089, 0x80ba9b1c), TOBN(0xac8dc6de, 0xc532a9dd), TOBN(0xbf829cef, 0x55246809), TOBN(0x101b784f, 0x5b4ee80f)}}, {{TOBN(0xc09945bb, 0xb6f11603), TOBN(0x57b09dbe, 0x41d2801e), TOBN(0xfba5202f, 0xa97534a8), TOBN(0x7fd8ae5f, 0xc17b9614)}, {TOBN(0xa50ba666, 0x78308435), TOBN(0x9572f77c, 0xd3868c4d), TOBN(0x0cef7bfd, 0x2dd7aab0), TOBN(0xe7958e08, 0x2c7c79ff)}}, {{TOBN(0x81262e42, 0x25346689), TOBN(0x716da290, 0xb07c7004), TOBN(0x35f911ea, 0xb7950ee3), TOBN(0x6fd72969, 0x261d21b5)}, {TOBN(0x52389803, 0x08b640d3), TOBN(0x5b0026ee, 0x887f12a1), TOBN(0x20e21660, 0x742e9311), TOBN(0x0ef6d541, 0x5ff77ff7)}}, {{TOBN(0x969127f0, 0xf9c41135), TOBN(0xf21d60c9, 0x68a64993), TOBN(0x656e5d0c, 0xe541875c), TOBN(0xf1e0f84e, 0xa1d3c233)}, {TOBN(0x9bcca359, 0x06002d60), TOBN(0xbe2da60c, 0x06191552), TOBN(0x5da8bbae, 0x61181ec3), TOBN(0x9f04b823, 0x65806f19)}}, {{TOBN(0xf1604a7d, 0xd4b79bb8), TOBN(0xaee806fb, 0x52c878c8), TOBN(0x34144f11, 0x8d47b8e8), TOBN(0x72edf52b, 0x949f9054)}, {TOBN(0xebfca84e, 0x2127015a), TOBN(0x9051d0c0, 0x9cb7cef3), TOBN(0x86e8fe58, 0x296deec8), TOBN(0x33b28188, 0x41010d74)}}}, {{{TOBN(0x01079383, 0x171b445f), TOBN(0x9bcf21e3, 0x8131ad4c), TOBN(0x8cdfe205, 0xc93987e8), TOBN(0xe63f4152, 0xc92e8c8f)}, {TOBN(0x729462a9, 0x30add43d), TOBN(0x62ebb143, 0xc980f05a), 
TOBN(0x4f3954e5, 0x3b06e968), TOBN(0xfe1d75ad, 0x242cf6b1)}}, {{TOBN(0x5f95c6c7, 0xaf8685c8), TOBN(0xd4c1c8ce, 0x2f8f01aa), TOBN(0xc44bbe32, 0x2574692a), TOBN(0xb8003478, 0xd4a4a068)}, {TOBN(0x7c8fc6e5, 0x2eca3cdb), TOBN(0xea1db16b, 0xec04d399), TOBN(0xb05bc82e, 0x8f2bc5cf), TOBN(0x763d517f, 0xf44793d2)}}, {{TOBN(0x4451c1b8, 0x08bd98d0), TOBN(0x644b1cd4, 0x6575f240), TOBN(0x6907eb33, 0x7375d270), TOBN(0x56c8bebd, 0xfa2286bd)}, {TOBN(0xc713d2ac, 0xc4632b46), TOBN(0x17da427a, 0xafd60242), TOBN(0x313065b7, 0xc95c7546), TOBN(0xf8239898, 0xbf17a3de)}}, {{TOBN(0xf3b7963f, 0x4c830320), TOBN(0x842c7aa0, 0x903203e3), TOBN(0xaf22ca0a, 0xe7327afb), TOBN(0x38e13092, 0x967609b6)}, {TOBN(0x73b8fb62, 0x757558f1), TOBN(0x3cc3e831, 0xf7eca8c1), TOBN(0xe4174474, 0xf6331627), TOBN(0xa77989ca, 0xc3c40234)}}, {{TOBN(0xe5fd17a1, 0x44a081e0), TOBN(0xd797fb7d, 0xb70e296a), TOBN(0x2b472b30, 0x481f719c), TOBN(0x0e632a98, 0xfe6f8c52)}, {TOBN(0x89ccd116, 0xc5f0c284), TOBN(0xf51088af, 0x2d987c62), TOBN(0x2a2bccda, 0x4c2de6cf), TOBN(0x810f9efe, 0xf679f0f9)}}, {{TOBN(0xb0f394b9, 0x7ffe4b3e), TOBN(0x0b691d21, 0xe5fa5d21), TOBN(0xb0bd7747, 0x9dfbbc75), TOBN(0xd2830fda, 0xfaf78b00)}, {TOBN(0xf78c249c, 0x52434f57), TOBN(0x4b1f7545, 0x98096dab), TOBN(0x73bf6f94, 0x8ff8c0b3), TOBN(0x34aef03d, 0x454e134c)}}, {{TOBN(0xf8d151f4, 0xb7ac7ec5), TOBN(0xd6ceb95a, 0xe50da7d5), TOBN(0xa1b492b0, 0xdc3a0eb8), TOBN(0x75157b69, 0xb3dd2863)}, {TOBN(0xe2c4c74e, 0xc5413d62), TOBN(0xbe329ff7, 0xbc5fc4c7), TOBN(0x835a2aea, 0x60fa9dda), TOBN(0xf117f5ad, 0x7445cb87)}}, {{TOBN(0xae8317f4, 0xb0166f7a), TOBN(0xfbd3e3f7, 0xceec74e6), TOBN(0xfdb516ac, 0xe0874bfd), TOBN(0x3d846019, 0xc681f3a3)}, {TOBN(0x0b12ee5c, 0x7c1620b0), TOBN(0xba68b4dd, 0x2b63c501), TOBN(0xac03cd32, 0x6668c51e), TOBN(0x2a6279f7, 0x4e0bcb5b)}}, {{TOBN(0x17bd69b0, 0x6ae85c10), TOBN(0x72946979, 0x1dfdd3a6), TOBN(0xd9a03268, 0x2c078bec), TOBN(0x41c6a658, 0xbfd68a52)}, {TOBN(0xcdea1024, 0x0e023900), TOBN(0xbaeec121, 0xb10d144d), TOBN(0x5a600e74, 0x058ab8dc), 
TOBN(0x1333af21, 0xbb89ccdd)}}, {{TOBN(0xdf25eae0, 0x3aaba1f1), TOBN(0x2cada16e, 0x3b7144cf), TOBN(0x657ee27d, 0x71ab98bc), TOBN(0x99088b4c, 0x7a6fc96e)}, {TOBN(0x05d5c0a0, 0x3549dbd4), TOBN(0x42cbdf8f, 0xf158c3ac), TOBN(0x3fb6b3b0, 0x87edd685), TOBN(0x22071cf6, 0x86f064d0)}}, {{TOBN(0xd2d6721f, 0xff2811e5), TOBN(0xdb81b703, 0xfe7fae8c), TOBN(0x3cfb74ef, 0xd3f1f7bb), TOBN(0x0cdbcd76, 0x16cdeb5d)}, {TOBN(0x4f39642a, 0x566a808c), TOBN(0x02b74454, 0x340064d6), TOBN(0xfabbadca, 0x0528fa6f), TOBN(0xe4c3074c, 0xd3fc0bb6)}}, {{TOBN(0xb32cb8b0, 0xb796d219), TOBN(0xc3e95f4f, 0x34741dd9), TOBN(0x87212125, 0x68edf6f5), TOBN(0x7a03aee4, 0xa2b9cb8e)}, {TOBN(0x0cd3c376, 0xf53a89aa), TOBN(0x0d8af9b1, 0x948a28dc), TOBN(0xcf86a3f4, 0x902ab04f), TOBN(0x8aacb62a, 0x7f42002d)}}, {{TOBN(0x106985eb, 0xf62ffd52), TOBN(0xe670b54e, 0x5797bf10), TOBN(0x4b405209, 0xc5e30aef), TOBN(0x12c97a20, 0x4365b5e9)}, {TOBN(0x104646ce, 0x1fe32093), TOBN(0x13cb4ff6, 0x3907a8c9), TOBN(0x8b9f30d1, 0xd46e726b), TOBN(0xe1985e21, 0xaba0f499)}}, {{TOBN(0xc573dea9, 0x10a230cd), TOBN(0x24f46a93, 0xcd30f947), TOBN(0xf2623fcf, 0xabe2010a), TOBN(0x3f278cb2, 0x73f00e4f)}, {TOBN(0xed55c67d, 0x50b920eb), TOBN(0xf1cb9a2d, 0x8e760571), TOBN(0x7c50d109, 0x0895b709), TOBN(0x4207cf07, 0x190d4369)}}, {{TOBN(0x3b027e81, 0xc4127fe1), TOBN(0xa9f8b9ad, 0x3ae9c566), TOBN(0x5ab10851, 0xacbfbba5), TOBN(0xa747d648, 0x569556f5)}, {TOBN(0xcc172b5c, 0x2ba97bf7), TOBN(0x15e0f77d, 0xbcfa3324), TOBN(0xa345b797, 0x7686279d), TOBN(0x5a723480, 0xe38003d3)}}, {{TOBN(0xfd8e139f, 0x8f5fcda8), TOBN(0xf3e558c4, 0xbdee5bfd), TOBN(0xd76cbaf4, 0xe33f9f77), TOBN(0x3a4c97a4, 0x71771969)}, {TOBN(0xda27e84b, 0xf6dce6a7), TOBN(0xff373d96, 0x13e6c2d1), TOBN(0xf115193c, 0xd759a6e9), TOBN(0x3f9b7025, 0x63d2262c)}}, {{TOBN(0xd9764a31, 0x317cd062), TOBN(0x30779d8e, 0x199f8332), TOBN(0xd8074106, 0x16b11b0b), TOBN(0x7917ab9f, 0x78aeaed8)}, {TOBN(0xb67a9cbe, 0x28fb1d8e), TOBN(0x2e313563, 0x136eda33), TOBN(0x010b7069, 0xa371a86c), TOBN(0x44d90fa2, 0x6744e6b7)}}, 
{{TOBN(0x68190867, 0xd6b3e243), TOBN(0x9fe6cd9d, 0x59048c48), TOBN(0xb900b028, 0x95731538), TOBN(0xa012062f, 0x32cae04f)}, {TOBN(0x8107c8bc, 0x9399d082), TOBN(0x47e8c54a, 0x41df12e2), TOBN(0x14ba5117, 0xb6ef3f73), TOBN(0x22260bea, 0x81362f0b)}}, {{TOBN(0x90ea261e, 0x1a18cc20), TOBN(0x2192999f, 0x2321d636), TOBN(0xef64d314, 0xe311b6a0), TOBN(0xd7401e4c, 0x3b54a1f5)}, {TOBN(0x19019983, 0x6fbca2ba), TOBN(0x46ad3293, 0x8fbffc4b), TOBN(0xa142d3f6, 0x3786bf40), TOBN(0xeb5cbc26, 0xb67039fc)}}, {{TOBN(0x9cb0ae6c, 0x252bd479), TOBN(0x05e0f88a, 0x12b5848f), TOBN(0x78f6d2b2, 0xa5c97663), TOBN(0x6f6e149b, 0xc162225c)}, {TOBN(0xe602235c, 0xde601a89), TOBN(0xd17bbe98, 0xf373be1f), TOBN(0xcaf49a5b, 0xa8471827), TOBN(0x7e1a0a85, 0x18aaa116)}}, {{TOBN(0x6c833196, 0x270580c3), TOBN(0x1e233839, 0xf1c98a14), TOBN(0x67b2f7b4, 0xae34e0a5), TOBN(0x47ac8745, 0xd8ce7289)}, {TOBN(0x2b74779a, 0x100dd467), TOBN(0x274a4337, 0x4ee50d09), TOBN(0x603dcf13, 0x83608bc9), TOBN(0xcd9da6c3, 0xc89e8388)}}, {{TOBN(0x2660199f, 0x355116ac), TOBN(0xcc38bb59, 0xb6d18eed), TOBN(0x3075f31f, 0x2f4bc071), TOBN(0x9774457f, 0x265dc57e)}, {TOBN(0x06a6a9c8, 0xc6db88bb), TOBN(0x6429d07f, 0x4ec98e04), TOBN(0x8d05e57b, 0x05ecaa8b), TOBN(0x20f140b1, 0x7872ea7b)}}, {{TOBN(0xdf8c0f09, 0xca494693), TOBN(0x48d3a020, 0xf252e909), TOBN(0x4c5c29af, 0x57b14b12), TOBN(0x7e6fa37d, 0xbf47ad1c)}, {TOBN(0x66e7b506, 0x49a0c938), TOBN(0xb72c0d48, 0x6be5f41f), TOBN(0x6a6242b8, 0xb2359412), TOBN(0xcd35c774, 0x8e859480)}}, {{TOBN(0x12536fea, 0x87baa627), TOBN(0x58c1fec1, 0xf72aa680), TOBN(0x6c29b637, 0x601e5dc9), TOBN(0x9e3c3c1c, 0xde9e01b9)}, {TOBN(0xefc8127b, 0x2bcfe0b0), TOBN(0x35107102, 0x2a12f50d), TOBN(0x6ccd6cb1, 0x4879b397), TOBN(0xf792f804, 0xf8a82f21)}}, {{TOBN(0x509d4804, 0xa9b46402), TOBN(0xedddf85d, 0xc10f0850), TOBN(0x928410dc, 0x4b6208aa), TOBN(0xf6229c46, 0x391012dc)}, {TOBN(0xc5a7c41e, 0x7727b9b6), TOBN(0x289e4e4b, 0xaa444842), TOBN(0x049ba1d9, 0xe9a947ea), TOBN(0x44f9e47f, 0x83c8debc)}}, {{TOBN(0xfa77a1fe, 0x611f8b8e), 
TOBN(0xfd2e416a, 0xf518f427), TOBN(0xc5fffa70, 0x114ebac3), TOBN(0xfe57c4e9, 0x5d89697b)}, {TOBN(0xfdd053ac, 0xb1aaf613), TOBN(0x31df210f, 0xea585a45), TOBN(0x318cc10e, 0x24985034), TOBN(0x1a38efd1, 0x5f1d6130)}}, {{TOBN(0xbf86f237, 0x0b1e9e21), TOBN(0xb258514d, 0x1dbe88aa), TOBN(0x1e38a588, 0x90c1baf9), TOBN(0x2936a01e, 0xbdb9b692)}, {TOBN(0xd576de98, 0x6dd5b20c), TOBN(0xb586bf71, 0x70f98ecf), TOBN(0xcccf0f12, 0xc42d2fd7), TOBN(0x8717e61c, 0xfb35bd7b)}}, {{TOBN(0x8b1e5722, 0x35e6fc06), TOBN(0x3477728f, 0x0b3e13d5), TOBN(0x150c294d, 0xaa8a7372), TOBN(0xc0291d43, 0x3bfa528a)}, {TOBN(0xc6c8bc67, 0xcec5a196), TOBN(0xdeeb31e4, 0x5c2e8a7c), TOBN(0xba93e244, 0xfb6e1c51), TOBN(0xb9f8b71b, 0x2e28e156)}}, {{TOBN(0xce65a287, 0x968a2ab9), TOBN(0xe3c5ce69, 0x46bbcb1f), TOBN(0xf8c835b9, 0xe7ae3f30), TOBN(0x16bbee26, 0xff72b82b)}, {TOBN(0x665e2017, 0xfd42cd22), TOBN(0x1e139970, 0xf8b1d2a0), TOBN(0x125cda29, 0x79204932), TOBN(0x7aee94a5, 0x49c3bee5)}}, {{TOBN(0x68c70160, 0x89821a66), TOBN(0xf7c37678, 0x8f981669), TOBN(0xd90829fc, 0x48cc3645), TOBN(0x346af049, 0xd70addfc)}, {TOBN(0x2057b232, 0x370bf29c), TOBN(0xf90c73ce, 0x42e650ee), TOBN(0xe03386ea, 0xa126ab90), TOBN(0x0e266e7e, 0x975a087b)}}, {{TOBN(0x80578eb9, 0x0fca65d9), TOBN(0x7e2989ea, 0x16af45b8), TOBN(0x7438212d, 0xcac75a4e), TOBN(0x38c7ca39, 0x4fef36b8)}, {TOBN(0x8650c494, 0xd402676a), TOBN(0x26ab5a66, 0xf72c7c48), TOBN(0x4e6cb426, 0xce3a464e), TOBN(0xf8f99896, 0x2b72f841)}}, {{TOBN(0x8c318491, 0x1a335cc8), TOBN(0x563459ba, 0x6a5913e4), TOBN(0x1b920d61, 0xc7b32919), TOBN(0x805ab8b6, 0xa02425ad)}, {TOBN(0x2ac512da, 0x8d006086), TOBN(0x6ca4846a, 0xbcf5c0fd), TOBN(0xafea51d8, 0xac2138d7), TOBN(0xcb647545, 0x344cd443)}}, {{TOBN(0x0429ee8f, 0xbd7d9040), TOBN(0xee66a2de, 0x819b9c96), TOBN(0x54f9ec25, 0xdea7d744), TOBN(0x2ffea642, 0x671721bb)}, {TOBN(0x4f19dbd1, 0x114344ea), TOBN(0x04304536, 0xfd0dbc8b), TOBN(0x014b50aa, 0x29ec7f91), TOBN(0xb5fc22fe, 0xbb06014d)}}, {{TOBN(0x60d963a9, 0x1ee682e0), TOBN(0xdf48abc0, 0xfe85c727), 
TOBN(0x0cadba13, 0x2e707c2d), TOBN(0xde608d3a, 0xa645aeff)}, {TOBN(0x05f1c28b, 0xedafd883), TOBN(0x3c362ede, 0xbd94de1f), TOBN(0x8dd0629d, 0x13593e41), TOBN(0x0a5e736f, 0x766d6eaf)}}, {{TOBN(0xbfa92311, 0xf68cf9d1), TOBN(0xa4f9ef87, 0xc1797556), TOBN(0x10d75a1f, 0x5601c209), TOBN(0x651c374c, 0x09b07361)}, {TOBN(0x49950b58, 0x88b5cead), TOBN(0x0ef00058, 0x6fa9dbaa), TOBN(0xf51ddc26, 0x4e15f33a), TOBN(0x1f8b5ca6, 0x2ef46140)}}, {{TOBN(0x343ac0a3, 0xee9523f0), TOBN(0xbb75eab2, 0x975ea978), TOBN(0x1bccf332, 0x107387f4), TOBN(0x790f9259, 0x9ab0062e)}, {TOBN(0xf1a363ad, 0x1e4f6a5f), TOBN(0x06e08b84, 0x62519a50), TOBN(0x60915187, 0x7265f1ee), TOBN(0x6a80ca34, 0x93ae985e)}}, {{TOBN(0x81b29768, 0xaaba4864), TOBN(0xb13cabf2, 0x8d52a7d6), TOBN(0xb5c36348, 0x8ead03f1), TOBN(0xc932ad95, 0x81c7c1c0)}, {TOBN(0x5452708e, 0xcae1e27b), TOBN(0x9dac4269, 0x1b0df648), TOBN(0x233e3f0c, 0xdfcdb8bc), TOBN(0xe6ceccdf, 0xec540174)}}, {{TOBN(0xbd0d845e, 0x95081181), TOBN(0xcc8a7920, 0x699355d5), TOBN(0x111c0f6d, 0xc3b375a8), TOBN(0xfd95bc6b, 0xfd51e0dc)}, {TOBN(0x4a106a26, 0x6888523a), TOBN(0x4d142bd6, 0xcb01a06d), TOBN(0x79bfd289, 0xadb9b397), TOBN(0x0bdbfb94, 0xe9863914)}}, {{TOBN(0x29d8a229, 0x1660f6a6), TOBN(0x7f6abcd6, 0x551c042d), TOBN(0x13039deb, 0x0ac3ffe8), TOBN(0xa01be628, 0xec8523fb)}, {TOBN(0x6ea34103, 0x0ca1c328), TOBN(0xc74114bd, 0xb903928e), TOBN(0x8aa4ff4e, 0x9e9144b0), TOBN(0x7064091f, 0x7f9a4b17)}}, {{TOBN(0xa3f4f521, 0xe447f2c4), TOBN(0x81b8da7a, 0x604291f0), TOBN(0xd680bc46, 0x7d5926de), TOBN(0x84f21fd5, 0x34a1202f)}, {TOBN(0x1d1e3181, 0x4e9df3d8), TOBN(0x1ca4861a, 0x39ab8d34), TOBN(0x809ddeec, 0x5b19aa4a), TOBN(0x59f72f7e, 0x4d329366)}}, {{TOBN(0xa2f93f41, 0x386d5087), TOBN(0x40bf739c, 0xdd67d64f), TOBN(0xb4494205, 0x66702158), TOBN(0xc33c65be, 0x73b1e178)}, {TOBN(0xcdcd657c, 0x38ca6153), TOBN(0x97f4519a, 0xdc791976), TOBN(0xcc7c7f29, 0xcd6e1f39), TOBN(0x38de9cfb, 0x7e3c3932)}}, {{TOBN(0xe448eba3, 0x7b793f85), TOBN(0xe9f8dbf9, 0xf067e914), TOBN(0xc0390266, 0xf114ae87), 
TOBN(0x39ed75a7, 0xcd6a8e2a)}, {TOBN(0xadb14848, 0x7ffba390), TOBN(0x67f8cb8b, 0x6af9bc09), TOBN(0x322c3848, 0x9c7476db), TOBN(0xa320fecf, 0x52a538d6)}}, {{TOBN(0xe0493002, 0xb2aced2b), TOBN(0xdfba1809, 0x616bd430), TOBN(0x531c4644, 0xc331be70), TOBN(0xbc04d32e, 0x90d2e450)}, {TOBN(0x1805a0d1, 0x0f9f142d), TOBN(0x2c44a0c5, 0x47ee5a23), TOBN(0x31875a43, 0x3989b4e3), TOBN(0x6b1949fd, 0x0c063481)}}, {{TOBN(0x2dfb9e08, 0xbe0f4492), TOBN(0x3ff0da03, 0xe9d5e517), TOBN(0x03dbe9a1, 0xf79466a8), TOBN(0x0b87bcd0, 0x15ea9932)}, {TOBN(0xeb64fc83, 0xab1f58ab), TOBN(0x6d9598da, 0x817edc8a), TOBN(0x699cff66, 0x1d3b67e5), TOBN(0x645c0f29, 0x92635853)}}, {{TOBN(0x253cdd82, 0xeabaf21c), TOBN(0x82b9602a, 0x2241659e), TOBN(0x2cae07ec, 0x2d9f7091), TOBN(0xbe4c720c, 0x8b48cd9b)}, {TOBN(0x6ce5bc03, 0x6f08d6c9), TOBN(0x36e8a997, 0xaf10bf40), TOBN(0x83422d21, 0x3e10ff12), TOBN(0x7b26d3eb, 0xbcc12494)}}, {{TOBN(0xb240d2d0, 0xc9469ad6), TOBN(0xc4a11b4d, 0x30afa05b), TOBN(0x4b604ace, 0xdd6ba286), TOBN(0x18486600, 0x3ee2864c)}, {TOBN(0x5869d6ba, 0x8d9ce5be), TOBN(0x0d8f68c5, 0xff4bfb0d), TOBN(0xb69f210b, 0x5700cf73), TOBN(0x61f6653a, 0x6d37c135)}}, {{TOBN(0xff3d432b, 0x5aff5a48), TOBN(0x0d81c4b9, 0x72ba3a69), TOBN(0xee879ae9, 0xfa1899ef), TOBN(0xbac7e2a0, 0x2d6acafd)}, {TOBN(0xd6d93f6c, 0x1c664399), TOBN(0x4c288de1, 0x5bcb135d), TOBN(0x83031dab, 0x9dab7cbf), TOBN(0xfe23feb0, 0x3abbf5f0)}}, {{TOBN(0x9f1b2466, 0xcdedca85), TOBN(0x140bb710, 0x1a09538c), TOBN(0xac8ae851, 0x5e11115d), TOBN(0x0d63ff67, 0x6f03f59e)}, {TOBN(0x755e5551, 0x7d234afb), TOBN(0x61c2db4e, 0x7e208fc1), TOBN(0xaa9859ce, 0xf28a4b5d), TOBN(0xbdd6d4fc, 0x34af030f)}}, {{TOBN(0xd1c4a26d, 0x3be01cb1), TOBN(0x9ba14ffc, 0x243aa07c), TOBN(0xf95cd3a9, 0xb2503502), TOBN(0xe379bc06, 0x7d2a93ab)}, {TOBN(0x3efc18e9, 0xd4ca8d68), TOBN(0x083558ec, 0x80bb412a), TOBN(0xd903b940, 0x9645a968), TOBN(0xa499f0b6, 0x9ba6054f)}}, {{TOBN(0x208b573c, 0xb8349abe), TOBN(0x3baab3e5, 0x30b4fc1c), TOBN(0x87e978ba, 0xcb524990), TOBN(0x3524194e, 0xccdf0e80)}, 
{TOBN(0x62711725, 0x7d4bcc42), TOBN(0xe90a3d9b, 0xb90109ba), TOBN(0x3b1bdd57, 0x1323e1e0), TOBN(0xb78e9bd5, 0x5eae1599)}}, {{TOBN(0x0794b746, 0x9e03d278), TOBN(0x80178605, 0xd70e6297), TOBN(0x171792f8, 0x99c97855), TOBN(0x11b393ee, 0xf5a86b5c)}, {TOBN(0x48ef6582, 0xd8884f27), TOBN(0xbd44737a, 0xbf19ba5f), TOBN(0x8698de4c, 0xa42062c6), TOBN(0x8975eb80, 0x61ce9c54)}}, {{TOBN(0xd50e57c7, 0xd7fe71f3), TOBN(0x15342190, 0xbc97ce38), TOBN(0x51bda2de, 0x4df07b63), TOBN(0xba12aeae, 0x200eb87d)}, {TOBN(0xabe135d2, 0xa9b4f8f6), TOBN(0x04619d65, 0xfad6d99c), TOBN(0x4a6683a7, 0x7994937c), TOBN(0x7a778c8b, 0x6f94f09a)}}, {{TOBN(0x8c508623, 0x20a71b89), TOBN(0x241a2aed, 0x1c229165), TOBN(0x352be595, 0xaaf83a99), TOBN(0x9fbfee7f, 0x1562bac8)}, {TOBN(0xeaf658b9, 0x5c4017e3), TOBN(0x1dc7f9e0, 0x15120b86), TOBN(0xd84f13dd, 0x4c034d6f), TOBN(0x283dd737, 0xeaea3038)}}, {{TOBN(0x197f2609, 0xcd85d6a2), TOBN(0x6ebbc345, 0xfae60177), TOBN(0xb80f031b, 0x4e12fede), TOBN(0xde55d0c2, 0x07a2186b)}, {TOBN(0x1fb3e37f, 0x24dcdd5a), TOBN(0x8d602da5, 0x7ed191fb), TOBN(0x108fb056, 0x76023e0d), TOBN(0x70178c71, 0x459c20c0)}}, {{TOBN(0xfad5a386, 0x3fe54cf0), TOBN(0xa4a3ec4f, 0x02bbb475), TOBN(0x1aa5ec20, 0x919d94d7), TOBN(0x5d3b63b5, 0xa81e4ab3)}, {TOBN(0x7fa733d8, 0x5ad3d2af), TOBN(0xfbc586dd, 0xd1ac7a37), TOBN(0x282925de, 0x40779614), TOBN(0xfe0ffffb, 0xe74a242a)}}, {{TOBN(0x3f39e67f, 0x906151e5), TOBN(0xcea27f5f, 0x55e10649), TOBN(0xdca1d4e1, 0xc17cf7b7), TOBN(0x0c326d12, 0x2fe2362d)}, {TOBN(0x05f7ac33, 0x7dd35df3), TOBN(0x0c3b7639, 0xc396dbdf), TOBN(0x0912f5ac, 0x03b7db1c), TOBN(0x9dea4b70, 0x5c9ed4a9)}}, {{TOBN(0x475e6e53, 0xaae3f639), TOBN(0xfaba0e7c, 0xfc278bac), TOBN(0x16f9e221, 0x9490375f), TOBN(0xaebf9746, 0xa5a7ed0a)}, {TOBN(0x45f9af3f, 0xf41ad5d6), TOBN(0x03c4623c, 0xb2e99224), TOBN(0x82c5bb5c, 0xb3cf56aa), TOBN(0x64311819, 0x34567ed3)}}, {{TOBN(0xec57f211, 0x8be489ac), TOBN(0x2821895d, 0xb9a1104b), TOBN(0x610dc875, 0x6064e007), TOBN(0x8e526f3f, 0x5b20d0fe)}, {TOBN(0x6e71ca77, 0x5b645aee), 
TOBN(0x3d1dcb9f, 0x800e10ff), TOBN(0x36b51162, 0x189cf6de), TOBN(0x2c5a3e30, 0x6bb17353)}}, {{TOBN(0xc186cd3e, 0x2a6c6fbf), TOBN(0xa74516fa, 0x4bf97906), TOBN(0x5b4b8f4b, 0x279d6901), TOBN(0x0c4e57b4, 0x2b573743)}, {TOBN(0x75fdb229, 0xb6e386b6), TOBN(0xb46793fd, 0x99deac27), TOBN(0xeeec47ea, 0xcf712629), TOBN(0xe965f3c4, 0xcbc3b2dd)}}, {{TOBN(0x8dd1fb83, 0x425c6559), TOBN(0x7fc00ee6, 0x0af06fda), TOBN(0xe98c9225, 0x33d956df), TOBN(0x0f1ef335, 0x4fbdc8a2)}, {TOBN(0x2abb5145, 0xb79b8ea2), TOBN(0x40fd2945, 0xbdbff288), TOBN(0x6a814ac4, 0xd7185db7), TOBN(0xc4329d6f, 0xc084609a)}}, {{TOBN(0xc9ba7b52, 0xed1be45d), TOBN(0x891dd20d, 0xe4cd2c74), TOBN(0x5a4d4a7f, 0x824139b1), TOBN(0x66c17716, 0xb873c710)}, {TOBN(0x5e5bc141, 0x2843c4e0), TOBN(0xd5ac4817, 0xb97eb5bf), TOBN(0xc0f8af54, 0x450c95c7), TOBN(0xc91b3fa0, 0x318406c5)}}, {{TOBN(0x360c340a, 0xab9d97f8), TOBN(0xfb57bd07, 0x90a2d611), TOBN(0x4339ae3c, 0xa6a6f7e5), TOBN(0x9c1fcd2a, 0x2feb8a10)}, {TOBN(0x972bcca9, 0xc7ea7432), TOBN(0x1b0b924c, 0x308076f6), TOBN(0x80b2814a, 0x2a5b4ca5), TOBN(0x2f78f55b, 0x61ef3b29)}}, {{TOBN(0xf838744a, 0xc18a414f), TOBN(0xc611eaae, 0x903d0a86), TOBN(0x94dabc16, 0x2a453f55), TOBN(0xe6f2e3da, 0x14efb279)}, {TOBN(0x5b7a6017, 0x9320dc3c), TOBN(0x692e382f, 0x8df6b5a4), TOBN(0x3f5e15e0, 0x2d40fa90), TOBN(0xc87883ae, 0x643dd318)}}, {{TOBN(0x511053e4, 0x53544774), TOBN(0x834d0ecc, 0x3adba2bc), TOBN(0x4215d7f7, 0xbae371f5), TOBN(0xfcfd57bf, 0x6c8663bc)}, {TOBN(0xded2383d, 0xd6901b1d), TOBN(0x3b49fbb4, 0xb5587dc3), TOBN(0xfd44a08d, 0x07625f62), TOBN(0x3ee4d65b, 0x9de9b762)}}}, {{{TOBN(0x64e5137d, 0x0d63d1fa), TOBN(0x658fc052, 0x02a9d89f), TOBN(0x48894874, 0x50436309), TOBN(0xe9ae30f8, 0xd598da61)}, {TOBN(0x2ed710d1, 0x818baf91), TOBN(0xe27e9e06, 0x8b6a0c20), TOBN(0x1e28dcfb, 0x1c1a6b44), TOBN(0x883acb64, 0xd6ac57dc)}}, {{TOBN(0x8735728d, 0xc2c6ff70), TOBN(0x79d6122f, 0xc5dc2235), TOBN(0x23f5d003, 0x19e277f9), TOBN(0x7ee84e25, 0xdded8cc7)}, {TOBN(0x91a8afb0, 0x63cd880a), TOBN(0x3f3ea7c6, 0x3574af60), 
TOBN(0x0cfcdc84, 0x02de7f42), TOBN(0x62d0792f, 0xb31aa152)}}, {{TOBN(0x8e1b4e43, 0x8a5807ce), TOBN(0xad283893, 0xe4109a7e), TOBN(0xc30cc9cb, 0xafd59dda), TOBN(0xf65f36c6, 0x3d8d8093)}, {TOBN(0xdf31469e, 0xa60d32b2), TOBN(0xee93df4b, 0x3e8191c8), TOBN(0x9c1017c5, 0x355bdeb5), TOBN(0xd2623185, 0x8616aa28)}}, {{TOBN(0xb02c83f9, 0xdec31a21), TOBN(0x988c8b23, 0x6ad9d573), TOBN(0x53e983ae, 0xa57be365), TOBN(0xe968734d, 0x646f834e)}, {TOBN(0x9137ea8f, 0x5da6309b), TOBN(0x10f3a624, 0xc1f1ce16), TOBN(0x782a9ea2, 0xca440921), TOBN(0xdf94739e, 0x5b46f1b5)}}, {{TOBN(0x9f9be006, 0xcce85c9b), TOBN(0x360e70d6, 0xa4c7c2d3), TOBN(0x2cd5beea, 0xaefa1e60), TOBN(0x64cf63c0, 0x8c3d2b6d)}, {TOBN(0xfb107fa3, 0xe1cf6f90), TOBN(0xb7e937c6, 0xd5e044e6), TOBN(0x74e8ca78, 0xce34db9f), TOBN(0x4f8b36c1, 0x3e210bd0)}}, {{TOBN(0x1df165a4, 0x34a35ea8), TOBN(0x3418e0f7, 0x4d4412f6), TOBN(0x5af1f8af, 0x518836c3), TOBN(0x42ceef4d, 0x130e1965)}, {TOBN(0x5560ca0b, 0x543a1957), TOBN(0xc33761e5, 0x886cb123), TOBN(0x66624b1f, 0xfe98ed30), TOBN(0xf772f4bf, 0x1090997d)}}, {{TOBN(0xf4e540bb, 0x4885d410), TOBN(0x7287f810, 0x9ba5f8d7), TOBN(0x22d0d865, 0xde98dfb1), TOBN(0x49ff51a1, 0xbcfbb8a3)}, {TOBN(0xb6b6fa53, 0x6bc3012e), TOBN(0x3d31fd72, 0x170d541d), TOBN(0x8018724f, 0x4b0f4966), TOBN(0x79e7399f, 0x87dbde07)}}, {{TOBN(0x56f8410e, 0xf4f8b16a), TOBN(0x97241afe, 0xc47b266a), TOBN(0x0a406b8e, 0x6d9c87c1), TOBN(0x803f3e02, 0xcd42ab1b)}, {TOBN(0x7f0309a8, 0x04dbec69), TOBN(0xa83b85f7, 0x3bbad05f), TOBN(0xc6097273, 0xad8e197f), TOBN(0xc097440e, 0x5067adc1)}}, {{TOBN(0x730eafb6, 0x3524ff16), TOBN(0xd7f9b51e, 0x823fc6ce), TOBN(0x27bd0d32, 0x443e4ac0), TOBN(0x40c59ad9, 0x4d66f217)}, {TOBN(0x6c33136f, 0x17c387a4), TOBN(0x5043b8d5, 0xeb86804d), TOBN(0x74970312, 0x675a73c9), TOBN(0x838fdb31, 0xf16669b6)}}, {{TOBN(0xc507b6dd, 0x418e7ddd), TOBN(0x39888d93, 0x472f19d6), TOBN(0x7eae26be, 0x0c27eb4d), TOBN(0x17b53ed3, 0xfbabb884)}, {TOBN(0xfc27021b, 0x2b01ae4f), TOBN(0x88462e87, 0xcf488682), TOBN(0xbee096ec, 0x215e2d87), 
TOBN(0xeb2fea9a, 0xd242e29b)}}, {{TOBN(0x5d985b5f, 0xb821fc28), TOBN(0x89d2e197, 0xdc1e2ad2), TOBN(0x55b566b8, 0x9030ba62), TOBN(0xe3fd41b5, 0x4f41b1c6)}, {TOBN(0xb738ac2e, 0xb9a96d61), TOBN(0x7f8567ca, 0x369443f4), TOBN(0x8698622d, 0xf803a440), TOBN(0x2b586236, 0x8fe2f4dc)}}, {{TOBN(0xbbcc00c7, 0x56b95bce), TOBN(0x5ec03906, 0x616da680), TOBN(0x79162ee6, 0x72214252), TOBN(0x43132b63, 0x86a892d2)}, {TOBN(0x4bdd3ff2, 0x2f3263bf), TOBN(0xd5b3733c, 0x9cd0a142), TOBN(0x592eaa82, 0x44415ccb), TOBN(0x663e8924, 0x8d5474ea)}}, {{TOBN(0x8058a25e, 0x5236344e), TOBN(0x82e8df9d, 0xbda76ee6), TOBN(0xdcf6efd8, 0x11cc3d22), TOBN(0x00089cda, 0x3b4ab529)}, {TOBN(0x91d3a071, 0xbd38a3db), TOBN(0x4ea97fc0, 0xef72b925), TOBN(0x0c9fc15b, 0xea3edf75), TOBN(0x5a6297cd, 0xa4348ed3)}}, {{TOBN(0x0d38ab35, 0xce7c42d4), TOBN(0x9fd493ef, 0x82feab10), TOBN(0x46056b6d, 0x82111b45), TOBN(0xda11dae1, 0x73efc5c3)}, {TOBN(0xdc740278, 0x5545a7fb), TOBN(0xbdb2601c, 0x40d507e6), TOBN(0x121dfeeb, 0x7066fa58), TOBN(0x214369a8, 0x39ae8c2a)}}, {{TOBN(0x195709cb, 0x06e0956c), TOBN(0x4c9d254f, 0x010cd34b), TOBN(0xf51e13f7, 0x0471a532), TOBN(0xe19d6791, 0x1e73054d)}, {TOBN(0xf702a628, 0xdb5c7be3), TOBN(0xc7141218, 0xb24dde05), TOBN(0xdc18233c, 0xf29b2e2e), TOBN(0x3a6bd1e8, 0x85342dba)}}, {{TOBN(0x3f747fa0, 0xb311898c), TOBN(0xe2a272e4, 0xcd0eac65), TOBN(0x4bba5851, 0xf914d0bc), TOBN(0x7a1a9660, 0xc4a43ee3)}, {TOBN(0xe5a367ce, 0xa1c8cde9), TOBN(0x9d958ba9, 0x7271abe3), TOBN(0xf3ff7eb6, 0x3d1615cd), TOBN(0xa2280dce, 0xf5ae20b0)}}, {{TOBN(0x56dba5c1, 0xcf640147), TOBN(0xea5a2e3d, 0x5e83d118), TOBN(0x04cd6b6d, 0xda24c511), TOBN(0x1c0f4671, 0xe854d214)}, {TOBN(0x91a6b7a9, 0x69565381), TOBN(0xdc966240, 0xdecf1f5b), TOBN(0x1b22d21c, 0xfcf5d009), TOBN(0x2a05f641, 0x9021dbd5)}}, {{TOBN(0x8c0ed566, 0xd4312483), TOBN(0x5179a95d, 0x643e216f), TOBN(0xcc185fec, 0x17044493), TOBN(0xb3063339, 0x54991a21)}, {TOBN(0xd801ecdb, 0x0081a726), TOBN(0x0149b0c6, 0x4fa89bbb), TOBN(0xafe9065a, 0x4391b6b9), TOBN(0xedc92786, 0xd633f3a3)}}, 
{{TOBN(0xe408c24a, 0xae6a8e13), TOBN(0x85833fde, 0x9f3897ab), TOBN(0x43800e7e, 0xd81a0715), TOBN(0xde08e346, 0xb44ffc5f)}, {TOBN(0x7094184c, 0xcdeff2e0), TOBN(0x49f9387b, 0x165eaed1), TOBN(0x635d6129, 0x777c468a), TOBN(0x8c0dcfd1, 0x538c2dd8)}}, {{TOBN(0xd6d9d9e3, 0x7a6a308b), TOBN(0x62375830, 0x4c2767d3), TOBN(0x874a8bc6, 0xf38cbeb6), TOBN(0xd94d3f1a, 0xccb6fd9e)}, {TOBN(0x92a9735b, 0xba21f248), TOBN(0x272ad0e5, 0x6cd1efb0), TOBN(0x7437b69c, 0x05b03284), TOBN(0xe7f04702, 0x6948c225)}}, {{TOBN(0x8a56c04a, 0xcba2ecec), TOBN(0x0c181270, 0xe3a73e41), TOBN(0x6cb34e9d, 0x03e93725), TOBN(0xf77c8713, 0x496521a9)}, {TOBN(0x94569183, 0xfa7f9f90), TOBN(0xf2e7aa4c, 0x8c9707ad), TOBN(0xced2c9ba, 0x26c1c9a3), TOBN(0x9109fe96, 0x40197507)}}, {{TOBN(0x9ae868a9, 0xe9adfe1c), TOBN(0x3984403d, 0x314e39bb), TOBN(0xb5875720, 0xf2fe378f), TOBN(0x33f901e0, 0xba44a628)}, {TOBN(0xea1125fe, 0x3652438c), TOBN(0xae9ec4e6, 0x9dd1f20b), TOBN(0x1e740d9e, 0xbebf7fbd), TOBN(0x6dbd3ddc, 0x42dbe79c)}}, {{TOBN(0x62082aec, 0xedd36776), TOBN(0xf612c478, 0xe9859039), TOBN(0xa493b201, 0x032f7065), TOBN(0xebd4d8f2, 0x4ff9b211)}, {TOBN(0x3f23a0aa, 0xaac4cb32), TOBN(0xea3aadb7, 0x15ed4005), TOBN(0xacf17ea4, 0xafa27e63), TOBN(0x56125c1a, 0xc11fd66c)}}, {{TOBN(0x266344a4, 0x3794f8dc), TOBN(0xdcca923a, 0x483c5c36), TOBN(0x2d6b6bbf, 0x3f9d10a0), TOBN(0xb320c5ca, 0x81d9bdf3)}, {TOBN(0x620e28ff, 0x47b50a95), TOBN(0x933e3b01, 0xcef03371), TOBN(0xf081bf85, 0x99100153), TOBN(0x183be9a0, 0xc3a8c8d6)}}, {{TOBN(0x4e3ddc5a, 0xd6bbe24d), TOBN(0xc6c74630, 0x53843795), TOBN(0x78193dd7, 0x65ec2d4c), TOBN(0xb8df26cc, 0xcd3c89b2)}, {TOBN(0x98dbe399, 0x5a483f8d), TOBN(0x72d8a957, 0x7dd3313a), TOBN(0x65087294, 0xab0bd375), TOBN(0xfcd89248, 0x7c259d16)}}, {{TOBN(0x8a9443d7, 0x7613aa81), TOBN(0x80100800, 0x85fe6584), TOBN(0x70fc4dbc, 0x7fb10288), TOBN(0xf58280d3, 0xe86beee8)}, {TOBN(0x14fdd82f, 0x7c978c38), TOBN(0xdf1204c1, 0x0de44d7b), TOBN(0xa08a1c84, 0x4160252f), TOBN(0x591554ca, 0xc17646a5)}}, {{TOBN(0x214a37d6, 0xa05bd525), 
TOBN(0x48d5f09b, 0x07957b3c), TOBN(0x0247cdcb, 0xd7109bc9), TOBN(0x40f9e4bb, 0x30599ce7)}, {TOBN(0xc325fa03, 0xf46ad2ec), TOBN(0x00f766cf, 0xc3e3f9ee), TOBN(0xab556668, 0xd43a4577), TOBN(0x68d30a61, 0x3ee03b93)}}, {{TOBN(0x7ddc81ea, 0x77b46a08), TOBN(0xcf5a6477, 0xc7480699), TOBN(0x43a8cb34, 0x6633f683), TOBN(0x1b867e6b, 0x92363c60)}, {TOBN(0x43921114, 0x1f60558e), TOBN(0xcdbcdd63, 0x2f41450e), TOBN(0x7fc04601, 0xcc630e8b), TOBN(0xea7c66d5, 0x97038b43)}}, {{TOBN(0x7259b8a5, 0x04e99fd8), TOBN(0x98a8dd12, 0x4785549a), TOBN(0x0e459a7c, 0x840552e1), TOBN(0xcdfcf4d0, 0x4bb0909e)}, {TOBN(0x34a86db2, 0x53758da7), TOBN(0xe643bb83, 0xeac997e1), TOBN(0x96400bd7, 0x530c5b7e), TOBN(0x9f97af87, 0xb41c8b52)}}, {{TOBN(0x34fc8820, 0xfbeee3f9), TOBN(0x93e53490, 0x49091afd), TOBN(0x764b9be5, 0x9a31f35c), TOBN(0x71f37864, 0x57e3d924)}, {TOBN(0x02fb34e0, 0x943aa75e), TOBN(0xa18c9c58, 0xab8ff6e4), TOBN(0x080f31b1, 0x33cf0d19), TOBN(0x5c9682db, 0x083518a7)}}, {{TOBN(0x873d4ca6, 0xb709c3de), TOBN(0x64a84262, 0x3575b8f0), TOBN(0x6275da1f, 0x020154bb), TOBN(0x97678caa, 0xd17cf1ab)}, {TOBN(0x8779795f, 0x951a95c3), TOBN(0xdd35b163, 0x50fccc08), TOBN(0x32709627, 0x33d8f031), TOBN(0x3c5ab10a, 0x498dd85c)}}, {{TOBN(0xb6c185c3, 0x41dca566), TOBN(0x7de7feda, 0xd8622aa3), TOBN(0x99e84d92, 0x901b6dfb), TOBN(0x30a02b0e, 0x7c4ad288)}, {TOBN(0xc7c81daa, 0x2fd3cf36), TOBN(0xd1319547, 0xdf89e59f), TOBN(0xb2be8184, 0xcd496733), TOBN(0xd5f449eb, 0x93d3412b)}}, {{TOBN(0x7ea41b1b, 0x25fe531d), TOBN(0xf9797432, 0x6a1d5646), TOBN(0x86067f72, 0x2bde501a), TOBN(0xf91481c0, 0x0c85e89c)}, {TOBN(0xca8ee465, 0xf8b05bc6), TOBN(0x1844e1cf, 0x02e83cda), TOBN(0xca82114a, 0xb4dbe33b), TOBN(0x0f9f8769, 0x4eabfde2)}}, {{TOBN(0x4936b1c0, 0x38b27fe2), TOBN(0x63b6359b, 0xaba402df), TOBN(0x40c0ea2f, 0x656bdbab), TOBN(0x9c992a89, 0x6580c39c)}, {TOBN(0x600e8f15, 0x2a60aed1), TOBN(0xeb089ca4, 0xe0bf49df), TOBN(0x9c233d7d, 0x2d42d99a), TOBN(0x648d3f95, 0x4c6bc2fa)}}, {{TOBN(0xdcc383a8, 0xe1add3f3), TOBN(0xf42c0c6a, 0x4f64a348), 
TOBN(0x2abd176f, 0x0030dbdb), TOBN(0x4de501a3, 0x7d6c215e)}, {TOBN(0x4a107c1f, 0x4b9a64bc), TOBN(0xa77f0ad3, 0x2496cd59), TOBN(0xfb78ac62, 0x7688dffb), TOBN(0x7025a2ca, 0x67937d8e)}}, {{TOBN(0xfde8b2d1, 0xd1a8f4e7), TOBN(0xf5b3da47, 0x7354927c), TOBN(0xe48606a3, 0xd9205735), TOBN(0xac477cc6, 0xe177b917)}, {TOBN(0xfb1f73d2, 0xa883239a), TOBN(0xe12572f6, 0xcc8b8357), TOBN(0x9d355e9c, 0xfb1f4f86), TOBN(0x89b795f8, 0xd9f3ec6e)}}, {{TOBN(0x27be56f1, 0xb54398dc), TOBN(0x1890efd7, 0x3fedeed5), TOBN(0x62f77f1f, 0x9c6d0140), TOBN(0x7ef0e314, 0x596f0ee4)}, {TOBN(0x50ca6631, 0xcc61dab3), TOBN(0x4a39801d, 0xf4866e4f), TOBN(0x66c8d032, 0xae363b39), TOBN(0x22c591e5, 0x2ead66aa)}}, {{TOBN(0x954ba308, 0xde02a53e), TOBN(0x2a6c060f, 0xd389f357), TOBN(0xe6cfcde8, 0xfbf40b66), TOBN(0x8e02fc56, 0xc6340ce1)}, {TOBN(0xe4957795, 0x73adb4ba), TOBN(0x7b86122c, 0xa7b03805), TOBN(0x63f83512, 0x0c8e6fa6), TOBN(0x83660ea0, 0x057d7804)}}, {{TOBN(0xbad79105, 0x21ba473c), TOBN(0xb6c50bee, 0xded5389d), TOBN(0xee2caf4d, 0xaa7c9bc0), TOBN(0xd97b8de4, 0x8c4e98a7)}, {TOBN(0xa9f63e70, 0xab3bbddb), TOBN(0x3898aabf, 0x2597815a), TOBN(0x7659af89, 0xac15b3d9), TOBN(0xedf7725b, 0x703ce784)}}, {{TOBN(0x25470fab, 0xe085116b), TOBN(0x04a43375, 0x87285310), TOBN(0x4e39187e, 0xe2bfd52f), TOBN(0x36166b44, 0x7d9ebc74)}, {TOBN(0x92ad433c, 0xfd4b322c), TOBN(0x726aa817, 0xba79ab51), TOBN(0xf96eacd8, 0xc1db15eb), TOBN(0xfaf71e91, 0x0476be63)}}, {{TOBN(0xdd69a640, 0x641fad98), TOBN(0xb7995918, 0x29622559), TOBN(0x03c6daa5, 0xde4199dc), TOBN(0x92cadc97, 0xad545eb4)}, {TOBN(0x1028238b, 0x256534e4), TOBN(0x73e80ce6, 0x8595409a), TOBN(0x690d4c66, 0xd05dc59b), TOBN(0xc95f7b8f, 0x981dee80)}}, {{TOBN(0xf4337014, 0xd856ac25), TOBN(0x441bd9dd, 0xac524dca), TOBN(0x640b3d85, 0x5f0499f5), TOBN(0x39cf84a9, 0xd5fda182)}, {TOBN(0x04e7b055, 0xb2aa95a0), TOBN(0x29e33f0a, 0x0ddf1860), TOBN(0x082e74b5, 0x423f6b43), TOBN(0x217edeb9, 0x0aaa2b0f)}}, {{TOBN(0x58b83f35, 0x83cbea55), TOBN(0xc485ee4d, 0xbc185d70), TOBN(0x833ff03b, 0x1e5f6992), 
TOBN(0xb5b9b9cc, 0xcf0c0dd5)}, {TOBN(0x7caaee8e, 0x4e9e8a50), TOBN(0x462e907b, 0x6269dafd), TOBN(0x6ed5cee9, 0xfbe791c6), TOBN(0x68ca3259, 0xed430790)}}, {{TOBN(0x2b72bdf2, 0x13b5ba88), TOBN(0x60294c8a, 0x35ef0ac4), TOBN(0x9c3230ed, 0x19b99b08), TOBN(0x560fff17, 0x6c2589aa)}, {TOBN(0x552b8487, 0xd6770374), TOBN(0xa373202d, 0x9a56f685), TOBN(0xd3e7f907, 0x45f175d9), TOBN(0x3c2f315f, 0xd080d810)}}, {{TOBN(0x1130e9dd, 0x7b9520e8), TOBN(0xc078f9e2, 0x0af037b5), TOBN(0x38cd2ec7, 0x1e9c104c), TOBN(0x0f684368, 0xc472fe92)}, {TOBN(0xd3f1b5ed, 0x6247e7ef), TOBN(0xb32d33a9, 0x396dfe21), TOBN(0x46f59cf4, 0x4a9aa2c2), TOBN(0x69cd5168, 0xff0f7e41)}}, {{TOBN(0x3f59da0f, 0x4b3234da), TOBN(0xcf0b0235, 0xb4579ebe), TOBN(0x6d1cbb25, 0x6d2476c7), TOBN(0x4f0837e6, 0x9dc30f08)}, {TOBN(0x9a4075bb, 0x906f6e98), TOBN(0x253bb434, 0xc761e7d1), TOBN(0xde2e645f, 0x6e73af10), TOBN(0xb89a4060, 0x0c5f131c)}}, {{TOBN(0xd12840c5, 0xb8cc037f), TOBN(0x3d093a5b, 0x7405bb47), TOBN(0x6202c253, 0x206348b8), TOBN(0xbf5d57fc, 0xc55a3ca7)}, {TOBN(0x89f6c90c, 0x8c3bef48), TOBN(0x23ac7623, 0x5a0a960a), TOBN(0xdfbd3d6b, 0x552b42ab), TOBN(0x3ef22458, 0x132061f6)}}, {{TOBN(0xd74e9bda, 0xc97e6516), TOBN(0x88779360, 0xc230f49e), TOBN(0xa6ec1de3, 0x1e74ea49), TOBN(0x581dcee5, 0x3fb645a2)}, {TOBN(0xbaef2391, 0x8f483f14), TOBN(0x6d2dddfc, 0xd137d13b), TOBN(0x54cde50e, 0xd2743a42), TOBN(0x89a34fc5, 0xe4d97e67)}}, {{TOBN(0x13f1f5b3, 0x12e08ce5), TOBN(0xa80540b8, 0xa7f0b2ca), TOBN(0x854bcf77, 0x01982805), TOBN(0xb8653ffd, 0x233bea04)}, {TOBN(0x8e7b8787, 0x02b0b4c9), TOBN(0x2675261f, 0x9acb170a), TOBN(0x061a9d90, 0x930c14e5), TOBN(0xb59b30e0, 0xdef0abea)}}, {{TOBN(0x1dc19ea6, 0x0200ec7d), TOBN(0xb6f4a3f9, 0x0bce132b), TOBN(0xb8d5de90, 0xf13e27e0), TOBN(0xbaee5ef0, 0x1fade16f)}, {TOBN(0x6f406aaa, 0xe4c6cf38), TOBN(0xab4cfe06, 0xd1369815), TOBN(0x0dcffe87, 0xefd550c6), TOBN(0x9d4f59c7, 0x75ff7d39)}}, {{TOBN(0xb02553b1, 0x51deb6ad), TOBN(0x812399a4, 0xb1877749), TOBN(0xce90f71f, 0xca6006e1), TOBN(0xc32363a6, 0xb02b6e77)}, 
{TOBN(0x02284fbe, 0xdc36c64d), TOBN(0x86c81e31, 0xa7e1ae61), TOBN(0x2576c7e5, 0xb909d94a), TOBN(0x8b6f7d02, 0x818b2bb0)}}, {{TOBN(0xeca3ed07, 0x56faa38a), TOBN(0xa3790e6c, 0x9305bb54), TOBN(0xd784eeda, 0x7bc73061), TOBN(0xbd56d369, 0x6dd50614)}, {TOBN(0xd6575949, 0x229a8aa9), TOBN(0xdcca8f47, 0x4595ec28), TOBN(0x814305c1, 0x06ab4fe6), TOBN(0xc8c39768, 0x24f43f16)}}, {{TOBN(0xe2a45f36, 0x523f2b36), TOBN(0x995c6493, 0x920d93bb), TOBN(0xf8afdab7, 0x90f1632b), TOBN(0x79ebbecd, 0x1c295954)}, {TOBN(0xc7bb3ddb, 0x79592f48), TOBN(0x67216a7b, 0x5f88e998), TOBN(0xd91f098b, 0xbc01193e), TOBN(0xf7d928a5, 0xb1db83fc)}}, {{TOBN(0x55e38417, 0xe991f600), TOBN(0x2a91113e, 0x2981a934), TOBN(0xcbc9d648, 0x06b13bde), TOBN(0xb011b6ac, 0x0755ff44)}, {TOBN(0x6f4cb518, 0x045ec613), TOBN(0x522d2d31, 0xc2f5930a), TOBN(0x5acae1af, 0x382e65de), TOBN(0x57643067, 0x27bc966f)}}, {{TOBN(0x5e12705d, 0x1c7193f0), TOBN(0xf0f32f47, 0x3be8858e), TOBN(0x785c3d7d, 0x96c6dfc7), TOBN(0xd75b4a20, 0xbf31795d)}, {TOBN(0x91acf17b, 0x342659d4), TOBN(0xe596ea34, 0x44f0378f), TOBN(0x4515708f, 0xce52129d), TOBN(0x17387e1e, 0x79f2f585)}}, {{TOBN(0x72cfd2e9, 0x49dee168), TOBN(0x1ae05223, 0x3e2af239), TOBN(0x009e75be, 0x1d94066a), TOBN(0x6cca31c7, 0x38abf413)}, {TOBN(0xb50bd61d, 0x9bc49908), TOBN(0x4a9b4a8c, 0xf5e2bc1e), TOBN(0xeb6cc5f7, 0x946f83ac), TOBN(0x27da93fc, 0xebffab28)}}, {{TOBN(0xea314c96, 0x4821c8c5), TOBN(0x8de49ded, 0xa83c15f4), TOBN(0x7a64cf20, 0x7af33004), TOBN(0x45f1bfeb, 0xc9627e10)}, {TOBN(0x878b0626, 0x54b9df60), TOBN(0x5e4fdc3c, 0xa95c0b33), TOBN(0xe54a37ca, 0xc2035d8e), TOBN(0x9087cda9, 0x80f20b8c)}}, {{TOBN(0x36f61c23, 0x8319ade4), TOBN(0x766f287a, 0xde8cfdf8), TOBN(0x48821948, 0x346f3705), TOBN(0x49a7b853, 0x16e4f4a2)}, {TOBN(0xb9b3f8a7, 0x5cedadfd), TOBN(0x8f562815, 0x8db2a815), TOBN(0xc0b7d554, 0x01f68f95), TOBN(0x12971e27, 0x688a208e)}}, {{TOBN(0xc9f8b696, 0xd0ff34fc), TOBN(0x20824de2, 0x1222718c), TOBN(0x7213cf9f, 0x0c95284d), TOBN(0xe2ad741b, 0xdc158240)}, {TOBN(0x0ee3a6df, 0x54043ccf), 
TOBN(0x16ff479b, 0xd84412b3), TOBN(0xf6c74ee0, 0xdfc98af0), TOBN(0xa78a169f, 0x52fcd2fb)}}, {{TOBN(0xd8ae8746, 0x99c930e9), TOBN(0x1d33e858, 0x49e117a5), TOBN(0x7581fcb4, 0x6624759f), TOBN(0xde50644f, 0x5bedc01d)}, {TOBN(0xbeec5d00, 0xcaf3155e), TOBN(0x672d66ac, 0xbc73e75f), TOBN(0x86b9d8c6, 0x270b01db), TOBN(0xd249ef83, 0x50f55b79)}}, {{TOBN(0x6131d6d4, 0x73978fe3), TOBN(0xcc4e4542, 0x754b00a1), TOBN(0x4e05df05, 0x57dfcfe9), TOBN(0x94b29cdd, 0x51ef6bf0)}, {TOBN(0xe4530cff, 0x9bc7edf2), TOBN(0x8ac236fd, 0xd3da65f3), TOBN(0x0faf7d5f, 0xc8eb0b48), TOBN(0x4d2de14c, 0x660eb039)}}, {{TOBN(0xc006bba7, 0x60430e54), TOBN(0x10a2d0d6, 0xda3289ab), TOBN(0x9c037a5d, 0xd7979c59), TOBN(0x04d1f3d3, 0xa116d944)}, {TOBN(0x9ff22473, 0x8a0983cd), TOBN(0x28e25b38, 0xc883cabb), TOBN(0xe968dba5, 0x47a58995), TOBN(0x2c80b505, 0x774eebdf)}}, {{TOBN(0xee763b71, 0x4a953beb), TOBN(0x502e223f, 0x1642e7f6), TOBN(0x6fe4b641, 0x61d5e722), TOBN(0x9d37c5b0, 0xdbef5316)}, {TOBN(0x0115ed70, 0xf8330bc7), TOBN(0x139850e6, 0x75a72789), TOBN(0x27d7faec, 0xffceccc2), TOBN(0x3016a860, 0x4fd9f7f6)}}, {{TOBN(0xc492ec64, 0x4cd8f64c), TOBN(0x58a2d790, 0x279d7b51), TOBN(0x0ced1fc5, 0x1fc75256), TOBN(0x3e658aed, 0x8f433017)}, {TOBN(0x0b61942e, 0x05da59eb), TOBN(0xba3d60a3, 0x0ddc3722), TOBN(0x7c311cd1, 0x742e7f87), TOBN(0x6473ffee, 0xf6b01b6e)}}}, {{{TOBN(0x8303604f, 0x692ac542), TOBN(0xf079ffe1, 0x227b91d3), TOBN(0x19f63e63, 0x15aaf9bd), TOBN(0xf99ee565, 0xf1f344fb)}, {TOBN(0x8a1d661f, 0xd6219199), TOBN(0x8c883bc6, 0xd48ce41c), TOBN(0x1065118f, 0x3c74d904), TOBN(0x713889ee, 0x0faf8b1b)}}, {{TOBN(0x972b3f8f, 0x81a1b3be), TOBN(0x4f3ce145, 0xce2764a0), TOBN(0xe2d0f1cc, 0x28c4f5f7), TOBN(0xdeee0c0d, 0xc7f3985b)}, {TOBN(0x7df4adc0, 0xd39e25c3), TOBN(0x40619820, 0xc467a080), TOBN(0x440ebc93, 0x61cf5a58), TOBN(0x527729a6, 0x422ad600)}}, {{TOBN(0xca6c0937, 0xb1b76ba6), TOBN(0x1a2eab85, 0x4d2026dc), TOBN(0xb1715e15, 0x19d9ae0a), TOBN(0xf1ad9199, 0xbac4a026)}, {TOBN(0x35b3dfb8, 0x07ea7b0e), TOBN(0xedf5496f, 0x3ed9eb89), 
TOBN(0x8932e5ff, 0x2d6d08ab), TOBN(0xf314874e, 0x25bd2731)}}, {{TOBN(0xefb26a75, 0x3f73f449), TOBN(0x1d1c94f8, 0x8d44fc79), TOBN(0x49f0fbc5, 0x3bc0dc4d), TOBN(0xb747ea0b, 0x3698a0d0)}, {TOBN(0x5218c3fe, 0x228d291e), TOBN(0x35b804b5, 0x43c129d6), TOBN(0xfac859b8, 0xd1acc516), TOBN(0x6c10697d, 0x95d6e668)}}, {{TOBN(0xc38e438f, 0x0876fd4e), TOBN(0x45f0c307, 0x83d2f383), TOBN(0x203cc2ec, 0xb10934cb), TOBN(0x6a8f2439, 0x2c9d46ee)}, {TOBN(0xf16b431b, 0x65ccde7b), TOBN(0x41e2cd18, 0x27e76a6f), TOBN(0xb9c8cf8f, 0x4e3484d7), TOBN(0x64426efd, 0x8315244a)}}, {{TOBN(0x1c0a8e44, 0xfc94dea3), TOBN(0x34c8cdbf, 0xdad6a0b0), TOBN(0x919c3840, 0x04113cef), TOBN(0xfd32fba4, 0x15490ffa)}, {TOBN(0x58d190f6, 0x795dcfb7), TOBN(0xfef01b03, 0x83588baf), TOBN(0x9e6d1d63, 0xca1fc1c0), TOBN(0x53173f96, 0xf0a41ac9)}}, {{TOBN(0x2b1d402a, 0xba16f73b), TOBN(0x2fb31014, 0x8cf9b9fc), TOBN(0x2d51e60e, 0x446ef7bf), TOBN(0xc731021b, 0xb91e1745)}, {TOBN(0x9d3b4724, 0x4fee99d4), TOBN(0x4bca48b6, 0xfac5c1ea), TOBN(0x70f5f514, 0xbbea9af7), TOBN(0x751f55a5, 0x974c283a)}}, {{TOBN(0x6e30251a, 0xcb452fdb), TOBN(0x31ee6965, 0x50f30650), TOBN(0xb0b3e508, 0x933548d9), TOBN(0xb8949a4f, 0xf4b0ef5b)}, {TOBN(0x208b8326, 0x3c88f3bd), TOBN(0xab147c30, 0xdb1d9989), TOBN(0xed6515fd, 0x44d4df03), TOBN(0x17a12f75, 0xe72eb0c5)}}, {{TOBN(0x3b59796d, 0x36cf69db), TOBN(0x1219eee9, 0x56670c18), TOBN(0xfe3341f7, 0x7a070d8e), TOBN(0x9b70130b, 0xa327f90c)}, {TOBN(0x36a32462, 0x0ae18e0e), TOBN(0x2021a623, 0x46c0a638), TOBN(0x251b5817, 0xc62eb0d4), TOBN(0x87bfbcdf, 0x4c762293)}}, {{TOBN(0xf78ab505, 0xcdd61d64), TOBN(0x8c7a53fc, 0xc8c18857), TOBN(0xa653ce6f, 0x16147515), TOBN(0x9c923aa5, 0xea7d52d5)}, {TOBN(0xc24709cb, 0x5c18871f), TOBN(0x7d53bec8, 0x73b3cc74), TOBN(0x59264aff, 0xfdd1d4c4), TOBN(0x5555917e, 0x240da582)}}, {{TOBN(0xcae8bbda, 0x548f5a0e), TOBN(0x1910eaba, 0x3bbfbbe1), TOBN(0xae579685, 0x7677afc3), TOBN(0x49ea61f1, 0x73ff0b5c)}, {TOBN(0x78655478, 0x4f7c3922), TOBN(0x95d337cd, 0x20c68eef), TOBN(0x68f1e1e5, 0xdf779ab9), 
TOBN(0x14b491b0, 0xb5cf69a8)}}, {{TOBN(0x7a6cbbe0, 0x28e3fe89), TOBN(0xe7e1fee4, 0xc5aac0eb), TOBN(0x7f47eda5, 0x697e5140), TOBN(0x4f450137, 0xb454921f)}, {TOBN(0xdb625f84, 0x95cd8185), TOBN(0x74be0ba1, 0xcdb2e583), TOBN(0xaee4fd7c, 0xdd5e6de4), TOBN(0x4251437d, 0xe8101739)}}, {{TOBN(0x686d72a0, 0xac620366), TOBN(0x4be3fb9c, 0xb6d59344), TOBN(0x6e8b44e7, 0xa1eb75b9), TOBN(0x84e39da3, 0x91a5c10c)}, {TOBN(0x37cc1490, 0xb38f0409), TOBN(0x02951943, 0x2c2ade82), TOBN(0x9b688783, 0x1190a2d8), TOBN(0x25627d14, 0x231182ba)}}, {{TOBN(0x6eb550aa, 0x658a6d87), TOBN(0x1405aaa7, 0xcf9c7325), TOBN(0xd147142e, 0x5c8748c9), TOBN(0x7f637e4f, 0x53ede0e0)}, {TOBN(0xf8ca2776, 0x14ffad2c), TOBN(0xe58fb1bd, 0xbafb6791), TOBN(0x17158c23, 0xbf8f93fc), TOBN(0x7f15b373, 0x0a4a4655)}}, {{TOBN(0x39d4add2, 0xd842ca72), TOBN(0xa71e4391, 0x3ed96305), TOBN(0x5bb09cbe, 0x6700be14), TOBN(0x68d69d54, 0xd8befcf6)}, {TOBN(0xa45f5367, 0x37183bcf), TOBN(0x7152b7bb, 0x3370dff7), TOBN(0xcf887baa, 0xbf12525b), TOBN(0xe7ac7bdd, 0xd6d1e3cd)}}, {{TOBN(0x25914f78, 0x81fdad90), TOBN(0xcf638f56, 0x0d2cf6ab), TOBN(0xb90bc03f, 0xcc054de5), TOBN(0x932811a7, 0x18b06350)}, {TOBN(0x2f00b330, 0x9bbd11ff), TOBN(0x76108a6f, 0xb4044974), TOBN(0x801bb9e0, 0xa851d266), TOBN(0x0dd099be, 0xbf8990c1)}}, {{TOBN(0x58c5aaaa, 0xabe32986), TOBN(0x0fe9dd2a, 0x50d59c27), TOBN(0x84951ff4, 0x8d307305), TOBN(0x6c23f829, 0x86529b78)}, {TOBN(0x50bb2218, 0x0b136a79), TOBN(0x7e2174de, 0x77a20996), TOBN(0x6f00a4b9, 0xc0bb4da6), TOBN(0x89a25a17, 0xefdde8da)}}, {{TOBN(0xf728a27e, 0xc11ee01d), TOBN(0xf900553a, 0xe5f10dfb), TOBN(0x189a83c8, 0x02ec893c), TOBN(0x3ca5bdc1, 0x23f66d77)}, {TOBN(0x98781537, 0x97eada9f), TOBN(0x59c50ab3, 0x10256230), TOBN(0x346042d9, 0x323c69b3), TOBN(0x1b715a6d, 0x2c460449)}}, {{TOBN(0xa41dd476, 0x6ae06e0b), TOBN(0xcdd7888e, 0x9d42e25f), TOBN(0x0f395f74, 0x56b25a20), TOBN(0xeadfe0ae, 0x8700e27e)}, {TOBN(0xb09d52a9, 0x69950093), TOBN(0x3525d9cb, 0x327f8d40), TOBN(0xb8235a94, 0x67df886a), TOBN(0x77e4b0dd, 0x035faec2)}}, 
{{TOBN(0x115eb20a, 0x517d7061), TOBN(0x77fe3433, 0x6c2df683), TOBN(0x6870ddc7, 0xcdc6fc67), TOBN(0xb1610588, 0x0b87de83)}, {TOBN(0x343584ca, 0xd9c4ddbe), TOBN(0xb3164f1c, 0x3d754be2), TOBN(0x0731ed3a, 0xc1e6c894), TOBN(0x26327dec, 0x4f6b904c)}}, {{TOBN(0x9d49c6de, 0x97b5cd32), TOBN(0x40835dae, 0xb5eceecd), TOBN(0xc66350ed, 0xd9ded7fe), TOBN(0x8aeebb5c, 0x7a678804)}, {TOBN(0x51d42fb7, 0x5b8ee9ec), TOBN(0xd7a17bdd, 0x8e3ca118), TOBN(0x40d7511a, 0x2ef4400e), TOBN(0xc48990ac, 0x875a66f4)}}, {{TOBN(0x8de07d2a, 0x2199e347), TOBN(0xbee75556, 0x2a39e051), TOBN(0x56918786, 0x916e51dc), TOBN(0xeb191313, 0x4a2d89ec)}, {TOBN(0x6679610d, 0x37d341ed), TOBN(0x434fbb41, 0x56d51c2b), TOBN(0xe54b7ee7, 0xd7492dba), TOBN(0xaa33a79a, 0x59021493)}}, {{TOBN(0x49fc5054, 0xe4bd6d3d), TOBN(0x09540f04, 0x5ab551d0), TOBN(0x8acc9085, 0x4942d3a6), TOBN(0x231af02f, 0x2d28323b)}, {TOBN(0x93458cac, 0x0992c163), TOBN(0x1fef8e71, 0x888e3bb4), TOBN(0x27578da5, 0xbe8c268c), TOBN(0xcc8be792, 0xe805ec00)}}, {{TOBN(0x29267bae, 0xc61c3855), TOBN(0xebff429d, 0x58c1fd3b), TOBN(0x22d886c0, 0x8c0b93b8), TOBN(0xca5e00b2, 0x2ddb8953)}, {TOBN(0xcf330117, 0xc3fed8b7), TOBN(0xd49ac6fa, 0x819c01f6), TOBN(0x6ddaa6bd, 0x3c0fbd54), TOBN(0x91743068, 0x8049a2cf)}}, {{TOBN(0xd67f981e, 0xaff2ef81), TOBN(0xc3654d35, 0x2818ae80), TOBN(0x81d05044, 0x1b2aa892), TOBN(0x2db067bf, 0x3d099328)}, {TOBN(0xe7c79e86, 0x703dcc97), TOBN(0xe66f9b37, 0xe133e215), TOBN(0xcdf119a6, 0xe39a7a5c), TOBN(0x47c60de3, 0x876f1b61)}}, {{TOBN(0x6e405939, 0xd860f1b2), TOBN(0x3e9a1dbc, 0xf5ed4d4a), TOBN(0x3f23619e, 0xc9b6bcbd), TOBN(0x5ee790cf, 0x734e4497)}, {TOBN(0xf0a834b1, 0x5bdaf9bb), TOBN(0x02cedda7, 0x4ca295f0), TOBN(0x4619aa2b, 0xcb8e378c), TOBN(0xe5613244, 0xcc987ea4)}}, {{TOBN(0x0bc022cc, 0x76b23a50), TOBN(0x4a2793ad, 0x0a6c21ce), TOBN(0x38328780, 0x89cac3f5), TOBN(0x29176f1b, 0xcba26d56)}, {TOBN(0x06296187, 0x4f6f59eb), TOBN(0x86e9bca9, 0x8bdc658e), TOBN(0x2ca9c4d3, 0x57e30402), TOBN(0x5438b216, 0x516a09bb)}}, {{TOBN(0x0a6a063c, 0x7672765a), 
TOBN(0x37a3ce64, 0x0547b9bf), TOBN(0x42c099c8, 0x98b1a633), TOBN(0xb5ab800d, 0x05ee6961)}, {TOBN(0xf1963f59, 0x11a5acd6), TOBN(0xbaee6157, 0x46201063), TOBN(0x36d9a649, 0xa596210a), TOBN(0xaed04363, 0x1ba7138c)}}, {{TOBN(0xcf817d1c, 0xa4a82b76), TOBN(0x5586960e, 0xf3806be9), TOBN(0x7ab67c89, 0x09dc6bb5), TOBN(0x52ace7a0, 0x114fe7eb)}, {TOBN(0xcd987618, 0xcbbc9b70), TOBN(0x4f06fd5a, 0x604ca5e1), TOBN(0x90af14ca, 0x6dbde133), TOBN(0x1afe4322, 0x948a3264)}}, {{TOBN(0xa70d2ca6, 0xc44b2c6c), TOBN(0xab726799, 0x0ef87dfe), TOBN(0x310f64dc, 0x2e696377), TOBN(0x49b42e68, 0x4c8126a0)}, {TOBN(0x0ea444c3, 0xcea0b176), TOBN(0x53a8ddf7, 0xcb269182), TOBN(0xf3e674eb, 0xbbba9dcb), TOBN(0x0d2878a8, 0xd8669d33)}}, {{TOBN(0x04b935d5, 0xd019b6a3), TOBN(0xbb5cf88e, 0x406f1e46), TOBN(0xa1912d16, 0x5b57c111), TOBN(0x9803fc21, 0x19ebfd78)}, {TOBN(0x4f231c9e, 0xc07764a9), TOBN(0xd93286ee, 0xb75bd055), TOBN(0x83a9457d, 0x8ee6c9de), TOBN(0x04695915, 0x6087ec90)}}, {{TOBN(0x14c6dd8a, 0x58d6cd46), TOBN(0x9cb633b5, 0x8e6634d2), TOBN(0xc1305047, 0xf81bc328), TOBN(0x12ede0e2, 0x26a177e5)}, {TOBN(0x332cca62, 0x065a6f4f), TOBN(0xc3a47ecd, 0x67be487b), TOBN(0x741eb187, 0x0f47ed1c), TOBN(0x99e66e58, 0xe7598b14)}}, {{TOBN(0x6f0544ca, 0x63d0ff12), TOBN(0xe5efc784, 0xb610a05f), TOBN(0xf72917b1, 0x7cad7b47), TOBN(0x3ff6ea20, 0xf2cac0c0)}, {TOBN(0xcc23791b, 0xf21db8b7), TOBN(0x7dac70b1, 0xd7d93565), TOBN(0x682cda1d, 0x694bdaad), TOBN(0xeb88bb8c, 0x1023516d)}}, {{TOBN(0xc4c634b4, 0xdfdbeb1b), TOBN(0x22f5ca72, 0xb4ee4dea), TOBN(0x1045a368, 0xe6524821), TOBN(0xed9e8a3f, 0x052b18b2)}, {TOBN(0x9b7f2cb1, 0xb961f49a), TOBN(0x7fee2ec1, 0x7b009670), TOBN(0x350d8754, 0x22507a6d), TOBN(0x561bd711, 0x4db55f1d)}}, {{TOBN(0x4c189ccc, 0x320bbcaf), TOBN(0x568434cf, 0xdf1de48c), TOBN(0x6af1b00e, 0x0fa8f128), TOBN(0xf0ba9d02, 0x8907583c)}, {TOBN(0x735a4004, 0x32ff9f60), TOBN(0x3dd8e4b6, 0xc25dcf33), TOBN(0xf2230f16, 0x42c74cef), TOBN(0xd8117623, 0x013fa8ad)}}, {{TOBN(0x36822876, 0xf51fe76e), TOBN(0x8a6811cc, 0x11d62589), 
TOBN(0xc3fc7e65, 0x46225718), TOBN(0xb7df2c9f, 0xc82fdbcd)}, {TOBN(0x3b1d4e52, 0xdd7b205b), TOBN(0xb6959478, 0x47a2e414), TOBN(0x05e4d793, 0xefa91148), TOBN(0xb47ed446, 0xfd2e9675)}}, {{TOBN(0x1a7098b9, 0x04c9d9bf), TOBN(0x661e2881, 0x1b793048), TOBN(0xb1a16966, 0xb01ee461), TOBN(0xbc521308, 0x2954746f)}, {TOBN(0xc909a0fc, 0x2477de50), TOBN(0xd80bb41c, 0x7dbd51ef), TOBN(0xa85be7ec, 0x53294905), TOBN(0x6d465b18, 0x83958f97)}}, {{TOBN(0x16f6f330, 0xfb6840fd), TOBN(0xfaaeb214, 0x3401e6c8), TOBN(0xaf83d30f, 0xccb5b4f8), TOBN(0x22885739, 0x266dec4b)}, {TOBN(0x51b4367c, 0x7bc467df), TOBN(0x926562e3, 0xd842d27a), TOBN(0xdfcb6614, 0x0fea14a6), TOBN(0xeb394dae, 0xf2734cd9)}}, {{TOBN(0x3eeae5d2, 0x11c0be98), TOBN(0xb1e6ed11, 0x814e8165), TOBN(0x191086bc, 0xe52bce1c), TOBN(0x14b74cc6, 0xa75a04da)}, {TOBN(0x63cf1186, 0x8c060985), TOBN(0x071047de, 0x2dbd7f7c), TOBN(0x4e433b8b, 0xce0942ca), TOBN(0xecbac447, 0xd8fec61d)}}, {{TOBN(0x8f0ed0e2, 0xebf3232f), TOBN(0xfff80f9e, 0xc52a2edd), TOBN(0xad9ab433, 0x75b55fdb), TOBN(0x73ca7820, 0xe42e0c11)}, {TOBN(0x6dace0a0, 0xe6251b46), TOBN(0x89bc6b5c, 0x4c0d932d), TOBN(0x3438cd77, 0x095da19a), TOBN(0x2f24a939, 0x8d48bdfb)}}, {{TOBN(0x99b47e46, 0x766561b7), TOBN(0x736600e6, 0x0ed0322a), TOBN(0x06a47cb1, 0x638e1865), TOBN(0x927c1c2d, 0xcb136000)}, {TOBN(0x29542337, 0x0cc5df69), TOBN(0x99b37c02, 0x09d649a9), TOBN(0xc5f0043c, 0x6aefdb27), TOBN(0x6cdd9987, 0x1be95c27)}}, {{TOBN(0x69850931, 0x390420d2), TOBN(0x299c40ac, 0x0983efa4), TOBN(0x3a05e778, 0xaf39aead), TOBN(0x84274408, 0x43a45193)}, {TOBN(0x6bcd0fb9, 0x91a711a0), TOBN(0x461592c8, 0x9f52ab17), TOBN(0xb49302b4, 0xda3c6ed6), TOBN(0xc51fddc7, 0x330d7067)}}, {{TOBN(0x94babeb6, 0xda50d531), TOBN(0x521b840d, 0xa6a7b9da), TOBN(0x5305151e, 0x404bdc89), TOBN(0x1bcde201, 0xd0d07449)}, {TOBN(0xf427a78b, 0x3b76a59a), TOBN(0xf84841ce, 0x07791a1b), TOBN(0xebd314be, 0xbf91ed1c), TOBN(0x8e61d34c, 0xbf172943)}}, {{TOBN(0x1d5dc451, 0x5541b892), TOBN(0xb186ee41, 0xfc9d9e54), TOBN(0x9d9f345e, 0xd5bf610d), 
TOBN(0x3e7ba65d, 0xf6acca9f)}, {TOBN(0x9dda787a, 0xa8369486), TOBN(0x09f9dab7, 0x8eb5ba53), TOBN(0x5afb2033, 0xd6481bc3), TOBN(0x76f4ce30, 0xafa62104)}}, {{TOBN(0xa8fa00cf, 0xf4f066b5), TOBN(0x89ab5143, 0x461dafc2), TOBN(0x44339ed7, 0xa3389998), TOBN(0x2ff862f1, 0xbc214903)}, {TOBN(0x2c88f985, 0xb05556e3), TOBN(0xcd96058e, 0x3467081e), TOBN(0x7d6a4176, 0xedc637ea), TOBN(0xe1743d09, 0x36a5acdc)}}, {{TOBN(0x66fd72e2, 0x7eb37726), TOBN(0xf7fa264e, 0x1481a037), TOBN(0x9fbd3bde, 0x45f4aa79), TOBN(0xed1e0147, 0x767c3e22)}, {TOBN(0x7621f979, 0x82e7abe2), TOBN(0x19eedc72, 0x45f633f8), TOBN(0xe69b155e, 0x6137bf3a), TOBN(0xa0ad13ce, 0x414ee94e)}}, {{TOBN(0x93e3d524, 0x1c0e651a), TOBN(0xab1a6e2a, 0x02ce227e), TOBN(0xe7af1797, 0x4ab27eca), TOBN(0x245446de, 0xbd444f39)}, {TOBN(0x59e22a21, 0x56c07613), TOBN(0x43deafce, 0xf4275498), TOBN(0x10834ccb, 0x67fd0946), TOBN(0xa75841e5, 0x47406edf)}}, {{TOBN(0xebd6a677, 0x7b0ac93d), TOBN(0xa6e37b0d, 0x78f5e0d7), TOBN(0x2516c096, 0x76f5492b), TOBN(0x1e4bf888, 0x9ac05f3a)}, {TOBN(0xcdb42ce0, 0x4df0ba2b), TOBN(0x935d5cfd, 0x5062341b), TOBN(0x8a303333, 0x82acac20), TOBN(0x429438c4, 0x5198b00e)}}, {{TOBN(0x1d083bc9, 0x049d33fa), TOBN(0x58b82dda, 0x946f67ff), TOBN(0xac3e2db8, 0x67a1d6a3), TOBN(0x62e6bead, 0x1798aac8)}, {TOBN(0xfc85980f, 0xde46c58c), TOBN(0xa7f69379, 0x69c8d7be), TOBN(0x23557927, 0x837b35ec), TOBN(0x06a933d8, 0xe0790c0c)}}, {{TOBN(0x827c0e9b, 0x077ff55d), TOBN(0x53977798, 0xbb26e680), TOBN(0x59530874, 0x1d9cb54f), TOBN(0xcca3f449, 0x4aac53ef)}, {TOBN(0x11dc5c87, 0xa07eda0f), TOBN(0xc138bccf, 0xfd6400c8), TOBN(0x549680d3, 0x13e5da72), TOBN(0xc93eed82, 0x4540617e)}}, {{TOBN(0xfd3db157, 0x4d0b75c0), TOBN(0x9716eb42, 0x6386075b), TOBN(0x0639605c, 0x817b2c16), TOBN(0x09915109, 0xf1e4f201)}, {TOBN(0x35c9a928, 0x5cca6c3b), TOBN(0xb25f7d1a, 0x3505c900), TOBN(0xeb9f7d20, 0x630480c4), TOBN(0xc3c7b8c6, 0x2a1a501c)}}, {{TOBN(0x3f99183c, 0x5a1f8e24), TOBN(0xfdb118fa, 0x9dd255f0), TOBN(0xb9b18b90, 0xc27f62a6), TOBN(0xe8f732f7, 0x396ec191)}, 
{TOBN(0x524a2d91, 0x0be786ab), TOBN(0x5d32adef, 0x0ac5a0f5), TOBN(0x9b53d4d6, 0x9725f694), TOBN(0x032a76c6, 0x0510ba89)}}, {{TOBN(0x840391a3, 0xebeb1544), TOBN(0x44b7b88c, 0x3ed73ac3), TOBN(0xd24bae7a, 0x256cb8b3), TOBN(0x7ceb151a, 0xe394cb12)}, {TOBN(0xbd6b66d0, 0x5bc1e6a8), TOBN(0xec70cecb, 0x090f07bf), TOBN(0x270644ed, 0x7d937589), TOBN(0xee9e1a3d, 0x5f1dccfe)}}, {{TOBN(0xb0d40a84, 0x745b98d2), TOBN(0xda429a21, 0x2556ed40), TOBN(0xf676eced, 0x85148cb9), TOBN(0x5a22d40c, 0xded18936)}, {TOBN(0x3bc4b9e5, 0x70e8a4ce), TOBN(0xbfd1445b, 0x9eae0379), TOBN(0xf23f2c0c, 0x1a0bd47e), TOBN(0xa9c0bb31, 0xe1845531)}}, {{TOBN(0x9ddc4d60, 0x0a4c3f6b), TOBN(0xbdfaad79, 0x2c15ef44), TOBN(0xce55a236, 0x7f484acc), TOBN(0x08653ca7, 0x055b1f15)}, {TOBN(0x2efa8724, 0x538873a3), TOBN(0x09299e5d, 0xace1c7e7), TOBN(0x07afab66, 0xade332ba), TOBN(0x9be1fdf6, 0x92dd71b7)}}, {{TOBN(0xa49b5d59, 0x5758b11c), TOBN(0x0b852893, 0xc8654f40), TOBN(0xb63ef6f4, 0x52379447), TOBN(0xd4957d29, 0x105e690c)}, {TOBN(0x7d484363, 0x646559b0), TOBN(0xf4a8273c, 0x49788a8e), TOBN(0xee406cb8, 0x34ce54a9), TOBN(0x1e1c260f, 0xf86fda9b)}}, {{TOBN(0xe150e228, 0xcf6a4a81), TOBN(0x1fa3b6a3, 0x1b488772), TOBN(0x1e6ff110, 0xc5a9c15b), TOBN(0xc6133b91, 0x8ad6aa47)}, {TOBN(0x8ac5d55c, 0x9dffa978), TOBN(0xba1d1c1d, 0x5f3965f2), TOBN(0xf969f4e0, 0x7732b52f), TOBN(0xfceecdb5, 0xa5172a07)}}, {{TOBN(0xb0120a5f, 0x10f2b8f5), TOBN(0xc83a6cdf, 0x5c4c2f63), TOBN(0x4d47a491, 0xf8f9c213), TOBN(0xd9e1cce5, 0xd3f1bbd5)}, {TOBN(0x0d91bc7c, 0xaba7e372), TOBN(0xfcdc74c8, 0xdfd1a2db), TOBN(0x05efa800, 0x374618e5), TOBN(0x11216969, 0x15a7925e)}}, {{TOBN(0xd4c89823, 0xf6021c5d), TOBN(0x880d5e84, 0xeff14423), TOBN(0x6523bc5a, 0x6dcd1396), TOBN(0xd1acfdfc, 0x113c978b)}, {TOBN(0xb0c164e8, 0xbbb66840), TOBN(0xf7f4301e, 0x72b58459), TOBN(0xc29ad4a6, 0xa638e8ec), TOBN(0xf5ab8961, 0x46b78699)}}, {{TOBN(0x9dbd7974, 0x0e954750), TOBN(0x0121de88, 0x64f9d2c6), TOBN(0x2e597b42, 0xd985232e), TOBN(0x55b6c3c5, 0x53451777)}, {TOBN(0xbb53e547, 0x519cb9fb), 
TOBN(0xf134019f, 0x8428600d), TOBN(0x5a473176, 0xe081791a), TOBN(0x2f3e2263, 0x35fb0c08)}}, {{TOBN(0xb28c3017, 0x73d273b0), TOBN(0xccd21076, 0x7721ef9a), TOBN(0x054cc292, 0xb650dc39), TOBN(0x662246de, 0x6188045e)}, {TOBN(0x904b52fa, 0x6b83c0d1), TOBN(0xa72df267, 0x97e9cd46), TOBN(0x886b43cd, 0x899725e4), TOBN(0x2b651688, 0xd849ff22)}}, {{TOBN(0x60479b79, 0x02f34533), TOBN(0x5e354c14, 0x0c77c148), TOBN(0xb4bb7581, 0xa8537c78), TOBN(0x188043d7, 0xefe1495f)}, {TOBN(0x9ba12f42, 0x8c1d5026), TOBN(0x2e0c8a26, 0x93d4aaab), TOBN(0xbdba7b8b, 0xaa57c450), TOBN(0x140c9ad6, 0x9bbdafef)}}, {{TOBN(0x2067aa42, 0x25ac0f18), TOBN(0xf7b1295b, 0x04d1fbf3), TOBN(0x14829111, 0xa4b04824), TOBN(0x2ce3f192, 0x33bd5e91)}, {TOBN(0x9c7a1d55, 0x8f2e1b72), TOBN(0xfe932286, 0x302aa243), TOBN(0x497ca7b4, 0xd4be9554), TOBN(0xb8e821b8, 0xe0547a6e)}}, {{TOBN(0xfb2838be, 0x67e573e0), TOBN(0x05891db9, 0x4084c44b), TOBN(0x91311373, 0x96c1c2c5), TOBN(0x6aebfa3f, 0xd958444b)}, {TOBN(0xac9cdce9, 0xe56e55c1), TOBN(0x7148ced3, 0x2caa46d0), TOBN(0x2e10c7ef, 0xb61fe8eb), TOBN(0x9fd835da, 0xff97cf4d)}}}, {{{TOBN(0xa36da109, 0x081e9387), TOBN(0xfb9780d7, 0x8c935828), TOBN(0xd5940332, 0xe540b015), TOBN(0xc9d7b51b, 0xe0f466fa)}, {TOBN(0xfaadcd41, 0xd6d9f671), TOBN(0xba6c1e28, 0xb1a2ac17), TOBN(0x066a7833, 0xed201e5f), TOBN(0x19d99719, 0xf90f462b)}}, {{TOBN(0xf431f462, 0x060b5f61), TOBN(0xa56f46b4, 0x7bd057c2), TOBN(0x348dca6c, 0x47e1bf65), TOBN(0x9a38783e, 0x41bcf1ff)}, {TOBN(0x7a5d33a9, 0xda710718), TOBN(0x5a779987, 0x2e0aeaf6), TOBN(0xca87314d, 0x2d29d187), TOBN(0xfa0edc3e, 0xc687d733)}}, {{TOBN(0x9df33621, 0x6a31e09b), TOBN(0xde89e44d, 0xc1350e35), TOBN(0x29214871, 0x4ca0cf52), TOBN(0xdf379672, 0x0b88a538)}, {TOBN(0xc92a510a, 0x2591d61b), TOBN(0x79aa87d7, 0x585b447b), TOBN(0xf67db604, 0xe5287f77), TOBN(0x1697c8bf, 0x5efe7a80)}}, {{TOBN(0x1c894849, 0xcb198ac7), TOBN(0xa884a93d, 0x0f264665), TOBN(0x2da964ef, 0x9b200678), TOBN(0x3c351b87, 0x009834e6)}, {TOBN(0xafb2ef9f, 0xe2c4b44b), TOBN(0x580f6c47, 0x3326790c), 
TOBN(0xb8480521, 0x0b02264a), TOBN(0x8ba6f9e2, 0x42a194e2)}}, {{TOBN(0xfc87975f, 0x8fb54738), TOBN(0x35160788, 0x27c3ead3), TOBN(0x834116d2, 0xb74a085a), TOBN(0x53c99a73, 0xa62fe996)}, {TOBN(0x87585be0, 0x5b81c51b), TOBN(0x925bafa8, 0xbe0852b7), TOBN(0x76a4fafd, 0xa84d19a7), TOBN(0x39a45982, 0x585206d4)}}, {{TOBN(0x499b6ab6, 0x5eb03c0e), TOBN(0xf19b7954, 0x72bc3fde), TOBN(0xa86b5b9c, 0x6e3a80d2), TOBN(0xe4377508, 0x6d42819f)}, {TOBN(0xc1663650, 0xbb3ee8a3), TOBN(0x75eb14fc, 0xb132075f), TOBN(0xa8ccc906, 0x7ad834f6), TOBN(0xea6a2474, 0xe6e92ffd)}}, {{TOBN(0x9d72fd95, 0x0f8d6758), TOBN(0xcb84e101, 0x408c07dd), TOBN(0xb9114bfd, 0xa5e23221), TOBN(0x358b5fe2, 0xe94e742c)}, {TOBN(0x1c0577ec, 0x95f40e75), TOBN(0xf0155451, 0x3d73f3d6), TOBN(0x9d55cd67, 0xbd1b9b66), TOBN(0x63e86e78, 0xaf8d63c7)}}, {{TOBN(0x39d934ab, 0xd3c095f1), TOBN(0x04b261be, 0xe4b76d71), TOBN(0x1d2e6970, 0xe73e6984), TOBN(0x879fb23b, 0x5e5fcb11)}, {TOBN(0x11506c72, 0xdfd75490), TOBN(0x3a97d085, 0x61bcf1c1), TOBN(0x43201d82, 0xbf5e7007), TOBN(0x7f0ac52f, 0x798232a7)}}, {{TOBN(0x2715cbc4, 0x6eb564d4), TOBN(0x8d6c752c, 0x9e570e29), TOBN(0xf80247c8, 0x9ef5fd5d), TOBN(0xc3c66b46, 0xd53eb514)}, {TOBN(0x9666b401, 0x0f87de56), TOBN(0xce62c06f, 0xc6c603b5), TOBN(0xae7b4c60, 0x7e4fc942), TOBN(0x38ac0b77, 0x663a9c19)}}, {{TOBN(0xcb4d20ee, 0x4b049136), TOBN(0x8b63bf12, 0x356a4613), TOBN(0x1221aef6, 0x70e08128), TOBN(0xe62d8c51, 0x4acb6b16)}, {TOBN(0x71f64a67, 0x379e7896), TOBN(0xb25237a2, 0xcafd7fa5), TOBN(0xf077bd98, 0x3841ba6a), TOBN(0xc4ac0244, 0x3cd16e7e)}}, {{TOBN(0x548ba869, 0x21fea4ca), TOBN(0xd36d0817, 0xf3dfdac1), TOBN(0x09d8d71f, 0xf4685faf), TOBN(0x8eff66be, 0xc52c459a)}, {TOBN(0x182faee7, 0x0b57235e), TOBN(0xee3c39b1, 0x0106712b), TOBN(0x5107331f, 0xc0fcdcb0), TOBN(0x669fb9dc, 0xa51054ba)}}, {{TOBN(0xb25101fb, 0x319d7682), TOBN(0xb0293129, 0x0a982fee), TOBN(0x51c1c9b9, 0x0261b344), TOBN(0x0e008c5b, 0xbfd371fa)}, {TOBN(0xd866dd1c, 0x0278ca33), TOBN(0x666f76a6, 0xe5aa53b1), TOBN(0xe5cfb779, 0x6013a2cf), 
TOBN(0x1d3a1aad, 0xa3521836)}}, {{TOBN(0xcedd2531, 0x73faa485), TOBN(0xc8ee6c4f, 0xc0a76878), TOBN(0xddbccfc9, 0x2a11667d), TOBN(0x1a418ea9, 0x1c2f695a)}, {TOBN(0xdb11bd92, 0x51f73971), TOBN(0x3e4b3c82, 0xda2ed89f), TOBN(0x9a44f3f4, 0xe73e0319), TOBN(0xd1e3de0f, 0x303431af)}}, {{TOBN(0x3c5604ff, 0x50f75f9c), TOBN(0x1d8eddf3, 0x7e752b22), TOBN(0x0ef074dd, 0x3c9a1118), TOBN(0xd0ffc172, 0xccb86d7b)}, {TOBN(0xabd1ece3, 0x037d90f2), TOBN(0xe3f307d6, 0x6055856c), TOBN(0x422f9328, 0x7e4c6daf), TOBN(0x902aac66, 0x334879a0)}}, {{TOBN(0xb6a1e7bf, 0x94cdfade), TOBN(0x6c97e1ed, 0x7fc6d634), TOBN(0x662ad24d, 0xa2fb63f8), TOBN(0xf81be1b9, 0xa5928405)}, {TOBN(0x86d765e4, 0xd14b4206), TOBN(0xbecc2e0e, 0x8fa0db65), TOBN(0xa28838e0, 0xb17fc76c), TOBN(0xe49a602a, 0xe37cf24e)}}, {{TOBN(0x76b4131a, 0x567193ec), TOBN(0xaf3c305a, 0xe5f6e70b), TOBN(0x9587bd39, 0x031eebdd), TOBN(0x5709def8, 0x71bbe831)}, {TOBN(0x57059983, 0x0eb2b669), TOBN(0x4d80ce1b, 0x875b7029), TOBN(0x838a7da8, 0x0364ac16), TOBN(0x2f431d23, 0xbe1c83ab)}}, {{TOBN(0xe56812a6, 0xf9294dd3), TOBN(0xb448d01f, 0x9b4b0d77), TOBN(0xf3ae6061, 0x04e8305c), TOBN(0x2bead645, 0x94d8c63e)}, {TOBN(0x0a85434d, 0x84fd8b07), TOBN(0x537b983f, 0xf7a9dee5), TOBN(0xedcc5f18, 0xef55bd85), TOBN(0x2041af62, 0x21c6cf8b)}}, {{TOBN(0x8e52874c, 0xb940c71e), TOBN(0x211935a9, 0xdb5f4b3a), TOBN(0x94350492, 0x301b1dc3), TOBN(0x33d2646d, 0x29958620)}, {TOBN(0x16b0d64b, 0xef911404), TOBN(0x9d1f25ea, 0x9a3c5ef4), TOBN(0x20f200eb, 0x4a352c78), TOBN(0x43929f2c, 0x4bd0b428)}}, {{TOBN(0xa5656667, 0xc7196e29), TOBN(0x7992c2f0, 0x9391be48), TOBN(0xaaa97cbd, 0x9ee0cd6e), TOBN(0x51b0310c, 0x3dc8c9bf)}, {TOBN(0x237f8acf, 0xdd9f22cb), TOBN(0xbb1d81a1, 0xb585d584), TOBN(0x8d5d85f5, 0x8c416388), TOBN(0x0d6e5a5a, 0x42fe474f)}}, {{TOBN(0xe7812766, 0x38235d4e), TOBN(0x1c62bd67, 0x496e3298), TOBN(0x8378660c, 0x3f175bc8), TOBN(0x4d04e189, 0x17afdd4d)}, {TOBN(0x32a81601, 0x85a8068c), TOBN(0xdb58e4e1, 0x92b29a85), TOBN(0xe8a65b86, 0xc70d8a3b), TOBN(0x5f0e6f4e, 0x98a0403b)}}, 
{{TOBN(0x08129684, 0x69ed2370), TOBN(0x34dc30bd, 0x0871ee26), TOBN(0x3a5ce948, 0x7c9c5b05), TOBN(0x7d487b80, 0x43a90c87)}, {TOBN(0x4089ba37, 0xdd0e7179), TOBN(0x45f80191, 0xb4041811), TOBN(0x1c3e1058, 0x98747ba5), TOBN(0x98c4e13a, 0x6e1ae592)}}, {{TOBN(0xd44636e6, 0xe82c9f9e), TOBN(0x711db87c, 0xc33a1043), TOBN(0x6f431263, 0xaa8aec05), TOBN(0x43ff120d, 0x2744a4aa)}, {TOBN(0xd3bd892f, 0xae77779b), TOBN(0xf0fe0cc9, 0x8cdc9f82), TOBN(0xca5f7fe6, 0xf1c5b1bc), TOBN(0xcc63a682, 0x44929a72)}}, {{TOBN(0xc7eaba0c, 0x09dbe19a), TOBN(0x2f3585ad, 0x6b5c73c2), TOBN(0x8ab8924b, 0x0ae50c30), TOBN(0x17fcd27a, 0x638b30ba)}, {TOBN(0xaf414d34, 0x10b3d5a5), TOBN(0x09c107d2, 0x2a9accf1), TOBN(0x15dac49f, 0x946a6242), TOBN(0xaec3df2a, 0xd707d642)}}, {{TOBN(0x2c2492b7, 0x3f894ae0), TOBN(0xf59df3e5, 0xb75f18ce), TOBN(0x7cb740d2, 0x8f53cad0), TOBN(0x3eb585fb, 0xc4f01294)}, {TOBN(0x17da0c86, 0x32c7f717), TOBN(0xeb8c795b, 0xaf943f4c), TOBN(0x4ee23fb5, 0xf67c51d2), TOBN(0xef187575, 0x68889949)}}, {{TOBN(0xa6b4bdb2, 0x0389168b), TOBN(0xc4ecd258, 0xea577d03), TOBN(0x3a63782b, 0x55743082), TOBN(0x6f678f4c, 0xc72f08cd)}, {TOBN(0x553511cf, 0x65e58dd8), TOBN(0xd53b4e3e, 0xd402c0cd), TOBN(0x37de3e29, 0xa037c14c), TOBN(0x86b6c516, 0xc05712aa)}}, {{TOBN(0x2834da3e, 0xb38dff6f), TOBN(0xbe012c52, 0xea636be8), TOBN(0x292d238c, 0x61dd37f8), TOBN(0x0e54523f, 0x8f8142db)}, {TOBN(0xe31eb436, 0x036a05d8), TOBN(0x83e3cdff, 0x1e93c0ff), TOBN(0x3fd2fe0f, 0x50821ddf), TOBN(0xc8e19b0d, 0xff9eb33b)}}, {{TOBN(0xc8cc943f, 0xb569a5fe), TOBN(0xad0090d4, 0xd4342d75), TOBN(0x82090b4b, 0xcaeca000), TOBN(0xca39687f, 0x1bd410eb)}, {TOBN(0xe7bb0df7, 0x65959d77), TOBN(0x39d78218, 0x9c964999), TOBN(0xd87f62e8, 0xb2415451), TOBN(0xe5efb774, 0xbed76108)}}, {{TOBN(0x3ea011a4, 0xe822f0d0), TOBN(0xbc647ad1, 0x5a8704f8), TOBN(0xbb315b35, 0x50c6820f), TOBN(0x863dec3d, 0xb7e76bec)}, {TOBN(0x01ff5d3a, 0xf017bfc7), TOBN(0x20054439, 0x976b8229), TOBN(0x067fca37, 0x0bbd0d3b), TOBN(0xf63dde64, 0x7f5e3d0f)}}, {{TOBN(0x22dbefb3, 0x2a4c94e9), 
TOBN(0xafbff0fe, 0x96f8278a), TOBN(0x80aea0b1, 0x3503793d), TOBN(0xb2238029, 0x5f06cd29)}, {TOBN(0x65703e57, 0x8ec3feca), TOBN(0x06c38314, 0x393e7053), TOBN(0xa0b751eb, 0x7c6734c4), TOBN(0xd2e8a435, 0xc59f0f1e)}}, {{TOBN(0x147d9052, 0x5e9ca895), TOBN(0x2f4dd31e, 0x972072df), TOBN(0xa16fda8e, 0xe6c6755c), TOBN(0xc66826ff, 0xcf196558)}, {TOBN(0x1f1a76a3, 0x0cf43895), TOBN(0xa9d604e0, 0x83c3097b), TOBN(0xe1908309, 0x66390e0e), TOBN(0xa50bf753, 0xb3c85eff)}}, {{TOBN(0x0696bdde, 0xf6a70251), TOBN(0x548b801b, 0x3c6ab16a), TOBN(0x37fcf704, 0xa4d08762), TOBN(0x090b3def, 0xdff76c4e)}, {TOBN(0x87e8cb89, 0x69cb9158), TOBN(0x44a90744, 0x995ece43), TOBN(0xf85395f4, 0x0ad9fbf5), TOBN(0x49b0f6c5, 0x4fb0c82d)}}, {{TOBN(0x75d9bc15, 0xadf7cccf), TOBN(0x81a3e5d6, 0xdfa1e1b0), TOBN(0x8c39e444, 0x249bc17e), TOBN(0xf37dccb2, 0x8ea7fd43)}, {TOBN(0xda654873, 0x907fba12), TOBN(0x35daa6da, 0x4a372904), TOBN(0x0564cfc6, 0x6283a6c5), TOBN(0xd09fa4f6, 0x4a9395bf)}}, {{TOBN(0x688e9ec9, 0xaeb19a36), TOBN(0xd913f1ce, 0xc7bfbfb4), TOBN(0x797b9a3c, 0x61c2faa6), TOBN(0x2f979bec, 0x6a0a9c12)}, {TOBN(0xb5969d0f, 0x359679ec), TOBN(0xebcf523d, 0x079b0460), TOBN(0xfd6b0008, 0x10fab870), TOBN(0x3f2edcda, 0x9373a39c)}}, {{TOBN(0x0d64f9a7, 0x6f568431), TOBN(0xf848c27c, 0x02f8898c), TOBN(0xf418ade1, 0x260b5bd5), TOBN(0xc1f3e323, 0x6973dee8)}, {TOBN(0x46e9319c, 0x26c185dd), TOBN(0x6d85b7d8, 0x546f0ac4), TOBN(0x427965f2, 0x247f9d57), TOBN(0xb519b636, 0xb0035f48)}}, {{TOBN(0x6b6163a9, 0xab87d59c), TOBN(0xff9f58c3, 0x39caaa11), TOBN(0x4ac39cde, 0x3177387b), TOBN(0x5f6557c2, 0x873e77f9)}, {TOBN(0x67504006, 0x36a83041), TOBN(0x9b1c96ca, 0x75ef196c), TOBN(0xf34283de, 0xb08c7940), TOBN(0x7ea09644, 0x1128c316)}}, {{TOBN(0xb510b3b5, 0x6aa39dff), TOBN(0x59b43da2, 0x9f8e4d8c), TOBN(0xa8ce31fd, 0x9e4c4b9f), TOBN(0x0e20be26, 0xc1303c01)}, {TOBN(0x18187182, 0xe8ee47c9), TOBN(0xd9687cdb, 0x7db98101), TOBN(0x7a520e4d, 0xa1e14ff6), TOBN(0x429808ba, 0x8836d572)}}, {{TOBN(0xa37ca60d, 0x4944b663), TOBN(0xf901f7a9, 0xa3f91ae5), 
TOBN(0xe4e3e76e, 0x9e36e3b1), TOBN(0x9aa219cf, 0x29d93250)}, {TOBN(0x347fe275, 0x056a2512), TOBN(0xa4d643d9, 0xde65d95c), TOBN(0x9669d396, 0x699fc3ed), TOBN(0xb598dee2, 0xcf8c6bbe)}}, {{TOBN(0x682ac1e5, 0xdda9e5c6), TOBN(0x4e0d3c72, 0xcaa9fc95), TOBN(0x17faaade, 0x772bea44), TOBN(0x5ef8428c, 0xab0009c8)}, {TOBN(0xcc4ce47a, 0x460ff016), TOBN(0xda6d12bf, 0x725281cb), TOBN(0x44c67848, 0x0223aad2), TOBN(0x6e342afa, 0x36256e28)}}, {{TOBN(0x1400bb0b, 0x93a37c04), TOBN(0x62b1bc9b, 0xdd10bd96), TOBN(0x7251adeb, 0x0dac46b7), TOBN(0x7d33b92e, 0x7be4ef51)}, {TOBN(0x28b2a94b, 0xe61fa29a), TOBN(0x4b2be13f, 0x06422233), TOBN(0x36d6d062, 0x330d8d37), TOBN(0x5ef80e1e, 0xb28ca005)}}, {{TOBN(0x174d4699, 0x6d16768e), TOBN(0x9fc4ff6a, 0x628bf217), TOBN(0x77705a94, 0x154e490d), TOBN(0x9d96dd28, 0x8d2d997a)}, {TOBN(0x77e2d9d8, 0xce5d72c4), TOBN(0x9d06c5a4, 0xc11c714f), TOBN(0x02aa5136, 0x79e4a03e), TOBN(0x1386b3c2, 0x030ff28b)}}, {{TOBN(0xfe82e8a6, 0xfb283f61), TOBN(0x7df203e5, 0xf3abc3fb), TOBN(0xeec7c351, 0x3a4d3622), TOBN(0xf7d17dbf, 0xdf762761)}, {TOBN(0xc3956e44, 0x522055f0), TOBN(0xde3012db, 0x8fa748db), TOBN(0xca9fcb63, 0xbf1dcc14), TOBN(0xa56d9dcf, 0xbe4e2f3a)}}, {{TOBN(0xb86186b6, 0x8bcec9c2), TOBN(0x7cf24df9, 0x680b9f06), TOBN(0xc46b45ea, 0xc0d29281), TOBN(0xfff42bc5, 0x07b10e12)}, {TOBN(0x12263c40, 0x4d289427), TOBN(0x3d5f1899, 0xb4848ec4), TOBN(0x11f97010, 0xd040800c), TOBN(0xb4c5f529, 0x300feb20)}}, {{TOBN(0xcc543f8f, 0xde94fdcb), TOBN(0xe96af739, 0xc7c2f05e), TOBN(0xaa5e0036, 0x882692e1), TOBN(0x09c75b68, 0x950d4ae9)}, {TOBN(0x62f63df2, 0xb5932a7a), TOBN(0x2658252e, 0xde0979ad), TOBN(0x2a19343f, 0xb5e69631), TOBN(0x718c7501, 0x525b666b)}}, {{TOBN(0x26a42d69, 0xea40dc3a), TOBN(0xdc84ad22, 0xaecc018f), TOBN(0x25c36c7b, 0x3270f04a), TOBN(0x46ba6d47, 0x50fa72ed)}, {TOBN(0x6c37d1c5, 0x93e58a8e), TOBN(0xa2394731, 0x120c088c), TOBN(0xc3be4263, 0xcb6e86da), TOBN(0x2c417d36, 0x7126d038)}}, {{TOBN(0x5b70f9c5, 0x8b6f8efa), TOBN(0x671a2faa, 0x37718536), TOBN(0xd3ced3c6, 0xb539c92b), 
TOBN(0xe56f1bd9, 0xa31203c2)}, {TOBN(0x8b096ec4, 0x9ff3c8eb), TOBN(0x2deae432, 0x43491cea), TOBN(0x2465c6eb, 0x17943794), TOBN(0x5d267e66, 0x20586843)}}, {{TOBN(0x9d3d116d, 0xb07159d0), TOBN(0xae07a67f, 0xc1896210), TOBN(0x8fc84d87, 0xbb961579), TOBN(0x30009e49, 0x1c1f8dd6)}, {TOBN(0x8a8caf22, 0xe3132819), TOBN(0xcffa197c, 0xf23ab4ff), TOBN(0x58103a44, 0x205dd687), TOBN(0x57b796c3, 0x0ded67a2)}}, {{TOBN(0x0b9c3a6c, 0xa1779ad7), TOBN(0xa33cfe2e, 0x357c09c5), TOBN(0x2ea29315, 0x3db4a57e), TOBN(0x91959695, 0x8ebeb52e)}, {TOBN(0x118db9a6, 0xe546c879), TOBN(0x8e996df4, 0x6295c8d6), TOBN(0xdd990484, 0x55ec806b), TOBN(0x24f291ca, 0x165c1035)}}, {{TOBN(0xcca523bb, 0x440e2229), TOBN(0x324673a2, 0x73ef4d04), TOBN(0xaf3adf34, 0x3e11ec39), TOBN(0x6136d7f1, 0xdc5968d3)}, {TOBN(0x7a7b2899, 0xb053a927), TOBN(0x3eaa2661, 0xae067ecd), TOBN(0x8549b9c8, 0x02779cd9), TOBN(0x061d7940, 0xc53385ea)}}, {{TOBN(0x3e0ba883, 0xf06d18bd), TOBN(0x4ba6de53, 0xb2700843), TOBN(0xb966b668, 0x591a9e4d), TOBN(0x93f67567, 0x7f4fa0ed)}, {TOBN(0x5a02711b, 0x4347237b), TOBN(0xbc041e2f, 0xe794608e), TOBN(0x55af10f5, 0x70f73d8c), TOBN(0xd2d4d4f7, 0xbb7564f7)}}, {{TOBN(0xd7d27a89, 0xb3e93ce7), TOBN(0xf7b5a875, 0x5d3a2c1b), TOBN(0xb29e68a0, 0x255b218a), TOBN(0xb533837e, 0x8af76754)}, {TOBN(0xd1b05a73, 0x579fab2e), TOBN(0xb41055a1, 0xecd74385), TOBN(0xb2369274, 0x445e9115), TOBN(0x2972a7c4, 0xf520274e)}}, {{TOBN(0x6c08334e, 0xf678e68a), TOBN(0x4e4160f0, 0x99b057ed), TOBN(0x3cfe11b8, 0x52ccb69a), TOBN(0x2fd1823a, 0x21c8f772)}, {TOBN(0xdf7f072f, 0x3298f055), TOBN(0x8c0566f9, 0xfec74a6e), TOBN(0xe549e019, 0x5bb4d041), TOBN(0x7c3930ba, 0x9208d850)}}, {{TOBN(0xe07141fc, 0xaaa2902b), TOBN(0x539ad799, 0xe4f69ad3), TOBN(0xa6453f94, 0x813f9ffd), TOBN(0xc58d3c48, 0x375bc2f7)}, {TOBN(0xb3326fad, 0x5dc64e96), TOBN(0x3aafcaa9, 0xb240e354), TOBN(0x1d1b0903, 0xaca1e7a9), TOBN(0x4ceb9767, 0x1211b8a0)}}, {{TOBN(0xeca83e49, 0xe32a858e), TOBN(0x4c32892e, 0xae907bad), TOBN(0xd5b42ab6, 0x2eb9b494), TOBN(0x7fde3ee2, 0x1eabae1b)}, 
{TOBN(0x13b5ab09, 0xcaf54957), TOBN(0xbfb028be, 0xe5f5d5d5), TOBN(0x928a0650, 0x2003e2c0), TOBN(0x90793aac, 0x67476843)}}, {{TOBN(0x5e942e79, 0xc81710a0), TOBN(0x557e4a36, 0x27ccadd4), TOBN(0x72a2bc56, 0x4bcf6d0c), TOBN(0x09ee5f43, 0x26d7b80c)}, {TOBN(0x6b70dbe9, 0xd4292f19), TOBN(0x56f74c26, 0x63f16b18), TOBN(0xc23db0f7, 0x35fbb42a), TOBN(0xb606bdf6, 0x6ae10040)}}, {{TOBN(0x1eb15d4d, 0x044573ac), TOBN(0x7dc3cf86, 0x556b0ba4), TOBN(0x97af9a33, 0xc60df6f7), TOBN(0x0b1ef85c, 0xa716ce8c)}, {TOBN(0x2922f884, 0xc96958be), TOBN(0x7c32fa94, 0x35690963), TOBN(0x2d7f667c, 0xeaa00061), TOBN(0xeaaf7c17, 0x3547365c)}}, {{TOBN(0x1eb4de46, 0x87032d58), TOBN(0xc54f3d83, 0x5e2c79e0), TOBN(0x07818df4, 0x5d04ef23), TOBN(0x55faa9c8, 0x673d41b4)}, {TOBN(0xced64f6f, 0x89b95355), TOBN(0x4860d2ea, 0xb7415c84), TOBN(0x5fdb9bd2, 0x050ebad3), TOBN(0xdb53e0cc, 0x6685a5bf)}}, {{TOBN(0xb830c031, 0x9feb6593), TOBN(0xdd87f310, 0x6accff17), TOBN(0x2303ebab, 0x9f555c10), TOBN(0x94603695, 0x287e7065)}, {TOBN(0xf88311c3, 0x2e83358c), TOBN(0x508dd9b4, 0xeefb0178), TOBN(0x7ca23706, 0x2dba8652), TOBN(0x62aac5a3, 0x0047abe5)}}, {{TOBN(0x9a61d2a0, 0x8b1ea7b3), TOBN(0xd495ab63, 0xae8b1485), TOBN(0x38740f84, 0x87052f99), TOBN(0x178ebe5b, 0xb2974eea)}, {TOBN(0x030bbcca, 0x5b36d17f), TOBN(0xb5e4cce3, 0xaaf86eea), TOBN(0xb51a0220, 0x68f8e9e0), TOBN(0xa4348796, 0x09eb3e75)}}, {{TOBN(0xbe592309, 0xeef1a752), TOBN(0x5d7162d7, 0x6f2aa1ed), TOBN(0xaebfb5ed, 0x0f007dd2), TOBN(0x255e14b2, 0xc89edd22)}, {TOBN(0xba85e072, 0x0303b697), TOBN(0xc5d17e25, 0xf05720ff), TOBN(0x02b58d6e, 0x5128ebb6), TOBN(0x2c80242d, 0xd754e113)}}, {{TOBN(0x919fca5f, 0xabfae1ca), TOBN(0x937afaac, 0x1a21459b), TOBN(0x9e0ca91c, 0x1f66a4d2), TOBN(0x194cc7f3, 0x23ec1331)}, {TOBN(0xad25143a, 0x8aa11690), TOBN(0xbe40ad8d, 0x09b59e08), TOBN(0x37d60d9b, 0xe750860a), TOBN(0x6c53b008, 0xc6bf434c)}}, {{TOBN(0xb572415d, 0x1356eb80), TOBN(0xb8bf9da3, 0x9578ded8), TOBN(0x22658e36, 0x5e8fb38b), TOBN(0x9b70ce22, 0x5af8cb22)}, {TOBN(0x7c00018a, 0x829a8180), 
TOBN(0x84329f93, 0xb81ed295), TOBN(0x7c343ea2, 0x5f3cea83), TOBN(0x38f8655f, 0x67586536)}}, {{TOBN(0xa661a0d0, 0x1d3ec517), TOBN(0x98744652, 0x512321ae), TOBN(0x084ca591, 0xeca92598), TOBN(0xa9bb9dc9, 0x1dcb3feb)}, {TOBN(0x14c54355, 0x78b4c240), TOBN(0x5ed62a3b, 0x610cafdc), TOBN(0x07512f37, 0x1b38846b), TOBN(0x571bb70a, 0xb0e38161)}}, {{TOBN(0xb556b95b, 0x2da705d2), TOBN(0x3ef8ada6, 0xb1a08f98), TOBN(0x85302ca7, 0xddecfbe5), TOBN(0x0e530573, 0x943105cd)}, {TOBN(0x60554d55, 0x21a9255d), TOBN(0x63a32fa1, 0xf2f3802a), TOBN(0x35c8c5b0, 0xcd477875), TOBN(0x97f458ea, 0x6ad42da1)}}, {{TOBN(0x832d7080, 0xeb6b242d), TOBN(0xd30bd023, 0x3b71e246), TOBN(0x7027991b, 0xbe31139d), TOBN(0x68797e91, 0x462e4e53)}, {TOBN(0x423fe20a, 0x6b4e185a), TOBN(0x82f2c67e, 0x42d9b707), TOBN(0x25c81768, 0x4cf7811b), TOBN(0xbd53005e, 0x045bb95d)}}}, {{{TOBN(0xe5f649be, 0x9d8e68fd), TOBN(0xdb0f0533, 0x1b044320), TOBN(0xf6fde9b3, 0xe0c33398), TOBN(0x92f4209b, 0x66c8cfae)}, {TOBN(0xe9d1afcc, 0x1a739d4b), TOBN(0x09aea75f, 0xa28ab8de), TOBN(0x14375fb5, 0xeac6f1d0), TOBN(0x6420b560, 0x708f7aa5)}}, {{TOBN(0x9eae499c, 0x6254dc41), TOBN(0x7e293924, 0x7a837e7e), TOBN(0x74aec08c, 0x090524a7), TOBN(0xf82b9219, 0x8d6f55f2)}, {TOBN(0x493c962e, 0x1402cec5), TOBN(0x9f17ca17, 0xfa2f30e7), TOBN(0xbcd783e8, 0xe9b879cb), TOBN(0xea3d8c14, 0x5a6f145f)}}, {{TOBN(0xdede15e7, 0x5e0dee6e), TOBN(0x74f24872, 0xdc628aa2), TOBN(0xd3e9c4fe, 0x7861bb93), TOBN(0x56d4822a, 0x6187b2e0)}, {TOBN(0xb66417cf, 0xc59826f9), TOBN(0xca260969, 0x2408169e), TOBN(0xedf69d06, 0xc79ef885), TOBN(0x00031f8a, 0xdc7d138f)}}, {{TOBN(0x103c46e6, 0x0ebcf726), TOBN(0x4482b831, 0x6231470e), TOBN(0x6f6dfaca, 0x487c2109), TOBN(0x2e0ace97, 0x62e666ef)}, {TOBN(0x3246a9d3, 0x1f8d1f42), TOBN(0x1b1e83f1, 0x574944d2), TOBN(0x13dfa63a, 0xa57f334b), TOBN(0x0cf8daed, 0x9f025d81)}}, {{TOBN(0x30d78ea8, 0x00ee11c1), TOBN(0xeb053cd4, 0xb5e3dd75), TOBN(0x9b65b13e, 0xd58c43c5), TOBN(0xc3ad49bd, 0xbd151663)}, {TOBN(0x99fd8e41, 0xb6427990), TOBN(0x12cf15bd, 0x707eae1e), 
TOBN(0x29ad4f1b, 0x1aabb71e), TOBN(0x5143e74d, 0x07545d0e)}}, {{TOBN(0x30266336, 0xc88bdee1), TOBN(0x25f29306, 0x5876767c), TOBN(0x9c078571, 0xc6731996), TOBN(0xc88690b2, 0xed552951)}, {TOBN(0x274f2c2d, 0x852705b4), TOBN(0xb0bf8d44, 0x4e09552d), TOBN(0x7628beeb, 0x986575d1), TOBN(0x407be238, 0x7f864651)}}, {{TOBN(0x0e5e3049, 0xa639fc6b), TOBN(0xe75c35d9, 0x86003625), TOBN(0x0cf35bd8, 0x5dcc1646), TOBN(0x8bcaced2, 0x6c26273a)}, {TOBN(0xe22ecf1d, 0xb5536742), TOBN(0x013dd897, 0x1a9e068b), TOBN(0x17f411cb, 0x8a7909c5), TOBN(0x5757ac98, 0x861dd506)}}, {{TOBN(0x85de1f0d, 0x1e935abb), TOBN(0xdefd10b4, 0x154de37a), TOBN(0xb8d9e392, 0x369cebb5), TOBN(0x54d5ef9b, 0x761324be)}, {TOBN(0x4d6341ba, 0x74f17e26), TOBN(0xc0a0e3c8, 0x78c1dde4), TOBN(0xa6d77581, 0x87d918fd), TOBN(0x66876015, 0x02ca3a13)}}, {{TOBN(0xc7313e9c, 0xf36658f0), TOBN(0xc433ef1c, 0x71f8057e), TOBN(0x85326246, 0x1b6a835a), TOBN(0xc8f05398, 0x7c86394c)}, {TOBN(0xff398cdf, 0xe983c4a1), TOBN(0xbf5e8162, 0x03b7b931), TOBN(0x93193c46, 0xb7b9045b), TOBN(0x1e4ebf5d, 0xa4a6e46b)}}, {{TOBN(0xf9942a60, 0x43a24fe7), TOBN(0x29c1191e, 0xffb3492b), TOBN(0x9f662449, 0x902fde05), TOBN(0xc792a7ac, 0x6713c32d)}, {TOBN(0x2fd88ad8, 0xb737982c), TOBN(0x7e3a0319, 0xa21e60e3), TOBN(0x09b0de44, 0x7383591a), TOBN(0x6df141ee, 0x8310a456)}}, {{TOBN(0xaec1a039, 0xe6d6f471), TOBN(0x14b2ba0f, 0x1198d12e), TOBN(0xebc1a160, 0x3aeee5ac), TOBN(0x401f4836, 0xe0b964ce)}, {TOBN(0x2ee43796, 0x4fd03f66), TOBN(0x3fdb4e49, 0xdd8f3f12), TOBN(0x6ef267f6, 0x29380f18), TOBN(0x3e8e9670, 0x8da64d16)}}, {{TOBN(0xbc19180c, 0x207674f1), TOBN(0x112e09a7, 0x33ae8fdb), TOBN(0x99667554, 0x6aaeb71e), TOBN(0x79432af1, 0xe101b1c7)}, {TOBN(0xd5eb558f, 0xde2ddec6), TOBN(0x81392d1f, 0x5357753f), TOBN(0xa7a76b97, 0x3ae1158a), TOBN(0x416fbbff, 0x4a899991)}}, {{TOBN(0x9e65fdfd, 0x0d4a9dcf), TOBN(0x7bc29e48, 0x944ddf12), TOBN(0xbc1a92d9, 0x3c856866), TOBN(0x273c6905, 0x6e98dfe2)}, {TOBN(0x69fce418, 0xcdfaa6b8), TOBN(0x606bd823, 0x5061c69f), TOBN(0x42d495a0, 0x6af75e27), 
TOBN(0x8ed3d505, 0x6d873a1f)}}, {{TOBN(0xaf552841, 0x6ab25b6a), TOBN(0xc6c0ffc7, 0x2b1a4523), TOBN(0xab18827b, 0x21c99e03), TOBN(0x060e8648, 0x9034691b)}, {TOBN(0x5207f90f, 0x93c7f398), TOBN(0x9f4a96cb, 0x82f8d10b), TOBN(0xdd71cd79, 0x3ad0f9e3), TOBN(0x84f435d2, 0xfc3a54f5)}}, {{TOBN(0x4b03c55b, 0x8e33787f), TOBN(0xef42f975, 0xa6384673), TOBN(0xff7304f7, 0x5051b9f0), TOBN(0x18aca1dc, 0x741c87c2)}, {TOBN(0x56f120a7, 0x2d4bfe80), TOBN(0xfd823b3d, 0x053e732c), TOBN(0x11bccfe4, 0x7537ca16), TOBN(0xdf6c9c74, 0x1b5a996b)}}, {{TOBN(0xee7332c7, 0x904fc3fa), TOBN(0x14a23f45, 0xc7e3636a), TOBN(0xc38659c3, 0xf091d9aa), TOBN(0x4a995e5d, 0xb12d8540)}, {TOBN(0x20a53bec, 0xf3a5598a), TOBN(0x56534b17, 0xb1eaa995), TOBN(0x9ed3dca4, 0xbf04e03c), TOBN(0x716c563a, 0xd8d56268)}}, {{TOBN(0x27ba77a4, 0x1d6178e7), TOBN(0xe4c80c40, 0x68a1ff8e), TOBN(0x75011099, 0x0a13f63d), TOBN(0x7bf33521, 0xa61d46f3)}, {TOBN(0x0aff218e, 0x10b365bb), TOBN(0x81021804, 0x0fd7ea75), TOBN(0x05a3fd8a, 0xa4b3a925), TOBN(0xb829e75f, 0x9b3db4e6)}}, {{TOBN(0x6bdc75a5, 0x4d53e5fb), TOBN(0x04a5dc02, 0xd52717e3), TOBN(0x86af502f, 0xe9a42ec2), TOBN(0x8867e8fb, 0x2630e382)}, {TOBN(0xbf845c6e, 0xbec9889b), TOBN(0x54f491f2, 0xcb47c98d), TOBN(0xa3091fba, 0x790c2a12), TOBN(0xd7f6fd78, 0xc20f708b)}}, {{TOBN(0xa569ac30, 0xacde5e17), TOBN(0xd0f996d0, 0x6852b4d7), TOBN(0xe51d4bb5, 0x4609ae54), TOBN(0x3fa37d17, 0x0daed061)}, {TOBN(0x62a88684, 0x34b8fb41), TOBN(0x99a2acbd, 0x9efb64f1), TOBN(0xb75c1a5e, 0x6448e1f2), TOBN(0xfa99951a, 0x42b5a069)}}, {{TOBN(0x6d956e89, 0x2f3b26e7), TOBN(0xf4709860, 0xda875247), TOBN(0x3ad15179, 0x2482dda3), TOBN(0xd64110e3, 0x017d82f0)}, {TOBN(0x14928d2c, 0xfad414e4), TOBN(0x2b155f58, 0x2ed02b24), TOBN(0x481a141b, 0xcb821bf1), TOBN(0x12e3c770, 0x4f81f5da)}}, {{TOBN(0xe49c5de5, 0x9fff8381), TOBN(0x11053232, 0x5bbec894), TOBN(0xa0d051cc, 0x454d88c4), TOBN(0x4f6db89c, 0x1f8e531b)}, {TOBN(0x34fe3fd6, 0xca563a44), TOBN(0x7f5c2215, 0x58da8ab9), TOBN(0x8445016d, 0x9474f0a1), TOBN(0x17d34d61, 0xcb7d8a0a)}}, 
{{TOBN(0x8e9d3910, 0x1c474019), TOBN(0xcaff2629, 0xd52ceefb), TOBN(0xf9cf3e32, 0xc1622c2b), TOBN(0xd4b95e3c, 0xe9071a05)}, {TOBN(0xfbbca61f, 0x1594438c), TOBN(0x1eb6e6a6, 0x04aadedf), TOBN(0x853027f4, 0x68e14940), TOBN(0x221d322a, 0xdfabda9c)}}, {{TOBN(0xed8ea9f6, 0xb7cb179a), TOBN(0xdc7b764d, 0xb7934dcc), TOBN(0xfcb13940, 0x5e09180d), TOBN(0x6629a6bf, 0xb47dc2dd)}, {TOBN(0xbfc55e4e, 0x9f5a915e), TOBN(0xb1db9d37, 0x6204441e), TOBN(0xf82d68cf, 0x930c5f53), TOBN(0x17d3a142, 0xcbb605b1)}}, {{TOBN(0xdd5944ea, 0x308780f2), TOBN(0xdc8de761, 0x3845f5e4), TOBN(0x6beaba7d, 0x7624d7a3), TOBN(0x1e709afd, 0x304df11e)}, {TOBN(0x95364376, 0x02170456), TOBN(0xbf204b3a, 0xc8f94b64), TOBN(0x4e53af7c, 0x5680ca68), TOBN(0x0526074a, 0xe0c67574)}}, {{TOBN(0x95d8cef8, 0xecd92af6), TOBN(0xe6b9fa7a, 0x6cd1745a), TOBN(0x3d546d3d, 0xa325c3e4), TOBN(0x1f57691d, 0x9ae93aae)}, {TOBN(0xe891f3fe, 0x9d2e1a33), TOBN(0xd430093f, 0xac063d35), TOBN(0xeda59b12, 0x5513a327), TOBN(0xdc2134f3, 0x5536f18f)}}, {{TOBN(0xaa51fe2c, 0x5c210286), TOBN(0x3f68aaee, 0x1cab658c), TOBN(0x5a23a00b, 0xf9357292), TOBN(0x9a626f39, 0x7efdabed)}, {TOBN(0xfe2b3bf3, 0x199d78e3), TOBN(0xb7a2af77, 0x71bbc345), TOBN(0x3d19827a, 0x1e59802c), TOBN(0x823bbc15, 0xb487a51c)}}, {{TOBN(0x856139f2, 0x99d0a422), TOBN(0x9ac3df65, 0xf456c6fb), TOBN(0xaddf65c6, 0x701f8bd6), TOBN(0x149f321e, 0x3758df87)}, {TOBN(0xb1ecf714, 0x721b7eba), TOBN(0xe17df098, 0x31a3312a), TOBN(0xdb2fd6ec, 0xd5c4d581), TOBN(0xfd02996f, 0x8fcea1b3)}}, {{TOBN(0xe29fa63e, 0x7882f14f), TOBN(0xc9f6dc35, 0x07c6cadc), TOBN(0x46f22d6f, 0xb882bed0), TOBN(0x1a45755b, 0xd118e52c)}, {TOBN(0x9f2c7c27, 0x7c4608cf), TOBN(0x7ccbdf32, 0x568012c2), TOBN(0xfcb0aedd, 0x61729b0e), TOBN(0x7ca2ca9e, 0xf7d75dbf)}}, {{TOBN(0xf58fecb1, 0x6f640f62), TOBN(0xe274b92b, 0x39f51946), TOBN(0x7f4dfc04, 0x6288af44), TOBN(0x0a91f32a, 0xeac329e5)}, {TOBN(0x43ad274b, 0xd6aaba31), TOBN(0x719a1640, 0x0f6884f9), TOBN(0x685d29f6, 0xdaf91e20), TOBN(0x5ec1cc33, 0x27e49d52)}}, {{TOBN(0x38f4de96, 0x3b54a059), 
TOBN(0x0e0015e5, 0xefbcfdb3), TOBN(0x177d23d9, 0x4dbb8da6), TOBN(0x98724aa2, 0x97a617ad)}, {TOBN(0x30f0885b, 0xfdb6558e), TOBN(0xf9f7a28a, 0xc7899a96), TOBN(0xd2ae8ac8, 0x872dc112), TOBN(0xfa0642ca, 0x73c3c459)}}, {{TOBN(0x15296981, 0xe7dfc8d6), TOBN(0x67cd4450, 0x1fb5b94a), TOBN(0x0ec71cf1, 0x0eddfd37), TOBN(0xc7e5eeb3, 0x9a8eddc7)}, {TOBN(0x02ac8e3d, 0x81d95028), TOBN(0x0088f172, 0x70b0e35d), TOBN(0xec041fab, 0xe1881fe3), TOBN(0x62cf71b8, 0xd99e7faa)}}, {{TOBN(0x5043dea7, 0xe0f222c2), TOBN(0x309d42ac, 0x72e65142), TOBN(0x94fe9ddd, 0x9216cd30), TOBN(0xd6539c7d, 0x0f87feec)}, {TOBN(0x03c5a57c, 0x432ac7d7), TOBN(0x72692cf0, 0x327fda10), TOBN(0xec28c85f, 0x280698de), TOBN(0x2331fb46, 0x7ec283b1)}}, {{TOBN(0xd34bfa32, 0x2867e633), TOBN(0x78709a82, 0x0a9cc815), TOBN(0xb7fe6964, 0x875e2fa5), TOBN(0x25cc064f, 0x9e98bfb5)}, {TOBN(0x9eb0151c, 0x493a65c5), TOBN(0x5fb5d941, 0x53182464), TOBN(0x69e6f130, 0xf04618e2), TOBN(0xa8ecec22, 0xf89c8ab6)}}, {{TOBN(0xcd6ac88b, 0xb96209bd), TOBN(0x65fa8cdb, 0xb3e1c9e0), TOBN(0xa47d22f5, 0x4a8d8eac), TOBN(0x83895cdf, 0x8d33f963)}, {TOBN(0xa8adca59, 0xb56cd3d1), TOBN(0x10c8350b, 0xdaf38232), TOBN(0x2b161fb3, 0xa5080a9f), TOBN(0xbe7f5c64, 0x3af65b3a)}}, {{TOBN(0x2c754039, 0x97403a11), TOBN(0x94626cf7, 0x121b96af), TOBN(0x431de7c4, 0x6a983ec2), TOBN(0x3780dd3a, 0x52cc3df7)}, {TOBN(0xe28a0e46, 0x2baf8e3b), TOBN(0xabe68aad, 0x51d299ae), TOBN(0x603eb8f9, 0x647a2408), TOBN(0x14c61ed6, 0x5c750981)}}, {{TOBN(0x88b34414, 0xc53352e7), TOBN(0x5a34889c, 0x1337d46e), TOBN(0x612c1560, 0xf95f2bc8), TOBN(0x8a3f8441, 0xd4807a3a)}, {TOBN(0x680d9e97, 0x5224da68), TOBN(0x60cd6e88, 0xc3eb00e9), TOBN(0x3875a98e, 0x9a6bc375), TOBN(0xdc80f924, 0x4fd554c2)}}, {{TOBN(0x6c4b3415, 0x6ac77407), TOBN(0xa1e5ea8f, 0x25420681), TOBN(0x541bfa14, 0x4607a458), TOBN(0x5dbc7e7a, 0x96d7fbf9)}, {TOBN(0x646a851b, 0x31590a47), TOBN(0x039e85ba, 0x15ee6df8), TOBN(0xd19fa231, 0xd7b43fc0), TOBN(0x84bc8be8, 0x299a0e04)}}, {{TOBN(0x2b9d2936, 0xf20df03a), TOBN(0x24054382, 0x8608d472), 
TOBN(0x76b6ba04, 0x9149202a), TOBN(0xb21c3831, 0x3670e7b7)}, {TOBN(0xddd93059, 0xd6fdee10), TOBN(0x9da47ad3, 0x78488e71), TOBN(0x99cc1dfd, 0xa0fcfb25), TOBN(0x42abde10, 0x64696954)}}, {{TOBN(0x14cc15fc, 0x17eab9fe), TOBN(0xd6e863e4, 0xd3e70972), TOBN(0x29a7765c, 0x6432112c), TOBN(0x88660001, 0x5b0774d8)}, {TOBN(0x3729175a, 0x2c088eae), TOBN(0x13afbcae, 0x8230b8d4), TOBN(0x44768151, 0x915f4379), TOBN(0xf086431a, 0xd8d22812)}}, {{TOBN(0x37461955, 0xc298b974), TOBN(0x905fb5f0, 0xf8711e04), TOBN(0x787abf3a, 0xfe969d18), TOBN(0x392167c2, 0x6f6a494e)}, {TOBN(0xfc7a0d2d, 0x28c511da), TOBN(0xf127c7dc, 0xb66a262d), TOBN(0xf9c4bb95, 0xfd63fdf0), TOBN(0x90016589, 0x3913ef46)}}, {{TOBN(0x74d2a73c, 0x11aa600d), TOBN(0x2f5379bd, 0x9fb5ab52), TOBN(0xe49e53a4, 0x7fb70068), TOBN(0x68dd39e5, 0x404aa9a7)}, {TOBN(0xb9b0cf57, 0x2ecaa9c3), TOBN(0xba0e103b, 0xe824826b), TOBN(0x60c2198b, 0x4631a3c4), TOBN(0xc5ff84ab, 0xfa8966a2)}}, {{TOBN(0x2d6ebe22, 0xac95aff8), TOBN(0x1c9bb6db, 0xb5a46d09), TOBN(0x419062da, 0x53ee4f8d), TOBN(0x7b9042d0, 0xbb97efef)}, {TOBN(0x0f87f080, 0x830cf6bd), TOBN(0x4861d19a, 0x6ec8a6c6), TOBN(0xd3a0daa1, 0x202f01aa), TOBN(0xb0111674, 0xf25afbd5)}}, {{TOBN(0x6d00d6cf, 0x1afb20d9), TOBN(0x13695000, 0x40671bc5), TOBN(0x913ab0dc, 0x2485ea9b), TOBN(0x1f2bed06, 0x9eef61ac)}, {TOBN(0x850c8217, 0x6d799e20), TOBN(0x93415f37, 0x3271c2de), TOBN(0x5afb06e9, 0x6c4f5910), TOBN(0x688a52df, 0xc4e9e421)}}, {{TOBN(0x30495ba3, 0xe2a9a6db), TOBN(0x4601303d, 0x58f9268b), TOBN(0xbe3b0dad, 0x7eb0f04f), TOBN(0x4ea47250, 0x4456936d)}, {TOBN(0x8caf8798, 0xd33fd3e7), TOBN(0x1ccd8a89, 0xeb433708), TOBN(0x9effe3e8, 0x87fd50ad), TOBN(0xbe240a56, 0x6b29c4df)}}, {{TOBN(0xec4ffd98, 0xca0e7ebd), TOBN(0xf586783a, 0xe748616e), TOBN(0xa5b00d8f, 0xc77baa99), TOBN(0x0acada29, 0xb4f34c9c)}, {TOBN(0x36dad67d, 0x0fe723ac), TOBN(0x1d8e53a5, 0x39c36c1e), TOBN(0xe4dd342d, 0x1f4bea41), TOBN(0x64fd5e35, 0xebc9e4e0)}}, {{TOBN(0x96f01f90, 0x57908805), TOBN(0xb5b9ea3d, 0x5ed480dd), TOBN(0x366c5dc2, 0x3efd2dd0), 
TOBN(0xed2fe305, 0x6e9dfa27)}, {TOBN(0x4575e892, 0x6e9197e2), TOBN(0x11719c09, 0xab502a5d), TOBN(0x264c7bec, 0xe81f213f), TOBN(0x741b9241, 0x55f5c457)}}, {{TOBN(0x78ac7b68, 0x49a5f4f4), TOBN(0xf91d70a2, 0x9fc45b7d), TOBN(0x39b05544, 0xb0f5f355), TOBN(0x11f06bce, 0xeef930d9)}, {TOBN(0xdb84d25d, 0x038d05e1), TOBN(0x04838ee5, 0xbacc1d51), TOBN(0x9da3ce86, 0x9e8ee00b), TOBN(0xc3412057, 0xc36eda1f)}}, {{TOBN(0xae80b913, 0x64d9c2f4), TOBN(0x7468bac3, 0xa010a8ff), TOBN(0xdfd20037, 0x37359d41), TOBN(0x1a0f5ab8, 0x15efeacc)}, {TOBN(0x7c25ad2f, 0x659d0ce0), TOBN(0x4011bcbb, 0x6785cff1), TOBN(0x128b9912, 0x7e2192c7), TOBN(0xa549d8e1, 0x13ccb0e8)}}, {{TOBN(0x805588d8, 0xc85438b1), TOBN(0x5680332d, 0xbc25cb27), TOBN(0xdcd1bc96, 0x1a4bfdf4), TOBN(0x779ff428, 0x706f6566)}, {TOBN(0x8bbee998, 0xf059987a), TOBN(0xf6ce8cf2, 0xcc686de7), TOBN(0xf8ad3c4a, 0x953cfdb2), TOBN(0xd1d426d9, 0x2205da36)}}, {{TOBN(0xb3c0f13f, 0xc781a241), TOBN(0x3e89360e, 0xd75362a8), TOBN(0xccd05863, 0xc8a91184), TOBN(0x9bd0c9b7, 0xefa8a7f4)}, {TOBN(0x97ee4d53, 0x8a912a4b), TOBN(0xde5e15f8, 0xbcf518fd), TOBN(0x6a055bf8, 0xc467e1e0), TOBN(0x10be4b4b, 0x1587e256)}}, {{TOBN(0xd90c14f2, 0x668621c9), TOBN(0xd5518f51, 0xab9c92c1), TOBN(0x8e6a0100, 0xd6d47b3c), TOBN(0xcbe980dd, 0x66716175)}, {TOBN(0x500d3f10, 0xddd83683), TOBN(0x3b6cb35d, 0x99cac73c), TOBN(0x53730c8b, 0x6083d550), TOBN(0xcf159767, 0xdf0a1987)}}, {{TOBN(0x84bfcf53, 0x43ad73b3), TOBN(0x1b528c20, 0x4f035a94), TOBN(0x4294edf7, 0x33eeac69), TOBN(0xb6283e83, 0x817f3240)}, {TOBN(0xc3fdc959, 0x0a5f25b1), TOBN(0xefaf8aa5, 0x5844ee22), TOBN(0xde269ba5, 0xdbdde4de), TOBN(0xe3347160, 0xc56133bf)}}, {{TOBN(0xc1184219, 0x8d9ea9f8), TOBN(0x090de5db, 0xf3fc1ab5), TOBN(0x404c37b1, 0x0bf22cda), TOBN(0x7de20ec8, 0xf5618894)}, {TOBN(0x754c588e, 0xecdaecab), TOBN(0x6ca4b0ed, 0x88342743), TOBN(0x76f08bdd, 0xf4a938ec), TOBN(0xd182de89, 0x91493ccb)}}, {{TOBN(0xd652c53e, 0xc8a4186a), TOBN(0xb3e878db, 0x946d8e33), TOBN(0x088453c0, 0x5f37663c), TOBN(0x5cd9daaa, 0xb407748b)}, 
{TOBN(0xa1f5197f, 0x586d5e72), TOBN(0x47500be8, 0xc443ca59), TOBN(0x78ef35b2, 0xe2652424), TOBN(0x09c5d26f, 0x6dd7767d)}}, {{TOBN(0x7175a79a, 0xa74d3f7b), TOBN(0x0428fd8d, 0xcf5ea459), TOBN(0x511cb97c, 0xa5d1746d), TOBN(0x36363939, 0xe71d1278)}, {TOBN(0xcf2df955, 0x10350bf4), TOBN(0xb3817439, 0x60aae782), TOBN(0xa748c0e4, 0x3e688809), TOBN(0x98021fbf, 0xd7a5a006)}}, {{TOBN(0x9076a70c, 0x0e367a98), TOBN(0xbea1bc15, 0x0f62b7c2), TOBN(0x2645a68c, 0x30fe0343), TOBN(0xacaffa78, 0x699dc14f)}, {TOBN(0xf4469964, 0x457bf9c4), TOBN(0x0db6407b, 0x0d2ead83), TOBN(0x68d56cad, 0xb2c6f3eb), TOBN(0x3b512e73, 0xf376356c)}}, {{TOBN(0xe43b0e1f, 0xfce10408), TOBN(0x89ddc003, 0x5a5e257d), TOBN(0xb0ae0d12, 0x0362e5b3), TOBN(0x07f983c7, 0xb0519161)}, {TOBN(0xc2e94d15, 0x5d5231e7), TOBN(0xcff22aed, 0x0b4f9513), TOBN(0xb02588dd, 0x6ad0b0b5), TOBN(0xb967d1ac, 0x11d0dcd5)}}, {{TOBN(0x8dac6bc6, 0xcf777b6c), TOBN(0x0062bdbd, 0x4c6d1959), TOBN(0x53da71b5, 0x0ef5cc85), TOBN(0x07012c7d, 0x4006f14f)}, {TOBN(0x4617f962, 0xac47800d), TOBN(0x53365f2b, 0xc102ed75), TOBN(0xb422efcb, 0x4ab8c9d3), TOBN(0x195cb26b, 0x34af31c9)}}, {{TOBN(0x3a926e29, 0x05f2c4ce), TOBN(0xbd2bdecb, 0x9856966c), TOBN(0x5d16ab3a, 0x85527015), TOBN(0x9f81609e, 0x4486c231)}, {TOBN(0xd8b96b2c, 0xda350002), TOBN(0xbd054690, 0xfa1b7d36), TOBN(0xdc90ebf5, 0xe71d79bc), TOBN(0xf241b6f9, 0x08964e4e)}}, {{TOBN(0x7c838643, 0x2fe3cd4c), TOBN(0xe0f33acb, 0xb4bc633c), TOBN(0xb4a9ecec, 0x3d139f1f), TOBN(0x05ce69cd, 0xdc4a1f49)}, {TOBN(0xa19d1b16, 0xf5f98aaf), TOBN(0x45bb71d6, 0x6f23e0ef), TOBN(0x33789fcd, 0x46cdfdd3), TOBN(0x9b8e2978, 0xcee040ca)}}, {{TOBN(0x9c69b246, 0xae0a6828), TOBN(0xba533d24, 0x7078d5aa), TOBN(0x7a2e42c0, 0x7bb4fbdb), TOBN(0xcfb4879a, 0x7035385c)}, {TOBN(0x8c3dd30b, 0x3281705b), TOBN(0x7e361c6c, 0x404fe081), TOBN(0x7b21649c, 0x3f604edf), TOBN(0x5dbf6a3f, 0xe52ffe47)}}, {{TOBN(0xc41b7c23, 0x4b54d9bf), TOBN(0x1374e681, 0x3511c3d9), TOBN(0x1863bf16, 0xc1b2b758), TOBN(0x90e78507, 0x1e9e6a96)}, {TOBN(0xab4bf98d, 0x5d86f174), 
TOBN(0xd74e0bd3, 0x85e96fe4), TOBN(0x8afde39f, 0xcac5d344), TOBN(0x90946dbc, 0xbd91b847)}}, {{TOBN(0xf5b42358, 0xfe1a838c), TOBN(0x05aae6c5, 0x620ac9d8), TOBN(0x8e193bd8, 0xa1ce5a0b), TOBN(0x8f710571, 0x4dabfd72)}, {TOBN(0x8d8fdd48, 0x182caaac), TOBN(0x8c4aeefa, 0x040745cf), TOBN(0x73c6c30a, 0xf3b93e6d), TOBN(0x991241f3, 0x16f42011)}}, {{TOBN(0xa0158eea, 0xe457a477), TOBN(0xd19857db, 0xee6ddc05), TOBN(0xb3265224, 0x18c41671), TOBN(0x3ffdfc7e, 0x3c2c0d58)}, {TOBN(0x3a3a5254, 0x26ee7cda), TOBN(0x341b0869, 0xdf02c3a8), TOBN(0xa023bf42, 0x723bbfc8), TOBN(0x3d15002a, 0x14452691)}}}, {{{TOBN(0x5ef7324c, 0x85edfa30), TOBN(0x25976554, 0x87d4f3da), TOBN(0x352f5bc0, 0xdcb50c86), TOBN(0x8f6927b0, 0x4832a96c)}, {TOBN(0xd08ee1ba, 0x55f2f94c), TOBN(0x6a996f99, 0x344b45fa), TOBN(0xe133cb8d, 0xa8aa455d), TOBN(0x5d0721ec, 0x758dc1f7)}}, {{TOBN(0x6ba7a920, 0x79e5fb67), TOBN(0xe1331feb, 0x70aa725e), TOBN(0x5080ccf5, 0x7df5d837), TOBN(0xe4cae01d, 0x7ff72e21)}, {TOBN(0xd9243ee6, 0x0412a77d), TOBN(0x06ff7cac, 0xdf449025), TOBN(0xbe75f7cd, 0x23ef5a31), TOBN(0xbc957822, 0x0ddef7a8)}}, {{TOBN(0x8cf7230c, 0xb0ce1c55), TOBN(0x5b534d05, 0x0bbfb607), TOBN(0xee1ef113, 0x0e16363b), TOBN(0x27e0aa7a, 0xb4999e82)}, {TOBN(0xce1dac2d, 0x79362c41), TOBN(0x67920c90, 0x91bb6cb0), TOBN(0x1e648d63, 0x2223df24), TOBN(0x0f7d9eef, 0xe32e8f28)}}, {{TOBN(0x6943f39a, 0xfa833834), TOBN(0x22951722, 0xa6328562), TOBN(0x81d63dd5, 0x4170fc10), TOBN(0x9f5fa58f, 0xaecc2e6d)}, {TOBN(0xb66c8725, 0xe77d9a3b), TOBN(0x11235cea, 0x6384ebe0), TOBN(0x06a8c118, 0x5845e24a), TOBN(0x0137b286, 0xebd093b1)}}, {{TOBN(0xc589e1ce, 0x44ace150), TOBN(0xe0f8d3d9, 0x4381e97c), TOBN(0x59e99b11, 0x62c5a4b8), TOBN(0x90d262f7, 0xfd0ec9f9)}, {TOBN(0xfbc854c9, 0x283e13c9), TOBN(0x2d04fde7, 0xaedc7085), TOBN(0x057d7765, 0x47dcbecb), TOBN(0x8dbdf591, 0x9a76fa5f)}}, {{TOBN(0xd0150695, 0x0de1e578), TOBN(0x2e1463e7, 0xe9f72bc6), TOBN(0xffa68441, 0x1b39eca5), TOBN(0x673c8530, 0x7c037f2f)}, {TOBN(0xd0d6a600, 0x747f91da), TOBN(0xb08d43e1, 0xc9cb78e9), 
TOBN(0x0fc0c644, 0x27b5cef5), TOBN(0x5c1d160a, 0xa60a2fd6)}}, {{TOBN(0xf98cae53, 0x28c8e13b), TOBN(0x375f10c4, 0xb2eddcd1), TOBN(0xd4eb8b7f, 0x5cce06ad), TOBN(0xb4669f45, 0x80a2e1ef)}, {TOBN(0xd593f9d0, 0x5bbd8699), TOBN(0x5528a4c9, 0xe7976d13), TOBN(0x3923e095, 0x1c7e28d3), TOBN(0xb9293790, 0x3f6bb577)}}, {{TOBN(0xdb567d6a, 0xc42bd6d2), TOBN(0x6df86468, 0xbb1f96ae), TOBN(0x0efe5b1a, 0x4843b28e), TOBN(0x961bbb05, 0x6379b240)}, {TOBN(0xb6caf5f0, 0x70a6a26b), TOBN(0x70686c0d, 0x328e6e39), TOBN(0x80da06cf, 0x895fc8d3), TOBN(0x804d8810, 0xb363fdc9)}}, {{TOBN(0xbe22877b, 0x207f1670), TOBN(0x9b0dd188, 0x4e615291), TOBN(0x625ae8dc, 0x97a3c2bf), TOBN(0x08584ef7, 0x439b86e8)}, {TOBN(0xde7190a5, 0xdcd898ff), TOBN(0x26286c40, 0x2058ee3d), TOBN(0x3db0b217, 0x5f87b1c1), TOBN(0xcc334771, 0x102a6db5)}}, {{TOBN(0xd99de954, 0x2f770fb1), TOBN(0x97c1c620, 0x4cd7535e), TOBN(0xd3b6c448, 0x3f09cefc), TOBN(0xd725af15, 0x5a63b4f8)}, {TOBN(0x0c95d24f, 0xc01e20ec), TOBN(0xdfd37494, 0x9ae7121f), TOBN(0x7d6ddb72, 0xec77b7ec), TOBN(0xfe079d3b, 0x0353a4ae)}}, {{TOBN(0x3066e70a, 0x2e6ac8d2), TOBN(0x9c6b5a43, 0x106e5c05), TOBN(0x52d3c6f5, 0xede59b8c), TOBN(0x30d6a5c3, 0xfccec9ae)}, {TOBN(0xedec7c22, 0x4fc0a9ef), TOBN(0x190ff083, 0x95c16ced), TOBN(0xbe12ec8f, 0x94de0fde), TOBN(0x0d131ab8, 0x852d3433)}}, {{TOBN(0x42ace07e, 0x85701291), TOBN(0x94793ed9, 0x194061a8), TOBN(0x30e83ed6, 0xd7f4a485), TOBN(0x9eec7269, 0xf9eeff4d)}, {TOBN(0x90acba59, 0x0c9d8005), TOBN(0x5feca458, 0x1e79b9d1), TOBN(0x8fbe5427, 0x1d506a1e), TOBN(0xa32b2c8e, 0x2439cfa7)}}, {{TOBN(0x1671c173, 0x73dd0b4e), TOBN(0x37a28214, 0x44a054c6), TOBN(0x81760a1b, 0x4e8b53f1), TOBN(0xa6c04224, 0xf9f93b9e)}, {TOBN(0x18784b34, 0xcf671e3c), TOBN(0x81bbecd2, 0xcda9b994), TOBN(0x38831979, 0xb2ab3848), TOBN(0xef54feb7, 0xf2e03c2d)}}, {{TOBN(0xcf197ca7, 0xfb8088fa), TOBN(0x01427247, 0x4ddc96c5), TOBN(0xa2d2550a, 0x30777176), TOBN(0x53469898, 0x4d0cf71d)}, {TOBN(0x6ce937b8, 0x3a2aaac6), TOBN(0xe9f91dc3, 0x5af38d9b), TOBN(0x2598ad83, 0xc8bf2899), 
TOBN(0x8e706ac9, 0xb5536c16)}}, {{TOBN(0x40dc7495, 0xf688dc98), TOBN(0x26490cd7, 0x124c4afc), TOBN(0xe651ec84, 0x1f18775c), TOBN(0x393ea6c3, 0xb4fdaf4a)}, {TOBN(0x1e1f3343, 0x7f338e0d), TOBN(0x39fb832b, 0x6053e7b5), TOBN(0x46e702da, 0x619e14d5), TOBN(0x859cacd1, 0xcdeef6e0)}}, {{TOBN(0x63b99ce7, 0x4462007d), TOBN(0xb8ab48a5, 0x4cb5f5b7), TOBN(0x9ec673d2, 0xf55edde7), TOBN(0xd1567f74, 0x8cfaefda)}, {TOBN(0x46381b6b, 0x0887bcec), TOBN(0x694497ce, 0xe178f3c2), TOBN(0x5e6525e3, 0x1e6266cb), TOBN(0x5931de26, 0x697d6413)}}, {{TOBN(0x87f8df7c, 0x0e58d493), TOBN(0xb1ae5ed0, 0x58b73f12), TOBN(0xc368f784, 0xdea0c34d), TOBN(0x9bd0a120, 0x859a91a0)}, {TOBN(0xb00d88b7, 0xcc863c68), TOBN(0x3a1cc11e, 0x3d1f4d65), TOBN(0xea38e0e7, 0x0aa85593), TOBN(0x37f13e98, 0x7dc4aee8)}}, {{TOBN(0x10d38667, 0xbc947bad), TOBN(0x738e07ce, 0x2a36ee2e), TOBN(0xc93470cd, 0xc577fcac), TOBN(0xdee1b616, 0x2782470d)}, {TOBN(0x36a25e67, 0x2e793d12), TOBN(0xd6aa6cae, 0xe0f186da), TOBN(0x474d0fd9, 0x80e07af7), TOBN(0xf7cdc47d, 0xba8a5cd4)}}, {{TOBN(0x28af6d9d, 0xab15247f), TOBN(0x7c789c10, 0x493a537f), TOBN(0x7ac9b110, 0x23a334e7), TOBN(0x0236ac09, 0x12c9c277)}, {TOBN(0xa7e5bd25, 0x1d7a5144), TOBN(0x098b9c2a, 0xf13ec4ec), TOBN(0x3639daca, 0xd3f0abca), TOBN(0x642da81a, 0xa23960f9)}}, {{TOBN(0x7d2e5c05, 0x4f7269b1), TOBN(0xfcf30777, 0xe287c385), TOBN(0x10edc84f, 0xf2a46f21), TOBN(0x35441757, 0x4f43fa36)}, {TOBN(0xf1327899, 0xfd703431), TOBN(0xa438d7a6, 0x16dd587a), TOBN(0x65c34c57, 0xe9c8352d), TOBN(0xa728edab, 0x5cc5a24e)}}, {{TOBN(0xaed78abc, 0x42531689), TOBN(0x0a51a0e8, 0x010963ef), TOBN(0x5776fa0a, 0xd717d9b3), TOBN(0xf356c239, 0x7dd3428b)}, {TOBN(0x29903fff, 0x8d3a3dac), TOBN(0x409597fa, 0x3d94491f), TOBN(0x4cd7a5ff, 0xbf4a56a4), TOBN(0xe5096474, 0x8adab462)}}, {{TOBN(0xa97b5126, 0x5c3427b0), TOBN(0x6401405c, 0xd282c9bd), TOBN(0x3629f8d7, 0x222c5c45), TOBN(0xb1c02c16, 0xe8d50aed)}, {TOBN(0xbea2ed75, 0xd9635bc9), TOBN(0x226790c7, 0x6e24552f), TOBN(0x3c33f2a3, 0x65f1d066), TOBN(0x2a43463e, 0x6dfccc2e)}}, 
{{TOBN(0x8cc3453a, 0xdb483761), TOBN(0xe7cc6085, 0x65d5672b), TOBN(0x277ed6cb, 0xde3efc87), TOBN(0x19f2f368, 0x69234eaf)}, {TOBN(0x9aaf4317, 0x5c0b800b), TOBN(0x1f1e7c89, 0x8b6da6e2), TOBN(0x6cfb4715, 0xb94ec75e), TOBN(0xd590dd5f, 0x453118c2)}}, {{TOBN(0x14e49da1, 0x1f17a34c), TOBN(0x5420ab39, 0x235a1456), TOBN(0xb7637241, 0x2f50363b), TOBN(0x7b15d623, 0xc3fabb6e)}, {TOBN(0xa0ef40b1, 0xe274e49c), TOBN(0x5cf50744, 0x96b1860a), TOBN(0xd6583fbf, 0x66afe5a4), TOBN(0x44240510, 0xf47e3e9a)}}, {{TOBN(0x99254343, 0x11b2d595), TOBN(0xf1367499, 0xeec8df57), TOBN(0x3cb12c61, 0x3e73dd05), TOBN(0xd248c033, 0x7dac102a)}, {TOBN(0xcf154f13, 0xa77739f5), TOBN(0xbf4288cb, 0x23d2af42), TOBN(0xaa64c9b6, 0x32e4a1cf), TOBN(0xee8c07a8, 0xc8a208f3)}}, {{TOBN(0xe10d4999, 0x6fe8393f), TOBN(0x0f809a3f, 0xe91f3a32), TOBN(0x61096d1c, 0x802f63c8), TOBN(0x289e1462, 0x57750d3d)}, {TOBN(0xed06167e, 0x9889feea), TOBN(0xd5c9c0e2, 0xe0993909), TOBN(0x46fca0d8, 0x56508ac6), TOBN(0x91826047, 0x4f1b8e83)}}, {{TOBN(0x4f2c877a, 0x9a4a2751), TOBN(0x71bd0072, 0xcae6fead), TOBN(0x38df8dcc, 0x06aa1941), TOBN(0x5a074b4c, 0x63beeaa8)}, {TOBN(0xd6d65934, 0xc1cec8ed), TOBN(0xa6ecb49e, 0xaabc03bd), TOBN(0xaade91c2, 0xde8a8415), TOBN(0xcfb0efdf, 0x691136e0)}}, {{TOBN(0x11af45ee, 0x23ab3495), TOBN(0xa132df88, 0x0b77463d), TOBN(0x8923c15c, 0x815d06f4), TOBN(0xc3ceb3f5, 0x0d61a436)}, {TOBN(0xaf52291d, 0xe88fb1da), TOBN(0xea057974, 0x1da12179), TOBN(0xb0d7218c, 0xd2fef720), TOBN(0x6c0899c9, 0x8e1d8845)}}, {{TOBN(0x98157504, 0x752ddad7), TOBN(0xd60bd74f, 0xa1a68a97), TOBN(0x7047a3a9, 0xf658fb99), TOBN(0x1f5d86d6, 0x5f8511e4)}, {TOBN(0xb8a4bc42, 0x4b5a6d88), TOBN(0x69eb2c33, 0x1abefa7d), TOBN(0x95bf39e8, 0x13c9c510), TOBN(0xf571960a, 0xd48aab43)}}, {{TOBN(0x7e8cfbcf, 0x704e23c6), TOBN(0xc71b7d22, 0x28aaa65b), TOBN(0xa041b2bd, 0x245e3c83), TOBN(0x69b98834, 0xd21854ff)}, {TOBN(0x89d227a3, 0x963bfeec), TOBN(0x99947aaa, 0xde7da7cb), TOBN(0x1d9ee9db, 0xee68a9b1), TOBN(0x0a08f003, 0x698ec368)}}, {{TOBN(0xe9ea4094, 0x78ef2487), 
TOBN(0xc8d2d415, 0x02cfec26), TOBN(0xc52f9a6e, 0xb7dcf328), TOBN(0x0ed489e3, 0x85b6a937)}, {TOBN(0x9b94986b, 0xbef3366e), TOBN(0x0de59c70, 0xedddddb8), TOBN(0xffdb748c, 0xeadddbe2), TOBN(0x9b9784bb, 0x8266ea40)}}, {{TOBN(0x142b5502, 0x1a93507a), TOBN(0xb4cd1187, 0x8d3c06cf), TOBN(0xdf70e76a, 0x91ec3f40), TOBN(0x484e81ad, 0x4e7553c2)}, {TOBN(0x830f87b5, 0x272e9d6e), TOBN(0xea1c93e5, 0xc6ff514a), TOBN(0x67cc2adc, 0xc4192a8e), TOBN(0xc77e27e2, 0x42f4535a)}}, {{TOBN(0x9cdbab36, 0xd2b713c5), TOBN(0x86274ea0, 0xcf7b0cd3), TOBN(0x784680f3, 0x09af826b), TOBN(0xbfcc837a, 0x0c72dea3)}, {TOBN(0xa8bdfe9d, 0xd6529b73), TOBN(0x708aa228, 0x63a88002), TOBN(0x6c7a9a54, 0xc91d45b9), TOBN(0xdf1a38bb, 0xfd004f56)}}, {{TOBN(0x2e8c9a26, 0xb8bad853), TOBN(0x2d52cea3, 0x3723eae7), TOBN(0x054d6d81, 0x56ca2830), TOBN(0xa3317d14, 0x9a8dc411)}, {TOBN(0xa08662fe, 0xfd4ddeda), TOBN(0xed2a153a, 0xb55d792b), TOBN(0x7035c16a, 0xbfc6e944), TOBN(0xb6bc5834, 0x00171cf3)}}, {{TOBN(0xe27152b3, 0x83d102b6), TOBN(0xfe695a47, 0x0646b848), TOBN(0xa5bb09d8, 0x916e6d37), TOBN(0xb4269d64, 0x0d17015e)}, {TOBN(0x8d8156a1, 0x0a1d2285), TOBN(0xfeef6c51, 0x46d26d72), TOBN(0x9dac57c8, 0x4c5434a7), TOBN(0x0282e5be, 0x59d39e31)}}, {{TOBN(0xedfff181, 0x721c486d), TOBN(0x301baf10, 0xbc58824e), TOBN(0x8136a6aa, 0x00570031), TOBN(0x55aaf78c, 0x1cddde68)}, {TOBN(0x26829371, 0x59c63952), TOBN(0x3a3bd274, 0x8bc25baf), TOBN(0xecdf8657, 0xb7e52dc3), TOBN(0x2dd8c087, 0xfd78e6c8)}}, {{TOBN(0x20553274, 0xf5531461), TOBN(0x8b4a1281, 0x5d95499b), TOBN(0xe2c8763a, 0x1a80f9d2), TOBN(0xd1dbe32b, 0x4ddec758)}, {TOBN(0xaf12210d, 0x30c34169), TOBN(0xba74a953, 0x78baa533), TOBN(0x3d133c6e, 0xa438f254), TOBN(0xa431531a, 0x201bef5b)}}, {{TOBN(0x15295e22, 0xf669d7ec), TOBN(0xca374f64, 0x357fb515), TOBN(0x8a8406ff, 0xeaa3fdb3), TOBN(0x106ae448, 0xdf3f2da8)}, {TOBN(0x8f9b0a90, 0x33c8e9a1), TOBN(0x234645e2, 0x71ad5885), TOBN(0x3d083224, 0x1c0aed14), TOBN(0xf10a7d3e, 0x7a942d46)}}, {{TOBN(0x7c11deee, 0x40d5c9be), TOBN(0xb2bae7ff, 0xba84ed98), 
TOBN(0x93e97139, 0xaad58ddd), TOBN(0x3d872796, 0x3f6d1fa3)}, {TOBN(0x483aca81, 0x8569ff13), TOBN(0x8b89a5fb, 0x9a600f72), TOBN(0x4cbc27c3, 0xc06f2b86), TOBN(0x22130713, 0x63ad9c0b)}}, {{TOBN(0xb5358b1e, 0x48ac2840), TOBN(0x18311294, 0xecba9477), TOBN(0xda58f990, 0xa6946b43), TOBN(0x3098baf9, 0x9ab41819)}, {TOBN(0x66c4c158, 0x4198da52), TOBN(0xab4fc17c, 0x146bfd1b), TOBN(0x2f0a4c3c, 0xbf36a908), TOBN(0x2ae9e34b, 0x58cf7838)}}, {{TOBN(0xf411529e, 0x3fa11b1f), TOBN(0x21e43677, 0x974af2b4), TOBN(0x7c20958e, 0xc230793b), TOBN(0x710ea885, 0x16e840f3)}, {TOBN(0xfc0b21fc, 0xc5dc67cf), TOBN(0x08d51647, 0x88405718), TOBN(0xd955c21f, 0xcfe49eb7), TOBN(0x9722a5d5, 0x56dd4a1f)}}, {{TOBN(0xc9ef50e2, 0xc861baa5), TOBN(0xc0c21a5d, 0x9505ac3e), TOBN(0xaf6b9a33, 0x8b7c063f), TOBN(0xc6370339, 0x2f4779c1)}, {TOBN(0x22df99c7, 0x638167c3), TOBN(0xfe6ffe76, 0x795db30c), TOBN(0x2b822d33, 0xa4854989), TOBN(0xfef031dd, 0x30563aa5)}}, {{TOBN(0x16b09f82, 0xd57c667f), TOBN(0xc70312ce, 0xcc0b76f1), TOBN(0xbf04a9e6, 0xc9118aec), TOBN(0x82fcb419, 0x3409d133)}, {TOBN(0x1a8ab385, 0xab45d44d), TOBN(0xfba07222, 0x617b83a3), TOBN(0xb05f50dd, 0x58e81b52), TOBN(0x1d8db553, 0x21ce5aff)}}, {{TOBN(0x3097b8d4, 0xe344a873), TOBN(0x7d8d116d, 0xfe36d53e), TOBN(0x6db22f58, 0x7875e750), TOBN(0x2dc5e373, 0x43e144ea)}, {TOBN(0xc05f32e6, 0xe799eb95), TOBN(0xe9e5f4df, 0x6899e6ec), TOBN(0xbdc3bd68, 0x1fab23d5), TOBN(0xb72b8ab7, 0x73af60e6)}}, {{TOBN(0x8db27ae0, 0x2cecc84a), TOBN(0x600016d8, 0x7bdb871c), TOBN(0x42a44b13, 0xd7c46f58), TOBN(0xb8919727, 0xc3a77d39)}, {TOBN(0xcfc6bbbd, 0xdafd6088), TOBN(0x1a740146, 0x6bd20d39), TOBN(0x8c747abd, 0x98c41072), TOBN(0x4c91e765, 0xbdf68ea1)}}, {{TOBN(0x7c95e5ca, 0x08819a78), TOBN(0xcf48b729, 0xc9587921), TOBN(0x091c7c5f, 0xdebbcc7d), TOBN(0x6f287404, 0xf0e05149)}, {TOBN(0xf83b5ac2, 0x26cd44ec), TOBN(0x88ae32a6, 0xcfea250e), TOBN(0x6ac5047a, 0x1d06ebc5), TOBN(0xc7e550b4, 0xd434f781)}}, {{TOBN(0x61ab1cf2, 0x5c727bd2), TOBN(0x2e4badb1, 0x1cf915b0), TOBN(0x1b4dadec, 0xf69d3920), 
TOBN(0xe61b1ca6, 0xf14c1dfe)}, {TOBN(0x90b479cc, 0xbd6bd51f), TOBN(0x8024e401, 0x8045ec30), TOBN(0xcab29ca3, 0x25ef0e62), TOBN(0x4f2e9416, 0x49e4ebc0)}}, {{TOBN(0x45eb40ec, 0x0ccced58), TOBN(0x25cd4b9c, 0x0da44f98), TOBN(0x43e06458, 0x871812c6), TOBN(0x99f80d55, 0x16cef651)}, {TOBN(0x571340c9, 0xce6dc153), TOBN(0x138d5117, 0xd8665521), TOBN(0xacdb45bc, 0x4e07014d), TOBN(0x2f34bb38, 0x84b60b91)}}, {{TOBN(0xf44a4fd2, 0x2ae8921e), TOBN(0xb039288e, 0x892ba1e2), TOBN(0x9da50174, 0xb1c180b2), TOBN(0x6b70ab66, 0x1693dc87)}, {TOBN(0x7e9babc9, 0xe7057481), TOBN(0x4581ddef, 0x9c80dc41), TOBN(0x0c890da9, 0x51294682), TOBN(0x0b5629d3, 0x3f4736e5)}}, {{TOBN(0x2340c79e, 0xb06f5b41), TOBN(0xa42e84ce, 0x4e243469), TOBN(0xf9a20135, 0x045a71a9), TOBN(0xefbfb415, 0xd27b6fb6)}, {TOBN(0x25ebea23, 0x9d33cd6f), TOBN(0x9caedb88, 0xaa6c0af8), TOBN(0x53dc7e9a, 0xd9ce6f96), TOBN(0x3897f9fd, 0x51e0b15a)}}, {{TOBN(0xf51cb1f8, 0x8e5d788e), TOBN(0x1aec7ba8, 0xe1d490ee), TOBN(0x265991e0, 0xcc58cb3c), TOBN(0x9f306e8c, 0x9fc3ad31)}, {TOBN(0x5fed006e, 0x5040a0ac), TOBN(0xca9d5043, 0xfb476f2e), TOBN(0xa19c06e8, 0xbeea7a23), TOBN(0xd2865801, 0x0edabb63)}}, {{TOBN(0xdb92293f, 0x6967469a), TOBN(0x2894d839, 0x8d8a8ed8), TOBN(0x87c9e406, 0xbbc77122), TOBN(0x8671c6f1, 0x2ea3a26a)}, {TOBN(0xe42df8d6, 0xd7de9853), TOBN(0x2e3ce346, 0xb1f2bcc7), TOBN(0xda601dfc, 0x899d50cf), TOBN(0xbfc913de, 0xfb1b598f)}}, {{TOBN(0x81c4909f, 0xe61f7908), TOBN(0x192e304f, 0x9bbc7b29), TOBN(0xc3ed8738, 0xc104b338), TOBN(0xedbe9e47, 0x783f5d61)}, {TOBN(0x0c06e9be, 0x2db30660), TOBN(0xda3e613f, 0xc0eb7d8e), TOBN(0xd8fa3e97, 0x322e096e), TOBN(0xfebd91e8, 0xd336e247)}}, {{TOBN(0x8f13ccc4, 0xdf655a49), TOBN(0xa9e00dfc, 0x5eb20210), TOBN(0x84631d0f, 0xc656b6ea), TOBN(0x93a058cd, 0xd8c0d947)}, {TOBN(0x6846904a, 0x67bd3448), TOBN(0x4a3d4e1a, 0xf394fd5c), TOBN(0xc102c1a5, 0xdb225f52), TOBN(0xe3455bba, 0xfc4f5e9a)}}, {{TOBN(0x6b36985b, 0x4b9ad1ce), TOBN(0xa9818536, 0x5bb7f793), TOBN(0x6c25e1d0, 0x48b1a416), TOBN(0x1381dd53, 0x3c81bee7)}, 
{TOBN(0xd2a30d61, 0x7a4a7620), TOBN(0xc8412926, 0x39b8944c), TOBN(0x3c1c6fbe, 0x7a97c33a), TOBN(0x941e541d, 0x938664e7)}}, {{TOBN(0x417499e8, 0x4a34f239), TOBN(0x15fdb83c, 0xb90402d5), TOBN(0xb75f46bf, 0x433aa832), TOBN(0xb61e15af, 0x63215db1)}, {TOBN(0xaabe59d4, 0xa127f89a), TOBN(0x5d541e0c, 0x07e816da), TOBN(0xaaba0659, 0xa618b692), TOBN(0x55327733, 0x17266026)}}, {{TOBN(0xaf53a0fc, 0x95f57552), TOBN(0x32947650, 0x6cacb0c9), TOBN(0x253ff58d, 0xc821be01), TOBN(0xb0309531, 0xa06f1146)}, {TOBN(0x59bbbdf5, 0x05c2e54d), TOBN(0x158f27ad, 0x26e8dd22), TOBN(0xcc5b7ffb, 0x397e1e53), TOBN(0xae03f65b, 0x7fc1e50d)}}, {{TOBN(0xa9784ebd, 0x9c95f0f9), TOBN(0x5ed9deb2, 0x24640771), TOBN(0x31244af7, 0x035561c4), TOBN(0x87332f3a, 0x7ee857de)}, {TOBN(0x09e16e9e, 0x2b9e0d88), TOBN(0x52d910f4, 0x56a06049), TOBN(0x507ed477, 0xa9592f48), TOBN(0x85cb917b, 0x2365d678)}}, {{TOBN(0xf8511c93, 0x4c8998d1), TOBN(0x2186a3f1, 0x730ea58f), TOBN(0x50189626, 0xb2029db0), TOBN(0x9137a6d9, 0x02ceb75a)}, {TOBN(0x2fe17f37, 0x748bc82c), TOBN(0x87c2e931, 0x80469f8c), TOBN(0x850f71cd, 0xbf891aa2), TOBN(0x0ca1b89b, 0x75ec3d8d)}}, {{TOBN(0x516c43aa, 0x5e1cd3cd), TOBN(0x89397808, 0x9a887c28), TOBN(0x0059c699, 0xddea1f9f), TOBN(0x7737d6fa, 0x8e6868f7)}, {TOBN(0x6d93746a, 0x60f1524b), TOBN(0x36985e55, 0xba052aa7), TOBN(0x41b1d322, 0xed923ea5), TOBN(0x3429759f, 0x25852a11)}}, {{TOBN(0xbeca6ec3, 0x092e9f41), TOBN(0x3a238c66, 0x62256bbd), TOBN(0xd82958ea, 0x70ad487d), TOBN(0x4ac8aaf9, 0x65610d93)}, {TOBN(0x3fa101b1, 0x5e4ccab0), TOBN(0x9bf430f2, 0x9de14bfb), TOBN(0xa10f5cc6, 0x6531899d), TOBN(0x590005fb, 0xea8ce17d)}}, {{TOBN(0xc437912f, 0x24544cb6), TOBN(0x9987b71a, 0xd79ac2e3), TOBN(0x13e3d9dd, 0xc058a212), TOBN(0x00075aac, 0xd2de9606)}, {TOBN(0x80ab508b, 0x6cac8369), TOBN(0x87842be7, 0xf54f6c89), TOBN(0xa7ad663d, 0x6bc532a4), TOBN(0x67813de7, 0x78a91bc8)}}, {{TOBN(0x5dcb61ce, 0xc3427239), TOBN(0x5f3c7cf0, 0xc56934d9), TOBN(0xc079e0fb, 0xe3191591), TOBN(0xe40896bd, 0xb01aada7)}, {TOBN(0x8d466791, 0x0492d25f), 
TOBN(0x8aeb30c9, 0xe7408276), TOBN(0xe9437495, 0x9287aacc), TOBN(0x23d4708d, 0x79fe03d4)}}, {{TOBN(0x8cda9cf2, 0xd0c05199), TOBN(0x502fbc22, 0xfae78454), TOBN(0xc0bda9df, 0xf572a182), TOBN(0x5f9b71b8, 0x6158b372)}, {TOBN(0xe0f33a59, 0x2b82dd07), TOBN(0x76302735, 0x9523032e), TOBN(0x7fe1a721, 0xc4505a32), TOBN(0x7b6e3e82, 0xf796409f)}}}, {{{TOBN(0xe3417bc0, 0x35d0b34a), TOBN(0x440b386b, 0x8327c0a7), TOBN(0x8fb7262d, 0xac0362d1), TOBN(0x2c41114c, 0xe0cdf943)}, {TOBN(0x2ba5cef1, 0xad95a0b1), TOBN(0xc09b37a8, 0x67d54362), TOBN(0x26d6cdd2, 0x01e486c9), TOBN(0x20477abf, 0x42ff9297)}}, {{TOBN(0xa004dcb3, 0x292a9287), TOBN(0xddc15cf6, 0x77b092c7), TOBN(0x083a8464, 0x806c0605), TOBN(0x4a68df70, 0x3db997b0)}, {TOBN(0x9c134e45, 0x05bf7dd0), TOBN(0xa4e63d39, 0x8ccf7f8c), TOBN(0xa6e6517f, 0x41b5f8af), TOBN(0xaa8b9342, 0xad7bc1cc)}}, {{TOBN(0x126f35b5, 0x1e706ad9), TOBN(0xb99cebb4, 0xc3a9ebdf), TOBN(0xa75389af, 0xbf608d90), TOBN(0x76113c4f, 0xc6c89858)}, {TOBN(0x80de8eb0, 0x97e2b5aa), TOBN(0x7e1022cc, 0x63b91304), TOBN(0x3bdab605, 0x6ccc066c), TOBN(0x33cbb144, 0xb2edf900)}}, {{TOBN(0xc4176471, 0x7af715d2), TOBN(0xe2f7f594, 0xd0134a96), TOBN(0x2c1873ef, 0xa41ec956), TOBN(0xe4e7b4f6, 0x77821304)}, {TOBN(0xe5c8ff97, 0x88d5374a), TOBN(0x2b915e63, 0x80823d5b), TOBN(0xea6bc755, 0xb2ee8fe2), TOBN(0x6657624c, 0xe7112651)}}, {{TOBN(0x157af101, 0xdace5aca), TOBN(0xc4fdbcf2, 0x11a6a267), TOBN(0xdaddf340, 0xc49c8609), TOBN(0x97e49f52, 0xe9604a65)}, {TOBN(0x9be8e790, 0x937e2ad5), TOBN(0x846e2508, 0x326e17f1), TOBN(0x3f38007a, 0x0bbbc0dc), TOBN(0xcf03603f, 0xb11e16d6)}}, {{TOBN(0xd6f800e0, 0x7442f1d5), TOBN(0x475607d1, 0x66e0e3ab), TOBN(0x82807f16, 0xb7c64047), TOBN(0x8858e1e3, 0xa749883d)}, {TOBN(0x5859120b, 0x8231ee10), TOBN(0x1b80e7eb, 0x638a1ece), TOBN(0xcb72525a, 0xc6aa73a4), TOBN(0xa7cdea3d, 0x844423ac)}}, {{TOBN(0x5ed0c007, 0xf8ae7c38), TOBN(0x6db07a5c, 0x3d740192), TOBN(0xbe5e9c2a, 0x5fe36db3), TOBN(0xd5b9d57a, 0x76e95046)}, {TOBN(0x54ac32e7, 0x8eba20f2), TOBN(0xef11ca8f, 0x71b9a352), 
TOBN(0x305e373e, 0xff98a658), TOBN(0xffe5a100, 0x823eb667)}}, {{TOBN(0x57477b11, 0xe51732d2), TOBN(0xdfd6eb28, 0x2538fc0e), TOBN(0x5c43b0cc, 0x3b39eec5), TOBN(0x6af12778, 0xcb36cc57)}, {TOBN(0x70b0852d, 0x06c425ae), TOBN(0x6df92f8c, 0x5c221b9b), TOBN(0x6c8d4f9e, 0xce826d9c), TOBN(0xf59aba7b, 0xb49359c3)}}, {{TOBN(0x5c8ed8d5, 0xda64309d), TOBN(0x61a6de56, 0x91b30704), TOBN(0xd6b52f6a, 0x2f9b5808), TOBN(0x0eee4194, 0x98c958a7)}, {TOBN(0xcddd9aab, 0x771e4caa), TOBN(0x83965dfd, 0x78bc21be), TOBN(0x02affce3, 0xb3b504f5), TOBN(0x30847a21, 0x561c8291)}}, {{TOBN(0xd2eb2cf1, 0x52bfda05), TOBN(0xe0e4c4e9, 0x6197b98c), TOBN(0x1d35076c, 0xf8a1726f), TOBN(0x6c06085b, 0x2db11e3d)}, {TOBN(0x15c0c4d7, 0x4463ba14), TOBN(0x9d292f83, 0x0030238c), TOBN(0x1311ee8b, 0x3727536d), TOBN(0xfeea86ef, 0xbeaedc1e)}}, {{TOBN(0xb9d18cd3, 0x66131e2e), TOBN(0xf31d974f, 0x80fe2682), TOBN(0xb6e49e0f, 0xe4160289), TOBN(0x7c48ec0b, 0x08e92799)}, {TOBN(0x818111d8, 0xd1989aa7), TOBN(0xb34fa0aa, 0xebf926f9), TOBN(0xdb5fe2f5, 0xa245474a), TOBN(0xf80a6ebb, 0x3c7ca756)}}, {{TOBN(0xa7f96054, 0xafa05dd8), TOBN(0x26dfcf21, 0xfcaf119e), TOBN(0xe20ef2e3, 0x0564bb59), TOBN(0xef4dca50, 0x61cb02b8)}, {TOBN(0xcda7838a, 0x65d30672), TOBN(0x8b08d534, 0xfd657e86), TOBN(0x4c5b4395, 0x46d595c8), TOBN(0x39b58725, 0x425cb836)}}, {{TOBN(0x8ea61059, 0x3de9abe3), TOBN(0x40434881, 0x9cdc03be), TOBN(0x9b261245, 0xcfedce8c), TOBN(0x78c318b4, 0xcf5234a1)}, {TOBN(0x510bcf16, 0xfde24c99), TOBN(0x2a77cb75, 0xa2c2ff5d), TOBN(0x9c895c2b, 0x27960fb4), TOBN(0xd30ce975, 0xb0eda42b)}}, {{TOBN(0xfda85393, 0x1a62cc26), TOBN(0x23c69b96, 0x50c0e052), TOBN(0xa227df15, 0xbfc633f3), TOBN(0x2ac78848, 0x1bae7d48)}, {TOBN(0x487878f9, 0x187d073d), TOBN(0x6c2be919, 0x967f807d), TOBN(0x765861d8, 0x336e6d8f), TOBN(0x88b8974c, 0xce528a43)}}, {{TOBN(0x09521177, 0xff57d051), TOBN(0x2ff38037, 0xfb6a1961), TOBN(0xfc0aba74, 0xa3d76ad4), TOBN(0x7c764803, 0x25a7ec17)}, {TOBN(0x7532d75f, 0x48879bc8), TOBN(0xea7eacc0, 0x58ce6bc1), TOBN(0xc82176b4, 0x8e896c16), 
TOBN(0x9a30e0b2, 0x2c750fed)}}, {{TOBN(0xc37e2c2e, 0x421d3aa4), TOBN(0xf926407c, 0xe84fa840), TOBN(0x18abc03d, 0x1454e41c), TOBN(0x26605ecd, 0x3f7af644)}, {TOBN(0x242341a6, 0xd6a5eabf), TOBN(0x1edb84f4, 0x216b668e), TOBN(0xd836edb8, 0x04010102), TOBN(0x5b337ce7, 0x945e1d8c)}}, {{TOBN(0xd2075c77, 0xc055dc14), TOBN(0x2a0ffa25, 0x81d89cdf), TOBN(0x8ce815ea, 0x6ffdcbaf), TOBN(0xa3428878, 0xfb648867)}, {TOBN(0x277699cf, 0x884655fb), TOBN(0xfa5b5bd6, 0x364d3e41), TOBN(0x01f680c6, 0x441e1cb7), TOBN(0x3fd61e66, 0xb70a7d67)}}, {{TOBN(0x666ba2dc, 0xcc78cf66), TOBN(0xb3018174, 0x6fdbff77), TOBN(0x8d4dd0db, 0x168d4668), TOBN(0x259455d0, 0x1dab3a2a)}, {TOBN(0xf58564c5, 0xcde3acec), TOBN(0x77141925, 0x13adb276), TOBN(0x527d725d, 0x8a303f65), TOBN(0x55deb6c9, 0xe6f38f7b)}}, {{TOBN(0xfd5bb657, 0xb1fa70fb), TOBN(0xfa07f50f, 0xd8073a00), TOBN(0xf72e3aa7, 0xbca02500), TOBN(0xf68f895d, 0x9975740d)}, {TOBN(0x30112060, 0x5cae2a6a), TOBN(0x01bd7218, 0x02874842), TOBN(0x3d423891, 0x7ce47bd3), TOBN(0xa66663c1, 0x789544f6)}}, {{TOBN(0x864d05d7, 0x3272d838), TOBN(0xe22924f9, 0xfa6295c5), TOBN(0x8189593f, 0x6c2fda32), TOBN(0x330d7189, 0xb184b544)}, {TOBN(0x79efa62c, 0xbde1f714), TOBN(0x35771c94, 0xe5cb1a63), TOBN(0x2f4826b8, 0x641c8332), TOBN(0x00a894fb, 0xc8cee854)}}, {{TOBN(0xb4b9a39b, 0x36194d40), TOBN(0xe857a7c5, 0x77612601), TOBN(0xf4209dd2, 0x4ecf2f58), TOBN(0x82b9e66d, 0x5a033487)}, {TOBN(0xc1e36934, 0xe4e8b9dd), TOBN(0xd2372c9d, 0xa42377d7), TOBN(0x51dc94c7, 0x0e3ae43b), TOBN(0x4c57761e, 0x04474f6f)}}, {{TOBN(0xdcdacd0a, 0x1058a318), TOBN(0x369cf3f5, 0x78053a9a), TOBN(0xc6c3de50, 0x31c68de2), TOBN(0x4653a576, 0x3c4b6d9f)}, {TOBN(0x1688dd5a, 0xaa4e5c97), TOBN(0x5be80aa1, 0xb7ab3c74), TOBN(0x70cefe7c, 0xbc65c283), TOBN(0x57f95f13, 0x06867091)}}, {{TOBN(0xa39114e2, 0x4415503b), TOBN(0xc08ff7c6, 0x4cbb17e9), TOBN(0x1eff674d, 0xd7dec966), TOBN(0x6d4690af, 0x53376f63)}, {TOBN(0xff6fe32e, 0xea74237b), TOBN(0xc436d17e, 0xcd57508e), TOBN(0x15aa28e1, 0xedcc40fe), TOBN(0x0d769c04, 0x581bbb44)}}, 
{{TOBN(0xc240b6de, 0x34eaacda), TOBN(0xd9e116e8, 0x2ba0f1de), TOBN(0xcbe45ec7, 0x79438e55), TOBN(0x91787c9d, 0x96f752d7)}, {TOBN(0x897f532b, 0xf129ac2f), TOBN(0xd307b7c8, 0x5a36e22c), TOBN(0x91940675, 0x749fb8f3), TOBN(0xd14f95d0, 0x157fdb28)}}, {{TOBN(0xfe51d029, 0x6ae55043), TOBN(0x8931e98f, 0x44a87de1), TOBN(0xe57f1cc6, 0x09e4fee2), TOBN(0x0d063b67, 0x4e072d92)}, {TOBN(0x70a998b9, 0xed0e4316), TOBN(0xe74a736b, 0x306aca46), TOBN(0xecf0fbf2, 0x4fda97c7), TOBN(0xa40f65cb, 0x3e178d93)}}, {{TOBN(0x16253604, 0x16df4285), TOBN(0xb0c9babb, 0xd0c56ae2), TOBN(0x73032b19, 0xcfc5cfc3), TOBN(0xe497e5c3, 0x09752056)}, {TOBN(0x12096bb4, 0x164bda96), TOBN(0x1ee42419, 0xa0b74da1), TOBN(0x8fc36243, 0x403826ba), TOBN(0x0c8f0069, 0xdc09e660)}}, {{TOBN(0x8667e981, 0xc27253c9), TOBN(0x05a6aefb, 0x92b36a45), TOBN(0xa62c4b36, 0x9cb7bb46), TOBN(0x8394f375, 0x11f7027b)}, {TOBN(0x747bc79c, 0x5f109d0f), TOBN(0xcad88a76, 0x5b8cc60a), TOBN(0x80c5a66b, 0x58f09e68), TOBN(0xe753d451, 0xf6127eac)}}, {{TOBN(0xc44b74a1, 0x5b0ec6f5), TOBN(0x47989fe4, 0x5289b2b8), TOBN(0x745f8484, 0x58d6fc73), TOBN(0xec362a6f, 0xf61c70ab)}, {TOBN(0x070c98a7, 0xb3a8ad41), TOBN(0x73a20fc0, 0x7b63db51), TOBN(0xed2c2173, 0xf44c35f4), TOBN(0x8a56149d, 0x9acc9dca)}}, {{TOBN(0x98f17881, 0x9ac6e0f4), TOBN(0x360fdeaf, 0xa413b5ed), TOBN(0x0625b8f4, 0xa300b0fd), TOBN(0xf1f4d76a, 0x5b3222d3)}, {TOBN(0x9d6f5109, 0x587f76b8), TOBN(0x8b4ee08d, 0x2317fdb5), TOBN(0x88089bb7, 0x8c68b095), TOBN(0x95570e9a, 0x5808d9b9)}}, {{TOBN(0xa395c36f, 0x35d33ae7), TOBN(0x200ea123, 0x50bb5a94), TOBN(0x20c789bd, 0x0bafe84b), TOBN(0x243ef52d, 0x0919276a)}, {TOBN(0x3934c577, 0xe23ae233), TOBN(0xb93807af, 0xa460d1ec), TOBN(0xb72a53b1, 0xf8fa76a4), TOBN(0xd8914cb0, 0xc3ca4491)}}, {{TOBN(0x2e128494, 0x3fb42622), TOBN(0x3b2700ac, 0x500907d5), TOBN(0xf370fb09, 0x1a95ec63), TOBN(0xf8f30be2, 0x31b6dfbd)}, {TOBN(0xf2b2f8d2, 0x69e55f15), TOBN(0x1fead851, 0xcc1323e9), TOBN(0xfa366010, 0xd9e5eef6), TOBN(0x64d487b0, 0xe316107e)}}, {{TOBN(0x4c076b86, 0xd23ddc82), 
TOBN(0x03fd344c, 0x7e0143f0), TOBN(0xa95362ff, 0x317af2c5), TOBN(0x0add3db7, 0xe18b7a4f)}, {TOBN(0x9c673e3f, 0x8260e01b), TOBN(0xfbeb49e5, 0x54a1cc91), TOBN(0x91351bf2, 0x92f2e433), TOBN(0xc755e7ec, 0x851141eb)}}, {{TOBN(0xc9a95139, 0x29607745), TOBN(0x0ca07420, 0xa26f2b28), TOBN(0xcb2790e7, 0x4bc6f9dd), TOBN(0x345bbb58, 0xadcaffc0)}, {TOBN(0xc65ea38c, 0xbe0f27a2), TOBN(0x67c24d7c, 0x641fcb56), TOBN(0x2c25f0a7, 0xa9e2c757), TOBN(0x93f5cdb0, 0x16f16c49)}}, {{TOBN(0x2ca5a9d7, 0xc5ee30a1), TOBN(0xd1593635, 0xb909b729), TOBN(0x804ce9f3, 0xdadeff48), TOBN(0xec464751, 0xb07c30c3)}, {TOBN(0x89d65ff3, 0x9e49af6a), TOBN(0xf2d6238a, 0x6f3d01bc), TOBN(0x1095561e, 0x0bced843), TOBN(0x51789e12, 0xc8a13fd8)}}, {{TOBN(0xd633f929, 0x763231df), TOBN(0x46df9f7d, 0xe7cbddef), TOBN(0x01c889c0, 0xcb265da8), TOBN(0xfce1ad10, 0xaf4336d2)}, {TOBN(0x8d110df6, 0xfc6a0a7e), TOBN(0xdd431b98, 0x6da425dc), TOBN(0xcdc4aeab, 0x1834aabe), TOBN(0x84deb124, 0x8439b7fc)}}, {{TOBN(0x8796f169, 0x3c2a5998), TOBN(0x9b9247b4, 0x7947190d), TOBN(0x55b9d9a5, 0x11597014), TOBN(0x7e9dd70d, 0x7b1566ee)}, {TOBN(0x94ad78f7, 0xcbcd5e64), TOBN(0x0359ac17, 0x9bd4c032), TOBN(0x3b11baaf, 0x7cc222ae), TOBN(0xa6a6e284, 0xba78e812)}}, {{TOBN(0x8392053f, 0x24cea1a0), TOBN(0xc97bce4a, 0x33621491), TOBN(0x7eb1db34, 0x35399ee9), TOBN(0x473f78ef, 0xece81ad1)}, {TOBN(0x41d72fe0, 0xf63d3d0d), TOBN(0xe620b880, 0xafab62fc), TOBN(0x92096bc9, 0x93158383), TOBN(0x41a21357, 0x8f896f6c)}}, {{TOBN(0x1b5ee2fa, 0xc7dcfcab), TOBN(0x650acfde, 0x9546e007), TOBN(0xc081b749, 0xb1b02e07), TOBN(0xda9e41a0, 0xf9eca03d)}, {TOBN(0x013ba727, 0x175a54ab), TOBN(0xca0cd190, 0xea5d8d10), TOBN(0x85ea52c0, 0x95fd96a9), TOBN(0x2c591b9f, 0xbc5c3940)}}, {{TOBN(0x6fb4d4e4, 0x2bad4d5f), TOBN(0xfa4c3590, 0xfef0059b), TOBN(0x6a10218a, 0xf5122294), TOBN(0x9a78a81a, 0xa85751d1)}, {TOBN(0x04f20579, 0xa98e84e7), TOBN(0xfe1242c0, 0x4997e5b5), TOBN(0xe77a273b, 0xca21e1e4), TOBN(0xfcc8b1ef, 0x9411939d)}}, {{TOBN(0xe20ea302, 0x92d0487a), TOBN(0x1442dbec, 0x294b91fe), 
TOBN(0x1f7a4afe, 0xbb6b0e8f), TOBN(0x1700ef74, 0x6889c318)}, {TOBN(0xf5bbffc3, 0x70f1fc62), TOBN(0x3b31d4b6, 0x69c79cca), TOBN(0xe8bc2aab, 0xa7f6340d), TOBN(0xb0b08ab4, 0xa725e10a)}}, {{TOBN(0x44f05701, 0xae340050), TOBN(0xba4b3016, 0x1cf0c569), TOBN(0x5aa29f83, 0xfbe19a51), TOBN(0x1b9ed428, 0xb71d752e)}, {TOBN(0x1666e54e, 0xeb4819f5), TOBN(0x616cdfed, 0x9e18b75b), TOBN(0x112ed5be, 0x3ee27b0b), TOBN(0xfbf28319, 0x44c7de4d)}}, {{TOBN(0xd685ec85, 0xe0e60d84), TOBN(0x68037e30, 0x1db7ee78), TOBN(0x5b65bdcd, 0x003c4d6e), TOBN(0x33e7363a, 0x93e29a6a)}, {TOBN(0x995b3a61, 0x08d0756c), TOBN(0xd727f85c, 0x2faf134b), TOBN(0xfac6edf7, 0x1d337823), TOBN(0x99b9aa50, 0x0439b8b4)}}, {{TOBN(0x722eb104, 0xe2b4e075), TOBN(0x49987295, 0x437c4926), TOBN(0xb1e4c0e4, 0x46a9b82d), TOBN(0xd0cb3197, 0x57a006f5)}, {TOBN(0xf3de0f7d, 0xd7808c56), TOBN(0xb5c54d8f, 0x51f89772), TOBN(0x500a114a, 0xadbd31aa), TOBN(0x9afaaaa6, 0x295f6cab)}}, {{TOBN(0x94705e21, 0x04cf667a), TOBN(0xfc2a811b, 0x9d3935d7), TOBN(0x560b0280, 0x6d09267c), TOBN(0xf19ed119, 0xf780e53b)}, {TOBN(0xf0227c09, 0x067b6269), TOBN(0x967b8533, 0x5caef599), TOBN(0x155b9243, 0x68efeebc), TOBN(0xcd6d34f5, 0xc497bae6)}}, {{TOBN(0x1dd8d5d3, 0x6cceb370), TOBN(0x2aeac579, 0xa78d7bf9), TOBN(0x5d65017d, 0x70b67a62), TOBN(0x70c8e44f, 0x17c53f67)}, {TOBN(0xd1fc0950, 0x86a34d09), TOBN(0xe0fca256, 0xe7134907), TOBN(0xe24fa29c, 0x80fdd315), TOBN(0x2c4acd03, 0xd87499ad)}}, {{TOBN(0xbaaf7517, 0x3b5a9ba6), TOBN(0xb9cbe1f6, 0x12e51a51), TOBN(0xd88edae3, 0x5e154897), TOBN(0xe4309c3c, 0x77b66ca0)}, {TOBN(0xf5555805, 0xf67f3746), TOBN(0x85fc37ba, 0xa36401ff), TOBN(0xdf86e2ca, 0xd9499a53), TOBN(0x6270b2a3, 0xecbc955b)}}, {{TOBN(0xafae64f5, 0x974ad33b), TOBN(0x04d85977, 0xfe7b2df1), TOBN(0x2a3db3ff, 0x4ab03f73), TOBN(0x0b87878a, 0x8702740a)}, {TOBN(0x6d263f01, 0x5a061732), TOBN(0xc25430ce, 0xa32a1901), TOBN(0xf7ebab3d, 0xdb155018), TOBN(0x3a86f693, 0x63a9b78e)}}, {{TOBN(0x349ae368, 0xda9f3804), TOBN(0x470f07fe, 0xa164349c), TOBN(0xd52f4cc9, 0x8562baa5), 
TOBN(0xc74a9e86, 0x2b290df3)}, {TOBN(0xd3a1aa35, 0x43471a24), TOBN(0x239446be, 0xb8194511), TOBN(0xbec2dd00, 0x81dcd44d), TOBN(0xca3d7f0f, 0xc42ac82d)}}, {{TOBN(0x1f3db085, 0xfdaf4520), TOBN(0xbb6d3e80, 0x4549daf2), TOBN(0xf5969d8a, 0x19ad5c42), TOBN(0x7052b13d, 0xdbfd1511)}, {TOBN(0x11890d1b, 0x682b9060), TOBN(0xa71d3883, 0xac34452c), TOBN(0xa438055b, 0x783805b4), TOBN(0x43241277, 0x4725b23e)}}, {{TOBN(0xf20cf96e, 0x4901bbed), TOBN(0x6419c710, 0xf432a2bb), TOBN(0x57a0fbb9, 0xdfa9cd7d), TOBN(0x589111e4, 0x00daa249)}, {TOBN(0x19809a33, 0x7b60554e), TOBN(0xea5f8887, 0xede283a4), TOBN(0x2d713802, 0x503bfd35), TOBN(0x151bb0af, 0x585d2a53)}}, {{TOBN(0x40b08f74, 0x43b30ca8), TOBN(0xe10b5bba, 0xd9934583), TOBN(0xe8a546d6, 0xb51110ad), TOBN(0x1dd50e66, 0x28e0b6c5)}, {TOBN(0x292e9d54, 0xcff2b821), TOBN(0x3882555d, 0x47281760), TOBN(0x134838f8, 0x3724d6e3), TOBN(0xf2c679e0, 0x22ddcda1)}}, {{TOBN(0x40ee8815, 0x6d2a5768), TOBN(0x7f227bd2, 0x1c1e7e2d), TOBN(0x487ba134, 0xd04ff443), TOBN(0x76e2ff3d, 0xc614e54b)}, {TOBN(0x36b88d6f, 0xa3177ec7), TOBN(0xbf731d51, 0x2328fff5), TOBN(0x758caea2, 0x49ba158e), TOBN(0x5ab8ff4c, 0x02938188)}}, {{TOBN(0x33e16056, 0x35edc56d), TOBN(0x5a69d349, 0x7e940d79), TOBN(0x6c4fd001, 0x03866dcb), TOBN(0x20a38f57, 0x4893cdef)}, {TOBN(0xfbf3e790, 0xfac3a15b), TOBN(0x6ed7ea2e, 0x7a4f8e6b), TOBN(0xa663eb4f, 0xbc3aca86), TOBN(0x22061ea5, 0x080d53f7)}}, {{TOBN(0x2480dfe6, 0xf546783f), TOBN(0xd38bc6da, 0x5a0a641e), TOBN(0xfb093cd1, 0x2ede8965), TOBN(0x89654db4, 0xacb455cf)}, {TOBN(0x413cbf9a, 0x26e1adee), TOBN(0x291f3764, 0x373294d4), TOBN(0x00797257, 0x648083fe), TOBN(0x25f504d3, 0x208cc341)}}, {{TOBN(0x635a8e5e, 0xc3a0ee43), TOBN(0x70aaebca, 0x679898ff), TOBN(0x9ee9f547, 0x5dc63d56), TOBN(0xce987966, 0xffb34d00)}, {TOBN(0xf9f86b19, 0x5e26310a), TOBN(0x9e435484, 0x382a8ca8), TOBN(0x253bcb81, 0xc2352fe4), TOBN(0xa4eac8b0, 0x4474b571)}}, {{TOBN(0xc1b97512, 0xc1ad8cf8), TOBN(0x193b4e9e, 0x99e0b697), TOBN(0x939d2716, 0x01e85df0), TOBN(0x4fb265b3, 0xcd44eafd)}, 
{TOBN(0x321e7dcd, 0xe51e1ae2), TOBN(0x8e3a8ca6, 0xe3d8b096), TOBN(0x8de46cb0, 0x52604998), TOBN(0x91099ad8, 0x39072aa7)}}, {{TOBN(0x2617f91c, 0x93aa96b8), TOBN(0x0fc8716b, 0x7fca2e13), TOBN(0xa7106f5e, 0x95328723), TOBN(0xd1c9c40b, 0x262e6522)}, {TOBN(0xb9bafe86, 0x42b7c094), TOBN(0x1873439d, 0x1543c021), TOBN(0xe1baa5de, 0x5cbefd5d), TOBN(0xa363fc5e, 0x521e8aff)}}, {{TOBN(0xefe6320d, 0xf862eaac), TOBN(0x14419c63, 0x22c647dc), TOBN(0x0e06707c, 0x4e46d428), TOBN(0xcb6c834f, 0x4a178f8f)}, {TOBN(0x0f993a45, 0xd30f917c), TOBN(0xd4c4b049, 0x9879afee), TOBN(0xb6142a1e, 0x70500063), TOBN(0x7c9b41c3, 0xa5d9d605)}}, {{TOBN(0xbc00fc2f, 0x2f8ba2c7), TOBN(0x0966eb2f, 0x7c67aa28), TOBN(0x13f7b516, 0x5a786972), TOBN(0x3bfb7557, 0x8a2fbba0)}, {TOBN(0x131c4f23, 0x5a2b9620), TOBN(0xbff3ed27, 0x6faf46be), TOBN(0x9b4473d1, 0x7e172323), TOBN(0x421e8878, 0x339f6246)}}, {{TOBN(0x0fa8587a, 0x25a41632), TOBN(0xc0814124, 0xa35b6c93), TOBN(0x2b18a9f5, 0x59ebb8db), TOBN(0x264e3357, 0x76edb29c)}, {TOBN(0xaf245ccd, 0xc87c51e2), TOBN(0x16b3015b, 0x501e6214), TOBN(0xbb31c560, 0x0a3882ce), TOBN(0x6961bb94, 0xfec11e04)}}, {{TOBN(0x3b825b8d, 0xeff7a3a0), TOBN(0xbec33738, 0xb1df7326), TOBN(0x68ad747c, 0x99604a1f), TOBN(0xd154c934, 0x9a3bd499)}, {TOBN(0xac33506f, 0x1cc7a906), TOBN(0x73bb5392, 0x6c560e8f), TOBN(0x6428fcbe, 0x263e3944), TOBN(0xc11828d5, 0x1c387434)}}, {{TOBN(0x3cd04be1, 0x3e4b12ff), TOBN(0xc3aad9f9, 0x2d88667c), TOBN(0xc52ddcf8, 0x248120cf), TOBN(0x985a892e, 0x2a389532)}, {TOBN(0xfbb4b21b, 0x3bb85fa0), TOBN(0xf95375e0, 0x8dfc6269), TOBN(0xfb4fb06c, 0x7ee2acea), TOBN(0x6785426e, 0x309c4d1f)}}, {{TOBN(0x659b17c8, 0xd8ceb147), TOBN(0x9b649eee, 0xb70a5554), TOBN(0x6b7fa0b5, 0xac6bc634), TOBN(0xd99fe2c7, 0x1d6e732f)}, {TOBN(0x30e6e762, 0x8d3abba2), TOBN(0x18fee6e7, 0xa797b799), TOBN(0x5c9d360d, 0xc696464d), TOBN(0xe3baeb48, 0x27bfde12)}}, {{TOBN(0x2bf5db47, 0xf23206d5), TOBN(0x2f6d3420, 0x1d260152), TOBN(0x17b87653, 0x3f8ff89a), TOBN(0x5157c30c, 0x378fa458)}, {TOBN(0x7517c5c5, 0x2d4fb936), 
TOBN(0xef22f7ac, 0xe6518cdc), TOBN(0xdeb483e6, 0xbf847a64), TOBN(0xf5084558, 0x92e0fa89)}}}, {{{TOBN(0xab9659d8, 0xdf7304d4), TOBN(0xb71bcf1b, 0xff210e8e), TOBN(0xa9a2438b, 0xd73fbd60), TOBN(0x4595cd1f, 0x5d11b4de)}, {TOBN(0x9c0d329a, 0x4835859d), TOBN(0x4a0f0d2d, 0x7dbb6e56), TOBN(0xc6038e5e, 0xdf928a4e), TOBN(0xc9429621, 0x8f5ad154)}}, {{TOBN(0x91213462, 0xf23f2d92), TOBN(0x6cab71bd, 0x60b94078), TOBN(0x6bdd0a63, 0x176cde20), TOBN(0x54c9b20c, 0xee4d54bc)}, {TOBN(0x3cd2d8aa, 0x9f2ac02f), TOBN(0x03f8e617, 0x206eedb0), TOBN(0xc7f68e16, 0x93086434), TOBN(0x831469c5, 0x92dd3db9)}}, {{TOBN(0x8521df24, 0x8f981354), TOBN(0x587e23ec, 0x3588a259), TOBN(0xcbedf281, 0xd7a0992c), TOBN(0x06930a55, 0x38961407)}, {TOBN(0x09320deb, 0xbe5bbe21), TOBN(0xa7ffa5b5, 0x2491817f), TOBN(0xe6c8b4d9, 0x09065160), TOBN(0xac4f3992, 0xfff6d2a9)}}, {{TOBN(0x7aa7a158, 0x3ae9c1bd), TOBN(0xe0af6d98, 0xe37ce240), TOBN(0xe54342d9, 0x28ab38b4), TOBN(0xe8b75007, 0x0a1c98ca)}, {TOBN(0xefce86af, 0xe02358f2), TOBN(0x31b8b856, 0xea921228), TOBN(0x052a1912, 0x0a1c67fc), TOBN(0xb4069ea4, 0xe3aead59)}}, {{TOBN(0x3232d6e2, 0x7fa03cb3), TOBN(0xdb938e5b, 0x0fdd7d88), TOBN(0x04c1d2cd, 0x2ccbfc5d), TOBN(0xd2f45c12, 0xaf3a580f)}, {TOBN(0x592620b5, 0x7883e614), TOBN(0x5fd27e68, 0xbe7c5f26), TOBN(0x139e45a9, 0x1567e1e3), TOBN(0x2cc71d2d, 0x44d8aaaf)}}, {{TOBN(0x4a9090cd, 0xe36d0757), TOBN(0xf722d7b1, 0xd9a29382), TOBN(0xfb7fb04c, 0x04b48ddf), TOBN(0x628ad2a7, 0xebe16f43)}, {TOBN(0xcd3fbfb5, 0x20226040), TOBN(0x6c34ecb1, 0x5104b6c4), TOBN(0x30c0754e, 0xc903c188), TOBN(0xec336b08, 0x2d23cab0)}}, {{TOBN(0x473d62a2, 0x1e206ee5), TOBN(0xf1e27480, 0x8c49a633), TOBN(0x87ab956c, 0xe9f6b2c3), TOBN(0x61830b48, 0x62b606ea)}, {TOBN(0x67cd6846, 0xe78e815f), TOBN(0xfe40139f, 0x4c02082a), TOBN(0x52bbbfcb, 0x952ec365), TOBN(0x74c11642, 0x6b9836ab)}}, {{TOBN(0x9f51439e, 0x558df019), TOBN(0x230da4ba, 0xac712b27), TOBN(0x518919e3, 0x55185a24), TOBN(0x4dcefcdd, 0x84b78f50)}, {TOBN(0xa7d90fb2, 0xa47d4c5a), TOBN(0x55ac9abf, 0xb30e009e), 
TOBN(0xfd2fc359, 0x74eed273), TOBN(0xb72d824c, 0xdbea8faf)}}, {{TOBN(0xce721a74, 0x4513e2ca), TOBN(0x0b418612, 0x38240b2c), TOBN(0x05199968, 0xd5baa450), TOBN(0xeb1757ed, 0x2b0e8c25)}, {TOBN(0x6ebc3e28, 0x3dfac6d5), TOBN(0xb2431e2e, 0x48a237f5), TOBN(0x2acb5e23, 0x52f61499), TOBN(0x5558a2a7, 0xe06c936b)}}, {{TOBN(0xd213f923, 0xcbb13d1b), TOBN(0x98799f42, 0x5bfb9bfe), TOBN(0x1ae8ddc9, 0x701144a9), TOBN(0x0b8b3bb6, 0x4c5595ee)}, {TOBN(0x0ea9ef2e, 0x3ecebb21), TOBN(0x17cb6c4b, 0x3671f9a7), TOBN(0x47ef464f, 0x726f1d1f), TOBN(0x171b9484, 0x6943a276)}}, {{TOBN(0x51a4ae2d, 0x7ef0329c), TOBN(0x08509222, 0x91c4402a), TOBN(0x64a61d35, 0xafd45bbc), TOBN(0x38f096fe, 0x3035a851)}, {TOBN(0xc7468b74, 0xa1dec027), TOBN(0xe8cf10e7, 0x4fc7dcba), TOBN(0xea35ff40, 0xf4a06353), TOBN(0x0b4c0dfa, 0x8b77dd66)}}, {{TOBN(0x779b8552, 0xde7e5c19), TOBN(0xfab28609, 0xc1c0256c), TOBN(0x64f58eee, 0xabd4743d), TOBN(0x4e8ef838, 0x7b6cc93b)}, {TOBN(0xee650d26, 0x4cb1bf3d), TOBN(0x4c1f9d09, 0x73dedf61), TOBN(0xaef7c9d7, 0xbfb70ced), TOBN(0x1ec0507e, 0x1641de1e)}}, {{TOBN(0xcd7e5cc7, 0xcde45079), TOBN(0xde173c9a, 0x516ac9e4), TOBN(0x517a8494, 0xc170315c), TOBN(0x438fd905, 0x91d8e8fb)}, {TOBN(0x5145c506, 0xc7d9630b), TOBN(0x6457a87b, 0xf47d4d75), TOBN(0xd31646bf, 0x0d9a80e8), TOBN(0x453add2b, 0xcef3aabe)}}, {{TOBN(0xc9941109, 0xa607419d), TOBN(0xfaa71e62, 0xbb6bca80), TOBN(0x34158c13, 0x07c431f3), TOBN(0x594abebc, 0x992bc47a)}, {TOBN(0x6dfea691, 0xeb78399f), TOBN(0x48aafb35, 0x3f42cba4), TOBN(0xedcd65af, 0x077c04f0), TOBN(0x1a29a366, 0xe884491a)}}, {{TOBN(0x023a40e5, 0x1c21f2bf), TOBN(0xf99a513c, 0xa5057aee), TOBN(0xa3fe7e25, 0xbcab072e), TOBN(0x8568d2e1, 0x40e32bcf)}, {TOBN(0x904594eb, 0xd3f69d9f), TOBN(0x181a9733, 0x07affab1), TOBN(0xe4d68d76, 0xb6e330f4), TOBN(0x87a6dafb, 0xc75a7fc1)}}, {{TOBN(0x549db2b5, 0xef7d9289), TOBN(0x2480d4a8, 0x197f015a), TOBN(0x61d5590b, 0xc40493b6), TOBN(0x3a55b52e, 0x6f780331)}, {TOBN(0x40eb8115, 0x309eadb0), TOBN(0xdea7de5a, 0x92e5c625), TOBN(0x64d631f0, 0xcc6a3d5a), 
TOBN(0x9d5e9d7c, 0x93e8dd61)}}, {{TOBN(0xf297bef5, 0x206d3ffc), TOBN(0x23d5e033, 0x7d808bd4), TOBN(0x4a4f6912, 0xd24cf5ba), TOBN(0xe4d8163b, 0x09cdaa8a)}, {TOBN(0x0e0de9ef, 0xd3082e8e), TOBN(0x4fe1246c, 0x0192f360), TOBN(0x1f900150, 0x4b8eee0a), TOBN(0x5219da81, 0xf1da391b)}}, {{TOBN(0x7bf6a5c1, 0xf7ea25aa), TOBN(0xd165e6bf, 0xfbb07d5f), TOBN(0xe3539361, 0x89e78671), TOBN(0xa3fcac89, 0x2bac4219)}, {TOBN(0xdfab6fd4, 0xf0baa8ab), TOBN(0x5a4adac1, 0xe2c1c2e5), TOBN(0x6cd75e31, 0x40d85849), TOBN(0xce263fea, 0x19b39181)}}, {{TOBN(0xcb6803d3, 0x07032c72), TOBN(0x7f40d5ce, 0x790968c8), TOBN(0xa6de86bd, 0xdce978f0), TOBN(0x25547c4f, 0x368f751c)}, {TOBN(0xb1e685fd, 0x65fb2a9e), TOBN(0xce69336f, 0x1eb9179c), TOBN(0xb15d1c27, 0x12504442), TOBN(0xb7df465c, 0xb911a06b)}}, {{TOBN(0xb8d804a3, 0x315980cd), TOBN(0x693bc492, 0xfa3bebf7), TOBN(0x3578aeee, 0x2253c504), TOBN(0x158de498, 0xcd2474a2)}, {TOBN(0x1331f5c7, 0xcfda8368), TOBN(0xd2d7bbb3, 0x78d7177e), TOBN(0xdf61133a, 0xf3c1e46e), TOBN(0x5836ce7d, 0xd30e7be8)}}, {{TOBN(0x83084f19, 0x94f834cb), TOBN(0xd35653d4, 0x429ed782), TOBN(0xa542f16f, 0x59e58243), TOBN(0xc2b52f65, 0x0470a22d)}, {TOBN(0xe3b6221b, 0x18f23d96), TOBN(0xcb05abac, 0x3f5252b4), TOBN(0xca00938b, 0x87d61402), TOBN(0x2f186cdd, 0x411933e4)}}, {{TOBN(0xe042ece5, 0x9a29a5c5), TOBN(0xb19b3c07, 0x3b6c8402), TOBN(0xc97667c7, 0x19d92684), TOBN(0xb5624622, 0xebc66372)}, {TOBN(0x0cb96e65, 0x3c04fa02), TOBN(0x83a7176c, 0x8eaa39aa), TOBN(0x2033561d, 0xeaa1633f), TOBN(0x45a9d086, 0x4533df73)}}, {{TOBN(0xe0542c1d, 0x3dc090bc), TOBN(0x82c996ef, 0xaa59c167), TOBN(0xe3f735e8, 0x0ee7fc4d), TOBN(0x7b179393, 0x7c35db79)}, {TOBN(0xb6419e25, 0xf8c5dbfd), TOBN(0x4d9d7a1e, 0x1f327b04), TOBN(0x979f6f9b, 0x298dfca8), TOBN(0xc7c5dff1, 0x8de9366a)}}, {{TOBN(0x1b7a588d, 0x04c82bdd), TOBN(0x68005534, 0xf8319dfd), TOBN(0xde8a55b5, 0xd8eb9580), TOBN(0x5ea886da, 0x8d5bca81)}, {TOBN(0xe8530a01, 0x252a0b4d), TOBN(0x1bffb4fe, 0x35eaa0a1), TOBN(0x2ad828b1, 0xd8e99563), TOBN(0x7de96ef5, 0x95f9cd87)}}, 
{{TOBN(0x4abb2d0c, 0xd77d970c), TOBN(0x03cfb933, 0xd33ef9cb), TOBN(0xb0547c01, 0x8b211fe9), TOBN(0x2fe64809, 0xa56ed1c6)}, {TOBN(0xcb7d5624, 0xc2ac98cc), TOBN(0x2a1372c0, 0x1a393e33), TOBN(0xc8d1ec1c, 0x29660521), TOBN(0xf3d31b04, 0xb37ac3e9)}}, {{TOBN(0xa29ae9df, 0x5ece6e7c), TOBN(0x0603ac8f, 0x0facfb55), TOBN(0xcfe85b7a, 0xdda233a5), TOBN(0xe618919f, 0xbd75f0b8)}, {TOBN(0xf555a3d2, 0x99bf1603), TOBN(0x1f43afc9, 0xf184255a), TOBN(0xdcdaf341, 0x319a3e02), TOBN(0xd3b117ef, 0x03903a39)}}, {{TOBN(0xe095da13, 0x65d1d131), TOBN(0x86f16367, 0xc37ad03e), TOBN(0x5f37389e, 0x462cd8dd), TOBN(0xc103fa04, 0xd67a60e6)}, {TOBN(0x57c34344, 0xf4b478f0), TOBN(0xce91edd8, 0xe117c98d), TOBN(0x001777b0, 0x231fc12e), TOBN(0x11ae47f2, 0xb207bccb)}}, {{TOBN(0xd983cf8d, 0x20f8a242), TOBN(0x7aff5b1d, 0xf22e1ad8), TOBN(0x68fd11d0, 0x7fc4feb3), TOBN(0x5d53ae90, 0xb0f1c3e1)}, {TOBN(0x50fb7905, 0xec041803), TOBN(0x85e3c977, 0x14404888), TOBN(0x0e67faed, 0xac628d8f), TOBN(0x2e865150, 0x6668532c)}}, {{TOBN(0x15acaaa4, 0x6a67a6b0), TOBN(0xf4cdee25, 0xb25cec41), TOBN(0x49ee565a, 0xe4c6701e), TOBN(0x2a04ca66, 0xfc7d63d8)}, {TOBN(0xeb105018, 0xef0543fb), TOBN(0xf709a4f5, 0xd1b0d81d), TOBN(0x5b906ee6, 0x2915d333), TOBN(0xf4a87412, 0x96f1f0ab)}}, {{TOBN(0xb6b82fa7, 0x4d82f4c2), TOBN(0x90725a60, 0x6804efb3), TOBN(0xbc82ec46, 0xadc3425e), TOBN(0xb7b80581, 0x2787843e)}, {TOBN(0xdf46d91c, 0xdd1fc74c), TOBN(0xdc1c62cb, 0xe783a6c4), TOBN(0x59d1b9f3, 0x1a04cbba), TOBN(0xd87f6f72, 0x95e40764)}}, {{TOBN(0x02b4cfc1, 0x317f4a76), TOBN(0x8d2703eb, 0x91036bce), TOBN(0x98206cc6, 0xa5e72a56), TOBN(0x57be9ed1, 0xcf53fb0f)}, {TOBN(0x09374571, 0xef0b17ac), TOBN(0x74b2655e, 0xd9181b38), TOBN(0xc8f80ea8, 0x89935d0e), TOBN(0xc0d9e942, 0x91529936)}}, {{TOBN(0x19686041, 0x1e84e0e5), TOBN(0xa5db84d3, 0xaea34c93), TOBN(0xf9d5bb19, 0x7073a732), TOBN(0xb8d2fe56, 0x6bcfd7c0)}, {TOBN(0x45775f36, 0xf3eb82fa), TOBN(0x8cb20ccc, 0xfdff8b58), TOBN(0x1659b65f, 0x8374c110), TOBN(0xb8b4a422, 0x330c789a)}}, {{TOBN(0x75e3c3ea, 0x6fe8208b), 
TOBN(0xbd74b9e4, 0x286e78fe), TOBN(0x0be2e81b, 0xd7d93a1a), TOBN(0x7ed06e27, 0xdd0a5aae)}, {TOBN(0x721f5a58, 0x6be8b800), TOBN(0x428299d1, 0xd846db28), TOBN(0x95cb8e6b, 0x5be88ed3), TOBN(0xc3186b23, 0x1c034e11)}}, {{TOBN(0xa6312c9e, 0x8977d99b), TOBN(0xbe944331, 0x83f531e7), TOBN(0x8232c0c2, 0x18d3b1d4), TOBN(0x617aae8b, 0xe1247b73)}, {TOBN(0x40153fc4, 0x282aec3b), TOBN(0xc6063d2f, 0xf7b8f823), TOBN(0x68f10e58, 0x3304f94c), TOBN(0x31efae74, 0xee676346)}}, {{TOBN(0xbadb6c6d, 0x40a9b97c), TOBN(0x14702c63, 0x4f666256), TOBN(0xdeb954f1, 0x5184b2e3), TOBN(0x5184a526, 0x94b6ca40)}, {TOBN(0xfff05337, 0x003c32ea), TOBN(0x5aa374dd, 0x205974c7), TOBN(0x9a763854, 0x4b0dd71a), TOBN(0x459cd27f, 0xdeb947ec)}}, {{TOBN(0xa6e28161, 0x459c2b92), TOBN(0x2f020fa8, 0x75ee8ef5), TOBN(0xb132ec2d, 0x30b06310), TOBN(0xc3e15899, 0xbc6a4530)}, {TOBN(0xdc5f53fe, 0xaa3f451a), TOBN(0x3a3c7f23, 0xc2d9acac), TOBN(0x2ec2f892, 0x6b27e58b), TOBN(0x68466ee7, 0xd742799f)}}, {{TOBN(0x98324dd4, 0x1fa26613), TOBN(0xa2dc6dab, 0xbdc29d63), TOBN(0xf9675faa, 0xd712d657), TOBN(0x813994be, 0x21fd8d15)}, {TOBN(0x5ccbb722, 0xfd4f7553), TOBN(0x5135ff8b, 0xf3a36b20), TOBN(0x44be28af, 0x69559df5), TOBN(0x40b65bed, 0x9d41bf30)}}, {{TOBN(0xd98bf2a4, 0x3734e520), TOBN(0x5e3abbe3, 0x209bdcba), TOBN(0x77c76553, 0xbc945b35), TOBN(0x5331c093, 0xc6ef14aa)}, {TOBN(0x518ffe29, 0x76b60c80), TOBN(0x2285593b, 0x7ace16f8), TOBN(0xab1f64cc, 0xbe2b9784), TOBN(0xe8f2c0d9, 0xab2421b6)}}, {{TOBN(0x617d7174, 0xc1df065c), TOBN(0xafeeb5ab, 0x5f6578fa), TOBN(0x16ff1329, 0x263b54a8), TOBN(0x45c55808, 0xc990dce3)}, {TOBN(0x42eab6c0, 0xecc8c177), TOBN(0x799ea9b5, 0x5982ecaa), TOBN(0xf65da244, 0xb607ef8e), TOBN(0x8ab226ce, 0x32a3fc2c)}}, {{TOBN(0x745741e5, 0x7ea973dc), TOBN(0x5c00ca70, 0x20888f2e), TOBN(0x7cdce3cf, 0x45fd9cf1), TOBN(0x8a741ef1, 0x5507f872)}, {TOBN(0x47c51c2f, 0x196b4cec), TOBN(0x70d08e43, 0xc97ea618), TOBN(0x930da15c, 0x15b18a2b), TOBN(0x33b6c678, 0x2f610514)}}, {{TOBN(0xc662e4f8, 0x07ac9794), TOBN(0x1eccf050, 0xba06cb79), 
TOBN(0x1ff08623, 0xe7d954e5), TOBN(0x6ef2c5fb, 0x24cf71c3)}, {TOBN(0xb2c063d2, 0x67978453), TOBN(0xa0cf3796, 0x1d654af8), TOBN(0x7cb242ea, 0x7ebdaa37), TOBN(0x206e0b10, 0xb86747e0)}}, {{TOBN(0x481dae5f, 0xd5ecfefc), TOBN(0x07084fd8, 0xc2bff8fc), TOBN(0x8040a01a, 0xea324596), TOBN(0x4c646980, 0xd4de4036)}, {TOBN(0x9eb8ab4e, 0xd65abfc3), TOBN(0xe01cb91f, 0x13541ec7), TOBN(0x8f029adb, 0xfd695012), TOBN(0x9ae28483, 0x3c7569ec)}}, {{TOBN(0xa5614c9e, 0xa66d80a1), TOBN(0x680a3e44, 0x75f5f911), TOBN(0x0c07b14d, 0xceba4fc1), TOBN(0x891c285b, 0xa13071c1)}, {TOBN(0xcac67ceb, 0x799ece3c), TOBN(0x29b910a9, 0x41e07e27), TOBN(0x66bdb409, 0xf2e43123), TOBN(0x06f8b137, 0x7ac9ecbe)}}, {{TOBN(0x5981fafd, 0x38547090), TOBN(0x19ab8b9f, 0x85e3415d), TOBN(0xfc28c194, 0xc7e31b27), TOBN(0x843be0aa, 0x6fbcbb42)}, {TOBN(0xf3b1ed43, 0xa6db836c), TOBN(0x2a1330e4, 0x01a45c05), TOBN(0x4f19f3c5, 0x95c1a377), TOBN(0xa85f39d0, 0x44b5ee33)}}, {{TOBN(0x3da18e6d, 0x4ae52834), TOBN(0x5a403b39, 0x7423dcb0), TOBN(0xbb555e0a, 0xf2374aef), TOBN(0x2ad599c4, 0x1e8ca111)}, {TOBN(0x1b3a2fb9, 0x014b3bf8), TOBN(0x73092684, 0xf66d5007), TOBN(0x079f1426, 0xc4340102), TOBN(0x1827cf81, 0x8fddf4de)}}, {{TOBN(0xc83605f6, 0xf10ff927), TOBN(0xd3871451, 0x23739fc6), TOBN(0x6d163450, 0xcac1c2cc), TOBN(0x6b521296, 0xa2ec1ac5)}, {TOBN(0x0606c4f9, 0x6e3cb4a5), TOBN(0xe47d3f41, 0x778abff7), TOBN(0x425a8d5e, 0xbe8e3a45), TOBN(0x53ea9e97, 0xa6102160)}}, {{TOBN(0x477a106e, 0x39cbb688), TOBN(0x532401d2, 0xf3386d32), TOBN(0x8e564f64, 0xb1b9b421), TOBN(0xca9b8388, 0x81dad33f)}, {TOBN(0xb1422b4e, 0x2093913e), TOBN(0x533d2f92, 0x69bc8112), TOBN(0x3fa017be, 0xebe7b2c7), TOBN(0xb2767c4a, 0xcaf197c6)}}, {{TOBN(0xc925ff87, 0xaedbae9f), TOBN(0x7daf0eb9, 0x36880a54), TOBN(0x9284ddf5, 0x9c4d0e71), TOBN(0x1581cf93, 0x316f8cf5)}, {TOBN(0x3eeca887, 0x3ac1f452), TOBN(0xb417fce9, 0xfb6aeffe), TOBN(0xa5918046, 0xeefb8dc3), TOBN(0x73d318ac, 0x02209400)}}, {{TOBN(0xe800400f, 0x728693e5), TOBN(0xe87d814b, 0x339927ed), TOBN(0x93e94d3b, 0x57ea9910), 
TOBN(0xff8a35b6, 0x2245fb69)}, {TOBN(0x043853d7, 0x7f200d34), TOBN(0x470f1e68, 0x0f653ce1), TOBN(0x81ac05bd, 0x59a06379), TOBN(0xa14052c2, 0x03930c29)}}, {{TOBN(0x6b72fab5, 0x26bc2797), TOBN(0x13670d16, 0x99f16771), TOBN(0x00170052, 0x1e3e48d1), TOBN(0x978fe401, 0xb7adf678)}, {TOBN(0x55ecfb92, 0xd41c5dd4), TOBN(0x5ff8e247, 0xc7b27da5), TOBN(0xe7518272, 0x013fb606), TOBN(0x5768d7e5, 0x2f547a3c)}}, {{TOBN(0xbb24eaa3, 0x60017a5f), TOBN(0x6b18e6e4, 0x9c64ce9b), TOBN(0xc225c655, 0x103dde07), TOBN(0xfc3672ae, 0x7592f7ea)}, {TOBN(0x9606ad77, 0xd06283a1), TOBN(0x542fc650, 0xe4d59d99), TOBN(0xabb57c49, 0x2a40e7c2), TOBN(0xac948f13, 0xa8db9f55)}}, {{TOBN(0x6d4c9682, 0xb04465c3), TOBN(0xe3d062fa, 0x6468bd15), TOBN(0xa51729ac, 0x5f318d7e), TOBN(0x1fc87df6, 0x9eb6fc95)}, {TOBN(0x63d146a8, 0x0591f652), TOBN(0xa861b8f7, 0x589621aa), TOBN(0x59f5f15a, 0xce31348c), TOBN(0x8f663391, 0x440da6da)}}, {{TOBN(0xcfa778ac, 0xb591ffa3), TOBN(0x027ca9c5, 0x4cdfebce), TOBN(0xbe8e05a5, 0x444ea6b3), TOBN(0x8aab4e69, 0xa78d8254)}, {TOBN(0x2437f04f, 0xb474d6b8), TOBN(0x6597ffd4, 0x045b3855), TOBN(0xbb0aea4e, 0xca47ecaa), TOBN(0x568aae83, 0x85c7ebfc)}}, {{TOBN(0x0e966e64, 0xc73b2383), TOBN(0x49eb3447, 0xd17d8762), TOBN(0xde107821, 0x8da05dab), TOBN(0x443d8baa, 0x016b7236)}, {TOBN(0x163b63a5, 0xea7610d6), TOBN(0xe47e4185, 0xce1ca979), TOBN(0xae648b65, 0x80baa132), TOBN(0xebf53de2, 0x0e0d5b64)}}, {{TOBN(0x8d3bfcb4, 0xd3c8c1ca), TOBN(0x0d914ef3, 0x5d04b309), TOBN(0x55ef6415, 0x3de7d395), TOBN(0xbde1666f, 0x26b850e8)}, {TOBN(0xdbe1ca6e, 0xd449ab19), TOBN(0x8902b322, 0xe89a2672), TOBN(0xb1674b7e, 0xdacb7a53), TOBN(0x8e9faf6e, 0xf52523ff)}}, {{TOBN(0x6ba535da, 0x9a85788b), TOBN(0xd21f03ae, 0xbd0626d4), TOBN(0x099f8c47, 0xe873dc64), TOBN(0xcda8564d, 0x018ec97e)}, {TOBN(0x3e8d7a5c, 0xde92c68c), TOBN(0x78e035a1, 0x73323cc4), TOBN(0x3ef26275, 0xf880ff7c), TOBN(0xa4ee3dff, 0x273eedaa)}}, {{TOBN(0x58823507, 0xaf4e18f8), TOBN(0x967ec9b5, 0x0672f328), TOBN(0x9ded19d9, 0x559d3186), TOBN(0x5e2ab3de, 0x6cdce39c)}, 
{TOBN(0xabad6e4d, 0x11c226df), TOBN(0xf9783f43, 0x87723014), TOBN(0x9a49a0cf, 0x1a885719), TOBN(0xfc0c1a5a, 0x90da9dbf)}}, {{TOBN(0x8bbaec49, 0x571d92ac), TOBN(0x569e85fe, 0x4692517f), TOBN(0x8333b014, 0xa14ea4af), TOBN(0x32f2a62f, 0x12e5c5ad)}, {TOBN(0x98c2ce3a, 0x06d89b85), TOBN(0xb90741aa, 0x2ff77a08), TOBN(0x2530defc, 0x01f795a2), TOBN(0xd6e5ba0b, 0x84b3c199)}}, {{TOBN(0x7d8e8451, 0x12e4c936), TOBN(0xae419f7d, 0xbd0be17b), TOBN(0xa583fc8c, 0x22262bc9), TOBN(0x6b842ac7, 0x91bfe2bd)}, {TOBN(0x33cef4e9, 0x440d6827), TOBN(0x5f69f4de, 0xef81fb14), TOBN(0xf16cf6f6, 0x234fbb92), TOBN(0x76ae3fc3, 0xd9e7e158)}}, {{TOBN(0x4e89f6c2, 0xe9740b33), TOBN(0x677bc85d, 0x4962d6a1), TOBN(0x6c6d8a7f, 0x68d10d15), TOBN(0x5f9a7224, 0x0257b1cd)}, {TOBN(0x7096b916, 0x4ad85961), TOBN(0x5f8c47f7, 0xe657ab4a), TOBN(0xde57d7d0, 0xf7461d7e), TOBN(0x7eb6094d, 0x80ce5ee2)}}, {{TOBN(0x0b1e1dfd, 0x34190547), TOBN(0x8a394f43, 0xf05dd150), TOBN(0x0a9eb24d, 0x97df44e6), TOBN(0x78ca06bf, 0x87675719)}, {TOBN(0x6f0b3462, 0x6ffeec22), TOBN(0x9d91bcea, 0x36cdd8fb), TOBN(0xac83363c, 0xa105be47), TOBN(0x81ba76c1, 0x069710e3)}}, {{TOBN(0x3d1b24cb, 0x28c682c6), TOBN(0x27f25228, 0x8612575b), TOBN(0xb587c779, 0xe8e66e98), TOBN(0x7b0c03e9, 0x405eb1fe)}, {TOBN(0xfdf0d030, 0x15b548e7), TOBN(0xa8be76e0, 0x38b36af7), TOBN(0x4cdab04a, 0x4f310c40), TOBN(0x6287223e, 0xf47ecaec)}}, {{TOBN(0x678e6055, 0x8b399320), TOBN(0x61fe3fa6, 0xc01e4646), TOBN(0xc482866b, 0x03261a5e), TOBN(0xdfcf45b8, 0x5c2f244a)}, {TOBN(0x8fab9a51, 0x2f684b43), TOBN(0xf796c654, 0xc7220a66), TOBN(0x1d90707e, 0xf5afa58f), TOBN(0x2c421d97, 0x4fdbe0de)}}, {{TOBN(0xc4f4cda3, 0xaf2ebc2f), TOBN(0xa0af843d, 0xcb4efe24), TOBN(0x53b857c1, 0x9ccd10b1), TOBN(0xddc9d1eb, 0x914d3e04)}, {TOBN(0x7bdec8bb, 0x62771deb), TOBN(0x829277aa, 0x91c5aa81), TOBN(0x7af18dd6, 0x832391ae), TOBN(0x1740f316, 0xc71a84ca)}}}, {{{TOBN(0x8928e99a, 0xeeaf8c49), TOBN(0xee7aa73d, 0x6e24d728), TOBN(0x4c5007c2, 0xe72b156c), TOBN(0x5fcf57c5, 0xed408a1d)}, {TOBN(0x9f719e39, 
0xb6057604), TOBN(0x7d343c01, 0xc2868bbf), TOBN(0x2cca254b, 0x7e103e2d), TOBN(0xe6eb38a9, 0xf131bea2)}}, {{TOBN(0xb33e624f, 0x8be762b4), TOBN(0x2a9ee4d1, 0x058e3413), TOBN(0x968e6369, 0x67d805fa), TOBN(0x9848949b, 0x7db8bfd7)}, {TOBN(0x5308d7e5, 0xd23a8417), TOBN(0x892f3b1d, 0xf3e29da5), TOBN(0xc95c139e, 0x3dee471f), TOBN(0x8631594d, 0xd757e089)}}, {{TOBN(0xe0c82a3c, 0xde918dcc), TOBN(0x2e7b5994, 0x26fdcf4b), TOBN(0x82c50249, 0x32cb1b2d), TOBN(0xea613a9d, 0x7657ae07)}, {TOBN(0xc2eb5f6c, 0xf1fdc9f7), TOBN(0xb6eae8b8, 0x879fe682), TOBN(0x253dfee0, 0x591cbc7f), TOBN(0x000da713, 0x3e1290e6)}}, {{TOBN(0x1083e2ea, 0x1f095615), TOBN(0x0a28ad77, 0x14e68c33), TOBN(0x6bfc0252, 0x3d8818be), TOBN(0xb585113a, 0xf35850cd)}, {TOBN(0x7d935f0b, 0x30df8aa1), TOBN(0xaddda07c, 0x4ab7e3ac), TOBN(0x92c34299, 0x552f00cb), TOBN(0xc33ed1de, 0x2909df6c)}}, {{TOBN(0x22c2195d, 0x80e87766), TOBN(0x9e99e6d8, 0x9ddf4ac0), TOBN(0x09642e4e, 0x65e74934), TOBN(0x2610ffa2, 0xff1ff241)}, {TOBN(0x4d1d47d4, 0x751c8159), TOBN(0x697b4985, 0xaf3a9363), TOBN(0x0318ca46, 0x87477c33), TOBN(0xa90cb565, 0x9441eff3)}}, {{TOBN(0x58bb3848, 0x36f024cb), TOBN(0x85be1f77, 0x36016168), TOBN(0x6c59587c, 0xdc7e07f1), TOBN(0x191be071, 0xaf1d8f02)}, {TOBN(0xbf169fa5, 0xcca5e55c), TOBN(0x3864ba3c, 0xf7d04eac), TOBN(0x915e367f, 0x8d7d05db), TOBN(0xb48a876d, 0xa6549e5d)}}, {{TOBN(0xef89c656, 0x580e40a2), TOBN(0xf194ed8c, 0x728068bc), TOBN(0x74528045, 0xa47990c9), TOBN(0xf53fc7d7, 0x5e1a4649)}, {TOBN(0xbec5ae9b, 0x78593e7d), TOBN(0x2cac4ee3, 0x41db65d7), TOBN(0xa8c1eb24, 0x04a3d39b), TOBN(0x53b7d634, 0x03f8f3ef)}}, {{TOBN(0x2dc40d48, 0x3e07113c), TOBN(0x6e4a5d39, 0x7d8b63ae), TOBN(0x5582a94b, 0x79684c2b), TOBN(0x932b33d4, 0x622da26c)}, {TOBN(0xf534f651, 0x0dbbf08d), TOBN(0x211d07c9, 0x64c23a52), TOBN(0x0eeece0f, 0xee5bdc9b), TOBN(0xdf178168, 0xf7015558)}}, {{TOBN(0xd4294635, 0x0a712229), TOBN(0x93cbe448, 0x09273f8c), TOBN(0x00b095ef, 0x8f13bc83), TOBN(0xbb741972, 0x8798978c)}, {TOBN(0x9d7309a2, 0x56dbe6e7), TOBN(0xe578ec56, 
0x5a5d39ec), TOBN(0x3961151b, 0x851f9a31), TOBN(0x2da7715d, 0xe5709eb4)}}, {{TOBN(0x867f3017, 0x53dfabf0), TOBN(0x728d2078, 0xb8e39259), TOBN(0x5c75a0cd, 0x815d9958), TOBN(0xf84867a6, 0x16603be1)}, {TOBN(0xc865b13d, 0x70e35b1c), TOBN(0x02414468, 0x19b03e2c), TOBN(0xe46041da, 0xac1f3121), TOBN(0x7c9017ad, 0x6f028a7c)}}, {{TOBN(0xabc96de9, 0x0a482873), TOBN(0x4265d6b1, 0xb77e54d4), TOBN(0x68c38e79, 0xa57d88e7), TOBN(0xd461d766, 0x9ce82de3)}, {TOBN(0x817a9ec5, 0x64a7e489), TOBN(0xcc5675cd, 0xa0def5f2), TOBN(0x9a00e785, 0x985d494e), TOBN(0xc626833f, 0x1b03514a)}}, {{TOBN(0xabe7905a, 0x83cdd60e), TOBN(0x50602fb5, 0xa1170184), TOBN(0x689886cd, 0xb023642a), TOBN(0xd568d090, 0xa6e1fb00)}, {TOBN(0x5b1922c7, 0x0259217f), TOBN(0x93831cd9, 0xc43141e4), TOBN(0xdfca3587, 0x0c95f86e), TOBN(0xdec2057a, 0x568ae828)}}, {{TOBN(0xc44ea599, 0xf98a759a), TOBN(0x55a0a7a2, 0xf7c23c1d), TOBN(0xd5ffb6e6, 0x94c4f687), TOBN(0x3563cce2, 0x12848478)}, {TOBN(0x812b3517, 0xe7b1fbe1), TOBN(0x8a7dc979, 0x4f7338e0), TOBN(0x211ecee9, 0x52d048db), TOBN(0x2eea4056, 0xc86ea3b8)}}, {{TOBN(0xd8cb68a7, 0xba772b34), TOBN(0xe16ed341, 0x5f4e2541), TOBN(0x9b32f6a6, 0x0fec14db), TOBN(0xeee376f7, 0x391698be)}, {TOBN(0xe9a7aa17, 0x83674c02), TOBN(0x65832f97, 0x5843022a), TOBN(0x29f3a8da, 0x5ba4990f), TOBN(0x79a59c3a, 0xfb8e3216)}}, {{TOBN(0x9cdc4d2e, 0xbd19bb16), TOBN(0xc6c7cfd0, 0xb3262d86), TOBN(0xd4ce14d0, 0x969c0b47), TOBN(0x1fa352b7, 0x13e56128)}, {TOBN(0x383d55b8, 0x973db6d3), TOBN(0x71836850, 0xe8e5b7bf), TOBN(0xc7714596, 0xe6bb571f), TOBN(0x259df31f, 0x2d5b2dd2)}}, {{TOBN(0x568f8925, 0x913cc16d), TOBN(0x18bc5b6d, 0xe1a26f5a), TOBN(0xdfa413be, 0xf5f499ae), TOBN(0xf8835dec, 0xc3f0ae84)}, {TOBN(0xb6e60bd8, 0x65a40ab0), TOBN(0x65596439, 0x194b377e), TOBN(0xbcd85625, 0x92084a69), TOBN(0x5ce433b9, 0x4f23ede0)}}, {{TOBN(0xe8e8f04f, 0x6ad65143), TOBN(0x11511827, 0xd6e14af6), TOBN(0x3d390a10, 0x8295c0c7), TOBN(0x71e29ee4, 0x621eba16)}, {TOBN(0xa588fc09, 0x63717b46), TOBN(0x02be02fe, 0xe06ad4a2), TOBN(0x931558c6, 
0x04c22b22), TOBN(0xbb4d4bd6, 0x12f3c849)}}, {{TOBN(0x54a4f496, 0x20efd662), TOBN(0x92ba6d20, 0xc5952d14), TOBN(0x2db8ea1e, 0xcc9784c2), TOBN(0x81cc10ca, 0x4b353644)}, {TOBN(0x40b570ad, 0x4b4d7f6c), TOBN(0x5c9f1d96, 0x84a1dcd2), TOBN(0x01379f81, 0x3147e797), TOBN(0xe5c6097b, 0x2bd499f5)}}, {{TOBN(0x40dcafa6, 0x328e5e20), TOBN(0xf7b5244a, 0x54815550), TOBN(0xb9a4f118, 0x47bfc978), TOBN(0x0ea0e79f, 0xd25825b1)}, {TOBN(0xa50f96eb, 0x646c7ecf), TOBN(0xeb811493, 0x446dea9d), TOBN(0x2af04677, 0xdfabcf69), TOBN(0xbe3a068f, 0xc713f6e8)}}, {{TOBN(0x860d523d, 0x42e06189), TOBN(0xbf077941, 0x4e3aff13), TOBN(0x0b616dca, 0xc1b20650), TOBN(0xe66dd6d1, 0x2131300d)}, {TOBN(0xd4a0fd67, 0xff99abde), TOBN(0xc9903550, 0xc7aac50d), TOBN(0x022ecf8b, 0x7c46b2d7), TOBN(0x3333b1e8, 0x3abf92af)}}, {{TOBN(0x11cc113c, 0x6c491c14), TOBN(0x05976688, 0x80dd3f88), TOBN(0xf5b4d9e7, 0x29d932ed), TOBN(0xe982aad8, 0xa2c38b6d)}, {TOBN(0x6f925347, 0x8be0dcf0), TOBN(0x700080ae, 0x65ca53f2), TOBN(0xd8131156, 0x443ca77f), TOBN(0xe92d6942, 0xec51f984)}}, {{TOBN(0xd2a08af8, 0x85dfe9ae), TOBN(0xd825d9a5, 0x4d2a86ca), TOBN(0x2c53988d, 0x39dff020), TOBN(0xf38b135a, 0x430cdc40)}, {TOBN(0x0c918ae0, 0x62a7150b), TOBN(0xf31fd8de, 0x0c340e9b), TOBN(0xafa0e7ae, 0x4dbbf02e), TOBN(0x5847fb2a, 0x5eba6239)}}, {{TOBN(0x6b1647dc, 0xdccbac8b), TOBN(0xb642aa78, 0x06f485c8), TOBN(0x873f3765, 0x7038ecdf), TOBN(0x2ce5e865, 0xfa49d3fe)}, {TOBN(0xea223788, 0xc98c4400), TOBN(0x8104a8cd, 0xf1fa5279), TOBN(0xbcf7cc7a, 0x06becfd7), TOBN(0x49424316, 0xc8f974ae)}}, {{TOBN(0xc0da65e7, 0x84d6365d), TOBN(0xbcb7443f, 0x8f759fb8), TOBN(0x35c712b1, 0x7ae81930), TOBN(0x80428dff, 0x4c6e08ab)}, {TOBN(0xf19dafef, 0xa4faf843), TOBN(0xced8538d, 0xffa9855f), TOBN(0x20ac409c, 0xbe3ac7ce), TOBN(0x358c1fb6, 0x882da71e)}}, {{TOBN(0xafa9c0e5, 0xfd349961), TOBN(0x2b2cfa51, 0x8421c2fc), TOBN(0x2a80db17, 0xf3a28d38), TOBN(0xa8aba539, 0x5d138e7e)}, {TOBN(0x52012d1d, 0x6e96eb8d), TOBN(0x65d8dea0, 0xcbaf9622), TOBN(0x57735447, 0xb264f56c), TOBN(0xbeebef3f, 
0x1b6c8da2)}}, {{TOBN(0xfc346d98, 0xce785254), TOBN(0xd50e8d72, 0xbb64a161), TOBN(0xc03567c7, 0x49794add), TOBN(0x15a76065, 0x752c7ef6)}, {TOBN(0x59f3a222, 0x961f23d6), TOBN(0x378e4438, 0x73ecc0b0), TOBN(0xc74be434, 0x5a82fde4), TOBN(0xae509af2, 0xd8b9cf34)}}, {{TOBN(0x4a61ee46, 0x577f44a1), TOBN(0xe09b748c, 0xb611deeb), TOBN(0xc0481b2c, 0xf5f7b884), TOBN(0x35626678, 0x61acfa6b)}, {TOBN(0x37f4c518, 0xbf8d21e6), TOBN(0x22d96531, 0xb205a76d), TOBN(0x37fb85e1, 0x954073c0), TOBN(0xbceafe4f, 0x65b3a567)}}, {{TOBN(0xefecdef7, 0xbe42a582), TOBN(0xd3fc6080, 0x65046be6), TOBN(0xc9af13c8, 0x09e8dba9), TOBN(0x1e6c9847, 0x641491ff)}, {TOBN(0x3b574925, 0xd30c31f7), TOBN(0xb7eb72ba, 0xac2a2122), TOBN(0x776a0dac, 0xef0859e7), TOBN(0x06fec314, 0x21900942)}}, {{TOBN(0x2464bc10, 0xf8c22049), TOBN(0x9bfbcce7, 0x875ebf69), TOBN(0xd7a88e2a, 0x4336326b), TOBN(0xda05261c, 0x5bc2acfa)}, {TOBN(0xc29f5bdc, 0xeba7efc8), TOBN(0x471237ca, 0x25dbbf2e), TOBN(0xa72773f2, 0x2975f127), TOBN(0xdc744e8e, 0x04d0b326)}}, {{TOBN(0x38a7ed16, 0xa56edb73), TOBN(0x64357e37, 0x2c007e70), TOBN(0xa167d15b, 0x5080b400), TOBN(0x07b41164, 0x23de4be1)}, {TOBN(0xb2d91e32, 0x74c89883), TOBN(0x3c162821, 0x2882e7ed), TOBN(0xad6b36ba, 0x7503e482), TOBN(0x48434e8e, 0x0ea34331)}}, {{TOBN(0x79f4f24f, 0x2c7ae0b9), TOBN(0xc46fbf81, 0x1939b44a), TOBN(0x76fefae8, 0x56595eb1), TOBN(0x417b66ab, 0xcd5f29c7)}, {TOBN(0x5f2332b2, 0xc5ceec20), TOBN(0xd69661ff, 0xe1a1cae2), TOBN(0x5ede7e52, 0x9b0286e6), TOBN(0x9d062529, 0xe276b993)}}, {{TOBN(0x324794b0, 0x7e50122b), TOBN(0xdd744f8b, 0x4af07ca5), TOBN(0x30a12f08, 0xd63fc97b), TOBN(0x39650f1a, 0x76626d9d)}, {TOBN(0x101b47f7, 0x1fa38477), TOBN(0x3d815f19, 0xd4dc124f), TOBN(0x1569ae95, 0xb26eb58a), TOBN(0xc3cde188, 0x95fb1887)}}, {{TOBN(0x54e9f37b, 0xf9539a48), TOBN(0xb0100e06, 0x7408c1a5), TOBN(0x821d9811, 0xea580cbb), TOBN(0x8af52d35, 0x86e50c56)}, {TOBN(0xdfbd9d47, 0xdbbf698b), TOBN(0x2961a1ea, 0x03dc1c73), TOBN(0x203d38f8, 0xe76a5df8), TOBN(0x08a53a68, 0x6def707a)}}, 
{{TOBN(0x26eefb48, 0x1bee45d4), TOBN(0xb3cee346, 0x3c688036), TOBN(0x463c5315, 0xc42f2469), TOBN(0x19d84d2e, 0x81378162)}, {TOBN(0x22d7c3c5, 0x1c4d349f), TOBN(0x65965844, 0x163d59c5), TOBN(0xcf198c56, 0xb8abceae), TOBN(0x6fb1fb1b, 0x628559d5)}}, {{TOBN(0x8bbffd06, 0x07bf8fe3), TOBN(0x46259c58, 0x3467734b), TOBN(0xd8953cea, 0x35f7f0d3), TOBN(0x1f0bece2, 0xd65b0ff1)}, {TOBN(0xf7d5b4b3, 0xf3c72914), TOBN(0x29e8ea95, 0x3cb53389), TOBN(0x4a365626, 0x836b6d46), TOBN(0xe849f910, 0xea174fde)}}, {{TOBN(0x7ec62fbb, 0xf4737f21), TOBN(0xd8dba5ab, 0x6209f5ac), TOBN(0x24b5d7a9, 0xa5f9adbe), TOBN(0x707d28f7, 0xa61dc768)}, {TOBN(0x7711460b, 0xcaa999ea), TOBN(0xba7b174d, 0x1c92e4cc), TOBN(0x3c4bab66, 0x18d4bf2d), TOBN(0xb8f0c980, 0xeb8bd279)}}, {{TOBN(0x024bea9a, 0x324b4737), TOBN(0xfba9e423, 0x32a83bca), TOBN(0x6e635643, 0xa232dced), TOBN(0x99619367, 0x2571c8ba)}, {TOBN(0xe8c9f357, 0x54b7032b), TOBN(0xf936b3ba, 0x2442d54a), TOBN(0x2263f0f0, 0x8290c65a), TOBN(0x48989780, 0xee2c7fdb)}}, {{TOBN(0xadc5d55a, 0x13d4f95e), TOBN(0x737cff85, 0xad9b8500), TOBN(0x271c557b, 0x8a73f43d), TOBN(0xbed617a4, 0xe18bc476)}, {TOBN(0x66245401, 0x7dfd8ab2), TOBN(0xae7b89ae, 0x3a2870aa), TOBN(0x1b555f53, 0x23a7e545), TOBN(0x6791e247, 0xbe057e4c)}}, {{TOBN(0x860136ad, 0x324fa34d), TOBN(0xea111447, 0x4cbeae28), TOBN(0x023a4270, 0xbedd3299), TOBN(0x3d5c3a7f, 0xc1c35c34)}, {TOBN(0xb0f6db67, 0x8d0412d2), TOBN(0xd92625e2, 0xfcdc6b9a), TOBN(0x92ae5ccc, 0x4e28a982), TOBN(0xea251c36, 0x47a3ce7e)}}, {{TOBN(0x9d658932, 0x790691bf), TOBN(0xed610589, 0x06b736ae), TOBN(0x712c2f04, 0xc0d63b6e), TOBN(0x5cf06fd5, 0xc63d488f)}, {TOBN(0x97363fac, 0xd9588e41), TOBN(0x1f9bf762, 0x2b93257e), TOBN(0xa9d1ffc4, 0x667acace), TOBN(0x1cf4a1aa, 0x0a061ecf)}}, {{TOBN(0x40e48a49, 0xdc1818d0), TOBN(0x0643ff39, 0xa3621ab0), TOBN(0x5768640c, 0xe39ef639), TOBN(0x1fc099ea, 0x04d86854)}, {TOBN(0x9130b9c3, 0xeccd28fd), TOBN(0xd743cbd2, 0x7eec54ab), TOBN(0x052b146f, 0xe5b475b6), TOBN(0x058d9a82, 0x900a7d1f)}}, {{TOBN(0x65e02292, 0x91262b72), 
TOBN(0x96f924f9, 0xbb0edf03), TOBN(0x5cfa59c8, 0xfe206842), TOBN(0xf6037004, 0x5eafa720)}, {TOBN(0x5f30699e, 0x18d7dd96), TOBN(0x381e8782, 0xcbab2495), TOBN(0x91669b46, 0xdd8be949), TOBN(0xb40606f5, 0x26aae8ef)}}, {{TOBN(0x2812b839, 0xfc6751a4), TOBN(0x16196214, 0xfba800ef), TOBN(0x4398d5ca, 0x4c1a2875), TOBN(0x720c00ee, 0x653d8349)}, {TOBN(0xc2699eb0, 0xd820007c), TOBN(0x880ee660, 0xa39b5825), TOBN(0x70694694, 0x471f6984), TOBN(0xf7d16ea8, 0xe3dda99a)}}, {{TOBN(0x28d675b2, 0xc0519a23), TOBN(0x9ebf94fe, 0x4f6952e3), TOBN(0xf28bb767, 0xa2294a8a), TOBN(0x85512b4d, 0xfe0af3f5)}, {TOBN(0x18958ba8, 0x99b16a0d), TOBN(0x95c2430c, 0xba7548a7), TOBN(0xb30d1b10, 0xa16be615), TOBN(0xe3ebbb97, 0x85bfb74c)}}, {{TOBN(0xa3273cfe, 0x18549fdb), TOBN(0xf6e200bf, 0x4fcdb792), TOBN(0x54a76e18, 0x83aba56c), TOBN(0x73ec66f6, 0x89ef6aa2)}, {TOBN(0x8d17add7, 0xd1b9a305), TOBN(0xa959c5b9, 0xb7ae1b9d), TOBN(0x88643522, 0x6bcc094a), TOBN(0xcc5616c4, 0xd7d429b9)}}, {{TOBN(0xa6dada01, 0xe6a33f7c), TOBN(0xc6217a07, 0x9d4e70ad), TOBN(0xd619a818, 0x09c15b7c), TOBN(0xea06b329, 0x0e80c854)}, {TOBN(0x174811ce, 0xa5f5e7b9), TOBN(0x66dfc310, 0x787c65f4), TOBN(0x4ea7bd69, 0x3316ab54), TOBN(0xc12c4acb, 0x1dcc0f70)}}, {{TOBN(0xe4308d1a, 0x1e407dd9), TOBN(0xe8a3587c, 0x91afa997), TOBN(0xea296c12, 0xab77b7a5), TOBN(0xb5ad49e4, 0x673c0d52)}, {TOBN(0x40f9b2b2, 0x7006085a), TOBN(0xa88ff340, 0x87bf6ec2), TOBN(0x978603b1, 0x4e3066a6), TOBN(0xb3f99fc2, 0xb5e486e2)}}, {{TOBN(0x07b53f5e, 0xb2e63645), TOBN(0xbe57e547, 0x84c84232), TOBN(0xd779c216, 0x7214d5cf), TOBN(0x617969cd, 0x029a3aca)}, {TOBN(0xd17668cd, 0x8a7017a0), TOBN(0x77b4d19a, 0xbe9b7ee8), TOBN(0x58fd0e93, 0x9c161776), TOBN(0xa8c4f4ef, 0xd5968a72)}}, {{TOBN(0x296071cc, 0x67b3de77), TOBN(0xae3c0b8e, 0x634f7905), TOBN(0x67e440c2, 0x8a7100c9), TOBN(0xbb8c3c1b, 0xeb4b9b42)}, {TOBN(0x6d71e8ea, 0xc51b3583), TOBN(0x7591f5af, 0x9525e642), TOBN(0xf73a2f7b, 0x13f509f3), TOBN(0x618487aa, 0x5619ac9b)}}, {{TOBN(0x3a72e5f7, 0x9d61718a), TOBN(0x00413bcc, 0x7592d28c), 
TOBN(0x7d9b11d3, 0x963c35cf), TOBN(0x77623bcf, 0xb90a46ed)}, {TOBN(0xdeef273b, 0xdcdd2a50), TOBN(0x4a741f9b, 0x0601846e), TOBN(0x33b89e51, 0x0ec6e929), TOBN(0xcb02319f, 0x8b7f22cd)}}, {{TOBN(0xbbe1500d, 0x084bae24), TOBN(0x2f0ae8d7, 0x343d2693), TOBN(0xacffb5f2, 0x7cdef811), TOBN(0xaa0c030a, 0x263fb94f)}, {TOBN(0x6eef0d61, 0xa0f442de), TOBN(0xf92e1817, 0x27b139d3), TOBN(0x1ae6deb7, 0x0ad8bc28), TOBN(0xa89e38dc, 0xc0514130)}}, {{TOBN(0x81eeb865, 0xd2fdca23), TOBN(0x5a15ee08, 0xcc8ef895), TOBN(0x768fa10a, 0x01905614), TOBN(0xeff5b8ef, 0x880ee19b)}, {TOBN(0xf0c0cabb, 0xcb1c8a0e), TOBN(0x2e1ee9cd, 0xb8c838f9), TOBN(0x0587d8b8, 0x8a4a14c0), TOBN(0xf6f27896, 0x2ff698e5)}}, {{TOBN(0xed38ef1c, 0x89ee6256), TOBN(0xf44ee1fe, 0x6b353b45), TOBN(0x9115c0c7, 0x70e903b3), TOBN(0xc78ec0a1, 0x818f31df)}, {TOBN(0x6c003324, 0xb7dccbc6), TOBN(0xd96dd1f3, 0x163bbc25), TOBN(0x33aa82dd, 0x5cedd805), TOBN(0x123aae4f, 0x7f7eb2f1)}}, {{TOBN(0x1723fcf5, 0xa26262cd), TOBN(0x1f7f4d5d, 0x0060ebd5), TOBN(0xf19c5c01, 0xb2eaa3af), TOBN(0x2ccb9b14, 0x9790accf)}, {TOBN(0x1f9c1cad, 0x52324aa6), TOBN(0x63200526, 0x7247df54), TOBN(0x5732fe42, 0xbac96f82), TOBN(0x52fe771f, 0x01a1c384)}}, {{TOBN(0x546ca13d, 0xb1001684), TOBN(0xb56b4eee, 0xa1709f75), TOBN(0x266545a9, 0xd5db8672), TOBN(0xed971c90, 0x1e8f3cfb)}, {TOBN(0x4e7d8691, 0xe3a07b29), TOBN(0x7570d9ec, 0xe4b696b9), TOBN(0xdc5fa067, 0x7bc7e9ae), TOBN(0x68b44caf, 0xc82c4844)}}, {{TOBN(0x519d34b3, 0xbf44da80), TOBN(0x283834f9, 0x5ab32e66), TOBN(0x6e608797, 0x6278a000), TOBN(0x1e62960e, 0x627312f6)}, {TOBN(0x9b87b27b, 0xe6901c55), TOBN(0x80e78538, 0x24fdbc1f), TOBN(0xbbbc0951, 0x2facc27d), TOBN(0x06394239, 0xac143b5a)}}, {{TOBN(0x35bb4a40, 0x376c1944), TOBN(0x7cb62694, 0x63da1511), TOBN(0xafd29161, 0xb7148a3b), TOBN(0xa6f9d9ed, 0x4e2ea2ee)}, {TOBN(0x15dc2ca2, 0x880dd212), TOBN(0x903c3813, 0xa61139a9), TOBN(0x2aa7b46d, 0x6c0f8785), TOBN(0x36ce2871, 0x901c60ff)}}, {{TOBN(0xc683b028, 0xe10d9c12), TOBN(0x7573baa2, 0x032f33d3), TOBN(0x87a9b1f6, 0x67a31b58), 
TOBN(0xfd3ed11a, 0xf4ffae12)}, {TOBN(0x83dcaa9a, 0x0cb2748e), TOBN(0x8239f018, 0x5d6fdf16), TOBN(0xba67b49c, 0x72753941), TOBN(0x2beec455, 0xc321cb36)}}, {{TOBN(0x88015606, 0x3f8b84ce), TOBN(0x76417083, 0x8d38c86f), TOBN(0x054f1ca7, 0x598953dd), TOBN(0xc939e110, 0x4e8e7429)}, {TOBN(0x9b1ac2b3, 0x5a914f2f), TOBN(0x39e35ed3, 0xe74b8f9c), TOBN(0xd0debdb2, 0x781b2fb0), TOBN(0x1585638f, 0x2d997ba2)}}, {{TOBN(0x9c4b646e, 0x9e2fce99), TOBN(0x68a21081, 0x1e80857f), TOBN(0x06d54e44, 0x3643b52a), TOBN(0xde8d6d63, 0x0d8eb843)}, {TOBN(0x70321563, 0x42146a0a), TOBN(0x8ba826f2, 0x5eaa3622), TOBN(0x227a58bd, 0x86138787), TOBN(0x43b6c03c, 0x10281d37)}}, {{TOBN(0x6326afbb, 0xb54dde39), TOBN(0x744e5e8a, 0xdb6f2d5f), TOBN(0x48b2a99a, 0xcff158e1), TOBN(0xa93c8fa0, 0xef87918f)}, {TOBN(0x2182f956, 0xde058c5c), TOBN(0x216235d2, 0x936f9e7a), TOBN(0xace0c0db, 0xd2e31e67), TOBN(0xc96449bf, 0xf23ac3e7)}}, {{TOBN(0x7e9a2874, 0x170693bd), TOBN(0xa28e14fd, 0xa45e6335), TOBN(0x5757f6b3, 0x56427344), TOBN(0x822e4556, 0xacf8edf9)}, {TOBN(0x2b7a6ee2, 0xe6a285cd), TOBN(0x5866f211, 0xa9df3af0), TOBN(0x40dde2dd, 0xf845b844), TOBN(0x986c3726, 0x110e5e49)}}, {{TOBN(0x73680c2a, 0xf7172277), TOBN(0x57b94f0f, 0x0cccb244), TOBN(0xbdff7267, 0x2d438ca7), TOBN(0xbad1ce11, 0xcf4663fd)}, {TOBN(0x9813ed9d, 0xd8f71cae), TOBN(0xf43272a6, 0x961fdaa6), TOBN(0xbeff0119, 0xbd6d1637), TOBN(0xfebc4f91, 0x30361978)}}, {{TOBN(0x02b37a95, 0x2f41deff), TOBN(0x0e44a59a, 0xe63b89b7), TOBN(0x673257dc, 0x143ff951), TOBN(0x19c02205, 0xd752baf4)}, {TOBN(0x46c23069, 0xc4b7d692), TOBN(0x2e6392c3, 0xfd1502ac), TOBN(0x6057b1a2, 0x1b220846), TOBN(0xe51ff946, 0x0c1b5b63)}}}, {{{TOBN(0x6e85cb51, 0x566c5c43), TOBN(0xcff9c919, 0x3597f046), TOBN(0x9354e90c, 0x4994d94a), TOBN(0xe0a39332, 0x2147927d)}, {TOBN(0x8427fac1, 0x0dc1eb2b), TOBN(0x88cfd8c2, 0x2ff319fa), TOBN(0xe2d4e684, 0x01965274), TOBN(0xfa2e067d, 0x67aaa746)}}, {{TOBN(0xb6d92a7f, 0x3e5f9f11), TOBN(0x9afe153a, 0xd6cb3b8e), TOBN(0x4d1a6dd7, 0xddf800bd), TOBN(0xf6c13cc0, 
0xcaf17e19)}, {TOBN(0x15f6c58e, 0x325fc3ee), TOBN(0x71095400, 0xa31dc3b2), TOBN(0x168e7c07, 0xafa3d3e7), TOBN(0x3f8417a1, 0x94c7ae2d)}}, {{TOBN(0xec234772, 0x813b230d), TOBN(0x634d0f5f, 0x17344427), TOBN(0x11548ab1, 0xd77fc56a), TOBN(0x7fab1750, 0xce06af77)}, {TOBN(0xb62c10a7, 0x4f7c4f83), TOBN(0xa7d2edc4, 0x220a67d9), TOBN(0x1c404170, 0x921209a0), TOBN(0x0b9815a0, 0xface59f0)}}, {{TOBN(0x2842589b, 0x319540c3), TOBN(0x18490f59, 0xa283d6f8), TOBN(0xa2731f84, 0xdaae9fcb), TOBN(0x3db6d960, 0xc3683ba0)}, {TOBN(0xc85c63bb, 0x14611069), TOBN(0xb19436af, 0x0788bf05), TOBN(0x905459df, 0x347460d2), TOBN(0x73f6e094, 0xe11a7db1)}}, {{TOBN(0xdc7f938e, 0xb6357f37), TOBN(0xc5d00f79, 0x2bd8aa62), TOBN(0xc878dcb9, 0x2ca979fc), TOBN(0x37e83ed9, 0xeb023a99)}, {TOBN(0x6b23e273, 0x1560bf3d), TOBN(0x1086e459, 0x1d0fae61), TOBN(0x78248316, 0x9a9414bd), TOBN(0x1b956bc0, 0xf0ea9ea1)}}, {{TOBN(0x7b85bb91, 0xc31b9c38), TOBN(0x0c5aa90b, 0x48ef57b5), TOBN(0xdedeb169, 0xaf3bab6f), TOBN(0xe610ad73, 0x2d373685)}, {TOBN(0xf13870df, 0x02ba8e15), TOBN(0x0337edb6, 0x8ca7f771), TOBN(0xe4acf747, 0xb62c036c), TOBN(0xd921d576, 0xb6b94e81)}}, {{TOBN(0xdbc86439, 0x2c422f7a), TOBN(0xfb635362, 0xed348898), TOBN(0x83084668, 0xc45bfcd1), TOBN(0xc357c9e3, 0x2b315e11)}, {TOBN(0xb173b540, 0x5b2e5b8c), TOBN(0x7e946931, 0xe102b9a4), TOBN(0x17c890eb, 0x7b0fb199), TOBN(0xec225a83, 0xd61b662b)}}, {{TOBN(0xf306a3c8, 0xee3c76cb), TOBN(0x3cf11623, 0xd32a1f6e), TOBN(0xe6d5ab64, 0x6863e956), TOBN(0x3b8a4cbe, 0x5c005c26)}, {TOBN(0xdcd529a5, 0x9ce6bb27), TOBN(0xc4afaa52, 0x04d4b16f), TOBN(0xb0624a26, 0x7923798d), TOBN(0x85e56df6, 0x6b307fab)}}, {{TOBN(0x0281893c, 0x2bf29698), TOBN(0x91fc19a4, 0xd7ce7603), TOBN(0x75a5dca3, 0xad9a558f), TOBN(0x40ceb3fa, 0x4d50bf77)}, {TOBN(0x1baf6060, 0xbc9ba369), TOBN(0x927e1037, 0x597888c2), TOBN(0xd936bf19, 0x86a34c07), TOBN(0xd4cf10c1, 0xc34ae980)}}, {{TOBN(0x3a3e5334, 0x859dd614), TOBN(0x9c475b5b, 0x18d0c8ee), TOBN(0x63080d1f, 0x07cd51d5), TOBN(0xc9c0d0a6, 0xb88b4326)}, {TOBN(0x1ac98691, 
0xc234296f), TOBN(0x2a0a83a4, 0x94887fb6), TOBN(0x56511427, 0x0cea9cf2), TOBN(0x5230a6e8, 0xa24802f5)}}, {{TOBN(0xf7a2bf0f, 0x72e3d5c1), TOBN(0x37717446, 0x4f21439e), TOBN(0xfedcbf25, 0x9ce30334), TOBN(0xe0030a78, 0x7ce202f9)}, {TOBN(0x6f2d9ebf, 0x1202e9ca), TOBN(0xe79dde6c, 0x75e6e591), TOBN(0xf52072af, 0xf1dac4f8), TOBN(0x6c8d087e, 0xbb9b404d)}}, {{TOBN(0xad0fc73d, 0xbce913af), TOBN(0x909e587b, 0x458a07cb), TOBN(0x1300da84, 0xd4f00c8a), TOBN(0x425cd048, 0xb54466ac)}, {TOBN(0xb59cb9be, 0x90e9d8bf), TOBN(0x991616db, 0x3e431b0e), TOBN(0xd3aa117a, 0x531aecff), TOBN(0x91af92d3, 0x59f4dc3b)}}, {{TOBN(0x9b1ec292, 0xe93fda29), TOBN(0x76bb6c17, 0xe97d91bc), TOBN(0x7509d95f, 0xaface1e6), TOBN(0x3653fe47, 0xbe855ae3)}, {TOBN(0x73180b28, 0x0f680e75), TOBN(0x75eefd1b, 0xeeb6c26c), TOBN(0xa4cdf29f, 0xb66d4236), TOBN(0x2d70a997, 0x6b5821d8)}}, {{TOBN(0x7a3ee207, 0x20445c36), TOBN(0x71d1ac82, 0x59877174), TOBN(0x0fc539f7, 0x949f73e9), TOBN(0xd05cf3d7, 0x982e3081)}, {TOBN(0x8758e20b, 0x7b1c7129), TOBN(0xffadcc20, 0x569e61f2), TOBN(0xb05d3a2f, 0x59544c2d), TOBN(0xbe16f5c1, 0x9fff5e53)}}, {{TOBN(0x73cf65b8, 0xaad58135), TOBN(0x622c2119, 0x037aa5be), TOBN(0x79373b3f, 0x646fd6a0), TOBN(0x0e029db5, 0x0d3978cf)}, {TOBN(0x8bdfc437, 0x94fba037), TOBN(0xaefbd687, 0x620797a6), TOBN(0x3fa5382b, 0xbd30d38e), TOBN(0x7627cfbf, 0x585d7464)}}, {{TOBN(0xb2330fef, 0x4e4ca463), TOBN(0xbcef7287, 0x3566cc63), TOBN(0xd161d2ca, 0xcf780900), TOBN(0x135dc539, 0x5b54827d)}, {TOBN(0x638f052e, 0x27bf1bc6), TOBN(0x10a224f0, 0x07dfa06c), TOBN(0xe973586d, 0x6d3321da), TOBN(0x8b0c5738, 0x26152c8f)}}, {{TOBN(0x07ef4f2a, 0x34606074), TOBN(0x80fe7fe8, 0xa0f7047a), TOBN(0x3d1a8152, 0xe1a0e306), TOBN(0x32cf43d8, 0x88da5222)}, {TOBN(0xbf89a95f, 0x5f02ffe6), TOBN(0x3d9eb9a4, 0x806ad3ea), TOBN(0x012c17bb, 0x79c8e55e), TOBN(0xfdcd1a74, 0x99c81dac)}}, {{TOBN(0x7043178b, 0xb9556098), TOBN(0x4090a1df, 0x801c3886), TOBN(0x759800ff, 0x9b67b912), TOBN(0x3e5c0304, 0x232620c8)}, {TOBN(0x4b9d3c4b, 0x70dceeca), TOBN(0xbb2d3c15, 
0x181f648e), TOBN(0xf981d837, 0x6e33345c), TOBN(0xb626289b, 0x0cf2297a)}}, {{TOBN(0x766ac659, 0x8baebdcf), TOBN(0x1a28ae09, 0x75df01e5), TOBN(0xb71283da, 0x375876d8), TOBN(0x4865a96d, 0x607b9800)}, {TOBN(0x25dd1bcd, 0x237936b2), TOBN(0x332f4f4b, 0x60417494), TOBN(0xd0923d68, 0x370a2147), TOBN(0x497f5dfb, 0xdc842203)}}, {{TOBN(0x9dc74cbd, 0x32be5e0f), TOBN(0x7475bcb7, 0x17a01375), TOBN(0x438477c9, 0x50d872b1), TOBN(0xcec67879, 0xffe1d63d)}, {TOBN(0x9b006014, 0xd8578c70), TOBN(0xc9ad99a8, 0x78bb6b8b), TOBN(0x6799008e, 0x11fb3806), TOBN(0xcfe81435, 0xcd44cab3)}}, {{TOBN(0xa2ee1582, 0x2f4fb344), TOBN(0xb8823450, 0x483fa6eb), TOBN(0x622d323d, 0x652c7749), TOBN(0xd8474a98, 0xbeb0a15b)}, {TOBN(0xe43c154d, 0x5d1c00d0), TOBN(0x7fd581d9, 0x0e3e7aac), TOBN(0x2b44c619, 0x2525ddf8), TOBN(0x67a033eb, 0xb8ae9739)}}, {{TOBN(0x113ffec1, 0x9ef2d2e4), TOBN(0x1bf6767e, 0xd5a0ea7f), TOBN(0x57fff75e, 0x03714c0a), TOBN(0xa23c422e, 0x0a23e9ee)}, {TOBN(0xdd5f6b2d, 0x540f83af), TOBN(0xc2c2c27e, 0x55ea46a7), TOBN(0xeb6b4246, 0x672a1208), TOBN(0xd13599f7, 0xae634f7a)}}, {{TOBN(0xcf914b5c, 0xd7b32c6e), TOBN(0x61a5a640, 0xeaf61814), TOBN(0x8dc3df8b, 0x208a1bbb), TOBN(0xef627fd6, 0xb6d79aa5)}, {TOBN(0x44232ffc, 0xc4c86bc8), TOBN(0xe6f9231b, 0x061539fe), TOBN(0x1d04f25a, 0x958b9533), TOBN(0x180cf934, 0x49e8c885)}}, {{TOBN(0x89689595, 0x9884aaf7), TOBN(0xb1959be3, 0x07b348a6), TOBN(0x96250e57, 0x3c147c87), TOBN(0xae0efb3a, 0xdd0c61f8)}, {TOBN(0xed00745e, 0xca8c325e), TOBN(0x3c911696, 0xecff3f70), TOBN(0x73acbc65, 0x319ad41d), TOBN(0x7b01a020, 0xf0b1c7ef)}}, {{TOBN(0xea32b293, 0x63a1483f), TOBN(0x89eabe71, 0x7a248f96), TOBN(0x9c6231d3, 0x343157e5), TOBN(0x93a375e5, 0xdf3c546d)}, {TOBN(0xe76e9343, 0x6a2afe69), TOBN(0xc4f89100, 0xe166c88e), TOBN(0x248efd0d, 0x4f872093), TOBN(0xae0eb3ea, 0x8fe0ea61)}}, {{TOBN(0xaf89790d, 0x9d79046e), TOBN(0x4d650f2d, 0x6cee0976), TOBN(0xa3935d9a, 0x43071eca), TOBN(0x66fcd2c9, 0x283b0bfe)}, {TOBN(0x0e665eb5, 0x696605f1), TOBN(0xe77e5d07, 0xa54cd38d), TOBN(0x90ee050a, 
0x43d950cf), TOBN(0x86ddebda, 0xd32e69b5)}}, {{TOBN(0x6ad94a3d, 0xfddf7415), TOBN(0xf7fa1309, 0x3f6e8d5a), TOBN(0xc4831d1d, 0xe9957f75), TOBN(0x7de28501, 0xd5817447)}, {TOBN(0x6f1d7078, 0x9e2aeb6b), TOBN(0xba2b9ff4, 0xf67a53c2), TOBN(0x36963767, 0xdf9defc3), TOBN(0x479deed3, 0x0d38022c)}}, {{TOBN(0xd2edb89b, 0x3a8631e8), TOBN(0x8de855de, 0x7a213746), TOBN(0xb2056cb7, 0xb00c5f11), TOBN(0xdeaefbd0, 0x2c9b85e4)}, {TOBN(0x03f39a8d, 0xd150892d), TOBN(0x37b84686, 0x218b7985), TOBN(0x36296dd8, 0xb7375f1a), TOBN(0x472cd4b1, 0xb78e898e)}}, {{TOBN(0x15dff651, 0xe9f05de9), TOBN(0xd4045069, 0x2ce98ba9), TOBN(0x8466a7ae, 0x9b38024c), TOBN(0xb910e700, 0xe5a6b5ef)}, {TOBN(0xae1c56ea, 0xb3aa8f0d), TOBN(0xbab2a507, 0x7eee74a6), TOBN(0x0dca11e2, 0x4b4c4620), TOBN(0xfd896e2e, 0x4c47d1f4)}}, {{TOBN(0xeb45ae53, 0x308fbd93), TOBN(0x46cd5a2e, 0x02c36fda), TOBN(0x6a3d4e90, 0xbaa48385), TOBN(0xdd55e62e, 0x9dbe9960)}, {TOBN(0xa1406aa0, 0x2a81ede7), TOBN(0x6860dd14, 0xf9274ea7), TOBN(0xcfdcb0c2, 0x80414f86), TOBN(0xff410b10, 0x22f94327)}}, {{TOBN(0x5a33cc38, 0x49ad467b), TOBN(0xefb48b6c, 0x0a7335f1), TOBN(0x14fb54a4, 0xb153a360), TOBN(0x604aa9d2, 0xb52469cc)}, {TOBN(0x5e9dc486, 0x754e48e9), TOBN(0x693cb455, 0x37471e8e), TOBN(0xfb2fd7cd, 0x8d3b37b6), TOBN(0x63345e16, 0xcf09ff07)}}, {{TOBN(0x9910ba6b, 0x23a5d896), TOBN(0x1fe19e35, 0x7fe4364e), TOBN(0x6e1da8c3, 0x9a33c677), TOBN(0x15b4488b, 0x29fd9fd0)}, {TOBN(0x1f439254, 0x1a1f22bf), TOBN(0x920a8a70, 0xab8163e8), TOBN(0x3fd1b249, 0x07e5658e), TOBN(0xf2c4f79c, 0xb6ec839b)}}, {{TOBN(0x1abbc3d0, 0x4aa38d1b), TOBN(0x3b0db35c, 0xb5d9510e), TOBN(0x1754ac78, 0x3e60dec0), TOBN(0x53272fd7, 0xea099b33)}, {TOBN(0x5fb0494f, 0x07a8e107), TOBN(0x4a89e137, 0x6a8191fa), TOBN(0xa113b7f6, 0x3c4ad544), TOBN(0x88a2e909, 0x6cb9897b)}}, {{TOBN(0x17d55de3, 0xb44a3f84), TOBN(0xacb2f344, 0x17c6c690), TOBN(0x32088168, 0x10232390), TOBN(0xf2e8a61f, 0x6c733bf7)}, {TOBN(0xa774aab6, 0x9c2d7652), TOBN(0xfb5307e3, 0xed95c5bc), TOBN(0xa05c73c2, 0x4981f110), TOBN(0x1baae31c, 
0xa39458c9)}}, {{TOBN(0x1def185b, 0xcbea62e7), TOBN(0xe8ac9eae, 0xeaf63059), TOBN(0x098a8cfd, 0x9921851c), TOBN(0xd959c3f1, 0x3abe2f5b)}, {TOBN(0xa4f19525, 0x20e40ae5), TOBN(0x320789e3, 0x07a24aa1), TOBN(0x259e6927, 0x7392b2bc), TOBN(0x58f6c667, 0x1918668b)}}, {{TOBN(0xce1db2bb, 0xc55d2d8b), TOBN(0x41d58bb7, 0xf4f6ca56), TOBN(0x7650b680, 0x8f877614), TOBN(0x905e16ba, 0xf4c349ed)}, {TOBN(0xed415140, 0xf661acac), TOBN(0x3b8784f0, 0xcb2270af), TOBN(0x3bc280ac, 0x8a402cba), TOBN(0xd53f7146, 0x0937921a)}}, {{TOBN(0xc03c8ee5, 0xe5681e83), TOBN(0x62126105, 0xf6ac9e4a), TOBN(0x9503a53f, 0x936b1a38), TOBN(0x3d45e2d4, 0x782fecbd)}, {TOBN(0x69a5c439, 0x76e8ae98), TOBN(0xb53b2eeb, 0xbfb4b00e), TOBN(0xf1674712, 0x72386c89), TOBN(0x30ca34a2, 0x4268bce4)}}, {{TOBN(0x7f1ed86c, 0x78341730), TOBN(0x8ef5beb8, 0xb525e248), TOBN(0xbbc489fd, 0xb74fbf38), TOBN(0x38a92a0e, 0x91a0b382)}, {TOBN(0x7a77ba3f, 0x22433ccf), TOBN(0xde8362d6, 0xa29f05a9), TOBN(0x7f6a30ea, 0x61189afc), TOBN(0x693b5505, 0x59ef114f)}}, {{TOBN(0x50266bc0, 0xcd1797a1), TOBN(0xea17b47e, 0xf4b7af2d), TOBN(0xd6c4025c, 0x3df9483e), TOBN(0x8cbb9d9f, 0xa37b18c9)}, {TOBN(0x91cbfd9c, 0x4d8424cf), TOBN(0xdb7048f1, 0xab1c3506), TOBN(0x9eaf641f, 0x028206a3), TOBN(0xf986f3f9, 0x25bdf6ce)}}, {{TOBN(0x262143b5, 0x224c08dc), TOBN(0x2bbb09b4, 0x81b50c91), TOBN(0xc16ed709, 0xaca8c84f), TOBN(0xa6210d9d, 0xb2850ca8)}, {TOBN(0x6d8df67a, 0x09cb54d6), TOBN(0x91eef6e0, 0x500919a4), TOBN(0x90f61381, 0x0f132857), TOBN(0x9acede47, 0xf8d5028b)}}, {{TOBN(0x844d1b71, 0x90b771c3), TOBN(0x563b71e4, 0xba6426be), TOBN(0x2efa2e83, 0xbdb802ff), TOBN(0x3410cbab, 0xab5b4a41)}, {TOBN(0x555b2d26, 0x30da84dd), TOBN(0xd0711ae9, 0xee1cc29a), TOBN(0xcf3e8c60, 0x2f547792), TOBN(0x03d7d5de, 0xdc678b35)}}, {{TOBN(0x071a2fa8, 0xced806b8), TOBN(0x222e6134, 0x697f1478), TOBN(0xdc16fd5d, 0xabfcdbbf), TOBN(0x44912ebf, 0x121b53b8)}, {TOBN(0xac943674, 0x2496c27c), TOBN(0x8ea3176c, 0x1ffc26b0), TOBN(0xb6e224ac, 0x13debf2c), TOBN(0x524cc235, 0xf372a832)}}, 
{{TOBN(0xd706e1d8, 0x9f6f1b18), TOBN(0x2552f005, 0x44cce35b), TOBN(0x8c8326c2, 0xa88e31fc), TOBN(0xb5468b2c, 0xf9552047)}, {TOBN(0xce683e88, 0x3ff90f2b), TOBN(0x77947bdf, 0x2f0a5423), TOBN(0xd0a1b28b, 0xed56e328), TOBN(0xaee35253, 0xc20134ac)}}, {{TOBN(0x7e98367d, 0x3567962f), TOBN(0x379ed61f, 0x8188bffb), TOBN(0x73bba348, 0xfaf130a1), TOBN(0x6c1f75e1, 0x904ed734)}, {TOBN(0x18956642, 0x3b4a79fc), TOBN(0xf20bc83d, 0x54ef4493), TOBN(0x836d425d, 0x9111eca1), TOBN(0xe5b5c318, 0x009a8dcf)}}, {{TOBN(0x3360b25d, 0x13221bc5), TOBN(0x707baad2, 0x6b3eeaf7), TOBN(0xd7279ed8, 0x743a95a1), TOBN(0x7450a875, 0x969e809f)}, {TOBN(0x32b6bd53, 0xe5d0338f), TOBN(0x1e77f7af, 0x2b883bbc), TOBN(0x90da12cc, 0x1063ecd0), TOBN(0xe2697b58, 0xc315be47)}}, {{TOBN(0x2771a5bd, 0xda85d534), TOBN(0x53e78c1f, 0xff980eea), TOBN(0xadf1cf84, 0x900385e7), TOBN(0x7d3b14f6, 0xc9387b62)}, {TOBN(0x170e74b0, 0xcb8f2bd2), TOBN(0x2d50b486, 0x827fa993), TOBN(0xcdbe8c9a, 0xf6f32bab), TOBN(0x55e906b0, 0xc3b93ab8)}}, {{TOBN(0x747f22fc, 0x8fe280d1), TOBN(0xcd8e0de5, 0xb2e114ab), TOBN(0x5ab7dbeb, 0xe10b68b0), TOBN(0x9dc63a9c, 0xa480d4b2)}, {TOBN(0x78d4bc3b, 0x4be1495f), TOBN(0x25eb3db8, 0x9359122d), TOBN(0x3f8ac05b, 0x0809cbdc), TOBN(0xbf4187bb, 0xd37c702f)}}, {{TOBN(0x84cea069, 0x1416a6a5), TOBN(0x8f860c79, 0x43ef881c), TOBN(0x41311f8a, 0x38038a5d), TOBN(0xe78c2ec0, 0xfc612067)}, {TOBN(0x494d2e81, 0x5ad73581), TOBN(0xb4cc9e00, 0x59604097), TOBN(0xff558aec, 0xf3612cba), TOBN(0x35beef7a, 0x9e36c39e)}}, {{TOBN(0x1845c7cf, 0xdbcf41b9), TOBN(0x5703662a, 0xaea997c0), TOBN(0x8b925afe, 0xe402f6d8), TOBN(0xd0a1b1ae, 0x4dd72162)}, {TOBN(0x9f47b375, 0x03c41c4b), TOBN(0xa023829b, 0x0391d042), TOBN(0x5f5045c3, 0x503b8b0a), TOBN(0x123c2688, 0x98c010e5)}}, {{TOBN(0x324ec0cc, 0x36ba06ee), TOBN(0xface3115, 0x3dd2cc0c), TOBN(0xb364f3be, 0xf333e91f), TOBN(0xef8aff73, 0x28e832b0)}, {TOBN(0x1e9bad04, 0x2d05841b), TOBN(0x42f0e3df, 0x356a21e2), TOBN(0xa3270bcb, 0x4add627e), TOBN(0xb09a8158, 0xd322e711)}}, {{TOBN(0x86e326a1, 0x0fee104a), 
TOBN(0xad7788f8, 0x3703f65d), TOBN(0x7e765430, 0x47bc4833), TOBN(0x6cee582b, 0x2b9b893a)}, {TOBN(0x9cd2a167, 0xe8f55a7b), TOBN(0xefbee3c6, 0xd9e4190d), TOBN(0x33ee7185, 0xd40c2e9d), TOBN(0x844cc9c5, 0xa380b548)}}, {{TOBN(0x323f8ecd, 0x66926e04), TOBN(0x0001e38f, 0x8110c1ba), TOBN(0x8dbcac12, 0xfc6a7f07), TOBN(0xd65e1d58, 0x0cec0827)}, {TOBN(0xd2cd4141, 0xbe76ca2d), TOBN(0x7895cf5c, 0xe892f33a), TOBN(0x956d230d, 0x367139d2), TOBN(0xa91abd3e, 0xd012c4c1)}}, {{TOBN(0x34fa4883, 0x87eb36bf), TOBN(0xc5f07102, 0x914b8fb4), TOBN(0x90f0e579, 0xadb9c95f), TOBN(0xfe6ea8cb, 0x28888195)}, {TOBN(0x7b9b5065, 0xedfa9284), TOBN(0x6c510bd2, 0x2b8c8d65), TOBN(0xd7b8ebef, 0xcbe8aafd), TOBN(0xedb3af98, 0x96b1da07)}}, {{TOBN(0x28ff779d, 0x6295d426), TOBN(0x0c4f6ac7, 0x3fa3ad7b), TOBN(0xec44d054, 0x8b8e2604), TOBN(0x9b32a66d, 0x8b0050e1)}, {TOBN(0x1f943366, 0xf0476ce2), TOBN(0x7554d953, 0xa602c7b4), TOBN(0xbe35aca6, 0x524f2809), TOBN(0xb6881229, 0xfd4edbea)}}, {{TOBN(0xe8cd0c8f, 0x508efb63), TOBN(0x9eb5b5c8, 0x6abcefc7), TOBN(0xf5621f5f, 0xb441ab4f), TOBN(0x79e6c046, 0xb76a2b22)}, {TOBN(0x74a4792c, 0xe37a1f69), TOBN(0xcbd252cb, 0x03542b60), TOBN(0x785f65d5, 0xb3c20bd3), TOBN(0x8dea6143, 0x4fabc60c)}}, {{TOBN(0x45e21446, 0xde673629), TOBN(0x57f7aa1e, 0x703c2d21), TOBN(0xa0e99b7f, 0x98c868c7), TOBN(0x4e42f66d, 0x8b641676)}, {TOBN(0x602884dc, 0x91077896), TOBN(0xa0d690cf, 0xc2c9885b), TOBN(0xfeb4da33, 0x3b9a5187), TOBN(0x5f789598, 0x153c87ee)}}, {{TOBN(0x2192dd47, 0x52b16dba), TOBN(0xdeefc0e6, 0x3524c1b1), TOBN(0x465ea76e, 0xe4383693), TOBN(0x79401711, 0x361b8d98)}, {TOBN(0xa5f9ace9, 0xf21a15cb), TOBN(0x73d26163, 0xefee9aeb), TOBN(0xcca844b3, 0xe677016c), TOBN(0x6c122b07, 0x57eaee06)}}, {{TOBN(0xb782dce7, 0x15f09690), TOBN(0x508b9b12, 0x2dfc0fc9), TOBN(0x9015ab4b, 0x65d89fc6), TOBN(0x5e79dab7, 0xd6d5bb0f)}, {TOBN(0x64f021f0, 0x6c775aa2), TOBN(0xdf09d8cc, 0x37c7eca1), TOBN(0x9a761367, 0xef2fa506), TOBN(0xed4ca476, 0x5b81eec6)}}, {{TOBN(0x262ede36, 0x10bbb8b5), TOBN(0x0737ce83, 0x0641ada3), 
TOBN(0x4c94288a, 0xe9831ccc), TOBN(0x487fc1ce, 0x8065e635)}, {TOBN(0xb13d7ab3, 0xb8bb3659), TOBN(0xdea5df3e, 0x855e4120), TOBN(0xb9a18573, 0x85eb0244), TOBN(0x1a1b8ea3, 0xa7cfe0a3)}}, {{TOBN(0x3b837119, 0x67b0867c), TOBN(0x8d5e0d08, 0x9d364520), TOBN(0x52dccc1e, 0xd930f0e3), TOBN(0xefbbcec7, 0xbf20bbaf)}, {TOBN(0x99cffcab, 0x0263ad10), TOBN(0xd8199e6d, 0xfcd18f8a), TOBN(0x64e2773f, 0xe9f10617), TOBN(0x0079e8e1, 0x08704848)}}, {{TOBN(0x1169989f, 0x8a342283), TOBN(0x8097799c, 0xa83012e6), TOBN(0xece966cb, 0x8a6a9001), TOBN(0x93b3afef, 0x072ac7fc)}, {TOBN(0xe6893a2a, 0x2db3d5ba), TOBN(0x263dc462, 0x89bf4fdc), TOBN(0x8852dfc9, 0xe0396673), TOBN(0x7ac70895, 0x3af362b6)}}, {{TOBN(0xbb9cce4d, 0x5c2f342b), TOBN(0xbf80907a, 0xb52d7aae), TOBN(0x97f3d3cd, 0x2161bcd0), TOBN(0xb25b0834, 0x0962744d)}, {TOBN(0xc5b18ea5, 0x6c3a1dda), TOBN(0xfe4ec7eb, 0x06c92317), TOBN(0xb787b890, 0xad1c4afe), TOBN(0xdccd9a92, 0x0ede801a)}}, {{TOBN(0x9ac6ddda, 0xdb58da1f), TOBN(0x22bbc12f, 0xb8cae6ee), TOBN(0xc6f8bced, 0x815c4a43), TOBN(0x8105a92c, 0xf96480c7)}, {TOBN(0x0dc3dbf3, 0x7a859d51), TOBN(0xe3ec7ce6, 0x3041196b), TOBN(0xd9f64b25, 0x0d1067c9), TOBN(0xf2321321, 0x3d1f8dd8)}}, {{TOBN(0x8b5c619c, 0x76497ee8), TOBN(0x5d2b0ac6, 0xc717370e), TOBN(0x98204cb6, 0x4fcf68e1), TOBN(0x0bdec211, 0x62bc6792)}, {TOBN(0x6973ccef, 0xa63b1011), TOBN(0xf9e3fa97, 0xe0de1ac5), TOBN(0x5efb693e, 0x3d0e0c8b), TOBN(0x037248e9, 0xd2d4fcb4)}}}, {{{TOBN(0x80802dc9, 0x1ec34f9e), TOBN(0xd8772d35, 0x33810603), TOBN(0x3f06d66c, 0x530cb4f3), TOBN(0x7be5ed0d, 0xc475c129)}, {TOBN(0xcb9e3c19, 0x31e82b10), TOBN(0xc63d2857, 0xc9ff6b4c), TOBN(0xb92118c6, 0x92a1b45e), TOBN(0x0aec4414, 0x7285bbca)}}, {{TOBN(0xfc189ae7, 0x1e29a3ef), TOBN(0xcbe906f0, 0x4c93302e), TOBN(0xd0107914, 0xceaae10e), TOBN(0xb7a23f34, 0xb68e19f8)}, {TOBN(0xe9d875c2, 0xefd2119d), TOBN(0x03198c6e, 0xfcadc9c8), TOBN(0x65591bf6, 0x4da17113), TOBN(0x3cf0bbf8, 0x3d443038)}}, {{TOBN(0xae485bb7, 0x2b724759), TOBN(0x945353e1, 0xb2d4c63a), TOBN(0x82159d07, 0xde7d6f2c), 
TOBN(0x389caef3, 0x4ec5b109)}, {TOBN(0x4a8ebb53, 0xdb65ef14), TOBN(0x2dc2cb7e, 0xdd99de43), TOBN(0x816fa3ed, 0x83f2405f), TOBN(0x73429bb9, 0xc14208a3)}}, {{TOBN(0xb618d590, 0xb01e6e27), TOBN(0x047e2ccd, 0xe180b2dc), TOBN(0xd1b299b5, 0x04aea4a9), TOBN(0x412c9e1e, 0x9fa403a4)}, {TOBN(0x88d28a36, 0x79407552), TOBN(0x49c50136, 0xf332b8e3), TOBN(0x3a1b6fcc, 0xe668de19), TOBN(0x178851bc, 0x75122b97)}}, {{TOBN(0xb1e13752, 0xfb85fa4c), TOBN(0xd61257ce, 0x383c8ce9), TOBN(0xd43da670, 0xd2f74dae), TOBN(0xa35aa23f, 0xbf846bbb)}, {TOBN(0x5e74235d, 0x4421fc83), TOBN(0xf6df8ee0, 0xc363473b), TOBN(0x34d7f52a, 0x3c4aa158), TOBN(0x50d05aab, 0x9bc6d22e)}}, {{TOBN(0x8c56e735, 0xa64785f4), TOBN(0xbc56637b, 0x5f29cd07), TOBN(0x53b2bb80, 0x3ee35067), TOBN(0x50235a0f, 0xdc919270)}, {TOBN(0x191ab6d8, 0xf2c4aa65), TOBN(0xc3475831, 0x8396023b), TOBN(0x80400ba5, 0xf0f805ba), TOBN(0x8881065b, 0x5ec0f80f)}}, {{TOBN(0xc370e522, 0xcc1b5e83), TOBN(0xde2d4ad1, 0x860b8bfb), TOBN(0xad364df0, 0x67b256df), TOBN(0x8f12502e, 0xe0138997)}, {TOBN(0x503fa0dc, 0x7783920a), TOBN(0xe80014ad, 0xc0bc866a), TOBN(0x3f89b744, 0xd3064ba6), TOBN(0x03511dcd, 0xcba5dba5)}}, {{TOBN(0x197dd46d, 0x95a7b1a2), TOBN(0x9c4e7ad6, 0x3c6341fb), TOBN(0x426eca29, 0x484c2ece), TOBN(0x9211e489, 0xde7f4f8a)}, {TOBN(0x14997f6e, 0xc78ef1f4), TOBN(0x2b2c0910, 0x06574586), TOBN(0x17286a6e, 0x1c3eede8), TOBN(0x25f92e47, 0x0f60e018)}}, {{TOBN(0x805c5646, 0x31890a36), TOBN(0x703ef600, 0x57feea5b), TOBN(0x389f747c, 0xaf3c3030), TOBN(0xe0e5daeb, 0x54dd3739)}, {TOBN(0xfe24a4c3, 0xc9c9f155), TOBN(0x7e4bf176, 0xb5393962), TOBN(0x37183de2, 0xaf20bf29), TOBN(0x4a1bd7b5, 0xf95a8c3b)}}, {{TOBN(0xa83b9699, 0x46191d3d), TOBN(0x281fc8dd, 0x7b87f257), TOBN(0xb18e2c13, 0x54107588), TOBN(0x6372def7, 0x9b2bafe8)}, {TOBN(0xdaf4bb48, 0x0d8972ca), TOBN(0x3f2dd4b7, 0x56167a3f), TOBN(0x1eace32d, 0x84310cf4), TOBN(0xe3bcefaf, 0xe42700aa)}}, {{TOBN(0x5fe5691e, 0xd785e73d), TOBN(0xa5db5ab6, 0x2ea60467), TOBN(0x02e23d41, 0xdfc6514a), TOBN(0x35e8048e, 0xe03c3665)}, 
{TOBN(0x3f8b118f, 0x1adaa0f8), TOBN(0x28ec3b45, 0x84ce1a5a), TOBN(0xe8cacc6e, 0x2c6646b8), TOBN(0x1343d185, 0xdbd0e40f)}}, {{TOBN(0xe5d7f844, 0xcaaa358c), TOBN(0x1a1db7e4, 0x9924182a), TOBN(0xd64cd42d, 0x9c875d9a), TOBN(0xb37b515f, 0x042eeec8)}, {TOBN(0x4d4dd409, 0x7b165fbe), TOBN(0xfc322ed9, 0xe206eff3), TOBN(0x7dee4102, 0x59b7e17e), TOBN(0x55a481c0, 0x8236ca00)}}, {{TOBN(0x8c885312, 0xc23fc975), TOBN(0x15715806, 0x05d6297b), TOBN(0xa078868e, 0xf78edd39), TOBN(0x956b31e0, 0x03c45e52)}, {TOBN(0x470275d5, 0xff7b33a6), TOBN(0xc8d5dc3a, 0x0c7e673f), TOBN(0x419227b4, 0x7e2f2598), TOBN(0x8b37b634, 0x4c14a975)}}, {{TOBN(0xd0667ed6, 0x8b11888c), TOBN(0x5e0e8c3e, 0x803e25dc), TOBN(0x34e5d0dc, 0xb987a24a), TOBN(0x9f40ac3b, 0xae920323)}, {TOBN(0x5463de95, 0x34e0f63a), TOBN(0xa128bf92, 0x6b6328f9), TOBN(0x491ccd7c, 0xda64f1b7), TOBN(0x7ef1ec27, 0xc47bde35)}}, {{TOBN(0xa857240f, 0xa36a2737), TOBN(0x35dc1366, 0x63621bc1), TOBN(0x7a3a6453, 0xd4fb6897), TOBN(0x80f1a439, 0xc929319d)}, {TOBN(0xfc18274b, 0xf8cb0ba0), TOBN(0xb0b53766, 0x8078c5eb), TOBN(0xfb0d4924, 0x1e01d0ef), TOBN(0x50d7c67d, 0x372ab09c)}}, {{TOBN(0xb4e370af, 0x3aeac968), TOBN(0xe4f7fee9, 0xc4b63266), TOBN(0xb4acd4c2, 0xe3ac5664), TOBN(0xf8910bd2, 0xceb38cbf)}, {TOBN(0x1c3ae50c, 0xc9c0726e), TOBN(0x15309569, 0xd97b40bf), TOBN(0x70884b7f, 0xfd5a5a1b), TOBN(0x3890896a, 0xef8314cd)}}, {{TOBN(0x58e1515c, 0xa5618c93), TOBN(0xe665432b, 0x77d942d1), TOBN(0xb32181bf, 0xb6f767a8), TOBN(0x753794e8, 0x3a604110)}, {TOBN(0x09afeb7c, 0xe8c0dbcc), TOBN(0x31e02613, 0x598673a3), TOBN(0x5d98e557, 0x7d46db00), TOBN(0xfc21fb8c, 0x9d985b28)}}, {{TOBN(0xc9040116, 0xb0843e0b), TOBN(0x53b1b3a8, 0x69b04531), TOBN(0xdd1649f0, 0x85d7d830), TOBN(0xbb3bcc87, 0xcb7427e8)}, {TOBN(0x77261100, 0xc93dce83), TOBN(0x7e79da61, 0xa1922a2a), TOBN(0x587a2b02, 0xf3149ce8), TOBN(0x147e1384, 0xde92ec83)}}, {{TOBN(0x484c83d3, 0xaf077f30), TOBN(0xea78f844, 0x0658b53a), TOBN(0x912076c2, 0x027aec53), TOBN(0xf34714e3, 0x93c8177d)}, {TOBN(0x37ef5d15, 0xc2376c84), 
TOBN(0x8315b659, 0x3d1aa783), TOBN(0x3a75c484, 0xef852a90), TOBN(0x0ba0c58a, 0x16086bd4)}}, {{TOBN(0x29688d7a, 0x529a6d48), TOBN(0x9c7f250d, 0xc2f19203), TOBN(0x123042fb, 0x682e2df9), TOBN(0x2b7587e7, 0xad8121bc)}, {TOBN(0x30fc0233, 0xe0182a65), TOBN(0xb82ecf87, 0xe3e1128a), TOBN(0x71682861, 0x93fb098f), TOBN(0x043e21ae, 0x85e9e6a7)}}, {{TOBN(0xab5b49d6, 0x66c834ea), TOBN(0x3be43e18, 0x47414287), TOBN(0xf40fb859, 0x219a2a47), TOBN(0x0e6559e9, 0xcc58df3c)}, {TOBN(0xfe1dfe8e, 0x0c6615b4), TOBN(0x14abc8fd, 0x56459d70), TOBN(0x7be0fa8e, 0x05de0386), TOBN(0x8e63ef68, 0xe9035c7c)}}, {{TOBN(0x116401b4, 0x53b31e91), TOBN(0x0cba7ad4, 0x4436b4d8), TOBN(0x9151f9a0, 0x107afd66), TOBN(0xafaca8d0, 0x1f0ee4c4)}, {TOBN(0x75fe5c1d, 0x9ee9761c), TOBN(0x3497a16b, 0xf0c0588f), TOBN(0x3ee2bebd, 0x0304804c), TOBN(0xa8fb9a60, 0xc2c990b9)}}, {{TOBN(0xd14d32fe, 0x39251114), TOBN(0x36bf25bc, 0xcac73366), TOBN(0xc9562c66, 0xdba7495c), TOBN(0x324d301b, 0x46ad348b)}, {TOBN(0x9f46620c, 0xd670407e), TOBN(0x0ea8d4f1, 0xe3733a01), TOBN(0xd396d532, 0xb0c324e0), TOBN(0x5b211a0e, 0x03c317cd)}}, {{TOBN(0x090d7d20, 0x5ffe7b37), TOBN(0x3b7f3efb, 0x1747d2da), TOBN(0xa2cb525f, 0xb54fc519), TOBN(0x6e220932, 0xf66a971e)}, {TOBN(0xddc160df, 0xb486d440), TOBN(0x7fcfec46, 0x3fe13465), TOBN(0x83da7e4e, 0x76e4c151), TOBN(0xd6fa48a1, 0xd8d302b5)}}, {{TOBN(0xc6304f26, 0x5872cd88), TOBN(0x806c1d3c, 0x278b90a1), TOBN(0x3553e725, 0xcaf0bc1c), TOBN(0xff59e603, 0xbb9d8d5c)}, {TOBN(0xa4550f32, 0x7a0b85dd), TOBN(0xdec5720a, 0x93ecc217), TOBN(0x0b88b741, 0x69d62213), TOBN(0x7212f245, 0x5b365955)}}, {{TOBN(0x20764111, 0xb5cae787), TOBN(0x13cb7f58, 0x1dfd3124), TOBN(0x2dca77da, 0x1175aefb), TOBN(0xeb75466b, 0xffaae775)}, {TOBN(0x74d76f3b, 0xdb6cff32), TOBN(0x7440f37a, 0x61fcda9a), TOBN(0x1bb3ac92, 0xb525028b), TOBN(0x20fbf8f7, 0xa1975f29)}}, {{TOBN(0x982692e1, 0xdf83097f), TOBN(0x28738f6c, 0x554b0800), TOBN(0xdc703717, 0xa2ce2f2f), TOBN(0x7913b93c, 0x40814194)}, {TOBN(0x04924593, 0x1fe89636), TOBN(0x7b98443f, 0xf78834a6), 
TOBN(0x11c6ab01, 0x5114a5a1), TOBN(0x60deb383, 0xffba5f4c)}}, {{TOBN(0x4caa54c6, 0x01a982e6), TOBN(0x1dd35e11, 0x3491cd26), TOBN(0x973c315f, 0x7cbd6b05), TOBN(0xcab00775, 0x52494724)}, {TOBN(0x04659b1f, 0x6565e15a), TOBN(0xbf30f529, 0x8c8fb026), TOBN(0xfc21641b, 0xa8a0de37), TOBN(0xe9c7a366, 0xfa5e5114)}}, {{TOBN(0xdb849ca5, 0x52f03ad8), TOBN(0xc7e8dbe9, 0x024e35c0), TOBN(0xa1a2bbac, 0xcfc3c789), TOBN(0xbf733e7d, 0x9c26f262)}, {TOBN(0x882ffbf5, 0xb8444823), TOBN(0xb7224e88, 0x6bf8483b), TOBN(0x53023b8b, 0x65bef640), TOBN(0xaabfec91, 0xd4d5f8cd)}}, {{TOBN(0xa40e1510, 0x079ea1bd), TOBN(0x1ad9addc, 0xd05d5d26), TOBN(0xdb3f2eab, 0x13e68d4f), TOBN(0x1cff1ae2, 0x640f803f)}, {TOBN(0xe0e7b749, 0xd4cee117), TOBN(0x8e9f275b, 0x4036d909), TOBN(0xce34e31d, 0x8f4d4c38), TOBN(0x22b37f69, 0xd75130fc)}}, {{TOBN(0x83e0f1fd, 0xb4014604), TOBN(0xa8ce9919, 0x89415078), TOBN(0x82375b75, 0x41792efe), TOBN(0x4f59bf5c, 0x97d4515b)}, {TOBN(0xac4f324f, 0x923a277d), TOBN(0xd9bc9b7d, 0x650f3406), TOBN(0xc6fa87d1, 0x8a39bc51), TOBN(0x82588530, 0x5ccc108f)}}, {{TOBN(0x5ced3c9f, 0x82e4c634), TOBN(0x8efb8314, 0x3a4464f8), TOBN(0xe706381b, 0x7a1dca25), TOBN(0x6cd15a3c, 0x5a2a412b)}, {TOBN(0x9347a8fd, 0xbfcd8fb5), TOBN(0x31db2eef, 0x6e54cd22), TOBN(0xc4aeb11e, 0xf8d8932f), TOBN(0x11e7c1ed, 0x344411af)}}, {{TOBN(0x2653050c, 0xdc9a151e), TOBN(0x9edbfc08, 0x3bb0a859), TOBN(0x926c81c7, 0xfd5691e7), TOBN(0x9c1b2342, 0x6f39019a)}, {TOBN(0x64a81c8b, 0x7f8474b9), TOBN(0x90657c07, 0x01761819), TOBN(0x390b3331, 0x55e0375a), TOBN(0xc676c626, 0xb6ebc47d)}}, {{TOBN(0x51623247, 0xb7d6dee8), TOBN(0x0948d927, 0x79659313), TOBN(0x99700161, 0xe9ab35ed), TOBN(0x06cc32b4, 0x8ddde408)}, {TOBN(0x6f2fd664, 0x061ef338), TOBN(0x1606fa02, 0xc202e9ed), TOBN(0x55388bc1, 0x929ba99b), TOBN(0xc4428c5e, 0x1e81df69)}}, {{TOBN(0xce2028ae, 0xf91b0b2a), TOBN(0xce870a23, 0xf03dfd3f), TOBN(0x66ec2c87, 0x0affe8ed), TOBN(0xb205fb46, 0x284d0c00)}, {TOBN(0xbf5dffe7, 0x44cefa48), TOBN(0xb6fc37a8, 0xa19876d7), TOBN(0xbecfa84c, 0x08b72863), 
TOBN(0xd7205ff5, 0x2576374f)}}, {{TOBN(0x80330d32, 0x8887de41), TOBN(0x5de0df0c, 0x869ea534), TOBN(0x13f42753, 0x3c56ea17), TOBN(0xeb1f6069, 0x452b1a78)}, {TOBN(0x50474396, 0xe30ea15c), TOBN(0x575816a1, 0xc1494125), TOBN(0xbe1ce55b, 0xfe6bb38f), TOBN(0xb901a948, 0x96ae30f7)}}, {{TOBN(0xe5af0f08, 0xd8fc3548), TOBN(0x5010b5d0, 0xd73bfd08), TOBN(0x993d2880, 0x53fe655a), TOBN(0x99f2630b, 0x1c1309fd)}, {TOBN(0xd8677baf, 0xb4e3b76f), TOBN(0x14e51ddc, 0xb840784b), TOBN(0x326c750c, 0xbf0092ce), TOBN(0xc83d306b, 0xf528320f)}}, {{TOBN(0xc4456715, 0x77d4715c), TOBN(0xd30019f9, 0x6b703235), TOBN(0x207ccb2e, 0xd669e986), TOBN(0x57c824af, 0xf6dbfc28)}, {TOBN(0xf0eb532f, 0xd8f92a23), TOBN(0x4a557fd4, 0x9bb98fd2), TOBN(0xa57acea7, 0xc1e6199a), TOBN(0x0c663820, 0x8b94b1ed)}}, {{TOBN(0x9b42be8f, 0xf83a9266), TOBN(0xc7741c97, 0x0101bd45), TOBN(0x95770c11, 0x07bd9ceb), TOBN(0x1f50250a, 0x8b2e0744)}, {TOBN(0xf762eec8, 0x1477b654), TOBN(0xc65b900e, 0x15efe59a), TOBN(0x88c96148, 0x9546a897), TOBN(0x7e8025b3, 0xc30b4d7c)}}, {{TOBN(0xae4065ef, 0x12045cf9), TOBN(0x6fcb2caf, 0x9ccce8bd), TOBN(0x1fa0ba4e, 0xf2cf6525), TOBN(0xf683125d, 0xcb72c312)}, {TOBN(0xa01da4ea, 0xe312410e), TOBN(0x67e28677, 0x6cd8e830), TOBN(0xabd95752, 0x98fb3f07), TOBN(0x05f11e11, 0xeef649a5)}}, {{TOBN(0xba47faef, 0x9d3472c2), TOBN(0x3adff697, 0xc77d1345), TOBN(0x4761fa04, 0xdd15afee), TOBN(0x64f1f61a, 0xb9e69462)}, {TOBN(0xfa691fab, 0x9bfb9093), TOBN(0x3df8ae8f, 0xa1133dfe), TOBN(0xcd5f8967, 0x58cc710d), TOBN(0xfbb88d50, 0x16c7fe79)}}, {{TOBN(0x8e011b4c, 0xe88c50d1), TOBN(0x7532e807, 0xa8771c4f), TOBN(0x64c78a48, 0xe2278ee4), TOBN(0x0b283e83, 0x3845072a)}, {TOBN(0x98a6f291, 0x49e69274), TOBN(0xb96e9668, 0x1868b21c), TOBN(0x38f0adc2, 0xb1a8908e), TOBN(0x90afcff7, 0x1feb829d)}}, {{TOBN(0x9915a383, 0x210b0856), TOBN(0xa5a80602, 0xdef04889), TOBN(0x800e9af9, 0x7c64d509), TOBN(0x81382d0b, 0xb8996f6f)}, {TOBN(0x490eba53, 0x81927e27), TOBN(0x46c63b32, 0x4af50182), TOBN(0x784c5fd9, 0xd3ad62ce), TOBN(0xe4fa1870, 0xf8ae8736)}}, 
{{TOBN(0x4ec9d0bc, 0xd7466b25), TOBN(0x84ddbe1a, 0xdb235c65), TOBN(0x5e2645ee, 0x163c1688), TOBN(0x570bd00e, 0x00eba747)}, {TOBN(0xfa51b629, 0x128bfa0f), TOBN(0x92fce1bd, 0x6c1d3b68), TOBN(0x3e7361dc, 0xb66778b1), TOBN(0x9c7d249d, 0x5561d2bb)}}, {{TOBN(0xa40b28bf, 0x0bbc6229), TOBN(0x1c83c05e, 0xdfd91497), TOBN(0x5f9f5154, 0xf083df05), TOBN(0xbac38b3c, 0xeee66c9d)}, {TOBN(0xf71db7e3, 0xec0dfcfd), TOBN(0xf2ecda8e, 0x8b0a8416), TOBN(0x52fddd86, 0x7812aa66), TOBN(0x2896ef10, 0x4e6f4272)}}, {{TOBN(0xff27186a, 0x0fe9a745), TOBN(0x08249fcd, 0x49ca70db), TOBN(0x7425a2e6, 0x441cac49), TOBN(0xf4a0885a, 0xece5ff57)}, {TOBN(0x6e2cb731, 0x7d7ead58), TOBN(0xf96cf7d6, 0x1898d104), TOBN(0xafe67c9d, 0x4f2c9a89), TOBN(0x89895a50, 0x1c7bf5bc)}}, {{TOBN(0xdc7cb8e5, 0x573cecfa), TOBN(0x66497eae, 0xd15f03e6), TOBN(0x6bc0de69, 0x3f084420), TOBN(0x323b9b36, 0xacd532b0)}, {TOBN(0xcfed390a, 0x0115a3c1), TOBN(0x9414c40b, 0x2d65ca0e), TOBN(0x641406bd, 0x2f530c78), TOBN(0x29369a44, 0x833438f2)}}, {{TOBN(0x996884f5, 0x903fa271), TOBN(0xe6da0fd2, 0xb9da921e), TOBN(0xa6f2f269, 0x5db01e54), TOBN(0x1ee3e9bd, 0x6876214e)}, {TOBN(0xa26e181c, 0xe27a9497), TOBN(0x36d254e4, 0x8e215e04), TOBN(0x42f32a6c, 0x252cabca), TOBN(0x99481487, 0x80b57614)}}, {{TOBN(0x4c4dfe69, 0x40d9cae1), TOBN(0x05869580, 0x11a10f09), TOBN(0xca287b57, 0x3491b64b), TOBN(0x77862d5d, 0x3fd4a53b)}, {TOBN(0xbf94856e, 0x50349126), TOBN(0x2be30bd1, 0x71c5268f), TOBN(0x10393f19, 0xcbb650a6), TOBN(0x639531fe, 0x778cf9fd)}}, {{TOBN(0x02556a11, 0xb2935359), TOBN(0xda38aa96, 0xaf8c126e), TOBN(0x47dbe6c2, 0x0960167f), TOBN(0x37bbabb6, 0x501901cd)}, {TOBN(0xb6e979e0, 0x2c947778), TOBN(0xd69a5175, 0x7a1a1dc6), TOBN(0xc3ed5095, 0x9d9faf0c), TOBN(0x4dd9c096, 0x1d5fa5f0)}}, {{TOBN(0xa0c4304d, 0x64f16ea8), TOBN(0x8b1cac16, 0x7e718623), TOBN(0x0b576546, 0x7c67f03e), TOBN(0x559cf5ad, 0xcbd88c01)}, {TOBN(0x074877bb, 0x0e2af19a), TOBN(0x1f717ec1, 0xa1228c92), TOBN(0x70bcb800, 0x326e8920), TOBN(0xec6e2c5c, 0x4f312804)}}, {{TOBN(0x426aea7d, 0x3fca4752), 
TOBN(0xf12c0949, 0x2211f62a), TOBN(0x24beecd8, 0x7be7b6b5), TOBN(0xb77eaf4c, 0x36d7a27d)}, {TOBN(0x154c2781, 0xfda78fd3), TOBN(0x848a83b0, 0x264eeabe), TOBN(0x81287ef0, 0x4ffe2bc4), TOBN(0x7b6d88c6, 0xb6b6fc2a)}}, {{TOBN(0x805fb947, 0xce417d99), TOBN(0x4b93dcc3, 0x8b916cc4), TOBN(0x72e65bb3, 0x21273323), TOBN(0xbcc1badd, 0x6ea9886e)}, {TOBN(0x0e223011, 0x4bc5ee85), TOBN(0xa561be74, 0xc18ee1e4), TOBN(0x762fd2d4, 0xa6bcf1f1), TOBN(0x50e6a5a4, 0x95231489)}}, {{TOBN(0xca96001f, 0xa00b500b), TOBN(0x5c098cfc, 0x5d7dcdf5), TOBN(0xa64e2d2e, 0x8c446a85), TOBN(0xbae9bcf1, 0x971f3c62)}, {TOBN(0x4ec22683, 0x8435a2c5), TOBN(0x8ceaed6c, 0x4bad4643), TOBN(0xe9f8fb47, 0xccccf4e3), TOBN(0xbd4f3fa4, 0x1ce3b21e)}}, {{TOBN(0xd79fb110, 0xa3db3292), TOBN(0xe28a37da, 0xb536c66a), TOBN(0x279ce87b, 0x8e49e6a9), TOBN(0x70ccfe8d, 0xfdcec8e3)}, {TOBN(0x2193e4e0, 0x3ba464b2), TOBN(0x0f39d60e, 0xaca9a398), TOBN(0x7d7932af, 0xf82c12ab), TOBN(0xd8ff50ed, 0x91e7e0f7)}}, {{TOBN(0xea961058, 0xfa28a7e0), TOBN(0xc726cf25, 0x0bf5ec74), TOBN(0xe74d55c8, 0xdb229666), TOBN(0x0bd9abbf, 0xa57f5799)}, {TOBN(0x7479ef07, 0x4dfc47b3), TOBN(0xd9c65fc3, 0x0c52f91d), TOBN(0x8e0283fe, 0x36a8bde2), TOBN(0xa32a8b5e, 0x7d4b7280)}}, {{TOBN(0x6a677c61, 0x12e83233), TOBN(0x0fbb3512, 0xdcc9bf28), TOBN(0x562e8ea5, 0x0d780f61), TOBN(0x0db8b22b, 0x1dc4e89c)}, {TOBN(0x0a6fd1fb, 0x89be0144), TOBN(0x8c77d246, 0xca57113b), TOBN(0x4639075d, 0xff09c91c), TOBN(0x5b47b17f, 0x5060824c)}}, {{TOBN(0x58aea2b0, 0x16287b52), TOBN(0xa1343520, 0xd0cd8eb0), TOBN(0x6148b4d0, 0xc5d58573), TOBN(0xdd2b6170, 0x291c68ae)}, {TOBN(0xa61b3929, 0x1da3b3b7), TOBN(0x5f946d79, 0x08c4ac10), TOBN(0x4105d4a5, 0x7217d583), TOBN(0x5061da3d, 0x25e6de5e)}}, {{TOBN(0x3113940d, 0xec1b4991), TOBN(0xf12195e1, 0x36f485ae), TOBN(0xa7507fb2, 0x731a2ee0), TOBN(0x95057a8e, 0x6e9e196e)}, {TOBN(0xa3c2c911, 0x2e130136), TOBN(0x97dfbb36, 0x33c60d15), TOBN(0xcaf3c581, 0xb300ee2b), TOBN(0x77f25d90, 0xf4bac8b8)}}, {{TOBN(0xdb1c4f98, 0x6d840cd6), TOBN(0x471d62c0, 0xe634288c), 
TOBN(0x8ec2f85e, 0xcec8a161), TOBN(0x41f37cbc, 0xfa6f4ae2)}, {TOBN(0x6793a20f, 0x4b709985), TOBN(0x7a7bd33b, 0xefa8985b), TOBN(0x2c6a3fbd, 0x938e6446), TOBN(0x19042619, 0x2a8d47c1)}}, {{TOBN(0x16848667, 0xcc36975f), TOBN(0x02acf168, 0x9d5f1dfb), TOBN(0x62d41ad4, 0x613baa94), TOBN(0xb56fbb92, 0x9f684670)}, {TOBN(0xce610d0d, 0xe9e40569), TOBN(0x7b99c65f, 0x35489fef), TOBN(0x0c88ad1b, 0x3df18b97), TOBN(0x81b7d9be, 0x5d0e9edb)}}, {{TOBN(0xd85218c0, 0xc716cc0a), TOBN(0xf4b5ff90, 0x85691c49), TOBN(0xa4fd666b, 0xce356ac6), TOBN(0x17c72895, 0x4b327a7a)}, {TOBN(0xf93d5085, 0xda6be7de), TOBN(0xff71530e, 0x3301d34e), TOBN(0x4cd96442, 0xd8f448e8), TOBN(0x9283d331, 0x2ed18ffa)}}, {{TOBN(0x4d33dd99, 0x2a849870), TOBN(0xa716964b, 0x41576335), TOBN(0xff5e3a9b, 0x179be0e5), TOBN(0x5b9d6b1b, 0x83b13632)}, {TOBN(0x3b8bd7d4, 0xa52f313b), TOBN(0xc9dd95a0, 0x637a4660), TOBN(0x30035962, 0x0b3e218f), TOBN(0xce1481a3, 0xc7b28a3c)}}, {{TOBN(0xab41b43a, 0x43228d83), TOBN(0x24ae1c30, 0x4ad63f99), TOBN(0x8e525f1a, 0x46a51229), TOBN(0x14af860f, 0xcd26d2b4)}, {TOBN(0xd6baef61, 0x3f714aa1), TOBN(0xf51865ad, 0xeb78795e), TOBN(0xd3e21fce, 0xe6a9d694), TOBN(0x82ceb1dd, 0x8a37b527)}}}}; ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/p256-nistz.cc.inc ================================================ /* * Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2014, Intel Corporation. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html * * Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1) * (1) Intel Corporation, Israel Development Center, Haifa, Israel * (2) University of Haifa, Israel * * Reference: * S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with * 256 Bit Primes" */ #include #include #include #include #include #include #include #include "../../internal.h" #include "../bn/internal.h" #include "../delocate.h" #include "internal.h" #include "p256-nistz.h" #if !defined(OPENSSL_NO_ASM) && \ (defined(OPENSSL_X86_64) || defined(OPENSSL_AARCH64)) && \ !defined(OPENSSL_SMALL) typedef P256_POINT_AFFINE PRECOMP256_ROW[64]; // One converted into the Montgomery domain static const BN_ULONG ONE_MONT[P256_LIMBS] = { TOBN(0x00000000, 0x00000001), TOBN(0xffffffff, 0x00000000), TOBN(0xffffffff, 0xffffffff), TOBN(0x00000000, 0xfffffffe), }; // Precomputed tables for the default generator #include "p256-nistz-table.h" // Recode window to a signed digit, see |ec_GFp_nistp_recode_scalar_bits| in // util.c for details static crypto_word_t booth_recode_w5(crypto_word_t in) { crypto_word_t s, d; s = ~((in >> 5) - 1); d = (1 << 6) - in - 1; d = (d & s) | (in & ~s); d = (d >> 1) + (d & 1); return (d << 1) + (s & 1); } static crypto_word_t booth_recode_w7(crypto_word_t in) { crypto_word_t s, d; s = ~((in >> 7) - 1); d = (1 << 8) - in - 1; d = (d & s) | (in & ~s); d = (d >> 1) + (d & 1); return (d << 1) + (s & 1); } // copy_conditional copies |src| to |dst| if |move| is one and leaves it as-is // if |move| is zero. // // WARNING: this breaks the usual convention of constant-time functions // returning masks. 
static void copy_conditional(BN_ULONG dst[P256_LIMBS],
                             const BN_ULONG src[P256_LIMBS], BN_ULONG move) {
  // mask1 is all-ones when |move| is 1 and all-zeros when |move| is 0
  // (0 - 1 wraps around to the all-ones BN_ULONG). Each limb below is then
  // selected branch-free, so the copy does not reveal |move| through
  // control flow.
  BN_ULONG mask1 = ((BN_ULONG)0) - move;
  BN_ULONG mask2 = ~mask1;

  dst[0] = (src[0] & mask1) ^ (dst[0] & mask2);
  dst[1] = (src[1] & mask1) ^ (dst[1] & mask2);
  dst[2] = (src[2] & mask1) ^ (dst[2] & mask2);
  dst[3] = (src[3] & mask1) ^ (dst[3] & mask2);
  // NOTE(review): P256_LIMBS is presumably a compile-time constant (4 or 8
  // limbs depending on BN_ULONG width), so this condition folds away at
  // compile time rather than branching on secret data — confirm against
  // p256-nistz.h.
  if (P256_LIMBS == 8) {
    dst[4] = (src[4] & mask1) ^ (dst[4] & mask2);
    dst[5] = (src[5] & mask1) ^ (dst[5] & mask2);
    dst[6] = (src[6] & mask1) ^ (dst[6] & mask2);
    dst[7] = (src[7] & mask1) ^ (dst[7] & mask2);
  }
}

// is_not_zero returns one iff in != 0 and zero otherwise.
//
// WARNING: this breaks the usual convention of constant-time functions
// returning masks.
//
// (define-fun is_not_zero ((in (_ BitVec 64))) (_ BitVec 64)
//   (bvlshr (bvor in (bvsub #x0000000000000000 in)) #x000000000000003f)
// )
//
// (declare-fun x () (_ BitVec 64))
//
// (assert (and (= x #x0000000000000000) (= (is_not_zero x)
// #x0000000000000001))) (check-sat)
//
// (assert (and (not (= x #x0000000000000000)) (= (is_not_zero x)
// #x0000000000000000))) (check-sat)
//
static BN_ULONG is_not_zero(BN_ULONG in) {
  // For nonzero |in|, at least one of |in| and |0 - in| has its top bit set;
  // for zero, neither does. ORing them and shifting the top bit down yields
  // 0 or 1 without branching (the SMT model above verifies both cases).
  in |= (0 - in);
  in >>= BN_BITS2 - 1;
  return in;
}

#if defined(OPENSSL_X86_64)

// Dispatch between CPU variations. The "_adx" suffixed functions use MULX in
// addition to ADCX/ADOX. MULX is part of BMI2, not ADX, so we must check both
// capabilities.
static void ecp_nistz256_mul_mont(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS], const BN_ULONG b[P256_LIMBS]) { if (CRYPTO_is_BMI2_capable() && CRYPTO_is_ADX_capable()) { ecp_nistz256_mul_mont_adx(res, a, b); } else { ecp_nistz256_mul_mont_nohw(res, a, b); } } static void ecp_nistz256_sqr_mont(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS]) { if (CRYPTO_is_BMI2_capable() && CRYPTO_is_ADX_capable()) { ecp_nistz256_sqr_mont_adx(res, a); } else { ecp_nistz256_sqr_mont_nohw(res, a); } } static void ecp_nistz256_ord_mul_mont(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS], const BN_ULONG b[P256_LIMBS]) { if (CRYPTO_is_BMI2_capable() && CRYPTO_is_ADX_capable()) { ecp_nistz256_ord_mul_mont_adx(res, a, b); } else { ecp_nistz256_ord_mul_mont_nohw(res, a, b); } } static void ecp_nistz256_ord_sqr_mont(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS], BN_ULONG rep) { if (CRYPTO_is_BMI2_capable() && CRYPTO_is_ADX_capable()) { ecp_nistz256_ord_sqr_mont_adx(res, a, rep); } else { ecp_nistz256_ord_sqr_mont_nohw(res, a, rep); } } static void ecp_nistz256_select_w5(P256_POINT *val, const P256_POINT in_t[16], int index) { if (CRYPTO_is_AVX2_capable()) { ecp_nistz256_select_w5_avx2(val, in_t, index); } else { ecp_nistz256_select_w5_nohw(val, in_t, index); } } static void ecp_nistz256_select_w7(P256_POINT_AFFINE *val, const P256_POINT_AFFINE in_t[64], int index) { if (CRYPTO_is_AVX2_capable()) { ecp_nistz256_select_w7_avx2(val, in_t, index); } else { ecp_nistz256_select_w7_nohw(val, in_t, index); } } static void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a) { if (CRYPTO_is_BMI2_capable() && CRYPTO_is_ADX_capable()) { ecp_nistz256_point_double_adx(r, a); } else { ecp_nistz256_point_double_nohw(r, a); } } static void ecp_nistz256_point_add(P256_POINT *r, const P256_POINT *a, const P256_POINT *b) { if (CRYPTO_is_BMI2_capable() && CRYPTO_is_ADX_capable()) { ecp_nistz256_point_add_adx(r, a, b); } else { ecp_nistz256_point_add_nohw(r, a, b); } } 
// ecp_nistz256_point_add_affine sets |r| to |a| + |b|, where |b| is in affine
// coordinates. See the header for the aliasing restriction on |a| and |b|.
static void ecp_nistz256_point_add_affine(P256_POINT *r, const P256_POINT *a,
                                          const P256_POINT_AFFINE *b) {
  if (CRYPTO_is_BMI2_capable() && CRYPTO_is_ADX_capable()) {
    ecp_nistz256_point_add_affine_adx(r, a, b);
  } else {
    ecp_nistz256_point_add_affine_nohw(r, a, b);
  }
}
#endif  // OPENSSL_X86_64

// ecp_nistz256_from_mont sets |res| to |in|, converted from Montgomery domain
// by multiplying with 1.
static void ecp_nistz256_from_mont(BN_ULONG res[P256_LIMBS],
                                   const BN_ULONG in[P256_LIMBS]) {
  static const BN_ULONG ONE[P256_LIMBS] = {1};
  ecp_nistz256_mul_mont(res, in, ONE);
}

// ecp_nistz256_mod_inverse_sqr_mont sets |r| to (|in| * 2^-256)^-2 * 2^256 mod
// p. That is, |r| is the modular inverse square of |in| for input and output in
// the Montgomery domain.
static void ecp_nistz256_mod_inverse_sqr_mont(BN_ULONG r[P256_LIMBS],
                                              const BN_ULONG in[P256_LIMBS]) {
  // This implements the addition chain described in
  // https://briansmith.org/ecc-inversion-addition-chains-01#p256_field_inversion
  //
  // The inline comments track the exponent accumulated so far; e.g.
  // "2^6 - 2^3" means the value currently held is |in| raised to that power.
  BN_ULONG x2[P256_LIMBS], x3[P256_LIMBS], x6[P256_LIMBS], x12[P256_LIMBS],
      x15[P256_LIMBS], x30[P256_LIMBS], x32[P256_LIMBS];
  ecp_nistz256_sqr_mont(x2, in);      // 2^2 - 2^1
  ecp_nistz256_mul_mont(x2, x2, in);  // 2^2 - 2^0

  ecp_nistz256_sqr_mont(x3, x2);      // 2^3 - 2^1
  ecp_nistz256_mul_mont(x3, x3, in);  // 2^3 - 2^0

  ecp_nistz256_sqr_mont(x6, x3);
  for (int i = 1; i < 3; i++) {
    ecp_nistz256_sqr_mont(x6, x6);
  }                                   // 2^6 - 2^3
  ecp_nistz256_mul_mont(x6, x6, x3);  // 2^6 - 2^0

  ecp_nistz256_sqr_mont(x12, x6);
  for (int i = 1; i < 6; i++) {
    ecp_nistz256_sqr_mont(x12, x12);
  }                                     // 2^12 - 2^6
  ecp_nistz256_mul_mont(x12, x12, x6);  // 2^12 - 2^0

  ecp_nistz256_sqr_mont(x15, x12);
  for (int i = 1; i < 3; i++) {
    ecp_nistz256_sqr_mont(x15, x15);
  }                                     // 2^15 - 2^3
  ecp_nistz256_mul_mont(x15, x15, x3);  // 2^15 - 2^0

  ecp_nistz256_sqr_mont(x30, x15);
  for (int i = 1; i < 15; i++) {
    ecp_nistz256_sqr_mont(x30, x30);
  }                                      // 2^30 - 2^15
  ecp_nistz256_mul_mont(x30, x30, x15);  // 2^30 - 2^0

  ecp_nistz256_sqr_mont(x32, x30);
  ecp_nistz256_sqr_mont(x32, x32);      // 2^32 - 2^2
  ecp_nistz256_mul_mont(x32, x32, x2);  // 2^32 - 2^0

  BN_ULONG ret[P256_LIMBS];
  ecp_nistz256_sqr_mont(ret, x32);
  for (int i = 1; i < 31 + 1; i++) {
    ecp_nistz256_sqr_mont(ret, ret);
  }                                     // 2^64 - 2^32
  ecp_nistz256_mul_mont(ret, ret, in);  // 2^64 - 2^32 + 2^0

  for (int i = 0; i < 96 + 32; i++) {
    ecp_nistz256_sqr_mont(ret, ret);
  }                                      // 2^192 - 2^160 + 2^128
  ecp_nistz256_mul_mont(ret, ret, x32);  // 2^192 - 2^160 + 2^128 + 2^32 - 2^0

  for (int i = 0; i < 32; i++) {
    ecp_nistz256_sqr_mont(ret, ret);
  }                                      // 2^224 - 2^192 + 2^160 + 2^64 - 2^32
  ecp_nistz256_mul_mont(ret, ret, x32);  // 2^224 - 2^192 + 2^160 + 2^64 - 2^0

  for (int i = 0; i < 30; i++) {
    ecp_nistz256_sqr_mont(ret, ret);
  }                                      // 2^254 - 2^222 + 2^190 + 2^94 - 2^30
  ecp_nistz256_mul_mont(ret, ret, x30);  // 2^254 - 2^222 + 2^190 + 2^94 - 2^0

  ecp_nistz256_sqr_mont(ret, ret);
  ecp_nistz256_sqr_mont(r, ret);  // 2^256 - 2^224 + 2^192 + 2^96 - 2^2
}

// r = p * p_scalar
static void ecp_nistz256_windowed_mul(const EC_GROUP *group, P256_POINT *r,
                                      const EC_JACOBIAN *p,
                                      const EC_SCALAR *p_scalar) {
  assert(p != NULL);
  assert(p_scalar != NULL);
  assert(group->field.N.width == P256_LIMBS);

  static const size_t kWindowSize = 5;
  static const crypto_word_t kMask = (1 << (5 /* kWindowSize */ + 1)) - 1;

  // A |P256_POINT| is (3 * 32) = 96 bytes, and the 64-byte alignment should
  // add no more than 63 bytes of overhead. Thus, |table| should require
  // ~1599 ((96 * 16) + 63) bytes of stack space.
  alignas(64) P256_POINT table[16];
  uint8_t p_str[33];
  OPENSSL_memcpy(p_str, p_scalar->words, 32);
  p_str[32] = 0;

  // table[0] is implicitly (0,0,0) (the point at infinity), therefore it is
  // not stored. All other values are actually stored with an offset of -1 in
  // table.
// (Continuation of |ecp_nistz256_windowed_mul|.) Build the table of multiples
// 1*p .. 16*p: |row[i-1]| holds i*p, each entry derived from smaller,
// already-computed multiples via doubles and additions.
  P256_POINT *row = table;
  assert(group->field.N.width == P256_LIMBS);
  OPENSSL_memcpy(row[1 - 1].X, p->X.words, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(row[1 - 1].Y, p->Y.words, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(row[1 - 1].Z, p->Z.words, P256_LIMBS * sizeof(BN_ULONG));

  ecp_nistz256_point_double(&row[2 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[3 - 1], &row[2 - 1], &row[1 - 1]);
  ecp_nistz256_point_double(&row[4 - 1], &row[2 - 1]);
  ecp_nistz256_point_double(&row[6 - 1], &row[3 - 1]);
  ecp_nistz256_point_double(&row[8 - 1], &row[4 - 1]);
  ecp_nistz256_point_double(&row[12 - 1], &row[6 - 1]);
  ecp_nistz256_point_add(&row[5 - 1], &row[4 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[7 - 1], &row[6 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[9 - 1], &row[8 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[13 - 1], &row[12 - 1], &row[1 - 1]);
  ecp_nistz256_point_double(&row[14 - 1], &row[7 - 1]);
  ecp_nistz256_point_double(&row[10 - 1], &row[5 - 1]);
  ecp_nistz256_point_add(&row[15 - 1], &row[14 - 1], &row[1 - 1]);
  ecp_nistz256_point_add(&row[11 - 1], &row[10 - 1], &row[1 - 1]);
  ecp_nistz256_point_double(&row[16 - 1], &row[8 - 1]);

  // Scan the scalar top-down in signed (Booth-recoded) 5-bit windows. Each
  // window selects a table entry in constant time, conditionally negates it
  // (Booth sign bit), and accumulates into |r| with 5 doublings per window.
  BN_ULONG tmp[P256_LIMBS];
  alignas(32) P256_POINT h;
  size_t index = 255;
  crypto_word_t wvalue = p_str[(index - 1) / 8];
  wvalue = (wvalue >> ((index - 1) % 8)) & kMask;

  ecp_nistz256_select_w5(r, table, booth_recode_w5(wvalue) >> 1);

  while (index >= 5) {
    if (index != 255) {
      size_t off = (index - 1) / 8;
      wvalue = (crypto_word_t)p_str[off] | (crypto_word_t)p_str[off + 1] << 8;
      wvalue = (wvalue >> ((index - 1) % 8)) & kMask;
      wvalue = booth_recode_w5(wvalue);

      ecp_nistz256_select_w5(&h, table, wvalue >> 1);

      // Conditionally negate the selected point in constant time (low bit of
      // the Booth encoding is the sign).
      ecp_nistz256_neg(tmp, h.Y);
      copy_conditional(h.Y, tmp, (wvalue & 1));

      ecp_nistz256_point_add(r, r, &h);
    }

    index -= kWindowSize;

    ecp_nistz256_point_double(r, r);
    ecp_nistz256_point_double(r, r);
    ecp_nistz256_point_double(r, r);
    ecp_nistz256_point_double(r, r);
    ecp_nistz256_point_double(r, r);
  }

  // Final window
  wvalue = p_str[0];
  wvalue = (wvalue << 1) & kMask;
  wvalue = booth_recode_w5(wvalue);

  ecp_nistz256_select_w5(&h, table, wvalue >> 1);

  ecp_nistz256_neg(tmp, h.Y);
  copy_conditional(h.Y, tmp, wvalue & 1);

  ecp_nistz256_point_add(r, r, &h);
}

// calc_first_wvalue computes the Booth-recoded lowest w=7 window of |p_str|
// and sets |*index| just past it.
static crypto_word_t calc_first_wvalue(size_t *index, const uint8_t p_str[33]) {
  static const size_t kWindowSize = 7;
  static const crypto_word_t kMask = (1 << (7 /* kWindowSize */ + 1)) - 1;
  *index = kWindowSize;

  crypto_word_t wvalue = (p_str[0] << 1) & kMask;
  return booth_recode_w7(wvalue);
}

// calc_wvalue computes the Booth-recoded w=7 window of |p_str| at bit position
// |*index| and advances |*index| by the window size.
static crypto_word_t calc_wvalue(size_t *index, const uint8_t p_str[33]) {
  static const size_t kWindowSize = 7;
  static const crypto_word_t kMask = (1 << (7 /* kWindowSize */ + 1)) - 1;

  const size_t off = (*index - 1) / 8;
  crypto_word_t wvalue =
      (crypto_word_t)p_str[off] | (crypto_word_t)p_str[off + 1] << 8;
  wvalue = (wvalue >> ((*index - 1) % 8)) & kMask;
  *index += kWindowSize;

  return booth_recode_w7(wvalue);
}

// ecp_nistz256_point_mul sets |r| to |p| * |scalar|.
static void ecp_nistz256_point_mul(const EC_GROUP *group, EC_JACOBIAN *r,
                                   const EC_JACOBIAN *p,
                                   const EC_SCALAR *scalar) {
  alignas(32) P256_POINT out;
  ecp_nistz256_windowed_mul(group, &out, p, scalar);

  assert(group->field.N.width == P256_LIMBS);
  OPENSSL_memcpy(r->X.words, out.X, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(r->Y.words, out.Y, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(r->Z.words, out.Z, P256_LIMBS * sizeof(BN_ULONG));
}

// ecp_nistz256_point_mul_base sets |r| to generator * |scalar|, using the
// precomputed table |ecp_nistz256_precomputed| with w=7 windows.
static void ecp_nistz256_point_mul_base(const EC_GROUP *group, EC_JACOBIAN *r,
                                        const EC_SCALAR *scalar) {
  uint8_t p_str[33];
  OPENSSL_memcpy(p_str, scalar->words, 32);
  p_str[32] = 0;

  // First window
  size_t index = 0;
  crypto_word_t wvalue = calc_first_wvalue(&index, p_str);

  alignas(32) P256_POINT_AFFINE t;
  alignas(32) P256_POINT p;
  ecp_nistz256_select_w7(&t, ecp_nistz256_precomputed[0], wvalue >> 1);
  // |p.Z| is used as scratch here: conditionally negate |t.Y| in constant
  // time (Booth sign bit).
  ecp_nistz256_neg(p.Z, t.Y);
  copy_conditional(t.Y, p.Z, wvalue & 1);

  // Convert |t| from affine to Jacobian coordinates. We set Z to zero if |t|
  // is infinity and |ONE_MONT| otherwise.
// |t| was computed from the table, so it is infinity iff |wvalue >> 1| is
  // zero.
  OPENSSL_memcpy(p.X, t.X, sizeof(p.X));
  OPENSSL_memcpy(p.Y, t.Y, sizeof(p.Y));
  OPENSSL_memset(p.Z, 0, sizeof(p.Z));
  copy_conditional(p.Z, ONE_MONT, is_not_zero(wvalue >> 1));

  for (int i = 1; i < 37; i++) {
    wvalue = calc_wvalue(&index, p_str);

    ecp_nistz256_select_w7(&t, ecp_nistz256_precomputed[i], wvalue >> 1);

    alignas(32) BN_ULONG neg_Y[P256_LIMBS];
    ecp_nistz256_neg(neg_Y, t.Y);
    copy_conditional(t.Y, neg_Y, wvalue & 1);

    // Note |ecp_nistz256_point_add_affine| does not work if |p| and |t| are the
    // same non-infinity point.
    ecp_nistz256_point_add_affine(&p, &p, &t);
  }

  assert(group->field.N.width == P256_LIMBS);
  OPENSSL_memcpy(r->X.words, p.X, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(r->Y.words, p.Y, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(r->Z.words, p.Z, P256_LIMBS * sizeof(BN_ULONG));
}

// ecp_nistz256_points_mul_public sets |r| to generator * |g_scalar| +
// |p_| * |p_scalar|. The table lookups and zero-window skips below are
// variable-time, so this must only be used where the scalars are public
// (as the name indicates), e.g. ECDSA verification.
static void ecp_nistz256_points_mul_public(const EC_GROUP *group,
                                           EC_JACOBIAN *r,
                                           const EC_SCALAR *g_scalar,
                                           const EC_JACOBIAN *p_,
                                           const EC_SCALAR *p_scalar) {
  assert(p_ != NULL && p_scalar != NULL && g_scalar != NULL);

  alignas(32) P256_POINT p;
  uint8_t p_str[33];
  OPENSSL_memcpy(p_str, g_scalar->words, 32);
  p_str[32] = 0;

  // First window
  size_t index = 0;
  size_t wvalue = calc_first_wvalue(&index, p_str);

  // Convert |p| from affine to Jacobian coordinates. We set Z to zero if |p|
  // is infinity and |ONE_MONT| otherwise. |p| was computed from the table, so
  // it is infinity iff |wvalue >> 1| is zero.
  if ((wvalue >> 1) != 0) {
    OPENSSL_memcpy(p.X, &ecp_nistz256_precomputed[0][(wvalue >> 1) - 1].X,
                   sizeof(p.X));
    OPENSSL_memcpy(p.Y, &ecp_nistz256_precomputed[0][(wvalue >> 1) - 1].Y,
                   sizeof(p.Y));
    OPENSSL_memcpy(p.Z, ONE_MONT, sizeof(p.Z));
  } else {
    OPENSSL_memset(p.X, 0, sizeof(p.X));
    OPENSSL_memset(p.Y, 0, sizeof(p.Y));
    OPENSSL_memset(p.Z, 0, sizeof(p.Z));
  }

  if ((wvalue & 1) == 1) {
    ecp_nistz256_neg(p.Y, p.Y);
  }

  for (int i = 1; i < 37; i++) {
    wvalue = calc_wvalue(&index, p_str);
    if ((wvalue >> 1) == 0) {
      continue;  // Window is zero; nothing to add.
    }

    alignas(32) P256_POINT_AFFINE t;
    OPENSSL_memcpy(&t, &ecp_nistz256_precomputed[i][(wvalue >> 1) - 1],
                   sizeof(t));
    if ((wvalue & 1) == 1) {
      ecp_nistz256_neg(t.Y, t.Y);
    }

    // Note |ecp_nistz256_point_add_affine| does not work if |p| and |t| are
    // the same non-infinity point, so it is important that we compute the
    // |g_scalar| term before the |p_scalar| term.
    ecp_nistz256_point_add_affine(&p, &p, &t);
  }

  alignas(32) P256_POINT tmp;
  ecp_nistz256_windowed_mul(group, &tmp, p_, p_scalar);
  ecp_nistz256_point_add(&p, &p, &tmp);

  assert(group->field.N.width == P256_LIMBS);
  OPENSSL_memcpy(r->X.words, p.X, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(r->Y.words, p.Y, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(r->Z.words, p.Z, P256_LIMBS * sizeof(BN_ULONG));
}

// ecp_nistz256_get_affine recovers affine coordinates (|x|, |y|) from the
// Jacobian |point|. Either output pointer may be NULL to skip it. Returns one
// on success and zero (with EC_R_POINT_AT_INFINITY) if |point| is infinity.
static int ecp_nistz256_get_affine(const EC_GROUP *group,
                                   const EC_JACOBIAN *point, EC_FELEM *x,
                                   EC_FELEM *y) {
  if (constant_time_declassify_int(
          ec_GFp_simple_is_at_infinity(group, point))) {
    OPENSSL_PUT_ERROR(EC, EC_R_POINT_AT_INFINITY);
    return 0;
  }

  BN_ULONG z_inv2[P256_LIMBS];
  assert(group->field.N.width == P256_LIMBS);
  ecp_nistz256_mod_inverse_sqr_mont(z_inv2, point->Z.words);

  if (x != NULL) {
    // x_affine = X * Z^-2.
    ecp_nistz256_mul_mont(x->words, z_inv2, point->X.words);
  }

  if (y != NULL) {
    ecp_nistz256_sqr_mont(z_inv2, z_inv2);                            // z^-4
    ecp_nistz256_mul_mont(y->words, point->Y.words, point->Z.words);  // y * z
    ecp_nistz256_mul_mont(y->words, y->words, z_inv2);  // y * z^-3
  }

  return 1;
}

// ecp_nistz256_add sets |r| to |a_| + |b_| (EC_JACOBIAN wrapper around the
// assembly point addition).
static void ecp_nistz256_add(const EC_GROUP *group, EC_JACOBIAN *r,
                             const EC_JACOBIAN *a_, const EC_JACOBIAN *b_) {
  P256_POINT a, b;
  OPENSSL_memcpy(a.X, a_->X.words, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(a.Y, a_->Y.words, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(a.Z, a_->Z.words, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(b.X, b_->X.words, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(b.Y, b_->Y.words, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(b.Z, b_->Z.words, P256_LIMBS * sizeof(BN_ULONG));
  ecp_nistz256_point_add(&a, &a, &b);
  OPENSSL_memcpy(r->X.words, a.X, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(r->Y.words, a.Y, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(r->Z.words, a.Z, P256_LIMBS * sizeof(BN_ULONG));
}

// ecp_nistz256_dbl sets |r| to |a_| doubled (EC_JACOBIAN wrapper around the
// assembly point doubling).
static void ecp_nistz256_dbl(const EC_GROUP *group, EC_JACOBIAN *r,
                             const EC_JACOBIAN *a_) {
  P256_POINT a;
  OPENSSL_memcpy(a.X, a_->X.words, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(a.Y, a_->Y.words, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(a.Z, a_->Z.words, P256_LIMBS * sizeof(BN_ULONG));
  ecp_nistz256_point_double(&a, &a);
  OPENSSL_memcpy(r->X.words, a.X, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(r->Y.words, a.Y, P256_LIMBS * sizeof(BN_ULONG));
  OPENSSL_memcpy(r->Z.words, a.Z, P256_LIMBS * sizeof(BN_ULONG));
}

// ecp_nistz256_inv0_mod_ord computes |out| = |in|^(order-2), i.e. |in|^-1 mod
// the group order for nonzero |in| and zero for |in| = 0, with input and
// output in the Montgomery domain.
static void ecp_nistz256_inv0_mod_ord(const EC_GROUP *group, EC_SCALAR *out,
                                      const EC_SCALAR *in) {
  // table[i] stores a power of |in| corresponding to the matching enum value.
  enum {
    // The following indices specify the power in binary.
    i_1 = 0,
    i_10,
    i_11,
    i_101,
    i_111,
    i_1010,
    i_1111,
    i_10101,
    i_101010,
    i_101111,
    // The following indices specify 2^N-1, or N ones in a row.
    i_x6,
    i_x8,
    i_x16,
    i_x32
  };

  BN_ULONG table[15][P256_LIMBS];

  // https://briansmith.org/ecc-inversion-addition-chains-01#p256_scalar_inversion
  //
  // Even though this code path spares 12 squarings, 4.5%, and 13
  // multiplications, 25%, the overall sign operation is not that much faster,
  // not more than 2%. Most of the performance of this function comes from the
  // scalar operations.

  // Pre-calculate powers.
  OPENSSL_memcpy(table[i_1], in->words, P256_LIMBS * sizeof(BN_ULONG));

  ecp_nistz256_ord_sqr_mont(table[i_10], table[i_1], 1);

  ecp_nistz256_ord_mul_mont(table[i_11], table[i_1], table[i_10]);

  ecp_nistz256_ord_mul_mont(table[i_101], table[i_11], table[i_10]);

  ecp_nistz256_ord_mul_mont(table[i_111], table[i_101], table[i_10]);

  ecp_nistz256_ord_sqr_mont(table[i_1010], table[i_101], 1);

  ecp_nistz256_ord_mul_mont(table[i_1111], table[i_1010], table[i_101]);

  ecp_nistz256_ord_sqr_mont(table[i_10101], table[i_1010], 1);
  ecp_nistz256_ord_mul_mont(table[i_10101], table[i_10101], table[i_1]);

  ecp_nistz256_ord_sqr_mont(table[i_101010], table[i_10101], 1);

  ecp_nistz256_ord_mul_mont(table[i_101111], table[i_101010], table[i_101]);

  ecp_nistz256_ord_mul_mont(table[i_x6], table[i_101010], table[i_10101]);

  ecp_nistz256_ord_sqr_mont(table[i_x8], table[i_x6], 2);
  ecp_nistz256_ord_mul_mont(table[i_x8], table[i_x8], table[i_11]);

  ecp_nistz256_ord_sqr_mont(table[i_x16], table[i_x8], 8);
  ecp_nistz256_ord_mul_mont(table[i_x16], table[i_x16], table[i_x8]);

  ecp_nistz256_ord_sqr_mont(table[i_x32], table[i_x16], 16);
  ecp_nistz256_ord_mul_mont(table[i_x32], table[i_x32], table[i_x16]);

  // Compute |in| raised to the order-2.
  ecp_nistz256_ord_sqr_mont(out->words, table[i_x32], 64);
  ecp_nistz256_ord_mul_mont(out->words, out->words, table[i_x32]);
  // |kChain| encodes the remaining addition chain: each step squares |p|
  // times, then multiplies by the table entry |i|.
  static const struct {
    uint8_t p, i;
  } kChain[27] = {{32, i_x32},    {6, i_101111}, {5, i_111},    {4, i_11},
                  {5, i_1111},    {5, i_10101},  {4, i_101},    {3, i_101},
                  {3, i_101},     {5, i_111},    {9, i_101111}, {6, i_1111},
                  {2, i_1},       {5, i_1},      {6, i_1111},   {5, i_111},
                  {4, i_111},     {5, i_111},    {5, i_101},    {3, i_11},
                  {10, i_101111}, {2, i_11},     {5, i_11},     {5, i_11},
                  {3, i_1},       {7, i_10101},  {6, i_1111}};
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kChain); i++) {
    ecp_nistz256_ord_sqr_mont(out->words, out->words, kChain[i].p);
    ecp_nistz256_ord_mul_mont(out->words, out->words, table[kChain[i].i]);
  }
}

// ecp_nistz256_scalar_to_montgomery_inv_vartime sets |out| to |in|^-1 mod the
// group order, in the Montgomery domain, using variable-time arithmetic.
static int ecp_nistz256_scalar_to_montgomery_inv_vartime(const EC_GROUP *group,
                                                         EC_SCALAR *out,
                                                         const EC_SCALAR *in) {
#if defined(OPENSSL_X86_64)
  if (!CRYPTO_is_AVX_capable()) {
    // No AVX support; fallback to generic code.
    return ec_simple_scalar_to_montgomery_inv_vartime(group, out, in);
  }
#endif

  assert(group->order.N.width == P256_LIMBS);
  if (!beeu_mod_inverse_vartime(out->words, in->words, group->order.N.d)) {
    return 0;
  }

  // The result should be returned in the Montgomery domain.
  ec_scalar_to_montgomery(group, out, out);
  return 1;
}

// ecp_nistz256_cmp_x_coordinate returns one iff the x-coordinate of |p| is
// congruent to |r| modulo the group order (the final check of ECDSA
// verification), and zero otherwise.
static int ecp_nistz256_cmp_x_coordinate(const EC_GROUP *group,
                                         const EC_JACOBIAN *p,
                                         const EC_SCALAR *r) {
  if (ec_GFp_simple_is_at_infinity(group, p)) {
    return 0;
  }

  assert(group->order.N.width == P256_LIMBS);
  assert(group->field.N.width == P256_LIMBS);

  // We wish to compare X/Z^2 with r. This is equivalent to comparing X with
  // r*Z^2. Note that X and Z are represented in Montgomery form, while r is
  // not.
  BN_ULONG r_Z2[P256_LIMBS], Z2_mont[P256_LIMBS], X[P256_LIMBS];
  ecp_nistz256_mul_mont(Z2_mont, p->Z.words, p->Z.words);
  ecp_nistz256_mul_mont(r_Z2, r->words, Z2_mont);
  ecp_nistz256_from_mont(X, p->X.words);

  if (OPENSSL_memcmp(r_Z2, X, sizeof(r_Z2)) == 0) {
    return 1;
  }

  // During signing the x coefficient is reduced modulo the group order.
  // Therefore there is a small possibility, less than 1/2^128, that group_order
  // < p.x < P. in that case we need not only to compare against |r| but also to
  // compare against r+group_order.
  BN_ULONG carry = bn_add_words(r_Z2, r->words, group->order.N.d, P256_LIMBS);
  if (carry == 0 && bn_less_than_words(r_Z2, group->field.N.d, P256_LIMBS)) {
    // r + group_order < p, so compare (r + group_order) * Z^2 against X.
    ecp_nistz256_mul_mont(r_Z2, r_Z2, Z2_mont);
    if (OPENSSL_memcmp(r_Z2, X, sizeof(r_Z2)) == 0) {
      return 1;
    }
  }

  return 0;
}

// EC_GFp_nistz256_method wires the nistz256 implementations into the generic
// EC_METHOD vtable.
DEFINE_METHOD_FUNCTION(EC_METHOD, EC_GFp_nistz256_method) {
  out->point_get_affine_coordinates = ecp_nistz256_get_affine;
  out->add = ecp_nistz256_add;
  out->dbl = ecp_nistz256_dbl;
  out->mul = ecp_nistz256_point_mul;
  out->mul_base = ecp_nistz256_point_mul_base;
  out->mul_public = ecp_nistz256_points_mul_public;
  out->felem_mul = ec_GFp_mont_felem_mul;
  out->felem_sqr = ec_GFp_mont_felem_sqr;
  out->felem_to_bytes = ec_GFp_mont_felem_to_bytes;
  out->felem_from_bytes = ec_GFp_mont_felem_from_bytes;
  out->felem_reduce = ec_GFp_mont_felem_reduce;
  // TODO(davidben): This should use the specialized field arithmetic
  // implementation, rather than the generic one.
  out->felem_exp = ec_GFp_mont_felem_exp;
  out->scalar_inv0_montgomery = ecp_nistz256_inv0_mod_ord;
  out->scalar_to_montgomery_inv_vartime =
      ecp_nistz256_scalar_to_montgomery_inv_vartime;
  out->cmp_x_coordinate = ecp_nistz256_cmp_x_coordinate;
}

#endif /* !defined(OPENSSL_NO_ASM) && \
          (defined(OPENSSL_X86_64) || defined(OPENSSL_AARCH64)) && \
          !defined(OPENSSL_SMALL) */


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/p256-nistz.h
================================================
/*
 * Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2014, Intel Corporation. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
* You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 *
 * Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1)
 * (1) Intel Corporation, Israel Development Center, Haifa, Israel
 * (2) University of Haifa, Israel
 *
 * Reference:
 * S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with
 * 256 Bit Primes"
 */

#ifndef OPENSSL_HEADER_EC_P256_X86_64_H
#define OPENSSL_HEADER_EC_P256_X86_64_H

// NOTE(review): the two #include directives below lost their angle-bracketed
// header names during text extraction — restore them from the upstream file.
#include
#include

#include "../bn/internal.h"

#if defined(__cplusplus)
extern "C" {
#endif

#if !defined(OPENSSL_NO_ASM) && \
    (defined(OPENSSL_X86_64) || defined(OPENSSL_AARCH64)) && \
    !defined(OPENSSL_SMALL)

// P-256 field operations.
//
// An element mod P in P-256 is represented as a little-endian array of
// |P256_LIMBS| |BN_ULONG|s, spanning the full range of values.
//
// The following functions take fully-reduced inputs mod P and give
// fully-reduced outputs. They may be used in-place.

#define P256_LIMBS (256 / BN_BITS2)

// ecp_nistz256_neg sets |res| to -|a| mod P.
void ecp_nistz256_neg(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS]);

// ecp_nistz256_mul_mont sets |res| to |a| * |b| * 2^-256 mod P.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_mul_mont_nohw(BN_ULONG res[P256_LIMBS],
                                const BN_ULONG a[P256_LIMBS],
                                const BN_ULONG b[P256_LIMBS]);
void ecp_nistz256_mul_mont_adx(BN_ULONG res[P256_LIMBS],
                               const BN_ULONG a[P256_LIMBS],
                               const BN_ULONG b[P256_LIMBS]);
#else
void ecp_nistz256_mul_mont(BN_ULONG res[P256_LIMBS],
                           const BN_ULONG a[P256_LIMBS],
                           const BN_ULONG b[P256_LIMBS]);
#endif

// ecp_nistz256_sqr_mont sets |res| to |a| * |a| * 2^-256 mod P.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_sqr_mont_nohw(BN_ULONG res[P256_LIMBS],
                                const BN_ULONG a[P256_LIMBS]);
void ecp_nistz256_sqr_mont_adx(BN_ULONG res[P256_LIMBS],
                               const BN_ULONG a[P256_LIMBS]);
#else
void ecp_nistz256_sqr_mont(BN_ULONG res[P256_LIMBS],
                           const BN_ULONG a[P256_LIMBS]);
#endif


// P-256 scalar operations.
//
// The following functions compute modulo N, where N is the order of P-256. They
// take fully-reduced inputs and give fully-reduced outputs.

// ecp_nistz256_ord_mul_mont sets |res| to |a| * |b| where inputs and outputs
// are in Montgomery form. That is, |res| is |a| * |b| * 2^-256 mod N.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_ord_mul_mont_nohw(BN_ULONG res[P256_LIMBS],
                                    const BN_ULONG a[P256_LIMBS],
                                    const BN_ULONG b[P256_LIMBS]);
void ecp_nistz256_ord_mul_mont_adx(BN_ULONG res[P256_LIMBS],
                                   const BN_ULONG a[P256_LIMBS],
                                   const BN_ULONG b[P256_LIMBS]);
#else
void ecp_nistz256_ord_mul_mont(BN_ULONG res[P256_LIMBS],
                               const BN_ULONG a[P256_LIMBS],
                               const BN_ULONG b[P256_LIMBS]);
#endif

// ecp_nistz256_ord_sqr_mont sets |res| to |a|^(2*|rep|) where inputs and
// outputs are in Montgomery form. That is, |res| is
// (|a| * 2^-256)^(2*|rep|) * 2^256 mod N.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_ord_sqr_mont_nohw(BN_ULONG res[P256_LIMBS],
                                    const BN_ULONG a[P256_LIMBS],
                                    BN_ULONG rep);
void ecp_nistz256_ord_sqr_mont_adx(BN_ULONG res[P256_LIMBS],
                                   const BN_ULONG a[P256_LIMBS],
                                   BN_ULONG rep);
#else
void ecp_nistz256_ord_sqr_mont(BN_ULONG res[P256_LIMBS],
                               const BN_ULONG a[P256_LIMBS], BN_ULONG rep);
#endif

// beeu_mod_inverse_vartime sets out = a^-1 mod p using a Euclidean algorithm.
// Assumption: 0 < a < p < 2^(256) and p is odd.
int beeu_mod_inverse_vartime(BN_ULONG out[P256_LIMBS],
                             const BN_ULONG a[P256_LIMBS],
                             const BN_ULONG p[P256_LIMBS]);


// P-256 point operations.
//
// The following functions may be used in-place. All coordinates are in the
// Montgomery domain.

// A P256_POINT represents a P-256 point in Jacobian coordinates.
typedef struct {
  BN_ULONG X[P256_LIMBS];
  BN_ULONG Y[P256_LIMBS];
  BN_ULONG Z[P256_LIMBS];
} P256_POINT;

// A P256_POINT_AFFINE represents a P-256 point in affine coordinates. Infinity
// is encoded as (0, 0).
typedef struct {
  BN_ULONG X[P256_LIMBS];
  BN_ULONG Y[P256_LIMBS];
} P256_POINT_AFFINE;

// ecp_nistz256_select_w5 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 16
// and all zeros (the point at infinity) if |index| is 0. This is done in
// constant time.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_select_w5_nohw(P256_POINT *val, const P256_POINT in_t[16],
                                 int index);
void ecp_nistz256_select_w5_avx2(P256_POINT *val, const P256_POINT in_t[16],
                                 int index);
#else
void ecp_nistz256_select_w5(P256_POINT *val, const P256_POINT in_t[16],
                            int index);
#endif

// ecp_nistz256_select_w7 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 64
// and all zeros (the point at infinity) if |index| is 0. This is done in
// constant time.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_select_w7_nohw(P256_POINT_AFFINE *val,
                                 const P256_POINT_AFFINE in_t[64], int index);
void ecp_nistz256_select_w7_avx2(P256_POINT_AFFINE *val,
                                 const P256_POINT_AFFINE in_t[64], int index);
#else
void ecp_nistz256_select_w7(P256_POINT_AFFINE *val,
                            const P256_POINT_AFFINE in_t[64], int index);
#endif

// ecp_nistz256_point_double sets |r| to |a| doubled.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_point_double_nohw(P256_POINT *r, const P256_POINT *a);
void ecp_nistz256_point_double_adx(P256_POINT *r, const P256_POINT *a);
#else
void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a);
#endif

// ecp_nistz256_point_add adds |a| to |b| and places the result in |r|.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_point_add_nohw(P256_POINT *r, const P256_POINT *a,
                                 const P256_POINT *b);
void ecp_nistz256_point_add_adx(P256_POINT *r, const P256_POINT *a,
                                const P256_POINT *b);
#else
void ecp_nistz256_point_add(P256_POINT *r, const P256_POINT *a,
                            const P256_POINT *b);
#endif

// ecp_nistz256_point_add_affine adds |a| to |b| and places the result in
// |r|. |a| and |b| must not represent the same point unless they are both
// infinity.
#if defined(OPENSSL_X86_64)
void ecp_nistz256_point_add_affine_adx(P256_POINT *r, const P256_POINT *a,
                                       const P256_POINT_AFFINE *b);
void ecp_nistz256_point_add_affine_nohw(P256_POINT *r, const P256_POINT *a,
                                        const P256_POINT_AFFINE *b);
#else
void ecp_nistz256_point_add_affine(P256_POINT *r, const P256_POINT *a,
                                   const P256_POINT_AFFINE *b);
#endif

#endif /* !defined(OPENSSL_NO_ASM) && \
          (defined(OPENSSL_X86_64) || defined(OPENSSL_AARCH64)) && \
          !defined(OPENSSL_SMALL) */

#if defined(__cplusplus)
}  // extern C++
#endif

#endif  // OPENSSL_HEADER_EC_P256_X86_64_H


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/p256.cc.inc
================================================
/* Copyright 2020 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// An implementation of the NIST P-256 elliptic curve point multiplication.
// 256-bit Montgomery form for 64 and 32-bit. Field operations are generated by
// Fiat, which lives in //third_party/fiat.
// NOTE(review): the eight bare #include directives below lost their
// angle-bracketed header names during text extraction — restore them from the
// upstream file.
#include
#include
#include
#include
#include
#include
#include
#include "../../internal.h"
#include "../delocate.h"
#include "./internal.h"

// Select the Fiat-generated field arithmetic matching the platform's word
// size and 128-bit-integer support.
#if defined(BORINGSSL_HAS_UINT128)
#include "../../../third_party/fiat/p256_64.h"
#elif defined(OPENSSL_64_BIT)
#include "../../../third_party/fiat/p256_64_msvc.h"
#else
#include "../../../third_party/fiat/p256_32.h"
#endif


// utility functions, handwritten

#if defined(OPENSSL_64_BIT)
#define FIAT_P256_NLIMBS 4

typedef uint64_t fiat_p256_limb_t;
typedef uint64_t fiat_p256_felem[FIAT_P256_NLIMBS];

// fiat_p256_one is the field element 1 in the Montgomery domain.
static const fiat_p256_felem fiat_p256_one = {0x1, 0xffffffff00000000,
                                              0xffffffffffffffff, 0xfffffffe};
#else  // 64BIT; else 32BIT
#define FIAT_P256_NLIMBS 8

typedef uint32_t fiat_p256_limb_t;
typedef uint32_t fiat_p256_felem[FIAT_P256_NLIMBS];

// fiat_p256_one is the field element 1 in the Montgomery domain.
static const fiat_p256_felem fiat_p256_one = {
    0x1, 0x0, 0x0, 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0x0};
#endif  // 64BIT

// fiat_p256_nz returns a nonzero mask iff |in1| is nonzero, in constant time.
static fiat_p256_limb_t fiat_p256_nz(
    const fiat_p256_limb_t in1[FIAT_P256_NLIMBS]) {
  fiat_p256_limb_t ret;
  fiat_p256_nonzero(&ret, in1);
  return ret;
}

// fiat_p256_copy copies |in1| into |out|.
static void fiat_p256_copy(fiat_p256_limb_t out[FIAT_P256_NLIMBS],
                           const fiat_p256_limb_t in1[FIAT_P256_NLIMBS]) {
  for (size_t i = 0; i < FIAT_P256_NLIMBS; i++) {
    out[i] = in1[i];
  }
}

// fiat_p256_cmovznz sets |out| to |z| if |t| is zero and |nz| otherwise, in
// constant time.
static void fiat_p256_cmovznz(fiat_p256_limb_t out[FIAT_P256_NLIMBS],
                              fiat_p256_limb_t t,
                              const fiat_p256_limb_t z[FIAT_P256_NLIMBS],
                              const fiat_p256_limb_t nz[FIAT_P256_NLIMBS]) {
  fiat_p256_selectznz(out, !!t, z, nz);
}

static void fiat_p256_from_words(fiat_p256_felem out,
                                 const BN_ULONG in[32 / sizeof(BN_ULONG)]) {
  // Typically, |BN_ULONG| and |fiat_p256_limb_t| will be the same type, but on
  // 64-bit platforms without |uint128_t|, they are different. However, on
  // little-endian systems, |uint64_t[4]| and |uint32_t[8]| have the same
  // layout.
  OPENSSL_memcpy(out, in, 32);
}

static void fiat_p256_from_generic(fiat_p256_felem out, const EC_FELEM *in) {
  fiat_p256_from_words(out, in->words);
}

static void fiat_p256_to_generic(EC_FELEM *out, const fiat_p256_felem in) {
  // See |fiat_p256_from_words|.
  OPENSSL_memcpy(out->words, in, 32);
}

// fiat_p256_inv_square calculates |out| = |in|^{-2}
//
// Based on Fermat's Little Theorem:
//   a^p = a (mod p)
//   a^{p-1} = 1 (mod p)
//   a^{p-3} = a^{-2} (mod p)
static void fiat_p256_inv_square(fiat_p256_felem out,
                                 const fiat_p256_felem in) {
  // This implements the addition chain described in
  // https://briansmith.org/ecc-inversion-addition-chains-01#p256_field_inversion
  //
  // The inline comments track the exponent accumulated so far.
  fiat_p256_felem x2, x3, x6, x12, x15, x30, x32;
  fiat_p256_square(x2, in);   // 2^2 - 2^1
  fiat_p256_mul(x2, x2, in);  // 2^2 - 2^0

  fiat_p256_square(x3, x2);   // 2^3 - 2^1
  fiat_p256_mul(x3, x3, in);  // 2^3 - 2^0

  fiat_p256_square(x6, x3);
  for (int i = 1; i < 3; i++) {
    fiat_p256_square(x6, x6);
  }                           // 2^6 - 2^3
  fiat_p256_mul(x6, x6, x3);  // 2^6 - 2^0

  fiat_p256_square(x12, x6);
  for (int i = 1; i < 6; i++) {
    fiat_p256_square(x12, x12);
  }                             // 2^12 - 2^6
  fiat_p256_mul(x12, x12, x6);  // 2^12 - 2^0

  fiat_p256_square(x15, x12);
  for (int i = 1; i < 3; i++) {
    fiat_p256_square(x15, x15);
  }                             // 2^15 - 2^3
  fiat_p256_mul(x15, x15, x3);  // 2^15 - 2^0

  fiat_p256_square(x30, x15);
  for (int i = 1; i < 15; i++) {
    fiat_p256_square(x30, x30);
  }                              // 2^30 - 2^15
  fiat_p256_mul(x30, x30, x15);  // 2^30 - 2^0

  fiat_p256_square(x32, x30);
  fiat_p256_square(x32, x32);   // 2^32 - 2^2
  fiat_p256_mul(x32, x32, x2);  // 2^32 - 2^0

  fiat_p256_felem ret;
  fiat_p256_square(ret, x32);
  for (int i = 1; i < 31 + 1; i++) {
    fiat_p256_square(ret, ret);
  }                             // 2^64 - 2^32
  fiat_p256_mul(ret, ret, in);  // 2^64 - 2^32 + 2^0

  for (int i = 0; i < 96 + 32; i++) {
    fiat_p256_square(ret, ret);
  }                              // 2^192 - 2^160 + 2^128
  fiat_p256_mul(ret, ret, x32);  // 2^192 - 2^160 + 2^128 + 2^32 - 2^0

  for (int i = 0; i < 32; i++) {
    fiat_p256_square(ret, ret);
  }                              // 2^224 - 2^192 + 2^160 + 2^64 - 2^32
  fiat_p256_mul(ret, ret, x32);  // 2^224 - 2^192 + 2^160 + 2^64 - 2^0

  for (int i = 0; i < 30; i++) {
    fiat_p256_square(ret, ret);
  }                              // 2^254 - 2^222 + 2^190 + 2^94 - 2^30
  fiat_p256_mul(ret, ret, x30);  // 2^254 - 2^222 + 2^190 + 2^94 - 2^0

  fiat_p256_square(ret, ret);
  fiat_p256_square(out, ret);  // 2^256 - 2^224 + 2^192 + 2^96 - 2^2
}

// Group operations
// ----------------
//
// Building on top of the field operations we have the operations on the
// elliptic curve group itself. Points on the curve are represented in Jacobian
// coordinates.
//
// Both operations were transcribed to Coq and proven to correspond to naive
// implementations using Affine coordinates, for all suitable fields. In the
// Coq proofs, issues of constant-time execution and memory layout (aliasing)
// conventions were not considered. Specification of affine coordinates:
// (NOTE(review): URL lost in extraction)
// As a sanity check, a proof that these points form a commutative group:
// (NOTE(review): URL lost in extraction)

// fiat_p256_point_double calculates 2*(x_in, y_in, z_in)
//
// The method is taken from:
//   http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
//
// Coq transcription and correctness proof:
// (NOTE(review): URLs lost in extraction)
//
// Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed.
// while x_out == y_in is not (maybe this works, but it's not tested).
static void fiat_p256_point_double(fiat_p256_felem x_out, fiat_p256_felem y_out,
                                   fiat_p256_felem z_out,
                                   const fiat_p256_felem x_in,
                                   const fiat_p256_felem y_in,
                                   const fiat_p256_felem z_in) {
  fiat_p256_felem delta, gamma, beta, ftmp, ftmp2, tmptmp, alpha, fourbeta;
  // delta = z^2
  fiat_p256_square(delta, z_in);
  // gamma = y^2
  fiat_p256_square(gamma, y_in);
  // beta = x*gamma
  fiat_p256_mul(beta, x_in, gamma);

  // alpha = 3*(x-delta)*(x+delta)
  fiat_p256_sub(ftmp, x_in, delta);
  fiat_p256_add(ftmp2, x_in, delta);

  fiat_p256_add(tmptmp, ftmp2, ftmp2);
  fiat_p256_add(ftmp2, ftmp2, tmptmp);
  fiat_p256_mul(alpha, ftmp, ftmp2);

  // x' = alpha^2 - 8*beta
  fiat_p256_square(x_out, alpha);
  fiat_p256_add(fourbeta, beta, beta);
  fiat_p256_add(fourbeta, fourbeta, fourbeta);
  fiat_p256_add(tmptmp, fourbeta, fourbeta);
  fiat_p256_sub(x_out, x_out, tmptmp);

  // z' = (y + z)^2 - gamma - delta
  fiat_p256_add(delta, gamma, delta);
  fiat_p256_add(ftmp, y_in, z_in);
  fiat_p256_square(z_out, ftmp);
  fiat_p256_sub(z_out, z_out, delta);

  // y' = alpha*(4*beta - x') - 8*gamma^2
  fiat_p256_sub(y_out, fourbeta, x_out);
  fiat_p256_add(gamma, gamma, gamma);
  fiat_p256_square(gamma, gamma);
  fiat_p256_mul(y_out, alpha, y_out);
  fiat_p256_add(gamma, gamma, gamma);
  fiat_p256_sub(y_out, y_out, gamma);
}

// fiat_p256_point_add calculates (x1, y1, z1) + (x2, y2, z2)
//
// The method is taken from:
//   http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
// adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
//
// Coq transcription and correctness proof:
// (NOTE(review): URLs lost in extraction)
//
// This function includes a branch for checking whether the two input points
// are equal, (while not equal to the point at infinity). This case never
// happens during single point multiplication, so there is no timing leak for
// ECDH or ECDSA signing.
static void fiat_p256_point_add(fiat_p256_felem x3, fiat_p256_felem y3,
                                fiat_p256_felem z3, const fiat_p256_felem x1,
                                const fiat_p256_felem y1,
                                const fiat_p256_felem z1, const int mixed,
                                const fiat_p256_felem x2,
                                const fiat_p256_felem y2,
                                const fiat_p256_felem z2) {
  fiat_p256_felem x_out, y_out, z_out;
  // Record (as all-ones/all-zeros masks) whether either input is the point at
  // infinity (z == 0); used for the constant-time selections at the end.
  fiat_p256_limb_t z1nz = fiat_p256_nz(z1);
  fiat_p256_limb_t z2nz = fiat_p256_nz(z2);

  // z1z1 = z1**2
  fiat_p256_felem z1z1;
  fiat_p256_square(z1z1, z1);

  fiat_p256_felem u1, s1, two_z1z2;
  if (!mixed) {
    // z2z2 = z2**2
    fiat_p256_felem z2z2;
    fiat_p256_square(z2z2, z2);

    // u1 = x1*z2z2
    fiat_p256_mul(u1, x1, z2z2);

    // two_z1z2 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2
    fiat_p256_add(two_z1z2, z1, z2);
    fiat_p256_square(two_z1z2, two_z1z2);
    fiat_p256_sub(two_z1z2, two_z1z2, z1z1);
    fiat_p256_sub(two_z1z2, two_z1z2, z2z2);

    // s1 = y1 * z2**3
    fiat_p256_mul(s1, z2, z2z2);
    fiat_p256_mul(s1, s1, y1);
  } else {
    // We'll assume z2 = 1 (special case z2 = 0 is handled later).

    // u1 = x1*z2z2
    fiat_p256_copy(u1, x1);
    // two_z1z2 = 2z1z2
    fiat_p256_add(two_z1z2, z1, z1);
    // s1 = y1 * z2**3
    fiat_p256_copy(s1, y1);
  }

  // u2 = x2*z1z1
  fiat_p256_felem u2;
  fiat_p256_mul(u2, x2, z1z1);

  // h = u2 - u1
  fiat_p256_felem h;
  fiat_p256_sub(h, u2, u1);

  // xneq is nonzero (as a mask) iff the affine x-coordinates differ.
  fiat_p256_limb_t xneq = fiat_p256_nz(h);

  // z_out = two_z1z2 * h
  fiat_p256_mul(z_out, h, two_z1z2);

  // z1z1z1 = z1 * z1z1
  fiat_p256_felem z1z1z1;
  fiat_p256_mul(z1z1z1, z1, z1z1);

  // s2 = y2 * z1**3
  fiat_p256_felem s2;
  fiat_p256_mul(s2, y2, z1z1z1);

  // r = (s2 - s1)*2
  fiat_p256_felem r;
  fiat_p256_sub(r, s2, s1);
  fiat_p256_add(r, r, r);

  // yneq is nonzero (as a mask) iff the y-coordinates differ.
  fiat_p256_limb_t yneq = fiat_p256_nz(r);

  // The doubling case: both inputs are finite, equal, and nontrivial.
  // Per the header comment, this branch never fires during single-point
  // multiplication, so declassifying it does not leak secret data there.
  fiat_p256_limb_t is_nontrivial_double =
      constant_time_is_zero_w(xneq | yneq) & ~constant_time_is_zero_w(z1nz) &
      ~constant_time_is_zero_w(z2nz);
  if (constant_time_declassify_w(is_nontrivial_double)) {
    fiat_p256_point_double(x3, y3, z3, x1, y1, z1);
    return;
  }

  // I = (2h)**2
  fiat_p256_felem i;
  fiat_p256_add(i, h, h);
  fiat_p256_square(i, i);

  // J = h * I
  fiat_p256_felem j;
  fiat_p256_mul(j, h, i);

  // V = U1 * I
  fiat_p256_felem v;
  fiat_p256_mul(v, u1, i);

  // x_out = r**2 - J - 2V
  fiat_p256_square(x_out, r);
  fiat_p256_sub(x_out, x_out, j);
  fiat_p256_sub(x_out, x_out, v);
  fiat_p256_sub(x_out, x_out, v);

  // y_out = r(V-x_out) - 2 * s1 * J
  fiat_p256_sub(y_out, v, x_out);
  fiat_p256_mul(y_out, y_out, r);
  fiat_p256_felem s1j;
  fiat_p256_mul(s1j, s1, j);
  fiat_p256_sub(y_out, y_out, s1j);
  fiat_p256_sub(y_out, y_out, s1j);

  // Constant-time selection of the infinity special cases: if z1 == 0 the
  // result is the second input; if z2 == 0 the result is the first input.
  fiat_p256_cmovznz(x_out, z1nz, x2, x_out);
  fiat_p256_cmovznz(x3, z2nz, x1, x_out);
  fiat_p256_cmovznz(y_out, z1nz, y2, y_out);
  fiat_p256_cmovznz(y3, z2nz, y1, y_out);
  fiat_p256_cmovznz(z_out, z1nz, z2, z_out);
  fiat_p256_cmovznz(z3, z2nz, z1, z_out);
}

#include "./p256_table.h"

// fiat_p256_select_point_affine selects the |idx-1|th point from a
// precomputation table and copies it to out. If |idx| is zero, the output is
// the point at infinity. The scan always touches every table entry, using
// fiat_p256_cmovznz so the access pattern is independent of |idx|.
static void fiat_p256_select_point_affine(
    const fiat_p256_limb_t idx, size_t size,
    const fiat_p256_felem pre_comp[/*size*/][2], fiat_p256_felem out[3]) {
  OPENSSL_memset(out, 0, sizeof(fiat_p256_felem) * 3);
  for (size_t i = 0; i < size; i++) {
    // mismatch is zero only for the selected entry (i == idx - 1).
    fiat_p256_limb_t mismatch = i ^ (idx - 1);
    fiat_p256_cmovznz(out[0], mismatch, pre_comp[i][0], out[0]);
    fiat_p256_cmovznz(out[1], mismatch, pre_comp[i][1], out[1]);
  }
  // Z is one for a real (affine) entry and stays zero (infinity) for idx == 0.
  fiat_p256_cmovznz(out[2], idx, out[2], fiat_p256_one);
}

// fiat_p256_select_point selects the |idx|th point from a precomputation table
// and copies it to out, scanning the whole table in constant time as above.
static void fiat_p256_select_point(const fiat_p256_limb_t idx, size_t size,
                                   const fiat_p256_felem pre_comp[/*size*/][3],
                                   fiat_p256_felem out[3]) {
  OPENSSL_memset(out, 0, sizeof(fiat_p256_felem) * 3);
  for (size_t i = 0; i < size; i++) {
    fiat_p256_limb_t mismatch = i ^ idx;
    fiat_p256_cmovznz(out[0], mismatch, pre_comp[i][0], out[0]);
    fiat_p256_cmovznz(out[1], mismatch, pre_comp[i][1], out[1]);
    fiat_p256_cmovznz(out[2], mismatch, pre_comp[i][2], out[2]);
  }
}

// fiat_p256_get_bit returns the |i|th bit in |in|.
static crypto_word_t fiat_p256_get_bit(const EC_SCALAR *in, int i) {
  // Out-of-range bit indices read as zero; callers index up to i+224.
  if (i < 0 || i >= 256) {
    return 0;
  }
#if defined(OPENSSL_64_BIT)
  static_assert(sizeof(BN_ULONG) == 8, "BN_ULONG was not 64-bit");
  return (in->words[i >> 6] >> (i & 63)) & 1;
#else
  static_assert(sizeof(BN_ULONG) == 4, "BN_ULONG was not 32-bit");
  return (in->words[i >> 5] >> (i & 31)) & 1;
#endif
}

// OPENSSL EC_METHOD FUNCTIONS

// Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
// (X/Z^2, Y/Z^3). Fails (returning zero) for the point at infinity, which has
// no affine representation.
static int ec_GFp_nistp256_point_get_affine_coordinates(
    const EC_GROUP *group, const EC_JACOBIAN *point, EC_FELEM *x_out,
    EC_FELEM *y_out) {
  if (constant_time_declassify_int(
          ec_GFp_simple_is_at_infinity(group, point))) {
    OPENSSL_PUT_ERROR(EC, EC_R_POINT_AT_INFINITY);
    return 0;
  }
  fiat_p256_felem z1, z2;
  fiat_p256_from_generic(z1, &point->Z);
  // z2 = z^-2, computed once and shared between the x and y conversions.
  fiat_p256_inv_square(z2, z1);
  if (x_out != NULL) {
    fiat_p256_felem x;
    fiat_p256_from_generic(x, &point->X);
    fiat_p256_mul(x, x, z2);
    fiat_p256_to_generic(x_out, x);
  }
  if (y_out != NULL) {
    fiat_p256_felem y;
    fiat_p256_from_generic(y, &point->Y);
    fiat_p256_square(z2, z2);  // z^-4
    fiat_p256_mul(y, y, z1);   // y * z
    fiat_p256_mul(y, y, z2);   // y * z^-3
    fiat_p256_to_generic(y_out, y);
  }
  return 1;
}

// ec_GFp_nistp256_add sets |r| = |a| + |b| with both inputs in Jacobian form.
static void ec_GFp_nistp256_add(const EC_GROUP *group, EC_JACOBIAN *r,
                                const EC_JACOBIAN *a, const EC_JACOBIAN *b) {
  fiat_p256_felem x1, y1, z1, x2, y2, z2;
  fiat_p256_from_generic(x1, &a->X);
  fiat_p256_from_generic(y1, &a->Y);
  fiat_p256_from_generic(z1, &a->Z);
  fiat_p256_from_generic(x2, &b->X);
  fiat_p256_from_generic(y2, &b->Y);
  fiat_p256_from_generic(z2, &b->Z);
  fiat_p256_point_add(x1, y1, z1, x1, y1, z1, 0 /* both Jacobian */, x2, y2,
                      z2);
  fiat_p256_to_generic(&r->X, x1);
  fiat_p256_to_generic(&r->Y, y1);
  fiat_p256_to_generic(&r->Z, z1);
}

// ec_GFp_nistp256_dbl sets |r| = 2*|a| in Jacobian form.
static void ec_GFp_nistp256_dbl(const EC_GROUP *group, EC_JACOBIAN *r,
                                const EC_JACOBIAN *a) {
  fiat_p256_felem x, y, z;
  fiat_p256_from_generic(x, &a->X);
  fiat_p256_from_generic(y, &a->Y);
  fiat_p256_from_generic(z,
                         &a->Z);
  fiat_p256_point_double(x, y, z, x, y, z);
  fiat_p256_to_generic(&r->X, x);
  fiat_p256_to_generic(&r->Y, y);
  fiat_p256_to_generic(&r->Z, z);
}

// ec_GFp_nistp256_point_mul sets |r| = |scalar| * |p| using a fixed-window
// method with a constant-time table lookup, suitable for secret scalars.
static void ec_GFp_nistp256_point_mul(const EC_GROUP *group, EC_JACOBIAN *r,
                                      const EC_JACOBIAN *p,
                                      const EC_SCALAR *scalar) {
  fiat_p256_felem p_pre_comp[17][3];
  OPENSSL_memset(&p_pre_comp, 0, sizeof(p_pre_comp));
  // Precompute multiples: p_pre_comp[j] holds j*|p| for j in 0..16 (entry 0
  // stays all-zero, the point at infinity).
  fiat_p256_from_generic(p_pre_comp[1][0], &p->X);
  fiat_p256_from_generic(p_pre_comp[1][1], &p->Y);
  fiat_p256_from_generic(p_pre_comp[1][2], &p->Z);
  for (size_t j = 2; j <= 16; ++j) {
    // Odd entries come from (j-1)*p + p; even entries from doubling (j/2)*p.
    if (j & 1) {
      fiat_p256_point_add(p_pre_comp[j][0], p_pre_comp[j][1], p_pre_comp[j][2],
                          p_pre_comp[1][0], p_pre_comp[1][1], p_pre_comp[1][2],
                          0, p_pre_comp[j - 1][0], p_pre_comp[j - 1][1],
                          p_pre_comp[j - 1][2]);
    } else {
      fiat_p256_point_double(p_pre_comp[j][0], p_pre_comp[j][1],
                             p_pre_comp[j][2], p_pre_comp[j / 2][0],
                             p_pre_comp[j / 2][1], p_pre_comp[j / 2][2]);
    }
  }

  // Set nq to the point at infinity.
  fiat_p256_felem nq[3] = {{0}, {0}, {0}}, ftmp, tmp[3];

  // Loop over |scalar| msb-to-lsb, incorporating |p_pre_comp| every 5th round.
  // Note the loop condition |i < 256| terminates when the unsigned index
  // wraps below zero.
  int skip = 1;  // Save two point operations in the first round.
  for (size_t i = 255; i < 256; i--) {
    // double
    if (!skip) {
      fiat_p256_point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
    }

    // do other additions every 5 doublings
    if (i % 5 == 0) {
      // Gather a signed window of scalar bits (bit i-1 reads as zero at the
      // bottom, via fiat_p256_get_bit's range check) and recode it.
      crypto_word_t bits = fiat_p256_get_bit(scalar, i + 4) << 5;
      bits |= fiat_p256_get_bit(scalar, i + 3) << 4;
      bits |= fiat_p256_get_bit(scalar, i + 2) << 3;
      bits |= fiat_p256_get_bit(scalar, i + 1) << 2;
      bits |= fiat_p256_get_bit(scalar, i) << 1;
      bits |= fiat_p256_get_bit(scalar, i - 1);
      crypto_word_t sign, digit;
      ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);

      // select the point to add or subtract, in constant time.
      fiat_p256_select_point((fiat_p256_limb_t)digit, 17,
                             (const fiat_p256_felem(*)[3])p_pre_comp, tmp);
      fiat_p256_opp(ftmp, tmp[1]);  // (X, -Y, Z) is the negative point.
      fiat_p256_cmovznz(tmp[1], (fiat_p256_limb_t)sign, tmp[1], ftmp);

      if (!skip) {
        fiat_p256_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2],
                            0 /* mixed */, tmp[0], tmp[1], tmp[2]);
      } else {
        // First window: the accumulator is still infinity, so just copy.
        fiat_p256_copy(nq[0], tmp[0]);
        fiat_p256_copy(nq[1], tmp[1]);
        fiat_p256_copy(nq[2], tmp[2]);
        skip = 0;
      }
    }
  }

  fiat_p256_to_generic(&r->X, nq[0]);
  fiat_p256_to_generic(&r->Y, nq[1]);
  fiat_p256_to_generic(&r->Z, nq[2]);
}

// ec_GFp_nistp256_point_mul_base sets |r| = |scalar| * G using the
// precomputed |fiat_p256_g_pre_comp| tables with constant-time lookups;
// suitable for secret scalars (e.g. ECDSA nonces).
static void ec_GFp_nistp256_point_mul_base(const EC_GROUP *group,
                                           EC_JACOBIAN *r,
                                           const EC_SCALAR *scalar) {
  // Set nq to the point at infinity.
  fiat_p256_felem nq[3] = {{0}, {0}, {0}}, tmp[3];

  int skip = 1;  // Save two point operations in the first round.
  for (size_t i = 31; i < 32; i--) {
    if (!skip) {
      fiat_p256_point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
    }

    // First, look 32 bits upwards: bits i+32, i+96, i+160, i+224 select an
    // entry from the second (2^32-scaled) half of the table.
    crypto_word_t bits = fiat_p256_get_bit(scalar, i + 224) << 3;
    bits |= fiat_p256_get_bit(scalar, i + 160) << 2;
    bits |= fiat_p256_get_bit(scalar, i + 96) << 1;
    bits |= fiat_p256_get_bit(scalar, i + 32);

    // Select the point to add, in constant time.
    fiat_p256_select_point_affine((fiat_p256_limb_t)bits, 15,
                                  fiat_p256_g_pre_comp[1], tmp);

    if (!skip) {
      fiat_p256_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2],
                          1 /* mixed */, tmp[0], tmp[1], tmp[2]);
    } else {
      // First iteration: accumulator is infinity, so copy instead of adding.
      fiat_p256_copy(nq[0], tmp[0]);
      fiat_p256_copy(nq[1], tmp[1]);
      fiat_p256_copy(nq[2], tmp[2]);
      skip = 0;
    }

    // Second, look at the current position.
    bits = fiat_p256_get_bit(scalar, i + 192) << 3;
    bits |= fiat_p256_get_bit(scalar, i + 128) << 2;
    bits |= fiat_p256_get_bit(scalar, i + 64) << 1;
    bits |= fiat_p256_get_bit(scalar, i);
    // Select the point to add, in constant time.
    fiat_p256_select_point_affine((fiat_p256_limb_t)bits, 15,
                                  fiat_p256_g_pre_comp[0], tmp);
    fiat_p256_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2],
                        1 /* mixed */, tmp[0], tmp[1], tmp[2]);
  }

  fiat_p256_to_generic(&r->X, nq[0]);
  fiat_p256_to_generic(&r->Y, nq[1]);
  fiat_p256_to_generic(&r->Z, nq[2]);
}

// ec_GFp_nistp256_point_mul_public sets |r| = |g_scalar|*G + |p_scalar|*|p|.
// Both scalars are treated as PUBLIC (variable-time wNAF and direct table
// indexing); this is only safe for operations like ECDSA verification.
static void ec_GFp_nistp256_point_mul_public(const EC_GROUP *group,
                                             EC_JACOBIAN *r,
                                             const EC_SCALAR *g_scalar,
                                             const EC_JACOBIAN *p,
                                             const EC_SCALAR *p_scalar) {
#define P256_WSIZE_PUBLIC 4
  // Precompute multiples of |p|. p_pre_comp[i] is (2*i+1) * |p|.
  fiat_p256_felem p_pre_comp[1 << (P256_WSIZE_PUBLIC - 1)][3];
  fiat_p256_from_generic(p_pre_comp[0][0], &p->X);
  fiat_p256_from_generic(p_pre_comp[0][1], &p->Y);
  fiat_p256_from_generic(p_pre_comp[0][2], &p->Z);
  fiat_p256_felem p2[3];
  fiat_p256_point_double(p2[0], p2[1], p2[2], p_pre_comp[0][0],
                         p_pre_comp[0][1], p_pre_comp[0][2]);
  for (size_t i = 1; i < OPENSSL_ARRAY_SIZE(p_pre_comp); i++) {
    fiat_p256_point_add(p_pre_comp[i][0], p_pre_comp[i][1], p_pre_comp[i][2],
                        p_pre_comp[i - 1][0], p_pre_comp[i - 1][1],
                        p_pre_comp[i - 1][2], 0 /* not mixed */, p2[0], p2[1],
                        p2[2]);
  }

  // Set up the coefficients for |p_scalar|.
  int8_t p_wNAF[257];
  ec_compute_wNAF(group, p_wNAF, p_scalar, 256, P256_WSIZE_PUBLIC);

  // Set |ret| to the point at infinity.
  int skip = 1;  // Save some point operations.
  fiat_p256_felem ret[3] = {{0}, {0}, {0}};
  for (int i = 256; i >= 0; i--) {
    if (!skip) {
      fiat_p256_point_double(ret[0], ret[1], ret[2], ret[0], ret[1], ret[2]);
    }

    // For the |g_scalar|, we use the precomputed table without the
    // constant-time lookup.
    if (i <= 31) {
      // First, look 32 bits upwards.
      crypto_word_t bits = fiat_p256_get_bit(g_scalar, i + 224) << 3;
      bits |= fiat_p256_get_bit(g_scalar, i + 160) << 2;
      bits |= fiat_p256_get_bit(g_scalar, i + 96) << 1;
      bits |= fiat_p256_get_bit(g_scalar, i + 32);
      if (bits != 0) {
        size_t index = (size_t)(bits - 1);
        fiat_p256_point_add(ret[0], ret[1], ret[2], ret[0], ret[1], ret[2],
                            1 /* mixed */, fiat_p256_g_pre_comp[1][index][0],
                            fiat_p256_g_pre_comp[1][index][1], fiat_p256_one);
        skip = 0;
      }

      // Second, look at the current position.
      bits = fiat_p256_get_bit(g_scalar, i + 192) << 3;
      bits |= fiat_p256_get_bit(g_scalar, i + 128) << 2;
      bits |= fiat_p256_get_bit(g_scalar, i + 64) << 1;
      bits |= fiat_p256_get_bit(g_scalar, i);
      if (bits != 0) {
        size_t index = (size_t)(bits - 1);
        fiat_p256_point_add(ret[0], ret[1], ret[2], ret[0], ret[1], ret[2],
                            1 /* mixed */, fiat_p256_g_pre_comp[0][index][0],
                            fiat_p256_g_pre_comp[0][index][1], fiat_p256_one);
        skip = 0;
      }
    }

    // Incorporate the wNAF digit for |p_scalar| at this position, if any.
    int digit = p_wNAF[i];
    if (digit != 0) {
      assert(digit & 1);
      // |digit| is odd; (|digit| >> 1) indexes the (2*idx+1)*p table entry.
      size_t idx = (size_t)(digit < 0 ? (-digit) >> 1 : digit >> 1);
      fiat_p256_felem *y = &p_pre_comp[idx][1], tmp;
      if (digit < 0) {
        // Negative digit: add the negated point (X, -Y, Z).
        fiat_p256_opp(tmp, p_pre_comp[idx][1]);
        y = &tmp;
      }
      if (!skip) {
        fiat_p256_point_add(ret[0], ret[1], ret[2], ret[0], ret[1], ret[2],
                            0 /* not mixed */, p_pre_comp[idx][0], *y,
                            p_pre_comp[idx][2]);
      } else {
        fiat_p256_copy(ret[0], p_pre_comp[idx][0]);
        fiat_p256_copy(ret[1], *y);
        fiat_p256_copy(ret[2], p_pre_comp[idx][2]);
        skip = 0;
      }
    }
  }

  fiat_p256_to_generic(&r->X, ret[0]);
  fiat_p256_to_generic(&r->Y, ret[1]);
  fiat_p256_to_generic(&r->Z, ret[2]);
}

// ec_GFp_nistp256_cmp_x_coordinate returns one if |p|'s affine x-coordinate
// equals |r| modulo the group order, and zero otherwise (used to verify ECDSA
// signatures without a field inversion).
static int ec_GFp_nistp256_cmp_x_coordinate(const EC_GROUP *group,
                                            const EC_JACOBIAN *p,
                                            const EC_SCALAR *r) {
  if (ec_GFp_simple_is_at_infinity(group, p)) {
    return 0;
  }

  // We wish to compare X/Z^2 with r. This is equivalent to comparing X with
  // r*Z^2. Note that X and Z are represented in Montgomery form, while r is
  // not.
  fiat_p256_felem Z2_mont;
  fiat_p256_from_generic(Z2_mont, &p->Z);
  fiat_p256_mul(Z2_mont, Z2_mont, Z2_mont);

  fiat_p256_felem r_Z2;
  fiat_p256_from_words(r_Z2, r->words);  // r < order < p, so this is valid.
  fiat_p256_mul(r_Z2, r_Z2, Z2_mont);

  // Take X out of Montgomery form so both sides of the comparison are plain
  // field elements.
  fiat_p256_felem X;
  fiat_p256_from_generic(X, &p->X);
  fiat_p256_from_montgomery(X, X);

  if (OPENSSL_memcmp(&r_Z2, &X, sizeof(r_Z2)) == 0) {
    return 1;
  }

  // During signing the x coefficient is reduced modulo the group order.
  // Therefore there is a small possibility, less than 1/2^128, that group_order
  // < p.x < P. in that case we need not only to compare against |r| but also to
  // compare against r+group_order.
  assert(group->field.N.width == group->order.N.width);
  EC_FELEM tmp;
  BN_ULONG carry = bn_add_words(tmp.words, r->words, group->order.N.d,
                                group->field.N.width);
  // Only retry when r+order did not overflow and is still below the field
  // prime, i.e. it is a valid field element.
  if (carry == 0 &&
      bn_less_than_words(tmp.words, group->field.N.d, group->field.N.width)) {
    fiat_p256_from_generic(r_Z2, &tmp);
    fiat_p256_mul(r_Z2, r_Z2, Z2_mont);
    if (OPENSSL_memcmp(&r_Z2, &X, sizeof(r_Z2)) == 0) {
      return 1;
    }
  }

  return 0;
}

// Method table wiring the fiat-crypto P-256 implementation into the generic
// EC_METHOD interface. Field exponentiation still falls back to the generic
// Montgomery implementation (see TODO below).
DEFINE_METHOD_FUNCTION(EC_METHOD, EC_GFp_nistp256_method) {
  out->point_get_affine_coordinates =
      ec_GFp_nistp256_point_get_affine_coordinates;
  out->add = ec_GFp_nistp256_add;
  out->dbl = ec_GFp_nistp256_dbl;
  out->mul = ec_GFp_nistp256_point_mul;
  out->mul_base = ec_GFp_nistp256_point_mul_base;
  out->mul_public = ec_GFp_nistp256_point_mul_public;
  out->felem_mul = ec_GFp_mont_felem_mul;
  out->felem_sqr = ec_GFp_mont_felem_sqr;
  out->felem_to_bytes = ec_GFp_mont_felem_to_bytes;
  out->felem_from_bytes = ec_GFp_mont_felem_from_bytes;
  out->felem_reduce = ec_GFp_mont_felem_reduce;
  // TODO(davidben): This should use the specialized field arithmetic
  // implementation, rather than the generic one.
  out->felem_exp = ec_GFp_mont_felem_exp;
  out->scalar_inv0_montgomery = ec_simple_scalar_inv0_montgomery;
  out->scalar_to_montgomery_inv_vartime =
      ec_simple_scalar_to_montgomery_inv_vartime;
  out->cmp_x_coordinate = ec_GFp_nistp256_cmp_x_coordinate;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/p256_table.h
================================================
/* Copyright 2020 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// This file is generated by make_tables.go.

// Base point pre computation
// --------------------------
//
// Two different sorts of precomputed tables are used in the following code.
// Each contain various points on the curve, where each point is three field
// elements (x, y, z).
//
// For the base point table, z is usually 1 (0 for the point at infinity).
// This table has 2 * 16 elements, starting with the following: // index | bits | point // ------+---------+------------------------------ // 0 | 0 0 0 0 | 0G // 1 | 0 0 0 1 | 1G // 2 | 0 0 1 0 | 2^64G // 3 | 0 0 1 1 | (2^64 + 1)G // 4 | 0 1 0 0 | 2^128G // 5 | 0 1 0 1 | (2^128 + 1)G // 6 | 0 1 1 0 | (2^128 + 2^64)G // 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G // 8 | 1 0 0 0 | 2^192G // 9 | 1 0 0 1 | (2^192 + 1)G // 10 | 1 0 1 0 | (2^192 + 2^64)G // 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G // 12 | 1 1 0 0 | (2^192 + 2^128)G // 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G // 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G // 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G // followed by a copy of this with each element multiplied by 2^32. // // The reason for this is so that we can clock bits into four different // locations when doing simple scalar multiplies against the base point, // and then another four locations using the second 16 elements. // // Tables for other points have table[i] = iG for i in 0 .. 16. // fiat_p256_g_pre_comp is the table of precomputed base points #if defined(OPENSSL_64_BIT) static const fiat_p256_felem fiat_p256_g_pre_comp[2][15][2] = { {{{0x79e730d418a9143c, 0x75ba95fc5fedb601, 0x79fb732b77622510, 0x18905f76a53755c6}, {0xddf25357ce95560a, 0x8b4ab8e4ba19e45c, 0xd2e88688dd21f325, 0x8571ff1825885d85}}, {{0x4f922fc516a0d2bb, 0x0d5cc16c1a623499, 0x9241cf3a57c62c8b, 0x2f5e6961fd1b667f}, {0x5c15c70bf5a01797, 0x3d20b44d60956192, 0x04911b37071fdb52, 0xf648f9168d6f0f7b}}, {{0x9e566847e137bbbc, 0xe434469e8a6a0bec, 0xb1c4276179d73463, 0x5abe0285133d0015}, {0x92aa837cc04c7dab, 0x573d9f4c43260c07, 0x0c93156278e6cc37, 0x94bb725b6b6f7383}}, {{0x62a8c244bfe20925, 0x91c19ac38fdce867, 0x5a96a5d5dd387063, 0x61d587d421d324f6}, {0xe87673a2a37173ea, 0x2384800853778b65, 0x10f8441e05bab43e, 0xfa11fe124621efbe}}, {{0x1c891f2b2cb19ffd, 0x01ba8d5bb1923c23, 0xb6d03d678ac5ca8e, 0x586eb04c1f13bedc}, {0x0c35c6e527e8ed09, 0x1e81a33c1819ede2, 0x278fd6c056c652fa, 0x19d5ac0870864f11}}, {{0x62577734d2b533d5, 
0x673b8af6a1bdddc0, 0x577e7c9aa79ec293, 0xbb6de651c3b266b1}, {0xe7e9303ab65259b3, 0xd6a0afd3d03a7480, 0xc5ac83d19b3cfc27, 0x60b4619a5d18b99b}}, {{0xbd6a38e11ae5aa1c, 0xb8b7652b49e73658, 0x0b130014ee5f87ed, 0x9d0f27b2aeebffcd}, {0xca9246317a730a55, 0x9c955b2fddbbc83a, 0x07c1dfe0ac019a71, 0x244a566d356ec48d}}, {{0x56f8410ef4f8b16a, 0x97241afec47b266a, 0x0a406b8e6d9c87c1, 0x803f3e02cd42ab1b}, {0x7f0309a804dbec69, 0xa83b85f73bbad05f, 0xc6097273ad8e197f, 0xc097440e5067adc1}}, {{0x846a56f2c379ab34, 0xa8ee068b841df8d1, 0x20314459176c68ef, 0xf1af32d5915f1f30}, {0x99c375315d75bd50, 0x837cffbaf72f67bc, 0x0613a41848d7723f, 0x23d0f130e2d41c8b}}, {{0xed93e225d5be5a2b, 0x6fe799835934f3c6, 0x4314092622626ffc, 0x50bbb4d97990216a}, {0x378191c6e57ec63e, 0x65422c40181dcdb2, 0x41a8099b0236e0f6, 0x2b10011801fe49c3}}, {{0xfc68b5c59b391593, 0xc385f5a2598270fc, 0x7144f3aad19adcbb, 0xdd55899983fbae0c}, {0x93b88b8e74b82ff4, 0xd2e03c4071e734c9, 0x9a7a9eaf43c0322a, 0xe6e4c551149d6041}}, {{0x5fe14bfe80ec21fe, 0xf6ce116ac255be82, 0x98bc5a072f4a5d67, 0xfad27148db7e63af}, {0x90c0b6ac29ab05b3, 0x37a9a83c4e251ae6, 0x0a7dc875c2aade7d, 0x77387de39f0e1a84}}, {{0x1e9ecc49a56c0dd7, 0xa5cffcd846086c74, 0x8f7a1408f505aece, 0xb37b85c0bef0c47e}, {0x3596b6e4cc0e6a8f, 0xfd6d4bbf6b388f23, 0xaba453fac39cef4e, 0x9c135ac8f9f628d5}}, {{0x0a1c729495c8f8be, 0x2961c4803bf362bf, 0x9e418403df63d4ac, 0xc109f9cb91ece900}, {0xc2d095d058945705, 0xb9083d96ddeb85c0, 0x84692b8d7a40449b, 0x9bc3344f2eee1ee1}}, {{0x0d5ae35642913074, 0x55491b2748a542b1, 0x469ca665b310732a, 0x29591d525f1a4cc1}, {0xe76f5b6bb84f983f, 0xbe7eef419f5f84e1, 0x1200d49680baa189, 0x6376551f18ef332c}}}, {{{0x202886024147519a, 0xd0981eac26b372f0, 0xa9d4a7caa785ebc8, 0xd953c50ddbdf58e9}, {0x9d6361ccfd590f8f, 0x72e9626b44e6c917, 0x7fd9611022eb64cf, 0x863ebb7e9eb288f3}}, {{0x4fe7ee31b0e63d34, 0xf4600572a9e54fab, 0xc0493334d5e7b5a4, 0x8589fb9206d54831}, {0xaa70f5cc6583553a, 0x0879094ae25649e5, 0xcc90450710044652, 0xebb0696d02541c4f}}, {{0xabbaa0c03b89da99, 
0xa6f2d79eb8284022, 0x27847862b81c05e8, 0x337a4b5905e54d63}, {0x3c67500d21f7794a, 0x207005b77d6d7f61, 0x0a5a378104cfd6e8, 0x0d65e0d5f4c2fbd6}}, {{0xd433e50f6d3549cf, 0x6f33696ffacd665e, 0x695bfdacce11fcb4, 0x810ee252af7c9860}, {0x65450fe17159bb2c, 0xf7dfbebe758b357b, 0x2b057e74d69fea72, 0xd485717a92731745}}, {{0xce1f69bbe83f7669, 0x09f8ae8272877d6b, 0x9548ae543244278d, 0x207755dee3c2c19c}, {0x87bd61d96fef1945, 0x18813cefb12d28c3, 0x9fbcd1d672df64aa, 0x48dc5ee57154b00d}}, {{0xef0f469ef49a3154, 0x3e85a5956e2b2e9a, 0x45aaec1eaa924a9c, 0xaa12dfc8a09e4719}, {0x26f272274df69f1d, 0xe0e4c82ca2ff5e73, 0xb9d8ce73b7a9dd44, 0x6c036e73e48ca901}}, {{0xe1e421e1a47153f0, 0xb86c3b79920418c9, 0x93bdce87705d7672, 0xf25ae793cab79a77}, {0x1f3194a36d869d0c, 0x9d55c8824986c264, 0x49fb5ea3096e945e, 0x39b8e65313db0a3e}}, {{0xe3417bc035d0b34a, 0x440b386b8327c0a7, 0x8fb7262dac0362d1, 0x2c41114ce0cdf943}, {0x2ba5cef1ad95a0b1, 0xc09b37a867d54362, 0x26d6cdd201e486c9, 0x20477abf42ff9297}}, {{0x0f121b41bc0a67d2, 0x62d4760a444d248a, 0x0e044f1d659b4737, 0x08fde365250bb4a8}, {0xaceec3da848bf287, 0xc2a62182d3369d6e, 0x3582dfdc92449482, 0x2f7e2fd2565d6cd7}}, {{0x0a0122b5178a876b, 0x51ff96ff085104b4, 0x050b31ab14f29f76, 0x84abb28b5f87d4e6}, {0xd5ed439f8270790a, 0x2d6cb59d85e3f46b, 0x75f55c1b6c1e2212, 0xe5436f6717655640}}, {{0xc2965ecc9aeb596d, 0x01ea03e7023c92b4, 0x4704b4b62e013961, 0x0ca8fd3f905ea367}, {0x92523a42551b2b61, 0x1eb7a89c390fcd06, 0xe7f1d2be0392a63e, 0x96dca2644ddb0c33}}, {{0x231c210e15339848, 0xe87a28e870778c8d, 0x9d1de6616956e170, 0x4ac3c9382bb09c0b}, {0x19be05516998987d, 0x8b2376c4ae09f4d6, 0x1de0b7651a3f933d, 0x380d94c7e39705f4}}, {{0x3685954b8c31c31d, 0x68533d005bf21a0c, 0x0bd7626e75c79ec9, 0xca17754742c69d54}, {0xcc6edafff6d2dbb2, 0xfd0d8cbd174a9d18, 0x875e8793aa4578e8, 0xa976a7139cab2ce6}}, {{0xce37ab11b43ea1db, 0x0a7ff1a95259d292, 0x851b02218f84f186, 0xa7222beadefaad13}, {0xa2ac78ec2b0a9144, 0x5a024051f2fa59c5, 0x91d1eca56147ce38, 0xbe94d523bc2ac690}}, {{0x2d8daefd79ec1a0f, 
0x3bbcd6fdceb39c97, 0xf5575ffc58f61a95, 0xdbd986c4adf7b420}, {0x81aa881415f39eb7, 0x6ee2fcf5b98d976c, 0x5465475dcf2f717d, 0x8e24d3c46860bbd0}}}}; #else static const fiat_p256_felem fiat_p256_g_pre_comp[2][15][2] = { {{{0x18a9143c, 0x79e730d4, 0x5fedb601, 0x75ba95fc, 0x77622510, 0x79fb732b, 0xa53755c6, 0x18905f76}, {0xce95560a, 0xddf25357, 0xba19e45c, 0x8b4ab8e4, 0xdd21f325, 0xd2e88688, 0x25885d85, 0x8571ff18}}, {{0x16a0d2bb, 0x4f922fc5, 0x1a623499, 0x0d5cc16c, 0x57c62c8b, 0x9241cf3a, 0xfd1b667f, 0x2f5e6961}, {0xf5a01797, 0x5c15c70b, 0x60956192, 0x3d20b44d, 0x071fdb52, 0x04911b37, 0x8d6f0f7b, 0xf648f916}}, {{0xe137bbbc, 0x9e566847, 0x8a6a0bec, 0xe434469e, 0x79d73463, 0xb1c42761, 0x133d0015, 0x5abe0285}, {0xc04c7dab, 0x92aa837c, 0x43260c07, 0x573d9f4c, 0x78e6cc37, 0x0c931562, 0x6b6f7383, 0x94bb725b}}, {{0xbfe20925, 0x62a8c244, 0x8fdce867, 0x91c19ac3, 0xdd387063, 0x5a96a5d5, 0x21d324f6, 0x61d587d4}, {0xa37173ea, 0xe87673a2, 0x53778b65, 0x23848008, 0x05bab43e, 0x10f8441e, 0x4621efbe, 0xfa11fe12}}, {{0x2cb19ffd, 0x1c891f2b, 0xb1923c23, 0x01ba8d5b, 0x8ac5ca8e, 0xb6d03d67, 0x1f13bedc, 0x586eb04c}, {0x27e8ed09, 0x0c35c6e5, 0x1819ede2, 0x1e81a33c, 0x56c652fa, 0x278fd6c0, 0x70864f11, 0x19d5ac08}}, {{0xd2b533d5, 0x62577734, 0xa1bdddc0, 0x673b8af6, 0xa79ec293, 0x577e7c9a, 0xc3b266b1, 0xbb6de651}, {0xb65259b3, 0xe7e9303a, 0xd03a7480, 0xd6a0afd3, 0x9b3cfc27, 0xc5ac83d1, 0x5d18b99b, 0x60b4619a}}, {{0x1ae5aa1c, 0xbd6a38e1, 0x49e73658, 0xb8b7652b, 0xee5f87ed, 0x0b130014, 0xaeebffcd, 0x9d0f27b2}, {0x7a730a55, 0xca924631, 0xddbbc83a, 0x9c955b2f, 0xac019a71, 0x07c1dfe0, 0x356ec48d, 0x244a566d}}, {{0xf4f8b16a, 0x56f8410e, 0xc47b266a, 0x97241afe, 0x6d9c87c1, 0x0a406b8e, 0xcd42ab1b, 0x803f3e02}, {0x04dbec69, 0x7f0309a8, 0x3bbad05f, 0xa83b85f7, 0xad8e197f, 0xc6097273, 0x5067adc1, 0xc097440e}}, {{0xc379ab34, 0x846a56f2, 0x841df8d1, 0xa8ee068b, 0x176c68ef, 0x20314459, 0x915f1f30, 0xf1af32d5}, {0x5d75bd50, 0x99c37531, 0xf72f67bc, 0x837cffba, 0x48d7723f, 0x0613a418, 0xe2d41c8b, 0x23d0f130}}, 
{{0xd5be5a2b, 0xed93e225, 0x5934f3c6, 0x6fe79983, 0x22626ffc, 0x43140926, 0x7990216a, 0x50bbb4d9}, {0xe57ec63e, 0x378191c6, 0x181dcdb2, 0x65422c40, 0x0236e0f6, 0x41a8099b, 0x01fe49c3, 0x2b100118}}, {{0x9b391593, 0xfc68b5c5, 0x598270fc, 0xc385f5a2, 0xd19adcbb, 0x7144f3aa, 0x83fbae0c, 0xdd558999}, {0x74b82ff4, 0x93b88b8e, 0x71e734c9, 0xd2e03c40, 0x43c0322a, 0x9a7a9eaf, 0x149d6041, 0xe6e4c551}}, {{0x80ec21fe, 0x5fe14bfe, 0xc255be82, 0xf6ce116a, 0x2f4a5d67, 0x98bc5a07, 0xdb7e63af, 0xfad27148}, {0x29ab05b3, 0x90c0b6ac, 0x4e251ae6, 0x37a9a83c, 0xc2aade7d, 0x0a7dc875, 0x9f0e1a84, 0x77387de3}}, {{0xa56c0dd7, 0x1e9ecc49, 0x46086c74, 0xa5cffcd8, 0xf505aece, 0x8f7a1408, 0xbef0c47e, 0xb37b85c0}, {0xcc0e6a8f, 0x3596b6e4, 0x6b388f23, 0xfd6d4bbf, 0xc39cef4e, 0xaba453fa, 0xf9f628d5, 0x9c135ac8}}, {{0x95c8f8be, 0x0a1c7294, 0x3bf362bf, 0x2961c480, 0xdf63d4ac, 0x9e418403, 0x91ece900, 0xc109f9cb}, {0x58945705, 0xc2d095d0, 0xddeb85c0, 0xb9083d96, 0x7a40449b, 0x84692b8d, 0x2eee1ee1, 0x9bc3344f}}, {{0x42913074, 0x0d5ae356, 0x48a542b1, 0x55491b27, 0xb310732a, 0x469ca665, 0x5f1a4cc1, 0x29591d52}, {0xb84f983f, 0xe76f5b6b, 0x9f5f84e1, 0xbe7eef41, 0x80baa189, 0x1200d496, 0x18ef332c, 0x6376551f}}}, {{{0x4147519a, 0x20288602, 0x26b372f0, 0xd0981eac, 0xa785ebc8, 0xa9d4a7ca, 0xdbdf58e9, 0xd953c50d}, {0xfd590f8f, 0x9d6361cc, 0x44e6c917, 0x72e9626b, 0x22eb64cf, 0x7fd96110, 0x9eb288f3, 0x863ebb7e}}, {{0xb0e63d34, 0x4fe7ee31, 0xa9e54fab, 0xf4600572, 0xd5e7b5a4, 0xc0493334, 0x06d54831, 0x8589fb92}, {0x6583553a, 0xaa70f5cc, 0xe25649e5, 0x0879094a, 0x10044652, 0xcc904507, 0x02541c4f, 0xebb0696d}}, {{0x3b89da99, 0xabbaa0c0, 0xb8284022, 0xa6f2d79e, 0xb81c05e8, 0x27847862, 0x05e54d63, 0x337a4b59}, {0x21f7794a, 0x3c67500d, 0x7d6d7f61, 0x207005b7, 0x04cfd6e8, 0x0a5a3781, 0xf4c2fbd6, 0x0d65e0d5}}, {{0x6d3549cf, 0xd433e50f, 0xfacd665e, 0x6f33696f, 0xce11fcb4, 0x695bfdac, 0xaf7c9860, 0x810ee252}, {0x7159bb2c, 0x65450fe1, 0x758b357b, 0xf7dfbebe, 0xd69fea72, 0x2b057e74, 0x92731745, 0xd485717a}}, {{0xe83f7669, 
0xce1f69bb, 0x72877d6b, 0x09f8ae82, 0x3244278d, 0x9548ae54, 0xe3c2c19c, 0x207755de}, {0x6fef1945, 0x87bd61d9, 0xb12d28c3, 0x18813cef, 0x72df64aa, 0x9fbcd1d6, 0x7154b00d, 0x48dc5ee5}}, {{0xf49a3154, 0xef0f469e, 0x6e2b2e9a, 0x3e85a595, 0xaa924a9c, 0x45aaec1e, 0xa09e4719, 0xaa12dfc8}, {0x4df69f1d, 0x26f27227, 0xa2ff5e73, 0xe0e4c82c, 0xb7a9dd44, 0xb9d8ce73, 0xe48ca901, 0x6c036e73}}, {{0xa47153f0, 0xe1e421e1, 0x920418c9, 0xb86c3b79, 0x705d7672, 0x93bdce87, 0xcab79a77, 0xf25ae793}, {0x6d869d0c, 0x1f3194a3, 0x4986c264, 0x9d55c882, 0x096e945e, 0x49fb5ea3, 0x13db0a3e, 0x39b8e653}}, {{0x35d0b34a, 0xe3417bc0, 0x8327c0a7, 0x440b386b, 0xac0362d1, 0x8fb7262d, 0xe0cdf943, 0x2c41114c}, {0xad95a0b1, 0x2ba5cef1, 0x67d54362, 0xc09b37a8, 0x01e486c9, 0x26d6cdd2, 0x42ff9297, 0x20477abf}}, {{0xbc0a67d2, 0x0f121b41, 0x444d248a, 0x62d4760a, 0x659b4737, 0x0e044f1d, 0x250bb4a8, 0x08fde365}, {0x848bf287, 0xaceec3da, 0xd3369d6e, 0xc2a62182, 0x92449482, 0x3582dfdc, 0x565d6cd7, 0x2f7e2fd2}}, {{0x178a876b, 0x0a0122b5, 0x085104b4, 0x51ff96ff, 0x14f29f76, 0x050b31ab, 0x5f87d4e6, 0x84abb28b}, {0x8270790a, 0xd5ed439f, 0x85e3f46b, 0x2d6cb59d, 0x6c1e2212, 0x75f55c1b, 0x17655640, 0xe5436f67}}, {{0x9aeb596d, 0xc2965ecc, 0x023c92b4, 0x01ea03e7, 0x2e013961, 0x4704b4b6, 0x905ea367, 0x0ca8fd3f}, {0x551b2b61, 0x92523a42, 0x390fcd06, 0x1eb7a89c, 0x0392a63e, 0xe7f1d2be, 0x4ddb0c33, 0x96dca264}}, {{0x15339848, 0x231c210e, 0x70778c8d, 0xe87a28e8, 0x6956e170, 0x9d1de661, 0x2bb09c0b, 0x4ac3c938}, {0x6998987d, 0x19be0551, 0xae09f4d6, 0x8b2376c4, 0x1a3f933d, 0x1de0b765, 0xe39705f4, 0x380d94c7}}, {{0x8c31c31d, 0x3685954b, 0x5bf21a0c, 0x68533d00, 0x75c79ec9, 0x0bd7626e, 0x42c69d54, 0xca177547}, {0xf6d2dbb2, 0xcc6edaff, 0x174a9d18, 0xfd0d8cbd, 0xaa4578e8, 0x875e8793, 0x9cab2ce6, 0xa976a713}}, {{0xb43ea1db, 0xce37ab11, 0x5259d292, 0x0a7ff1a9, 0x8f84f186, 0x851b0221, 0xdefaad13, 0xa7222bea}, {0x2b0a9144, 0xa2ac78ec, 0xf2fa59c5, 0x5a024051, 0x6147ce38, 0x91d1eca5, 0xbc2ac690, 0xbe94d523}}, {{0x79ec1a0f, 0x2d8daefd, 
0xceb39c97, 0x3bbcd6fd, 0x58f61a95, 0xf5575ffc, 0xadf7b420, 0xdbd986c4}, {0x15f39eb7, 0x81aa8814, 0xb98d976c, 0x6ee2fcf5, 0xcf2f717d, 0x5465475d, 0x6860bbd0, 0x8e24d3c4}}}}; #endif ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/scalar.cc.inc ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include "../../internal.h" #include "../bn/internal.h" #include "internal.h" int ec_bignum_to_scalar(const EC_GROUP *group, EC_SCALAR *out, const BIGNUM *in) { // Scalars, which are often secret, must be reduced modulo the order. Those // that are not will be discarded, so leaking the result of the comparison is // safe. 
if (!bn_copy_words(out->words, group->order.N.width, in) ||
      !constant_time_declassify_int(bn_less_than_words(
          out->words, group->order.N.d, group->order.N.width))) {
    OPENSSL_PUT_ERROR(EC, EC_R_INVALID_SCALAR);
    return 0;
  }
  return 1;
}

// ec_scalar_equal_vartime compares |a| and |b| in variable time; only use it
// when both scalars are public.
int ec_scalar_equal_vartime(const EC_GROUP *group, const EC_SCALAR *a,
                            const EC_SCALAR *b) {
  return OPENSSL_memcmp(a->words, b->words,
                        group->order.N.width * sizeof(BN_ULONG)) == 0;
}

// ec_scalar_is_zero returns one if |a| is zero. The OR-fold over all words
// keeps the scan independent of the value itself.
int ec_scalar_is_zero(const EC_GROUP *group, const EC_SCALAR *a) {
  BN_ULONG mask = 0;
  for (int i = 0; i < group->order.N.width; i++) {
    mask |= a->words[i];
  }
  return mask == 0;
}

// ec_random_scalar sets |out| to a random value in [0, order).
int ec_random_scalar(const EC_GROUP *group, EC_SCALAR *out,
                     const uint8_t additional_data[32]) {
  return bn_rand_range_words(out->words, 0, group->order.N.d,
                             group->order.N.width, additional_data);
}

// ec_random_nonzero_scalar sets |out| to a random value in [1, order).
int ec_random_nonzero_scalar(const EC_GROUP *group, EC_SCALAR *out,
                             const uint8_t additional_data[32]) {
  return bn_rand_range_words(out->words, 1, group->order.N.d,
                             group->order.N.width, additional_data);
}

// ec_scalar_to_bytes serializes |in| as a big-endian string of exactly
// BN_num_bytes(order) bytes, writing the length to |*out_len|.
void ec_scalar_to_bytes(const EC_GROUP *group, uint8_t *out, size_t *out_len,
                        const EC_SCALAR *in) {
  size_t len = BN_num_bytes(&group->order.N);
  bn_words_to_big_endian(out, len, in->words, group->order.N.width);
  *out_len = len;
}

// ec_scalar_from_bytes parses a big-endian string of exactly the order's byte
// length and rejects values that are not fully reduced modulo the order.
int ec_scalar_from_bytes(const EC_GROUP *group, EC_SCALAR *out,
                         const uint8_t *in, size_t len) {
  if (len != BN_num_bytes(&group->order.N)) {
    OPENSSL_PUT_ERROR(EC, EC_R_INVALID_SCALAR);
    return 0;
  }

  bn_big_endian_to_words(out->words, group->order.N.width, in, len);

  if (!bn_less_than_words(out->words, group->order.N.d,
                          group->order.N.width)) {
    OPENSSL_PUT_ERROR(EC, EC_R_INVALID_SCALAR);
    return 0;
  }

  return 1;
}

void ec_scalar_reduce(const EC_GROUP *group, EC_SCALAR *out,
                      const BN_ULONG *words, size_t num) {
  // Convert "from" Montgomery form so the value is reduced modulo the order.
  bn_from_montgomery_small(out->words, group->order.N.width, words, num,
                           &group->order);
  // Convert "to" Montgomery form to remove the R^-1 factor added.
ec_scalar_to_montgomery(group, out, out);
}

// ec_scalar_add sets |r| = |a| + |b| modulo the group order. The scratch
// buffer is cleansed because scalars are typically secret.
void ec_scalar_add(const EC_GROUP *group, EC_SCALAR *r, const EC_SCALAR *a,
                   const EC_SCALAR *b) {
  const BIGNUM *order = &group->order.N;
  BN_ULONG tmp[EC_MAX_WORDS];
  bn_mod_add_words(r->words, a->words, b->words, order->d, tmp, order->width);
  OPENSSL_cleanse(tmp, sizeof(tmp));
}

// ec_scalar_sub sets |r| = |a| - |b| modulo the group order.
void ec_scalar_sub(const EC_GROUP *group, EC_SCALAR *r, const EC_SCALAR *a,
                   const EC_SCALAR *b) {
  const BIGNUM *order = &group->order.N;
  BN_ULONG tmp[EC_MAX_WORDS];
  bn_mod_sub_words(r->words, a->words, b->words, order->d, tmp, order->width);
  OPENSSL_cleanse(tmp, sizeof(tmp));
}

// ec_scalar_neg sets |r| = -|a| modulo the group order, computed as 0 - |a|.
void ec_scalar_neg(const EC_GROUP *group, EC_SCALAR *r, const EC_SCALAR *a) {
  EC_SCALAR zero;
  OPENSSL_memset(&zero, 0, sizeof(EC_SCALAR));
  ec_scalar_sub(group, r, &zero, a);
}

// ec_scalar_select sets |out| to |a| when |mask| is all ones and |b| when it
// is all zeros, without branching on |mask|.
void ec_scalar_select(const EC_GROUP *group, EC_SCALAR *out, BN_ULONG mask,
                      const EC_SCALAR *a, const EC_SCALAR *b) {
  const BIGNUM *order = &group->order.N;
  bn_select_words(out->words, mask, a->words, b->words, order->width);
}

// ec_scalar_to_montgomery converts |a| into the Montgomery domain (mod order).
void ec_scalar_to_montgomery(const EC_GROUP *group, EC_SCALAR *r,
                             const EC_SCALAR *a) {
  const BIGNUM *order = &group->order.N;
  bn_to_montgomery_small(r->words, a->words, order->width, &group->order);
}

// ec_scalar_from_montgomery converts |a| out of the Montgomery domain.
void ec_scalar_from_montgomery(const EC_GROUP *group, EC_SCALAR *r,
                               const EC_SCALAR *a) {
  const BIGNUM *order = &group->order.N;
  bn_from_montgomery_small(r->words, order->width, a->words, order->width,
                           &group->order);
}

// ec_scalar_mul_montgomery sets |r| = |a| * |b| with all three values in the
// Montgomery domain, modulo the group order.
void ec_scalar_mul_montgomery(const EC_GROUP *group, EC_SCALAR *r,
                              const EC_SCALAR *a, const EC_SCALAR *b) {
  const BIGNUM *order = &group->order.N;
  bn_mod_mul_montgomery_small(r->words, a->words, b->words, order->width,
                              &group->order);
}

// ec_simple_scalar_inv0_montgomery sets |r| = |a|^-1 in the Montgomery domain
// (with 0 mapping to 0, per the "inv0" convention of the underlying helper).
void ec_simple_scalar_inv0_montgomery(const EC_GROUP *group, EC_SCALAR *r,
                                      const EC_SCALAR *a) {
  const BIGNUM *order = &group->order.N;
  bn_mod_inverse0_prime_mont_small(r->words, a->words, order->width,
                                   &group->order);
}

int ec_simple_scalar_to_montgomery_inv_vartime(const EC_GROUP *group,
                                               EC_SCALAR *r,
                                               const EC_SCALAR *a) {
  if
(ec_scalar_is_zero(group, a)) { return 0; } // This implementation (in fact) runs in constant time, // even though for this interface it is not mandatory. // r = a^-1 in the Montgomery domain. This is // |ec_scalar_to_montgomery| followed by |ec_scalar_inv0_montgomery|, but // |ec_scalar_inv0_montgomery| followed by |ec_scalar_from_montgomery| is // equivalent and slightly more efficient. ec_scalar_inv0_montgomery(group, r, a); ec_scalar_from_montgomery(group, r, r); return 1; } void ec_scalar_inv0_montgomery(const EC_GROUP *group, EC_SCALAR *r, const EC_SCALAR *a) { group->meth->scalar_inv0_montgomery(group, r, a); } int ec_scalar_to_montgomery_inv_vartime(const EC_GROUP *group, EC_SCALAR *r, const EC_SCALAR *a) { return group->meth->scalar_to_montgomery_inv_vartime(group, r, a); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/simple.cc.inc ================================================ /* * Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "internal.h" #include "../../internal.h" // Most method functions in this file are designed to work with non-trivial // representations of field elements if necessary (see ecp_mont.c): while // standard modular addition and subtraction are used, the field_mul and // field_sqr methods will be used for multiplication, and field_encode and // field_decode (if defined) will be used for converting between // representations. // // Functions here specifically assume that if a non-trivial representation is // used, it is a Montgomery representation (i.e. 
// 'encoding' means multiplying by some factor R).

// Installs the curve parameters p, a, b on |group|. p must be an odd prime of
// more than two bits. The felem forms of a, b, and the constant one are
// cached, and |a_is_minus3| records whether a == p - 3 (which admits a
// cheaper formula elsewhere).
int ec_GFp_simple_group_set_curve(EC_GROUP *group, const BIGNUM *p,
                                  const BIGNUM *a, const BIGNUM *b,
                                  BN_CTX *ctx) {
  // p must be a prime > 3
  if (BN_num_bits(p) <= 2 || !BN_is_odd(p)) {
    OPENSSL_PUT_ERROR(EC, EC_R_INVALID_FIELD);
    return 0;
  }

  int ret = 0;
  BN_CTX_start(ctx);
  BIGNUM *tmp = BN_CTX_get(ctx);
  if (tmp == NULL) {
    goto err;
  }

  if (!BN_MONT_CTX_set(&group->field, p, ctx) ||
      !ec_bignum_to_felem(group, &group->a, a) ||
      !ec_bignum_to_felem(group, &group->b, b) ||
      // Reuse Z from the generator to cache the value one.
      !ec_bignum_to_felem(group, &group->generator.raw.Z, BN_value_one())) {
    goto err;
  }

  // group->a_is_minus3: test whether a + 3 == p.
  if (!BN_copy(tmp, a) || !BN_add_word(tmp, 3)) {
    goto err;
  }
  group->a_is_minus3 = (0 == BN_cmp(tmp, &group->field.N));

  ret = 1;

err:
  BN_CTX_end(ctx);
  return ret;
}

// Copies the requested curve parameters out of |group|; NULL outputs are
// skipped.
int ec_GFp_simple_group_get_curve(const EC_GROUP *group, BIGNUM *p, BIGNUM *a,
                                  BIGNUM *b) {
  if ((p != NULL && !BN_copy(p, &group->field.N)) ||
      (a != NULL && !ec_felem_to_bignum(group, a, &group->a)) ||
      (b != NULL && !ec_felem_to_bignum(group, b, &group->b))) {
    return 0;
  }
  return 1;
}

// Zero-initializes all three Jacobian coordinates. A zero Z marks the point
// at infinity.
void ec_GFp_simple_point_init(EC_JACOBIAN *point) {
  OPENSSL_memset(&point->X, 0, sizeof(EC_FELEM));
  OPENSSL_memset(&point->Y, 0, sizeof(EC_FELEM));
  OPENSSL_memset(&point->Z, 0, sizeof(EC_FELEM));
}

// Copies all three coordinates from |src| to |dest|.
void ec_GFp_simple_point_copy(EC_JACOBIAN *dest, const EC_JACOBIAN *src) {
  OPENSSL_memcpy(&dest->X, &src->X, sizeof(EC_FELEM));
  OPENSSL_memcpy(&dest->Y, &src->Y, sizeof(EC_FELEM));
  OPENSSL_memcpy(&dest->Z, &src->Z, sizeof(EC_FELEM));
}

void ec_GFp_simple_point_set_to_infinity(const EC_GROUP *group,
                                         EC_JACOBIAN *point) {
  // Although it is strictly only necessary to zero Z, we zero the entire
  // point in case |point| was stack-allocated and yet to be initialized.
  ec_GFp_simple_point_init(point);
}

// Negates |point| in place. In Jacobian coordinates only Y changes sign.
void ec_GFp_simple_invert(const EC_GROUP *group, EC_JACOBIAN *point) {
  ec_felem_neg(group, &point->Y, &point->Y);
}

// Returns one iff |point| is the point at infinity (Z == 0).
int ec_GFp_simple_is_at_infinity(const EC_GROUP *group,
                                 const EC_JACOBIAN *point) {
  return ec_felem_non_zero_mask(group, &point->Z) == 0;
}

int ec_GFp_simple_is_on_curve(const EC_GROUP *group,
                              const EC_JACOBIAN *point) {
  // We have a curve defined by a Weierstrass equation
  //     y^2 = x^3 + a*x + b.
  // The point to consider is given in Jacobian projective coordinates
  // where (X, Y, Z) represents (x, y) = (X/Z^2, Y/Z^3).
  // Substituting this and multiplying by Z^6 transforms the above equation
  // into
  //     Y^2 = X^3 + a*X*Z^4 + b*Z^6.
  // To test this, we add up the right-hand side in 'rh'.
  //
  // This function may be used when double-checking the secret result of a
  // point multiplication, so we proceed in constant-time.
  void (*const felem_mul)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a,
                          const EC_FELEM *b) = group->meth->felem_mul;
  void (*const felem_sqr)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a) =
      group->meth->felem_sqr;

  // rh := X^2
  EC_FELEM rh;
  felem_sqr(group, &rh, &point->X);

  EC_FELEM tmp, Z4, Z6;
  felem_sqr(group, &tmp, &point->Z);
  felem_sqr(group, &Z4, &tmp);
  felem_mul(group, &Z6, &Z4, &tmp);

  // rh := rh + a*Z^4. (The branch is on a public curve parameter, so it does
  // not leak secret data.)
  if (group->a_is_minus3) {
    // Compute 3*Z^4 by two additions and subtract.
    ec_felem_add(group, &tmp, &Z4, &Z4);
    ec_felem_add(group, &tmp, &tmp, &Z4);
    ec_felem_sub(group, &rh, &rh, &tmp);
  } else {
    felem_mul(group, &tmp, &Z4, &group->a);
    ec_felem_add(group, &rh, &rh, &tmp);
  }

  // rh := (rh + a*Z^4)*X
  felem_mul(group, &rh, &rh, &point->X);

  // rh := rh + b*Z^6
  felem_mul(group, &tmp, &group->b, &Z6);
  ec_felem_add(group, &rh, &rh, &tmp);

  // 'lh' := Y^2
  felem_sqr(group, &tmp, &point->Y);

  ec_felem_sub(group, &tmp, &tmp, &rh);
  BN_ULONG not_equal = ec_felem_non_zero_mask(group, &tmp);

  // If Z = 0, the point is infinity, which is always on the curve.
  BN_ULONG not_infinity = ec_felem_non_zero_mask(group, &point->Z);

  return 1 & ~(not_infinity & not_equal);
}

int ec_GFp_simple_points_equal(const EC_GROUP *group, const EC_JACOBIAN *a,
                               const EC_JACOBIAN *b) {
  // This function is implemented in constant-time for two reasons. First,
  // although EC points are usually public, their Jacobian Z coordinates may
  // be secret, or at least are not obviously public. Second, more complex
  // protocols will sometimes manipulate secret points.
  //
  // This does mean that we pay a 6M+2S Jacobian comparison when comparing two
  // publicly affine points costs no field operations at all. If needed, we
  // can restore this optimization by keeping better track of affine vs.
  // Jacobian forms. See https://crbug.com/boringssl/326.

  // If neither |a| or |b| is infinity, we have to decide whether
  //     (X_a/Z_a^2, Y_a/Z_a^3) = (X_b/Z_b^2, Y_b/Z_b^3),
  // or equivalently, whether
  //     (X_a*Z_b^2, Y_a*Z_b^3) = (X_b*Z_a^2, Y_b*Z_a^3).
  void (*const felem_mul)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a,
                          const EC_FELEM *b) = group->meth->felem_mul;
  void (*const felem_sqr)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a) =
      group->meth->felem_sqr;

  EC_FELEM tmp1, tmp2, Za23, Zb23;
  felem_sqr(group, &Zb23, &b->Z);         // Zb23 = Z_b^2
  felem_mul(group, &tmp1, &a->X, &Zb23);  // tmp1 = X_a * Z_b^2
  felem_sqr(group, &Za23, &a->Z);         // Za23 = Z_a^2
  felem_mul(group, &tmp2, &b->X, &Za23);  // tmp2 = X_b * Z_a^2
  ec_felem_sub(group, &tmp1, &tmp1, &tmp2);
  const BN_ULONG x_not_equal = ec_felem_non_zero_mask(group, &tmp1);

  felem_mul(group, &Zb23, &Zb23, &b->Z);  // Zb23 = Z_b^3
  felem_mul(group, &tmp1, &a->Y, &Zb23);  // tmp1 = Y_a * Z_b^3
  felem_mul(group, &Za23, &Za23, &a->Z);  // Za23 = Z_a^3
  felem_mul(group, &tmp2, &b->Y, &Za23);  // tmp2 = Y_b * Z_a^3
  ec_felem_sub(group, &tmp1, &tmp1, &tmp2);
  const BN_ULONG y_not_equal = ec_felem_non_zero_mask(group, &tmp1);
  const BN_ULONG x_and_y_equal = ~(x_not_equal | y_not_equal);

  const BN_ULONG a_not_infinity = ec_felem_non_zero_mask(group, &a->Z);
  const BN_ULONG b_not_infinity = ec_felem_non_zero_mask(group, &b->Z);
  const BN_ULONG a_and_b_infinity = ~(a_not_infinity | b_not_infinity);

  const BN_ULONG equal =
      a_and_b_infinity | (a_not_infinity & b_not_infinity & x_and_y_equal);
  return equal & 1;
}

// Constant-time equality between an affine point |a| and a Jacobian point
// |b|. Infinity on the |b| side compares unequal to any affine point.
int ec_affine_jacobian_equal(const EC_GROUP *group, const EC_AFFINE *a,
                             const EC_JACOBIAN *b) {
  // If |b| is not infinity, we have to decide whether
  //     (X_a, Y_a) = (X_b/Z_b^2, Y_b/Z_b^3),
  // or equivalently, whether
  //     (X_a*Z_b^2, Y_a*Z_b^3) = (X_b, Y_b).
  void (*const felem_mul)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a,
                          const EC_FELEM *b) = group->meth->felem_mul;
  void (*const felem_sqr)(const EC_GROUP *, EC_FELEM *r, const EC_FELEM *a) =
      group->meth->felem_sqr;

  EC_FELEM tmp, Zb2;
  felem_sqr(group, &Zb2, &b->Z);        // Zb2 = Z_b^2
  felem_mul(group, &tmp, &a->X, &Zb2);  // tmp = X_a * Z_b^2
  ec_felem_sub(group, &tmp, &tmp, &b->X);
  const BN_ULONG x_not_equal = ec_felem_non_zero_mask(group, &tmp);

  felem_mul(group, &tmp, &a->Y, &Zb2);  // tmp = Y_a * Z_b^2
  felem_mul(group, &tmp, &tmp, &b->Z);  // tmp = Y_a * Z_b^3
  ec_felem_sub(group, &tmp, &tmp, &b->Y);
  const BN_ULONG y_not_equal = ec_felem_non_zero_mask(group, &tmp);
  const BN_ULONG x_and_y_equal = ~(x_not_equal | y_not_equal);

  const BN_ULONG b_not_infinity = ec_felem_non_zero_mask(group, &b->Z);

  const BN_ULONG equal = b_not_infinity & x_and_y_equal;
  return equal & 1;
}

// Returns one iff the x-coordinate of |p|, reduced to a scalar, equals |r|.
// Used for signature verification, where inputs are public (vartime compare).
int ec_GFp_simple_cmp_x_coordinate(const EC_GROUP *group, const EC_JACOBIAN *p,
                                   const EC_SCALAR *r) {
  if (ec_GFp_simple_is_at_infinity(group, p)) {
    // |ec_get_x_coordinate_as_scalar| will check this internally, but this
    // way we do not push to the error queue.
    return 0;
  }

  EC_SCALAR x;
  return ec_get_x_coordinate_as_scalar(group, &x, p) &&
         ec_scalar_equal_vartime(group, &x, r);
}

// Serializes a field element as a fixed-width big-endian string of
// BN_num_bytes(field) bytes.
void ec_GFp_simple_felem_to_bytes(const EC_GROUP *group, uint8_t *out,
                                  size_t *out_len, const EC_FELEM *in) {
  size_t len = BN_num_bytes(&group->field.N);
  bn_words_to_big_endian(out, len, in->words, group->field.N.width);
  *out_len = len;
}

// Parses a fixed-width big-endian field element, rejecting wrong lengths and
// unreduced values.
int ec_GFp_simple_felem_from_bytes(const EC_GROUP *group, EC_FELEM *out,
                                   const uint8_t *in, size_t len) {
  if (len != BN_num_bytes(&group->field.N)) {
    OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
    return 0;
  }

  bn_big_endian_to_words(out->words, group->field.N.width, in, len);

  if (!bn_less_than_words(out->words, group->field.N.d,
                          group->field.N.width)) {
    OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
    return 0;
  }

  return 1;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/simple_mul.cc.inc
================================================
/* Copyright 2018 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

// NOTE(review): the header names after the following #include directives
// appear to have been stripped during extraction; restore them from upstream
// BoringSSL before compiling.
#include
#include

#include "internal.h"
#include "../bn/internal.h"
#include "../../internal.h"


// Constant-time scalar multiplication: r := scalar * p.
void ec_GFp_mont_mul(const EC_GROUP *group, EC_JACOBIAN *r,
                     const EC_JACOBIAN *p, const EC_SCALAR *scalar) {
  // This is a generic implementation for uncommon curves that do not
  // warrant a tuned one. It uses unsigned digits so that the doubling case in
  // |ec_GFp_mont_add| is always unreachable, erring on safety and simplicity.

  // Compute a table of the first 32 multiples of |p| (including infinity).
  EC_JACOBIAN precomp[32];
  ec_GFp_simple_point_set_to_infinity(group, &precomp[0]);
  ec_GFp_simple_point_copy(&precomp[1], p);
  for (size_t j = 2; j < OPENSSL_ARRAY_SIZE(precomp); j++) {
    // Odd entries come from an addition with the base; even entries from
    // doubling the half-index entry.
    if (j & 1) {
      ec_GFp_mont_add(group, &precomp[j], &precomp[1], &precomp[j - 1]);
    } else {
      ec_GFp_mont_dbl(group, &precomp[j], &precomp[j / 2]);
    }
  }

  // Divide bits in |scalar| into windows.
  unsigned bits = EC_GROUP_order_bits(group);
  int r_is_at_infinity = 1;
  // Downward loop over an unsigned index: it terminates when i wraps past
  // zero and so fails i < bits.
  for (unsigned i = bits - 1; i < bits; i--) {
    if (!r_is_at_infinity) {
      ec_GFp_mont_dbl(group, r, r);
    }
    if (i % 5 == 0) {
      // Compute the next window value.
      const size_t width = group->order.N.width;
      uint8_t window = bn_is_bit_set_words(scalar->words, width, i + 4) << 4;
      window |= bn_is_bit_set_words(scalar->words, width, i + 3) << 3;
      window |= bn_is_bit_set_words(scalar->words, width, i + 2) << 2;
      window |= bn_is_bit_set_words(scalar->words, width, i + 1) << 1;
      window |= bn_is_bit_set_words(scalar->words, width, i);

      // Select the entry in constant-time.
      EC_JACOBIAN tmp;
      OPENSSL_memset(&tmp, 0, sizeof(EC_JACOBIAN));
      for (size_t j = 0; j < OPENSSL_ARRAY_SIZE(precomp); j++) {
        BN_ULONG mask = constant_time_eq_w(j, window);
        ec_point_select(group, &tmp, mask, &precomp[j], &tmp);
      }

      if (r_is_at_infinity) {
        ec_GFp_simple_point_copy(r, &tmp);
        r_is_at_infinity = 0;
      } else {
        ec_GFp_mont_add(group, r, r, &tmp);
      }
    }
  }
  if (r_is_at_infinity) {
    ec_GFp_simple_point_set_to_infinity(group, r);
  }
}

// r := scalar * G, via the generic multiplier above.
void ec_GFp_mont_mul_base(const EC_GROUP *group, EC_JACOBIAN *r,
                          const EC_SCALAR *scalar) {
  ec_GFp_mont_mul(group, r, &group->generator.raw, scalar);
}

// Fills |out[0..num)| with 0*p (infinity), 1*p, 2*p, ... using the same
// add/double scheme as |ec_GFp_mont_mul|'s table.
static void ec_GFp_mont_batch_precomp(const EC_GROUP *group, EC_JACOBIAN *out,
                                      size_t num, const EC_JACOBIAN *p) {
  assert(num > 1);
  ec_GFp_simple_point_set_to_infinity(group, &out[0]);
  ec_GFp_simple_point_copy(&out[1], p);
  for (size_t j = 2; j < num; j++) {
    if (j & 1) {
      ec_GFp_mont_add(group, &out[j], &out[1], &out[j - 1]);
    } else {
      ec_GFp_mont_dbl(group, &out[j], &out[j / 2]);
    }
  }
}

// Extracts the signed window of |scalar| at bit position |i| and selects the
// corresponding precomputed multiple in constant time, negating when the
// recoded digit is negative.
static void ec_GFp_mont_batch_get_window(const EC_GROUP *group,
                                         EC_JACOBIAN *out,
                                         const EC_JACOBIAN precomp[17],
                                         const EC_SCALAR *scalar, unsigned i) {
  const size_t width = group->order.N.width;
  // Six bits feed the recoder: five window bits plus the adjacent lower bit.
  uint8_t window = bn_is_bit_set_words(scalar->words, width, i + 4) << 5;
  window |= bn_is_bit_set_words(scalar->words, width, i + 3) << 4;
  window |= bn_is_bit_set_words(scalar->words, width, i + 2) << 3;
  window |= bn_is_bit_set_words(scalar->words, width, i + 1) << 2;
  window |= bn_is_bit_set_words(scalar->words, width, i) << 1;
  if (i > 0) {
    window |= bn_is_bit_set_words(scalar->words, width, i - 1);
  }
  crypto_word_t sign, digit;
  ec_GFp_nistp_recode_scalar_bits(&sign, &digit, window);

  // Select the entry in constant-time.
  OPENSSL_memset(out, 0, sizeof(EC_JACOBIAN));
  for (size_t j = 0; j < 17; j++) {
    BN_ULONG mask = constant_time_eq_w(j, digit);
    ec_point_select(group, out, mask, &precomp[j], out);
  }

  // Negate if necessary: expand |sign| (0 or 1) to an all-zeros/all-ones
  // mask and select between Y and -Y without branching.
  EC_FELEM neg_Y;
  ec_felem_neg(group, &neg_Y, &out->Y);
  crypto_word_t sign_mask = sign;
  sign_mask = 0u - sign_mask;
  ec_felem_select(group, &out->Y, sign_mask, &neg_Y, &out->Y);
}

// r := scalar0*p0 + scalar1*p1 (+ scalar2*p2 when p2 != NULL), sharing the
// doubling passes between the terms.
void ec_GFp_mont_mul_batch(const EC_GROUP *group, EC_JACOBIAN *r,
                           const EC_JACOBIAN *p0, const EC_SCALAR *scalar0,
                           const EC_JACOBIAN *p1, const EC_SCALAR *scalar1,
                           const EC_JACOBIAN *p2, const EC_SCALAR *scalar2) {
  EC_JACOBIAN precomp[3][17];
  ec_GFp_mont_batch_precomp(group, precomp[0], 17, p0);
  ec_GFp_mont_batch_precomp(group, precomp[1], 17, p1);
  if (p2 != NULL) {
    ec_GFp_mont_batch_precomp(group, precomp[2], 17, p2);
  }

  // Divide bits in |scalar| into windows.
  unsigned bits = EC_GROUP_order_bits(group);
  int r_is_at_infinity = 1;
  // Downward unsigned loop from |bits| to 0 inclusive; terminates on wrap.
  for (unsigned i = bits; i <= bits; i--) {
    if (!r_is_at_infinity) {
      ec_GFp_mont_dbl(group, r, r);
    }
    if (i % 5 == 0) {
      EC_JACOBIAN tmp;
      ec_GFp_mont_batch_get_window(group, &tmp, precomp[0], scalar0, i);
      if (r_is_at_infinity) {
        ec_GFp_simple_point_copy(r, &tmp);
        r_is_at_infinity = 0;
      } else {
        ec_GFp_mont_add(group, r, r, &tmp);
      }

      ec_GFp_mont_batch_get_window(group, &tmp, precomp[1], scalar1, i);
      ec_GFp_mont_add(group, r, r, &tmp);

      if (p2 != NULL) {
        ec_GFp_mont_batch_get_window(group, &tmp, precomp[2], scalar2, i);
        ec_GFp_mont_add(group, r, r, &tmp);
      }
    }
  }
  if (r_is_at_infinity) {
    ec_GFp_simple_point_set_to_infinity(group, r);
  }
}

// Number of bit positions each comb tooth is separated by: the field degree
// divided (rounding up) by the comb size.
static unsigned ec_GFp_mont_comb_stride(const EC_GROUP *group) {
  return (EC_GROUP_get_degree(group) + EC_MONT_PRECOMP_COMB_SIZE - 1) /
         EC_MONT_PRECOMP_COMB_SIZE;
}

int ec_GFp_mont_init_precomp(const EC_GROUP *group, EC_PRECOMP *out,
                             const EC_JACOBIAN *p) {
  // comb[i - 1] stores the ith element of the comb. That is, if i is
  // b4 * 2^4 + b3 * 2^3 + ... + b0 * 2^0, it stores k * |p|, where k is
  // b4 * 2^(4*stride) + b3 * 2^(3*stride) + ... + b0 * 2^(0*stride). stride
  // here is |ec_GFp_mont_comb_stride|. We store at index i - 1 because the
  // 0th comb entry is always infinity.
  EC_JACOBIAN comb[(1 << EC_MONT_PRECOMP_COMB_SIZE) - 1];
  unsigned stride = ec_GFp_mont_comb_stride(group);

  // We compute the comb sequentially by the highest set bit. Initially, all
  // entries up to 2^0 are filled.
  comb[(1 << 0) - 1] = *p;
  for (unsigned i = 1; i < EC_MONT_PRECOMP_COMB_SIZE; i++) {
    // Compute entry 2^i by doubling the entry for 2^(i-1) |stride| times.
    unsigned bit = 1 << i;
    ec_GFp_mont_dbl(group, &comb[bit - 1], &comb[bit / 2 - 1]);
    for (unsigned j = 1; j < stride; j++) {
      ec_GFp_mont_dbl(group, &comb[bit - 1], &comb[bit - 1]);
    }
    // Compute entries from 2^i + 1 to 2^i + (2^i - 1) by adding entry 2^i to
    // a previous entry.
    for (unsigned j = 1; j < bit; j++) {
      ec_GFp_mont_add(group, &comb[bit + j - 1], &comb[bit - 1], &comb[j - 1]);
    }
  }

  // Store the comb in affine coordinates to shrink the table. (This reduces
  // cache pressure and makes the constant-time selects faster.)
  static_assert(OPENSSL_ARRAY_SIZE(comb) == OPENSSL_ARRAY_SIZE(out->comb),
                "comb sizes did not match");
  return ec_jacobian_to_affine_batch(group, out->comb, comb,
                                     OPENSSL_ARRAY_SIZE(comb));
}

// Gathers the comb window of |scalar| at offset |i| and selects the matching
// precomputed affine point in constant time; window zero yields infinity.
static void ec_GFp_mont_get_comb_window(const EC_GROUP *group,
                                        EC_JACOBIAN *out,
                                        const EC_PRECOMP *precomp,
                                        const EC_SCALAR *scalar, unsigned i) {
  const size_t width = group->order.N.width;
  unsigned stride = ec_GFp_mont_comb_stride(group);
  // Select the bits corresponding to the comb shifted up by |i|.
  unsigned window = 0;
  for (unsigned j = 0; j < EC_MONT_PRECOMP_COMB_SIZE; j++) {
    window |= bn_is_bit_set_words(scalar->words, width, j * stride + i) << j;
  }

  // Select precomp->comb[window - 1]. If |window| is zero, |match| will
  // always be zero, which will leave |out| at infinity.
  OPENSSL_memset(out, 0, sizeof(EC_JACOBIAN));
  for (unsigned j = 0; j < OPENSSL_ARRAY_SIZE(precomp->comb); j++) {
    BN_ULONG match = constant_time_eq_w(window, j + 1);
    ec_felem_select(group, &out->X, match, &precomp->comb[j].X, &out->X);
    ec_felem_select(group, &out->Y, match, &precomp->comb[j].Y, &out->Y);
  }
  // A selected affine point needs Z = 1; infinity keeps Z = 0.
  BN_ULONG is_infinity = constant_time_is_zero_w(window);
  ec_felem_select(group, &out->Z, is_infinity, &out->Z, ec_felem_one(group));
}

// Comb-based multi-scalar multiplication over precomputed tables:
// r := scalar0*p0 (+ scalar1*p1) (+ scalar2*p2), with p1/p2 optional.
void ec_GFp_mont_mul_precomp(const EC_GROUP *group, EC_JACOBIAN *r,
                             const EC_PRECOMP *p0, const EC_SCALAR *scalar0,
                             const EC_PRECOMP *p1, const EC_SCALAR *scalar1,
                             const EC_PRECOMP *p2, const EC_SCALAR *scalar2) {
  unsigned stride = ec_GFp_mont_comb_stride(group);
  int r_is_at_infinity = 1;
  // Downward unsigned loop; terminates when the index wraps past zero.
  for (unsigned i = stride - 1; i < stride; i--) {
    if (!r_is_at_infinity) {
      ec_GFp_mont_dbl(group, r, r);
    }

    EC_JACOBIAN tmp;
    ec_GFp_mont_get_comb_window(group, &tmp, p0, scalar0, i);
    if (r_is_at_infinity) {
      ec_GFp_simple_point_copy(r, &tmp);
      r_is_at_infinity = 0;
    } else {
      ec_GFp_mont_add(group, r, r, &tmp);
    }

    if (p1 != NULL) {
      ec_GFp_mont_get_comb_window(group, &tmp, p1, scalar1, i);
      ec_GFp_mont_add(group, r, r, &tmp);
    }

    if (p2 != NULL) {
      ec_GFp_mont_get_comb_window(group, &tmp, p2, scalar2, i);
      ec_GFp_mont_add(group, r, r, &tmp);
    }
  }

  if (r_is_at_infinity) {
    ec_GFp_simple_point_set_to_infinity(group, r);
  }
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/util.cc.inc
================================================
/* Copyright 2015 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include "internal.h" // This function looks at 5+1 scalar bits (5 current, 1 adjacent less // significant bit), and recodes them into a signed digit for use in fast point // multiplication: the use of signed rather than unsigned digits means that // fewer points need to be precomputed, given that point inversion is easy (a // precomputed point dP makes -dP available as well). // // BACKGROUND: // // Signed digits for multiplication were introduced by Booth ("A signed binary // multiplication technique", Quart. Journ. Mech. and Applied Math., vol. IV, // pt. 2 (1951), pp. 236-240), in that case for multiplication of integers. // Booth's original encoding did not generally improve the density of nonzero // digits over the binary representation, and was merely meant to simplify the // handling of signed factors given in two's complement; but it has since been // shown to be the basis of various signed-digit representations that do have // further advantages, including the wNAF, using the following general // approach: // // (1) Given a binary representation // // b_k ... b_2 b_1 b_0, // // of a nonnegative integer (b_k in {0, 1}), rewrite it in digits 0, 1, -1 // by using bit-wise subtraction as follows: // // b_k b_(k-1) ... b_2 b_1 b_0 // - b_k ... b_3 b_2 b_1 b_0 // ----------------------------------------- // s_(k+1) s_k ... s_3 s_2 s_1 s_0 // // A left-shift followed by subtraction of the original value yields a new // representation of the same value, using signed bits s_i = b_(i-1) - b_i. 
// This representation from Booth's paper has since appeared in the // literature under a variety of different names including "reversed binary // form", "alternating greedy expansion", "mutual opposite form", and // "sign-alternating {+-1}-representation". // // An interesting property is that among the nonzero bits, values 1 and -1 // strictly alternate. // // (2) Various window schemes can be applied to the Booth representation of // integers: for example, right-to-left sliding windows yield the wNAF // (a signed-digit encoding independently discovered by various researchers // in the 1990s), and left-to-right sliding windows yield a left-to-right // equivalent of the wNAF (independently discovered by various researchers // around 2004). // // To prevent leaking information through side channels in point multiplication, // we need to recode the given integer into a regular pattern: sliding windows // as in wNAFs won't do, we need their fixed-window equivalent -- which is a few // decades older: we'll be using the so-called "modified Booth encoding" due to // MacSorley ("High-speed arithmetic in binary computers", Proc. IRE, vol. 49 // (1961), pp. 67-91), in a radix-2^5 setting. That is, we always combine five // signed bits into a signed digit: // // s_(5j + 4) s_(5j + 3) s_(5j + 2) s_(5j + 1) s_(5j) // // The sign-alternating property implies that the resulting digit values are // integers from -16 to 16. // // Of course, we don't actually need to compute the signed digits s_i as an // intermediate step (that's just a nice way to see how this scheme relates // to the wNAF): a direct computation obtains the recoded digit from the // six bits b_(5j + 4) ... b_(5j - 1). // // This function takes those six bits as an integer (0 .. 63), writing the // recoded digit to *sign (0 for positive, 1 for negative) and *digit (absolute // value, in the range 0 .. 16). 
Note that this integer essentially provides // the input bits "shifted to the left" by one position: for example, the input // to compute the least significant recoded digit, given that there's no bit // b_-1, has to be b_4 b_3 b_2 b_1 b_0 0. // // DOUBLING CASE: // // Point addition formulas for short Weierstrass curves are often incomplete. // Edge cases such as P + P or P + ∞ must be handled separately. This // complicates constant-time requirements. P + ∞ cannot be avoided (any window // may be zero) and is handled with constant-time selects. P + P (where P is not // ∞) usually is not. Instead, windowing strategies are chosen to avoid this // case. Whether this happens depends on the group order. // // Let w be the window width (in this function, w = 5). The non-trivial doubling // case in single-point scalar multiplication may occur if and only if the // 2^(w-1) bit of the group order is zero. // // Note the above only holds if the scalar is fully reduced and the group order // is a prime that is much larger than 2^w. It also only holds when windows // are applied from most significant to least significant, doubling between each // window. It does not apply to more complex table strategies such as // |EC_GFp_nistz256_method|. // // PROOF: // // Let n be the group order. Let l be the number of bits needed to represent n. // Assume there exists some 0 <= k < n such that signed w-bit windowed // multiplication hits the doubling case. // // Windowed multiplication consists of iterating over groups of s_i (defined // above based on k's binary representation) from most to least significant. At // iteration i (for i = ..., 3w, 2w, w, 0, starting from the most significant // window), we: // // 1. Double the accumulator A, w times. Let A_i be the value of A at this // point. // // 2. Set A to T_i + A_i, where T_i is a precomputed multiple of P // corresponding to the window s_(i+w-1) ... s_i. // // Let j be the index such that A_j = T_j ≠ ∞. 
Looking at A_i and T_i as // multiples of P, define a_i and t_i to be scalar coefficients of A_i and T_i. // Thus a_j = t_j ≠ 0 (mod n). Note a_i and t_i may not be reduced mod n. t_i is // the value of the w signed bits s_(i+w-1) ... s_i. a_i is computed as a_i = // 2^w * (a_(i+w) + t_(i+w)). // // t_i is bounded by -2^(w-1) <= t_i <= 2^(w-1). Additionally, we may write it // in terms of unsigned bits b_i. t_i consists of signed bits s_(i+w-1) ... s_i. // This is computed as: // // b_(i+w-2) b_(i+w-3) ... b_i b_(i-1) // - b_(i+w-1) b_(i+w-2) ... b_(i+1) b_i // -------------------------------------------- // t_i = s_(i+w-1) s_(i+w-2) ... s_(i+1) s_i // // Observe that b_(i+w-2) through b_i occur in both terms. Let x be the integer // represented by that bit string, i.e. 2^(w-2)*b_(i+w-2) + ... + b_i. // // t_i = (2*x + b_(i-1)) - (2^(w-1)*b_(i+w-1) + x) // = x - 2^(w-1)*b_(i+w-1) + b_(i-1) // // Or, using C notation for bit operations: // // t_i = (k>>i) & ((1<<(w-1)) - 1) - (k>>i) & (1<<(w-1)) + (k>>(i-1)) & 1 // // Note b_(i-1) is added in left-shifted by one (or doubled) from its place. // This is compensated by t_(i-w)'s subtraction term. Thus, a_i may be computed // by adding b_l b_(l-1) ... b_(i+1) b_i and an extra copy of b_(i-1). In C // notation, this is: // // a_i = (k>>(i+w)) << w + ((k>>(i+w-1)) & 1) << w // // Observe that, while t_i may be positive or negative, a_i is bounded by // 0 <= a_i < n + 2^w. Additionally, a_i can only be zero if b_(i+w-1) and up // are all zero. (Note this implies a non-trivial P + (-P) is unreachable for // all groups. That would imply the subsequent a_i is zero, which means all // terms thus far were zero.) // // Returning to our doubling position, we have a_j = t_j (mod n). We now // determine the value of a_j - t_j, which must be divisible by n. Our bounds on // a_j and t_j imply a_j - t_j is 0 or n. If it is 0, a_j = t_j. 
However, 2^w // divides a_j and -2^(w-1) <= t_j <= 2^(w-1), so this can only happen if // a_j = t_j = 0, which is a trivial doubling. Therefore, a_j - t_j = n. // // Now we determine j. Suppose j > 0. w divides j, so j >= w. Then, // // n = a_j - t_j = (k>>(j+w)) << w + ((k>>(j+w-1)) & 1) << w - t_j // <= k/2^j + 2^w - t_j // < n/2^w + 2^w + 2^(w-1) // // n is much larger than 2^w, so this is impossible. Thus, j = 0: only the final // addition may hit the doubling case. // // Finally, we consider bit patterns for n and k. Divide k into k_H + k_M + k_L // such that k_H is the contribution from b_(l-1) .. b_w, k_M is the // contribution from b_(w-1), and k_L is the contribution from b_(w-2) ... b_0. // That is: // // - 2^w divides k_H // - k_M is 0 or 2^(w-1) // - 0 <= k_L < 2^(w-1) // // Divide n into n_H + n_M + n_L similarly. We thus have: // // t_0 = (k>>0) & ((1<<(w-1)) - 1) - (k>>0) & (1<<(w-1)) + (k>>(0-1)) & 1 // = k & ((1<<(w-1)) - 1) - k & (1<<(w-1)) // = k_L - k_M // // a_0 = (k>>(0+w)) << w + ((k>>(0+w-1)) & 1) << w // = (k>>w) << w + ((k>>(w-1)) & 1) << w // = k_H + 2*k_M // // n = a_0 - t_0 // n_H + n_M + n_L = (k_H + 2*k_M) - (k_L - k_M) // = k_H + 3*k_M - k_L // // k_H - k_L < k and k < n, so k_H - k_L ≠ n. Therefore k_M is not 0 and must be // 2^(w-1). Now we consider k_H and n_H. We know k_H <= n_H. Suppose k_H = n_H. // Then, // // n_M + n_L = 3*(2^(w-1)) - k_L // > 3*(2^(w-1)) - 2^(w-1) // = 2^w // // Contradiction (n_M + n_L is the bottom w bits of n). Thus k_H < n_H. Suppose // k_H < n_H - 2*2^w. Then, // // n_H + n_M + n_L = k_H + 3*(2^(w-1)) - k_L // < n_H - 2*2^w + 3*(2^(w-1)) - k_L // n_M + n_L < -2^(w-1) - k_L // // Contradiction. Thus, k_H = n_H - 2^w. (Note 2^w divides n_H and k_H.) Thus, // // n_H + n_M + n_L = k_H + 3*(2^(w-1)) - k_L // = n_H - 2^w + 3*(2^(w-1)) - k_L // n_M + n_L = 2^(w-1) - k_L // <= 2^(w-1) // // Equality would mean 2^(w-1) divides n, which is impossible if n is prime. 
// Thus n_M + n_L < 2^(w-1), so n_M is zero, proving our condition.
//
// This proof constructs k, so, to show the converse, let k_H = n_H - 2^w,
// k_M = 2^(w-1), k_L = 2^(w-1) - n_L. This will result in a non-trivial point
// doubling in the final addition and is the only such scalar.
//
// COMMON CURVES:
//
// The group orders for common curves end in the following bit patterns:
//
//   P-521: ...00001001; w = 4 is okay
//   P-384: ...01110011; w = 2, 5, 6, 7 are okay
//   P-256: ...01010001; w = 5, 7 are okay
//   P-224: ...00111101; w = 3, 4, 5, 6 are okay
void ec_GFp_nistp_recode_scalar_bits(crypto_word_t *sign, crypto_word_t *digit,
                                     crypto_word_t in) {
  crypto_word_t s, d;

  s = ~((in >> 5) - 1); /* sets all bits to MSB(in), 'in' seen as
                         * 6-bit value */
  d = (1 << 6) - in - 1;
  d = (d & s) | (in & ~s);
  d = (d >> 1) + (d & 1);

  *sign = s & 1;
  *digit = d;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ec/wnaf.cc.inc
================================================
/*
 * Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the header names after the following #include directives
// appear to have been stripped during extraction; restore them from upstream
// BoringSSL before compiling.
#include
#include
#include
#include
#include
#include
#include

#include "../../internal.h"
#include "../bn/internal.h"
#include "internal.h"


// This file implements the wNAF-based interleaving multi-exponentiation
// method at:
//   http://link.springer.com/chapter/10.1007%2F3-540-45537-X_13
//   http://www.bmoeller.de/pdf/TI-01-08.multiexp.pdf

// Writes the width-|w| modified wNAF digits of |scalar| (|bits| + 1 of them)
// into |out|. Digits are odd signed values with magnitude below 2^w, or zero.
void ec_compute_wNAF(const EC_GROUP *group, int8_t *out,
                     const EC_SCALAR *scalar, size_t bits, int w) {
  // 'int8_t' can represent integers with absolute values less than 2^7.
  assert(0 < w && w <= 7);
  assert(bits != 0);
  int bit = 1 << w;         // 2^w, at most 128
  int next_bit = bit << 1;  // 2^(w+1), at most 256
  int mask = next_bit - 1;  // at most 255

  int window_val = scalar->words[0] & mask;
  for (size_t j = 0; j < bits + 1; j++) {
    assert(0 <= window_val && window_val <= next_bit);
    int digit = 0;
    if (window_val & 1) {
      assert(0 < window_val && window_val < next_bit);
      if (window_val & bit) {
        digit = window_val - next_bit;
        // We know -next_bit < digit < 0 and window_val - digit = next_bit.

        // modified wNAF
        if (j + w + 1 >= bits) {
          // special case for generating modified wNAFs:
          // no new bits will be added into window_val,
          // so using a positive digit here will decrease
          // the total length of the representation

          digit = window_val & (mask >> 1);
          // We know 0 < digit < bit and window_val - digit = bit.
        }
      } else {
        digit = window_val;
        // We know 0 < digit < bit and window_val - digit = 0.
      }

      window_val -= digit;

      // Now window_val is 0 or 2^(w+1) in standard wNAF generation.
      // For modified window NAFs, it may also be 2^w.
      //
      // See the comments above for the derivation of each of these bounds.
      assert(window_val == 0 || window_val == next_bit || window_val == bit);
      assert(-bit < digit && digit < bit);

      // window_val was odd, so digit is also odd.
      assert(digit & 1);
    }

    out[j] = digit;

    // Incorporate the next bit. Previously, |window_val| <= |next_bit|, so if
    // we shift and add at most one copy of |bit|, this will continue to hold
    // afterwards.
    window_val >>= 1;
    window_val +=
        bit * bn_is_bit_set_words(scalar->words, group->order.N.width,
                                  j + w + 1);
    assert(window_val <= next_bit);
  }

  // bits + 1 entries should be sufficient to consume all bits.
  assert(window_val == 0);
}

// compute_precomp sets |out[i]| to (2*i+1)*p, for i from 0 to |len|.
// compute_precomp fills |out| with the first |len| odd multiples of |p|:
// out[i] = (2*i+1)*p. |out| must have space for |len| points.
static void compute_precomp(const EC_GROUP *group, EC_JACOBIAN *out,
                            const EC_JACOBIAN *p, size_t len) {
  ec_GFp_simple_point_copy(&out[0], p);
  EC_JACOBIAN two_p;
  ec_GFp_mont_dbl(group, &two_p, p);
  // Each entry is the previous odd multiple plus 2*p.
  for (size_t i = 1; i < len; i++) {
    ec_GFp_mont_add(group, &out[i], &out[i - 1], &two_p);
  }
}

// lookup_precomp sets |out| to |digit| * p, where |precomp| holds the odd
// multiples of p as written by |compute_precomp|. |digit| must be odd (so
// |digit| >> 1 indexes the table) and small enough to be in the table. The
// branch and table index depend on |digit|, so |digit| must be public.
static void lookup_precomp(const EC_GROUP *group, EC_JACOBIAN *out,
                           const EC_JACOBIAN *precomp, int digit) {
  if (digit < 0) {
    // Negative digits use the corresponding positive entry, negated.
    digit = -digit;
    ec_GFp_simple_point_copy(out, &precomp[digit >> 1]);
    ec_GFp_simple_invert(group, out);
  } else {
    ec_GFp_simple_point_copy(out, &precomp[digit >> 1]);
  }
}

// EC_WNAF_WINDOW_BITS is the window size to use for |ec_GFp_mont_mul_public|.
#define EC_WNAF_WINDOW_BITS 4

// EC_WNAF_TABLE_SIZE is the table size to use for |ec_GFp_mont_mul_public|.
#define EC_WNAF_TABLE_SIZE (1 << (EC_WNAF_WINDOW_BITS - 1))

// EC_WNAF_STACK is the number of points worth of data to stack-allocate and
// avoid a malloc.
#define EC_WNAF_STACK 3

// ec_GFp_mont_mul_public_batch sets |r| to
//   g_scalar*G + scalars[0]*points[0] + ... + scalars[num-1]*points[num-1],
// where G is the group generator. |g_scalar| may be NULL to omit that term.
// Control flow below depends on the wNAF digits of the scalars, so all
// scalars must be public. Returns one on success and zero on allocation
// failure.
int ec_GFp_mont_mul_public_batch(const EC_GROUP *group, EC_JACOBIAN *r,
                                 const EC_SCALAR *g_scalar,
                                 const EC_JACOBIAN *points,
                                 const EC_SCALAR *scalars, size_t num) {
  size_t bits = EC_GROUP_order_bits(group);
  size_t wNAF_len = bits + 1;

  // Stack-allocated space, which will be used if the task is small enough.
  int8_t wNAF_stack[EC_WNAF_STACK][EC_MAX_BYTES * 8 + 1];
  EC_JACOBIAN precomp_stack[EC_WNAF_STACK][EC_WNAF_TABLE_SIZE];
  // Allocated pointers, which will remain NULL unless needed.
  EC_JACOBIAN(*precomp_alloc)[EC_WNAF_TABLE_SIZE] = NULL;
  int8_t(*wNAF_alloc)[EC_MAX_BYTES * 8 + 1] = NULL;
  // These fields point either to the stack or heap buffers of the same name.
  int8_t(*wNAF)[EC_MAX_BYTES * 8 + 1];
  EC_JACOBIAN(*precomp)[EC_WNAF_TABLE_SIZE];
  if (num <= EC_WNAF_STACK) {
    wNAF = wNAF_stack;
    precomp = precomp_stack;
  } else {
    // NOTE(review): the template arguments of the two reinterpret_casts below
    // were lost in extraction (the <...> part was stripped); restore them
    // from the upstream file before building.
    wNAF_alloc = reinterpret_cast(
        OPENSSL_calloc(num, sizeof(wNAF_alloc[0])));
    if (wNAF_alloc == NULL) {
      return 0;
    }
    precomp_alloc = reinterpret_cast(
        OPENSSL_calloc(num, sizeof(precomp_alloc[0])));
    if (precomp_alloc == NULL) {
      OPENSSL_free(wNAF_alloc);
      return 0;
    }
    wNAF = wNAF_alloc;
    precomp = precomp_alloc;
  }

  // Compute the wNAF digits and the table of odd multiples for the generator
  // term, if requested.
  int8_t g_wNAF[EC_MAX_BYTES * 8 + 1];
  EC_JACOBIAN g_precomp[EC_WNAF_TABLE_SIZE];
  assert(wNAF_len <= OPENSSL_ARRAY_SIZE(g_wNAF));
  const EC_JACOBIAN *g = &group->generator.raw;
  if (g_scalar != NULL) {
    ec_compute_wNAF(group, g_wNAF, g_scalar, bits, EC_WNAF_WINDOW_BITS);
    compute_precomp(group, g_precomp, g, EC_WNAF_TABLE_SIZE);
  }

  // Likewise for each point in the batch.
  for (size_t i = 0; i < num; i++) {
    assert(wNAF_len <= OPENSSL_ARRAY_SIZE(wNAF[i]));
    ec_compute_wNAF(group, wNAF[i], &scalars[i], bits, EC_WNAF_WINDOW_BITS);
    compute_precomp(group, precomp[i], &points[i], EC_WNAF_TABLE_SIZE);
  }

  // Interleave: scan all wNAFs from the most significant digit down, doubling
  // once per digit position and adding each non-zero digit's precomputed
  // multiple.
  EC_JACOBIAN tmp;
  int r_is_at_infinity = 1;
  // k counts down from wNAF_len - 1 to 0; when the unsigned k wraps past zero
  // it becomes >= wNAF_len and the loop terminates.
  for (size_t k = wNAF_len - 1; k < wNAF_len; k--) {
    if (!r_is_at_infinity) {
      ec_GFp_mont_dbl(group, r, r);
    }
    if (g_scalar != NULL && g_wNAF[k] != 0) {
      lookup_precomp(group, &tmp, g_precomp, g_wNAF[k]);
      if (r_is_at_infinity) {
        // The first non-zero term initializes |r| by copy rather than by
        // adding to the (so far unrepresented) point at infinity.
        ec_GFp_simple_point_copy(r, &tmp);
        r_is_at_infinity = 0;
      } else {
        ec_GFp_mont_add(group, r, r, &tmp);
      }
    }
    for (size_t i = 0; i < num; i++) {
      if (wNAF[i][k] != 0) {
        lookup_precomp(group, &tmp, precomp[i], wNAF[i][k]);
        if (r_is_at_infinity) {
          ec_GFp_simple_point_copy(r, &tmp);
          r_is_at_infinity = 0;
        } else {
          ec_GFp_mont_add(group, r, r, &tmp);
        }
      }
    }
  }

  if (r_is_at_infinity) {
    // Every digit of every scalar was zero; the result is infinity.
    ec_GFp_simple_point_set_to_infinity(group, r);
  }

  OPENSSL_free(wNAF_alloc);
  OPENSSL_free(precomp_alloc);
  return 1;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ecdh/ecdh.cc.inc
================================================

/*
 * Copyright 2002-2016 The OpenSSL Project Authors.
All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "../../internal.h" #include "../ec/internal.h" #include "../service_indicator/internal.h" int ECDH_compute_key_fips(uint8_t *out, size_t out_len, const EC_POINT *pub_key, const EC_KEY *priv_key) { boringssl_ensure_ecc_self_test(); if (priv_key->priv_key == NULL) { OPENSSL_PUT_ERROR(ECDH, ECDH_R_NO_PRIVATE_VALUE); return 0; } const EC_SCALAR *const priv = &priv_key->priv_key->scalar; const EC_GROUP *const group = EC_KEY_get0_group(priv_key); if (EC_GROUP_cmp(group, pub_key->group, NULL) != 0) { OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); return 0; } EC_JACOBIAN shared_point; uint8_t buf[EC_MAX_BYTES]; size_t buflen; if (!ec_point_mul_scalar(group, &shared_point, &pub_key->raw, priv) || !ec_get_x_coordinate_as_bytes(group, buf, &buflen, sizeof(buf), &shared_point)) { OPENSSL_PUT_ERROR(ECDH, ECDH_R_POINT_ARITHMETIC_FAILURE); return 0; } FIPS_service_indicator_lock_state(); SHA256_CTX ctx; SHA512_CTX ctx_512; switch (out_len) { case SHA224_DIGEST_LENGTH: BCM_sha224_init(&ctx); BCM_sha224_update(&ctx, buf, buflen); BCM_sha224_final(out, &ctx); break; case SHA256_DIGEST_LENGTH: BCM_sha256_init(&ctx); BCM_sha256_update(&ctx, buf, buflen); BCM_sha256_final(out, &ctx); break; case SHA384_DIGEST_LENGTH: BCM_sha384_init(&ctx_512); BCM_sha384_update(&ctx_512, buf, buflen); BCM_sha384_final(out, &ctx_512); break; case SHA512_DIGEST_LENGTH: BCM_sha512_init(&ctx_512); BCM_sha512_update(&ctx_512, buf, buflen); BCM_sha512_final(out, &ctx_512); break; default: OPENSSL_PUT_ERROR(ECDH, ECDH_R_UNKNOWN_DIGEST_LENGTH); FIPS_service_indicator_unlock_state(); return 0; } 
FIPS_service_indicator_unlock_state(); ECDH_verify_service_indicator(priv_key); return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ecdsa/ecdsa.cc.inc ================================================ /* * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "../../internal.h" #include "../bn/internal.h" #include "../ec/internal.h" #include "../service_indicator/internal.h" #include "internal.h" // digest_to_scalar interprets |digest_len| bytes from |digest| as a scalar for // ECDSA. static void digest_to_scalar(const EC_GROUP *group, EC_SCALAR *out, const uint8_t *digest, size_t digest_len) { const BIGNUM *order = EC_GROUP_get0_order(group); size_t num_bits = BN_num_bits(order); // Need to truncate digest if it is too long: first truncate whole bytes. size_t num_bytes = (num_bits + 7) / 8; if (digest_len > num_bytes) { digest_len = num_bytes; } bn_big_endian_to_words(out->words, order->width, digest, digest_len); // If it is still too long, truncate remaining bits with a shift. if (8 * digest_len > num_bits) { bn_rshift_words(out->words, out->words, 8 - (num_bits & 0x7), order->width); } // |out| now has the same bit width as |order|, but this only bounds by // 2*|order|. Subtract the order if out of range. // // Montgomery multiplication accepts the looser bounds, so this isn't strictly // necessary, but it is a cleaner abstraction and has no performance impact. 
BN_ULONG tmp[EC_MAX_WORDS]; bn_reduce_once_in_place(out->words, 0 /* no carry */, order->d, tmp, order->width); } int ecdsa_verify_fixed_no_self_test(const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, const EC_KEY *eckey) { const EC_GROUP *group = EC_KEY_get0_group(eckey); const EC_POINT *pub_key = EC_KEY_get0_public_key(eckey); if (group == NULL || pub_key == NULL || sig == NULL) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_MISSING_PARAMETERS); return 0; } size_t scalar_len = BN_num_bytes(EC_GROUP_get0_order(group)); EC_SCALAR r, s, u1, u2, s_inv_mont, m; if (sig_len != 2 * scalar_len || !ec_scalar_from_bytes(group, &r, sig, scalar_len) || ec_scalar_is_zero(group, &r) || !ec_scalar_from_bytes(group, &s, sig + scalar_len, scalar_len) || ec_scalar_is_zero(group, &s)) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE); return 0; } // s_inv_mont = s^-1 in the Montgomery domain. if (!ec_scalar_to_montgomery_inv_vartime(group, &s_inv_mont, &s)) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_INTERNAL_ERROR); return 0; } // u1 = m * s^-1 mod order // u2 = r * s^-1 mod order // // |s_inv_mont| is in Montgomery form while |m| and |r| are not, so |u1| and // |u2| will be taken out of Montgomery form, as desired. 
digest_to_scalar(group, &m, digest, digest_len); ec_scalar_mul_montgomery(group, &u1, &m, &s_inv_mont); ec_scalar_mul_montgomery(group, &u2, &r, &s_inv_mont); EC_JACOBIAN point; if (!ec_point_mul_scalar_public(group, &point, &u1, &pub_key->raw, &u2)) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_EC_LIB); return 0; } if (!ec_cmp_x_coordinate(group, &point, &r)) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE); return 0; } return 1; } int ecdsa_verify_fixed(const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, const EC_KEY *key) { boringssl_ensure_ecc_self_test(); return ecdsa_verify_fixed_no_self_test(digest, digest_len, sig, sig_len, key); } static int ecdsa_sign_impl(const EC_GROUP *group, int *out_retry, uint8_t *sig, size_t *out_sig_len, size_t max_sig_len, const EC_SCALAR *priv_key, const EC_SCALAR *k, const uint8_t *digest, size_t digest_len) { *out_retry = 0; // Check that the size of the group order is FIPS compliant (FIPS 186-4 // B.5.2). const BIGNUM *order = EC_GROUP_get0_order(group); if (BN_num_bits(order) < 160) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_GROUP_ORDER); return 0; } size_t sig_len = 2 * BN_num_bytes(order); if (sig_len > max_sig_len) { OPENSSL_PUT_ERROR(EC, EC_R_BUFFER_TOO_SMALL); return 0; } // Compute r, the x-coordinate of k * generator. EC_JACOBIAN tmp_point; EC_SCALAR r; if (!ec_point_mul_scalar_base(group, &tmp_point, k) || !ec_get_x_coordinate_as_scalar(group, &r, &tmp_point)) { return 0; } if (constant_time_declassify_int(ec_scalar_is_zero(group, &r))) { *out_retry = 1; return 0; } // s = priv_key * r. Note if only one parameter is in the Montgomery domain, // |ec_scalar_mod_mul_montgomery| will compute the answer in the normal // domain. EC_SCALAR s; ec_scalar_to_montgomery(group, &s, &r); ec_scalar_mul_montgomery(group, &s, priv_key, &s); // s = m + priv_key * r. EC_SCALAR tmp; digest_to_scalar(group, &tmp, digest, digest_len); ec_scalar_add(group, &s, &s, &tmp); // s = k^-1 * (m + priv_key * r). 
First, we compute k^-1 in the Montgomery // domain. This is |ec_scalar_to_montgomery| followed by // |ec_scalar_inv0_montgomery|, but |ec_scalar_inv0_montgomery| followed by // |ec_scalar_from_montgomery| is equivalent and slightly more efficient. // Then, as above, only one parameter is in the Montgomery domain, so the // result is in the normal domain. Finally, note k is non-zero (or computing r // would fail), so the inverse must exist. ec_scalar_inv0_montgomery(group, &tmp, k); // tmp = k^-1 R^2 ec_scalar_from_montgomery(group, &tmp, &tmp); // tmp = k^-1 R ec_scalar_mul_montgomery(group, &s, &s, &tmp); if (constant_time_declassify_int(ec_scalar_is_zero(group, &s))) { *out_retry = 1; return 0; } CONSTTIME_DECLASSIFY(r.words, sizeof(r.words)); CONSTTIME_DECLASSIFY(s.words, sizeof(r.words)); size_t len; ec_scalar_to_bytes(group, sig, &len, &r); assert(len == sig_len / 2); ec_scalar_to_bytes(group, sig + len, &len, &s); assert(len == sig_len / 2); *out_sig_len = sig_len; return 1; } int ecdsa_sign_fixed_with_nonce_for_known_answer_test( const uint8_t *digest, size_t digest_len, uint8_t *sig, size_t *out_sig_len, size_t max_sig_len, const EC_KEY *eckey, const uint8_t *nonce, size_t nonce_len) { if (eckey->ecdsa_meth && eckey->ecdsa_meth->sign) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_NOT_IMPLEMENTED); return 0; } const EC_GROUP *group = EC_KEY_get0_group(eckey); if (group == NULL || eckey->priv_key == NULL) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_PASSED_NULL_PARAMETER); return 0; } const EC_SCALAR *priv_key = &eckey->priv_key->scalar; EC_SCALAR k; if (!ec_scalar_from_bytes(group, &k, nonce, nonce_len)) { return 0; } int retry_ignored; return ecdsa_sign_impl(group, &retry_ignored, sig, out_sig_len, max_sig_len, priv_key, &k, digest, digest_len); } int ecdsa_sign_fixed(const uint8_t *digest, size_t digest_len, uint8_t *sig, size_t *out_sig_len, size_t max_sig_len, const EC_KEY *eckey) { boringssl_ensure_ecc_self_test(); if (eckey->ecdsa_meth && eckey->ecdsa_meth->sign) { 
OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_NOT_IMPLEMENTED); return 0; } const EC_GROUP *group = EC_KEY_get0_group(eckey); if (group == NULL || eckey->priv_key == NULL) { OPENSSL_PUT_ERROR(ECDSA, ERR_R_PASSED_NULL_PARAMETER); return 0; } const BIGNUM *order = EC_GROUP_get0_order(group); const EC_SCALAR *priv_key = &eckey->priv_key->scalar; // Pass a SHA512 hash of the private key and digest as additional data // into the RBG. This is a hardening measure against entropy failure. static_assert(BCM_SHA512_DIGEST_LENGTH >= 32, "additional_data is too large for SHA-512"); FIPS_service_indicator_lock_state(); SHA512_CTX sha; uint8_t additional_data[BCM_SHA512_DIGEST_LENGTH]; BCM_sha512_init(&sha); BCM_sha512_update(&sha, priv_key->words, order->width * sizeof(BN_ULONG)); BCM_sha512_update(&sha, digest, digest_len); BCM_sha512_final(additional_data, &sha); // Cap iterations so callers who supply invalid values as custom groups do not // infinite loop. This does not impact valid parameters (e.g. those covered by // FIPS) because the probability of requiring even one retry is negligible, // let alone 32. static const int kMaxIterations = 32; int ret = 0; int iters = 0; for (;;) { EC_SCALAR k; if (!ec_random_nonzero_scalar(group, &k, additional_data)) { goto out; } // TODO(davidben): Move this inside |ec_random_nonzero_scalar| or lower, so // that all scalars we generate are, by default, secret. 
CONSTTIME_SECRET(k.words, sizeof(k.words)); int retry; ret = ecdsa_sign_impl(group, &retry, sig, out_sig_len, max_sig_len, priv_key, &k, digest, digest_len); if (ret || !retry) { goto out; } iters++; if (iters > kMaxIterations) { OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_TOO_MANY_ITERATIONS); goto out; } } out: FIPS_service_indicator_unlock_state(); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/ecdsa/internal.h ================================================ /* Copyright 2021 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CRYPTO_FIPSMODULE_ECDSA_INTERNAL_H #define OPENSSL_HEADER_CRYPTO_FIPSMODULE_ECDSA_INTERNAL_H #include #include "../ec/internal.h" #if defined(__cplusplus) extern "C" { #endif // ECDSA_MAX_FIXED_LEN is the maximum length of an ECDSA signature in the // fixed-width, big-endian format from IEEE P1363. #define ECDSA_MAX_FIXED_LEN (2 * EC_MAX_BYTES) // ecdsa_sign_fixed behaves like |ECDSA_sign| but uses the fixed-width, // big-endian format from IEEE P1363. 
int ecdsa_sign_fixed(const uint8_t *digest, size_t digest_len, uint8_t *sig,
                     size_t *out_sig_len, size_t max_sig_len,
                     const EC_KEY *key);

// ecdsa_sign_fixed_with_nonce_for_known_answer_test behaves like
// |ecdsa_sign_fixed| but takes a caller-supplied nonce. This function is used
// as part of known-answer tests in the FIPS module.
int ecdsa_sign_fixed_with_nonce_for_known_answer_test(
    const uint8_t *digest, size_t digest_len, uint8_t *sig,
    size_t *out_sig_len, size_t max_sig_len, const EC_KEY *key,
    const uint8_t *nonce, size_t nonce_len);

// ecdsa_verify_fixed behaves like |ECDSA_verify| but uses the fixed-width,
// big-endian format from IEEE P1363.
int ecdsa_verify_fixed(const uint8_t *digest, size_t digest_len,
                       const uint8_t *sig, size_t sig_len, const EC_KEY *key);

// ecdsa_verify_fixed_no_self_test behaves like ecdsa_verify_fixed, but
// doesn't try to run the self-test first. This is for use in the self tests
// themselves, to prevent an infinite loop.
int ecdsa_verify_fixed_no_self_test(const uint8_t *digest, size_t digest_len,
                                    const uint8_t *sig, size_t sig_len,
                                    const EC_KEY *key);

#if defined(__cplusplus)
}
#endif

#endif  // OPENSSL_HEADER_CRYPTO_FIPSMODULE_ECDSA_INTERNAL_H

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/fips_shared_support.cc
================================================

/* Copyright 2019 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the header name of this include was lost in extraction (the
// <...> part was stripped); restore it from the upstream file before building.
#include

#if defined(BORINGSSL_FIPS) && defined(BORINGSSL_SHARED_LIBRARY)

// BORINGSSL_bcm_text_hash is the default hash value for the FIPS integrity
// check that must be replaced with the real value during the build process.
// This value need only be distinct, i.e. so that we can safely
// search-and-replace it in an object file.
extern const uint8_t BORINGSSL_bcm_text_hash[32] = {
    0xae, 0x2c, 0xea, 0x2a, 0xbd, 0xa6, 0xf3, 0xec, 0x97, 0x7f, 0x9b,
    0xf6, 0x94, 0x9a, 0xfc, 0x83, 0x68, 0x27, 0xcb, 0xa0, 0xa0, 0x9f,
    0x6b, 0x6f, 0xde, 0x52, 0xcd, 0xe2, 0xcd, 0xff, 0x31, 0x80,
};

#endif  // FIPS && SHARED_LIBRARY

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/hkdf/hkdf.cc.inc
================================================

/* Copyright 2014 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ #include #include #include #include #include #include "../../internal.h" int HKDF(uint8_t *out_key, size_t out_len, const EVP_MD *digest, const uint8_t *secret, size_t secret_len, const uint8_t *salt, size_t salt_len, const uint8_t *info, size_t info_len) { // https://tools.ietf.org/html/rfc5869#section-2 uint8_t prk[EVP_MAX_MD_SIZE]; size_t prk_len; if (!HKDF_extract(prk, &prk_len, digest, secret, secret_len, salt, salt_len) || !HKDF_expand(out_key, out_len, digest, prk, prk_len, info, info_len)) { return 0; } return 1; } int HKDF_extract(uint8_t *out_key, size_t *out_len, const EVP_MD *digest, const uint8_t *secret, size_t secret_len, const uint8_t *salt, size_t salt_len) { // https://tools.ietf.org/html/rfc5869#section-2.2 // If salt is not given, HashLength zeros are used. However, HMAC does that // internally already so we can ignore it. unsigned len; if (HMAC(digest, salt, salt_len, secret, secret_len, out_key, &len) == NULL) { OPENSSL_PUT_ERROR(HKDF, ERR_R_HMAC_LIB); return 0; } *out_len = len; assert(*out_len == EVP_MD_size(digest)); return 1; } int HKDF_expand(uint8_t *out_key, size_t out_len, const EVP_MD *digest, const uint8_t *prk, size_t prk_len, const uint8_t *info, size_t info_len) { // https://tools.ietf.org/html/rfc5869#section-2.3 const size_t digest_len = EVP_MD_size(digest); uint8_t previous[EVP_MAX_MD_SIZE]; size_t n, done = 0; unsigned i; int ret = 0; HMAC_CTX hmac; // Expand key material to desired length. 
n = (out_len + digest_len - 1) / digest_len; if (out_len + digest_len < out_len || n > 255) { OPENSSL_PUT_ERROR(HKDF, HKDF_R_OUTPUT_TOO_LARGE); return 0; } HMAC_CTX_init(&hmac); if (!HMAC_Init_ex(&hmac, prk, prk_len, digest, NULL)) { goto out; } for (i = 0; i < n; i++) { uint8_t ctr = i + 1; size_t todo; if (i != 0 && (!HMAC_Init_ex(&hmac, NULL, 0, NULL, NULL) || !HMAC_Update(&hmac, previous, digest_len))) { goto out; } if (!HMAC_Update(&hmac, info, info_len) || !HMAC_Update(&hmac, &ctr, 1) || !HMAC_Final(&hmac, previous, NULL)) { goto out; } todo = digest_len; if (todo > out_len - done) { todo = out_len - done; } OPENSSL_memcpy(out_key + done, previous, todo); done += todo; } ret = 1; out: HMAC_CTX_cleanup(&hmac); if (ret != 1) { OPENSSL_PUT_ERROR(HKDF, ERR_R_HMAC_LIB); } return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/hmac/hmac.cc.inc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../../internal.h" #include "../service_indicator/internal.h" uint8_t *HMAC(const EVP_MD *evp_md, const void *key, size_t key_len, const uint8_t *data, size_t data_len, uint8_t *out, unsigned int *out_len) { HMAC_CTX ctx; HMAC_CTX_init(&ctx); // The underlying hash functions should not set the FIPS service indicator // until all operations have completed. 
FIPS_service_indicator_lock_state(); const int ok = HMAC_Init_ex(&ctx, key, key_len, evp_md, NULL) && HMAC_Update(&ctx, data, data_len) && HMAC_Final(&ctx, out, out_len); FIPS_service_indicator_unlock_state(); HMAC_CTX_cleanup(&ctx); if (!ok) { return NULL; } HMAC_verify_service_indicator(evp_md); return out; } void HMAC_CTX_init(HMAC_CTX *ctx) { ctx->md = NULL; EVP_MD_CTX_init(&ctx->i_ctx); EVP_MD_CTX_init(&ctx->o_ctx); EVP_MD_CTX_init(&ctx->md_ctx); } HMAC_CTX *HMAC_CTX_new(void) { HMAC_CTX *ctx = reinterpret_cast(OPENSSL_malloc(sizeof(HMAC_CTX))); if (ctx != NULL) { HMAC_CTX_init(ctx); } return ctx; } void HMAC_CTX_cleanup(HMAC_CTX *ctx) { EVP_MD_CTX_cleanup(&ctx->i_ctx); EVP_MD_CTX_cleanup(&ctx->o_ctx); EVP_MD_CTX_cleanup(&ctx->md_ctx); OPENSSL_cleanse(ctx, sizeof(HMAC_CTX)); } void HMAC_CTX_cleanse(HMAC_CTX *ctx) { EVP_MD_CTX_cleanse(&ctx->i_ctx); EVP_MD_CTX_cleanse(&ctx->o_ctx); EVP_MD_CTX_cleanse(&ctx->md_ctx); OPENSSL_cleanse(ctx, sizeof(HMAC_CTX)); } void HMAC_CTX_free(HMAC_CTX *ctx) { if (ctx == NULL) { return; } HMAC_CTX_cleanup(ctx); OPENSSL_free(ctx); } int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len, const EVP_MD *md, ENGINE *impl) { int ret = 0; FIPS_service_indicator_lock_state(); if (md == NULL) { md = ctx->md; } // If either |key| is non-NULL or |md| has changed, initialize with a new key // rather than rewinding the previous one. // // TODO(davidben,eroman): Passing the previous |md| with a NULL |key| is // ambiguous between using the empty key and reusing the previous key. There // exist callers which intend the latter, but the former is an awkward edge // case. Fix to API to avoid this. if (md != ctx->md || key != NULL) { uint8_t pad[EVP_MAX_MD_BLOCK_SIZE]; uint8_t key_block[EVP_MAX_MD_BLOCK_SIZE]; unsigned key_block_len; size_t block_size = EVP_MD_block_size(md); assert(block_size <= sizeof(key_block)); assert(EVP_MD_size(md) <= block_size); if (block_size < key_len) { // Long keys are hashed. 
if (!EVP_DigestInit_ex(&ctx->md_ctx, md, impl) || !EVP_DigestUpdate(&ctx->md_ctx, key, key_len) || !EVP_DigestFinal_ex(&ctx->md_ctx, key_block, &key_block_len)) { goto out; } } else { assert(key_len <= sizeof(key_block)); OPENSSL_memcpy(key_block, key, key_len); key_block_len = (unsigned)key_len; } // Keys are then padded with zeros. OPENSSL_memset(key_block + key_block_len, 0, block_size - key_block_len); for (size_t i = 0; i < block_size; i++) { pad[i] = 0x36 ^ key_block[i]; } if (!EVP_DigestInit_ex(&ctx->i_ctx, md, impl) || !EVP_DigestUpdate(&ctx->i_ctx, pad, block_size)) { goto out; } for (size_t i = 0; i < block_size; i++) { pad[i] = 0x5c ^ key_block[i]; } if (!EVP_DigestInit_ex(&ctx->o_ctx, md, impl) || !EVP_DigestUpdate(&ctx->o_ctx, pad, block_size)) { goto out; } ctx->md = md; } ret = EVP_MD_CTX_copy_ex(&ctx->md_ctx, &ctx->i_ctx); out: FIPS_service_indicator_unlock_state(); return ret; } int HMAC_Update(HMAC_CTX *ctx, const uint8_t *data, size_t data_len) { return EVP_DigestUpdate(&ctx->md_ctx, data, data_len); } int HMAC_Final(HMAC_CTX *ctx, uint8_t *out, unsigned int *out_len) { int ret = 0; unsigned int i; uint8_t buf[EVP_MAX_MD_SIZE]; FIPS_service_indicator_lock_state(); // TODO(davidben): The only thing that can officially fail here is // |EVP_MD_CTX_copy_ex|, but even that should be impossible in this case. 
if (!EVP_DigestFinal_ex(&ctx->md_ctx, buf, &i) || !EVP_MD_CTX_copy_ex(&ctx->md_ctx, &ctx->o_ctx) || !EVP_DigestUpdate(&ctx->md_ctx, buf, i) || !EVP_DigestFinal_ex(&ctx->md_ctx, out, out_len)) { *out_len = 0; goto out; } ret = 1; out: FIPS_service_indicator_unlock_state(); if (ret) { HMAC_verify_service_indicator(ctx->md); } return ret; } size_t HMAC_size(const HMAC_CTX *ctx) { return EVP_MD_size(ctx->md); } const EVP_MD *HMAC_CTX_get_md(const HMAC_CTX *ctx) { return ctx->md; } int HMAC_CTX_copy_ex(HMAC_CTX *dest, const HMAC_CTX *src) { if (!EVP_MD_CTX_copy_ex(&dest->i_ctx, &src->i_ctx) || !EVP_MD_CTX_copy_ex(&dest->o_ctx, &src->o_ctx) || !EVP_MD_CTX_copy_ex(&dest->md_ctx, &src->md_ctx)) { return 0; } dest->md = src->md; return 1; } void HMAC_CTX_reset(HMAC_CTX *ctx) { HMAC_CTX_cleanup(ctx); HMAC_CTX_init(ctx); } int HMAC_Init(HMAC_CTX *ctx, const void *key, int key_len, const EVP_MD *md) { if (key && md) { HMAC_CTX_init(ctx); } return HMAC_Init_ex(ctx, key, key_len, md, NULL); } int HMAC_CTX_copy(HMAC_CTX *dest, const HMAC_CTX *src) { HMAC_CTX_init(dest); return HMAC_CTX_copy_ex(dest, src); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/keccak/internal.h ================================================ /* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#ifndef OPENSSL_HEADER_CRYPTO_KECCAK_INTERNAL_H
#define OPENSSL_HEADER_CRYPTO_KECCAK_INTERNAL_H

// NOTE(review): the header name of this include was lost in extraction (the
// <...> part was stripped); restore it from the upstream file before building.
#include

#if defined(__cplusplus)
extern "C" {
#endif

// The supported Keccak-based functions: the fixed-output SHA-3 hashes and the
// variable-output SHAKE XOFs.
enum boringssl_keccak_config_t {
  boringssl_sha3_256,
  boringssl_sha3_512,
  boringssl_shake128,
  boringssl_shake256,
};

// A sponge context is first in the absorbing phase and then, once squeezing
// starts, permanently in the squeezing phase.
enum boringssl_keccak_phase_t {
  boringssl_keccak_phase_absorb,
  boringssl_keccak_phase_squeeze,
};

// An incremental Keccak sponge context.
struct BORINGSSL_keccak_st {
  enum boringssl_keccak_config_t config;  // which SHA-3/SHAKE variant
  enum boringssl_keccak_phase_t phase;    // absorbing or squeezing
  uint64_t state[25];     // the Keccak-1600 state: 5x5 lanes, row-major
  size_t rate_bytes;      // bytes per block, i.e. 200 minus the capacity
  size_t absorb_offset;   // bytes already absorbed into the current block
  // Bytes already squeezed from the current block — presumably; the squeeze
  // implementation is outside this chunk, confirm in keccak.cc.inc.
  size_t squeeze_offset;
};

// BORINGSSL_keccak hashes |in_len| bytes from |in| and writes |out_len| bytes
// of output to |out|. If the |config| specifies a fixed-output function, like
// SHA3-256, then |out_len| must be the correct length for that function.
OPENSSL_EXPORT void BORINGSSL_keccak(uint8_t *out, size_t out_len,
                                     const uint8_t *in, size_t in_len,
                                     enum boringssl_keccak_config_t config);

// BORINGSSL_keccak_init prepares |ctx| for absorbing. The |config| must
// specify a SHAKE variant, otherwise callers should use |BORINGSSL_keccak|.
OPENSSL_EXPORT void BORINGSSL_keccak_init(
    struct BORINGSSL_keccak_st *ctx, enum boringssl_keccak_config_t config);

// BORINGSSL_keccak_absorb absorbs |in_len| bytes from |in|.
OPENSSL_EXPORT void BORINGSSL_keccak_absorb(struct BORINGSSL_keccak_st *ctx,
                                            const uint8_t *in, size_t in_len);

// BORINGSSL_keccak_squeeze writes |out_len| bytes to |out| from |ctx|.
OPENSSL_EXPORT void BORINGSSL_keccak_squeeze(struct BORINGSSL_keccak_st *ctx,
                                             uint8_t *out, size_t out_len);

#if defined(__cplusplus)
}
#endif

#endif  // OPENSSL_HEADER_CRYPTO_KECCAK_INTERNAL_H


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/keccak/keccak.cc.inc
================================================
/* Copyright 2023 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the three bare #include directives below lost their
// "<header>" arguments during text extraction (angle-bracket spans were
// stripped). The code uses assert(), abort() and memory helpers, so these are
// presumably <assert.h>, <stdlib.h> and <string.h> — confirm against the
// vendored original before compiling.
#include
#include
#include

#include "../../internal.h"
#include "./internal.h"


// keccak_f implements the Keccak-1600 permutation as described at
// https://keccak.team/keccak_specs_summary.html. Each lane is represented as a
// 64-bit value and the 5×5 lanes are stored as an array in row-major order.
static void keccak_f(uint64_t state[25]) {
  static const int kNumRounds = 24;
  for (int round = 0; round < kNumRounds; round++) {
    // θ step
    uint64_t c[5];
    for (int x = 0; x < 5; x++) {
      c[x] = state[x] ^ state[x + 5] ^ state[x + 10] ^ state[x + 15] ^
             state[x + 20];
    }
    for (int x = 0; x < 5; x++) {
      const uint64_t d = c[(x + 4) % 5] ^ CRYPTO_rotl_u64(c[(x + 1) % 5], 1);
      for (int y = 0; y < 5; y++) {
        state[y * 5 + x] ^= d;
      }
    }

    // ρ and π steps.
    //
    // These steps involve a mapping of the state matrix. Each input point,
    // (x,y), is rotated and written to the point (y, 2x + 3y). In the Keccak
    // pseudo-code a separate array is used because an in-place operation would
    // overwrite some values that are subsequently needed. However, the mapping
    // forms a trail through 24 of the 25 values so we can do it in place with
    // only a single temporary variable.
    //
    // Start with (1, 0). The value here will be mapped and end up at (0, 2).
    // That value will end up at (2, 1), then (1, 2), and so on. After 24
    // steps, 24 of the 25 values have been hit (as this mapping is injective)
    // and the sequence will repeat. All that remains is to handle the element
    // at (0, 0), but the rotation for that element is zero, and it goes to (0,
    // 0), so we can ignore it.
    uint64_t prev_value = state[1];
#define PI_RHO_STEP(index, rotation)                              \
  do {                                                            \
    const uint64_t value = CRYPTO_rotl_u64(prev_value, rotation); \
    prev_value = state[index];                                    \
    state[index] = value;                                         \
  } while (0)

    PI_RHO_STEP(10, 1);
    PI_RHO_STEP(7, 3);
    PI_RHO_STEP(11, 6);
    PI_RHO_STEP(17, 10);
    PI_RHO_STEP(18, 15);
    PI_RHO_STEP(3, 21);
    PI_RHO_STEP(5, 28);
    PI_RHO_STEP(16, 36);
    PI_RHO_STEP(8, 45);
    PI_RHO_STEP(21, 55);
    PI_RHO_STEP(24, 2);
    PI_RHO_STEP(4, 14);
    PI_RHO_STEP(15, 27);
    PI_RHO_STEP(23, 41);
    PI_RHO_STEP(19, 56);
    PI_RHO_STEP(13, 8);
    PI_RHO_STEP(12, 25);
    PI_RHO_STEP(2, 43);
    PI_RHO_STEP(20, 62);
    PI_RHO_STEP(14, 18);
    PI_RHO_STEP(22, 39);
    PI_RHO_STEP(9, 61);
    PI_RHO_STEP(6, 20);
    PI_RHO_STEP(1, 44);
#undef PI_RHO_STEP

    // χ step
    for (int y = 0; y < 5; y++) {
      const int row_index = 5 * y;

      const uint64_t orig_x0 = state[row_index];
      const uint64_t orig_x1 = state[row_index + 1];

      state[row_index] ^= ~orig_x1 & state[row_index + 2];
      state[row_index + 1] ^= ~state[row_index + 2] & state[row_index + 3];
      state[row_index + 2] ^= ~state[row_index + 3] & state[row_index + 4];
      state[row_index + 3] ^= ~state[row_index + 4] & orig_x0;
      state[row_index + 4] ^= ~orig_x0 & orig_x1;
    }

    // ι step
    //
    // From https://keccak.team/files/Keccak-reference-3.0.pdf, section
    // 1.2, the round constants are based on the output of a LFSR. Thus, as
    // suggested in the appendix of
    // https://keccak.team/keccak_specs_summary.html, the values are
    // simply encoded here.
    static const uint64_t kRoundConstants[24] = {
        0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
        0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
        0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
        0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
        0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
        0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
        0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
        0x8000000000008080, 0x0000000080000001, 0x8000000080008008,
    };
    state[0] ^= kRoundConstants[round];
  }
}

// keccak_init zeros |ctx|, selects the sponge rate for |config| (200 bytes of
// state minus the capacity), and reports via |*out_required_out_len| the fixed
// digest length — 32/64 bytes for SHA3-256/512, or 0 for the SHAKE XOFs whose
// output length is caller-chosen. Aborts on an unknown |config|.
static void keccak_init(struct BORINGSSL_keccak_st *ctx,
                        size_t *out_required_out_len,
                        enum boringssl_keccak_config_t config) {
  size_t capacity_bytes;
  switch (config) {
    case boringssl_sha3_256:
      capacity_bytes = 512 / 8;
      *out_required_out_len = 32;
      break;
    case boringssl_sha3_512:
      capacity_bytes = 1024 / 8;
      *out_required_out_len = 64;
      break;
    case boringssl_shake128:
      capacity_bytes = 256 / 8;
      *out_required_out_len = 0;
      break;
    case boringssl_shake256:
      capacity_bytes = 512 / 8;
      *out_required_out_len = 0;
      break;
    default:
      abort();
  }
  OPENSSL_memset(ctx, 0, sizeof(*ctx));
  ctx->config = config;
  ctx->phase = boringssl_keccak_phase_absorb;
  ctx->rate_bytes = 200 - capacity_bytes;
  assert(ctx->rate_bytes % 8 == 0);
}

// BORINGSSL_keccak is the one-shot interface: absorb |in| and squeeze
// |out_len| bytes into |out|. For the fixed-length SHA3 configs, |out_len|
// must equal the digest length exactly, otherwise this aborts.
void BORINGSSL_keccak(uint8_t *out, size_t out_len, const uint8_t *in,
                      size_t in_len, enum boringssl_keccak_config_t config) {
  struct BORINGSSL_keccak_st ctx;
  size_t required_out_len;
  keccak_init(&ctx, &required_out_len, config);
  if (required_out_len != 0 && out_len != required_out_len) {
    abort();
  }
  BORINGSSL_keccak_absorb(&ctx, in, in_len);
  BORINGSSL_keccak_squeeze(&ctx, out, out_len);
}

// BORINGSSL_keccak_init starts an incremental computation. Only the XOF
// configurations (required_out_len == 0, i.e. SHAKE128/SHAKE256) are permitted
// here; the fixed-output SHA3 configs abort.
void BORINGSSL_keccak_init(struct BORINGSSL_keccak_st *ctx,
                           enum boringssl_keccak_config_t config) {
  size_t required_out_len;
  keccak_init(ctx, &required_out_len, config);
  if (required_out_len != 0) {
    abort();
  }
}

// BORINGSSL_keccak_absorb XORs |in_len| bytes of |in| into the sponge,
// running the permutation each time a full rate-sized block has been
// absorbed. May be called repeatedly, but not after squeezing has begun.
void BORINGSSL_keccak_absorb(struct BORINGSSL_keccak_st *ctx, const uint8_t *in,
                             size_t in_len) {
  if (ctx->phase == boringssl_keccak_phase_squeeze) {
    // It's illegal to call absorb() again after calling squeeze().
    abort();
  }
  const size_t rate_words = ctx->rate_bytes / 8;
  // XOR the input. Accessing |ctx->state| as a |uint8_t*| is allowed by strict
  // aliasing because we require |uint8_t| to be a character type.
  uint8_t *state_bytes = (uint8_t *)ctx->state;

  // Absorb partial block left over from a previous call.
  if (ctx->absorb_offset != 0) {
    assert(ctx->absorb_offset < ctx->rate_bytes);
    size_t first_block_len = ctx->rate_bytes - ctx->absorb_offset;
    for (size_t i = 0; i < first_block_len && i < in_len; i++) {
      state_bytes[ctx->absorb_offset + i] ^= in[i];
    }
    // This input didn't fill the block.
    if (first_block_len > in_len) {
      ctx->absorb_offset += in_len;
      return;
    }
    keccak_f(ctx->state);
    in += first_block_len;
    in_len -= first_block_len;
  }

  // Absorb full blocks.
  while (in_len >= ctx->rate_bytes) {
    for (size_t i = 0; i < rate_words; i++) {
      ctx->state[i] ^= CRYPTO_load_u64_le(in + 8 * i);
    }
    keccak_f(ctx->state);
    in += ctx->rate_bytes;
    in_len -= ctx->rate_bytes;
  }

  // Absorb trailing partial block; record the offset for the next call.
  assert(in_len < ctx->rate_bytes);
  for (size_t i = 0; i < in_len; i++) {
    state_bytes[i] ^= in[i];
  }
  ctx->absorb_offset = in_len;
}

// keccak_finalize applies the domain-separation padding (0x06 for SHA3,
// 0x1f for SHAKE, plus the trailing 0x80 of pad10*1) and runs the permutation
// once, making the state ready for squeezing.
static void keccak_finalize(struct BORINGSSL_keccak_st *ctx) {
  uint8_t terminator;
  switch (ctx->config) {
    case boringssl_sha3_256:
    case boringssl_sha3_512:
      terminator = 0x06;
      break;
    case boringssl_shake128:
    case boringssl_shake256:
      terminator = 0x1f;
      break;
    default:
      abort();
  }
  // XOR the terminator. Accessing |ctx->state| as a |uint8_t*| is allowed by
  // strict aliasing because we require |uint8_t| to be a character type.
  uint8_t *state_bytes = (uint8_t *)ctx->state;
  state_bytes[ctx->absorb_offset] ^= terminator;
  state_bytes[ctx->rate_bytes - 1] ^= 0x80;
  keccak_f(ctx->state);
}

// BORINGSSL_keccak_squeeze emits |out_len| bytes of output. The first call
// finalizes the absorb phase; subsequent calls continue the output stream,
// permuting whenever a rate-sized block of output has been consumed.
void BORINGSSL_keccak_squeeze(struct BORINGSSL_keccak_st *ctx, uint8_t *out,
                              size_t out_len) {
  if (ctx->phase == boringssl_keccak_phase_absorb) {
    keccak_finalize(ctx);
    ctx->phase = boringssl_keccak_phase_squeeze;
  }
  // Accessing |ctx->state| as a |uint8_t*| is allowed by strict aliasing
  // because we require |uint8_t| to be a character type.
  const uint8_t *state_bytes = (const uint8_t *)ctx->state;
  while (out_len) {
    if (ctx->squeeze_offset == ctx->rate_bytes) {
      keccak_f(ctx->state);
      ctx->squeeze_offset = 0;
    }
    size_t remaining = ctx->rate_bytes - ctx->squeeze_offset;
    size_t todo = out_len;
    if (todo > remaining) {
      todo = remaining;
    }
    OPENSSL_memcpy(out, &state_bytes[ctx->squeeze_offset], todo);
    out += todo;
    out_len -= todo;
    ctx->squeeze_offset += todo;
  }
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/mldsa/mldsa.cc.inc
================================================
/* Copyright 2014 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

// NOTE(review): the seven bare #include directives below lost their
// "<header>" arguments during text extraction (angle-bracket spans were
// stripped) — restore from the vendored original before compiling. The quoted
// project includes survived intact.
#include
#include
#include
#include
#include
#include
#include

#include "../../internal.h"
#include "../bcm_interface.h"
#include "../keccak/internal.h"

namespace mldsa {
namespace {

// Dimension of each polynomial (number of coefficients).
constexpr int kDegree = 256;
// Byte sizes of the various seeds and hashes used by ML-DSA (FIPS 204).
constexpr int kRhoBytes = 32;
constexpr int kSigmaBytes = 64;
constexpr int kKBytes = 32;
constexpr int kTrBytes = 64;
constexpr int kMuBytes = 64;
constexpr int kRhoPrimeBytes = 64;

// 2^23 - 2^13 + 1
constexpr uint32_t kPrime = 8380417;
// Inverse of -kPrime modulo 2^32
constexpr uint32_t kPrimeNegInverse = 4236238847;
constexpr int kDroppedBits = 13;
constexpr uint32_t kHalfPrime = (kPrime - 1) / 2;
constexpr uint32_t kGamma2 = (kPrime - 1) / 32;
// 256^-1 mod kPrime, in Montgomery form.
constexpr uint32_t kInverseDegreeMontgomery = 41978;

// Constants that vary depending on ML-DSA size.
//
// These are implemented as templates which take the K parameter to distinguish
// the ML-DSA sizes. (K == 6 is ML-DSA-65, K == 8 is ML-DSA-87.)
//
// NOTE(review): every "template" keyword below lost its "<...>" parameter
// list in text extraction; each is presumably "template <int K>" — confirm
// against the vendored original before compiling.
template
constexpr size_t public_key_bytes() {
  if constexpr (K == 6) {
    return BCM_MLDSA65_PUBLIC_KEY_BYTES;
  } else if constexpr (K == 8) {
    return BCM_MLDSA87_PUBLIC_KEY_BYTES;
  }
}

template
constexpr size_t signature_bytes() {
  if constexpr (K == 6) {
    return BCM_MLDSA65_SIGNATURE_BYTES;
  } else if constexpr (K == 8) {
    return BCM_MLDSA87_SIGNATURE_BYTES;
  }
}

// Number of ±1 entries in the challenge polynomial.
template
constexpr int tau() {
  if constexpr (K == 6) {
    return 49;
  } else if constexpr (K == 8) {
    return 60;
  }
}

template
constexpr int lambda_bytes() {
  if constexpr (K == 6) {
    return 192 / 8;
  } else if constexpr (K == 8) {
    return 256 / 8;
  }
}

template
constexpr int gamma1() {
  if constexpr (K == 6 || K == 8) {
    return 1 << 19;
  }
}

template
constexpr int beta() {
  if constexpr (K == 6) {
    return 196;
  } else if constexpr (K == 8) {
    return 120;
  }
}

template
constexpr int omega() {
  if constexpr (K == 6) {
    return 55;
  } else if constexpr (K == 8) {
    return 75;
  }
}

// Secret-key coefficient bound η.
template
constexpr int eta() {
  if constexpr (K == 6) {
    return 4;
  } else if constexpr (K == 8) {
    return 2;
  }
}

template
constexpr int plus_minus_eta_bitlen() {
  if constexpr (K == 6) {
    return 4;
  } else if constexpr (K == 8) {
    return 3;
  }
}

// Fundamental types.
// NOTE(review): |vector| and |matrix| also lost their template parameter
// lists ("<int K>" / "<int K, int L>" presumably) in extraction.
typedef struct scalar {
  uint32_t c[kDegree];
} scalar;

template
struct vector {
  scalar v[K];
};

template
struct matrix {
  scalar v[K][L];
};

/* Arithmetic */

// This bit of Python will be referenced in some of the following comments:
//
// q = 8380417
// # Inverse of -q modulo 2^32
// q_neg_inverse = 4236238847
// # 2^64 modulo q
// montgomery_square = 2365951
//
// def bitreverse(i):
//     ret = 0
//     for n in range(8):
//         bit = i & 1
//         ret <<= 1
//         ret |= bit
//         i >>= 1
//     return ret
//
// def montgomery_reduce(x):
//     a = (x * q_neg_inverse) % 2**32
//     b = x + a * q
//     assert b & 0xFFFF_FFFF == 0
//     c = b >> 32
//     assert c < q
//     return c
//
// def montgomery_transform(x):
//     return montgomery_reduce(x * montgomery_square)

// kNTTRootsMontgomery = [
//     montgomery_transform(pow(1753, bitreverse(i), q)) for i in range(256)
// ]
static const uint32_t kNTTRootsMontgomery[256] = {
    4193792, 25847,   5771523, 7861508, 237124,  7602457, 7504169, 466468,
    1826347, 2353451, 8021166, 6288512, 3119733, 5495562, 3111497, 2680103,
    2725464, 1024112, 7300517, 3585928, 7830929, 7260833, 2619752, 6271868,
    6262231, 4520680, 6980856, 5102745, 1757237, 8360995, 4010497, 280005,
    2706023, 95776,   3077325, 3530437, 6718724, 4788269, 5842901, 3915439,
    4519302, 5336701, 3574422, 5512770, 3539968, 8079950, 2348700, 7841118,
    6681150, 6736599, 3505694, 4558682, 3507263, 6239768, 6779997, 3699596,
    811944,  531354,  954230,  3881043, 3900724, 5823537, 2071892, 5582638,
    4450022, 6851714, 4702672, 5339162, 6927966, 3475950, 2176455, 6795196,
    7122806, 1939314, 4296819, 7380215, 5190273, 5223087, 4747489, 126922,
    3412210, 7396998, 2147896, 2715295, 5412772, 4686924, 7969390, 5903370,
    7709315, 7151892, 8357436, 7072248, 7998430, 1349076, 1852771, 6949987,
    5037034, 264944,  508951,  3097992, 44288,   7280319, 904516,  3958618,
    4656075, 8371839, 1653064, 5130689, 2389356, 8169440, 759969,  7063561,
    189548,  4827145, 3159746, 6529015, 5971092, 8202977, 1315589, 1341330,
    1285669, 6795489, 7567685, 6940675, 5361315, 4499357, 4751448, 3839961,
    2091667, 3407706, 2316500, 3817976, 5037939, 2244091, 5933984, 4817955,
    266997,  2434439, 7144689, 3513181, 4860065, 4621053, 7183191, 5187039,
    900702,  1859098, 909542,  819034,  495491,  6767243, 8337157, 7857917,
    7725090, 5257975, 2031748, 3207046, 4823422, 7855319, 7611795, 4784579,
    342297,  286988,  5942594, 4108315, 3437287, 5038140, 1735879, 203044,
    2842341, 2691481, 5790267, 1265009, 4055324, 1247620, 2486353, 1595974,
    4613401, 1250494, 2635921, 4832145, 5386378, 1869119, 1903435, 7329447,
    7047359, 1237275, 5062207, 6950192, 7929317, 1312455, 3306115, 6417775,
    7100756, 1917081, 5834105, 7005614, 1500165, 777191,  2235880, 3406031,
    7838005, 5548557, 6709241, 6533464, 5796124, 4656147, 594136,  4603424,
    6366809, 2432395, 2454455, 8215696, 1957272, 3369112, 185531,  7173032,
    5196991, 162844,  1616392, 3014001, 810149,  1652634, 4686184, 6581310,
    5341501, 3523897, 3866901, 269760,  2213111, 7404533, 1717735, 472078,
    7953734, 1723600, 6577327, 1910376, 6712985, 7276084, 8119771, 4546524,
    5441381, 6144432, 7959518, 6094090, 183443,  7403526, 1612842, 4834730,
    7826001, 3919660, 8332111, 7018208, 3937738, 1400424, 7534263, 1976782};

// Reduces x mod kPrime in constant time, where 0 <= x < 2*kPrime.
uint32_t reduce_once(uint32_t x) {
  declassify_assert(x < 2 * kPrime);
  // return x < kPrime ? x : x - kPrime;
  return constant_time_select_int(constant_time_lt_w(x, kPrime), x, x - kPrime);
}

// Returns the absolute value in constant time.
uint32_t abs_signed(uint32_t x) {
  // return is_positive(x) ? x : -x;
  // Note: MSVC doesn't like applying the unary minus operator to unsigned types
  // (warning C4146), so we write the negation as a bitwise not plus one
  // (assuming two's complement representation).
  return constant_time_select_int(constant_time_lt_w(x, 0x80000000), x, 0u - x);
}

// Returns the absolute value modulo kPrime.
uint32_t abs_mod_prime(uint32_t x) {
  declassify_assert(x < kPrime);
  // return x > kHalfPrime ? kPrime - x : x;
  return constant_time_select_int(constant_time_lt_w(kHalfPrime, x), kPrime - x,
                                  x);
}

// Returns the maximum of two values in constant time.
uint32_t maximum(uint32_t x, uint32_t y) {
  // return x < y ? y : x;
  return constant_time_select_int(constant_time_lt_w(x, y), y, x);
}

// Computes a - b mod kPrime in constant time; both inputs must already be
// reduced (< kPrime).
uint32_t mod_sub(uint32_t a, uint32_t b) {
  declassify_assert(a < kPrime);
  declassify_assert(b < kPrime);
  return reduce_once(kPrime + a - b);
}

// Coefficient-wise addition mod kPrime.
void scalar_add(scalar *out, const scalar *lhs, const scalar *rhs) {
  for (int i = 0; i < kDegree; i++) {
    out->c[i] = reduce_once(lhs->c[i] + rhs->c[i]);
  }
}

// Coefficient-wise subtraction mod kPrime.
void scalar_sub(scalar *out, const scalar *lhs, const scalar *rhs) {
  for (int i = 0; i < kDegree; i++) {
    out->c[i] = mod_sub(lhs->c[i], rhs->c[i]);
  }
}

// Montgomery reduction: returns x * 2^-32 mod kPrime, for
// 0 <= x <= kPrime * 2^32. (See the Python |montgomery_reduce| above.)
uint32_t reduce_montgomery(uint64_t x) {
  declassify_assert(x <= ((uint64_t)kPrime << 32));
  uint64_t a = (uint32_t)x * kPrimeNegInverse;
  uint64_t b = x + a * kPrime;
  declassify_assert((b & 0xffffffff) == 0);
  uint32_t c = b >> 32;
  return reduce_once(c);
}

// Multiply two scalars in the number theoretically transformed state.
void scalar_mult(scalar *out, const scalar *lhs, const scalar *rhs) {
  for (int i = 0; i < kDegree; i++) {
    out->c[i] = reduce_montgomery((uint64_t)lhs->c[i] * (uint64_t)rhs->c[i]);
  }
}

// In place number theoretic transform of a given scalar.
//
// FIPS 204, Algorithm 41 (`NTT`).
static void scalar_ntt(scalar *s) {
  // Step: 1, 2, 4, 8, ..., 128
  // Offset: 128, 64, 32, 16, ..., 1
  int offset = kDegree;
  for (int step = 1; step < kDegree; step <<= 1) {
    offset >>= 1;
    int k = 0;
    for (int i = 0; i < step; i++) {
      assert(k == 2 * offset * i);
      const uint32_t step_root = kNTTRootsMontgomery[step + i];
      for (int j = k; j < k + offset; j++) {
        uint32_t even = s->c[j];
        // |reduce_montgomery| works on values up to kPrime*R and R > 2*kPrime.
        // |step_root| < kPrime because it's static data. |s->c[...]| is <
        // kPrime by the invariants of that struct.
        uint32_t odd =
            reduce_montgomery((uint64_t)step_root * (uint64_t)s->c[j + offset]);
        s->c[j] = reduce_once(odd + even);
        s->c[j + offset] = mod_sub(even, odd);
      }
      k += 2 * offset;
    }
  }
}

// In place inverse number theoretic transform of a given scalar.
//
// FIPS 204, Algorithm 42 (`NTT^-1`).
void scalar_inverse_ntt(scalar *s) {
  // Step: 128, 64, 32, 16, ..., 1
  // Offset: 1, 2, 4, 8, ..., 128
  int step = kDegree;
  for (int offset = 1; offset < kDegree; offset <<= 1) {
    step >>= 1;
    int k = 0;
    for (int i = 0; i < step; i++) {
      assert(k == 2 * offset * i);
      const uint32_t step_root =
          kPrime - kNTTRootsMontgomery[step + (step - 1 - i)];
      for (int j = k; j < k + offset; j++) {
        uint32_t even = s->c[j];
        uint32_t odd = s->c[j + offset];
        s->c[j] = reduce_once(odd + even);

        // |reduce_montgomery| works on values up to kPrime*R and R > 2*kPrime.
        // kPrime + even < 2*kPrime because |even| < kPrime, by the invariants
        // of that structure. Thus kPrime + even - odd < 2*kPrime because odd >=
        // 0, because it's unsigned and less than kPrime. Lastly step_root <
        // kPrime, because |kNTTRootsMontgomery| is static data.
        s->c[j + offset] = reduce_montgomery((uint64_t)step_root *
                                             (uint64_t)(kPrime + even - odd));
      }
      k += 2 * offset;
    }
  }

  // Multiply by 256^-1 (in Montgomery form) to undo the scaling introduced by
  // the forward transform.
  for (int i = 0; i < kDegree; i++) {
    s->c[i] = reduce_montgomery((uint64_t)s->c[i] *
                                (uint64_t)kInverseDegreeMontgomery);
  }
}

// Element-wise vector/matrix helpers lifting the scalar operations above.
// NOTE(review): every "template" below lost its "<...>" parameter list in
// text extraction (presumably "<int X>", and "<int K, int L>" for
// |matrix_mult|) — confirm against the vendored original.
template
void vector_zero(vector *out) {
  OPENSSL_memset(out, 0, sizeof(*out));
}

template
void vector_add(vector *out, const vector *lhs, const vector *rhs) {
  for (int i = 0; i < X; i++) {
    scalar_add(&out->v[i], &lhs->v[i], &rhs->v[i]);
  }
}

template
void vector_sub(vector *out, const vector *lhs, const vector *rhs) {
  for (int i = 0; i < X; i++) {
    scalar_sub(&out->v[i], &lhs->v[i], &rhs->v[i]);
  }
}

template
void vector_mult_scalar(vector *out, const vector *lhs, const scalar *rhs) {
  for (int i = 0; i < X; i++) {
    scalar_mult(&out->v[i], &lhs->v[i], rhs);
  }
}

template
void vector_ntt(vector *a) {
  for (int i = 0; i < X; i++) {
    scalar_ntt(&a->v[i]);
  }
}

template
void vector_inverse_ntt(vector *a) {
  for (int i = 0; i < X; i++) {
    scalar_inverse_ntt(&a->v[i]);
  }
}

// out = m * a, with all operands in the NTT domain.
template
void matrix_mult(vector *out, const matrix *m, const vector *a) {
  vector_zero(out);
  for (int i = 0; i < K; i++) {
    for (int j = 0; j < L; j++) {
      scalar product;
      scalar_mult(&product, &m->v[i][j], &a->v[j]);
      scalar_add(&out->v[i], &out->v[i], &product);
    }
  }
}

/* Rounding & hints */

// FIPS 204, Algorithm 35 (`Power2Round`).
void power2_round(uint32_t *r1, uint32_t *r0, uint32_t r) {
  *r1 = r >> kDroppedBits;
  *r0 = r - (*r1 << kDroppedBits);

  uint32_t r0_adjusted = mod_sub(*r0, 1 << kDroppedBits);
  uint32_t r1_adjusted = *r1 + 1;

  // Mask is set iff r0 > 2^(dropped_bits - 1).
  crypto_word_t mask =
      constant_time_lt_w((uint32_t)(1 << (kDroppedBits - 1)), *r0);
  // r0 = mask ? r0_adjusted : r0
  *r0 = constant_time_select_int(mask, r0_adjusted, *r0);
  // r1 = mask ? r1_adjusted : r1
  *r1 = constant_time_select_int(mask, r1_adjusted, *r1);
}

// Scale back previously rounded value.
void scale_power2_round(uint32_t *out, uint32_t r1) {
  // Pre-condition: 0 <= r1 <= 2^10 - 1
  assert(r1 < (1u << 10));

  *out = r1 << kDroppedBits;

  // Post-condition: 0 <= out <= 2^23 - 2^13 = kPrime - 1
  assert(*out < kPrime);
}

// FIPS 204, Algorithm 37 (`HighBits`).
uint32_t high_bits(uint32_t x) {
  // Reference description (given 0 <= x < q):
  //
  // ```
  // int32_t r0 = x mod+- (2 * kGamma2);
  // if (x - r0 == q - 1) {
  //   return 0;
  // } else {
  //   return (x - r0) / (2 * kGamma2);
  // }
  // ```
  //
  // Below is the formula taken from the reference implementation.
  //
  // Here, kGamma2 == 2^18 - 2^8
  // This returns ((ceil(x / 2^7) * (2^10 + 1) + 2^21) / 2^22) mod 2^4
  uint32_t r1 = (x + 127) >> 7;
  r1 = (r1 * 1025 + (1 << 21)) >> 22;
  r1 &= 15;
  return r1;
}

// FIPS 204, Algorithm 36 (`Decompose`).
void decompose(uint32_t *r1, int32_t *r0, uint32_t r) {
  *r1 = high_bits(r);

  *r0 = r;
  *r0 -= *r1 * 2 * (int32_t)kGamma2;
  *r0 -= (((int32_t)kHalfPrime - *r0) >> 31) & (int32_t)kPrime;
}

// FIPS 204, Algorithm 38 (`LowBits`).
int32_t low_bits(uint32_t x) {
  uint32_t r1;
  int32_t r0;
  decompose(&r1, &r0, x);
  return r0;
}

// FIPS 204, Algorithm 39 (`MakeHint`).
//
// In the spec this takes two arguments, z and r, and is called with
//   z = -ct0
//   r = w - cs2 + ct0
//
// It then computes HighBits (algorithm 37) of z and z+r. But z+r is just w -
// cs2, so this takes three arguments and saves an addition.
int32_t make_hint(uint32_t ct0, uint32_t cs2, uint32_t w) {
  uint32_t r_plus_z = mod_sub(w, cs2);
  uint32_t r = reduce_once(r_plus_z + ct0);
  return high_bits(r) != high_bits(r_plus_z);
}

// FIPS 204, Algorithm 40 (`UseHint`).
uint32_t use_hint_vartime(uint32_t h, uint32_t r) {
  uint32_t r1;
  int32_t r0;
  decompose(&r1, &r0, r);

  if (h) {
    if (r0 > 0) {
      // m = 16, thus |mod m| in the spec turns into |& 15|.
      return (r1 + 1) & 15;
    } else {
      return (r1 - 1) & 15;
    }
  }
  return r1;
}

// Coefficient-wise liftings of the rounding/hint helpers above.
void scalar_power2_round(scalar *s1, scalar *s0, const scalar *s) {
  for (int i = 0; i < kDegree; i++) {
    power2_round(&s1->c[i], &s0->c[i], s->c[i]);
  }
}

void scalar_scale_power2_round(scalar *out, const scalar *in) {
  for (int i = 0; i < kDegree; i++) {
    scale_power2_round(&out->c[i], in->c[i]);
  }
}

void scalar_high_bits(scalar *out, const scalar *in) {
  for (int i = 0; i < kDegree; i++) {
    out->c[i] = high_bits(in->c[i]);
  }
}

void scalar_low_bits(scalar *out, const scalar *in) {
  for (int i = 0; i < kDegree; i++) {
    out->c[i] = low_bits(in->c[i]);
  }
}

// Folds the largest |abs_mod_prime| coefficient magnitude into |*max|.
void scalar_max(uint32_t *max, const scalar *s) {
  for (int i = 0; i < kDegree; i++) {
    uint32_t abs = abs_mod_prime(s->c[i]);
    *max = maximum(*max, abs);
  }
}

// Folds the largest |abs_signed| coefficient magnitude into |*max|.
void scalar_max_signed(uint32_t *max, const scalar *s) {
  for (int i = 0; i < kDegree; i++) {
    uint32_t abs = abs_signed(s->c[i]);
    *max = maximum(*max, abs);
  }
}

void scalar_make_hint(scalar *out, const scalar *ct0, const scalar *cs2,
                      const scalar *w) {
  for (int i = 0; i < kDegree; i++) {
    out->c[i] = make_hint(ct0->c[i], cs2->c[i], w->c[i]);
  }
}

void scalar_use_hint_vartime(scalar *out, const scalar *h, const scalar *r) {
  for (int i = 0; i < kDegree; i++) {
    out->c[i] = use_hint_vartime(h->c[i], r->c[i]);
  }
}

// NOTE(review): every "template" below lost its "<...>" parameter list
// (presumably "<int X>") in text extraction — confirm against the vendored
// original.
template
void vector_power2_round(vector *t1, vector *t0, const vector *t) {
  for (int i = 0; i < X; i++) {
    scalar_power2_round(&t1->v[i], &t0->v[i], &t->v[i]);
  }
}

template
void vector_scale_power2_round(vector *out, const vector *in) {
  for (int i = 0; i < X; i++) {
    scalar_scale_power2_round(&out->v[i], &in->v[i]);
  }
}

template
void vector_high_bits(vector *out, const vector *in) {
  for (int i = 0; i < X; i++) {
    scalar_high_bits(&out->v[i], &in->v[i]);
  }
}

template
void vector_low_bits(vector *out, const vector *in) {
  for (int i = 0; i < X; i++) {
    scalar_low_bits(&out->v[i], &in->v[i]);
  }
}

template
uint32_t vector_max(const vector *a) {
  uint32_t max = 0;
  for (int i = 0; i < X; i++) {
    scalar_max(&max, &a->v[i]);
  }
  return max;
}

template
uint32_t vector_max_signed(const vector *a) {
  uint32_t max = 0;
  for (int i = 0; i < X; i++) {
    scalar_max_signed(&max, &a->v[i]);
  }
  return max;
}

// The input vector contains only zeroes and ones.
template
size_t vector_count_ones(const vector *a) {
  size_t count = 0;
  for (int i = 0; i < X; i++) {
    for (int j = 0; j < kDegree; j++) {
      count += a->v[i].c[j];
    }
  }
  return count;
}

template
void vector_make_hint(vector *out, const vector *ct0, const vector *cs2,
                      const vector *w) {
  for (int i = 0; i < X; i++) {
    scalar_make_hint(&out->v[i], &ct0->v[i], &cs2->v[i], &w->v[i]);
  }
}

template
void vector_use_hint_vartime(vector *out, const vector *h, const vector *r) {
  for (int i = 0; i < X; i++) {
    scalar_use_hint_vartime(&out->v[i], &h->v[i], &r->v[i]);
  }
}

/* Bit packing */

// FIPS 204, Algorithm 16 (`SimpleBitPack`). Specialized to bitlen(b) = 4.
static void scalar_encode_4(uint8_t out[128], const scalar *s) {
  // Every two elements lands on a byte boundary.
  static_assert(kDegree % 2 == 0, "kDegree must be a multiple of 2");
  for (int i = 0; i < kDegree / 2; i++) {
    uint32_t a = s->c[2 * i];
    uint32_t b = s->c[2 * i + 1];
    declassify_assert(a < 16);
    declassify_assert(b < 16);
    out[i] = a | (b << 4);
  }
}

// FIPS 204, Algorithm 16 (`SimpleBitPack`). Specialized to bitlen(b) = 10.
void scalar_encode_10(uint8_t out[320], const scalar *s) {
  // Every four elements lands on a byte boundary.
  static_assert(kDegree % 4 == 0, "kDegree must be a multiple of 4");
  for (int i = 0; i < kDegree / 4; i++) {
    uint32_t a = s->c[4 * i];
    uint32_t b = s->c[4 * i + 1];
    uint32_t c = s->c[4 * i + 2];
    uint32_t d = s->c[4 * i + 3];
    declassify_assert(a < 1024);
    declassify_assert(b < 1024);
    declassify_assert(c < 1024);
    declassify_assert(d < 1024);
    out[5 * i] = (uint8_t)a;
    out[5 * i + 1] = (uint8_t)((a >> 8) | (b << 2));
    out[5 * i + 2] = (uint8_t)((b >> 6) | (c << 4));
    out[5 * i + 3] = (uint8_t)((c >> 4) | (d << 6));
    out[5 * i + 4] = (uint8_t)(d >> 2);
  }
}

// FIPS 204, Algorithm 17 (`BitPack`). Specialized to bitlen(a+b) = 4 and b = 4.
void scalar_encode_signed_4_4(uint8_t out[128], const scalar *s) {
  // Every two elements lands on a byte boundary.
  static_assert(kDegree % 2 == 0, "kDegree must be a multiple of 2");
  for (int i = 0; i < kDegree / 2; i++) {
    uint32_t a = mod_sub(4, s->c[2 * i]);
    uint32_t b = mod_sub(4, s->c[2 * i + 1]);
    declassify_assert(a < 16);
    declassify_assert(b < 16);
    out[i] = a | (b << 4);
  }
}

// FIPS 204, Algorithm 17 (`BitPack`). Specialized to bitlen(a+b) = 3 and b = 2.
static void scalar_encode_signed_3_2(uint8_t out[96], const scalar *s) {
  static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8");
  for (int i = 0; i < kDegree / 8; i++) {
    // Eight 3-bit values pack into the low 24 bits of |v|, i.e. 3 bytes.
    uint32_t a = mod_sub(2, s->c[8 * i]);
    uint32_t b = mod_sub(2, s->c[8 * i + 1]);
    uint32_t c = mod_sub(2, s->c[8 * i + 2]);
    uint32_t d = mod_sub(2, s->c[8 * i + 3]);
    uint32_t e = mod_sub(2, s->c[8 * i + 4]);
    uint32_t f = mod_sub(2, s->c[8 * i + 5]);
    uint32_t g = mod_sub(2, s->c[8 * i + 6]);
    uint32_t h = mod_sub(2, s->c[8 * i + 7]);
    uint32_t v = (h << 21) | (g << 18) | (f << 15) | (e << 12) | (d << 9) |
                 (c << 6) | (b << 3) | a;
    uint8_t v_bytes[sizeof(v)];
    CRYPTO_store_u32_le(v_bytes, v);
    OPENSSL_memcpy(&out[i * 3], v_bytes, 3);
  }
}

// FIPS 204, Algorithm 17 (`BitPack`). Specialized to bitlen(a+b) = 13 and b =
// 2^12.
void scalar_encode_signed_13_12(uint8_t out[416], const scalar *s) {
  static const uint32_t kMax = 1u << 12;

  // Every eight elements lands on a byte boundary (8 * 13 bits = 13 bytes).
  static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8");
  for (int i = 0; i < kDegree / 8; i++) {
    uint32_t a = mod_sub(kMax, s->c[8 * i]);
    uint32_t b = mod_sub(kMax, s->c[8 * i + 1]);
    uint32_t c = mod_sub(kMax, s->c[8 * i + 2]);
    uint32_t d = mod_sub(kMax, s->c[8 * i + 3]);
    uint32_t e = mod_sub(kMax, s->c[8 * i + 4]);
    uint32_t f = mod_sub(kMax, s->c[8 * i + 5]);
    uint32_t g = mod_sub(kMax, s->c[8 * i + 6]);
    uint32_t h = mod_sub(kMax, s->c[8 * i + 7]);
    declassify_assert(a < (1u << 13));
    declassify_assert(b < (1u << 13));
    declassify_assert(c < (1u << 13));
    declassify_assert(d < (1u << 13));
    declassify_assert(e < (1u << 13));
    declassify_assert(f < (1u << 13));
    declassify_assert(g < (1u << 13));
    declassify_assert(h < (1u << 13));
    a |= b << 13;
    a |= c << 26;
    c >>= 6;
    c |= d << 7;
    c |= e << 20;
    e >>= 12;
    e |= f << 1;
    e |= g << 14;
    e |= h << 27;
    h >>= 5;
    OPENSSL_memcpy(&out[13 * i], &a, sizeof(a));
    OPENSSL_memcpy(&out[13 * i + 4], &c, sizeof(c));
    OPENSSL_memcpy(&out[13 * i + 8], &e, sizeof(e));
    OPENSSL_memcpy(&out[13 * i + 12], &h, 1);
  }
}

// FIPS 204, Algorithm 17 (`BitPack`). Specialized to bitlen(a+b) = 20 and b =
// 2^19.
void scalar_encode_signed_20_19(uint8_t out[640], const scalar *s) {
  static const uint32_t kMax = 1u << 19;

  // Every four elements lands on a byte boundary (4 * 20 bits = 10 bytes).
  static_assert(kDegree % 4 == 0, "kDegree must be a multiple of 4");
  for (int i = 0; i < kDegree / 4; i++) {
    uint32_t a = mod_sub(kMax, s->c[4 * i]);
    uint32_t b = mod_sub(kMax, s->c[4 * i + 1]);
    uint32_t c = mod_sub(kMax, s->c[4 * i + 2]);
    uint32_t d = mod_sub(kMax, s->c[4 * i + 3]);
    declassify_assert(a < (1u << 20));
    declassify_assert(b < (1u << 20));
    declassify_assert(c < (1u << 20));
    declassify_assert(d < (1u << 20));
    a |= b << 20;
    b >>= 12;
    b |= c << 8;
    b |= d << 28;
    d >>= 4;
    OPENSSL_memcpy(&out[10 * i], &a, sizeof(a));
    OPENSSL_memcpy(&out[10 * i + 4], &b, sizeof(b));
    OPENSSL_memcpy(&out[10 * i + 8], &d, 2);
  }
}

// FIPS 204, Algorithm 17 (`BitPack`).
//
// Dispatches to the specialized encoder for the given |bits|/|max|
// combination; the asserts document the only supported pairings.
void scalar_encode_signed(uint8_t *out, const scalar *s, int bits,
                          uint32_t max) {
  if (bits == 3) {
    assert(max == 2);
    scalar_encode_signed_3_2(out, s);
  } else if (bits == 4) {
    assert(max == 4);
    scalar_encode_signed_4_4(out, s);
  } else if (bits == 20) {
    assert(max == 1u << 19);
    scalar_encode_signed_20_19(out, s);
  } else {
    assert(bits == 13);
    assert(max == 1u << 12);
    scalar_encode_signed_13_12(out, s);
  }
}

// FIPS 204, Algorithm 18 (`SimpleBitUnpack`). Specialized for bitlen(b) == 10.
void scalar_decode_10(scalar *out, const uint8_t in[320]) {
  uint32_t v;
  static_assert(kDegree % 4 == 0, "kDegree must be a multiple of 4");
  for (int i = 0; i < kDegree / 4; i++) {
    OPENSSL_memcpy(&v, &in[5 * i], sizeof(v));
    out->c[4 * i] = v & 0x3ff;
    out->c[4 * i + 1] = (v >> 10) & 0x3ff;
    out->c[4 * i + 2] = (v >> 20) & 0x3ff;
    out->c[4 * i + 3] = (v >> 30) | (((uint32_t)in[5 * i + 4]) << 2);
  }
}

// FIPS 204, Algorithm 19 (`BitUnpack`). Specialized to bitlen(a+b) = 4 and b =
// 4. Returns 0 (and leaves |out| partially written) if any nibble is out of
// range, 1 on success.
int scalar_decode_signed_4_4(scalar *out, const uint8_t in[128]) {
  uint32_t v;
  static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8");
  for (int i = 0; i < kDegree / 8; i++) {
    OPENSSL_memcpy(&v, &in[4 * i], sizeof(v));
    // None of the nibbles may be >= 9. So if the MSB of any nibble is set, none
    // of the other bits may be set. First, select all the MSBs.
    const uint32_t msbs = v & 0x88888888u;
    // For each nibble where the MSB is set, form a mask of all the other bits.
    const uint32_t mask = (msbs >> 1) | (msbs >> 2) | (msbs >> 3);
    // A nibble is only out of range in the case of invalid input, in which case
    // it is okay to leak the value.
    if (constant_time_declassify_int((mask & v) != 0)) {
      return 0;
    }
    out->c[i * 8] = mod_sub(4, v & 15);
    out->c[i * 8 + 1] = mod_sub(4, (v >> 4) & 15);
    out->c[i * 8 + 2] = mod_sub(4, (v >> 8) & 15);
    out->c[i * 8 + 3] = mod_sub(4, (v >> 12) & 15);
    out->c[i * 8 + 4] = mod_sub(4, (v >> 16) & 15);
    out->c[i * 8 + 5] = mod_sub(4, (v >> 20) & 15);
    out->c[i * 8 + 6] = mod_sub(4, (v >> 24) & 15);
    out->c[i * 8 + 7] = mod_sub(4, v >> 28);
  }
  return 1;
}

// FIPS 204, Algorithm 19 (`BitUnpack`). Specialized to bitlen(a+b) = 3 and b =
// 2. Returns 0 on out-of-range input, 1 on success.
static int scalar_decode_signed_3_2(scalar *out, const uint8_t in[96]) {
  uint32_t v;
  uint8_t v_bytes[sizeof(v)] = {0};
  static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8");
  for (int i = 0; i < kDegree / 8; i++) {
    OPENSSL_memcpy(v_bytes, &in[3 * i], 3);
    v = CRYPTO_load_u32_le(v_bytes);
    // v contains 8, 3-bit values in the lower 24 bits. None of the values may
    // be >= 5. So if the MSB of any triple is set, none of the other bits may
    // be set. First, select all the MSBs. (The octal literal selects bit 2 of
    // each 3-bit triple.)
    const uint32_t msbs = v & 000044444444u;
    // For each triple where the MSB is set, form a mask of all the other bits.
    const uint32_t mask = (msbs >> 1) | (msbs >> 2);
    // A triple is only out of range in the case of invalid input, in which case
    // it is okay to leak the value.
    if (constant_time_declassify_int((mask & v) != 0)) {
      return 0;
    }
    out->c[i * 8 + 0] = mod_sub(2, (v >> 0) & 7);
    out->c[i * 8 + 1] = mod_sub(2, (v >> 3) & 7);
    out->c[i * 8 + 2] = mod_sub(2, (v >> 6) & 7);
    out->c[i * 8 + 3] = mod_sub(2, (v >> 9) & 7);
    out->c[i * 8 + 4] = mod_sub(2, (v >> 12) & 7);
    out->c[i * 8 + 5] = mod_sub(2, (v >> 15) & 7);
    out->c[i * 8 + 6] = mod_sub(2, (v >> 18) & 7);
    out->c[i * 8 + 7] = mod_sub(2, v >> 21);
  }
  return 1;
}

// FIPS 204, Algorithm 19 (`BitUnpack`). Specialized to bitlen(a+b) = 13 and b =
// 2^12.
void scalar_decode_signed_13_12(scalar *out, const uint8_t in[416]) {
  static const uint32_t kMax = 1u << 12;
  static const uint32_t k13Bits = (1u << 13) - 1;
  static const uint32_t k7Bits = (1u << 7) - 1;

  uint32_t a, b, c;
  uint8_t d;
  static_assert(kDegree % 8 == 0, "kDegree must be a multiple of 8");
  for (int i = 0; i < kDegree / 8; i++) {
    OPENSSL_memcpy(&a, &in[13 * i], sizeof(a));
    OPENSSL_memcpy(&b, &in[13 * i + 4], sizeof(b));
    OPENSSL_memcpy(&c, &in[13 * i + 8], sizeof(c));
    d = in[13 * i + 12];

    // It's not possible for a 13-bit number to be out of range when the max is
    // 2^12.
    out->c[i * 8] = mod_sub(kMax, a & k13Bits);
    out->c[i * 8 + 1] = mod_sub(kMax, (a >> 13) & k13Bits);
    out->c[i * 8 + 2] = mod_sub(kMax, (a >> 26) | ((b & k7Bits) << 6));
    out->c[i * 8 + 3] = mod_sub(kMax, (b >> 7) & k13Bits);
    out->c[i * 8 + 4] = mod_sub(kMax, (b >> 20) | ((c & 1) << 12));
    out->c[i * 8 + 5] = mod_sub(kMax, (c >> 1) & k13Bits);
    out->c[i * 8 + 6] = mod_sub(kMax, (c >> 14) & k13Bits);
    out->c[i * 8 + 7] = mod_sub(kMax, (c >> 27) | ((uint32_t)d) << 5);
  }
}

// FIPS 204, Algorithm 19 (`BitUnpack`). Specialized to bitlen(a+b) = 20 and b =
// 2^19.
void scalar_decode_signed_20_19(scalar *out, const uint8_t in[640]) {
  static const uint32_t kMax = 1u << 19;
  static const uint32_t k20Bits = (1u << 20) - 1;

  uint32_t a, b;
  uint16_t c;
  static_assert(kDegree % 4 == 0, "kDegree must be a multiple of 4");
  for (int i = 0; i < kDegree / 4; i++) {
    OPENSSL_memcpy(&a, &in[10 * i], sizeof(a));
    OPENSSL_memcpy(&b, &in[10 * i + 4], sizeof(b));
    OPENSSL_memcpy(&c, &in[10 * i + 8], sizeof(c));

    // It's not possible for a 20-bit number to be out of range when the max is
    // 2^19.
    out->c[i * 4] = mod_sub(kMax, a & k20Bits);
    out->c[i * 4 + 1] = mod_sub(kMax, (a >> 20) | ((b & 0xff) << 12));
    out->c[i * 4 + 2] = mod_sub(kMax, (b >> 8) & k20Bits);
    out->c[i * 4 + 3] = mod_sub(kMax, (b >> 28) | ((uint32_t)c) << 4);
  }
}

// FIPS 204, Algorithm 19 (`BitUnpack`).
//
// Dispatches to the specialized decoder for the given |bits|/|max|
// combination. Returns 0 on out-of-range input (only possible for the
// 3- and 4-bit variants), 1 on success; aborts on an unsupported |bits|.
int scalar_decode_signed(scalar *out, const uint8_t *in, int bits,
                         uint32_t max) {
  if (bits == 3) {
    assert(max == 2);
    return scalar_decode_signed_3_2(out, in);
  } else if (bits == 4) {
    assert(max == 4);
    return scalar_decode_signed_4_4(out, in);
  } else if (bits == 13) {
    assert(max == (1u << 12));
    scalar_decode_signed_13_12(out, in);
    return 1;
  } else if (bits == 20) {
    assert(max == (1u << 19));
    scalar_decode_signed_20_19(out, in);
    return 1;
  } else {
    abort();
  }
}

/* Expansion functions */

// FIPS 204, Algorithm 30 (`RejNTTPoly`).
//
// Rejection samples a Keccak stream to get uniformly distributed elements. This
// is used for matrix expansion and only operates on public inputs.
void scalar_from_keccak_vartime(scalar *out,
                                const uint8_t derived_seed[kRhoBytes + 2]) {
  // Expands one SHAKE-128 stream (seeded with rho plus a two-byte index) into
  // a scalar with coefficients uniform in [0, kPrime). Variable time is fine:
  // the inputs and outputs are public.
  struct BORINGSSL_keccak_st keccak_ctx;
  BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake128);
  BORINGSSL_keccak_absorb(&keccak_ctx, derived_seed, kRhoBytes + 2);
  assert(keccak_ctx.squeeze_offset == 0);
  assert(keccak_ctx.rate_bytes == 168);
  static_assert(168 % 3 == 0, "block and coefficient boundaries do not align");

  int done = 0;
  while (done < kDegree) {
    uint8_t block[168];
    BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block));
    for (size_t i = 0; i < sizeof(block) && done < kDegree; i += 3) {
      // FIPS 204, Algorithm 14 (`CoeffFromThreeBytes`).
      uint32_t value = (uint32_t)block[i] | ((uint32_t)block[i + 1] << 8) |
                       (((uint32_t)block[i + 2] & 0x7f) << 16);
      // Rejection sampling: keep only 23-bit candidates below kPrime.
      if (value < kPrime) {
        out->c[done++] = value;
      }
    }
  }
}

// NOTE(review): several `template` keywords below lost their parameter lists
// during extraction (e.g. this declaration reads bare `template`) -- restore
// them from upstream BoringSSL before building.
template static bool coefficient_from_nibble(uint32_t nibble, uint32_t *result);

// Eta = 4 case of FIPS 204, Algorithm 15: accept nibbles < 9 and map x to
// 4 - x (mod kPrime).
template <>
bool coefficient_from_nibble<4>(uint32_t nibble, uint32_t *result) {
  if (constant_time_declassify_int(nibble < 9)) {
    *result = mod_sub(4, nibble);
    return true;
  }
  return false;
}

// Eta = 2 case: accept nibbles < 15 and map x to 2 - (x mod 5) (mod kPrime).
template <>
bool coefficient_from_nibble<2>(uint32_t nibble, uint32_t *result) {
  if (constant_time_declassify_int(nibble < 15)) {
    *result = mod_sub(2, nibble % 5);
    return true;
  }
  return false;
}

// FIPS 204, Algorithm 31 (`RejBoundedPoly`).
template void scalar_uniform(scalar *out, const uint8_t derived_seed[kSigmaBytes + 2]) {
  struct BORINGSSL_keccak_st keccak_ctx;
  BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256);
  BORINGSSL_keccak_absorb(&keccak_ctx, derived_seed, kSigmaBytes + 2);
  assert(keccak_ctx.squeeze_offset == 0);
  assert(keccak_ctx.rate_bytes == 136);

  int done = 0;
  while (done < kDegree) {
    uint8_t block[136];
    BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block));
    for (size_t i = 0; i < sizeof(block) && done < kDegree; ++i) {
      // Each squeezed byte yields two candidate nibbles.
      uint32_t t0 = block[i] & 0x0F;
      uint32_t t1 = block[i] >> 4;
      // FIPS 204, Algorithm 15 (`CoefFromHalfByte`). Although both the input
      // and output here are secret, it is OK to leak when we rejected a byte.
      // Individual bytes of the SHAKE-256 stream are (indistinguishable from)
      // independent of each other and the original seed, so leaking information
      // about the rejected bytes does not reveal the input or output.
      uint32_t v;
      if (coefficient_from_nibble(t0, &v)) {
        out->c[done++] = v;
      }
      if (done < kDegree && coefficient_from_nibble(t1, &v)) {
        out->c[done++] = v;
      }
    }
  }
}

// FIPS 204, Algorithm 34 (`ExpandMask`), but just a single step.
//
// Fills |out| from 640 bytes of SHAKE-256 output; the 20-bit/b=2^19 decode
// cannot fail, so no rejection loop is needed.
void scalar_sample_mask(scalar *out,
                        const uint8_t derived_seed[kRhoPrimeBytes + 2]) {
  uint8_t buf[640];
  BORINGSSL_keccak(buf, sizeof(buf), derived_seed, kRhoPrimeBytes + 2,
                   boringssl_shake256);
  scalar_decode_signed_20_19(out, buf);
}

// FIPS 204, Algorithm 29 (`SampleInBall`).
//
// Produces a scalar with exactly |tau| coefficients equal to +/-1 (mod kPrime)
// and the rest zero, using a shuffle driven by a SHAKE-256 stream over |seed|.
void scalar_sample_in_ball_vartime(scalar *out, const uint8_t *seed, int len,
                                   int tau) {
  struct BORINGSSL_keccak_st keccak_ctx;
  BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256);
  BORINGSSL_keccak_absorb(&keccak_ctx, seed, len);
  assert(keccak_ctx.squeeze_offset == 0);
  assert(keccak_ctx.rate_bytes == 136);

  uint8_t block[136];
  BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block));

  // The first eight bytes provide the 64 sign bits consumed below.
  uint64_t signs = CRYPTO_load_u64_le(block);
  int offset = 8;
  // SampleInBall implements a Fisher–Yates shuffle, which unavoidably leaks
  // where the zeros are by memory access pattern. Although this leak happens
  // before bad signatures are rejected, this is safe. See
  // https://boringssl-review.googlesource.com/c/boringssl/+/67747/comment/8d8f01ac_70af3f21/
  CONSTTIME_DECLASSIFY(block + offset, sizeof(block) - offset);

  OPENSSL_memset(out, 0, sizeof(*out));
  for (size_t i = kDegree - tau; i < kDegree; i++) {
    size_t byte;
    // Rejection-sample an index |byte| <= i, refilling the stream as needed.
    for (;;) {
      if (offset == 136) {
        BORINGSSL_keccak_squeeze(&keccak_ctx, block, sizeof(block));
        // See above.
        CONSTTIME_DECLASSIFY(block, sizeof(block));
        offset = 0;
      }
      byte = block[offset++];
      if (byte <= i) {
        break;
      }
    }
    // Shuffle step: relocate the existing value and drop a fresh +/-1 in.
    out->c[i] = out->c[byte];
    out->c[byte] = mod_sub(1, 2 * (signs & 1));
    signs >>= 1;
  }
}

// FIPS 204, Algorithm 32 (`ExpandA`).
template void matrix_expand(matrix *out, const uint8_t rho[kRhoBytes]) {
  static_assert(K <= 0x100, "K must fit in 8 bits");
  static_assert(L <= 0x100, "L must fit in 8 bits");

  uint8_t derived_seed[kRhoBytes + 2];
  OPENSSL_memcpy(derived_seed, rho, kRhoBytes);
  for (int i = 0; i < K; i++) {
    for (int j = 0; j < L; j++) {
      // Entry A[i][j] is sampled from rho || j || i (column index first).
      derived_seed[kRhoBytes + 1] = (uint8_t)i;
      derived_seed[kRhoBytes] = (uint8_t)j;
      scalar_from_keccak_vartime(&out->v[i][j], derived_seed);
    }
  }
}

// FIPS 204, Algorithm 33 (`ExpandS`).
//
// Samples the short secret vectors s1 (length L) and s2 (length K) from
// |sigma|, with a distinct one-byte counter suffix per polynomial.
template void vector_expand_short(vector *s1, vector *s2, const uint8_t sigma[kSigmaBytes]) {
  static_assert(K <= 0x100, "K must fit in 8 bits");
  static_assert(L <= 0x100, "L must fit in 8 bits");
  static_assert(K + L <= 0x100, "K+L must fit in 8 bits");

  uint8_t derived_seed[kSigmaBytes + 2];
  OPENSSL_memcpy(derived_seed, sigma, kSigmaBytes);
  derived_seed[kSigmaBytes] = 0;
  derived_seed[kSigmaBytes + 1] = 0;
  // NOTE(review): the `scalar_uniform()>` calls below are extraction-garbled;
  // the eta template argument was stripped.
  for (int i = 0; i < L; i++) {
    scalar_uniform()>(&s1->v[i], derived_seed);
    ++derived_seed[kSigmaBytes];
  }
  for (int i = 0; i < K; i++) {
    scalar_uniform()>(&s2->v[i], derived_seed);
    ++derived_seed[kSigmaBytes];
  }
}

// FIPS 204, Algorithm 34 (`ExpandMask`).
//
// Derives the L masking polynomials for one signing attempt; |kappa| is the
// per-attempt counter folded into the two-byte little-endian suffix.
template void vector_expand_mask(vector *out, const uint8_t seed[kRhoPrimeBytes], size_t kappa) {
  assert(kappa + L <= 0x10000);

  uint8_t derived_seed[kRhoPrimeBytes + 2];
  OPENSSL_memcpy(derived_seed, seed, kRhoPrimeBytes);
  for (int i = 0; i < L; i++) {
    size_t index = kappa + i;
    derived_seed[kRhoPrimeBytes] = index & 0xFF;
    derived_seed[kRhoPrimeBytes + 1] = (index >> 8) & 0xFF;
    scalar_sample_mask(&out->v[i], derived_seed);
  }
}

/* Encoding */

// FIPS 204, Algorithm 16 (`SimpleBitPack`).
//
// Encodes an entire vector into 32*K*|bits| bytes.
// Note that since 256
// (kDegree) is divisible by 8, the individual vector entries will always fill a
// whole number of bytes, so we do not need to worry about bit packing here.
template void vector_encode(uint8_t *out, const vector *a, int bits) {
  // Only the two widths used for public values (w1 and t1) are supported.
  if (bits == 4) {
    for (int i = 0; i < K; i++) {
      scalar_encode_4(out + i * bits * kDegree / 8, &a->v[i]);
    }
  } else {
    assert(bits == 10);
    for (int i = 0; i < K; i++) {
      scalar_encode_10(out + i * bits * kDegree / 8, &a->v[i]);
    }
  }
}

// FIPS 204, Algorithm 18 (`SimpleBitUnpack`).
//
// Decodes K polynomials of 10-bit coefficients (the t1 half of the public
// key); every 10-bit value is a valid coefficient, so this cannot fail.
template void vector_decode_10(vector *out, const uint8_t *in) {
  for (int i = 0; i < K; i++) {
    scalar_decode_10(&out->v[i], in + i * 10 * kDegree / 8);
  }
}

// FIPS 204, Algorithm 17 (`BitPack`).
//
// Encodes an entire vector into 32*L*|bits| bytes. Note that since 256
// (kDegree) is divisible by 8, the individual vector entries will always fill a
// whole number of bytes, so we do not need to worry about bit packing here.
template void vector_encode_signed(uint8_t *out, const vector *a, int bits, uint32_t max) {
  for (int i = 0; i < X; i++) {
    scalar_encode_signed(out + i * bits * kDegree / 8, &a->v[i], bits, max);
  }
}

// Inverse of |vector_encode_signed|. Returns 0 if any polynomial fails the
// range check performed by |scalar_decode_signed|.
template int vector_decode_signed(vector *out, const uint8_t *in, int bits, uint32_t max) {
  for (int i = 0; i < X; i++) {
    if (!scalar_decode_signed(&out->v[i], in + i * bits * kDegree / 8, bits,
                              max)) {
      return 0;
    }
  }
  return 1;
}

// FIPS 204, Algorithm 28 (`w1Encode`).
template void w1_encode(uint8_t out[128 * K], const vector *w1) {
  vector_encode(out, w1, 4);
}

// FIPS 204, Algorithm 20 (`HintBitPack`).
//
// Packs the 0/1 hint vector |h| as a list of at most omega() set positions
// followed by K running end offsets, one per polynomial.
template void hint_bit_pack(uint8_t out[omega() + K], const vector *h) {
  OPENSSL_memset(out, 0, omega() + K);
  int index = 0;
  for (int i = 0; i < K; i++) {
    for (int j = 0; j < kDegree; j++) {
      if (h->v[i].c[j]) {
        // h must have at most omega() non-zero coefficients.
        BSSL_CHECK(index < omega());
        out[index++] = j;
      }
    }
    out[omega() + i] = index;
  }
}

// FIPS 204, Algorithm 21 (`HintBitUnpack`).
//
// Inverse of |hint_bit_pack|. Rejects (returns 0) non-canonical encodings:
// decreasing offsets, non-increasing positions, or non-zero padding.
template int hint_bit_unpack(vector *h, const uint8_t in[omega() + K]) {
  vector_zero(h);
  int index = 0;
  for (int i = 0; i < K; i++) {
    const int limit = in[omega() + i];
    if (limit < index || limit > omega()) {
      return 0;
    }
    int last = -1;
    while (index < limit) {
      int byte = in[index++];
      // Positions within one polynomial must be strictly increasing.
      if (last >= 0 && byte <= last) {
        return 0;
      }
      last = byte;
      static_assert(kDegree == 256,
                    "kDegree must be 256 for this write to be in bounds");
      h->v[i].c[byte] = 1;
    }
  }
  // Unused slots of the position list must be zero.
  for (; index < omega(); index++) {
    if (in[index] != 0) {
      return 0;
    }
  }
  return 1;
}

// rho || t1, plus a cached hash of the encoded key (tr).
template struct public_key {
  uint8_t rho[kRhoBytes];
  vector t1;
  // Pre-cached value(s).
  uint8_t public_key_hash[kTrBytes];
};

// rho || K || tr || s1 || s2 || t0, matching the skEncode layout below.
template struct private_key {
  uint8_t rho[kRhoBytes];
  uint8_t k[kKBytes];
  uint8_t public_key_hash[kTrBytes];
  vector s1;
  vector s2;
  vector t0;
};

// c_tilde || z || h, matching the sigEncode layout below.
template struct signature {
  uint8_t c_tilde[2 * lambda_bytes()];
  vector z;
  vector h;
};

// FIPS 204, Algorithm 22 (`pkEncode`).
template int mldsa_marshal_public_key(CBB *out, const struct public_key *pub) {
  if (!CBB_add_bytes(out, pub->rho, sizeof(pub->rho))) {
    return 0;
  }
  uint8_t *vectork_output;
  if (!CBB_add_space(out, &vectork_output, 320 * K)) {
    return 0;
  }
  vector_encode(vectork_output, &pub->t1, 10);
  return 1;
}

// FIPS 204, Algorithm 23 (`pkDecode`).
//
// Consumes exactly rho || t1 and requires |in| to then be empty. Also computes
// the cached SHAKE-256 hash of the whole encoded key.
template int mldsa_parse_public_key(struct public_key *pub, CBS *in) {
  const CBS orig_in = *in;
  if (!CBS_copy_bytes(in, pub->rho, sizeof(pub->rho))) {
    return 0;
  }
  CBS t1_bytes;
  if (!CBS_get_bytes(in, &t1_bytes, 320 * K) || CBS_len(in) != 0) {
    return 0;
  }
  vector_decode_10(&pub->t1, CBS_data(&t1_bytes));
  // Compute pre-cached values.
  BORINGSSL_keccak(pub->public_key_hash, sizeof(pub->public_key_hash),
                   CBS_data(&orig_in), CBS_len(&orig_in), boringssl_shake256);
  return 1;
}

// FIPS 204, Algorithm 24 (`skEncode`).
template int mldsa_marshal_private_key(CBB *out, const struct private_key *priv) {
  if (!CBB_add_bytes(out, priv->rho, sizeof(priv->rho)) ||
      !CBB_add_bytes(out, priv->k, sizeof(priv->k)) ||
      !CBB_add_bytes(out, priv->public_key_hash,
                     sizeof(priv->public_key_hash))) {
    return 0;
  }

  // Bytes per encoded polynomial for the short secrets s1 and s2.
  constexpr size_t scalar_bytes = (kDegree * plus_minus_eta_bitlen() + 7) / 8;

  uint8_t *vectorl_output;
  if (!CBB_add_space(out, &vectorl_output, scalar_bytes * L)) {
    return 0;
  }
  vector_encode_signed(vectorl_output, &priv->s1, plus_minus_eta_bitlen(), eta());

  uint8_t *s2_output;
  if (!CBB_add_space(out, &s2_output, scalar_bytes * K)) {
    return 0;
  }
  vector_encode_signed(s2_output, &priv->s2, plus_minus_eta_bitlen(), eta());

  // t0 is always packed as 13-bit signed values with b = 2^12.
  uint8_t *t0_output;
  if (!CBB_add_space(out, &t0_output, 416 * K)) {
    return 0;
  }
  vector_encode_signed(t0_output, &priv->t0, 13, 1 << 12);
  return 1;
}

// FIPS 204, Algorithm 25 (`skDecode`).
//
// Parses rho || K || tr || s1 || s2 || t0. Decoding s1/s2 range-checks the
// short secrets; t0's 13-bit decode cannot fail. Trailing-data checks are left
// to the callers.
template int mldsa_parse_private_key(struct private_key *priv, CBS *in) {
  CBS s1_bytes;
  CBS s2_bytes;
  CBS t0_bytes;
  constexpr size_t scalar_bytes = (kDegree * plus_minus_eta_bitlen() + 7) / 8;
  if (!CBS_copy_bytes(in, priv->rho, sizeof(priv->rho)) ||
      !CBS_copy_bytes(in, priv->k, sizeof(priv->k)) ||
      !CBS_copy_bytes(in, priv->public_key_hash,
                      sizeof(priv->public_key_hash)) ||
      !CBS_get_bytes(in, &s1_bytes, scalar_bytes * L) ||
      !vector_decode_signed(&priv->s1, CBS_data(&s1_bytes), plus_minus_eta_bitlen(), eta()) ||
      !CBS_get_bytes(in, &s2_bytes, scalar_bytes * K) ||
      !vector_decode_signed(&priv->s2, CBS_data(&s2_bytes), plus_minus_eta_bitlen(), eta()) ||
      !CBS_get_bytes(in, &t0_bytes, 416 * K) ||
      // Note: Decoding 13 bits into (-2^12, 2^12] cannot fail.
      !vector_decode_signed(&priv->t0, CBS_data(&t0_bytes), 13, 1 << 12)) {
    return 0;
  }
  return 1;
}

// FIPS 204, Algorithm 26 (`sigEncode`).
template int mldsa_marshal_signature(CBB *out, const struct signature *sign) {
  if (!CBB_add_bytes(out, sign->c_tilde, sizeof(sign->c_tilde))) {
    return 0;
  }

  uint8_t *vectorl_output;
  if (!CBB_add_space(out, &vectorl_output, 640 * L)) {
    return 0;
  }
  vector_encode_signed(vectorl_output, &sign->z, 20, 1 << 19);

  uint8_t *hint_output;
  if (!CBB_add_space(out, &hint_output, omega() + K)) {
    return 0;
  }
  hint_bit_pack(hint_output, &sign->h);
  return 1;
}

// FIPS 204, Algorithm 27 (`sigDecode`).
//
// The hint decode enforces canonicality, so each valid signature has a unique
// encoding.
template int mldsa_parse_signature(struct signature *sign, CBS *in) {
  CBS z_bytes;
  CBS hint_bytes;
  if (!CBS_copy_bytes(in, sign->c_tilde, sizeof(sign->c_tilde)) ||
      !CBS_get_bytes(in, &z_bytes, 640 * L) ||
      // Note: Decoding 20 bits into (-2^19, 2^19] cannot fail.
      !vector_decode_signed(&sign->z, CBS_data(&z_bytes), 20, 1 << 19) ||
      !CBS_get_bytes(in, &hint_bytes, omega() + K) ||
      !hint_bit_unpack(&sign->h, CBS_data(&hint_bytes))) {
    return 0;
  };

  return 1;
}

// Deleter for heap scratch buffers allocated with OPENSSL_malloc, used with
// std::unique_ptr below.
template struct DeleterFree {
  void operator()(T *ptr) { OPENSSL_free(ptr); }
};

// FIPS 204, Algorithm 6 (`ML-DSA.KeyGen_internal`). Returns 1 on success and 0
// on failure.
template int mldsa_generate_key_external_entropy(
    uint8_t out_encoded_public_key[public_key_bytes()],
    struct private_key *priv, const uint8_t entropy[BCM_MLDSA_SEED_BYTES]) {
  // Intermediate values, allocated on the heap to allow use when there is a
  // limited amount of stack.
  struct values_st {
    struct public_key pub;
    matrix a_ntt;
    vector s1_ntt;
    vector t;
  };
  std::unique_ptr> values(
      reinterpret_cast(OPENSSL_malloc(sizeof(values_st))));
  if (values == NULL) {
    return 0;
  }

  uint8_t augmented_entropy[BCM_MLDSA_SEED_BYTES + 2];
  OPENSSL_memcpy(augmented_entropy, entropy, BCM_MLDSA_SEED_BYTES);
  // The k and l parameters are appended to the seed.
augmented_entropy[BCM_MLDSA_SEED_BYTES] = K;
  augmented_entropy[BCM_MLDSA_SEED_BYTES + 1] = L;
  // Expand the seed into rho || sigma || K via SHAKE-256.
  uint8_t expanded_seed[kRhoBytes + kSigmaBytes + kKBytes];
  BORINGSSL_keccak(expanded_seed, sizeof(expanded_seed), augmented_entropy,
                   sizeof(augmented_entropy), boringssl_shake256);
  const uint8_t *const rho = expanded_seed;
  const uint8_t *const sigma = expanded_seed + kRhoBytes;
  const uint8_t *const k = expanded_seed + kRhoBytes + kSigmaBytes;
  // rho is public.
  CONSTTIME_DECLASSIFY(rho, kRhoBytes);
  OPENSSL_memcpy(values->pub.rho, rho, sizeof(values->pub.rho));
  OPENSSL_memcpy(priv->rho, rho, sizeof(priv->rho));
  OPENSSL_memcpy(priv->k, k, sizeof(priv->k));

  matrix_expand(&values->a_ntt, rho);
  vector_expand_short(&priv->s1, &priv->s2, sigma);

  // t = InverseNTT(A_hat * NTT(s1)) + s2.
  OPENSSL_memcpy(&values->s1_ntt, &priv->s1, sizeof(values->s1_ntt));
  vector_ntt(&values->s1_ntt);
  matrix_mult(&values->t, &values->a_ntt, &values->s1_ntt);
  vector_inverse_ntt(&values->t);
  vector_add(&values->t, &values->t, &priv->s2);

  // Split t into public high bits t1 and secret low bits t0.
  vector_power2_round(&values->pub.t1, &priv->t0, &values->t);
  // t1 is public.
  CONSTTIME_DECLASSIFY(&values->pub.t1, sizeof(values->pub.t1));

  CBB cbb;
  CBB_init_fixed(&cbb, out_encoded_public_key, public_key_bytes());
  if (!mldsa_marshal_public_key(&cbb, &values->pub)) {
    return 0;
  }
  assert(CBB_len(&cbb) == public_key_bytes());

  // Cache tr = SHAKE-256(encoded public key) in the private key.
  BORINGSSL_keccak(priv->public_key_hash, sizeof(priv->public_key_hash),
                   out_encoded_public_key, public_key_bytes(),
                   boringssl_shake256);
  return 1;
}

// Recomputes the public key (rho, t1, tr) from a private key. Returns 1 on
// success and 0 on allocation failure.
template int mldsa_public_from_private(struct public_key *pub, const struct private_key *priv) {
  // Intermediate values, allocated on the heap to allow use when there is a
  // limited amount of stack.
  struct values_st {
    matrix a_ntt;
    vector s1_ntt;
    vector t;
    vector t0;
  };
  std::unique_ptr> values(
      reinterpret_cast(OPENSSL_malloc(sizeof(values_st))));
  if (values == NULL) {
    return 0;
  }

  OPENSSL_memcpy(pub->rho, priv->rho, sizeof(pub->rho));
  OPENSSL_memcpy(pub->public_key_hash, priv->public_key_hash,
                 sizeof(pub->public_key_hash));

  matrix_expand(&values->a_ntt, priv->rho);

  // Same t = InverseNTT(A_hat * NTT(s1)) + s2 computation as key generation.
  OPENSSL_memcpy(&values->s1_ntt, &priv->s1, sizeof(values->s1_ntt));
  vector_ntt(&values->s1_ntt);

  matrix_mult(&values->t, &values->a_ntt, &values->s1_ntt);
  vector_inverse_ntt(&values->t);
  vector_add(&values->t, &values->t, &priv->s2);

  vector_power2_round(&pub->t1, &values->t0, &values->t);
  // t1 is part of the public key and thus is public.
  CONSTTIME_DECLASSIFY(&pub->t1, sizeof(pub->t1));
  return 1;
}

// FIPS 204, Algorithm 7 (`ML-DSA.Sign_internal`). Returns 1 on success and 0
// on failure.
template int mldsa_sign_internal(
    uint8_t out_encoded_signature[signature_bytes()],
    const struct private_key *priv, const uint8_t *msg, size_t msg_len,
    const uint8_t *context_prefix, size_t context_prefix_len,
    const uint8_t *context, size_t context_len,
    const uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES]) {
  // mu = SHAKE-256(tr || context_prefix || context || msg).
  uint8_t mu[kMuBytes];
  struct BORINGSSL_keccak_st keccak_ctx;
  BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256);
  BORINGSSL_keccak_absorb(&keccak_ctx, priv->public_key_hash,
                          sizeof(priv->public_key_hash));
  BORINGSSL_keccak_absorb(&keccak_ctx, context_prefix, context_prefix_len);
  BORINGSSL_keccak_absorb(&keccak_ctx, context, context_len);
  BORINGSSL_keccak_absorb(&keccak_ctx, msg, msg_len);
  BORINGSSL_keccak_squeeze(&keccak_ctx, mu, kMuBytes);

  // rho' = SHAKE-256(K || rnd || mu) seeds the mask sampling below.
  uint8_t rho_prime[kRhoPrimeBytes];
  BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256);
  BORINGSSL_keccak_absorb(&keccak_ctx, priv->k, sizeof(priv->k));
  BORINGSSL_keccak_absorb(&keccak_ctx, randomizer,
                          BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES);
  BORINGSSL_keccak_absorb(&keccak_ctx, mu, kMuBytes);
  BORINGSSL_keccak_squeeze(&keccak_ctx, rho_prime, kRhoPrimeBytes);

  // Intermediate values, allocated on the heap to allow use when there is a
  // limited amount of stack.
  struct values_st {
    struct signature sign;
    vector s1_ntt;
    vector s2_ntt;
    vector t0_ntt;
    matrix a_ntt;
    vector y;
    vector w;
    vector w1;
    vector cs1;
    vector cs2;
  };
  std::unique_ptr> values(
      reinterpret_cast(OPENSSL_malloc(sizeof(values_st))));
  if (values == NULL) {
    return 0;
  }
  // Hoist the NTTs of the fixed secrets out of the rejection loop.
  OPENSSL_memcpy(&values->s1_ntt, &priv->s1, sizeof(values->s1_ntt));
  vector_ntt(&values->s1_ntt);
  OPENSSL_memcpy(&values->s2_ntt, &priv->s2, sizeof(values->s2_ntt));
  vector_ntt(&values->s2_ntt);
  OPENSSL_memcpy(&values->t0_ntt, &priv->t0, sizeof(values->t0_ntt));
  vector_ntt(&values->t0_ntt);
  matrix_expand(&values->a_ntt, priv->rho);

  // kappa must not exceed 2**16/L = 13107. But the probability of it
  // exceeding even 1000 iterations is vanishingly small.
  for (size_t kappa = 0;; kappa += L) {
    // Fresh masking vector y for this attempt.
    vector_expand_mask(&values->y, rho_prime, kappa);

    // w = InverseNTT(A_hat * NTT(y)); the cs1 slot is reused for NTT(y).
    vector *y_ntt = &values->cs1;
    OPENSSL_memcpy(y_ntt, &values->y, sizeof(*y_ntt));
    vector_ntt(y_ntt);
    matrix_mult(&values->w, &values->a_ntt, y_ntt);
    vector_inverse_ntt(&values->w);

    vector_high_bits(&values->w1, &values->w);
    uint8_t w1_encoded[128 * K];
    w1_encode(w1_encoded, &values->w1);

    // c_tilde = SHAKE-256(mu || w1Encode(w1)).
    BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256);
    BORINGSSL_keccak_absorb(&keccak_ctx, mu, kMuBytes);
    BORINGSSL_keccak_absorb(&keccak_ctx, w1_encoded, 128 * K);
    BORINGSSL_keccak_squeeze(&keccak_ctx, values->sign.c_tilde,
                             2 * lambda_bytes());

    scalar c_ntt;
    scalar_sample_in_ball_vartime(&c_ntt, values->sign.c_tilde,
                                  sizeof(values->sign.c_tilde), tau());
    scalar_ntt(&c_ntt);

    vector_mult_scalar(&values->cs1, &values->s1_ntt, &c_ntt);
    vector_inverse_ntt(&values->cs1);
    vector_mult_scalar(&values->cs2, &values->s2_ntt, &c_ntt);
    vector_inverse_ntt(&values->cs2);

    // z = y + c*s1.
    vector_add(&values->sign.z, &values->y, &values->cs1);

    // r0 = LowBits(w - c*s2); reuses the w1 slot.
    vector *r0 = &values->w1;
    vector_sub(r0, &values->w, &values->cs2);
    vector_low_bits(r0, r0);

    // Leaking the fact that a signature was rejected is fine as the next
    // attempt at a signature will be (indistinguishable from) independent of
    // this one. Note, however, that we additionally leak which of the two
    // branches rejected the signature. Section 5.5 of
    // https://pq-crystals.org/dilithium/data/dilithium-specification-round3.pdf
    // describes this leak as OK. Note we leak less than what is described by
    // the paper; we do not reveal which coefficient violated the bound, and
    // we hide which of the |z_max| or |r0_max| bound failed. See also
    // https://boringssl-review.googlesource.com/c/boringssl/+/67747/comment/2bbab0fa_d241d35a/
    uint32_t z_max = vector_max(&values->sign.z);
    uint32_t r0_max = vector_max_signed(r0);
    if (constant_time_declassify_w(
            constant_time_ge_w(z_max, gamma1() - beta()) |
            constant_time_ge_w(r0_max, kGamma2 - beta()))) {
      continue;
    }

    // ct0 = InverseNTT(c * NTT(t0)); reuses the w1 slot again.
    vector *ct0 = &values->w1;
    vector_mult_scalar(ct0, &values->t0_ntt, &c_ntt);
    vector_inverse_ntt(ct0);
    vector_make_hint(&values->sign.h, ct0, &values->cs2, &values->w);

    // See above.
    uint32_t ct0_max = vector_max(ct0);
    size_t h_ones = vector_count_ones(&values->sign.h);
    if (constant_time_declassify_w(constant_time_ge_w(ct0_max, kGamma2) |
                                   constant_time_lt_w(omega(), h_ones))) {
      continue;
    }

    // Although computed with the private key, the signature is public.
    CONSTTIME_DECLASSIFY(values->sign.c_tilde, sizeof(values->sign.c_tilde));
    CONSTTIME_DECLASSIFY(&values->sign.z, sizeof(values->sign.z));
    CONSTTIME_DECLASSIFY(&values->sign.h, sizeof(values->sign.h));

    CBB cbb;
    CBB_init_fixed(&cbb, out_encoded_signature, signature_bytes());
    if (!mldsa_marshal_signature(&cbb, &values->sign)) {
      return 0;
    }
    BSSL_CHECK(CBB_len(&cbb) == signature_bytes());
    return 1;
  }
}

// FIPS 204, Algorithm 8 (`ML-DSA.Verify_internal`).
template int mldsa_verify_internal(
    const struct public_key *pub,
    const uint8_t encoded_signature[signature_bytes()], const uint8_t *msg,
    size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len,
    const uint8_t *context, size_t context_len) {
  // Intermediate values, allocated on the heap to allow use when there is a
  // limited amount of stack.
  struct values_st {
    struct signature sign;
    matrix a_ntt;
    vector z_ntt;
    vector az_ntt;
    vector ct1_ntt;
  };
  std::unique_ptr> values(
      reinterpret_cast(OPENSSL_malloc(sizeof(values_st))));
  if (values == NULL) {
    return 0;
  }

  CBS cbs;
  CBS_init(&cbs, encoded_signature, signature_bytes());
  if (!mldsa_parse_signature(&values->sign, &cbs)) {
    return 0;
  }

  matrix_expand(&values->a_ntt, pub->rho);

  // mu = SHAKE-256(tr || context_prefix || context || msg), as in signing.
  uint8_t mu[kMuBytes];
  struct BORINGSSL_keccak_st keccak_ctx;
  BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256);
  BORINGSSL_keccak_absorb(&keccak_ctx, pub->public_key_hash,
                          sizeof(pub->public_key_hash));
  BORINGSSL_keccak_absorb(&keccak_ctx, context_prefix, context_prefix_len);
  BORINGSSL_keccak_absorb(&keccak_ctx, context, context_len);
  BORINGSSL_keccak_absorb(&keccak_ctx, msg, msg_len);
  BORINGSSL_keccak_squeeze(&keccak_ctx, mu, kMuBytes);

  // Re-derive the challenge polynomial from the signature's c_tilde.
  scalar c_ntt;
  scalar_sample_in_ball_vartime(&c_ntt, values->sign.c_tilde,
                                sizeof(values->sign.c_tilde), tau());
  scalar_ntt(&c_ntt);

  OPENSSL_memcpy(&values->z_ntt, &values->sign.z, sizeof(values->z_ntt));
  vector_ntt(&values->z_ntt);

  matrix_mult(&values->az_ntt, &values->a_ntt, &values->z_ntt);

  // Recover w1' = UseHint(h, InverseNTT(A*z - c * t1 * 2^d)).
  vector_scale_power2_round(&values->ct1_ntt, &pub->t1);
  vector_ntt(&values->ct1_ntt);
  vector_mult_scalar(&values->ct1_ntt, &values->ct1_ntt, &c_ntt);

  vector *const w1 = &values->az_ntt;
  vector_sub(w1, &values->az_ntt, &values->ct1_ntt);
  vector_inverse_ntt(w1);
  vector_use_hint_vartime(w1, &values->sign.h, w1);

  uint8_t w1_encoded[128 * K];
  w1_encode(w1_encoded, w1);

  uint8_t c_tilde[2 * lambda_bytes()];
  BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake256);
  BORINGSSL_keccak_absorb(&keccak_ctx, mu, kMuBytes);
  BORINGSSL_keccak_absorb(&keccak_ctx, w1_encoded, 128 * K);
  BORINGSSL_keccak_squeeze(&keccak_ctx, c_tilde, 2 * lambda_bytes());

  // Accept iff z is within bounds and the recomputed challenge matches.
  uint32_t z_max = vector_max(&values->sign.z);
  return z_max < static_cast(gamma1() - beta()) &&
         OPENSSL_memcmp(c_tilde, values->sign.c_tilde, 2 * lambda_bytes()) == 0;
}

// Reinterpret the opaque BCM key structs as the internal template types; the
// static_asserts pin size and alignment compatibility.
struct private_key<6, 5> *private_key_from_external_65(
    const struct BCM_mldsa65_private_key *external) {
  static_assert(sizeof(struct BCM_mldsa65_private_key) ==
                    sizeof(struct private_key<6, 5>),
                "MLDSA65 private key size incorrect");
  static_assert(alignof(struct BCM_mldsa65_private_key) ==
                    alignof(struct private_key<6, 5>),
                "MLDSA65 private key align incorrect");
  return (struct private_key<6, 5> *)external;
}

struct public_key<6> *
public_key_from_external_65(const struct BCM_mldsa65_public_key *external) {
  static_assert(sizeof(struct BCM_mldsa65_public_key) ==
                    sizeof(struct public_key<6>),
                "MLDSA65 public key size incorrect");
  static_assert(alignof(struct BCM_mldsa65_public_key) ==
                    alignof(struct public_key<6>),
                "MLDSA65 public key align incorrect");
  return (struct public_key<6> *)external;
}

struct private_key<8, 7> *
private_key_from_external_87(const struct BCM_mldsa87_private_key *external) {
  static_assert(sizeof(struct BCM_mldsa87_private_key) ==
                    sizeof(struct private_key<8, 7>),
                "MLDSA87 private key size incorrect");
  static_assert(alignof(struct BCM_mldsa87_private_key) ==
                    alignof(struct private_key<8, 7>),
                "MLDSA87 private key align incorrect");
  return (struct private_key<8, 7> *)external;
}

struct public_key<8> *
public_key_from_external_87(const struct BCM_mldsa87_public_key *external) {
  static_assert(sizeof(struct BCM_mldsa87_public_key) ==
                    sizeof(struct public_key<8>),
                "MLDSA87 public key size incorrect");
  static_assert(alignof(struct BCM_mldsa87_public_key) ==
                    alignof(struct public_key<8>),
                "MLDSA87 public key align incorrect");
  return (struct public_key<8> *)external;
}

}  // namespace
}  // namespace mldsa

// ML-DSA-65 specific wrappers.
bcm_status BCM_mldsa65_parse_public_key(
    struct BCM_mldsa65_public_key *public_key, CBS *in) {
  return bcm_as_approved_status(mldsa_parse_public_key(
      mldsa::public_key_from_external_65(public_key), in));
}

bcm_status BCM_mldsa65_marshal_private_key(
    CBB *out, const struct BCM_mldsa65_private_key *private_key) {
  return bcm_as_approved_status(mldsa_marshal_private_key(
      out, mldsa::private_key_from_external_65(private_key)));
}

// Unlike the internal parser, this wrapper also rejects trailing data.
bcm_status BCM_mldsa65_parse_private_key(
    struct BCM_mldsa65_private_key *private_key, CBS *in) {
  return bcm_as_approved_status(
      mldsa_parse_private_key(mldsa::private_key_from_external_65(private_key),
                              in) &&
      CBS_len(in) == 0);
}

// Calls |MLDSA_generate_key_external_entropy| with random bytes from
// |BCM_rand_bytes|.
bcm_status BCM_mldsa65_generate_key(
    uint8_t out_encoded_public_key[BCM_MLDSA65_PUBLIC_KEY_BYTES],
    uint8_t out_seed[BCM_MLDSA_SEED_BYTES],
    struct BCM_mldsa65_private_key *out_private_key) {
  BCM_rand_bytes(out_seed, BCM_MLDSA_SEED_BYTES);
  // Mark the fresh seed as secret for constant-time validation builds.
  CONSTTIME_SECRET(out_seed, BCM_MLDSA_SEED_BYTES);
  return BCM_mldsa65_generate_key_external_entropy(out_encoded_public_key,
                                                   out_private_key, out_seed);
}

// Deterministically derives the private key from |seed|, discarding the
// re-derived encoded public key.
bcm_status BCM_mldsa65_private_key_from_seed(
    struct BCM_mldsa65_private_key *out_private_key,
    const uint8_t seed[BCM_MLDSA_SEED_BYTES]) {
  uint8_t public_key[BCM_MLDSA65_PUBLIC_KEY_BYTES];
  return BCM_mldsa65_generate_key_external_entropy(public_key, out_private_key,
                                                   seed);
}

bcm_status BCM_mldsa65_generate_key_external_entropy(
    uint8_t out_encoded_public_key[BCM_MLDSA65_PUBLIC_KEY_BYTES],
    struct BCM_mldsa65_private_key *out_private_key,
    const uint8_t entropy[BCM_MLDSA_SEED_BYTES]) {
  return bcm_as_approved_status(mldsa_generate_key_external_entropy(
      out_encoded_public_key,
      mldsa::private_key_from_external_65(out_private_key), entropy));
}

bcm_status BCM_mldsa65_public_from_private(
    struct BCM_mldsa65_public_key *out_public_key,
    const struct BCM_mldsa65_private_key *private_key) {
  return bcm_as_approved_status(mldsa_public_from_private(
      mldsa::public_key_from_external_65(out_public_key),
      mldsa::private_key_from_external_65(private_key)));
}

bcm_status BCM_mldsa65_sign_internal(
    uint8_t out_encoded_signature[BCM_MLDSA65_SIGNATURE_BYTES],
    const struct BCM_mldsa65_private_key *private_key, const uint8_t *msg,
    size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len,
    const uint8_t *context, size_t context_len,
    const uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES]) {
  return bcm_as_approved_status(mldsa_sign_internal(
      out_encoded_signature, mldsa::private_key_from_external_65(private_key),
      msg, msg_len, context_prefix, context_prefix_len, context, context_len,
      randomizer));
}

// ML-DSA signature in randomized mode, filling the random bytes with
// |BCM_rand_bytes|.
bcm_status BCM_mldsa65_sign(
    uint8_t out_encoded_signature[BCM_MLDSA65_SIGNATURE_BYTES],
    const struct BCM_mldsa65_private_key *private_key, const uint8_t *msg,
    size_t msg_len, const uint8_t *context, size_t context_len) {
  BSSL_CHECK(context_len <= 255);
  uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES];
  BCM_rand_bytes(randomizer, sizeof(randomizer));
  CONSTTIME_SECRET(randomizer, sizeof(randomizer));

  // Pure (non-prehash) mode domain separator: 0 || len(context).
  // NOTE(review): `static_cast(context_len)` lost its target type (uint8_t?)
  // in extraction -- restore from upstream.
  const uint8_t context_prefix[2] = {0, static_cast(context_len)};
  return BCM_mldsa65_sign_internal(
      out_encoded_signature, private_key, msg, msg_len, context_prefix,
      sizeof(context_prefix), context, context_len, randomizer);
}

// FIPS 204, Algorithm 3 (`ML-DSA.Verify`).
bcm_status BCM_mldsa65_verify(
    const struct BCM_mldsa65_public_key *public_key,
    const uint8_t signature[BCM_MLDSA65_SIGNATURE_BYTES], const uint8_t *msg,
    size_t msg_len, const uint8_t *context, size_t context_len) {
  BSSL_CHECK(context_len <= 255);
  const uint8_t context_prefix[2] = {0, static_cast(context_len)};
  return BCM_mldsa65_verify_internal(public_key, signature, msg, msg_len,
                                     context_prefix, sizeof(context_prefix),
                                     context, context_len);
}

bcm_status BCM_mldsa65_verify_internal(
    const struct BCM_mldsa65_public_key *public_key,
    const uint8_t encoded_signature[BCM_MLDSA65_SIGNATURE_BYTES],
    const uint8_t *msg, size_t msg_len, const uint8_t *context_prefix,
    size_t context_prefix_len, const uint8_t *context, size_t context_len) {
  return bcm_as_approved_status(mldsa::mldsa_verify_internal<6, 5>(
      mldsa::public_key_from_external_65(public_key), encoded_signature, msg,
      msg_len, context_prefix, context_prefix_len, context, context_len));
}

bcm_status BCM_mldsa65_marshal_public_key(
    CBB *out, const struct BCM_mldsa65_public_key *public_key) {
  return bcm_as_approved_status(mldsa_marshal_public_key(
      out, mldsa::public_key_from_external_65(public_key)));
}

// ML-DSA-87 specific wrappers.
bcm_status BCM_mldsa87_parse_public_key(
    struct BCM_mldsa87_public_key *public_key, CBS *in) {
  return bcm_as_approved_status(mldsa_parse_public_key(
      mldsa::public_key_from_external_87(public_key), in));
}

bcm_status BCM_mldsa87_marshal_private_key(
    CBB *out, const struct BCM_mldsa87_private_key *private_key) {
  return bcm_as_approved_status(mldsa_marshal_private_key(
      out, mldsa::private_key_from_external_87(private_key)));
}

// Rejects trailing data, mirroring the ML-DSA-65 variant.
bcm_status BCM_mldsa87_parse_private_key(
    struct BCM_mldsa87_private_key *private_key, CBS *in) {
  return bcm_as_approved_status(
      mldsa_parse_private_key(mldsa::private_key_from_external_87(private_key),
                              in) &&
      CBS_len(in) == 0);
}

// Calls |MLDSA_generate_key_external_entropy| with random bytes from
// |BCM_rand_bytes|.
bcm_status BCM_mldsa87_generate_key(
    uint8_t out_encoded_public_key[BCM_MLDSA87_PUBLIC_KEY_BYTES],
    uint8_t out_seed[BCM_MLDSA_SEED_BYTES],
    struct BCM_mldsa87_private_key *out_private_key) {
  BCM_rand_bytes(out_seed, BCM_MLDSA_SEED_BYTES);
  // Mark the fresh seed as secret for constant-time validation builds. The
  // ML-DSA-65 variant (|BCM_mldsa65_generate_key|) already does this; the
  // omission here looked accidental, and the call is a no-op in normal builds.
  CONSTTIME_SECRET(out_seed, BCM_MLDSA_SEED_BYTES);
  return BCM_mldsa87_generate_key_external_entropy(out_encoded_public_key,
                                                   out_private_key, out_seed);
}

// Deterministically derives the private key from |seed|, discarding the
// re-derived encoded public key.
bcm_status BCM_mldsa87_private_key_from_seed(
    struct BCM_mldsa87_private_key *out_private_key,
    const uint8_t seed[BCM_MLDSA_SEED_BYTES]) {
  uint8_t public_key[BCM_MLDSA87_PUBLIC_KEY_BYTES];
  return BCM_mldsa87_generate_key_external_entropy(public_key, out_private_key,
                                                   seed);
}

bcm_status BCM_mldsa87_generate_key_external_entropy(
    uint8_t out_encoded_public_key[BCM_MLDSA87_PUBLIC_KEY_BYTES],
    struct BCM_mldsa87_private_key *out_private_key,
    const uint8_t entropy[BCM_MLDSA_SEED_BYTES]) {
  return bcm_as_approved_status(mldsa_generate_key_external_entropy(
      out_encoded_public_key,
      mldsa::private_key_from_external_87(out_private_key), entropy));
}

bcm_status BCM_mldsa87_public_from_private(
    struct BCM_mldsa87_public_key *out_public_key,
    const struct BCM_mldsa87_private_key *private_key) {
  return bcm_as_approved_status(mldsa_public_from_private(
      mldsa::public_key_from_external_87(out_public_key),
      mldsa::private_key_from_external_87(private_key)));
}

bcm_status BCM_mldsa87_sign_internal(
    uint8_t out_encoded_signature[BCM_MLDSA87_SIGNATURE_BYTES],
    const struct BCM_mldsa87_private_key *private_key, const uint8_t *msg,
    size_t msg_len, const uint8_t *context_prefix, size_t context_prefix_len,
    const uint8_t *context, size_t context_len,
    const uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES]) {
  return bcm_as_approved_status(mldsa_sign_internal(
      out_encoded_signature, mldsa::private_key_from_external_87(private_key),
      msg, msg_len, context_prefix, context_prefix_len, context, context_len,
      randomizer));
}

// ML-DSA signature in randomized mode, filling the random bytes with
// |BCM_rand_bytes|.
bcm_status BCM_mldsa87_sign(
    uint8_t out_encoded_signature[BCM_MLDSA87_SIGNATURE_BYTES],
    const struct BCM_mldsa87_private_key *private_key, const uint8_t *msg,
    size_t msg_len, const uint8_t *context, size_t context_len) {
  BSSL_CHECK(context_len <= 255);
  uint8_t randomizer[BCM_MLDSA_SIGNATURE_RANDOMIZER_BYTES];
  BCM_rand_bytes(randomizer, sizeof(randomizer));
  // NOTE(review): unlike |BCM_mldsa65_sign|, this variant does not call
  // CONSTTIME_SECRET on |randomizer| -- confirm whether the asymmetry is
  // intentional.
  // Pure (non-prehash) mode domain separator: 0 || len(context).
  const uint8_t context_prefix[2] = {0, static_cast(context_len)};
  return BCM_mldsa87_sign_internal(
      out_encoded_signature, private_key, msg, msg_len, context_prefix,
      sizeof(context_prefix), context, context_len, randomizer);
}

// FIPS 204, Algorithm 3 (`ML-DSA.Verify`).
bcm_status BCM_mldsa87_verify(const struct BCM_mldsa87_public_key *public_key,
                              const uint8_t *signature, const uint8_t *msg,
                              size_t msg_len, const uint8_t *context,
                              size_t context_len) {
  BSSL_CHECK(context_len <= 255);
  const uint8_t context_prefix[2] = {0, static_cast(context_len)};
  return BCM_mldsa87_verify_internal(public_key, signature, msg, msg_len,
                                     context_prefix, sizeof(context_prefix),
                                     context, context_len);
}

bcm_status BCM_mldsa87_verify_internal(
    const struct BCM_mldsa87_public_key *public_key,
    const uint8_t encoded_signature[BCM_MLDSA87_SIGNATURE_BYTES],
    const uint8_t *msg, size_t msg_len, const uint8_t *context_prefix,
    size_t context_prefix_len, const uint8_t *context, size_t context_len) {
  return bcm_as_approved_status(mldsa::mldsa_verify_internal<8, 7>(
      mldsa::public_key_from_external_87(public_key), encoded_signature, msg,
      msg_len, context_prefix, context_prefix_len, context, context_len));
}

bcm_status BCM_mldsa87_marshal_public_key(
    CBB *out, const struct BCM_mldsa87_public_key *public_key) {
  return bcm_as_approved_status(mldsa_marshal_public_key(
      out, mldsa::public_key_from_external_87(public_key)));
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/mlkem/mlkem.cc.inc
================================================
/* Copyright 2014 The BoringSSL Authors
 *
 * Permission to use, copy,
modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the angle-bracket header names of the following includes were
// lost in extraction -- restore them from upstream BoringSSL.
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../../internal.h"
#include "../bcm_interface.h"
#include "../keccak/internal.h"

namespace mlkem {
namespace {

// See
// https://csrc.nist.gov/pubs/fips/203/final

// PRF (FIPS 203): SHAKE-256 of a 33-byte input, squeezed to |out_len| bytes.
static void prf(uint8_t *out, size_t out_len, const uint8_t in[33]) {
  BORINGSSL_keccak(out, out_len, in, 33, boringssl_shake256);
}

// Section 4.1
// H: SHA3-256.
void hash_h(uint8_t out[32], const uint8_t *in, size_t len) {
  BORINGSSL_keccak(out, 32, in, len, boringssl_sha3_256);
}

// G: SHA3-512.
void hash_g(uint8_t out[64], const uint8_t *in, size_t len) {
  BORINGSSL_keccak(out, 64, in, len, boringssl_sha3_512);
}

// This is called `J` in the spec. Derives the implicit-rejection shared secret
// from the failure secret and the (possibly attacker-supplied) ciphertext.
void kdf(uint8_t out[BCM_MLKEM_SHARED_SECRET_BYTES],
         const uint8_t failure_secret[32], const uint8_t *ciphertext,
         size_t ciphertext_len) {
  struct BORINGSSL_keccak_st st;
  BORINGSSL_keccak_init(&st, boringssl_shake256);
  BORINGSSL_keccak_absorb(&st, failure_secret, 32);
  BORINGSSL_keccak_absorb(&st, ciphertext, ciphertext_len);
  BORINGSSL_keccak_squeeze(&st, out, BCM_MLKEM_SHARED_SECRET_BYTES);
}

// Constants that are common across all sizes.
#define DEGREE 256
const size_t kBarrettMultiplier = 5039;
const unsigned kBarrettShift = 24;
static const uint16_t kPrime = 3329;
const int kLog2Prime = 12;
const uint16_t kHalfPrime = (/*kPrime=*/3329 - 1) / 2;
// kInverseDegree is 128^-1 mod 3329; 128 because kPrime does not have a 512th
// root of unity.
const uint16_t kInverseDegree = 3303;

// Rank-specific constants. kDU/kDV are the FIPS 203 compression widths.
#define RANK768 3
static const int kDU768 = 10;
const int kDV768 = 4;
#define RANK1024 4
static const int kDU1024 = 11;
const int kDV1024 = 5;

// 12 bits per coefficient, 256 coefficients per polynomial, |rank| polynomials.
constexpr size_t encoded_vector_size(int rank) {
  return (kLog2Prime * DEGREE / 8) * static_cast(rank);
}

constexpr size_t encoded_public_key_size(int rank) {
  return encoded_vector_size(rank) + /*sizeof(rho)=*/32;
}

static_assert(encoded_public_key_size(RANK768) ==
                  BCM_MLKEM768_PUBLIC_KEY_BYTES,
              "");
static_assert(encoded_public_key_size(RANK1024) ==
                  BCM_MLKEM1024_PUBLIC_KEY_BYTES,
              "");

constexpr size_t compressed_vector_size(int rank) {
  // `if constexpr` isn't available before C++17.
  return (rank == RANK768 ? kDU768 : kDU1024) * static_cast(rank) * DEGREE / 8;
}

constexpr size_t ciphertext_size(int rank) {
  return compressed_vector_size(rank) +
         (rank == RANK768 ? kDV768 : kDV1024) * DEGREE / 8;
}

static_assert(ciphertext_size(RANK768) == BCM_MLKEM768_CIPHERTEXT_BYTES, "");
static_assert(ciphertext_size(RANK1024) == BCM_MLKEM1024_CIPHERTEXT_BYTES, "");

typedef struct scalar {
  // On every function entry and exit, 0 <= c < kPrime.
uint16_t c[DEGREE]; } scalar; template struct vector { scalar v[RANK]; }; template struct matrix { scalar v[RANK][RANK]; }; // This bit of Python will be referenced in some of the following comments: // // p = 3329 // // def bitreverse(i): // ret = 0 // for n in range(7): // bit = i & 1 // ret <<= 1 // ret |= bit // i >>= 1 // return ret // kNTTRoots = [pow(17, bitreverse(i), p) for i in range(128)] const uint16_t kNTTRoots[128] = { 1, 1729, 2580, 3289, 2642, 630, 1897, 848, 1062, 1919, 193, 797, 2786, 3260, 569, 1746, 296, 2447, 1339, 1476, 3046, 56, 2240, 1333, 1426, 2094, 535, 2882, 2393, 2879, 1974, 821, 289, 331, 3253, 1756, 1197, 2304, 2277, 2055, 650, 1977, 2513, 632, 2865, 33, 1320, 1915, 2319, 1435, 807, 452, 1438, 2868, 1534, 2402, 2647, 2617, 1481, 648, 2474, 3110, 1227, 910, 17, 2761, 583, 2649, 1637, 723, 2288, 1100, 1409, 2662, 3281, 233, 756, 2156, 3015, 3050, 1703, 1651, 2789, 1789, 1847, 952, 1461, 2687, 939, 2308, 2437, 2388, 733, 2337, 268, 641, 1584, 2298, 2037, 3220, 375, 2549, 2090, 1645, 1063, 319, 2773, 757, 2099, 561, 2466, 2594, 2804, 1092, 403, 1026, 1143, 2150, 2775, 886, 1722, 1212, 1874, 1029, 2110, 2935, 885, 2154, }; // kInverseNTTRoots = [pow(17, -bitreverse(i), p) for i in range(128)] const uint16_t kInverseNTTRoots[128] = { 1, 1600, 40, 749, 2481, 1432, 2699, 687, 1583, 2760, 69, 543, 2532, 3136, 1410, 2267, 2508, 1355, 450, 936, 447, 2794, 1235, 1903, 1996, 1089, 3273, 283, 1853, 1990, 882, 3033, 2419, 2102, 219, 855, 2681, 1848, 712, 682, 927, 1795, 461, 1891, 2877, 2522, 1894, 1010, 1414, 2009, 3296, 464, 2697, 816, 1352, 2679, 1274, 1052, 1025, 2132, 1573, 76, 2998, 3040, 1175, 2444, 394, 1219, 2300, 1455, 2117, 1607, 2443, 554, 1179, 2186, 2303, 2926, 2237, 525, 735, 863, 2768, 1230, 2572, 556, 3010, 2266, 1684, 1239, 780, 2954, 109, 1292, 1031, 1745, 2688, 3061, 992, 2596, 941, 892, 1021, 2390, 642, 1868, 2377, 1482, 1540, 540, 1678, 1626, 279, 314, 1173, 2573, 3096, 48, 667, 1920, 2229, 1041, 2606, 1692, 680, 2746, 568, 
3312, }; // kModRoots = [pow(17, 2*bitreverse(i) + 1, p) for i in range(128)] const uint16_t kModRoots[128] = { 17, 3312, 2761, 568, 583, 2746, 2649, 680, 1637, 1692, 723, 2606, 2288, 1041, 1100, 2229, 1409, 1920, 2662, 667, 3281, 48, 233, 3096, 756, 2573, 2156, 1173, 3015, 314, 3050, 279, 1703, 1626, 1651, 1678, 2789, 540, 1789, 1540, 1847, 1482, 952, 2377, 1461, 1868, 2687, 642, 939, 2390, 2308, 1021, 2437, 892, 2388, 941, 733, 2596, 2337, 992, 268, 3061, 641, 2688, 1584, 1745, 2298, 1031, 2037, 1292, 3220, 109, 375, 2954, 2549, 780, 2090, 1239, 1645, 1684, 1063, 2266, 319, 3010, 2773, 556, 757, 2572, 2099, 1230, 561, 2768, 2466, 863, 2594, 735, 2804, 525, 1092, 2237, 403, 2926, 1026, 2303, 1143, 2186, 2150, 1179, 2775, 554, 886, 2443, 1722, 1607, 1212, 2117, 1874, 1455, 1029, 2300, 2110, 1219, 2935, 394, 885, 2444, 2154, 1175, }; // reduce_once reduces 0 <= x < 2*kPrime, mod kPrime. uint16_t reduce_once(uint16_t x) { declassify_assert(x < 2 * kPrime); const uint16_t subtracted = x - kPrime; uint16_t mask = 0u - (subtracted >> 15); // Although this is a constant-time select, we omit a value barrier here. // Value barriers impede auto-vectorization (likely because it forces the // value to transit through a general-purpose register). On AArch64, this is a // difference of 2x. // // We usually add value barriers to selects because Clang turns consecutive // selects with the same condition into a branch instead of CMOV/CSEL. This // condition does not occur in ML-KEM, so omitting it seems to be safe so far, // but see |scalar_centered_binomial_distribution_eta_2_with_prf|. return (mask & x) | (~mask & subtracted); } // constant time reduce x mod kPrime using Barrett reduction. x must be less // than kPrime + 2×kPrime². 
static uint16_t reduce(uint32_t x) { declassify_assert(x < kPrime + 2u * kPrime * kPrime); uint64_t product = (uint64_t)x * kBarrettMultiplier; uint32_t quotient = (uint32_t)(product >> kBarrettShift); uint32_t remainder = x - quotient * kPrime; return reduce_once(remainder); } void scalar_zero(scalar *out) { OPENSSL_memset(out, 0, sizeof(*out)); } template void vector_zero(vector *out) { OPENSSL_memset(out->v, 0, sizeof(scalar) * RANK); } // In place number theoretic transform of a given scalar. // Note that MLKEM's kPrime 3329 does not have a 512th root of unity, so this // transform leaves off the last iteration of the usual FFT code, with the 128 // relevant roots of unity being stored in |kNTTRoots|. This means the output // should be seen as 128 elements in GF(3329^2), with the coefficients of the // elements being consecutive entries in |s->c|. static void scalar_ntt(scalar *s) { int offset = DEGREE; // `int` is used here because using `size_t` throughout caused a ~5% slowdown // with Clang 14 on Aarch64. for (int step = 1; step < DEGREE / 2; step <<= 1) { offset >>= 1; int k = 0; for (int i = 0; i < step; i++) { const uint32_t step_root = kNTTRoots[i + step]; for (int j = k; j < k + offset; j++) { uint16_t odd = reduce(step_root * s->c[j + offset]); uint16_t even = s->c[j]; s->c[j] = reduce_once(odd + even); s->c[j + offset] = reduce_once(even - odd + kPrime); } k += 2 * offset; } } } template static void vector_ntt(vector *a) { for (int i = 0; i < RANK; i++) { scalar_ntt(&a->v[i]); } } // In place inverse number theoretic transform of a given scalar, with pairs of // entries of s->v being interpreted as elements of GF(3329^2). Just as with the // number theoretic transform, this leaves off the first step of the normal iFFT // to account for the fact that 3329 does not have a 512th root of unity, using // the precomputed 128 roots of unity stored in |kInverseNTTRoots|. 
void scalar_inverse_ntt(scalar *s) { int step = DEGREE / 2; // `int` is used here because using `size_t` throughout caused a ~5% slowdown // with Clang 14 on Aarch64. for (int offset = 2; offset < DEGREE; offset <<= 1) { step >>= 1; int k = 0; for (int i = 0; i < step; i++) { uint32_t step_root = kInverseNTTRoots[i + step]; for (int j = k; j < k + offset; j++) { uint16_t odd = s->c[j + offset]; uint16_t even = s->c[j]; s->c[j] = reduce_once(odd + even); s->c[j + offset] = reduce(step_root * (even - odd + kPrime)); } k += 2 * offset; } } for (int i = 0; i < DEGREE; i++) { s->c[i] = reduce(s->c[i] * kInverseDegree); } } template void vector_inverse_ntt(vector *a) { for (int i = 0; i < RANK; i++) { scalar_inverse_ntt(&a->v[i]); } } void scalar_add(scalar *lhs, const scalar *rhs) { for (int i = 0; i < DEGREE; i++) { lhs->c[i] = reduce_once(lhs->c[i] + rhs->c[i]); } } void scalar_sub(scalar *lhs, const scalar *rhs) { for (int i = 0; i < DEGREE; i++) { lhs->c[i] = reduce_once(lhs->c[i] - rhs->c[i] + kPrime); } } // Multiplying two scalars in the number theoretically transformed state. Since // 3329 does not have a 512th root of unity, this means we have to interpret // the 2*ith and (2*i+1)th entries of the scalar as elements of GF(3329)[X]/(X^2 // - 17^(2*bitreverse(i)+1)) The value of 17^(2*bitreverse(i)+1) mod 3329 is // stored in the precomputed |kModRoots| table. Note that our Barrett transform // only allows us to multipy two reduced numbers together, so we need some // intermediate reduction steps, even if an uint64_t could hold 3 multiplied // numbers. 
void scalar_mult(scalar *out, const scalar *lhs, const scalar *rhs) { for (int i = 0; i < DEGREE / 2; i++) { uint32_t real_real = (uint32_t)lhs->c[2 * i] * rhs->c[2 * i]; uint32_t img_img = (uint32_t)lhs->c[2 * i + 1] * rhs->c[2 * i + 1]; uint32_t real_img = (uint32_t)lhs->c[2 * i] * rhs->c[2 * i + 1]; uint32_t img_real = (uint32_t)lhs->c[2 * i + 1] * rhs->c[2 * i]; out->c[2 * i] = reduce(real_real + (uint32_t)reduce(img_img) * kModRoots[i]); out->c[2 * i + 1] = reduce(img_real + real_img); } } template void vector_add(vector *lhs, const vector *rhs) { for (int i = 0; i < RANK; i++) { scalar_add(&lhs->v[i], &rhs->v[i]); } } template static void matrix_mult(vector *out, const matrix *m, const vector *a) { vector_zero(out); for (int i = 0; i < RANK; i++) { for (int j = 0; j < RANK; j++) { scalar product; scalar_mult(&product, &m->v[i][j], &a->v[j]); scalar_add(&out->v[i], &product); } } } template void matrix_mult_transpose(vector *out, const matrix *m, const vector *a) { vector_zero(out); for (int i = 0; i < RANK; i++) { for (int j = 0; j < RANK; j++) { scalar product; scalar_mult(&product, &m->v[j][i], &a->v[j]); scalar_add(&out->v[i], &product); } } } template void scalar_inner_product(scalar *out, const vector *lhs, const vector *rhs) { scalar_zero(out); for (int i = 0; i < RANK; i++) { scalar product; scalar_mult(&product, &lhs->v[i], &rhs->v[i]); scalar_add(out, &product); } } // Algorithm 6 from the spec. Rejection samples a Keccak stream to get // uniformly distributed elements. This is used for matrix expansion and only // operates on public inputs. 
static void scalar_from_keccak_vartime(scalar *out, struct BORINGSSL_keccak_st *keccak_ctx) { assert(keccak_ctx->squeeze_offset == 0); assert(keccak_ctx->rate_bytes == 168); static_assert(168 % 3 == 0, "block and coefficient boundaries do not align"); int done = 0; while (done < DEGREE) { uint8_t block[168]; BORINGSSL_keccak_squeeze(keccak_ctx, block, sizeof(block)); for (size_t i = 0; i < sizeof(block) && done < DEGREE; i += 3) { uint16_t d1 = block[i] + 256 * (block[i + 1] % 16); uint16_t d2 = block[i + 1] / 16 + 16 * block[i + 2]; if (d1 < kPrime) { out->c[done++] = d1; } if (d2 < kPrime && done < DEGREE) { out->c[done++] = d2; } } } } // Algorithm 7 from the spec, with eta fixed to two and the PRF call // included. Creates binominally distributed elements by sampling 2*|eta| bits, // and setting the coefficient to the count of the first bits minus the count of // the second bits, resulting in a centered binomial distribution. Since eta is // two this gives -2/2 with a probability of 1/16, -1/1 with probability 1/4, // and 0 with probability 3/8. void scalar_centered_binomial_distribution_eta_2_with_prf( scalar *out, const uint8_t input[33]) { uint8_t entropy[128]; static_assert(sizeof(entropy) == 2 * /*kEta=*/2 * DEGREE / 8, ""); prf(entropy, sizeof(entropy), input); for (int i = 0; i < DEGREE; i += 2) { uint8_t byte = entropy[i / 2]; uint16_t value = (byte & 1) + ((byte >> 1) & 1); value -= ((byte >> 2) & 1) + ((byte >> 3) & 1); // Add |kPrime| if |value| underflowed. See |reduce_once| for a discussion // on why the value barrier is omitted. While this could have been written // reduce_once(value + kPrime), this is one extra addition and small range // of |value| tempts some versions of Clang to emit a branch. uint16_t mask = 0u - (value >> 15); out->c[i] = ((value + kPrime) & mask) | (value & ~mask); byte >>= 4; value = (byte & 1) + ((byte >> 1) & 1); value -= ((byte >> 2) & 1) + ((byte >> 3) & 1); // See above. 
mask = 0u - (value >> 15); out->c[i + 1] = ((value + kPrime) & mask) | (value & ~mask); } } // Generates a secret vector by using // |scalar_centered_binomial_distribution_eta_2_with_prf|, using the given seed // appending and incrementing |counter| for entry of the vector. template void vector_generate_secret_eta_2(vector *out, uint8_t *counter, const uint8_t seed[32]) { uint8_t input[33]; OPENSSL_memcpy(input, seed, 32); for (int i = 0; i < RANK; i++) { input[32] = (*counter)++; scalar_centered_binomial_distribution_eta_2_with_prf(&out->v[i], input); } } // Expands the matrix of a seed for key generation and for encaps-CPA. template void matrix_expand(matrix *out, const uint8_t rho[32]) { uint8_t input[34]; OPENSSL_memcpy(input, rho, 32); for (int i = 0; i < RANK; i++) { for (int j = 0; j < RANK; j++) { input[32] = i; input[33] = j; struct BORINGSSL_keccak_st keccak_ctx; BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake128); BORINGSSL_keccak_absorb(&keccak_ctx, input, sizeof(input)); scalar_from_keccak_vartime(&out->v[i][j], &keccak_ctx); } } } const uint8_t kMasks[8] = {0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff}; void scalar_encode(uint8_t *out, const scalar *s, int bits) { assert(bits <= (int)sizeof(*s->c) * 8 && bits != 1); uint8_t out_byte = 0; int out_byte_bits = 0; for (int i = 0; i < DEGREE; i++) { uint16_t element = s->c[i]; int element_bits_done = 0; while (element_bits_done < bits) { int chunk_bits = bits - element_bits_done; int out_bits_remaining = 8 - out_byte_bits; if (chunk_bits >= out_bits_remaining) { chunk_bits = out_bits_remaining; out_byte |= (element & kMasks[chunk_bits - 1]) << out_byte_bits; *out = out_byte; out++; out_byte_bits = 0; out_byte = 0; } else { out_byte |= (element & kMasks[chunk_bits - 1]) << out_byte_bits; out_byte_bits += chunk_bits; } element_bits_done += chunk_bits; element >>= chunk_bits; } } if (out_byte_bits > 0) { *out = out_byte; } } // scalar_encode_1 is |scalar_encode| specialised for |bits| == 1. 
void scalar_encode_1(uint8_t out[32], const scalar *s) { for (int i = 0; i < DEGREE; i += 8) { uint8_t out_byte = 0; for (int j = 0; j < 8; j++) { out_byte |= (s->c[i + j] & 1) << j; } *out = out_byte; out++; } } // Encodes an entire vector into 32*|RANK|*|bits| bytes. Note that since 256 // (DEGREE) is divisible by 8, the individual vector entries will always fill a // whole number of bytes, so we do not need to worry about bit packing here. template void vector_encode(uint8_t *out, const vector *a, int bits) { for (int i = 0; i < RANK; i++) { scalar_encode(out + i * bits * DEGREE / 8, &a->v[i], bits); } } // scalar_decode parses |DEGREE * bits| bits from |in| into |DEGREE| values in // |out|. It returns one on success and zero if any parsed value is >= // |kPrime|. int scalar_decode(scalar *out, const uint8_t *in, int bits) { assert(bits <= (int)sizeof(*out->c) * 8 && bits != 1); uint8_t in_byte = 0; int in_byte_bits_left = 0; for (int i = 0; i < DEGREE; i++) { uint16_t element = 0; int element_bits_done = 0; while (element_bits_done < bits) { if (in_byte_bits_left == 0) { in_byte = *in; in++; in_byte_bits_left = 8; } int chunk_bits = bits - element_bits_done; if (chunk_bits > in_byte_bits_left) { chunk_bits = in_byte_bits_left; } element |= (in_byte & kMasks[chunk_bits - 1]) << element_bits_done; in_byte_bits_left -= chunk_bits; in_byte >>= chunk_bits; element_bits_done += chunk_bits; } // An element is only out of range in the case of invalid input, in which // case it is okay to leak the comparison. if (constant_time_declassify_int(element >= kPrime)) { return 0; } out->c[i] = element; } return 1; } // scalar_decode_1 is |scalar_decode| specialised for |bits| == 1. void scalar_decode_1(scalar *out, const uint8_t in[32]) { for (int i = 0; i < DEGREE; i += 8) { uint8_t in_byte = *in; in++; for (int j = 0; j < 8; j++) { out->c[i + j] = in_byte & 1; in_byte >>= 1; } } } // Decodes 32*|RANK|*|bits| bytes from |in| into |out|. 
It returns one on // success or zero if any parsed value is >= |kPrime|. template static int vector_decode(vector *out, const uint8_t *in, int bits) { for (int i = 0; i < RANK; i++) { if (!scalar_decode(&out->v[i], in + i * bits * DEGREE / 8, bits)) { return 0; } } return 1; } // Compresses (lossily) an input |x| mod 3329 into |bits| many bits by grouping // numbers close to each other together. The formula used is // round(2^|bits|/kPrime*x) mod 2^|bits|. // Uses Barrett reduction to achieve constant time. Since we need both the // remainder (for rounding) and the quotient (as the result), we cannot use // |reduce| here, but need to do the Barrett reduction directly. static uint16_t compress(uint16_t x, int bits) { uint32_t shifted = (uint32_t)x << bits; uint64_t product = (uint64_t)shifted * kBarrettMultiplier; uint32_t quotient = (uint32_t)(product >> kBarrettShift); uint32_t remainder = shifted - quotient * kPrime; // Adjust the quotient to round correctly: // 0 <= remainder <= kHalfPrime round to 0 // kHalfPrime < remainder <= kPrime + kHalfPrime round to 1 // kPrime + kHalfPrime < remainder < 2 * kPrime round to 2 declassify_assert(remainder < 2u * kPrime); quotient += 1 & constant_time_lt_w(kHalfPrime, remainder); quotient += 1 & constant_time_lt_w(kPrime + kHalfPrime, remainder); return quotient & ((1 << bits) - 1); } // Decompresses |x| by using an equi-distant representative. The formula is // round(kPrime/2^|bits|*x). Note that 2^|bits| being the divisor allows us to // implement this logic using only bit operations. uint16_t decompress(uint16_t x, int bits) { uint32_t product = (uint32_t)x * kPrime; uint32_t power = 1 << bits; // This is |product| % power, since |power| is a power of 2. uint32_t remainder = product & (power - 1); // This is |product| / power, since |power| is a power of 2. 
uint32_t lower = product >> bits; // The rounding logic works since the first half of numbers mod |power| have a // 0 as first bit, and the second half has a 1 as first bit, since |power| is // a power of 2. As a 12 bit number, |remainder| is always positive, so we // will shift in 0s for a right shift. return lower + (remainder >> (bits - 1)); } static void scalar_compress(scalar *s, int bits) { for (int i = 0; i < DEGREE; i++) { s->c[i] = compress(s->c[i], bits); } } static void scalar_decompress(scalar *s, int bits) { for (int i = 0; i < DEGREE; i++) { s->c[i] = decompress(s->c[i], bits); } } template void vector_compress(vector *a, int bits) { for (int i = 0; i < RANK; i++) { scalar_compress(&a->v[i], bits); } } template void vector_decompress(vector *a, int bits) { for (int i = 0; i < RANK; i++) { scalar_decompress(&a->v[i], bits); } } template struct public_key { vector t; uint8_t rho[32]; uint8_t public_key_hash[32]; matrix m; }; template struct private_key { struct public_key pub; vector s; uint8_t fo_failure_secret[32]; }; template static void decrypt_cpa( uint8_t out[32], const struct private_key *priv, const uint8_t ciphertext[BCM_MLKEM768_CIPHERTEXT_BYTES]) { constexpr int du = RANK == RANK768 ? kDU768 : kDU1024; constexpr int dv = RANK == RANK768 ? 
kDV768 : kDV1024; vector u; vector_decode(&u, ciphertext, du); vector_decompress(&u, du); vector_ntt(&u); scalar v; scalar_decode(&v, ciphertext + compressed_vector_size(RANK), dv); scalar_decompress(&v, dv); scalar mask; scalar_inner_product(&mask, &priv->s, &u); scalar_inverse_ntt(&mask); scalar_sub(&v, &mask); scalar_compress(&v, 1); scalar_encode_1(out, &v); } template static bcm_status mlkem_marshal_public_key(CBB *out, const struct public_key *pub) { uint8_t *vector_output; if (!CBB_add_space(out, &vector_output, encoded_vector_size(RANK))) { return bcm_status::failure; } vector_encode(vector_output, &pub->t, kLog2Prime); if (!CBB_add_bytes(out, pub->rho, sizeof(pub->rho))) { return bcm_status::failure; } return bcm_status::approved; } template void mlkem_generate_key_external_seed( uint8_t *out_encoded_public_key, private_key *priv, const uint8_t seed[BCM_MLKEM_SEED_BYTES]) { uint8_t augmented_seed[33]; OPENSSL_memcpy(augmented_seed, seed, 32); augmented_seed[32] = RANK; uint8_t hashed[64]; hash_g(hashed, augmented_seed, sizeof(augmented_seed)); const uint8_t *const rho = hashed; const uint8_t *const sigma = hashed + 32; // rho is public. CONSTTIME_DECLASSIFY(rho, 32); OPENSSL_memcpy(priv->pub.rho, hashed, sizeof(priv->pub.rho)); matrix_expand(&priv->pub.m, rho); uint8_t counter = 0; vector_generate_secret_eta_2(&priv->s, &counter, sigma); vector_ntt(&priv->s); vector error; vector_generate_secret_eta_2(&error, &counter, sigma); vector_ntt(&error); matrix_mult_transpose(&priv->pub.t, &priv->pub.m, &priv->s); vector_add(&priv->pub.t, &error); // t is part of the public key and thus is public. 
CONSTTIME_DECLASSIFY(&priv->pub.t, sizeof(priv->pub.t)); CBB cbb; CBB_init_fixed(&cbb, out_encoded_public_key, encoded_public_key_size(RANK)); if (!bcm_success(mlkem_marshal_public_key(&cbb, &priv->pub))) { abort(); } hash_h(priv->pub.public_key_hash, out_encoded_public_key, encoded_public_key_size(RANK)); OPENSSL_memcpy(priv->fo_failure_secret, seed + 32, 32); } // Encrypts a message with given randomness to // the ciphertext in |out|. Without applying the Fujisaki-Okamoto transform this // would not result in a CCA secure scheme, since lattice schemes are vulnerable // to decryption failure oracles. template void encrypt_cpa(uint8_t *out, const struct mlkem::public_key *pub, const uint8_t message[32], const uint8_t randomness[32]) { constexpr int du = RANK == RANK768 ? mlkem::kDU768 : mlkem::kDU1024; constexpr int dv = RANK == RANK768 ? mlkem::kDV768 : mlkem::kDV1024; uint8_t counter = 0; mlkem::vector secret; vector_generate_secret_eta_2(&secret, &counter, randomness); vector_ntt(&secret); mlkem::vector error; vector_generate_secret_eta_2(&error, &counter, randomness); uint8_t input[33]; OPENSSL_memcpy(input, randomness, 32); input[32] = counter; mlkem::scalar scalar_error; scalar_centered_binomial_distribution_eta_2_with_prf(&scalar_error, input); mlkem::vector u; matrix_mult(&u, &pub->m, &secret); vector_inverse_ntt(&u); vector_add(&u, &error); mlkem::scalar v; scalar_inner_product(&v, &pub->t, &secret); scalar_inverse_ntt(&v); scalar_add(&v, &scalar_error); mlkem::scalar expanded_message; scalar_decode_1(&expanded_message, message); scalar_decompress(&expanded_message, 1); scalar_add(&v, &expanded_message); vector_compress(&u, du); vector_encode(out, &u, du); scalar_compress(&v, dv); scalar_encode(out + mlkem::compressed_vector_size(RANK), &v, dv); } // See section 6.3 template void mlkem_decap(uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES], const uint8_t *ciphertext, const struct private_key *priv) { uint8_t decrypted[64]; decrypt_cpa(decrypted, 
priv, ciphertext); OPENSSL_memcpy(decrypted + 32, priv->pub.public_key_hash, sizeof(decrypted) - 32); uint8_t key_and_randomness[64]; hash_g(key_and_randomness, decrypted, sizeof(decrypted)); constexpr size_t ciphertext_len = ciphertext_size(RANK); uint8_t expected_ciphertext[BCM_MLKEM1024_CIPHERTEXT_BYTES]; static_assert(ciphertext_len <= sizeof(expected_ciphertext), ""); encrypt_cpa(expected_ciphertext, &priv->pub, decrypted, key_and_randomness + 32); uint8_t failure_key[32]; kdf(failure_key, priv->fo_failure_secret, ciphertext, ciphertext_len); uint8_t mask = constant_time_eq_int_8( CRYPTO_memcmp(ciphertext, expected_ciphertext, ciphertext_len), 0); for (int i = 0; i < BCM_MLKEM_SHARED_SECRET_BYTES; i++) { out_shared_secret[i] = constant_time_select_8(mask, key_and_randomness[i], failure_key[i]); } } // mlkem_parse_public_key_no_hash parses |in| into |pub| but doesn't calculate // the value of |pub->public_key_hash|. template int mlkem_parse_public_key_no_hash(struct public_key *pub, CBS *in) { CBS t_bytes; if (!CBS_get_bytes(in, &t_bytes, encoded_vector_size(RANK)) || !vector_decode(&pub->t, CBS_data(&t_bytes), kLog2Prime) || !CBS_copy_bytes(in, pub->rho, sizeof(pub->rho))) { return 0; } matrix_expand(&pub->m, pub->rho); return 1; } template int mlkem_parse_public_key(struct public_key *pub, CBS *in) { CBS orig_in = *in; if (!mlkem_parse_public_key_no_hash(pub, in) || // CBS_len(in) != 0) { return 0; } hash_h(pub->public_key_hash, CBS_data(&orig_in), CBS_len(&orig_in)); return 1; } template int mlkem_parse_private_key(struct private_key *priv, CBS *in) { CBS s_bytes; if (!CBS_get_bytes(in, &s_bytes, encoded_vector_size(RANK)) || !vector_decode(&priv->s, CBS_data(&s_bytes), kLog2Prime) || !mlkem_parse_public_key_no_hash(&priv->pub, in) || !CBS_copy_bytes(in, priv->pub.public_key_hash, sizeof(priv->pub.public_key_hash)) || !CBS_copy_bytes(in, priv->fo_failure_secret, sizeof(priv->fo_failure_secret)) || CBS_len(in) != 0) { return 0; } return 1; } template int 
mlkem_marshal_private_key(CBB *out, const struct private_key *priv) { uint8_t *s_output; if (!CBB_add_space(out, &s_output, encoded_vector_size(RANK))) { return 0; } vector_encode(s_output, &priv->s, kLog2Prime); if (!bcm_success(mlkem_marshal_public_key(out, &priv->pub)) || !CBB_add_bytes(out, priv->pub.public_key_hash, sizeof(priv->pub.public_key_hash)) || !CBB_add_bytes(out, priv->fo_failure_secret, sizeof(priv->fo_failure_secret))) { return 0; } return 1; } struct public_key *public_key_768_from_external( const struct BCM_mlkem768_public_key *external) { static_assert(sizeof(struct BCM_mlkem768_public_key) >= sizeof(struct public_key), "MLKEM public key is too small"); static_assert(alignof(struct BCM_mlkem768_public_key) >= alignof(struct public_key), "MLKEM public key alignment incorrect"); return (struct public_key *)external; } static struct public_key * public_key_1024_from_external(const struct BCM_mlkem1024_public_key *external) { static_assert(sizeof(struct BCM_mlkem1024_public_key) >= sizeof(struct public_key), "MLKEM1024 public key is too small"); static_assert(alignof(struct BCM_mlkem1024_public_key) >= alignof(struct public_key), "MLKEM1024 public key alignment incorrect"); return (struct public_key *)external; } struct private_key * private_key_768_from_external(const struct BCM_mlkem768_private_key *external) { static_assert(sizeof(struct BCM_mlkem768_private_key) >= sizeof(struct private_key), "MLKEM private key too small"); static_assert(alignof(struct BCM_mlkem768_private_key) >= alignof(struct private_key), "MLKEM private key alignment incorrect"); return (struct private_key *)external; } struct private_key * private_key_1024_from_external( const struct BCM_mlkem1024_private_key *external) { static_assert(sizeof(struct BCM_mlkem1024_private_key) >= sizeof(struct private_key), "MLKEM1024 private key too small"); static_assert(alignof(struct BCM_mlkem1024_private_key) >= alignof(struct private_key), "MLKEM1024 private key alignment incorrect"); 
// (Tail of a file-local helper: casts the caller-visible external key struct
// to the module-internal mlkem::private_key representation.)
  return (struct private_key *)external;
}

}  // namespace

}  // namespace mlkem

// Generates an ML-KEM-768 key pair using fresh entropy from |BCM_rand_bytes|.
// The seed is flagged secret for constant-time validation. If
// |optional_out_seed| is non-NULL the caller receives a copy of the seed so
// the key can later be rebuilt with |BCM_mlkem768_private_key_from_seed|.
bcm_infallible BCM_mlkem768_generate_key(
    uint8_t out_encoded_public_key[BCM_MLKEM768_PUBLIC_KEY_BYTES],
    uint8_t optional_out_seed[BCM_MLKEM_SEED_BYTES],
    struct BCM_mlkem768_private_key *out_private_key) {
  uint8_t seed[BCM_MLKEM_SEED_BYTES];
  BCM_rand_bytes(seed, sizeof(seed));
  CONSTTIME_SECRET(seed, sizeof(seed));
  if (optional_out_seed) {
    OPENSSL_memcpy(optional_out_seed, seed, sizeof(seed));
  }
  BCM_mlkem768_generate_key_external_seed(out_encoded_public_key,
                                          out_private_key, seed);
  return bcm_infallible::approved;
}

// Rebuilds an ML-KEM-768 private key from a previously saved seed. The
// encoded public key is recomputed into a throwaway buffer. Fails only on a
// wrong |seed_len|.
bcm_status BCM_mlkem768_private_key_from_seed(
    struct BCM_mlkem768_private_key *out_private_key, const uint8_t *seed,
    size_t seed_len) {
  if (seed_len != BCM_MLKEM_SEED_BYTES) {
    return bcm_status::failure;
  }
  uint8_t public_key_bytes[BCM_MLKEM768_PUBLIC_KEY_BYTES];
  BCM_mlkem768_generate_key_external_seed(public_key_bytes, out_private_key,
                                          seed);
  return bcm_status::approved;
}

// ML-KEM-1024 analogue of |BCM_mlkem768_generate_key|.
bcm_infallible BCM_mlkem1024_generate_key(
    uint8_t out_encoded_public_key[BCM_MLKEM1024_PUBLIC_KEY_BYTES],
    uint8_t optional_out_seed[BCM_MLKEM_SEED_BYTES],
    struct BCM_mlkem1024_private_key *out_private_key) {
  uint8_t seed[BCM_MLKEM_SEED_BYTES];
  BCM_rand_bytes(seed, sizeof(seed));
  CONSTTIME_SECRET(seed, sizeof(seed));
  if (optional_out_seed) {
    OPENSSL_memcpy(optional_out_seed, seed, sizeof(seed));
  }
  BCM_mlkem1024_generate_key_external_seed(out_encoded_public_key,
                                           out_private_key, seed);
  return bcm_infallible::approved;
}

// ML-KEM-1024 analogue of |BCM_mlkem768_private_key_from_seed|.
bcm_status BCM_mlkem1024_private_key_from_seed(
    struct BCM_mlkem1024_private_key *out_private_key, const uint8_t *seed,
    size_t seed_len) {
  if (seed_len != BCM_MLKEM_SEED_BYTES) {
    return bcm_status::failure;
  }
  uint8_t public_key_bytes[BCM_MLKEM1024_PUBLIC_KEY_BYTES];
  BCM_mlkem1024_generate_key_external_seed(public_key_bytes, out_private_key,
                                           seed);
  return bcm_status::approved;
}

// Deterministic ML-KEM-768 key generation from a caller-supplied seed.
bcm_infallible BCM_mlkem768_generate_key_external_seed(
    uint8_t out_encoded_public_key[BCM_MLKEM768_PUBLIC_KEY_BYTES],
    struct BCM_mlkem768_private_key *out_private_key,
    const uint8_t seed[BCM_MLKEM_SEED_BYTES]) {
  mlkem::private_key *priv =
      mlkem::private_key_768_from_external(out_private_key);
  mlkem_generate_key_external_seed(out_encoded_public_key, priv, seed);
  return bcm_infallible::approved;
}

// Deterministic ML-KEM-1024 key generation from a caller-supplied seed.
bcm_infallible BCM_mlkem1024_generate_key_external_seed(
    uint8_t out_encoded_public_key[BCM_MLKEM1024_PUBLIC_KEY_BYTES],
    struct BCM_mlkem1024_private_key *out_private_key,
    const uint8_t seed[BCM_MLKEM_SEED_BYTES]) {
  mlkem::private_key *priv =
      mlkem::private_key_1024_from_external(out_private_key);
  mlkem_generate_key_external_seed(out_encoded_public_key, priv, seed);
  return bcm_infallible::approved;
}

// Copies the public half that an ML-KEM-768 private key carries inside it.
bcm_infallible BCM_mlkem768_public_from_private(
    struct BCM_mlkem768_public_key *out_public_key,
    const struct BCM_mlkem768_private_key *private_key) {
  struct mlkem::public_key *const pub =
      mlkem::public_key_768_from_external(out_public_key);
  const struct mlkem::private_key *const priv =
      mlkem::private_key_768_from_external(private_key);
  *pub = priv->pub;
  return bcm_infallible::approved;
}

// Copies the public half that an ML-KEM-1024 private key carries inside it.
bcm_infallible BCM_mlkem1024_public_from_private(
    struct BCM_mlkem1024_public_key *out_public_key,
    const struct BCM_mlkem1024_private_key *private_key) {
  struct mlkem::public_key *const pub =
      mlkem::public_key_1024_from_external(out_public_key);
  const struct mlkem::private_key *const priv =
      mlkem::private_key_1024_from_external(private_key);
  *pub = priv->pub;
  return bcm_infallible::approved;
}

// Calls |MLKEM768_encap_external_entropy| with random bytes from
// |BCM_rand_bytes|
bcm_infallible BCM_mlkem768_encap(
    uint8_t out_ciphertext[BCM_MLKEM768_CIPHERTEXT_BYTES],
    uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES],
    const struct BCM_mlkem768_public_key *public_key) {
  uint8_t entropy[BCM_MLKEM_ENCAP_ENTROPY];
  BCM_rand_bytes(entropy, BCM_MLKEM_ENCAP_ENTROPY);
  CONSTTIME_SECRET(entropy, BCM_MLKEM_ENCAP_ENTROPY);
  BCM_mlkem768_encap_external_entropy(out_ciphertext, out_shared_secret,
                                      public_key, entropy);
  return bcm_infallible::approved;
}

// ML-KEM-1024 analogue of |BCM_mlkem768_encap|: encapsulation with random
// entropy from |BCM_rand_bytes|.
bcm_infallible BCM_mlkem1024_encap(
    uint8_t out_ciphertext[BCM_MLKEM1024_CIPHERTEXT_BYTES],
    uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES],
    const struct BCM_mlkem1024_public_key *public_key) {
  uint8_t entropy[BCM_MLKEM_ENCAP_ENTROPY];
  BCM_rand_bytes(entropy, BCM_MLKEM_ENCAP_ENTROPY);
  CONSTTIME_SECRET(entropy, BCM_MLKEM_ENCAP_ENTROPY);
  BCM_mlkem1024_encap_external_entropy(out_ciphertext, out_shared_secret,
                                       public_key, entropy);
  return bcm_infallible::approved;
}

// See section 6.2.
// Shared encapsulation core: hashes entropy || public-key-hash with G to get
// (key, randomness), then CPA-encrypts |entropy| into the ciphertext. The
// first 32 bytes of the G output become the shared secret.
// NOTE(review): the template parameter list (presumably <int RANK>) appears to
// have been stripped during vendoring/extraction; the body references |RANK|.
template
void mlkem_encap_external_entropy(
    uint8_t *out_ciphertext,
    uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES],
    const struct mlkem::public_key *pub,
    const uint8_t entropy[BCM_MLKEM_ENCAP_ENTROPY]) {
  uint8_t input[64];
  OPENSSL_memcpy(input, entropy, BCM_MLKEM_ENCAP_ENTROPY);
  OPENSSL_memcpy(input + BCM_MLKEM_ENCAP_ENTROPY, pub->public_key_hash,
                 sizeof(input) - BCM_MLKEM_ENCAP_ENTROPY);
  uint8_t key_and_randomness[64];
  mlkem::hash_g(key_and_randomness, input, sizeof(input));
  encrypt_cpa(out_ciphertext, pub, entropy, key_and_randomness + 32);
  // The ciphertext is public.
  CONSTTIME_DECLASSIFY(out_ciphertext, mlkem::ciphertext_size(RANK));
  static_assert(BCM_MLKEM_SHARED_SECRET_BYTES == 32, "");
  memcpy(out_shared_secret, key_and_randomness, 32);
}

// Deterministic ML-KEM-768 encapsulation with caller-supplied entropy.
bcm_infallible BCM_mlkem768_encap_external_entropy(
    uint8_t out_ciphertext[BCM_MLKEM768_CIPHERTEXT_BYTES],
    uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES],
    const struct BCM_mlkem768_public_key *public_key,
    const uint8_t entropy[BCM_MLKEM_ENCAP_ENTROPY]) {
  const struct mlkem::public_key *pub =
      mlkem::public_key_768_from_external(public_key);
  mlkem_encap_external_entropy(out_ciphertext, out_shared_secret, pub,
                               entropy);
  return bcm_infallible::approved;
}

// Deterministic ML-KEM-1024 encapsulation with caller-supplied entropy.
bcm_infallible BCM_mlkem1024_encap_external_entropy(
    uint8_t out_ciphertext[BCM_MLKEM1024_CIPHERTEXT_BYTES],
    uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES],
    const struct BCM_mlkem1024_public_key *public_key,
    const uint8_t entropy[BCM_MLKEM_ENCAP_ENTROPY]) {
  const struct mlkem::public_key *pub =
      mlkem::public_key_1024_from_external(public_key);
  mlkem_encap_external_entropy(out_ciphertext, out_shared_secret, pub,
                               entropy);
  return bcm_infallible::approved;
}

// ML-KEM-768 decapsulation. On a bad ciphertext length the shared-secret
// output is filled with random bytes before failing, so callers never observe
// uninitialized memory.
bcm_status BCM_mlkem768_decap(
    uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES],
    const uint8_t *ciphertext, size_t ciphertext_len,
    const struct BCM_mlkem768_private_key *private_key) {
  if (ciphertext_len != BCM_MLKEM768_CIPHERTEXT_BYTES) {
    BCM_rand_bytes(out_shared_secret, BCM_MLKEM_SHARED_SECRET_BYTES);
    return bcm_status::failure;
  }
  const struct mlkem::private_key *priv =
      mlkem::private_key_768_from_external(private_key);
  mlkem_decap(out_shared_secret, ciphertext, priv);
  return bcm_status::approved;
}

// ML-KEM-1024 analogue of |BCM_mlkem768_decap|.
bcm_status BCM_mlkem1024_decap(
    uint8_t out_shared_secret[BCM_MLKEM_SHARED_SECRET_BYTES],
    const uint8_t *ciphertext, size_t ciphertext_len,
    const struct BCM_mlkem1024_private_key *private_key) {
  if (ciphertext_len != BCM_MLKEM1024_CIPHERTEXT_BYTES) {
    BCM_rand_bytes(out_shared_secret, BCM_MLKEM_SHARED_SECRET_BYTES);
    return bcm_status::failure;
  }
  const struct mlkem::private_key *priv =
      mlkem::private_key_1024_from_external(private_key);
  mlkem_decap(out_shared_secret, ciphertext, priv);
  return bcm_status::approved;
}

// Serializes an ML-KEM-768 public key into |out|.
bcm_status BCM_mlkem768_marshal_public_key(
    CBB *out, const struct BCM_mlkem768_public_key *public_key) {
  return mlkem_marshal_public_key(
      out, mlkem::public_key_768_from_external(public_key));
}

// Serializes an ML-KEM-1024 public key into |out|.
bcm_status BCM_mlkem1024_marshal_public_key(
    CBB *out, const struct BCM_mlkem1024_public_key *public_key) {
  return mlkem_marshal_public_key(
      out, mlkem::public_key_1024_from_external(public_key));
}

// Parses an encoded ML-KEM-768 public key from |in|.
bcm_status BCM_mlkem768_parse_public_key(
    struct BCM_mlkem768_public_key *public_key, CBS *in) {
  struct mlkem::public_key *pub =
      mlkem::public_key_768_from_external(public_key);
  if (!mlkem_parse_public_key(pub, in)) {
    return bcm_status::failure;
  }
  return bcm_status::approved;
}

// Parses an encoded ML-KEM-1024 public key from |in|.
bcm_status BCM_mlkem1024_parse_public_key(
    struct BCM_mlkem1024_public_key *public_key, CBS *in) {
  struct mlkem::public_key *pub =
      mlkem::public_key_1024_from_external(public_key);
  if (!mlkem_parse_public_key(pub, in)) {
    return bcm_status::failure;
  }
  return bcm_status::approved;
}

// Serializes an ML-KEM-768 private key into |out|.
bcm_status BCM_mlkem768_marshal_private_key(
    CBB *out, const struct BCM_mlkem768_private_key *private_key) {
  const struct mlkem::private_key *const priv =
      mlkem::private_key_768_from_external(private_key);
  if (!mlkem_marshal_private_key(out, priv)) {
    return bcm_status::failure;
  }
  return bcm_status::approved;
}

// Serializes an ML-KEM-1024 private key into |out|.
bcm_status BCM_mlkem1024_marshal_private_key(
    CBB *out, const struct BCM_mlkem1024_private_key *private_key) {
  const struct mlkem::private_key *const priv =
      mlkem::private_key_1024_from_external(private_key);
  if (!mlkem_marshal_private_key(out, priv)) {
    return bcm_status::failure;
  }
  return bcm_status::approved;
}

// Parses an encoded ML-KEM-768 private key from |in|.
bcm_status BCM_mlkem768_parse_private_key(
    struct BCM_mlkem768_private_key *out_private_key, CBS *in) {
  struct mlkem::private_key *const priv =
      mlkem::private_key_768_from_external(out_private_key);
  if (!mlkem_parse_private_key(priv, in)) {
    return bcm_status::failure;
  }
  return bcm_status::approved;
}

// Parses an encoded ML-KEM-1024 private key from |in|.
bcm_status BCM_mlkem1024_parse_private_key(
    struct BCM_mlkem1024_private_key *out_private_key, CBS *in) {
  struct mlkem::private_key *const priv =
      mlkem::private_key_1024_from_external(out_private_key);
  if (!mlkem_parse_private_key(priv, in)) {
    return bcm_status::failure;
  }
  return bcm_status::approved;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/rand/ctrdrbg.cc.inc
================================================
/* Copyright 2017 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the <...> targets of the following includes appear to have
// been stripped during vendoring/extraction.
#include
#include
#include

#include "../aes/internal.h"
#include "../service_indicator/internal.h"
#include "internal.h"


// Section references in this file refer to SP 800-90Ar1:
// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf

// See table 3.
static const uint64_t kMaxReseedCount = UINT64_C(1) << 48; CTR_DRBG_STATE *CTR_DRBG_new(const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], const uint8_t *personalization, size_t personalization_len) { CTR_DRBG_STATE *drbg = reinterpret_cast( OPENSSL_malloc(sizeof(CTR_DRBG_STATE))); if (drbg == NULL || !CTR_DRBG_init(drbg, entropy, personalization, personalization_len)) { CTR_DRBG_free(drbg); return NULL; } return drbg; } void CTR_DRBG_free(CTR_DRBG_STATE *state) { OPENSSL_free(state); } int CTR_DRBG_init(CTR_DRBG_STATE *drbg, const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], const uint8_t *personalization, size_t personalization_len) { // Section 10.2.1.3.1 if (personalization_len > CTR_DRBG_ENTROPY_LEN) { return 0; } uint8_t seed_material[CTR_DRBG_ENTROPY_LEN]; OPENSSL_memcpy(seed_material, entropy, CTR_DRBG_ENTROPY_LEN); for (size_t i = 0; i < personalization_len; i++) { seed_material[i] ^= personalization[i]; } // Section 10.2.1.2 // kInitMask is the result of encrypting blocks with big-endian value 1, 2 // and 3 with the all-zero AES-256 key. static const uint8_t kInitMask[CTR_DRBG_ENTROPY_LEN] = { 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, 0xa9, 0x63, 0xb4, 0xf1, 0xc4, 0xcb, 0x73, 0x8b, 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e, 0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18, 0x72, 0x60, 0x03, 0xca, 0x37, 0xa6, 0x2a, 0x74, 0xd1, 0xa2, 0xf5, 0x8e, 0x75, 0x06, 0x35, 0x8e, }; for (size_t i = 0; i < sizeof(kInitMask); i++) { seed_material[i] ^= kInitMask[i]; } drbg->ctr = aes_ctr_set_key(&drbg->ks, NULL, &drbg->block, seed_material, 32); OPENSSL_memcpy(drbg->counter, seed_material + 32, 16); drbg->reseed_counter = 1; return 1; } static_assert(CTR_DRBG_ENTROPY_LEN % AES_BLOCK_SIZE == 0, "not a multiple of AES block size"); // ctr_inc adds |n| to the last four bytes of |drbg->counter|, treated as a // big-endian number. 
static void ctr32_add(CTR_DRBG_STATE *drbg, uint32_t n) { uint32_t ctr = CRYPTO_load_u32_be(drbg->counter + 12); CRYPTO_store_u32_be(drbg->counter + 12, ctr + n); } static int ctr_drbg_update(CTR_DRBG_STATE *drbg, const uint8_t *data, size_t data_len) { // Per section 10.2.1.2, |data_len| must be |CTR_DRBG_ENTROPY_LEN|. Here, we // allow shorter inputs and right-pad them with zeros. This is equivalent to // the specified algorithm but saves a copy in |CTR_DRBG_generate|. if (data_len > CTR_DRBG_ENTROPY_LEN) { return 0; } uint8_t temp[CTR_DRBG_ENTROPY_LEN]; for (size_t i = 0; i < CTR_DRBG_ENTROPY_LEN; i += AES_BLOCK_SIZE) { ctr32_add(drbg, 1); drbg->block(drbg->counter, temp + i, &drbg->ks); } for (size_t i = 0; i < data_len; i++) { temp[i] ^= data[i]; } drbg->ctr = aes_ctr_set_key(&drbg->ks, NULL, &drbg->block, temp, 32); OPENSSL_memcpy(drbg->counter, temp + 32, 16); return 1; } int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], const uint8_t *additional_data, size_t additional_data_len) { // Section 10.2.1.4 uint8_t entropy_copy[CTR_DRBG_ENTROPY_LEN]; if (additional_data_len > 0) { if (additional_data_len > CTR_DRBG_ENTROPY_LEN) { return 0; } OPENSSL_memcpy(entropy_copy, entropy, CTR_DRBG_ENTROPY_LEN); for (size_t i = 0; i < additional_data_len; i++) { entropy_copy[i] ^= additional_data[i]; } entropy = entropy_copy; } if (!ctr_drbg_update(drbg, entropy, CTR_DRBG_ENTROPY_LEN)) { return 0; } drbg->reseed_counter = 1; return 1; } int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len, const uint8_t *additional_data, size_t additional_data_len) { // See 9.3.1 if (out_len > CTR_DRBG_MAX_GENERATE_LENGTH) { return 0; } // See 10.2.1.5.1 if (drbg->reseed_counter > kMaxReseedCount) { return 0; } if (additional_data_len != 0 && !ctr_drbg_update(drbg, additional_data, additional_data_len)) { return 0; } // kChunkSize is used to interact better with the cache. 
Since the AES-CTR // code assumes that it's encrypting rather than just writing keystream, the // buffer has to be zeroed first. Without chunking, large reads would zero // the whole buffer, flushing the L1 cache, and then do another pass (missing // the cache every time) to “encrypt” it. The code can avoid this by // chunking. static const size_t kChunkSize = 8 * 1024; while (out_len >= AES_BLOCK_SIZE) { size_t todo = kChunkSize; if (todo > out_len) { todo = out_len; } todo &= ~(AES_BLOCK_SIZE - 1); const size_t num_blocks = todo / AES_BLOCK_SIZE; OPENSSL_memset(out, 0, todo); ctr32_add(drbg, 1); drbg->ctr(out, out, num_blocks, &drbg->ks, drbg->counter); ctr32_add(drbg, (uint32_t)(num_blocks - 1)); out += todo; out_len -= todo; } if (out_len > 0) { uint8_t block[AES_BLOCK_SIZE]; ctr32_add(drbg, 1); drbg->block(drbg->counter, block, &drbg->ks); OPENSSL_memcpy(out, block, out_len); } // Right-padding |additional_data| in step 2.2 is handled implicitly by // |ctr_drbg_update|, to save a copy. if (!ctr_drbg_update(drbg, additional_data, additional_data_len)) { return 0; } drbg->reseed_counter++; FIPS_service_indicator_update_state(); return 1; } void CTR_DRBG_clear(CTR_DRBG_STATE *drbg) { OPENSSL_cleanse(drbg, sizeof(CTR_DRBG_STATE)); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/rand/internal.h ================================================ /* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H #define OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H #include #include #include "../../bcm_support.h" #include "../aes/internal.h" #if defined(__cplusplus) extern "C" { #endif // rand_fork_unsafe_buffering_enabled returns whether fork-unsafe buffering has // been enabled via |RAND_enable_fork_unsafe_buffering|. int rand_fork_unsafe_buffering_enabled(void); // CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP // 800-90Ar1. struct ctr_drbg_state_st { AES_KEY ks; block128_f block; ctr128_f ctr; uint8_t counter[16]; uint64_t reseed_counter; }; // CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of // entropy in |entropy| and, optionally, a personalization string up to // |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero // on error. OPENSSL_EXPORT int CTR_DRBG_init(CTR_DRBG_STATE *drbg, const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], const uint8_t *personalization, size_t personalization_len); #if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM) inline int have_rdrand(void) { return CRYPTO_is_RDRAND_capable(); } // have_fast_rdrand returns true if RDRAND is supported and it's reasonably // fast. Concretely the latter is defined by whether the chip is Intel (fast) or // not (assumed slow). inline int have_fast_rdrand(void) { return CRYPTO_is_RDRAND_capable() && CRYPTO_is_intel_cpu(); } // CRYPTO_rdrand writes eight bytes of random data from the hardware RNG to // |out|. It returns one on success or zero on hardware failure. 
int CRYPTO_rdrand(uint8_t out[8]); // CRYPTO_rdrand_multiple8_buf fills |len| bytes at |buf| with random data from // the hardware RNG. The |len| argument must be a multiple of eight. It returns // one on success and zero on hardware failure. int CRYPTO_rdrand_multiple8_buf(uint8_t *buf, size_t len); #else // OPENSSL_X86_64 && !OPENSSL_NO_ASM inline int have_rdrand(void) { return 0; } inline int have_fast_rdrand(void) { return 0; } #endif // OPENSSL_X86_64 && !OPENSSL_NO_ASM #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/rand/rand.cc.inc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #if defined(BORINGSSL_FIPS) #include #endif #include #include #include #include "../../bcm_support.h" #include "../bcm_interface.h" #include "../delocate.h" #include "internal.h" // It's assumed that the operating system always has an unfailing source of // entropy which is accessed via |CRYPTO_sysrand[_for_seed]|. (If the operating // system entropy source fails, it's up to |CRYPTO_sysrand| to abort the // process—we don't try to handle it.) 
// // In addition, the hardware may provide a low-latency RNG. Intel's rdrand // instruction is the canonical example of this. When a hardware RNG is // available we don't need to worry about an RNG failure arising from fork()ing // the process or moving a VM, so we can keep thread-local RNG state and use it // as an additional-data input to CTR-DRBG. // // (We assume that the OS entropy is safe from fork()ing and VM duplication. // This might be a bit of a leap of faith, esp on Windows, but there's nothing // that we can do about it.) // kReseedInterval is the number of generate calls made to CTR-DRBG before // reseeding. static const unsigned kReseedInterval = 4096; // CRNGT_BLOCK_SIZE is the number of bytes in a “block” for the purposes of the // continuous random number generator test in FIPS 140-2, section 4.9.2. #define CRNGT_BLOCK_SIZE 16 namespace { // rand_thread_state contains the per-thread state for the RNG. struct rand_thread_state { CTR_DRBG_STATE drbg; uint64_t fork_generation; // calls is the number of generate calls made on |drbg| since it was last // (re)seeded. This is bound by |kReseedInterval|. unsigned calls; // last_block_valid is non-zero iff |last_block| contains data from // |get_seed_entropy|. int last_block_valid; // fork_unsafe_buffering is non-zero iff, when |drbg| was last (re)seeded, // fork-unsafe buffering was enabled. int fork_unsafe_buffering; #if defined(BORINGSSL_FIPS) // last_block contains the previous block from |get_seed_entropy|. uint8_t last_block[CRNGT_BLOCK_SIZE]; // next and prev form a NULL-terminated, double-linked list of all states in // a process. struct rand_thread_state *next, *prev; // clear_drbg_lock synchronizes between uses of |drbg| and // |rand_thread_state_clear_all| clearing it. This lock should be uncontended // in the common case, except on shutdown. 
CRYPTO_MUTEX clear_drbg_lock; #endif }; } // namespace #if defined(BORINGSSL_FIPS) // thread_states_list is the head of a linked-list of all |rand_thread_state| // objects in the process, one per thread. This is needed because FIPS requires // that they be zeroed on process exit, but thread-local destructors aren't // called when the whole process is exiting. DEFINE_BSS_GET(struct rand_thread_state *, thread_states_list, nullptr) DEFINE_STATIC_MUTEX(thread_states_list_lock) static void rand_thread_state_clear_all(void) __attribute__((destructor)); static void rand_thread_state_clear_all(void) { CRYPTO_MUTEX_lock_write(thread_states_list_lock_bss_get()); for (struct rand_thread_state *cur = *thread_states_list_bss_get(); cur != NULL; cur = cur->next) { CRYPTO_MUTEX_lock_write(&cur->clear_drbg_lock); CTR_DRBG_clear(&cur->drbg); } // The locks are deliberately left locked so that any threads that are still // running will hang if they try to call |BCM_rand_bytes|. It also ensures // |rand_thread_state_free| cannot free any thread state while we've taken the // lock. } #endif // rand_thread_state_free frees a |rand_thread_state|. This is called when a // thread exits. static void rand_thread_state_free(void *state_in) { struct rand_thread_state *state = reinterpret_cast(state_in); if (state_in == NULL) { return; } #if defined(BORINGSSL_FIPS) CRYPTO_MUTEX_lock_write(thread_states_list_lock_bss_get()); if (state->prev != NULL) { state->prev->next = state->next; } else if (*thread_states_list_bss_get() == state) { // |state->prev| may be NULL either if it is the head of the list, // or if |state| is freed before it was added to the list at all. // Compare against the head of the list to distinguish these cases. 
*thread_states_list_bss_get() = state->next; } if (state->next != NULL) { state->next->prev = state->prev; } CRYPTO_MUTEX_unlock_write(thread_states_list_lock_bss_get()); CTR_DRBG_clear(&state->drbg); #endif OPENSSL_free(state); } #if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM) && \ !defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) // rdrand should only be called if either |have_rdrand| or |have_fast_rdrand| // returned true. static int rdrand(uint8_t *buf, const size_t len) { const size_t len_multiple8 = len & ~7; if (!CRYPTO_rdrand_multiple8_buf(buf, len_multiple8)) { return 0; } const size_t remainder = len - len_multiple8; if (remainder != 0) { assert(remainder < 8); uint8_t rand_buf[8]; if (!CRYPTO_rdrand(rand_buf)) { return 0; } OPENSSL_memcpy(buf + len_multiple8, rand_buf, remainder); } return 1; } #else static int rdrand(uint8_t *buf, size_t len) { return 0; } #endif bcm_status BCM_rand_bytes_hwrng(uint8_t *buf, const size_t len) { if (!have_rdrand()) { return bcm_status::failure; } if (rdrand(buf, len)) { return bcm_status::not_approved; } return bcm_status::failure; } #if defined(BORINGSSL_FIPS) // In passive entropy mode, entropy is supplied from outside of the module via // |BCM_rand_load_entropy| and is stored in global instance of the following // structure. struct entropy_buffer { // bytes contains entropy suitable for seeding a DRBG. uint8_t bytes[CRNGT_BLOCK_SIZE + CTR_DRBG_ENTROPY_LEN * BORINGSSL_FIPS_OVERREAD]; // bytes_valid indicates the number of bytes of |bytes| that contain valid // data. size_t bytes_valid; // want_additional_input is true if any of the contents of |bytes| were // obtained via a method other than from the kernel. In these cases entropy // from the kernel is also provided via an additional input to the DRBG. 
int want_additional_input; }; DEFINE_BSS_GET(struct entropy_buffer, entropy_buffer, {}) DEFINE_STATIC_MUTEX(entropy_buffer_lock) bcm_infallible BCM_rand_load_entropy(const uint8_t *entropy, size_t entropy_len, int want_additional_input) { struct entropy_buffer *const buffer = entropy_buffer_bss_get(); CRYPTO_MUTEX_lock_write(entropy_buffer_lock_bss_get()); const size_t space = sizeof(buffer->bytes) - buffer->bytes_valid; if (entropy_len > space) { entropy_len = space; } OPENSSL_memcpy(&buffer->bytes[buffer->bytes_valid], entropy, entropy_len); buffer->bytes_valid += entropy_len; buffer->want_additional_input |= want_additional_input && (entropy_len != 0); CRYPTO_MUTEX_unlock_write(entropy_buffer_lock_bss_get()); return bcm_infallible::not_approved; } // get_seed_entropy fills |out_entropy_len| bytes of |out_entropy| from the // global |entropy_buffer|. static void get_seed_entropy(uint8_t *out_entropy, size_t out_entropy_len, int *out_want_additional_input) { struct entropy_buffer *const buffer = entropy_buffer_bss_get(); if (out_entropy_len > sizeof(buffer->bytes)) { abort(); } CRYPTO_MUTEX_lock_write(entropy_buffer_lock_bss_get()); while (buffer->bytes_valid < out_entropy_len) { CRYPTO_MUTEX_unlock_write(entropy_buffer_lock_bss_get()); RAND_need_entropy(out_entropy_len - buffer->bytes_valid); CRYPTO_MUTEX_lock_write(entropy_buffer_lock_bss_get()); } *out_want_additional_input = buffer->want_additional_input; OPENSSL_memcpy(out_entropy, buffer->bytes, out_entropy_len); OPENSSL_memmove(buffer->bytes, &buffer->bytes[out_entropy_len], buffer->bytes_valid - out_entropy_len); buffer->bytes_valid -= out_entropy_len; if (buffer->bytes_valid == 0) { buffer->want_additional_input = 0; } CRYPTO_MUTEX_unlock_write(entropy_buffer_lock_bss_get()); } // rand_get_seed fills |seed| with entropy. In some cases, it will additionally // fill |additional_input| with entropy to supplement |seed|. It sets // |*out_additional_input_len| to the number of extra bytes. 
static void rand_get_seed(struct rand_thread_state *state, uint8_t seed[CTR_DRBG_ENTROPY_LEN], uint8_t additional_input[CTR_DRBG_ENTROPY_LEN], size_t *out_additional_input_len) { uint8_t entropy_bytes[sizeof(state->last_block) + CTR_DRBG_ENTROPY_LEN * BORINGSSL_FIPS_OVERREAD]; uint8_t *entropy = entropy_bytes; size_t entropy_len = sizeof(entropy_bytes); if (state->last_block_valid) { // No need to fill |state->last_block| with entropy from the read. entropy += sizeof(state->last_block); entropy_len -= sizeof(state->last_block); } int want_additional_input; get_seed_entropy(entropy, entropy_len, &want_additional_input); if (!state->last_block_valid) { OPENSSL_memcpy(state->last_block, entropy, sizeof(state->last_block)); entropy += sizeof(state->last_block); entropy_len -= sizeof(state->last_block); } // See FIPS 140-2, section 4.9.2. This is the “continuous random number // generator test” which causes the program to randomly abort. Hopefully the // rate of failure is small enough not to be a problem in practice. if (CRYPTO_memcmp(state->last_block, entropy, sizeof(state->last_block)) == 0) { fprintf(CRYPTO_get_stderr(), "CRNGT failed.\n"); BORINGSSL_FIPS_abort(); } assert(entropy_len % CRNGT_BLOCK_SIZE == 0); for (size_t i = CRNGT_BLOCK_SIZE; i < entropy_len; i += CRNGT_BLOCK_SIZE) { if (CRYPTO_memcmp(entropy + i - CRNGT_BLOCK_SIZE, entropy + i, CRNGT_BLOCK_SIZE) == 0) { fprintf(CRYPTO_get_stderr(), "CRNGT failed.\n"); BORINGSSL_FIPS_abort(); } } OPENSSL_memcpy(state->last_block, entropy + entropy_len - CRNGT_BLOCK_SIZE, CRNGT_BLOCK_SIZE); assert(entropy_len == BORINGSSL_FIPS_OVERREAD * CTR_DRBG_ENTROPY_LEN); OPENSSL_memcpy(seed, entropy, CTR_DRBG_ENTROPY_LEN); for (size_t i = 1; i < BORINGSSL_FIPS_OVERREAD; i++) { for (size_t j = 0; j < CTR_DRBG_ENTROPY_LEN; j++) { seed[j] ^= entropy[CTR_DRBG_ENTROPY_LEN * i + j]; } } // If we used something other than system entropy then also // opportunistically read from the system. 
This avoids solely relying on the // hardware once the entropy pool has been initialized. *out_additional_input_len = 0; if (want_additional_input && CRYPTO_sysrand_if_available(additional_input, CTR_DRBG_ENTROPY_LEN)) { *out_additional_input_len = CTR_DRBG_ENTROPY_LEN; } } #else // rand_get_seed fills |seed| with entropy. In some cases, it will additionally // fill |additional_input| with entropy to supplement |seed|. It sets // |*out_additional_input_len| to the number of extra bytes. static void rand_get_seed(struct rand_thread_state *state, uint8_t seed[CTR_DRBG_ENTROPY_LEN], uint8_t additional_input[CTR_DRBG_ENTROPY_LEN], size_t *out_additional_input_len) { // If not in FIPS mode, we don't overread from the system entropy source and // we don't depend only on the hardware RDRAND. CRYPTO_sysrand_for_seed(seed, CTR_DRBG_ENTROPY_LEN); *out_additional_input_len = 0; } #endif bcm_infallible BCM_rand_bytes_with_additional_data( uint8_t *out, size_t out_len, const uint8_t user_additional_data[32]) { if (out_len == 0) { return bcm_infallible::approved; } const uint64_t fork_generation = CRYPTO_get_fork_generation(); const int fork_unsafe_buffering = rand_fork_unsafe_buffering_enabled(); // Additional data is mixed into every CTR-DRBG call to protect, as best we // can, against forks & VM clones. We do not over-read this information and // don't reseed with it so, from the point of view of FIPS, this doesn't // provide “prediction resistance”. But, in practice, it does. uint8_t additional_data[32]; // Intel chips have fast RDRAND instructions while, in other cases, RDRAND can // be _slower_ than a system call. if (!have_fast_rdrand() || !rdrand(additional_data, sizeof(additional_data))) { // Without a hardware RNG to save us from address-space duplication, the OS // entropy is used. This can be expensive (one read per |RAND_bytes| call) // and so is disabled when we have fork detection, or if the application has // promised not to fork. 
if (fork_generation != 0 || fork_unsafe_buffering) { OPENSSL_memset(additional_data, 0, sizeof(additional_data)); } else if (!have_rdrand()) { // No alternative so block for OS entropy. CRYPTO_sysrand(additional_data, sizeof(additional_data)); } else if (!CRYPTO_sysrand_if_available(additional_data, sizeof(additional_data)) && !rdrand(additional_data, sizeof(additional_data))) { // RDRAND failed: block for OS entropy. CRYPTO_sysrand(additional_data, sizeof(additional_data)); } } for (size_t i = 0; i < sizeof(additional_data); i++) { additional_data[i] ^= user_additional_data[i]; } struct rand_thread_state stack_state; struct rand_thread_state *state = reinterpret_cast( CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_RAND)); if (state == NULL) { state = reinterpret_cast( OPENSSL_zalloc(sizeof(struct rand_thread_state))); if (state == NULL || !CRYPTO_set_thread_local(OPENSSL_THREAD_LOCAL_RAND, state, rand_thread_state_free)) { // If the system is out of memory, use an ephemeral state on the // stack. state = &stack_state; } state->last_block_valid = 0; uint8_t seed[CTR_DRBG_ENTROPY_LEN]; uint8_t personalization[CTR_DRBG_ENTROPY_LEN] = {0}; size_t personalization_len = 0; rand_get_seed(state, seed, personalization, &personalization_len); if (!CTR_DRBG_init(&state->drbg, seed, personalization, personalization_len)) { abort(); } state->calls = 0; state->fork_generation = fork_generation; state->fork_unsafe_buffering = fork_unsafe_buffering; #if defined(BORINGSSL_FIPS) CRYPTO_MUTEX_init(&state->clear_drbg_lock); if (state != &stack_state) { CRYPTO_MUTEX_lock_write(thread_states_list_lock_bss_get()); struct rand_thread_state **states_list = thread_states_list_bss_get(); state->next = *states_list; if (state->next != NULL) { state->next->prev = state; } state->prev = NULL; *states_list = state; CRYPTO_MUTEX_unlock_write(thread_states_list_lock_bss_get()); } #endif } if (state->calls >= kReseedInterval || // If we've forked since |state| was last seeded, reseed. 
state->fork_generation != fork_generation || // If |state| was seeded from a state with different fork-safety // preferences, reseed. Suppose |state| was fork-safe, then forked into // two children, but each of the children never fork and disable fork // safety. The children must reseed to avoid working from the same PRNG // state. state->fork_unsafe_buffering != fork_unsafe_buffering) { uint8_t seed[CTR_DRBG_ENTROPY_LEN]; uint8_t reseed_additional_data[CTR_DRBG_ENTROPY_LEN] = {0}; size_t reseed_additional_data_len = 0; rand_get_seed(state, seed, reseed_additional_data, &reseed_additional_data_len); #if defined(BORINGSSL_FIPS) // Take a read lock around accesses to |state->drbg|. This is needed to // avoid returning bad entropy if we race with // |rand_thread_state_clear_all|. CRYPTO_MUTEX_lock_read(&state->clear_drbg_lock); #endif if (!CTR_DRBG_reseed(&state->drbg, seed, reseed_additional_data, reseed_additional_data_len)) { abort(); } state->calls = 0; state->fork_generation = fork_generation; state->fork_unsafe_buffering = fork_unsafe_buffering; } else { #if defined(BORINGSSL_FIPS) CRYPTO_MUTEX_lock_read(&state->clear_drbg_lock); #endif } int first_call = 1; while (out_len > 0) { size_t todo = out_len; if (todo > CTR_DRBG_MAX_GENERATE_LENGTH) { todo = CTR_DRBG_MAX_GENERATE_LENGTH; } if (!CTR_DRBG_generate(&state->drbg, out, todo, additional_data, first_call ? sizeof(additional_data) : 0)) { abort(); } out += todo; out_len -= todo; // Though we only check before entering the loop, this cannot add enough to // overflow a |size_t|. 
state->calls++; first_call = 0; } if (state == &stack_state) { CTR_DRBG_clear(&state->drbg); } #if defined(BORINGSSL_FIPS) CRYPTO_MUTEX_unlock_read(&state->clear_drbg_lock); #endif return bcm_infallible::approved; } bcm_infallible BCM_rand_bytes(uint8_t *out, size_t out_len) { static const uint8_t kZeroAdditionalData[32] = {0}; BCM_rand_bytes_with_additional_data(out, out_len, kZeroAdditionalData); return bcm_infallible::approved; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/rsa/blinding.cc.inc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../../internal.h" #include "internal.h" #define BN_BLINDING_COUNTER 32 struct bn_blinding_st { BIGNUM *A; // The base blinding factor, Montgomery-encoded. BIGNUM *Ai; // The inverse of the blinding factor, Montgomery-encoded. unsigned counter; }; static int bn_blinding_create_param(BN_BLINDING *b, const BIGNUM *e, const BN_MONT_CTX *mont, BN_CTX *ctx); BN_BLINDING *BN_BLINDING_new(void) { BN_BLINDING *ret = reinterpret_cast(OPENSSL_zalloc(sizeof(BN_BLINDING))); if (ret == NULL) { return NULL; } ret->A = BN_new(); if (ret->A == NULL) { goto err; } ret->Ai = BN_new(); if (ret->Ai == NULL) { goto err; } // The blinding values need to be created before this blinding can be used. 
ret->counter = BN_BLINDING_COUNTER - 1; return ret; err: BN_BLINDING_free(ret); return NULL; } void BN_BLINDING_free(BN_BLINDING *r) { if (r == nullptr) { return; } BN_free(r->A); BN_free(r->Ai); OPENSSL_free(r); } void BN_BLINDING_invalidate(BN_BLINDING *b) { b->counter = BN_BLINDING_COUNTER - 1; } static int bn_blinding_update(BN_BLINDING *b, const BIGNUM *e, const BN_MONT_CTX *mont, BN_CTX *ctx) { if (++b->counter == BN_BLINDING_COUNTER) { // re-create blinding parameters if (!bn_blinding_create_param(b, e, mont, ctx)) { goto err; } b->counter = 0; } else { if (!BN_mod_mul_montgomery(b->A, b->A, b->A, mont, ctx) || !BN_mod_mul_montgomery(b->Ai, b->Ai, b->Ai, mont, ctx)) { goto err; } } return 1; err: // |A| and |Ai| may be in an inconsistent state so they both need to be // replaced the next time this blinding is used. Note that this is only // sufficient because support for |BN_BLINDING_NO_UPDATE| and // |BN_BLINDING_NO_RECREATE| was previously dropped. b->counter = BN_BLINDING_COUNTER - 1; return 0; } int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, const BIGNUM *e, const BN_MONT_CTX *mont, BN_CTX *ctx) { // |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery| // cancels one Montgomery factor, so the resulting value of |n| is unencoded. if (!bn_blinding_update(b, e, mont, ctx) || !BN_mod_mul_montgomery(n, n, b->A, mont, ctx)) { return 0; } return 1; } int BN_BLINDING_invert(BIGNUM *n, const BN_BLINDING *b, BN_MONT_CTX *mont, BN_CTX *ctx) { // |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery| // cancels one Montgomery factor, so the resulting value of |n| is unencoded. return BN_mod_mul_montgomery(n, n, b->Ai, mont, ctx); } static int bn_blinding_create_param(BN_BLINDING *b, const BIGNUM *e, const BN_MONT_CTX *mont, BN_CTX *ctx) { int no_inverse; if (!BN_rand_range_ex(b->A, 1, &mont->N) || // Compute |b->A|^-1 in Montgomery form. 
Note |BN_from_montgomery| + // |BN_mod_inverse_blinded| is equivalent to, but more efficient than, // |BN_mod_inverse_blinded| + |BN_to_montgomery|. // // We do not retry if |b->A| has no inverse. Finding a non-invertible // value of |b->A| is equivalent to factoring |mont->N|. There is // negligible probability of stumbling on one at random. !BN_from_montgomery(b->Ai, b->A, mont, ctx) || !BN_mod_inverse_blinded(b->Ai, &no_inverse, b->Ai, mont, ctx) || // TODO(davidben): |BN_mod_exp_mont| internally computes the result in // Montgomery form. Save a pair of Montgomery reductions and a // multiplication by returning that value directly. !BN_mod_exp_mont(b->A, b->A, e, &mont->N, ctx, mont) || !BN_to_montgomery(b->A, b->A, mont, ctx)) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); return 0; } return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/rsa/internal.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_RSA_INTERNAL_H #define OPENSSL_HEADER_RSA_INTERNAL_H #include #include #include #include "../../internal.h" #if defined(__cplusplus) extern "C" { #endif typedef struct bn_blinding_st BN_BLINDING; struct rsa_st { RSA_METHOD *meth; BIGNUM *n; BIGNUM *e; BIGNUM *d; BIGNUM *p; BIGNUM *q; BIGNUM *dmp1; BIGNUM *dmq1; BIGNUM *iqmp; // be careful using this if the RSA structure is shared CRYPTO_EX_DATA ex_data; CRYPTO_refcount_t references; int flags; CRYPTO_MUTEX lock; // Used to cache montgomery values. The creation of these values is protected // by |lock|. 
BN_MONT_CTX *mont_n; BN_MONT_CTX *mont_p; BN_MONT_CTX *mont_q; // The following fields are copies of |d|, |dmp1|, and |dmq1|, respectively, // but with the correct widths to prevent side channels. These must use // separate copies due to threading concerns caused by OpenSSL's API // mistakes. See https://github.com/openssl/openssl/issues/5158 and // the |freeze_private_key| implementation. BIGNUM *d_fixed, *dmp1_fixed, *dmq1_fixed; // iqmp_mont is q^-1 mod p in Montgomery form, using |mont_p|. BIGNUM *iqmp_mont; // num_blindings contains the size of the |blindings| and |blindings_inuse| // arrays. This member and the |blindings_inuse| array are protected by // |lock|. size_t num_blindings; // blindings is an array of BN_BLINDING structures that can be reserved by a // thread by locking |lock| and changing the corresponding element in // |blindings_inuse| from 0 to 1. BN_BLINDING **blindings; unsigned char *blindings_inuse; uint64_t blinding_fork_generation; // private_key_frozen is one if the key has been used for a private key // operation and may no longer be mutated. unsigned private_key_frozen:1; }; #define RSA_PKCS1_PADDING_SIZE 11 // Default implementations of RSA operations. 
const RSA_METHOD *RSA_default_method(void); int rsa_default_sign_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); int rsa_default_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, size_t len); BN_BLINDING *BN_BLINDING_new(void); void BN_BLINDING_free(BN_BLINDING *b); void BN_BLINDING_invalidate(BN_BLINDING *b); int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, const BIGNUM *e, const BN_MONT_CTX *mont_ctx, BN_CTX *ctx); int BN_BLINDING_invert(BIGNUM *n, const BN_BLINDING *b, BN_MONT_CTX *mont_ctx, BN_CTX *ctx); int PKCS1_MGF1(uint8_t *out, size_t len, const uint8_t *seed, size_t seed_len, const EVP_MD *md); int RSA_padding_add_PKCS1_type_1(uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len); int RSA_padding_check_PKCS1_type_1(uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *from, size_t from_len); int RSA_padding_add_none(uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len); // rsa_check_public_key checks that |rsa|'s public modulus and exponent are // within DoS bounds. int rsa_check_public_key(const RSA *rsa); // rsa_private_transform_no_self_test calls either the method-specific // |private_transform| function (if given) or the generic one. See the comment // for |private_transform| in |rsa_meth_st|. int rsa_private_transform_no_self_test(RSA *rsa, uint8_t *out, const uint8_t *in, size_t len); // rsa_private_transform acts the same as |rsa_private_transform_no_self_test| // but, in FIPS mode, performs an RSA self test before calling the default RSA // implementation. int rsa_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, size_t len); // rsa_invalidate_key is called after |rsa| has been mutated, to invalidate // fields derived from the original structure. This function assumes exclusive // access to |rsa|. In particular, no other thread may be concurrently signing, // etc., with |rsa|. 
void rsa_invalidate_key(RSA *rsa); // This constant is exported for test purposes. extern const BN_ULONG kBoringSSLRSASqrtTwo[]; extern const size_t kBoringSSLRSASqrtTwoLen; // Functions that avoid self-tests. // // Self-tests need to call functions that don't try and ensure that the // self-tests have passed. These functions, in turn, need to limit themselves // to such functions too. // // These functions are the same as their public versions, but skip the self-test // check. int rsa_verify_no_self_test(int hash_nid, const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, RSA *rsa); int rsa_verify_raw_no_self_test(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); int rsa_sign_no_self_test(int hash_nid, const uint8_t *digest, size_t digest_len, uint8_t *out, unsigned *out_len, RSA *rsa); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_RSA_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/rsa/padding.cc.inc ================================================ /* * Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../../internal.h" #include "../bcm_interface.h" #include "../service_indicator/internal.h" #include "internal.h" int RSA_padding_add_PKCS1_type_1(uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len) { // See RFC 8017, section 9.2. 
if (to_len < RSA_PKCS1_PADDING_SIZE) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; } if (from_len > to_len - RSA_PKCS1_PADDING_SIZE) { OPENSSL_PUT_ERROR(RSA, RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY); return 0; } to[0] = 0; to[1] = 1; OPENSSL_memset(to + 2, 0xff, to_len - 3 - from_len); to[to_len - from_len - 1] = 0; OPENSSL_memcpy(to + to_len - from_len, from, from_len); return 1; } int RSA_padding_check_PKCS1_type_1(uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *from, size_t from_len) { // See RFC 8017, section 9.2. This is part of signature verification and thus // does not need to run in constant-time. if (from_len < 2) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_SMALL); return 0; } // Check the header. if (from[0] != 0 || from[1] != 1) { OPENSSL_PUT_ERROR(RSA, RSA_R_BLOCK_TYPE_IS_NOT_01); return 0; } // Scan over padded data, looking for the 00. size_t pad; for (pad = 2 /* header */; pad < from_len; pad++) { if (from[pad] == 0x00) { break; } if (from[pad] != 0xff) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_FIXED_HEADER_DECRYPT); return 0; } } if (pad == from_len) { OPENSSL_PUT_ERROR(RSA, RSA_R_NULL_BEFORE_BLOCK_MISSING); return 0; } if (pad < 2 /* header */ + 8) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_PAD_BYTE_COUNT); return 0; } // Skip over the 00. 
pad++; if (from_len - pad > max_out) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); return 0; } OPENSSL_memcpy(out, from + pad, from_len - pad); *out_len = from_len - pad; return 1; } int RSA_padding_add_none(uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len) { if (from_len > to_len) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); return 0; } if (from_len < to_len) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_SMALL); return 0; } OPENSSL_memcpy(to, from, from_len); return 1; } int PKCS1_MGF1(uint8_t *out, size_t len, const uint8_t *seed, size_t seed_len, const EVP_MD *md) { int ret = 0; EVP_MD_CTX ctx; EVP_MD_CTX_init(&ctx); FIPS_service_indicator_lock_state(); size_t md_len = EVP_MD_size(md); for (uint32_t i = 0; len > 0; i++) { uint8_t counter[4]; counter[0] = (uint8_t)(i >> 24); counter[1] = (uint8_t)(i >> 16); counter[2] = (uint8_t)(i >> 8); counter[3] = (uint8_t)i; if (!EVP_DigestInit_ex(&ctx, md, NULL) || !EVP_DigestUpdate(&ctx, seed, seed_len) || !EVP_DigestUpdate(&ctx, counter, sizeof(counter))) { goto err; } if (md_len <= len) { if (!EVP_DigestFinal_ex(&ctx, out, NULL)) { goto err; } out += md_len; len -= md_len; } else { uint8_t digest[EVP_MAX_MD_SIZE]; if (!EVP_DigestFinal_ex(&ctx, digest, NULL)) { goto err; } OPENSSL_memcpy(out, digest, len); len = 0; } } ret = 1; err: EVP_MD_CTX_cleanup(&ctx); FIPS_service_indicator_unlock_state(); return ret; } static const uint8_t kPSSZeroes[] = {0, 0, 0, 0, 0, 0, 0, 0}; int RSA_verify_PKCS1_PSS_mgf1(const RSA *rsa, const uint8_t *mHash, const EVP_MD *Hash, const EVP_MD *mgf1Hash, const uint8_t *EM, int sLen) { if (mgf1Hash == NULL) { mgf1Hash = Hash; } int ret = 0; uint8_t *DB = NULL; const uint8_t *H; EVP_MD_CTX ctx; EVP_MD_CTX_init(&ctx); unsigned MSBits; size_t emLen, maskedDBLen, salt_start; FIPS_service_indicator_lock_state(); // Negative sLen has special meanings: // -1 sLen == hLen // -2 salt length is autorecovered from signature // -N reserved size_t hLen = EVP_MD_size(Hash); if (sLen 
== -1) { sLen = (int)hLen; } else if (sLen == -2) { sLen = -2; } else if (sLen < -2) { OPENSSL_PUT_ERROR(RSA, RSA_R_SLEN_CHECK_FAILED); goto err; } MSBits = (BN_num_bits(rsa->n) - 1) & 0x7; emLen = RSA_size(rsa); if (EM[0] & (0xFF << MSBits)) { OPENSSL_PUT_ERROR(RSA, RSA_R_FIRST_OCTET_INVALID); goto err; } if (MSBits == 0) { EM++; emLen--; } // |sLen| may be -2 for the non-standard salt length recovery mode. if (emLen < hLen + 2 || (sLen >= 0 && emLen < hLen + (size_t)sLen + 2)) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); goto err; } if (EM[emLen - 1] != 0xbc) { OPENSSL_PUT_ERROR(RSA, RSA_R_LAST_OCTET_INVALID); goto err; } maskedDBLen = emLen - hLen - 1; H = EM + maskedDBLen; DB = reinterpret_cast(OPENSSL_malloc(maskedDBLen)); if (!DB) { goto err; } if (!PKCS1_MGF1(DB, maskedDBLen, H, hLen, mgf1Hash)) { goto err; } for (size_t i = 0; i < maskedDBLen; i++) { DB[i] ^= EM[i]; } if (MSBits) { DB[0] &= 0xFF >> (8 - MSBits); } // This step differs slightly from EMSA-PSS-VERIFY (RFC 8017) step 10 because // it accepts a non-standard salt recovery flow. DB should be some number of // zeros, a one, then the salt. for (salt_start = 0; DB[salt_start] == 0 && salt_start < maskedDBLen - 1; salt_start++) { ; } if (DB[salt_start] != 0x1) { OPENSSL_PUT_ERROR(RSA, RSA_R_SLEN_RECOVERY_FAILED); goto err; } salt_start++; // If a salt length was specified, check it matches. 
if (sLen >= 0 && maskedDBLen - salt_start != (size_t)sLen) { OPENSSL_PUT_ERROR(RSA, RSA_R_SLEN_CHECK_FAILED); goto err; } uint8_t H_[EVP_MAX_MD_SIZE]; if (!EVP_DigestInit_ex(&ctx, Hash, NULL) || !EVP_DigestUpdate(&ctx, kPSSZeroes, sizeof(kPSSZeroes)) || !EVP_DigestUpdate(&ctx, mHash, hLen) || !EVP_DigestUpdate(&ctx, DB + salt_start, maskedDBLen - salt_start) || !EVP_DigestFinal_ex(&ctx, H_, NULL)) { goto err; } if (OPENSSL_memcmp(H_, H, hLen) != 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_SIGNATURE); goto err; } ret = 1; err: OPENSSL_free(DB); EVP_MD_CTX_cleanup(&ctx); FIPS_service_indicator_unlock_state(); return ret; } int RSA_padding_add_PKCS1_PSS_mgf1(const RSA *rsa, unsigned char *EM, const unsigned char *mHash, const EVP_MD *Hash, const EVP_MD *mgf1Hash, int sLenRequested) { int ret = 0, digest_ok; size_t maskedDBLen, MSBits, emLen; size_t hLen; unsigned char *H, *salt = NULL, *p; if (mgf1Hash == NULL) { mgf1Hash = Hash; } FIPS_service_indicator_lock_state(); hLen = EVP_MD_size(Hash); if (BN_is_zero(rsa->n)) { OPENSSL_PUT_ERROR(RSA, RSA_R_EMPTY_PUBLIC_KEY); goto err; } MSBits = (BN_num_bits(rsa->n) - 1) & 0x7; emLen = RSA_size(rsa); if (MSBits == 0) { assert(emLen >= 1); *EM++ = 0; emLen--; } if (emLen < hLen + 2) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); goto err; } // Negative sLenRequested has special meanings: // -1 sLen == hLen // -2 salt length is maximized // -N reserved size_t sLen; if (sLenRequested == -1) { sLen = hLen; } else if (sLenRequested == -2) { sLen = emLen - hLen - 2; } else if (sLenRequested < 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_SLEN_CHECK_FAILED); goto err; } else { sLen = (size_t)sLenRequested; } if (emLen - hLen - 2 < sLen) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); goto err; } if (sLen > 0) { salt = reinterpret_cast(OPENSSL_malloc(sLen)); if (!salt) { goto err; } BCM_rand_bytes(salt, sLen); } maskedDBLen = emLen - hLen - 1; H = EM + maskedDBLen; EVP_MD_CTX ctx; EVP_MD_CTX_init(&ctx); digest_ok = 
EVP_DigestInit_ex(&ctx, Hash, NULL) && EVP_DigestUpdate(&ctx, kPSSZeroes, sizeof(kPSSZeroes)) && EVP_DigestUpdate(&ctx, mHash, hLen) && EVP_DigestUpdate(&ctx, salt, sLen) && EVP_DigestFinal_ex(&ctx, H, NULL); EVP_MD_CTX_cleanup(&ctx); if (!digest_ok) { goto err; } // Generate dbMask in place then perform XOR on it if (!PKCS1_MGF1(EM, maskedDBLen, H, hLen, mgf1Hash)) { goto err; } p = EM; // Initial PS XORs with all zeroes which is a NOP so just update // pointer. Note from a test above this value is guaranteed to // be non-negative. p += emLen - sLen - hLen - 2; *p++ ^= 0x1; if (sLen > 0) { for (size_t i = 0; i < sLen; i++) { *p++ ^= salt[i]; } } if (MSBits) { EM[0] &= 0xFF >> (8 - MSBits); } // H is already in place so just set final 0xbc EM[emLen - 1] = 0xbc; ret = 1; err: OPENSSL_free(salt); FIPS_service_indicator_unlock_state(); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/rsa/rsa.cc.inc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../../internal.h" #include "../bcm_interface.h" #include "../bn/internal.h" #include "../delocate.h" #include "internal.h" // RSA_R_BLOCK_TYPE_IS_NOT_02 is part of the legacy SSLv23 padding scheme. // Cryptography.io depends on this error code. 
OPENSSL_DECLARE_ERROR_REASON(RSA, BLOCK_TYPE_IS_NOT_02) DEFINE_STATIC_EX_DATA_CLASS(g_rsa_ex_data_class) static int bn_dup_into(BIGNUM **dst, const BIGNUM *src) { if (src == NULL) { OPENSSL_PUT_ERROR(RSA, ERR_R_PASSED_NULL_PARAMETER); return 0; } BN_free(*dst); *dst = BN_dup(src); return *dst != NULL; } RSA *RSA_new_public_key(const BIGNUM *n, const BIGNUM *e) { RSA *rsa = RSA_new(); if (rsa == NULL || // !bn_dup_into(&rsa->n, n) || // !bn_dup_into(&rsa->e, e) || // !RSA_check_key(rsa)) { RSA_free(rsa); return NULL; } return rsa; } RSA *RSA_new_private_key(const BIGNUM *n, const BIGNUM *e, const BIGNUM *d, const BIGNUM *p, const BIGNUM *q, const BIGNUM *dmp1, const BIGNUM *dmq1, const BIGNUM *iqmp) { RSA *rsa = RSA_new(); if (rsa == NULL || // !bn_dup_into(&rsa->n, n) || // !bn_dup_into(&rsa->e, e) || // !bn_dup_into(&rsa->d, d) || // !bn_dup_into(&rsa->p, p) || // !bn_dup_into(&rsa->q, q) || // !bn_dup_into(&rsa->dmp1, dmp1) || // !bn_dup_into(&rsa->dmq1, dmq1) || // !bn_dup_into(&rsa->iqmp, iqmp) || // !RSA_check_key(rsa)) { RSA_free(rsa); return NULL; } return rsa; } RSA *RSA_new_private_key_no_crt(const BIGNUM *n, const BIGNUM *e, const BIGNUM *d) { RSA *rsa = RSA_new(); if (rsa == NULL || // !bn_dup_into(&rsa->n, n) || // !bn_dup_into(&rsa->e, e) || // !bn_dup_into(&rsa->d, d) || // !RSA_check_key(rsa)) { RSA_free(rsa); return NULL; } return rsa; } RSA *RSA_new_private_key_no_e(const BIGNUM *n, const BIGNUM *d) { RSA *rsa = RSA_new(); if (rsa == NULL) { return NULL; } rsa->flags |= RSA_FLAG_NO_PUBLIC_EXPONENT; if (!bn_dup_into(&rsa->n, n) || // !bn_dup_into(&rsa->d, d) || // !RSA_check_key(rsa)) { RSA_free(rsa); return NULL; } return rsa; } RSA *RSA_new_public_key_large_e(const BIGNUM *n, const BIGNUM *e) { RSA *rsa = RSA_new(); if (rsa == NULL) { return NULL; } rsa->flags |= RSA_FLAG_LARGE_PUBLIC_EXPONENT; if (!bn_dup_into(&rsa->n, n) || // !bn_dup_into(&rsa->e, e) || // !RSA_check_key(rsa)) { RSA_free(rsa); return NULL; } return rsa; } RSA 
*RSA_new_private_key_large_e(const BIGNUM *n, const BIGNUM *e, const BIGNUM *d, const BIGNUM *p, const BIGNUM *q, const BIGNUM *dmp1, const BIGNUM *dmq1, const BIGNUM *iqmp) { RSA *rsa = RSA_new(); if (rsa == NULL) { return NULL; } rsa->flags |= RSA_FLAG_LARGE_PUBLIC_EXPONENT; if (!bn_dup_into(&rsa->n, n) || // !bn_dup_into(&rsa->e, e) || // !bn_dup_into(&rsa->d, d) || // !bn_dup_into(&rsa->p, p) || // !bn_dup_into(&rsa->q, q) || // !bn_dup_into(&rsa->dmp1, dmp1) || // !bn_dup_into(&rsa->dmq1, dmq1) || // !bn_dup_into(&rsa->iqmp, iqmp) || // !RSA_check_key(rsa)) { RSA_free(rsa); return NULL; } return rsa; } RSA *RSA_new(void) { return RSA_new_method(NULL); } RSA *RSA_new_method(const ENGINE *engine) { RSA *rsa = reinterpret_cast(OPENSSL_zalloc(sizeof(RSA))); if (rsa == NULL) { return NULL; } if (engine) { rsa->meth = ENGINE_get_RSA_method(engine); } if (rsa->meth == NULL) { rsa->meth = (RSA_METHOD *)RSA_default_method(); } METHOD_ref(rsa->meth); rsa->references = 1; rsa->flags = rsa->meth->flags; CRYPTO_MUTEX_init(&rsa->lock); CRYPTO_new_ex_data(&rsa->ex_data); if (rsa->meth->init && !rsa->meth->init(rsa)) { CRYPTO_free_ex_data(g_rsa_ex_data_class_bss_get(), rsa, &rsa->ex_data); CRYPTO_MUTEX_cleanup(&rsa->lock); METHOD_unref(rsa->meth); OPENSSL_free(rsa); return NULL; } return rsa; } RSA *RSA_new_method_no_e(const ENGINE *engine, const BIGNUM *n) { RSA *rsa = RSA_new_method(engine); if (rsa == NULL || !bn_dup_into(&rsa->n, n)) { RSA_free(rsa); return NULL; } rsa->flags |= RSA_FLAG_NO_PUBLIC_EXPONENT; return rsa; } void RSA_free(RSA *rsa) { if (rsa == NULL) { return; } if (!CRYPTO_refcount_dec_and_test_zero(&rsa->references)) { return; } if (rsa->meth->finish) { rsa->meth->finish(rsa); } METHOD_unref(rsa->meth); CRYPTO_free_ex_data(g_rsa_ex_data_class_bss_get(), rsa, &rsa->ex_data); BN_free(rsa->n); BN_free(rsa->e); BN_free(rsa->d); BN_free(rsa->p); BN_free(rsa->q); BN_free(rsa->dmp1); BN_free(rsa->dmq1); BN_free(rsa->iqmp); rsa_invalidate_key(rsa); 
CRYPTO_MUTEX_cleanup(&rsa->lock); OPENSSL_free(rsa); } int RSA_up_ref(RSA *rsa) { CRYPTO_refcount_inc(&rsa->references); return 1; } unsigned RSA_bits(const RSA *rsa) { return BN_num_bits(rsa->n); } const BIGNUM *RSA_get0_n(const RSA *rsa) { return rsa->n; } const BIGNUM *RSA_get0_e(const RSA *rsa) { return rsa->e; } const BIGNUM *RSA_get0_d(const RSA *rsa) { return rsa->d; } const BIGNUM *RSA_get0_p(const RSA *rsa) { return rsa->p; } const BIGNUM *RSA_get0_q(const RSA *rsa) { return rsa->q; } const BIGNUM *RSA_get0_dmp1(const RSA *rsa) { return rsa->dmp1; } const BIGNUM *RSA_get0_dmq1(const RSA *rsa) { return rsa->dmq1; } const BIGNUM *RSA_get0_iqmp(const RSA *rsa) { return rsa->iqmp; } void RSA_get0_key(const RSA *rsa, const BIGNUM **out_n, const BIGNUM **out_e, const BIGNUM **out_d) { if (out_n != NULL) { *out_n = rsa->n; } if (out_e != NULL) { *out_e = rsa->e; } if (out_d != NULL) { *out_d = rsa->d; } } void RSA_get0_factors(const RSA *rsa, const BIGNUM **out_p, const BIGNUM **out_q) { if (out_p != NULL) { *out_p = rsa->p; } if (out_q != NULL) { *out_q = rsa->q; } } const RSA_PSS_PARAMS *RSA_get0_pss_params(const RSA *rsa) { // We do not support the id-RSASSA-PSS key encoding. If we add support later, // the |maskHash| field should be filled in for OpenSSL compatibility. 
  return NULL;
}

// Reports |rsa|'s CRT parameters through any non-NULL out pointer. Returned
// pointers are owned by |rsa| and must not be freed by the caller.
void RSA_get0_crt_params(const RSA *rsa, const BIGNUM **out_dmp1,
                         const BIGNUM **out_dmq1, const BIGNUM **out_iqmp) {
  if (out_dmp1 != NULL) {
    *out_dmp1 = rsa->dmp1;
  }
  if (out_dmq1 != NULL) {
    *out_dmq1 = rsa->dmq1;
  }
  if (out_iqmp != NULL) {
    *out_iqmp = rsa->iqmp;
  }
}

// Installs n, e, and d. Each non-NULL argument is adopted (the previous value
// is freed); the call fails if it would leave |rsa| without n or e. Any
// precomputed state is discarded via |rsa_invalidate_key|.
int RSA_set0_key(RSA *rsa, BIGNUM *n, BIGNUM *e, BIGNUM *d) {
  if ((rsa->n == NULL && n == NULL) || (rsa->e == NULL && e == NULL)) {
    return 0;
  }

  if (n != NULL) {
    BN_free(rsa->n);
    rsa->n = n;
  }
  if (e != NULL) {
    BN_free(rsa->e);
    rsa->e = e;
  }
  if (d != NULL) {
    BN_free(rsa->d);
    rsa->d = d;
  }

  rsa_invalidate_key(rsa);
  return 1;
}

// Installs the prime factors p and q; same ownership rules as |RSA_set0_key|.
int RSA_set0_factors(RSA *rsa, BIGNUM *p, BIGNUM *q) {
  if ((rsa->p == NULL && p == NULL) || (rsa->q == NULL && q == NULL)) {
    return 0;
  }

  if (p != NULL) {
    BN_free(rsa->p);
    rsa->p = p;
  }
  if (q != NULL) {
    BN_free(rsa->q);
    rsa->q = q;
  }

  rsa_invalidate_key(rsa);
  return 1;
}

// Installs the CRT parameters; same ownership rules as |RSA_set0_key|.
int RSA_set0_crt_params(RSA *rsa, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp) {
  if ((rsa->dmp1 == NULL && dmp1 == NULL) ||
      (rsa->dmq1 == NULL && dmq1 == NULL) ||
      (rsa->iqmp == NULL && iqmp == NULL)) {
    return 0;
  }

  if (dmp1 != NULL) {
    BN_free(rsa->dmp1);
    rsa->dmp1 = dmp1;
  }
  if (dmq1 != NULL) {
    BN_free(rsa->dmq1);
    rsa->dmq1 = dmq1;
  }
  if (iqmp != NULL) {
    BN_free(rsa->iqmp);
    rsa->iqmp = iqmp;
  }

  rsa_invalidate_key(rsa);
  return 1;
}

// Dispatches to the method table's |sign_raw| if set, otherwise to the
// default implementation, without running the FIPS self-test first.
static int rsa_sign_raw_no_self_test(RSA *rsa, size_t *out_len, uint8_t *out,
                                     size_t max_out, const uint8_t *in,
                                     size_t in_len, int padding) {
  if (rsa->meth->sign_raw) {
    return rsa->meth->sign_raw(rsa, out_len, out, max_out, in, in_len,
                               padding);
  }
  return rsa_default_sign_raw(rsa, out_len, out, max_out, in, in_len, padding);
}

int RSA_sign_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out,
                 const uint8_t *in, size_t in_len, int padding) {
  boringssl_ensure_rsa_self_test();
  return rsa_sign_raw_no_self_test(rsa, out_len, out, max_out, in, in_len,
                                   padding);
}

// Returns the modulus size in bytes, which is also the signature size.
unsigned RSA_size(const RSA *rsa) { return BN_num_bytes(rsa->n); }

int RSA_is_opaque(const RSA *rsa) {
  return rsa->meth &&
         (rsa->meth->flags & RSA_FLAG_OPAQUE);
}

int RSA_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused,
                         CRYPTO_EX_dup *dup_unused,
                         CRYPTO_EX_free *free_func) {
  return CRYPTO_get_ex_new_index_ex(g_rsa_ex_data_class_bss_get(), argl, argp,
                                    free_func);
}

int RSA_set_ex_data(RSA *rsa, int idx, void *arg) {
  return CRYPTO_set_ex_data(&rsa->ex_data, idx, arg);
}

void *RSA_get_ex_data(const RSA *rsa, int idx) {
  return CRYPTO_get_ex_data(&rsa->ex_data, idx);
}

// SSL_SIG_LENGTH is the size of an SSL/TLS (prior to TLS 1.2) signature: it's
// the length of an MD5 and SHA1 hash.
static const unsigned SSL_SIG_LENGTH = 36;

// pkcs1_sig_prefix contains the ASN.1, DER encoded prefix for a hash that is
// to be signed with PKCS#1.
struct pkcs1_sig_prefix {
  // nid identifies the hash function.
  int nid;
  // hash_len is the expected length of the hash function.
  uint8_t hash_len;
  // len is the number of bytes of |bytes| which are valid.
  uint8_t len;
  // bytes contains the DER bytes.
  uint8_t bytes[19];
};

// kPKCS1SigPrefixes contains the ASN.1 prefixes for PKCS#1 signatures with
// different hash functions. The list is terminated by a NID_undef entry.
static const struct pkcs1_sig_prefix kPKCS1SigPrefixes[] = {
    {
        NID_md5,
        MD5_DIGEST_LENGTH,
        18,
        {0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7,
         0x0d, 0x02, 0x05, 0x05, 0x00, 0x04, 0x10},
    },
    {
        NID_sha1,
        BCM_SHA_DIGEST_LENGTH,
        15,
        {0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a,
         0x05, 0x00, 0x04, 0x14},
    },
    {
        NID_sha224,
        BCM_SHA224_DIGEST_LENGTH,
        19,
        {0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
         0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c},
    },
    {
        NID_sha256,
        BCM_SHA256_DIGEST_LENGTH,
        19,
        {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
         0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20},
    },
    {
        NID_sha384,
        BCM_SHA384_DIGEST_LENGTH,
        19,
        {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
         0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30},
    },
    {
        NID_sha512,
        BCM_SHA512_DIGEST_LENGTH,
        19,
        {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
         0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40},
    },
    {
        NID_undef,
        0,
        0,
        {0},
    },
};

// Returns one if |digest_len| is the correct digest length for |hash_nid|,
// zero (with an error queued) otherwise or if |hash_nid| is unknown.
static int rsa_check_digest_size(int hash_nid, size_t digest_len) {
  if (hash_nid == NID_md5_sha1) {
    if (digest_len != SSL_SIG_LENGTH) {
      OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH);
      return 0;
    }
    return 1;
  }

  for (size_t i = 0; kPKCS1SigPrefixes[i].nid != NID_undef; i++) {
    const struct pkcs1_sig_prefix *sig_prefix = &kPKCS1SigPrefixes[i];
    if (sig_prefix->nid == hash_nid) {
      if (digest_len != sig_prefix->hash_len) {
        OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH);
        return 0;
      }
      return 1;
    }
  }

  OPENSSL_PUT_ERROR(RSA, RSA_R_UNKNOWN_ALGORITHM_TYPE);
  return 0;
}

int RSA_add_pkcs1_prefix(uint8_t **out_msg, size_t *out_msg_len,
                         int *is_alloced, int hash_nid, const uint8_t *digest,
                         size_t digest_len) {
  if (!rsa_check_digest_size(hash_nid, digest_len)) {
    return 0;
  }

  if (hash_nid == NID_md5_sha1) {
    // The length should already have been checked.
assert(digest_len == SSL_SIG_LENGTH); *out_msg = (uint8_t *)digest; *out_msg_len = digest_len; *is_alloced = 0; return 1; } for (size_t i = 0; kPKCS1SigPrefixes[i].nid != NID_undef; i++) { const struct pkcs1_sig_prefix *sig_prefix = &kPKCS1SigPrefixes[i]; if (sig_prefix->nid != hash_nid) { continue; } // The length should already have been checked. assert(digest_len == sig_prefix->hash_len); const uint8_t *prefix = sig_prefix->bytes; size_t prefix_len = sig_prefix->len; size_t signed_msg_len = prefix_len + digest_len; if (signed_msg_len < prefix_len) { OPENSSL_PUT_ERROR(RSA, RSA_R_TOO_LONG); return 0; } uint8_t *signed_msg = reinterpret_cast(OPENSSL_malloc(signed_msg_len)); if (!signed_msg) { return 0; } OPENSSL_memcpy(signed_msg, prefix, prefix_len); OPENSSL_memcpy(signed_msg + prefix_len, digest, digest_len); *out_msg = signed_msg; *out_msg_len = signed_msg_len; *is_alloced = 1; return 1; } OPENSSL_PUT_ERROR(RSA, RSA_R_UNKNOWN_ALGORITHM_TYPE); return 0; } int rsa_sign_no_self_test(int hash_nid, const uint8_t *digest, size_t digest_len, uint8_t *out, unsigned *out_len, RSA *rsa) { if (rsa->meth->sign) { if (!rsa_check_digest_size(hash_nid, digest_len)) { return 0; } // All supported digest lengths fit in |unsigned|. 
assert(digest_len <= EVP_MAX_MD_SIZE); static_assert(EVP_MAX_MD_SIZE <= UINT_MAX, "digest too long"); return rsa->meth->sign(hash_nid, digest, (unsigned)digest_len, out, out_len, rsa); } const unsigned rsa_size = RSA_size(rsa); int ret = 0; uint8_t *signed_msg = NULL; size_t signed_msg_len = 0; int signed_msg_is_alloced = 0; size_t size_t_out_len; if (!RSA_add_pkcs1_prefix(&signed_msg, &signed_msg_len, &signed_msg_is_alloced, hash_nid, digest, digest_len) || !rsa_sign_raw_no_self_test(rsa, &size_t_out_len, out, rsa_size, signed_msg, signed_msg_len, RSA_PKCS1_PADDING)) { goto err; } if (size_t_out_len > UINT_MAX) { OPENSSL_PUT_ERROR(RSA, ERR_R_OVERFLOW); goto err; } *out_len = (unsigned)size_t_out_len; ret = 1; err: if (signed_msg_is_alloced) { OPENSSL_free(signed_msg); } return ret; } int RSA_sign(int hash_nid, const uint8_t *digest, size_t digest_len, uint8_t *out, unsigned *out_len, RSA *rsa) { boringssl_ensure_rsa_self_test(); return rsa_sign_no_self_test(hash_nid, digest, digest_len, out, out_len, rsa); } int RSA_sign_pss_mgf1(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *digest, size_t digest_len, const EVP_MD *md, const EVP_MD *mgf1_md, int salt_len) { if (digest_len != EVP_MD_size(md)) { OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH); return 0; } size_t padded_len = RSA_size(rsa); uint8_t *padded = reinterpret_cast(OPENSSL_malloc(padded_len)); if (padded == NULL) { return 0; } int ret = RSA_padding_add_PKCS1_PSS_mgf1(rsa, padded, digest, md, mgf1_md, salt_len) && RSA_sign_raw(rsa, out_len, out, max_out, padded, padded_len, RSA_NO_PADDING); OPENSSL_free(padded); return ret; } int rsa_verify_no_self_test(int hash_nid, const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, RSA *rsa) { if (rsa->n == NULL || rsa->e == NULL) { OPENSSL_PUT_ERROR(RSA, RSA_R_VALUE_MISSING); return 0; } const size_t rsa_size = RSA_size(rsa); uint8_t *buf = NULL; int ret = 0; uint8_t *signed_msg = NULL; size_t signed_msg_len = 0, 
len; int signed_msg_is_alloced = 0; if (hash_nid == NID_md5_sha1 && digest_len != SSL_SIG_LENGTH) { OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH); return 0; } buf = reinterpret_cast(OPENSSL_malloc(rsa_size)); if (!buf) { return 0; } if (!rsa_verify_raw_no_self_test(rsa, &len, buf, rsa_size, sig, sig_len, RSA_PKCS1_PADDING) || !RSA_add_pkcs1_prefix(&signed_msg, &signed_msg_len, &signed_msg_is_alloced, hash_nid, digest, digest_len)) { goto out; } // Check that no other information follows the hash value (FIPS 186-4 Section // 5.5) and it matches the expected hash. if (len != signed_msg_len || OPENSSL_memcmp(buf, signed_msg, len) != 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_SIGNATURE); goto out; } ret = 1; out: OPENSSL_free(buf); if (signed_msg_is_alloced) { OPENSSL_free(signed_msg); } return ret; } int RSA_verify(int hash_nid, const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, RSA *rsa) { boringssl_ensure_rsa_self_test(); return rsa_verify_no_self_test(hash_nid, digest, digest_len, sig, sig_len, rsa); } int RSA_verify_pss_mgf1(RSA *rsa, const uint8_t *digest, size_t digest_len, const EVP_MD *md, const EVP_MD *mgf1_md, int salt_len, const uint8_t *sig, size_t sig_len) { if (digest_len != EVP_MD_size(md)) { OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH); return 0; } size_t em_len = RSA_size(rsa); uint8_t *em = reinterpret_cast(OPENSSL_malloc(em_len)); if (em == NULL) { return 0; } int ret = 0; if (!RSA_verify_raw(rsa, &em_len, em, em_len, sig, sig_len, RSA_NO_PADDING)) { goto err; } if (em_len != RSA_size(rsa)) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); goto err; } ret = RSA_verify_PKCS1_PSS_mgf1(rsa, digest, md, mgf1_md, em, salt_len); err: OPENSSL_free(em); return ret; } static int check_mod_inverse(int *out_ok, const BIGNUM *a, const BIGNUM *ainv, const BIGNUM *m, unsigned m_min_bits, BN_CTX *ctx) { if (BN_is_negative(ainv) || constant_time_declassify_int(BN_cmp(ainv, m) >= 0)) { *out_ok = 0; return 1; } // Note 
  // |bn_mul_consttime| and |bn_div_consttime| do not scale linearly, but
  // checking |ainv| is in range bounds the running time, assuming |m|'s bounds
  // were checked by the caller.
  BN_CTX_start(ctx);
  BIGNUM *tmp = BN_CTX_get(ctx);
  int ret = tmp != NULL && bn_mul_consttime(tmp, a, ainv, ctx) &&
            bn_div_consttime(NULL, tmp, tmp, m, m_min_bits, ctx);
  if (ret) {
    *out_ok = constant_time_declassify_int(BN_is_one(tmp));
  }
  BN_CTX_end(ctx);
  return ret;
}

// Validates the internal consistency of an RSA key: p*q == n, d*e == 1 mod
// lcm(p-1, q-1), and (when present) the CRT values. Public-only keys pass
// after the public checks alone.
int RSA_check_key(const RSA *key) {
  // TODO(davidben): RSA key initialization is spread across
  // |rsa_check_public_key|, |RSA_check_key|, |freeze_private_key|, and
  // |BN_MONT_CTX_set_locked| as a result of API issues. See
  // https://crbug.com/boringssl/316. As a result, we inconsistently check RSA
  // invariants. We should fix this and integrate that logic.

  if (!rsa_check_public_key(key)) {
    return 0;
  }

  if ((key->p != NULL) != (key->q != NULL)) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_ONLY_ONE_OF_P_Q_GIVEN);
    return 0;
  }

  // |key->d| must be bounded by |key->n|. This ensures bounds on |RSA_bits|
  // translate to bounds on the running time of private key operations.
  if (key->d != NULL &&
      (BN_is_negative(key->d) || BN_cmp(key->d, key->n) >= 0)) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_D_OUT_OF_RANGE);
    return 0;
  }

  if (key->d == NULL || key->p == NULL) {
    // For a public key, or without p and q, there's nothing that can be
    // checked.
    return 1;
  }

  BN_CTX *ctx = BN_CTX_new();
  if (ctx == NULL) {
    return 0;
  }

  BIGNUM tmp, de, pm1, qm1, dmp1, dmq1;
  int ok = 0, has_crt_values;
  unsigned pm1_bits, qm1_bits;
  BN_init(&tmp);
  BN_init(&de);
  BN_init(&pm1);
  BN_init(&qm1);
  BN_init(&dmp1);
  BN_init(&dmq1);

  // Check that p * q == n. Before we multiply, we check that p and q are in
  // bounds, to avoid a DoS vector in |bn_mul_consttime| below. Note that
  // n was bound by |rsa_check_public_key|. This also implicitly checks p and q
  // are odd, which is a necessary condition for Montgomery reduction.
  if (BN_is_negative(key->p) ||
      constant_time_declassify_int(BN_cmp(key->p, key->n) >= 0) ||
      BN_is_negative(key->q) ||
      constant_time_declassify_int(BN_cmp(key->q, key->n) >= 0)) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_N_NOT_EQUAL_P_Q);
    goto out;
  }
  if (!bn_mul_consttime(&tmp, key->p, key->q, ctx)) {
    OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN);
    goto out;
  }
  if (BN_cmp(&tmp, key->n) != 0) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_N_NOT_EQUAL_P_Q);
    goto out;
  }

  // d must be an inverse of e mod the Carmichael totient, lcm(p-1, q-1), but it
  // may be unreduced because other implementations use the Euler totient. We
  // simply check that d * e is one mod p-1 and mod q-1. Note d and e were bound
  // by earlier checks in this function.
  if (!bn_usub_consttime(&pm1, key->p, BN_value_one()) ||
      !bn_usub_consttime(&qm1, key->q, BN_value_one())) {
    OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN);
    goto out;
  }
  pm1_bits = BN_num_bits(&pm1);
  qm1_bits = BN_num_bits(&qm1);
  if (!bn_mul_consttime(&de, key->d, key->e, ctx) ||
      !bn_div_consttime(NULL, &tmp, &de, &pm1, pm1_bits, ctx) ||
      !bn_div_consttime(NULL, &de, &de, &qm1, qm1_bits, ctx)) {
    OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN);
    goto out;
  }

  if (constant_time_declassify_int(!BN_is_one(&tmp)) ||
      constant_time_declassify_int(!BN_is_one(&de))) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_D_E_NOT_CONGRUENT_TO_1);
    goto out;
  }

  has_crt_values = key->dmp1 != NULL;
  if (has_crt_values != (key->dmq1 != NULL) ||
      has_crt_values != (key->iqmp != NULL)) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_INCONSISTENT_SET_OF_CRT_VALUES);
    goto out;
  }

  if (has_crt_values) {
    int dmp1_ok, dmq1_ok, iqmp_ok;
    if (!check_mod_inverse(&dmp1_ok, key->e, key->dmp1, &pm1, pm1_bits, ctx) ||
        !check_mod_inverse(&dmq1_ok, key->e, key->dmq1, &qm1, qm1_bits, ctx) ||
        // |p| is odd, so |pm1| and |p| have the same bit width. If they didn't,
        // we only need a lower bound anyway.
        !check_mod_inverse(&iqmp_ok, key->q, key->iqmp, key->p, pm1_bits,
                           ctx)) {
      OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN);
      goto out;
    }

    if (!dmp1_ok || !dmq1_ok || !iqmp_ok) {
      OPENSSL_PUT_ERROR(RSA, RSA_R_CRT_VALUES_INCORRECT);
      goto out;
    }
  }

  ok = 1;

out:
  BN_free(&tmp);
  BN_free(&de);
  BN_free(&pm1);
  BN_free(&qm1);
  BN_free(&dmp1);
  BN_free(&dmq1);
  BN_CTX_free(ctx);

  return ok;
}

// This is the product of the 132 smallest odd primes, from 3 to 751.
static const BN_ULONG kSmallFactorsLimbs[] = {
    TOBN(0xc4309333, 0x3ef4e3e1), TOBN(0x71161eb6, 0xcd2d655f),
    TOBN(0x95e2238c, 0x0bf94862), TOBN(0x3eb233d3, 0x24f7912b),
    TOBN(0x6b55514b, 0xbf26c483), TOBN(0x0a84d817, 0x5a144871),
    TOBN(0x77d12fee, 0x9b82210a), TOBN(0xdb5b93c2, 0x97f050b3),
    TOBN(0x4acad6b9, 0x4d6c026b), TOBN(0xeb7751f3, 0x54aec893),
    TOBN(0xdba53368, 0x36bc85c4), TOBN(0xd85a1b28, 0x7f5ec78e),
    TOBN(0x2eb072d8, 0x6b322244), TOBN(0xbba51112, 0x5e2b3aea),
    TOBN(0x36ed1a6c, 0x0e2486bf), TOBN(0x5f270460, 0xec0c5727),
    0x000017b1};

DEFINE_LOCAL_DATA(BIGNUM, g_small_factors) {
  out->d = (BN_ULONG *)kSmallFactorsLimbs;
  out->width = OPENSSL_ARRAY_SIZE(kSmallFactorsLimbs);
  out->dmax = out->width;
  out->neg = 0;
  out->flags = BN_FLG_STATIC_DATA;
}

int RSA_check_fips(RSA *key) {
  if (!RSA_check_key(key)) {
    return 0;
  }

  BN_CTX *ctx = BN_CTX_new();
  if (ctx == NULL) {
    return 0;
  }

  BIGNUM small_gcd;
  BN_init(&small_gcd);

  int ret = 1;
  // Perform partial public key validation of RSA keys (SP 800-89 5.3.3).
  // Although this is not for primality testing, SP 800-89 cites an RSA
  // primality testing algorithm, so we use |BN_prime_checks_for_generation| to
  // match. This is only a plausibility test and we expect the value to be
  // composite, so too few iterations will cause us to reject the key, not use
  // an implausible one.
  //
  // |key->e| may be nullptr if created with |RSA_new_private_key_no_e|.
enum bn_primality_result_t primality_result; if (key->e == nullptr || // BN_num_bits(key->e) <= 16 || // BN_num_bits(key->e) > 256 || // !BN_is_odd(key->n) || // !BN_is_odd(key->e) || !BN_gcd(&small_gcd, key->n, g_small_factors(), ctx) || !BN_is_one(&small_gcd) || !BN_enhanced_miller_rabin_primality_test(&primality_result, key->n, BN_prime_checks_for_generation, ctx, NULL) || primality_result != bn_non_prime_power_composite) { OPENSSL_PUT_ERROR(RSA, RSA_R_PUBLIC_KEY_VALIDATION_FAILED); ret = 0; } BN_free(&small_gcd); BN_CTX_free(ctx); if (!ret || key->d == NULL || key->p == NULL) { // On a failure or on only a public key, there's nothing else can be // checked. return ret; } // FIPS pairwise consistency test (FIPS 140-2 4.9.2). Per FIPS 140-2 IG, // section 9.9, it is not known whether |rsa| will be used for signing or // encryption, so either pair-wise consistency self-test is acceptable. We // perform a signing test. uint8_t data[32] = {0}; unsigned sig_len = RSA_size(key); uint8_t *sig = reinterpret_cast(OPENSSL_malloc(sig_len)); if (sig == NULL) { return 0; } if (!RSA_sign(NID_sha256, data, sizeof(data), sig, &sig_len, key)) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); ret = 0; goto cleanup; } if (boringssl_fips_break_test("RSA_PWCT")) { data[0] = ~data[0]; } if (!RSA_verify(NID_sha256, data, sizeof(data), sig, sig_len, key)) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); ret = 0; } cleanup: OPENSSL_free(sig); return ret; } int rsa_private_transform_no_self_test(RSA *rsa, uint8_t *out, const uint8_t *in, size_t len) { if (rsa->meth->private_transform) { return rsa->meth->private_transform(rsa, out, in, len); } return rsa_default_private_transform(rsa, out, in, len); } int rsa_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, size_t len) { boringssl_ensure_rsa_self_test(); return rsa_private_transform_no_self_test(rsa, out, in, len); } int RSA_flags(const RSA *rsa) { return rsa->flags; } int RSA_test_flags(const RSA *rsa, int flags) { return 
rsa->flags & flags; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/rsa/rsa_impl.cc.inc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../../bcm_support.h" #include "../../internal.h" #include "../bn/internal.h" #include "../delocate.h" #include "../service_indicator/internal.h" #include "internal.h" int rsa_check_public_key(const RSA *rsa) { if (rsa->n == NULL) { OPENSSL_PUT_ERROR(RSA, RSA_R_VALUE_MISSING); return 0; } unsigned n_bits = BN_num_bits(rsa->n); if (n_bits > OPENSSL_RSA_MAX_MODULUS_BITS) { OPENSSL_PUT_ERROR(RSA, RSA_R_MODULUS_TOO_LARGE); return 0; } // TODO(crbug.com/boringssl/607): Raise this limit. 512-bit RSA was factored // in 1999. if (n_bits < 512) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; } // RSA moduli must be positive and odd. In addition to being necessary for RSA // in general, we cannot setup Montgomery reduction with even moduli. if (!BN_is_odd(rsa->n) || BN_is_negative(rsa->n)) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_RSA_PARAMETERS); return 0; } static const unsigned kMaxExponentBits = 33; if (rsa->e != NULL) { // Reject e = 1, negative e, and even e. e must be odd to be relatively // prime with phi(n). unsigned e_bits = BN_num_bits(rsa->e); if (e_bits < 2 || BN_is_negative(rsa->e) || !BN_is_odd(rsa->e)) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_E_VALUE); return 0; } if (rsa->flags & RSA_FLAG_LARGE_PUBLIC_EXPONENT) { // The caller has requested disabling DoS protections. Still, e must be // less than n. 
      if (BN_ucmp(rsa->n, rsa->e) <= 0) {
        OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_E_VALUE);
        return 0;
      }
    } else {
      // Mitigate DoS attacks by limiting the exponent size. 33 bits was chosen
      // as the limit based on the recommendations in [1] and [2]. Windows
      // CryptoAPI doesn't support values larger than 32 bits [3], so it is
      // unlikely that exponents larger than 32 bits are being used for anything
      // Windows commonly does.
      //
      // [1] https://www.imperialviolet.org/2012/03/16/rsae.html
      // [2] https://www.imperialviolet.org/2012/03/17/rsados.html
      // [3] https://msdn.microsoft.com/en-us/library/aa387685(VS.85).aspx
      if (e_bits > kMaxExponentBits) {
        OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_E_VALUE);
        return 0;
      }

      // The upper bound on |e_bits| and lower bound on |n_bits| imply e is
      // bounded by n.
      assert(BN_ucmp(rsa->n, rsa->e) > 0);
    }
  } else if (!(rsa->flags & RSA_FLAG_NO_PUBLIC_EXPONENT)) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_VALUE_MISSING);
    return 0;
  }

  return 1;
}

// If |*out| is unset, stores a copy of |in| resized to |width| words (marked
// secret). Returns one on success and zero on allocation failure.
static int ensure_fixed_copy(BIGNUM **out, const BIGNUM *in, int width) {
  if (*out != NULL) {
    return 1;
  }
  BIGNUM *copy = BN_dup(in);
  if (copy == NULL ||                  //
      !bn_resize_words(copy, width)) {
    BN_free(copy);
    return 0;
  }
  *out = copy;
  bn_secret(copy);

  return 1;
}

// freeze_private_key finishes initializing |rsa|'s private key components.
// After this function has returned, |rsa| may not be changed. This is needed
// because |RSA| is a public struct and, additionally, OpenSSL 1.1.0 opaquified
// it wrong (see https://github.com/openssl/openssl/issues/5158).
static int freeze_private_key(RSA *rsa, BN_CTX *ctx) {
  CRYPTO_MUTEX_lock_read(&rsa->lock);
  int frozen = rsa->private_key_frozen;
  CRYPTO_MUTEX_unlock_read(&rsa->lock);
  if (frozen) {
    return 1;
  }

  int ret = 0;
  const BIGNUM *n_fixed;
  CRYPTO_MUTEX_lock_write(&rsa->lock);
  if (rsa->private_key_frozen) {
    // Another thread froze the key while we were waiting for the write lock.
    ret = 1;
    goto err;
  }

  // Check the public components are within DoS bounds.
  if (!rsa_check_public_key(rsa)) {
    goto err;
  }

  // Pre-compute various intermediate values, as well as copies of private
  // exponents with correct widths. Note that other threads may concurrently
  // read from |rsa->n|, |rsa->e|, etc., so any fixes must be in separate
  // copies. We use |mont_n->N|, |mont_p->N|, and |mont_q->N| as copies of |n|,
  // |p|, and |q| with the correct minimal widths.

  if (rsa->mont_n == NULL) {
    rsa->mont_n = BN_MONT_CTX_new_for_modulus(rsa->n, ctx);
    if (rsa->mont_n == NULL) {
      goto err;
    }
  }
  n_fixed = &rsa->mont_n->N;

  // The only public upper-bound of |rsa->d| is the bit length of |rsa->n|. The
  // ASN.1 serialization of RSA private keys unfortunately leaks the byte length
  // of |rsa->d|, but normalize it so we only leak it once, rather than per
  // operation.
  if (rsa->d != NULL &&
      !ensure_fixed_copy(&rsa->d_fixed, rsa->d, n_fixed->width)) {
    goto err;
  }

  if (rsa->e != NULL && rsa->p != NULL && rsa->q != NULL) {
    // TODO: p and q are also CONSTTIME_SECRET but not yet marked as such
    // because the Montgomery code does things like test whether or not values
    // are zero. So the secret marking probably needs to happen inside that
    // code.

    if (rsa->mont_p == NULL) {
      rsa->mont_p = BN_MONT_CTX_new_consttime(rsa->p, ctx);
      if (rsa->mont_p == NULL) {
        goto err;
      }
    }

    if (rsa->mont_q == NULL) {
      rsa->mont_q = BN_MONT_CTX_new_consttime(rsa->q, ctx);
      if (rsa->mont_q == NULL) {
        goto err;
      }
    }

    if (rsa->dmp1 != NULL && rsa->dmq1 != NULL && rsa->iqmp != NULL) {
      // CRT components are only publicly bounded by their corresponding
      // moduli's bit lengths.
      const BIGNUM *p_fixed = &rsa->mont_p->N;
      const BIGNUM *q_fixed = &rsa->mont_q->N;
      if (!ensure_fixed_copy(&rsa->dmp1_fixed, rsa->dmp1, p_fixed->width) ||
          !ensure_fixed_copy(&rsa->dmq1_fixed, rsa->dmq1, q_fixed->width)) {
        goto err;
      }

      // Compute |iqmp_mont|, which is |iqmp| in Montgomery form and with the
      // correct bit width.
      if (rsa->iqmp_mont == NULL) {
        BIGNUM *iqmp_mont = BN_new();
        if (iqmp_mont == NULL ||
            !BN_to_montgomery(iqmp_mont, rsa->iqmp, rsa->mont_p, ctx)) {
          BN_free(iqmp_mont);
          goto err;
        }
        rsa->iqmp_mont = iqmp_mont;
        bn_secret(rsa->iqmp_mont);
      }
    }
  }

  rsa->private_key_frozen = 1;
  ret = 1;

err:
  CRYPTO_MUTEX_unlock_write(&rsa->lock);
  return ret;
}

// Discards all precomputed/frozen private-key state so the key can be
// mutated again; called by the |RSA_set0_*| setters.
void rsa_invalidate_key(RSA *rsa) {
  rsa->private_key_frozen = 0;

  BN_MONT_CTX_free(rsa->mont_n);
  rsa->mont_n = NULL;
  BN_MONT_CTX_free(rsa->mont_p);
  rsa->mont_p = NULL;
  BN_MONT_CTX_free(rsa->mont_q);
  rsa->mont_q = NULL;

  BN_free(rsa->d_fixed);
  rsa->d_fixed = NULL;
  BN_free(rsa->dmp1_fixed);
  rsa->dmp1_fixed = NULL;
  BN_free(rsa->dmq1_fixed);
  rsa->dmq1_fixed = NULL;
  BN_free(rsa->iqmp_mont);
  rsa->iqmp_mont = NULL;

  for (size_t i = 0; i < rsa->num_blindings; i++) {
    BN_BLINDING_free(rsa->blindings[i]);
  }
  OPENSSL_free(rsa->blindings);
  rsa->blindings = NULL;
  rsa->num_blindings = 0;
  OPENSSL_free(rsa->blindings_inuse);
  rsa->blindings_inuse = NULL;
  rsa->blinding_fork_generation = 0;
}

// MAX_BLINDINGS_PER_RSA defines the maximum number of cached BN_BLINDINGs per
// RSA*. Then this limit is exceeded, BN_BLINDING objects will be created and
// destroyed as needed.
#if defined(OPENSSL_TSAN)
// Smaller under TSAN so that the edge case can be hit with fewer threads.
#define MAX_BLINDINGS_PER_RSA 2
#else
#define MAX_BLINDINGS_PER_RSA 1024
#endif

// rsa_blinding_get returns a BN_BLINDING to use with |rsa|. It does this by
// allocating one of the cached BN_BLINDING objects in |rsa->blindings|. If
// none are free, the cache will be extended by a extra element and the new
// BN_BLINDING is returned.
//
// On success, the index of the assigned BN_BLINDING is written to
// |*index_used| and must be passed to |rsa_blinding_release| when finished.
static BN_BLINDING *rsa_blinding_get(RSA *rsa, size_t *index_used, BN_CTX *ctx) { assert(ctx != NULL); assert(rsa->mont_n != NULL); BN_BLINDING *ret = NULL; const uint64_t fork_generation = CRYPTO_get_fork_generation(); CRYPTO_MUTEX_lock_write(&rsa->lock); // Wipe the blinding cache on |fork|. if (rsa->blinding_fork_generation != fork_generation) { for (size_t i = 0; i < rsa->num_blindings; i++) { // The inuse flag must be zero unless we were forked from a // multi-threaded process, in which case calling back into BoringSSL is // forbidden. assert(rsa->blindings_inuse[i] == 0); BN_BLINDING_invalidate(rsa->blindings[i]); } rsa->blinding_fork_generation = fork_generation; } uint8_t *const free_inuse_flag = reinterpret_cast( OPENSSL_memchr(rsa->blindings_inuse, 0, rsa->num_blindings)); size_t new_num_blindings; BN_BLINDING **new_blindings; uint8_t *new_blindings_inuse; if (free_inuse_flag != NULL) { *free_inuse_flag = 1; *index_used = free_inuse_flag - rsa->blindings_inuse; ret = rsa->blindings[*index_used]; goto out; } if (rsa->num_blindings >= MAX_BLINDINGS_PER_RSA) { // No |BN_BLINDING| is free and nor can the cache be extended. This index // value is magic and indicates to |rsa_blinding_release| that a // |BN_BLINDING| was not inserted into the array. *index_used = MAX_BLINDINGS_PER_RSA; ret = BN_BLINDING_new(); goto out; } // Double the length of the cache. 
static_assert(MAX_BLINDINGS_PER_RSA < UINT_MAX / 2, "MAX_BLINDINGS_PER_RSA too large"); new_num_blindings = rsa->num_blindings * 2; if (new_num_blindings == 0) { new_num_blindings = 1; } if (new_num_blindings > MAX_BLINDINGS_PER_RSA) { new_num_blindings = MAX_BLINDINGS_PER_RSA; } assert(new_num_blindings > rsa->num_blindings); new_blindings = reinterpret_cast( OPENSSL_calloc(new_num_blindings, sizeof(BN_BLINDING *))); new_blindings_inuse = reinterpret_cast(OPENSSL_malloc(new_num_blindings)); if (new_blindings == NULL || new_blindings_inuse == NULL) { goto err; } OPENSSL_memcpy(new_blindings, rsa->blindings, sizeof(BN_BLINDING *) * rsa->num_blindings); OPENSSL_memcpy(new_blindings_inuse, rsa->blindings_inuse, rsa->num_blindings); for (size_t i = rsa->num_blindings; i < new_num_blindings; i++) { new_blindings[i] = BN_BLINDING_new(); if (new_blindings[i] == NULL) { for (size_t j = rsa->num_blindings; j < i; j++) { BN_BLINDING_free(new_blindings[j]); } goto err; } } memset(&new_blindings_inuse[rsa->num_blindings], 0, new_num_blindings - rsa->num_blindings); new_blindings_inuse[rsa->num_blindings] = 1; *index_used = rsa->num_blindings; assert(*index_used != MAX_BLINDINGS_PER_RSA); ret = new_blindings[rsa->num_blindings]; OPENSSL_free(rsa->blindings); rsa->blindings = new_blindings; OPENSSL_free(rsa->blindings_inuse); rsa->blindings_inuse = new_blindings_inuse; rsa->num_blindings = new_num_blindings; goto out; err: OPENSSL_free(new_blindings_inuse); OPENSSL_free(new_blindings); out: CRYPTO_MUTEX_unlock_write(&rsa->lock); return ret; } // rsa_blinding_release marks the cached BN_BLINDING at the given index as free // for other threads to use. static void rsa_blinding_release(RSA *rsa, BN_BLINDING *blinding, size_t blinding_index) { if (blinding_index == MAX_BLINDINGS_PER_RSA) { // This blinding wasn't cached. 
BN_BLINDING_free(blinding); return; } CRYPTO_MUTEX_lock_write(&rsa->lock); rsa->blindings_inuse[blinding_index] = 0; CRYPTO_MUTEX_unlock_write(&rsa->lock); } // signing int rsa_default_sign_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding) { const unsigned rsa_size = RSA_size(rsa); uint8_t *buf = NULL; int i, ret = 0; if (max_out < rsa_size) { OPENSSL_PUT_ERROR(RSA, RSA_R_OUTPUT_BUFFER_TOO_SMALL); return 0; } buf = reinterpret_cast(OPENSSL_malloc(rsa_size)); if (buf == NULL) { goto err; } switch (padding) { case RSA_PKCS1_PADDING: i = RSA_padding_add_PKCS1_type_1(buf, rsa_size, in, in_len); break; case RSA_NO_PADDING: i = RSA_padding_add_none(buf, rsa_size, in, in_len); break; default: OPENSSL_PUT_ERROR(RSA, RSA_R_UNKNOWN_PADDING_TYPE); goto err; } if (i <= 0) { goto err; } if (!rsa_private_transform_no_self_test(rsa, out, buf, rsa_size)) { goto err; } CONSTTIME_DECLASSIFY(out, rsa_size); *out_len = rsa_size; ret = 1; err: OPENSSL_free(buf); return ret; } static int rsa_mod_exp_crt(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx); int rsa_verify_raw_no_self_test(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding) { if (rsa->n == NULL || rsa->e == NULL) { OPENSSL_PUT_ERROR(RSA, RSA_R_VALUE_MISSING); return 0; } if (!rsa_check_public_key(rsa)) { return 0; } const unsigned rsa_size = RSA_size(rsa); BIGNUM *f, *result; if (max_out < rsa_size) { OPENSSL_PUT_ERROR(RSA, RSA_R_OUTPUT_BUFFER_TOO_SMALL); return 0; } if (in_len != rsa_size) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_LEN_NOT_EQUAL_TO_MOD_LEN); return 0; } BN_CTX *ctx = BN_CTX_new(); if (ctx == NULL) { return 0; } int ret = 0; uint8_t *buf = NULL; BN_CTX_start(ctx); f = BN_CTX_get(ctx); result = BN_CTX_get(ctx); if (f == NULL || result == NULL) { goto err; } if (padding == RSA_NO_PADDING) { buf = out; } else { // Allocate a temporary buffer to hold the padded plaintext. 
buf = reinterpret_cast(OPENSSL_malloc(rsa_size)); if (buf == NULL) { goto err; } } if (BN_bin2bn(in, in_len, f) == NULL) { goto err; } if (BN_ucmp(f, rsa->n) >= 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS); goto err; } if (!BN_MONT_CTX_set_locked(&rsa->mont_n, &rsa->lock, rsa->n, ctx) || !BN_mod_exp_mont(result, f, rsa->e, &rsa->mont_n->N, ctx, rsa->mont_n)) { goto err; } if (!BN_bn2bin_padded(buf, rsa_size, result)) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); goto err; } switch (padding) { case RSA_PKCS1_PADDING: ret = RSA_padding_check_PKCS1_type_1(out, out_len, rsa_size, buf, rsa_size); break; case RSA_NO_PADDING: ret = 1; *out_len = rsa_size; break; default: OPENSSL_PUT_ERROR(RSA, RSA_R_UNKNOWN_PADDING_TYPE); goto err; } if (!ret) { OPENSSL_PUT_ERROR(RSA, RSA_R_PADDING_CHECK_FAILED); goto err; } err: BN_CTX_end(ctx); BN_CTX_free(ctx); if (buf != out) { OPENSSL_free(buf); } return ret; } int RSA_verify_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding) { boringssl_ensure_rsa_self_test(); return rsa_verify_raw_no_self_test(rsa, out_len, out, max_out, in, in_len, padding); } int rsa_default_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, size_t len) { if (rsa->n == NULL || rsa->d == NULL) { OPENSSL_PUT_ERROR(RSA, RSA_R_VALUE_MISSING); return 0; } BIGNUM *f, *result; BN_CTX *ctx = NULL; size_t blinding_index = 0; BN_BLINDING *blinding = NULL; int ret = 0, do_blinding; ctx = BN_CTX_new(); if (ctx == NULL) { goto err; } BN_CTX_start(ctx); f = BN_CTX_get(ctx); result = BN_CTX_get(ctx); if (f == NULL || result == NULL) { goto err; } // The caller should have ensured this. assert(len == BN_num_bytes(rsa->n)); if (BN_bin2bn(in, len, f) == NULL) { goto err; } // The input to the RSA private transform may be secret, but padding is // expected to construct a value within range, so we can leak this comparison. 
if (constant_time_declassify_int(BN_ucmp(f, rsa->n) >= 0)) { // Usually the padding functions would catch this. OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS); goto err; } if (!freeze_private_key(rsa, ctx)) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); goto err; } do_blinding = (rsa->flags & (RSA_FLAG_NO_BLINDING | RSA_FLAG_NO_PUBLIC_EXPONENT)) == 0; if (rsa->e == NULL && do_blinding) { // We cannot do blinding or verification without |e|, and continuing without // those countermeasures is dangerous. However, the Java/Android RSA API // requires support for keys where only |d| and |n| (and not |e|) are known. // The callers that require that bad behavior must set // |RSA_FLAG_NO_BLINDING| or use |RSA_new_private_key_no_e|. // // TODO(davidben): Update this comment when Conscrypt is updated to use // |RSA_new_private_key_no_e|. OPENSSL_PUT_ERROR(RSA, RSA_R_NO_PUBLIC_EXPONENT); goto err; } if (do_blinding) { blinding = rsa_blinding_get(rsa, &blinding_index, ctx); if (blinding == NULL) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); goto err; } if (!BN_BLINDING_convert(f, blinding, rsa->e, rsa->mont_n, ctx)) { goto err; } } if (rsa->p != NULL && rsa->q != NULL && rsa->e != NULL && rsa->dmp1 != NULL && rsa->dmq1 != NULL && rsa->iqmp != NULL && // Require that we can reduce |f| by |rsa->p| and |rsa->q| in constant // time, which requires primes be the same size, rounded to the Montgomery // coefficient. (See |mod_montgomery|.) This is not required by RFC 8017, // but it is true for keys generated by us and all common implementations. 
bn_less_than_montgomery_R(rsa->q, rsa->mont_p) && bn_less_than_montgomery_R(rsa->p, rsa->mont_q)) { if (!rsa_mod_exp_crt(result, f, rsa, ctx)) { goto err; } } else if (!BN_mod_exp_mont_consttime(result, f, rsa->d_fixed, rsa->n, ctx, rsa->mont_n)) { goto err; } // Verify the result to protect against fault attacks as described in the // 1997 paper "On the Importance of Checking Cryptographic Protocols for // Faults" by Dan Boneh, Richard A. DeMillo, and Richard J. Lipton. Some // implementations do this only when the CRT is used, but we do it in all // cases. Section 6 of the aforementioned paper describes an attack that // works when the CRT isn't used. That attack is much less likely to succeed // than the CRT attack, but there have likely been improvements since 1997. // // This check is cheap assuming |e| is small, which we require in // |rsa_check_public_key|. if (rsa->e != NULL) { BIGNUM *vrfy = BN_CTX_get(ctx); if (vrfy == NULL || !BN_mod_exp_mont(vrfy, result, rsa->e, rsa->n, ctx, rsa->mont_n) || !constant_time_declassify_int(BN_equal_consttime(vrfy, f))) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); goto err; } } if (do_blinding && !BN_BLINDING_invert(result, blinding, rsa->mont_n, ctx)) { goto err; } // The computation should have left |result| as a maximally-wide number, so // that it and serializing does not leak information about the magnitude of // the result. // // See Falko Strenzke, "Manger's Attack revisited", ICICS 2010. assert(result->width == rsa->mont_n->N.width); bn_assert_fits_in_bytes(result, len); if (!BN_bn2bin_padded(out, len, result)) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); goto err; } ret = 1; err: if (ctx != NULL) { BN_CTX_end(ctx); BN_CTX_free(ctx); } if (blinding != NULL) { rsa_blinding_release(rsa, blinding, blinding_index); } return ret; } // mod_montgomery sets |r| to |I| mod |p|. |I| must already be fully reduced // modulo |p| times |q|. It returns one on success and zero on error. 
static int mod_montgomery(BIGNUM *r, const BIGNUM *I, const BIGNUM *p,
                          const BN_MONT_CTX *mont_p, const BIGNUM *q,
                          BN_CTX *ctx) {
  // Reducing in constant-time with Montgomery reduction requires I <= p * R. We
  // have I < p * q, so this follows if q < R. The caller should have checked
  // this already.
  if (!bn_less_than_montgomery_R(q, mont_p)) {
    OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  if (  // Reduce mod p with Montgomery reduction. This computes I * R^-1 mod p.
      !BN_from_montgomery(r, I, mont_p, ctx) ||
      // Multiply by R^2 and do another Montgomery reduction to compute
      // I * R^-1 * R^2 * R^-1 = I mod p.
      !BN_to_montgomery(r, r, mont_p, ctx)) {
    return 0;
  }

  // By precomputing R^3 mod p (normally |BN_MONT_CTX| only uses R^2 mod p) and
  // adjusting the API for |BN_mod_exp_mont_consttime|, we could instead compute
  // I * R mod p here and save a reduction per prime. But this would require
  // changing the RSAZ code and may not be worth it. Note that the RSAZ code
  // uses a different radix, so it uses R' = 2^1044. There we'd actually want
  // R^2 * R', and would further benefit from a precomputed R'^2. It currently
  // converts |mont_p->RR| to R'^2.
  return 1;
}

// rsa_mod_exp_crt performs the RSA private-key exponentiation of |I| into |r0|
// using the Chinese Remainder Theorem, in constant time. The caller must have
// run |freeze_private_key| first so the cached Montgomery contexts (|mont_n|,
// |mont_p|, |mont_q|) and fixed-width exponents (|dmp1_fixed|, |dmq1_fixed|,
// |iqmp_mont|) are initialized, and must have checked |I| < n. Returns one on
// success and zero on error.
static int rsa_mod_exp_crt(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) {
  assert(ctx != NULL);

  assert(rsa->n != NULL);
  assert(rsa->e != NULL);
  assert(rsa->d != NULL);
  assert(rsa->p != NULL);
  assert(rsa->q != NULL);
  assert(rsa->dmp1 != NULL);
  assert(rsa->dmq1 != NULL);
  assert(rsa->iqmp != NULL);

  BIGNUM *r1, *m1;
  int ret = 0;

  BN_CTX_start(ctx);
  r1 = BN_CTX_get(ctx);
  m1 = BN_CTX_get(ctx);
  BIGNUM *n, *p, *q;
  if (r1 == NULL || m1 == NULL) {
    goto err;
  }

  // Use the minimal-width versions of |n|, |p|, and |q|. Either works, but if
  // someone gives us non-minimal values, these will be slightly more efficient
  // on the non-Montgomery operations.
  n = &rsa->mont_n->N;
  p = &rsa->mont_p->N;
  q = &rsa->mont_q->N;

  // This is a pre-condition for |mod_montgomery|. It was already checked by the
  // caller.
  declassify_assert(BN_ucmp(I, n) < 0);

  if (  // |m1| is the result modulo |q|.
      !mod_montgomery(r1, I, q, rsa->mont_q, p, ctx) ||
      !BN_mod_exp_mont_consttime(m1, r1, rsa->dmq1_fixed, q, ctx,
                                 rsa->mont_q) ||

      // |r0| is the result modulo |p|.
      !mod_montgomery(r1, I, p, rsa->mont_p, q, ctx) ||
      !BN_mod_exp_mont_consttime(r0, r1, rsa->dmp1_fixed, p, ctx,
                                 rsa->mont_p) ||

      // Compute r0 = r0 - m1 mod p. |m1| is reduced mod |q|, not |p|, so we
      // just run |mod_montgomery| again for simplicity. This could be more
      // efficient with more cases: if |p > q|, |m1| is already reduced. If
      // |p < q| but they have the same bit width, |bn_reduce_once| suffices.
      // However, compared to over 2048 Montgomery multiplications above, this
      // difference is not measurable.
      !mod_montgomery(r1, m1, p, rsa->mont_p, q, ctx) ||
      !bn_mod_sub_consttime(r0, r0, r1, p, ctx) ||

      // r0 = r0 * iqmp mod p. We use Montgomery multiplication to compute this
      // in constant time. |iqmp_mont| is in Montgomery form and r0 is not, so
      // the result is taken out of Montgomery form.
      !BN_mod_mul_montgomery(r0, r0, rsa->iqmp_mont, rsa->mont_p, ctx) ||

      // r0 = r0 * q + m1 gives the final result. Reducing modulo q gives m1, so
      // it is correct mod p. Reducing modulo p gives (r0-m1)*iqmp*q + m1 = r0,
      // so it is correct mod q. Finally, the result is bounded by [m1, n + m1),
      // and the result is at least |m1|, so this must be the unique answer in
      // [0, n).
      !bn_mul_consttime(r0, r0, q, ctx) ||  //
      !bn_uadd_consttime(r0, r0, m1)) {
    goto err;
  }

  // The result should be bounded by |n|, but fixed-width operations may
  // bound the width slightly higher, so fix it. This trips constant-time checks
  // because a naive data flow analysis does not realize the excess words are
  // publicly zero.
  declassify_assert(BN_cmp(r0, n) < 0);
  bn_assert_fits_in_bytes(r0, BN_num_bytes(n));
  if (!bn_resize_words(r0, n->width)) {
    goto err;
  }

  ret = 1;

err:
  BN_CTX_end(ctx);
  return ret;
}

// ensure_bignum allocates *|out| if it is NULL. It returns one if *|out| is
// non-NULL on return and zero on allocation failure.
static int ensure_bignum(BIGNUM **out) {
  if (*out == NULL) {
    *out = BN_new();
  }
  return *out != NULL;
}

// kBoringSSLRSASqrtTwo is the BIGNUM representation of ⌊2²⁰⁴⁷×√2⌋. This is
// chosen to give enough precision for 4096-bit RSA, the largest key size FIPS
// specifies. Key sizes beyond this will round up.
//
// To calculate, use the following Haskell code:
//
//   import Text.Printf (printf)
//   import Data.List (intercalate)
//
//   pow2 = 4095
//   target = 2^pow2
//
//   f x = x*x - (toRational target)
//
//   fprime x = 2*x
//
//   newtonIteration x = x - (f x) / (fprime x)
//
//   converge x =
//     let n = floor x in
//     if n*n - target < 0 && (n+1)*(n+1) - target > 0
//       then n
//       else converge (newtonIteration x)
//
//   divrem bits x = (x `div` (2^bits), x `rem` (2^bits))
//
//   bnWords :: Integer -> [Integer]
//   bnWords x =
//     if x == 0
//       then []
//       else let (high, low) = divrem 64 x in low : bnWords high
//
//   showWord x = let (high, low) = divrem 32 x in
//                printf "TOBN(0x%08x, 0x%08x)" high low
//
//   output :: String
//   output = intercalate ", " $ map showWord $ bnWords $
//            converge (2 ^ (pow2 `div` 2))
//
// To verify this number, check that n² < 2⁴⁰⁹⁵ < (n+1)², where n is value
// represented here. Note the components are listed in little-endian order. Here
// is some sample Python code to check:
//
//   >>> TOBN = lambda a, b: a << 32 | b
//   >>> l = [ <paste the contents of kBoringSSLRSASqrtTwo> ]
//   >>> n = sum(a * 2**(64*i) for i, a in enumerate(l))
//   >>> n**2 < 2**4095 < (n+1)**2
//   True
const BN_ULONG kBoringSSLRSASqrtTwo[] = {
    TOBN(0x4d7c60a5, 0xe633e3e1), TOBN(0x5fcf8f7b, 0xca3ea33b),
    TOBN(0xc246785e, 0x92957023), TOBN(0xf9acce41, 0x797f2805),
    TOBN(0xfdfe170f, 0xd3b1f780), TOBN(0xd24f4a76, 0x3facb882),
    TOBN(0x18838a2e, 0xaff5f3b2), TOBN(0xc1fcbdde, 0xa2f7dc33),
    TOBN(0xdea06241, 0xf7aa81c2), TOBN(0xf6a1be3f, 0xca221307),
    TOBN(0x332a5e9f, 0x7bda1ebf), TOBN(0x0104dc01, 0xfe32352f),
    TOBN(0xb8cf341b, 0x6f8236c7), TOBN(0x4264dabc, 0xd528b651),
    TOBN(0xf4d3a02c, 0xebc93e0c), TOBN(0x81394ab6, 0xd8fd0efd),
    TOBN(0xeaa4a089, 0x9040ca4a), TOBN(0xf52f120f, 0x836e582e),
    TOBN(0xcb2a6343, 0x31f3c84d), TOBN(0xc6d5a8a3, 0x8bb7e9dc),
    TOBN(0x460abc72, 0x2f7c4e33), TOBN(0xcab1bc91, 0x1688458a),
    TOBN(0x53059c60, 0x11bc337b), TOBN(0xd2202e87, 0x42af1f4e),
    TOBN(0x78048736, 0x3dfa2768), TOBN(0x0f74a85e, 0x439c7b4a),
    TOBN(0xa8b1fe6f, 0xdc83db39), TOBN(0x4afc8304, 0x3ab8a2c3),
    TOBN(0xed17ac85, 0x83339915), TOBN(0x1d6f60ba, 0x893ba84c),
    TOBN(0x597d89b3, 0x754abe9f), TOBN(0xb504f333, 0xf9de6484),
};
const size_t kBoringSSLRSASqrtTwoLen = OPENSSL_ARRAY_SIZE(kBoringSSLRSASqrtTwo);

// generate_prime sets |out| to a prime with length |bits| such that |out|-1 is
// relatively prime to |e|. If |p| is non-NULL, |out| will also not be close to
// |p|. |sqrt2| must be ⌊2^(bits-1)×√2⌋ (or a slight overestimate for large
// sizes), and |pow2_bits_100| must be 2^(bits-100).
//
// This function fails with probability around 2^-21.
static int generate_prime(BIGNUM *out, int bits, const BIGNUM *e,
                          const BIGNUM *p, const BIGNUM *sqrt2,
                          const BIGNUM *pow2_bits_100, BN_CTX *ctx,
                          BN_GENCB *cb) {
  if (bits < 128 || (bits % BN_BITS2) != 0) {
    OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR);
    return 0;
  }
  assert(BN_is_pow2(pow2_bits_100));
  assert(BN_is_bit_set(pow2_bits_100, bits - 100));

  // See FIPS 186-4 appendix B.3.3, steps 4 and 5. Note |bits| here is nlen/2.

  // Use the limit from steps 4.7 and 5.8 for most values of |e|. When |e| is 3,
  // the 186-4 limit is too low, so we use a higher one. Note this case is not
  // reachable from |RSA_generate_key_fips|.
  //
  // |limit| determines the failure probability. We must find a prime that is
  // not 1 mod |e|. By the prime number theorem, we'll find one with probability
  // p = (e-1)/e * 2/(ln(2)*bits). Note the second term is doubled because we
  // discard even numbers.
  //
  // The failure probability is thus (1-p)^limit. To convert that to a power of
  // two, we take logs. -log_2((1-p)^limit) = -limit * ln(1-p) / ln(2).
  //
  //   >>> def f(bits, e, limit):
  //   ...   p = (e-1.0)/e * 2.0/(math.log(2)*bits)
  //   ...   return -limit * math.log(1 - p) / math.log(2)
  //   ...
  //   >>> f(1024, 65537, 5*1024)
  //   20.842750558272634
  //   >>> f(1536, 65537, 5*1536)
  //   20.83294549602474
  //   >>> f(2048, 65537, 5*2048)
  //   20.828047576234948
  //   >>> f(1024, 3, 8*1024)
  //   22.222147925962307
  //   >>> f(1536, 3, 8*1536)
  //   22.21518251065506
  //   >>> f(2048, 3, 8*2048)
  //   22.211701985875937
  if (bits >= INT_MAX / 32) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_MODULUS_TOO_LARGE);
    return 0;
  }
  int limit = BN_is_word(e, 3) ? bits * 8 : bits * 5;

  int ret = 0, tries = 0, rand_tries = 0;
  BN_CTX_start(ctx);
  BIGNUM *tmp = BN_CTX_get(ctx);
  if (tmp == NULL) {
    goto err;
  }

  for (;;) {
    // Generate a random number of length |bits| where the bottom bit is set
    // (steps 4.2, 4.3, 5.2 and 5.3) and the top bit is set (implied by the
    // bound checked below in steps 4.4 and 5.5).
    if (!BN_rand(out, bits, BN_RAND_TOP_ONE, BN_RAND_BOTTOM_ODD) ||
        !BN_GENCB_call(cb, BN_GENCB_GENERATED, rand_tries++)) {
      goto err;
    }

    if (p != NULL) {
      // If |p| and |out| are too close, try again (step 5.4).
      if (!bn_abs_sub_consttime(tmp, out, p, ctx)) {
        goto err;
      }
      if (BN_cmp(tmp, pow2_bits_100) <= 0) {
        continue;
      }
    }

    // If out < 2^(bits-1)×√2, try again (steps 4.4 and 5.5). This is equivalent
    // to out <= ⌊2^(bits-1)×√2⌋, or out <= sqrt2 for FIPS key sizes.
    //
    // For larger keys, the comparison is approximate, leaning towards
    // retrying. That is, we reject a negligible fraction of primes that are
    // within the FIPS bound, but we will never accept a prime outside the
    // bound, ensuring the resulting RSA key is the right size.
    //
    // Values over the threshold are discarded, so it is safe to leak this
    // comparison.
    if (constant_time_declassify_int(BN_cmp(out, sqrt2) <= 0)) {
      continue;
    }

    // RSA key generation's bottleneck is discarding composites. If it fails
    // trial division, do not bother computing a GCD or performing Miller-Rabin.
    if (!bn_odd_number_is_obviously_composite(out)) {
      // Check gcd(out-1, e) is one (steps 4.5 and 5.6). Leaking the final
      // result of this comparison is safe because, if not relatively prime, the
      // value will be discarded.
      int relatively_prime;
      if (!bn_usub_consttime(tmp, out, BN_value_one()) ||
          !bn_is_relatively_prime(&relatively_prime, tmp, e, ctx)) {
        goto err;
      }
      if (constant_time_declassify_int(relatively_prime)) {
        // Test |out| for primality (steps 4.5.1 and 5.6.1).
        int is_probable_prime;
        if (!BN_primality_test(&is_probable_prime, out,
                               BN_prime_checks_for_generation, ctx, 0, cb)) {
          goto err;
        }
        if (is_probable_prime) {
          ret = 1;
          goto err;
        }
      }
    }

    // If we've tried too many times to find a prime, abort (steps 4.7 and
    // 5.8).
    tries++;
    if (tries >= limit) {
      OPENSSL_PUT_ERROR(RSA, RSA_R_TOO_MANY_ITERATIONS);
      goto err;
    }
    if (!BN_GENCB_call(cb, 2, tries)) {
      goto err;
    }
  }

err:
  BN_CTX_end(ctx);
  return ret;
}

// rsa_generate_key_impl generates an RSA key using a generalized version of
// FIPS 186-4 appendix B.3. |RSA_generate_key_fips| performs additional checks
// for FIPS-compliant key generation.
//
// This function returns one on success and zero on failure. It has a failure
// probability of about 2^-20.
static int rsa_generate_key_impl(RSA *rsa, int bits, const BIGNUM *e_value,
                                 BN_GENCB *cb) {
  // See FIPS 186-4 appendix B.3. This function implements a generalized version
  // of the FIPS algorithm. |RSA_generate_key_fips| performs additional checks
  // for FIPS-compliant key generation.

  // Always generate RSA keys which are a multiple of 128 bits. Round |bits|
  // down as needed.
  bits &= ~127;

  // Reject excessively small keys.
  if (bits < 256) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL);
    return 0;
  }

  // Reject excessively large public exponents. Windows CryptoAPI and Go don't
  // support values larger than 32 bits, so match their limits for generating
  // keys. (|rsa_check_public_key| uses a slightly more conservative value, but
  // we don't need to support generating such keys.)
  // https://github.com/golang/go/issues/3161
  // https://msdn.microsoft.com/en-us/library/aa387685(VS.85).aspx
  if (BN_num_bits(e_value) > 32) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_E_VALUE);
    return 0;
  }

  int ret = 0;
  int prime_bits = bits / 2;
  BN_CTX *ctx = BN_CTX_new();
  BIGNUM *totient, *pm1, *qm1, *sqrt2, *pow2_prime_bits_100, *pow2_prime_bits;
  int sqrt2_bits;
  if (ctx == NULL) {
    goto bn_err;
  }
  BN_CTX_start(ctx);
  totient = BN_CTX_get(ctx);
  pm1 = BN_CTX_get(ctx);
  qm1 = BN_CTX_get(ctx);
  sqrt2 = BN_CTX_get(ctx);
  pow2_prime_bits_100 = BN_CTX_get(ctx);
  pow2_prime_bits = BN_CTX_get(ctx);
  if (totient == NULL || pm1 == NULL || qm1 == NULL || sqrt2 == NULL ||
      pow2_prime_bits_100 == NULL || pow2_prime_bits == NULL ||
      !BN_set_bit(pow2_prime_bits_100, prime_bits - 100) ||
      !BN_set_bit(pow2_prime_bits, prime_bits)) {
    goto bn_err;
  }

  // We need the RSA components non-NULL.
  if (!ensure_bignum(&rsa->n) ||     //
      !ensure_bignum(&rsa->d) ||     //
      !ensure_bignum(&rsa->e) ||     //
      !ensure_bignum(&rsa->p) ||     //
      !ensure_bignum(&rsa->q) ||     //
      !ensure_bignum(&rsa->dmp1) ||  //
      !ensure_bignum(&rsa->dmq1) ||  //
      !ensure_bignum(&rsa->iqmp)) {
    goto bn_err;
  }

  if (!BN_copy(rsa->e, e_value)) {
    goto bn_err;
  }

  // Compute sqrt2 >= ⌊2^(prime_bits-1)×√2⌋.
  if (!bn_set_words(sqrt2, kBoringSSLRSASqrtTwo, kBoringSSLRSASqrtTwoLen)) {
    goto bn_err;
  }
  sqrt2_bits = kBoringSSLRSASqrtTwoLen * BN_BITS2;
  assert(sqrt2_bits == (int)BN_num_bits(sqrt2));
  if (sqrt2_bits > prime_bits) {
    // For key sizes up to 4096 (prime_bits = 2048), this is exactly
    // ⌊2^(prime_bits-1)×√2⌋.
    if (!BN_rshift(sqrt2, sqrt2, sqrt2_bits - prime_bits)) {
      goto bn_err;
    }
  } else if (prime_bits > sqrt2_bits) {
    // For key sizes beyond 4096, this is approximate. We err towards retrying
    // to ensure our key is the right size and round up.
    if (!BN_add_word(sqrt2, 1) ||
        !BN_lshift(sqrt2, sqrt2, prime_bits - sqrt2_bits)) {
      goto bn_err;
    }
  }
  assert(prime_bits == (int)BN_num_bits(sqrt2));

  do {
    // Generate p and q, each of size |prime_bits|, using the steps outlined in
    // appendix FIPS 186-4 appendix B.3.3.
    //
    // Each call to |generate_prime| fails with probability p = 2^-21. The
    // probability that either call fails is 1 - (1-p)^2, which is around 2^-20.
    if (!generate_prime(rsa->p, prime_bits, rsa->e, NULL, sqrt2,
                        pow2_prime_bits_100, ctx, cb) ||
        !BN_GENCB_call(cb, 3, 0) ||
        !generate_prime(rsa->q, prime_bits, rsa->e, rsa->p, sqrt2,
                        pow2_prime_bits_100, ctx, cb) ||
        !BN_GENCB_call(cb, 3, 1)) {
      goto bn_err;
    }

    // Order the primes so p >= q, as assumed by the CRT code.
    if (BN_cmp(rsa->p, rsa->q) < 0) {
      BIGNUM *tmp = rsa->p;
      rsa->p = rsa->q;
      rsa->q = tmp;
    }

    // Calculate d = e^(-1) (mod lcm(p-1, q-1)), per FIPS 186-4. This differs
    // from typical RSA implementations which use (p-1)*(q-1).
    //
    // Note this means the size of d might reveal information about p-1 and
    // q-1. However, we do operations with Chinese Remainder Theorem, so we only
    // use d (mod p-1) and d (mod q-1) as exponents. Using a minimal totient
    // does not affect those two values.
    int no_inverse;
    if (!bn_usub_consttime(pm1, rsa->p, BN_value_one()) ||
        !bn_usub_consttime(qm1, rsa->q, BN_value_one()) ||
        !bn_lcm_consttime(totient, pm1, qm1, ctx) ||
        !bn_mod_inverse_consttime(rsa->d, &no_inverse, rsa->e, totient, ctx)) {
      goto bn_err;
    }

    // Retry if |rsa->d| <= 2^|prime_bits|. See appendix B.3.1's guidance on
    // values for d. When we retry, p and q are discarded, so it is safe to leak
    // this comparison.
  } while (constant_time_declassify_int(BN_cmp(rsa->d, pow2_prime_bits) <= 0));

  assert(BN_num_bits(pm1) == (unsigned)prime_bits);
  assert(BN_num_bits(qm1) == (unsigned)prime_bits);
  if (  // Calculate n.
      !bn_mul_consttime(rsa->n, rsa->p, rsa->q, ctx) ||
      // Calculate d mod (p-1).
      !bn_div_consttime(NULL, rsa->dmp1, rsa->d, pm1, prime_bits, ctx) ||
      // Calculate d mod (q-1)
      !bn_div_consttime(NULL, rsa->dmq1, rsa->d, qm1, prime_bits, ctx)) {
    goto bn_err;
  }
  bn_set_minimal_width(rsa->n);

  // |rsa->n| is computed from the private key, but is public.
  bn_declassify(rsa->n);

  // Calculate q^-1 mod p.
  rsa->mont_p = BN_MONT_CTX_new_consttime(rsa->p, ctx);
  if (rsa->mont_p == NULL ||  //
      !bn_mod_inverse_secret_prime(rsa->iqmp, rsa->q, rsa->p, ctx,
                                   rsa->mont_p)) {
    goto bn_err;
  }

  // Sanity-check that |rsa->n| has the specified size. This is implied by
  // |generate_prime|'s bounds.
  if (BN_num_bits(rsa->n) != (unsigned)bits) {
    OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR);
    goto err;
  }

  // The key generation process is complex and thus error-prone. It could be
  // disastrous to generate and then use a bad key so double-check that the key
  // makes sense. Also, while |rsa| is mutable, fill in the cached components.
  if (!RSA_check_key(rsa) ||
      !freeze_private_key(rsa, ctx)) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_INTERNAL_ERROR);
    goto err;
  }

  ret = 1;

bn_err:
  // Failures before this label are BIGNUM-level; tag them with the BN library.
  if (!ret) {
    OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN);
  }
err:
  if (ctx != NULL) {
    BN_CTX_end(ctx);
    BN_CTX_free(ctx);
  }
  return ret;
}

// replace_bignum frees *|out| and moves ownership of *|in| into it, leaving
// *|in| NULL.
static void replace_bignum(BIGNUM **out, BIGNUM **in) {
  BN_free(*out);
  *out = *in;
  *in = NULL;
}

// replace_bn_mont_ctx frees *|out| and moves ownership of *|in| into it,
// leaving *|in| NULL.
static void replace_bn_mont_ctx(BN_MONT_CTX **out, BN_MONT_CTX **in) {
  BN_MONT_CTX_free(*out);
  *out = *in;
  *in = NULL;
}

static int RSA_generate_key_ex_maybe_fips(RSA *rsa, int bits,
                                          const BIGNUM *e_value, BN_GENCB *cb,
                                          int check_fips) {
  boringssl_ensure_rsa_self_test();

  if (rsa == NULL) {
    // NOTE(review): the error library here is |EC|, not |RSA| — this looks like
    // a copy-paste artifact; harmless for the error reason, but worth
    // confirming against upstream BoringSSL.
    OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }

  RSA *tmp = NULL;
  uint32_t err;
  int ret = 0;

  // |rsa_generate_key_impl|'s 2^-20 failure probability is too high at scale,
  // so we run the FIPS algorithm four times, bringing it down to 2^-80. We
  // should just adjust the retry limit, but FIPS 186-4 prescribes that value
  // and thus results in unnecessary complexity.
  int failures = 0;
  do {
    ERR_clear_error();
    // Generate into scratch space, to avoid leaving partial work on failure.
    tmp = RSA_new();
    if (tmp == NULL) {
      goto out;
    }
    if (rsa_generate_key_impl(tmp, bits, e_value, cb)) {
      break;
    }

    err = ERR_peek_error();
    RSA_free(tmp);
    tmp = NULL;
    failures++;

    // Only retry on |RSA_R_TOO_MANY_ITERATIONS|. This is so a caller-induced
    // failure in |BN_GENCB_call| is still fatal.
  } while (failures < 4 && ERR_GET_LIB(err) == ERR_LIB_RSA &&
           ERR_GET_REASON(err) == RSA_R_TOO_MANY_ITERATIONS);

  if (tmp == NULL || (check_fips && !RSA_check_fips(tmp))) {
    goto out;
  }

  // Success: move every component (including the cached, frozen state) from
  // the scratch key into |rsa|.
  rsa_invalidate_key(rsa);
  replace_bignum(&rsa->n, &tmp->n);
  replace_bignum(&rsa->e, &tmp->e);
  replace_bignum(&rsa->d, &tmp->d);
  replace_bignum(&rsa->p, &tmp->p);
  replace_bignum(&rsa->q, &tmp->q);
  replace_bignum(&rsa->dmp1, &tmp->dmp1);
  replace_bignum(&rsa->dmq1, &tmp->dmq1);
  replace_bignum(&rsa->iqmp, &tmp->iqmp);
  replace_bn_mont_ctx(&rsa->mont_n, &tmp->mont_n);
  replace_bn_mont_ctx(&rsa->mont_p, &tmp->mont_p);
  replace_bn_mont_ctx(&rsa->mont_q, &tmp->mont_q);
  replace_bignum(&rsa->d_fixed, &tmp->d_fixed);
  replace_bignum(&rsa->dmp1_fixed, &tmp->dmp1_fixed);
  replace_bignum(&rsa->dmq1_fixed, &tmp->dmq1_fixed);
  replace_bignum(&rsa->iqmp_mont, &tmp->iqmp_mont);
  rsa->private_key_frozen = tmp->private_key_frozen;
  ret = 1;

out:
  RSA_free(tmp);
  return ret;
}

int RSA_generate_key_ex(RSA *rsa, int bits, const BIGNUM *e_value,
                        BN_GENCB *cb) {
  return RSA_generate_key_ex_maybe_fips(rsa, bits, e_value, cb,
                                        /*check_fips=*/0);
}

int RSA_generate_key_fips(RSA *rsa, int bits, BN_GENCB *cb) {
  // FIPS 186-4 allows 2048-bit and 3072-bit RSA keys (1024-bit and 1536-bit
  // primes, respectively) with the prime generation method we use.
  // Subsequently, IG A.14 stated that larger modulus sizes can be used and ACVP
  // testing supports 4096 bits.
  if (bits != 2048 && bits != 3072 && bits != 4096) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_RSA_PARAMETERS);
    return 0;
  }

  // FIPS keys always use the standard public exponent F4 (65537).
  BIGNUM *e = BN_new();
  int ret = e != NULL &&  //
            BN_set_word(e, RSA_F4) &&
            RSA_generate_key_ex_maybe_fips(rsa, bits, e, cb, /*check_fips=*/1);
  BN_free(e);
  if (ret) {
    FIPS_service_indicator_update_state();
  }
  return ret;
}

DEFINE_METHOD_FUNCTION(RSA_METHOD, RSA_default_method) {
  // All of the methods are NULL to make it easier for the compiler/linker to
  // drop unused functions. The wrapper functions will select the appropriate
  // |rsa_default_*| implementation.
  OPENSSL_memset(out, 0, sizeof(RSA_METHOD));
  out->common.is_static = 1;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/self_check/fips.cc.inc
================================================
/* Copyright 2017 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the angle-bracket target of this #include appears to have been
// stripped during extraction — restore it from upstream BoringSSL.
#include

#include "../../internal.h"
#include "../delocate.h"


// FIPS_mode reports whether this build is the FIPS module (always 1 there,
// except under ASan where the module cannot be built).
int FIPS_mode(void) {
#if defined(BORINGSSL_FIPS) && !defined(OPENSSL_ASAN)
  return 1;
#else
  return 0;
#endif
}

// FIPS mode cannot be toggled at runtime; "setting" it succeeds only when the
// requested state matches the compiled-in state.
int FIPS_mode_set(int on) { return on == FIPS_mode(); }

const char *FIPS_module_name(void) { return "BoringCrypto"; }

int CRYPTO_has_asm(void) {
#if defined(OPENSSL_NO_ASM)
  return 0;
#else
  return 1;
#endif
}

uint32_t FIPS_version(void) {
  return 0;
}

int FIPS_query_algorithm_status(const char *algorithm) {
#if defined(BORINGSSL_FIPS)
  static const char kApprovedAlgorithms[][13] = {
      "AES-CBC",
      "AES-CCM",
      "AES-CTR",
      "AES-ECB",
      "AES-GCM",
      "AES-KW",
      "AES-KWP",
      "ctrDRBG",
      "ECC-SSC",
      "ECDSA-sign",
      "ECDSA-verify",
      "FFC-SSC",
      "HMAC",
      "RSA-sign",
      "RSA-verify",
      "SHA-1",
      "SHA2-224",
      "SHA2-256",
      "SHA2-384",
      "SHA2-512",
      "SHA2-512/256",
  };
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kApprovedAlgorithms); i++) {
    if (strcmp(algorithm, kApprovedAlgorithms[i]) == 0) {
      return 1;
    }
  }
#endif  // BORINGSSL_FIPS

  return 0;
}

#if defined(BORINGSSL_FIPS_COUNTERS)

size_t FIPS_read_counter(enum fips_counter_t counter) {
  size_t index = (size_t)counter;
  if (index > fips_counter_max) {
    abort();
  }

  // Counters are stored per-thread in a lazily-allocated array.
  // NOTE(review): the template arguments of the |reinterpret_cast|s below
  // appear to have been stripped during extraction — restore from upstream.
  const size_t *array = reinterpret_cast(
      CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_FIPS_COUNTERS));
  if (!array) {
    return 0;
  }

  return array[index];
}

void boringssl_fips_inc_counter(enum fips_counter_t counter) {
  size_t index = (size_t)counter;
  if (index > fips_counter_max) {
    abort();
  }

  size_t *array = reinterpret_cast(
      CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_FIPS_COUNTERS));
  if (!array) {
    // First use on this thread: allocate and register the counter array.
    const size_t num_bytes = sizeof(size_t) * (fips_counter_max + 1);
    array = reinterpret_cast(OPENSSL_zalloc(num_bytes));
    if (!array) {
      return;
    }

    if (!CRYPTO_set_thread_local(OPENSSL_THREAD_LOCAL_FIPS_COUNTERS, array,
                                 OPENSSL_free)) {
      // |OPENSSL_free| has already been called by |CRYPTO_set_thread_local|.
      return;
    }
  }

  array[index]++;
}

#else

size_t FIPS_read_counter(enum fips_counter_t counter) { return 0; }

// boringssl_fips_inc_counter is a no-op, inline function in internal.h in this
// case. That should let the compiler optimise away the callsites.

#endif


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/self_check/self_check.cc.inc
================================================
/* Copyright 2017 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the angle-bracket targets of the following #includes appear to
// have been stripped during extraction — restore them from upstream BoringSSL.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../../bcm_support.h"
#include "../../internal.h"
#include "../delocate.h"
#include "../dh/internal.h"
#include "../ec/internal.h"
#include "../ecdsa/internal.h"
#include "../rand/internal.h"
#include "../rsa/internal.h"
#include "../service_indicator/internal.h"
#include "../tls/internal.h"


// MSVC wants to put a NUL byte at the end of non-char arrays and so cannot
// compile the real logic.
#if defined(_MSC_VER) int BORINGSSL_self_test(void) { return 0; } #else static void hexdump(FILE *out, const void *in, size_t len) { const uint8_t *in8 = reinterpret_cast(in); for (size_t i = 0; i < len; i++) { fprintf(out, "%02x", in8[i]); } } static int check_test(const void *expected, const void *actual, size_t expected_len, const char *name) { if (OPENSSL_memcmp(actual, expected, expected_len) != 0) { FILE *err = CRYPTO_get_stderr(); fprintf(err, "%s failed.\nExpected: ", name); hexdump(err, expected, expected_len); fprintf(err, "\nCalculated: "); hexdump(err, actual, expected_len); fprintf(err, "\n"); fflush(err); return 0; } return 1; } static int set_bignum(BIGNUM **out, const uint8_t *in, size_t len) { *out = BN_bin2bn(in, len, NULL); return *out != NULL; } static RSA *self_test_rsa_key(void) { static const uint8_t kN[] = { 0xd3, 0x3a, 0x62, 0x9f, 0x07, 0x77, 0xb0, 0x18, 0xf3, 0xff, 0xfe, 0xcc, 0xc9, 0xa2, 0xc2, 0x3a, 0xa6, 0x1d, 0xd8, 0xf0, 0x26, 0x5b, 0x38, 0x90, 0x17, 0x48, 0x15, 0xce, 0x21, 0xcd, 0xd6, 0x62, 0x99, 0xe2, 0xd7, 0xda, 0x40, 0x80, 0x3c, 0xad, 0x18, 0xb7, 0x26, 0xe9, 0x30, 0x8a, 0x23, 0x3f, 0x68, 0x9a, 0x9c, 0x31, 0x34, 0x91, 0x99, 0x06, 0x11, 0x36, 0xb2, 0x9e, 0x3a, 0xd0, 0xbc, 0xb9, 0x93, 0x4e, 0xb8, 0x72, 0xa1, 0x9f, 0xb6, 0x8c, 0xd5, 0x17, 0x1f, 0x7e, 0xaa, 0x75, 0xbb, 0xdf, 0xa1, 0x70, 0x48, 0xc4, 0xec, 0x9a, 0x51, 0xed, 0x41, 0xc9, 0x74, 0xc0, 0x3e, 0x1e, 0x85, 0x2f, 0xbe, 0x34, 0xc7, 0x65, 0x34, 0x8b, 0x4d, 0x55, 0x4b, 0xe1, 0x45, 0x54, 0x0d, 0x75, 0x7e, 0x89, 0x4d, 0x0c, 0xf6, 0x33, 0xe5, 0xfc, 0xfb, 0x56, 0x1b, 0xf2, 0x39, 0x9d, 0xe0, 0xff, 0x55, 0xcf, 0x02, 0x05, 0xb9, 0x74, 0xd2, 0x91, 0xfc, 0x87, 0xe1, 0xbb, 0x97, 0x2a, 0xe4, 0xdd, 0x20, 0xc0, 0x38, 0x47, 0xc0, 0x76, 0x3f, 0xa1, 0x9b, 0x5c, 0x20, 0xff, 0xff, 0xc7, 0x49, 0x3b, 0x4c, 0xaf, 0x99, 0xa6, 0x3e, 0x82, 0x5c, 0x58, 0x27, 0xce, 0x01, 0x03, 0xc3, 0x16, 0x35, 0x20, 0xe9, 0xf0, 0x15, 0x7a, 0x41, 0xd5, 0x1f, 0x52, 0xea, 0xdf, 0xad, 0x4c, 0xbb, 0x0d, 0xcb, 0x04, 0x91, 0xb0, 
0x95, 0xa8, 0xce, 0x25, 0xfd, 0xd2, 0x62, 0x47, 0x77, 0xee, 0x13, 0xf1, 0x48, 0x72, 0x9e, 0xd9, 0x2d, 0xe6, 0x5f, 0xa4, 0xc6, 0x9e, 0x5a, 0xb2, 0xc6, 0xa2, 0xf7, 0x0a, 0x16, 0x17, 0xae, 0x6b, 0x1c, 0x30, 0x7c, 0x63, 0x08, 0x83, 0xe7, 0x43, 0xec, 0x54, 0x5e, 0x2c, 0x08, 0x0b, 0x5e, 0x46, 0xa7, 0x10, 0x93, 0x43, 0x53, 0x4e, 0xe3, 0x16, 0x73, 0x55, 0xce, 0xf2, 0x94, 0xc0, 0xbe, 0xb3, }; static const uint8_t kE[] = {0x01, 0x00, 0x01}; // 65537 static const uint8_t kD[] = { 0x2f, 0x2c, 0x1e, 0xd2, 0x3d, 0x2c, 0xb1, 0x9b, 0x21, 0x02, 0xce, 0xb8, 0x95, 0x5f, 0x4f, 0xd9, 0x21, 0x38, 0x11, 0x36, 0xb0, 0x9a, 0x36, 0xab, 0x97, 0x47, 0x75, 0xf7, 0x2e, 0xfd, 0x75, 0x1f, 0x58, 0x16, 0x9c, 0xf6, 0x14, 0xe9, 0x8e, 0xa3, 0x69, 0x9d, 0x9d, 0x86, 0xfe, 0x5c, 0x1b, 0x3b, 0x11, 0xf5, 0x55, 0x64, 0x77, 0xc4, 0xfc, 0x53, 0xaa, 0x8c, 0x78, 0x9f, 0x75, 0xab, 0x20, 0x3a, 0xa1, 0x77, 0x37, 0x22, 0x02, 0x8e, 0x54, 0x8a, 0x67, 0x1c, 0x5e, 0xe0, 0x3e, 0xd9, 0x44, 0x37, 0xd1, 0x29, 0xee, 0x56, 0x6c, 0x30, 0x9a, 0x93, 0x4d, 0xd9, 0xdb, 0xc5, 0x03, 0x1a, 0x75, 0xcc, 0x0f, 0xc2, 0x61, 0xb5, 0x6c, 0x62, 0x9f, 0xc6, 0xa8, 0xc7, 0x8a, 0x60, 0x17, 0x11, 0x62, 0x4c, 0xef, 0x74, 0x31, 0x97, 0xad, 0x89, 0x2d, 0xe8, 0x31, 0x1d, 0x8b, 0x58, 0x82, 0xe3, 0x03, 0x1a, 0x6b, 0xdf, 0x3f, 0x3e, 0xa4, 0x27, 0x19, 0xef, 0x46, 0x7a, 0x90, 0xdf, 0xa7, 0xe7, 0xc9, 0x66, 0xab, 0x41, 0x1d, 0x65, 0x78, 0x1c, 0x18, 0x40, 0x5c, 0xd6, 0x87, 0xb5, 0xea, 0x29, 0x44, 0xb3, 0xf5, 0xb3, 0xd2, 0x4f, 0xce, 0x88, 0x78, 0x49, 0x27, 0x4e, 0x0b, 0x30, 0x85, 0xfb, 0x73, 0xfd, 0x8b, 0x32, 0x15, 0xee, 0x1f, 0xc9, 0x0e, 0x89, 0xb9, 0x43, 0x2f, 0xe9, 0x60, 0x8d, 0xda, 0xae, 0x2b, 0x30, 0x99, 0xee, 0x88, 0x81, 0x20, 0x7b, 0x4a, 0xc3, 0x18, 0xf2, 0x94, 0x02, 0x79, 0x94, 0xaa, 0x65, 0xd9, 0x1b, 0x45, 0x2a, 0xac, 0x6e, 0x30, 0x48, 0x57, 0xea, 0xbe, 0x79, 0x7d, 0xfc, 0x67, 0xaa, 0x47, 0xc0, 0xf7, 0x52, 0xfd, 0x0b, 0x63, 0x4e, 0x3d, 0x2e, 0xcc, 0x36, 0xa0, 0xdb, 0x92, 0x0b, 0xa9, 0x1b, 0xeb, 0xc2, 0xd5, 0x08, 0xd3, 0x85, 0x87, 0xf8, 0x5d, 0x1a, 
0xf6, 0xc1, }; static const uint8_t kP[] = { 0xf7, 0x06, 0xa3, 0x98, 0x8a, 0x52, 0xf8, 0x63, 0x68, 0x27, 0x4f, 0x68, 0x7f, 0x34, 0xec, 0x8e, 0x5d, 0xf8, 0x30, 0x92, 0xb3, 0x62, 0x4c, 0xeb, 0xdb, 0x19, 0x6b, 0x09, 0xc5, 0xa3, 0xf0, 0xbb, 0xff, 0x0f, 0xc2, 0xd4, 0x9b, 0xc9, 0x54, 0x4f, 0xb9, 0xf9, 0xe1, 0x4c, 0xf0, 0xe3, 0x4c, 0x90, 0xda, 0x7a, 0x01, 0xc2, 0x9f, 0xc4, 0xc8, 0x8e, 0xb1, 0x1e, 0x93, 0x75, 0x75, 0xc6, 0x13, 0x25, 0xc3, 0xee, 0x3b, 0xcc, 0xb8, 0x72, 0x6c, 0x49, 0xb0, 0x09, 0xfb, 0xab, 0x44, 0xeb, 0x4d, 0x40, 0xf0, 0x61, 0x6b, 0xe5, 0xe6, 0xfe, 0x3e, 0x0a, 0x77, 0x26, 0x39, 0x76, 0x3d, 0x4c, 0x3e, 0x9b, 0x5b, 0xc0, 0xaf, 0xa2, 0x58, 0x76, 0xb0, 0xe9, 0xda, 0x7f, 0x0e, 0x78, 0xc9, 0x76, 0x49, 0x5c, 0xfa, 0xb3, 0xb0, 0x15, 0x4b, 0x41, 0xc7, 0x27, 0xa4, 0x75, 0x28, 0x5c, 0x30, 0x69, 0x50, 0x29, }; static const uint8_t kQ[] = { 0xda, 0xe6, 0xd2, 0xbb, 0x44, 0xff, 0x4f, 0xdf, 0x57, 0xc1, 0x11, 0xa3, 0x51, 0xba, 0x17, 0x89, 0x4c, 0x01, 0xc0, 0x0c, 0x97, 0x34, 0x50, 0xcf, 0x32, 0x1e, 0xc0, 0xbd, 0x7b, 0x35, 0xb5, 0x6a, 0x26, 0xcc, 0xea, 0x4c, 0x8e, 0x87, 0x4a, 0x67, 0x8b, 0xd3, 0xe5, 0x4f, 0x3a, 0x60, 0x48, 0x59, 0x04, 0x93, 0x39, 0xd7, 0x7c, 0xfb, 0x19, 0x1a, 0x34, 0xd5, 0xe8, 0xaf, 0xe7, 0x22, 0x2c, 0x0d, 0xc2, 0x91, 0x69, 0xb6, 0xe9, 0x2a, 0xe9, 0x1c, 0x4c, 0x6e, 0x8f, 0x40, 0xf5, 0xa8, 0x3e, 0x82, 0x69, 0x69, 0xbe, 0x9f, 0x7d, 0x5c, 0x7f, 0x92, 0x78, 0x17, 0xa3, 0x6d, 0x41, 0x2d, 0x72, 0xed, 0x3f, 0x71, 0xfa, 0x97, 0xb4, 0x63, 0xe4, 0x4f, 0xd9, 0x46, 0x03, 0xfb, 0x00, 0xeb, 0x30, 0x70, 0xb9, 0x51, 0xd9, 0x0a, 0xd2, 0xf8, 0x50, 0xd4, 0xfb, 0x43, 0x84, 0xf8, 0xac, 0x58, 0xc3, 0x7b, }; static const uint8_t kDModPMinusOne[] = { 0xf5, 0x50, 0x8f, 0x88, 0x7d, 0xdd, 0xb5, 0xb4, 0x2a, 0x8b, 0xd7, 0x4d, 0x23, 0xfe, 0xaf, 0xe9, 0x16, 0x22, 0xd2, 0x41, 0xed, 0x88, 0xf2, 0x70, 0xcb, 0x4d, 0xeb, 0xc1, 0x71, 0x97, 0xc4, 0x0b, 0x3e, 0x5a, 0x2d, 0x96, 0xab, 0xfa, 0xfd, 0x12, 0x8b, 0xd3, 0x3e, 0x4e, 0x05, 0x6f, 0x04, 0xeb, 0x59, 0x3c, 0x0e, 0xa1, 0x73, 0xbe, 0x9d, 0x99, 
0x2f, 0x05, 0xf9, 0x54, 0x8d, 0x98, 0x1e, 0x0d, 0xc4, 0x0c, 0xc3, 0x30, 0x23, 0xff, 0xe5, 0xd0, 0x2b, 0xd5, 0x4e, 0x2b, 0xa0, 0xae, 0xb8, 0x32, 0x84, 0x45, 0x8b, 0x3c, 0x6d, 0xf0, 0x10, 0x36, 0x9e, 0x6a, 0xc4, 0x67, 0xca, 0xa9, 0xfc, 0x06, 0x96, 0xd0, 0xbc, 0xda, 0xd1, 0x55, 0x55, 0x8d, 0x77, 0x21, 0xf4, 0x82, 0x39, 0x37, 0x91, 0xd5, 0x97, 0x56, 0x78, 0xc8, 0x3c, 0xcb, 0x5e, 0xf6, 0xdc, 0x58, 0x48, 0xb3, 0x7c, 0x94, 0x29, 0x39, }; static const uint8_t kDModQMinusOne[] = { 0x64, 0x65, 0xbd, 0x7d, 0x1a, 0x96, 0x26, 0xa1, 0xfe, 0xf3, 0x94, 0x0d, 0x5d, 0xec, 0x85, 0xe2, 0xf8, 0xb3, 0x4c, 0xcb, 0xf9, 0x85, 0x8b, 0x12, 0x9c, 0xa0, 0x32, 0x32, 0x35, 0x92, 0x5a, 0x94, 0x47, 0x1b, 0x70, 0xd2, 0x90, 0x04, 0x49, 0x01, 0xd8, 0xc5, 0xe4, 0xc4, 0x43, 0xb7, 0xe9, 0x36, 0xba, 0xbc, 0x73, 0xa8, 0xfb, 0xaf, 0x86, 0xc1, 0xd8, 0x3d, 0xcb, 0xac, 0xf1, 0xcb, 0x60, 0x7d, 0x27, 0x21, 0xde, 0x64, 0x7f, 0xe8, 0xa8, 0x65, 0xcc, 0x40, 0x60, 0xff, 0xa0, 0x2b, 0xfc, 0x0f, 0x80, 0x1d, 0x79, 0xca, 0x58, 0x8a, 0xd6, 0x0f, 0xed, 0x78, 0x9a, 0x02, 0x00, 0x04, 0xc2, 0x53, 0x41, 0xe8, 0x1a, 0xd0, 0xfd, 0x71, 0x5b, 0x43, 0xac, 0x19, 0x4a, 0xb6, 0x12, 0xa3, 0xcb, 0xe1, 0xc7, 0x7d, 0x5c, 0x98, 0x74, 0x4e, 0x63, 0x74, 0x6b, 0x91, 0x7a, 0x29, 0x3b, 0x92, 0xb2, 0x85, }; static const uint8_t kQInverseModP[] = { 0xd0, 0xde, 0x19, 0xda, 0x1e, 0xa2, 0xd8, 0x8f, 0x1c, 0x92, 0x73, 0xb0, 0xc9, 0x90, 0xc7, 0xf5, 0xec, 0xc5, 0x89, 0x01, 0x05, 0x78, 0x11, 0x2d, 0x74, 0x34, 0x44, 0xad, 0xd5, 0xf7, 0xa4, 0xfe, 0x9f, 0x25, 0x4d, 0x0b, 0x92, 0xe3, 0xb8, 0x7d, 0xd3, 0xfd, 0xa5, 0xca, 0x95, 0x60, 0xa3, 0xf9, 0x55, 0x42, 0x14, 0xb2, 0x45, 0x51, 0x9f, 0x73, 0x88, 0x43, 0x8a, 0xd1, 0x65, 0x9e, 0xd1, 0xf7, 0x82, 0x2a, 0x2a, 0x8d, 0x70, 0x56, 0xe3, 0xef, 0xc9, 0x0e, 0x2a, 0x2c, 0x15, 0xaf, 0x7f, 0x97, 0x81, 0x66, 0xf3, 0xb5, 0x00, 0xa9, 0x26, 0xcc, 0x1e, 0xc2, 0x98, 0xdd, 0xd3, 0x37, 0x06, 0x79, 0xb3, 0x60, 0x58, 0x79, 0x99, 0x3f, 0xa3, 0x15, 0x1f, 0x31, 0xe3, 0x11, 0x88, 0x4c, 0x35, 0x57, 0xfa, 0x79, 0xd7, 0xd8, 0x72, 0xee, 
/* Tail of kQInverseModP (the CRT coefficient q^-1 mod p, installed into rsa->iqmp below) for the fixed RSA self-test key. */ 0x73, 0x95, 0x89, 0x29, 0xc7, 0x05, 0x27, 0x68, 0x90, 0x15, }; RSA *rsa = RSA_new(); if (rsa == NULL || // !set_bignum(&rsa->n, kN, sizeof(kN)) || !set_bignum(&rsa->e, kE, sizeof(kE)) || !set_bignum(&rsa->d, kD, sizeof(kD)) || !set_bignum(&rsa->p, kP, sizeof(kP)) || !set_bignum(&rsa->q, kQ, sizeof(kQ)) || !set_bignum(&rsa->dmp1, kDModPMinusOne, sizeof(kDModPMinusOne)) || !set_bignum(&rsa->dmq1, kDModQMinusOne, sizeof(kDModQMinusOne)) || !set_bignum(&rsa->iqmp, kQInverseModP, sizeof(kQInverseModP))) { RSA_free(rsa); return NULL; } return rsa; } /* self_test_ecdsa_key constructs a fixed P-256 key pair for the ECDSA known-answer tests: public point (kQx, kQy) and private scalar kD. Returns NULL (after freeing any partial state) if allocation or key setup fails. */ static EC_KEY *self_test_ecdsa_key(void) { static const uint8_t kQx[] = { 0xc8, 0x15, 0x61, 0xec, 0xf2, 0xe5, 0x4e, 0xde, 0xfe, 0x66, 0x17, 0xdb, 0x1c, 0x7a, 0x34, 0xa7, 0x07, 0x44, 0xdd, 0xb2, 0x61, 0xf2, 0x69, 0xb8, 0x3d, 0xac, 0xfc, 0xd2, 0xad, 0xe5, 0xa6, 0x81, }; static const uint8_t kQy[] = { 0xe0, 0xe2, 0xaf, 0xa3, 0xf9, 0xb6, 0xab, 0xe4, 0xc6, 0x98, 0xef, 0x64, 0x95, 0xf1, 0xbe, 0x49, 0xa3, 0x19, 0x6c, 0x50, 0x56, 0xac, 0xb3, 0x76, 0x3f, 0xe4, 0x50, 0x7e, 0xec, 0x59, 0x6e, 0x88, }; static const uint8_t kD[] = { 0xc6, 0xc1, 0xaa, 0xda, 0x15, 0xb0, 0x76, 0x61, 0xf8, 0x14, 0x2c, 0x6c, 0xaf, 0x0f, 0xdb, 0x24, 0x1a, 0xff, 0x2e, 0xfe, 0x46, 0xc0, 0x93, 0x8b, 0x74, 0xf2, 0xbc, 0xc5, 0x30, 0x52, 0xb0, 0x77, }; EC_KEY *ec_key = EC_KEY_new(); BIGNUM *qx = BN_bin2bn(kQx, sizeof(kQx), NULL); BIGNUM *qy = BN_bin2bn(kQy, sizeof(kQy), NULL); BIGNUM *d = BN_bin2bn(kD, sizeof(kD), NULL); /* The affine-coordinate setter validates that (qx, qy) is on the curve, so a corrupted constant fails here rather than later in the KAT. */ if (ec_key == NULL || qx == NULL || qy == NULL || d == NULL || !EC_KEY_set_group(ec_key, EC_group_p256()) || !EC_KEY_set_public_key_affine_coordinates(ec_key, qx, qy) || !EC_KEY_set_private_key(ec_key, d)) { EC_KEY_free(ec_key); ec_key = NULL; } BN_free(qx); BN_free(qy); BN_free(d); return ec_key; } /* self_test_dh builds the ffdhe2048 group (RFC 7919) with a fixed private key for the FFC DH known-answer test. Returns NULL on failure. */ static DH *self_test_dh(void) { DH *dh = DH_get_rfc7919_2048(); if (!dh) { return NULL; } BIGNUM *priv = BN_new(); if (!priv) { goto err; } // kFFDHE2048PrivateKeyData is a 225-bit value.
(225 because that's the // minimum private key size in // https://tools.ietf.org/html/rfc7919#appendix-A.1.) static const BN_ULONG kFFDHE2048PrivateKeyData[] = { TOBN(0x187be36b, 0xd38a4fa1), TOBN(0x0a152f39, 0x6458f3b8), TOBN(0x0570187e, 0xc422eeb7), TOBN(0x00000001, 0x91173f2a), }; /* bn_set_static_words aliases the static constant into |priv| rather than copying it. */ bn_set_static_words(priv, kFFDHE2048PrivateKeyData, OPENSSL_ARRAY_SIZE(kFFDHE2048PrivateKeyData)); if (!DH_set0_key(dh, NULL, priv)) { goto err; } return dh; err: BN_free(priv); DH_free(dh); return NULL; } // Lazy self-tests // // Self tests that are slow are deferred until the corresponding algorithm is // actually exercised, in FIPS mode. (In non-FIPS mode these tests are only run // when requested by |BORINGSSL_self_test|.) /* boringssl_self_test_rsa runs the RSA sign and verify known-answer tests against the fixed self-test key. Returns 1 on success, 0 on failure. */ static int boringssl_self_test_rsa(void) { int ret = 0; uint8_t output[256]; RSA *const rsa_key = self_test_rsa_key(); if (rsa_key == NULL) { fprintf(CRYPTO_get_stderr(), "RSA key construction failed\n"); goto err; } // Disable blinding for the power-on tests because it's not needed and // triggers an entropy draw.
rsa_key->flags |= RSA_FLAG_NO_BLINDING; // RSA Sign KAT static const uint8_t kRSASignDigest[32] = { 0xd2, 0xb5, 0x6e, 0x53, 0x30, 0x6f, 0x72, 0x0d, 0x79, 0x29, 0xd8, 0x70, 0x8b, 0xf4, 0x6f, 0x1c, 0x22, 0x30, 0x03, 0x05, 0x58, 0x2b, 0x11, 0x5b, 0xed, 0xca, 0xc7, 0x22, 0xd8, 0xaa, 0x5a, 0xb2, }; static const uint8_t kRSASignSignature[256] = { 0x64, 0xce, 0xdd, 0x91, 0x27, 0xb0, 0x4f, 0xb9, 0x14, 0xea, 0xc0, 0xb4, 0xa2, 0x06, 0xc5, 0xd8, 0x40, 0x0f, 0x6c, 0x54, 0xac, 0xf7, 0x02, 0xde, 0x26, 0xbb, 0xfd, 0x33, 0xe5, 0x2f, 0x4d, 0xb1, 0x53, 0xc4, 0xff, 0xd0, 0x5f, 0xea, 0x15, 0x89, 0x83, 0x4c, 0xe3, 0x80, 0x0b, 0xe9, 0x13, 0x82, 0x1d, 0x71, 0x92, 0x1a, 0x03, 0x60, 0x2c, 0xaf, 0xe2, 0x16, 0xc7, 0x43, 0x3f, 0xde, 0x6b, 0x94, 0xfd, 0x6e, 0x08, 0x7b, 0x11, 0xf1, 0x34, 0x52, 0xe5, 0xc0, 0x97, 0x66, 0x4a, 0xe0, 0x91, 0x45, 0xc8, 0xb1, 0x3d, 0x6a, 0x54, 0xc1, 0x32, 0x0f, 0x32, 0xad, 0x25, 0x11, 0x3e, 0x49, 0xad, 0x41, 0xce, 0x7b, 0xca, 0x95, 0x6b, 0x54, 0x5e, 0x86, 0x1b, 0xce, 0xfa, 0x2a, 0x60, 0xe8, 0xfa, 0xbb, 0x23, 0xb2, 0x41, 0xbc, 0x7c, 0x98, 0xec, 0x73, 0x20, 0xed, 0xb3, 0xcf, 0xab, 0x07, 0x24, 0x85, 0x6a, 0x2a, 0x61, 0x76, 0x28, 0xf8, 0x00, 0x80, 0xeb, 0xd9, 0x3a, 0x63, 0xe2, 0x01, 0xb1, 0xee, 0x6d, 0xe9, 0x73, 0xe9, 0xb6, 0x75, 0x2e, 0xf9, 0x81, 0xd9, 0xa8, 0x79, 0xf6, 0x8f, 0xe3, 0x02, 0x7d, 0xf6, 0xea, 0xdc, 0x35, 0xe4, 0x62, 0x0d, 0x91, 0xba, 0x3e, 0x7d, 0x8b, 0x82, 0xbf, 0x15, 0x74, 0x6a, 0x4e, 0x29, 0xf8, 0x9b, 0x2c, 0x94, 0x8d, 0xa7, 0x00, 0x4d, 0x7b, 0xbf, 0x35, 0x07, 0xeb, 0xdd, 0x10, 0xef, 0xd5, 0x2f, 0xe6, 0x98, 0x4b, 0x7e, 0x24, 0x80, 0xe2, 0x01, 0xf2, 0x66, 0xb7, 0xd3, 0x93, 0xfe, 0x2a, 0xb3, 0x74, 0xed, 0xec, 0x4b, 0xb1, 0x5f, 0x5f, 0xee, 0x85, 0x44, 0xa7, 0x26, 0xdf, 0xc1, 0x2e, 0x7a, 0xf3, 0xa5, 0x8f, 0xf8, 0x64, 0xda, 0x65, 0xad, 0x91, 0xe2, 0x90, 0x94, 0x20, 0x16, 0xb8, 0x61, 0xa5, 0x0a, 0x7d, 0xb4, 0xbf, 0xc0, 0x10, 0xaf, 0x72, 0x67, }; unsigned sig_len; if (!rsa_sign_no_self_test(NID_sha256, kRSASignDigest, sizeof(kRSASignDigest), output, &sig_len, 
rsa_key) || !check_test(kRSASignSignature, output, sizeof(kRSASignSignature), "RSA-sign KAT")) { fprintf(CRYPTO_get_stderr(), "RSA signing test failed.\n"); goto err; } // RSA Verify KAT static const uint8_t kRSAVerifyDigest[32] = { 0x09, 0x65, 0x2f, 0xd8, 0xed, 0x9d, 0xc2, 0x6d, 0xbc, 0xbf, 0xf2, 0xa7, 0xa5, 0xed, 0xe1, 0x37, 0x13, 0x78, 0x21, 0x36, 0xcf, 0x8d, 0x22, 0x3d, 0xab, 0x93, 0xb4, 0x12, 0xa8, 0xb5, 0x15, 0x53, }; static const uint8_t kRSAVerifySignature[256] = { 0xab, 0xe2, 0xcb, 0xc1, 0x3d, 0x6b, 0xd3, 0x9d, 0x48, 0xdb, 0x53, 0x34, 0xdd, 0xbf, 0x8d, 0x07, 0x0a, 0x93, 0xbd, 0xcb, 0x10, 0x4e, 0x2c, 0xc5, 0xd0, 0xee, 0x48, 0x6e, 0xe2, 0x95, 0xf6, 0xb3, 0x1b, 0xda, 0x12, 0x6c, 0x41, 0x89, 0x0b, 0x98, 0xb7, 0x3e, 0x70, 0xe6, 0xb6, 0x5d, 0x82, 0xf9, 0x5c, 0x66, 0x31, 0x21, 0x75, 0x5a, 0x90, 0x74, 0x4c, 0x8d, 0x1c, 0x21, 0x14, 0x8a, 0x19, 0x60, 0xbe, 0x0e, 0xca, 0x44, 0x6e, 0x9f, 0xf4, 0x97, 0xf1, 0x34, 0x5c, 0x53, 0x7e, 0xf8, 0x11, 0x9b, 0x9a, 0x43, 0x98, 0xe9, 0x5c, 0x5c, 0x6d, 0xe2, 0xb1, 0xc9, 0x55, 0x90, 0x5c, 0x52, 0x99, 0xd8, 0xce, 0x7a, 0x3b, 0x6a, 0xb7, 0x63, 0x80, 0xd9, 0xba, 0xbd, 0xd1, 0x5f, 0x61, 0x02, 0x37, 0xe1, 0xf3, 0xf2, 0xaa, 0x1c, 0x1f, 0x1e, 0x77, 0x0b, 0x62, 0xfb, 0xb5, 0x96, 0x38, 0x1b, 0x2e, 0xbd, 0xd7, 0x7e, 0xce, 0xf9, 0xc9, 0x0d, 0x4c, 0x92, 0xf7, 0xb6, 0xb0, 0x5f, 0xed, 0x29, 0x36, 0x28, 0x5f, 0xa9, 0x48, 0x26, 0xe6, 0x20, 0x55, 0x32, 0x2a, 0x33, 0xb6, 0xf0, 0x4c, 0x74, 0xce, 0x69, 0xe5, 0xd8, 0xd7, 0x37, 0xfb, 0x83, 0x8b, 0x79, 0xd2, 0xd4, 0x8e, 0x3d, 0xaf, 0x71, 0x38, 0x75, 0x31, 0x88, 0x25, 0x31, 0xa9, 0x5a, 0xc9, 0x64, 0xd0, 0x2e, 0xa4, 0x13, 0xbf, 0x85, 0x95, 0x29, 0x82, 0xbb, 0xc0, 0x89, 0x52, 0x7d, 0xaf, 0xf5, 0xb8, 0x45, 0xc9, 0xa0, 0xf4, 0xd1, 0x4e, 0xf1, 0x95, 0x6d, 0x9c, 0x3a, 0xca, 0xe8, 0x82, 0xd1, 0x2d, 0xa6, 0x6d, 0xa0, 0xf3, 0x57, 0x94, 0xf5, 0xee, 0x32, 0x23, 0x23, 0x33, 0x51, 0x7d, 0xb9, 0x31, 0x52, 0x32, 0xa1, 0x83, 0xb9, 0x91, 0x65, 0x4d, 0xbe, 0xa4, 0x16, 0x15, 0x34, 0x5c, 0x88, 0x53, 0x25, 0x92, 0x67, 0x44, 
0xa5, 0x39, 0x15, }; if (!rsa_verify_no_self_test(NID_sha256, kRSAVerifyDigest, sizeof(kRSAVerifyDigest), kRSAVerifySignature, sizeof(kRSAVerifySignature), rsa_key)) { fprintf(CRYPTO_get_stderr(), "RSA-verify KAT failed.\n"); goto err; } ret = 1; err: RSA_free(rsa_key); return ret; } static int boringssl_self_test_ecc(void) { int ret = 0; EC_KEY *ec_key = NULL; EC_POINT *ec_point_in = NULL; EC_POINT *ec_point_out = NULL; BIGNUM *ec_scalar = NULL; const EC_GROUP *ec_group = NULL; // The 'k' value for ECDSA is fixed to avoid an entropy draw. uint8_t ecdsa_k[32] = {0}; ecdsa_k[31] = 42; ec_key = self_test_ecdsa_key(); if (ec_key == NULL) { fprintf(CRYPTO_get_stderr(), "ECDSA KeyGen failed\n"); goto err; } // ECDSA Sign/Verify KAT static const uint8_t kECDSASignDigest[32] = { 0x1e, 0x35, 0x93, 0x0b, 0xe8, 0x60, 0xd0, 0x94, 0x2c, 0xa7, 0xbb, 0xd6, 0xf6, 0xde, 0xd8, 0x7f, 0x15, 0x7e, 0x4d, 0xe2, 0x4f, 0x81, 0xed, 0x4b, 0x87, 0x5c, 0x0e, 0x01, 0x8e, 0x89, 0xa8, 0x1f, }; static const uint8_t kECDSASignSig[64] = { 0x67, 0x80, 0xc5, 0xfc, 0x70, 0x27, 0x5e, 0x2c, 0x70, 0x61, 0xa0, 0xe7, 0x87, 0x7b, 0xb1, 0x74, 0xde, 0xad, 0xeb, 0x98, 0x87, 0x02, 0x7f, 0x3f, 0xa8, 0x36, 0x54, 0x15, 0x8b, 0xa7, 0xf5, 0x0c, 0x68, 0x04, 0x73, 0x40, 0x94, 0xb2, 0xd1, 0x90, 0xac, 0x2d, 0x0c, 0xd7, 0xa5, 0x7f, 0x2f, 0x2e, 0xb2, 0x62, 0xb0, 0x09, 0x16, 0xe1, 0xa6, 0x70, 0xb5, 0xbb, 0x0d, 0xfd, 0x8e, 0x0c, 0x02, 0x3f, }; uint8_t ecdsa_sign_output[64]; size_t ecdsa_sign_output_len; if (!ecdsa_sign_fixed_with_nonce_for_known_answer_test( kECDSASignDigest, sizeof(kECDSASignDigest), ecdsa_sign_output, &ecdsa_sign_output_len, sizeof(ecdsa_sign_output), ec_key, ecdsa_k, sizeof(ecdsa_k)) || !check_test(kECDSASignSig, ecdsa_sign_output, sizeof(ecdsa_sign_output), "ECDSA-sign signature")) { fprintf(CRYPTO_get_stderr(), "ECDSA-sign KAT failed.\n"); goto err; } static const uint8_t kECDSAVerifyDigest[32] = { 0x78, 0x7c, 0x50, 0x5c, 0x60, 0xc9, 0xe4, 0x13, 0x6c, 0xe4, 0x48, 0xba, 0x93, 0xff, 0x71, 0xfa, 0x9c, 
0x18, 0xf4, 0x17, 0x09, 0x4f, 0xdf, 0x5a, 0xe2, 0x75, 0xc0, 0xcc, 0xd2, 0x67, 0x97, 0xad, }; static const uint8_t kECDSAVerifySig[64] = { 0x67, 0x80, 0xc5, 0xfc, 0x70, 0x27, 0x5e, 0x2c, 0x70, 0x61, 0xa0, 0xe7, 0x87, 0x7b, 0xb1, 0x74, 0xde, 0xad, 0xeb, 0x98, 0x87, 0x02, 0x7f, 0x3f, 0xa8, 0x36, 0x54, 0x15, 0x8b, 0xa7, 0xf5, 0x0c, 0x2d, 0x36, 0xe5, 0x79, 0x97, 0x90, 0xbf, 0xbe, 0x21, 0x83, 0xd3, 0x3e, 0x96, 0xf3, 0xc5, 0x1f, 0x6a, 0x23, 0x2f, 0x2a, 0x24, 0x48, 0x8c, 0x8e, 0x5f, 0x64, 0xc3, 0x7e, 0xa2, 0xcf, 0x05, 0x29, }; if (!ecdsa_verify_fixed_no_self_test( kECDSAVerifyDigest, sizeof(kECDSAVerifyDigest), kECDSAVerifySig, sizeof(kECDSAVerifySig), ec_key)) { fprintf(CRYPTO_get_stderr(), "ECDSA-verify KAT failed.\n"); goto err; } // Primitive Z Computation KAT (IG 9.6). // kP256Point is SHA256("Primitive Z Computation KAT")×G within P-256. static const uint8_t kP256Point[65] = { 0x04, 0x4e, 0xc1, 0x94, 0x8c, 0x5c, 0xf4, 0x37, 0x35, 0x0d, 0xa3, 0xf9, 0x55, 0xf9, 0x8b, 0x26, 0x23, 0x5c, 0x43, 0xe0, 0x83, 0x51, 0x2b, 0x0d, 0x4b, 0x56, 0x24, 0xc3, 0xe4, 0xa5, 0xa8, 0xe2, 0xe9, 0x95, 0xf2, 0xc4, 0xb9, 0xb7, 0x48, 0x7d, 0x2a, 0xae, 0xc5, 0xc0, 0x0a, 0xcc, 0x1b, 0xd0, 0xec, 0xb8, 0xdc, 0xbe, 0x0c, 0xbe, 0x52, 0x79, 0x93, 0x7c, 0x0b, 0x92, 0x2b, 0x7f, 0x17, 0xa5, 0x80, }; // kP256Scalar is SHA256("Primitive Z Computation KAT scalar"). static const uint8_t kP256Scalar[32] = { 0xe7, 0x60, 0x44, 0x91, 0x26, 0x9a, 0xfb, 0x5b, 0x10, 0x2d, 0x6e, 0xa5, 0x2c, 0xb5, 0x9f, 0xeb, 0x70, 0xae, 0xde, 0x6c, 0xe3, 0xbf, 0xb3, 0xe0, 0x10, 0x54, 0x85, 0xab, 0xd8, 0x61, 0xd7, 0x7b, }; // kP256PointResult is |kP256Scalar|×|kP256Point|. 
static const uint8_t kP256PointResult[65] = { 0x04, 0xf1, 0x63, 0x00, 0x88, 0xc5, 0xd5, 0xe9, 0x05, 0x52, 0xac, 0xb6, 0xec, 0x68, 0x76, 0xb8, 0x73, 0x7f, 0x0f, 0x72, 0x34, 0xe6, 0xbb, 0x30, 0x32, 0x22, 0x37, 0xb6, 0x2a, 0x80, 0xe8, 0x9e, 0x6e, 0x6f, 0x36, 0x02, 0xe7, 0x21, 0xd2, 0x31, 0xdb, 0x94, 0x63, 0xb7, 0xd8, 0x19, 0x0e, 0xc2, 0xc0, 0xa7, 0x2f, 0x15, 0x49, 0x1a, 0xa2, 0x7c, 0x41, 0x8f, 0xaf, 0x9c, 0x40, 0xaf, 0x2e, 0x4a, 0x0c, }; ec_group = EC_group_p256(); ec_point_in = EC_POINT_new(ec_group); ec_point_out = EC_POINT_new(ec_group); ec_scalar = BN_new(); uint8_t z_comp_result[65]; if (ec_point_in == NULL || ec_point_out == NULL || ec_scalar == NULL || !EC_POINT_oct2point(ec_group, ec_point_in, kP256Point, sizeof(kP256Point), NULL) || !BN_bin2bn(kP256Scalar, sizeof(kP256Scalar), ec_scalar) || !ec_point_mul_no_self_test(ec_group, ec_point_out, NULL, ec_point_in, ec_scalar, NULL) || !EC_POINT_point2oct(ec_group, ec_point_out, POINT_CONVERSION_UNCOMPRESSED, z_comp_result, sizeof(z_comp_result), NULL) || !check_test(kP256PointResult, z_comp_result, sizeof(z_comp_result), "Z Computation Result")) { fprintf(CRYPTO_get_stderr(), "Z-computation KAT failed.\n"); goto err; } ret = 1; err: EC_KEY_free(ec_key); EC_POINT_free(ec_point_in); EC_POINT_free(ec_point_out); BN_free(ec_scalar); return ret; } static int boringssl_self_test_ffdh(void) { int ret = 0; DH *dh = NULL; BIGNUM *ffdhe2048_value = NULL; // FFC Diffie-Hellman KAT // kFFDHE2048PublicValueData is an arbitrary public value, mod // kFFDHE2048Data. (The private key happens to be 4096.) 
static const BN_ULONG kFFDHE2048PublicValueData[] = { TOBN(0x187be36b, 0xd38a4fa1), TOBN(0x0a152f39, 0x6458f3b8), TOBN(0x0570187e, 0xc422eeb7), TOBN(0x18af7482, 0x91173f2a), TOBN(0xe9fdac6a, 0xcff4eaaa), TOBN(0xf6afebb7, 0x6e589d6c), TOBN(0xf92f8e9a, 0xb7e33fb0), TOBN(0x70acf2aa, 0x4cf36ddd), TOBN(0x561ab426, 0xd07137fd), TOBN(0x5f57d037, 0x430ee91e), TOBN(0xe3e768c8, 0x60d10b8a), TOBN(0xb14884d8, 0xa18af8ce), TOBN(0xf8a98014, 0xa12b74e4), TOBN(0x748d407c, 0x3437b7a8), TOBN(0x627588c4, 0x9875d5a7), TOBN(0xdd24a127, 0x53c8f09d), TOBN(0x85a997d5, 0x0cd51aec), TOBN(0x44f0c619, 0xce348458), TOBN(0x9b894b24, 0x5f6b69a1), TOBN(0xae1302f2, 0xf6d4777e), TOBN(0xe6678eeb, 0x375db18e), TOBN(0x2674e1d6, 0x4fbcbdc8), TOBN(0xb297a823, 0x6fa93d28), TOBN(0x6a12fb70, 0x7c8c0510), TOBN(0x5c6d1aeb, 0xdb06f65b), TOBN(0xe8c2954e, 0x4c1804ca), TOBN(0x06bdeac1, 0xf5500fa7), TOBN(0x6a315604, 0x189cd76b), TOBN(0xbae7b0b3, 0x6e362dc0), TOBN(0xa57c73bd, 0xdc70fb82), TOBN(0xfaff50d2, 0x9d573457), TOBN(0x352bd399, 0xbe84058e), }; static const uint8_t kDHOutput[2048 / 8] = { 0x2a, 0xe6, 0xd3, 0xa6, 0x13, 0x58, 0x8e, 0xce, 0x53, 0xaa, 0xf6, 0x5d, 0x9a, 0xae, 0x02, 0x12, 0xf5, 0x80, 0x3d, 0x06, 0x09, 0x76, 0xac, 0x57, 0x37, 0x9e, 0xab, 0x38, 0x62, 0x25, 0x05, 0x1d, 0xf3, 0xa9, 0x39, 0x60, 0xf6, 0xae, 0x90, 0xed, 0x1e, 0xad, 0x6e, 0xe9, 0xe3, 0xba, 0x27, 0xf6, 0xdb, 0x54, 0xdf, 0xe2, 0xbd, 0xbb, 0x7f, 0xf1, 0x81, 0xac, 0x1a, 0xfa, 0xdb, 0x87, 0x07, 0x98, 0x76, 0x90, 0x21, 0xf2, 0xae, 0xda, 0x0d, 0x84, 0x97, 0x64, 0x0b, 0xbf, 0xb8, 0x8d, 0x10, 0x46, 0xe2, 0xd5, 0xca, 0x1b, 0xbb, 0xe5, 0x37, 0xb2, 0x3b, 0x35, 0xd3, 0x1b, 0x65, 0xea, 0xae, 0xf2, 0x03, 0xe2, 0xb6, 0xde, 0x22, 0xb7, 0x86, 0x49, 0x79, 0xfe, 0xd7, 0x16, 0xf7, 0xdc, 0x9c, 0x59, 0xf5, 0xb7, 0x70, 0xc0, 0x53, 0x42, 0x6f, 0xb1, 0xd2, 0x4e, 0x00, 0x25, 0x4b, 0x2d, 0x5a, 0x9b, 0xd0, 0xe9, 0x27, 0x43, 0xcc, 0x00, 0x66, 0xea, 0x94, 0x7a, 0x0b, 0xb9, 0x89, 0x0c, 0x5e, 0x94, 0xb8, 0x3a, 0x78, 0x9c, 0x4d, 0x84, 0xe6, 0x32, 0x2c, 0x38, 0x7c, 0xf7, 
/* Tail of kDHOutput: the expected 2048-bit shared secret for the FFC DH known-answer test, compared via check_test("FFC DH") below. */ 0x43, 0x9c, 0xd8, 0xb8, 0x1c, 0xce, 0x24, 0x91, 0x20, 0x67, 0x7a, 0x54, 0x1f, 0x7e, 0x86, 0x7f, 0xa1, 0xc1, 0x03, 0x4e, 0x2c, 0x26, 0x71, 0xb2, 0x06, 0x30, 0xb3, 0x6c, 0x15, 0xcc, 0xac, 0x25, 0xe5, 0x37, 0x3f, 0x24, 0x8f, 0x2a, 0x89, 0x5e, 0x3d, 0x43, 0x94, 0xc9, 0x36, 0xae, 0x40, 0x00, 0x6a, 0x0d, 0xb0, 0x6e, 0x8b, 0x2e, 0x70, 0x57, 0xe1, 0x88, 0x53, 0xd6, 0x06, 0x80, 0x2a, 0x4e, 0x5a, 0xf0, 0x1e, 0xaa, 0xcb, 0xab, 0x06, 0x0e, 0x27, 0x0f, 0xd9, 0x88, 0xd9, 0x01, 0xe3, 0x07, 0xeb, 0xdf, 0xc3, 0x12, 0xe3, 0x40, 0x88, 0x7b, 0x5f, 0x59, 0x78, 0x6e, 0x26, 0x20, 0xc3, 0xdf, 0xc8, 0xe4, 0x5e, 0xb8, }; ffdhe2048_value = BN_new(); if (ffdhe2048_value) { bn_set_static_words(ffdhe2048_value, kFFDHE2048PublicValueData, OPENSSL_ARRAY_SIZE(kFFDHE2048PublicValueData)); } dh = self_test_dh(); uint8_t dh_out[sizeof(kDHOutput)]; /* The padded variant is used so the output length is always DH_size(dh), making the KAT comparison length-stable. */ if (dh == NULL || ffdhe2048_value == NULL || sizeof(dh_out) != DH_size(dh) || dh_compute_key_padded_no_self_test(dh_out, ffdhe2048_value, dh) != sizeof(dh_out) || !check_test(kDHOutput, dh_out, sizeof(dh_out), "FFC DH")) { fprintf(CRYPTO_get_stderr(), "FFDH failed.\n"); goto err; } ret = 1; err: DH_free(dh); BN_free(ffdhe2048_value); return ret; } /* In FIPS builds each lazy self-test runs at most once per process (CRYPTO_once) and aborts the process via BORINGSSL_FIPS_abort on failure. The service-indicator lock/unlock pair keeps the KATs themselves from updating the approved-service indicator. */ #if defined(BORINGSSL_FIPS) static void run_self_test_rsa(void) { FIPS_service_indicator_lock_state(); if (!boringssl_self_test_rsa()) { BORINGSSL_FIPS_abort(); } FIPS_service_indicator_unlock_state(); } DEFINE_STATIC_ONCE(g_self_test_once_rsa) void boringssl_ensure_rsa_self_test(void) { CRYPTO_once(g_self_test_once_rsa_bss_get(), run_self_test_rsa); } static void run_self_test_ecc(void) { FIPS_service_indicator_lock_state(); if (!boringssl_self_test_ecc()) { BORINGSSL_FIPS_abort(); } FIPS_service_indicator_unlock_state(); } DEFINE_STATIC_ONCE(g_self_test_once_ecc) void boringssl_ensure_ecc_self_test(void) { CRYPTO_once(g_self_test_once_ecc_bss_get(), run_self_test_ecc); } static void run_self_test_ffdh(void) { FIPS_service_indicator_lock_state(); if (!boringssl_self_test_ffdh()) { BORINGSSL_FIPS_abort(); }
FIPS_service_indicator_unlock_state(); } DEFINE_STATIC_ONCE(g_self_test_once_ffdh) void boringssl_ensure_ffdh_self_test(void) { CRYPTO_once(g_self_test_once_ffdh_bss_get(), run_self_test_ffdh); } #endif // BORINGSSL_FIPS // Startup self tests. // // These tests are run at process start when in FIPS mode. /* boringssl_self_test_sha256 hashes a fixed 16-byte input and compares the digest against the expected value; returns check_test's result (1 pass, 0 fail). */ int boringssl_self_test_sha256(void) { static const uint8_t kInput[16] = { 0xff, 0x3b, 0x85, 0x7d, 0xa7, 0x23, 0x6a, 0x2b, 0xaa, 0x0f, 0x39, 0x6b, 0x51, 0x52, 0x22, 0x17, }; static const uint8_t kPlaintextSHA256[32] = { 0x7f, 0xe4, 0xd5, 0xf1, 0xa1, 0xe3, 0x82, 0x87, 0xd9, 0x58, 0xf5, 0x11, 0xc7, 0x1d, 0x5e, 0x27, 0x5e, 0xcc, 0xd2, 0x66, 0xcf, 0xb9, 0xc8, 0xc6, 0x60, 0xd8, 0x92, 0x1e, 0x57, 0xfd, 0x46, 0x75, }; uint8_t output[SHA256_DIGEST_LENGTH]; // SHA-256 KAT SHA256(kInput, sizeof(kInput), output); return check_test(kPlaintextSHA256, output, sizeof(kPlaintextSHA256), "SHA-256 KAT"); } /* boringssl_self_test_sha512: same shape as the SHA-256 KAT above, for SHA-512. */ int boringssl_self_test_sha512(void) { static const uint8_t kInput[16] = { 0x21, 0x25, 0x12, 0xf8, 0xd2, 0xad, 0x83, 0x22, 0x78, 0x1c, 0x6c, 0x4d, 0x69, 0xa9, 0xda, 0xa1, }; static const uint8_t kPlaintextSHA512[64] = { 0x29, 0x3c, 0x94, 0x35, 0x4e, 0x98, 0x83, 0xe5, 0xc2, 0x78, 0x36, 0x7a, 0xe5, 0x18, 0x90, 0xbf, 0x35, 0x41, 0x01, 0x64, 0x19, 0x8d, 0x26, 0xeb, 0xe1, 0xf8, 0x2f, 0x04, 0x8e, 0xfa, 0x8b, 0x2b, 0xc6, 0xb2, 0x9d, 0x5d, 0x46, 0x76, 0x5a, 0xc8, 0xb5, 0x25, 0xa3, 0xea, 0x52, 0x84, 0x47, 0x6d, 0x6d, 0xf4, 0xc9, 0x71, 0xf3, 0x3d, 0x89, 0x4c, 0x3b, 0x20, 0x8c, 0x5b, 0x75, 0xe8, 0xf8, 0x7c, }; uint8_t output[SHA512_DIGEST_LENGTH]; // SHA-512 KAT SHA512(kInput, sizeof(kInput), output); return check_test(kPlaintextSHA512, output, sizeof(kPlaintextSHA512), "SHA-512 KAT"); } /* boringssl_self_test_hmac_sha256: HMAC-SHA-256 KAT using kInput as both key and message; also checks the reported output length. */ int boringssl_self_test_hmac_sha256(void) { static const uint8_t kInput[16] = { 0xda, 0xd9, 0x12, 0x93, 0xdf, 0xcf, 0x2a, 0x7c, 0x8e, 0xcd, 0x13, 0xfe, 0x35, 0x3f, 0xa7, 0x5b, }; static const uint8_t kPlaintextHMACSHA256[32] = { 0x36, 0x5f, 0x5b, 0xd5, 0xf5, 0xeb, 0xfd, 0xc7, 0x6e, 0x53, 0xa5, 0x73, 0x6d,
0x73, 0x20, 0x13, 0xaa, 0xd3, 0xbc, 0x86, 0x4b, 0xb8, 0x84, 0x94, 0x16, 0x46, 0x88, 0x9c, 0x48, 0xee, 0xa9, 0x0e, }; uint8_t output[EVP_MAX_MD_SIZE]; unsigned output_len; HMAC(EVP_sha256(), kInput, sizeof(kInput), kInput, sizeof(kInput), output, &output_len); return output_len == sizeof(kPlaintextHMACSHA256) && check_test(kPlaintextHMACSHA256, output, sizeof(kPlaintextHMACSHA256), "HMAC-SHA-256 KAT"); } static int boringssl_self_test_fast(void) { static const uint8_t kAESKey[16] = { 'B', 'o', 'r', 'i', 'n', 'g', 'C', 'r', 'y', 'p', 't', 'o', ' ', 'K', 'e', 'y', }; static const uint8_t kAESIV[16] = {0}; EVP_AEAD_CTX aead_ctx; EVP_AEAD_CTX_zero(&aead_ctx); int ret = 0; AES_KEY aes_key; uint8_t aes_iv[16]; uint8_t output[256]; // AES-CBC Encryption KAT static const uint8_t kAESCBCEncPlaintext[32] = { 0x07, 0x86, 0x09, 0xa6, 0xc5, 0xac, 0x25, 0x44, 0x69, 0x9a, 0xdf, 0x68, 0x2f, 0xa3, 0x77, 0xf9, 0xbe, 0x8a, 0xb6, 0xae, 0xf5, 0x63, 0xe8, 0xc5, 0x6a, 0x36, 0xb8, 0x4f, 0x55, 0x7f, 0xad, 0xd3, }; static const uint8_t kAESCBCEncCiphertext[sizeof(kAESCBCEncPlaintext)] = { 0x56, 0x46, 0xc1, 0x41, 0xf4, 0x13, 0xd6, 0xff, 0x62, 0x92, 0x41, 0x7a, 0x26, 0xc6, 0x86, 0xbd, 0x30, 0x5f, 0xb6, 0x57, 0xa7, 0xd2, 0x50, 0x3a, 0xc5, 0x5e, 0x8e, 0x93, 0x40, 0xf2, 0x10, 0xd8, }; memcpy(aes_iv, kAESIV, sizeof(kAESIV)); if (AES_set_encrypt_key(kAESKey, 8 * sizeof(kAESKey), &aes_key) != 0) { fprintf(CRYPTO_get_stderr(), "AES_set_encrypt_key failed.\n"); goto err; } AES_cbc_encrypt(kAESCBCEncPlaintext, output, sizeof(kAESCBCEncPlaintext), &aes_key, aes_iv, AES_ENCRYPT); if (!check_test(kAESCBCEncCiphertext, output, sizeof(kAESCBCEncCiphertext), "AES-CBC-encrypt KAT")) { goto err; } // AES-CBC Decryption KAT static const uint8_t kAESCBCDecCiphertext[32] = { 0x34, 0x7a, 0xa5, 0xa0, 0x24, 0xb2, 0x82, 0x57, 0xb3, 0x65, 0x10, 0xbe, 0x58, 0x3d, 0x4f, 0x47, 0xad, 0xb7, 0xbb, 0xee, 0xdc, 0x60, 0x05, 0xbb, 0xbd, 0x0d, 0x0a, 0x9f, 0x06, 0xbb, 0x7b, 0x10, }; static const uint8_t 
kAESCBCDecPlaintext[sizeof(kAESCBCDecCiphertext)] = { 0x51, 0xa7, 0xa0, 0x1f, 0x6b, 0x79, 0x6c, 0xcd, 0x48, 0x03, 0xa1, 0x41, 0xdc, 0x56, 0xa6, 0xc2, 0x16, 0xb5, 0xd1, 0xd3, 0xb7, 0x06, 0xb2, 0x25, 0x6f, 0xa6, 0xd0, 0xd2, 0x0e, 0x6f, 0x19, 0xb5, }; memcpy(aes_iv, kAESIV, sizeof(kAESIV)); if (AES_set_decrypt_key(kAESKey, 8 * sizeof(kAESKey), &aes_key) != 0) { fprintf(CRYPTO_get_stderr(), "AES_set_decrypt_key failed.\n"); goto err; } AES_cbc_encrypt(kAESCBCDecCiphertext, output, sizeof(kAESCBCDecCiphertext), &aes_key, aes_iv, AES_DECRYPT); if (!check_test(kAESCBCDecPlaintext, output, sizeof(kAESCBCDecPlaintext), "AES-CBC-decrypt KAT")) { goto err; } size_t out_len; uint8_t nonce[EVP_AEAD_MAX_NONCE_LENGTH]; OPENSSL_memset(nonce, 0, sizeof(nonce)); if (!EVP_AEAD_CTX_init(&aead_ctx, EVP_aead_aes_128_gcm(), kAESKey, sizeof(kAESKey), 0, NULL)) { fprintf(CRYPTO_get_stderr(), "EVP_AEAD_CTX_init for AES-128-GCM failed.\n"); goto err; } // AES-GCM Encryption KAT static const uint8_t kAESGCMEncPlaintext[32] = { 0x8f, 0xcc, 0x40, 0x99, 0x80, 0x8e, 0x75, 0xca, 0xaf, 0xf5, 0x82, 0x89, 0x88, 0x48, 0xa8, 0x8d, 0x80, 0x8b, 0x55, 0xab, 0x4e, 0x93, 0x70, 0x79, 0x7d, 0x94, 0x0b, 0xe8, 0xcc, 0x1d, 0x78, 0x84, }; static const uint8_t kAESGCMCiphertext[sizeof(kAESGCMEncPlaintext) + 16] = { 0x87, 0x7b, 0xd5, 0x8d, 0x96, 0x3e, 0x4b, 0xe6, 0x64, 0x94, 0x40, 0x2f, 0x61, 0x9b, 0x7e, 0x56, 0x52, 0x7d, 0xa4, 0x5a, 0xf9, 0xa6, 0xe2, 0xdb, 0x1c, 0x63, 0x2e, 0x97, 0x93, 0x0f, 0xfb, 0xed, 0xb5, 0x9e, 0x1c, 0x20, 0xb2, 0xb0, 0x58, 0xda, 0x48, 0x07, 0x2d, 0xbd, 0x96, 0x0d, 0x34, 0xc6, }; if (!EVP_AEAD_CTX_seal(&aead_ctx, output, &out_len, sizeof(output), nonce, EVP_AEAD_nonce_length(EVP_aead_aes_128_gcm()), kAESGCMEncPlaintext, sizeof(kAESGCMEncPlaintext), NULL, 0) || !check_test(kAESGCMCiphertext, output, sizeof(kAESGCMCiphertext), "AES-GCM-encrypt KAT")) { fprintf(CRYPTO_get_stderr(), "EVP_AEAD_CTX_seal for AES-128-GCM failed.\n"); goto err; } // AES-GCM Decryption KAT static const uint8_t 
kAESGCMDecCiphertext[48] = { 0x35, 0xf3, 0x05, 0x8f, 0x87, 0x57, 0x60, 0xff, 0x09, 0xd3, 0x12, 0x0f, 0x70, 0xc4, 0xbc, 0x9e, 0xd7, 0xa8, 0x68, 0x72, 0xe1, 0x34, 0x52, 0x20, 0x21, 0x76, 0xf7, 0x37, 0x1a, 0xe0, 0x4f, 0xaa, 0xe1, 0xdd, 0x39, 0x19, 0x20, 0xf5, 0xd1, 0x39, 0x53, 0xd8, 0x96, 0x78, 0x59, 0x94, 0x82, 0x3c, }; static const uint8_t kAESGCMDecPlaintext[sizeof(kAESGCMDecCiphertext) - 16] = { 0x3d, 0x44, 0x90, 0x9b, 0x91, 0xe7, 0x5e, 0xd3, 0xc2, 0xb2, 0xd0, 0xa9, 0x99, 0x17, 0x6a, 0x45, 0x05, 0x5e, 0x99, 0x83, 0x56, 0x01, 0xc0, 0x82, 0x40, 0x81, 0xd2, 0x48, 0x45, 0xf2, 0xcc, 0xc3, }; if (!EVP_AEAD_CTX_open(&aead_ctx, output, &out_len, sizeof(output), nonce, EVP_AEAD_nonce_length(EVP_aead_aes_128_gcm()), kAESGCMDecCiphertext, sizeof(kAESGCMDecCiphertext), NULL, 0) || !check_test(kAESGCMDecPlaintext, output, sizeof(kAESGCMDecPlaintext), "AES-GCM-decrypt KAT")) { fprintf(CRYPTO_get_stderr(), "AES-GCM-decrypt KAT failed because EVP_AEAD_CTX_open failed.\n"); goto err; } // SHA-1 KAT static const uint8_t kSHA1Input[16] = { 0x13, 0x2f, 0xd9, 0xba, 0xd5, 0xc1, 0x82, 0x62, 0x63, 0xba, 0xfb, 0xb6, 0x99, 0xf7, 0x07, 0xa5, }; static const uint8_t kSHA1Digest[20] = { 0x94, 0x19, 0x55, 0x93, 0x0a, 0x58, 0x29, 0x38, 0xeb, 0xf5, 0x09, 0x11, 0x6d, 0x1a, 0xfd, 0x0f, 0x1e, 0x11, 0xe3, 0xcb, }; SHA1(kSHA1Input, sizeof(kSHA1Input), output); if (!check_test(kSHA1Digest, output, sizeof(kSHA1Digest), "SHA-1 KAT")) { goto err; } if (!boringssl_self_test_sha256() || !boringssl_self_test_sha512() || !boringssl_self_test_hmac_sha256()) { goto err; } // DBRG KAT static const uint8_t kDRBGEntropy[48] = { 0xc4, 0xda, 0x07, 0x40, 0xd5, 0x05, 0xf1, 0xee, 0x28, 0x0b, 0x95, 0xe5, 0x8c, 0x49, 0x31, 0xac, 0x6d, 0xe8, 0x46, 0xa0, 0x15, 0x2f, 0xbb, 0x4a, 0x3f, 0x17, 0x4c, 0xf4, 0x78, 0x7a, 0x4f, 0x1a, 0x40, 0xc2, 0xb5, 0x0b, 0xab, 0xe1, 0x4a, 0xae, 0x53, 0x0b, 0xe5, 0x88, 0x6d, 0x91, 0x0a, 0x27, }; static const uint8_t kDRBGPersonalization[18] = { 'B', 'C', 'M', 'P', 'e', 'r', 's', 'o', 'n', 'a', 
'l', 'i', 'z', 'a', 't', 'i', 'o', 'n'}; static const uint8_t kDRBGAD[16] = {'B', 'C', 'M', ' ', 'D', 'R', 'B', 'G', ' ', 'K', 'A', 'T', ' ', 'A', 'D', ' '}; static const uint8_t kDRBGOutput[64] = { 0x19, 0x1f, 0x2b, 0x49, 0x76, 0x85, 0xfd, 0x51, 0xb6, 0x56, 0xbc, 0x1c, 0x7d, 0xd5, 0xdd, 0x44, 0x76, 0xa3, 0x5e, 0x17, 0x9b, 0x8e, 0xb8, 0x98, 0x65, 0x12, 0xca, 0x35, 0x6c, 0xa0, 0x6f, 0xa0, 0x22, 0xe4, 0xf6, 0xd8, 0x43, 0xed, 0x4e, 0x2d, 0x97, 0x39, 0x43, 0x3b, 0x57, 0xfc, 0x23, 0x3f, 0x71, 0x0a, 0xe0, 0xed, 0xfe, 0xd5, 0xb8, 0x67, 0x7a, 0x00, 0x39, 0xb2, 0x6e, 0xa9, 0x25, 0x97, }; static const uint8_t kDRBGEntropy2[48] = { 0xc7, 0x16, 0x1c, 0xa3, 0x6c, 0x23, 0x09, 0xb7, 0x16, 0xe9, 0x85, 0x9b, 0xb9, 0x6c, 0x6d, 0x49, 0xbd, 0xc8, 0x35, 0x21, 0x03, 0xa1, 0x8c, 0xd2, 0x4e, 0xf4, 0x2e, 0xc9, 0x7e, 0xf4, 0x6b, 0xf4, 0x46, 0xeb, 0x1a, 0x45, 0x76, 0xc1, 0x86, 0xe9, 0x35, 0x18, 0x03, 0x76, 0x3a, 0x79, 0x12, 0xfe, }; static const uint8_t kDRBGReseedOutput[64] = { 0x00, 0xf2, 0x05, 0xaa, 0xfd, 0x11, 0x6c, 0x77, 0xbc, 0x81, 0x86, 0x99, 0xca, 0x51, 0xcf, 0x80, 0x15, 0x9f, 0x02, 0x9e, 0x0b, 0xcd, 0x26, 0xc8, 0x4b, 0x87, 0x8a, 0x15, 0x1a, 0xdd, 0xf2, 0xf3, 0xeb, 0x94, 0x0b, 0x08, 0xc8, 0xc9, 0x57, 0xa4, 0x0b, 0x4b, 0x0f, 0x13, 0xde, 0x7c, 0x0c, 0x6a, 0xac, 0x34, 0x4a, 0x9a, 0xf2, 0xd0, 0x83, 0x02, 0x05, 0x17, 0xc9, 0x81, 0x8f, 0x2a, 0x81, 0x92, }; CTR_DRBG_STATE drbg; if (!CTR_DRBG_init(&drbg, kDRBGEntropy, kDRBGPersonalization, sizeof(kDRBGPersonalization)) || !CTR_DRBG_generate(&drbg, output, sizeof(kDRBGOutput), kDRBGAD, sizeof(kDRBGAD)) || !check_test(kDRBGOutput, output, sizeof(kDRBGOutput), "DRBG Generate KAT") || !CTR_DRBG_reseed(&drbg, kDRBGEntropy2, kDRBGAD, sizeof(kDRBGAD)) || !CTR_DRBG_generate(&drbg, output, sizeof(kDRBGReseedOutput), kDRBGAD, sizeof(kDRBGAD)) || !check_test(kDRBGReseedOutput, output, sizeof(kDRBGReseedOutput), "DRBG-reseed KAT")) { fprintf(CRYPTO_get_stderr(), "CTR-DRBG failed.\n"); goto err; } CTR_DRBG_clear(&drbg); CTR_DRBG_STATE kZeroDRBG; 
memset(&kZeroDRBG, 0, sizeof(kZeroDRBG)); if (!check_test(&kZeroDRBG, &drbg, sizeof(drbg), "DRBG Clear KAT")) { goto err; } // TLS KDF KAT static const char kTLSLabel[] = "FIPS self test"; static const uint8_t kTLSSeed1[16] = { 0x8f, 0x0d, 0xe8, 0xb6, 0x90, 0x8f, 0xb1, 0xd2, 0x6d, 0x51, 0xf4, 0x79, 0x18, 0x63, 0x51, 0x65, }; static const uint8_t kTLSSeed2[16] = { 0x7d, 0x24, 0x1a, 0x9d, 0x3c, 0x59, 0xbf, 0x3c, 0x31, 0x1e, 0x2b, 0x21, 0x41, 0x8d, 0x32, 0x81, }; static const uint8_t kTLS10Secret[32] = { 0xab, 0xc3, 0x65, 0x7b, 0x09, 0x4c, 0x76, 0x28, 0xa0, 0xb2, 0x82, 0x99, 0x6f, 0xe7, 0x5a, 0x75, 0xf4, 0x98, 0x4f, 0xd9, 0x4d, 0x4e, 0xcc, 0x2f, 0xcf, 0x53, 0xa2, 0xc4, 0x69, 0xa3, 0xf7, 0x31, }; static const uint8_t kTLS10Output[32] = { 0x69, 0x7c, 0x4e, 0x2c, 0xee, 0x82, 0xb1, 0xd2, 0x8b, 0xac, 0x90, 0x7a, 0xa1, 0x8a, 0x81, 0xfe, 0xc5, 0x58, 0x45, 0x57, 0x61, 0x2f, 0x7a, 0x8d, 0x80, 0xfb, 0x44, 0xd8, 0x81, 0x60, 0xe5, 0xf8, }; uint8_t tls10_output[sizeof(kTLS10Output)]; if (!CRYPTO_tls1_prf(EVP_md5_sha1(), tls10_output, sizeof(tls10_output), kTLS10Secret, sizeof(kTLS10Secret), kTLSLabel, sizeof(kTLSLabel), kTLSSeed1, sizeof(kTLSSeed1), kTLSSeed2, sizeof(kTLSSeed2)) || !check_test(kTLS10Output, tls10_output, sizeof(kTLS10Output), "TLS10-KDF KAT")) { fprintf(CRYPTO_get_stderr(), "TLS KDF failed.\n"); goto err; } static const uint8_t kTLS12Secret[32] = { 0xc5, 0x43, 0x8e, 0xe2, 0x6f, 0xd4, 0xac, 0xbd, 0x25, 0x9f, 0xc9, 0x18, 0x55, 0xdc, 0x69, 0xbf, 0x88, 0x4e, 0xe2, 0x93, 0x22, 0xfc, 0xbf, 0xd2, 0x96, 0x6a, 0x46, 0x23, 0xd4, 0x2e, 0xc7, 0x81, }; static const uint8_t kTLS12Output[32] = { 0xee, 0x4a, 0xcd, 0x3f, 0xa3, 0xd3, 0x55, 0x89, 0x9e, 0x6f, 0xf1, 0x38, 0x46, 0x9d, 0x2b, 0x33, 0xaa, 0x7f, 0xc4, 0x7f, 0x51, 0x85, 0x8a, 0xf3, 0x13, 0x84, 0xbf, 0x53, 0x6a, 0x65, 0x37, 0x51, }; uint8_t tls12_output[sizeof(kTLS12Output)]; if (!CRYPTO_tls1_prf(EVP_sha256(), tls12_output, sizeof(tls12_output), kTLS12Secret, sizeof(kTLS12Secret), kTLSLabel, sizeof(kTLSLabel), kTLSSeed1, 
sizeof(kTLSSeed1), kTLSSeed2, sizeof(kTLSSeed2)) || !check_test(kTLS12Output, tls12_output, sizeof(kTLS12Output), "TLS12-KDF KAT")) { fprintf(CRYPTO_get_stderr(), "TLS KDF failed.\n"); goto err; } // TLS v1.3: derives a dummy client-early-traffic secret. static const uint8_t kTLS13Secret[32] = { 0x02, 0x4a, 0x0d, 0x80, 0xf3, 0x57, 0xf2, 0x49, 0x9a, 0x12, 0x44, 0xda, 0xc2, 0x6d, 0xab, 0x66, 0xfc, 0x13, 0xed, 0x85, 0xfc, 0xa7, 0x1d, 0xac, 0xe1, 0x46, 0x21, 0x11, 0x19, 0x52, 0x58, 0x74, }; static const uint8_t kTLS13Salt[16] = { 0x54, 0x61, 0x11, 0x36, 0x75, 0x91, 0xf0, 0xf8, 0x92, 0xec, 0x70, 0xbd, 0x78, 0x2a, 0xef, 0x61, }; static const uint8_t kTLS13Label[] = "c e traffic"; static const uint8_t kTLS13ClientHelloHash[32] = { 0x1d, 0xe8, 0x67, 0xed, 0x93, 0x6a, 0x73, 0x65, 0x9b, 0x05, 0xcf, 0x8a, 0x22, 0x77, 0xb7, 0x37, 0x29, 0xf2, 0x44, 0x94, 0x81, 0x6a, 0x83, 0x33, 0x7f, 0x09, 0xbb, 0x6c, 0xc2, 0x6f, 0x48, 0x9c, }; static const uint8_t kTLS13ExpandLabelOutput[32] = { 0x62, 0x91, 0x52, 0x90, 0x2e, 0xc9, 0xcf, 0x9c, 0x5f, 0x1e, 0x0a, 0xb7, 0x00, 0x33, 0x42, 0x24, 0xc4, 0xe3, 0xba, 0x01, 0x40, 0x32, 0x06, 0xab, 0x09, 0x23, 0x8a, 0xdd, 0x01, 0xa4, 0x05, 0xcd, }; uint8_t tls13_extract_output[32]; size_t tls13_extract_output_len; uint8_t tls13_expand_label_output[32]; if (!HKDF_extract(tls13_extract_output, &tls13_extract_output_len, EVP_sha256(), kTLS13Secret, sizeof(kTLS13Secret), kTLS13Salt, sizeof(kTLS13Salt)) || tls13_extract_output_len != sizeof(tls13_extract_output) || !CRYPTO_tls13_hkdf_expand_label( tls13_expand_label_output, sizeof(tls13_expand_label_output), EVP_sha256(), tls13_extract_output, sizeof(tls13_extract_output), kTLS13Label, sizeof(kTLS13Label) - 1, kTLS13ClientHelloHash, sizeof(kTLS13ClientHelloHash)) || !check_test(kTLS13ExpandLabelOutput, tls13_expand_label_output, sizeof(kTLS13ExpandLabelOutput), "CRYPTO_tls13_hkdf_expand_label")) { fprintf(CRYPTO_get_stderr(), "TLS13-KDF failed.\n"); goto err; } // HKDF static const uint8_t kHKDFSecret[32] = { 
0x68, 0x67, 0x85, 0x04, 0xb9, 0xb3, 0xad, 0xd1, 0x7d, 0x59, 0x67, 0xa1, 0xa7, 0xbd, 0x37, 0x99, 0x3f, 0xd8, 0xa3, 0x3c, 0xe7, 0x30, 0x30, 0x71, 0xf3, 0x9c, 0x09, 0x6d, 0x16, 0x35, 0xb3, 0xc9, }; static const uint8_t kHKDFSalt[32] = { 0x8a, 0xab, 0x18, 0xb4, 0x9b, 0x0a, 0x17, 0xf9, 0xe8, 0xe6, 0x97, 0x1a, 0x3d, 0xff, 0xda, 0x9b, 0x26, 0x8b, 0x3d, 0x17, 0x78, 0x0a, 0xb3, 0xea, 0x65, 0xdb, 0x2a, 0xc0, 0x29, 0x9c, 0xfa, 0x72, }; static const uint8_t kHKDFInfo[32] = { 0xe5, 0x6f, 0xf9, 0xe1, 0x18, 0x5e, 0x64, 0x8c, 0x6c, 0x8f, 0xee, 0xc6, 0x93, 0x5a, 0xc5, 0x14, 0x8c, 0xf3, 0xd9, 0x78, 0xd2, 0x3a, 0x86, 0xdd, 0x01, 0xdf, 0xb9, 0xe9, 0x5e, 0xe5, 0x1a, 0x56, }; static const uint8_t kHKDFOutput[32] = { 0xa6, 0x29, 0xb4, 0xd7, 0xf4, 0xc1, 0x16, 0x64, 0x71, 0x5e, 0xa4, 0xa8, 0xe6, 0x60, 0x8c, 0xf3, 0xc1, 0xa5, 0x03, 0xe2, 0x22, 0xf9, 0x89, 0xe2, 0x12, 0x18, 0xbe, 0xef, 0x16, 0x86, 0xe0, 0xec, }; uint8_t hkdf_output[sizeof(kHKDFOutput)]; if (!HKDF(hkdf_output, sizeof(hkdf_output), EVP_sha256(), kHKDFSecret, sizeof(kHKDFSecret), kHKDFSalt, sizeof(kHKDFSalt), kHKDFInfo, sizeof(kHKDFInfo)) || !check_test(kHKDFOutput, hkdf_output, sizeof(kHKDFOutput), "HKDF")) { fprintf(CRYPTO_get_stderr(), "HKDF failed.\n"); goto err; } ret = 1; err: EVP_AEAD_CTX_cleanup(&aead_ctx); return ret; } int BORINGSSL_self_test(void) { if (!boringssl_self_test_fast() || // When requested to run self tests, also run the lazy tests. !boringssl_self_test_rsa() || // !boringssl_self_test_ecc() || // !boringssl_self_test_ffdh()) { return 0; } return 1; } #if defined(BORINGSSL_FIPS) int boringssl_self_test_startup(void) { return boringssl_self_test_fast(); } #endif #endif // !_MSC_VER ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/service_indicator/internal.h ================================================ /* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_SERVICE_INDICATOR_INTERNAL_H #define OPENSSL_HEADER_SERVICE_INDICATOR_INTERNAL_H #include #include /* NOTE(review): both #include directives above have lost their targets — the angle-bracket contents appear to have been stripped during extraction. Restore from the upstream vendored header. */ #if defined(BORINGSSL_FIPS) // FIPS_service_indicator_update_state records that an approved service has been // invoked. void FIPS_service_indicator_update_state(void); // FIPS_service_indicator_lock_state and |FIPS_service_indicator_unlock_state| // stop |FIPS_service_indicator_update_state| from actually updating the service // indicator. This is used when a primitive calls a potentially approved // primitive to avoid false positives. For example, just because a key // generation calls |BCM_rand_bytes| (and thus the approved DRBG) doesn't mean // that the key generation operation itself is approved. // // This lock nests: i.e. locking twice is fine so long as each lock is paired // with an unlock. If the (64-bit) counter overflows, the process aborts. void FIPS_service_indicator_lock_state(void); void FIPS_service_indicator_unlock_state(void); // The following functions may call |FIPS_service_indicator_update_state| if // their parameter specifies an approved operation.
void AEAD_GCM_verify_service_indicator(const EVP_AEAD_CTX *ctx); void AEAD_CCM_verify_service_indicator(const EVP_AEAD_CTX *ctx); void EC_KEY_keygen_verify_service_indicator(const EC_KEY *eckey); void ECDH_verify_service_indicator(const EC_KEY *ec_key); void EVP_Cipher_verify_service_indicator(const EVP_CIPHER_CTX *ctx); void EVP_DigestSign_verify_service_indicator(const EVP_MD_CTX *ctx); void EVP_DigestVerify_verify_service_indicator(const EVP_MD_CTX *ctx); void HMAC_verify_service_indicator(const EVP_MD *evp_md); void TLSKDF_verify_service_indicator(const EVP_MD *dgst); #else // Service indicator functions are no-ops in non-FIPS builds. /* Each stub matches the FIPS-build prototype exactly so callers compile identically in both configurations; [[maybe_unused]] silences the unused-parameter warning on the stubs. */ inline void FIPS_service_indicator_update_state(void) {} inline void FIPS_service_indicator_lock_state(void) {} inline void FIPS_service_indicator_unlock_state(void) {} inline void AEAD_GCM_verify_service_indicator( [[maybe_unused]] const EVP_AEAD_CTX *ctx) {} inline void AEAD_CCM_verify_service_indicator( [[maybe_unused]] const EVP_AEAD_CTX *ctx) {} inline void EC_KEY_keygen_verify_service_indicator( [[maybe_unused]] const EC_KEY *eckey) {} inline void ECDH_verify_service_indicator( [[maybe_unused]] const EC_KEY *ec_key) {} inline void EVP_Cipher_verify_service_indicator( [[maybe_unused]] const EVP_CIPHER_CTX *ctx) {} inline void EVP_DigestSign_verify_service_indicator( [[maybe_unused]] const EVP_MD_CTX *ctx) {} inline void EVP_DigestVerify_verify_service_indicator( [[maybe_unused]] const EVP_MD_CTX *ctx) {} inline void HMAC_verify_service_indicator( [[maybe_unused]] const EVP_MD *evp_md) {} inline void TLSKDF_verify_service_indicator( [[maybe_unused]] const EVP_MD *dgst) {} #endif // BORINGSSL_FIPS #endif // OPENSSL_HEADER_SERVICE_INDICATOR_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/service_indicator/service_indicator.cc.inc ================================================ /* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include "../../evp/internal.h" #include "../../internal.h" #include "internal.h" #if defined(BORINGSSL_FIPS) #define STATE_UNLOCKED 0 // fips_service_indicator_state is a thread-local structure that stores the // state of the FIPS service indicator. struct fips_service_indicator_state { // lock_state records the number of times the indicator has been locked. // When it is zero (i.e. |STATE_UNLOCKED|) then the indicator can be updated. uint64_t lock_state; // counter is the indicator state. It is incremented when an approved service // completes. uint64_t counter; }; // service_indicator_get returns a pointer to the |fips_service_indicator_state| // for the current thread. It returns NULL on error. // // FIPS 140-3 requires that the module should provide the service indicator // for approved services irrespective of whether the user queries it or not. // Hence, it is lazily initialized in any call to an approved service. 
// service_indicator_get returns the lazily-created, thread-local indicator
// state, or NULL if allocation or thread-local registration fails. The state
// is owned by the thread-local machinery and freed with |OPENSSL_free|.
static struct fips_service_indicator_state *service_indicator_get(void) {
  struct fips_service_indicator_state *indicator =
      reinterpret_cast(CRYPTO_get_thread_local(
          OPENSSL_THREAD_LOCAL_FIPS_SERVICE_INDICATOR_STATE));
  if (indicator == NULL) {
    indicator = reinterpret_cast(
        OPENSSL_malloc(sizeof(struct fips_service_indicator_state)));
    if (indicator == NULL) {
      return NULL;
    }
    // Fresh state: unlocked and with a zero counter.
    indicator->lock_state = STATE_UNLOCKED;
    indicator->counter = 0;
    if (!CRYPTO_set_thread_local(
            OPENSSL_THREAD_LOCAL_FIPS_SERVICE_INDICATOR_STATE, indicator,
            OPENSSL_free)) {
      OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR);
      return NULL;
    }
  }
  return indicator;
}

// service_indicator_get_counter returns the current approved-service counter
// for this thread, or zero if the state could not be allocated.
static uint64_t service_indicator_get_counter(void) {
  struct fips_service_indicator_state *indicator = service_indicator_get();
  if (indicator == NULL) {
    return 0;
  }
  return indicator->counter;
}

uint64_t FIPS_service_indicator_before_call(void) {
  return service_indicator_get_counter();
}

uint64_t FIPS_service_indicator_after_call(void) {
  return service_indicator_get_counter();
}

// FIPS_service_indicator_update_state bumps the counter, i.e. records that an
// approved service completed, unless the indicator is currently locked.
void FIPS_service_indicator_update_state(void) {
  struct fips_service_indicator_state *indicator = service_indicator_get();
  if (indicator && indicator->lock_state == STATE_UNLOCKED) {
    indicator->counter++;
  }
}

void FIPS_service_indicator_lock_state(void) {
  struct fips_service_indicator_state *indicator = service_indicator_get();
  if (indicator == NULL) {
    return;
  }

  // |FIPS_service_indicator_lock_state| and
  // |FIPS_service_indicator_unlock_state| should not under/overflow in normal
  // operation. They are still checked and errors added to facilitate testing in
  // service_indicator_test.cc. This should only happen if lock/unlock are
  // called in an incorrect order or multiple times in the same function.
  const uint64_t new_state = indicator->lock_state + 1;
  if (new_state < indicator->lock_state) {
    // Overflow. This would imply that our call stack length has exceeded a
    // |uint64_t|, which is impossible on a 64-bit system.
    abort();
  }

  indicator->lock_state = new_state;
}

void FIPS_service_indicator_unlock_state(void) {
  struct fips_service_indicator_state *indicator = service_indicator_get();
  if (indicator == NULL) {
    return;
  }

  // Unlocking an already-unlocked indicator is a caller bug; abort loudly.
  if (indicator->lock_state == 0) {
    abort();
  }

  indicator->lock_state--;
}

// AES-GCM is approved for 128- and 256-bit keys (not 192-bit).
void AEAD_GCM_verify_service_indicator(const EVP_AEAD_CTX *ctx) {
  const size_t key_len = EVP_AEAD_key_length(ctx->aead);
  if (key_len == 16 || key_len == 32) {
    FIPS_service_indicator_update_state();
  }
}

// AES-CCM is approved only in the 128-bit-key, 4-byte-tag configuration.
void AEAD_CCM_verify_service_indicator(const EVP_AEAD_CTX *ctx) {
  if (EVP_AEAD_key_length(ctx->aead) == 16 && ctx->tag_len == 4) {
    FIPS_service_indicator_update_state();
  }
}

// is_ec_fips_approved returns one if the curve corresponding to the given NID
// is FIPS approved, and zero otherwise.
static int is_ec_fips_approved(int curve_nid) {
  switch (curve_nid) {
    case NID_secp224r1:
    case NID_X9_62_prime256v1:
    case NID_secp384r1:
    case NID_secp521r1:
      return 1;
    default:
      return 0;
  }
}

// is_md_fips_approved_for_signing returns one if the given message digest type
// is FIPS approved for signing, and zero otherwise.
static int is_md_fips_approved_for_signing(int md_type) {
  switch (md_type) {
    case NID_sha224:
    case NID_sha256:
    case NID_sha384:
    case NID_sha512:
    case NID_sha512_256:
      return 1;
    default:
      return 0;
  }
}

// is_md_fips_approved_for_verifying returns one if the given message digest
// type is FIPS approved for verifying, and zero otherwise.
static int is_md_fips_approved_for_verifying(int md_type) {
  switch (md_type) {
    case NID_sha224:
    case NID_sha256:
    case NID_sha384:
    case NID_sha512:
    case NID_sha512_256:
      return 1;
    default:
      return 0;
  }
}

// evp_md_ctx_verify_service_indicator updates the indicator if the
// signature operation configured in |ctx| (key type, key size, digest and,
// for RSA-PSS, padding parameters) is approved. |md_ok| decides whether a
// digest NID is acceptable, letting sign and verify use different allowlists.
static void evp_md_ctx_verify_service_indicator(const EVP_MD_CTX *ctx,
                                                int (*md_ok)(int md_type)) {
  if (EVP_MD_CTX_get0_md(ctx) == NULL) {
    // Signature schemes without a prehash are currently never FIPS approved.
    return;
  }

  EVP_PKEY_CTX *const pctx = ctx->pctx;
  const EVP_PKEY *const pkey = EVP_PKEY_CTX_get0_pkey(pctx);
  const int pkey_type = EVP_PKEY_id(pkey);
  const int md_type = EVP_MD_CTX_type(ctx);

  // EVP_PKEY_RSA_PSS SPKIs aren't supported.
  if (pkey_type == EVP_PKEY_RSA) {
    // Message digest used in the private key should be of the same type
    // as the given one, so we extract the MD type from the |EVP_PKEY|
    // and compare it with the type in |ctx|.
    const EVP_MD *pctx_md;
    if (!EVP_PKEY_CTX_get_signature_md(pctx, &pctx_md)) {
      goto err;
    }
    if (EVP_MD_type(pctx_md) != md_type) {
      goto err;
    }

    int padding;
    if (!EVP_PKEY_CTX_get_rsa_padding(pctx, &padding)) {
      goto err;
    }
    if (padding == RSA_PKCS1_PSS_PADDING) {
      int salt_len;
      const EVP_MD *mgf1_md;
      if (!EVP_PKEY_CTX_get_rsa_pss_saltlen(pctx, &salt_len) ||
          !EVP_PKEY_CTX_get_rsa_mgf1_md(pctx, &mgf1_md) ||
          (salt_len != -1 && salt_len != (int)EVP_MD_size(pctx_md)) ||
          EVP_MD_type(mgf1_md) != md_type) {
        // Only PSS where saltLen == hashLen is tested with ACVP. Cases with
        // non-standard padding functions are also excluded.
        goto err;
      }
    }

    // The approved RSA key sizes for signing are 2048, 3072 and 4096 bits.
    // Note: |EVP_PKEY_size| returns the size in bytes.
    size_t pkey_size = EVP_PKEY_size(ctx->pctx->pkey);

    // Check if the MD type and the RSA key size are approved.
    if (md_ok(md_type) &&
        (pkey_size == 256 || pkey_size == 384 || pkey_size == 512)) {
      FIPS_service_indicator_update_state();
    }
  } else if (pkey_type == EVP_PKEY_EC) {
    // Check if the MD type and the elliptic curve are approved.
    if (md_ok(md_type) &&
        is_ec_fips_approved(EC_GROUP_get_curve_name(
            EC_KEY_get0_group(EVP_PKEY_get0_EC_KEY(ctx->pctx->pkey))))) {
      FIPS_service_indicator_update_state();
    }
  }

err:
  // Ensure that junk errors aren't left on the queue.
  ERR_clear_error();
}

void EC_KEY_keygen_verify_service_indicator(const EC_KEY *eckey) {
  if (is_ec_fips_approved(EC_GROUP_get_curve_name(EC_KEY_get0_group(eckey)))) {
    FIPS_service_indicator_update_state();
  }
}

void ECDH_verify_service_indicator(const EC_KEY *ec_key) {
  if (is_ec_fips_approved(EC_GROUP_get_curve_name(EC_KEY_get0_group(ec_key)))) {
    FIPS_service_indicator_update_state();
  }
}

// AES in ECB, CBC and CTR modes (all three key sizes) is approved; any other
// cipher NID leaves the indicator untouched.
void EVP_Cipher_verify_service_indicator(const EVP_CIPHER_CTX *ctx) {
  switch (EVP_CIPHER_CTX_nid(ctx)) {
    case NID_aes_128_ecb:
    case NID_aes_192_ecb:
    case NID_aes_256_ecb:

    case NID_aes_128_cbc:
    case NID_aes_192_cbc:
    case NID_aes_256_cbc:

    case NID_aes_128_ctr:
    case NID_aes_192_ctr:
    case NID_aes_256_ctr:
      FIPS_service_indicator_update_state();
  }
}

void EVP_DigestVerify_verify_service_indicator(const EVP_MD_CTX *ctx) {
  return evp_md_ctx_verify_service_indicator(ctx,
                                             is_md_fips_approved_for_verifying);
}

void EVP_DigestSign_verify_service_indicator(const EVP_MD_CTX *ctx) {
  return evp_md_ctx_verify_service_indicator(ctx,
                                             is_md_fips_approved_for_signing);
}

// Note SHA-1 is on this list: HMAC-SHA-1 is approved even though SHA-1 alone
// is not approved for signing.
void HMAC_verify_service_indicator(const EVP_MD *evp_md) {
  switch (EVP_MD_type(evp_md)) {
    case NID_sha1:
    case NID_sha224:
    case NID_sha256:
    case NID_sha384:
    case NID_sha512:
    case NID_sha512_256:
      FIPS_service_indicator_update_state();
      break;
  }
}

void TLSKDF_verify_service_indicator(const EVP_MD *md) {
  // HMAC-SHA{256, 384, 512} are approved for use in the KDF in TLS 1.2. These
  // Key Derivation functions are to be used in the context of the TLS protocol.
  switch (EVP_MD_type(md)) {
    case NID_sha256:
    case NID_sha384:
    case NID_sha512:
      FIPS_service_indicator_update_state();
      break;
  }
}

#else

uint64_t FIPS_service_indicator_before_call(void) { return 0; }

uint64_t FIPS_service_indicator_after_call(void) {
  // One is returned so that the return value is always greater than zero, the
  // return value of |FIPS_service_indicator_before_call|. This makes everything
  // report as "approved" in non-FIPS builds.
return 1; } #endif // BORINGSSL_FIPS ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/sha/internal.h ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_SHA_INTERNAL_H #define OPENSSL_HEADER_SHA_INTERNAL_H #include #include "../../internal.h" #if defined(__cplusplus) extern "C" { #endif // Define SHA{n}[_{variant}]_ASM if sha{n}_block_data_order[_{variant}] is // defined in assembly. #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) #define SHA1_ASM_NOHW #define SHA256_ASM_NOHW #define SHA512_ASM_NOHW #define SHA1_ASM_HW inline int sha1_hw_capable(void) { return CRYPTO_is_ARMv8_SHA1_capable(); } #define SHA1_ASM_NEON void sha1_block_data_order_neon(uint32_t state[5], const uint8_t *data, size_t num); #define SHA256_ASM_HW inline int sha256_hw_capable(void) { return CRYPTO_is_ARMv8_SHA256_capable(); } #define SHA256_ASM_NEON void sha256_block_data_order_neon(uint32_t state[8], const uint8_t *data, size_t num); // Armv8.2 SHA-512 instructions are not available in 32-bit. 
#define SHA512_ASM_NEON void sha512_block_data_order_neon(uint64_t state[8], const uint8_t *data, size_t num); #elif !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) #define SHA1_ASM_NOHW #define SHA256_ASM_NOHW #define SHA512_ASM_NOHW #define SHA1_ASM_HW inline int sha1_hw_capable(void) { return CRYPTO_is_ARMv8_SHA1_capable(); } #define SHA256_ASM_HW inline int sha256_hw_capable(void) { return CRYPTO_is_ARMv8_SHA256_capable(); } #define SHA512_ASM_HW inline int sha512_hw_capable(void) { return CRYPTO_is_ARMv8_SHA512_capable(); } #elif !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) #define SHA1_ASM_NOHW #define SHA256_ASM_NOHW #define SHA512_ASM_NOHW #define SHA1_ASM_SSSE3 inline int sha1_ssse3_capable(void) { // TODO(davidben): Do we need to check the FXSR bit? The Intel manual does not // say to. return CRYPTO_is_SSSE3_capable() && CRYPTO_is_FXSR_capable(); } void sha1_block_data_order_ssse3(uint32_t state[5], const uint8_t *data, size_t num); #define SHA1_ASM_AVX inline int sha1_avx_capable(void) { // Pre-Zen AMD CPUs had slow SHLD/SHRD; Zen added the SHA extension; see the // discussion in sha1-586.pl. // // TODO(davidben): Should we enable SHAEXT on 32-bit x86? // TODO(davidben): Do we need to check the FXSR bit? The Intel manual does not // say to. return CRYPTO_is_AVX_capable() && CRYPTO_is_intel_cpu() && CRYPTO_is_FXSR_capable(); } void sha1_block_data_order_avx(uint32_t state[5], const uint8_t *data, size_t num); #define SHA256_ASM_SSSE3 inline int sha256_ssse3_capable(void) { // TODO(davidben): Do we need to check the FXSR bit? The Intel manual does not // say to. return CRYPTO_is_SSSE3_capable() && CRYPTO_is_FXSR_capable(); } void sha256_block_data_order_ssse3(uint32_t state[8], const uint8_t *data, size_t num); #define SHA256_ASM_AVX inline int sha256_avx_capable(void) { // Pre-Zen AMD CPUs had slow SHLD/SHRD; Zen added the SHA extension; see the // discussion in sha1-586.pl. // // TODO(davidben): Should we enable SHAEXT on 32-bit x86? 
// TODO(davidben): Do we need to check the FXSR bit? The Intel manual does not // say to. return CRYPTO_is_AVX_capable() && CRYPTO_is_intel_cpu() && CRYPTO_is_FXSR_capable(); } void sha256_block_data_order_avx(uint32_t state[8], const uint8_t *data, size_t num); #define SHA512_ASM_SSSE3 inline int sha512_ssse3_capable(void) { // TODO(davidben): Do we need to check the FXSR bit? The Intel manual does not // say to. return CRYPTO_is_SSSE3_capable() && CRYPTO_is_FXSR_capable(); } void sha512_block_data_order_ssse3(uint64_t state[8], const uint8_t *data, size_t num); #elif !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) #define SHA1_ASM_NOHW #define SHA256_ASM_NOHW #define SHA512_ASM_NOHW #define SHA1_ASM_HW inline int sha1_hw_capable(void) { return CRYPTO_is_x86_SHA_capable() && CRYPTO_is_SSSE3_capable(); } #define SHA1_ASM_AVX2 inline int sha1_avx2_capable(void) { return CRYPTO_is_AVX2_capable() && CRYPTO_is_BMI2_capable() && CRYPTO_is_BMI1_capable(); } void sha1_block_data_order_avx2(uint32_t state[5], const uint8_t *data, size_t num); #define SHA1_ASM_AVX inline int sha1_avx_capable(void) { // Pre-Zen AMD CPUs had slow SHLD/SHRD; Zen added the SHA extension; see the // discussion in sha1-586.pl. return CRYPTO_is_AVX_capable() && CRYPTO_is_intel_cpu(); } void sha1_block_data_order_avx(uint32_t state[5], const uint8_t *data, size_t num); #define SHA1_ASM_SSSE3 inline int sha1_ssse3_capable(void) { return CRYPTO_is_SSSE3_capable(); } void sha1_block_data_order_ssse3(uint32_t state[5], const uint8_t *data, size_t num); #define SHA256_ASM_HW inline int sha256_hw_capable(void) { // Note that the original assembly did not check SSSE3. return CRYPTO_is_x86_SHA_capable() && CRYPTO_is_SSSE3_capable(); } #define SHA256_ASM_AVX inline int sha256_avx_capable(void) { // Pre-Zen AMD CPUs had slow SHLD/SHRD; Zen added the SHA extension; see the // discussion in sha1-586.pl. 
return CRYPTO_is_AVX_capable() && CRYPTO_is_intel_cpu(); } void sha256_block_data_order_avx(uint32_t state[8], const uint8_t *data, size_t num); #define SHA256_ASM_SSSE3 inline int sha256_ssse3_capable(void) { return CRYPTO_is_SSSE3_capable(); } void sha256_block_data_order_ssse3(uint32_t state[8], const uint8_t *data, size_t num); #define SHA512_ASM_AVX inline int sha512_avx_capable(void) { return CRYPTO_is_AVX_capable(); } void sha512_block_data_order_avx(uint64_t state[8], const uint8_t *data, size_t num); #endif #if defined(SHA1_ASM_HW) void sha1_block_data_order_hw(uint32_t state[5], const uint8_t *data, size_t num); #endif #if defined(SHA1_ASM_NOHW) void sha1_block_data_order_nohw(uint32_t state[5], const uint8_t *data, size_t num); #endif #if defined(SHA256_ASM_HW) void sha256_block_data_order_hw(uint32_t state[8], const uint8_t *data, size_t num); #endif #if defined(SHA256_ASM_NOHW) void sha256_block_data_order_nohw(uint32_t state[8], const uint8_t *data, size_t num); #endif #if defined(SHA512_ASM_HW) void sha512_block_data_order_hw(uint64_t state[8], const uint8_t *data, size_t num); #endif #if defined(SHA512_ASM_NOHW) void sha512_block_data_order_nohw(uint64_t state[8], const uint8_t *data, size_t num); #endif #if defined(__cplusplus) } // extern "C" #endif #endif // OPENSSL_HEADER_SHA_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/sha/sha1.cc.inc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "../../internal.h" #include "../bcm_interface.h" #include "../digest/md32_common.h" #include "../service_indicator/internal.h" #include "internal.h" bcm_infallible BCM_sha1_init(SHA_CTX *sha) { OPENSSL_memset(sha, 0, sizeof(SHA_CTX)); sha->h[0] = 0x67452301UL; sha->h[1] = 0xefcdab89UL; sha->h[2] = 0x98badcfeUL; sha->h[3] = 0x10325476UL; sha->h[4] = 0xc3d2e1f0UL; return bcm_infallible::approved; } #if !defined(SHA1_ASM) static void sha1_block_data_order(uint32_t state[5], const uint8_t *data, size_t num); #endif bcm_infallible BCM_sha1_transform(SHA_CTX *c, const uint8_t data[SHA_CBLOCK]) { sha1_block_data_order(c->h, data, 1); return bcm_infallible::approved; } bcm_infallible BCM_sha1_update(SHA_CTX *c, const void *data, size_t len) { crypto_md32_update(&sha1_block_data_order, c->h, c->data, SHA_CBLOCK, &c->num, &c->Nh, &c->Nl, reinterpret_cast(data), len); return bcm_infallible::approved; } static void sha1_output_state(uint8_t out[SHA_DIGEST_LENGTH], const SHA_CTX *ctx) { CRYPTO_store_u32_be(out, ctx->h[0]); CRYPTO_store_u32_be(out + 4, ctx->h[1]); CRYPTO_store_u32_be(out + 8, ctx->h[2]); CRYPTO_store_u32_be(out + 12, ctx->h[3]); CRYPTO_store_u32_be(out + 16, ctx->h[4]); } bcm_infallible BCM_sha1_final(uint8_t out[SHA_DIGEST_LENGTH], SHA_CTX *c) { crypto_md32_final(&sha1_block_data_order, c->h, c->data, SHA_CBLOCK, &c->num, c->Nh, c->Nl, /*is_big_endian=*/1); sha1_output_state(out, c); FIPS_service_indicator_update_state(); return bcm_infallible::approved; } bcm_infallible BCM_fips_186_2_prf(uint8_t *out, size_t out_len, const uint8_t xkey[SHA_DIGEST_LENGTH]) { // XKEY and XVAL are 160-bit values, but are internally right-padded up to // block size. See FIPS 186-2, Appendix 3.3. This buffer maintains both the // current value of XKEY and the padding. 
uint8_t block[SHA_CBLOCK] = {0}; OPENSSL_memcpy(block, xkey, SHA_DIGEST_LENGTH); while (out_len != 0) { // We always use a zero XSEED, so we can merge the inner and outer loops. // XVAL is also always equal to XKEY. SHA_CTX ctx; BCM_sha1_init(&ctx); BCM_sha1_transform(&ctx, block); // XKEY = (1 + XKEY + w_i) mod 2^b uint32_t carry = 1; for (int i = 4; i >= 0; i--) { uint32_t tmp = CRYPTO_load_u32_be(block + i * 4); tmp = CRYPTO_addc_u32(tmp, ctx.h[i], carry, &carry); CRYPTO_store_u32_be(block + i * 4, tmp); } // Output w_i. if (out_len < SHA_DIGEST_LENGTH) { uint8_t buf[SHA_DIGEST_LENGTH]; sha1_output_state(buf, &ctx); OPENSSL_memcpy(out, buf, out_len); break; } sha1_output_state(out, &ctx); out += SHA_DIGEST_LENGTH; out_len -= SHA_DIGEST_LENGTH; } return bcm_infallible::not_approved; } #define Xupdate(a, ix, ia, ib, ic, id) \ do { \ (a) = ((ia) ^ (ib) ^ (ic) ^ (id)); \ (ix) = (a) = CRYPTO_rotl_u32((a), 1); \ } while (0) #define K_00_19 0x5a827999UL #define K_20_39 0x6ed9eba1UL #define K_40_59 0x8f1bbcdcUL #define K_60_79 0xca62c1d6UL // As pointed out by Wei Dai , F() below can be simplified // to the code in F_00_19. Wei attributes these optimisations to Peter // Gutmann's SHS code, and he attributes it to Rich Schroeppel. 
#define // F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another // tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a #define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d)) #define F_20_39(b, c, d) ((b) ^ (c) ^ (d)) #define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d))) #define F_60_79(b, c, d) F_20_39(b, c, d) #define BODY_00_15(i, a, b, c, d, e, f, xi) \ do { \ (f) = (xi) + (e) + K_00_19 + CRYPTO_rotl_u32((a), 5) + \ F_00_19((b), (c), (d)); \ (b) = CRYPTO_rotl_u32((b), 30); \ } while (0) #define BODY_16_19(i, a, b, c, d, e, f, xi, xa, xb, xc, xd) \ do { \ Xupdate(f, xi, xa, xb, xc, xd); \ (f) += (e) + K_00_19 + CRYPTO_rotl_u32((a), 5) + F_00_19((b), (c), (d)); \ (b) = CRYPTO_rotl_u32((b), 30); \ } while (0) #define BODY_20_31(i, a, b, c, d, e, f, xi, xa, xb, xc, xd) \ do { \ Xupdate(f, xi, xa, xb, xc, xd); \ (f) += (e) + K_20_39 + CRYPTO_rotl_u32((a), 5) + F_20_39((b), (c), (d)); \ (b) = CRYPTO_rotl_u32((b), 30); \ } while (0) #define BODY_32_39(i, a, b, c, d, e, f, xa, xb, xc, xd) \ do { \ Xupdate(f, xa, xa, xb, xc, xd); \ (f) += (e) + K_20_39 + CRYPTO_rotl_u32((a), 5) + F_20_39((b), (c), (d)); \ (b) = CRYPTO_rotl_u32((b), 30); \ } while (0) #define BODY_40_59(i, a, b, c, d, e, f, xa, xb, xc, xd) \ do { \ Xupdate(f, xa, xa, xb, xc, xd); \ (f) += (e) + K_40_59 + CRYPTO_rotl_u32((a), 5) + F_40_59((b), (c), (d)); \ (b) = CRYPTO_rotl_u32((b), 30); \ } while (0) #define BODY_60_79(i, a, b, c, d, e, f, xa, xb, xc, xd) \ do { \ Xupdate(f, xa, xa, xb, xc, xd); \ (f) = (xa) + (e) + K_60_79 + CRYPTO_rotl_u32((a), 5) + \ F_60_79((b), (c), (d)); \ (b) = CRYPTO_rotl_u32((b), 30); \ } while (0) #ifdef X #undef X #endif /* Originally X was an array. As it's automatic it's natural * to expect RISC compiler to accomodate at least part of it in * the register bank, isn't it? 
Unfortunately not all compilers * "find" this expectation reasonable:-( On order to make such * compilers generate better code I replace X[] with a bunch of * X0, X1, etc. See the function body below... * */ #define X(i) XX##i #if !defined(SHA1_ASM) #if !defined(SHA1_ASM_NOHW) static void sha1_block_data_order_nohw(uint32_t state[5], const uint8_t *data, size_t num) { uint32_t A, B, C, D, E, T; uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10, XX11, XX12, XX13, XX14, XX15; A = state[0]; B = state[1]; C = state[2]; D = state[3]; E = state[4]; for (;;) { X(0) = CRYPTO_load_u32_be(data); data += 4; X(1) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(0, A, B, C, D, E, T, X(0)); X(2) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(1, T, A, B, C, D, E, X(1)); X(3) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(2, E, T, A, B, C, D, X(2)); X(4) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(3, D, E, T, A, B, C, X(3)); X(5) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(4, C, D, E, T, A, B, X(4)); X(6) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(5, B, C, D, E, T, A, X(5)); X(7) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(6, A, B, C, D, E, T, X(6)); X(8) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(7, T, A, B, C, D, E, X(7)); X(9) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(8, E, T, A, B, C, D, X(8)); X(10) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(9, D, E, T, A, B, C, X(9)); X(11) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(10, C, D, E, T, A, B, X(10)); X(12) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(11, B, C, D, E, T, A, X(11)); X(13) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(12, A, B, C, D, E, T, X(12)); X(14) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(13, T, A, B, C, D, E, X(13)); X(15) = CRYPTO_load_u32_be(data); data += 4; BODY_00_15(14, E, T, A, B, C, D, X(14)); BODY_00_15(15, D, E, T, A, B, C, X(15)); BODY_16_19(16, C, D, E, T, A, B, X(0), X(0), X(2), X(8), X(13)); BODY_16_19(17, 
B, C, D, E, T, A, X(1), X(1), X(3), X(9), X(14)); BODY_16_19(18, A, B, C, D, E, T, X(2), X(2), X(4), X(10), X(15)); BODY_16_19(19, T, A, B, C, D, E, X(3), X(3), X(5), X(11), X(0)); BODY_20_31(20, E, T, A, B, C, D, X(4), X(4), X(6), X(12), X(1)); BODY_20_31(21, D, E, T, A, B, C, X(5), X(5), X(7), X(13), X(2)); BODY_20_31(22, C, D, E, T, A, B, X(6), X(6), X(8), X(14), X(3)); BODY_20_31(23, B, C, D, E, T, A, X(7), X(7), X(9), X(15), X(4)); BODY_20_31(24, A, B, C, D, E, T, X(8), X(8), X(10), X(0), X(5)); BODY_20_31(25, T, A, B, C, D, E, X(9), X(9), X(11), X(1), X(6)); BODY_20_31(26, E, T, A, B, C, D, X(10), X(10), X(12), X(2), X(7)); BODY_20_31(27, D, E, T, A, B, C, X(11), X(11), X(13), X(3), X(8)); BODY_20_31(28, C, D, E, T, A, B, X(12), X(12), X(14), X(4), X(9)); BODY_20_31(29, B, C, D, E, T, A, X(13), X(13), X(15), X(5), X(10)); BODY_20_31(30, A, B, C, D, E, T, X(14), X(14), X(0), X(6), X(11)); BODY_20_31(31, T, A, B, C, D, E, X(15), X(15), X(1), X(7), X(12)); BODY_32_39(32, E, T, A, B, C, D, X(0), X(2), X(8), X(13)); BODY_32_39(33, D, E, T, A, B, C, X(1), X(3), X(9), X(14)); BODY_32_39(34, C, D, E, T, A, B, X(2), X(4), X(10), X(15)); BODY_32_39(35, B, C, D, E, T, A, X(3), X(5), X(11), X(0)); BODY_32_39(36, A, B, C, D, E, T, X(4), X(6), X(12), X(1)); BODY_32_39(37, T, A, B, C, D, E, X(5), X(7), X(13), X(2)); BODY_32_39(38, E, T, A, B, C, D, X(6), X(8), X(14), X(3)); BODY_32_39(39, D, E, T, A, B, C, X(7), X(9), X(15), X(4)); BODY_40_59(40, C, D, E, T, A, B, X(8), X(10), X(0), X(5)); BODY_40_59(41, B, C, D, E, T, A, X(9), X(11), X(1), X(6)); BODY_40_59(42, A, B, C, D, E, T, X(10), X(12), X(2), X(7)); BODY_40_59(43, T, A, B, C, D, E, X(11), X(13), X(3), X(8)); BODY_40_59(44, E, T, A, B, C, D, X(12), X(14), X(4), X(9)); BODY_40_59(45, D, E, T, A, B, C, X(13), X(15), X(5), X(10)); BODY_40_59(46, C, D, E, T, A, B, X(14), X(0), X(6), X(11)); BODY_40_59(47, B, C, D, E, T, A, X(15), X(1), X(7), X(12)); BODY_40_59(48, A, B, C, D, E, T, X(0), X(2), X(8), X(13)); BODY_40_59(49, 
T, A, B, C, D, E, X(1), X(3), X(9), X(14)); BODY_40_59(50, E, T, A, B, C, D, X(2), X(4), X(10), X(15)); BODY_40_59(51, D, E, T, A, B, C, X(3), X(5), X(11), X(0)); BODY_40_59(52, C, D, E, T, A, B, X(4), X(6), X(12), X(1)); BODY_40_59(53, B, C, D, E, T, A, X(5), X(7), X(13), X(2)); BODY_40_59(54, A, B, C, D, E, T, X(6), X(8), X(14), X(3)); BODY_40_59(55, T, A, B, C, D, E, X(7), X(9), X(15), X(4)); BODY_40_59(56, E, T, A, B, C, D, X(8), X(10), X(0), X(5)); BODY_40_59(57, D, E, T, A, B, C, X(9), X(11), X(1), X(6)); BODY_40_59(58, C, D, E, T, A, B, X(10), X(12), X(2), X(7)); BODY_40_59(59, B, C, D, E, T, A, X(11), X(13), X(3), X(8)); BODY_60_79(60, A, B, C, D, E, T, X(12), X(14), X(4), X(9)); BODY_60_79(61, T, A, B, C, D, E, X(13), X(15), X(5), X(10)); BODY_60_79(62, E, T, A, B, C, D, X(14), X(0), X(6), X(11)); BODY_60_79(63, D, E, T, A, B, C, X(15), X(1), X(7), X(12)); BODY_60_79(64, C, D, E, T, A, B, X(0), X(2), X(8), X(13)); BODY_60_79(65, B, C, D, E, T, A, X(1), X(3), X(9), X(14)); BODY_60_79(66, A, B, C, D, E, T, X(2), X(4), X(10), X(15)); BODY_60_79(67, T, A, B, C, D, E, X(3), X(5), X(11), X(0)); BODY_60_79(68, E, T, A, B, C, D, X(4), X(6), X(12), X(1)); BODY_60_79(69, D, E, T, A, B, C, X(5), X(7), X(13), X(2)); BODY_60_79(70, C, D, E, T, A, B, X(6), X(8), X(14), X(3)); BODY_60_79(71, B, C, D, E, T, A, X(7), X(9), X(15), X(4)); BODY_60_79(72, A, B, C, D, E, T, X(8), X(10), X(0), X(5)); BODY_60_79(73, T, A, B, C, D, E, X(9), X(11), X(1), X(6)); BODY_60_79(74, E, T, A, B, C, D, X(10), X(12), X(2), X(7)); BODY_60_79(75, D, E, T, A, B, C, X(11), X(13), X(3), X(8)); BODY_60_79(76, C, D, E, T, A, B, X(12), X(14), X(4), X(9)); BODY_60_79(77, B, C, D, E, T, A, X(13), X(15), X(5), X(10)); BODY_60_79(78, A, B, C, D, E, T, X(14), X(0), X(6), X(11)); BODY_60_79(79, T, A, B, C, D, E, X(15), X(1), X(7), X(12)); state[0] = (state[0] + E) & 0xffffffffL; state[1] = (state[1] + T) & 0xffffffffL; state[2] = (state[2] + A) & 0xffffffffL; state[3] = (state[3] + B) & 0xffffffffL; 
state[4] = (state[4] + C) & 0xffffffffL; if (--num == 0) { break; } A = state[0]; B = state[1]; C = state[2]; D = state[3]; E = state[4]; } } #endif // !SHA1_ASM_NOHW static void sha1_block_data_order(uint32_t state[5], const uint8_t *data, size_t num) { #if defined(SHA1_ASM_HW) if (sha1_hw_capable()) { sha1_block_data_order_hw(state, data, num); return; } #endif #if defined(SHA1_ASM_AVX2) if (sha1_avx2_capable()) { sha1_block_data_order_avx2(state, data, num); return; } #endif #if defined(SHA1_ASM_AVX) if (sha1_avx_capable()) { sha1_block_data_order_avx(state, data, num); return; } #endif #if defined(SHA1_ASM_SSSE3) if (sha1_ssse3_capable()) { sha1_block_data_order_ssse3(state, data, num); return; } #endif #if defined(SHA1_ASM_NEON) if (CRYPTO_is_NEON_capable()) { sha1_block_data_order_neon(state, data, num); return; } #endif sha1_block_data_order_nohw(state, data, num); } #endif // !SHA1_ASM #undef Xupdate #undef K_00_19 #undef K_20_39 #undef K_40_59 #undef K_60_79 #undef F_00_19 #undef F_20_39 #undef F_40_59 #undef F_60_79 #undef BODY_00_15 #undef BODY_16_19 #undef BODY_20_31 #undef BODY_32_39 #undef BODY_40_59 #undef BODY_60_79 #undef X ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/sha/sha256.cc.inc ================================================ /* * Copyright 2004-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html */

#include
#include

#include "../../internal.h"
#include "../bcm_interface.h"
#include "../digest/md32_common.h"
#include "../service_indicator/internal.h"
#include "internal.h"


// BCM_sha224_init resets |sha| and loads the SHA-224 initial hash value
// (FIPS 180-4, section 5.3.2). |md_len| records the truncated digest size so
// the shared final routine knows how many words to emit.
bcm_infallible BCM_sha224_init(SHA256_CTX *sha) {
  OPENSSL_memset(sha, 0, sizeof(SHA256_CTX));
  sha->h[0] = 0xc1059ed8UL;
  sha->h[1] = 0x367cd507UL;
  sha->h[2] = 0x3070dd17UL;
  sha->h[3] = 0xf70e5939UL;
  sha->h[4] = 0xffc00b31UL;
  sha->h[5] = 0x68581511UL;
  sha->h[6] = 0x64f98fa7UL;
  sha->h[7] = 0xbefa4fa4UL;
  sha->md_len = BCM_SHA224_DIGEST_LENGTH;
  return bcm_infallible::approved;
}

// BCM_sha256_init resets |sha| and loads the SHA-256 initial hash value
// (FIPS 180-4, section 5.3.3).
bcm_infallible BCM_sha256_init(SHA256_CTX *sha) {
  OPENSSL_memset(sha, 0, sizeof(SHA256_CTX));
  sha->h[0] = 0x6a09e667UL;
  sha->h[1] = 0xbb67ae85UL;
  sha->h[2] = 0x3c6ef372UL;
  sha->h[3] = 0xa54ff53aUL;
  sha->h[4] = 0x510e527fUL;
  sha->h[5] = 0x9b05688cUL;
  sha->h[6] = 0x1f83d9abUL;
  sha->h[7] = 0x5be0cd19UL;
  sha->md_len = BCM_SHA256_DIGEST_LENGTH;
  return bcm_infallible::approved;
}

#if !defined(SHA256_ASM)
static void sha256_block_data_order(uint32_t state[8], const uint8_t *in,
                                    size_t num);
#endif

// BCM_sha256_transform runs the compression function on exactly one 64-byte
// block, bypassing the buffering in |c|.
bcm_infallible BCM_sha256_transform(SHA256_CTX *c,
                                    const uint8_t data[BCM_SHA256_CBLOCK]) {
  sha256_block_data_order(c->h, data, 1);
  return bcm_infallible::approved;
}

// BCM_sha256_update absorbs |len| bytes of |data|, buffering partial blocks
// via the shared Merkle-Damgard helper.
bcm_infallible BCM_sha256_update(SHA256_CTX *c, const void *data, size_t len) {
  crypto_md32_update(&sha256_block_data_order, c->h, c->data, BCM_SHA256_CBLOCK,
                     &c->num, &c->Nh, &c->Nl,
                     reinterpret_cast(data), len);
  return bcm_infallible::approved;
}

// SHA-224 shares the SHA-256 compression function, so update simply forwards.
bcm_infallible BCM_sha224_update(SHA256_CTX *ctx, const void *data,
                                 size_t len) {
  return BCM_sha256_update(ctx, data, len);
}

// sha256_final_impl finishes the hash in |c| and writes the first |md_len|
// bytes of the state to |out| in big-endian order (SHA-224 emits 7 words,
// SHA-256 all 8), then records the approved-service completion.
static void sha256_final_impl(uint8_t *out, size_t md_len, SHA256_CTX *c) {
  crypto_md32_final(&sha256_block_data_order, c->h, c->data, BCM_SHA256_CBLOCK,
                    &c->num, c->Nh, c->Nl, /*is_big_endian=*/1);

  BSSL_CHECK(md_len <= BCM_SHA256_DIGEST_LENGTH);
  assert(md_len % 4 == 0);
  const size_t out_words = md_len / 4;
  for (size_t i = 0; i < out_words; i++) {
    CRYPTO_store_u32_be(out, c->h[i]);
    out += 4;
  }

  FIPS_service_indicator_update_state();
}

bcm_infallible BCM_sha256_final(uint8_t out[BCM_SHA256_DIGEST_LENGTH],
                                SHA256_CTX *c) {
  // Ideally we would assert |sha->md_len| is |BCM_SHA256_DIGEST_LENGTH| to
  // match the size hint, but calling code often pairs |SHA224_Init| with
  // |SHA256_Final| and expects |sha->md_len| to carry the size over.
  //
  // TODO(davidben): Add an assert and fix code to match them up.
  sha256_final_impl(out, c->md_len, c);
  return bcm_infallible::approved;
}

bcm_infallible BCM_sha224_final(uint8_t out[BCM_SHA224_DIGEST_LENGTH],
                                SHA256_CTX *ctx) {
  // This function must be paired with |SHA224_Init|, which sets |ctx->md_len|
  // to |BCM_SHA224_DIGEST_LENGTH|.
  assert(ctx->md_len == BCM_SHA224_DIGEST_LENGTH);
  sha256_final_impl(out, BCM_SHA224_DIGEST_LENGTH, ctx);
  return bcm_infallible::approved;
}

#if !defined(SHA256_ASM)
#if !defined(SHA256_ASM_NOHW)
// K256 holds the sixty-four SHA-256 round constants (FIPS 180-4, section
// 4.2.2): the first 32 bits of the fractional parts of the cube roots of the
// first 64 primes.
static const uint32_t K256[64] = {
    0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, 0x3956c25bUL,
    0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, 0xd807aa98UL, 0x12835b01UL,
    0x243185beUL, 0x550c7dc3UL, 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL,
    0xc19bf174UL, 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
    0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL, 0x983e5152UL,
    0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL, 0xc6e00bf3UL, 0xd5a79147UL,
    0x06ca6351UL, 0x14292967UL, 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL,
    0x53380d13UL, 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
    0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL, 0xd192e819UL,
    0xd6990624UL, 0xf40e3585UL, 0x106aa070UL, 0x19a4c116UL, 0x1e376c08UL,
    0x2748774cUL, 0x34b0bcb5UL, 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL,
    0x682e6ff3UL, 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
    0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL};

// See FIPS 180-4, section 4.1.2.
#define Sigma0(x) \ (CRYPTO_rotr_u32((x), 2) ^ CRYPTO_rotr_u32((x), 13) ^ \ CRYPTO_rotr_u32((x), 22)) #define Sigma1(x) \ (CRYPTO_rotr_u32((x), 6) ^ CRYPTO_rotr_u32((x), 11) ^ \ CRYPTO_rotr_u32((x), 25)) #define sigma0(x) \ (CRYPTO_rotr_u32((x), 7) ^ CRYPTO_rotr_u32((x), 18) ^ ((x) >> 3)) #define sigma1(x) \ (CRYPTO_rotr_u32((x), 17) ^ CRYPTO_rotr_u32((x), 19) ^ ((x) >> 10)) #define Ch(x, y, z) (((x) & (y)) ^ ((~(x)) & (z))) #define Maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z))) #define ROUND_00_15(i, a, b, c, d, e, f, g, h) \ do { \ T1 += h + Sigma1(e) + Ch(e, f, g) + K256[i]; \ h = Sigma0(a) + Maj(a, b, c); \ d += T1; \ h += T1; \ } while (0) #define ROUND_16_63(i, a, b, c, d, e, f, g, h, X) \ do { \ s0 = X[(i + 1) & 0x0f]; \ s0 = sigma0(s0); \ s1 = X[(i + 14) & 0x0f]; \ s1 = sigma1(s1); \ T1 = X[(i) & 0x0f] += s0 + s1 + X[(i + 9) & 0x0f]; \ ROUND_00_15(i, a, b, c, d, e, f, g, h); \ } while (0) static void sha256_block_data_order_nohw(uint32_t state[8], const uint8_t *data, size_t num) { uint32_t a, b, c, d, e, f, g, h, s0, s1, T1; uint32_t X[16]; int i; while (num--) { a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; f = state[5]; g = state[6]; h = state[7]; T1 = X[0] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(0, a, b, c, d, e, f, g, h); T1 = X[1] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(1, h, a, b, c, d, e, f, g); T1 = X[2] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(2, g, h, a, b, c, d, e, f); T1 = X[3] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(3, f, g, h, a, b, c, d, e); T1 = X[4] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(4, e, f, g, h, a, b, c, d); T1 = X[5] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(5, d, e, f, g, h, a, b, c); T1 = X[6] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(6, c, d, e, f, g, h, a, b); T1 = X[7] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(7, b, c, d, e, f, g, h, a); T1 = X[8] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(8, a, b, c, d, e, 
f, g, h); T1 = X[9] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(9, h, a, b, c, d, e, f, g); T1 = X[10] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(10, g, h, a, b, c, d, e, f); T1 = X[11] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(11, f, g, h, a, b, c, d, e); T1 = X[12] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(12, e, f, g, h, a, b, c, d); T1 = X[13] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(13, d, e, f, g, h, a, b, c); T1 = X[14] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(14, c, d, e, f, g, h, a, b); T1 = X[15] = CRYPTO_load_u32_be(data); data += 4; ROUND_00_15(15, b, c, d, e, f, g, h, a); for (i = 16; i < 64; i += 8) { ROUND_16_63(i + 0, a, b, c, d, e, f, g, h, X); ROUND_16_63(i + 1, h, a, b, c, d, e, f, g, X); ROUND_16_63(i + 2, g, h, a, b, c, d, e, f, X); ROUND_16_63(i + 3, f, g, h, a, b, c, d, e, X); ROUND_16_63(i + 4, e, f, g, h, a, b, c, d, X); ROUND_16_63(i + 5, d, e, f, g, h, a, b, c, X); ROUND_16_63(i + 6, c, d, e, f, g, h, a, b, X); ROUND_16_63(i + 7, b, c, d, e, f, g, h, a, X); } state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; } } #endif // !defined(SHA256_ASM_NOHW) static void sha256_block_data_order(uint32_t state[8], const uint8_t *data, size_t num) { #if defined(SHA256_ASM_HW) if (sha256_hw_capable()) { sha256_block_data_order_hw(state, data, num); return; } #endif #if defined(SHA256_ASM_AVX) if (sha256_avx_capable()) { sha256_block_data_order_avx(state, data, num); return; } #endif #if defined(SHA256_ASM_SSSE3) if (sha256_ssse3_capable()) { sha256_block_data_order_ssse3(state, data, num); return; } #endif #if defined(SHA256_ASM_NEON) if (CRYPTO_is_NEON_capable()) { sha256_block_data_order_neon(state, data, num); return; } #endif sha256_block_data_order_nohw(state, data, num); } #endif // !defined(SHA256_ASM) bcm_infallible BCM_sha256_transform_blocks(uint32_t state[8], const uint8_t *data, size_t num_blocks) { 
sha256_block_data_order(state, data, num_blocks); return bcm_infallible::approved; } #undef Sigma0 #undef Sigma1 #undef sigma0 #undef sigma1 #undef Ch #undef Maj #undef ROUND_00_15 #undef ROUND_16_63 ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/sha/sha512.cc.inc ================================================ /* * Copyright 2004-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "../../internal.h" #include "../bcm_interface.h" #include "../service_indicator/internal.h" #include "internal.h" // The 32-bit hash algorithms share a common byte-order neutral collector and // padding function implementations that operate on unaligned data, // ../digest/md32_common.h. SHA-512 is the only 64-bit hash algorithm, as of // this writing, so there is no need for a common collector/padding // implementation yet. 
static void sha512_final_impl(uint8_t *out, size_t md_len, SHA512_CTX *sha); bcm_infallible BCM_sha384_init(SHA512_CTX *sha) { sha->h[0] = UINT64_C(0xcbbb9d5dc1059ed8); sha->h[1] = UINT64_C(0x629a292a367cd507); sha->h[2] = UINT64_C(0x9159015a3070dd17); sha->h[3] = UINT64_C(0x152fecd8f70e5939); sha->h[4] = UINT64_C(0x67332667ffc00b31); sha->h[5] = UINT64_C(0x8eb44a8768581511); sha->h[6] = UINT64_C(0xdb0c2e0d64f98fa7); sha->h[7] = UINT64_C(0x47b5481dbefa4fa4); sha->Nl = 0; sha->Nh = 0; sha->num = 0; sha->md_len = BCM_SHA384_DIGEST_LENGTH; return bcm_infallible::approved; } bcm_infallible BCM_sha512_init(SHA512_CTX *sha) { sha->h[0] = UINT64_C(0x6a09e667f3bcc908); sha->h[1] = UINT64_C(0xbb67ae8584caa73b); sha->h[2] = UINT64_C(0x3c6ef372fe94f82b); sha->h[3] = UINT64_C(0xa54ff53a5f1d36f1); sha->h[4] = UINT64_C(0x510e527fade682d1); sha->h[5] = UINT64_C(0x9b05688c2b3e6c1f); sha->h[6] = UINT64_C(0x1f83d9abfb41bd6b); sha->h[7] = UINT64_C(0x5be0cd19137e2179); sha->Nl = 0; sha->Nh = 0; sha->num = 0; sha->md_len = BCM_SHA512_DIGEST_LENGTH; return bcm_infallible::approved; } bcm_infallible BCM_sha512_256_init(SHA512_CTX *sha) { sha->h[0] = UINT64_C(0x22312194fc2bf72c); sha->h[1] = UINT64_C(0x9f555fa3c84c64c2); sha->h[2] = UINT64_C(0x2393b86b6f53b151); sha->h[3] = UINT64_C(0x963877195940eabd); sha->h[4] = UINT64_C(0x96283ee2a88effe3); sha->h[5] = UINT64_C(0xbe5e1e2553863992); sha->h[6] = UINT64_C(0x2b0199fc2c85b8aa); sha->h[7] = UINT64_C(0x0eb72ddc81c52ca2); sha->Nl = 0; sha->Nh = 0; sha->num = 0; sha->md_len = BCM_SHA512_256_DIGEST_LENGTH; return bcm_infallible::approved; } #if !defined(SHA512_ASM) static void sha512_block_data_order(uint64_t state[8], const uint8_t *in, size_t num_blocks); #endif bcm_infallible BCM_sha384_final(uint8_t out[BCM_SHA384_DIGEST_LENGTH], SHA512_CTX *sha) { // This function must be paired with |BCM_sha384_init|, which sets // |sha->md_len| to |BCM_SHA384_DIGEST_LENGTH|. 
assert(sha->md_len == BCM_SHA384_DIGEST_LENGTH); sha512_final_impl(out, BCM_SHA384_DIGEST_LENGTH, sha); return bcm_infallible::approved; } bcm_infallible BCM_sha384_update(SHA512_CTX *sha, const void *data, size_t len) { return BCM_sha512_update(sha, data, len); } bcm_infallible BCM_sha512_256_update(SHA512_CTX *sha, const void *data, size_t len) { return BCM_sha512_update(sha, data, len); } bcm_infallible BCM_sha512_256_final(uint8_t out[BCM_SHA512_256_DIGEST_LENGTH], SHA512_CTX *sha) { // This function must be paired with |BCM_sha512_256_init|, which sets // |sha->md_len| to |BCM_SHA512_256_DIGEST_LENGTH|. assert(sha->md_len == BCM_SHA512_256_DIGEST_LENGTH); sha512_final_impl(out, BCM_SHA512_256_DIGEST_LENGTH, sha); return bcm_infallible::approved; } bcm_infallible BCM_sha512_transform(SHA512_CTX *c, const uint8_t block[SHA512_CBLOCK]) { sha512_block_data_order(c->h, block, 1); return bcm_infallible::approved; } bcm_infallible BCM_sha512_update(SHA512_CTX *c, const void *in_data, size_t len) { uint64_t l; uint8_t *p = c->p; const uint8_t *data = reinterpret_cast(in_data); if (len == 0) { return bcm_infallible::approved; } l = (c->Nl + (((uint64_t)len) << 3)) & UINT64_C(0xffffffffffffffff); if (l < c->Nl) { c->Nh++; } if (sizeof(len) >= 8) { c->Nh += (((uint64_t)len) >> 61); } c->Nl = l; if (c->num != 0) { size_t n = sizeof(c->p) - c->num; if (len < n) { OPENSSL_memcpy(p + c->num, data, len); c->num += (unsigned int)len; return bcm_infallible::approved; } else { OPENSSL_memcpy(p + c->num, data, n), c->num = 0; len -= n; data += n; sha512_block_data_order(c->h, p, 1); } } if (len >= sizeof(c->p)) { sha512_block_data_order(c->h, data, len / sizeof(c->p)); data += len; len %= sizeof(c->p); data -= len; } if (len != 0) { OPENSSL_memcpy(p, data, len); c->num = (int)len; } return bcm_infallible::approved; } bcm_infallible BCM_sha512_final(uint8_t out[BCM_SHA512_DIGEST_LENGTH], SHA512_CTX *sha) { // Ideally we would assert |sha->md_len| is |BCM_SHA512_DIGEST_LENGTH| to 
// match the size hint, but calling code often pairs |BCM_sha384_init| with // |BCM_sha512_final| and expects |sha->md_len| to carry the size over. // // TODO(davidben): Add an assert and fix code to match them up. sha512_final_impl(out, sha->md_len, sha); return bcm_infallible::approved; } static void sha512_final_impl(uint8_t *out, size_t md_len, SHA512_CTX *sha) { uint8_t *p = sha->p; size_t n = sha->num; p[n] = 0x80; // There always is a room for one n++; if (n > (sizeof(sha->p) - 16)) { OPENSSL_memset(p + n, 0, sizeof(sha->p) - n); n = 0; sha512_block_data_order(sha->h, p, 1); } OPENSSL_memset(p + n, 0, sizeof(sha->p) - 16 - n); CRYPTO_store_u64_be(p + sizeof(sha->p) - 16, sha->Nh); CRYPTO_store_u64_be(p + sizeof(sha->p) - 8, sha->Nl); sha512_block_data_order(sha->h, p, 1); assert(md_len % 8 == 0); const size_t out_words = md_len / 8; for (size_t i = 0; i < out_words; i++) { CRYPTO_store_u64_be(out, sha->h[i]); out += 8; } FIPS_service_indicator_update_state(); } #if !defined(SHA512_ASM) #if !defined(SHA512_ASM_NOHW) static const uint64_t K512[80] = { UINT64_C(0x428a2f98d728ae22), UINT64_C(0x7137449123ef65cd), UINT64_C(0xb5c0fbcfec4d3b2f), UINT64_C(0xe9b5dba58189dbbc), UINT64_C(0x3956c25bf348b538), UINT64_C(0x59f111f1b605d019), UINT64_C(0x923f82a4af194f9b), UINT64_C(0xab1c5ed5da6d8118), UINT64_C(0xd807aa98a3030242), UINT64_C(0x12835b0145706fbe), UINT64_C(0x243185be4ee4b28c), UINT64_C(0x550c7dc3d5ffb4e2), UINT64_C(0x72be5d74f27b896f), UINT64_C(0x80deb1fe3b1696b1), UINT64_C(0x9bdc06a725c71235), UINT64_C(0xc19bf174cf692694), UINT64_C(0xe49b69c19ef14ad2), UINT64_C(0xefbe4786384f25e3), UINT64_C(0x0fc19dc68b8cd5b5), UINT64_C(0x240ca1cc77ac9c65), UINT64_C(0x2de92c6f592b0275), UINT64_C(0x4a7484aa6ea6e483), UINT64_C(0x5cb0a9dcbd41fbd4), UINT64_C(0x76f988da831153b5), UINT64_C(0x983e5152ee66dfab), UINT64_C(0xa831c66d2db43210), UINT64_C(0xb00327c898fb213f), UINT64_C(0xbf597fc7beef0ee4), UINT64_C(0xc6e00bf33da88fc2), UINT64_C(0xd5a79147930aa725), 
UINT64_C(0x06ca6351e003826f), UINT64_C(0x142929670a0e6e70), UINT64_C(0x27b70a8546d22ffc), UINT64_C(0x2e1b21385c26c926), UINT64_C(0x4d2c6dfc5ac42aed), UINT64_C(0x53380d139d95b3df), UINT64_C(0x650a73548baf63de), UINT64_C(0x766a0abb3c77b2a8), UINT64_C(0x81c2c92e47edaee6), UINT64_C(0x92722c851482353b), UINT64_C(0xa2bfe8a14cf10364), UINT64_C(0xa81a664bbc423001), UINT64_C(0xc24b8b70d0f89791), UINT64_C(0xc76c51a30654be30), UINT64_C(0xd192e819d6ef5218), UINT64_C(0xd69906245565a910), UINT64_C(0xf40e35855771202a), UINT64_C(0x106aa07032bbd1b8), UINT64_C(0x19a4c116b8d2d0c8), UINT64_C(0x1e376c085141ab53), UINT64_C(0x2748774cdf8eeb99), UINT64_C(0x34b0bcb5e19b48a8), UINT64_C(0x391c0cb3c5c95a63), UINT64_C(0x4ed8aa4ae3418acb), UINT64_C(0x5b9cca4f7763e373), UINT64_C(0x682e6ff3d6b2b8a3), UINT64_C(0x748f82ee5defb2fc), UINT64_C(0x78a5636f43172f60), UINT64_C(0x84c87814a1f0ab72), UINT64_C(0x8cc702081a6439ec), UINT64_C(0x90befffa23631e28), UINT64_C(0xa4506cebde82bde9), UINT64_C(0xbef9a3f7b2c67915), UINT64_C(0xc67178f2e372532b), UINT64_C(0xca273eceea26619c), UINT64_C(0xd186b8c721c0c207), UINT64_C(0xeada7dd6cde0eb1e), UINT64_C(0xf57d4f7fee6ed178), UINT64_C(0x06f067aa72176fba), UINT64_C(0x0a637dc5a2c898a6), UINT64_C(0x113f9804bef90dae), UINT64_C(0x1b710b35131c471b), UINT64_C(0x28db77f523047d84), UINT64_C(0x32caab7b40c72493), UINT64_C(0x3c9ebe0a15c9bebc), UINT64_C(0x431d67c49c100d4c), UINT64_C(0x4cc5d4becb3e42b6), UINT64_C(0x597f299cfc657e2a), UINT64_C(0x5fcb6fab3ad6faec), UINT64_C(0x6c44198c4a475817), }; #define Sigma0(x) \ (CRYPTO_rotr_u64((x), 28) ^ CRYPTO_rotr_u64((x), 34) ^ \ CRYPTO_rotr_u64((x), 39)) #define Sigma1(x) \ (CRYPTO_rotr_u64((x), 14) ^ CRYPTO_rotr_u64((x), 18) ^ \ CRYPTO_rotr_u64((x), 41)) #define sigma0(x) \ (CRYPTO_rotr_u64((x), 1) ^ CRYPTO_rotr_u64((x), 8) ^ ((x) >> 7)) #define sigma1(x) \ (CRYPTO_rotr_u64((x), 19) ^ CRYPTO_rotr_u64((x), 61) ^ ((x) >> 6)) #define Ch(x, y, z) (((x) & (y)) ^ ((~(x)) & (z))) #define Maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z))) #if 
defined(__i386) || defined(__i386__) || defined(_M_IX86) // This code should give better results on 32-bit CPU with less than // ~24 registers, both size and performance wise... static void sha512_block_data_order_nohw(uint64_t state[8], const uint8_t *in, size_t num) { uint64_t A, E, T; uint64_t X[9 + 80], *F; int i; while (num--) { F = X + 80; A = state[0]; F[1] = state[1]; F[2] = state[2]; F[3] = state[3]; E = state[4]; F[5] = state[5]; F[6] = state[6]; F[7] = state[7]; for (i = 0; i < 16; i++, F--) { T = CRYPTO_load_u64_be(in + i * 8); F[0] = A; F[4] = E; F[8] = T; T += F[7] + Sigma1(E) + Ch(E, F[5], F[6]) + K512[i]; E = F[3] + T; A = T + Sigma0(A) + Maj(A, F[1], F[2]); } for (; i < 80; i++, F--) { T = sigma0(F[8 + 16 - 1]); T += sigma1(F[8 + 16 - 14]); T += F[8 + 16] + F[8 + 16 - 9]; F[0] = A; F[4] = E; F[8] = T; T += F[7] + Sigma1(E) + Ch(E, F[5], F[6]) + K512[i]; E = F[3] + T; A = T + Sigma0(A) + Maj(A, F[1], F[2]); } state[0] += A; state[1] += F[1]; state[2] += F[2]; state[3] += F[3]; state[4] += E; state[5] += F[5]; state[6] += F[6]; state[7] += F[7]; in += 16 * 8; } } #else #define ROUND_00_15(i, a, b, c, d, e, f, g, h) \ do { \ T1 += h + Sigma1(e) + Ch(e, f, g) + K512[i]; \ h = Sigma0(a) + Maj(a, b, c); \ d += T1; \ h += T1; \ } while (0) #define ROUND_16_80(i, j, a, b, c, d, e, f, g, h, X) \ do { \ s0 = X[(j + 1) & 0x0f]; \ s0 = sigma0(s0); \ s1 = X[(j + 14) & 0x0f]; \ s1 = sigma1(s1); \ T1 = X[(j) & 0x0f] += s0 + s1 + X[(j + 9) & 0x0f]; \ ROUND_00_15(i + j, a, b, c, d, e, f, g, h); \ } while (0) static void sha512_block_data_order_nohw(uint64_t state[8], const uint8_t *in, size_t num) { uint64_t a, b, c, d, e, f, g, h, s0, s1, T1; uint64_t X[16]; int i; while (num--) { a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; f = state[5]; g = state[6]; h = state[7]; T1 = X[0] = CRYPTO_load_u64_be(in); ROUND_00_15(0, a, b, c, d, e, f, g, h); T1 = X[1] = CRYPTO_load_u64_be(in + 8); ROUND_00_15(1, h, a, b, c, d, e, f, g); T1 = X[2] = 
CRYPTO_load_u64_be(in + 2 * 8); ROUND_00_15(2, g, h, a, b, c, d, e, f); T1 = X[3] = CRYPTO_load_u64_be(in + 3 * 8); ROUND_00_15(3, f, g, h, a, b, c, d, e); T1 = X[4] = CRYPTO_load_u64_be(in + 4 * 8); ROUND_00_15(4, e, f, g, h, a, b, c, d); T1 = X[5] = CRYPTO_load_u64_be(in + 5 * 8); ROUND_00_15(5, d, e, f, g, h, a, b, c); T1 = X[6] = CRYPTO_load_u64_be(in + 6 * 8); ROUND_00_15(6, c, d, e, f, g, h, a, b); T1 = X[7] = CRYPTO_load_u64_be(in + 7 * 8); ROUND_00_15(7, b, c, d, e, f, g, h, a); T1 = X[8] = CRYPTO_load_u64_be(in + 8 * 8); ROUND_00_15(8, a, b, c, d, e, f, g, h); T1 = X[9] = CRYPTO_load_u64_be(in + 9 * 8); ROUND_00_15(9, h, a, b, c, d, e, f, g); T1 = X[10] = CRYPTO_load_u64_be(in + 10 * 8); ROUND_00_15(10, g, h, a, b, c, d, e, f); T1 = X[11] = CRYPTO_load_u64_be(in + 11 * 8); ROUND_00_15(11, f, g, h, a, b, c, d, e); T1 = X[12] = CRYPTO_load_u64_be(in + 12 * 8); ROUND_00_15(12, e, f, g, h, a, b, c, d); T1 = X[13] = CRYPTO_load_u64_be(in + 13 * 8); ROUND_00_15(13, d, e, f, g, h, a, b, c); T1 = X[14] = CRYPTO_load_u64_be(in + 14 * 8); ROUND_00_15(14, c, d, e, f, g, h, a, b); T1 = X[15] = CRYPTO_load_u64_be(in + 15 * 8); ROUND_00_15(15, b, c, d, e, f, g, h, a); for (i = 16; i < 80; i += 16) { ROUND_16_80(i, 0, a, b, c, d, e, f, g, h, X); ROUND_16_80(i, 1, h, a, b, c, d, e, f, g, X); ROUND_16_80(i, 2, g, h, a, b, c, d, e, f, X); ROUND_16_80(i, 3, f, g, h, a, b, c, d, e, X); ROUND_16_80(i, 4, e, f, g, h, a, b, c, d, X); ROUND_16_80(i, 5, d, e, f, g, h, a, b, c, X); ROUND_16_80(i, 6, c, d, e, f, g, h, a, b, X); ROUND_16_80(i, 7, b, c, d, e, f, g, h, a, X); ROUND_16_80(i, 8, a, b, c, d, e, f, g, h, X); ROUND_16_80(i, 9, h, a, b, c, d, e, f, g, X); ROUND_16_80(i, 10, g, h, a, b, c, d, e, f, X); ROUND_16_80(i, 11, f, g, h, a, b, c, d, e, X); ROUND_16_80(i, 12, e, f, g, h, a, b, c, d, X); ROUND_16_80(i, 13, d, e, f, g, h, a, b, c, X); ROUND_16_80(i, 14, c, d, e, f, g, h, a, b, X); ROUND_16_80(i, 15, b, c, d, e, f, g, h, a, X); } state[0] += a; state[1] += b; state[2] += 
c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; in += 16 * 8; } } #endif #endif // !SHA512_ASM_NOHW static void sha512_block_data_order(uint64_t state[8], const uint8_t *data, size_t num) { #if defined(SHA512_ASM_HW) if (sha512_hw_capable()) { sha512_block_data_order_hw(state, data, num); return; } #endif #if defined(SHA512_ASM_AVX) if (sha512_avx_capable()) { sha512_block_data_order_avx(state, data, num); return; } #endif #if defined(SHA512_ASM_SSSE3) if (sha512_ssse3_capable()) { sha512_block_data_order_ssse3(state, data, num); return; } #endif #if defined(SHA512_ASM_NEON) if (CRYPTO_is_NEON_capable()) { sha512_block_data_order_neon(state, data, num); return; } #endif sha512_block_data_order_nohw(state, data, num); } #endif // !SHA512_ASM #undef Sigma0 #undef Sigma1 #undef sigma0 #undef sigma1 #undef Ch #undef Maj #undef ROUND_00_15 #undef ROUND_16_80 ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/slhdsa/address.h ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/

#ifndef OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_ADDRESS_H
#define OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_ADDRESS_H

// NOTE(review): the target of the system include below was lost during text
// extraction ("#include" with no header name); restore it from the upstream
// file before building.
#include

#include "../../internal.h"

#if defined(__cplusplus)
extern "C" {
#endif


// Offsets of various fields in the address structure for SLH-DSA-SHA2-128s.

// The byte used to specify the Merkle tree layer.
#define SLHDSA_SHA2_128S_OFFSET_LAYER 0

// The start of the 8 byte field used to specify the tree.
#define SLHDSA_SHA2_128S_OFFSET_TREE 1

// The byte used to specify the hash type (reason).
#define SLHDSA_SHA2_128S_OFFSET_TYPE 9

// The high byte used to specify the key pair (which one-time signature).
#define SLHDSA_SHA2_128S_OFFSET_KP_ADDR2 12

// The low byte used to specify the key pair.
#define SLHDSA_SHA2_128S_OFFSET_KP_ADDR1 13

// The byte used to specify the chain address (which Winternitz chain).
#define SLHDSA_SHA2_128S_OFFSET_CHAIN_ADDR 17

// The byte used to specify the hash address (where in the Winternitz chain).
#define SLHDSA_SHA2_128S_OFFSET_HASH_ADDR 21

// The byte used to specify the height of this node in the FORS or Merkle tree.
#define SLHDSA_SHA2_128S_OFFSET_TREE_HGT 17

// The start of the 4 byte field used to specify the node in the FORS or Merkle
// tree.
#define SLHDSA_SHA2_128S_OFFSET_TREE_INDEX 18

// Sets the Winternitz chain-index byte of |addr| to the low byte of |chain|.
inline void slhdsa_set_chain_addr(uint8_t addr[32], uint32_t chain) {
  addr[SLHDSA_SHA2_128S_OFFSET_CHAIN_ADDR] = (uint8_t)chain;
}

// Sets the position-within-chain byte of |addr| to the low byte of |hash|.
inline void slhdsa_set_hash_addr(uint8_t addr[32], uint32_t hash) {
  addr[SLHDSA_SHA2_128S_OFFSET_HASH_ADDR] = (uint8_t)hash;
}

// Writes the low 16 bits of |keypair| into the two key-pair bytes of |addr|.
inline void slhdsa_set_keypair_addr(uint8_t addr[32], uint32_t keypair) {
  addr[SLHDSA_SHA2_128S_OFFSET_KP_ADDR2] = (uint8_t)(keypair >> 8);
  addr[SLHDSA_SHA2_128S_OFFSET_KP_ADDR1] = (uint8_t)keypair;
}

// Copies the layer byte plus the 8-byte tree field, and the two key-pair
// bytes, from |in| to |out|.
inline void slhdsa_copy_keypair_addr(uint8_t out[32], const uint8_t in[32]) {
  OPENSSL_memcpy(out, in, SLHDSA_SHA2_128S_OFFSET_TREE + 8);
  out[SLHDSA_SHA2_128S_OFFSET_KP_ADDR2] = in[SLHDSA_SHA2_128S_OFFSET_KP_ADDR2];
  out[SLHDSA_SHA2_128S_OFFSET_KP_ADDR1] = in[SLHDSA_SHA2_128S_OFFSET_KP_ADDR1];
}

// Sets the Merkle-tree layer byte of |addr| to the low byte of |layer|.
inline void slhdsa_set_layer_addr(uint8_t addr[32], uint32_t layer) {
  addr[SLHDSA_SHA2_128S_OFFSET_LAYER] = (uint8_t)layer;
}

// Stores |tree| big-endian into the 8-byte tree field of |addr|.
inline void slhdsa_set_tree_addr(uint8_t addr[32], uint64_t tree) {
  CRYPTO_store_u64_be(&addr[SLHDSA_SHA2_128S_OFFSET_TREE], tree);
}

// Address "type" (reason) values written into the type byte.
#define SLHDSA_SHA2_128S_ADDR_TYPE_WOTS 0
#define SLHDSA_SHA2_128S_ADDR_TYPE_WOTSPK 1
#define SLHDSA_SHA2_128S_ADDR_TYPE_HASHTREE 2
#define SLHDSA_SHA2_128S_ADDR_TYPE_FORSTREE 3
#define SLHDSA_SHA2_128S_ADDR_TYPE_FORSPK 4
#define SLHDSA_SHA2_128S_ADDR_TYPE_WOTSPRF 5
#define SLHDSA_SHA2_128S_ADDR_TYPE_FORSPRF 6

// Writes |type| into the type byte of |addr| and zeroes bytes 10..21.
inline void slhdsa_set_type(uint8_t addr[32], uint32_t type) {
  // FIPS 205 relies on this setting parts of the address to 0, so we do it
  // here to avoid confusion.
  //
  // The behavior here is only correct for the SHA-2 instantiations.
  OPENSSL_memset(addr + 10, 0, 12);
  addr[SLHDSA_SHA2_128S_OFFSET_TYPE] = (uint8_t)type;
}

// Sets the tree-height byte of |addr| to the low byte of |tree_height|.
inline void slhdsa_set_tree_height(uint8_t addr[32], uint32_t tree_height) {
  addr[SLHDSA_SHA2_128S_OFFSET_TREE_HGT] = (uint8_t)tree_height;
}

// Stores |tree_index| big-endian into the 4-byte tree-index field of |addr|.
inline void slhdsa_set_tree_index(uint8_t addr[32], uint32_t tree_index) {
  CRYPTO_store_u32_be(&addr[SLHDSA_SHA2_128S_OFFSET_TREE_INDEX], tree_index);
}

// Reads the 4-byte big-endian tree-index field of |addr|.
inline uint32_t slhdsa_get_tree_index(uint8_t addr[32]) {
  return CRYPTO_load_u32_be(addr + SLHDSA_SHA2_128S_OFFSET_TREE_INDEX);
}

#if defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_ADDRESS_H


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/slhdsa/fors.cc.inc
================================================
/* Copyright 2024 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

// NOTE(review): the three system-header includes below lost their targets
// during text extraction ("#include" with no header name); restore them from
// the upstream file before building.
#include
#include
#include

#include "../../internal.h"
#include "./address.h"
#include "./fors.h"
#include "./params.h"
#include "./thash.h"


// Compute the base 2^12 representation of `message` (algorithm 4, page 16).
// Splits |message| into SLHDSA_SHA2_128S_FORS_TREES 12-bit indices, consuming
// 3 bytes per pair of indices (requires an even tree count and height 12, as
// the static_asserts below enforce).
static void fors_base_b(
    uint16_t indices[SLHDSA_SHA2_128S_FORS_TREES],
    const uint8_t message[SLHDSA_SHA2_128S_FORS_MSG_BYTES]) {
  static_assert(SLHDSA_SHA2_128S_FORS_HEIGHT == 12, "");
  static_assert((SLHDSA_SHA2_128S_FORS_TREES & 1) == 0, "");

  const uint8_t *msg = message;
  for (size_t i = 0; i < SLHDSA_SHA2_128S_FORS_TREES; i += 2) {
    // Three message bytes yield two 12-bit indices.
    uint32_t val = ((uint32_t)msg[0] << 16) | ((uint32_t)msg[1] << 8) | msg[2];
    indices[i] = (val >> 12) & 0xFFF;
    indices[i + 1] = val & 0xFFF;
    msg += 3;
  }
}

// Implements Algorithm 14: fors_skGen function (page 29)
void slhdsa_fors_sk_gen(uint8_t fors_sk[BCM_SLHDSA_SHA2_128S_N], uint32_t idx,
                        const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N],
                        const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N],
                        uint8_t addr[32]) {
  // Work on a local copy of the address so |addr| is left untouched.
  uint8_t sk_addr[32];
  OPENSSL_memcpy(sk_addr, addr, sizeof(sk_addr));

  slhdsa_set_type(sk_addr, SLHDSA_SHA2_128S_ADDR_TYPE_FORSPRF);
  slhdsa_copy_keypair_addr(sk_addr, addr);
  slhdsa_set_tree_index(sk_addr, idx);
  slhdsa_thash_prf(fors_sk, pk_seed, sk_seed, sk_addr);
}

// Implements Algorithm 15: fors_node function (page 30)
void slhdsa_fors_treehash(uint8_t root_node[BCM_SLHDSA_SHA2_128S_N],
                          const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N],
                          uint32_t i /*target node index*/,
                          uint32_t z /*target node height*/,
                          const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N],
                          uint8_t addr[32]) {
  BSSL_CHECK(z <= SLHDSA_SHA2_128S_FORS_HEIGHT);
  BSSL_CHECK(i < (uint32_t)(SLHDSA_SHA2_128S_FORS_TREES *
                            (1 << (SLHDSA_SHA2_128S_FORS_HEIGHT - z))));

  if (z == 0) {
    // Leaf: hash the freshly derived FORS secret key element.
    uint8_t sk[BCM_SLHDSA_SHA2_128S_N];
    slhdsa_set_tree_height(addr, 0);
    slhdsa_set_tree_index(addr, i);
    slhdsa_fors_sk_gen(sk, i, sk_seed, pk_seed, addr);
    slhdsa_thash_f(root_node, sk, pk_seed, addr);
  } else {
    // Stores left node and right node.
    uint8_t nodes[2 * BCM_SLHDSA_SHA2_128S_N];
    slhdsa_fors_treehash(nodes, sk_seed, 2 * i, z - 1, pk_seed, addr);
    slhdsa_fors_treehash(nodes + BCM_SLHDSA_SHA2_128S_N, sk_seed, 2 * i + 1,
                         z - 1, pk_seed, addr);
    slhdsa_set_tree_height(addr, z);
    slhdsa_set_tree_index(addr, i);
    slhdsa_thash_h(root_node, nodes, pk_seed, addr);
  }
}

// Implements Algorithm 16: fors_sign function (page 31)
void slhdsa_fors_sign(uint8_t fors_sig[SLHDSA_SHA2_128S_FORS_BYTES],
                      const uint8_t message[SLHDSA_SHA2_128S_FORS_MSG_BYTES],
                      const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N],
                      const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N],
                      uint8_t addr[32]) {
  uint16_t indices[SLHDSA_SHA2_128S_FORS_TREES];

  // Derive FORS indices compatible with the NIST changes.
  fors_base_b(indices, message);

  for (size_t i = 0; i < SLHDSA_SHA2_128S_FORS_TREES; ++i) {
    slhdsa_set_tree_height(addr, 0);
    // Write the FORS secret key element to the correct position.
    slhdsa_fors_sk_gen(
        fors_sig +
            i * BCM_SLHDSA_SHA2_128S_N * (SLHDSA_SHA2_128S_FORS_HEIGHT + 1),
        i * (1 << SLHDSA_SHA2_128S_FORS_HEIGHT) + indices[i], sk_seed, pk_seed,
        addr);
    for (size_t j = 0; j < SLHDSA_SHA2_128S_FORS_HEIGHT; ++j) {
      // Sibling of the current node on the path from leaf |indices[i]|.
      size_t s = (indices[i] / (1 << j)) ^ 1;
      // Write the FORS auth path element to the correct position.
      slhdsa_fors_treehash(
          fors_sig + BCM_SLHDSA_SHA2_128S_N *
                         (i * (SLHDSA_SHA2_128S_FORS_HEIGHT + 1) + j + 1),
          sk_seed, i * (1ULL << (SLHDSA_SHA2_128S_FORS_HEIGHT - j)) + s, j,
          pk_seed, addr);
    }
  }
}

// Implements Algorithm 17: fors_pkFromSig function (page 32)
void slhdsa_fors_pk_from_sig(
    uint8_t fors_pk[BCM_SLHDSA_SHA2_128S_N],
    const uint8_t fors_sig[SLHDSA_SHA2_128S_FORS_BYTES],
    const uint8_t message[SLHDSA_SHA2_128S_FORS_MSG_BYTES],
    const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]) {
  uint16_t indices[SLHDSA_SHA2_128S_FORS_TREES];
  uint8_t tmp[2 * BCM_SLHDSA_SHA2_128S_N];
  uint8_t roots[SLHDSA_SHA2_128S_FORS_TREES * BCM_SLHDSA_SHA2_128S_N];

  // Derive FORS indices compatible with the NIST changes.
  fors_base_b(indices, message);

  for (size_t i = 0; i < SLHDSA_SHA2_128S_FORS_TREES; ++i) {
    // Pointer to current sk and authentication path
    const uint8_t *sk =
        fors_sig +
        i * BCM_SLHDSA_SHA2_128S_N * (SLHDSA_SHA2_128S_FORS_HEIGHT + 1);
    const uint8_t *auth =
        fors_sig +
        i * BCM_SLHDSA_SHA2_128S_N * (SLHDSA_SHA2_128S_FORS_HEIGHT + 1) +
        BCM_SLHDSA_SHA2_128S_N;
    uint8_t nodes[2 * BCM_SLHDSA_SHA2_128S_N];

    slhdsa_set_tree_height(addr, 0);
    slhdsa_set_tree_index(
        addr, (i * (1 << SLHDSA_SHA2_128S_FORS_HEIGHT)) + indices[i]);
    slhdsa_thash_f(nodes, sk, pk_seed, addr);

    // Walk the auth path up to the tree root, combining with the sibling on
    // the correct side at each level.
    for (size_t j = 0; j < SLHDSA_SHA2_128S_FORS_HEIGHT; ++j) {
      slhdsa_set_tree_height(addr, j + 1);

      // Even node
      if (((indices[i] / (1 << j)) % 2) == 0) {
        slhdsa_set_tree_index(addr, slhdsa_get_tree_index(addr) / 2);
        OPENSSL_memcpy(tmp, nodes, BCM_SLHDSA_SHA2_128S_N);
        OPENSSL_memcpy(tmp + BCM_SLHDSA_SHA2_128S_N,
                       auth + j * BCM_SLHDSA_SHA2_128S_N,
                       BCM_SLHDSA_SHA2_128S_N);
        slhdsa_thash_h(nodes + BCM_SLHDSA_SHA2_128S_N, tmp, pk_seed, addr);
      } else {
        slhdsa_set_tree_index(addr, (slhdsa_get_tree_index(addr) - 1) / 2);
        OPENSSL_memcpy(tmp, auth + j * BCM_SLHDSA_SHA2_128S_N,
                       BCM_SLHDSA_SHA2_128S_N);
        OPENSSL_memcpy(tmp + BCM_SLHDSA_SHA2_128S_N, nodes,
                       BCM_SLHDSA_SHA2_128S_N);
        slhdsa_thash_h(nodes + BCM_SLHDSA_SHA2_128S_N, tmp, pk_seed, addr);
      }

      OPENSSL_memcpy(nodes, nodes + BCM_SLHDSA_SHA2_128S_N,
                     BCM_SLHDSA_SHA2_128S_N);
    }

    OPENSSL_memcpy(roots + i * BCM_SLHDSA_SHA2_128S_N, nodes,
                   BCM_SLHDSA_SHA2_128S_N);
  }

  // Compress all the per-tree roots into the FORS public key.
  uint8_t forspk_addr[32];
  OPENSSL_memcpy(forspk_addr, addr, sizeof(forspk_addr));
  slhdsa_set_type(forspk_addr, SLHDSA_SHA2_128S_ADDR_TYPE_FORSPK);
  slhdsa_copy_keypair_addr(forspk_addr, addr);
  slhdsa_thash_tk(fors_pk, roots, pk_seed, forspk_addr);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/slhdsa/fors.h
================================================
/* Copyright 2024 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 *
purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_FORS_H #define OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_FORS_H #include "./params.h" #if defined(__cplusplus) extern "C" { #endif // Implements Algorithm 14: fors_skGen function (page 29) void slhdsa_fors_sk_gen(uint8_t fors_sk[BCM_SLHDSA_SHA2_128S_N], uint32_t idx, const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N], const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]); // Implements Algorithm 15: fors_node function (page 30) void slhdsa_fors_treehash(uint8_t root_node[BCM_SLHDSA_SHA2_128S_N], const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N], uint32_t i /*target node index*/, uint32_t z /*target node height*/, const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]); // Implements Algorithm 16: fors_sign function (page 31) void slhdsa_fors_sign(uint8_t fors_sig[SLHDSA_SHA2_128S_FORS_BYTES], const uint8_t message[SLHDSA_SHA2_128S_FORS_MSG_BYTES], const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N], const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]); // Implements Algorithm 17: fors_pkFromSig function (page 32) void slhdsa_fors_pk_from_sig( uint8_t fors_pk[BCM_SLHDSA_SHA2_128S_N], const uint8_t fors_sig[SLHDSA_SHA2_128S_FORS_BYTES], const uint8_t message[SLHDSA_SHA2_128S_FORS_MSG_BYTES], const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]); #if 
defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_FORS_H


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/slhdsa/merkle.cc.inc
================================================
/* Copyright 2024 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the targets of the two system includes below were lost in
// extraction — restore them from upstream BoringSSL before building.
#include
#include

#include "../../internal.h"
#include "./address.h"
#include "./merkle.h"
#include "./params.h"
#include "./thash.h"
#include "./wots.h"

// Implements Algorithm 9: xmss_node function (page 23)
//
// Recursively computes the XMSS tree node at index |i| and height |z| into
// |out_pk|.  Leaves (z == 0) are WOTS+ public keys; interior nodes hash the
// two children with thash_h.  |addr| is mutated as a scratch address.
void slhdsa_treehash(uint8_t out_pk[BCM_SLHDSA_SHA2_128S_N],
                     const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N],
                     uint32_t i /*target node index*/,
                     uint32_t z /*target node height*/,
                     const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N],
                     uint8_t addr[32]) {
  BSSL_CHECK(z <= SLHDSA_SHA2_128S_TREE_HEIGHT);
  BSSL_CHECK(i < (uint32_t)(1 << (SLHDSA_SHA2_128S_TREE_HEIGHT - z)));

  if (z == 0) {
    // Leaf: the WOTS+ public key for key pair |i|.
    slhdsa_set_type(addr, SLHDSA_SHA2_128S_ADDR_TYPE_WOTS);
    slhdsa_set_keypair_addr(addr, i);
    slhdsa_wots_pk_gen(out_pk, sk_seed, pk_seed, addr);
  } else {
    // Stores left node and right node.
    uint8_t nodes[2 * BCM_SLHDSA_SHA2_128S_N];
    slhdsa_treehash(nodes, sk_seed, 2 * i, z - 1, pk_seed, addr);
    slhdsa_treehash(nodes + BCM_SLHDSA_SHA2_128S_N, sk_seed, 2 * i + 1, z - 1,
                    pk_seed, addr);
    slhdsa_set_type(addr, SLHDSA_SHA2_128S_ADDR_TYPE_HASHTREE);
    slhdsa_set_tree_height(addr, z);
    slhdsa_set_tree_index(addr, i);
    slhdsa_thash_h(out_pk, nodes, pk_seed, addr);
  }
}

// Implements Algorithm 10: xmss_sign function (page 24)
//
// Writes the WOTS+ signature of |msg| at sig[0..WOTS_BYTES), followed by the
// authentication path (one n-byte node per tree level) for leaf |idx|.
void slhdsa_xmss_sign(uint8_t sig[SLHDSA_SHA2_128S_XMSS_BYTES],
                      const uint8_t msg[BCM_SLHDSA_SHA2_128S_N],
                      unsigned int idx,
                      const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N],
                      const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N],
                      uint8_t addr[32]) {
  // Build authentication path
  for (size_t j = 0; j < SLHDSA_SHA2_128S_TREE_HEIGHT; ++j) {
    // Sibling of the ancestor of leaf |idx| at height j.
    unsigned int k = (idx >> j) ^ 1;
    slhdsa_treehash(
        sig + SLHDSA_SHA2_128S_WOTS_BYTES + j * BCM_SLHDSA_SHA2_128S_N,
        sk_seed, k, j, pk_seed, addr);
  }

  // Compute WOTS+ signature
  slhdsa_set_type(addr, SLHDSA_SHA2_128S_ADDR_TYPE_WOTS);
  slhdsa_set_keypair_addr(addr, idx);
  slhdsa_wots_sign(sig, msg, sk_seed, pk_seed, addr);
}

// Implements Algorithm 11: xmss_pkFromSig function (page 25)
//
// Recomputes the XMSS root implied by |xmss_sig| over |msg| at leaf |idx| and
// writes it to |root|.  The caller compares the result against a trusted
// value; no verdict is produced here.
void slhdsa_xmss_pk_from_sig(
    uint8_t root[BCM_SLHDSA_SHA2_128S_N],
    const uint8_t xmss_sig[SLHDSA_SHA2_128S_XMSS_BYTES], unsigned int idx,
    const uint8_t msg[BCM_SLHDSA_SHA2_128S_N],
    const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]) {
  // Stores node[0] and node[1] from Algorithm 11
  slhdsa_set_type(addr, SLHDSA_SHA2_128S_ADDR_TYPE_WOTS);
  slhdsa_set_keypair_addr(addr, idx);
  uint8_t node[2 * BCM_SLHDSA_SHA2_128S_N];
  slhdsa_wots_pk_from_sig(node, xmss_sig, msg, pk_seed, addr);
  slhdsa_set_type(addr, SLHDSA_SHA2_128S_ADDR_TYPE_HASHTREE);
  slhdsa_set_tree_index(addr, idx);
  uint8_t tmp[2 * BCM_SLHDSA_SHA2_128S_N];
  // The authentication path follows the WOTS+ signature.
  const uint8_t *const auth = xmss_sig + SLHDSA_SHA2_128S_WOTS_BYTES;
  for (size_t k = 0; k < SLHDSA_SHA2_128S_TREE_HEIGHT; ++k) {
    slhdsa_set_tree_height(addr, k + 1);
    if (((idx >> k) & 1) == 0) {
      // Current node is a left child: hash(node || auth[k]).
      slhdsa_set_tree_index(addr, slhdsa_get_tree_index(addr) >> 1);
      OPENSSL_memcpy(tmp, node, BCM_SLHDSA_SHA2_128S_N);
      OPENSSL_memcpy(tmp + BCM_SLHDSA_SHA2_128S_N,
                     auth + k * BCM_SLHDSA_SHA2_128S_N, BCM_SLHDSA_SHA2_128S_N);
      slhdsa_thash_h(node + BCM_SLHDSA_SHA2_128S_N, tmp, pk_seed, addr);
    } else {
      // Current node is a right child: hash(auth[k] || node).
      slhdsa_set_tree_index(addr, (slhdsa_get_tree_index(addr) - 1) >> 1);
      OPENSSL_memcpy(tmp, auth + k * BCM_SLHDSA_SHA2_128S_N,
                     BCM_SLHDSA_SHA2_128S_N);
      OPENSSL_memcpy(tmp + BCM_SLHDSA_SHA2_128S_N, node,
                     BCM_SLHDSA_SHA2_128S_N);
      slhdsa_thash_h(node + BCM_SLHDSA_SHA2_128S_N, tmp, pk_seed, addr);
    }
    // Promote the freshly computed parent to be the current node.
    OPENSSL_memcpy(node, node + BCM_SLHDSA_SHA2_128S_N,
                   BCM_SLHDSA_SHA2_128S_N);
  }
  OPENSSL_memcpy(root, node, BCM_SLHDSA_SHA2_128S_N);
}

// Implements Algorithm 12: ht_sign function (page 27)
//
// Signs |message| with the hypertree: one XMSS signature per layer, each
// layer signing the root recomputed from the layer below.
void slhdsa_ht_sign(
    uint8_t sig[SLHDSA_SHA2_128S_XMSS_BYTES * SLHDSA_SHA2_128S_D],
    const uint8_t message[BCM_SLHDSA_SHA2_128S_N], uint64_t idx_tree,
    uint32_t idx_leaf, const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N],
    const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N]) {
  uint8_t addr[32] = {0};
  slhdsa_set_tree_addr(addr, idx_tree);

  // Layer 0
  slhdsa_xmss_sign(sig, message, idx_leaf, sk_seed, pk_seed, addr);
  uint8_t root[BCM_SLHDSA_SHA2_128S_N];
  slhdsa_xmss_pk_from_sig(root, sig, idx_leaf, message, pk_seed, addr);
  sig += SLHDSA_SHA2_128S_XMSS_BYTES;

  // All other layers
  for (size_t j = 1; j < SLHDSA_SHA2_128S_D; ++j) {
    // Peel the next TREE_HEIGHT bits of idx_tree off as this layer's leaf.
    idx_leaf = idx_tree % (1 << SLHDSA_SHA2_128S_TREE_HEIGHT);
    idx_tree = idx_tree >> SLHDSA_SHA2_128S_TREE_HEIGHT;
    slhdsa_set_layer_addr(addr, j);
    slhdsa_set_tree_addr(addr, idx_tree);
    slhdsa_xmss_sign(sig, root, idx_leaf, sk_seed, pk_seed, addr);
    if (j < (SLHDSA_SHA2_128S_D - 1)) {
      // The topmost layer's root is not needed, so skip recomputing it.
      slhdsa_xmss_pk_from_sig(root, sig, idx_leaf, root, pk_seed, addr);
    }
    sig += SLHDSA_SHA2_128S_XMSS_BYTES;
  }
}

// Implements Algorithm 13: ht_verify function (page 28)
//
// Returns 1 iff chaining xmss_pk_from_sig through all layers of |sig|
// reproduces |pk_root|.
int slhdsa_ht_verify(
    const uint8_t sig[SLHDSA_SHA2_128S_D * SLHDSA_SHA2_128S_XMSS_BYTES],
    const uint8_t message[BCM_SLHDSA_SHA2_128S_N], uint64_t idx_tree,
    uint32_t idx_leaf, const uint8_t pk_root[BCM_SLHDSA_SHA2_128S_N],
    const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N]) {
  uint8_t addr[32] = {0};
  slhdsa_set_tree_addr(addr, idx_tree);

  uint8_t node[BCM_SLHDSA_SHA2_128S_N];
  slhdsa_xmss_pk_from_sig(node, sig, idx_leaf, message, pk_seed, addr);

  for (size_t j = 1; j < SLHDSA_SHA2_128S_D; ++j) {
    idx_leaf = idx_tree % (1 << SLHDSA_SHA2_128S_TREE_HEIGHT);
    idx_tree = idx_tree >> SLHDSA_SHA2_128S_TREE_HEIGHT;
    slhdsa_set_layer_addr(addr, j);
    slhdsa_set_tree_addr(addr, idx_tree);
    slhdsa_xmss_pk_from_sig(node, sig + j * SLHDSA_SHA2_128S_XMSS_BYTES,
                            idx_leaf, node, pk_seed, addr);
  }
  // NOTE(review): memcmp is not constant-time; presumably both operands are
  // public values here (signature verification) — confirm against callers.
  return memcmp(node, pk_root, BCM_SLHDSA_SHA2_128S_N) == 0;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/slhdsa/merkle.h
================================================
/* Copyright 2024 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_MERKLE_H
#define OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_MERKLE_H

// NOTE(review): the targets of the two system includes below were lost in
// extraction — restore them from upstream BoringSSL before building.
#include
#include

#include "./params.h"

#if defined(__cplusplus)
extern "C" {
#endif

// Implements Algorithm 9: xmss_node function (page 23)
void slhdsa_treehash(uint8_t out_pk[BCM_SLHDSA_SHA2_128S_N],
                     const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N],
                     uint32_t i /*target node index*/,
                     uint32_t z /*target node height*/,
                     const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N],
                     uint8_t addr[32]);

// Implements Algorithm 10: xmss_sign function (page 24)
void slhdsa_xmss_sign(uint8_t sig[SLHDSA_SHA2_128S_XMSS_BYTES],
                      const uint8_t msg[BCM_SLHDSA_SHA2_128S_N],
                      unsigned int idx,
                      const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N],
                      const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N],
                      uint8_t addr[32]);

// Implements Algorithm 11: xmss_pkFromSig function (page 25)
void slhdsa_xmss_pk_from_sig(
    uint8_t root[BCM_SLHDSA_SHA2_128S_N],
    const uint8_t xmss_sig[SLHDSA_SHA2_128S_XMSS_BYTES], unsigned int idx,
    const uint8_t msg[BCM_SLHDSA_SHA2_128S_N],
    const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]);

// Implements Algorithm 12: ht_sign function (page 27)
void slhdsa_ht_sign(
    uint8_t sig[SLHDSA_SHA2_128S_D * SLHDSA_SHA2_128S_XMSS_BYTES],
    const uint8_t message[BCM_SLHDSA_SHA2_128S_N], uint64_t idx_tree,
    uint32_t idx_leaf, const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N],
    const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N]);

// Implements Algorithm 13: ht_verify function (page 28)
int slhdsa_ht_verify(
    const uint8_t sig[SLHDSA_SHA2_128S_D * SLHDSA_SHA2_128S_XMSS_BYTES],
    const uint8_t message[BCM_SLHDSA_SHA2_128S_N], uint64_t idx_tree,
    uint32_t idx_leaf, const uint8_t pk_root[BCM_SLHDSA_SHA2_128S_N],
    const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N]);

#if defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_MERKLE_H


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/slhdsa/params.h
================================================
/* Copyright 2024 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#ifndef OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_PARAMS_H
#define OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_PARAMS_H

// NOTE(review): the target of the system include below was lost in
// extraction — restore it from upstream BoringSSL before building.
#include

#include "../bcm_interface.h"

#if defined(__cplusplus)
extern "C" {
#endif

// Total height of the tree structure.
#define SLHDSA_SHA2_128S_FULL_HEIGHT 63
// Number of subtree layers.
#define SLHDSA_SHA2_128S_D 7
// Height of the trees on each layer
#define SLHDSA_SHA2_128S_TREE_HEIGHT 9
// Height of each individual FORS tree.
#define SLHDSA_SHA2_128S_FORS_HEIGHT 12
// Total number of FORS tree used.
#define SLHDSA_SHA2_128S_FORS_TREES 14
// Size of a FORS signature
#define SLHDSA_SHA2_128S_FORS_BYTES                                  \
  ((SLHDSA_SHA2_128S_FORS_HEIGHT + 1) * SLHDSA_SHA2_128S_FORS_TREES * \
   BCM_SLHDSA_SHA2_128S_N)

// Winternitz parameter and derived values
#define SLHDSA_SHA2_128S_WOTS_W 16
#define SLHDSA_SHA2_128S_WOTS_LOG_W 4
#define SLHDSA_SHA2_128S_WOTS_LEN1 32
#define SLHDSA_SHA2_128S_WOTS_LEN2 3
#define SLHDSA_SHA2_128S_WOTS_LEN 35
#define SLHDSA_SHA2_128S_WOTS_BYTES \
  (BCM_SLHDSA_SHA2_128S_N * SLHDSA_SHA2_128S_WOTS_LEN)

// XMSS sizes
#define SLHDSA_SHA2_128S_XMSS_BYTES \
  (SLHDSA_SHA2_128S_WOTS_BYTES +    \
   (BCM_SLHDSA_SHA2_128S_N * SLHDSA_SHA2_128S_TREE_HEIGHT))

// Size of the message digest (NOTE: This is only correct for the SHA-256 params
// here)
#define SLHDSA_SHA2_128S_DIGEST_SIZE                                          \
  (((SLHDSA_SHA2_128S_FORS_TREES * SLHDSA_SHA2_128S_FORS_HEIGHT) / 8) +       \
   (((SLHDSA_SHA2_128S_FULL_HEIGHT - SLHDSA_SHA2_128S_TREE_HEIGHT) / 8) + 1) + \
   (SLHDSA_SHA2_128S_TREE_HEIGHT / 8) + 1)

// Compressed address size when using SHA-256
#define SLHDSA_SHA2_128S_SHA256_ADDR_BYTES 22

// Size of the FORS message hash
#define SLHDSA_SHA2_128S_FORS_MSG_BYTES \
  ((SLHDSA_SHA2_128S_FORS_HEIGHT * SLHDSA_SHA2_128S_FORS_TREES + 7) / 8)

#define SLHDSA_SHA2_128S_TREE_BITS \
  (SLHDSA_SHA2_128S_TREE_HEIGHT * (SLHDSA_SHA2_128S_D - 1))
#define SLHDSA_SHA2_128S_TREE_BYTES ((SLHDSA_SHA2_128S_TREE_BITS + 7) / 8)
#define SLHDSA_SHA2_128S_LEAF_BITS SLHDSA_SHA2_128S_TREE_HEIGHT
#define SLHDSA_SHA2_128S_LEAF_BYTES ((SLHDSA_SHA2_128S_LEAF_BITS + 7) / 8)

#if defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_PARAMS_H


================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/slhdsa/slhdsa.cc.inc
================================================
/* Copyright 2014 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright
notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the targets of the five system includes below were lost in
// extraction — restore them from upstream BoringSSL before building.
#include
#include
#include
#include
#include

#include "../../internal.h"
#include "../bcm_interface.h"
#include "address.h"
#include "fors.h"
#include "merkle.h"
#include "params.h"
#include "thash.h"

// The OBJECT IDENTIFIER header is also included in these values, per the spec.
static const uint8_t kSHA256OID[] = {0x06, 0x09, 0x60, 0x86, 0x48, 0x01,
                                     0x65, 0x03, 0x04, 0x02, 0x01};
static const uint8_t kSHA384OID[] = {0x06, 0x09, 0x60, 0x86, 0x48, 0x01,
                                     0x65, 0x03, 0x04, 0x02, 0x02};
#define MAX_OID_LENGTH 11
#define MAX_CONTEXT_LENGTH 255

// Deterministically derives an SLH-DSA-SHA2-128s key pair from |seed|, which
// supplies SK.seed || SK.prf || PK.seed.  PK.root is computed with
// slhdsa_treehash and appended to both keys.
bcm_infallible BCM_slhdsa_sha2_128s_generate_key_from_seed(
    uint8_t out_public_key[BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES],
    uint8_t out_secret_key[BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES],
    const uint8_t seed[3 * BCM_SLHDSA_SHA2_128S_N]) {
  // Initialize SK.seed || SK.prf || PK.seed from seed.
  OPENSSL_memcpy(out_secret_key, seed, 3 * BCM_SLHDSA_SHA2_128S_N);

  // Initialize PK.seed from seed.
  OPENSSL_memcpy(out_public_key, seed + 2 * BCM_SLHDSA_SHA2_128S_N,
                 BCM_SLHDSA_SHA2_128S_N);

  uint8_t addr[32] = {0};
  slhdsa_set_layer_addr(addr, SLHDSA_SHA2_128S_D - 1);

  // Set PK.root
  slhdsa_treehash(out_public_key + BCM_SLHDSA_SHA2_128S_N, out_secret_key, 0,
                  SLHDSA_SHA2_128S_TREE_HEIGHT, out_public_key, addr);
  // The private key also carries a copy of PK.root as its fourth n-byte part.
  OPENSSL_memcpy(out_secret_key + 3 * BCM_SLHDSA_SHA2_128S_N,
                 out_public_key + BCM_SLHDSA_SHA2_128S_N,
                 BCM_SLHDSA_SHA2_128S_N);
  return bcm_infallible::approved;
}

// Generates a fresh key pair from RAND_bytes output.
bcm_infallible BCM_slhdsa_sha2_128s_generate_key(
    uint8_t out_public_key[BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES],
    uint8_t out_private_key[BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES]) {
  uint8_t seed[3 * BCM_SLHDSA_SHA2_128S_N];
  RAND_bytes(seed, 3 * BCM_SLHDSA_SHA2_128S_N);
  BCM_slhdsa_sha2_128s_generate_key_from_seed(out_public_key, out_private_key,
                                              seed);
  return bcm_infallible::approved;
}

// Extracts the public key (PK.seed || PK.root), which is stored as the second
// half of the private key.
bcm_infallible BCM_slhdsa_sha2_128s_public_from_private(
    uint8_t out_public_key[BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES],
    const uint8_t private_key[BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES]) {
  OPENSSL_memcpy(out_public_key, private_key + 2 * BCM_SLHDSA_SHA2_128S_N,
                 BCM_SLHDSA_SHA2_128S_N * 2);
  return bcm_infallible::approved;
}

// Note that this overreads by a byte. This is fine in the context that it's
// used.
static uint64_t load_tree_index(const uint8_t in[8]) {
  static_assert(SLHDSA_SHA2_128S_TREE_BYTES == 7,
                "This code needs to be updated");
  uint64_t index = CRYPTO_load_u64_be(in);
  // Drop the overread low byte, then mask down to TREE_BITS bits.
  index >>= 8;
  index &= (~(uint64_t)0) >> (64 - SLHDSA_SHA2_128S_TREE_BITS);
  return index;
}

// Implements Algorithm 22: slh_sign function (Section 10.2.1, page 39)
//
// |header| may be NULL-checked downstream by the thash helpers; |entropy|
// supplies the randomizer input (opt_rand).  Writes R || SIG_FORS || SIG_HT
// into |out_signature|.
bcm_infallible BCM_slhdsa_sha2_128s_sign_internal(
    uint8_t out_signature[BCM_SLHDSA_SHA2_128S_SIGNATURE_BYTES],
    const uint8_t secret_key[BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES],
    const uint8_t header[BCM_SLHDSA_M_PRIME_HEADER_LEN], const uint8_t *context,
    size_t context_len, const uint8_t *msg, size_t msg_len,
    const uint8_t entropy[BCM_SLHDSA_SHA2_128S_N]) {
  // Private key layout: SK.seed || SK.prf || PK.seed || PK.root.
  const uint8_t *sk_seed = secret_key;
  const uint8_t *sk_prf = secret_key + BCM_SLHDSA_SHA2_128S_N;
  const uint8_t *pk_seed = secret_key + 2 * BCM_SLHDSA_SHA2_128S_N;
  const uint8_t *pk_root = secret_key + 3 * BCM_SLHDSA_SHA2_128S_N;

  // Derive randomizer R and copy it to signature
  uint8_t R[BCM_SLHDSA_SHA2_128S_N];
  slhdsa_thash_prfmsg(R, sk_prf, entropy, header, context, context_len, msg,
                      msg_len);
  OPENSSL_memcpy(out_signature, R, BCM_SLHDSA_SHA2_128S_N);

  // Compute message digest
  uint8_t digest[SLHDSA_SHA2_128S_DIGEST_SIZE];
  slhdsa_thash_hmsg(digest, R, pk_seed, pk_root, header, context, context_len,
                    msg, msg_len);

  // Split the digest into the FORS message, tree index and leaf index.
  uint8_t fors_digest[SLHDSA_SHA2_128S_FORS_MSG_BYTES];
  OPENSSL_memcpy(fors_digest, digest, SLHDSA_SHA2_128S_FORS_MSG_BYTES);

  const uint64_t idx_tree =
      load_tree_index(digest + SLHDSA_SHA2_128S_FORS_MSG_BYTES);
  uint32_t idx_leaf = CRYPTO_load_u16_be(
      digest + SLHDSA_SHA2_128S_FORS_MSG_BYTES + SLHDSA_SHA2_128S_TREE_BYTES);
  // Keep only the low LEAF_BITS bits.
  idx_leaf &= (~(uint32_t)0) >> (32 - SLHDSA_SHA2_128S_LEAF_BITS);

  uint8_t addr[32] = {0};
  slhdsa_set_tree_addr(addr, idx_tree);
  slhdsa_set_type(addr, SLHDSA_SHA2_128S_ADDR_TYPE_FORSTREE);
  slhdsa_set_keypair_addr(addr, idx_leaf);

  slhdsa_fors_sign(out_signature + BCM_SLHDSA_SHA2_128S_N, fors_digest, sk_seed,
                   pk_seed, addr);

  // Sign the FORS public key with the hypertree.
  uint8_t pk_fors[BCM_SLHDSA_SHA2_128S_N];
  slhdsa_fors_pk_from_sig(pk_fors, out_signature + BCM_SLHDSA_SHA2_128S_N,
                          fors_digest, pk_seed, addr);

  slhdsa_ht_sign(
      out_signature + BCM_SLHDSA_SHA2_128S_N + SLHDSA_SHA2_128S_FORS_BYTES,
      pk_fors, idx_tree, idx_leaf, sk_seed, pk_seed);
  return bcm_infallible::approved;
}

// Randomized "pure" signing: builds the M' header and fresh entropy, then
// defers to sign_internal.  Fails only if |context| is too long.
bcm_status BCM_slhdsa_sha2_128s_sign(
    uint8_t out_signature[BCM_SLHDSA_SHA2_128S_SIGNATURE_BYTES],
    const uint8_t private_key[BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES],
    const uint8_t *msg, size_t msg_len, const uint8_t *context,
    size_t context_len) {
  if (context_len > MAX_CONTEXT_LENGTH) {
    return bcm_status::failure;
  }

  // Construct header for M' as specified in Algorithm 22
  uint8_t M_prime_header[2];
  M_prime_header[0] = 0;  // domain separator for pure signing
  M_prime_header[1] = (uint8_t)context_len;

  uint8_t entropy[BCM_SLHDSA_SHA2_128S_N];
  RAND_bytes(entropy, sizeof(entropy));
  BCM_slhdsa_sha2_128s_sign_internal(out_signature, private_key, M_prime_header,
                                     context, context_len, msg, msg_len,
                                     entropy);
  return bcm_status::approved;
}

// Builds context || OID(hash_nid) into |out_context_and_oid| for prehashed
// signing/verification.  Returns 0 on unknown hash, hash-length mismatch, or
// insufficient output space; 1 on success.
static int slhdsa_get_context_and_oid(uint8_t *out_context_and_oid,
                                      size_t *out_context_and_oid_len,
                                      size_t max_out_context_and_oid,
                                      const uint8_t *context,
                                      size_t context_len, int hash_nid,
                                      size_t hashed_msg_len) {
  const uint8_t *oid;
  size_t oid_len;
  size_t expected_hash_len;
  switch (hash_nid) {
    case NID_sha256:
      oid = kSHA256OID;
      oid_len = sizeof(kSHA256OID);
      static_assert(sizeof(kSHA256OID) <= MAX_OID_LENGTH, "");
      expected_hash_len = 32;
      break;
    // The SLH-DSA spec only lists SHA-256 and SHA-512. This function also
    // supports SHA-384, which is non-standard.
    case NID_sha384:
      oid = kSHA384OID;
      oid_len = sizeof(kSHA384OID);
      static_assert(sizeof(kSHA384OID) <= MAX_OID_LENGTH, "");
      expected_hash_len = 48;
      break;
    // If adding a hash function with a larger `oid_len`, update the size of
    // `context_and_oid` in the callers.
    default:
      return 0;
  }

  if (hashed_msg_len != expected_hash_len) {
    return 0;
  }

  *out_context_and_oid_len = context_len + oid_len;
  if (*out_context_and_oid_len > max_out_context_and_oid) {
    return 0;
  }

  OPENSSL_memcpy(out_context_and_oid, context, context_len);
  OPENSSL_memcpy(out_context_and_oid + context_len, oid, oid_len);
  return 1;
}

// Randomized prehashed signing (HashSLH-DSA): the context is extended with the
// OID of the prehash function before deferring to sign_internal.
bcm_status BCM_slhdsa_sha2_128s_prehash_sign(
    uint8_t out_signature[BCM_SLHDSA_SHA2_128S_SIGNATURE_BYTES],
    const uint8_t private_key[BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES],
    const uint8_t *hashed_msg, size_t hashed_msg_len, int hash_nid,
    const uint8_t *context, size_t context_len) {
  if (context_len > MAX_CONTEXT_LENGTH) {
    return bcm_status::failure;
  }

  uint8_t M_prime_header[2];
  M_prime_header[0] = 1;  // domain separator for prehashed signing
  M_prime_header[1] = (uint8_t)context_len;

  uint8_t context_and_oid[MAX_CONTEXT_LENGTH + MAX_OID_LENGTH];
  size_t context_and_oid_len;
  if (!slhdsa_get_context_and_oid(context_and_oid, &context_and_oid_len,
                                  sizeof(context_and_oid), context, context_len,
                                  hash_nid, hashed_msg_len)) {
    return bcm_status::failure;
  }

  uint8_t entropy[BCM_SLHDSA_SHA2_128S_N];
  RAND_bytes(entropy, sizeof(entropy));
  BCM_slhdsa_sha2_128s_sign_internal(out_signature, private_key, M_prime_header,
                                     context_and_oid, context_and_oid_len,
                                     hashed_msg, hashed_msg_len, entropy);
  return bcm_status::approved;
}

// Implements Algorithm 24: slh_verify function (Section 10.3, page 41)
bcm_status BCM_slhdsa_sha2_128s_verify(
    const uint8_t *signature, size_t signature_len,
    const uint8_t public_key[BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES],
    const uint8_t *msg, size_t msg_len, const uint8_t *context,
    size_t context_len) {
  if (context_len > MAX_CONTEXT_LENGTH) {
    return bcm_status::failure;
  }

  // Construct header for M' as specified in Algorithm 24
  uint8_t M_prime_header[2];
  M_prime_header[0] = 0;  // domain separator for pure verification
  M_prime_header[1] = (uint8_t)context_len;

  return BCM_slhdsa_sha2_128s_verify_internal(signature, signature_len,
                                              public_key, M_prime_header,
                                              context, context_len, msg,
                                              msg_len);
}

// Prehashed verification counterpart of prehash_sign.
bcm_status BCM_slhdsa_sha2_128s_prehash_verify(
    const uint8_t *signature, size_t signature_len,
    const uint8_t public_key[BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES],
    const uint8_t *hashed_msg, size_t hashed_msg_len, int hash_nid,
    const uint8_t *context, size_t context_len) {
  if (context_len > MAX_CONTEXT_LENGTH) {
    return bcm_status::failure;
  }

  uint8_t M_prime_header[2];
  M_prime_header[0] = 1;  // domain separator for prehashed verification
  M_prime_header[1] = (uint8_t)context_len;

  uint8_t context_and_oid[MAX_CONTEXT_LENGTH + MAX_OID_LENGTH];
  size_t context_and_oid_len;
  if (!slhdsa_get_context_and_oid(context_and_oid, &context_and_oid_len,
                                  sizeof(context_and_oid), context, context_len,
                                  hash_nid, hashed_msg_len)) {
    return bcm_status::failure;
  }

  return BCM_slhdsa_sha2_128s_verify_internal(
      signature, signature_len, public_key, M_prime_header, context_and_oid,
      context_and_oid_len, hashed_msg, hashed_msg_len);
}

// Core verification: recomputes the digest from R, rederives the FORS public
// key from the signature, and checks the hypertree chain against PK.root.
bcm_status BCM_slhdsa_sha2_128s_verify_internal(
    const uint8_t *signature, size_t signature_len,
    const uint8_t public_key[BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES],
    const uint8_t header[BCM_SLHDSA_M_PRIME_HEADER_LEN], const uint8_t *context,
    size_t context_len, const uint8_t *msg, size_t msg_len) {
  if (signature_len != BCM_SLHDSA_SHA2_128S_SIGNATURE_BYTES) {
    return bcm_status::failure;
  }
  // Public key layout: PK.seed || PK.root.
  const uint8_t *pk_seed = public_key;
  const uint8_t *pk_root = public_key + BCM_SLHDSA_SHA2_128S_N;

  // Signature layout: R || SIG_FORS || SIG_HT.
  const uint8_t *r = signature;
  const uint8_t *sig_fors = signature + BCM_SLHDSA_SHA2_128S_N;
  const uint8_t *sig_ht = sig_fors + SLHDSA_SHA2_128S_FORS_BYTES;

  uint8_t digest[SLHDSA_SHA2_128S_DIGEST_SIZE];
  slhdsa_thash_hmsg(digest, r, pk_seed, pk_root, header, context, context_len,
                    msg, msg_len);

  uint8_t fors_digest[SLHDSA_SHA2_128S_FORS_MSG_BYTES];
  OPENSSL_memcpy(fors_digest, digest, SLHDSA_SHA2_128S_FORS_MSG_BYTES);

  const uint64_t idx_tree =
      load_tree_index(digest + SLHDSA_SHA2_128S_FORS_MSG_BYTES);
  uint32_t idx_leaf =
static const uint8_t kZeros[64 - BCM_SLHDSA_SHA2_128S_N] = {0}; SHA256_Update(&sha256, pk_seed, BCM_SLHDSA_SHA2_128S_N); SHA256_Update(&sha256, kZeros, sizeof(kZeros)); SHA256_Update(&sha256, addr, SLHDSA_SHA2_128S_SHA256_ADDR_BYTES); SHA256_Update(&sha256, input, input_blocks * BCM_SLHDSA_SHA2_128S_N); uint8_t hash[32]; SHA256_Final(hash, &sha256); OPENSSL_memcpy(output, hash, BCM_SLHDSA_SHA2_128S_N); } // Implements PRF_msg function (Section 4.1, page 11 and Section 11.2, pages // 44-46) void slhdsa_thash_prfmsg(uint8_t output[BCM_SLHDSA_SHA2_128S_N], const uint8_t sk_prf[BCM_SLHDSA_SHA2_128S_N], const uint8_t entropy[BCM_SLHDSA_SHA2_128S_N], const uint8_t header[BCM_SLHDSA_M_PRIME_HEADER_LEN], const uint8_t *ctx, size_t ctx_len, const uint8_t *msg, size_t msg_len) { // Compute HMAC-SHA256(sk_prf, entropy || header || ctx || msg). We inline // HMAC to avoid an allocation. uint8_t hmac_key[SHA256_CBLOCK]; static_assert(BCM_SLHDSA_SHA2_128S_N <= SHA256_CBLOCK, "HMAC key is larger than block size"); OPENSSL_memcpy(hmac_key, sk_prf, BCM_SLHDSA_SHA2_128S_N); for (size_t i = 0; i < BCM_SLHDSA_SHA2_128S_N; i++) { hmac_key[i] ^= 0x36; } OPENSSL_memset(hmac_key + BCM_SLHDSA_SHA2_128S_N, 0x36, sizeof(hmac_key) - BCM_SLHDSA_SHA2_128S_N); SHA256_CTX sha_ctx; SHA256_Init(&sha_ctx); SHA256_Update(&sha_ctx, hmac_key, sizeof(hmac_key)); SHA256_Update(&sha_ctx, entropy, BCM_SLHDSA_SHA2_128S_N); if (header) { SHA256_Update(&sha_ctx, header, BCM_SLHDSA_M_PRIME_HEADER_LEN); } SHA256_Update(&sha_ctx, ctx, ctx_len); SHA256_Update(&sha_ctx, msg, msg_len); uint8_t hash[SHA256_DIGEST_LENGTH]; SHA256_Final(hash, &sha_ctx); for (size_t i = 0; i < BCM_SLHDSA_SHA2_128S_N; i++) { hmac_key[i] ^= 0x36 ^ 0x5c; } OPENSSL_memset(hmac_key + BCM_SLHDSA_SHA2_128S_N, 0x5c, sizeof(hmac_key) - BCM_SLHDSA_SHA2_128S_N); SHA256_Init(&sha_ctx); SHA256_Update(&sha_ctx, hmac_key, sizeof(hmac_key)); SHA256_Update(&sha_ctx, hash, sizeof(hash)); SHA256_Final(hash, &sha_ctx); // Truncate to BCM_SLHDSA_SHA2_128S_N 
bytes OPENSSL_memcpy(output, hash, BCM_SLHDSA_SHA2_128S_N); } // Implements H_msg function (Section 4.1, page 11 and Section 11.2, pages // 44-46) void slhdsa_thash_hmsg(uint8_t output[SLHDSA_SHA2_128S_DIGEST_SIZE], const uint8_t r[BCM_SLHDSA_SHA2_128S_N], const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], const uint8_t pk_root[BCM_SLHDSA_SHA2_128S_N], const uint8_t header[BCM_SLHDSA_M_PRIME_HEADER_LEN], const uint8_t *ctx, size_t ctx_len, const uint8_t *msg, size_t msg_len) { // MGF1-SHA-256(R || PK.seed || SHA-256(R || PK.seed || PK.root || header || // ctx || M), m) input_buffer stores R || PK_SEED || SHA256(..) || 4-byte // index uint8_t input_buffer[2 * BCM_SLHDSA_SHA2_128S_N + 32 + 4] = {0}; OPENSSL_memcpy(input_buffer, r, BCM_SLHDSA_SHA2_128S_N); OPENSSL_memcpy(input_buffer + BCM_SLHDSA_SHA2_128S_N, pk_seed, BCM_SLHDSA_SHA2_128S_N); // Inner hash SHA256_CTX sha_ctx; SHA256_Init(&sha_ctx); SHA256_Update(&sha_ctx, r, BCM_SLHDSA_SHA2_128S_N); SHA256_Update(&sha_ctx, pk_seed, BCM_SLHDSA_SHA2_128S_N); SHA256_Update(&sha_ctx, pk_root, BCM_SLHDSA_SHA2_128S_N); if (header) { SHA256_Update(&sha_ctx, header, BCM_SLHDSA_M_PRIME_HEADER_LEN); } SHA256_Update(&sha_ctx, ctx, ctx_len); SHA256_Update(&sha_ctx, msg, msg_len); // Write directly into the input buffer SHA256_Final(input_buffer + 2 * BCM_SLHDSA_SHA2_128S_N, &sha_ctx); // MGF1-SHA-256 uint8_t hash[32]; static_assert(SLHDSA_SHA2_128S_DIGEST_SIZE < sizeof(hash), "More MGF1 iterations required"); SHA256(input_buffer, sizeof(input_buffer), hash); OPENSSL_memcpy(output, hash, SLHDSA_SHA2_128S_DIGEST_SIZE); } // Implements PRF function (Section 4.1, page 11 and Section 11.2, pages 44-46) void slhdsa_thash_prf(uint8_t output[BCM_SLHDSA_SHA2_128S_N], const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]) { slhdsa_thash(output, sk_seed, 1, pk_seed, addr); } // Implements T_l function for WOTS+ public key compression (Section 4.1, page // 11 and Section 11.2, pages 44-46) 
void slhdsa_thash_tl(uint8_t output[BCM_SLHDSA_SHA2_128S_N],
                     const uint8_t input[SLHDSA_SHA2_128S_WOTS_BYTES],
                     const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N],
                     uint8_t addr[32]) {
  // Compresses all SLHDSA_SHA2_128S_WOTS_LEN chain-head values (an l*n-byte
  // input) into a single n-byte output via the shared thash core.
  slhdsa_thash(output, input, SLHDSA_SHA2_128S_WOTS_LEN, pk_seed, addr);
}

// Implements H function (Section 4.1, page 11 and Section 11.2, pages 44-46)
void slhdsa_thash_h(uint8_t output[BCM_SLHDSA_SHA2_128S_N],
                    const uint8_t input[2 * BCM_SLHDSA_SHA2_128S_N],
                    const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N],
                    uint8_t addr[32]) {
  // H takes exactly two n-byte blocks (block count 2) and produces n bytes.
  slhdsa_thash(output, input, 2, pk_seed, addr);
}

// Implements F function (Section 4.1, page 11 and Section 11.2, pages 44-46)
void slhdsa_thash_f(uint8_t output[BCM_SLHDSA_SHA2_128S_N],
                    const uint8_t input[BCM_SLHDSA_SHA2_128S_N],
                    const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N],
                    uint8_t addr[32]) {
  // F takes a single n-byte block (block count 1) and produces n bytes.
  slhdsa_thash(output, input, 1, pk_seed, addr);
}

// Implements T_k function for FORS public key compression (Section 4.1, page 11
// and Section 11.2, pages 44-46)
void slhdsa_thash_tk(
    uint8_t output[BCM_SLHDSA_SHA2_128S_N],
    const uint8_t input[SLHDSA_SHA2_128S_FORS_TREES * BCM_SLHDSA_SHA2_128S_N],
    const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]) {
  // T_k compresses the SLHDSA_SHA2_128S_FORS_TREES FORS tree roots (k*n bytes)
  // into a single n-byte output.
  slhdsa_thash(output, input, SLHDSA_SHA2_128S_FORS_TREES, pk_seed, addr);
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/slhdsa/thash.h
================================================
/* Copyright 2024 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_THASH_H #define OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_THASH_H #include "./params.h" #if defined(__cplusplus) extern "C" { #endif // Implements PRF_msg: a pseudo-random function that is used to generate the // randomizer r for the randomized hashing of the message to be signed. // (Section 4.1, page 11) void slhdsa_thash_prfmsg(uint8_t output[BCM_SLHDSA_SHA2_128S_N], const uint8_t sk_prf[BCM_SLHDSA_SHA2_128S_N], const uint8_t opt_rand[BCM_SLHDSA_SHA2_128S_N], const uint8_t header[BCM_SLHDSA_M_PRIME_HEADER_LEN], const uint8_t *ctx, size_t ctx_len, const uint8_t *msg, size_t msg_len); // Implements H_msg: a hash function used to generate the digest of the message // to be signed. (Section 4.1, page 11) void slhdsa_thash_hmsg(uint8_t output[SLHDSA_SHA2_128S_DIGEST_SIZE], const uint8_t r[BCM_SLHDSA_SHA2_128S_N], const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], const uint8_t pk_root[BCM_SLHDSA_SHA2_128S_N], const uint8_t header[BCM_SLHDSA_M_PRIME_HEADER_LEN], const uint8_t *ctx, size_t ctx_len, const uint8_t *msg, size_t msg_len); // Implements PRF: a pseudo-random function that is used to generate the secret // values in WOTS+ and FORS private keys. (Section 4.1, page 11) void slhdsa_thash_prf(uint8_t output[BCM_SLHDSA_SHA2_128S_N], const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]); // Implements T_l: a hash function that maps an l*n-byte message to an n-byte // message. Used for WOTS+ public key compression. 
(Section 4.1, page 11) void slhdsa_thash_tl(uint8_t output[BCM_SLHDSA_SHA2_128S_N], const uint8_t input[SLHDSA_SHA2_128S_WOTS_BYTES], const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]); // Implements H: a hash function that takes a 2*n-byte message as input and // produces an n-byte output. (Section 4.1, page 11) void slhdsa_thash_h(uint8_t output[BCM_SLHDSA_SHA2_128S_N], const uint8_t input[2 * BCM_SLHDSA_SHA2_128S_N], const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]); // Implements F: a hash function that takes an n-byte message as input and // produces an n-byte output. (Section 4.1, page 11) void slhdsa_thash_f(uint8_t output[BCM_SLHDSA_SHA2_128S_N], const uint8_t input[BCM_SLHDSA_SHA2_128S_N], const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]); // Implements T_k: a hash function that maps a k*n-byte message to an n-byte // message. Used for FORS public key compression. (Section 4.1, page 11) void slhdsa_thash_tk( uint8_t output[BCM_SLHDSA_SHA2_128S_N], const uint8_t input[SLHDSA_SHA2_128S_FORS_TREES * BCM_SLHDSA_SHA2_128S_N], const uint8_t pk_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_THASH_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/slhdsa/wots.cc.inc ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the angle-bracketed header names of the next four includes
// appear to have been stripped by the repository extraction; consult upstream
// BoringSSL wots.cc.inc for the exact system headers.
#include #include #include #include
#include "../../internal.h"

#include "./address.h"
#include "./params.h"
#include "./thash.h"
#include "./wots.h"

// Implements Algorithm 5: chain function, page 18
//
// Applies the F hash |steps| times to |input|, starting at chain position
// |start|, writing the final n-byte value to |output|. |addr| is the SLH-DSA
// address structure; its hash-address field is updated for every step.
static void chain(uint8_t output[BCM_SLHDSA_SHA2_128S_N],
                  const uint8_t input[BCM_SLHDSA_SHA2_128S_N], uint32_t start,
                  uint32_t steps,
                  const uint8_t pub_seed[BCM_SLHDSA_SHA2_128S_N],
                  uint8_t addr[32]) {
  assert(start < SLHDSA_SHA2_128S_WOTS_W);
  assert(steps < SLHDSA_SHA2_128S_WOTS_W);
  OPENSSL_memcpy(output, input, BCM_SLHDSA_SHA2_128S_N);
  // The second loop bound clamps iteration at the end of the chain even if
  // start + steps would run past W.
  for (size_t i = start; i < (start + steps) && i < SLHDSA_SHA2_128S_WOTS_W;
       ++i) {
    slhdsa_set_hash_addr(addr, i);
    slhdsa_thash_f(output, output, pub_seed, addr);
  }
}

// Derives the secret chain value for |chain_index| via PRF (using |sk_addr|),
// then walks it |value| steps up the chain (using |addr|), writing the result
// to |out|. Both address structures have their chain-address field set to
// |chain_index| as a side effect.
static void slhdsa_wots_do_chain(uint8_t out[BCM_SLHDSA_SHA2_128S_N],
                                 uint8_t sk_addr[32], uint8_t addr[32],
                                 uint8_t value,
                                 const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N],
                                 const uint8_t pub_seed[BCM_SLHDSA_SHA2_128S_N],
                                 uint32_t chain_index) {
  uint8_t tmp_sk[BCM_SLHDSA_SHA2_128S_N];
  slhdsa_set_chain_addr(sk_addr, chain_index);
  slhdsa_thash_prf(tmp_sk, pub_seed, sk_seed, sk_addr);
  slhdsa_set_chain_addr(addr, chain_index);
  chain(out, tmp_sk, 0, value, pub_seed, addr);
}

// Implements Algorithm 6: wots_pkGen function, page 18
void slhdsa_wots_pk_gen(uint8_t pk[BCM_SLHDSA_SHA2_128S_N],
                        const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N],
                        const uint8_t pub_seed[BCM_SLHDSA_SHA2_128S_N],
                        uint8_t addr[32]) {
  uint8_t wots_pk_addr[32], sk_addr[32];
  OPENSSL_memcpy(wots_pk_addr, addr, sizeof(wots_pk_addr));
  OPENSSL_memcpy(sk_addr, addr, sizeof(sk_addr));
  slhdsa_set_type(sk_addr, SLHDSA_SHA2_128S_ADDR_TYPE_WOTSPRF);
  slhdsa_copy_keypair_addr(sk_addr, addr);
  // Walk every chain all the way to the top (W - 1 steps from position 0).
  uint8_t tmp[SLHDSA_SHA2_128S_WOTS_BYTES];
  for (size_t i = 0; i < SLHDSA_SHA2_128S_WOTS_LEN; ++i) {
    slhdsa_wots_do_chain(tmp + i * BCM_SLHDSA_SHA2_128S_N, sk_addr, addr,
                         SLHDSA_SHA2_128S_WOTS_W - 1, sk_seed, pub_seed, i);
  }
  // Compress pk
  slhdsa_set_type(wots_pk_addr, SLHDSA_SHA2_128S_ADDR_TYPE_WOTSPK);
  slhdsa_copy_keypair_addr(wots_pk_addr, addr);
  slhdsa_thash_tl(pk, tmp, pub_seed, wots_pk_addr);
}

// Implements Algorithm 7: wots_sign function, page 20
void slhdsa_wots_sign(uint8_t sig[SLHDSA_SHA2_128S_WOTS_BYTES],
                      const uint8_t msg[BCM_SLHDSA_SHA2_128S_N],
                      const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N],
                      const uint8_t pub_seed[BCM_SLHDSA_SHA2_128S_N],
                      uint8_t addr[32]) {
  // Compute checksum
  static_assert(SLHDSA_SHA2_128S_WOTS_LEN1 == BCM_SLHDSA_SHA2_128S_N * 2, "");
  // Each message byte contributes its two 4-bit nibbles; the checksum sums
  // (W - 1 - nibble) over all of them.
  uint16_t csum = 0;
  for (size_t i = 0; i < BCM_SLHDSA_SHA2_128S_N; ++i) {
    csum += SLHDSA_SHA2_128S_WOTS_W - 1 - (msg[i] >> 4);
    csum += SLHDSA_SHA2_128S_WOTS_W - 1 - (msg[i] & 15);
  }

  // Compute chains
  uint8_t sk_addr[32];
  OPENSSL_memcpy(sk_addr, addr, sizeof(sk_addr));
  slhdsa_set_type(sk_addr, SLHDSA_SHA2_128S_ADDR_TYPE_WOTSPRF);
  slhdsa_copy_keypair_addr(sk_addr, addr);

  uint32_t chain_index = 0;
  for (size_t i = 0; i < BCM_SLHDSA_SHA2_128S_N; ++i) {
    // High nibble, then low nibble, each producing one n-byte chain value.
    slhdsa_wots_do_chain(sig, sk_addr, addr, msg[i] >> 4, sk_seed, pub_seed,
                         chain_index++);
    sig += BCM_SLHDSA_SHA2_128S_N;
    slhdsa_wots_do_chain(sig, sk_addr, addr, msg[i] & 15, sk_seed, pub_seed,
                         chain_index++);
    sig += BCM_SLHDSA_SHA2_128S_N;
  }

  // Include the SLHDSA_SHA2_128S_WOTS_LEN2 checksum values.
  slhdsa_wots_do_chain(sig, sk_addr, addr, (csum >> 8) & 15, sk_seed, pub_seed,
                       chain_index++);
  sig += BCM_SLHDSA_SHA2_128S_N;
  slhdsa_wots_do_chain(sig, sk_addr, addr, (csum >> 4) & 15, sk_seed, pub_seed,
                       chain_index++);
  sig += BCM_SLHDSA_SHA2_128S_N;
  slhdsa_wots_do_chain(sig, sk_addr, addr, csum & 15, sk_seed, pub_seed,
                       chain_index++);
}

// Completes the chain for |chain_index| from the signature value: starting at
// position |value| (the signer stopped there), walks the remaining
// W - 1 - value steps so the result lands at the chain head, written into the
// corresponding n-byte slot of |out|.
static void slhdsa_wots_pk_from_sig_do_chain(
    uint8_t out[SLHDSA_SHA2_128S_WOTS_BYTES], uint8_t addr[32],
    const uint8_t in[SLHDSA_SHA2_128S_WOTS_BYTES], uint8_t value,
    const uint8_t pub_seed[BCM_SLHDSA_SHA2_128S_N], uint32_t chain_index) {
  slhdsa_set_chain_addr(addr, chain_index);
  chain(out + chain_index * BCM_SLHDSA_SHA2_128S_N,
        in + chain_index * BCM_SLHDSA_SHA2_128S_N, value,
        SLHDSA_SHA2_128S_WOTS_W - 1 - value, pub_seed, addr);
}

// Implements Algorithm 8: wots_pkFromSig function, page 21
void slhdsa_wots_pk_from_sig(uint8_t pk[BCM_SLHDSA_SHA2_128S_N],
                             const uint8_t sig[SLHDSA_SHA2_128S_WOTS_BYTES],
                             const uint8_t msg[BCM_SLHDSA_SHA2_128S_N],
                             const uint8_t pub_seed[BCM_SLHDSA_SHA2_128S_N],
                             uint8_t addr[32]) {
  // Compute checksum
  static_assert(SLHDSA_SHA2_128S_WOTS_LEN1 == BCM_SLHDSA_SHA2_128S_N * 2, "");
  uint16_t csum = 0;
  for (size_t i = 0; i < BCM_SLHDSA_SHA2_128S_N; ++i) {
    csum += SLHDSA_SHA2_128S_WOTS_W - 1 - (msg[i] >> 4);
    csum += SLHDSA_SHA2_128S_WOTS_W - 1 - (msg[i] & 15);
  }

  uint8_t tmp[SLHDSA_SHA2_128S_WOTS_BYTES];
  uint8_t wots_pk_addr[32];
  OPENSSL_memcpy(wots_pk_addr, addr, sizeof(wots_pk_addr));

  uint32_t chain_index = 0;
  static_assert(SLHDSA_SHA2_128S_WOTS_LEN1 == BCM_SLHDSA_SHA2_128S_N * 2, "");
  for (size_t i = 0; i < BCM_SLHDSA_SHA2_128S_N; ++i) {
    slhdsa_wots_pk_from_sig_do_chain(tmp, addr, sig, msg[i] >> 4, pub_seed,
                                     chain_index++);
    slhdsa_wots_pk_from_sig_do_chain(tmp, addr, sig, msg[i] & 15, pub_seed,
                                     chain_index++);
  }
  // No `& 15` mask on the high checksum nibble here (unlike wots_sign);
  // presumably csum stays below 512 so csum >> 8 already fits in a nibble —
  // the two call sites then agree. NOTE(review): verify against the parameter
  // values in params.h.
  slhdsa_wots_pk_from_sig_do_chain(tmp, addr, sig, csum >> 8, pub_seed,
                                   chain_index++);
  slhdsa_wots_pk_from_sig_do_chain(tmp, addr, sig, (csum >> 4) & 15, pub_seed,
                                   chain_index++);
  slhdsa_wots_pk_from_sig_do_chain(tmp, addr, sig, csum & 15, pub_seed,
                                   chain_index++);

  // Compress pk
  slhdsa_set_type(wots_pk_addr, SLHDSA_SHA2_128S_ADDR_TYPE_WOTSPK);
  slhdsa_copy_keypair_addr(wots_pk_addr, addr);
  slhdsa_thash_tl(pk, tmp, pub_seed, wots_pk_addr);
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/slhdsa/wots.h
================================================
/* Copyright 2024 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ #ifndef OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_WOTS_H #define OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_WOTS_H #include "./params.h" #if defined(__cplusplus) extern "C" { #endif // Implements Algorithm 6: wots_pkGen function, page 18 void slhdsa_wots_pk_gen(uint8_t pk[BCM_SLHDSA_SHA2_128S_N], const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N], const uint8_t pub_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]); // Implements Algorithm 7: wots_sign function, page 20 void slhdsa_wots_sign(uint8_t sig[SLHDSA_SHA2_128S_WOTS_BYTES], const uint8_t msg[BCM_SLHDSA_SHA2_128S_N], const uint8_t sk_seed[BCM_SLHDSA_SHA2_128S_N], const uint8_t pub_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]); // Implements Algorithm 8: wots_pkFromSig function, page 21 void slhdsa_wots_pk_from_sig(uint8_t pk[BCM_SLHDSA_SHA2_128S_N], const uint8_t sig[SLHDSA_SHA2_128S_WOTS_BYTES], const uint8_t msg[BCM_SLHDSA_SHA2_128S_N], const uint8_t pub_seed[BCM_SLHDSA_SHA2_128S_N], uint8_t addr[32]); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_CRYPTO_FIPSMODULE_SLHDSA_WOTS_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/tls/internal.h ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef OPENSSL_HEADER_CRYPTO_FIPSMODULE_TLS_INTERNAL_H #define OPENSSL_HEADER_CRYPTO_FIPSMODULE_TLS_INTERNAL_H #include #if defined(__cplusplus) extern "C" { #endif // tls1_prf calculates |out_len| bytes of the TLS PDF, using |digest|, and // writes them to |out|. It returns one on success and zero on error. OPENSSL_EXPORT int CRYPTO_tls1_prf(const EVP_MD *digest, uint8_t *out, size_t out_len, const uint8_t *secret, size_t secret_len, const char *label, size_t label_len, const uint8_t *seed1, size_t seed1_len, const uint8_t *seed2, size_t seed2_len); // CRYPTO_tls13_hkdf_expand_label computes the TLS 1.3 KDF function of the same // name. See https://www.rfc-editor.org/rfc/rfc8446#section-7.1. OPENSSL_EXPORT int CRYPTO_tls13_hkdf_expand_label( uint8_t *out, size_t out_len, const EVP_MD *digest, // const uint8_t *secret, size_t secret_len, // const uint8_t *label, size_t label_len, // const uint8_t *hash, size_t hash_len); #if defined(__cplusplus) } #endif #endif // OPENSSL_HEADER_CRYPTO_FIPSMODULE_TLS_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/fipsmodule/tls/kdf.cc.inc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "internal.h" #include "../../internal.h" #include "../service_indicator/internal.h" // tls1_P_hash computes the TLS P_ function as described in RFC 5246, // section 5. It XORs |out_len| bytes to |out|, using |md| as the hash and // |secret| as the secret. |label|, |seed1|, and |seed2| are concatenated to // form the seed parameter. It returns true on success and false on failure. 
static int tls1_P_hash(uint8_t *out, size_t out_len, const EVP_MD *md,
                       const uint8_t *secret, size_t secret_len,
                       const char *label, size_t label_len,
                       const uint8_t *seed1, size_t seed1_len,
                       const uint8_t *seed2, size_t seed2_len) {
  HMAC_CTX ctx, ctx_tmp, ctx_init;
  uint8_t A1[EVP_MAX_MD_SIZE];
  unsigned A1_len;
  int ret = 0;

  // One P_hash iteration emits one digest's worth of output.
  const size_t chunk = EVP_MD_size(md);
  HMAC_CTX_init(&ctx);
  HMAC_CTX_init(&ctx_tmp);
  HMAC_CTX_init(&ctx_init);

  // A(1) = HMAC(secret, label || seed1 || seed2).
  if (!HMAC_Init_ex(&ctx_init, secret, secret_len, md, NULL) ||
      !HMAC_CTX_copy_ex(&ctx, &ctx_init) ||
      !HMAC_Update(&ctx, (const uint8_t *)label, label_len) ||
      !HMAC_Update(&ctx, seed1, seed1_len) ||
      !HMAC_Update(&ctx, seed2, seed2_len) ||  //
      !HMAC_Final(&ctx, A1, &A1_len)) {
    goto err;
  }

  for (;;) {
    unsigned len_u;
    uint8_t hmac[EVP_MAX_MD_SIZE];
    // HMAC(secret, A(i) || label || seed1 || seed2).
    if (!HMAC_CTX_copy_ex(&ctx, &ctx_init) ||
        !HMAC_Update(&ctx, A1, A1_len) ||
        // Save a copy of |ctx| to compute the next A1 value below.
        (out_len > chunk && !HMAC_CTX_copy_ex(&ctx_tmp, &ctx)) ||
        !HMAC_Update(&ctx, (const uint8_t *)label, label_len) ||
        !HMAC_Update(&ctx, seed1, seed1_len) ||
        !HMAC_Update(&ctx, seed2, seed2_len) ||
        !HMAC_Final(&ctx, hmac, &len_u)) {
      goto err;
    }
    size_t len = len_u;
    assert(len == chunk);

    // XOR the result into |out|.
    if (len > out_len) {
      len = out_len;
    }
    for (size_t i = 0; i < len; i++) {
      out[i] ^= hmac[i];
    }
    out += len;
    out_len -= len;

    if (out_len == 0) {
      break;
    }

    // Calculate the next A1 value.
    if (!HMAC_Final(&ctx_tmp, A1, &A1_len)) {
      goto err;
    }
  }

  ret = 1;

err:
  // Scrub the intermediate secret and free all HMAC state.
  OPENSSL_cleanse(A1, sizeof(A1));
  HMAC_CTX_cleanup(&ctx);
  HMAC_CTX_cleanup(&ctx_tmp);
  HMAC_CTX_cleanup(&ctx_init);
  return ret;
}

int CRYPTO_tls1_prf(const EVP_MD *digest, uint8_t *out, size_t out_len,
                    const uint8_t *secret, size_t secret_len, const char *label,
                    size_t label_len, const uint8_t *seed1, size_t seed1_len,
                    const uint8_t *seed2, size_t seed2_len) {
  if (out_len == 0) {
    return 1;
  }
  // tls1_P_hash XORs into |out|, so zero it first.
  OPENSSL_memset(out, 0, out_len);
  const EVP_MD *const original_digest = digest;
  FIPS_service_indicator_lock_state();
  int ret = 0;

  if (digest == EVP_md5_sha1()) {
    // If using the MD5/SHA1 PRF, |secret| is partitioned between MD5 and SHA-1.
    size_t secret_half = secret_len - (secret_len / 2);
    if (!tls1_P_hash(out, out_len, EVP_md5(), secret, secret_half, label,
                     label_len, seed1, seed1_len, seed2, seed2_len)) {
      goto end;
    }

    // Note that, if |secret_len| is odd, the two halves share a byte.
    secret += secret_len - secret_half;
    secret_len = secret_half;
    digest = EVP_sha1();
  }

  // The SHA-1 half (or the sole digest) XORs over whatever is in |out|.
  ret = tls1_P_hash(out, out_len, digest, secret, secret_len, label, label_len,
                    seed1, seed1_len, seed2, seed2_len);

end:
  FIPS_service_indicator_unlock_state();
  if (ret) {
    TLSKDF_verify_service_indicator(original_digest);
  }
  return ret;
}

int CRYPTO_tls13_hkdf_expand_label(uint8_t *out, size_t out_len,
                                   const EVP_MD *digest,  //
                                   const uint8_t *secret, size_t secret_len,
                                   const uint8_t *label, size_t label_len,
                                   const uint8_t *hash, size_t hash_len) {
  static const uint8_t kProtocolLabel[] = "tls13 ";
  CBB cbb, child;
  uint8_t *hkdf_label = NULL;
  size_t hkdf_label_len;

  FIPS_service_indicator_lock_state();
  CBB_zero(&cbb);
  // HkdfLabel = uint16 length || opaque label<7..255> || opaque context<0..255>
  // where the label is "tls13 " || |label|.
  if (!CBB_init(&cbb,
                2 + 1 + sizeof(kProtocolLabel) - 1 + label_len + 1 + hash_len) ||
      !CBB_add_u16(&cbb, out_len) ||
      !CBB_add_u8_length_prefixed(&cbb, &child) ||
      !CBB_add_bytes(&child, kProtocolLabel, sizeof(kProtocolLabel) - 1) ||
      !CBB_add_bytes(&child, label, label_len) ||
      !CBB_add_u8_length_prefixed(&cbb, &child) ||
      !CBB_add_bytes(&child, hash, hash_len) ||
      !CBB_finish(&cbb, &hkdf_label, &hkdf_label_len)) {
    CBB_cleanup(&cbb);
    FIPS_service_indicator_unlock_state();
    return 0;
  }

  const int ret = HKDF_expand(out, out_len, digest, secret, secret_len,
                              hkdf_label, hkdf_label_len);
  OPENSSL_free(hkdf_label);
  FIPS_service_indicator_unlock_state();
  if (ret) {
    TLSKDF_verify_service_indicator(digest);
  }
  return ret;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/hpke/hpke.cc
================================================
/* Copyright 2020 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the angle-bracketed header names of the following includes
// appear to have been stripped by the repository extraction; consult upstream
// BoringSSL hpke.cc for the exact headers.
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../fipsmodule/ec/internal.h"
#include "../internal.h"

// This file implements RFC 9180.
#define MAX_SEED_LEN X25519_PRIVATE_KEY_LEN #define MAX_SHARED_SECRET_LEN SHA256_DIGEST_LENGTH struct evp_hpke_kem_st { uint16_t id; size_t public_key_len; size_t private_key_len; size_t seed_len; size_t enc_len; int (*init_key)(EVP_HPKE_KEY *key, const uint8_t *priv_key, size_t priv_key_len); int (*generate_key)(EVP_HPKE_KEY *key); int (*encap_with_seed)(const EVP_HPKE_KEM *kem, uint8_t *out_shared_secret, size_t *out_shared_secret_len, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc, const uint8_t *peer_public_key, size_t peer_public_key_len, const uint8_t *seed, size_t seed_len); int (*decap)(const EVP_HPKE_KEY *key, uint8_t *out_shared_secret, size_t *out_shared_secret_len, const uint8_t *enc, size_t enc_len); int (*auth_encap_with_seed)(const EVP_HPKE_KEY *key, uint8_t *out_shared_secret, size_t *out_shared_secret_len, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc, const uint8_t *peer_public_key, size_t peer_public_key_len, const uint8_t *seed, size_t seed_len); int (*auth_decap)(const EVP_HPKE_KEY *key, uint8_t *out_shared_secret, size_t *out_shared_secret_len, const uint8_t *enc, size_t enc_len, const uint8_t *peer_public_key, size_t peer_public_key_len); }; struct evp_hpke_kdf_st { uint16_t id; // We only support HKDF-based KDFs. const EVP_MD *(*hkdf_md_func)(void); }; struct evp_hpke_aead_st { uint16_t id; const EVP_AEAD *(*aead_func)(void); }; // Low-level labeled KDF functions. 
static const char kHpkeVersionId[] = "HPKE-v1";

// Appends the NUL-terminated string |label| (without its terminator) to |cbb|.
static int add_label_string(CBB *cbb, const char *label) {
  return CBB_add_bytes(cbb, (const uint8_t *)label, strlen(label));
}

// LabeledExtract from RFC 9180: HKDF-Extract over the labeled IKM. Writes the
// PRK to |out_key| and its length to |out_len|. Returns one on success, zero
// on allocation or HKDF failure.
static int hpke_labeled_extract(const EVP_MD *hkdf_md, uint8_t *out_key,
                                size_t *out_len, const uint8_t *salt,
                                size_t salt_len, const uint8_t *suite_id,
                                size_t suite_id_len, const char *label,
                                const uint8_t *ikm, size_t ikm_len) {
  // labeledIKM = concat("HPKE-v1", suite_id, label, IKM)
  CBB labeled_ikm;
  int ok = CBB_init(&labeled_ikm, 0) &&
           add_label_string(&labeled_ikm, kHpkeVersionId) &&
           CBB_add_bytes(&labeled_ikm, suite_id, suite_id_len) &&
           add_label_string(&labeled_ikm, label) &&
           CBB_add_bytes(&labeled_ikm, ikm, ikm_len) &&
           HKDF_extract(out_key, out_len, hkdf_md, CBB_data(&labeled_ikm),
                        CBB_len(&labeled_ikm), salt, salt_len);
  CBB_cleanup(&labeled_ikm);
  return ok;
}

// LabeledExpand from RFC 9180: HKDF-Expand of |prk| with the labeled info,
// producing exactly |out_len| bytes in |out_key|. Returns one on success,
// zero on allocation or HKDF failure.
static int hpke_labeled_expand(const EVP_MD *hkdf_md, uint8_t *out_key,
                               size_t out_len, const uint8_t *prk,
                               size_t prk_len, const uint8_t *suite_id,
                               size_t suite_id_len, const char *label,
                               const uint8_t *info, size_t info_len) {
  // labeledInfo = concat(I2OSP(L, 2), "HPKE-v1", suite_id, label, info)
  CBB labeled_info;
  int ok = CBB_init(&labeled_info, 0) &&  //
           CBB_add_u16(&labeled_info, out_len) &&
           add_label_string(&labeled_info, kHpkeVersionId) &&
           CBB_add_bytes(&labeled_info, suite_id, suite_id_len) &&
           add_label_string(&labeled_info, label) &&
           CBB_add_bytes(&labeled_info, info, info_len) &&
           HKDF_expand(out_key, out_len, hkdf_md, prk, prk_len,
                       CBB_data(&labeled_info), CBB_len(&labeled_info));
  CBB_cleanup(&labeled_info);
  return ok;
}

// KEM implementations.

// dhkem_extract_and_expand implements the ExtractAndExpand operation in the
// DHKEM construction. See section 4.1 of RFC 9180.
static int dhkem_extract_and_expand(uint16_t kem_id, const EVP_MD *hkdf_md,
                                    uint8_t *out_key, size_t out_len,
                                    const uint8_t *dh, size_t dh_len,
                                    const uint8_t *kem_context,
                                    size_t kem_context_len) {
  // concat("KEM", I2OSP(kem_id, 2))
  // NOTE(review): the static_cast template arguments appear to have been
  // stripped by the repository extraction (upstream casts to uint8_t); as
  // written this will not compile — consult upstream hpke.cc.
  uint8_t suite_id[5] = {'K', 'E', 'M', static_cast(kem_id >> 8),
                         static_cast(kem_id & 0xff)};
  uint8_t prk[EVP_MAX_MD_SIZE];
  size_t prk_len;
  // eae_prk = LabeledExtract("", "eae_prk", dh);
  // shared_secret = LabeledExpand(eae_prk, "shared_secret", kem_context).
  return hpke_labeled_extract(hkdf_md, prk, &prk_len, NULL, 0, suite_id,
                              sizeof(suite_id), "eae_prk", dh, dh_len) &&
         hpke_labeled_expand(hkdf_md, out_key, out_len, prk, prk_len, suite_id,
                             sizeof(suite_id), "shared_secret", kem_context,
                             kem_context_len);
}

// Loads an X25519 private key into |key| and derives the matching public key.
// Returns one on success, zero if |priv_key_len| is wrong.
static int x25519_init_key(EVP_HPKE_KEY *key, const uint8_t *priv_key,
                           size_t priv_key_len) {
  if (priv_key_len != X25519_PRIVATE_KEY_LEN) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
    return 0;
  }

  OPENSSL_memcpy(key->private_key, priv_key, priv_key_len);
  X25519_public_from_private(key->public_key, priv_key);
  return 1;
}

// Generates a fresh X25519 keypair into |key|.
static int x25519_generate_key(EVP_HPKE_KEY *key) {
  X25519_keypair(key->public_key, key->private_key);
  return 1;
}

// DHKEM(X25519) Encap with an externally supplied |seed| as the ephemeral
// private key. Writes the shared secret and the encapsulated (ephemeral
// public) key. Returns one on success, zero on bad sizes or an invalid peer
// key.
static int x25519_encap_with_seed(
    const EVP_HPKE_KEM *kem, uint8_t *out_shared_secret,
    size_t *out_shared_secret_len, uint8_t *out_enc, size_t *out_enc_len,
    size_t max_enc, const uint8_t *peer_public_key, size_t peer_public_key_len,
    const uint8_t *seed, size_t seed_len) {
  if (max_enc < X25519_PUBLIC_VALUE_LEN) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_BUFFER_SIZE);
    return 0;
  }
  if (seed_len != X25519_PRIVATE_KEY_LEN) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
    return 0;
  }
  X25519_public_from_private(out_enc, seed);

  uint8_t dh[X25519_SHARED_KEY_LEN];
  if (peer_public_key_len != X25519_PUBLIC_VALUE_LEN ||
      !X25519(dh, seed, peer_public_key)) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY);
    return 0;
  }

  // kem_context = enc || pkRm.
  uint8_t kem_context[2 * X25519_PUBLIC_VALUE_LEN];
  OPENSSL_memcpy(kem_context, out_enc, X25519_PUBLIC_VALUE_LEN);
  OPENSSL_memcpy(kem_context + X25519_PUBLIC_VALUE_LEN, peer_public_key,
                 X25519_PUBLIC_VALUE_LEN);
  if (!dhkem_extract_and_expand(kem->id, EVP_sha256(), out_shared_secret,
                                SHA256_DIGEST_LENGTH, dh, sizeof(dh),
                                kem_context, sizeof(kem_context))) {
    return 0;
  }

  *out_enc_len = X25519_PUBLIC_VALUE_LEN;
  *out_shared_secret_len = SHA256_DIGEST_LENGTH;
  return 1;
}

// DHKEM(X25519) Decap: recovers the shared secret from the encapsulated key
// |enc| using |key|'s private key. Returns one on success, zero on an invalid
// peer value.
static int x25519_decap(const EVP_HPKE_KEY *key, uint8_t *out_shared_secret,
                        size_t *out_shared_secret_len, const uint8_t *enc,
                        size_t enc_len) {
  uint8_t dh[X25519_SHARED_KEY_LEN];
  if (enc_len != X25519_PUBLIC_VALUE_LEN ||
      !X25519(dh, key->private_key, enc)) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY);
    return 0;
  }

  // kem_context = enc || pkRm (the recipient's own public key here).
  uint8_t kem_context[2 * X25519_PUBLIC_VALUE_LEN];
  OPENSSL_memcpy(kem_context, enc, X25519_PUBLIC_VALUE_LEN);
  OPENSSL_memcpy(kem_context + X25519_PUBLIC_VALUE_LEN, key->public_key,
                 X25519_PUBLIC_VALUE_LEN);
  if (!dhkem_extract_and_expand(key->kem->id, EVP_sha256(), out_shared_secret,
                                SHA256_DIGEST_LENGTH, dh, sizeof(dh),
                                kem_context, sizeof(kem_context))) {
    return 0;
  }

  *out_shared_secret_len = SHA256_DIGEST_LENGTH;
  return 1;
}

// DHKEM(X25519) AuthEncap: like Encap but additionally mixes in a DH between
// the sender's static key |key| and the recipient, authenticating the sender.
static int x25519_auth_encap_with_seed(
    const EVP_HPKE_KEY *key, uint8_t *out_shared_secret,
    size_t *out_shared_secret_len, uint8_t *out_enc, size_t *out_enc_len,
    size_t max_enc, const uint8_t *peer_public_key, size_t peer_public_key_len,
    const uint8_t *seed, size_t seed_len) {
  if (max_enc < X25519_PUBLIC_VALUE_LEN) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_BUFFER_SIZE);
    return 0;
  }
  if (seed_len != X25519_PRIVATE_KEY_LEN) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
    return 0;
  }
  X25519_public_from_private(out_enc, seed);

  // dh = ephemeral DH || static (authentication) DH.
  uint8_t dh[2 * X25519_SHARED_KEY_LEN];
  if (peer_public_key_len != X25519_PUBLIC_VALUE_LEN ||
      !X25519(dh, seed, peer_public_key) ||
      !X25519(dh + X25519_SHARED_KEY_LEN, key->private_key, peer_public_key)) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY);
    return 0;
  }

  // kem_context = enc || pkRm || pkSm.
  uint8_t kem_context[3 * X25519_PUBLIC_VALUE_LEN];
  OPENSSL_memcpy(kem_context, out_enc, X25519_PUBLIC_VALUE_LEN);
  OPENSSL_memcpy(kem_context + X25519_PUBLIC_VALUE_LEN, peer_public_key,
                 X25519_PUBLIC_VALUE_LEN);
  OPENSSL_memcpy(kem_context + 2 * X25519_PUBLIC_VALUE_LEN, key->public_key,
                 X25519_PUBLIC_VALUE_LEN);
  if (!dhkem_extract_and_expand(key->kem->id, EVP_sha256(), out_shared_secret,
                                SHA256_DIGEST_LENGTH, dh, sizeof(dh),
                                kem_context, sizeof(kem_context))) {
    return 0;
  }

  *out_enc_len = X25519_PUBLIC_VALUE_LEN;
  *out_shared_secret_len = SHA256_DIGEST_LENGTH;
  return 1;
}

// DHKEM(X25519) AuthDecap: recovers the shared secret and verifies it is
// bound to the sender's static public key |peer_public_key|.
static int x25519_auth_decap(const EVP_HPKE_KEY *key,
                             uint8_t *out_shared_secret,
                             size_t *out_shared_secret_len, const uint8_t *enc,
                             size_t enc_len, const uint8_t *peer_public_key,
                             size_t peer_public_key_len) {
  // dh = ephemeral DH || static (authentication) DH.
  uint8_t dh[2 * X25519_SHARED_KEY_LEN];
  if (enc_len != X25519_PUBLIC_VALUE_LEN ||
      peer_public_key_len != X25519_PUBLIC_VALUE_LEN ||
      !X25519(dh, key->private_key, enc) ||
      !X25519(dh + X25519_SHARED_KEY_LEN, key->private_key, peer_public_key)) {
    OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY);
    return 0;
  }

  // kem_context = enc || pkRm || pkSm.
  uint8_t kem_context[3 * X25519_PUBLIC_VALUE_LEN];
  OPENSSL_memcpy(kem_context, enc, X25519_PUBLIC_VALUE_LEN);
  OPENSSL_memcpy(kem_context + X25519_PUBLIC_VALUE_LEN, key->public_key,
                 X25519_PUBLIC_VALUE_LEN);
  OPENSSL_memcpy(kem_context + 2 * X25519_PUBLIC_VALUE_LEN, peer_public_key,
                 X25519_PUBLIC_VALUE_LEN);
  if (!dhkem_extract_and_expand(key->kem->id, EVP_sha256(), out_shared_secret,
                                SHA256_DIGEST_LENGTH, dh, sizeof(dh),
                                kem_context, sizeof(kem_context))) {
    return 0;
  }

  *out_shared_secret_len = SHA256_DIGEST_LENGTH;
  return 1;
}

// Returns the static method table for DHKEM(X25519, HKDF-SHA256).
const EVP_HPKE_KEM *EVP_hpke_x25519_hkdf_sha256(void) {
  static const EVP_HPKE_KEM kKEM = {
      /*id=*/EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
      /*public_key_len=*/X25519_PUBLIC_VALUE_LEN,
      /*private_key_len=*/X25519_PRIVATE_KEY_LEN,
      /*seed_len=*/X25519_PRIVATE_KEY_LEN,
      /*enc_len=*/X25519_PUBLIC_VALUE_LEN,
      x25519_init_key,
      x25519_generate_key,
      x25519_encap_with_seed,
      x25519_decap,
      x25519_auth_encap_with_seed,
      x25519_auth_decap,
  };
  return &kKEM;
}

// P-256 sizes: public values are uncompressed points (0x04 || X || Y).
#define P256_PRIVATE_KEY_LEN 32
#define P256_PUBLIC_KEY_LEN 65
#define P256_PUBLIC_VALUE_LEN 65
#define P256_SEED_LEN 32
#define P256_SHARED_KEY_LEN 32 static int p256_public_from_private(uint8_t out_pub[P256_PUBLIC_VALUE_LEN], const uint8_t priv[P256_PRIVATE_KEY_LEN]) { const EC_GROUP *const group = EC_group_p256(); const uint8_t kAllZeros[P256_PRIVATE_KEY_LEN] = {0}; EC_SCALAR private_scalar; EC_JACOBIAN public_point; EC_AFFINE public_point_affine; if (CRYPTO_memcmp(kAllZeros, priv, sizeof(kAllZeros)) == 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } if (!ec_scalar_from_bytes(group, &private_scalar, priv, P256_PRIVATE_KEY_LEN) || !ec_point_mul_scalar_base(group, &public_point, &private_scalar) || !ec_jacobian_to_affine(group, &public_point_affine, &public_point)) { return 0; } size_t out_len_x, out_len_y; out_pub[0] = POINT_CONVERSION_UNCOMPRESSED; ec_felem_to_bytes(group, &out_pub[1], &out_len_x, &public_point_affine.X); ec_felem_to_bytes(group, &out_pub[33], &out_len_y, &public_point_affine.Y); return 1; } static int p256_init_key(EVP_HPKE_KEY *key, const uint8_t *priv_key, size_t priv_key_len) { if (priv_key_len != P256_PRIVATE_KEY_LEN) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } if (!p256_public_from_private(key->public_key, priv_key)) { return 0; } OPENSSL_memcpy(key->private_key, priv_key, priv_key_len); return 1; } static int p256_private_key_from_seed(uint8_t out_priv[P256_PRIVATE_KEY_LEN], const uint8_t seed[P256_SEED_LEN]) { // https://www.rfc-editor.org/rfc/rfc9180.html#name-derivekeypair const uint8_t suite_id[5] = {'K', 'E', 'M', EVP_HPKE_DHKEM_P256_HKDF_SHA256 >> 8, EVP_HPKE_DHKEM_P256_HKDF_SHA256 & 0xff}; uint8_t dkp_prk[32]; size_t dkp_prk_len; if (!hpke_labeled_extract(EVP_sha256(), dkp_prk, &dkp_prk_len, NULL, 0, suite_id, sizeof(suite_id), "dkp_prk", seed, P256_SEED_LEN)) { return 0; } assert(dkp_prk_len == sizeof(dkp_prk)); const EC_GROUP *const group = EC_group_p256(); EC_SCALAR private_scalar; for (unsigned counter = 0; counter < 256; counter++) { const uint8_t counter_byte = counter & 0xff; if (!hpke_labeled_expand(EVP_sha256(), 
out_priv, P256_PRIVATE_KEY_LEN, dkp_prk, sizeof(dkp_prk), suite_id, sizeof(suite_id), "candidate", &counter_byte, sizeof(counter_byte))) { return 0; } // This checks that the scalar is less than the order. if (ec_scalar_from_bytes(group, &private_scalar, out_priv, P256_PRIVATE_KEY_LEN)) { return 1; } } // This happens with probability of 2^-(32*256). OPENSSL_PUT_ERROR(EVP, ERR_R_INTERNAL_ERROR); return 0; } static int p256_generate_key(EVP_HPKE_KEY *key) { uint8_t seed[P256_SEED_LEN]; RAND_bytes(seed, sizeof(seed)); if (!p256_private_key_from_seed(key->private_key, seed) || !p256_public_from_private(key->public_key, key->private_key)) { return 0; } return 1; } static int p256(uint8_t out_dh[P256_SHARED_KEY_LEN], const uint8_t my_private[P256_PRIVATE_KEY_LEN], const uint8_t their_public[P256_PUBLIC_VALUE_LEN]) { const EC_GROUP *const group = EC_group_p256(); EC_SCALAR private_scalar; EC_FELEM x, y; EC_JACOBIAN shared_point, their_point; EC_AFFINE their_point_affine, shared_point_affine; if (their_public[0] != POINT_CONVERSION_UNCOMPRESSED || !ec_felem_from_bytes(group, &x, &their_public[1], 32) || !ec_felem_from_bytes(group, &y, &their_public[33], 32) || !ec_point_set_affine_coordinates(group, &their_point_affine, &x, &y) || !ec_scalar_from_bytes(group, &private_scalar, my_private, P256_PRIVATE_KEY_LEN)) { OPENSSL_PUT_ERROR(EVP, ERR_R_INTERNAL_ERROR); return 0; } ec_affine_to_jacobian(group, &their_point, &their_point_affine); if (!ec_point_mul_scalar(group, &shared_point, &their_point, &private_scalar) || !ec_jacobian_to_affine(group, &shared_point_affine, &shared_point)) { OPENSSL_PUT_ERROR(EVP, ERR_R_INTERNAL_ERROR); return 0; } size_t out_len; ec_felem_to_bytes(group, out_dh, &out_len, &shared_point_affine.X); assert(out_len == P256_SHARED_KEY_LEN); return 1; } static int p256_encap_with_seed(const EVP_HPKE_KEM *kem, uint8_t *out_shared_secret, size_t *out_shared_secret_len, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc, const uint8_t *peer_public_key, 
size_t peer_public_key_len, const uint8_t *seed, size_t seed_len) { if (max_enc < P256_PUBLIC_VALUE_LEN) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_BUFFER_SIZE); return 0; } if (seed_len != P256_SEED_LEN) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } uint8_t private_key[P256_PRIVATE_KEY_LEN]; if (!p256_private_key_from_seed(private_key, seed)) { return 0; } p256_public_from_private(out_enc, private_key); uint8_t dh[P256_SHARED_KEY_LEN]; if (peer_public_key_len != P256_PUBLIC_VALUE_LEN || !p256(dh, private_key, peer_public_key)) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY); return 0; } uint8_t kem_context[2 * P256_PUBLIC_VALUE_LEN]; OPENSSL_memcpy(kem_context, out_enc, P256_PUBLIC_VALUE_LEN); OPENSSL_memcpy(kem_context + P256_PUBLIC_VALUE_LEN, peer_public_key, P256_PUBLIC_VALUE_LEN); if (!dhkem_extract_and_expand(kem->id, EVP_sha256(), out_shared_secret, SHA256_DIGEST_LENGTH, dh, sizeof(dh), kem_context, sizeof(kem_context))) { return 0; } *out_enc_len = P256_PUBLIC_VALUE_LEN; *out_shared_secret_len = SHA256_DIGEST_LENGTH; return 1; } static int p256_decap(const EVP_HPKE_KEY *key, uint8_t *out_shared_secret, size_t *out_shared_secret_len, const uint8_t *enc, size_t enc_len) { uint8_t dh[P256_SHARED_KEY_LEN]; if (enc_len != P256_PUBLIC_VALUE_LEN || // !p256(dh, key->private_key, enc)) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY); return 0; } uint8_t kem_context[2 * P256_PUBLIC_VALUE_LEN]; OPENSSL_memcpy(kem_context, enc, P256_PUBLIC_VALUE_LEN); OPENSSL_memcpy(kem_context + P256_PUBLIC_VALUE_LEN, key->public_key, P256_PUBLIC_VALUE_LEN); if (!dhkem_extract_and_expand(key->kem->id, EVP_sha256(), out_shared_secret, SHA256_DIGEST_LENGTH, dh, sizeof(dh), kem_context, sizeof(kem_context))) { return 0; } *out_shared_secret_len = SHA256_DIGEST_LENGTH; return 1; } static int p256_auth_encap_with_seed( const EVP_HPKE_KEY *key, uint8_t *out_shared_secret, size_t *out_shared_secret_len, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc, const uint8_t 
*peer_public_key, size_t peer_public_key_len, const uint8_t *seed, size_t seed_len) { if (max_enc < P256_PUBLIC_VALUE_LEN) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_BUFFER_SIZE); return 0; } if (seed_len != P256_SEED_LEN) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return 0; } uint8_t private_key[P256_PRIVATE_KEY_LEN]; if (!p256_private_key_from_seed(private_key, seed)) { return 0; } p256_public_from_private(out_enc, private_key); uint8_t dh[2 * P256_SHARED_KEY_LEN]; if (peer_public_key_len != P256_PUBLIC_VALUE_LEN || !p256(dh, private_key, peer_public_key) || !p256(dh + P256_SHARED_KEY_LEN, key->private_key, peer_public_key)) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY); return 0; } uint8_t kem_context[3 * P256_PUBLIC_VALUE_LEN]; OPENSSL_memcpy(kem_context, out_enc, P256_PUBLIC_VALUE_LEN); OPENSSL_memcpy(kem_context + P256_PUBLIC_VALUE_LEN, peer_public_key, P256_PUBLIC_VALUE_LEN); OPENSSL_memcpy(kem_context + 2 * P256_PUBLIC_VALUE_LEN, key->public_key, P256_PUBLIC_VALUE_LEN); if (!dhkem_extract_and_expand(key->kem->id, EVP_sha256(), out_shared_secret, SHA256_DIGEST_LENGTH, dh, sizeof(dh), kem_context, sizeof(kem_context))) { return 0; } *out_enc_len = P256_PUBLIC_VALUE_LEN; *out_shared_secret_len = SHA256_DIGEST_LENGTH; return 1; } static int p256_auth_decap(const EVP_HPKE_KEY *key, uint8_t *out_shared_secret, size_t *out_shared_secret_len, const uint8_t *enc, size_t enc_len, const uint8_t *peer_public_key, size_t peer_public_key_len) { uint8_t dh[2 * P256_SHARED_KEY_LEN]; if (enc_len != P256_PUBLIC_VALUE_LEN || peer_public_key_len != P256_PUBLIC_VALUE_LEN || !p256(dh, key->private_key, enc) || !p256(dh + P256_SHARED_KEY_LEN, key->private_key, peer_public_key)) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY); return 0; } uint8_t kem_context[3 * P256_PUBLIC_VALUE_LEN]; OPENSSL_memcpy(kem_context, enc, P256_PUBLIC_VALUE_LEN); OPENSSL_memcpy(kem_context + P256_PUBLIC_VALUE_LEN, key->public_key, P256_PUBLIC_VALUE_LEN); OPENSSL_memcpy(kem_context + 2 * 
P256_PUBLIC_VALUE_LEN, peer_public_key, P256_PUBLIC_VALUE_LEN); if (!dhkem_extract_and_expand(key->kem->id, EVP_sha256(), out_shared_secret, SHA256_DIGEST_LENGTH, dh, sizeof(dh), kem_context, sizeof(kem_context))) { return 0; } *out_shared_secret_len = SHA256_DIGEST_LENGTH; return 1; } const EVP_HPKE_KEM *EVP_hpke_p256_hkdf_sha256(void) { static const EVP_HPKE_KEM kKEM = { /*id=*/EVP_HPKE_DHKEM_P256_HKDF_SHA256, /*public_key_len=*/P256_PUBLIC_KEY_LEN, /*private_key_len=*/P256_PRIVATE_KEY_LEN, /*seed_len=*/P256_SEED_LEN, /*enc_len=*/P256_PUBLIC_VALUE_LEN, p256_init_key, p256_generate_key, p256_encap_with_seed, p256_decap, p256_auth_encap_with_seed, p256_auth_decap, }; return &kKEM; } uint16_t EVP_HPKE_KEM_id(const EVP_HPKE_KEM *kem) { return kem->id; } size_t EVP_HPKE_KEM_public_key_len(const EVP_HPKE_KEM *kem) { return kem->public_key_len; } size_t EVP_HPKE_KEM_private_key_len(const EVP_HPKE_KEM *kem) { return kem->private_key_len; } size_t EVP_HPKE_KEM_enc_len(const EVP_HPKE_KEM *kem) { return kem->enc_len; } void EVP_HPKE_KEY_zero(EVP_HPKE_KEY *key) { OPENSSL_memset(key, 0, sizeof(EVP_HPKE_KEY)); } void EVP_HPKE_KEY_cleanup(EVP_HPKE_KEY *key) { // Nothing to clean up for now, but we may introduce a cleanup process in the // future. } EVP_HPKE_KEY *EVP_HPKE_KEY_new(void) { EVP_HPKE_KEY *key = reinterpret_cast(OPENSSL_malloc(sizeof(EVP_HPKE_KEY))); if (key == NULL) { return NULL; } EVP_HPKE_KEY_zero(key); return key; } void EVP_HPKE_KEY_free(EVP_HPKE_KEY *key) { if (key != NULL) { EVP_HPKE_KEY_cleanup(key); OPENSSL_free(key); } } int EVP_HPKE_KEY_copy(EVP_HPKE_KEY *dst, const EVP_HPKE_KEY *src) { // For now, |EVP_HPKE_KEY| is trivially copyable. OPENSSL_memcpy(dst, src, sizeof(EVP_HPKE_KEY)); return 1; } void EVP_HPKE_KEY_move(EVP_HPKE_KEY *out, EVP_HPKE_KEY *in) { EVP_HPKE_KEY_cleanup(out); // For now, |EVP_HPKE_KEY| is trivially movable. // Note that Rust may move this structure. See // bssl-crypto/src/scoped.rs:EvpHpkeKey. 
OPENSSL_memcpy(out, in, sizeof(EVP_HPKE_KEY)); EVP_HPKE_KEY_zero(in); } int EVP_HPKE_KEY_init(EVP_HPKE_KEY *key, const EVP_HPKE_KEM *kem, const uint8_t *priv_key, size_t priv_key_len) { EVP_HPKE_KEY_zero(key); key->kem = kem; if (!kem->init_key(key, priv_key, priv_key_len)) { key->kem = NULL; return 0; } return 1; } int EVP_HPKE_KEY_generate(EVP_HPKE_KEY *key, const EVP_HPKE_KEM *kem) { EVP_HPKE_KEY_zero(key); key->kem = kem; if (!kem->generate_key(key)) { key->kem = NULL; return 0; } return 1; } const EVP_HPKE_KEM *EVP_HPKE_KEY_kem(const EVP_HPKE_KEY *key) { return key->kem; } int EVP_HPKE_KEY_public_key(const EVP_HPKE_KEY *key, uint8_t *out, size_t *out_len, size_t max_out) { if (max_out < key->kem->public_key_len) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_BUFFER_SIZE); return 0; } OPENSSL_memcpy(out, key->public_key, key->kem->public_key_len); *out_len = key->kem->public_key_len; return 1; } int EVP_HPKE_KEY_private_key(const EVP_HPKE_KEY *key, uint8_t *out, size_t *out_len, size_t max_out) { if (max_out < key->kem->private_key_len) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_BUFFER_SIZE); return 0; } OPENSSL_memcpy(out, key->private_key, key->kem->private_key_len); *out_len = key->kem->private_key_len; return 1; } // Supported KDFs and AEADs. 
const EVP_HPKE_KDF *EVP_hpke_hkdf_sha256(void) { static const EVP_HPKE_KDF kKDF = {EVP_HPKE_HKDF_SHA256, &EVP_sha256}; return &kKDF; } uint16_t EVP_HPKE_KDF_id(const EVP_HPKE_KDF *kdf) { return kdf->id; } const EVP_MD *EVP_HPKE_KDF_hkdf_md(const EVP_HPKE_KDF *kdf) { return kdf->hkdf_md_func(); } const EVP_HPKE_AEAD *EVP_hpke_aes_128_gcm(void) { static const EVP_HPKE_AEAD kAEAD = {EVP_HPKE_AES_128_GCM, &EVP_aead_aes_128_gcm}; return &kAEAD; } const EVP_HPKE_AEAD *EVP_hpke_aes_256_gcm(void) { static const EVP_HPKE_AEAD kAEAD = {EVP_HPKE_AES_256_GCM, &EVP_aead_aes_256_gcm}; return &kAEAD; } const EVP_HPKE_AEAD *EVP_hpke_chacha20_poly1305(void) { static const EVP_HPKE_AEAD kAEAD = {EVP_HPKE_CHACHA20_POLY1305, &EVP_aead_chacha20_poly1305}; return &kAEAD; } uint16_t EVP_HPKE_AEAD_id(const EVP_HPKE_AEAD *aead) { return aead->id; } const EVP_AEAD *EVP_HPKE_AEAD_aead(const EVP_HPKE_AEAD *aead) { return aead->aead_func(); } // HPKE implementation. // This is strlen("HPKE") + 3 * sizeof(uint16_t). #define HPKE_SUITE_ID_LEN 10 // The suite_id for non-KEM pieces of HPKE is defined as concat("HPKE", // I2OSP(kem_id, 2), I2OSP(kdf_id, 2), I2OSP(aead_id, 2)). static int hpke_build_suite_id(const EVP_HPKE_CTX *ctx, uint8_t out[HPKE_SUITE_ID_LEN]) { CBB cbb; CBB_init_fixed(&cbb, out, HPKE_SUITE_ID_LEN); return add_label_string(&cbb, "HPKE") && // CBB_add_u16(&cbb, ctx->kem->id) && // CBB_add_u16(&cbb, ctx->kdf->id) && // CBB_add_u16(&cbb, ctx->aead->id); } #define HPKE_MODE_BASE 0 #define HPKE_MODE_AUTH 2 static int hpke_key_schedule(EVP_HPKE_CTX *ctx, uint8_t mode, const uint8_t *shared_secret, size_t shared_secret_len, const uint8_t *info, size_t info_len) { uint8_t suite_id[HPKE_SUITE_ID_LEN]; if (!hpke_build_suite_id(ctx, suite_id)) { return 0; } // psk_id_hash = LabeledExtract("", "psk_id_hash", psk_id) // TODO(davidben): Precompute this value and store it with the EVP_HPKE_KDF. 
const EVP_MD *hkdf_md = ctx->kdf->hkdf_md_func(); uint8_t psk_id_hash[EVP_MAX_MD_SIZE]; size_t psk_id_hash_len; if (!hpke_labeled_extract(hkdf_md, psk_id_hash, &psk_id_hash_len, NULL, 0, suite_id, sizeof(suite_id), "psk_id_hash", NULL, 0)) { return 0; } // info_hash = LabeledExtract("", "info_hash", info) uint8_t info_hash[EVP_MAX_MD_SIZE]; size_t info_hash_len; if (!hpke_labeled_extract(hkdf_md, info_hash, &info_hash_len, NULL, 0, suite_id, sizeof(suite_id), "info_hash", info, info_len)) { return 0; } // key_schedule_context = concat(mode, psk_id_hash, info_hash) uint8_t context[sizeof(uint8_t) + 2 * EVP_MAX_MD_SIZE]; size_t context_len; CBB context_cbb; CBB_init_fixed(&context_cbb, context, sizeof(context)); if (!CBB_add_u8(&context_cbb, mode) || !CBB_add_bytes(&context_cbb, psk_id_hash, psk_id_hash_len) || !CBB_add_bytes(&context_cbb, info_hash, info_hash_len) || !CBB_finish(&context_cbb, NULL, &context_len)) { return 0; } // secret = LabeledExtract(shared_secret, "secret", psk) uint8_t secret[EVP_MAX_MD_SIZE]; size_t secret_len; if (!hpke_labeled_extract(hkdf_md, secret, &secret_len, shared_secret, shared_secret_len, suite_id, sizeof(suite_id), "secret", NULL, 0)) { return 0; } // key = LabeledExpand(secret, "key", key_schedule_context, Nk) const EVP_AEAD *aead = EVP_HPKE_AEAD_aead(ctx->aead); uint8_t key[EVP_AEAD_MAX_KEY_LENGTH]; const size_t kKeyLen = EVP_AEAD_key_length(aead); if (!hpke_labeled_expand(hkdf_md, key, kKeyLen, secret, secret_len, suite_id, sizeof(suite_id), "key", context, context_len) || !EVP_AEAD_CTX_init(&ctx->aead_ctx, aead, key, kKeyLen, EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) { return 0; } // base_nonce = LabeledExpand(secret, "base_nonce", key_schedule_context, Nn) if (!hpke_labeled_expand(hkdf_md, ctx->base_nonce, EVP_AEAD_nonce_length(aead), secret, secret_len, suite_id, sizeof(suite_id), "base_nonce", context, context_len)) { return 0; } // exporter_secret = LabeledExpand(secret, "exp", key_schedule_context, Nh) if 
(!hpke_labeled_expand(hkdf_md, ctx->exporter_secret, EVP_MD_size(hkdf_md), secret, secret_len, suite_id, sizeof(suite_id), "exp", context, context_len)) { return 0; } return 1; } void EVP_HPKE_CTX_zero(EVP_HPKE_CTX *ctx) { OPENSSL_memset(ctx, 0, sizeof(EVP_HPKE_CTX)); EVP_AEAD_CTX_zero(&ctx->aead_ctx); } void EVP_HPKE_CTX_cleanup(EVP_HPKE_CTX *ctx) { EVP_AEAD_CTX_cleanup(&ctx->aead_ctx); } EVP_HPKE_CTX *EVP_HPKE_CTX_new(void) { EVP_HPKE_CTX *ctx = reinterpret_cast(OPENSSL_malloc(sizeof(EVP_HPKE_CTX))); if (ctx == NULL) { return NULL; } EVP_HPKE_CTX_zero(ctx); return ctx; } void EVP_HPKE_CTX_free(EVP_HPKE_CTX *ctx) { if (ctx != NULL) { EVP_HPKE_CTX_cleanup(ctx); OPENSSL_free(ctx); } } int EVP_HPKE_CTX_setup_sender(EVP_HPKE_CTX *ctx, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc, const EVP_HPKE_KEM *kem, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead, const uint8_t *peer_public_key, size_t peer_public_key_len, const uint8_t *info, size_t info_len) { uint8_t seed[MAX_SEED_LEN]; RAND_bytes(seed, kem->seed_len); return EVP_HPKE_CTX_setup_sender_with_seed_for_testing( ctx, out_enc, out_enc_len, max_enc, kem, kdf, aead, peer_public_key, peer_public_key_len, info, info_len, seed, kem->seed_len); } int EVP_HPKE_CTX_setup_sender_with_seed_for_testing( EVP_HPKE_CTX *ctx, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc, const EVP_HPKE_KEM *kem, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead, const uint8_t *peer_public_key, size_t peer_public_key_len, const uint8_t *info, size_t info_len, const uint8_t *seed, size_t seed_len) { EVP_HPKE_CTX_zero(ctx); ctx->is_sender = 1; ctx->kem = kem; ctx->kdf = kdf; ctx->aead = aead; uint8_t shared_secret[MAX_SHARED_SECRET_LEN]; size_t shared_secret_len; if (!kem->encap_with_seed(kem, shared_secret, &shared_secret_len, out_enc, out_enc_len, max_enc, peer_public_key, peer_public_key_len, seed, seed_len) || !hpke_key_schedule(ctx, HPKE_MODE_BASE, shared_secret, shared_secret_len, info, info_len)) { 
EVP_HPKE_CTX_cleanup(ctx); return 0; } return 1; } int EVP_HPKE_CTX_setup_recipient(EVP_HPKE_CTX *ctx, const EVP_HPKE_KEY *key, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead, const uint8_t *enc, size_t enc_len, const uint8_t *info, size_t info_len) { EVP_HPKE_CTX_zero(ctx); ctx->is_sender = 0; ctx->kem = key->kem; ctx->kdf = kdf; ctx->aead = aead; uint8_t shared_secret[MAX_SHARED_SECRET_LEN]; size_t shared_secret_len; if (!key->kem->decap(key, shared_secret, &shared_secret_len, enc, enc_len) || !hpke_key_schedule(ctx, HPKE_MODE_BASE, shared_secret, shared_secret_len, info, info_len)) { EVP_HPKE_CTX_cleanup(ctx); return 0; } return 1; } int EVP_HPKE_CTX_setup_auth_sender( EVP_HPKE_CTX *ctx, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc, const EVP_HPKE_KEY *key, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead, const uint8_t *peer_public_key, size_t peer_public_key_len, const uint8_t *info, size_t info_len) { uint8_t seed[MAX_SEED_LEN]; RAND_bytes(seed, key->kem->seed_len); return EVP_HPKE_CTX_setup_auth_sender_with_seed_for_testing( ctx, out_enc, out_enc_len, max_enc, key, kdf, aead, peer_public_key, peer_public_key_len, info, info_len, seed, key->kem->seed_len); } int EVP_HPKE_CTX_setup_auth_sender_with_seed_for_testing( EVP_HPKE_CTX *ctx, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc, const EVP_HPKE_KEY *key, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead, const uint8_t *peer_public_key, size_t peer_public_key_len, const uint8_t *info, size_t info_len, const uint8_t *seed, size_t seed_len) { if (key->kem->auth_encap_with_seed == NULL) { // Not all HPKE KEMs support AuthEncap. 
OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } EVP_HPKE_CTX_zero(ctx); ctx->is_sender = 1; ctx->kem = key->kem; ctx->kdf = kdf; ctx->aead = aead; uint8_t shared_secret[MAX_SHARED_SECRET_LEN]; size_t shared_secret_len; if (!key->kem->auth_encap_with_seed( key, shared_secret, &shared_secret_len, out_enc, out_enc_len, max_enc, peer_public_key, peer_public_key_len, seed, seed_len) || !hpke_key_schedule(ctx, HPKE_MODE_AUTH, shared_secret, shared_secret_len, info, info_len)) { EVP_HPKE_CTX_cleanup(ctx); return 0; } return 1; } int EVP_HPKE_CTX_setup_auth_recipient( EVP_HPKE_CTX *ctx, const EVP_HPKE_KEY *key, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead, const uint8_t *enc, size_t enc_len, const uint8_t *info, size_t info_len, const uint8_t *peer_public_key, size_t peer_public_key_len) { if (key->kem->auth_decap == NULL) { // Not all HPKE KEMs support AuthDecap. OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } EVP_HPKE_CTX_zero(ctx); ctx->is_sender = 0; ctx->kem = key->kem; ctx->kdf = kdf; ctx->aead = aead; uint8_t shared_secret[MAX_SHARED_SECRET_LEN]; size_t shared_secret_len; if (!key->kem->auth_decap(key, shared_secret, &shared_secret_len, enc, enc_len, peer_public_key, peer_public_key_len) || !hpke_key_schedule(ctx, HPKE_MODE_AUTH, shared_secret, shared_secret_len, info, info_len)) { EVP_HPKE_CTX_cleanup(ctx); return 0; } return 1; } static void hpke_nonce(const EVP_HPKE_CTX *ctx, uint8_t *out_nonce, size_t nonce_len) { assert(nonce_len >= 8); // Write padded big-endian bytes of |ctx->seq| to |out_nonce|. OPENSSL_memset(out_nonce, 0, nonce_len); uint64_t seq_copy = ctx->seq; for (size_t i = 0; i < 8; i++) { out_nonce[nonce_len - i - 1] = seq_copy & 0xff; seq_copy >>= 8; } // XOR the encoded sequence with the |ctx->base_nonce|. 
for (size_t i = 0; i < nonce_len; i++) { out_nonce[i] ^= ctx->base_nonce[i]; } } int EVP_HPKE_CTX_open(EVP_HPKE_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *in, size_t in_len, const uint8_t *ad, size_t ad_len) { if (ctx->is_sender) { OPENSSL_PUT_ERROR(EVP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } if (ctx->seq == UINT64_MAX) { OPENSSL_PUT_ERROR(EVP, ERR_R_OVERFLOW); return 0; } uint8_t nonce[EVP_AEAD_MAX_NONCE_LENGTH]; const size_t nonce_len = EVP_AEAD_nonce_length(ctx->aead_ctx.aead); hpke_nonce(ctx, nonce, nonce_len); if (!EVP_AEAD_CTX_open(&ctx->aead_ctx, out, out_len, max_out_len, nonce, nonce_len, in, in_len, ad, ad_len)) { return 0; } ctx->seq++; return 1; } int EVP_HPKE_CTX_seal(EVP_HPKE_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *in, size_t in_len, const uint8_t *ad, size_t ad_len) { if (!ctx->is_sender) { OPENSSL_PUT_ERROR(EVP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } if (ctx->seq == UINT64_MAX) { OPENSSL_PUT_ERROR(EVP, ERR_R_OVERFLOW); return 0; } uint8_t nonce[EVP_AEAD_MAX_NONCE_LENGTH]; const size_t nonce_len = EVP_AEAD_nonce_length(ctx->aead_ctx.aead); hpke_nonce(ctx, nonce, nonce_len); if (!EVP_AEAD_CTX_seal(&ctx->aead_ctx, out, out_len, max_out_len, nonce, nonce_len, in, in_len, ad, ad_len)) { return 0; } ctx->seq++; return 1; } int EVP_HPKE_CTX_export(const EVP_HPKE_CTX *ctx, uint8_t *out, size_t secret_len, const uint8_t *context, size_t context_len) { uint8_t suite_id[HPKE_SUITE_ID_LEN]; if (!hpke_build_suite_id(ctx, suite_id)) { return 0; } const EVP_MD *hkdf_md = ctx->kdf->hkdf_md_func(); if (!hpke_labeled_expand(hkdf_md, out, secret_len, ctx->exporter_secret, EVP_MD_size(hkdf_md), suite_id, sizeof(suite_id), "sec", context, context_len)) { return 0; } return 1; } size_t EVP_HPKE_CTX_max_overhead(const EVP_HPKE_CTX *ctx) { assert(ctx->is_sender); return EVP_AEAD_max_overhead(EVP_AEAD_CTX_aead(&ctx->aead_ctx)); } const EVP_HPKE_KEM *EVP_HPKE_CTX_kem(const EVP_HPKE_CTX 
    *ctx) {
  return ctx->kem;
}

// Accessors for the suite components recorded on an HPKE context.
const EVP_HPKE_AEAD *EVP_HPKE_CTX_aead(const EVP_HPKE_CTX *ctx) {
  return ctx->aead;
}

const EVP_HPKE_KDF *EVP_HPKE_CTX_kdf(const EVP_HPKE_CTX *ctx) {
  return ctx->kdf;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/hrss/asm/poly_rq_mul.S
================================================
#define BORINGSSL_PREFIX CNIOBoringSSL
#if defined(__x86_64__) && defined(__linux__)
// Copyright (c) 2017, the HRSS authors.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

// NOTE(review): the #include directive below appears to have lost its
// target in extraction (angle-bracketed header names were stripped
// elsewhere in this chunk too) — confirm against the upstream file.
#include

#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_SMALL) && defined(OPENSSL_LINUX) && defined(OPENSSL_X86_64)

// This is the polynomial multiplication function from [HRSS], provided by kind
// permission of the authors.
// // HRSS: https://eprint.iacr.org/2017/1005 # This file was generated by poly_rq_mul.py .text .align 32 const3: .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 const9: .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 const0: .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 const729: .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 const3_inv: .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 const5_inv: .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 shuf48_16: .byte 10 .byte 11 .byte 12 .byte 13 .byte 14 .byte 15 .byte 0 .byte 1 .byte 2 .byte 3 .byte 4 .byte 5 .byte 6 .byte 7 .byte 8 .byte 9 .byte 10 .byte 11 .byte 12 .byte 13 .byte 14 .byte 15 .byte 0 .byte 1 .byte 2 .byte 3 .byte 4 .byte 5 .byte 6 .byte 7 .byte 8 .byte 9 shufmin1_mask3: .byte 2 .byte 3 .byte 4 .byte 5 .byte 6 .byte 7 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 mask32_to_16: .word 0xffff .word 0x0 .word 0xffff .word 0x0 .word 0xffff .word 0x0 .word 0xffff .word 0x0 .word 0xffff .word 0x0 .word 0xffff .word 0x0 .word 0xffff .word 0x0 .word 0xffff .word 0x0 mask5_3_5_3: .word 0 .word 0 .word 0 .word 65535 .word 65535 .word 65535 .word 65535 .word 65535 .word 0 
.word 0 .word 0 .word 65535 .word 65535 .word 65535 .word 65535 .word 65535 mask3_5_3_5: .word 65535 .word 65535 .word 65535 .word 0 .word 0 .word 0 .word 0 .word 0 .word 65535 .word 65535 .word 65535 .word 0 .word 0 .word 0 .word 0 .word 0 mask3_5_4_3_1: .word 65535 .word 65535 .word 65535 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 65535 .word 65535 .word 65535 .word 0 mask_keephigh: .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 65535 .word 65535 .word 65535 .word 65535 .word 65535 .word 65535 .word 65535 .word 65535 mask_mod8192: .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .text .global poly_Rq_mul .hidden poly_Rq_mul .type poly_Rq_mul, @function .att_syntax prefix poly_Rq_mul: .cfi_startproc _CET_ENDBR push %rbp .cfi_adjust_cfa_offset 8 .cfi_offset rbp, -16 movq %rsp, %rbp .cfi_def_cfa_register rbp push %r12 .cfi_offset r12, -24 # This function originally used a significant amount of stack space. As an # alternative, the needed scratch space is now passed in as the 4th argument. # The amount of scratch space used must thus be kept in sync with # POLY_MUL_RQ_SCRATCH_SPACE in internal.h. # # Setting RSP to point into the given scratch space upsets the ABI tests # therefore all references to RSP are switched to R8. 
mov %rcx, %r8 addq $6144+12288+512+9408+32, %r8 mov %r8, %rax subq $6144, %r8 mov %r8, %r11 subq $12288, %r8 mov %r8, %r12 subq $512, %r8 vmovdqa const3(%rip), %ymm3 vmovdqu 0(%rsi), %ymm0 vmovdqu 88(%rsi), %ymm1 vmovdqu 176(%rsi), %ymm2 vmovdqu 264(%rsi), %ymm12 vmovdqu 1056(%rsi), %ymm4 vmovdqu 1144(%rsi), %ymm5 vmovdqu 1232(%rsi), %ymm6 vmovdqu 1320(%rsi), %ymm7 vmovdqu 352(%rsi), %ymm8 vmovdqu 440(%rsi), %ymm9 vmovdqu 528(%rsi), %ymm10 vmovdqu 616(%rsi), %ymm11 vmovdqa %ymm0, 0(%rax) vmovdqa %ymm1, 96(%rax) vpaddw %ymm0, %ymm1, %ymm14 vmovdqa %ymm14, 192(%rax) vmovdqa %ymm2, 288(%rax) vmovdqa %ymm12, 384(%rax) vpaddw %ymm2, %ymm12, %ymm14 vmovdqa %ymm14, 480(%rax) vpaddw %ymm0, %ymm2, %ymm14 vmovdqa %ymm14, 576(%rax) vpaddw %ymm1, %ymm12, %ymm15 vmovdqa %ymm15, 672(%rax) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 768(%rax) vmovdqa %ymm4, 5184(%rax) vmovdqa %ymm5, 5280(%rax) vpaddw %ymm4, %ymm5, %ymm14 vmovdqa %ymm14, 5376(%rax) vmovdqa %ymm6, 5472(%rax) vmovdqa %ymm7, 5568(%rax) vpaddw %ymm6, %ymm7, %ymm14 vmovdqa %ymm14, 5664(%rax) vpaddw %ymm4, %ymm6, %ymm14 vmovdqa %ymm14, 5760(%rax) vpaddw %ymm5, %ymm7, %ymm15 vmovdqa %ymm15, 5856(%rax) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 5952(%rax) vmovdqa %ymm0, 0(%r8) vmovdqa %ymm1, 32(%r8) vmovdqa %ymm2, 64(%r8) vmovdqa %ymm12, 96(%r8) vmovdqa %ymm8, 128(%r8) vmovdqa %ymm9, 160(%r8) vmovdqa %ymm10, 192(%r8) vmovdqa %ymm11, 224(%r8) vmovdqu 704(%rsi), %ymm0 vpaddw 0(%r8), %ymm0, %ymm1 vpaddw 128(%r8), %ymm4, %ymm2 vpaddw %ymm2, %ymm1, %ymm8 vpsubw %ymm2, %ymm1, %ymm12 vmovdqa %ymm0, 256(%r8) vmovdqu 792(%rsi), %ymm0 vpaddw 32(%r8), %ymm0, %ymm1 vpaddw 160(%r8), %ymm5, %ymm2 vpaddw %ymm2, %ymm1, %ymm9 vpsubw %ymm2, %ymm1, %ymm13 vmovdqa %ymm0, 288(%r8) vmovdqu 880(%rsi), %ymm0 vpaddw 64(%r8), %ymm0, %ymm1 vpaddw 192(%r8), %ymm6, %ymm2 vpaddw %ymm2, %ymm1, %ymm10 vpsubw %ymm2, %ymm1, %ymm14 vmovdqa %ymm0, 320(%r8) vmovdqu 968(%rsi), %ymm0 vpaddw 96(%r8), %ymm0, %ymm1 vpaddw 224(%r8), %ymm7, %ymm2 vpaddw %ymm2, 
%ymm1, %ymm11 vpsubw %ymm2, %ymm1, %ymm15 vmovdqa %ymm0, 352(%r8) vmovdqa %ymm8, 864(%rax) vmovdqa %ymm9, 960(%rax) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 1056(%rax) vmovdqa %ymm10, 1152(%rax) vmovdqa %ymm11, 1248(%rax) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 1344(%rax) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 1440(%rax) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 1536(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 1632(%rax) vmovdqa %ymm12, 1728(%rax) vmovdqa %ymm13, 1824(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 1920(%rax) vmovdqa %ymm14, 2016(%rax) vmovdqa %ymm15, 2112(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 2208(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 2304(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 2400(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 2496(%rax) vmovdqa 256(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm0 vpsllw $2, %ymm4, %ymm1 vpaddw 128(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm8 vpsubw %ymm1, %ymm0, %ymm12 vmovdqa 288(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm0 vpsllw $2, %ymm5, %ymm1 vpaddw 160(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm9 vpsubw %ymm1, %ymm0, %ymm13 vmovdqa 320(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm0 vpsllw $2, %ymm6, %ymm1 vpaddw 192(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm10 vpsubw %ymm1, %ymm0, %ymm14 vmovdqa 352(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm0 vpsllw $2, %ymm7, %ymm1 vpaddw 224(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm11 vpsubw %ymm1, %ymm0, %ymm15 vmovdqa %ymm8, 2592(%rax) vmovdqa %ymm9, 2688(%rax) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 2784(%rax) vmovdqa %ymm10, 2880(%rax) vmovdqa %ymm11, 2976(%rax) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 3072(%rax) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 3168(%rax) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 3264(%rax) vpaddw %ymm0, 
%ymm1, %ymm0 vmovdqa %ymm0, 3360(%rax) vmovdqa %ymm12, 3456(%rax) vmovdqa %ymm13, 3552(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 3648(%rax) vmovdqa %ymm14, 3744(%rax) vmovdqa %ymm15, 3840(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 3936(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4032(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4128(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 4224(%rax) vpmullw %ymm3, %ymm4, %ymm0 vpaddw 256(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 128(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm12 vpmullw %ymm3, %ymm5, %ymm0 vpaddw 288(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 160(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm13 vpmullw %ymm3, %ymm6, %ymm0 vpaddw 320(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 192(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm14 vpmullw %ymm3, %ymm7, %ymm0 vpaddw 352(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 224(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm15 vmovdqa %ymm12, 4320(%rax) vmovdqa %ymm13, 4416(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 4512(%rax) vmovdqa %ymm14, 4608(%rax) vmovdqa %ymm15, 4704(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4800(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4896(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4992(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 5088(%rax) vmovdqu 32(%rsi), %ymm0 vmovdqu 120(%rsi), %ymm1 vmovdqu 208(%rsi), %ymm2 vmovdqu 296(%rsi), %ymm12 vmovdqu 1088(%rsi), %ymm4 vmovdqu 1176(%rsi), %ymm5 vmovdqu 1264(%rsi), %ymm6 vmovdqu 1352(%rsi), %ymm7 vmovdqu 384(%rsi), %ymm8 vmovdqu 472(%rsi), %ymm9 vmovdqu 560(%rsi), %ymm10 vmovdqu 648(%rsi), %ymm11 vmovdqa %ymm0, 32(%rax) vmovdqa %ymm1, 128(%rax) vpaddw %ymm0, %ymm1, %ymm14 vmovdqa %ymm14, 224(%rax) vmovdqa %ymm2, 320(%rax) vmovdqa %ymm12, 416(%rax) vpaddw %ymm2, %ymm12, %ymm14 vmovdqa 
%ymm14, 512(%rax) vpaddw %ymm0, %ymm2, %ymm14 vmovdqa %ymm14, 608(%rax) vpaddw %ymm1, %ymm12, %ymm15 vmovdqa %ymm15, 704(%rax) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 800(%rax) vmovdqa %ymm4, 5216(%rax) vmovdqa %ymm5, 5312(%rax) vpaddw %ymm4, %ymm5, %ymm14 vmovdqa %ymm14, 5408(%rax) vmovdqa %ymm6, 5504(%rax) vmovdqa %ymm7, 5600(%rax) vpaddw %ymm6, %ymm7, %ymm14 vmovdqa %ymm14, 5696(%rax) vpaddw %ymm4, %ymm6, %ymm14 vmovdqa %ymm14, 5792(%rax) vpaddw %ymm5, %ymm7, %ymm15 vmovdqa %ymm15, 5888(%rax) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 5984(%rax) vmovdqa %ymm0, 0(%r8) vmovdqa %ymm1, 32(%r8) vmovdqa %ymm2, 64(%r8) vmovdqa %ymm12, 96(%r8) vmovdqa %ymm8, 128(%r8) vmovdqa %ymm9, 160(%r8) vmovdqa %ymm10, 192(%r8) vmovdqa %ymm11, 224(%r8) vmovdqu 736(%rsi), %ymm0 vpaddw 0(%r8), %ymm0, %ymm1 vpaddw 128(%r8), %ymm4, %ymm2 vpaddw %ymm2, %ymm1, %ymm8 vpsubw %ymm2, %ymm1, %ymm12 vmovdqa %ymm0, 256(%r8) vmovdqu 824(%rsi), %ymm0 vpaddw 32(%r8), %ymm0, %ymm1 vpaddw 160(%r8), %ymm5, %ymm2 vpaddw %ymm2, %ymm1, %ymm9 vpsubw %ymm2, %ymm1, %ymm13 vmovdqa %ymm0, 288(%r8) vmovdqu 912(%rsi), %ymm0 vpaddw 64(%r8), %ymm0, %ymm1 vpaddw 192(%r8), %ymm6, %ymm2 vpaddw %ymm2, %ymm1, %ymm10 vpsubw %ymm2, %ymm1, %ymm14 vmovdqa %ymm0, 320(%r8) vmovdqu 1000(%rsi), %ymm0 vpaddw 96(%r8), %ymm0, %ymm1 vpaddw 224(%r8), %ymm7, %ymm2 vpaddw %ymm2, %ymm1, %ymm11 vpsubw %ymm2, %ymm1, %ymm15 vmovdqa %ymm0, 352(%r8) vmovdqa %ymm8, 896(%rax) vmovdqa %ymm9, 992(%rax) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 1088(%rax) vmovdqa %ymm10, 1184(%rax) vmovdqa %ymm11, 1280(%rax) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 1376(%rax) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 1472(%rax) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 1568(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 1664(%rax) vmovdqa %ymm12, 1760(%rax) vmovdqa %ymm13, 1856(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 1952(%rax) vmovdqa %ymm14, 2048(%rax) vmovdqa %ymm15, 2144(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 2240(%rax) 
vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 2336(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 2432(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 2528(%rax) vmovdqa 256(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm0 vpsllw $2, %ymm4, %ymm1 vpaddw 128(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm8 vpsubw %ymm1, %ymm0, %ymm12 vmovdqa 288(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm0 vpsllw $2, %ymm5, %ymm1 vpaddw 160(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm9 vpsubw %ymm1, %ymm0, %ymm13 vmovdqa 320(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm0 vpsllw $2, %ymm6, %ymm1 vpaddw 192(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm10 vpsubw %ymm1, %ymm0, %ymm14 vmovdqa 352(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm0 vpsllw $2, %ymm7, %ymm1 vpaddw 224(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm11 vpsubw %ymm1, %ymm0, %ymm15 vmovdqa %ymm8, 2624(%rax) vmovdqa %ymm9, 2720(%rax) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 2816(%rax) vmovdqa %ymm10, 2912(%rax) vmovdqa %ymm11, 3008(%rax) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 3104(%rax) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 3200(%rax) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 3296(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 3392(%rax) vmovdqa %ymm12, 3488(%rax) vmovdqa %ymm13, 3584(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 3680(%rax) vmovdqa %ymm14, 3776(%rax) vmovdqa %ymm15, 3872(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 3968(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4064(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4160(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 4256(%rax) vpmullw %ymm3, %ymm4, %ymm0 vpaddw 256(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 128(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm12 vpmullw %ymm3, %ymm5, %ymm0 vpaddw 288(%r8), %ymm0, %ymm0 vpmullw 
%ymm3, %ymm0, %ymm0 vpaddw 160(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm13 vpmullw %ymm3, %ymm6, %ymm0 vpaddw 320(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 192(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm14 vpmullw %ymm3, %ymm7, %ymm0 vpaddw 352(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 224(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm15 vmovdqa %ymm12, 4352(%rax) vmovdqa %ymm13, 4448(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 4544(%rax) vmovdqa %ymm14, 4640(%rax) vmovdqa %ymm15, 4736(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4832(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4928(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 5024(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 5120(%rax) vmovdqu 64(%rsi), %ymm0 vmovdqu 152(%rsi), %ymm1 vmovdqu 240(%rsi), %ymm2 vmovdqu 328(%rsi), %ymm12 vmovdqu 1120(%rsi), %ymm4 vmovdqu 1208(%rsi), %ymm5 vmovdqu 1296(%rsi), %ymm6 # Only 18 bytes more can be read, but vmovdqu reads 32. # Copy 18 bytes to the red zone and zero pad to 32 bytes. 
xor %r9, %r9 movq %r9, -16(%rsp) movq %r9, -8(%rsp) movq 1384(%rsi), %r9 movq %r9, -32(%rsp) movq 1384+8(%rsi), %r9 movq %r9, -24(%rsp) movw 1384+16(%rsi), %r9w movw %r9w, -16(%rsp) vmovdqu -32(%rsp), %ymm7 vmovdqu 416(%rsi), %ymm8 vmovdqu 504(%rsi), %ymm9 vmovdqu 592(%rsi), %ymm10 vmovdqu 680(%rsi), %ymm11 vmovdqa %ymm0, 64(%rax) vmovdqa %ymm1, 160(%rax) vpaddw %ymm0, %ymm1, %ymm14 vmovdqa %ymm14, 256(%rax) vmovdqa %ymm2, 352(%rax) vmovdqa %ymm12, 448(%rax) vpaddw %ymm2, %ymm12, %ymm14 vmovdqa %ymm14, 544(%rax) vpaddw %ymm0, %ymm2, %ymm14 vmovdqa %ymm14, 640(%rax) vpaddw %ymm1, %ymm12, %ymm15 vmovdqa %ymm15, 736(%rax) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 832(%rax) vmovdqa %ymm4, 5248(%rax) vmovdqa %ymm5, 5344(%rax) vpaddw %ymm4, %ymm5, %ymm14 vmovdqa %ymm14, 5440(%rax) vmovdqa %ymm6, 5536(%rax) vmovdqa %ymm7, 5632(%rax) vpaddw %ymm6, %ymm7, %ymm14 vmovdqa %ymm14, 5728(%rax) vpaddw %ymm4, %ymm6, %ymm14 vmovdqa %ymm14, 5824(%rax) vpaddw %ymm5, %ymm7, %ymm15 vmovdqa %ymm15, 5920(%rax) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 6016(%rax) vmovdqa %ymm0, 0(%r8) vmovdqa %ymm1, 32(%r8) vmovdqa %ymm2, 64(%r8) vmovdqa %ymm12, 96(%r8) vmovdqa %ymm8, 128(%r8) vmovdqa %ymm9, 160(%r8) vmovdqa %ymm10, 192(%r8) vmovdqa %ymm11, 224(%r8) vmovdqu 768(%rsi), %ymm0 vpaddw 0(%r8), %ymm0, %ymm1 vpaddw 128(%r8), %ymm4, %ymm2 vpaddw %ymm2, %ymm1, %ymm8 vpsubw %ymm2, %ymm1, %ymm12 vmovdqa %ymm0, 256(%r8) vmovdqu 856(%rsi), %ymm0 vpaddw 32(%r8), %ymm0, %ymm1 vpaddw 160(%r8), %ymm5, %ymm2 vpaddw %ymm2, %ymm1, %ymm9 vpsubw %ymm2, %ymm1, %ymm13 vmovdqa %ymm0, 288(%r8) vmovdqu 944(%rsi), %ymm0 vpaddw 64(%r8), %ymm0, %ymm1 vpaddw 192(%r8), %ymm6, %ymm2 vpaddw %ymm2, %ymm1, %ymm10 vpsubw %ymm2, %ymm1, %ymm14 vmovdqa %ymm0, 320(%r8) vmovdqu 1032(%rsi), %ymm0 vpaddw 96(%r8), %ymm0, %ymm1 vpaddw 224(%r8), %ymm7, %ymm2 vpaddw %ymm2, %ymm1, %ymm11 vpsubw %ymm2, %ymm1, %ymm15 vmovdqa %ymm0, 352(%r8) vmovdqa %ymm8, 928(%rax) vmovdqa %ymm9, 1024(%rax) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa 
%ymm0, 1120(%rax) vmovdqa %ymm10, 1216(%rax) vmovdqa %ymm11, 1312(%rax) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 1408(%rax) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 1504(%rax) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 1600(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 1696(%rax) vmovdqa %ymm12, 1792(%rax) vmovdqa %ymm13, 1888(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 1984(%rax) vmovdqa %ymm14, 2080(%rax) vmovdqa %ymm15, 2176(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 2272(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 2368(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 2464(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 2560(%rax) vmovdqa 256(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm0 vpsllw $2, %ymm4, %ymm1 vpaddw 128(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm8 vpsubw %ymm1, %ymm0, %ymm12 vmovdqa 288(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm0 vpsllw $2, %ymm5, %ymm1 vpaddw 160(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm9 vpsubw %ymm1, %ymm0, %ymm13 vmovdqa 320(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm0 vpsllw $2, %ymm6, %ymm1 vpaddw 192(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm10 vpsubw %ymm1, %ymm0, %ymm14 vmovdqa 352(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm0 vpsllw $2, %ymm7, %ymm1 vpaddw 224(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm11 vpsubw %ymm1, %ymm0, %ymm15 vmovdqa %ymm8, 2656(%rax) vmovdqa %ymm9, 2752(%rax) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 2848(%rax) vmovdqa %ymm10, 2944(%rax) vmovdqa %ymm11, 3040(%rax) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 3136(%rax) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 3232(%rax) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 3328(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 3424(%rax) vmovdqa %ymm12, 3520(%rax) vmovdqa %ymm13, 3616(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 3712(%rax) 
vmovdqa %ymm14, 3808(%rax) vmovdqa %ymm15, 3904(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4000(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4096(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4192(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 4288(%rax) vpmullw %ymm3, %ymm4, %ymm0 vpaddw 256(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 128(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm12 vpmullw %ymm3, %ymm5, %ymm0 vpaddw 288(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 160(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm13 vpmullw %ymm3, %ymm6, %ymm0 vpaddw 320(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 192(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm14 vpmullw %ymm3, %ymm7, %ymm0 vpaddw 352(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 224(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm15 vmovdqa %ymm12, 4384(%rax) vmovdqa %ymm13, 4480(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 4576(%rax) vmovdqa %ymm14, 4672(%rax) vmovdqa %ymm15, 4768(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4864(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4960(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 5056(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 5152(%rax) vmovdqu 0(%rdx), %ymm0 vmovdqu 88(%rdx), %ymm1 vmovdqu 176(%rdx), %ymm2 vmovdqu 264(%rdx), %ymm12 vmovdqu 1056(%rdx), %ymm4 vmovdqu 1144(%rdx), %ymm5 vmovdqu 1232(%rdx), %ymm6 vmovdqu 1320(%rdx), %ymm7 vmovdqu 352(%rdx), %ymm8 vmovdqu 440(%rdx), %ymm9 vmovdqu 528(%rdx), %ymm10 vmovdqu 616(%rdx), %ymm11 vmovdqa %ymm0, 0(%r11) vmovdqa %ymm1, 96(%r11) vpaddw %ymm0, %ymm1, %ymm14 vmovdqa %ymm14, 192(%r11) vmovdqa %ymm2, 288(%r11) vmovdqa %ymm12, 384(%r11) vpaddw %ymm2, %ymm12, %ymm14 vmovdqa %ymm14, 480(%r11) vpaddw %ymm0, %ymm2, %ymm14 vmovdqa %ymm14, 576(%r11) vpaddw %ymm1, %ymm12, %ymm15 vmovdqa %ymm15, 672(%r11) vpaddw %ymm14, %ymm15, %ymm14 
vmovdqa %ymm14, 768(%r11) vmovdqa %ymm4, 5184(%r11) vmovdqa %ymm5, 5280(%r11) vpaddw %ymm4, %ymm5, %ymm14 vmovdqa %ymm14, 5376(%r11) vmovdqa %ymm6, 5472(%r11) vmovdqa %ymm7, 5568(%r11) vpaddw %ymm6, %ymm7, %ymm14 vmovdqa %ymm14, 5664(%r11) vpaddw %ymm4, %ymm6, %ymm14 vmovdqa %ymm14, 5760(%r11) vpaddw %ymm5, %ymm7, %ymm15 vmovdqa %ymm15, 5856(%r11) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 5952(%r11) vmovdqa %ymm0, 0(%r8) vmovdqa %ymm1, 32(%r8) vmovdqa %ymm2, 64(%r8) vmovdqa %ymm12, 96(%r8) vmovdqa %ymm8, 128(%r8) vmovdqa %ymm9, 160(%r8) vmovdqa %ymm10, 192(%r8) vmovdqa %ymm11, 224(%r8) vmovdqu 704(%rdx), %ymm0 vpaddw 0(%r8), %ymm0, %ymm1 vpaddw 128(%r8), %ymm4, %ymm2 vpaddw %ymm2, %ymm1, %ymm8 vpsubw %ymm2, %ymm1, %ymm12 vmovdqa %ymm0, 256(%r8) vmovdqu 792(%rdx), %ymm0 vpaddw 32(%r8), %ymm0, %ymm1 vpaddw 160(%r8), %ymm5, %ymm2 vpaddw %ymm2, %ymm1, %ymm9 vpsubw %ymm2, %ymm1, %ymm13 vmovdqa %ymm0, 288(%r8) vmovdqu 880(%rdx), %ymm0 vpaddw 64(%r8), %ymm0, %ymm1 vpaddw 192(%r8), %ymm6, %ymm2 vpaddw %ymm2, %ymm1, %ymm10 vpsubw %ymm2, %ymm1, %ymm14 vmovdqa %ymm0, 320(%r8) vmovdqu 968(%rdx), %ymm0 vpaddw 96(%r8), %ymm0, %ymm1 vpaddw 224(%r8), %ymm7, %ymm2 vpaddw %ymm2, %ymm1, %ymm11 vpsubw %ymm2, %ymm1, %ymm15 vmovdqa %ymm0, 352(%r8) vmovdqa %ymm8, 864(%r11) vmovdqa %ymm9, 960(%r11) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 1056(%r11) vmovdqa %ymm10, 1152(%r11) vmovdqa %ymm11, 1248(%r11) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 1344(%r11) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 1440(%r11) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 1536(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 1632(%r11) vmovdqa %ymm12, 1728(%r11) vmovdqa %ymm13, 1824(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 1920(%r11) vmovdqa %ymm14, 2016(%r11) vmovdqa %ymm15, 2112(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 2208(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 2304(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 2400(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 
2496(%r11) vmovdqa 256(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm0 vpsllw $2, %ymm4, %ymm1 vpaddw 128(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm8 vpsubw %ymm1, %ymm0, %ymm12 vmovdqa 288(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm0 vpsllw $2, %ymm5, %ymm1 vpaddw 160(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm9 vpsubw %ymm1, %ymm0, %ymm13 vmovdqa 320(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm0 vpsllw $2, %ymm6, %ymm1 vpaddw 192(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm10 vpsubw %ymm1, %ymm0, %ymm14 vmovdqa 352(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm0 vpsllw $2, %ymm7, %ymm1 vpaddw 224(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm11 vpsubw %ymm1, %ymm0, %ymm15 vmovdqa %ymm8, 2592(%r11) vmovdqa %ymm9, 2688(%r11) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 2784(%r11) vmovdqa %ymm10, 2880(%r11) vmovdqa %ymm11, 2976(%r11) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 3072(%r11) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 3168(%r11) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 3264(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 3360(%r11) vmovdqa %ymm12, 3456(%r11) vmovdqa %ymm13, 3552(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 3648(%r11) vmovdqa %ymm14, 3744(%r11) vmovdqa %ymm15, 3840(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 3936(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4032(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4128(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 4224(%r11) vpmullw %ymm3, %ymm4, %ymm0 vpaddw 256(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 128(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm12 vpmullw %ymm3, %ymm5, %ymm0 vpaddw 288(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 160(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm13 vpmullw %ymm3, %ymm6, %ymm0 vpaddw 320(%r8), 
%ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 192(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm14 vpmullw %ymm3, %ymm7, %ymm0 vpaddw 352(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 224(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm15 vmovdqa %ymm12, 4320(%r11) vmovdqa %ymm13, 4416(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 4512(%r11) vmovdqa %ymm14, 4608(%r11) vmovdqa %ymm15, 4704(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4800(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4896(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4992(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 5088(%r11) vmovdqu 32(%rdx), %ymm0 vmovdqu 120(%rdx), %ymm1 vmovdqu 208(%rdx), %ymm2 vmovdqu 296(%rdx), %ymm12 vmovdqu 1088(%rdx), %ymm4 vmovdqu 1176(%rdx), %ymm5 vmovdqu 1264(%rdx), %ymm6 vmovdqu 1352(%rdx), %ymm7 vmovdqu 384(%rdx), %ymm8 vmovdqu 472(%rdx), %ymm9 vmovdqu 560(%rdx), %ymm10 vmovdqu 648(%rdx), %ymm11 vmovdqa %ymm0, 32(%r11) vmovdqa %ymm1, 128(%r11) vpaddw %ymm0, %ymm1, %ymm14 vmovdqa %ymm14, 224(%r11) vmovdqa %ymm2, 320(%r11) vmovdqa %ymm12, 416(%r11) vpaddw %ymm2, %ymm12, %ymm14 vmovdqa %ymm14, 512(%r11) vpaddw %ymm0, %ymm2, %ymm14 vmovdqa %ymm14, 608(%r11) vpaddw %ymm1, %ymm12, %ymm15 vmovdqa %ymm15, 704(%r11) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 800(%r11) vmovdqa %ymm4, 5216(%r11) vmovdqa %ymm5, 5312(%r11) vpaddw %ymm4, %ymm5, %ymm14 vmovdqa %ymm14, 5408(%r11) vmovdqa %ymm6, 5504(%r11) vmovdqa %ymm7, 5600(%r11) vpaddw %ymm6, %ymm7, %ymm14 vmovdqa %ymm14, 5696(%r11) vpaddw %ymm4, %ymm6, %ymm14 vmovdqa %ymm14, 5792(%r11) vpaddw %ymm5, %ymm7, %ymm15 vmovdqa %ymm15, 5888(%r11) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 5984(%r11) vmovdqa %ymm0, 0(%r8) vmovdqa %ymm1, 32(%r8) vmovdqa %ymm2, 64(%r8) vmovdqa %ymm12, 96(%r8) vmovdqa %ymm8, 128(%r8) vmovdqa %ymm9, 160(%r8) vmovdqa %ymm10, 192(%r8) vmovdqa %ymm11, 224(%r8) vmovdqu 736(%rdx), %ymm0 vpaddw 0(%r8), %ymm0, %ymm1 vpaddw 
128(%r8), %ymm4, %ymm2 vpaddw %ymm2, %ymm1, %ymm8 vpsubw %ymm2, %ymm1, %ymm12 vmovdqa %ymm0, 256(%r8) vmovdqu 824(%rdx), %ymm0 vpaddw 32(%r8), %ymm0, %ymm1 vpaddw 160(%r8), %ymm5, %ymm2 vpaddw %ymm2, %ymm1, %ymm9 vpsubw %ymm2, %ymm1, %ymm13 vmovdqa %ymm0, 288(%r8) vmovdqu 912(%rdx), %ymm0 vpaddw 64(%r8), %ymm0, %ymm1 vpaddw 192(%r8), %ymm6, %ymm2 vpaddw %ymm2, %ymm1, %ymm10 vpsubw %ymm2, %ymm1, %ymm14 vmovdqa %ymm0, 320(%r8) vmovdqu 1000(%rdx), %ymm0 vpaddw 96(%r8), %ymm0, %ymm1 vpaddw 224(%r8), %ymm7, %ymm2 vpaddw %ymm2, %ymm1, %ymm11 vpsubw %ymm2, %ymm1, %ymm15 vmovdqa %ymm0, 352(%r8) vmovdqa %ymm8, 896(%r11) vmovdqa %ymm9, 992(%r11) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 1088(%r11) vmovdqa %ymm10, 1184(%r11) vmovdqa %ymm11, 1280(%r11) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 1376(%r11) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 1472(%r11) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 1568(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 1664(%r11) vmovdqa %ymm12, 1760(%r11) vmovdqa %ymm13, 1856(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 1952(%r11) vmovdqa %ymm14, 2048(%r11) vmovdqa %ymm15, 2144(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 2240(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 2336(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 2432(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 2528(%r11) vmovdqa 256(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm0 vpsllw $2, %ymm4, %ymm1 vpaddw 128(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm8 vpsubw %ymm1, %ymm0, %ymm12 vmovdqa 288(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm0 vpsllw $2, %ymm5, %ymm1 vpaddw 160(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm9 vpsubw %ymm1, %ymm0, %ymm13 vmovdqa 320(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm0 vpsllw $2, %ymm6, %ymm1 vpaddw 192(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm10 vpsubw %ymm1, %ymm0, %ymm14 vmovdqa 352(%r8), 
%ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm0 vpsllw $2, %ymm7, %ymm1 vpaddw 224(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm11 vpsubw %ymm1, %ymm0, %ymm15 vmovdqa %ymm8, 2624(%r11) vmovdqa %ymm9, 2720(%r11) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 2816(%r11) vmovdqa %ymm10, 2912(%r11) vmovdqa %ymm11, 3008(%r11) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 3104(%r11) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 3200(%r11) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 3296(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 3392(%r11) vmovdqa %ymm12, 3488(%r11) vmovdqa %ymm13, 3584(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 3680(%r11) vmovdqa %ymm14, 3776(%r11) vmovdqa %ymm15, 3872(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 3968(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4064(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4160(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 4256(%r11) vpmullw %ymm3, %ymm4, %ymm0 vpaddw 256(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 128(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm12 vpmullw %ymm3, %ymm5, %ymm0 vpaddw 288(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 160(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm13 vpmullw %ymm3, %ymm6, %ymm0 vpaddw 320(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 192(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm14 vpmullw %ymm3, %ymm7, %ymm0 vpaddw 352(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 224(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm15 vmovdqa %ymm12, 4352(%r11) vmovdqa %ymm13, 4448(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 4544(%r11) vmovdqa %ymm14, 4640(%r11) vmovdqa %ymm15, 4736(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4832(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4928(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 5024(%r11) vpaddw %ymm0, %ymm1, %ymm0 
vmovdqa %ymm0, 5120(%r11) vmovdqu 64(%rdx), %ymm0 vmovdqu 152(%rdx), %ymm1 vmovdqu 240(%rdx), %ymm2 vmovdqu 328(%rdx), %ymm12 vmovdqu 1120(%rdx), %ymm4 vmovdqu 1208(%rdx), %ymm5 vmovdqu 1296(%rdx), %ymm6 # Only 18 bytes more can be read, but vmovdqu reads 32. # Copy 18 bytes to the red zone and zero pad to 32 bytes. xor %r9, %r9 movq %r9, -16(%rsp) movq %r9, -8(%rsp) movq 1384(%rdx), %r9 movq %r9, -32(%rsp) movq 1384+8(%rdx), %r9 movq %r9, -24(%rsp) movw 1384+16(%rdx), %r9w movw %r9w, -16(%rsp) vmovdqu -32(%rsp), %ymm7 vmovdqu 416(%rdx), %ymm8 vmovdqu 504(%rdx), %ymm9 vmovdqu 592(%rdx), %ymm10 vmovdqu 680(%rdx), %ymm11 vmovdqa %ymm0, 64(%r11) vmovdqa %ymm1, 160(%r11) vpaddw %ymm0, %ymm1, %ymm14 vmovdqa %ymm14, 256(%r11) vmovdqa %ymm2, 352(%r11) vmovdqa %ymm12, 448(%r11) vpaddw %ymm2, %ymm12, %ymm14 vmovdqa %ymm14, 544(%r11) vpaddw %ymm0, %ymm2, %ymm14 vmovdqa %ymm14, 640(%r11) vpaddw %ymm1, %ymm12, %ymm15 vmovdqa %ymm15, 736(%r11) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 832(%r11) vmovdqa %ymm4, 5248(%r11) vmovdqa %ymm5, 5344(%r11) vpaddw %ymm4, %ymm5, %ymm14 vmovdqa %ymm14, 5440(%r11) vmovdqa %ymm6, 5536(%r11) vmovdqa %ymm7, 5632(%r11) vpaddw %ymm6, %ymm7, %ymm14 vmovdqa %ymm14, 5728(%r11) vpaddw %ymm4, %ymm6, %ymm14 vmovdqa %ymm14, 5824(%r11) vpaddw %ymm5, %ymm7, %ymm15 vmovdqa %ymm15, 5920(%r11) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 6016(%r11) vmovdqa %ymm0, 0(%r8) vmovdqa %ymm1, 32(%r8) vmovdqa %ymm2, 64(%r8) vmovdqa %ymm12, 96(%r8) vmovdqa %ymm8, 128(%r8) vmovdqa %ymm9, 160(%r8) vmovdqa %ymm10, 192(%r8) vmovdqa %ymm11, 224(%r8) vmovdqu 768(%rdx), %ymm0 vpaddw 0(%r8), %ymm0, %ymm1 vpaddw 128(%r8), %ymm4, %ymm2 vpaddw %ymm2, %ymm1, %ymm8 vpsubw %ymm2, %ymm1, %ymm12 vmovdqa %ymm0, 256(%r8) vmovdqu 856(%rdx), %ymm0 vpaddw 32(%r8), %ymm0, %ymm1 vpaddw 160(%r8), %ymm5, %ymm2 vpaddw %ymm2, %ymm1, %ymm9 vpsubw %ymm2, %ymm1, %ymm13 vmovdqa %ymm0, 288(%r8) vmovdqu 944(%rdx), %ymm0 vpaddw 64(%r8), %ymm0, %ymm1 vpaddw 192(%r8), %ymm6, %ymm2 vpaddw %ymm2, 
%ymm1, %ymm10 vpsubw %ymm2, %ymm1, %ymm14 vmovdqa %ymm0, 320(%r8) vmovdqu 1032(%rdx), %ymm0 vpaddw 96(%r8), %ymm0, %ymm1 vpaddw 224(%r8), %ymm7, %ymm2 vpaddw %ymm2, %ymm1, %ymm11 vpsubw %ymm2, %ymm1, %ymm15 vmovdqa %ymm0, 352(%r8) vmovdqa %ymm8, 928(%r11) vmovdqa %ymm9, 1024(%r11) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 1120(%r11) vmovdqa %ymm10, 1216(%r11) vmovdqa %ymm11, 1312(%r11) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 1408(%r11) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 1504(%r11) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 1600(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 1696(%r11) vmovdqa %ymm12, 1792(%r11) vmovdqa %ymm13, 1888(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 1984(%r11) vmovdqa %ymm14, 2080(%r11) vmovdqa %ymm15, 2176(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 2272(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 2368(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 2464(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 2560(%r11) vmovdqa 256(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm0 vpsllw $2, %ymm4, %ymm1 vpaddw 128(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm8 vpsubw %ymm1, %ymm0, %ymm12 vmovdqa 288(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm0 vpsllw $2, %ymm5, %ymm1 vpaddw 160(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm9 vpsubw %ymm1, %ymm0, %ymm13 vmovdqa 320(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm0 vpsllw $2, %ymm6, %ymm1 vpaddw 192(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm10 vpsubw %ymm1, %ymm0, %ymm14 vmovdqa 352(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm0 vpsllw $2, %ymm7, %ymm1 vpaddw 224(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm11 vpsubw %ymm1, %ymm0, %ymm15 vmovdqa %ymm8, 2656(%r11) vmovdqa %ymm9, 2752(%r11) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 2848(%r11) vmovdqa %ymm10, 2944(%r11) vmovdqa %ymm11, 3040(%r11) vpaddw 
%ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 3136(%r11) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 3232(%r11) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 3328(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 3424(%r11) vmovdqa %ymm12, 3520(%r11) vmovdqa %ymm13, 3616(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 3712(%r11) vmovdqa %ymm14, 3808(%r11) vmovdqa %ymm15, 3904(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4000(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4096(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4192(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 4288(%r11) vpmullw %ymm3, %ymm4, %ymm0 vpaddw 256(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 128(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm12 vpmullw %ymm3, %ymm5, %ymm0 vpaddw 288(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 160(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm13 vpmullw %ymm3, %ymm6, %ymm0 vpaddw 320(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 192(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm14 vpmullw %ymm3, %ymm7, %ymm0 vpaddw 352(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 224(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm15 vmovdqa %ymm12, 4384(%r11) vmovdqa %ymm13, 4480(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 4576(%r11) vmovdqa %ymm14, 4672(%r11) vmovdqa %ymm15, 4768(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4864(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4960(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 5056(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 5152(%r11) subq $9408, %r8 mov $4, %ecx karatsuba_loop_4eced63f144beffcb0247f9c6f67d165: mov %r8, %r9 mov %r8, %r10 subq $32, %r8 vmovdqa 0(%rax), %ymm0 vmovdqa 192(%rax), %ymm1 vmovdqa 384(%rax), %ymm2 vmovdqa 576(%rax), %ymm3 vpunpcklwd 96(%rax), %ymm0, %ymm4 vpunpckhwd 96(%rax), %ymm0, %ymm5 vpunpcklwd 288(%rax), %ymm1, %ymm6 vpunpckhwd 
288(%rax), %ymm1, %ymm7 vpunpcklwd 480(%rax), %ymm2, %ymm8 vpunpckhwd 480(%rax), %ymm2, %ymm9 vpunpcklwd 672(%rax), %ymm3, %ymm10 vpunpckhwd 672(%rax), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 768(%rax), %ymm0 vmovdqa 960(%rax), %ymm1 vmovdqa 1152(%rax), %ymm2 vmovdqa 1344(%rax), %ymm3 vpunpcklwd 864(%rax), %ymm0, %ymm12 vpunpckhwd 864(%rax), %ymm0, %ymm13 vpunpcklwd 1056(%rax), %ymm1, %ymm14 vpunpckhwd 1056(%rax), %ymm1, %ymm15 vpunpcklwd 1248(%rax), %ymm2, %ymm0 vpunpckhwd 1248(%rax), %ymm2, %ymm1 vpunpcklwd 1440(%rax), %ymm3, %ymm2 vpunpckhwd 1440(%rax), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 0(%r9) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 32(%r9) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 64(%r9) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 96(%r9) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 128(%r9) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 
160(%r9) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 192(%r9) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 256(%r9) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 288(%r9) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 320(%r9) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 352(%r9) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 384(%r9) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 416(%r9) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 448(%r9) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 224(%r9) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 480(%r9) vmovdqa 32(%rax), %ymm0 vmovdqa 224(%rax), %ymm1 vmovdqa 416(%rax), %ymm2 vmovdqa 608(%rax), %ymm3 vpunpcklwd 128(%rax), %ymm0, %ymm4 vpunpckhwd 128(%rax), %ymm0, %ymm5 vpunpcklwd 320(%rax), %ymm1, %ymm6 vpunpckhwd 320(%rax), %ymm1, %ymm7 vpunpcklwd 512(%rax), %ymm2, %ymm8 vpunpckhwd 512(%rax), %ymm2, %ymm9 vpunpcklwd 704(%rax), %ymm3, %ymm10 vpunpckhwd 704(%rax), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 800(%rax), %ymm0 vmovdqa 992(%rax), %ymm1 vmovdqa 1184(%rax), %ymm2 vmovdqa 1376(%rax), %ymm3 vpunpcklwd 896(%rax), %ymm0, %ymm12 vpunpckhwd 896(%rax), %ymm0, %ymm13 vpunpcklwd 1088(%rax), %ymm1, %ymm14 vpunpckhwd 1088(%rax), 
%ymm1, %ymm15 vpunpcklwd 1280(%rax), %ymm2, %ymm0 vpunpckhwd 1280(%rax), %ymm2, %ymm1 vpunpcklwd 1472(%rax), %ymm3, %ymm2 vpunpckhwd 1472(%rax), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 512(%r9) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 544(%r9) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 576(%r9) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 608(%r9) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 640(%r9) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 672(%r9) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 704(%r9) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 768(%r9) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 800(%r9) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 832(%r9) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 864(%r9) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 896(%r9) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 928(%r9) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 960(%r9) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 736(%r9) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 992(%r9) vmovdqa 64(%rax), %ymm0 vmovdqa 256(%rax), %ymm1 vmovdqa 
448(%rax), %ymm2 vmovdqa 640(%rax), %ymm3 vpunpcklwd 160(%rax), %ymm0, %ymm4 vpunpckhwd 160(%rax), %ymm0, %ymm5 vpunpcklwd 352(%rax), %ymm1, %ymm6 vpunpckhwd 352(%rax), %ymm1, %ymm7 vpunpcklwd 544(%rax), %ymm2, %ymm8 vpunpckhwd 544(%rax), %ymm2, %ymm9 vpunpcklwd 736(%rax), %ymm3, %ymm10 vpunpckhwd 736(%rax), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 832(%rax), %ymm0 vmovdqa 1024(%rax), %ymm1 vmovdqa 1216(%rax), %ymm2 vmovdqa 1408(%rax), %ymm3 vpunpcklwd 928(%rax), %ymm0, %ymm12 vpunpckhwd 928(%rax), %ymm0, %ymm13 vpunpcklwd 1120(%rax), %ymm1, %ymm14 vpunpckhwd 1120(%rax), %ymm1, %ymm15 vpunpcklwd 1312(%rax), %ymm2, %ymm0 vpunpckhwd 1312(%rax), %ymm2, %ymm1 vpunpcklwd 1504(%rax), %ymm3, %ymm2 vpunpckhwd 1504(%rax), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 1024(%r9) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 1056(%r9) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 1088(%r9) vinserti128 $1, 
%xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 1120(%r9) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 1152(%r9) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 1184(%r9) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1216(%r9) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1280(%r9) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1312(%r9) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 1344(%r9) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 1376(%r9) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1248(%r9) vmovdqa 0(%r11), %ymm0 vmovdqa 192(%r11), %ymm1 vmovdqa 384(%r11), %ymm2 vmovdqa 576(%r11), %ymm3 vpunpcklwd 96(%r11), %ymm0, %ymm4 vpunpckhwd 96(%r11), %ymm0, %ymm5 vpunpcklwd 288(%r11), %ymm1, %ymm6 vpunpckhwd 288(%r11), %ymm1, %ymm7 vpunpcklwd 480(%r11), %ymm2, %ymm8 vpunpckhwd 480(%r11), %ymm2, %ymm9 vpunpcklwd 672(%r11), %ymm3, %ymm10 vpunpckhwd 672(%r11), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 768(%r11), %ymm0 vmovdqa 960(%r11), %ymm1 vmovdqa 1152(%r11), %ymm2 vmovdqa 1344(%r11), %ymm3 vpunpcklwd 864(%r11), %ymm0, %ymm12 vpunpckhwd 864(%r11), %ymm0, %ymm13 vpunpcklwd 1056(%r11), %ymm1, %ymm14 vpunpckhwd 1056(%r11), %ymm1, %ymm15 vpunpcklwd 1248(%r11), %ymm2, %ymm0 vpunpckhwd 1248(%r11), %ymm2, %ymm1 vpunpcklwd 1440(%r11), 
%ymm3, %ymm2 vpunpckhwd 1440(%r11), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 1408(%r9) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 1440(%r9) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 1472(%r9) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 1504(%r9) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 1536(%r9) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 1568(%r9) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1600(%r9) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1664(%r9) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1696(%r9) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 1728(%r9) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 1760(%r9) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 1792(%r9) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 1824(%r9) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 1856(%r9) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1632(%r9) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 1888(%r9) vmovdqa 32(%r11), %ymm0 vmovdqa 224(%r11), %ymm1 vmovdqa 416(%r11), %ymm2 vmovdqa 608(%r11), %ymm3 vpunpcklwd 128(%r11), %ymm0, %ymm4 vpunpckhwd 
128(%r11), %ymm0, %ymm5 vpunpcklwd 320(%r11), %ymm1, %ymm6 vpunpckhwd 320(%r11), %ymm1, %ymm7 vpunpcklwd 512(%r11), %ymm2, %ymm8 vpunpckhwd 512(%r11), %ymm2, %ymm9 vpunpcklwd 704(%r11), %ymm3, %ymm10 vpunpckhwd 704(%r11), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 800(%r11), %ymm0 vmovdqa 992(%r11), %ymm1 vmovdqa 1184(%r11), %ymm2 vmovdqa 1376(%r11), %ymm3 vpunpcklwd 896(%r11), %ymm0, %ymm12 vpunpckhwd 896(%r11), %ymm0, %ymm13 vpunpcklwd 1088(%r11), %ymm1, %ymm14 vpunpckhwd 1088(%r11), %ymm1, %ymm15 vpunpcklwd 1280(%r11), %ymm2, %ymm0 vpunpckhwd 1280(%r11), %ymm2, %ymm1 vpunpcklwd 1472(%r11), %ymm3, %ymm2 vpunpckhwd 1472(%r11), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 1920(%r9) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 1952(%r9) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 1984(%r9) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 2016(%r9) vinserti128 $1, %xmm12, %ymm8, %ymm15 
vmovdqa %ymm15, 2048(%r9) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 2080(%r9) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 2112(%r9) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 2176(%r9) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 2208(%r9) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 2240(%r9) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2272(%r9) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2304(%r9) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2336(%r9) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2368(%r9) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 2144(%r9) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 2400(%r9) vmovdqa 64(%r11), %ymm0 vmovdqa 256(%r11), %ymm1 vmovdqa 448(%r11), %ymm2 vmovdqa 640(%r11), %ymm3 vpunpcklwd 160(%r11), %ymm0, %ymm4 vpunpckhwd 160(%r11), %ymm0, %ymm5 vpunpcklwd 352(%r11), %ymm1, %ymm6 vpunpckhwd 352(%r11), %ymm1, %ymm7 vpunpcklwd 544(%r11), %ymm2, %ymm8 vpunpckhwd 544(%r11), %ymm2, %ymm9 vpunpcklwd 736(%r11), %ymm3, %ymm10 vpunpckhwd 736(%r11), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 832(%r11), %ymm0 vmovdqa 1024(%r11), %ymm1 vmovdqa 1216(%r11), %ymm2 vmovdqa 1408(%r11), %ymm3 vpunpcklwd 928(%r11), %ymm0, %ymm12 
vpunpckhwd 928(%r11), %ymm0, %ymm13 vpunpcklwd 1120(%r11), %ymm1, %ymm14 vpunpckhwd 1120(%r11), %ymm1, %ymm15 vpunpcklwd 1312(%r11), %ymm2, %ymm0 vpunpckhwd 1312(%r11), %ymm2, %ymm1 vpunpcklwd 1504(%r11), %ymm3, %ymm2 vpunpckhwd 1504(%r11), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 2432(%r9) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 2464(%r9) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 2496(%r9) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 2528(%r9) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 2560(%r9) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 2592(%r9) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 2624(%r9) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 2688(%r9) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 2720(%r9) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 2752(%r9) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2784(%r9) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 2656(%r9) addq $32, %r8 innerloop_4eced63f144beffcb0247f9c6f67d165: vmovdqa 0(%r9), %ymm0 vmovdqa 1408(%r9), %ymm6 vmovdqa 32(%r9), %ymm1 vmovdqa 1440(%r9), %ymm7 vmovdqa 64(%r9), %ymm2 vmovdqa 1472(%r9), %ymm8 vmovdqa 96(%r9), %ymm3 
vmovdqa 1504(%r9), %ymm9 vmovdqa 128(%r9), %ymm4 vmovdqa 1536(%r9), %ymm10 vmovdqa 160(%r9), %ymm5 vmovdqa 1568(%r9), %ymm11 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 2816(%r10) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 2848(%r10) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 2880(%r10) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 2912(%r10) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 2944(%r10) vpmullw %ymm0, %ymm11, %ymm13 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 2976(%r10) vpmullw %ymm1, %ymm11, %ymm12 vpmullw %ymm2, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3008(%r10) vpmullw %ymm2, %ymm11, %ymm13 vpmullw %ymm3, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3040(%r10) vpmullw %ymm3, %ymm11, %ymm12 vpmullw %ymm4, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm9, %ymm15 
vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3072(%r10) vpmullw %ymm4, %ymm11, %ymm13 vpmullw %ymm5, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3104(%r10) vpmullw %ymm5, %ymm11, %ymm12 vmovdqa %ymm12, 3136(%r10) vmovdqa 192(%r9), %ymm0 vmovdqa 1600(%r9), %ymm6 vmovdqa 224(%r9), %ymm1 vmovdqa 1632(%r9), %ymm7 vmovdqa 256(%r9), %ymm2 vmovdqa 1664(%r9), %ymm8 vmovdqa 288(%r9), %ymm3 vmovdqa 1696(%r9), %ymm9 vmovdqa 320(%r9), %ymm4 vmovdqa 1728(%r9), %ymm10 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 3200(%r10) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3232(%r10) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3264(%r10) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3296(%r10) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3328(%r10) vpmullw %ymm1, %ymm10, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3360(%r10) vpmullw %ymm2, %ymm10, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3392(%r10) vpmullw %ymm3, %ymm10, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3424(%r10) vpmullw %ymm4, %ymm10, %ymm12 vmovdqa %ymm12, 3456(%r10) vpaddw 0(%r9), %ymm0, %ymm0 vpaddw 1408(%r9), %ymm6, %ymm6 vpaddw 
32(%r9), %ymm1, %ymm1 vpaddw 1440(%r9), %ymm7, %ymm7 vpaddw 64(%r9), %ymm2, %ymm2 vpaddw 1472(%r9), %ymm8, %ymm8 vpaddw 96(%r9), %ymm3, %ymm3 vpaddw 1504(%r9), %ymm9, %ymm9 vpaddw 128(%r9), %ymm4, %ymm4 vpaddw 1536(%r9), %ymm10, %ymm10 vpmullw %ymm0, %ymm11, %ymm12 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpsubw 2976(%r10), %ymm12, %ymm12 vpsubw 3360(%r10), %ymm12, %ymm12 vmovdqa %ymm12, 3168(%r10) vpmullw %ymm5, %ymm7, %ymm12 vpmullw %ymm5, %ymm8, %ymm13 vpmullw %ymm5, %ymm9, %ymm14 vpmullw %ymm5, %ymm10, %ymm15 vpmullw %ymm1, %ymm11, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm10, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm3, %ymm9, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm4, %ymm8, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm11, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm10, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm4, %ymm9, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm11, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm10, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm11, %ymm5 vpaddw %ymm5, %ymm15, %ymm15 vpmullw %ymm0, %ymm10, %ymm11 vpmullw %ymm1, %ymm9, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm2, %ymm8, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm3, %ymm7, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm4, %ymm6, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm0, %ymm9, %ymm10 vpmullw %ymm1, %ymm8, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm2, %ymm7, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm3, %ymm6, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm0, %ymm8, %ymm9 vpmullw %ymm1, %ymm7, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm2, %ymm6, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm0, %ymm7, %ymm8 vpmullw %ymm1, %ymm6, 
%ymm5 vpaddw %ymm5, %ymm8, %ymm8 vpmullw %ymm0, %ymm6, %ymm7 vmovdqa 3008(%r10), %ymm0 vpsubw 3200(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm6 vpsubw 3392(%r10), %ymm6, %ymm6 vmovdqa %ymm6, 3200(%r10) vpaddw %ymm7, %ymm0, %ymm0 vpsubw 2816(%r10), %ymm0, %ymm0 vmovdqa %ymm0, 3008(%r10) vmovdqa 3040(%r10), %ymm1 vpsubw 3232(%r10), %ymm1, %ymm1 vpsubw %ymm1, %ymm13, %ymm7 vpsubw 3424(%r10), %ymm7, %ymm7 vmovdqa %ymm7, 3232(%r10) vpaddw %ymm8, %ymm1, %ymm1 vpsubw 2848(%r10), %ymm1, %ymm1 vmovdqa %ymm1, 3040(%r10) vmovdqa 3072(%r10), %ymm2 vpsubw 3264(%r10), %ymm2, %ymm2 vpsubw %ymm2, %ymm14, %ymm8 vpsubw 3456(%r10), %ymm8, %ymm8 vmovdqa %ymm8, 3264(%r10) vpaddw %ymm9, %ymm2, %ymm2 vpsubw 2880(%r10), %ymm2, %ymm2 vmovdqa %ymm2, 3072(%r10) vmovdqa 3104(%r10), %ymm3 vpsubw 3296(%r10), %ymm3, %ymm3 vpsubw %ymm3, %ymm15, %ymm9 vmovdqa %ymm9, 3296(%r10) vpaddw %ymm10, %ymm3, %ymm3 vpsubw 2912(%r10), %ymm3, %ymm3 vmovdqa %ymm3, 3104(%r10) vmovdqa 3136(%r10), %ymm4 vpsubw 3328(%r10), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vpsubw 2944(%r10), %ymm4, %ymm4 vmovdqa %ymm4, 3136(%r10) vmovdqa 352(%r9), %ymm0 vmovdqa 1760(%r9), %ymm6 vmovdqa 384(%r9), %ymm1 vmovdqa 1792(%r9), %ymm7 vmovdqa 416(%r9), %ymm2 vmovdqa 1824(%r9), %ymm8 vmovdqa 448(%r9), %ymm3 vmovdqa 1856(%r9), %ymm9 vmovdqa 480(%r9), %ymm4 vmovdqa 1888(%r9), %ymm10 vmovdqa 512(%r9), %ymm5 vmovdqa 1920(%r9), %ymm11 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 3520(%r10) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3552(%r10) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3584(%r10) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3616(%r10) vpmullw %ymm0, %ymm10, %ymm12 
vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3648(%r10) vpmullw %ymm0, %ymm11, %ymm13 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3680(%r10) vpmullw %ymm1, %ymm11, %ymm12 vpmullw %ymm2, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3712(%r10) vpmullw %ymm2, %ymm11, %ymm13 vpmullw %ymm3, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3744(%r10) vpmullw %ymm3, %ymm11, %ymm12 vpmullw %ymm4, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3776(%r10) vpmullw %ymm4, %ymm11, %ymm13 vpmullw %ymm5, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3808(%r10) vpmullw %ymm5, %ymm11, %ymm12 vmovdqa %ymm12, 3840(%r10) vmovdqa 544(%r9), %ymm0 vmovdqa 1952(%r9), %ymm6 vmovdqa 576(%r9), %ymm1 vmovdqa 1984(%r9), %ymm7 vmovdqa 608(%r9), %ymm2 vmovdqa 2016(%r9), %ymm8 vmovdqa 640(%r9), %ymm3 vmovdqa 2048(%r9), %ymm9 vmovdqa 672(%r9), %ymm4 vmovdqa 2080(%r9), %ymm10 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 3904(%r10) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3936(%r10) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 
vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3968(%r10) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 4000(%r10) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 4032(%r10) vpmullw %ymm1, %ymm10, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 4064(%r10) vpmullw %ymm2, %ymm10, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 4096(%r10) vpmullw %ymm3, %ymm10, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 4128(%r10) vpmullw %ymm4, %ymm10, %ymm12 vmovdqa %ymm12, 4160(%r10) vpaddw 352(%r9), %ymm0, %ymm0 vpaddw 1760(%r9), %ymm6, %ymm6 vpaddw 384(%r9), %ymm1, %ymm1 vpaddw 1792(%r9), %ymm7, %ymm7 vpaddw 416(%r9), %ymm2, %ymm2 vpaddw 1824(%r9), %ymm8, %ymm8 vpaddw 448(%r9), %ymm3, %ymm3 vpaddw 1856(%r9), %ymm9, %ymm9 vpaddw 480(%r9), %ymm4, %ymm4 vpaddw 1888(%r9), %ymm10, %ymm10 vpmullw %ymm0, %ymm11, %ymm12 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpsubw 3680(%r10), %ymm12, %ymm12 vpsubw 4064(%r10), %ymm12, %ymm12 vmovdqa %ymm12, 3872(%r10) vpmullw %ymm5, %ymm7, %ymm12 vpmullw %ymm5, %ymm8, %ymm13 vpmullw %ymm5, 
%ymm9, %ymm14 vpmullw %ymm5, %ymm10, %ymm15 vpmullw %ymm1, %ymm11, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm10, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm3, %ymm9, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm4, %ymm8, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm11, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm10, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm4, %ymm9, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm11, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm10, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm11, %ymm5 vpaddw %ymm5, %ymm15, %ymm15 vpmullw %ymm0, %ymm10, %ymm11 vpmullw %ymm1, %ymm9, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm2, %ymm8, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm3, %ymm7, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm4, %ymm6, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm0, %ymm9, %ymm10 vpmullw %ymm1, %ymm8, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm2, %ymm7, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm3, %ymm6, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm0, %ymm8, %ymm9 vpmullw %ymm1, %ymm7, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm2, %ymm6, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm0, %ymm7, %ymm8 vpmullw %ymm1, %ymm6, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vpmullw %ymm0, %ymm6, %ymm7 vmovdqa 3712(%r10), %ymm0 vpsubw 3904(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm6 vpsubw 4096(%r10), %ymm6, %ymm6 vmovdqa %ymm6, 3904(%r10) vpaddw %ymm7, %ymm0, %ymm0 vpsubw 3520(%r10), %ymm0, %ymm0 vmovdqa %ymm0, 3712(%r10) vmovdqa 3744(%r10), %ymm1 vpsubw 3936(%r10), %ymm1, %ymm1 vpsubw %ymm1, %ymm13, %ymm7 vpsubw 4128(%r10), %ymm7, %ymm7 vmovdqa %ymm7, 3936(%r10) vpaddw %ymm8, %ymm1, %ymm1 vpsubw 3552(%r10), %ymm1, %ymm1 vmovdqa %ymm1, 3744(%r10) vmovdqa 3776(%r10), %ymm2 vpsubw 3968(%r10), %ymm2, %ymm2 vpsubw %ymm2, %ymm14, %ymm8 vpsubw 4160(%r10), %ymm8, %ymm8 vmovdqa %ymm8, 3968(%r10) vpaddw %ymm9, %ymm2, %ymm2 vpsubw 3584(%r10), %ymm2, %ymm2 vmovdqa 
%ymm2, 3776(%r10) vmovdqa 3808(%r10), %ymm3 vpsubw 4000(%r10), %ymm3, %ymm3 vpsubw %ymm3, %ymm15, %ymm9 vmovdqa %ymm9, 4000(%r10) vpaddw %ymm10, %ymm3, %ymm3 vpsubw 3616(%r10), %ymm3, %ymm3 vmovdqa %ymm3, 3808(%r10) vmovdqa 3840(%r10), %ymm4 vpsubw 4032(%r10), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vpsubw 3648(%r10), %ymm4, %ymm4 vmovdqa %ymm4, 3840(%r10) vmovdqa 0(%r9), %ymm0 vmovdqa 1408(%r9), %ymm6 vpaddw 352(%r9), %ymm0, %ymm0 vpaddw 1760(%r9), %ymm6, %ymm6 vmovdqa 32(%r9), %ymm1 vmovdqa 1440(%r9), %ymm7 vpaddw 384(%r9), %ymm1, %ymm1 vpaddw 1792(%r9), %ymm7, %ymm7 vmovdqa 64(%r9), %ymm2 vmovdqa 1472(%r9), %ymm8 vpaddw 416(%r9), %ymm2, %ymm2 vpaddw 1824(%r9), %ymm8, %ymm8 vmovdqa 96(%r9), %ymm3 vmovdqa 1504(%r9), %ymm9 vpaddw 448(%r9), %ymm3, %ymm3 vpaddw 1856(%r9), %ymm9, %ymm9 vmovdqa 128(%r9), %ymm4 vmovdqa 1536(%r9), %ymm10 vpaddw 480(%r9), %ymm4, %ymm4 vpaddw 1888(%r9), %ymm10, %ymm10 vmovdqa 160(%r9), %ymm5 vmovdqa 1568(%r9), %ymm11 vpaddw 512(%r9), %ymm5, %ymm5 vpaddw 1920(%r9), %ymm11, %ymm11 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 5888(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 5920(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 5952(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 5984(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6016(%r8) vpmullw %ymm0, %ymm11, %ymm13 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm9, 
%ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6048(%r8) vpmullw %ymm1, %ymm11, %ymm12 vpmullw %ymm2, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6080(%r8) vpmullw %ymm2, %ymm11, %ymm13 vpmullw %ymm3, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6112(%r8) vpmullw %ymm3, %ymm11, %ymm12 vpmullw %ymm4, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6144(%r8) vpmullw %ymm4, %ymm11, %ymm13 vpmullw %ymm5, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6176(%r8) vpmullw %ymm5, %ymm11, %ymm12 vmovdqa %ymm12, 6208(%r8) vmovdqa 192(%r9), %ymm0 vmovdqa 1600(%r9), %ymm6 vpaddw 544(%r9), %ymm0, %ymm0 vpaddw 1952(%r9), %ymm6, %ymm6 vmovdqa 224(%r9), %ymm1 vmovdqa 1632(%r9), %ymm7 vpaddw 576(%r9), %ymm1, %ymm1 vpaddw 1984(%r9), %ymm7, %ymm7 vmovdqa 256(%r9), %ymm2 vmovdqa 1664(%r9), %ymm8 vpaddw 608(%r9), %ymm2, %ymm2 vpaddw 2016(%r9), %ymm8, %ymm8 vmovdqa 288(%r9), %ymm3 vmovdqa 1696(%r9), %ymm9 vpaddw 640(%r9), %ymm3, %ymm3 vpaddw 2048(%r9), %ymm9, %ymm9 vmovdqa 320(%r9), %ymm4 vmovdqa 1728(%r9), %ymm10 vpaddw 672(%r9), %ymm4, %ymm4 vpaddw 2080(%r9), %ymm10, %ymm10 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 6272(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6304(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 
6336(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6368(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6400(%r8) vpmullw %ymm1, %ymm10, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6432(%r8) vpmullw %ymm2, %ymm10, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6464(%r8) vpmullw %ymm3, %ymm10, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6496(%r8) vpmullw %ymm4, %ymm10, %ymm12 vmovdqa %ymm12, 6528(%r8) vpaddw 0(%r9), %ymm0, %ymm0 vpaddw 1408(%r9), %ymm6, %ymm6 vpaddw 352(%r9), %ymm0, %ymm0 vpaddw 1760(%r9), %ymm6, %ymm6 vpaddw 32(%r9), %ymm1, %ymm1 vpaddw 1440(%r9), %ymm7, %ymm7 vpaddw 384(%r9), %ymm1, %ymm1 vpaddw 1792(%r9), %ymm7, %ymm7 vpaddw 64(%r9), %ymm2, %ymm2 vpaddw 1472(%r9), %ymm8, %ymm8 vpaddw 416(%r9), %ymm2, %ymm2 vpaddw 1824(%r9), %ymm8, %ymm8 vpaddw 96(%r9), %ymm3, %ymm3 vpaddw 1504(%r9), %ymm9, %ymm9 vpaddw 448(%r9), %ymm3, %ymm3 vpaddw 1856(%r9), %ymm9, %ymm9 vpaddw 128(%r9), %ymm4, %ymm4 vpaddw 1536(%r9), %ymm10, %ymm10 vpaddw 480(%r9), %ymm4, %ymm4 vpaddw 1888(%r9), %ymm10, %ymm10 vpmullw %ymm0, %ymm11, %ymm12 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw 
%ymm5, %ymm6, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpsubw 6048(%r8), %ymm12, %ymm12 vpsubw 6432(%r8), %ymm12, %ymm12 vmovdqa %ymm12, 6240(%r8) vpmullw %ymm5, %ymm7, %ymm12 vpmullw %ymm5, %ymm8, %ymm13 vpmullw %ymm5, %ymm9, %ymm14 vpmullw %ymm5, %ymm10, %ymm15 vpmullw %ymm1, %ymm11, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm10, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm3, %ymm9, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm4, %ymm8, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm11, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm10, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm4, %ymm9, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm11, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm10, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm11, %ymm5 vpaddw %ymm5, %ymm15, %ymm15 vpmullw %ymm0, %ymm10, %ymm11 vpmullw %ymm1, %ymm9, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm2, %ymm8, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm3, %ymm7, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm4, %ymm6, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm0, %ymm9, %ymm10 vpmullw %ymm1, %ymm8, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm2, %ymm7, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm3, %ymm6, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm0, %ymm8, %ymm9 vpmullw %ymm1, %ymm7, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm2, %ymm6, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm0, %ymm7, %ymm8 vpmullw %ymm1, %ymm6, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vpmullw %ymm0, %ymm6, %ymm7 vmovdqa 6080(%r8), %ymm0 vpsubw 6272(%r8), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm6 vpsubw 6464(%r8), %ymm6, %ymm6 vmovdqa %ymm6, 6272(%r8) vpaddw %ymm7, %ymm0, %ymm0 vpsubw 5888(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 6080(%r8) vmovdqa 6112(%r8), %ymm1 vpsubw 6304(%r8), %ymm1, %ymm1 vpsubw %ymm1, %ymm13, %ymm7 vpsubw 6496(%r8), %ymm7, %ymm7 vmovdqa %ymm7, 6304(%r8) vpaddw %ymm8, %ymm1, %ymm1 vpsubw 5920(%r8), %ymm1, %ymm1 vmovdqa %ymm1, 6112(%r8) vmovdqa 
6144(%r8), %ymm2 vpsubw 6336(%r8), %ymm2, %ymm2 vpsubw %ymm2, %ymm14, %ymm8 vpsubw 6528(%r8), %ymm8, %ymm8 vmovdqa %ymm8, 6336(%r8) vpaddw %ymm9, %ymm2, %ymm2 vpsubw 5952(%r8), %ymm2, %ymm2 vmovdqa %ymm2, 6144(%r8) vmovdqa 6176(%r8), %ymm3 vpsubw 6368(%r8), %ymm3, %ymm3 vpsubw %ymm3, %ymm15, %ymm9 vmovdqa %ymm9, 6368(%r8) vpaddw %ymm10, %ymm3, %ymm3 vpsubw 5984(%r8), %ymm3, %ymm3 vmovdqa %ymm3, 6176(%r8) vmovdqa 6208(%r8), %ymm4 vpsubw 6400(%r8), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vpsubw 6016(%r8), %ymm4, %ymm4 vmovdqa %ymm4, 6208(%r8) vmovdqa 6208(%r8), %ymm0 vpsubw 3136(%r10), %ymm0, %ymm0 vpsubw 3840(%r10), %ymm0, %ymm0 vmovdqa %ymm0, 3488(%r10) vmovdqa 3168(%r10), %ymm0 vpsubw 3520(%r10), %ymm0, %ymm0 vmovdqa 6240(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3872(%r10), %ymm1, %ymm1 vpsubw 2816(%r10), %ymm0, %ymm0 vpaddw 5888(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3168(%r10) vmovdqa %ymm1, 3520(%r10) vmovdqa 3200(%r10), %ymm0 vpsubw 3552(%r10), %ymm0, %ymm0 vmovdqa 6272(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3904(%r10), %ymm1, %ymm1 vpsubw 2848(%r10), %ymm0, %ymm0 vpaddw 5920(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3200(%r10) vmovdqa %ymm1, 3552(%r10) vmovdqa 3232(%r10), %ymm0 vpsubw 3584(%r10), %ymm0, %ymm0 vmovdqa 6304(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3936(%r10), %ymm1, %ymm1 vpsubw 2880(%r10), %ymm0, %ymm0 vpaddw 5952(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3232(%r10) vmovdqa %ymm1, 3584(%r10) vmovdqa 3264(%r10), %ymm0 vpsubw 3616(%r10), %ymm0, %ymm0 vmovdqa 6336(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3968(%r10), %ymm1, %ymm1 vpsubw 2912(%r10), %ymm0, %ymm0 vpaddw 5984(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3264(%r10) vmovdqa %ymm1, 3616(%r10) vmovdqa 3296(%r10), %ymm0 vpsubw 3648(%r10), %ymm0, %ymm0 vmovdqa 6368(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 4000(%r10), %ymm1, %ymm1 vpsubw 2944(%r10), %ymm0, %ymm0 vpaddw 6016(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3296(%r10) vmovdqa %ymm1, 3648(%r10) vmovdqa 3328(%r10), %ymm0 vpsubw 3680(%r10), %ymm0, 
%ymm0 vmovdqa 6400(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 4032(%r10), %ymm1, %ymm1 vpsubw 2976(%r10), %ymm0, %ymm0 vpaddw 6048(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3328(%r10) vmovdqa %ymm1, 3680(%r10) vmovdqa 3360(%r10), %ymm0 vpsubw 3712(%r10), %ymm0, %ymm0 vmovdqa 6432(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 4064(%r10), %ymm1, %ymm1 vpsubw 3008(%r10), %ymm0, %ymm0 vpaddw 6080(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3360(%r10) vmovdqa %ymm1, 3712(%r10) vmovdqa 3392(%r10), %ymm0 vpsubw 3744(%r10), %ymm0, %ymm0 vmovdqa 6464(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 4096(%r10), %ymm1, %ymm1 vpsubw 3040(%r10), %ymm0, %ymm0 vpaddw 6112(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3392(%r10) vmovdqa %ymm1, 3744(%r10) vmovdqa 3424(%r10), %ymm0 vpsubw 3776(%r10), %ymm0, %ymm0 vmovdqa 6496(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 4128(%r10), %ymm1, %ymm1 vpsubw 3072(%r10), %ymm0, %ymm0 vpaddw 6144(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3424(%r10) vmovdqa %ymm1, 3776(%r10) vmovdqa 3456(%r10), %ymm0 vpsubw 3808(%r10), %ymm0, %ymm0 vmovdqa 6528(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 4160(%r10), %ymm1, %ymm1 vpsubw 3104(%r10), %ymm0, %ymm0 vpaddw 6176(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3456(%r10) vmovdqa %ymm1, 3808(%r10) neg %ecx jns done_4eced63f144beffcb0247f9c6f67d165 add $704, %r9 add $1408, %r10 jmp innerloop_4eced63f144beffcb0247f9c6f67d165 done_4eced63f144beffcb0247f9c6f67d165: sub $704, %r9 sub $1408, %r10 vmovdqa 0(%r9), %ymm0 vpaddw 704(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6592(%r8) vmovdqa 1408(%r9), %ymm0 vpaddw 2112(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7296(%r8) vmovdqa 32(%r9), %ymm0 vpaddw 736(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6624(%r8) vmovdqa 1440(%r9), %ymm0 vpaddw 2144(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7328(%r8) vmovdqa 64(%r9), %ymm0 vpaddw 768(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6656(%r8) vmovdqa 1472(%r9), %ymm0 vpaddw 2176(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7360(%r8) vmovdqa 96(%r9), %ymm0 vpaddw 800(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6688(%r8) vmovdqa 
1504(%r9), %ymm0 vpaddw 2208(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7392(%r8) vmovdqa 128(%r9), %ymm0 vpaddw 832(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6720(%r8) vmovdqa 1536(%r9), %ymm0 vpaddw 2240(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7424(%r8) vmovdqa 160(%r9), %ymm0 vpaddw 864(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6752(%r8) vmovdqa 1568(%r9), %ymm0 vpaddw 2272(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7456(%r8) vmovdqa 192(%r9), %ymm0 vpaddw 896(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6784(%r8) vmovdqa 1600(%r9), %ymm0 vpaddw 2304(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7488(%r8) vmovdqa 224(%r9), %ymm0 vpaddw 928(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6816(%r8) vmovdqa 1632(%r9), %ymm0 vpaddw 2336(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7520(%r8) vmovdqa 256(%r9), %ymm0 vpaddw 960(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6848(%r8) vmovdqa 1664(%r9), %ymm0 vpaddw 2368(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7552(%r8) vmovdqa 288(%r9), %ymm0 vpaddw 992(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6880(%r8) vmovdqa 1696(%r9), %ymm0 vpaddw 2400(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7584(%r8) vmovdqa 320(%r9), %ymm0 vpaddw 1024(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6912(%r8) vmovdqa 1728(%r9), %ymm0 vpaddw 2432(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7616(%r8) vmovdqa 352(%r9), %ymm0 vpaddw 1056(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6944(%r8) vmovdqa 1760(%r9), %ymm0 vpaddw 2464(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7648(%r8) vmovdqa 384(%r9), %ymm0 vpaddw 1088(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6976(%r8) vmovdqa 1792(%r9), %ymm0 vpaddw 2496(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7680(%r8) vmovdqa 416(%r9), %ymm0 vpaddw 1120(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7008(%r8) vmovdqa 1824(%r9), %ymm0 vpaddw 2528(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7712(%r8) vmovdqa 448(%r9), %ymm0 vpaddw 1152(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7040(%r8) vmovdqa 1856(%r9), %ymm0 vpaddw 2560(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7744(%r8) vmovdqa 480(%r9), %ymm0 vpaddw 1184(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7072(%r8) vmovdqa 1888(%r9), %ymm0 vpaddw 2592(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7776(%r8) 
vmovdqa 512(%r9), %ymm0 vpaddw 1216(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7104(%r8) vmovdqa 1920(%r9), %ymm0 vpaddw 2624(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7808(%r8) vmovdqa 544(%r9), %ymm0 vpaddw 1248(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7136(%r8) vmovdqa 1952(%r9), %ymm0 vpaddw 2656(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7840(%r8) vmovdqa 576(%r9), %ymm0 vpaddw 1280(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7168(%r8) vmovdqa 1984(%r9), %ymm0 vpaddw 2688(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7872(%r8) vmovdqa 608(%r9), %ymm0 vpaddw 1312(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7200(%r8) vmovdqa 2016(%r9), %ymm0 vpaddw 2720(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7904(%r8) vmovdqa 640(%r9), %ymm0 vpaddw 1344(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7232(%r8) vmovdqa 2048(%r9), %ymm0 vpaddw 2752(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7936(%r8) vmovdqa 672(%r9), %ymm0 vpaddw 1376(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7264(%r8) vmovdqa 2080(%r9), %ymm0 vpaddw 2784(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7968(%r8) vmovdqa 6592(%r8), %ymm0 vmovdqa 7296(%r8), %ymm6 vmovdqa 6624(%r8), %ymm1 vmovdqa 7328(%r8), %ymm7 vmovdqa 6656(%r8), %ymm2 vmovdqa 7360(%r8), %ymm8 vmovdqa 6688(%r8), %ymm3 vmovdqa 7392(%r8), %ymm9 vmovdqa 6720(%r8), %ymm4 vmovdqa 7424(%r8), %ymm10 vmovdqa 6752(%r8), %ymm5 vmovdqa 7456(%r8), %ymm11 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 8000(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8032(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8064(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8096(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw 
%ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8128(%r8) vpmullw %ymm0, %ymm11, %ymm13 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8160(%r8) vpmullw %ymm1, %ymm11, %ymm12 vpmullw %ymm2, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8192(%r8) vpmullw %ymm2, %ymm11, %ymm13 vpmullw %ymm3, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8224(%r8) vpmullw %ymm3, %ymm11, %ymm12 vpmullw %ymm4, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8256(%r8) vpmullw %ymm4, %ymm11, %ymm13 vpmullw %ymm5, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8288(%r8) vpmullw %ymm5, %ymm11, %ymm12 vmovdqa %ymm12, 8320(%r8) vmovdqa 6784(%r8), %ymm0 vmovdqa 7488(%r8), %ymm6 vmovdqa 6816(%r8), %ymm1 vmovdqa 7520(%r8), %ymm7 vmovdqa 6848(%r8), %ymm2 vmovdqa 7552(%r8), %ymm8 vmovdqa 6880(%r8), %ymm3 vmovdqa 7584(%r8), %ymm9 vmovdqa 6912(%r8), %ymm4 vmovdqa 7616(%r8), %ymm10 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 8384(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8416(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8448(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, 
%ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8480(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8512(%r8) vpmullw %ymm1, %ymm10, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8544(%r8) vpmullw %ymm2, %ymm10, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8576(%r8) vpmullw %ymm3, %ymm10, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8608(%r8) vpmullw %ymm4, %ymm10, %ymm12 vmovdqa %ymm12, 8640(%r8) vpaddw 6592(%r8), %ymm0, %ymm0 vpaddw 7296(%r8), %ymm6, %ymm6 vpaddw 6624(%r8), %ymm1, %ymm1 vpaddw 7328(%r8), %ymm7, %ymm7 vpaddw 6656(%r8), %ymm2, %ymm2 vpaddw 7360(%r8), %ymm8, %ymm8 vpaddw 6688(%r8), %ymm3, %ymm3 vpaddw 7392(%r8), %ymm9, %ymm9 vpaddw 6720(%r8), %ymm4, %ymm4 vpaddw 7424(%r8), %ymm10, %ymm10 vpmullw %ymm0, %ymm11, %ymm12 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpsubw 8160(%r8), %ymm12, %ymm12 vpsubw 8544(%r8), %ymm12, %ymm12 vmovdqa %ymm12, 8352(%r8) vpmullw %ymm5, %ymm7, %ymm12 vpmullw %ymm5, %ymm8, %ymm13 vpmullw %ymm5, %ymm9, %ymm14 vpmullw %ymm5, %ymm10, %ymm15 vpmullw %ymm1, %ymm11, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm10, %ymm5 vpaddw 
%ymm5, %ymm12, %ymm12 vpmullw %ymm3, %ymm9, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm4, %ymm8, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm11, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm10, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm4, %ymm9, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm11, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm10, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm11, %ymm5 vpaddw %ymm5, %ymm15, %ymm15 vpmullw %ymm0, %ymm10, %ymm11 vpmullw %ymm1, %ymm9, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm2, %ymm8, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm3, %ymm7, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm4, %ymm6, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm0, %ymm9, %ymm10 vpmullw %ymm1, %ymm8, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm2, %ymm7, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm3, %ymm6, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm0, %ymm8, %ymm9 vpmullw %ymm1, %ymm7, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm2, %ymm6, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm0, %ymm7, %ymm8 vpmullw %ymm1, %ymm6, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vpmullw %ymm0, %ymm6, %ymm7 vmovdqa 8192(%r8), %ymm0 vpsubw 8384(%r8), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm6 vpsubw 8576(%r8), %ymm6, %ymm6 vmovdqa %ymm6, 8384(%r8) vpaddw %ymm7, %ymm0, %ymm0 vpsubw 8000(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8192(%r8) vmovdqa 8224(%r8), %ymm1 vpsubw 8416(%r8), %ymm1, %ymm1 vpsubw %ymm1, %ymm13, %ymm7 vpsubw 8608(%r8), %ymm7, %ymm7 vmovdqa %ymm7, 8416(%r8) vpaddw %ymm8, %ymm1, %ymm1 vpsubw 8032(%r8), %ymm1, %ymm1 vmovdqa %ymm1, 8224(%r8) vmovdqa 8256(%r8), %ymm2 vpsubw 8448(%r8), %ymm2, %ymm2 vpsubw %ymm2, %ymm14, %ymm8 vpsubw 8640(%r8), %ymm8, %ymm8 vmovdqa %ymm8, 8448(%r8) vpaddw %ymm9, %ymm2, %ymm2 vpsubw 8064(%r8), %ymm2, %ymm2 vmovdqa %ymm2, 8256(%r8) vmovdqa 8288(%r8), %ymm3 vpsubw 8480(%r8), %ymm3, %ymm3 vpsubw %ymm3, %ymm15, %ymm9 vmovdqa %ymm9, 8480(%r8) vpaddw %ymm10, %ymm3, %ymm3 
vpsubw 8096(%r8), %ymm3, %ymm3 vmovdqa %ymm3, 8288(%r8) vmovdqa 8320(%r8), %ymm4 vpsubw 8512(%r8), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vpsubw 8128(%r8), %ymm4, %ymm4 vmovdqa %ymm4, 8320(%r8) vmovdqa 6944(%r8), %ymm0 vmovdqa 7648(%r8), %ymm6 vmovdqa 6976(%r8), %ymm1 vmovdqa 7680(%r8), %ymm7 vmovdqa 7008(%r8), %ymm2 vmovdqa 7712(%r8), %ymm8 vmovdqa 7040(%r8), %ymm3 vmovdqa 7744(%r8), %ymm9 vmovdqa 7072(%r8), %ymm4 vmovdqa 7776(%r8), %ymm10 vmovdqa 7104(%r8), %ymm5 vmovdqa 7808(%r8), %ymm11 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 8704(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8736(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8768(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8800(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8832(%r8) vpmullw %ymm0, %ymm11, %ymm13 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8864(%r8) vpmullw %ymm1, %ymm11, %ymm12 vpmullw %ymm2, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 
8896(%r8) vpmullw %ymm2, %ymm11, %ymm13 vpmullw %ymm3, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8928(%r8) vpmullw %ymm3, %ymm11, %ymm12 vpmullw %ymm4, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8960(%r8) vpmullw %ymm4, %ymm11, %ymm13 vpmullw %ymm5, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8992(%r8) vpmullw %ymm5, %ymm11, %ymm12 vmovdqa %ymm12, 9024(%r8) vmovdqa 7136(%r8), %ymm0 vmovdqa 7840(%r8), %ymm6 vmovdqa 7168(%r8), %ymm1 vmovdqa 7872(%r8), %ymm7 vmovdqa 7200(%r8), %ymm2 vmovdqa 7904(%r8), %ymm8 vmovdqa 7232(%r8), %ymm3 vmovdqa 7936(%r8), %ymm9 vmovdqa 7264(%r8), %ymm4 vmovdqa 7968(%r8), %ymm10 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 9088(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 9120(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 9152(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 9184(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 9216(%r8) vpmullw %ymm1, %ymm10, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 9248(%r8) vpmullw %ymm2, %ymm10, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 
vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 9280(%r8) vpmullw %ymm3, %ymm10, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 9312(%r8) vpmullw %ymm4, %ymm10, %ymm12 vmovdqa %ymm12, 9344(%r8) vpaddw 6944(%r8), %ymm0, %ymm0 vpaddw 7648(%r8), %ymm6, %ymm6 vpaddw 6976(%r8), %ymm1, %ymm1 vpaddw 7680(%r8), %ymm7, %ymm7 vpaddw 7008(%r8), %ymm2, %ymm2 vpaddw 7712(%r8), %ymm8, %ymm8 vpaddw 7040(%r8), %ymm3, %ymm3 vpaddw 7744(%r8), %ymm9, %ymm9 vpaddw 7072(%r8), %ymm4, %ymm4 vpaddw 7776(%r8), %ymm10, %ymm10 vpmullw %ymm0, %ymm11, %ymm12 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpsubw 8864(%r8), %ymm12, %ymm12 vpsubw 9248(%r8), %ymm12, %ymm12 vmovdqa %ymm12, 9056(%r8) vpmullw %ymm5, %ymm7, %ymm12 vpmullw %ymm5, %ymm8, %ymm13 vpmullw %ymm5, %ymm9, %ymm14 vpmullw %ymm5, %ymm10, %ymm15 vpmullw %ymm1, %ymm11, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm10, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm3, %ymm9, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm4, %ymm8, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm11, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm10, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm4, %ymm9, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm11, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm10, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm11, %ymm5 vpaddw %ymm5, %ymm15, %ymm15 vpmullw %ymm0, %ymm10, %ymm11 vpmullw %ymm1, %ymm9, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm2, %ymm8, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm3, %ymm7, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm4, %ymm6, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm0, %ymm9, %ymm10 
vpmullw %ymm1, %ymm8, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm2, %ymm7, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm3, %ymm6, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm0, %ymm8, %ymm9 vpmullw %ymm1, %ymm7, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm2, %ymm6, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm0, %ymm7, %ymm8 vpmullw %ymm1, %ymm6, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vpmullw %ymm0, %ymm6, %ymm7 vmovdqa 8896(%r8), %ymm0 vpsubw 9088(%r8), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm6 vpsubw 9280(%r8), %ymm6, %ymm6 vmovdqa %ymm6, 9088(%r8) vpaddw %ymm7, %ymm0, %ymm0 vpsubw 8704(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8896(%r8) vmovdqa 8928(%r8), %ymm1 vpsubw 9120(%r8), %ymm1, %ymm1 vpsubw %ymm1, %ymm13, %ymm7 vpsubw 9312(%r8), %ymm7, %ymm7 vmovdqa %ymm7, 9120(%r8) vpaddw %ymm8, %ymm1, %ymm1 vpsubw 8736(%r8), %ymm1, %ymm1 vmovdqa %ymm1, 8928(%r8) vmovdqa 8960(%r8), %ymm2 vpsubw 9152(%r8), %ymm2, %ymm2 vpsubw %ymm2, %ymm14, %ymm8 vpsubw 9344(%r8), %ymm8, %ymm8 vmovdqa %ymm8, 9152(%r8) vpaddw %ymm9, %ymm2, %ymm2 vpsubw 8768(%r8), %ymm2, %ymm2 vmovdqa %ymm2, 8960(%r8) vmovdqa 8992(%r8), %ymm3 vpsubw 9184(%r8), %ymm3, %ymm3 vpsubw %ymm3, %ymm15, %ymm9 vmovdqa %ymm9, 9184(%r8) vpaddw %ymm10, %ymm3, %ymm3 vpsubw 8800(%r8), %ymm3, %ymm3 vmovdqa %ymm3, 8992(%r8) vmovdqa 9024(%r8), %ymm4 vpsubw 9216(%r8), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vpsubw 8832(%r8), %ymm4, %ymm4 vmovdqa %ymm4, 9024(%r8) vmovdqa 6592(%r8), %ymm0 vmovdqa 7296(%r8), %ymm6 vpaddw 6944(%r8), %ymm0, %ymm0 vpaddw 7648(%r8), %ymm6, %ymm6 vmovdqa 6624(%r8), %ymm1 vmovdqa 7328(%r8), %ymm7 vpaddw 6976(%r8), %ymm1, %ymm1 vpaddw 7680(%r8), %ymm7, %ymm7 vmovdqa 6656(%r8), %ymm2 vmovdqa 7360(%r8), %ymm8 vpaddw 7008(%r8), %ymm2, %ymm2 vpaddw 7712(%r8), %ymm8, %ymm8 vmovdqa 6688(%r8), %ymm3 vmovdqa 7392(%r8), %ymm9 vpaddw 7040(%r8), %ymm3, %ymm3 vpaddw 7744(%r8), %ymm9, %ymm9 vmovdqa 6720(%r8), %ymm4 vmovdqa 7424(%r8), %ymm10 vpaddw 7072(%r8), %ymm4, %ymm4 vpaddw 7776(%r8), %ymm10, %ymm10 vmovdqa 
6752(%r8), %ymm5 vmovdqa 7456(%r8), %ymm11 vpaddw 7104(%r8), %ymm5, %ymm5 vpaddw 7808(%r8), %ymm11, %ymm11 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 5888(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 5920(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 5952(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 5984(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6016(%r8) vpmullw %ymm0, %ymm11, %ymm13 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6048(%r8) vpmullw %ymm1, %ymm11, %ymm12 vpmullw %ymm2, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6080(%r8) vpmullw %ymm2, %ymm11, %ymm13 vpmullw %ymm3, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6112(%r8) vpmullw %ymm3, %ymm11, %ymm12 vpmullw %ymm4, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, 
%ymm12 vmovdqa %ymm12, 6144(%r8) vpmullw %ymm4, %ymm11, %ymm13 vpmullw %ymm5, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6176(%r8) vpmullw %ymm5, %ymm11, %ymm12 vmovdqa %ymm12, 6208(%r8) vmovdqa 6784(%r8), %ymm0 vmovdqa 7488(%r8), %ymm6 vpaddw 7136(%r8), %ymm0, %ymm0 vpaddw 7840(%r8), %ymm6, %ymm6 vmovdqa 6816(%r8), %ymm1 vmovdqa 7520(%r8), %ymm7 vpaddw 7168(%r8), %ymm1, %ymm1 vpaddw 7872(%r8), %ymm7, %ymm7 vmovdqa 6848(%r8), %ymm2 vmovdqa 7552(%r8), %ymm8 vpaddw 7200(%r8), %ymm2, %ymm2 vpaddw 7904(%r8), %ymm8, %ymm8 vmovdqa 6880(%r8), %ymm3 vmovdqa 7584(%r8), %ymm9 vpaddw 7232(%r8), %ymm3, %ymm3 vpaddw 7936(%r8), %ymm9, %ymm9 vmovdqa 6912(%r8), %ymm4 vmovdqa 7616(%r8), %ymm10 vpaddw 7264(%r8), %ymm4, %ymm4 vpaddw 7968(%r8), %ymm10, %ymm10 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 6272(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6304(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6336(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6368(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6400(%r8) vpmullw %ymm1, %ymm10, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6432(%r8) vpmullw %ymm2, %ymm10, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, 
%ymm15, %ymm12 vmovdqa %ymm12, 6464(%r8) vpmullw %ymm3, %ymm10, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6496(%r8) vpmullw %ymm4, %ymm10, %ymm12 vmovdqa %ymm12, 6528(%r8) vpaddw 6592(%r8), %ymm0, %ymm0 vpaddw 7296(%r8), %ymm6, %ymm6 vpaddw 6944(%r8), %ymm0, %ymm0 vpaddw 7648(%r8), %ymm6, %ymm6 vpaddw 6624(%r8), %ymm1, %ymm1 vpaddw 7328(%r8), %ymm7, %ymm7 vpaddw 6976(%r8), %ymm1, %ymm1 vpaddw 7680(%r8), %ymm7, %ymm7 vpaddw 6656(%r8), %ymm2, %ymm2 vpaddw 7360(%r8), %ymm8, %ymm8 vpaddw 7008(%r8), %ymm2, %ymm2 vpaddw 7712(%r8), %ymm8, %ymm8 vpaddw 6688(%r8), %ymm3, %ymm3 vpaddw 7392(%r8), %ymm9, %ymm9 vpaddw 7040(%r8), %ymm3, %ymm3 vpaddw 7744(%r8), %ymm9, %ymm9 vpaddw 6720(%r8), %ymm4, %ymm4 vpaddw 7424(%r8), %ymm10, %ymm10 vpaddw 7072(%r8), %ymm4, %ymm4 vpaddw 7776(%r8), %ymm10, %ymm10 vpmullw %ymm0, %ymm11, %ymm12 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpsubw 6048(%r8), %ymm12, %ymm12 vpsubw 6432(%r8), %ymm12, %ymm12 vmovdqa %ymm12, 6240(%r8) vpmullw %ymm5, %ymm7, %ymm12 vpmullw %ymm5, %ymm8, %ymm13 vpmullw %ymm5, %ymm9, %ymm14 vpmullw %ymm5, %ymm10, %ymm15 vpmullw %ymm1, %ymm11, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm10, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm3, %ymm9, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm4, %ymm8, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm11, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm10, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm4, %ymm9, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm11, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm10, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm11, %ymm5 vpaddw %ymm5, %ymm15, %ymm15 vpmullw %ymm0, %ymm10, %ymm11 vpmullw %ymm1, %ymm9, 
%ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm2, %ymm8, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm3, %ymm7, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm4, %ymm6, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm0, %ymm9, %ymm10 vpmullw %ymm1, %ymm8, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm2, %ymm7, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm3, %ymm6, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm0, %ymm8, %ymm9 vpmullw %ymm1, %ymm7, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm2, %ymm6, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm0, %ymm7, %ymm8 vpmullw %ymm1, %ymm6, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vpmullw %ymm0, %ymm6, %ymm7 vmovdqa 6080(%r8), %ymm0 vpsubw 6272(%r8), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm6 vpsubw 6464(%r8), %ymm6, %ymm6 vmovdqa %ymm6, 6272(%r8) vpaddw %ymm7, %ymm0, %ymm0 vpsubw 5888(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 6080(%r8) vmovdqa 6112(%r8), %ymm1 vpsubw 6304(%r8), %ymm1, %ymm1 vpsubw %ymm1, %ymm13, %ymm7 vpsubw 6496(%r8), %ymm7, %ymm7 vmovdqa %ymm7, 6304(%r8) vpaddw %ymm8, %ymm1, %ymm1 vpsubw 5920(%r8), %ymm1, %ymm1 vmovdqa %ymm1, 6112(%r8) vmovdqa 6144(%r8), %ymm2 vpsubw 6336(%r8), %ymm2, %ymm2 vpsubw %ymm2, %ymm14, %ymm8 vpsubw 6528(%r8), %ymm8, %ymm8 vmovdqa %ymm8, 6336(%r8) vpaddw %ymm9, %ymm2, %ymm2 vpsubw 5952(%r8), %ymm2, %ymm2 vmovdqa %ymm2, 6144(%r8) vmovdqa 6176(%r8), %ymm3 vpsubw 6368(%r8), %ymm3, %ymm3 vpsubw %ymm3, %ymm15, %ymm9 vmovdqa %ymm9, 6368(%r8) vpaddw %ymm10, %ymm3, %ymm3 vpsubw 5984(%r8), %ymm3, %ymm3 vmovdqa %ymm3, 6176(%r8) vmovdqa 6208(%r8), %ymm4 vpsubw 6400(%r8), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vpsubw 6016(%r8), %ymm4, %ymm4 vmovdqa %ymm4, 6208(%r8) vmovdqa 8352(%r8), %ymm0 vpsubw 8704(%r8), %ymm0, %ymm0 vmovdqa 6240(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9056(%r8), %ymm1, %ymm6 vpsubw 8000(%r8), %ymm0, %ymm0 vpaddw 5888(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8352(%r8) vmovdqa 8384(%r8), %ymm0 vpsubw 8736(%r8), %ymm0, %ymm0 vmovdqa 6272(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 
9088(%r8), %ymm1, %ymm7 vpsubw 8032(%r8), %ymm0, %ymm0 vpaddw 5920(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8384(%r8) vmovdqa 8416(%r8), %ymm0 vpsubw 8768(%r8), %ymm0, %ymm0 vmovdqa 6304(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9120(%r8), %ymm1, %ymm8 vpsubw 8064(%r8), %ymm0, %ymm0 vpaddw 5952(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8416(%r8) vmovdqa 8448(%r8), %ymm0 vpsubw 8800(%r8), %ymm0, %ymm0 vmovdqa 6336(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9152(%r8), %ymm1, %ymm9 vpsubw 8096(%r8), %ymm0, %ymm0 vpaddw 5984(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8448(%r8) vmovdqa 8480(%r8), %ymm0 vpsubw 8832(%r8), %ymm0, %ymm0 vmovdqa 6368(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9184(%r8), %ymm1, %ymm10 vpsubw 8128(%r8), %ymm0, %ymm0 vpaddw 6016(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8480(%r8) vmovdqa 8512(%r8), %ymm0 vpsubw 8864(%r8), %ymm0, %ymm0 vmovdqa 6400(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9216(%r8), %ymm1, %ymm11 vpsubw 8160(%r8), %ymm0, %ymm0 vpaddw 6048(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8512(%r8) vmovdqa 8544(%r8), %ymm0 vpsubw 8896(%r8), %ymm0, %ymm0 vmovdqa 6432(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9248(%r8), %ymm1, %ymm12 vpsubw 8192(%r8), %ymm0, %ymm0 vpaddw 6080(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8544(%r8) vmovdqa 8576(%r8), %ymm0 vpsubw 8928(%r8), %ymm0, %ymm0 vmovdqa 6464(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9280(%r8), %ymm1, %ymm13 vpsubw 8224(%r8), %ymm0, %ymm0 vpaddw 6112(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8576(%r8) vmovdqa 8608(%r8), %ymm0 vpsubw 8960(%r8), %ymm0, %ymm0 vmovdqa 6496(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9312(%r8), %ymm1, %ymm14 vpsubw 8256(%r8), %ymm0, %ymm0 vpaddw 6144(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8608(%r8) vmovdqa 8640(%r8), %ymm0 vpsubw 8992(%r8), %ymm0, %ymm0 vmovdqa 6528(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9344(%r8), %ymm1, %ymm15 vpsubw 8288(%r8), %ymm0, %ymm0 vpaddw 6176(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8640(%r8) vmovdqa 6208(%r8), %ymm0 vpsubw 8320(%r8), %ymm0, %ymm0 vpsubw 9024(%r8), 
%ymm0, %ymm0 vpsubw 3488(%r10), %ymm0, %ymm0 vpsubw 4896(%r10), %ymm0, %ymm0 vmovdqa %ymm0, 4192(%r10) vmovdqa 3520(%r10), %ymm0 vpsubw 4224(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm6, %ymm6 vpsubw 4928(%r10), %ymm6, %ymm6 vpsubw 2816(%r10), %ymm0, %ymm0 vpaddw 8000(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3520(%r10) vmovdqa %ymm6, 4224(%r10) vmovdqa 3552(%r10), %ymm0 vpsubw 4256(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm7, %ymm7 vpsubw 4960(%r10), %ymm7, %ymm7 vpsubw 2848(%r10), %ymm0, %ymm0 vpaddw 8032(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3552(%r10) vmovdqa %ymm7, 4256(%r10) vmovdqa 3584(%r10), %ymm0 vpsubw 4288(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm8, %ymm8 vpsubw 4992(%r10), %ymm8, %ymm8 vpsubw 2880(%r10), %ymm0, %ymm0 vpaddw 8064(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3584(%r10) vmovdqa %ymm8, 4288(%r10) vmovdqa 3616(%r10), %ymm0 vpsubw 4320(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm9, %ymm9 vpsubw 5024(%r10), %ymm9, %ymm9 vpsubw 2912(%r10), %ymm0, %ymm0 vpaddw 8096(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3616(%r10) vmovdqa %ymm9, 4320(%r10) vmovdqa 3648(%r10), %ymm0 vpsubw 4352(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm10, %ymm10 vpsubw 5056(%r10), %ymm10, %ymm10 vpsubw 2944(%r10), %ymm0, %ymm0 vpaddw 8128(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3648(%r10) vmovdqa %ymm10, 4352(%r10) vmovdqa 3680(%r10), %ymm0 vpsubw 4384(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm11, %ymm11 vpsubw 5088(%r10), %ymm11, %ymm11 vpsubw 2976(%r10), %ymm0, %ymm0 vpaddw 8160(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3680(%r10) vmovdqa %ymm11, 4384(%r10) vmovdqa 3712(%r10), %ymm0 vpsubw 4416(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm12 vpsubw 5120(%r10), %ymm12, %ymm12 vpsubw 3008(%r10), %ymm0, %ymm0 vpaddw 8192(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3712(%r10) vmovdqa %ymm12, 4416(%r10) vmovdqa 3744(%r10), %ymm0 vpsubw 4448(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm13, %ymm13 vpsubw 5152(%r10), %ymm13, %ymm13 vpsubw 3040(%r10), %ymm0, %ymm0 vpaddw 8224(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3744(%r10) vmovdqa %ymm13, 4448(%r10) vmovdqa 3776(%r10), 
%ymm0 vpsubw 4480(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm14, %ymm14 vpsubw 5184(%r10), %ymm14, %ymm14 vpsubw 3072(%r10), %ymm0, %ymm0 vpaddw 8256(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3776(%r10) vmovdqa %ymm14, 4480(%r10) vmovdqa 3808(%r10), %ymm0 vpsubw 4512(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm15, %ymm15 vpsubw 5216(%r10), %ymm15, %ymm15 vpsubw 3104(%r10), %ymm0, %ymm0 vpaddw 8288(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3808(%r10) vmovdqa %ymm15, 4512(%r10) vmovdqa 3840(%r10), %ymm0 vpsubw 4544(%r10), %ymm0, %ymm0 vmovdqa 9024(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5248(%r10), %ymm1, %ymm1 vpsubw 3136(%r10), %ymm0, %ymm0 vpaddw 8320(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3840(%r10) vmovdqa %ymm1, 4544(%r10) vmovdqa 3872(%r10), %ymm0 vpsubw 4576(%r10), %ymm0, %ymm0 vmovdqa 9056(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5280(%r10), %ymm1, %ymm1 vpsubw 3168(%r10), %ymm0, %ymm0 vpaddw 8352(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3872(%r10) vmovdqa %ymm1, 4576(%r10) vmovdqa 3904(%r10), %ymm0 vpsubw 4608(%r10), %ymm0, %ymm0 vmovdqa 9088(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5312(%r10), %ymm1, %ymm1 vpsubw 3200(%r10), %ymm0, %ymm0 vpaddw 8384(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3904(%r10) vmovdqa %ymm1, 4608(%r10) vmovdqa 3936(%r10), %ymm0 vpsubw 4640(%r10), %ymm0, %ymm0 vmovdqa 9120(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5344(%r10), %ymm1, %ymm1 vpsubw 3232(%r10), %ymm0, %ymm0 vpaddw 8416(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3936(%r10) vmovdqa %ymm1, 4640(%r10) vmovdqa 3968(%r10), %ymm0 vpsubw 4672(%r10), %ymm0, %ymm0 vmovdqa 9152(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5376(%r10), %ymm1, %ymm1 vpsubw 3264(%r10), %ymm0, %ymm0 vpaddw 8448(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3968(%r10) vmovdqa %ymm1, 4672(%r10) vmovdqa 4000(%r10), %ymm0 vpsubw 4704(%r10), %ymm0, %ymm0 vmovdqa 9184(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5408(%r10), %ymm1, %ymm1 vpsubw 3296(%r10), %ymm0, %ymm0 vpaddw 8480(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 4000(%r10) vmovdqa %ymm1, 4704(%r10) 
vmovdqa 4032(%r10), %ymm0 vpsubw 4736(%r10), %ymm0, %ymm0 vmovdqa 9216(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5440(%r10), %ymm1, %ymm1 vpsubw 3328(%r10), %ymm0, %ymm0 vpaddw 8512(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 4032(%r10) vmovdqa %ymm1, 4736(%r10) vmovdqa 4064(%r10), %ymm0 vpsubw 4768(%r10), %ymm0, %ymm0 vmovdqa 9248(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5472(%r10), %ymm1, %ymm1 vpsubw 3360(%r10), %ymm0, %ymm0 vpaddw 8544(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 4064(%r10) vmovdqa %ymm1, 4768(%r10) vmovdqa 4096(%r10), %ymm0 vpsubw 4800(%r10), %ymm0, %ymm0 vmovdqa 9280(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5504(%r10), %ymm1, %ymm1 vpsubw 3392(%r10), %ymm0, %ymm0 vpaddw 8576(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 4096(%r10) vmovdqa %ymm1, 4800(%r10) vmovdqa 4128(%r10), %ymm0 vpsubw 4832(%r10), %ymm0, %ymm0 vmovdqa 9312(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5536(%r10), %ymm1, %ymm1 vpsubw 3424(%r10), %ymm0, %ymm0 vpaddw 8608(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 4128(%r10) vmovdqa %ymm1, 4832(%r10) vmovdqa 4160(%r10), %ymm0 vpsubw 4864(%r10), %ymm0, %ymm0 vmovdqa 9344(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5568(%r10), %ymm1, %ymm1 vpsubw 3456(%r10), %ymm0, %ymm0 vpaddw 8640(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 4160(%r10) vmovdqa %ymm1, 4864(%r10) vpxor %ymm1, %ymm1, %ymm1 vmovdqa %ymm1, 5600(%r10) subq $32, %r8 vmovdqa 2816(%r10), %ymm0 vmovdqa 2880(%r10), %ymm1 vmovdqa 2944(%r10), %ymm2 vmovdqa 3008(%r10), %ymm3 vpunpcklwd 2848(%r10), %ymm0, %ymm4 vpunpckhwd 2848(%r10), %ymm0, %ymm5 vpunpcklwd 2912(%r10), %ymm1, %ymm6 vpunpckhwd 2912(%r10), %ymm1, %ymm7 vpunpcklwd 2976(%r10), %ymm2, %ymm8 vpunpckhwd 2976(%r10), %ymm2, %ymm9 vpunpcklwd 3040(%r10), %ymm3, %ymm10 vpunpckhwd 3040(%r10), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, 
%ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 3072(%r10), %ymm0 vmovdqa 3136(%r10), %ymm1 vmovdqa 3200(%r10), %ymm2 vmovdqa 3264(%r10), %ymm3 vpunpcklwd 3104(%r10), %ymm0, %ymm12 vpunpckhwd 3104(%r10), %ymm0, %ymm13 vpunpcklwd 3168(%r10), %ymm1, %ymm14 vpunpckhwd 3168(%r10), %ymm1, %ymm15 vpunpcklwd 3232(%r10), %ymm2, %ymm0 vpunpckhwd 3232(%r10), %ymm2, %ymm1 vpunpcklwd 3296(%r10), %ymm3, %ymm2 vpunpckhwd 3296(%r10), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 0(%r12) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 192(%r12) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 384(%r12) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 576(%r12) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 768(%r12) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 960(%r12) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1152(%r12) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1536(%r12) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1728(%r12) vinserti128 $0, %xmm6, 
%ymm11, %ymm15 vmovdqa %ymm15, 1920(%r12) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2112(%r12) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2304(%r12) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2496(%r12) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2688(%r12) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1344(%r12) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 2880(%r12) vmovdqa 3328(%r10), %ymm0 vmovdqa 3392(%r10), %ymm1 vmovdqa 3456(%r10), %ymm2 vmovdqa 3520(%r10), %ymm3 vpunpcklwd 3360(%r10), %ymm0, %ymm4 vpunpckhwd 3360(%r10), %ymm0, %ymm5 vpunpcklwd 3424(%r10), %ymm1, %ymm6 vpunpckhwd 3424(%r10), %ymm1, %ymm7 vpunpcklwd 3488(%r10), %ymm2, %ymm8 vpunpckhwd 3488(%r10), %ymm2, %ymm9 vpunpcklwd 3552(%r10), %ymm3, %ymm10 vpunpckhwd 3552(%r10), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 3584(%r10), %ymm0 vmovdqa 3648(%r10), %ymm1 vmovdqa 3712(%r10), %ymm2 vmovdqa 3776(%r10), %ymm3 vpunpcklwd 3616(%r10), %ymm0, %ymm12 vpunpckhwd 3616(%r10), %ymm0, %ymm13 vpunpcklwd 3680(%r10), %ymm1, %ymm14 vpunpckhwd 3680(%r10), %ymm1, %ymm15 vpunpcklwd 3744(%r10), %ymm2, %ymm0 vpunpckhwd 3744(%r10), %ymm2, %ymm1 vpunpcklwd 3808(%r10), %ymm3, %ymm2 vpunpckhwd 3808(%r10), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, 
%ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 32(%r12) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 224(%r12) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 416(%r12) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 608(%r12) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 800(%r12) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 992(%r12) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1184(%r12) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1568(%r12) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1760(%r12) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 1952(%r12) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2144(%r12) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2336(%r12) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2528(%r12) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2720(%r12) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1376(%r12) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 2912(%r12) vmovdqa 3840(%r10), %ymm0 vmovdqa 3904(%r10), %ymm1 vmovdqa 3968(%r10), %ymm2 vmovdqa 4032(%r10), %ymm3 vpunpcklwd 3872(%r10), %ymm0, %ymm4 vpunpckhwd 3872(%r10), %ymm0, %ymm5 vpunpcklwd 3936(%r10), %ymm1, %ymm6 vpunpckhwd 3936(%r10), %ymm1, %ymm7 vpunpcklwd 4000(%r10), %ymm2, %ymm8 vpunpckhwd 4000(%r10), %ymm2, %ymm9 vpunpcklwd 4064(%r10), %ymm3, %ymm10 vpunpckhwd 4064(%r10), %ymm3, %ymm11 
vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 4096(%r10), %ymm0 vmovdqa 4160(%r10), %ymm1 vmovdqa 4224(%r10), %ymm2 vmovdqa 4288(%r10), %ymm3 vpunpcklwd 4128(%r10), %ymm0, %ymm12 vpunpckhwd 4128(%r10), %ymm0, %ymm13 vpunpcklwd 4192(%r10), %ymm1, %ymm14 vpunpckhwd 4192(%r10), %ymm1, %ymm15 vpunpcklwd 4256(%r10), %ymm2, %ymm0 vpunpckhwd 4256(%r10), %ymm2, %ymm1 vpunpcklwd 4320(%r10), %ymm3, %ymm2 vpunpckhwd 4320(%r10), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 64(%r12) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 256(%r12) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 448(%r12) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 640(%r12) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 832(%r12) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 1024(%r12) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1216(%r12) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 
vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1600(%r12) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1792(%r12) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 1984(%r12) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2176(%r12) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2368(%r12) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2560(%r12) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2752(%r12) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1408(%r12) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 2944(%r12) vmovdqa 4224(%r10), %ymm0 vmovdqa 4288(%r10), %ymm1 vmovdqa 4352(%r10), %ymm2 vmovdqa 4416(%r10), %ymm3 vpunpcklwd 4256(%r10), %ymm0, %ymm4 vpunpckhwd 4256(%r10), %ymm0, %ymm5 vpunpcklwd 4320(%r10), %ymm1, %ymm6 vpunpckhwd 4320(%r10), %ymm1, %ymm7 vpunpcklwd 4384(%r10), %ymm2, %ymm8 vpunpckhwd 4384(%r10), %ymm2, %ymm9 vpunpcklwd 4448(%r10), %ymm3, %ymm10 vpunpckhwd 4448(%r10), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 4480(%r10), %ymm0 vmovdqa 4544(%r10), %ymm1 vmovdqa 4608(%r10), %ymm2 vmovdqa 4672(%r10), %ymm3 vpunpcklwd 4512(%r10), %ymm0, %ymm12 vpunpckhwd 4512(%r10), %ymm0, %ymm13 vpunpcklwd 4576(%r10), %ymm1, %ymm14 vpunpckhwd 4576(%r10), %ymm1, %ymm15 vpunpcklwd 4640(%r10), %ymm2, %ymm0 vpunpckhwd 4640(%r10), %ymm2, %ymm1 vpunpcklwd 4704(%r10), 
%ymm3, %ymm2 vpunpckhwd 4704(%r10), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 96(%r12) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 288(%r12) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 480(%r12) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 672(%r12) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 864(%r12) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 1056(%r12) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1248(%r12) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1632(%r12) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1824(%r12) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 2016(%r12) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2208(%r12) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2400(%r12) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2592(%r12) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2784(%r12) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1440(%r12) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 2976(%r12) vmovdqa 4736(%r10), %ymm0 vmovdqa 4800(%r10), %ymm1 vmovdqa 4864(%r10), %ymm2 vmovdqa 4928(%r10), %ymm3 vpunpcklwd 4768(%r10), %ymm0, %ymm4 
vpunpckhwd 4768(%r10), %ymm0, %ymm5 vpunpcklwd 4832(%r10), %ymm1, %ymm6 vpunpckhwd 4832(%r10), %ymm1, %ymm7 vpunpcklwd 4896(%r10), %ymm2, %ymm8 vpunpckhwd 4896(%r10), %ymm2, %ymm9 vpunpcklwd 4960(%r10), %ymm3, %ymm10 vpunpckhwd 4960(%r10), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 4992(%r10), %ymm0 vmovdqa 5056(%r10), %ymm1 vmovdqa 5120(%r10), %ymm2 vmovdqa 5184(%r10), %ymm3 vpunpcklwd 5024(%r10), %ymm0, %ymm12 vpunpckhwd 5024(%r10), %ymm0, %ymm13 vpunpcklwd 5088(%r10), %ymm1, %ymm14 vpunpckhwd 5088(%r10), %ymm1, %ymm15 vpunpcklwd 5152(%r10), %ymm2, %ymm0 vpunpckhwd 5152(%r10), %ymm2, %ymm1 vpunpcklwd 5216(%r10), %ymm3, %ymm2 vpunpckhwd 5216(%r10), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 128(%r12) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 320(%r12) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 512(%r12) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 704(%r12) vinserti128 $1, 
%xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 896(%r12) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 1088(%r12) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1280(%r12) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1664(%r12) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1856(%r12) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 2048(%r12) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2240(%r12) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2432(%r12) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2624(%r12) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2816(%r12) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1472(%r12) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 3008(%r12) vmovdqa 5248(%r10), %ymm0 vmovdqa 5312(%r10), %ymm1 vmovdqa 5376(%r10), %ymm2 vmovdqa 5440(%r10), %ymm3 vpunpcklwd 5280(%r10), %ymm0, %ymm4 vpunpckhwd 5280(%r10), %ymm0, %ymm5 vpunpcklwd 5344(%r10), %ymm1, %ymm6 vpunpckhwd 5344(%r10), %ymm1, %ymm7 vpunpcklwd 5408(%r10), %ymm2, %ymm8 vpunpckhwd 5408(%r10), %ymm2, %ymm9 vpunpcklwd 5472(%r10), %ymm3, %ymm10 vpunpckhwd 5472(%r10), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 5504(%r10), %ymm0 vmovdqa 5568(%r10), %ymm1 vmovdqa 5632(%r10), %ymm2 vmovdqa 5696(%r10), 
%ymm3 vpunpcklwd 5536(%r10), %ymm0, %ymm12 vpunpckhwd 5536(%r10), %ymm0, %ymm13 vpunpcklwd 5600(%r10), %ymm1, %ymm14 vpunpckhwd 5600(%r10), %ymm1, %ymm15 vpunpcklwd 5664(%r10), %ymm2, %ymm0 vpunpckhwd 5664(%r10), %ymm2, %ymm1 vpunpcklwd 5728(%r10), %ymm3, %ymm2 vpunpckhwd 5728(%r10), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 160(%r12) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 352(%r12) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 544(%r12) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 736(%r12) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 928(%r12) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 1120(%r12) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1312(%r12) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1696(%r12) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1888(%r12) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 2080(%r12) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2272(%r12) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2464(%r12) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2656(%r12) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2848(%r12) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa 
%ymm14, 1504(%r12) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 3040(%r12) addq $32, %r8 add $1536, %rax add $1536, %r11 add $3072, %r12 dec %ecx jnz karatsuba_loop_4eced63f144beffcb0247f9c6f67d165 sub $12288, %r12 add $9408-2400, %r8 vpxor %ymm0, %ymm0, %ymm0 vmovdqa %ymm0, 1792(%r8) vmovdqa %ymm0, 1824(%r8) vmovdqa %ymm0, 1856(%r8) vmovdqa %ymm0, 1888(%r8) vmovdqa %ymm0, 1920(%r8) vmovdqa %ymm0, 1952(%r8) vmovdqa %ymm0, 1984(%r8) vmovdqa %ymm0, 2016(%r8) vmovdqa %ymm0, 2048(%r8) vmovdqa %ymm0, 2080(%r8) vmovdqa %ymm0, 2112(%r8) vmovdqa %ymm0, 2144(%r8) vmovdqa %ymm0, 2176(%r8) vmovdqa %ymm0, 2208(%r8) vmovdqa %ymm0, 2240(%r8) vmovdqa %ymm0, 2272(%r8) vmovdqa %ymm0, 2304(%r8) vmovdqa %ymm0, 2336(%r8) vmovdqa %ymm0, 2368(%r8) vmovdqa %ymm0, 2400(%r8) vmovdqa %ymm0, 2432(%r8) vmovdqa %ymm0, 2464(%r8) vmovdqa %ymm0, 2496(%r8) vmovdqa %ymm0, 2528(%r8) vmovdqa %ymm0, 2560(%r8) vmovdqa %ymm0, 2592(%r8) vmovdqa %ymm0, 2624(%r8) vmovdqa %ymm0, 2656(%r8) vmovdqa %ymm0, 2688(%r8) vmovdqa %ymm0, 2720(%r8) vmovdqa %ymm0, 2752(%r8) vmovdqa %ymm0, 2784(%r8) vmovdqa const729(%rip), %ymm15 vmovdqa const3_inv(%rip), %ymm14 vmovdqa const5_inv(%rip), %ymm13 vmovdqa const9(%rip), %ymm12 vmovdqa 96(%r12), %ymm0 vpsubw 192(%r12), %ymm0, %ymm0 vmovdqa 480(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 288(%r12), %ymm1, %ymm1 vpsubw 0(%r12), %ymm0, %ymm0 vpaddw 384(%r12), %ymm0, %ymm0 vmovdqa 672(%r12), %ymm2 vpsubw 768(%r12), %ymm2, %ymm2 vmovdqa 1056(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 864(%r12), %ymm3, %ymm3 vpsubw 576(%r12), %ymm2, %ymm2 vpaddw 960(%r12), %ymm2, %ymm2 vmovdqa 1248(%r12), %ymm4 vpsubw 1344(%r12), %ymm4, %ymm4 vmovdqa 1632(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 1440(%r12), %ymm5, %ymm5 vpsubw 1152(%r12), %ymm4, %ymm4 vpaddw 1536(%r12), %ymm4, %ymm4 vpsubw 576(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 0(%r12), %ymm1, %ymm1 vpaddw 1152(%r12), %ymm1, %ymm1 vmovdqa 288(%r12), %ymm6 
vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 1440(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 864(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 0(%r12), %ymm8 vmovdqa 864(%r12), %ymm9 vmovdqa %ymm8, 0(%r8) vmovdqa %ymm0, 32(%r8) vmovdqa %ymm1, 64(%r8) vmovdqa %ymm7, 96(%r8) vmovdqa %ymm5, 128(%r8) vmovdqa %ymm2, 160(%r8) vmovdqa %ymm3, 192(%r8) vmovdqa %ymm9, 224(%r8) vmovdqa 1824(%r12), %ymm0 vpsubw 1920(%r12), %ymm0, %ymm0 vmovdqa 2208(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 2016(%r12), %ymm1, %ymm1 vpsubw 1728(%r12), %ymm0, %ymm0 vpaddw 2112(%r12), %ymm0, %ymm0 vmovdqa 2400(%r12), %ymm2 vpsubw 2496(%r12), %ymm2, %ymm2 vmovdqa 2784(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 2592(%r12), %ymm3, %ymm3 vpsubw 2304(%r12), %ymm2, %ymm2 vpaddw 2688(%r12), %ymm2, %ymm2 vmovdqa 2976(%r12), %ymm4 vpsubw 3072(%r12), %ymm4, %ymm4 vmovdqa 3360(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 3168(%r12), %ymm5, %ymm5 vpsubw 2880(%r12), %ymm4, %ymm4 vpaddw 3264(%r12), %ymm4, %ymm4 vpsubw 2304(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 1728(%r12), %ymm1, %ymm1 vpaddw 2880(%r12), %ymm1, %ymm1 vmovdqa 2016(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 3168(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 2592(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 1728(%r12), %ymm8 vmovdqa 2592(%r12), %ymm9 vmovdqa %ymm8, 256(%r8) vmovdqa %ymm0, 288(%r8) vmovdqa %ymm1, 320(%r8) vmovdqa %ymm7, 352(%r8) vmovdqa %ymm5, 384(%r8) vmovdqa %ymm2, 416(%r8) vmovdqa %ymm3, 448(%r8) vmovdqa %ymm9, 480(%r8) vmovdqa 3552(%r12), %ymm0 vpsubw 3648(%r12), %ymm0, %ymm0 vmovdqa 3936(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3744(%r12), %ymm1, %ymm1 vpsubw 3456(%r12), %ymm0, %ymm0 vpaddw 3840(%r12), %ymm0, %ymm0 vmovdqa 4128(%r12), %ymm2 vpsubw 4224(%r12), %ymm2, %ymm2 vmovdqa 4512(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 4320(%r12), %ymm3, %ymm3 vpsubw 4032(%r12), %ymm2, %ymm2 vpaddw 
4416(%r12), %ymm2, %ymm2 vmovdqa 4704(%r12), %ymm4 vpsubw 4800(%r12), %ymm4, %ymm4 vmovdqa 5088(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 4896(%r12), %ymm5, %ymm5 vpsubw 4608(%r12), %ymm4, %ymm4 vpaddw 4992(%r12), %ymm4, %ymm4 vpsubw 4032(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 3456(%r12), %ymm1, %ymm1 vpaddw 4608(%r12), %ymm1, %ymm1 vmovdqa 3744(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 4896(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 4320(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 3456(%r12), %ymm8 vmovdqa 4320(%r12), %ymm9 vmovdqa %ymm8, 512(%r8) vmovdqa %ymm0, 544(%r8) vmovdqa %ymm1, 576(%r8) vmovdqa %ymm7, 608(%r8) vmovdqa %ymm5, 640(%r8) vmovdqa %ymm2, 672(%r8) vmovdqa %ymm3, 704(%r8) vmovdqa %ymm9, 736(%r8) vmovdqa 5280(%r12), %ymm0 vpsubw 5376(%r12), %ymm0, %ymm0 vmovdqa 5664(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5472(%r12), %ymm1, %ymm1 vpsubw 5184(%r12), %ymm0, %ymm0 vpaddw 5568(%r12), %ymm0, %ymm0 vmovdqa 5856(%r12), %ymm2 vpsubw 5952(%r12), %ymm2, %ymm2 vmovdqa 6240(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 6048(%r12), %ymm3, %ymm3 vpsubw 5760(%r12), %ymm2, %ymm2 vpaddw 6144(%r12), %ymm2, %ymm2 vmovdqa 6432(%r12), %ymm4 vpsubw 6528(%r12), %ymm4, %ymm4 vmovdqa 6816(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 6624(%r12), %ymm5, %ymm5 vpsubw 6336(%r12), %ymm4, %ymm4 vpaddw 6720(%r12), %ymm4, %ymm4 vpsubw 5760(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 5184(%r12), %ymm1, %ymm1 vpaddw 6336(%r12), %ymm1, %ymm1 vmovdqa 5472(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 6624(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 6048(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 5184(%r12), %ymm8 vmovdqa 6048(%r12), %ymm9 vmovdqa %ymm8, 768(%r8) vmovdqa %ymm0, 800(%r8) vmovdqa %ymm1, 832(%r8) vmovdqa %ymm7, 864(%r8) vmovdqa %ymm5, 896(%r8) vmovdqa %ymm2, 928(%r8) vmovdqa %ymm3, 
960(%r8) vmovdqa %ymm9, 992(%r8) vmovdqa 7008(%r12), %ymm0 vpsubw 7104(%r12), %ymm0, %ymm0 vmovdqa 7392(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 7200(%r12), %ymm1, %ymm1 vpsubw 6912(%r12), %ymm0, %ymm0 vpaddw 7296(%r12), %ymm0, %ymm0 vmovdqa 7584(%r12), %ymm2 vpsubw 7680(%r12), %ymm2, %ymm2 vmovdqa 7968(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 7776(%r12), %ymm3, %ymm3 vpsubw 7488(%r12), %ymm2, %ymm2 vpaddw 7872(%r12), %ymm2, %ymm2 vmovdqa 8160(%r12), %ymm4 vpsubw 8256(%r12), %ymm4, %ymm4 vmovdqa 8544(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 8352(%r12), %ymm5, %ymm5 vpsubw 8064(%r12), %ymm4, %ymm4 vpaddw 8448(%r12), %ymm4, %ymm4 vpsubw 7488(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 6912(%r12), %ymm1, %ymm1 vpaddw 8064(%r12), %ymm1, %ymm1 vmovdqa 7200(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 8352(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 7776(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 6912(%r12), %ymm8 vmovdqa 7776(%r12), %ymm9 vmovdqa %ymm8, 1024(%r8) vmovdqa %ymm0, 1056(%r8) vmovdqa %ymm1, 1088(%r8) vmovdqa %ymm7, 1120(%r8) vmovdqa %ymm5, 1152(%r8) vmovdqa %ymm2, 1184(%r8) vmovdqa %ymm3, 1216(%r8) vmovdqa %ymm9, 1248(%r8) vmovdqa 8736(%r12), %ymm0 vpsubw 8832(%r12), %ymm0, %ymm0 vmovdqa 9120(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 8928(%r12), %ymm1, %ymm1 vpsubw 8640(%r12), %ymm0, %ymm0 vpaddw 9024(%r12), %ymm0, %ymm0 vmovdqa 9312(%r12), %ymm2 vpsubw 9408(%r12), %ymm2, %ymm2 vmovdqa 9696(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 9504(%r12), %ymm3, %ymm3 vpsubw 9216(%r12), %ymm2, %ymm2 vpaddw 9600(%r12), %ymm2, %ymm2 vmovdqa 9888(%r12), %ymm4 vpsubw 9984(%r12), %ymm4, %ymm4 vmovdqa 10272(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 10080(%r12), %ymm5, %ymm5 vpsubw 9792(%r12), %ymm4, %ymm4 vpaddw 10176(%r12), %ymm4, %ymm4 vpsubw 9216(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 8640(%r12), %ymm1, %ymm1 vpaddw 
9792(%r12), %ymm1, %ymm1 vmovdqa 8928(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 10080(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 9504(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 8640(%r12), %ymm8 vmovdqa 9504(%r12), %ymm9 vmovdqa %ymm8, 1280(%r8) vmovdqa %ymm0, 1312(%r8) vmovdqa %ymm1, 1344(%r8) vmovdqa %ymm7, 1376(%r8) vmovdqa %ymm5, 1408(%r8) vmovdqa %ymm2, 1440(%r8) vmovdqa %ymm3, 1472(%r8) vmovdqa %ymm9, 1504(%r8) vmovdqa 10464(%r12), %ymm0 vpsubw 10560(%r12), %ymm0, %ymm0 vmovdqa 10848(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 10656(%r12), %ymm1, %ymm1 vpsubw 10368(%r12), %ymm0, %ymm0 vpaddw 10752(%r12), %ymm0, %ymm0 vmovdqa 11040(%r12), %ymm2 vpsubw 11136(%r12), %ymm2, %ymm2 vmovdqa 11424(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 11232(%r12), %ymm3, %ymm3 vpsubw 10944(%r12), %ymm2, %ymm2 vpaddw 11328(%r12), %ymm2, %ymm2 vmovdqa 11616(%r12), %ymm4 vpsubw 11712(%r12), %ymm4, %ymm4 vmovdqa 12000(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 11808(%r12), %ymm5, %ymm5 vpsubw 11520(%r12), %ymm4, %ymm4 vpaddw 11904(%r12), %ymm4, %ymm4 vpsubw 10944(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 10368(%r12), %ymm1, %ymm1 vpaddw 11520(%r12), %ymm1, %ymm1 vmovdqa 10656(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 11808(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 11232(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 10368(%r12), %ymm8 vmovdqa 11232(%r12), %ymm9 vmovdqa %ymm8, 1536(%r8) vmovdqa %ymm0, 1568(%r8) vmovdqa %ymm1, 1600(%r8) vmovdqa %ymm7, 1632(%r8) vmovdqa %ymm5, 1664(%r8) vmovdqa %ymm2, 1696(%r8) vmovdqa %ymm3, 1728(%r8) vmovdqa %ymm9, 1760(%r8) vmovdqa 0(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm10 vpunpckhwd const0(%rip), %ymm11, %ymm9 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm9, %ymm9 vmovdqa 256(%r8), %ymm8 vpunpcklwd const0(%rip), %ymm8, %ymm7 vpunpckhwd const0(%rip), %ymm8, %ymm8 vmovdqa 512(%r8), %ymm6 vpunpcklwd 
const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm7, %ymm4 vpaddd %ymm6, %ymm8, %ymm3 vpsubd %ymm10, %ymm4, %ymm4 vpsubd %ymm9, %ymm3, %ymm3 vpsubd %ymm5, %ymm7, %ymm5 vpsubd %ymm6, %ymm8, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1536(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm8 vpunpckhwd const0(%rip), %ymm5, %ymm7 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm7, %ymm7 vpsubd %ymm8, %ymm4, %ymm4 vpsubd %ymm7, %ymm3, %ymm3 vpsrld $1, %ymm4, %ymm4 vpsrld $1, %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpackusdw %ymm3, %ymm4, %ymm3 vmovdqa 768(%r8), %ymm4 vpaddw 1024(%r8), %ymm4, %ymm7 vpsubw 1024(%r8), %ymm4, %ymm4 vpsrlw $2, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsllw $1, %ymm11, %ymm8 vpsubw %ymm8, %ymm7, %ymm8 vpsllw $7, %ymm5, %ymm7 vpsubw %ymm7, %ymm8, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm3, %ymm7, %ymm7 vmovdqa 1280(%r8), %ymm8 vpsubw %ymm11, %ymm8, %ymm8 vpmullw %ymm15, %ymm5, %ymm9 vpsubw %ymm9, %ymm8, %ymm9 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm7, %ymm3, %ymm3 vpmullw %ymm12, %ymm7, %ymm8 vpaddw %ymm8, %ymm3, %ymm8 vpmullw %ymm12, %ymm8, %ymm8 vpsubw %ymm8, %ymm9, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm4, %ymm8, %ymm8 vpsubw %ymm8, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vpmullw %ymm13, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_3_5(%rip), %ymm7, %ymm9 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm10 vpor %ymm10, %ymm7, %ymm7 vpaddw %ymm7, %ymm11, %ymm11 vmovdqa %xmm9, 2048(%r8) vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm9 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm10 
vpor %ymm10, %ymm8, %ymm8 vpaddw %ymm8, %ymm6, %ymm6 vmovdqa %xmm9, 2304(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm9 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm10 vpor %ymm10, %ymm5, %ymm5 vpaddw %ymm5, %ymm3, %ymm3 vmovdqa %xmm9, 2560(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 0(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 352(%rdi) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %ymm3, 704(%rdi) vpand mask_mod8192(%rip), %ymm4, %ymm4 vmovdqu %ymm4, 1056(%rdi) vmovdqa 32(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm8 vpunpckhwd const0(%rip), %ymm5, %ymm7 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm7, %ymm7 vmovdqa 288(%r8), %ymm4 vpunpcklwd const0(%rip), %ymm4, %ymm3 vpunpckhwd const0(%rip), %ymm4, %ymm4 vmovdqa 544(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm3, %ymm9 vpaddd %ymm6, %ymm4, %ymm10 vpsubd %ymm8, %ymm9, %ymm9 vpsubd %ymm7, %ymm10, %ymm10 vpsubd %ymm11, %ymm3, %ymm11 vpsubd %ymm6, %ymm4, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1568(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm4 vpunpckhwd const0(%rip), %ymm11, %ymm3 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm3, %ymm3 vpsubd %ymm4, %ymm9, %ymm9 vpsubd %ymm3, %ymm10, %ymm10 vpsrld $1, %ymm9, %ymm9 vpsrld $1, %ymm10, %ymm10 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm10, %ymm10 vpackusdw %ymm10, %ymm9, %ymm10 vmovdqa 800(%r8), %ymm9 vpaddw 1056(%r8), %ymm9, %ymm3 vpsubw 1056(%r8), %ymm9, %ymm9 vpsrlw $2, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsllw $1, %ymm5, %ymm4 vpsubw %ymm4, %ymm3, %ymm4 vpsllw $7, %ymm11, %ymm3 vpsubw %ymm3, %ymm4, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm10, %ymm3, %ymm3 vmovdqa 1312(%r8), %ymm4 vpsubw 
%ymm5, %ymm4, %ymm4 vpmullw %ymm15, %ymm11, %ymm7 vpsubw %ymm7, %ymm4, %ymm7 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm3, %ymm10, %ymm10 vpmullw %ymm12, %ymm3, %ymm4 vpaddw %ymm4, %ymm10, %ymm4 vpmullw %ymm12, %ymm4, %ymm4 vpsubw %ymm4, %ymm7, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm9, %ymm4, %ymm4 vpsubw %ymm4, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpmullw %ymm13, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_3_5(%rip), %ymm3, %ymm7 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm8 vpor %ymm8, %ymm3, %ymm3 vpaddw %ymm3, %ymm5, %ymm5 vmovdqa %xmm7, 2080(%r8) vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm7 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm8 vpor %ymm8, %ymm4, %ymm4 vpaddw %ymm4, %ymm6, %ymm6 vmovdqa %xmm7, 2336(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm7 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm8 vpor %ymm8, %ymm11, %ymm11 vpaddw %ymm11, %ymm10, %ymm10 vmovdqa %xmm7, 2592(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 88(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 440(%rdi) vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %ymm10, 792(%rdi) vpand mask_mod8192(%rip), %ymm9, %ymm9 vmovdqu %ymm9, 1144(%rdi) vmovdqa 64(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm4 vpunpckhwd const0(%rip), %ymm11, %ymm3 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm3, %ymm3 vmovdqa 320(%r8), %ymm9 vpunpcklwd const0(%rip), %ymm9, %ymm10 vpunpckhwd const0(%rip), %ymm9, %ymm9 vmovdqa 576(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm10, %ymm7 vpaddd %ymm6, %ymm9, %ymm8 vpsubd %ymm4, %ymm7, %ymm7 vpsubd %ymm3, %ymm8, %ymm8 vpsubd %ymm5, %ymm10, %ymm5 vpsubd 
%ymm6, %ymm9, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1600(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm9 vpunpckhwd const0(%rip), %ymm5, %ymm10 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm10, %ymm10 vpsubd %ymm9, %ymm7, %ymm7 vpsubd %ymm10, %ymm8, %ymm8 vpsrld $1, %ymm7, %ymm7 vpsrld $1, %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm7, %ymm7 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpackusdw %ymm8, %ymm7, %ymm8 vmovdqa 832(%r8), %ymm7 vpaddw 1088(%r8), %ymm7, %ymm10 vpsubw 1088(%r8), %ymm7, %ymm7 vpsrlw $2, %ymm7, %ymm7 vpsubw %ymm6, %ymm7, %ymm7 vpmullw %ymm14, %ymm7, %ymm7 vpsllw $1, %ymm11, %ymm9 vpsubw %ymm9, %ymm10, %ymm9 vpsllw $7, %ymm5, %ymm10 vpsubw %ymm10, %ymm9, %ymm10 vpsrlw $3, %ymm10, %ymm10 vpsubw %ymm8, %ymm10, %ymm10 vmovdqa 1344(%r8), %ymm9 vpsubw %ymm11, %ymm9, %ymm9 vpmullw %ymm15, %ymm5, %ymm3 vpsubw %ymm3, %ymm9, %ymm3 vpmullw %ymm14, %ymm10, %ymm10 vpsubw %ymm10, %ymm8, %ymm8 vpmullw %ymm12, %ymm10, %ymm9 vpaddw %ymm9, %ymm8, %ymm9 vpmullw %ymm12, %ymm9, %ymm9 vpsubw %ymm9, %ymm3, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm7, %ymm9, %ymm9 vpsubw %ymm9, %ymm7, %ymm7 vpsubw %ymm7, %ymm6, %ymm6 vpmullw %ymm13, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_3_5(%rip), %ymm10, %ymm3 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm10, %ymm10 vpaddw %ymm10, %ymm11, %ymm11 vmovdqa %xmm3, 2112(%r8) vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_3_5(%rip), %ymm9, %ymm3 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm9, %ymm9 vpaddw %ymm9, %ymm6, %ymm6 vmovdqa %xmm3, 2368(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm3 vpand mask5_3_5_3(%rip), %ymm5, 
%ymm5 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm5, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vmovdqa %xmm3, 2624(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 176(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 528(%rdi) vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %ymm8, 880(%rdi) vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %ymm7, 1232(%rdi) vmovdqa 96(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm9 vpunpckhwd const0(%rip), %ymm5, %ymm10 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm10, %ymm10 vmovdqa 352(%r8), %ymm7 vpunpcklwd const0(%rip), %ymm7, %ymm8 vpunpckhwd const0(%rip), %ymm7, %ymm7 vmovdqa 608(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm8, %ymm3 vpaddd %ymm6, %ymm7, %ymm4 vpsubd %ymm9, %ymm3, %ymm3 vpsubd %ymm10, %ymm4, %ymm4 vpsubd %ymm11, %ymm8, %ymm11 vpsubd %ymm6, %ymm7, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1632(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm7 vpunpckhwd const0(%rip), %ymm11, %ymm8 vpslld $1, %ymm7, %ymm7 vpslld $1, %ymm8, %ymm8 vpsubd %ymm7, %ymm3, %ymm3 vpsubd %ymm8, %ymm4, %ymm4 vpsrld $1, %ymm3, %ymm3 vpsrld $1, %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpackusdw %ymm4, %ymm3, %ymm4 vmovdqa 864(%r8), %ymm3 vpaddw 1120(%r8), %ymm3, %ymm8 vpsubw 1120(%r8), %ymm3, %ymm3 vpsrlw $2, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsllw $1, %ymm5, %ymm7 vpsubw %ymm7, %ymm8, %ymm7 vpsllw $7, %ymm11, %ymm8 vpsubw %ymm8, %ymm7, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm4, %ymm8, %ymm8 vmovdqa 1376(%r8), %ymm7 vpsubw %ymm5, %ymm7, %ymm7 vpmullw %ymm15, %ymm11, %ymm10 vpsubw %ymm10, %ymm7, %ymm10 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm8, %ymm4, %ymm4 vpmullw %ymm12, %ymm8, %ymm7 vpaddw %ymm7, %ymm4, %ymm7 
vpmullw %ymm12, %ymm7, %ymm7 vpsubw %ymm7, %ymm10, %ymm7 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm6, %ymm7, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm3, %ymm7, %ymm7 vpsubw %ymm7, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vpmullw %ymm13, %ymm7, %ymm7 vpsubw %ymm7, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm10 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm9 vpor %ymm9, %ymm8, %ymm8 vpaddw %ymm8, %ymm5, %ymm5 vmovdqa %xmm10, 2144(%r8) vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_3_5(%rip), %ymm7, %ymm10 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $206, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm9 vpor %ymm9, %ymm7, %ymm7 vpaddw %ymm7, %ymm6, %ymm6 vmovdqa %xmm10, 2400(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm10 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm9 vpor %ymm9, %ymm11, %ymm11 vpaddw %ymm11, %ymm4, %ymm4 vmovdqa %xmm10, 2656(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 264(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 616(%rdi) vpand mask_mod8192(%rip), %ymm4, %ymm4 vmovdqu %ymm4, 968(%rdi) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %ymm3, 1320(%rdi) vmovdqa 128(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm7 vpunpckhwd const0(%rip), %ymm11, %ymm8 vpslld $1, %ymm7, %ymm7 vpslld $1, %ymm8, %ymm8 vmovdqa 384(%r8), %ymm3 vpunpcklwd const0(%rip), %ymm3, %ymm4 vpunpckhwd const0(%rip), %ymm3, %ymm3 vmovdqa 640(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm4, %ymm10 vpaddd %ymm6, %ymm3, %ymm9 vpsubd %ymm7, %ymm10, %ymm10 vpsubd %ymm8, %ymm9, %ymm9 vpsubd %ymm5, %ymm4, %ymm5 vpsubd %ymm6, %ymm3, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 
vmovdqa 1664(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm3 vpunpckhwd const0(%rip), %ymm5, %ymm4 vpslld $1, %ymm3, %ymm3 vpslld $1, %ymm4, %ymm4 vpsubd %ymm3, %ymm10, %ymm10 vpsubd %ymm4, %ymm9, %ymm9 vpsrld $1, %ymm10, %ymm10 vpsrld $1, %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm10, %ymm10 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpackusdw %ymm9, %ymm10, %ymm9 vmovdqa 896(%r8), %ymm10 vpaddw 1152(%r8), %ymm10, %ymm4 vpsubw 1152(%r8), %ymm10, %ymm10 vpsrlw $2, %ymm10, %ymm10 vpsubw %ymm6, %ymm10, %ymm10 vpmullw %ymm14, %ymm10, %ymm10 vpsllw $1, %ymm11, %ymm3 vpsubw %ymm3, %ymm4, %ymm3 vpsllw $7, %ymm5, %ymm4 vpsubw %ymm4, %ymm3, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm9, %ymm4, %ymm4 vmovdqa 1408(%r8), %ymm3 vpsubw %ymm11, %ymm3, %ymm3 vpmullw %ymm15, %ymm5, %ymm8 vpsubw %ymm8, %ymm3, %ymm8 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm4, %ymm9, %ymm9 vpmullw %ymm12, %ymm4, %ymm3 vpaddw %ymm3, %ymm9, %ymm3 vpmullw %ymm12, %ymm3, %ymm3 vpsubw %ymm3, %ymm8, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm10, %ymm3, %ymm3 vpsubw %ymm3, %ymm10, %ymm10 vpsubw %ymm10, %ymm6, %ymm6 vpmullw %ymm13, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vmovdqu 352(%rdi), %ymm8 vmovdqu 704(%rdi), %ymm7 vmovdqu 1056(%rdi), %ymm2 vpaddw %ymm11, %ymm8, %ymm11 vpaddw %ymm6, %ymm7, %ymm6 vpaddw %ymm9, %ymm2, %ymm9 vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_3_5(%rip), %ymm10, %ymm2 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm7 vpor %ymm7, %ymm10, %ymm10 vmovdqu 0(%rdi), %ymm7 vpaddw %ymm10, %ymm7, %ymm7 vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %ymm7, 0(%rdi) vmovdqa %xmm2, 1920(%r8) vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm2 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm7 vpor %ymm7, %ymm4, %ymm4 vpaddw %ymm4, %ymm11, %ymm11 vmovdqa %xmm2, 2176(%r8) vpshufb shuf48_16(%rip), %ymm3, %ymm3 
vpand mask3_5_3_5(%rip), %ymm3, %ymm2 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm7 vpor %ymm7, %ymm3, %ymm3 vpaddw %ymm3, %ymm6, %ymm6 vmovdqa %xmm2, 2432(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm2 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm7 vpor %ymm7, %ymm5, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vmovdqa %xmm2, 2688(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 352(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 704(%rdi) vpand mask_mod8192(%rip), %ymm9, %ymm9 vmovdqu %ymm9, 1056(%rdi) vmovdqa 160(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm3 vpunpckhwd const0(%rip), %ymm5, %ymm4 vpslld $1, %ymm3, %ymm3 vpslld $1, %ymm4, %ymm4 vmovdqa 416(%r8), %ymm10 vpunpcklwd const0(%rip), %ymm10, %ymm9 vpunpckhwd const0(%rip), %ymm10, %ymm10 vmovdqa 672(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm9, %ymm2 vpaddd %ymm6, %ymm10, %ymm7 vpsubd %ymm3, %ymm2, %ymm2 vpsubd %ymm4, %ymm7, %ymm7 vpsubd %ymm11, %ymm9, %ymm11 vpsubd %ymm6, %ymm10, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1696(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm10 vpunpckhwd const0(%rip), %ymm11, %ymm9 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm9, %ymm9 vpsubd %ymm10, %ymm2, %ymm2 vpsubd %ymm9, %ymm7, %ymm7 vpsrld $1, %ymm2, %ymm2 vpsrld $1, %ymm7, %ymm7 vpand mask32_to_16(%rip), %ymm2, %ymm2 vpand mask32_to_16(%rip), %ymm7, %ymm7 vpackusdw %ymm7, %ymm2, %ymm7 vmovdqa 928(%r8), %ymm2 vpaddw 1184(%r8), %ymm2, %ymm9 vpsubw 1184(%r8), %ymm2, %ymm2 vpsrlw $2, %ymm2, %ymm2 vpsubw %ymm6, %ymm2, %ymm2 vpmullw %ymm14, %ymm2, %ymm2 vpsllw $1, %ymm5, %ymm10 vpsubw %ymm10, %ymm9, %ymm10 vpsllw $7, %ymm11, %ymm9 vpsubw %ymm9, %ymm10, %ymm9 vpsrlw 
$3, %ymm9, %ymm9 vpsubw %ymm7, %ymm9, %ymm9 vmovdqa 1440(%r8), %ymm10 vpsubw %ymm5, %ymm10, %ymm10 vpmullw %ymm15, %ymm11, %ymm4 vpsubw %ymm4, %ymm10, %ymm4 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm9, %ymm7, %ymm7 vpmullw %ymm12, %ymm9, %ymm10 vpaddw %ymm10, %ymm7, %ymm10 vpmullw %ymm12, %ymm10, %ymm10 vpsubw %ymm10, %ymm4, %ymm10 vpmullw %ymm14, %ymm10, %ymm10 vpsubw %ymm6, %ymm10, %ymm10 vpsrlw $3, %ymm10, %ymm10 vpsubw %ymm2, %ymm10, %ymm10 vpsubw %ymm10, %ymm2, %ymm2 vpsubw %ymm2, %ymm6, %ymm6 vpmullw %ymm13, %ymm10, %ymm10 vpsubw %ymm10, %ymm6, %ymm6 vmovdqu 440(%rdi), %ymm4 vmovdqu 792(%rdi), %ymm3 vmovdqu 1144(%rdi), %ymm8 vpaddw %ymm5, %ymm4, %ymm5 vpaddw %ymm6, %ymm3, %ymm6 vpaddw %ymm7, %ymm8, %ymm7 vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_3_5(%rip), %ymm2, %ymm8 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm2, %ymm2 vmovdqu 88(%rdi), %ymm3 vpaddw %ymm2, %ymm3, %ymm3 vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %ymm3, 88(%rdi) vmovdqa %xmm8, 1952(%r8) vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_3_5(%rip), %ymm9, %ymm8 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm9, %ymm9 vpaddw %ymm9, %ymm5, %ymm5 vmovdqa %xmm8, 2208(%r8) vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_3_5(%rip), %ymm10, %ymm8 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm10, %ymm10 vpaddw %ymm10, %ymm6, %ymm6 vmovdqa %xmm8, 2464(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm8 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm11, %ymm11 vpaddw %ymm11, %ymm7, %ymm7 vmovdqa %xmm8, 2720(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 440(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 792(%rdi) vpand 
mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %ymm7, 1144(%rdi) vmovdqa 192(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm10 vpunpckhwd const0(%rip), %ymm11, %ymm9 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm9, %ymm9 vmovdqa 448(%r8), %ymm2 vpunpcklwd const0(%rip), %ymm2, %ymm7 vpunpckhwd const0(%rip), %ymm2, %ymm2 vmovdqa 704(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm7, %ymm8 vpaddd %ymm6, %ymm2, %ymm3 vpsubd %ymm10, %ymm8, %ymm8 vpsubd %ymm9, %ymm3, %ymm3 vpsubd %ymm5, %ymm7, %ymm5 vpsubd %ymm6, %ymm2, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1728(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm2 vpunpckhwd const0(%rip), %ymm5, %ymm7 vpslld $1, %ymm2, %ymm2 vpslld $1, %ymm7, %ymm7 vpsubd %ymm2, %ymm8, %ymm8 vpsubd %ymm7, %ymm3, %ymm3 vpsrld $1, %ymm8, %ymm8 vpsrld $1, %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpackusdw %ymm3, %ymm8, %ymm3 vmovdqa 960(%r8), %ymm8 vpaddw 1216(%r8), %ymm8, %ymm7 vpsubw 1216(%r8), %ymm8, %ymm8 vpsrlw $2, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsllw $1, %ymm11, %ymm2 vpsubw %ymm2, %ymm7, %ymm2 vpsllw $7, %ymm5, %ymm7 vpsubw %ymm7, %ymm2, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm3, %ymm7, %ymm7 vmovdqa 1472(%r8), %ymm2 vpsubw %ymm11, %ymm2, %ymm2 vpmullw %ymm15, %ymm5, %ymm9 vpsubw %ymm9, %ymm2, %ymm9 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm7, %ymm3, %ymm3 vpmullw %ymm12, %ymm7, %ymm2 vpaddw %ymm2, %ymm3, %ymm2 vpmullw %ymm12, %ymm2, %ymm2 vpsubw %ymm2, %ymm9, %ymm2 vpmullw %ymm14, %ymm2, %ymm2 vpsubw %ymm6, %ymm2, %ymm2 vpsrlw $3, %ymm2, %ymm2 vpsubw %ymm8, %ymm2, %ymm2 vpsubw %ymm2, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vpmullw %ymm13, %ymm2, %ymm2 vpsubw %ymm2, %ymm6, %ymm6 vmovdqu 528(%rdi), %ymm9 vmovdqu 880(%rdi), %ymm10 vmovdqu 1232(%rdi), %ymm4 vpaddw %ymm11, 
%ymm9, %ymm11 vpaddw %ymm6, %ymm10, %ymm6 vpaddw %ymm3, %ymm4, %ymm3 vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm4 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm10 vpor %ymm10, %ymm8, %ymm8 vmovdqu 176(%rdi), %ymm10 vpaddw %ymm8, %ymm10, %ymm10 vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %ymm10, 176(%rdi) vmovdqa %xmm4, 1984(%r8) vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_3_5(%rip), %ymm7, %ymm4 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm10 vpor %ymm10, %ymm7, %ymm7 vpaddw %ymm7, %ymm11, %ymm11 vmovdqa %xmm4, 2240(%r8) vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_3_5(%rip), %ymm2, %ymm4 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm10 vpor %ymm10, %ymm2, %ymm2 vpaddw %ymm2, %ymm6, %ymm6 vmovdqa %xmm4, 2496(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm4 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm10 vpor %ymm10, %ymm5, %ymm5 vpaddw %ymm5, %ymm3, %ymm3 vmovdqa %xmm4, 2752(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 528(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 880(%rdi) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %ymm3, 1232(%rdi) vmovdqa 224(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm2 vpunpckhwd const0(%rip), %ymm5, %ymm7 vpslld $1, %ymm2, %ymm2 vpslld $1, %ymm7, %ymm7 vmovdqa 480(%r8), %ymm8 vpunpcklwd const0(%rip), %ymm8, %ymm3 vpunpckhwd const0(%rip), %ymm8, %ymm8 vmovdqa 736(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm3, %ymm4 vpaddd %ymm6, %ymm8, %ymm10 vpsubd %ymm2, %ymm4, %ymm4 vpsubd %ymm7, %ymm10, %ymm10 vpsubd %ymm11, %ymm3, %ymm11 vpsubd %ymm6, %ymm8, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, 
%ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1760(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm8 vpunpckhwd const0(%rip), %ymm11, %ymm3 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm3, %ymm3 vpsubd %ymm8, %ymm4, %ymm4 vpsubd %ymm3, %ymm10, %ymm10 vpsrld $1, %ymm4, %ymm4 vpsrld $1, %ymm10, %ymm10 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm10, %ymm10 vpackusdw %ymm10, %ymm4, %ymm10 vmovdqa 992(%r8), %ymm4 vpaddw 1248(%r8), %ymm4, %ymm3 vpsubw 1248(%r8), %ymm4, %ymm4 vpsrlw $2, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsllw $1, %ymm5, %ymm8 vpsubw %ymm8, %ymm3, %ymm8 vpsllw $7, %ymm11, %ymm3 vpsubw %ymm3, %ymm8, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm10, %ymm3, %ymm3 vmovdqa 1504(%r8), %ymm8 vpsubw %ymm5, %ymm8, %ymm8 vpmullw %ymm15, %ymm11, %ymm7 vpsubw %ymm7, %ymm8, %ymm7 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm3, %ymm10, %ymm10 vpmullw %ymm12, %ymm3, %ymm8 vpaddw %ymm8, %ymm10, %ymm8 vpmullw %ymm12, %ymm8, %ymm8 vpsubw %ymm8, %ymm7, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm4, %ymm8, %ymm8 vpsubw %ymm8, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vpmullw %ymm13, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vmovdqu 616(%rdi), %ymm7 vmovdqu 968(%rdi), %ymm2 vmovdqu 1320(%rdi), %ymm9 vpaddw %ymm5, %ymm7, %ymm5 vpaddw %ymm6, %ymm2, %ymm6 vpaddw %ymm10, %ymm9, %ymm10 vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm9 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm2 vpor %ymm2, %ymm4, %ymm4 vmovdqu 264(%rdi), %ymm2 vpaddw %ymm4, %ymm2, %ymm2 vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %ymm2, 264(%rdi) vmovdqa %xmm9, 2016(%r8) vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_3_5(%rip), %ymm3, %ymm9 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm2 vpor %ymm2, %ymm3, %ymm3 vpaddw %ymm3, %ymm5, 
%ymm5 vmovdqa %xmm9, 2272(%r8) vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm9 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm2 vpor %ymm2, %ymm8, %ymm8 vpaddw %ymm8, %ymm6, %ymm6 vmovdqa %xmm9, 2528(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm9 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm2 vpor %ymm2, %ymm11, %ymm11 vpaddw %ymm11, %ymm10, %ymm10 vmovdqa %xmm9, 2784(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 616(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 968(%rdi) vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %ymm10, 1320(%rdi) vmovdqa 128(%r12), %ymm0 vpsubw 224(%r12), %ymm0, %ymm0 vmovdqa 512(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 320(%r12), %ymm1, %ymm1 vpsubw 32(%r12), %ymm0, %ymm0 vpaddw 416(%r12), %ymm0, %ymm0 vmovdqa 704(%r12), %ymm2 vpsubw 800(%r12), %ymm2, %ymm2 vmovdqa 1088(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 896(%r12), %ymm3, %ymm3 vpsubw 608(%r12), %ymm2, %ymm2 vpaddw 992(%r12), %ymm2, %ymm2 vmovdqa 1280(%r12), %ymm4 vpsubw 1376(%r12), %ymm4, %ymm4 vmovdqa 1664(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 1472(%r12), %ymm5, %ymm5 vpsubw 1184(%r12), %ymm4, %ymm4 vpaddw 1568(%r12), %ymm4, %ymm4 vpsubw 608(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 32(%r12), %ymm1, %ymm1 vpaddw 1184(%r12), %ymm1, %ymm1 vmovdqa 320(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 1472(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 896(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 32(%r12), %ymm8 vmovdqa 896(%r12), %ymm9 vmovdqa %ymm8, 0(%r8) vmovdqa %ymm0, 32(%r8) vmovdqa %ymm1, 64(%r8) vmovdqa %ymm7, 96(%r8) vmovdqa %ymm5, 128(%r8) vmovdqa %ymm2, 160(%r8) vmovdqa %ymm3, 192(%r8) vmovdqa %ymm9, 224(%r8) vmovdqa 1856(%r12), %ymm0 vpsubw 1952(%r12), %ymm0, %ymm0 vmovdqa 
2240(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 2048(%r12), %ymm1, %ymm1 vpsubw 1760(%r12), %ymm0, %ymm0 vpaddw 2144(%r12), %ymm0, %ymm0 vmovdqa 2432(%r12), %ymm2 vpsubw 2528(%r12), %ymm2, %ymm2 vmovdqa 2816(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 2624(%r12), %ymm3, %ymm3 vpsubw 2336(%r12), %ymm2, %ymm2 vpaddw 2720(%r12), %ymm2, %ymm2 vmovdqa 3008(%r12), %ymm4 vpsubw 3104(%r12), %ymm4, %ymm4 vmovdqa 3392(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 3200(%r12), %ymm5, %ymm5 vpsubw 2912(%r12), %ymm4, %ymm4 vpaddw 3296(%r12), %ymm4, %ymm4 vpsubw 2336(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 1760(%r12), %ymm1, %ymm1 vpaddw 2912(%r12), %ymm1, %ymm1 vmovdqa 2048(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 3200(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 2624(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 1760(%r12), %ymm8 vmovdqa 2624(%r12), %ymm9 vmovdqa %ymm8, 256(%r8) vmovdqa %ymm0, 288(%r8) vmovdqa %ymm1, 320(%r8) vmovdqa %ymm7, 352(%r8) vmovdqa %ymm5, 384(%r8) vmovdqa %ymm2, 416(%r8) vmovdqa %ymm3, 448(%r8) vmovdqa %ymm9, 480(%r8) vmovdqa 3584(%r12), %ymm0 vpsubw 3680(%r12), %ymm0, %ymm0 vmovdqa 3968(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3776(%r12), %ymm1, %ymm1 vpsubw 3488(%r12), %ymm0, %ymm0 vpaddw 3872(%r12), %ymm0, %ymm0 vmovdqa 4160(%r12), %ymm2 vpsubw 4256(%r12), %ymm2, %ymm2 vmovdqa 4544(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 4352(%r12), %ymm3, %ymm3 vpsubw 4064(%r12), %ymm2, %ymm2 vpaddw 4448(%r12), %ymm2, %ymm2 vmovdqa 4736(%r12), %ymm4 vpsubw 4832(%r12), %ymm4, %ymm4 vmovdqa 5120(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 4928(%r12), %ymm5, %ymm5 vpsubw 4640(%r12), %ymm4, %ymm4 vpaddw 5024(%r12), %ymm4, %ymm4 vpsubw 4064(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 3488(%r12), %ymm1, %ymm1 vpaddw 4640(%r12), %ymm1, %ymm1 vmovdqa 3776(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 4928(%r12), %ymm2 vpsubw 
%ymm7, %ymm2, %ymm2 vpsubw 4352(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 3488(%r12), %ymm8 vmovdqa 4352(%r12), %ymm9 vmovdqa %ymm8, 512(%r8) vmovdqa %ymm0, 544(%r8) vmovdqa %ymm1, 576(%r8) vmovdqa %ymm7, 608(%r8) vmovdqa %ymm5, 640(%r8) vmovdqa %ymm2, 672(%r8) vmovdqa %ymm3, 704(%r8) vmovdqa %ymm9, 736(%r8) vmovdqa 5312(%r12), %ymm0 vpsubw 5408(%r12), %ymm0, %ymm0 vmovdqa 5696(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5504(%r12), %ymm1, %ymm1 vpsubw 5216(%r12), %ymm0, %ymm0 vpaddw 5600(%r12), %ymm0, %ymm0 vmovdqa 5888(%r12), %ymm2 vpsubw 5984(%r12), %ymm2, %ymm2 vmovdqa 6272(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 6080(%r12), %ymm3, %ymm3 vpsubw 5792(%r12), %ymm2, %ymm2 vpaddw 6176(%r12), %ymm2, %ymm2 vmovdqa 6464(%r12), %ymm4 vpsubw 6560(%r12), %ymm4, %ymm4 vmovdqa 6848(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 6656(%r12), %ymm5, %ymm5 vpsubw 6368(%r12), %ymm4, %ymm4 vpaddw 6752(%r12), %ymm4, %ymm4 vpsubw 5792(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 5216(%r12), %ymm1, %ymm1 vpaddw 6368(%r12), %ymm1, %ymm1 vmovdqa 5504(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 6656(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 6080(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 5216(%r12), %ymm8 vmovdqa 6080(%r12), %ymm9 vmovdqa %ymm8, 768(%r8) vmovdqa %ymm0, 800(%r8) vmovdqa %ymm1, 832(%r8) vmovdqa %ymm7, 864(%r8) vmovdqa %ymm5, 896(%r8) vmovdqa %ymm2, 928(%r8) vmovdqa %ymm3, 960(%r8) vmovdqa %ymm9, 992(%r8) vmovdqa 7040(%r12), %ymm0 vpsubw 7136(%r12), %ymm0, %ymm0 vmovdqa 7424(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 7232(%r12), %ymm1, %ymm1 vpsubw 6944(%r12), %ymm0, %ymm0 vpaddw 7328(%r12), %ymm0, %ymm0 vmovdqa 7616(%r12), %ymm2 vpsubw 7712(%r12), %ymm2, %ymm2 vmovdqa 8000(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 7808(%r12), %ymm3, %ymm3 vpsubw 7520(%r12), %ymm2, %ymm2 vpaddw 7904(%r12), %ymm2, %ymm2 vmovdqa 8192(%r12), %ymm4 
vpsubw 8288(%r12), %ymm4, %ymm4 vmovdqa 8576(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 8384(%r12), %ymm5, %ymm5 vpsubw 8096(%r12), %ymm4, %ymm4 vpaddw 8480(%r12), %ymm4, %ymm4 vpsubw 7520(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 6944(%r12), %ymm1, %ymm1 vpaddw 8096(%r12), %ymm1, %ymm1 vmovdqa 7232(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 8384(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 7808(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 6944(%r12), %ymm8 vmovdqa 7808(%r12), %ymm9 vmovdqa %ymm8, 1024(%r8) vmovdqa %ymm0, 1056(%r8) vmovdqa %ymm1, 1088(%r8) vmovdqa %ymm7, 1120(%r8) vmovdqa %ymm5, 1152(%r8) vmovdqa %ymm2, 1184(%r8) vmovdqa %ymm3, 1216(%r8) vmovdqa %ymm9, 1248(%r8) vmovdqa 8768(%r12), %ymm0 vpsubw 8864(%r12), %ymm0, %ymm0 vmovdqa 9152(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 8960(%r12), %ymm1, %ymm1 vpsubw 8672(%r12), %ymm0, %ymm0 vpaddw 9056(%r12), %ymm0, %ymm0 vmovdqa 9344(%r12), %ymm2 vpsubw 9440(%r12), %ymm2, %ymm2 vmovdqa 9728(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 9536(%r12), %ymm3, %ymm3 vpsubw 9248(%r12), %ymm2, %ymm2 vpaddw 9632(%r12), %ymm2, %ymm2 vmovdqa 9920(%r12), %ymm4 vpsubw 10016(%r12), %ymm4, %ymm4 vmovdqa 10304(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 10112(%r12), %ymm5, %ymm5 vpsubw 9824(%r12), %ymm4, %ymm4 vpaddw 10208(%r12), %ymm4, %ymm4 vpsubw 9248(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 8672(%r12), %ymm1, %ymm1 vpaddw 9824(%r12), %ymm1, %ymm1 vmovdqa 8960(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 10112(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 9536(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 8672(%r12), %ymm8 vmovdqa 9536(%r12), %ymm9 vmovdqa %ymm8, 1280(%r8) vmovdqa %ymm0, 1312(%r8) vmovdqa %ymm1, 1344(%r8) vmovdqa %ymm7, 1376(%r8) vmovdqa %ymm5, 1408(%r8) vmovdqa %ymm2, 1440(%r8) vmovdqa %ymm3, 1472(%r8) vmovdqa %ymm9, 1504(%r8) 
vmovdqa 10496(%r12), %ymm0 vpsubw 10592(%r12), %ymm0, %ymm0 vmovdqa 10880(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 10688(%r12), %ymm1, %ymm1 vpsubw 10400(%r12), %ymm0, %ymm0 vpaddw 10784(%r12), %ymm0, %ymm0 vmovdqa 11072(%r12), %ymm2 vpsubw 11168(%r12), %ymm2, %ymm2 vmovdqa 11456(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 11264(%r12), %ymm3, %ymm3 vpsubw 10976(%r12), %ymm2, %ymm2 vpaddw 11360(%r12), %ymm2, %ymm2 vmovdqa 11648(%r12), %ymm4 vpsubw 11744(%r12), %ymm4, %ymm4 vmovdqa 12032(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 11840(%r12), %ymm5, %ymm5 vpsubw 11552(%r12), %ymm4, %ymm4 vpaddw 11936(%r12), %ymm4, %ymm4 vpsubw 10976(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 10400(%r12), %ymm1, %ymm1 vpaddw 11552(%r12), %ymm1, %ymm1 vmovdqa 10688(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 11840(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 11264(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 10400(%r12), %ymm8 vmovdqa 11264(%r12), %ymm9 vmovdqa %ymm8, 1536(%r8) vmovdqa %ymm0, 1568(%r8) vmovdqa %ymm1, 1600(%r8) vmovdqa %ymm7, 1632(%r8) vmovdqa %ymm5, 1664(%r8) vmovdqa %ymm2, 1696(%r8) vmovdqa %ymm3, 1728(%r8) vmovdqa %ymm9, 1760(%r8) vmovdqa 0(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm8 vpunpckhwd const0(%rip), %ymm11, %ymm3 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm3, %ymm3 vmovdqa 256(%r8), %ymm4 vpunpcklwd const0(%rip), %ymm4, %ymm10 vpunpckhwd const0(%rip), %ymm4, %ymm4 vmovdqa 512(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm10, %ymm9 vpaddd %ymm6, %ymm4, %ymm2 vpsubd %ymm8, %ymm9, %ymm9 vpsubd %ymm3, %ymm2, %ymm2 vpsubd %ymm5, %ymm10, %ymm5 vpsubd %ymm6, %ymm4, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1536(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm4 vpunpckhwd const0(%rip), 
%ymm5, %ymm10 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm10, %ymm10 vpsubd %ymm4, %ymm9, %ymm9 vpsubd %ymm10, %ymm2, %ymm2 vpsrld $1, %ymm9, %ymm9 vpsrld $1, %ymm2, %ymm2 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm2, %ymm2 vpackusdw %ymm2, %ymm9, %ymm2 vmovdqa 768(%r8), %ymm9 vpaddw 1024(%r8), %ymm9, %ymm10 vpsubw 1024(%r8), %ymm9, %ymm9 vpsrlw $2, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsllw $1, %ymm11, %ymm4 vpsubw %ymm4, %ymm10, %ymm4 vpsllw $7, %ymm5, %ymm10 vpsubw %ymm10, %ymm4, %ymm10 vpsrlw $3, %ymm10, %ymm10 vpsubw %ymm2, %ymm10, %ymm10 vmovdqa 1280(%r8), %ymm4 vpsubw %ymm11, %ymm4, %ymm4 vpmullw %ymm15, %ymm5, %ymm3 vpsubw %ymm3, %ymm4, %ymm3 vpmullw %ymm14, %ymm10, %ymm10 vpsubw %ymm10, %ymm2, %ymm2 vpmullw %ymm12, %ymm10, %ymm4 vpaddw %ymm4, %ymm2, %ymm4 vpmullw %ymm12, %ymm4, %ymm4 vpsubw %ymm4, %ymm3, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm9, %ymm4, %ymm4 vpsubw %ymm4, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpmullw %ymm13, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_3_5(%rip), %ymm10, %ymm3 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm8 vpor %ymm8, %ymm10, %ymm10 vpaddw 2048(%r8), %ymm11, %ymm11 vpaddw %ymm10, %ymm11, %ymm11 vmovdqa %xmm3, 2048(%r8) vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm3 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm8 vpor %ymm8, %ymm4, %ymm4 vpaddw 2304(%r8), %ymm6, %ymm6 vpaddw %ymm4, %ymm6, %ymm6 vmovdqa %xmm3, 2304(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm3 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm8 vpor %ymm8, %ymm5, %ymm5 vpaddw 2560(%r8), %ymm2, %ymm2 vpaddw %ymm5, %ymm2, %ymm2 vmovdqa %xmm3, 2560(%r8) vpand 
mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 32(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 384(%rdi) vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %ymm2, 736(%rdi) vpand mask_mod8192(%rip), %ymm9, %ymm9 vmovdqu %ymm9, 1088(%rdi) vmovdqa 32(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm4 vpunpckhwd const0(%rip), %ymm5, %ymm10 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm10, %ymm10 vmovdqa 288(%r8), %ymm9 vpunpcklwd const0(%rip), %ymm9, %ymm2 vpunpckhwd const0(%rip), %ymm9, %ymm9 vmovdqa 544(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm2, %ymm3 vpaddd %ymm6, %ymm9, %ymm8 vpsubd %ymm4, %ymm3, %ymm3 vpsubd %ymm10, %ymm8, %ymm8 vpsubd %ymm11, %ymm2, %ymm11 vpsubd %ymm6, %ymm9, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1568(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm9 vpunpckhwd const0(%rip), %ymm11, %ymm2 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm2, %ymm2 vpsubd %ymm9, %ymm3, %ymm3 vpsubd %ymm2, %ymm8, %ymm8 vpsrld $1, %ymm3, %ymm3 vpsrld $1, %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpackusdw %ymm8, %ymm3, %ymm8 vmovdqa 800(%r8), %ymm3 vpaddw 1056(%r8), %ymm3, %ymm2 vpsubw 1056(%r8), %ymm3, %ymm3 vpsrlw $2, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsllw $1, %ymm5, %ymm9 vpsubw %ymm9, %ymm2, %ymm9 vpsllw $7, %ymm11, %ymm2 vpsubw %ymm2, %ymm9, %ymm2 vpsrlw $3, %ymm2, %ymm2 vpsubw %ymm8, %ymm2, %ymm2 vmovdqa 1312(%r8), %ymm9 vpsubw %ymm5, %ymm9, %ymm9 vpmullw %ymm15, %ymm11, %ymm10 vpsubw %ymm10, %ymm9, %ymm10 vpmullw %ymm14, %ymm2, %ymm2 vpsubw %ymm2, %ymm8, %ymm8 vpmullw %ymm12, %ymm2, %ymm9 vpaddw %ymm9, %ymm8, %ymm9 vpmullw %ymm12, %ymm9, %ymm9 vpsubw %ymm9, %ymm10, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm3, %ymm9, 
%ymm9 vpsubw %ymm9, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vpmullw %ymm13, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_3_5(%rip), %ymm2, %ymm10 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $206, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm4 vpor %ymm4, %ymm2, %ymm2 vpaddw 2080(%r8), %ymm5, %ymm5 vpaddw %ymm2, %ymm5, %ymm5 vmovdqa %xmm10, 2080(%r8) vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_3_5(%rip), %ymm9, %ymm10 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $206, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm4 vpor %ymm4, %ymm9, %ymm9 vpaddw 2336(%r8), %ymm6, %ymm6 vpaddw %ymm9, %ymm6, %ymm6 vmovdqa %xmm10, 2336(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm10 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm4 vpor %ymm4, %ymm11, %ymm11 vpaddw 2592(%r8), %ymm8, %ymm8 vpaddw %ymm11, %ymm8, %ymm8 vmovdqa %xmm10, 2592(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 120(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 472(%rdi) vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %ymm8, 824(%rdi) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %ymm3, 1176(%rdi) vmovdqa 64(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm9 vpunpckhwd const0(%rip), %ymm11, %ymm2 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm2, %ymm2 vmovdqa 320(%r8), %ymm3 vpunpcklwd const0(%rip), %ymm3, %ymm8 vpunpckhwd const0(%rip), %ymm3, %ymm3 vmovdqa 576(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm8, %ymm10 vpaddd %ymm6, %ymm3, %ymm4 vpsubd %ymm9, %ymm10, %ymm10 vpsubd %ymm2, %ymm4, %ymm4 vpsubd %ymm5, %ymm8, %ymm5 vpsubd %ymm6, %ymm3, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1600(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm3 
vpunpckhwd const0(%rip), %ymm5, %ymm8 vpslld $1, %ymm3, %ymm3 vpslld $1, %ymm8, %ymm8 vpsubd %ymm3, %ymm10, %ymm10 vpsubd %ymm8, %ymm4, %ymm4 vpsrld $1, %ymm10, %ymm10 vpsrld $1, %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm10, %ymm10 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpackusdw %ymm4, %ymm10, %ymm4 vmovdqa 832(%r8), %ymm10 vpaddw 1088(%r8), %ymm10, %ymm8 vpsubw 1088(%r8), %ymm10, %ymm10 vpsrlw $2, %ymm10, %ymm10 vpsubw %ymm6, %ymm10, %ymm10 vpmullw %ymm14, %ymm10, %ymm10 vpsllw $1, %ymm11, %ymm3 vpsubw %ymm3, %ymm8, %ymm3 vpsllw $7, %ymm5, %ymm8 vpsubw %ymm8, %ymm3, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm4, %ymm8, %ymm8 vmovdqa 1344(%r8), %ymm3 vpsubw %ymm11, %ymm3, %ymm3 vpmullw %ymm15, %ymm5, %ymm2 vpsubw %ymm2, %ymm3, %ymm2 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm8, %ymm4, %ymm4 vpmullw %ymm12, %ymm8, %ymm3 vpaddw %ymm3, %ymm4, %ymm3 vpmullw %ymm12, %ymm3, %ymm3 vpsubw %ymm3, %ymm2, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm10, %ymm3, %ymm3 vpsubw %ymm3, %ymm10, %ymm10 vpsubw %ymm10, %ymm6, %ymm6 vpmullw %ymm13, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm2 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm9 vpor %ymm9, %ymm8, %ymm8 vpaddw 2112(%r8), %ymm11, %ymm11 vpaddw %ymm8, %ymm11, %ymm11 vmovdqa %xmm2, 2112(%r8) vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_3_5(%rip), %ymm3, %ymm2 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm9 vpor %ymm9, %ymm3, %ymm3 vpaddw 2368(%r8), %ymm6, %ymm6 vpaddw %ymm3, %ymm6, %ymm6 vmovdqa %xmm2, 2368(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm2 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm9 vpor %ymm9, %ymm5, %ymm5 vpaddw 2624(%r8), %ymm4, %ymm4 vpaddw %ymm5, %ymm4, %ymm4 vmovdqa %xmm2, 2624(%r8) 
vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 208(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 560(%rdi) vpand mask_mod8192(%rip), %ymm4, %ymm4 vmovdqu %ymm4, 912(%rdi) vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %ymm10, 1264(%rdi) vmovdqa 96(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm3 vpunpckhwd const0(%rip), %ymm5, %ymm8 vpslld $1, %ymm3, %ymm3 vpslld $1, %ymm8, %ymm8 vmovdqa 352(%r8), %ymm10 vpunpcklwd const0(%rip), %ymm10, %ymm4 vpunpckhwd const0(%rip), %ymm10, %ymm10 vmovdqa 608(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm4, %ymm2 vpaddd %ymm6, %ymm10, %ymm9 vpsubd %ymm3, %ymm2, %ymm2 vpsubd %ymm8, %ymm9, %ymm9 vpsubd %ymm11, %ymm4, %ymm11 vpsubd %ymm6, %ymm10, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1632(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm10 vpunpckhwd const0(%rip), %ymm11, %ymm4 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm4, %ymm4 vpsubd %ymm10, %ymm2, %ymm2 vpsubd %ymm4, %ymm9, %ymm9 vpsrld $1, %ymm2, %ymm2 vpsrld $1, %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm2, %ymm2 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpackusdw %ymm9, %ymm2, %ymm9 vmovdqa 864(%r8), %ymm2 vpaddw 1120(%r8), %ymm2, %ymm4 vpsubw 1120(%r8), %ymm2, %ymm2 vpsrlw $2, %ymm2, %ymm2 vpsubw %ymm6, %ymm2, %ymm2 vpmullw %ymm14, %ymm2, %ymm2 vpsllw $1, %ymm5, %ymm10 vpsubw %ymm10, %ymm4, %ymm10 vpsllw $7, %ymm11, %ymm4 vpsubw %ymm4, %ymm10, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm9, %ymm4, %ymm4 vmovdqa 1376(%r8), %ymm10 vpsubw %ymm5, %ymm10, %ymm10 vpmullw %ymm15, %ymm11, %ymm8 vpsubw %ymm8, %ymm10, %ymm8 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm4, %ymm9, %ymm9 vpmullw %ymm12, %ymm4, %ymm10 vpaddw %ymm10, %ymm9, %ymm10 vpmullw %ymm12, %ymm10, %ymm10 vpsubw %ymm10, %ymm8, %ymm10 vpmullw %ymm14, %ymm10, %ymm10 vpsubw %ymm6, %ymm10, %ymm10 vpsrlw $3, %ymm10, 
%ymm10 vpsubw %ymm2, %ymm10, %ymm10 vpsubw %ymm10, %ymm2, %ymm2 vpsubw %ymm2, %ymm6, %ymm6 vpmullw %ymm13, %ymm10, %ymm10 vpsubw %ymm10, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm8 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm4, %ymm4 vpaddw 2144(%r8), %ymm5, %ymm5 vpaddw %ymm4, %ymm5, %ymm5 vmovdqa %xmm8, 2144(%r8) vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_3_5(%rip), %ymm10, %ymm8 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm10, %ymm10 vpaddw 2400(%r8), %ymm6, %ymm6 vpaddw %ymm10, %ymm6, %ymm6 vmovdqa %xmm8, 2400(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm8 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm11, %ymm11 vpaddw 2656(%r8), %ymm9, %ymm9 vpaddw %ymm11, %ymm9, %ymm9 vmovdqa %xmm8, 2656(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 296(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 648(%rdi) vpand mask_mod8192(%rip), %ymm9, %ymm9 vmovdqu %ymm9, 1000(%rdi) vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %ymm2, 1352(%rdi) vmovdqa 128(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm10 vpunpckhwd const0(%rip), %ymm11, %ymm4 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm4, %ymm4 vmovdqa 384(%r8), %ymm2 vpunpcklwd const0(%rip), %ymm2, %ymm9 vpunpckhwd const0(%rip), %ymm2, %ymm2 vmovdqa 640(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm9, %ymm8 vpaddd %ymm6, %ymm2, %ymm3 vpsubd %ymm10, %ymm8, %ymm8 vpsubd %ymm4, %ymm3, %ymm3 vpsubd %ymm5, %ymm9, %ymm5 vpsubd %ymm6, %ymm2, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1664(%r8), %ymm5 vpunpcklwd 
const0(%rip), %ymm5, %ymm2 vpunpckhwd const0(%rip), %ymm5, %ymm9 vpslld $1, %ymm2, %ymm2 vpslld $1, %ymm9, %ymm9 vpsubd %ymm2, %ymm8, %ymm8 vpsubd %ymm9, %ymm3, %ymm3 vpsrld $1, %ymm8, %ymm8 vpsrld $1, %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpackusdw %ymm3, %ymm8, %ymm3 vmovdqa 896(%r8), %ymm8 vpaddw 1152(%r8), %ymm8, %ymm9 vpsubw 1152(%r8), %ymm8, %ymm8 vpsrlw $2, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsllw $1, %ymm11, %ymm2 vpsubw %ymm2, %ymm9, %ymm2 vpsllw $7, %ymm5, %ymm9 vpsubw %ymm9, %ymm2, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm3, %ymm9, %ymm9 vmovdqa 1408(%r8), %ymm2 vpsubw %ymm11, %ymm2, %ymm2 vpmullw %ymm15, %ymm5, %ymm4 vpsubw %ymm4, %ymm2, %ymm4 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm9, %ymm3, %ymm3 vpmullw %ymm12, %ymm9, %ymm2 vpaddw %ymm2, %ymm3, %ymm2 vpmullw %ymm12, %ymm2, %ymm2 vpsubw %ymm2, %ymm4, %ymm2 vpmullw %ymm14, %ymm2, %ymm2 vpsubw %ymm6, %ymm2, %ymm2 vpsrlw $3, %ymm2, %ymm2 vpsubw %ymm8, %ymm2, %ymm2 vpsubw %ymm2, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vpmullw %ymm13, %ymm2, %ymm2 vpsubw %ymm2, %ymm6, %ymm6 vmovdqu 384(%rdi), %ymm4 vmovdqu 736(%rdi), %ymm10 vmovdqu 1088(%rdi), %ymm7 vpaddw %ymm11, %ymm4, %ymm11 vpaddw %ymm6, %ymm10, %ymm6 vpaddw %ymm3, %ymm7, %ymm3 vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm7 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm10 vpor %ymm10, %ymm8, %ymm8 vmovdqu 32(%rdi), %ymm10 vpaddw 1920(%r8), %ymm10, %ymm10 vpaddw %ymm8, %ymm10, %ymm10 vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %ymm10, 32(%rdi) vmovdqa %xmm7, 1920(%r8) vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_3_5(%rip), %ymm9, %ymm7 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm10 vpor %ymm10, %ymm9, %ymm9 vpaddw 2176(%r8), %ymm11, %ymm11 vpaddw %ymm9, %ymm11, %ymm11 vmovdqa %xmm7, 2176(%r8) vpshufb 
shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_3_5(%rip), %ymm2, %ymm7 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm10 vpor %ymm10, %ymm2, %ymm2 vpaddw 2432(%r8), %ymm6, %ymm6 vpaddw %ymm2, %ymm6, %ymm6 vmovdqa %xmm7, 2432(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm7 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm10 vpor %ymm10, %ymm5, %ymm5 vpaddw 2688(%r8), %ymm3, %ymm3 vpaddw %ymm5, %ymm3, %ymm3 vmovdqa %xmm7, 2688(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 384(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 736(%rdi) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %ymm3, 1088(%rdi) vmovdqa 160(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm2 vpunpckhwd const0(%rip), %ymm5, %ymm9 vpslld $1, %ymm2, %ymm2 vpslld $1, %ymm9, %ymm9 vmovdqa 416(%r8), %ymm8 vpunpcklwd const0(%rip), %ymm8, %ymm3 vpunpckhwd const0(%rip), %ymm8, %ymm8 vmovdqa 672(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm3, %ymm7 vpaddd %ymm6, %ymm8, %ymm10 vpsubd %ymm2, %ymm7, %ymm7 vpsubd %ymm9, %ymm10, %ymm10 vpsubd %ymm11, %ymm3, %ymm11 vpsubd %ymm6, %ymm8, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1696(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm8 vpunpckhwd const0(%rip), %ymm11, %ymm3 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm3, %ymm3 vpsubd %ymm8, %ymm7, %ymm7 vpsubd %ymm3, %ymm10, %ymm10 vpsrld $1, %ymm7, %ymm7 vpsrld $1, %ymm10, %ymm10 vpand mask32_to_16(%rip), %ymm7, %ymm7 vpand mask32_to_16(%rip), %ymm10, %ymm10 vpackusdw %ymm10, %ymm7, %ymm10 vmovdqa 928(%r8), %ymm7 vpaddw 1184(%r8), %ymm7, %ymm3 vpsubw 1184(%r8), %ymm7, %ymm7 vpsrlw $2, %ymm7, %ymm7 vpsubw %ymm6, %ymm7, %ymm7 vpmullw %ymm14, %ymm7, %ymm7 vpsllw $1, %ymm5, 
%ymm8 vpsubw %ymm8, %ymm3, %ymm8 vpsllw $7, %ymm11, %ymm3 vpsubw %ymm3, %ymm8, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm10, %ymm3, %ymm3 vmovdqa 1440(%r8), %ymm8 vpsubw %ymm5, %ymm8, %ymm8 vpmullw %ymm15, %ymm11, %ymm9 vpsubw %ymm9, %ymm8, %ymm9 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm3, %ymm10, %ymm10 vpmullw %ymm12, %ymm3, %ymm8 vpaddw %ymm8, %ymm10, %ymm8 vpmullw %ymm12, %ymm8, %ymm8 vpsubw %ymm8, %ymm9, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm7, %ymm8, %ymm8 vpsubw %ymm8, %ymm7, %ymm7 vpsubw %ymm7, %ymm6, %ymm6 vpmullw %ymm13, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vmovdqu 472(%rdi), %ymm9 vmovdqu 824(%rdi), %ymm2 vmovdqu 1176(%rdi), %ymm4 vpaddw %ymm5, %ymm9, %ymm5 vpaddw %ymm6, %ymm2, %ymm6 vpaddw %ymm10, %ymm4, %ymm10 vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_3_5(%rip), %ymm7, %ymm4 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm7, %ymm7 vmovdqu 120(%rdi), %ymm2 vpaddw 1952(%r8), %ymm2, %ymm2 vpaddw %ymm7, %ymm2, %ymm2 vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %ymm2, 120(%rdi) vmovdqa %xmm4, 1952(%r8) vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_3_5(%rip), %ymm3, %ymm4 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm3, %ymm3 vpaddw 2208(%r8), %ymm5, %ymm5 vpaddw %ymm3, %ymm5, %ymm5 vmovdqa %xmm4, 2208(%r8) vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm4 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm8, %ymm8 vpaddw 2464(%r8), %ymm6, %ymm6 vpaddw %ymm8, %ymm6, %ymm6 vmovdqa %xmm4, 2464(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm4 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm11, %ymm11 vpaddw 2720(%r8), %ymm10, %ymm10 vpaddw 
%ymm11, %ymm10, %ymm10 vmovdqa %xmm4, 2720(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 472(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 824(%rdi) vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %ymm10, 1176(%rdi) vmovdqa 192(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm8 vpunpckhwd const0(%rip), %ymm11, %ymm3 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm3, %ymm3 vmovdqa 448(%r8), %ymm7 vpunpcklwd const0(%rip), %ymm7, %ymm10 vpunpckhwd const0(%rip), %ymm7, %ymm7 vmovdqa 704(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm10, %ymm4 vpaddd %ymm6, %ymm7, %ymm2 vpsubd %ymm8, %ymm4, %ymm4 vpsubd %ymm3, %ymm2, %ymm2 vpsubd %ymm5, %ymm10, %ymm5 vpsubd %ymm6, %ymm7, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1728(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm7 vpunpckhwd const0(%rip), %ymm5, %ymm10 vpslld $1, %ymm7, %ymm7 vpslld $1, %ymm10, %ymm10 vpsubd %ymm7, %ymm4, %ymm4 vpsubd %ymm10, %ymm2, %ymm2 vpsrld $1, %ymm4, %ymm4 vpsrld $1, %ymm2, %ymm2 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm2, %ymm2 vpackusdw %ymm2, %ymm4, %ymm2 vmovdqa 960(%r8), %ymm4 vpaddw 1216(%r8), %ymm4, %ymm10 vpsubw 1216(%r8), %ymm4, %ymm4 vpsrlw $2, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsllw $1, %ymm11, %ymm7 vpsubw %ymm7, %ymm10, %ymm7 vpsllw $7, %ymm5, %ymm10 vpsubw %ymm10, %ymm7, %ymm10 vpsrlw $3, %ymm10, %ymm10 vpsubw %ymm2, %ymm10, %ymm10 vmovdqa 1472(%r8), %ymm7 vpsubw %ymm11, %ymm7, %ymm7 vpmullw %ymm15, %ymm5, %ymm3 vpsubw %ymm3, %ymm7, %ymm3 vpmullw %ymm14, %ymm10, %ymm10 vpsubw %ymm10, %ymm2, %ymm2 vpmullw %ymm12, %ymm10, %ymm7 vpaddw %ymm7, %ymm2, %ymm7 vpmullw %ymm12, %ymm7, %ymm7 vpsubw %ymm7, %ymm3, %ymm7 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm6, %ymm7, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm4, %ymm7, %ymm7 
vpsubw %ymm7, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vpmullw %ymm13, %ymm7, %ymm7 vpsubw %ymm7, %ymm6, %ymm6 vmovdqu 560(%rdi), %ymm3 vmovdqu 912(%rdi), %ymm8 vmovdqu 1264(%rdi), %ymm9 vpaddw %ymm11, %ymm3, %ymm11 vpaddw %ymm6, %ymm8, %ymm6 vpaddw %ymm2, %ymm9, %ymm2 vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm9 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm8 vpor %ymm8, %ymm4, %ymm4 vmovdqu 208(%rdi), %ymm8 vpaddw 1984(%r8), %ymm8, %ymm8 vpaddw %ymm4, %ymm8, %ymm8 vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %ymm8, 208(%rdi) vmovdqa %xmm9, 1984(%r8) vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_3_5(%rip), %ymm10, %ymm9 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm8 vpor %ymm8, %ymm10, %ymm10 vpaddw 2240(%r8), %ymm11, %ymm11 vpaddw %ymm10, %ymm11, %ymm11 vmovdqa %xmm9, 2240(%r8) vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_3_5(%rip), %ymm7, %ymm9 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm8 vpor %ymm8, %ymm7, %ymm7 vpaddw 2496(%r8), %ymm6, %ymm6 vpaddw %ymm7, %ymm6, %ymm6 vmovdqa %xmm9, 2496(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm9 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm8 vpor %ymm8, %ymm5, %ymm5 vpaddw 2752(%r8), %ymm2, %ymm2 vpaddw %ymm5, %ymm2, %ymm2 vmovdqa %xmm9, 2752(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 560(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 912(%rdi) vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %ymm2, 1264(%rdi) vmovdqa 224(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm7 vpunpckhwd const0(%rip), %ymm5, %ymm10 vpslld $1, %ymm7, %ymm7 vpslld $1, %ymm10, %ymm10 vmovdqa 480(%r8), %ymm4 vpunpcklwd const0(%rip), %ymm4, %ymm2 vpunpckhwd const0(%rip), %ymm4, %ymm4 vmovdqa 736(%r8), %ymm6 
vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm2, %ymm9 vpaddd %ymm6, %ymm4, %ymm8 vpsubd %ymm7, %ymm9, %ymm9 vpsubd %ymm10, %ymm8, %ymm8 vpsubd %ymm11, %ymm2, %ymm11 vpsubd %ymm6, %ymm4, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1760(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm4 vpunpckhwd const0(%rip), %ymm11, %ymm2 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm2, %ymm2 vpsubd %ymm4, %ymm9, %ymm9 vpsubd %ymm2, %ymm8, %ymm8 vpsrld $1, %ymm9, %ymm9 vpsrld $1, %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpackusdw %ymm8, %ymm9, %ymm8 vmovdqa 992(%r8), %ymm9 vpaddw 1248(%r8), %ymm9, %ymm2 vpsubw 1248(%r8), %ymm9, %ymm9 vpsrlw $2, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsllw $1, %ymm5, %ymm4 vpsubw %ymm4, %ymm2, %ymm4 vpsllw $7, %ymm11, %ymm2 vpsubw %ymm2, %ymm4, %ymm2 vpsrlw $3, %ymm2, %ymm2 vpsubw %ymm8, %ymm2, %ymm2 vmovdqa 1504(%r8), %ymm4 vpsubw %ymm5, %ymm4, %ymm4 vpmullw %ymm15, %ymm11, %ymm10 vpsubw %ymm10, %ymm4, %ymm10 vpmullw %ymm14, %ymm2, %ymm2 vpsubw %ymm2, %ymm8, %ymm8 vpmullw %ymm12, %ymm2, %ymm4 vpaddw %ymm4, %ymm8, %ymm4 vpmullw %ymm12, %ymm4, %ymm4 vpsubw %ymm4, %ymm10, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm9, %ymm4, %ymm4 vpsubw %ymm4, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpmullw %ymm13, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vmovdqu 648(%rdi), %ymm10 vmovdqu 1000(%rdi), %ymm7 vmovdqu 1352(%rdi), %ymm3 vpaddw %ymm5, %ymm10, %ymm5 vpaddw %ymm6, %ymm7, %ymm6 vpaddw %ymm8, %ymm3, %ymm8 vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_3_5(%rip), %ymm9, %ymm3 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm7 vpor %ymm7, %ymm9, %ymm9 vmovdqu 296(%rdi), %ymm7 vpaddw 2016(%r8), %ymm7, 
%ymm7 vpaddw %ymm9, %ymm7, %ymm7 vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %ymm7, 296(%rdi) vmovdqa %xmm3, 2016(%r8) vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_3_5(%rip), %ymm2, %ymm3 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm7 vpor %ymm7, %ymm2, %ymm2 vpaddw 2272(%r8), %ymm5, %ymm5 vpaddw %ymm2, %ymm5, %ymm5 vmovdqa %xmm3, 2272(%r8) vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm3 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm7 vpor %ymm7, %ymm4, %ymm4 vpaddw 2528(%r8), %ymm6, %ymm6 vpaddw %ymm4, %ymm6, %ymm6 vmovdqa %xmm3, 2528(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm3 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm7 vpor %ymm7, %ymm11, %ymm11 vpaddw 2784(%r8), %ymm8, %ymm8 vpaddw %ymm11, %ymm8, %ymm8 vmovdqa %xmm3, 2784(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 648(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 1000(%rdi) vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %ymm8, 1352(%rdi) vmovdqa 160(%r12), %ymm0 vpsubw 256(%r12), %ymm0, %ymm0 vmovdqa 544(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 352(%r12), %ymm1, %ymm1 vpsubw 64(%r12), %ymm0, %ymm0 vpaddw 448(%r12), %ymm0, %ymm0 vmovdqa 736(%r12), %ymm2 vpsubw 832(%r12), %ymm2, %ymm2 vmovdqa 1120(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 928(%r12), %ymm3, %ymm3 vpsubw 640(%r12), %ymm2, %ymm2 vpaddw 1024(%r12), %ymm2, %ymm2 vmovdqa 1312(%r12), %ymm4 vpsubw 1408(%r12), %ymm4, %ymm4 vmovdqa 1696(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 1504(%r12), %ymm5, %ymm5 vpsubw 1216(%r12), %ymm4, %ymm4 vpaddw 1600(%r12), %ymm4, %ymm4 vpsubw 640(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 64(%r12), %ymm1, %ymm1 vpaddw 1216(%r12), %ymm1, %ymm1 vmovdqa 352(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 
vmovdqa 1504(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 928(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 64(%r12), %ymm8 vmovdqa 928(%r12), %ymm9 vmovdqa %ymm8, 0(%r8) vmovdqa %ymm0, 32(%r8) vmovdqa %ymm1, 64(%r8) vmovdqa %ymm7, 96(%r8) vmovdqa %ymm5, 128(%r8) vmovdqa %ymm2, 160(%r8) vmovdqa %ymm3, 192(%r8) vmovdqa %ymm9, 224(%r8) vmovdqa 1888(%r12), %ymm0 vpsubw 1984(%r12), %ymm0, %ymm0 vmovdqa 2272(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 2080(%r12), %ymm1, %ymm1 vpsubw 1792(%r12), %ymm0, %ymm0 vpaddw 2176(%r12), %ymm0, %ymm0 vmovdqa 2464(%r12), %ymm2 vpsubw 2560(%r12), %ymm2, %ymm2 vmovdqa 2848(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 2656(%r12), %ymm3, %ymm3 vpsubw 2368(%r12), %ymm2, %ymm2 vpaddw 2752(%r12), %ymm2, %ymm2 vmovdqa 3040(%r12), %ymm4 vpsubw 3136(%r12), %ymm4, %ymm4 vmovdqa 3424(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 3232(%r12), %ymm5, %ymm5 vpsubw 2944(%r12), %ymm4, %ymm4 vpaddw 3328(%r12), %ymm4, %ymm4 vpsubw 2368(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 1792(%r12), %ymm1, %ymm1 vpaddw 2944(%r12), %ymm1, %ymm1 vmovdqa 2080(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 3232(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 2656(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 1792(%r12), %ymm8 vmovdqa 2656(%r12), %ymm9 vmovdqa %ymm8, 256(%r8) vmovdqa %ymm0, 288(%r8) vmovdqa %ymm1, 320(%r8) vmovdqa %ymm7, 352(%r8) vmovdqa %ymm5, 384(%r8) vmovdqa %ymm2, 416(%r8) vmovdqa %ymm3, 448(%r8) vmovdqa %ymm9, 480(%r8) vmovdqa 3616(%r12), %ymm0 vpsubw 3712(%r12), %ymm0, %ymm0 vmovdqa 4000(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3808(%r12), %ymm1, %ymm1 vpsubw 3520(%r12), %ymm0, %ymm0 vpaddw 3904(%r12), %ymm0, %ymm0 vmovdqa 4192(%r12), %ymm2 vpsubw 4288(%r12), %ymm2, %ymm2 vmovdqa 4576(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 4384(%r12), %ymm3, %ymm3 vpsubw 4096(%r12), %ymm2, %ymm2 vpaddw 4480(%r12), %ymm2, %ymm2 
vmovdqa 4768(%r12), %ymm4 vpsubw 4864(%r12), %ymm4, %ymm4 vmovdqa 5152(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 4960(%r12), %ymm5, %ymm5 vpsubw 4672(%r12), %ymm4, %ymm4 vpaddw 5056(%r12), %ymm4, %ymm4 vpsubw 4096(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 3520(%r12), %ymm1, %ymm1 vpaddw 4672(%r12), %ymm1, %ymm1 vmovdqa 3808(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 4960(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 4384(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 3520(%r12), %ymm8 vmovdqa 4384(%r12), %ymm9 vmovdqa %ymm8, 512(%r8) vmovdqa %ymm0, 544(%r8) vmovdqa %ymm1, 576(%r8) vmovdqa %ymm7, 608(%r8) vmovdqa %ymm5, 640(%r8) vmovdqa %ymm2, 672(%r8) vmovdqa %ymm3, 704(%r8) vmovdqa %ymm9, 736(%r8) vmovdqa 5344(%r12), %ymm0 vpsubw 5440(%r12), %ymm0, %ymm0 vmovdqa 5728(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5536(%r12), %ymm1, %ymm1 vpsubw 5248(%r12), %ymm0, %ymm0 vpaddw 5632(%r12), %ymm0, %ymm0 vmovdqa 5920(%r12), %ymm2 vpsubw 6016(%r12), %ymm2, %ymm2 vmovdqa 6304(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 6112(%r12), %ymm3, %ymm3 vpsubw 5824(%r12), %ymm2, %ymm2 vpaddw 6208(%r12), %ymm2, %ymm2 vmovdqa 6496(%r12), %ymm4 vpsubw 6592(%r12), %ymm4, %ymm4 vmovdqa 6880(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 6688(%r12), %ymm5, %ymm5 vpsubw 6400(%r12), %ymm4, %ymm4 vpaddw 6784(%r12), %ymm4, %ymm4 vpsubw 5824(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 5248(%r12), %ymm1, %ymm1 vpaddw 6400(%r12), %ymm1, %ymm1 vmovdqa 5536(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 6688(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 6112(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 5248(%r12), %ymm8 vmovdqa 6112(%r12), %ymm9 vmovdqa %ymm8, 768(%r8) vmovdqa %ymm0, 800(%r8) vmovdqa %ymm1, 832(%r8) vmovdqa %ymm7, 864(%r8) vmovdqa %ymm5, 896(%r8) vmovdqa %ymm2, 928(%r8) vmovdqa %ymm3, 960(%r8) vmovdqa %ymm9, 992(%r8) 
vmovdqa 7072(%r12), %ymm0 vpsubw 7168(%r12), %ymm0, %ymm0 vmovdqa 7456(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 7264(%r12), %ymm1, %ymm1 vpsubw 6976(%r12), %ymm0, %ymm0 vpaddw 7360(%r12), %ymm0, %ymm0 vmovdqa 7648(%r12), %ymm2 vpsubw 7744(%r12), %ymm2, %ymm2 vmovdqa 8032(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 7840(%r12), %ymm3, %ymm3 vpsubw 7552(%r12), %ymm2, %ymm2 vpaddw 7936(%r12), %ymm2, %ymm2 vmovdqa 8224(%r12), %ymm4 vpsubw 8320(%r12), %ymm4, %ymm4 vmovdqa 8608(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 8416(%r12), %ymm5, %ymm5 vpsubw 8128(%r12), %ymm4, %ymm4 vpaddw 8512(%r12), %ymm4, %ymm4 vpsubw 7552(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 6976(%r12), %ymm1, %ymm1 vpaddw 8128(%r12), %ymm1, %ymm1 vmovdqa 7264(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 8416(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 7840(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 6976(%r12), %ymm8 vmovdqa 7840(%r12), %ymm9 vmovdqa %ymm8, 1024(%r8) vmovdqa %ymm0, 1056(%r8) vmovdqa %ymm1, 1088(%r8) vmovdqa %ymm7, 1120(%r8) vmovdqa %ymm5, 1152(%r8) vmovdqa %ymm2, 1184(%r8) vmovdqa %ymm3, 1216(%r8) vmovdqa %ymm9, 1248(%r8) vmovdqa 8800(%r12), %ymm0 vpsubw 8896(%r12), %ymm0, %ymm0 vmovdqa 9184(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 8992(%r12), %ymm1, %ymm1 vpsubw 8704(%r12), %ymm0, %ymm0 vpaddw 9088(%r12), %ymm0, %ymm0 vmovdqa 9376(%r12), %ymm2 vpsubw 9472(%r12), %ymm2, %ymm2 vmovdqa 9760(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 9568(%r12), %ymm3, %ymm3 vpsubw 9280(%r12), %ymm2, %ymm2 vpaddw 9664(%r12), %ymm2, %ymm2 vmovdqa 9952(%r12), %ymm4 vpsubw 10048(%r12), %ymm4, %ymm4 vmovdqa 10336(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 10144(%r12), %ymm5, %ymm5 vpsubw 9856(%r12), %ymm4, %ymm4 vpaddw 10240(%r12), %ymm4, %ymm4 vpsubw 9280(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 8704(%r12), %ymm1, %ymm1 vpaddw 9856(%r12), %ymm1, %ymm1 vmovdqa 
8992(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 10144(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 9568(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 8704(%r12), %ymm8 vmovdqa 9568(%r12), %ymm9 vmovdqa %ymm8, 1280(%r8) vmovdqa %ymm0, 1312(%r8) vmovdqa %ymm1, 1344(%r8) vmovdqa %ymm7, 1376(%r8) vmovdqa %ymm5, 1408(%r8) vmovdqa %ymm2, 1440(%r8) vmovdqa %ymm3, 1472(%r8) vmovdqa %ymm9, 1504(%r8) vmovdqa 10528(%r12), %ymm0 vpsubw 10624(%r12), %ymm0, %ymm0 vmovdqa 10912(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 10720(%r12), %ymm1, %ymm1 vpsubw 10432(%r12), %ymm0, %ymm0 vpaddw 10816(%r12), %ymm0, %ymm0 vmovdqa 11104(%r12), %ymm2 vpsubw 11200(%r12), %ymm2, %ymm2 vmovdqa 11488(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 11296(%r12), %ymm3, %ymm3 vpsubw 11008(%r12), %ymm2, %ymm2 vpaddw 11392(%r12), %ymm2, %ymm2 vmovdqa 11680(%r12), %ymm4 vpsubw 11776(%r12), %ymm4, %ymm4 vmovdqa 12064(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 11872(%r12), %ymm5, %ymm5 vpsubw 11584(%r12), %ymm4, %ymm4 vpaddw 11968(%r12), %ymm4, %ymm4 vpsubw 11008(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 10432(%r12), %ymm1, %ymm1 vpaddw 11584(%r12), %ymm1, %ymm1 vmovdqa 10720(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 11872(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 11296(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 10432(%r12), %ymm8 vmovdqa 11296(%r12), %ymm9 vmovdqa %ymm8, 1536(%r8) vmovdqa %ymm0, 1568(%r8) vmovdqa %ymm1, 1600(%r8) vmovdqa %ymm7, 1632(%r8) vmovdqa %ymm5, 1664(%r8) vmovdqa %ymm2, 1696(%r8) vmovdqa %ymm3, 1728(%r8) vmovdqa %ymm9, 1760(%r8) vmovdqa 0(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm4 vpunpckhwd const0(%rip), %ymm11, %ymm2 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm2, %ymm2 vmovdqa 256(%r8), %ymm9 vpunpcklwd const0(%rip), %ymm9, %ymm8 vpunpckhwd const0(%rip), %ymm9, %ymm9 vmovdqa 512(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd 
const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm8, %ymm3 vpaddd %ymm6, %ymm9, %ymm7 vpsubd %ymm4, %ymm3, %ymm3 vpsubd %ymm2, %ymm7, %ymm7 vpsubd %ymm5, %ymm8, %ymm5 vpsubd %ymm6, %ymm9, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1536(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm9 vpunpckhwd const0(%rip), %ymm5, %ymm8 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm8, %ymm8 vpsubd %ymm9, %ymm3, %ymm3 vpsubd %ymm8, %ymm7, %ymm7 vpsrld $1, %ymm3, %ymm3 vpsrld $1, %ymm7, %ymm7 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm7, %ymm7 vpackusdw %ymm7, %ymm3, %ymm7 vmovdqa 768(%r8), %ymm3 vpaddw 1024(%r8), %ymm3, %ymm8 vpsubw 1024(%r8), %ymm3, %ymm3 vpsrlw $2, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsllw $1, %ymm11, %ymm9 vpsubw %ymm9, %ymm8, %ymm9 vpsllw $7, %ymm5, %ymm8 vpsubw %ymm8, %ymm9, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm7, %ymm8, %ymm8 vmovdqa 1280(%r8), %ymm9 vpsubw %ymm11, %ymm9, %ymm9 vpmullw %ymm15, %ymm5, %ymm2 vpsubw %ymm2, %ymm9, %ymm2 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm8, %ymm7, %ymm7 vpmullw %ymm12, %ymm8, %ymm9 vpaddw %ymm9, %ymm7, %ymm9 vpmullw %ymm12, %ymm9, %ymm9 vpsubw %ymm9, %ymm2, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm3, %ymm9, %ymm9 vpsubw %ymm9, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vpmullw %ymm13, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_4_3_1(%rip), %ymm8, %ymm2 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm4 vpor %ymm4, %ymm8, %ymm8 vpaddw 2048(%r8), %ymm11, %ymm11 vpaddw %ymm8, %ymm11, %ymm11 vmovdqa %xmm2, 2048(%r8) vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_4_3_1(%rip), %ymm9, %ymm2 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm4 vpor 
%ymm4, %ymm9, %ymm9 vpaddw 2304(%r8), %ymm6, %ymm6 vpaddw %ymm9, %ymm6, %ymm6 vmovdqa %xmm2, 2304(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_4_3_1(%rip), %ymm5, %ymm2 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm4 vpor %ymm4, %ymm5, %ymm5 vpaddw 2560(%r8), %ymm7, %ymm7 vpaddw %ymm5, %ymm7, %ymm7 vmovdqa %xmm2, 2560(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %xmm11, 64(%rdi) vextracti128 $1, %ymm11, %xmm11 vmovq %xmm11, 80(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 416(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 432(%rdi) vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %xmm7, 768(%rdi) vextracti128 $1, %ymm7, %xmm7 vmovq %xmm7, 784(%rdi) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %xmm3, 1120(%rdi) vextracti128 $1, %ymm3, %xmm3 vmovq %xmm3, 1136(%rdi) vmovdqa 32(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm9 vpunpckhwd const0(%rip), %ymm5, %ymm8 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm8, %ymm8 vmovdqa 288(%r8), %ymm3 vpunpcklwd const0(%rip), %ymm3, %ymm7 vpunpckhwd const0(%rip), %ymm3, %ymm3 vmovdqa 544(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm7, %ymm2 vpaddd %ymm6, %ymm3, %ymm4 vpsubd %ymm9, %ymm2, %ymm2 vpsubd %ymm8, %ymm4, %ymm4 vpsubd %ymm11, %ymm7, %ymm11 vpsubd %ymm6, %ymm3, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1568(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm3 vpunpckhwd const0(%rip), %ymm11, %ymm7 vpslld $1, %ymm3, %ymm3 vpslld $1, %ymm7, %ymm7 vpsubd %ymm3, %ymm2, %ymm2 vpsubd %ymm7, %ymm4, %ymm4 vpsrld $1, %ymm2, %ymm2 vpsrld $1, %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm2, %ymm2 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpackusdw %ymm4, %ymm2, %ymm4 vmovdqa 800(%r8), %ymm2 vpaddw 1056(%r8), %ymm2, %ymm7 vpsubw 1056(%r8), %ymm2, %ymm2 vpsrlw $2, 
%ymm2, %ymm2 vpsubw %ymm6, %ymm2, %ymm2 vpmullw %ymm14, %ymm2, %ymm2 vpsllw $1, %ymm5, %ymm3 vpsubw %ymm3, %ymm7, %ymm3 vpsllw $7, %ymm11, %ymm7 vpsubw %ymm7, %ymm3, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm4, %ymm7, %ymm7 vmovdqa 1312(%r8), %ymm3 vpsubw %ymm5, %ymm3, %ymm3 vpmullw %ymm15, %ymm11, %ymm8 vpsubw %ymm8, %ymm3, %ymm8 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm7, %ymm4, %ymm4 vpmullw %ymm12, %ymm7, %ymm3 vpaddw %ymm3, %ymm4, %ymm3 vpmullw %ymm12, %ymm3, %ymm3 vpsubw %ymm3, %ymm8, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw %ymm3, %ymm2, %ymm2 vpsubw %ymm2, %ymm6, %ymm6 vpmullw %ymm13, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_4_3_1(%rip), %ymm7, %ymm8 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $139, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm9 vpor %ymm9, %ymm7, %ymm7 vpaddw 2080(%r8), %ymm5, %ymm5 vpaddw %ymm7, %ymm5, %ymm5 vmovdqa %xmm8, 2080(%r8) vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_4_3_1(%rip), %ymm3, %ymm8 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $139, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm9 vpor %ymm9, %ymm3, %ymm3 vpaddw 2336(%r8), %ymm6, %ymm6 vpaddw %ymm3, %ymm6, %ymm6 vmovdqa %xmm8, 2336(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_4_3_1(%rip), %ymm11, %ymm8 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $139, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm9 vpor %ymm9, %ymm11, %ymm11 vpaddw 2592(%r8), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vmovdqa %xmm8, 2592(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %xmm5, 152(%rdi) vextracti128 $1, %ymm5, %xmm5 vmovq %xmm5, 168(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 504(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 520(%rdi) vpand mask_mod8192(%rip), %ymm4, %ymm4 vmovdqu %xmm4, 856(%rdi) vextracti128 $1, %ymm4, %xmm4 vmovq %xmm4, 872(%rdi) vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %xmm2, 
1208(%rdi) vextracti128 $1, %ymm2, %xmm2 vmovq %xmm2, 1224(%rdi) vmovdqa 64(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm3 vpunpckhwd const0(%rip), %ymm11, %ymm7 vpslld $1, %ymm3, %ymm3 vpslld $1, %ymm7, %ymm7 vmovdqa 320(%r8), %ymm2 vpunpcklwd const0(%rip), %ymm2, %ymm4 vpunpckhwd const0(%rip), %ymm2, %ymm2 vmovdqa 576(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm4, %ymm8 vpaddd %ymm6, %ymm2, %ymm9 vpsubd %ymm3, %ymm8, %ymm8 vpsubd %ymm7, %ymm9, %ymm9 vpsubd %ymm5, %ymm4, %ymm5 vpsubd %ymm6, %ymm2, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1600(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm2 vpunpckhwd const0(%rip), %ymm5, %ymm4 vpslld $1, %ymm2, %ymm2 vpslld $1, %ymm4, %ymm4 vpsubd %ymm2, %ymm8, %ymm8 vpsubd %ymm4, %ymm9, %ymm9 vpsrld $1, %ymm8, %ymm8 vpsrld $1, %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpackusdw %ymm9, %ymm8, %ymm9 vmovdqa 832(%r8), %ymm8 vpaddw 1088(%r8), %ymm8, %ymm4 vpsubw 1088(%r8), %ymm8, %ymm8 vpsrlw $2, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsllw $1, %ymm11, %ymm2 vpsubw %ymm2, %ymm4, %ymm2 vpsllw $7, %ymm5, %ymm4 vpsubw %ymm4, %ymm2, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm9, %ymm4, %ymm4 vmovdqa 1344(%r8), %ymm2 vpsubw %ymm11, %ymm2, %ymm2 vpmullw %ymm15, %ymm5, %ymm7 vpsubw %ymm7, %ymm2, %ymm7 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm4, %ymm9, %ymm9 vpmullw %ymm12, %ymm4, %ymm2 vpaddw %ymm2, %ymm9, %ymm2 vpmullw %ymm12, %ymm2, %ymm2 vpsubw %ymm2, %ymm7, %ymm2 vpmullw %ymm14, %ymm2, %ymm2 vpsubw %ymm6, %ymm2, %ymm2 vpsrlw $3, %ymm2, %ymm2 vpsubw %ymm8, %ymm2, %ymm2 vpsubw %ymm2, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vpmullw %ymm13, %ymm2, %ymm2 vpsubw %ymm2, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_4_3_1(%rip), %ymm4, %ymm7 vpand 
mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $139, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm3 vpor %ymm3, %ymm4, %ymm4 vpaddw 2112(%r8), %ymm11, %ymm11 vpaddw %ymm4, %ymm11, %ymm11 vmovdqa %xmm7, 2112(%r8) vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_4_3_1(%rip), %ymm2, %ymm7 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $139, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm3 vpor %ymm3, %ymm2, %ymm2 vpaddw 2368(%r8), %ymm6, %ymm6 vpaddw %ymm2, %ymm6, %ymm6 vmovdqa %xmm7, 2368(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_4_3_1(%rip), %ymm5, %ymm7 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $139, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm3 vpor %ymm3, %ymm5, %ymm5 vpaddw 2624(%r8), %ymm9, %ymm9 vpaddw %ymm5, %ymm9, %ymm9 vmovdqa %xmm7, 2624(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %xmm11, 240(%rdi) vextracti128 $1, %ymm11, %xmm11 vmovq %xmm11, 256(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 592(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 608(%rdi) vpand mask_mod8192(%rip), %ymm9, %ymm9 vmovdqu %xmm9, 944(%rdi) vextracti128 $1, %ymm9, %xmm9 vmovq %xmm9, 960(%rdi) vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %xmm8, 1296(%rdi) vextracti128 $1, %ymm8, %xmm8 vmovq %xmm8, 1312(%rdi) vmovdqa 96(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm2 vpunpckhwd const0(%rip), %ymm5, %ymm4 vpslld $1, %ymm2, %ymm2 vpslld $1, %ymm4, %ymm4 vmovdqa 352(%r8), %ymm8 vpunpcklwd const0(%rip), %ymm8, %ymm9 vpunpckhwd const0(%rip), %ymm8, %ymm8 vmovdqa 608(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm9, %ymm7 vpaddd %ymm6, %ymm8, %ymm3 vpsubd %ymm2, %ymm7, %ymm7 vpsubd %ymm4, %ymm3, %ymm3 vpsubd %ymm11, %ymm9, %ymm11 vpsubd %ymm6, %ymm8, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1632(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, 
%ymm8 vpunpckhwd const0(%rip), %ymm11, %ymm9 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm9, %ymm9 vpsubd %ymm8, %ymm7, %ymm7 vpsubd %ymm9, %ymm3, %ymm3 vpsrld $1, %ymm7, %ymm7 vpsrld $1, %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm7, %ymm7 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpackusdw %ymm3, %ymm7, %ymm3 vmovdqa 864(%r8), %ymm7 vpaddw 1120(%r8), %ymm7, %ymm9 vpsubw 1120(%r8), %ymm7, %ymm7 vpsrlw $2, %ymm7, %ymm7 vpsubw %ymm6, %ymm7, %ymm7 vpmullw %ymm14, %ymm7, %ymm7 vpsllw $1, %ymm5, %ymm8 vpsubw %ymm8, %ymm9, %ymm8 vpsllw $7, %ymm11, %ymm9 vpsubw %ymm9, %ymm8, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm3, %ymm9, %ymm9 vmovdqa 1376(%r8), %ymm8 vpsubw %ymm5, %ymm8, %ymm8 vpmullw %ymm15, %ymm11, %ymm4 vpsubw %ymm4, %ymm8, %ymm4 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm9, %ymm3, %ymm3 vpmullw %ymm12, %ymm9, %ymm8 vpaddw %ymm8, %ymm3, %ymm8 vpmullw %ymm12, %ymm8, %ymm8 vpsubw %ymm8, %ymm4, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm7, %ymm8, %ymm8 vpsubw %ymm8, %ymm7, %ymm7 vpsubw %ymm7, %ymm6, %ymm6 vpmullw %ymm13, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_4_3_1(%rip), %ymm9, %ymm4 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $139, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm9, %ymm9 vpaddw 2144(%r8), %ymm5, %ymm5 vpaddw %ymm9, %ymm5, %ymm5 vmovdqa %xmm4, 2144(%r8) vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_4_3_1(%rip), %ymm8, %ymm4 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $139, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm8, %ymm8 vpaddw 2400(%r8), %ymm6, %ymm6 vpaddw %ymm8, %ymm6, %ymm6 vmovdqa %xmm4, 2400(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_4_3_1(%rip), %ymm11, %ymm4 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $139, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm11, %ymm11 vpaddw 2656(%r8), %ymm3, %ymm3 vpaddw %ymm11, %ymm3, %ymm3 vmovdqa %xmm4, 2656(%r8) 
vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %xmm5, 328(%rdi) vextracti128 $1, %ymm5, %xmm5 vmovq %xmm5, 344(%rdi) vpshufb shufmin1_mask3(%rip), %ymm5, %ymm5 vmovdqa %xmm5, 1792(%r8) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 680(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 696(%rdi) vpshufb shufmin1_mask3(%rip), %ymm6, %ymm6 vmovdqa %xmm6, 1824(%r8) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %xmm3, 1032(%rdi) vextracti128 $1, %ymm3, %xmm3 vmovq %xmm3, 1048(%rdi) vpshufb shufmin1_mask3(%rip), %ymm3, %ymm3 vmovdqa %xmm3, 1856(%r8) vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %xmm7, 1384(%rdi) vextracti128 $1, %ymm7, %xmm7 vpextrw $0, %xmm7, 1400(%rdi) vpshufb shufmin1_mask3(%rip), %ymm7, %ymm7 vmovdqa %xmm7, 1888(%r8) vmovdqa 128(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm8 vpunpckhwd const0(%rip), %ymm11, %ymm9 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm9, %ymm9 vmovdqa 384(%r8), %ymm7 vpunpcklwd const0(%rip), %ymm7, %ymm3 vpunpckhwd const0(%rip), %ymm7, %ymm7 vmovdqa 640(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm3, %ymm4 vpaddd %ymm6, %ymm7, %ymm2 vpsubd %ymm8, %ymm4, %ymm4 vpsubd %ymm9, %ymm2, %ymm2 vpsubd %ymm5, %ymm3, %ymm5 vpsubd %ymm6, %ymm7, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1664(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm7 vpunpckhwd const0(%rip), %ymm5, %ymm3 vpslld $1, %ymm7, %ymm7 vpslld $1, %ymm3, %ymm3 vpsubd %ymm7, %ymm4, %ymm4 vpsubd %ymm3, %ymm2, %ymm2 vpsrld $1, %ymm4, %ymm4 vpsrld $1, %ymm2, %ymm2 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm2, %ymm2 vpackusdw %ymm2, %ymm4, %ymm2 vmovdqa 896(%r8), %ymm4 vpaddw 1152(%r8), %ymm4, %ymm3 vpsubw 1152(%r8), %ymm4, %ymm4 vpsrlw $2, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsllw $1, %ymm11, %ymm7 vpsubw %ymm7, %ymm3, %ymm7 
vpsllw $7, %ymm5, %ymm3 vpsubw %ymm3, %ymm7, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vmovdqa 1408(%r8), %ymm7 vpsubw %ymm11, %ymm7, %ymm7 vpmullw %ymm15, %ymm5, %ymm9 vpsubw %ymm9, %ymm7, %ymm9 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm3, %ymm2, %ymm2 vpmullw %ymm12, %ymm3, %ymm7 vpaddw %ymm7, %ymm2, %ymm7 vpmullw %ymm12, %ymm7, %ymm7 vpsubw %ymm7, %ymm9, %ymm7 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm6, %ymm7, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm4, %ymm7, %ymm7 vpsubw %ymm7, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vpmullw %ymm13, %ymm7, %ymm7 vpsubw %ymm7, %ymm6, %ymm6 vmovdqu 416(%rdi), %ymm9 vmovdqu 768(%rdi), %ymm8 vmovdqu 1120(%rdi), %ymm10 vpaddw %ymm11, %ymm9, %ymm11 vpaddw %ymm6, %ymm8, %ymm6 vpaddw %ymm2, %ymm10, %ymm2 vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_4_3_1(%rip), %ymm4, %ymm10 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $139, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm8 vpor %ymm8, %ymm4, %ymm4 vmovdqu 64(%rdi), %ymm8 vpaddw 1920(%r8), %ymm8, %ymm8 vpaddw %ymm4, %ymm8, %ymm8 vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %xmm8, 64(%rdi) vextracti128 $1, %ymm8, %xmm8 vmovq %xmm8, 80(%rdi) vmovdqa %xmm10, 1920(%r8) vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_4_3_1(%rip), %ymm3, %ymm10 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $139, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm8 vpor %ymm8, %ymm3, %ymm3 vpaddw 2176(%r8), %ymm11, %ymm11 vpaddw %ymm3, %ymm11, %ymm11 vmovdqa %xmm10, 2176(%r8) vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_4_3_1(%rip), %ymm7, %ymm10 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $139, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm8 vpor %ymm8, %ymm7, %ymm7 vpaddw 2432(%r8), %ymm6, %ymm6 vpaddw %ymm7, %ymm6, %ymm6 vmovdqa %xmm10, 2432(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_4_3_1(%rip), %ymm5, %ymm10 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $139, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm8 vpor %ymm8, %ymm5, %ymm5 
vpaddw 2688(%r8), %ymm2, %ymm2 vpaddw %ymm5, %ymm2, %ymm2 vmovdqa %xmm10, 2688(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %xmm11, 416(%rdi) vextracti128 $1, %ymm11, %xmm11 vmovq %xmm11, 432(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 768(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 784(%rdi) vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %xmm2, 1120(%rdi) vextracti128 $1, %ymm2, %xmm2 vmovq %xmm2, 1136(%rdi) vmovdqa 160(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm7 vpunpckhwd const0(%rip), %ymm5, %ymm3 vpslld $1, %ymm7, %ymm7 vpslld $1, %ymm3, %ymm3 vmovdqa 416(%r8), %ymm4 vpunpcklwd const0(%rip), %ymm4, %ymm2 vpunpckhwd const0(%rip), %ymm4, %ymm4 vmovdqa 672(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm2, %ymm10 vpaddd %ymm6, %ymm4, %ymm8 vpsubd %ymm7, %ymm10, %ymm10 vpsubd %ymm3, %ymm8, %ymm8 vpsubd %ymm11, %ymm2, %ymm11 vpsubd %ymm6, %ymm4, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1696(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm4 vpunpckhwd const0(%rip), %ymm11, %ymm2 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm2, %ymm2 vpsubd %ymm4, %ymm10, %ymm10 vpsubd %ymm2, %ymm8, %ymm8 vpsrld $1, %ymm10, %ymm10 vpsrld $1, %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm10, %ymm10 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpackusdw %ymm8, %ymm10, %ymm8 vmovdqa 928(%r8), %ymm10 vpaddw 1184(%r8), %ymm10, %ymm2 vpsubw 1184(%r8), %ymm10, %ymm10 vpsrlw $2, %ymm10, %ymm10 vpsubw %ymm6, %ymm10, %ymm10 vpmullw %ymm14, %ymm10, %ymm10 vpsllw $1, %ymm5, %ymm4 vpsubw %ymm4, %ymm2, %ymm4 vpsllw $7, %ymm11, %ymm2 vpsubw %ymm2, %ymm4, %ymm2 vpsrlw $3, %ymm2, %ymm2 vpsubw %ymm8, %ymm2, %ymm2 vmovdqa 1440(%r8), %ymm4 vpsubw %ymm5, %ymm4, %ymm4 vpmullw %ymm15, %ymm11, %ymm3 vpsubw %ymm3, %ymm4, %ymm3 vpmullw %ymm14, %ymm2, %ymm2 vpsubw %ymm2, %ymm8, %ymm8 vpmullw 
%ymm12, %ymm2, %ymm4 vpaddw %ymm4, %ymm8, %ymm4 vpmullw %ymm12, %ymm4, %ymm4 vpsubw %ymm4, %ymm3, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm10, %ymm4, %ymm4 vpsubw %ymm4, %ymm10, %ymm10 vpsubw %ymm10, %ymm6, %ymm6 vpmullw %ymm13, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vmovdqu 504(%rdi), %ymm3 vmovdqu 856(%rdi), %ymm7 vmovdqu 1208(%rdi), %ymm9 vpaddw %ymm5, %ymm3, %ymm5 vpaddw %ymm6, %ymm7, %ymm6 vpaddw %ymm8, %ymm9, %ymm8 vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_4_3_1(%rip), %ymm10, %ymm9 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $139, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm7 vpor %ymm7, %ymm10, %ymm10 vmovdqu 152(%rdi), %ymm7 vpaddw 1952(%r8), %ymm7, %ymm7 vpaddw %ymm10, %ymm7, %ymm7 vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %xmm7, 152(%rdi) vextracti128 $1, %ymm7, %xmm7 vmovq %xmm7, 168(%rdi) vmovdqa %xmm9, 1952(%r8) vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_4_3_1(%rip), %ymm2, %ymm9 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $139, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm7 vpor %ymm7, %ymm2, %ymm2 vpaddw 2208(%r8), %ymm5, %ymm5 vpaddw %ymm2, %ymm5, %ymm5 vmovdqa %xmm9, 2208(%r8) vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_4_3_1(%rip), %ymm4, %ymm9 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $139, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm7 vpor %ymm7, %ymm4, %ymm4 vpaddw 2464(%r8), %ymm6, %ymm6 vpaddw %ymm4, %ymm6, %ymm6 vmovdqa %xmm9, 2464(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_4_3_1(%rip), %ymm11, %ymm9 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $139, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm7 vpor %ymm7, %ymm11, %ymm11 vpaddw 2720(%r8), %ymm8, %ymm8 vpaddw %ymm11, %ymm8, %ymm8 vmovdqa %xmm9, 2720(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %xmm5, 504(%rdi) vextracti128 $1, %ymm5, %xmm5 vmovq %xmm5, 520(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 856(%rdi) vextracti128 $1, 
%ymm6, %xmm6 vmovq %xmm6, 872(%rdi) vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %xmm8, 1208(%rdi) vextracti128 $1, %ymm8, %xmm8 vmovq %xmm8, 1224(%rdi) vmovdqa 192(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm4 vpunpckhwd const0(%rip), %ymm11, %ymm2 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm2, %ymm2 vmovdqa 448(%r8), %ymm10 vpunpcklwd const0(%rip), %ymm10, %ymm8 vpunpckhwd const0(%rip), %ymm10, %ymm10 vmovdqa 704(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm8, %ymm9 vpaddd %ymm6, %ymm10, %ymm7 vpsubd %ymm4, %ymm9, %ymm9 vpsubd %ymm2, %ymm7, %ymm7 vpsubd %ymm5, %ymm8, %ymm5 vpsubd %ymm6, %ymm10, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1728(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm10 vpunpckhwd const0(%rip), %ymm5, %ymm8 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm8, %ymm8 vpsubd %ymm10, %ymm9, %ymm9 vpsubd %ymm8, %ymm7, %ymm7 vpsrld $1, %ymm9, %ymm9 vpsrld $1, %ymm7, %ymm7 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm7, %ymm7 vpackusdw %ymm7, %ymm9, %ymm7 vmovdqa 960(%r8), %ymm9 vpaddw 1216(%r8), %ymm9, %ymm8 vpsubw 1216(%r8), %ymm9, %ymm9 vpsrlw $2, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsllw $1, %ymm11, %ymm10 vpsubw %ymm10, %ymm8, %ymm10 vpsllw $7, %ymm5, %ymm8 vpsubw %ymm8, %ymm10, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm7, %ymm8, %ymm8 vmovdqa 1472(%r8), %ymm10 vpsubw %ymm11, %ymm10, %ymm10 vpmullw %ymm15, %ymm5, %ymm2 vpsubw %ymm2, %ymm10, %ymm2 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm8, %ymm7, %ymm7 vpmullw %ymm12, %ymm8, %ymm10 vpaddw %ymm10, %ymm7, %ymm10 vpmullw %ymm12, %ymm10, %ymm10 vpsubw %ymm10, %ymm2, %ymm10 vpmullw %ymm14, %ymm10, %ymm10 vpsubw %ymm6, %ymm10, %ymm10 vpsrlw $3, %ymm10, %ymm10 vpsubw %ymm9, %ymm10, %ymm10 vpsubw %ymm10, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpmullw %ymm13, %ymm10, 
%ymm10 vpsubw %ymm10, %ymm6, %ymm6 vmovdqu 592(%rdi), %ymm2 vmovdqu 944(%rdi), %ymm4 vmovdqu 1296(%rdi), %ymm3 vpaddw %ymm11, %ymm2, %ymm11 vpaddw %ymm6, %ymm4, %ymm6 vpaddw %ymm7, %ymm3, %ymm7 vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_4_3_1(%rip), %ymm9, %ymm3 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $139, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm9, %ymm9 vmovdqu 240(%rdi), %ymm4 vpaddw 1984(%r8), %ymm4, %ymm4 vpaddw %ymm9, %ymm4, %ymm4 vpand mask_mod8192(%rip), %ymm4, %ymm4 vmovdqu %xmm4, 240(%rdi) vextracti128 $1, %ymm4, %xmm4 vmovq %xmm4, 256(%rdi) vmovdqa %xmm3, 1984(%r8) vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_4_3_1(%rip), %ymm8, %ymm3 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $139, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm8, %ymm8 vpaddw 2240(%r8), %ymm11, %ymm11 vpaddw %ymm8, %ymm11, %ymm11 vmovdqa %xmm3, 2240(%r8) vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_4_3_1(%rip), %ymm10, %ymm3 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $139, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm10, %ymm10 vpaddw 2496(%r8), %ymm6, %ymm6 vpaddw %ymm10, %ymm6, %ymm6 vmovdqa %xmm3, 2496(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_4_3_1(%rip), %ymm5, %ymm3 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $139, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm5, %ymm5 vpaddw 2752(%r8), %ymm7, %ymm7 vpaddw %ymm5, %ymm7, %ymm7 vmovdqa %xmm3, 2752(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %xmm11, 592(%rdi) vextracti128 $1, %ymm11, %xmm11 vmovq %xmm11, 608(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 944(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 960(%rdi) vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %xmm7, 1296(%rdi) vextracti128 $1, %ymm7, %xmm7 vmovq %xmm7, 1312(%rdi) vmovdqa 224(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm10 vpunpckhwd const0(%rip), %ymm5, %ymm8 vpslld $1, %ymm10, %ymm10 
vpslld $1, %ymm8, %ymm8 vmovdqa 480(%r8), %ymm9 vpunpcklwd const0(%rip), %ymm9, %ymm7 vpunpckhwd const0(%rip), %ymm9, %ymm9 vmovdqa 736(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm7, %ymm3 vpaddd %ymm6, %ymm9, %ymm4 vpsubd %ymm10, %ymm3, %ymm3 vpsubd %ymm8, %ymm4, %ymm4 vpsubd %ymm11, %ymm7, %ymm11 vpsubd %ymm6, %ymm9, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1760(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm9 vpunpckhwd const0(%rip), %ymm11, %ymm7 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm7, %ymm7 vpsubd %ymm9, %ymm3, %ymm3 vpsubd %ymm7, %ymm4, %ymm4 vpsrld $1, %ymm3, %ymm3 vpsrld $1, %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpackusdw %ymm4, %ymm3, %ymm4 vmovdqa 992(%r8), %ymm3 vpaddw 1248(%r8), %ymm3, %ymm7 vpsubw 1248(%r8), %ymm3, %ymm3 vpsrlw $2, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsllw $1, %ymm5, %ymm9 vpsubw %ymm9, %ymm7, %ymm9 vpsllw $7, %ymm11, %ymm7 vpsubw %ymm7, %ymm9, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm4, %ymm7, %ymm7 vmovdqa 1504(%r8), %ymm9 vpsubw %ymm5, %ymm9, %ymm9 vpmullw %ymm15, %ymm11, %ymm8 vpsubw %ymm8, %ymm9, %ymm8 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm7, %ymm4, %ymm4 vpmullw %ymm12, %ymm7, %ymm9 vpaddw %ymm9, %ymm4, %ymm9 vpmullw %ymm12, %ymm9, %ymm9 vpsubw %ymm9, %ymm8, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm3, %ymm9, %ymm9 vpsubw %ymm9, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vpmullw %ymm13, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vextracti128 $1, %ymm4, %xmm8 vpshufb shufmin1_mask3(%rip), %ymm8, %ymm8 vmovdqa %ymm8, 2816(%r8) vextracti128 $1, %ymm3, %xmm8 vpshufb shufmin1_mask3(%rip), %ymm8, %ymm8 vmovdqa %ymm8, 2848(%r8) vextracti128 $1, %ymm7, %xmm8 vpshufb shufmin1_mask3(%rip), %ymm8, %ymm8 
vmovdqa %ymm8, 2880(%r8) vmovdqu 680(%rdi), %ymm8 vmovdqu 1032(%rdi), %ymm10 # Only 18 bytes can be read at 1384, but vmovdqu reads 32. # Copy 18 bytes to the red zone and zero pad to 32 bytes. xor %r9, %r9 movq %r9, -16(%rsp) movq %r9, -8(%rsp) movq 1384(%rdi), %r9 movq %r9, -32(%rsp) movq 1384+8(%rdi), %r9 movq %r9, -24(%rsp) movw 1384+16(%rdi), %r9w movw %r9w, -16(%rsp) vmovdqu -32(%rsp), %ymm2 vpaddw %ymm5, %ymm8, %ymm5 vpaddw %ymm6, %ymm10, %ymm6 vpaddw %ymm4, %ymm2, %ymm4 vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_4_3_1(%rip), %ymm3, %ymm2 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm10 vpor %ymm10, %ymm3, %ymm3 vmovdqu 328(%rdi), %ymm10 vpaddw 2016(%r8), %ymm10, %ymm10 vpaddw %ymm3, %ymm10, %ymm10 vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %xmm10, 328(%rdi) vextracti128 $1, %ymm10, %xmm10 vmovq %xmm10, 344(%rdi) vpshufb shufmin1_mask3(%rip), %ymm10, %ymm10 vmovdqa %xmm10, 1792(%r8) vmovdqa %xmm2, 2016(%r8) vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_4_3_1(%rip), %ymm7, %ymm2 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm10 vpor %ymm10, %ymm7, %ymm7 vpaddw 2272(%r8), %ymm5, %ymm5 vpaddw %ymm7, %ymm5, %ymm5 vmovdqa %xmm2, 2272(%r8) vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_4_3_1(%rip), %ymm9, %ymm2 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm10 vpor %ymm10, %ymm9, %ymm9 vpaddw 2528(%r8), %ymm6, %ymm6 vpaddw %ymm9, %ymm6, %ymm6 vmovdqa %xmm2, 2528(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_4_3_1(%rip), %ymm11, %ymm2 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm10 vpor %ymm10, %ymm11, %ymm11 vpaddw 2784(%r8), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vmovdqa %xmm2, 2784(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %xmm5, 680(%rdi) vextracti128 $1, %ymm5, %xmm5 vmovq %xmm5, 696(%rdi) 
vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 1032(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 1048(%rdi) vpand mask_mod8192(%rip), %ymm4, %ymm4 vmovdqu %xmm4, 1384(%rdi) vextracti128 $1, %ymm4, %xmm4 vpextrw $0, %xmm4, 1400(%rdi) vmovdqu 0(%rdi), %ymm11 vpaddw 1888(%r8), %ymm11, %ymm11 vpaddw 2816(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 0(%rdi) vmovdqu 352(%rdi), %ymm11 vpaddw 2528(%r8), %ymm11, %ymm11 vpaddw 2848(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 352(%rdi) vmovdqu 704(%rdi), %ymm11 vpaddw 2784(%r8), %ymm11, %ymm11 vpaddw 2880(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 704(%rdi) vmovdqu 88(%rdi), %ymm11 vpaddw 2048(%r8), %ymm11, %ymm11 vpaddw 1920(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 88(%rdi) vmovdqu 440(%rdi), %ymm11 vpaddw 2304(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 440(%rdi) vmovdqu 792(%rdi), %ymm11 vpaddw 2560(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 792(%rdi) vmovdqu 176(%rdi), %ymm11 vpaddw 2080(%r8), %ymm11, %ymm11 vpaddw 1952(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 176(%rdi) vmovdqu 528(%rdi), %ymm11 vpaddw 2336(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 528(%rdi) vmovdqu 880(%rdi), %ymm11 vpaddw 2592(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 880(%rdi) vmovdqu 264(%rdi), %ymm11 vpaddw 2112(%r8), %ymm11, %ymm11 vpaddw 1984(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 264(%rdi) vmovdqu 616(%rdi), %ymm11 vpaddw 2368(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 616(%rdi) vmovdqu 968(%rdi), %ymm11 vpaddw 2624(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 968(%rdi) vmovdqu 352(%rdi), %ymm11 vpaddw 2144(%r8), %ymm11, 
%ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 352(%rdi) vmovdqu 704(%rdi), %ymm11 vpaddw 2400(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 704(%rdi) vmovdqu 1056(%rdi), %ymm11 vpaddw 2656(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 1056(%rdi) vmovdqu 440(%rdi), %ymm11 vpaddw 2176(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 440(%rdi) vmovdqu 792(%rdi), %ymm11 vpaddw 2432(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 792(%rdi) vmovdqu 1144(%rdi), %ymm11 vpaddw 2688(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 1144(%rdi) vmovdqu 528(%rdi), %ymm11 vpaddw 2208(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 528(%rdi) vmovdqu 880(%rdi), %ymm11 vpaddw 2464(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 880(%rdi) vmovdqu 1232(%rdi), %ymm11 vpaddw 2720(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 1232(%rdi) vmovdqu 616(%rdi), %ymm11 vpaddw 2240(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 616(%rdi) vmovdqu 968(%rdi), %ymm11 vpaddw 2496(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 968(%rdi) vmovdqu 1320(%rdi), %ymm11 vpaddw 2752(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 1320(%rdi) pop %r12 .cfi_restore r12 pop %rbp .cfi_restore rbp .cfi_def_cfa_register rsp .cfi_adjust_cfa_offset -8 ret .cfi_endproc .size poly_Rq_mul,.-poly_Rq_mul #endif #endif // defined(__x86_64__) && defined(__linux__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/crypto/hrss/hrss.cc ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or 
distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include "../internal.h" #include "internal.h" #if defined(OPENSSL_SSE2) #include #endif #if (defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)) && defined(__ARM_NEON) #include #endif // This is an implementation of [HRSS], but with a KEM transformation based on // [SXY]. The primary references are: // HRSS: https://eprint.iacr.org/2017/667.pdf // HRSSNIST: // https://csrc.nist.gov/CSRC/media/Projects/Post-Quantum-Cryptography/documents/round-1/submissions/NTRU_HRSS_KEM.zip // SXY: https://eprint.iacr.org/2017/1005.pdf // NTRUTN14: // https://assets.onboardsecurity.com/static/downloads/NTRU/resources/NTRUTech014.pdf // NTRUCOMP: https://eprint.iacr.org/2018/1174 // SAFEGCD: https://gcd.cr.yp.to/papers.html#safegcd // Vector operations. // // A couple of functions in this file can use vector operations to meaningful // effect. If we're building for a target that has a supported vector unit, // |HRSS_HAVE_VECTOR_UNIT| will be defined and |vec_t| will be typedefed to a // 128-bit vector. The following functions abstract over the differences between // NEON and SSE2 for implementing some vector operations. 
// TODO: MSVC can likely also be made to work with vector operations, but ^ must
// be replaced with _mm_xor_si128, etc.
#if defined(OPENSSL_SSE2) && (defined(__clang__) || !defined(_MSC_VER))

#define HRSS_HAVE_VECTOR_UNIT
typedef __m128i vec_t;

// vec_capable returns one iff the current platform supports SSE2.
static int vec_capable(void) { return 1; }

// vec_add performs a pair-wise addition of four uint16s from |a| and |b|.
static inline vec_t vec_add(vec_t a, vec_t b) { return _mm_add_epi16(a, b); }

// vec_sub performs a pair-wise subtraction of four uint16s from |a| and |b|.
static inline vec_t vec_sub(vec_t a, vec_t b) { return _mm_sub_epi16(a, b); }

// vec_mul multiplies each uint16_t in |a| by |b| and returns the resulting
// vector.
static inline vec_t vec_mul(vec_t a, uint16_t b) {
  return _mm_mullo_epi16(a, _mm_set1_epi16(b));
}

// vec_fma multiplies each uint16_t in |b| by |c|, adds the result to |a|, and
// returns the resulting vector.
static inline vec_t vec_fma(vec_t a, vec_t b, uint16_t c) {
  return _mm_add_epi16(a, _mm_mullo_epi16(b, _mm_set1_epi16(c)));
}

// vec3_rshift_word right-shifts the 24 uint16_t's in |v| by one uint16.
static inline void vec3_rshift_word(vec_t v[3]) {
  // Intel's left and right shifting is backwards compared to the order in
  // memory because they're based on little-endian order of words (and not just
  // bytes). So the shifts in this function will be backwards from what one
  // might expect.
  const __m128i carry0 = _mm_srli_si128(v[0], 14);
  v[0] = _mm_slli_si128(v[0], 2);

  const __m128i carry1 = _mm_srli_si128(v[1], 14);
  v[1] = _mm_slli_si128(v[1], 2);
  v[1] |= carry0;

  v[2] = _mm_slli_si128(v[2], 2);
  v[2] |= carry1;
}

// vec4_rshift_word right-shifts the 32 uint16_t's in |v| by one uint16.
static inline void vec4_rshift_word(vec_t v[4]) {
  // Intel's left and right shifting is backwards compared to the order in
  // memory because they're based on little-endian order of words (and not just
  // bytes). So the shifts in this function will be backwards from what one
  // might expect.
  const __m128i carry0 = _mm_srli_si128(v[0], 14);
  v[0] = _mm_slli_si128(v[0], 2);

  const __m128i carry1 = _mm_srli_si128(v[1], 14);
  v[1] = _mm_slli_si128(v[1], 2);
  v[1] |= carry0;

  const __m128i carry2 = _mm_srli_si128(v[2], 14);
  v[2] = _mm_slli_si128(v[2], 2);
  v[2] |= carry1;

  v[3] = _mm_slli_si128(v[3], 2);
  v[3] |= carry2;
}

// vec_merge_3_5 takes the final three uint16_t's from |left|, appends the first
// five from |right|, and returns the resulting vector.
static inline vec_t vec_merge_3_5(vec_t left, vec_t right) {
  return _mm_srli_si128(left, 10) | _mm_slli_si128(right, 6);
}

// poly3_vec_lshift1 left-shifts the 768 bits in |a_s|, and in |a_a|, by one
// bit. (768 bits = 6 × 128-bit vectors; the carry must cross both the 64-bit
// lanes within each vector and the vector boundaries.)
static inline void poly3_vec_lshift1(vec_t a_s[6], vec_t a_a[6]) {
  vec_t carry_s = {0};
  vec_t carry_a = {0};

  for (int i = 0; i < 6; i++) {
    // |_mm_slli_epi64| shifts within each 64-bit lane, so the top bit of each
    // lane is saved and re-inserted one lane (or one vector) up.
    vec_t next_carry_s = _mm_srli_epi64(a_s[i], 63);
    a_s[i] = _mm_slli_epi64(a_s[i], 1);
    a_s[i] |= _mm_slli_si128(next_carry_s, 8);
    a_s[i] |= carry_s;
    carry_s = _mm_srli_si128(next_carry_s, 8);

    vec_t next_carry_a = _mm_srli_epi64(a_a[i], 63);
    a_a[i] = _mm_slli_epi64(a_a[i], 1);
    a_a[i] |= _mm_slli_si128(next_carry_a, 8);
    a_a[i] |= carry_a;
    carry_a = _mm_srli_si128(next_carry_a, 8);
  }
}

// poly3_vec_rshift1 right-shifts the 768 bits in |a_s|, and in |a_a|, by one
// bit.
static inline void poly3_vec_rshift1(vec_t a_s[6], vec_t a_a[6]) { vec_t carry_s = {0}; vec_t carry_a = {0}; for (int i = 5; i >= 0; i--) { const vec_t next_carry_s = _mm_slli_epi64(a_s[i], 63); a_s[i] = _mm_srli_epi64(a_s[i], 1); a_s[i] |= _mm_srli_si128(next_carry_s, 8); a_s[i] |= carry_s; carry_s = _mm_slli_si128(next_carry_s, 8); const vec_t next_carry_a = _mm_slli_epi64(a_a[i], 63); a_a[i] = _mm_srli_epi64(a_a[i], 1); a_a[i] |= _mm_srli_si128(next_carry_a, 8); a_a[i] |= carry_a; carry_a = _mm_slli_si128(next_carry_a, 8); } } // vec_broadcast_bit duplicates the least-significant bit in |a| to all bits in // a vector and returns the result. static inline vec_t vec_broadcast_bit(vec_t a) { return _mm_shuffle_epi32(_mm_srai_epi32(_mm_slli_epi64(a, 63), 31), 0b01010101); } // vec_get_word returns the |i|th uint16_t in |v|. (This is a macro because the // compiler requires that |i| be a compile-time constant.) #define vec_get_word(v, i) _mm_extract_epi16(v, i) #elif (defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)) && defined(__ARM_NEON) #define HRSS_HAVE_VECTOR_UNIT typedef uint16x8_t vec_t; // These functions perform the same actions as the SSE2 function of the same // name, above. 
static int vec_capable(void) { return CRYPTO_is_NEON_capable(); }
static inline vec_t vec_add(vec_t a, vec_t b) { return a + b; }
static inline vec_t vec_sub(vec_t a, vec_t b) { return a - b; }
static inline vec_t vec_mul(vec_t a, uint16_t b) { return vmulq_n_u16(a, b); }
static inline vec_t vec_fma(vec_t a, vec_t b, uint16_t c) {
  return vmlaq_n_u16(a, b, c);
}

static inline void vec3_rshift_word(vec_t v[3]) {
  // |vextq_u16(x, y, 7)| takes the top lane of |x| and the bottom seven lanes
  // of |y|, which shifts the whole array up by one 16-bit word.
  const uint16x8_t kZero = {0};
  v[2] = vextq_u16(v[1], v[2], 7);
  v[1] = vextq_u16(v[0], v[1], 7);
  v[0] = vextq_u16(kZero, v[0], 7);
}

static inline void vec4_rshift_word(vec_t v[4]) {
  const uint16x8_t kZero = {0};
  v[3] = vextq_u16(v[2], v[3], 7);
  v[2] = vextq_u16(v[1], v[2], 7);
  v[1] = vextq_u16(v[0], v[1], 7);
  v[0] = vextq_u16(kZero, v[0], 7);
}

static inline vec_t vec_merge_3_5(vec_t left, vec_t right) {
  return vextq_u16(left, right, 5);
}

static inline uint16_t vec_get_word(vec_t v, unsigned i) { return v[i]; }

#if !defined(OPENSSL_AARCH64)
static inline vec_t vec_broadcast_bit(vec_t a) {
  // Arithmetic-shift the LSB of each lane into all bits, then broadcast lane
  // zero to the whole vector.
  a = (vec_t)vshrq_n_s16(((int16x8_t)a) << 15, 15);
  return vdupq_lane_u16(vget_low_u16(a), 0);
}

static inline void poly3_vec_lshift1(vec_t a_s[6], vec_t a_a[6]) {
  vec_t carry_s = {0};
  vec_t carry_a = {0};
  const vec_t kZero = {0};

  for (int i = 0; i < 6; i++) {
    vec_t next_carry_s = a_s[i] >> 15;
    a_s[i] <<= 1;
    a_s[i] |= vextq_u16(kZero, next_carry_s, 7);
    a_s[i] |= carry_s;
    carry_s = vextq_u16(next_carry_s, kZero, 7);

    vec_t next_carry_a = a_a[i] >> 15;
    a_a[i] <<= 1;
    a_a[i] |= vextq_u16(kZero, next_carry_a, 7);
    a_a[i] |= carry_a;
    carry_a = vextq_u16(next_carry_a, kZero, 7);
  }
}

static inline void poly3_vec_rshift1(vec_t a_s[6], vec_t a_a[6]) {
  vec_t carry_s = {0};
  vec_t carry_a = {0};
  const vec_t kZero = {0};

  for (int i = 5; i >= 0; i--) {
    vec_t next_carry_s = a_s[i] << 15;
    a_s[i] >>= 1;
    a_s[i] |= vextq_u16(next_carry_s, kZero, 1);
    a_s[i] |= carry_s;
    carry_s = vextq_u16(kZero, next_carry_s, 1);

    vec_t next_carry_a = a_a[i] << 15;
    a_a[i] >>= 1;
    a_a[i] |= vextq_u16(next_carry_a, kZero, 1);
    a_a[i] |= carry_a;
    carry_a = vextq_u16(kZero, next_carry_a, 1);
  }
}
#endif  // !OPENSSL_AARCH64

#endif  // (ARM || AARCH64) && NEON

// Polynomials in this scheme have N terms.
// #define N 701

// Underlying data types and arithmetic operations.
// ------------------------------------------------

// Binary polynomials.

// poly2 represents a degree-N polynomial over GF(2). The words are in little-
// endian order, i.e. the coefficient of x^0 is the LSB of the first word. The
// final word is only partially used since N is not a multiple of the word size.

// Defined in internal.h:
// struct poly2 {
//  crypto_word_t v[WORDS_PER_POLY];
// };

static void poly2_zero(struct poly2 *p) {
  OPENSSL_memset(&p->v[0], 0, sizeof(crypto_word_t) * WORDS_PER_POLY);
}

// word_reverse returns |in| with the bits in reverse order.
static crypto_word_t word_reverse(crypto_word_t in) {
#if defined(OPENSSL_64_BIT)
  static const crypto_word_t kMasks[6] = {
      UINT64_C(0x5555555555555555), UINT64_C(0x3333333333333333),
      UINT64_C(0x0f0f0f0f0f0f0f0f), UINT64_C(0x00ff00ff00ff00ff),
      UINT64_C(0x0000ffff0000ffff), UINT64_C(0x00000000ffffffff),
  };
#else
  static const crypto_word_t kMasks[5] = {
      0x55555555, 0x33333333, 0x0f0f0f0f, 0x00ff00ff, 0x0000ffff,
  };
#endif

  // Classic bit-reversal by swapping progressively larger groups: bits, pairs,
  // nibbles, bytes, ...
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kMasks); i++) {
    in = ((in >> (1 << i)) & kMasks[i]) | ((in & kMasks[i]) << (1 << i));
  }

  return in;
}

// lsb_to_all replicates the least-significant bit of |v| to all bits of the
// word. This is used in bit-slicing operations to make a vector from a fixed
// value.
static crypto_word_t lsb_to_all(crypto_word_t v) { return 0u - (v & 1); }

// poly2_mod_phiN reduces |p| by Φ(N).
static void poly2_mod_phiN(struct poly2 *p) {
  // m is the term at x^700, replicated to every bit.
  const crypto_word_t m =
      lsb_to_all(p->v[WORDS_PER_POLY - 1] >> (BITS_IN_LAST_WORD - 1));
  for (size_t i = 0; i < WORDS_PER_POLY; i++) {
    p->v[i] ^= m;
  }
  p->v[WORDS_PER_POLY - 1] &= (UINT64_C(1) << (BITS_IN_LAST_WORD - 1)) - 1;
}

// poly2_reverse_700 reverses the order of the first 700 bits of |in| and writes
// the result to |out|.
static void poly2_reverse_700(struct poly2 *out, const struct poly2 *in) {
  struct poly2 t;
  for (size_t i = 0; i < WORDS_PER_POLY; i++) {
    t.v[i] = word_reverse(in->v[i]);
  }

  static const size_t shift = BITS_PER_WORD - ((N - 1) % BITS_PER_WORD);
  for (size_t i = 0; i < WORDS_PER_POLY - 1; i++) {
    out->v[i] = t.v[WORDS_PER_POLY - 1 - i] >> shift;
    out->v[i] |= t.v[WORDS_PER_POLY - 2 - i] << (BITS_PER_WORD - shift);
  }
  out->v[WORDS_PER_POLY - 1] = t.v[0] >> shift;
}

// poly2_cswap exchanges the values of |a| and |b| if |swap| is all ones.
// (Constant-time: |swap| must be all-zeros or all-ones, never a boolean.)
static void poly2_cswap(struct poly2 *a, struct poly2 *b, crypto_word_t swap) {
  for (size_t i = 0; i < WORDS_PER_POLY; i++) {
    const crypto_word_t sum = swap & (a->v[i] ^ b->v[i]);
    a->v[i] ^= sum;
    b->v[i] ^= sum;
  }
}

// poly2_fmadd sets |out| to |out| + |in| * m, where m is either
// |CONSTTIME_TRUE_W| or |CONSTTIME_FALSE_W|.
static void poly2_fmadd(struct poly2 *out, const struct poly2 *in,
                        crypto_word_t m) {
  for (size_t i = 0; i < WORDS_PER_POLY; i++) {
    out->v[i] ^= in->v[i] & m;
  }
}

// poly2_lshift1 left-shifts |p| by one bit.
static void poly2_lshift1(struct poly2 *p) {
  crypto_word_t carry = 0;
  for (size_t i = 0; i < WORDS_PER_POLY; i++) {
    const crypto_word_t next_carry = p->v[i] >> (BITS_PER_WORD - 1);
    p->v[i] <<= 1;
    p->v[i] |= carry;
    carry = next_carry;
  }
}

// poly2_rshift1 right-shifts |p| by one bit.
static void poly2_rshift1(struct poly2 *p) {
  crypto_word_t carry = 0;
  // Counts down from the top word; the loop ends when |i| wraps around past
  // zero (it's a size_t, so wrap-around makes the condition false).
  for (size_t i = WORDS_PER_POLY - 1; i < WORDS_PER_POLY; i--) {
    const crypto_word_t next_carry = p->v[i] & 1;
    p->v[i] >>= 1;
    p->v[i] |= carry << (BITS_PER_WORD - 1);
    carry = next_carry;
  }
}

// poly2_clear_top_bits clears the bits in the final word that are only for
// alignment.
static void poly2_clear_top_bits(struct poly2 *p) {
  p->v[WORDS_PER_POLY - 1] &= (UINT64_C(1) << BITS_IN_LAST_WORD) - 1;
}

// Ternary polynomials.

// poly3 represents a degree-N polynomial over GF(3). Each coefficient is
// bitsliced across the |s| and |a| arrays, like this:
//
//   s  |  a  | value
//  -----------------
//   0  |  0  |  0
//   0  |  1  |  1
//   1  |  1  |  -1 (aka 2)
//   1  |  0  |  <invalid>
//
// ('s' is for sign, and 'a' is the absolute value.)
//
// Once bitsliced as such, the following circuits can be used to implement
// addition and multiplication mod 3:
//
//   (s3, a3) = (s1, a1) × (s2, a2)
//   a3 = a1 ∧ a2
//   s3 = (s1 ⊕ s2) ∧ a3
//
//   (s3, a3) = (s1, a1) + (s2, a2)
//   t = s1 ⊕ a2
//   s3 = t ∧ (s2 ⊕ a1)
//   a3 = (a1 ⊕ a2) ∨ (t ⊕ s2)
//
//   (s3, a3) = (s1, a1) - (s2, a2)
//   t = a1 ⊕ a2
//   s3 = (s1 ⊕ a2) ∧ (t ⊕ s2)
//   a3 = t ∨ (s1 ⊕ s2)
//
// Negating a value just involves XORing s by a.
//
// struct poly3 {
//   struct poly2 s, a;
// };

static void poly3_zero(struct poly3 *p) {
  poly2_zero(&p->s);
  poly2_zero(&p->a);
}

// poly3_reverse_700 reverses the order of the first 700 terms of |in| and
// writes them to |out|.
static void poly3_reverse_700(struct poly3 *out, const struct poly3 *in) {
  poly2_reverse_700(&out->a, &in->a);
  poly2_reverse_700(&out->s, &in->s);
}

// poly3_word_mul sets (|out_s|, |out_a|) to (|s1|, |a1|) × (|s2|, |a2|).
static void poly3_word_mul(crypto_word_t *out_s, crypto_word_t *out_a,
                           const crypto_word_t s1, const crypto_word_t a1,
                           const crypto_word_t s2, const crypto_word_t a2) {
  *out_a = a1 & a2;
  *out_s = (s1 ^ s2) & *out_a;
}

// poly3_word_add sets (|out_s|, |out_a|) to (|s1|, |a1|) + (|s2|, |a2|).
static void poly3_word_add(crypto_word_t *out_s, crypto_word_t *out_a,
                           const crypto_word_t s1, const crypto_word_t a1,
                           const crypto_word_t s2, const crypto_word_t a2) {
  const crypto_word_t t = s1 ^ a2;
  *out_s = t & (s2 ^ a1);
  *out_a = (a1 ^ a2) | (t ^ s2);
}

// poly3_word_sub sets (|out_s|, |out_a|) to (|s1|, |a1|) - (|s2|, |a2|).
static void poly3_word_sub(crypto_word_t *out_s, crypto_word_t *out_a,
                           const crypto_word_t s1, const crypto_word_t a1,
                           const crypto_word_t s2, const crypto_word_t a2) {
  const crypto_word_t t = a1 ^ a2;
  *out_s = (s1 ^ a2) & (t ^ s2);
  *out_a = t | (s1 ^ s2);
}

// poly3_mul_const sets |p| to |p|×m, where m = (ms, ma).
static void poly3_mul_const(struct poly3 *p, crypto_word_t ms,
                            crypto_word_t ma) {
  ms = lsb_to_all(ms);
  ma = lsb_to_all(ma);

  for (size_t i = 0; i < WORDS_PER_POLY; i++) {
    poly3_word_mul(&p->s.v[i], &p->a.v[i], p->s.v[i], p->a.v[i], ms, ma);
  }
}

// poly3_fmsub sets |out| to |out| - |in|×m, where m is (ms, ma).
static void poly3_fmsub(struct poly3 *out, const struct poly3 *in,
                        crypto_word_t ms, crypto_word_t ma) {
  crypto_word_t product_s, product_a;
  for (size_t i = 0; i < WORDS_PER_POLY; i++) {
    poly3_word_mul(&product_s, &product_a, in->s.v[i], in->a.v[i], ms, ma);
    poly3_word_sub(&out->s.v[i], &out->a.v[i], out->s.v[i], out->a.v[i],
                   product_s, product_a);
  }
}

// final_bit_to_all replicates the bit in the final position of the last word to
// all the bits in the word.
static crypto_word_t final_bit_to_all(crypto_word_t v) {
  return lsb_to_all(v >> (BITS_IN_LAST_WORD - 1));
}

// poly3_mod_phiN reduces |p| by Φ(N).
static void poly3_mod_phiN(struct poly3 *p) {
  // In order to reduce by Φ(N) we subtract by the value of the greatest
  // coefficient.
  const crypto_word_t factor_s = final_bit_to_all(p->s.v[WORDS_PER_POLY - 1]);
  const crypto_word_t factor_a = final_bit_to_all(p->a.v[WORDS_PER_POLY - 1]);

  for (size_t i = 0; i < WORDS_PER_POLY; i++) {
    poly3_word_sub(&p->s.v[i], &p->a.v[i], p->s.v[i], p->a.v[i], factor_s,
                   factor_a);
  }

  poly2_clear_top_bits(&p->s);
  poly2_clear_top_bits(&p->a);
}

static void poly3_cswap(struct poly3 *a, struct poly3 *b, crypto_word_t swap) {
  poly2_cswap(&a->s, &b->s, swap);
  poly2_cswap(&a->a, &b->a, swap);
}

static void poly3_lshift1(struct poly3 *p) {
  poly2_lshift1(&p->s);
  poly2_lshift1(&p->a);
}

static void poly3_rshift1(struct poly3 *p) {
  poly2_rshift1(&p->s);
  poly2_rshift1(&p->a);
}

// poly3_span represents a pointer into a poly3.
struct poly3_span {
  crypto_word_t *s;
  crypto_word_t *a;
};

// poly3_span_add adds |n| words of values from |a| and |b| and writes the
// result to |out|.
static void poly3_span_add(const struct poly3_span *out,
                           const struct poly3_span *a,
                           const struct poly3_span *b, size_t n) {
  for (size_t i = 0; i < n; i++) {
    poly3_word_add(&out->s[i], &out->a[i], a->s[i], a->a[i], b->s[i], b->a[i]);
  }
}

// poly3_span_sub subtracts |n| words of |b| from |n| words of |a|.
static void poly3_span_sub(const struct poly3_span *a,
                           const struct poly3_span *b, size_t n) {
  for (size_t i = 0; i < n; i++) {
    poly3_word_sub(&a->s[i], &a->a[i], a->s[i], a->a[i], b->s[i], b->a[i]);
  }
}

// poly3_mul_aux is a recursive function that multiplies |n| words from |a| and
// |b| and writes 2×|n| words to |out|. Each call uses 2*ceil(n/2) elements of
// |scratch| and the function recurses, except if |n| == 1, when |scratch| isn't
// used and the recursion stops. For |n| in {11, 22}, the transitive total
// amount of |scratch| needed happens to be 2n+2.
static void poly3_mul_aux(const struct poly3_span *out,
                          const struct poly3_span *scratch,
                          const struct poly3_span *a,
                          const struct poly3_span *b, size_t n) {
  if (n == 1) {
    // Base case: schoolbook multiplication of one bitsliced word by another,
    // producing a two-word result.
    crypto_word_t r_s_low = 0, r_s_high = 0, r_a_low = 0, r_a_high = 0;
    crypto_word_t b_s = b->s[0], b_a = b->a[0];
    const crypto_word_t a_s = a->s[0], a_a = a->a[0];

    for (size_t i = 0; i < BITS_PER_WORD; i++) {
      // Multiply (s, a) by the next value from (b_s, b_a).
      crypto_word_t m_s, m_a;
      poly3_word_mul(&m_s, &m_a, a_s, a_a, lsb_to_all(b_s), lsb_to_all(b_a));
      b_s >>= 1;
      b_a >>= 1;

      if (i == 0) {
        // Special case otherwise the code tries to shift by BITS_PER_WORD
        // below, which is undefined.
        r_s_low = m_s;
        r_a_low = m_a;
        continue;
      }

      // Shift the multiplication result to the correct position.
      const crypto_word_t m_s_low = m_s << i;
      const crypto_word_t m_s_high = m_s >> (BITS_PER_WORD - i);
      const crypto_word_t m_a_low = m_a << i;
      const crypto_word_t m_a_high = m_a >> (BITS_PER_WORD - i);

      // Add into the result.
      poly3_word_add(&r_s_low, &r_a_low, r_s_low, r_a_low, m_s_low, m_a_low);
      poly3_word_add(&r_s_high, &r_a_high, r_s_high, r_a_high, m_s_high,
                     m_a_high);
    }

    out->s[0] = r_s_low;
    out->s[1] = r_s_high;
    out->a[0] = r_a_low;
    out->a[1] = r_a_high;
    return;
  }

  // Karatsuba multiplication.
  // https://en.wikipedia.org/wiki/Karatsuba_algorithm

  // When |n| is odd, the two "halves" will have different lengths. The first
  // is always the smaller.
  const size_t low_len = n / 2;
  const size_t high_len = n - low_len;
  const struct poly3_span a_high = {&a->s[low_len], &a->a[low_len]};
  const struct poly3_span b_high = {&b->s[low_len], &b->a[low_len]};

  // Store a_1 + a_0 in the first half of |out| and b_1 + b_0 in the second
  // half.
  const struct poly3_span a_cross_sum = *out;
  const struct poly3_span b_cross_sum = {&out->s[high_len], &out->a[high_len]};
  poly3_span_add(&a_cross_sum, a, &a_high, low_len);
  poly3_span_add(&b_cross_sum, b, &b_high, low_len);
  if (high_len != low_len) {
    // The high halves have one extra word; copy it straight across since the
    // low halves have nothing at that position to add.
    a_cross_sum.s[low_len] = a_high.s[low_len];
    a_cross_sum.a[low_len] = a_high.a[low_len];
    b_cross_sum.s[low_len] = b_high.s[low_len];
    b_cross_sum.a[low_len] = b_high.a[low_len];
  }

  const struct poly3_span child_scratch = {&scratch->s[2 * high_len],
                                           &scratch->a[2 * high_len]};
  const struct poly3_span out_mid = {&out->s[low_len], &out->a[low_len]};
  const struct poly3_span out_high = {&out->s[2 * low_len],
                                      &out->a[2 * low_len]};

  // Calculate (a_1 + a_0) × (b_1 + b_0) and write to scratch buffer.
  poly3_mul_aux(scratch, &child_scratch, &a_cross_sum, &b_cross_sum, high_len);
  // Calculate a_1 × b_1.
  poly3_mul_aux(&out_high, &child_scratch, &a_high, &b_high, high_len);
  // Calculate a_0 × b_0.
  poly3_mul_aux(out, &child_scratch, a, b, low_len);

  // Subtract those last two products from the first.
  poly3_span_sub(scratch, out, low_len * 2);
  poly3_span_sub(scratch, &out_high, high_len * 2);

  // Add the middle product into the output.
  poly3_span_add(&out_mid, &out_mid, scratch, high_len * 2);
}

// HRSS_poly3_mul sets |*out| to |x|×|y| mod Φ(N).
void HRSS_poly3_mul(struct poly3 *out, const struct poly3 *x, const struct poly3 *y) { crypto_word_t prod_s[WORDS_PER_POLY * 2]; crypto_word_t prod_a[WORDS_PER_POLY * 2]; crypto_word_t scratch_s[WORDS_PER_POLY * 2 + 2]; crypto_word_t scratch_a[WORDS_PER_POLY * 2 + 2]; const struct poly3_span prod_span = {prod_s, prod_a}; const struct poly3_span scratch_span = {scratch_s, scratch_a}; const struct poly3_span x_span = {(crypto_word_t *)x->s.v, (crypto_word_t *)x->a.v}; const struct poly3_span y_span = {(crypto_word_t *)y->s.v, (crypto_word_t *)y->a.v}; poly3_mul_aux(&prod_span, &scratch_span, &x_span, &y_span, WORDS_PER_POLY); // |prod| needs to be reduced mod (𝑥^n - 1), which just involves adding the // upper-half to the lower-half. However, N is 701, which isn't a multiple of // BITS_PER_WORD, so the upper-half vectors all have to be shifted before // being added to the lower-half. for (size_t i = 0; i < WORDS_PER_POLY; i++) { crypto_word_t v_s = prod_s[WORDS_PER_POLY + i - 1] >> BITS_IN_LAST_WORD; v_s |= prod_s[WORDS_PER_POLY + i] << (BITS_PER_WORD - BITS_IN_LAST_WORD); crypto_word_t v_a = prod_a[WORDS_PER_POLY + i - 1] >> BITS_IN_LAST_WORD; v_a |= prod_a[WORDS_PER_POLY + i] << (BITS_PER_WORD - BITS_IN_LAST_WORD); poly3_word_add(&out->s.v[i], &out->a.v[i], prod_s[i], prod_a[i], v_s, v_a); } poly3_mod_phiN(out); } #if defined(HRSS_HAVE_VECTOR_UNIT) && !defined(OPENSSL_AARCH64) // poly3_vec_cswap swaps (|a_s|, |a_a|) and (|b_s|, |b_a|) if |swap| is // |0xff..ff|. Otherwise, |swap| must be zero. static inline void poly3_vec_cswap(vec_t a_s[6], vec_t a_a[6], vec_t b_s[6], vec_t b_a[6], const vec_t swap) { for (int i = 0; i < 6; i++) { const vec_t sum_s = swap & (a_s[i] ^ b_s[i]); a_s[i] ^= sum_s; b_s[i] ^= sum_s; const vec_t sum_a = swap & (a_a[i] ^ b_a[i]); a_a[i] ^= sum_a; b_a[i] ^= sum_a; } } // poly3_vec_fmsub subtracts (|ms|, |ma|) × (|b_s|, |b_a|) from (|a_s|, |a_a|). 
static inline void poly3_vec_fmsub(vec_t a_s[6], vec_t a_a[6], vec_t b_s[6], vec_t b_a[6], const vec_t ms, const vec_t ma) { for (int i = 0; i < 6; i++) { // See the bitslice formula, above. const vec_t s = b_s[i]; const vec_t a = b_a[i]; const vec_t product_a = a & ma; const vec_t product_s = (s ^ ms) & product_a; const vec_t out_s = a_s[i]; const vec_t out_a = a_a[i]; const vec_t t = out_a ^ product_a; a_s[i] = (out_s ^ product_a) & (t ^ product_s); a_a[i] = t | (out_s ^ product_s); } } // poly3_invert_vec sets |*out| to |in|^-1, i.e. such that |out|×|in| == 1 mod // Φ(N). static void poly3_invert_vec(struct poly3 *out, const struct poly3 *in) { // This algorithm is taken from section 7.1 of [SAFEGCD]. const vec_t kZero = {0}; const vec_t kOne = {1}; static const uint8_t kBottomSixtyOne[sizeof(vec_t)] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f}; vec_t v_s[6], v_a[6], r_s[6], r_a[6], f_s[6], f_a[6], g_s[6], g_a[6]; // v = 0 memset(&v_s, 0, sizeof(v_s)); memset(&v_a, 0, sizeof(v_a)); // r = 1 memset(&r_s, 0, sizeof(r_s)); memset(&r_a, 0, sizeof(r_a)); r_a[0] = kOne; // f = all ones. memset(f_s, 0, sizeof(f_s)); memset(f_a, 0xff, 5 * sizeof(vec_t)); memcpy(&f_a[5], kBottomSixtyOne, sizeof(kBottomSixtyOne)); // g is the reversal of |in|. 
struct poly3 in_reversed; poly3_reverse_700(&in_reversed, in); g_s[5] = kZero; memcpy(&g_s, &in_reversed.s.v, WORDS_PER_POLY * sizeof(crypto_word_t)); g_a[5] = kZero; memcpy(&g_a, &in_reversed.a.v, WORDS_PER_POLY * sizeof(crypto_word_t)); int delta = 1; for (size_t i = 0; i < (2 * (N - 1)) - 1; i++) { poly3_vec_lshift1(v_s, v_a); const crypto_word_t delta_sign_bit = (delta >> (sizeof(delta) * 8 - 1)) & 1; const crypto_word_t delta_is_non_negative = delta_sign_bit - 1; const crypto_word_t delta_is_non_zero = ~constant_time_is_zero_w(delta); const vec_t g_has_constant_term = vec_broadcast_bit(g_a[0]); const vec_t mask_w = { static_cast::type>( delta_is_non_negative & delta_is_non_zero)}; const vec_t mask = vec_broadcast_bit(mask_w) & g_has_constant_term; const vec_t c_a = vec_broadcast_bit(f_a[0] & g_a[0]); const vec_t c_s = vec_broadcast_bit((f_s[0] ^ g_s[0]) & c_a); delta = constant_time_select_int(lsb_to_all(mask[0]), -delta, delta); delta++; poly3_vec_cswap(f_s, f_a, g_s, g_a, mask); poly3_vec_fmsub(g_s, g_a, f_s, f_a, c_s, c_a); poly3_vec_rshift1(g_s, g_a); poly3_vec_cswap(v_s, v_a, r_s, r_a, mask); poly3_vec_fmsub(r_s, r_a, v_s, v_a, c_s, c_a); } assert(delta == 0); memcpy(out->s.v, v_s, WORDS_PER_POLY * sizeof(crypto_word_t)); memcpy(out->a.v, v_a, WORDS_PER_POLY * sizeof(crypto_word_t)); poly3_mul_const(out, vec_get_word(f_s[0], 0), vec_get_word(f_a[0], 0)); poly3_reverse_700(out, out); } #endif // HRSS_HAVE_VECTOR_UNIT // HRSS_poly3_invert sets |*out| to |in|^-1, i.e. such that |out|×|in| == 1 mod // Φ(N). void HRSS_poly3_invert(struct poly3 *out, const struct poly3 *in) { // The vector version of this function seems slightly slower on AArch64, but // is useful on ARMv7 and x86-64. #if defined(HRSS_HAVE_VECTOR_UNIT) && !defined(OPENSSL_AARCH64) if (vec_capable()) { poly3_invert_vec(out, in); return; } #endif // This algorithm is taken from section 7.1 of [SAFEGCD]. 
struct poly3 v, r, f, g; // v = 0 poly3_zero(&v); // r = 1 poly3_zero(&r); r.a.v[0] = 1; // f = all ones. OPENSSL_memset(&f.s, 0, sizeof(struct poly2)); OPENSSL_memset(&f.a, 0xff, sizeof(struct poly2)); f.a.v[WORDS_PER_POLY - 1] >>= BITS_PER_WORD - BITS_IN_LAST_WORD; // g is the reversal of |in|. poly3_reverse_700(&g, in); int delta = 1; for (size_t i = 0; i < (2 * (N - 1)) - 1; i++) { poly3_lshift1(&v); const crypto_word_t delta_sign_bit = (delta >> (sizeof(delta) * 8 - 1)) & 1; const crypto_word_t delta_is_non_negative = delta_sign_bit - 1; const crypto_word_t delta_is_non_zero = ~constant_time_is_zero_w(delta); const crypto_word_t g_has_constant_term = lsb_to_all(g.a.v[0]); const crypto_word_t mask = g_has_constant_term & delta_is_non_negative & delta_is_non_zero; crypto_word_t c_s, c_a; poly3_word_mul(&c_s, &c_a, f.s.v[0], f.a.v[0], g.s.v[0], g.a.v[0]); c_s = lsb_to_all(c_s); c_a = lsb_to_all(c_a); delta = constant_time_select_int(mask, -delta, delta); delta++; poly3_cswap(&f, &g, mask); poly3_fmsub(&g, &f, c_s, c_a); poly3_rshift1(&g); poly3_cswap(&v, &r, mask); poly3_fmsub(&r, &v, c_s, c_a); } assert(delta == 0); poly3_mul_const(&v, f.s.v[0], f.a.v[0]); poly3_reverse_700(out, &v); } // Polynomials in Q. // Coefficients are reduced mod Q. (Q is clearly not prime, therefore the // coefficients do not form a field.) #define Q 8192 // VECS_PER_POLY is the number of 128-bit vectors needed to represent a // polynomial. #define COEFFICIENTS_PER_VEC (sizeof(vec_t) / sizeof(uint16_t)) #define VECS_PER_POLY ((N + COEFFICIENTS_PER_VEC - 1) / COEFFICIENTS_PER_VEC) namespace { // poly represents a polynomial with coefficients mod Q. Note that, while Q is a // power of two, this does not operate in GF(Q). That would be a binary field // but this is simply mod Q. Thus the coefficients are not a field. // // Coefficients are ordered little-endian, thus the coefficient of x^0 is the // first element of the array. 
struct poly { #if defined(HRSS_HAVE_VECTOR_UNIT) union { // N + 3 = 704, which is a multiple of 64 and thus aligns things, esp for // the vector code. uint16_t v[N + 3]; vec_t vectors[VECS_PER_POLY]; }; #else // Even if !HRSS_HAVE_VECTOR_UNIT, external assembly may be called that // requires alignment. alignas(16) uint16_t v[N + 3]; #endif }; } // namespace // poly_normalize zeros out the excess elements of |x| which are included only // for alignment. static void poly_normalize(struct poly *x) { OPENSSL_memset(&x->v[N], 0, 3 * sizeof(uint16_t)); } // poly_assert_normalized asserts that the excess elements of |x| are zeroed out // for the cases that case. (E.g. |poly_mul_vec|.) static void poly_assert_normalized(const struct poly *x) { assert(x->v[N] == 0); assert(x->v[N + 1] == 0); assert(x->v[N + 2] == 0); } namespace { // POLY_MUL_SCRATCH contains space for the working variables needed by // |poly_mul|. The contents afterwards may be discarded, but the object may also // be reused with future |poly_mul| calls to save heap allocations. // // This object must have 32-byte alignment. struct POLY_MUL_SCRATCH { union { // This is used by |poly_mul_novec|. struct { uint16_t prod[2 * N]; uint16_t scratch[1318]; } novec; #if defined(HRSS_HAVE_VECTOR_UNIT) // This is used by |poly_mul_vec|. struct { vec_t prod[VECS_PER_POLY * 2]; vec_t scratch[172]; } vec; #endif #if defined(POLY_RQ_MUL_ASM) // This is the space used by |poly_Rq_mul|. uint8_t rq[POLY_MUL_RQ_SCRATCH_SPACE]; #endif } u; }; } // namespace #if defined(HRSS_HAVE_VECTOR_UNIT) // poly_mul_vec_aux is a recursive function that multiplies |n| words from |a| // and |b| and writes 2×|n| words to |out|. Each call uses 2*ceil(n/2) elements // of |scratch| and the function recurses, except if |n| < 3, when |scratch| // isn't used and the recursion stops. If |n| == |VECS_PER_POLY| then |scratch| // needs 172 elements. 
static void poly_mul_vec_aux(vec_t *out, vec_t *scratch, const vec_t *a, const vec_t *b, const size_t n) { // In [HRSS], the technique they used for polynomial multiplication is // described: they start with Toom-4 at the top level and then two layers of // Karatsuba. Karatsuba is a specific instance of the general Toom–Cook // decomposition, which splits an input n-ways and produces 2n-1 // multiplications of those parts. So, starting with 704 coefficients (rounded // up from 701 to have more factors of two), Toom-4 gives seven // multiplications of degree-174 polynomials. Each round of Karatsuba (which // is Toom-2) increases the number of multiplications by a factor of three // while halving the size of the values being multiplied. So two rounds gives // 63 multiplications of degree-44 polynomials. Then they (I think) form // vectors by gathering all 63 coefficients of each power together, for each // input, and doing more rounds of Karatsuba on the vectors until they bottom- // out somewhere with schoolbook multiplication. // // I tried something like that for NEON. NEON vectors are 128 bits so hold // eight coefficients. I wrote a function that did Karatsuba on eight // multiplications at the same time, using such vectors, and a Go script that // decomposed from degree-704, with Karatsuba in non-transposed form, until it // reached multiplications of degree-44. It batched up those 81 // multiplications into lots of eight with a single one left over (which was // handled directly). // // It worked, but it was significantly slower than the dumb algorithm used // below. Potentially that was because I misunderstood how [HRSS] did it, or // because Clang is bad at generating good code from NEON intrinsics on ARMv7. // (Which is true: the code generated by Clang for the below is pretty crap.) // // This algorithm is much simpler. It just does Karatsuba decomposition all // the way down and never transposes. 
When it gets down to degree-16 or // degree-24 values, they are multiplied using schoolbook multiplication and // vector intrinsics. The vector operations form each of the eight phase- // shifts of one of the inputs, point-wise multiply, and then add into the // result at the correct place. This means that 33% (degree-16) or 25% // (degree-24) of the multiplies and adds are wasted, but it does ok. if (n == 2) { vec_t result[4]; vec_t vec_a[3]; static const vec_t kZero = {0}; vec_a[0] = a[0]; vec_a[1] = a[1]; vec_a[2] = kZero; result[0] = vec_mul(vec_a[0], vec_get_word(b[0], 0)); result[1] = vec_mul(vec_a[1], vec_get_word(b[0], 0)); result[1] = vec_fma(result[1], vec_a[0], vec_get_word(b[1], 0)); result[2] = vec_mul(vec_a[1], vec_get_word(b[1], 0)); result[3] = kZero; vec3_rshift_word(vec_a); #define BLOCK(x, y) \ do { \ result[x + 0] = \ vec_fma(result[x + 0], vec_a[0], vec_get_word(b[y / 8], y % 8)); \ result[x + 1] = \ vec_fma(result[x + 1], vec_a[1], vec_get_word(b[y / 8], y % 8)); \ result[x + 2] = \ vec_fma(result[x + 2], vec_a[2], vec_get_word(b[y / 8], y % 8)); \ } while (0) BLOCK(0, 1); BLOCK(1, 9); vec3_rshift_word(vec_a); BLOCK(0, 2); BLOCK(1, 10); vec3_rshift_word(vec_a); BLOCK(0, 3); BLOCK(1, 11); vec3_rshift_word(vec_a); BLOCK(0, 4); BLOCK(1, 12); vec3_rshift_word(vec_a); BLOCK(0, 5); BLOCK(1, 13); vec3_rshift_word(vec_a); BLOCK(0, 6); BLOCK(1, 14); vec3_rshift_word(vec_a); BLOCK(0, 7); BLOCK(1, 15); #undef BLOCK memcpy(out, result, sizeof(result)); return; } if (n == 3) { vec_t result[6]; vec_t vec_a[4]; static const vec_t kZero = {0}; vec_a[0] = a[0]; vec_a[1] = a[1]; vec_a[2] = a[2]; vec_a[3] = kZero; result[0] = vec_mul(a[0], vec_get_word(b[0], 0)); result[1] = vec_mul(a[1], vec_get_word(b[0], 0)); result[2] = vec_mul(a[2], vec_get_word(b[0], 0)); #define BLOCK_PRE(x, y) \ do { \ result[x + 0] = \ vec_fma(result[x + 0], vec_a[0], vec_get_word(b[y / 8], y % 8)); \ result[x + 1] = \ vec_fma(result[x + 1], vec_a[1], vec_get_word(b[y / 8], y % 8)); \ 
result[x + 2] = vec_mul(vec_a[2], vec_get_word(b[y / 8], y % 8)); \ } while (0) BLOCK_PRE(1, 8); BLOCK_PRE(2, 16); result[5] = kZero; vec4_rshift_word(vec_a); #define BLOCK(x, y) \ do { \ result[x + 0] = \ vec_fma(result[x + 0], vec_a[0], vec_get_word(b[y / 8], y % 8)); \ result[x + 1] = \ vec_fma(result[x + 1], vec_a[1], vec_get_word(b[y / 8], y % 8)); \ result[x + 2] = \ vec_fma(result[x + 2], vec_a[2], vec_get_word(b[y / 8], y % 8)); \ result[x + 3] = \ vec_fma(result[x + 3], vec_a[3], vec_get_word(b[y / 8], y % 8)); \ } while (0) BLOCK(0, 1); BLOCK(1, 9); BLOCK(2, 17); vec4_rshift_word(vec_a); BLOCK(0, 2); BLOCK(1, 10); BLOCK(2, 18); vec4_rshift_word(vec_a); BLOCK(0, 3); BLOCK(1, 11); BLOCK(2, 19); vec4_rshift_word(vec_a); BLOCK(0, 4); BLOCK(1, 12); BLOCK(2, 20); vec4_rshift_word(vec_a); BLOCK(0, 5); BLOCK(1, 13); BLOCK(2, 21); vec4_rshift_word(vec_a); BLOCK(0, 6); BLOCK(1, 14); BLOCK(2, 22); vec4_rshift_word(vec_a); BLOCK(0, 7); BLOCK(1, 15); BLOCK(2, 23); #undef BLOCK #undef BLOCK_PRE memcpy(out, result, sizeof(result)); return; } // Karatsuba multiplication. // https://en.wikipedia.org/wiki/Karatsuba_algorithm // When |n| is odd, the two "halves" will have different lengths. The first is // always the smaller. const size_t low_len = n / 2; const size_t high_len = n - low_len; const vec_t *a_high = &a[low_len]; const vec_t *b_high = &b[low_len]; // Store a_1 + a_0 in the first half of |out| and b_1 + b_0 in the second // half. for (size_t i = 0; i < low_len; i++) { out[i] = vec_add(a_high[i], a[i]); out[high_len + i] = vec_add(b_high[i], b[i]); } if (high_len != low_len) { out[low_len] = a_high[low_len]; out[high_len + low_len] = b_high[low_len]; } vec_t *const child_scratch = &scratch[2 * high_len]; // Calculate (a_1 + a_0) × (b_1 + b_0) and write to scratch buffer. poly_mul_vec_aux(scratch, child_scratch, out, &out[high_len], high_len); // Calculate a_1 × b_1. 
poly_mul_vec_aux(&out[low_len * 2], child_scratch, a_high, b_high, high_len); // Calculate a_0 × b_0. poly_mul_vec_aux(out, child_scratch, a, b, low_len); // Subtract those last two products from the first. for (size_t i = 0; i < low_len * 2; i++) { scratch[i] = vec_sub(scratch[i], vec_add(out[i], out[low_len * 2 + i])); } if (low_len != high_len) { scratch[low_len * 2] = vec_sub(scratch[low_len * 2], out[low_len * 4]); scratch[low_len * 2 + 1] = vec_sub(scratch[low_len * 2 + 1], out[low_len * 4 + 1]); } // Add the middle product into the output. for (size_t i = 0; i < high_len * 2; i++) { out[low_len + i] = vec_add(out[low_len + i], scratch[i]); } } // poly_mul_vec sets |*out| to |x|×|y| mod (𝑥^n - 1). static void poly_mul_vec(struct POLY_MUL_SCRATCH *scratch, struct poly *out, const struct poly *x, const struct poly *y) { static_assert(sizeof(out->v) == sizeof(vec_t) * VECS_PER_POLY, "struct poly is the wrong size"); static_assert(alignof(struct poly) == alignof(vec_t), "struct poly has incorrect alignment"); poly_assert_normalized(x); poly_assert_normalized(y); vec_t *const prod = scratch->u.vec.prod; vec_t *const aux_scratch = scratch->u.vec.scratch; poly_mul_vec_aux(prod, aux_scratch, x->vectors, y->vectors, VECS_PER_POLY); // |prod| needs to be reduced mod (𝑥^n - 1), which just involves adding the // upper-half to the lower-half. However, N is 701, which isn't a multiple of // the vector size, so the upper-half vectors all have to be shifted before // being added to the lower-half. vec_t *out_vecs = (vec_t *)out->v; for (size_t i = 0; i < VECS_PER_POLY; i++) { const vec_t prev = prod[VECS_PER_POLY - 1 + i]; const vec_t this_vec = prod[VECS_PER_POLY + i]; out_vecs[i] = vec_add(prod[i], vec_merge_3_5(prev, this_vec)); } OPENSSL_memset(&out->v[N], 0, 3 * sizeof(uint16_t)); } #endif // HRSS_HAVE_VECTOR_UNIT // poly_mul_novec_aux writes the product of |a| and |b| to |out|, using // |scratch| as scratch space. 
// It'll use Karatsuba if the inputs are large
// enough to warrant it. Each call uses 2*ceil(n/2) elements of |scratch| and
// the function recurses, except if |n| < 64, when |scratch| isn't used and the
// recursion stops. If |n| == |N| then |scratch| needs 1318 elements.
static void poly_mul_novec_aux(uint16_t *out, uint16_t *scratch,
                               const uint16_t *a, const uint16_t *b, size_t n) {
  static const size_t kSchoolbookLimit = 64;
  if (n < kSchoolbookLimit) {
    // Schoolbook multiplication: writes 2n coefficients to |out|. The
    // coefficient arithmetic wraps mod 2^16; the caller is responsible for
    // any later reduction mod Q.
    OPENSSL_memset(out, 0, sizeof(uint16_t) * n * 2);
    for (size_t i = 0; i < n; i++) {
      for (size_t j = 0; j < n; j++) {
        out[i + j] += (unsigned)a[i] * b[j];
      }
    }
    return;
  }

  // Karatsuba multiplication.
  // https://en.wikipedia.org/wiki/Karatsuba_algorithm

  // When |n| is odd, the two "halves" will have different lengths. The
  // first is always the smaller.
  const size_t low_len = n / 2;
  const size_t high_len = n - low_len;
  const uint16_t *const a_high = &a[low_len];
  const uint16_t *const b_high = &b[low_len];

  // Store a_1 + a_0 in the first half of |out| and b_1 + b_0 in the second
  // half; these are the inputs to the middle product below.
  for (size_t i = 0; i < low_len; i++) {
    out[i] = a_high[i] + a[i];
    out[high_len + i] = b_high[i] + b[i];
  }
  if (high_len != low_len) {
    out[low_len] = a_high[low_len];
    out[high_len + low_len] = b_high[low_len];
  }

  uint16_t *const child_scratch = &scratch[2 * high_len];
  // Calculate (a_1 + a_0) × (b_1 + b_0) and write it to the scratch buffer.
  poly_mul_novec_aux(scratch, child_scratch, out, &out[high_len], high_len);
  // Calculate a_1 × b_1.
  poly_mul_novec_aux(&out[low_len * 2], child_scratch, a_high, b_high,
                     high_len);
  // Calculate a_0 × b_0.
  poly_mul_novec_aux(out, child_scratch, a, b, low_len);

  // Subtract those last two products from the first.
  for (size_t i = 0; i < low_len * 2; i++) {
    scratch[i] -= out[i] + out[low_len * 2 + i];
  }
  if (low_len != high_len) {
    scratch[low_len * 2] -= out[low_len * 4];
    assert(out[low_len * 4 + 1] == 0);
  }

  // Add the middle product into the output.
  for (size_t i = 0; i < high_len * 2; i++) {
    out[low_len + i] += scratch[i];
  }
}

// poly_mul_novec sets |*out| to |x|×|y| mod (𝑥^n - 1).
static void poly_mul_novec(struct POLY_MUL_SCRATCH *scratch, struct poly *out,
                           const struct poly *x, const struct poly *y) {
  uint16_t *const prod = scratch->u.novec.prod;
  uint16_t *const aux_scratch = scratch->u.novec.scratch;
  poly_mul_novec_aux(prod, aux_scratch, x->v, y->v, N);

  // Reduce the 2N-coefficient product mod (𝑥^n - 1) by adding the upper half
  // into the lower half.
  for (size_t i = 0; i < N; i++) {
    out->v[i] = prod[i] + prod[i + N];
  }
  // Zero the three padding coefficients after the N real ones.
  OPENSSL_memset(&out->v[N], 0, 3 * sizeof(uint16_t));
}

// poly_mul sets |*r| to |a|×|b| mod (𝑥^n - 1), dispatching to the fastest
// implementation available at runtime: the AVX2 assembly, the generic vector
// code, or the scalar fallback.
static void poly_mul(struct POLY_MUL_SCRATCH *scratch, struct poly *r,
                     const struct poly *a, const struct poly *b) {
#if defined(POLY_RQ_MUL_ASM)
  if (CRYPTO_is_AVX2_capable()) {
    poly_Rq_mul(r->v, a->v, b->v, scratch->u.rq);
    poly_normalize(r);
  } else
#endif
#if defined(HRSS_HAVE_VECTOR_UNIT)
      if (vec_capable()) {
    poly_mul_vec(scratch, r, a, b);
  } else
#endif
  // Fallback, non-vector case.
  {
    poly_mul_novec(scratch, r, a, b);
  }

  poly_assert_normalized(r);
}

// poly_mul_x_minus_1 sets |p| to |p|×(𝑥 - 1) mod (𝑥^n - 1).
static void poly_mul_x_minus_1(struct poly *p) {
  // Multiplying by (𝑥 - 1) means negating each coefficient and adding in
  // the value of the previous one.
  const uint16_t orig_final_coefficient = p->v[N - 1];

  for (size_t i = N - 1; i > 0; i--) {
    p->v[i] = p->v[i - 1] - p->v[i];
  }
  // Coefficient zero wraps around: it picks up the original top coefficient.
  p->v[0] = orig_final_coefficient - p->v[0];
}

// poly_mod_phiN sets |p| to |p| mod Φ(N), by subtracting the top coefficient
// from every coefficient (Φ(N) = 1 + 𝑥 + … + 𝑥^(N-1)).
static void poly_mod_phiN(struct poly *p) {
  const uint16_t coeff700 = p->v[N - 1];

  for (unsigned i = 0; i < N; i++) {
    p->v[i] -= coeff700;
  }
}

// poly_clamp reduces each coefficient mod Q (Q is a power of two, so a mask
// suffices).
static void poly_clamp(struct poly *p) {
  for (unsigned i = 0; i < N; i++) {
    p->v[i] &= Q - 1;
  }
}

// Conversion functions
// --------------------

// poly2_from_poly sets |*out| to |in| mod 2.
static void poly2_from_poly(struct poly2 *out, const struct poly *in) {
  crypto_word_t *words = out->v;
  unsigned shift = 0;
  crypto_word_t word = 0;

  // Pack the low bit of each coefficient into |out|, least-significant bit
  // first, flushing a word every BITS_PER_WORD coefficients.
  for (unsigned i = 0; i < N; i++) {
    word >>= 1;
    word |= (crypto_word_t)(in->v[i] & 1) << (BITS_PER_WORD - 1);
    shift++;

    if (shift == BITS_PER_WORD) {
      *words = word;
      words++;
      word = 0;
      shift = 0;
    }
  }

  // Flush the final, partial word.
  word >>= BITS_PER_WORD - shift;
  *words = word;
}

// mod3 treats |a| as a signed number and returns |a| mod 3.
static uint16_t mod3(int16_t a) {
  // 21845 = ⌊2^16/3⌋, so |q| approximates a/3.
  const int16_t q = ((int32_t)a * 21845) >> 16;
  int16_t ret = a - 3 * q;
  // At this point, |ret| is in {0, 1, 2, 3} and that needs to be mapped to {0,
  // 1, 2, 0}.
  return ret & ((ret & (ret >> 1)) - 1);
}

// poly3_from_poly sets |*out| to |in|.
static void poly3_from_poly(struct poly3 *out, const struct poly *in) {
  crypto_word_t *words_s = out->s.v;
  crypto_word_t *words_a = out->a.v;
  crypto_word_t s = 0;
  crypto_word_t a = 0;
  unsigned shift = 0;

  for (unsigned i = 0; i < N; i++) {
    // This duplicates the 13th bit upwards to the top of the uint16,
    // essentially treating it as a sign bit and converting into a signed int16.
    // The signed value is reduced mod 3, yielding {0, 1, 2}.
    const uint16_t v = mod3((int16_t)(in->v[i] << 3) >> 3);
    // Pack the trit into the bitsliced (s, a) representation, one bit per
    // word position.
    s >>= 1;
    const crypto_word_t s_bit = (crypto_word_t)(v & 2) << (BITS_PER_WORD - 2);
    s |= s_bit;
    a >>= 1;
    a |= s_bit | (crypto_word_t)(v & 1) << (BITS_PER_WORD - 1);
    shift++;

    if (shift == BITS_PER_WORD) {
      *words_s = s;
      words_s++;
      *words_a = a;
      words_a++;
      s = a = 0;
      shift = 0;
    }
  }

  // Flush the final, partial words.
  s >>= BITS_PER_WORD - shift;
  a >>= BITS_PER_WORD - shift;
  *words_s = s;
  *words_a = a;
}

// poly3_from_poly_checked sets |*out| to |in|, which has coefficients in {0, 1,
// Q-1}. It returns a mask indicating whether all coefficients were found to be
// in that set.
static crypto_word_t poly3_from_poly_checked(struct poly3 *out,
                                             const struct poly *in) {
  crypto_word_t *words_s = out->s.v;
  crypto_word_t *words_a = out->a.v;
  crypto_word_t s = 0;
  crypto_word_t a = 0;
  unsigned shift = 0;
  crypto_word_t ok = CONSTTIME_TRUE_W;

  for (unsigned i = 0; i < N; i++) {
    const uint16_t v = in->v[i];
    // Maps {0, 1, Q-1} to {0, 1, 2}.
    uint16_t mod3 = v & 3;
    mod3 ^= mod3 >> 1;
    // Reconstruct the value that |mod3| implies and check, in constant time,
    // that it matches the input coefficient.
    const uint16_t expected = (uint16_t)((~((mod3 >> 1) - 1)) | mod3) % Q;
    ok &= constant_time_eq_w(v, expected);

    s >>= 1;
    const crypto_word_t s_bit = (crypto_word_t)(mod3 & 2)
                                << (BITS_PER_WORD - 2);
    s |= s_bit;
    a >>= 1;
    a |= s_bit | (crypto_word_t)(mod3 & 1) << (BITS_PER_WORD - 1);
    shift++;

    if (shift == BITS_PER_WORD) {
      *words_s = s;
      words_s++;
      *words_a = a;
      words_a++;
      s = a = 0;
      shift = 0;
    }
  }

  s >>= BITS_PER_WORD - shift;
  a >>= BITS_PER_WORD - shift;
  *words_s = s;
  *words_a = a;

  return ok;
}

// poly_from_poly2 sets |*out| to |in|, widening each bit into a uint16
// coefficient.
static void poly_from_poly2(struct poly *out, const struct poly2 *in) {
  const crypto_word_t *words = in->v;
  unsigned shift = 0;
  crypto_word_t word = *words;

  for (unsigned i = 0; i < N; i++) {
    out->v[i] = word & 1;
    word >>= 1;
    shift++;

    if (shift == BITS_PER_WORD) {
      words++;
      word = *words;
      shift = 0;
    }
  }

  poly_normalize(out);
}

// poly_from_poly3 sets |*out| to |in|: each trit {0, 1, 2} is expanded to
// {0, 1, 0xffff} and the result is then normalized.
static void poly_from_poly3(struct poly *out, const struct poly3 *in) {
  const crypto_word_t *words_s = in->s.v;
  const crypto_word_t *words_a = in->a.v;
  crypto_word_t word_s = ~(*words_s);
  crypto_word_t word_a = *words_a;
  unsigned shift = 0;

  for (unsigned i = 0; i < N; i++) {
    // If the sign bit is set this produces 0xffff, otherwise zero; the low
    // bit of |word_a| then supplies the value one.
    out->v[i] = (uint16_t)(word_s & 1) - 1;
    out->v[i] |= word_a & 1;
    word_s >>= 1;
    word_a >>= 1;
    shift++;

    if (shift == BITS_PER_WORD) {
      words_s++;
      words_a++;
      word_s = ~(*words_s);
      word_a = *words_a;
      shift = 0;
    }
  }

  poly_normalize(out);
}

// Polynomial inversion
// --------------------

// poly_invert_mod2 sets |*out| to |in^-1| (i.e. such that |*out|×|in| = 1 mod
// Φ(N)), all mod 2. This isn't useful in itself, but is part of doing inversion
// mod Q.
static void poly_invert_mod2(struct poly *out, const struct poly *in) {
  // This algorithm is taken from section 7.1 of [SAFEGCD].
  struct poly2 v, r, f, g;

  // v = 0
  poly2_zero(&v);
  // r = 1
  poly2_zero(&r);
  r.v[0] = 1;
  // f = all ones.
  OPENSSL_memset(&f, 0xff, sizeof(struct poly2));
  f.v[WORDS_PER_POLY - 1] >>= BITS_PER_WORD - BITS_IN_LAST_WORD;
  // g is the reversal of |in|.
  poly2_from_poly(&g, in);
  poly2_mod_phiN(&g);
  poly2_reverse_700(&g, &g);
  int delta = 1;

  // Every iteration is data-independent: the branch-free |mask| decides
  // whether to swap (f, g) and (v, r), so the loop runs in constant time.
  for (size_t i = 0; i < (2 * (N - 1)) - 1; i++) {
    poly2_lshift1(&v);

    const crypto_word_t delta_sign_bit = (delta >> (sizeof(delta) * 8 - 1)) & 1;
    const crypto_word_t delta_is_non_negative = delta_sign_bit - 1;
    const crypto_word_t delta_is_non_zero = ~constant_time_is_zero_w(delta);
    const crypto_word_t g_has_constant_term = lsb_to_all(g.v[0]);
    const crypto_word_t mask =
        g_has_constant_term & delta_is_non_negative & delta_is_non_zero;

    const crypto_word_t c = lsb_to_all(f.v[0] & g.v[0]);

    delta = constant_time_select_int(mask, -delta, delta);
    delta++;

    poly2_cswap(&f, &g, mask);
    poly2_fmadd(&g, &f, c);
    poly2_rshift1(&g);

    poly2_cswap(&v, &r, mask);
    poly2_fmadd(&r, &v, c);
  }

  assert(delta == 0);
  assert(f.v[0] & 1);
  poly2_reverse_700(&v, &v);
  poly_from_poly2(out, &v);
  poly_assert_normalized(out);
}

// poly_invert sets |*out| to |in^-1| (i.e. such that |*out|×|in| = 1 mod Φ(N)).
static void poly_invert(struct POLY_MUL_SCRATCH *scratch, struct poly *out,
                        const struct poly *in) {
  // Inversion mod Q, which is done based on the result of inverting mod
  // 2. See [NTRUTN14] paper, bottom of page two.
  struct poly a, *b, tmp;

  // a = -in.
  for (unsigned i = 0; i < N; i++) {
    a.v[i] = -in->v[i];
  }
  poly_normalize(&a);

  // b = in^-1 mod 2.
  b = out;
  poly_invert_mod2(b, in);

  // We are working mod Q=2**13 and we need to iterate ceil(log_2(13))
  // times, which is four. (Newton iteration: each step doubles the number of
  // correct bits of the inverse.)
  for (unsigned i = 0; i < 4; i++) {
    poly_mul(scratch, &tmp, &a, b);
    tmp.v[0] += 2;
    poly_mul(scratch, b, b, &tmp);
  }

  poly_assert_normalized(out);
}

// Marshal and unmarshal functions for various basic types.
// --------------------------------------------------------

#define POLY_BYTES 1138

// poly_marshal serialises all but the final coefficient of |in| to |out|.
// Eight 13-bit coefficients are packed into each 13-byte group.
static void poly_marshal(uint8_t out[POLY_BYTES], const struct poly *in) {
  const uint16_t *p = in->v;

  for (size_t i = 0; i < N / 8; i++) {
    out[0] = p[0];
    out[1] = (0x1f & (p[0] >> 8)) | ((p[1] & 0x07) << 5);
    out[2] = p[1] >> 3;
    out[3] = (3 & (p[1] >> 11)) | ((p[2] & 0x3f) << 2);
    out[4] = (0x7f & (p[2] >> 6)) | ((p[3] & 0x01) << 7);
    out[5] = p[3] >> 1;
    out[6] = (0xf & (p[3] >> 9)) | ((p[4] & 0x0f) << 4);
    out[7] = p[4] >> 4;
    out[8] = (1 & (p[4] >> 12)) | ((p[5] & 0x7f) << 1);
    out[9] = (0x3f & (p[5] >> 7)) | ((p[6] & 0x03) << 6);
    out[10] = p[6] >> 2;
    out[11] = (7 & (p[6] >> 10)) | ((p[7] & 0x1f) << 3);
    out[12] = p[7] >> 5;

    p += 8;
    out += 13;
  }

  // There are four remaining values.
  out[0] = p[0];
  out[1] = (0x1f & (p[0] >> 8)) | ((p[1] & 0x07) << 5);
  out[2] = p[1] >> 3;
  out[3] = (3 & (p[1] >> 11)) | ((p[2] & 0x3f) << 2);
  out[4] = (0x7f & (p[2] >> 6)) | ((p[3] & 0x01) << 7);
  out[5] = p[3] >> 1;
  out[6] = 0xf & (p[3] >> 9);
}

// poly_unmarshal parses the output of |poly_marshal| and sets |out| such that
// all but the final coefficients match, and the final coefficient is calculated
// such that evaluating |out| at one results in zero. It returns one on success
// or zero if |in| is an invalid encoding.
static int poly_unmarshal(struct poly *out, const uint8_t in[POLY_BYTES]) {
  uint16_t *p = out->v;

  // Unpack eight 13-bit coefficients from each 13-byte group.
  for (size_t i = 0; i < N / 8; i++) {
    p[0] = (uint16_t)(in[0]) | (uint16_t)(in[1] & 0x1f) << 8;
    p[1] = (uint16_t)(in[1] >> 5) | (uint16_t)(in[2]) << 3 |
           (uint16_t)(in[3] & 3) << 11;
    p[2] = (uint16_t)(in[3] >> 2) | (uint16_t)(in[4] & 0x7f) << 6;
    p[3] = (uint16_t)(in[4] >> 7) | (uint16_t)(in[5]) << 1 |
           (uint16_t)(in[6] & 0xf) << 9;
    p[4] = (uint16_t)(in[6] >> 4) | (uint16_t)(in[7]) << 4 |
           (uint16_t)(in[8] & 1) << 12;
    p[5] = (uint16_t)(in[8] >> 1) | (uint16_t)(in[9] & 0x3f) << 7;
    p[6] = (uint16_t)(in[9] >> 6) | (uint16_t)(in[10]) << 2 |
           (uint16_t)(in[11] & 7) << 10;
    p[7] = (uint16_t)(in[11] >> 3) | (uint16_t)(in[12]) << 5;

    p += 8;
    in += 13;
  }

  // There are four coefficients remaining.
  p[0] = (uint16_t)(in[0]) | (uint16_t)(in[1] & 0x1f) << 8;
  p[1] = (uint16_t)(in[1] >> 5) | (uint16_t)(in[2]) << 3 |
         (uint16_t)(in[3] & 3) << 11;
  p[2] = (uint16_t)(in[3] >> 2) | (uint16_t)(in[4] & 0x7f) << 6;
  p[3] = (uint16_t)(in[4] >> 7) | (uint16_t)(in[5]) << 1 |
         (uint16_t)(in[6] & 0xf) << 9;

  // Sign-extend each 13-bit value into a full uint16.
  for (unsigned i = 0; i < N - 1; i++) {
    out->v[i] = (int16_t)(out->v[i] << 3) >> 3;
  }

  // There are four unused bits in the last byte. We require them to be zero.
  // (|in| has been advanced in the loop above, so in[6] is the final byte.)
  if ((in[6] & 0xf0) != 0) {
    return 0;
  }

  // Set the final coefficient as specifed in [HRSSNIST] 1.9.2 step 6.
  uint32_t sum = 0;
  for (size_t i = 0; i < N - 1; i++) {
    sum += out->v[i];
  }

  out->v[N - 1] = (uint16_t)(0u - sum);
  poly_normalize(out);

  return 1;
}

// mod3_from_modQ maps {0, 1, Q-1, 65535} -> {0, 1, 2, 2}. Note that |v| may
// have an invalid value when processing attacker-controlled inputs.
static uint16_t mod3_from_modQ(uint16_t v) {
  v &= 3;
  return v ^ (v >> 1);
}

// poly_marshal_mod3 marshals |in| to |out| where the coefficients of |in| are
// all in {0, 1, Q-1, 65535} and |in| is mod Φ(N). (Note that coefficients may
// have invalid values when processing attacker-controlled inputs.)
static void poly_marshal_mod3(uint8_t out[HRSS_POLY3_BYTES], const struct poly *in) { const uint16_t *coeffs = in->v; // Only 700 coefficients are marshaled because in[700] must be zero. assert(coeffs[N - 1] == 0); for (size_t i = 0; i < HRSS_POLY3_BYTES; i++) { const uint16_t coeffs0 = mod3_from_modQ(coeffs[0]); const uint16_t coeffs1 = mod3_from_modQ(coeffs[1]); const uint16_t coeffs2 = mod3_from_modQ(coeffs[2]); const uint16_t coeffs3 = mod3_from_modQ(coeffs[3]); const uint16_t coeffs4 = mod3_from_modQ(coeffs[4]); out[i] = coeffs0 + coeffs1 * 3 + coeffs2 * 9 + coeffs3 * 27 + coeffs4 * 81; coeffs += 5; } } // HRSS-specific functions // ----------------------- // poly_short_sample samples a vector of values in {0xffff (i.e. -1), 0, 1}. // This is the same action as the algorithm in [HRSSNIST] section 1.8.1, but // with HRSS-SXY the sampling algorithm is now a private detail of the // implementation (previously it had to match between two parties). This // function uses that freedom to implement a flatter distribution of values. static void poly_short_sample(struct poly *out, const uint8_t in[HRSS_SAMPLE_BYTES]) { static_assert(HRSS_SAMPLE_BYTES == N - 1, "HRSS_SAMPLE_BYTES incorrect"); for (size_t i = 0; i < N - 1; i++) { uint16_t v = mod3(in[i]); // Map {0, 1, 2} -> {0, 1, 0xffff} v |= ((v >> 1) ^ 1) - 1; out->v[i] = v; } out->v[N - 1] = 0; poly_normalize(out); } // poly_short_sample_plus performs the T+ sample as defined in [HRSSNIST], // section 1.8.2. static void poly_short_sample_plus(struct poly *out, const uint8_t in[HRSS_SAMPLE_BYTES]) { poly_short_sample(out, in); // sum (and the product in the for loop) will overflow. But that's fine // because |sum| is bound by +/- (N-2), and N < 2^15 so it works out. uint16_t sum = 0; for (unsigned i = 0; i < N - 2; i++) { sum += (unsigned)out->v[i] * out->v[i + 1]; } // If the sum is negative, flip the sign of even-positioned coefficients. (See // page 8 of [HRSS].) 
sum = ((int16_t)sum) >> 15; const uint16_t scale = sum | (~sum & 1); for (unsigned i = 0; i < N; i += 2) { out->v[i] = (unsigned)out->v[i] * scale; } poly_assert_normalized(out); } // poly_lift computes the function discussed in [HRSS], appendix B. static void poly_lift(struct poly *out, const struct poly *a) { // We wish to calculate a/(𝑥-1) mod Φ(N) over GF(3), where Φ(N) is the // Nth cyclotomic polynomial, i.e. 1 + 𝑥 + … + 𝑥^700 (since N is prime). // 1/(𝑥-1) has a fairly basic structure that we can exploit to speed this up: // // R. = PolynomialRing(GF(3)…) // inv = R.cyclotomic_polynomial(1).inverse_mod(R.cyclotomic_polynomial(n)) // list(inv)[:15] // [1, 0, 2, 1, 0, 2, 1, 0, 2, 1, 0, 2, 1, 0, 2] // // This three-element pattern of coefficients repeats for the whole // polynomial. // // Next define the overbar operator such that z̅ = z[0] + // reverse(z[1:]). (Index zero of a polynomial here is the coefficient // of the constant term. So index one is the coefficient of 𝑥 and so // on.) // // A less odd way to define this is to see that z̅ negates the indexes, // so z̅[0] = z[-0], z̅[1] = z[-1] and so on. // // The use of z̅ is that, when working mod (𝑥^701 - 1), vz[0] = , vz[1] = , …. (Where is the inner product: the sum // of the point-wise products.) Although we calculated the inverse mod // Φ(N), we can work mod (𝑥^N - 1) and reduce mod Φ(N) at the end. // (That's because (𝑥^N - 1) is a multiple of Φ(N).) // // When working mod (𝑥^N - 1), multiplication by 𝑥 is a right-rotation // of the list of coefficients. 
// // Thus we can consider what the pattern of z̅, 𝑥z̅, 𝑥^2z̅, … looks like: // // def reverse(xs): // suffix = list(xs[1:]) // suffix.reverse() // return [xs[0]] + suffix // // def rotate(xs): // return [xs[-1]] + xs[:-1] // // zoverbar = reverse(list(inv) + [0]) // xzoverbar = rotate(reverse(list(inv) + [0])) // x2zoverbar = rotate(rotate(reverse(list(inv) + [0]))) // // zoverbar[:15] // [1, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1] // xzoverbar[:15] // [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0] // x2zoverbar[:15] // [2, 0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2] // // (For a formula for z̅, see lemma two of appendix B.) // // After the first three elements have been taken care of, all then have // a repeating three-element cycle. The next value (𝑥^3z̅) involves // three rotations of the first pattern, thus the three-element cycle // lines up. However, the discontinuity in the first three elements // obviously moves to a different position. Consider the difference // between 𝑥^3z̅ and z̅: // // [x-y for (x,y) in zip(zoverbar, x3zoverbar)][:15] // [0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] // // This pattern of differences is the same for all elements, although it // obviously moves right with the rotations. // // From this, we reach algorithm eight of appendix B. // Handle the first three elements of the inner products. out->v[0] = a->v[0] + a->v[2]; out->v[1] = a->v[1]; out->v[2] = -a->v[0] + a->v[2]; // s0, s1, s2 are added into out->v[0], out->v[1], and out->v[2], // respectively. We do not compute s1 because it's just -(s0 + s1). uint16_t s0 = 0, s2 = 0; for (size_t i = 3; i < 699; i += 3) { s0 += -a->v[i] + a->v[i + 2]; // s1 += a->v[i] - a->v[i + 1]; s2 += a->v[i + 1] - a->v[i + 2]; } // Handle the fact that the three-element pattern doesn't fill the // polynomial exactly (since 701 isn't a multiple of three). s0 -= a->v[699]; // s1 += a->v[699] - a->v[700]; s2 += a->v[700]; // Note that s0 + s1 + s2 = 0. 
out->v[0] += s0; out->v[1] -= (s0 + s2); // = s1 out->v[2] += s2; // Calculate the remaining inner products by taking advantage of the // fact that the pattern repeats every three cycles and the pattern of // differences moves with the rotation. for (size_t i = 3; i < N; i++) { out->v[i] = (out->v[i - 3] - (a->v[i - 2] + a->v[i - 1] + a->v[i])); } // Reduce mod Φ(N) by subtracting a multiple of out[700] from every // element and convert to mod Q. (See above about adding twice as // subtraction.) const crypto_word_t v = out->v[700]; for (unsigned i = 0; i < N; i++) { const uint16_t vi_mod3 = mod3(out->v[i] - v); // Map {0, 1, 2} to {0, 1, 0xffff}. out->v[i] = (~((vi_mod3 >> 1) - 1)) | vi_mod3; } poly_mul_x_minus_1(out); poly_normalize(out); } namespace { struct public_key { struct poly ph; }; struct private_key { struct poly3 f, f_inverse; struct poly ph_inverse; uint8_t hmac_key[32]; }; } // namespace // public_key_from_external converts an external public key pointer into an // internal one. Externally the alignment is only specified to be eight bytes // but we need 16-byte alignment. We could annotate the external struct with // that alignment but we can only assume that malloced pointers are 8-byte // aligned in any case. (Even if the underlying malloc returns values with // 16-byte alignment, |OPENSSL_malloc| will store an 8-byte size prefix and mess // that up.) static struct public_key *public_key_from_external( struct HRSS_public_key *ext) { static_assert( sizeof(struct HRSS_public_key) >= sizeof(struct public_key) + 15, "HRSS public key too small"); return reinterpret_cast(align_pointer(ext->opaque, 16)); } // private_key_from_external does the same thing as |public_key_from_external|, // but for private keys. See the comment on that function about alignment // issues. 
static struct private_key *private_key_from_external( struct HRSS_private_key *ext) { static_assert( sizeof(struct HRSS_private_key) >= sizeof(struct private_key) + 15, "HRSS private key too small"); return reinterpret_cast(align_pointer(ext->opaque, 16)); } // malloc_align32 returns a pointer to |size| bytes of 32-byte-aligned heap and // sets |*out_ptr| to a value that can be passed to |OPENSSL_free| to release // it. It returns NULL if out of memory. static void *malloc_align32(void **out_ptr, size_t size) { void *ptr = OPENSSL_malloc(size + 31); if (!ptr) { *out_ptr = NULL; return NULL; } *out_ptr = ptr; return align_pointer(ptr, 32); } int HRSS_generate_key( struct HRSS_public_key *out_pub, struct HRSS_private_key *out_priv, const uint8_t in[HRSS_SAMPLE_BYTES + HRSS_SAMPLE_BYTES + 32]) { struct public_key *pub = public_key_from_external(out_pub); struct private_key *priv = private_key_from_external(out_priv); struct vars { struct POLY_MUL_SCRATCH scratch; struct poly f; struct poly pg_phi1; struct poly pfg_phi1; struct poly pfg_phi1_inverse; }; void *malloc_ptr; struct vars *const vars = reinterpret_cast( malloc_align32(&malloc_ptr, sizeof(struct vars))); if (!vars) { // If the caller ignores the return value the output will still be safe. // The private key output is randomised in case it's later passed to // |HRSS_encap|. memset(out_pub, 0, sizeof(struct HRSS_public_key)); RAND_bytes((uint8_t *)out_priv, sizeof(struct HRSS_private_key)); return 0; } #if !defined(NDEBUG) OPENSSL_memset(vars, 0xff, sizeof(struct vars)); #endif OPENSSL_memcpy(priv->hmac_key, in + 2 * HRSS_SAMPLE_BYTES, sizeof(priv->hmac_key)); poly_short_sample_plus(&vars->f, in); poly3_from_poly(&priv->f, &vars->f); HRSS_poly3_invert(&priv->f_inverse, &priv->f); // pg_phi1 is p (i.e. 3) × g × Φ(1) (i.e. 𝑥-1). 
poly_short_sample_plus(&vars->pg_phi1, in + HRSS_SAMPLE_BYTES); for (unsigned i = 0; i < N; i++) { vars->pg_phi1.v[i] *= 3; } poly_mul_x_minus_1(&vars->pg_phi1); poly_mul(&vars->scratch, &vars->pfg_phi1, &vars->f, &vars->pg_phi1); poly_invert(&vars->scratch, &vars->pfg_phi1_inverse, &vars->pfg_phi1); poly_mul(&vars->scratch, &pub->ph, &vars->pfg_phi1_inverse, &vars->pg_phi1); poly_mul(&vars->scratch, &pub->ph, &pub->ph, &vars->pg_phi1); poly_clamp(&pub->ph); poly_mul(&vars->scratch, &priv->ph_inverse, &vars->pfg_phi1_inverse, &vars->f); poly_mul(&vars->scratch, &priv->ph_inverse, &priv->ph_inverse, &vars->f); poly_clamp(&priv->ph_inverse); OPENSSL_free(malloc_ptr); return 1; } static const char kSharedKey[] = "shared key"; int HRSS_encap(uint8_t out_ciphertext[POLY_BYTES], uint8_t out_shared_key[32], const struct HRSS_public_key *in_pub, const uint8_t in[HRSS_SAMPLE_BYTES + HRSS_SAMPLE_BYTES]) { const struct public_key *pub = public_key_from_external((struct HRSS_public_key *)in_pub); struct vars { struct POLY_MUL_SCRATCH scratch; struct poly m, r, m_lifted; struct poly prh_plus_m; SHA256_CTX hash_ctx; uint8_t m_bytes[HRSS_POLY3_BYTES]; uint8_t r_bytes[HRSS_POLY3_BYTES]; }; void *malloc_ptr; struct vars *const vars = reinterpret_cast( malloc_align32(&malloc_ptr, sizeof(struct vars))); if (!vars) { // If the caller ignores the return value the output will still be safe. // The private key output is randomised in case it's used to encrypt and // transmit something. 
memset(out_ciphertext, 0, POLY_BYTES); RAND_bytes(out_shared_key, 32); return 0; } #if !defined(NDEBUG) OPENSSL_memset(vars, 0xff, sizeof(struct vars)); #endif poly_short_sample(&vars->m, in); poly_short_sample(&vars->r, in + HRSS_SAMPLE_BYTES); poly_lift(&vars->m_lifted, &vars->m); poly_mul(&vars->scratch, &vars->prh_plus_m, &vars->r, &pub->ph); for (unsigned i = 0; i < N; i++) { vars->prh_plus_m.v[i] += vars->m_lifted.v[i]; } poly_marshal(out_ciphertext, &vars->prh_plus_m); poly_marshal_mod3(vars->m_bytes, &vars->m); poly_marshal_mod3(vars->r_bytes, &vars->r); SHA256_Init(&vars->hash_ctx); SHA256_Update(&vars->hash_ctx, kSharedKey, sizeof(kSharedKey)); SHA256_Update(&vars->hash_ctx, vars->m_bytes, sizeof(vars->m_bytes)); SHA256_Update(&vars->hash_ctx, vars->r_bytes, sizeof(vars->r_bytes)); SHA256_Update(&vars->hash_ctx, out_ciphertext, POLY_BYTES); SHA256_Final(out_shared_key, &vars->hash_ctx); OPENSSL_free(malloc_ptr); return 1; } int HRSS_decap(uint8_t out_shared_key[HRSS_KEY_BYTES], const struct HRSS_private_key *in_priv, const uint8_t *ciphertext, size_t ciphertext_len) { const struct private_key *priv = private_key_from_external((struct HRSS_private_key *)in_priv); #if defined(_MSC_VER) // MSVC will produce this useless warning: // warning C4324: structure was padded due to alignment specifier #pragma warning(push) #pragma warning(disable : 4324) #endif struct vars { struct POLY_MUL_SCRATCH scratch; uint8_t masked_key[SHA256_CBLOCK]; SHA256_CTX hash_ctx; struct poly c; struct poly f, cf; struct poly3 cf3, m3; struct poly m, m_lifted; struct poly r; struct poly3 r3; uint8_t expected_ciphertext[HRSS_CIPHERTEXT_BYTES]; uint8_t m_bytes[HRSS_POLY3_BYTES]; uint8_t r_bytes[HRSS_POLY3_BYTES]; uint8_t shared_key[32]; }; #if defined(_MSC_VER) #pragma warning(pop) #endif void *malloc_ptr; struct vars *const vars = reinterpret_cast( malloc_align32(&malloc_ptr, sizeof(struct vars))); if (!vars) { // If the caller ignores the return value the output will still be safe. 
// The private key output is randomised in case it's used to encrypt and // transmit something. RAND_bytes(out_shared_key, HRSS_KEY_BYTES); return 0; } #if !defined(NDEBUG) OPENSSL_memset(vars, 0xff, sizeof(struct vars)); #endif // This is HMAC, expanded inline rather than using the |HMAC| function so that // we can avoid dealing with possible allocation failures and so keep this // function infallible. static_assert(sizeof(priv->hmac_key) <= sizeof(vars->masked_key), "HRSS HMAC key larger than SHA-256 block size"); for (size_t i = 0; i < sizeof(priv->hmac_key); i++) { vars->masked_key[i] = priv->hmac_key[i] ^ 0x36; } OPENSSL_memset(vars->masked_key + sizeof(priv->hmac_key), 0x36, sizeof(vars->masked_key) - sizeof(priv->hmac_key)); SHA256_Init(&vars->hash_ctx); SHA256_Update(&vars->hash_ctx, vars->masked_key, sizeof(vars->masked_key)); SHA256_Update(&vars->hash_ctx, ciphertext, ciphertext_len); uint8_t inner_digest[SHA256_DIGEST_LENGTH]; SHA256_Final(inner_digest, &vars->hash_ctx); for (size_t i = 0; i < sizeof(priv->hmac_key); i++) { vars->masked_key[i] ^= (0x5c ^ 0x36); } OPENSSL_memset(vars->masked_key + sizeof(priv->hmac_key), 0x5c, sizeof(vars->masked_key) - sizeof(priv->hmac_key)); SHA256_Init(&vars->hash_ctx); SHA256_Update(&vars->hash_ctx, vars->masked_key, sizeof(vars->masked_key)); SHA256_Update(&vars->hash_ctx, inner_digest, sizeof(inner_digest)); static_assert(HRSS_KEY_BYTES == SHA256_DIGEST_LENGTH, "HRSS shared key length incorrect"); SHA256_Final(out_shared_key, &vars->hash_ctx); // If the ciphertext is publicly invalid then a random shared key is still // returned to simply the logic of the caller, but this path is not constant // time. crypto_word_t ok = 0; if (ciphertext_len != HRSS_CIPHERTEXT_BYTES || !poly_unmarshal(&vars->c, ciphertext)) { goto out; } poly_from_poly3(&vars->f, &priv->f); poly_mul(&vars->scratch, &vars->cf, &vars->c, &vars->f); poly3_from_poly(&vars->cf3, &vars->cf); // Note that cf3 is not reduced mod Φ(N). 
That reduction is deferred. HRSS_poly3_mul(&vars->m3, &vars->cf3, &priv->f_inverse); poly_from_poly3(&vars->m, &vars->m3); poly_lift(&vars->m_lifted, &vars->m); for (unsigned i = 0; i < N; i++) { vars->r.v[i] = vars->c.v[i] - vars->m_lifted.v[i]; } poly_normalize(&vars->r); poly_mul(&vars->scratch, &vars->r, &vars->r, &priv->ph_inverse); poly_mod_phiN(&vars->r); poly_clamp(&vars->r); ok = poly3_from_poly_checked(&vars->r3, &vars->r); // [NTRUCOMP] section 5.1 includes ReEnc2 and a proof that it's valid. Rather // than do an expensive |poly_mul|, it rebuilds |c'| from |c - lift(m)| // (called |b|) with: // t = (−b(1)/N) mod Q // c' = b + tΦ(N) + lift(m) mod Q // // When polynomials are transmitted, the final coefficient is omitted and // |poly_unmarshal| sets it such that f(1) == 0. Thus c(1) == 0. Also, // |poly_lift| multiplies the result by (x-1) and therefore evaluating a // lifted polynomial at 1 is also zero. Thus lift(m)(1) == 0 and so // (c - lift(m))(1) == 0. // // Although we defer the reduction above, |b| is conceptually reduced mod // Φ(N). In order to do that reduction one subtracts |c[N-1]| from every // coefficient. Therefore b(1) = -c[N-1]×N. The value of |t|, above, then is // just recovering |c[N-1]|, and adding tΦ(N) is simply undoing the reduction. // Therefore b + tΦ(N) + lift(m) = c by construction and we don't need to // recover |c| at all so long as we do the checks in // |poly3_from_poly_checked|. // // The |poly_marshal| here then is just confirming that |poly_unmarshal| is // strict and could be omitted. 
static_assert(HRSS_CIPHERTEXT_BYTES == POLY_BYTES, "ciphertext is the wrong size"); assert(ciphertext_len == sizeof(vars->expected_ciphertext)); poly_marshal(vars->expected_ciphertext, &vars->c); poly_marshal_mod3(vars->m_bytes, &vars->m); poly_marshal_mod3(vars->r_bytes, &vars->r); ok &= constant_time_is_zero_w( CRYPTO_memcmp(ciphertext, vars->expected_ciphertext, sizeof(vars->expected_ciphertext))); SHA256_Init(&vars->hash_ctx); SHA256_Update(&vars->hash_ctx, kSharedKey, sizeof(kSharedKey)); SHA256_Update(&vars->hash_ctx, vars->m_bytes, sizeof(vars->m_bytes)); SHA256_Update(&vars->hash_ctx, vars->r_bytes, sizeof(vars->r_bytes)); SHA256_Update(&vars->hash_ctx, vars->expected_ciphertext, sizeof(vars->expected_ciphertext)); SHA256_Final(vars->shared_key, &vars->hash_ctx); for (unsigned i = 0; i < sizeof(vars->shared_key); i++) { out_shared_key[i] = constant_time_select_8(ok, vars->shared_key[i], out_shared_key[i]); } out: OPENSSL_free(malloc_ptr); return 1; } void HRSS_marshal_public_key(uint8_t out[HRSS_PUBLIC_KEY_BYTES], const struct HRSS_public_key *in_pub) { const struct public_key *pub = public_key_from_external((struct HRSS_public_key *)in_pub); poly_marshal(out, &pub->ph); } int HRSS_parse_public_key(struct HRSS_public_key *out, const uint8_t in[HRSS_PUBLIC_KEY_BYTES]) { struct public_key *pub = public_key_from_external(out); if (!poly_unmarshal(&pub->ph, in)) { return 0; } OPENSSL_memset(&pub->ph.v[N], 0, 3 * sizeof(uint16_t)); return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/hrss/internal.h ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_HRSS_INTERNAL_H #define OPENSSL_HEADER_HRSS_INTERNAL_H #include #include "../internal.h" #if defined(__cplusplus) extern "C" { #endif #define N 701 #define BITS_PER_WORD (sizeof(crypto_word_t) * 8) #define WORDS_PER_POLY ((N + BITS_PER_WORD - 1) / BITS_PER_WORD) #define BITS_IN_LAST_WORD (N % BITS_PER_WORD) struct poly2 { crypto_word_t v[WORDS_PER_POLY]; }; struct poly3 { struct poly2 s, a; }; OPENSSL_EXPORT void HRSS_poly3_mul(struct poly3 *out, const struct poly3 *x, const struct poly3 *y); OPENSSL_EXPORT void HRSS_poly3_invert(struct poly3 *out, const struct poly3 *in); // On x86-64, we can use the AVX2 code from [HRSS]. (The authors have given // explicit permission for this and signed a CLA.) However it's 57KB of object // code, so it's not used if |OPENSSL_SMALL| is defined. #if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_SMALL) && \ defined(OPENSSL_X86_64) && defined(OPENSSL_LINUX) #define POLY_RQ_MUL_ASM // POLY_MUL_RQ_SCRATCH_SPACE is the number of bytes of scratch space needed // by the assembly function poly_Rq_mul. #define POLY_MUL_RQ_SCRATCH_SPACE (6144 + 6144 + 12288 + 512 + 9408 + 32) // poly_Rq_mul is defined in assembly. Inputs and outputs must be 16-byte- // aligned. extern void poly_Rq_mul( uint16_t r[N + 3], const uint16_t a[N + 3], const uint16_t b[N + 3], // The following should be `scratch[POLY_MUL_RQ_SCRATCH_SPACE]` but // GCC 11.1 has a bug with unions that breaks that. 
uint8_t scratch[]); #endif #if defined(__cplusplus) } // extern "C" #endif #endif // !OPENSSL_HEADER_HRSS_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/internal.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_CRYPTO_INTERNAL_H #define OPENSSL_HEADER_CRYPTO_INTERNAL_H #include #include #include #include #include #include #include #if defined(BORINGSSL_CONSTANT_TIME_VALIDATION) #include #endif #if defined(BORINGSSL_FIPS_BREAK_TESTS) #include #endif #if defined(OPENSSL_THREADS) && \ (!defined(OPENSSL_WINDOWS) || defined(__MINGW32__)) #include #define OPENSSL_PTHREADS #endif #if defined(OPENSSL_THREADS) && !defined(OPENSSL_PTHREADS) && \ defined(OPENSSL_WINDOWS) #define OPENSSL_WINDOWS_THREADS #endif #if defined(OPENSSL_THREADS) #include #endif #if defined(OPENSSL_WINDOWS_THREADS) OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include OPENSSL_MSVC_PRAGMA(warning(pop)) #endif #if defined(__cplusplus) extern "C" { #endif #if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_STATIC_ARMCAP) && \ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)) // x86, x86_64, and the ARMs need to record the result of a cpuid/getauxval call // for the asm to work correctly, unless compiled without asm code. #define NEED_CPUID // OPENSSL_cpuid_setup initializes the platform-specific feature cache. This // function should not be called directly. Call |OPENSSL_init_cpuid| instead. void OPENSSL_cpuid_setup(void); // OPENSSL_init_cpuid initializes the platform-specific feature cache, if // needed. This function is idempotent and may be called concurrently. 
void OPENSSL_init_cpuid(void); #else inline void OPENSSL_init_cpuid(void) {} #endif #if (defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)) && \ !defined(OPENSSL_STATIC_ARMCAP) // OPENSSL_get_armcap_pointer_for_test returns a pointer to |OPENSSL_armcap_P| // for unit tests. Any modifications to the value must be made before any other // function call in BoringSSL. OPENSSL_EXPORT uint32_t *OPENSSL_get_armcap_pointer_for_test(void); #endif // On non-MSVC 64-bit targets, we expect __uint128_t support. This includes // clang-cl, which defines both __clang__ and _MSC_VER. #if (!defined(_MSC_VER) || defined(__clang__)) && defined(OPENSSL_64_BIT) #define BORINGSSL_HAS_UINT128 typedef __int128_t int128_t; typedef __uint128_t uint128_t; // __uint128_t division depends on intrinsics in the compiler runtime. Those // intrinsics are missing in clang-cl (https://crbug.com/787617) and nanolibc. // These may be bugs in the toolchain definition, but just disable it for now. // EDK2's toolchain is missing __udivti3 (b/339380897) so cannot support // 128-bit division currently. #if !defined(_MSC_VER) && !defined(OPENSSL_NANOLIBC) && \ !defined(__EDK2_BORINGSSL__) #define BORINGSSL_CAN_DIVIDE_UINT128 #endif #endif #define OPENSSL_ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) #if defined(__clang__) && __clang_major__ >= 5 #if __has_attribute(fallthrough) #define OPENSSL_CAN_USE_ATTR_FALLTHROUGH #endif #endif // GCC-like compilers indicate SSE2 with |__SSE2__|. MSVC leaves the caller to // know that x86_64 has SSE2, and uses _M_IX86_FP to indicate SSE2 on x86. // https://learn.microsoft.com/en-us/cpp/preprocessor/predefined-macros?view=msvc-170 #if defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || \ (defined(_M_IX86_FP) && _M_IX86_FP >= 2) #define OPENSSL_SSE2 #endif #if defined(OPENSSL_X86) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_SSE2) #error \ "x86 assembly requires SSE2. 
Build with -msse2 (recommended), or disable assembly optimizations with -DOPENSSL_NO_ASM." #endif // For convenience in testing the fallback code, we allow disabling SSE2 // intrinsics via |OPENSSL_NO_SSE2_FOR_TESTING|. We require SSE2 on x86 and // x86_64, so we would otherwise need to test such code on a non-x86 platform. // // This does not remove the above requirement for SSE2 support with assembly // optimizations. It only disables some intrinsics-based optimizations so that // we can test the fallback code on CI. #if defined(OPENSSL_SSE2) && defined(OPENSSL_NO_SSE2_FOR_TESTING) #undef OPENSSL_SSE2 #endif #if defined(__GNUC__) || defined(__clang__) #define OPENSSL_ATTR_CONST __attribute__((const)) #else #define OPENSSL_ATTR_CONST #endif #if defined(BORINGSSL_MALLOC_FAILURE_TESTING) // OPENSSL_reset_malloc_counter_for_testing, when malloc testing is enabled, // resets the internal malloc counter, to simulate further malloc failures. This // should be called in between independent tests, at a point where failure from // a previous test will not impact subsequent ones. OPENSSL_EXPORT void OPENSSL_reset_malloc_counter_for_testing(void); // OPENSSL_disable_malloc_failures_for_testing, when malloc testing is enabled, // disables simulated malloc failures. Calls to |OPENSSL_malloc| will not // increment the malloc counter or synthesize failures. This may be used to skip // simulating malloc failures in some region of code. OPENSSL_EXPORT void OPENSSL_disable_malloc_failures_for_testing(void); // OPENSSL_enable_malloc_failures_for_testing, when malloc testing is enabled, // re-enables simulated malloc failures. 
OPENSSL_EXPORT void OPENSSL_enable_malloc_failures_for_testing(void);
#else
// When malloc testing is disabled these hooks compile to no-ops, so call
// sites do not need to be guarded by BORINGSSL_MALLOC_FAILURE_TESTING.
inline void OPENSSL_reset_malloc_counter_for_testing(void) {}
inline void OPENSSL_disable_malloc_failures_for_testing(void) {}
inline void OPENSSL_enable_malloc_failures_for_testing(void) {}
#endif

// OPENSSL_HAS_BUILTIN(x) is a portable wrapper around __has_builtin; it
// evaluates to 0 on compilers that do not provide __has_builtin at all.
#if defined(__has_builtin)
#define OPENSSL_HAS_BUILTIN(x) __has_builtin(x)
#else
#define OPENSSL_HAS_BUILTIN(x) 0
#endif


// Pointer utility functions.

// buffers_alias returns one if |a| and |b| alias and zero otherwise.
static inline int buffers_alias(const void *a, size_t a_bytes, const void *b,
                                size_t b_bytes) {
  // Cast |a| and |b| to integers. In C, pointer comparisons between unrelated
  // objects are undefined whereas pointer to integer conversions are merely
  // implementation-defined. We assume the implementation defined it in a sane
  // way.
  uintptr_t a_u = (uintptr_t)a;
  uintptr_t b_u = (uintptr_t)b;
  // The half-open ranges [a_u, a_u + a_bytes) and [b_u, b_u + b_bytes)
  // intersect exactly when each one starts before the other one ends.
  return a_u + a_bytes > b_u && b_u + b_bytes > a_u;
}

// align_pointer returns |ptr|, advanced to |alignment|. |alignment| must be a
// power of two, and |ptr| must have at least |alignment - 1| bytes of scratch
// space.
static inline void *align_pointer(void *ptr, size_t alignment) {
  // |alignment| must be a power of two.
  assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
  // Instead of aligning |ptr| as a |uintptr_t| and casting back, compute the
  // offset and advance in pointer space. C guarantees that casting from
  // pointer to |uintptr_t| and back gives the same pointer, but general
  // integer-to-pointer conversions are implementation-defined. GCC does
  // define it in the useful way, but this makes fewer assumptions.
  uintptr_t offset = (0u - (uintptr_t)ptr) & (alignment - 1);
  ptr = (char *)ptr + offset;
  assert(((uintptr_t)ptr & (alignment - 1)) == 0);
  return ptr;
}


// Constant-time utility functions.
//
// The following methods return a bitmask of all ones (0xff...f) for true and
// 0 for false. This is useful for choosing a value based on the result of a
// conditional in constant time. For example,
//
//   if (a < b) {
//     c = a;
//   } else {
//     c = b;
//   }
//
// can be written as
//
//   crypto_word_t lt = constant_time_lt_w(a, b);
//   c = constant_time_select_w(lt, a, b);

// crypto_word_t is the type that most constant-time functions use. Ideally we
// would like it to be |size_t|, but NaCl builds in 64-bit mode with 32-bit
// pointers, which means that |size_t| can be 32 bits when |BN_ULONG| is 64
// bits. Since we want to be able to do constant-time operations on a
// |BN_ULONG|, |crypto_word_t| is defined as an unsigned value with the native
// word length.
#if defined(OPENSSL_64_BIT)
typedef uint64_t crypto_word_t;
#elif defined(OPENSSL_32_BIT)
typedef uint32_t crypto_word_t;
#else
#error "Must define either OPENSSL_32_BIT or OPENSSL_64_BIT"
#endif

// Canonical all-ones / all-zeros masks at word and byte width.
#define CONSTTIME_TRUE_W ~((crypto_word_t)0)
#define CONSTTIME_FALSE_W ((crypto_word_t)0)
#define CONSTTIME_TRUE_8 ((uint8_t)0xff)
#define CONSTTIME_FALSE_8 ((uint8_t)0)

// value_barrier_w returns |a|, but prevents GCC and Clang from reasoning
// about the returned value. This is used to mitigate compilers undoing
// constant-time code, until we can express our requirements directly in the
// language.
//
// Note the compiler is aware that |value_barrier_w| has no side effects and
// always has the same output for a given input. This allows it to eliminate
// dead code, move computations across loops, and vectorize.
static inline crypto_word_t value_barrier_w(crypto_word_t a) {
#if defined(__GNUC__) || defined(__clang__)
  // An empty asm with a "+r" constraint forces |a| through a register the
  // optimizer cannot see through; it emits no instructions.
  __asm__("" : "+r"(a) : /* no inputs */);
#endif
  return a;
}

// value_barrier_u32 behaves like |value_barrier_w| but takes a |uint32_t|.
static inline uint32_t value_barrier_u32(uint32_t a) {
#if defined(__GNUC__) || defined(__clang__)
  __asm__("" : "+r"(a) : /* no inputs */);
#endif
  return a;
}

// value_barrier_u64 behaves like |value_barrier_w| but takes a |uint64_t|.
static inline uint64_t value_barrier_u64(uint64_t a) {
#if defined(__GNUC__) || defined(__clang__)
  __asm__("" : "+r"(a) : /* no inputs */);
#endif
  return a;
}

// |value_barrier_u8| could be defined as above, but compilers other than
// clang seem to still materialize 0x00..00MM instead of reusing 0x??..??MM.

// constant_time_msb_w returns the given value with the MSB copied to all the
// other bits.
static inline crypto_word_t constant_time_msb_w(crypto_word_t a) {
  // Shifting the sign bit down yields 0 or 1; negating yields 0 or an
  // all-ones word.
  return 0u - (a >> (sizeof(a) * 8 - 1));
}

// constant_time_lt_w returns 0xff..f if a < b and 0 otherwise.
static inline crypto_word_t constant_time_lt_w(crypto_word_t a,
                                               crypto_word_t b) {
  // Consider the two cases of the problem:
  //   msb(a) == msb(b): a < b iff the MSB of a - b is set.
  //   msb(a) != msb(b): a < b iff the MSB of b is set.
  //
  // If msb(a) == msb(b) then the following evaluates as:
  //   msb(a^((a^b)|((a-b)^a))) ==
  //   msb(a^((a-b) ^ a))       ==   (because msb(a^b) == 0)
  //   msb(a^a^(a-b))           ==   (rearranging)
  //   msb(a-b)                      (because for all x, x^x == 0)
  //
  // Else, if msb(a) != msb(b) then the following evaluates as:
  //   msb(a^((a^b)|((a-b)^a))) ==
  //   msb(a^(M | ((a-b)^a)))   ==   (because msb(a^b) == 1 and M
  //                                  represents a value s.t. msb(M) = 1)
  //   msb(a^M)                 ==   (because ORing with 1 results in 1)
  //   msb(b)
  //
  // Here is an SMT-LIB verification of this formula:
  //
  //   (define-fun lt ((a (_ BitVec 32)) (b (_ BitVec 32))) (_ BitVec 32)
  //     (bvxor a (bvor (bvxor a b) (bvxor (bvsub a b) a)))
  //   )
  //
  //   (declare-fun a () (_ BitVec 32))
  //   (declare-fun b () (_ BitVec 32))
  //
  //   (assert (not (= (= #x00000001 (bvlshr (lt a b) #x0000001f))
  //                   (bvult a b))))
  //   (check-sat)
  //   (get-model)
  return constant_time_msb_w(a ^ ((a ^ b) | ((a - b) ^ a)));
}

// constant_time_lt_8 acts like |constant_time_lt_w| but returns an 8-bit
// mask.
static inline uint8_t constant_time_lt_8(crypto_word_t a, crypto_word_t b) {
  return (uint8_t)(constant_time_lt_w(a, b));
}

// constant_time_ge_w returns 0xff..f if a >= b and 0 otherwise.
static inline crypto_word_t constant_time_ge_w(crypto_word_t a,
                                               crypto_word_t b) {
  // a >= b is the negation of a < b, and the masks are bitwise complements.
  return ~constant_time_lt_w(a, b);
}

// constant_time_ge_8 acts like |constant_time_ge_w| but returns an 8-bit
// mask.
static inline uint8_t constant_time_ge_8(crypto_word_t a, crypto_word_t b) {
  return (uint8_t)(constant_time_ge_w(a, b));
}

// constant_time_is_zero returns 0xff..f if a == 0 and 0 otherwise.
static inline crypto_word_t constant_time_is_zero_w(crypto_word_t a) {
  // Only a == 0 sets the MSB of both |~a| and |a - 1| (the latter by
  // wrap-around to the all-ones word).
  //
  // Here is an SMT-LIB verification of this formula:
  //
  //   (define-fun is_zero ((a (_ BitVec 32))) (_ BitVec 32)
  //     (bvand (bvnot a) (bvsub a #x00000001))
  //   )
  //
  //   (declare-fun a () (_ BitVec 32))
  //
  //   (assert (not (= (= #x00000001 (bvlshr (is_zero a) #x0000001f))
  //                   (= a #x00000000))))
  //   (check-sat)
  //   (get-model)
  return constant_time_msb_w(~a & (a - 1));
}

// constant_time_is_zero_8 acts like |constant_time_is_zero_w| but returns an
// 8-bit mask.
static inline uint8_t constant_time_is_zero_8(crypto_word_t a) {
  return (uint8_t)(constant_time_is_zero_w(a));
}

// constant_time_eq_w returns 0xff..f if a == b and 0 otherwise.
static inline crypto_word_t constant_time_eq_w(crypto_word_t a,
                                               crypto_word_t b) {
  // a == b exactly when a ^ b == 0.
  return constant_time_is_zero_w(a ^ b);
}

// constant_time_eq_8 acts like |constant_time_eq_w| but returns an 8-bit
// mask.
static inline uint8_t constant_time_eq_8(crypto_word_t a, crypto_word_t b) {
  return (uint8_t)(constant_time_eq_w(a, b));
}

// constant_time_eq_int acts like |constant_time_eq_w| but works on int
// values.
static inline crypto_word_t constant_time_eq_int(int a, int b) {
  return constant_time_eq_w((crypto_word_t)(a), (crypto_word_t)(b));
}

// constant_time_eq_int_8 acts like |constant_time_eq_int| but returns an
// 8-bit mask.
static inline uint8_t constant_time_eq_int_8(int a, int b) {
  return constant_time_eq_8((crypto_word_t)(a), (crypto_word_t)(b));
}

// constant_time_select_w returns (mask & a) | (~mask & b).
// When |mask| is all 1s or all 0s (as returned by the methods above), the
// select methods return either |a| (if |mask| is nonzero) or |b| (if |mask|
// is zero).
static inline crypto_word_t constant_time_select_w(crypto_word_t mask,
                                                   crypto_word_t a,
                                                   crypto_word_t b) {
  // Clang recognizes this pattern as a select. While it usually transforms it
  // to a cmov, it sometimes further transforms it into a branch, which we do
  // not want.
  //
  // Hiding the value of the mask from the compiler evades this
  // transformation.
  mask = value_barrier_w(mask);
  return (mask & a) | (~mask & b);
}

// constant_time_select_8 acts like |constant_time_select| but operates on
// 8-bit values.
static inline uint8_t constant_time_select_8(crypto_word_t mask, uint8_t a,
                                             uint8_t b) {
  // |mask| is a word instead of |uint8_t| to avoid materializing 0x000..0MM
  // Making both |mask| and its value barrier |uint8_t| would allow the
  // compiler to materialize 0x????..?MM instead, but only clang is that
  // clever. However, vectorization of bitwise operations seems to work better
  // on |uint8_t| than a mix of |uint64_t| and |uint8_t|, so |m| is cast to
  // |uint8_t| after the value barrier but before the bitwise operations.
  uint8_t m = value_barrier_w(mask);
  return (m & a) | (~m & b);
}

// constant_time_select_int acts like |constant_time_select| but operates on
// ints.
static inline int constant_time_select_int(crypto_word_t mask, int a, int b) {
  return (int)(constant_time_select_w(mask, (crypto_word_t)(a),
                                      (crypto_word_t)(b)));
}

// constant_time_conditional_memcpy copies |n| bytes from |src| to |dst| if
// |mask| is 0xff..ff and does nothing if |mask| is 0. The |n|-byte memory
// ranges at |dst| and |src| must not overlap, as when calling |memcpy|.
static inline void constant_time_conditional_memcpy(void *dst, const void *src,
                                                    const size_t n,
                                                    const crypto_word_t mask) {
  assert(!buffers_alias(dst, n, src, n));
  uint8_t *out = (uint8_t *)dst;
  const uint8_t *in = (const uint8_t *)src;
  // Select byte-by-byte so no branch or memory access depends on |mask|.
  for (size_t i = 0; i < n; i++) {
    out[i] = constant_time_select_8(mask, in[i], out[i]);
  }
}

// constant_time_conditional_memxor xors |n| bytes from |src| to |dst| if
// |mask| is 0xff..ff and does nothing if |mask| is 0. The |n|-byte memory
// ranges at |dst| and |src| must not overlap, as when calling |memcpy|.
static inline void constant_time_conditional_memxor(void *dst, const void *src,
                                                    size_t n,
                                                    const crypto_word_t mask) {
  assert(!buffers_alias(dst, n, src, n));
  uint8_t *out = (uint8_t *)dst;
  const uint8_t *in = (const uint8_t *)src;
#if defined(__GNUC__) && !defined(__clang__)
  // gcc 13.2.0 doesn't automatically vectorize this loop regardless of
  // barrier, so process 32-byte chunks explicitly with GCC's vector
  // extension.
  typedef uint8_t v32u8
      __attribute__((vector_size(32), aligned(1), may_alias));
  size_t n_vec = n & ~(size_t)31;
  v32u8 masks = ((uint8_t)mask - (v32u8){});  // broadcast
  for (size_t i = 0; i < n_vec; i += 32) {
    *(v32u8 *)&out[i] ^= masks & *(v32u8 *)&in[i];
  }
  out += n_vec;
  n -= n_vec;
#endif
  // Scalar tail (or the whole buffer when the vector path is unavailable).
  for (size_t i = 0; i < n; i++) {
    out[i] ^= value_barrier_w(mask) & in[i];
  }
}

#if defined(BORINGSSL_CONSTANT_TIME_VALIDATION)

// CONSTTIME_SECRET takes a pointer and a number of bytes and marks that
// region of memory as secret. Secret data is tracked as it flows to registers
// and other parts of a memory. If secret data is used as a condition for a
// branch, or as a memory index, it will trigger warnings in valgrind.
#define CONSTTIME_SECRET(ptr, len) VALGRIND_MAKE_MEM_UNDEFINED(ptr, len)

// CONSTTIME_DECLASSIFY takes a pointer and a number of bytes and marks that
// region of memory as public. Public data is not subject to constant-time
// rules.
#define CONSTTIME_DECLASSIFY(ptr, len) VALGRIND_MAKE_MEM_DEFINED(ptr, len) #else #define CONSTTIME_SECRET(ptr, len) #define CONSTTIME_DECLASSIFY(ptr, len) #endif // BORINGSSL_CONSTANT_TIME_VALIDATION static inline crypto_word_t constant_time_declassify_w(crypto_word_t v) { // Return |v| through a value barrier to be safe. Valgrind-based constant-time // validation is partly to check the compiler has not undone any constant-time // work. Any place |BORINGSSL_CONSTANT_TIME_VALIDATION| influences // optimizations, this validation is inaccurate. // // However, by sending pointers through valgrind, we likely inhibit escape // analysis. On local variables, particularly booleans, we likely // significantly impact optimizations. // // Thus, to be safe, stick a value barrier, in hopes of comparably inhibiting // compiler analysis. CONSTTIME_DECLASSIFY(&v, sizeof(v)); return value_barrier_w(v); } static inline int constant_time_declassify_int(int v) { static_assert(sizeof(uint32_t) == sizeof(int), "int is not the same size as uint32_t"); // See comment above. CONSTTIME_DECLASSIFY(&v, sizeof(v)); return value_barrier_u32(v); } // declassify_assert behaves like |assert| but declassifies the result of // evaluating |expr|. This allows the assertion to branch on the (presumably // public) result, but still ensures that values leading up to the computation // were secret. #define declassify_assert(expr) assert(constant_time_declassify_int(expr)) // Thread-safe initialisation. #if !defined(OPENSSL_THREADS) typedef uint32_t CRYPTO_once_t; #define CRYPTO_ONCE_INIT 0 #elif defined(OPENSSL_WINDOWS_THREADS) typedef INIT_ONCE CRYPTO_once_t; #define CRYPTO_ONCE_INIT INIT_ONCE_STATIC_INIT #elif defined(OPENSSL_PTHREADS) typedef pthread_once_t CRYPTO_once_t; #define CRYPTO_ONCE_INIT PTHREAD_ONCE_INIT #else #error "Unknown threading library" #endif // CRYPTO_once calls |init| exactly once per process. 
This is thread-safe: if // concurrent threads call |CRYPTO_once| with the same |CRYPTO_once_t| argument // then they will block until |init| completes, but |init| will have only been // called once. // // The |once| argument must be a |CRYPTO_once_t| that has been initialised with // the value |CRYPTO_ONCE_INIT|. OPENSSL_EXPORT void CRYPTO_once(CRYPTO_once_t *once, void (*init)(void)); // Atomics. // // The following functions provide an API analogous to from C11 // and abstract between a few variations on atomics we need to support. #if defined(OPENSSL_THREADS) using CRYPTO_atomic_u32 = std::atomic; static_assert(sizeof(CRYPTO_atomic_u32) == sizeof(uint32_t), ""); inline uint32_t CRYPTO_atomic_load_u32(const CRYPTO_atomic_u32 *val) { return val->load(std::memory_order_seq_cst); } inline bool CRYPTO_atomic_compare_exchange_weak_u32(CRYPTO_atomic_u32 *val, uint32_t *expected, uint32_t desired) { return val->compare_exchange_weak( *expected, desired, std::memory_order_seq_cst, std::memory_order_seq_cst); } inline void CRYPTO_atomic_store_u32(CRYPTO_atomic_u32 *val, uint32_t desired) { val->store(desired, std::memory_order_seq_cst); } #else typedef uint32_t CRYPTO_atomic_u32; inline uint32_t CRYPTO_atomic_load_u32(CRYPTO_atomic_u32 *val) { return *val; } inline int CRYPTO_atomic_compare_exchange_weak_u32(CRYPTO_atomic_u32 *val, uint32_t *expected, uint32_t desired) { if (*val != *expected) { *expected = *val; return 0; } *val = desired; return 1; } inline void CRYPTO_atomic_store_u32(CRYPTO_atomic_u32 *val, uint32_t desired) { *val = desired; } #endif // See the comment in the |__cplusplus| section above. static_assert(sizeof(CRYPTO_atomic_u32) == sizeof(uint32_t), "CRYPTO_atomic_u32 does not match uint32_t size"); static_assert(alignof(CRYPTO_atomic_u32) == alignof(uint32_t), "CRYPTO_atomic_u32 does not match uint32_t alignment"); // Reference counting. // CRYPTO_REFCOUNT_MAX is the value at which the reference count saturates. 
#define CRYPTO_REFCOUNT_MAX 0xffffffff // CRYPTO_refcount_inc atomically increments the value at |*count| unless the // value would overflow. It's safe for multiple threads to concurrently call // this or |CRYPTO_refcount_dec_and_test_zero| on the same // |CRYPTO_refcount_t|. OPENSSL_EXPORT void CRYPTO_refcount_inc(CRYPTO_refcount_t *count); // CRYPTO_refcount_dec_and_test_zero tests the value at |*count|: // if it's zero, it crashes the address space. // if it's the maximum value, it returns zero. // otherwise, it atomically decrements it and returns one iff the resulting // value is zero. // // It's safe for multiple threads to concurrently call this or // |CRYPTO_refcount_inc| on the same |CRYPTO_refcount_t|. OPENSSL_EXPORT int CRYPTO_refcount_dec_and_test_zero(CRYPTO_refcount_t *count); // Locks. #if !defined(OPENSSL_THREADS) typedef struct crypto_mutex_st { char padding; // Empty structs have different sizes in C and C++. } CRYPTO_MUTEX; #define CRYPTO_MUTEX_INIT \ { 0 } #elif defined(OPENSSL_WINDOWS_THREADS) typedef SRWLOCK CRYPTO_MUTEX; #define CRYPTO_MUTEX_INIT SRWLOCK_INIT #elif defined(OPENSSL_PTHREADS) typedef pthread_rwlock_t CRYPTO_MUTEX; #define CRYPTO_MUTEX_INIT PTHREAD_RWLOCK_INITIALIZER #else #error "Unknown threading library" #endif // CRYPTO_MUTEX_init initialises |lock|. If |lock| is a static variable, use a // |CRYPTO_MUTEX_INIT|. OPENSSL_EXPORT void CRYPTO_MUTEX_init(CRYPTO_MUTEX *lock); // CRYPTO_MUTEX_lock_read locks |lock| such that other threads may also have a // read lock, but none may have a write lock. OPENSSL_EXPORT void CRYPTO_MUTEX_lock_read(CRYPTO_MUTEX *lock); // CRYPTO_MUTEX_lock_write locks |lock| such that no other thread has any type // of lock on it. OPENSSL_EXPORT void CRYPTO_MUTEX_lock_write(CRYPTO_MUTEX *lock); // CRYPTO_MUTEX_unlock_read unlocks |lock| for reading. OPENSSL_EXPORT void CRYPTO_MUTEX_unlock_read(CRYPTO_MUTEX *lock); // CRYPTO_MUTEX_unlock_write unlocks |lock| for writing. 
OPENSSL_EXPORT void CRYPTO_MUTEX_unlock_write(CRYPTO_MUTEX *lock); // CRYPTO_MUTEX_cleanup releases all resources held by |lock|. OPENSSL_EXPORT void CRYPTO_MUTEX_cleanup(CRYPTO_MUTEX *lock); #if defined(__cplusplus) extern "C++" { BSSL_NAMESPACE_BEGIN namespace internal { // MutexLockBase is a RAII helper for CRYPTO_MUTEX locking. template class MutexLockBase { public: explicit MutexLockBase(CRYPTO_MUTEX *mu) : mu_(mu) { assert(mu_ != nullptr); LockFunc(mu_); } ~MutexLockBase() { ReleaseFunc(mu_); } MutexLockBase(const MutexLockBase &) = delete; MutexLockBase &operator=(const MutexLockBase &) = delete; private: CRYPTO_MUTEX *const mu_; }; } // namespace internal using MutexWriteLock = internal::MutexLockBase; using MutexReadLock = internal::MutexLockBase; BSSL_NAMESPACE_END } // extern "C++" #endif // defined(__cplusplus) // Thread local storage. // thread_local_data_t enumerates the types of thread-local data that can be // stored. typedef enum { OPENSSL_THREAD_LOCAL_ERR = 0, OPENSSL_THREAD_LOCAL_RAND, OPENSSL_THREAD_LOCAL_FIPS_COUNTERS, OPENSSL_THREAD_LOCAL_FIPS_SERVICE_INDICATOR_STATE, OPENSSL_THREAD_LOCAL_TEST, NUM_OPENSSL_THREAD_LOCALS, } thread_local_data_t; // thread_local_destructor_t is the type of a destructor function that will be // called when a thread exits and its thread-local storage needs to be freed. typedef void (*thread_local_destructor_t)(void *); // CRYPTO_get_thread_local gets the pointer value that is stored for the // current thread for the given index, or NULL if none has been set. OPENSSL_EXPORT void *CRYPTO_get_thread_local(thread_local_data_t value); // CRYPTO_set_thread_local sets a pointer value for the current thread at the // given index. This function should only be called once per thread for a given // |index|: rather than update the pointer value itself, update the data that // is pointed to. // // The destructor function will be called when a thread exits to free this // thread-local data. 
All calls to |CRYPTO_set_thread_local| with the same // |index| should have the same |destructor| argument. The destructor may be // called with a NULL argument if a thread that never set a thread-local // pointer for |index|, exits. The destructor may be called concurrently with // different arguments. // // This function returns one on success or zero on error. If it returns zero // then |destructor| has been called with |value| already. OPENSSL_EXPORT int CRYPTO_set_thread_local( thread_local_data_t index, void *value, thread_local_destructor_t destructor); // ex_data typedef struct crypto_ex_data_func_st CRYPTO_EX_DATA_FUNCS; // CRYPTO_EX_DATA_CLASS tracks the ex_indices registered for a type which // supports ex_data. It should defined as a static global within the module // which defines that type. typedef struct { CRYPTO_MUTEX lock; // funcs is a linked list of |CRYPTO_EX_DATA_FUNCS| structures. It may be // traversed without serialization only up to |num_funcs|. last points to the // final entry of |funcs|, or NULL if empty. CRYPTO_EX_DATA_FUNCS *funcs, *last; // num_funcs is the number of entries in |funcs|. CRYPTO_atomic_u32 num_funcs; // num_reserved is one if the ex_data index zero is reserved for legacy // |TYPE_get_app_data| functions. uint8_t num_reserved; } CRYPTO_EX_DATA_CLASS; #define CRYPTO_EX_DATA_CLASS_INIT \ { CRYPTO_MUTEX_INIT, NULL, NULL, {}, 0 } #define CRYPTO_EX_DATA_CLASS_INIT_WITH_APP_DATA \ { CRYPTO_MUTEX_INIT, NULL, NULL, {}, 1 } // CRYPTO_get_ex_new_index_ex allocates a new index for |ex_data_class|. Each // class of object should provide a wrapper function that uses the correct // |CRYPTO_EX_DATA_CLASS|. It returns the new index on success and -1 on error. OPENSSL_EXPORT int CRYPTO_get_ex_new_index_ex( CRYPTO_EX_DATA_CLASS *ex_data_class, long argl, void *argp, CRYPTO_EX_free *free_func); // CRYPTO_set_ex_data sets an extra data pointer on a given object. Each class // of object should provide a wrapper function. 
OPENSSL_EXPORT int CRYPTO_set_ex_data(CRYPTO_EX_DATA *ad, int index, void *val); // CRYPTO_get_ex_data returns an extra data pointer for a given object, or NULL // if no such index exists. Each class of object should provide a wrapper // function. OPENSSL_EXPORT void *CRYPTO_get_ex_data(const CRYPTO_EX_DATA *ad, int index); // CRYPTO_new_ex_data initialises a newly allocated |CRYPTO_EX_DATA|. OPENSSL_EXPORT void CRYPTO_new_ex_data(CRYPTO_EX_DATA *ad); // CRYPTO_free_ex_data frees |ad|, which is embedded inside |obj|, which is an // object of the given class. OPENSSL_EXPORT void CRYPTO_free_ex_data(CRYPTO_EX_DATA_CLASS *ex_data_class, void *obj, CRYPTO_EX_DATA *ad); // Endianness conversions. #if defined(__GNUC__) && __GNUC__ >= 2 static inline uint16_t CRYPTO_bswap2(uint16_t x) { return __builtin_bswap16(x); } static inline uint32_t CRYPTO_bswap4(uint32_t x) { return __builtin_bswap32(x); } static inline uint64_t CRYPTO_bswap8(uint64_t x) { return __builtin_bswap64(x); } #elif defined(_MSC_VER) OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include OPENSSL_MSVC_PRAGMA(warning(pop)) #pragma intrinsic(_byteswap_uint64, _byteswap_ulong, _byteswap_ushort) static inline uint16_t CRYPTO_bswap2(uint16_t x) { return _byteswap_ushort(x); } static inline uint32_t CRYPTO_bswap4(uint32_t x) { return _byteswap_ulong(x); } static inline uint64_t CRYPTO_bswap8(uint64_t x) { return _byteswap_uint64(x); } #else static inline uint16_t CRYPTO_bswap2(uint16_t x) { return (x >> 8) | (x << 8); } static inline uint32_t CRYPTO_bswap4(uint32_t x) { x = (x >> 16) | (x << 16); x = ((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8); return x; } static inline uint64_t CRYPTO_bswap8(uint64_t x) { return CRYPTO_bswap4(x >> 32) | (((uint64_t)CRYPTO_bswap4(x)) << 32); } #endif // Language bug workarounds. // // Most C standard library functions are undefined if passed NULL, even when the // corresponding length is zero. 
This gives them (and, in turn, all functions // which call them) surprising behavior on empty arrays. Some compilers will // miscompile code due to this rule. See also // https://www.imperialviolet.org/2016/06/26/nonnull.html // // These wrapper functions behave the same as the corresponding C standard // functions, but behave as expected when passed NULL if the length is zero. // // Note |OPENSSL_memcmp| is a different function from |CRYPTO_memcmp|. // C++ defines |memchr| as a const-correct overload. #if defined(__cplusplus) extern "C++" { static inline const void *OPENSSL_memchr(const void *s, int c, size_t n) { if (n == 0) { return NULL; } return memchr(s, c, n); } static inline void *OPENSSL_memchr(void *s, int c, size_t n) { if (n == 0) { return NULL; } return memchr(s, c, n); } } // extern "C++" #else // __cplusplus static inline void *OPENSSL_memchr(const void *s, int c, size_t n) { if (n == 0) { return NULL; } return memchr(s, c, n); } #endif // __cplusplus static inline int OPENSSL_memcmp(const void *s1, const void *s2, size_t n) { if (n == 0) { return 0; } return memcmp(s1, s2, n); } static inline void *OPENSSL_memcpy(void *dst, const void *src, size_t n) { if (n == 0) { return dst; } return memcpy(dst, src, n); } static inline void *OPENSSL_memmove(void *dst, const void *src, size_t n) { if (n == 0) { return dst; } return memmove(dst, src, n); } static inline void *OPENSSL_memset(void *dst, int c, size_t n) { if (n == 0) { return dst; } return memset(dst, c, n); } // Loads and stores. // // The following functions load and store sized integers with the specified // endianness. They use |memcpy|, and so avoid alignment or strict aliasing // requirements on the input and output pointers. 
// Big-endian loads/stores byte-swap through CRYPTO_bswap*; little-endian
// variants are a plain |memcpy| (BoringSSL targets are little-endian hosts
// where noted elsewhere in this header).
static inline uint16_t CRYPTO_load_u16_be(const void *in) {
  uint16_t v;
  OPENSSL_memcpy(&v, in, sizeof(v));
  return CRYPTO_bswap2(v);
}

static inline void CRYPTO_store_u16_be(void *out, uint16_t v) {
  v = CRYPTO_bswap2(v);
  OPENSSL_memcpy(out, &v, sizeof(v));
}

static inline uint32_t CRYPTO_load_u32_le(const void *in) {
  uint32_t v;
  OPENSSL_memcpy(&v, in, sizeof(v));
  return v;
}

static inline void CRYPTO_store_u32_le(void *out, uint32_t v) {
  OPENSSL_memcpy(out, &v, sizeof(v));
}

static inline uint32_t CRYPTO_load_u32_be(const void *in) {
  uint32_t v;
  OPENSSL_memcpy(&v, in, sizeof(v));
  return CRYPTO_bswap4(v);
}

static inline void CRYPTO_store_u32_be(void *out, uint32_t v) {
  v = CRYPTO_bswap4(v);
  OPENSSL_memcpy(out, &v, sizeof(v));
}

static inline uint64_t CRYPTO_load_u64_le(const void *in) {
  uint64_t v;
  OPENSSL_memcpy(&v, in, sizeof(v));
  return v;
}

static inline void CRYPTO_store_u64_le(void *out, uint64_t v) {
  OPENSSL_memcpy(out, &v, sizeof(v));
}

static inline uint64_t CRYPTO_load_u64_be(const void *ptr) {
  uint64_t ret;
  OPENSSL_memcpy(&ret, ptr, sizeof(ret));
  return CRYPTO_bswap8(ret);
}

static inline void CRYPTO_store_u64_be(void *out, uint64_t v) {
  v = CRYPTO_bswap8(v);
  OPENSSL_memcpy(out, &v, sizeof(v));
}

// The word-sized variants operate on |crypto_word_t|, whose width depends on
// the target (see the OPENSSL_64_BIT checks below).
static inline crypto_word_t CRYPTO_load_word_le(const void *in) {
  crypto_word_t v;
  OPENSSL_memcpy(&v, in, sizeof(v));
  return v;
}

static inline void CRYPTO_store_word_le(void *out, crypto_word_t v) {
  OPENSSL_memcpy(out, &v, sizeof(v));
}

static inline crypto_word_t CRYPTO_load_word_be(const void *in) {
  crypto_word_t v;
  OPENSSL_memcpy(&v, in, sizeof(v));
#if defined(OPENSSL_64_BIT)
  static_assert(sizeof(v) == 8, "crypto_word_t has unexpected size");
  return CRYPTO_bswap8(v);
#else
  static_assert(sizeof(v) == 4, "crypto_word_t has unexpected size");
  return CRYPTO_bswap4(v);
#endif
}


// Bit rotation functions.
//
// Note these functions use |(-shift) & 31|, etc., because shifting by the bit
// width is undefined.
// Both Clang and GCC recognize this pattern as a rotation,
// but MSVC does not. Instead, we call MSVC's built-in functions.

static inline uint32_t CRYPTO_rotl_u32(uint32_t value, int shift) {
#if defined(_MSC_VER)
  return _rotl(value, shift);
#else
  return (value << shift) | (value >> ((-shift) & 31));
#endif
}

static inline uint32_t CRYPTO_rotr_u32(uint32_t value, int shift) {
#if defined(_MSC_VER)
  return _rotr(value, shift);
#else
  return (value >> shift) | (value << ((-shift) & 31));
#endif
}

static inline uint64_t CRYPTO_rotl_u64(uint64_t value, int shift) {
#if defined(_MSC_VER)
  return _rotl64(value, shift);
#else
  return (value << shift) | (value >> ((-shift) & 63));
#endif
}

static inline uint64_t CRYPTO_rotr_u64(uint64_t value, int shift) {
#if defined(_MSC_VER)
  return _rotr64(value, shift);
#else
  return (value >> shift) | (value << ((-shift) & 63));
#endif
}


// FIPS functions.

#if defined(BORINGSSL_FIPS)
// BORINGSSL_FIPS_abort is called when a FIPS power-on or continuous test
// fails. It prevents any further cryptographic operations by the current
// process.
void BORINGSSL_FIPS_abort(void) __attribute__((noreturn));

// boringssl_self_test_startup runs all startup self tests and returns one on
// success or zero on error. Startup self tests do not include lazy tests.
// Call |BORINGSSL_self_test| to run every self test.
int boringssl_self_test_startup(void);

// boringssl_ensure_rsa_self_test checks whether the RSA self-test has been run
// in this address space. If not, it runs it and crashes the address space if
// unsuccessful.
void boringssl_ensure_rsa_self_test(void);

// boringssl_ensure_ecc_self_test checks whether the ECDSA and ECDH self-test
// has been run in this address space. If not, it runs it and crashes the
// address space if unsuccessful.
void boringssl_ensure_ecc_self_test(void);

// boringssl_ensure_ffdh_self_test checks whether the FFDH self-test has been
// run in this address space.
If not, it runs it and crashes the address space // if unsuccessful. void boringssl_ensure_ffdh_self_test(void); #else // Outside of FIPS mode, the lazy tests are no-ops. inline void boringssl_ensure_rsa_self_test(void) {} inline void boringssl_ensure_ecc_self_test(void) {} inline void boringssl_ensure_ffdh_self_test(void) {} #endif // FIPS // boringssl_self_test_sha256 performs a SHA-256 KAT. int boringssl_self_test_sha256(void); // boringssl_self_test_sha512 performs a SHA-512 KAT. int boringssl_self_test_sha512(void); // boringssl_self_test_hmac_sha256 performs an HMAC-SHA-256 KAT. int boringssl_self_test_hmac_sha256(void); #if defined(BORINGSSL_FIPS_COUNTERS) void boringssl_fips_inc_counter(enum fips_counter_t counter); #else inline void boringssl_fips_inc_counter(enum fips_counter_t counter) {} #endif #if defined(BORINGSSL_FIPS_BREAK_TESTS) inline int boringssl_fips_break_test(const char *test) { const char *const value = getenv("BORINGSSL_FIPS_BREAK_TEST"); return value != NULL && strcmp(value, test) == 0; } #else inline int boringssl_fips_break_test(const char *test) { return 0; } #endif // BORINGSSL_FIPS_BREAK_TESTS // Runtime CPU feature support #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) // OPENSSL_ia32cap_P contains the Intel CPUID bits when running on an x86 or // x86-64 system. 
// // Index 0: // EDX for CPUID where EAX = 1 // Bit 20 is always zero // Bit 28 is adjusted to reflect whether the data cache is shared between // multiple logical cores // Bit 30 is used to indicate an Intel CPU // Index 1: // ECX for CPUID where EAX = 1 // Bit 11 is used to indicate AMD XOP support, not SDBG // Index 2: // EBX for CPUID where EAX = 7, ECX = 0 // Bit 14 (for removed feature MPX) is used to indicate a preference for ymm // registers over zmm even when zmm registers are supported // Index 3: // ECX for CPUID where EAX = 7, ECX = 0 // // Note: the CPUID bits are pre-adjusted for the OSXSAVE bit and the XMM, YMM, // and AVX512 bits in XCR0, so it is not necessary to check those. (WARNING: See // caveats in cpu_intel.c.) // // From C, this symbol should only be accessed with |OPENSSL_get_ia32cap|. extern uint32_t OPENSSL_ia32cap_P[4]; // OPENSSL_get_ia32cap initializes the library if needed and returns the |idx|th // entry of |OPENSSL_ia32cap_P|. It is marked as a const function so duplicate // calls can be merged by the compiler, at least when indices match. OPENSSL_ATTR_CONST uint32_t OPENSSL_get_ia32cap(int idx); // See Intel manual, volume 2A, table 3-11. inline int CRYPTO_is_FXSR_capable(void) { #if defined(__FXSR__) return 1; #else return (OPENSSL_get_ia32cap(0) & (1u << 24)) != 0; #endif } inline int CRYPTO_is_intel_cpu(void) { // The reserved bit 30 is used to indicate an Intel CPU. return (OPENSSL_get_ia32cap(0) & (1u << 30)) != 0; } // See Intel manual, volume 2A, table 3-10. 
inline int CRYPTO_is_PCLMUL_capable(void) { #if defined(__PCLMUL__) return 1; #else return (OPENSSL_get_ia32cap(1) & (1u << 1)) != 0; #endif } inline int CRYPTO_is_SSSE3_capable(void) { #if defined(__SSSE3__) return 1; #else return (OPENSSL_get_ia32cap(1) & (1u << 9)) != 0; #endif } inline int CRYPTO_is_SSE4_1_capable(void) { #if defined(__SSE4_1__) return 1; #else return (OPENSSL_get_ia32cap(1) & (1u << 19)) != 0; #endif } inline int CRYPTO_is_MOVBE_capable(void) { #if defined(__MOVBE__) return 1; #else return (OPENSSL_get_ia32cap(1) & (1u << 22)) != 0; #endif } inline int CRYPTO_is_AESNI_capable(void) { #if defined(__AES__) return 1; #else return (OPENSSL_get_ia32cap(1) & (1u << 25)) != 0; #endif } // We intentionally avoid defining a |CRYPTO_is_XSAVE_capable| function. See // |CRYPTO_cpu_perf_is_like_silvermont|. inline int CRYPTO_is_AVX_capable(void) { #if defined(__AVX__) return 1; #else return (OPENSSL_get_ia32cap(1) & (1u << 28)) != 0; #endif } inline int CRYPTO_is_RDRAND_capable(void) { // We intentionally do not check |__RDRND__| here. On some AMD processors, we // will act as if the hardware is RDRAND-incapable, even it actually supports // it. See cpu_intel.c. return (OPENSSL_get_ia32cap(1) & (1u << 30)) != 0; } // See Intel manual, volume 2A, table 3-8. inline int CRYPTO_is_BMI1_capable(void) { #if defined(__BMI__) return 1; #else return (OPENSSL_get_ia32cap(2) & (1u << 3)) != 0; #endif } inline int CRYPTO_is_AVX2_capable(void) { #if defined(__AVX2__) return 1; #else return (OPENSSL_get_ia32cap(2) & (1u << 5)) != 0; #endif } inline int CRYPTO_is_BMI2_capable(void) { #if defined(__BMI2__) return 1; #else return (OPENSSL_get_ia32cap(2) & (1u << 8)) != 0; #endif } inline int CRYPTO_is_ADX_capable(void) { #if defined(__ADX__) return 1; #else return (OPENSSL_get_ia32cap(2) & (1u << 19)) != 0; #endif } // SHA-1 and SHA-256 are defined as a single extension. inline int CRYPTO_is_x86_SHA_capable(void) { // We should check __SHA__ here, but for now we ignore it. 
We've run into a // few places where projects build with -march=goldmont, but need a build that // does not require SHA extensions: // // - Some CrOS toolchain definitions are incorrect and build with // -march=goldmont when targetting boards that are not Goldmont. b/320482539 // tracks fixing this. // // - Sometimes projects build with -march=goldmont as a rough optimized // baseline. However, Intel CPU capabilities are not strictly linear, so // this does not quite work. Some combination of -mtune and // -march=x86-64-v{1,2,3,4} would be a better strategy here. // // - QEMU versions before 8.2 do not support SHA extensions and disable it // with a warning. Projects that target Goldmont and test on QEMU will // break. The long-term fix is to update to 8.2. A principled short-term fix // would be -march=goldmont -mno-sha, to reflect that the binary needs to // run on both QEMU-8.1-Goldmont and actual-Goldmont. // // TODO(b/320482539): Once the CrOS toolchain is fixed, try this again. return (OPENSSL_get_ia32cap(2) & (1u << 29)) != 0; } // CRYPTO_cpu_perf_is_like_silvermont returns one if, based on a heuristic, the // CPU has Silvermont-like performance characteristics. It is often faster to // run different codepaths on these CPUs than the available instructions would // otherwise select. See chacha-x86_64.pl. // // Bonnell, Silvermont's predecessor in the Atom lineup, will also be matched by // this. Goldmont (Silvermont's successor in the Atom lineup) added XSAVE so it // isn't matched by this. Various sources indicate AMD first implemented MOVBE // and XSAVE at the same time in Jaguar, so it seems like AMD chips will not be // matched by this. That seems to be the case for other x86(-64) CPUs. inline int CRYPTO_cpu_perf_is_like_silvermont(void) { // WARNING: This MUST NOT be used to guard the execution of the XSAVE // instruction. This is the "hardware supports XSAVE" bit, not the OSXSAVE bit // that indicates whether we can safely execute XSAVE. 
This bit may be set // even when XSAVE is disabled (by the operating system). See how the users of // this bit use it. // // Historically, the XSAVE bit was artificially cleared on Knights Landing // and Knights Mill chips, but as Intel has removed all support from GCC, // LLVM, and SDE, we assume they are no longer worth special-casing. int hardware_supports_xsave = (OPENSSL_get_ia32cap(1) & (1u << 26)) != 0; return !hardware_supports_xsave && CRYPTO_is_MOVBE_capable(); } inline int CRYPTO_is_AVX512BW_capable(void) { #if defined(__AVX512BW__) return 1; #else return (OPENSSL_get_ia32cap(2) & (1u << 30)) != 0; #endif } inline int CRYPTO_is_AVX512VL_capable(void) { #if defined(__AVX512VL__) return 1; #else return (OPENSSL_get_ia32cap(2) & (1u << 31)) != 0; #endif } // CRYPTO_cpu_avoid_zmm_registers returns 1 if zmm registers (512-bit vectors) // should not be used even if the CPU supports them. // // Note that this reuses the bit for the removed MPX feature. inline int CRYPTO_cpu_avoid_zmm_registers(void) { return (OPENSSL_get_ia32cap(2) & (1u << 14)) != 0; } inline int CRYPTO_is_VAES_capable(void) { #if defined(__VAES__) return 1; #else return (OPENSSL_get_ia32cap(3) & (1u << 9)) != 0; #endif } inline int CRYPTO_is_VPCLMULQDQ_capable(void) { #if defined(__VPCLMULQDQ__) return 1; #else return (OPENSSL_get_ia32cap(3) & (1u << 10)) != 0; #endif } #endif // OPENSSL_X86 || OPENSSL_X86_64 #if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) // OPENSSL_armcap_P contains ARM CPU capabilities. From C, this should only be // accessed with |OPENSSL_get_armcap|. extern uint32_t OPENSSL_armcap_P; // OPENSSL_get_armcap initializes the library if needed and returns ARM CPU // capabilities. It is marked as a const function so duplicate calls can be // merged by the compiler. OPENSSL_ATTR_CONST uint32_t OPENSSL_get_armcap(void); // Normalize some older feature flags to their modern ACLE values. 
// https://developer.arm.com/architectures/system-architectures/software-standards/acle #if defined(__ARM_NEON__) && !defined(__ARM_NEON) #define __ARM_NEON 1 #endif #if defined(__ARM_FEATURE_CRYPTO) #if !defined(__ARM_FEATURE_AES) #define __ARM_FEATURE_AES 1 #endif #if !defined(__ARM_FEATURE_SHA2) #define __ARM_FEATURE_SHA2 1 #endif #endif // CRYPTO_is_NEON_capable returns true if the current CPU has a NEON unit. If // this is known statically, it is a constant inline function. inline int CRYPTO_is_NEON_capable(void) { #if defined(OPENSSL_STATIC_ARMCAP_NEON) || defined(__ARM_NEON) return 1; #elif defined(OPENSSL_STATIC_ARMCAP) return 0; #else return (OPENSSL_get_armcap() & ARMV7_NEON) != 0; #endif } inline int CRYPTO_is_ARMv8_AES_capable(void) { #if defined(OPENSSL_STATIC_ARMCAP_AES) || defined(__ARM_FEATURE_AES) return 1; #elif defined(OPENSSL_STATIC_ARMCAP) return 0; #else return (OPENSSL_get_armcap() & ARMV8_AES) != 0; #endif } inline int CRYPTO_is_ARMv8_PMULL_capable(void) { #if defined(OPENSSL_STATIC_ARMCAP_PMULL) || defined(__ARM_FEATURE_AES) return 1; #elif defined(OPENSSL_STATIC_ARMCAP) return 0; #else return (OPENSSL_get_armcap() & ARMV8_PMULL) != 0; #endif } inline int CRYPTO_is_ARMv8_SHA1_capable(void) { // SHA-1 and SHA-2 (only) share |__ARM_FEATURE_SHA2| but otherwise // are dealt with independently. #if defined(OPENSSL_STATIC_ARMCAP_SHA1) || defined(__ARM_FEATURE_SHA2) return 1; #elif defined(OPENSSL_STATIC_ARMCAP) return 0; #else return (OPENSSL_get_armcap() & ARMV8_SHA1) != 0; #endif } inline int CRYPTO_is_ARMv8_SHA256_capable(void) { // SHA-1 and SHA-2 (only) share |__ARM_FEATURE_SHA2| but otherwise // are dealt with independently. #if defined(OPENSSL_STATIC_ARMCAP_SHA256) || defined(__ARM_FEATURE_SHA2) return 1; #elif defined(OPENSSL_STATIC_ARMCAP) return 0; #else return (OPENSSL_get_armcap() & ARMV8_SHA256) != 0; #endif } inline int CRYPTO_is_ARMv8_SHA512_capable(void) { // There is no |OPENSSL_STATIC_ARMCAP_SHA512|. 
#if defined(__ARM_FEATURE_SHA512) return 1; #elif defined(OPENSSL_STATIC_ARMCAP) return 0; #else return (OPENSSL_get_armcap() & ARMV8_SHA512) != 0; #endif } #endif // OPENSSL_ARM || OPENSSL_AARCH64 #if defined(BORINGSSL_DISPATCH_TEST) // Runtime CPU dispatch testing support // BORINGSSL_function_hit is an array of flags. The following functions will // set these flags if BORINGSSL_DISPATCH_TEST is defined. // 0: aes_hw_ctr32_encrypt_blocks // 1: aes_hw_encrypt // 2: aesni_gcm_encrypt // 3: aes_hw_set_encrypt_key // 4: vpaes_encrypt // 5: vpaes_set_encrypt_key // 6: aes_gcm_enc_update_vaes_avx10_256 [reserved] // 7: aes_gcm_enc_update_vaes_avx10_512 // 8: aes_gcm_enc_update_vaes_avx2 extern uint8_t BORINGSSL_function_hit[9]; #endif // BORINGSSL_DISPATCH_TEST // OPENSSL_vasprintf_internal is just like |vasprintf(3)|. If |system_malloc| is // 0, memory will be allocated with |OPENSSL_malloc| and must be freed with // |OPENSSL_free|. Otherwise the system |malloc| function is used and the memory // must be freed with the system |free| function. OPENSSL_EXPORT int OPENSSL_vasprintf_internal(char **str, const char *format, va_list args, int system_malloc) OPENSSL_PRINTF_FORMAT_FUNC(2, 0); #if defined(__cplusplus) } // extern C #endif // Arithmetic functions. // CRYPTO_addc_* returns |x + y + carry|, and sets |*out_carry| to the carry // bit. |carry| must be zero or one. 
#if OPENSSL_HAS_BUILTIN(__builtin_addc)

// The |CRYPTO_addc_impl| overloads (this is a C++-compiled branch: same name,
// different parameter types) forward to the matching Clang checked-carry
// builtin, so |CRYPTO_addc_u32|/|CRYPTO_addc_u64| pick the right one by
// overload resolution regardless of how uint32_t/uint64_t map to the
// fundamental types on this target.
inline unsigned int CRYPTO_addc_impl(unsigned int x, unsigned int y,
                                     unsigned int carry,
                                     unsigned int *out_carry) {
  return __builtin_addc(x, y, carry, out_carry);
}

inline unsigned long CRYPTO_addc_impl(unsigned long x, unsigned long y,
                                      unsigned long carry,
                                      unsigned long *out_carry) {
  return __builtin_addcl(x, y, carry, out_carry);
}

inline unsigned long long CRYPTO_addc_impl(unsigned long long x,
                                           unsigned long long y,
                                           unsigned long long carry,
                                           unsigned long long *out_carry) {
  return __builtin_addcll(x, y, carry, out_carry);
}

inline uint32_t CRYPTO_addc_u32(uint32_t x, uint32_t y, uint32_t carry,
                                uint32_t *out_carry) {
  return CRYPTO_addc_impl(x, y, carry, out_carry);
}

inline uint64_t CRYPTO_addc_u64(uint64_t x, uint64_t y, uint64_t carry,
                                uint64_t *out_carry) {
  return CRYPTO_addc_impl(x, y, carry, out_carry);
}

#else

// Portable fallbacks: compute in a wider type where one exists, otherwise
// detect the carry via unsigned-wraparound comparisons.
static inline uint32_t CRYPTO_addc_u32(uint32_t x, uint32_t y, uint32_t carry,
                                       uint32_t *out_carry) {
  declassify_assert(carry <= 1);
  uint64_t ret = carry;
  ret += (uint64_t)x + y;
  *out_carry = (uint32_t)(ret >> 32);
  return (uint32_t)ret;
}

static inline uint64_t CRYPTO_addc_u64(uint64_t x, uint64_t y, uint64_t carry,
                                       uint64_t *out_carry) {
  declassify_assert(carry <= 1);
#if defined(BORINGSSL_HAS_UINT128)
  uint128_t ret = carry;
  ret += (uint128_t)x + y;
  *out_carry = (uint64_t)(ret >> 64);
  return (uint64_t)ret;
#else
  // No 128-bit type: |x + carry| cannot overflow unless x was UINT64_MAX and
  // carry 1, which the first comparison catches; the second catches overflow
  // of the main addition. At most one of the two can be set, so the carry out
  // remains zero or one.
  x += carry;
  carry = x < carry;
  uint64_t ret = x + y;
  carry += ret < x;
  *out_carry = carry;
  return ret;
#endif
}

#endif


// CRYPTO_subc_* returns |x - y - borrow|, and sets |*out_borrow| to the borrow
// bit. |borrow| must be zero or one.
#if OPENSSL_HAS_BUILTIN(__builtin_subc) inline unsigned int CRYPTO_subc_impl(unsigned int x, unsigned int y, unsigned int borrow, unsigned int *out_borrow) { return __builtin_subc(x, y, borrow, out_borrow); } inline unsigned long CRYPTO_subc_impl(unsigned long x, unsigned long y, unsigned long borrow, unsigned long *out_borrow) { return __builtin_subcl(x, y, borrow, out_borrow); } inline unsigned long long CRYPTO_subc_impl(unsigned long long x, unsigned long long y, unsigned long long borrow, unsigned long long *out_borrow) { return __builtin_subcll(x, y, borrow, out_borrow); } inline uint32_t CRYPTO_subc_u32(uint32_t x, uint32_t y, uint32_t borrow, uint32_t *out_borrow) { return CRYPTO_subc_impl(x, y, borrow, out_borrow); } inline uint64_t CRYPTO_subc_u64(uint64_t x, uint64_t y, uint64_t borrow, uint64_t *out_borrow) { return CRYPTO_subc_impl(x, y, borrow, out_borrow); } #else static inline uint32_t CRYPTO_subc_u32(uint32_t x, uint32_t y, uint32_t borrow, uint32_t *out_borrow) { declassify_assert(borrow <= 1); uint32_t ret = x - y - borrow; *out_borrow = (x < y) | ((x == y) & borrow); return ret; } static inline uint64_t CRYPTO_subc_u64(uint64_t x, uint64_t y, uint64_t borrow, uint64_t *out_borrow) { declassify_assert(borrow <= 1); uint64_t ret = x - y - borrow; *out_borrow = (x < y) | ((x == y) & borrow); return ret; } #endif #if defined(OPENSSL_64_BIT) #define CRYPTO_addc_w CRYPTO_addc_u64 #define CRYPTO_subc_w CRYPTO_subc_u64 #else #define CRYPTO_addc_w CRYPTO_addc_u32 #define CRYPTO_subc_w CRYPTO_subc_u32 #endif #endif // OPENSSL_HEADER_CRYPTO_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/kyber/internal.h ================================================ /* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CRYPTO_KYBER_INTERNAL_H #define OPENSSL_HEADER_CRYPTO_KYBER_INTERNAL_H #include #include #if defined(__cplusplus) extern "C" { #endif // KYBER_ENCAP_ENTROPY is the number of bytes of uniformly random entropy // necessary to encapsulate a secret. The entropy will be leaked to the // decapsulating party. #define KYBER_ENCAP_ENTROPY 32 // KYBER_GENERATE_KEY_ENTROPY is the number of bytes of uniformly random entropy // necessary to generate a key. #define KYBER_GENERATE_KEY_ENTROPY 64 // KYBER_generate_key_external_entropy is a deterministic function to create a // pair of Kyber768 keys, using the supplied entropy. The entropy needs to be // uniformly random generated. This function is should only be used for tests, // regular callers should use the non-deterministic |KYBER_generate_key| // directly. OPENSSL_EXPORT void KYBER_generate_key_external_entropy( uint8_t out_encoded_public_key[KYBER_PUBLIC_KEY_BYTES], struct KYBER_private_key *out_private_key, const uint8_t entropy[KYBER_GENERATE_KEY_ENTROPY]); // KYBER_encap_external_entropy behaves like |KYBER_encap|, but uses // |KYBER_ENCAP_ENTROPY| bytes of |entropy| for randomization. The decapsulating // side will be able to recover |entropy| in full. This function should only be // used for tests, regular callers should use the non-deterministic // |KYBER_encap| directly. 
OPENSSL_EXPORT void KYBER_encap_external_entropy( uint8_t out_ciphertext[KYBER_CIPHERTEXT_BYTES], uint8_t out_shared_secret[KYBER_SHARED_SECRET_BYTES], const struct KYBER_public_key *public_key, const uint8_t entropy[KYBER_ENCAP_ENTROPY]); #if defined(__cplusplus) } #endif #endif // OPENSSL_HEADER_CRYPTO_KYBER_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/kyber/kyber.cc ================================================ /* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #define OPENSSL_UNSTABLE_EXPERIMENTAL_KYBER #include #include #include #include #include #include "../fipsmodule/keccak/internal.h" #include "../internal.h" #include "./internal.h" // See // https://pq-crystals.org/kyber/data/kyber-specification-round3-20210804.pdf static void prf(uint8_t *out, size_t out_len, const uint8_t in[33]) { BORINGSSL_keccak(out, out_len, in, 33, boringssl_shake256); } static void hash_h(uint8_t out[32], const uint8_t *in, size_t len) { BORINGSSL_keccak(out, 32, in, len, boringssl_sha3_256); } static void hash_g(uint8_t out[64], const uint8_t *in, size_t len) { BORINGSSL_keccak(out, 64, in, len, boringssl_sha3_512); } static void kdf(uint8_t *out, size_t out_len, const uint8_t *in, size_t len) { BORINGSSL_keccak(out, out_len, in, len, boringssl_shake256); } #define DEGREE 256 #define RANK 3 static const size_t kBarrettMultiplier = 5039; static const unsigned kBarrettShift = 24; static const uint16_t kPrime = 3329; static const int kLog2Prime = 12; static const uint16_t kHalfPrime = (/*kPrime=*/3329 - 1) / 2; static const int kDU = 10; static const int kDV = 4; // kInverseDegree is 128^-1 mod 3329; 128 because kPrime does not have a 512th // root of unity. static const uint16_t kInverseDegree = 3303; static const size_t kEncodedVectorSize = (/*kLog2Prime=*/12 * DEGREE / 8) * RANK; static const size_t kCompressedVectorSize = /*kDU=*/10 * RANK * DEGREE / 8; typedef struct scalar { // On every function entry and exit, 0 <= c < kPrime. 
uint16_t c[DEGREE]; } scalar; typedef struct vector { scalar v[RANK]; } vector; typedef struct matrix { scalar v[RANK][RANK]; } matrix; // This bit of Python will be referenced in some of the following comments: // // p = 3329 // // def bitreverse(i): // ret = 0 // for n in range(7): // bit = i & 1 // ret <<= 1 // ret |= bit // i >>= 1 // return ret // kNTTRoots = [pow(17, bitreverse(i), p) for i in range(128)] static const uint16_t kNTTRoots[128] = { 1, 1729, 2580, 3289, 2642, 630, 1897, 848, 1062, 1919, 193, 797, 2786, 3260, 569, 1746, 296, 2447, 1339, 1476, 3046, 56, 2240, 1333, 1426, 2094, 535, 2882, 2393, 2879, 1974, 821, 289, 331, 3253, 1756, 1197, 2304, 2277, 2055, 650, 1977, 2513, 632, 2865, 33, 1320, 1915, 2319, 1435, 807, 452, 1438, 2868, 1534, 2402, 2647, 2617, 1481, 648, 2474, 3110, 1227, 910, 17, 2761, 583, 2649, 1637, 723, 2288, 1100, 1409, 2662, 3281, 233, 756, 2156, 3015, 3050, 1703, 1651, 2789, 1789, 1847, 952, 1461, 2687, 939, 2308, 2437, 2388, 733, 2337, 268, 641, 1584, 2298, 2037, 3220, 375, 2549, 2090, 1645, 1063, 319, 2773, 757, 2099, 561, 2466, 2594, 2804, 1092, 403, 1026, 1143, 2150, 2775, 886, 1722, 1212, 1874, 1029, 2110, 2935, 885, 2154, }; // kInverseNTTRoots = [pow(17, -bitreverse(i), p) for i in range(128)] static const uint16_t kInverseNTTRoots[128] = { 1, 1600, 40, 749, 2481, 1432, 2699, 687, 1583, 2760, 69, 543, 2532, 3136, 1410, 2267, 2508, 1355, 450, 936, 447, 2794, 1235, 1903, 1996, 1089, 3273, 283, 1853, 1990, 882, 3033, 2419, 2102, 219, 855, 2681, 1848, 712, 682, 927, 1795, 461, 1891, 2877, 2522, 1894, 1010, 1414, 2009, 3296, 464, 2697, 816, 1352, 2679, 1274, 1052, 1025, 2132, 1573, 76, 2998, 3040, 1175, 2444, 394, 1219, 2300, 1455, 2117, 1607, 2443, 554, 1179, 2186, 2303, 2926, 2237, 525, 735, 863, 2768, 1230, 2572, 556, 3010, 2266, 1684, 1239, 780, 2954, 109, 1292, 1031, 1745, 2688, 3061, 992, 2596, 941, 892, 1021, 2390, 642, 1868, 2377, 1482, 1540, 540, 1678, 1626, 279, 314, 1173, 2573, 3096, 48, 667, 1920, 2229, 1041, 2606, 
1692, 680, 2746, 568, 3312, }; // kModRoots = [pow(17, 2*bitreverse(i) + 1, p) for i in range(128)] static const uint16_t kModRoots[128] = { 17, 3312, 2761, 568, 583, 2746, 2649, 680, 1637, 1692, 723, 2606, 2288, 1041, 1100, 2229, 1409, 1920, 2662, 667, 3281, 48, 233, 3096, 756, 2573, 2156, 1173, 3015, 314, 3050, 279, 1703, 1626, 1651, 1678, 2789, 540, 1789, 1540, 1847, 1482, 952, 2377, 1461, 1868, 2687, 642, 939, 2390, 2308, 1021, 2437, 892, 2388, 941, 733, 2596, 2337, 992, 268, 3061, 641, 2688, 1584, 1745, 2298, 1031, 2037, 1292, 3220, 109, 375, 2954, 2549, 780, 2090, 1239, 1645, 1684, 1063, 2266, 319, 3010, 2773, 556, 757, 2572, 2099, 1230, 561, 2768, 2466, 863, 2594, 735, 2804, 525, 1092, 2237, 403, 2926, 1026, 2303, 1143, 2186, 2150, 1179, 2775, 554, 886, 2443, 1722, 1607, 1212, 2117, 1874, 1455, 1029, 2300, 2110, 1219, 2935, 394, 885, 2444, 2154, 1175, }; // reduce_once reduces 0 <= x < 2*kPrime, mod kPrime. static uint16_t reduce_once(uint16_t x) { declassify_assert(x < 2 * kPrime); const uint16_t subtracted = x - kPrime; uint16_t mask = 0u - (subtracted >> 15); // Although this is a constant-time select, we omit a value barrier here. // Value barriers impede auto-vectorization (likely because it forces the // value to transit through a general-purpose register). On AArch64, this is a // difference of 2x. // // We usually add value barriers to selects because Clang turns consecutive // selects with the same condition into a branch instead of CMOV/CSEL. This // condition does not occur in Kyber, so omitting it seems to be safe so far, // but see |scalar_centered_binomial_distribution_eta_2_with_prf|. return (mask & x) | (~mask & subtracted); } // constant time reduce x mod kPrime using Barrett reduction. x must be less // than kPrime + 2×kPrime². 
// Constant-time Barrett reduction of |x| mod kPrime. |x| must be less than
// kPrime + 2*kPrime^2 (checked by the declassify_assert); the quotient
// estimate leaves a remainder in [0, 2*kPrime), which |reduce_once| finishes.
static uint16_t reduce(uint32_t x) {
  declassify_assert(x < kPrime + 2u * kPrime * kPrime);
  uint64_t product = (uint64_t)x * kBarrettMultiplier;
  uint32_t quotient = (uint32_t)(product >> kBarrettShift);
  uint32_t remainder = x - quotient * kPrime;
  return reduce_once(remainder);
}

// scalar_zero/vector_zero set every coefficient of |out| to zero.
static void scalar_zero(scalar *out) { OPENSSL_memset(out, 0, sizeof(*out)); }

static void vector_zero(vector *out) { OPENSSL_memset(out, 0, sizeof(*out)); }

// In place number theoretic transform of a given scalar.
// Note that Kyber's kPrime 3329 does not have a 512th root of unity, so this
// transform leaves off the last iteration of the usual FFT code, with the 128
// relevant roots of unity being stored in |kNTTRoots|. This means the output
// should be seen as 128 elements in GF(3329^2), with the coefficients of the
// elements being consecutive entries in |s->c|.
static void scalar_ntt(scalar *s) {
  int offset = DEGREE;
  // `int` is used here because using `size_t` throughout caused a ~5% slowdown
  // with Clang 14 on Aarch64.
  for (int step = 1; step < DEGREE / 2; step <<= 1) {
    offset >>= 1;
    int k = 0;
    for (int i = 0; i < step; i++) {
      const uint32_t step_root = kNTTRoots[i + step];
      for (int j = k; j < k + offset; j++) {
        // Butterfly: both outputs stay in [0, kPrime) via reduce/reduce_once;
        // |+ kPrime| keeps the subtraction non-negative before reduction.
        uint16_t odd = reduce(step_root * s->c[j + offset]);
        uint16_t even = s->c[j];
        s->c[j] = reduce_once(odd + even);
        s->c[j + offset] = reduce_once(even - odd + kPrime);
      }
      k += 2 * offset;
    }
  }
}

// vector_ntt applies |scalar_ntt| to each element of |a|.
static void vector_ntt(vector *a) {
  for (int i = 0; i < RANK; i++) {
    scalar_ntt(&a->v[i]);
  }
}

// In place inverse number theoretic transform of a given scalar, with pairs of
// entries of s->v being interpreted as elements of GF(3329^2). Just as with the
// number theoretic transform, this leaves off the first step of the normal iFFT
// to account for the fact that 3329 does not have a 512th root of unity, using
// the precomputed 128 roots of unity stored in |kInverseNTTRoots|.
static void scalar_inverse_ntt(scalar *s) {
  int step = DEGREE / 2;
  // `int` is used here because using `size_t` throughout caused a ~5% slowdown
  // with Clang 14 on Aarch64.
  for (int offset = 2; offset < DEGREE; offset <<= 1) {
    step >>= 1;
    int k = 0;
    for (int i = 0; i < step; i++) {
      uint32_t step_root = kInverseNTTRoots[i + step];
      for (int j = k; j < k + offset; j++) {
        // Inverse butterfly; |+ kPrime| keeps the subtraction non-negative
        // before the Barrett reduction.
        uint16_t odd = s->c[j + offset];
        uint16_t even = s->c[j];
        s->c[j] = reduce_once(odd + even);
        s->c[j + offset] = reduce(step_root * (even - odd + kPrime));
      }
      k += 2 * offset;
    }
  }
  // Scale by kInverseDegree (128^-1 mod kPrime) to complete the inverse
  // transform.
  for (int i = 0; i < DEGREE; i++) {
    s->c[i] = reduce(s->c[i] * kInverseDegree);
  }
}

// vector_inverse_ntt applies |scalar_inverse_ntt| to each element of |a|.
static void vector_inverse_ntt(vector *a) {
  for (int i = 0; i < RANK; i++) {
    scalar_inverse_ntt(&a->v[i]);
  }
}

// scalar_add sets |lhs| to |lhs| + |rhs|, coefficient-wise mod kPrime.
static void scalar_add(scalar *lhs, const scalar *rhs) {
  for (int i = 0; i < DEGREE; i++) {
    lhs->c[i] = reduce_once(lhs->c[i] + rhs->c[i]);
  }
}

// scalar_sub sets |lhs| to |lhs| - |rhs|, coefficient-wise mod kPrime. The
// |+ kPrime| keeps the intermediate non-negative.
static void scalar_sub(scalar *lhs, const scalar *rhs) {
  for (int i = 0; i < DEGREE; i++) {
    lhs->c[i] = reduce_once(lhs->c[i] - rhs->c[i] + kPrime);
  }
}

// Multiplying two scalars in the number theoretically transformed state. Since
// 3329 does not have a 512th root of unity, this means we have to interpret
// the 2*ith and (2*i+1)th entries of the scalar as elements of GF(3329)[X]/(X^2
// - 17^(2*bitreverse(i)+1)) The value of 17^(2*bitreverse(i)+1) mod 3329 is
// stored in the precomputed |kModRoots| table. Note that our Barrett transform
// only allows us to multipy two reduced numbers together, so we need some
// intermediate reduction steps, even if an uint64_t could hold 3 multiplied
// numbers.
static void scalar_mult(scalar *out, const scalar *lhs, const scalar *rhs) {
  for (int i = 0; i < DEGREE / 2; i++) {
    // (a + bX)(c + dX) mod (X^2 - r): real part is ac + bd*r, imaginary part
    // is ad + bc, with r = kModRoots[i].
    uint32_t real_real = (uint32_t)lhs->c[2 * i] * rhs->c[2 * i];
    uint32_t img_img = (uint32_t)lhs->c[2 * i + 1] * rhs->c[2 * i + 1];
    uint32_t real_img = (uint32_t)lhs->c[2 * i] * rhs->c[2 * i + 1];
    uint32_t img_real = (uint32_t)lhs->c[2 * i + 1] * rhs->c[2 * i];
    out->c[2 * i] =
        reduce(real_real + (uint32_t)reduce(img_img) * kModRoots[i]);
    out->c[2 * i + 1] = reduce(img_real + real_img);
  }
}

// vector_add sets |lhs| to |lhs| + |rhs|, element-wise.
static void vector_add(vector *lhs, const vector *rhs) {
  for (int i = 0; i < RANK; i++) {
    scalar_add(&lhs->v[i], &rhs->v[i]);
  }
}

// matrix_mult sets |out| to |m| * |a| (all operands in NTT form).
static void matrix_mult(vector *out, const matrix *m, const vector *a) {
  vector_zero(out);
  for (int i = 0; i < RANK; i++) {
    for (int j = 0; j < RANK; j++) {
      scalar product;
      scalar_mult(&product, &m->v[i][j], &a->v[j]);
      scalar_add(&out->v[i], &product);
    }
  }
}

// matrix_mult_transpose sets |out| to transpose(|m|) * |a| (all operands in
// NTT form).
static void matrix_mult_transpose(vector *out, const matrix *m,
                                  const vector *a) {
  vector_zero(out);
  for (int i = 0; i < RANK; i++) {
    for (int j = 0; j < RANK; j++) {
      scalar product;
      scalar_mult(&product, &m->v[j][i], &a->v[j]);
      scalar_add(&out->v[i], &product);
    }
  }
}

// scalar_inner_product sets |out| to the inner product of |lhs| and |rhs|
// (both in NTT form).
static void scalar_inner_product(scalar *out, const vector *lhs,
                                 const vector *rhs) {
  scalar_zero(out);
  for (int i = 0; i < RANK; i++) {
    scalar product;
    scalar_mult(&product, &lhs->v[i], &rhs->v[i]);
    scalar_add(out, &product);
  }
}

// Algorithm 1 of the Kyber spec. Rejection samples a Keccak stream to get
// uniformly distributed elements. This is used for matrix expansion and only
// operates on public inputs.
static void scalar_from_keccak_vartime(scalar *out,
                                       struct BORINGSSL_keccak_st *keccak_ctx) {
  assert(keccak_ctx->squeeze_offset == 0);
  assert(keccak_ctx->rate_bytes == 168);
  static_assert(168 % 3 == 0, "block and coefficient boundaries do not align");

  int done = 0;
  while (done < DEGREE) {
    uint8_t block[168];
    BORINGSSL_keccak_squeeze(keccak_ctx, block, sizeof(block));
    // Each group of three bytes yields two 12-bit candidates, which are kept
    // only when already reduced mod |kPrime| (rejection sampling).
    for (size_t i = 0; i < sizeof(block) && done < DEGREE; i += 3) {
      uint16_t d1 = block[i] + 256 * (block[i + 1] % 16);
      uint16_t d2 = block[i + 1] / 16 + 16 * block[i + 2];
      if (d1 < kPrime) {
        out->c[done++] = d1;
      }
      if (d2 < kPrime && done < DEGREE) {
        out->c[done++] = d2;
      }
    }
  }
}

// Algorithm 2 of the Kyber spec, with eta fixed to two and the PRF call
// included. Creates binominally distributed elements by sampling 2*|eta| bits,
// and setting the coefficient to the count of the first bits minus the count of
// the second bits, resulting in a centered binomial distribution. Since eta is
// two this gives -2/2 with a probability of 1/16, -1/1 with probability 1/4,
// and 0 with probability 3/8.
static void scalar_centered_binomial_distribution_eta_2_with_prf(
    scalar *out, const uint8_t input[33]) {
  uint8_t entropy[128];
  static_assert(sizeof(entropy) == 2 * /*kEta=*/2 * DEGREE / 8, "");
  prf(entropy, sizeof(entropy), input);

  // Each byte of |entropy| provides two coefficients, four bits each.
  for (int i = 0; i < DEGREE; i += 2) {
    uint8_t byte = entropy[i / 2];

    uint16_t value = (byte & 1) + ((byte >> 1) & 1);
    value -= ((byte >> 2) & 1) + ((byte >> 3) & 1);

    // Add |kPrime| if |value| underflowed. See |reduce_once| for a discussion
    // on why the value barrier is omitted. While this could have been written
    // reduce_once(value + kPrime), this is one extra addition and small range
    // of |value| tempts some versions of Clang to emit a branch.
    uint16_t mask = 0u - (value >> 15);
    out->c[i] = value + (kPrime & mask);

    byte >>= 4;
    value = (byte & 1) + ((byte >> 1) & 1);
    value -= ((byte >> 2) & 1) + ((byte >> 3) & 1);

    // See above.
    mask = 0u - (value >> 15);
    out->c[i + 1] = value + (kPrime & mask);
  }
}

// Generates a secret vector by using
// |scalar_centered_binomial_distribution_eta_2_with_prf|, using the given
// seed, appending and incrementing |counter| for each entry of the vector.
static void vector_generate_secret_eta_2(vector *out, uint8_t *counter,
                                         const uint8_t seed[32]) {
  uint8_t input[33];
  OPENSSL_memcpy(input, seed, 32);
  for (int i = 0; i < RANK; i++) {
    input[32] = (*counter)++;
    scalar_centered_binomial_distribution_eta_2_with_prf(&out->v[i], input);
  }
}

// Expands the matrix of a seed for key generation and for encaps-CPA.
static void matrix_expand(matrix *out, const uint8_t rho[32]) {
  uint8_t input[34];
  OPENSSL_memcpy(input, rho, 32);
  for (int i = 0; i < RANK; i++) {
    for (int j = 0; j < RANK; j++) {
      // Each matrix entry is rejection sampled from SHAKE-128 of rho||i||j.
      input[32] = i;
      input[33] = j;
      struct BORINGSSL_keccak_st keccak_ctx;
      BORINGSSL_keccak_init(&keccak_ctx, boringssl_shake128);
      BORINGSSL_keccak_absorb(&keccak_ctx, input, sizeof(input));
      scalar_from_keccak_vartime(&out->v[i][j], &keccak_ctx);
    }
  }
}

// kMasks[i] has its lowest i+1 bits set.
static const uint8_t kMasks[8] = {0x01, 0x03, 0x07, 0x0f,
                                  0x1f, 0x3f, 0x7f, 0xff};

// scalar_encode packs the |DEGREE| coefficients of |s|, |bits| bits each, into
// |out| in little-endian bit order.
static void scalar_encode(uint8_t *out, const scalar *s, int bits) {
  assert(bits <= (int)sizeof(*s->c) * 8 && bits != 1);

  uint8_t out_byte = 0;
  int out_byte_bits = 0;

  for (int i = 0; i < DEGREE; i++) {
    uint16_t element = s->c[i];
    int element_bits_done = 0;

    while (element_bits_done < bits) {
      int chunk_bits = bits - element_bits_done;
      int out_bits_remaining = 8 - out_byte_bits;
      if (chunk_bits >= out_bits_remaining) {
        // The current output byte is full; flush it and start a new one.
        chunk_bits = out_bits_remaining;
        out_byte |= (element & kMasks[chunk_bits - 1]) << out_byte_bits;
        *out = out_byte;
        out++;
        out_byte_bits = 0;
        out_byte = 0;
      } else {
        out_byte |= (element & kMasks[chunk_bits - 1]) << out_byte_bits;
        out_byte_bits += chunk_bits;
      }

      element_bits_done += chunk_bits;
      element >>= chunk_bits;
    }
  }

  if (out_byte_bits > 0) {
    *out = out_byte;
  }
}

// scalar_encode_1 is |scalar_encode| specialised for |bits| == 1.
static void scalar_encode_1(uint8_t out[32], const scalar *s) {
  // Pack the low bit of each of the 256 coefficients, eight per byte, in
  // little-endian bit order.
  for (int i = 0; i < DEGREE; i += 8) {
    uint8_t out_byte = 0;
    for (int j = 0; j < 8; j++) {
      out_byte |= (s->c[i + j] & 1) << j;
    }
    *out = out_byte;
    out++;
  }
}

// Encodes an entire vector into 32*|RANK|*|bits| bytes. Note that since 256
// (DEGREE) is divisible by 8, the individual vector entries will always fill a
// whole number of bytes, so we do not need to worry about bit packing here.
static void vector_encode(uint8_t *out, const vector *a, int bits) {
  for (int i = 0; i < RANK; i++) {
    scalar_encode(out + i * bits * DEGREE / 8, &a->v[i], bits);
  }
}

// scalar_decode parses |DEGREE * bits| bits from |in| into |DEGREE| values in
// |out|. It returns one on success and zero if any parsed value is >=
// |kPrime|.
static int scalar_decode(scalar *out, const uint8_t *in, int bits) {
  assert(bits <= (int)sizeof(*out->c) * 8 && bits != 1);

  uint8_t in_byte = 0;
  int in_byte_bits_left = 0;

  for (int i = 0; i < DEGREE; i++) {
    uint16_t element = 0;
    int element_bits_done = 0;

    while (element_bits_done < bits) {
      if (in_byte_bits_left == 0) {
        in_byte = *in;
        in++;
        in_byte_bits_left = 8;
      }

      int chunk_bits = bits - element_bits_done;
      if (chunk_bits > in_byte_bits_left) {
        chunk_bits = in_byte_bits_left;
      }

      element |= (in_byte & kMasks[chunk_bits - 1]) << element_bits_done;
      in_byte_bits_left -= chunk_bits;
      in_byte >>= chunk_bits;

      element_bits_done += chunk_bits;
    }

    // An element is only out of range in the case of invalid input, in which
    // case it is okay to leak the comparison.
    if (constant_time_declassify_int(element >= kPrime)) {
      return 0;
    }
    out->c[i] = element;
  }

  return 1;
}

// scalar_decode_1 is |scalar_decode| specialised for |bits| == 1.
static void scalar_decode_1(scalar *out, const uint8_t in[32]) {
  for (int i = 0; i < DEGREE; i += 8) {
    uint8_t in_byte = *in;
    in++;
    for (int j = 0; j < 8; j++) {
      out->c[i + j] = in_byte & 1;
      in_byte >>= 1;
    }
  }
}

// Decodes 32*|RANK|*|bits| bytes from |in| into |out|.
// It returns one on success or zero if any parsed value is >= |kPrime|.
static int vector_decode(vector *out, const uint8_t *in, int bits) {
  for (int i = 0; i < RANK; i++) {
    if (!scalar_decode(&out->v[i], in + i * bits * DEGREE / 8, bits)) {
      return 0;
    }
  }
  return 1;
}

// Compresses (lossily) an input |x| mod 3329 into |bits| many bits by grouping
// numbers close to each other together. The formula used is
// round(2^|bits|/kPrime*x) mod 2^|bits|.
// Uses Barrett reduction to achieve constant time. Since we need both the
// remainder (for rounding) and the quotient (as the result), we cannot use
// |reduce| here, but need to do the Barrett reduction directly.
static uint16_t compress(uint16_t x, int bits) {
  uint32_t shifted = (uint32_t)x << bits;
  uint64_t product = (uint64_t)shifted * kBarrettMultiplier;
  uint32_t quotient = (uint32_t)(product >> kBarrettShift);
  uint32_t remainder = shifted - quotient * kPrime;

  // Adjust the quotient to round correctly:
  //   0 <= remainder <= kHalfPrime round to 0
  //   kHalfPrime < remainder <= kPrime + kHalfPrime round to 1
  //   kPrime + kHalfPrime < remainder < 2 * kPrime round to 2
  declassify_assert(remainder < 2u * kPrime);
  quotient += 1 & constant_time_lt_w(kHalfPrime, remainder);
  quotient += 1 & constant_time_lt_w(kPrime + kHalfPrime, remainder);
  return quotient & ((1 << bits) - 1);
}

// Decompresses |x| by using an equi-distant representative. The formula is
// round(kPrime/2^|bits|*x). Note that 2^|bits| being the divisor allows us to
// implement this logic using only bit operations.
static uint16_t decompress(uint16_t x, int bits) {
  uint32_t product = (uint32_t)x * kPrime;
  uint32_t power = 1 << bits;
  // This is |product| % power, since |power| is a power of 2.
  uint32_t remainder = product & (power - 1);
  // This is |product| / power, since |power| is a power of 2.
  uint32_t lower = product >> bits;
  // The rounding logic works since the first half of numbers mod |power| have a
  // 0 as first bit, and the second half has a 1 as first bit, since |power| is
  // a power of 2. As a 12 bit number, |remainder| is always positive, so we
  // will shift in 0s for a right shift.
  return lower + (remainder >> (bits - 1));
}

// Lossily compresses each coefficient of |s| to |bits| bits.
static void scalar_compress(scalar *s, int bits) {
  for (int i = 0; i < DEGREE; i++) {
    s->c[i] = compress(s->c[i], bits);
  }
}

// Expands each |bits|-bit coefficient of |s| back to a representative mod
// |kPrime|.
static void scalar_decompress(scalar *s, int bits) {
  for (int i = 0; i < DEGREE; i++) {
    s->c[i] = decompress(s->c[i], bits);
  }
}

// Applies |scalar_compress| to every entry of |a|.
static void vector_compress(vector *a, int bits) {
  for (int i = 0; i < RANK; i++) {
    scalar_compress(&a->v[i], bits);
  }
}

// Applies |scalar_decompress| to every entry of |a|.
static void vector_decompress(vector *a, int bits) {
  for (int i = 0; i < RANK; i++) {
    scalar_decompress(&a->v[i], bits);
  }
}

namespace {

// Internal layout of a Kyber public key: the NTT-domain vector t, the matrix
// seed rho, the cached hash of the serialised key, and the expanded matrix.
struct public_key {
  vector t;
  uint8_t rho[32];
  uint8_t public_key_hash[32];
  matrix m;
};

// Reinterprets the caller-visible opaque |KYBER_public_key| as the internal
// |public_key| layout. The static_asserts guarantee that the opaque type is
// large and aligned enough.
static struct public_key *public_key_from_external(
    const struct KYBER_public_key *external) {
  static_assert(sizeof(struct KYBER_public_key) >= sizeof(struct public_key),
                "Kyber public key is too small");
  static_assert(alignof(struct KYBER_public_key) >= alignof(struct public_key),
                "Kyber public key align incorrect");
  return (struct public_key *)external;
}

// Internal layout of a Kyber private key: the public key, the secret vector s,
// and the secret used to derive the shared secret when the FO transform
// detects an invalid ciphertext.
struct private_key {
  struct public_key pub;
  vector s;
  uint8_t fo_failure_secret[32];
};

// Reinterprets the caller-visible opaque |KYBER_private_key| as the internal
// |private_key| layout.
static struct private_key *private_key_from_external(
    const struct KYBER_private_key *external) {
  static_assert(sizeof(struct KYBER_private_key) >= sizeof(struct private_key),
                "Kyber private key too small");
  static_assert(
      alignof(struct KYBER_private_key) >= alignof(struct private_key),
      "Kyber private key align incorrect");
  return (struct private_key *)external;
}

}  // namespace

// Calls |KYBER_generate_key_external_entropy| with random bytes from
// |RAND_bytes|.
void KYBER_generate_key(uint8_t out_encoded_public_key[KYBER_PUBLIC_KEY_BYTES],
                        struct KYBER_private_key *out_private_key) {
  uint8_t entropy[KYBER_GENERATE_KEY_ENTROPY];
  RAND_bytes(entropy, sizeof(entropy));
  // Mark the entropy as secret for constant-time validation tooling.
  CONSTTIME_SECRET(entropy, sizeof(entropy));
  KYBER_generate_key_external_entropy(out_encoded_public_key, out_private_key,
                                      entropy);
}

// Serialises |pub| (t followed by rho) into |out|. Returns one on success and
// zero on allocation failure.
static int kyber_marshal_public_key(CBB *out, const struct public_key *pub) {
  uint8_t *vector_output;
  if (!CBB_add_space(out, &vector_output, kEncodedVectorSize)) {
    return 0;
  }
  vector_encode(vector_output, &pub->t, kLog2Prime);
  if (!CBB_add_bytes(out, pub->rho, sizeof(pub->rho))) {
    return 0;
  }
  return 1;
}

// Algorithms 4 and 7 of the Kyber spec. Algorithms are combined since key
// generation is not part of the FO transform, and the spec uses Algorithm 7 to
// specify the actual key format.
void KYBER_generate_key_external_entropy(
    uint8_t out_encoded_public_key[KYBER_PUBLIC_KEY_BYTES],
    struct KYBER_private_key *out_private_key,
    const uint8_t entropy[KYBER_GENERATE_KEY_ENTROPY]) {
  struct private_key *priv = private_key_from_external(out_private_key);
  uint8_t hashed[64];
  hash_g(hashed, entropy, 32);
  const uint8_t *const rho = hashed;
  const uint8_t *const sigma = hashed + 32;
  // rho is public.
  CONSTTIME_DECLASSIFY(rho, 32);
  OPENSSL_memcpy(priv->pub.rho, hashed, sizeof(priv->pub.rho));
  matrix_expand(&priv->pub.m, rho);
  uint8_t counter = 0;
  vector_generate_secret_eta_2(&priv->s, &counter, sigma);
  vector_ntt(&priv->s);
  vector error;
  vector_generate_secret_eta_2(&error, &counter, sigma);
  vector_ntt(&error);
  // t = A^T * s + e.
  matrix_mult_transpose(&priv->pub.t, &priv->pub.m, &priv->s);
  vector_add(&priv->pub.t, &error);
  // t is part of the public key and thus is public.
  CONSTTIME_DECLASSIFY(&priv->pub.t, sizeof(priv->pub.t));

  CBB cbb;
  CBB_init_fixed(&cbb, out_encoded_public_key, KYBER_PUBLIC_KEY_BYTES);
  if (!kyber_marshal_public_key(&cbb, &priv->pub)) {
    // The buffer is fixed-size and large enough; failure here is a bug.
    abort();
  }

  hash_h(priv->pub.public_key_hash, out_encoded_public_key,
         KYBER_PUBLIC_KEY_BYTES);
  OPENSSL_memcpy(priv->fo_failure_secret, entropy + 32, 32);
}

// Copies the public half of |private_key| into |out_public_key|.
void KYBER_public_from_private(struct KYBER_public_key *out_public_key,
                               const struct KYBER_private_key *private_key) {
  struct public_key *const pub = public_key_from_external(out_public_key);
  const struct private_key *const priv = private_key_from_external(private_key);
  *pub = priv->pub;
}

// Algorithm 5 of the Kyber spec. Encrypts a message with given randomness to
// the ciphertext in |out|. Without applying the Fujisaki-Okamoto transform this
// would not result in a CCA secure scheme, since lattice schemes are vulnerable
// to decryption failure oracles.
static void encrypt_cpa(uint8_t out[KYBER_CIPHERTEXT_BYTES],
                        const struct public_key *pub, const uint8_t message[32],
                        const uint8_t randomness[32]) {
  uint8_t counter = 0;
  vector secret;
  vector_generate_secret_eta_2(&secret, &counter, randomness);
  vector_ntt(&secret);
  vector error;
  vector_generate_secret_eta_2(&error, &counter, randomness);
  uint8_t input[33];
  OPENSSL_memcpy(input, randomness, 32);
  input[32] = counter;
  scalar scalar_error;
  scalar_centered_binomial_distribution_eta_2_with_prf(&scalar_error, input);
  // u = A * r + e1.
  vector u;
  matrix_mult(&u, &pub->m, &secret);
  vector_inverse_ntt(&u);
  vector_add(&u, &error);
  // v = <t, r> + e2 + Decompress_1(message).
  scalar v;
  scalar_inner_product(&v, &pub->t, &secret);
  scalar_inverse_ntt(&v);
  scalar_add(&v, &scalar_error);
  scalar expanded_message;
  scalar_decode_1(&expanded_message, message);
  scalar_decompress(&expanded_message, 1);
  scalar_add(&v, &expanded_message);
  vector_compress(&u, kDU);
  vector_encode(out, &u, kDU);
  scalar_compress(&v, kDV);
  scalar_encode(out + kCompressedVectorSize, &v, kDV);
}

// Calls |KYBER_encap_external_entropy| with random bytes from |RAND_bytes|.
void KYBER_encap(uint8_t out_ciphertext[KYBER_CIPHERTEXT_BYTES],
                 uint8_t out_shared_secret[KYBER_SHARED_SECRET_BYTES],
                 const struct KYBER_public_key *public_key) {
  uint8_t entropy[KYBER_ENCAP_ENTROPY];
  RAND_bytes(entropy, KYBER_ENCAP_ENTROPY);
  CONSTTIME_SECRET(entropy, KYBER_ENCAP_ENTROPY);
  KYBER_encap_external_entropy(out_ciphertext, out_shared_secret, public_key,
                               entropy);
}

// Algorithm 8 of the Kyber spec, save for line 2 of the spec. The spec there
// hashes the output of the system's random number generator, since the FO
// transform will reveal it to the decrypting party. There is no reason to do
// this when a secure random number generator is used. When an insecure random
// number generator is used, the caller should switch to a secure one before
// calling this method.
void KYBER_encap_external_entropy(
    uint8_t out_ciphertext[KYBER_CIPHERTEXT_BYTES],
    uint8_t out_shared_secret[KYBER_SHARED_SECRET_BYTES],
    const struct KYBER_public_key *public_key,
    const uint8_t entropy[KYBER_ENCAP_ENTROPY]) {
  const struct public_key *pub = public_key_from_external(public_key);
  // prekey_and_randomness = G(entropy || H(encoded public key)).
  uint8_t input[64];
  OPENSSL_memcpy(input, entropy, KYBER_ENCAP_ENTROPY);
  OPENSSL_memcpy(input + KYBER_ENCAP_ENTROPY, pub->public_key_hash,
                 sizeof(input) - KYBER_ENCAP_ENTROPY);
  uint8_t prekey_and_randomness[64];
  hash_g(prekey_and_randomness, input, sizeof(input));
  encrypt_cpa(out_ciphertext, pub, entropy, prekey_and_randomness + 32);
  // The ciphertext is public.
  CONSTTIME_DECLASSIFY(out_ciphertext, KYBER_CIPHERTEXT_BYTES);
  hash_h(prekey_and_randomness + 32, out_ciphertext, KYBER_CIPHERTEXT_BYTES);
  kdf(out_shared_secret, KYBER_SHARED_SECRET_BYTES, prekey_and_randomness,
      sizeof(prekey_and_randomness));
}

// Algorithm 6 of the Kyber spec.
static void decrypt_cpa(uint8_t out[32], const struct private_key *priv,
                        const uint8_t ciphertext[KYBER_CIPHERTEXT_BYTES]) {
  vector u;
  vector_decode(&u, ciphertext, kDU);
  vector_decompress(&u, kDU);
  vector_ntt(&u);
  scalar v;
  scalar_decode(&v, ciphertext + kCompressedVectorSize, kDV);
  scalar_decompress(&v, kDV);
  // message = Compress_1(v - <s, u>).
  scalar mask;
  scalar_inner_product(&mask, &priv->s, &u);
  scalar_inverse_ntt(&mask);
  scalar_sub(&v, &mask);
  scalar_compress(&v, 1);
  scalar_encode_1(out, &v);
}

// Algorithm 9 of the Kyber spec, performing the FO transform by running
// encrypt_cpa on the decrypted message. The spec does not allow the decryption
// failure to be passed on to the caller, and instead returns a result that is
// deterministic but unpredictable to anyone without knowledge of the private
// key.
void KYBER_decap(uint8_t out_shared_secret[KYBER_SHARED_SECRET_BYTES],
                 const uint8_t ciphertext[KYBER_CIPHERTEXT_BYTES],
                 const struct KYBER_private_key *private_key) {
  const struct private_key *priv = private_key_from_external(private_key);
  uint8_t decrypted[64];
  decrypt_cpa(decrypted, priv, ciphertext);
  OPENSSL_memcpy(decrypted + 32, priv->pub.public_key_hash,
                 sizeof(decrypted) - 32);
  uint8_t prekey_and_randomness[64];
  hash_g(prekey_and_randomness, decrypted, sizeof(decrypted));
  // Re-encrypt the decrypted message and compare, in constant time, against
  // the received ciphertext.
  uint8_t expected_ciphertext[KYBER_CIPHERTEXT_BYTES];
  encrypt_cpa(expected_ciphertext, &priv->pub, decrypted,
              prekey_and_randomness + 32);
  uint8_t mask =
      constant_time_eq_int_8(CRYPTO_memcmp(ciphertext, expected_ciphertext,
                                           sizeof(expected_ciphertext)),
                             0);
  // On mismatch the shared secret is derived from |fo_failure_secret| instead,
  // selected without branching on the comparison result.
  uint8_t input[64];
  for (int i = 0; i < 32; i++) {
    input[i] = constant_time_select_8(mask, prekey_and_randomness[i],
                                      priv->fo_failure_secret[i]);
  }
  hash_h(input + 32, ciphertext, KYBER_CIPHERTEXT_BYTES);
  kdf(out_shared_secret, KYBER_SHARED_SECRET_BYTES, input, sizeof(input));
}

// Serialises |public_key| using the internal marshalling helper.
int KYBER_marshal_public_key(CBB *out,
                             const struct KYBER_public_key *public_key) {
  return kyber_marshal_public_key(out, public_key_from_external(public_key));
}

//
// kyber_parse_public_key_no_hash parses |in| into |pub| but doesn't calculate
// the value of |pub->public_key_hash|.
static int kyber_parse_public_key_no_hash(struct public_key *pub, CBS *in) {
  CBS t_bytes;
  if (!CBS_get_bytes(in, &t_bytes, kEncodedVectorSize) ||
      !vector_decode(&pub->t, CBS_data(&t_bytes), kLog2Prime) ||
      !CBS_copy_bytes(in, pub->rho, sizeof(pub->rho))) {
    return 0;
  }
  // The matrix is not serialised; it is re-derived from rho.
  matrix_expand(&pub->m, pub->rho);
  return 1;
}

// Parses a serialised public key and caches the hash of its encoding. Returns
// one on success or zero if |in| is malformed or has trailing data.
int KYBER_parse_public_key(struct KYBER_public_key *public_key, CBS *in) {
  struct public_key *pub = public_key_from_external(public_key);
  CBS orig_in = *in;
  if (!kyber_parse_public_key_no_hash(pub, in) ||  //
      CBS_len(in) != 0) {
    return 0;
  }
  hash_h(pub->public_key_hash, CBS_data(&orig_in), CBS_len(&orig_in));
  return 1;
}

// Serialises |private_key| as s || public key || public key hash ||
// fo_failure_secret. Returns one on success and zero on allocation failure.
int KYBER_marshal_private_key(CBB *out,
                              const struct KYBER_private_key *private_key) {
  const struct private_key *const priv = private_key_from_external(private_key);
  uint8_t *s_output;
  if (!CBB_add_space(out, &s_output, kEncodedVectorSize)) {
    return 0;
  }
  vector_encode(s_output, &priv->s, kLog2Prime);
  if (!kyber_marshal_public_key(out, &priv->pub) ||
      !CBB_add_bytes(out, priv->pub.public_key_hash,
                     sizeof(priv->pub.public_key_hash)) ||
      !CBB_add_bytes(out, priv->fo_failure_secret,
                     sizeof(priv->fo_failure_secret))) {
    return 0;
  }
  return 1;
}

// Parses a private key in the format written by |KYBER_marshal_private_key|.
// Returns one on success or zero if |in| is malformed or has trailing data.
int KYBER_parse_private_key(struct KYBER_private_key *out_private_key,
                            CBS *in) {
  struct private_key *const priv = private_key_from_external(out_private_key);

  CBS s_bytes;
  if (!CBS_get_bytes(in, &s_bytes, kEncodedVectorSize) ||
      !vector_decode(&priv->s, CBS_data(&s_bytes), kLog2Prime) ||
      !kyber_parse_public_key_no_hash(&priv->pub, in) ||
      !CBS_copy_bytes(in, priv->pub.public_key_hash,
                      sizeof(priv->pub.public_key_hash)) ||
      !CBS_copy_bytes(in, priv->fo_failure_secret,
                      sizeof(priv->fo_failure_secret)) ||
      CBS_len(in) != 0) {
    return 0;
  }
  return 1;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/lhash/internal.h
================================================
/*
 *
Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_LHASH_INTERNAL_H #define OPENSSL_HEADER_LHASH_INTERNAL_H #include #if defined(__cplusplus) extern "C" { #endif // lhash is a traditional, chaining hash table that automatically expands and // contracts as needed. One should not use the lh_* functions directly, rather // use the type-safe macro wrappers: // // A hash table of a specific type of object has type |LHASH_OF(type)|. This // can be defined (once) with |DEFINE_LHASH_OF(type)| and declared where needed // with |DECLARE_LHASH_OF(type)|. For example: // // struct foo { // int bar; // }; // // DEFINE_LHASH_OF(struct foo) // // Although note that the hash table will contain /pointers/ to |foo|. // // A macro will be defined for each of the |OPENSSL_lh_*| functions below. For // |LHASH_OF(foo)|, the macros would be |lh_foo_new|, |lh_foo_num_items| etc. // lhash_cmp_func is a comparison function that returns a value equal, or not // equal, to zero depending on whether |*a| is equal, or not equal to |*b|, // respectively. Note the difference between this and |stack_cmp_func| in that // this takes pointers to the objects directly. // // This function's actual type signature is int (*)(const T*, const T*). The // low-level |lh_*| functions will be passed a type-specific wrapper to call it // correctly. typedef int (*lhash_cmp_func)(const void *a, const void *b); typedef int (*lhash_cmp_func_helper)(lhash_cmp_func func, const void *a, const void *b); // lhash_hash_func is a function that maps an object to a uniformly distributed // uint32_t. // // This function's actual type signature is uint32_t (*)(const T*). 
The // low-level |lh_*| functions will be passed a type-specific wrapper to call it // correctly. typedef uint32_t (*lhash_hash_func)(const void *a); typedef uint32_t (*lhash_hash_func_helper)(lhash_hash_func func, const void *a); typedef struct lhash_st _LHASH; // OPENSSL_lh_new returns a new, empty hash table or NULL on error. OPENSSL_EXPORT _LHASH *OPENSSL_lh_new(lhash_hash_func hash, lhash_cmp_func comp); // OPENSSL_lh_free frees the hash table itself but none of the elements. See // |OPENSSL_lh_doall|. OPENSSL_EXPORT void OPENSSL_lh_free(_LHASH *lh); // OPENSSL_lh_num_items returns the number of items in |lh|. OPENSSL_EXPORT size_t OPENSSL_lh_num_items(const _LHASH *lh); // OPENSSL_lh_retrieve finds an element equal to |data| in the hash table and // returns it. If no such element exists, it returns NULL. OPENSSL_EXPORT void *OPENSSL_lh_retrieve(const _LHASH *lh, const void *data, lhash_hash_func_helper call_hash_func, lhash_cmp_func_helper call_cmp_func); // OPENSSL_lh_retrieve_key finds an element matching |key|, given the specified // hash and comparison function. This differs from |OPENSSL_lh_retrieve| in that // the key may be a different type than the values stored in |lh|. |key_hash| // and |cmp_key| must be compatible with the functions passed into // |OPENSSL_lh_new|. OPENSSL_EXPORT void *OPENSSL_lh_retrieve_key(const _LHASH *lh, const void *key, uint32_t key_hash, int (*cmp_key)(const void *key, const void *value)); // OPENSSL_lh_insert inserts |data| into the hash table. If an existing element // is equal to |data| (with respect to the comparison function) then |*old_data| // will be set to that value and it will be replaced. Otherwise, or in the // event of an error, |*old_data| will be set to NULL. It returns one on // success or zero in the case of an allocation error. 
OPENSSL_EXPORT int OPENSSL_lh_insert(_LHASH *lh, void **old_data, void *data, lhash_hash_func_helper call_hash_func, lhash_cmp_func_helper call_cmp_func); // OPENSSL_lh_delete removes an element equal to |data| from the hash table and // returns it. If no such element is found, it returns NULL. OPENSSL_EXPORT void *OPENSSL_lh_delete(_LHASH *lh, const void *data, lhash_hash_func_helper call_hash_func, lhash_cmp_func_helper call_cmp_func); // OPENSSL_lh_doall_arg calls |func| on each element of the hash table and also // passes |arg| as the second argument. // TODO(fork): rename this OPENSSL_EXPORT void OPENSSL_lh_doall_arg(_LHASH *lh, void (*func)(void *, void *), void *arg); #define DEFINE_LHASH_OF(type) \ /* We disable MSVC C4191 in this macro, which warns when pointers are cast \ * to the wrong type. While the cast itself is valid, it is often a bug \ * because calling it through the cast is UB. However, we never actually \ * call functions as |lhash_cmp_func|. The type is just a type-erased \ * function pointer. (C does not guarantee function pointers fit in \ * |void*|, and GCC will warn on this.) Thus we just disable the false \ * positive warning. 
*/ \ OPENSSL_MSVC_PRAGMA(warning(push)) \ OPENSSL_MSVC_PRAGMA(warning(disable : 4191)) \ \ DECLARE_LHASH_OF(type) \ \ typedef int (*lhash_##type##_cmp_func)(const type *, const type *); \ typedef uint32_t (*lhash_##type##_hash_func)(const type *); \ \ inline int lh_##type##_call_cmp_func(lhash_cmp_func func, const void *a, \ const void *b) { \ return ((lhash_##type##_cmp_func)func)((const type *)a, (const type *)b); \ } \ \ inline uint32_t lh_##type##_call_hash_func(lhash_hash_func func, \ const void *a) { \ return ((lhash_##type##_hash_func)func)((const type *)a); \ } \ \ inline LHASH_OF(type) *lh_##type##_new(lhash_##type##_hash_func hash, \ lhash_##type##_cmp_func comp) { \ return (LHASH_OF(type) *)OPENSSL_lh_new((lhash_hash_func)hash, \ (lhash_cmp_func)comp); \ } \ \ inline void lh_##type##_free(LHASH_OF(type) *lh) { \ OPENSSL_lh_free((_LHASH *)lh); \ } \ \ inline size_t lh_##type##_num_items(const LHASH_OF(type) *lh) { \ return OPENSSL_lh_num_items((const _LHASH *)lh); \ } \ \ inline type *lh_##type##_retrieve(const LHASH_OF(type) *lh, \ const type *data) { \ return (type *)OPENSSL_lh_retrieve((const _LHASH *)lh, data, \ lh_##type##_call_hash_func, \ lh_##type##_call_cmp_func); \ } \ \ typedef struct { \ int (*cmp_key)(const void *key, const type *value); \ const void *key; \ } LHASH_CMP_KEY_##type; \ \ inline int lh_##type##_call_cmp_key(const void *key, const void *value) { \ const LHASH_CMP_KEY_##type *cb = (const LHASH_CMP_KEY_##type *)key; \ return cb->cmp_key(cb->key, (const type *)value); \ } \ \ inline type *lh_##type##_retrieve_key( \ const LHASH_OF(type) *lh, const void *key, uint32_t key_hash, \ int (*cmp_key)(const void *key, const type *value)) { \ LHASH_CMP_KEY_##type cb = {cmp_key, key}; \ return (type *)OPENSSL_lh_retrieve_key((const _LHASH *)lh, &cb, key_hash, \ lh_##type##_call_cmp_key); \ } \ \ inline int lh_##type##_insert(LHASH_OF(type) *lh, type **old_data, \ type *data) { \ void *old_data_void = NULL; \ int ret = 
OPENSSL_lh_insert((_LHASH *)lh, &old_data_void, data, \ lh_##type##_call_hash_func, \ lh_##type##_call_cmp_func); \ *old_data = (type *)old_data_void; \ return ret; \ } \ \ inline type *lh_##type##_delete(LHASH_OF(type) *lh, const type *data) { \ return (type *)OPENSSL_lh_delete((_LHASH *)lh, data, \ lh_##type##_call_hash_func, \ lh_##type##_call_cmp_func); \ } \ \ typedef struct { \ void (*doall_arg)(type *, void *); \ void *arg; \ } LHASH_DOALL_##type; \ \ inline void lh_##type##_call_doall_arg(void *value, void *arg) { \ const LHASH_DOALL_##type *cb = (const LHASH_DOALL_##type *)arg; \ cb->doall_arg((type *)value, cb->arg); \ } \ \ inline void lh_##type##_doall_arg(LHASH_OF(type) *lh, \ void (*func)(type *, void *), void *arg) { \ LHASH_DOALL_##type cb = {func, arg}; \ OPENSSL_lh_doall_arg((_LHASH *)lh, lh_##type##_call_doall_arg, &cb); \ } \ \ OPENSSL_MSVC_PRAGMA(warning(pop)) #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_LHASH_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/lhash/lhash.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../internal.h" #include "internal.h" // kMinNumBuckets is the minimum size of the buckets array in an |_LHASH|. static const size_t kMinNumBuckets = 16; // kMaxAverageChainLength contains the maximum, average chain length. When the // average chain length exceeds this value, the hash table will be resized. static const size_t kMaxAverageChainLength = 2; static const size_t kMinAverageChainLength = 1; // lhash_item_st is an element of a hash chain. 
It points to the opaque data // for this element and to the next item in the chain. The linked-list is NULL // terminated. typedef struct lhash_item_st { void *data; struct lhash_item_st *next; // hash contains the cached, hash value of |data|. uint32_t hash; } LHASH_ITEM; struct lhash_st { // num_items contains the total number of items in the hash table. size_t num_items; // buckets is an array of |num_buckets| pointers. Each points to the head of // a chain of LHASH_ITEM objects that have the same hash value, mod // |num_buckets|. LHASH_ITEM **buckets; // num_buckets contains the length of |buckets|. This value is always >= // kMinNumBuckets. size_t num_buckets; // callback_depth contains the current depth of |lh_doall| or |lh_doall_arg| // calls. If non-zero then this suppresses resizing of the |buckets| array, // which would otherwise disrupt the iteration. unsigned callback_depth; lhash_cmp_func comp; lhash_hash_func hash; }; _LHASH *OPENSSL_lh_new(lhash_hash_func hash, lhash_cmp_func comp) { _LHASH *ret = reinterpret_cast<_LHASH *>(OPENSSL_zalloc(sizeof(_LHASH))); if (ret == NULL) { return NULL; } ret->num_buckets = kMinNumBuckets; ret->buckets = reinterpret_cast( OPENSSL_calloc(ret->num_buckets, sizeof(LHASH_ITEM *))); if (ret->buckets == NULL) { OPENSSL_free(ret); return NULL; } ret->comp = comp; ret->hash = hash; return ret; } void OPENSSL_lh_free(_LHASH *lh) { if (lh == NULL) { return; } for (size_t i = 0; i < lh->num_buckets; i++) { LHASH_ITEM *next; for (LHASH_ITEM *n = lh->buckets[i]; n != NULL; n = next) { next = n->next; OPENSSL_free(n); } } OPENSSL_free(lh->buckets); OPENSSL_free(lh); } size_t OPENSSL_lh_num_items(const _LHASH *lh) { return lh->num_items; } // get_next_ptr_and_hash returns a pointer to the pointer that points to the // item equal to |data|. 
In other words, it searches for an item equal to |data| // and, if it's at the start of a chain, then it returns a pointer to an // element of |lh->buckets|, otherwise it returns a pointer to the |next| // element of the previous item in the chain. If an element equal to |data| is // not found, it returns a pointer that points to a NULL pointer. If |out_hash| // is not NULL, then it also puts the hash value of |data| in |*out_hash|. static LHASH_ITEM **get_next_ptr_and_hash(const _LHASH *lh, uint32_t *out_hash, const void *data, lhash_hash_func_helper call_hash_func, lhash_cmp_func_helper call_cmp_func) { const uint32_t hash = call_hash_func(lh->hash, data); if (out_hash != NULL) { *out_hash = hash; } LHASH_ITEM **ret = &lh->buckets[hash % lh->num_buckets]; for (LHASH_ITEM *cur = *ret; cur != NULL; cur = *ret) { if (call_cmp_func(lh->comp, cur->data, data) == 0) { break; } ret = &cur->next; } return ret; } // get_next_ptr_by_key behaves like |get_next_ptr_and_hash| but takes a key // which may be a different type from the values stored in |lh|. static LHASH_ITEM **get_next_ptr_by_key(const _LHASH *lh, const void *key, uint32_t key_hash, int (*cmp_key)(const void *key, const void *value)) { LHASH_ITEM **ret = &lh->buckets[key_hash % lh->num_buckets]; for (LHASH_ITEM *cur = *ret; cur != NULL; cur = *ret) { if (cmp_key(key, cur->data) == 0) { break; } ret = &cur->next; } return ret; } void *OPENSSL_lh_retrieve(const _LHASH *lh, const void *data, lhash_hash_func_helper call_hash_func, lhash_cmp_func_helper call_cmp_func) { LHASH_ITEM **next_ptr = get_next_ptr_and_hash(lh, NULL, data, call_hash_func, call_cmp_func); return *next_ptr == NULL ? NULL : (*next_ptr)->data; } void *OPENSSL_lh_retrieve_key(const _LHASH *lh, const void *key, uint32_t key_hash, int (*cmp_key)(const void *key, const void *value)) { LHASH_ITEM **next_ptr = get_next_ptr_by_key(lh, key, key_hash, cmp_key); return *next_ptr == NULL ? 
NULL : (*next_ptr)->data; } // lh_rebucket allocates a new array of |new_num_buckets| pointers and // redistributes the existing items into it before making it |lh->buckets| and // freeing the old array. static void lh_rebucket(_LHASH *lh, const size_t new_num_buckets) { LHASH_ITEM **new_buckets, *cur, *next; size_t i, alloc_size; alloc_size = sizeof(LHASH_ITEM *) * new_num_buckets; if (alloc_size / sizeof(LHASH_ITEM *) != new_num_buckets) { return; } new_buckets = reinterpret_cast(OPENSSL_zalloc(alloc_size)); if (new_buckets == NULL) { return; } for (i = 0; i < lh->num_buckets; i++) { for (cur = lh->buckets[i]; cur != NULL; cur = next) { const size_t new_bucket = cur->hash % new_num_buckets; next = cur->next; cur->next = new_buckets[new_bucket]; new_buckets[new_bucket] = cur; } } OPENSSL_free(lh->buckets); lh->num_buckets = new_num_buckets; lh->buckets = new_buckets; } // lh_maybe_resize resizes the |buckets| array if needed. static void lh_maybe_resize(_LHASH *lh) { size_t avg_chain_length; if (lh->callback_depth > 0) { // Don't resize the hash if we are currently iterating over it. 
return; } assert(lh->num_buckets >= kMinNumBuckets); avg_chain_length = lh->num_items / lh->num_buckets; if (avg_chain_length > kMaxAverageChainLength) { const size_t new_num_buckets = lh->num_buckets * 2; if (new_num_buckets > lh->num_buckets) { lh_rebucket(lh, new_num_buckets); } } else if (avg_chain_length < kMinAverageChainLength && lh->num_buckets > kMinNumBuckets) { size_t new_num_buckets = lh->num_buckets / 2; if (new_num_buckets < kMinNumBuckets) { new_num_buckets = kMinNumBuckets; } lh_rebucket(lh, new_num_buckets); } } int OPENSSL_lh_insert(_LHASH *lh, void **old_data, void *data, lhash_hash_func_helper call_hash_func, lhash_cmp_func_helper call_cmp_func) { uint32_t hash; LHASH_ITEM **next_ptr, *item; *old_data = NULL; next_ptr = get_next_ptr_and_hash(lh, &hash, data, call_hash_func, call_cmp_func); if (*next_ptr != NULL) { // An element equal to |data| already exists in the hash table. It will be // replaced. *old_data = (*next_ptr)->data; (*next_ptr)->data = data; return 1; } // An element equal to |data| doesn't exist in the hash table yet. item = reinterpret_cast(OPENSSL_malloc(sizeof(LHASH_ITEM))); if (item == NULL) { return 0; } item->data = data; item->hash = hash; item->next = NULL; *next_ptr = item; lh->num_items++; lh_maybe_resize(lh); return 1; } void *OPENSSL_lh_delete(_LHASH *lh, const void *data, lhash_hash_func_helper call_hash_func, lhash_cmp_func_helper call_cmp_func) { LHASH_ITEM **next_ptr, *item, *ret; next_ptr = get_next_ptr_and_hash(lh, NULL, data, call_hash_func, call_cmp_func); if (*next_ptr == NULL) { // No such element. return NULL; } item = *next_ptr; *next_ptr = item->next; ret = reinterpret_cast(item->data); OPENSSL_free(item); lh->num_items--; lh_maybe_resize(lh); return ret; } void OPENSSL_lh_doall_arg(_LHASH *lh, void (*func)(void *, void *), void *arg) { if (lh == NULL) { return; } if (lh->callback_depth < UINT_MAX) { // |callback_depth| is a saturating counter. 
lh->callback_depth++; } for (size_t i = 0; i < lh->num_buckets; i++) { LHASH_ITEM *next; for (LHASH_ITEM *cur = lh->buckets[i]; cur != NULL; cur = next) { next = cur->next; func(cur->data, arg); } } if (lh->callback_depth < UINT_MAX) { lh->callback_depth--; } // The callback may have added or removed elements and the non-zero value of // |callback_depth| will have suppressed any resizing. Thus any needed // resizing is done here. lh_maybe_resize(lh); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/md4/md4.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include "../fipsmodule/digest/md32_common.h" #include "../internal.h" uint8_t *MD4(const uint8_t *data, size_t len, uint8_t out[MD4_DIGEST_LENGTH]) { MD4_CTX ctx; MD4_Init(&ctx); MD4_Update(&ctx, data, len); MD4_Final(out, &ctx); return out; } // Implemented from RFC 1186 The MD4 Message-Digest Algorithm. 
int MD4_Init(MD4_CTX *md4) { OPENSSL_memset(md4, 0, sizeof(MD4_CTX)); md4->h[0] = 0x67452301UL; md4->h[1] = 0xefcdab89UL; md4->h[2] = 0x98badcfeUL; md4->h[3] = 0x10325476UL; return 1; } static void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num); void MD4_Transform(MD4_CTX *c, const uint8_t data[MD4_CBLOCK]) { md4_block_data_order(c->h, data, 1); } int MD4_Update(MD4_CTX *c, const void *data, size_t len) { crypto_md32_update(&md4_block_data_order, c->h, c->data, MD4_CBLOCK, &c->num, &c->Nh, &c->Nl, reinterpret_cast(data), len); return 1; } int MD4_Final(uint8_t out[MD4_DIGEST_LENGTH], MD4_CTX *c) { crypto_md32_final(&md4_block_data_order, c->h, c->data, MD4_CBLOCK, &c->num, c->Nh, c->Nl, /*is_big_endian=*/0); CRYPTO_store_u32_le(out, c->h[0]); CRYPTO_store_u32_le(out + 4, c->h[1]); CRYPTO_store_u32_le(out + 8, c->h[2]); CRYPTO_store_u32_le(out + 12, c->h[3]); return 1; } // As pointed out by Wei Dai , the above can be // simplified to the code below. Wei attributes these optimizations // to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. 
#define F(b, c, d) ((((c) ^ (d)) & (b)) ^ (d)) #define G(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d))) #define H(b, c, d) ((b) ^ (c) ^ (d)) #define R0(a, b, c, d, k, s, t) \ do { \ (a) += ((k) + (t) + F((b), (c), (d))); \ (a) = CRYPTO_rotl_u32(a, s); \ } while (0) #define R1(a, b, c, d, k, s, t) \ do { \ (a) += ((k) + (t) + G((b), (c), (d))); \ (a) = CRYPTO_rotl_u32(a, s); \ } while (0) #define R2(a, b, c, d, k, s, t) \ do { \ (a) += ((k) + (t) + H((b), (c), (d))); \ (a) = CRYPTO_rotl_u32(a, s); \ } while (0) static void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { uint32_t A, B, C, D; uint32_t X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15; A = state[0]; B = state[1]; C = state[2]; D = state[3]; for (; num--;) { X0 = CRYPTO_load_u32_le(data); data += 4; X1 = CRYPTO_load_u32_le(data); data += 4; // Round 0 R0(A, B, C, D, X0, 3, 0); X2 = CRYPTO_load_u32_le(data); data += 4; R0(D, A, B, C, X1, 7, 0); X3 = CRYPTO_load_u32_le(data); data += 4; R0(C, D, A, B, X2, 11, 0); X4 = CRYPTO_load_u32_le(data); data += 4; R0(B, C, D, A, X3, 19, 0); X5 = CRYPTO_load_u32_le(data); data += 4; R0(A, B, C, D, X4, 3, 0); X6 = CRYPTO_load_u32_le(data); data += 4; R0(D, A, B, C, X5, 7, 0); X7 = CRYPTO_load_u32_le(data); data += 4; R0(C, D, A, B, X6, 11, 0); X8 = CRYPTO_load_u32_le(data); data += 4; R0(B, C, D, A, X7, 19, 0); X9 = CRYPTO_load_u32_le(data); data += 4; R0(A, B, C, D, X8, 3, 0); X10 = CRYPTO_load_u32_le(data); data += 4; R0(D, A, B, C, X9, 7, 0); X11 = CRYPTO_load_u32_le(data); data += 4; R0(C, D, A, B, X10, 11, 0); X12 = CRYPTO_load_u32_le(data); data += 4; R0(B, C, D, A, X11, 19, 0); X13 = CRYPTO_load_u32_le(data); data += 4; R0(A, B, C, D, X12, 3, 0); X14 = CRYPTO_load_u32_le(data); data += 4; R0(D, A, B, C, X13, 7, 0); X15 = CRYPTO_load_u32_le(data); data += 4; R0(C, D, A, B, X14, 11, 0); R0(B, C, D, A, X15, 19, 0); // Round 1 R1(A, B, C, D, X0, 3, 0x5A827999L); R1(D, A, B, C, X4, 5, 0x5A827999L); R1(C, D, A, B, X8, 
9, 0x5A827999L); R1(B, C, D, A, X12, 13, 0x5A827999L); R1(A, B, C, D, X1, 3, 0x5A827999L); R1(D, A, B, C, X5, 5, 0x5A827999L); R1(C, D, A, B, X9, 9, 0x5A827999L); R1(B, C, D, A, X13, 13, 0x5A827999L); R1(A, B, C, D, X2, 3, 0x5A827999L); R1(D, A, B, C, X6, 5, 0x5A827999L); R1(C, D, A, B, X10, 9, 0x5A827999L); R1(B, C, D, A, X14, 13, 0x5A827999L); R1(A, B, C, D, X3, 3, 0x5A827999L); R1(D, A, B, C, X7, 5, 0x5A827999L); R1(C, D, A, B, X11, 9, 0x5A827999L); R1(B, C, D, A, X15, 13, 0x5A827999L); // Round 2 R2(A, B, C, D, X0, 3, 0x6ED9EBA1L); R2(D, A, B, C, X8, 9, 0x6ED9EBA1L); R2(C, D, A, B, X4, 11, 0x6ED9EBA1L); R2(B, C, D, A, X12, 15, 0x6ED9EBA1L); R2(A, B, C, D, X2, 3, 0x6ED9EBA1L); R2(D, A, B, C, X10, 9, 0x6ED9EBA1L); R2(C, D, A, B, X6, 11, 0x6ED9EBA1L); R2(B, C, D, A, X14, 15, 0x6ED9EBA1L); R2(A, B, C, D, X1, 3, 0x6ED9EBA1L); R2(D, A, B, C, X9, 9, 0x6ED9EBA1L); R2(C, D, A, B, X5, 11, 0x6ED9EBA1L); R2(B, C, D, A, X13, 15, 0x6ED9EBA1L); R2(A, B, C, D, X3, 3, 0x6ED9EBA1L); R2(D, A, B, C, X11, 9, 0x6ED9EBA1L); R2(C, D, A, B, X7, 11, 0x6ED9EBA1L); R2(B, C, D, A, X15, 15, 0x6ED9EBA1L); A = state[0] += A; B = state[1] += B; C = state[2] += C; D = state[3] += D; } } ================================================ FILE: Sources/CNIOBoringSSL/crypto/md5/internal.h ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_MD5_INTERNAL_H #define OPENSSL_HEADER_MD5_INTERNAL_H #include #if defined(__cplusplus) extern "C" { #endif #if !defined(OPENSSL_NO_ASM) && \ (defined(OPENSSL_X86_64) || defined(OPENSSL_X86)) #define MD5_ASM extern void md5_block_asm_data_order(uint32_t *state, const uint8_t *data, size_t num); #endif #if defined(__cplusplus) } // extern "C" #endif #endif // OPENSSL_HEADER_MD5_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/md5/md5.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include "../fipsmodule/digest/md32_common.h" #include "../internal.h" #include "internal.h" uint8_t *MD5(const uint8_t *data, size_t len, uint8_t out[MD5_DIGEST_LENGTH]) { MD5_CTX ctx; MD5_Init(&ctx); MD5_Update(&ctx, data, len); MD5_Final(out, &ctx); return out; } int MD5_Init(MD5_CTX *md5) { OPENSSL_memset(md5, 0, sizeof(MD5_CTX)); md5->h[0] = 0x67452301UL; md5->h[1] = 0xefcdab89UL; md5->h[2] = 0x98badcfeUL; md5->h[3] = 0x10325476UL; return 1; } #if defined(MD5_ASM) #define md5_block_data_order md5_block_asm_data_order #else static void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num); #endif void MD5_Transform(MD5_CTX *c, const uint8_t data[MD5_CBLOCK]) { md5_block_data_order(c->h, data, 1); } int MD5_Update(MD5_CTX *c, const void *data, size_t len) { crypto_md32_update(&md5_block_data_order, c->h, c->data, MD5_CBLOCK, &c->num, &c->Nh, &c->Nl, reinterpret_cast(data), len); return 1; } int MD5_Final(uint8_t out[MD5_DIGEST_LENGTH], MD5_CTX *c) { crypto_md32_final(&md5_block_data_order, c->h, c->data, MD5_CBLOCK, &c->num, c->Nh, c->Nl, /*is_big_endian=*/0); CRYPTO_store_u32_le(out, c->h[0]); CRYPTO_store_u32_le(out + 4, c->h[1]); CRYPTO_store_u32_le(out + 8, c->h[2]); CRYPTO_store_u32_le(out + 12, c->h[3]); return 1; } // As pointed out by Wei Dai , the above can be // simplified to the code below. Wei attributes these optimizations // to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. 
#define F(b, c, d) ((((c) ^ (d)) & (b)) ^ (d)) #define G(b, c, d) ((((b) ^ (c)) & (d)) ^ (c)) #define H(b, c, d) ((b) ^ (c) ^ (d)) #define I(b, c, d) (((~(d)) | (b)) ^ (c)) #define R0(a, b, c, d, k, s, t) \ do { \ (a) += ((k) + (t) + F((b), (c), (d))); \ (a) = CRYPTO_rotl_u32(a, s); \ (a) += (b); \ } while (0) #define R1(a, b, c, d, k, s, t) \ do { \ (a) += ((k) + (t) + G((b), (c), (d))); \ (a) = CRYPTO_rotl_u32(a, s); \ (a) += (b); \ } while (0) #define R2(a, b, c, d, k, s, t) \ do { \ (a) += ((k) + (t) + H((b), (c), (d))); \ (a) = CRYPTO_rotl_u32(a, s); \ (a) += (b); \ } while (0) #define R3(a, b, c, d, k, s, t) \ do { \ (a) += ((k) + (t) + I((b), (c), (d))); \ (a) = CRYPTO_rotl_u32(a, s); \ (a) += (b); \ } while (0) #ifndef MD5_ASM #ifdef X #undef X #endif static void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { uint32_t A, B, C, D; uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10, XX11, XX12, XX13, XX14, XX15; #define X(i) XX##i A = state[0]; B = state[1]; C = state[2]; D = state[3]; for (; num--;) { X(0) = CRYPTO_load_u32_le(data); data += 4; X(1) = CRYPTO_load_u32_le(data); data += 4; // Round 0 R0(A, B, C, D, X(0), 7, 0xd76aa478L); X(2) = CRYPTO_load_u32_le(data); data += 4; R0(D, A, B, C, X(1), 12, 0xe8c7b756L); X(3) = CRYPTO_load_u32_le(data); data += 4; R0(C, D, A, B, X(2), 17, 0x242070dbL); X(4) = CRYPTO_load_u32_le(data); data += 4; R0(B, C, D, A, X(3), 22, 0xc1bdceeeL); X(5) = CRYPTO_load_u32_le(data); data += 4; R0(A, B, C, D, X(4), 7, 0xf57c0fafL); X(6) = CRYPTO_load_u32_le(data); data += 4; R0(D, A, B, C, X(5), 12, 0x4787c62aL); X(7) = CRYPTO_load_u32_le(data); data += 4; R0(C, D, A, B, X(6), 17, 0xa8304613L); X(8) = CRYPTO_load_u32_le(data); data += 4; R0(B, C, D, A, X(7), 22, 0xfd469501L); X(9) = CRYPTO_load_u32_le(data); data += 4; R0(A, B, C, D, X(8), 7, 0x698098d8L); X(10) = CRYPTO_load_u32_le(data); data += 4; R0(D, A, B, C, X(9), 12, 0x8b44f7afL); X(11) = CRYPTO_load_u32_le(data); data += 4; R0(C, D, 
A, B, X(10), 17, 0xffff5bb1L); X(12) = CRYPTO_load_u32_le(data); data += 4; R0(B, C, D, A, X(11), 22, 0x895cd7beL); X(13) = CRYPTO_load_u32_le(data); data += 4; R0(A, B, C, D, X(12), 7, 0x6b901122L); X(14) = CRYPTO_load_u32_le(data); data += 4; R0(D, A, B, C, X(13), 12, 0xfd987193L); X(15) = CRYPTO_load_u32_le(data); data += 4; R0(C, D, A, B, X(14), 17, 0xa679438eL); R0(B, C, D, A, X(15), 22, 0x49b40821L); // Round 1 R1(A, B, C, D, X(1), 5, 0xf61e2562L); R1(D, A, B, C, X(6), 9, 0xc040b340L); R1(C, D, A, B, X(11), 14, 0x265e5a51L); R1(B, C, D, A, X(0), 20, 0xe9b6c7aaL); R1(A, B, C, D, X(5), 5, 0xd62f105dL); R1(D, A, B, C, X(10), 9, 0x02441453L); R1(C, D, A, B, X(15), 14, 0xd8a1e681L); R1(B, C, D, A, X(4), 20, 0xe7d3fbc8L); R1(A, B, C, D, X(9), 5, 0x21e1cde6L); R1(D, A, B, C, X(14), 9, 0xc33707d6L); R1(C, D, A, B, X(3), 14, 0xf4d50d87L); R1(B, C, D, A, X(8), 20, 0x455a14edL); R1(A, B, C, D, X(13), 5, 0xa9e3e905L); R1(D, A, B, C, X(2), 9, 0xfcefa3f8L); R1(C, D, A, B, X(7), 14, 0x676f02d9L); R1(B, C, D, A, X(12), 20, 0x8d2a4c8aL); // Round 2 R2(A, B, C, D, X(5), 4, 0xfffa3942L); R2(D, A, B, C, X(8), 11, 0x8771f681L); R2(C, D, A, B, X(11), 16, 0x6d9d6122L); R2(B, C, D, A, X(14), 23, 0xfde5380cL); R2(A, B, C, D, X(1), 4, 0xa4beea44L); R2(D, A, B, C, X(4), 11, 0x4bdecfa9L); R2(C, D, A, B, X(7), 16, 0xf6bb4b60L); R2(B, C, D, A, X(10), 23, 0xbebfbc70L); R2(A, B, C, D, X(13), 4, 0x289b7ec6L); R2(D, A, B, C, X(0), 11, 0xeaa127faL); R2(C, D, A, B, X(3), 16, 0xd4ef3085L); R2(B, C, D, A, X(6), 23, 0x04881d05L); R2(A, B, C, D, X(9), 4, 0xd9d4d039L); R2(D, A, B, C, X(12), 11, 0xe6db99e5L); R2(C, D, A, B, X(15), 16, 0x1fa27cf8L); R2(B, C, D, A, X(2), 23, 0xc4ac5665L); // Round 3 R3(A, B, C, D, X(0), 6, 0xf4292244L); R3(D, A, B, C, X(7), 10, 0x432aff97L); R3(C, D, A, B, X(14), 15, 0xab9423a7L); R3(B, C, D, A, X(5), 21, 0xfc93a039L); R3(A, B, C, D, X(12), 6, 0x655b59c3L); R3(D, A, B, C, X(3), 10, 0x8f0ccc92L); R3(C, D, A, B, X(10), 15, 0xffeff47dL); R3(B, C, D, A, X(1), 21, 
0x85845dd1L); R3(A, B, C, D, X(8), 6, 0x6fa87e4fL); R3(D, A, B, C, X(15), 10, 0xfe2ce6e0L); R3(C, D, A, B, X(6), 15, 0xa3014314L); R3(B, C, D, A, X(13), 21, 0x4e0811a1L); R3(A, B, C, D, X(4), 6, 0xf7537e82L); R3(D, A, B, C, X(11), 10, 0xbd3af235L); R3(C, D, A, B, X(2), 15, 0x2ad7d2bbL); R3(B, C, D, A, X(9), 21, 0xeb86d391L); A = state[0] += A; B = state[1] += B; C = state[2] += C; D = state[3] += D; } } #undef X #endif #undef F #undef G #undef H #undef I #undef R0 #undef R1 #undef R2 #undef R3 ================================================ FILE: Sources/CNIOBoringSSL/crypto/mem.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #if defined(OPENSSL_WINDOWS) OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include OPENSSL_MSVC_PRAGMA(warning(pop)) #endif #if defined(BORINGSSL_MALLOC_FAILURE_TESTING) #include #include #include #endif #include "internal.h" #define OPENSSL_MALLOC_PREFIX 8 static_assert(OPENSSL_MALLOC_PREFIX >= sizeof(size_t), "size_t too large"); #if defined(OPENSSL_ASAN) extern "C" { void __asan_poison_memory_region(const volatile void *addr, size_t size); void __asan_unpoison_memory_region(const volatile void *addr, size_t size); } #else static void __asan_poison_memory_region(const void *addr, size_t size) {} static void __asan_unpoison_memory_region(const void *addr, size_t size) {} #endif // Windows doesn't really support weak symbols as of May 2019, and Clang on // Windows will emit strong symbols instead. See // https://bugs.llvm.org/show_bug.cgi?id=37598 // // EDK2 targets UEFI but builds as ELF and then translates the binary to // COFF(!). 
Thus it builds with __ELF__ defined but cannot actually cope with // weak symbols. #if !defined(__EDK2_BORINGSSL__) && defined(__ELF__) && defined(__GNUC__) #define WEAK_SYMBOL_FUNC(rettype, name, args) \ extern "C" { \ rettype name args __attribute__((weak)); \ } #else #define WEAK_SYMBOL_FUNC(rettype, name, args) \ static rettype(*const name) args = NULL; #endif #if defined(BORINGSSL_DETECT_SDALLOCX) // sdallocx is a sized |free| function. By passing the size (which we happen to // always know in BoringSSL), the malloc implementation can save work. We cannot // depend on |sdallocx| being available, however, so it's a weak symbol. // // This mechanism is kept opt-in because it assumes that, when |sdallocx| is // defined, it is part of the same allocator as |malloc|. This is usually true // but may break if |malloc| does not implement |sdallocx|, but some other // allocator with |sdallocx| is imported which does. WEAK_SYMBOL_FUNC(void, sdallocx, (void *ptr, size_t size, int flags)) #else static void (*const sdallocx)(void *ptr, size_t size, int flags) = NULL; #endif // The following three functions can be defined to override default heap // allocation and freeing. If defined, it is the responsibility of // |OPENSSL_memory_free| to zero out the memory before returning it to the // system. |OPENSSL_memory_free| will not be passed NULL pointers. // // WARNING: These functions are called on every allocation and free in // BoringSSL across the entire process. They may be called by any code in the // process which calls BoringSSL, including in process initializers and thread // destructors. When called, BoringSSL may hold pthreads locks. Any other code // in the process which, directly or indirectly, calls BoringSSL may be on the // call stack and may itself be using arbitrary synchronization primitives. // // As a result, these functions may not have the usual programming environment // available to most C or C++ code. 
In particular, they may not call into // BoringSSL, or any library which depends on BoringSSL. Any synchronization // primitives used must tolerate every other synchronization primitive linked // into the process, including pthreads locks. Failing to meet these constraints // may result in deadlocks, crashes, or memory corruption. WEAK_SYMBOL_FUNC(void *, OPENSSL_memory_alloc, (size_t size)) WEAK_SYMBOL_FUNC(void, OPENSSL_memory_free, (void *ptr)) WEAK_SYMBOL_FUNC(size_t, OPENSSL_memory_get_size, (void *ptr)) #if defined(BORINGSSL_MALLOC_FAILURE_TESTING) static CRYPTO_MUTEX malloc_failure_lock = CRYPTO_MUTEX_INIT; static uint64_t current_malloc_count = 0; static uint64_t malloc_number_to_fail = 0; static int malloc_failure_enabled = 0, break_on_malloc_fail = 0, any_malloc_failed = 0, disable_malloc_failures = 0; static void malloc_exit_handler(void) { CRYPTO_MUTEX_lock_read(&malloc_failure_lock); if (any_malloc_failed) { // Signal to the test driver that some allocation failed, so it knows to // increment the counter and continue. _exit(88); } CRYPTO_MUTEX_unlock_read(&malloc_failure_lock); } static void init_malloc_failure(void) { const char *env = getenv("MALLOC_NUMBER_TO_FAIL"); if (env != NULL && env[0] != 0) { char *endptr; malloc_number_to_fail = strtoull(env, &endptr, 10); if (*endptr == 0) { malloc_failure_enabled = 1; atexit(malloc_exit_handler); } } break_on_malloc_fail = getenv("MALLOC_BREAK_ON_FAIL") != NULL; } // should_fail_allocation returns one if the current allocation should fail and // zero otherwise. static int should_fail_allocation() { static CRYPTO_once_t once = CRYPTO_ONCE_INIT; CRYPTO_once(&once, init_malloc_failure); if (!malloc_failure_enabled || disable_malloc_failures) { return 0; } // We lock just so multi-threaded tests are still correct, but we won't test // every malloc exhaustively. 
CRYPTO_MUTEX_lock_write(&malloc_failure_lock); int should_fail = current_malloc_count == malloc_number_to_fail; current_malloc_count++; any_malloc_failed = any_malloc_failed || should_fail; CRYPTO_MUTEX_unlock_write(&malloc_failure_lock); if (should_fail && break_on_malloc_fail) { raise(SIGTRAP); } if (should_fail) { errno = ENOMEM; } return should_fail; } void OPENSSL_reset_malloc_counter_for_testing(void) { CRYPTO_MUTEX_lock_write(&malloc_failure_lock); current_malloc_count = 0; CRYPTO_MUTEX_unlock_write(&malloc_failure_lock); } void OPENSSL_disable_malloc_failures_for_testing(void) { CRYPTO_MUTEX_lock_write(&malloc_failure_lock); BSSL_CHECK(!disable_malloc_failures); disable_malloc_failures = 1; CRYPTO_MUTEX_unlock_write(&malloc_failure_lock); } void OPENSSL_enable_malloc_failures_for_testing(void) { CRYPTO_MUTEX_lock_write(&malloc_failure_lock); BSSL_CHECK(disable_malloc_failures); disable_malloc_failures = 0; CRYPTO_MUTEX_unlock_write(&malloc_failure_lock); } #else static int should_fail_allocation(void) { return 0; } #endif void *OPENSSL_malloc(size_t size) { void *ptr = nullptr; if (should_fail_allocation()) { goto err; } if (OPENSSL_memory_alloc != NULL) { assert(OPENSSL_memory_free != NULL); assert(OPENSSL_memory_get_size != NULL); void *ptr2 = OPENSSL_memory_alloc(size); if (ptr2 == NULL && size != 0) { goto err; } return ptr2; } if (size + OPENSSL_MALLOC_PREFIX < size) { goto err; } ptr = malloc(size + OPENSSL_MALLOC_PREFIX); if (ptr == NULL) { goto err; } *(size_t *)ptr = size; __asan_poison_memory_region(ptr, OPENSSL_MALLOC_PREFIX); return ((uint8_t *)ptr) + OPENSSL_MALLOC_PREFIX; err: // This only works because ERR does not call OPENSSL_malloc. 
OPENSSL_PUT_ERROR(CRYPTO, ERR_R_MALLOC_FAILURE); return NULL; } void *OPENSSL_zalloc(size_t size) { void *ret = OPENSSL_malloc(size); if (ret != NULL) { OPENSSL_memset(ret, 0, size); } return ret; } void *OPENSSL_calloc(size_t num, size_t size) { if (size != 0 && num > SIZE_MAX / size) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_OVERFLOW); return NULL; } return OPENSSL_zalloc(num * size); } void OPENSSL_free(void *orig_ptr) { if (orig_ptr == NULL) { return; } if (OPENSSL_memory_free != NULL) { OPENSSL_memory_free(orig_ptr); return; } void *ptr = ((uint8_t *)orig_ptr) - OPENSSL_MALLOC_PREFIX; __asan_unpoison_memory_region(ptr, OPENSSL_MALLOC_PREFIX); size_t size = *(size_t *)ptr; OPENSSL_cleanse(ptr, size + OPENSSL_MALLOC_PREFIX); // ASan knows to intercept malloc and free, but not sdallocx. #if defined(OPENSSL_ASAN) (void)sdallocx; free(ptr); #else if (sdallocx) { sdallocx(ptr, size + OPENSSL_MALLOC_PREFIX, 0 /* flags */); } else { free(ptr); } #endif } void *OPENSSL_realloc(void *orig_ptr, size_t new_size) { if (orig_ptr == NULL) { return OPENSSL_malloc(new_size); } size_t old_size; if (OPENSSL_memory_get_size != NULL) { old_size = OPENSSL_memory_get_size(orig_ptr); } else { void *ptr = ((uint8_t *)orig_ptr) - OPENSSL_MALLOC_PREFIX; __asan_unpoison_memory_region(ptr, OPENSSL_MALLOC_PREFIX); old_size = *(size_t *)ptr; __asan_poison_memory_region(ptr, OPENSSL_MALLOC_PREFIX); } void *ret = OPENSSL_malloc(new_size); if (ret == NULL) { return NULL; } size_t to_copy = new_size; if (old_size < to_copy) { to_copy = old_size; } memcpy(ret, orig_ptr, to_copy); OPENSSL_free(orig_ptr); return ret; } void OPENSSL_cleanse(void *ptr, size_t len) { #if defined(OPENSSL_WINDOWS) SecureZeroMemory(ptr, len); #else OPENSSL_memset(ptr, 0, len); #if !defined(OPENSSL_NO_ASM) /* As best as we can tell, this is sufficient to break any optimisations that might try to eliminate "superfluous" memsets. If there's an easy way to detect memset_s, it would be better to use that. 
*/ __asm__ __volatile__("" : : "r"(ptr) : "memory"); #endif #endif // !OPENSSL_NO_ASM } void OPENSSL_clear_free(void *ptr, size_t unused) { OPENSSL_free(ptr); } int CRYPTO_secure_malloc_init(size_t size, size_t min_size) { return 0; } int CRYPTO_secure_malloc_initialized(void) { return 0; } size_t CRYPTO_secure_used(void) { return 0; } void *OPENSSL_secure_malloc(size_t size) { return OPENSSL_malloc(size); } void OPENSSL_secure_clear_free(void *ptr, size_t len) { OPENSSL_clear_free(ptr, len); } int CRYPTO_memcmp(const void *in_a, const void *in_b, size_t len) { const uint8_t *a = reinterpret_cast(in_a); const uint8_t *b = reinterpret_cast(in_b); uint8_t x = 0; for (size_t i = 0; i < len; i++) { x |= a[i] ^ b[i]; } return x; } uint32_t OPENSSL_hash32(const void *ptr, size_t len) { // These are the FNV-1a parameters for 32 bits. static const uint32_t kPrime = 16777619u; static const uint32_t kOffsetBasis = 2166136261u; const uint8_t *in = reinterpret_cast(ptr); uint32_t h = kOffsetBasis; for (size_t i = 0; i < len; i++) { h ^= in[i]; h *= kPrime; } return h; } uint32_t OPENSSL_strhash(const char *s) { return OPENSSL_hash32(s, strlen(s)); } size_t OPENSSL_strnlen(const char *s, size_t len) { for (size_t i = 0; i < len; i++) { if (s[i] == 0) { return i; } } return len; } char *OPENSSL_strdup(const char *s) { if (s == NULL) { return NULL; } // Copy the NUL terminator. 
return reinterpret_cast(OPENSSL_memdup(s, strlen(s) + 1)); } int OPENSSL_isalpha(int c) { return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); } int OPENSSL_isdigit(int c) { return c >= '0' && c <= '9'; } int OPENSSL_isxdigit(int c) { return OPENSSL_isdigit(c) || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'); } int OPENSSL_fromxdigit(uint8_t *out, int c) { if (OPENSSL_isdigit(c)) { *out = c - '0'; return 1; } if ('a' <= c && c <= 'f') { *out = c - 'a' + 10; return 1; } if ('A' <= c && c <= 'F') { *out = c - 'A' + 10; return 1; } return 0; } int OPENSSL_isalnum(int c) { return OPENSSL_isalpha(c) || OPENSSL_isdigit(c); } int OPENSSL_tolower(int c) { if (c >= 'A' && c <= 'Z') { return c + ('a' - 'A'); } return c; } int OPENSSL_isspace(int c) { return c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r' || c == ' '; } int OPENSSL_strcasecmp(const char *a, const char *b) { for (size_t i = 0;; i++) { const int aa = OPENSSL_tolower(a[i]); const int bb = OPENSSL_tolower(b[i]); if (aa < bb) { return -1; } else if (aa > bb) { return 1; } else if (aa == 0) { return 0; } } } int OPENSSL_strncasecmp(const char *a, const char *b, size_t n) { for (size_t i = 0; i < n; i++) { const int aa = OPENSSL_tolower(a[i]); const int bb = OPENSSL_tolower(b[i]); if (aa < bb) { return -1; } else if (aa > bb) { return 1; } else if (aa == 0) { return 0; } } return 0; } int BIO_snprintf(char *buf, size_t n, const char *format, ...) { va_list args; va_start(args, format); int ret = BIO_vsnprintf(buf, n, format, args); va_end(args); return ret; } int BIO_vsnprintf(char *buf, size_t n, const char *format, va_list args) { return vsnprintf(buf, n, format, args); } int OPENSSL_vasprintf_internal(char **str, const char *format, va_list args, int system_malloc) { void *(*allocate)(size_t) = system_malloc ? malloc : OPENSSL_malloc; void (*deallocate)(void *) = system_malloc ? free : OPENSSL_free; void *(*reallocate)(void *, size_t) = system_malloc ? 
realloc : OPENSSL_realloc; char *candidate = NULL; size_t candidate_len = 64; // TODO(bbe) what's the best initial size? int ret; if ((candidate = reinterpret_cast(allocate(candidate_len))) == NULL) { goto err; } va_list args_copy; va_copy(args_copy, args); ret = vsnprintf(candidate, candidate_len, format, args_copy); va_end(args_copy); if (ret < 0) { goto err; } if ((size_t)ret >= candidate_len) { // Too big to fit in allocation. char *tmp; candidate_len = (size_t)ret + 1; if ((tmp = reinterpret_cast( reallocate(candidate, candidate_len))) == NULL) { goto err; } candidate = tmp; ret = vsnprintf(candidate, candidate_len, format, args); } // At this point this should not happen unless vsnprintf is insane. if (ret < 0 || (size_t)ret >= candidate_len) { goto err; } *str = candidate; return ret; err: deallocate(candidate); *str = NULL; errno = ENOMEM; return -1; } int OPENSSL_vasprintf(char **str, const char *format, va_list args) { return OPENSSL_vasprintf_internal(str, format, args, /*system_malloc=*/0); } int OPENSSL_asprintf(char **str, const char *format, ...) 
{ va_list args; va_start(args, format); int ret = OPENSSL_vasprintf(str, format, args); va_end(args); return ret; } char *OPENSSL_strndup(const char *str, size_t size) { size = OPENSSL_strnlen(str, size); size_t alloc_size = size + 1; if (alloc_size < size) { // overflow OPENSSL_PUT_ERROR(CRYPTO, ERR_R_MALLOC_FAILURE); return NULL; } char *ret = reinterpret_cast(OPENSSL_malloc(alloc_size)); if (ret == NULL) { return NULL; } OPENSSL_memcpy(ret, str, size); ret[size] = '\0'; return ret; } size_t OPENSSL_strlcpy(char *dst, const char *src, size_t dst_size) { size_t l = 0; for (; dst_size > 1 && *src; dst_size--) { *dst++ = *src++; l++; } if (dst_size) { *dst = 0; } return l + strlen(src); } size_t OPENSSL_strlcat(char *dst, const char *src, size_t dst_size) { size_t l = 0; for (; dst_size > 0 && *dst; dst_size--, dst++) { l++; } return l + OPENSSL_strlcpy(dst, src, dst_size); } void *OPENSSL_memdup(const void *data, size_t size) { if (size == 0) { return NULL; } void *ret = OPENSSL_malloc(size); if (ret == NULL) { return NULL; } OPENSSL_memcpy(ret, data, size); return ret; } void *CRYPTO_malloc(size_t size, const char *file, int line) { return OPENSSL_malloc(size); } void *CRYPTO_realloc(void *ptr, size_t new_size, const char *file, int line) { return OPENSSL_realloc(ptr, new_size); } void CRYPTO_free(void *ptr, const char *file, int line) { OPENSSL_free(ptr); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/mldsa/mldsa.cc ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include "../fipsmodule/bcm_interface.h" static_assert(sizeof(BCM_mldsa65_private_key) == sizeof(MLDSA65_private_key), ""); static_assert(alignof(BCM_mldsa65_private_key) == alignof(MLDSA65_private_key), ""); static_assert(sizeof(BCM_mldsa65_public_key) == sizeof(MLDSA65_public_key), ""); static_assert(alignof(BCM_mldsa65_public_key) == alignof(MLDSA65_public_key), ""); static_assert(MLDSA_SEED_BYTES == BCM_MLDSA_SEED_BYTES, ""); static_assert(MLDSA65_PRIVATE_KEY_BYTES == BCM_MLDSA65_PRIVATE_KEY_BYTES, ""); static_assert(MLDSA65_PUBLIC_KEY_BYTES == BCM_MLDSA65_PUBLIC_KEY_BYTES, ""); static_assert(MLDSA65_SIGNATURE_BYTES == BCM_MLDSA65_SIGNATURE_BYTES, ""); int MLDSA65_generate_key( uint8_t out_encoded_public_key[MLDSA65_PUBLIC_KEY_BYTES], uint8_t out_seed[MLDSA_SEED_BYTES], struct MLDSA65_private_key *out_private_key) { return bcm_success(BCM_mldsa65_generate_key( out_encoded_public_key, out_seed, reinterpret_cast(out_private_key))); } int MLDSA65_private_key_from_seed(struct MLDSA65_private_key *out_private_key, const uint8_t *seed, size_t seed_len) { if (seed_len != BCM_MLDSA_SEED_BYTES) { return 0; } return bcm_success(BCM_mldsa65_private_key_from_seed( reinterpret_cast(out_private_key), seed)); } int MLDSA65_public_from_private(struct MLDSA65_public_key *out_public_key, const struct MLDSA65_private_key *private_key) { return bcm_success(BCM_mldsa65_public_from_private( reinterpret_cast(out_public_key), reinterpret_cast(private_key))); } int MLDSA65_sign(uint8_t out_encoded_signature[MLDSA65_SIGNATURE_BYTES], const struct MLDSA65_private_key *private_key, const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t 
context_len) { if (context_len > 255) { return 0; } return bcm_success(BCM_mldsa65_sign( out_encoded_signature, reinterpret_cast(private_key), msg, msg_len, context, context_len)); } int MLDSA65_verify(const struct MLDSA65_public_key *public_key, const uint8_t *signature, size_t signature_len, const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t context_len) { if (context_len > 255 || signature_len != BCM_MLDSA65_SIGNATURE_BYTES) { return 0; } return bcm_success(BCM_mldsa65_verify( reinterpret_cast(public_key), signature, msg, msg_len, context, context_len)); } int MLDSA65_marshal_public_key(CBB *out, const struct MLDSA65_public_key *public_key) { return bcm_success(BCM_mldsa65_marshal_public_key( out, reinterpret_cast(public_key))); } int MLDSA65_parse_public_key(struct MLDSA65_public_key *public_key, CBS *in) { return bcm_success(BCM_mldsa65_parse_public_key( reinterpret_cast(public_key), in)); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/mlkem/mlkem.cc ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include "../fipsmodule/bcm_interface.h" static_assert(sizeof(BCM_mlkem768_private_key) <= sizeof(MLKEM768_private_key), ""); static_assert(alignof(BCM_mlkem768_private_key) <= alignof(MLKEM768_private_key), ""); static_assert(sizeof(BCM_mlkem768_public_key) <= sizeof(MLKEM768_public_key), ""); static_assert(alignof(BCM_mlkem768_public_key) <= alignof(MLKEM768_public_key), ""); static_assert(MLKEM768_PUBLIC_KEY_BYTES == BCM_MLKEM768_PUBLIC_KEY_BYTES, ""); static_assert(MLKEM_SEED_BYTES == BCM_MLKEM_SEED_BYTES, ""); static_assert(MLKEM768_CIPHERTEXT_BYTES == BCM_MLKEM768_CIPHERTEXT_BYTES, ""); static_assert(MLKEM_SHARED_SECRET_BYTES == BCM_MLKEM_SHARED_SECRET_BYTES, ""); static_assert(MLKEM1024_PUBLIC_KEY_BYTES == BCM_MLKEM1024_PUBLIC_KEY_BYTES, ""); static_assert(MLKEM1024_CIPHERTEXT_BYTES == BCM_MLKEM1024_CIPHERTEXT_BYTES, ""); void MLKEM768_generate_key( uint8_t out_encoded_public_key[MLKEM768_PUBLIC_KEY_BYTES], uint8_t optional_out_seed[MLKEM_SEED_BYTES], struct MLKEM768_private_key *out_private_key) { BCM_mlkem768_generate_key( out_encoded_public_key, optional_out_seed, reinterpret_cast(out_private_key)); } int MLKEM768_private_key_from_seed(struct MLKEM768_private_key *out_private_key, const uint8_t *seed, size_t seed_len) { return bcm_success(BCM_mlkem768_private_key_from_seed( reinterpret_cast(out_private_key), seed, seed_len)); } void MLKEM768_public_from_private( struct MLKEM768_public_key *out_public_key, const struct MLKEM768_private_key *private_key) { (void)BCM_mlkem768_public_from_private( reinterpret_cast(out_public_key), reinterpret_cast(private_key)); } void MLKEM768_encap(uint8_t out_ciphertext[MLKEM768_CIPHERTEXT_BYTES], uint8_t out_shared_secret[MLKEM_SHARED_SECRET_BYTES], const struct MLKEM768_public_key *public_key) { (void)BCM_mlkem768_encap( out_ciphertext, out_shared_secret, reinterpret_cast(public_key)); } int MLKEM768_decap(uint8_t out_shared_secret[MLKEM_SHARED_SECRET_BYTES], const uint8_t *ciphertext, size_t ciphertext_len, 
const struct MLKEM768_private_key *private_key) { return bcm_success(BCM_mlkem768_decap( out_shared_secret, ciphertext, ciphertext_len, reinterpret_cast(private_key))); } int MLKEM768_marshal_public_key(CBB *out, const struct MLKEM768_public_key *public_key) { return bcm_success(BCM_mlkem768_marshal_public_key( out, reinterpret_cast(public_key))); } int MLKEM768_parse_public_key(struct MLKEM768_public_key *out_public_key, CBS *in) { return bcm_success(BCM_mlkem768_parse_public_key( reinterpret_cast(out_public_key), in)); } static_assert(sizeof(BCM_mlkem1024_private_key) <= sizeof(MLKEM1024_private_key), ""); static_assert(alignof(BCM_mlkem1024_private_key) <= alignof(MLKEM1024_private_key), ""); static_assert(sizeof(BCM_mlkem1024_public_key) <= sizeof(MLKEM1024_public_key), ""); static_assert(alignof(BCM_mlkem1024_public_key) <= alignof(MLKEM1024_public_key), ""); void MLKEM1024_generate_key( uint8_t out_encoded_public_key[MLKEM1024_PUBLIC_KEY_BYTES], uint8_t optional_out_seed[MLKEM_SEED_BYTES], struct MLKEM1024_private_key *out_private_key) { (void)BCM_mlkem1024_generate_key( out_encoded_public_key, optional_out_seed, reinterpret_cast(out_private_key)); } int MLKEM1024_private_key_from_seed( struct MLKEM1024_private_key *out_private_key, const uint8_t *seed, size_t seed_len) { return bcm_success(BCM_mlkem1024_private_key_from_seed( reinterpret_cast(out_private_key), seed, seed_len)); } void MLKEM1024_public_from_private( struct MLKEM1024_public_key *out_public_key, const struct MLKEM1024_private_key *private_key) { (void)BCM_mlkem1024_public_from_private( reinterpret_cast(out_public_key), reinterpret_cast(private_key)); } void MLKEM1024_encap(uint8_t out_ciphertext[MLKEM1024_CIPHERTEXT_BYTES], uint8_t out_shared_secret[MLKEM_SHARED_SECRET_BYTES], const struct MLKEM1024_public_key *public_key) { (void)BCM_mlkem1024_encap( out_ciphertext, out_shared_secret, reinterpret_cast(public_key)); } int MLKEM1024_decap(uint8_t out_shared_secret[MLKEM_SHARED_SECRET_BYTES], 
const uint8_t *ciphertext, size_t ciphertext_len, const struct MLKEM1024_private_key *private_key) { return bcm_success(BCM_mlkem1024_decap( out_shared_secret, ciphertext, ciphertext_len, reinterpret_cast(private_key))); } int MLKEM1024_marshal_public_key( CBB *out, const struct MLKEM1024_public_key *public_key) { return bcm_success(BCM_mlkem1024_marshal_public_key( out, reinterpret_cast(public_key))); } int MLKEM1024_parse_public_key(struct MLKEM1024_public_key *out_public_key, CBS *in) { return bcm_success(BCM_mlkem1024_parse_public_key( reinterpret_cast(out_public_key), in)); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/obj/obj.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include "../asn1/internal.h" #include "../internal.h" #include "../lhash/internal.h" // obj_data.h must be included after the definition of |ASN1_OBJECT|. #include "obj_dat.h" DEFINE_LHASH_OF(ASN1_OBJECT) static CRYPTO_MUTEX global_added_lock = CRYPTO_MUTEX_INIT; // These globals are protected by |global_added_lock|. 
static LHASH_OF(ASN1_OBJECT) *global_added_by_data = NULL; static LHASH_OF(ASN1_OBJECT) *global_added_by_nid = NULL; static LHASH_OF(ASN1_OBJECT) *global_added_by_short_name = NULL; static LHASH_OF(ASN1_OBJECT) *global_added_by_long_name = NULL; static CRYPTO_MUTEX global_next_nid_lock = CRYPTO_MUTEX_INIT; static unsigned global_next_nid = NUM_NID; static int obj_next_nid(void) { CRYPTO_MUTEX_lock_write(&global_next_nid_lock); int ret = global_next_nid++; CRYPTO_MUTEX_unlock_write(&global_next_nid_lock); return ret; } ASN1_OBJECT *OBJ_dup(const ASN1_OBJECT *o) { ASN1_OBJECT *r; unsigned char *data = NULL; char *sn = NULL, *ln = NULL; if (o == NULL) { return NULL; } if (!(o->flags & ASN1_OBJECT_FLAG_DYNAMIC)) { // TODO(fork): this is a little dangerous. return (ASN1_OBJECT *)o; } r = ASN1_OBJECT_new(); if (r == NULL) { OPENSSL_PUT_ERROR(OBJ, ERR_R_ASN1_LIB); return NULL; } r->ln = r->sn = NULL; // once data is attached to an object, it remains const r->data = reinterpret_cast(OPENSSL_memdup(o->data, o->length)); if (o->length != 0 && r->data == NULL) { goto err; } r->length = o->length; r->nid = o->nid; if (o->ln != NULL) { ln = OPENSSL_strdup(o->ln); if (ln == NULL) { goto err; } } if (o->sn != NULL) { sn = OPENSSL_strdup(o->sn); if (sn == NULL) { goto err; } } r->sn = sn; r->ln = ln; r->flags = o->flags | (ASN1_OBJECT_FLAG_DYNAMIC | ASN1_OBJECT_FLAG_DYNAMIC_STRINGS | ASN1_OBJECT_FLAG_DYNAMIC_DATA); return r; err: OPENSSL_free(ln); OPENSSL_free(sn); OPENSSL_free(data); OPENSSL_free(r); return NULL; } int OBJ_cmp(const ASN1_OBJECT *a, const ASN1_OBJECT *b) { if (a->length < b->length) { return -1; } else if (a->length > b->length) { return 1; } return OPENSSL_memcmp(a->data, b->data, a->length); } const uint8_t *OBJ_get0_data(const ASN1_OBJECT *obj) { if (obj == NULL) { return NULL; } return obj->data; } size_t OBJ_length(const ASN1_OBJECT *obj) { if (obj == NULL || obj->length < 0) { return 0; } return (size_t)obj->length; } static const ASN1_OBJECT 
*get_builtin_object(int nid) { // |NID_undef| is stored separately, so all the indices are off by one. The // caller of this function must have a valid built-in, non-undef NID. BSSL_CHECK(nid > 0 && nid < NUM_NID); return &kObjects[nid - 1]; } // obj_cmp is called to search the kNIDsInOIDOrder array. The |key| argument is // an |ASN1_OBJECT|* that we're looking for and |element| is a pointer to an // unsigned int in the array. static int obj_cmp(const void *key, const void *element) { uint16_t nid = *((const uint16_t *)element); return OBJ_cmp(reinterpret_cast(key), get_builtin_object(nid)); } int OBJ_obj2nid(const ASN1_OBJECT *obj) { if (obj == NULL) { return NID_undef; } if (obj->nid != 0) { return obj->nid; } CRYPTO_MUTEX_lock_read(&global_added_lock); if (global_added_by_data != NULL) { ASN1_OBJECT *match; match = lh_ASN1_OBJECT_retrieve(global_added_by_data, obj); if (match != NULL) { CRYPTO_MUTEX_unlock_read(&global_added_lock); return match->nid; } } CRYPTO_MUTEX_unlock_read(&global_added_lock); const uint16_t *nid_ptr = reinterpret_cast( bsearch(obj, kNIDsInOIDOrder, OPENSSL_ARRAY_SIZE(kNIDsInOIDOrder), sizeof(kNIDsInOIDOrder[0]), obj_cmp)); if (nid_ptr == NULL) { return NID_undef; } return get_builtin_object(*nid_ptr)->nid; } int OBJ_cbs2nid(const CBS *cbs) { if (CBS_len(cbs) > INT_MAX) { return NID_undef; } ASN1_OBJECT obj; OPENSSL_memset(&obj, 0, sizeof(obj)); obj.data = CBS_data(cbs); obj.length = (int)CBS_len(cbs); return OBJ_obj2nid(&obj); } // short_name_cmp is called to search the kNIDsInShortNameOrder array. The // |key| argument is name that we're looking for and |element| is a pointer to // an unsigned int in the array. 
static int short_name_cmp(const void *key, const void *element) { const char *name = (const char *)key; uint16_t nid = *((const uint16_t *)element); return strcmp(name, get_builtin_object(nid)->sn); } int OBJ_sn2nid(const char *short_name) { CRYPTO_MUTEX_lock_read(&global_added_lock); if (global_added_by_short_name != NULL) { ASN1_OBJECT *match, templ; templ.sn = short_name; match = lh_ASN1_OBJECT_retrieve(global_added_by_short_name, &templ); if (match != NULL) { CRYPTO_MUTEX_unlock_read(&global_added_lock); return match->nid; } } CRYPTO_MUTEX_unlock_read(&global_added_lock); const uint16_t *nid_ptr = reinterpret_cast( bsearch(short_name, kNIDsInShortNameOrder, OPENSSL_ARRAY_SIZE(kNIDsInShortNameOrder), sizeof(kNIDsInShortNameOrder[0]), short_name_cmp)); if (nid_ptr == NULL) { return NID_undef; } return get_builtin_object(*nid_ptr)->nid; } // long_name_cmp is called to search the kNIDsInLongNameOrder array. The // |key| argument is name that we're looking for and |element| is a pointer to // an unsigned int in the array. 
static int long_name_cmp(const void *key, const void *element) { const char *name = (const char *)key; uint16_t nid = *((const uint16_t *)element); return strcmp(name, get_builtin_object(nid)->ln); } int OBJ_ln2nid(const char *long_name) { CRYPTO_MUTEX_lock_read(&global_added_lock); if (global_added_by_long_name != NULL) { ASN1_OBJECT *match, templ; templ.ln = long_name; match = lh_ASN1_OBJECT_retrieve(global_added_by_long_name, &templ); if (match != NULL) { CRYPTO_MUTEX_unlock_read(&global_added_lock); return match->nid; } } CRYPTO_MUTEX_unlock_read(&global_added_lock); const uint16_t *nid_ptr = reinterpret_cast(bsearch( long_name, kNIDsInLongNameOrder, OPENSSL_ARRAY_SIZE(kNIDsInLongNameOrder), sizeof(kNIDsInLongNameOrder[0]), long_name_cmp)); if (nid_ptr == NULL) { return NID_undef; } return get_builtin_object(*nid_ptr)->nid; } int OBJ_txt2nid(const char *s) { ASN1_OBJECT *obj; int nid; obj = OBJ_txt2obj(s, 0 /* search names */); nid = OBJ_obj2nid(obj); ASN1_OBJECT_free(obj); return nid; } OPENSSL_EXPORT int OBJ_nid2cbb(CBB *out, int nid) { const ASN1_OBJECT *obj = OBJ_nid2obj(nid); CBB oid; if (obj == NULL || !CBB_add_asn1(out, &oid, CBS_ASN1_OBJECT) || !CBB_add_bytes(&oid, obj->data, obj->length) || !CBB_flush(out)) { return 0; } return 1; } const ASN1_OBJECT *OBJ_get_undef(void) { static const ASN1_OBJECT kUndef = { /*sn=*/SN_undef, /*ln=*/LN_undef, /*nid=*/NID_undef, /*length=*/0, /*data=*/NULL, /*flags=*/0, }; return &kUndef; } ASN1_OBJECT *OBJ_nid2obj(int nid) { if (nid == NID_undef) { return (ASN1_OBJECT *)OBJ_get_undef(); } if (nid > 0 && nid < NUM_NID) { const ASN1_OBJECT *obj = get_builtin_object(nid); if (nid != NID_undef && obj->nid == NID_undef) { goto err; } return (ASN1_OBJECT *)obj; } CRYPTO_MUTEX_lock_read(&global_added_lock); if (global_added_by_nid != NULL) { ASN1_OBJECT *match, templ; templ.nid = nid; match = lh_ASN1_OBJECT_retrieve(global_added_by_nid, &templ); if (match != NULL) { CRYPTO_MUTEX_unlock_read(&global_added_lock); return match; } 
} CRYPTO_MUTEX_unlock_read(&global_added_lock); err: OPENSSL_PUT_ERROR(OBJ, OBJ_R_UNKNOWN_NID); return NULL; } const char *OBJ_nid2sn(int nid) { const ASN1_OBJECT *obj = OBJ_nid2obj(nid); if (obj == NULL) { return NULL; } return obj->sn; } const char *OBJ_nid2ln(int nid) { const ASN1_OBJECT *obj = OBJ_nid2obj(nid); if (obj == NULL) { return NULL; } return obj->ln; } static ASN1_OBJECT *create_object_with_text_oid(int (*get_nid)(void), const char *oid, const char *short_name, const char *long_name) { uint8_t *buf; size_t len; CBB cbb; if (!CBB_init(&cbb, 32) || !CBB_add_asn1_oid_from_text(&cbb, oid, strlen(oid)) || !CBB_finish(&cbb, &buf, &len)) { OPENSSL_PUT_ERROR(OBJ, OBJ_R_INVALID_OID_STRING); CBB_cleanup(&cbb); return NULL; } ASN1_OBJECT *ret = ASN1_OBJECT_create(get_nid ? get_nid() : NID_undef, buf, len, short_name, long_name); OPENSSL_free(buf); return ret; } ASN1_OBJECT *OBJ_txt2obj(const char *s, int dont_search_names) { if (!dont_search_names) { int nid = OBJ_sn2nid(s); if (nid == NID_undef) { nid = OBJ_ln2nid(s); } if (nid != NID_undef) { return OBJ_nid2obj(nid); } } return create_object_with_text_oid(NULL, s, NULL, NULL); } static int strlcpy_int(char *dst, const char *src, int dst_size) { size_t ret = OPENSSL_strlcpy(dst, src, dst_size < 0 ? 0 : (size_t)dst_size); if (ret > INT_MAX) { OPENSSL_PUT_ERROR(OBJ, ERR_R_OVERFLOW); return -1; } return (int)ret; } int OBJ_obj2txt(char *out, int out_len, const ASN1_OBJECT *obj, int always_return_oid) { // Python depends on the empty OID successfully encoding as the empty // string. 
if (obj == NULL || obj->length == 0) { return strlcpy_int(out, "", out_len); } if (!always_return_oid) { int nid = OBJ_obj2nid(obj); if (nid != NID_undef) { const char *name = OBJ_nid2ln(nid); if (name == NULL) { name = OBJ_nid2sn(nid); } if (name != NULL) { return strlcpy_int(out, name, out_len); } } } CBS cbs; CBS_init(&cbs, obj->data, obj->length); char *txt = CBS_asn1_oid_to_text(&cbs); if (txt == NULL) { if (out_len > 0) { out[0] = '\0'; } return -1; } int ret = strlcpy_int(out, txt, out_len); OPENSSL_free(txt); return ret; } static uint32_t hash_nid(const ASN1_OBJECT *obj) { return obj->nid; } static int cmp_nid(const ASN1_OBJECT *a, const ASN1_OBJECT *b) { return a->nid - b->nid; } static uint32_t hash_data(const ASN1_OBJECT *obj) { return OPENSSL_hash32(obj->data, obj->length); } static uint32_t hash_short_name(const ASN1_OBJECT *obj) { return OPENSSL_strhash(obj->sn); } static int cmp_short_name(const ASN1_OBJECT *a, const ASN1_OBJECT *b) { return strcmp(a->sn, b->sn); } static uint32_t hash_long_name(const ASN1_OBJECT *obj) { return OPENSSL_strhash(obj->ln); } static int cmp_long_name(const ASN1_OBJECT *a, const ASN1_OBJECT *b) { return strcmp(a->ln, b->ln); } // obj_add_object inserts |obj| into the various global hashes for run-time // added objects. It returns one on success or zero otherwise. 
static int obj_add_object(ASN1_OBJECT *obj) { obj->flags &= ~(ASN1_OBJECT_FLAG_DYNAMIC | ASN1_OBJECT_FLAG_DYNAMIC_STRINGS | ASN1_OBJECT_FLAG_DYNAMIC_DATA); CRYPTO_MUTEX_lock_write(&global_added_lock); if (global_added_by_nid == NULL) { global_added_by_nid = lh_ASN1_OBJECT_new(hash_nid, cmp_nid); } if (global_added_by_data == NULL) { global_added_by_data = lh_ASN1_OBJECT_new(hash_data, OBJ_cmp); } if (global_added_by_short_name == NULL) { global_added_by_short_name = lh_ASN1_OBJECT_new(hash_short_name, cmp_short_name); } if (global_added_by_long_name == NULL) { global_added_by_long_name = lh_ASN1_OBJECT_new(hash_long_name, cmp_long_name); } int ok = 0; if (global_added_by_nid == NULL || // global_added_by_data == NULL || // global_added_by_short_name == NULL || // global_added_by_long_name == NULL) { goto err; } // We don't pay attention to |old_object| (which contains any previous object // that was evicted from the hashes) because we don't have a reference count // on ASN1_OBJECT values. Also, we should never have duplicates nids and so // should always have objects in |global_added_by_nid|. 
ASN1_OBJECT *old_object; ok = lh_ASN1_OBJECT_insert(global_added_by_nid, &old_object, obj); if (obj->length != 0 && obj->data != NULL) { ok &= lh_ASN1_OBJECT_insert(global_added_by_data, &old_object, obj); } if (obj->sn != NULL) { ok &= lh_ASN1_OBJECT_insert(global_added_by_short_name, &old_object, obj); } if (obj->ln != NULL) { ok &= lh_ASN1_OBJECT_insert(global_added_by_long_name, &old_object, obj); } err: CRYPTO_MUTEX_unlock_write(&global_added_lock); return ok; } int OBJ_create(const char *oid, const char *short_name, const char *long_name) { ASN1_OBJECT *op = create_object_with_text_oid(obj_next_nid, oid, short_name, long_name); if (op == NULL || !obj_add_object(op)) { return NID_undef; } return op->nid; } void OBJ_cleanup(void) {} ================================================ FILE: Sources/CNIOBoringSSL/crypto/obj/obj_dat.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ /* This file is generated by crypto/obj/objects.go. 
*/ #define NUM_NID 966 static const uint8_t kObjectData[] = { /* NID_rsadsi */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, /* NID_pkcs */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, /* NID_md2 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x02, /* NID_md5 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* NID_rc4 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x04, /* NID_rsaEncryption */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, /* NID_md2WithRSAEncryption */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x02, /* NID_md5WithRSAEncryption */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x04, /* NID_pbeWithMD2AndDES_CBC */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x01, /* NID_pbeWithMD5AndDES_CBC */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x03, /* NID_X500 */ 0x55, /* NID_X509 */ 0x55, 0x04, /* NID_commonName */ 0x55, 0x04, 0x03, /* NID_countryName */ 0x55, 0x04, 0x06, /* NID_localityName */ 0x55, 0x04, 0x07, /* NID_stateOrProvinceName */ 0x55, 0x04, 0x08, /* NID_organizationName */ 0x55, 0x04, 0x0a, /* NID_organizationalUnitName */ 0x55, 0x04, 0x0b, /* NID_rsa */ 0x55, 0x08, 0x01, 0x01, /* NID_pkcs7 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, /* NID_pkcs7_data */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x01, /* NID_pkcs7_signed */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x02, /* NID_pkcs7_enveloped */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x03, /* NID_pkcs7_signedAndEnveloped */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x04, /* NID_pkcs7_digest */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x05, /* NID_pkcs7_encrypted */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x06, /* NID_pkcs3 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x03, /* NID_dhKeyAgreement */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x03, 0x01, /* NID_des_ecb */ 0x2b, 0x0e, 0x03, 0x02, 0x06, /* NID_des_cfb64 */ 0x2b, 0x0e, 0x03, 0x02, 0x09, /* NID_des_cbc */ 0x2b, 0x0e, 0x03, 0x02, 0x07, /* NID_des_ede_ecb */ 0x2b, 0x0e, 0x03, 
0x02, 0x11, /* NID_idea_cbc */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x81, 0x3c, 0x07, 0x01, 0x01, 0x02, /* NID_rc2_cbc */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x02, /* NID_sha */ 0x2b, 0x0e, 0x03, 0x02, 0x12, /* NID_shaWithRSAEncryption */ 0x2b, 0x0e, 0x03, 0x02, 0x0f, /* NID_des_ede3_cbc */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x07, /* NID_des_ofb64 */ 0x2b, 0x0e, 0x03, 0x02, 0x08, /* NID_pkcs9 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, /* NID_pkcs9_emailAddress */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, /* NID_pkcs9_unstructuredName */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x02, /* NID_pkcs9_contentType */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x03, /* NID_pkcs9_messageDigest */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x04, /* NID_pkcs9_signingTime */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x05, /* NID_pkcs9_countersignature */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x06, /* NID_pkcs9_challengePassword */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x07, /* NID_pkcs9_unstructuredAddress */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x08, /* NID_pkcs9_extCertAttributes */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x09, /* NID_netscape */ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, /* NID_netscape_cert_extension */ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, /* NID_netscape_data_type */ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x02, /* NID_sha1 */ 0x2b, 0x0e, 0x03, 0x02, 0x1a, /* NID_sha1WithRSAEncryption */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, /* NID_dsaWithSHA */ 0x2b, 0x0e, 0x03, 0x02, 0x0d, /* NID_dsa_2 */ 0x2b, 0x0e, 0x03, 0x02, 0x0c, /* NID_pbeWithSHA1AndRC2_CBC */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0b, /* NID_id_pbkdf2 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0c, /* NID_dsaWithSHA1_2 */ 0x2b, 0x0e, 0x03, 0x02, 0x1b, /* NID_netscape_cert_type */ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x01, /* NID_netscape_base_url */ 
0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x02, /* NID_netscape_revocation_url */ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x03, /* NID_netscape_ca_revocation_url */ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x04, /* NID_netscape_renewal_url */ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x07, /* NID_netscape_ca_policy_url */ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x08, /* NID_netscape_ssl_server_name */ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x0c, /* NID_netscape_comment */ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x01, 0x0d, /* NID_netscape_cert_sequence */ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x02, 0x05, /* NID_id_ce */ 0x55, 0x1d, /* NID_subject_key_identifier */ 0x55, 0x1d, 0x0e, /* NID_key_usage */ 0x55, 0x1d, 0x0f, /* NID_private_key_usage_period */ 0x55, 0x1d, 0x10, /* NID_subject_alt_name */ 0x55, 0x1d, 0x11, /* NID_issuer_alt_name */ 0x55, 0x1d, 0x12, /* NID_basic_constraints */ 0x55, 0x1d, 0x13, /* NID_crl_number */ 0x55, 0x1d, 0x14, /* NID_certificate_policies */ 0x55, 0x1d, 0x20, /* NID_authority_key_identifier */ 0x55, 0x1d, 0x23, /* NID_bf_cbc */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x02, /* NID_mdc2 */ 0x55, 0x08, 0x03, 0x65, /* NID_mdc2WithRSA */ 0x55, 0x08, 0x03, 0x64, /* NID_givenName */ 0x55, 0x04, 0x2a, /* NID_surname */ 0x55, 0x04, 0x04, /* NID_initials */ 0x55, 0x04, 0x2b, /* NID_crl_distribution_points */ 0x55, 0x1d, 0x1f, /* NID_md5WithRSA */ 0x2b, 0x0e, 0x03, 0x02, 0x03, /* NID_serialNumber */ 0x55, 0x04, 0x05, /* NID_title */ 0x55, 0x04, 0x0c, /* NID_description */ 0x55, 0x04, 0x0d, /* NID_cast5_cbc */ 0x2a, 0x86, 0x48, 0x86, 0xf6, 0x7d, 0x07, 0x42, 0x0a, /* NID_pbeWithMD5AndCast5_CBC */ 0x2a, 0x86, 0x48, 0x86, 0xf6, 0x7d, 0x07, 0x42, 0x0c, /* NID_dsaWithSHA1 */ 0x2a, 0x86, 0x48, 0xce, 0x38, 0x04, 0x03, /* NID_sha1WithRSA */ 0x2b, 0x0e, 0x03, 0x02, 0x1d, /* NID_dsa */ 0x2a, 0x86, 0x48, 0xce, 0x38, 0x04, 0x01, /* NID_ripemd160 */ 0x2b, 0x24, 0x03, 0x02, 0x01, /* NID_ripemd160WithRSA 
*/ 0x2b, 0x24, 0x03, 0x03, 0x01, 0x02, /* NID_rc5_cbc */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x08, /* NID_zlib_compression */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x03, 0x08, /* NID_ext_key_usage */ 0x55, 0x1d, 0x25, /* NID_id_pkix */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, /* NID_id_kp */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, /* NID_server_auth */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x01, /* NID_client_auth */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x02, /* NID_code_sign */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x03, /* NID_email_protect */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x04, /* NID_time_stamp */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x08, /* NID_ms_code_ind */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x02, 0x01, 0x15, /* NID_ms_code_com */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x02, 0x01, 0x16, /* NID_ms_ctl_sign */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x0a, 0x03, 0x01, /* NID_ms_sgc */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x0a, 0x03, 0x03, /* NID_ms_efs */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x0a, 0x03, 0x04, /* NID_ns_sgc */ 0x60, 0x86, 0x48, 0x01, 0x86, 0xf8, 0x42, 0x04, 0x01, /* NID_delta_crl */ 0x55, 0x1d, 0x1b, /* NID_crl_reason */ 0x55, 0x1d, 0x15, /* NID_invalidity_date */ 0x55, 0x1d, 0x18, /* NID_sxnet */ 0x2b, 0x65, 0x01, 0x04, 0x01, /* NID_pbe_WithSHA1And128BitRC4 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x01, /* NID_pbe_WithSHA1And40BitRC4 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x02, /* NID_pbe_WithSHA1And3_Key_TripleDES_CBC */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x03, /* NID_pbe_WithSHA1And2_Key_TripleDES_CBC */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x04, /* NID_pbe_WithSHA1And128BitRC2_CBC */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x05, /* NID_pbe_WithSHA1And40BitRC2_CBC */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x06, /* NID_keyBag */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 
0x0a, 0x01, 0x01, /* NID_pkcs8ShroudedKeyBag */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x0a, 0x01, 0x02, /* NID_certBag */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x0a, 0x01, 0x03, /* NID_crlBag */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x0a, 0x01, 0x04, /* NID_secretBag */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x0a, 0x01, 0x05, /* NID_safeContentsBag */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x0a, 0x01, 0x06, /* NID_friendlyName */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x14, /* NID_localKeyID */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x15, /* NID_x509Certificate */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x16, 0x01, /* NID_sdsiCertificate */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x16, 0x02, /* NID_x509Crl */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x17, 0x01, /* NID_pbes2 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0d, /* NID_pbmac1 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0e, /* NID_hmacWithSHA1 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x07, /* NID_id_qt_cps */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x02, 0x01, /* NID_id_qt_unotice */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x02, 0x02, /* NID_SMIMECapabilities */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x0f, /* NID_pbeWithMD2AndRC2_CBC */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x04, /* NID_pbeWithMD5AndRC2_CBC */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x06, /* NID_pbeWithSHA1AndDES_CBC */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0a, /* NID_ms_ext_req */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x02, 0x01, 0x0e, /* NID_ext_req */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x0e, /* NID_name */ 0x55, 0x04, 0x29, /* NID_dnQualifier */ 0x55, 0x04, 0x2e, /* NID_id_pe */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, /* NID_id_ad */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, /* NID_info_access */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x01, /* NID_ad_OCSP */ 0x2b, 0x06, 0x01, 
0x05, 0x05, 0x07, 0x30, 0x01, /* NID_ad_ca_issuers */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x02, /* NID_OCSP_sign */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x09, /* NID_member_body */ 0x2a, /* NID_ISO_US */ 0x2a, 0x86, 0x48, /* NID_X9_57 */ 0x2a, 0x86, 0x48, 0xce, 0x38, /* NID_X9cm */ 0x2a, 0x86, 0x48, 0xce, 0x38, 0x04, /* NID_pkcs1 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, /* NID_pkcs5 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, /* NID_SMIME */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, /* NID_id_smime_mod */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x00, /* NID_id_smime_ct */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, /* NID_id_smime_aa */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, /* NID_id_smime_alg */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x03, /* NID_id_smime_cd */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x04, /* NID_id_smime_spq */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x05, /* NID_id_smime_cti */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x06, /* NID_id_smime_mod_cms */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x00, 0x01, /* NID_id_smime_mod_ess */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x00, 0x02, /* NID_id_smime_mod_oid */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x00, 0x03, /* NID_id_smime_mod_msg_v3 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x00, 0x04, /* NID_id_smime_mod_ets_eSignature_88 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x00, 0x05, /* NID_id_smime_mod_ets_eSignature_97 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x00, 0x06, /* NID_id_smime_mod_ets_eSigPolicy_88 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x00, 0x07, /* NID_id_smime_mod_ets_eSigPolicy_97 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x00, 0x08, /* NID_id_smime_ct_receipt */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 
0x01, /* NID_id_smime_ct_authData */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x02, /* NID_id_smime_ct_publishCert */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x03, /* NID_id_smime_ct_TSTInfo */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x04, /* NID_id_smime_ct_TDTInfo */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x05, /* NID_id_smime_ct_contentInfo */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x06, /* NID_id_smime_ct_DVCSRequestData */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x07, /* NID_id_smime_ct_DVCSResponseData */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x08, /* NID_id_smime_aa_receiptRequest */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x01, /* NID_id_smime_aa_securityLabel */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x02, /* NID_id_smime_aa_mlExpandHistory */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x03, /* NID_id_smime_aa_contentHint */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x04, /* NID_id_smime_aa_msgSigDigest */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x05, /* NID_id_smime_aa_encapContentType */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x06, /* NID_id_smime_aa_contentIdentifier */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x07, /* NID_id_smime_aa_macValue */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x08, /* NID_id_smime_aa_equivalentLabels */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x09, /* NID_id_smime_aa_contentReference */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x0a, /* NID_id_smime_aa_encrypKeyPref */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x0b, /* NID_id_smime_aa_signingCertificate */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x0c, /* NID_id_smime_aa_smimeEncryptCerts */ 0x2a, 0x86, 0x48, 
0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x0d, /* NID_id_smime_aa_timeStampToken */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x0e, /* NID_id_smime_aa_ets_sigPolicyId */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x0f, /* NID_id_smime_aa_ets_commitmentType */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x10, /* NID_id_smime_aa_ets_signerLocation */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x11, /* NID_id_smime_aa_ets_signerAttr */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x12, /* NID_id_smime_aa_ets_otherSigCert */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x13, /* NID_id_smime_aa_ets_contentTimestamp */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x14, /* NID_id_smime_aa_ets_CertificateRefs */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x15, /* NID_id_smime_aa_ets_RevocationRefs */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x16, /* NID_id_smime_aa_ets_certValues */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x17, /* NID_id_smime_aa_ets_revocationValues */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x18, /* NID_id_smime_aa_ets_escTimeStamp */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x19, /* NID_id_smime_aa_ets_certCRLTimestamp */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x1a, /* NID_id_smime_aa_ets_archiveTimeStamp */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x1b, /* NID_id_smime_aa_signatureType */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x1c, /* NID_id_smime_aa_dvcs_dvc */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x02, 0x1d, /* NID_id_smime_alg_ESDHwith3DES */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x03, 0x01, /* NID_id_smime_alg_ESDHwithRC2 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x03, 0x02, /* NID_id_smime_alg_3DESwrap */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 
0x0d, 0x01, 0x09, 0x10, 0x03, 0x03, /* NID_id_smime_alg_RC2wrap */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x03, 0x04, /* NID_id_smime_alg_ESDH */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x03, 0x05, /* NID_id_smime_alg_CMS3DESwrap */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x03, 0x06, /* NID_id_smime_alg_CMSRC2wrap */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x03, 0x07, /* NID_id_smime_cd_ldap */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x04, 0x01, /* NID_id_smime_spq_ets_sqt_uri */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x05, 0x01, /* NID_id_smime_spq_ets_sqt_unotice */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x05, 0x02, /* NID_id_smime_cti_ets_proofOfOrigin */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x06, 0x01, /* NID_id_smime_cti_ets_proofOfReceipt */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x06, 0x02, /* NID_id_smime_cti_ets_proofOfDelivery */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x06, 0x03, /* NID_id_smime_cti_ets_proofOfSender */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x06, 0x04, /* NID_id_smime_cti_ets_proofOfApproval */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x06, 0x05, /* NID_id_smime_cti_ets_proofOfCreation */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x06, 0x06, /* NID_md4 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x04, /* NID_id_pkix_mod */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, /* NID_id_qt */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x02, /* NID_id_it */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, /* NID_id_pkip */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x05, /* NID_id_alg */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x06, /* NID_id_cmc */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, /* NID_id_on */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x08, /* NID_id_pda */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x09, /* NID_id_aca */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x0a, /* NID_id_qcs */ 0x2b, 
0x06, 0x01, 0x05, 0x05, 0x07, 0x0b, /* NID_id_cct */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x0c, /* NID_id_pkix1_explicit_88 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x01, /* NID_id_pkix1_implicit_88 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x02, /* NID_id_pkix1_explicit_93 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x03, /* NID_id_pkix1_implicit_93 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x04, /* NID_id_mod_crmf */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x05, /* NID_id_mod_cmc */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x06, /* NID_id_mod_kea_profile_88 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x07, /* NID_id_mod_kea_profile_93 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x08, /* NID_id_mod_cmp */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x09, /* NID_id_mod_qualified_cert_88 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x0a, /* NID_id_mod_qualified_cert_93 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x0b, /* NID_id_mod_attribute_cert */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x0c, /* NID_id_mod_timestamp_protocol */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x0d, /* NID_id_mod_ocsp */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x0e, /* NID_id_mod_dvcs */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x0f, /* NID_id_mod_cmp2000 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x00, 0x10, /* NID_biometricInfo */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x02, /* NID_qcStatements */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x03, /* NID_ac_auditEntity */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x04, /* NID_ac_targeting */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x05, /* NID_aaControls */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x06, /* NID_sbgp_ipAddrBlock */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x07, /* NID_sbgp_autonomousSysNum */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x08, /* NID_sbgp_routerIdentifier */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x09, /* NID_textNotice */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x02, 0x03, /* 
NID_ipsecEndSystem */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x05, /* NID_ipsecTunnel */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x06, /* NID_ipsecUser */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x07, /* NID_dvcs */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x0a, /* NID_id_it_caProtEncCert */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x01, /* NID_id_it_signKeyPairTypes */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x02, /* NID_id_it_encKeyPairTypes */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x03, /* NID_id_it_preferredSymmAlg */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x04, /* NID_id_it_caKeyUpdateInfo */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x05, /* NID_id_it_currentCRL */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x06, /* NID_id_it_unsupportedOIDs */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x07, /* NID_id_it_subscriptionRequest */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x08, /* NID_id_it_subscriptionResponse */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x09, /* NID_id_it_keyPairParamReq */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x0a, /* NID_id_it_keyPairParamRep */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x0b, /* NID_id_it_revPassphrase */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x0c, /* NID_id_it_implicitConfirm */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x0d, /* NID_id_it_confirmWaitTime */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x0e, /* NID_id_it_origPKIMessage */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x0f, /* NID_id_regCtrl */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x05, 0x01, /* NID_id_regInfo */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x05, 0x02, /* NID_id_regCtrl_regToken */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x05, 0x01, 0x01, /* NID_id_regCtrl_authenticator */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x05, 0x01, 0x02, /* NID_id_regCtrl_pkiPublicationInfo */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x05, 0x01, 0x03, /* NID_id_regCtrl_pkiArchiveOptions */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x05, 0x01, 0x04, /* NID_id_regCtrl_oldCertID 
*/ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x05, 0x01, 0x05, /* NID_id_regCtrl_protocolEncrKey */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x05, 0x01, 0x06, /* NID_id_regInfo_utf8Pairs */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x05, 0x02, 0x01, /* NID_id_regInfo_certReq */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x05, 0x02, 0x02, /* NID_id_alg_des40 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x06, 0x01, /* NID_id_alg_noSignature */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x06, 0x02, /* NID_id_alg_dh_sig_hmac_sha1 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x06, 0x03, /* NID_id_alg_dh_pop */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x06, 0x04, /* NID_id_cmc_statusInfo */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x01, /* NID_id_cmc_identification */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x02, /* NID_id_cmc_identityProof */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x03, /* NID_id_cmc_dataReturn */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x04, /* NID_id_cmc_transactionId */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x05, /* NID_id_cmc_senderNonce */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x06, /* NID_id_cmc_recipientNonce */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x07, /* NID_id_cmc_addExtensions */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x08, /* NID_id_cmc_encryptedPOP */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x09, /* NID_id_cmc_decryptedPOP */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x0a, /* NID_id_cmc_lraPOPWitness */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x0b, /* NID_id_cmc_getCert */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x0f, /* NID_id_cmc_getCRL */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x10, /* NID_id_cmc_revokeRequest */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x11, /* NID_id_cmc_regInfo */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x12, /* NID_id_cmc_responseInfo */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x13, /* NID_id_cmc_queryPending */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x15, /* NID_id_cmc_popLinkRandom */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 
0x07, 0x16, /* NID_id_cmc_popLinkWitness */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x17, /* NID_id_cmc_confirmCertAcceptance */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x07, 0x18, /* NID_id_on_personalData */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x08, 0x01, /* NID_id_pda_dateOfBirth */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x09, 0x01, /* NID_id_pda_placeOfBirth */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x09, 0x02, /* NID_id_pda_gender */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x09, 0x03, /* NID_id_pda_countryOfCitizenship */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x09, 0x04, /* NID_id_pda_countryOfResidence */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x09, 0x05, /* NID_id_aca_authenticationInfo */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x0a, 0x01, /* NID_id_aca_accessIdentity */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x0a, 0x02, /* NID_id_aca_chargingIdentity */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x0a, 0x03, /* NID_id_aca_group */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x0a, 0x04, /* NID_id_aca_role */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x0a, 0x05, /* NID_id_qcs_pkixQCSyntax_v1 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x0b, 0x01, /* NID_id_cct_crs */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x0c, 0x01, /* NID_id_cct_PKIData */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x0c, 0x02, /* NID_id_cct_PKIResponse */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x0c, 0x03, /* NID_ad_timeStamping */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x03, /* NID_ad_dvcs */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x04, /* NID_id_pkix_OCSP_basic */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 0x01, /* NID_id_pkix_OCSP_Nonce */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 0x02, /* NID_id_pkix_OCSP_CrlID */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 0x03, /* NID_id_pkix_OCSP_acceptableResponses */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 0x04, /* NID_id_pkix_OCSP_noCheck */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 0x05, /* NID_id_pkix_OCSP_archiveCutoff */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 
0x06, /* NID_id_pkix_OCSP_serviceLocator */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 0x07, /* NID_id_pkix_OCSP_extendedStatus */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 0x08, /* NID_id_pkix_OCSP_valid */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 0x09, /* NID_id_pkix_OCSP_path */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 0x0a, /* NID_id_pkix_OCSP_trustRoot */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 0x0b, /* NID_algorithm */ 0x2b, 0x0e, 0x03, 0x02, /* NID_rsaSignature */ 0x2b, 0x0e, 0x03, 0x02, 0x0b, /* NID_X500algorithms */ 0x55, 0x08, /* NID_org */ 0x2b, /* NID_dod */ 0x2b, 0x06, /* NID_iana */ 0x2b, 0x06, 0x01, /* NID_Directory */ 0x2b, 0x06, 0x01, 0x01, /* NID_Management */ 0x2b, 0x06, 0x01, 0x02, /* NID_Experimental */ 0x2b, 0x06, 0x01, 0x03, /* NID_Private */ 0x2b, 0x06, 0x01, 0x04, /* NID_Security */ 0x2b, 0x06, 0x01, 0x05, /* NID_SNMPv2 */ 0x2b, 0x06, 0x01, 0x06, /* NID_Mail */ 0x2b, 0x06, 0x01, 0x07, /* NID_Enterprises */ 0x2b, 0x06, 0x01, 0x04, 0x01, /* NID_dcObject */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x8b, 0x3a, 0x82, 0x58, /* NID_domainComponent */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x19, /* NID_Domain */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x0d, /* NID_selected_attribute_types */ 0x55, 0x01, 0x05, /* NID_clearance */ 0x55, 0x01, 0x05, 0x37, /* NID_md4WithRSAEncryption */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x03, /* NID_ac_proxying */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x0a, /* NID_sinfo_access */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x0b, /* NID_id_aca_encAttrs */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x0a, 0x06, /* NID_role */ 0x55, 0x04, 0x48, /* NID_policy_constraints */ 0x55, 0x1d, 0x24, /* NID_target_information */ 0x55, 0x1d, 0x37, /* NID_no_rev_avail */ 0x55, 0x1d, 0x38, /* NID_ansi_X9_62 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, /* NID_X9_62_prime_field */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x01, 0x01, /* NID_X9_62_characteristic_two_field */ 0x2a, 0x86, 0x48, 0xce, 
0x3d, 0x01, 0x02, /* NID_X9_62_id_ecPublicKey */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01, /* NID_X9_62_prime192v1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x01, /* NID_X9_62_prime192v2 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x02, /* NID_X9_62_prime192v3 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x03, /* NID_X9_62_prime239v1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x04, /* NID_X9_62_prime239v2 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x05, /* NID_X9_62_prime239v3 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x06, /* NID_X9_62_prime256v1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07, /* NID_ecdsa_with_SHA1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x01, /* NID_ms_csp_name */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x11, 0x01, /* NID_aes_128_ecb */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x01, /* NID_aes_128_cbc */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x02, /* NID_aes_128_ofb128 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x03, /* NID_aes_128_cfb128 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x04, /* NID_aes_192_ecb */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x15, /* NID_aes_192_cbc */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x16, /* NID_aes_192_ofb128 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x17, /* NID_aes_192_cfb128 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x18, /* NID_aes_256_ecb */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x29, /* NID_aes_256_cbc */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x2a, /* NID_aes_256_ofb128 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x2b, /* NID_aes_256_cfb128 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x2c, /* NID_hold_instruction_code */ 0x55, 0x1d, 0x17, /* NID_hold_instruction_none */ 0x2a, 0x86, 0x48, 0xce, 0x38, 0x02, 0x01, /* NID_hold_instruction_call_issuer */ 0x2a, 0x86, 0x48, 0xce, 0x38, 0x02, 0x02, /* NID_hold_instruction_reject */ 0x2a, 0x86, 0x48, 0xce, 0x38, 0x02, 0x03, /* NID_data */ 0x09, /* 
NID_pss */ 0x09, 0x92, 0x26, /* NID_ucl */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, /* NID_pilot */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, /* NID_pilotAttributeType */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, /* NID_pilotAttributeSyntax */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x03, /* NID_pilotObjectClass */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, /* NID_pilotGroups */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x0a, /* NID_iA5StringSyntax */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x03, 0x04, /* NID_caseIgnoreIA5StringSyntax */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x03, 0x05, /* NID_pilotObject */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x03, /* NID_pilotPerson */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x04, /* NID_account */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x05, /* NID_document */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x06, /* NID_room */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x07, /* NID_documentSeries */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x09, /* NID_rFC822localPart */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x0e, /* NID_dNSDomain */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x0f, /* NID_domainRelatedObject */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x11, /* NID_friendlyCountry */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x12, /* NID_simpleSecurityObject */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x13, /* NID_pilotOrganization */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x14, /* NID_pilotDSA */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x15, /* NID_qualityLabelledData */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x04, 0x16, /* NID_userId */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x01, /* NID_textEncodedORAddress */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x02, /* 
NID_rfc822Mailbox */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x03, /* NID_info */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x04, /* NID_favouriteDrink */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x05, /* NID_roomNumber */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x06, /* NID_photo */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x07, /* NID_userClass */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x08, /* NID_host */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x09, /* NID_manager */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x0a, /* NID_documentIdentifier */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x0b, /* NID_documentTitle */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x0c, /* NID_documentVersion */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x0d, /* NID_documentAuthor */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x0e, /* NID_documentLocation */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x0f, /* NID_homeTelephoneNumber */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x14, /* NID_secretary */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x15, /* NID_otherMailbox */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x16, /* NID_lastModifiedTime */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x17, /* NID_lastModifiedBy */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x18, /* NID_aRecord */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x1a, /* NID_pilotAttributeType27 */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x1b, /* NID_mXRecord */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x1c, /* NID_nSRecord */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x1d, /* NID_sOARecord */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x1e, /* NID_cNAMERecord */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x1f, /* NID_associatedDomain 
*/ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x25, /* NID_associatedName */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x26, /* NID_homePostalAddress */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x27, /* NID_personalTitle */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x28, /* NID_mobileTelephoneNumber */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x29, /* NID_pagerTelephoneNumber */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x2a, /* NID_friendlyCountryName */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x2b, /* NID_organizationalStatus */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x2d, /* NID_janetMailbox */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x2e, /* NID_mailPreferenceOption */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x2f, /* NID_buildingName */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x30, /* NID_dSAQuality */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x31, /* NID_singleLevelQuality */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x32, /* NID_subtreeMinimumQuality */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x33, /* NID_subtreeMaximumQuality */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x34, /* NID_personalSignature */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x35, /* NID_dITRedirect */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x36, /* NID_audio */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x37, /* NID_documentPublisher */ 0x09, 0x92, 0x26, 0x89, 0x93, 0xf2, 0x2c, 0x64, 0x01, 0x38, /* NID_x500UniqueIdentifier */ 0x55, 0x04, 0x2d, /* NID_mime_mhs */ 0x2b, 0x06, 0x01, 0x07, 0x01, /* NID_mime_mhs_headings */ 0x2b, 0x06, 0x01, 0x07, 0x01, 0x01, /* NID_mime_mhs_bodies */ 0x2b, 0x06, 0x01, 0x07, 0x01, 0x02, /* NID_id_hex_partial_message */ 0x2b, 0x06, 0x01, 0x07, 0x01, 0x01, 0x01, /* NID_id_hex_multipart_message */ 0x2b, 0x06, 0x01, 0x07, 0x01, 0x01, 
0x02, /* NID_generationQualifier */ 0x55, 0x04, 0x2c, /* NID_pseudonym */ 0x55, 0x04, 0x41, /* NID_id_set */ 0x67, 0x2a, /* NID_set_ctype */ 0x67, 0x2a, 0x00, /* NID_set_msgExt */ 0x67, 0x2a, 0x01, /* NID_set_attr */ 0x67, 0x2a, 0x03, /* NID_set_policy */ 0x67, 0x2a, 0x05, /* NID_set_certExt */ 0x67, 0x2a, 0x07, /* NID_set_brand */ 0x67, 0x2a, 0x08, /* NID_setct_PANData */ 0x67, 0x2a, 0x00, 0x00, /* NID_setct_PANToken */ 0x67, 0x2a, 0x00, 0x01, /* NID_setct_PANOnly */ 0x67, 0x2a, 0x00, 0x02, /* NID_setct_OIData */ 0x67, 0x2a, 0x00, 0x03, /* NID_setct_PI */ 0x67, 0x2a, 0x00, 0x04, /* NID_setct_PIData */ 0x67, 0x2a, 0x00, 0x05, /* NID_setct_PIDataUnsigned */ 0x67, 0x2a, 0x00, 0x06, /* NID_setct_HODInput */ 0x67, 0x2a, 0x00, 0x07, /* NID_setct_AuthResBaggage */ 0x67, 0x2a, 0x00, 0x08, /* NID_setct_AuthRevReqBaggage */ 0x67, 0x2a, 0x00, 0x09, /* NID_setct_AuthRevResBaggage */ 0x67, 0x2a, 0x00, 0x0a, /* NID_setct_CapTokenSeq */ 0x67, 0x2a, 0x00, 0x0b, /* NID_setct_PInitResData */ 0x67, 0x2a, 0x00, 0x0c, /* NID_setct_PI_TBS */ 0x67, 0x2a, 0x00, 0x0d, /* NID_setct_PResData */ 0x67, 0x2a, 0x00, 0x0e, /* NID_setct_AuthReqTBS */ 0x67, 0x2a, 0x00, 0x10, /* NID_setct_AuthResTBS */ 0x67, 0x2a, 0x00, 0x11, /* NID_setct_AuthResTBSX */ 0x67, 0x2a, 0x00, 0x12, /* NID_setct_AuthTokenTBS */ 0x67, 0x2a, 0x00, 0x13, /* NID_setct_CapTokenData */ 0x67, 0x2a, 0x00, 0x14, /* NID_setct_CapTokenTBS */ 0x67, 0x2a, 0x00, 0x15, /* NID_setct_AcqCardCodeMsg */ 0x67, 0x2a, 0x00, 0x16, /* NID_setct_AuthRevReqTBS */ 0x67, 0x2a, 0x00, 0x17, /* NID_setct_AuthRevResData */ 0x67, 0x2a, 0x00, 0x18, /* NID_setct_AuthRevResTBS */ 0x67, 0x2a, 0x00, 0x19, /* NID_setct_CapReqTBS */ 0x67, 0x2a, 0x00, 0x1a, /* NID_setct_CapReqTBSX */ 0x67, 0x2a, 0x00, 0x1b, /* NID_setct_CapResData */ 0x67, 0x2a, 0x00, 0x1c, /* NID_setct_CapRevReqTBS */ 0x67, 0x2a, 0x00, 0x1d, /* NID_setct_CapRevReqTBSX */ 0x67, 0x2a, 0x00, 0x1e, /* NID_setct_CapRevResData */ 0x67, 0x2a, 0x00, 0x1f, /* NID_setct_CredReqTBS */ 0x67, 0x2a, 0x00, 
0x20, /* NID_setct_CredReqTBSX */ 0x67, 0x2a, 0x00, 0x21, /* NID_setct_CredResData */ 0x67, 0x2a, 0x00, 0x22, /* NID_setct_CredRevReqTBS */ 0x67, 0x2a, 0x00, 0x23, /* NID_setct_CredRevReqTBSX */ 0x67, 0x2a, 0x00, 0x24, /* NID_setct_CredRevResData */ 0x67, 0x2a, 0x00, 0x25, /* NID_setct_PCertReqData */ 0x67, 0x2a, 0x00, 0x26, /* NID_setct_PCertResTBS */ 0x67, 0x2a, 0x00, 0x27, /* NID_setct_BatchAdminReqData */ 0x67, 0x2a, 0x00, 0x28, /* NID_setct_BatchAdminResData */ 0x67, 0x2a, 0x00, 0x29, /* NID_setct_CardCInitResTBS */ 0x67, 0x2a, 0x00, 0x2a, /* NID_setct_MeAqCInitResTBS */ 0x67, 0x2a, 0x00, 0x2b, /* NID_setct_RegFormResTBS */ 0x67, 0x2a, 0x00, 0x2c, /* NID_setct_CertReqData */ 0x67, 0x2a, 0x00, 0x2d, /* NID_setct_CertReqTBS */ 0x67, 0x2a, 0x00, 0x2e, /* NID_setct_CertResData */ 0x67, 0x2a, 0x00, 0x2f, /* NID_setct_CertInqReqTBS */ 0x67, 0x2a, 0x00, 0x30, /* NID_setct_ErrorTBS */ 0x67, 0x2a, 0x00, 0x31, /* NID_setct_PIDualSignedTBE */ 0x67, 0x2a, 0x00, 0x32, /* NID_setct_PIUnsignedTBE */ 0x67, 0x2a, 0x00, 0x33, /* NID_setct_AuthReqTBE */ 0x67, 0x2a, 0x00, 0x34, /* NID_setct_AuthResTBE */ 0x67, 0x2a, 0x00, 0x35, /* NID_setct_AuthResTBEX */ 0x67, 0x2a, 0x00, 0x36, /* NID_setct_AuthTokenTBE */ 0x67, 0x2a, 0x00, 0x37, /* NID_setct_CapTokenTBE */ 0x67, 0x2a, 0x00, 0x38, /* NID_setct_CapTokenTBEX */ 0x67, 0x2a, 0x00, 0x39, /* NID_setct_AcqCardCodeMsgTBE */ 0x67, 0x2a, 0x00, 0x3a, /* NID_setct_AuthRevReqTBE */ 0x67, 0x2a, 0x00, 0x3b, /* NID_setct_AuthRevResTBE */ 0x67, 0x2a, 0x00, 0x3c, /* NID_setct_AuthRevResTBEB */ 0x67, 0x2a, 0x00, 0x3d, /* NID_setct_CapReqTBE */ 0x67, 0x2a, 0x00, 0x3e, /* NID_setct_CapReqTBEX */ 0x67, 0x2a, 0x00, 0x3f, /* NID_setct_CapResTBE */ 0x67, 0x2a, 0x00, 0x40, /* NID_setct_CapRevReqTBE */ 0x67, 0x2a, 0x00, 0x41, /* NID_setct_CapRevReqTBEX */ 0x67, 0x2a, 0x00, 0x42, /* NID_setct_CapRevResTBE */ 0x67, 0x2a, 0x00, 0x43, /* NID_setct_CredReqTBE */ 0x67, 0x2a, 0x00, 0x44, /* NID_setct_CredReqTBEX */ 0x67, 0x2a, 0x00, 0x45, /* NID_setct_CredResTBE 
*/ 0x67, 0x2a, 0x00, 0x46, /* NID_setct_CredRevReqTBE */ 0x67, 0x2a, 0x00, 0x47, /* NID_setct_CredRevReqTBEX */ 0x67, 0x2a, 0x00, 0x48, /* NID_setct_CredRevResTBE */ 0x67, 0x2a, 0x00, 0x49, /* NID_setct_BatchAdminReqTBE */ 0x67, 0x2a, 0x00, 0x4a, /* NID_setct_BatchAdminResTBE */ 0x67, 0x2a, 0x00, 0x4b, /* NID_setct_RegFormReqTBE */ 0x67, 0x2a, 0x00, 0x4c, /* NID_setct_CertReqTBE */ 0x67, 0x2a, 0x00, 0x4d, /* NID_setct_CertReqTBEX */ 0x67, 0x2a, 0x00, 0x4e, /* NID_setct_CertResTBE */ 0x67, 0x2a, 0x00, 0x4f, /* NID_setct_CRLNotificationTBS */ 0x67, 0x2a, 0x00, 0x50, /* NID_setct_CRLNotificationResTBS */ 0x67, 0x2a, 0x00, 0x51, /* NID_setct_BCIDistributionTBS */ 0x67, 0x2a, 0x00, 0x52, /* NID_setext_genCrypt */ 0x67, 0x2a, 0x01, 0x01, /* NID_setext_miAuth */ 0x67, 0x2a, 0x01, 0x03, /* NID_setext_pinSecure */ 0x67, 0x2a, 0x01, 0x04, /* NID_setext_pinAny */ 0x67, 0x2a, 0x01, 0x05, /* NID_setext_track2 */ 0x67, 0x2a, 0x01, 0x07, /* NID_setext_cv */ 0x67, 0x2a, 0x01, 0x08, /* NID_set_policy_root */ 0x67, 0x2a, 0x05, 0x00, /* NID_setCext_hashedRoot */ 0x67, 0x2a, 0x07, 0x00, /* NID_setCext_certType */ 0x67, 0x2a, 0x07, 0x01, /* NID_setCext_merchData */ 0x67, 0x2a, 0x07, 0x02, /* NID_setCext_cCertRequired */ 0x67, 0x2a, 0x07, 0x03, /* NID_setCext_tunneling */ 0x67, 0x2a, 0x07, 0x04, /* NID_setCext_setExt */ 0x67, 0x2a, 0x07, 0x05, /* NID_setCext_setQualf */ 0x67, 0x2a, 0x07, 0x06, /* NID_setCext_PGWYcapabilities */ 0x67, 0x2a, 0x07, 0x07, /* NID_setCext_TokenIdentifier */ 0x67, 0x2a, 0x07, 0x08, /* NID_setCext_Track2Data */ 0x67, 0x2a, 0x07, 0x09, /* NID_setCext_TokenType */ 0x67, 0x2a, 0x07, 0x0a, /* NID_setCext_IssuerCapabilities */ 0x67, 0x2a, 0x07, 0x0b, /* NID_setAttr_Cert */ 0x67, 0x2a, 0x03, 0x00, /* NID_setAttr_PGWYcap */ 0x67, 0x2a, 0x03, 0x01, /* NID_setAttr_TokenType */ 0x67, 0x2a, 0x03, 0x02, /* NID_setAttr_IssCap */ 0x67, 0x2a, 0x03, 0x03, /* NID_set_rootKeyThumb */ 0x67, 0x2a, 0x03, 0x00, 0x00, /* NID_set_addPolicy */ 0x67, 0x2a, 0x03, 0x00, 0x01, /* 
NID_setAttr_Token_EMV */ 0x67, 0x2a, 0x03, 0x02, 0x01, /* NID_setAttr_Token_B0Prime */ 0x67, 0x2a, 0x03, 0x02, 0x02, /* NID_setAttr_IssCap_CVM */ 0x67, 0x2a, 0x03, 0x03, 0x03, /* NID_setAttr_IssCap_T2 */ 0x67, 0x2a, 0x03, 0x03, 0x04, /* NID_setAttr_IssCap_Sig */ 0x67, 0x2a, 0x03, 0x03, 0x05, /* NID_setAttr_GenCryptgrm */ 0x67, 0x2a, 0x03, 0x03, 0x03, 0x01, /* NID_setAttr_T2Enc */ 0x67, 0x2a, 0x03, 0x03, 0x04, 0x01, /* NID_setAttr_T2cleartxt */ 0x67, 0x2a, 0x03, 0x03, 0x04, 0x02, /* NID_setAttr_TokICCsig */ 0x67, 0x2a, 0x03, 0x03, 0x05, 0x01, /* NID_setAttr_SecDevSig */ 0x67, 0x2a, 0x03, 0x03, 0x05, 0x02, /* NID_set_brand_IATA_ATA */ 0x67, 0x2a, 0x08, 0x01, /* NID_set_brand_Diners */ 0x67, 0x2a, 0x08, 0x1e, /* NID_set_brand_AmericanExpress */ 0x67, 0x2a, 0x08, 0x22, /* NID_set_brand_JCB */ 0x67, 0x2a, 0x08, 0x23, /* NID_set_brand_Visa */ 0x67, 0x2a, 0x08, 0x04, /* NID_set_brand_MasterCard */ 0x67, 0x2a, 0x08, 0x05, /* NID_set_brand_Novus */ 0x67, 0x2a, 0x08, 0xae, 0x7b, /* NID_des_cdmf */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x0a, /* NID_rsaOAEPEncryptionSET */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x06, /* NID_international_organizations */ 0x67, /* NID_ms_smartcard_login */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x14, 0x02, 0x02, /* NID_ms_upn */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x14, 0x02, 0x03, /* NID_streetAddress */ 0x55, 0x04, 0x09, /* NID_postalCode */ 0x55, 0x04, 0x11, /* NID_id_ppl */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x15, /* NID_proxyCertInfo */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x0e, /* NID_id_ppl_anyLanguage */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x15, 0x00, /* NID_id_ppl_inheritAll */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x15, 0x01, /* NID_name_constraints */ 0x55, 0x1d, 0x1e, /* NID_Independent */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x15, 0x02, /* NID_sha256WithRSAEncryption */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, /* NID_sha384WithRSAEncryption */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 
0x0c, /* NID_sha512WithRSAEncryption */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0d, /* NID_sha224WithRSAEncryption */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0e, /* NID_sha256 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, /* NID_sha384 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, /* NID_sha512 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, /* NID_sha224 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, /* NID_identified_organization */ 0x2b, /* NID_certicom_arc */ 0x2b, 0x81, 0x04, /* NID_wap */ 0x67, 0x2b, /* NID_wap_wsg */ 0x67, 0x2b, 0x01, /* NID_X9_62_id_characteristic_two_basis */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x01, 0x02, 0x03, /* NID_X9_62_onBasis */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x01, 0x02, 0x03, 0x01, /* NID_X9_62_tpBasis */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x01, 0x02, 0x03, 0x02, /* NID_X9_62_ppBasis */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x01, 0x02, 0x03, 0x03, /* NID_X9_62_c2pnb163v1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x01, /* NID_X9_62_c2pnb163v2 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x02, /* NID_X9_62_c2pnb163v3 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x03, /* NID_X9_62_c2pnb176v1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x04, /* NID_X9_62_c2tnb191v1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x05, /* NID_X9_62_c2tnb191v2 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x06, /* NID_X9_62_c2tnb191v3 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x07, /* NID_X9_62_c2onb191v4 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x08, /* NID_X9_62_c2onb191v5 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x09, /* NID_X9_62_c2pnb208w1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x0a, /* NID_X9_62_c2tnb239v1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x0b, /* NID_X9_62_c2tnb239v2 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x0c, /* NID_X9_62_c2tnb239v3 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x0d, /* NID_X9_62_c2onb239v4 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x0e, /* 
NID_X9_62_c2onb239v5 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x0f, /* NID_X9_62_c2pnb272w1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x10, /* NID_X9_62_c2pnb304w1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x11, /* NID_X9_62_c2tnb359v1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x12, /* NID_X9_62_c2pnb368w1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x13, /* NID_X9_62_c2tnb431r1 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x00, 0x14, /* NID_secp112r1 */ 0x2b, 0x81, 0x04, 0x00, 0x06, /* NID_secp112r2 */ 0x2b, 0x81, 0x04, 0x00, 0x07, /* NID_secp128r1 */ 0x2b, 0x81, 0x04, 0x00, 0x1c, /* NID_secp128r2 */ 0x2b, 0x81, 0x04, 0x00, 0x1d, /* NID_secp160k1 */ 0x2b, 0x81, 0x04, 0x00, 0x09, /* NID_secp160r1 */ 0x2b, 0x81, 0x04, 0x00, 0x08, /* NID_secp160r2 */ 0x2b, 0x81, 0x04, 0x00, 0x1e, /* NID_secp192k1 */ 0x2b, 0x81, 0x04, 0x00, 0x1f, /* NID_secp224k1 */ 0x2b, 0x81, 0x04, 0x00, 0x20, /* NID_secp224r1 */ 0x2b, 0x81, 0x04, 0x00, 0x21, /* NID_secp256k1 */ 0x2b, 0x81, 0x04, 0x00, 0x0a, /* NID_secp384r1 */ 0x2b, 0x81, 0x04, 0x00, 0x22, /* NID_secp521r1 */ 0x2b, 0x81, 0x04, 0x00, 0x23, /* NID_sect113r1 */ 0x2b, 0x81, 0x04, 0x00, 0x04, /* NID_sect113r2 */ 0x2b, 0x81, 0x04, 0x00, 0x05, /* NID_sect131r1 */ 0x2b, 0x81, 0x04, 0x00, 0x16, /* NID_sect131r2 */ 0x2b, 0x81, 0x04, 0x00, 0x17, /* NID_sect163k1 */ 0x2b, 0x81, 0x04, 0x00, 0x01, /* NID_sect163r1 */ 0x2b, 0x81, 0x04, 0x00, 0x02, /* NID_sect163r2 */ 0x2b, 0x81, 0x04, 0x00, 0x0f, /* NID_sect193r1 */ 0x2b, 0x81, 0x04, 0x00, 0x18, /* NID_sect193r2 */ 0x2b, 0x81, 0x04, 0x00, 0x19, /* NID_sect233k1 */ 0x2b, 0x81, 0x04, 0x00, 0x1a, /* NID_sect233r1 */ 0x2b, 0x81, 0x04, 0x00, 0x1b, /* NID_sect239k1 */ 0x2b, 0x81, 0x04, 0x00, 0x03, /* NID_sect283k1 */ 0x2b, 0x81, 0x04, 0x00, 0x10, /* NID_sect283r1 */ 0x2b, 0x81, 0x04, 0x00, 0x11, /* NID_sect409k1 */ 0x2b, 0x81, 0x04, 0x00, 0x24, /* NID_sect409r1 */ 0x2b, 0x81, 0x04, 0x00, 0x25, /* NID_sect571k1 */ 0x2b, 0x81, 0x04, 0x00, 0x26, /* NID_sect571r1 */ 0x2b, 0x81, 0x04, 0x00, 0x27, /* 
NID_wap_wsg_idm_ecid_wtls1 */ 0x67, 0x2b, 0x01, 0x04, 0x01, /* NID_wap_wsg_idm_ecid_wtls3 */ 0x67, 0x2b, 0x01, 0x04, 0x03, /* NID_wap_wsg_idm_ecid_wtls4 */ 0x67, 0x2b, 0x01, 0x04, 0x04, /* NID_wap_wsg_idm_ecid_wtls5 */ 0x67, 0x2b, 0x01, 0x04, 0x05, /* NID_wap_wsg_idm_ecid_wtls6 */ 0x67, 0x2b, 0x01, 0x04, 0x06, /* NID_wap_wsg_idm_ecid_wtls7 */ 0x67, 0x2b, 0x01, 0x04, 0x07, /* NID_wap_wsg_idm_ecid_wtls8 */ 0x67, 0x2b, 0x01, 0x04, 0x08, /* NID_wap_wsg_idm_ecid_wtls9 */ 0x67, 0x2b, 0x01, 0x04, 0x09, /* NID_wap_wsg_idm_ecid_wtls10 */ 0x67, 0x2b, 0x01, 0x04, 0x0a, /* NID_wap_wsg_idm_ecid_wtls11 */ 0x67, 0x2b, 0x01, 0x04, 0x0b, /* NID_wap_wsg_idm_ecid_wtls12 */ 0x67, 0x2b, 0x01, 0x04, 0x0c, /* NID_any_policy */ 0x55, 0x1d, 0x20, 0x00, /* NID_policy_mappings */ 0x55, 0x1d, 0x21, /* NID_inhibit_any_policy */ 0x55, 0x1d, 0x36, /* NID_camellia_128_cbc */ 0x2a, 0x83, 0x08, 0x8c, 0x9a, 0x4b, 0x3d, 0x01, 0x01, 0x01, 0x02, /* NID_camellia_192_cbc */ 0x2a, 0x83, 0x08, 0x8c, 0x9a, 0x4b, 0x3d, 0x01, 0x01, 0x01, 0x03, /* NID_camellia_256_cbc */ 0x2a, 0x83, 0x08, 0x8c, 0x9a, 0x4b, 0x3d, 0x01, 0x01, 0x01, 0x04, /* NID_camellia_128_ecb */ 0x03, 0xa2, 0x31, 0x05, 0x03, 0x01, 0x09, 0x01, /* NID_camellia_192_ecb */ 0x03, 0xa2, 0x31, 0x05, 0x03, 0x01, 0x09, 0x15, /* NID_camellia_256_ecb */ 0x03, 0xa2, 0x31, 0x05, 0x03, 0x01, 0x09, 0x29, /* NID_camellia_128_cfb128 */ 0x03, 0xa2, 0x31, 0x05, 0x03, 0x01, 0x09, 0x04, /* NID_camellia_192_cfb128 */ 0x03, 0xa2, 0x31, 0x05, 0x03, 0x01, 0x09, 0x18, /* NID_camellia_256_cfb128 */ 0x03, 0xa2, 0x31, 0x05, 0x03, 0x01, 0x09, 0x2c, /* NID_camellia_128_ofb128 */ 0x03, 0xa2, 0x31, 0x05, 0x03, 0x01, 0x09, 0x03, /* NID_camellia_192_ofb128 */ 0x03, 0xa2, 0x31, 0x05, 0x03, 0x01, 0x09, 0x17, /* NID_camellia_256_ofb128 */ 0x03, 0xa2, 0x31, 0x05, 0x03, 0x01, 0x09, 0x2b, /* NID_subject_directory_attributes */ 0x55, 0x1d, 0x09, /* NID_issuing_distribution_point */ 0x55, 0x1d, 0x1c, /* NID_certificate_issuer */ 0x55, 0x1d, 0x1d, /* NID_kisa */ 0x2a, 0x83, 0x1a, 0x8c, 
0x9a, 0x44, /* NID_seed_ecb */ 0x2a, 0x83, 0x1a, 0x8c, 0x9a, 0x44, 0x01, 0x03, /* NID_seed_cbc */ 0x2a, 0x83, 0x1a, 0x8c, 0x9a, 0x44, 0x01, 0x04, /* NID_seed_ofb128 */ 0x2a, 0x83, 0x1a, 0x8c, 0x9a, 0x44, 0x01, 0x06, /* NID_seed_cfb128 */ 0x2a, 0x83, 0x1a, 0x8c, 0x9a, 0x44, 0x01, 0x05, /* NID_hmac_md5 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x08, 0x01, 0x01, /* NID_hmac_sha1 */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x08, 0x01, 0x02, /* NID_id_PasswordBasedMAC */ 0x2a, 0x86, 0x48, 0x86, 0xf6, 0x7d, 0x07, 0x42, 0x0d, /* NID_id_DHBasedMac */ 0x2a, 0x86, 0x48, 0x86, 0xf6, 0x7d, 0x07, 0x42, 0x1e, /* NID_id_it_suppLangTags */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x04, 0x10, /* NID_caRepository */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x05, /* NID_id_smime_ct_compressedData */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x09, /* NID_id_ct_asciiTextWithCRLF */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x1b, /* NID_id_aes128_wrap */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x05, /* NID_id_aes192_wrap */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x19, /* NID_id_aes256_wrap */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x2d, /* NID_ecdsa_with_Recommended */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x02, /* NID_ecdsa_with_Specified */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x03, /* NID_ecdsa_with_SHA224 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x03, 0x01, /* NID_ecdsa_with_SHA256 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x03, 0x02, /* NID_ecdsa_with_SHA384 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x03, 0x03, /* NID_ecdsa_with_SHA512 */ 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x03, 0x04, /* NID_hmacWithMD5 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x06, /* NID_hmacWithSHA224 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x08, /* NID_hmacWithSHA256 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x09, /* NID_hmacWithSHA384 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x0a, /* NID_hmacWithSHA512 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x0b, /* 
NID_dsa_with_SHA224 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x03, 0x01, /* NID_dsa_with_SHA256 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x03, 0x02, /* NID_whirlpool */ 0x28, 0xcf, 0x06, 0x03, 0x00, 0x37, /* NID_cryptopro */ 0x2a, 0x85, 0x03, 0x02, 0x02, /* NID_cryptocom */ 0x2a, 0x85, 0x03, 0x02, 0x09, /* NID_id_GostR3411_94_with_GostR3410_2001 */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x03, /* NID_id_GostR3411_94_with_GostR3410_94 */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x04, /* NID_id_GostR3411_94 */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x09, /* NID_id_HMACGostR3411_94 */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x0a, /* NID_id_GostR3410_2001 */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x13, /* NID_id_GostR3410_94 */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x14, /* NID_id_Gost28147_89 */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x15, /* NID_id_Gost28147_89_MAC */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x16, /* NID_id_GostR3411_94_prf */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x17, /* NID_id_GostR3410_2001DH */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x62, /* NID_id_GostR3410_94DH */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x63, /* NID_id_Gost28147_89_CryptoPro_KeyMeshing */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x0e, 0x01, /* NID_id_Gost28147_89_None_KeyMeshing */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x0e, 0x00, /* NID_id_GostR3411_94_TestParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x1e, 0x00, /* NID_id_GostR3411_94_CryptoProParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x1e, 0x01, /* NID_id_Gost28147_89_TestParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x1f, 0x00, /* NID_id_Gost28147_89_CryptoPro_A_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x1f, 0x01, /* NID_id_Gost28147_89_CryptoPro_B_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x1f, 0x02, /* NID_id_Gost28147_89_CryptoPro_C_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x1f, 0x03, /* NID_id_Gost28147_89_CryptoPro_D_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x1f, 0x04, /* NID_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x1f, 0x05, /* NID_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet */ 0x2a, 0x85, 0x03, 
0x02, 0x02, 0x1f, 0x06, /* NID_id_Gost28147_89_CryptoPro_RIC_1_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x1f, 0x07, /* NID_id_GostR3410_94_TestParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x20, 0x00, /* NID_id_GostR3410_94_CryptoPro_A_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x20, 0x02, /* NID_id_GostR3410_94_CryptoPro_B_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x20, 0x03, /* NID_id_GostR3410_94_CryptoPro_C_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x20, 0x04, /* NID_id_GostR3410_94_CryptoPro_D_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x20, 0x05, /* NID_id_GostR3410_94_CryptoPro_XchA_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x21, 0x01, /* NID_id_GostR3410_94_CryptoPro_XchB_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x21, 0x02, /* NID_id_GostR3410_94_CryptoPro_XchC_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x21, 0x03, /* NID_id_GostR3410_2001_TestParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x23, 0x00, /* NID_id_GostR3410_2001_CryptoPro_A_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x23, 0x01, /* NID_id_GostR3410_2001_CryptoPro_B_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x23, 0x02, /* NID_id_GostR3410_2001_CryptoPro_C_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x23, 0x03, /* NID_id_GostR3410_2001_CryptoPro_XchA_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x24, 0x00, /* NID_id_GostR3410_2001_CryptoPro_XchB_ParamSet */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x24, 0x01, /* NID_id_GostR3410_94_a */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x14, 0x01, /* NID_id_GostR3410_94_aBis */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x14, 0x02, /* NID_id_GostR3410_94_b */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x14, 0x03, /* NID_id_GostR3410_94_bBis */ 0x2a, 0x85, 0x03, 0x02, 0x02, 0x14, 0x04, /* NID_id_Gost28147_89_cc */ 0x2a, 0x85, 0x03, 0x02, 0x09, 0x01, 0x06, 0x01, /* NID_id_GostR3410_94_cc */ 0x2a, 0x85, 0x03, 0x02, 0x09, 0x01, 0x05, 0x03, /* NID_id_GostR3410_2001_cc */ 0x2a, 0x85, 0x03, 0x02, 0x09, 0x01, 0x05, 0x04, /* NID_id_GostR3411_94_with_GostR3410_94_cc */ 0x2a, 0x85, 0x03, 0x02, 0x09, 0x01, 0x03, 0x03, /* 
NID_id_GostR3411_94_with_GostR3410_2001_cc */ 0x2a, 0x85, 0x03, 0x02, 0x09, 0x01, 0x03, 0x04, /* NID_id_GostR3410_2001_ParamSet_cc */ 0x2a, 0x85, 0x03, 0x02, 0x09, 0x01, 0x08, 0x01, /* NID_LocalKeySet */ 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x11, 0x02, /* NID_freshest_crl */ 0x55, 0x1d, 0x2e, /* NID_id_on_permanentIdentifier */ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x08, 0x03, /* NID_searchGuide */ 0x55, 0x04, 0x0e, /* NID_businessCategory */ 0x55, 0x04, 0x0f, /* NID_postalAddress */ 0x55, 0x04, 0x10, /* NID_postOfficeBox */ 0x55, 0x04, 0x12, /* NID_physicalDeliveryOfficeName */ 0x55, 0x04, 0x13, /* NID_telephoneNumber */ 0x55, 0x04, 0x14, /* NID_telexNumber */ 0x55, 0x04, 0x15, /* NID_teletexTerminalIdentifier */ 0x55, 0x04, 0x16, /* NID_facsimileTelephoneNumber */ 0x55, 0x04, 0x17, /* NID_x121Address */ 0x55, 0x04, 0x18, /* NID_internationaliSDNNumber */ 0x55, 0x04, 0x19, /* NID_registeredAddress */ 0x55, 0x04, 0x1a, /* NID_destinationIndicator */ 0x55, 0x04, 0x1b, /* NID_preferredDeliveryMethod */ 0x55, 0x04, 0x1c, /* NID_presentationAddress */ 0x55, 0x04, 0x1d, /* NID_supportedApplicationContext */ 0x55, 0x04, 0x1e, /* NID_member */ 0x55, 0x04, 0x1f, /* NID_owner */ 0x55, 0x04, 0x20, /* NID_roleOccupant */ 0x55, 0x04, 0x21, /* NID_seeAlso */ 0x55, 0x04, 0x22, /* NID_userPassword */ 0x55, 0x04, 0x23, /* NID_userCertificate */ 0x55, 0x04, 0x24, /* NID_cACertificate */ 0x55, 0x04, 0x25, /* NID_authorityRevocationList */ 0x55, 0x04, 0x26, /* NID_certificateRevocationList */ 0x55, 0x04, 0x27, /* NID_crossCertificatePair */ 0x55, 0x04, 0x28, /* NID_enhancedSearchGuide */ 0x55, 0x04, 0x2f, /* NID_protocolInformation */ 0x55, 0x04, 0x30, /* NID_distinguishedName */ 0x55, 0x04, 0x31, /* NID_uniqueMember */ 0x55, 0x04, 0x32, /* NID_houseIdentifier */ 0x55, 0x04, 0x33, /* NID_supportedAlgorithms */ 0x55, 0x04, 0x34, /* NID_deltaRevocationList */ 0x55, 0x04, 0x35, /* NID_dmdName */ 0x55, 0x04, 0x36, /* NID_id_alg_PWRI_KEK */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 
0x09, 0x10, 0x03, 0x09, /* NID_aes_128_gcm */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x06, /* NID_aes_128_ccm */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x07, /* NID_id_aes128_wrap_pad */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x08, /* NID_aes_192_gcm */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x1a, /* NID_aes_192_ccm */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x1b, /* NID_id_aes192_wrap_pad */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x1c, /* NID_aes_256_gcm */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x2e, /* NID_aes_256_ccm */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x2f, /* NID_id_aes256_wrap_pad */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x30, /* NID_id_camellia128_wrap */ 0x2a, 0x83, 0x08, 0x8c, 0x9a, 0x4b, 0x3d, 0x01, 0x01, 0x03, 0x02, /* NID_id_camellia192_wrap */ 0x2a, 0x83, 0x08, 0x8c, 0x9a, 0x4b, 0x3d, 0x01, 0x01, 0x03, 0x03, /* NID_id_camellia256_wrap */ 0x2a, 0x83, 0x08, 0x8c, 0x9a, 0x4b, 0x3d, 0x01, 0x01, 0x03, 0x04, /* NID_anyExtendedKeyUsage */ 0x55, 0x1d, 0x25, 0x00, /* NID_mgf1 */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x08, /* NID_rsassaPss */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0a, /* NID_rsaesOaep */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x07, /* NID_dhpublicnumber */ 0x2a, 0x86, 0x48, 0xce, 0x3e, 0x02, 0x01, /* NID_brainpoolP160r1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x01, /* NID_brainpoolP160t1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x02, /* NID_brainpoolP192r1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x03, /* NID_brainpoolP192t1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x04, /* NID_brainpoolP224r1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x05, /* NID_brainpoolP224t1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x06, /* NID_brainpoolP256r1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07, /* NID_brainpoolP256t1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x08, /* 
NID_brainpoolP320r1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x09, /* NID_brainpoolP320t1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0a, /* NID_brainpoolP384r1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0b, /* NID_brainpoolP384t1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0c, /* NID_brainpoolP512r1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0d, /* NID_brainpoolP512t1 */ 0x2b, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0e, /* NID_pSpecified */ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x09, /* NID_dhSinglePass_stdDH_sha1kdf_scheme */ 0x2b, 0x81, 0x05, 0x10, 0x86, 0x48, 0x3f, 0x00, 0x02, /* NID_dhSinglePass_stdDH_sha224kdf_scheme */ 0x2b, 0x81, 0x04, 0x01, 0x0b, 0x00, /* NID_dhSinglePass_stdDH_sha256kdf_scheme */ 0x2b, 0x81, 0x04, 0x01, 0x0b, 0x01, /* NID_dhSinglePass_stdDH_sha384kdf_scheme */ 0x2b, 0x81, 0x04, 0x01, 0x0b, 0x02, /* NID_dhSinglePass_stdDH_sha512kdf_scheme */ 0x2b, 0x81, 0x04, 0x01, 0x0b, 0x03, /* NID_dhSinglePass_cofactorDH_sha1kdf_scheme */ 0x2b, 0x81, 0x05, 0x10, 0x86, 0x48, 0x3f, 0x00, 0x03, /* NID_dhSinglePass_cofactorDH_sha224kdf_scheme */ 0x2b, 0x81, 0x04, 0x01, 0x0e, 0x00, /* NID_dhSinglePass_cofactorDH_sha256kdf_scheme */ 0x2b, 0x81, 0x04, 0x01, 0x0e, 0x01, /* NID_dhSinglePass_cofactorDH_sha384kdf_scheme */ 0x2b, 0x81, 0x04, 0x01, 0x0e, 0x02, /* NID_dhSinglePass_cofactorDH_sha512kdf_scheme */ 0x2b, 0x81, 0x04, 0x01, 0x0e, 0x03, /* NID_X25519 */ 0x2b, 0x65, 0x6e, /* NID_ED25519 */ 0x2b, 0x65, 0x70, /* NID_ED448 */ 0x2b, 0x65, 0x71, /* NID_X448 */ 0x2b, 0x65, 0x6f, /* NID_sha512_256 */ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x06, }; static const ASN1_OBJECT kObjects[NUM_NID] = { {"rsadsi", "RSA Data Security, Inc.", NID_rsadsi, 6, &kObjectData[0], 0}, {"pkcs", "RSA Data Security, Inc. 
PKCS", NID_pkcs, 7, &kObjectData[6], 0}, {"MD2", "md2", NID_md2, 8, &kObjectData[13], 0}, {"MD5", "md5", NID_md5, 8, &kObjectData[21], 0}, {"RC4", "rc4", NID_rc4, 8, &kObjectData[29], 0}, {"rsaEncryption", "rsaEncryption", NID_rsaEncryption, 9, &kObjectData[37], 0}, {"RSA-MD2", "md2WithRSAEncryption", NID_md2WithRSAEncryption, 9, &kObjectData[46], 0}, {"RSA-MD5", "md5WithRSAEncryption", NID_md5WithRSAEncryption, 9, &kObjectData[55], 0}, {"PBE-MD2-DES", "pbeWithMD2AndDES-CBC", NID_pbeWithMD2AndDES_CBC, 9, &kObjectData[64], 0}, {"PBE-MD5-DES", "pbeWithMD5AndDES-CBC", NID_pbeWithMD5AndDES_CBC, 9, &kObjectData[73], 0}, {"X500", "directory services (X.500)", NID_X500, 1, &kObjectData[82], 0}, {"X509", "X509", NID_X509, 2, &kObjectData[83], 0}, {"CN", "commonName", NID_commonName, 3, &kObjectData[85], 0}, {"C", "countryName", NID_countryName, 3, &kObjectData[88], 0}, {"L", "localityName", NID_localityName, 3, &kObjectData[91], 0}, {"ST", "stateOrProvinceName", NID_stateOrProvinceName, 3, &kObjectData[94], 0}, {"O", "organizationName", NID_organizationName, 3, &kObjectData[97], 0}, {"OU", "organizationalUnitName", NID_organizationalUnitName, 3, &kObjectData[100], 0}, {"RSA", "rsa", NID_rsa, 4, &kObjectData[103], 0}, {"pkcs7", "pkcs7", NID_pkcs7, 8, &kObjectData[107], 0}, {"pkcs7-data", "pkcs7-data", NID_pkcs7_data, 9, &kObjectData[115], 0}, {"pkcs7-signedData", "pkcs7-signedData", NID_pkcs7_signed, 9, &kObjectData[124], 0}, {"pkcs7-envelopedData", "pkcs7-envelopedData", NID_pkcs7_enveloped, 9, &kObjectData[133], 0}, {"pkcs7-signedAndEnvelopedData", "pkcs7-signedAndEnvelopedData", NID_pkcs7_signedAndEnveloped, 9, &kObjectData[142], 0}, {"pkcs7-digestData", "pkcs7-digestData", NID_pkcs7_digest, 9, &kObjectData[151], 0}, {"pkcs7-encryptedData", "pkcs7-encryptedData", NID_pkcs7_encrypted, 9, &kObjectData[160], 0}, {"pkcs3", "pkcs3", NID_pkcs3, 8, &kObjectData[169], 0}, {"dhKeyAgreement", "dhKeyAgreement", NID_dhKeyAgreement, 9, &kObjectData[177], 0}, {"DES-ECB", "des-ecb", 
NID_des_ecb, 5, &kObjectData[186], 0}, {"DES-CFB", "des-cfb", NID_des_cfb64, 5, &kObjectData[191], 0}, {"DES-CBC", "des-cbc", NID_des_cbc, 5, &kObjectData[196], 0}, {"DES-EDE", "des-ede", NID_des_ede_ecb, 5, &kObjectData[201], 0}, {"DES-EDE3", "des-ede3", NID_des_ede3_ecb, 0, NULL, 0}, {"IDEA-CBC", "idea-cbc", NID_idea_cbc, 11, &kObjectData[206], 0}, {"IDEA-CFB", "idea-cfb", NID_idea_cfb64, 0, NULL, 0}, {"IDEA-ECB", "idea-ecb", NID_idea_ecb, 0, NULL, 0}, {"RC2-CBC", "rc2-cbc", NID_rc2_cbc, 8, &kObjectData[217], 0}, {"RC2-ECB", "rc2-ecb", NID_rc2_ecb, 0, NULL, 0}, {"RC2-CFB", "rc2-cfb", NID_rc2_cfb64, 0, NULL, 0}, {"RC2-OFB", "rc2-ofb", NID_rc2_ofb64, 0, NULL, 0}, {"SHA", "sha", NID_sha, 5, &kObjectData[225], 0}, {"RSA-SHA", "shaWithRSAEncryption", NID_shaWithRSAEncryption, 5, &kObjectData[230], 0}, {"DES-EDE-CBC", "des-ede-cbc", NID_des_ede_cbc, 0, NULL, 0}, {"DES-EDE3-CBC", "des-ede3-cbc", NID_des_ede3_cbc, 8, &kObjectData[235], 0}, {"DES-OFB", "des-ofb", NID_des_ofb64, 5, &kObjectData[243], 0}, {"IDEA-OFB", "idea-ofb", NID_idea_ofb64, 0, NULL, 0}, {"pkcs9", "pkcs9", NID_pkcs9, 8, &kObjectData[248], 0}, {"emailAddress", "emailAddress", NID_pkcs9_emailAddress, 9, &kObjectData[256], 0}, {"unstructuredName", "unstructuredName", NID_pkcs9_unstructuredName, 9, &kObjectData[265], 0}, {"contentType", "contentType", NID_pkcs9_contentType, 9, &kObjectData[274], 0}, {"messageDigest", "messageDigest", NID_pkcs9_messageDigest, 9, &kObjectData[283], 0}, {"signingTime", "signingTime", NID_pkcs9_signingTime, 9, &kObjectData[292], 0}, {"countersignature", "countersignature", NID_pkcs9_countersignature, 9, &kObjectData[301], 0}, {"challengePassword", "challengePassword", NID_pkcs9_challengePassword, 9, &kObjectData[310], 0}, {"unstructuredAddress", "unstructuredAddress", NID_pkcs9_unstructuredAddress, 9, &kObjectData[319], 0}, {"extendedCertificateAttributes", "extendedCertificateAttributes", NID_pkcs9_extCertAttributes, 9, &kObjectData[328], 0}, {"Netscape", "Netscape 
Communications Corp.", NID_netscape, 7, &kObjectData[337], 0}, {"nsCertExt", "Netscape Certificate Extension", NID_netscape_cert_extension, 8, &kObjectData[344], 0}, {"nsDataType", "Netscape Data Type", NID_netscape_data_type, 8, &kObjectData[352], 0}, {"DES-EDE-CFB", "des-ede-cfb", NID_des_ede_cfb64, 0, NULL, 0}, {"DES-EDE3-CFB", "des-ede3-cfb", NID_des_ede3_cfb64, 0, NULL, 0}, {"DES-EDE-OFB", "des-ede-ofb", NID_des_ede_ofb64, 0, NULL, 0}, {"DES-EDE3-OFB", "des-ede3-ofb", NID_des_ede3_ofb64, 0, NULL, 0}, {"SHA1", "sha1", NID_sha1, 5, &kObjectData[360], 0}, {"RSA-SHA1", "sha1WithRSAEncryption", NID_sha1WithRSAEncryption, 9, &kObjectData[365], 0}, {"DSA-SHA", "dsaWithSHA", NID_dsaWithSHA, 5, &kObjectData[374], 0}, {"DSA-old", "dsaEncryption-old", NID_dsa_2, 5, &kObjectData[379], 0}, {"PBE-SHA1-RC2-64", "pbeWithSHA1AndRC2-CBC", NID_pbeWithSHA1AndRC2_CBC, 9, &kObjectData[384], 0}, {"PBKDF2", "PBKDF2", NID_id_pbkdf2, 9, &kObjectData[393], 0}, {"DSA-SHA1-old", "dsaWithSHA1-old", NID_dsaWithSHA1_2, 5, &kObjectData[402], 0}, {"nsCertType", "Netscape Cert Type", NID_netscape_cert_type, 9, &kObjectData[407], 0}, {"nsBaseUrl", "Netscape Base Url", NID_netscape_base_url, 9, &kObjectData[416], 0}, {"nsRevocationUrl", "Netscape Revocation Url", NID_netscape_revocation_url, 9, &kObjectData[425], 0}, {"nsCaRevocationUrl", "Netscape CA Revocation Url", NID_netscape_ca_revocation_url, 9, &kObjectData[434], 0}, {"nsRenewalUrl", "Netscape Renewal Url", NID_netscape_renewal_url, 9, &kObjectData[443], 0}, {"nsCaPolicyUrl", "Netscape CA Policy Url", NID_netscape_ca_policy_url, 9, &kObjectData[452], 0}, {"nsSslServerName", "Netscape SSL Server Name", NID_netscape_ssl_server_name, 9, &kObjectData[461], 0}, {"nsComment", "Netscape Comment", NID_netscape_comment, 9, &kObjectData[470], 0}, {"nsCertSequence", "Netscape Certificate Sequence", NID_netscape_cert_sequence, 9, &kObjectData[479], 0}, {"DESX-CBC", "desx-cbc", NID_desx_cbc, 0, NULL, 0}, {"id-ce", "id-ce", NID_id_ce, 2, 
&kObjectData[488], 0}, {"subjectKeyIdentifier", "X509v3 Subject Key Identifier", NID_subject_key_identifier, 3, &kObjectData[490], 0}, {"keyUsage", "X509v3 Key Usage", NID_key_usage, 3, &kObjectData[493], 0}, {"privateKeyUsagePeriod", "X509v3 Private Key Usage Period", NID_private_key_usage_period, 3, &kObjectData[496], 0}, {"subjectAltName", "X509v3 Subject Alternative Name", NID_subject_alt_name, 3, &kObjectData[499], 0}, {"issuerAltName", "X509v3 Issuer Alternative Name", NID_issuer_alt_name, 3, &kObjectData[502], 0}, {"basicConstraints", "X509v3 Basic Constraints", NID_basic_constraints, 3, &kObjectData[505], 0}, {"crlNumber", "X509v3 CRL Number", NID_crl_number, 3, &kObjectData[508], 0}, {"certificatePolicies", "X509v3 Certificate Policies", NID_certificate_policies, 3, &kObjectData[511], 0}, {"authorityKeyIdentifier", "X509v3 Authority Key Identifier", NID_authority_key_identifier, 3, &kObjectData[514], 0}, {"BF-CBC", "bf-cbc", NID_bf_cbc, 9, &kObjectData[517], 0}, {"BF-ECB", "bf-ecb", NID_bf_ecb, 0, NULL, 0}, {"BF-CFB", "bf-cfb", NID_bf_cfb64, 0, NULL, 0}, {"BF-OFB", "bf-ofb", NID_bf_ofb64, 0, NULL, 0}, {"MDC2", "mdc2", NID_mdc2, 4, &kObjectData[526], 0}, {"RSA-MDC2", "mdc2WithRSA", NID_mdc2WithRSA, 4, &kObjectData[530], 0}, {"RC4-40", "rc4-40", NID_rc4_40, 0, NULL, 0}, {"RC2-40-CBC", "rc2-40-cbc", NID_rc2_40_cbc, 0, NULL, 0}, {"GN", "givenName", NID_givenName, 3, &kObjectData[534], 0}, {"SN", "surname", NID_surname, 3, &kObjectData[537], 0}, {"initials", "initials", NID_initials, 3, &kObjectData[540], 0}, {NULL, NULL, NID_undef, 0, NULL, 0}, {"crlDistributionPoints", "X509v3 CRL Distribution Points", NID_crl_distribution_points, 3, &kObjectData[543], 0}, {"RSA-NP-MD5", "md5WithRSA", NID_md5WithRSA, 5, &kObjectData[546], 0}, {"serialNumber", "serialNumber", NID_serialNumber, 3, &kObjectData[551], 0}, {"title", "title", NID_title, 3, &kObjectData[554], 0}, {"description", "description", NID_description, 3, &kObjectData[557], 0}, {"CAST5-CBC", "cast5-cbc", 
NID_cast5_cbc, 9, &kObjectData[560], 0}, {"CAST5-ECB", "cast5-ecb", NID_cast5_ecb, 0, NULL, 0}, {"CAST5-CFB", "cast5-cfb", NID_cast5_cfb64, 0, NULL, 0}, {"CAST5-OFB", "cast5-ofb", NID_cast5_ofb64, 0, NULL, 0}, {"pbeWithMD5AndCast5CBC", "pbeWithMD5AndCast5CBC", NID_pbeWithMD5AndCast5_CBC, 9, &kObjectData[569], 0}, {"DSA-SHA1", "dsaWithSHA1", NID_dsaWithSHA1, 7, &kObjectData[578], 0}, {"MD5-SHA1", "md5-sha1", NID_md5_sha1, 0, NULL, 0}, {"RSA-SHA1-2", "sha1WithRSA", NID_sha1WithRSA, 5, &kObjectData[585], 0}, {"DSA", "dsaEncryption", NID_dsa, 7, &kObjectData[590], 0}, {"RIPEMD160", "ripemd160", NID_ripemd160, 5, &kObjectData[597], 0}, {NULL, NULL, NID_undef, 0, NULL, 0}, {"RSA-RIPEMD160", "ripemd160WithRSA", NID_ripemd160WithRSA, 6, &kObjectData[602], 0}, {"RC5-CBC", "rc5-cbc", NID_rc5_cbc, 8, &kObjectData[608], 0}, {"RC5-ECB", "rc5-ecb", NID_rc5_ecb, 0, NULL, 0}, {"RC5-CFB", "rc5-cfb", NID_rc5_cfb64, 0, NULL, 0}, {"RC5-OFB", "rc5-ofb", NID_rc5_ofb64, 0, NULL, 0}, {NULL, NULL, NID_undef, 0, NULL, 0}, {"ZLIB", "zlib compression", NID_zlib_compression, 11, &kObjectData[616], 0}, {"extendedKeyUsage", "X509v3 Extended Key Usage", NID_ext_key_usage, 3, &kObjectData[627], 0}, {"PKIX", "PKIX", NID_id_pkix, 6, &kObjectData[630], 0}, {"id-kp", "id-kp", NID_id_kp, 7, &kObjectData[636], 0}, {"serverAuth", "TLS Web Server Authentication", NID_server_auth, 8, &kObjectData[643], 0}, {"clientAuth", "TLS Web Client Authentication", NID_client_auth, 8, &kObjectData[651], 0}, {"codeSigning", "Code Signing", NID_code_sign, 8, &kObjectData[659], 0}, {"emailProtection", "E-mail Protection", NID_email_protect, 8, &kObjectData[667], 0}, {"timeStamping", "Time Stamping", NID_time_stamp, 8, &kObjectData[675], 0}, {"msCodeInd", "Microsoft Individual Code Signing", NID_ms_code_ind, 10, &kObjectData[683], 0}, {"msCodeCom", "Microsoft Commercial Code Signing", NID_ms_code_com, 10, &kObjectData[693], 0}, {"msCTLSign", "Microsoft Trust List Signing", NID_ms_ctl_sign, 10, &kObjectData[703], 0}, 
{"msSGC", "Microsoft Server Gated Crypto", NID_ms_sgc, 10, &kObjectData[713], 0}, {"msEFS", "Microsoft Encrypted File System", NID_ms_efs, 10, &kObjectData[723], 0}, {"nsSGC", "Netscape Server Gated Crypto", NID_ns_sgc, 9, &kObjectData[733], 0}, {"deltaCRL", "X509v3 Delta CRL Indicator", NID_delta_crl, 3, &kObjectData[742], 0}, {"CRLReason", "X509v3 CRL Reason Code", NID_crl_reason, 3, &kObjectData[745], 0}, {"invalidityDate", "Invalidity Date", NID_invalidity_date, 3, &kObjectData[748], 0}, {"SXNetID", "Strong Extranet ID", NID_sxnet, 5, &kObjectData[751], 0}, {"PBE-SHA1-RC4-128", "pbeWithSHA1And128BitRC4", NID_pbe_WithSHA1And128BitRC4, 10, &kObjectData[756], 0}, {"PBE-SHA1-RC4-40", "pbeWithSHA1And40BitRC4", NID_pbe_WithSHA1And40BitRC4, 10, &kObjectData[766], 0}, {"PBE-SHA1-3DES", "pbeWithSHA1And3-KeyTripleDES-CBC", NID_pbe_WithSHA1And3_Key_TripleDES_CBC, 10, &kObjectData[776], 0}, {"PBE-SHA1-2DES", "pbeWithSHA1And2-KeyTripleDES-CBC", NID_pbe_WithSHA1And2_Key_TripleDES_CBC, 10, &kObjectData[786], 0}, {"PBE-SHA1-RC2-128", "pbeWithSHA1And128BitRC2-CBC", NID_pbe_WithSHA1And128BitRC2_CBC, 10, &kObjectData[796], 0}, {"PBE-SHA1-RC2-40", "pbeWithSHA1And40BitRC2-CBC", NID_pbe_WithSHA1And40BitRC2_CBC, 10, &kObjectData[806], 0}, {"keyBag", "keyBag", NID_keyBag, 11, &kObjectData[816], 0}, {"pkcs8ShroudedKeyBag", "pkcs8ShroudedKeyBag", NID_pkcs8ShroudedKeyBag, 11, &kObjectData[827], 0}, {"certBag", "certBag", NID_certBag, 11, &kObjectData[838], 0}, {"crlBag", "crlBag", NID_crlBag, 11, &kObjectData[849], 0}, {"secretBag", "secretBag", NID_secretBag, 11, &kObjectData[860], 0}, {"safeContentsBag", "safeContentsBag", NID_safeContentsBag, 11, &kObjectData[871], 0}, {"friendlyName", "friendlyName", NID_friendlyName, 9, &kObjectData[882], 0}, {"localKeyID", "localKeyID", NID_localKeyID, 9, &kObjectData[891], 0}, {"x509Certificate", "x509Certificate", NID_x509Certificate, 10, &kObjectData[900], 0}, {"sdsiCertificate", "sdsiCertificate", NID_sdsiCertificate, 10, &kObjectData[910], 0}, 
{"x509Crl", "x509Crl", NID_x509Crl, 10, &kObjectData[920], 0}, {"PBES2", "PBES2", NID_pbes2, 9, &kObjectData[930], 0}, {"PBMAC1", "PBMAC1", NID_pbmac1, 9, &kObjectData[939], 0}, {"hmacWithSHA1", "hmacWithSHA1", NID_hmacWithSHA1, 8, &kObjectData[948], 0}, {"id-qt-cps", "Policy Qualifier CPS", NID_id_qt_cps, 8, &kObjectData[956], 0}, {"id-qt-unotice", "Policy Qualifier User Notice", NID_id_qt_unotice, 8, &kObjectData[964], 0}, {"RC2-64-CBC", "rc2-64-cbc", NID_rc2_64_cbc, 0, NULL, 0}, {"SMIME-CAPS", "S/MIME Capabilities", NID_SMIMECapabilities, 9, &kObjectData[972], 0}, {"PBE-MD2-RC2-64", "pbeWithMD2AndRC2-CBC", NID_pbeWithMD2AndRC2_CBC, 9, &kObjectData[981], 0}, {"PBE-MD5-RC2-64", "pbeWithMD5AndRC2-CBC", NID_pbeWithMD5AndRC2_CBC, 9, &kObjectData[990], 0}, {"PBE-SHA1-DES", "pbeWithSHA1AndDES-CBC", NID_pbeWithSHA1AndDES_CBC, 9, &kObjectData[999], 0}, {"msExtReq", "Microsoft Extension Request", NID_ms_ext_req, 10, &kObjectData[1008], 0}, {"extReq", "Extension Request", NID_ext_req, 9, &kObjectData[1018], 0}, {"name", "name", NID_name, 3, &kObjectData[1027], 0}, {"dnQualifier", "dnQualifier", NID_dnQualifier, 3, &kObjectData[1030], 0}, {"id-pe", "id-pe", NID_id_pe, 7, &kObjectData[1033], 0}, {"id-ad", "id-ad", NID_id_ad, 7, &kObjectData[1040], 0}, {"authorityInfoAccess", "Authority Information Access", NID_info_access, 8, &kObjectData[1047], 0}, {"OCSP", "OCSP", NID_ad_OCSP, 8, &kObjectData[1055], 0}, {"caIssuers", "CA Issuers", NID_ad_ca_issuers, 8, &kObjectData[1063], 0}, {"OCSPSigning", "OCSP Signing", NID_OCSP_sign, 8, &kObjectData[1071], 0}, {"ISO", "iso", NID_iso, 0, NULL, 0}, {"member-body", "ISO Member Body", NID_member_body, 1, &kObjectData[1079], 0}, {"ISO-US", "ISO US Member Body", NID_ISO_US, 3, &kObjectData[1080], 0}, {"X9-57", "X9.57", NID_X9_57, 5, &kObjectData[1083], 0}, {"X9cm", "X9.57 CM ?", NID_X9cm, 6, &kObjectData[1088], 0}, {"pkcs1", "pkcs1", NID_pkcs1, 8, &kObjectData[1094], 0}, {"pkcs5", "pkcs5", NID_pkcs5, 8, &kObjectData[1102], 0}, {"SMIME", 
"S/MIME", NID_SMIME, 9, &kObjectData[1110], 0}, {"id-smime-mod", "id-smime-mod", NID_id_smime_mod, 10, &kObjectData[1119], 0}, {"id-smime-ct", "id-smime-ct", NID_id_smime_ct, 10, &kObjectData[1129], 0}, {"id-smime-aa", "id-smime-aa", NID_id_smime_aa, 10, &kObjectData[1139], 0}, {"id-smime-alg", "id-smime-alg", NID_id_smime_alg, 10, &kObjectData[1149], 0}, {"id-smime-cd", "id-smime-cd", NID_id_smime_cd, 10, &kObjectData[1159], 0}, {"id-smime-spq", "id-smime-spq", NID_id_smime_spq, 10, &kObjectData[1169], 0}, {"id-smime-cti", "id-smime-cti", NID_id_smime_cti, 10, &kObjectData[1179], 0}, {"id-smime-mod-cms", "id-smime-mod-cms", NID_id_smime_mod_cms, 11, &kObjectData[1189], 0}, {"id-smime-mod-ess", "id-smime-mod-ess", NID_id_smime_mod_ess, 11, &kObjectData[1200], 0}, {"id-smime-mod-oid", "id-smime-mod-oid", NID_id_smime_mod_oid, 11, &kObjectData[1211], 0}, {"id-smime-mod-msg-v3", "id-smime-mod-msg-v3", NID_id_smime_mod_msg_v3, 11, &kObjectData[1222], 0}, {"id-smime-mod-ets-eSignature-88", "id-smime-mod-ets-eSignature-88", NID_id_smime_mod_ets_eSignature_88, 11, &kObjectData[1233], 0}, {"id-smime-mod-ets-eSignature-97", "id-smime-mod-ets-eSignature-97", NID_id_smime_mod_ets_eSignature_97, 11, &kObjectData[1244], 0}, {"id-smime-mod-ets-eSigPolicy-88", "id-smime-mod-ets-eSigPolicy-88", NID_id_smime_mod_ets_eSigPolicy_88, 11, &kObjectData[1255], 0}, {"id-smime-mod-ets-eSigPolicy-97", "id-smime-mod-ets-eSigPolicy-97", NID_id_smime_mod_ets_eSigPolicy_97, 11, &kObjectData[1266], 0}, {"id-smime-ct-receipt", "id-smime-ct-receipt", NID_id_smime_ct_receipt, 11, &kObjectData[1277], 0}, {"id-smime-ct-authData", "id-smime-ct-authData", NID_id_smime_ct_authData, 11, &kObjectData[1288], 0}, {"id-smime-ct-publishCert", "id-smime-ct-publishCert", NID_id_smime_ct_publishCert, 11, &kObjectData[1299], 0}, {"id-smime-ct-TSTInfo", "id-smime-ct-TSTInfo", NID_id_smime_ct_TSTInfo, 11, &kObjectData[1310], 0}, {"id-smime-ct-TDTInfo", "id-smime-ct-TDTInfo", NID_id_smime_ct_TDTInfo, 11, 
&kObjectData[1321], 0}, {"id-smime-ct-contentInfo", "id-smime-ct-contentInfo", NID_id_smime_ct_contentInfo, 11, &kObjectData[1332], 0}, {"id-smime-ct-DVCSRequestData", "id-smime-ct-DVCSRequestData", NID_id_smime_ct_DVCSRequestData, 11, &kObjectData[1343], 0}, {"id-smime-ct-DVCSResponseData", "id-smime-ct-DVCSResponseData", NID_id_smime_ct_DVCSResponseData, 11, &kObjectData[1354], 0}, {"id-smime-aa-receiptRequest", "id-smime-aa-receiptRequest", NID_id_smime_aa_receiptRequest, 11, &kObjectData[1365], 0}, {"id-smime-aa-securityLabel", "id-smime-aa-securityLabel", NID_id_smime_aa_securityLabel, 11, &kObjectData[1376], 0}, {"id-smime-aa-mlExpandHistory", "id-smime-aa-mlExpandHistory", NID_id_smime_aa_mlExpandHistory, 11, &kObjectData[1387], 0}, {"id-smime-aa-contentHint", "id-smime-aa-contentHint", NID_id_smime_aa_contentHint, 11, &kObjectData[1398], 0}, {"id-smime-aa-msgSigDigest", "id-smime-aa-msgSigDigest", NID_id_smime_aa_msgSigDigest, 11, &kObjectData[1409], 0}, {"id-smime-aa-encapContentType", "id-smime-aa-encapContentType", NID_id_smime_aa_encapContentType, 11, &kObjectData[1420], 0}, {"id-smime-aa-contentIdentifier", "id-smime-aa-contentIdentifier", NID_id_smime_aa_contentIdentifier, 11, &kObjectData[1431], 0}, {"id-smime-aa-macValue", "id-smime-aa-macValue", NID_id_smime_aa_macValue, 11, &kObjectData[1442], 0}, {"id-smime-aa-equivalentLabels", "id-smime-aa-equivalentLabels", NID_id_smime_aa_equivalentLabels, 11, &kObjectData[1453], 0}, {"id-smime-aa-contentReference", "id-smime-aa-contentReference", NID_id_smime_aa_contentReference, 11, &kObjectData[1464], 0}, {"id-smime-aa-encrypKeyPref", "id-smime-aa-encrypKeyPref", NID_id_smime_aa_encrypKeyPref, 11, &kObjectData[1475], 0}, {"id-smime-aa-signingCertificate", "id-smime-aa-signingCertificate", NID_id_smime_aa_signingCertificate, 11, &kObjectData[1486], 0}, {"id-smime-aa-smimeEncryptCerts", "id-smime-aa-smimeEncryptCerts", NID_id_smime_aa_smimeEncryptCerts, 11, &kObjectData[1497], 0}, 
{"id-smime-aa-timeStampToken", "id-smime-aa-timeStampToken", NID_id_smime_aa_timeStampToken, 11, &kObjectData[1508], 0}, {"id-smime-aa-ets-sigPolicyId", "id-smime-aa-ets-sigPolicyId", NID_id_smime_aa_ets_sigPolicyId, 11, &kObjectData[1519], 0}, {"id-smime-aa-ets-commitmentType", "id-smime-aa-ets-commitmentType", NID_id_smime_aa_ets_commitmentType, 11, &kObjectData[1530], 0}, {"id-smime-aa-ets-signerLocation", "id-smime-aa-ets-signerLocation", NID_id_smime_aa_ets_signerLocation, 11, &kObjectData[1541], 0}, {"id-smime-aa-ets-signerAttr", "id-smime-aa-ets-signerAttr", NID_id_smime_aa_ets_signerAttr, 11, &kObjectData[1552], 0}, {"id-smime-aa-ets-otherSigCert", "id-smime-aa-ets-otherSigCert", NID_id_smime_aa_ets_otherSigCert, 11, &kObjectData[1563], 0}, {"id-smime-aa-ets-contentTimestamp", "id-smime-aa-ets-contentTimestamp", NID_id_smime_aa_ets_contentTimestamp, 11, &kObjectData[1574], 0}, {"id-smime-aa-ets-CertificateRefs", "id-smime-aa-ets-CertificateRefs", NID_id_smime_aa_ets_CertificateRefs, 11, &kObjectData[1585], 0}, {"id-smime-aa-ets-RevocationRefs", "id-smime-aa-ets-RevocationRefs", NID_id_smime_aa_ets_RevocationRefs, 11, &kObjectData[1596], 0}, {"id-smime-aa-ets-certValues", "id-smime-aa-ets-certValues", NID_id_smime_aa_ets_certValues, 11, &kObjectData[1607], 0}, {"id-smime-aa-ets-revocationValues", "id-smime-aa-ets-revocationValues", NID_id_smime_aa_ets_revocationValues, 11, &kObjectData[1618], 0}, {"id-smime-aa-ets-escTimeStamp", "id-smime-aa-ets-escTimeStamp", NID_id_smime_aa_ets_escTimeStamp, 11, &kObjectData[1629], 0}, {"id-smime-aa-ets-certCRLTimestamp", "id-smime-aa-ets-certCRLTimestamp", NID_id_smime_aa_ets_certCRLTimestamp, 11, &kObjectData[1640], 0}, {"id-smime-aa-ets-archiveTimeStamp", "id-smime-aa-ets-archiveTimeStamp", NID_id_smime_aa_ets_archiveTimeStamp, 11, &kObjectData[1651], 0}, {"id-smime-aa-signatureType", "id-smime-aa-signatureType", NID_id_smime_aa_signatureType, 11, &kObjectData[1662], 0}, {"id-smime-aa-dvcs-dvc", "id-smime-aa-dvcs-dvc", 
NID_id_smime_aa_dvcs_dvc, 11, &kObjectData[1673], 0}, {"id-smime-alg-ESDHwith3DES", "id-smime-alg-ESDHwith3DES", NID_id_smime_alg_ESDHwith3DES, 11, &kObjectData[1684], 0}, {"id-smime-alg-ESDHwithRC2", "id-smime-alg-ESDHwithRC2", NID_id_smime_alg_ESDHwithRC2, 11, &kObjectData[1695], 0}, {"id-smime-alg-3DESwrap", "id-smime-alg-3DESwrap", NID_id_smime_alg_3DESwrap, 11, &kObjectData[1706], 0}, {"id-smime-alg-RC2wrap", "id-smime-alg-RC2wrap", NID_id_smime_alg_RC2wrap, 11, &kObjectData[1717], 0}, {"id-smime-alg-ESDH", "id-smime-alg-ESDH", NID_id_smime_alg_ESDH, 11, &kObjectData[1728], 0}, {"id-smime-alg-CMS3DESwrap", "id-smime-alg-CMS3DESwrap", NID_id_smime_alg_CMS3DESwrap, 11, &kObjectData[1739], 0}, {"id-smime-alg-CMSRC2wrap", "id-smime-alg-CMSRC2wrap", NID_id_smime_alg_CMSRC2wrap, 11, &kObjectData[1750], 0}, {"id-smime-cd-ldap", "id-smime-cd-ldap", NID_id_smime_cd_ldap, 11, &kObjectData[1761], 0}, {"id-smime-spq-ets-sqt-uri", "id-smime-spq-ets-sqt-uri", NID_id_smime_spq_ets_sqt_uri, 11, &kObjectData[1772], 0}, {"id-smime-spq-ets-sqt-unotice", "id-smime-spq-ets-sqt-unotice", NID_id_smime_spq_ets_sqt_unotice, 11, &kObjectData[1783], 0}, {"id-smime-cti-ets-proofOfOrigin", "id-smime-cti-ets-proofOfOrigin", NID_id_smime_cti_ets_proofOfOrigin, 11, &kObjectData[1794], 0}, {"id-smime-cti-ets-proofOfReceipt", "id-smime-cti-ets-proofOfReceipt", NID_id_smime_cti_ets_proofOfReceipt, 11, &kObjectData[1805], 0}, {"id-smime-cti-ets-proofOfDelivery", "id-smime-cti-ets-proofOfDelivery", NID_id_smime_cti_ets_proofOfDelivery, 11, &kObjectData[1816], 0}, {"id-smime-cti-ets-proofOfSender", "id-smime-cti-ets-proofOfSender", NID_id_smime_cti_ets_proofOfSender, 11, &kObjectData[1827], 0}, {"id-smime-cti-ets-proofOfApproval", "id-smime-cti-ets-proofOfApproval", NID_id_smime_cti_ets_proofOfApproval, 11, &kObjectData[1838], 0}, {"id-smime-cti-ets-proofOfCreation", "id-smime-cti-ets-proofOfCreation", NID_id_smime_cti_ets_proofOfCreation, 11, &kObjectData[1849], 0}, {"MD4", "md4", NID_md4, 8, 
&kObjectData[1860], 0}, {"id-pkix-mod", "id-pkix-mod", NID_id_pkix_mod, 7, &kObjectData[1868], 0}, {"id-qt", "id-qt", NID_id_qt, 7, &kObjectData[1875], 0}, {"id-it", "id-it", NID_id_it, 7, &kObjectData[1882], 0}, {"id-pkip", "id-pkip", NID_id_pkip, 7, &kObjectData[1889], 0}, {"id-alg", "id-alg", NID_id_alg, 7, &kObjectData[1896], 0}, {"id-cmc", "id-cmc", NID_id_cmc, 7, &kObjectData[1903], 0}, {"id-on", "id-on", NID_id_on, 7, &kObjectData[1910], 0}, {"id-pda", "id-pda", NID_id_pda, 7, &kObjectData[1917], 0}, {"id-aca", "id-aca", NID_id_aca, 7, &kObjectData[1924], 0}, {"id-qcs", "id-qcs", NID_id_qcs, 7, &kObjectData[1931], 0}, {"id-cct", "id-cct", NID_id_cct, 7, &kObjectData[1938], 0}, {"id-pkix1-explicit-88", "id-pkix1-explicit-88", NID_id_pkix1_explicit_88, 8, &kObjectData[1945], 0}, {"id-pkix1-implicit-88", "id-pkix1-implicit-88", NID_id_pkix1_implicit_88, 8, &kObjectData[1953], 0}, {"id-pkix1-explicit-93", "id-pkix1-explicit-93", NID_id_pkix1_explicit_93, 8, &kObjectData[1961], 0}, {"id-pkix1-implicit-93", "id-pkix1-implicit-93", NID_id_pkix1_implicit_93, 8, &kObjectData[1969], 0}, {"id-mod-crmf", "id-mod-crmf", NID_id_mod_crmf, 8, &kObjectData[1977], 0}, {"id-mod-cmc", "id-mod-cmc", NID_id_mod_cmc, 8, &kObjectData[1985], 0}, {"id-mod-kea-profile-88", "id-mod-kea-profile-88", NID_id_mod_kea_profile_88, 8, &kObjectData[1993], 0}, {"id-mod-kea-profile-93", "id-mod-kea-profile-93", NID_id_mod_kea_profile_93, 8, &kObjectData[2001], 0}, {"id-mod-cmp", "id-mod-cmp", NID_id_mod_cmp, 8, &kObjectData[2009], 0}, {"id-mod-qualified-cert-88", "id-mod-qualified-cert-88", NID_id_mod_qualified_cert_88, 8, &kObjectData[2017], 0}, {"id-mod-qualified-cert-93", "id-mod-qualified-cert-93", NID_id_mod_qualified_cert_93, 8, &kObjectData[2025], 0}, {"id-mod-attribute-cert", "id-mod-attribute-cert", NID_id_mod_attribute_cert, 8, &kObjectData[2033], 0}, {"id-mod-timestamp-protocol", "id-mod-timestamp-protocol", NID_id_mod_timestamp_protocol, 8, &kObjectData[2041], 0}, {"id-mod-ocsp", 
"id-mod-ocsp", NID_id_mod_ocsp, 8, &kObjectData[2049], 0}, {"id-mod-dvcs", "id-mod-dvcs", NID_id_mod_dvcs, 8, &kObjectData[2057], 0}, {"id-mod-cmp2000", "id-mod-cmp2000", NID_id_mod_cmp2000, 8, &kObjectData[2065], 0}, {"biometricInfo", "Biometric Info", NID_biometricInfo, 8, &kObjectData[2073], 0}, {"qcStatements", "qcStatements", NID_qcStatements, 8, &kObjectData[2081], 0}, {"ac-auditEntity", "ac-auditEntity", NID_ac_auditEntity, 8, &kObjectData[2089], 0}, {"ac-targeting", "ac-targeting", NID_ac_targeting, 8, &kObjectData[2097], 0}, {"aaControls", "aaControls", NID_aaControls, 8, &kObjectData[2105], 0}, {"sbgp-ipAddrBlock", "sbgp-ipAddrBlock", NID_sbgp_ipAddrBlock, 8, &kObjectData[2113], 0}, {"sbgp-autonomousSysNum", "sbgp-autonomousSysNum", NID_sbgp_autonomousSysNum, 8, &kObjectData[2121], 0}, {"sbgp-routerIdentifier", "sbgp-routerIdentifier", NID_sbgp_routerIdentifier, 8, &kObjectData[2129], 0}, {"textNotice", "textNotice", NID_textNotice, 8, &kObjectData[2137], 0}, {"ipsecEndSystem", "IPSec End System", NID_ipsecEndSystem, 8, &kObjectData[2145], 0}, {"ipsecTunnel", "IPSec Tunnel", NID_ipsecTunnel, 8, &kObjectData[2153], 0}, {"ipsecUser", "IPSec User", NID_ipsecUser, 8, &kObjectData[2161], 0}, {"DVCS", "dvcs", NID_dvcs, 8, &kObjectData[2169], 0}, {"id-it-caProtEncCert", "id-it-caProtEncCert", NID_id_it_caProtEncCert, 8, &kObjectData[2177], 0}, {"id-it-signKeyPairTypes", "id-it-signKeyPairTypes", NID_id_it_signKeyPairTypes, 8, &kObjectData[2185], 0}, {"id-it-encKeyPairTypes", "id-it-encKeyPairTypes", NID_id_it_encKeyPairTypes, 8, &kObjectData[2193], 0}, {"id-it-preferredSymmAlg", "id-it-preferredSymmAlg", NID_id_it_preferredSymmAlg, 8, &kObjectData[2201], 0}, {"id-it-caKeyUpdateInfo", "id-it-caKeyUpdateInfo", NID_id_it_caKeyUpdateInfo, 8, &kObjectData[2209], 0}, {"id-it-currentCRL", "id-it-currentCRL", NID_id_it_currentCRL, 8, &kObjectData[2217], 0}, {"id-it-unsupportedOIDs", "id-it-unsupportedOIDs", NID_id_it_unsupportedOIDs, 8, &kObjectData[2225], 0}, 
{"id-it-subscriptionRequest", "id-it-subscriptionRequest", NID_id_it_subscriptionRequest, 8, &kObjectData[2233], 0}, {"id-it-subscriptionResponse", "id-it-subscriptionResponse", NID_id_it_subscriptionResponse, 8, &kObjectData[2241], 0}, {"id-it-keyPairParamReq", "id-it-keyPairParamReq", NID_id_it_keyPairParamReq, 8, &kObjectData[2249], 0}, {"id-it-keyPairParamRep", "id-it-keyPairParamRep", NID_id_it_keyPairParamRep, 8, &kObjectData[2257], 0}, {"id-it-revPassphrase", "id-it-revPassphrase", NID_id_it_revPassphrase, 8, &kObjectData[2265], 0}, {"id-it-implicitConfirm", "id-it-implicitConfirm", NID_id_it_implicitConfirm, 8, &kObjectData[2273], 0}, {"id-it-confirmWaitTime", "id-it-confirmWaitTime", NID_id_it_confirmWaitTime, 8, &kObjectData[2281], 0}, {"id-it-origPKIMessage", "id-it-origPKIMessage", NID_id_it_origPKIMessage, 8, &kObjectData[2289], 0}, {"id-regCtrl", "id-regCtrl", NID_id_regCtrl, 8, &kObjectData[2297], 0}, {"id-regInfo", "id-regInfo", NID_id_regInfo, 8, &kObjectData[2305], 0}, {"id-regCtrl-regToken", "id-regCtrl-regToken", NID_id_regCtrl_regToken, 9, &kObjectData[2313], 0}, {"id-regCtrl-authenticator", "id-regCtrl-authenticator", NID_id_regCtrl_authenticator, 9, &kObjectData[2322], 0}, {"id-regCtrl-pkiPublicationInfo", "id-regCtrl-pkiPublicationInfo", NID_id_regCtrl_pkiPublicationInfo, 9, &kObjectData[2331], 0}, {"id-regCtrl-pkiArchiveOptions", "id-regCtrl-pkiArchiveOptions", NID_id_regCtrl_pkiArchiveOptions, 9, &kObjectData[2340], 0}, {"id-regCtrl-oldCertID", "id-regCtrl-oldCertID", NID_id_regCtrl_oldCertID, 9, &kObjectData[2349], 0}, {"id-regCtrl-protocolEncrKey", "id-regCtrl-protocolEncrKey", NID_id_regCtrl_protocolEncrKey, 9, &kObjectData[2358], 0}, {"id-regInfo-utf8Pairs", "id-regInfo-utf8Pairs", NID_id_regInfo_utf8Pairs, 9, &kObjectData[2367], 0}, {"id-regInfo-certReq", "id-regInfo-certReq", NID_id_regInfo_certReq, 9, &kObjectData[2376], 0}, {"id-alg-des40", "id-alg-des40", NID_id_alg_des40, 8, &kObjectData[2385], 0}, {"id-alg-noSignature", 
"id-alg-noSignature", NID_id_alg_noSignature, 8, &kObjectData[2393], 0}, {"id-alg-dh-sig-hmac-sha1", "id-alg-dh-sig-hmac-sha1", NID_id_alg_dh_sig_hmac_sha1, 8, &kObjectData[2401], 0}, {"id-alg-dh-pop", "id-alg-dh-pop", NID_id_alg_dh_pop, 8, &kObjectData[2409], 0}, {"id-cmc-statusInfo", "id-cmc-statusInfo", NID_id_cmc_statusInfo, 8, &kObjectData[2417], 0}, {"id-cmc-identification", "id-cmc-identification", NID_id_cmc_identification, 8, &kObjectData[2425], 0}, {"id-cmc-identityProof", "id-cmc-identityProof", NID_id_cmc_identityProof, 8, &kObjectData[2433], 0}, {"id-cmc-dataReturn", "id-cmc-dataReturn", NID_id_cmc_dataReturn, 8, &kObjectData[2441], 0}, {"id-cmc-transactionId", "id-cmc-transactionId", NID_id_cmc_transactionId, 8, &kObjectData[2449], 0}, {"id-cmc-senderNonce", "id-cmc-senderNonce", NID_id_cmc_senderNonce, 8, &kObjectData[2457], 0}, {"id-cmc-recipientNonce", "id-cmc-recipientNonce", NID_id_cmc_recipientNonce, 8, &kObjectData[2465], 0}, {"id-cmc-addExtensions", "id-cmc-addExtensions", NID_id_cmc_addExtensions, 8, &kObjectData[2473], 0}, {"id-cmc-encryptedPOP", "id-cmc-encryptedPOP", NID_id_cmc_encryptedPOP, 8, &kObjectData[2481], 0}, {"id-cmc-decryptedPOP", "id-cmc-decryptedPOP", NID_id_cmc_decryptedPOP, 8, &kObjectData[2489], 0}, {"id-cmc-lraPOPWitness", "id-cmc-lraPOPWitness", NID_id_cmc_lraPOPWitness, 8, &kObjectData[2497], 0}, {"id-cmc-getCert", "id-cmc-getCert", NID_id_cmc_getCert, 8, &kObjectData[2505], 0}, {"id-cmc-getCRL", "id-cmc-getCRL", NID_id_cmc_getCRL, 8, &kObjectData[2513], 0}, {"id-cmc-revokeRequest", "id-cmc-revokeRequest", NID_id_cmc_revokeRequest, 8, &kObjectData[2521], 0}, {"id-cmc-regInfo", "id-cmc-regInfo", NID_id_cmc_regInfo, 8, &kObjectData[2529], 0}, {"id-cmc-responseInfo", "id-cmc-responseInfo", NID_id_cmc_responseInfo, 8, &kObjectData[2537], 0}, {"id-cmc-queryPending", "id-cmc-queryPending", NID_id_cmc_queryPending, 8, &kObjectData[2545], 0}, {"id-cmc-popLinkRandom", "id-cmc-popLinkRandom", NID_id_cmc_popLinkRandom, 8, 
&kObjectData[2553], 0}, {"id-cmc-popLinkWitness", "id-cmc-popLinkWitness", NID_id_cmc_popLinkWitness, 8, &kObjectData[2561], 0}, {"id-cmc-confirmCertAcceptance", "id-cmc-confirmCertAcceptance", NID_id_cmc_confirmCertAcceptance, 8, &kObjectData[2569], 0}, {"id-on-personalData", "id-on-personalData", NID_id_on_personalData, 8, &kObjectData[2577], 0}, {"id-pda-dateOfBirth", "id-pda-dateOfBirth", NID_id_pda_dateOfBirth, 8, &kObjectData[2585], 0}, {"id-pda-placeOfBirth", "id-pda-placeOfBirth", NID_id_pda_placeOfBirth, 8, &kObjectData[2593], 0}, {NULL, NULL, NID_undef, 0, NULL, 0}, {"id-pda-gender", "id-pda-gender", NID_id_pda_gender, 8, &kObjectData[2601], 0}, {"id-pda-countryOfCitizenship", "id-pda-countryOfCitizenship", NID_id_pda_countryOfCitizenship, 8, &kObjectData[2609], 0}, {"id-pda-countryOfResidence", "id-pda-countryOfResidence", NID_id_pda_countryOfResidence, 8, &kObjectData[2617], 0}, {"id-aca-authenticationInfo", "id-aca-authenticationInfo", NID_id_aca_authenticationInfo, 8, &kObjectData[2625], 0}, {"id-aca-accessIdentity", "id-aca-accessIdentity", NID_id_aca_accessIdentity, 8, &kObjectData[2633], 0}, {"id-aca-chargingIdentity", "id-aca-chargingIdentity", NID_id_aca_chargingIdentity, 8, &kObjectData[2641], 0}, {"id-aca-group", "id-aca-group", NID_id_aca_group, 8, &kObjectData[2649], 0}, {"id-aca-role", "id-aca-role", NID_id_aca_role, 8, &kObjectData[2657], 0}, {"id-qcs-pkixQCSyntax-v1", "id-qcs-pkixQCSyntax-v1", NID_id_qcs_pkixQCSyntax_v1, 8, &kObjectData[2665], 0}, {"id-cct-crs", "id-cct-crs", NID_id_cct_crs, 8, &kObjectData[2673], 0}, {"id-cct-PKIData", "id-cct-PKIData", NID_id_cct_PKIData, 8, &kObjectData[2681], 0}, {"id-cct-PKIResponse", "id-cct-PKIResponse", NID_id_cct_PKIResponse, 8, &kObjectData[2689], 0}, {"ad_timestamping", "AD Time Stamping", NID_ad_timeStamping, 8, &kObjectData[2697], 0}, {"AD_DVCS", "ad dvcs", NID_ad_dvcs, 8, &kObjectData[2705], 0}, {"basicOCSPResponse", "Basic OCSP Response", NID_id_pkix_OCSP_basic, 9, &kObjectData[2713], 0}, 
{"Nonce", "OCSP Nonce", NID_id_pkix_OCSP_Nonce, 9, &kObjectData[2722], 0}, {"CrlID", "OCSP CRL ID", NID_id_pkix_OCSP_CrlID, 9, &kObjectData[2731], 0}, {"acceptableResponses", "Acceptable OCSP Responses", NID_id_pkix_OCSP_acceptableResponses, 9, &kObjectData[2740], 0}, {"noCheck", "OCSP No Check", NID_id_pkix_OCSP_noCheck, 9, &kObjectData[2749], 0}, {"archiveCutoff", "OCSP Archive Cutoff", NID_id_pkix_OCSP_archiveCutoff, 9, &kObjectData[2758], 0}, {"serviceLocator", "OCSP Service Locator", NID_id_pkix_OCSP_serviceLocator, 9, &kObjectData[2767], 0}, {"extendedStatus", "Extended OCSP Status", NID_id_pkix_OCSP_extendedStatus, 9, &kObjectData[2776], 0}, {"valid", "valid", NID_id_pkix_OCSP_valid, 9, &kObjectData[2785], 0}, {"path", "path", NID_id_pkix_OCSP_path, 9, &kObjectData[2794], 0}, {"trustRoot", "Trust Root", NID_id_pkix_OCSP_trustRoot, 9, &kObjectData[2803], 0}, {"algorithm", "algorithm", NID_algorithm, 4, &kObjectData[2812], 0}, {"rsaSignature", "rsaSignature", NID_rsaSignature, 5, &kObjectData[2816], 0}, {"X500algorithms", "directory services - algorithms", NID_X500algorithms, 2, &kObjectData[2821], 0}, {"ORG", "org", NID_org, 1, &kObjectData[2823], 0}, {"DOD", "dod", NID_dod, 2, &kObjectData[2824], 0}, {"IANA", "iana", NID_iana, 3, &kObjectData[2826], 0}, {"directory", "Directory", NID_Directory, 4, &kObjectData[2829], 0}, {"mgmt", "Management", NID_Management, 4, &kObjectData[2833], 0}, {"experimental", "Experimental", NID_Experimental, 4, &kObjectData[2837], 0}, {"private", "Private", NID_Private, 4, &kObjectData[2841], 0}, {"security", "Security", NID_Security, 4, &kObjectData[2845], 0}, {"snmpv2", "SNMPv2", NID_SNMPv2, 4, &kObjectData[2849], 0}, {"Mail", "Mail", NID_Mail, 4, &kObjectData[2853], 0}, {"enterprises", "Enterprises", NID_Enterprises, 5, &kObjectData[2857], 0}, {"dcobject", "dcObject", NID_dcObject, 9, &kObjectData[2862], 0}, {"DC", "domainComponent", NID_domainComponent, 10, &kObjectData[2871], 0}, {"domain", "Domain", NID_Domain, 10, 
&kObjectData[2881], 0}, {NULL, NULL, NID_undef, 0, NULL, 0}, {"selected-attribute-types", "Selected Attribute Types", NID_selected_attribute_types, 3, &kObjectData[2891], 0}, {"clearance", "clearance", NID_clearance, 4, &kObjectData[2894], 0}, {"RSA-MD4", "md4WithRSAEncryption", NID_md4WithRSAEncryption, 9, &kObjectData[2898], 0}, {"ac-proxying", "ac-proxying", NID_ac_proxying, 8, &kObjectData[2907], 0}, {"subjectInfoAccess", "Subject Information Access", NID_sinfo_access, 8, &kObjectData[2915], 0}, {"id-aca-encAttrs", "id-aca-encAttrs", NID_id_aca_encAttrs, 8, &kObjectData[2923], 0}, {"role", "role", NID_role, 3, &kObjectData[2931], 0}, {"policyConstraints", "X509v3 Policy Constraints", NID_policy_constraints, 3, &kObjectData[2934], 0}, {"targetInformation", "X509v3 AC Targeting", NID_target_information, 3, &kObjectData[2937], 0}, {"noRevAvail", "X509v3 No Revocation Available", NID_no_rev_avail, 3, &kObjectData[2940], 0}, {NULL, NULL, NID_undef, 0, NULL, 0}, {"ansi-X9-62", "ANSI X9.62", NID_ansi_X9_62, 5, &kObjectData[2943], 0}, {"prime-field", "prime-field", NID_X9_62_prime_field, 7, &kObjectData[2948], 0}, {"characteristic-two-field", "characteristic-two-field", NID_X9_62_characteristic_two_field, 7, &kObjectData[2955], 0}, {"id-ecPublicKey", "id-ecPublicKey", NID_X9_62_id_ecPublicKey, 7, &kObjectData[2962], 0}, {"prime192v1", "prime192v1", NID_X9_62_prime192v1, 8, &kObjectData[2969], 0}, {"prime192v2", "prime192v2", NID_X9_62_prime192v2, 8, &kObjectData[2977], 0}, {"prime192v3", "prime192v3", NID_X9_62_prime192v3, 8, &kObjectData[2985], 0}, {"prime239v1", "prime239v1", NID_X9_62_prime239v1, 8, &kObjectData[2993], 0}, {"prime239v2", "prime239v2", NID_X9_62_prime239v2, 8, &kObjectData[3001], 0}, {"prime239v3", "prime239v3", NID_X9_62_prime239v3, 8, &kObjectData[3009], 0}, {"prime256v1", "prime256v1", NID_X9_62_prime256v1, 8, &kObjectData[3017], 0}, {"ecdsa-with-SHA1", "ecdsa-with-SHA1", NID_ecdsa_with_SHA1, 7, &kObjectData[3025], 0}, {"CSPName", "Microsoft CSP 
Name", NID_ms_csp_name, 9, &kObjectData[3032], 0}, {"AES-128-ECB", "aes-128-ecb", NID_aes_128_ecb, 9, &kObjectData[3041], 0}, {"AES-128-CBC", "aes-128-cbc", NID_aes_128_cbc, 9, &kObjectData[3050], 0}, {"AES-128-OFB", "aes-128-ofb", NID_aes_128_ofb128, 9, &kObjectData[3059], 0}, {"AES-128-CFB", "aes-128-cfb", NID_aes_128_cfb128, 9, &kObjectData[3068], 0}, {"AES-192-ECB", "aes-192-ecb", NID_aes_192_ecb, 9, &kObjectData[3077], 0}, {"AES-192-CBC", "aes-192-cbc", NID_aes_192_cbc, 9, &kObjectData[3086], 0}, {"AES-192-OFB", "aes-192-ofb", NID_aes_192_ofb128, 9, &kObjectData[3095], 0}, {"AES-192-CFB", "aes-192-cfb", NID_aes_192_cfb128, 9, &kObjectData[3104], 0}, {"AES-256-ECB", "aes-256-ecb", NID_aes_256_ecb, 9, &kObjectData[3113], 0}, {"AES-256-CBC", "aes-256-cbc", NID_aes_256_cbc, 9, &kObjectData[3122], 0}, {"AES-256-OFB", "aes-256-ofb", NID_aes_256_ofb128, 9, &kObjectData[3131], 0}, {"AES-256-CFB", "aes-256-cfb", NID_aes_256_cfb128, 9, &kObjectData[3140], 0}, {"holdInstructionCode", "Hold Instruction Code", NID_hold_instruction_code, 3, &kObjectData[3149], 0}, {"holdInstructionNone", "Hold Instruction None", NID_hold_instruction_none, 7, &kObjectData[3152], 0}, {"holdInstructionCallIssuer", "Hold Instruction Call Issuer", NID_hold_instruction_call_issuer, 7, &kObjectData[3159], 0}, {"holdInstructionReject", "Hold Instruction Reject", NID_hold_instruction_reject, 7, &kObjectData[3166], 0}, {"data", "data", NID_data, 1, &kObjectData[3173], 0}, {"pss", "pss", NID_pss, 3, &kObjectData[3174], 0}, {"ucl", "ucl", NID_ucl, 7, &kObjectData[3177], 0}, {"pilot", "pilot", NID_pilot, 8, &kObjectData[3184], 0}, {"pilotAttributeType", "pilotAttributeType", NID_pilotAttributeType, 9, &kObjectData[3192], 0}, {"pilotAttributeSyntax", "pilotAttributeSyntax", NID_pilotAttributeSyntax, 9, &kObjectData[3201], 0}, {"pilotObjectClass", "pilotObjectClass", NID_pilotObjectClass, 9, &kObjectData[3210], 0}, {"pilotGroups", "pilotGroups", NID_pilotGroups, 9, &kObjectData[3219], 0}, 
{"iA5StringSyntax", "iA5StringSyntax", NID_iA5StringSyntax, 10, &kObjectData[3228], 0}, {"caseIgnoreIA5StringSyntax", "caseIgnoreIA5StringSyntax", NID_caseIgnoreIA5StringSyntax, 10, &kObjectData[3238], 0}, {"pilotObject", "pilotObject", NID_pilotObject, 10, &kObjectData[3248], 0}, {"pilotPerson", "pilotPerson", NID_pilotPerson, 10, &kObjectData[3258], 0}, {"account", "account", NID_account, 10, &kObjectData[3268], 0}, {"document", "document", NID_document, 10, &kObjectData[3278], 0}, {"room", "room", NID_room, 10, &kObjectData[3288], 0}, {"documentSeries", "documentSeries", NID_documentSeries, 10, &kObjectData[3298], 0}, {"rFC822localPart", "rFC822localPart", NID_rFC822localPart, 10, &kObjectData[3308], 0}, {"dNSDomain", "dNSDomain", NID_dNSDomain, 10, &kObjectData[3318], 0}, {"domainRelatedObject", "domainRelatedObject", NID_domainRelatedObject, 10, &kObjectData[3328], 0}, {"friendlyCountry", "friendlyCountry", NID_friendlyCountry, 10, &kObjectData[3338], 0}, {"simpleSecurityObject", "simpleSecurityObject", NID_simpleSecurityObject, 10, &kObjectData[3348], 0}, {"pilotOrganization", "pilotOrganization", NID_pilotOrganization, 10, &kObjectData[3358], 0}, {"pilotDSA", "pilotDSA", NID_pilotDSA, 10, &kObjectData[3368], 0}, {"qualityLabelledData", "qualityLabelledData", NID_qualityLabelledData, 10, &kObjectData[3378], 0}, {"UID", "userId", NID_userId, 10, &kObjectData[3388], 0}, {"textEncodedORAddress", "textEncodedORAddress", NID_textEncodedORAddress, 10, &kObjectData[3398], 0}, {"mail", "rfc822Mailbox", NID_rfc822Mailbox, 10, &kObjectData[3408], 0}, {"info", "info", NID_info, 10, &kObjectData[3418], 0}, {"favouriteDrink", "favouriteDrink", NID_favouriteDrink, 10, &kObjectData[3428], 0}, {"roomNumber", "roomNumber", NID_roomNumber, 10, &kObjectData[3438], 0}, {"photo", "photo", NID_photo, 10, &kObjectData[3448], 0}, {"userClass", "userClass", NID_userClass, 10, &kObjectData[3458], 0}, {"host", "host", NID_host, 10, &kObjectData[3468], 0}, {"manager", "manager", 
NID_manager, 10, &kObjectData[3478], 0}, {"documentIdentifier", "documentIdentifier", NID_documentIdentifier, 10, &kObjectData[3488], 0}, {"documentTitle", "documentTitle", NID_documentTitle, 10, &kObjectData[3498], 0}, {"documentVersion", "documentVersion", NID_documentVersion, 10, &kObjectData[3508], 0}, {"documentAuthor", "documentAuthor", NID_documentAuthor, 10, &kObjectData[3518], 0}, {"documentLocation", "documentLocation", NID_documentLocation, 10, &kObjectData[3528], 0}, {"homeTelephoneNumber", "homeTelephoneNumber", NID_homeTelephoneNumber, 10, &kObjectData[3538], 0}, {"secretary", "secretary", NID_secretary, 10, &kObjectData[3548], 0}, {"otherMailbox", "otherMailbox", NID_otherMailbox, 10, &kObjectData[3558], 0}, {"lastModifiedTime", "lastModifiedTime", NID_lastModifiedTime, 10, &kObjectData[3568], 0}, {"lastModifiedBy", "lastModifiedBy", NID_lastModifiedBy, 10, &kObjectData[3578], 0}, {"aRecord", "aRecord", NID_aRecord, 10, &kObjectData[3588], 0}, {"pilotAttributeType27", "pilotAttributeType27", NID_pilotAttributeType27, 10, &kObjectData[3598], 0}, {"mXRecord", "mXRecord", NID_mXRecord, 10, &kObjectData[3608], 0}, {"nSRecord", "nSRecord", NID_nSRecord, 10, &kObjectData[3618], 0}, {"sOARecord", "sOARecord", NID_sOARecord, 10, &kObjectData[3628], 0}, {"cNAMERecord", "cNAMERecord", NID_cNAMERecord, 10, &kObjectData[3638], 0}, {"associatedDomain", "associatedDomain", NID_associatedDomain, 10, &kObjectData[3648], 0}, {"associatedName", "associatedName", NID_associatedName, 10, &kObjectData[3658], 0}, {"homePostalAddress", "homePostalAddress", NID_homePostalAddress, 10, &kObjectData[3668], 0}, {"personalTitle", "personalTitle", NID_personalTitle, 10, &kObjectData[3678], 0}, {"mobileTelephoneNumber", "mobileTelephoneNumber", NID_mobileTelephoneNumber, 10, &kObjectData[3688], 0}, {"pagerTelephoneNumber", "pagerTelephoneNumber", NID_pagerTelephoneNumber, 10, &kObjectData[3698], 0}, {"friendlyCountryName", "friendlyCountryName", NID_friendlyCountryName, 10, 
&kObjectData[3708], 0}, {"organizationalStatus", "organizationalStatus", NID_organizationalStatus, 10, &kObjectData[3718], 0}, {"janetMailbox", "janetMailbox", NID_janetMailbox, 10, &kObjectData[3728], 0}, {"mailPreferenceOption", "mailPreferenceOption", NID_mailPreferenceOption, 10, &kObjectData[3738], 0}, {"buildingName", "buildingName", NID_buildingName, 10, &kObjectData[3748], 0}, {"dSAQuality", "dSAQuality", NID_dSAQuality, 10, &kObjectData[3758], 0}, {"singleLevelQuality", "singleLevelQuality", NID_singleLevelQuality, 10, &kObjectData[3768], 0}, {"subtreeMinimumQuality", "subtreeMinimumQuality", NID_subtreeMinimumQuality, 10, &kObjectData[3778], 0}, {"subtreeMaximumQuality", "subtreeMaximumQuality", NID_subtreeMaximumQuality, 10, &kObjectData[3788], 0}, {"personalSignature", "personalSignature", NID_personalSignature, 10, &kObjectData[3798], 0}, {"dITRedirect", "dITRedirect", NID_dITRedirect, 10, &kObjectData[3808], 0}, {"audio", "audio", NID_audio, 10, &kObjectData[3818], 0}, {"documentPublisher", "documentPublisher", NID_documentPublisher, 10, &kObjectData[3828], 0}, {"x500UniqueIdentifier", "x500UniqueIdentifier", NID_x500UniqueIdentifier, 3, &kObjectData[3838], 0}, {"mime-mhs", "MIME MHS", NID_mime_mhs, 5, &kObjectData[3841], 0}, {"mime-mhs-headings", "mime-mhs-headings", NID_mime_mhs_headings, 6, &kObjectData[3846], 0}, {"mime-mhs-bodies", "mime-mhs-bodies", NID_mime_mhs_bodies, 6, &kObjectData[3852], 0}, {"id-hex-partial-message", "id-hex-partial-message", NID_id_hex_partial_message, 7, &kObjectData[3858], 0}, {"id-hex-multipart-message", "id-hex-multipart-message", NID_id_hex_multipart_message, 7, &kObjectData[3865], 0}, {"generationQualifier", "generationQualifier", NID_generationQualifier, 3, &kObjectData[3872], 0}, {"pseudonym", "pseudonym", NID_pseudonym, 3, &kObjectData[3875], 0}, {NULL, NULL, NID_undef, 0, NULL, 0}, {"id-set", "Secure Electronic Transactions", NID_id_set, 2, &kObjectData[3878], 0}, {"set-ctype", "content types", NID_set_ctype, 3, 
&kObjectData[3880], 0}, {"set-msgExt", "message extensions", NID_set_msgExt, 3, &kObjectData[3883], 0}, {"set-attr", "set-attr", NID_set_attr, 3, &kObjectData[3886], 0}, {"set-policy", "set-policy", NID_set_policy, 3, &kObjectData[3889], 0}, {"set-certExt", "certificate extensions", NID_set_certExt, 3, &kObjectData[3892], 0}, {"set-brand", "set-brand", NID_set_brand, 3, &kObjectData[3895], 0}, {"setct-PANData", "setct-PANData", NID_setct_PANData, 4, &kObjectData[3898], 0}, {"setct-PANToken", "setct-PANToken", NID_setct_PANToken, 4, &kObjectData[3902], 0}, {"setct-PANOnly", "setct-PANOnly", NID_setct_PANOnly, 4, &kObjectData[3906], 0}, {"setct-OIData", "setct-OIData", NID_setct_OIData, 4, &kObjectData[3910], 0}, {"setct-PI", "setct-PI", NID_setct_PI, 4, &kObjectData[3914], 0}, {"setct-PIData", "setct-PIData", NID_setct_PIData, 4, &kObjectData[3918], 0}, {"setct-PIDataUnsigned", "setct-PIDataUnsigned", NID_setct_PIDataUnsigned, 4, &kObjectData[3922], 0}, {"setct-HODInput", "setct-HODInput", NID_setct_HODInput, 4, &kObjectData[3926], 0}, {"setct-AuthResBaggage", "setct-AuthResBaggage", NID_setct_AuthResBaggage, 4, &kObjectData[3930], 0}, {"setct-AuthRevReqBaggage", "setct-AuthRevReqBaggage", NID_setct_AuthRevReqBaggage, 4, &kObjectData[3934], 0}, {"setct-AuthRevResBaggage", "setct-AuthRevResBaggage", NID_setct_AuthRevResBaggage, 4, &kObjectData[3938], 0}, {"setct-CapTokenSeq", "setct-CapTokenSeq", NID_setct_CapTokenSeq, 4, &kObjectData[3942], 0}, {"setct-PInitResData", "setct-PInitResData", NID_setct_PInitResData, 4, &kObjectData[3946], 0}, {"setct-PI-TBS", "setct-PI-TBS", NID_setct_PI_TBS, 4, &kObjectData[3950], 0}, {"setct-PResData", "setct-PResData", NID_setct_PResData, 4, &kObjectData[3954], 0}, {"setct-AuthReqTBS", "setct-AuthReqTBS", NID_setct_AuthReqTBS, 4, &kObjectData[3958], 0}, {"setct-AuthResTBS", "setct-AuthResTBS", NID_setct_AuthResTBS, 4, &kObjectData[3962], 0}, {"setct-AuthResTBSX", "setct-AuthResTBSX", NID_setct_AuthResTBSX, 4, &kObjectData[3966], 0}, 
{"setct-AuthTokenTBS", "setct-AuthTokenTBS", NID_setct_AuthTokenTBS, 4, &kObjectData[3970], 0}, {"setct-CapTokenData", "setct-CapTokenData", NID_setct_CapTokenData, 4, &kObjectData[3974], 0}, {"setct-CapTokenTBS", "setct-CapTokenTBS", NID_setct_CapTokenTBS, 4, &kObjectData[3978], 0}, {"setct-AcqCardCodeMsg", "setct-AcqCardCodeMsg", NID_setct_AcqCardCodeMsg, 4, &kObjectData[3982], 0}, {"setct-AuthRevReqTBS", "setct-AuthRevReqTBS", NID_setct_AuthRevReqTBS, 4, &kObjectData[3986], 0}, {"setct-AuthRevResData", "setct-AuthRevResData", NID_setct_AuthRevResData, 4, &kObjectData[3990], 0}, {"setct-AuthRevResTBS", "setct-AuthRevResTBS", NID_setct_AuthRevResTBS, 4, &kObjectData[3994], 0}, {"setct-CapReqTBS", "setct-CapReqTBS", NID_setct_CapReqTBS, 4, &kObjectData[3998], 0}, {"setct-CapReqTBSX", "setct-CapReqTBSX", NID_setct_CapReqTBSX, 4, &kObjectData[4002], 0}, {"setct-CapResData", "setct-CapResData", NID_setct_CapResData, 4, &kObjectData[4006], 0}, {"setct-CapRevReqTBS", "setct-CapRevReqTBS", NID_setct_CapRevReqTBS, 4, &kObjectData[4010], 0}, {"setct-CapRevReqTBSX", "setct-CapRevReqTBSX", NID_setct_CapRevReqTBSX, 4, &kObjectData[4014], 0}, {"setct-CapRevResData", "setct-CapRevResData", NID_setct_CapRevResData, 4, &kObjectData[4018], 0}, {"setct-CredReqTBS", "setct-CredReqTBS", NID_setct_CredReqTBS, 4, &kObjectData[4022], 0}, {"setct-CredReqTBSX", "setct-CredReqTBSX", NID_setct_CredReqTBSX, 4, &kObjectData[4026], 0}, {"setct-CredResData", "setct-CredResData", NID_setct_CredResData, 4, &kObjectData[4030], 0}, {"setct-CredRevReqTBS", "setct-CredRevReqTBS", NID_setct_CredRevReqTBS, 4, &kObjectData[4034], 0}, {"setct-CredRevReqTBSX", "setct-CredRevReqTBSX", NID_setct_CredRevReqTBSX, 4, &kObjectData[4038], 0}, {"setct-CredRevResData", "setct-CredRevResData", NID_setct_CredRevResData, 4, &kObjectData[4042], 0}, {"setct-PCertReqData", "setct-PCertReqData", NID_setct_PCertReqData, 4, &kObjectData[4046], 0}, {"setct-PCertResTBS", "setct-PCertResTBS", NID_setct_PCertResTBS, 4, 
&kObjectData[4050], 0}, {"setct-BatchAdminReqData", "setct-BatchAdminReqData", NID_setct_BatchAdminReqData, 4, &kObjectData[4054], 0}, {"setct-BatchAdminResData", "setct-BatchAdminResData", NID_setct_BatchAdminResData, 4, &kObjectData[4058], 0}, {"setct-CardCInitResTBS", "setct-CardCInitResTBS", NID_setct_CardCInitResTBS, 4, &kObjectData[4062], 0}, {"setct-MeAqCInitResTBS", "setct-MeAqCInitResTBS", NID_setct_MeAqCInitResTBS, 4, &kObjectData[4066], 0}, {"setct-RegFormResTBS", "setct-RegFormResTBS", NID_setct_RegFormResTBS, 4, &kObjectData[4070], 0}, {"setct-CertReqData", "setct-CertReqData", NID_setct_CertReqData, 4, &kObjectData[4074], 0}, {"setct-CertReqTBS", "setct-CertReqTBS", NID_setct_CertReqTBS, 4, &kObjectData[4078], 0}, {"setct-CertResData", "setct-CertResData", NID_setct_CertResData, 4, &kObjectData[4082], 0}, {"setct-CertInqReqTBS", "setct-CertInqReqTBS", NID_setct_CertInqReqTBS, 4, &kObjectData[4086], 0}, {"setct-ErrorTBS", "setct-ErrorTBS", NID_setct_ErrorTBS, 4, &kObjectData[4090], 0}, {"setct-PIDualSignedTBE", "setct-PIDualSignedTBE", NID_setct_PIDualSignedTBE, 4, &kObjectData[4094], 0}, {"setct-PIUnsignedTBE", "setct-PIUnsignedTBE", NID_setct_PIUnsignedTBE, 4, &kObjectData[4098], 0}, {"setct-AuthReqTBE", "setct-AuthReqTBE", NID_setct_AuthReqTBE, 4, &kObjectData[4102], 0}, {"setct-AuthResTBE", "setct-AuthResTBE", NID_setct_AuthResTBE, 4, &kObjectData[4106], 0}, {"setct-AuthResTBEX", "setct-AuthResTBEX", NID_setct_AuthResTBEX, 4, &kObjectData[4110], 0}, {"setct-AuthTokenTBE", "setct-AuthTokenTBE", NID_setct_AuthTokenTBE, 4, &kObjectData[4114], 0}, {"setct-CapTokenTBE", "setct-CapTokenTBE", NID_setct_CapTokenTBE, 4, &kObjectData[4118], 0}, {"setct-CapTokenTBEX", "setct-CapTokenTBEX", NID_setct_CapTokenTBEX, 4, &kObjectData[4122], 0}, {"setct-AcqCardCodeMsgTBE", "setct-AcqCardCodeMsgTBE", NID_setct_AcqCardCodeMsgTBE, 4, &kObjectData[4126], 0}, {"setct-AuthRevReqTBE", "setct-AuthRevReqTBE", NID_setct_AuthRevReqTBE, 4, &kObjectData[4130], 0}, 
{"setct-AuthRevResTBE", "setct-AuthRevResTBE", NID_setct_AuthRevResTBE, 4, &kObjectData[4134], 0}, {"setct-AuthRevResTBEB", "setct-AuthRevResTBEB", NID_setct_AuthRevResTBEB, 4, &kObjectData[4138], 0}, {"setct-CapReqTBE", "setct-CapReqTBE", NID_setct_CapReqTBE, 4, &kObjectData[4142], 0}, {"setct-CapReqTBEX", "setct-CapReqTBEX", NID_setct_CapReqTBEX, 4, &kObjectData[4146], 0}, {"setct-CapResTBE", "setct-CapResTBE", NID_setct_CapResTBE, 4, &kObjectData[4150], 0}, {"setct-CapRevReqTBE", "setct-CapRevReqTBE", NID_setct_CapRevReqTBE, 4, &kObjectData[4154], 0}, {"setct-CapRevReqTBEX", "setct-CapRevReqTBEX", NID_setct_CapRevReqTBEX, 4, &kObjectData[4158], 0}, {"setct-CapRevResTBE", "setct-CapRevResTBE", NID_setct_CapRevResTBE, 4, &kObjectData[4162], 0}, {"setct-CredReqTBE", "setct-CredReqTBE", NID_setct_CredReqTBE, 4, &kObjectData[4166], 0}, {"setct-CredReqTBEX", "setct-CredReqTBEX", NID_setct_CredReqTBEX, 4, &kObjectData[4170], 0}, {"setct-CredResTBE", "setct-CredResTBE", NID_setct_CredResTBE, 4, &kObjectData[4174], 0}, {"setct-CredRevReqTBE", "setct-CredRevReqTBE", NID_setct_CredRevReqTBE, 4, &kObjectData[4178], 0}, {"setct-CredRevReqTBEX", "setct-CredRevReqTBEX", NID_setct_CredRevReqTBEX, 4, &kObjectData[4182], 0}, {"setct-CredRevResTBE", "setct-CredRevResTBE", NID_setct_CredRevResTBE, 4, &kObjectData[4186], 0}, {"setct-BatchAdminReqTBE", "setct-BatchAdminReqTBE", NID_setct_BatchAdminReqTBE, 4, &kObjectData[4190], 0}, {"setct-BatchAdminResTBE", "setct-BatchAdminResTBE", NID_setct_BatchAdminResTBE, 4, &kObjectData[4194], 0}, {"setct-RegFormReqTBE", "setct-RegFormReqTBE", NID_setct_RegFormReqTBE, 4, &kObjectData[4198], 0}, {"setct-CertReqTBE", "setct-CertReqTBE", NID_setct_CertReqTBE, 4, &kObjectData[4202], 0}, {"setct-CertReqTBEX", "setct-CertReqTBEX", NID_setct_CertReqTBEX, 4, &kObjectData[4206], 0}, {"setct-CertResTBE", "setct-CertResTBE", NID_setct_CertResTBE, 4, &kObjectData[4210], 0}, {"setct-CRLNotificationTBS", "setct-CRLNotificationTBS", 
NID_setct_CRLNotificationTBS, 4, &kObjectData[4214], 0}, {"setct-CRLNotificationResTBS", "setct-CRLNotificationResTBS", NID_setct_CRLNotificationResTBS, 4, &kObjectData[4218], 0}, {"setct-BCIDistributionTBS", "setct-BCIDistributionTBS", NID_setct_BCIDistributionTBS, 4, &kObjectData[4222], 0}, {"setext-genCrypt", "generic cryptogram", NID_setext_genCrypt, 4, &kObjectData[4226], 0}, {"setext-miAuth", "merchant initiated auth", NID_setext_miAuth, 4, &kObjectData[4230], 0}, {"setext-pinSecure", "setext-pinSecure", NID_setext_pinSecure, 4, &kObjectData[4234], 0}, {"setext-pinAny", "setext-pinAny", NID_setext_pinAny, 4, &kObjectData[4238], 0}, {"setext-track2", "setext-track2", NID_setext_track2, 4, &kObjectData[4242], 0}, {"setext-cv", "additional verification", NID_setext_cv, 4, &kObjectData[4246], 0}, {"set-policy-root", "set-policy-root", NID_set_policy_root, 4, &kObjectData[4250], 0}, {"setCext-hashedRoot", "setCext-hashedRoot", NID_setCext_hashedRoot, 4, &kObjectData[4254], 0}, {"setCext-certType", "setCext-certType", NID_setCext_certType, 4, &kObjectData[4258], 0}, {"setCext-merchData", "setCext-merchData", NID_setCext_merchData, 4, &kObjectData[4262], 0}, {"setCext-cCertRequired", "setCext-cCertRequired", NID_setCext_cCertRequired, 4, &kObjectData[4266], 0}, {"setCext-tunneling", "setCext-tunneling", NID_setCext_tunneling, 4, &kObjectData[4270], 0}, {"setCext-setExt", "setCext-setExt", NID_setCext_setExt, 4, &kObjectData[4274], 0}, {"setCext-setQualf", "setCext-setQualf", NID_setCext_setQualf, 4, &kObjectData[4278], 0}, {"setCext-PGWYcapabilities", "setCext-PGWYcapabilities", NID_setCext_PGWYcapabilities, 4, &kObjectData[4282], 0}, {"setCext-TokenIdentifier", "setCext-TokenIdentifier", NID_setCext_TokenIdentifier, 4, &kObjectData[4286], 0}, {"setCext-Track2Data", "setCext-Track2Data", NID_setCext_Track2Data, 4, &kObjectData[4290], 0}, {"setCext-TokenType", "setCext-TokenType", NID_setCext_TokenType, 4, &kObjectData[4294], 0}, {"setCext-IssuerCapabilities", 
"setCext-IssuerCapabilities", NID_setCext_IssuerCapabilities, 4, &kObjectData[4298], 0}, {"setAttr-Cert", "setAttr-Cert", NID_setAttr_Cert, 4, &kObjectData[4302], 0}, {"setAttr-PGWYcap", "payment gateway capabilities", NID_setAttr_PGWYcap, 4, &kObjectData[4306], 0}, {"setAttr-TokenType", "setAttr-TokenType", NID_setAttr_TokenType, 4, &kObjectData[4310], 0}, {"setAttr-IssCap", "issuer capabilities", NID_setAttr_IssCap, 4, &kObjectData[4314], 0}, {"set-rootKeyThumb", "set-rootKeyThumb", NID_set_rootKeyThumb, 5, &kObjectData[4318], 0}, {"set-addPolicy", "set-addPolicy", NID_set_addPolicy, 5, &kObjectData[4323], 0}, {"setAttr-Token-EMV", "setAttr-Token-EMV", NID_setAttr_Token_EMV, 5, &kObjectData[4328], 0}, {"setAttr-Token-B0Prime", "setAttr-Token-B0Prime", NID_setAttr_Token_B0Prime, 5, &kObjectData[4333], 0}, {"setAttr-IssCap-CVM", "setAttr-IssCap-CVM", NID_setAttr_IssCap_CVM, 5, &kObjectData[4338], 0}, {"setAttr-IssCap-T2", "setAttr-IssCap-T2", NID_setAttr_IssCap_T2, 5, &kObjectData[4343], 0}, {"setAttr-IssCap-Sig", "setAttr-IssCap-Sig", NID_setAttr_IssCap_Sig, 5, &kObjectData[4348], 0}, {"setAttr-GenCryptgrm", "generate cryptogram", NID_setAttr_GenCryptgrm, 6, &kObjectData[4353], 0}, {"setAttr-T2Enc", "encrypted track 2", NID_setAttr_T2Enc, 6, &kObjectData[4359], 0}, {"setAttr-T2cleartxt", "cleartext track 2", NID_setAttr_T2cleartxt, 6, &kObjectData[4365], 0}, {"setAttr-TokICCsig", "ICC or token signature", NID_setAttr_TokICCsig, 6, &kObjectData[4371], 0}, {"setAttr-SecDevSig", "secure device signature", NID_setAttr_SecDevSig, 6, &kObjectData[4377], 0}, {"set-brand-IATA-ATA", "set-brand-IATA-ATA", NID_set_brand_IATA_ATA, 4, &kObjectData[4383], 0}, {"set-brand-Diners", "set-brand-Diners", NID_set_brand_Diners, 4, &kObjectData[4387], 0}, {"set-brand-AmericanExpress", "set-brand-AmericanExpress", NID_set_brand_AmericanExpress, 4, &kObjectData[4391], 0}, {"set-brand-JCB", "set-brand-JCB", NID_set_brand_JCB, 4, &kObjectData[4395], 0}, {"set-brand-Visa", "set-brand-Visa", 
NID_set_brand_Visa, 4, &kObjectData[4399], 0}, {"set-brand-MasterCard", "set-brand-MasterCard", NID_set_brand_MasterCard, 4, &kObjectData[4403], 0}, {"set-brand-Novus", "set-brand-Novus", NID_set_brand_Novus, 5, &kObjectData[4407], 0}, {"DES-CDMF", "des-cdmf", NID_des_cdmf, 8, &kObjectData[4412], 0}, {"rsaOAEPEncryptionSET", "rsaOAEPEncryptionSET", NID_rsaOAEPEncryptionSET, 9, &kObjectData[4420], 0}, {"ITU-T", "itu-t", NID_itu_t, 0, NULL, 0}, {"JOINT-ISO-ITU-T", "joint-iso-itu-t", NID_joint_iso_itu_t, 0, NULL, 0}, {"international-organizations", "International Organizations", NID_international_organizations, 1, &kObjectData[4429], 0}, {"msSmartcardLogin", "Microsoft Smartcardlogin", NID_ms_smartcard_login, 10, &kObjectData[4430], 0}, {"msUPN", "Microsoft Universal Principal Name", NID_ms_upn, 10, &kObjectData[4440], 0}, {"AES-128-CFB1", "aes-128-cfb1", NID_aes_128_cfb1, 0, NULL, 0}, {"AES-192-CFB1", "aes-192-cfb1", NID_aes_192_cfb1, 0, NULL, 0}, {"AES-256-CFB1", "aes-256-cfb1", NID_aes_256_cfb1, 0, NULL, 0}, {"AES-128-CFB8", "aes-128-cfb8", NID_aes_128_cfb8, 0, NULL, 0}, {"AES-192-CFB8", "aes-192-cfb8", NID_aes_192_cfb8, 0, NULL, 0}, {"AES-256-CFB8", "aes-256-cfb8", NID_aes_256_cfb8, 0, NULL, 0}, {"DES-CFB1", "des-cfb1", NID_des_cfb1, 0, NULL, 0}, {"DES-CFB8", "des-cfb8", NID_des_cfb8, 0, NULL, 0}, {"DES-EDE3-CFB1", "des-ede3-cfb1", NID_des_ede3_cfb1, 0, NULL, 0}, {"DES-EDE3-CFB8", "des-ede3-cfb8", NID_des_ede3_cfb8, 0, NULL, 0}, {"street", "streetAddress", NID_streetAddress, 3, &kObjectData[4450], 0}, {"postalCode", "postalCode", NID_postalCode, 3, &kObjectData[4453], 0}, {"id-ppl", "id-ppl", NID_id_ppl, 7, &kObjectData[4456], 0}, {"proxyCertInfo", "Proxy Certificate Information", NID_proxyCertInfo, 8, &kObjectData[4463], 0}, {"id-ppl-anyLanguage", "Any language", NID_id_ppl_anyLanguage, 8, &kObjectData[4471], 0}, {"id-ppl-inheritAll", "Inherit all", NID_id_ppl_inheritAll, 8, &kObjectData[4479], 0}, {"nameConstraints", "X509v3 Name Constraints", 
NID_name_constraints, 3, &kObjectData[4487], 0}, {"id-ppl-independent", "Independent", NID_Independent, 8, &kObjectData[4490], 0}, {"RSA-SHA256", "sha256WithRSAEncryption", NID_sha256WithRSAEncryption, 9, &kObjectData[4498], 0}, {"RSA-SHA384", "sha384WithRSAEncryption", NID_sha384WithRSAEncryption, 9, &kObjectData[4507], 0}, {"RSA-SHA512", "sha512WithRSAEncryption", NID_sha512WithRSAEncryption, 9, &kObjectData[4516], 0}, {"RSA-SHA224", "sha224WithRSAEncryption", NID_sha224WithRSAEncryption, 9, &kObjectData[4525], 0}, {"SHA256", "sha256", NID_sha256, 9, &kObjectData[4534], 0}, {"SHA384", "sha384", NID_sha384, 9, &kObjectData[4543], 0}, {"SHA512", "sha512", NID_sha512, 9, &kObjectData[4552], 0}, {"SHA224", "sha224", NID_sha224, 9, &kObjectData[4561], 0}, {"identified-organization", "identified-organization", NID_identified_organization, 1, &kObjectData[4570], 0}, {"certicom-arc", "certicom-arc", NID_certicom_arc, 3, &kObjectData[4571], 0}, {"wap", "wap", NID_wap, 2, &kObjectData[4574], 0}, {"wap-wsg", "wap-wsg", NID_wap_wsg, 3, &kObjectData[4576], 0}, {"id-characteristic-two-basis", "id-characteristic-two-basis", NID_X9_62_id_characteristic_two_basis, 8, &kObjectData[4579], 0}, {"onBasis", "onBasis", NID_X9_62_onBasis, 9, &kObjectData[4587], 0}, {"tpBasis", "tpBasis", NID_X9_62_tpBasis, 9, &kObjectData[4596], 0}, {"ppBasis", "ppBasis", NID_X9_62_ppBasis, 9, &kObjectData[4605], 0}, {"c2pnb163v1", "c2pnb163v1", NID_X9_62_c2pnb163v1, 8, &kObjectData[4614], 0}, {"c2pnb163v2", "c2pnb163v2", NID_X9_62_c2pnb163v2, 8, &kObjectData[4622], 0}, {"c2pnb163v3", "c2pnb163v3", NID_X9_62_c2pnb163v3, 8, &kObjectData[4630], 0}, {"c2pnb176v1", "c2pnb176v1", NID_X9_62_c2pnb176v1, 8, &kObjectData[4638], 0}, {"c2tnb191v1", "c2tnb191v1", NID_X9_62_c2tnb191v1, 8, &kObjectData[4646], 0}, {"c2tnb191v2", "c2tnb191v2", NID_X9_62_c2tnb191v2, 8, &kObjectData[4654], 0}, {"c2tnb191v3", "c2tnb191v3", NID_X9_62_c2tnb191v3, 8, &kObjectData[4662], 0}, {"c2onb191v4", "c2onb191v4", NID_X9_62_c2onb191v4, 
8, &kObjectData[4670], 0}, {"c2onb191v5", "c2onb191v5", NID_X9_62_c2onb191v5, 8, &kObjectData[4678], 0}, {"c2pnb208w1", "c2pnb208w1", NID_X9_62_c2pnb208w1, 8, &kObjectData[4686], 0}, {"c2tnb239v1", "c2tnb239v1", NID_X9_62_c2tnb239v1, 8, &kObjectData[4694], 0}, {"c2tnb239v2", "c2tnb239v2", NID_X9_62_c2tnb239v2, 8, &kObjectData[4702], 0}, {"c2tnb239v3", "c2tnb239v3", NID_X9_62_c2tnb239v3, 8, &kObjectData[4710], 0}, {"c2onb239v4", "c2onb239v4", NID_X9_62_c2onb239v4, 8, &kObjectData[4718], 0}, {"c2onb239v5", "c2onb239v5", NID_X9_62_c2onb239v5, 8, &kObjectData[4726], 0}, {"c2pnb272w1", "c2pnb272w1", NID_X9_62_c2pnb272w1, 8, &kObjectData[4734], 0}, {"c2pnb304w1", "c2pnb304w1", NID_X9_62_c2pnb304w1, 8, &kObjectData[4742], 0}, {"c2tnb359v1", "c2tnb359v1", NID_X9_62_c2tnb359v1, 8, &kObjectData[4750], 0}, {"c2pnb368w1", "c2pnb368w1", NID_X9_62_c2pnb368w1, 8, &kObjectData[4758], 0}, {"c2tnb431r1", "c2tnb431r1", NID_X9_62_c2tnb431r1, 8, &kObjectData[4766], 0}, {"secp112r1", "secp112r1", NID_secp112r1, 5, &kObjectData[4774], 0}, {"secp112r2", "secp112r2", NID_secp112r2, 5, &kObjectData[4779], 0}, {"secp128r1", "secp128r1", NID_secp128r1, 5, &kObjectData[4784], 0}, {"secp128r2", "secp128r2", NID_secp128r2, 5, &kObjectData[4789], 0}, {"secp160k1", "secp160k1", NID_secp160k1, 5, &kObjectData[4794], 0}, {"secp160r1", "secp160r1", NID_secp160r1, 5, &kObjectData[4799], 0}, {"secp160r2", "secp160r2", NID_secp160r2, 5, &kObjectData[4804], 0}, {"secp192k1", "secp192k1", NID_secp192k1, 5, &kObjectData[4809], 0}, {"secp224k1", "secp224k1", NID_secp224k1, 5, &kObjectData[4814], 0}, {"secp224r1", "secp224r1", NID_secp224r1, 5, &kObjectData[4819], 0}, {"secp256k1", "secp256k1", NID_secp256k1, 5, &kObjectData[4824], 0}, {"secp384r1", "secp384r1", NID_secp384r1, 5, &kObjectData[4829], 0}, {"secp521r1", "secp521r1", NID_secp521r1, 5, &kObjectData[4834], 0}, {"sect113r1", "sect113r1", NID_sect113r1, 5, &kObjectData[4839], 0}, {"sect113r2", "sect113r2", NID_sect113r2, 5, &kObjectData[4844], 0}, 
{"sect131r1", "sect131r1", NID_sect131r1, 5, &kObjectData[4849], 0}, {"sect131r2", "sect131r2", NID_sect131r2, 5, &kObjectData[4854], 0}, {"sect163k1", "sect163k1", NID_sect163k1, 5, &kObjectData[4859], 0}, {"sect163r1", "sect163r1", NID_sect163r1, 5, &kObjectData[4864], 0}, {"sect163r2", "sect163r2", NID_sect163r2, 5, &kObjectData[4869], 0}, {"sect193r1", "sect193r1", NID_sect193r1, 5, &kObjectData[4874], 0}, {"sect193r2", "sect193r2", NID_sect193r2, 5, &kObjectData[4879], 0}, {"sect233k1", "sect233k1", NID_sect233k1, 5, &kObjectData[4884], 0}, {"sect233r1", "sect233r1", NID_sect233r1, 5, &kObjectData[4889], 0}, {"sect239k1", "sect239k1", NID_sect239k1, 5, &kObjectData[4894], 0}, {"sect283k1", "sect283k1", NID_sect283k1, 5, &kObjectData[4899], 0}, {"sect283r1", "sect283r1", NID_sect283r1, 5, &kObjectData[4904], 0}, {"sect409k1", "sect409k1", NID_sect409k1, 5, &kObjectData[4909], 0}, {"sect409r1", "sect409r1", NID_sect409r1, 5, &kObjectData[4914], 0}, {"sect571k1", "sect571k1", NID_sect571k1, 5, &kObjectData[4919], 0}, {"sect571r1", "sect571r1", NID_sect571r1, 5, &kObjectData[4924], 0}, {"wap-wsg-idm-ecid-wtls1", "wap-wsg-idm-ecid-wtls1", NID_wap_wsg_idm_ecid_wtls1, 5, &kObjectData[4929], 0}, {"wap-wsg-idm-ecid-wtls3", "wap-wsg-idm-ecid-wtls3", NID_wap_wsg_idm_ecid_wtls3, 5, &kObjectData[4934], 0}, {"wap-wsg-idm-ecid-wtls4", "wap-wsg-idm-ecid-wtls4", NID_wap_wsg_idm_ecid_wtls4, 5, &kObjectData[4939], 0}, {"wap-wsg-idm-ecid-wtls5", "wap-wsg-idm-ecid-wtls5", NID_wap_wsg_idm_ecid_wtls5, 5, &kObjectData[4944], 0}, {"wap-wsg-idm-ecid-wtls6", "wap-wsg-idm-ecid-wtls6", NID_wap_wsg_idm_ecid_wtls6, 5, &kObjectData[4949], 0}, {"wap-wsg-idm-ecid-wtls7", "wap-wsg-idm-ecid-wtls7", NID_wap_wsg_idm_ecid_wtls7, 5, &kObjectData[4954], 0}, {"wap-wsg-idm-ecid-wtls8", "wap-wsg-idm-ecid-wtls8", NID_wap_wsg_idm_ecid_wtls8, 5, &kObjectData[4959], 0}, {"wap-wsg-idm-ecid-wtls9", "wap-wsg-idm-ecid-wtls9", NID_wap_wsg_idm_ecid_wtls9, 5, &kObjectData[4964], 0}, {"wap-wsg-idm-ecid-wtls10", 
"wap-wsg-idm-ecid-wtls10", NID_wap_wsg_idm_ecid_wtls10, 5, &kObjectData[4969], 0}, {"wap-wsg-idm-ecid-wtls11", "wap-wsg-idm-ecid-wtls11", NID_wap_wsg_idm_ecid_wtls11, 5, &kObjectData[4974], 0}, {"wap-wsg-idm-ecid-wtls12", "wap-wsg-idm-ecid-wtls12", NID_wap_wsg_idm_ecid_wtls12, 5, &kObjectData[4979], 0}, {"anyPolicy", "X509v3 Any Policy", NID_any_policy, 4, &kObjectData[4984], 0}, {"policyMappings", "X509v3 Policy Mappings", NID_policy_mappings, 3, &kObjectData[4988], 0}, {"inhibitAnyPolicy", "X509v3 Inhibit Any Policy", NID_inhibit_any_policy, 3, &kObjectData[4991], 0}, {"Oakley-EC2N-3", "ipsec3", NID_ipsec3, 0, NULL, 0}, {"Oakley-EC2N-4", "ipsec4", NID_ipsec4, 0, NULL, 0}, {"CAMELLIA-128-CBC", "camellia-128-cbc", NID_camellia_128_cbc, 11, &kObjectData[4994], 0}, {"CAMELLIA-192-CBC", "camellia-192-cbc", NID_camellia_192_cbc, 11, &kObjectData[5005], 0}, {"CAMELLIA-256-CBC", "camellia-256-cbc", NID_camellia_256_cbc, 11, &kObjectData[5016], 0}, {"CAMELLIA-128-ECB", "camellia-128-ecb", NID_camellia_128_ecb, 8, &kObjectData[5027], 0}, {"CAMELLIA-192-ECB", "camellia-192-ecb", NID_camellia_192_ecb, 8, &kObjectData[5035], 0}, {"CAMELLIA-256-ECB", "camellia-256-ecb", NID_camellia_256_ecb, 8, &kObjectData[5043], 0}, {"CAMELLIA-128-CFB", "camellia-128-cfb", NID_camellia_128_cfb128, 8, &kObjectData[5051], 0}, {"CAMELLIA-192-CFB", "camellia-192-cfb", NID_camellia_192_cfb128, 8, &kObjectData[5059], 0}, {"CAMELLIA-256-CFB", "camellia-256-cfb", NID_camellia_256_cfb128, 8, &kObjectData[5067], 0}, {"CAMELLIA-128-CFB1", "camellia-128-cfb1", NID_camellia_128_cfb1, 0, NULL, 0}, {"CAMELLIA-192-CFB1", "camellia-192-cfb1", NID_camellia_192_cfb1, 0, NULL, 0}, {"CAMELLIA-256-CFB1", "camellia-256-cfb1", NID_camellia_256_cfb1, 0, NULL, 0}, {"CAMELLIA-128-CFB8", "camellia-128-cfb8", NID_camellia_128_cfb8, 0, NULL, 0}, {"CAMELLIA-192-CFB8", "camellia-192-cfb8", NID_camellia_192_cfb8, 0, NULL, 0}, {"CAMELLIA-256-CFB8", "camellia-256-cfb8", NID_camellia_256_cfb8, 0, NULL, 0}, {"CAMELLIA-128-OFB", 
"camellia-128-ofb", NID_camellia_128_ofb128, 8, &kObjectData[5075], 0}, {"CAMELLIA-192-OFB", "camellia-192-ofb", NID_camellia_192_ofb128, 8, &kObjectData[5083], 0}, {"CAMELLIA-256-OFB", "camellia-256-ofb", NID_camellia_256_ofb128, 8, &kObjectData[5091], 0}, {"subjectDirectoryAttributes", "X509v3 Subject Directory Attributes", NID_subject_directory_attributes, 3, &kObjectData[5099], 0}, {"issuingDistributionPoint", "X509v3 Issuing Distribution Point", NID_issuing_distribution_point, 3, &kObjectData[5102], 0}, {"certificateIssuer", "X509v3 Certificate Issuer", NID_certificate_issuer, 3, &kObjectData[5105], 0}, {NULL, NULL, NID_undef, 0, NULL, 0}, {"KISA", "kisa", NID_kisa, 6, &kObjectData[5108], 0}, {NULL, NULL, NID_undef, 0, NULL, 0}, {NULL, NULL, NID_undef, 0, NULL, 0}, {"SEED-ECB", "seed-ecb", NID_seed_ecb, 8, &kObjectData[5114], 0}, {"SEED-CBC", "seed-cbc", NID_seed_cbc, 8, &kObjectData[5122], 0}, {"SEED-OFB", "seed-ofb", NID_seed_ofb128, 8, &kObjectData[5130], 0}, {"SEED-CFB", "seed-cfb", NID_seed_cfb128, 8, &kObjectData[5138], 0}, {"HMAC-MD5", "hmac-md5", NID_hmac_md5, 8, &kObjectData[5146], 0}, {"HMAC-SHA1", "hmac-sha1", NID_hmac_sha1, 8, &kObjectData[5154], 0}, {"id-PasswordBasedMAC", "password based MAC", NID_id_PasswordBasedMAC, 9, &kObjectData[5162], 0}, {"id-DHBasedMac", "Diffie-Hellman based MAC", NID_id_DHBasedMac, 9, &kObjectData[5171], 0}, {"id-it-suppLangTags", "id-it-suppLangTags", NID_id_it_suppLangTags, 8, &kObjectData[5180], 0}, {"caRepository", "CA Repository", NID_caRepository, 8, &kObjectData[5188], 0}, {"id-smime-ct-compressedData", "id-smime-ct-compressedData", NID_id_smime_ct_compressedData, 11, &kObjectData[5196], 0}, {"id-ct-asciiTextWithCRLF", "id-ct-asciiTextWithCRLF", NID_id_ct_asciiTextWithCRLF, 11, &kObjectData[5207], 0}, {"id-aes128-wrap", "id-aes128-wrap", NID_id_aes128_wrap, 9, &kObjectData[5218], 0}, {"id-aes192-wrap", "id-aes192-wrap", NID_id_aes192_wrap, 9, &kObjectData[5227], 0}, {"id-aes256-wrap", "id-aes256-wrap", 
NID_id_aes256_wrap, 9, &kObjectData[5236], 0}, {"ecdsa-with-Recommended", "ecdsa-with-Recommended", NID_ecdsa_with_Recommended, 7, &kObjectData[5245], 0}, {"ecdsa-with-Specified", "ecdsa-with-Specified", NID_ecdsa_with_Specified, 7, &kObjectData[5252], 0}, {"ecdsa-with-SHA224", "ecdsa-with-SHA224", NID_ecdsa_with_SHA224, 8, &kObjectData[5259], 0}, {"ecdsa-with-SHA256", "ecdsa-with-SHA256", NID_ecdsa_with_SHA256, 8, &kObjectData[5267], 0}, {"ecdsa-with-SHA384", "ecdsa-with-SHA384", NID_ecdsa_with_SHA384, 8, &kObjectData[5275], 0}, {"ecdsa-with-SHA512", "ecdsa-with-SHA512", NID_ecdsa_with_SHA512, 8, &kObjectData[5283], 0}, {"hmacWithMD5", "hmacWithMD5", NID_hmacWithMD5, 8, &kObjectData[5291], 0}, {"hmacWithSHA224", "hmacWithSHA224", NID_hmacWithSHA224, 8, &kObjectData[5299], 0}, {"hmacWithSHA256", "hmacWithSHA256", NID_hmacWithSHA256, 8, &kObjectData[5307], 0}, {"hmacWithSHA384", "hmacWithSHA384", NID_hmacWithSHA384, 8, &kObjectData[5315], 0}, {"hmacWithSHA512", "hmacWithSHA512", NID_hmacWithSHA512, 8, &kObjectData[5323], 0}, {"dsa_with_SHA224", "dsa_with_SHA224", NID_dsa_with_SHA224, 9, &kObjectData[5331], 0}, {"dsa_with_SHA256", "dsa_with_SHA256", NID_dsa_with_SHA256, 9, &kObjectData[5340], 0}, {"whirlpool", "whirlpool", NID_whirlpool, 6, &kObjectData[5349], 0}, {"cryptopro", "cryptopro", NID_cryptopro, 5, &kObjectData[5355], 0}, {"cryptocom", "cryptocom", NID_cryptocom, 5, &kObjectData[5360], 0}, {"id-GostR3411-94-with-GostR3410-2001", "GOST R 34.11-94 with GOST R 34.10-2001", NID_id_GostR3411_94_with_GostR3410_2001, 6, &kObjectData[5365], 0}, {"id-GostR3411-94-with-GostR3410-94", "GOST R 34.11-94 with GOST R 34.10-94", NID_id_GostR3411_94_with_GostR3410_94, 6, &kObjectData[5371], 0}, {"md_gost94", "GOST R 34.11-94", NID_id_GostR3411_94, 6, &kObjectData[5377], 0}, {"id-HMACGostR3411-94", "HMAC GOST 34.11-94", NID_id_HMACGostR3411_94, 6, &kObjectData[5383], 0}, {"gost2001", "GOST R 34.10-2001", NID_id_GostR3410_2001, 6, &kObjectData[5389], 0}, {"gost94", "GOST R 
34.10-94", NID_id_GostR3410_94, 6, &kObjectData[5395], 0}, {"gost89", "GOST 28147-89", NID_id_Gost28147_89, 6, &kObjectData[5401], 0}, {"gost89-cnt", "gost89-cnt", NID_gost89_cnt, 0, NULL, 0}, {"gost-mac", "GOST 28147-89 MAC", NID_id_Gost28147_89_MAC, 6, &kObjectData[5407], 0}, {"prf-gostr3411-94", "GOST R 34.11-94 PRF", NID_id_GostR3411_94_prf, 6, &kObjectData[5413], 0}, {"id-GostR3410-2001DH", "GOST R 34.10-2001 DH", NID_id_GostR3410_2001DH, 6, &kObjectData[5419], 0}, {"id-GostR3410-94DH", "GOST R 34.10-94 DH", NID_id_GostR3410_94DH, 6, &kObjectData[5425], 0}, {"id-Gost28147-89-CryptoPro-KeyMeshing", "id-Gost28147-89-CryptoPro-KeyMeshing", NID_id_Gost28147_89_CryptoPro_KeyMeshing, 7, &kObjectData[5431], 0}, {"id-Gost28147-89-None-KeyMeshing", "id-Gost28147-89-None-KeyMeshing", NID_id_Gost28147_89_None_KeyMeshing, 7, &kObjectData[5438], 0}, {"id-GostR3411-94-TestParamSet", "id-GostR3411-94-TestParamSet", NID_id_GostR3411_94_TestParamSet, 7, &kObjectData[5445], 0}, {"id-GostR3411-94-CryptoProParamSet", "id-GostR3411-94-CryptoProParamSet", NID_id_GostR3411_94_CryptoProParamSet, 7, &kObjectData[5452], 0}, {"id-Gost28147-89-TestParamSet", "id-Gost28147-89-TestParamSet", NID_id_Gost28147_89_TestParamSet, 7, &kObjectData[5459], 0}, {"id-Gost28147-89-CryptoPro-A-ParamSet", "id-Gost28147-89-CryptoPro-A-ParamSet", NID_id_Gost28147_89_CryptoPro_A_ParamSet, 7, &kObjectData[5466], 0}, {"id-Gost28147-89-CryptoPro-B-ParamSet", "id-Gost28147-89-CryptoPro-B-ParamSet", NID_id_Gost28147_89_CryptoPro_B_ParamSet, 7, &kObjectData[5473], 0}, {"id-Gost28147-89-CryptoPro-C-ParamSet", "id-Gost28147-89-CryptoPro-C-ParamSet", NID_id_Gost28147_89_CryptoPro_C_ParamSet, 7, &kObjectData[5480], 0}, {"id-Gost28147-89-CryptoPro-D-ParamSet", "id-Gost28147-89-CryptoPro-D-ParamSet", NID_id_Gost28147_89_CryptoPro_D_ParamSet, 7, &kObjectData[5487], 0}, {"id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet", "id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet", NID_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet, 7, 
&kObjectData[5494], 0}, {"id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet", "id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet", NID_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet, 7, &kObjectData[5501], 0}, {"id-Gost28147-89-CryptoPro-RIC-1-ParamSet", "id-Gost28147-89-CryptoPro-RIC-1-ParamSet", NID_id_Gost28147_89_CryptoPro_RIC_1_ParamSet, 7, &kObjectData[5508], 0}, {"id-GostR3410-94-TestParamSet", "id-GostR3410-94-TestParamSet", NID_id_GostR3410_94_TestParamSet, 7, &kObjectData[5515], 0}, {"id-GostR3410-94-CryptoPro-A-ParamSet", "id-GostR3410-94-CryptoPro-A-ParamSet", NID_id_GostR3410_94_CryptoPro_A_ParamSet, 7, &kObjectData[5522], 0}, {"id-GostR3410-94-CryptoPro-B-ParamSet", "id-GostR3410-94-CryptoPro-B-ParamSet", NID_id_GostR3410_94_CryptoPro_B_ParamSet, 7, &kObjectData[5529], 0}, {"id-GostR3410-94-CryptoPro-C-ParamSet", "id-GostR3410-94-CryptoPro-C-ParamSet", NID_id_GostR3410_94_CryptoPro_C_ParamSet, 7, &kObjectData[5536], 0}, {"id-GostR3410-94-CryptoPro-D-ParamSet", "id-GostR3410-94-CryptoPro-D-ParamSet", NID_id_GostR3410_94_CryptoPro_D_ParamSet, 7, &kObjectData[5543], 0}, {"id-GostR3410-94-CryptoPro-XchA-ParamSet", "id-GostR3410-94-CryptoPro-XchA-ParamSet", NID_id_GostR3410_94_CryptoPro_XchA_ParamSet, 7, &kObjectData[5550], 0}, {"id-GostR3410-94-CryptoPro-XchB-ParamSet", "id-GostR3410-94-CryptoPro-XchB-ParamSet", NID_id_GostR3410_94_CryptoPro_XchB_ParamSet, 7, &kObjectData[5557], 0}, {"id-GostR3410-94-CryptoPro-XchC-ParamSet", "id-GostR3410-94-CryptoPro-XchC-ParamSet", NID_id_GostR3410_94_CryptoPro_XchC_ParamSet, 7, &kObjectData[5564], 0}, {"id-GostR3410-2001-TestParamSet", "id-GostR3410-2001-TestParamSet", NID_id_GostR3410_2001_TestParamSet, 7, &kObjectData[5571], 0}, {"id-GostR3410-2001-CryptoPro-A-ParamSet", "id-GostR3410-2001-CryptoPro-A-ParamSet", NID_id_GostR3410_2001_CryptoPro_A_ParamSet, 7, &kObjectData[5578], 0}, {"id-GostR3410-2001-CryptoPro-B-ParamSet", "id-GostR3410-2001-CryptoPro-B-ParamSet", NID_id_GostR3410_2001_CryptoPro_B_ParamSet, 7, 
&kObjectData[5585], 0}, {"id-GostR3410-2001-CryptoPro-C-ParamSet", "id-GostR3410-2001-CryptoPro-C-ParamSet", NID_id_GostR3410_2001_CryptoPro_C_ParamSet, 7, &kObjectData[5592], 0}, {"id-GostR3410-2001-CryptoPro-XchA-ParamSet", "id-GostR3410-2001-CryptoPro-XchA-ParamSet", NID_id_GostR3410_2001_CryptoPro_XchA_ParamSet, 7, &kObjectData[5599], 0}, {"id-GostR3410-2001-CryptoPro-XchB-ParamSet", "id-GostR3410-2001-CryptoPro-XchB-ParamSet", NID_id_GostR3410_2001_CryptoPro_XchB_ParamSet, 7, &kObjectData[5606], 0}, {"id-GostR3410-94-a", "id-GostR3410-94-a", NID_id_GostR3410_94_a, 7, &kObjectData[5613], 0}, {"id-GostR3410-94-aBis", "id-GostR3410-94-aBis", NID_id_GostR3410_94_aBis, 7, &kObjectData[5620], 0}, {"id-GostR3410-94-b", "id-GostR3410-94-b", NID_id_GostR3410_94_b, 7, &kObjectData[5627], 0}, {"id-GostR3410-94-bBis", "id-GostR3410-94-bBis", NID_id_GostR3410_94_bBis, 7, &kObjectData[5634], 0}, {"id-Gost28147-89-cc", "GOST 28147-89 Cryptocom ParamSet", NID_id_Gost28147_89_cc, 8, &kObjectData[5641], 0}, {"gost94cc", "GOST 34.10-94 Cryptocom", NID_id_GostR3410_94_cc, 8, &kObjectData[5649], 0}, {"gost2001cc", "GOST 34.10-2001 Cryptocom", NID_id_GostR3410_2001_cc, 8, &kObjectData[5657], 0}, {"id-GostR3411-94-with-GostR3410-94-cc", "GOST R 34.11-94 with GOST R 34.10-94 Cryptocom", NID_id_GostR3411_94_with_GostR3410_94_cc, 8, &kObjectData[5665], 0}, {"id-GostR3411-94-with-GostR3410-2001-cc", "GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom", NID_id_GostR3411_94_with_GostR3410_2001_cc, 8, &kObjectData[5673], 0}, {"id-GostR3410-2001-ParamSet-cc", "GOST R 3410-2001 Parameter Set Cryptocom", NID_id_GostR3410_2001_ParamSet_cc, 8, &kObjectData[5681], 0}, {"HMAC", "hmac", NID_hmac, 0, NULL, 0}, {"LocalKeySet", "Microsoft Local Key set", NID_LocalKeySet, 9, &kObjectData[5689], 0}, {"freshestCRL", "X509v3 Freshest CRL", NID_freshest_crl, 3, &kObjectData[5698], 0}, {"id-on-permanentIdentifier", "Permanent Identifier", NID_id_on_permanentIdentifier, 8, &kObjectData[5701], 0}, 
{"searchGuide", "searchGuide", NID_searchGuide, 3, &kObjectData[5709], 0}, {"businessCategory", "businessCategory", NID_businessCategory, 3, &kObjectData[5712], 0}, {"postalAddress", "postalAddress", NID_postalAddress, 3, &kObjectData[5715], 0}, {"postOfficeBox", "postOfficeBox", NID_postOfficeBox, 3, &kObjectData[5718], 0}, {"physicalDeliveryOfficeName", "physicalDeliveryOfficeName", NID_physicalDeliveryOfficeName, 3, &kObjectData[5721], 0}, {"telephoneNumber", "telephoneNumber", NID_telephoneNumber, 3, &kObjectData[5724], 0}, {"telexNumber", "telexNumber", NID_telexNumber, 3, &kObjectData[5727], 0}, {"teletexTerminalIdentifier", "teletexTerminalIdentifier", NID_teletexTerminalIdentifier, 3, &kObjectData[5730], 0}, {"facsimileTelephoneNumber", "facsimileTelephoneNumber", NID_facsimileTelephoneNumber, 3, &kObjectData[5733], 0}, {"x121Address", "x121Address", NID_x121Address, 3, &kObjectData[5736], 0}, {"internationaliSDNNumber", "internationaliSDNNumber", NID_internationaliSDNNumber, 3, &kObjectData[5739], 0}, {"registeredAddress", "registeredAddress", NID_registeredAddress, 3, &kObjectData[5742], 0}, {"destinationIndicator", "destinationIndicator", NID_destinationIndicator, 3, &kObjectData[5745], 0}, {"preferredDeliveryMethod", "preferredDeliveryMethod", NID_preferredDeliveryMethod, 3, &kObjectData[5748], 0}, {"presentationAddress", "presentationAddress", NID_presentationAddress, 3, &kObjectData[5751], 0}, {"supportedApplicationContext", "supportedApplicationContext", NID_supportedApplicationContext, 3, &kObjectData[5754], 0}, {"member", "member", NID_member, 3, &kObjectData[5757], 0}, {"owner", "owner", NID_owner, 3, &kObjectData[5760], 0}, {"roleOccupant", "roleOccupant", NID_roleOccupant, 3, &kObjectData[5763], 0}, {"seeAlso", "seeAlso", NID_seeAlso, 3, &kObjectData[5766], 0}, {"userPassword", "userPassword", NID_userPassword, 3, &kObjectData[5769], 0}, {"userCertificate", "userCertificate", NID_userCertificate, 3, &kObjectData[5772], 0}, {"cACertificate", 
"cACertificate", NID_cACertificate, 3, &kObjectData[5775], 0}, {"authorityRevocationList", "authorityRevocationList", NID_authorityRevocationList, 3, &kObjectData[5778], 0}, {"certificateRevocationList", "certificateRevocationList", NID_certificateRevocationList, 3, &kObjectData[5781], 0}, {"crossCertificatePair", "crossCertificatePair", NID_crossCertificatePair, 3, &kObjectData[5784], 0}, {"enhancedSearchGuide", "enhancedSearchGuide", NID_enhancedSearchGuide, 3, &kObjectData[5787], 0}, {"protocolInformation", "protocolInformation", NID_protocolInformation, 3, &kObjectData[5790], 0}, {"distinguishedName", "distinguishedName", NID_distinguishedName, 3, &kObjectData[5793], 0}, {"uniqueMember", "uniqueMember", NID_uniqueMember, 3, &kObjectData[5796], 0}, {"houseIdentifier", "houseIdentifier", NID_houseIdentifier, 3, &kObjectData[5799], 0}, {"supportedAlgorithms", "supportedAlgorithms", NID_supportedAlgorithms, 3, &kObjectData[5802], 0}, {"deltaRevocationList", "deltaRevocationList", NID_deltaRevocationList, 3, &kObjectData[5805], 0}, {"dmdName", "dmdName", NID_dmdName, 3, &kObjectData[5808], 0}, {"id-alg-PWRI-KEK", "id-alg-PWRI-KEK", NID_id_alg_PWRI_KEK, 11, &kObjectData[5811], 0}, {"CMAC", "cmac", NID_cmac, 0, NULL, 0}, {"id-aes128-GCM", "aes-128-gcm", NID_aes_128_gcm, 9, &kObjectData[5822], 0}, {"id-aes128-CCM", "aes-128-ccm", NID_aes_128_ccm, 9, &kObjectData[5831], 0}, {"id-aes128-wrap-pad", "id-aes128-wrap-pad", NID_id_aes128_wrap_pad, 9, &kObjectData[5840], 0}, {"id-aes192-GCM", "aes-192-gcm", NID_aes_192_gcm, 9, &kObjectData[5849], 0}, {"id-aes192-CCM", "aes-192-ccm", NID_aes_192_ccm, 9, &kObjectData[5858], 0}, {"id-aes192-wrap-pad", "id-aes192-wrap-pad", NID_id_aes192_wrap_pad, 9, &kObjectData[5867], 0}, {"id-aes256-GCM", "aes-256-gcm", NID_aes_256_gcm, 9, &kObjectData[5876], 0}, {"id-aes256-CCM", "aes-256-ccm", NID_aes_256_ccm, 9, &kObjectData[5885], 0}, {"id-aes256-wrap-pad", "id-aes256-wrap-pad", NID_id_aes256_wrap_pad, 9, &kObjectData[5894], 0}, 
{"AES-128-CTR", "aes-128-ctr", NID_aes_128_ctr, 0, NULL, 0}, {"AES-192-CTR", "aes-192-ctr", NID_aes_192_ctr, 0, NULL, 0}, {"AES-256-CTR", "aes-256-ctr", NID_aes_256_ctr, 0, NULL, 0}, {"id-camellia128-wrap", "id-camellia128-wrap", NID_id_camellia128_wrap, 11, &kObjectData[5903], 0}, {"id-camellia192-wrap", "id-camellia192-wrap", NID_id_camellia192_wrap, 11, &kObjectData[5914], 0}, {"id-camellia256-wrap", "id-camellia256-wrap", NID_id_camellia256_wrap, 11, &kObjectData[5925], 0}, {"anyExtendedKeyUsage", "Any Extended Key Usage", NID_anyExtendedKeyUsage, 4, &kObjectData[5936], 0}, {"MGF1", "mgf1", NID_mgf1, 9, &kObjectData[5940], 0}, {"RSASSA-PSS", "rsassaPss", NID_rsassaPss, 9, &kObjectData[5949], 0}, {"AES-128-XTS", "aes-128-xts", NID_aes_128_xts, 0, NULL, 0}, {"AES-256-XTS", "aes-256-xts", NID_aes_256_xts, 0, NULL, 0}, {"RC4-HMAC-MD5", "rc4-hmac-md5", NID_rc4_hmac_md5, 0, NULL, 0}, {"AES-128-CBC-HMAC-SHA1", "aes-128-cbc-hmac-sha1", NID_aes_128_cbc_hmac_sha1, 0, NULL, 0}, {"AES-192-CBC-HMAC-SHA1", "aes-192-cbc-hmac-sha1", NID_aes_192_cbc_hmac_sha1, 0, NULL, 0}, {"AES-256-CBC-HMAC-SHA1", "aes-256-cbc-hmac-sha1", NID_aes_256_cbc_hmac_sha1, 0, NULL, 0}, {"RSAES-OAEP", "rsaesOaep", NID_rsaesOaep, 9, &kObjectData[5958], 0}, {"dhpublicnumber", "X9.42 DH", NID_dhpublicnumber, 7, &kObjectData[5967], 0}, {"brainpoolP160r1", "brainpoolP160r1", NID_brainpoolP160r1, 9, &kObjectData[5974], 0}, {"brainpoolP160t1", "brainpoolP160t1", NID_brainpoolP160t1, 9, &kObjectData[5983], 0}, {"brainpoolP192r1", "brainpoolP192r1", NID_brainpoolP192r1, 9, &kObjectData[5992], 0}, {"brainpoolP192t1", "brainpoolP192t1", NID_brainpoolP192t1, 9, &kObjectData[6001], 0}, {"brainpoolP224r1", "brainpoolP224r1", NID_brainpoolP224r1, 9, &kObjectData[6010], 0}, {"brainpoolP224t1", "brainpoolP224t1", NID_brainpoolP224t1, 9, &kObjectData[6019], 0}, {"brainpoolP256r1", "brainpoolP256r1", NID_brainpoolP256r1, 9, &kObjectData[6028], 0}, {"brainpoolP256t1", "brainpoolP256t1", NID_brainpoolP256t1, 9, 
&kObjectData[6037], 0}, {"brainpoolP320r1", "brainpoolP320r1", NID_brainpoolP320r1, 9, &kObjectData[6046], 0}, {"brainpoolP320t1", "brainpoolP320t1", NID_brainpoolP320t1, 9, &kObjectData[6055], 0}, {"brainpoolP384r1", "brainpoolP384r1", NID_brainpoolP384r1, 9, &kObjectData[6064], 0}, {"brainpoolP384t1", "brainpoolP384t1", NID_brainpoolP384t1, 9, &kObjectData[6073], 0}, {"brainpoolP512r1", "brainpoolP512r1", NID_brainpoolP512r1, 9, &kObjectData[6082], 0}, {"brainpoolP512t1", "brainpoolP512t1", NID_brainpoolP512t1, 9, &kObjectData[6091], 0}, {"PSPECIFIED", "pSpecified", NID_pSpecified, 9, &kObjectData[6100], 0}, {"dhSinglePass-stdDH-sha1kdf-scheme", "dhSinglePass-stdDH-sha1kdf-scheme", NID_dhSinglePass_stdDH_sha1kdf_scheme, 9, &kObjectData[6109], 0}, {"dhSinglePass-stdDH-sha224kdf-scheme", "dhSinglePass-stdDH-sha224kdf-scheme", NID_dhSinglePass_stdDH_sha224kdf_scheme, 6, &kObjectData[6118], 0}, {"dhSinglePass-stdDH-sha256kdf-scheme", "dhSinglePass-stdDH-sha256kdf-scheme", NID_dhSinglePass_stdDH_sha256kdf_scheme, 6, &kObjectData[6124], 0}, {"dhSinglePass-stdDH-sha384kdf-scheme", "dhSinglePass-stdDH-sha384kdf-scheme", NID_dhSinglePass_stdDH_sha384kdf_scheme, 6, &kObjectData[6130], 0}, {"dhSinglePass-stdDH-sha512kdf-scheme", "dhSinglePass-stdDH-sha512kdf-scheme", NID_dhSinglePass_stdDH_sha512kdf_scheme, 6, &kObjectData[6136], 0}, {"dhSinglePass-cofactorDH-sha1kdf-scheme", "dhSinglePass-cofactorDH-sha1kdf-scheme", NID_dhSinglePass_cofactorDH_sha1kdf_scheme, 9, &kObjectData[6142], 0}, {"dhSinglePass-cofactorDH-sha224kdf-scheme", "dhSinglePass-cofactorDH-sha224kdf-scheme", NID_dhSinglePass_cofactorDH_sha224kdf_scheme, 6, &kObjectData[6151], 0}, {"dhSinglePass-cofactorDH-sha256kdf-scheme", "dhSinglePass-cofactorDH-sha256kdf-scheme", NID_dhSinglePass_cofactorDH_sha256kdf_scheme, 6, &kObjectData[6157], 0}, {"dhSinglePass-cofactorDH-sha384kdf-scheme", "dhSinglePass-cofactorDH-sha384kdf-scheme", NID_dhSinglePass_cofactorDH_sha384kdf_scheme, 6, &kObjectData[6163], 0}, 
{"dhSinglePass-cofactorDH-sha512kdf-scheme", "dhSinglePass-cofactorDH-sha512kdf-scheme", NID_dhSinglePass_cofactorDH_sha512kdf_scheme, 6, &kObjectData[6169], 0}, {"dh-std-kdf", "dh-std-kdf", NID_dh_std_kdf, 0, NULL, 0}, {"dh-cofactor-kdf", "dh-cofactor-kdf", NID_dh_cofactor_kdf, 0, NULL, 0}, {"X25519", "X25519", NID_X25519, 3, &kObjectData[6175], 0}, {"ED25519", "ED25519", NID_ED25519, 3, &kObjectData[6178], 0}, {"ChaCha20-Poly1305", "chacha20-poly1305", NID_chacha20_poly1305, 0, NULL, 0}, {"KxRSA", "kx-rsa", NID_kx_rsa, 0, NULL, 0}, {"KxECDHE", "kx-ecdhe", NID_kx_ecdhe, 0, NULL, 0}, {"KxPSK", "kx-psk", NID_kx_psk, 0, NULL, 0}, {"AuthRSA", "auth-rsa", NID_auth_rsa, 0, NULL, 0}, {"AuthECDSA", "auth-ecdsa", NID_auth_ecdsa, 0, NULL, 0}, {"AuthPSK", "auth-psk", NID_auth_psk, 0, NULL, 0}, {"KxANY", "kx-any", NID_kx_any, 0, NULL, 0}, {"AuthANY", "auth-any", NID_auth_any, 0, NULL, 0}, {NULL, NULL, NID_undef, 0, NULL, 0}, {"ED448", "ED448", NID_ED448, 3, &kObjectData[6181], 0}, {"X448", "X448", NID_X448, 3, &kObjectData[6184], 0}, {"SHA512-256", "sha512-256", NID_sha512_256, 9, &kObjectData[6187], 0}, {"HKDF", "hkdf", NID_hkdf, 0, NULL, 0}, {"X25519Kyber768Draft00", "X25519Kyber768Draft00", NID_X25519Kyber768Draft00, 0, NULL, 0}, {"X25519MLKEM768", "X25519MLKEM768", NID_X25519MLKEM768, 0, NULL, 0}, }; static const uint16_t kNIDsInShortNameOrder[] = { 364 /* AD_DVCS */, 419 /* AES-128-CBC */, 916 /* AES-128-CBC-HMAC-SHA1 */, 421 /* AES-128-CFB */, 650 /* AES-128-CFB1 */, 653 /* AES-128-CFB8 */, 904 /* AES-128-CTR */, 418 /* AES-128-ECB */, 420 /* AES-128-OFB */, 913 /* AES-128-XTS */, 423 /* AES-192-CBC */, 917 /* AES-192-CBC-HMAC-SHA1 */, 425 /* AES-192-CFB */, 651 /* AES-192-CFB1 */, 654 /* AES-192-CFB8 */, 905 /* AES-192-CTR */, 422 /* AES-192-ECB */, 424 /* AES-192-OFB */, 427 /* AES-256-CBC */, 918 /* AES-256-CBC-HMAC-SHA1 */, 429 /* AES-256-CFB */, 652 /* AES-256-CFB1 */, 655 /* AES-256-CFB8 */, 906 /* AES-256-CTR */, 426 /* AES-256-ECB */, 428 /* AES-256-OFB */, 914 
/* AES-256-XTS */, 958 /* AuthANY */, 955 /* AuthECDSA */, 956 /* AuthPSK */, 954 /* AuthRSA */, 91 /* BF-CBC */, 93 /* BF-CFB */, 92 /* BF-ECB */, 94 /* BF-OFB */, 14 /* C */, 751 /* CAMELLIA-128-CBC */, 757 /* CAMELLIA-128-CFB */, 760 /* CAMELLIA-128-CFB1 */, 763 /* CAMELLIA-128-CFB8 */, 754 /* CAMELLIA-128-ECB */, 766 /* CAMELLIA-128-OFB */, 752 /* CAMELLIA-192-CBC */, 758 /* CAMELLIA-192-CFB */, 761 /* CAMELLIA-192-CFB1 */, 764 /* CAMELLIA-192-CFB8 */, 755 /* CAMELLIA-192-ECB */, 767 /* CAMELLIA-192-OFB */, 753 /* CAMELLIA-256-CBC */, 759 /* CAMELLIA-256-CFB */, 762 /* CAMELLIA-256-CFB1 */, 765 /* CAMELLIA-256-CFB8 */, 756 /* CAMELLIA-256-ECB */, 768 /* CAMELLIA-256-OFB */, 108 /* CAST5-CBC */, 110 /* CAST5-CFB */, 109 /* CAST5-ECB */, 111 /* CAST5-OFB */, 894 /* CMAC */, 13 /* CN */, 141 /* CRLReason */, 417 /* CSPName */, 950 /* ChaCha20-Poly1305 */, 367 /* CrlID */, 391 /* DC */, 31 /* DES-CBC */, 643 /* DES-CDMF */, 30 /* DES-CFB */, 656 /* DES-CFB1 */, 657 /* DES-CFB8 */, 29 /* DES-ECB */, 32 /* DES-EDE */, 43 /* DES-EDE-CBC */, 60 /* DES-EDE-CFB */, 62 /* DES-EDE-OFB */, 33 /* DES-EDE3 */, 44 /* DES-EDE3-CBC */, 61 /* DES-EDE3-CFB */, 658 /* DES-EDE3-CFB1 */, 659 /* DES-EDE3-CFB8 */, 63 /* DES-EDE3-OFB */, 45 /* DES-OFB */, 80 /* DESX-CBC */, 380 /* DOD */, 116 /* DSA */, 66 /* DSA-SHA */, 113 /* DSA-SHA1 */, 70 /* DSA-SHA1-old */, 67 /* DSA-old */, 297 /* DVCS */, 949 /* ED25519 */, 960 /* ED448 */, 99 /* GN */, 963 /* HKDF */, 855 /* HMAC */, 780 /* HMAC-MD5 */, 781 /* HMAC-SHA1 */, 381 /* IANA */, 34 /* IDEA-CBC */, 35 /* IDEA-CFB */, 36 /* IDEA-ECB */, 46 /* IDEA-OFB */, 181 /* ISO */, 183 /* ISO-US */, 645 /* ITU-T */, 646 /* JOINT-ISO-ITU-T */, 773 /* KISA */, 957 /* KxANY */, 952 /* KxECDHE */, 953 /* KxPSK */, 951 /* KxRSA */, 15 /* L */, 856 /* LocalKeySet */, 3 /* MD2 */, 257 /* MD4 */, 4 /* MD5 */, 114 /* MD5-SHA1 */, 95 /* MDC2 */, 911 /* MGF1 */, 388 /* Mail */, 57 /* Netscape */, 366 /* Nonce */, 17 /* O */, 178 /* OCSP */, 180 /* 
OCSPSigning */, 379 /* ORG */, 18 /* OU */, 749 /* Oakley-EC2N-3 */, 750 /* Oakley-EC2N-4 */, 9 /* PBE-MD2-DES */, 168 /* PBE-MD2-RC2-64 */, 10 /* PBE-MD5-DES */, 169 /* PBE-MD5-RC2-64 */, 147 /* PBE-SHA1-2DES */, 146 /* PBE-SHA1-3DES */, 170 /* PBE-SHA1-DES */, 148 /* PBE-SHA1-RC2-128 */, 149 /* PBE-SHA1-RC2-40 */, 68 /* PBE-SHA1-RC2-64 */, 144 /* PBE-SHA1-RC4-128 */, 145 /* PBE-SHA1-RC4-40 */, 161 /* PBES2 */, 69 /* PBKDF2 */, 162 /* PBMAC1 */, 127 /* PKIX */, 935 /* PSPECIFIED */, 98 /* RC2-40-CBC */, 166 /* RC2-64-CBC */, 37 /* RC2-CBC */, 39 /* RC2-CFB */, 38 /* RC2-ECB */, 40 /* RC2-OFB */, 5 /* RC4 */, 97 /* RC4-40 */, 915 /* RC4-HMAC-MD5 */, 120 /* RC5-CBC */, 122 /* RC5-CFB */, 121 /* RC5-ECB */, 123 /* RC5-OFB */, 117 /* RIPEMD160 */, 19 /* RSA */, 7 /* RSA-MD2 */, 396 /* RSA-MD4 */, 8 /* RSA-MD5 */, 96 /* RSA-MDC2 */, 104 /* RSA-NP-MD5 */, 119 /* RSA-RIPEMD160 */, 42 /* RSA-SHA */, 65 /* RSA-SHA1 */, 115 /* RSA-SHA1-2 */, 671 /* RSA-SHA224 */, 668 /* RSA-SHA256 */, 669 /* RSA-SHA384 */, 670 /* RSA-SHA512 */, 919 /* RSAES-OAEP */, 912 /* RSASSA-PSS */, 777 /* SEED-CBC */, 779 /* SEED-CFB */, 776 /* SEED-ECB */, 778 /* SEED-OFB */, 41 /* SHA */, 64 /* SHA1 */, 675 /* SHA224 */, 672 /* SHA256 */, 673 /* SHA384 */, 674 /* SHA512 */, 962 /* SHA512-256 */, 188 /* SMIME */, 167 /* SMIME-CAPS */, 100 /* SN */, 16 /* ST */, 143 /* SXNetID */, 458 /* UID */, 948 /* X25519 */, 964 /* X25519Kyber768Draft00 */, 965 /* X25519MLKEM768 */, 961 /* X448 */, 11 /* X500 */, 378 /* X500algorithms */, 12 /* X509 */, 184 /* X9-57 */, 185 /* X9cm */, 125 /* ZLIB */, 478 /* aRecord */, 289 /* aaControls */, 287 /* ac-auditEntity */, 397 /* ac-proxying */, 288 /* ac-targeting */, 368 /* acceptableResponses */, 446 /* account */, 363 /* ad_timestamping */, 376 /* algorithm */, 405 /* ansi-X9-62 */, 910 /* anyExtendedKeyUsage */, 746 /* anyPolicy */, 370 /* archiveCutoff */, 484 /* associatedDomain */, 485 /* associatedName */, 501 /* audio */, 177 /* authorityInfoAccess */, 90 /* 
authorityKeyIdentifier */, 882 /* authorityRevocationList */, 87 /* basicConstraints */, 365 /* basicOCSPResponse */, 285 /* biometricInfo */, 921 /* brainpoolP160r1 */, 922 /* brainpoolP160t1 */, 923 /* brainpoolP192r1 */, 924 /* brainpoolP192t1 */, 925 /* brainpoolP224r1 */, 926 /* brainpoolP224t1 */, 927 /* brainpoolP256r1 */, 928 /* brainpoolP256t1 */, 929 /* brainpoolP320r1 */, 930 /* brainpoolP320t1 */, 931 /* brainpoolP384r1 */, 932 /* brainpoolP384t1 */, 933 /* brainpoolP512r1 */, 934 /* brainpoolP512t1 */, 494 /* buildingName */, 860 /* businessCategory */, 691 /* c2onb191v4 */, 692 /* c2onb191v5 */, 697 /* c2onb239v4 */, 698 /* c2onb239v5 */, 684 /* c2pnb163v1 */, 685 /* c2pnb163v2 */, 686 /* c2pnb163v3 */, 687 /* c2pnb176v1 */, 693 /* c2pnb208w1 */, 699 /* c2pnb272w1 */, 700 /* c2pnb304w1 */, 702 /* c2pnb368w1 */, 688 /* c2tnb191v1 */, 689 /* c2tnb191v2 */, 690 /* c2tnb191v3 */, 694 /* c2tnb239v1 */, 695 /* c2tnb239v2 */, 696 /* c2tnb239v3 */, 701 /* c2tnb359v1 */, 703 /* c2tnb431r1 */, 881 /* cACertificate */, 483 /* cNAMERecord */, 179 /* caIssuers */, 785 /* caRepository */, 443 /* caseIgnoreIA5StringSyntax */, 152 /* certBag */, 677 /* certicom-arc */, 771 /* certificateIssuer */, 89 /* certificatePolicies */, 883 /* certificateRevocationList */, 54 /* challengePassword */, 407 /* characteristic-two-field */, 395 /* clearance */, 130 /* clientAuth */, 131 /* codeSigning */, 50 /* contentType */, 53 /* countersignature */, 153 /* crlBag */, 103 /* crlDistributionPoints */, 88 /* crlNumber */, 884 /* crossCertificatePair */, 806 /* cryptocom */, 805 /* cryptopro */, 500 /* dITRedirect */, 451 /* dNSDomain */, 495 /* dSAQuality */, 434 /* data */, 390 /* dcobject */, 140 /* deltaCRL */, 891 /* deltaRevocationList */, 107 /* description */, 871 /* destinationIndicator */, 947 /* dh-cofactor-kdf */, 946 /* dh-std-kdf */, 28 /* dhKeyAgreement */, 941 /* dhSinglePass-cofactorDH-sha1kdf-scheme */, 942 /* dhSinglePass-cofactorDH-sha224kdf-scheme */, 943 /* 
dhSinglePass-cofactorDH-sha256kdf-scheme */, 944 /* dhSinglePass-cofactorDH-sha384kdf-scheme */, 945 /* dhSinglePass-cofactorDH-sha512kdf-scheme */, 936 /* dhSinglePass-stdDH-sha1kdf-scheme */, 937 /* dhSinglePass-stdDH-sha224kdf-scheme */, 938 /* dhSinglePass-stdDH-sha256kdf-scheme */, 939 /* dhSinglePass-stdDH-sha384kdf-scheme */, 940 /* dhSinglePass-stdDH-sha512kdf-scheme */, 920 /* dhpublicnumber */, 382 /* directory */, 887 /* distinguishedName */, 892 /* dmdName */, 174 /* dnQualifier */, 447 /* document */, 471 /* documentAuthor */, 468 /* documentIdentifier */, 472 /* documentLocation */, 502 /* documentPublisher */, 449 /* documentSeries */, 469 /* documentTitle */, 470 /* documentVersion */, 392 /* domain */, 452 /* domainRelatedObject */, 802 /* dsa_with_SHA224 */, 803 /* dsa_with_SHA256 */, 791 /* ecdsa-with-Recommended */, 416 /* ecdsa-with-SHA1 */, 793 /* ecdsa-with-SHA224 */, 794 /* ecdsa-with-SHA256 */, 795 /* ecdsa-with-SHA384 */, 796 /* ecdsa-with-SHA512 */, 792 /* ecdsa-with-Specified */, 48 /* emailAddress */, 132 /* emailProtection */, 885 /* enhancedSearchGuide */, 389 /* enterprises */, 384 /* experimental */, 172 /* extReq */, 56 /* extendedCertificateAttributes */, 126 /* extendedKeyUsage */, 372 /* extendedStatus */, 867 /* facsimileTelephoneNumber */, 462 /* favouriteDrink */, 857 /* freshestCRL */, 453 /* friendlyCountry */, 490 /* friendlyCountryName */, 156 /* friendlyName */, 509 /* generationQualifier */, 815 /* gost-mac */, 811 /* gost2001 */, 851 /* gost2001cc */, 813 /* gost89 */, 814 /* gost89-cnt */, 812 /* gost94 */, 850 /* gost94cc */, 797 /* hmacWithMD5 */, 163 /* hmacWithSHA1 */, 798 /* hmacWithSHA224 */, 799 /* hmacWithSHA256 */, 800 /* hmacWithSHA384 */, 801 /* hmacWithSHA512 */, 432 /* holdInstructionCallIssuer */, 430 /* holdInstructionCode */, 431 /* holdInstructionNone */, 433 /* holdInstructionReject */, 486 /* homePostalAddress */, 473 /* homeTelephoneNumber */, 466 /* host */, 889 /* houseIdentifier */, 442 /* 
iA5StringSyntax */, 783 /* id-DHBasedMac */, 824 /* id-Gost28147-89-CryptoPro-A-ParamSet */, 825 /* id-Gost28147-89-CryptoPro-B-ParamSet */, 826 /* id-Gost28147-89-CryptoPro-C-ParamSet */, 827 /* id-Gost28147-89-CryptoPro-D-ParamSet */, 819 /* id-Gost28147-89-CryptoPro-KeyMeshing */, 829 /* id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet */, 828 /* id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet */, 830 /* id-Gost28147-89-CryptoPro-RIC-1-ParamSet */, 820 /* id-Gost28147-89-None-KeyMeshing */, 823 /* id-Gost28147-89-TestParamSet */, 849 /* id-Gost28147-89-cc */, 840 /* id-GostR3410-2001-CryptoPro-A-ParamSet */, 841 /* id-GostR3410-2001-CryptoPro-B-ParamSet */, 842 /* id-GostR3410-2001-CryptoPro-C-ParamSet */, 843 /* id-GostR3410-2001-CryptoPro-XchA-ParamSet */, 844 /* id-GostR3410-2001-CryptoPro-XchB-ParamSet */, 854 /* id-GostR3410-2001-ParamSet-cc */, 839 /* id-GostR3410-2001-TestParamSet */, 817 /* id-GostR3410-2001DH */, 832 /* id-GostR3410-94-CryptoPro-A-ParamSet */, 833 /* id-GostR3410-94-CryptoPro-B-ParamSet */, 834 /* id-GostR3410-94-CryptoPro-C-ParamSet */, 835 /* id-GostR3410-94-CryptoPro-D-ParamSet */, 836 /* id-GostR3410-94-CryptoPro-XchA-ParamSet */, 837 /* id-GostR3410-94-CryptoPro-XchB-ParamSet */, 838 /* id-GostR3410-94-CryptoPro-XchC-ParamSet */, 831 /* id-GostR3410-94-TestParamSet */, 845 /* id-GostR3410-94-a */, 846 /* id-GostR3410-94-aBis */, 847 /* id-GostR3410-94-b */, 848 /* id-GostR3410-94-bBis */, 818 /* id-GostR3410-94DH */, 822 /* id-GostR3411-94-CryptoProParamSet */, 821 /* id-GostR3411-94-TestParamSet */, 807 /* id-GostR3411-94-with-GostR3410-2001 */, 853 /* id-GostR3411-94-with-GostR3410-2001-cc */, 808 /* id-GostR3411-94-with-GostR3410-94 */, 852 /* id-GostR3411-94-with-GostR3410-94-cc */, 810 /* id-HMACGostR3411-94 */, 782 /* id-PasswordBasedMAC */, 266 /* id-aca */, 355 /* id-aca-accessIdentity */, 354 /* id-aca-authenticationInfo */, 356 /* id-aca-chargingIdentity */, 399 /* id-aca-encAttrs */, 357 /* id-aca-group */, 358 /* id-aca-role 
*/, 176 /* id-ad */, 896 /* id-aes128-CCM */, 895 /* id-aes128-GCM */, 788 /* id-aes128-wrap */, 897 /* id-aes128-wrap-pad */, 899 /* id-aes192-CCM */, 898 /* id-aes192-GCM */, 789 /* id-aes192-wrap */, 900 /* id-aes192-wrap-pad */, 902 /* id-aes256-CCM */, 901 /* id-aes256-GCM */, 790 /* id-aes256-wrap */, 903 /* id-aes256-wrap-pad */, 262 /* id-alg */, 893 /* id-alg-PWRI-KEK */, 323 /* id-alg-des40 */, 326 /* id-alg-dh-pop */, 325 /* id-alg-dh-sig-hmac-sha1 */, 324 /* id-alg-noSignature */, 907 /* id-camellia128-wrap */, 908 /* id-camellia192-wrap */, 909 /* id-camellia256-wrap */, 268 /* id-cct */, 361 /* id-cct-PKIData */, 362 /* id-cct-PKIResponse */, 360 /* id-cct-crs */, 81 /* id-ce */, 680 /* id-characteristic-two-basis */, 263 /* id-cmc */, 334 /* id-cmc-addExtensions */, 346 /* id-cmc-confirmCertAcceptance */, 330 /* id-cmc-dataReturn */, 336 /* id-cmc-decryptedPOP */, 335 /* id-cmc-encryptedPOP */, 339 /* id-cmc-getCRL */, 338 /* id-cmc-getCert */, 328 /* id-cmc-identification */, 329 /* id-cmc-identityProof */, 337 /* id-cmc-lraPOPWitness */, 344 /* id-cmc-popLinkRandom */, 345 /* id-cmc-popLinkWitness */, 343 /* id-cmc-queryPending */, 333 /* id-cmc-recipientNonce */, 341 /* id-cmc-regInfo */, 342 /* id-cmc-responseInfo */, 340 /* id-cmc-revokeRequest */, 332 /* id-cmc-senderNonce */, 327 /* id-cmc-statusInfo */, 331 /* id-cmc-transactionId */, 787 /* id-ct-asciiTextWithCRLF */, 408 /* id-ecPublicKey */, 508 /* id-hex-multipart-message */, 507 /* id-hex-partial-message */, 260 /* id-it */, 302 /* id-it-caKeyUpdateInfo */, 298 /* id-it-caProtEncCert */, 311 /* id-it-confirmWaitTime */, 303 /* id-it-currentCRL */, 300 /* id-it-encKeyPairTypes */, 310 /* id-it-implicitConfirm */, 308 /* id-it-keyPairParamRep */, 307 /* id-it-keyPairParamReq */, 312 /* id-it-origPKIMessage */, 301 /* id-it-preferredSymmAlg */, 309 /* id-it-revPassphrase */, 299 /* id-it-signKeyPairTypes */, 305 /* id-it-subscriptionRequest */, 306 /* id-it-subscriptionResponse */, 784 /* 
id-it-suppLangTags */, 304 /* id-it-unsupportedOIDs */, 128 /* id-kp */, 280 /* id-mod-attribute-cert */, 274 /* id-mod-cmc */, 277 /* id-mod-cmp */, 284 /* id-mod-cmp2000 */, 273 /* id-mod-crmf */, 283 /* id-mod-dvcs */, 275 /* id-mod-kea-profile-88 */, 276 /* id-mod-kea-profile-93 */, 282 /* id-mod-ocsp */, 278 /* id-mod-qualified-cert-88 */, 279 /* id-mod-qualified-cert-93 */, 281 /* id-mod-timestamp-protocol */, 264 /* id-on */, 858 /* id-on-permanentIdentifier */, 347 /* id-on-personalData */, 265 /* id-pda */, 352 /* id-pda-countryOfCitizenship */, 353 /* id-pda-countryOfResidence */, 348 /* id-pda-dateOfBirth */, 351 /* id-pda-gender */, 349 /* id-pda-placeOfBirth */, 175 /* id-pe */, 261 /* id-pkip */, 258 /* id-pkix-mod */, 269 /* id-pkix1-explicit-88 */, 271 /* id-pkix1-explicit-93 */, 270 /* id-pkix1-implicit-88 */, 272 /* id-pkix1-implicit-93 */, 662 /* id-ppl */, 664 /* id-ppl-anyLanguage */, 667 /* id-ppl-independent */, 665 /* id-ppl-inheritAll */, 267 /* id-qcs */, 359 /* id-qcs-pkixQCSyntax-v1 */, 259 /* id-qt */, 164 /* id-qt-cps */, 165 /* id-qt-unotice */, 313 /* id-regCtrl */, 316 /* id-regCtrl-authenticator */, 319 /* id-regCtrl-oldCertID */, 318 /* id-regCtrl-pkiArchiveOptions */, 317 /* id-regCtrl-pkiPublicationInfo */, 320 /* id-regCtrl-protocolEncrKey */, 315 /* id-regCtrl-regToken */, 314 /* id-regInfo */, 322 /* id-regInfo-certReq */, 321 /* id-regInfo-utf8Pairs */, 512 /* id-set */, 191 /* id-smime-aa */, 215 /* id-smime-aa-contentHint */, 218 /* id-smime-aa-contentIdentifier */, 221 /* id-smime-aa-contentReference */, 240 /* id-smime-aa-dvcs-dvc */, 217 /* id-smime-aa-encapContentType */, 222 /* id-smime-aa-encrypKeyPref */, 220 /* id-smime-aa-equivalentLabels */, 232 /* id-smime-aa-ets-CertificateRefs */, 233 /* id-smime-aa-ets-RevocationRefs */, 238 /* id-smime-aa-ets-archiveTimeStamp */, 237 /* id-smime-aa-ets-certCRLTimestamp */, 234 /* id-smime-aa-ets-certValues */, 227 /* id-smime-aa-ets-commitmentType */, 231 /* 
id-smime-aa-ets-contentTimestamp */, 236 /* id-smime-aa-ets-escTimeStamp */, 230 /* id-smime-aa-ets-otherSigCert */, 235 /* id-smime-aa-ets-revocationValues */, 226 /* id-smime-aa-ets-sigPolicyId */, 229 /* id-smime-aa-ets-signerAttr */, 228 /* id-smime-aa-ets-signerLocation */, 219 /* id-smime-aa-macValue */, 214 /* id-smime-aa-mlExpandHistory */, 216 /* id-smime-aa-msgSigDigest */, 212 /* id-smime-aa-receiptRequest */, 213 /* id-smime-aa-securityLabel */, 239 /* id-smime-aa-signatureType */, 223 /* id-smime-aa-signingCertificate */, 224 /* id-smime-aa-smimeEncryptCerts */, 225 /* id-smime-aa-timeStampToken */, 192 /* id-smime-alg */, 243 /* id-smime-alg-3DESwrap */, 246 /* id-smime-alg-CMS3DESwrap */, 247 /* id-smime-alg-CMSRC2wrap */, 245 /* id-smime-alg-ESDH */, 241 /* id-smime-alg-ESDHwith3DES */, 242 /* id-smime-alg-ESDHwithRC2 */, 244 /* id-smime-alg-RC2wrap */, 193 /* id-smime-cd */, 248 /* id-smime-cd-ldap */, 190 /* id-smime-ct */, 210 /* id-smime-ct-DVCSRequestData */, 211 /* id-smime-ct-DVCSResponseData */, 208 /* id-smime-ct-TDTInfo */, 207 /* id-smime-ct-TSTInfo */, 205 /* id-smime-ct-authData */, 786 /* id-smime-ct-compressedData */, 209 /* id-smime-ct-contentInfo */, 206 /* id-smime-ct-publishCert */, 204 /* id-smime-ct-receipt */, 195 /* id-smime-cti */, 255 /* id-smime-cti-ets-proofOfApproval */, 256 /* id-smime-cti-ets-proofOfCreation */, 253 /* id-smime-cti-ets-proofOfDelivery */, 251 /* id-smime-cti-ets-proofOfOrigin */, 252 /* id-smime-cti-ets-proofOfReceipt */, 254 /* id-smime-cti-ets-proofOfSender */, 189 /* id-smime-mod */, 196 /* id-smime-mod-cms */, 197 /* id-smime-mod-ess */, 202 /* id-smime-mod-ets-eSigPolicy-88 */, 203 /* id-smime-mod-ets-eSigPolicy-97 */, 200 /* id-smime-mod-ets-eSignature-88 */, 201 /* id-smime-mod-ets-eSignature-97 */, 199 /* id-smime-mod-msg-v3 */, 198 /* id-smime-mod-oid */, 194 /* id-smime-spq */, 250 /* id-smime-spq-ets-sqt-unotice */, 249 /* id-smime-spq-ets-sqt-uri */, 676 /* identified-organization */, 461 /* 
info */, 748 /* inhibitAnyPolicy */, 101 /* initials */, 647 /* international-organizations */, 869 /* internationaliSDNNumber */, 142 /* invalidityDate */, 294 /* ipsecEndSystem */, 295 /* ipsecTunnel */, 296 /* ipsecUser */, 86 /* issuerAltName */, 770 /* issuingDistributionPoint */, 492 /* janetMailbox */, 150 /* keyBag */, 83 /* keyUsage */, 477 /* lastModifiedBy */, 476 /* lastModifiedTime */, 157 /* localKeyID */, 480 /* mXRecord */, 460 /* mail */, 493 /* mailPreferenceOption */, 467 /* manager */, 809 /* md_gost94 */, 875 /* member */, 182 /* member-body */, 51 /* messageDigest */, 383 /* mgmt */, 504 /* mime-mhs */, 506 /* mime-mhs-bodies */, 505 /* mime-mhs-headings */, 488 /* mobileTelephoneNumber */, 136 /* msCTLSign */, 135 /* msCodeCom */, 134 /* msCodeInd */, 138 /* msEFS */, 171 /* msExtReq */, 137 /* msSGC */, 648 /* msSmartcardLogin */, 649 /* msUPN */, 481 /* nSRecord */, 173 /* name */, 666 /* nameConstraints */, 369 /* noCheck */, 403 /* noRevAvail */, 72 /* nsBaseUrl */, 76 /* nsCaPolicyUrl */, 74 /* nsCaRevocationUrl */, 58 /* nsCertExt */, 79 /* nsCertSequence */, 71 /* nsCertType */, 78 /* nsComment */, 59 /* nsDataType */, 75 /* nsRenewalUrl */, 73 /* nsRevocationUrl */, 139 /* nsSGC */, 77 /* nsSslServerName */, 681 /* onBasis */, 491 /* organizationalStatus */, 475 /* otherMailbox */, 876 /* owner */, 489 /* pagerTelephoneNumber */, 374 /* path */, 112 /* pbeWithMD5AndCast5CBC */, 499 /* personalSignature */, 487 /* personalTitle */, 464 /* photo */, 863 /* physicalDeliveryOfficeName */, 437 /* pilot */, 439 /* pilotAttributeSyntax */, 438 /* pilotAttributeType */, 479 /* pilotAttributeType27 */, 456 /* pilotDSA */, 441 /* pilotGroups */, 444 /* pilotObject */, 440 /* pilotObjectClass */, 455 /* pilotOrganization */, 445 /* pilotPerson */, 2 /* pkcs */, 186 /* pkcs1 */, 27 /* pkcs3 */, 187 /* pkcs5 */, 20 /* pkcs7 */, 21 /* pkcs7-data */, 25 /* pkcs7-digestData */, 26 /* pkcs7-encryptedData */, 23 /* pkcs7-envelopedData */, 24 /* 
pkcs7-signedAndEnvelopedData */, 22 /* pkcs7-signedData */, 151 /* pkcs8ShroudedKeyBag */, 47 /* pkcs9 */, 401 /* policyConstraints */, 747 /* policyMappings */, 862 /* postOfficeBox */, 861 /* postalAddress */, 661 /* postalCode */, 683 /* ppBasis */, 872 /* preferredDeliveryMethod */, 873 /* presentationAddress */, 816 /* prf-gostr3411-94 */, 406 /* prime-field */, 409 /* prime192v1 */, 410 /* prime192v2 */, 411 /* prime192v3 */, 412 /* prime239v1 */, 413 /* prime239v2 */, 414 /* prime239v3 */, 415 /* prime256v1 */, 385 /* private */, 84 /* privateKeyUsagePeriod */, 886 /* protocolInformation */, 663 /* proxyCertInfo */, 510 /* pseudonym */, 435 /* pss */, 286 /* qcStatements */, 457 /* qualityLabelledData */, 450 /* rFC822localPart */, 870 /* registeredAddress */, 400 /* role */, 877 /* roleOccupant */, 448 /* room */, 463 /* roomNumber */, 6 /* rsaEncryption */, 644 /* rsaOAEPEncryptionSET */, 377 /* rsaSignature */, 1 /* rsadsi */, 482 /* sOARecord */, 155 /* safeContentsBag */, 291 /* sbgp-autonomousSysNum */, 290 /* sbgp-ipAddrBlock */, 292 /* sbgp-routerIdentifier */, 159 /* sdsiCertificate */, 859 /* searchGuide */, 704 /* secp112r1 */, 705 /* secp112r2 */, 706 /* secp128r1 */, 707 /* secp128r2 */, 708 /* secp160k1 */, 709 /* secp160r1 */, 710 /* secp160r2 */, 711 /* secp192k1 */, 712 /* secp224k1 */, 713 /* secp224r1 */, 714 /* secp256k1 */, 715 /* secp384r1 */, 716 /* secp521r1 */, 154 /* secretBag */, 474 /* secretary */, 717 /* sect113r1 */, 718 /* sect113r2 */, 719 /* sect131r1 */, 720 /* sect131r2 */, 721 /* sect163k1 */, 722 /* sect163r1 */, 723 /* sect163r2 */, 724 /* sect193r1 */, 725 /* sect193r2 */, 726 /* sect233k1 */, 727 /* sect233r1 */, 728 /* sect239k1 */, 729 /* sect283k1 */, 730 /* sect283r1 */, 731 /* sect409k1 */, 732 /* sect409r1 */, 733 /* sect571k1 */, 734 /* sect571r1 */, 386 /* security */, 878 /* seeAlso */, 394 /* selected-attribute-types */, 105 /* serialNumber */, 129 /* serverAuth */, 371 /* serviceLocator */, 625 /* 
set-addPolicy */, 515 /* set-attr */, 518 /* set-brand */, 638 /* set-brand-AmericanExpress */, 637 /* set-brand-Diners */, 636 /* set-brand-IATA-ATA */, 639 /* set-brand-JCB */, 641 /* set-brand-MasterCard */, 642 /* set-brand-Novus */, 640 /* set-brand-Visa */, 517 /* set-certExt */, 513 /* set-ctype */, 514 /* set-msgExt */, 516 /* set-policy */, 607 /* set-policy-root */, 624 /* set-rootKeyThumb */, 620 /* setAttr-Cert */, 631 /* setAttr-GenCryptgrm */, 623 /* setAttr-IssCap */, 628 /* setAttr-IssCap-CVM */, 630 /* setAttr-IssCap-Sig */, 629 /* setAttr-IssCap-T2 */, 621 /* setAttr-PGWYcap */, 635 /* setAttr-SecDevSig */, 632 /* setAttr-T2Enc */, 633 /* setAttr-T2cleartxt */, 634 /* setAttr-TokICCsig */, 627 /* setAttr-Token-B0Prime */, 626 /* setAttr-Token-EMV */, 622 /* setAttr-TokenType */, 619 /* setCext-IssuerCapabilities */, 615 /* setCext-PGWYcapabilities */, 616 /* setCext-TokenIdentifier */, 618 /* setCext-TokenType */, 617 /* setCext-Track2Data */, 611 /* setCext-cCertRequired */, 609 /* setCext-certType */, 608 /* setCext-hashedRoot */, 610 /* setCext-merchData */, 613 /* setCext-setExt */, 614 /* setCext-setQualf */, 612 /* setCext-tunneling */, 540 /* setct-AcqCardCodeMsg */, 576 /* setct-AcqCardCodeMsgTBE */, 570 /* setct-AuthReqTBE */, 534 /* setct-AuthReqTBS */, 527 /* setct-AuthResBaggage */, 571 /* setct-AuthResTBE */, 572 /* setct-AuthResTBEX */, 535 /* setct-AuthResTBS */, 536 /* setct-AuthResTBSX */, 528 /* setct-AuthRevReqBaggage */, 577 /* setct-AuthRevReqTBE */, 541 /* setct-AuthRevReqTBS */, 529 /* setct-AuthRevResBaggage */, 542 /* setct-AuthRevResData */, 578 /* setct-AuthRevResTBE */, 579 /* setct-AuthRevResTBEB */, 543 /* setct-AuthRevResTBS */, 573 /* setct-AuthTokenTBE */, 537 /* setct-AuthTokenTBS */, 600 /* setct-BCIDistributionTBS */, 558 /* setct-BatchAdminReqData */, 592 /* setct-BatchAdminReqTBE */, 559 /* setct-BatchAdminResData */, 593 /* setct-BatchAdminResTBE */, 599 /* setct-CRLNotificationResTBS */, 598 /* 
setct-CRLNotificationTBS */, 580 /* setct-CapReqTBE */, 581 /* setct-CapReqTBEX */, 544 /* setct-CapReqTBS */, 545 /* setct-CapReqTBSX */, 546 /* setct-CapResData */, 582 /* setct-CapResTBE */, 583 /* setct-CapRevReqTBE */, 584 /* setct-CapRevReqTBEX */, 547 /* setct-CapRevReqTBS */, 548 /* setct-CapRevReqTBSX */, 549 /* setct-CapRevResData */, 585 /* setct-CapRevResTBE */, 538 /* setct-CapTokenData */, 530 /* setct-CapTokenSeq */, 574 /* setct-CapTokenTBE */, 575 /* setct-CapTokenTBEX */, 539 /* setct-CapTokenTBS */, 560 /* setct-CardCInitResTBS */, 566 /* setct-CertInqReqTBS */, 563 /* setct-CertReqData */, 595 /* setct-CertReqTBE */, 596 /* setct-CertReqTBEX */, 564 /* setct-CertReqTBS */, 565 /* setct-CertResData */, 597 /* setct-CertResTBE */, 586 /* setct-CredReqTBE */, 587 /* setct-CredReqTBEX */, 550 /* setct-CredReqTBS */, 551 /* setct-CredReqTBSX */, 552 /* setct-CredResData */, 588 /* setct-CredResTBE */, 589 /* setct-CredRevReqTBE */, 590 /* setct-CredRevReqTBEX */, 553 /* setct-CredRevReqTBS */, 554 /* setct-CredRevReqTBSX */, 555 /* setct-CredRevResData */, 591 /* setct-CredRevResTBE */, 567 /* setct-ErrorTBS */, 526 /* setct-HODInput */, 561 /* setct-MeAqCInitResTBS */, 522 /* setct-OIData */, 519 /* setct-PANData */, 521 /* setct-PANOnly */, 520 /* setct-PANToken */, 556 /* setct-PCertReqData */, 557 /* setct-PCertResTBS */, 523 /* setct-PI */, 532 /* setct-PI-TBS */, 524 /* setct-PIData */, 525 /* setct-PIDataUnsigned */, 568 /* setct-PIDualSignedTBE */, 569 /* setct-PIUnsignedTBE */, 531 /* setct-PInitResData */, 533 /* setct-PResData */, 594 /* setct-RegFormReqTBE */, 562 /* setct-RegFormResTBS */, 606 /* setext-cv */, 601 /* setext-genCrypt */, 602 /* setext-miAuth */, 604 /* setext-pinAny */, 603 /* setext-pinSecure */, 605 /* setext-track2 */, 52 /* signingTime */, 454 /* simpleSecurityObject */, 496 /* singleLevelQuality */, 387 /* snmpv2 */, 660 /* street */, 85 /* subjectAltName */, 769 /* subjectDirectoryAttributes */, 398 /* 
subjectInfoAccess */, 82 /* subjectKeyIdentifier */, 498 /* subtreeMaximumQuality */, 497 /* subtreeMinimumQuality */, 890 /* supportedAlgorithms */, 874 /* supportedApplicationContext */, 402 /* targetInformation */, 864 /* telephoneNumber */, 866 /* teletexTerminalIdentifier */, 865 /* telexNumber */, 459 /* textEncodedORAddress */, 293 /* textNotice */, 133 /* timeStamping */, 106 /* title */, 682 /* tpBasis */, 375 /* trustRoot */, 436 /* ucl */, 888 /* uniqueMember */, 55 /* unstructuredAddress */, 49 /* unstructuredName */, 880 /* userCertificate */, 465 /* userClass */, 879 /* userPassword */, 373 /* valid */, 678 /* wap */, 679 /* wap-wsg */, 735 /* wap-wsg-idm-ecid-wtls1 */, 743 /* wap-wsg-idm-ecid-wtls10 */, 744 /* wap-wsg-idm-ecid-wtls11 */, 745 /* wap-wsg-idm-ecid-wtls12 */, 736 /* wap-wsg-idm-ecid-wtls3 */, 737 /* wap-wsg-idm-ecid-wtls4 */, 738 /* wap-wsg-idm-ecid-wtls5 */, 739 /* wap-wsg-idm-ecid-wtls6 */, 740 /* wap-wsg-idm-ecid-wtls7 */, 741 /* wap-wsg-idm-ecid-wtls8 */, 742 /* wap-wsg-idm-ecid-wtls9 */, 804 /* whirlpool */, 868 /* x121Address */, 503 /* x500UniqueIdentifier */, 158 /* x509Certificate */, 160 /* x509Crl */, }; static const uint16_t kNIDsInLongNameOrder[] = { 363 /* AD Time Stamping */, 405 /* ANSI X9.62 */, 368 /* Acceptable OCSP Responses */, 910 /* Any Extended Key Usage */, 664 /* Any language */, 177 /* Authority Information Access */, 365 /* Basic OCSP Response */, 285 /* Biometric Info */, 179 /* CA Issuers */, 785 /* CA Repository */, 131 /* Code Signing */, 783 /* Diffie-Hellman based MAC */, 382 /* Directory */, 392 /* Domain */, 132 /* E-mail Protection */, 949 /* ED25519 */, 960 /* ED448 */, 389 /* Enterprises */, 384 /* Experimental */, 372 /* Extended OCSP Status */, 172 /* Extension Request */, 813 /* GOST 28147-89 */, 849 /* GOST 28147-89 Cryptocom ParamSet */, 815 /* GOST 28147-89 MAC */, 851 /* GOST 34.10-2001 Cryptocom */, 850 /* GOST 34.10-94 Cryptocom */, 811 /* GOST R 34.10-2001 */, 817 /* GOST R 34.10-2001 DH 
*/, 812 /* GOST R 34.10-94 */, 818 /* GOST R 34.10-94 DH */, 809 /* GOST R 34.11-94 */, 816 /* GOST R 34.11-94 PRF */, 807 /* GOST R 34.11-94 with GOST R 34.10-2001 */, 853 /* GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom */, 808 /* GOST R 34.11-94 with GOST R 34.10-94 */, 852 /* GOST R 34.11-94 with GOST R 34.10-94 Cryptocom */, 854 /* GOST R 3410-2001 Parameter Set Cryptocom */, 810 /* HMAC GOST 34.11-94 */, 432 /* Hold Instruction Call Issuer */, 430 /* Hold Instruction Code */, 431 /* Hold Instruction None */, 433 /* Hold Instruction Reject */, 634 /* ICC or token signature */, 294 /* IPSec End System */, 295 /* IPSec Tunnel */, 296 /* IPSec User */, 182 /* ISO Member Body */, 183 /* ISO US Member Body */, 667 /* Independent */, 665 /* Inherit all */, 647 /* International Organizations */, 142 /* Invalidity Date */, 504 /* MIME MHS */, 388 /* Mail */, 383 /* Management */, 417 /* Microsoft CSP Name */, 135 /* Microsoft Commercial Code Signing */, 138 /* Microsoft Encrypted File System */, 171 /* Microsoft Extension Request */, 134 /* Microsoft Individual Code Signing */, 856 /* Microsoft Local Key set */, 137 /* Microsoft Server Gated Crypto */, 648 /* Microsoft Smartcardlogin */, 136 /* Microsoft Trust List Signing */, 649 /* Microsoft Universal Principal Name */, 72 /* Netscape Base Url */, 76 /* Netscape CA Policy Url */, 74 /* Netscape CA Revocation Url */, 71 /* Netscape Cert Type */, 58 /* Netscape Certificate Extension */, 79 /* Netscape Certificate Sequence */, 78 /* Netscape Comment */, 57 /* Netscape Communications Corp. 
*/, 59 /* Netscape Data Type */, 75 /* Netscape Renewal Url */, 73 /* Netscape Revocation Url */, 77 /* Netscape SSL Server Name */, 139 /* Netscape Server Gated Crypto */, 178 /* OCSP */, 370 /* OCSP Archive Cutoff */, 367 /* OCSP CRL ID */, 369 /* OCSP No Check */, 366 /* OCSP Nonce */, 371 /* OCSP Service Locator */, 180 /* OCSP Signing */, 161 /* PBES2 */, 69 /* PBKDF2 */, 162 /* PBMAC1 */, 127 /* PKIX */, 858 /* Permanent Identifier */, 164 /* Policy Qualifier CPS */, 165 /* Policy Qualifier User Notice */, 385 /* Private */, 663 /* Proxy Certificate Information */, 1 /* RSA Data Security, Inc. */, 2 /* RSA Data Security, Inc. PKCS */, 188 /* S/MIME */, 167 /* S/MIME Capabilities */, 387 /* SNMPv2 */, 512 /* Secure Electronic Transactions */, 386 /* Security */, 394 /* Selected Attribute Types */, 143 /* Strong Extranet ID */, 398 /* Subject Information Access */, 130 /* TLS Web Client Authentication */, 129 /* TLS Web Server Authentication */, 133 /* Time Stamping */, 375 /* Trust Root */, 948 /* X25519 */, 964 /* X25519Kyber768Draft00 */, 965 /* X25519MLKEM768 */, 961 /* X448 */, 12 /* X509 */, 402 /* X509v3 AC Targeting */, 746 /* X509v3 Any Policy */, 90 /* X509v3 Authority Key Identifier */, 87 /* X509v3 Basic Constraints */, 103 /* X509v3 CRL Distribution Points */, 88 /* X509v3 CRL Number */, 141 /* X509v3 CRL Reason Code */, 771 /* X509v3 Certificate Issuer */, 89 /* X509v3 Certificate Policies */, 140 /* X509v3 Delta CRL Indicator */, 126 /* X509v3 Extended Key Usage */, 857 /* X509v3 Freshest CRL */, 748 /* X509v3 Inhibit Any Policy */, 86 /* X509v3 Issuer Alternative Name */, 770 /* X509v3 Issuing Distribution Point */, 83 /* X509v3 Key Usage */, 666 /* X509v3 Name Constraints */, 403 /* X509v3 No Revocation Available */, 401 /* X509v3 Policy Constraints */, 747 /* X509v3 Policy Mappings */, 84 /* X509v3 Private Key Usage Period */, 85 /* X509v3 Subject Alternative Name */, 769 /* X509v3 Subject Directory Attributes */, 82 /* X509v3 Subject Key 
Identifier */, 920 /* X9.42 DH */, 184 /* X9.57 */, 185 /* X9.57 CM ? */, 478 /* aRecord */, 289 /* aaControls */, 287 /* ac-auditEntity */, 397 /* ac-proxying */, 288 /* ac-targeting */, 446 /* account */, 364 /* ad dvcs */, 606 /* additional verification */, 419 /* aes-128-cbc */, 916 /* aes-128-cbc-hmac-sha1 */, 896 /* aes-128-ccm */, 421 /* aes-128-cfb */, 650 /* aes-128-cfb1 */, 653 /* aes-128-cfb8 */, 904 /* aes-128-ctr */, 418 /* aes-128-ecb */, 895 /* aes-128-gcm */, 420 /* aes-128-ofb */, 913 /* aes-128-xts */, 423 /* aes-192-cbc */, 917 /* aes-192-cbc-hmac-sha1 */, 899 /* aes-192-ccm */, 425 /* aes-192-cfb */, 651 /* aes-192-cfb1 */, 654 /* aes-192-cfb8 */, 905 /* aes-192-ctr */, 422 /* aes-192-ecb */, 898 /* aes-192-gcm */, 424 /* aes-192-ofb */, 427 /* aes-256-cbc */, 918 /* aes-256-cbc-hmac-sha1 */, 902 /* aes-256-ccm */, 429 /* aes-256-cfb */, 652 /* aes-256-cfb1 */, 655 /* aes-256-cfb8 */, 906 /* aes-256-ctr */, 426 /* aes-256-ecb */, 901 /* aes-256-gcm */, 428 /* aes-256-ofb */, 914 /* aes-256-xts */, 376 /* algorithm */, 484 /* associatedDomain */, 485 /* associatedName */, 501 /* audio */, 958 /* auth-any */, 955 /* auth-ecdsa */, 956 /* auth-psk */, 954 /* auth-rsa */, 882 /* authorityRevocationList */, 91 /* bf-cbc */, 93 /* bf-cfb */, 92 /* bf-ecb */, 94 /* bf-ofb */, 921 /* brainpoolP160r1 */, 922 /* brainpoolP160t1 */, 923 /* brainpoolP192r1 */, 924 /* brainpoolP192t1 */, 925 /* brainpoolP224r1 */, 926 /* brainpoolP224t1 */, 927 /* brainpoolP256r1 */, 928 /* brainpoolP256t1 */, 929 /* brainpoolP320r1 */, 930 /* brainpoolP320t1 */, 931 /* brainpoolP384r1 */, 932 /* brainpoolP384t1 */, 933 /* brainpoolP512r1 */, 934 /* brainpoolP512t1 */, 494 /* buildingName */, 860 /* businessCategory */, 691 /* c2onb191v4 */, 692 /* c2onb191v5 */, 697 /* c2onb239v4 */, 698 /* c2onb239v5 */, 684 /* c2pnb163v1 */, 685 /* c2pnb163v2 */, 686 /* c2pnb163v3 */, 687 /* c2pnb176v1 */, 693 /* c2pnb208w1 */, 699 /* c2pnb272w1 */, 700 /* c2pnb304w1 */, 702 /* c2pnb368w1 
*/, 688 /* c2tnb191v1 */, 689 /* c2tnb191v2 */, 690 /* c2tnb191v3 */, 694 /* c2tnb239v1 */, 695 /* c2tnb239v2 */, 696 /* c2tnb239v3 */, 701 /* c2tnb359v1 */, 703 /* c2tnb431r1 */, 881 /* cACertificate */, 483 /* cNAMERecord */, 751 /* camellia-128-cbc */, 757 /* camellia-128-cfb */, 760 /* camellia-128-cfb1 */, 763 /* camellia-128-cfb8 */, 754 /* camellia-128-ecb */, 766 /* camellia-128-ofb */, 752 /* camellia-192-cbc */, 758 /* camellia-192-cfb */, 761 /* camellia-192-cfb1 */, 764 /* camellia-192-cfb8 */, 755 /* camellia-192-ecb */, 767 /* camellia-192-ofb */, 753 /* camellia-256-cbc */, 759 /* camellia-256-cfb */, 762 /* camellia-256-cfb1 */, 765 /* camellia-256-cfb8 */, 756 /* camellia-256-ecb */, 768 /* camellia-256-ofb */, 443 /* caseIgnoreIA5StringSyntax */, 108 /* cast5-cbc */, 110 /* cast5-cfb */, 109 /* cast5-ecb */, 111 /* cast5-ofb */, 152 /* certBag */, 677 /* certicom-arc */, 517 /* certificate extensions */, 883 /* certificateRevocationList */, 950 /* chacha20-poly1305 */, 54 /* challengePassword */, 407 /* characteristic-two-field */, 395 /* clearance */, 633 /* cleartext track 2 */, 894 /* cmac */, 13 /* commonName */, 513 /* content types */, 50 /* contentType */, 53 /* countersignature */, 14 /* countryName */, 153 /* crlBag */, 884 /* crossCertificatePair */, 806 /* cryptocom */, 805 /* cryptopro */, 500 /* dITRedirect */, 451 /* dNSDomain */, 495 /* dSAQuality */, 434 /* data */, 390 /* dcObject */, 891 /* deltaRevocationList */, 31 /* des-cbc */, 643 /* des-cdmf */, 30 /* des-cfb */, 656 /* des-cfb1 */, 657 /* des-cfb8 */, 29 /* des-ecb */, 32 /* des-ede */, 43 /* des-ede-cbc */, 60 /* des-ede-cfb */, 62 /* des-ede-ofb */, 33 /* des-ede3 */, 44 /* des-ede3-cbc */, 61 /* des-ede3-cfb */, 658 /* des-ede3-cfb1 */, 659 /* des-ede3-cfb8 */, 63 /* des-ede3-ofb */, 45 /* des-ofb */, 107 /* description */, 871 /* destinationIndicator */, 80 /* desx-cbc */, 947 /* dh-cofactor-kdf */, 946 /* dh-std-kdf */, 28 /* dhKeyAgreement */, 941 /* 
dhSinglePass-cofactorDH-sha1kdf-scheme */, 942 /* dhSinglePass-cofactorDH-sha224kdf-scheme */, 943 /* dhSinglePass-cofactorDH-sha256kdf-scheme */, 944 /* dhSinglePass-cofactorDH-sha384kdf-scheme */, 945 /* dhSinglePass-cofactorDH-sha512kdf-scheme */, 936 /* dhSinglePass-stdDH-sha1kdf-scheme */, 937 /* dhSinglePass-stdDH-sha224kdf-scheme */, 938 /* dhSinglePass-stdDH-sha256kdf-scheme */, 939 /* dhSinglePass-stdDH-sha384kdf-scheme */, 940 /* dhSinglePass-stdDH-sha512kdf-scheme */, 11 /* directory services (X.500) */, 378 /* directory services - algorithms */, 887 /* distinguishedName */, 892 /* dmdName */, 174 /* dnQualifier */, 447 /* document */, 471 /* documentAuthor */, 468 /* documentIdentifier */, 472 /* documentLocation */, 502 /* documentPublisher */, 449 /* documentSeries */, 469 /* documentTitle */, 470 /* documentVersion */, 380 /* dod */, 391 /* domainComponent */, 452 /* domainRelatedObject */, 116 /* dsaEncryption */, 67 /* dsaEncryption-old */, 66 /* dsaWithSHA */, 113 /* dsaWithSHA1 */, 70 /* dsaWithSHA1-old */, 802 /* dsa_with_SHA224 */, 803 /* dsa_with_SHA256 */, 297 /* dvcs */, 791 /* ecdsa-with-Recommended */, 416 /* ecdsa-with-SHA1 */, 793 /* ecdsa-with-SHA224 */, 794 /* ecdsa-with-SHA256 */, 795 /* ecdsa-with-SHA384 */, 796 /* ecdsa-with-SHA512 */, 792 /* ecdsa-with-Specified */, 48 /* emailAddress */, 632 /* encrypted track 2 */, 885 /* enhancedSearchGuide */, 56 /* extendedCertificateAttributes */, 867 /* facsimileTelephoneNumber */, 462 /* favouriteDrink */, 453 /* friendlyCountry */, 490 /* friendlyCountryName */, 156 /* friendlyName */, 631 /* generate cryptogram */, 509 /* generationQualifier */, 601 /* generic cryptogram */, 99 /* givenName */, 814 /* gost89-cnt */, 963 /* hkdf */, 855 /* hmac */, 780 /* hmac-md5 */, 781 /* hmac-sha1 */, 797 /* hmacWithMD5 */, 163 /* hmacWithSHA1 */, 798 /* hmacWithSHA224 */, 799 /* hmacWithSHA256 */, 800 /* hmacWithSHA384 */, 801 /* hmacWithSHA512 */, 486 /* homePostalAddress */, 473 /* 
homeTelephoneNumber */, 466 /* host */, 889 /* houseIdentifier */, 442 /* iA5StringSyntax */, 381 /* iana */, 824 /* id-Gost28147-89-CryptoPro-A-ParamSet */, 825 /* id-Gost28147-89-CryptoPro-B-ParamSet */, 826 /* id-Gost28147-89-CryptoPro-C-ParamSet */, 827 /* id-Gost28147-89-CryptoPro-D-ParamSet */, 819 /* id-Gost28147-89-CryptoPro-KeyMeshing */, 829 /* id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet */, 828 /* id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet */, 830 /* id-Gost28147-89-CryptoPro-RIC-1-ParamSet */, 820 /* id-Gost28147-89-None-KeyMeshing */, 823 /* id-Gost28147-89-TestParamSet */, 840 /* id-GostR3410-2001-CryptoPro-A-ParamSet */, 841 /* id-GostR3410-2001-CryptoPro-B-ParamSet */, 842 /* id-GostR3410-2001-CryptoPro-C-ParamSet */, 843 /* id-GostR3410-2001-CryptoPro-XchA-ParamSet */, 844 /* id-GostR3410-2001-CryptoPro-XchB-ParamSet */, 839 /* id-GostR3410-2001-TestParamSet */, 832 /* id-GostR3410-94-CryptoPro-A-ParamSet */, 833 /* id-GostR3410-94-CryptoPro-B-ParamSet */, 834 /* id-GostR3410-94-CryptoPro-C-ParamSet */, 835 /* id-GostR3410-94-CryptoPro-D-ParamSet */, 836 /* id-GostR3410-94-CryptoPro-XchA-ParamSet */, 837 /* id-GostR3410-94-CryptoPro-XchB-ParamSet */, 838 /* id-GostR3410-94-CryptoPro-XchC-ParamSet */, 831 /* id-GostR3410-94-TestParamSet */, 845 /* id-GostR3410-94-a */, 846 /* id-GostR3410-94-aBis */, 847 /* id-GostR3410-94-b */, 848 /* id-GostR3410-94-bBis */, 822 /* id-GostR3411-94-CryptoProParamSet */, 821 /* id-GostR3411-94-TestParamSet */, 266 /* id-aca */, 355 /* id-aca-accessIdentity */, 354 /* id-aca-authenticationInfo */, 356 /* id-aca-chargingIdentity */, 399 /* id-aca-encAttrs */, 357 /* id-aca-group */, 358 /* id-aca-role */, 176 /* id-ad */, 788 /* id-aes128-wrap */, 897 /* id-aes128-wrap-pad */, 789 /* id-aes192-wrap */, 900 /* id-aes192-wrap-pad */, 790 /* id-aes256-wrap */, 903 /* id-aes256-wrap-pad */, 262 /* id-alg */, 893 /* id-alg-PWRI-KEK */, 323 /* id-alg-des40 */, 326 /* id-alg-dh-pop */, 325 /* id-alg-dh-sig-hmac-sha1 */, 
324 /* id-alg-noSignature */, 907 /* id-camellia128-wrap */, 908 /* id-camellia192-wrap */, 909 /* id-camellia256-wrap */, 268 /* id-cct */, 361 /* id-cct-PKIData */, 362 /* id-cct-PKIResponse */, 360 /* id-cct-crs */, 81 /* id-ce */, 680 /* id-characteristic-two-basis */, 263 /* id-cmc */, 334 /* id-cmc-addExtensions */, 346 /* id-cmc-confirmCertAcceptance */, 330 /* id-cmc-dataReturn */, 336 /* id-cmc-decryptedPOP */, 335 /* id-cmc-encryptedPOP */, 339 /* id-cmc-getCRL */, 338 /* id-cmc-getCert */, 328 /* id-cmc-identification */, 329 /* id-cmc-identityProof */, 337 /* id-cmc-lraPOPWitness */, 344 /* id-cmc-popLinkRandom */, 345 /* id-cmc-popLinkWitness */, 343 /* id-cmc-queryPending */, 333 /* id-cmc-recipientNonce */, 341 /* id-cmc-regInfo */, 342 /* id-cmc-responseInfo */, 340 /* id-cmc-revokeRequest */, 332 /* id-cmc-senderNonce */, 327 /* id-cmc-statusInfo */, 331 /* id-cmc-transactionId */, 787 /* id-ct-asciiTextWithCRLF */, 408 /* id-ecPublicKey */, 508 /* id-hex-multipart-message */, 507 /* id-hex-partial-message */, 260 /* id-it */, 302 /* id-it-caKeyUpdateInfo */, 298 /* id-it-caProtEncCert */, 311 /* id-it-confirmWaitTime */, 303 /* id-it-currentCRL */, 300 /* id-it-encKeyPairTypes */, 310 /* id-it-implicitConfirm */, 308 /* id-it-keyPairParamRep */, 307 /* id-it-keyPairParamReq */, 312 /* id-it-origPKIMessage */, 301 /* id-it-preferredSymmAlg */, 309 /* id-it-revPassphrase */, 299 /* id-it-signKeyPairTypes */, 305 /* id-it-subscriptionRequest */, 306 /* id-it-subscriptionResponse */, 784 /* id-it-suppLangTags */, 304 /* id-it-unsupportedOIDs */, 128 /* id-kp */, 280 /* id-mod-attribute-cert */, 274 /* id-mod-cmc */, 277 /* id-mod-cmp */, 284 /* id-mod-cmp2000 */, 273 /* id-mod-crmf */, 283 /* id-mod-dvcs */, 275 /* id-mod-kea-profile-88 */, 276 /* id-mod-kea-profile-93 */, 282 /* id-mod-ocsp */, 278 /* id-mod-qualified-cert-88 */, 279 /* id-mod-qualified-cert-93 */, 281 /* id-mod-timestamp-protocol */, 264 /* id-on */, 347 /* id-on-personalData */, 
265 /* id-pda */, 352 /* id-pda-countryOfCitizenship */, 353 /* id-pda-countryOfResidence */, 348 /* id-pda-dateOfBirth */, 351 /* id-pda-gender */, 349 /* id-pda-placeOfBirth */, 175 /* id-pe */, 261 /* id-pkip */, 258 /* id-pkix-mod */, 269 /* id-pkix1-explicit-88 */, 271 /* id-pkix1-explicit-93 */, 270 /* id-pkix1-implicit-88 */, 272 /* id-pkix1-implicit-93 */, 662 /* id-ppl */, 267 /* id-qcs */, 359 /* id-qcs-pkixQCSyntax-v1 */, 259 /* id-qt */, 313 /* id-regCtrl */, 316 /* id-regCtrl-authenticator */, 319 /* id-regCtrl-oldCertID */, 318 /* id-regCtrl-pkiArchiveOptions */, 317 /* id-regCtrl-pkiPublicationInfo */, 320 /* id-regCtrl-protocolEncrKey */, 315 /* id-regCtrl-regToken */, 314 /* id-regInfo */, 322 /* id-regInfo-certReq */, 321 /* id-regInfo-utf8Pairs */, 191 /* id-smime-aa */, 215 /* id-smime-aa-contentHint */, 218 /* id-smime-aa-contentIdentifier */, 221 /* id-smime-aa-contentReference */, 240 /* id-smime-aa-dvcs-dvc */, 217 /* id-smime-aa-encapContentType */, 222 /* id-smime-aa-encrypKeyPref */, 220 /* id-smime-aa-equivalentLabels */, 232 /* id-smime-aa-ets-CertificateRefs */, 233 /* id-smime-aa-ets-RevocationRefs */, 238 /* id-smime-aa-ets-archiveTimeStamp */, 237 /* id-smime-aa-ets-certCRLTimestamp */, 234 /* id-smime-aa-ets-certValues */, 227 /* id-smime-aa-ets-commitmentType */, 231 /* id-smime-aa-ets-contentTimestamp */, 236 /* id-smime-aa-ets-escTimeStamp */, 230 /* id-smime-aa-ets-otherSigCert */, 235 /* id-smime-aa-ets-revocationValues */, 226 /* id-smime-aa-ets-sigPolicyId */, 229 /* id-smime-aa-ets-signerAttr */, 228 /* id-smime-aa-ets-signerLocation */, 219 /* id-smime-aa-macValue */, 214 /* id-smime-aa-mlExpandHistory */, 216 /* id-smime-aa-msgSigDigest */, 212 /* id-smime-aa-receiptRequest */, 213 /* id-smime-aa-securityLabel */, 239 /* id-smime-aa-signatureType */, 223 /* id-smime-aa-signingCertificate */, 224 /* id-smime-aa-smimeEncryptCerts */, 225 /* id-smime-aa-timeStampToken */, 192 /* id-smime-alg */, 243 /* id-smime-alg-3DESwrap 
*/, 246 /* id-smime-alg-CMS3DESwrap */, 247 /* id-smime-alg-CMSRC2wrap */, 245 /* id-smime-alg-ESDH */, 241 /* id-smime-alg-ESDHwith3DES */, 242 /* id-smime-alg-ESDHwithRC2 */, 244 /* id-smime-alg-RC2wrap */, 193 /* id-smime-cd */, 248 /* id-smime-cd-ldap */, 190 /* id-smime-ct */, 210 /* id-smime-ct-DVCSRequestData */, 211 /* id-smime-ct-DVCSResponseData */, 208 /* id-smime-ct-TDTInfo */, 207 /* id-smime-ct-TSTInfo */, 205 /* id-smime-ct-authData */, 786 /* id-smime-ct-compressedData */, 209 /* id-smime-ct-contentInfo */, 206 /* id-smime-ct-publishCert */, 204 /* id-smime-ct-receipt */, 195 /* id-smime-cti */, 255 /* id-smime-cti-ets-proofOfApproval */, 256 /* id-smime-cti-ets-proofOfCreation */, 253 /* id-smime-cti-ets-proofOfDelivery */, 251 /* id-smime-cti-ets-proofOfOrigin */, 252 /* id-smime-cti-ets-proofOfReceipt */, 254 /* id-smime-cti-ets-proofOfSender */, 189 /* id-smime-mod */, 196 /* id-smime-mod-cms */, 197 /* id-smime-mod-ess */, 202 /* id-smime-mod-ets-eSigPolicy-88 */, 203 /* id-smime-mod-ets-eSigPolicy-97 */, 200 /* id-smime-mod-ets-eSignature-88 */, 201 /* id-smime-mod-ets-eSignature-97 */, 199 /* id-smime-mod-msg-v3 */, 198 /* id-smime-mod-oid */, 194 /* id-smime-spq */, 250 /* id-smime-spq-ets-sqt-unotice */, 249 /* id-smime-spq-ets-sqt-uri */, 34 /* idea-cbc */, 35 /* idea-cfb */, 36 /* idea-ecb */, 46 /* idea-ofb */, 676 /* identified-organization */, 461 /* info */, 101 /* initials */, 869 /* internationaliSDNNumber */, 749 /* ipsec3 */, 750 /* ipsec4 */, 181 /* iso */, 623 /* issuer capabilities */, 645 /* itu-t */, 492 /* janetMailbox */, 646 /* joint-iso-itu-t */, 150 /* keyBag */, 773 /* kisa */, 957 /* kx-any */, 952 /* kx-ecdhe */, 953 /* kx-psk */, 951 /* kx-rsa */, 477 /* lastModifiedBy */, 476 /* lastModifiedTime */, 157 /* localKeyID */, 15 /* localityName */, 480 /* mXRecord */, 493 /* mailPreferenceOption */, 467 /* manager */, 3 /* md2 */, 7 /* md2WithRSAEncryption */, 257 /* md4 */, 396 /* md4WithRSAEncryption */, 4 /* md5 */, 
114 /* md5-sha1 */, 104 /* md5WithRSA */, 8 /* md5WithRSAEncryption */, 95 /* mdc2 */, 96 /* mdc2WithRSA */, 875 /* member */, 602 /* merchant initiated auth */, 514 /* message extensions */, 51 /* messageDigest */, 911 /* mgf1 */, 506 /* mime-mhs-bodies */, 505 /* mime-mhs-headings */, 488 /* mobileTelephoneNumber */, 481 /* nSRecord */, 173 /* name */, 681 /* onBasis */, 379 /* org */, 17 /* organizationName */, 491 /* organizationalStatus */, 18 /* organizationalUnitName */, 475 /* otherMailbox */, 876 /* owner */, 935 /* pSpecified */, 489 /* pagerTelephoneNumber */, 782 /* password based MAC */, 374 /* path */, 621 /* payment gateway capabilities */, 9 /* pbeWithMD2AndDES-CBC */, 168 /* pbeWithMD2AndRC2-CBC */, 112 /* pbeWithMD5AndCast5CBC */, 10 /* pbeWithMD5AndDES-CBC */, 169 /* pbeWithMD5AndRC2-CBC */, 148 /* pbeWithSHA1And128BitRC2-CBC */, 144 /* pbeWithSHA1And128BitRC4 */, 147 /* pbeWithSHA1And2-KeyTripleDES-CBC */, 146 /* pbeWithSHA1And3-KeyTripleDES-CBC */, 149 /* pbeWithSHA1And40BitRC2-CBC */, 145 /* pbeWithSHA1And40BitRC4 */, 170 /* pbeWithSHA1AndDES-CBC */, 68 /* pbeWithSHA1AndRC2-CBC */, 499 /* personalSignature */, 487 /* personalTitle */, 464 /* photo */, 863 /* physicalDeliveryOfficeName */, 437 /* pilot */, 439 /* pilotAttributeSyntax */, 438 /* pilotAttributeType */, 479 /* pilotAttributeType27 */, 456 /* pilotDSA */, 441 /* pilotGroups */, 444 /* pilotObject */, 440 /* pilotObjectClass */, 455 /* pilotOrganization */, 445 /* pilotPerson */, 186 /* pkcs1 */, 27 /* pkcs3 */, 187 /* pkcs5 */, 20 /* pkcs7 */, 21 /* pkcs7-data */, 25 /* pkcs7-digestData */, 26 /* pkcs7-encryptedData */, 23 /* pkcs7-envelopedData */, 24 /* pkcs7-signedAndEnvelopedData */, 22 /* pkcs7-signedData */, 151 /* pkcs8ShroudedKeyBag */, 47 /* pkcs9 */, 862 /* postOfficeBox */, 861 /* postalAddress */, 661 /* postalCode */, 683 /* ppBasis */, 872 /* preferredDeliveryMethod */, 873 /* presentationAddress */, 406 /* prime-field */, 409 /* prime192v1 */, 410 /* prime192v2 */, 
411 /* prime192v3 */, 412 /* prime239v1 */, 413 /* prime239v2 */, 414 /* prime239v3 */, 415 /* prime256v1 */, 886 /* protocolInformation */, 510 /* pseudonym */, 435 /* pss */, 286 /* qcStatements */, 457 /* qualityLabelledData */, 450 /* rFC822localPart */, 98 /* rc2-40-cbc */, 166 /* rc2-64-cbc */, 37 /* rc2-cbc */, 39 /* rc2-cfb */, 38 /* rc2-ecb */, 40 /* rc2-ofb */, 5 /* rc4 */, 97 /* rc4-40 */, 915 /* rc4-hmac-md5 */, 120 /* rc5-cbc */, 122 /* rc5-cfb */, 121 /* rc5-ecb */, 123 /* rc5-ofb */, 870 /* registeredAddress */, 460 /* rfc822Mailbox */, 117 /* ripemd160 */, 119 /* ripemd160WithRSA */, 400 /* role */, 877 /* roleOccupant */, 448 /* room */, 463 /* roomNumber */, 19 /* rsa */, 6 /* rsaEncryption */, 644 /* rsaOAEPEncryptionSET */, 377 /* rsaSignature */, 919 /* rsaesOaep */, 912 /* rsassaPss */, 482 /* sOARecord */, 155 /* safeContentsBag */, 291 /* sbgp-autonomousSysNum */, 290 /* sbgp-ipAddrBlock */, 292 /* sbgp-routerIdentifier */, 159 /* sdsiCertificate */, 859 /* searchGuide */, 704 /* secp112r1 */, 705 /* secp112r2 */, 706 /* secp128r1 */, 707 /* secp128r2 */, 708 /* secp160k1 */, 709 /* secp160r1 */, 710 /* secp160r2 */, 711 /* secp192k1 */, 712 /* secp224k1 */, 713 /* secp224r1 */, 714 /* secp256k1 */, 715 /* secp384r1 */, 716 /* secp521r1 */, 154 /* secretBag */, 474 /* secretary */, 717 /* sect113r1 */, 718 /* sect113r2 */, 719 /* sect131r1 */, 720 /* sect131r2 */, 721 /* sect163k1 */, 722 /* sect163r1 */, 723 /* sect163r2 */, 724 /* sect193r1 */, 725 /* sect193r2 */, 726 /* sect233k1 */, 727 /* sect233r1 */, 728 /* sect239k1 */, 729 /* sect283k1 */, 730 /* sect283r1 */, 731 /* sect409k1 */, 732 /* sect409r1 */, 733 /* sect571k1 */, 734 /* sect571r1 */, 635 /* secure device signature */, 878 /* seeAlso */, 777 /* seed-cbc */, 779 /* seed-cfb */, 776 /* seed-ecb */, 778 /* seed-ofb */, 105 /* serialNumber */, 625 /* set-addPolicy */, 515 /* set-attr */, 518 /* set-brand */, 638 /* set-brand-AmericanExpress */, 637 /* set-brand-Diners */, 636 
/* set-brand-IATA-ATA */, 639 /* set-brand-JCB */, 641 /* set-brand-MasterCard */, 642 /* set-brand-Novus */, 640 /* set-brand-Visa */, 516 /* set-policy */, 607 /* set-policy-root */, 624 /* set-rootKeyThumb */, 620 /* setAttr-Cert */, 628 /* setAttr-IssCap-CVM */, 630 /* setAttr-IssCap-Sig */, 629 /* setAttr-IssCap-T2 */, 627 /* setAttr-Token-B0Prime */, 626 /* setAttr-Token-EMV */, 622 /* setAttr-TokenType */, 619 /* setCext-IssuerCapabilities */, 615 /* setCext-PGWYcapabilities */, 616 /* setCext-TokenIdentifier */, 618 /* setCext-TokenType */, 617 /* setCext-Track2Data */, 611 /* setCext-cCertRequired */, 609 /* setCext-certType */, 608 /* setCext-hashedRoot */, 610 /* setCext-merchData */, 613 /* setCext-setExt */, 614 /* setCext-setQualf */, 612 /* setCext-tunneling */, 540 /* setct-AcqCardCodeMsg */, 576 /* setct-AcqCardCodeMsgTBE */, 570 /* setct-AuthReqTBE */, 534 /* setct-AuthReqTBS */, 527 /* setct-AuthResBaggage */, 571 /* setct-AuthResTBE */, 572 /* setct-AuthResTBEX */, 535 /* setct-AuthResTBS */, 536 /* setct-AuthResTBSX */, 528 /* setct-AuthRevReqBaggage */, 577 /* setct-AuthRevReqTBE */, 541 /* setct-AuthRevReqTBS */, 529 /* setct-AuthRevResBaggage */, 542 /* setct-AuthRevResData */, 578 /* setct-AuthRevResTBE */, 579 /* setct-AuthRevResTBEB */, 543 /* setct-AuthRevResTBS */, 573 /* setct-AuthTokenTBE */, 537 /* setct-AuthTokenTBS */, 600 /* setct-BCIDistributionTBS */, 558 /* setct-BatchAdminReqData */, 592 /* setct-BatchAdminReqTBE */, 559 /* setct-BatchAdminResData */, 593 /* setct-BatchAdminResTBE */, 599 /* setct-CRLNotificationResTBS */, 598 /* setct-CRLNotificationTBS */, 580 /* setct-CapReqTBE */, 581 /* setct-CapReqTBEX */, 544 /* setct-CapReqTBS */, 545 /* setct-CapReqTBSX */, 546 /* setct-CapResData */, 582 /* setct-CapResTBE */, 583 /* setct-CapRevReqTBE */, 584 /* setct-CapRevReqTBEX */, 547 /* setct-CapRevReqTBS */, 548 /* setct-CapRevReqTBSX */, 549 /* setct-CapRevResData */, 585 /* setct-CapRevResTBE */, 538 /* setct-CapTokenData 
*/, 530 /* setct-CapTokenSeq */, 574 /* setct-CapTokenTBE */, 575 /* setct-CapTokenTBEX */, 539 /* setct-CapTokenTBS */, 560 /* setct-CardCInitResTBS */, 566 /* setct-CertInqReqTBS */, 563 /* setct-CertReqData */, 595 /* setct-CertReqTBE */, 596 /* setct-CertReqTBEX */, 564 /* setct-CertReqTBS */, 565 /* setct-CertResData */, 597 /* setct-CertResTBE */, 586 /* setct-CredReqTBE */, 587 /* setct-CredReqTBEX */, 550 /* setct-CredReqTBS */, 551 /* setct-CredReqTBSX */, 552 /* setct-CredResData */, 588 /* setct-CredResTBE */, 589 /* setct-CredRevReqTBE */, 590 /* setct-CredRevReqTBEX */, 553 /* setct-CredRevReqTBS */, 554 /* setct-CredRevReqTBSX */, 555 /* setct-CredRevResData */, 591 /* setct-CredRevResTBE */, 567 /* setct-ErrorTBS */, 526 /* setct-HODInput */, 561 /* setct-MeAqCInitResTBS */, 522 /* setct-OIData */, 519 /* setct-PANData */, 521 /* setct-PANOnly */, 520 /* setct-PANToken */, 556 /* setct-PCertReqData */, 557 /* setct-PCertResTBS */, 523 /* setct-PI */, 532 /* setct-PI-TBS */, 524 /* setct-PIData */, 525 /* setct-PIDataUnsigned */, 568 /* setct-PIDualSignedTBE */, 569 /* setct-PIUnsignedTBE */, 531 /* setct-PInitResData */, 533 /* setct-PResData */, 594 /* setct-RegFormReqTBE */, 562 /* setct-RegFormResTBS */, 604 /* setext-pinAny */, 603 /* setext-pinSecure */, 605 /* setext-track2 */, 41 /* sha */, 64 /* sha1 */, 115 /* sha1WithRSA */, 65 /* sha1WithRSAEncryption */, 675 /* sha224 */, 671 /* sha224WithRSAEncryption */, 672 /* sha256 */, 668 /* sha256WithRSAEncryption */, 673 /* sha384 */, 669 /* sha384WithRSAEncryption */, 674 /* sha512 */, 962 /* sha512-256 */, 670 /* sha512WithRSAEncryption */, 42 /* shaWithRSAEncryption */, 52 /* signingTime */, 454 /* simpleSecurityObject */, 496 /* singleLevelQuality */, 16 /* stateOrProvinceName */, 660 /* streetAddress */, 498 /* subtreeMaximumQuality */, 497 /* subtreeMinimumQuality */, 890 /* supportedAlgorithms */, 874 /* supportedApplicationContext */, 100 /* surname */, 864 /* telephoneNumber */, 866 /* 
teletexTerminalIdentifier */, 865 /* telexNumber */, 459 /* textEncodedORAddress */, 293 /* textNotice */, 106 /* title */, 682 /* tpBasis */, 436 /* ucl */, 888 /* uniqueMember */, 55 /* unstructuredAddress */, 49 /* unstructuredName */, 880 /* userCertificate */, 465 /* userClass */, 458 /* userId */, 879 /* userPassword */, 373 /* valid */, 678 /* wap */, 679 /* wap-wsg */, 735 /* wap-wsg-idm-ecid-wtls1 */, 743 /* wap-wsg-idm-ecid-wtls10 */, 744 /* wap-wsg-idm-ecid-wtls11 */, 745 /* wap-wsg-idm-ecid-wtls12 */, 736 /* wap-wsg-idm-ecid-wtls3 */, 737 /* wap-wsg-idm-ecid-wtls4 */, 738 /* wap-wsg-idm-ecid-wtls5 */, 739 /* wap-wsg-idm-ecid-wtls6 */, 740 /* wap-wsg-idm-ecid-wtls7 */, 741 /* wap-wsg-idm-ecid-wtls8 */, 742 /* wap-wsg-idm-ecid-wtls9 */, 804 /* whirlpool */, 868 /* x121Address */, 503 /* x500UniqueIdentifier */, 158 /* x509Certificate */, 160 /* x509Crl */, 125 /* zlib compression */, }; static const uint16_t kNIDsInOIDOrder[] = { 434 /* 0.9 (OBJ_data) */, 182 /* 1.2 (OBJ_member_body) */, 379 /* 1.3 (OBJ_org) */, 676 /* 1.3 (OBJ_identified_organization) */, 11 /* 2.5 (OBJ_X500) */, 647 /* 2.23 (OBJ_international_organizations) */, 380 /* 1.3.6 (OBJ_dod) */, 12 /* 2.5.4 (OBJ_X509) */, 378 /* 2.5.8 (OBJ_X500algorithms) */, 81 /* 2.5.29 (OBJ_id_ce) */, 512 /* 2.23.42 (OBJ_id_set) */, 678 /* 2.23.43 (OBJ_wap) */, 435 /* 0.9.2342 (OBJ_pss) */, 183 /* 1.2.840 (OBJ_ISO_US) */, 381 /* 1.3.6.1 (OBJ_iana) */, 948 /* 1.3.101.110 (OBJ_X25519) */, 961 /* 1.3.101.111 (OBJ_X448) */, 949 /* 1.3.101.112 (OBJ_ED25519) */, 960 /* 1.3.101.113 (OBJ_ED448) */, 677 /* 1.3.132 (OBJ_certicom_arc) */, 394 /* 2.5.1.5 (OBJ_selected_attribute_types) */, 13 /* 2.5.4.3 (OBJ_commonName) */, 100 /* 2.5.4.4 (OBJ_surname) */, 105 /* 2.5.4.5 (OBJ_serialNumber) */, 14 /* 2.5.4.6 (OBJ_countryName) */, 15 /* 2.5.4.7 (OBJ_localityName) */, 16 /* 2.5.4.8 (OBJ_stateOrProvinceName) */, 660 /* 2.5.4.9 (OBJ_streetAddress) */, 17 /* 2.5.4.10 (OBJ_organizationName) */, 18 /* 2.5.4.11 
(OBJ_organizationalUnitName) */, 106 /* 2.5.4.12 (OBJ_title) */, 107 /* 2.5.4.13 (OBJ_description) */, 859 /* 2.5.4.14 (OBJ_searchGuide) */, 860 /* 2.5.4.15 (OBJ_businessCategory) */, 861 /* 2.5.4.16 (OBJ_postalAddress) */, 661 /* 2.5.4.17 (OBJ_postalCode) */, 862 /* 2.5.4.18 (OBJ_postOfficeBox) */, 863 /* 2.5.4.19 (OBJ_physicalDeliveryOfficeName) */, 864 /* 2.5.4.20 (OBJ_telephoneNumber) */, 865 /* 2.5.4.21 (OBJ_telexNumber) */, 866 /* 2.5.4.22 (OBJ_teletexTerminalIdentifier) */, 867 /* 2.5.4.23 (OBJ_facsimileTelephoneNumber) */, 868 /* 2.5.4.24 (OBJ_x121Address) */, 869 /* 2.5.4.25 (OBJ_internationaliSDNNumber) */, 870 /* 2.5.4.26 (OBJ_registeredAddress) */, 871 /* 2.5.4.27 (OBJ_destinationIndicator) */, 872 /* 2.5.4.28 (OBJ_preferredDeliveryMethod) */, 873 /* 2.5.4.29 (OBJ_presentationAddress) */, 874 /* 2.5.4.30 (OBJ_supportedApplicationContext) */, 875 /* 2.5.4.31 (OBJ_member) */, 876 /* 2.5.4.32 (OBJ_owner) */, 877 /* 2.5.4.33 (OBJ_roleOccupant) */, 878 /* 2.5.4.34 (OBJ_seeAlso) */, 879 /* 2.5.4.35 (OBJ_userPassword) */, 880 /* 2.5.4.36 (OBJ_userCertificate) */, 881 /* 2.5.4.37 (OBJ_cACertificate) */, 882 /* 2.5.4.38 (OBJ_authorityRevocationList) */, 883 /* 2.5.4.39 (OBJ_certificateRevocationList) */, 884 /* 2.5.4.40 (OBJ_crossCertificatePair) */, 173 /* 2.5.4.41 (OBJ_name) */, 99 /* 2.5.4.42 (OBJ_givenName) */, 101 /* 2.5.4.43 (OBJ_initials) */, 509 /* 2.5.4.44 (OBJ_generationQualifier) */, 503 /* 2.5.4.45 (OBJ_x500UniqueIdentifier) */, 174 /* 2.5.4.46 (OBJ_dnQualifier) */, 885 /* 2.5.4.47 (OBJ_enhancedSearchGuide) */, 886 /* 2.5.4.48 (OBJ_protocolInformation) */, 887 /* 2.5.4.49 (OBJ_distinguishedName) */, 888 /* 2.5.4.50 (OBJ_uniqueMember) */, 889 /* 2.5.4.51 (OBJ_houseIdentifier) */, 890 /* 2.5.4.52 (OBJ_supportedAlgorithms) */, 891 /* 2.5.4.53 (OBJ_deltaRevocationList) */, 892 /* 2.5.4.54 (OBJ_dmdName) */, 510 /* 2.5.4.65 (OBJ_pseudonym) */, 400 /* 2.5.4.72 (OBJ_role) */, 769 /* 2.5.29.9 (OBJ_subject_directory_attributes) */, 82 /* 2.5.29.14 
(OBJ_subject_key_identifier) */, 83 /* 2.5.29.15 (OBJ_key_usage) */, 84 /* 2.5.29.16 (OBJ_private_key_usage_period) */, 85 /* 2.5.29.17 (OBJ_subject_alt_name) */, 86 /* 2.5.29.18 (OBJ_issuer_alt_name) */, 87 /* 2.5.29.19 (OBJ_basic_constraints) */, 88 /* 2.5.29.20 (OBJ_crl_number) */, 141 /* 2.5.29.21 (OBJ_crl_reason) */, 430 /* 2.5.29.23 (OBJ_hold_instruction_code) */, 142 /* 2.5.29.24 (OBJ_invalidity_date) */, 140 /* 2.5.29.27 (OBJ_delta_crl) */, 770 /* 2.5.29.28 (OBJ_issuing_distribution_point) */, 771 /* 2.5.29.29 (OBJ_certificate_issuer) */, 666 /* 2.5.29.30 (OBJ_name_constraints) */, 103 /* 2.5.29.31 (OBJ_crl_distribution_points) */, 89 /* 2.5.29.32 (OBJ_certificate_policies) */, 747 /* 2.5.29.33 (OBJ_policy_mappings) */, 90 /* 2.5.29.35 (OBJ_authority_key_identifier) */, 401 /* 2.5.29.36 (OBJ_policy_constraints) */, 126 /* 2.5.29.37 (OBJ_ext_key_usage) */, 857 /* 2.5.29.46 (OBJ_freshest_crl) */, 748 /* 2.5.29.54 (OBJ_inhibit_any_policy) */, 402 /* 2.5.29.55 (OBJ_target_information) */, 403 /* 2.5.29.56 (OBJ_no_rev_avail) */, 513 /* 2.23.42.0 (OBJ_set_ctype) */, 514 /* 2.23.42.1 (OBJ_set_msgExt) */, 515 /* 2.23.42.3 (OBJ_set_attr) */, 516 /* 2.23.42.5 (OBJ_set_policy) */, 517 /* 2.23.42.7 (OBJ_set_certExt) */, 518 /* 2.23.42.8 (OBJ_set_brand) */, 679 /* 2.23.43.1 (OBJ_wap_wsg) */, 382 /* 1.3.6.1.1 (OBJ_Directory) */, 383 /* 1.3.6.1.2 (OBJ_Management) */, 384 /* 1.3.6.1.3 (OBJ_Experimental) */, 385 /* 1.3.6.1.4 (OBJ_Private) */, 386 /* 1.3.6.1.5 (OBJ_Security) */, 387 /* 1.3.6.1.6 (OBJ_SNMPv2) */, 388 /* 1.3.6.1.7 (OBJ_Mail) */, 376 /* 1.3.14.3.2 (OBJ_algorithm) */, 395 /* 2.5.1.5.55 (OBJ_clearance) */, 19 /* 2.5.8.1.1 (OBJ_rsa) */, 96 /* 2.5.8.3.100 (OBJ_mdc2WithRSA) */, 95 /* 2.5.8.3.101 (OBJ_mdc2) */, 746 /* 2.5.29.32.0 (OBJ_any_policy) */, 910 /* 2.5.29.37.0 (OBJ_anyExtendedKeyUsage) */, 519 /* 2.23.42.0.0 (OBJ_setct_PANData) */, 520 /* 2.23.42.0.1 (OBJ_setct_PANToken) */, 521 /* 2.23.42.0.2 (OBJ_setct_PANOnly) */, 522 /* 2.23.42.0.3 (OBJ_setct_OIData) */, 
523 /* 2.23.42.0.4 (OBJ_setct_PI) */, 524 /* 2.23.42.0.5 (OBJ_setct_PIData) */, 525 /* 2.23.42.0.6 (OBJ_setct_PIDataUnsigned) */, 526 /* 2.23.42.0.7 (OBJ_setct_HODInput) */, 527 /* 2.23.42.0.8 (OBJ_setct_AuthResBaggage) */, 528 /* 2.23.42.0.9 (OBJ_setct_AuthRevReqBaggage) */, 529 /* 2.23.42.0.10 (OBJ_setct_AuthRevResBaggage) */, 530 /* 2.23.42.0.11 (OBJ_setct_CapTokenSeq) */, 531 /* 2.23.42.0.12 (OBJ_setct_PInitResData) */, 532 /* 2.23.42.0.13 (OBJ_setct_PI_TBS) */, 533 /* 2.23.42.0.14 (OBJ_setct_PResData) */, 534 /* 2.23.42.0.16 (OBJ_setct_AuthReqTBS) */, 535 /* 2.23.42.0.17 (OBJ_setct_AuthResTBS) */, 536 /* 2.23.42.0.18 (OBJ_setct_AuthResTBSX) */, 537 /* 2.23.42.0.19 (OBJ_setct_AuthTokenTBS) */, 538 /* 2.23.42.0.20 (OBJ_setct_CapTokenData) */, 539 /* 2.23.42.0.21 (OBJ_setct_CapTokenTBS) */, 540 /* 2.23.42.0.22 (OBJ_setct_AcqCardCodeMsg) */, 541 /* 2.23.42.0.23 (OBJ_setct_AuthRevReqTBS) */, 542 /* 2.23.42.0.24 (OBJ_setct_AuthRevResData) */, 543 /* 2.23.42.0.25 (OBJ_setct_AuthRevResTBS) */, 544 /* 2.23.42.0.26 (OBJ_setct_CapReqTBS) */, 545 /* 2.23.42.0.27 (OBJ_setct_CapReqTBSX) */, 546 /* 2.23.42.0.28 (OBJ_setct_CapResData) */, 547 /* 2.23.42.0.29 (OBJ_setct_CapRevReqTBS) */, 548 /* 2.23.42.0.30 (OBJ_setct_CapRevReqTBSX) */, 549 /* 2.23.42.0.31 (OBJ_setct_CapRevResData) */, 550 /* 2.23.42.0.32 (OBJ_setct_CredReqTBS) */, 551 /* 2.23.42.0.33 (OBJ_setct_CredReqTBSX) */, 552 /* 2.23.42.0.34 (OBJ_setct_CredResData) */, 553 /* 2.23.42.0.35 (OBJ_setct_CredRevReqTBS) */, 554 /* 2.23.42.0.36 (OBJ_setct_CredRevReqTBSX) */, 555 /* 2.23.42.0.37 (OBJ_setct_CredRevResData) */, 556 /* 2.23.42.0.38 (OBJ_setct_PCertReqData) */, 557 /* 2.23.42.0.39 (OBJ_setct_PCertResTBS) */, 558 /* 2.23.42.0.40 (OBJ_setct_BatchAdminReqData) */, 559 /* 2.23.42.0.41 (OBJ_setct_BatchAdminResData) */, 560 /* 2.23.42.0.42 (OBJ_setct_CardCInitResTBS) */, 561 /* 2.23.42.0.43 (OBJ_setct_MeAqCInitResTBS) */, 562 /* 2.23.42.0.44 (OBJ_setct_RegFormResTBS) */, 563 /* 2.23.42.0.45 (OBJ_setct_CertReqData) */, 
564 /* 2.23.42.0.46 (OBJ_setct_CertReqTBS) */, 565 /* 2.23.42.0.47 (OBJ_setct_CertResData) */, 566 /* 2.23.42.0.48 (OBJ_setct_CertInqReqTBS) */, 567 /* 2.23.42.0.49 (OBJ_setct_ErrorTBS) */, 568 /* 2.23.42.0.50 (OBJ_setct_PIDualSignedTBE) */, 569 /* 2.23.42.0.51 (OBJ_setct_PIUnsignedTBE) */, 570 /* 2.23.42.0.52 (OBJ_setct_AuthReqTBE) */, 571 /* 2.23.42.0.53 (OBJ_setct_AuthResTBE) */, 572 /* 2.23.42.0.54 (OBJ_setct_AuthResTBEX) */, 573 /* 2.23.42.0.55 (OBJ_setct_AuthTokenTBE) */, 574 /* 2.23.42.0.56 (OBJ_setct_CapTokenTBE) */, 575 /* 2.23.42.0.57 (OBJ_setct_CapTokenTBEX) */, 576 /* 2.23.42.0.58 (OBJ_setct_AcqCardCodeMsgTBE) */, 577 /* 2.23.42.0.59 (OBJ_setct_AuthRevReqTBE) */, 578 /* 2.23.42.0.60 (OBJ_setct_AuthRevResTBE) */, 579 /* 2.23.42.0.61 (OBJ_setct_AuthRevResTBEB) */, 580 /* 2.23.42.0.62 (OBJ_setct_CapReqTBE) */, 581 /* 2.23.42.0.63 (OBJ_setct_CapReqTBEX) */, 582 /* 2.23.42.0.64 (OBJ_setct_CapResTBE) */, 583 /* 2.23.42.0.65 (OBJ_setct_CapRevReqTBE) */, 584 /* 2.23.42.0.66 (OBJ_setct_CapRevReqTBEX) */, 585 /* 2.23.42.0.67 (OBJ_setct_CapRevResTBE) */, 586 /* 2.23.42.0.68 (OBJ_setct_CredReqTBE) */, 587 /* 2.23.42.0.69 (OBJ_setct_CredReqTBEX) */, 588 /* 2.23.42.0.70 (OBJ_setct_CredResTBE) */, 589 /* 2.23.42.0.71 (OBJ_setct_CredRevReqTBE) */, 590 /* 2.23.42.0.72 (OBJ_setct_CredRevReqTBEX) */, 591 /* 2.23.42.0.73 (OBJ_setct_CredRevResTBE) */, 592 /* 2.23.42.0.74 (OBJ_setct_BatchAdminReqTBE) */, 593 /* 2.23.42.0.75 (OBJ_setct_BatchAdminResTBE) */, 594 /* 2.23.42.0.76 (OBJ_setct_RegFormReqTBE) */, 595 /* 2.23.42.0.77 (OBJ_setct_CertReqTBE) */, 596 /* 2.23.42.0.78 (OBJ_setct_CertReqTBEX) */, 597 /* 2.23.42.0.79 (OBJ_setct_CertResTBE) */, 598 /* 2.23.42.0.80 (OBJ_setct_CRLNotificationTBS) */, 599 /* 2.23.42.0.81 (OBJ_setct_CRLNotificationResTBS) */, 600 /* 2.23.42.0.82 (OBJ_setct_BCIDistributionTBS) */, 601 /* 2.23.42.1.1 (OBJ_setext_genCrypt) */, 602 /* 2.23.42.1.3 (OBJ_setext_miAuth) */, 603 /* 2.23.42.1.4 (OBJ_setext_pinSecure) */, 604 /* 2.23.42.1.5 
(OBJ_setext_pinAny) */, 605 /* 2.23.42.1.7 (OBJ_setext_track2) */, 606 /* 2.23.42.1.8 (OBJ_setext_cv) */, 620 /* 2.23.42.3.0 (OBJ_setAttr_Cert) */, 621 /* 2.23.42.3.1 (OBJ_setAttr_PGWYcap) */, 622 /* 2.23.42.3.2 (OBJ_setAttr_TokenType) */, 623 /* 2.23.42.3.3 (OBJ_setAttr_IssCap) */, 607 /* 2.23.42.5.0 (OBJ_set_policy_root) */, 608 /* 2.23.42.7.0 (OBJ_setCext_hashedRoot) */, 609 /* 2.23.42.7.1 (OBJ_setCext_certType) */, 610 /* 2.23.42.7.2 (OBJ_setCext_merchData) */, 611 /* 2.23.42.7.3 (OBJ_setCext_cCertRequired) */, 612 /* 2.23.42.7.4 (OBJ_setCext_tunneling) */, 613 /* 2.23.42.7.5 (OBJ_setCext_setExt) */, 614 /* 2.23.42.7.6 (OBJ_setCext_setQualf) */, 615 /* 2.23.42.7.7 (OBJ_setCext_PGWYcapabilities) */, 616 /* 2.23.42.7.8 (OBJ_setCext_TokenIdentifier) */, 617 /* 2.23.42.7.9 (OBJ_setCext_Track2Data) */, 618 /* 2.23.42.7.10 (OBJ_setCext_TokenType) */, 619 /* 2.23.42.7.11 (OBJ_setCext_IssuerCapabilities) */, 636 /* 2.23.42.8.1 (OBJ_set_brand_IATA_ATA) */, 640 /* 2.23.42.8.4 (OBJ_set_brand_Visa) */, 641 /* 2.23.42.8.5 (OBJ_set_brand_MasterCard) */, 637 /* 2.23.42.8.30 (OBJ_set_brand_Diners) */, 638 /* 2.23.42.8.34 (OBJ_set_brand_AmericanExpress) */, 639 /* 2.23.42.8.35 (OBJ_set_brand_JCB) */, 805 /* 1.2.643.2.2 (OBJ_cryptopro) */, 806 /* 1.2.643.2.9 (OBJ_cryptocom) */, 184 /* 1.2.840.10040 (OBJ_X9_57) */, 405 /* 1.2.840.10045 (OBJ_ansi_X9_62) */, 389 /* 1.3.6.1.4.1 (OBJ_Enterprises) */, 504 /* 1.3.6.1.7.1 (OBJ_mime_mhs) */, 104 /* 1.3.14.3.2.3 (OBJ_md5WithRSA) */, 29 /* 1.3.14.3.2.6 (OBJ_des_ecb) */, 31 /* 1.3.14.3.2.7 (OBJ_des_cbc) */, 45 /* 1.3.14.3.2.8 (OBJ_des_ofb64) */, 30 /* 1.3.14.3.2.9 (OBJ_des_cfb64) */, 377 /* 1.3.14.3.2.11 (OBJ_rsaSignature) */, 67 /* 1.3.14.3.2.12 (OBJ_dsa_2) */, 66 /* 1.3.14.3.2.13 (OBJ_dsaWithSHA) */, 42 /* 1.3.14.3.2.15 (OBJ_shaWithRSAEncryption) */, 32 /* 1.3.14.3.2.17 (OBJ_des_ede_ecb) */, 41 /* 1.3.14.3.2.18 (OBJ_sha) */, 64 /* 1.3.14.3.2.26 (OBJ_sha1) */, 70 /* 1.3.14.3.2.27 (OBJ_dsaWithSHA1_2) */, 115 /* 1.3.14.3.2.29 
(OBJ_sha1WithRSA) */, 117 /* 1.3.36.3.2.1 (OBJ_ripemd160) */, 143 /* 1.3.101.1.4.1 (OBJ_sxnet) */, 721 /* 1.3.132.0.1 (OBJ_sect163k1) */, 722 /* 1.3.132.0.2 (OBJ_sect163r1) */, 728 /* 1.3.132.0.3 (OBJ_sect239k1) */, 717 /* 1.3.132.0.4 (OBJ_sect113r1) */, 718 /* 1.3.132.0.5 (OBJ_sect113r2) */, 704 /* 1.3.132.0.6 (OBJ_secp112r1) */, 705 /* 1.3.132.0.7 (OBJ_secp112r2) */, 709 /* 1.3.132.0.8 (OBJ_secp160r1) */, 708 /* 1.3.132.0.9 (OBJ_secp160k1) */, 714 /* 1.3.132.0.10 (OBJ_secp256k1) */, 723 /* 1.3.132.0.15 (OBJ_sect163r2) */, 729 /* 1.3.132.0.16 (OBJ_sect283k1) */, 730 /* 1.3.132.0.17 (OBJ_sect283r1) */, 719 /* 1.3.132.0.22 (OBJ_sect131r1) */, 720 /* 1.3.132.0.23 (OBJ_sect131r2) */, 724 /* 1.3.132.0.24 (OBJ_sect193r1) */, 725 /* 1.3.132.0.25 (OBJ_sect193r2) */, 726 /* 1.3.132.0.26 (OBJ_sect233k1) */, 727 /* 1.3.132.0.27 (OBJ_sect233r1) */, 706 /* 1.3.132.0.28 (OBJ_secp128r1) */, 707 /* 1.3.132.0.29 (OBJ_secp128r2) */, 710 /* 1.3.132.0.30 (OBJ_secp160r2) */, 711 /* 1.3.132.0.31 (OBJ_secp192k1) */, 712 /* 1.3.132.0.32 (OBJ_secp224k1) */, 713 /* 1.3.132.0.33 (OBJ_secp224r1) */, 715 /* 1.3.132.0.34 (OBJ_secp384r1) */, 716 /* 1.3.132.0.35 (OBJ_secp521r1) */, 731 /* 1.3.132.0.36 (OBJ_sect409k1) */, 732 /* 1.3.132.0.37 (OBJ_sect409r1) */, 733 /* 1.3.132.0.38 (OBJ_sect571k1) */, 734 /* 1.3.132.0.39 (OBJ_sect571r1) */, 624 /* 2.23.42.3.0.0 (OBJ_set_rootKeyThumb) */, 625 /* 2.23.42.3.0.1 (OBJ_set_addPolicy) */, 626 /* 2.23.42.3.2.1 (OBJ_setAttr_Token_EMV) */, 627 /* 2.23.42.3.2.2 (OBJ_setAttr_Token_B0Prime) */, 628 /* 2.23.42.3.3.3 (OBJ_setAttr_IssCap_CVM) */, 629 /* 2.23.42.3.3.4 (OBJ_setAttr_IssCap_T2) */, 630 /* 2.23.42.3.3.5 (OBJ_setAttr_IssCap_Sig) */, 642 /* 2.23.42.8.6011 (OBJ_set_brand_Novus) */, 735 /* 2.23.43.1.4.1 (OBJ_wap_wsg_idm_ecid_wtls1) */, 736 /* 2.23.43.1.4.3 (OBJ_wap_wsg_idm_ecid_wtls3) */, 737 /* 2.23.43.1.4.4 (OBJ_wap_wsg_idm_ecid_wtls4) */, 738 /* 2.23.43.1.4.5 (OBJ_wap_wsg_idm_ecid_wtls5) */, 739 /* 2.23.43.1.4.6 (OBJ_wap_wsg_idm_ecid_wtls6) */, 740 /* 
2.23.43.1.4.7 (OBJ_wap_wsg_idm_ecid_wtls7) */, 741 /* 2.23.43.1.4.8 (OBJ_wap_wsg_idm_ecid_wtls8) */, 742 /* 2.23.43.1.4.9 (OBJ_wap_wsg_idm_ecid_wtls9) */, 743 /* 2.23.43.1.4.10 (OBJ_wap_wsg_idm_ecid_wtls10) */, 744 /* 2.23.43.1.4.11 (OBJ_wap_wsg_idm_ecid_wtls11) */, 745 /* 2.23.43.1.4.12 (OBJ_wap_wsg_idm_ecid_wtls12) */, 804 /* 1.0.10118.3.0.55 (OBJ_whirlpool) */, 773 /* 1.2.410.200004 (OBJ_kisa) */, 807 /* 1.2.643.2.2.3 (OBJ_id_GostR3411_94_with_GostR3410_2001) */, 808 /* 1.2.643.2.2.4 (OBJ_id_GostR3411_94_with_GostR3410_94) */, 809 /* 1.2.643.2.2.9 (OBJ_id_GostR3411_94) */, 810 /* 1.2.643.2.2.10 (OBJ_id_HMACGostR3411_94) */, 811 /* 1.2.643.2.2.19 (OBJ_id_GostR3410_2001) */, 812 /* 1.2.643.2.2.20 (OBJ_id_GostR3410_94) */, 813 /* 1.2.643.2.2.21 (OBJ_id_Gost28147_89) */, 815 /* 1.2.643.2.2.22 (OBJ_id_Gost28147_89_MAC) */, 816 /* 1.2.643.2.2.23 (OBJ_id_GostR3411_94_prf) */, 817 /* 1.2.643.2.2.98 (OBJ_id_GostR3410_2001DH) */, 818 /* 1.2.643.2.2.99 (OBJ_id_GostR3410_94DH) */, 1 /* 1.2.840.113549 (OBJ_rsadsi) */, 185 /* 1.2.840.10040.4 (OBJ_X9cm) */, 127 /* 1.3.6.1.5.5.7 (OBJ_id_pkix) */, 505 /* 1.3.6.1.7.1.1 (OBJ_mime_mhs_headings) */, 506 /* 1.3.6.1.7.1.2 (OBJ_mime_mhs_bodies) */, 119 /* 1.3.36.3.3.1.2 (OBJ_ripemd160WithRSA) */, 937 /* 1.3.132.1.11.0 (OBJ_dhSinglePass_stdDH_sha224kdf_scheme) */, 938 /* 1.3.132.1.11.1 (OBJ_dhSinglePass_stdDH_sha256kdf_scheme) */, 939 /* 1.3.132.1.11.2 (OBJ_dhSinglePass_stdDH_sha384kdf_scheme) */, 940 /* 1.3.132.1.11.3 (OBJ_dhSinglePass_stdDH_sha512kdf_scheme) */, 942 /* 1.3.132.1.14.0 (OBJ_dhSinglePass_cofactorDH_sha224kdf_scheme) */, 943 /* 1.3.132.1.14.1 (OBJ_dhSinglePass_cofactorDH_sha256kdf_scheme) */, 944 /* 1.3.132.1.14.2 (OBJ_dhSinglePass_cofactorDH_sha384kdf_scheme) */, 945 /* 1.3.132.1.14.3 (OBJ_dhSinglePass_cofactorDH_sha512kdf_scheme) */, 631 /* 2.23.42.3.3.3.1 (OBJ_setAttr_GenCryptgrm) */, 632 /* 2.23.42.3.3.4.1 (OBJ_setAttr_T2Enc) */, 633 /* 2.23.42.3.3.4.2 (OBJ_setAttr_T2cleartxt) */, 634 /* 2.23.42.3.3.5.1 
(OBJ_setAttr_TokICCsig) */, 635 /* 2.23.42.3.3.5.2 (OBJ_setAttr_SecDevSig) */, 436 /* 0.9.2342.19200300 (OBJ_ucl) */, 820 /* 1.2.643.2.2.14.0 (OBJ_id_Gost28147_89_None_KeyMeshing) */, 819 /* 1.2.643.2.2.14.1 (OBJ_id_Gost28147_89_CryptoPro_KeyMeshing) */, 845 /* 1.2.643.2.2.20.1 (OBJ_id_GostR3410_94_a) */, 846 /* 1.2.643.2.2.20.2 (OBJ_id_GostR3410_94_aBis) */, 847 /* 1.2.643.2.2.20.3 (OBJ_id_GostR3410_94_b) */, 848 /* 1.2.643.2.2.20.4 (OBJ_id_GostR3410_94_bBis) */, 821 /* 1.2.643.2.2.30.0 (OBJ_id_GostR3411_94_TestParamSet) */, 822 /* 1.2.643.2.2.30.1 (OBJ_id_GostR3411_94_CryptoProParamSet) */, 823 /* 1.2.643.2.2.31.0 (OBJ_id_Gost28147_89_TestParamSet) */, 824 /* 1.2.643.2.2.31.1 (OBJ_id_Gost28147_89_CryptoPro_A_ParamSet) */, 825 /* 1.2.643.2.2.31.2 (OBJ_id_Gost28147_89_CryptoPro_B_ParamSet) */, 826 /* 1.2.643.2.2.31.3 (OBJ_id_Gost28147_89_CryptoPro_C_ParamSet) */, 827 /* 1.2.643.2.2.31.4 (OBJ_id_Gost28147_89_CryptoPro_D_ParamSet) */, 828 /* 1.2.643.2.2.31.5 (OBJ_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet) */ , 829 /* 1.2.643.2.2.31.6 (OBJ_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet) */ , 830 /* 1.2.643.2.2.31.7 (OBJ_id_Gost28147_89_CryptoPro_RIC_1_ParamSet) */, 831 /* 1.2.643.2.2.32.0 (OBJ_id_GostR3410_94_TestParamSet) */, 832 /* 1.2.643.2.2.32.2 (OBJ_id_GostR3410_94_CryptoPro_A_ParamSet) */, 833 /* 1.2.643.2.2.32.3 (OBJ_id_GostR3410_94_CryptoPro_B_ParamSet) */, 834 /* 1.2.643.2.2.32.4 (OBJ_id_GostR3410_94_CryptoPro_C_ParamSet) */, 835 /* 1.2.643.2.2.32.5 (OBJ_id_GostR3410_94_CryptoPro_D_ParamSet) */, 836 /* 1.2.643.2.2.33.1 (OBJ_id_GostR3410_94_CryptoPro_XchA_ParamSet) */, 837 /* 1.2.643.2.2.33.2 (OBJ_id_GostR3410_94_CryptoPro_XchB_ParamSet) */, 838 /* 1.2.643.2.2.33.3 (OBJ_id_GostR3410_94_CryptoPro_XchC_ParamSet) */, 839 /* 1.2.643.2.2.35.0 (OBJ_id_GostR3410_2001_TestParamSet) */, 840 /* 1.2.643.2.2.35.1 (OBJ_id_GostR3410_2001_CryptoPro_A_ParamSet) */, 841 /* 1.2.643.2.2.35.2 (OBJ_id_GostR3410_2001_CryptoPro_B_ParamSet) */, 842 /* 1.2.643.2.2.35.3 
(OBJ_id_GostR3410_2001_CryptoPro_C_ParamSet) */, 843 /* 1.2.643.2.2.36.0 (OBJ_id_GostR3410_2001_CryptoPro_XchA_ParamSet) */, 844 /* 1.2.643.2.2.36.1 (OBJ_id_GostR3410_2001_CryptoPro_XchB_ParamSet) */, 2 /* 1.2.840.113549.1 (OBJ_pkcs) */, 431 /* 1.2.840.10040.2.1 (OBJ_hold_instruction_none) */, 432 /* 1.2.840.10040.2.2 (OBJ_hold_instruction_call_issuer) */, 433 /* 1.2.840.10040.2.3 (OBJ_hold_instruction_reject) */, 116 /* 1.2.840.10040.4.1 (OBJ_dsa) */, 113 /* 1.2.840.10040.4.3 (OBJ_dsaWithSHA1) */, 406 /* 1.2.840.10045.1.1 (OBJ_X9_62_prime_field) */, 407 /* 1.2.840.10045.1.2 (OBJ_X9_62_characteristic_two_field) */, 408 /* 1.2.840.10045.2.1 (OBJ_X9_62_id_ecPublicKey) */, 416 /* 1.2.840.10045.4.1 (OBJ_ecdsa_with_SHA1) */, 791 /* 1.2.840.10045.4.2 (OBJ_ecdsa_with_Recommended) */, 792 /* 1.2.840.10045.4.3 (OBJ_ecdsa_with_Specified) */, 920 /* 1.2.840.10046.2.1 (OBJ_dhpublicnumber) */, 258 /* 1.3.6.1.5.5.7.0 (OBJ_id_pkix_mod) */, 175 /* 1.3.6.1.5.5.7.1 (OBJ_id_pe) */, 259 /* 1.3.6.1.5.5.7.2 (OBJ_id_qt) */, 128 /* 1.3.6.1.5.5.7.3 (OBJ_id_kp) */, 260 /* 1.3.6.1.5.5.7.4 (OBJ_id_it) */, 261 /* 1.3.6.1.5.5.7.5 (OBJ_id_pkip) */, 262 /* 1.3.6.1.5.5.7.6 (OBJ_id_alg) */, 263 /* 1.3.6.1.5.5.7.7 (OBJ_id_cmc) */, 264 /* 1.3.6.1.5.5.7.8 (OBJ_id_on) */, 265 /* 1.3.6.1.5.5.7.9 (OBJ_id_pda) */, 266 /* 1.3.6.1.5.5.7.10 (OBJ_id_aca) */, 267 /* 1.3.6.1.5.5.7.11 (OBJ_id_qcs) */, 268 /* 1.3.6.1.5.5.7.12 (OBJ_id_cct) */, 662 /* 1.3.6.1.5.5.7.21 (OBJ_id_ppl) */, 176 /* 1.3.6.1.5.5.7.48 (OBJ_id_ad) */, 507 /* 1.3.6.1.7.1.1.1 (OBJ_id_hex_partial_message) */, 508 /* 1.3.6.1.7.1.1.2 (OBJ_id_hex_multipart_message) */, 57 /* 2.16.840.1.113730 (OBJ_netscape) */, 754 /* 0.3.4401.5.3.1.9.1 (OBJ_camellia_128_ecb) */, 766 /* 0.3.4401.5.3.1.9.3 (OBJ_camellia_128_ofb128) */, 757 /* 0.3.4401.5.3.1.9.4 (OBJ_camellia_128_cfb128) */, 755 /* 0.3.4401.5.3.1.9.21 (OBJ_camellia_192_ecb) */, 767 /* 0.3.4401.5.3.1.9.23 (OBJ_camellia_192_ofb128) */, 758 /* 0.3.4401.5.3.1.9.24 (OBJ_camellia_192_cfb128) */, 756 /* 
0.3.4401.5.3.1.9.41 (OBJ_camellia_256_ecb) */, 768 /* 0.3.4401.5.3.1.9.43 (OBJ_camellia_256_ofb128) */, 759 /* 0.3.4401.5.3.1.9.44 (OBJ_camellia_256_cfb128) */, 437 /* 0.9.2342.19200300.100 (OBJ_pilot) */, 776 /* 1.2.410.200004.1.3 (OBJ_seed_ecb) */, 777 /* 1.2.410.200004.1.4 (OBJ_seed_cbc) */, 779 /* 1.2.410.200004.1.5 (OBJ_seed_cfb128) */, 778 /* 1.2.410.200004.1.6 (OBJ_seed_ofb128) */, 852 /* 1.2.643.2.9.1.3.3 (OBJ_id_GostR3411_94_with_GostR3410_94_cc) */, 853 /* 1.2.643.2.9.1.3.4 (OBJ_id_GostR3411_94_with_GostR3410_2001_cc) */, 850 /* 1.2.643.2.9.1.5.3 (OBJ_id_GostR3410_94_cc) */, 851 /* 1.2.643.2.9.1.5.4 (OBJ_id_GostR3410_2001_cc) */, 849 /* 1.2.643.2.9.1.6.1 (OBJ_id_Gost28147_89_cc) */, 854 /* 1.2.643.2.9.1.8.1 (OBJ_id_GostR3410_2001_ParamSet_cc) */, 186 /* 1.2.840.113549.1.1 (OBJ_pkcs1) */, 27 /* 1.2.840.113549.1.3 (OBJ_pkcs3) */, 187 /* 1.2.840.113549.1.5 (OBJ_pkcs5) */, 20 /* 1.2.840.113549.1.7 (OBJ_pkcs7) */, 47 /* 1.2.840.113549.1.9 (OBJ_pkcs9) */, 3 /* 1.2.840.113549.2.2 (OBJ_md2) */, 257 /* 1.2.840.113549.2.4 (OBJ_md4) */, 4 /* 1.2.840.113549.2.5 (OBJ_md5) */, 797 /* 1.2.840.113549.2.6 (OBJ_hmacWithMD5) */, 163 /* 1.2.840.113549.2.7 (OBJ_hmacWithSHA1) */, 798 /* 1.2.840.113549.2.8 (OBJ_hmacWithSHA224) */, 799 /* 1.2.840.113549.2.9 (OBJ_hmacWithSHA256) */, 800 /* 1.2.840.113549.2.10 (OBJ_hmacWithSHA384) */, 801 /* 1.2.840.113549.2.11 (OBJ_hmacWithSHA512) */, 37 /* 1.2.840.113549.3.2 (OBJ_rc2_cbc) */, 5 /* 1.2.840.113549.3.4 (OBJ_rc4) */, 44 /* 1.2.840.113549.3.7 (OBJ_des_ede3_cbc) */, 120 /* 1.2.840.113549.3.8 (OBJ_rc5_cbc) */, 643 /* 1.2.840.113549.3.10 (OBJ_des_cdmf) */, 680 /* 1.2.840.10045.1.2.3 (OBJ_X9_62_id_characteristic_two_basis) */, 684 /* 1.2.840.10045.3.0.1 (OBJ_X9_62_c2pnb163v1) */, 685 /* 1.2.840.10045.3.0.2 (OBJ_X9_62_c2pnb163v2) */, 686 /* 1.2.840.10045.3.0.3 (OBJ_X9_62_c2pnb163v3) */, 687 /* 1.2.840.10045.3.0.4 (OBJ_X9_62_c2pnb176v1) */, 688 /* 1.2.840.10045.3.0.5 (OBJ_X9_62_c2tnb191v1) */, 689 /* 1.2.840.10045.3.0.6 
(OBJ_X9_62_c2tnb191v2) */, 690 /* 1.2.840.10045.3.0.7 (OBJ_X9_62_c2tnb191v3) */, 691 /* 1.2.840.10045.3.0.8 (OBJ_X9_62_c2onb191v4) */, 692 /* 1.2.840.10045.3.0.9 (OBJ_X9_62_c2onb191v5) */, 693 /* 1.2.840.10045.3.0.10 (OBJ_X9_62_c2pnb208w1) */, 694 /* 1.2.840.10045.3.0.11 (OBJ_X9_62_c2tnb239v1) */, 695 /* 1.2.840.10045.3.0.12 (OBJ_X9_62_c2tnb239v2) */, 696 /* 1.2.840.10045.3.0.13 (OBJ_X9_62_c2tnb239v3) */, 697 /* 1.2.840.10045.3.0.14 (OBJ_X9_62_c2onb239v4) */, 698 /* 1.2.840.10045.3.0.15 (OBJ_X9_62_c2onb239v5) */, 699 /* 1.2.840.10045.3.0.16 (OBJ_X9_62_c2pnb272w1) */, 700 /* 1.2.840.10045.3.0.17 (OBJ_X9_62_c2pnb304w1) */, 701 /* 1.2.840.10045.3.0.18 (OBJ_X9_62_c2tnb359v1) */, 702 /* 1.2.840.10045.3.0.19 (OBJ_X9_62_c2pnb368w1) */, 703 /* 1.2.840.10045.3.0.20 (OBJ_X9_62_c2tnb431r1) */, 409 /* 1.2.840.10045.3.1.1 (OBJ_X9_62_prime192v1) */, 410 /* 1.2.840.10045.3.1.2 (OBJ_X9_62_prime192v2) */, 411 /* 1.2.840.10045.3.1.3 (OBJ_X9_62_prime192v3) */, 412 /* 1.2.840.10045.3.1.4 (OBJ_X9_62_prime239v1) */, 413 /* 1.2.840.10045.3.1.5 (OBJ_X9_62_prime239v2) */, 414 /* 1.2.840.10045.3.1.6 (OBJ_X9_62_prime239v3) */, 415 /* 1.2.840.10045.3.1.7 (OBJ_X9_62_prime256v1) */, 793 /* 1.2.840.10045.4.3.1 (OBJ_ecdsa_with_SHA224) */, 794 /* 1.2.840.10045.4.3.2 (OBJ_ecdsa_with_SHA256) */, 795 /* 1.2.840.10045.4.3.3 (OBJ_ecdsa_with_SHA384) */, 796 /* 1.2.840.10045.4.3.4 (OBJ_ecdsa_with_SHA512) */, 269 /* 1.3.6.1.5.5.7.0.1 (OBJ_id_pkix1_explicit_88) */, 270 /* 1.3.6.1.5.5.7.0.2 (OBJ_id_pkix1_implicit_88) */, 271 /* 1.3.6.1.5.5.7.0.3 (OBJ_id_pkix1_explicit_93) */, 272 /* 1.3.6.1.5.5.7.0.4 (OBJ_id_pkix1_implicit_93) */, 273 /* 1.3.6.1.5.5.7.0.5 (OBJ_id_mod_crmf) */, 274 /* 1.3.6.1.5.5.7.0.6 (OBJ_id_mod_cmc) */, 275 /* 1.3.6.1.5.5.7.0.7 (OBJ_id_mod_kea_profile_88) */, 276 /* 1.3.6.1.5.5.7.0.8 (OBJ_id_mod_kea_profile_93) */, 277 /* 1.3.6.1.5.5.7.0.9 (OBJ_id_mod_cmp) */, 278 /* 1.3.6.1.5.5.7.0.10 (OBJ_id_mod_qualified_cert_88) */, 279 /* 1.3.6.1.5.5.7.0.11 (OBJ_id_mod_qualified_cert_93) */, 280 /* 
1.3.6.1.5.5.7.0.12 (OBJ_id_mod_attribute_cert) */, 281 /* 1.3.6.1.5.5.7.0.13 (OBJ_id_mod_timestamp_protocol) */, 282 /* 1.3.6.1.5.5.7.0.14 (OBJ_id_mod_ocsp) */, 283 /* 1.3.6.1.5.5.7.0.15 (OBJ_id_mod_dvcs) */, 284 /* 1.3.6.1.5.5.7.0.16 (OBJ_id_mod_cmp2000) */, 177 /* 1.3.6.1.5.5.7.1.1 (OBJ_info_access) */, 285 /* 1.3.6.1.5.5.7.1.2 (OBJ_biometricInfo) */, 286 /* 1.3.6.1.5.5.7.1.3 (OBJ_qcStatements) */, 287 /* 1.3.6.1.5.5.7.1.4 (OBJ_ac_auditEntity) */, 288 /* 1.3.6.1.5.5.7.1.5 (OBJ_ac_targeting) */, 289 /* 1.3.6.1.5.5.7.1.6 (OBJ_aaControls) */, 290 /* 1.3.6.1.5.5.7.1.7 (OBJ_sbgp_ipAddrBlock) */, 291 /* 1.3.6.1.5.5.7.1.8 (OBJ_sbgp_autonomousSysNum) */, 292 /* 1.3.6.1.5.5.7.1.9 (OBJ_sbgp_routerIdentifier) */, 397 /* 1.3.6.1.5.5.7.1.10 (OBJ_ac_proxying) */, 398 /* 1.3.6.1.5.5.7.1.11 (OBJ_sinfo_access) */, 663 /* 1.3.6.1.5.5.7.1.14 (OBJ_proxyCertInfo) */, 164 /* 1.3.6.1.5.5.7.2.1 (OBJ_id_qt_cps) */, 165 /* 1.3.6.1.5.5.7.2.2 (OBJ_id_qt_unotice) */, 293 /* 1.3.6.1.5.5.7.2.3 (OBJ_textNotice) */, 129 /* 1.3.6.1.5.5.7.3.1 (OBJ_server_auth) */, 130 /* 1.3.6.1.5.5.7.3.2 (OBJ_client_auth) */, 131 /* 1.3.6.1.5.5.7.3.3 (OBJ_code_sign) */, 132 /* 1.3.6.1.5.5.7.3.4 (OBJ_email_protect) */, 294 /* 1.3.6.1.5.5.7.3.5 (OBJ_ipsecEndSystem) */, 295 /* 1.3.6.1.5.5.7.3.6 (OBJ_ipsecTunnel) */, 296 /* 1.3.6.1.5.5.7.3.7 (OBJ_ipsecUser) */, 133 /* 1.3.6.1.5.5.7.3.8 (OBJ_time_stamp) */, 180 /* 1.3.6.1.5.5.7.3.9 (OBJ_OCSP_sign) */, 297 /* 1.3.6.1.5.5.7.3.10 (OBJ_dvcs) */, 298 /* 1.3.6.1.5.5.7.4.1 (OBJ_id_it_caProtEncCert) */, 299 /* 1.3.6.1.5.5.7.4.2 (OBJ_id_it_signKeyPairTypes) */, 300 /* 1.3.6.1.5.5.7.4.3 (OBJ_id_it_encKeyPairTypes) */, 301 /* 1.3.6.1.5.5.7.4.4 (OBJ_id_it_preferredSymmAlg) */, 302 /* 1.3.6.1.5.5.7.4.5 (OBJ_id_it_caKeyUpdateInfo) */, 303 /* 1.3.6.1.5.5.7.4.6 (OBJ_id_it_currentCRL) */, 304 /* 1.3.6.1.5.5.7.4.7 (OBJ_id_it_unsupportedOIDs) */, 305 /* 1.3.6.1.5.5.7.4.8 (OBJ_id_it_subscriptionRequest) */, 306 /* 1.3.6.1.5.5.7.4.9 (OBJ_id_it_subscriptionResponse) */, 307 /* 
1.3.6.1.5.5.7.4.10 (OBJ_id_it_keyPairParamReq) */, 308 /* 1.3.6.1.5.5.7.4.11 (OBJ_id_it_keyPairParamRep) */, 309 /* 1.3.6.1.5.5.7.4.12 (OBJ_id_it_revPassphrase) */, 310 /* 1.3.6.1.5.5.7.4.13 (OBJ_id_it_implicitConfirm) */, 311 /* 1.3.6.1.5.5.7.4.14 (OBJ_id_it_confirmWaitTime) */, 312 /* 1.3.6.1.5.5.7.4.15 (OBJ_id_it_origPKIMessage) */, 784 /* 1.3.6.1.5.5.7.4.16 (OBJ_id_it_suppLangTags) */, 313 /* 1.3.6.1.5.5.7.5.1 (OBJ_id_regCtrl) */, 314 /* 1.3.6.1.5.5.7.5.2 (OBJ_id_regInfo) */, 323 /* 1.3.6.1.5.5.7.6.1 (OBJ_id_alg_des40) */, 324 /* 1.3.6.1.5.5.7.6.2 (OBJ_id_alg_noSignature) */, 325 /* 1.3.6.1.5.5.7.6.3 (OBJ_id_alg_dh_sig_hmac_sha1) */, 326 /* 1.3.6.1.5.5.7.6.4 (OBJ_id_alg_dh_pop) */, 327 /* 1.3.6.1.5.5.7.7.1 (OBJ_id_cmc_statusInfo) */, 328 /* 1.3.6.1.5.5.7.7.2 (OBJ_id_cmc_identification) */, 329 /* 1.3.6.1.5.5.7.7.3 (OBJ_id_cmc_identityProof) */, 330 /* 1.3.6.1.5.5.7.7.4 (OBJ_id_cmc_dataReturn) */, 331 /* 1.3.6.1.5.5.7.7.5 (OBJ_id_cmc_transactionId) */, 332 /* 1.3.6.1.5.5.7.7.6 (OBJ_id_cmc_senderNonce) */, 333 /* 1.3.6.1.5.5.7.7.7 (OBJ_id_cmc_recipientNonce) */, 334 /* 1.3.6.1.5.5.7.7.8 (OBJ_id_cmc_addExtensions) */, 335 /* 1.3.6.1.5.5.7.7.9 (OBJ_id_cmc_encryptedPOP) */, 336 /* 1.3.6.1.5.5.7.7.10 (OBJ_id_cmc_decryptedPOP) */, 337 /* 1.3.6.1.5.5.7.7.11 (OBJ_id_cmc_lraPOPWitness) */, 338 /* 1.3.6.1.5.5.7.7.15 (OBJ_id_cmc_getCert) */, 339 /* 1.3.6.1.5.5.7.7.16 (OBJ_id_cmc_getCRL) */, 340 /* 1.3.6.1.5.5.7.7.17 (OBJ_id_cmc_revokeRequest) */, 341 /* 1.3.6.1.5.5.7.7.18 (OBJ_id_cmc_regInfo) */, 342 /* 1.3.6.1.5.5.7.7.19 (OBJ_id_cmc_responseInfo) */, 343 /* 1.3.6.1.5.5.7.7.21 (OBJ_id_cmc_queryPending) */, 344 /* 1.3.6.1.5.5.7.7.22 (OBJ_id_cmc_popLinkRandom) */, 345 /* 1.3.6.1.5.5.7.7.23 (OBJ_id_cmc_popLinkWitness) */, 346 /* 1.3.6.1.5.5.7.7.24 (OBJ_id_cmc_confirmCertAcceptance) */, 347 /* 1.3.6.1.5.5.7.8.1 (OBJ_id_on_personalData) */, 858 /* 1.3.6.1.5.5.7.8.3 (OBJ_id_on_permanentIdentifier) */, 348 /* 1.3.6.1.5.5.7.9.1 (OBJ_id_pda_dateOfBirth) */, 349 /* 1.3.6.1.5.5.7.9.2 
(OBJ_id_pda_placeOfBirth) */, 351 /* 1.3.6.1.5.5.7.9.3 (OBJ_id_pda_gender) */, 352 /* 1.3.6.1.5.5.7.9.4 (OBJ_id_pda_countryOfCitizenship) */, 353 /* 1.3.6.1.5.5.7.9.5 (OBJ_id_pda_countryOfResidence) */, 354 /* 1.3.6.1.5.5.7.10.1 (OBJ_id_aca_authenticationInfo) */, 355 /* 1.3.6.1.5.5.7.10.2 (OBJ_id_aca_accessIdentity) */, 356 /* 1.3.6.1.5.5.7.10.3 (OBJ_id_aca_chargingIdentity) */, 357 /* 1.3.6.1.5.5.7.10.4 (OBJ_id_aca_group) */, 358 /* 1.3.6.1.5.5.7.10.5 (OBJ_id_aca_role) */, 399 /* 1.3.6.1.5.5.7.10.6 (OBJ_id_aca_encAttrs) */, 359 /* 1.3.6.1.5.5.7.11.1 (OBJ_id_qcs_pkixQCSyntax_v1) */, 360 /* 1.3.6.1.5.5.7.12.1 (OBJ_id_cct_crs) */, 361 /* 1.3.6.1.5.5.7.12.2 (OBJ_id_cct_PKIData) */, 362 /* 1.3.6.1.5.5.7.12.3 (OBJ_id_cct_PKIResponse) */, 664 /* 1.3.6.1.5.5.7.21.0 (OBJ_id_ppl_anyLanguage) */, 665 /* 1.3.6.1.5.5.7.21.1 (OBJ_id_ppl_inheritAll) */, 667 /* 1.3.6.1.5.5.7.21.2 (OBJ_Independent) */, 178 /* 1.3.6.1.5.5.7.48.1 (OBJ_ad_OCSP) */, 179 /* 1.3.6.1.5.5.7.48.2 (OBJ_ad_ca_issuers) */, 363 /* 1.3.6.1.5.5.7.48.3 (OBJ_ad_timeStamping) */, 364 /* 1.3.6.1.5.5.7.48.4 (OBJ_ad_dvcs) */, 785 /* 1.3.6.1.5.5.7.48.5 (OBJ_caRepository) */, 780 /* 1.3.6.1.5.5.8.1.1 (OBJ_hmac_md5) */, 781 /* 1.3.6.1.5.5.8.1.2 (OBJ_hmac_sha1) */, 58 /* 2.16.840.1.113730.1 (OBJ_netscape_cert_extension) */, 59 /* 2.16.840.1.113730.2 (OBJ_netscape_data_type) */, 438 /* 0.9.2342.19200300.100.1 (OBJ_pilotAttributeType) */, 439 /* 0.9.2342.19200300.100.3 (OBJ_pilotAttributeSyntax) */, 440 /* 0.9.2342.19200300.100.4 (OBJ_pilotObjectClass) */, 441 /* 0.9.2342.19200300.100.10 (OBJ_pilotGroups) */, 108 /* 1.2.840.113533.7.66.10 (OBJ_cast5_cbc) */, 112 /* 1.2.840.113533.7.66.12 (OBJ_pbeWithMD5AndCast5_CBC) */, 782 /* 1.2.840.113533.7.66.13 (OBJ_id_PasswordBasedMAC) */, 783 /* 1.2.840.113533.7.66.30 (OBJ_id_DHBasedMac) */, 6 /* 1.2.840.113549.1.1.1 (OBJ_rsaEncryption) */, 7 /* 1.2.840.113549.1.1.2 (OBJ_md2WithRSAEncryption) */, 396 /* 1.2.840.113549.1.1.3 (OBJ_md4WithRSAEncryption) */, 8 /* 1.2.840.113549.1.1.4 
(OBJ_md5WithRSAEncryption) */, 65 /* 1.2.840.113549.1.1.5 (OBJ_sha1WithRSAEncryption) */, 644 /* 1.2.840.113549.1.1.6 (OBJ_rsaOAEPEncryptionSET) */, 919 /* 1.2.840.113549.1.1.7 (OBJ_rsaesOaep) */, 911 /* 1.2.840.113549.1.1.8 (OBJ_mgf1) */, 935 /* 1.2.840.113549.1.1.9 (OBJ_pSpecified) */, 912 /* 1.2.840.113549.1.1.10 (OBJ_rsassaPss) */, 668 /* 1.2.840.113549.1.1.11 (OBJ_sha256WithRSAEncryption) */, 669 /* 1.2.840.113549.1.1.12 (OBJ_sha384WithRSAEncryption) */, 670 /* 1.2.840.113549.1.1.13 (OBJ_sha512WithRSAEncryption) */, 671 /* 1.2.840.113549.1.1.14 (OBJ_sha224WithRSAEncryption) */, 28 /* 1.2.840.113549.1.3.1 (OBJ_dhKeyAgreement) */, 9 /* 1.2.840.113549.1.5.1 (OBJ_pbeWithMD2AndDES_CBC) */, 10 /* 1.2.840.113549.1.5.3 (OBJ_pbeWithMD5AndDES_CBC) */, 168 /* 1.2.840.113549.1.5.4 (OBJ_pbeWithMD2AndRC2_CBC) */, 169 /* 1.2.840.113549.1.5.6 (OBJ_pbeWithMD5AndRC2_CBC) */, 170 /* 1.2.840.113549.1.5.10 (OBJ_pbeWithSHA1AndDES_CBC) */, 68 /* 1.2.840.113549.1.5.11 (OBJ_pbeWithSHA1AndRC2_CBC) */, 69 /* 1.2.840.113549.1.5.12 (OBJ_id_pbkdf2) */, 161 /* 1.2.840.113549.1.5.13 (OBJ_pbes2) */, 162 /* 1.2.840.113549.1.5.14 (OBJ_pbmac1) */, 21 /* 1.2.840.113549.1.7.1 (OBJ_pkcs7_data) */, 22 /* 1.2.840.113549.1.7.2 (OBJ_pkcs7_signed) */, 23 /* 1.2.840.113549.1.7.3 (OBJ_pkcs7_enveloped) */, 24 /* 1.2.840.113549.1.7.4 (OBJ_pkcs7_signedAndEnveloped) */, 25 /* 1.2.840.113549.1.7.5 (OBJ_pkcs7_digest) */, 26 /* 1.2.840.113549.1.7.6 (OBJ_pkcs7_encrypted) */, 48 /* 1.2.840.113549.1.9.1 (OBJ_pkcs9_emailAddress) */, 49 /* 1.2.840.113549.1.9.2 (OBJ_pkcs9_unstructuredName) */, 50 /* 1.2.840.113549.1.9.3 (OBJ_pkcs9_contentType) */, 51 /* 1.2.840.113549.1.9.4 (OBJ_pkcs9_messageDigest) */, 52 /* 1.2.840.113549.1.9.5 (OBJ_pkcs9_signingTime) */, 53 /* 1.2.840.113549.1.9.6 (OBJ_pkcs9_countersignature) */, 54 /* 1.2.840.113549.1.9.7 (OBJ_pkcs9_challengePassword) */, 55 /* 1.2.840.113549.1.9.8 (OBJ_pkcs9_unstructuredAddress) */, 56 /* 1.2.840.113549.1.9.9 (OBJ_pkcs9_extCertAttributes) */, 172 /* 
1.2.840.113549.1.9.14 (OBJ_ext_req) */, 167 /* 1.2.840.113549.1.9.15 (OBJ_SMIMECapabilities) */, 188 /* 1.2.840.113549.1.9.16 (OBJ_SMIME) */, 156 /* 1.2.840.113549.1.9.20 (OBJ_friendlyName) */, 157 /* 1.2.840.113549.1.9.21 (OBJ_localKeyID) */, 681 /* 1.2.840.10045.1.2.3.1 (OBJ_X9_62_onBasis) */, 682 /* 1.2.840.10045.1.2.3.2 (OBJ_X9_62_tpBasis) */, 683 /* 1.2.840.10045.1.2.3.3 (OBJ_X9_62_ppBasis) */, 417 /* 1.3.6.1.4.1.311.17.1 (OBJ_ms_csp_name) */, 856 /* 1.3.6.1.4.1.311.17.2 (OBJ_LocalKeySet) */, 390 /* 1.3.6.1.4.1.1466.344 (OBJ_dcObject) */, 91 /* 1.3.6.1.4.1.3029.1.2 (OBJ_bf_cbc) */, 315 /* 1.3.6.1.5.5.7.5.1.1 (OBJ_id_regCtrl_regToken) */, 316 /* 1.3.6.1.5.5.7.5.1.2 (OBJ_id_regCtrl_authenticator) */, 317 /* 1.3.6.1.5.5.7.5.1.3 (OBJ_id_regCtrl_pkiPublicationInfo) */, 318 /* 1.3.6.1.5.5.7.5.1.4 (OBJ_id_regCtrl_pkiArchiveOptions) */, 319 /* 1.3.6.1.5.5.7.5.1.5 (OBJ_id_regCtrl_oldCertID) */, 320 /* 1.3.6.1.5.5.7.5.1.6 (OBJ_id_regCtrl_protocolEncrKey) */, 321 /* 1.3.6.1.5.5.7.5.2.1 (OBJ_id_regInfo_utf8Pairs) */, 322 /* 1.3.6.1.5.5.7.5.2.2 (OBJ_id_regInfo_certReq) */, 365 /* 1.3.6.1.5.5.7.48.1.1 (OBJ_id_pkix_OCSP_basic) */, 366 /* 1.3.6.1.5.5.7.48.1.2 (OBJ_id_pkix_OCSP_Nonce) */, 367 /* 1.3.6.1.5.5.7.48.1.3 (OBJ_id_pkix_OCSP_CrlID) */, 368 /* 1.3.6.1.5.5.7.48.1.4 (OBJ_id_pkix_OCSP_acceptableResponses) */, 369 /* 1.3.6.1.5.5.7.48.1.5 (OBJ_id_pkix_OCSP_noCheck) */, 370 /* 1.3.6.1.5.5.7.48.1.6 (OBJ_id_pkix_OCSP_archiveCutoff) */, 371 /* 1.3.6.1.5.5.7.48.1.7 (OBJ_id_pkix_OCSP_serviceLocator) */, 372 /* 1.3.6.1.5.5.7.48.1.8 (OBJ_id_pkix_OCSP_extendedStatus) */, 373 /* 1.3.6.1.5.5.7.48.1.9 (OBJ_id_pkix_OCSP_valid) */, 374 /* 1.3.6.1.5.5.7.48.1.10 (OBJ_id_pkix_OCSP_path) */, 375 /* 1.3.6.1.5.5.7.48.1.11 (OBJ_id_pkix_OCSP_trustRoot) */, 921 /* 1.3.36.3.3.2.8.1.1.1 (OBJ_brainpoolP160r1) */, 922 /* 1.3.36.3.3.2.8.1.1.2 (OBJ_brainpoolP160t1) */, 923 /* 1.3.36.3.3.2.8.1.1.3 (OBJ_brainpoolP192r1) */, 924 /* 1.3.36.3.3.2.8.1.1.4 (OBJ_brainpoolP192t1) */, 925 /* 1.3.36.3.3.2.8.1.1.5 
(OBJ_brainpoolP224r1) */, 926 /* 1.3.36.3.3.2.8.1.1.6 (OBJ_brainpoolP224t1) */, 927 /* 1.3.36.3.3.2.8.1.1.7 (OBJ_brainpoolP256r1) */, 928 /* 1.3.36.3.3.2.8.1.1.8 (OBJ_brainpoolP256t1) */, 929 /* 1.3.36.3.3.2.8.1.1.9 (OBJ_brainpoolP320r1) */, 930 /* 1.3.36.3.3.2.8.1.1.10 (OBJ_brainpoolP320t1) */, 931 /* 1.3.36.3.3.2.8.1.1.11 (OBJ_brainpoolP384r1) */, 932 /* 1.3.36.3.3.2.8.1.1.12 (OBJ_brainpoolP384t1) */, 933 /* 1.3.36.3.3.2.8.1.1.13 (OBJ_brainpoolP512r1) */, 934 /* 1.3.36.3.3.2.8.1.1.14 (OBJ_brainpoolP512t1) */, 936 /* 1.3.133.16.840.63.0.2 (OBJ_dhSinglePass_stdDH_sha1kdf_scheme) */, 941 /* 1.3.133.16.840.63.0.3 (OBJ_dhSinglePass_cofactorDH_sha1kdf_scheme) */ , 418 /* 2.16.840.1.101.3.4.1.1 (OBJ_aes_128_ecb) */, 419 /* 2.16.840.1.101.3.4.1.2 (OBJ_aes_128_cbc) */, 420 /* 2.16.840.1.101.3.4.1.3 (OBJ_aes_128_ofb128) */, 421 /* 2.16.840.1.101.3.4.1.4 (OBJ_aes_128_cfb128) */, 788 /* 2.16.840.1.101.3.4.1.5 (OBJ_id_aes128_wrap) */, 895 /* 2.16.840.1.101.3.4.1.6 (OBJ_aes_128_gcm) */, 896 /* 2.16.840.1.101.3.4.1.7 (OBJ_aes_128_ccm) */, 897 /* 2.16.840.1.101.3.4.1.8 (OBJ_id_aes128_wrap_pad) */, 422 /* 2.16.840.1.101.3.4.1.21 (OBJ_aes_192_ecb) */, 423 /* 2.16.840.1.101.3.4.1.22 (OBJ_aes_192_cbc) */, 424 /* 2.16.840.1.101.3.4.1.23 (OBJ_aes_192_ofb128) */, 425 /* 2.16.840.1.101.3.4.1.24 (OBJ_aes_192_cfb128) */, 789 /* 2.16.840.1.101.3.4.1.25 (OBJ_id_aes192_wrap) */, 898 /* 2.16.840.1.101.3.4.1.26 (OBJ_aes_192_gcm) */, 899 /* 2.16.840.1.101.3.4.1.27 (OBJ_aes_192_ccm) */, 900 /* 2.16.840.1.101.3.4.1.28 (OBJ_id_aes192_wrap_pad) */, 426 /* 2.16.840.1.101.3.4.1.41 (OBJ_aes_256_ecb) */, 427 /* 2.16.840.1.101.3.4.1.42 (OBJ_aes_256_cbc) */, 428 /* 2.16.840.1.101.3.4.1.43 (OBJ_aes_256_ofb128) */, 429 /* 2.16.840.1.101.3.4.1.44 (OBJ_aes_256_cfb128) */, 790 /* 2.16.840.1.101.3.4.1.45 (OBJ_id_aes256_wrap) */, 901 /* 2.16.840.1.101.3.4.1.46 (OBJ_aes_256_gcm) */, 902 /* 2.16.840.1.101.3.4.1.47 (OBJ_aes_256_ccm) */, 903 /* 2.16.840.1.101.3.4.1.48 (OBJ_id_aes256_wrap_pad) */, 672 /* 
2.16.840.1.101.3.4.2.1 (OBJ_sha256) */, 673 /* 2.16.840.1.101.3.4.2.2 (OBJ_sha384) */, 674 /* 2.16.840.1.101.3.4.2.3 (OBJ_sha512) */, 675 /* 2.16.840.1.101.3.4.2.4 (OBJ_sha224) */, 962 /* 2.16.840.1.101.3.4.2.6 (OBJ_sha512_256) */, 802 /* 2.16.840.1.101.3.4.3.1 (OBJ_dsa_with_SHA224) */, 803 /* 2.16.840.1.101.3.4.3.2 (OBJ_dsa_with_SHA256) */, 71 /* 2.16.840.1.113730.1.1 (OBJ_netscape_cert_type) */, 72 /* 2.16.840.1.113730.1.2 (OBJ_netscape_base_url) */, 73 /* 2.16.840.1.113730.1.3 (OBJ_netscape_revocation_url) */, 74 /* 2.16.840.1.113730.1.4 (OBJ_netscape_ca_revocation_url) */, 75 /* 2.16.840.1.113730.1.7 (OBJ_netscape_renewal_url) */, 76 /* 2.16.840.1.113730.1.8 (OBJ_netscape_ca_policy_url) */, 77 /* 2.16.840.1.113730.1.12 (OBJ_netscape_ssl_server_name) */, 78 /* 2.16.840.1.113730.1.13 (OBJ_netscape_comment) */, 79 /* 2.16.840.1.113730.2.5 (OBJ_netscape_cert_sequence) */, 139 /* 2.16.840.1.113730.4.1 (OBJ_ns_sgc) */, 458 /* 0.9.2342.19200300.100.1.1 (OBJ_userId) */, 459 /* 0.9.2342.19200300.100.1.2 (OBJ_textEncodedORAddress) */, 460 /* 0.9.2342.19200300.100.1.3 (OBJ_rfc822Mailbox) */, 461 /* 0.9.2342.19200300.100.1.4 (OBJ_info) */, 462 /* 0.9.2342.19200300.100.1.5 (OBJ_favouriteDrink) */, 463 /* 0.9.2342.19200300.100.1.6 (OBJ_roomNumber) */, 464 /* 0.9.2342.19200300.100.1.7 (OBJ_photo) */, 465 /* 0.9.2342.19200300.100.1.8 (OBJ_userClass) */, 466 /* 0.9.2342.19200300.100.1.9 (OBJ_host) */, 467 /* 0.9.2342.19200300.100.1.10 (OBJ_manager) */, 468 /* 0.9.2342.19200300.100.1.11 (OBJ_documentIdentifier) */, 469 /* 0.9.2342.19200300.100.1.12 (OBJ_documentTitle) */, 470 /* 0.9.2342.19200300.100.1.13 (OBJ_documentVersion) */, 471 /* 0.9.2342.19200300.100.1.14 (OBJ_documentAuthor) */, 472 /* 0.9.2342.19200300.100.1.15 (OBJ_documentLocation) */, 473 /* 0.9.2342.19200300.100.1.20 (OBJ_homeTelephoneNumber) */, 474 /* 0.9.2342.19200300.100.1.21 (OBJ_secretary) */, 475 /* 0.9.2342.19200300.100.1.22 (OBJ_otherMailbox) */, 476 /* 0.9.2342.19200300.100.1.23 (OBJ_lastModifiedTime) 
*/, 477 /* 0.9.2342.19200300.100.1.24 (OBJ_lastModifiedBy) */, 391 /* 0.9.2342.19200300.100.1.25 (OBJ_domainComponent) */, 478 /* 0.9.2342.19200300.100.1.26 (OBJ_aRecord) */, 479 /* 0.9.2342.19200300.100.1.27 (OBJ_pilotAttributeType27) */, 480 /* 0.9.2342.19200300.100.1.28 (OBJ_mXRecord) */, 481 /* 0.9.2342.19200300.100.1.29 (OBJ_nSRecord) */, 482 /* 0.9.2342.19200300.100.1.30 (OBJ_sOARecord) */, 483 /* 0.9.2342.19200300.100.1.31 (OBJ_cNAMERecord) */, 484 /* 0.9.2342.19200300.100.1.37 (OBJ_associatedDomain) */, 485 /* 0.9.2342.19200300.100.1.38 (OBJ_associatedName) */, 486 /* 0.9.2342.19200300.100.1.39 (OBJ_homePostalAddress) */, 487 /* 0.9.2342.19200300.100.1.40 (OBJ_personalTitle) */, 488 /* 0.9.2342.19200300.100.1.41 (OBJ_mobileTelephoneNumber) */, 489 /* 0.9.2342.19200300.100.1.42 (OBJ_pagerTelephoneNumber) */, 490 /* 0.9.2342.19200300.100.1.43 (OBJ_friendlyCountryName) */, 491 /* 0.9.2342.19200300.100.1.45 (OBJ_organizationalStatus) */, 492 /* 0.9.2342.19200300.100.1.46 (OBJ_janetMailbox) */, 493 /* 0.9.2342.19200300.100.1.47 (OBJ_mailPreferenceOption) */, 494 /* 0.9.2342.19200300.100.1.48 (OBJ_buildingName) */, 495 /* 0.9.2342.19200300.100.1.49 (OBJ_dSAQuality) */, 496 /* 0.9.2342.19200300.100.1.50 (OBJ_singleLevelQuality) */, 497 /* 0.9.2342.19200300.100.1.51 (OBJ_subtreeMinimumQuality) */, 498 /* 0.9.2342.19200300.100.1.52 (OBJ_subtreeMaximumQuality) */, 499 /* 0.9.2342.19200300.100.1.53 (OBJ_personalSignature) */, 500 /* 0.9.2342.19200300.100.1.54 (OBJ_dITRedirect) */, 501 /* 0.9.2342.19200300.100.1.55 (OBJ_audio) */, 502 /* 0.9.2342.19200300.100.1.56 (OBJ_documentPublisher) */, 442 /* 0.9.2342.19200300.100.3.4 (OBJ_iA5StringSyntax) */, 443 /* 0.9.2342.19200300.100.3.5 (OBJ_caseIgnoreIA5StringSyntax) */, 444 /* 0.9.2342.19200300.100.4.3 (OBJ_pilotObject) */, 445 /* 0.9.2342.19200300.100.4.4 (OBJ_pilotPerson) */, 446 /* 0.9.2342.19200300.100.4.5 (OBJ_account) */, 447 /* 0.9.2342.19200300.100.4.6 (OBJ_document) */, 448 /* 0.9.2342.19200300.100.4.7 (OBJ_room) 
*/, 449 /* 0.9.2342.19200300.100.4.9 (OBJ_documentSeries) */, 392 /* 0.9.2342.19200300.100.4.13 (OBJ_Domain) */, 450 /* 0.9.2342.19200300.100.4.14 (OBJ_rFC822localPart) */, 451 /* 0.9.2342.19200300.100.4.15 (OBJ_dNSDomain) */, 452 /* 0.9.2342.19200300.100.4.17 (OBJ_domainRelatedObject) */, 453 /* 0.9.2342.19200300.100.4.18 (OBJ_friendlyCountry) */, 454 /* 0.9.2342.19200300.100.4.19 (OBJ_simpleSecurityObject) */, 455 /* 0.9.2342.19200300.100.4.20 (OBJ_pilotOrganization) */, 456 /* 0.9.2342.19200300.100.4.21 (OBJ_pilotDSA) */, 457 /* 0.9.2342.19200300.100.4.22 (OBJ_qualityLabelledData) */, 189 /* 1.2.840.113549.1.9.16.0 (OBJ_id_smime_mod) */, 190 /* 1.2.840.113549.1.9.16.1 (OBJ_id_smime_ct) */, 191 /* 1.2.840.113549.1.9.16.2 (OBJ_id_smime_aa) */, 192 /* 1.2.840.113549.1.9.16.3 (OBJ_id_smime_alg) */, 193 /* 1.2.840.113549.1.9.16.4 (OBJ_id_smime_cd) */, 194 /* 1.2.840.113549.1.9.16.5 (OBJ_id_smime_spq) */, 195 /* 1.2.840.113549.1.9.16.6 (OBJ_id_smime_cti) */, 158 /* 1.2.840.113549.1.9.22.1 (OBJ_x509Certificate) */, 159 /* 1.2.840.113549.1.9.22.2 (OBJ_sdsiCertificate) */, 160 /* 1.2.840.113549.1.9.23.1 (OBJ_x509Crl) */, 144 /* 1.2.840.113549.1.12.1.1 (OBJ_pbe_WithSHA1And128BitRC4) */, 145 /* 1.2.840.113549.1.12.1.2 (OBJ_pbe_WithSHA1And40BitRC4) */, 146 /* 1.2.840.113549.1.12.1.3 (OBJ_pbe_WithSHA1And3_Key_TripleDES_CBC) */, 147 /* 1.2.840.113549.1.12.1.4 (OBJ_pbe_WithSHA1And2_Key_TripleDES_CBC) */, 148 /* 1.2.840.113549.1.12.1.5 (OBJ_pbe_WithSHA1And128BitRC2_CBC) */, 149 /* 1.2.840.113549.1.12.1.6 (OBJ_pbe_WithSHA1And40BitRC2_CBC) */, 171 /* 1.3.6.1.4.1.311.2.1.14 (OBJ_ms_ext_req) */, 134 /* 1.3.6.1.4.1.311.2.1.21 (OBJ_ms_code_ind) */, 135 /* 1.3.6.1.4.1.311.2.1.22 (OBJ_ms_code_com) */, 136 /* 1.3.6.1.4.1.311.10.3.1 (OBJ_ms_ctl_sign) */, 137 /* 1.3.6.1.4.1.311.10.3.3 (OBJ_ms_sgc) */, 138 /* 1.3.6.1.4.1.311.10.3.4 (OBJ_ms_efs) */, 648 /* 1.3.6.1.4.1.311.20.2.2 (OBJ_ms_smartcard_login) */, 649 /* 1.3.6.1.4.1.311.20.2.3 (OBJ_ms_upn) */, 751 /* 1.2.392.200011.61.1.1.1.2 
(OBJ_camellia_128_cbc) */, 752 /* 1.2.392.200011.61.1.1.1.3 (OBJ_camellia_192_cbc) */, 753 /* 1.2.392.200011.61.1.1.1.4 (OBJ_camellia_256_cbc) */, 907 /* 1.2.392.200011.61.1.1.3.2 (OBJ_id_camellia128_wrap) */, 908 /* 1.2.392.200011.61.1.1.3.3 (OBJ_id_camellia192_wrap) */, 909 /* 1.2.392.200011.61.1.1.3.4 (OBJ_id_camellia256_wrap) */, 196 /* 1.2.840.113549.1.9.16.0.1 (OBJ_id_smime_mod_cms) */, 197 /* 1.2.840.113549.1.9.16.0.2 (OBJ_id_smime_mod_ess) */, 198 /* 1.2.840.113549.1.9.16.0.3 (OBJ_id_smime_mod_oid) */, 199 /* 1.2.840.113549.1.9.16.0.4 (OBJ_id_smime_mod_msg_v3) */, 200 /* 1.2.840.113549.1.9.16.0.5 (OBJ_id_smime_mod_ets_eSignature_88) */, 201 /* 1.2.840.113549.1.9.16.0.6 (OBJ_id_smime_mod_ets_eSignature_97) */, 202 /* 1.2.840.113549.1.9.16.0.7 (OBJ_id_smime_mod_ets_eSigPolicy_88) */, 203 /* 1.2.840.113549.1.9.16.0.8 (OBJ_id_smime_mod_ets_eSigPolicy_97) */, 204 /* 1.2.840.113549.1.9.16.1.1 (OBJ_id_smime_ct_receipt) */, 205 /* 1.2.840.113549.1.9.16.1.2 (OBJ_id_smime_ct_authData) */, 206 /* 1.2.840.113549.1.9.16.1.3 (OBJ_id_smime_ct_publishCert) */, 207 /* 1.2.840.113549.1.9.16.1.4 (OBJ_id_smime_ct_TSTInfo) */, 208 /* 1.2.840.113549.1.9.16.1.5 (OBJ_id_smime_ct_TDTInfo) */, 209 /* 1.2.840.113549.1.9.16.1.6 (OBJ_id_smime_ct_contentInfo) */, 210 /* 1.2.840.113549.1.9.16.1.7 (OBJ_id_smime_ct_DVCSRequestData) */, 211 /* 1.2.840.113549.1.9.16.1.8 (OBJ_id_smime_ct_DVCSResponseData) */, 786 /* 1.2.840.113549.1.9.16.1.9 (OBJ_id_smime_ct_compressedData) */, 787 /* 1.2.840.113549.1.9.16.1.27 (OBJ_id_ct_asciiTextWithCRLF) */, 212 /* 1.2.840.113549.1.9.16.2.1 (OBJ_id_smime_aa_receiptRequest) */, 213 /* 1.2.840.113549.1.9.16.2.2 (OBJ_id_smime_aa_securityLabel) */, 214 /* 1.2.840.113549.1.9.16.2.3 (OBJ_id_smime_aa_mlExpandHistory) */, 215 /* 1.2.840.113549.1.9.16.2.4 (OBJ_id_smime_aa_contentHint) */, 216 /* 1.2.840.113549.1.9.16.2.5 (OBJ_id_smime_aa_msgSigDigest) */, 217 /* 1.2.840.113549.1.9.16.2.6 (OBJ_id_smime_aa_encapContentType) */, 218 /* 1.2.840.113549.1.9.16.2.7 
(OBJ_id_smime_aa_contentIdentifier) */, 219 /* 1.2.840.113549.1.9.16.2.8 (OBJ_id_smime_aa_macValue) */, 220 /* 1.2.840.113549.1.9.16.2.9 (OBJ_id_smime_aa_equivalentLabels) */, 221 /* 1.2.840.113549.1.9.16.2.10 (OBJ_id_smime_aa_contentReference) */, 222 /* 1.2.840.113549.1.9.16.2.11 (OBJ_id_smime_aa_encrypKeyPref) */, 223 /* 1.2.840.113549.1.9.16.2.12 (OBJ_id_smime_aa_signingCertificate) */, 224 /* 1.2.840.113549.1.9.16.2.13 (OBJ_id_smime_aa_smimeEncryptCerts) */, 225 /* 1.2.840.113549.1.9.16.2.14 (OBJ_id_smime_aa_timeStampToken) */, 226 /* 1.2.840.113549.1.9.16.2.15 (OBJ_id_smime_aa_ets_sigPolicyId) */, 227 /* 1.2.840.113549.1.9.16.2.16 (OBJ_id_smime_aa_ets_commitmentType) */, 228 /* 1.2.840.113549.1.9.16.2.17 (OBJ_id_smime_aa_ets_signerLocation) */, 229 /* 1.2.840.113549.1.9.16.2.18 (OBJ_id_smime_aa_ets_signerAttr) */, 230 /* 1.2.840.113549.1.9.16.2.19 (OBJ_id_smime_aa_ets_otherSigCert) */, 231 /* 1.2.840.113549.1.9.16.2.20 (OBJ_id_smime_aa_ets_contentTimestamp) */, 232 /* 1.2.840.113549.1.9.16.2.21 (OBJ_id_smime_aa_ets_CertificateRefs) */, 233 /* 1.2.840.113549.1.9.16.2.22 (OBJ_id_smime_aa_ets_RevocationRefs) */, 234 /* 1.2.840.113549.1.9.16.2.23 (OBJ_id_smime_aa_ets_certValues) */, 235 /* 1.2.840.113549.1.9.16.2.24 (OBJ_id_smime_aa_ets_revocationValues) */, 236 /* 1.2.840.113549.1.9.16.2.25 (OBJ_id_smime_aa_ets_escTimeStamp) */, 237 /* 1.2.840.113549.1.9.16.2.26 (OBJ_id_smime_aa_ets_certCRLTimestamp) */, 238 /* 1.2.840.113549.1.9.16.2.27 (OBJ_id_smime_aa_ets_archiveTimeStamp) */, 239 /* 1.2.840.113549.1.9.16.2.28 (OBJ_id_smime_aa_signatureType) */, 240 /* 1.2.840.113549.1.9.16.2.29 (OBJ_id_smime_aa_dvcs_dvc) */, 241 /* 1.2.840.113549.1.9.16.3.1 (OBJ_id_smime_alg_ESDHwith3DES) */, 242 /* 1.2.840.113549.1.9.16.3.2 (OBJ_id_smime_alg_ESDHwithRC2) */, 243 /* 1.2.840.113549.1.9.16.3.3 (OBJ_id_smime_alg_3DESwrap) */, 244 /* 1.2.840.113549.1.9.16.3.4 (OBJ_id_smime_alg_RC2wrap) */, 245 /* 1.2.840.113549.1.9.16.3.5 (OBJ_id_smime_alg_ESDH) */, 246 /* 
1.2.840.113549.1.9.16.3.6 (OBJ_id_smime_alg_CMS3DESwrap) */, 247 /* 1.2.840.113549.1.9.16.3.7 (OBJ_id_smime_alg_CMSRC2wrap) */, 125 /* 1.2.840.113549.1.9.16.3.8 (OBJ_zlib_compression) */, 893 /* 1.2.840.113549.1.9.16.3.9 (OBJ_id_alg_PWRI_KEK) */, 248 /* 1.2.840.113549.1.9.16.4.1 (OBJ_id_smime_cd_ldap) */, 249 /* 1.2.840.113549.1.9.16.5.1 (OBJ_id_smime_spq_ets_sqt_uri) */, 250 /* 1.2.840.113549.1.9.16.5.2 (OBJ_id_smime_spq_ets_sqt_unotice) */, 251 /* 1.2.840.113549.1.9.16.6.1 (OBJ_id_smime_cti_ets_proofOfOrigin) */, 252 /* 1.2.840.113549.1.9.16.6.2 (OBJ_id_smime_cti_ets_proofOfReceipt) */, 253 /* 1.2.840.113549.1.9.16.6.3 (OBJ_id_smime_cti_ets_proofOfDelivery) */, 254 /* 1.2.840.113549.1.9.16.6.4 (OBJ_id_smime_cti_ets_proofOfSender) */, 255 /* 1.2.840.113549.1.9.16.6.5 (OBJ_id_smime_cti_ets_proofOfApproval) */, 256 /* 1.2.840.113549.1.9.16.6.6 (OBJ_id_smime_cti_ets_proofOfCreation) */, 150 /* 1.2.840.113549.1.12.10.1.1 (OBJ_keyBag) */, 151 /* 1.2.840.113549.1.12.10.1.2 (OBJ_pkcs8ShroudedKeyBag) */, 152 /* 1.2.840.113549.1.12.10.1.3 (OBJ_certBag) */, 153 /* 1.2.840.113549.1.12.10.1.4 (OBJ_crlBag) */, 154 /* 1.2.840.113549.1.12.10.1.5 (OBJ_secretBag) */, 155 /* 1.2.840.113549.1.12.10.1.6 (OBJ_safeContentsBag) */, 34 /* 1.3.6.1.4.1.188.7.1.1.2 (OBJ_idea_cbc) */, }; ================================================ FILE: Sources/CNIOBoringSSL/crypto/obj/obj_xref.cc ================================================ /* * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include "../internal.h" typedef struct { int sign_nid; int digest_nid; int pkey_nid; } nid_triple; static const nid_triple kTriples[] = { // RSA PKCS#1. 
{NID_md4WithRSAEncryption, NID_md4, NID_rsaEncryption}, {NID_md5WithRSAEncryption, NID_md5, NID_rsaEncryption}, {NID_sha1WithRSAEncryption, NID_sha1, NID_rsaEncryption}, {NID_sha224WithRSAEncryption, NID_sha224, NID_rsaEncryption}, {NID_sha256WithRSAEncryption, NID_sha256, NID_rsaEncryption}, {NID_sha384WithRSAEncryption, NID_sha384, NID_rsaEncryption}, {NID_sha512WithRSAEncryption, NID_sha512, NID_rsaEncryption}, // DSA. {NID_dsaWithSHA1, NID_sha1, NID_dsa}, {NID_dsaWithSHA1_2, NID_sha1, NID_dsa_2}, {NID_dsa_with_SHA224, NID_sha224, NID_dsa}, {NID_dsa_with_SHA256, NID_sha256, NID_dsa}, // ECDSA. {NID_ecdsa_with_SHA1, NID_sha1, NID_X9_62_id_ecPublicKey}, {NID_ecdsa_with_SHA224, NID_sha224, NID_X9_62_id_ecPublicKey}, {NID_ecdsa_with_SHA256, NID_sha256, NID_X9_62_id_ecPublicKey}, {NID_ecdsa_with_SHA384, NID_sha384, NID_X9_62_id_ecPublicKey}, {NID_ecdsa_with_SHA512, NID_sha512, NID_X9_62_id_ecPublicKey}, // The following algorithms use more complex (or simpler) parameters. The // digest "undef" indicates the caller should handle this explicitly. 
{NID_rsassaPss, NID_undef, NID_rsaEncryption},
    {NID_ED25519, NID_undef, NID_ED25519},
};

// OBJ_find_sigid_algs looks up |sign_nid| in |kTriples| by linear search.
// On the first match it returns one and, for each non-NULL out-pointer,
// writes the corresponding digest and public-key NIDs; it returns zero when
// no triple matches.  Note the table stores NID_undef as the digest for some
// entries (e.g. rsassaPss, Ed25519), so a NID_undef result is valid and must
// be handled explicitly by the caller.
int OBJ_find_sigid_algs(int sign_nid, int *out_digest_nid, int *out_pkey_nid) {
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kTriples); i++) {
    if (kTriples[i].sign_nid == sign_nid) {
      if (out_digest_nid != NULL) {
        *out_digest_nid = kTriples[i].digest_nid;
      }
      if (out_pkey_nid != NULL) {
        *out_pkey_nid = kTriples[i].pkey_nid;
      }
      return 1;
    }
  }
  return 0;
}

// OBJ_find_sigid_by_algs performs the reverse lookup: given |digest_nid| and
// |pkey_nid| it finds the first matching signature NID in |kTriples|, writes
// it to |*out_sign_nid| when that pointer is non-NULL, and returns one.  It
// returns zero when no triple matches.
int OBJ_find_sigid_by_algs(int *out_sign_nid, int digest_nid, int pkey_nid) {
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kTriples); i++) {
    if (kTriples[i].digest_nid == digest_nid &&
        kTriples[i].pkey_nid == pkey_nid) {
      if (out_sign_nid != NULL) {
        *out_sign_nid = kTriples[i].sign_nid;
      }
      return 1;
    }
  }
  return 0;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/pem/internal.h
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OPENSSL_HEADER_PEM_INTERNAL_H
#define OPENSSL_HEADER_PEM_INTERNAL_H

// NOTE(review): this #include appears to have lost its argument during
// extraction; restore the header name from upstream BoringSSL.
#include

#ifdef __cplusplus
extern "C" {
#endif

// PEM_get_EVP_CIPHER_INFO decodes |header| as a PEM header block and writes
// the specified cipher and IV to |cipher|. It returns one on success and
// zero on error. |header| must be a NUL-terminated string. If |header| does
// not specify encryption, this function will return success and set
// |cipher->cipher| to NULL.
int PEM_get_EVP_CIPHER_INFO(const char *header, EVP_CIPHER_INFO *cipher);

// PEM_do_header decrypts |*len| bytes from |data| in-place according to the
// information in |cipher|. On success, it returns one and sets |*len| to the
// length of the plaintext. Otherwise, it returns zero.
If |cipher| specifies // encryption, the key is derived from a password returned from |callback|. int PEM_do_header(const EVP_CIPHER_INFO *cipher, uint8_t *data, long *len, pem_password_cb *callback, void *u); #ifdef __cplusplus } // extern "C" #endif #endif // OPENSSL_HEADER_PEM_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/pem/pem_all.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include static RSA *pkey_get_rsa(EVP_PKEY *key, RSA **rsa); static DSA *pkey_get_dsa(EVP_PKEY *key, DSA **dsa); static EC_KEY *pkey_get_eckey(EVP_PKEY *key, EC_KEY **eckey); IMPLEMENT_PEM_rw(X509_REQ, X509_REQ, PEM_STRING_X509_REQ, X509_REQ) IMPLEMENT_PEM_write(X509_REQ_NEW, X509_REQ, PEM_STRING_X509_REQ_OLD, X509_REQ) IMPLEMENT_PEM_rw(X509_CRL, X509_CRL, PEM_STRING_X509_CRL, X509_CRL) IMPLEMENT_PEM_rw(PKCS7, PKCS7, PEM_STRING_PKCS7, PKCS7) // We treat RSA or DSA private keys as a special case. For private keys we // read in an EVP_PKEY structure with PEM_read_bio_PrivateKey() and extract // the relevant private key: this means can handle "traditional" and PKCS#8 // formats transparently. 
static RSA *pkey_get_rsa(EVP_PKEY *key, RSA **rsa) { RSA *rtmp; if (!key) { return NULL; } rtmp = EVP_PKEY_get1_RSA(key); EVP_PKEY_free(key); if (!rtmp) { return NULL; } if (rsa) { RSA_free(*rsa); *rsa = rtmp; } return rtmp; } RSA *PEM_read_bio_RSAPrivateKey(BIO *bp, RSA **rsa, pem_password_cb *cb, void *u) { EVP_PKEY *pktmp; pktmp = PEM_read_bio_PrivateKey(bp, NULL, cb, u); return pkey_get_rsa(pktmp, rsa); } RSA *PEM_read_RSAPrivateKey(FILE *fp, RSA **rsa, pem_password_cb *cb, void *u) { EVP_PKEY *pktmp; pktmp = PEM_read_PrivateKey(fp, NULL, cb, u); return pkey_get_rsa(pktmp, rsa); } IMPLEMENT_PEM_write_cb_const(RSAPrivateKey, RSA, PEM_STRING_RSA, RSAPrivateKey) IMPLEMENT_PEM_rw_const(RSAPublicKey, RSA, PEM_STRING_RSA_PUBLIC, RSAPublicKey) IMPLEMENT_PEM_rw(RSA_PUBKEY, RSA, PEM_STRING_PUBLIC, RSA_PUBKEY) #ifndef OPENSSL_NO_DSA static DSA *pkey_get_dsa(EVP_PKEY *key, DSA **dsa) { DSA *dtmp; if (!key) { return NULL; } dtmp = EVP_PKEY_get1_DSA(key); EVP_PKEY_free(key); if (!dtmp) { return NULL; } if (dsa) { DSA_free(*dsa); *dsa = dtmp; } return dtmp; } DSA *PEM_read_bio_DSAPrivateKey(BIO *bp, DSA **dsa, pem_password_cb *cb, void *u) { EVP_PKEY *pktmp; pktmp = PEM_read_bio_PrivateKey(bp, NULL, cb, u); return pkey_get_dsa(pktmp, dsa); // will free pktmp } IMPLEMENT_PEM_write_cb_const(DSAPrivateKey, DSA, PEM_STRING_DSA, DSAPrivateKey) IMPLEMENT_PEM_rw(DSA_PUBKEY, DSA, PEM_STRING_PUBLIC, DSA_PUBKEY) DSA *PEM_read_DSAPrivateKey(FILE *fp, DSA **dsa, pem_password_cb *cb, void *u) { EVP_PKEY *pktmp; pktmp = PEM_read_PrivateKey(fp, NULL, cb, u); return pkey_get_dsa(pktmp, dsa); // will free pktmp } IMPLEMENT_PEM_rw_const(DSAparams, DSA, PEM_STRING_DSAPARAMS, DSAparams) #endif static EC_KEY *pkey_get_eckey(EVP_PKEY *key, EC_KEY **eckey) { EC_KEY *dtmp; if (!key) { return NULL; } dtmp = EVP_PKEY_get1_EC_KEY(key); EVP_PKEY_free(key); if (!dtmp) { return NULL; } if (eckey) { EC_KEY_free(*eckey); *eckey = dtmp; } return dtmp; } EC_KEY *PEM_read_bio_ECPrivateKey(BIO *bp, EC_KEY 
**key, pem_password_cb *cb, void *u) { EVP_PKEY *pktmp; pktmp = PEM_read_bio_PrivateKey(bp, NULL, cb, u); return pkey_get_eckey(pktmp, key); // will free pktmp } IMPLEMENT_PEM_write_cb(ECPrivateKey, EC_KEY, PEM_STRING_ECPRIVATEKEY, ECPrivateKey) IMPLEMENT_PEM_rw(EC_PUBKEY, EC_KEY, PEM_STRING_PUBLIC, EC_PUBKEY) EC_KEY *PEM_read_ECPrivateKey(FILE *fp, EC_KEY **eckey, pem_password_cb *cb, void *u) { EVP_PKEY *pktmp; pktmp = PEM_read_PrivateKey(fp, NULL, cb, u); return pkey_get_eckey(pktmp, eckey); // will free pktmp } IMPLEMENT_PEM_rw_const(DHparams, DH, PEM_STRING_DHPARAMS, DHparams) IMPLEMENT_PEM_rw(PUBKEY, EVP_PKEY, PEM_STRING_PUBLIC, PUBKEY) ================================================ FILE: Sources/CNIOBoringSSL/crypto/pem/pem_info.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include #include "internal.h" static X509_PKEY *X509_PKEY_new(void) { return reinterpret_cast(OPENSSL_zalloc(sizeof(X509_PKEY))); } static void X509_PKEY_free(X509_PKEY *x) { if (x == NULL) { return; } EVP_PKEY_free(x->dec_pkey); OPENSSL_free(x); } static X509_INFO *X509_INFO_new(void) { return reinterpret_cast(OPENSSL_zalloc(sizeof(X509_INFO))); } void X509_INFO_free(X509_INFO *x) { if (x == NULL) { return; } X509_free(x->x509); X509_CRL_free(x->crl); X509_PKEY_free(x->x_pkey); OPENSSL_free(x->enc_data); OPENSSL_free(x); } STACK_OF(X509_INFO) *PEM_X509_INFO_read(FILE *fp, STACK_OF(X509_INFO) *sk, pem_password_cb *cb, void *u) { BIO *b = BIO_new_fp(fp, BIO_NOCLOSE); if (b == NULL) { OPENSSL_PUT_ERROR(PEM, ERR_R_BUF_LIB); return 0; } STACK_OF(X509_INFO) *ret = 
PEM_X509_INFO_read_bio(b, sk, cb, u); BIO_free(b); return ret; } enum parse_result_t { parse_ok, parse_error, parse_new_entry, }; static enum parse_result_t parse_x509(X509_INFO *info, const uint8_t *data, size_t len, int key_type) { if (info->x509 != NULL) { return parse_new_entry; } info->x509 = d2i_X509(NULL, &data, len); return info->x509 != NULL ? parse_ok : parse_error; } static enum parse_result_t parse_x509_aux(X509_INFO *info, const uint8_t *data, size_t len, int key_type) { if (info->x509 != NULL) { return parse_new_entry; } info->x509 = d2i_X509_AUX(NULL, &data, len); return info->x509 != NULL ? parse_ok : parse_error; } static enum parse_result_t parse_crl(X509_INFO *info, const uint8_t *data, size_t len, int key_type) { if (info->crl != NULL) { return parse_new_entry; } info->crl = d2i_X509_CRL(NULL, &data, len); return info->crl != NULL ? parse_ok : parse_error; } static enum parse_result_t parse_key(X509_INFO *info, const uint8_t *data, size_t len, int key_type) { if (info->x_pkey != NULL) { return parse_new_entry; } info->x_pkey = X509_PKEY_new(); if (info->x_pkey == NULL) { return parse_error; } info->x_pkey->dec_pkey = d2i_PrivateKey(key_type, NULL, &data, len); return info->x_pkey->dec_pkey != NULL ? 
parse_ok : parse_error; } STACK_OF(X509_INFO) *PEM_X509_INFO_read_bio(BIO *bp, STACK_OF(X509_INFO) *sk, pem_password_cb *cb, void *u) { X509_INFO *info = NULL; char *name = NULL, *header = NULL; unsigned char *data = NULL; long len; int ok = 0; STACK_OF(X509_INFO) *ret = NULL; if (sk == NULL) { ret = sk_X509_INFO_new_null(); if (ret == NULL) { return NULL; } } else { ret = sk; } size_t orig_num = sk_X509_INFO_num(ret); info = X509_INFO_new(); if (info == NULL) { goto err; } for (;;) { if (!PEM_read_bio(bp, &name, &header, &data, &len)) { uint32_t error = ERR_peek_last_error(); if (ERR_GET_LIB(error) == ERR_LIB_PEM && ERR_GET_REASON(error) == PEM_R_NO_START_LINE) { ERR_clear_error(); break; } goto err; } enum parse_result_t (*parse_function)(X509_INFO *, const uint8_t *, size_t, int) = NULL; int key_type = EVP_PKEY_NONE; if (strcmp(name, PEM_STRING_X509) == 0 || strcmp(name, PEM_STRING_X509_OLD) == 0) { parse_function = parse_x509; } else if (strcmp(name, PEM_STRING_X509_TRUSTED) == 0) { parse_function = parse_x509_aux; } else if (strcmp(name, PEM_STRING_X509_CRL) == 0) { parse_function = parse_crl; } else if (strcmp(name, PEM_STRING_RSA) == 0) { parse_function = parse_key; key_type = EVP_PKEY_RSA; } else if (strcmp(name, PEM_STRING_DSA) == 0) { parse_function = parse_key; key_type = EVP_PKEY_DSA; } else if (strcmp(name, PEM_STRING_ECPRIVATEKEY) == 0) { parse_function = parse_key; key_type = EVP_PKEY_EC; } // If a private key has a header, assume it is encrypted. This function does // not decrypt private keys. if (key_type != EVP_PKEY_NONE && strlen(header) > 10) { if (info->x_pkey != NULL) { if (!sk_X509_INFO_push(ret, info)) { goto err; } info = X509_INFO_new(); if (info == NULL) { goto err; } } // Use an empty key as a placeholder. 
info->x_pkey = X509_PKEY_new(); if (info->x_pkey == NULL || !PEM_get_EVP_CIPHER_INFO(header, &info->enc_cipher)) { goto err; } info->enc_data = (char *)data; info->enc_len = (int)len; data = NULL; } else if (parse_function != NULL) { EVP_CIPHER_INFO cipher; if (!PEM_get_EVP_CIPHER_INFO(header, &cipher) || !PEM_do_header(&cipher, data, &len, cb, u)) { goto err; } enum parse_result_t result = parse_function(info, data, len, key_type); if (result == parse_new_entry) { if (!sk_X509_INFO_push(ret, info)) { goto err; } info = X509_INFO_new(); if (info == NULL) { goto err; } result = parse_function(info, data, len, key_type); } if (result != parse_ok) { OPENSSL_PUT_ERROR(PEM, ERR_R_ASN1_LIB); goto err; } } OPENSSL_free(name); OPENSSL_free(header); OPENSSL_free(data); name = NULL; header = NULL; data = NULL; } // Push the last entry on the stack if not empty. if (info->x509 != NULL || info->crl != NULL || info->x_pkey != NULL || info->enc_data != NULL) { if (!sk_X509_INFO_push(ret, info)) { goto err; } info = NULL; } ok = 1; err: X509_INFO_free(info); if (!ok) { while (sk_X509_INFO_num(ret) > orig_num) { X509_INFO_free(sk_X509_INFO_pop(ret)); } if (ret != sk) { sk_X509_INFO_free(ret); } ret = NULL; } OPENSSL_free(name); OPENSSL_free(header); OPENSSL_free(data); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/pem/pem_lib.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../internal.h" #include "internal.h" #define MIN_LENGTH 4 static int load_iv(const char **fromp, unsigned char *to, size_t num); static int check_pem(const char *nm, const char *name); // PEM_proc_type appends a Proc-Type header to |buf|, determined by |type|. static void PEM_proc_type(char buf[PEM_BUFSIZE], int type) { const char *str; if (type == PEM_TYPE_ENCRYPTED) { str = "ENCRYPTED"; } else if (type == PEM_TYPE_MIC_CLEAR) { str = "MIC-CLEAR"; } else if (type == PEM_TYPE_MIC_ONLY) { str = "MIC-ONLY"; } else { str = "BAD-TYPE"; } OPENSSL_strlcat(buf, "Proc-Type: 4,", PEM_BUFSIZE); OPENSSL_strlcat(buf, str, PEM_BUFSIZE); OPENSSL_strlcat(buf, "\n", PEM_BUFSIZE); } // PEM_dek_info appends a DEK-Info header to |buf|, with an algorithm of |type| // and a single parameter, specified by hex-encoding |len| bytes from |str|. 
static void PEM_dek_info(char buf[PEM_BUFSIZE], const char *type, size_t len, char *str) { static const unsigned char map[17] = "0123456789ABCDEF"; OPENSSL_strlcat(buf, "DEK-Info: ", PEM_BUFSIZE); OPENSSL_strlcat(buf, type, PEM_BUFSIZE); OPENSSL_strlcat(buf, ",", PEM_BUFSIZE); const size_t used = strlen(buf); const size_t available = PEM_BUFSIZE - used; if (len * 2 < len || len * 2 + 2 < len || available < len * 2 + 2) { return; } for (size_t i = 0; i < len; i++) { buf[used + i * 2] = map[(str[i] >> 4) & 0x0f]; buf[used + i * 2 + 1] = map[(str[i]) & 0x0f]; } buf[used + len * 2] = '\n'; buf[used + len * 2 + 1] = '\0'; } void *PEM_ASN1_read(d2i_of_void *d2i, const char *name, FILE *fp, void **x, pem_password_cb *cb, void *u) { BIO *b = BIO_new_fp(fp, BIO_NOCLOSE); if (b == NULL) { OPENSSL_PUT_ERROR(PEM, ERR_R_BUF_LIB); return NULL; } void *ret = PEM_ASN1_read_bio(d2i, name, b, x, cb, u); BIO_free(b); return ret; } static int check_pem(const char *nm, const char *name) { // Normal matching nm and name if (!strcmp(nm, name)) { return 1; } // Make PEM_STRING_EVP_PKEY match any private key if (!strcmp(name, PEM_STRING_EVP_PKEY)) { return !strcmp(nm, PEM_STRING_PKCS8) || !strcmp(nm, PEM_STRING_PKCS8INF) || !strcmp(nm, PEM_STRING_RSA) || !strcmp(nm, PEM_STRING_EC) || !strcmp(nm, PEM_STRING_DSA); } // Permit older strings if (!strcmp(nm, PEM_STRING_X509_OLD) && !strcmp(name, PEM_STRING_X509)) { return 1; } if (!strcmp(nm, PEM_STRING_X509_REQ_OLD) && !strcmp(name, PEM_STRING_X509_REQ)) { return 1; } // Allow normal certs to be read as trusted certs if (!strcmp(nm, PEM_STRING_X509) && !strcmp(name, PEM_STRING_X509_TRUSTED)) { return 1; } if (!strcmp(nm, PEM_STRING_X509_OLD) && !strcmp(name, PEM_STRING_X509_TRUSTED)) { return 1; } // Some CAs use PKCS#7 with CERTIFICATE headers if (!strcmp(nm, PEM_STRING_X509) && !strcmp(name, PEM_STRING_PKCS7)) { return 1; } if (!strcmp(nm, PEM_STRING_PKCS7_SIGNED) && !strcmp(name, PEM_STRING_PKCS7)) { return 1; } #ifndef OPENSSL_NO_CMS if 
(!strcmp(nm, PEM_STRING_X509) && !strcmp(name, PEM_STRING_CMS)) { return 1; } // Allow CMS to be read from PKCS#7 headers if (!strcmp(nm, PEM_STRING_PKCS7) && !strcmp(name, PEM_STRING_CMS)) { return 1; } #endif return 0; } static const EVP_CIPHER *cipher_by_name(std::string_view name) { // This is similar to the (deprecated) function |EVP_get_cipherbyname|. Note // the PEM code assumes that ciphers have at least 8 bytes of IV, at most 20 // bytes of overhead and generally behave like CBC mode. if (name == SN_des_cbc) { return EVP_des_cbc(); } else if (name == SN_des_ede3_cbc) { return EVP_des_ede3_cbc(); } else if (name == SN_aes_128_cbc) { return EVP_aes_128_cbc(); } else if (name == SN_aes_192_cbc) { return EVP_aes_192_cbc(); } else if (name == SN_aes_256_cbc) { return EVP_aes_256_cbc(); } else { return NULL; } } int PEM_bytes_read_bio(unsigned char **pdata, long *plen, char **pnm, const char *name, BIO *bp, pem_password_cb *cb, void *u) { EVP_CIPHER_INFO cipher; char *nm = NULL, *header = NULL; unsigned char *data = NULL; long len; int ret = 0; for (;;) { if (!PEM_read_bio(bp, &nm, &header, &data, &len)) { uint32_t error = ERR_peek_error(); if (ERR_GET_LIB(error) == ERR_LIB_PEM && ERR_GET_REASON(error) == PEM_R_NO_START_LINE) { ERR_add_error_data(2, "Expecting: ", name); } return 0; } if (check_pem(nm, name)) { break; } OPENSSL_free(nm); OPENSSL_free(header); OPENSSL_free(data); } if (!PEM_get_EVP_CIPHER_INFO(header, &cipher)) { goto err; } if (!PEM_do_header(&cipher, data, &len, cb, u)) { goto err; } *pdata = data; *plen = len; if (pnm) { *pnm = nm; } ret = 1; err: if (!ret || !pnm) { OPENSSL_free(nm); } OPENSSL_free(header); if (!ret) { OPENSSL_free(data); } return ret; } int PEM_ASN1_write(i2d_of_void *i2d, const char *name, FILE *fp, void *x, const EVP_CIPHER *enc, const unsigned char *pass, int pass_len, pem_password_cb *callback, void *u) { BIO *b = BIO_new_fp(fp, BIO_NOCLOSE); if (b == NULL) { OPENSSL_PUT_ERROR(PEM, ERR_R_BUF_LIB); return 0; } int ret = 
PEM_ASN1_write_bio(i2d, name, b, x, enc, pass, pass_len, callback, u); BIO_free(b); return ret; } int PEM_ASN1_write_bio(i2d_of_void *i2d, const char *name, BIO *bp, void *x, const EVP_CIPHER *enc, const unsigned char *pass, int pass_len, pem_password_cb *callback, void *u) { EVP_CIPHER_CTX ctx; int dsize = 0, i, j, ret = 0; unsigned char *p, *data = NULL; const char *objstr = NULL; char buf[PEM_BUFSIZE]; unsigned char key[EVP_MAX_KEY_LENGTH]; unsigned char iv[EVP_MAX_IV_LENGTH]; if (enc != NULL) { objstr = OBJ_nid2sn(EVP_CIPHER_nid(enc)); if (objstr == NULL || cipher_by_name(objstr) == NULL || EVP_CIPHER_iv_length(enc) < 8) { OPENSSL_PUT_ERROR(PEM, PEM_R_UNSUPPORTED_CIPHER); goto err; } } if ((dsize = i2d(x, NULL)) < 0) { OPENSSL_PUT_ERROR(PEM, ERR_R_ASN1_LIB); dsize = 0; goto err; } // dzise + 8 bytes are needed // actually it needs the cipher block size extra... data = (unsigned char *)OPENSSL_malloc((unsigned int)dsize + 20); if (data == NULL) { goto err; } p = data; i = i2d(x, &p); if (enc != NULL) { const unsigned iv_len = EVP_CIPHER_iv_length(enc); if (pass == NULL) { if (!callback) { callback = PEM_def_callback; } pass_len = (*callback)(buf, PEM_BUFSIZE, 1, u); if (pass_len < 0) { OPENSSL_PUT_ERROR(PEM, PEM_R_READ_KEY); goto err; } pass = (const unsigned char *)buf; } assert(iv_len <= sizeof(iv)); if (!RAND_bytes(iv, iv_len)) { // Generate a salt goto err; } // The 'iv' is used as the iv and as a salt. 
It is NOT taken from // the BytesToKey function if (!EVP_BytesToKey(enc, EVP_md5(), iv, pass, pass_len, 1, key, NULL)) { goto err; } if (pass == (const unsigned char *)buf) { OPENSSL_cleanse(buf, PEM_BUFSIZE); } assert(strlen(objstr) + 23 + 2 * iv_len + 13 <= sizeof(buf)); buf[0] = '\0'; PEM_proc_type(buf, PEM_TYPE_ENCRYPTED); PEM_dek_info(buf, objstr, iv_len, (char *)iv); // k=strlen(buf); EVP_CIPHER_CTX_init(&ctx); ret = 1; if (!EVP_EncryptInit_ex(&ctx, enc, NULL, key, iv) || !EVP_EncryptUpdate(&ctx, data, &j, data, i) || !EVP_EncryptFinal_ex(&ctx, &(data[j]), &i)) { ret = 0; } else { i += j; } EVP_CIPHER_CTX_cleanup(&ctx); if (ret == 0) { goto err; } } else { ret = 1; buf[0] = '\0'; } i = PEM_write_bio(bp, name, buf, data, i); if (i <= 0) { ret = 0; } err: OPENSSL_cleanse(key, sizeof(key)); OPENSSL_cleanse(iv, sizeof(iv)); OPENSSL_cleanse((char *)&ctx, sizeof(ctx)); OPENSSL_cleanse(buf, PEM_BUFSIZE); OPENSSL_free(data); return ret; } int PEM_do_header(const EVP_CIPHER_INFO *cipher, unsigned char *data, long *plen, pem_password_cb *callback, void *u) { int i = 0, j, o, pass_len; long len; EVP_CIPHER_CTX ctx; unsigned char key[EVP_MAX_KEY_LENGTH]; char buf[PEM_BUFSIZE]; len = *plen; if (cipher->cipher == NULL) { return 1; } pass_len = 0; if (!callback) { callback = PEM_def_callback; } pass_len = callback(buf, PEM_BUFSIZE, 0, u); if (pass_len < 0) { OPENSSL_PUT_ERROR(PEM, PEM_R_BAD_PASSWORD_READ); return 0; } if (!EVP_BytesToKey(cipher->cipher, EVP_md5(), cipher->iv, (unsigned char *)buf, pass_len, 1, key, NULL)) { return 0; } j = (int)len; EVP_CIPHER_CTX_init(&ctx); o = EVP_DecryptInit_ex(&ctx, cipher->cipher, NULL, key, cipher->iv); if (o) { o = EVP_DecryptUpdate(&ctx, data, &i, data, j); } if (o) { o = EVP_DecryptFinal_ex(&ctx, &(data[i]), &j); } EVP_CIPHER_CTX_cleanup(&ctx); OPENSSL_cleanse((char *)buf, sizeof(buf)); OPENSSL_cleanse((char *)key, sizeof(key)); if (!o) { OPENSSL_PUT_ERROR(PEM, PEM_R_BAD_DECRYPT); return 0; } j += i; *plen = j; return 1; } int 
PEM_get_EVP_CIPHER_INFO(const char *header, EVP_CIPHER_INFO *cipher) { cipher->cipher = NULL; OPENSSL_memset(cipher->iv, 0, sizeof(cipher->iv)); if ((header == NULL) || (*header == '\0') || (*header == '\n')) { return 1; } if (strncmp(header, "Proc-Type: ", 11) != 0) { OPENSSL_PUT_ERROR(PEM, PEM_R_NOT_PROC_TYPE); return 0; } header += 11; if (header[0] != '4' || header[1] != ',') { OPENSSL_PUT_ERROR(PEM, PEM_R_UNSUPPORTED_PROC_TYPE_VERSION); return 0; } header += 2; if (strncmp(header, "ENCRYPTED", 9) != 0) { OPENSSL_PUT_ERROR(PEM, PEM_R_NOT_ENCRYPTED); return 0; } for (; (*header != '\n') && (*header != '\0'); header++) { ; } if (*header == '\0') { OPENSSL_PUT_ERROR(PEM, PEM_R_SHORT_HEADER); return 0; } header++; if (strncmp(header, "DEK-Info: ", 10) != 0) { OPENSSL_PUT_ERROR(PEM, PEM_R_NOT_DEK_INFO); return 0; } header += 10; const char *p = header; for (;;) { char c = *header; if (!((c >= 'A' && c <= 'Z') || c == '-' || OPENSSL_isdigit(c))) { break; } header++; } cipher->cipher = cipher_by_name(std::string_view(p, header - p)); header++; if (cipher->cipher == NULL) { OPENSSL_PUT_ERROR(PEM, PEM_R_UNSUPPORTED_ENCRYPTION); return 0; } // The IV parameter must be at least 8 bytes long to be used as the salt in // the KDF. (This should not happen given |cipher_by_name|.) 
if (EVP_CIPHER_iv_length(cipher->cipher) < 8) { assert(0); OPENSSL_PUT_ERROR(PEM, PEM_R_UNSUPPORTED_ENCRYPTION); return 0; } const char **header_pp = &header; if (!load_iv(header_pp, cipher->iv, EVP_CIPHER_iv_length(cipher->cipher))) { return 0; } return 1; } static int load_iv(const char **fromp, unsigned char *to, size_t num) { uint8_t v; const char *from; from = *fromp; for (size_t i = 0; i < num; i++) { to[i] = 0; } num *= 2; for (size_t i = 0; i < num; i++) { if (!OPENSSL_fromxdigit(&v, *from)) { OPENSSL_PUT_ERROR(PEM, PEM_R_BAD_IV_CHARS); return 0; } from++; to[i / 2] |= v << (!(i & 1)) * 4; } *fromp = from; return 1; } int PEM_write(FILE *fp, const char *name, const char *header, const unsigned char *data, long len) { BIO *b = BIO_new_fp(fp, BIO_NOCLOSE); if (b == NULL) { OPENSSL_PUT_ERROR(PEM, ERR_R_BUF_LIB); return 0; } int ret = PEM_write_bio(b, name, header, data, len); BIO_free(b); return ret; } int PEM_write_bio(BIO *bp, const char *name, const char *header, const unsigned char *data, long len) { int nlen, n, i, j, outl; unsigned char *buf = NULL; EVP_ENCODE_CTX ctx; int reason = ERR_R_BUF_LIB; int retval = 0; EVP_EncodeInit(&ctx); nlen = strlen(name); if ((BIO_write(bp, "-----BEGIN ", 11) != 11) || (BIO_write(bp, name, nlen) != nlen) || (BIO_write(bp, "-----\n", 6) != 6)) { goto err; } i = strlen(header); if (i > 0) { if ((BIO_write(bp, header, i) != i) || (BIO_write(bp, "\n", 1) != 1)) { goto err; } } buf = reinterpret_cast(OPENSSL_malloc(PEM_BUFSIZE * 8)); if (buf == NULL) { goto err; } i = j = 0; while (len > 0) { n = (int)((len > (PEM_BUFSIZE * 5)) ? 
(PEM_BUFSIZE * 5) : len); EVP_EncodeUpdate(&ctx, buf, &outl, &(data[j]), n); if ((outl) && (BIO_write(bp, (char *)buf, outl) != outl)) { goto err; } i += outl; len -= n; j += n; } EVP_EncodeFinal(&ctx, buf, &outl); if ((outl > 0) && (BIO_write(bp, (char *)buf, outl) != outl)) { goto err; } if ((BIO_write(bp, "-----END ", 9) != 9) || (BIO_write(bp, name, nlen) != nlen) || (BIO_write(bp, "-----\n", 6) != 6)) { goto err; } retval = i + outl; err: if (retval == 0) { OPENSSL_PUT_ERROR(PEM, reason); } OPENSSL_free(buf); return retval; } int PEM_read(FILE *fp, char **name, char **header, unsigned char **data, long *len) { BIO *b = BIO_new_fp(fp, BIO_NOCLOSE); if (b == NULL) { OPENSSL_PUT_ERROR(PEM, ERR_R_BUF_LIB); return 0; } int ret = PEM_read_bio(b, name, header, data, len); BIO_free(b); return ret; } int PEM_read_bio(BIO *bp, char **name, char **header, unsigned char **data, long *len) { EVP_ENCODE_CTX ctx; int end = 0, i, k, bl = 0, hl = 0, nohead = 0; char buf[256]; BUF_MEM *nameB; BUF_MEM *headerB; BUF_MEM *dataB, *tmpB; nameB = BUF_MEM_new(); headerB = BUF_MEM_new(); dataB = BUF_MEM_new(); if ((nameB == NULL) || (headerB == NULL) || (dataB == NULL)) { BUF_MEM_free(nameB); BUF_MEM_free(headerB); BUF_MEM_free(dataB); return 0; } buf[254] = '\0'; for (;;) { i = BIO_gets(bp, buf, 254); if (i <= 0) { OPENSSL_PUT_ERROR(PEM, PEM_R_NO_START_LINE); goto err; } while ((i >= 0) && (buf[i] <= ' ')) { i--; } buf[++i] = '\n'; buf[++i] = '\0'; if (strncmp(buf, "-----BEGIN ", 11) == 0) { i = strlen(&(buf[11])); if (strncmp(&(buf[11 + i - 6]), "-----\n", 6) != 0) { continue; } if (!BUF_MEM_grow(nameB, i + 9)) { goto err; } OPENSSL_memcpy(nameB->data, &(buf[11]), i - 6); nameB->data[i - 6] = '\0'; break; } } hl = 0; if (!BUF_MEM_grow(headerB, 256)) { goto err; } headerB->data[0] = '\0'; for (;;) { i = BIO_gets(bp, buf, 254); if (i <= 0) { break; } while ((i >= 0) && (buf[i] <= ' ')) { i--; } buf[++i] = '\n'; buf[++i] = '\0'; if (buf[0] == '\n') { break; } if (!BUF_MEM_grow(headerB, 
hl + i + 9)) { goto err; } if (strncmp(buf, "-----END ", 9) == 0) { nohead = 1; break; } OPENSSL_memcpy(&(headerB->data[hl]), buf, i); headerB->data[hl + i] = '\0'; hl += i; } bl = 0; if (!BUF_MEM_grow(dataB, 1024)) { goto err; } dataB->data[0] = '\0'; if (!nohead) { for (;;) { i = BIO_gets(bp, buf, 254); if (i <= 0) { break; } while ((i >= 0) && (buf[i] <= ' ')) { i--; } buf[++i] = '\n'; buf[++i] = '\0'; if (i != 65) { end = 1; } if (strncmp(buf, "-----END ", 9) == 0) { break; } if (i > 65) { break; } if (!BUF_MEM_grow_clean(dataB, i + bl + 9)) { goto err; } OPENSSL_memcpy(&(dataB->data[bl]), buf, i); dataB->data[bl + i] = '\0'; bl += i; if (end) { buf[0] = '\0'; i = BIO_gets(bp, buf, 254); if (i <= 0) { break; } while ((i >= 0) && (buf[i] <= ' ')) { i--; } buf[++i] = '\n'; buf[++i] = '\0'; break; } } } else { tmpB = headerB; headerB = dataB; dataB = tmpB; bl = hl; } i = strlen(nameB->data); if ((strncmp(buf, "-----END ", 9) != 0) || (strncmp(nameB->data, &(buf[9]), i) != 0) || (strncmp(&(buf[9 + i]), "-----\n", 6) != 0)) { OPENSSL_PUT_ERROR(PEM, PEM_R_BAD_END_LINE); goto err; } EVP_DecodeInit(&ctx); i = EVP_DecodeUpdate(&ctx, (unsigned char *)dataB->data, &bl, (unsigned char *)dataB->data, bl); if (i < 0) { OPENSSL_PUT_ERROR(PEM, PEM_R_BAD_BASE64_DECODE); goto err; } i = EVP_DecodeFinal(&ctx, (unsigned char *)&(dataB->data[bl]), &k); if (i < 0) { OPENSSL_PUT_ERROR(PEM, PEM_R_BAD_BASE64_DECODE); goto err; } bl += k; if (bl == 0) { goto err; } *name = nameB->data; *header = headerB->data; *data = (unsigned char *)dataB->data; *len = bl; OPENSSL_free(nameB); OPENSSL_free(headerB); OPENSSL_free(dataB); return 1; err: BUF_MEM_free(nameB); BUF_MEM_free(headerB); BUF_MEM_free(dataB); return 0; } int PEM_def_callback(char *buf, int size, int rwflag, void *userdata) { if (!buf || !userdata || size < 0) { return -1; } size_t len = strlen((char *)userdata); if (len >= (size_t)size) { return -1; } OPENSSL_strlcpy(buf, reinterpret_cast(userdata), (size_t)size); return 
(int)len; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/pem/pem_oth.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include // Handle 'other' PEMs: not private keys void *PEM_ASN1_read_bio(d2i_of_void *d2i, const char *name, BIO *bp, void **x, pem_password_cb *cb, void *u) { const unsigned char *p = NULL; unsigned char *data = NULL; long len; char *ret = NULL; if (!PEM_bytes_read_bio(&data, &len, NULL, name, bp, cb, u)) { return NULL; } p = data; ret = reinterpret_cast(d2i(x, &p, len)); if (ret == NULL) { OPENSSL_PUT_ERROR(PEM, ERR_R_ASN1_LIB); } OPENSSL_free(data); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/pem/pem_pk8.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include static int do_pk8pkey(BIO *bp, const EVP_PKEY *x, int isder, int nid, const EVP_CIPHER *enc, const char *pass, int pass_len, pem_password_cb *cb, void *u); static int do_pk8pkey_fp(FILE *bp, const EVP_PKEY *x, int isder, int nid, const EVP_CIPHER *enc, const char *pass, int pass_len, pem_password_cb *cb, void *u); // These functions write a private key in PKCS#8 format: it is a "drop in" // replacement for PEM_write_bio_PrivateKey() and friends. As usual if 'enc' // is NULL then it uses the unencrypted private key form. The 'nid' versions // uses PKCS#5 v1.5 PBE algorithms whereas the others use PKCS#5 v2.0. int PEM_write_bio_PKCS8PrivateKey_nid(BIO *bp, const EVP_PKEY *x, int nid, const char *pass, int pass_len, pem_password_cb *cb, void *u) { return do_pk8pkey(bp, x, 0, nid, NULL, pass, pass_len, cb, u); } int PEM_write_bio_PKCS8PrivateKey(BIO *bp, const EVP_PKEY *x, const EVP_CIPHER *enc, const char *pass, int pass_len, pem_password_cb *cb, void *u) { return do_pk8pkey(bp, x, 0, -1, enc, pass, pass_len, cb, u); } int i2d_PKCS8PrivateKey_bio(BIO *bp, const EVP_PKEY *x, const EVP_CIPHER *enc, const char *pass, int pass_len, pem_password_cb *cb, void *u) { return do_pk8pkey(bp, x, 1, -1, enc, pass, pass_len, cb, u); } int i2d_PKCS8PrivateKey_nid_bio(BIO *bp, const EVP_PKEY *x, int nid, const char *pass, int pass_len, pem_password_cb *cb, void *u) { return do_pk8pkey(bp, x, 1, nid, NULL, pass, pass_len, cb, u); } static int do_pk8pkey(BIO *bp, const EVP_PKEY *x, int isder, int nid, const EVP_CIPHER *enc, const char *pass, int pass_len, pem_password_cb *cb, void *u) { X509_SIG *p8; PKCS8_PRIV_KEY_INFO *p8inf; char buf[PEM_BUFSIZE]; int ret; if (!(p8inf = EVP_PKEY2PKCS8(x))) { OPENSSL_PUT_ERROR(PEM, PEM_R_ERROR_CONVERTING_PRIVATE_KEY); return 0; } if (enc || (nid != -1)) { 
if (!pass) { if (!cb) { cb = PEM_def_callback; } pass_len = cb(buf, PEM_BUFSIZE, 1, u); if (pass_len < 0) { OPENSSL_PUT_ERROR(PEM, PEM_R_READ_KEY); PKCS8_PRIV_KEY_INFO_free(p8inf); return 0; } pass = buf; } p8 = PKCS8_encrypt(nid, enc, pass, pass_len, NULL, 0, 0, p8inf); if (pass == buf) { OPENSSL_cleanse(buf, pass_len); } PKCS8_PRIV_KEY_INFO_free(p8inf); if (isder) { ret = i2d_PKCS8_bio(bp, p8); } else { ret = PEM_write_bio_PKCS8(bp, p8); } X509_SIG_free(p8); return ret; } else { if (isder) { ret = i2d_PKCS8_PRIV_KEY_INFO_bio(bp, p8inf); } else { ret = PEM_write_bio_PKCS8_PRIV_KEY_INFO(bp, p8inf); } PKCS8_PRIV_KEY_INFO_free(p8inf); return ret; } } EVP_PKEY *d2i_PKCS8PrivateKey_bio(BIO *bp, EVP_PKEY **x, pem_password_cb *cb, void *u) { PKCS8_PRIV_KEY_INFO *p8inf = NULL; X509_SIG *p8 = NULL; int pass_len; EVP_PKEY *ret; char psbuf[PEM_BUFSIZE]; p8 = d2i_PKCS8_bio(bp, NULL); if (!p8) { return NULL; } pass_len = 0; if (!cb) { cb = PEM_def_callback; } pass_len = cb(psbuf, PEM_BUFSIZE, 0, u); if (pass_len < 0) { OPENSSL_PUT_ERROR(PEM, PEM_R_BAD_PASSWORD_READ); X509_SIG_free(p8); return NULL; } p8inf = PKCS8_decrypt(p8, psbuf, pass_len); X509_SIG_free(p8); OPENSSL_cleanse(psbuf, pass_len); if (!p8inf) { return NULL; } ret = EVP_PKCS82PKEY(p8inf); PKCS8_PRIV_KEY_INFO_free(p8inf); if (!ret) { return NULL; } if (x) { if (*x) { EVP_PKEY_free(*x); } *x = ret; } return ret; } int i2d_PKCS8PrivateKey_fp(FILE *fp, const EVP_PKEY *x, const EVP_CIPHER *enc, const char *pass, int pass_len, pem_password_cb *cb, void *u) { return do_pk8pkey_fp(fp, x, 1, -1, enc, pass, pass_len, cb, u); } int i2d_PKCS8PrivateKey_nid_fp(FILE *fp, const EVP_PKEY *x, int nid, const char *pass, int pass_len, pem_password_cb *cb, void *u) { return do_pk8pkey_fp(fp, x, 1, nid, NULL, pass, pass_len, cb, u); } int PEM_write_PKCS8PrivateKey_nid(FILE *fp, const EVP_PKEY *x, int nid, const char *pass, int pass_len, pem_password_cb *cb, void *u) { return do_pk8pkey_fp(fp, x, 0, nid, NULL, pass, pass_len, cb, u); 
} int PEM_write_PKCS8PrivateKey(FILE *fp, const EVP_PKEY *x, const EVP_CIPHER *enc, const char *pass, int pass_len, pem_password_cb *cb, void *u) { return do_pk8pkey_fp(fp, x, 0, -1, enc, pass, pass_len, cb, u); } static int do_pk8pkey_fp(FILE *fp, const EVP_PKEY *x, int isder, int nid, const EVP_CIPHER *enc, const char *pass, int pass_len, pem_password_cb *cb, void *u) { BIO *bp; int ret; if (!(bp = BIO_new_fp(fp, BIO_NOCLOSE))) { OPENSSL_PUT_ERROR(PEM, ERR_R_BUF_LIB); return 0; } ret = do_pk8pkey(bp, x, isder, nid, enc, pass, pass_len, cb, u); BIO_free(bp); return ret; } EVP_PKEY *d2i_PKCS8PrivateKey_fp(FILE *fp, EVP_PKEY **x, pem_password_cb *cb, void *u) { BIO *bp; EVP_PKEY *ret; if (!(bp = BIO_new_fp(fp, BIO_NOCLOSE))) { OPENSSL_PUT_ERROR(PEM, ERR_R_BUF_LIB); return NULL; } ret = d2i_PKCS8PrivateKey_bio(bp, x, cb, u); BIO_free(bp); return ret; } IMPLEMENT_PEM_rw(PKCS8, X509_SIG, PEM_STRING_PKCS8, X509_SIG) IMPLEMENT_PEM_rw(PKCS8_PRIV_KEY_INFO, PKCS8_PRIV_KEY_INFO, PEM_STRING_PKCS8INF, PKCS8_PRIV_KEY_INFO) ================================================ FILE: Sources/CNIOBoringSSL/crypto/pem/pem_pkey.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include EVP_PKEY *PEM_read_bio_PrivateKey(BIO *bp, EVP_PKEY **x, pem_password_cb *cb, void *u) { char *nm = NULL; const unsigned char *p = NULL; unsigned char *data = NULL; long len; EVP_PKEY *ret = NULL; if (!PEM_bytes_read_bio(&data, &len, &nm, PEM_STRING_EVP_PKEY, bp, cb, u)) { return NULL; } p = data; if (strcmp(nm, PEM_STRING_PKCS8INF) == 0) { PKCS8_PRIV_KEY_INFO *p8inf; p8inf = d2i_PKCS8_PRIV_KEY_INFO(NULL, &p, len); if (!p8inf) { goto p8err; } ret = EVP_PKCS82PKEY(p8inf); if (x) { if (*x) { EVP_PKEY_free((EVP_PKEY *)*x); } *x = ret; } PKCS8_PRIV_KEY_INFO_free(p8inf); } else if (strcmp(nm, PEM_STRING_PKCS8) == 0) { PKCS8_PRIV_KEY_INFO *p8inf; X509_SIG *p8; int pass_len; char psbuf[PEM_BUFSIZE]; p8 = d2i_X509_SIG(NULL, &p, len); if (!p8) { goto p8err; } pass_len = 0; if (!cb) { cb = PEM_def_callback; } pass_len = cb(psbuf, PEM_BUFSIZE, 0, u); if (pass_len < 0) { OPENSSL_PUT_ERROR(PEM, PEM_R_BAD_PASSWORD_READ); X509_SIG_free(p8); goto err; } p8inf = PKCS8_decrypt(p8, psbuf, pass_len); X509_SIG_free(p8); OPENSSL_cleanse(psbuf, pass_len); if (!p8inf) { goto p8err; } ret = EVP_PKCS82PKEY(p8inf); if (x) { if (*x) { EVP_PKEY_free((EVP_PKEY *)*x); } *x = ret; } PKCS8_PRIV_KEY_INFO_free(p8inf); } else if (strcmp(nm, PEM_STRING_RSA) == 0) { // TODO(davidben): d2i_PrivateKey parses PKCS#8 along with the // standalone format. This and the cases below probably should not // accept PKCS#8. 
ret = d2i_PrivateKey(EVP_PKEY_RSA, x, &p, len); } else if (strcmp(nm, PEM_STRING_EC) == 0) { ret = d2i_PrivateKey(EVP_PKEY_EC, x, &p, len); } else if (strcmp(nm, PEM_STRING_DSA) == 0) { ret = d2i_PrivateKey(EVP_PKEY_DSA, x, &p, len); } p8err: if (ret == NULL) { OPENSSL_PUT_ERROR(PEM, ERR_R_ASN1_LIB); } err: OPENSSL_free(nm); OPENSSL_free(data); return ret; } int PEM_write_bio_PrivateKey(BIO *bp, EVP_PKEY *x, const EVP_CIPHER *enc, const unsigned char *pass, int pass_len, pem_password_cb *cb, void *u) { return PEM_write_bio_PKCS8PrivateKey(bp, x, enc, (const char *)pass, pass_len, cb, u); } EVP_PKEY *PEM_read_PrivateKey(FILE *fp, EVP_PKEY **x, pem_password_cb *cb, void *u) { BIO *b = BIO_new_fp(fp, BIO_NOCLOSE); if (b == NULL) { OPENSSL_PUT_ERROR(PEM, ERR_R_BUF_LIB); return NULL; } EVP_PKEY *ret = PEM_read_bio_PrivateKey(b, x, cb, u); BIO_free(b); return ret; } int PEM_write_PrivateKey(FILE *fp, EVP_PKEY *x, const EVP_CIPHER *enc, const unsigned char *pass, int pass_len, pem_password_cb *cb, void *u) { BIO *b = BIO_new_fp(fp, BIO_NOCLOSE); if (b == NULL) { OPENSSL_PUT_ERROR(PEM, ERR_R_BUF_LIB); return 0; } int ret = PEM_write_bio_PrivateKey(b, x, enc, pass, pass_len, cb, u); BIO_free(b); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/pem/pem_x509.cc ================================================ /* * Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include IMPLEMENT_PEM_rw(X509, X509, PEM_STRING_X509, X509) ================================================ FILE: Sources/CNIOBoringSSL/crypto/pem/pem_xaux.cc ================================================ /* * Copyright 2001-2016 The OpenSSL Project Authors. 
All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include IMPLEMENT_PEM_rw(X509_AUX, X509, PEM_STRING_X509_TRUSTED, X509_AUX) ================================================ FILE: Sources/CNIOBoringSSL/crypto/pkcs7/internal.h ================================================ /* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_PKCS7_INTERNAL_H #define OPENSSL_HEADER_PKCS7_INTERNAL_H #include #if defined(__cplusplus) extern "C" { #endif // pkcs7_parse_header reads the non-certificate/non-CRL prefix of a PKCS#7 // SignedData blob from |cbs| and sets |*out| to point to the rest of the // input. If the input is in BER format, then |*der_bytes| will be set to a // pointer that needs to be freed by the caller once they have finished // processing |*out| (which will be pointing into |*der_bytes|). // // It returns one on success or zero on error. On error, |*der_bytes| is // NULL. 
int pkcs7_parse_header(uint8_t **der_bytes, CBS *out, CBS *cbs); // pkcs7_add_signed_data writes a PKCS#7, SignedData structure to |out|. While // doing so it makes callbacks to let the caller fill in parts of the structure. // All callbacks are ignored if NULL and return one on success or zero on error. // // digest_algos_cb: may write AlgorithmIdentifiers into the given CBB, which // is a SET of digest algorithms. // cert_crl_cb: may write the |certificates| or |crls| fields. // (See https://datatracker.ietf.org/doc/html/rfc2315#section-9.1) // signer_infos_cb: may write the contents of the |signerInfos| field. // (See https://datatracker.ietf.org/doc/html/rfc2315#section-9.1) // // pkcs7_add_signed_data returns one on success or zero on error. int pkcs7_add_signed_data(CBB *out, int (*digest_algos_cb)(CBB *out, const void *arg), int (*cert_crl_cb)(CBB *out, const void *arg), int (*signer_infos_cb)(CBB *out, const void *arg), const void *arg); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_PKCS7_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/pkcs7/pkcs7.cc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include "../bytestring/internal.h" #include "internal.h" // 1.2.840.113549.1.7.1 static const uint8_t kPKCS7Data[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x01}; // 1.2.840.113549.1.7.2 static const uint8_t kPKCS7SignedData[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x02}; // pkcs7_parse_header reads the non-certificate/non-CRL prefix of a PKCS#7 // SignedData blob from |cbs| and sets |*out| to point to the rest of the // input. If the input is in BER format, then |*der_bytes| will be set to a // pointer that needs to be freed by the caller once they have finished // processing |*out| (which will be pointing into |*der_bytes|). // // It returns one on success or zero on error. On error, |*der_bytes| is // NULL. int pkcs7_parse_header(uint8_t **der_bytes, CBS *out, CBS *cbs) { CBS in, content_info, content_type, wrapped_signed_data, signed_data; uint64_t version; // The input may be in BER format. *der_bytes = NULL; if (!CBS_asn1_ber_to_der(cbs, &in, der_bytes) || // See https://tools.ietf.org/html/rfc2315#section-7 !CBS_get_asn1(&in, &content_info, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&content_info, &content_type, CBS_ASN1_OBJECT)) { goto err; } if (!CBS_mem_equal(&content_type, kPKCS7SignedData, sizeof(kPKCS7SignedData))) { OPENSSL_PUT_ERROR(PKCS7, PKCS7_R_NOT_PKCS7_SIGNED_DATA); goto err; } // See https://tools.ietf.org/html/rfc2315#section-9.1 if (!CBS_get_asn1(&content_info, &wrapped_signed_data, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || !CBS_get_asn1(&wrapped_signed_data, &signed_data, CBS_ASN1_SEQUENCE) || !CBS_get_asn1_uint64(&signed_data, &version) || !CBS_get_asn1(&signed_data, NULL /* digests */, CBS_ASN1_SET) || !CBS_get_asn1(&signed_data, NULL /* content */, CBS_ASN1_SEQUENCE)) { goto err; } if (version < 1) { OPENSSL_PUT_ERROR(PKCS7, PKCS7_R_BAD_PKCS7_VERSION); goto err; } CBS_init(out, CBS_data(&signed_data), CBS_len(&signed_data)); return 1; err: 
OPENSSL_free(*der_bytes); *der_bytes = NULL; return 0; } int PKCS7_get_raw_certificates(STACK_OF(CRYPTO_BUFFER) *out_certs, CBS *cbs, CRYPTO_BUFFER_POOL *pool) { CBS signed_data, certificates; uint8_t *der_bytes = NULL; int ret = 0, has_certificates; const size_t initial_certs_len = sk_CRYPTO_BUFFER_num(out_certs); // See https://tools.ietf.org/html/rfc2315#section-9.1 if (!pkcs7_parse_header(&der_bytes, &signed_data, cbs) || !CBS_get_optional_asn1( &signed_data, &certificates, &has_certificates, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) { goto err; } if (!has_certificates) { CBS_init(&certificates, NULL, 0); } while (CBS_len(&certificates) > 0) { CBS cert; if (!CBS_get_asn1_element(&certificates, &cert, CBS_ASN1_SEQUENCE)) { goto err; } CRYPTO_BUFFER *buf = CRYPTO_BUFFER_new_from_CBS(&cert, pool); if (buf == NULL || !sk_CRYPTO_BUFFER_push(out_certs, buf)) { CRYPTO_BUFFER_free(buf); goto err; } } ret = 1; err: OPENSSL_free(der_bytes); if (!ret) { while (sk_CRYPTO_BUFFER_num(out_certs) != initial_certs_len) { CRYPTO_BUFFER *buf = sk_CRYPTO_BUFFER_pop(out_certs); CRYPTO_BUFFER_free(buf); } } return ret; } static int pkcs7_bundle_raw_certificates_cb(CBB *out, const void *arg) { const STACK_OF(CRYPTO_BUFFER) *certs = reinterpret_cast(arg); CBB certificates; // See https://tools.ietf.org/html/rfc2315#section-9.1 if (!CBB_add_asn1(out, &certificates, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) { return 0; } for (size_t i = 0; i < sk_CRYPTO_BUFFER_num(certs); i++) { CRYPTO_BUFFER *cert = sk_CRYPTO_BUFFER_value(certs, i); if (!CBB_add_bytes(&certificates, CRYPTO_BUFFER_data(cert), CRYPTO_BUFFER_len(cert))) { return 0; } } // |certificates| is a implicitly-tagged SET OF. 
return CBB_flush_asn1_set_of(&certificates) && CBB_flush(out); } int PKCS7_bundle_raw_certificates(CBB *out, const STACK_OF(CRYPTO_BUFFER) *certs) { return pkcs7_add_signed_data(out, /*digest_algos_cb=*/NULL, pkcs7_bundle_raw_certificates_cb, /*signer_infos_cb=*/NULL, certs); } int pkcs7_add_signed_data(CBB *out, int (*digest_algos_cb)(CBB *out, const void *arg), int (*cert_crl_cb)(CBB *out, const void *arg), int (*signer_infos_cb)(CBB *out, const void *arg), const void *arg) { CBB outer_seq, oid, wrapped_seq, seq, version_bytes, digest_algos_set, content_info, signer_infos; // See https://tools.ietf.org/html/rfc2315#section-7 if (!CBB_add_asn1(out, &outer_seq, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&outer_seq, &oid, CBS_ASN1_OBJECT) || !CBB_add_bytes(&oid, kPKCS7SignedData, sizeof(kPKCS7SignedData)) || !CBB_add_asn1(&outer_seq, &wrapped_seq, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || // See https://tools.ietf.org/html/rfc2315#section-9.1 !CBB_add_asn1(&wrapped_seq, &seq, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&seq, &version_bytes, CBS_ASN1_INTEGER) || !CBB_add_u8(&version_bytes, 1) || !CBB_add_asn1(&seq, &digest_algos_set, CBS_ASN1_SET) || (digest_algos_cb != NULL && !digest_algos_cb(&digest_algos_set, arg)) || !CBB_add_asn1(&seq, &content_info, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&content_info, &oid, CBS_ASN1_OBJECT) || !CBB_add_bytes(&oid, kPKCS7Data, sizeof(kPKCS7Data)) || (cert_crl_cb != NULL && !cert_crl_cb(&seq, arg)) || !CBB_add_asn1(&seq, &signer_infos, CBS_ASN1_SET) || (signer_infos_cb != NULL && !signer_infos_cb(&signer_infos, arg))) { return 0; } return CBB_flush(out); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/pkcs7/pkcs7_x509.cc ================================================ /* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission 
notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include "../internal.h" #include "internal.h" int PKCS7_get_certificates(STACK_OF(X509) *out_certs, CBS *cbs) { int ret = 0; const size_t initial_certs_len = sk_X509_num(out_certs); STACK_OF(CRYPTO_BUFFER) *raw = sk_CRYPTO_BUFFER_new_null(); if (raw == NULL || !PKCS7_get_raw_certificates(raw, cbs, NULL)) { goto err; } for (size_t i = 0; i < sk_CRYPTO_BUFFER_num(raw); i++) { CRYPTO_BUFFER *buf = sk_CRYPTO_BUFFER_value(raw, i); X509 *x509 = X509_parse_from_buffer(buf); if (x509 == NULL || !sk_X509_push(out_certs, x509)) { X509_free(x509); goto err; } } ret = 1; err: sk_CRYPTO_BUFFER_pop_free(raw, CRYPTO_BUFFER_free); if (!ret) { while (sk_X509_num(out_certs) != initial_certs_len) { X509 *x509 = sk_X509_pop(out_certs); X509_free(x509); } } return ret; } int PKCS7_get_CRLs(STACK_OF(X509_CRL) *out_crls, CBS *cbs) { CBS signed_data, crls; uint8_t *der_bytes = NULL; int ret = 0, has_crls; const size_t initial_crls_len = sk_X509_CRL_num(out_crls); // See https://tools.ietf.org/html/rfc2315#section-9.1 if (!pkcs7_parse_header(&der_bytes, &signed_data, cbs) || // Even if only CRLs are included, there may be an empty certificates // block. OpenSSL does this, for example. 
!CBS_get_optional_asn1( &signed_data, NULL, NULL, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || !CBS_get_optional_asn1( &signed_data, &crls, &has_crls, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 1)) { goto err; } if (!has_crls) { CBS_init(&crls, NULL, 0); } while (CBS_len(&crls) > 0) { CBS crl_data; X509_CRL *crl; const uint8_t *inp; if (!CBS_get_asn1_element(&crls, &crl_data, CBS_ASN1_SEQUENCE)) { goto err; } if (CBS_len(&crl_data) > LONG_MAX) { goto err; } inp = CBS_data(&crl_data); crl = d2i_X509_CRL(NULL, &inp, (long)CBS_len(&crl_data)); if (!crl) { goto err; } assert(inp == CBS_data(&crl_data) + CBS_len(&crl_data)); if (sk_X509_CRL_push(out_crls, crl) == 0) { X509_CRL_free(crl); goto err; } } ret = 1; err: OPENSSL_free(der_bytes); if (!ret) { while (sk_X509_CRL_num(out_crls) != initial_crls_len) { X509_CRL_free(sk_X509_CRL_pop(out_crls)); } } return ret; } int PKCS7_get_PEM_certificates(STACK_OF(X509) *out_certs, BIO *pem_bio) { uint8_t *data; long len; int ret; // Even though we pass PEM_STRING_PKCS7 as the expected PEM type here, PEM // internally will actually allow several other values too, including // "CERTIFICATE". if (!PEM_bytes_read_bio(&data, &len, NULL /* PEM type output */, PEM_STRING_PKCS7, pem_bio, NULL /* password callback */, NULL /* password callback argument */)) { return 0; } CBS cbs; CBS_init(&cbs, data, len); ret = PKCS7_get_certificates(out_certs, &cbs); OPENSSL_free(data); return ret; } int PKCS7_get_PEM_CRLs(STACK_OF(X509_CRL) *out_crls, BIO *pem_bio) { uint8_t *data; long len; int ret; // Even though we pass PEM_STRING_PKCS7 as the expected PEM type here, PEM // internally will actually allow several other values too, including // "CERTIFICATE". 
if (!PEM_bytes_read_bio(&data, &len, NULL /* PEM type output */, PEM_STRING_PKCS7, pem_bio, NULL /* password callback */, NULL /* password callback argument */)) { return 0; } CBS cbs; CBS_init(&cbs, data, len); ret = PKCS7_get_CRLs(out_crls, &cbs); OPENSSL_free(data); return ret; } static int pkcs7_bundle_certificates_cb(CBB *out, const void *arg) { const STACK_OF(X509) *certs = reinterpret_cast(arg); size_t i; CBB certificates; // See https://tools.ietf.org/html/rfc2315#section-9.1 if (!CBB_add_asn1(out, &certificates, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) { return 0; } for (i = 0; i < sk_X509_num(certs); i++) { X509 *x509 = sk_X509_value(certs, i); uint8_t *buf; int len = i2d_X509(x509, NULL); if (len < 0 || !CBB_add_space(&certificates, &buf, len) || i2d_X509(x509, &buf) < 0) { return 0; } } // |certificates| is a implicitly-tagged SET OF. return CBB_flush_asn1_set_of(&certificates) && CBB_flush(out); } int PKCS7_bundle_certificates(CBB *out, const STACK_OF(X509) *certs) { return pkcs7_add_signed_data(out, /*digest_algos_cb=*/NULL, pkcs7_bundle_certificates_cb, /*signer_infos_cb=*/NULL, certs); } static int pkcs7_bundle_crls_cb(CBB *out, const void *arg) { const STACK_OF(X509_CRL) *crls = reinterpret_cast(arg); size_t i; CBB crl_data; // See https://tools.ietf.org/html/rfc2315#section-9.1 if (!CBB_add_asn1(out, &crl_data, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 1)) { return 0; } for (i = 0; i < sk_X509_CRL_num(crls); i++) { X509_CRL *crl = sk_X509_CRL_value(crls, i); uint8_t *buf; int len = i2d_X509_CRL(crl, NULL); if (len < 0 || !CBB_add_space(&crl_data, &buf, len) || i2d_X509_CRL(crl, &buf) < 0) { return 0; } } // |crl_data| is a implicitly-tagged SET OF. 
return CBB_flush_asn1_set_of(&crl_data) && CBB_flush(out); } int PKCS7_bundle_CRLs(CBB *out, const STACK_OF(X509_CRL) *crls) { return pkcs7_add_signed_data(out, /*digest_algos_cb=*/NULL, pkcs7_bundle_crls_cb, /*signer_infos_cb=*/NULL, crls); } static PKCS7 *pkcs7_new(CBS *cbs) { CBS copy = *cbs, copy2 = *cbs; PKCS7 *ret = reinterpret_cast(OPENSSL_zalloc(sizeof(PKCS7))); if (ret == NULL) { return NULL; } ret->type = OBJ_nid2obj(NID_pkcs7_signed); ret->d.sign = reinterpret_cast(OPENSSL_malloc(sizeof(PKCS7_SIGNED))); if (ret->d.sign == NULL) { goto err; } ret->d.sign->cert = sk_X509_new_null(); ret->d.sign->crl = sk_X509_CRL_new_null(); if (ret->d.sign->cert == NULL || ret->d.sign->crl == NULL || !PKCS7_get_certificates(ret->d.sign->cert, ©) || !PKCS7_get_CRLs(ret->d.sign->crl, cbs)) { goto err; } if (sk_X509_num(ret->d.sign->cert) == 0) { sk_X509_free(ret->d.sign->cert); ret->d.sign->cert = NULL; } if (sk_X509_CRL_num(ret->d.sign->crl) == 0) { sk_X509_CRL_free(ret->d.sign->crl); ret->d.sign->crl = NULL; } ret->ber_len = CBS_len(©2) - CBS_len(cbs); ret->ber_bytes = reinterpret_cast( OPENSSL_memdup(CBS_data(©2), ret->ber_len)); if (ret->ber_bytes == NULL) { goto err; } return ret; err: PKCS7_free(ret); return NULL; } PKCS7 *d2i_PKCS7(PKCS7 **out, const uint8_t **inp, size_t len) { CBS cbs; CBS_init(&cbs, *inp, len); PKCS7 *ret = pkcs7_new(&cbs); if (ret == NULL) { return NULL; } *inp = CBS_data(&cbs); if (out != NULL) { PKCS7_free(*out); *out = ret; } return ret; } PKCS7 *d2i_PKCS7_bio(BIO *bio, PKCS7 **out) { // Use a generous bound, to allow for PKCS#7 files containing large root sets. 
static const size_t kMaxSize = 4 * 1024 * 1024; uint8_t *data; size_t len; if (!BIO_read_asn1(bio, &data, &len, kMaxSize)) { return NULL; } CBS cbs; CBS_init(&cbs, data, len); PKCS7 *ret = pkcs7_new(&cbs); OPENSSL_free(data); if (out != NULL && ret != NULL) { PKCS7_free(*out); *out = ret; } return ret; } int i2d_PKCS7(const PKCS7 *p7, uint8_t **out) { if (p7->ber_len > INT_MAX) { OPENSSL_PUT_ERROR(PKCS8, ERR_R_OVERFLOW); return -1; } if (out == NULL) { return (int)p7->ber_len; } if (*out == NULL) { *out = reinterpret_cast(OPENSSL_memdup(p7->ber_bytes, p7->ber_len)); if (*out == NULL) { return -1; } } else { OPENSSL_memcpy(*out, p7->ber_bytes, p7->ber_len); *out += p7->ber_len; } return (int)p7->ber_len; } int i2d_PKCS7_bio(BIO *bio, const PKCS7 *p7) { return BIO_write_all(bio, p7->ber_bytes, p7->ber_len); } void PKCS7_free(PKCS7 *p7) { if (p7 == NULL) { return; } OPENSSL_free(p7->ber_bytes); ASN1_OBJECT_free(p7->type); // We only supported signed data. if (p7->d.sign != NULL) { sk_X509_pop_free(p7->d.sign->cert, X509_free); sk_X509_CRL_pop_free(p7->d.sign->crl, X509_CRL_free); OPENSSL_free(p7->d.sign); } OPENSSL_free(p7); } // We only support signed data, so these getters are no-ops. int PKCS7_type_is_data(const PKCS7 *p7) { return 0; } int PKCS7_type_is_digest(const PKCS7 *p7) { return 0; } int PKCS7_type_is_encrypted(const PKCS7 *p7) { return 0; } int PKCS7_type_is_enveloped(const PKCS7 *p7) { return 0; } int PKCS7_type_is_signed(const PKCS7 *p7) { return 1; } int PKCS7_type_is_signedAndEnveloped(const PKCS7 *p7) { return 0; } // write_sha256_ai writes an AlgorithmIdentifier for SHA-256 to // |digest_algos_set|. static int write_sha256_ai(CBB *digest_algos_set, const void *arg) { CBB seq; return CBB_add_asn1(digest_algos_set, &seq, CBS_ASN1_SEQUENCE) && OBJ_nid2cbb(&seq, NID_sha256) && // // https://datatracker.ietf.org/doc/html/rfc5754#section-2 // "Implementations MUST generate SHA2 AlgorithmIdentifiers with absent // parameters." 
CBB_flush(digest_algos_set); } // sign_sha256 writes at most |max_out_sig| bytes of the signature of |data| by // |pkey| to |out_sig| and sets |*out_sig_len| to the number of bytes written. // It returns one on success or zero on error. static int sign_sha256(uint8_t *out_sig, size_t *out_sig_len, size_t max_out_sig, EVP_PKEY *pkey, BIO *data) { static const size_t kBufSize = 4096; uint8_t *buffer = reinterpret_cast(OPENSSL_malloc(kBufSize)); if (!buffer) { return 0; } EVP_MD_CTX ctx; EVP_MD_CTX_init(&ctx); int ret = 0; if (!EVP_DigestSignInit(&ctx, NULL, EVP_sha256(), NULL, pkey)) { goto out; } for (;;) { const int n = BIO_read(data, buffer, kBufSize); if (n == 0) { break; } else if (n < 0 || !EVP_DigestSignUpdate(&ctx, buffer, n)) { goto out; } } *out_sig_len = max_out_sig; if (!EVP_DigestSignFinal(&ctx, out_sig, out_sig_len)) { goto out; } ret = 1; out: EVP_MD_CTX_cleanup(&ctx); OPENSSL_free(buffer); return ret; } namespace { struct signer_info_data { const X509 *sign_cert; uint8_t *signature; size_t signature_len; }; } // namespace // write_signer_info writes the SignerInfo structure from // https://datatracker.ietf.org/doc/html/rfc2315#section-9.2 to |out|. It // returns one on success or zero on error. 
static int write_signer_info(CBB *out, const void *arg) { const struct signer_info_data *const si_data = reinterpret_cast(arg); int ret = 0; uint8_t *subject_bytes = NULL; uint8_t *serial_bytes = NULL; const int subject_len = i2d_X509_NAME(X509_get_subject_name(si_data->sign_cert), &subject_bytes); const int serial_len = i2d_ASN1_INTEGER( (ASN1_INTEGER *)X509_get0_serialNumber(si_data->sign_cert), &serial_bytes); CBB seq, issuer_and_serial, signing_algo, null, signature; if (subject_len < 0 || serial_len < 0 || !CBB_add_asn1(out, &seq, CBS_ASN1_SEQUENCE) || // version !CBB_add_asn1_uint64(&seq, 1) || !CBB_add_asn1(&seq, &issuer_and_serial, CBS_ASN1_SEQUENCE) || !CBB_add_bytes(&issuer_and_serial, subject_bytes, subject_len) || !CBB_add_bytes(&issuer_and_serial, serial_bytes, serial_len) || !write_sha256_ai(&seq, NULL) || !CBB_add_asn1(&seq, &signing_algo, CBS_ASN1_SEQUENCE) || !OBJ_nid2cbb(&signing_algo, NID_rsaEncryption) || !CBB_add_asn1(&signing_algo, &null, CBS_ASN1_NULL) || !CBB_add_asn1(&seq, &signature, CBS_ASN1_OCTETSTRING) || !CBB_add_bytes(&signature, si_data->signature, si_data->signature_len) || !CBB_flush(out)) { goto out; } ret = 1; out: OPENSSL_free(subject_bytes); OPENSSL_free(serial_bytes); return ret; } PKCS7 *PKCS7_sign(X509 *sign_cert, EVP_PKEY *pkey, STACK_OF(X509) *certs, BIO *data, int flags) { CBB cbb; if (!CBB_init(&cbb, 2048)) { return NULL; } uint8_t *der = NULL; size_t len; PKCS7 *ret = NULL; if (sign_cert == NULL && pkey == NULL && flags == PKCS7_DETACHED) { // Caller just wants to bundle certificates. if (!PKCS7_bundle_certificates(&cbb, certs)) { goto out; } } else if (sign_cert != NULL && pkey != NULL && certs == NULL && data != NULL && flags == (PKCS7_NOATTR | PKCS7_BINARY | PKCS7_NOCERTS | PKCS7_DETACHED) && EVP_PKEY_id(pkey) == NID_rsaEncryption) { // sign-file.c from the Linux kernel. 
const size_t signature_max_len = EVP_PKEY_size(pkey); struct signer_info_data si_data = { /*sign_cert=*/sign_cert, /*signature=*/ reinterpret_cast(OPENSSL_malloc(signature_max_len)), /*signature_len=*/0, }; if (!si_data.signature || !sign_sha256(si_data.signature, &si_data.signature_len, signature_max_len, pkey, data) || !pkcs7_add_signed_data(&cbb, write_sha256_ai, /*cert_crl_cb=*/NULL, write_signer_info, &si_data)) { OPENSSL_free(si_data.signature); goto out; } OPENSSL_free(si_data.signature); } else { OPENSSL_PUT_ERROR(PKCS7, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); goto out; } if (!CBB_finish(&cbb, &der, &len)) { goto out; } CBS cbs; CBS_init(&cbs, der, len); ret = pkcs7_new(&cbs); out: CBB_cleanup(&cbb); OPENSSL_free(der); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/pkcs8/internal.h ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_PKCS8_INTERNAL_H #define OPENSSL_HEADER_PKCS8_INTERNAL_H #include #include #if defined(__cplusplus) extern "C" { #endif struct pkcs8_priv_key_info_st { ASN1_INTEGER *version; X509_ALGOR *pkeyalg; ASN1_OCTET_STRING *pkey; STACK_OF(X509_ATTRIBUTE) *attributes; }; // pkcs8_pbe_decrypt decrypts |in| using the PBE scheme described by // |algorithm|, which should be a serialized AlgorithmIdentifier structure. On // success, it sets |*out| to a newly-allocated buffer containing the decrypted // result and returns one. Otherwise, it returns zero. 
int pkcs8_pbe_decrypt(uint8_t **out, size_t *out_len, CBS *algorithm,
                      const char *pass, size_t pass_len, const uint8_t *in,
                      size_t in_len);

// Purpose identifiers for the PKCS#12 KDF (|id| argument of pkcs12_key_gen).
#define PKCS12_KEY_ID 1
#define PKCS12_IV_ID 2
#define PKCS12_MAC_ID 3

// pkcs12_key_gen runs the PKCS#12 key derivation function as specified in
// RFC 7292, appendix B. On success, it writes the resulting |out_len| bytes of
// key material to |out| and returns one. Otherwise, it returns zero. |id|
// should be one of the |PKCS12_*_ID| values.
int pkcs12_key_gen(const char *pass, size_t pass_len, const uint8_t *salt,
                   size_t salt_len, uint8_t id, uint32_t iterations,
                   size_t out_len, uint8_t *out, const EVP_MD *md);

// pkcs12_pbe_encrypt_init configures |ctx| for encrypting with a PBES1 scheme
// defined in PKCS#12. It writes the corresponding AlgorithmIdentifier to
// |out|.
int pkcs12_pbe_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, int alg,
                            uint32_t iterations, const char *pass,
                            size_t pass_len, const uint8_t *salt,
                            size_t salt_len);

// pbe_suite describes one supported password-based encryption scheme: its
// NID, DER-encoded OID, and the cipher/digest constructors it uses.
struct pbe_suite {
  int pbe_nid;
  uint8_t oid[10];
  uint8_t oid_len;
  const EVP_CIPHER *(*cipher_func)(void);
  const EVP_MD *(*md_func)(void);
  // decrypt_init initializes |ctx| for decrypting. The password is specified
  // by |pass| and |pass_len|. |param| contains the serialized parameters field
  // of the AlgorithmIdentifier.
  //
  // It returns one on success and zero on error.
  int (*decrypt_init)(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx,
                      const char *pass, size_t pass_len, CBS *param);
};

#define PKCS5_SALT_LEN 8

int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx,
                            const char *pass, size_t pass_len, CBS *param);

// PKCS5_pbe2_encrypt_init configures |ctx| for encrypting with PKCS #5 PBES2,
// as defined in RFC 2898, with the specified parameters. It writes the
// corresponding AlgorithmIdentifier to |out|.
int PKCS5_pbe2_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, uint32_t iterations, const char *pass, size_t pass_len, const uint8_t *salt, size_t salt_len); // pkcs12_iterations_acceptable returns one if |iterations| is a reasonable // number of PBKDF2 iterations and zero otherwise. int pkcs12_iterations_acceptable(uint64_t iterations); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_PKCS8_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/pkcs8/p5_pbev2.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include "internal.h" #include "../internal.h" // 1.2.840.113549.1.5.12 static const uint8_t kPBKDF2[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0c}; // 1.2.840.113549.1.5.13 static const uint8_t kPBES2[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0d}; // 1.2.840.113549.2.7 static const uint8_t kHMACWithSHA1[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x07}; // 1.2.840.113549.2.9 static const uint8_t kHMACWithSHA256[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x09}; static const struct { uint8_t oid[9]; uint8_t oid_len; int nid; const EVP_CIPHER *(*cipher_func)(void); } kCipherOIDs[] = { // 1.2.840.113549.3.2 {{0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x02}, 8, NID_rc2_cbc, &EVP_rc2_cbc}, // 1.2.840.113549.3.7 {{0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x07}, 8, NID_des_ede3_cbc, &EVP_des_ede3_cbc}, // 2.16.840.1.101.3.4.1.2 {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x02}, 9, NID_aes_128_cbc, &EVP_aes_128_cbc}, // 2.16.840.1.101.3.4.1.22 
    {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x16},
     9,
     NID_aes_192_cbc,
     &EVP_aes_192_cbc},
    // 2.16.840.1.101.3.4.1.42
    {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x2a},
     9,
     NID_aes_256_cbc,
     &EVP_aes_256_cbc},
};

// cbs_to_cipher looks up the cipher whose DER-encoded OID equals the contents
// of |cbs|, or returns NULL if it is not one of the supported schemes above.
static const EVP_CIPHER *cbs_to_cipher(const CBS *cbs) {
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kCipherOIDs); i++) {
    if (CBS_mem_equal(cbs, kCipherOIDs[i].oid, kCipherOIDs[i].oid_len)) {
      return kCipherOIDs[i].cipher_func();
    }
  }

  return NULL;
}

// add_cipher_oid writes, to |out|, the ASN.1 OBJECT for the cipher identified
// by |nid|. It returns one on success and zero if |nid| is unsupported.
static int add_cipher_oid(CBB *out, int nid) {
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kCipherOIDs); i++) {
    if (kCipherOIDs[i].nid == nid) {
      CBB child;
      return CBB_add_asn1(out, &child, CBS_ASN1_OBJECT) &&
             CBB_add_bytes(&child, kCipherOIDs[i].oid,
                           kCipherOIDs[i].oid_len) &&
             CBB_flush(out);
    }
  }

  OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_CIPHER);
  return 0;
}

// pkcs5_pbe2_cipher_init derives a key from the password with PBKDF2 (using
// |pbkdf2_md| as the PRF) and initializes |ctx| with |cipher| and |iv| for
// encryption (|enc| = 1) or decryption (|enc| = 0). The derived key is wiped
// from the stack before returning.
static int pkcs5_pbe2_cipher_init(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
                                  const EVP_MD *pbkdf2_md, uint32_t iterations,
                                  const char *pass, size_t pass_len,
                                  const uint8_t *salt, size_t salt_len,
                                  const uint8_t *iv, size_t iv_len, int enc) {
  if (iv_len != EVP_CIPHER_iv_length(cipher)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_ERROR_SETTING_CIPHER_PARAMS);
    return 0;
  }

  uint8_t key[EVP_MAX_KEY_LENGTH];
  int ret = PKCS5_PBKDF2_HMAC(pass, pass_len, salt, salt_len, iterations,
                              pbkdf2_md, EVP_CIPHER_key_length(cipher), key) &&
            EVP_CipherInit_ex(ctx, cipher, NULL /* engine */, key, iv, enc);
  OPENSSL_cleanse(key, EVP_MAX_KEY_LENGTH);
  return ret;
}

int PKCS5_pbe2_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx,
                            const EVP_CIPHER *cipher, uint32_t iterations,
                            const char *pass, size_t pass_len,
                            const uint8_t *salt, size_t salt_len) {
  int cipher_nid = EVP_CIPHER_nid(cipher);
  if (cipher_nid == NID_undef) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_CIPHER_HAS_NO_OBJECT_IDENTIFIER);
    return 0;
  }

  // Generate a random IV.
  uint8_t iv[EVP_MAX_IV_LENGTH];
  if (!RAND_bytes(iv, EVP_CIPHER_iv_length(cipher))) {
    return 0;
  }

  // Serialize the PBES2 AlgorithmIdentifier. See RFC 2898, appendix A.
  CBB algorithm, oid, param, kdf, kdf_oid, kdf_param, salt_cbb, cipher_cbb,
      iv_cbb;
  if (!CBB_add_asn1(out, &algorithm, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&oid, kPBES2, sizeof(kPBES2)) ||
      !CBB_add_asn1(&algorithm, &param, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&param, &kdf, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&kdf, &kdf_oid, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&kdf_oid, kPBKDF2, sizeof(kPBKDF2)) ||
      !CBB_add_asn1(&kdf, &kdf_param, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&kdf_param, &salt_cbb, CBS_ASN1_OCTETSTRING) ||
      !CBB_add_bytes(&salt_cbb, salt, salt_len) ||
      !CBB_add_asn1_uint64(&kdf_param, iterations) ||
      // Specify a key length for RC2.
      (cipher_nid == NID_rc2_cbc &&
       !CBB_add_asn1_uint64(&kdf_param, EVP_CIPHER_key_length(cipher))) ||
      // Omit the PRF. We use the default hmacWithSHA1.
      !CBB_add_asn1(&param, &cipher_cbb, CBS_ASN1_SEQUENCE) ||
      !add_cipher_oid(&cipher_cbb, cipher_nid) ||
      // RFC 2898 says RC2-CBC and RC5-CBC-Pad use a SEQUENCE with version and
      // IV, but OpenSSL always uses an OCTET STRING IV, so we do the same.
      !CBB_add_asn1(&cipher_cbb, &iv_cbb, CBS_ASN1_OCTETSTRING) ||
      !CBB_add_bytes(&iv_cbb, iv, EVP_CIPHER_iv_length(cipher)) ||
      !CBB_flush(out)) {
    return 0;
  }

  return pkcs5_pbe2_cipher_init(ctx, cipher, EVP_sha1(), iterations, pass,
                                pass_len, salt, salt_len, iv,
                                EVP_CIPHER_iv_length(cipher), 1 /* encrypt */);
}

int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx,
                            const char *pass, size_t pass_len, CBS *param) {
  // Parse the outer PBES2-params structure: a KDF AlgorithmIdentifier followed
  // by an encryption-scheme AlgorithmIdentifier.
  CBS pbe_param, kdf, kdf_obj, enc_scheme, enc_obj;
  if (!CBS_get_asn1(param, &pbe_param, CBS_ASN1_SEQUENCE) ||
      CBS_len(param) != 0 ||
      !CBS_get_asn1(&pbe_param, &kdf, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1(&pbe_param, &enc_scheme, CBS_ASN1_SEQUENCE) ||
      CBS_len(&pbe_param) != 0 ||
      !CBS_get_asn1(&kdf, &kdf_obj, CBS_ASN1_OBJECT) ||
      !CBS_get_asn1(&enc_scheme, &enc_obj, CBS_ASN1_OBJECT)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR);
    return 0;
  }

  // Only PBKDF2 is supported.
  if (!CBS_mem_equal(&kdf_obj, kPBKDF2, sizeof(kPBKDF2))) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_KEY_DERIVATION_FUNCTION);
    return 0;
  }

  // See if we recognise the encryption algorithm.
  const EVP_CIPHER *cipher = cbs_to_cipher(&enc_obj);
  if (cipher == NULL) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_CIPHER);
    return 0;
  }

  // Parse the KDF parameters. See RFC 8018, appendix A.2.
  CBS pbkdf2_params, salt;
  uint64_t iterations;
  if (!CBS_get_asn1(&kdf, &pbkdf2_params, CBS_ASN1_SEQUENCE) ||
      CBS_len(&kdf) != 0 ||
      !CBS_get_asn1(&pbkdf2_params, &salt, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1_uint64(&pbkdf2_params, &iterations)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR);
    return 0;
  }

  if (!pkcs12_iterations_acceptable(iterations)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_ITERATION_COUNT);
    return 0;
  }

  // The optional keyLength parameter, if present, must match the key length of
  // the cipher.
  if (CBS_peek_asn1_tag(&pbkdf2_params, CBS_ASN1_INTEGER)) {
    uint64_t key_len;
    if (!CBS_get_asn1_uint64(&pbkdf2_params, &key_len)) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR);
      return 0;
    }

    if (key_len != EVP_CIPHER_key_length(cipher)) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_KEYLENGTH);
      return 0;
    }
  }

  // The PRF defaults to hmacWithSHA1 when absent.
  const EVP_MD *md = EVP_sha1();
  if (CBS_len(&pbkdf2_params) != 0) {
    CBS alg_id, prf;
    if (!CBS_get_asn1(&pbkdf2_params, &alg_id, CBS_ASN1_SEQUENCE) ||
        !CBS_get_asn1(&alg_id, &prf, CBS_ASN1_OBJECT) ||
        CBS_len(&pbkdf2_params) != 0) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR);
      return 0;
    }

    if (CBS_mem_equal(&prf, kHMACWithSHA1, sizeof(kHMACWithSHA1))) {
      // hmacWithSHA1 is the DEFAULT, so DER requires it be omitted, but we
      // match OpenSSL in tolerating it being present.
      md = EVP_sha1();
    } else if (CBS_mem_equal(&prf, kHMACWithSHA256, sizeof(kHMACWithSHA256))) {
      md = EVP_sha256();
    } else {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_PRF);
      return 0;
    }

    // All supported PRFs use a NULL parameter.
    CBS null;
    if (!CBS_get_asn1(&alg_id, &null, CBS_ASN1_NULL) || CBS_len(&null) != 0 ||
        CBS_len(&alg_id) != 0) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR);
      return 0;
    }
  }

  // Parse the encryption scheme parameters. Note OpenSSL does not match the
  // specification. Per RFC 2898, this should depend on the encryption scheme.
  // In particular, RC2-CBC uses a SEQUENCE with version and IV. We align with
  // OpenSSL.
  CBS iv;
  if (!CBS_get_asn1(&enc_scheme, &iv, CBS_ASN1_OCTETSTRING) ||
      CBS_len(&enc_scheme) != 0) {
    // NOTE(review): this reports PKCS8_R_UNSUPPORTED_PRF for what is an IV
    // decode failure; PKCS8_R_DECODE_ERROR would read more accurately. Kept
    // as-is — confirm against upstream before changing the error code.
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_PRF);
    return 0;
  }

  return pkcs5_pbe2_cipher_init(ctx, cipher, md, (uint32_t)iterations, pass,
                                pass_len, CBS_data(&salt), CBS_len(&salt),
                                CBS_data(&iv), CBS_len(&iv), 0 /* decrypt */);
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/pkcs8/pkcs8.cc
================================================
/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the <...> targets of these #include directives were lost in
// extraction; restore them from upstream BoringSSL before building.
#include #include #include #include #include #include #include #include #include #include #include #include "../bytestring/internal.h" "../internal.h"

// pkcs12_encode_password converts the UTF-8 password |in| to the BMPString
// (UCS-2, big-endian, NUL-terminated) form that the PKCS#12 KDF consumes. On
// success it sets |*out|/|*out_len| to a newly-allocated buffer and returns
// one; otherwise it returns zero.
static int pkcs12_encode_password(const char *in, size_t in_len, uint8_t **out,
                                  size_t *out_len) {
  CBB cbb;
  if (!CBB_init(&cbb, in_len * 2)) {
    return 0;
  }

  // Convert the password to BMPString, or UCS-2. See
  // https://tools.ietf.org/html/rfc7292#appendix-B.1.
  CBS cbs;
  CBS_init(&cbs, (const uint8_t *)in, in_len);
  while (CBS_len(&cbs) != 0) {
    uint32_t c;
    if (!CBS_get_utf8(&cbs, &c) || !CBB_add_ucs2_be(&cbb, c)) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_INVALID_CHARACTERS);
      goto err;
    }
  }

  // Terminate the result with a UCS-2 NUL.
  if (!CBB_add_ucs2_be(&cbb, 0) || !CBB_finish(&cbb, out, out_len)) {
    goto err;
  }

  return 1;

err:
  CBB_cleanup(&cbb);
  return 0;
}

int pkcs12_key_gen(const char *pass, size_t pass_len, const uint8_t *salt,
                   size_t salt_len, uint8_t id, uint32_t iterations,
                   size_t out_len, uint8_t *out, const EVP_MD *md) {
  // See https://tools.ietf.org/html/rfc7292#appendix-B. Quoted parts of the
  // specification have errata applied and other typos fixed.

  if (iterations < 1) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_ITERATION_COUNT);
    return 0;
  }

  int ret = 0;
  EVP_MD_CTX ctx;
  EVP_MD_CTX_init(&ctx);
  uint8_t *pass_raw = NULL, *I = NULL;
  size_t pass_raw_len = 0, I_len = 0;
  {
    // If |pass| is NULL, we use the empty string rather than {0, 0} as the raw
    // password.
    if (pass != NULL &&
        !pkcs12_encode_password(pass, pass_len, &pass_raw, &pass_raw_len)) {
      goto err;
    }

    // In the spec, |block_size| is called "v", but measured in bits.
    size_t block_size = EVP_MD_block_size(md);

    // 1. Construct a string, D (the "diversifier"), by concatenating v/8 copies
    // of ID.
    uint8_t D[EVP_MAX_MD_BLOCK_SIZE];
    OPENSSL_memset(D, id, block_size);

    // 2. Concatenate copies of the salt together to create a string S of length
    // v(ceiling(s/v)) bits (the final copy of the salt may be truncated to
    // create S). Note that if the salt is the empty string, then so is S.
    //
    // 3. Concatenate copies of the password together to create a string P of
    // length v(ceiling(p/v)) bits (the final copy of the password may be
    // truncated to create P). Note that if the password is the empty string,
    // then so is P.
    //
    // 4. Set I=S||P to be the concatenation of S and P.
    //
    // Guard the rounding-up arithmetic below against size_t overflow.
    if (salt_len + block_size - 1 < salt_len ||
        pass_raw_len + block_size - 1 < pass_raw_len) {
      OPENSSL_PUT_ERROR(PKCS8, ERR_R_OVERFLOW);
      goto err;
    }
    size_t S_len = block_size * ((salt_len + block_size - 1) / block_size);
    size_t P_len = block_size * ((pass_raw_len + block_size - 1) / block_size);
    I_len = S_len + P_len;
    if (I_len < S_len) {
      OPENSSL_PUT_ERROR(PKCS8, ERR_R_OVERFLOW);
      goto err;
    }

    I = reinterpret_cast<uint8_t *>(OPENSSL_malloc(I_len));
    if (I_len != 0 && I == NULL) {
      goto err;
    }

    // The |% salt_len| / |% pass_raw_len| repetition is safe: when the source
    // is empty, the corresponding loop bound (S_len or P_len) is zero.
    for (size_t i = 0; i < S_len; i++) {
      I[i] = salt[i % salt_len];
    }
    for (size_t i = 0; i < P_len; i++) {
      I[i + S_len] = pass_raw[i % pass_raw_len];
    }

    while (out_len != 0) {
      // A. Set A_i=H^r(D||I). (i.e., the r-th hash of D||I,
      // H(H(H(... H(D||I))))
      uint8_t A[EVP_MAX_MD_SIZE];
      unsigned A_len;
      if (!EVP_DigestInit_ex(&ctx, md, NULL) ||
          !EVP_DigestUpdate(&ctx, D, block_size) ||
          !EVP_DigestUpdate(&ctx, I, I_len) ||
          !EVP_DigestFinal_ex(&ctx, A, &A_len)) {
        goto err;
      }
      for (uint32_t iter = 1; iter < iterations; iter++) {
        if (!EVP_DigestInit_ex(&ctx, md, NULL) ||
            !EVP_DigestUpdate(&ctx, A, A_len) ||
            !EVP_DigestFinal_ex(&ctx, A, &A_len)) {
          goto err;
        }
      }

      size_t todo = out_len < A_len ? out_len : A_len;
      OPENSSL_memcpy(out, A, todo);
      out += todo;
      out_len -= todo;
      if (out_len == 0) {
        break;
      }

      // B. Concatenate copies of A_i to create a string B of length v bits (the
      // final copy of A_i may be truncated to create B).
      uint8_t B[EVP_MAX_MD_BLOCK_SIZE];
      for (size_t i = 0; i < block_size; i++) {
        B[i] = A[i % A_len];
      }

      // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit
      // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by setting
      // I_j=(I_j+B+1) mod 2^v for each j.
      assert(I_len % block_size == 0);
      for (size_t i = 0; i < I_len; i += block_size) {
        unsigned carry = 1;
        // |j| is unsigned, so the loop terminates when j wraps past zero —
        // this iterates from block_size - 1 down to 0 inclusive.
        for (size_t j = block_size - 1; j < block_size; j--) {
          carry += I[i + j] + B[j];
          I[i + j] = (uint8_t)carry;
          carry >>= 8;
        }
      }
    }

    ret = 1;
  }

err:
  OPENSSL_free(I);
  OPENSSL_free(pass_raw);
  EVP_MD_CTX_cleanup(&ctx);
  return ret;
}

// pkcs12_pbe_cipher_init derives the key and IV for |suite| with the PKCS#12
// KDF and initializes |ctx| for encryption or decryption. Key and IV material
// are wiped from the stack before returning.
static int pkcs12_pbe_cipher_init(const struct pbe_suite *suite,
                                  EVP_CIPHER_CTX *ctx, uint32_t iterations,
                                  const char *pass, size_t pass_len,
                                  const uint8_t *salt, size_t salt_len,
                                  int is_encrypt) {
  const EVP_CIPHER *cipher = suite->cipher_func();
  const EVP_MD *md = suite->md_func();

  uint8_t key[EVP_MAX_KEY_LENGTH];
  uint8_t iv[EVP_MAX_IV_LENGTH];
  if (!pkcs12_key_gen(pass, pass_len, salt, salt_len, PKCS12_KEY_ID,
                      iterations, EVP_CIPHER_key_length(cipher), key, md) ||
      !pkcs12_key_gen(pass, pass_len, salt, salt_len, PKCS12_IV_ID, iterations,
                      EVP_CIPHER_iv_length(cipher), iv, md)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_KEY_GEN_ERROR);
    return 0;
  }

  int ret = EVP_CipherInit_ex(ctx, cipher, NULL, key, iv, is_encrypt);
  OPENSSL_cleanse(key, EVP_MAX_KEY_LENGTH);
  OPENSSL_cleanse(iv, EVP_MAX_IV_LENGTH);
  return ret;
}

// pkcs12_pbe_decrypt_init parses the PBES1 parameters (salt and iteration
// count) from |param| and initializes |ctx| for decryption.
static int pkcs12_pbe_decrypt_init(const struct pbe_suite *suite,
                                   EVP_CIPHER_CTX *ctx, const char *pass,
                                   size_t pass_len, CBS *param) {
  CBS pbe_param, salt;
  uint64_t iterations;
  if (!CBS_get_asn1(param, &pbe_param, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1(&pbe_param, &salt, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1_uint64(&pbe_param, &iterations) ||
      CBS_len(&pbe_param) != 0 || CBS_len(param) != 0) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR);
    return 0;
  }

  if (!pkcs12_iterations_acceptable(iterations)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_ITERATION_COUNT);
    return 0;
  }

  return pkcs12_pbe_cipher_init(suite, ctx, (uint32_t)iterations, pass,
                                pass_len, CBS_data(&salt), CBS_len(&salt),
                                0 /* decrypt */);
}

// kBuiltinPBE lists the password-based encryption schemes supported for
// decryption; the final entry delegates to PBES2.
static const struct pbe_suite kBuiltinPBE[] = {
    {
        NID_pbe_WithSHA1And40BitRC2_CBC,
        // 1.2.840.113549.1.12.1.6
        {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c,
         0x01, 0x06},
        10,
        EVP_rc2_40_cbc,
        EVP_sha1,
        pkcs12_pbe_decrypt_init,
    },
    {
        NID_pbe_WithSHA1And128BitRC4,
        // 1.2.840.113549.1.12.1.1
        {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x01},
        10,
        EVP_rc4,
        EVP_sha1,
        pkcs12_pbe_decrypt_init,
    },
    {
        NID_pbe_WithSHA1And3_Key_TripleDES_CBC,
        // 1.2.840.113549.1.12.1.3
        {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x03},
        10,
        EVP_des_ede3_cbc,
        EVP_sha1,
        pkcs12_pbe_decrypt_init,
    },
    {
        NID_pbes2,
        // 1.2.840.113549.1.5.13
        {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0d},
        9,
        NULL,
        NULL,
        PKCS5_pbe2_decrypt_init,
    },
};

// get_pkcs12_pbe_suite returns the PBES1 suite matching |pbe_nid|, or NULL.
// PBES2 entries are excluded: they have no fixed cipher/digest.
static const struct pbe_suite *get_pkcs12_pbe_suite(int pbe_nid) {
  for (unsigned i = 0; i < OPENSSL_ARRAY_SIZE(kBuiltinPBE); i++) {
    if (kBuiltinPBE[i].pbe_nid == pbe_nid &&
        // If |cipher_func| or |md_func| are missing, this is a PBES2 scheme.
        kBuiltinPBE[i].cipher_func != NULL && kBuiltinPBE[i].md_func != NULL) {
      return &kBuiltinPBE[i];
    }
  }

  return NULL;
}

int pkcs12_pbe_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, int alg,
                            uint32_t iterations, const char *pass,
                            size_t pass_len, const uint8_t *salt,
                            size_t salt_len) {
  const struct pbe_suite *suite = get_pkcs12_pbe_suite(alg);
  if (suite == NULL) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNKNOWN_ALGORITHM);
    return 0;
  }

  // Serialize the AlgorithmIdentifier. See RFC 2898, appendix A.3.
  CBB algorithm, oid, param, salt_cbb;
  if (!CBB_add_asn1(out, &algorithm, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&oid, suite->oid, suite->oid_len) ||
      !CBB_add_asn1(&algorithm, &param, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&param, &salt_cbb, CBS_ASN1_OCTETSTRING) ||
      !CBB_add_bytes(&salt_cbb, salt, salt_len) ||
      !CBB_add_asn1_uint64(&param, iterations) ||
      !CBB_flush(out)) {
    return 0;
  }

  return pkcs12_pbe_cipher_init(suite, ctx, iterations, pass, pass_len, salt,
                                salt_len, 1 /* encrypt */);
}

int pkcs8_pbe_decrypt(uint8_t **out, size_t *out_len, CBS *algorithm,
                      const char *pass, size_t pass_len, const uint8_t *in,
                      size_t in_len) {
  int ret = 0;
  uint8_t *buf = NULL;
  ;  // NOTE(review): stray empty statement present in the original; harmless.
  EVP_CIPHER_CTX ctx;
  EVP_CIPHER_CTX_init(&ctx);

  // Match the AlgorithmIdentifier's OID against the supported PBE suites.
  CBS obj;
  const struct pbe_suite *suite = NULL;
  if (!CBS_get_asn1(algorithm, &obj, CBS_ASN1_OBJECT)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR);
    goto err;
  }
  for (unsigned i = 0; i < OPENSSL_ARRAY_SIZE(kBuiltinPBE); i++) {
    if (CBS_mem_equal(&obj, kBuiltinPBE[i].oid, kBuiltinPBE[i].oid_len)) {
      suite = &kBuiltinPBE[i];
      break;
    }
  }
  if (suite == NULL) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNKNOWN_ALGORITHM);
    goto err;
  }

  if (!suite->decrypt_init(suite, &ctx, pass, pass_len, algorithm)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_KEYGEN_FAILURE);
    goto err;
  }

  // Decryption never produces more output than input, so |in_len| bytes
  // suffice for |buf|.
  buf = reinterpret_cast<uint8_t *>(OPENSSL_malloc(in_len));
  if (buf == NULL) {
    goto err;
  }

  if (in_len > INT_MAX) {
    OPENSSL_PUT_ERROR(PKCS8, ERR_R_OVERFLOW);
    goto err;
  }

  int n1, n2;
  if (!EVP_DecryptUpdate(&ctx, buf, &n1, in, (int)in_len) ||
      !EVP_DecryptFinal_ex(&ctx, buf + n1, &n2)) {
    goto err;
  }

  *out = buf;
  *out_len = n1 + n2;
  ret = 1;
  buf = NULL;  // Ownership transferred to the caller.

err:
  OPENSSL_free(buf);
  EVP_CIPHER_CTX_cleanup(&ctx);
  return ret;
}

EVP_PKEY *PKCS8_parse_encrypted_private_key(CBS *cbs, const char *pass,
                                            size_t pass_len) {
  // See RFC 5208, section 6.
  // EncryptedPrivateKeyInfo ::= SEQUENCE { encryptionAlgorithm, encryptedData }
  CBS epki, algorithm, ciphertext;
  if (!CBS_get_asn1(cbs, &epki, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1(&epki, &algorithm, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1(&epki, &ciphertext, CBS_ASN1_OCTETSTRING) ||
      CBS_len(&epki) != 0) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR);
    return 0;
  }

  uint8_t *out;
  size_t out_len;
  if (!pkcs8_pbe_decrypt(&out, &out_len, &algorithm, pass, pass_len,
                         CBS_data(&ciphertext), CBS_len(&ciphertext))) {
    return 0;
  }

  // The decrypted plaintext is a PrivateKeyInfo structure.
  CBS pki;
  CBS_init(&pki, out, out_len);
  EVP_PKEY *ret = EVP_parse_private_key(&pki);
  OPENSSL_free(out);
  return ret;
}

int PKCS8_marshal_encrypted_private_key(CBB *out, int pbe_nid,
                                        const EVP_CIPHER *cipher,
                                        const char *pass, size_t pass_len,
                                        const uint8_t *salt, size_t salt_len,
                                        int iterations, const EVP_PKEY *pkey) {
  int ret = 0;
  uint8_t *plaintext = NULL, *salt_buf = NULL;
  size_t plaintext_len = 0;
  EVP_CIPHER_CTX ctx;
  EVP_CIPHER_CTX_init(&ctx);

  {
    // Generate a random salt if necessary.
    if (salt == NULL) {
      if (salt_len == 0) {
        salt_len = PKCS5_SALT_LEN;
      }

      salt_buf = reinterpret_cast<uint8_t *>(OPENSSL_malloc(salt_len));
      if (salt_buf == NULL || !RAND_bytes(salt_buf, salt_len)) {
        goto err;
      }

      salt = salt_buf;
    }

    if (iterations <= 0) {
      iterations = PKCS12_DEFAULT_ITER;
    }

    // Serialize the input key.
    CBB plaintext_cbb;
    if (!CBB_init(&plaintext_cbb, 128) ||
        !EVP_marshal_private_key(&plaintext_cbb, pkey) ||
        !CBB_finish(&plaintext_cbb, &plaintext, &plaintext_len)) {
      CBB_cleanup(&plaintext_cbb);
      goto err;
    }

    CBB epki;
    if (!CBB_add_asn1(out, &epki, CBS_ASN1_SEQUENCE)) {
      goto err;
    }

    // TODO(davidben): OpenSSL has since extended |pbe_nid| to control either
    // the PBES1 scheme or the PBES2 PRF. E.g. passing |NID_hmacWithSHA256| will
    // select PBES2 with HMAC-SHA256 as the PRF. Implement this if anything uses
    // it. See 5693a30813a031d3921a016a870420e7eb93ec90 in OpenSSL.
    //
    // A |pbe_nid| of -1 selects PBES2 with |cipher|; otherwise |pbe_nid| names
    // a PBES1 scheme.
    int alg_ok;
    if (pbe_nid == -1) {
      alg_ok = PKCS5_pbe2_encrypt_init(&epki, &ctx, cipher,
                                       (uint32_t)iterations, pass, pass_len,
                                       salt, salt_len);
    } else {
      alg_ok = pkcs12_pbe_encrypt_init(&epki, &ctx, pbe_nid,
                                       (uint32_t)iterations, pass, pass_len,
                                       salt, salt_len);
    }
    if (!alg_ok) {
      goto err;
    }

    // Worst-case ciphertext size: plaintext plus one cipher block of padding.
    size_t max_out = plaintext_len + EVP_CIPHER_CTX_block_size(&ctx);
    if (max_out < plaintext_len) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_TOO_LONG);
      goto err;
    }

    CBB ciphertext;
    uint8_t *ptr;
    int n1, n2;
    if (!CBB_add_asn1(&epki, &ciphertext, CBS_ASN1_OCTETSTRING) ||
        !CBB_reserve(&ciphertext, &ptr, max_out) ||
        !EVP_CipherUpdate(&ctx, ptr, &n1, plaintext, plaintext_len) ||
        !EVP_CipherFinal_ex(&ctx, ptr + n1, &n2) ||
        !CBB_did_write(&ciphertext, n1 + n2) ||
        !CBB_flush(out)) {
      goto err;
    }

    ret = 1;
  }

err:
  OPENSSL_free(plaintext);
  OPENSSL_free(salt_buf);
  EVP_CIPHER_CTX_cleanup(&ctx);
  return ret;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/pkcs8/pkcs8_x509.cc
================================================
/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the <...> targets of these #include directives were lost in
// extraction; restore them from upstream BoringSSL before building.
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../bytestring/internal.h" "../internal.h" "../x509/internal.h" "internal.h"

int pkcs12_iterations_acceptable(uint64_t iterations) {
#if defined(BORINGSSL_UNSAFE_FUZZER_MODE)
  static const uint64_t kIterationsLimit = 2048;
#else
  // Windows imposes a limit of 600K. Mozilla say: “so them increasing
  // maximum to something like 100M or 1G (to have few decades of breathing
  // room) would be very welcome”[1]. So here we set the limit to 100M.
  //
  // [1] https://bugzilla.mozilla.org/show_bug.cgi?id=1436873#c14
  static const uint64_t kIterationsLimit = 100 * 1000000;
#endif
  // Callers downcast |iterations| to uint32_t after this check.
  assert(kIterationsLimit <= UINT32_MAX);
  return 0 < iterations && iterations <= kIterationsLimit;
}

ASN1_SEQUENCE(PKCS8_PRIV_KEY_INFO) = {
    ASN1_SIMPLE(PKCS8_PRIV_KEY_INFO, version, ASN1_INTEGER),
    ASN1_SIMPLE(PKCS8_PRIV_KEY_INFO, pkeyalg, X509_ALGOR),
    ASN1_SIMPLE(PKCS8_PRIV_KEY_INFO, pkey, ASN1_OCTET_STRING),
    ASN1_IMP_SET_OF_OPT(PKCS8_PRIV_KEY_INFO, attributes, X509_ATTRIBUTE, 0),
} ASN1_SEQUENCE_END(PKCS8_PRIV_KEY_INFO)

IMPLEMENT_ASN1_FUNCTIONS_const(PKCS8_PRIV_KEY_INFO)

// EVP_PKCS82PKEY converts a legacy |PKCS8_PRIV_KEY_INFO| to an |EVP_PKEY| by
// round-tripping through its DER encoding.
EVP_PKEY *EVP_PKCS82PKEY(const PKCS8_PRIV_KEY_INFO *p8) {
  uint8_t *der = NULL;
  int der_len = i2d_PKCS8_PRIV_KEY_INFO(p8, &der);
  if (der_len < 0) {
    return NULL;
  }

  CBS cbs;
  CBS_init(&cbs, der, (size_t)der_len);
  EVP_PKEY *ret = EVP_parse_private_key(&cbs);
  if (ret == NULL || CBS_len(&cbs) != 0) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR);
    EVP_PKEY_free(ret);
    OPENSSL_free(der);
    return NULL;
  }

  OPENSSL_free(der);
  return ret;
}

// EVP_PKEY2PKCS8 converts an |EVP_PKEY| to a legacy |PKCS8_PRIV_KEY_INFO|,
// also by round-tripping through DER.
PKCS8_PRIV_KEY_INFO *EVP_PKEY2PKCS8(const EVP_PKEY *pkey) {
  CBB cbb;
  uint8_t *der = NULL;
  size_t der_len;
  if (!CBB_init(&cbb, 0) ||
      !EVP_marshal_private_key(&cbb, pkey) ||
      !CBB_finish(&cbb, &der, &der_len) ||
      der_len > LONG_MAX) {
    CBB_cleanup(&cbb);
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_ENCODE_ERROR);
    OPENSSL_free(der);
    return NULL;
  }

  const uint8_t *p = der;
  PKCS8_PRIV_KEY_INFO *p8 = d2i_PKCS8_PRIV_KEY_INFO(NULL, &p, (long)der_len);
  if (p8 == NULL || p != der + der_len) {
    PKCS8_PRIV_KEY_INFO_free(p8);
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR);
    goto err;
  }

  OPENSSL_free(der);
  return p8;

err:
  OPENSSL_free(der);
  return NULL;
}

PKCS8_PRIV_KEY_INFO *PKCS8_decrypt(X509_SIG *pkcs8, const char *pass,
                                   int pass_len_in) {
  // A |pass_len_in| of -1 means |pass| is NUL-terminated.
  size_t pass_len;
  if (pass_len_in == -1 && pass != NULL) {
    pass_len = strlen(pass);
  } else {
    pass_len = (size_t)pass_len_in;
  }

  PKCS8_PRIV_KEY_INFO *ret = NULL;
  EVP_PKEY *pkey = NULL;
  uint8_t *in = NULL;

  // Convert the legacy ASN.1 object to a byte string.
  int in_len = i2d_X509_SIG(pkcs8, &in);
  if (in_len < 0) {
    goto err;
  }

  CBS cbs;
  CBS_init(&cbs, in, in_len);
  pkey = PKCS8_parse_encrypted_private_key(&cbs, pass, pass_len);
  if (pkey == NULL || CBS_len(&cbs) != 0) {
    goto err;
  }

  ret = EVP_PKEY2PKCS8(pkey);

err:
  OPENSSL_free(in);
  EVP_PKEY_free(pkey);
  return ret;
}

X509_SIG *PKCS8_encrypt(int pbe_nid, const EVP_CIPHER *cipher, const char *pass,
                        int pass_len_in, const uint8_t *salt, size_t salt_len,
                        int iterations, PKCS8_PRIV_KEY_INFO *p8inf) {
  // A |pass_len_in| of -1 means |pass| is NUL-terminated.
  size_t pass_len;
  if (pass_len_in == -1 && pass != NULL) {
    pass_len = strlen(pass);
  } else {
    pass_len = (size_t)pass_len_in;
  }

  // Parse out the private key.
  EVP_PKEY *pkey = EVP_PKCS82PKEY(p8inf);
  if (pkey == NULL) {
    return NULL;
  }

  X509_SIG *ret = NULL;
  uint8_t *der = NULL;
  const uint8_t *ptr;
  size_t der_len;
  CBB cbb;
  if (!CBB_init(&cbb, 128) ||
      !PKCS8_marshal_encrypted_private_key(&cbb, pbe_nid, cipher, pass,
                                           pass_len, salt, salt_len,
                                           iterations, pkey) ||
      !CBB_finish(&cbb, &der, &der_len)) {
    CBB_cleanup(&cbb);
    goto err;
  }

  // Convert back to legacy ASN.1 objects.
  ptr = der;
  ret = d2i_X509_SIG(NULL, &ptr, der_len);
  if (ret == NULL || ptr != der + der_len) {
    OPENSSL_PUT_ERROR(PKCS8, ERR_R_INTERNAL_ERROR);
    X509_SIG_free(ret);
    ret = NULL;
  }

err:
  OPENSSL_free(der);
  EVP_PKEY_free(pkey);
  return ret;
}

// pkcs12_context carries the parse outputs (single private key, certificate
// stack) and the decryption password through the PKCS#12 element callbacks.
struct pkcs12_context {
  EVP_PKEY **out_key;
  STACK_OF(X509) *out_certs;
  const char *password;
  size_t password_len;
};

// PKCS12_handle_sequence parses a BER-encoded SEQUENCE of elements in a PKCS#12
// structure.
static int PKCS12_handle_sequence(
    CBS *sequence, struct pkcs12_context *ctx,
    int (*handle_element)(CBS *cbs, struct pkcs12_context *ctx)) {
  uint8_t *storage = NULL;
  CBS in;
  int ret = 0;

  // Although a BER->DER conversion is done at the beginning of |PKCS12_parse|,
  // the ASN.1 data gets wrapped in OCTETSTRINGs and/or encrypted and the
  // conversion cannot see through those wrappings. So each time we step
  // through one we need to convert to DER again.
  if (!CBS_asn1_ber_to_der(sequence, &in, &storage)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
    return 0;
  }

  CBS child;
  if (!CBS_get_asn1(&in, &child, CBS_ASN1_SEQUENCE) || CBS_len(&in) != 0) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
    goto err;
  }

  while (CBS_len(&child) > 0) {
    CBS element;
    if (!CBS_get_asn1(&child, &element, CBS_ASN1_SEQUENCE)) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
      goto err;
    }

    if (!handle_element(&element, ctx)) {
      goto err;
    }
  }

  ret = 1;

err:
  OPENSSL_free(storage);
  return ret;
}

// DER-encoded OIDs for SafeBag types and bag attributes (RFC 7292).
// 1.2.840.113549.1.12.10.1.1
static const uint8_t kKeyBag[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
                                  0x01, 0x0c, 0x0a, 0x01, 0x01};

// 1.2.840.113549.1.12.10.1.2
static const uint8_t kPKCS8ShroudedKeyBag[] = {
    0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x0a, 0x01, 0x02};

// 1.2.840.113549.1.12.10.1.3
static const uint8_t kCertBag[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
                                   0x01, 0x0c, 0x0a, 0x01, 0x03};

// 1.2.840.113549.1.9.20
static const uint8_t kFriendlyName[] = {0x2a, 0x86, 0x48, 0x86, 0xf7,
                                        0x0d, 0x01, 0x09, 0x14};

// 1.2.840.113549.1.9.21
static const uint8_t kLocalKeyID[] = {0x2a, 0x86, 0x48, 0x86, 0xf7,
                                      0x0d, 0x01, 0x09, 0x15};

// 1.2.840.113549.1.9.22.1
static const uint8_t kX509Certificate[] = {0x2a, 0x86, 0x48, 0x86, 0xf7,
                                           0x0d, 0x01, 0x09, 0x16, 0x01};

// parse_bag_attributes parses the bagAttributes field of a SafeBag structure.
// It sets |*out_friendly_name| to a newly-allocated copy of the friendly name,
// encoded as a UTF-8 string, or NULL if there is none. It returns one on
// success and zero on error.
static int parse_bag_attributes(CBS *attrs, uint8_t **out_friendly_name,
                                size_t *out_friendly_name_len) {
  *out_friendly_name = NULL;
  *out_friendly_name_len = 0;

  // See https://tools.ietf.org/html/rfc7292#section-4.2.
  // Attributes other than friendlyName (e.g. localKeyId) are parsed for
  // well-formedness but otherwise ignored here.
  while (CBS_len(attrs) != 0) {
    CBS attr, oid, values;
    if (!CBS_get_asn1(attrs, &attr, CBS_ASN1_SEQUENCE) ||
        !CBS_get_asn1(&attr, &oid, CBS_ASN1_OBJECT) ||
        !CBS_get_asn1(&attr, &values, CBS_ASN1_SET) || CBS_len(&attr) != 0) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
      goto err;
    }
    if (CBS_mem_equal(&oid, kFriendlyName, sizeof(kFriendlyName))) {
      // See https://tools.ietf.org/html/rfc2985, section 5.5.1.
      CBS value;
      if (*out_friendly_name != NULL ||
          !CBS_get_asn1(&values, &value, CBS_ASN1_BMPSTRING) ||
          CBS_len(&values) != 0 || CBS_len(&value) == 0) {
        OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
        goto err;
      }
      // Convert the friendly name to UTF-8.
      CBB cbb;
      if (!CBB_init(&cbb, CBS_len(&value))) {
        goto err;
      }
      while (CBS_len(&value) != 0) {
        uint32_t c;
        if (!CBS_get_ucs2_be(&value, &c) || !CBB_add_utf8(&cbb, c)) {
          OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_INVALID_CHARACTERS);
          CBB_cleanup(&cbb);
          goto err;
        }
      }
      if (!CBB_finish(&cbb, out_friendly_name, out_friendly_name_len)) {
        CBB_cleanup(&cbb);
        goto err;
      }
    }
  }

  return 1;

err:
  OPENSSL_free(*out_friendly_name);
  *out_friendly_name = NULL;
  *out_friendly_name_len = 0;
  return 0;
}

// PKCS12_handle_safe_bag parses a single SafeBag element in a PKCS#12
// structure.
// PKCS12_handle_safe_bag dispatches on the SafeBag's bagId OID: KeyBag and
// PKCS8ShroudedKeyBag populate |ctx->out_key| (at most one key is allowed),
// CertBag appends an X.509 certificate to |ctx->out_certs|, and any other bag
// type is ignored. Returns one on success and zero on error.
static int PKCS12_handle_safe_bag(CBS *safe_bag, struct pkcs12_context *ctx) {
  CBS bag_id, wrapped_value, bag_attrs;
  if (!CBS_get_asn1(safe_bag, &bag_id, CBS_ASN1_OBJECT) ||
      !CBS_get_asn1(safe_bag, &wrapped_value,
                    CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
    return 0;
  }
  // bagAttributes is OPTIONAL; an absent SET is modeled as an empty CBS.
  if (CBS_len(safe_bag) == 0) {
    CBS_init(&bag_attrs, NULL, 0);
  } else if (!CBS_get_asn1(safe_bag, &bag_attrs, CBS_ASN1_SET) ||
             CBS_len(safe_bag) != 0) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
    return 0;
  }

  const int is_key_bag = CBS_mem_equal(&bag_id, kKeyBag, sizeof(kKeyBag));
  const int is_shrouded_key_bag =
      CBS_mem_equal(&bag_id, kPKCS8ShroudedKeyBag, sizeof(kPKCS8ShroudedKeyBag));
  if (is_key_bag || is_shrouded_key_bag) {
    // See RFC 7292, section 4.2.1 and 4.2.2.
    if (*ctx->out_key) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_MULTIPLE_PRIVATE_KEYS_IN_PKCS12);
      return 0;
    }

    // A KeyBag is a plain PKCS#8 PrivateKeyInfo; a shrouded bag is encrypted
    // under the file password.
    EVP_PKEY *pkey =
        is_key_bag ? EVP_parse_private_key(&wrapped_value)
                   : PKCS8_parse_encrypted_private_key(
                         &wrapped_value, ctx->password, ctx->password_len);
    if (pkey == NULL) {
      return 0;
    }

    if (CBS_len(&wrapped_value) != 0) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
      EVP_PKEY_free(pkey);
      return 0;
    }

    *ctx->out_key = pkey;
    return 1;
  }

  if (CBS_mem_equal(&bag_id, kCertBag, sizeof(kCertBag))) {
    // See RFC 7292, section 4.2.3.
    CBS cert_bag, cert_type, wrapped_cert, cert;
    if (!CBS_get_asn1(&wrapped_value, &cert_bag, CBS_ASN1_SEQUENCE) ||
        !CBS_get_asn1(&cert_bag, &cert_type, CBS_ASN1_OBJECT) ||
        !CBS_get_asn1(&cert_bag, &wrapped_cert,
                      CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) ||
        !CBS_get_asn1(&wrapped_cert, &cert, CBS_ASN1_OCTETSTRING)) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
      return 0;
    }

    // Skip unknown certificate types.
    if (!CBS_mem_equal(&cert_type, kX509Certificate,
                       sizeof(kX509Certificate))) {
      return 1;
    }

    // |d2i_X509| takes a |long| length, so guard the conversion.
    if (CBS_len(&cert) > LONG_MAX) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
      return 0;
    }
    const uint8_t *inp = CBS_data(&cert);
    X509 *x509 = d2i_X509(NULL, &inp, (long)CBS_len(&cert));
    if (!x509) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
      return 0;
    }

    // The certificate must consume the entire OCTET STRING.
    if (inp != CBS_data(&cert) + CBS_len(&cert)) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
      X509_free(x509);
      return 0;
    }

    uint8_t *friendly_name;
    size_t friendly_name_len;
    if (!parse_bag_attributes(&bag_attrs, &friendly_name,
                              &friendly_name_len)) {
      X509_free(x509);
      return 0;
    }
    // Carry the friendlyName (if any) across as the X.509 alias.
    int ok = friendly_name_len == 0 ||
             X509_alias_set1(x509, friendly_name, friendly_name_len);
    OPENSSL_free(friendly_name);
    if (!ok ||
        0 == sk_X509_push(ctx->out_certs, x509)) {
      X509_free(x509);
      return 0;
    }
    return 1;
  }

  // Unknown element type - ignore it.
  return 1;
}

// 1.2.840.113549.1.7.1
static const uint8_t kPKCS7Data[] = {0x2a, 0x86, 0x48, 0x86, 0xf7,
                                     0x0d, 0x01, 0x07, 0x01};

// 1.2.840.113549.1.7.6
static const uint8_t kPKCS7EncryptedData[] = {0x2a, 0x86, 0x48, 0x86, 0xf7,
                                              0x0d, 0x01, 0x07, 0x06};

// PKCS12_handle_content_info parses a single PKCS#7 ContentInfo element in a
// PKCS#12 structure.
static int PKCS12_handle_content_info(CBS *content_info,
                                      struct pkcs12_context *ctx) {
  CBS content_type, wrapped_contents, contents;
  int ret = 0;
  uint8_t *storage = NULL;

  if (!CBS_get_asn1(content_info, &content_type, CBS_ASN1_OBJECT) ||
      !CBS_get_asn1(content_info, &wrapped_contents,
                    CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) ||
      CBS_len(content_info) != 0) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
    goto err;
  }

  if (CBS_mem_equal(&content_type, kPKCS7EncryptedData,
                    sizeof(kPKCS7EncryptedData))) {
    // See https://tools.ietf.org/html/rfc2315#section-13.
    //
    // PKCS#7 encrypted data inside a PKCS#12 structure is generally an
    // encrypted certificate bag and it's generally encrypted with 40-bit
    // RC2-CBC.
    // Continuation of the encrypted-data branch of
    // |PKCS12_handle_content_info|: unwrap the EncryptedData, decrypt its
    // content with the file password, and recurse into the resulting
    // SafeContents.
    CBS version_bytes, eci, contents_type, ai, encrypted_contents;
    uint8_t *out;
    size_t out_len;

    if (!CBS_get_asn1(&wrapped_contents, &contents, CBS_ASN1_SEQUENCE) ||
        !CBS_get_asn1(&contents, &version_bytes, CBS_ASN1_INTEGER) ||
        // EncryptedContentInfo, see
        // https://tools.ietf.org/html/rfc2315#section-10.1
        !CBS_get_asn1(&contents, &eci, CBS_ASN1_SEQUENCE) ||
        !CBS_get_asn1(&eci, &contents_type, CBS_ASN1_OBJECT) ||
        // AlgorithmIdentifier, see
        // https://tools.ietf.org/html/rfc5280#section-4.1.1.2
        !CBS_get_asn1(&eci, &ai, CBS_ASN1_SEQUENCE) ||
        !CBS_get_asn1_implicit_string(&eci, &encrypted_contents, &storage,
                                      CBS_ASN1_CONTEXT_SPECIFIC | 0,
                                      CBS_ASN1_OCTETSTRING)) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
      goto err;
    }

    if (!CBS_mem_equal(&contents_type, kPKCS7Data, sizeof(kPKCS7Data))) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
      goto err;
    }

    if (!pkcs8_pbe_decrypt(&out, &out_len, &ai, ctx->password,
                           ctx->password_len, CBS_data(&encrypted_contents),
                           CBS_len(&encrypted_contents))) {
      goto err;
    }

    // The decrypted plaintext is a SafeContents; process each bag in it.
    CBS safe_contents;
    CBS_init(&safe_contents, out, out_len);
    ret = PKCS12_handle_sequence(&safe_contents, ctx, PKCS12_handle_safe_bag);
    OPENSSL_free(out);
  } else if (CBS_mem_equal(&content_type, kPKCS7Data, sizeof(kPKCS7Data))) {
    // Unencrypted data ContentInfo: the OCTET STRING directly holds a
    // SafeContents.
    CBS octet_string_contents;

    if (!CBS_get_asn1(&wrapped_contents, &octet_string_contents,
                      CBS_ASN1_OCTETSTRING)) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
      goto err;
    }

    ret = PKCS12_handle_sequence(&octet_string_contents, ctx,
                                 PKCS12_handle_safe_bag);
  } else {
    // Unknown element type - ignore it.
    ret = 1;
  }

err:
  OPENSSL_free(storage);
  return ret;
}

// pkcs12_check_mac derives the PKCS#12 MAC key from |password| and |salt| and
// compares an HMAC over |authsafes| against |expected_mac|. On success it
// returns one and sets |*out_mac_ok| to whether the MAC matched; it returns
// zero only on internal (derivation/HMAC) failure.
static int pkcs12_check_mac(int *out_mac_ok, const char *password,
                            size_t password_len, const CBS *salt,
                            uint32_t iterations, const EVP_MD *md,
                            const CBS *authsafes, const CBS *expected_mac) {
  int ret = 0;
  uint8_t hmac_key[EVP_MAX_MD_SIZE];
  if (!pkcs12_key_gen(password, password_len, CBS_data(salt), CBS_len(salt),
                      PKCS12_MAC_ID, iterations, EVP_MD_size(md), hmac_key,
                      md)) {
    goto err;
  }

  uint8_t hmac[EVP_MAX_MD_SIZE];
  unsigned hmac_len;
  if (NULL == HMAC(md, hmac_key, EVP_MD_size(md), CBS_data(authsafes),
                   CBS_len(authsafes), hmac, &hmac_len)) {
    goto err;
  }

  *out_mac_ok = CBS_mem_equal(expected_mac, hmac, hmac_len);
#if defined(BORINGSSL_UNSAFE_FUZZER_MODE)
  // Fuzzing builds skip MAC enforcement so fuzz inputs can reach the parser.
  *out_mac_ok = 1;
#endif
  ret = 1;

err:
  // Scrub the derived key material from the stack before returning.
  OPENSSL_cleanse(hmac_key, sizeof(hmac_key));
  return ret;
}

int PKCS12_get_key_and_certs(EVP_PKEY **out_key, STACK_OF(X509) *out_certs,
                             CBS *ber_in, const char *password) {
  uint8_t *storage = NULL;
  CBS in, pfx, mac_data, authsafe, content_type, wrapped_authsafes, authsafes;
  uint64_t version;
  int ret = 0;
  struct pkcs12_context ctx;
  // Remember the caller's stack size so failure can roll back only the
  // certificates this call appended.
  const size_t original_out_certs_len = sk_X509_num(out_certs);

  // The input may be in BER format.
  if (!CBS_asn1_ber_to_der(ber_in, &in, &storage)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
    return 0;
  }

  *out_key = NULL;
  OPENSSL_memset(&ctx, 0, sizeof(ctx));

  // See ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-12/pkcs-12v1.pdf, section
  // four.
  // Parse the outer PFX: SEQUENCE { version, authSafe ContentInfo,
  // macData }. The version must be at least 3 and a MAC is mandatory here.
  if (!CBS_get_asn1(&in, &pfx, CBS_ASN1_SEQUENCE) ||
      CBS_len(&in) != 0 ||
      !CBS_get_asn1_uint64(&pfx, &version)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
    goto err;
  }

  if (version < 3) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_VERSION);
    goto err;
  }

  if (!CBS_get_asn1(&pfx, &authsafe, CBS_ASN1_SEQUENCE)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
    goto err;
  }

  if (CBS_len(&pfx) == 0) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_MISSING_MAC);
    goto err;
  }

  if (!CBS_get_asn1(&pfx, &mac_data, CBS_ASN1_SEQUENCE)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
    goto err;
  }

  // authsafe is a PKCS#7 ContentInfo. See
  // https://tools.ietf.org/html/rfc2315#section-7.
  if (!CBS_get_asn1(&authsafe, &content_type, CBS_ASN1_OBJECT) ||
      !CBS_get_asn1(&authsafe, &wrapped_authsafes,
                    CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
    goto err;
  }

  // The content type can either be data or signedData. The latter indicates
  // that it's signed by a public key, which isn't supported.
  if (!CBS_mem_equal(&content_type, kPKCS7Data, sizeof(kPKCS7Data))) {
    OPENSSL_PUT_ERROR(PKCS8,
                      PKCS8_R_PKCS12_PUBLIC_KEY_INTEGRITY_NOT_SUPPORTED);
    goto err;
  }

  if (!CBS_get_asn1(&wrapped_authsafes, &authsafes, CBS_ASN1_OCTETSTRING)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
    goto err;
  }

  ctx.out_key = out_key;
  ctx.out_certs = out_certs;
  ctx.password = password;
  ctx.password_len = password != NULL ? strlen(password) : 0;

  // Verify the MAC.
  {
    CBS mac, salt, expected_mac;
    if (!CBS_get_asn1(&mac_data, &mac, CBS_ASN1_SEQUENCE)) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
      goto err;
    }

    const EVP_MD *md = EVP_parse_digest_algorithm(&mac);
    if (md == NULL) {
      goto err;
    }

    if (!CBS_get_asn1(&mac, &expected_mac, CBS_ASN1_OCTETSTRING) ||
        !CBS_get_asn1(&mac_data, &salt, CBS_ASN1_OCTETSTRING)) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
      goto err;
    }

    // The iteration count is optional and the default is one.
    uint32_t iterations = 1;
    if (CBS_len(&mac_data) > 0) {
      uint64_t iterations_u64;
      if (!CBS_get_asn1_uint64(&mac_data, &iterations_u64) ||
          !pkcs12_iterations_acceptable(iterations_u64)) {
        OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA);
        goto err;
      }
      iterations = (uint32_t)iterations_u64;
    }

    int mac_ok;
    if (!pkcs12_check_mac(&mac_ok, ctx.password, ctx.password_len, &salt,
                          iterations, md, &authsafes, &expected_mac)) {
      goto err;
    }
    if (!mac_ok && ctx.password_len == 0) {
      // PKCS#12 encodes passwords as NUL-terminated UCS-2, so the empty
      // password is encoded as {0, 0}. Some implementations use the empty byte
      // array for "no password". OpenSSL considers a non-NULL password as {0,
      // 0} and a NULL password as {}. It then, in high-level PKCS#12 parsing
      // code, tries both options. We match this behavior.
      ctx.password = ctx.password != NULL ? NULL : "";
      if (!pkcs12_check_mac(&mac_ok, ctx.password, ctx.password_len, &salt,
                            iterations, md, &authsafes, &expected_mac)) {
        goto err;
      }
    }
    if (!mac_ok) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_INCORRECT_PASSWORD);
      goto err;
    }
  }

  // authsafes contains a series of PKCS#7 ContentInfos.
  if (!PKCS12_handle_sequence(&authsafes, &ctx, PKCS12_handle_content_info)) {
    goto err;
  }

  ret = 1;

err:
  OPENSSL_free(storage);
  if (!ret) {
    // On failure, release the key and any certificates appended by this call
    // so the caller's stack is restored to its original length.
    EVP_PKEY_free(*out_key);
    *out_key = NULL;
    while (sk_X509_num(out_certs) > original_out_certs_len) {
      X509 *x509 = sk_X509_pop(out_certs);
      X509_free(x509);
    }
  }

  return ret;
}

// No-op for OpenSSL compatibility; BoringSSL registers nothing here.
void PKCS12_PBE_add(void) {}

// A |PKCS12| here is simply the raw BER bytes; parsing is deferred to
// |PKCS12_parse| / |PKCS12_get_key_and_certs|.
struct pkcs12_st {
  uint8_t *ber_bytes;
  size_t ber_len;
};

// d2i_PKCS12 copies |ber_len| bytes from |*ber_bytes| into a new |PKCS12| and
// advances |*ber_bytes| past them. If |out_p12| is non-NULL, any previous
// value is freed and replaced. Returns the new object or NULL on error.
PKCS12 *d2i_PKCS12(PKCS12 **out_p12, const uint8_t **ber_bytes,
                   size_t ber_len) {
  // NOTE(review): the template argument appears to have been stripped by the
  // repository extraction; upstream reads reinterpret_cast<PKCS12 *>(...) —
  // confirm against the original file.
  PKCS12 *p12 = reinterpret_cast(OPENSSL_malloc(sizeof(PKCS12)));
  if (!p12) {
    return NULL;
  }

  // NOTE(review): template argument stripped here too (uint8_t *).
  p12->ber_bytes = reinterpret_cast(OPENSSL_memdup(*ber_bytes, ber_len));
  if (!p12->ber_bytes) {
    OPENSSL_free(p12);
    return NULL;
  }

  p12->ber_len = ber_len;
  *ber_bytes += ber_len;

  if (out_p12) {
    PKCS12_free(*out_p12);
    *out_p12 = p12;
  }

  return p12;
}

// d2i_PKCS12_bio reads up to 256KiB from |bio| into a growing buffer and
// parses the result as a PKCS12. Returns NULL on read or parse failure.
PKCS12 *d2i_PKCS12_bio(BIO *bio, PKCS12 **out_p12) {
  size_t used = 0;
  BUF_MEM *buf;
  const uint8_t *dummy;
  static const size_t kMaxSize = 256 * 1024;
  PKCS12 *ret = NULL;

  buf = BUF_MEM_new();
  if (buf == NULL) {
    return NULL;
  }
  if (BUF_MEM_grow(buf, 8192) == 0) {
    goto out;
  }

  for (;;) {
    size_t max_read = buf->length - used;
    int n = BIO_read(bio, &buf->data[used],
                     max_read > INT_MAX ? INT_MAX : (int)max_read);
    if (n < 0) {
      if (used == 0) {
        goto out;
      }
      // Workaround a bug in node.js. It uses a memory BIO for this in the wrong
      // mode.
      n = 0;
    }

    if (n == 0) {
      break;
    }
    used += n;

    if (used < buf->length) {
      continue;
    }

    // The buffer is full; double it, bailing out past the size cap.
    if (buf->length > kMaxSize ||
        BUF_MEM_grow(buf, buf->length * 2) == 0) {
      goto out;
    }
  }

  dummy = (uint8_t *)buf->data;
  ret = d2i_PKCS12(out_p12, &dummy, used);

out:
  BUF_MEM_free(buf);
  return ret;
}

// d2i_PKCS12_fp is the FILE* variant of |d2i_PKCS12_bio|; the BIO does not
// take ownership of |fp|.
PKCS12 *d2i_PKCS12_fp(FILE *fp, PKCS12 **out_p12) {
  BIO *bio;
  PKCS12 *ret;

  bio = BIO_new_fp(fp, 0 /* don't take ownership */);
  if (!bio) {
    return NULL;
  }

  ret = d2i_PKCS12_bio(bio, out_p12);
  BIO_free(bio);
  return ret;
}

// i2d_PKCS12 follows the OpenSSL i2d convention: with |out| NULL it returns
// the length; with |*out| NULL it allocates; otherwise it writes into |*out|
// and advances the pointer. Returns the length or -1 on error.
int i2d_PKCS12(const PKCS12 *p12, uint8_t **out) {
  if (p12->ber_len > INT_MAX) {
    OPENSSL_PUT_ERROR(PKCS8, ERR_R_OVERFLOW);
    return -1;
  }

  if (out == NULL) {
    return (int)p12->ber_len;
  }

  if (*out == NULL) {
    // NOTE(review): template argument stripped by extraction (uint8_t *).
    *out = reinterpret_cast(
        OPENSSL_memdup(p12->ber_bytes, p12->ber_len));
    if (*out == NULL) {
      return -1;
    }
  } else {
    OPENSSL_memcpy(*out, p12->ber_bytes, p12->ber_len);
    *out += p12->ber_len;
  }
  return (int)p12->ber_len;
}

// i2d_PKCS12_bio writes the stored BER bytes to |bio| in full.
int i2d_PKCS12_bio(BIO *bio, const PKCS12 *p12) {
  return BIO_write_all(bio, p12->ber_bytes, p12->ber_len);
}

// i2d_PKCS12_fp is the FILE* variant of |i2d_PKCS12_bio|.
int i2d_PKCS12_fp(FILE *fp, const PKCS12 *p12) {
  BIO *bio = BIO_new_fp(fp, 0 /* don't take ownership */);
  if (bio == NULL) {
    return 0;
  }

  int ret = i2d_PKCS12_bio(bio, p12);
  BIO_free(bio);
  return ret;
}

int PKCS12_parse(const PKCS12 *p12, const char *password, EVP_PKEY **out_pkey,
                 X509 **out_cert, STACK_OF(X509) **out_ca_certs) {
  CBS ber_bytes;
  STACK_OF(X509) *ca_certs = NULL;
  char ca_certs_alloced = 0;

  // Reuse the caller's stack if one was supplied; otherwise allocate one and
  // remember that it must be freed on failure.
  if (out_ca_certs != NULL && *out_ca_certs != NULL) {
    ca_certs = *out_ca_certs;
  }

  if (!ca_certs) {
    ca_certs = sk_X509_new_null();
    if (ca_certs == NULL) {
      return 0;
    }
    ca_certs_alloced = 1;
  }

  CBS_init(&ber_bytes, p12->ber_bytes, p12->ber_len);
  if (!PKCS12_get_key_and_certs(out_pkey, ca_certs, &ber_bytes, password)) {
    if (ca_certs_alloced) {
      sk_X509_free(ca_certs);
    }
    return 0;
  }

  // OpenSSL selects the last certificate which matches the private key as
  // |out_cert|.
*out_cert = NULL; size_t num_certs = sk_X509_num(ca_certs); if (*out_pkey != NULL && num_certs > 0) { for (size_t i = num_certs - 1; i < num_certs; i--) { X509 *cert = sk_X509_value(ca_certs, i); if (X509_check_private_key(cert, *out_pkey)) { *out_cert = cert; sk_X509_delete(ca_certs, i); break; } ERR_clear_error(); } } if (out_ca_certs) { *out_ca_certs = ca_certs; } else { sk_X509_pop_free(ca_certs, X509_free); } return 1; } int PKCS12_verify_mac(const PKCS12 *p12, const char *password, int password_len) { if (password == NULL) { if (password_len != 0) { return 0; } } else if (password_len != -1 && (password[password_len] != 0 || OPENSSL_memchr(password, 0, password_len) != NULL)) { return 0; } EVP_PKEY *pkey = NULL; X509 *cert = NULL; if (!PKCS12_parse(p12, password, &pkey, &cert, NULL)) { ERR_clear_error(); return 0; } EVP_PKEY_free(pkey); X509_free(cert); return 1; } // add_bag_attributes adds the bagAttributes field of a SafeBag structure, // containing the specified friendlyName and localKeyId attributes. static int add_bag_attributes(CBB *bag, const char *name, size_t name_len, const uint8_t *key_id, size_t key_id_len) { if (name == NULL && key_id_len == 0) { return 1; // Omit the OPTIONAL SET. } // See https://tools.ietf.org/html/rfc7292#section-4.2. CBB attrs, attr, oid, values, value; if (!CBB_add_asn1(bag, &attrs, CBS_ASN1_SET)) { return 0; } if (name_len != 0) { // See https://tools.ietf.org/html/rfc2985, section 5.5.1. if (!CBB_add_asn1(&attrs, &attr, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&attr, &oid, CBS_ASN1_OBJECT) || !CBB_add_bytes(&oid, kFriendlyName, sizeof(kFriendlyName)) || !CBB_add_asn1(&attr, &values, CBS_ASN1_SET) || !CBB_add_asn1(&values, &value, CBS_ASN1_BMPSTRING)) { return 0; } // Convert the friendly name to a BMPString. 
    // Transcode the UTF-8 |name| to UCS-2 big-endian, rejecting characters
    // outside the BMP (|CBB_add_ucs2_be| fails for those).
    CBS name_cbs;
    CBS_init(&name_cbs, (const uint8_t *)name, name_len);
    while (CBS_len(&name_cbs) != 0) {
      uint32_t c;
      if (!CBS_get_utf8(&name_cbs, &c) ||
          !CBB_add_ucs2_be(&value, c)) {
        OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_INVALID_CHARACTERS);
        return 0;
      }
    }
  }

  if (key_id_len != 0) {
    // See https://tools.ietf.org/html/rfc2985, section 5.5.2.
    if (!CBB_add_asn1(&attrs, &attr, CBS_ASN1_SEQUENCE) ||
        !CBB_add_asn1(&attr, &oid, CBS_ASN1_OBJECT) ||
        !CBB_add_bytes(&oid, kLocalKeyID, sizeof(kLocalKeyID)) ||
        !CBB_add_asn1(&attr, &values, CBS_ASN1_SET) ||
        !CBB_add_asn1(&values, &value, CBS_ASN1_OCTETSTRING) ||
        !CBB_add_bytes(&value, key_id, key_id_len)) {
      return 0;
    }
  }

  // A DER SET OF must be sorted; flush applies the canonical ordering.
  return CBB_flush_asn1_set_of(&attrs) && CBB_flush(bag);
}

// add_cert_bag appends a CertBag SafeBag for |cert| to |cbb|, carrying the
// friendlyName (from |name| or the certificate's alias) and localKeyId
// attributes. Returns one on success and zero on error.
static int add_cert_bag(CBB *cbb, X509 *cert, const char *name,
                        const uint8_t *key_id, size_t key_id_len) {
  CBB bag, bag_oid, bag_contents, cert_bag, cert_type, wrapped_cert,
      cert_value;
  if (  // See https://tools.ietf.org/html/rfc7292#section-4.2.
      !CBB_add_asn1(cbb, &bag, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&bag, &bag_oid, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&bag_oid, kCertBag, sizeof(kCertBag)) ||
      !CBB_add_asn1(&bag, &bag_contents,
                    CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) ||
      // See https://tools.ietf.org/html/rfc7292#section-4.2.3.
      !CBB_add_asn1(&bag_contents, &cert_bag, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&cert_bag, &cert_type, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&cert_type, kX509Certificate, sizeof(kX509Certificate)) ||
      !CBB_add_asn1(&cert_bag, &wrapped_cert,
                    CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) ||
      !CBB_add_asn1(&wrapped_cert, &cert_value, CBS_ASN1_OCTETSTRING)) {
    return 0;
  }

  uint8_t *buf;
  int len = i2d_X509(cert, NULL);

  // An explicit |name| and a certificate alias may not both be present.
  int int_name_len = 0;
  const char *cert_name = (const char *)X509_alias_get0(cert, &int_name_len);
  size_t name_len = int_name_len;
  if (name) {
    if (name_len != 0) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_AMBIGUOUS_FRIENDLY_NAME);
      return 0;
    }
    name_len = strlen(name);
  } else {
    name = cert_name;
  }

  if (len < 0 ||
      !CBB_add_space(&cert_value, &buf, (size_t)len) ||
      i2d_X509(cert, &buf) < 0 ||
      !add_bag_attributes(&bag, name, name_len, key_id, key_id_len) ||
      !CBB_flush(cbb)) {
    return 0;
  }

  return 1;
}

// add_cert_safe_contents writes a SafeContents SEQUENCE containing a CertBag
// for |cert| (with attributes) followed by one attribute-less CertBag per
// entry of |chain|. Returns one on success and zero on error.
static int add_cert_safe_contents(CBB *cbb, X509 *cert,
                                  const STACK_OF(X509) *chain,
                                  const char *name, const uint8_t *key_id,
                                  size_t key_id_len) {
  CBB safe_contents;
  if (!CBB_add_asn1(cbb, &safe_contents, CBS_ASN1_SEQUENCE) ||
      (cert != NULL &&
       !add_cert_bag(&safe_contents, cert, name, key_id, key_id_len))) {
    return 0;
  }

  for (size_t i = 0; i < sk_X509_num(chain); i++) {
    // Only the leaf certificate gets attributes.
    if (!add_cert_bag(&safe_contents, sk_X509_value(chain, i), NULL, NULL,
                      0)) {
      return 0;
    }
  }

  return CBB_flush(cbb);
}

// add_encrypted_data appends a PKCS#7 EncryptedData ContentInfo to |out|,
// holding |in| encrypted with the scheme |pbe_nid| under |password| using a
// fresh random salt and |iterations| KDF rounds. Returns one on success and
// zero on error.
static int add_encrypted_data(CBB *out, int pbe_nid, const char *password,
                              size_t password_len, uint32_t iterations,
                              const uint8_t *in, size_t in_len) {
  uint8_t salt[PKCS5_SALT_LEN];
  if (!RAND_bytes(salt, sizeof(salt))) {
    return 0;
  }

  int ret = 0;
  EVP_CIPHER_CTX ctx;
  EVP_CIPHER_CTX_init(&ctx);
  CBB content_info, type, wrapper, encrypted_data, encrypted_content_info,
      inner_type, encrypted_content;
  if (  // Add the ContentInfo wrapping.
      // Continuation of |add_encrypted_data|: build the ContentInfo /
      // EncryptedData / EncryptedContentInfo nesting, then encrypt |in| into
      // the implicitly-tagged [0] payload.
      !CBB_add_asn1(out, &content_info, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&content_info, &type, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&type, kPKCS7EncryptedData, sizeof(kPKCS7EncryptedData)) ||
      !CBB_add_asn1(&content_info, &wrapper,
                    CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) ||
      // See https://tools.ietf.org/html/rfc2315#section-13.
      !CBB_add_asn1(&wrapper, &encrypted_data, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1_uint64(&encrypted_data, 0 /* version */) ||
      // See https://tools.ietf.org/html/rfc2315#section-10.1.
      !CBB_add_asn1(&encrypted_data, &encrypted_content_info,
                    CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&encrypted_content_info, &inner_type, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&inner_type, kPKCS7Data, sizeof(kPKCS7Data)) ||
      // Set up encryption and fill in contentEncryptionAlgorithm.
      !pkcs12_pbe_encrypt_init(&encrypted_content_info, &ctx, pbe_nid,
                               iterations, password, password_len, salt,
                               sizeof(salt)) ||
      // Note this tag is primitive. It is an implicitly-tagged OCTET_STRING, so
      // it inherits the inner tag's constructed bit.
      !CBB_add_asn1(&encrypted_content_info, &encrypted_content,
                    CBS_ASN1_CONTEXT_SPECIFIC | 0)) {
    goto err;
  }

  {
    // Reserve room for the ciphertext plus one block of padding, guarding the
    // addition against overflow.
    size_t max_out = in_len + EVP_CIPHER_CTX_block_size(&ctx);
    if (max_out < in_len) {
      OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_TOO_LONG);
      goto err;
    }

    uint8_t *ptr;
    int n1, n2;
    if (!CBB_reserve(&encrypted_content, &ptr, max_out) ||
        !EVP_CipherUpdate(&ctx, ptr, &n1, in, in_len) ||
        !EVP_CipherFinal_ex(&ctx, ptr + n1, &n2) ||
        !CBB_did_write(&encrypted_content, n1 + n2) ||
        !CBB_flush(out)) {
      goto err;
    }
  }

  ret = 1;

err:
  EVP_CIPHER_CTX_cleanup(&ctx);
  return ret;
}

PKCS12 *PKCS12_create(const char *password, const char *name,
                      const EVP_PKEY *pkey, X509 *cert,
                      const STACK_OF(X509) *chain, int key_nid, int cert_nid,
                      int iterations, int mac_iterations, int key_type) {
  // Apply the OpenSSL-compatible defaults for zero-valued parameters.
  if (key_nid == 0) {
    key_nid = NID_pbe_WithSHA1And3_Key_TripleDES_CBC;
  }
  if (cert_nid == 0) {
    cert_nid = NID_pbe_WithSHA1And40BitRC2_CBC;
  }
  if (iterations == 0) {
    iterations = PKCS12_DEFAULT_ITER;
  }
  if (mac_iterations == 0) {
    mac_iterations = 1;
  }
  if (  // In OpenSSL, this specifies a non-standard Microsoft key usage
        // extension which we do not currently support.
      key_type != 0 ||
      // In OpenSSL, -1 here means to omit the MAC, which we do not
      // currently support. Omitting it is also invalid for a password-based
      // PKCS#12 file.
      mac_iterations < 0 ||
      // Don't encode empty objects.
      (pkey == NULL && cert == NULL && sk_X509_num(chain) == 0)) {
    OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_OPTIONS);
    return 0;
  }

  // PKCS#12 is a very confusing recursive data format, built out of another
  // recursive data format. Section 5.1 of RFC 7292 describes the encoding
  // algorithm, but there is no clear overview. A quick summary:
  //
  // PKCS#7 defines a ContentInfo structure, which is a overgeneralized typed
  // combinator structure for applying cryptography. We care about two types. A
  // data ContentInfo contains an OCTET STRING and is a leaf node of the
  // combinator tree.
  // An encrypted-data ContentInfo contains encryption
  // parameters (key derivation and encryption) and wraps another ContentInfo,
  // usually data.
  //
  // A PKCS#12 file is a PFX structure (section 4), which contains a single data
  // ContentInfo and a MAC over it. This root ContentInfo is the
  // AuthenticatedSafe and its payload is a SEQUENCE of other ContentInfos, so
  // that different parts of the PKCS#12 file can by differently protected.
  //
  // Each ContentInfo in the AuthenticatedSafe, after undoing all the PKCS#7
  // combinators, has SafeContents payload. A SafeContents is a SEQUENCE of
  // SafeBag. SafeBag is PKCS#12's typed structure, with subtypes such as KeyBag
  // and CertBag. Confusingly, there is a SafeContents bag type which itself
  // recursively contains more SafeBags, but we do not implement this. Bags also
  // can have attributes.
  //
  // The grouping of SafeBags into intermediate ContentInfos does not appear to
  // be significant, except that all SafeBags sharing a ContentInfo have the
  // same level of protection. Additionally, while keys may be encrypted by
  // placing a KeyBag in an encrypted-data ContentInfo, PKCS#12 also defines a
  // key-specific encryption container, PKCS8ShroudedKeyBag, which is used
  // instead.

  // Note that |password| may be NULL to specify no password, rather than the
  // empty string. They are encoded differently in PKCS#12. (One is the empty
  // byte array and the other is NUL-terminated UCS-2.)
  size_t password_len = password != NULL ? strlen(password) : 0;

  uint8_t key_id[EVP_MAX_MD_SIZE];
  unsigned key_id_len = 0;
  if (cert != NULL && pkey != NULL) {
    if (!X509_check_private_key(cert, pkey) ||
        // Matching OpenSSL, use the SHA-1 hash of the certificate as the local
        // key ID. Some PKCS#12 consumers require one to connect the private key
        // and certificate.
        !X509_digest(cert, EVP_sha1(), key_id, &key_id_len)) {
      return 0;
    }
  }

  // See https://tools.ietf.org/html/rfc7292#section-4.
  PKCS12 *ret = NULL;
  CBB cbb, pfx, auth_safe, auth_safe_oid, auth_safe_wrapper, auth_safe_data,
      content_infos;
  uint8_t mac_key[EVP_MAX_MD_SIZE];
  if (!CBB_init(&cbb, 0) ||
      !CBB_add_asn1(&cbb, &pfx, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1_uint64(&pfx, 3) ||
      // auth_safe is a data ContentInfo.
      !CBB_add_asn1(&pfx, &auth_safe, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&auth_safe, &auth_safe_oid, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&auth_safe_oid, kPKCS7Data, sizeof(kPKCS7Data)) ||
      !CBB_add_asn1(&auth_safe, &auth_safe_wrapper,
                    CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) ||
      !CBB_add_asn1(&auth_safe_wrapper, &auth_safe_data,
                    CBS_ASN1_OCTETSTRING) ||
      // See https://tools.ietf.org/html/rfc7292#section-4.1. |auth_safe|'s
      // contains a SEQUENCE of ContentInfos.
      !CBB_add_asn1(&auth_safe_data, &content_infos, CBS_ASN1_SEQUENCE)) {
    goto err;
  }

  // If there are any certificates, place them in CertBags wrapped in a single
  // encrypted ContentInfo.
  if (cert != NULL || sk_X509_num(chain) > 0) {
    if (cert_nid < 0) {
      // Place the certificates in an unencrypted ContentInfo. This could be
      // more compactly-encoded by reusing the same ContentInfo as the key, but
      // OpenSSL does not do this. We keep them separate for consistency. (Keys,
      // even when encrypted, are always placed in unencrypted ContentInfos.
      // PKCS#12 defines bag-level encryption for keys.)
      CBB content_info, oid, wrapper, data;
      if (!CBB_add_asn1(&content_infos, &content_info, CBS_ASN1_SEQUENCE) ||
          !CBB_add_asn1(&content_info, &oid, CBS_ASN1_OBJECT) ||
          !CBB_add_bytes(&oid, kPKCS7Data, sizeof(kPKCS7Data)) ||
          !CBB_add_asn1(&content_info, &wrapper,
                        CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) ||
          !CBB_add_asn1(&wrapper, &data, CBS_ASN1_OCTETSTRING) ||
          !add_cert_safe_contents(&data, cert, chain, name, key_id,
                                  key_id_len) ||
          !CBB_flush(&content_infos)) {
        goto err;
      }
    } else {
      // Serialize the SafeContents to a scratch buffer, then encrypt it into
      // an EncryptedData ContentInfo.
      CBB plaintext_cbb;
      int ok = CBB_init(&plaintext_cbb, 0) &&
               add_cert_safe_contents(&plaintext_cbb, cert, chain, name,
                                      key_id, key_id_len) &&
               add_encrypted_data(
                   &content_infos, cert_nid, password, password_len,
                   iterations, CBB_data(&plaintext_cbb),
                   CBB_len(&plaintext_cbb));
      CBB_cleanup(&plaintext_cbb);
      if (!ok) {
        goto err;
      }
    }
  }

  // If there is a key, place it in a single KeyBag or PKCS8ShroudedKeyBag
  // wrapped in an unencrypted ContentInfo. (One could also place it in a KeyBag
  // inside an encrypted ContentInfo, but OpenSSL does not do this and some
  // PKCS#12 consumers do not support KeyBags.)
  if (pkey != NULL) {
    CBB content_info, oid, wrapper, data, safe_contents, bag, bag_oid,
        bag_contents;
    if (  // Add another data ContentInfo.
        !CBB_add_asn1(&content_infos, &content_info, CBS_ASN1_SEQUENCE) ||
        !CBB_add_asn1(&content_info, &oid, CBS_ASN1_OBJECT) ||
        !CBB_add_bytes(&oid, kPKCS7Data, sizeof(kPKCS7Data)) ||
        !CBB_add_asn1(&content_info, &wrapper,
                      CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) ||
        !CBB_add_asn1(&wrapper, &data, CBS_ASN1_OCTETSTRING) ||
        !CBB_add_asn1(&data, &safe_contents, CBS_ASN1_SEQUENCE) ||
        // Add a SafeBag containing a PKCS8ShroudedKeyBag.
        !CBB_add_asn1(&safe_contents, &bag, CBS_ASN1_SEQUENCE) ||
        !CBB_add_asn1(&bag, &bag_oid, CBS_ASN1_OBJECT)) {
      goto err;
    }
    // A negative |key_nid| selects an unencrypted KeyBag; otherwise the key
    // is shrouded (encrypted) under |key_nid| with the file password.
    if (key_nid < 0) {
      if (!CBB_add_bytes(&bag_oid, kKeyBag, sizeof(kKeyBag)) ||
          !CBB_add_asn1(&bag, &bag_contents,
                        CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) ||
          !EVP_marshal_private_key(&bag_contents, pkey)) {
        goto err;
      }
    } else {
      if (!CBB_add_bytes(&bag_oid, kPKCS8ShroudedKeyBag,
                         sizeof(kPKCS8ShroudedKeyBag)) ||
          !CBB_add_asn1(&bag, &bag_contents,
                        CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) ||
          !PKCS8_marshal_encrypted_private_key(
              &bag_contents, key_nid, NULL, password, password_len,
              NULL /* generate a random salt */,
              0 /* use default salt length */, iterations, pkey)) {
        goto err;
      }
    }
    size_t name_len = 0;
    if (name) {
      name_len = strlen(name);
    }
    if (!add_bag_attributes(&bag, name, name_len, key_id, key_id_len) ||
        !CBB_flush(&content_infos)) {
      goto err;
    }
  }

  {
    // Compute the MAC. Match OpenSSL in using SHA-1 as the hash function. The
    // MAC covers |auth_safe_data|.
    const EVP_MD *mac_md = EVP_sha1();
    uint8_t mac_salt[PKCS5_SALT_LEN];
    uint8_t mac[EVP_MAX_MD_SIZE];
    unsigned mac_len;
    if (!CBB_flush(&auth_safe_data) ||
        !RAND_bytes(mac_salt, sizeof(mac_salt)) ||
        !pkcs12_key_gen(password, password_len, mac_salt, sizeof(mac_salt),
                        PKCS12_MAC_ID, mac_iterations, EVP_MD_size(mac_md),
                        mac_key, mac_md) ||
        !HMAC(mac_md, mac_key, EVP_MD_size(mac_md), CBB_data(&auth_safe_data),
              CBB_len(&auth_safe_data), mac, &mac_len)) {
      goto err;
    }

    CBB mac_data, digest_info, mac_cbb, mac_salt_cbb;
    if (!CBB_add_asn1(&pfx, &mac_data, CBS_ASN1_SEQUENCE) ||
        !CBB_add_asn1(&mac_data, &digest_info, CBS_ASN1_SEQUENCE) ||
        !EVP_marshal_digest_algorithm(&digest_info, mac_md) ||
        !CBB_add_asn1(&digest_info, &mac_cbb, CBS_ASN1_OCTETSTRING) ||
        !CBB_add_bytes(&mac_cbb, mac, mac_len) ||
        !CBB_add_asn1(&mac_data, &mac_salt_cbb, CBS_ASN1_OCTETSTRING) ||
        !CBB_add_bytes(&mac_salt_cbb, mac_salt, sizeof(mac_salt)) ||
        // The iteration count has a DEFAULT of 1, but RFC 7292 says "The
        // default is for historical reasons and its use is deprecated." Thus we
        // explicitly encode the iteration count, though it is not valid DER.
        !CBB_add_asn1_uint64(&mac_data, mac_iterations)) {
      goto err;
    }

    // NOTE(review): the template argument appears to have been stripped by
    // the repository extraction; upstream reads
    // reinterpret_cast<PKCS12 *>(...) — confirm against the original file.
    ret = reinterpret_cast(OPENSSL_malloc(sizeof(PKCS12)));
    if (ret == NULL ||
        !CBB_finish(&cbb, &ret->ber_bytes, &ret->ber_len)) {
      OPENSSL_free(ret);
      ret = NULL;
      goto err;
    }
  }

err:
  // Scrub the MAC key material regardless of outcome.
  OPENSSL_cleanse(mac_key, sizeof(mac_key));
  CBB_cleanup(&cbb);
  return ret;
}

// PKCS12_free releases |p12| and its owned BER buffer; NULL is a no-op.
void PKCS12_free(PKCS12 *p12) {
  if (p12 == NULL) {
    return;
  }
  OPENSSL_free(p12->ber_bytes);
  OPENSSL_free(p12);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/poly1305/internal.h
================================================
/* Copyright 2016 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#ifndef OPENSSL_HEADER_POLY1305_INTERNAL_H
#define OPENSSL_HEADER_POLY1305_INTERNAL_H

// NOTE(review): the header names between angle brackets appear to have been
// stripped by the repository extraction — confirm against the original file.
#include
#include

#if defined(__cplusplus)
extern "C" {
#endif

// NEON assembly implementation, available on 32-bit ARM builds with assembly
// enabled, excluding Apple platforms.
#if defined(OPENSSL_ARM) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_APPLE)
#define OPENSSL_POLY1305_NEON

void CRYPTO_poly1305_init_neon(poly1305_state *state, const uint8_t key[32]);
void CRYPTO_poly1305_update_neon(poly1305_state *state, const uint8_t *in,
                                 size_t in_len);
void CRYPTO_poly1305_finish_neon(poly1305_state *state, uint8_t mac[16]);
#endif

#if defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_POLY1305_INTERNAL_H


================================================
FILE: Sources/CNIOBoringSSL/crypto/poly1305/poly1305.cc
================================================
/* Copyright 2014 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// This implementation of poly1305 is by Andrew Moon
// (https://github.com/floodyberry/poly1305-donna) and released as public
// domain.

// NOTE(review): the header names between angle brackets appear to have been
// stripped by the repository extraction — confirm against the original file.
#include
#include
#include

#include "../internal.h"
#include "internal.h"


// This generic implementation is only compiled when the uint128-based x86-64
// path is unavailable.
#if !defined(BORINGSSL_HAS_UINT128) || !defined(OPENSSL_X86_64)

// mul32x32_64 returns the full 64-bit product of two 32-bit values.
static uint64_t mul32x32_64(uint32_t a, uint32_t b) { return (uint64_t)a * b; }

// Internal Poly1305 state: the clamped key |r| (r0..r4, radix 2^26), the
// precomputed 5*r values (s1..s4), the accumulator |h| (h0..h4), a partial
// 16-byte input block, and the final-addition half of the key.
struct poly1305_state_st {
  uint32_t r0, r1, r2, r3, r4;
  uint32_t s1, s2, s3, s4;
  uint32_t h0, h1, h2, h3, h4;
  uint8_t buf[16];
  size_t buf_used;
  uint8_t key[16];
};

static_assert(
    sizeof(struct poly1305_state_st) + 63 <= sizeof(poly1305_state),
    "poly1305_state isn't large enough to hold aligned poly1305_state_st");

// poly1305_aligned_state returns |state| aligned up to a 64-byte boundary.
static inline struct poly1305_state_st *poly1305_aligned_state(
    poly1305_state *state) {
  // NOTE(review): the template argument appears to have been stripped by the
  // repository extraction; upstream reads
  // reinterpret_cast<struct poly1305_state_st *>(...) — confirm.
  return reinterpret_cast(align_pointer(state, 64));
}

// poly1305_blocks updates |state| given some amount of input data. This
// function may only be called with a |len| that is not a multiple of 16 at the
// end of the data. Otherwise the input must be buffered into 16 byte blocks.
static void poly1305_update(struct poly1305_state_st *state, const uint8_t *in, size_t len) { uint32_t t0, t1, t2, t3; uint64_t t[5]; uint32_t b; uint64_t c; size_t j; uint8_t mp[16]; if (len < 16) { goto poly1305_donna_atmost15bytes; } poly1305_donna_16bytes: t0 = CRYPTO_load_u32_le(in); t1 = CRYPTO_load_u32_le(in + 4); t2 = CRYPTO_load_u32_le(in + 8); t3 = CRYPTO_load_u32_le(in + 12); in += 16; len -= 16; state->h0 += t0 & 0x3ffffff; state->h1 += ((((uint64_t)t1 << 32) | t0) >> 26) & 0x3ffffff; state->h2 += ((((uint64_t)t2 << 32) | t1) >> 20) & 0x3ffffff; state->h3 += ((((uint64_t)t3 << 32) | t2) >> 14) & 0x3ffffff; state->h4 += (t3 >> 8) | (1 << 24); poly1305_donna_mul: t[0] = mul32x32_64(state->h0, state->r0) + mul32x32_64(state->h1, state->s4) + mul32x32_64(state->h2, state->s3) + mul32x32_64(state->h3, state->s2) + mul32x32_64(state->h4, state->s1); t[1] = mul32x32_64(state->h0, state->r1) + mul32x32_64(state->h1, state->r0) + mul32x32_64(state->h2, state->s4) + mul32x32_64(state->h3, state->s3) + mul32x32_64(state->h4, state->s2); t[2] = mul32x32_64(state->h0, state->r2) + mul32x32_64(state->h1, state->r1) + mul32x32_64(state->h2, state->r0) + mul32x32_64(state->h3, state->s4) + mul32x32_64(state->h4, state->s3); t[3] = mul32x32_64(state->h0, state->r3) + mul32x32_64(state->h1, state->r2) + mul32x32_64(state->h2, state->r1) + mul32x32_64(state->h3, state->r0) + mul32x32_64(state->h4, state->s4); t[4] = mul32x32_64(state->h0, state->r4) + mul32x32_64(state->h1, state->r3) + mul32x32_64(state->h2, state->r2) + mul32x32_64(state->h3, state->r1) + mul32x32_64(state->h4, state->r0); state->h0 = (uint32_t)t[0] & 0x3ffffff; c = (t[0] >> 26); t[1] += c; state->h1 = (uint32_t)t[1] & 0x3ffffff; b = (uint32_t)(t[1] >> 26); t[2] += b; state->h2 = (uint32_t)t[2] & 0x3ffffff; b = (uint32_t)(t[2] >> 26); t[3] += b; state->h3 = (uint32_t)t[3] & 0x3ffffff; b = (uint32_t)(t[3] >> 26); t[4] += b; state->h4 = (uint32_t)t[4] & 0x3ffffff; b = (uint32_t)(t[4] >> 26); state->h0 += 
b * 5; if (len >= 16) { goto poly1305_donna_16bytes; } // final bytes poly1305_donna_atmost15bytes: if (!len) { return; } for (j = 0; j < len; j++) { mp[j] = in[j]; } mp[j++] = 1; for (; j < 16; j++) { mp[j] = 0; } len = 0; t0 = CRYPTO_load_u32_le(mp + 0); t1 = CRYPTO_load_u32_le(mp + 4); t2 = CRYPTO_load_u32_le(mp + 8); t3 = CRYPTO_load_u32_le(mp + 12); state->h0 += t0 & 0x3ffffff; state->h1 += ((((uint64_t)t1 << 32) | t0) >> 26) & 0x3ffffff; state->h2 += ((((uint64_t)t2 << 32) | t1) >> 20) & 0x3ffffff; state->h3 += ((((uint64_t)t3 << 32) | t2) >> 14) & 0x3ffffff; state->h4 += (t3 >> 8); goto poly1305_donna_mul; } void CRYPTO_poly1305_init(poly1305_state *statep, const uint8_t key[32]) { struct poly1305_state_st *state = poly1305_aligned_state(statep); uint32_t t0, t1, t2, t3; #if defined(OPENSSL_POLY1305_NEON) if (CRYPTO_is_NEON_capable()) { CRYPTO_poly1305_init_neon(statep, key); return; } #endif t0 = CRYPTO_load_u32_le(key + 0); t1 = CRYPTO_load_u32_le(key + 4); t2 = CRYPTO_load_u32_le(key + 8); t3 = CRYPTO_load_u32_le(key + 12); // precompute multipliers state->r0 = t0 & 0x3ffffff; t0 >>= 26; t0 |= t1 << 6; state->r1 = t0 & 0x3ffff03; t1 >>= 20; t1 |= t2 << 12; state->r2 = t1 & 0x3ffc0ff; t2 >>= 14; t2 |= t3 << 18; state->r3 = t2 & 0x3f03fff; t3 >>= 8; state->r4 = t3 & 0x00fffff; state->s1 = state->r1 * 5; state->s2 = state->r2 * 5; state->s3 = state->r3 * 5; state->s4 = state->r4 * 5; // init state state->h0 = 0; state->h1 = 0; state->h2 = 0; state->h3 = 0; state->h4 = 0; state->buf_used = 0; OPENSSL_memcpy(state->key, key + 16, sizeof(state->key)); } void CRYPTO_poly1305_update(poly1305_state *statep, const uint8_t *in, size_t in_len) { struct poly1305_state_st *state = poly1305_aligned_state(statep); // Work around a C language bug. See https://crbug.com/1019588. 
if (in_len == 0) { return; } #if defined(OPENSSL_POLY1305_NEON) if (CRYPTO_is_NEON_capable()) { CRYPTO_poly1305_update_neon(statep, in, in_len); return; } #endif if (state->buf_used) { size_t todo = 16 - state->buf_used; if (todo > in_len) { todo = in_len; } for (size_t i = 0; i < todo; i++) { state->buf[state->buf_used + i] = in[i]; } state->buf_used += todo; in_len -= todo; in += todo; if (state->buf_used == 16) { poly1305_update(state, state->buf, 16); state->buf_used = 0; } } if (in_len >= 16) { size_t todo = in_len & ~0xf; poly1305_update(state, in, todo); in += todo; in_len &= 0xf; } if (in_len) { for (size_t i = 0; i < in_len; i++) { state->buf[i] = in[i]; } state->buf_used = in_len; } } void CRYPTO_poly1305_finish(poly1305_state *statep, uint8_t mac[16]) { struct poly1305_state_st *state = poly1305_aligned_state(statep); uint32_t g0, g1, g2, g3, g4; uint32_t b, nb; #if defined(OPENSSL_POLY1305_NEON) if (CRYPTO_is_NEON_capable()) { CRYPTO_poly1305_finish_neon(statep, mac); return; } #endif if (state->buf_used) { poly1305_update(state, state->buf, state->buf_used); } b = state->h0 >> 26; state->h0 = state->h0 & 0x3ffffff; state->h1 += b; b = state->h1 >> 26; state->h1 = state->h1 & 0x3ffffff; state->h2 += b; b = state->h2 >> 26; state->h2 = state->h2 & 0x3ffffff; state->h3 += b; b = state->h3 >> 26; state->h3 = state->h3 & 0x3ffffff; state->h4 += b; b = state->h4 >> 26; state->h4 = state->h4 & 0x3ffffff; state->h0 += b * 5; g0 = state->h0 + 5; b = g0 >> 26; g0 &= 0x3ffffff; g1 = state->h1 + b; b = g1 >> 26; g1 &= 0x3ffffff; g2 = state->h2 + b; b = g2 >> 26; g2 &= 0x3ffffff; g3 = state->h3 + b; b = g3 >> 26; g3 &= 0x3ffffff; g4 = state->h4 + b - (1 << 26); b = (g4 >> 31) - 1; nb = ~b; state->h0 = (state->h0 & nb) | (g0 & b); state->h1 = (state->h1 & nb) | (g1 & b); state->h2 = (state->h2 & nb) | (g2 & b); state->h3 = (state->h3 & nb) | (g3 & b); state->h4 = (state->h4 & nb) | (g4 & b); uint64_t f0 = ((state->h0) | (state->h1 << 26)) + 
(uint64_t)CRYPTO_load_u32_le(&state->key[0]); uint64_t f1 = ((state->h1 >> 6) | (state->h2 << 20)) + (uint64_t)CRYPTO_load_u32_le(&state->key[4]); uint64_t f2 = ((state->h2 >> 12) | (state->h3 << 14)) + (uint64_t)CRYPTO_load_u32_le(&state->key[8]); uint64_t f3 = ((state->h3 >> 18) | (state->h4 << 8)) + (uint64_t)CRYPTO_load_u32_le(&state->key[12]); CRYPTO_store_u32_le(&mac[0], (uint32_t)f0); f1 += (f0 >> 32); CRYPTO_store_u32_le(&mac[4], (uint32_t)f1); f2 += (f1 >> 32); CRYPTO_store_u32_le(&mac[8], (uint32_t)f2); f3 += (f2 >> 32); CRYPTO_store_u32_le(&mac[12], (uint32_t)f3); } #endif // !BORINGSSL_HAS_UINT128 || !OPENSSL_X86_64 ================================================ FILE: Sources/CNIOBoringSSL/crypto/poly1305/poly1305_arm.cc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ // This implementation was taken from the public domain, neon2 version in // SUPERCOP by D. J. Bernstein and Peter Schwabe. 
#include #include #include #include "../internal.h" #include "internal.h" #if defined(OPENSSL_POLY1305_NEON) typedef struct { uint32_t v[12]; // for alignment; only using 10 } fe1305x2; #define addmulmod openssl_poly1305_neon2_addmulmod #define blocks openssl_poly1305_neon2_blocks extern "C" { extern void addmulmod(fe1305x2 *r, const fe1305x2 *x, const fe1305x2 *y, const fe1305x2 *c); extern int blocks(fe1305x2 *h, const fe1305x2 *precomp, const uint8_t *in, size_t inlen); } static void freeze(fe1305x2 *r) { int i; uint32_t x0 = r->v[0]; uint32_t x1 = r->v[2]; uint32_t x2 = r->v[4]; uint32_t x3 = r->v[6]; uint32_t x4 = r->v[8]; uint32_t y0; uint32_t y1; uint32_t y2; uint32_t y3; uint32_t y4; uint32_t swap; for (i = 0; i < 3; ++i) { x1 += x0 >> 26; x0 &= 0x3ffffff; x2 += x1 >> 26; x1 &= 0x3ffffff; x3 += x2 >> 26; x2 &= 0x3ffffff; x4 += x3 >> 26; x3 &= 0x3ffffff; x0 += 5 * (x4 >> 26); x4 &= 0x3ffffff; } y0 = x0 + 5; y1 = x1 + (y0 >> 26); y0 &= 0x3ffffff; y2 = x2 + (y1 >> 26); y1 &= 0x3ffffff; y3 = x3 + (y2 >> 26); y2 &= 0x3ffffff; y4 = x4 + (y3 >> 26); y3 &= 0x3ffffff; swap = -(y4 >> 26); y4 &= 0x3ffffff; y0 ^= x0; y1 ^= x1; y2 ^= x2; y3 ^= x3; y4 ^= x4; y0 &= swap; y1 &= swap; y2 &= swap; y3 &= swap; y4 &= swap; y0 ^= x0; y1 ^= x1; y2 ^= x2; y3 ^= x3; y4 ^= x4; r->v[0] = y0; r->v[2] = y1; r->v[4] = y2; r->v[6] = y3; r->v[8] = y4; } static void store32(uint8_t out[4], uint32_t v) { OPENSSL_memcpy(out, &v, 4); } // load32 exists to avoid breaking strict aliasing rules in // fe1305x2_frombytearray. 
static uint32_t load32(const uint8_t t[4]) { uint32_t tmp; OPENSSL_memcpy(&tmp, t, sizeof(tmp)); return tmp; } static void fe1305x2_tobytearray(uint8_t r[16], fe1305x2 *x) { uint32_t x0 = x->v[0]; uint32_t x1 = x->v[2]; uint32_t x2 = x->v[4]; uint32_t x3 = x->v[6]; uint32_t x4 = x->v[8]; x1 += x0 >> 26; x0 &= 0x3ffffff; x2 += x1 >> 26; x1 &= 0x3ffffff; x3 += x2 >> 26; x2 &= 0x3ffffff; x4 += x3 >> 26; x3 &= 0x3ffffff; store32(r, x0 + (x1 << 26)); store32(r + 4, (x1 >> 6) + (x2 << 20)); store32(r + 8, (x2 >> 12) + (x3 << 14)); store32(r + 12, (x3 >> 18) + (x4 << 8)); } static void fe1305x2_frombytearray(fe1305x2 *r, const uint8_t *x, size_t xlen) { size_t i; uint8_t t[17]; for (i = 0; (i < 16) && (i < xlen); i++) { t[i] = x[i]; } xlen -= i; x += i; t[i++] = 1; for (; i < 17; i++) { t[i] = 0; } r->v[0] = 0x3ffffff & load32(t); r->v[2] = 0x3ffffff & (load32(t + 3) >> 2); r->v[4] = 0x3ffffff & (load32(t + 6) >> 4); r->v[6] = 0x3ffffff & (load32(t + 9) >> 6); r->v[8] = load32(t + 13); if (xlen) { for (i = 0; (i < 16) && (i < xlen); i++) { t[i] = x[i]; } t[i++] = 1; for (; i < 17; i++) { t[i] = 0; } r->v[1] = 0x3ffffff & load32(t); r->v[3] = 0x3ffffff & (load32(t + 3) >> 2); r->v[5] = 0x3ffffff & (load32(t + 6) >> 4); r->v[7] = 0x3ffffff & (load32(t + 9) >> 6); r->v[9] = load32(t + 13); } else { r->v[1] = r->v[3] = r->v[5] = r->v[7] = r->v[9] = 0; } } static const fe1305x2 zero alignas(16) = {0}; struct poly1305_state_st { uint8_t data[sizeof(fe1305x2[5]) + 128]; uint8_t buf[32]; size_t buf_used; uint8_t key[16]; }; static_assert( sizeof(struct poly1305_state_st) + 63 <= sizeof(poly1305_state), "poly1305_state isn't large enough to hold aligned poly1305_state_st."); void CRYPTO_poly1305_init_neon(poly1305_state *state, const uint8_t key[32]) { struct poly1305_state_st *st = (struct poly1305_state_st *)(state); fe1305x2 *const r = (fe1305x2 *)(st->data + (15 & (-(int)st->data))); fe1305x2 *const h = r + 1; fe1305x2 *const c = h + 1; fe1305x2 *const precomp = c + 1; r->v[1] 
= r->v[0] = 0x3ffffff & load32(key); r->v[3] = r->v[2] = 0x3ffff03 & (load32(key + 3) >> 2); r->v[5] = r->v[4] = 0x3ffc0ff & (load32(key + 6) >> 4); r->v[7] = r->v[6] = 0x3f03fff & (load32(key + 9) >> 6); r->v[9] = r->v[8] = 0x00fffff & (load32(key + 12) >> 8); for (size_t j = 0; j < 10; j++) { h->v[j] = 0; // XXX: should fast-forward a bit } addmulmod(precomp, r, r, &zero); // precompute r^2 addmulmod(precomp + 1, precomp, precomp, &zero); // precompute r^4 OPENSSL_memcpy(st->key, key + 16, 16); st->buf_used = 0; } void CRYPTO_poly1305_update_neon(poly1305_state *state, const uint8_t *in, size_t in_len) { struct poly1305_state_st *st = (struct poly1305_state_st *)(state); fe1305x2 *const r = (fe1305x2 *)(st->data + (15 & (-(int)st->data))); fe1305x2 *const h = r + 1; fe1305x2 *const c = h + 1; fe1305x2 *const precomp = c + 1; if (st->buf_used) { size_t todo = 32 - st->buf_used; if (todo > in_len) { todo = in_len; } for (size_t i = 0; i < todo; i++) { st->buf[st->buf_used + i] = in[i]; } st->buf_used += todo; in_len -= todo; in += todo; if (st->buf_used == sizeof(st->buf) && in_len) { addmulmod(h, h, precomp, &zero); fe1305x2_frombytearray(c, st->buf, sizeof(st->buf)); for (size_t i = 0; i < 10; i++) { h->v[i] += c->v[i]; } st->buf_used = 0; } } while (in_len > 32) { size_t tlen = 1048576; if (in_len < tlen) { tlen = in_len; } tlen -= blocks(h, precomp, in, tlen); in_len -= tlen; in += tlen; } if (in_len) { for (size_t i = 0; i < in_len; i++) { st->buf[i] = in[i]; } st->buf_used = in_len; } } void CRYPTO_poly1305_finish_neon(poly1305_state *state, uint8_t mac[16]) { struct poly1305_state_st *st = (struct poly1305_state_st *)(state); fe1305x2 *const r = (fe1305x2 *)(st->data + (15 & (-(int)st->data))); fe1305x2 *const h = r + 1; fe1305x2 *const c = h + 1; fe1305x2 *const precomp = c + 1; addmulmod(h, h, precomp, &zero); if (st->buf_used > 16) { fe1305x2_frombytearray(c, st->buf, st->buf_used); precomp->v[1] = r->v[1]; precomp->v[3] = r->v[3]; precomp->v[5] = 
r->v[5]; precomp->v[7] = r->v[7]; precomp->v[9] = r->v[9]; addmulmod(h, h, precomp, c); } else if (st->buf_used > 0) { fe1305x2_frombytearray(c, st->buf, st->buf_used); r->v[1] = 1; r->v[3] = 0; r->v[5] = 0; r->v[7] = 0; r->v[9] = 0; addmulmod(h, h, r, c); } h->v[0] += h->v[1]; h->v[2] += h->v[3]; h->v[4] += h->v[5]; h->v[6] += h->v[7]; h->v[8] += h->v[9]; freeze(h); fe1305x2_frombytearray(c, st->key, 16); c->v[8] ^= (1 << 24); h->v[0] += c->v[0]; h->v[2] += c->v[2]; h->v[4] += c->v[4]; h->v[6] += c->v[6]; h->v[8] += c->v[8]; fe1305x2_tobytearray(mac, h); } #endif // OPENSSL_POLY1305_NEON ================================================ FILE: Sources/CNIOBoringSSL/crypto/poly1305/poly1305_arm_asm.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL #if defined(__arm__) && defined(__linux__) #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) # This implementation was taken from the public domain, neon2 version in # SUPERCOP by D. J. Bernstein and Peter Schwabe. 
# qhasm: int32 input_0 # qhasm: int32 input_1 # qhasm: int32 input_2 # qhasm: int32 input_3 # qhasm: stack32 input_4 # qhasm: stack32 input_5 # qhasm: stack32 input_6 # qhasm: stack32 input_7 # qhasm: int32 caller_r4 # qhasm: int32 caller_r5 # qhasm: int32 caller_r6 # qhasm: int32 caller_r7 # qhasm: int32 caller_r8 # qhasm: int32 caller_r9 # qhasm: int32 caller_r10 # qhasm: int32 caller_r11 # qhasm: int32 caller_r12 # qhasm: int32 caller_r14 # qhasm: reg128 caller_q4 # qhasm: reg128 caller_q5 # qhasm: reg128 caller_q6 # qhasm: reg128 caller_q7 # qhasm: startcode .fpu neon .text # qhasm: reg128 r0 # qhasm: reg128 r1 # qhasm: reg128 r2 # qhasm: reg128 r3 # qhasm: reg128 r4 # qhasm: reg128 x01 # qhasm: reg128 x23 # qhasm: reg128 x4 # qhasm: reg128 y0 # qhasm: reg128 y12 # qhasm: reg128 y34 # qhasm: reg128 5y12 # qhasm: reg128 5y34 # qhasm: stack128 y0_stack # qhasm: stack128 y12_stack # qhasm: stack128 y34_stack # qhasm: stack128 5y12_stack # qhasm: stack128 5y34_stack # qhasm: reg128 z0 # qhasm: reg128 z12 # qhasm: reg128 z34 # qhasm: reg128 5z12 # qhasm: reg128 5z34 # qhasm: stack128 z0_stack # qhasm: stack128 z12_stack # qhasm: stack128 z34_stack # qhasm: stack128 5z12_stack # qhasm: stack128 5z34_stack # qhasm: stack128 two24 # qhasm: int32 ptr # qhasm: reg128 c01 # qhasm: reg128 c23 # qhasm: reg128 d01 # qhasm: reg128 d23 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 t2 # qhasm: reg128 t3 # qhasm: reg128 t4 # qhasm: reg128 mask # qhasm: reg128 u0 # qhasm: reg128 u1 # qhasm: reg128 u2 # qhasm: reg128 u3 # qhasm: reg128 u4 # qhasm: reg128 v01 # qhasm: reg128 mid # qhasm: reg128 v23 # qhasm: reg128 v4 # qhasm: int32 len # qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks .align 4 .global openssl_poly1305_neon2_blocks .hidden openssl_poly1305_neon2_blocks .type openssl_poly1305_neon2_blocks STT_FUNC openssl_poly1305_neon2_blocks: vpush {q4,q5,q6,q7} mov r12,sp sub sp,sp,#192 bic sp,sp,#31 # qhasm: len = input_3 # asm 1: mov 
>len=int32#4,len=r3,y12=reg128#2%bot->y12=reg128#2%top},[y12=d2->y12=d3},[y34=reg128#3%bot->y34=reg128#3%top},[y34=d4->y34=d5},[input_1=int32#2,input_1=r1,z12=reg128#5%bot->z12=reg128#5%top},[z12=d8->z12=d9},[z34=reg128#6%bot->z34=reg128#6%top},[z34=d10->z34=d11},[mask=reg128#7,#0xffffffff # asm 2: vmov.i64 >mask=q6,#0xffffffff vmov.i64 q6,#0xffffffff # qhasm: 2x u4 = 0xff # asm 1: vmov.i64 >u4=reg128#8,#0xff # asm 2: vmov.i64 >u4=q7,#0xff vmov.i64 q7,#0xff # qhasm: x01 aligned= mem128[input_0];input_0+=16 # asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[x01=d16->x01=d17},[x23=reg128#10%bot->x23=reg128#10%top},[x23=d18->x23=d19},[input_0=int32#1,input_0=r0,>=6 # asm 1: vshr.u64 >mask=reg128#7,mask=q6,>= 7 # asm 1: vshr.u64 >u4=reg128#8,u4=q7,5y12=reg128#12,5y12=q11,5y34=reg128#13,5y34=q12,5y12=reg128#12,<5y12=reg128#12,5y12=q11,<5y12=q11,5y34=reg128#13,<5y34=reg128#13,5y34=q12,<5y34=q12,u4=reg128#8,u4=q7,5z12=reg128#14,5z12=q13,5z34=reg128#15,5z34=q14,5z12=reg128#14,<5z12=reg128#14,5z12=q13,<5z12=q13,5z34=reg128#15,<5z34=reg128#15,5z34=q14,<5z34=q14,ptr=int32#2,ptr=r1,r4=reg128#16,r4=q15,r0=reg128#8,r0=q7,ptr=int32#2,ptr=r1,ptr=int32#2,ptr=r1,ptr=int32#2,ptr=r1,ptr=int32#2,ptr=r1,ptr=int32#2,ptr=r1,ptr=int32#2,ptr=r1,ptr=int32#2,<5y12_stack=stack128#5 # asm 2: lea >ptr=r1,<5y12_stack=[sp,#64] add r1,sp,#64 # qhasm: mem128[ptr] aligned= 5y12 # asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[ptr=int32#2,<5y34_stack=stack128#6 # asm 2: lea >ptr=r1,<5y34_stack=[sp,#80] add r1,sp,#80 # qhasm: mem128[ptr] aligned= 5y34 # asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[ptr=int32#2,<5z12_stack=stack128#10 # asm 2: lea >ptr=r1,<5z12_stack=[sp,#144] add r1,sp,#144 # qhasm: mem128[ptr] aligned= 5z12 # asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[ptr=int32#2,<5z34_stack=stack128#11 # asm 2: lea >ptr=r1,<5z34_stack=[sp,#160] add r1,sp,#160 # qhasm: mem128[ptr] aligned= 5z34 # asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[? 
len - 64 # asm 1: cmp bls ._below64bytes # qhasm: input_2 += 32 # asm 1: add >input_2=int32#2,input_2=r1,c01=reg128#1%bot->c01=reg128#1%top},[c01=d0->c01=d1},[c23=reg128#2%bot->c23=reg128#2%top},[c23=d2->c23=d3},[ptr=int32#3,ptr=r2,z12=reg128#3%bot->z12=reg128#3%top},[z12=d4->z12=d5},[ptr=int32#3,ptr=r2,z0=reg128#4%bot->z0=reg128#4%top},[z0=d6->z0=d7},[r3=reg128#5,r3=q4,input_2=int32#2,input_2=r1,ptr=int32#3,<5z34_stack=stack128#11 # asm 2: lea >ptr=r2,<5z34_stack=[sp,#160] add r2,sp,#160 # qhasm: 5z34 aligned= mem128[ptr] # asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[5z34=d10->5z34=d11},[r0=reg128#8,r0=q7,r2=reg128#14,r2=q13,d01=reg128#12%bot->d01=reg128#12%top},[d01=d22->d01=d23},[r1=reg128#15,r1=q14,ptr=int32#3,<5z12_stack=stack128#10 # asm 2: lea >ptr=r2,<5z12_stack=[sp,#144] add r2,sp,#144 # qhasm: 5z12 aligned= mem128[ptr] # asm 1: vld1.8 {>5z12=reg128#1%bot->5z12=reg128#1%top},[5z12=d0->5z12=d1},[d23=reg128#2%bot->d23=reg128#2%top},[d23=d2->d23=d3},[input_2=int32#2,input_2=r1,> 40 # asm 1: vshr.u64 >v4=reg128#4,v4=q3,> 14; v23[3] = d23[2,3] unsigned>> 14 # asm 1: vshrn.u64 > 26; v01[3] = d01[2,3] unsigned>> 26 # asm 1: vshrn.u64 > 20; v23[1] = mid[2,3] unsigned>> 20 # asm 1: vshrn.u64 ptr=int32#3,ptr=r2,y34=reg128#3%bot->y34=reg128#3%top},[y34=d4->y34=d5},[ptr=int32#3,ptr=r2,y12=reg128#2%bot->y12=reg128#2%top},[y12=d2->y12=d3},[ptr=int32#3,ptr=r2,y0=reg128#1%bot->y0=reg128#1%top},[y0=d0->y0=d1},[ptr=int32#3,<5y34_stack=stack128#6 # asm 2: lea >ptr=r2,<5y34_stack=[sp,#80] add r2,sp,#80 # qhasm: 5y34 aligned= mem128[ptr] # asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[5y34=d24->5y34=d25},[ptr=int32#3,<5y12_stack=stack128#5 # asm 2: lea >ptr=r2,<5y12_stack=[sp,#64] add r2,sp,#64 # qhasm: 5y12 aligned= mem128[ptr] # asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[5y12=d22->5y12=d23},[ptr=int32#3,ptr=r2,> 26 # asm 1: vshr.u64 >t1=reg128#4,t1=q3,len=int32#4,len=r3,r0=reg128#6,r0=q5,r1=reg128#4,r1=q3,> 26 # asm 1: vshr.u64 
>t4=reg128#8,t4=q7,r3=reg128#5,r3=q4,x4=reg128#8,x4=q7,r4=reg128#16%bot->r4=reg128#16%top},[r4=d30->r4=d31},[> 26 # asm 1: vshr.u64 >t2=reg128#9,t2=q8,r1=reg128#4,r1=q3,> 26 # asm 1: vshr.u64 >t0=reg128#10,t0=q9,r2=reg128#9,r2=q8,x4=reg128#11,x4=q10,x01=reg128#6,x01=q5,r0=reg128#8%bot->r0=reg128#8%top},[r0=d14->r0=d15},[ptr=int32#3,ptr=r2,t0=reg128#10,t0=q9,> 26 # asm 1: vshr.u64 >t3=reg128#14,t3=q13,x01=reg128#15,x01=q14,z34=reg128#6%bot->z34=reg128#6%top},[z34=d10->z34=d11},[x23=reg128#10,x23=q9,r3=reg128#5,r3=q4,input_2=int32#2,input_2=r1,> 26 # asm 1: vshr.u64 >t1=reg128#14,t1=q13,x01=reg128#9,x01=q8,r1=reg128#4,r1=q3,> 26 # asm 1: vshr.u64 >t4=reg128#14,t4=q13,r3=reg128#5,r3=q4,x4=reg128#11,x4=q10,? len - 64 # asm 1: cmp bhi ._mainloop2 # qhasm: input_2 -= 32 # asm 1: sub >input_2=int32#3,input_2=r2,? len - 32 # asm 1: cmp bls ._end # qhasm: mainloop: ._mainloop: # qhasm: new r0 # qhasm: ptr = &two24 # asm 1: lea >ptr=int32#2,ptr=r1,r4=reg128#5%bot->r4=reg128#5%top},[r4=d8->r4=d9},[u4=reg128#6%bot->u4=reg128#6%top},[u4=d10->u4=d11},[c01=reg128#8%bot->c01=reg128#8%top},[c01=d14->c01=d15},[c23=reg128#14%bot->c23=reg128#14%top},[c23=d26->c23=d27},[r0=reg128#4,r0=q3,r3=reg128#6,r3=q5,r1=reg128#14,r1=q13,r2=reg128#8,r2=q7,> 26 # asm 1: vshr.u64 >t1=reg128#9,t1=q8,r0=reg128#4,r0=q3,r1=reg128#9,r1=q8,> 26 # asm 1: vshr.u64 >t4=reg128#10,t4=q9,r3=reg128#6,r3=q5,r4=reg128#5,r4=q4,> 26 # asm 1: vshr.u64 >t2=reg128#10,t2=q9,r1=reg128#11,r1=q10,> 26 # asm 1: vshr.u64 >t0=reg128#9,t0=q8,r2=reg128#8,r2=q7,r4=reg128#5,r4=q4,r0=reg128#4,r0=q3,t0=reg128#9,t0=q8,> 26 # asm 1: vshr.u64 >t3=reg128#14,t3=q13,r0=reg128#4,r0=q3,x23=reg128#10,x23=q9,r3=reg128#6,r3=q5,> 26 # asm 1: vshr.u64 >t1=reg128#8,t1=q7,x01=reg128#9,x01=q8,r1=reg128#4,r1=q3,> 26 # asm 1: vshr.u64 >t4=reg128#8,t4=q7,r3=reg128#6,r3=q5,x4=reg128#11,x4=q10,len=int32#4,len=r3,? 
len - 32 # asm 1: cmp bhi ._mainloop # qhasm: end: ._end: # qhasm: mem128[input_0] = x01;input_0+=16 # asm 1: vst1.8 {len=int32#1,len=r0,mask=reg128#1,#0xffffffff # asm 2: vmov.i64 >mask=q0,#0xffffffff vmov.i64 q0,#0xffffffff # qhasm: y01 aligned= mem128[input_2];input_2+=16 # asm 1: vld1.8 {>y01=reg128#2%bot->y01=reg128#2%top},[y01=d2->y01=d3},[_5y01=reg128#3,_5y01=q2,y23=reg128#4%bot->y23=reg128#4%top},[y23=d6->y23=d7},[_5y23=reg128#9,_5y23=q8,_5y4=reg128#11,_5y4=q10,x01=reg128#12%bot->x01=reg128#12%top},[x01=d22->x01=d23},[_5y01=reg128#3,<_5y01=reg128#3,_5y01=q2,<_5y01=q2,x23=reg128#13%bot->x23=reg128#13%top},[x23=d24->x23=d25},[_5y23=reg128#9,<_5y23=reg128#9,_5y23=q8,<_5y23=q8,_5y4=reg128#11,<_5y4=reg128#11,_5y4=q10,<_5y4=q10,c01=reg128#14%bot->c01=reg128#14%top},[c01=d26->c01=d27},[x01=reg128#12,x01=q11,c23=reg128#14%bot->c23=reg128#14%top},[c23=d26->c23=d27},[x23=reg128#13,x23=q12,>=6 # asm 1: vshr.u64 >mask=reg128#1,mask=q0,x4=reg128#14,x4=q13,r0=reg128#15,r0=q14,r1=reg128#3,r1=q2,r2=reg128#16,r2=q15,r3=reg128#9,r3=q8,r4=reg128#10,r4=q9,> 26 # asm 1: vshr.u64 >t1=reg128#2,t1=q1,r0=reg128#4,r0=q3,r1=reg128#2,r1=q1,> 26 # asm 1: vshr.u64 >t4=reg128#3,t4=q2,r3=reg128#9,r3=q8,r4=reg128#3,r4=q2,> 26 # asm 1: vshr.u64 >t2=reg128#10,t2=q9,r1=reg128#2,r1=q1,> 26 # asm 1: vshr.u64 >t0=reg128#11,t0=q10,r2=reg128#10,r2=q9,r4=reg128#3,r4=q2,r0=reg128#4,r0=q3,t0=reg128#11,t0=q10,> 26 # asm 1: vshr.u64 >t3=reg128#12,t3=q11,r0=reg128#4,r0=q3,x23=reg128#10,x23=q9,r3=reg128#9,r3=q8,> 26 # asm 1: vshr.u64 >t1=reg128#11,t1=q10,x01=reg128#4,x01=q3,r1=reg128#2,r1=q1,> 26 # asm 1: vshr.u64 >t4=reg128#11,t4=q10,r3=reg128#1,r3=q0,x4=reg128#3,x4=q2, #include #include "../internal.h" #if defined(BORINGSSL_HAS_UINT128) && defined(OPENSSL_X86_64) #include typedef __m128i xmmi; alignas(16) static const uint32_t poly1305_x64_sse2_message_mask[4] = { (1 << 26) - 1, 0, (1 << 26) - 1, 0}; alignas(16) static const uint32_t poly1305_x64_sse2_5[4] = {5, 0, 5, 0}; alignas(16) static const 
uint32_t poly1305_x64_sse2_1shl128[4] = {(1 << 24), 0, (1 << 24), 0}; static inline uint128_t add128(uint128_t a, uint128_t b) { return a + b; } static inline uint128_t add128_64(uint128_t a, uint64_t b) { return a + b; } static inline uint128_t mul64x64_128(uint64_t a, uint64_t b) { return (uint128_t)a * b; } static inline uint64_t lo128(uint128_t a) { return (uint64_t)a; } static inline uint64_t shr128(uint128_t v, const int shift) { return (uint64_t)(v >> shift); } static inline uint64_t shr128_pair(uint64_t hi, uint64_t lo, const int shift) { return (uint64_t)((((uint128_t)hi << 64) | lo) >> shift); } typedef struct poly1305_power_t { union { xmmi v; uint64_t u[2]; uint32_t d[4]; } R20, R21, R22, R23, R24, S21, S22, S23, S24; } poly1305_power; typedef struct poly1305_state_internal_t { poly1305_power P[2]; /* 288 bytes, top 32 bit halves unused = 144 bytes of free storage */ union { xmmi H[5]; // 80 bytes uint64_t HH[10]; }; // uint64_t r0,r1,r2; [24 bytes] // uint64_t pad0,pad1; [16 bytes] uint64_t started; // 8 bytes uint64_t leftover; // 8 bytes uint8_t buffer[64]; // 64 bytes } poly1305_state_internal; /* 448 bytes total + 63 bytes for alignment = 511 bytes raw */ static_assert(sizeof(struct poly1305_state_internal_t) + 63 <= sizeof(poly1305_state), "poly1305_state isn't large enough to hold aligned " "poly1305_state_internal_t"); static inline poly1305_state_internal *poly1305_aligned_state( poly1305_state *state) { return (poly1305_state_internal *)(((uint64_t)state + 63) & ~63); } static inline size_t poly1305_min(size_t a, size_t b) { return (a < b) ? 
a : b; } void CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]) { poly1305_state_internal *st = poly1305_aligned_state(state); poly1305_power *p; uint64_t r0, r1, r2; uint64_t t0, t1; // clamp key t0 = CRYPTO_load_u64_le(key + 0); t1 = CRYPTO_load_u64_le(key + 8); r0 = t0 & 0xffc0fffffff; t0 >>= 44; t0 |= t1 << 20; r1 = t0 & 0xfffffc0ffff; t1 >>= 24; r2 = t1 & 0x00ffffffc0f; // store r in un-used space of st->P[1] p = &st->P[1]; p->R20.d[1] = (uint32_t)(r0); p->R20.d[3] = (uint32_t)(r0 >> 32); p->R21.d[1] = (uint32_t)(r1); p->R21.d[3] = (uint32_t)(r1 >> 32); p->R22.d[1] = (uint32_t)(r2); p->R22.d[3] = (uint32_t)(r2 >> 32); // store pad p->R23.d[1] = CRYPTO_load_u32_le(key + 16); p->R23.d[3] = CRYPTO_load_u32_le(key + 20); p->R24.d[1] = CRYPTO_load_u32_le(key + 24); p->R24.d[3] = CRYPTO_load_u32_le(key + 28); // H = 0 st->H[0] = _mm_setzero_si128(); st->H[1] = _mm_setzero_si128(); st->H[2] = _mm_setzero_si128(); st->H[3] = _mm_setzero_si128(); st->H[4] = _mm_setzero_si128(); st->started = 0; st->leftover = 0; } static void poly1305_first_block(poly1305_state_internal *st, const uint8_t *m) { const xmmi MMASK = _mm_load_si128((const xmmi *)poly1305_x64_sse2_message_mask); const xmmi FIVE = _mm_load_si128((const xmmi *)poly1305_x64_sse2_5); const xmmi HIBIT = _mm_load_si128((const xmmi *)poly1305_x64_sse2_1shl128); xmmi T5, T6; poly1305_power *p; uint128_t d[3]; uint64_t r0, r1, r2; uint64_t r20, r21, r22, s22; uint64_t pad0, pad1; uint64_t c; uint64_t i; // pull out stored info p = &st->P[1]; r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1]; r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1]; r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1]; pad0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1]; pad1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1]; // compute powers r^2,r^4 r20 = r0; r21 = r1; r22 = r2; for (i = 0; i < 2; i++) { s22 = r22 * (5 << 2); d[0] = add128(mul64x64_128(r20, r20), mul64x64_128(r21 * 
2, s22)); d[1] = add128(mul64x64_128(r22, s22), mul64x64_128(r20 * 2, r21)); d[2] = add128(mul64x64_128(r21, r21), mul64x64_128(r22 * 2, r20)); r20 = lo128(d[0]) & 0xfffffffffff; c = shr128(d[0], 44); d[1] = add128_64(d[1], c); r21 = lo128(d[1]) & 0xfffffffffff; c = shr128(d[1], 44); d[2] = add128_64(d[2], c); r22 = lo128(d[2]) & 0x3ffffffffff; c = shr128(d[2], 42); r20 += c * 5; c = (r20 >> 44); r20 = r20 & 0xfffffffffff; r21 += c; p->R20.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)(r20) & 0x3ffffff), _MM_SHUFFLE(1, 0, 1, 0)); p->R21.v = _mm_shuffle_epi32( _mm_cvtsi32_si128((uint32_t)((r20 >> 26) | (r21 << 18)) & 0x3ffffff), _MM_SHUFFLE(1, 0, 1, 0)); p->R22.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r21 >> 8)) & 0x3ffffff), _MM_SHUFFLE(1, 0, 1, 0)); p->R23.v = _mm_shuffle_epi32( _mm_cvtsi32_si128((uint32_t)((r21 >> 34) | (r22 << 10)) & 0x3ffffff), _MM_SHUFFLE(1, 0, 1, 0)); p->R24.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r22 >> 16))), _MM_SHUFFLE(1, 0, 1, 0)); p->S21.v = _mm_mul_epu32(p->R21.v, FIVE); p->S22.v = _mm_mul_epu32(p->R22.v, FIVE); p->S23.v = _mm_mul_epu32(p->R23.v, FIVE); p->S24.v = _mm_mul_epu32(p->R24.v, FIVE); p--; } // put saved info back p = &st->P[1]; p->R20.d[1] = (uint32_t)(r0); p->R20.d[3] = (uint32_t)(r0 >> 32); p->R21.d[1] = (uint32_t)(r1); p->R21.d[3] = (uint32_t)(r1 >> 32); p->R22.d[1] = (uint32_t)(r2); p->R22.d[3] = (uint32_t)(r2 >> 32); p->R23.d[1] = (uint32_t)(pad0); p->R23.d[3] = (uint32_t)(pad0 >> 32); p->R24.d[1] = (uint32_t)(pad1); p->R24.d[3] = (uint32_t)(pad1 >> 32); // H = [Mx,My] T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)), _mm_loadl_epi64((const xmmi *)(m + 16))); T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)), _mm_loadl_epi64((const xmmi *)(m + 24))); st->H[0] = _mm_and_si128(MMASK, T5); st->H[1] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26)); T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12)); st->H[2] = _mm_and_si128(MMASK, T5); st->H[3] = 
_mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
  st->H[4] = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);
}  // end of poly1305_first_block (its declaration precedes this chunk)

// poly1305_blocks absorbs |bytes| (a multiple of 64) of message |m| into the
// two-lane SIMD accumulator st->H. Each iteration consumes 64 bytes — two
// 16-byte Poly1305 blocks per 128-bit lane — computing
//   H = H*[r^4,r^4] + M01*[r^2,r^2] + M23
// with the precomputed powers st->P[0] (r^4) and st->P[1] (r^2). Limbs are
// radix-2^26 (5 limbs per lane, masked with MMASK); HIBIT sets the 2^128
// padding bit on each message block.
static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m,
                            size_t bytes) {
  const xmmi MMASK =
      _mm_load_si128((const xmmi *)poly1305_x64_sse2_message_mask);
  const xmmi FIVE = _mm_load_si128((const xmmi *)poly1305_x64_sse2_5);
  const xmmi HIBIT = _mm_load_si128((const xmmi *)poly1305_x64_sse2_1shl128);
  poly1305_power *p;
  xmmi H0, H1, H2, H3, H4;
  xmmi T0, T1, T2, T3, T4, T5, T6;
  xmmi M0, M1, M2, M3, M4;
  xmmi C1, C2;

  H0 = st->H[0]; H1 = st->H[1]; H2 = st->H[2]; H3 = st->H[3]; H4 = st->H[4];

  while (bytes >= 64) {
    // H *= [r^4,r^4]
    p = &st->P[0];
    T0 = _mm_mul_epu32(H0, p->R20.v); T1 = _mm_mul_epu32(H0, p->R21.v);
    T2 = _mm_mul_epu32(H0, p->R22.v); T3 = _mm_mul_epu32(H0, p->R23.v);
    T4 = _mm_mul_epu32(H0, p->R24.v);
    T5 = _mm_mul_epu32(H1, p->S24.v); T6 = _mm_mul_epu32(H1, p->R20.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H2, p->S23.v); T6 = _mm_mul_epu32(H2, p->S24.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H3, p->S22.v); T6 = _mm_mul_epu32(H3, p->S23.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H4, p->S21.v); T6 = _mm_mul_epu32(H4, p->S22.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H1, p->R21.v); T6 = _mm_mul_epu32(H1, p->R22.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H2, p->R20.v); T6 = _mm_mul_epu32(H2, p->R21.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H3, p->S24.v); T6 = _mm_mul_epu32(H3, p->R20.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H4, p->S23.v); T6 = _mm_mul_epu32(H4, p->S24.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H1, p->R23.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H2, p->R22.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H3, p->R21.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5);

    // H += [Mx,My]*[r^2,r^2]
    // Load the first two 16-byte blocks and split into 5x26-bit limbs.
    T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)),
                            _mm_loadl_epi64((const xmmi *)(m + 16)));
    T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)),
                            _mm_loadl_epi64((const xmmi *)(m + 24)));
    M0 = _mm_and_si128(MMASK, T5);
    M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
    M2 = _mm_and_si128(MMASK, T5);
    M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);
    p = &st->P[1];
    T5 = _mm_mul_epu32(M0, p->R20.v); T6 = _mm_mul_epu32(M0, p->R21.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(M1, p->S24.v); T6 = _mm_mul_epu32(M1, p->R20.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(M2, p->S23.v); T6 = _mm_mul_epu32(M2, p->S24.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(M3, p->S22.v); T6 = _mm_mul_epu32(M3, p->S23.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(M4, p->S21.v); T6 = _mm_mul_epu32(M4, p->S22.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(M0, p->R22.v); T6 = _mm_mul_epu32(M0, p->R23.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(M1, p->R21.v); T6 = _mm_mul_epu32(M1, p->R22.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(M2, p->R20.v); T6 = _mm_mul_epu32(M2, p->R21.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(M3, p->S24.v); T6 = _mm_mul_epu32(M3, p->R20.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(M4, p->S23.v); T6 = _mm_mul_epu32(M4, p->S24.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(M0, p->R24.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(M1, p->R23.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(M2, p->R22.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(M3, p->R21.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(M4, p->R20.v); T4 = _mm_add_epi64(T4, T5);

    // H += [Mx,My]
    // Load the second two 16-byte blocks; they are added in unmultiplied.
    T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 32)),
                            _mm_loadl_epi64((const xmmi *)(m + 48)));
    T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 40)),
                            _mm_loadl_epi64((const xmmi *)(m + 56)));
    M0 = _mm_and_si128(MMASK, T5);
    M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
    M2 = _mm_and_si128(MMASK, T5);
    M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);
    T0 = _mm_add_epi64(T0, M0); T1 = _mm_add_epi64(T1, M1);
    T2 = _mm_add_epi64(T2, M2); T3 = _mm_add_epi64(T3, M3);
    T4 = _mm_add_epi64(T4, M4);

    // reduce
    // One carry pass through the limbs; a carry out of the top limb folds
    // back into limb 0 multiplied by 5 (since 2^130 = 5 mod p).
    C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26);
    T0 = _mm_and_si128(T0, MMASK); T3 = _mm_and_si128(T3, MMASK);
    T1 = _mm_add_epi64(T1, C1); T4 = _mm_add_epi64(T4, C2);
    C1 = _mm_srli_epi64(T1, 26); C2 = _mm_srli_epi64(T4, 26);
    T1 = _mm_and_si128(T1, MMASK); T4 = _mm_and_si128(T4, MMASK);
    T2 = _mm_add_epi64(T2, C1);
    T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
    C1 = _mm_srli_epi64(T2, 26); C2 = _mm_srli_epi64(T0, 26);
    T2 = _mm_and_si128(T2, MMASK); T0 = _mm_and_si128(T0, MMASK);
    T3 = _mm_add_epi64(T3, C1); T1 = _mm_add_epi64(T1, C2);
    C1 = _mm_srli_epi64(T3, 26);
    T3 = _mm_and_si128(T3, MMASK);
    T4 = _mm_add_epi64(T4, C1);

    // H = (H*[r^4,r^4] + [Mx,My]*[r^2,r^2] + [Mx,My])
    H0 = T0; H1 = T1; H2 = T2; H3 = T3; H4 = T4;

    m += 64;
    bytes -= 64;
  }

  st->H[0] = H0; st->H[1] = H1; st->H[2] = H2; st->H[3] = H3; st->H[4] = H4;
}

// poly1305_combine folds the two SIMD lanes back into a scalar accumulator.
// If at least 32 bytes remain it first absorbs one more 32-byte chunk
// (H = H*[r^2,r^2] + M). It then builds the odd power [r^2,r] in place in
// st->P[1].d[2], multiplies, adds the lanes together, fully carries the
// radix-2^26 limbs, and stores the 130-bit result in st->HH as three
// 44/44/42-bit limbs for the scalar ("donna") finisher. Returns how many
// bytes of |m| were consumed (0 or 32).
static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m,
                               size_t bytes) {
  const xmmi MMASK =
      _mm_load_si128((const xmmi *)poly1305_x64_sse2_message_mask);
  const xmmi HIBIT = _mm_load_si128((const xmmi *)poly1305_x64_sse2_1shl128);
  const xmmi FIVE = _mm_load_si128((const xmmi *)poly1305_x64_sse2_5);

  poly1305_power *p;
  xmmi H0, H1, H2, H3, H4;
  xmmi M0, M1, M2, M3, M4;
  xmmi T0, T1, T2, T3, T4, T5, T6;
  xmmi C1, C2;

  uint64_t r0, r1, r2;
  uint64_t t0, t1, t2, t3, t4;
  uint64_t c;
  size_t consumed = 0;

  H0 = st->H[0]; H1 = st->H[1]; H2 = st->H[2]; H3 = st->H[3]; H4 = st->H[4];

  // p = [r^2,r^2]
  p = &st->P[1];

  if (bytes >= 32) {
    // H *= [r^2,r^2]
    T0 = _mm_mul_epu32(H0, p->R20.v); T1 = _mm_mul_epu32(H0, p->R21.v);
    T2 = _mm_mul_epu32(H0, p->R22.v); T3 = _mm_mul_epu32(H0, p->R23.v);
    T4 = _mm_mul_epu32(H0, p->R24.v);
    T5 = _mm_mul_epu32(H1, p->S24.v); T6 = _mm_mul_epu32(H1, p->R20.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H2, p->S23.v); T6 = _mm_mul_epu32(H2, p->S24.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H3, p->S22.v); T6 = _mm_mul_epu32(H3, p->S23.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H4, p->S21.v); T6 = _mm_mul_epu32(H4, p->S22.v);
    T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H1, p->R21.v); T6 = _mm_mul_epu32(H1, p->R22.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H2, p->R20.v); T6 = _mm_mul_epu32(H2, p->R21.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H3, p->S24.v); T6 = _mm_mul_epu32(H3, p->R20.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H4, p->S23.v); T6 = _mm_mul_epu32(H4, p->S24.v);
    T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H1, p->R23.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H2, p->R22.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H3, p->R21.v); T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5);

    // H += [Mx,My]
    T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)),
                            _mm_loadl_epi64((const xmmi *)(m + 16)));
    T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)),
                            _mm_loadl_epi64((const xmmi *)(m + 24)));
    M0 = _mm_and_si128(MMASK, T5);
    M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
    M2 = _mm_and_si128(MMASK, T5);
    M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);
    T0 = _mm_add_epi64(T0, M0); T1 = _mm_add_epi64(T1, M1);
    T2 = _mm_add_epi64(T2, M2); T3 = _mm_add_epi64(T3, M3);
    T4 = _mm_add_epi64(T4, M4);

    // reduce
    C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26);
    T0 = _mm_and_si128(T0, MMASK); T3 = _mm_and_si128(T3, MMASK);
    T1 = _mm_add_epi64(T1, C1); T4 = _mm_add_epi64(T4, C2);
    C1 = _mm_srli_epi64(T1, 26); C2 = _mm_srli_epi64(T4, 26);
    T1 = _mm_and_si128(T1, MMASK); T4 = _mm_and_si128(T4, MMASK);
    T2 = _mm_add_epi64(T2, C1);
    T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
    C1 = _mm_srli_epi64(T2, 26); C2 = _mm_srli_epi64(T0, 26);
    T2 = _mm_and_si128(T2, MMASK); T0 = _mm_and_si128(T0, MMASK);
    T3 = _mm_add_epi64(T3, C1); T1 = _mm_add_epi64(T1, C2);
    C1 = _mm_srli_epi64(T3, 26);
    T3 = _mm_and_si128(T3, MMASK);
    T4 = _mm_add_epi64(T4, C1);

    // H = (H*[r^2,r^2] + [Mx,My])
    H0 = T0; H1 = T1; H2 = T2; H3 = T3; H4 = T4;

    consumed = 32;
  }

  // finalize, H *= [r^2,r]
  // Reassemble r from the 44-bit limbs stored in d[1]/d[3] of P[1], then
  // write r's 26-bit limbs into lane slot d[2], turning P[1] into [r^2,r].
  r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1];
  r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1];
  r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1];

  p->R20.d[2] = (uint32_t)(r0) & 0x3ffffff;
  p->R21.d[2] = (uint32_t)((r0 >> 26) | (r1 << 18)) & 0x3ffffff;
  p->R22.d[2] = (uint32_t)((r1 >> 8)) & 0x3ffffff;
  p->R23.d[2] = (uint32_t)((r1 >> 34) | (r2 << 10)) & 0x3ffffff;
  p->R24.d[2] = (uint32_t)((r2 >> 16));
  p->S21.d[2] = p->R21.d[2] * 5;
  p->S22.d[2] = p->R22.d[2] * 5;
  p->S23.d[2] = p->R23.d[2] * 5;
  p->S24.d[2] = p->R24.d[2] * 5;

  // H *= [r^2,r]
  T0 = _mm_mul_epu32(H0, p->R20.v); T1 = _mm_mul_epu32(H0, p->R21.v);
  T2 = _mm_mul_epu32(H0, p->R22.v); T3 = _mm_mul_epu32(H0, p->R23.v);
  T4 = _mm_mul_epu32(H0, p->R24.v);
  T5 = _mm_mul_epu32(H1, p->S24.v); T6 = _mm_mul_epu32(H1, p->R20.v);
  T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
  T5 = _mm_mul_epu32(H2, p->S23.v); T6 = _mm_mul_epu32(H2, p->S24.v);
  T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
  T5 = _mm_mul_epu32(H3, p->S22.v); T6 = _mm_mul_epu32(H3, p->S23.v);
  T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
  T5 = _mm_mul_epu32(H4, p->S21.v); T6 = _mm_mul_epu32(H4, p->S22.v);
  T0 = _mm_add_epi64(T0, T5); T1 = _mm_add_epi64(T1, T6);
  T5 = _mm_mul_epu32(H1, p->R21.v); T6 = _mm_mul_epu32(H1, p->R22.v);
  T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
  T5 = _mm_mul_epu32(H2, p->R20.v); T6 = _mm_mul_epu32(H2, p->R21.v);
  T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
  T5 = _mm_mul_epu32(H3, p->S24.v); T6 = _mm_mul_epu32(H3, p->R20.v);
  T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
  T5 = _mm_mul_epu32(H4, p->S23.v); T6 = _mm_mul_epu32(H4, p->S24.v);
  T2 = _mm_add_epi64(T2, T5); T3 = _mm_add_epi64(T3, T6);
  T5 = _mm_mul_epu32(H1, p->R23.v); T4 = _mm_add_epi64(T4, T5);
  T5 = _mm_mul_epu32(H2, p->R22.v); T4 = _mm_add_epi64(T4, T5);
  T5 = _mm_mul_epu32(H3, p->R21.v); T4 = _mm_add_epi64(T4, T5);
  T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5);

  C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26);
  T0 = _mm_and_si128(T0, MMASK); T3 = _mm_and_si128(T3, MMASK);
  T1 = _mm_add_epi64(T1, C1); T4 = _mm_add_epi64(T4, C2);
  C1 = _mm_srli_epi64(T1, 26); C2 = _mm_srli_epi64(T4, 26);
  T1 = _mm_and_si128(T1, MMASK); T4 = _mm_and_si128(T4, MMASK);
  T2 = _mm_add_epi64(T2, C1);
  T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
  C1 = _mm_srli_epi64(T2, 26); C2 = _mm_srli_epi64(T0, 26);
  T2 = _mm_and_si128(T2, MMASK); T0 = _mm_and_si128(T0, MMASK);
  T3 = _mm_add_epi64(T3, C1); T1 = _mm_add_epi64(T1, C2);
  C1 = _mm_srli_epi64(T3, 26);
  T3 = _mm_and_si128(T3, MMASK);
  T4 = _mm_add_epi64(T4, C1);

  // H = H[0]+H[1]
  H0 = _mm_add_epi64(T0, _mm_srli_si128(T0, 8));
  H1 = _mm_add_epi64(T1, _mm_srli_si128(T1, 8));
  H2 = _mm_add_epi64(T2, _mm_srli_si128(T2, 8));
  H3 = _mm_add_epi64(T3, _mm_srli_si128(T3, 8));
  H4 = _mm_add_epi64(T4, _mm_srli_si128(T4, 8));

  // Fully carry the scalar radix-2^26 limbs.
  t0 = _mm_cvtsi128_si32(H0); c = (t0 >> 26); t0 &= 0x3ffffff;
  t1 = _mm_cvtsi128_si32(H1) + c; c = (t1 >> 26); t1 &= 0x3ffffff;
  t2 = _mm_cvtsi128_si32(H2) + c; c = (t2 >> 26); t2 &= 0x3ffffff;
  t3 = _mm_cvtsi128_si32(H3) + c; c = (t3 >> 26); t3 &= 0x3ffffff;
  t4 = _mm_cvtsi128_si32(H4) + c; c = (t4 >> 26); t4 &= 0x3ffffff;
  t0 = t0 + (c * 5); c = (t0 >> 26); t0 &= 0x3ffffff;
  t1 = t1 + c;

  // Repack into three 44/44/42-bit limbs for the scalar finisher.
  st->HH[0] = ((t0) | (t1 << 26)) & UINT64_C(0xfffffffffff);
  st->HH[1] = ((t1 >> 18) | (t2 << 8) | (t3 << 34)) & UINT64_C(0xfffffffffff);
  st->HH[2] = ((t3 >> 10) | (t4 << 16)) & UINT64_C(0x3ffffffffff);

  return consumed;
}

// CRYPTO_poly1305_update buffers input until 32 bytes are available to start
// the SIMD path (poly1305_first_block), then feeds complete 64-byte chunks to
// poly1305_blocks, keeping any remainder (< 64 bytes) in st->buffer.
void CRYPTO_poly1305_update(poly1305_state *state, const uint8_t *m,
                            size_t bytes) {
  poly1305_state_internal *st = poly1305_aligned_state(state);
  size_t want;

  // Work around a C language bug. See https://crbug.com/1019588.
  if (bytes == 0) {
    return;
  }

  // need at least 32 initial bytes to start the accelerated branch
  if (!st->started) {
    if ((st->leftover == 0) && (bytes > 32)) {
      poly1305_first_block(st, m);
      m += 32;
      bytes -= 32;
    } else {
      want = poly1305_min(32 - st->leftover, bytes);
      OPENSSL_memcpy(st->buffer + st->leftover, m, want);
      bytes -= want;
      m += want;
      st->leftover += want;
      if ((st->leftover < 32) || (bytes == 0)) {
        return;
      }
      poly1305_first_block(st, st->buffer);
      st->leftover = 0;
    }
    st->started = 1;
  }

  // handle leftover
  if (st->leftover) {
    want = poly1305_min(64 - st->leftover, bytes);
    OPENSSL_memcpy(st->buffer + st->leftover, m, want);
    bytes -= want;
    m += want;
    st->leftover += want;
    if (st->leftover < 64) {
      return;
    }
    poly1305_blocks(st, st->buffer, 64);
    st->leftover = 0;
  }

  // process 64 byte blocks
  if (bytes >= 64) {
    want = (bytes & ~63);
    poly1305_blocks(st, m, want);
    m += want;
    bytes -= want;
  }

  if (bytes) {
    OPENSSL_memcpy(st->buffer + st->leftover, m, bytes);
    st->leftover += bytes;
  }
}

// CRYPTO_poly1305_finish drains the SIMD accumulator via poly1305_combine,
// absorbs any trailing bytes with a scalar radix-2^44 ("donna") loop, performs
// the final reduction mod 2^130-5 with a branchless masked select (c/nc), adds
// the encrypted pad stored in P[1].R23/R24, and writes the 16-byte tag.
void CRYPTO_poly1305_finish(poly1305_state *state, uint8_t mac[16]) {
  poly1305_state_internal *st = poly1305_aligned_state(state);
  size_t leftover = st->leftover;
  uint8_t *m = st->buffer;
  uint128_t d[3];
  uint64_t h0, h1, h2;
  uint64_t t0, t1;
  uint64_t g0, g1, g2, c, nc;
  uint64_t r0, r1, r2, s1, s2;
  poly1305_power *p;

  if (st->started) {
    size_t consumed = poly1305_combine(st, m, leftover);
    leftover -= consumed;
    m += consumed;
  }

  // st->HH will either be 0 or have the combined result
  h0 = st->HH[0];
  h1 = st->HH[1];
  h2 = st->HH[2];

  p = &st->P[1];
  r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1];
  r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1];
  r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1];
  s1 = r1 * (5 << 2);
  s2 = r2 * (5 << 2);

  if (leftover < 16) {
    goto poly1305_donna_atmost15bytes;
  }

poly1305_donna_atleast16bytes:
  t0 = CRYPTO_load_u64_le(m + 0);
  t1 = CRYPTO_load_u64_le(m + 8);
  h0 += t0 & 0xfffffffffff;
  t0 = shr128_pair(t1, t0, 44);
  h1 += t0 & 0xfffffffffff;
  h2 += (t1 >> 24) | ((uint64_t)1 << 40);  // full block: set the 2^128 pad bit

poly1305_donna_mul:
  d[0] = add128(add128(mul64x64_128(h0, r0), mul64x64_128(h1, s2)),
                mul64x64_128(h2, s1));
  d[1] = add128(add128(mul64x64_128(h0, r1), mul64x64_128(h1, r0)),
                mul64x64_128(h2, s2));
  d[2] = add128(add128(mul64x64_128(h0, r2), mul64x64_128(h1, r1)),
                mul64x64_128(h2, r0));
  h0 = lo128(d[0]) & 0xfffffffffff;
  c = shr128(d[0], 44);
  d[1] = add128_64(d[1], c);
  h1 = lo128(d[1]) & 0xfffffffffff;
  c = shr128(d[1], 44);
  d[2] = add128_64(d[2], c);
  h2 = lo128(d[2]) & 0x3ffffffffff;
  c = shr128(d[2], 42);
  h0 += c * 5;

  m += 16;
  leftover -= 16;
  if (leftover >= 16) {
    goto poly1305_donna_atleast16bytes;
  }

// final bytes
poly1305_donna_atmost15bytes:
  if (!leftover) {
    goto poly1305_donna_finish;
  }

  // Partial block: append the 0x01 pad byte, then zero-fill to 16 bytes.
  m[leftover++] = 1;
  OPENSSL_memset(m + leftover, 0, 16 - leftover);
  leftover = 16;

  t0 = CRYPTO_load_u64_le(m + 0);
  t1 = CRYPTO_load_u64_le(m + 8);
  h0 += t0 & 0xfffffffffff;
  t0 = shr128_pair(t1, t0, 44);
  h1 += t0 & 0xfffffffffff;
  h2 += (t1 >> 24);  // no 2^128 bit — the pad byte is already in the block

  goto poly1305_donna_mul;

poly1305_donna_finish:
  c = (h0 >> 44);
  h0 &= 0xfffffffffff;
  h1 += c;
  c = (h1 >> 44);
  h1 &= 0xfffffffffff;
  h2 += c;
  c = (h2 >> 42);
  h2 &= 0x3ffffffffff;
  h0 += c * 5;

  // Compute h - p; select h or h - p without branching on secret data.
  g0 = h0 + 5;
  c = (g0 >> 44);
  g0 &= 0xfffffffffff;
  g1 = h1 + c;
  c = (g1 >> 44);
  g1 &= 0xfffffffffff;
  g2 = h2 + c - ((uint64_t)1 << 42);

  c = (g2 >> 63) - 1;  // all-ones if h >= p, else zero
  nc = ~c;
  h0 = (h0 & nc) | (g0 & c);
  h1 = (h1 & nc) | (g1 & c);
  h2 = (h2 & nc) | (g2 & c);

  // pad
  t0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1];
  t1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1];
  h0 += (t0 & 0xfffffffffff);
  c = (h0 >> 44);
  h0 &= 0xfffffffffff;
  t0 = shr128_pair(t1, t0, 44);
  h1 += (t0 & 0xfffffffffff) + c;
  c = (h1 >> 44);
  h1 &= 0xfffffffffff;
  t1 = (t1 >> 24);
  h2 += (t1) + c;

  CRYPTO_store_u64_le(mac + 0, ((h0) | (h1 << 44)));
  CRYPTO_store_u64_le(mac + 8, ((h1 >> 20) | (h2 << 24)));
}

#endif  // BORINGSSL_HAS_UINT128 && OPENSSL_X86_64

================================================
FILE: Sources/CNIOBoringSSL/crypto/pool/internal.h
================================================

/* Copyright 2016 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

#ifndef OPENSSL_HEADER_POOL_INTERNAL_H
#define OPENSSL_HEADER_POOL_INTERNAL_H

// NOTE(review): the targets of the bare `#include` directives below were lost
// in extraction (`#include <...>` rendered as bare `#include`); restore them
// from upstream BoringSSL before compiling.
#include
#include

#include "../internal.h"
#include "../lhash/internal.h"


#if defined(__cplusplus)
extern "C" {
#endif


DEFINE_LHASH_OF(CRYPTO_BUFFER)

// A reference-counted byte buffer, optionally deduplicated through a pool.
struct crypto_buffer_st {
  CRYPTO_BUFFER_POOL *pool;      // owning pool, or NULL if unpooled
  uint8_t *data;                 // contents; not owned when |data_is_static|
  size_t len;                    // length of |data| in bytes
  CRYPTO_refcount_t references;  // reference count
  int data_is_static;            // non-zero if |data| is caller-owned storage
};

// A content-addressed set of CRYPTO_BUFFERs used for deduplication.
struct crypto_buffer_pool_st {
  LHASH_OF(CRYPTO_BUFFER) *bufs;  // contents -> buffer; holds no references
  CRYPTO_MUTEX lock;              // guards |bufs|
  const uint64_t hash_key[2];     // SipHash key, randomized at pool creation
};


#if defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_POOL_INTERNAL_H

================================================
FILE: Sources/CNIOBoringSSL/crypto/pool/pool.cc
================================================

/* Copyright 2016 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the targets of the bare `#include` directives below were lost
// in extraction; restore them from upstream BoringSSL. The same extraction
// damage stripped the template arguments from every `reinterpret_cast<...>`
// in this file — each appears as a bare `reinterpret_cast(`; the intended
// type is the declared type of the variable being initialized.
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../internal.h"
#include "internal.h"


// Hashes a buffer's contents with its pool's per-pool SipHash key. Only
// meaningful for buffers that belong to a pool (|buf->pool| is dereferenced).
static uint32_t CRYPTO_BUFFER_hash(const CRYPTO_BUFFER *buf) {
  return (uint32_t)SIPHASH_24(buf->pool->hash_key, buf->data, buf->len);
}

// Contents-equality comparator for the pool's hash table. Returns zero when
// the two buffers have identical contents.
static int CRYPTO_BUFFER_cmp(const CRYPTO_BUFFER *a, const CRYPTO_BUFFER *b) {
  // Only |CRYPTO_BUFFER|s from the same pool have compatible hashes.
  assert(a->pool != NULL);
  assert(a->pool == b->pool);
  if (a->len != b->len) {
    return 1;
  }
  return OPENSSL_memcmp(a->data, b->data, a->len);
}

// Allocates an empty pool with a freshly randomized SipHash key. Returns NULL
// on allocation failure.
CRYPTO_BUFFER_POOL *CRYPTO_BUFFER_POOL_new(void) {
  CRYPTO_BUFFER_POOL *pool = reinterpret_cast(
      OPENSSL_zalloc(sizeof(CRYPTO_BUFFER_POOL)));
  if (pool == NULL) {
    return NULL;
  }

  pool->bufs = lh_CRYPTO_BUFFER_new(CRYPTO_BUFFER_hash, CRYPTO_BUFFER_cmp);
  if (pool->bufs == NULL) {
    OPENSSL_free(pool);
    return NULL;
  }

  CRYPTO_MUTEX_init(&pool->lock);
  // |hash_key| is declared const; the cast initializes it once at creation.
  RAND_bytes((uint8_t *)&pool->hash_key, sizeof(pool->hash_key));

  return pool;
}

// Frees the pool. In debug builds, asserts that every buffer has already been
// released (the table holds no references, so it must be empty here).
void CRYPTO_BUFFER_POOL_free(CRYPTO_BUFFER_POOL *pool) {
  if (pool == NULL) {
    return;
  }

#if !defined(NDEBUG)
  CRYPTO_MUTEX_lock_write(&pool->lock);
  assert(lh_CRYPTO_BUFFER_num_items(pool->bufs) == 0);
  CRYPTO_MUTEX_unlock_write(&pool->lock);
#endif

  lh_CRYPTO_BUFFER_free(pool->bufs);
  CRYPTO_MUTEX_cleanup(&pool->lock);
  OPENSSL_free(pool);
}

// Releases a buffer's storage. Static data is caller-owned and is not freed.
static void crypto_buffer_free_object(CRYPTO_BUFFER *buf) {
  if (!buf->data_is_static) {
    OPENSSL_free(buf->data);
  }
  OPENSSL_free(buf);
}

// Creates (or dedups) a buffer. With a pool: first a read-locked lookup for an
// existing match; on miss, allocate, then re-check under the write lock before
// inserting, since another thread may have inserted the same contents in
// between. A static-data buffer always displaces a non-static duplicate.
static CRYPTO_BUFFER *crypto_buffer_new(const uint8_t *data, size_t len,
                                        int data_is_static,
                                        CRYPTO_BUFFER_POOL *pool) {
  if (pool != NULL) {
    CRYPTO_BUFFER tmp;
    tmp.data = (uint8_t *)data;
    tmp.len = len;
    tmp.pool = pool;

    CRYPTO_MUTEX_lock_read(&pool->lock);
    CRYPTO_BUFFER *duplicate = lh_CRYPTO_BUFFER_retrieve(pool->bufs, &tmp);
    if (data_is_static && duplicate != NULL && !duplicate->data_is_static) {
      // If the new |CRYPTO_BUFFER| would have static data, but the duplicate
      // does not, we replace the old one with the new static version.
      duplicate = NULL;
    }
    if (duplicate != NULL) {
      CRYPTO_refcount_inc(&duplicate->references);
    }
    CRYPTO_MUTEX_unlock_read(&pool->lock);
    if (duplicate != NULL) {
      return duplicate;
    }
  }

  CRYPTO_BUFFER *const buf =
      reinterpret_cast(OPENSSL_zalloc(sizeof(CRYPTO_BUFFER)));
  if (buf == NULL) {
    return NULL;
  }

  if (data_is_static) {
    buf->data = (uint8_t *)data;
    buf->data_is_static = 1;
  } else {
    buf->data = reinterpret_cast(OPENSSL_memdup(data, len));
    if (len != 0 && buf->data == NULL) {
      OPENSSL_free(buf);
      return NULL;
    }
  }

  buf->len = len;
  buf->references = 1;

  if (pool == NULL) {
    return buf;
  }

  buf->pool = pool;

  CRYPTO_MUTEX_lock_write(&pool->lock);
  CRYPTO_BUFFER *duplicate = lh_CRYPTO_BUFFER_retrieve(pool->bufs, buf);
  if (data_is_static && duplicate != NULL && !duplicate->data_is_static) {
    // If the new |CRYPTO_BUFFER| would have static data, but the duplicate does
    // not, we replace the old one with the new static version.
    duplicate = NULL;
  }
  int inserted = 0;
  if (duplicate == NULL) {
    CRYPTO_BUFFER *old = NULL;
    inserted = lh_CRYPTO_BUFFER_insert(pool->bufs, &old, buf);
    // |old| may be non-NULL if a match was found but ignored. |pool->bufs| does
    // not increment refcounts, so there is no need to clean up after the
    // replacement.
  } else {
    CRYPTO_refcount_inc(&duplicate->references);
  }
  CRYPTO_MUTEX_unlock_write(&pool->lock);

  if (!inserted) {
    // We raced to insert |buf| into the pool and lost, or else there was an
    // error inserting.
    crypto_buffer_free_object(buf);
    return duplicate;
  }

  return buf;
}

CRYPTO_BUFFER *CRYPTO_BUFFER_new(const uint8_t *data, size_t len,
                                 CRYPTO_BUFFER_POOL *pool) {
  return crypto_buffer_new(data, len, /*data_is_static=*/0, pool);
}

// Allocates an unpooled, uninitialized buffer of |len| bytes and exposes its
// storage through |*out_data| so the caller can fill it in.
CRYPTO_BUFFER *CRYPTO_BUFFER_alloc(uint8_t **out_data, size_t len) {
  CRYPTO_BUFFER *const buf =
      reinterpret_cast(OPENSSL_zalloc(sizeof(CRYPTO_BUFFER)));
  if (buf == NULL) {
    return NULL;
  }

  buf->data = reinterpret_cast(OPENSSL_malloc(len));
  if (len != 0 && buf->data == NULL) {
    OPENSSL_free(buf);
    return NULL;
  }
  buf->len = len;
  buf->references = 1;

  *out_data = buf->data;
  return buf;
}

CRYPTO_BUFFER *CRYPTO_BUFFER_new_from_CBS(const CBS *cbs,
                                          CRYPTO_BUFFER_POOL *pool) {
  return CRYPTO_BUFFER_new(CBS_data(cbs), CBS_len(cbs), pool);
}

CRYPTO_BUFFER *CRYPTO_BUFFER_new_from_static_data_unsafe(
    const uint8_t *data, size_t len, CRYPTO_BUFFER_POOL *pool) {
  return crypto_buffer_new(data, len, /*data_is_static=*/1, pool);
}

// Drops a reference. Unpooled buffers use plain refcounting; pooled buffers
// take the pool's write lock so that decrement-to-zero, table removal, and
// freeing are atomic with respect to concurrent lookups.
void CRYPTO_BUFFER_free(CRYPTO_BUFFER *buf) {
  if (buf == NULL) {
    return;
  }

  CRYPTO_BUFFER_POOL *const pool = buf->pool;
  if (pool == NULL) {
    if (CRYPTO_refcount_dec_and_test_zero(&buf->references)) {
      // If a reference count of zero is observed, there cannot be a reference
      // from any pool to this buffer and thus we are able to free this
      // buffer.
      crypto_buffer_free_object(buf);
    }
    return;
  }

  CRYPTO_MUTEX_lock_write(&pool->lock);
  if (!CRYPTO_refcount_dec_and_test_zero(&buf->references)) {
    CRYPTO_MUTEX_unlock_write(&buf->pool->lock);
    return;
  }

  // We have an exclusive lock on the pool, therefore no concurrent lookups can
  // find this buffer and increment the reference count. Thus, if the count is
  // zero there are and can never be any more references and thus we can free
  // this buffer.
  //
  // Note it is possible |buf| is no longer in the pool, if it was replaced by a
  // static version. If that static version was since removed, it is even
  // possible for |found| to be NULL.
  CRYPTO_BUFFER *found = lh_CRYPTO_BUFFER_retrieve(pool->bufs, buf);
  if (found == buf) {
    found = lh_CRYPTO_BUFFER_delete(pool->bufs, buf);
    assert(found == buf);
    (void)found;
  }

  CRYPTO_MUTEX_unlock_write(&buf->pool->lock);
  crypto_buffer_free_object(buf);
}

int CRYPTO_BUFFER_up_ref(CRYPTO_BUFFER *buf) {
  // This is safe in the case that |buf->pool| is NULL because it's just
  // standard reference counting in that case.
  //
  // This is also safe if |buf->pool| is non-NULL because, if it were racing
  // with |CRYPTO_BUFFER_free| then the two callers must have independent
  // references already and so the reference count will never hit zero.
  CRYPTO_refcount_inc(&buf->references);
  return 1;
}

const uint8_t *CRYPTO_BUFFER_data(const CRYPTO_BUFFER *buf) {
  return buf->data;
}

size_t CRYPTO_BUFFER_len(const CRYPTO_BUFFER *buf) { return buf->len; }

void CRYPTO_BUFFER_init_CBS(const CRYPTO_BUFFER *buf, CBS *out) {
  CBS_init(out, buf->data, buf->len);
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/rand/deterministic.cc
================================================

/* Copyright 2016 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

// NOTE(review): the targets of the bare `#include` directives in this file
// were lost in extraction; restore them from upstream BoringSSL.
#include

#include "../bcm_support.h"
#include "sysrand_internal.h"

#if defined(OPENSSL_RAND_DETERMINISTIC)

#include
#include

#include "../internal.h"

// g_num_calls is the number of calls to |CRYPTO_sysrand| that have occurred.
// It is read and incremented while holding |g_num_calls_lock|.
//
// NOTE(review): an earlier version of this comment described the counter as
// "intentionally not thread-safe"; that no longer matches the mutex-guarded
// code below — the comment was stale, not the code.
static uint64_t g_num_calls = 0;
static CRYPTO_MUTEX g_num_calls_lock = CRYPTO_MUTEX_INIT;

// Resets the deterministic stream so that fuzzing runs are reproducible.
void RAND_reset_for_fuzzing(void) { g_num_calls = 0; }

void CRYPTO_init_sysrand(void) {}

// Fills |out| with |requested| deterministic bytes: the ChaCha20 keystream
// under an all-zero key, with the per-call counter copied into the first
// 8 bytes of the 12-byte nonce so each call yields a distinct stream.
void CRYPTO_sysrand(uint8_t *out, size_t requested) {
  static const uint8_t kZeroKey[32] = {0};

  CRYPTO_MUTEX_lock_write(&g_num_calls_lock);
  uint64_t num_calls = g_num_calls++;
  CRYPTO_MUTEX_unlock_write(&g_num_calls_lock);

  uint8_t nonce[12];
  OPENSSL_memset(nonce, 0, sizeof(nonce));
  OPENSSL_memcpy(nonce, &num_calls, sizeof(num_calls));
  // Encrypting zeros in place produces the raw keystream.
  OPENSSL_memset(out, 0, requested);
  CRYPTO_chacha_20(out, out, requested, kZeroKey, nonce, 0);
}

int CRYPTO_sysrand_if_available(uint8_t *buf, size_t len) {
  CRYPTO_sysrand(buf, len);
  return 1;
}

void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) {
  CRYPTO_sysrand(out, requested);
}

#endif  // OPENSSL_RAND_DETERMINISTIC

================================================
FILE: Sources/CNIOBoringSSL/crypto/rand/fork_detect.cc
================================================

/* Copyright 2020 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#if !defined(_GNU_SOURCE)
#define _GNU_SOURCE  // needed for madvise() and MAP_ANONYMOUS on Linux.
#endif

#include "../bcm_support.h"

#if defined(OPENSSL_FORK_DETECTION_MADVISE)
// NOTE(review): the targets of the bare `#include` directives in this file
// were lost in extraction; restore them from upstream BoringSSL.
#include
#include
#include
#include
#if defined(MADV_WIPEONFORK)
static_assert(MADV_WIPEONFORK == 18, "MADV_WIPEONFORK is not 18");
#else
#define MADV_WIPEONFORK 18
#endif
#elif defined(OPENSSL_FORK_DETECTION_PTHREAD_ATFORK)
#include
#include
#include
#endif  // OPENSSL_FORK_DETECTION_PTHREAD_ATFORK

#include "../internal.h"

#if defined(OPENSSL_FORK_DETECTION_MADVISE)
// Test-only overrides; see
// |CRYPTO_fork_detect_force_madv_wipeonfork_for_testing| below.
static int g_force_madv_wipeonfork;
static int g_force_madv_wipeonfork_enabled;
static CRYPTO_once_t g_fork_detect_once = CRYPTO_ONCE_INIT;
static CRYPTO_MUTEX g_fork_detect_lock = CRYPTO_MUTEX_INIT;
// Points at a MADV_WIPEONFORK page: non-zero in this address space, zeroed by
// the kernel in any forked child. NULL when detection is unavailable.
static CRYPTO_atomic_u32 *g_fork_detect_addr;
static uint64_t g_fork_generation;

// Maps one anonymous page, verifies that MADV_WIPEONFORK is actually honoured,
// and arms the detection flag. Leaves |g_fork_detect_addr| NULL on failure.
static void init_fork_detect(void) {
  if (g_force_madv_wipeonfork) {
    return;
  }

  long page_size = sysconf(_SC_PAGESIZE);
  if (page_size <= 0) {
    return;
  }

  void *addr = mmap(NULL, (size_t)page_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (addr == MAP_FAILED) {
    return;
  }

  // Some versions of qemu (up to at least 5.0.0-rc4, see linux-user/syscall.c)
  // ignore |madvise| calls and just return zero (i.e. success). But we need to
  // know whether MADV_WIPEONFORK actually took effect. Therefore try an
  // invalid call to check that the implementation of |madvise| is actually
  // rejecting unknown |advice| values.
  if (madvise(addr, (size_t)page_size, -1) == 0 ||
      madvise(addr, (size_t)page_size, MADV_WIPEONFORK) != 0) {
    munmap(addr, (size_t)page_size);
    return;
  }

  // NOTE(review): the template argument of this reinterpret_cast was lost in
  // extraction; restore it from upstream BoringSSL.
  CRYPTO_atomic_u32 *const atomic = reinterpret_cast(addr);
  CRYPTO_atomic_store_u32(atomic, 1);
  g_fork_detect_addr = atomic;
  g_fork_generation = 1;
}

// Returns a generation number that changes after this address space is
// duplicated: 0 when detection is unsupported (callers must assume a fork may
// have happened on every call), otherwise a non-zero counter that increments
// each time the wipe-on-fork flag is observed cleared.
uint64_t CRYPTO_get_fork_generation(void) {
  CRYPTO_once(&g_fork_detect_once, init_fork_detect);

  // In a single-threaded process, there are obviously no races because there's
  // only a single mutator in the address space.
  //
  // In a multi-threaded environment, |CRYPTO_once| ensures that the flag byte
  // is initialised atomically, even if multiple threads enter this function
  // concurrently.
  //
  // Additionally, while the kernel will only clear WIPEONFORK at a point when
  // a child process is single-threaded, the child may become multi-threaded
  // before it observes this. Therefore, we must synchronize the logic below.

  CRYPTO_atomic_u32 *const flag_ptr = g_fork_detect_addr;
  if (flag_ptr == NULL) {
    // Our kernel is too old to support |MADV_WIPEONFORK| or
    // |g_force_madv_wipeonfork| is set.
    if (g_force_madv_wipeonfork && g_force_madv_wipeonfork_enabled) {
      // A constant generation number to simulate support, even if the kernel
      // doesn't support it.
      return 42;
    }

    // With Linux and clone(), we do not believe that pthread_atfork() is
    // sufficient for detecting all forms of address space duplication. At this
    // point we have a kernel that does not support MADV_WIPEONFORK. We could
    // return the generation number from pthread_atfork() here and it would
    // probably be safe in almost any situation, but to ensure safety we return
    // 0 and force an entropy draw on every call.
    return 0;
  }

  // In the common case, try to observe the flag without taking a lock. This
  // avoids cacheline contention in the PRNG.
  uint64_t *const generation_ptr = &g_fork_generation;
  if (CRYPTO_atomic_load_u32(flag_ptr) != 0) {
    // If we observe a non-zero flag, it is safe to read |generation_ptr|
    // without a lock. The flag and generation number are fixed for this copy
    // of the address space.
    return *generation_ptr;
  }

  // The flag was zero. The generation number must be incremented, but other
  // threads may have concurrently observed the zero, so take a lock before
  // incrementing.
  CRYPTO_MUTEX *const lock = &g_fork_detect_lock;
  CRYPTO_MUTEX_lock_write(lock);
  uint64_t current_generation = *generation_ptr;
  if (CRYPTO_atomic_load_u32(flag_ptr) == 0) {
    // A fork has occurred.
    current_generation++;
    if (current_generation == 0) {
      // Zero means fork detection isn't supported, so skip that value.
      current_generation = 1;
    }

    // We must update |generation_ptr| before |flag_ptr|. Other threads may
    // observe |flag_ptr| without taking a lock.
    *generation_ptr = current_generation;
    CRYPTO_atomic_store_u32(flag_ptr, 1);
  }
  CRYPTO_MUTEX_unlock_write(lock);

  return current_generation;
}

void CRYPTO_fork_detect_force_madv_wipeonfork_for_testing(int on) {
  g_force_madv_wipeonfork = 1;
  g_force_madv_wipeonfork_enabled = on;
}

#elif defined(OPENSSL_FORK_DETECTION_PTHREAD_ATFORK)

static CRYPTO_once_t g_pthread_fork_detection_once = CRYPTO_ONCE_INIT;
static uint64_t g_atfork_fork_generation;

// pthread_atfork child handler: bump the generation, skipping the reserved
// value zero.
static void we_are_forked(void) {
  // Immediately after a fork, the process must be single-threaded.
  uint64_t value = g_atfork_fork_generation + 1;
  if (value == 0) {
    value = 1;
  }
  g_atfork_fork_generation = value;
}

static void init_pthread_fork_detection(void) {
  if (pthread_atfork(NULL, NULL, we_are_forked) != 0) {
    abort();
  }
  g_atfork_fork_generation = 1;
}

uint64_t CRYPTO_get_fork_generation(void) {
  CRYPTO_once(&g_pthread_fork_detection_once, init_pthread_fork_detection);
  return g_atfork_fork_generation;
}

#elif defined(OPENSSL_DOES_NOT_FORK)

// These platforms are guaranteed not to fork, and therefore do not require
// fork detection support. Returning a constant non zero value makes BoringSSL
// assume address space duplication is not a concern and adding entropy to
// every RAND_bytes call is not needed.
uint64_t CRYPTO_get_fork_generation(void) { return 0xc0ffee; }

#else

// These platforms may fork, but we do not have a mitigation mechanism in
// place. Returning a constant zero value makes BoringSSL assume that address
// space duplication could have occurred on any call, so entropy must be added
// to every RAND_bytes call.
uint64_t CRYPTO_get_fork_generation(void) { return 0; }

#endif

================================================
FILE: Sources/CNIOBoringSSL/crypto/rand/forkunsafe.cc
================================================

/* Copyright 2017 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

// NOTE(review): the targets of the bare `#include` directives below were lost
// in extraction; restore them from upstream BoringSSL.
#include
#include

#include "../fipsmodule/rand/internal.h"
#include "../internal.h"


// g_buffering_enabled is one if fork-unsafe buffering has been enabled and
// zero otherwise.
static CRYPTO_atomic_u32 g_buffering_enabled;

#if !defined(OPENSSL_WINDOWS)
void RAND_enable_fork_unsafe_buffering(int fd) {
  // We no longer support setting the file-descriptor with this function.
  if (fd != -1) {
    abort();
  }

  CRYPTO_atomic_store_u32(&g_buffering_enabled, 1);
}

void RAND_disable_fork_unsafe_buffering(void) {
  CRYPTO_atomic_store_u32(&g_buffering_enabled, 0);
}
#endif

// Returns non-zero if fork-unsafe buffering is currently enabled.
int rand_fork_unsafe_buffering_enabled(void) {
  return CRYPTO_atomic_load_u32(&g_buffering_enabled) != 0;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/rand/getentropy.cc
================================================

/* Copyright 2023 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

#if !defined(_DEFAULT_SOURCE)
#define _DEFAULT_SOURCE  // Needed for getentropy on musl and glibc
#endif

#include

#include "../bcm_support.h"
#include "sysrand_internal.h"

#if defined(OPENSSL_RAND_GETENTROPY)

#include
#include
#include

#if defined(OPENSSL_MACOS) || defined(OPENSSL_FUCHSIA)
#include
#endif

// The getentropy-based backend needs no one-time initialization.
void CRYPTO_init_sysrand(void) {}

// CRYPTO_sysrand puts |requested| random bytes into |out|. On any |getentropy|
// failure it aborts the process, so it never returns partially-filled output.
void CRYPTO_sysrand(uint8_t *out, size_t requested) {
  while (requested > 0) {
    // |getentropy| can only request 256 bytes at a time.
    size_t todo = requested <= 256 ? requested : 256;
    if (getentropy(out, todo) != 0) {
      perror("getentropy() failed");
      abort();
    }

    out += todo;
    requested -= todo;
  }
}

// On this backend entropy is always available, so this fills |buf| and
// unconditionally reports success.
int CRYPTO_sysrand_if_available(uint8_t *buf, size_t len) {
  CRYPTO_sysrand(buf, len);
  return 1;
}

// Seed material is drawn from the same |getentropy| source as regular output.
void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) {
  CRYPTO_sysrand(out, requested);
}

#endif  // OPENSSL_RAND_GETENTROPY

================================================
FILE: Sources/CNIOBoringSSL/crypto/rand/getrandom_fillin.h
================================================
/* Copyright 2020 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ #ifndef OPENSSL_HEADER_CRYPTO_RAND_GETRANDOM_FILLIN_H #define OPENSSL_HEADER_CRYPTO_RAND_GETRANDOM_FILLIN_H #include #if defined(OPENSSL_LINUX) #include #if defined(OPENSSL_X86_64) #define EXPECTED_NR_getrandom 318 #elif defined(OPENSSL_X86) #define EXPECTED_NR_getrandom 355 #elif defined(OPENSSL_AARCH64) #define EXPECTED_NR_getrandom 278 #elif defined(OPENSSL_ARM) #define EXPECTED_NR_getrandom 384 #elif defined(OPENSSL_RISCV64) #define EXPECTED_NR_getrandom 278 #endif #if defined(EXPECTED_NR_getrandom) #define USE_NR_getrandom #if defined(__NR_getrandom) #if __NR_getrandom != EXPECTED_NR_getrandom #error "system call number for getrandom is not the expected value" #endif #else // __NR_getrandom #define __NR_getrandom EXPECTED_NR_getrandom #endif // __NR_getrandom #endif // EXPECTED_NR_getrandom #if !defined(GRND_NONBLOCK) #define GRND_NONBLOCK 1 #endif #if !defined(GRND_RANDOM) #define GRND_RANDOM 2 #endif #endif // OPENSSL_LINUX #endif // OPENSSL_HEADER_CRYPTO_RAND_GETRANDOM_FILLIN_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/rand/ios.cc ================================================ /* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include "../bcm_support.h" #include "sysrand_internal.h" #if defined(OPENSSL_RAND_IOS) #include #include void CRYPTO_init_sysrand(void) {} void CRYPTO_sysrand(uint8_t *out, size_t requested) { if (CCRandomGenerateBytes(out, requested) != kCCSuccess) { abort(); } } int CRYPTO_sysrand_if_available(uint8_t *buf, size_t len) { CRYPTO_sysrand(buf, len); return 1; } void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) { CRYPTO_sysrand(out, requested); } #endif // OPENSSL_RAND_IOS ================================================ FILE: Sources/CNIOBoringSSL/crypto/rand/passive.cc ================================================ /* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include "../bcm_support.h" #include "../fipsmodule/bcm_interface.h" #include "../internal.h" #if defined(BORINGSSL_FIPS) #include // passive_get_seed_entropy writes |out_entropy_len| bytes of entropy, suitable // for seeding a DRBG, to |out_entropy|. It sets |*out_used_cpu| to one if the // entropy came directly from the CPU and zero if it came from the OS. 
It // actively obtains entropy from the CPU/OS static void passive_get_seed_entropy(uint8_t *out_entropy, size_t out_entropy_len, int *out_want_additional_input) { *out_want_additional_input = 0; if (bcm_success(BCM_rand_bytes_hwrng(out_entropy, out_entropy_len))) { *out_want_additional_input = 1; } else { CRYPTO_sysrand_for_seed(out_entropy, out_entropy_len); } } #define ENTROPY_READ_LEN \ (/* last_block size */ 16 + CTR_DRBG_ENTROPY_LEN * BORINGSSL_FIPS_OVERREAD) #if defined(OPENSSL_ANDROID) #include #include #include #include #include // socket_history_t enumerates whether the entropy daemon should be contacted // for a given entropy request. Values other than socket_not_yet_attempted are // sticky so if the first attempt to read from the daemon fails it's assumed // that the daemon is not present and no more attempts will be made. If the // first attempt is successful then attempts will be made forever more. enum class socket_history_t { // initial value, no connections to the entropy daemon have been made yet. socket_not_yet_attempted = 0, // reading from the entropy daemon was successful socket_success, // reading from the entropy daemon failed. socket_failed, }; static std::atomic g_socket_history{ socket_history_t::socket_not_yet_attempted}; // DAEMON_RESPONSE_LEN is the number of bytes that the entropy daemon replies // with. #define DAEMON_RESPONSE_LEN 496 static_assert(ENTROPY_READ_LEN == DAEMON_RESPONSE_LEN, "entropy daemon response length mismatch"); static int get_seed_from_daemon(uint8_t *out_entropy, size_t out_entropy_len) { // |RAND_need_entropy| should never call this function for more than // |DAEMON_RESPONSE_LEN| bytes. 
if (out_entropy_len > DAEMON_RESPONSE_LEN) { abort(); } const socket_history_t socket_history = g_socket_history.load(std::memory_order_acquire); if (socket_history == socket_history_t::socket_failed) { return 0; } int ret = 0; static const char kSocketPath[] = "/dev/socket/prng_seeder"; struct sockaddr_un sun; uint8_t buffer[DAEMON_RESPONSE_LEN]; size_t done = 0; const int sock = socket(AF_UNIX, SOCK_STREAM, 0); if (sock < 0) { goto out; } memset(&sun, 0, sizeof(sun)); sun.sun_family = AF_UNIX; static_assert(sizeof(kSocketPath) <= UNIX_PATH_MAX, "kSocketPath too long"); OPENSSL_memcpy(sun.sun_path, kSocketPath, sizeof(kSocketPath)); if (connect(sock, (struct sockaddr *)&sun, sizeof(sun))) { goto out; } while (done < sizeof(buffer)) { ssize_t n; do { n = read(sock, buffer + done, sizeof(buffer) - done); } while (n == -1 && errno == EINTR); if (n < 1) { goto out; } done += n; } if (done != DAEMON_RESPONSE_LEN) { // The daemon should always write |DAEMON_RESPONSE_LEN| bytes on every // connection. goto out; } assert(out_entropy_len <= DAEMON_RESPONSE_LEN); OPENSSL_memcpy(out_entropy, buffer, out_entropy_len); ret = 1; out: if (socket_history == socket_history_t::socket_not_yet_attempted) { socket_history_t expected = socket_history_t::socket_not_yet_attempted; // If another thread has already updated |g_socket_history| then we defer // to their value. g_socket_history.compare_exchange_strong( expected, (ret == 0) ? socket_history_t::socket_failed : socket_history_t::socket_success, std::memory_order_release, std::memory_order_relaxed); } close(sock); return ret; } #else static int get_seed_from_daemon(uint8_t *out_entropy, size_t out_entropy_len) { return 0; } #endif // OPENSSL_ANDROID // RAND_need_entropy is called by the FIPS module when it has blocked because of // a lack of entropy. This signal is used as an indication to feed it more. 
void RAND_need_entropy(size_t bytes_needed) { uint8_t buf[ENTROPY_READ_LEN]; size_t todo = sizeof(buf); if (todo > bytes_needed) { todo = bytes_needed; } int want_additional_input; if (get_seed_from_daemon(buf, todo)) { want_additional_input = 1; } else { passive_get_seed_entropy(buf, todo, &want_additional_input); } if (boringssl_fips_break_test("CRNG")) { // This breaks the "continuous random number generator test" defined in FIPS // 140-2, section 4.9.2, and implemented in |rand_get_seed|. OPENSSL_memset(buf, 0, todo); } BCM_rand_load_entropy(buf, todo, want_additional_input); } #endif // FIPS ================================================ FILE: Sources/CNIOBoringSSL/crypto/rand/rand.cc ================================================ /* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include "../bcm_support.h" #include "../fipsmodule/bcm_interface.h" int RAND_bytes(uint8_t *buf, size_t len) { BCM_rand_bytes(buf, len); return 1; } int RAND_pseudo_bytes(uint8_t *buf, size_t len) { return RAND_bytes(buf, len); } void RAND_seed(const void *buf, int num) { // OpenSSH calls |RAND_seed| before jailing on the assumption that any needed // file descriptors etc will be opened. 
uint8_t unused; RAND_bytes(&unused, sizeof(unused)); } int RAND_load_file(const char *path, long num) { if (num < 0) { // read the "whole file" return 1; } else if (num <= INT_MAX) { return (int)num; } else { return INT_MAX; } } const char *RAND_file_name(char *buf, size_t num) { return NULL; } void RAND_add(const void *buf, int num, double entropy) {} int RAND_egd(const char *path) { return 255; } int RAND_poll(void) { return 1; } int RAND_status(void) { return 1; } static const struct rand_meth_st kSSLeayMethod = { RAND_seed, RAND_bytes, RAND_cleanup, RAND_add, RAND_pseudo_bytes, RAND_status, }; RAND_METHOD *RAND_SSLeay(void) { return (RAND_METHOD *)&kSSLeayMethod; } RAND_METHOD *RAND_OpenSSL(void) { return RAND_SSLeay(); } const RAND_METHOD *RAND_get_rand_method(void) { return RAND_SSLeay(); } int RAND_set_rand_method(const RAND_METHOD *method) { return 1; } void RAND_cleanup(void) {} void RAND_get_system_entropy_for_custom_prng(uint8_t *buf, size_t len) { if (len > 256) { abort(); } CRYPTO_sysrand_for_seed(buf, len); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/rand/sysrand_internal.h ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef OPENSSL_HEADER_CRYPTO_SYSRAND_INTERNAL_H #define OPENSSL_HEADER_CRYPTO_SYSRAND_INTERNAL_H #include #if defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) #define OPENSSL_RAND_DETERMINISTIC #elif defined(OPENSSL_TRUSTY) #define OPENSSL_RAND_TRUSTY #elif defined(OPENSSL_WINDOWS) #define OPENSSL_RAND_WINDOWS #elif defined(OPENSSL_LINUX) #define OPENSSL_RAND_URANDOM #elif defined(OPENSSL_APPLE) && !defined(OPENSSL_MACOS) // Unlike macOS, iOS and similar hide away getentropy(). #define OPENSSL_RAND_IOS #else // By default if you are integrating BoringSSL we expect you to // provide getentropy from the header file. #define OPENSSL_RAND_GETENTROPY #endif #endif // OPENSSL_HEADER_CRYPTO__SYSRAND_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/rand/trusty.cc ================================================ /* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include "../bcm_support.h" #include "sysrand_internal.h" #if defined(OPENSSL_RAND_TRUSTY) #include #include #include #include #include void CRYPTO_init_sysrand(void) {} void CRYPTO_sysrand(uint8_t *out, size_t requested) { if (trusty_rng_hw_rand(out, requested) != NO_ERROR) { abort(); } } int CRYPTO_sysrand_if_available(uint8_t *buf, size_t len) { CRYPTO_sysrand(buf, len); return 1; } void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) { CRYPTO_sysrand(out, requested); } #endif // OPENSSL_RAND_TRUSTY ================================================ FILE: Sources/CNIOBoringSSL/crypto/rand/urandom.cc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #if !defined(_GNU_SOURCE) #define _GNU_SOURCE // needed for syscall() on Linux. #endif #include #include "../bcm_support.h" #include "sysrand_internal.h" #if defined(OPENSSL_RAND_URANDOM) #include #include #include #include #include #include #if defined(OPENSSL_LINUX) #if defined(BORINGSSL_FIPS) #include #include #endif #include #if defined(OPENSSL_ANDROID) #include #endif #if !defined(OPENSSL_ANDROID) #define OPENSSL_HAS_GETAUXVAL #endif // glibc prior to 2.16 does not have getauxval and sys/auxv.h. Android has some // host builds (i.e. 
not building for Android itself, so |OPENSSL_ANDROID| is // unset) which are still using a 2.15 sysroot. // // TODO(davidben): Remove this once Android updates their sysroot. #if defined(__GLIBC_PREREQ) #if !__GLIBC_PREREQ(2, 16) #undef OPENSSL_HAS_GETAUXVAL #endif #endif #if defined(OPENSSL_HAS_GETAUXVAL) #include #endif #endif // OPENSSL_LINUX #include #include #include "../internal.h" #include "getrandom_fillin.h" #if defined(USE_NR_getrandom) #if defined(OPENSSL_MSAN) extern "C" { void __msan_unpoison(void *, size_t); } #endif static ssize_t boringssl_getrandom(void *buf, size_t buf_len, unsigned flags) { ssize_t ret; do { ret = syscall(__NR_getrandom, buf, buf_len, flags); } while (ret == -1 && errno == EINTR); #if defined(OPENSSL_MSAN) if (ret > 0) { // MSAN doesn't recognise |syscall| and thus doesn't notice that we have // initialised the output buffer. __msan_unpoison(buf, ret); } #endif // OPENSSL_MSAN return ret; } #endif // USE_NR_getrandom // kHaveGetrandom in |urandom_fd| signals that |getrandom| or |getentropy| is // available and should be used instead. static const int kHaveGetrandom = -3; // urandom_fd is a file descriptor to /dev/urandom. It's protected by |once|. static int urandom_fd; #if defined(USE_NR_getrandom) // getrandom_ready is one if |getrandom| had been initialized by the time // |init_once| was called and zero otherwise. static int getrandom_ready; // extra_getrandom_flags_for_seed contains a value that is ORed into the flags // for getrandom() when reading entropy for a seed. static int extra_getrandom_flags_for_seed; // On Android, check a system property to decide whether to set // |extra_getrandom_flags_for_seed| otherwise they will default to zero. If // ro.oem_boringcrypto_hwrand is true then |extra_getrandom_flags_for_seed| will // be set to GRND_RANDOM, causing all random data to be drawn from the same // source as /dev/random. 
static void maybe_set_extra_getrandom_flags(void) { #if defined(BORINGSSL_FIPS) && defined(OPENSSL_ANDROID) char value[PROP_VALUE_MAX + 1]; int length = __system_property_get("ro.boringcrypto.hwrand", value); if (length < 0 || length > PROP_VALUE_MAX) { return; } value[length] = 0; if (OPENSSL_strcasecmp(value, "true") == 0) { extra_getrandom_flags_for_seed = GRND_RANDOM; } #endif } #endif // USE_NR_getrandom static CRYPTO_once_t rand_once = CRYPTO_ONCE_INIT; // init_once initializes the state of this module to values previously // requested. This is the only function that modifies |urandom_fd|, which may be // read safely after calling the once. static void init_once(void) { #if defined(USE_NR_getrandom) int have_getrandom; uint8_t dummy; ssize_t getrandom_ret = boringssl_getrandom(&dummy, sizeof(dummy), GRND_NONBLOCK); if (getrandom_ret == 1) { getrandom_ready = 1; have_getrandom = 1; } else if (getrandom_ret == -1 && errno == EAGAIN) { // We have getrandom, but the entropy pool has not been initialized yet. have_getrandom = 1; } else if (getrandom_ret == -1 && errno == ENOSYS) { // Fallthrough to using /dev/urandom, below. have_getrandom = 0; } else { // Other errors are fatal. perror("getrandom"); abort(); } if (have_getrandom) { urandom_fd = kHaveGetrandom; maybe_set_extra_getrandom_flags(); return; } #endif // USE_NR_getrandom // FIPS builds must support getrandom. // // Historically, only Android FIPS builds required getrandom, while Linux FIPS // builds had a /dev/urandom fallback which used RNDGETENTCNT as a poor // approximation for getrandom's blocking behavior. This is now removed, but // avoid making assumptions on this removal until March 2023, in case it needs // to be restored. This comment can be deleted after March 2023. 
#if defined(BORINGSSL_FIPS) perror("getrandom not found"); abort(); #endif int fd; do { fd = open("/dev/urandom", O_RDONLY | O_CLOEXEC); } while (fd == -1 && errno == EINTR); if (fd < 0) { perror("failed to open /dev/urandom"); abort(); } urandom_fd = fd; } static CRYPTO_once_t wait_for_entropy_once = CRYPTO_ONCE_INIT; static void wait_for_entropy(void) { int fd = urandom_fd; if (fd == kHaveGetrandom) { // |getrandom| and |getentropy| support blocking in |fill_with_entropy| // directly. For |getrandom|, we first probe with a non-blocking call to aid // debugging. #if defined(USE_NR_getrandom) if (getrandom_ready) { // The entropy pool was already initialized in |init_once|. return; } uint8_t dummy; ssize_t getrandom_ret = boringssl_getrandom(&dummy, sizeof(dummy), GRND_NONBLOCK); if (getrandom_ret == -1 && errno == EAGAIN) { // Attempt to get the path of the current process to aid in debugging when // something blocks. const char *current_process = ""; #if defined(OPENSSL_HAS_GETAUXVAL) const unsigned long getauxval_ret = getauxval(AT_EXECFN); if (getauxval_ret != 0) { current_process = (const char *)getauxval_ret; } #endif fprintf( stderr, "%s: getrandom indicates that the entropy pool has not been " "initialized. Rather than continue with poor entropy, this process " "will block until entropy is available.\n", current_process); getrandom_ret = boringssl_getrandom(&dummy, sizeof(dummy), 0 /* no flags */); } if (getrandom_ret != 1) { perror("getrandom"); abort(); } #endif // USE_NR_getrandom return; } } // fill_with_entropy writes |len| bytes of entropy into |out|. It returns one // on success and zero on error. If |block| is one, this function will block // until the entropy pool is initialized. Otherwise, this function may fail, // setting |errno| to |EAGAIN| if the entropy pool has not yet been initialized. // If |seed| is one, this function will OR in the value of // |*extra_getrandom_flags_for_seed()| when using |getrandom|. 
static int fill_with_entropy(uint8_t *out, size_t len, int block, int seed) {
  if (len == 0) {
    return 1;
  }

#if defined(USE_NR_getrandom) || defined(FREEBSD_GETRANDOM)
  int getrandom_flags = 0;
  if (!block) {
    // Non-blocking callers get EAGAIN instead of waiting for pool init.
    getrandom_flags |= GRND_NONBLOCK;
  }
#endif

#if defined(USE_NR_getrandom)
  if (seed) {
    // Seed reads may be redirected to a different kernel source (e.g.
    // GRND_RANDOM) depending on |extra_getrandom_flags_for_seed|.
    getrandom_flags |= extra_getrandom_flags_for_seed;
  }
#endif

  CRYPTO_init_sysrand();
  if (block) {
    CRYPTO_once(&wait_for_entropy_once, wait_for_entropy);
  }

  // Clear |errno| so it has defined value if |read| or |getrandom|
  // "successfully" returns zero.
  errno = 0;
  while (len > 0) {
    ssize_t r;
    if (urandom_fd == kHaveGetrandom) {
#if defined(USE_NR_getrandom)
      r = boringssl_getrandom(out, len, getrandom_flags);
#else  // USE_NR_getrandom
      // |urandom_fd| should only hold |kHaveGetrandom| when getrandom support
      // was compiled in; reaching here indicates corrupted state.
      fprintf(stderr, "urandom fd corrupt.\n");
      abort();
#endif
    } else {
      // Retry reads interrupted by signals; short reads loop around again.
      do {
        r = read(urandom_fd, out, len);
      } while (r == -1 && errno == EINTR);
    }

    if (r <= 0) {
      return 0;
    }
    out += r;
    len -= r;
  }

  return 1;
}

// CRYPTO_init_sysrand runs this module's one-time setup (|init_once|: probe
// for getrandom or open /dev/urandom) exactly once via |rand_once|.
void CRYPTO_init_sysrand(void) { CRYPTO_once(&rand_once, init_once); }

// CRYPTO_sysrand puts |requested| random bytes into |out|.
void CRYPTO_sysrand(uint8_t *out, size_t requested) { if (!fill_with_entropy(out, requested, /*block=*/1, /*seed=*/0)) { perror("entropy fill failed"); abort(); } } void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) { if (!fill_with_entropy(out, requested, /*block=*/1, /*seed=*/1)) { perror("entropy fill failed"); abort(); } } int CRYPTO_sysrand_if_available(uint8_t *out, size_t requested) { if (fill_with_entropy(out, requested, /*block=*/0, /*seed=*/0)) { return 1; } else if (errno == EAGAIN) { OPENSSL_memset(out, 0, requested); return 0; } else { perror("opportunistic entropy fill failed"); abort(); } } #endif // OPENSSL_RAND_URANDOM ================================================ FILE: Sources/CNIOBoringSSL/crypto/rand/windows.cc ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include "../bcm_support.h" #include "../internal.h" #include "sysrand_internal.h" #if defined(OPENSSL_RAND_WINDOWS) #include #include OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \ !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) #include OPENSSL_MSVC_PRAGMA(comment(lib, "bcrypt.lib")) #endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP OPENSSL_MSVC_PRAGMA(warning(pop)) #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \ !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) void CRYPTO_init_sysrand(void) {} void CRYPTO_sysrand(uint8_t *out, size_t requested) { while (requested > 0) { ULONG output_bytes_this_pass = ULONG_MAX; if (requested < output_bytes_this_pass) { output_bytes_this_pass = (ULONG)requested; } if (!BCRYPT_SUCCESS(BCryptGenRandom( /*hAlgorithm=*/NULL, out, output_bytes_this_pass, BCRYPT_USE_SYSTEM_PREFERRED_RNG))) { abort(); } requested -= output_bytes_this_pass; out += output_bytes_this_pass; } } #else // See: https://learn.microsoft.com/en-us/windows/win32/seccng/processprng typedef BOOL (WINAPI *ProcessPrngFunction)(PBYTE pbData, SIZE_T cbData); static ProcessPrngFunction g_processprng_fn = NULL; static void init_processprng(void) { HMODULE hmod = LoadLibraryW(L"bcryptprimitives"); if (hmod == NULL) { abort(); } g_processprng_fn = (ProcessPrngFunction)GetProcAddress(hmod, "ProcessPrng"); if (g_processprng_fn == NULL) { abort(); } } void CRYPTO_init_sysrand(void) { static CRYPTO_once_t once = CRYPTO_ONCE_INIT; CRYPTO_once(&once, init_processprng); } void CRYPTO_sysrand(uint8_t *out, size_t requested) { CRYPTO_init_sysrand(); // On non-UWP configurations, use ProcessPrng instead of BCryptGenRandom // to avoid accessing resources that may be unavailable inside the // Chromium sandbox. 
See https://crbug.com/74242 if (!g_processprng_fn(out, requested)) { abort(); } } #endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP int CRYPTO_sysrand_if_available(uint8_t *buf, size_t len) { CRYPTO_sysrand(buf, len); return 1; } void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) { CRYPTO_sysrand(out, requested); } #endif // OPENSSL_RAND_WINDOWS ================================================ FILE: Sources/CNIOBoringSSL/crypto/rc4/rc4.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include void RC4(RC4_KEY *key, size_t len, const uint8_t *in, uint8_t *out) { uint32_t x = key->x; uint32_t y = key->y; uint32_t *d = key->data; for (size_t i = 0; i < len; i++) { x = (x + 1) & 0xff; uint32_t tx = d[x]; y = (tx + y) & 0xff; uint32_t ty = d[y]; d[x] = ty; d[y] = tx; out[i] = d[(tx + ty) & 0xff] ^ in[i]; } key->x = x; key->y = y; } void RC4_set_key(RC4_KEY *rc4key, unsigned len, const uint8_t *key) { uint32_t *d = &rc4key->data[0]; rc4key->x = 0; rc4key->y = 0; for (unsigned i = 0; i < 256; i++) { d[i] = i; } unsigned id1 = 0, id2 = 0; for (unsigned i = 0; i < 256; i++) { uint32_t tmp = d[i]; id2 = (key[id1] + tmp + id2) & 0xff; if (++id1 == len) { id1 = 0; } d[i] = d[id2]; d[id2] = tmp; } } ================================================ FILE: Sources/CNIOBoringSSL/crypto/refcount.cc ================================================ /* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
*
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#include "internal.h"

#include
#include


// See comment above the typedef of CRYPTO_refcount_t about these tests.
static_assert(alignof(CRYPTO_refcount_t) == alignof(CRYPTO_atomic_u32),
              "CRYPTO_refcount_t does not match CRYPTO_atomic_u32 alignment");
static_assert(sizeof(CRYPTO_refcount_t) == sizeof(CRYPTO_atomic_u32),
              "CRYPTO_refcount_t does not match CRYPTO_atomic_u32 size");
static_assert((CRYPTO_refcount_t)-1 == CRYPTO_REFCOUNT_MAX,
              "CRYPTO_REFCOUNT_MAX is incorrect");

// CRYPTO_refcount_inc atomically increments |*in_count|. A count that has
// saturated at |CRYPTO_REFCOUNT_MAX| is left unchanged, so a saturated object
// is effectively leaked rather than risking overflow. The weak CAS may fail
// spuriously or under contention, hence the retry loop; |expected| is
// refreshed by the CAS on each failed attempt.
void CRYPTO_refcount_inc(CRYPTO_refcount_t *in_count) {
  CRYPTO_atomic_u32 *count = (CRYPTO_atomic_u32 *)in_count;
  uint32_t expected = CRYPTO_atomic_load_u32(count);
  while (expected != CRYPTO_REFCOUNT_MAX) {
    uint32_t new_value = expected + 1;
    if (CRYPTO_atomic_compare_exchange_weak_u32(count, &expected, new_value)) {
      break;
    }
  }
}

// CRYPTO_refcount_dec_and_test_zero atomically decrements |*in_count| and
// returns one iff the count reached zero. Decrementing a zero count aborts
// (it indicates a reference-counting bug); a count saturated at
// |CRYPTO_REFCOUNT_MAX| is never decremented and the function returns zero so
// the object is never freed.
int CRYPTO_refcount_dec_and_test_zero(CRYPTO_refcount_t *in_count) {
  CRYPTO_atomic_u32 *count = (CRYPTO_atomic_u32 *)in_count;
  uint32_t expected = CRYPTO_atomic_load_u32(count);
  for (;;) {
    if (expected == 0) {
      abort();
    } else if (expected == CRYPTO_REFCOUNT_MAX) {
      return 0;
    } else {
      const uint32_t new_value = expected - 1;
      if (CRYPTO_atomic_compare_exchange_weak_u32(count, &expected,
                                                  new_value)) {
        return new_value == 0;
      }
    }
  }
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/rsa/internal.h
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
* * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_RSA_EXTRA_INTERNAL_H #define OPENSSL_HEADER_RSA_EXTRA_INTERNAL_H #include #if defined(__cplusplus) extern "C" { #endif int RSA_padding_check_PKCS1_OAEP_mgf1(uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *from, size_t from_len, const uint8_t *param, size_t param_len, const EVP_MD *md, const EVP_MD *mgf1md); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_RSA_EXTRA_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/rsa/rsa_asn1.cc ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../fipsmodule/rsa/internal.h" #include "../bytestring/internal.h" #include "../internal.h" static int parse_integer(CBS *cbs, BIGNUM **out) { assert(*out == NULL); *out = BN_new(); if (*out == NULL) { return 0; } return BN_parse_asn1_unsigned(cbs, *out); } static int marshal_integer(CBB *cbb, BIGNUM *bn) { if (bn == NULL) { // An RSA object may be missing some components. 
OPENSSL_PUT_ERROR(RSA, RSA_R_VALUE_MISSING); return 0; } return BN_marshal_asn1(cbb, bn); } RSA *RSA_parse_public_key(CBS *cbs) { RSA *ret = RSA_new(); if (ret == NULL) { return NULL; } CBS child; if (!CBS_get_asn1(cbs, &child, CBS_ASN1_SEQUENCE) || !parse_integer(&child, &ret->n) || !parse_integer(&child, &ret->e) || CBS_len(&child) != 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_ENCODING); RSA_free(ret); return NULL; } if (!RSA_check_key(ret)) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_RSA_PARAMETERS); RSA_free(ret); return NULL; } return ret; } RSA *RSA_public_key_from_bytes(const uint8_t *in, size_t in_len) { CBS cbs; CBS_init(&cbs, in, in_len); RSA *ret = RSA_parse_public_key(&cbs); if (ret == NULL || CBS_len(&cbs) != 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_ENCODING); RSA_free(ret); return NULL; } return ret; } int RSA_marshal_public_key(CBB *cbb, const RSA *rsa) { CBB child; if (!CBB_add_asn1(cbb, &child, CBS_ASN1_SEQUENCE) || !marshal_integer(&child, rsa->n) || !marshal_integer(&child, rsa->e) || !CBB_flush(cbb)) { OPENSSL_PUT_ERROR(RSA, RSA_R_ENCODE_ERROR); return 0; } return 1; } int RSA_public_key_to_bytes(uint8_t **out_bytes, size_t *out_len, const RSA *rsa) { CBB cbb; CBB_zero(&cbb); if (!CBB_init(&cbb, 0) || !RSA_marshal_public_key(&cbb, rsa) || !CBB_finish(&cbb, out_bytes, out_len)) { OPENSSL_PUT_ERROR(RSA, RSA_R_ENCODE_ERROR); CBB_cleanup(&cbb); return 0; } return 1; } // kVersionTwoPrime is the value of the version field for a two-prime // RSAPrivateKey structure (RFC 3447). 
static const uint64_t kVersionTwoPrime = 0;

// RSA_parse_private_key decodes a DER RSAPrivateKey (RFC 3447; two-prime form
// only) from |cbs| and validates the result with |RSA_check_key|. Returns a
// fresh RSA* on success or NULL on error.
RSA *RSA_parse_private_key(CBS *cbs) {
  RSA *ret = RSA_new();
  if (ret == NULL) {
    return NULL;
  }
  CBS child;
  uint64_t version;
  if (!CBS_get_asn1(cbs, &child, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1_uint64(&child, &version)) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_ENCODING);
    goto err;
  }
  if (version != kVersionTwoPrime) {
    // Only two-prime (version zero) keys are supported.
    OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_VERSION);
    goto err;
  }
  if (!parse_integer(&child, &ret->n) ||
      !parse_integer(&child, &ret->e) ||
      !parse_integer(&child, &ret->d) ||
      !parse_integer(&child, &ret->p) ||
      !parse_integer(&child, &ret->q) ||
      !parse_integer(&child, &ret->dmp1) ||
      !parse_integer(&child, &ret->dmq1) ||
      !parse_integer(&child, &ret->iqmp)) {
    goto err;
  }
  if (CBS_len(&child) != 0) {
    // Trailing data inside the SEQUENCE is rejected.
    OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_ENCODING);
    goto err;
  }
  if (!RSA_check_key(ret)) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_RSA_PARAMETERS);
    goto err;
  }
  return ret;

err:
  RSA_free(ret);
  return NULL;
}

// RSA_private_key_from_bytes parses exactly |in_len| bytes as a DER
// RSAPrivateKey; any trailing bytes after the structure are an error.
RSA *RSA_private_key_from_bytes(const uint8_t *in, size_t in_len) {
  CBS cbs;
  CBS_init(&cbs, in, in_len);
  RSA *ret = RSA_parse_private_key(&cbs);
  if (ret == NULL || CBS_len(&cbs) != 0) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_ENCODING);
    RSA_free(ret);
    return NULL;
  }
  return ret;
}

// RSA_marshal_private_key appends a DER RSAPrivateKey (two-prime form) to
// |cbb|. Returns one on success and zero on error.
int RSA_marshal_private_key(CBB *cbb, const RSA *rsa) {
  CBB child;
  if (!CBB_add_asn1(cbb, &child, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1_uint64(&child, kVersionTwoPrime) ||
      !marshal_integer(&child, rsa->n) ||
      !marshal_integer(&child, rsa->e) ||
      !marshal_integer(&child, rsa->d) ||
      !marshal_integer(&child, rsa->p) ||
      !marshal_integer(&child, rsa->q) ||
      !marshal_integer(&child, rsa->dmp1) ||
      !marshal_integer(&child, rsa->dmq1) ||
      !marshal_integer(&child, rsa->iqmp) ||
      !CBB_flush(cbb)) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_ENCODE_ERROR);
    return 0;
  }
  return 1;
}

// RSA_private_key_to_bytes serializes |rsa| to a freshly allocated buffer
// returned via |out_bytes|/|out_len|; the caller takes ownership.
int RSA_private_key_to_bytes(uint8_t **out_bytes, size_t *out_len,
                             const RSA *rsa) {
  CBB cbb;
  CBB_zero(&cbb);
  if (!CBB_init(&cbb, 0) ||
      !RSA_marshal_private_key(&cbb, rsa) ||
      !CBB_finish(&cbb, out_bytes, out_len)) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_ENCODE_ERROR);
    CBB_cleanup(&cbb);
    return 0;
  }
  return 1;
}

// d2i_RSAPublicKey is the legacy d2i-style wrapper over
// |RSA_parse_public_key|. On success, |*inp| is advanced past the bytes read.
RSA *d2i_RSAPublicKey(RSA **out, const uint8_t **inp, long len) {
  if (len < 0) {
    return NULL;
  }
  CBS cbs;
  CBS_init(&cbs, *inp, (size_t)len);
  RSA *ret = RSA_parse_public_key(&cbs);
  if (ret == NULL) {
    return NULL;
  }
  if (out != NULL) {
    RSA_free(*out);
    *out = ret;
  }
  *inp = CBS_data(&cbs);
  return ret;
}

// i2d_RSAPublicKey is the legacy i2d-style wrapper over
// |RSA_marshal_public_key|; output behavior follows |CBB_finish_i2d|.
// Returns -1 on error.
int i2d_RSAPublicKey(const RSA *in, uint8_t **outp) {
  CBB cbb;
  if (!CBB_init(&cbb, 0) ||
      !RSA_marshal_public_key(&cbb, in)) {
    CBB_cleanup(&cbb);
    return -1;
  }
  return CBB_finish_i2d(&cbb, outp);
}

// d2i_RSAPrivateKey is the legacy d2i-style wrapper over
// |RSA_parse_private_key|. On success, |*inp| is advanced past the bytes read.
RSA *d2i_RSAPrivateKey(RSA **out, const uint8_t **inp, long len) {
  if (len < 0) {
    return NULL;
  }
  CBS cbs;
  CBS_init(&cbs, *inp, (size_t)len);
  RSA *ret = RSA_parse_private_key(&cbs);
  if (ret == NULL) {
    return NULL;
  }
  if (out != NULL) {
    RSA_free(*out);
    *out = ret;
  }
  *inp = CBS_data(&cbs);
  return ret;
}

// i2d_RSAPrivateKey is the legacy i2d-style wrapper over
// |RSA_marshal_private_key|; output behavior follows |CBB_finish_i2d|.
// Returns -1 on error.
int i2d_RSAPrivateKey(const RSA *in, uint8_t **outp) {
  CBB cbb;
  if (!CBB_init(&cbb, 0) ||
      !RSA_marshal_private_key(&cbb, in)) {
    CBB_cleanup(&cbb);
    return -1;
  }
  return CBB_finish_i2d(&cbb, outp);
}

// RSAPublicKey_dup duplicates |rsa|'s public half by round-tripping through
// its DER encoding.
RSA *RSAPublicKey_dup(const RSA *rsa) {
  uint8_t *der;
  size_t der_len;
  if (!RSA_public_key_to_bytes(&der, &der_len, rsa)) {
    return NULL;
  }
  RSA *ret = RSA_public_key_from_bytes(der, der_len);
  OPENSSL_free(der);
  return ret;
}

// RSAPrivateKey_dup duplicates |rsa| by round-tripping through its DER
// encoding.
RSA *RSAPrivateKey_dup(const RSA *rsa) {
  uint8_t *der;
  size_t der_len;
  if (!RSA_private_key_to_bytes(&der, &der_len, rsa)) {
    return NULL;
  }
  RSA *ret = RSA_private_key_from_bytes(der, der_len);
  OPENSSL_free(der);
  return ret;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/rsa/rsa_crypt.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html */

// NOTE(review): angle-bracket contents (#include header names and
// reinterpret_cast template arguments) in this file were stripped by text
// extraction — confirm against upstream BoringSSL before relying on them.
#include
#include
#include
#include
#include
#include
#include
#include

#include "../fipsmodule/bn/internal.h"
#include "../fipsmodule/rsa/internal.h"
#include "../internal.h"
#include "internal.h"


// rand_nonzero fills |out| with |len| uniformly random non-zero bytes, as
// required for the PS string in PKCS#1 v1.5 type-2 padding.
static void rand_nonzero(uint8_t *out, size_t len) {
  RAND_bytes(out, len);
  for (size_t i = 0; i < len; i++) {
    // Zero values are replaced, and the distribution of zero and non-zero
    // bytes is public, so leaking this is safe.
    while (constant_time_declassify_int(out[i] == 0)) {
      RAND_bytes(out + i, 1);
    }
  }
}

// RSA_padding_add_PKCS1_OAEP_mgf1 writes an OAEP-encoded copy of
// |from|..|from_len| into |to|..|to_len| using label |param| and hashes |md|
// (label hash) and |mgf1md| (MGF1 hash); both default to SHA-1 when NULL.
// Returns one on success and zero on error.
int RSA_padding_add_PKCS1_OAEP_mgf1(uint8_t *to, size_t to_len,
                                    const uint8_t *from, size_t from_len,
                                    const uint8_t *param, size_t param_len,
                                    const EVP_MD *md, const EVP_MD *mgf1md) {
  if (md == NULL) {
    md = EVP_sha1();
  }
  if (mgf1md == NULL) {
    mgf1md = md;
  }

  size_t mdlen = EVP_MD_size(md);

  // The output must fit 0x00 || maskedSeed || maskedDB with a non-empty DB.
  if (to_len < 2 * mdlen + 2) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL);
    return 0;
  }

  size_t emlen = to_len - 1;
  if (from_len > emlen - 2 * mdlen - 1) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE);
    return 0;
  }

  if (emlen < 2 * mdlen + 1) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL);
    return 0;
  }

  // EM = 0x00 || seed || DB, where DB = lHash || PS || 0x01 || M.
  to[0] = 0;
  uint8_t *seed = to + 1;
  uint8_t *db = to + mdlen + 1;

  uint8_t *dbmask = NULL;
  int ret = 0;
  if (!EVP_Digest(param, param_len, db, NULL, md, NULL)) {
    goto out;
  }
  OPENSSL_memset(db + mdlen, 0, emlen - from_len - 2 * mdlen - 1);
  db[emlen - from_len - mdlen - 1] = 0x01;
  OPENSSL_memcpy(db + emlen - from_len - mdlen, from, from_len);
  if (!RAND_bytes(seed, mdlen)) {
    goto out;
  }

  // Mask DB with MGF1(seed), then mask the seed with MGF1(maskedDB).
  dbmask = reinterpret_cast(OPENSSL_malloc(emlen - mdlen));
  if (dbmask == NULL) {
    goto out;
  }

  if (!PKCS1_MGF1(dbmask, emlen - mdlen, seed, mdlen, mgf1md)) {
    goto out;
  }
  for (size_t i = 0; i < emlen - mdlen; i++) {
    db[i] ^= dbmask[i];
  }

  uint8_t seedmask[EVP_MAX_MD_SIZE];
  if (!PKCS1_MGF1(seedmask, mdlen, db, emlen - mdlen, mgf1md)) {
    goto out;
  }
  for (size_t i = 0; i < mdlen; i++) {
    seed[i] ^= seedmask[i];
  }

  ret = 1;

out:
  OPENSSL_free(dbmask);
  return ret;
}

// RSA_padding_check_PKCS1_OAEP_mgf1 reverses the encoding above. All checks
// over secret data are performed in constant time; only the overall
// valid/invalid bit and, on success, the message length are declassified.
int RSA_padding_check_PKCS1_OAEP_mgf1(uint8_t *out, size_t *out_len,
                                      size_t max_out, const uint8_t *from,
                                      size_t from_len, const uint8_t *param,
                                      size_t param_len, const EVP_MD *md,
                                      const EVP_MD *mgf1md) {
  uint8_t *db = NULL;
  // The explicit scope lets the gotos below jump past these declarations.
  {
    if (md == NULL) {
      md = EVP_sha1();
    }
    if (mgf1md == NULL) {
      mgf1md = md;
    }

    size_t mdlen = EVP_MD_size(md);

    // The encoded message is one byte smaller than the modulus to ensure that
    // it doesn't end up greater than the modulus. Thus there's an extra "+1"
    // here compared to https://tools.ietf.org/html/rfc2437#section-9.1.1.2.
    if (from_len < 1 + 2 * mdlen + 1) {
      // 'from_len' is the length of the modulus, i.e. does not depend on the
      // particular ciphertext.
      goto decoding_err;
    }

    size_t dblen = from_len - mdlen - 1;
    db = reinterpret_cast(OPENSSL_malloc(dblen));
    if (db == NULL) {
      goto err;
    }

    const uint8_t *maskedseed = from + 1;
    const uint8_t *maskeddb = from + 1 + mdlen;

    // Unmask the seed with MGF1(maskedDB), then unmask DB with MGF1(seed).
    uint8_t seed[EVP_MAX_MD_SIZE];
    if (!PKCS1_MGF1(seed, mdlen, maskeddb, dblen, mgf1md)) {
      goto err;
    }
    for (size_t i = 0; i < mdlen; i++) {
      seed[i] ^= maskedseed[i];
    }

    if (!PKCS1_MGF1(db, dblen, seed, mdlen, mgf1md)) {
      goto err;
    }
    for (size_t i = 0; i < dblen; i++) {
      db[i] ^= maskeddb[i];
    }

    uint8_t phash[EVP_MAX_MD_SIZE];
    if (!EVP_Digest(param, param_len, phash, NULL, md, NULL)) {
      goto err;
    }

    // |bad| accumulates, in constant time, every padding violation: wrong
    // label hash, non-zero leading byte, or malformed PS || 0x01 separator.
    crypto_word_t bad =
        ~constant_time_is_zero_w(CRYPTO_memcmp(db, phash, mdlen));
    bad |= ~constant_time_is_zero_w(from[0]);

    crypto_word_t looking_for_one_byte = CONSTTIME_TRUE_W;
    size_t one_index = 0;
    for (size_t i = mdlen; i < dblen; i++) {
      crypto_word_t equals1 = constant_time_eq_w(db[i], 1);
      crypto_word_t equals0 = constant_time_eq_w(db[i], 0);
      one_index =
          constant_time_select_w(looking_for_one_byte & equals1, i, one_index);
      looking_for_one_byte =
          constant_time_select_w(equals1, 0, looking_for_one_byte);
      bad |= looking_for_one_byte & ~equals0;
    }

    bad |= looking_for_one_byte;

    // Whether the overall padding was valid or not in OAEP is public.
    if (constant_time_declassify_w(bad)) {
      goto decoding_err;
    }

    // Once the padding is known to be valid, the output length is also
    // public.
    static_assert(sizeof(size_t) <= sizeof(crypto_word_t),
                  "size_t does not fit in crypto_word_t");
    one_index = constant_time_declassify_w(one_index);

    one_index++;
    size_t mlen = dblen - one_index;
    if (max_out < mlen) {
      OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE);
      goto err;
    }

    OPENSSL_memcpy(out, db + one_index, mlen);
    *out_len = mlen;
    OPENSSL_free(db);
    return 1;
  }

decoding_err:
  // To avoid chosen ciphertext attacks, the error message should not reveal
  // which kind of decoding error happened.
  OPENSSL_PUT_ERROR(RSA, RSA_R_OAEP_DECODING_ERROR);
err:
  OPENSSL_free(db);
  return 0;
}

// rsa_padding_add_PKCS1_type_2 writes an EME-PKCS1-v1_5 encoding
// (0x00 || 0x02 || PS || 0x00 || M) of |from| into |to|..|to_len|.
static int rsa_padding_add_PKCS1_type_2(uint8_t *to, size_t to_len,
                                        const uint8_t *from, size_t from_len) {
  // See RFC 8017, section 7.2.1.
  if (to_len < RSA_PKCS1_PADDING_SIZE) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL);
    return 0;
  }

  if (from_len > to_len - RSA_PKCS1_PADDING_SIZE) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE);
    return 0;
  }

  to[0] = 0;
  to[1] = 2;

  // PS is random and non-zero; its length is implied by |from_len|.
  size_t padding_len = to_len - 3 - from_len;
  rand_nonzero(to + 2, padding_len);
  to[2 + padding_len] = 0;
  OPENSSL_memcpy(to + to_len - from_len, from, from_len);
  return 1;
}

// rsa_padding_check_PKCS1_type_2 reverses the encoding above, scanning for
// the 0x00 separator without branching on secret bytes.
static int rsa_padding_check_PKCS1_type_2(uint8_t *out, size_t *out_len,
                                          size_t max_out, const uint8_t *from,
                                          size_t from_len) {
  if (from_len == 0) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_EMPTY_PUBLIC_KEY);
    return 0;
  }

  // PKCS#1 v1.5 decryption. See "PKCS #1 v2.2: RSA Cryptography
  // Standard", section 7.2.2.
  if (from_len < RSA_PKCS1_PADDING_SIZE) {
    // |from| is zero-padded to the size of the RSA modulus, a public value, so
    // this can be rejected in non-constant time.
    OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL);
    return 0;
  }

  crypto_word_t first_byte_is_zero = constant_time_eq_w(from[0], 0);
  crypto_word_t second_byte_is_two = constant_time_eq_w(from[1], 2);

  crypto_word_t zero_index = 0, looking_for_index = CONSTTIME_TRUE_W;
  for (size_t i = 2; i < from_len; i++) {
    crypto_word_t equals0 = constant_time_is_zero_w(from[i]);
    zero_index =
        constant_time_select_w(looking_for_index & equals0, i, zero_index);
    looking_for_index = constant_time_select_w(equals0, 0, looking_for_index);
  }

  // The input must begin with 00 02.
  crypto_word_t valid_index = first_byte_is_zero;
  valid_index &= second_byte_is_two;

  // We must have found the end of PS.
  valid_index &= ~looking_for_index;

  // PS must be at least 8 bytes long, and it starts two bytes into |from|.
  valid_index &= constant_time_ge_w(zero_index, 2 + 8);

  // Skip the zero byte.
  zero_index++;

  // NOTE: Although this logic attempts to be constant time, the API contracts
  // of this function and |RSA_decrypt| with |RSA_PKCS1_PADDING| make it
  // impossible to completely avoid Bleichenbacher's attack. Consumers should
  // use |RSA_PADDING_NONE| and perform the padding check in constant-time
  // combined with a swap to a random session key or other mitigation.
  CONSTTIME_DECLASSIFY(&valid_index, sizeof(valid_index));
  CONSTTIME_DECLASSIFY(&zero_index, sizeof(zero_index));

  if (!valid_index) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_PKCS_DECODING_ERROR);
    return 0;
  }

  const size_t msg_len = from_len - zero_index;
  if (msg_len > max_out) {
    // This shouldn't happen because this function is always called with
    // |max_out| as the key size and |from_len| is bounded by the key size.
    OPENSSL_PUT_ERROR(RSA, RSA_R_PKCS_DECODING_ERROR);
    return 0;
  }

  OPENSSL_memcpy(out, &from[zero_index], msg_len);
  *out_len = msg_len;
  return 1;
}

// RSA_public_encrypt is the legacy int-returning wrapper over |RSA_encrypt|;
// it writes to |to| (assumed to hold |RSA_size(rsa)| bytes) and returns the
// ciphertext length or -1 on error.
int RSA_public_encrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa,
                       int padding) {
  size_t out_len;
  if (!RSA_encrypt(rsa, &out_len, to, RSA_size(rsa), from, flen, padding)) {
    return -1;
  }

  if (out_len > INT_MAX) {
    OPENSSL_PUT_ERROR(RSA, ERR_R_OVERFLOW);
    return -1;
  }
  return (int)out_len;
}

// RSA_private_encrypt is the legacy wrapper over |RSA_sign_raw| (a private-key
// "raw sign" operation); returns the output length or -1 on error.
int RSA_private_encrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa,
                        int padding) {
  size_t out_len;
  if (!RSA_sign_raw(rsa, &out_len, to, RSA_size(rsa), from, flen, padding)) {
    return -1;
  }

  if (out_len > INT_MAX) {
    OPENSSL_PUT_ERROR(RSA, ERR_R_OVERFLOW);
    return -1;
  }
  return (int)out_len;
}

// RSA_encrypt pads |in| per |padding| (PKCS#1 v1.5, OAEP with SHA-1 defaults,
// or none) and performs the public-key operation m^e mod n, writing exactly
// |RSA_size(rsa)| bytes to |out|. Returns one on success and zero on error.
int RSA_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out,
                const uint8_t *in, size_t in_len, int padding) {
  if (rsa->n == NULL || rsa->e == NULL) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_VALUE_MISSING);
    return 0;
  }

  if (!rsa_check_public_key(rsa)) {
    return 0;
  }

  const unsigned rsa_size = RSA_size(rsa);
  BIGNUM *f, *result;
  uint8_t *buf = NULL;
  BN_CTX *ctx = NULL;
  int i, ret = 0;

  if (max_out < rsa_size) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_OUTPUT_BUFFER_TOO_SMALL);
    return 0;
  }

  ctx = BN_CTX_new();
  if (ctx == NULL) {
    goto err;
  }

  BN_CTX_start(ctx);
  f = BN_CTX_get(ctx);
  result = BN_CTX_get(ctx);
  buf = reinterpret_cast(OPENSSL_malloc(rsa_size));
  if (!f || !result || !buf) {
    goto err;
  }

  switch (padding) {
    case RSA_PKCS1_PADDING:
      i = rsa_padding_add_PKCS1_type_2(buf, rsa_size, in, in_len);
      break;
    case RSA_PKCS1_OAEP_PADDING:
      // Use the default parameters: SHA-1 for both hashes and no label.
      i = RSA_padding_add_PKCS1_OAEP_mgf1(buf, rsa_size, in, in_len, NULL, 0,
                                          NULL, NULL);
      break;
    case RSA_NO_PADDING:
      i = RSA_padding_add_none(buf, rsa_size, in, in_len);
      break;
    default:
      OPENSSL_PUT_ERROR(RSA, RSA_R_UNKNOWN_PADDING_TYPE);
      goto err;
  }

  if (i <= 0) {
    goto err;
  }

  if (BN_bin2bn(buf, rsa_size, f) == NULL) {
    goto err;
  }

  if (BN_ucmp(f, rsa->n) >= 0) {
    // usually the padding functions would catch this
    OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
    goto err;
  }

  if (!BN_MONT_CTX_set_locked(&rsa->mont_n, &rsa->lock, rsa->n, ctx) ||
      !BN_mod_exp_mont(result, f, rsa->e, &rsa->mont_n->N, ctx, rsa->mont_n)) {
    goto err;
  }

  // put in leading 0 bytes if the number is less than the length of the
  // modulus
  if (!BN_bn2bin_padded(out, rsa_size, result)) {
    OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR);
    goto err;
  }

  *out_len = rsa_size;
  ret = 1;

err:
  if (ctx != NULL) {
    BN_CTX_end(ctx);
    BN_CTX_free(ctx);
  }
  OPENSSL_free(buf);
  return ret;
}

// rsa_default_decrypt performs the private-key operation and then strips the
// padding selected by |padding|. For RSA_NO_PADDING it decrypts directly into
// |out|; otherwise a temporary buffer holds the padded plaintext.
static int rsa_default_decrypt(RSA *rsa, size_t *out_len, uint8_t *out,
                               size_t max_out, const uint8_t *in, size_t in_len,
                               int padding) {
  const unsigned rsa_size = RSA_size(rsa);
  uint8_t *buf = NULL;
  int ret = 0;

  if (max_out < rsa_size) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_OUTPUT_BUFFER_TOO_SMALL);
    return 0;
  }

  if (padding == RSA_NO_PADDING) {
    buf = out;
  } else {
    // Allocate a temporary buffer to hold the padded plaintext.
    buf = reinterpret_cast(OPENSSL_malloc(rsa_size));
    if (buf == NULL) {
      goto err;
    }
  }

  if (in_len != rsa_size) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_LEN_NOT_EQUAL_TO_MOD_LEN);
    goto err;
  }

  if (!rsa_private_transform(rsa, buf, in, rsa_size)) {
    goto err;
  }

  switch (padding) {
    case RSA_PKCS1_PADDING:
      ret =
          rsa_padding_check_PKCS1_type_2(out, out_len, rsa_size, buf, rsa_size);
      break;
    case RSA_PKCS1_OAEP_PADDING:
      // Use the default parameters: SHA-1 for both hashes and no label.
      ret = RSA_padding_check_PKCS1_OAEP_mgf1(out, out_len, rsa_size, buf,
                                              rsa_size, NULL, 0, NULL, NULL);
      break;
    case RSA_NO_PADDING:
      *out_len = rsa_size;
      ret = 1;
      break;
    default:
      OPENSSL_PUT_ERROR(RSA, RSA_R_UNKNOWN_PADDING_TYPE);
      goto err;
  }

  // The padding verdict (and, on success, the plaintext) are declassified
  // here; see the Bleichenbacher NOTE in |rsa_padding_check_PKCS1_type_2|.
  CONSTTIME_DECLASSIFY(&ret, sizeof(ret));
  if (!ret) {
    OPENSSL_PUT_ERROR(RSA, RSA_R_PADDING_CHECK_FAILED);
  } else {
    CONSTTIME_DECLASSIFY(out, *out_len);
  }

err:
  if (padding != RSA_NO_PADDING) {
    OPENSSL_free(buf);
  }
  return ret;
}

// RSA_decrypt dispatches to a custom |decrypt| method if the RSA_METHOD
// provides one, else to |rsa_default_decrypt|.
int RSA_decrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out,
                const uint8_t *in, size_t in_len, int padding) {
  if (rsa->meth->decrypt) {
    return rsa->meth->decrypt(rsa, out_len, out, max_out, in, in_len, padding);
  }

  return rsa_default_decrypt(rsa, out_len, out, max_out, in, in_len, padding);
}

// RSA_private_decrypt is the legacy int-returning wrapper over |RSA_decrypt|;
// returns the plaintext length or -1 on error.
int RSA_private_decrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa,
                        int padding) {
  size_t out_len;
  if (!RSA_decrypt(rsa, &out_len, to, RSA_size(rsa), from, flen, padding)) {
    return -1;
  }

  if (out_len > INT_MAX) {
    OPENSSL_PUT_ERROR(RSA, ERR_R_OVERFLOW);
    return -1;
  }
  return (int)out_len;
}

// RSA_public_decrypt is the legacy wrapper over |RSA_verify_raw| (a public-key
// "raw verify" operation); returns the output length or -1 on error.
int RSA_public_decrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa,
                       int padding) {
  size_t out_len;
  if (!RSA_verify_raw(rsa, &out_len, to, RSA_size(rsa), from, flen, padding)) {
    return -1;
  }

  if (out_len > INT_MAX) {
    OPENSSL_PUT_ERROR(RSA, ERR_R_OVERFLOW);
    return -1;
  }
  return (int)out_len;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/rsa/rsa_extra.cc
================================================
/* Copyright 2024 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include int RSA_blinding_on(RSA *rsa, BN_CTX *ctx) { return 1; } void RSA_blinding_off(RSA *rsa) {} ================================================ FILE: Sources/CNIOBoringSSL/crypto/rsa/rsa_print.cc ================================================ /* * Copyright 2006-2017 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include int RSA_print(BIO *bio, const RSA *rsa, int indent) { EVP_PKEY *pkey = EVP_PKEY_new(); int ret = pkey != NULL && EVP_PKEY_set1_RSA(pkey, (RSA *)rsa) && EVP_PKEY_print_private(bio, pkey, indent, NULL); EVP_PKEY_free(pkey); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/sha/sha1.cc ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the bare #include lines below lost their header names during
// text extraction — confirm against upstream BoringSSL.
#include
#include

#include "../fipsmodule/bcm_interface.h"

// Thin public wrappers over the BCM (FIPS module) SHA-1 implementation. The
// BCM_* calls used here report no failure, so each wrapper returns 1.

int SHA1_Init(SHA_CTX *sha) {
  BCM_sha1_init(sha);
  return 1;
}

int SHA1_Update(SHA_CTX *sha, const void *data, size_t len) {
  BCM_sha1_update(sha, data, len);
  return 1;
}

int SHA1_Final(uint8_t out[SHA_DIGEST_LENGTH], SHA_CTX *sha) {
  BCM_sha1_final(out, sha);
  return 1;
}

// SHA1 computes the digest of |data| in one shot, writes it to |out|, and
// returns |out|. The local context is cleansed so no hash state is left on
// the stack.
uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t out[SHA_DIGEST_LENGTH]) {
  SHA_CTX ctx;
  BCM_sha1_init(&ctx);
  BCM_sha1_update(&ctx, data, len);
  BCM_sha1_final(out, &ctx);
  OPENSSL_cleanse(&ctx, sizeof(ctx));
  return out;
}

// SHA1_Transform processes one 64-byte block directly into |sha|'s state.
void SHA1_Transform(SHA_CTX *sha, const uint8_t block[SHA_CBLOCK]) {
  BCM_sha1_transform(sha, block);
}

// CRYPTO_fips_186_2_prf forwards to the BCM implementation of the FIPS 186-2
// PRF keyed with |xkey|.
void CRYPTO_fips_186_2_prf(uint8_t *out, size_t out_len,
                           const uint8_t xkey[SHA_DIGEST_LENGTH]) {
  BCM_fips_186_2_prf(out, out_len, xkey);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/sha/sha256.cc
================================================
/* Copyright 2024 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the bare #include lines below lost their header names during
// text extraction — confirm against upstream BoringSSL.
#include
#include

#include "../fipsmodule/bcm_interface.h"

// Thin public wrappers over the BCM (FIPS module) SHA-224/SHA-256
// implementations. The BCM_* calls report no failure, so the wrappers return
// 1 (except |SHA256_Final|, which keeps a legacy overflow check).

int SHA224_Init(SHA256_CTX *sha) {
  BCM_sha224_init(sha);
  return 1;
}

int SHA224_Update(SHA256_CTX *sha, const void *data, size_t len) {
  BCM_sha224_update(sha, data, len);
  return 1;
}

int SHA224_Final(uint8_t out[SHA224_DIGEST_LENGTH], SHA256_CTX *sha) {
  BCM_sha224_final(out, sha);
  return 1;
}

// SHA224 computes the digest of |data| in one shot, writes it to |out|, and
// returns |out|. The local context is cleansed before returning.
uint8_t *SHA224(const uint8_t *data, size_t len,
                uint8_t out[SHA224_DIGEST_LENGTH]) {
  SHA256_CTX ctx;
  BCM_sha224_init(&ctx);
  BCM_sha224_update(&ctx, data, len);
  BCM_sha224_final(out, &ctx);
  OPENSSL_cleanse(&ctx, sizeof(ctx));
  return out;
}

int SHA256_Init(SHA256_CTX *sha) {
  BCM_sha256_init(sha);
  return 1;
}

int SHA256_Update(SHA256_CTX *sha, const void *data, size_t len) {
  BCM_sha256_update(sha, data, len);
  return 1;
}

int SHA256_Final(uint8_t out[SHA256_DIGEST_LENGTH], SHA256_CTX *sha) {
  // TODO(bbe): This overflow check is one of the few places a low-level hash
  // 'final' function can fail. SHA-512 does not have a corresponding check.
  // The BCM function is infallible and will abort if this is done incorrectly.
  // We should verify nothing crashes with this removed and eliminate the 0
  // return.
  if (sha->md_len > SHA256_DIGEST_LENGTH) {
    return 0;
  }
  BCM_sha256_final(out, sha);
  return 1;
}

// SHA256 computes the digest of |data| in one shot, writes it to |out|, and
// returns |out|. The local context is cleansed before returning.
uint8_t *SHA256(const uint8_t *data, size_t len,
                uint8_t out[SHA256_DIGEST_LENGTH]) {
  SHA256_CTX ctx;
  BCM_sha256_init(&ctx);
  BCM_sha256_update(&ctx, data, len);
  BCM_sha256_final(out, &ctx);
  OPENSSL_cleanse(&ctx, sizeof(ctx));
  return out;
}

// SHA256_Transform processes one 64-byte block directly into |sha|'s state.
void SHA256_Transform(SHA256_CTX *sha, const uint8_t block[SHA256_CBLOCK]) {
  BCM_sha256_transform(sha, block);
}

// SHA256_TransformBlocks processes |num_blocks| consecutive blocks into the
// raw |state| words.
void SHA256_TransformBlocks(uint32_t state[8], const uint8_t *data,
                            size_t num_blocks) {
  BCM_sha256_transform_blocks(state, data, num_blocks);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/sha/sha512.cc
================================================
/* Copyright 2024 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

// NOTE(review): the bare #include lines below lost their header names during
// text extraction — confirm against upstream BoringSSL.
#include
#include

#include "../fipsmodule/bcm_interface.h"

// Thin public wrappers over the BCM (FIPS module) SHA-384 / SHA-512 /
// SHA-512-256 implementations. The BCM_* calls report no failure, so the
// wrappers return 1 (except |SHA512_Final|, which keeps a historical NULL
// check).

int SHA384_Init(SHA512_CTX *sha) {
  BCM_sha384_init(sha);
  return 1;
}

int SHA384_Update(SHA512_CTX *sha, const void *data, size_t len) {
  BCM_sha384_update(sha, data, len);
  return 1;
}

int SHA384_Final(uint8_t out[SHA384_DIGEST_LENGTH], SHA512_CTX *sha) {
  BCM_sha384_final(out, sha);
  return 1;
}

// SHA384 computes the digest of |data| in one shot, writes it to |out|, and
// returns |out|. The local context is cleansed before returning.
uint8_t *SHA384(const uint8_t *data, size_t len,
                uint8_t out[SHA384_DIGEST_LENGTH]) {
  SHA512_CTX ctx;
  BCM_sha384_init(&ctx);
  BCM_sha384_update(&ctx, data, len);
  BCM_sha384_final(out, &ctx);
  OPENSSL_cleanse(&ctx, sizeof(ctx));
  return out;
}

int SHA512_256_Init(SHA512_CTX *sha) {
  BCM_sha512_256_init(sha);
  return 1;
}

int SHA512_256_Update(SHA512_CTX *sha, const void *data, size_t len) {
  BCM_sha512_256_update(sha, data, len);
  return 1;
}

int SHA512_256_Final(uint8_t out[SHA512_256_DIGEST_LENGTH], SHA512_CTX *sha) {
  BCM_sha512_256_final(out, sha);
  return 1;
}

// SHA512_256 computes the digest of |data| in one shot, writes it to |out|,
// and returns |out|. The local context is cleansed before returning.
uint8_t *SHA512_256(const uint8_t *data, size_t len,
                    uint8_t out[SHA512_256_DIGEST_LENGTH]) {
  SHA512_CTX ctx;
  BCM_sha512_256_init(&ctx);
  BCM_sha512_256_update(&ctx, data, len);
  BCM_sha512_256_final(out, &ctx);
  OPENSSL_cleanse(&ctx, sizeof(ctx));
  return out;
}

int SHA512_Init(SHA512_CTX *sha) {
  BCM_sha512_init(sha);
  return 1;
}

int SHA512_Update(SHA512_CTX *sha, const void *data, size_t len) {
  BCM_sha512_update(sha, data, len);
  return 1;
}

int SHA512_Final(uint8_t out[SHA512_DIGEST_LENGTH], SHA512_CTX *sha) {
  // Historically this function returned failure if passed NULL, even
  // though other final functions do not.
  if (out == NULL) {
    return 0;
  }
  BCM_sha512_final(out, sha);
  return 1;
}

// SHA512 computes the digest of |data| in one shot, writes it to |out|, and
// returns |out|. The local context is cleansed before returning.
uint8_t *SHA512(const uint8_t *data, size_t len,
                uint8_t out[SHA512_DIGEST_LENGTH]) {
  SHA512_CTX ctx;
  BCM_sha512_init(&ctx);
  BCM_sha512_update(&ctx, data, len);
  BCM_sha512_final(out, &ctx);
  OPENSSL_cleanse(&ctx, sizeof(ctx));
  return out;
}

// SHA512_Transform processes one 128-byte block directly into |sha|'s state.
void SHA512_Transform(SHA512_CTX *sha, const uint8_t block[SHA512_CBLOCK]) {
  BCM_sha512_transform(sha, block);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/siphash/siphash.cc
================================================
/* Copyright 2019 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ #include #include #include #include "../internal.h" static void siphash_round(uint64_t v[4]) { v[0] += v[1]; v[2] += v[3]; v[1] = CRYPTO_rotl_u64(v[1], 13); v[3] = CRYPTO_rotl_u64(v[3], 16); v[1] ^= v[0]; v[3] ^= v[2]; v[0] = CRYPTO_rotl_u64(v[0], 32); v[2] += v[1]; v[0] += v[3]; v[1] = CRYPTO_rotl_u64(v[1], 17); v[3] = CRYPTO_rotl_u64(v[3], 21); v[1] ^= v[2]; v[3] ^= v[0]; v[2] = CRYPTO_rotl_u64(v[2], 32); } uint64_t SIPHASH_24(const uint64_t key[2], const uint8_t *input, size_t input_len) { const size_t orig_input_len = input_len; uint64_t v[4]; v[0] = key[0] ^ UINT64_C(0x736f6d6570736575); v[1] = key[1] ^ UINT64_C(0x646f72616e646f6d); v[2] = key[0] ^ UINT64_C(0x6c7967656e657261); v[3] = key[1] ^ UINT64_C(0x7465646279746573); while (input_len >= sizeof(uint64_t)) { uint64_t m = CRYPTO_load_u64_le(input); v[3] ^= m; siphash_round(v); siphash_round(v); v[0] ^= m; input += sizeof(uint64_t); input_len -= sizeof(uint64_t); } uint8_t last_block[8]; OPENSSL_memset(last_block, 0, sizeof(last_block)); OPENSSL_memcpy(last_block, input, input_len); last_block[7] = orig_input_len & 0xff; uint64_t last_block_word = CRYPTO_load_u64_le(last_block); v[3] ^= last_block_word; siphash_round(v); siphash_round(v); v[0] ^= last_block_word; v[2] ^= 0xff; siphash_round(v); siphash_round(v); siphash_round(v); siphash_round(v); return v[0] ^ v[1] ^ v[2] ^ v[3]; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/slhdsa/slhdsa.cc ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include "../fipsmodule/bcm_interface.h" static_assert(SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES == BCM_SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES, ""); static_assert(SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES == BCM_SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES, ""); static_assert(SLHDSA_SHA2_128S_SIGNATURE_BYTES == BCM_SLHDSA_SHA2_128S_SIGNATURE_BYTES, ""); void SLHDSA_SHA2_128S_generate_key( uint8_t out_public_key[SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], uint8_t out_private_key[SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES]) { BCM_slhdsa_sha2_128s_generate_key(out_public_key, out_private_key); } void SLHDSA_SHA2_128S_public_from_private( uint8_t out_public_key[SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], const uint8_t private_key[SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES]) { BCM_slhdsa_sha2_128s_public_from_private(out_public_key, private_key); } int SLHDSA_SHA2_128S_sign( uint8_t out_signature[SLHDSA_SHA2_128S_SIGNATURE_BYTES], const uint8_t private_key[SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES], const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t context_len) { return bcm_success(BCM_slhdsa_sha2_128s_sign(out_signature, private_key, msg, msg_len, context, context_len)); } int SLHDSA_SHA2_128S_verify( const uint8_t *signature, size_t signature_len, const uint8_t public_key[SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t context_len) { return bcm_success(BCM_slhdsa_sha2_128s_verify(signature, signature_len, public_key, msg, msg_len, context, context_len)); } int SLHDSA_SHA2_128S_prehash_sign( uint8_t out_signature[SLHDSA_SHA2_128S_SIGNATURE_BYTES], const uint8_t private_key[SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES], const uint8_t 
*hashed_msg, size_t hashed_msg_len, int hash_nid, const uint8_t *context, size_t context_len) { if (hash_nid != NID_sha256) { return 0; } return bcm_success(BCM_slhdsa_sha2_128s_prehash_sign( out_signature, private_key, hashed_msg, hashed_msg_len, hash_nid, context, context_len)); } int SLHDSA_SHA2_128S_prehash_verify( const uint8_t *signature, size_t signature_len, const uint8_t public_key[SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], const uint8_t *hashed_msg, size_t hashed_msg_len, int hash_nid, const uint8_t *context, size_t context_len) { if (hash_nid != NID_sha256) { return 0; } return bcm_success(BCM_slhdsa_sha2_128s_prehash_verify( signature, signature_len, public_key, hashed_msg, hashed_msg_len, hash_nid, context, context_len)); } int SLHDSA_SHA2_128S_prehash_warning_nonstandard_sign( uint8_t out_signature[SLHDSA_SHA2_128S_SIGNATURE_BYTES], const uint8_t private_key[SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES], const uint8_t *hashed_msg, size_t hashed_msg_len, int hash_nid, const uint8_t *context, size_t context_len) { if (hash_nid != NID_sha384) { return 0; } return bcm_success(BCM_slhdsa_sha2_128s_prehash_sign( out_signature, private_key, hashed_msg, hashed_msg_len, hash_nid, context, context_len)); } int SLHDSA_SHA2_128S_prehash_warning_nonstandard_verify( const uint8_t *signature, size_t signature_len, const uint8_t public_key[SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], const uint8_t *hashed_msg, size_t hashed_msg_len, int hash_nid, const uint8_t *context, size_t context_len) { if (hash_nid != NID_sha384) { return 0; } return bcm_success(BCM_slhdsa_sha2_128s_prehash_verify( signature, signature_len, public_key, hashed_msg, hashed_msg_len, hash_nid, context, context_len)); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/spake2plus/internal.h ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is 
hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_SPAKE2PLUS_INTERNAL_H #define OPENSSL_HEADER_SPAKE2PLUS_INTERNAL_H #include #include #include #include #include "../fipsmodule/ec/internal.h" BSSL_NAMESPACE_BEGIN // SPAKE2+. // // SPAKE2+ is an augmented password-authenticated key-exchange. It allows // two parties, a prover and verifier, to derive a strong shared key with no // risk of disclosing the password, known only to the prover, to the verifier. // (But note that the verifier can still attempt an offline, brute-force attack // to recover the password.) // // This is an implementation of SPAKE2+ using P-256 as the group, SHA-256 as // the hash function, HKDF-SHA256 as the key derivation function, and // HMAC-SHA256 as the message authentication code. // // See https://www.rfc-editor.org/rfc/rfc9383.html namespace spake2plus { // kShareSize is the size of a SPAKE2+ key share. constexpr size_t kShareSize = 65; // kConfirmSize is the size of a SPAKE2+ key confirmation message. constexpr size_t kConfirmSize = 32; // kVerifierSize is the size of the w0 and w1 values in the SPAKE2+ protocol. constexpr size_t kVerifierSize = 32; // kRegistrationRecordSize is the number of bytes in a registration record, // which is provided to the verifier. constexpr size_t kRegistrationRecordSize = 65; // kSecretSize is the number of bytes of shared secret that the SPAKE2+ protocol // generates. 
constexpr size_t kSecretSize = 32; // Register computes the values needed in the offline registration // step of the SPAKE2+ protocol. See the following for more details: // https://www.rfc-editor.org/rfc/rfc9383.html#section-3.2 // // The |password| argument is the mandatory prover password. The |out_w0|, // |out_w1|, and |out_registration_record| arguments are where the password // verifiers (w0 and w1) and registration record (L) are stored, respectively. // The prover is given |out_w0| and |out_w1| while the verifier is given // |out_w0| and |out_registration_record|. // // To ensure success, |out_w0| and |out_w1| must be of length |kVerifierSize|, // and |out_registration_record| of size |kRegistrationRecordSize|. [[nodiscard]] OPENSSL_EXPORT bool Register( Span out_w0, Span out_w1, Span out_registration_record, Span password, Span id_prover, Span id_verifier); class OPENSSL_EXPORT Prover { public: static constexpr bool kAllowUniquePtr = true; Prover(); ~Prover(); // Init creates a new prover, which can only be used for a single execution of // the protocol. // // The |context| argument is an application-specific value meant to constrain // the protocol execution. The |w0| and |w1| arguments are password verifier // values computed during the offline registration phase of the protocol. The // |id_prover| and |id_verifier| arguments allow optional, opaque names to be // bound into the protocol. See the following for more information about how // these identities may be chosen: // https://www.rfc-editor.org/rfc/rfc9383.html#name-definition-of-spake2 [[nodiscard]] bool Init(Span context, Span id_prover, Span id_verifier, Span w0, Span w1, Span x = Span()); // GenerateShare computes a SPAKE2+ share and writes it to |out_share|. // // This function can only be called once for a given |Prover|. To ensure // success, |out_share| must be |kShareSize| bytes. 
[[nodiscard]] bool GenerateShare(Span out_share); // ComputeConfirmation computes a SPAKE2+ key confirmation // message and writes it to |out_confirm|. It also computes the shared secret // and writes it to |out_secret|. // // This function can only be called once for a given |Prover|. // // To ensure success, |out_confirm| must be |kConfirmSize| bytes // and |out_secret| must be |kSecretSize| bytes. [[nodiscard]] bool ComputeConfirmation(Span out_confirm, Span out_secret, Span peer_share, Span peer_confirm); private: enum class State { kInit, kShareGenerated, kConfirmGenerated, kDone, }; State state_ = State::kInit; SHA256_CTX transcript_hash_; EC_SCALAR w0_; EC_SCALAR w1_; EC_SCALAR x_; EC_AFFINE X_; uint8_t share_[kShareSize]; }; class OPENSSL_EXPORT Verifier { public: static constexpr bool kAllowUniquePtr = true; Verifier(); ~Verifier(); // Init creates a new verifier, which can only be used for a single execution // of the protocol. // // The |context| argument is an application-specific value meant to constrain // the protocol execution. The |w0| and |registration_record| arguments are // required, and are computed by the prover via |Register|. Only the prover // can produce |w0| and |registration_record|, as they require // knowledge of the password. The prover must securely transmit this to the // verifier out-of-band. The |id_prover| and |id_verifier| arguments allow // optional, opaque names to be bound into the protocol. See the following for // more information about how these identities may be chosen: // https://www.rfc-editor.org/rfc/rfc9383.html#name-definition-of-spake2 [[nodiscard]] bool Init(Span context, Span id_prover, Span id_verifier, Span w0, Span registration_record, Span y = Span()); // ProcessProverShare computes a SPAKE2+ share from an input share, // |prover_share|, and writes it to |out_share|. It also computes the key // confirmation message and writes it to |out_confirm|. 
Finally, it computes // the shared secret and writes it to |out_secret|. // // This function can only be called once for a given |Verifier|. // // To ensure success, |out_share| must be |kShareSize| bytes, |out_confirm| // must be |kConfirmSize| bytes, and |out_secret| must be |kSecretSize| bytes. [[nodiscard]] bool ProcessProverShare(Span out_share, Span out_confirm, Span out_secret, Span prover_share); // VerifyProverConfirmation verifies a SPAKE2+ key confirmation message, // |prover_confirm|. // // This function can only be called once for a given |Verifier|. [[nodiscard]] bool VerifyProverConfirmation(Span peer_confirm); private: enum class State { kInit, kProverShareSeen, kDone, }; State state_ = State::kInit; SHA256_CTX transcript_hash_; EC_SCALAR w0_; EC_AFFINE L_; EC_SCALAR y_; uint8_t confirm_[kConfirmSize]; }; } // namespace spake2plus BSSL_NAMESPACE_END #endif // OPENSSL_HEADER_SPAKE2PLUS_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/spake2plus/spake2plus.cc ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../fipsmodule/bn/internal.h" #include "../fipsmodule/ec/internal.h" #include "../internal.h" #include "./internal.h" #include "CNIOBoringSSL_err.h" BSSL_NAMESPACE_BEGIN namespace spake2plus { namespace { const uint8_t kDefaultAdditionalData[32] = {0}; // https://www.rfc-editor.org/rfc/rfc9383.html#appendix-B // seed: 1.2.840.10045.3.1.7 point generation seed (M) // M = // 02886e2f97ace46e55ba9dd7242579f2993b64e16ef3dcab95afd497333d8fa12f // // `M` is interpreted as a X9.62-format compressed point. This is then the // uncompressed form: const uint8_t kM_bytes[] = { 0x04, 0x88, 0x6e, 0x2f, 0x97, 0xac, 0xe4, 0x6e, 0x55, 0xba, 0x9d, 0xd7, 0x24, 0x25, 0x79, 0xf2, 0x99, 0x3b, 0x64, 0xe1, 0x6e, 0xf3, 0xdc, 0xab, 0x95, 0xaf, 0xd4, 0x97, 0x33, 0x3d, 0x8f, 0xa1, 0x2f, 0x5f, 0xf3, 0x55, 0x16, 0x3e, 0x43, 0xce, 0x22, 0x4e, 0x0b, 0x0e, 0x65, 0xff, 0x02, 0xac, 0x8e, 0x5c, 0x7b, 0xe0, 0x94, 0x19, 0xc7, 0x85, 0xe0, 0xca, 0x54, 0x7d, 0x55, 0xa1, 0x2e, 0x2d, 0x20}; // https://www.rfc-editor.org/rfc/rfc9383.html#appendix-B // seed: 1.2.840.10045.3.1.7 point generation seed (N) // N = // 03d8bbd6c639c62937b04d997f38c3770719c629d7014d49a24b4f98baa1292b49 // // `N` is interpreted as a X9.62-format compressed point. 
This is then the // uncompressed form: const uint8_t kN_bytes[] = { 0x04, 0xd8, 0xbb, 0xd6, 0xc6, 0x39, 0xc6, 0x29, 0x37, 0xb0, 0x4d, 0x99, 0x7f, 0x38, 0xc3, 0x77, 0x07, 0x19, 0xc6, 0x29, 0xd7, 0x01, 0x4d, 0x49, 0xa2, 0x4b, 0x4f, 0x98, 0xba, 0xa1, 0x29, 0x2b, 0x49, 0x07, 0xd6, 0x0a, 0xa6, 0xbf, 0xad, 0xe4, 0x50, 0x08, 0xa6, 0x36, 0x33, 0x7f, 0x51, 0x68, 0xc6, 0x4d, 0x9b, 0xd3, 0x60, 0x34, 0x80, 0x8c, 0xd5, 0x64, 0x49, 0x0b, 0x1e, 0x65, 0x6e, 0xdb, 0xe7}; void UpdateWithLengthPrefix(SHA256_CTX *sha, Span data) { uint8_t len_le[8]; CRYPTO_store_u64_le(len_le, data.size()); SHA256_Update(sha, len_le, sizeof(len_le)); SHA256_Update(sha, data.data(), data.size()); } void ConstantToJacobian(const EC_GROUP *group, EC_JACOBIAN *out, bssl::Span in) { EC_AFFINE point; BSSL_CHECK(ec_point_from_uncompressed(group, &point, in.data(), in.size())); ec_affine_to_jacobian(group, out, &point); } void ScalarToSizedBuffer(const EC_GROUP *group, const EC_SCALAR *s, Span out_buf) { size_t out_bytes; ec_scalar_to_bytes(group, out_buf.data(), &out_bytes, s); BSSL_CHECK(out_bytes == out_buf.size()); } bool AddLengthPrefixed(CBB *cbb, Span bytes) { return CBB_add_u64le(cbb, bytes.size()) && CBB_add_bytes(cbb, bytes.data(), bytes.size()); } void InitTranscriptHash(SHA256_CTX *sha, Span context, Span id_prover, Span id_verifier) { SHA256_Init(sha); UpdateWithLengthPrefix(sha, context); UpdateWithLengthPrefix(sha, id_prover); UpdateWithLengthPrefix(sha, id_verifier); UpdateWithLengthPrefix(sha, kM_bytes); UpdateWithLengthPrefix(sha, kN_bytes); } bool ComputeTranscript(uint8_t out_prover_confirm[kConfirmSize], uint8_t out_verifier_confirm[kConfirmSize], uint8_t out_secret[kSecretSize], const uint8_t prover_share[kShareSize], const uint8_t verifier_share[kShareSize], SHA256_CTX *sha, const EC_AFFINE *Z, const EC_AFFINE *V, const EC_SCALAR *w0) { const EC_GROUP *group = EC_group_p256(); uint8_t Z_enc[kShareSize]; size_t Z_enc_len = ec_point_to_bytes(group, Z, POINT_CONVERSION_UNCOMPRESSED, Z_enc, 
sizeof(Z_enc)); BSSL_CHECK(Z_enc_len == sizeof(Z_enc)); uint8_t V_enc[kShareSize]; size_t V_enc_len = ec_point_to_bytes(group, V, POINT_CONVERSION_UNCOMPRESSED, V_enc, sizeof(V_enc)); BSSL_CHECK(V_enc_len == sizeof(V_enc)); uint8_t w0_enc[kVerifierSize]; ScalarToSizedBuffer(group, w0, w0_enc); uint8_t K_main[SHA256_DIGEST_LENGTH]; UpdateWithLengthPrefix(sha, Span(prover_share, kShareSize)); UpdateWithLengthPrefix(sha, Span(verifier_share, kShareSize)); UpdateWithLengthPrefix(sha, Z_enc); UpdateWithLengthPrefix(sha, V_enc); UpdateWithLengthPrefix(sha, w0_enc); SHA256_Final(K_main, sha); auto confirmation_str = StringAsBytes("ConfirmationKeys"); uint8_t keys[kSecretSize * 2]; if (!HKDF(keys, sizeof(keys), EVP_sha256(), K_main, sizeof(K_main), nullptr, 0, confirmation_str.data(), confirmation_str.size())) { return false; } auto secret_info_str = StringAsBytes("SharedKey"); if (!HKDF(out_secret, kSecretSize, EVP_sha256(), K_main, sizeof(K_main), nullptr, 0, secret_info_str.data(), secret_info_str.size())) { return false; } unsigned prover_confirm_len; if (HMAC(EVP_sha256(), keys, kSecretSize, verifier_share, kShareSize, out_prover_confirm, &prover_confirm_len) == nullptr) { return false; } BSSL_CHECK(prover_confirm_len == kConfirmSize); unsigned verifier_confirm_len; if (HMAC(EVP_sha256(), keys + kSecretSize, kSecretSize, prover_share, kShareSize, out_verifier_confirm, &verifier_confirm_len) == nullptr) { return false; } BSSL_CHECK(verifier_confirm_len == kConfirmSize); return true; } } // namespace bool Register(Span out_w0, Span out_w1, Span out_registration_record, Span password, Span id_prover, Span id_verifier) { if (out_w0.size() != kVerifierSize || out_w1.size() != kVerifierSize || out_registration_record.size() != kRegistrationRecordSize) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } // Offline registration format from: // https://www.rfc-editor.org/rfc/rfc9383.html#section-3.2 ScopedCBB mhf_input; if (!CBB_init(mhf_input.get(), 
password.size() + id_prover.size() + id_verifier.size() + 3 * sizeof(uint64_t)) || // !AddLengthPrefixed(mhf_input.get(), password) || !AddLengthPrefixed(mhf_input.get(), id_prover) || !AddLengthPrefixed(mhf_input.get(), id_verifier) || !CBB_flush(mhf_input.get())) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } // https://neuromancer.sk/std/nist/P-256 // sage: p = // 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff // ....: K = GF(p) // ....: a = // K(0xffffffff00000001000000000000000000000000fffffffffffffffffffffffc) // ....: b = // K(0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b) // ....: E = EllipticCurve(K, (a, b)) // ....: G = // E(0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296, // ....: 0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5) // ....: // E.set_order(0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc63 // ....: 2551 * 0x1) // sage: k = 64 // sage: L = (2 * (ceil(log(p)/log(2)) + k)) / 8 // RFC 9383 Section 3.2 constexpr size_t kKDFOutputSize = 80; constexpr size_t kKDFOutputWords = kKDFOutputSize / BN_BYTES; uint8_t key[kKDFOutputSize]; if (!EVP_PBE_scrypt((const char *)CBB_data(mhf_input.get()), CBB_len(mhf_input.get()), nullptr, 0, /*N=*/32768, /*r=*/8, /*p=*/1, /*max_mem=*/1024 * 1024 * 33, key, kKDFOutputSize)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } const EC_GROUP *group = EC_group_p256(); BN_ULONG w0_words[kKDFOutputWords / 2]; bn_big_endian_to_words(w0_words, kKDFOutputWords / 2, key, kKDFOutputSize / 2); EC_SCALAR w0; ec_scalar_reduce(group, &w0, w0_words, kKDFOutputWords / 2); ScalarToSizedBuffer(group, &w0, out_w0); BN_ULONG w1_words[kKDFOutputWords / 2]; bn_big_endian_to_words(w1_words, kKDFOutputWords / 2, key + kKDFOutputSize / 2, kKDFOutputSize / 2); EC_SCALAR w1; ec_scalar_reduce(group, &w1, w1_words, kKDFOutputWords / 2); ScalarToSizedBuffer(group, &w1, out_w1); EC_JACOBIAN L_j; EC_AFFINE L; if 
(!ec_point_mul_scalar_base(group, &L_j, &w1) || // !ec_jacobian_to_affine(group, &L, &L_j) || // !ec_point_to_bytes(group, &L, POINT_CONVERSION_UNCOMPRESSED, out_registration_record.data(), kRegistrationRecordSize)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } return true; } Prover::Prover() = default; Prover::~Prover() = default; bool Prover::Init(Span context, Span id_prover, Span id_verifier, Span w0, Span w1, Span x) { const EC_GROUP *group = EC_group_p256(); if (!ec_scalar_from_bytes(group, &w0_, w0.data(), w0.size()) || !ec_scalar_from_bytes(group, &w1_, w1.data(), w1.size()) || (!x.empty() && !ec_scalar_from_bytes(group, &x_, x.data(), x.size())) || // (x.empty() && !ec_random_scalar(group, &x_, kDefaultAdditionalData))) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } InitTranscriptHash(&transcript_hash_, context, id_prover, id_verifier); return true; } bool Prover::GenerateShare(Span out_share) { if (state_ != State::kInit || out_share.size() != kShareSize) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } // Compute X = x×P + w0×M. // TODO(crbug.com/383778231): This could be sped up with a constant-time, // two-point multiplication. 
const EC_GROUP *group = EC_group_p256(); EC_JACOBIAN l; if (!ec_point_mul_scalar_base(group, &l, &x_)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } EC_JACOBIAN M_j; ConstantToJacobian(group, &M_j, kM_bytes); EC_JACOBIAN r; if (!ec_point_mul_scalar(group, &r, &M_j, &w0_)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } EC_JACOBIAN X_j; group->meth->add(group, &X_j, &l, &r); if (!ec_jacobian_to_affine(group, &X_, &X_j)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } size_t written = ec_point_to_bytes(group, &X_, POINT_CONVERSION_UNCOMPRESSED, out_share.data(), kShareSize); BSSL_CHECK(written == kShareSize); memcpy(share_, out_share.data(), kShareSize); state_ = State::kShareGenerated; return true; } bool Prover::ComputeConfirmation(Span out_confirm, Span out_secret, Span peer_share, Span peer_confirm) { if (state_ != State::kShareGenerated || out_confirm.size() != kConfirmSize || out_secret.size() != kSecretSize || peer_share.size() != kShareSize || peer_confirm.size() != kConfirmSize) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } const EC_GROUP *group = EC_group_p256(); EC_AFFINE Y; if (!ec_point_from_uncompressed(group, &Y, peer_share.data(), peer_share.size())) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } EC_JACOBIAN N_j; ConstantToJacobian(group, &N_j, kN_bytes); EC_JACOBIAN r; if (!ec_point_mul_scalar(group, &r, &N_j, &w0_)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } ec_felem_neg(group, &r.Y, &r.Y); EC_JACOBIAN Y_j; ec_affine_to_jacobian(group, &Y_j, &Y); EC_JACOBIAN t; group->meth->add(group, &t, &Y_j, &r); EC_JACOBIAN tmp; EC_AFFINE Z, V; // TODO(crbug.com/383778231): The two affine conversions could be batched // together. 
if (!ec_point_mul_scalar(group, &tmp, &t, &x_) || // !ec_jacobian_to_affine(group, &Z, &tmp) || // !ec_point_mul_scalar(group, &tmp, &t, &w1_) || // !ec_jacobian_to_affine(group, &V, &tmp)) { return 0; } uint8_t verifier_confirm[kConfirmSize]; if (!ComputeTranscript(out_confirm.data(), verifier_confirm, out_secret.data(), share_, peer_share.data(), &transcript_hash_, &Z, &V, &w0_) || CRYPTO_memcmp(verifier_confirm, peer_confirm.data(), sizeof(verifier_confirm)) != 0) { return 0; } state_ = State::kDone; return true; } Verifier::Verifier() = default; Verifier::~Verifier() = default; bool Verifier::Init(Span context, Span id_prover, Span id_verifier, Span w0, Span registration_record, Span y) { const EC_GROUP *group = EC_group_p256(); if (!ec_scalar_from_bytes(group, &w0_, w0.data(), w0.size()) || !ec_point_from_uncompressed(group, &L_, registration_record.data(), registration_record.size()) || // (!y.empty() && !ec_scalar_from_bytes(group, &y_, y.data(), y.size())) || // (y.empty() && !ec_random_scalar(group, &y_, kDefaultAdditionalData))) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } InitTranscriptHash(&transcript_hash_, context, id_prover, id_verifier); return true; } bool Verifier::ProcessProverShare(Span out_share, Span out_confirm, Span out_secret, Span prover_share) { if (state_ != State::kInit || // out_share.size() != kShareSize || out_confirm.size() != kConfirmSize || out_secret.size() != kSecretSize || prover_share.size() != kShareSize) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } const EC_GROUP *group = EC_group_p256(); EC_JACOBIAN l, r, M_j, N_j; ConstantToJacobian(group, &M_j, kM_bytes); ConstantToJacobian(group, &N_j, kN_bytes); // Compute Y = y×P + w0×M. // TODO(crbug.com/383778231): This could be sped up with a constant-time, // two-point multiplication. 
if (!ec_point_mul_scalar_base(group, &l, &y_) || !ec_point_mul_scalar(group, &r, &N_j, &w0_)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } EC_JACOBIAN Y_j; EC_AFFINE Y; group->meth->add(group, &Y_j, &l, &r); if (!ec_jacobian_to_affine(group, &Y, &Y_j)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } const size_t written = ec_point_to_bytes( group, &Y, POINT_CONVERSION_UNCOMPRESSED, out_share.data(), kShareSize); BSSL_CHECK(written == kShareSize); EC_JACOBIAN r2; EC_AFFINE X; if (!ec_point_from_uncompressed(group, &X, prover_share.data(), prover_share.size()) || !ec_point_mul_scalar(group, &r2, &M_j, &w0_)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } ec_felem_neg(group, &r2.Y, &r2.Y); EC_JACOBIAN X_j, T; ec_affine_to_jacobian(group, &X_j, &X); group->meth->add(group, &T, &X_j, &r2); // TODO(crbug.com/383778231): The two affine conversions could be batched // together. EC_JACOBIAN tmp; EC_AFFINE Z; if (!ec_point_mul_scalar(group, &tmp, &T, &y_) || // !ec_jacobian_to_affine(group, &Z, &tmp)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } EC_JACOBIAN L_j; EC_AFFINE V; ec_affine_to_jacobian(group, &L_j, &L_); if (!ec_point_mul_scalar(group, &tmp, &L_j, &y_) || // !ec_jacobian_to_affine(group, &V, &tmp)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } if (!ComputeTranscript(confirm_, out_confirm.data(), out_secret.data(), prover_share.data(), out_share.data(), &transcript_hash_, &Z, &V, &w0_)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } state_ = State::kProverShareSeen; return true; } bool Verifier::VerifyProverConfirmation(Span peer_confirm) { if (state_ != State::kProverShareSeen || // peer_confirm.size() != kConfirmSize || // CRYPTO_memcmp(confirm_, peer_confirm.data(), sizeof(confirm_)) != 0) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_INTERNAL_ERROR); return false; } state_ = State::kDone; return true; } } // namespace spake2plus BSSL_NAMESPACE_END 
================================================ FILE: Sources/CNIOBoringSSL/crypto/stack/stack.cc ================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the five bare `#include` lines below lost their angle-bracket
// targets during extraction — restore from upstream BoringSSL stack.c before
// attempting to compile this dump.
#include
#include
#include
#include
#include

#include "../internal.h"

// stack_st backs the OPENSSL_STACK type: a dynamically-sized array of
// `void *` elements plus an optional comparison function that supports
// sorting and searching.
struct stack_st {
  // num contains the number of valid pointers in |data|.
  size_t num;
  // data is the backing array; only the first |num| entries are valid.
  void **data;
  // sorted is non-zero if the values pointed to by |data| are in ascending
  // order, based on |comp|.
  int sorted;
  // num_alloc contains the number of pointers allocated in the buffer pointed
  // to by |data|, which may be larger than |num|.
  size_t num_alloc;
  // comp is an optional comparison function.
  OPENSSL_sk_cmp_func comp;
};

// kMinSize is the number of pointers that will be initially allocated in a new
// stack.
static const size_t kMinSize = 4;

// OPENSSL_sk_new allocates an empty stack whose elements will be compared with
// |comp| (which may be NULL for pointer-equality semantics elsewhere).
// Returns NULL on allocation failure.
OPENSSL_STACK *OPENSSL_sk_new(OPENSSL_sk_cmp_func comp) {
  // NOTE(review): the `reinterpret_cast` calls in this file lost their
  // template arguments (e.g. reinterpret_cast<OPENSSL_STACK *>) during
  // extraction — confirm against upstream before compiling.
  OPENSSL_STACK *ret =
      reinterpret_cast(OPENSSL_zalloc(sizeof(OPENSSL_STACK)));
  if (ret == NULL) {
    return NULL;
  }

  // Pre-allocate |kMinSize| slots so small stacks avoid an early realloc.
  ret->data = reinterpret_cast(OPENSSL_calloc(kMinSize, sizeof(void *)));
  if (ret->data == NULL) {
    goto err;
  }

  ret->comp = comp;
  ret->num_alloc = kMinSize;
  return ret;

err:
  OPENSSL_free(ret);
  return NULL;
}

// OPENSSL_sk_new_null allocates an empty stack with no comparison function.
OPENSSL_STACK *OPENSSL_sk_new_null(void) { return OPENSSL_sk_new(NULL); }

// OPENSSL_sk_num returns the number of elements in |sk|, or zero when |sk| is
// NULL.
size_t OPENSSL_sk_num(const OPENSSL_STACK *sk) {
  if (sk == NULL) {
    return 0;
  }
  return sk->num;
}

// OPENSSL_sk_zero empties |sk| without freeing the elements themselves: the
// stored pointers are cleared and the element count reset to zero.
void OPENSSL_sk_zero(OPENSSL_STACK *sk) {
  if (sk == NULL || sk->num == 0) {
    return;
  }
  OPENSSL_memset(sk->data, 0, sizeof(void *) * sk->num);
  sk->num = 0;
  sk->sorted = 0;
}

// OPENSSL_sk_value returns element |i| of |sk|, or NULL when |sk| is NULL or
// |i| is out of range.
void *OPENSSL_sk_value(const OPENSSL_STACK *sk, size_t i) {
  if (!sk || i >= sk->num) {
    return NULL;
  }
  return sk->data[i];
}

// OPENSSL_sk_set overwrites element |i| of |sk| with |value| and returns
// |value|, or returns NULL when |sk| is NULL or |i| is out of range. The
// previous element is not freed.
void *OPENSSL_sk_set(OPENSSL_STACK *sk, size_t i, void *value) {
  if (!sk || i >= sk->num) {
    return NULL;
  }
  return sk->data[i] = value;
}

// OPENSSL_sk_free releases the stack structure and its pointer array, but not
// the elements the stack points to.
void OPENSSL_sk_free(OPENSSL_STACK *sk) {
  if (sk == NULL) {
    return;
  }
  OPENSSL_free(sk->data);
  OPENSSL_free(sk);
}

// OPENSSL_sk_pop_free_ex frees every non-NULL element of |sk| by invoking
// |free_func| through |call_free_func|, then frees the stack itself.
void OPENSSL_sk_pop_free_ex(OPENSSL_STACK *sk,
                            OPENSSL_sk_call_free_func call_free_func,
                            OPENSSL_sk_free_func free_func) {
  if (sk == NULL) {
    return;
  }
  for (size_t i = 0; i < sk->num; i++) {
    if (sk->data[i] != NULL) {
      call_free_func(free_func, sk->data[i]);
    }
  }
  OPENSSL_sk_free(sk);
}

// Historically, |sk_pop_free| called the function as |OPENSSL_sk_free_func|
// directly. This is undefined in C. Some callers called |sk_pop_free| directly,
// so we must maintain a compatibility version for now.
// call_free_func_legacy adapts a legacy |OPENSSL_sk_free_func| to the
// two-argument call shape used by |OPENSSL_sk_pop_free_ex|.
static void call_free_func_legacy(OPENSSL_sk_free_func func, void *ptr) {
  func(ptr);
}

// sk_pop_free is the legacy compatibility wrapper described above; it frees
// each element with |free_func| and then frees the stack.
void sk_pop_free(OPENSSL_STACK *sk, OPENSSL_sk_free_func free_func) {
  OPENSSL_sk_pop_free_ex(sk, call_free_func_legacy, free_func);
}

// OPENSSL_sk_insert inserts |p| into |sk| at index |where| (appending when
// |where| is past the end), growing the backing array as needed. Returns the
// new element count, or zero on error (NULL stack, overflow, or allocation
// failure). Insertion clears the |sorted| flag.
size_t OPENSSL_sk_insert(OPENSSL_STACK *sk, void *p, size_t where) {
  if (sk == NULL) {
    return 0;
  }
  // Element counts are capped at INT_MAX so legacy int-based APIs stay safe.
  if (sk->num >= INT_MAX) {
    OPENSSL_PUT_ERROR(CRYPTO, ERR_R_OVERFLOW);
    return 0;
  }

  if (sk->num_alloc <= sk->num + 1) {
    // Attempt to double the size of the array.
    size_t new_alloc = sk->num_alloc << 1;
    size_t alloc_size = new_alloc * sizeof(void *);
    void **data;

    // If the doubling overflowed, try to increment.
    if (new_alloc < sk->num_alloc || alloc_size / sizeof(void *) != new_alloc) {
      new_alloc = sk->num_alloc + 1;
      alloc_size = new_alloc * sizeof(void *);
    }

    // If the increment also overflowed, fail.
    if (new_alloc < sk->num_alloc || alloc_size / sizeof(void *) != new_alloc) {
      return 0;
    }

    // NOTE(review): cast lost its template argument during extraction.
    data = reinterpret_cast(OPENSSL_realloc(sk->data, alloc_size));
    if (data == NULL) {
      return 0;
    }

    sk->data = data;
    sk->num_alloc = new_alloc;
  }

  if (where >= sk->num) {
    sk->data[sk->num] = p;
  } else {
    // Shift the tail up by one slot to make room at |where|.
    OPENSSL_memmove(&sk->data[where + 1], &sk->data[where],
                    sizeof(void *) * (sk->num - where));
    sk->data[where] = p;
  }

  sk->num++;
  sk->sorted = 0;
  return sk->num;
}

// OPENSSL_sk_delete removes and returns the element at index |where|, or NULL
// when |sk| is NULL or |where| is out of range. The caller owns the returned
// pointer.
void *OPENSSL_sk_delete(OPENSSL_STACK *sk, size_t where) {
  void *ret;

  if (!sk || where >= sk->num) {
    return NULL;
  }

  ret = sk->data[where];

  if (where != sk->num - 1) {
    // Close the gap by shifting the tail down one slot.
    OPENSSL_memmove(&sk->data[where], &sk->data[where + 1],
                    sizeof(void *) * (sk->num - where - 1));
  }

  sk->num--;
  return ret;
}

// OPENSSL_sk_delete_ptr removes and returns the first element equal (by
// pointer identity) to |p|, or NULL when not found.
void *OPENSSL_sk_delete_ptr(OPENSSL_STACK *sk, const void *p) {
  if (sk == NULL) {
    return NULL;
  }

  for (size_t i = 0; i < sk->num; i++) {
    if (sk->data[i] == p) {
      return OPENSSL_sk_delete(sk, i);
    }
  }

  return NULL;
}

// OPENSSL_sk_delete_if removes, in place, every element for which |func|
// (invoked through |call_func| with |data|) returns non-zero, compacting the
// survivors to the front. Removed elements are not freed here.
void OPENSSL_sk_delete_if(OPENSSL_STACK *sk,
                          OPENSSL_sk_call_delete_if_func call_func,
                          OPENSSL_sk_delete_if_func func, void *data) {
  if (sk == NULL) {
    return;
  }

  size_t new_num = 0;
  for (size_t i = 0; i < sk->num; i++) {
    if (!call_func(func, sk->data[i], data)) {
      sk->data[new_num] = sk->data[i];
      new_num++;
    }
  }
  sk->num = new_num;
}

// OPENSSL_sk_find looks for |p| in |sk| and, on success, stores the index of
// the earliest match in |*out_index| (when non-NULL) and returns one. With no
// comparison function, matching is by pointer identity; otherwise |p| is
// compared via |sk->comp| (linear scan when unsorted, binary search when
// sorted). Returns zero when not found.
int OPENSSL_sk_find(const OPENSSL_STACK *sk, size_t *out_index, const void *p,
                    OPENSSL_sk_call_cmp_func call_cmp_func) {
  if (sk == NULL) {
    return 0;
  }

  if (sk->comp == NULL) {
    // Use pointer equality when no comparison function has been set.
    for (size_t i = 0; i < sk->num; i++) {
      if (sk->data[i] == p) {
        if (out_index) {
          *out_index = i;
        }
        return 1;
      }
    }
    return 0;
  }

  if (p == NULL) {
    return 0;
  }

  if (!OPENSSL_sk_is_sorted(sk)) {
    for (size_t i = 0; i < sk->num; i++) {
      if (call_cmp_func(sk->comp, p, sk->data[i]) == 0) {
        if (out_index) {
          *out_index = i;
        }
        return 1;
      }
    }
    return 0;
  }

  // The stack is sorted, so binary search to find the element.
  //
  // |lo| and |hi| maintain a half-open interval of where the answer may be. All
  // indices such that |lo <= idx < hi| are candidates.
  size_t lo = 0, hi = sk->num;
  while (lo < hi) {
    // Bias |mid| towards |lo|. See the |r == 0| case below.
    size_t mid = lo + (hi - lo - 1) / 2;
    assert(lo <= mid && mid < hi);
    int r = call_cmp_func(sk->comp, p, sk->data[mid]);
    if (r > 0) {
      lo = mid + 1;  // |mid| is too low.
    } else if (r < 0) {
      hi = mid;  // |mid| is too high.
    } else {
      // |mid| matches. However, this function returns the earliest match, so we
      // can only return if the range has size one.
      if (hi - lo == 1) {
        if (out_index != NULL) {
          *out_index = mid;
        }
        return 1;
      }
      // The sample is biased towards |lo|. |mid| can only be |hi - 1| if
      // |hi - lo| was one, so this makes forward progress.
      assert(mid + 1 < hi);
      hi = mid + 1;
    }
  }

  assert(lo == hi);
  return 0;  // Not found.
}

// OPENSSL_sk_shift removes and returns the first element of |sk|, or NULL when
// the stack is NULL or empty.
void *OPENSSL_sk_shift(OPENSSL_STACK *sk) {
  if (sk == NULL) {
    return NULL;
  }
  if (sk->num == 0) {
    return NULL;
  }
  return OPENSSL_sk_delete(sk, 0);
}

// OPENSSL_sk_push appends |p| to |sk|. Returns the new element count, or zero
// on error (see |OPENSSL_sk_insert|).
size_t OPENSSL_sk_push(OPENSSL_STACK *sk, void *p) {
  return OPENSSL_sk_insert(sk, p, sk->num);
}

// OPENSSL_sk_pop removes and returns the last element of |sk|, or NULL when
// the stack is NULL or empty.
void *OPENSSL_sk_pop(OPENSSL_STACK *sk) {
  if (sk == NULL) {
    return NULL;
  }
  if (sk->num == 0) {
    return NULL;
  }
  return OPENSSL_sk_delete(sk, sk->num - 1);
}

// OPENSSL_sk_dup makes a shallow copy of |sk|: the pointer array is duplicated
// but the elements themselves are shared with the original. Returns NULL on
// allocation failure.
OPENSSL_STACK *OPENSSL_sk_dup(const OPENSSL_STACK *sk) {
  if (sk == NULL) {
    return NULL;
  }

  // NOTE(review): both casts below lost their template arguments during
  // extraction — confirm against upstream before compiling.
  OPENSSL_STACK *ret =
      reinterpret_cast(OPENSSL_zalloc(sizeof(OPENSSL_STACK)));
  if (ret == NULL) {
    return NULL;
  }

  ret->data = reinterpret_cast(
      OPENSSL_memdup(sk->data, sizeof(void *) * sk->num_alloc));
  if (ret->data == NULL) {
    goto err;
  }

  ret->num = sk->num;
  ret->sorted = sk->sorted;
  ret->num_alloc = sk->num_alloc;
  ret->comp = sk->comp;
  return ret;

err:
  OPENSSL_sk_free(ret);
  return NULL;
}

// parent_idx returns the index of the parent of heap node |idx|.
static size_t parent_idx(size_t idx) {
  assert(idx > 0);
  return (idx - 1) / 2;
}

// left_idx returns the index of the left child of heap node |idx|.
static size_t left_idx(size_t idx) {
  // The largest possible index is |PTRDIFF_MAX|, not |SIZE_MAX|. If
  // |ptrdiff_t|, a signed type, is the same size as |size_t|, this cannot
  // overflow.
  assert(idx <= PTRDIFF_MAX);
  static_assert(PTRDIFF_MAX <= (SIZE_MAX - 1) / 2, "2 * idx + 1 may oveflow");
  return 2 * idx + 1;
}

// down_heap fixes the subtree rooted at |i|. |i|'s children must each satisfy
// the heap property. Only the first |num| elements of |sk| are considered.
static void down_heap(OPENSSL_STACK *sk, OPENSSL_sk_call_cmp_func call_cmp_func,
                      size_t i, size_t num) {
  assert(i < num && num <= sk->num);
  for (;;) {
    size_t left = left_idx(i);
    if (left >= num) {
      break;  // No left child.
    }

    // Swap |i| with the largest of its children.
    size_t next = i;
    if (call_cmp_func(sk->comp, sk->data[next], sk->data[left]) < 0) {
      next = left;
    }
    size_t right = left + 1;  // Cannot overflow because |left < num|.
    if (right < num &&
        call_cmp_func(sk->comp, sk->data[next], sk->data[right]) < 0) {
      next = right;
    }

    if (i == next) {
      break;  // |i| is already larger than its children.
    }

    void *tmp = sk->data[i];
    sk->data[i] = sk->data[next];
    sk->data[next] = tmp;
    i = next;
  }
}

// OPENSSL_sk_sort sorts |sk| in ascending order according to |sk->comp|
// (invoked through |call_cmp_func|) and sets the |sorted| flag. A NULL stack,
// a missing comparison function, or an already-sorted stack is a no-op.
void OPENSSL_sk_sort(OPENSSL_STACK *sk,
                     OPENSSL_sk_call_cmp_func call_cmp_func) {
  if (sk == NULL || sk->comp == NULL || sk->sorted) {
    return;
  }

  if (sk->num >= 2) {
    // |qsort| lacks a context parameter in the comparison function for us to
    // pass in |call_cmp_func| and |sk->comp|. While we could cast |sk->comp| to
    // the expected type, it is undefined behavior in C can trip sanitizers.
    // |qsort_r| and |qsort_s| avoid this, but using them is impractical. See
    // https://stackoverflow.com/a/39561369
    //
    // Use our own heap sort instead. This is not performance-sensitive, so we
    // optimize for simplicity and size. First, build a max-heap in place.
    // (The loop counts down; it stops when |i| wraps past zero.)
    for (size_t i = parent_idx(sk->num - 1); i < sk->num; i--) {
      down_heap(sk, call_cmp_func, i, sk->num);
    }

    // Iteratively remove the maximum element to populate the result in reverse.
    for (size_t i = sk->num - 1; i > 0; i--) {
      void *tmp = sk->data[0];
      sk->data[0] = sk->data[i];
      sk->data[i] = tmp;
      down_heap(sk, call_cmp_func, 0, i);
    }
  }
  sk->sorted = 1;
}

// OPENSSL_sk_is_sorted returns one when |sk| is known to be sorted (or is
// trivially sorted), zero otherwise.
int OPENSSL_sk_is_sorted(const OPENSSL_STACK *sk) {
  if (!sk) {
    return 1;
  }
  // Zero- and one-element lists are always sorted.
  return sk->sorted || (sk->comp != NULL && sk->num < 2);
}

// OPENSSL_sk_set_cmp_func installs |comp| as the comparison function and
// returns the previous one. Changing the function invalidates the |sorted|
// flag. Note |sk| must be non-NULL; it is dereferenced unconditionally.
OPENSSL_sk_cmp_func OPENSSL_sk_set_cmp_func(OPENSSL_STACK *sk,
                                            OPENSSL_sk_cmp_func comp) {
  OPENSSL_sk_cmp_func old = sk->comp;

  if (sk->comp != comp) {
    sk->sorted = 0;
  }
  sk->comp = comp;

  return old;
}

// OPENSSL_sk_deep_copy copies |sk| and every non-NULL element via |copy_func|
// (invoked through |call_copy_func|). On any element-copy failure, the
// already-copied elements are freed via |free_func| and NULL is returned; the
// original stack is left untouched.
OPENSSL_STACK *OPENSSL_sk_deep_copy(const OPENSSL_STACK *sk,
                                    OPENSSL_sk_call_copy_func call_copy_func,
                                    OPENSSL_sk_copy_func copy_func,
                                    OPENSSL_sk_call_free_func call_free_func,
                                    OPENSSL_sk_free_func free_func) {
  OPENSSL_STACK *ret = OPENSSL_sk_dup(sk);
  if (ret == NULL) {
    return NULL;
  }

  for (size_t i = 0; i < ret->num; i++) {
    if (ret->data[i] == NULL) {
      continue;
    }
    ret->data[i] = call_copy_func(copy_func, ret->data[i]);
    if (ret->data[i] == NULL) {
      // Copy failed: free everything copied so far and bail out.
      for (size_t j = 0; j < i; j++) {
        if (ret->data[j] != NULL) {
          call_free_func(free_func, ret->data[j]);
        }
      }
      OPENSSL_sk_free(ret);
      return NULL;
    }
  }

  return ret;
}

// Legacy un-prefixed wrappers kept for source compatibility; each forwards
// directly to its OPENSSL_sk_* counterpart.
OPENSSL_STACK *sk_new_null(void) { return OPENSSL_sk_new_null(); }

size_t sk_num(const OPENSSL_STACK *sk) { return OPENSSL_sk_num(sk); }

void *sk_value(const OPENSSL_STACK *sk, size_t i) {
  return OPENSSL_sk_value(sk, i);
}

void sk_free(OPENSSL_STACK *sk) { OPENSSL_sk_free(sk); }

size_t sk_push(OPENSSL_STACK *sk, void *p) { return OPENSSL_sk_push(sk, p); }

void *sk_pop(OPENSSL_STACK *sk) { return OPENSSL_sk_pop(sk); }

void sk_pop_free_ex(OPENSSL_STACK *sk, OPENSSL_sk_call_free_func call_free_func,
                    OPENSSL_sk_free_func free_func) {
  OPENSSL_sk_pop_free_ex(sk, call_free_func, free_func);
}

================================================ FILE: Sources/CNIOBoringSSL/crypto/thread.cc ================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the bare `#include` below lost its target during extraction.
#include

// This file implements the legacy OpenSSL locking/threading callback API as
// no-ops: the setters ignore their arguments, the getters return NULL, and
// the numeric queries return fixed values.

int CRYPTO_num_locks(void) { return 1; }

void CRYPTO_set_locking_callback(void (*func)(int mode, int lock_num,
                                              const char *file, int line)) {}

void (*CRYPTO_get_locking_callback(void))(int mode, int lock_num,
                                          const char *file, int line) {
  return NULL;
}

void CRYPTO_set_add_lock_callback(int (*func)(int *num, int mount, int lock_num,
                                              const char *file, int line)) {}

const char *CRYPTO_get_lock_name(int lock_num) {
  return "No old-style OpenSSL locks anymore";
}

int CRYPTO_THREADID_set_callback(void (*func)(CRYPTO_THREADID *)) { return 1; }

void CRYPTO_THREADID_set_numeric(CRYPTO_THREADID *id, unsigned long val) {}

void CRYPTO_THREADID_set_pointer(CRYPTO_THREADID *id, void *ptr) {}

void CRYPTO_THREADID_current(CRYPTO_THREADID *id) {}

void CRYPTO_set_id_callback(unsigned long (*func)(void)) {}

void CRYPTO_set_dynlock_create_callback(struct CRYPTO_dynlock_value *(
    *dyn_create_function)(const char *file, int line)) {}

void CRYPTO_set_dynlock_lock_callback(void (*dyn_lock_function)(
    int mode, struct CRYPTO_dynlock_value *l, const char *file, int line)) {}

void CRYPTO_set_dynlock_destroy_callback(void (*dyn_destroy_function)(
    struct CRYPTO_dynlock_value *l, const char *file, int line)) {}

struct CRYPTO_dynlock_value *(*CRYPTO_get_dynlock_create_callback(void))(
    const char *file, int line) {
  return NULL;
}

void (*CRYPTO_get_dynlock_lock_callback(void))(int mode,
                                               struct CRYPTO_dynlock_value *l,
                                               const char *file, int line) {
  return NULL;
}

void (*CRYPTO_get_dynlock_destroy_callback(void))(
    struct CRYPTO_dynlock_value *l, const char *file, int line) {
  return NULL;
}

================================================ FILE: Sources/CNIOBoringSSL/crypto/thread_none.cc ================================================
/* Copyright 2015 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or
without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#include "internal.h"
#if !defined(OPENSSL_THREADS)
// Single-threaded fallback: with OPENSSL_THREADS disabled there is exactly
// one thread, so mutexes are no-ops and "thread-local" data is ordinary
// static storage.
void CRYPTO_MUTEX_init(CRYPTO_MUTEX *lock) {}
void CRYPTO_MUTEX_lock_read(CRYPTO_MUTEX *lock) {}
void CRYPTO_MUTEX_lock_write(CRYPTO_MUTEX *lock) {}
void CRYPTO_MUTEX_unlock_read(CRYPTO_MUTEX *lock) {}
void CRYPTO_MUTEX_unlock_write(CRYPTO_MUTEX *lock) {}
void CRYPTO_MUTEX_cleanup(CRYPTO_MUTEX *lock) {}
// A plain flag suffices for once-initialization: no other thread can race
// the check-and-set in this configuration.
void CRYPTO_once(CRYPTO_once_t *once, void (*init)(void)) { if (*once) { return; } *once = 1; init(); }
// One shared slot array stands in for per-thread storage.
static void *g_thread_locals[NUM_OPENSSL_THREAD_LOCALS];
void *CRYPTO_get_thread_local(thread_local_data_t index) { return g_thread_locals[index]; }
// Note: |destructor| is neither stored nor invoked here — the single
// "thread" never exits, so destructors never run in this configuration.
int CRYPTO_set_thread_local(thread_local_data_t index, void *value, thread_local_destructor_t destructor) { g_thread_locals[index] = value; return 1; }
#endif  // !OPENSSL_THREADS
================================================ FILE: Sources/CNIOBoringSSL/crypto/thread_pthread.cc ================================================ /* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies.
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ // Ensure we can't call OPENSSL_malloc circularly. #define _BORINGSSL_PROHIBIT_OPENSSL_MALLOC #include "internal.h" #if defined(OPENSSL_PTHREADS) #include #include #include #include void CRYPTO_MUTEX_init(CRYPTO_MUTEX *lock) { if (pthread_rwlock_init(lock, NULL) != 0) { abort(); } } void CRYPTO_MUTEX_lock_read(CRYPTO_MUTEX *lock) { if (pthread_rwlock_rdlock(lock) != 0) { abort(); } } void CRYPTO_MUTEX_lock_write(CRYPTO_MUTEX *lock) { if (pthread_rwlock_wrlock(lock) != 0) { abort(); } } void CRYPTO_MUTEX_unlock_read(CRYPTO_MUTEX *lock) { if (pthread_rwlock_unlock(lock) != 0) { abort(); } } void CRYPTO_MUTEX_unlock_write(CRYPTO_MUTEX *lock) { if (pthread_rwlock_unlock(lock) != 0) { abort(); } } void CRYPTO_MUTEX_cleanup(CRYPTO_MUTEX *lock) { pthread_rwlock_destroy(lock); } void CRYPTO_once(CRYPTO_once_t *once, void (*init)(void)) { if (pthread_once(once, init) != 0) { abort(); } } static pthread_mutex_t g_destructors_lock = PTHREAD_MUTEX_INITIALIZER; static thread_local_destructor_t g_destructors[NUM_OPENSSL_THREAD_LOCALS]; // thread_local_destructor is called when a thread exits. It releases thread // local data for that thread only. 
static void thread_local_destructor(void *arg) { if (arg == NULL) { return; } thread_local_destructor_t destructors[NUM_OPENSSL_THREAD_LOCALS]; if (pthread_mutex_lock(&g_destructors_lock) != 0) { return; } OPENSSL_memcpy(destructors, g_destructors, sizeof(destructors)); pthread_mutex_unlock(&g_destructors_lock); unsigned i; void **pointers = reinterpret_cast(arg); for (i = 0; i < NUM_OPENSSL_THREAD_LOCALS; i++) { if (destructors[i] != NULL) { destructors[i](pointers[i]); } } free(pointers); } static pthread_once_t g_thread_local_init_once = PTHREAD_ONCE_INIT; static pthread_key_t g_thread_local_key; static int g_thread_local_key_created = 0; static void thread_local_init(void) { g_thread_local_key_created = pthread_key_create(&g_thread_local_key, thread_local_destructor) == 0; } void *CRYPTO_get_thread_local(thread_local_data_t index) { CRYPTO_once(&g_thread_local_init_once, thread_local_init); if (!g_thread_local_key_created) { return NULL; } void **pointers = reinterpret_cast(pthread_getspecific(g_thread_local_key)); if (pointers == NULL) { return NULL; } return pointers[index]; } int CRYPTO_set_thread_local(thread_local_data_t index, void *value, thread_local_destructor_t destructor) { CRYPTO_once(&g_thread_local_init_once, thread_local_init); if (!g_thread_local_key_created) { destructor(value); return 0; } void **pointers = reinterpret_cast(pthread_getspecific(g_thread_local_key)); if (pointers == NULL) { pointers = reinterpret_cast( malloc(sizeof(void *) * NUM_OPENSSL_THREAD_LOCALS)); if (pointers == NULL) { destructor(value); return 0; } OPENSSL_memset(pointers, 0, sizeof(void *) * NUM_OPENSSL_THREAD_LOCALS); if (pthread_setspecific(g_thread_local_key, pointers) != 0) { free(pointers); destructor(value); return 0; } } if (pthread_mutex_lock(&g_destructors_lock) != 0) { destructor(value); return 0; } g_destructors[index] = destructor; pthread_mutex_unlock(&g_destructors_lock); pointers[index] = value; return 1; } #endif // OPENSSL_PTHREADS 
================================================ FILE: Sources/CNIOBoringSSL/crypto/thread_win.cc ================================================ /* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ // Ensure we can't call OPENSSL_malloc circularly. #define _BORINGSSL_PROHIBIT_OPENSSL_MALLOC #include "internal.h" #if defined(OPENSSL_WINDOWS_THREADS) OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include OPENSSL_MSVC_PRAGMA(warning(pop)) #include #include #include static BOOL CALLBACK call_once_init(INIT_ONCE *once, void *arg, void **out) { void (**init)(void) = (void (**)(void))arg; (**init)(); return TRUE; } void CRYPTO_once(CRYPTO_once_t *once, void (*init)(void)) { if (!InitOnceExecuteOnce(once, call_once_init, &init, NULL)) { abort(); } } void CRYPTO_MUTEX_init(CRYPTO_MUTEX *lock) { InitializeSRWLock(lock); } void CRYPTO_MUTEX_lock_read(CRYPTO_MUTEX *lock) { AcquireSRWLockShared(lock); } void CRYPTO_MUTEX_lock_write(CRYPTO_MUTEX *lock) { AcquireSRWLockExclusive(lock); } void CRYPTO_MUTEX_unlock_read(CRYPTO_MUTEX *lock) { ReleaseSRWLockShared(lock); } void CRYPTO_MUTEX_unlock_write(CRYPTO_MUTEX *lock) { ReleaseSRWLockExclusive(lock); } void CRYPTO_MUTEX_cleanup(CRYPTO_MUTEX *lock) { // SRWLOCKs require no cleanup. 
} static SRWLOCK g_destructors_lock = SRWLOCK_INIT; static thread_local_destructor_t g_destructors[NUM_OPENSSL_THREAD_LOCALS]; static CRYPTO_once_t g_thread_local_init_once = CRYPTO_ONCE_INIT; static DWORD g_thread_local_key; static int g_thread_local_failed; static void thread_local_init(void) { g_thread_local_key = TlsAlloc(); g_thread_local_failed = (g_thread_local_key == TLS_OUT_OF_INDEXES); } static void NTAPI thread_local_destructor(PVOID module, DWORD reason, PVOID reserved) { // Only free memory on |DLL_THREAD_DETACH|, not |DLL_PROCESS_DETACH|. In // VS2015's debug runtime, the C runtime has been unloaded by the time // |DLL_PROCESS_DETACH| runs. See https://crbug.com/575795. This is consistent // with |pthread_key_create| which does not call destructors on process exit, // only thread exit. if (reason != DLL_THREAD_DETACH) { return; } CRYPTO_once(&g_thread_local_init_once, thread_local_init); if (g_thread_local_failed) { return; } void **pointers = (void **)TlsGetValue(g_thread_local_key); if (pointers == NULL) { return; } thread_local_destructor_t destructors[NUM_OPENSSL_THREAD_LOCALS]; AcquireSRWLockExclusive(&g_destructors_lock); OPENSSL_memcpy(destructors, g_destructors, sizeof(destructors)); ReleaseSRWLockExclusive(&g_destructors_lock); for (unsigned i = 0; i < NUM_OPENSSL_THREAD_LOCALS; i++) { if (destructors[i] != NULL) { destructors[i](pointers[i]); } } free(pointers); } // Thread Termination Callbacks. // // Windows doesn't support a per-thread destructor with its TLS primitives. // So, we build it manually by inserting a function to be called on each // thread's exit. This magic is from http://www.codeproject.com/threads/tls.asp // and it works for VC++ 7.0 and later. // // Force a reference to _tls_used to make the linker create the TLS directory // if it's not already there. (E.g. if __declspec(thread) is not used). Force // a reference to p_thread_callback_boringssl to prevent whole program // optimization from discarding the variable. 
// // Note, in the prefixed build, |p_thread_callback_boringssl| may be a macro. #define STRINGIFY(x) #x #define EXPAND_AND_STRINGIFY(x) STRINGIFY(x) #ifdef _WIN64 __pragma(comment(linker, "/INCLUDE:_tls_used")) __pragma(comment( linker, "/INCLUDE:" EXPAND_AND_STRINGIFY(p_thread_callback_boringssl))) #else __pragma(comment(linker, "/INCLUDE:__tls_used")) __pragma(comment( linker, "/INCLUDE:_" EXPAND_AND_STRINGIFY(p_thread_callback_boringssl))) #endif // .CRT$XLA to .CRT$XLZ is an array of PIMAGE_TLS_CALLBACK pointers that are // called automatically by the OS loader code (not the CRT) when the module is // loaded and on thread creation. They are NOT called if the module has been // loaded by a LoadLibrary() call. It must have implicitly been loaded at // process startup. // // By implicitly loaded, I mean that it is directly referenced by the main EXE // or by one of its dependent DLLs. Delay-loaded DLL doesn't count as being // implicitly loaded. // // See VC\crt\src\tlssup.c for reference. // The linker must not discard p_thread_callback_boringssl. (We force a // reference to this variable with a linker /INCLUDE:symbol pragma to ensure // that.) If this variable is discarded, the OnThreadExit function will never // be called. #ifdef _WIN64 // .CRT section is merged with .rdata on x64 so it must be constant data. #pragma const_seg(".CRT$XLC") // clang-format off // When defining a const variable, it must have external linkage to be sure // the linker doesn't discard it. extern "C" { extern const PIMAGE_TLS_CALLBACK p_thread_callback_boringssl; } // clang-format on const PIMAGE_TLS_CALLBACK p_thread_callback_boringssl = thread_local_destructor; // Reset the default section. #pragma const_seg() #else #pragma data_seg(".CRT$XLC") // clang-format off extern "C" { extern PIMAGE_TLS_CALLBACK p_thread_callback_boringssl; } // clang-format on PIMAGE_TLS_CALLBACK p_thread_callback_boringssl = thread_local_destructor; // Reset the default section. 
#pragma data_seg() #endif // _WIN64 static void **get_thread_locals(void) { // |TlsGetValue| clears the last error even on success, so that callers may // distinguish it successfully returning NULL or failing. It is documented to // never fail if the argument is a valid index from |TlsAlloc|, so we do not // need to handle this. // // However, this error-mangling behavior interferes with the caller's use of // |GetLastError|. In particular |SSL_get_error| queries the error queue to // determine whether the caller should look at the OS's errors. To avoid // destroying state, save and restore the Windows error. // // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686812(v=vs.85).aspx DWORD last_error = GetLastError(); void **ret = reinterpret_cast(TlsGetValue(g_thread_local_key)); SetLastError(last_error); return ret; } void *CRYPTO_get_thread_local(thread_local_data_t index) { CRYPTO_once(&g_thread_local_init_once, thread_local_init); if (g_thread_local_failed) { return NULL; } void **pointers = get_thread_locals(); if (pointers == NULL) { return NULL; } return pointers[index]; } int CRYPTO_set_thread_local(thread_local_data_t index, void *value, thread_local_destructor_t destructor) { CRYPTO_once(&g_thread_local_init_once, thread_local_init); if (g_thread_local_failed) { destructor(value); return 0; } void **pointers = get_thread_locals(); if (pointers == NULL) { pointers = reinterpret_cast( malloc(sizeof(void *) * NUM_OPENSSL_THREAD_LOCALS)); if (pointers == NULL) { destructor(value); return 0; } OPENSSL_memset(pointers, 0, sizeof(void *) * NUM_OPENSSL_THREAD_LOCALS); if (TlsSetValue(g_thread_local_key, pointers) == 0) { free(pointers); destructor(value); return 0; } } AcquireSRWLockExclusive(&g_destructors_lock); g_destructors[index] = destructor; ReleaseSRWLockExclusive(&g_destructors_lock); pointers[index] = value; return 1; } #endif // OPENSSL_WINDOWS_THREADS ================================================ FILE: 
Sources/CNIOBoringSSL/crypto/trust_token/internal.h ================================================ /* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_TRUST_TOKEN_INTERNAL_H #define OPENSSL_HEADER_TRUST_TOKEN_INTERNAL_H #include #include #include #include #include "../fipsmodule/ec/internal.h" #include #if defined(__cplusplus) extern "C" { #endif // For the following cryptographic schemes, we use P-384 instead of our usual // choice of P-256. See Appendix I of // https://eprint.iacr.org/2020/072/20200324:214215 which describes two attacks // which may affect smaller curves. In particular, p-1 for P-256 is smooth, // giving a low complexity for the p-1 attack. P-384's p-1 has a 281-bit prime // factor, // 3055465788140352002733946906144561090641249606160407884365391979704929268480326390471. // This lower-bounds the p-1 attack at O(2^140). The p+1 attack is lower-bounded // by O(p^(1/3)) or O(2^128), so we do not need to check the smoothness of p+1. // TRUST_TOKEN_NONCE_SIZE is the size of nonces used as part of the Trust_Token // protocol. #define TRUST_TOKEN_NONCE_SIZE 64 typedef struct { // TODO(https://crbug.com/boringssl/334): These should store |EC_PRECOMP| so // that |TRUST_TOKEN_finish_issuance| can use |ec_point_mul_scalar_precomp|. 
EC_AFFINE pub0; EC_AFFINE pub1; EC_AFFINE pubs; } TRUST_TOKEN_CLIENT_KEY; typedef struct { EC_SCALAR x0; EC_SCALAR y0; EC_SCALAR x1; EC_SCALAR y1; EC_SCALAR xs; EC_SCALAR ys; EC_AFFINE pub0; EC_PRECOMP pub0_precomp; EC_AFFINE pub1; EC_PRECOMP pub1_precomp; EC_AFFINE pubs; EC_PRECOMP pubs_precomp; } TRUST_TOKEN_ISSUER_KEY; // TRUST_TOKEN_PRETOKEN represents the intermediate state a client keeps during // a Trust_Token issuance operation. typedef struct pmb_pretoken_st { uint8_t salt[TRUST_TOKEN_NONCE_SIZE]; uint8_t t[TRUST_TOKEN_NONCE_SIZE]; EC_SCALAR r; EC_AFFINE Tp; } TRUST_TOKEN_PRETOKEN; // TRUST_TOKEN_PRETOKEN_free releases the memory associated with |token|. OPENSSL_EXPORT void TRUST_TOKEN_PRETOKEN_free(TRUST_TOKEN_PRETOKEN *token); DEFINE_STACK_OF(TRUST_TOKEN_PRETOKEN) // PMBTokens. // // PMBTokens is described in https://eprint.iacr.org/2020/072/20200324:214215 // and provides anonymous tokens with private metadata. We implement the // construction with validity verification, described in appendix H, // construction 6. // The following functions implement the corresponding |TRUST_TOKENS_METHOD| // functions for |TRUST_TOKENS_experiment_v1|'s PMBTokens construction which // uses P-384. 
int pmbtoken_exp1_generate_key(CBB *out_private, CBB *out_public); int pmbtoken_exp1_derive_key_from_secret(CBB *out_private, CBB *out_public, const uint8_t *secret, size_t secret_len); int pmbtoken_exp1_client_key_from_bytes(TRUST_TOKEN_CLIENT_KEY *key, const uint8_t *in, size_t len); int pmbtoken_exp1_issuer_key_from_bytes(TRUST_TOKEN_ISSUER_KEY *key, const uint8_t *in, size_t len); STACK_OF(TRUST_TOKEN_PRETOKEN) *pmbtoken_exp1_blind(CBB *cbb, size_t count, int include_message, const uint8_t *msg, size_t msg_len); int pmbtoken_exp1_sign(const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs, size_t num_requested, size_t num_to_issue, uint8_t private_metadata); STACK_OF(TRUST_TOKEN) *pmbtoken_exp1_unblind( const TRUST_TOKEN_CLIENT_KEY *key, const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count, uint32_t key_id); int pmbtoken_exp1_read(const TRUST_TOKEN_ISSUER_KEY *key, uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE], uint8_t *out_private_metadata, const uint8_t *token, size_t token_len, int include_message, const uint8_t *msg, size_t msg_len); // pmbtoken_exp1_get_h_for_testing returns H in uncompressed coordinates. This // function is used to confirm H was computed as expected. OPENSSL_EXPORT int pmbtoken_exp1_get_h_for_testing(uint8_t out[97]); // The following functions implement the corresponding |TRUST_TOKENS_METHOD| // functions for |TRUST_TOKENS_experiment_v2|'s PMBTokens construction which // uses P-384. 
int pmbtoken_exp2_generate_key(CBB *out_private, CBB *out_public); int pmbtoken_exp2_derive_key_from_secret(CBB *out_private, CBB *out_public, const uint8_t *secret, size_t secret_len); int pmbtoken_exp2_client_key_from_bytes(TRUST_TOKEN_CLIENT_KEY *key, const uint8_t *in, size_t len); int pmbtoken_exp2_issuer_key_from_bytes(TRUST_TOKEN_ISSUER_KEY *key, const uint8_t *in, size_t len); STACK_OF(TRUST_TOKEN_PRETOKEN) *pmbtoken_exp2_blind(CBB *cbb, size_t count, int include_message, const uint8_t *msg, size_t msg_len); int pmbtoken_exp2_sign(const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs, size_t num_requested, size_t num_to_issue, uint8_t private_metadata); STACK_OF(TRUST_TOKEN) *pmbtoken_exp2_unblind( const TRUST_TOKEN_CLIENT_KEY *key, const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count, uint32_t key_id); int pmbtoken_exp2_read(const TRUST_TOKEN_ISSUER_KEY *key, uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE], uint8_t *out_private_metadata, const uint8_t *token, size_t token_len, int include_message, const uint8_t *msg, size_t msg_len); // pmbtoken_exp2_get_h_for_testing returns H in uncompressed coordinates. This // function is used to confirm H was computed as expected. OPENSSL_EXPORT int pmbtoken_exp2_get_h_for_testing(uint8_t out[97]); // The following functions implement the corresponding |TRUST_TOKENS_METHOD| // functions for |TRUST_TOKENS_pst_v1|'s PMBTokens construction which uses // P-384. 
int pmbtoken_pst1_generate_key(CBB *out_private, CBB *out_public); int pmbtoken_pst1_derive_key_from_secret(CBB *out_private, CBB *out_public, const uint8_t *secret, size_t secret_len); int pmbtoken_pst1_client_key_from_bytes(TRUST_TOKEN_CLIENT_KEY *key, const uint8_t *in, size_t len); int pmbtoken_pst1_issuer_key_from_bytes(TRUST_TOKEN_ISSUER_KEY *key, const uint8_t *in, size_t len); STACK_OF(TRUST_TOKEN_PRETOKEN) *pmbtoken_pst1_blind(CBB *cbb, size_t count, int include_message, const uint8_t *msg, size_t msg_len); int pmbtoken_pst1_sign(const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs, size_t num_requested, size_t num_to_issue, uint8_t private_metadata); STACK_OF(TRUST_TOKEN) *pmbtoken_pst1_unblind( const TRUST_TOKEN_CLIENT_KEY *key, const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count, uint32_t key_id); int pmbtoken_pst1_read(const TRUST_TOKEN_ISSUER_KEY *key, uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE], uint8_t *out_private_metadata, const uint8_t *token, size_t token_len, int include_message, const uint8_t *msg, size_t msg_len); // pmbtoken_pst1_get_h_for_testing returns H in uncompressed coordinates. This // function is used to confirm H was computed as expected. OPENSSL_EXPORT int pmbtoken_pst1_get_h_for_testing(uint8_t out[97]); // VOPRF. // // VOPRFs are described in https://tools.ietf.org/html/draft-irtf-cfrg-voprf-04 // and provide anonymous tokens. This implementation uses TrustToken DSTs and // the DLEQ batching primitive from // https://eprint.iacr.org/2020/072/20200324:214215. // VOPRF only uses the |pub|' field of the TRUST_TOKEN_CLIENT_KEY and // |xs|/|pubs| fields of the TRUST_TOKEN_ISSUER_KEY. // The following functions implement the corresponding |TRUST_TOKENS_METHOD| // functions for |TRUST_TOKENS_experiment_v2|'s VOPRF construction which uses // P-384. 
int voprf_exp2_generate_key(CBB *out_private, CBB *out_public); int voprf_exp2_derive_key_from_secret(CBB *out_private, CBB *out_public, const uint8_t *secret, size_t secret_len); int voprf_exp2_client_key_from_bytes(TRUST_TOKEN_CLIENT_KEY *key, const uint8_t *in, size_t len); int voprf_exp2_issuer_key_from_bytes(TRUST_TOKEN_ISSUER_KEY *key, const uint8_t *in, size_t len); STACK_OF(TRUST_TOKEN_PRETOKEN) *voprf_exp2_blind(CBB *cbb, size_t count, int include_message, const uint8_t *msg, size_t msg_len); int voprf_exp2_sign(const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs, size_t num_requested, size_t num_to_issue, uint8_t private_metadata); STACK_OF(TRUST_TOKEN) *voprf_exp2_unblind( const TRUST_TOKEN_CLIENT_KEY *key, const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count, uint32_t key_id); int voprf_exp2_read(const TRUST_TOKEN_ISSUER_KEY *key, uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE], uint8_t *out_private_metadata, const uint8_t *token, size_t token_len, int include_message, const uint8_t *msg, size_t msg_len); // The following functions implement the corresponding |TRUST_TOKENS_METHOD| // functions for |TRUST_TOKENS_pst_v1|'s VOPRF construction which uses P-384. 
int voprf_pst1_generate_key(CBB *out_private, CBB *out_public); int voprf_pst1_derive_key_from_secret(CBB *out_private, CBB *out_public, const uint8_t *secret, size_t secret_len); int voprf_pst1_client_key_from_bytes(TRUST_TOKEN_CLIENT_KEY *key, const uint8_t *in, size_t len); int voprf_pst1_issuer_key_from_bytes(TRUST_TOKEN_ISSUER_KEY *key, const uint8_t *in, size_t len); STACK_OF(TRUST_TOKEN_PRETOKEN) *voprf_pst1_blind(CBB *cbb, size_t count, int include_message, const uint8_t *msg, size_t msg_len); int voprf_pst1_sign(const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs, size_t num_requested, size_t num_to_issue, uint8_t private_metadata); OPENSSL_EXPORT int voprf_pst1_sign_with_proof_scalar_for_testing( const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs, size_t num_requested, size_t num_to_issue, uint8_t private_metadata, const uint8_t *proof_scalar_buf, size_t proof_scalar_len); STACK_OF(TRUST_TOKEN) *voprf_pst1_unblind( const TRUST_TOKEN_CLIENT_KEY *key, const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count, uint32_t key_id); int voprf_pst1_read(const TRUST_TOKEN_ISSUER_KEY *key, uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE], uint8_t *out_private_metadata, const uint8_t *token, size_t token_len, int include_message, const uint8_t *msg, size_t msg_len); // Trust Tokens internals. struct trust_token_method_st { // generate_key generates a fresh keypair and writes their serialized // forms into |out_private| and |out_public|. It returns one on success and // zero on failure. int (*generate_key)(CBB *out_private, CBB *out_public); // derive_key_from_secret deterministically derives a keypair based on // |secret| and writes their serialized forms into |out_private| and // |out_public|. It returns one on success and zero on failure. int (*derive_key_from_secret)(CBB *out_private, CBB *out_public, const uint8_t *secret, size_t secret_len); // client_key_from_bytes decodes a client key from |in| and sets |key| // to the resulting key. 
It returns one on success and zero // on failure. int (*client_key_from_bytes)(TRUST_TOKEN_CLIENT_KEY *key, const uint8_t *in, size_t len); // issuer_key_from_bytes decodes a issuer key from |in| and sets |key| // to the resulting key. It returns one on success and zero // on failure. int (*issuer_key_from_bytes)(TRUST_TOKEN_ISSUER_KEY *key, const uint8_t *in, size_t len); // blind generates a new issuance request for |count| tokens. If // |include_message| is set, then |msg| is used to derive the token nonces. On // success, it returns a newly-allocated |STACK_OF(TRUST_TOKEN_PRETOKEN)| and // writes a request to the issuer to |cbb|. On failure, it returns NULL. The // |STACK_OF(TRUST_TOKEN_PRETOKEN)|s should be passed to |pmbtoken_unblind| // when the server responds. // // This function implements the AT.Usr0 operation. STACK_OF(TRUST_TOKEN_PRETOKEN) *(*blind)(CBB *cbb, size_t count, int include_message, const uint8_t *msg, size_t msg_len); // sign parses a request for |num_requested| tokens from |cbs| and // issues |num_to_issue| tokens with |key| and a private metadata value of // |private_metadata|. It then writes the response to |cbb|. It returns one on // success and zero on failure. // // This function implements the AT.Sig operation. int (*sign)(const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs, size_t num_requested, size_t num_to_issue, uint8_t private_metadata); // unblind processes an issuance response for |count| tokens from |cbs| // and unblinds the signed tokens. |pretokens| are the pre-tokens returned // from the corresponding |blind| call. On success, the function returns a // newly-allocated |STACK_OF(TRUST_TOKEN)| containing the resulting tokens. // Each token's serialization will have |key_id| prepended. Otherwise, it // returns NULL. // // This function implements the AT.Usr1 operation. 
STACK_OF(TRUST_TOKEN) *(*unblind)( const TRUST_TOKEN_CLIENT_KEY *key, const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count, uint32_t key_id); // read parses a token from |token| and verifies it using |key|. If // |include_message| is set, then the nonce is derived from |msg| and the salt // in the token. On success, it returns one and stores the nonce and private // metadata bit in |out_nonce| and |*out_private_metadata|. Otherwise, it // returns zero. Note that, unlike the output of |unblind|, |token| does not // have a four-byte key ID prepended. int (*read)(const TRUST_TOKEN_ISSUER_KEY *key, uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE], uint8_t *out_private_metadata, const uint8_t *token, size_t token_len, int include_message, const uint8_t *msg, size_t msg_len); // whether the construction supports private metadata. int has_private_metadata; // max keys that can be configured. size_t max_keys; // whether the SRR is part of the protocol. int has_srr; }; // Structure representing a single Trust Token public key with the specified ID. struct trust_token_client_key_st { uint32_t id; TRUST_TOKEN_CLIENT_KEY key; }; // Structure representing a single Trust Token private key with the specified // ID. struct trust_token_issuer_key_st { uint32_t id; TRUST_TOKEN_ISSUER_KEY key; }; struct trust_token_client_st { const TRUST_TOKEN_METHOD *method; // max_batchsize is the maximum supported batchsize. uint16_t max_batchsize; // keys is the set of public keys that are supported by the client for // issuance/redemptions. struct trust_token_client_key_st keys[6]; // num_keys is the number of keys currently configured. size_t num_keys; // pretokens is the intermediate state during an active issuance. STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens; // srr_key is the public key used to verify the signature of the SRR. EVP_PKEY *srr_key; }; struct trust_token_issuer_st { const TRUST_TOKEN_METHOD *method; // max_batchsize is the maximum supported batchsize. 
uint16_t max_batchsize; // keys is the set of private keys that are supported by the issuer for // issuance/redemptions. The public metadata is an index into this list of // keys. struct trust_token_issuer_key_st keys[6]; // num_keys is the number of keys currently configured. size_t num_keys; // srr_key is the private key used to sign the SRR. EVP_PKEY *srr_key; // metadata_key is the secret material used to encode the private metadata bit // in the SRR. uint8_t *metadata_key; size_t metadata_key_len; }; #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(TRUST_TOKEN_PRETOKEN, TRUST_TOKEN_PRETOKEN_free) BSSL_NAMESPACE_END } // extern C++ #endif #endif // OPENSSL_HEADER_TRUST_TOKEN_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/trust_token/pmbtoken.cc ================================================ /* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/

// NOTE(review): the text extraction that produced this chunk stripped the
// <...> header names from the #include directives below; the directives are
// reproduced exactly as found. Confirm against upstream BoringSSL
// crypto/trust_token/pmbtoken.cc before building.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../ec/internal.h"
#include "../fipsmodule/bn/internal.h"
#include "../fipsmodule/ec/internal.h"
#include "internal.h"


// Hash-function hooks that parameterize one PMBTokens instantiation. Each
// returns one on success and zero on error.
typedef int (*hash_t_func_t)(const EC_GROUP *group, EC_JACOBIAN *out,
                             const uint8_t t[TRUST_TOKEN_NONCE_SIZE]);
typedef int (*hash_s_func_t)(const EC_GROUP *group, EC_JACOBIAN *out,
                             const EC_AFFINE *t,
                             const uint8_t s[TRUST_TOKEN_NONCE_SIZE]);
typedef int (*hash_c_func_t)(const EC_GROUP *group, EC_SCALAR *out,
                             uint8_t *buf, size_t len);
typedef int (*hash_to_scalar_func_t)(const EC_GROUP *group, EC_SCALAR *out,
                                     uint8_t *buf, size_t len);

// PMBTOKEN_METHOD bundles the curve group, the two generators (G implicit in
// |group|, H explicit) with their precomputed multiplication tables, and the
// hash hooks for one PMBTokens instantiation.
typedef struct {
  const EC_GROUP *group;
  EC_PRECOMP g_precomp;
  EC_PRECOMP h_precomp;
  EC_JACOBIAN h;
  // hash_t implements the H_t operation in PMBTokens. It returns one on success
  // and zero on error.
  hash_t_func_t hash_t;
  // hash_s implements the H_s operation in PMBTokens. It returns one on success
  // and zero on error.
  hash_s_func_t hash_s;
  // hash_c implements the H_c operation in PMBTokens. It returns one on success
  // and zero on error.
  hash_c_func_t hash_c;
  // hash_to_scalar implements the HashToScalar operation for PMBTokens. It
  // returns one on success and zero on error.
  hash_to_scalar_func_t hash_to_scalar;
  // prefix_point is one if serialized points carry a two-byte length prefix
  // and zero otherwise.
  int prefix_point : 1;
} PMBTOKEN_METHOD;

// Zero additional data used whenever scalar sampling needs no extra entropy
// input.
static const uint8_t kDefaultAdditionalData[32] = {0};

// pmbtoken_init_method initializes |method| over |group| with the given hash
// hooks, decoding the second generator H from the uncompressed encoding in
// |h_bytes|. It returns one on success and zero on error.
static int pmbtoken_init_method(PMBTOKEN_METHOD *method, const EC_GROUP *group,
                                const uint8_t *h_bytes, size_t h_len,
                                hash_t_func_t hash_t, hash_s_func_t hash_s,
                                hash_c_func_t hash_c,
                                hash_to_scalar_func_t hash_to_scalar,
                                int prefix_point) {
  method->group = group;
  method->hash_t = hash_t;
  method->hash_s = hash_s;
  method->hash_c = hash_c;
  method->hash_to_scalar = hash_to_scalar;
  method->prefix_point = prefix_point;
  EC_AFFINE h;
  if (!ec_point_from_uncompressed(method->group, &h, h_bytes, h_len)) {
    return 0;
  }
  ec_affine_to_jacobian(method->group, &method->h, &h);
  // Precompute tables for G and H; the protocol repeatedly computes linear
  // combinations of these two generators.
  if (!ec_init_precomp(method->group, &method->g_precomp,
                       &method->group->generator.raw) ||
      !ec_init_precomp(method->group, &method->h_precomp, &method->h)) {
    return 0;
  }
  return 1;
}

// derive_scalar_from_secret deterministically derives a key scalar from
// |secret| by hashing a fixed label, |scalar_id| (which domain-separates the
// six key scalars), and the secret to a scalar. Returns one on success and
// zero on error.
static int derive_scalar_from_secret(const PMBTOKEN_METHOD *method,
                                     EC_SCALAR *out, const uint8_t *secret,
                                     size_t secret_len, uint8_t scalar_id) {
  static const uint8_t kKeygenLabel[] = "TrustTokenPMBTokenKeyGen";
  int ok = 0;
  CBB cbb;
  CBB_zero(&cbb);
  uint8_t *buf = NULL;
  size_t len;
  if (!CBB_init(&cbb, 0) ||
      !CBB_add_bytes(&cbb, kKeygenLabel, sizeof(kKeygenLabel)) ||
      !CBB_add_u8(&cbb, scalar_id) ||
      !CBB_add_bytes(&cbb, secret, secret_len) ||
      !CBB_finish(&cbb, &buf, &len) ||
      !method->hash_to_scalar(method->group, out, buf, len)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_KEYGEN_FAILURE);
    goto err;
  }
  ok = 1;

err:
  CBB_cleanup(&cbb);
  OPENSSL_free(buf);
  return ok;
}

// point_to_cbb appends the uncompressed encoding of |point| to |out|. Returns
// one on success and zero on error.
static int point_to_cbb(CBB *out, const EC_GROUP *group,
                        const EC_AFFINE *point) {
  size_t len = ec_point_byte_len(group, POINT_CONVERSION_UNCOMPRESSED);
  if (len == 0) {
    return 0;
  }
  uint8_t *p;
  return CBB_add_space(out, &p, len) &&
         ec_point_to_bytes(group, point, POINT_CONVERSION_UNCOMPRESSED, p,
                           len) == len;
}

// cbb_add_prefixed_point appends |point| to |out|, with a two-byte length
// prefix when |prefix_point| is set. Returns one on success and zero on error.
static int cbb_add_prefixed_point(CBB *out, const EC_GROUP *group,
                                  const EC_AFFINE *point, int prefix_point) {
  if (prefix_point) {
    CBB child;
    if (!CBB_add_u16_length_prefixed(out, &child) ||
        !point_to_cbb(&child, group, point) ||
        !CBB_flush(out)) {
      return 0;
    }
  } else {
    if (!point_to_cbb(out, group, point) ||
        !CBB_flush(out)) {
      return 0;
    }
  }
  return 1;
}

// cbs_get_prefixed_point decodes an uncompressed point from |cbs| into |out|,
// reading a two-byte length prefix when |prefix_point| is set and a fixed-size
// encoding otherwise. Returns one on success and zero on error.
static int cbs_get_prefixed_point(CBS *cbs, const EC_GROUP *group,
                                  EC_AFFINE *out, int prefix_point) {
  CBS child;
  if (prefix_point) {
    if (!CBS_get_u16_length_prefixed(cbs, &child)) {
      return 0;
    }
  } else {
    size_t plen = ec_point_byte_len(group, POINT_CONVERSION_UNCOMPRESSED);
    if (!CBS_get_bytes(cbs, &child, plen)) {
      return 0;
    }
  }
  if (!ec_point_from_uncompressed(group, out, CBS_data(&child),
                                  CBS_len(&child))) {
    return 0;
  }
  return 1;
}

// mul_public_3 computes scalar0*p0 + scalar1*p1 + scalar2*p2 into |out| using
// the variable-time batch multiplication; all inputs must be public.
static int mul_public_3(const EC_GROUP *group, EC_JACOBIAN *out,
                        const EC_JACOBIAN *p0, const EC_SCALAR *scalar0,
                        const EC_JACOBIAN *p1, const EC_SCALAR *scalar1,
                        const EC_JACOBIAN *p2, const EC_SCALAR *scalar2) {
  EC_JACOBIAN points[3] = {*p0, *p1, *p2};
  EC_SCALAR scalars[3] = {*scalar0, *scalar1, *scalar2};
  return ec_point_mul_scalar_public_batch(group, out, /*g_scalar=*/NULL, points,
                                          scalars, 3);
}

// pmbtoken_compute_keys serializes the six private scalars into |out_private|
// and the three public keys pub0 = x0*G + y0*H, pub1 = x1*G + y1*H, and
// pubs = xs*G + ys*H into |out_public|. Returns one on success and zero on
// error.
static int pmbtoken_compute_keys(const PMBTOKEN_METHOD *method,
                                 CBB *out_private, CBB *out_public,
                                 const EC_SCALAR *x0, const EC_SCALAR *y0,
                                 const EC_SCALAR *x1, const EC_SCALAR *y1,
                                 const EC_SCALAR *xs, const EC_SCALAR *ys) {
  const EC_GROUP *group = method->group;
  EC_JACOBIAN pub[3];
  if (!ec_point_mul_scalar_precomp(group, &pub[0], &method->g_precomp, x0,
                                   &method->h_precomp, y0, NULL, NULL) ||
      !ec_point_mul_scalar_precomp(group, &pub[1], &method->g_precomp, x1,
                                   &method->h_precomp, y1, NULL, NULL) ||
      !ec_point_mul_scalar_precomp(method->group, &pub[2], &method->g_precomp,
                                   xs, &method->h_precomp, ys, NULL, NULL)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_KEYGEN_FAILURE);
    return 0;
  }
  const EC_SCALAR *scalars[] = {x0, y0, x1, y1, xs, ys};
  size_t scalar_len = BN_num_bytes(EC_GROUP_get0_order(group));
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(scalars); i++) {
    uint8_t *buf;
    if (!CBB_add_space(out_private, &buf, scalar_len)) {
      OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_BUFFER_TOO_SMALL);
      return 0;
    }
    ec_scalar_to_bytes(group, buf, &scalar_len, scalars[i]);
  }
  EC_AFFINE pub_affine[3];
  if (!ec_jacobian_to_affine_batch(group, pub_affine, pub, 3)) {
    return 0;
  }
  if (!cbb_add_prefixed_point(out_public, group, &pub_affine[0],
                              method->prefix_point) ||
      !cbb_add_prefixed_point(out_public, group, &pub_affine[1],
                              method->prefix_point) ||
      !cbb_add_prefixed_point(out_public, group, &pub_affine[2],
                              method->prefix_point)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_BUFFER_TOO_SMALL);
    return 0;
  }
  return 1;
}

// pmbtoken_generate_key samples a fresh key pair (six random non-zero
// scalars) and serializes it via |pmbtoken_compute_keys|.
static int pmbtoken_generate_key(const PMBTOKEN_METHOD *method,
                                 CBB *out_private, CBB *out_public) {
  EC_SCALAR x0, y0, x1, y1, xs, ys;
  if (!ec_random_nonzero_scalar(method->group, &x0, kDefaultAdditionalData) ||
      !ec_random_nonzero_scalar(method->group, &y0, kDefaultAdditionalData) ||
      !ec_random_nonzero_scalar(method->group, &x1, kDefaultAdditionalData) ||
      !ec_random_nonzero_scalar(method->group, &y1, kDefaultAdditionalData) ||
      !ec_random_nonzero_scalar(method->group, &xs, kDefaultAdditionalData) ||
      !ec_random_nonzero_scalar(method->group, &ys, kDefaultAdditionalData)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_KEYGEN_FAILURE);
    return 0;
  }
  return pmbtoken_compute_keys(method, out_private, out_public, &x0, &y0, &x1,
                               &y1, &xs, &ys);
}

// pmbtoken_derive_key_from_secret deterministically derives a key pair from
// |secret|, using scalar_ids 0..5 to separate the six scalars, then
// serializes it via |pmbtoken_compute_keys|.
static int pmbtoken_derive_key_from_secret(const PMBTOKEN_METHOD *method,
                                           CBB *out_private, CBB *out_public,
                                           const uint8_t *secret,
                                           size_t secret_len) {
  EC_SCALAR x0, y0, x1, y1, xs, ys;
  if (!derive_scalar_from_secret(method, &x0, secret, secret_len, 0) ||
      !derive_scalar_from_secret(method, &y0, secret, secret_len, 1) ||
      !derive_scalar_from_secret(method, &x1, secret, secret_len, 2) ||
      !derive_scalar_from_secret(method, &y1, secret, secret_len, 3) ||
      !derive_scalar_from_secret(method, &xs, secret, secret_len, 4) ||
      !derive_scalar_from_secret(method, &ys, secret, secret_len, 5)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_KEYGEN_FAILURE);
    return 0;
  }
  return pmbtoken_compute_keys(method, out_private, out_public, &x0, &y0, &x1,
                               &y1, &xs, &ys);
}

// pmbtoken_client_key_from_bytes parses the three public-key points from
// |in| into |key|, requiring the input to be fully consumed.
static int pmbtoken_client_key_from_bytes(const PMBTOKEN_METHOD *method,
                                          TRUST_TOKEN_CLIENT_KEY *key,
                                          const uint8_t *in, size_t len) {
  CBS cbs;
  CBS_init(&cbs, in, len);
  if (!cbs_get_prefixed_point(&cbs, method->group, &key->pub0,
                              method->prefix_point) ||
      !cbs_get_prefixed_point(&cbs, method->group, &key->pub1,
                              method->prefix_point) ||
      !cbs_get_prefixed_point(&cbs, method->group, &key->pubs,
                              method->prefix_point) ||
      CBS_len(&cbs) != 0) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
    return 0;
  }
  return 1;
}

// pmbtoken_issuer_key_from_bytes parses the six private scalars from |in|,
// then recomputes the corresponding public keys and their precomputed tables
// into |key|. Note trailing bytes after the scalars are not rejected here.
static int pmbtoken_issuer_key_from_bytes(const PMBTOKEN_METHOD *method,
                                          TRUST_TOKEN_ISSUER_KEY *key,
                                          const uint8_t *in, size_t len) {
  const EC_GROUP *group = method->group;
  CBS cbs, tmp;
  CBS_init(&cbs, in, len);
  size_t scalar_len = BN_num_bytes(EC_GROUP_get0_order(group));
  EC_SCALAR *scalars[] = {&key->x0, &key->y0, &key->x1,
                          &key->y1, &key->xs, &key->ys};
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(scalars); i++) {
    if (!CBS_get_bytes(&cbs, &tmp, scalar_len) ||
        !ec_scalar_from_bytes(group, scalars[i], CBS_data(&tmp),
                              CBS_len(&tmp))) {
      OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
      return 0;
    }
  }

  // Recompute the public key.
  EC_JACOBIAN pub[3];
  EC_AFFINE pub_affine[3];
  if (!ec_point_mul_scalar_precomp(group, &pub[0], &method->g_precomp,
                                   &key->x0, &method->h_precomp, &key->y0,
                                   NULL, NULL) ||
      !ec_init_precomp(group, &key->pub0_precomp, &pub[0]) ||
      !ec_point_mul_scalar_precomp(group, &pub[1], &method->g_precomp,
                                   &key->x1, &method->h_precomp, &key->y1,
                                   NULL, NULL) ||
      !ec_init_precomp(group, &key->pub1_precomp, &pub[1]) ||
      !ec_point_mul_scalar_precomp(group, &pub[2], &method->g_precomp,
                                   &key->xs, &method->h_precomp, &key->ys,
                                   NULL, NULL) ||
      !ec_init_precomp(group, &key->pubs_precomp, &pub[2]) ||
      !ec_jacobian_to_affine_batch(group, pub_affine, pub, 3)) {
    return 0;
  }
  key->pub0 = pub_affine[0];
  key->pub1 = pub_affine[1];
  key->pubs = pub_affine[2];
  return 1;
}

// pmbtoken_blind creates |count| pretokens: for each it samples a salt,
// derives the token nonce t (hashing in |msg| when |include_message| is set),
// samples a blinding scalar r, and writes the blinded point Tp = r^-1 * H_t(t)
// to |cbb|. Returns the pretokens on success and NULL on error.
static STACK_OF(TRUST_TOKEN_PRETOKEN) *pmbtoken_blind(
    const PMBTOKEN_METHOD *method, CBB *cbb, size_t count, int include_message,
    const uint8_t *msg, size_t msg_len) {
  SHA512_CTX hash_ctx;
  const EC_GROUP *group = method->group;
  STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens =
      sk_TRUST_TOKEN_PRETOKEN_new_null();
  if (pretokens == NULL) {
    goto err;
  }
  for (size_t i = 0; i < count; i++) {
    // Insert |pretoken| into |pretokens| early to simplify error-handling.
    // NOTE(review): the reinterpret_cast template argument (presumably
    // TRUST_TOKEN_PRETOKEN *) appears to have been stripped by text
    // extraction; reproduced as found.
    TRUST_TOKEN_PRETOKEN *pretoken = reinterpret_cast(
        OPENSSL_malloc(sizeof(TRUST_TOKEN_PRETOKEN)));
    if (pretoken == NULL ||
        !sk_TRUST_TOKEN_PRETOKEN_push(pretokens, pretoken)) {
      TRUST_TOKEN_PRETOKEN_free(pretoken);
      goto err;
    }
    RAND_bytes(pretoken->salt, sizeof(pretoken->salt));
    if (include_message) {
      assert(SHA512_DIGEST_LENGTH == TRUST_TOKEN_NONCE_SIZE);
      SHA512_Init(&hash_ctx);
      SHA512_Update(&hash_ctx, pretoken->salt, sizeof(pretoken->salt));
      SHA512_Update(&hash_ctx, msg, msg_len);
      SHA512_Final(pretoken->t, &hash_ctx);
    } else {
      OPENSSL_memcpy(pretoken->t, pretoken->salt, TRUST_TOKEN_NONCE_SIZE);
    }

    // We sample |pretoken->r| in Montgomery form to simplify inverting.
    if (!ec_random_nonzero_scalar(group, &pretoken->r,
                                  kDefaultAdditionalData)) {
      goto err;
    }
    EC_SCALAR rinv;
    ec_scalar_inv0_montgomery(group, &rinv, &pretoken->r);
    // Convert both out of Montgomery form.
    ec_scalar_from_montgomery(group, &pretoken->r, &pretoken->r);
    ec_scalar_from_montgomery(group, &rinv, &rinv);
    EC_JACOBIAN T, Tp;
    if (!method->hash_t(group, &T, pretoken->t) ||
        !ec_point_mul_scalar(group, &Tp, &T, &rinv) ||
        !ec_jacobian_to_affine(group, &pretoken->Tp, &Tp)) {
      goto err;
    }
    if (!cbb_add_prefixed_point(cbb, group, &pretoken->Tp,
                                method->prefix_point)) {
      goto err;
    }
  }
  return pretokens;

err:
  sk_TRUST_TOKEN_PRETOKEN_pop_free(pretokens, TRUST_TOKEN_PRETOKEN_free);
  return NULL;
}

// scalar_to_cbb appends the fixed-width big-endian encoding of |scalar| to
// |out|. Returns one on success and zero on error.
static int scalar_to_cbb(CBB *out, const EC_GROUP *group,
                         const EC_SCALAR *scalar) {
  uint8_t *buf;
  size_t scalar_len = BN_num_bytes(EC_GROUP_get0_order(group));
  if (!CBB_add_space(out, &buf, scalar_len)) {
    return 0;
  }
  ec_scalar_to_bytes(group, buf, &scalar_len, scalar);
  return 1;
}

// scalar_from_cbs reads a fixed-width scalar from |cbs| into |out|.
// NOTE(review): the return value of |ec_scalar_from_bytes| is ignored here,
// so an out-of-range encoding is not rejected — confirm against upstream.
static int scalar_from_cbs(CBS *cbs, const EC_GROUP *group, EC_SCALAR *out) {
  size_t scalar_len = BN_num_bytes(EC_GROUP_get0_order(group));
  CBS tmp;
  if (!CBS_get_bytes(cbs, &tmp, scalar_len)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
    return 0;
  }
  ec_scalar_from_bytes(group, out, CBS_data(&tmp), CBS_len(&tmp));
  return 1;
}

// hash_c_dleq computes the DLEQ2 challenge scalar over the labeled transcript
// (X, T, S, W, K0, K1). Returns one on success and zero on error.
static int hash_c_dleq(const PMBTOKEN_METHOD *method, EC_SCALAR *out,
                       const EC_AFFINE *X, const EC_AFFINE *T,
                       const EC_AFFINE *S, const EC_AFFINE *W,
                       const EC_AFFINE *K0, const EC_AFFINE *K1) {
  static const uint8_t kDLEQ2Label[] = "DLEQ2";
  int ok = 0;
  CBB cbb;
  CBB_zero(&cbb);
  uint8_t *buf = NULL;
  size_t len;
  if (!CBB_init(&cbb, 0) ||
      !CBB_add_bytes(&cbb, kDLEQ2Label, sizeof(kDLEQ2Label)) ||
      !point_to_cbb(&cbb, method->group, X) ||
      !point_to_cbb(&cbb, method->group, T) ||
      !point_to_cbb(&cbb, method->group, S) ||
      !point_to_cbb(&cbb, method->group, W) ||
      !point_to_cbb(&cbb, method->group, K0) ||
      !point_to_cbb(&cbb, method->group, K1) ||
      !CBB_finish(&cbb, &buf, &len) ||
      !method->hash_c(method->group, out, buf, len)) {
    goto err;
  }
  ok = 1;

err:
  CBB_cleanup(&cbb);
  OPENSSL_free(buf);
  return ok;
}

// hash_c_dleqor computes the DLEQOR2 challenge scalar over the labeled
// transcript (X0, X1, T, S, W, K00, K01, K10, K11). Returns one on success
// and zero on error.
static int hash_c_dleqor(const PMBTOKEN_METHOD *method, EC_SCALAR *out,
                         const EC_AFFINE *X0, const EC_AFFINE *X1,
                         const EC_AFFINE *T, const EC_AFFINE *S,
                         const EC_AFFINE *W, const EC_AFFINE *K00,
                         const EC_AFFINE *K01, const EC_AFFINE *K10,
                         const EC_AFFINE *K11) {
  static const uint8_t kDLEQOR2Label[] = "DLEQOR2";
  int ok = 0;
  CBB cbb;
  CBB_zero(&cbb);
  uint8_t *buf = NULL;
  size_t len;
  if (!CBB_init(&cbb, 0) ||
      !CBB_add_bytes(&cbb, kDLEQOR2Label, sizeof(kDLEQOR2Label)) ||
      !point_to_cbb(&cbb, method->group, X0) ||
      !point_to_cbb(&cbb, method->group, X1) ||
      !point_to_cbb(&cbb, method->group, T) ||
      !point_to_cbb(&cbb, method->group, S) ||
      !point_to_cbb(&cbb, method->group, W) ||
      !point_to_cbb(&cbb, method->group, K00) ||
      !point_to_cbb(&cbb, method->group, K01) ||
      !point_to_cbb(&cbb, method->group, K10) ||
      !point_to_cbb(&cbb, method->group, K11) ||
      !CBB_finish(&cbb, &buf, &len) ||
      !method->hash_c(method->group, out, buf, len)) {
    goto err;
  }
  ok = 1;

err:
  CBB_cleanup(&cbb);
  OPENSSL_free(buf);
  return ok;
}

// hash_c_batch derives the i-th batching coefficient e_i from the shared
// transcript |points| and the two-byte |index|. Returns one on success and
// zero on error.
static int hash_c_batch(const PMBTOKEN_METHOD *method, EC_SCALAR *out,
                        const CBB *points, size_t index) {
  static const uint8_t kDLEQBatchLabel[] = "DLEQ BATCH";
  if (index > 0xffff) {
    // The protocol supports only two-byte batches.
    OPENSSL_PUT_ERROR(TRUST_TOKEN, ERR_R_OVERFLOW);
    return 0;
  }
  int ok = 0;
  CBB cbb;
  CBB_zero(&cbb);
  uint8_t *buf = NULL;
  size_t len;
  if (!CBB_init(&cbb, 0) ||
      !CBB_add_bytes(&cbb, kDLEQBatchLabel, sizeof(kDLEQBatchLabel)) ||
      !CBB_add_bytes(&cbb, CBB_data(points), CBB_len(points)) ||
      !CBB_add_u16(&cbb, (uint16_t)index) ||
      !CBB_finish(&cbb, &buf, &len) ||
      !method->hash_c(method->group, out, buf, len)) {
    goto err;
  }
  ok = 1;

err:
  CBB_cleanup(&cbb);
  OPENSSL_free(buf);
  return ok;
}

// The DLEQ2 and DLEQOR2 constructions are described in appendix B of
// https://eprint.iacr.org/2020/072/20200324:214215.
// DLEQ2 is an instance of DLEQOR2 with only one value (n=1).

// dleq_generate writes to |cbb| a DLEQ2 proof (validity, under xs/ys) and a
// DLEQOR2 proof (private metadata bit, under x0/y0 or x1/y1) for the tuple
// (T, S, W, Ws). |private_metadata| selects which branch of the OR is proved
// for real; all branch selection is done in constant time.
static int dleq_generate(const PMBTOKEN_METHOD *method, CBB *cbb,
                         const TRUST_TOKEN_ISSUER_KEY *priv,
                         const EC_JACOBIAN *T, const EC_JACOBIAN *S,
                         const EC_JACOBIAN *W, const EC_JACOBIAN *Ws,
                         uint8_t private_metadata) {
  const EC_GROUP *group = method->group;

  // We generate a DLEQ proof for the validity token and a DLEQOR2 proof for the
  // private metadata token. To allow amortizing Jacobian-to-affine conversions,
  // we compute Ki for both proofs first.
  enum {
    idx_T,
    idx_S,
    idx_W,
    idx_Ws,
    idx_Ks0,
    idx_Ks1,
    idx_Kb0,
    idx_Kb1,
    idx_Ko0,
    idx_Ko1,
    num_idx,
  };
  EC_JACOBIAN jacobians[num_idx];

  // Setup the DLEQ proof.
  EC_SCALAR ks0, ks1;
  if (  // ks0, ks1 <- Zp
      !ec_random_nonzero_scalar(group, &ks0, kDefaultAdditionalData) ||
      !ec_random_nonzero_scalar(group, &ks1, kDefaultAdditionalData) ||
      // Ks = ks0*(G;T) + ks1*(H;S)
      !ec_point_mul_scalar_precomp(group, &jacobians[idx_Ks0],
                                   &method->g_precomp, &ks0,
                                   &method->h_precomp, &ks1, NULL, NULL) ||
      !ec_point_mul_scalar_batch(group, &jacobians[idx_Ks1], T, &ks0, S, &ks1,
                                 NULL, NULL)) {
    return 0;
  }

  // Setup the DLEQOR proof. First, select values of xb, yb (keys corresponding
  // to the private metadata value) and pubo (public key corresponding to the
  // other value) in constant time.
  BN_ULONG mask = ((BN_ULONG)0) - (private_metadata & 1);
  EC_PRECOMP pubo_precomp;
  EC_SCALAR xb, yb;
  ec_scalar_select(group, &xb, mask, &priv->x1, &priv->x0);
  ec_scalar_select(group, &yb, mask, &priv->y1, &priv->y0);
  ec_precomp_select(group, &pubo_precomp, mask, &priv->pub0_precomp,
                    &priv->pub1_precomp);

  EC_SCALAR k0, k1, minus_co, uo, vo;
  if (  // k0, k1 <- Zp
      !ec_random_nonzero_scalar(group, &k0, kDefaultAdditionalData) ||
      !ec_random_nonzero_scalar(group, &k1, kDefaultAdditionalData) ||
      // Kb = k0*(G;T) + k1*(H;S)
      !ec_point_mul_scalar_precomp(group, &jacobians[idx_Kb0],
                                   &method->g_precomp, &k0, &method->h_precomp,
                                   &k1, NULL, NULL) ||
      !ec_point_mul_scalar_batch(group, &jacobians[idx_Kb1], T, &k0, S, &k1,
                                 NULL, NULL) ||
      // co, uo, vo <- Zp
      !ec_random_nonzero_scalar(group, &minus_co, kDefaultAdditionalData) ||
      !ec_random_nonzero_scalar(group, &uo, kDefaultAdditionalData) ||
      !ec_random_nonzero_scalar(group, &vo, kDefaultAdditionalData) ||
      // Ko = uo*(G;T) + vo*(H;S) - co*(pubo;W)
      !ec_point_mul_scalar_precomp(group, &jacobians[idx_Ko0],
                                   &method->g_precomp, &uo, &method->h_precomp,
                                   &vo, &pubo_precomp, &minus_co) ||
      !ec_point_mul_scalar_batch(group, &jacobians[idx_Ko1], T, &uo, S, &vo, W,
                                 &minus_co)) {
    return 0;
  }

  EC_AFFINE affines[num_idx];
  jacobians[idx_T] = *T;
  jacobians[idx_S] = *S;
  jacobians[idx_W] = *W;
  jacobians[idx_Ws] = *Ws;
  if (!ec_jacobian_to_affine_batch(group, affines, jacobians, num_idx)) {
    return 0;
  }

  // Select the K corresponding to K0 and K1 in constant-time.
  EC_AFFINE K00, K01, K10, K11;
  ec_affine_select(group, &K00, mask, &affines[idx_Ko0], &affines[idx_Kb0]);
  ec_affine_select(group, &K01, mask, &affines[idx_Ko1], &affines[idx_Kb1]);
  ec_affine_select(group, &K10, mask, &affines[idx_Kb0], &affines[idx_Ko0]);
  ec_affine_select(group, &K11, mask, &affines[idx_Kb1], &affines[idx_Ko1]);

  // Compute c = Hc(...) for the two proofs.
  EC_SCALAR cs, c;
  if (!hash_c_dleq(method, &cs, &priv->pubs, &affines[idx_T], &affines[idx_S],
                   &affines[idx_Ws], &affines[idx_Ks0], &affines[idx_Ks1]) ||
      !hash_c_dleqor(method, &c, &priv->pub0, &priv->pub1, &affines[idx_T],
                     &affines[idx_S], &affines[idx_W], &K00, &K01, &K10,
                     &K11)) {
    return 0;
  }

  // Compute cb, ub, and vb for the two proofs. In each of these products, only
  // one operand is in Montgomery form, so the product does not need to be
  // converted.
  EC_SCALAR cs_mont;
  ec_scalar_to_montgomery(group, &cs_mont, &cs);

  // us = ks0 + cs*xs
  EC_SCALAR us, vs;
  ec_scalar_mul_montgomery(group, &us, &priv->xs, &cs_mont);
  ec_scalar_add(group, &us, &ks0, &us);

  // vs = ks1 + cs*ys
  ec_scalar_mul_montgomery(group, &vs, &priv->ys, &cs_mont);
  ec_scalar_add(group, &vs, &ks1, &vs);

  // Store DLEQ2 proof in transcript.
  if (!scalar_to_cbb(cbb, group, &cs) ||
      !scalar_to_cbb(cbb, group, &us) ||
      !scalar_to_cbb(cbb, group, &vs)) {
    return 0;
  }

  // cb = c - co
  EC_SCALAR cb, ub, vb;
  ec_scalar_add(group, &cb, &c, &minus_co);
  EC_SCALAR cb_mont;
  ec_scalar_to_montgomery(group, &cb_mont, &cb);

  // ub = k0 + cb*xb
  ec_scalar_mul_montgomery(group, &ub, &xb, &cb_mont);
  ec_scalar_add(group, &ub, &k0, &ub);

  // vb = k1 + cb*yb
  ec_scalar_mul_montgomery(group, &vb, &yb, &cb_mont);
  ec_scalar_add(group, &vb, &k1, &vb);

  // Select c, u, v in constant-time.
  EC_SCALAR co, c0, c1, u0, u1, v0, v1;
  ec_scalar_neg(group, &co, &minus_co);
  ec_scalar_select(group, &c0, mask, &co, &cb);
  ec_scalar_select(group, &u0, mask, &uo, &ub);
  ec_scalar_select(group, &v0, mask, &vo, &vb);
  ec_scalar_select(group, &c1, mask, &cb, &co);
  ec_scalar_select(group, &u1, mask, &ub, &uo);
  ec_scalar_select(group, &v1, mask, &vb, &vo);

  // Store DLEQOR2 proof in transcript.
  if (!scalar_to_cbb(cbb, group, &c0) ||
      !scalar_to_cbb(cbb, group, &c1) ||
      !scalar_to_cbb(cbb, group, &u0) ||
      !scalar_to_cbb(cbb, group, &u1) ||
      !scalar_to_cbb(cbb, group, &v0) ||
      !scalar_to_cbb(cbb, group, &v1)) {
    return 0;
  }
  return 1;
}

// dleq_verify checks, against the client public key |pub|, the DLEQ2 and
// DLEQOR2 proofs read from |cbs| for the tuple (T, S, W, Ws). All inputs are
// public, so variable-time multiplication is used throughout. Returns one if
// both proofs verify and zero otherwise.
static int dleq_verify(const PMBTOKEN_METHOD *method, CBS *cbs,
                       const TRUST_TOKEN_CLIENT_KEY *pub, const EC_JACOBIAN *T,
                       const EC_JACOBIAN *S, const EC_JACOBIAN *W,
                       const EC_JACOBIAN *Ws) {
  const EC_GROUP *group = method->group;
  const EC_JACOBIAN *g = &group->generator.raw;

  // We verify a DLEQ proof for the validity token and a DLEQOR2 proof for the
  // private metadata token. To allow amortizing Jacobian-to-affine conversions,
  // we compute Ki for both proofs first. Additionally, all inputs to this
  // function are public, so we can use the faster variable-time
  // multiplications.
  enum {
    idx_T,
    idx_S,
    idx_W,
    idx_Ws,
    idx_Ks0,
    idx_Ks1,
    idx_K00,
    idx_K01,
    idx_K10,
    idx_K11,
    num_idx,
  };
  EC_JACOBIAN jacobians[num_idx];

  // Decode the DLEQ proof.
  EC_SCALAR cs, us, vs;
  if (!scalar_from_cbs(cbs, group, &cs) ||
      !scalar_from_cbs(cbs, group, &us) ||
      !scalar_from_cbs(cbs, group, &vs)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
    return 0;
  }

  // Ks = us*(G;T) + vs*(H;S) - cs*(pubs;Ws)
  EC_JACOBIAN pubs;
  ec_affine_to_jacobian(group, &pubs, &pub->pubs);
  EC_SCALAR minus_cs;
  ec_scalar_neg(group, &minus_cs, &cs);
  if (!mul_public_3(group, &jacobians[idx_Ks0], g, &us, &method->h, &vs, &pubs,
                    &minus_cs) ||
      !mul_public_3(group, &jacobians[idx_Ks1], T, &us, S, &vs, Ws,
                    &minus_cs)) {
    return 0;
  }

  // Decode the DLEQOR proof.
  EC_SCALAR c0, c1, u0, u1, v0, v1;
  if (!scalar_from_cbs(cbs, group, &c0) ||
      !scalar_from_cbs(cbs, group, &c1) ||
      !scalar_from_cbs(cbs, group, &u0) ||
      !scalar_from_cbs(cbs, group, &u1) ||
      !scalar_from_cbs(cbs, group, &v0) ||
      !scalar_from_cbs(cbs, group, &v1)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
    return 0;
  }

  EC_JACOBIAN pub0, pub1;
  ec_affine_to_jacobian(group, &pub0, &pub->pub0);
  ec_affine_to_jacobian(group, &pub1, &pub->pub1);
  EC_SCALAR minus_c0, minus_c1;
  ec_scalar_neg(group, &minus_c0, &c0);
  ec_scalar_neg(group, &minus_c1, &c1);
  if (  // K0 = u0*(G;T) + v0*(H;S) - c0*(pub0;W)
      !mul_public_3(group, &jacobians[idx_K00], g, &u0, &method->h, &v0, &pub0,
                    &minus_c0) ||
      !mul_public_3(group, &jacobians[idx_K01], T, &u0, S, &v0, W,
                    &minus_c0) ||
      // K1 = u1*(G;T) + v1*(H;S) - c1*(pub1;W)
      !mul_public_3(group, &jacobians[idx_K10], g, &u1, &method->h, &v1, &pub1,
                    &minus_c1) ||
      !mul_public_3(group, &jacobians[idx_K11], T, &u1, S, &v1, W,
                    &minus_c1)) {
    return 0;
  }

  EC_AFFINE affines[num_idx];
  jacobians[idx_T] = *T;
  jacobians[idx_S] = *S;
  jacobians[idx_W] = *W;
  jacobians[idx_Ws] = *Ws;
  if (!ec_jacobian_to_affine_batch(group, affines, jacobians, num_idx)) {
    return 0;
  }

  // Check the DLEQ proof.
  EC_SCALAR calculated;
  if (!hash_c_dleq(method, &calculated, &pub->pubs, &affines[idx_T],
                   &affines[idx_S], &affines[idx_Ws], &affines[idx_Ks0],
                   &affines[idx_Ks1])) {
    return 0;
  }

  // cs == calculated
  if (!ec_scalar_equal_vartime(group, &cs, &calculated)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_INVALID_PROOF);
    return 0;
  }

  // Check the DLEQOR proof.
  if (!hash_c_dleqor(method, &calculated, &pub->pub0, &pub->pub1,
                     &affines[idx_T], &affines[idx_S], &affines[idx_W],
                     &affines[idx_K00], &affines[idx_K01], &affines[idx_K10],
                     &affines[idx_K11])) {
    return 0;
  }

  // c0 + c1 == calculated
  EC_SCALAR c;
  ec_scalar_add(group, &c, &c0, &c1);
  if (!ec_scalar_equal_vartime(group, &c, &calculated)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_INVALID_PROOF);
    return 0;
  }

  return 1;
}

// pmbtoken_sign reads |num_to_issue| blinded points Tp from |cbs|, signs each
// as (s, Wp, Wsp) written to |cbb| — embedding |private_metadata| via the
// xb/yb key pair — then appends one batched DLEQ proof over all issued tokens
// and skips the remaining |num_requested - num_to_issue| requests.
static int pmbtoken_sign(const PMBTOKEN_METHOD *method,
                         const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs,
                         size_t num_requested, size_t num_to_issue,
                         uint8_t private_metadata) {
  const EC_GROUP *group = method->group;
  if (num_requested < num_to_issue) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  int ret = 0;
  // NOTE(review): the reinterpret_cast template arguments (presumably
  // EC_JACOBIAN * / EC_SCALAR *) appear to have been stripped by text
  // extraction; reproduced as found.
  EC_JACOBIAN *Tps = reinterpret_cast(
      OPENSSL_calloc(num_to_issue, sizeof(EC_JACOBIAN)));
  EC_JACOBIAN *Sps = reinterpret_cast(
      OPENSSL_calloc(num_to_issue, sizeof(EC_JACOBIAN)));
  EC_JACOBIAN *Wps = reinterpret_cast(
      OPENSSL_calloc(num_to_issue, sizeof(EC_JACOBIAN)));
  EC_JACOBIAN *Wsps = reinterpret_cast(
      OPENSSL_calloc(num_to_issue, sizeof(EC_JACOBIAN)));
  EC_SCALAR *es = reinterpret_cast(
      OPENSSL_calloc(num_to_issue, sizeof(EC_SCALAR)));
  CBB batch_cbb;
  CBB_zero(&batch_cbb);
  // Scope the body so |goto err| does not jump over C++ object initialization.
  {
    if (!Tps ||
        !Sps ||
        !Wps ||
        !Wsps ||
        !es ||
        !CBB_init(&batch_cbb, 0) ||
        !point_to_cbb(&batch_cbb, method->group, &key->pubs) ||
        !point_to_cbb(&batch_cbb, method->group, &key->pub0) ||
        !point_to_cbb(&batch_cbb, method->group, &key->pub1)) {
      goto err;
    }

    for (size_t i = 0; i < num_to_issue; i++) {
      EC_AFFINE Tp_affine;
      EC_JACOBIAN Tp;
      if (!cbs_get_prefixed_point(cbs, group, &Tp_affine,
                                  method->prefix_point)) {
        OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
        goto err;
      }
      ec_affine_to_jacobian(group, &Tp, &Tp_affine);

      // Select the key pair for the private metadata bit in constant time.
      EC_SCALAR xb, yb;
      BN_ULONG mask = ((BN_ULONG)0) - (private_metadata & 1);
      ec_scalar_select(group, &xb, mask, &key->x1, &key->x0);
      ec_scalar_select(group, &yb, mask, &key->y1, &key->y0);

      uint8_t s[TRUST_TOKEN_NONCE_SIZE];
      RAND_bytes(s, TRUST_TOKEN_NONCE_SIZE);
      // The |jacobians| and |affines| contain Sp, Wp, and Wsp.
      EC_JACOBIAN jacobians[3];
      EC_AFFINE affines[3];
      if (!method->hash_s(group, &jacobians[0], &Tp_affine, s) ||
          !ec_point_mul_scalar_batch(group, &jacobians[1], &Tp, &xb,
                                     &jacobians[0], &yb, NULL, NULL) ||
          !ec_point_mul_scalar_batch(group, &jacobians[2], &Tp, &key->xs,
                                     &jacobians[0], &key->ys, NULL, NULL) ||
          !ec_jacobian_to_affine_batch(group, affines, jacobians, 3) ||
          !CBB_add_bytes(cbb, s, TRUST_TOKEN_NONCE_SIZE) ||
          !cbb_add_prefixed_point(cbb, group, &affines[1],
                                  method->prefix_point) ||
          !cbb_add_prefixed_point(cbb, group, &affines[2],
                                  method->prefix_point)) {
        goto err;
      }

      if (!point_to_cbb(&batch_cbb, group, &Tp_affine) ||
          !point_to_cbb(&batch_cbb, group, &affines[0]) ||
          !point_to_cbb(&batch_cbb, group, &affines[1]) ||
          !point_to_cbb(&batch_cbb, group, &affines[2])) {
        goto err;
      }
      Tps[i] = Tp;
      Sps[i] = jacobians[0];
      Wps[i] = jacobians[1];
      Wsps[i] = jacobians[2];

      if (!CBB_flush(cbb)) {
        goto err;
      }
    }

    // The DLEQ batching construction is described in appendix B of
    // https://eprint.iacr.org/2020/072/20200324:214215. Note the additional
    // computations all act on public inputs.
    for (size_t i = 0; i < num_to_issue; i++) {
      if (!hash_c_batch(method, &es[i], &batch_cbb, i)) {
        goto err;
      }
    }

    EC_JACOBIAN Tp_batch, Sp_batch, Wp_batch, Wsp_batch;
    if (!ec_point_mul_scalar_public_batch(group, &Tp_batch,
                                          /*g_scalar=*/NULL, Tps, es,
                                          num_to_issue) ||
        !ec_point_mul_scalar_public_batch(group, &Sp_batch,
                                          /*g_scalar=*/NULL, Sps, es,
                                          num_to_issue) ||
        !ec_point_mul_scalar_public_batch(group, &Wp_batch,
                                          /*g_scalar=*/NULL, Wps, es,
                                          num_to_issue) ||
        !ec_point_mul_scalar_public_batch(group, &Wsp_batch,
                                          /*g_scalar=*/NULL, Wsps, es,
                                          num_to_issue)) {
      goto err;
    }

    CBB proof;
    if (!CBB_add_u16_length_prefixed(cbb, &proof) ||
        !dleq_generate(method, &proof, key, &Tp_batch, &Sp_batch, &Wp_batch,
                       &Wsp_batch, private_metadata) ||
        !CBB_flush(cbb)) {
      goto err;
    }

    // Skip over any unused requests.
    size_t point_len = ec_point_byte_len(group, POINT_CONVERSION_UNCOMPRESSED);
    size_t token_len = point_len;
    if (method->prefix_point) {
      token_len += 2;
    }
    if (!CBS_skip(cbs, token_len * (num_requested - num_to_issue))) {
      OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
      goto err;
    }

    ret = 1;
  }

err:
  OPENSSL_free(Tps);
  OPENSSL_free(Sps);
  OPENSSL_free(Wps);
  OPENSSL_free(Wsps);
  OPENSSL_free(es);
  CBB_cleanup(&batch_cbb);
  return ret;
}

// pmbtoken_unblind reads |count| signed responses (s, Wp, Wsp) from |cbs|,
// verifies the batched DLEQ proof against |key|, unblinds each token by
// multiplying with the pretoken's blinding scalar r, and returns the
// serialized tokens (prefixed with |key_id| and the salt). Returns NULL on
// error.
static STACK_OF(TRUST_TOKEN) *pmbtoken_unblind(
    const PMBTOKEN_METHOD *method, const TRUST_TOKEN_CLIENT_KEY *key,
    const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count,
    uint32_t key_id) {
  const EC_GROUP *group = method->group;
  if (count > sk_TRUST_TOKEN_PRETOKEN_num(pretokens)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
    return NULL;
  }

  int ok = 0;
  STACK_OF(TRUST_TOKEN) *ret = sk_TRUST_TOKEN_new_null();
  // NOTE(review): the reinterpret_cast template arguments (presumably
  // EC_JACOBIAN * / EC_SCALAR *) appear to have been stripped by text
  // extraction; reproduced as found.
  EC_JACOBIAN *Tps = reinterpret_cast(
      OPENSSL_calloc(count, sizeof(EC_JACOBIAN)));
  EC_JACOBIAN *Sps = reinterpret_cast(
      OPENSSL_calloc(count, sizeof(EC_JACOBIAN)));
  EC_JACOBIAN *Wps = reinterpret_cast(
      OPENSSL_calloc(count, sizeof(EC_JACOBIAN)));
  EC_JACOBIAN *Wsps = reinterpret_cast(
      OPENSSL_calloc(count, sizeof(EC_JACOBIAN)));
  EC_SCALAR *es = reinterpret_cast(OPENSSL_calloc(count, sizeof(EC_SCALAR)));
  CBB batch_cbb;
  CBB_zero(&batch_cbb);
  if (ret == NULL ||
      Tps == NULL ||
      Sps == NULL ||
      Wps == NULL ||
      Wsps == NULL ||
      es == NULL ||
      !CBB_init(&batch_cbb, 0) ||
      !point_to_cbb(&batch_cbb, method->group, &key->pubs) ||
      !point_to_cbb(&batch_cbb, method->group, &key->pub0) ||
      !point_to_cbb(&batch_cbb, method->group, &key->pub1)) {
    goto err;
  }

  for (size_t i = 0; i < count; i++) {
    const TRUST_TOKEN_PRETOKEN *pretoken =
        sk_TRUST_TOKEN_PRETOKEN_value(pretokens, i);

    uint8_t s[TRUST_TOKEN_NONCE_SIZE];
    EC_AFFINE Wp_affine, Wsp_affine;
    if (!CBS_copy_bytes(cbs, s, TRUST_TOKEN_NONCE_SIZE) ||
        !cbs_get_prefixed_point(cbs, group, &Wp_affine,
                                method->prefix_point) ||
        !cbs_get_prefixed_point(cbs, group, &Wsp_affine,
                                method->prefix_point)) {
      OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
      goto err;
    }

    ec_affine_to_jacobian(group, &Tps[i], &pretoken->Tp);
    ec_affine_to_jacobian(group, &Wps[i], &Wp_affine);
    ec_affine_to_jacobian(group, &Wsps[i], &Wsp_affine);
    if (!method->hash_s(group, &Sps[i], &pretoken->Tp, s)) {
      goto err;
    }

    EC_AFFINE Sp_affine;
    if (!point_to_cbb(&batch_cbb, group, &pretoken->Tp) ||
        !ec_jacobian_to_affine(group, &Sp_affine, &Sps[i]) ||
        !point_to_cbb(&batch_cbb, group, &Sp_affine) ||
        !point_to_cbb(&batch_cbb, group, &Wp_affine) ||
        !point_to_cbb(&batch_cbb, group, &Wsp_affine)) {
      goto err;
    }

    // Unblind the token.
    EC_JACOBIAN jacobians[3];
    EC_AFFINE affines[3];
    if (!ec_point_mul_scalar(group, &jacobians[0], &Sps[i], &pretoken->r) ||
        !ec_point_mul_scalar(group, &jacobians[1], &Wps[i], &pretoken->r) ||
        !ec_point_mul_scalar(group, &jacobians[2], &Wsps[i], &pretoken->r) ||
        !ec_jacobian_to_affine_batch(group, affines, jacobians, 3)) {
      goto err;
    }

    // Serialize the token. Include |key_id| to avoid an extra copy in the layer
    // above.
    CBB token_cbb;
    size_t point_len = ec_point_byte_len(group, POINT_CONVERSION_UNCOMPRESSED);
    if (!CBB_init(&token_cbb,
                  4 + TRUST_TOKEN_NONCE_SIZE + 3 * (2 + point_len)) ||
        !CBB_add_u32(&token_cbb, key_id) ||
        !CBB_add_bytes(&token_cbb, pretoken->salt, TRUST_TOKEN_NONCE_SIZE) ||
        !cbb_add_prefixed_point(&token_cbb, group, &affines[0],
                                method->prefix_point) ||
        !cbb_add_prefixed_point(&token_cbb, group, &affines[1],
                                method->prefix_point) ||
        !cbb_add_prefixed_point(&token_cbb, group, &affines[2],
                                method->prefix_point) ||
        !CBB_flush(&token_cbb)) {
      CBB_cleanup(&token_cbb);
      goto err;
    }

    TRUST_TOKEN *token =
        TRUST_TOKEN_new(CBB_data(&token_cbb), CBB_len(&token_cbb));
    CBB_cleanup(&token_cbb);
    if (token == NULL ||
        !sk_TRUST_TOKEN_push(ret, token)) {
      TRUST_TOKEN_free(token);
      goto err;
    }
  }

  // The DLEQ batching construction is described in appendix B of
  // https://eprint.iacr.org/2020/072/20200324:214215. Note the additional
  // computations all act on public inputs.
  for (size_t i = 0; i < count; i++) {
    if (!hash_c_batch(method, &es[i], &batch_cbb, i)) {
      goto err;
    }
  }

  EC_JACOBIAN Tp_batch, Sp_batch, Wp_batch, Wsp_batch;
  if (!ec_point_mul_scalar_public_batch(group, &Tp_batch,
                                        /*g_scalar=*/NULL, Tps, es, count) ||
      !ec_point_mul_scalar_public_batch(group, &Sp_batch,
                                        /*g_scalar=*/NULL, Sps, es, count) ||
      !ec_point_mul_scalar_public_batch(group, &Wp_batch,
                                        /*g_scalar=*/NULL, Wps, es, count) ||
      !ec_point_mul_scalar_public_batch(group, &Wsp_batch,
                                        /*g_scalar=*/NULL, Wsps, es, count)) {
    goto err;
  }

  CBS proof;
  if (!CBS_get_u16_length_prefixed(cbs, &proof) ||
      !dleq_verify(method, &proof, key, &Tp_batch, &Sp_batch, &Wp_batch,
                   &Wsp_batch) ||
      CBS_len(&proof) != 0) {
    goto err;
  }

  ok = 1;

err:
  OPENSSL_free(Tps);
  OPENSSL_free(Sps);
  OPENSSL_free(Wps);
  OPENSSL_free(Wsps);
  OPENSSL_free(es);
  CBB_cleanup(&batch_cbb);
  if (!ok) {
    sk_TRUST_TOKEN_pop_free(ret, TRUST_TOKEN_free);
    ret = NULL;
  }
  return ret;
}

// pmbtoken_read parses a redeemed token (salt, S, W, Ws), recomputes the
// nonce (hashing in |msg| when |include_message| is set), checks validity
// (Ws == xs*T + ys*S), and recovers the private metadata bit by testing W
// against both candidate key pairs. Returns one on success and zero on error.
static int pmbtoken_read(const PMBTOKEN_METHOD *method,
                         const TRUST_TOKEN_ISSUER_KEY *key,
                         uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE],
                         uint8_t *out_private_metadata, const uint8_t *token,
                         size_t token_len, int include_message,
                         const uint8_t *msg, size_t msg_len) {
  const EC_GROUP *group = method->group;
  CBS cbs, salt;
  CBS_init(&cbs, token, token_len);
  EC_AFFINE S, W, Ws;
  if (!CBS_get_bytes(&cbs, &salt, TRUST_TOKEN_NONCE_SIZE) ||
      !cbs_get_prefixed_point(&cbs, group, &S, method->prefix_point) ||
      !cbs_get_prefixed_point(&cbs, group, &W, method->prefix_point) ||
      !cbs_get_prefixed_point(&cbs, group, &Ws, method->prefix_point) ||
      CBS_len(&cbs) != 0) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_INVALID_TOKEN);
    return 0;
  }

  if (include_message) {
    SHA512_CTX hash_ctx;
    assert(SHA512_DIGEST_LENGTH == TRUST_TOKEN_NONCE_SIZE);
    SHA512_Init(&hash_ctx);
    SHA512_Update(&hash_ctx, CBS_data(&salt), CBS_len(&salt));
    SHA512_Update(&hash_ctx, msg, msg_len);
    SHA512_Final(out_nonce, &hash_ctx);
  } else {
    OPENSSL_memcpy(out_nonce, CBS_data(&salt), CBS_len(&salt));
  }

  EC_JACOBIAN T;
  if (!method->hash_t(group, &T, out_nonce)) {
    return 0;
  }

  // We perform three multiplications with S and T. This is enough that it is
  // worth using |ec_point_mul_scalar_precomp|.
  EC_JACOBIAN S_jacobian;
  EC_PRECOMP S_precomp, T_precomp;
  ec_affine_to_jacobian(group, &S_jacobian, &S);
  if (!ec_init_precomp(group, &S_precomp, &S_jacobian) ||
      !ec_init_precomp(group, &T_precomp, &T)) {
    return 0;
  }

  EC_JACOBIAN Ws_calculated;
  // Check the validity of the token.
  if (!ec_point_mul_scalar_precomp(group, &Ws_calculated, &T_precomp, &key->xs,
                                   &S_precomp, &key->ys, NULL, NULL) ||
      !ec_affine_jacobian_equal(group, &Ws, &Ws_calculated)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_BAD_VALIDITY_CHECK);
    return 0;
  }

  EC_JACOBIAN W0, W1;
  if (!ec_point_mul_scalar_precomp(group, &W0, &T_precomp, &key->x0,
                                   &S_precomp, &key->y0, NULL, NULL) ||
      !ec_point_mul_scalar_precomp(group, &W1, &T_precomp, &key->x1,
                                   &S_precomp, &key->y1, NULL, NULL)) {
    return 0;
  }

  // W must match exactly one of the two candidate key pairs.
  const int is_W0 = ec_affine_jacobian_equal(group, &W, &W0);
  const int is_W1 = ec_affine_jacobian_equal(group, &W, &W1);
  const int is_valid = is_W0 ^ is_W1;
  if (!is_valid) {
    // Invalid tokens will fail the validity check above.
    OPENSSL_PUT_ERROR(TRUST_TOKEN, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  *out_private_metadata = is_W1;
  return 1;
}

// PMBTokens experiment v1.
// PMBTokens experiment v1: instantiates the generic PMBTOKEN_METHOD over
// P-384 using the draft-07 hash-to-curve / hash-to-scalar suites with SHA-512
// and "PMBTokens Experiment V1" domain-separation labels.

// pmbtoken_exp1_hash_t implements the HashT operation: it hashes the token
// nonce |t| to a group element in |out|. Returns one on success and zero on
// error.
static int pmbtoken_exp1_hash_t(const EC_GROUP *group, EC_JACOBIAN *out,
                                const uint8_t t[TRUST_TOKEN_NONCE_SIZE]) {
  const uint8_t kHashTLabel[] = "PMBTokens Experiment V1 HashT";
  return ec_hash_to_curve_p384_xmd_sha512_sswu_draft07(
      group, out, kHashTLabel, sizeof(kHashTLabel), t, TRUST_TOKEN_NONCE_SIZE);
}

// pmbtoken_exp1_hash_s implements the HashS operation: it hashes the
// serialized point |t| concatenated with the nonce |s| to a group element in
// |out|. Returns one on success and zero on error.
static int pmbtoken_exp1_hash_s(const EC_GROUP *group, EC_JACOBIAN *out,
                                const EC_AFFINE *t,
                                const uint8_t s[TRUST_TOKEN_NONCE_SIZE]) {
  const uint8_t kHashSLabel[] = "PMBTokens Experiment V1 HashS";
  int ret = 0;
  CBB cbb;
  uint8_t *buf = NULL;
  size_t len;
  // Serialize |t| followed by |s| into a temporary buffer, then hash the
  // whole encoding to the curve.
  if (!CBB_init(&cbb, 0) ||
      !point_to_cbb(&cbb, group, t) ||
      !CBB_add_bytes(&cbb, s, TRUST_TOKEN_NONCE_SIZE) ||
      !CBB_finish(&cbb, &buf, &len) ||
      !ec_hash_to_curve_p384_xmd_sha512_sswu_draft07(
          group, out, kHashSLabel, sizeof(kHashSLabel), buf, len)) {
    goto err;
  }

  ret = 1;

err:
  OPENSSL_free(buf);
  CBB_cleanup(&cbb);
  return ret;
}

// pmbtoken_exp1_hash_c implements the HashC operation (hashing a DLEQ proof
// transcript to a challenge scalar). Returns one on success, zero on error.
static int pmbtoken_exp1_hash_c(const EC_GROUP *group, EC_SCALAR *out,
                                uint8_t *buf, size_t len) {
  const uint8_t kHashCLabel[] = "PMBTokens Experiment V1 HashC";
  return ec_hash_to_scalar_p384_xmd_sha512_draft07(
      group, out, kHashCLabel, sizeof(kHashCLabel), buf, len);
}

// pmbtoken_exp1_hash_to_scalar implements the generic HashToScalar operation
// for this method. Returns one on success and zero on error.
static int pmbtoken_exp1_hash_to_scalar(const EC_GROUP *group, EC_SCALAR *out,
                                        uint8_t *buf, size_t len) {
  const uint8_t kHashLabel[] = "PMBTokens Experiment V1 HashToScalar";
  return ec_hash_to_scalar_p384_xmd_sha512_draft07(
      group, out, kHashLabel, sizeof(kHashLabel), buf, len);
}

// One-time initialization state for the experiment-v1 method table.
static int pmbtoken_exp1_ok = 0;
static PMBTOKEN_METHOD pmbtoken_exp1_method;
static CRYPTO_once_t pmbtoken_exp1_method_once = CRYPTO_ONCE_INIT;

// pmbtoken_exp1_init_method_impl fills in |pmbtoken_exp1_method|; it runs
// exactly once via |CRYPTO_once| and records success in |pmbtoken_exp1_ok|.
static void pmbtoken_exp1_init_method_impl(void) {
  // This is the output of |ec_hash_to_scalar_p384_xmd_sha512_draft07| with DST
  // "PMBTokens Experiment V1 HashH" and message "generator".
  // (Uncompressed SEC1 encoding of the second generator H, 97 bytes.)
  static const uint8_t kH[] = {
      0x04, 0x82, 0xd5, 0x68, 0xf5, 0x39, 0xf6, 0x08, 0x19, 0xa1, 0x75,
      0x9f, 0x98, 0xb5, 0x10, 0xf5, 0x0b, 0x9d, 0x2b, 0xe1, 0x64, 0x4d,
      0x02, 0x76, 0x18, 0x11, 0xf8, 0x2f, 0xd3, 0x33, 0x25, 0x1f, 0x2c,
      0xb8, 0xf6, 0xf1, 0x9e, 0x93, 0x85, 0x79, 0xb3, 0xb7, 0x81, 0xa3,
      0xe6, 0x23, 0xc3, 0x1c, 0xff, 0x03, 0xd9, 0x40, 0x6c, 0xec, 0xe0,
      0x4d, 0xea, 0xdf, 0x9d, 0x94, 0xd1, 0x87, 0xab, 0x27, 0xf7, 0x4f,
      0x53, 0xea, 0xa3, 0x18, 0x72, 0xb9, 0xd1, 0x56, 0xa0, 0x4e, 0x81,
      0xaa, 0xeb, 0x1c, 0x22, 0x6d, 0x39, 0x1c, 0x5e, 0xb1, 0x27, 0xfc,
      0x87, 0xc3, 0x95, 0xd0, 0x13, 0xb7, 0x0b, 0x5c, 0xc7,
  };
  pmbtoken_exp1_ok = pmbtoken_init_method(
      &pmbtoken_exp1_method, EC_group_p384(), kH, sizeof(kH),
      pmbtoken_exp1_hash_t, pmbtoken_exp1_hash_s, pmbtoken_exp1_hash_c,
      pmbtoken_exp1_hash_to_scalar, 1);
}

// pmbtoken_exp1_init_method ensures the method table is initialized. Returns
// one on success and zero if one-time initialization failed.
static int pmbtoken_exp1_init_method(void) {
  CRYPTO_once(&pmbtoken_exp1_method_once, pmbtoken_exp1_init_method_impl);
  if (!pmbtoken_exp1_ok) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, ERR_R_INTERNAL_ERROR);
    return 0;
  }
  return 1;
}

// The functions below are thin wrappers binding the generic pmbtoken_*
// implementations to the experiment-v1 method table. Each first ensures the
// method table is initialized.

int pmbtoken_exp1_generate_key(CBB *out_private, CBB *out_public) {
  if (!pmbtoken_exp1_init_method()) {
    return 0;
  }

  return pmbtoken_generate_key(&pmbtoken_exp1_method, out_private, out_public);
}

int pmbtoken_exp1_derive_key_from_secret(CBB *out_private, CBB *out_public,
                                         const uint8_t *secret,
                                         size_t secret_len) {
  if (!pmbtoken_exp1_init_method()) {
    return 0;
  }

  return pmbtoken_derive_key_from_secret(&pmbtoken_exp1_method, out_private,
                                         out_public, secret, secret_len);
}

int pmbtoken_exp1_client_key_from_bytes(TRUST_TOKEN_CLIENT_KEY *key,
                                        const uint8_t *in, size_t len) {
  if (!pmbtoken_exp1_init_method()) {
    return 0;
  }
  return pmbtoken_client_key_from_bytes(&pmbtoken_exp1_method, key, in, len);
}

int pmbtoken_exp1_issuer_key_from_bytes(TRUST_TOKEN_ISSUER_KEY *key,
                                        const uint8_t *in, size_t len) {
  if (!pmbtoken_exp1_init_method()) {
    return 0;
  }
  return pmbtoken_issuer_key_from_bytes(&pmbtoken_exp1_method, key, in, len);
}

STACK_OF(TRUST_TOKEN_PRETOKEN) *pmbtoken_exp1_blind(CBB *cbb, size_t count,
                                                    int include_message,
                                                    const uint8_t *msg,
                                                    size_t msg_len) {
  if (!pmbtoken_exp1_init_method()) {
    return NULL;
  }
  return pmbtoken_blind(&pmbtoken_exp1_method, cbb, count, include_message,
                        msg, msg_len);
}

int pmbtoken_exp1_sign(const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs,
                       size_t num_requested, size_t num_to_issue,
                       uint8_t private_metadata) {
  if (!pmbtoken_exp1_init_method()) {
    return 0;
  }
  return pmbtoken_sign(&pmbtoken_exp1_method, key, cbb, cbs, num_requested,
                       num_to_issue, private_metadata);
}

STACK_OF(TRUST_TOKEN) *pmbtoken_exp1_unblind(
    const TRUST_TOKEN_CLIENT_KEY *key,
    const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count,
    uint32_t key_id) {
  if (!pmbtoken_exp1_init_method()) {
    return NULL;
  }
  return pmbtoken_unblind(&pmbtoken_exp1_method, key, pretokens, cbs, count,
                          key_id);
}

int pmbtoken_exp1_read(const TRUST_TOKEN_ISSUER_KEY *key,
                       uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE],
                       uint8_t *out_private_metadata, const uint8_t *token,
                       size_t token_len, int include_message,
                       const uint8_t *msg, size_t msg_len) {
  if (!pmbtoken_exp1_init_method()) {
    return 0;
  }
  return pmbtoken_read(&pmbtoken_exp1_method, key, out_nonce,
                       out_private_metadata, token, token_len, include_message,
                       msg, msg_len);
}

// pmbtoken_exp1_get_h_for_testing exposes the uncompressed encoding of the
// second generator H (97 bytes for P-384) so tests can validate it.
int pmbtoken_exp1_get_h_for_testing(uint8_t out[97]) {
  if (!pmbtoken_exp1_init_method()) {
    return 0;
  }
  EC_AFFINE h;
  return ec_jacobian_to_affine(pmbtoken_exp1_method.group, &h,
                               &pmbtoken_exp1_method.h) &&
         ec_point_to_bytes(pmbtoken_exp1_method.group, &h,
                           POINT_CONVERSION_UNCOMPRESSED, out, 97) == 97;
}

// PMBTokens experiment v2.
// PMBTokens experiment v2: identical structure to experiment v1 (P-384,
// draft-07 SHA-512 suites) but with "PMBTokens Experiment V2" labels, a
// different generator H, and |prefix_point| disabled in the method table.

// pmbtoken_exp2_hash_t implements the HashT operation for experiment v2.
// Returns one on success and zero on error.
static int pmbtoken_exp2_hash_t(const EC_GROUP *group, EC_JACOBIAN *out,
                                const uint8_t t[TRUST_TOKEN_NONCE_SIZE]) {
  const uint8_t kHashTLabel[] = "PMBTokens Experiment V2 HashT";
  return ec_hash_to_curve_p384_xmd_sha512_sswu_draft07(
      group, out, kHashTLabel, sizeof(kHashTLabel), t, TRUST_TOKEN_NONCE_SIZE);
}

// pmbtoken_exp2_hash_s implements the HashS operation: it hashes the
// serialized point |t| concatenated with the nonce |s| to a group element in
// |out|. Returns one on success and zero on error.
static int pmbtoken_exp2_hash_s(const EC_GROUP *group, EC_JACOBIAN *out,
                                const EC_AFFINE *t,
                                const uint8_t s[TRUST_TOKEN_NONCE_SIZE]) {
  const uint8_t kHashSLabel[] = "PMBTokens Experiment V2 HashS";
  int ret = 0;
  CBB cbb;
  uint8_t *buf = NULL;
  size_t len;
  // Serialize |t| followed by |s|, then hash the encoding to the curve.
  if (!CBB_init(&cbb, 0) ||
      !point_to_cbb(&cbb, group, t) ||
      !CBB_add_bytes(&cbb, s, TRUST_TOKEN_NONCE_SIZE) ||
      !CBB_finish(&cbb, &buf, &len) ||
      !ec_hash_to_curve_p384_xmd_sha512_sswu_draft07(
          group, out, kHashSLabel, sizeof(kHashSLabel), buf, len)) {
    goto err;
  }

  ret = 1;

err:
  OPENSSL_free(buf);
  CBB_cleanup(&cbb);
  return ret;
}

// pmbtoken_exp2_hash_c implements the HashC operation (proof transcript to
// challenge scalar). Returns one on success and zero on error.
static int pmbtoken_exp2_hash_c(const EC_GROUP *group, EC_SCALAR *out,
                                uint8_t *buf, size_t len) {
  const uint8_t kHashCLabel[] = "PMBTokens Experiment V2 HashC";
  return ec_hash_to_scalar_p384_xmd_sha512_draft07(
      group, out, kHashCLabel, sizeof(kHashCLabel), buf, len);
}

// pmbtoken_exp2_hash_to_scalar implements the generic HashToScalar operation
// for this method. Returns one on success and zero on error.
static int pmbtoken_exp2_hash_to_scalar(const EC_GROUP *group, EC_SCALAR *out,
                                        uint8_t *buf, size_t len) {
  const uint8_t kHashLabel[] = "PMBTokens Experiment V2 HashToScalar";
  return ec_hash_to_scalar_p384_xmd_sha512_draft07(
      group, out, kHashLabel, sizeof(kHashLabel), buf, len);
}

// One-time initialization state for the experiment-v2 method table.
static int pmbtoken_exp2_ok = 0;
static PMBTOKEN_METHOD pmbtoken_exp2_method;
static CRYPTO_once_t pmbtoken_exp2_method_once = CRYPTO_ONCE_INIT;

// pmbtoken_exp2_init_method_impl fills in |pmbtoken_exp2_method|; it runs
// exactly once via |CRYPTO_once| and records success in |pmbtoken_exp2_ok|.
static void pmbtoken_exp2_init_method_impl(void) {
  // This is the output of |ec_hash_to_scalar_p384_xmd_sha512_draft07| with DST
  // "PMBTokens Experiment V2 HashH" and message "generator".
  // (Uncompressed SEC1 encoding of the second generator H, 97 bytes.)
  static const uint8_t kH[] = {
      0x04, 0xbc, 0x27, 0x24, 0x99, 0xfa, 0xc9, 0xa4, 0x74, 0x6f, 0xf9,
      0x07, 0x81, 0x55, 0xf8, 0x1f, 0x6f, 0xda, 0x09, 0xe7, 0x8c, 0x5d,
      0x9e, 0x4e, 0x14, 0x7c, 0x53, 0x14, 0xbc, 0x7e, 0x29, 0x57, 0x92,
      0x17, 0x94, 0x6e, 0xd2, 0xdf, 0xa5, 0x31, 0x1b, 0x4e, 0xb7, 0xfc,
      0x93, 0xe3, 0x6e, 0x14, 0x1f, 0x4f, 0x14, 0xf3, 0xe5, 0x47, 0x61,
      0x1c, 0x2c, 0x72, 0x25, 0xf0, 0x4a, 0x45, 0x23, 0x2d, 0x57, 0x93,
      0x0e, 0xb2, 0x55, 0xb8, 0x57, 0x25, 0x4c, 0x1e, 0xdb, 0xfd, 0x58,
      0x70, 0x17, 0x9a, 0xbb, 0x9e, 0x5e, 0x93, 0x9e, 0x92, 0xd3, 0xe8,
      0x25, 0x62, 0xbf, 0x59, 0xb2, 0xd2, 0x3d, 0x71, 0xff};
  pmbtoken_exp2_ok = pmbtoken_init_method(
      &pmbtoken_exp2_method, EC_group_p384(), kH, sizeof(kH),
      pmbtoken_exp2_hash_t, pmbtoken_exp2_hash_s, pmbtoken_exp2_hash_c,
      pmbtoken_exp2_hash_to_scalar, 0);
}

// pmbtoken_exp2_init_method ensures the method table is initialized. Returns
// one on success and zero if one-time initialization failed.
static int pmbtoken_exp2_init_method(void) {
  CRYPTO_once(&pmbtoken_exp2_method_once, pmbtoken_exp2_init_method_impl);
  if (!pmbtoken_exp2_ok) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, ERR_R_INTERNAL_ERROR);
    return 0;
  }
  return 1;
}

// The functions below are thin wrappers binding the generic pmbtoken_*
// implementations to the experiment-v2 method table.

int pmbtoken_exp2_generate_key(CBB *out_private, CBB *out_public) {
  if (!pmbtoken_exp2_init_method()) {
    return 0;
  }

  return pmbtoken_generate_key(&pmbtoken_exp2_method, out_private, out_public);
}

int pmbtoken_exp2_derive_key_from_secret(CBB *out_private, CBB *out_public,
                                         const uint8_t *secret,
                                         size_t secret_len) {
  if (!pmbtoken_exp2_init_method()) {
    return 0;
  }

  return pmbtoken_derive_key_from_secret(&pmbtoken_exp2_method, out_private,
                                         out_public, secret, secret_len);
}

int pmbtoken_exp2_client_key_from_bytes(TRUST_TOKEN_CLIENT_KEY *key,
                                        const uint8_t *in, size_t len) {
  if (!pmbtoken_exp2_init_method()) {
    return 0;
  }
  return pmbtoken_client_key_from_bytes(&pmbtoken_exp2_method, key, in, len);
}

int pmbtoken_exp2_issuer_key_from_bytes(TRUST_TOKEN_ISSUER_KEY *key,
                                        const uint8_t *in, size_t len) {
  if (!pmbtoken_exp2_init_method()) {
    return 0;
  }
  return pmbtoken_issuer_key_from_bytes(&pmbtoken_exp2_method, key, in, len);
}

STACK_OF(TRUST_TOKEN_PRETOKEN) *pmbtoken_exp2_blind(CBB *cbb, size_t count,
                                                    int include_message,
                                                    const uint8_t *msg,
                                                    size_t msg_len) {
  if (!pmbtoken_exp2_init_method()) {
    return NULL;
  }
  return pmbtoken_blind(&pmbtoken_exp2_method, cbb, count, include_message,
                        msg, msg_len);
}

int pmbtoken_exp2_sign(const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs,
                       size_t num_requested, size_t num_to_issue,
                       uint8_t private_metadata) {
  if (!pmbtoken_exp2_init_method()) {
    return 0;
  }
  return pmbtoken_sign(&pmbtoken_exp2_method, key, cbb, cbs, num_requested,
                       num_to_issue, private_metadata);
}

STACK_OF(TRUST_TOKEN) *pmbtoken_exp2_unblind(
    const TRUST_TOKEN_CLIENT_KEY *key,
    const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count,
    uint32_t key_id) {
  if (!pmbtoken_exp2_init_method()) {
    return NULL;
  }
  return pmbtoken_unblind(&pmbtoken_exp2_method, key, pretokens, cbs, count,
                          key_id);
}

int pmbtoken_exp2_read(const TRUST_TOKEN_ISSUER_KEY *key,
                       uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE],
                       uint8_t *out_private_metadata, const uint8_t *token,
                       size_t token_len, int include_message,
                       const uint8_t *msg, size_t msg_len) {
  if (!pmbtoken_exp2_init_method()) {
    return 0;
  }
  return pmbtoken_read(&pmbtoken_exp2_method, key, out_nonce,
                       out_private_metadata, token, token_len, include_message,
                       msg, msg_len);
}

// pmbtoken_exp2_get_h_for_testing exposes the uncompressed encoding of the
// second generator H (97 bytes for P-384) so tests can validate it.
int pmbtoken_exp2_get_h_for_testing(uint8_t out[97]) {
  if (!pmbtoken_exp2_init_method()) {
    return 0;
  }
  EC_AFFINE h;
  return ec_jacobian_to_affine(pmbtoken_exp2_method.group, &h,
                               &pmbtoken_exp2_method.h) &&
         ec_point_to_bytes(pmbtoken_exp2_method.group, &h,
                           POINT_CONVERSION_UNCOMPRESSED, out, 97) == 97;
}

// PMBTokens PST v1.
// PMBTokens PST v1 (Private State Tokens): same structure as the experiments
// above but using the final (non-draft07) SHA-384 hash-to-curve and
// hash-to-scalar suites with "PMBTokens PST V1" labels.

// pmbtoken_pst1_hash_t implements the HashT operation for PST v1. Returns one
// on success and zero on error.
static int pmbtoken_pst1_hash_t(const EC_GROUP *group, EC_JACOBIAN *out,
                                const uint8_t t[TRUST_TOKEN_NONCE_SIZE]) {
  const uint8_t kHashTLabel[] = "PMBTokens PST V1 HashT";
  return ec_hash_to_curve_p384_xmd_sha384_sswu(
      group, out, kHashTLabel, sizeof(kHashTLabel), t, TRUST_TOKEN_NONCE_SIZE);
}

// pmbtoken_pst1_hash_s implements the HashS operation: it hashes the
// serialized point |t| concatenated with the nonce |s| to a group element in
// |out|. Returns one on success and zero on error.
static int pmbtoken_pst1_hash_s(const EC_GROUP *group, EC_JACOBIAN *out,
                                const EC_AFFINE *t,
                                const uint8_t s[TRUST_TOKEN_NONCE_SIZE]) {
  const uint8_t kHashSLabel[] = "PMBTokens PST V1 HashS";
  int ret = 0;
  CBB cbb;
  uint8_t *buf = NULL;
  size_t len;
  // Serialize |t| followed by |s|, then hash the encoding to the curve.
  if (!CBB_init(&cbb, 0) ||
      !point_to_cbb(&cbb, group, t) ||
      !CBB_add_bytes(&cbb, s, TRUST_TOKEN_NONCE_SIZE) ||
      !CBB_finish(&cbb, &buf, &len) ||
      !ec_hash_to_curve_p384_xmd_sha384_sswu(group, out, kHashSLabel,
                                             sizeof(kHashSLabel), buf, len)) {
    goto err;
  }

  ret = 1;

err:
  OPENSSL_free(buf);
  CBB_cleanup(&cbb);
  return ret;
}

// pmbtoken_pst1_hash_c implements the HashC operation (proof transcript to
// challenge scalar). Returns one on success and zero on error.
static int pmbtoken_pst1_hash_c(const EC_GROUP *group, EC_SCALAR *out,
                                uint8_t *buf, size_t len) {
  const uint8_t kHashCLabel[] = "PMBTokens PST V1 HashC";
  return ec_hash_to_scalar_p384_xmd_sha384(group, out, kHashCLabel,
                                           sizeof(kHashCLabel), buf, len);
}

// pmbtoken_pst1_hash_to_scalar implements the generic HashToScalar operation
// for this method. Returns one on success and zero on error.
static int pmbtoken_pst1_hash_to_scalar(const EC_GROUP *group, EC_SCALAR *out,
                                        uint8_t *buf, size_t len) {
  const uint8_t kHashLabel[] = "PMBTokens PST V1 HashToScalar";
  return ec_hash_to_scalar_p384_xmd_sha384(group, out, kHashLabel,
                                           sizeof(kHashLabel), buf, len);
}

// One-time initialization state for the PST-v1 method table.
static int pmbtoken_pst1_ok = 0;
static PMBTOKEN_METHOD pmbtoken_pst1_method;
static CRYPTO_once_t pmbtoken_pst1_method_once = CRYPTO_ONCE_INIT;

// pmbtoken_pst1_init_method_impl fills in |pmbtoken_pst1_method|; it runs
// exactly once via |CRYPTO_once| and records success in |pmbtoken_pst1_ok|.
static void pmbtoken_pst1_init_method_impl(void) {
  // This is the output of |ec_hash_to_scalar_p384_xmd_sha384| with DST
  // "PMBTokens PST V1 HashH" and message "generator".
  // (Uncompressed SEC1 encoding of the second generator H, 97 bytes.)
  static const uint8_t kH[] = {
      0x04, 0x4c, 0xfa, 0xd4, 0x33, 0x6d, 0x8c, 0x4e, 0x18, 0xce, 0x1a,
      0x82, 0x7b, 0x53, 0x8c, 0xf8, 0x63, 0x18, 0xe5, 0xa3, 0x96, 0x0d,
      0x05, 0xde, 0xf4, 0x83, 0xa7, 0xd8, 0xde, 0x9c, 0x50, 0x81, 0x38,
      0xc9, 0x38, 0x25, 0xa3, 0x70, 0x97, 0xc1, 0x1c, 0x33, 0x2e, 0x83,
      0x68, 0x64, 0x9c, 0x53, 0x73, 0xc3, 0x03, 0xc1, 0xa9, 0xd8, 0x92,
      0xa2, 0x32, 0xf4, 0x22, 0x40, 0x07, 0x2d, 0x9b, 0x6f, 0xab, 0xff,
      0x2a, 0x92, 0x03, 0xb1, 0x73, 0x09, 0x1a, 0x6a, 0x4a, 0xc2, 0x4c,
      0xac, 0x13, 0x59, 0xf4, 0x28, 0x0e, 0x78, 0x69, 0xa5, 0xdf, 0x0d,
      0x74, 0xeb, 0x14, 0xca, 0x8a, 0x32, 0xbb, 0xd3, 0x91};
  pmbtoken_pst1_ok = pmbtoken_init_method(
      &pmbtoken_pst1_method, EC_group_p384(), kH, sizeof(kH),
      pmbtoken_pst1_hash_t, pmbtoken_pst1_hash_s, pmbtoken_pst1_hash_c,
      pmbtoken_pst1_hash_to_scalar, 0);
}

// pmbtoken_pst1_init_method ensures the method table is initialized. Returns
// one on success and zero if one-time initialization failed.
static int pmbtoken_pst1_init_method(void) {
  CRYPTO_once(&pmbtoken_pst1_method_once, pmbtoken_pst1_init_method_impl);
  if (!pmbtoken_pst1_ok) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, ERR_R_INTERNAL_ERROR);
    return 0;
  }
  return 1;
}

// The functions below are thin wrappers binding the generic pmbtoken_*
// implementations to the PST-v1 method table.

int pmbtoken_pst1_generate_key(CBB *out_private, CBB *out_public) {
  if (!pmbtoken_pst1_init_method()) {
    return 0;
  }

  return pmbtoken_generate_key(&pmbtoken_pst1_method, out_private, out_public);
}

int pmbtoken_pst1_derive_key_from_secret(CBB *out_private, CBB *out_public,
                                         const uint8_t *secret,
                                         size_t secret_len) {
  if (!pmbtoken_pst1_init_method()) {
    return 0;
  }

  return pmbtoken_derive_key_from_secret(&pmbtoken_pst1_method, out_private,
                                         out_public, secret, secret_len);
}

int pmbtoken_pst1_client_key_from_bytes(TRUST_TOKEN_CLIENT_KEY *key,
                                        const uint8_t *in, size_t len) {
  if (!pmbtoken_pst1_init_method()) {
    return 0;
  }
  return pmbtoken_client_key_from_bytes(&pmbtoken_pst1_method, key, in, len);
}

int pmbtoken_pst1_issuer_key_from_bytes(TRUST_TOKEN_ISSUER_KEY *key,
                                        const uint8_t *in, size_t len) {
  if (!pmbtoken_pst1_init_method()) {
    return 0;
  }
  return pmbtoken_issuer_key_from_bytes(&pmbtoken_pst1_method, key, in, len);
}

STACK_OF(TRUST_TOKEN_PRETOKEN) *pmbtoken_pst1_blind(CBB *cbb, size_t count,
                                                    int include_message,
                                                    const uint8_t *msg,
                                                    size_t msg_len) {
  if (!pmbtoken_pst1_init_method()) {
    return NULL;
  }
  return pmbtoken_blind(&pmbtoken_pst1_method, cbb, count, include_message,
                        msg, msg_len);
}

int pmbtoken_pst1_sign(const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs,
                       size_t num_requested, size_t num_to_issue,
                       uint8_t private_metadata) {
  if (!pmbtoken_pst1_init_method()) {
    return 0;
  }
  return pmbtoken_sign(&pmbtoken_pst1_method, key, cbb, cbs, num_requested,
                       num_to_issue, private_metadata);
}

STACK_OF(TRUST_TOKEN) *pmbtoken_pst1_unblind(
    const TRUST_TOKEN_CLIENT_KEY *key,
    const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count,
    uint32_t key_id) {
  if (!pmbtoken_pst1_init_method()) {
    return NULL;
  }
  return pmbtoken_unblind(&pmbtoken_pst1_method, key, pretokens, cbs, count,
                          key_id);
}

int pmbtoken_pst1_read(const TRUST_TOKEN_ISSUER_KEY *key,
                       uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE],
                       uint8_t *out_private_metadata, const uint8_t *token,
                       size_t token_len, int include_message,
                       const uint8_t *msg, size_t msg_len) {
  if (!pmbtoken_pst1_init_method()) {
    return 0;
  }
  return pmbtoken_read(&pmbtoken_pst1_method, key, out_nonce,
                       out_private_metadata, token, token_len, include_message,
                       msg, msg_len);
}

// pmbtoken_pst1_get_h_for_testing exposes the uncompressed encoding of the
// second generator H (97 bytes for P-384) so tests can validate it.
int pmbtoken_pst1_get_h_for_testing(uint8_t out[97]) {
  if (!pmbtoken_pst1_init_method()) {
    return 0;
  }
  EC_AFFINE h;
  return ec_jacobian_to_affine(pmbtoken_pst1_method.group, &h,
                               &pmbtoken_pst1_method.h) &&
         ec_point_to_bytes(pmbtoken_pst1_method.group, &h,
                           POINT_CONVERSION_UNCOMPRESSED, out, 97) == 97;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/trust_token/trust_token.cc
================================================
/* Copyright 2019 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the header names of the following angle-bracket includes
// appear to have been stripped by the extraction that produced this dump;
// the directives are preserved verbatim. TODO confirm against upstream.
#include
#include
#include
#include
#include
#include

#include "internal.h"


// The Trust Token API is described in
// https://github.com/WICG/trust-token-api/blob/main/README.md and provides a
// protocol for issuing and redeeming tokens built on top of the PMBTokens
// construction.

// TRUST_TOKEN_experiment_v1 returns the method table for protocol version
// "experiment v1": PMBTokens with private metadata, up to three keys, and a
// signed redemption record (SRR).
const TRUST_TOKEN_METHOD *TRUST_TOKEN_experiment_v1(void) {
  static const TRUST_TOKEN_METHOD kMethod = {
      pmbtoken_exp1_generate_key,
      pmbtoken_exp1_derive_key_from_secret,
      pmbtoken_exp1_client_key_from_bytes,
      pmbtoken_exp1_issuer_key_from_bytes,
      pmbtoken_exp1_blind,
      pmbtoken_exp1_sign,
      pmbtoken_exp1_unblind,
      pmbtoken_exp1_read,
      1, /* has_private_metadata */
      3, /* max_keys */
      1, /* has_srr */
  };
  return &kMethod;
}

// TRUST_TOKEN_experiment_v2_voprf returns the method table for experiment v2
// built on a VOPRF: no private metadata, up to six keys, no SRR.
const TRUST_TOKEN_METHOD *TRUST_TOKEN_experiment_v2_voprf(void) {
  static const TRUST_TOKEN_METHOD kMethod = {
      voprf_exp2_generate_key,
      voprf_exp2_derive_key_from_secret,
      voprf_exp2_client_key_from_bytes,
      voprf_exp2_issuer_key_from_bytes,
      voprf_exp2_blind,
      voprf_exp2_sign,
      voprf_exp2_unblind,
      voprf_exp2_read,
      0, /* has_private_metadata */
      6, /* max_keys */
      0, /* has_srr */
  };
  return &kMethod;
}

// TRUST_TOKEN_experiment_v2_pmb returns the method table for experiment v2
// built on PMBTokens: private metadata, up to three keys, no SRR.
const TRUST_TOKEN_METHOD *TRUST_TOKEN_experiment_v2_pmb(void) {
  static const TRUST_TOKEN_METHOD kMethod = {
      pmbtoken_exp2_generate_key,
      pmbtoken_exp2_derive_key_from_secret,
      pmbtoken_exp2_client_key_from_bytes,
      pmbtoken_exp2_issuer_key_from_bytes,
      pmbtoken_exp2_blind,
      pmbtoken_exp2_sign,
      pmbtoken_exp2_unblind,
      pmbtoken_exp2_read,
      1, /* has_private_metadata */
      3, /* max_keys */
      0, /* has_srr */
  };
  return &kMethod;
}

// TRUST_TOKEN_pst_v1_voprf returns the method table for PST v1 built on a
// VOPRF: no private metadata, up to six keys, no SRR.
const TRUST_TOKEN_METHOD *TRUST_TOKEN_pst_v1_voprf(void) {
  static const TRUST_TOKEN_METHOD kMethod = {
      voprf_pst1_generate_key,
      voprf_pst1_derive_key_from_secret,
      voprf_pst1_client_key_from_bytes,
      voprf_pst1_issuer_key_from_bytes,
      voprf_pst1_blind,
      voprf_pst1_sign,
      voprf_pst1_unblind,
      voprf_pst1_read,
      0, /* has_private_metadata */
      6, /* max_keys */
      0, /* has_srr */
  };
  return &kMethod;
}

// TRUST_TOKEN_pst_v1_pmb returns the method table for PST v1 built on
// PMBTokens: private metadata, up to three keys, no SRR.
const TRUST_TOKEN_METHOD *TRUST_TOKEN_pst_v1_pmb(void) {
  static const TRUST_TOKEN_METHOD kMethod = {
      pmbtoken_pst1_generate_key,
      pmbtoken_pst1_derive_key_from_secret,
      pmbtoken_pst1_client_key_from_bytes,
      pmbtoken_pst1_issuer_key_from_bytes,
      pmbtoken_pst1_blind,
      pmbtoken_pst1_sign,
      pmbtoken_pst1_unblind,
      pmbtoken_pst1_read,
      1, /* has_private_metadata */
      3, /* max_keys */
      0, /* has_srr */
  };
  return &kMethod;
}

// TRUST_TOKEN_PRETOKEN_free releases |pretoken|. Pretokens are allocated as a
// single block, so a plain free suffices.
void TRUST_TOKEN_PRETOKEN_free(TRUST_TOKEN_PRETOKEN *pretoken) {
  OPENSSL_free(pretoken);
}

// TRUST_TOKEN_new allocates a token holding a copy of |len| bytes of |data|.
// Returns NULL on allocation failure. A zero-length token is permitted.
TRUST_TOKEN *TRUST_TOKEN_new(const uint8_t *data, size_t len) {
  // NOTE(review): the template argument of this cast (and the one below)
  // appears stripped by extraction; upstream casts to TRUST_TOKEN * and
  // uint8_t * respectively. TODO confirm against upstream.
  TRUST_TOKEN *ret =
      reinterpret_cast(OPENSSL_zalloc(sizeof(TRUST_TOKEN)));
  if (ret == NULL) {
    return NULL;
  }
  ret->data = reinterpret_cast(OPENSSL_memdup(data, len));
  // |OPENSSL_memdup| returns NULL for len == 0, which is not an error here.
  if (len != 0 && ret->data == NULL) {
    OPENSSL_free(ret);
    return NULL;
  }
  ret->len = len;
  return ret;
}

// TRUST_TOKEN_free releases |token| and its owned data buffer.
void TRUST_TOKEN_free(TRUST_TOKEN *token) {
  if (token == NULL) {
    return;
  }
  OPENSSL_free(token->data);
  OPENSSL_free(token);
}

// TRUST_TOKEN_generate_key writes a fresh keypair for |method| into the
// caller-provided fixed buffers, each prefixed with the 32-bit key |id|.
// Returns one on success and zero on error (including buffer too small).
int TRUST_TOKEN_generate_key(const TRUST_TOKEN_METHOD *method,
                             uint8_t *out_priv_key, size_t *out_priv_key_len,
                             size_t max_priv_key_len, uint8_t *out_pub_key,
                             size_t *out_pub_key_len, size_t max_pub_key_len,
                             uint32_t id) {
  // Prepend the key ID in front of the PMBTokens format.
  CBB priv_cbb, pub_cbb;
  CBB_init_fixed(&priv_cbb, out_priv_key, max_priv_key_len);
  CBB_init_fixed(&pub_cbb, out_pub_key, max_pub_key_len);
  if (!CBB_add_u32(&priv_cbb, id) ||  //
      !CBB_add_u32(&pub_cbb, id)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (!method->generate_key(&priv_cbb, &pub_cbb)) {
    return 0;
  }

  if (!CBB_finish(&priv_cbb, NULL, out_priv_key_len) ||
      !CBB_finish(&pub_cbb, NULL, out_pub_key_len)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_BUFFER_TOO_SMALL);
    return 0;
  }

  return 1;
}

// TRUST_TOKEN_derive_key_from_secret deterministically derives a keypair for
// |method| from |secret|, in the same wire format as
// |TRUST_TOKEN_generate_key|. Returns one on success and zero on error.
int TRUST_TOKEN_derive_key_from_secret(
    const TRUST_TOKEN_METHOD *method, uint8_t *out_priv_key,
    size_t *out_priv_key_len, size_t max_priv_key_len, uint8_t *out_pub_key,
    size_t *out_pub_key_len, size_t max_pub_key_len, uint32_t id,
    const uint8_t *secret, size_t secret_len) {
  // Prepend the key ID in front of the PMBTokens format.
  CBB priv_cbb, pub_cbb;
  CBB_init_fixed(&priv_cbb, out_priv_key, max_priv_key_len);
  CBB_init_fixed(&pub_cbb, out_pub_key, max_pub_key_len);
  if (!CBB_add_u32(&priv_cbb, id) ||  //
      !CBB_add_u32(&pub_cbb, id)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (!method->derive_key_from_secret(&priv_cbb, &pub_cbb, secret,
                                      secret_len)) {
    return 0;
  }

  if (!CBB_finish(&priv_cbb, NULL, out_priv_key_len) ||
      !CBB_finish(&pub_cbb, NULL, out_pub_key_len)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_BUFFER_TOO_SMALL);
    return 0;
  }

  return 1;
}

// TRUST_TOKEN_CLIENT_new allocates a client context for |method| that will
// request at most |max_batchsize| tokens per issuance. Returns NULL on error.
TRUST_TOKEN_CLIENT *TRUST_TOKEN_CLIENT_new(const TRUST_TOKEN_METHOD *method,
                                           size_t max_batchsize) {
  if (max_batchsize > 0xffff) {
    // The protocol supports only two-byte token counts.
    OPENSSL_PUT_ERROR(TRUST_TOKEN, ERR_R_OVERFLOW);
    return NULL;
  }

  // NOTE(review): cast template argument stripped by extraction; upstream
  // casts to TRUST_TOKEN_CLIENT *. TODO confirm against upstream.
  TRUST_TOKEN_CLIENT *ret = reinterpret_cast(
      OPENSSL_zalloc(sizeof(TRUST_TOKEN_CLIENT)));
  if (ret == NULL) {
    return NULL;
  }

  ret->method = method;
  ret->max_batchsize = (uint16_t)max_batchsize;
  return ret;
}

// TRUST_TOKEN_CLIENT_free releases |ctx|, its SRR key reference, and any
// outstanding pretokens.
void TRUST_TOKEN_CLIENT_free(TRUST_TOKEN_CLIENT *ctx) {
  if (ctx == NULL) {
    return;
  }
  EVP_PKEY_free(ctx->srr_key);
  sk_TRUST_TOKEN_PRETOKEN_pop_free(ctx->pretokens, TRUST_TOKEN_PRETOKEN_free);
  OPENSSL_free(ctx);
}

// TRUST_TOKEN_CLIENT_add_key parses an issuer public key (4-byte key ID
// followed by the method-specific encoding) and stores it in the next free
// slot, returning the slot index in |*out_key_index|. Returns one on success
// and zero on error.
int TRUST_TOKEN_CLIENT_add_key(TRUST_TOKEN_CLIENT *ctx, size_t *out_key_index,
                               const uint8_t *key, size_t key_len) {
  // Reject once either the fixed array or the method's key limit is reached.
  if (ctx->num_keys == OPENSSL_ARRAY_SIZE(ctx->keys) ||
      ctx->num_keys >= ctx->method->max_keys) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_TOO_MANY_KEYS);
    return 0;
  }

  struct trust_token_client_key_st *key_s = &ctx->keys[ctx->num_keys];
  CBS cbs;
  CBS_init(&cbs, key, key_len);
  uint32_t key_id;
  if (!CBS_get_u32(&cbs, &key_id) ||
      !ctx->method->client_key_from_bytes(&key_s->key, CBS_data(&cbs),
                                          CBS_len(&cbs))) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
    return 0;
  }
  key_s->id = key_id;
  *out_key_index = ctx->num_keys;
  ctx->num_keys += 1;
  return 1;
}

// TRUST_TOKEN_CLIENT_set_srr_key stores the key used to verify signed
// redemption records. A no-op (returning success) for methods without SRRs.
int TRUST_TOKEN_CLIENT_set_srr_key(TRUST_TOKEN_CLIENT *ctx, EVP_PKEY *key) {
  if (!ctx->method->has_srr) {
    return 1;
  }
  EVP_PKEY_free(ctx->srr_key);
  EVP_PKEY_up_ref(key);
  ctx->srr_key = key;
  return 1;
}

// trust_token_client_begin_issuance_impl builds an issuance request for up to
// |count| tokens (clamped to the configured batch size), storing the blinded
// pretokens in |ctx| until |TRUST_TOKEN_CLIENT_finish_issuance|. When
// |include_message| is set, |msg| is bound into the pretokens. Returns one on
// success and zero on error.
static int trust_token_client_begin_issuance_impl(
    TRUST_TOKEN_CLIENT *ctx, uint8_t **out, size_t *out_len, size_t count,
    int include_message, const uint8_t *msg, size_t msg_len) {
  if (count > ctx->max_batchsize) {
    count = ctx->max_batchsize;
  }

  int ret = 0;
  CBB request;
  STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens = NULL;
  if (!CBB_init(&request, 0) ||
      !CBB_add_u16(&request, count)) {
    goto err;
  }

  pretokens =
      ctx->method->blind(&request, count, include_message, msg, msg_len);
  if (pretokens == NULL) {
    goto err;
  }

  if (!CBB_finish(&request, out, out_len)) {
    goto err;
  }

  // Replace any pretokens from an earlier, unfinished issuance.
  sk_TRUST_TOKEN_PRETOKEN_pop_free(ctx->pretokens, TRUST_TOKEN_PRETOKEN_free);
  ctx->pretokens = pretokens;
  pretokens = NULL;
  ret = 1;

err:
  CBB_cleanup(&request);
  sk_TRUST_TOKEN_PRETOKEN_pop_free(pretokens, TRUST_TOKEN_PRETOKEN_free);
  return ret;
}

// TRUST_TOKEN_CLIENT_begin_issuance builds an issuance request without a
// bound message. See |trust_token_client_begin_issuance_impl|.
int TRUST_TOKEN_CLIENT_begin_issuance(TRUST_TOKEN_CLIENT *ctx, uint8_t **out,
                                      size_t *out_len, size_t count) {
  return trust_token_client_begin_issuance_impl(ctx, out, out_len, count,
                                                /*include_message=*/0, NULL,
                                                0);
}

// TRUST_TOKEN_CLIENT_begin_issuance_over_message builds an issuance request
// binding |msg| into the tokens. See
// |trust_token_client_begin_issuance_impl|.
int TRUST_TOKEN_CLIENT_begin_issuance_over_message(
    TRUST_TOKEN_CLIENT *ctx, uint8_t **out, size_t *out_len, size_t count,
    const uint8_t *msg, size_t msg_len) {
  return trust_token_client_begin_issuance_impl(
      ctx, out, out_len, count, /*include_message=*/1, msg, msg_len);
}

// TRUST_TOKEN_CLIENT_finish_issuance parses the issuer's response, unblinds
// the issued tokens against the stored pretokens, and returns them. The index
// of the key used is written to |*out_key_index|. On success the stored
// pretokens are consumed. Returns NULL on error.
STACK_OF(TRUST_TOKEN) *TRUST_TOKEN_CLIENT_finish_issuance(
    TRUST_TOKEN_CLIENT *ctx, size_t *out_key_index, const uint8_t *response,
    size_t response_len) {
  CBS in;
  CBS_init(&in, response, response_len);
  uint16_t count;
  uint32_t key_id;
  if (!CBS_get_u16(&in, &count) ||
      !CBS_get_u32(&in, &key_id)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
    return NULL;
  }

  // Look up the signing key by the ID the issuer reported.
  size_t key_index = 0;
  const struct trust_token_client_key_st *key = NULL;
  for (size_t i = 0; i < ctx->num_keys; i++) {
    if (ctx->keys[i].id == key_id) {
      key_index = i;
      key = &ctx->keys[i];
      break;
    }
  }

  if (key == NULL) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_INVALID_KEY_ID);
    return NULL;
  }

  // The issuer may not claim more tokens than we requested.
  if (count > sk_TRUST_TOKEN_PRETOKEN_num(ctx->pretokens)) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
    return NULL;
  }

  STACK_OF(TRUST_TOKEN) *tokens =
      ctx->method->unblind(&key->key, ctx->pretokens, &in, count, key_id);
  if (tokens == NULL) {
    return NULL;
  }

  // Reject trailing data after the response.
  if (CBS_len(&in) != 0) {
    OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE);
    sk_TRUST_TOKEN_pop_free(tokens, TRUST_TOKEN_free);
    return NULL;
  }

  sk_TRUST_TOKEN_PRETOKEN_pop_free(ctx->pretokens, TRUST_TOKEN_PRETOKEN_free);
  ctx->pretokens = NULL;

  *out_key_index = key_index;
  return tokens;
}

int TRUST_TOKEN_CLIENT_begin_redemption(TRUST_TOKEN_CLIENT *ctx,
two-byte token counts. OPENSSL_PUT_ERROR(TRUST_TOKEN, ERR_R_OVERFLOW); return NULL; } TRUST_TOKEN_ISSUER *ret = reinterpret_cast( OPENSSL_zalloc(sizeof(TRUST_TOKEN_ISSUER))); if (ret == NULL) { return NULL; } ret->method = method; ret->max_batchsize = (uint16_t)max_batchsize; return ret; } void TRUST_TOKEN_ISSUER_free(TRUST_TOKEN_ISSUER *ctx) { if (ctx == NULL) { return; } EVP_PKEY_free(ctx->srr_key); OPENSSL_free(ctx->metadata_key); OPENSSL_free(ctx); } int TRUST_TOKEN_ISSUER_add_key(TRUST_TOKEN_ISSUER *ctx, const uint8_t *key, size_t key_len) { if (ctx->num_keys == OPENSSL_ARRAY_SIZE(ctx->keys) || ctx->num_keys >= ctx->method->max_keys) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_TOO_MANY_KEYS); return 0; } struct trust_token_issuer_key_st *key_s = &ctx->keys[ctx->num_keys]; CBS cbs; CBS_init(&cbs, key, key_len); uint32_t key_id; if (!CBS_get_u32(&cbs, &key_id) || !ctx->method->issuer_key_from_bytes(&key_s->key, CBS_data(&cbs), CBS_len(&cbs))) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); return 0; } key_s->id = key_id; ctx->num_keys += 1; return 1; } int TRUST_TOKEN_ISSUER_set_srr_key(TRUST_TOKEN_ISSUER *ctx, EVP_PKEY *key) { EVP_PKEY_free(ctx->srr_key); EVP_PKEY_up_ref(key); ctx->srr_key = key; return 1; } int TRUST_TOKEN_ISSUER_set_metadata_key(TRUST_TOKEN_ISSUER *ctx, const uint8_t *key, size_t len) { if (len < 32) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_INVALID_METADATA_KEY); } OPENSSL_free(ctx->metadata_key); ctx->metadata_key_len = 0; ctx->metadata_key = reinterpret_cast(OPENSSL_memdup(key, len)); if (ctx->metadata_key == NULL) { return 0; } ctx->metadata_key_len = len; return 1; } static const struct trust_token_issuer_key_st *trust_token_issuer_get_key( const TRUST_TOKEN_ISSUER *ctx, uint32_t key_id) { for (size_t i = 0; i < ctx->num_keys; i++) { if (ctx->keys[i].id == key_id) { return &ctx->keys[i]; } } return NULL; } int TRUST_TOKEN_ISSUER_issue(const TRUST_TOKEN_ISSUER *ctx, uint8_t **out, size_t *out_len, size_t 
*out_tokens_issued, const uint8_t *request, size_t request_len, uint32_t public_metadata, uint8_t private_metadata, size_t max_issuance) { if (max_issuance > ctx->max_batchsize) { max_issuance = ctx->max_batchsize; } const struct trust_token_issuer_key_st *key = trust_token_issuer_get_key(ctx, public_metadata); if (key == NULL || private_metadata > 1 || (!ctx->method->has_private_metadata && private_metadata != 0)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_INVALID_METADATA); return 0; } CBS in; uint16_t num_requested; CBS_init(&in, request, request_len); if (!CBS_get_u16(&in, &num_requested)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); return 0; } size_t num_to_issue = num_requested; if (num_to_issue > max_issuance) { num_to_issue = max_issuance; } int ret = 0; CBB response; if (!CBB_init(&response, 0) || !CBB_add_u16(&response, num_to_issue) || !CBB_add_u32(&response, public_metadata)) { goto err; } if (!ctx->method->sign(&key->key, &response, &in, num_requested, num_to_issue, private_metadata)) { goto err; } if (CBS_len(&in) != 0) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); goto err; } if (!CBB_finish(&response, out, out_len)) { goto err; } *out_tokens_issued = num_to_issue; ret = 1; err: CBB_cleanup(&response); return ret; } static int trust_token_issuer_redeem_impl( const TRUST_TOKEN_ISSUER *ctx, uint32_t *out_public, uint8_t *out_private, TRUST_TOKEN **out_token, uint8_t **out_client_data, size_t *out_client_data_len, const uint8_t *request, size_t request_len, int include_message, const uint8_t *msg, size_t msg_len) { CBS request_cbs, token_cbs; CBS_init(&request_cbs, request, request_len); if (!CBS_get_u16_length_prefixed(&request_cbs, &token_cbs)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_ERROR); return 0; } uint32_t public_metadata = 0; uint8_t private_metadata = 0; // Parse the token. If there is an error, treat it as an invalid token. 
if (!CBS_get_u32(&token_cbs, &public_metadata)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_INVALID_TOKEN); return 0; } const struct trust_token_issuer_key_st *key = trust_token_issuer_get_key(ctx, public_metadata); uint8_t nonce[TRUST_TOKEN_NONCE_SIZE]; if (key == NULL || !ctx->method->read(&key->key, nonce, &private_metadata, CBS_data(&token_cbs), CBS_len(&token_cbs), include_message, msg, msg_len)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_INVALID_TOKEN); return 0; } CBS client_data; if (!CBS_get_u16_length_prefixed(&request_cbs, &client_data) || (ctx->method->has_srr && !CBS_skip(&request_cbs, 8)) || CBS_len(&request_cbs) != 0) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_ERROR); return 0; } uint8_t *client_data_buf = NULL; size_t client_data_len = 0; TRUST_TOKEN *token; if (!CBS_stow(&client_data, &client_data_buf, &client_data_len)) { goto err; } token = TRUST_TOKEN_new(nonce, TRUST_TOKEN_NONCE_SIZE); if (token == NULL) { goto err; } *out_public = public_metadata; *out_private = private_metadata; *out_token = token; *out_client_data = client_data_buf; *out_client_data_len = client_data_len; return 1; err: OPENSSL_free(client_data_buf); return 0; } int TRUST_TOKEN_ISSUER_redeem(const TRUST_TOKEN_ISSUER *ctx, uint32_t *out_public, uint8_t *out_private, TRUST_TOKEN **out_token, uint8_t **out_client_data, size_t *out_client_data_len, const uint8_t *request, size_t request_len) { return trust_token_issuer_redeem_impl(ctx, out_public, out_private, out_token, out_client_data, out_client_data_len, request, request_len, 0, NULL, 0); } int TRUST_TOKEN_ISSUER_redeem_over_message( const TRUST_TOKEN_ISSUER *ctx, uint32_t *out_public, uint8_t *out_private, TRUST_TOKEN **out_token, uint8_t **out_client_data, size_t *out_client_data_len, const uint8_t *request, size_t request_len, const uint8_t *msg, size_t msg_len) { return trust_token_issuer_redeem_impl(ctx, out_public, out_private, out_token, out_client_data, out_client_data_len, request, request_len, 1, 
msg, msg_len); } static uint8_t get_metadata_obfuscator(const uint8_t *key, size_t key_len, const uint8_t *client_data, size_t client_data_len) { uint8_t metadata_obfuscator[SHA256_DIGEST_LENGTH]; SHA256_CTX sha_ctx; SHA256_Init(&sha_ctx); SHA256_Update(&sha_ctx, key, key_len); SHA256_Update(&sha_ctx, client_data, client_data_len); SHA256_Final(metadata_obfuscator, &sha_ctx); return metadata_obfuscator[0] >> 7; } int TRUST_TOKEN_decode_private_metadata(const TRUST_TOKEN_METHOD *method, uint8_t *out_value, const uint8_t *key, size_t key_len, const uint8_t *nonce, size_t nonce_len, uint8_t encrypted_bit) { uint8_t metadata_obfuscator = get_metadata_obfuscator(key, key_len, nonce, nonce_len); *out_value = encrypted_bit ^ metadata_obfuscator; return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/trust_token/voprf.cc ================================================ /* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include "../ec/internal.h" #include "../fipsmodule/ec/internal.h" #include "internal.h" typedef int (*hash_to_group_func_t)(const EC_GROUP *group, EC_JACOBIAN *out, const uint8_t t[TRUST_TOKEN_NONCE_SIZE]); typedef int (*hash_to_scalar_func_t)(const EC_GROUP *group, EC_SCALAR *out, uint8_t *buf, size_t len); typedef struct { const EC_GROUP *(*group_func)(void); // hash_to_group implements the HashToGroup operation for VOPRFs. It returns // one on success and zero on error. hash_to_group_func_t hash_to_group; // hash_to_scalar implements the HashToScalar operation for VOPRFs. It returns // one on success and zero on error. hash_to_scalar_func_t hash_to_scalar; } VOPRF_METHOD; static const uint8_t kDefaultAdditionalData[32] = {0}; static int cbb_add_point(CBB *out, const EC_GROUP *group, const EC_AFFINE *point) { uint8_t *p; size_t len = ec_point_byte_len(group, POINT_CONVERSION_UNCOMPRESSED); return CBB_add_space(out, &p, len) && ec_point_to_bytes(group, point, POINT_CONVERSION_UNCOMPRESSED, p, len) == len && CBB_flush(out); } static int cbb_serialize_point(CBB *out, const EC_GROUP *group, const EC_AFFINE *point) { uint8_t *p; size_t len = ec_point_byte_len(group, POINT_CONVERSION_COMPRESSED); return CBB_add_u16(out, len) && CBB_add_space(out, &p, len) && ec_point_to_bytes(group, point, POINT_CONVERSION_COMPRESSED, p, len) == len && CBB_flush(out); } static int cbs_get_point(CBS *cbs, const EC_GROUP *group, EC_AFFINE *out) { CBS child; size_t plen = ec_point_byte_len(group, POINT_CONVERSION_UNCOMPRESSED); if (!CBS_get_bytes(cbs, &child, plen) || !ec_point_from_uncompressed(group, out, CBS_data(&child), CBS_len(&child))) { return 0; } return 1; } static int scalar_to_cbb(CBB *out, const EC_GROUP *group, const EC_SCALAR *scalar) { uint8_t *buf; size_t scalar_len = BN_num_bytes(EC_GROUP_get0_order(group)); if (!CBB_add_space(out, &buf, scalar_len)) { return 0; } 
ec_scalar_to_bytes(group, buf, &scalar_len, scalar); return 1; } static int scalar_from_cbs(CBS *cbs, const EC_GROUP *group, EC_SCALAR *out) { size_t scalar_len = BN_num_bytes(EC_GROUP_get0_order(group)); CBS tmp; if (!CBS_get_bytes(cbs, &tmp, scalar_len)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); return 0; } ec_scalar_from_bytes(group, out, CBS_data(&tmp), CBS_len(&tmp)); return 1; } static int voprf_calculate_key(const VOPRF_METHOD *method, CBB *out_private, CBB *out_public, const EC_SCALAR *priv) { const EC_GROUP *group = method->group_func(); EC_JACOBIAN pub; EC_AFFINE pub_affine; if (!ec_point_mul_scalar_base(group, &pub, priv) || !ec_jacobian_to_affine(group, &pub_affine, &pub)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_KEYGEN_FAILURE); return 0; } if (!scalar_to_cbb(out_private, group, priv) || !cbb_add_point(out_public, group, &pub_affine)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_BUFFER_TOO_SMALL); return 0; } return 1; } static int voprf_generate_key(const VOPRF_METHOD *method, CBB *out_private, CBB *out_public) { EC_SCALAR priv; if (!ec_random_nonzero_scalar(method->group_func(), &priv, kDefaultAdditionalData)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_KEYGEN_FAILURE); return 0; } return voprf_calculate_key(method, out_private, out_public, &priv); } static int voprf_derive_key_from_secret(const VOPRF_METHOD *method, CBB *out_private, CBB *out_public, const uint8_t *secret, size_t secret_len) { static const uint8_t kKeygenLabel[] = "TrustTokenVOPRFKeyGen"; EC_SCALAR priv; int ok = 0; CBB cbb; CBB_zero(&cbb); uint8_t *buf = NULL; size_t len; if (!CBB_init(&cbb, 0) || !CBB_add_bytes(&cbb, kKeygenLabel, sizeof(kKeygenLabel)) || !CBB_add_bytes(&cbb, secret, secret_len) || !CBB_finish(&cbb, &buf, &len) || !method->hash_to_scalar(method->group_func(), &priv, buf, len)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_KEYGEN_FAILURE); goto err; } ok = voprf_calculate_key(method, out_private, out_public, &priv); err: 
CBB_cleanup(&cbb); OPENSSL_free(buf); return ok; } static int voprf_client_key_from_bytes(const VOPRF_METHOD *method, TRUST_TOKEN_CLIENT_KEY *key, const uint8_t *in, size_t len) { const EC_GROUP *group = method->group_func(); if (!ec_point_from_uncompressed(group, &key->pubs, in, len)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); return 0; } return 1; } static int voprf_issuer_key_from_bytes(const VOPRF_METHOD *method, TRUST_TOKEN_ISSUER_KEY *key, const uint8_t *in, size_t len) { const EC_GROUP *group = method->group_func(); if (!ec_scalar_from_bytes(group, &key->xs, in, len)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); return 0; } // Recompute the public key. EC_JACOBIAN pub; if (!ec_point_mul_scalar_base(group, &pub, &key->xs) || !ec_jacobian_to_affine(group, &key->pubs, &pub)) { return 0; } return 1; } static STACK_OF(TRUST_TOKEN_PRETOKEN) *voprf_blind(const VOPRF_METHOD *method, CBB *cbb, size_t count, int include_message, const uint8_t *msg, size_t msg_len) { SHA512_CTX hash_ctx; const EC_GROUP *group = method->group_func(); STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens = sk_TRUST_TOKEN_PRETOKEN_new_null(); if (pretokens == NULL) { goto err; } for (size_t i = 0; i < count; i++) { // Insert |pretoken| into |pretokens| early to simplify error-handling. TRUST_TOKEN_PRETOKEN *pretoken = reinterpret_cast( OPENSSL_malloc(sizeof(TRUST_TOKEN_PRETOKEN))); if (pretoken == NULL || !sk_TRUST_TOKEN_PRETOKEN_push(pretokens, pretoken)) { TRUST_TOKEN_PRETOKEN_free(pretoken); goto err; } RAND_bytes(pretoken->salt, sizeof(pretoken->salt)); if (include_message) { assert(SHA512_DIGEST_LENGTH == TRUST_TOKEN_NONCE_SIZE); SHA512_Init(&hash_ctx); SHA512_Update(&hash_ctx, pretoken->salt, sizeof(pretoken->salt)); SHA512_Update(&hash_ctx, msg, msg_len); SHA512_Final(pretoken->t, &hash_ctx); } else { OPENSSL_memcpy(pretoken->t, pretoken->salt, TRUST_TOKEN_NONCE_SIZE); } // We sample r in Montgomery form to simplify inverting. 
EC_SCALAR r;
    if (!ec_random_nonzero_scalar(group, &r, kDefaultAdditionalData)) {
      goto err;
    }

    // pretoken->r is rinv.
    ec_scalar_inv0_montgomery(group, &pretoken->r, &r);
    // Convert both out of Montgomery form.
    ec_scalar_from_montgomery(group, &r, &r);
    ec_scalar_from_montgomery(group, &pretoken->r, &pretoken->r);

    // Tp is the blinded token in the VOPRF protocol.
    EC_JACOBIAN P, Tp;
    if (!method->hash_to_group(group, &P, pretoken->t) ||
        !ec_point_mul_scalar(group, &Tp, &P, &r) ||
        !ec_jacobian_to_affine(group, &pretoken->Tp, &Tp)) {
      goto err;
    }

    if (!cbb_add_point(cbb, group, &pretoken->Tp)) {
      goto err;
    }
  }

  return pretokens;

err:
  sk_TRUST_TOKEN_PRETOKEN_pop_free(pretokens, TRUST_TOKEN_PRETOKEN_free);
  return NULL;
}

// Hashes the DLEQ transcript (label, X, T, W, K0, K1) to a scalar challenge.
static int hash_to_scalar_dleq(const VOPRF_METHOD *method, EC_SCALAR *out,
                               const EC_AFFINE *X, const EC_AFFINE *T,
                               const EC_AFFINE *W, const EC_AFFINE *K0,
                               const EC_AFFINE *K1) {
  static const uint8_t kDLEQLabel[] = "DLEQ";

  const EC_GROUP *group = method->group_func();
  int ok = 0;
  CBB cbb;
  CBB_zero(&cbb);
  uint8_t *buf = NULL;
  size_t len;
  if (!CBB_init(&cbb, 0) ||
      !CBB_add_bytes(&cbb, kDLEQLabel, sizeof(kDLEQLabel)) ||
      !cbb_add_point(&cbb, group, X) ||
      !cbb_add_point(&cbb, group, T) ||
      !cbb_add_point(&cbb, group, W) ||
      !cbb_add_point(&cbb, group, K0) ||
      !cbb_add_point(&cbb, group, K1) ||
      !CBB_finish(&cbb, &buf, &len) ||
      !method->hash_to_scalar(group, out, buf, len)) {
    goto err;
  }

  ok = 1;

err:
  CBB_cleanup(&cbb);
  OPENSSL_free(buf);
  return ok;
}

// Hashes the proof transcript (Bm, a0..a3, label) to the challenge scalar,
// building the transcript in a fixed-size stack buffer.
static int hash_to_scalar_challenge(const VOPRF_METHOD *method, EC_SCALAR *out,
                                    const EC_AFFINE *Bm, const EC_AFFINE *a0,
                                    const EC_AFFINE *a1, const EC_AFFINE *a2,
                                    const EC_AFFINE *a3) {
  static const uint8_t kChallengeLabel[] = "Challenge";

  const EC_GROUP *group = method->group_func();
  CBB cbb;
  uint8_t transcript[5 * EC_MAX_COMPRESSED + 2 + sizeof(kChallengeLabel) - 1];
  size_t len;
  if (!CBB_init_fixed(&cbb, transcript, sizeof(transcript)) ||
      !cbb_serialize_point(&cbb, group, Bm) ||
      !cbb_serialize_point(&cbb, group, a0) ||
      !cbb_serialize_point(&cbb, group, a1) ||
      !cbb_serialize_point(&cbb, group, a2) ||
      !cbb_serialize_point(&cbb, group, a3) ||
      !CBB_add_bytes(&cbb, kChallengeLabel, sizeof(kChallengeLabel) - 1) ||
      !CBB_finish(&cbb, NULL, &len) ||
      !method->hash_to_scalar(group, out, transcript, len)) {
    return 0;
  }
  return 1;
}

// Derives the i-th batching scalar e_i by hashing the batch transcript
// |points| together with the two-byte index.
static int hash_to_scalar_batch(const VOPRF_METHOD *method, EC_SCALAR *out,
                                const CBB *points, size_t index) {
  static const uint8_t kDLEQBatchLabel[] = "DLEQ BATCH";
  if (index > 0xffff) {
    // The protocol supports only two-byte batches.
    OPENSSL_PUT_ERROR(TRUST_TOKEN, ERR_R_OVERFLOW);
    return 0;
  }

  int ok = 0;
  CBB cbb;
  CBB_zero(&cbb);
  uint8_t *buf = NULL;
  size_t len;
  if (!CBB_init(&cbb, 0) ||
      !CBB_add_bytes(&cbb, kDLEQBatchLabel, sizeof(kDLEQBatchLabel)) ||
      !CBB_add_bytes(&cbb, CBB_data(points), CBB_len(points)) ||
      !CBB_add_u16(&cbb, (uint16_t)index) ||
      !CBB_finish(&cbb, &buf, &len) ||
      !method->hash_to_scalar(method->group_func(), out, buf, len)) {
    goto err;
  }

  ok = 1;

err:
  CBB_cleanup(&cbb);
  OPENSSL_free(buf);
  return ok;
}

// Generates a DLEQ proof that the same secret scalar links G -> pubs and
// T -> W, writing (c, u) to |cbb|.
static int dleq_generate(const VOPRF_METHOD *method, CBB *cbb,
                         const TRUST_TOKEN_ISSUER_KEY *priv,
                         const EC_JACOBIAN *T, const EC_JACOBIAN *W) {
  const EC_GROUP *group = method->group_func();

  enum {
    idx_T,
    idx_W,
    idx_k0,
    idx_k1,
    num_idx,
  };
  EC_JACOBIAN jacobians[num_idx];

  // Setup the DLEQ proof.
  EC_SCALAR r;
  if (  // r <- Zp
      !ec_random_nonzero_scalar(group, &r, kDefaultAdditionalData) ||
      // k0;k1 = r*(G;T)
      !ec_point_mul_scalar_base(group, &jacobians[idx_k0], &r) ||
      !ec_point_mul_scalar(group, &jacobians[idx_k1], T, &r)) {
    return 0;
  }

  EC_AFFINE affines[num_idx];
  jacobians[idx_T] = *T;
  jacobians[idx_W] = *W;
  if (!ec_jacobian_to_affine_batch(group, affines, jacobians, num_idx)) {
    return 0;
  }

  // Compute c = Hc(...).
EC_SCALAR c; if (!hash_to_scalar_dleq(method, &c, &priv->pubs, &affines[idx_T], &affines[idx_W], &affines[idx_k0], &affines[idx_k1])) { return 0; } EC_SCALAR c_mont; ec_scalar_to_montgomery(group, &c_mont, &c); // u = r + c*xs EC_SCALAR u; ec_scalar_mul_montgomery(group, &u, &priv->xs, &c_mont); ec_scalar_add(group, &u, &r, &u); // Store DLEQ proof in transcript. if (!scalar_to_cbb(cbb, group, &c) || !scalar_to_cbb(cbb, group, &u)) { return 0; } return 1; } static int mul_public_2(const EC_GROUP *group, EC_JACOBIAN *out, const EC_JACOBIAN *p0, const EC_SCALAR *scalar0, const EC_JACOBIAN *p1, const EC_SCALAR *scalar1) { EC_JACOBIAN points[2] = {*p0, *p1}; EC_SCALAR scalars[2] = {*scalar0, *scalar1}; return ec_point_mul_scalar_public_batch(group, out, /*g_scalar=*/NULL, points, scalars, 2); } static int dleq_verify(const VOPRF_METHOD *method, CBS *cbs, const TRUST_TOKEN_CLIENT_KEY *pub, const EC_JACOBIAN *T, const EC_JACOBIAN *W) { const EC_GROUP *group = method->group_func(); enum { idx_T, idx_W, idx_k0, idx_k1, num_idx, }; EC_JACOBIAN jacobians[num_idx]; // Decode the DLEQ proof. EC_SCALAR c, u; if (!scalar_from_cbs(cbs, group, &c) || !scalar_from_cbs(cbs, group, &u)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); return 0; } // k0;k1 = u*(G;T) - c*(pub;W) EC_JACOBIAN pubs; ec_affine_to_jacobian(group, &pubs, &pub->pubs); EC_SCALAR minus_c; ec_scalar_neg(group, &minus_c, &c); if (!ec_point_mul_scalar_public(group, &jacobians[idx_k0], &u, &pubs, &minus_c) || !mul_public_2(group, &jacobians[idx_k1], T, &u, W, &minus_c)) { return 0; } // Check the DLEQ proof. EC_AFFINE affines[num_idx]; jacobians[idx_T] = *T; jacobians[idx_W] = *W; if (!ec_jacobian_to_affine_batch(group, affines, jacobians, num_idx)) { return 0; } // Compute c = Hc(...). 
EC_SCALAR calculated; if (!hash_to_scalar_dleq(method, &calculated, &pub->pubs, &affines[idx_T], &affines[idx_W], &affines[idx_k0], &affines[idx_k1])) { return 0; } // c == calculated if (!ec_scalar_equal_vartime(group, &c, &calculated)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_INVALID_PROOF); return 0; } return 1; } static int voprf_sign_tt(const VOPRF_METHOD *method, const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs, size_t num_requested, size_t num_to_issue) { const EC_GROUP *group = method->group_func(); if (num_requested < num_to_issue) { OPENSSL_PUT_ERROR(TRUST_TOKEN, ERR_R_INTERNAL_ERROR); return 0; } int ret = 0; EC_JACOBIAN *BTs = reinterpret_cast( OPENSSL_calloc(num_to_issue, sizeof(EC_JACOBIAN))); EC_JACOBIAN *Zs = reinterpret_cast( OPENSSL_calloc(num_to_issue, sizeof(EC_JACOBIAN))); EC_SCALAR *es = reinterpret_cast( OPENSSL_calloc(num_to_issue, sizeof(EC_SCALAR))); CBB batch_cbb; CBB_zero(&batch_cbb); { if (!BTs || !Zs || !es || !CBB_init(&batch_cbb, 0) || !cbb_add_point(&batch_cbb, group, &key->pubs)) { goto err; } for (size_t i = 0; i < num_to_issue; i++) { EC_AFFINE BT_affine, Z_affine; EC_JACOBIAN BT, Z; if (!cbs_get_point(cbs, group, &BT_affine)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); goto err; } ec_affine_to_jacobian(group, &BT, &BT_affine); if (!ec_point_mul_scalar(group, &Z, &BT, &key->xs) || !ec_jacobian_to_affine(group, &Z_affine, &Z) || !cbb_add_point(cbb, group, &Z_affine)) { goto err; } if (!cbb_add_point(&batch_cbb, group, &BT_affine) || !cbb_add_point(&batch_cbb, group, &Z_affine)) { goto err; } BTs[i] = BT; Zs[i] = Z; if (!CBB_flush(cbb)) { goto err; } } // The DLEQ batching construction is described in appendix B of // https://eprint.iacr.org/2020/072/20200324:214215. Note the additional // computations all act on public inputs. 
for (size_t i = 0; i < num_to_issue; i++) { if (!hash_to_scalar_batch(method, &es[i], &batch_cbb, i)) { goto err; } } EC_JACOBIAN BT_batch, Z_batch; if (!ec_point_mul_scalar_public_batch(group, &BT_batch, /*g_scalar=*/NULL, BTs, es, num_to_issue) || !ec_point_mul_scalar_public_batch(group, &Z_batch, /*g_scalar=*/NULL, Zs, es, num_to_issue)) { goto err; } CBB proof; if (!CBB_add_u16_length_prefixed(cbb, &proof) || !dleq_generate(method, &proof, key, &BT_batch, &Z_batch) || !CBB_flush(cbb)) { goto err; } // Skip over any unused requests. size_t point_len = ec_point_byte_len(group, POINT_CONVERSION_UNCOMPRESSED); if (!CBS_skip(cbs, point_len * (num_requested - num_to_issue))) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); goto err; } ret = 1; } err: OPENSSL_free(BTs); OPENSSL_free(Zs); OPENSSL_free(es); CBB_cleanup(&batch_cbb); return ret; } static STACK_OF(TRUST_TOKEN) *voprf_unblind_tt( const VOPRF_METHOD *method, const TRUST_TOKEN_CLIENT_KEY *key, const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count, uint32_t key_id) { const EC_GROUP *group = method->group_func(); if (count > sk_TRUST_TOKEN_PRETOKEN_num(pretokens)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); return NULL; } int ok = 0; STACK_OF(TRUST_TOKEN) *ret = sk_TRUST_TOKEN_new_null(); EC_JACOBIAN *BTs = reinterpret_cast( OPENSSL_calloc(count, sizeof(EC_JACOBIAN))); EC_JACOBIAN *Zs = reinterpret_cast( OPENSSL_calloc(count, sizeof(EC_JACOBIAN))); EC_SCALAR *es = reinterpret_cast(OPENSSL_calloc(count, sizeof(EC_SCALAR))); CBB batch_cbb; CBB_zero(&batch_cbb); if (ret == NULL || BTs == NULL || Zs == NULL || es == NULL || !CBB_init(&batch_cbb, 0) || !cbb_add_point(&batch_cbb, group, &key->pubs)) { goto err; } for (size_t i = 0; i < count; i++) { const TRUST_TOKEN_PRETOKEN *pretoken = sk_TRUST_TOKEN_PRETOKEN_value(pretokens, i); EC_AFFINE Z_affine; if (!cbs_get_point(cbs, group, &Z_affine)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); goto 
err; } ec_affine_to_jacobian(group, &BTs[i], &pretoken->Tp); ec_affine_to_jacobian(group, &Zs[i], &Z_affine); if (!cbb_add_point(&batch_cbb, group, &pretoken->Tp) || !cbb_add_point(&batch_cbb, group, &Z_affine)) { goto err; } // Unblind the token. // pretoken->r is rinv. EC_JACOBIAN N; EC_AFFINE N_affine; if (!ec_point_mul_scalar(group, &N, &Zs[i], &pretoken->r) || !ec_jacobian_to_affine(group, &N_affine, &N)) { goto err; } // Serialize the token. Include |key_id| to avoid an extra copy in the layer // above. CBB token_cbb; size_t point_len = ec_point_byte_len(group, POINT_CONVERSION_UNCOMPRESSED); if (!CBB_init(&token_cbb, 4 + TRUST_TOKEN_NONCE_SIZE + (2 + point_len)) || !CBB_add_u32(&token_cbb, key_id) || !CBB_add_bytes(&token_cbb, pretoken->salt, TRUST_TOKEN_NONCE_SIZE) || !cbb_add_point(&token_cbb, group, &N_affine) || !CBB_flush(&token_cbb)) { CBB_cleanup(&token_cbb); goto err; } TRUST_TOKEN *token = TRUST_TOKEN_new(CBB_data(&token_cbb), CBB_len(&token_cbb)); CBB_cleanup(&token_cbb); if (token == NULL || !sk_TRUST_TOKEN_push(ret, token)) { TRUST_TOKEN_free(token); goto err; } } // The DLEQ batching construction is described in appendix B of // https://eprint.iacr.org/2020/072/20200324:214215. Note the additional // computations all act on public inputs. 
for (size_t i = 0; i < count; i++) { if (!hash_to_scalar_batch(method, &es[i], &batch_cbb, i)) { goto err; } } EC_JACOBIAN BT_batch, Z_batch; if (!ec_point_mul_scalar_public_batch(group, &BT_batch, /*g_scalar=*/NULL, BTs, es, count) || !ec_point_mul_scalar_public_batch(group, &Z_batch, /*g_scalar=*/NULL, Zs, es, count)) { goto err; } CBS proof; if (!CBS_get_u16_length_prefixed(cbs, &proof) || !dleq_verify(method, &proof, key, &BT_batch, &Z_batch) || CBS_len(&proof) != 0) { goto err; } ok = 1; err: OPENSSL_free(BTs); OPENSSL_free(Zs); OPENSSL_free(es); CBB_cleanup(&batch_cbb); if (!ok) { sk_TRUST_TOKEN_pop_free(ret, TRUST_TOKEN_free); ret = NULL; } return ret; } static void sha384_update_u16(SHA512_CTX *ctx, uint16_t v) { uint8_t buf[2] = {static_cast(v >> 8), static_cast(v & 0xff)}; SHA384_Update(ctx, buf, 2); } static void sha384_update_point_with_length(SHA512_CTX *ctx, const EC_GROUP *group, const EC_AFFINE *point) { uint8_t buf[EC_MAX_COMPRESSED]; size_t len = ec_point_to_bytes(group, point, POINT_CONVERSION_COMPRESSED, buf, sizeof(buf)); assert(len > 0); sha384_update_u16(ctx, (uint16_t)len); SHA384_Update(ctx, buf, len); } static int compute_composite_seed(const VOPRF_METHOD *method, uint8_t out[SHA384_DIGEST_LENGTH], const EC_AFFINE *pub) { const EC_GROUP *group = method->group_func(); static const uint8_t kSeedDST[] = "Seed-OPRFV1-\x01-P384-SHA384"; SHA512_CTX hash_ctx; SHA384_Init(&hash_ctx); sha384_update_point_with_length(&hash_ctx, group, pub); sha384_update_u16(&hash_ctx, sizeof(kSeedDST) - 1); SHA384_Update(&hash_ctx, kSeedDST, sizeof(kSeedDST) - 1); SHA384_Final(out, &hash_ctx); return 1; } static int compute_composite_element(const VOPRF_METHOD *method, uint8_t seed[SHA384_DIGEST_LENGTH], EC_SCALAR *di, size_t index, const EC_AFFINE *C, const EC_AFFINE *D) { static const uint8_t kCompositeLabel[] = "Composite"; const EC_GROUP *group = method->group_func(); if (index > UINT16_MAX) { return 0; } CBB cbb; uint8_t transcript[2 + SHA384_DIGEST_LENGTH + 2 
+ 2 * EC_MAX_COMPRESSED + sizeof(kCompositeLabel) - 1]; size_t len; if (!CBB_init_fixed(&cbb, transcript, sizeof(transcript)) || !CBB_add_u16(&cbb, SHA384_DIGEST_LENGTH) || !CBB_add_bytes(&cbb, seed, SHA384_DIGEST_LENGTH) || !CBB_add_u16(&cbb, index) || !cbb_serialize_point(&cbb, group, C) || !cbb_serialize_point(&cbb, group, D) || !CBB_add_bytes(&cbb, kCompositeLabel, sizeof(kCompositeLabel) - 1) || !CBB_finish(&cbb, NULL, &len) || !method->hash_to_scalar(group, di, transcript, len)) { return 0; } return 1; } static int generate_proof(const VOPRF_METHOD *method, CBB *cbb, const TRUST_TOKEN_ISSUER_KEY *priv, const EC_SCALAR *r, const EC_JACOBIAN *M, const EC_JACOBIAN *Z) { const EC_GROUP *group = method->group_func(); enum { idx_M, idx_Z, idx_t2, idx_t3, num_idx, }; EC_JACOBIAN jacobians[num_idx]; if (!ec_point_mul_scalar_base(group, &jacobians[idx_t2], r) || !ec_point_mul_scalar(group, &jacobians[idx_t3], M, r)) { return 0; } EC_AFFINE affines[num_idx]; jacobians[idx_M] = *M; jacobians[idx_Z] = *Z; if (!ec_jacobian_to_affine_batch(group, affines, jacobians, num_idx)) { return 0; } EC_SCALAR c; if (!hash_to_scalar_challenge(method, &c, &priv->pubs, &affines[idx_M], &affines[idx_Z], &affines[idx_t2], &affines[idx_t3])) { return 0; } EC_SCALAR c_mont; ec_scalar_to_montgomery(group, &c_mont, &c); // s = r - c*xs EC_SCALAR s; ec_scalar_mul_montgomery(group, &s, &priv->xs, &c_mont); ec_scalar_sub(group, &s, r, &s); // Store DLEQ proof in transcript. 
if (!scalar_to_cbb(cbb, group, &c) || !scalar_to_cbb(cbb, group, &s)) { return 0; } return 1; } static int verify_proof(const VOPRF_METHOD *method, CBS *cbs, const TRUST_TOKEN_CLIENT_KEY *pub, const EC_JACOBIAN *M, const EC_JACOBIAN *Z) { const EC_GROUP *group = method->group_func(); enum { idx_M, idx_Z, idx_t2, idx_t3, num_idx, }; EC_JACOBIAN jacobians[num_idx]; EC_SCALAR c, s; if (!scalar_from_cbs(cbs, group, &c) || !scalar_from_cbs(cbs, group, &s)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); return 0; } EC_JACOBIAN pubs; ec_affine_to_jacobian(group, &pubs, &pub->pubs); if (!ec_point_mul_scalar_public(group, &jacobians[idx_t2], &s, &pubs, &c) || !mul_public_2(group, &jacobians[idx_t3], M, &s, Z, &c)) { return 0; } EC_AFFINE affines[num_idx]; jacobians[idx_M] = *M; jacobians[idx_Z] = *Z; if (!ec_jacobian_to_affine_batch(group, affines, jacobians, num_idx)) { return 0; } EC_SCALAR expected_c; if (!hash_to_scalar_challenge(method, &expected_c, &pub->pubs, &affines[idx_M], &affines[idx_Z], &affines[idx_t2], &affines[idx_t3])) { return 0; } // c == expected_c if (!ec_scalar_equal_vartime(group, &c, &expected_c)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_INVALID_PROOF); return 0; } return 1; } static int voprf_sign_impl(const VOPRF_METHOD *method, const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs, size_t num_requested, size_t num_to_issue, const EC_SCALAR *proof_scalar) { const EC_GROUP *group = method->group_func(); if (num_requested < num_to_issue) { OPENSSL_PUT_ERROR(TRUST_TOKEN, ERR_R_INTERNAL_ERROR); return 0; } int ret = 0; EC_JACOBIAN *BTs = reinterpret_cast( OPENSSL_calloc(num_to_issue, sizeof(EC_JACOBIAN))); EC_JACOBIAN *Zs = reinterpret_cast( OPENSSL_calloc(num_to_issue, sizeof(EC_JACOBIAN))); EC_SCALAR *dis = reinterpret_cast( OPENSSL_calloc(num_to_issue, sizeof(EC_SCALAR))); { if (!BTs || !Zs || !dis) { goto err; } uint8_t seed[SHA384_DIGEST_LENGTH]; if (!compute_composite_seed(method, seed, &key->pubs)) { goto err; } // This 
implements the BlindEvaluateBatch as defined in section 4 of // draft-robert-privacypass-batched-tokens-01, based on the constructions // in draft-irtf-cfrg-voprf-21. To optimize the computation of the proof, // the computation of di is done during the token signing and passed into // the proof generation. for (size_t i = 0; i < num_to_issue; i++) { EC_AFFINE BT_affine, Z_affine; EC_JACOBIAN BT, Z; if (!cbs_get_point(cbs, group, &BT_affine)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); goto err; } ec_affine_to_jacobian(group, &BT, &BT_affine); if (!ec_point_mul_scalar(group, &Z, &BT, &key->xs) || !ec_jacobian_to_affine(group, &Z_affine, &Z) || !cbb_add_point(cbb, group, &Z_affine)) { goto err; } BTs[i] = BT; Zs[i] = Z; if (!compute_composite_element(method, seed, &dis[i], i, &BT_affine, &Z_affine)) { goto err; } if (!CBB_flush(cbb)) { goto err; } } EC_JACOBIAN M, Z; if (!ec_point_mul_scalar_public_batch(group, &M, /*g_scalar=*/NULL, BTs, dis, num_to_issue) || !ec_point_mul_scalar(group, &Z, &M, &key->xs)) { goto err; } CBB proof; if (!CBB_add_u16_length_prefixed(cbb, &proof) || !generate_proof(method, &proof, key, proof_scalar, &M, &Z) || !CBB_flush(cbb)) { goto err; } // Skip over any unused requests. 
size_t point_len = ec_point_byte_len(group, POINT_CONVERSION_UNCOMPRESSED); if (!CBS_skip(cbs, point_len * (num_requested - num_to_issue))) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); goto err; } ret = 1; } err: OPENSSL_free(BTs); OPENSSL_free(Zs); OPENSSL_free(dis); return ret; } static int voprf_sign(const VOPRF_METHOD *method, const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs, size_t num_requested, size_t num_to_issue) { EC_SCALAR proof_scalar; if (!ec_random_nonzero_scalar(method->group_func(), &proof_scalar, kDefaultAdditionalData)) { return 0; } return voprf_sign_impl(method, key, cbb, cbs, num_requested, num_to_issue, &proof_scalar); } static int voprf_sign_with_proof_scalar_for_testing( const VOPRF_METHOD *method, const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs, size_t num_requested, size_t num_to_issue, const uint8_t *proof_scalar_buf, size_t proof_scalar_len) { EC_SCALAR proof_scalar; if (!ec_scalar_from_bytes(method->group_func(), &proof_scalar, proof_scalar_buf, proof_scalar_len)) { return 0; } return voprf_sign_impl(method, key, cbb, cbs, num_requested, num_to_issue, &proof_scalar); } static STACK_OF(TRUST_TOKEN) *voprf_unblind( const VOPRF_METHOD *method, const TRUST_TOKEN_CLIENT_KEY *key, const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count, uint32_t key_id) { const EC_GROUP *group = method->group_func(); if (count > sk_TRUST_TOKEN_PRETOKEN_num(pretokens)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); return NULL; } int ok = 0; STACK_OF(TRUST_TOKEN) *ret = sk_TRUST_TOKEN_new_null(); EC_JACOBIAN *BTs = reinterpret_cast( OPENSSL_calloc(count, sizeof(EC_JACOBIAN))); EC_JACOBIAN *Zs = reinterpret_cast( OPENSSL_calloc(count, sizeof(EC_JACOBIAN))); EC_SCALAR *dis = reinterpret_cast(OPENSSL_calloc(count, sizeof(EC_SCALAR))); if (ret == NULL || !BTs || !Zs || !dis) { goto err; } uint8_t seed[SHA384_DIGEST_LENGTH]; if (!compute_composite_seed(method, seed, &key->pubs)) { goto err; } for 
(size_t i = 0; i < count; i++) { const TRUST_TOKEN_PRETOKEN *pretoken = sk_TRUST_TOKEN_PRETOKEN_value(pretokens, i); EC_AFFINE Z_affine; if (!cbs_get_point(cbs, group, &Z_affine)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_DECODE_FAILURE); goto err; } ec_affine_to_jacobian(group, &BTs[i], &pretoken->Tp); ec_affine_to_jacobian(group, &Zs[i], &Z_affine); if (!compute_composite_element(method, seed, &dis[i], i, &pretoken->Tp, &Z_affine)) { goto err; } // Unblind the token. // pretoken->r is rinv. EC_JACOBIAN N; EC_AFFINE N_affine; if (!ec_point_mul_scalar(group, &N, &Zs[i], &pretoken->r) || !ec_jacobian_to_affine(group, &N_affine, &N)) { goto err; } // Serialize the token. Include |key_id| to avoid an extra copy in the layer // above. CBB token_cbb; size_t point_len = ec_point_byte_len(group, POINT_CONVERSION_UNCOMPRESSED); if (!CBB_init(&token_cbb, 4 + TRUST_TOKEN_NONCE_SIZE + (2 + point_len)) || !CBB_add_u32(&token_cbb, key_id) || !CBB_add_bytes(&token_cbb, pretoken->salt, TRUST_TOKEN_NONCE_SIZE) || !cbb_add_point(&token_cbb, group, &N_affine) || !CBB_flush(&token_cbb)) { CBB_cleanup(&token_cbb); goto err; } TRUST_TOKEN *token = TRUST_TOKEN_new(CBB_data(&token_cbb), CBB_len(&token_cbb)); CBB_cleanup(&token_cbb); if (token == NULL || !sk_TRUST_TOKEN_push(ret, token)) { TRUST_TOKEN_free(token); goto err; } } EC_JACOBIAN M, Z; if (!ec_point_mul_scalar_public_batch(group, &M, /*g_scalar=*/NULL, BTs, dis, count) || !ec_point_mul_scalar_public_batch(group, &Z, /*g_scalar=*/NULL, Zs, dis, count)) { goto err; } CBS proof; if (!CBS_get_u16_length_prefixed(cbs, &proof) || !verify_proof(method, &proof, key, &M, &Z) || CBS_len(&proof) != 0) { goto err; } ok = 1; err: OPENSSL_free(BTs); OPENSSL_free(Zs); OPENSSL_free(dis); if (!ok) { sk_TRUST_TOKEN_pop_free(ret, TRUST_TOKEN_free); ret = NULL; } return ret; } static int voprf_read(const VOPRF_METHOD *method, const TRUST_TOKEN_ISSUER_KEY *key, uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE], const uint8_t *token, size_t token_len, 
int include_message, const uint8_t *msg, size_t msg_len) { const EC_GROUP *group = method->group_func(); CBS cbs, salt; CBS_init(&cbs, token, token_len); EC_AFFINE Ws; if (!CBS_get_bytes(&cbs, &salt, TRUST_TOKEN_NONCE_SIZE) || !cbs_get_point(&cbs, group, &Ws) || CBS_len(&cbs) != 0) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_INVALID_TOKEN); return 0; } if (include_message) { SHA512_CTX hash_ctx; assert(SHA512_DIGEST_LENGTH == TRUST_TOKEN_NONCE_SIZE); SHA512_Init(&hash_ctx); SHA512_Update(&hash_ctx, CBS_data(&salt), CBS_len(&salt)); SHA512_Update(&hash_ctx, msg, msg_len); SHA512_Final(out_nonce, &hash_ctx); } else { OPENSSL_memcpy(out_nonce, CBS_data(&salt), CBS_len(&salt)); } EC_JACOBIAN T; if (!method->hash_to_group(group, &T, out_nonce)) { return 0; } EC_JACOBIAN Ws_calculated; if (!ec_point_mul_scalar(group, &Ws_calculated, &T, &key->xs) || !ec_affine_jacobian_equal(group, &Ws, &Ws_calculated)) { OPENSSL_PUT_ERROR(TRUST_TOKEN, TRUST_TOKEN_R_BAD_VALIDITY_CHECK); return 0; } return 1; } // VOPRF experiment v2. 
// HashToGroup for VOPRF experiment v2 (P-384, XMD:SHA-512 SSWU, draft-07).
// Note the label length here includes the trailing NUL (sizeof without -1),
// unlike the PST v1 labels below.
static int voprf_exp2_hash_to_group(const EC_GROUP *group, EC_JACOBIAN *out,
                                    const uint8_t t[TRUST_TOKEN_NONCE_SIZE]) {
  const uint8_t kHashTLabel[] = "TrustToken VOPRF Experiment V2 HashToGroup";
  return ec_hash_to_curve_p384_xmd_sha512_sswu_draft07(
      group, out, kHashTLabel, sizeof(kHashTLabel), t, TRUST_TOKEN_NONCE_SIZE);
}

// HashToScalar for VOPRF experiment v2.
static int voprf_exp2_hash_to_scalar(const EC_GROUP *group, EC_SCALAR *out,
                                     uint8_t *buf, size_t len) {
  const uint8_t kHashCLabel[] = "TrustToken VOPRF Experiment V2 HashToScalar";
  return ec_hash_to_scalar_p384_xmd_sha512_draft07(
      group, out, kHashCLabel, sizeof(kHashCLabel), buf, len);
}

static VOPRF_METHOD voprf_exp2_method = {EC_group_p384,
                                         voprf_exp2_hash_to_group,
                                         voprf_exp2_hash_to_scalar};

int voprf_exp2_generate_key(CBB *out_private, CBB *out_public) {
  return voprf_generate_key(&voprf_exp2_method, out_private, out_public);
}

int voprf_exp2_derive_key_from_secret(CBB *out_private, CBB *out_public,
                                      const uint8_t *secret,
                                      size_t secret_len) {
  return voprf_derive_key_from_secret(&voprf_exp2_method, out_private,
                                      out_public, secret, secret_len);
}

int voprf_exp2_client_key_from_bytes(TRUST_TOKEN_CLIENT_KEY *key,
                                     const uint8_t *in, size_t len) {
  return voprf_client_key_from_bytes(&voprf_exp2_method, key, in, len);
}

int voprf_exp2_issuer_key_from_bytes(TRUST_TOKEN_ISSUER_KEY *key,
                                     const uint8_t *in, size_t len) {
  return voprf_issuer_key_from_bytes(&voprf_exp2_method, key, in, len);
}

STACK_OF(TRUST_TOKEN_PRETOKEN) *voprf_exp2_blind(CBB *cbb, size_t count,
                                                 int include_message,
                                                 const uint8_t *msg,
                                                 size_t msg_len) {
  return voprf_blind(&voprf_exp2_method, cbb, count, include_message, msg,
                     msg_len);
}

// |private_metadata| must be zero: this VOPRF method rejects private metadata.
int voprf_exp2_sign(const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs,
                    size_t num_requested, size_t num_to_issue,
                    uint8_t private_metadata) {
  if (private_metadata != 0) {
    return 0;
  }
  return voprf_sign_tt(&voprf_exp2_method, key, cbb, cbs, num_requested,
                       num_to_issue);
}

STACK_OF(TRUST_TOKEN) *voprf_exp2_unblind(
    const TRUST_TOKEN_CLIENT_KEY *key,
    const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count,
    uint32_t key_id) {
  return voprf_unblind_tt(&voprf_exp2_method, key, pretokens, cbs, count,
                          key_id);
}

// |out_private_metadata| is never written here; the underlying |voprf_read|
// takes no private-metadata output.
int voprf_exp2_read(const TRUST_TOKEN_ISSUER_KEY *key,
                    uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE],
                    uint8_t *out_private_metadata, const uint8_t *token,
                    size_t token_len, int include_message, const uint8_t *msg,
                    size_t msg_len) {
  return voprf_read(&voprf_exp2_method, key, out_nonce, token, token_len,
                    include_message, msg, msg_len);
}

// VOPRF PST v1.

// HashToGroup for PST v1 using the OPRFV1-P384-SHA384 domain-separation tag
// (label length excludes the trailing NUL).
static int voprf_pst1_hash_to_group(const EC_GROUP *group, EC_JACOBIAN *out,
                                    const uint8_t t[TRUST_TOKEN_NONCE_SIZE]) {
  const uint8_t kHashTLabel[] = "HashToGroup-OPRFV1-\x01-P384-SHA384";
  return ec_hash_to_curve_p384_xmd_sha384_sswu(group, out, kHashTLabel,
                                               sizeof(kHashTLabel) - 1, t,
                                               TRUST_TOKEN_NONCE_SIZE);
}

// HashToScalar for PST v1.
static int voprf_pst1_hash_to_scalar(const EC_GROUP *group, EC_SCALAR *out,
                                     uint8_t *buf, size_t len) {
  const uint8_t kHashCLabel[] = "HashToScalar-OPRFV1-\x01-P384-SHA384";
  return ec_hash_to_scalar_p384_xmd_sha384(group, out, kHashCLabel,
                                           sizeof(kHashCLabel) - 1, buf, len);
}

static VOPRF_METHOD voprf_pst1_method = {EC_group_p384,
                                         voprf_pst1_hash_to_group,
                                         voprf_pst1_hash_to_scalar};

int voprf_pst1_generate_key(CBB *out_private, CBB *out_public) {
  return voprf_generate_key(&voprf_pst1_method, out_private, out_public);
}

int voprf_pst1_derive_key_from_secret(CBB *out_private, CBB *out_public,
                                      const uint8_t *secret,
                                      size_t secret_len) {
  return voprf_derive_key_from_secret(&voprf_pst1_method, out_private,
                                      out_public, secret, secret_len);
}

int voprf_pst1_client_key_from_bytes(TRUST_TOKEN_CLIENT_KEY *key,
                                     const uint8_t *in, size_t len) {
  return voprf_client_key_from_bytes(&voprf_pst1_method, key, in, len);
}

int voprf_pst1_issuer_key_from_bytes(TRUST_TOKEN_ISSUER_KEY *key,
                                     const uint8_t *in, size_t len) {
  return voprf_issuer_key_from_bytes(&voprf_pst1_method, key, in, len);
}

STACK_OF(TRUST_TOKEN_PRETOKEN) *voprf_pst1_blind(CBB *cbb, size_t count,
                                                 int include_message,
                                                 const uint8_t *msg,
                                                 size_t msg_len) {
  return voprf_blind(&voprf_pst1_method, cbb, count, include_message, msg,
                     msg_len);
}

// |private_metadata| must be zero: this VOPRF method rejects private metadata.
int voprf_pst1_sign(const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs,
                    size_t num_requested, size_t num_to_issue,
                    uint8_t private_metadata) {
  if (private_metadata != 0) {
    return 0;
  }
  return voprf_sign(&voprf_pst1_method, key, cbb, cbs, num_requested,
                    num_to_issue);
}

int voprf_pst1_sign_with_proof_scalar_for_testing(
    const TRUST_TOKEN_ISSUER_KEY *key, CBB *cbb, CBS *cbs,
    size_t num_requested, size_t num_to_issue, uint8_t private_metadata,
    const uint8_t *proof_scalar_buf, size_t proof_scalar_len) {
  if (private_metadata != 0) {
    return 0;
  }
  return voprf_sign_with_proof_scalar_for_testing(
      &voprf_pst1_method, key, cbb, cbs, num_requested, num_to_issue,
      proof_scalar_buf, proof_scalar_len);
}

STACK_OF(TRUST_TOKEN) *voprf_pst1_unblind(
    const TRUST_TOKEN_CLIENT_KEY *key,
    const STACK_OF(TRUST_TOKEN_PRETOKEN) *pretokens, CBS *cbs, size_t count,
    uint32_t key_id) {
  return voprf_unblind(&voprf_pst1_method, key, pretokens, cbs, count, key_id);
}

// |out_private_metadata| is never written here; the underlying |voprf_read|
// takes no private-metadata output.
int voprf_pst1_read(const TRUST_TOKEN_ISSUER_KEY *key,
                    uint8_t out_nonce[TRUST_TOKEN_NONCE_SIZE],
                    uint8_t *out_private_metadata, const uint8_t *token,
                    size_t token_len, int include_message, const uint8_t *msg,
                    size_t msg_len) {
  return voprf_read(&voprf_pst1_method, key, out_nonce, token, token_len,
                    include_message, msg, msg_len);
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/a_digest.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */

// NOTE(review): the dump this chunk came from stripped all text between angle
// brackets, so the five '#include' directives below lost their header names
// and the 'reinterpret_cast' below lost its template argument. Restore both
// from upstream BoringSSL (crypto/x509/a_digest.cc) before compiling.
#include
#include
#include
#include
#include

// ASN1_digest serializes |data| with the legacy i2d function |i2d| and
// computes the |type| digest of the DER output, writing the digest to |md|
// and its length to |len|. Returns the result of EVP_Digest (one on success),
// or zero if allocation fails.
int ASN1_digest(i2d_of_void *i2d, const EVP_MD *type, char *data, unsigned char *md, unsigned int *len) {
  int i, ret;
  unsigned char *str, *p;
  // First i2d call with a NULL output computes the encoded length only.
  // NOTE(review): a negative |i| (i2d failure) is passed to OPENSSL_malloc
  // unchecked here — confirm against upstream before relying on this path.
  i = i2d(data, NULL);
  if ((str = (unsigned char *)OPENSSL_malloc(i)) == NULL) {
    return 0;
  }
  p = str;
  // Second call writes the DER encoding into |str|; |p| is advanced past it.
  i2d(data, &p);
  ret = EVP_Digest(str, i, md, len, type, NULL);
  OPENSSL_free(str);
  return ret;
}

// ASN1_item_digest DER-encodes |asn| per the ASN.1 template |it| and digests
// the result with |type|, writing the digest to |md| and its length to |len|.
// Returns the result of EVP_Digest (one on success), or zero if encoding
// fails.
int ASN1_item_digest(const ASN1_ITEM *it, const EVP_MD *type, void *asn, unsigned char *md, unsigned int *len) {
  int i, ret;
  unsigned char *str = NULL;
  // ASN1_item_i2d allocates the output buffer itself when |str| starts NULL.
  i = ASN1_item_i2d(reinterpret_cast(asn), &str, it);
  if (!str) {
    return 0;
  }
  ret = EVP_Digest(str, i, md, len, type, NULL);
  OPENSSL_free(str);
  return ret;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/a_sign.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */

// NOTE(review): the extraction stripped angle-bracket contents, so the eight
// bare '#include' directives below lost their header names and the
// 'reinterpret_cast' calls below lost their template arguments. Restore them
// from upstream BoringSSL (crypto/x509/a_sign.cc) before compiling.
#include
#include
#include
#include
#include
#include
#include
#include
#include "internal.h"

// ASN1_item_sign signs the DER encoding of |asn| (per template |it|) with
// |pkey| and digest |type|, writing the signature into |signature| and the
// matching AlgorithmIdentifier into |algor1| and |algor2| when non-NULL.
// Returns the signature length on success and zero on error.
int ASN1_item_sign(const ASN1_ITEM *it, X509_ALGOR *algor1, X509_ALGOR *algor2,
                   ASN1_BIT_STRING *signature, void *asn, EVP_PKEY *pkey,
                   const EVP_MD *type) {
  if (signature->type != V_ASN1_BIT_STRING) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_WRONG_TYPE);
    return 0;
  }
  EVP_MD_CTX ctx;
  EVP_MD_CTX_init(&ctx);
  if (!EVP_DigestSignInit(&ctx, NULL, type, NULL, pkey)) {
    EVP_MD_CTX_cleanup(&ctx);
    return 0;
  }
  // ASN1_item_sign_ctx cleans up |ctx| on every path, so no cleanup is
  // needed here after this call.
  return ASN1_item_sign_ctx(it, algor1, algor2, signature, asn, &ctx);
}

// ASN1_item_sign_ctx is the workhorse behind ASN1_item_sign. |ctx| must
// already be initialized with EVP_DigestSignInit. Returns the signature
// length on success and zero on failure; |ctx| is cleaned up in both cases.
int ASN1_item_sign_ctx(const ASN1_ITEM *it, X509_ALGOR *algor1,
                       X509_ALGOR *algor2, ASN1_BIT_STRING *signature,
                       void *asn, EVP_MD_CTX *ctx) {
  int ret = 0;
  uint8_t *in = NULL, *out = NULL;
  // The extra scope keeps the gotos below from jumping over initializers.
  {
    if (signature->type != V_ASN1_BIT_STRING) {
      OPENSSL_PUT_ERROR(ASN1, ASN1_R_WRONG_TYPE);
      goto err;
    }

    // Write out the requested copies of the AlgorithmIdentifier.
    if (algor1 && !x509_digest_sign_algorithm(ctx, algor1)) {
      goto err;
    }
    if (algor2 && !x509_digest_sign_algorithm(ctx, algor2)) {
      goto err;
    }

    int in_len = ASN1_item_i2d(reinterpret_cast(asn), &in, it);
    if (in_len < 0) {
      goto err;
    }

    // EVP_PKEY_size bounds the signature length for this key.
    EVP_PKEY *pkey = EVP_PKEY_CTX_get0_pkey(ctx->pctx);
    size_t out_len = EVP_PKEY_size(pkey);
    if (out_len > INT_MAX) {
      OPENSSL_PUT_ERROR(X509, ERR_R_OVERFLOW);
      goto err;
    }

    out = reinterpret_cast(OPENSSL_malloc(out_len));
    if (out == NULL) {
      goto err;
    }

    if (!EVP_DigestSign(ctx, out, &out_len, in, in_len)) {
      OPENSSL_PUT_ERROR(X509, ERR_R_EVP_LIB);
      goto err;
    }

    // |signature| takes ownership of |out|; NULL it so the err path does not
    // double-free.
    ASN1_STRING_set0(signature, out, (int)out_len);
    out = NULL;
    // A signature is a whole number of bytes, so record zero unused bits in
    // the BIT STRING.
    signature->flags &= ~(ASN1_STRING_FLAG_BITS_LEFT | 0x07);
    signature->flags |= ASN1_STRING_FLAG_BITS_LEFT;
    ret = (int)out_len;
  }

err:
  EVP_MD_CTX_cleanup(ctx);
  OPENSSL_free(in);
  OPENSSL_free(out);
  return ret;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/a_verify.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */

// NOTE(review): as above, the nine bare '#include' directives below lost
// their header names to the extraction; restore from upstream BoringSSL
// (crypto/x509/a_verify.cc).
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "internal.h"

// ASN1_item_verify checks |signature| over the DER encoding of |asn| (per
// template |it|) using public key |pkey| and signature algorithm |a|.
// Returns one if the signature verifies and zero otherwise.
int ASN1_item_verify(const ASN1_ITEM *it, const X509_ALGOR *a,
                     const ASN1_BIT_STRING *signature, void *asn,
                     EVP_PKEY *pkey) {
  if (!pkey) {
    OPENSSL_PUT_ERROR(X509, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }

  size_t sig_len;
  if (signature->type == V_ASN1_BIT_STRING) {
    // Reject BIT STRINGs whose final byte is only partially used.
    if (!ASN1_BIT_STRING_num_bytes(signature, &sig_len)) {
      OPENSSL_PUT_ERROR(X509, X509_R_INVALID_BIT_STRING_BITS_LEFT);
      return 0;
    }
  } else {
    sig_len = (size_t)ASN1_STRING_length(signature);
  }

  EVP_MD_CTX ctx;
  uint8_t *buf_in = NULL;
  int ret = 0, inl = 0;
  EVP_MD_CTX_init(&ctx);
  if (!x509_digest_verify_init(&ctx, a, pkey)) {
    goto err;
  }
  inl = ASN1_item_i2d(reinterpret_cast(asn), &buf_in, it);
  if (buf_in == NULL) {
    goto err;
  }
  if (!EVP_DigestVerify(&ctx, ASN1_STRING_get0_data(signature), sig_len,
                        buf_in, inl)) {
    OPENSSL_PUT_ERROR(X509, ERR_R_EVP_LIB);
    goto err;
  }
  ret = 1;

err:
  OPENSSL_free(buf_in);
  EVP_MD_CTX_cleanup(&ctx);
  return ret;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/algorithm.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */

// NOTE(review): the extraction stripped angle-bracket contents, so the six
// bare '#include' directives below lost their header names. Restore them
// from upstream BoringSSL (crypto/x509/algorithm.cc) before compiling.
#include
#include
#include
#include
#include
#include
#include "internal.h"

// Restrict the digests that are allowed in X509 certificates
//
// x509_digest_nid_ok returns zero for digests that must not appear in X.509
// signatures (MD4 and MD5) and one for everything else.
static int x509_digest_nid_ok(const int digest_nid) {
  switch (digest_nid) {
    case NID_md4:
    case NID_md5:
      return 0;
  }
  return 1;
}

// x509_digest_sign_algorithm writes into |algor| the AlgorithmIdentifier
// matching the signing operation configured in |ctx|. Returns one on success
// and zero on error.
int x509_digest_sign_algorithm(EVP_MD_CTX *ctx, X509_ALGOR *algor) {
  EVP_PKEY *pkey = EVP_PKEY_CTX_get0_pkey(ctx->pctx);
  if (pkey == NULL) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_CONTEXT_NOT_INITIALISED);
    return 0;
  }

  if (EVP_PKEY_id(pkey) == EVP_PKEY_RSA) {
    int pad_mode;
    if (!EVP_PKEY_CTX_get_rsa_padding(ctx->pctx, &pad_mode)) {
      return 0;
    }
    // RSA-PSS has special signature algorithm logic.
    if (pad_mode == RSA_PKCS1_PSS_PADDING) {
      return x509_rsa_ctx_to_pss(ctx, algor);
    }
  }

  // Ed25519's AlgorithmIdentifier carries no digest and omits parameters.
  if (EVP_PKEY_id(pkey) == EVP_PKEY_ED25519) {
    return X509_ALGOR_set0(algor, OBJ_nid2obj(NID_ED25519), V_ASN1_UNDEF,
                           NULL);
  }

  // Default behavior: look up the OID for the algorithm/hash pair and encode
  // that.
  const EVP_MD *digest = EVP_MD_CTX_get0_md(ctx);
  if (digest == NULL) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_CONTEXT_NOT_INITIALISED);
    return 0;
  }

  const int digest_nid = EVP_MD_type(digest);
  int sign_nid;
  if (!x509_digest_nid_ok(digest_nid) ||
      !OBJ_find_sigid_by_algs(&sign_nid, digest_nid, EVP_PKEY_id(pkey))) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_DIGEST_AND_KEY_TYPE_NOT_SUPPORTED);
    return 0;
  }

  // RSA signature algorithms include an explicit NULL parameter. Others omit
  // it.
  int paramtype =
      (EVP_PKEY_id(pkey) == EVP_PKEY_RSA) ? V_ASN1_NULL : V_ASN1_UNDEF;
  return X509_ALGOR_set0(algor, OBJ_nid2obj(sign_nid), paramtype, NULL);
}

// x509_digest_verify_init initializes |ctx| for verifying a signature made
// with algorithm |sigalg| under public key |pkey|. Returns one on success and
// zero if the algorithm is unknown, mismatched with |pkey|, or disallowed.
int x509_digest_verify_init(EVP_MD_CTX *ctx, const X509_ALGOR *sigalg,
                            EVP_PKEY *pkey) {
  // Convert the signature OID into digest and public key OIDs.
  int sigalg_nid = OBJ_obj2nid(sigalg->algorithm);
  int digest_nid, pkey_nid;
  if (!OBJ_find_sigid_algs(sigalg_nid, &digest_nid, &pkey_nid)) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM);
    return 0;
  }

  // Check the public key OID matches the public key type.
  if (pkey_nid != EVP_PKEY_id(pkey)) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_WRONG_PUBLIC_KEY_TYPE);
    return 0;
  }

  // Check for permitted digest algorithms
  if (!x509_digest_nid_ok(digest_nid)) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_DIGEST_AND_KEY_TYPE_NOT_SUPPORTED);
    return 0;
  }

  // NID_undef signals that there are custom parameters to set.
  if (digest_nid == NID_undef) {
    if (sigalg_nid == NID_rsassaPss) {
      return x509_rsa_pss_to_ctx(ctx, sigalg, pkey);
    }

    if (sigalg_nid == NID_ED25519) {
      // Ed25519's AlgorithmIdentifier must omit parameters entirely.
      if (sigalg->parameter != NULL) {
        OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PARAMETER);
        return 0;
      }
      return EVP_DigestVerifyInit(ctx, NULL, NULL, NULL, pkey);
    }

    OPENSSL_PUT_ERROR(ASN1, ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM);
    return 0;
  }

  // The parameter should be an explicit NULL for RSA and omitted for ECDSA. For
  // compatibility, we allow either for both algorithms. See b/167375496.
  //
  // TODO(davidben): Chromium's verifier allows both forms for RSA, but enforces
  // ECDSA more strictly. Align with Chromium and add a flag for b/167375496.
  if (sigalg->parameter != NULL && sigalg->parameter->type != V_ASN1_NULL) {
    OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PARAMETER);
    return 0;
  }

  // Otherwise, initialize with the digest from the OID.
  const EVP_MD *digest = EVP_get_digestbynid(digest_nid);
  if (digest == NULL) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM);
    return 0;
  }
  return EVP_DigestVerifyInit(ctx, NULL, digest, NULL, pkey);
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/asn1_gen.cc
================================================
/*
 * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").
You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include "../conf/internal.h" #include "../internal.h" #include "internal.h" // Although this file is in crypto/x509 for layering purposes, it emits // errors from the ASN.1 module for OpenSSL compatibility. // ASN1_GEN_MAX_DEPTH is the maximum number of nested TLVs allowed. #define ASN1_GEN_MAX_DEPTH 50 // ASN1_GEN_MAX_OUTPUT is the maximum output, in bytes, allowed. This limit is // necessary because the SEQUENCE and SET section reference mechanism allows the // output length to grow super-linearly with the input length. #define ASN1_GEN_MAX_OUTPUT (64 * 1024) // ASN1_GEN_FORMAT_* are the values for the format modifiers. #define ASN1_GEN_FORMAT_ASCII 1 #define ASN1_GEN_FORMAT_UTF8 2 #define ASN1_GEN_FORMAT_HEX 3 #define ASN1_GEN_FORMAT_BITLIST 4 // generate_v3 converts |str| into an ASN.1 structure and writes the result to // |cbb|. It returns one on success and zero on error. |depth| bounds recursion, // and |format| specifies the current format modifier. // // If |tag| is non-zero, the structure is implicitly tagged with |tag|. |tag| // must not have the constructed bit set. static int generate_v3(CBB *cbb, const char *str, const X509V3_CTX *cnf, CBS_ASN1_TAG tag, int format, int depth); static int bitstr_cb(const char *elem, size_t len, void *bitstr); ASN1_TYPE *ASN1_generate_v3(const char *str, const X509V3_CTX *cnf) { CBB cbb; if (!CBB_init(&cbb, 0) || // !generate_v3(&cbb, str, cnf, /*tag=*/0, ASN1_GEN_FORMAT_ASCII, /*depth=*/0)) { CBB_cleanup(&cbb); return NULL; } // While not strictly necessary to avoid a DoS (we rely on any super-linear // checks being performed internally), cap the overall output to // |ASN1_GEN_MAX_OUTPUT| so the externally-visible behavior is consistent. 
if (CBB_len(&cbb) > ASN1_GEN_MAX_OUTPUT) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_TOO_LONG); CBB_cleanup(&cbb); return NULL; } const uint8_t *der = CBB_data(&cbb); ASN1_TYPE *ret = d2i_ASN1_TYPE(NULL, &der, CBB_len(&cbb)); CBB_cleanup(&cbb); return ret; } static int cbs_str_equal(const CBS *cbs, const char *str) { return CBS_len(cbs) == strlen(str) && OPENSSL_memcmp(CBS_data(cbs), str, strlen(str)) == 0; } // parse_tag decodes a tag specifier in |cbs|. It returns the tag on success or // zero on error. static CBS_ASN1_TAG parse_tag(const CBS *cbs) { CBS copy = *cbs; uint64_t num; if (!CBS_get_u64_decimal(©, &num) || num > CBS_ASN1_TAG_NUMBER_MASK) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_NUMBER); return 0; } CBS_ASN1_TAG tag_class = CBS_ASN1_CONTEXT_SPECIFIC; // The tag may be suffixed by a class. uint8_t c; if (CBS_get_u8(©, &c)) { switch (c) { case 'U': tag_class = CBS_ASN1_UNIVERSAL; break; case 'A': tag_class = CBS_ASN1_APPLICATION; break; case 'P': tag_class = CBS_ASN1_PRIVATE; break; case 'C': tag_class = CBS_ASN1_CONTEXT_SPECIFIC; break; default: { OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_MODIFIER); return 0; } } if (CBS_len(©) != 0) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_MODIFIER); return 0; } } // Tag [UNIVERSAL 0] is reserved for indefinite-length end-of-contents. We // also use zero in this file to indicator no explicit tagging. 
if (tag_class == CBS_ASN1_UNIVERSAL && num == 0) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_NUMBER); return 0; } return tag_class | (CBS_ASN1_TAG)num; } static int generate_wrapped(CBB *cbb, const char *str, const X509V3_CTX *cnf, CBS_ASN1_TAG tag, int padding, int format, int depth) { CBB child; return CBB_add_asn1(cbb, &child, tag) && (!padding || CBB_add_u8(&child, 0)) && generate_v3(&child, str, cnf, /*tag=*/0, format, depth + 1) && CBB_flush(cbb); } static int generate_v3(CBB *cbb, const char *str, const X509V3_CTX *cnf, CBS_ASN1_TAG tag, int format, int depth) { assert((tag & CBS_ASN1_CONSTRUCTED) == 0); if (depth > ASN1_GEN_MAX_DEPTH) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_NESTED_TAGGING); return 0; } // Process modifiers. This function uses a mix of NUL-terminated strings and // |CBS|. Several functions only work with NUL-terminated strings, so we need // to keep track of when a slice spans the whole buffer. for (;;) { // Skip whitespace. while (*str != '\0' && OPENSSL_isspace((unsigned char)*str)) { str++; } // Modifiers end at commas. const char *comma = strchr(str, ','); if (comma == NULL) { break; } // Remove trailing whitespace. CBS modifier; CBS_init(&modifier, (const uint8_t *)str, comma - str); for (;;) { uint8_t v; CBS copy = modifier; if (!CBS_get_last_u8(©, &v) || !OPENSSL_isspace(v)) { break; } modifier = copy; } // Advance the string past the modifier, but save the original value. We // will need to rewind if this is not a recognized modifier. const char *str_old = str; str = comma + 1; // Each modifier is either NAME:VALUE or NAME. CBS name; int has_value = CBS_get_until_first(&modifier, &name, ':'); if (has_value) { CBS_skip(&modifier, 1); // Skip the colon. 
} else { name = modifier; CBS_init(&modifier, NULL, 0); } if (cbs_str_equal(&name, "FORMAT") || cbs_str_equal(&name, "FORM")) { if (cbs_str_equal(&modifier, "ASCII")) { format = ASN1_GEN_FORMAT_ASCII; } else if (cbs_str_equal(&modifier, "UTF8")) { format = ASN1_GEN_FORMAT_UTF8; } else if (cbs_str_equal(&modifier, "HEX")) { format = ASN1_GEN_FORMAT_HEX; } else if (cbs_str_equal(&modifier, "BITLIST")) { format = ASN1_GEN_FORMAT_BITLIST; } else { OPENSSL_PUT_ERROR(ASN1, ASN1_R_UNKNOWN_FORMAT); return 0; } } else if (cbs_str_equal(&name, "IMP") || cbs_str_equal(&name, "IMPLICIT")) { if (tag != 0) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_NESTED_TAGGING); return 0; } tag = parse_tag(&modifier); if (tag == 0) { return 0; } } else if (cbs_str_equal(&name, "EXP") || cbs_str_equal(&name, "EXPLICIT")) { // It would actually be supportable, but OpenSSL does not allow wrapping // an explicit tag in an implicit tag. if (tag != 0) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_NESTED_TAGGING); return 0; } tag = parse_tag(&modifier); return tag != 0 && generate_wrapped(cbb, str, cnf, tag | CBS_ASN1_CONSTRUCTED, /*padding=*/0, format, depth); } else if (cbs_str_equal(&name, "OCTWRAP")) { tag = tag == 0 ? CBS_ASN1_OCTETSTRING : tag; return generate_wrapped(cbb, str, cnf, tag, /*padding=*/0, format, depth); } else if (cbs_str_equal(&name, "BITWRAP")) { tag = tag == 0 ? CBS_ASN1_BITSTRING : tag; return generate_wrapped(cbb, str, cnf, tag, /*padding=*/1, format, depth); } else if (cbs_str_equal(&name, "SEQWRAP")) { tag = tag == 0 ? CBS_ASN1_SEQUENCE : (tag | CBS_ASN1_CONSTRUCTED); tag |= CBS_ASN1_CONSTRUCTED; return generate_wrapped(cbb, str, cnf, tag, /*padding=*/0, format, depth); } else if (cbs_str_equal(&name, "SETWRAP")) { tag = tag == 0 ? CBS_ASN1_SET : (tag | CBS_ASN1_CONSTRUCTED); return generate_wrapped(cbb, str, cnf, tag, /*padding=*/0, format, depth); } else { // If this was not a recognized modifier, rewind |str| to before splitting // on the comma. 
The type itself consumes all remaining input. str = str_old; break; } } // The final element is, like modifiers, NAME:VALUE or NAME, but VALUE spans // the length of the string, including any commas. const char *colon = strchr(str, ':'); CBS name; const char *value; int has_value = colon != NULL; if (has_value) { CBS_init(&name, (const uint8_t *)str, colon - str); value = colon + 1; } else { CBS_init(&name, (const uint8_t *)str, strlen(str)); value = ""; // Most types treat missing and empty value equivalently. } static const struct { const char *name; CBS_ASN1_TAG type; } kTypes[] = { {"BOOL", CBS_ASN1_BOOLEAN}, {"BOOLEAN", CBS_ASN1_BOOLEAN}, {"NULL", CBS_ASN1_NULL}, {"INT", CBS_ASN1_INTEGER}, {"INTEGER", CBS_ASN1_INTEGER}, {"ENUM", CBS_ASN1_ENUMERATED}, {"ENUMERATED", CBS_ASN1_ENUMERATED}, {"OID", CBS_ASN1_OBJECT}, {"OBJECT", CBS_ASN1_OBJECT}, {"UTCTIME", CBS_ASN1_UTCTIME}, {"UTC", CBS_ASN1_UTCTIME}, {"GENERALIZEDTIME", CBS_ASN1_GENERALIZEDTIME}, {"GENTIME", CBS_ASN1_GENERALIZEDTIME}, {"OCT", CBS_ASN1_OCTETSTRING}, {"OCTETSTRING", CBS_ASN1_OCTETSTRING}, {"BITSTR", CBS_ASN1_BITSTRING}, {"BITSTRING", CBS_ASN1_BITSTRING}, {"UNIVERSALSTRING", CBS_ASN1_UNIVERSALSTRING}, {"UNIV", CBS_ASN1_UNIVERSALSTRING}, {"IA5", CBS_ASN1_IA5STRING}, {"IA5STRING", CBS_ASN1_IA5STRING}, {"UTF8", CBS_ASN1_UTF8STRING}, {"UTF8String", CBS_ASN1_UTF8STRING}, {"BMP", CBS_ASN1_BMPSTRING}, {"BMPSTRING", CBS_ASN1_BMPSTRING}, {"PRINTABLESTRING", CBS_ASN1_PRINTABLESTRING}, {"PRINTABLE", CBS_ASN1_PRINTABLESTRING}, {"T61", CBS_ASN1_T61STRING}, {"T61STRING", CBS_ASN1_T61STRING}, {"TELETEXSTRING", CBS_ASN1_T61STRING}, {"SEQUENCE", CBS_ASN1_SEQUENCE}, {"SEQ", CBS_ASN1_SEQUENCE}, {"SET", CBS_ASN1_SET}, }; CBS_ASN1_TAG type = 0; for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kTypes); i++) { if (cbs_str_equal(&name, kTypes[i].name)) { type = kTypes[i].type; break; } } if (type == 0) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_UNKNOWN_TAG); return 0; } // If there is an implicit tag, use the constructed bit from the base 
type. tag = tag == 0 ? type : (tag | (type & CBS_ASN1_CONSTRUCTED)); CBB child; if (!CBB_add_asn1(cbb, &child, tag)) { return 0; } switch (type) { case CBS_ASN1_NULL: if (*value != '\0') { OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_NULL_VALUE); return 0; } return CBB_flush(cbb); case CBS_ASN1_BOOLEAN: { if (format != ASN1_GEN_FORMAT_ASCII) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_NOT_ASCII_FORMAT); return 0; } ASN1_BOOLEAN boolean; if (!X509V3_bool_from_string(value, &boolean)) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_BOOLEAN); return 0; } return CBB_add_u8(&child, boolean ? 0xff : 0x00) && CBB_flush(cbb); } case CBS_ASN1_INTEGER: case CBS_ASN1_ENUMERATED: { if (format != ASN1_GEN_FORMAT_ASCII) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_INTEGER_NOT_ASCII_FORMAT); return 0; } ASN1_INTEGER *obj = s2i_ASN1_INTEGER(NULL, value); if (obj == NULL) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_INTEGER); return 0; } int len = i2c_ASN1_INTEGER(obj, NULL); uint8_t *out; int ok = len > 0 && // CBB_add_space(&child, &out, len) && i2c_ASN1_INTEGER(obj, &out) == len && CBB_flush(cbb); ASN1_INTEGER_free(obj); return ok; } case CBS_ASN1_OBJECT: { if (format != ASN1_GEN_FORMAT_ASCII) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_OBJECT_NOT_ASCII_FORMAT); return 0; } ASN1_OBJECT *obj = OBJ_txt2obj(value, /*dont_search_names=*/0); if (obj == NULL || obj->length == 0) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_OBJECT); return 0; } int ok = CBB_add_bytes(&child, obj->data, obj->length) && CBB_flush(cbb); ASN1_OBJECT_free(obj); return ok; } case CBS_ASN1_UTCTIME: case CBS_ASN1_GENERALIZEDTIME: { if (format != ASN1_GEN_FORMAT_ASCII) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_TIME_NOT_ASCII_FORMAT); return 0; } CBS value_cbs; CBS_init(&value_cbs, (const uint8_t *)value, strlen(value)); int ok = type == CBS_ASN1_UTCTIME ? 
CBS_parse_utc_time(&value_cbs, NULL, /*allow_timezone_offset=*/0) : CBS_parse_generalized_time(&value_cbs, NULL, /*allow_timezone_offset=*/0); if (!ok) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_TIME_VALUE); return 0; } return CBB_add_bytes(&child, (const uint8_t *)value, strlen(value)) && CBB_flush(cbb); } case CBS_ASN1_UNIVERSALSTRING: case CBS_ASN1_IA5STRING: case CBS_ASN1_UTF8STRING: case CBS_ASN1_BMPSTRING: case CBS_ASN1_PRINTABLESTRING: case CBS_ASN1_T61STRING: { int encoding; if (format == ASN1_GEN_FORMAT_ASCII) { encoding = MBSTRING_ASC; } else if (format == ASN1_GEN_FORMAT_UTF8) { encoding = MBSTRING_UTF8; } else { OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_FORMAT); return 0; } // |maxsize| is measured in code points, rather than bytes, but pass it in // as a loose cap so fuzzers can exit from excessively long inputs // earlier. This limit is not load-bearing because |ASN1_mbstring_ncopy|'s // output is already linear in the input. ASN1_STRING *obj = NULL; if (ASN1_mbstring_ncopy(&obj, (const uint8_t *)value, -1, encoding, ASN1_tag2bit(type), /*minsize=*/0, /*maxsize=*/ASN1_GEN_MAX_OUTPUT) <= 0) { return 0; } int ok = CBB_add_bytes(&child, obj->data, obj->length) && CBB_flush(cbb); ASN1_STRING_free(obj); return ok; } case CBS_ASN1_BITSTRING: if (format == ASN1_GEN_FORMAT_BITLIST) { ASN1_BIT_STRING *obj = ASN1_BIT_STRING_new(); if (obj == NULL) { return 0; } if (!CONF_parse_list(value, ',', 1, bitstr_cb, obj)) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_LIST_ERROR); ASN1_BIT_STRING_free(obj); return 0; } int len = i2c_ASN1_BIT_STRING(obj, NULL); uint8_t *out; int ok = len > 0 && // CBB_add_space(&child, &out, len) && i2c_ASN1_BIT_STRING(obj, &out) == len && // CBB_flush(cbb); ASN1_BIT_STRING_free(obj); return ok; } // The other formats are the same as OCTET STRING, but with the leading // zero bytes. 
if (!CBB_add_u8(&child, 0)) { return 0; } [[fallthrough]]; case CBS_ASN1_OCTETSTRING: if (format == ASN1_GEN_FORMAT_ASCII) { return CBB_add_bytes(&child, (const uint8_t *)value, strlen(value)) && CBB_flush(cbb); } if (format == ASN1_GEN_FORMAT_HEX) { size_t len; uint8_t *data = x509v3_hex_to_bytes(value, &len); if (data == NULL) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_HEX); return 0; } int ok = CBB_add_bytes(&child, data, len) && CBB_flush(cbb); OPENSSL_free(data); return ok; } OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_BITSTRING_FORMAT); return 0; case CBS_ASN1_SEQUENCE: case CBS_ASN1_SET: if (has_value) { if (cnf == NULL) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_SEQUENCE_OR_SET_NEEDS_CONFIG); return 0; } const STACK_OF(CONF_VALUE) *section = X509V3_get_section(cnf, value); if (section == NULL) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_SEQUENCE_OR_SET_NEEDS_CONFIG); return 0; } for (size_t i = 0; i < sk_CONF_VALUE_num(section); i++) { const CONF_VALUE *conf = sk_CONF_VALUE_value(section, i); if (!generate_v3(&child, conf->value, cnf, /*tag=*/0, ASN1_GEN_FORMAT_ASCII, depth + 1)) { return 0; } // This recursive call, by referencing |section|, is the one place // where |generate_v3|'s output can be super-linear in the input. // Check bounds here. if (CBB_len(&child) > ASN1_GEN_MAX_OUTPUT) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_TOO_LONG); return 0; } } } if (type == CBS_ASN1_SET) { // The SET type here is a SET OF and must be sorted. return CBB_flush_asn1_set_of(&child) && CBB_flush(cbb); } return CBB_flush(cbb); default: OPENSSL_PUT_ERROR(ASN1, ERR_R_INTERNAL_ERROR); return 0; } } static int bitstr_cb(const char *elem, size_t len, void *bitstr) { CBS cbs; CBS_init(&cbs, (const uint8_t *)elem, len); uint64_t bitnum; if (!CBS_get_u64_decimal(&cbs, &bitnum) || CBS_len(&cbs) != 0 || // Cap the highest allowed bit so this mechanism cannot be used to create // extremely large allocations with short inputs. 
The highest named bit in // RFC 5280 is 8, so 256 should give comfortable margin but still only // allow a 32-byte allocation. // // We do not consider this function to be safe with untrusted inputs (even // without bugs, it is prone to string injection vulnerabilities), so DoS // is not truly a concern, but the limit is necessary to keep fuzzing // effective. bitnum > 256) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_NUMBER); return 0; } if (!ASN1_BIT_STRING_set_bit(reinterpret_cast(bitstr), (int)bitnum, 1)) { return 0; } return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/by_dir.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include "../internal.h" #include "internal.h" typedef struct lookup_dir_hashes_st { uint32_t hash; int suffix; } BY_DIR_HASH; typedef struct lookup_dir_entry_st { CRYPTO_MUTEX lock; char *dir; int dir_type; STACK_OF(BY_DIR_HASH) *hashes; } BY_DIR_ENTRY; typedef struct lookup_dir_st { STACK_OF(BY_DIR_ENTRY) *dirs; } BY_DIR; DEFINE_STACK_OF(BY_DIR_HASH) DEFINE_STACK_OF(BY_DIR_ENTRY) static int dir_ctrl(X509_LOOKUP *ctx, int cmd, const char *argp, long argl, char **ret); static int new_dir(X509_LOOKUP *lu); static void free_dir(X509_LOOKUP *lu); static int add_cert_dir(BY_DIR *ctx, const char *dir, int type); static int get_cert_by_subject(X509_LOOKUP *xl, int type, X509_NAME *name, X509_OBJECT *ret); static const X509_LOOKUP_METHOD x509_dir_lookup = { new_dir, // new free_dir, // free dir_ctrl, // ctrl get_cert_by_subject, // get_by_subject }; const X509_LOOKUP_METHOD *X509_LOOKUP_hash_dir(void) { return 
&x509_dir_lookup; } static int dir_ctrl(X509_LOOKUP *ctx, int cmd, const char *argp, long argl, char **retp) { int ret = 0; char *dir = NULL; BY_DIR *ld = reinterpret_cast(ctx->method_data); switch (cmd) { case X509_L_ADD_DIR: if (argl == X509_FILETYPE_DEFAULT) { dir = (char *)getenv(X509_get_default_cert_dir_env()); if (dir) { ret = add_cert_dir(ld, dir, X509_FILETYPE_PEM); } else { ret = add_cert_dir(ld, X509_get_default_cert_dir(), X509_FILETYPE_PEM); } if (!ret) { OPENSSL_PUT_ERROR(X509, X509_R_LOADING_CERT_DIR); } } else { ret = add_cert_dir(ld, argp, (int)argl); } break; } return ret; } static int new_dir(X509_LOOKUP *lu) { BY_DIR *a; if ((a = (BY_DIR *)OPENSSL_malloc(sizeof(BY_DIR))) == NULL) { return 0; } a->dirs = NULL; lu->method_data = a; return 1; } static void by_dir_hash_free(BY_DIR_HASH *hash) { OPENSSL_free(hash); } static int by_dir_hash_cmp(const BY_DIR_HASH *const *a, const BY_DIR_HASH *const *b) { if ((*a)->hash > (*b)->hash) { return 1; } if ((*a)->hash < (*b)->hash) { return -1; } return 0; } static void by_dir_entry_free(BY_DIR_ENTRY *ent) { if (ent != NULL) { CRYPTO_MUTEX_cleanup(&ent->lock); OPENSSL_free(ent->dir); sk_BY_DIR_HASH_pop_free(ent->hashes, by_dir_hash_free); OPENSSL_free(ent); } } static void free_dir(X509_LOOKUP *lu) { BY_DIR *a = reinterpret_cast(lu->method_data); if (a != NULL) { sk_BY_DIR_ENTRY_pop_free(a->dirs, by_dir_entry_free); OPENSSL_free(a); } } #if defined(OPENSSL_WINDOWS) #define DIR_HASH_SEPARATOR ';' #else #define DIR_HASH_SEPARATOR ':' #endif static int add_cert_dir(BY_DIR *ctx, const char *dir, int type) { size_t j, len; const char *s, *ss, *p; if (dir == NULL || !*dir) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_DIRECTORY); return 0; } s = dir; p = s; do { if (*p == DIR_HASH_SEPARATOR || *p == '\0') { BY_DIR_ENTRY *ent; ss = s; s = p + 1; len = p - ss; if (len == 0) { continue; } for (j = 0; j < sk_BY_DIR_ENTRY_num(ctx->dirs); j++) { ent = sk_BY_DIR_ENTRY_value(ctx->dirs, j); if (strlen(ent->dir) == len && 
strncmp(ent->dir, ss, len) == 0) { break; } } if (j < sk_BY_DIR_ENTRY_num(ctx->dirs)) { continue; } if (ctx->dirs == NULL) { ctx->dirs = sk_BY_DIR_ENTRY_new_null(); if (!ctx->dirs) { return 0; } } ent = reinterpret_cast( OPENSSL_malloc(sizeof(BY_DIR_ENTRY))); if (!ent) { return 0; } CRYPTO_MUTEX_init(&ent->lock); ent->dir_type = type; ent->hashes = sk_BY_DIR_HASH_new(by_dir_hash_cmp); ent->dir = OPENSSL_strndup(ss, len); if (ent->dir == NULL || ent->hashes == NULL || !sk_BY_DIR_ENTRY_push(ctx->dirs, ent)) { by_dir_entry_free(ent); return 0; } } } while (*p++ != '\0'); return 1; } static int get_cert_by_subject(X509_LOOKUP *xl, int type, X509_NAME *name, X509_OBJECT *ret) { union { struct { X509 st_x509; X509_CINF st_x509_cinf; } x509; struct { X509_CRL st_crl; X509_CRL_INFO st_crl_info; } crl; } data; int ok = 0; size_t i; int k; uint32_t h; uint32_t hash_array[2]; int hash_index; char *b = NULL; X509_OBJECT stmp, *tmp; const char *postfix = ""; if (name == NULL) { return 0; } stmp.type = type; BY_DIR *ctx = reinterpret_cast(xl->method_data); if (type == X509_LU_X509) { data.x509.st_x509.cert_info = &data.x509.st_x509_cinf; data.x509.st_x509_cinf.subject = name; stmp.data.x509 = &data.x509.st_x509; postfix = ""; } else if (type == X509_LU_CRL) { data.crl.st_crl.crl = &data.crl.st_crl_info; data.crl.st_crl_info.issuer = name; stmp.data.crl = &data.crl.st_crl; postfix = "r"; } else { OPENSSL_PUT_ERROR(X509, X509_R_WRONG_LOOKUP_TYPE); goto finish; } hash_array[0] = X509_NAME_hash(name); hash_array[1] = X509_NAME_hash_old(name); for (hash_index = 0; hash_index < 2; ++hash_index) { h = hash_array[hash_index]; for (i = 0; i < sk_BY_DIR_ENTRY_num(ctx->dirs); i++) { BY_DIR_ENTRY *ent; size_t idx; BY_DIR_HASH htmp, *hent; ent = sk_BY_DIR_ENTRY_value(ctx->dirs, i); if (type == X509_LU_CRL && ent->hashes) { htmp.hash = h; CRYPTO_MUTEX_lock_read(&ent->lock); if (sk_BY_DIR_HASH_find(ent->hashes, &idx, &htmp)) { hent = sk_BY_DIR_HASH_value(ent->hashes, idx); k = hent->suffix; } 
else { hent = NULL; k = 0; } CRYPTO_MUTEX_unlock_read(&ent->lock); } else { k = 0; hent = NULL; } for (;;) { OPENSSL_free(b); if (OPENSSL_asprintf(&b, "%s/%08" PRIx32 ".%s%d", ent->dir, h, postfix, k) == -1) { OPENSSL_PUT_ERROR(X509, ERR_R_BUF_LIB); b = nullptr; goto finish; } if (type == X509_LU_X509) { if ((X509_load_cert_file(xl, b, ent->dir_type)) == 0) { // Don't expose the lower level error, All of these boil // down to "we could not find a CA". ERR_clear_error(); break; } } else if (type == X509_LU_CRL) { if ((X509_load_crl_file(xl, b, ent->dir_type)) == 0) { // Don't expose the lower level error, All of these boil // down to "we could not find a CRL". ERR_clear_error(); break; } } // The lack of a CA or CRL will be caught higher up k++; } // we have added it to the cache so now pull it out again CRYPTO_MUTEX_lock_write(&xl->store_ctx->objs_lock); tmp = NULL; sk_X509_OBJECT_sort(xl->store_ctx->objs); if (sk_X509_OBJECT_find(xl->store_ctx->objs, &idx, &stmp)) { tmp = sk_X509_OBJECT_value(xl->store_ctx->objs, idx); } CRYPTO_MUTEX_unlock_write(&xl->store_ctx->objs_lock); // If a CRL, update the last file suffix added for this if (type == X509_LU_CRL) { CRYPTO_MUTEX_lock_write(&ent->lock); // Look for entry again in case another thread added an entry // first. 
if (!hent) { htmp.hash = h; sk_BY_DIR_HASH_sort(ent->hashes); if (sk_BY_DIR_HASH_find(ent->hashes, &idx, &htmp)) { hent = sk_BY_DIR_HASH_value(ent->hashes, idx); } } if (!hent) { hent = reinterpret_cast( OPENSSL_malloc(sizeof(BY_DIR_HASH))); if (hent == NULL) { CRYPTO_MUTEX_unlock_write(&ent->lock); ok = 0; goto finish; } hent->hash = h; hent->suffix = k; if (!sk_BY_DIR_HASH_push(ent->hashes, hent)) { CRYPTO_MUTEX_unlock_write(&ent->lock); OPENSSL_free(hent); ok = 0; goto finish; } sk_BY_DIR_HASH_sort(ent->hashes); } else if (hent->suffix < k) { hent->suffix = k; } CRYPTO_MUTEX_unlock_write(&ent->lock); } if (tmp != NULL) { ok = 1; ret->type = tmp->type; OPENSSL_memcpy(&ret->data, &tmp->data, sizeof(ret->data)); // Clear any errors that might have been raised processing empty // or malformed files. ERR_clear_error(); // If we were going to up the reference count, we would need // to do it on a perl 'type' basis goto finish; } } } finish: OPENSSL_free(b); return ok; } int X509_LOOKUP_add_dir(X509_LOOKUP *lookup, const char *name, int type) { return X509_LOOKUP_ctrl(lookup, X509_L_ADD_DIR, name, type, NULL); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/by_file.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "internal.h" static int by_file_ctrl(X509_LOOKUP *ctx, int cmd, const char *argc, long argl, char **ret); static const X509_LOOKUP_METHOD x509_file_lookup = { NULL, // new NULL, // free by_file_ctrl, // ctrl NULL, // get_by_subject }; const X509_LOOKUP_METHOD *X509_LOOKUP_file(void) { return &x509_file_lookup; } static int by_file_ctrl(X509_LOOKUP *ctx, int cmd, const char *argp, long argl, char **ret) { if (cmd != X509_L_FILE_LOAD) { return 0; } const char *file = argp; int type = argl; if (argl == X509_FILETYPE_DEFAULT) { if ((file = getenv(X509_get_default_cert_file_env())) == NULL) { file = X509_get_default_cert_file(); } type = X509_FILETYPE_PEM; } if (X509_load_cert_crl_file(ctx, file, type) != 0) { return 1; } if (argl == X509_FILETYPE_DEFAULT) { OPENSSL_PUT_ERROR(X509, X509_R_LOADING_DEFAULTS); } return 0; } int X509_load_cert_file(X509_LOOKUP *ctx, const char *file, int type) { int ret = 0; BIO *in = NULL; int i, count = 0; X509 *x = NULL; in = BIO_new(BIO_s_file()); if ((in == NULL) || (BIO_read_filename(in, file) <= 0)) { OPENSSL_PUT_ERROR(X509, ERR_R_SYS_LIB); goto err; } if (type == X509_FILETYPE_PEM) { for (;;) { x = PEM_read_bio_X509_AUX(in, NULL, NULL, NULL); if (x == NULL) { uint32_t error = ERR_peek_last_error(); if (ERR_GET_LIB(error) == ERR_LIB_PEM && ERR_GET_REASON(error) == PEM_R_NO_START_LINE && count > 0) { ERR_clear_error(); break; } OPENSSL_PUT_ERROR(X509, ERR_R_PEM_LIB); goto err; } i = X509_STORE_add_cert(ctx->store_ctx, x); if (!i) { goto err; } count++; X509_free(x); x = NULL; } ret = count; } else if (type == X509_FILETYPE_ASN1) { x = d2i_X509_bio(in, NULL); if (x == NULL) { OPENSSL_PUT_ERROR(X509, ERR_R_ASN1_LIB); goto err; } i = X509_STORE_add_cert(ctx->store_ctx, x); if (!i) { goto err; } ret = i; } else { OPENSSL_PUT_ERROR(X509, X509_R_BAD_X509_FILETYPE); goto 
err; } if (ret == 0) { OPENSSL_PUT_ERROR(X509, X509_R_NO_CERTIFICATE_FOUND); } err: X509_free(x); BIO_free(in); return ret; } int X509_load_crl_file(X509_LOOKUP *ctx, const char *file, int type) { int ret = 0; BIO *in = NULL; int i, count = 0; X509_CRL *x = NULL; in = BIO_new(BIO_s_file()); if ((in == NULL) || (BIO_read_filename(in, file) <= 0)) { OPENSSL_PUT_ERROR(X509, ERR_R_SYS_LIB); goto err; } if (type == X509_FILETYPE_PEM) { for (;;) { x = PEM_read_bio_X509_CRL(in, NULL, NULL, NULL); if (x == NULL) { uint32_t error = ERR_peek_last_error(); if (ERR_GET_LIB(error) == ERR_LIB_PEM && ERR_GET_REASON(error) == PEM_R_NO_START_LINE && count > 0) { ERR_clear_error(); break; } OPENSSL_PUT_ERROR(X509, ERR_R_PEM_LIB); goto err; } i = X509_STORE_add_crl(ctx->store_ctx, x); if (!i) { goto err; } count++; X509_CRL_free(x); x = NULL; } ret = count; } else if (type == X509_FILETYPE_ASN1) { x = d2i_X509_CRL_bio(in, NULL); if (x == NULL) { OPENSSL_PUT_ERROR(X509, ERR_R_ASN1_LIB); goto err; } i = X509_STORE_add_crl(ctx->store_ctx, x); if (!i) { goto err; } ret = i; } else { OPENSSL_PUT_ERROR(X509, X509_R_BAD_X509_FILETYPE); goto err; } if (ret == 0) { OPENSSL_PUT_ERROR(X509, X509_R_NO_CRL_FOUND); } err: X509_CRL_free(x); BIO_free(in); return ret; } int X509_load_cert_crl_file(X509_LOOKUP *ctx, const char *file, int type) { STACK_OF(X509_INFO) *inf; X509_INFO *itmp; BIO *in; size_t i; int count = 0; if (type != X509_FILETYPE_PEM) { return X509_load_cert_file(ctx, file, type); } in = BIO_new_file(file, "rb"); if (!in) { OPENSSL_PUT_ERROR(X509, ERR_R_SYS_LIB); return 0; } inf = PEM_X509_INFO_read_bio(in, NULL, NULL, NULL); BIO_free(in); if (!inf) { OPENSSL_PUT_ERROR(X509, ERR_R_PEM_LIB); return 0; } for (i = 0; i < sk_X509_INFO_num(inf); i++) { itmp = sk_X509_INFO_value(inf, i); if (itmp->x509) { if (!X509_STORE_add_cert(ctx->store_ctx, itmp->x509)) { goto err; } count++; } if (itmp->crl) { if (!X509_STORE_add_crl(ctx->store_ctx, itmp->crl)) { goto err; } count++; } } if (count == 
0) { OPENSSL_PUT_ERROR(X509, X509_R_NO_CERTIFICATE_OR_CRL_FOUND); } err: sk_X509_INFO_pop_free(inf, X509_INFO_free); return count; } int X509_LOOKUP_load_file(X509_LOOKUP *lookup, const char *name, int type) { return X509_LOOKUP_ctrl(lookup, X509_L_FILE_LOAD, name, type, NULL); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/ext_dat.h ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ // This file contains a table of "standard" extensions #if defined(__cplusplus) extern "C" { #endif extern const X509V3_EXT_METHOD v3_bcons, v3_nscert, v3_key_usage, v3_ext_ku; extern const X509V3_EXT_METHOD v3_info, v3_sinfo; extern const X509V3_EXT_METHOD v3_ns_ia5_list[], v3_alt[], v3_skey_id, v3_akey_id; extern const X509V3_EXT_METHOD v3_crl_num, v3_crl_reason, v3_crl_invdate; extern const X509V3_EXT_METHOD v3_delta_crl, v3_cpols, v3_crld, v3_freshest_crl; extern const X509V3_EXT_METHOD v3_ocsp_nonce, v3_ocsp_accresp, v3_ocsp_acutoff; extern const X509V3_EXT_METHOD v3_ocsp_crlid, v3_ocsp_nocheck, v3_ocsp_serviceloc; extern const X509V3_EXT_METHOD v3_crl_hold; extern const X509V3_EXT_METHOD v3_policy_mappings, v3_policy_constraints; extern const X509V3_EXT_METHOD v3_name_constraints, v3_inhibit_anyp, v3_idp; extern const X509V3_EXT_METHOD v3_addr, v3_asid; // This table will be searched using OBJ_bsearch so it *must* kept in order // of the ext_nid values. 
// TODO(fork): OCSP support #define OPENSSL_NO_OCSP static const X509V3_EXT_METHOD *const standard_exts[] = { &v3_nscert, &v3_ns_ia5_list[0], &v3_ns_ia5_list[1], &v3_ns_ia5_list[2], &v3_ns_ia5_list[3], &v3_ns_ia5_list[4], &v3_ns_ia5_list[5], &v3_ns_ia5_list[6], &v3_skey_id, &v3_key_usage, &v3_alt[0], &v3_alt[1], &v3_bcons, &v3_crl_num, &v3_cpols, &v3_akey_id, &v3_crld, &v3_ext_ku, &v3_delta_crl, &v3_crl_reason, &v3_crl_invdate, &v3_info, #ifndef OPENSSL_NO_OCSP &v3_ocsp_nonce, &v3_ocsp_crlid, &v3_ocsp_accresp, &v3_ocsp_acutoff, &v3_ocsp_serviceloc, #endif &v3_ocsp_nocheck, &v3_sinfo, &v3_policy_constraints, #ifndef OPENSSL_NO_OCSP &v3_crl_hold, #endif &v3_name_constraints, &v3_policy_mappings, &v3_inhibit_anyp, &v3_idp, &v3_alt[2], &v3_freshest_crl, }; // Number of standard extensions #define STANDARD_EXTENSION_COUNT \ (sizeof(standard_exts) / sizeof(X509V3_EXT_METHOD *)) #if defined(__cplusplus) } // extern C #endif ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/i2d_pr.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include int i2d_PrivateKey(const EVP_PKEY *a, uint8_t **pp) { switch (EVP_PKEY_id(a)) { case EVP_PKEY_RSA: return i2d_RSAPrivateKey(EVP_PKEY_get0_RSA(a), pp); case EVP_PKEY_EC: return i2d_ECPrivateKey(EVP_PKEY_get0_EC_KEY(a), pp); case EVP_PKEY_DSA: return i2d_DSAPrivateKey(EVP_PKEY_get0_DSA(a), pp); default: // Although this file is in crypto/x509 for layering reasons, it emits // an error code from ASN1 for OpenSSL compatibility. 
OPENSSL_PUT_ERROR(ASN1, ASN1_R_UNSUPPORTED_PUBLIC_KEY_TYPE); return -1; } } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/internal.h ================================================ /* * Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_X509_INTERNAL_H #define OPENSSL_HEADER_X509_INTERNAL_H #include #include #include #include "../asn1/internal.h" #include "../internal.h" #if defined(__cplusplus) extern "C" { #endif // Internal structures. typedef struct X509_val_st { ASN1_TIME *notBefore; ASN1_TIME *notAfter; } X509_VAL; DECLARE_ASN1_FUNCTIONS_const(X509_VAL) struct X509_pubkey_st { X509_ALGOR *algor; ASN1_BIT_STRING *public_key; EVP_PKEY *pkey; } /* X509_PUBKEY */; // X509_PUBKEY is an |ASN1_ITEM| whose ASN.1 type is SubjectPublicKeyInfo and C // type is |X509_PUBKEY*|. DECLARE_ASN1_ITEM(X509_PUBKEY) struct X509_name_entry_st { ASN1_OBJECT *object; ASN1_STRING *value; int set; } /* X509_NAME_ENTRY */; // X509_NAME_ENTRY is an |ASN1_ITEM| whose ASN.1 type is AttributeTypeAndValue // (RFC 5280) and C type is |X509_NAME_ENTRY*|. DECLARE_ASN1_ITEM(X509_NAME_ENTRY) // we always keep X509_NAMEs in 2 forms. struct X509_name_st { STACK_OF(X509_NAME_ENTRY) *entries; int modified; // true if 'bytes' needs to be built BUF_MEM *bytes; unsigned char *canon_enc; int canon_enclen; } /* X509_NAME */; struct x509_attributes_st { ASN1_OBJECT *object; STACK_OF(ASN1_TYPE) *set; } /* X509_ATTRIBUTE */; // X509_ATTRIBUTE is an |ASN1_ITEM| whose ASN.1 type is Attribute (RFC 2986) and // C type is |X509_ATTRIBUTE*|. 
DECLARE_ASN1_ITEM(X509_ATTRIBUTE) typedef struct x509_cert_aux_st { STACK_OF(ASN1_OBJECT) *trust; // trusted uses STACK_OF(ASN1_OBJECT) *reject; // rejected uses ASN1_UTF8STRING *alias; // "friendly name" ASN1_OCTET_STRING *keyid; // key id of private key } X509_CERT_AUX; DECLARE_ASN1_FUNCTIONS_const(X509_CERT_AUX) struct X509_extension_st { ASN1_OBJECT *object; ASN1_BOOLEAN critical; ASN1_OCTET_STRING *value; } /* X509_EXTENSION */; // X509_EXTENSION is an |ASN1_ITEM| whose ASN.1 type is X.509 Extension (RFC // 5280) and C type is |X509_EXTENSION*|. DECLARE_ASN1_ITEM(X509_EXTENSION) // X509_EXTENSIONS is an |ASN1_ITEM| whose ASN.1 type is SEQUENCE of Extension // (RFC 5280) and C type is |STACK_OF(X509_EXTENSION)*|. DECLARE_ASN1_ITEM(X509_EXTENSIONS) typedef struct { ASN1_INTEGER *version; // [ 0 ] default of v1 ASN1_INTEGER *serialNumber; X509_ALGOR *signature; X509_NAME *issuer; X509_VAL *validity; X509_NAME *subject; X509_PUBKEY *key; ASN1_BIT_STRING *issuerUID; // [ 1 ] optional in v2 ASN1_BIT_STRING *subjectUID; // [ 2 ] optional in v2 STACK_OF(X509_EXTENSION) *extensions; // [ 3 ] optional in v3 ASN1_ENCODING enc; } X509_CINF; // TODO(https://crbug.com/boringssl/407): This is not const because it contains // an |X509_NAME|. DECLARE_ASN1_FUNCTIONS(X509_CINF) struct x509_st { X509_CINF *cert_info; X509_ALGOR *sig_alg; ASN1_BIT_STRING *signature; CRYPTO_refcount_t references; CRYPTO_EX_DATA ex_data; // These contain copies of various extension values long ex_pathlen; uint32_t ex_flags; uint32_t ex_kusage; uint32_t ex_xkusage; ASN1_OCTET_STRING *skid; AUTHORITY_KEYID *akid; STACK_OF(DIST_POINT) *crldp; STACK_OF(GENERAL_NAME) *altname; NAME_CONSTRAINTS *nc; unsigned char cert_hash[SHA256_DIGEST_LENGTH]; X509_CERT_AUX *aux; CRYPTO_MUTEX lock; } /* X509 */; // X509 is an |ASN1_ITEM| whose ASN.1 type is X.509 Certificate (RFC 5280) and C // type is |X509*|. 
DECLARE_ASN1_ITEM(X509) typedef struct { ASN1_ENCODING enc; ASN1_INTEGER *version; X509_NAME *subject; X509_PUBKEY *pubkey; // d=2 hl=2 l= 0 cons: cont: 00 STACK_OF(X509_ATTRIBUTE) *attributes; // [ 0 ] } X509_REQ_INFO; // TODO(https://crbug.com/boringssl/407): This is not const because it contains // an |X509_NAME|. DECLARE_ASN1_FUNCTIONS(X509_REQ_INFO) struct X509_req_st { X509_REQ_INFO *req_info; X509_ALGOR *sig_alg; ASN1_BIT_STRING *signature; } /* X509_REQ */; // X509_REQ is an |ASN1_ITEM| whose ASN.1 type is CertificateRequest (RFC 2986) // and C type is |X509_REQ*|. DECLARE_ASN1_ITEM(X509_REQ) struct x509_revoked_st { ASN1_INTEGER *serialNumber; ASN1_TIME *revocationDate; STACK_OF(X509_EXTENSION) /* optional */ *extensions; // Revocation reason int reason; } /* X509_REVOKED */; // X509_REVOKED is an |ASN1_ITEM| whose ASN.1 type is an element of the // revokedCertificates field of TBSCertList (RFC 5280) and C type is // |X509_REVOKED*|. DECLARE_ASN1_ITEM(X509_REVOKED) typedef struct { ASN1_INTEGER *version; X509_ALGOR *sig_alg; X509_NAME *issuer; ASN1_TIME *lastUpdate; ASN1_TIME *nextUpdate; STACK_OF(X509_REVOKED) *revoked; STACK_OF(X509_EXTENSION) /* [0] */ *extensions; ASN1_ENCODING enc; } X509_CRL_INFO; // TODO(https://crbug.com/boringssl/407): This is not const because it contains // an |X509_NAME|. 
DECLARE_ASN1_FUNCTIONS(X509_CRL_INFO) // Values in idp_flags field // IDP present #define IDP_PRESENT 0x1 // IDP values inconsistent #define IDP_INVALID 0x2 // onlyuser true #define IDP_ONLYUSER 0x4 // onlyCA true #define IDP_ONLYCA 0x8 // onlyattr true #define IDP_ONLYATTR 0x10 // indirectCRL true #define IDP_INDIRECT 0x20 // onlysomereasons present #define IDP_REASONS 0x40 struct X509_crl_st { // actual signature X509_CRL_INFO *crl; X509_ALGOR *sig_alg; ASN1_BIT_STRING *signature; CRYPTO_refcount_t references; int flags; // Copies of various extensions AUTHORITY_KEYID *akid; ISSUING_DIST_POINT *idp; // Convenient breakdown of IDP int idp_flags; unsigned char crl_hash[SHA256_DIGEST_LENGTH]; } /* X509_CRL */; // X509_CRL is an |ASN1_ITEM| whose ASN.1 type is X.509 CertificateList (RFC // 5280) and C type is |X509_CRL*|. DECLARE_ASN1_ITEM(X509_CRL) // GENERAL_NAME is an |ASN1_ITEM| whose ASN.1 type is GeneralName and C type is // |GENERAL_NAME*|. DECLARE_ASN1_ITEM(GENERAL_NAME) // GENERAL_NAMES is an |ASN1_ITEM| whose ASN.1 type is SEQUENCE OF GeneralName // and C type is |GENERAL_NAMES*|, aka |STACK_OF(GENERAL_NAME)*|. DECLARE_ASN1_ITEM(GENERAL_NAMES) struct X509_VERIFY_PARAM_st { int64_t check_time; // POSIX time to use unsigned long flags; // Various verify flags int purpose; // purpose to check untrusted certificates int trust; // trust setting to check int depth; // Verify depth STACK_OF(ASN1_OBJECT) *policies; // Permissible policies // The following fields specify acceptable peer identities. 
STACK_OF(OPENSSL_STRING) *hosts; // Set of acceptable names unsigned int hostflags; // Flags to control matching features char *email; // If not NULL email address to match size_t emaillen; unsigned char *ip; // If not NULL IP address to match size_t iplen; // Length of IP address unsigned char poison; // Fail all verifications at name checking } /* X509_VERIFY_PARAM */; struct x509_object_st { // one of the above types int type; union { char *ptr; X509 *x509; X509_CRL *crl; EVP_PKEY *pkey; } data; } /* X509_OBJECT */; // NETSCAPE_SPKI is an |ASN1_ITEM| whose ASN.1 type is // SignedPublicKeyAndChallenge and C type is |NETSCAPE_SPKI*|. DECLARE_ASN1_ITEM(NETSCAPE_SPKI) // NETSCAPE_SPKAC is an |ASN1_ITEM| whose ASN.1 type is PublicKeyAndChallenge // and C type is |NETSCAPE_SPKAC*|. DECLARE_ASN1_ITEM(NETSCAPE_SPKAC) // This is a static that defines the function interface struct x509_lookup_method_st { int (*new_item)(X509_LOOKUP *ctx); void (*free)(X509_LOOKUP *ctx); int (*ctrl)(X509_LOOKUP *ctx, int cmd, const char *argc, long argl, char **ret); int (*get_by_subject)(X509_LOOKUP *ctx, int type, X509_NAME *name, X509_OBJECT *ret); } /* X509_LOOKUP_METHOD */; DEFINE_STACK_OF(X509_LOOKUP) // This is used to hold everything. It is used for all certificate // validation. Once we have a certificate chain, the 'verify' // function is then called to actually check the cert chain. struct x509_store_st { // The following is a cache of trusted certs STACK_OF(X509_OBJECT) *objs; // Cache of all objects CRYPTO_MUTEX objs_lock; // These are external lookup methods STACK_OF(X509_LOOKUP) *get_cert_methods; X509_VERIFY_PARAM *param; // Callbacks for various operations X509_STORE_CTX_verify_cb verify_cb; // error callback CRYPTO_refcount_t references; } /* X509_STORE */; // This is the functions plus an instance of the local variables. 
struct x509_lookup_st { const X509_LOOKUP_METHOD *method; // the functions void *method_data; // method data X509_STORE *store_ctx; // who owns us } /* X509_LOOKUP */; // This is a used when verifying cert chains. Since the // gathering of the cert chain can take some time (and have to be // 'retried', this needs to be kept and passed around. struct x509_store_ctx_st { X509_STORE *ctx; // The following are set by the caller X509 *cert; // The cert to check STACK_OF(X509) *untrusted; // chain of X509s - untrusted - passed in STACK_OF(X509_CRL) *crls; // set of CRLs passed in X509_VERIFY_PARAM *param; // trusted_stack, if non-NULL, is a set of trusted certificates to consider // instead of those from |X509_STORE|. STACK_OF(X509) *trusted_stack; // Callbacks for various operations X509_STORE_CTX_verify_cb verify_cb; // error callback // The following is built up int last_untrusted; // index of last untrusted cert STACK_OF(X509) *chain; // chain of X509s - built up and trusted // When something goes wrong, this is why int error_depth; int error; X509 *current_cert; X509_CRL *current_crl; // current CRL X509 *current_crl_issuer; // issuer of current CRL int current_crl_score; // score of current CRL CRYPTO_EX_DATA ex_data; } /* X509_STORE_CTX */; ASN1_TYPE *ASN1_generate_v3(const char *str, const X509V3_CTX *cnf); int X509_CERT_AUX_print(BIO *bp, X509_CERT_AUX *x, int indent); // RSA-PSS functions. // x509_rsa_pss_to_ctx configures |ctx| for an RSA-PSS operation based on // signature algorithm parameters in |sigalg| (which must have type // |NID_rsassaPss|) and key |pkey|. It returns one on success and zero on // error. int x509_rsa_pss_to_ctx(EVP_MD_CTX *ctx, const X509_ALGOR *sigalg, EVP_PKEY *pkey); // x509_rsa_pss_to_ctx sets |algor| to the signature algorithm parameters for // |ctx|, which must have been configured for an RSA-PSS signing operation. It // returns one on success and zero on error. 
int x509_rsa_ctx_to_pss(EVP_MD_CTX *ctx, X509_ALGOR *algor); // x509_print_rsa_pss_params prints a human-readable representation of RSA-PSS // parameters in |sigalg| to |bp|. It returns one on success and zero on // error. int x509_print_rsa_pss_params(BIO *bp, const X509_ALGOR *sigalg, int indent, ASN1_PCTX *pctx); // Signature algorithm functions. // x509_digest_sign_algorithm encodes the signing parameters of |ctx| as an // AlgorithmIdentifier and saves the result in |algor|. It returns one on // success, or zero on error. int x509_digest_sign_algorithm(EVP_MD_CTX *ctx, X509_ALGOR *algor); // x509_digest_verify_init sets up |ctx| for a signature verification operation // with public key |pkey| and parameters from |algor|. The |ctx| argument must // have been initialised with |EVP_MD_CTX_init|. It returns one on success, or // zero on error. int x509_digest_verify_init(EVP_MD_CTX *ctx, const X509_ALGOR *sigalg, EVP_PKEY *pkey); // Path-building functions. // X509_policy_check checks certificate policies in |certs|. |user_policies| is // the user-initial-policy-set. If |user_policies| is NULL or empty, it is // interpreted as anyPolicy. |flags| is a set of |X509_V_FLAG_*| values to // apply. It returns |X509_V_OK| on success and |X509_V_ERR_*| on error. It // additionally sets |*out_current_cert| to the certificate where the error // occurred. If the function succeeded, or the error applies to the entire // chain, it sets |*out_current_cert| to NULL. int X509_policy_check(const STACK_OF(X509) *certs, const STACK_OF(ASN1_OBJECT) *user_policies, unsigned long flags, X509 **out_current_cert); // x509_check_issued_with_callback calls |X509_check_issued|, but allows the // verify callback to override the result. It returns one on success and zero on // error. // // TODO(davidben): Reduce the scope of the verify callback and remove this. 
The // callback only runs with |X509_V_FLAG_CB_ISSUER_CHECK|, which is only used by // one internal project and rust-openssl, who use it by mistake. int x509_check_issued_with_callback(X509_STORE_CTX *ctx, X509 *x, X509 *issuer); // x509v3_bytes_to_hex encodes |len| bytes from |in| to hex and returns a // newly-allocated NUL-terminated string containing the result, or NULL on // allocation error. // // This function was historically named |hex_to_string| in OpenSSL. Despite the // name, |hex_to_string| converted to hex. OPENSSL_EXPORT char *x509v3_bytes_to_hex(const uint8_t *in, size_t len); // x509v3_hex_string_to_bytes decodes |str| in hex and returns a newly-allocated // array containing the result, or NULL on error. On success, it sets |*len| to // the length of the result. Colon separators between bytes in the input are // allowed and ignored. // // This function was historically named |string_to_hex| in OpenSSL. Despite the // name, |string_to_hex| converted from hex. unsigned char *x509v3_hex_to_bytes(const char *str, size_t *len); // x509v3_conf_name_matches returns one if |name| is equal to |cmp| or begins // with |cmp| followed by '.', and zero otherwise. int x509v3_conf_name_matches(const char *name, const char *cmp); // x509v3_looks_like_dns_name returns one if |in| looks like a DNS name and zero // otherwise. OPENSSL_EXPORT int x509v3_looks_like_dns_name(const unsigned char *in, size_t len); // x509v3_cache_extensions fills in a number of fields relating to X.509 // extensions in |x|. It returns one on success and zero if some extensions were // invalid. OPENSSL_EXPORT int x509v3_cache_extensions(X509 *x); // x509v3_a2i_ipadd decodes |ipasc| as an IPv4 or IPv6 address. IPv6 addresses // use colon-separated syntax while IPv4 addresses use dotted decimal syntax. If // it decodes an IPv4 address, it writes the result to the first four bytes of // |ipout| and returns four. 
If it decodes an IPv6 address, it writes the result // to all 16 bytes of |ipout| and returns 16. Otherwise, it returns zero. int x509v3_a2i_ipadd(unsigned char ipout[16], const char *ipasc); // A |BIT_STRING_BITNAME| is used to contain a list of bit names. typedef struct { int bitnum; const char *lname; const char *sname; } BIT_STRING_BITNAME; // x509V3_add_value_asn1_string appends a |CONF_VALUE| with the specified name // and value to |*extlist|. if |*extlist| is NULL, it sets |*extlist| to a // newly-allocated |STACK_OF(CONF_VALUE)| first. It returns one on success and // zero on error. int x509V3_add_value_asn1_string(const char *name, const ASN1_STRING *value, STACK_OF(CONF_VALUE) **extlist); // X509V3_NAME_from_section adds attributes to |nm| by interpreting the // key/value pairs in |dn_sk|. It returns one on success and zero on error. // |chtype|, which should be one of |MBSTRING_*| constants, determines the // character encoding used to interpret values. int X509V3_NAME_from_section(X509_NAME *nm, const STACK_OF(CONF_VALUE) *dn_sk, int chtype); // X509V3_bool_from_string decodes |str| as a boolean. On success, it returns // one and sets |*out_bool| to resulting value. Otherwise, it returns zero. int X509V3_bool_from_string(const char *str, ASN1_BOOLEAN *out_bool); // X509V3_get_value_bool decodes |value| as a boolean. On success, it returns // one and sets |*out_bool| to the resulting value. Otherwise, it returns zero. int X509V3_get_value_bool(const CONF_VALUE *value, ASN1_BOOLEAN *out_bool); // X509V3_get_value_int decodes |value| as an integer. On success, it returns // one and sets |*aint| to the resulting value. Otherwise, it returns zero. If // |*aint| was non-NULL at the start of the function, it frees the previous // value before writing a new one. int X509V3_get_value_int(const CONF_VALUE *value, ASN1_INTEGER **aint); // X509V3_get_section behaves like |NCONF_get_section| but queries |ctx|'s // config database. 
const STACK_OF(CONF_VALUE) *X509V3_get_section(const X509V3_CTX *ctx, const char *section); // X509V3_add_value appends a |CONF_VALUE| containing |name| and |value| to // |*extlist|. It returns one on success and zero on error. If |*extlist| is // NULL, it sets |*extlist| to a newly-allocated |STACK_OF(CONF_VALUE)| // containing the result. Either |name| or |value| may be NULL to omit the // field. // // On failure, if |*extlist| was NULL, |*extlist| will remain NULL when the // function returns. int X509V3_add_value(const char *name, const char *value, STACK_OF(CONF_VALUE) **extlist); // X509V3_add_value_bool behaves like |X509V3_add_value| but stores the value // "TRUE" if |asn1_bool| is non-zero and "FALSE" otherwise. int X509V3_add_value_bool(const char *name, int asn1_bool, STACK_OF(CONF_VALUE) **extlist); // X509V3_add_value_bool behaves like |X509V3_add_value| but stores a string // representation of |aint|. Note this string representation may be decimal or // hexadecimal, depending on the size of |aint|. int X509V3_add_value_int(const char *name, const ASN1_INTEGER *aint, STACK_OF(CONF_VALUE) **extlist); STACK_OF(CONF_VALUE) *X509V3_parse_list(const char *line); #define X509V3_conf_err(val) \ ERR_add_error_data(6, "section:", (val)->section, ",name:", (val)->name, \ ",value:", (val)->value); // GENERAL_NAME_cmp returns zero if |a| and |b| are equal and a non-zero // value otherwise. Note this function does not provide a comparison suitable // for sorting. // // This function is exported for testing. OPENSSL_EXPORT int GENERAL_NAME_cmp(const GENERAL_NAME *a, const GENERAL_NAME *b); // X509_VERIFY_PARAM_lookup returns a pre-defined |X509_VERIFY_PARAM| named by // |name|, or NULL if no such name is defined. 
const X509_VERIFY_PARAM *X509_VERIFY_PARAM_lookup(const char *name); GENERAL_NAME *v2i_GENERAL_NAME(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const CONF_VALUE *cnf); GENERAL_NAME *v2i_GENERAL_NAME_ex(GENERAL_NAME *out, const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const CONF_VALUE *cnf, int is_nc); GENERAL_NAMES *v2i_GENERAL_NAMES(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *nval); // TODO(https://crbug.com/boringssl/407): Make |issuer| const once the // |X509_NAME| issue is resolved. int X509_check_akid(X509 *issuer, const AUTHORITY_KEYID *akid); int X509_is_valid_trust_id(int trust); int X509_PURPOSE_get_trust(const X509_PURPOSE *xp); // TODO(https://crbug.com/boringssl/695): Remove this. int DIST_POINT_set_dpname(DIST_POINT_NAME *dpn, X509_NAME *iname); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_X509_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/name_print.cc ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include static int maybe_write(BIO *out, const void *buf, int len) { // If |out| is NULL, ignore the output but report the length. return out == NULL || BIO_write(out, buf, len) == len; } // do_indent prints |indent| spaces to |out|. 
static int do_indent(BIO *out, int indent) { for (int i = 0; i < indent; i++) { if (!maybe_write(out, " ", 1)) { return 0; } } return 1; } #define FN_WIDTH_LN 25 #define FN_WIDTH_SN 10 static int do_name_ex(BIO *out, const X509_NAME *n, int indent, unsigned long flags) { int prev = -1, orflags; char objtmp[80]; const char *objbuf; int outlen, len; const char *sep_dn, *sep_mv, *sep_eq; int sep_dn_len, sep_mv_len, sep_eq_len; if (indent < 0) { indent = 0; } outlen = indent; if (!do_indent(out, indent)) { return -1; } switch (flags & XN_FLAG_SEP_MASK) { case XN_FLAG_SEP_MULTILINE: sep_dn = "\n"; sep_dn_len = 1; sep_mv = " + "; sep_mv_len = 3; break; case XN_FLAG_SEP_COMMA_PLUS: sep_dn = ","; sep_dn_len = 1; sep_mv = "+"; sep_mv_len = 1; indent = 0; break; case XN_FLAG_SEP_CPLUS_SPC: sep_dn = ", "; sep_dn_len = 2; sep_mv = " + "; sep_mv_len = 3; indent = 0; break; case XN_FLAG_SEP_SPLUS_SPC: sep_dn = "; "; sep_dn_len = 2; sep_mv = " + "; sep_mv_len = 3; indent = 0; break; default: return -1; } if (flags & XN_FLAG_SPC_EQ) { sep_eq = " = "; sep_eq_len = 3; } else { sep_eq = "="; sep_eq_len = 1; } int cnt = X509_NAME_entry_count(n); for (int i = 0; i < cnt; i++) { const X509_NAME_ENTRY *ent; if (flags & XN_FLAG_DN_REV) { ent = X509_NAME_get_entry(n, cnt - i - 1); } else { ent = X509_NAME_get_entry(n, i); } if (prev != -1) { if (prev == X509_NAME_ENTRY_set(ent)) { if (!maybe_write(out, sep_mv, sep_mv_len)) { return -1; } outlen += sep_mv_len; } else { if (!maybe_write(out, sep_dn, sep_dn_len)) { return -1; } outlen += sep_dn_len; if (!do_indent(out, indent)) { return -1; } outlen += indent; } } prev = X509_NAME_ENTRY_set(ent); const ASN1_OBJECT *fn = X509_NAME_ENTRY_get_object(ent); const ASN1_STRING *val = X509_NAME_ENTRY_get_data(ent); assert((flags & XN_FLAG_FN_MASK) == XN_FLAG_FN_SN); int fn_nid = OBJ_obj2nid(fn); if (fn_nid == NID_undef) { OBJ_obj2txt(objtmp, sizeof(objtmp), fn, 1); objbuf = objtmp; } else { objbuf = OBJ_nid2sn(fn_nid); } int objlen = strlen(objbuf); 
if (!maybe_write(out, objbuf, objlen) || !maybe_write(out, sep_eq, sep_eq_len)) { return -1; } outlen += objlen + sep_eq_len; // If the field name is unknown then fix up the DER dump flag. We // might want to limit this further so it will DER dump on anything // other than a few 'standard' fields. if ((fn_nid == NID_undef) && (flags & XN_FLAG_DUMP_UNKNOWN_FIELDS)) { orflags = ASN1_STRFLGS_DUMP_ALL; } else { orflags = 0; } len = ASN1_STRING_print_ex(out, val, flags | orflags); if (len < 0) { return -1; } outlen += len; } return outlen; } int X509_NAME_print_ex(BIO *out, const X509_NAME *nm, int indent, unsigned long flags) { if (flags == XN_FLAG_COMPAT) { return X509_NAME_print(out, nm, indent); } return do_name_ex(out, nm, indent, flags); } int X509_NAME_print_ex_fp(FILE *fp, const X509_NAME *nm, int indent, unsigned long flags) { BIO *bio = NULL; if (fp != NULL) { // If |fp| is NULL, this function returns the number of bytes without // writing. bio = BIO_new_fp(fp, BIO_NOCLOSE); if (bio == NULL) { return -1; } } int ret = X509_NAME_print_ex(bio, nm, indent, flags); BIO_free(bio); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/policy.cc ================================================ /* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include "../internal.h" #include "internal.h" // This file computes the X.509 policy tree, as described in RFC 5280, section // 6.1. It differs in that: // // (1) It does not track "qualifier_set". This is not needed as it is not // output by this implementation. // // (2) It builds a directed acyclic graph, rather than a tree. When a given // policy matches multiple parents, RFC 5280 makes a separate node for // each parent. This representation condenses them into one node with // multiple parents. Thus we refer to this structure as a "policy graph", // rather than a "policy tree". // // (3) "expected_policy_set" is not tracked explicitly and built temporarily // as part of building the graph. // // (4) anyPolicy nodes are not tracked explicitly. // // (5) Some pruning steps are deferred to when policies are evaluated, as a // reachability pass. // An X509_POLICY_NODE is a node in the policy graph. It corresponds to a node // from RFC 5280, section 6.1.2, step (a), but we store some fields differently. typedef struct x509_policy_node_st { // policy is the "valid_policy" field from RFC 5280. ASN1_OBJECT *policy; // parent_policies, if non-empty, is the list of "valid_policy" values for all // nodes which are a parent of this node. In this case, no entry in this list // will be anyPolicy. This list is in no particular order and may contain // duplicates if the corresponding certificate had duplicate mappings. // // If empty, this node has a single parent, anyPolicy. The node is then a root // policies, and is in authorities-constrained-policy-set if it has a path to // a leaf node. 
// // Note it is not possible for a policy to have both anyPolicy and a // concrete policy as a parent. Section 6.1.3, step (d.1.ii) only runs if // there was no match in step (d.1.i). We do not need to represent a parent // list of, say, {anyPolicy, OID1, OID2}. STACK_OF(ASN1_OBJECT) *parent_policies; // mapped is one if this node matches a policy mapping in the certificate and // zero otherwise. int mapped; // reachable is one if this node is reachable from some valid policy in the // end-entity certificate. It is computed during |has_explicit_policy|. int reachable; } X509_POLICY_NODE; DEFINE_STACK_OF(X509_POLICY_NODE) // An X509_POLICY_LEVEL is the collection of nodes at the same depth in the // policy graph. This structure can also be used to represent a level's // "expected_policy_set" values. See |process_policy_mappings|. typedef struct x509_policy_level_st { // nodes is the list of nodes at this depth, except for the anyPolicy node, if // any. This list is sorted by policy OID for efficient lookup. STACK_OF(X509_POLICY_NODE) *nodes; // has_any_policy is one if there is an anyPolicy node at this depth, and zero // otherwise. 
int has_any_policy; } X509_POLICY_LEVEL; DEFINE_STACK_OF(X509_POLICY_LEVEL) static int is_any_policy(const ASN1_OBJECT *obj) { return OBJ_obj2nid(obj) == NID_any_policy; } static void x509_policy_node_free(X509_POLICY_NODE *node) { if (node != NULL) { ASN1_OBJECT_free(node->policy); sk_ASN1_OBJECT_pop_free(node->parent_policies, ASN1_OBJECT_free); OPENSSL_free(node); } } static X509_POLICY_NODE *x509_policy_node_new(const ASN1_OBJECT *policy) { assert(!is_any_policy(policy)); X509_POLICY_NODE *node = reinterpret_cast( OPENSSL_zalloc(sizeof(X509_POLICY_NODE))); if (node == NULL) { return NULL; } node->policy = OBJ_dup(policy); node->parent_policies = sk_ASN1_OBJECT_new_null(); if (node->policy == NULL || node->parent_policies == NULL) { x509_policy_node_free(node); return NULL; } return node; } static int x509_policy_node_cmp(const X509_POLICY_NODE *const *a, const X509_POLICY_NODE *const *b) { return OBJ_cmp((*a)->policy, (*b)->policy); } static void x509_policy_level_free(X509_POLICY_LEVEL *level) { if (level != NULL) { sk_X509_POLICY_NODE_pop_free(level->nodes, x509_policy_node_free); OPENSSL_free(level); } } static X509_POLICY_LEVEL *x509_policy_level_new(void) { X509_POLICY_LEVEL *level = reinterpret_cast( OPENSSL_zalloc(sizeof(X509_POLICY_LEVEL))); if (level == NULL) { return NULL; } level->nodes = sk_X509_POLICY_NODE_new(x509_policy_node_cmp); if (level->nodes == NULL) { x509_policy_level_free(level); return NULL; } return level; } static int x509_policy_level_is_empty(const X509_POLICY_LEVEL *level) { return !level->has_any_policy && sk_X509_POLICY_NODE_num(level->nodes) == 0; } static void x509_policy_level_clear(X509_POLICY_LEVEL *level) { level->has_any_policy = 0; for (size_t i = 0; i < sk_X509_POLICY_NODE_num(level->nodes); i++) { x509_policy_node_free(sk_X509_POLICY_NODE_value(level->nodes, i)); } sk_X509_POLICY_NODE_zero(level->nodes); } // x509_policy_level_find returns the node in |level| corresponding to |policy|, // or NULL if none exists. 
static X509_POLICY_NODE *x509_policy_level_find(X509_POLICY_LEVEL *level,
                                                const ASN1_OBJECT *policy) {
  assert(sk_X509_POLICY_NODE_is_sorted(level->nodes));
  // Search with a stack-allocated key node. Only the |policy| field is read
  // by the stack's comparator, so the other fields may be left uninitialized.
  X509_POLICY_NODE node;
  node.policy = (ASN1_OBJECT *)policy;
  size_t idx;
  if (!sk_X509_POLICY_NODE_find(level->nodes, &idx, &node)) {
    return NULL;
  }
  return sk_X509_POLICY_NODE_value(level->nodes, idx);
}

// x509_policy_level_add_nodes adds the nodes in |nodes| to |level|. It returns
// one on success and zero on error. No policy in |nodes| may already be
// present in |level|. This function modifies |nodes| to avoid making a copy,
// but the caller is still responsible for releasing |nodes| itself.
//
// This function is used to add nodes to |level| in bulk, and avoid resorting
// |level| after each addition.
static int x509_policy_level_add_nodes(X509_POLICY_LEVEL *level,
                                       STACK_OF(X509_POLICY_NODE) *nodes) {
  for (size_t i = 0; i < sk_X509_POLICY_NODE_num(nodes); i++) {
    X509_POLICY_NODE *node = sk_X509_POLICY_NODE_value(nodes, i);
    if (!sk_X509_POLICY_NODE_push(level->nodes, node)) {
      return 0;
    }
    // Ownership of |node| has moved into |level->nodes|. Clear the slot in
    // |nodes| so the caller's eventual pop_free does not double-free it.
    sk_X509_POLICY_NODE_set(nodes, i, NULL);
  }
  sk_X509_POLICY_NODE_sort(level->nodes);

#if !defined(NDEBUG)
  // There should be no duplicate nodes.
  for (size_t i = 1; i < sk_X509_POLICY_NODE_num(level->nodes); i++) {
    assert(OBJ_cmp(sk_X509_POLICY_NODE_value(level->nodes, i - 1)->policy,
                   sk_X509_POLICY_NODE_value(level->nodes, i)->policy) != 0);
  }
#endif
  return 1;
}

// policyinfo_cmp compares two |POLICYINFO|s by policy OID, for use as a
// stack comparison function.
static int policyinfo_cmp(const POLICYINFO *const *a,
                          const POLICYINFO *const *b) {
  return OBJ_cmp((*a)->policyid, (*b)->policyid);
}

// delete_if_not_in_policies is a |sk_X509_POLICY_NODE_delete_if| callback.
// |data| must be a |CERTIFICATEPOLICIES| sorted by |policyinfo_cmp|. It
// returns one (and frees |node|) when |node|'s policy does not appear in the
// extension, and zero (keep) otherwise.
static int delete_if_not_in_policies(X509_POLICY_NODE *node, void *data) {
  const CERTIFICATEPOLICIES *policies =
      reinterpret_cast<const CERTIFICATEPOLICIES *>(data);
  assert(sk_POLICYINFO_is_sorted(policies));
  POLICYINFO info;
  info.policyid = node->policy;
  if (sk_POLICYINFO_find(policies, NULL, &info)) {
    return 0;
  }
  x509_policy_node_free(node);
  return 1;
}

// process_certificate_policies updates |level| to incorporate |x509|'s
// certificate policies extension.
// This implements steps (d) and (e) of RFC 5280, section 6.1.3. |level| must
// contain the previous level's "expected_policy_set" information. For all but
// the top-most level, this is the output of |process_policy_mappings|.
// |any_policy_allowed| specifies whether anyPolicy is allowed or inhibited,
// taking into account the exception for self-issued certificates.
static int process_certificate_policies(const X509 *x509,
                                        X509_POLICY_LEVEL *level,
                                        int any_policy_allowed) {
  int ret = 0;
  int critical;
  STACK_OF(X509_POLICY_NODE) *new_nodes = NULL;
  CERTIFICATEPOLICIES *policies = reinterpret_cast<CERTIFICATEPOLICIES *>(
      X509_get_ext_d2i(x509, NID_certificate_policies, &critical, NULL));
  // The extra scope allows |goto err| to jump over the local declarations
  // below without skipping their initialization.
  {
    if (policies == NULL) {
      if (critical != -1) {
        return 0;  // Syntax error in the extension.
      }
      // RFC 5280, section 6.1.3, step (e).
      x509_policy_level_clear(level);
      return 1;
    }

    // certificatePolicies may not be empty. See RFC 5280, section 4.2.1.4.
    // TODO(https://crbug.com/boringssl/443): Move this check into the parser.
    if (sk_POLICYINFO_num(policies) == 0) {
      OPENSSL_PUT_ERROR(X509, X509_R_INVALID_POLICY_EXTENSION);
      goto err;
    }

    sk_POLICYINFO_set_cmp_func(policies, policyinfo_cmp);
    sk_POLICYINFO_sort(policies);
    int cert_has_any_policy = 0;
    for (size_t i = 0; i < sk_POLICYINFO_num(policies); i++) {
      const POLICYINFO *policy = sk_POLICYINFO_value(policies, i);
      if (is_any_policy(policy->policyid)) {
        cert_has_any_policy = 1;
      }
      // |policies| is sorted, so duplicates are adjacent.
      if (i > 0 &&
          OBJ_cmp(sk_POLICYINFO_value(policies, i - 1)->policyid,
                  policy->policyid) == 0) {
        // Per RFC 5280, section 4.2.1.4, |policies| may not have duplicates.
        OPENSSL_PUT_ERROR(X509, X509_R_INVALID_POLICY_EXTENSION);
        goto err;
      }
    }

    // This does the same thing as RFC 5280, section 6.1.3, step (d), though in
    // a slightly different order. |level| currently contains
    // "expected_policy_set" values of the previous level. See
    // |process_policy_mappings| for details.
    const int previous_level_has_any_policy = level->has_any_policy;

    // First, we handle steps (d.1.i) and (d.2). The net effect of these two
    // steps is to intersect |level| with |policies|, ignoring anyPolicy if it
    // is inhibited.
    if (!cert_has_any_policy || !any_policy_allowed) {
      sk_X509_POLICY_NODE_delete_if(level->nodes, delete_if_not_in_policies,
                                    policies);
      level->has_any_policy = 0;
    }

    // Step (d.1.ii) may attach new nodes to the previous level's anyPolicy
    // node.
    if (previous_level_has_any_policy) {
      new_nodes = sk_X509_POLICY_NODE_new_null();
      if (new_nodes == NULL) {
        goto err;
      }
      for (size_t i = 0; i < sk_POLICYINFO_num(policies); i++) {
        const POLICYINFO *policy = sk_POLICYINFO_value(policies, i);
        // Though we've reordered the steps slightly, |policy| is in |level| if
        // and only if it would have been a match in step (d.1.ii).
        if (!is_any_policy(policy->policyid) &&
            x509_policy_level_find(level, policy->policyid) == NULL) {
          X509_POLICY_NODE *node = x509_policy_node_new(policy->policyid);
          if (node == NULL ||  //
              !sk_X509_POLICY_NODE_push(new_nodes, node)) {
            x509_policy_node_free(node);
            goto err;
          }
        }
      }
      if (!x509_policy_level_add_nodes(level, new_nodes)) {
        goto err;
      }
    }

    ret = 1;
  }

err:
  sk_X509_POLICY_NODE_pop_free(new_nodes, x509_policy_node_free);
  CERTIFICATEPOLICIES_free(policies);
  return ret;
}

// compare_issuer_policy compares two |POLICY_MAPPING|s by issuerDomainPolicy,
// for use as a stack comparison function.
static int compare_issuer_policy(const POLICY_MAPPING *const *a,
                                 const POLICY_MAPPING *const *b) {
  return OBJ_cmp((*a)->issuerDomainPolicy, (*b)->issuerDomainPolicy);
}

// compare_subject_policy compares two |POLICY_MAPPING|s by
// subjectDomainPolicy, for use as a stack comparison function.
static int compare_subject_policy(const POLICY_MAPPING *const *a,
                                  const POLICY_MAPPING *const *b) {
  return OBJ_cmp((*a)->subjectDomainPolicy, (*b)->subjectDomainPolicy);
}

// delete_if_mapped is a |sk_X509_POLICY_NODE_delete_if| callback. It returns
// one (and frees |node|) when |node|'s policy appears as an
// issuerDomainPolicy in |data|, and zero (keep) otherwise.
static int delete_if_mapped(X509_POLICY_NODE *node, void *data) {
  const POLICY_MAPPINGS *mappings =
      reinterpret_cast<const POLICY_MAPPINGS *>(data);
  // |mappings| must have been sorted by |compare_issuer_policy|.
  assert(sk_POLICY_MAPPING_is_sorted(mappings));
  POLICY_MAPPING mapping;
  mapping.issuerDomainPolicy = node->policy;
  if (!sk_POLICY_MAPPING_find(mappings, /*out_index=*/NULL, &mapping)) {
    return 0;
  }
  x509_policy_node_free(node);
  return 1;
}

// process_policy_mappings processes the policy mappings extension of |cert|,
// whose corresponding graph level is |level|. |mapping_allowed| specifies
// whether policy mapping is inhibited at this point. On success, it returns an
// |X509_POLICY_LEVEL| containing the "expected_policy_set" for |level|. On
// error, it returns NULL. This implements steps (a) and (b) of RFC 5280,
// section 6.1.4.
//
// We represent the "expected_policy_set" as an |X509_POLICY_LEVEL|.
// |has_any_policy| indicates whether there is an anyPolicy node with
// "expected_policy_set" of {anyPolicy}. If a node with policy oid P1 contains
// P2 in its "expected_policy_set", the level will contain a node of policy P2
// with P1 in |parent_policies|.
//
// This is equivalent to the |X509_POLICY_LEVEL| that would result if the next
// certificate contained anyPolicy. |process_certificate_policies| will filter
// this result down to compute the actual level.
static X509_POLICY_LEVEL *process_policy_mappings(const X509 *cert,
                                                  X509_POLICY_LEVEL *level,
                                                  int mapping_allowed) {
  int ok = 0;
  STACK_OF(X509_POLICY_NODE) *new_nodes = NULL;
  X509_POLICY_LEVEL *next = NULL;
  int critical;
  POLICY_MAPPINGS *mappings = reinterpret_cast<POLICY_MAPPINGS *>(
      X509_get_ext_d2i(cert, NID_policy_mappings, &critical, NULL));
  // The extra scope allows |goto err| to jump over the local declarations
  // below without skipping their initialization.
  {
    if (mappings == NULL && critical != -1) {
      // Syntax error in the policy mappings extension.
      goto err;
    }

    if (mappings != NULL) {
      // PolicyMappings may not be empty. See RFC 5280, section 4.2.1.5.
      // TODO(https://crbug.com/boringssl/443): Move this check into the
      // parser.
      if (sk_POLICY_MAPPING_num(mappings) == 0) {
        OPENSSL_PUT_ERROR(X509, X509_R_INVALID_POLICY_EXTENSION);
        goto err;
      }

      // RFC 5280, section 6.1.4, step (a). Neither side of a mapping may be
      // anyPolicy.
      for (size_t i = 0; i < sk_POLICY_MAPPING_num(mappings); i++) {
        POLICY_MAPPING *mapping = sk_POLICY_MAPPING_value(mappings, i);
        if (is_any_policy(mapping->issuerDomainPolicy) ||
            is_any_policy(mapping->subjectDomainPolicy)) {
          goto err;
        }
      }

      // Sort to group by issuerDomainPolicy.
      sk_POLICY_MAPPING_set_cmp_func(mappings, compare_issuer_policy);
      sk_POLICY_MAPPING_sort(mappings);

      if (mapping_allowed) {
        // Mark nodes as mapped, and add any nodes to |level| which may be
        // needed as part of RFC 5280, section 6.1.4, step (b.1).
        new_nodes = sk_X509_POLICY_NODE_new_null();
        if (new_nodes == NULL) {
          goto err;
        }
        const ASN1_OBJECT *last_policy = NULL;
        for (size_t i = 0; i < sk_POLICY_MAPPING_num(mappings); i++) {
          const POLICY_MAPPING *mapping = sk_POLICY_MAPPING_value(mappings, i);
          // There may be multiple mappings with the same |issuerDomainPolicy|.
          // They are adjacent after sorting, so only process the first.
          if (last_policy != NULL &&
              OBJ_cmp(mapping->issuerDomainPolicy, last_policy) == 0) {
            continue;
          }
          last_policy = mapping->issuerDomainPolicy;

          X509_POLICY_NODE *node =
              x509_policy_level_find(level, mapping->issuerDomainPolicy);
          if (node == NULL) {
            if (!level->has_any_policy) {
              continue;
            }
            node = x509_policy_node_new(mapping->issuerDomainPolicy);
            if (node == NULL ||  //
                !sk_X509_POLICY_NODE_push(new_nodes, node)) {
              x509_policy_node_free(node);
              goto err;
            }
          }
          node->mapped = 1;
        }
        if (!x509_policy_level_add_nodes(level, new_nodes)) {
          goto err;
        }
      } else {
        // RFC 5280, section 6.1.4, step (b.2). If mapping is inhibited, delete
        // all mapped nodes.
        sk_X509_POLICY_NODE_delete_if(level->nodes, delete_if_mapped, mappings);
        sk_POLICY_MAPPING_pop_free(mappings, POLICY_MAPPING_free);
        mappings = NULL;
      }
    }

    // If a node was not mapped, it retains the original "explicit_policy_set"
    // value, itself. Add those to |mappings|.
    if (mappings == NULL) {
      mappings = sk_POLICY_MAPPING_new_null();
      if (mappings == NULL) {
        goto err;
      }
    }
    for (size_t i = 0; i < sk_X509_POLICY_NODE_num(level->nodes); i++) {
      X509_POLICY_NODE *node = sk_X509_POLICY_NODE_value(level->nodes, i);
      if (!node->mapped) {
        POLICY_MAPPING *mapping = POLICY_MAPPING_new();
        if (mapping == NULL) {
          goto err;
        }
        mapping->issuerDomainPolicy = OBJ_dup(node->policy);
        mapping->subjectDomainPolicy = OBJ_dup(node->policy);
        if (mapping->issuerDomainPolicy == NULL ||
            mapping->subjectDomainPolicy == NULL ||
            !sk_POLICY_MAPPING_push(mappings, mapping)) {
          POLICY_MAPPING_free(mapping);
          goto err;
        }
      }
    }

    // Sort to group by subjectDomainPolicy.
    sk_POLICY_MAPPING_set_cmp_func(mappings, compare_subject_policy);
    sk_POLICY_MAPPING_sort(mappings);

    // Convert |mappings| to our "expected_policy_set" representation.
    next = x509_policy_level_new();
    if (next == NULL) {
      goto err;
    }
    next->has_any_policy = level->has_any_policy;

    X509_POLICY_NODE *last_node = NULL;
    for (size_t i = 0; i < sk_POLICY_MAPPING_num(mappings); i++) {
      POLICY_MAPPING *mapping = sk_POLICY_MAPPING_value(mappings, i);
      // Skip mappings where |issuerDomainPolicy| does not appear in the graph.
      if (!level->has_any_policy &&
          x509_policy_level_find(level, mapping->issuerDomainPolicy) == NULL) {
        continue;
      }

      // Mappings with equal subjectDomainPolicy are adjacent after sorting,
      // so a new output node is only needed when the policy changes.
      if (last_node == NULL ||
          OBJ_cmp(last_node->policy, mapping->subjectDomainPolicy) != 0) {
        last_node = x509_policy_node_new(mapping->subjectDomainPolicy);
        if (last_node == NULL ||
            !sk_X509_POLICY_NODE_push(next->nodes, last_node)) {
          x509_policy_node_free(last_node);
          goto err;
        }
      }

      if (!sk_ASN1_OBJECT_push(last_node->parent_policies,
                               mapping->issuerDomainPolicy)) {
        goto err;
      }
      // Ownership of issuerDomainPolicy moved into |parent_policies|; clear it
      // so the final pop_free of |mappings| does not double-free.
      mapping->issuerDomainPolicy = NULL;
    }

    sk_X509_POLICY_NODE_sort(next->nodes);
    ok = 1;
  }

err:
  if (!ok) {
    x509_policy_level_free(next);
    next = NULL;
  }
  sk_POLICY_MAPPING_pop_free(mappings, POLICY_MAPPING_free);
  sk_X509_POLICY_NODE_pop_free(new_nodes, x509_policy_node_free);
  return next;
}

// apply_skip_certs, if |skip_certs| is non-NULL, sets |*value| to the minimum
// of its current value and |skip_certs|. It returns one on success and zero if
// |skip_certs| is negative.
static int apply_skip_certs(const ASN1_INTEGER *skip_certs, size_t *value) {
  if (skip_certs == NULL) {
    return 1;
  }

  // TODO(https://crbug.com/boringssl/443): Move this check into the parser.
  if (skip_certs->type & V_ASN1_NEG) {
    OPENSSL_PUT_ERROR(X509, X509_R_INVALID_POLICY_EXTENSION);
    return 0;
  }

  // If |skip_certs| does not fit in |uint64_t|, it must exceed |*value|, so
  // a conversion failure can be ignored (the error queue is cleared below).
  uint64_t u64;
  if (ASN1_INTEGER_get_uint64(&u64, skip_certs) && u64 < *value) {
    *value = (size_t)u64;
  }
  ERR_clear_error();
  return 1;
}

// process_policy_constraints updates |*explicit_policy|, |*policy_mapping|,
// and |*inhibit_any_policy| according to |x509|'s policy constraints and
// inhibit anyPolicy extensions. It returns one on success and zero on error.
// This implements steps (i) and (j) of RFC 5280, section 6.1.4.
static int process_policy_constraints(const X509 *x509, size_t *explicit_policy, size_t *policy_mapping, size_t *inhibit_any_policy) { int critical; POLICY_CONSTRAINTS *constraints = reinterpret_cast( X509_get_ext_d2i(x509, NID_policy_constraints, &critical, NULL)); if (constraints == NULL && critical != -1) { return 0; } if (constraints != NULL) { if (constraints->requireExplicitPolicy == NULL && constraints->inhibitPolicyMapping == NULL) { // Per RFC 5280, section 4.2.1.11, at least one of the fields must be // present. OPENSSL_PUT_ERROR(X509, X509_R_INVALID_POLICY_EXTENSION); POLICY_CONSTRAINTS_free(constraints); return 0; } int ok = apply_skip_certs(constraints->requireExplicitPolicy, explicit_policy) && apply_skip_certs(constraints->inhibitPolicyMapping, policy_mapping); POLICY_CONSTRAINTS_free(constraints); if (!ok) { return 0; } } ASN1_INTEGER *inhibit_any_policy_ext = reinterpret_cast( X509_get_ext_d2i(x509, NID_inhibit_any_policy, &critical, NULL)); if (inhibit_any_policy_ext == NULL && critical != -1) { return 0; } int ok = apply_skip_certs(inhibit_any_policy_ext, inhibit_any_policy); ASN1_INTEGER_free(inhibit_any_policy_ext); return ok; } // has_explicit_policy returns one if the set of authority-space policy OIDs // |levels| has some non-empty intersection with |user_policies|, and zero // otherwise. This mirrors the logic in RFC 5280, section 6.1.5, step (g). This // function modifies |levels| and should only be called at the end of policy // evaluation. static int has_explicit_policy(STACK_OF(X509_POLICY_LEVEL) *levels, const STACK_OF(ASN1_OBJECT) *user_policies) { assert(user_policies == NULL || sk_ASN1_OBJECT_is_sorted(user_policies)); // Step (g.i). If the policy graph is empty, the intersection is empty. 
size_t num_levels = sk_X509_POLICY_LEVEL_num(levels); X509_POLICY_LEVEL *level = sk_X509_POLICY_LEVEL_value(levels, num_levels - 1); if (x509_policy_level_is_empty(level)) { return 0; } // If |user_policies| is empty, we interpret it as having a single anyPolicy // value. The caller may also have supplied anyPolicy explicitly. int user_has_any_policy = sk_ASN1_OBJECT_num(user_policies) == 0; for (size_t i = 0; i < sk_ASN1_OBJECT_num(user_policies); i++) { if (is_any_policy(sk_ASN1_OBJECT_value(user_policies, i))) { user_has_any_policy = 1; break; } } // Step (g.ii). If the policy graph is not empty and the user set contains // anyPolicy, the intersection is the entire (non-empty) graph. if (user_has_any_policy) { return 1; } // Step (g.iii) does not delete anyPolicy nodes, so if the graph has // anyPolicy, some explicit policy will survive. The actual intersection may // synthesize some nodes in step (g.iii.3), but we do not return the policy // list itself, so we skip actually computing this. if (level->has_any_policy) { return 1; } // We defer pruning the tree, so as we look for nodes with parent anyPolicy, // step (g.iii.1), we must limit to nodes reachable from the bottommost level. // Start by marking each of those nodes as reachable. for (size_t i = 0; i < sk_X509_POLICY_NODE_num(level->nodes); i++) { sk_X509_POLICY_NODE_value(level->nodes, i)->reachable = 1; } for (size_t i = num_levels - 1; i < num_levels; i--) { level = sk_X509_POLICY_LEVEL_value(levels, i); for (size_t j = 0; j < sk_X509_POLICY_NODE_num(level->nodes); j++) { X509_POLICY_NODE *node = sk_X509_POLICY_NODE_value(level->nodes, j); if (!node->reachable) { continue; } if (sk_ASN1_OBJECT_num(node->parent_policies) == 0) { // |node|'s parent is anyPolicy and is part of "valid_policy_node_set". // If it exists in |user_policies|, the intersection is non-empty and we // can return immediately. 
if (sk_ASN1_OBJECT_find(user_policies, /*out_index=*/NULL, node->policy)) { return 1; } } else if (i > 0) { // |node|'s parents are concrete policies. Mark the parents reachable, // to be inspected by the next loop iteration. X509_POLICY_LEVEL *prev = sk_X509_POLICY_LEVEL_value(levels, i - 1); for (size_t k = 0; k < sk_ASN1_OBJECT_num(node->parent_policies); k++) { X509_POLICY_NODE *parent = x509_policy_level_find( prev, sk_ASN1_OBJECT_value(node->parent_policies, k)); if (parent != NULL) { parent->reachable = 1; } } } } } return 0; } static int asn1_object_cmp(const ASN1_OBJECT *const *a, const ASN1_OBJECT *const *b) { return OBJ_cmp(*a, *b); } int X509_policy_check(const STACK_OF(X509) *certs, const STACK_OF(ASN1_OBJECT) *user_policies, unsigned long flags, X509 **out_current_cert) { *out_current_cert = NULL; int ret = X509_V_ERR_OUT_OF_MEM; X509_POLICY_LEVEL *level = NULL; STACK_OF(X509_POLICY_LEVEL) *levels = NULL; STACK_OF(ASN1_OBJECT) *user_policies_sorted = NULL; size_t num_certs = sk_X509_num(certs); // Skip policy checking if the chain is just the trust anchor. if (num_certs <= 1) { return X509_V_OK; } // See RFC 5280, section 6.1.2, steps (d) through (f). size_t explicit_policy = (flags & X509_V_FLAG_EXPLICIT_POLICY) ? 0 : num_certs + 1; size_t inhibit_any_policy = (flags & X509_V_FLAG_INHIBIT_ANY) ? 0 : num_certs + 1; size_t policy_mapping = (flags & X509_V_FLAG_INHIBIT_MAP) ? 0 : num_certs + 1; levels = sk_X509_POLICY_LEVEL_new_null(); if (levels == NULL) { goto err; } for (size_t i = num_certs - 2; i < num_certs; i--) { X509 *cert = sk_X509_value(certs, i); if (!x509v3_cache_extensions(cert)) { goto err; } const int is_self_issued = (cert->ex_flags & EXFLAG_SI) != 0; if (level == NULL) { assert(i == num_certs - 2); level = x509_policy_level_new(); if (level == NULL) { goto err; } level->has_any_policy = 1; } // RFC 5280, section 6.1.3, steps (d) and (e). |any_policy_allowed| is // computed as in step (d.2). 
const int any_policy_allowed = inhibit_any_policy > 0 || (i > 0 && is_self_issued); if (!process_certificate_policies(cert, level, any_policy_allowed)) { ret = X509_V_ERR_INVALID_POLICY_EXTENSION; *out_current_cert = cert; goto err; } // RFC 5280, section 6.1.3, step (f). if (explicit_policy == 0 && x509_policy_level_is_empty(level)) { ret = X509_V_ERR_NO_EXPLICIT_POLICY; goto err; } // Insert into the list. if (!sk_X509_POLICY_LEVEL_push(levels, level)) { goto err; } X509_POLICY_LEVEL *current_level = level; level = NULL; // If this is not the leaf certificate, we go to section 6.1.4. If it // is the leaf certificate, we go to section 6.1.5 instead. if (i != 0) { // RFC 5280, section 6.1.4, steps (a) and (b). level = process_policy_mappings(cert, current_level, policy_mapping > 0); if (level == NULL) { ret = X509_V_ERR_INVALID_POLICY_EXTENSION; *out_current_cert = cert; goto err; } } // RFC 5280, section 6.1.4, step (h-j) for non-leaves, and section 6.1.5, // step (a-b) for leaves. In the leaf case, RFC 5280 says only to update // |explicit_policy|, but |policy_mapping| and |inhibit_any_policy| are no // longer read at this point, so we use the same process. if (i == 0 || !is_self_issued) { if (explicit_policy > 0) { explicit_policy--; } if (policy_mapping > 0) { policy_mapping--; } if (inhibit_any_policy > 0) { inhibit_any_policy--; } } if (!process_policy_constraints(cert, &explicit_policy, &policy_mapping, &inhibit_any_policy)) { ret = X509_V_ERR_INVALID_POLICY_EXTENSION; *out_current_cert = cert; goto err; } } // RFC 5280, section 6.1.5, step (g). We do not output the policy set, so it // is only necessary to check if the user-constrained-policy-set is not empty. if (explicit_policy == 0) { // Build a sorted copy of |user_policies| for more efficient lookup. 
if (user_policies != NULL) { user_policies_sorted = sk_ASN1_OBJECT_dup(user_policies); if (user_policies_sorted == NULL) { goto err; } sk_ASN1_OBJECT_set_cmp_func(user_policies_sorted, asn1_object_cmp); sk_ASN1_OBJECT_sort(user_policies_sorted); } if (!has_explicit_policy(levels, user_policies_sorted)) { ret = X509_V_ERR_NO_EXPLICIT_POLICY; goto err; } } ret = X509_V_OK; err: x509_policy_level_free(level); // |user_policies_sorted|'s contents are owned by |user_policies|, so we do // not use |sk_ASN1_OBJECT_pop_free|. sk_ASN1_OBJECT_free(user_policies_sorted); sk_X509_POLICY_LEVEL_pop_free(levels, x509_policy_level_free); return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/rsa_pss.cc ================================================ /* * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include "internal.h" static int rsa_pss_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it, void *exarg) { if (operation == ASN1_OP_FREE_PRE) { RSA_PSS_PARAMS *pss = (RSA_PSS_PARAMS *)*pval; X509_ALGOR_free(pss->maskHash); } return 1; } ASN1_SEQUENCE_cb(RSA_PSS_PARAMS, rsa_pss_cb) = { ASN1_EXP_OPT(RSA_PSS_PARAMS, hashAlgorithm, X509_ALGOR, 0), ASN1_EXP_OPT(RSA_PSS_PARAMS, maskGenAlgorithm, X509_ALGOR, 1), ASN1_EXP_OPT(RSA_PSS_PARAMS, saltLength, ASN1_INTEGER, 2), ASN1_EXP_OPT(RSA_PSS_PARAMS, trailerField, ASN1_INTEGER, 3), } ASN1_SEQUENCE_END_cb(RSA_PSS_PARAMS, RSA_PSS_PARAMS) IMPLEMENT_ASN1_FUNCTIONS_const(RSA_PSS_PARAMS) // Given an MGF1 Algorithm ID decode to an Algorithm Identifier static X509_ALGOR *rsa_mgf1_decode(const X509_ALGOR *alg) { if (OBJ_obj2nid(alg->algorithm) != 
NID_mgf1 || alg->parameter == NULL || alg->parameter->type != V_ASN1_SEQUENCE) { return NULL; } const uint8_t *p = alg->parameter->value.sequence->data; int plen = alg->parameter->value.sequence->length; return d2i_X509_ALGOR(NULL, &p, plen); } static RSA_PSS_PARAMS *rsa_pss_decode(const X509_ALGOR *alg) { if (alg->parameter == NULL || alg->parameter->type != V_ASN1_SEQUENCE) { return NULL; } const uint8_t *p = alg->parameter->value.sequence->data; int plen = alg->parameter->value.sequence->length; return d2i_RSA_PSS_PARAMS(NULL, &p, plen); } static int is_allowed_pss_md(const EVP_MD *md) { int md_type = EVP_MD_type(md); return md_type == NID_sha256 || md_type == NID_sha384 || md_type == NID_sha512; } // rsa_md_to_algor sets |*palg| to an |X509_ALGOR| describing the digest |md|, // which must be an allowed PSS digest. static int rsa_md_to_algor(X509_ALGOR **palg, const EVP_MD *md) { // SHA-1 should be omitted (DEFAULT), but we do not allow SHA-1. assert(is_allowed_pss_md(md)); *palg = X509_ALGOR_new(); if (*palg == NULL) { return 0; } if (!X509_ALGOR_set_md(*palg, md)) { X509_ALGOR_free(*palg); *palg = NULL; return 0; } return 1; } // rsa_md_to_mgf1 sets |*palg| to an |X509_ALGOR| describing MGF-1 with the // digest |mgf1md|, which must be an allowed PSS digest. static int rsa_md_to_mgf1(X509_ALGOR **palg, const EVP_MD *mgf1md) { // SHA-1 should be omitted (DEFAULT), but we do not allow SHA-1. 
assert(is_allowed_pss_md(mgf1md)); X509_ALGOR *algtmp = NULL; ASN1_STRING *stmp = NULL; // need to embed algorithm ID inside another if (!rsa_md_to_algor(&algtmp, mgf1md) || !ASN1_item_pack(algtmp, ASN1_ITEM_rptr(X509_ALGOR), &stmp)) { goto err; } *palg = X509_ALGOR_new(); if (!*palg) { goto err; } if (!X509_ALGOR_set0(*palg, OBJ_nid2obj(NID_mgf1), V_ASN1_SEQUENCE, stmp)) { goto err; } stmp = NULL; err: ASN1_STRING_free(stmp); X509_ALGOR_free(algtmp); if (*palg) { return 1; } return 0; } static const EVP_MD *rsa_algor_to_md(const X509_ALGOR *alg) { if (!alg) { // If omitted, PSS defaults to SHA-1, which we do not allow. OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PSS_PARAMETERS); return NULL; } const EVP_MD *md = EVP_get_digestbyobj(alg->algorithm); if (md == NULL || !is_allowed_pss_md(md)) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PSS_PARAMETERS); return NULL; } return md; } static const EVP_MD *rsa_mgf1_to_md(const X509_ALGOR *alg) { if (!alg) { // If omitted, PSS defaults to MGF-1 with SHA-1, which we do not allow. OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PSS_PARAMETERS); return NULL; } // Check mask and lookup mask hash algorithm. 
X509_ALGOR *maskHash = rsa_mgf1_decode(alg); if (maskHash == NULL) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PSS_PARAMETERS); return NULL; } const EVP_MD *ret = rsa_algor_to_md(maskHash); X509_ALGOR_free(maskHash); return ret; } int x509_rsa_ctx_to_pss(EVP_MD_CTX *ctx, X509_ALGOR *algor) { const EVP_MD *sigmd, *mgf1md; int saltlen; if (!EVP_PKEY_CTX_get_signature_md(ctx->pctx, &sigmd) || !EVP_PKEY_CTX_get_rsa_mgf1_md(ctx->pctx, &mgf1md) || !EVP_PKEY_CTX_get_rsa_pss_saltlen(ctx->pctx, &saltlen)) { return 0; } if (sigmd != mgf1md || !is_allowed_pss_md(sigmd)) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PSS_PARAMETERS); return 0; } int md_len = (int)EVP_MD_size(sigmd); if (saltlen == -1) { saltlen = md_len; } else if (saltlen != md_len) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PSS_PARAMETERS); return 0; } int ret = 0; ASN1_STRING *os = NULL; RSA_PSS_PARAMS *pss = RSA_PSS_PARAMS_new(); if (!pss) { goto err; } // The DEFAULT value is 20, but this does not match any supported digest. assert(saltlen != 20); pss->saltLength = ASN1_INTEGER_new(); if (!pss->saltLength || // !ASN1_INTEGER_set_int64(pss->saltLength, saltlen)) { goto err; } if (!rsa_md_to_algor(&pss->hashAlgorithm, sigmd) || !rsa_md_to_mgf1(&pss->maskGenAlgorithm, mgf1md)) { goto err; } // Finally create string with pss parameter encoding. 
if (!ASN1_item_pack(pss, ASN1_ITEM_rptr(RSA_PSS_PARAMS), &os)) { goto err; } if (!X509_ALGOR_set0(algor, OBJ_nid2obj(NID_rsassaPss), V_ASN1_SEQUENCE, os)) { goto err; } os = NULL; ret = 1; err: RSA_PSS_PARAMS_free(pss); ASN1_STRING_free(os); return ret; } int x509_rsa_pss_to_ctx(EVP_MD_CTX *ctx, const X509_ALGOR *sigalg, EVP_PKEY *pkey) { assert(OBJ_obj2nid(sigalg->algorithm) == NID_rsassaPss); // Decode PSS parameters int ret = 0; RSA_PSS_PARAMS *pss = rsa_pss_decode(sigalg); { if (pss == NULL) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PSS_PARAMETERS); goto err; } const EVP_MD *mgf1md = rsa_mgf1_to_md(pss->maskGenAlgorithm); const EVP_MD *md = rsa_algor_to_md(pss->hashAlgorithm); if (mgf1md == NULL || md == NULL) { goto err; } // We require the MGF-1 and signing hashes to match. if (mgf1md != md) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PSS_PARAMETERS); goto err; } // We require the salt length be the hash length. The DEFAULT value is 20, // but this does not match any supported salt length. uint64_t salt_len = 0; if (pss->saltLength == NULL || !ASN1_INTEGER_get_uint64(&salt_len, pss->saltLength) || salt_len != EVP_MD_size(md)) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PSS_PARAMETERS); goto err; } assert(salt_len <= INT_MAX); // The trailer field must be 1 (0xbc). This value is DEFAULT, so the // structure is required to omit it in DER. Although a syntax error, we also // tolerate an explicitly-encoded value. See the certificates in // cl/362617931. 
if (pss->trailerField != NULL && ASN1_INTEGER_get(pss->trailerField) != 1) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PSS_PARAMETERS); goto err; } EVP_PKEY_CTX *pctx; if (!EVP_DigestVerifyInit(ctx, &pctx, md, NULL, pkey) || !EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING) || !EVP_PKEY_CTX_set_rsa_pss_saltlen(pctx, (int)salt_len) || !EVP_PKEY_CTX_set_rsa_mgf1_md(pctx, mgf1md)) { goto err; } ret = 1; } err: RSA_PSS_PARAMS_free(pss); return ret; } int x509_print_rsa_pss_params(BIO *bp, const X509_ALGOR *sigalg, int indent, ASN1_PCTX *pctx) { assert(OBJ_obj2nid(sigalg->algorithm) == NID_rsassaPss); int rv = 0; X509_ALGOR *maskHash = NULL; RSA_PSS_PARAMS *pss = rsa_pss_decode(sigalg); if (!pss) { if (BIO_puts(bp, " (INVALID PSS PARAMETERS)\n") <= 0) { goto err; } rv = 1; goto err; } if (BIO_puts(bp, "\n") <= 0 || // !BIO_indent(bp, indent, 128) || // BIO_puts(bp, "Hash Algorithm: ") <= 0) { goto err; } if (pss->hashAlgorithm) { if (i2a_ASN1_OBJECT(bp, pss->hashAlgorithm->algorithm) <= 0) { goto err; } } else if (BIO_puts(bp, "sha1 (default)") <= 0) { goto err; } if (BIO_puts(bp, "\n") <= 0 || // !BIO_indent(bp, indent, 128) || // BIO_puts(bp, "Mask Algorithm: ") <= 0) { goto err; } if (pss->maskGenAlgorithm) { maskHash = rsa_mgf1_decode(pss->maskGenAlgorithm); if (maskHash == NULL) { if (BIO_puts(bp, "INVALID") <= 0) { goto err; } } else { if (i2a_ASN1_OBJECT(bp, pss->maskGenAlgorithm->algorithm) <= 0 || BIO_puts(bp, " with ") <= 0 || i2a_ASN1_OBJECT(bp, maskHash->algorithm) <= 0) { goto err; } } } else if (BIO_puts(bp, "mgf1 with sha1 (default)") <= 0) { goto err; } BIO_puts(bp, "\n"); if (!BIO_indent(bp, indent, 128) || // BIO_puts(bp, "Salt Length: 0x") <= 0) { goto err; } if (pss->saltLength) { if (i2a_ASN1_INTEGER(bp, pss->saltLength) <= 0) { goto err; } } else if (BIO_puts(bp, "14 (default)") <= 0) { goto err; } BIO_puts(bp, "\n"); if (!BIO_indent(bp, indent, 128) || // BIO_puts(bp, "Trailer Field: 0x") <= 0) { goto err; } if (pss->trailerField) { if 
(i2a_ASN1_INTEGER(bp, pss->trailerField) <= 0) { goto err; } } else if (BIO_puts(bp, "BC (default)") <= 0) { goto err; } BIO_puts(bp, "\n"); rv = 1; err: RSA_PSS_PARAMS_free(pss); X509_ALGOR_free(maskHash); return rv; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/t_crl.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include int X509_CRL_print_fp(FILE *fp, X509_CRL *x) { BIO *b = BIO_new_fp(fp, BIO_NOCLOSE); if (b == NULL) { OPENSSL_PUT_ERROR(X509, ERR_R_BUF_LIB); return 0; } int ret = X509_CRL_print(b, x); BIO_free(b); return ret; } int X509_CRL_print(BIO *out, X509_CRL *x) { long version = X509_CRL_get_version(x); assert(X509_CRL_VERSION_1 <= version && version <= X509_CRL_VERSION_2); const X509_ALGOR *sig_alg; const ASN1_BIT_STRING *signature; X509_CRL_get0_signature(x, &signature, &sig_alg); if (BIO_printf(out, "Certificate Revocation List (CRL):\n") <= 0 || BIO_printf(out, "%8sVersion %ld (0x%lx)\n", "", version + 1, (unsigned long)version) <= 0 || // Note this and the other |X509_signature_print| call both print the // outer signature algorithm, rather than printing the inner and outer // ones separately. This matches OpenSSL, though it was probably a bug. 
!X509_signature_print(out, sig_alg, NULL)) { return 0; } char *issuer = X509_NAME_oneline(X509_CRL_get_issuer(x), NULL, 0); int ok = issuer != NULL && BIO_printf(out, "%8sIssuer: %s\n", "", issuer) > 0; OPENSSL_free(issuer); if (!ok) { return 0; } if (BIO_printf(out, "%8sLast Update: ", "") <= 0 || !ASN1_TIME_print(out, X509_CRL_get0_lastUpdate(x)) || BIO_printf(out, "\n%8sNext Update: ", "") <= 0) { return 0; } if (X509_CRL_get0_nextUpdate(x)) { if (!ASN1_TIME_print(out, X509_CRL_get0_nextUpdate(x))) { return 0; } } else { if (BIO_printf(out, "NONE") <= 0) { return 0; } } if (BIO_printf(out, "\n") <= 0 || !X509V3_extensions_print(out, "CRL extensions", X509_CRL_get0_extensions(x), 0, 8)) { return 0; } const STACK_OF(X509_REVOKED) *rev = X509_CRL_get_REVOKED(x); if (sk_X509_REVOKED_num(rev) > 0) { if (BIO_printf(out, "Revoked Certificates:\n") <= 0) { return 0; } } else { if (BIO_printf(out, "No Revoked Certificates.\n") <= 0) { return 0; } } for (size_t i = 0; i < sk_X509_REVOKED_num(rev); i++) { const X509_REVOKED *r = sk_X509_REVOKED_value(rev, i); if (BIO_printf(out, " Serial Number: ") <= 0 || i2a_ASN1_INTEGER(out, X509_REVOKED_get0_serialNumber(r)) <= 0 || BIO_printf(out, "\n Revocation Date: ") <= 0 || !ASN1_TIME_print(out, X509_REVOKED_get0_revocationDate(r)) || BIO_printf(out, "\n") <= 0 || !X509V3_extensions_print(out, "CRL entry extensions", X509_REVOKED_get0_extensions(r), 0, 8)) { } } return X509_signature_print(out, sig_alg, signature); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/t_req.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
 You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include
#include
#include
#include
#include
#include
#include
#include "internal.h"

// X509_REQ_print_fp wraps |X509_REQ_print| for a |FILE*|, using a
// non-closing BIO. Returns 0 if the BIO cannot be created.
int X509_REQ_print_fp(FILE *fp, X509_REQ *x) {
  BIO *bio = BIO_new_fp(fp, BIO_NOCLOSE);
  if (bio == NULL) {
    OPENSSL_PUT_ERROR(X509, ERR_R_BUF_LIB);
    return 0;
  }
  int ret = X509_REQ_print(bio, x);
  BIO_free(bio);
  return ret;
}

// X509_REQ_print_ex prints a human-readable rendering of the CSR |x| to
// |bio|. |nmflags| controls X509_NAME formatting; |cflag| is a bitmask of
// X509_FLAG_NO_* values suppressing individual sections. Returns 1 on
// success, 0 on write failure.
int X509_REQ_print_ex(BIO *bio, X509_REQ *x, unsigned long nmflags,
                      unsigned long cflag) {
  long l;
  STACK_OF(X509_ATTRIBUTE) *sk;
  char mlch = ' ';
  int nmindent = 0;
  // Multiline name output puts each RDN on its own indented line.
  if ((nmflags & XN_FLAG_SEP_MASK) == XN_FLAG_SEP_MULTILINE) {
    mlch = '\n';
    nmindent = 12;
  }
  if (nmflags == X509_FLAG_COMPAT) {
    nmindent = 16;
  }
  X509_REQ_INFO *ri = x->req_info;
  if (!(cflag & X509_FLAG_NO_HEADER)) {
    if (BIO_write(bio, "Certificate Request:\n", 21) <= 0 ||
        BIO_write(bio, " Data:\n", 10) <= 0) {
      goto err;
    }
  }
  if (!(cflag & X509_FLAG_NO_VERSION)) {
    l = X509_REQ_get_version(x);
    // Only zero, |X509_REQ_VERSION_1|, is valid but our parser accepts some
    // invalid values for compatibility.
    assert(0 <= l && l <= 2);
    if (BIO_printf(bio, "%8sVersion: %ld (0x%lx)\n", "", l + 1,
                   (unsigned long)l) <= 0) {
      goto err;
    }
  }
  if (!(cflag & X509_FLAG_NO_SUBJECT)) {
    if (BIO_printf(bio, " Subject:%c", mlch) <= 0 ||
        X509_NAME_print_ex(bio, ri->subject, nmindent, nmflags) < 0 ||
        BIO_write(bio, "\n", 1) <= 0) {
      goto err;
    }
  }
  if (!(cflag & X509_FLAG_NO_PUBKEY)) {
    if (BIO_write(bio, " Subject Public Key Info:\n", 33) <= 0 ||
        BIO_printf(bio, "%12sPublic Key Algorithm: ", "") <= 0 ||
        i2a_ASN1_OBJECT(bio, ri->pubkey->algor->algorithm) <= 0 ||
        BIO_puts(bio, "\n") <= 0) {
      goto err;
    }
    const EVP_PKEY *pkey = X509_REQ_get0_pubkey(x);
    if (pkey == NULL) {
      // An unparseable key is reported inline rather than failing the print.
      BIO_printf(bio, "%12sUnable to load Public Key\n", "");
      ERR_print_errors(bio);
    } else {
      EVP_PKEY_print_public(bio, pkey, 16, NULL);
    }
  }
  if (!(cflag & X509_FLAG_NO_ATTRIBUTES)) {
    if (BIO_printf(bio, "%8sAttributes:\n", "") <= 0) {
      goto err;
    }
    sk = x->req_info->attributes;
    if (sk_X509_ATTRIBUTE_num(sk) == 0) {
      if (BIO_printf(bio, "%12sa0:00\n", "") <= 0) {
        goto err;
      }
    } else {
      size_t i;
      for (i = 0; i < sk_X509_ATTRIBUTE_num(sk); i++) {
        X509_ATTRIBUTE *a = sk_X509_ATTRIBUTE_value(sk, i);
        ASN1_OBJECT *aobj = X509_ATTRIBUTE_get0_object(a);
        // Extension-request attributes are printed in the extensions section
        // below, not here.
        if (X509_REQ_extension_nid(OBJ_obj2nid(aobj))) {
          continue;
        }
        if (BIO_printf(bio, "%12s", "") <= 0) {
          goto err;
        }
        const int num_attrs = X509_ATTRIBUTE_count(a);
        const int obj_str_len = i2a_ASN1_OBJECT(bio, aobj);
        if (obj_str_len <= 0) {
          if (BIO_puts(bio, "(Unable to print attribute ID.)\n") < 0) {
            goto err;
          } else {
            continue;
          }
        }
        int j;
        for (j = 0; j < num_attrs; j++) {
          const ASN1_TYPE *at = X509_ATTRIBUTE_get0_type(a, j);
          const int type = at->type;
          ASN1_BIT_STRING *bs = at->value.asn1_string;
          int k;
          // Pad the attribute value out to column 25.
          for (k = 25 - obj_str_len; k > 0; k--) {
            if (BIO_write(bio, " ", 1) != 1) {
              goto err;
            }
          }
          if (BIO_puts(bio, ":") <= 0) {
            goto err;
          }
          // Only simple string types are printed verbatim.
          if (type == V_ASN1_PRINTABLESTRING || type == V_ASN1_UTF8STRING ||
              type == V_ASN1_IA5STRING || type == V_ASN1_T61STRING) {
            if (BIO_write(bio, (char *)bs->data, bs->length)
                != bs->length) {
              goto err;
            }
            BIO_puts(bio, "\n");
          } else {
            BIO_puts(bio, "unable to print attribute\n");
          }
        }
      }
    }
  }
  if (!(cflag & X509_FLAG_NO_EXTENSIONS)) {
    STACK_OF(X509_EXTENSION) *exts = X509_REQ_get_extensions(x);
    if (exts) {
      // NOTE(review): |exts| is leaked on the |goto err| paths inside this
      // loop — confirm against upstream before changing.
      BIO_printf(bio, "%8sRequested Extensions:\n", "");
      for (size_t i = 0; i < sk_X509_EXTENSION_num(exts); i++) {
        const X509_EXTENSION *ex = sk_X509_EXTENSION_value(exts, i);
        if (BIO_printf(bio, "%12s", "") <= 0) {
          goto err;
        }
        const ASN1_OBJECT *obj = X509_EXTENSION_get_object(ex);
        i2a_ASN1_OBJECT(bio, obj);
        const int is_critical = X509_EXTENSION_get_critical(ex);
        if (BIO_printf(bio, ": %s\n", is_critical ? "critical" : "") <= 0) {
          goto err;
        }
        if (!X509V3_EXT_print(bio, ex, cflag, 16)) {
          // Fall back to a raw dump when the extension cannot be rendered.
          BIO_printf(bio, "%16s", "");
          ASN1_STRING_print(bio, X509_EXTENSION_get_data(ex));
        }
        if (BIO_write(bio, "\n", 1) <= 0) {
          goto err;
        }
      }
      sk_X509_EXTENSION_pop_free(exts, X509_EXTENSION_free);
    }
  }
  if (!(cflag & X509_FLAG_NO_SIGDUMP) &&
      !X509_signature_print(bio, x->sig_alg, x->signature)) {
    goto err;
  }
  return 1;

err:
  OPENSSL_PUT_ERROR(X509, ERR_R_BUF_LIB);
  return 0;
}

// X509_REQ_print prints |req| with compatibility-mode flags.
int X509_REQ_print(BIO *bio, X509_REQ *req) {
  return X509_REQ_print_ex(bio, req, XN_FLAG_COMPAT, X509_FLAG_COMPAT);
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/t_x509.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
 You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include
#include  // for PRIu64 and friends
#include
#include
#include
#include
#include
#include
#include
#include
#include "internal.h"

// X509_print_ex_fp wraps |X509_print_ex| for a |FILE*|, using a non-closing
// BIO. Returns 0 if the BIO cannot be created.
int X509_print_ex_fp(FILE *fp, X509 *x, unsigned long nmflag,
                     unsigned long cflag) {
  BIO *b = BIO_new_fp(fp, BIO_NOCLOSE);
  if (b == NULL) {
    OPENSSL_PUT_ERROR(X509, ERR_R_BUF_LIB);
    return 0;
  }
  int ret = X509_print_ex(b, x, nmflag, cflag);
  BIO_free(b);
  return ret;
}

// X509_print_fp prints |x| to |fp| with compatibility-mode flags.
int X509_print_fp(FILE *fp, X509 *x) {
  return X509_print_ex_fp(fp, x, XN_FLAG_COMPAT, X509_FLAG_COMPAT);
}

// X509_print prints |x| to |bp| with compatibility-mode flags.
int X509_print(BIO *bp, X509 *x) {
  return X509_print_ex(bp, x, XN_FLAG_COMPAT, X509_FLAG_COMPAT);
}

// X509_print_ex prints a human-readable rendering of certificate |x| to
// |bp|. |nmflags| controls X509_NAME formatting; |cflag| is a bitmask of
// X509_FLAG_NO_* values suppressing individual sections. Returns 1 on
// success, 0 on write failure.
int X509_print_ex(BIO *bp, X509 *x, unsigned long nmflags,
                  unsigned long cflag) {
  char mlch = ' ';
  int nmindent = 0;
  if ((nmflags & XN_FLAG_SEP_MASK) == XN_FLAG_SEP_MULTILINE) {
    mlch = '\n';
    nmindent = 12;
  }
  if (nmflags == X509_FLAG_COMPAT) {
    nmindent = 16;
  }
  const X509_CINF *ci = x->cert_info;
  if (!(cflag & X509_FLAG_NO_HEADER)) {
    if (BIO_write(bp, "Certificate:\n", 13) <= 0) {
      return 0;
    }
    if (BIO_write(bp, " Data:\n", 10) <= 0) {
      return 0;
    }
  }
  if (!(cflag & X509_FLAG_NO_VERSION)) {
    long l = X509_get_version(x);
    assert(X509_VERSION_1 <= l && l <= X509_VERSION_3);
    if (BIO_printf(bp, "%8sVersion: %ld (0x%lx)\n", "", l + 1,
                   (unsigned long)l) <= 0) {
      return 0;
    }
  }
  if (!(cflag & X509_FLAG_NO_SERIAL)) {
    if (BIO_write(bp, " Serial Number:", 22) <= 0) {
      return 0;
    }
    const ASN1_INTEGER *serial = X509_get0_serialNumber(x);
    uint64_t serial_u64;
    if (ASN1_INTEGER_get_uint64(&serial_u64, serial)) {
      // Small serial numbers print in decimal and hex on one line.
      assert(serial->type != V_ASN1_NEG_INTEGER);
      if (BIO_printf(bp, " %" PRIu64 " (0x%" PRIx64 ")\n", serial_u64,
                     serial_u64) <= 0) {
        return 0;
      }
    } else {
      // Large or negative serials are dumped as colon-separated hex bytes.
      ERR_clear_error();  // Clear |ASN1_INTEGER_get_uint64|'s error.
      const char *neg =
          (serial->type == V_ASN1_NEG_INTEGER) ? " (Negative)" : "";
      if (BIO_printf(bp, "\n%12s%s", "", neg) <= 0) {
        return 0;
      }
      for (int i = 0; i < serial->length; i++) {
        if (BIO_printf(bp, "%02x%c", serial->data[i],
                       ((i + 1 == serial->length) ? '\n' : ':')) <= 0) {
          return 0;
        }
      }
    }
  }
  if (!(cflag & X509_FLAG_NO_SIGNAME)) {
    if (X509_signature_print(bp, ci->signature, NULL) <= 0) {
      return 0;
    }
  }
  if (!(cflag & X509_FLAG_NO_ISSUER)) {
    if (BIO_printf(bp, " Issuer:%c", mlch) <= 0) {
      return 0;
    }
    if (X509_NAME_print_ex(bp, X509_get_issuer_name(x), nmindent, nmflags) <
        0) {
      return 0;
    }
    if (BIO_write(bp, "\n", 1) <= 0) {
      return 0;
    }
  }
  if (!(cflag & X509_FLAG_NO_VALIDITY)) {
    if (BIO_write(bp, " Validity\n", 17) <= 0) {
      return 0;
    }
    if (BIO_write(bp, " Not Before: ", 24) <= 0) {
      return 0;
    }
    if (!ASN1_TIME_print(bp, X509_get_notBefore(x))) {
      return 0;
    }
    if (BIO_write(bp, "\n Not After : ", 25) <= 0) {
      return 0;
    }
    if (!ASN1_TIME_print(bp, X509_get_notAfter(x))) {
      return 0;
    }
    if (BIO_write(bp, "\n", 1) <= 0) {
      return 0;
    }
  }
  if (!(cflag & X509_FLAG_NO_SUBJECT)) {
    if (BIO_printf(bp, " Subject:%c", mlch) <= 0) {
      return 0;
    }
    if (X509_NAME_print_ex(bp, X509_get_subject_name(x), nmindent, nmflags) <
        0) {
      return 0;
    }
    if (BIO_write(bp, "\n", 1) <= 0) {
      return 0;
    }
  }
  if (!(cflag & X509_FLAG_NO_PUBKEY)) {
    if (BIO_write(bp, " Subject Public Key Info:\n", 33) <= 0) {
      return 0;
    }
    if (BIO_printf(bp, "%12sPublic Key Algorithm: ", "") <= 0) {
      return 0;
    }
    if (i2a_ASN1_OBJECT(bp, ci->key->algor->algorithm) <= 0) {
      return 0;
    }
    if (BIO_puts(bp, "\n") <= 0) {
      return 0;
    }
    const EVP_PKEY *pkey = X509_get0_pubkey(x);
    if (pkey == NULL) {
      // An unparseable key is reported inline rather than failing the print.
      BIO_printf(bp, "%12sUnable to load Public Key\n", "");
      ERR_print_errors(bp);
    } else {
      EVP_PKEY_print_public(bp, pkey, 16, NULL);
    }
  }
  if (!(cflag & X509_FLAG_NO_IDS)) {
    if (ci->issuerUID) {
      if (BIO_printf(bp, "%8sIssuer Unique ID: ", "") <= 0) {
        return 0;
      }
      if (!X509_signature_dump(bp, ci->issuerUID, 12)) {
        return 0;
      }
    }
    if (ci->subjectUID) {
      if (BIO_printf(bp, "%8sSubject Unique ID: ", "") <= 0) {
        return 0;
      }
      if (!X509_signature_dump(bp, ci->subjectUID, 12)) {
        return 0;
      }
    }
  }
  if (!(cflag & X509_FLAG_NO_EXTENSIONS)) {
    // NOTE(review): this return value is deliberately(?) unchecked, unlike
    // the other sections — confirm against upstream before changing.
    X509V3_extensions_print(bp, "X509v3 extensions", ci->extensions, cflag, 8);
  }
  if (!(cflag & X509_FLAG_NO_SIGDUMP)) {
    if (X509_signature_print(bp, x->sig_alg, x->signature) <= 0) {
      return 0;
    }
  }
  if (!(cflag & X509_FLAG_NO_AUX)) {
    if (!X509_CERT_AUX_print(bp, x->aux, 0)) {
      return 0;
    }
  }
  return 1;
}

// X509_signature_print prints the signature algorithm of |sigalg| (including
// RSASSA-PSS parameters when applicable) and, if |sig| is non-NULL, a hex
// dump of the signature itself. Returns 1 on success, 0 on write failure.
int X509_signature_print(BIO *bp, const X509_ALGOR *sigalg,
                         const ASN1_STRING *sig) {
  if (BIO_puts(bp, " Signature Algorithm: ") <= 0) {
    return 0;
  }
  if (i2a_ASN1_OBJECT(bp, sigalg->algorithm) <= 0) {
    return 0;
  }

  // RSA-PSS signatures have parameters to print.
  int sig_nid = OBJ_obj2nid(sigalg->algorithm);
  if (sig_nid == NID_rsassaPss &&
      !x509_print_rsa_pss_params(bp, sigalg, 9, 0)) {
    return 0;
  }

  if (sig) {
    return X509_signature_dump(bp, sig, 9);
  } else if (BIO_puts(bp, "\n") <= 0) {
    return 0;
  }
  return 1;
}

// X509_NAME_print writes a one-line rendering of |name| to |bp|, splitting
// the |X509_NAME_oneline| form on '/'-separated RDN boundaries and joining
// them with ", ". |obase| is unused. Returns 1 on success, 0 on failure.
int X509_NAME_print(BIO *bp, const X509_NAME *name, int obase) {
  char *s, *c, *b;
  int ret = 0, i;
  b = X509_NAME_oneline(name, NULL, 0);
  if (!b) {
    return 0;
  }
  if (!*b) {
    OPENSSL_free(b);
    return 1;
  }
  s = b + 1;  // skip the first slash
  c = s;
  for (;;) {
    // An RDN boundary is a '/' followed by a one- or two-letter uppercase
    // attribute name and '='.
    if (((*s == '/') &&
         ((s[1] >= 'A') && (s[1] <= 'Z') &&
          ((s[2] == '=') ||
           ((s[2] >= 'A') && (s[2] <= 'Z') && (s[3] == '='))))) ||
        (*s == '\0')) {
      i = s - c;
      if (BIO_write(bp, c, i) != i) {
        goto err;
      }
      c = s + 1;  // skip following slash
      if (*s != '\0') {
        if (BIO_write(bp, ", ", 2) != 2) {
          goto err;
        }
      }
    }
    if (*s == '\0') {
      break;
    }
    s++;
  }

  ret = 1;
  if (0) {
  err:
    OPENSSL_PUT_ERROR(X509, ERR_R_BUF_LIB);
  }
  OPENSSL_free(b);
  return ret;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/t_x509a.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "internal.h" // X509_CERT_AUX and string set routines int X509_CERT_AUX_print(BIO *out, X509_CERT_AUX *aux, int indent) { char oidstr[80], first; size_t i; int j; if (!aux) { return 1; } if (aux->trust) { first = 1; BIO_printf(out, "%*sTrusted Uses:\n%*s", indent, "", indent + 2, ""); for (i = 0; i < sk_ASN1_OBJECT_num(aux->trust); i++) { if (!first) { BIO_puts(out, ", "); } else { first = 0; } OBJ_obj2txt(oidstr, sizeof oidstr, sk_ASN1_OBJECT_value(aux->trust, i), 0); BIO_puts(out, oidstr); } BIO_puts(out, "\n"); } else { BIO_printf(out, "%*sNo Trusted Uses.\n", indent, ""); } if (aux->reject) { first = 1; BIO_printf(out, "%*sRejected Uses:\n%*s", indent, "", indent + 2, ""); for (i = 0; i < sk_ASN1_OBJECT_num(aux->reject); i++) { if (!first) { BIO_puts(out, ", "); } else { first = 0; } OBJ_obj2txt(oidstr, sizeof oidstr, sk_ASN1_OBJECT_value(aux->reject, i), 0); BIO_puts(out, oidstr); } BIO_puts(out, "\n"); } else { BIO_printf(out, "%*sNo Rejected Uses.\n", indent, ""); } if (aux->alias) { BIO_printf(out, "%*sAlias: %.*s\n", indent, "", aux->alias->length, aux->alias->data); } if (aux->keyid) { BIO_printf(out, "%*sKey Id: ", indent, ""); for (j = 0; j < aux->keyid->length; j++) { BIO_printf(out, "%s%02X", j ? ":" : "", aux->keyid->data[j]); } BIO_write(out, "\n", 1); } return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_akey.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
 You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include "ext_dat.h"
#include "internal.h"

static STACK_OF(CONF_VALUE) *i2v_AUTHORITY_KEYID(
    const X509V3_EXT_METHOD *method, void *ext, STACK_OF(CONF_VALUE) *extlist);
static void *v2i_AUTHORITY_KEYID(const X509V3_EXT_METHOD *method,
                                 const X509V3_CTX *ctx,
                                 const STACK_OF(CONF_VALUE) *values);

// Extension method table for the authority key identifier extension.
const X509V3_EXT_METHOD v3_akey_id = {
    NID_authority_key_identifier,
    X509V3_EXT_MULTILINE,
    ASN1_ITEM_ref(AUTHORITY_KEYID),
    0,
    0,
    0,
    0,
    0,
    0,
    i2v_AUTHORITY_KEYID,
    v2i_AUTHORITY_KEYID,
    0,
    0,
    NULL,
};

// i2v_AUTHORITY_KEYID converts a decoded AUTHORITY_KEYID |ext| into
// CONF_VALUE name/value pairs ("keyid", issuer names, "serial"), appending to
// |extlist| (allocated if NULL). Returns the list or NULL on error, freeing
// the list only if this function allocated it.
static STACK_OF(CONF_VALUE) *i2v_AUTHORITY_KEYID(
    const X509V3_EXT_METHOD *method, void *ext, STACK_OF(CONF_VALUE) *extlist) {
  const AUTHORITY_KEYID *akeyid = reinterpret_cast(ext);
  int extlist_was_null = extlist == NULL;
  if (akeyid->keyid) {
    char *tmp = x509v3_bytes_to_hex(akeyid->keyid->data, akeyid->keyid->length);
    int ok = tmp != NULL && X509V3_add_value("keyid", tmp, &extlist);
    OPENSSL_free(tmp);
    if (!ok) {
      goto err;
    }
  }
  if (akeyid->issuer) {
    STACK_OF(CONF_VALUE) *tmpextlist =
        i2v_GENERAL_NAMES(NULL, akeyid->issuer, extlist);
    if (tmpextlist == NULL) {
      goto err;
    }
    extlist = tmpextlist;
  }
  if (akeyid->serial) {
    if (!X509V3_add_value_int("serial", akeyid->serial, &extlist)) {
      goto err;
    }
  }
  return extlist;

err:
  if (extlist_was_null) {
    sk_CONF_VALUE_pop_free(extlist, X509V3_conf_free);
  }
  return NULL;
}

// Currently two options: keyid: use the issuers subject keyid, the value
// 'always' means its is an error if the issuer certificate doesn't have a
// key id. issuer: use the issuers cert issuer and serial number. The default
// is to only use this if keyid is not present. With the option 'always' this
// is always included.
static void *v2i_AUTHORITY_KEYID(const X509V3_EXT_METHOD *method,
                                 const X509V3_CTX *ctx,
                                 const STACK_OF(CONF_VALUE) *values) {
  char keyid = 0, issuer = 0;
  int j;
  ASN1_OCTET_STRING *ikeyid = NULL;
  X509_NAME *isname = NULL;
  GENERAL_NAMES *gens = NULL;
  GENERAL_NAME *gen = NULL;
  ASN1_INTEGER *serial = NULL;
  const X509 *cert;
  AUTHORITY_KEYID *akeyid;

  // Parse the config options; 1 = requested, 2 = "always".
  for (size_t i = 0; i < sk_CONF_VALUE_num(values); i++) {
    const CONF_VALUE *cnf = sk_CONF_VALUE_value(values, i);
    if (!strcmp(cnf->name, "keyid")) {
      keyid = 1;
      if (cnf->value && !strcmp(cnf->value, "always")) {
        keyid = 2;
      }
    } else if (!strcmp(cnf->name, "issuer")) {
      issuer = 1;
      if (cnf->value && !strcmp(cnf->value, "always")) {
        issuer = 2;
      }
    } else {
      OPENSSL_PUT_ERROR(X509V3, X509V3_R_UNKNOWN_OPTION);
      ERR_add_error_data(2, "name=", cnf->name);
      return NULL;
    }
  }

  if (!ctx || !ctx->issuer_cert) {
    // In test mode, return an empty extension rather than failing.
    if (ctx && (ctx->flags == X509V3_CTX_TEST)) {
      return AUTHORITY_KEYID_new();
    }
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_NO_ISSUER_CERTIFICATE);
    return NULL;
  }

  cert = ctx->issuer_cert;

  if (keyid) {
    // Pull the issuer's subject key identifier extension, if present.
    j = X509_get_ext_by_NID(cert, NID_subject_key_identifier, -1);
    const X509_EXTENSION *ext;
    if ((j >= 0) && (ext = X509_get_ext(cert, j))) {
      ikeyid = reinterpret_cast(X509V3_EXT_d2i(ext));
    }
    if (keyid == 2 && !ikeyid) {
      OPENSSL_PUT_ERROR(X509V3, X509V3_R_UNABLE_TO_GET_ISSUER_KEYID);
      return NULL;
    }
  }

  // Include issuer-and-serial if "issuer always", or if "issuer" was
  // requested and no keyid was found.
  if ((issuer && !ikeyid) || (issuer == 2)) {
    isname = X509_NAME_dup(X509_get_issuer_name(cert));
    serial = ASN1_INTEGER_dup(X509_get0_serialNumber(cert));
    if (!isname || !serial) {
      OPENSSL_PUT_ERROR(X509V3, X509V3_R_UNABLE_TO_GET_ISSUER_DETAILS);
      goto err;
    }
  }

  if (!(akeyid = AUTHORITY_KEYID_new())) {
    goto err;
  }

  if (isname) {
    if (!(gens = sk_GENERAL_NAME_new_null()) || !(gen = GENERAL_NAME_new()) ||
        !sk_GENERAL_NAME_push(gens, gen)) {
      goto err;
    }
    gen->type = GEN_DIRNAME;
    gen->d.dirn = isname;
  }

  akeyid->issuer = gens;
  akeyid->serial = serial;
  akeyid->keyid = ikeyid;

  return akeyid;

err:
  // NOTE(review): on allocation failure after |akeyid| is created, |akeyid|,
  // |gens| and an unpushed |gen| appear to leak here — confirm against
  // upstream before changing the cleanup.
  X509_NAME_free(isname);
  ASN1_INTEGER_free(serial);
  ASN1_OCTET_STRING_free(ikeyid);
  return NULL;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/v3_akeya.cc
================================================
/*
 * Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include
#include
#include
#include
#include

#include "internal.h"

// ASN.1 template for AuthorityKeyIdentifier: all three fields are OPTIONAL
// and IMPLICIT-tagged [0], [1], [2].
ASN1_SEQUENCE(AUTHORITY_KEYID) = {
    ASN1_IMP_OPT(AUTHORITY_KEYID, keyid, ASN1_OCTET_STRING, 0),
    ASN1_IMP_SEQUENCE_OF_OPT(AUTHORITY_KEYID, issuer, GENERAL_NAME, 1),
    ASN1_IMP_OPT(AUTHORITY_KEYID, serial, ASN1_INTEGER, 2),
} ASN1_SEQUENCE_END(AUTHORITY_KEYID)

IMPLEMENT_ASN1_FUNCTIONS(AUTHORITY_KEYID)


================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/v3_alt.cc
================================================
/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
 You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include
#include
#include
#include
#include
#include
#include

#include "ext_dat.h"
#include "internal.h"

static void *v2i_subject_alt(const X509V3_EXT_METHOD *method,
                             const X509V3_CTX *ctx,
                             const STACK_OF(CONF_VALUE) *nval);
static void *v2i_issuer_alt(const X509V3_EXT_METHOD *method,
                            const X509V3_CTX *ctx,
                            const STACK_OF(CONF_VALUE) *nval);
static int copy_email(const X509V3_CTX *ctx, GENERAL_NAMES *gens, int move_p);
static int copy_issuer(const X509V3_CTX *ctx, GENERAL_NAMES *gens);
static int do_othername(GENERAL_NAME *gen, const char *value,
                        const X509V3_CTX *ctx);
static int do_dirname(GENERAL_NAME *gen, const char *value,
                      const X509V3_CTX *ctx);

// i2v_GENERAL_NAMES_cb adapts |i2v_GENERAL_NAMES| to the void* extension
// callback signature.
static STACK_OF(CONF_VALUE) *i2v_GENERAL_NAMES_cb(
    const X509V3_EXT_METHOD *method, void *ext, STACK_OF(CONF_VALUE) *ret) {
  return i2v_GENERAL_NAMES(method, reinterpret_cast(ext), ret);
}

// Extension method tables for subjectAltName, issuerAltName, and
// certificateIssuer (the last has no v2i conversion).
const X509V3_EXT_METHOD v3_alt[] = {
    {NID_subject_alt_name, 0, ASN1_ITEM_ref(GENERAL_NAMES), 0, 0, 0, 0, 0, 0,
     i2v_GENERAL_NAMES_cb, v2i_subject_alt, NULL, NULL, NULL},
    {NID_issuer_alt_name, 0, ASN1_ITEM_ref(GENERAL_NAMES), 0, 0, 0, 0, 0, 0,
     i2v_GENERAL_NAMES_cb, v2i_issuer_alt, NULL, NULL, NULL},
    {NID_certificate_issuer, 0, ASN1_ITEM_ref(GENERAL_NAMES), 0, 0, 0, 0, 0, 0,
     i2v_GENERAL_NAMES_cb, NULL, NULL, NULL, NULL},
};

// i2v_GENERAL_NAMES renders each name in |gens| into |ret| via
// |i2v_GENERAL_NAME|, allocating |ret| if NULL. On error the list is freed
// only if this function allocated it.
STACK_OF(CONF_VALUE) *i2v_GENERAL_NAMES(const X509V3_EXT_METHOD *method,
                                        const GENERAL_NAMES *gens,
                                        STACK_OF(CONF_VALUE) *ret) {
  int ret_was_null = ret == NULL;
  for (size_t i = 0; i < sk_GENERAL_NAME_num(gens); i++) {
    const GENERAL_NAME *gen = sk_GENERAL_NAME_value(gens, i);
    STACK_OF(CONF_VALUE) *tmp = i2v_GENERAL_NAME(method, gen, ret);
    if (tmp == NULL) {
      if (ret_was_null) {
        sk_CONF_VALUE_pop_free(ret, X509V3_conf_free);
      }
      return NULL;
    }
    ret = tmp;
  }
  // An empty input still yields a (new, empty) list.
  if (!ret) {
    return sk_CONF_VALUE_new_null();
  }
  return ret;
}

// i2v_GENERAL_NAME appends a single name/value pair describing |gen| to
// |ret|. Returns the (possibly newly-allocated) list or NULL on error.
STACK_OF(CONF_VALUE) *i2v_GENERAL_NAME(const X509V3_EXT_METHOD *method, const
                                       GENERAL_NAME *gen,
                                       STACK_OF(CONF_VALUE) *ret) {
  // Note the error-handling for this function relies on there being at most
  // one |X509V3_add_value| call. If there were two and the second failed, we
  // would need to sometimes free the first call's result.
  unsigned char *p;
  char oline[256], htmp[5];
  int i;
  switch (gen->type) {
    case GEN_OTHERNAME:
      if (!X509V3_add_value("othername", "", &ret)) {
        return NULL;
      }
      break;

    case GEN_X400:
      if (!X509V3_add_value("X400Name", "", &ret)) {
        return NULL;
      }
      break;

    case GEN_EDIPARTY:
      if (!X509V3_add_value("EdiPartyName", "", &ret)) {
        return NULL;
      }
      break;

    case GEN_EMAIL:
      if (!x509V3_add_value_asn1_string("email", gen->d.ia5, &ret)) {
        return NULL;
      }
      break;

    case GEN_DNS:
      if (!x509V3_add_value_asn1_string("DNS", gen->d.ia5, &ret)) {
        return NULL;
      }
      break;

    case GEN_URI:
      if (!x509V3_add_value_asn1_string("URI", gen->d.ia5, &ret)) {
        return NULL;
      }
      break;

    case GEN_DIRNAME:
      if (X509_NAME_oneline(gen->d.dirn, oline, 256) == NULL ||
          !X509V3_add_value("DirName", oline, &ret)) {
        return NULL;
      }
      break;

    case GEN_IPADD:
      p = gen->d.ip->data;
      if (gen->d.ip->length == 4) {
        // IPv4: dotted quad.
        snprintf(oline, sizeof(oline), "%d.%d.%d.%d", p[0], p[1], p[2], p[3]);
      } else if (gen->d.ip->length == 16) {
        // IPv6: eight colon-separated 16-bit hex groups (not compressed).
        oline[0] = 0;
        for (i = 0; i < 8; i++) {
          uint16_t v = ((uint16_t)p[0] << 8) | p[1];
          snprintf(htmp, sizeof(htmp), "%X", v);
          p += 2;
          OPENSSL_strlcat(oline, htmp, sizeof(oline));
          if (i != 7) {
            OPENSSL_strlcat(oline, ":", sizeof(oline));
          }
        }
      } else {
        if (!X509V3_add_value("IP Address", "", &ret)) {
          return NULL;
        }
        break;
      }
      if (!X509V3_add_value("IP Address", oline, &ret)) {
        return NULL;
      }
      break;

    case GEN_RID:
      i2t_ASN1_OBJECT(oline, 256, gen->d.rid);
      if (!X509V3_add_value("Registered ID", oline, &ret)) {
        return NULL;
      }
      break;
  }
  return ret;
}

// GENERAL_NAME_print writes a one-line "type:value" rendering of |gen| to
// |out|. Always returns 1; BIO write errors are not checked.
int GENERAL_NAME_print(BIO *out, const GENERAL_NAME *gen) {
  switch (gen->type) {
    case GEN_OTHERNAME:
      BIO_printf(out, "othername:");
      break;

    case GEN_X400:
      BIO_printf(out, "X400Name:");
      break;

    case GEN_EDIPARTY:
      // Maybe fix this: it is supported now
      BIO_printf(out, "EdiPartyName:");
      break;

    case GEN_EMAIL:
      BIO_printf(out, "email:");
      ASN1_STRING_print(out, gen->d.ia5);
      break;

    case GEN_DNS:
      BIO_printf(out, "DNS:");
      ASN1_STRING_print(out, gen->d.ia5);
      break;

    case GEN_URI:
      BIO_printf(out, "URI:");
      ASN1_STRING_print(out, gen->d.ia5);
      break;

    case GEN_DIRNAME:
      BIO_printf(out, "DirName: ");
      X509_NAME_print_ex(out, gen->d.dirn, 0, XN_FLAG_ONELINE);
      break;

    case GEN_IPADD: {
      const unsigned char *p = gen->d.ip->data;
      if (gen->d.ip->length == 4) {
        BIO_printf(out, "IP Address:%d.%d.%d.%d", p[0], p[1], p[2], p[3]);
      } else if (gen->d.ip->length == 16) {
        BIO_printf(out, "IP Address");
        for (int i = 0; i < 8; i++) {
          uint16_t v = ((uint16_t)p[0] << 8) | p[1];
          BIO_printf(out, ":%X", v);
          p += 2;
        }
        BIO_puts(out, "\n");
      } else {
        BIO_printf(out, "IP Address:");
        break;
      }
      break;
    }

    case GEN_RID:
      BIO_printf(out, "Registered ID");
      i2a_ASN1_OBJECT(out, gen->d.rid);
      break;
  }
  return 1;
}

// v2i_issuer_alt builds a GENERAL_NAMES for issuerAltName from config
// values. "issuer=copy" copies the issuer certificate's subjectAltName;
// other entries are parsed by |v2i_GENERAL_NAME|. Returns NULL on error.
static void *v2i_issuer_alt(const X509V3_EXT_METHOD *method,
                            const X509V3_CTX *ctx,
                            const STACK_OF(CONF_VALUE) *nval) {
  GENERAL_NAMES *gens = sk_GENERAL_NAME_new_null();
  if (gens == NULL) {
    return NULL;
  }
  for (size_t i = 0; i < sk_CONF_VALUE_num(nval); i++) {
    const CONF_VALUE *cnf = sk_CONF_VALUE_value(nval, i);
    if (x509v3_conf_name_matches(cnf->name, "issuer") && cnf->value &&
        !strcmp(cnf->value, "copy")) {
      if (!copy_issuer(ctx, gens)) {
        goto err;
      }
    } else {
      GENERAL_NAME *gen = v2i_GENERAL_NAME(method, ctx, cnf);
      if (gen == NULL || !sk_GENERAL_NAME_push(gens, gen)) {
        GENERAL_NAME_free(gen);
        goto err;
      }
    }
  }
  return gens;

err:
  sk_GENERAL_NAME_pop_free(gens, GENERAL_NAME_free);
  return NULL;
}

// Append subject altname of issuer to issuer alt name of subject
static int copy_issuer(const X509V3_CTX *ctx, GENERAL_NAMES *gens) {
  if (ctx && (ctx->flags == X509V3_CTX_TEST)) {
    return 1;
  }
  if (!ctx || !ctx->issuer_cert) {
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_NO_ISSUER_DETAILS);
    return 0;
  }
  int i = X509_get_ext_by_NID(ctx->issuer_cert, NID_subject_alt_name, -1);
  if (i <
0) {
    // Issuer has no subjectAltName; nothing to copy.
    return 1;
  }
  int ret = 0;
  GENERAL_NAMES *ialt = NULL;
  X509_EXTENSION *ext;
  if (!(ext = X509_get_ext(ctx->issuer_cert, i)) ||
      !(ialt = reinterpret_cast(X509V3_EXT_d2i(ext)))) {
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_ISSUER_DECODE_ERROR);
    goto err;
  }

  for (size_t j = 0; j < sk_GENERAL_NAME_num(ialt); j++) {
    GENERAL_NAME *gen = sk_GENERAL_NAME_value(ialt, j);
    if (!sk_GENERAL_NAME_push(gens, gen)) {
      goto err;
    }
    // Ownership of |gen| has moved from |ialt| to |gens|.
    sk_GENERAL_NAME_set(ialt, j, NULL);
  }
  ret = 1;

err:
  GENERAL_NAMES_free(ialt);
  return ret;
}

// v2i_subject_alt builds a GENERAL_NAMES for subjectAltName from config
// values. "email=copy"/"email=move" copy (or move) email addresses out of
// the subject name; other entries are parsed by |v2i_GENERAL_NAME|.
static void *v2i_subject_alt(const X509V3_EXT_METHOD *method,
                             const X509V3_CTX *ctx,
                             const STACK_OF(CONF_VALUE) *nval) {
  GENERAL_NAMES *gens = sk_GENERAL_NAME_new_null();
  if (gens == NULL) {
    return NULL;
  }
  for (size_t i = 0; i < sk_CONF_VALUE_num(nval); i++) {
    const CONF_VALUE *cnf = sk_CONF_VALUE_value(nval, i);
    if (x509v3_conf_name_matches(cnf->name, "email") && cnf->value &&
        !strcmp(cnf->value, "copy")) {
      if (!copy_email(ctx, gens, 0)) {
        goto err;
      }
    } else if (x509v3_conf_name_matches(cnf->name, "email") && cnf->value &&
               !strcmp(cnf->value, "move")) {
      if (!copy_email(ctx, gens, 1)) {
        goto err;
      }
    } else {
      GENERAL_NAME *gen = v2i_GENERAL_NAME(method, ctx, cnf);
      if (gen == NULL || !sk_GENERAL_NAME_push(gens, gen)) {
        GENERAL_NAME_free(gen);
        goto err;
      }
    }
  }
  return gens;

err:
  sk_GENERAL_NAME_pop_free(gens, GENERAL_NAME_free);
  return NULL;
}

// Copy any email addresses in a certificate or request to GENERAL_NAMES
static int copy_email(const X509V3_CTX *ctx, GENERAL_NAMES *gens, int move_p) {
  X509_NAME *nm;
  ASN1_IA5STRING *email = NULL;
  X509_NAME_ENTRY *ne;
  GENERAL_NAME *gen = NULL;
  int i;
  if (ctx != NULL && ctx->flags == X509V3_CTX_TEST) {
    return 1;
  }
  if (!ctx || (!ctx->subject_cert && !ctx->subject_req)) {
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_NO_SUBJECT_DETAILS);
    goto err;
  }
  // Find the subject name
  if (ctx->subject_cert) {
    nm = X509_get_subject_name(ctx->subject_cert);
  } else {
    nm = X509_REQ_get_subject_name(ctx->subject_req);
  }

  // Now add any email address(es) to STACK
  i = -1;
  while ((i = X509_NAME_get_index_by_NID(nm, NID_pkcs9_emailAddress, i)) >= 0) {
    ne = X509_NAME_get_entry(nm, i);
    email = ASN1_STRING_dup(X509_NAME_ENTRY_get_data(ne));
    if (move_p) {
      // "move" deletes the entry from the subject name as it is copied.
      X509_NAME_delete_entry(nm, i);
      X509_NAME_ENTRY_free(ne);
      i--;
    }
    if (!email || !(gen = GENERAL_NAME_new())) {
      goto err;
    }
    gen->d.ia5 = email;
    email = NULL;
    gen->type = GEN_EMAIL;
    if (!sk_GENERAL_NAME_push(gens, gen)) {
      goto err;
    }
    gen = NULL;
  }

  return 1;

err:
  GENERAL_NAME_free(gen);
  ASN1_IA5STRING_free(email);
  return 0;
}

// v2i_GENERAL_NAMES parses each CONF_VALUE in |nval| into a GENERAL_NAME and
// collects them. Returns NULL on error.
GENERAL_NAMES *v2i_GENERAL_NAMES(const X509V3_EXT_METHOD *method,
                                 const X509V3_CTX *ctx,
                                 const STACK_OF(CONF_VALUE) *nval) {
  GENERAL_NAMES *gens = sk_GENERAL_NAME_new_null();
  if (gens == NULL) {
    return NULL;
  }
  for (size_t i = 0; i < sk_CONF_VALUE_num(nval); i++) {
    const CONF_VALUE *cnf = sk_CONF_VALUE_value(nval, i);
    GENERAL_NAME *gen = v2i_GENERAL_NAME(method, ctx, cnf);
    if (gen == NULL || !sk_GENERAL_NAME_push(gens, gen)) {
      GENERAL_NAME_free(gen);
      goto err;
    }
  }
  return gens;

err:
  sk_GENERAL_NAME_pop_free(gens, GENERAL_NAME_free);
  return NULL;
}

// v2i_GENERAL_NAME parses one CONF_VALUE into a newly-allocated
// GENERAL_NAME (non-name-constraint variant).
GENERAL_NAME *v2i_GENERAL_NAME(const X509V3_EXT_METHOD *method,
                               const X509V3_CTX *ctx, const CONF_VALUE *cnf) {
  return v2i_GENERAL_NAME_ex(NULL, method, ctx, cnf, 0);
}

// a2i_GENERAL_NAME fills |out| (or a new GENERAL_NAME if |out| is NULL) of
// type |gen_type| from the string |value|. |is_nc| selects name-constraint
// IP parsing. Returns the name, or NULL on error (freeing it only when this
// function allocated it).
static GENERAL_NAME *a2i_GENERAL_NAME(GENERAL_NAME *out,
                                      const X509V3_EXT_METHOD *method,
                                      const X509V3_CTX *ctx, int gen_type,
                                      const char *value, int is_nc) {
  if (!value) {
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_MISSING_VALUE);
    return NULL;
  }

  GENERAL_NAME *gen = NULL;
  if (out) {
    gen = out;
  } else {
    gen = GENERAL_NAME_new();
    if (gen == NULL) {
      return NULL;
    }
  }

  switch (gen_type) {
    case GEN_URI:
    case GEN_EMAIL:
    case GEN_DNS: {
      ASN1_IA5STRING *str = ASN1_IA5STRING_new();
      if (str == NULL || !ASN1_STRING_set(str, value, strlen(value))) {
        ASN1_STRING_free(str);
        goto err;
      }
      gen->type = gen_type;
      gen->d.ia5 = str;
      break;
    }

    case GEN_RID: {
      ASN1_OBJECT *obj;
      if (!(obj = OBJ_txt2obj(value, 0))) {
        OPENSSL_PUT_ERROR(X509V3, X509V3_R_BAD_OBJECT);
        ERR_add_error_data(2, "value=", value);
        goto err;
      }
      gen->type = GEN_RID;
      gen->d.rid = obj;
      break;
    }

    case GEN_IPADD:
      gen->type = GEN_IPADD;
      if (is_nc) {
        gen->d.ip = a2i_IPADDRESS_NC(value);
      } else {
        gen->d.ip = a2i_IPADDRESS(value);
      }
      if (gen->d.ip == NULL) {
        OPENSSL_PUT_ERROR(X509V3, X509V3_R_BAD_IP_ADDRESS);
        ERR_add_error_data(2, "value=", value);
        goto err;
      }
      break;

    case GEN_DIRNAME:
      if (!do_dirname(gen, value, ctx)) {
        OPENSSL_PUT_ERROR(X509V3, X509V3_R_DIRNAME_ERROR);
        goto err;
      }
      break;

    case GEN_OTHERNAME:
      if (!do_othername(gen, value, ctx)) {
        OPENSSL_PUT_ERROR(X509V3, X509V3_R_OTHERNAME_ERROR);
        goto err;
      }
      break;

    default:
      OPENSSL_PUT_ERROR(X509V3, X509V3_R_UNSUPPORTED_TYPE);
      goto err;
  }

  return gen;

err:
  if (!out) {
    GENERAL_NAME_free(gen);
  }
  return NULL;
}

// v2i_GENERAL_NAME_ex maps the config name (email/URI/DNS/RID/IP/dirName/
// otherName) to a GEN_* type and delegates to |a2i_GENERAL_NAME|.
GENERAL_NAME *v2i_GENERAL_NAME_ex(GENERAL_NAME *out,
                                  const X509V3_EXT_METHOD *method,
                                  const X509V3_CTX *ctx, const CONF_VALUE *cnf,
                                  int is_nc) {
  const char *name = cnf->name;
  const char *value = cnf->value;
  if (!value) {
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_MISSING_VALUE);
    return NULL;
  }

  int type;
  if (x509v3_conf_name_matches(name, "email")) {
    type = GEN_EMAIL;
  } else if (x509v3_conf_name_matches(name, "URI")) {
    type = GEN_URI;
  } else if (x509v3_conf_name_matches(name, "DNS")) {
    type = GEN_DNS;
  } else if (x509v3_conf_name_matches(name, "RID")) {
    type = GEN_RID;
  } else if (x509v3_conf_name_matches(name, "IP")) {
    type = GEN_IPADD;
  } else if (x509v3_conf_name_matches(name, "dirName")) {
    type = GEN_DIRNAME;
  } else if (x509v3_conf_name_matches(name, "otherName")) {
    type = GEN_OTHERNAME;
  } else {
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_UNSUPPORTED_OPTION);
    ERR_add_error_data(2, "name=", name);
    return NULL;
  }

  return a2i_GENERAL_NAME(out, method, ctx, type, value, is_nc);
}

// do_othername parses an otherName value of the form "OID;ASN1-value"
// into |gen|. Returns 1 on success, 0 on error.
static int do_othername(GENERAL_NAME *gen, const char *value,
                        const X509V3_CTX *ctx) {
  const char *semicolon = strchr(value, ';');
  if (semicolon == NULL) {
    return 0;
  }

  OTHERNAME
*name = OTHERNAME_new(); if (name == NULL) { return 0; } char *objtmp = OPENSSL_strndup(value, semicolon - value); if (objtmp == NULL) { goto err; } ASN1_OBJECT_free(name->type_id); name->type_id = OBJ_txt2obj(objtmp, /*dont_search_names=*/0); OPENSSL_free(objtmp); if (name->type_id == NULL) { goto err; } ASN1_TYPE_free(name->value); name->value = ASN1_generate_v3(semicolon + 1, ctx); if (name->value == NULL) { goto err; } gen->type = GEN_OTHERNAME; gen->d.otherName = name; return 1; err: OTHERNAME_free(name); return 0; } static int do_dirname(GENERAL_NAME *gen, const char *value, const X509V3_CTX *ctx) { int ret = 0; const STACK_OF(CONF_VALUE) *sk = X509V3_get_section(ctx, value); X509_NAME *nm = X509_NAME_new(); if (nm == NULL) { goto err; } if (sk == NULL) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_SECTION_NOT_FOUND); ERR_add_error_data(2, "section=", value); goto err; } // FIXME: should allow other character types... if (!X509V3_NAME_from_section(nm, sk, MBSTRING_ASC)) { goto err; } gen->type = GEN_DIRNAME; gen->d.dirn = nm; ret = 1; err: if (!ret) { X509_NAME_free(nm); } return ret; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_bcons.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "ext_dat.h" #include "internal.h" static STACK_OF(CONF_VALUE) *i2v_BASIC_CONSTRAINTS( const X509V3_EXT_METHOD *method, void *ext, STACK_OF(CONF_VALUE) *extlist); static void *v2i_BASIC_CONSTRAINTS(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *values); const X509V3_EXT_METHOD v3_bcons = { NID_basic_constraints, 0, ASN1_ITEM_ref(BASIC_CONSTRAINTS), 0, 0, 0, 0, 0, 0, i2v_BASIC_CONSTRAINTS, v2i_BASIC_CONSTRAINTS, NULL, NULL, NULL, }; ASN1_SEQUENCE(BASIC_CONSTRAINTS) = { ASN1_OPT(BASIC_CONSTRAINTS, ca, ASN1_FBOOLEAN), ASN1_OPT(BASIC_CONSTRAINTS, pathlen, ASN1_INTEGER), } ASN1_SEQUENCE_END(BASIC_CONSTRAINTS) IMPLEMENT_ASN1_FUNCTIONS_const(BASIC_CONSTRAINTS) static STACK_OF(CONF_VALUE) *i2v_BASIC_CONSTRAINTS( const X509V3_EXT_METHOD *method, void *ext, STACK_OF(CONF_VALUE) *extlist) { const BASIC_CONSTRAINTS *bcons = reinterpret_cast(ext); X509V3_add_value_bool("CA", bcons->ca, &extlist); X509V3_add_value_int("pathlen", bcons->pathlen, &extlist); return extlist; } static void *v2i_BASIC_CONSTRAINTS(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *values) { BASIC_CONSTRAINTS *bcons = NULL; if (!(bcons = BASIC_CONSTRAINTS_new())) { return NULL; } for (size_t i = 0; i < sk_CONF_VALUE_num(values); i++) { const CONF_VALUE *val = sk_CONF_VALUE_value(values, i); if (!strcmp(val->name, "CA")) { if (!X509V3_get_value_bool(val, &bcons->ca)) { goto err; } } else if (!strcmp(val->name, "pathlen")) { if (!X509V3_get_value_int(val, &bcons->pathlen)) { goto err; } } else { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NAME); X509V3_conf_err(val); goto err; } } return bcons; err: BASIC_CONSTRAINTS_free(bcons); return NULL; } ================================================ FILE: 
Sources/CNIOBoringSSL/crypto/x509/v3_bitst.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "ext_dat.h" #include "internal.h" static const BIT_STRING_BITNAME ns_cert_type_table[] = { {0, "SSL Client", "client"}, {1, "SSL Server", "server"}, {2, "S/MIME", "email"}, {3, "Object Signing", "objsign"}, {4, "Unused", "reserved"}, {5, "SSL CA", "sslCA"}, {6, "S/MIME CA", "emailCA"}, {7, "Object Signing CA", "objCA"}, {-1, NULL, NULL}}; static const BIT_STRING_BITNAME key_usage_type_table[] = { {0, "Digital Signature", "digitalSignature"}, {1, "Non Repudiation", "nonRepudiation"}, {2, "Key Encipherment", "keyEncipherment"}, {3, "Data Encipherment", "dataEncipherment"}, {4, "Key Agreement", "keyAgreement"}, {5, "Certificate Sign", "keyCertSign"}, {6, "CRL Sign", "cRLSign"}, {7, "Encipher Only", "encipherOnly"}, {8, "Decipher Only", "decipherOnly"}, {-1, NULL, NULL}}; static STACK_OF(CONF_VALUE) *i2v_ASN1_BIT_STRING( const X509V3_EXT_METHOD *method, void *ext, STACK_OF(CONF_VALUE) *ret) { const ASN1_BIT_STRING *bits = reinterpret_cast(ext); const BIT_STRING_BITNAME *bnam; for (bnam = reinterpret_cast(method->usr_data); bnam->lname; bnam++) { if (ASN1_BIT_STRING_get_bit(bits, bnam->bitnum)) { X509V3_add_value(bnam->lname, NULL, &ret); } } return ret; } static void *v2i_ASN1_BIT_STRING(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *nval) { ASN1_BIT_STRING *bs; if (!(bs = ASN1_BIT_STRING_new())) { return NULL; } for (size_t i = 0; i < sk_CONF_VALUE_num(nval); i++) { const CONF_VALUE *val = sk_CONF_VALUE_value(nval, i); const BIT_STRING_BITNAME *bnam; for 
(bnam = reinterpret_cast(method->usr_data); bnam->lname; bnam++) { if (!strcmp(bnam->sname, val->name) || !strcmp(bnam->lname, val->name)) { if (!ASN1_BIT_STRING_set_bit(bs, bnam->bitnum, 1)) { ASN1_BIT_STRING_free(bs); return NULL; } break; } } if (!bnam->lname) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_UNKNOWN_BIT_STRING_ARGUMENT); X509V3_conf_err(val); ASN1_BIT_STRING_free(bs); return NULL; } } return bs; } #define EXT_BITSTRING(nid, table) \ { \ nid, 0, ASN1_ITEM_ref(ASN1_BIT_STRING), 0, 0, 0, 0, 0, 0, \ i2v_ASN1_BIT_STRING, v2i_ASN1_BIT_STRING, NULL, NULL, (void *)(table) \ } const X509V3_EXT_METHOD v3_nscert = EXT_BITSTRING(NID_netscape_cert_type, ns_cert_type_table); const X509V3_EXT_METHOD v3_key_usage = EXT_BITSTRING(NID_key_usage, key_usage_type_table); ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_conf.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ // extension creation utilities #include #include #include #include #include #include #include #include #include #include "../internal.h" #include "internal.h" static int v3_check_critical(const char **value); static int v3_check_generic(const char **value); static X509_EXTENSION *do_ext_nconf(const CONF *conf, const X509V3_CTX *ctx, int ext_nid, int crit, const char *value); static X509_EXTENSION *v3_generic_extension(const char *ext, const char *value, int crit, int type, const X509V3_CTX *ctx); static X509_EXTENSION *do_ext_i2d(const X509V3_EXT_METHOD *method, int ext_nid, int crit, void *ext_struc); static unsigned char *generic_asn1(const char *value, const X509V3_CTX *ctx, size_t *ext_len); X509_EXTENSION *X509V3_EXT_nconf(const CONF *conf, const X509V3_CTX *ctx, const char *name, const char *value) { // If omitted, fill in an empty |X509V3_CTX|. X509V3_CTX ctx_tmp; if (ctx == NULL) { X509V3_set_ctx(&ctx_tmp, NULL, NULL, NULL, NULL, 0); X509V3_set_nconf(&ctx_tmp, conf); ctx = &ctx_tmp; } int crit = v3_check_critical(&value); int ext_type = v3_check_generic(&value); if (ext_type != 0) { return v3_generic_extension(name, value, crit, ext_type, ctx); } X509_EXTENSION *ret = do_ext_nconf(conf, ctx, OBJ_sn2nid(name), crit, value); if (!ret) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_ERROR_IN_EXTENSION); ERR_add_error_data(4, "name=", name, ", value=", value); } return ret; } X509_EXTENSION *X509V3_EXT_nconf_nid(const CONF *conf, const X509V3_CTX *ctx, int ext_nid, const char *value) { // If omitted, fill in an empty |X509V3_CTX|. 
X509V3_CTX ctx_tmp; if (ctx == NULL) { X509V3_set_ctx(&ctx_tmp, NULL, NULL, NULL, NULL, 0); X509V3_set_nconf(&ctx_tmp, conf); ctx = &ctx_tmp; } int crit = v3_check_critical(&value); int ext_type = v3_check_generic(&value); if (ext_type != 0) { return v3_generic_extension(OBJ_nid2sn(ext_nid), value, crit, ext_type, ctx); } return do_ext_nconf(conf, ctx, ext_nid, crit, value); } // CONF *conf: Config file // char *value: Value static X509_EXTENSION *do_ext_nconf(const CONF *conf, const X509V3_CTX *ctx, int ext_nid, int crit, const char *value) { const X509V3_EXT_METHOD *method; X509_EXTENSION *ext; const STACK_OF(CONF_VALUE) *nval; STACK_OF(CONF_VALUE) *nval_owned = NULL; void *ext_struc; if (ext_nid == NID_undef) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_UNKNOWN_EXTENSION_NAME); return NULL; } if (!(method = X509V3_EXT_get_nid(ext_nid))) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_UNKNOWN_EXTENSION); return NULL; } // Now get internal extension representation based on type if (method->v2i) { if (*value == '@') { // TODO(davidben): This is the only place where |X509V3_EXT_nconf|'s // |conf| parameter is used. All other codepaths use the copy inside // |ctx|. Should this be switched and then the parameter ignored? if (conf == NULL) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_NO_CONFIG_DATABASE); return NULL; } nval = NCONF_get_section(conf, value + 1); } else { nval_owned = X509V3_parse_list(value); nval = nval_owned; } if (nval == NULL || sk_CONF_VALUE_num(nval) <= 0) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_EXTENSION_STRING); ERR_add_error_data(4, "name=", OBJ_nid2sn(ext_nid), ",section=", value); sk_CONF_VALUE_pop_free(nval_owned, X509V3_conf_free); return NULL; } ext_struc = method->v2i(method, ctx, nval); sk_CONF_VALUE_pop_free(nval_owned, X509V3_conf_free); if (!ext_struc) { return NULL; } } else if (method->s2i) { if (!(ext_struc = method->s2i(method, ctx, value))) { return NULL; } } else if (method->r2i) { // TODO(davidben): Should this check be removed? 
This matches OpenSSL, but // r2i-based extensions do not necessarily require a config database. The // two built-in extensions only use it some of the time, and already handle // |X509V3_get_section| returning NULL. if (!ctx->db) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_NO_CONFIG_DATABASE); return NULL; } if (!(ext_struc = method->r2i(method, ctx, value))) { return NULL; } } else { OPENSSL_PUT_ERROR(X509V3, X509V3_R_EXTENSION_SETTING_NOT_SUPPORTED); ERR_add_error_data(2, "name=", OBJ_nid2sn(ext_nid)); return NULL; } ext = do_ext_i2d(method, ext_nid, crit, ext_struc); ASN1_item_free(reinterpret_cast(ext_struc), ASN1_ITEM_ptr(method->it)); return ext; } static X509_EXTENSION *do_ext_i2d(const X509V3_EXT_METHOD *method, int ext_nid, int crit, void *ext_struc) { // Convert the extension's internal representation to DER. unsigned char *ext_der = NULL; int ext_len = ASN1_item_i2d(reinterpret_cast(ext_struc), &ext_der, ASN1_ITEM_ptr(method->it)); if (ext_len < 0) { return NULL; } ASN1_OCTET_STRING *ext_oct = ASN1_OCTET_STRING_new(); if (ext_oct == NULL) { OPENSSL_free(ext_der); return NULL; } ASN1_STRING_set0(ext_oct, ext_der, ext_len); X509_EXTENSION *ext = X509_EXTENSION_create_by_NID(NULL, ext_nid, crit, ext_oct); ASN1_OCTET_STRING_free(ext_oct); return ext; } // Given an internal structure, nid and critical flag create an extension X509_EXTENSION *X509V3_EXT_i2d(int ext_nid, int crit, void *ext_struc) { const X509V3_EXT_METHOD *method; if (!(method = X509V3_EXT_get_nid(ext_nid))) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_UNKNOWN_EXTENSION); return NULL; } return do_ext_i2d(method, ext_nid, crit, ext_struc); } // Check the extension string for critical flag static int v3_check_critical(const char **value) { const char *p = *value; if ((strlen(p) < 9) || strncmp(p, "critical,", 9)) { return 0; } p += 9; while (OPENSSL_isspace((unsigned char)*p)) { p++; } *value = p; return 1; } // Check extension string for generic extension and return the type static int v3_check_generic(const 
char **value) {
  int gen_type = 0;
  const char *p = *value;
  // "DER:" selects raw hex bytes; "ASN1:" selects the ASN1_generate_v3
  // mini-language.
  if ((strlen(p) >= 4) && !strncmp(p, "DER:", 4)) {
    p += 4;
    gen_type = 1;
  } else if ((strlen(p) >= 5) && !strncmp(p, "ASN1:", 5)) {
    p += 5;
    gen_type = 2;
  } else {
    return 0;
  }
  while (OPENSSL_isspace((unsigned char)*p)) {
    p++;
  }
  *value = p;
  return gen_type;
}

// Create a generic extension: for now just handle DER type
static X509_EXTENSION *v3_generic_extension(const char *ext, const char *value,
                                            int crit, int gen_type,
                                            const X509V3_CTX *ctx) {
  unsigned char *ext_der = NULL;
  size_t ext_len = 0;
  ASN1_OBJECT *obj = NULL;
  ASN1_OCTET_STRING *oct = NULL;
  X509_EXTENSION *extension = NULL;
  if (!(obj = OBJ_txt2obj(ext, 0))) {
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_EXTENSION_NAME_ERROR);
    ERR_add_error_data(2, "name=", ext);
    goto err;
  }

  if (gen_type == 1) {
    ext_der = x509v3_hex_to_bytes(value, &ext_len);
  } else if (gen_type == 2) {
    ext_der = generic_asn1(value, ctx, &ext_len);
  }
  if (ext_der == NULL) {
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_EXTENSION_VALUE_ERROR);
    ERR_add_error_data(2, "value=", value);
    goto err;
  }
  // |ASN1_STRING_set0| below takes an int length.
  if (ext_len > INT_MAX) {
    OPENSSL_PUT_ERROR(X509V3, ERR_R_OVERFLOW);
    goto err;
  }

  oct = ASN1_OCTET_STRING_new();
  if (oct == NULL) {
    goto err;
  }
  ASN1_STRING_set0(oct, ext_der, (int)ext_len);
  ext_der = NULL;

  extension = X509_EXTENSION_create_by_OBJ(NULL, obj, crit, oct);

err:
  ASN1_OBJECT_free(obj);
  ASN1_OCTET_STRING_free(oct);
  OPENSSL_free(ext_der);
  return extension;
}

// Evaluates an ASN1_generate_v3 expression and returns its DER encoding,
// writing the length to |*ext_len|. Returns NULL on failure.
static unsigned char *generic_asn1(const char *value, const X509V3_CTX *ctx,
                                   size_t *ext_len) {
  ASN1_TYPE *typ = ASN1_generate_v3(value, ctx);
  if (typ == NULL) {
    return NULL;
  }
  unsigned char *ext_der = NULL;
  int len = i2d_ASN1_TYPE(typ, &ext_der);
  ASN1_TYPE_free(typ);
  if (len < 0) {
    return NULL;
  }
  *ext_len = len;
  return ext_der;
}

// This is the main function: add a bunch of extensions based on a config
// file section to an extension STACK.
int X509V3_EXT_add_nconf_sk(const CONF *conf, const X509V3_CTX *ctx,
                            const char *section,
                            STACK_OF(X509_EXTENSION) **sk) {
  const STACK_OF(CONF_VALUE) *nval = NCONF_get_section(conf, section);
  if (nval == NULL) {
    return 0;
  }
  for (size_t i = 0; i < sk_CONF_VALUE_num(nval); i++) {
    const CONF_VALUE *val = sk_CONF_VALUE_value(nval, i);
    X509_EXTENSION *ext = X509V3_EXT_nconf(conf, ctx, val->name, val->value);
    // A NULL |sk| means "validate only": the extension is parsed then dropped.
    int ok = ext != NULL &&  //
             (sk == NULL || X509v3_add_ext(sk, ext, -1) != NULL);
    X509_EXTENSION_free(ext);
    if (!ok) {
      return 0;
    }
  }
  return 1;
}

// Convenience functions to add extensions to a certificate, CRL and request
int X509V3_EXT_add_nconf(const CONF *conf, const X509V3_CTX *ctx,
                         const char *section, X509 *cert) {
  STACK_OF(X509_EXTENSION) **sk = NULL;
  if (cert) {
    sk = &cert->cert_info->extensions;
  }
  return X509V3_EXT_add_nconf_sk(conf, ctx, section, sk);
}

// Same as above but for a CRL
int X509V3_EXT_CRL_add_nconf(const CONF *conf, const X509V3_CTX *ctx,
                             const char *section, X509_CRL *crl) {
  STACK_OF(X509_EXTENSION) **sk = NULL;
  if (crl) {
    sk = &crl->crl->extensions;
  }
  return X509V3_EXT_add_nconf_sk(conf, ctx, section, sk);
}

// Add extensions to certificate request
int X509V3_EXT_REQ_add_nconf(const CONF *conf, const X509V3_CTX *ctx,
                             const char *section, X509_REQ *req) {
  STACK_OF(X509_EXTENSION) *extlist = NULL, **sk = NULL;
  int i;
  if (req) {
    sk = &extlist;
  }
  i = X509V3_EXT_add_nconf_sk(conf, ctx, section, sk);
  if (!i || !sk) {
    return i;
  }
  i = X509_REQ_add_extensions(req, extlist);
  sk_X509_EXTENSION_pop_free(extlist, X509_EXTENSION_free);
  return i;
}

// Config database functions
const STACK_OF(CONF_VALUE) *X509V3_get_section(const X509V3_CTX *ctx,
                                               const char *section) {
  if (ctx->db == NULL) {
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_OPERATION_NOT_DEFINED);
    return NULL;
  }
  return NCONF_get_section(ctx->db, section);
}

void X509V3_set_nconf(X509V3_CTX *ctx, const CONF *conf) { ctx->db = conf; }

void X509V3_set_ctx(X509V3_CTX *ctx, const X509 *issuer, const X509 *subj,
                    const X509_REQ *req, const X509_CRL *crl, int flags) {
  OPENSSL_memset(ctx, 0, sizeof(*ctx));
  ctx->issuer_cert = issuer;
  ctx->subject_cert = subj;
  ctx->crl = crl;
  ctx->subject_req = req;
  ctx->flags = flags;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/v3_cpols.cc
================================================
/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the #include targets below were lost in extraction
// (angle-bracket contents stripped); restore from the upstream file before
// building.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "ext_dat.h"
#include "internal.h"

// Certificate policies extension support: this one is a bit complex...

static int i2r_certpol(const X509V3_EXT_METHOD *method, void *ext, BIO *out,
                       int indent);
static void *r2i_certpol(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx,
                         const char *value);
static void print_qualifiers(BIO *out, const STACK_OF(POLICYQUALINFO) *quals,
                             int indent);
static void print_notice(BIO *out, const USERNOTICE *notice, int indent);
static POLICYINFO *policy_section(const X509V3_CTX *ctx,
                                  const STACK_OF(CONF_VALUE) *polstrs,
                                  int ia5org);
static POLICYQUALINFO *notice_section(const X509V3_CTX *ctx,
                                      const STACK_OF(CONF_VALUE) *unot,
                                      int ia5org);
static int nref_nos(STACK_OF(ASN1_INTEGER) *nnums,
                    const STACK_OF(CONF_VALUE) *nos);

const X509V3_EXT_METHOD v3_cpols = {
    NID_certificate_policies,
    0,
    ASN1_ITEM_ref(CERTIFICATEPOLICIES),
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    i2r_certpol,
    r2i_certpol,
    NULL,
};

DECLARE_ASN1_ITEM(POLICYINFO)
DECLARE_ASN1_ITEM(POLICYQUALINFO)
DECLARE_ASN1_ITEM(USERNOTICE)
DECLARE_ASN1_ITEM(NOTICEREF)

ASN1_ITEM_TEMPLATE(CERTIFICATEPOLICIES) = ASN1_EX_TEMPLATE_TYPE(
    ASN1_TFLG_SEQUENCE_OF, 0, CERTIFICATEPOLICIES, POLICYINFO)
ASN1_ITEM_TEMPLATE_END(CERTIFICATEPOLICIES)
IMPLEMENT_ASN1_FUNCTIONS_const(CERTIFICATEPOLICIES)

ASN1_SEQUENCE(POLICYINFO) = {
    ASN1_SIMPLE(POLICYINFO, policyid, ASN1_OBJECT),
    ASN1_SEQUENCE_OF_OPT(POLICYINFO, qualifiers, POLICYQUALINFO),
} ASN1_SEQUENCE_END(POLICYINFO)

IMPLEMENT_ASN1_ALLOC_FUNCTIONS(POLICYINFO)

ASN1_ADB_TEMPLATE(policydefault) =
    ASN1_SIMPLE(POLICYQUALINFO, d.other, ASN1_ANY);

ASN1_ADB(POLICYQUALINFO) = {
    ADB_ENTRY(NID_id_qt_cps,
              ASN1_SIMPLE(POLICYQUALINFO, d.cpsuri, ASN1_IA5STRING)),
    ADB_ENTRY(NID_id_qt_unotice,
              ASN1_SIMPLE(POLICYQUALINFO, d.usernotice, USERNOTICE)),
} ASN1_ADB_END(POLICYQUALINFO, 0, pqualid, 0, &policydefault_tt, NULL);

ASN1_SEQUENCE(POLICYQUALINFO) = {
    ASN1_SIMPLE(POLICYQUALINFO, pqualid, ASN1_OBJECT),
    ASN1_ADB_OBJECT(POLICYQUALINFO),
} ASN1_SEQUENCE_END(POLICYQUALINFO)

IMPLEMENT_ASN1_ALLOC_FUNCTIONS(POLICYQUALINFO)

ASN1_SEQUENCE(USERNOTICE) = {
    ASN1_OPT(USERNOTICE, noticeref, NOTICEREF),
    ASN1_OPT(USERNOTICE, exptext, DISPLAYTEXT),
} ASN1_SEQUENCE_END(USERNOTICE)

IMPLEMENT_ASN1_ALLOC_FUNCTIONS(USERNOTICE)

ASN1_SEQUENCE(NOTICEREF) = {
    ASN1_SIMPLE(NOTICEREF, organization, DISPLAYTEXT),
    ASN1_SEQUENCE_OF(NOTICEREF, noticenos, ASN1_INTEGER),
} ASN1_SEQUENCE_END(NOTICEREF)

IMPLEMENT_ASN1_ALLOC_FUNCTIONS(NOTICEREF)

// Parses the certificatePolicies value: a comma-separated list of policy
// OIDs, "@section" references to policy sections, and the "ia5org" modifier.
static void *r2i_certpol(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx,
                         const char *value) {
  STACK_OF(POLICYINFO) *pols = sk_POLICYINFO_new_null();
  if (pols == NULL) {
    return NULL;
  }
  STACK_OF(CONF_VALUE) *vals = X509V3_parse_list(value);
  // Extra scope so that the goto below does not jump over initializations
  // (this file compiles as C++).
  {
    if (vals == NULL) {
      OPENSSL_PUT_ERROR(X509V3, ERR_R_X509V3_LIB);
      goto err;
    }
    int ia5org = 0;
    for (size_t i = 0; i < sk_CONF_VALUE_num(vals); i++) {
      const CONF_VALUE *cnf = sk_CONF_VALUE_value(vals, i);
      // Each element must be a bare name (no value part).
      if (cnf->value || !cnf->name) {
        OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_POLICY_IDENTIFIER);
        X509V3_conf_err(cnf);
        goto err;
      }
      POLICYINFO *pol;
      const char *pstr = cnf->name;
      if (!strcmp(pstr, "ia5org")) {
        // Modifier affecting subsequent policy sections, not a policy itself.
        ia5org = 1;
        continue;
      } else if (*pstr == '@') {
        const STACK_OF(CONF_VALUE) *polsect = X509V3_get_section(ctx, pstr + 1);
        if (!polsect) {
          OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_SECTION);
          X509V3_conf_err(cnf);
          goto err;
        }
        pol = policy_section(ctx, polsect, ia5org);
        if (!pol) {
          goto err;
        }
      } else {
        ASN1_OBJECT *pobj = OBJ_txt2obj(cnf->name, 0);
        if (pobj == NULL) {
          OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_OBJECT_IDENTIFIER);
          X509V3_conf_err(cnf);
          goto err;
        }
        pol = POLICYINFO_new();
        if (pol == NULL) {
          ASN1_OBJECT_free(pobj);
          goto err;
        }
        pol->policyid = pobj;
      }
      if (!sk_POLICYINFO_push(pols, pol)) {
        POLICYINFO_free(pol);
        goto err;
      }
    }
    sk_CONF_VALUE_pop_free(vals, X509V3_conf_free);
    return pols;
  }

err:
  sk_CONF_VALUE_pop_free(vals, X509V3_conf_free);
  sk_POLICYINFO_pop_free(pols, POLICYINFO_free);
  return NULL;
}

// Builds one POLICYINFO from a config section: a mandatory policyIdentifier
// plus optional CPS and userNotice qualifiers.
static POLICYINFO *policy_section(const X509V3_CTX *ctx,
                                  const STACK_OF(CONF_VALUE) *polstrs,
                                  int ia5org) {
  POLICYINFO *pol;
  POLICYQUALINFO *qual;
  if (!(pol = POLICYINFO_new())) {
    goto err;
  }
  for (size_t i = 0; i < sk_CONF_VALUE_num(polstrs); i++) {
    const CONF_VALUE *cnf = sk_CONF_VALUE_value(polstrs, i);
    if (!strcmp(cnf->name, "policyIdentifier")) {
      ASN1_OBJECT *pobj;
      if (!(pobj = OBJ_txt2obj(cnf->value, 0))) {
        OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_OBJECT_IDENTIFIER);
        X509V3_conf_err(cnf);
        goto err;
      }
      pol->policyid = pobj;
    } else if (x509v3_conf_name_matches(cnf->name, "CPS")) {
      if (!pol->qualifiers) {
        pol->qualifiers = sk_POLICYQUALINFO_new_null();
      }
      if (!(qual = POLICYQUALINFO_new())) {
        goto err;
      }
      // The stack owns |qual| from here; later failures are cleaned up via
      // POLICYINFO_free(pol).
      if (!sk_POLICYQUALINFO_push(pol->qualifiers, qual)) {
        goto err;
      }
      qual->pqualid = OBJ_nid2obj(NID_id_qt_cps);
      if (qual->pqualid == NULL) {
        OPENSSL_PUT_ERROR(X509V3, ERR_R_INTERNAL_ERROR);
        goto err;
      }
      qual->d.cpsuri = ASN1_IA5STRING_new();
      if (qual->d.cpsuri == NULL) {
        goto err;
      }
      if (!ASN1_STRING_set(qual->d.cpsuri, cnf->value, strlen(cnf->value))) {
        goto err;
      }
    } else if (x509v3_conf_name_matches(cnf->name, "userNotice")) {
      if (*cnf->value != '@') {
        OPENSSL_PUT_ERROR(X509V3, X509V3_R_EXPECTED_A_SECTION_NAME);
        X509V3_conf_err(cnf);
        goto err;
      }
      const STACK_OF(CONF_VALUE) *unot =
X509V3_parse_list(cnf->value); if (!nos || !sk_CONF_VALUE_num(nos)) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NUMBERS); X509V3_conf_err(cnf); sk_CONF_VALUE_pop_free(nos, X509V3_conf_free); goto err; } int ret = nref_nos(nref->noticenos, nos); sk_CONF_VALUE_pop_free(nos, X509V3_conf_free); if (!ret) { goto err; } } else { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_OPTION); X509V3_conf_err(cnf); goto err; } } if (notice->noticeref && (!notice->noticeref->noticenos || !notice->noticeref->organization)) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_NEED_ORGANIZATION_AND_NUMBERS); goto err; } return qual; err: POLICYQUALINFO_free(qual); return NULL; } static int nref_nos(STACK_OF(ASN1_INTEGER) *nnums, const STACK_OF(CONF_VALUE) *nos) { for (size_t i = 0; i < sk_CONF_VALUE_num(nos); i++) { const CONF_VALUE *cnf = sk_CONF_VALUE_value(nos, i); ASN1_INTEGER *aint = s2i_ASN1_INTEGER(NULL, cnf->name); if (aint == NULL) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NUMBER); return 0; } if (!sk_ASN1_INTEGER_push(nnums, aint)) { ASN1_INTEGER_free(aint); return 0; } } return 1; } static int i2r_certpol(const X509V3_EXT_METHOD *method, void *ext, BIO *out, int indent) { const STACK_OF(POLICYINFO) *pol = reinterpret_cast(ext); // First print out the policy OIDs for (size_t i = 0; i < sk_POLICYINFO_num(pol); i++) { const POLICYINFO *pinfo = sk_POLICYINFO_value(pol, i); BIO_printf(out, "%*sPolicy: ", indent, ""); i2a_ASN1_OBJECT(out, pinfo->policyid); BIO_puts(out, "\n"); if (pinfo->qualifiers) { print_qualifiers(out, pinfo->qualifiers, indent + 2); } } return 1; } static void print_qualifiers(BIO *out, const STACK_OF(POLICYQUALINFO) *quals, int indent) { for (size_t i = 0; i < sk_POLICYQUALINFO_num(quals); i++) { const POLICYQUALINFO *qualinfo = sk_POLICYQUALINFO_value(quals, i); switch (OBJ_obj2nid(qualinfo->pqualid)) { case NID_id_qt_cps: BIO_printf(out, "%*sCPS: %.*s\n", indent, "", qualinfo->d.cpsuri->length, qualinfo->d.cpsuri->data); break; case NID_id_qt_unotice: BIO_printf(out, 
"%*sUser Notice:\n", indent, ""); print_notice(out, qualinfo->d.usernotice, indent + 2); break; default: BIO_printf(out, "%*sUnknown Qualifier: ", indent + 2, ""); i2a_ASN1_OBJECT(out, qualinfo->pqualid); BIO_puts(out, "\n"); break; } } } static void print_notice(BIO *out, const USERNOTICE *notice, int indent) { if (notice->noticeref) { NOTICEREF *ref; ref = notice->noticeref; BIO_printf(out, "%*sOrganization: %.*s\n", indent, "", ref->organization->length, ref->organization->data); BIO_printf(out, "%*sNumber%s: ", indent, "", sk_ASN1_INTEGER_num(ref->noticenos) > 1 ? "s" : ""); for (size_t i = 0; i < sk_ASN1_INTEGER_num(ref->noticenos); i++) { ASN1_INTEGER *num; char *tmp; num = sk_ASN1_INTEGER_value(ref->noticenos, i); if (i) { BIO_puts(out, ", "); } if (num == NULL) { BIO_puts(out, "(null)"); } else { tmp = i2s_ASN1_INTEGER(NULL, num); if (tmp == NULL) { return; } BIO_puts(out, tmp); OPENSSL_free(tmp); } } BIO_puts(out, "\n"); } if (notice->exptext) { BIO_printf(out, "%*sExplicit Text: %.*s\n", indent, "", notice->exptext->length, notice->exptext->data); } } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_crld.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
 You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the #include targets below were lost in extraction
// (angle-bracket contents stripped); restore from the upstream file before
// building.
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "ext_dat.h"
#include "internal.h"

static void *v2i_crld(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx,
                      const STACK_OF(CONF_VALUE) *nval);
static int i2r_crldp(const X509V3_EXT_METHOD *method, void *pcrldp, BIO *out,
                     int indent);

const X509V3_EXT_METHOD v3_crld = {
    NID_crl_distribution_points,
    0,
    ASN1_ITEM_ref(CRL_DIST_POINTS),
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    v2i_crld,
    i2r_crldp,
    0,
    NULL,
};

const X509V3_EXT_METHOD v3_freshest_crl = {
    NID_freshest_crl,
    0,
    ASN1_ITEM_ref(CRL_DIST_POINTS),
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    v2i_crld,
    i2r_crldp,
    0,
    NULL,
};

// Resolves |sect| to a GENERAL_NAMES stack: "@name" reads a config section,
// anything else is parsed as an inline name list.
static STACK_OF(GENERAL_NAME) *gnames_from_sectname(const X509V3_CTX *ctx,
                                                    char *sect) {
  const STACK_OF(CONF_VALUE) *gnsect;
  STACK_OF(CONF_VALUE) *gnsect_owned = NULL;
  if (*sect == '@') {
    gnsect = X509V3_get_section(ctx, sect + 1);
  } else {
    gnsect_owned = X509V3_parse_list(sect);
    gnsect = gnsect_owned;
  }
  if (!gnsect) {
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_SECTION_NOT_FOUND);
    return NULL;
  }
  STACK_OF(GENERAL_NAME) *gens = v2i_GENERAL_NAMES(NULL, ctx, gnsect);
  sk_CONF_VALUE_pop_free(gnsect_owned, X509V3_conf_free);
  return gens;
}

// set_dist_point_name decodes a DistributionPointName from |cnf| and writes the
// result in |*pdp|. It returns 1 on success, -1 on error, and 0 if |cnf| used
// an unrecognized input type. The zero return can be used by callers to support
// additional syntax.
static int set_dist_point_name(DIST_POINT_NAME **pdp, const X509V3_CTX *ctx,
                               const CONF_VALUE *cnf) {
  STACK_OF(GENERAL_NAME) *fnm = NULL;
  STACK_OF(X509_NAME_ENTRY) *rnm = NULL;
  if (!strncmp(cnf->name, "fullname", 9)) {
    // If |cnf| comes from |X509V3_parse_list|, which is possible for a v2i
    // function, |cnf->value| may be NULL.
    if (cnf->value == NULL) {
      OPENSSL_PUT_ERROR(X509V3, X509V3_R_MISSING_VALUE);
      return -1;
    }
    fnm = gnames_from_sectname(ctx, cnf->value);
    if (!fnm) {
      goto err;
    }
  } else if (!strcmp(cnf->name, "relativename")) {
    // If |cnf| comes from |X509V3_parse_list|, which is possible for a v2i
    // function, |cnf->value| may be NULL.
    if (cnf->value == NULL) {
      OPENSSL_PUT_ERROR(X509V3, X509V3_R_MISSING_VALUE);
      return -1;
    }
    const STACK_OF(CONF_VALUE) *dnsect = X509V3_get_section(ctx, cnf->value);
    if (!dnsect) {
      OPENSSL_PUT_ERROR(X509V3, X509V3_R_SECTION_NOT_FOUND);
      return -1;
    }
    X509_NAME *nm = X509_NAME_new();
    if (!nm) {
      return -1;
    }
    int ret = X509V3_NAME_from_section(nm, dnsect, MBSTRING_ASC);
    // Steal the entry stack out of the temporary X509_NAME.
    rnm = nm->entries;
    nm->entries = NULL;
    X509_NAME_free(nm);
    if (!ret || sk_X509_NAME_ENTRY_num(rnm) <= 0) {
      goto err;
    }
    // There can only be one RDN in nameRelativeToCRLIssuer.
    if (sk_X509_NAME_ENTRY_value(rnm, sk_X509_NAME_ENTRY_num(rnm) - 1)->set) {
      OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_MULTIPLE_RDNS);
      goto err;
    }
  } else {
    return 0;
  }

  if (*pdp) {
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_DISTPOINT_ALREADY_SET);
    goto err;
  }

  *pdp = DIST_POINT_NAME_new();
  if (!*pdp) {
    goto err;
  }
  if (fnm) {
    (*pdp)->type = 0;
    (*pdp)->name.fullname = fnm;
  } else {
    (*pdp)->type = 1;
    (*pdp)->name.relativename = rnm;
  }

  return 1;

err:
  sk_GENERAL_NAME_pop_free(fnm, GENERAL_NAME_free);
  sk_X509_NAME_ENTRY_pop_free(rnm, X509_NAME_ENTRY_free);
  return -1;
}

static const BIT_STRING_BITNAME reason_flags[] = {
    {0, "Unused", "unused"},
    {1, "Key Compromise", "keyCompromise"},
    {2, "CA Compromise", "CACompromise"},
    {3, "Affiliation Changed", "affiliationChanged"},
    {4, "Superseded", "superseded"},
    {5, "Cessation Of Operation", "cessationOfOperation"},
    {6, "Certificate Hold", "certificateHold"},
    {7, "Privilege Withdrawn", "privilegeWithdrawn"},
    {8, "AA Compromise", "AACompromise"},
    {-1, NULL, NULL}};

// Parses a comma-separated list of reason-flag short names into a fresh
// ASN1_BIT_STRING at |*preas|. Fails if |*preas| is already set.
static int set_reasons(ASN1_BIT_STRING **preas, const char *value) {
  if (*preas) {
    // Duplicate "reasons" or "onlysomereasons" key.
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_VALUE);
    return 0;
  }
  int ret = 0;
  STACK_OF(CONF_VALUE) *rsk = X509V3_parse_list(value);
  if (!rsk) {
    return 0;
  }
  for (size_t i = 0; i < sk_CONF_VALUE_num(rsk); i++) {
    const char *bnam = sk_CONF_VALUE_value(rsk, i)->name;
    if (!*preas) {
      *preas = ASN1_BIT_STRING_new();
      if (!*preas) {
        goto err;
      }
    }
    const BIT_STRING_BITNAME *pbn;
    for (pbn = reason_flags; pbn->lname; pbn++) {
      if (!strcmp(pbn->sname, bnam)) {
        if (!ASN1_BIT_STRING_set_bit(*preas, pbn->bitnum, 1)) {
          goto err;
        }
        break;
      }
    }
    // Terminator reached means the name matched no known reason flag.
    if (!pbn->lname) {
      goto err;
    }
  }
  ret = 1;

err:
  sk_CONF_VALUE_pop_free(rsk, X509V3_conf_free);
  return ret;
}

// Prints the set reason-flag long names as a comma-separated list.
static int print_reasons(BIO *out, const char *rname, ASN1_BIT_STRING *rflags,
                         int indent) {
  int first = 1;
  const BIT_STRING_BITNAME *pbn;
  BIO_printf(out, "%*s%s:\n%*s", indent, "", rname, indent + 2, "");
  for (pbn = reason_flags; pbn->lname; pbn++) {
    if (ASN1_BIT_STRING_get_bit(rflags, pbn->bitnum)) {
      if (first) {
        first = 0;
      } else {
        BIO_puts(out, ", ");
      }
      BIO_puts(out, pbn->lname);
    }
  }
  if (first) {
    BIO_puts(out, "\n");
  } else {
    BIO_puts(out, "\n");
  }
  return 1;
}

// Builds one DIST_POINT from a config section (fullname/relativename,
// reasons, CRLissuer keys).
static DIST_POINT *crldp_from_section(const X509V3_CTX *ctx,
                                      const STACK_OF(CONF_VALUE) *nval) {
  DIST_POINT *point = NULL;
  point = DIST_POINT_new();
  if (!point) {
    goto err;
  }
  for (size_t i = 0; i < sk_CONF_VALUE_num(nval); i++) {
    const CONF_VALUE *cnf = sk_CONF_VALUE_value(nval, i);
    int ret = set_dist_point_name(&point->distpoint, ctx, cnf);
    if (ret > 0) {
      continue;
    }
    if (ret < 0) {
      goto err;
    }
    if (!strcmp(cnf->name, "reasons")) {
      if (!set_reasons(&point->reasons, cnf->value)) {
        goto err;
      }
    } else if (!strcmp(cnf->name, "CRLissuer")) {
      GENERAL_NAMES_free(point->CRLissuer);
      point->CRLissuer = gnames_from_sectname(ctx, cnf->value);
      if (!point->CRLissuer) {
        goto err;
      }
    }
  }

  return point;

err:
  DIST_POINT_free(point);
  return NULL;
}

// Parses the crlDistributionPoints value: each element is either a section
// reference (full DistributionPoint) or a single GENERAL_NAME shorthand.
static void *v2i_crld(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx,
                      const STACK_OF(CONF_VALUE) *nval) {
  STACK_OF(DIST_POINT) *crld = NULL;
GENERAL_NAMES *gens = NULL; GENERAL_NAME *gen = NULL; if (!(crld = sk_DIST_POINT_new_null())) { goto err; } for (size_t i = 0; i < sk_CONF_VALUE_num(nval); i++) { DIST_POINT *point; const CONF_VALUE *cnf = sk_CONF_VALUE_value(nval, i); if (!cnf->value) { const STACK_OF(CONF_VALUE) *dpsect = X509V3_get_section(ctx, cnf->name); if (!dpsect) { goto err; } point = crldp_from_section(ctx, dpsect); if (!point) { goto err; } if (!sk_DIST_POINT_push(crld, point)) { DIST_POINT_free(point); goto err; } } else { if (!(gen = v2i_GENERAL_NAME(method, ctx, cnf))) { goto err; } if (!(gens = GENERAL_NAMES_new())) { goto err; } if (!sk_GENERAL_NAME_push(gens, gen)) { goto err; } gen = NULL; if (!(point = DIST_POINT_new())) { goto err; } if (!sk_DIST_POINT_push(crld, point)) { DIST_POINT_free(point); goto err; } if (!(point->distpoint = DIST_POINT_NAME_new())) { goto err; } point->distpoint->name.fullname = gens; point->distpoint->type = 0; gens = NULL; } } return crld; err: GENERAL_NAME_free(gen); GENERAL_NAMES_free(gens); sk_DIST_POINT_pop_free(crld, DIST_POINT_free); return NULL; } static int dpn_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it, void *exarg) { DIST_POINT_NAME *dpn = (DIST_POINT_NAME *)*pval; switch (operation) { case ASN1_OP_NEW_POST: dpn->dpname = NULL; break; case ASN1_OP_FREE_POST: X509_NAME_free(dpn->dpname); break; } return 1; } ASN1_CHOICE_cb(DIST_POINT_NAME, dpn_cb) = { ASN1_IMP_SEQUENCE_OF(DIST_POINT_NAME, name.fullname, GENERAL_NAME, 0), ASN1_IMP_SET_OF(DIST_POINT_NAME, name.relativename, X509_NAME_ENTRY, 1), } ASN1_CHOICE_END_cb(DIST_POINT_NAME, DIST_POINT_NAME, type) IMPLEMENT_ASN1_ALLOC_FUNCTIONS(DIST_POINT_NAME) ASN1_SEQUENCE(DIST_POINT) = { ASN1_EXP_OPT(DIST_POINT, distpoint, DIST_POINT_NAME, 0), ASN1_IMP_OPT(DIST_POINT, reasons, ASN1_BIT_STRING, 1), ASN1_IMP_SEQUENCE_OF_OPT(DIST_POINT, CRLissuer, GENERAL_NAME, 2), } ASN1_SEQUENCE_END(DIST_POINT) IMPLEMENT_ASN1_ALLOC_FUNCTIONS(DIST_POINT) ASN1_ITEM_TEMPLATE(CRL_DIST_POINTS) = 
ASN1_EX_TEMPLATE_TYPE( ASN1_TFLG_SEQUENCE_OF, 0, CRLDistributionPoints, DIST_POINT) ASN1_ITEM_TEMPLATE_END(CRL_DIST_POINTS) IMPLEMENT_ASN1_FUNCTIONS(CRL_DIST_POINTS) ASN1_SEQUENCE(ISSUING_DIST_POINT) = { ASN1_EXP_OPT(ISSUING_DIST_POINT, distpoint, DIST_POINT_NAME, 0), ASN1_IMP_OPT(ISSUING_DIST_POINT, onlyuser, ASN1_FBOOLEAN, 1), ASN1_IMP_OPT(ISSUING_DIST_POINT, onlyCA, ASN1_FBOOLEAN, 2), ASN1_IMP_OPT(ISSUING_DIST_POINT, onlysomereasons, ASN1_BIT_STRING, 3), ASN1_IMP_OPT(ISSUING_DIST_POINT, indirectCRL, ASN1_FBOOLEAN, 4), ASN1_IMP_OPT(ISSUING_DIST_POINT, onlyattr, ASN1_FBOOLEAN, 5), } ASN1_SEQUENCE_END(ISSUING_DIST_POINT) IMPLEMENT_ASN1_FUNCTIONS(ISSUING_DIST_POINT) static int i2r_idp(const X509V3_EXT_METHOD *method, void *pidp, BIO *out, int indent); static void *v2i_idp(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *nval); const X509V3_EXT_METHOD v3_idp = { NID_issuing_distribution_point, X509V3_EXT_MULTILINE, ASN1_ITEM_ref(ISSUING_DIST_POINT), 0, 0, 0, 0, 0, 0, 0, v2i_idp, i2r_idp, 0, NULL, }; static void *v2i_idp(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *nval) { ISSUING_DIST_POINT *idp = ISSUING_DIST_POINT_new(); if (!idp) { goto err; } for (size_t i = 0; i < sk_CONF_VALUE_num(nval); i++) { const CONF_VALUE *cnf = sk_CONF_VALUE_value(nval, i); const char *name = cnf->name; const char *val = cnf->value; int ret = set_dist_point_name(&idp->distpoint, ctx, cnf); if (ret > 0) { continue; } if (ret < 0) { goto err; } if (!strcmp(name, "onlyuser")) { if (!X509V3_get_value_bool(cnf, &idp->onlyuser)) { goto err; } } else if (!strcmp(name, "onlyCA")) { if (!X509V3_get_value_bool(cnf, &idp->onlyCA)) { goto err; } } else if (!strcmp(name, "onlyAA")) { if (!X509V3_get_value_bool(cnf, &idp->onlyattr)) { goto err; } } else if (!strcmp(name, "indirectCRL")) { if (!X509V3_get_value_bool(cnf, &idp->indirectCRL)) { goto err; } } else if (!strcmp(name, "onlysomereasons")) { if 
(!set_reasons(&idp->onlysomereasons, val)) { goto err; } } else { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NAME); X509V3_conf_err(cnf); goto err; } } return idp; err: ISSUING_DIST_POINT_free(idp); return NULL; } static int print_gens(BIO *out, STACK_OF(GENERAL_NAME) *gens, int indent) { size_t i; for (i = 0; i < sk_GENERAL_NAME_num(gens); i++) { BIO_printf(out, "%*s", indent + 2, ""); GENERAL_NAME_print(out, sk_GENERAL_NAME_value(gens, i)); BIO_puts(out, "\n"); } return 1; } static int print_distpoint(BIO *out, DIST_POINT_NAME *dpn, int indent) { if (dpn->type == 0) { BIO_printf(out, "%*sFull Name:\n", indent, ""); print_gens(out, dpn->name.fullname, indent); } else { X509_NAME ntmp; ntmp.entries = dpn->name.relativename; BIO_printf(out, "%*sRelative Name:\n%*s", indent, "", indent + 2, ""); X509_NAME_print_ex(out, &ntmp, 0, XN_FLAG_ONELINE); BIO_puts(out, "\n"); } return 1; } static int i2r_idp(const X509V3_EXT_METHOD *method, void *pidp, BIO *out, int indent) { ISSUING_DIST_POINT *idp = reinterpret_cast(pidp); if (idp->distpoint) { print_distpoint(out, idp->distpoint, indent); } if (idp->onlyuser > 0) { BIO_printf(out, "%*sOnly User Certificates\n", indent, ""); } if (idp->onlyCA > 0) { BIO_printf(out, "%*sOnly CA Certificates\n", indent, ""); } if (idp->indirectCRL > 0) { BIO_printf(out, "%*sIndirect CRL\n", indent, ""); } if (idp->onlysomereasons) { print_reasons(out, "Only Some Reasons", idp->onlysomereasons, indent); } if (idp->onlyattr > 0) { BIO_printf(out, "%*sOnly Attribute Certificates\n", indent, ""); } if (!idp->distpoint && (idp->onlyuser <= 0) && (idp->onlyCA <= 0) && (idp->indirectCRL <= 0) && !idp->onlysomereasons && (idp->onlyattr <= 0)) { BIO_printf(out, "%*s\n", indent, ""); } return 1; } static int i2r_crldp(const X509V3_EXT_METHOD *method, void *pcrldp, BIO *out, int indent) { STACK_OF(DIST_POINT) *crld = reinterpret_cast(pcrldp); DIST_POINT *point; size_t i; for (i = 0; i < sk_DIST_POINT_num(crld); i++) { BIO_puts(out, "\n"); point = 
sk_DIST_POINT_value(crld, i); if (point->distpoint) { print_distpoint(out, point->distpoint, indent); } if (point->reasons) { print_reasons(out, "Reasons", point->reasons, indent); } if (point->CRLissuer) { BIO_printf(out, "%*sCRL Issuer:\n", indent, ""); print_gens(out, point->CRLissuer, indent); } } return 1; } int DIST_POINT_set_dpname(DIST_POINT_NAME *dpn, X509_NAME *iname) { size_t i; STACK_OF(X509_NAME_ENTRY) *frag; X509_NAME_ENTRY *ne; if (!dpn || (dpn->type != 1)) { return 1; } frag = dpn->name.relativename; dpn->dpname = X509_NAME_dup(iname); if (!dpn->dpname) { return 0; } for (i = 0; i < sk_X509_NAME_ENTRY_num(frag); i++) { ne = sk_X509_NAME_ENTRY_value(frag, i); if (!X509_NAME_add_entry(dpn->dpname, ne, -1, i ? 0 : 1)) { X509_NAME_free(dpn->dpname); dpn->dpname = NULL; return 0; } } // generate cached encoding of name if (i2d_X509_NAME(dpn->dpname, NULL) < 0) { X509_NAME_free(dpn->dpname); dpn->dpname = NULL; return 0; } return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_enum.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "ext_dat.h" #include "internal.h" typedef BIT_STRING_BITNAME ENUMERATED_NAMES; static const ENUMERATED_NAMES crl_reasons[] = { {CRL_REASON_UNSPECIFIED, "Unspecified", "unspecified"}, {CRL_REASON_KEY_COMPROMISE, "Key Compromise", "keyCompromise"}, {CRL_REASON_CA_COMPROMISE, "CA Compromise", "CACompromise"}, {CRL_REASON_AFFILIATION_CHANGED, "Affiliation Changed", "affiliationChanged"}, {CRL_REASON_SUPERSEDED, "Superseded", "superseded"}, {CRL_REASON_CESSATION_OF_OPERATION, "Cessation Of Operation", "cessationOfOperation"}, {CRL_REASON_CERTIFICATE_HOLD, "Certificate Hold", "certificateHold"}, {CRL_REASON_REMOVE_FROM_CRL, "Remove From CRL", "removeFromCRL"}, {CRL_REASON_PRIVILEGE_WITHDRAWN, "Privilege Withdrawn", "privilegeWithdrawn"}, {CRL_REASON_AA_COMPROMISE, "AA Compromise", "AACompromise"}, {-1, NULL, NULL}}; static char *i2s_ASN1_ENUMERATED_TABLE(const X509V3_EXT_METHOD *method, void *ext) { const ASN1_ENUMERATED *e = reinterpret_cast(ext); long strval = ASN1_ENUMERATED_get(e); for (const ENUMERATED_NAMES *enam = reinterpret_cast(method->usr_data); enam->lname; enam++) { if (strval == enam->bitnum) { return OPENSSL_strdup(enam->lname); } } return i2s_ASN1_ENUMERATED(method, e); } const X509V3_EXT_METHOD v3_crl_reason = { NID_crl_reason, 0, ASN1_ITEM_ref(ASN1_ENUMERATED), 0, 0, 0, 0, i2s_ASN1_ENUMERATED_TABLE, 0, 0, 0, 0, 0, (void *)crl_reasons, }; ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_extku.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "ext_dat.h" #include "internal.h" static void *v2i_EXTENDED_KEY_USAGE(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *nval); static STACK_OF(CONF_VALUE) *i2v_EXTENDED_KEY_USAGE( const X509V3_EXT_METHOD *method, void *eku, STACK_OF(CONF_VALUE) *extlist); const X509V3_EXT_METHOD v3_ext_ku = { NID_ext_key_usage, 0, ASN1_ITEM_ref(EXTENDED_KEY_USAGE), 0, 0, 0, 0, 0, 0, i2v_EXTENDED_KEY_USAGE, v2i_EXTENDED_KEY_USAGE, 0, 0, NULL, }; // NB OCSP acceptable responses also is a SEQUENCE OF OBJECT const X509V3_EXT_METHOD v3_ocsp_accresp = { NID_id_pkix_OCSP_acceptableResponses, 0, ASN1_ITEM_ref(EXTENDED_KEY_USAGE), 0, 0, 0, 0, 0, 0, i2v_EXTENDED_KEY_USAGE, v2i_EXTENDED_KEY_USAGE, 0, 0, NULL, }; ASN1_ITEM_TEMPLATE(EXTENDED_KEY_USAGE) = ASN1_EX_TEMPLATE_TYPE( ASN1_TFLG_SEQUENCE_OF, 0, EXTENDED_KEY_USAGE, ASN1_OBJECT) ASN1_ITEM_TEMPLATE_END(EXTENDED_KEY_USAGE) IMPLEMENT_ASN1_FUNCTIONS_const(EXTENDED_KEY_USAGE) static STACK_OF(CONF_VALUE) *i2v_EXTENDED_KEY_USAGE( const X509V3_EXT_METHOD *method, void *a, STACK_OF(CONF_VALUE) *ext_list) { const EXTENDED_KEY_USAGE *eku = reinterpret_cast(a); for (size_t i = 0; i < sk_ASN1_OBJECT_num(eku); i++) { const ASN1_OBJECT *obj = sk_ASN1_OBJECT_value(eku, i); char obj_tmp[80]; i2t_ASN1_OBJECT(obj_tmp, 80, obj); X509V3_add_value(NULL, obj_tmp, &ext_list); } return ext_list; } static void *v2i_EXTENDED_KEY_USAGE(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *nval) { EXTENDED_KEY_USAGE *extku = sk_ASN1_OBJECT_new_null(); if (extku == NULL) { return NULL; } for (size_t i = 0; i < sk_CONF_VALUE_num(nval); i++) { const CONF_VALUE *val = sk_CONF_VALUE_value(nval, i); const char *extval; if (val->value) { extval = val->value; } else { extval = val->name; } ASN1_OBJECT *obj = 
OBJ_txt2obj(extval, 0); if (obj == NULL || !sk_ASN1_OBJECT_push(extku, obj)) { ASN1_OBJECT_free(obj); sk_ASN1_OBJECT_pop_free(extku, ASN1_OBJECT_free); OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_OBJECT_IDENTIFIER); X509V3_conf_err(val); return NULL; } } return extku; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_genn.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "internal.h" ASN1_SEQUENCE(OTHERNAME) = { ASN1_SIMPLE(OTHERNAME, type_id, ASN1_OBJECT), // Maybe have a true ANY DEFINED BY later ASN1_EXP(OTHERNAME, value, ASN1_ANY, 0), } ASN1_SEQUENCE_END(OTHERNAME) IMPLEMENT_ASN1_ALLOC_FUNCTIONS(OTHERNAME) ASN1_SEQUENCE(EDIPARTYNAME) = { // DirectoryString is a CHOICE type, so use explicit tagging. 
ASN1_EXP_OPT(EDIPARTYNAME, nameAssigner, DIRECTORYSTRING, 0),
    ASN1_EXP(EDIPARTYNAME, partyName, DIRECTORYSTRING, 1),
} ASN1_SEQUENCE_END(EDIPARTYNAME)

IMPLEMENT_ASN1_ALLOC_FUNCTIONS(EDIPARTYNAME)

// GENERAL_NAME is an ASN.1 CHOICE; each arm below is distinguished by its
// context-specific tag number (the GEN_* constant), and the decoded arm is
// recorded in the |type| field selected by ASN1_CHOICE_END.
ASN1_CHOICE(GENERAL_NAME) = {
    ASN1_IMP(GENERAL_NAME, d.otherName, OTHERNAME, GEN_OTHERNAME),
    ASN1_IMP(GENERAL_NAME, d.rfc822Name, ASN1_IA5STRING, GEN_EMAIL),
    ASN1_IMP(GENERAL_NAME, d.dNSName, ASN1_IA5STRING, GEN_DNS),
    // Don't decode this
    ASN1_IMP(GENERAL_NAME, d.x400Address, ASN1_SEQUENCE, GEN_X400),
    // X509_NAME is a CHOICE type so use EXPLICIT
    ASN1_EXP(GENERAL_NAME, d.directoryName, X509_NAME, GEN_DIRNAME),
    ASN1_IMP(GENERAL_NAME, d.ediPartyName, EDIPARTYNAME, GEN_EDIPARTY),
    ASN1_IMP(GENERAL_NAME, d.uniformResourceIdentifier, ASN1_IA5STRING,
             GEN_URI),
    ASN1_IMP(GENERAL_NAME, d.iPAddress, ASN1_OCTET_STRING, GEN_IPADD),
    ASN1_IMP(GENERAL_NAME, d.registeredID, ASN1_OBJECT, GEN_RID),
} ASN1_CHOICE_END(GENERAL_NAME)

IMPLEMENT_ASN1_FUNCTIONS(GENERAL_NAME)

// GeneralNames ::= SEQUENCE OF GENERAL_NAME.
ASN1_ITEM_TEMPLATE(GENERAL_NAMES) =
    ASN1_EX_TEMPLATE_TYPE(ASN1_TFLG_SEQUENCE_OF, 0, GeneralNames, GENERAL_NAME)
ASN1_ITEM_TEMPLATE_END(GENERAL_NAMES)

IMPLEMENT_ASN1_FUNCTIONS(GENERAL_NAMES)

IMPLEMENT_ASN1_DUP_FUNCTION(GENERAL_NAME)

// Compares two EDIPARTYNAME values; returns 0 when equal and non-zero
// otherwise (mismatch paths return -1, otherwise the ASN1_STRING_cmp result
// is passed through).
static int edipartyname_cmp(const EDIPARTYNAME *a, const EDIPARTYNAME *b) {
  // nameAssigner is optional and may be NULL.
  if (a->nameAssigner == NULL) {
    if (b->nameAssigner != NULL) {
      return -1;
    }
  } else {
    if (b->nameAssigner == NULL ||
        ASN1_STRING_cmp(a->nameAssigner, b->nameAssigner) != 0) {
      return -1;
    }
  }
  // partyName may not be NULL.
  return ASN1_STRING_cmp(a->partyName, b->partyName);
}

// Returns 0 if they are equal, != 0 otherwise.
static int othername_cmp(const OTHERNAME *a, const OTHERNAME *b) {
  int result = -1;
  // Either operand being NULL compares unequal.
  if (!a || !b) {
    return -1;
  }
  // Check their type first.
  if ((result = OBJ_cmp(a->type_id, b->type_id)) != 0) {
    return result;
  }
  // Check the value.
  result = ASN1_TYPE_cmp(a->value, b->value);
  return result;
}

// Returns 0 if they are equal, != 0 otherwise.
int GENERAL_NAME_cmp(const GENERAL_NAME *a, const GENERAL_NAME *b) {
  // Different CHOICE arms (or a NULL operand) are never equal.
  if (!a || !b || a->type != b->type) {
    return -1;
  }
  // Dispatch to the comparator appropriate for the decoded arm.
  switch (a->type) {
    case GEN_X400:
      return ASN1_STRING_cmp(a->d.x400Address, b->d.x400Address);
    case GEN_EDIPARTY:
      return edipartyname_cmp(a->d.ediPartyName, b->d.ediPartyName);
    case GEN_OTHERNAME:
      return othername_cmp(a->d.otherName, b->d.otherName);
    case GEN_EMAIL:
    case GEN_DNS:
    case GEN_URI:
      // All three arms are IA5Strings stored in |d.ia5|.
      return ASN1_STRING_cmp(a->d.ia5, b->d.ia5);
    case GEN_DIRNAME:
      return X509_NAME_cmp(a->d.dirn, b->d.dirn);
    case GEN_IPADD:
      return ASN1_OCTET_STRING_cmp(a->d.ip, b->d.ip);
    case GEN_RID:
      return OBJ_cmp(a->d.rid, b->d.rid);
  }
  // Unknown |type|: treat as unequal.
  return -1;
}

// Installs |value| into the union arm selected by |type| and records |type|.
// "set0": the pointer is stored directly, no copy is made.
// NOTE(review): the template arguments of the reinterpret_casts below appear
// to have been stripped by text extraction (no <T> present); compare against
// upstream BoringSSL v3_genn.cc before relying on this listing.
void GENERAL_NAME_set0_value(GENERAL_NAME *a, int type, void *value) {
  switch (type) {
    case GEN_X400:
      a->d.x400Address = reinterpret_cast(value);
      break;
    case GEN_EDIPARTY:
      a->d.ediPartyName = reinterpret_cast(value);
      break;
    case GEN_OTHERNAME:
      a->d.otherName = reinterpret_cast(value);
      break;
    case GEN_EMAIL:
    case GEN_DNS:
    case GEN_URI:
      a->d.ia5 = reinterpret_cast(value);
      break;
    case GEN_DIRNAME:
      a->d.dirn = reinterpret_cast(value);
      break;
    case GEN_IPADD:
      a->d.ip = reinterpret_cast(value);
      break;
    case GEN_RID:
      a->d.rid = reinterpret_cast(value);
      break;
  }
  a->type = type;
}

// Returns the pointer stored in the active union arm (NULL for an unknown
// |type|) and, when |out_type| is non-NULL, writes the GEN_* arm tag to it.
void *GENERAL_NAME_get0_value(const GENERAL_NAME *a, int *out_type) {
  if (out_type) {
    *out_type = a->type;
  }
  switch (a->type) {
    case GEN_X400:
      return a->d.x400Address;
    case GEN_EDIPARTY:
      return a->d.ediPartyName;
    case GEN_OTHERNAME:
      return a->d.otherName;
    case GEN_EMAIL:
    case GEN_DNS:
    case GEN_URI:
      return a->d.ia5;
    case GEN_DIRNAME:
      return a->d.dirn;
    case GEN_IPADD:
      return a->d.ip;
    case GEN_RID:
      return a->d.rid;
    default:
      return NULL;
  }
}

// Builds an OTHERNAME from the caller's |oid| and |value| (pointers are
// adopted, not copied) and installs it into |gen| as the GEN_OTHERNAME arm.
// Returns 1 on success, 0 if the OTHERNAME allocation fails.
int GENERAL_NAME_set0_othername(GENERAL_NAME *gen, ASN1_OBJECT *oid,
                                ASN1_TYPE *value) {
  OTHERNAME *oth;
  oth = OTHERNAME_new();
  if (!oth) {
    return 0;
  }
  // Drop the placeholder value OTHERNAME_new allocated before adopting the
  // caller's pointer.
  ASN1_TYPE_free(oth->value);
  oth->type_id = oid;
  oth->value = value;
  GENERAL_NAME_set0_value(gen, GEN_OTHERNAME, oth);
  return 1;
}

int GENERAL_NAME_get0_otherName(const
GENERAL_NAME *gen, ASN1_OBJECT **out_oid, ASN1_TYPE **out_value) { if (gen->type != GEN_OTHERNAME) { return 0; } if (out_oid != NULL) { *out_oid = gen->d.otherName->type_id; } if (out_value != NULL) { *out_value = gen->d.otherName->value; } return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_ia5.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../internal.h" #include "ext_dat.h" static char *i2s_ASN1_IA5STRING(const X509V3_EXT_METHOD *method, void *ext) { const ASN1_IA5STRING *ia5 = reinterpret_cast(ext); char *tmp; if (!ia5 || !ia5->length) { return NULL; } if (!(tmp = reinterpret_cast(OPENSSL_malloc(ia5->length + 1)))) { return NULL; } OPENSSL_memcpy(tmp, ia5->data, ia5->length); tmp[ia5->length] = 0; return tmp; } static void *s2i_ASN1_IA5STRING(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const char *str) { ASN1_IA5STRING *ia5; if (!str) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NULL_ARGUMENT); return NULL; } if (!(ia5 = ASN1_IA5STRING_new())) { goto err; } if (!ASN1_STRING_set(ia5, str, strlen(str))) { ASN1_IA5STRING_free(ia5); goto err; } return ia5; err: return NULL; } #define EXT_IA5STRING(nid) \ { \ nid, 0, ASN1_ITEM_ref(ASN1_IA5STRING), 0, 0, 0, 0, i2s_ASN1_IA5STRING, \ s2i_ASN1_IA5STRING, 0, 0, 0, 0, NULL \ } #define EXT_END \ { -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } const X509V3_EXT_METHOD v3_ns_ia5_list[] = { EXT_IA5STRING(NID_netscape_base_url), EXT_IA5STRING(NID_netscape_revocation_url), EXT_IA5STRING(NID_netscape_ca_revocation_url), EXT_IA5STRING(NID_netscape_renewal_url), 
EXT_IA5STRING(NID_netscape_ca_policy_url), EXT_IA5STRING(NID_netscape_ssl_server_name), EXT_IA5STRING(NID_netscape_comment), EXT_END}; ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_info.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include "ext_dat.h" #include "internal.h" static STACK_OF(CONF_VALUE) *i2v_AUTHORITY_INFO_ACCESS( const X509V3_EXT_METHOD *method, void *ext, STACK_OF(CONF_VALUE) *ret); static void *v2i_AUTHORITY_INFO_ACCESS(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *nval); const X509V3_EXT_METHOD v3_info = { NID_info_access, X509V3_EXT_MULTILINE, ASN1_ITEM_ref(AUTHORITY_INFO_ACCESS), 0, 0, 0, 0, 0, 0, i2v_AUTHORITY_INFO_ACCESS, v2i_AUTHORITY_INFO_ACCESS, 0, 0, NULL, }; const X509V3_EXT_METHOD v3_sinfo = { NID_sinfo_access, X509V3_EXT_MULTILINE, ASN1_ITEM_ref(AUTHORITY_INFO_ACCESS), 0, 0, 0, 0, 0, 0, i2v_AUTHORITY_INFO_ACCESS, v2i_AUTHORITY_INFO_ACCESS, 0, 0, NULL, }; ASN1_SEQUENCE(ACCESS_DESCRIPTION) = { ASN1_SIMPLE(ACCESS_DESCRIPTION, method, ASN1_OBJECT), ASN1_SIMPLE(ACCESS_DESCRIPTION, location, GENERAL_NAME), } ASN1_SEQUENCE_END(ACCESS_DESCRIPTION) IMPLEMENT_ASN1_ALLOC_FUNCTIONS(ACCESS_DESCRIPTION) ASN1_ITEM_TEMPLATE(AUTHORITY_INFO_ACCESS) = ASN1_EX_TEMPLATE_TYPE( ASN1_TFLG_SEQUENCE_OF, 0, GeneralNames, ACCESS_DESCRIPTION) ASN1_ITEM_TEMPLATE_END(AUTHORITY_INFO_ACCESS) IMPLEMENT_ASN1_FUNCTIONS(AUTHORITY_INFO_ACCESS) static STACK_OF(CONF_VALUE) *i2v_AUTHORITY_INFO_ACCESS( const X509V3_EXT_METHOD *method, void *ext, STACK_OF(CONF_VALUE) *ret) { const 
AUTHORITY_INFO_ACCESS *ainfo = reinterpret_cast(ext); ACCESS_DESCRIPTION *desc; char objtmp[80], *name; CONF_VALUE *vtmp; STACK_OF(CONF_VALUE) *tret = ret; for (size_t i = 0; i < sk_ACCESS_DESCRIPTION_num(ainfo); i++) { STACK_OF(CONF_VALUE) *tmp; desc = sk_ACCESS_DESCRIPTION_value(ainfo, i); tmp = i2v_GENERAL_NAME(method, desc->location, tret); if (tmp == NULL) { goto err; } tret = tmp; vtmp = sk_CONF_VALUE_value(tret, i); i2t_ASN1_OBJECT(objtmp, sizeof objtmp, desc->method); if (OPENSSL_asprintf(&name, "%s - %s", objtmp, vtmp->name) == -1) { goto err; } OPENSSL_free(vtmp->name); vtmp->name = name; } if (ret == NULL && tret == NULL) { return sk_CONF_VALUE_new_null(); } return tret; err: if (ret == NULL && tret != NULL) { sk_CONF_VALUE_pop_free(tret, X509V3_conf_free); } return NULL; } static void *v2i_AUTHORITY_INFO_ACCESS(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *nval) { AUTHORITY_INFO_ACCESS *ainfo = NULL; ACCESS_DESCRIPTION *acc; if (!(ainfo = sk_ACCESS_DESCRIPTION_new_null())) { return NULL; } for (size_t i = 0; i < sk_CONF_VALUE_num(nval); i++) { const CONF_VALUE *cnf = sk_CONF_VALUE_value(nval, i); if (!(acc = ACCESS_DESCRIPTION_new()) || !sk_ACCESS_DESCRIPTION_push(ainfo, acc)) { goto err; } char *ptmp = strchr(cnf->name, ';'); if (!ptmp) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_SYNTAX); goto err; } CONF_VALUE ctmp; ctmp.name = ptmp + 1; ctmp.value = cnf->value; if (!v2i_GENERAL_NAME_ex(acc->location, method, ctx, &ctmp, 0)) { goto err; } char *objtmp = OPENSSL_strndup(cnf->name, ptmp - cnf->name); if (objtmp == NULL) { goto err; } acc->method = OBJ_txt2obj(objtmp, 0); if (!acc->method) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_BAD_OBJECT); ERR_add_error_data(2, "value=", objtmp); OPENSSL_free(objtmp); goto err; } OPENSSL_free(objtmp); } return ainfo; err: sk_ACCESS_DESCRIPTION_pop_free(ainfo, ACCESS_DESCRIPTION_free); return NULL; } ================================================ FILE: 
Sources/CNIOBoringSSL/crypto/x509/v3_int.cc
================================================

/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the three bare #include directives below lost their header
// names during text extraction; consult upstream v3_int.cc for the real
// headers.
#include
#include
#include
#include "ext_dat.h"

// i2s callback for INTEGER-valued extensions: forwards to i2s_ASN1_INTEGER
// with |ext| treated as the parsed ASN1_INTEGER.
// NOTE(review): the reinterpret_cast's template argument appears stripped by
// extraction.
static char *i2s_ASN1_INTEGER_cb(const X509V3_EXT_METHOD *method, void *ext) {
  return i2s_ASN1_INTEGER(method, reinterpret_cast(ext));
}

// s2i callback: parses |value| into an ASN1_INTEGER via s2i_ASN1_INTEGER.
// |ctx| is accepted to match the callback signature but is unused.
static void *s2i_asn1_int(const X509V3_EXT_METHOD *meth, const X509V3_CTX *ctx,
                          const char *value) {
  return s2i_ASN1_INTEGER(meth, value);
}

// CRL Number extension method: only the i2s (print) callback is installed,
// so this extension can be rendered but not created from a config string.
const X509V3_EXT_METHOD v3_crl_num = {
    NID_crl_number,
    0,
    ASN1_ITEM_ref(ASN1_INTEGER),
    0,
    0,
    0,
    0,
    i2s_ASN1_INTEGER_cb,
    0,
    0,
    0,
    0,
    0,
    NULL,
};

// Delta CRL Indicator extension method: same print-only shape as v3_crl_num.
const X509V3_EXT_METHOD v3_delta_crl = {
    NID_delta_crl,
    0,
    ASN1_ITEM_ref(ASN1_INTEGER),
    0,
    0,
    0,
    0,
    i2s_ASN1_INTEGER_cb,
    0,
    0,
    0,
    0,
    0,
    NULL,
};

// Inhibit anyPolicy extension method: both i2s (print) and s2i (parse from
// config string) callbacks are installed.
const X509V3_EXT_METHOD v3_inhibit_anyp = {
    NID_inhibit_any_policy,
    0,
    ASN1_ITEM_ref(ASN1_INTEGER),
    0,
    0,
    0,
    0,
    i2s_ASN1_INTEGER_cb,
    s2i_asn1_int,
    0,
    0,
    0,
    0,
    NULL,
};

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/v3_lib.cc
================================================

/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/* X509 v3 extension utilities */

// NOTE(review): the seven bare #include directives below lost their header
// names during text extraction; consult upstream v3_lib.cc for the real
// headers.
#include
#include
#include
#include
#include
#include
#include
#include "internal.h"
#include "ext_dat.h"

DEFINE_STACK_OF(X509V3_EXT_METHOD)

// Dynamically registered extension methods, kept sorted by NID.  Built-in
// methods live in |standard_exts| (see ext_dat.h); this list only holds
// methods added at runtime via X509V3_EXT_add.
static STACK_OF(X509V3_EXT_METHOD) *ext_list = NULL;

// Orders two methods by NID; used as the sort/find comparator for |ext_list|.
static int ext_stack_cmp(const X509V3_EXT_METHOD *const *a,
                         const X509V3_EXT_METHOD *const *b) {
  return ((*a)->ext_nid - (*b)->ext_nid);
}

// Registers |ext| in the global list and re-sorts it by NID.
// Returns 1 on success, 0 on allocation failure.
int X509V3_EXT_add(X509V3_EXT_METHOD *ext) {
  // We only support |ASN1_ITEM|-based extensions.
  assert(ext->it != NULL);
  // TODO(davidben): This should be locked. Also check for duplicates.
  if (!ext_list && !(ext_list = sk_X509V3_EXT_METHOD_new(ext_stack_cmp))) {
    return 0;
  }
  if (!sk_X509V3_EXT_METHOD_push(ext_list, ext)) {
    return 0;
  }
  sk_X509V3_EXT_METHOD_sort(ext_list);
  return 1;
}

// bsearch(3)-compatible wrapper around |ext_stack_cmp|.
static int ext_cmp(const void *void_a, const void *void_b) {
  const X509V3_EXT_METHOD **a = (const X509V3_EXT_METHOD **)void_a;
  const X509V3_EXT_METHOD **b = (const X509V3_EXT_METHOD **)void_b;
  return ext_stack_cmp(a, b);
}

// Looks up the extension method for |nid|: first a binary search over the
// built-in |standard_exts| table, then a sorted lookup in the dynamic
// |ext_list|.  Returns NULL when no method is registered.
const X509V3_EXT_METHOD *X509V3_EXT_get_nid(int nid) {
  X509V3_EXT_METHOD tmp;
  const X509V3_EXT_METHOD *t = &tmp, *const *ret;
  size_t idx;
  if (nid < 0) {
    return NULL;
  }
  tmp.ext_nid = nid;
  // NOTE(review): the reinterpret_cast's template argument appears to have
  // been stripped by text extraction here.
  ret = reinterpret_cast(
      bsearch(&t, standard_exts, STANDARD_EXTENSION_COUNT,
              sizeof(X509V3_EXT_METHOD *), ext_cmp));
  if (ret) {
    return *ret;
  }
  if (!ext_list) {
    return NULL;
  }
  if (!sk_X509V3_EXT_METHOD_find(ext_list, &idx, &tmp)) {
    return NULL;
  }
  return sk_X509V3_EXT_METHOD_value(ext_list, idx);
}

// Convenience lookup keyed by the extension's OID; returns NULL when the OID
// has no NID or no method is registered.
const X509V3_EXT_METHOD *X509V3_EXT_get(const X509_EXTENSION *ext) {
  int nid;
  if ((nid = OBJ_obj2nid(ext->object)) == NID_undef) {
    return NULL;
  }
  return X509V3_EXT_get_nid(nid);
}

// Frees a parsed extension value using the ASN1_ITEM from its registered
// method.  Returns 0 (with an error queued) when no method exists for |nid|.
int X509V3_EXT_free(int nid, void *ext_data) {
  const X509V3_EXT_METHOD *ext_method = X509V3_EXT_get_nid(nid);
  if (ext_method == NULL) {
    OPENSSL_PUT_ERROR(X509V3, X509V3_R_CANNOT_FIND_FREE_FUNCTION);
    return 0;
  }
ASN1_item_free(reinterpret_cast(ext_data), ASN1_ITEM_ptr(ext_method->it)); return 1; } int X509V3_EXT_add_alias(int nid_to, int nid_from) { OPENSSL_BEGIN_ALLOW_DEPRECATED const X509V3_EXT_METHOD *ext; X509V3_EXT_METHOD *tmpext; if (!(ext = X509V3_EXT_get_nid(nid_from))) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_EXTENSION_NOT_FOUND); return 0; } if (!(tmpext = (X509V3_EXT_METHOD *)OPENSSL_malloc(sizeof(X509V3_EXT_METHOD)))) { return 0; } *tmpext = *ext; tmpext->ext_nid = nid_to; if (!X509V3_EXT_add(tmpext)) { OPENSSL_free(tmpext); return 0; } return 1; OPENSSL_END_ALLOW_DEPRECATED } // Legacy function: we don't need to add standard extensions any more because // they are now kept in ext_dat.h. int X509V3_add_standard_extensions(void) { return 1; } // Return an extension internal structure void *X509V3_EXT_d2i(const X509_EXTENSION *ext) { const X509V3_EXT_METHOD *method; const unsigned char *p; if (!(method = X509V3_EXT_get(ext))) { return NULL; } p = ext->value->data; void *ret = ASN1_item_d2i(NULL, &p, ext->value->length, ASN1_ITEM_ptr(method->it)); if (ret == NULL) { return NULL; } // Check for trailing data. if (p != ext->value->data + ext->value->length) { ASN1_item_free(reinterpret_cast(ret), ASN1_ITEM_ptr(method->it)); OPENSSL_PUT_ERROR(X509V3, X509V3_R_TRAILING_DATA_IN_EXTENSION); return NULL; } return ret; } void *X509V3_get_d2i(const STACK_OF(X509_EXTENSION) *extensions, int nid, int *out_critical, int *out_idx) { int lastpos; X509_EXTENSION *ex, *found_ex = NULL; if (!extensions) { if (out_idx) { *out_idx = -1; } if (out_critical) { *out_critical = -1; } return NULL; } if (out_idx) { lastpos = *out_idx + 1; } else { lastpos = 0; } if (lastpos < 0) { lastpos = 0; } for (size_t i = lastpos; i < sk_X509_EXTENSION_num(extensions); i++) { ex = sk_X509_EXTENSION_value(extensions, i); if (OBJ_obj2nid(ex->object) == nid) { if (out_idx) { // TODO(https://crbug.com/boringssl/379): Consistently reject // duplicate extensions. 
*out_idx = (int)i; found_ex = ex; break; } else if (found_ex) { // Found more than one if (out_critical) { *out_critical = -2; } return NULL; } found_ex = ex; } } if (found_ex) { // Found it if (out_critical) { *out_critical = X509_EXTENSION_get_critical(found_ex); } return X509V3_EXT_d2i(found_ex); } // Extension not found if (out_idx) { *out_idx = -1; } if (out_critical) { *out_critical = -1; } return NULL; } // This function is a general extension append, replace and delete utility. // The precise operation is governed by the 'flags' value. The 'crit' and // 'value' arguments (if relevant) are the extensions internal structure. int X509V3_add1_i2d(STACK_OF(X509_EXTENSION) **x, int nid, void *value, int crit, unsigned long flags) { int errcode, extidx = -1; X509_EXTENSION *ext = NULL, *extmp; STACK_OF(X509_EXTENSION) *ret = NULL; unsigned long ext_op = flags & X509V3_ADD_OP_MASK; // If appending we don't care if it exists, otherwise look for existing // extension. if (ext_op != X509V3_ADD_APPEND) { extidx = X509v3_get_ext_by_NID(*x, nid, -1); } // See if extension exists if (extidx >= 0) { // If keep existing, nothing to do if (ext_op == X509V3_ADD_KEEP_EXISTING) { return 1; } // If default then its an error if (ext_op == X509V3_ADD_DEFAULT) { errcode = X509V3_R_EXTENSION_EXISTS; goto err; } // If delete, just delete it if (ext_op == X509V3_ADD_DELETE) { X509_EXTENSION *prev_ext = sk_X509_EXTENSION_delete(*x, extidx); if (prev_ext == NULL) { return -1; } X509_EXTENSION_free(prev_ext); return 1; } } else { // If replace existing or delete, error since extension must exist if ((ext_op == X509V3_ADD_REPLACE_EXISTING) || (ext_op == X509V3_ADD_DELETE)) { errcode = X509V3_R_EXTENSION_NOT_FOUND; goto err; } } // If we get this far then we have to create an extension: could have // some flags for alternative encoding schemes... 
ext = X509V3_EXT_i2d(nid, crit, value); if (!ext) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_ERROR_CREATING_EXTENSION); return 0; } // If extension exists replace it.. if (extidx >= 0) { extmp = sk_X509_EXTENSION_value(*x, extidx); X509_EXTENSION_free(extmp); if (!sk_X509_EXTENSION_set(*x, extidx, ext)) { return -1; } return 1; } if ((ret = *x) == NULL && (ret = sk_X509_EXTENSION_new_null()) == NULL) { goto m_fail; } if (!sk_X509_EXTENSION_push(ret, ext)) { goto m_fail; } *x = ret; return 1; m_fail: if (ret != *x) { sk_X509_EXTENSION_free(ret); } X509_EXTENSION_free(ext); return -1; err: if (!(flags & X509V3_ADD_SILENT)) { OPENSSL_PUT_ERROR(X509V3, errcode); } return 0; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_ncons.cc ================================================ /* * Copyright 2003-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../internal.h" #include "ext_dat.h" #include "internal.h" static void *v2i_NAME_CONSTRAINTS(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *nval); static int i2r_NAME_CONSTRAINTS(const X509V3_EXT_METHOD *method, void *a, BIO *bp, int ind); static int do_i2r_name_constraints(const X509V3_EXT_METHOD *method, STACK_OF(GENERAL_SUBTREE) *trees, BIO *bp, int ind, const char *name); static int print_nc_ipadd(BIO *bp, const ASN1_OCTET_STRING *ip); static int nc_match(GENERAL_NAME *gen, NAME_CONSTRAINTS *nc); static int nc_match_single(GENERAL_NAME *sub, GENERAL_NAME *gen); static int nc_dn(X509_NAME *sub, X509_NAME *nm); static int nc_dns(const ASN1_IA5STRING *sub, const ASN1_IA5STRING *dns); static int nc_email(const ASN1_IA5STRING *sub, const ASN1_IA5STRING *eml); static int nc_uri(const ASN1_IA5STRING *uri, const ASN1_IA5STRING *base); const X509V3_EXT_METHOD v3_name_constraints = { NID_name_constraints, 0, ASN1_ITEM_ref(NAME_CONSTRAINTS), 0, 0, 0, 0, 0, 0, 0, v2i_NAME_CONSTRAINTS, i2r_NAME_CONSTRAINTS, 0, NULL, }; ASN1_SEQUENCE(GENERAL_SUBTREE) = { ASN1_SIMPLE(GENERAL_SUBTREE, base, GENERAL_NAME), ASN1_IMP_OPT(GENERAL_SUBTREE, minimum, ASN1_INTEGER, 0), ASN1_IMP_OPT(GENERAL_SUBTREE, maximum, ASN1_INTEGER, 1), } ASN1_SEQUENCE_END(GENERAL_SUBTREE) ASN1_SEQUENCE(NAME_CONSTRAINTS) = { ASN1_IMP_SEQUENCE_OF_OPT(NAME_CONSTRAINTS, permittedSubtrees, GENERAL_SUBTREE, 0), ASN1_IMP_SEQUENCE_OF_OPT(NAME_CONSTRAINTS, excludedSubtrees, GENERAL_SUBTREE, 1), } ASN1_SEQUENCE_END(NAME_CONSTRAINTS) IMPLEMENT_ASN1_ALLOC_FUNCTIONS(GENERAL_SUBTREE) IMPLEMENT_ASN1_ALLOC_FUNCTIONS(NAME_CONSTRAINTS) static void *v2i_NAME_CONSTRAINTS(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *nval) { STACK_OF(GENERAL_SUBTREE) 
**ptree = NULL; NAME_CONSTRAINTS *ncons = NULL; GENERAL_SUBTREE *sub = NULL; ncons = NAME_CONSTRAINTS_new(); if (!ncons) { goto err; } for (size_t i = 0; i < sk_CONF_VALUE_num(nval); i++) { const CONF_VALUE *val = sk_CONF_VALUE_value(nval, i); CONF_VALUE tval; if (!strncmp(val->name, "permitted", 9) && val->name[9]) { ptree = &ncons->permittedSubtrees; tval.name = val->name + 10; } else if (!strncmp(val->name, "excluded", 8) && val->name[8]) { ptree = &ncons->excludedSubtrees; tval.name = val->name + 9; } else { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_SYNTAX); goto err; } tval.value = val->value; sub = GENERAL_SUBTREE_new(); if (!v2i_GENERAL_NAME_ex(sub->base, method, ctx, &tval, 1)) { goto err; } if (!*ptree) { *ptree = sk_GENERAL_SUBTREE_new_null(); } if (!*ptree || !sk_GENERAL_SUBTREE_push(*ptree, sub)) { goto err; } sub = NULL; } return ncons; err: NAME_CONSTRAINTS_free(ncons); GENERAL_SUBTREE_free(sub); return NULL; } static int i2r_NAME_CONSTRAINTS(const X509V3_EXT_METHOD *method, void *a, BIO *bp, int ind) { NAME_CONSTRAINTS *ncons = reinterpret_cast(a); do_i2r_name_constraints(method, ncons->permittedSubtrees, bp, ind, "Permitted"); do_i2r_name_constraints(method, ncons->excludedSubtrees, bp, ind, "Excluded"); return 1; } static int do_i2r_name_constraints(const X509V3_EXT_METHOD *method, STACK_OF(GENERAL_SUBTREE) *trees, BIO *bp, int ind, const char *name) { GENERAL_SUBTREE *tree; size_t i; if (sk_GENERAL_SUBTREE_num(trees) > 0) { BIO_printf(bp, "%*s%s:\n", ind, "", name); } for (i = 0; i < sk_GENERAL_SUBTREE_num(trees); i++) { tree = sk_GENERAL_SUBTREE_value(trees, i); BIO_printf(bp, "%*s", ind + 2, ""); if (tree->base->type == GEN_IPADD) { print_nc_ipadd(bp, tree->base->d.ip); } else { GENERAL_NAME_print(bp, tree->base); } BIO_puts(bp, "\n"); } return 1; } static int print_nc_ipadd(BIO *bp, const ASN1_OCTET_STRING *ip) { int i, len; unsigned char *p; p = ip->data; len = ip->length; BIO_puts(bp, "IP:"); if (len == 8) { BIO_printf(bp, 
"%d.%d.%d.%d/%d.%d.%d.%d", p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); } else if (len == 32) { for (i = 0; i < 16; i++) { uint16_t v = ((uint16_t)p[0] << 8) | p[1]; BIO_printf(bp, "%X", v); p += 2; if (i == 7) { BIO_puts(bp, "/"); } else if (i != 15) { BIO_puts(bp, ":"); } } } else { BIO_printf(bp, "IP Address:"); } return 1; } //- // Check a certificate conforms to a specified set of constraints. // Return values: // X509_V_OK: All constraints obeyed. // X509_V_ERR_PERMITTED_VIOLATION: Permitted subtree violation. // X509_V_ERR_EXCLUDED_VIOLATION: Excluded subtree violation. // X509_V_ERR_SUBTREE_MINMAX: Min or max values present and matching type. // X509_V_ERR_UNSPECIFIED: Unspecified error. // X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE: Unsupported constraint type. // X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX: Bad or unsupported constraint // syntax. // X509_V_ERR_UNSUPPORTED_NAME_SYNTAX: Bad or unsupported syntax of name. int NAME_CONSTRAINTS_check(X509 *x, NAME_CONSTRAINTS *nc) { int r, i; size_t j; X509_NAME *nm; nm = X509_get_subject_name(x); // Guard against certificates with an excessive number of names or // constraints causing a computationally expensive name constraints // check. 
size_t name_count = X509_NAME_entry_count(nm) + sk_GENERAL_NAME_num(x->altname); size_t constraint_count = sk_GENERAL_SUBTREE_num(nc->permittedSubtrees) + sk_GENERAL_SUBTREE_num(nc->excludedSubtrees); size_t check_count = constraint_count * name_count; if (name_count < (size_t)X509_NAME_entry_count(nm) || constraint_count < sk_GENERAL_SUBTREE_num(nc->permittedSubtrees) || (constraint_count && check_count / constraint_count != name_count) || check_count > 1 << 20) { return X509_V_ERR_UNSPECIFIED; } if (X509_NAME_entry_count(nm) > 0) { GENERAL_NAME gntmp; gntmp.type = GEN_DIRNAME; gntmp.d.directoryName = nm; r = nc_match(&gntmp, nc); if (r != X509_V_OK) { return r; } gntmp.type = GEN_EMAIL; // Process any email address attributes in subject name for (i = -1;;) { i = X509_NAME_get_index_by_NID(nm, NID_pkcs9_emailAddress, i); if (i == -1) { break; } const X509_NAME_ENTRY *ne = X509_NAME_get_entry(nm, i); gntmp.d.rfc822Name = X509_NAME_ENTRY_get_data(ne); if (gntmp.d.rfc822Name->type != V_ASN1_IA5STRING) { return X509_V_ERR_UNSUPPORTED_NAME_SYNTAX; } r = nc_match(&gntmp, nc); if (r != X509_V_OK) { return r; } } } for (j = 0; j < sk_GENERAL_NAME_num(x->altname); j++) { GENERAL_NAME *gen = sk_GENERAL_NAME_value(x->altname, j); r = nc_match(gen, nc); if (r != X509_V_OK) { return r; } } return X509_V_OK; } static int nc_match(GENERAL_NAME *gen, NAME_CONSTRAINTS *nc) { GENERAL_SUBTREE *sub; int r, match = 0; size_t i; // Permitted subtrees: if any subtrees exist of matching the type at // least one subtree must match. 
for (i = 0; i < sk_GENERAL_SUBTREE_num(nc->permittedSubtrees); i++) { sub = sk_GENERAL_SUBTREE_value(nc->permittedSubtrees, i); if (gen->type != sub->base->type) { continue; } if (sub->minimum || sub->maximum) { return X509_V_ERR_SUBTREE_MINMAX; } // If we already have a match don't bother trying any more if (match == 2) { continue; } if (match == 0) { match = 1; } r = nc_match_single(gen, sub->base); if (r == X509_V_OK) { match = 2; } else if (r != X509_V_ERR_PERMITTED_VIOLATION) { return r; } } if (match == 1) { return X509_V_ERR_PERMITTED_VIOLATION; } // Excluded subtrees: must not match any of these for (i = 0; i < sk_GENERAL_SUBTREE_num(nc->excludedSubtrees); i++) { sub = sk_GENERAL_SUBTREE_value(nc->excludedSubtrees, i); if (gen->type != sub->base->type) { continue; } if (sub->minimum || sub->maximum) { return X509_V_ERR_SUBTREE_MINMAX; } r = nc_match_single(gen, sub->base); if (r == X509_V_OK) { return X509_V_ERR_EXCLUDED_VIOLATION; } else if (r != X509_V_ERR_PERMITTED_VIOLATION) { return r; } } return X509_V_OK; } static int nc_match_single(GENERAL_NAME *gen, GENERAL_NAME *base) { switch (base->type) { case GEN_DIRNAME: return nc_dn(gen->d.directoryName, base->d.directoryName); case GEN_DNS: return nc_dns(gen->d.dNSName, base->d.dNSName); case GEN_EMAIL: return nc_email(gen->d.rfc822Name, base->d.rfc822Name); case GEN_URI: return nc_uri(gen->d.uniformResourceIdentifier, base->d.uniformResourceIdentifier); default: return X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE; } } // directoryName name constraint matching. The canonical encoding of // X509_NAME makes this comparison easy. It is matched if the subtree is a // subset of the name. static int nc_dn(X509_NAME *nm, X509_NAME *base) { // Ensure canonical encodings are up to date. 
if (nm->modified && i2d_X509_NAME(nm, NULL) < 0) { return X509_V_ERR_OUT_OF_MEM; } if (base->modified && i2d_X509_NAME(base, NULL) < 0) { return X509_V_ERR_OUT_OF_MEM; } if (base->canon_enclen > nm->canon_enclen) { return X509_V_ERR_PERMITTED_VIOLATION; } if (OPENSSL_memcmp(base->canon_enc, nm->canon_enc, base->canon_enclen)) { return X509_V_ERR_PERMITTED_VIOLATION; } return X509_V_OK; } static int starts_with(const CBS *cbs, uint8_t c) { return CBS_len(cbs) > 0 && CBS_data(cbs)[0] == c; } static int equal_case(const CBS *a, const CBS *b) { if (CBS_len(a) != CBS_len(b)) { return 0; } // Note we cannot use |OPENSSL_strncasecmp| because that would stop // iterating at NUL. const uint8_t *a_data = CBS_data(a), *b_data = CBS_data(b); for (size_t i = 0; i < CBS_len(a); i++) { if (OPENSSL_tolower(a_data[i]) != OPENSSL_tolower(b_data[i])) { return 0; } } return 1; } static int has_suffix_case(const CBS *a, const CBS *b) { if (CBS_len(a) < CBS_len(b)) { return 0; } CBS copy = *a; CBS_skip(©, CBS_len(a) - CBS_len(b)); return equal_case(©, b); } static int nc_dns(const ASN1_IA5STRING *dns, const ASN1_IA5STRING *base) { CBS dns_cbs, base_cbs; CBS_init(&dns_cbs, dns->data, dns->length); CBS_init(&base_cbs, base->data, base->length); // Empty matches everything if (CBS_len(&base_cbs) == 0) { return X509_V_OK; } // If |base_cbs| begins with a '.', do a simple suffix comparison. This is // not part of RFC5280, but is part of OpenSSL's original behavior. if (starts_with(&base_cbs, '.')) { if (has_suffix_case(&dns_cbs, &base_cbs)) { return X509_V_OK; } return X509_V_ERR_PERMITTED_VIOLATION; } // Otherwise can add zero or more components on the left so compare RHS // and if dns is longer and expect '.' as preceding character. 
if (CBS_len(&dns_cbs) > CBS_len(&base_cbs)) { uint8_t dot; if (!CBS_skip(&dns_cbs, CBS_len(&dns_cbs) - CBS_len(&base_cbs) - 1) || !CBS_get_u8(&dns_cbs, &dot) || dot != '.') { return X509_V_ERR_PERMITTED_VIOLATION; } } if (!equal_case(&dns_cbs, &base_cbs)) { return X509_V_ERR_PERMITTED_VIOLATION; } return X509_V_OK; } static int nc_email(const ASN1_IA5STRING *eml, const ASN1_IA5STRING *base) { CBS eml_cbs, base_cbs; CBS_init(&eml_cbs, eml->data, eml->length); CBS_init(&base_cbs, base->data, base->length); // TODO(davidben): In OpenSSL 1.1.1, this switched from the first '@' to the // last one. Match them here, or perhaps do an actual parse. Looks like // multiple '@'s may be allowed in quoted strings. CBS eml_local, base_local; if (!CBS_get_until_first(&eml_cbs, &eml_local, '@')) { return X509_V_ERR_UNSUPPORTED_NAME_SYNTAX; } int base_has_at = CBS_get_until_first(&base_cbs, &base_local, '@'); // Special case: initial '.' is RHS match if (!base_has_at && starts_with(&base_cbs, '.')) { if (has_suffix_case(&eml_cbs, &base_cbs)) { return X509_V_OK; } return X509_V_ERR_PERMITTED_VIOLATION; } // If we have anything before '@' match local part if (base_has_at) { // TODO(davidben): This interprets a constraint of "@example.com" as // "example.com", which is not part of RFC5280. 
if (CBS_len(&base_local) > 0) { // Case sensitive match of local part if (!CBS_mem_equal(&base_local, CBS_data(&eml_local), CBS_len(&eml_local))) { return X509_V_ERR_PERMITTED_VIOLATION; } } // Position base after '@' assert(starts_with(&base_cbs, '@')); CBS_skip(&base_cbs, 1); } // Just have hostname left to match: case insensitive assert(starts_with(&eml_cbs, '@')); CBS_skip(&eml_cbs, 1); if (!equal_case(&base_cbs, &eml_cbs)) { return X509_V_ERR_PERMITTED_VIOLATION; } return X509_V_OK; } static int nc_uri(const ASN1_IA5STRING *uri, const ASN1_IA5STRING *base) { CBS uri_cbs, base_cbs; CBS_init(&uri_cbs, uri->data, uri->length); CBS_init(&base_cbs, base->data, base->length); // Check for foo:// and skip past it CBS scheme; uint8_t byte; if (!CBS_get_until_first(&uri_cbs, &scheme, ':') || !CBS_skip(&uri_cbs, 1) || // Skip the colon !CBS_get_u8(&uri_cbs, &byte) || byte != '/' || !CBS_get_u8(&uri_cbs, &byte) || byte != '/') { return X509_V_ERR_UNSUPPORTED_NAME_SYNTAX; } // Look for a port indicator as end of hostname first. Otherwise look for // trailing slash, or the end of the string. // TODO(davidben): This is not a correct URI parser and mishandles IPv6 // literals. CBS host; if (!CBS_get_until_first(&uri_cbs, &host, ':') && !CBS_get_until_first(&uri_cbs, &host, '/')) { host = uri_cbs; } if (CBS_len(&host) == 0) { return X509_V_ERR_UNSUPPORTED_NAME_SYNTAX; } // Special case: initial '.' is RHS match if (starts_with(&base_cbs, '.')) { if (has_suffix_case(&host, &base_cbs)) { return X509_V_OK; } return X509_V_ERR_PERMITTED_VIOLATION; } if (!equal_case(&base_cbs, &host)) { return X509_V_ERR_PERMITTED_VIOLATION; } return X509_V_OK; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_ocsp.cc ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). 
You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "ext_dat.h" // OCSP extensions and a couple of CRL entry extensions static int i2r_ocsp_acutoff(const X509V3_EXT_METHOD *method, void *nonce, BIO *out, int indent); static int i2r_ocsp_nocheck(const X509V3_EXT_METHOD *method, void *nocheck, BIO *out, int indent); static void *s2i_ocsp_nocheck(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const char *str); const X509V3_EXT_METHOD v3_crl_invdate = { NID_invalidity_date, 0, ASN1_ITEM_ref(ASN1_GENERALIZEDTIME), 0, 0, 0, 0, 0, 0, 0, 0, i2r_ocsp_acutoff, 0, NULL, }; const X509V3_EXT_METHOD v3_ocsp_nocheck = { NID_id_pkix_OCSP_noCheck, 0, ASN1_ITEM_ref(ASN1_NULL), 0, 0, 0, 0, 0, s2i_ocsp_nocheck, 0, 0, i2r_ocsp_nocheck, 0, NULL, }; static int i2r_ocsp_acutoff(const X509V3_EXT_METHOD *method, void *cutoff, BIO *bp, int ind) { if (BIO_printf(bp, "%*s", ind, "") <= 0) { return 0; } if (!ASN1_GENERALIZEDTIME_print( bp, reinterpret_cast(cutoff))) { return 0; } return 1; } // Nocheck is just a single NULL. Don't print anything and always set it static int i2r_ocsp_nocheck(const X509V3_EXT_METHOD *method, void *nocheck, BIO *out, int indent) { return 1; } static void *s2i_ocsp_nocheck(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const char *str) { return ASN1_NULL_new(); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_pcons.cc ================================================ /* * Copyright 2003-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "ext_dat.h" #include "internal.h" static STACK_OF(CONF_VALUE) *i2v_POLICY_CONSTRAINTS( const X509V3_EXT_METHOD *method, void *bcons, STACK_OF(CONF_VALUE) *extlist); static void *v2i_POLICY_CONSTRAINTS(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *values); const X509V3_EXT_METHOD v3_policy_constraints = { NID_policy_constraints, 0, ASN1_ITEM_ref(POLICY_CONSTRAINTS), 0, 0, 0, 0, 0, 0, i2v_POLICY_CONSTRAINTS, v2i_POLICY_CONSTRAINTS, NULL, NULL, NULL}; ASN1_SEQUENCE(POLICY_CONSTRAINTS) = { ASN1_IMP_OPT(POLICY_CONSTRAINTS, requireExplicitPolicy, ASN1_INTEGER, 0), ASN1_IMP_OPT(POLICY_CONSTRAINTS, inhibitPolicyMapping, ASN1_INTEGER, 1), } ASN1_SEQUENCE_END(POLICY_CONSTRAINTS) IMPLEMENT_ASN1_ALLOC_FUNCTIONS(POLICY_CONSTRAINTS) static STACK_OF(CONF_VALUE) *i2v_POLICY_CONSTRAINTS( const X509V3_EXT_METHOD *method, void *a, STACK_OF(CONF_VALUE) *extlist) { const POLICY_CONSTRAINTS *pcons = reinterpret_cast(a); X509V3_add_value_int("Require Explicit Policy", pcons->requireExplicitPolicy, &extlist); X509V3_add_value_int("Inhibit Policy Mapping", pcons->inhibitPolicyMapping, &extlist); return extlist; } static void *v2i_POLICY_CONSTRAINTS(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *values) { POLICY_CONSTRAINTS *pcons = NULL; if (!(pcons = POLICY_CONSTRAINTS_new())) { return NULL; } for (size_t i = 0; i < sk_CONF_VALUE_num(values); i++) { const CONF_VALUE *val = sk_CONF_VALUE_value(values, i); if (!strcmp(val->name, "requireExplicitPolicy")) { if (!X509V3_get_value_int(val, &pcons->requireExplicitPolicy)) { goto err; } } else if (!strcmp(val->name, "inhibitPolicyMapping")) { if (!X509V3_get_value_int(val, &pcons->inhibitPolicyMapping)) { goto err; } } else { OPENSSL_PUT_ERROR(X509V3, 
X509V3_R_INVALID_NAME); X509V3_conf_err(val); goto err; } } if (!pcons->inhibitPolicyMapping && !pcons->requireExplicitPolicy) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_ILLEGAL_EMPTY_EXTENSION); goto err; } return pcons; err: POLICY_CONSTRAINTS_free(pcons); return NULL; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_pmaps.cc ================================================ /* * Copyright 2003-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "ext_dat.h" #include "internal.h" static void *v2i_POLICY_MAPPINGS(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *nval); static STACK_OF(CONF_VALUE) *i2v_POLICY_MAPPINGS( const X509V3_EXT_METHOD *method, void *pmps, STACK_OF(CONF_VALUE) *extlist); const X509V3_EXT_METHOD v3_policy_mappings = { NID_policy_mappings, 0, ASN1_ITEM_ref(POLICY_MAPPINGS), 0, 0, 0, 0, 0, 0, i2v_POLICY_MAPPINGS, v2i_POLICY_MAPPINGS, 0, 0, NULL, }; ASN1_SEQUENCE(POLICY_MAPPING) = { ASN1_SIMPLE(POLICY_MAPPING, issuerDomainPolicy, ASN1_OBJECT), ASN1_SIMPLE(POLICY_MAPPING, subjectDomainPolicy, ASN1_OBJECT), } ASN1_SEQUENCE_END(POLICY_MAPPING) ASN1_ITEM_TEMPLATE(POLICY_MAPPINGS) = ASN1_EX_TEMPLATE_TYPE( ASN1_TFLG_SEQUENCE_OF, 0, POLICY_MAPPINGS, POLICY_MAPPING) ASN1_ITEM_TEMPLATE_END(POLICY_MAPPINGS) IMPLEMENT_ASN1_ALLOC_FUNCTIONS(POLICY_MAPPING) static STACK_OF(CONF_VALUE) *i2v_POLICY_MAPPINGS( const X509V3_EXT_METHOD *method, void *a, STACK_OF(CONF_VALUE) *ext_list) { const POLICY_MAPPINGS *pmaps = reinterpret_cast(a); for (size_t i = 0; i < sk_POLICY_MAPPING_num(pmaps); i++) { const POLICY_MAPPING *pmap = sk_POLICY_MAPPING_value(pmaps, i); char obj_tmp1[80], 
obj_tmp2[80]; i2t_ASN1_OBJECT(obj_tmp1, 80, pmap->issuerDomainPolicy); i2t_ASN1_OBJECT(obj_tmp2, 80, pmap->subjectDomainPolicy); X509V3_add_value(obj_tmp1, obj_tmp2, &ext_list); } return ext_list; } static void *v2i_POLICY_MAPPINGS(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const STACK_OF(CONF_VALUE) *nval) { POLICY_MAPPINGS *pmaps = sk_POLICY_MAPPING_new_null(); if (pmaps == NULL) { return NULL; } for (size_t i = 0; i < sk_CONF_VALUE_num(nval); i++) { const CONF_VALUE *val = sk_CONF_VALUE_value(nval, i); if (!val->value || !val->name) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_OBJECT_IDENTIFIER); X509V3_conf_err(val); goto err; } POLICY_MAPPING *pmap = POLICY_MAPPING_new(); if (pmap == NULL || !sk_POLICY_MAPPING_push(pmaps, pmap)) { POLICY_MAPPING_free(pmap); goto err; } pmap->issuerDomainPolicy = OBJ_txt2obj(val->name, 0); pmap->subjectDomainPolicy = OBJ_txt2obj(val->value, 0); if (!pmap->issuerDomainPolicy || !pmap->subjectDomainPolicy) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_OBJECT_IDENTIFIER); X509V3_conf_err(val); goto err; } } return pmaps; err: sk_POLICY_MAPPING_pop_free(pmaps, POLICY_MAPPING_free); return NULL; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_prn.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ // X509 v3 extension utilities #include #include #include #include #include // Extension printing routines static int unknown_ext_print(BIO *out, const X509_EXTENSION *ext, unsigned long flag, int indent, int supported); // Print out a name+value stack static void X509V3_EXT_val_prn(BIO *out, const STACK_OF(CONF_VALUE) *val, int indent, int ml) { if (!val) { return; } if (!ml || !sk_CONF_VALUE_num(val)) { BIO_printf(out, "%*s", indent, ""); if (!sk_CONF_VALUE_num(val)) { BIO_puts(out, "\n"); } } for (size_t i = 0; i < sk_CONF_VALUE_num(val); i++) { if (ml) { BIO_printf(out, "%*s", indent, ""); } else if (i > 0) { BIO_printf(out, ", "); } const CONF_VALUE *nval = sk_CONF_VALUE_value(val, i); if (!nval->name) { BIO_puts(out, nval->value); } else if (!nval->value) { BIO_puts(out, nval->name); } else { BIO_printf(out, "%s:%s", nval->name, nval->value); } if (ml) { BIO_puts(out, "\n"); } } } // Main routine: print out a general extension int X509V3_EXT_print(BIO *out, const X509_EXTENSION *ext, unsigned long flag, int indent) { const X509V3_EXT_METHOD *method = X509V3_EXT_get(ext); if (method == NULL) { return unknown_ext_print(out, ext, flag, indent, 0); } const ASN1_STRING *ext_data = X509_EXTENSION_get_data(ext); const unsigned char *p = ASN1_STRING_get0_data(ext_data); void *ext_str = ASN1_item_d2i(NULL, &p, ASN1_STRING_length(ext_data), ASN1_ITEM_ptr(method->it)); if (!ext_str) { return unknown_ext_print(out, ext, flag, indent, 1); } char *value = NULL; STACK_OF(CONF_VALUE) *nval = NULL; int ok = 0; if (method->i2s) { if (!(value = method->i2s(method, ext_str))) { goto err; } BIO_printf(out, "%*s%s", indent, "", value); } else if (method->i2v) { if (!(nval = method->i2v(method, ext_str, NULL))) { goto err; } X509V3_EXT_val_prn(out, nval, indent, method->ext_flags & X509V3_EXT_MULTILINE); } else if (method->i2r) { if (!method->i2r(method, 
ext_str, out, indent)) { goto err; } } else { OPENSSL_PUT_ERROR(X509V3, X509V3_R_OPERATION_NOT_DEFINED); goto err; } ok = 1; err: sk_CONF_VALUE_pop_free(nval, X509V3_conf_free); OPENSSL_free(value); ASN1_item_free(reinterpret_cast(ext_str), ASN1_ITEM_ptr(method->it)); return ok; } int X509V3_extensions_print(BIO *bp, const char *title, const STACK_OF(X509_EXTENSION) *exts, unsigned long flag, int indent) { size_t i; int j; if (sk_X509_EXTENSION_num(exts) <= 0) { return 1; } if (title) { BIO_printf(bp, "%*s%s:\n", indent, "", title); indent += 4; } for (i = 0; i < sk_X509_EXTENSION_num(exts); i++) { const X509_EXTENSION *ex = sk_X509_EXTENSION_value(exts, i); if (indent && BIO_printf(bp, "%*s", indent, "") <= 0) { return 0; } const ASN1_OBJECT *obj = X509_EXTENSION_get_object(ex); i2a_ASN1_OBJECT(bp, obj); j = X509_EXTENSION_get_critical(ex); if (BIO_printf(bp, ": %s\n", j ? "critical" : "") <= 0) { return 0; } if (!X509V3_EXT_print(bp, ex, flag, indent + 4)) { BIO_printf(bp, "%*s", indent + 4, ""); ASN1_STRING_print(bp, X509_EXTENSION_get_data(ex)); } if (BIO_write(bp, "\n", 1) <= 0) { return 0; } } return 1; } static int unknown_ext_print(BIO *out, const X509_EXTENSION *ext, unsigned long flag, int indent, int supported) { switch (flag & X509V3_EXT_UNKNOWN_MASK) { case X509V3_EXT_DEFAULT: return 0; case X509V3_EXT_ERROR_UNKNOWN: if (supported) { BIO_printf(out, "%*s", indent, ""); } else { BIO_printf(out, "%*s", indent, ""); } return 1; case X509V3_EXT_PARSE_UNKNOWN: case X509V3_EXT_DUMP_UNKNOWN: { const ASN1_STRING *data = X509_EXTENSION_get_data(ext); return BIO_hexdump(out, ASN1_STRING_get0_data(data), ASN1_STRING_length(data), indent); } default: return 1; } } int X509V3_EXT_print_fp(FILE *fp, const X509_EXTENSION *ext, int flag, int indent) { BIO *bio_tmp; int ret; if (!(bio_tmp = BIO_new_fp(fp, BIO_NOCLOSE))) { return 0; } ret = X509V3_EXT_print(bio_tmp, ext, flag, indent); BIO_free(bio_tmp); return ret; } ================================================ 
FILE: Sources/CNIOBoringSSL/crypto/x509/v3_purp.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include "../internal.h" #include "internal.h" struct x509_purpose_st { int purpose; int trust; // Default trust ID int (*check_purpose)(const struct x509_purpose_st *, const X509 *, int); const char *sname; } /* X509_PURPOSE */; #define V1_ROOT (EXFLAG_V1 | EXFLAG_SS) #define ku_reject(x, usage) \ (((x)->ex_flags & EXFLAG_KUSAGE) && !((x)->ex_kusage & (usage))) #define xku_reject(x, usage) \ (((x)->ex_flags & EXFLAG_XKUSAGE) && !((x)->ex_xkusage & (usage))) static int check_ca(const X509 *x); static int check_purpose_ssl_client(const X509_PURPOSE *xp, const X509 *x, int ca); static int check_purpose_ssl_server(const X509_PURPOSE *xp, const X509 *x, int ca); static int check_purpose_ns_ssl_server(const X509_PURPOSE *xp, const X509 *x, int ca); static int check_purpose_smime_sign(const X509_PURPOSE *xp, const X509 *x, int ca); static int check_purpose_smime_encrypt(const X509_PURPOSE *xp, const X509 *x, int ca); static int check_purpose_crl_sign(const X509_PURPOSE *xp, const X509 *x, int ca); static int check_purpose_timestamp_sign(const X509_PURPOSE *xp, const X509 *x, int ca); static int no_check(const X509_PURPOSE *xp, const X509 *x, int ca); // X509_TRUST_NONE is not a valid |X509_TRUST_*| constant. It is used by // |X509_PURPOSE_ANY| to indicate that it has no corresponding trust type and // cannot be used with |X509_STORE_CTX_set_purpose|. 
#define X509_TRUST_NONE (-1) static const X509_PURPOSE xstandard[] = { {X509_PURPOSE_SSL_CLIENT, X509_TRUST_SSL_CLIENT, check_purpose_ssl_client, "sslclient"}, {X509_PURPOSE_SSL_SERVER, X509_TRUST_SSL_SERVER, check_purpose_ssl_server, "sslserver"}, {X509_PURPOSE_NS_SSL_SERVER, X509_TRUST_SSL_SERVER, check_purpose_ns_ssl_server, "nssslserver"}, {X509_PURPOSE_SMIME_SIGN, X509_TRUST_EMAIL, check_purpose_smime_sign, "smimesign"}, {X509_PURPOSE_SMIME_ENCRYPT, X509_TRUST_EMAIL, check_purpose_smime_encrypt, "smimeencrypt"}, {X509_PURPOSE_CRL_SIGN, X509_TRUST_COMPAT, check_purpose_crl_sign, "crlsign"}, {X509_PURPOSE_ANY, X509_TRUST_NONE, no_check, "any"}, // |X509_PURPOSE_OCSP_HELPER| performs no actual checks. OpenSSL's OCSP // implementation relied on the caller performing EKU and KU checks. {X509_PURPOSE_OCSP_HELPER, X509_TRUST_COMPAT, no_check, "ocsphelper"}, {X509_PURPOSE_TIMESTAMP_SIGN, X509_TRUST_TSA, check_purpose_timestamp_sign, "timestampsign"}, }; int X509_check_purpose(X509 *x, int id, int ca) { // This differs from OpenSSL, which uses -1 to indicate a fatal error and 0 to // indicate an invalid certificate. BoringSSL uses 0 for both. if (!x509v3_cache_extensions(x)) { return 0; } if (id == -1) { return 1; } const X509_PURPOSE *pt = X509_PURPOSE_get0(id); if (pt == NULL) { return 0; } // Historically, |check_purpose| implementations other than |X509_PURPOSE_ANY| // called |check_ca|. This is redundant with the |X509_V_ERR_INVALID_CA| // logic, but |X509_check_purpose| is public API, so we preserve this // behavior. 
if (ca && id != X509_PURPOSE_ANY && !check_ca(x)) { return 0; } return pt->check_purpose(pt, x, ca); } const X509_PURPOSE *X509_PURPOSE_get0(int id) { for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(xstandard); i++) { if (xstandard[i].purpose == id) { return &xstandard[i]; } } return NULL; } int X509_PURPOSE_get_by_sname(const char *sname) { for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(xstandard); i++) { if (strcmp(xstandard[i].sname, sname) == 0) { return xstandard[i].purpose; } } return -1; } int X509_PURPOSE_get_id(const X509_PURPOSE *xp) { return xp->purpose; } int X509_PURPOSE_get_trust(const X509_PURPOSE *xp) { return xp->trust; } int X509_supported_extension(const X509_EXTENSION *ex) { int nid = OBJ_obj2nid(X509_EXTENSION_get_object(ex)); return nid == NID_key_usage || // nid == NID_subject_alt_name || // nid == NID_basic_constraints || // nid == NID_certificate_policies || // nid == NID_ext_key_usage || // nid == NID_policy_constraints || // nid == NID_name_constraints || // nid == NID_policy_mappings || // nid == NID_inhibit_any_policy; } static int setup_dp(X509 *x, DIST_POINT *dp) { if (!dp->distpoint || (dp->distpoint->type != 1)) { return 1; } X509_NAME *iname = NULL; for (size_t i = 0; i < sk_GENERAL_NAME_num(dp->CRLissuer); i++) { GENERAL_NAME *gen = sk_GENERAL_NAME_value(dp->CRLissuer, i); if (gen->type == GEN_DIRNAME) { iname = gen->d.directoryName; break; } } if (!iname) { iname = X509_get_issuer_name(x); } return DIST_POINT_set_dpname(dp->distpoint, iname); } static int setup_crldp(X509 *x) { int j; x->crldp = reinterpret_cast( X509_get_ext_d2i(x, NID_crl_distribution_points, &j, NULL)); if (x->crldp == NULL && j != -1) { return 0; } for (size_t i = 0; i < sk_DIST_POINT_num(x->crldp); i++) { if (!setup_dp(x, sk_DIST_POINT_value(x->crldp, i))) { return 0; } } return 1; } int x509v3_cache_extensions(X509 *x) { BASIC_CONSTRAINTS *bs; ASN1_BIT_STRING *usage; EXTENDED_KEY_USAGE *extusage; size_t i; int j; CRYPTO_MUTEX_lock_read(&x->lock); const int is_set = 
x->ex_flags & EXFLAG_SET; CRYPTO_MUTEX_unlock_read(&x->lock); if (is_set) { return (x->ex_flags & EXFLAG_INVALID) == 0; } CRYPTO_MUTEX_lock_write(&x->lock); if (x->ex_flags & EXFLAG_SET) { CRYPTO_MUTEX_unlock_write(&x->lock); return (x->ex_flags & EXFLAG_INVALID) == 0; } if (!X509_digest(x, EVP_sha256(), x->cert_hash, NULL)) { x->ex_flags |= EXFLAG_INVALID; } // V1 should mean no extensions ... if (X509_get_version(x) == X509_VERSION_1) { x->ex_flags |= EXFLAG_V1; } // Handle basic constraints if ((bs = reinterpret_cast( X509_get_ext_d2i(x, NID_basic_constraints, &j, NULL)))) { if (bs->ca) { x->ex_flags |= EXFLAG_CA; } if (bs->pathlen) { if ((bs->pathlen->type == V_ASN1_NEG_INTEGER) || !bs->ca) { x->ex_flags |= EXFLAG_INVALID; x->ex_pathlen = 0; } else { // TODO(davidben): |ASN1_INTEGER_get| returns -1 on overflow, // which currently acts as if the constraint isn't present. This // works (an overflowing path length constraint may as well be // infinity), but Chromium's verifier simply treats values above // 255 as an error. 
x->ex_pathlen = ASN1_INTEGER_get(bs->pathlen); } } else { x->ex_pathlen = -1; } BASIC_CONSTRAINTS_free(bs); x->ex_flags |= EXFLAG_BCONS; } else if (j != -1) { x->ex_flags |= EXFLAG_INVALID; } // Handle key usage if ((usage = reinterpret_cast( X509_get_ext_d2i(x, NID_key_usage, &j, NULL)))) { if (usage->length > 0) { x->ex_kusage = usage->data[0]; if (usage->length > 1) { x->ex_kusage |= usage->data[1] << 8; } } else { x->ex_kusage = 0; } x->ex_flags |= EXFLAG_KUSAGE; ASN1_BIT_STRING_free(usage); } else if (j != -1) { x->ex_flags |= EXFLAG_INVALID; } x->ex_xkusage = 0; if ((extusage = reinterpret_cast( X509_get_ext_d2i(x, NID_ext_key_usage, &j, NULL)))) { x->ex_flags |= EXFLAG_XKUSAGE; for (i = 0; i < sk_ASN1_OBJECT_num(extusage); i++) { switch (OBJ_obj2nid(sk_ASN1_OBJECT_value(extusage, i))) { case NID_server_auth: x->ex_xkusage |= XKU_SSL_SERVER; break; case NID_client_auth: x->ex_xkusage |= XKU_SSL_CLIENT; break; case NID_email_protect: x->ex_xkusage |= XKU_SMIME; break; case NID_code_sign: x->ex_xkusage |= XKU_CODE_SIGN; break; case NID_ms_sgc: case NID_ns_sgc: x->ex_xkusage |= XKU_SGC; break; case NID_OCSP_sign: x->ex_xkusage |= XKU_OCSP_SIGN; break; case NID_time_stamp: x->ex_xkusage |= XKU_TIMESTAMP; break; case NID_dvcs: x->ex_xkusage |= XKU_DVCS; break; case NID_anyExtendedKeyUsage: x->ex_xkusage |= XKU_ANYEKU; break; } } sk_ASN1_OBJECT_pop_free(extusage, ASN1_OBJECT_free); } else if (j != -1) { x->ex_flags |= EXFLAG_INVALID; } x->skid = reinterpret_cast( X509_get_ext_d2i(x, NID_subject_key_identifier, &j, NULL)); if (x->skid == NULL && j != -1) { x->ex_flags |= EXFLAG_INVALID; } x->akid = reinterpret_cast( X509_get_ext_d2i(x, NID_authority_key_identifier, &j, NULL)); if (x->akid == NULL && j != -1) { x->ex_flags |= EXFLAG_INVALID; } // Does subject name match issuer ? 
if (!X509_NAME_cmp(X509_get_subject_name(x), X509_get_issuer_name(x))) { x->ex_flags |= EXFLAG_SI; // If SKID matches AKID also indicate self signed if (X509_check_akid(x, x->akid) == X509_V_OK && !ku_reject(x, X509v3_KU_KEY_CERT_SIGN)) { x->ex_flags |= EXFLAG_SS; } } x->altname = reinterpret_cast( X509_get_ext_d2i(x, NID_subject_alt_name, &j, NULL)); if (x->altname == NULL && j != -1) { x->ex_flags |= EXFLAG_INVALID; } x->nc = reinterpret_cast( X509_get_ext_d2i(x, NID_name_constraints, &j, NULL)); if (x->nc == NULL && j != -1) { x->ex_flags |= EXFLAG_INVALID; } if (!setup_crldp(x)) { x->ex_flags |= EXFLAG_INVALID; } for (j = 0; j < X509_get_ext_count(x); j++) { const X509_EXTENSION *ex = X509_get_ext(x, j); if (!X509_EXTENSION_get_critical(ex)) { continue; } if (!X509_supported_extension(ex)) { x->ex_flags |= EXFLAG_CRITICAL; break; } } x->ex_flags |= EXFLAG_SET; CRYPTO_MUTEX_unlock_write(&x->lock); return (x->ex_flags & EXFLAG_INVALID) == 0; } // check_ca returns one if |x| should be considered a CA certificate and zero // otherwise. static int check_ca(const X509 *x) { // keyUsage if present should allow cert signing if (ku_reject(x, X509v3_KU_KEY_CERT_SIGN)) { return 0; } // Version 1 certificates are considered CAs and don't have extensions. if ((x->ex_flags & V1_ROOT) == V1_ROOT) { return 1; } // Otherwise, it's only a CA if basicConstraints says so. return ((x->ex_flags & EXFLAG_BCONS) && (x->ex_flags & EXFLAG_CA)); } int X509_check_ca(X509 *x) { if (!x509v3_cache_extensions(x)) { return 0; } return check_ca(x); } // check_purpose returns one if |x| is a valid part of a certificate path for // extended key usage |required_xku| and at least one of key usages in // |required_kus|. |ca| indicates whether |x| is a CA or end-entity certificate. static int check_purpose(const X509 *x, int ca, int required_xku, int required_kus) { // Check extended key usage on the entire chain. 
if (required_xku != 0 && xku_reject(x, required_xku)) { return 0; } // Check key usages only on the end-entity certificate. return ca || !ku_reject(x, required_kus); } static int check_purpose_ssl_client(const X509_PURPOSE *xp, const X509 *x, int ca) { // We need to do digital signatures or key agreement. // // TODO(davidben): We do not implement any TLS client certificate modes based // on key agreement. return check_purpose(x, ca, XKU_SSL_CLIENT, X509v3_KU_DIGITAL_SIGNATURE | X509v3_KU_KEY_AGREEMENT); } // Key usage needed for TLS/SSL server: digital signature, encipherment or // key agreement. The ssl code can check this more thoroughly for individual // key types. #define X509v3_KU_TLS \ (X509v3_KU_DIGITAL_SIGNATURE | X509v3_KU_KEY_ENCIPHERMENT | \ X509v3_KU_KEY_AGREEMENT) static int check_purpose_ssl_server(const X509_PURPOSE *xp, const X509 *x, int ca) { return check_purpose(x, ca, XKU_SSL_SERVER, X509v3_KU_TLS); } static int check_purpose_ns_ssl_server(const X509_PURPOSE *xp, const X509 *x, int ca) { // We need to encipher or Netscape complains. return check_purpose(x, ca, XKU_SSL_SERVER, X509v3_KU_KEY_ENCIPHERMENT); } static int check_purpose_smime_sign(const X509_PURPOSE *xp, const X509 *x, int ca) { return check_purpose(x, ca, XKU_SMIME, X509v3_KU_DIGITAL_SIGNATURE | X509v3_KU_NON_REPUDIATION); } static int check_purpose_smime_encrypt(const X509_PURPOSE *xp, const X509 *x, int ca) { return check_purpose(x, ca, XKU_SMIME, X509v3_KU_KEY_ENCIPHERMENT); } static int check_purpose_crl_sign(const X509_PURPOSE *xp, const X509 *x, int ca) { return check_purpose(x, ca, /*required_xku=*/0, X509v3_KU_CRL_SIGN); } static int check_purpose_timestamp_sign(const X509_PURPOSE *xp, const X509 *x, int ca) { if (ca) { return 1; } // Check the optional key usage field: // if Key Usage is present, it must be one of digitalSignature // and/or nonRepudiation (other values are not consistent and shall // be rejected). 
if ((x->ex_flags & EXFLAG_KUSAGE) && ((x->ex_kusage & ~(X509v3_KU_NON_REPUDIATION | X509v3_KU_DIGITAL_SIGNATURE)) || !(x->ex_kusage & (X509v3_KU_NON_REPUDIATION | X509v3_KU_DIGITAL_SIGNATURE)))) { return 0; } // Only time stamp key usage is permitted and it's required. // // TODO(davidben): Should we check EKUs up the chain like the other cases? if (!(x->ex_flags & EXFLAG_XKUSAGE) || x->ex_xkusage != XKU_TIMESTAMP) { return 0; } // Extended Key Usage MUST be critical int i_ext = X509_get_ext_by_NID(x, NID_ext_key_usage, -1); if (i_ext >= 0) { const X509_EXTENSION *ext = X509_get_ext(x, i_ext); if (!X509_EXTENSION_get_critical(ext)) { return 0; } } return 1; } static int no_check(const X509_PURPOSE *xp, const X509 *x, int ca) { return 1; } int X509_check_issued(X509 *issuer, X509 *subject) { if (X509_NAME_cmp(X509_get_subject_name(issuer), X509_get_issuer_name(subject))) { return X509_V_ERR_SUBJECT_ISSUER_MISMATCH; } if (!x509v3_cache_extensions(issuer) || !x509v3_cache_extensions(subject)) { return X509_V_ERR_UNSPECIFIED; } if (subject->akid) { int ret = X509_check_akid(issuer, subject->akid); if (ret != X509_V_OK) { return ret; } } if (ku_reject(issuer, X509v3_KU_KEY_CERT_SIGN)) { return X509_V_ERR_KEYUSAGE_NO_CERTSIGN; } return X509_V_OK; } int X509_check_akid(X509 *issuer, const AUTHORITY_KEYID *akid) { if (!akid) { return X509_V_OK; } // Check key ids (if present) if (akid->keyid && issuer->skid && ASN1_OCTET_STRING_cmp(akid->keyid, issuer->skid)) { return X509_V_ERR_AKID_SKID_MISMATCH; } // Check serial number if (akid->serial && ASN1_INTEGER_cmp(X509_get_serialNumber(issuer), akid->serial)) { return X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH; } // Check issuer name if (akid->issuer) { // Ugh, for some peculiar reason AKID includes SEQUENCE OF // GeneralName. So look for a DirName. There may be more than one but // we only take any notice of the first. 
GENERAL_NAMES *gens; GENERAL_NAME *gen; X509_NAME *nm = NULL; size_t i; gens = akid->issuer; for (i = 0; i < sk_GENERAL_NAME_num(gens); i++) { gen = sk_GENERAL_NAME_value(gens, i); if (gen->type == GEN_DIRNAME) { nm = gen->d.dirn; break; } } if (nm && X509_NAME_cmp(nm, X509_get_issuer_name(issuer))) { return X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH; } } return X509_V_OK; } uint32_t X509_get_extension_flags(X509 *x) { // Ignore the return value. On failure, |x->ex_flags| will include // |EXFLAG_INVALID|. x509v3_cache_extensions(x); return x->ex_flags; } uint32_t X509_get_key_usage(X509 *x) { if (!x509v3_cache_extensions(x)) { return 0; } if (x->ex_flags & EXFLAG_KUSAGE) { return x->ex_kusage; } // If there is no extension, key usage is unconstrained, so set all bits to // one. Note that, although we use |UINT32_MAX|, |ex_kusage| only contains the // first 16 bits when the extension is present. return UINT32_MAX; } uint32_t X509_get_extended_key_usage(X509 *x) { if (!x509v3_cache_extensions(x)) { return 0; } if (x->ex_flags & EXFLAG_XKUSAGE) { return x->ex_xkusage; } // If there is no extension, extended key usage is unconstrained, so set all // bits to one. return UINT32_MAX; } const ASN1_OCTET_STRING *X509_get0_subject_key_id(X509 *x509) { if (!x509v3_cache_extensions(x509)) { return NULL; } return x509->skid; } const ASN1_OCTET_STRING *X509_get0_authority_key_id(X509 *x509) { if (!x509v3_cache_extensions(x509)) { return NULL; } return x509->akid != NULL ? x509->akid->keyid : NULL; } const GENERAL_NAMES *X509_get0_authority_issuer(X509 *x509) { if (!x509v3_cache_extensions(x509)) { return NULL; } return x509->akid != NULL ? x509->akid->issuer : NULL; } const ASN1_INTEGER *X509_get0_authority_serial(X509 *x509) { if (!x509v3_cache_extensions(x509)) { return NULL; } return x509->akid != NULL ? 
x509->akid->serial : NULL; } long X509_get_pathlen(X509 *x509) { if (!x509v3_cache_extensions(x509) || (x509->ex_flags & EXFLAG_BCONS) == 0) { return -1; } return x509->ex_pathlen; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_skey.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "ext_dat.h" #include "internal.h" char *i2s_ASN1_OCTET_STRING(const X509V3_EXT_METHOD *method, const ASN1_OCTET_STRING *oct) { return x509v3_bytes_to_hex(oct->data, oct->length); } ASN1_OCTET_STRING *s2i_ASN1_OCTET_STRING(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const char *str) { size_t len; uint8_t *data = x509v3_hex_to_bytes(str, &len); ASN1_OCTET_STRING *oct; if (data == NULL) { return NULL; } if (len > INT_MAX) { OPENSSL_PUT_ERROR(X509V3, ERR_R_OVERFLOW); goto err; } oct = ASN1_OCTET_STRING_new(); if (oct == NULL) { goto err; } ASN1_STRING_set0(oct, data, (int)len); return oct; err: OPENSSL_free(data); return NULL; } static char *i2s_ASN1_OCTET_STRING_cb(const X509V3_EXT_METHOD *method, void *ext) { return i2s_ASN1_OCTET_STRING(method, reinterpret_cast(ext)); } static void *s2i_skey_id(const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const char *str) { ASN1_OCTET_STRING *oct; ASN1_BIT_STRING *pk; unsigned char pkey_dig[EVP_MAX_MD_SIZE]; unsigned int diglen; if (strcmp(str, "hash")) { return s2i_ASN1_OCTET_STRING(method, ctx, str); } if (!(oct = ASN1_OCTET_STRING_new())) { return NULL; } if (ctx && (ctx->flags == X509V3_CTX_TEST)) { return oct; } if (!ctx || (!ctx->subject_req && !ctx->subject_cert)) { 
OPENSSL_PUT_ERROR(X509V3, X509V3_R_NO_PUBLIC_KEY); goto err; } if (ctx->subject_req) { pk = ctx->subject_req->req_info->pubkey->public_key; } else { pk = ctx->subject_cert->cert_info->key->public_key; } if (!pk) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_NO_PUBLIC_KEY); goto err; } if (!EVP_Digest(pk->data, pk->length, pkey_dig, &diglen, EVP_sha1(), NULL)) { goto err; } if (!ASN1_OCTET_STRING_set(oct, pkey_dig, diglen)) { goto err; } return oct; err: ASN1_OCTET_STRING_free(oct); return NULL; } const X509V3_EXT_METHOD v3_skey_id = { NID_subject_key_identifier, 0, ASN1_ITEM_ref(ASN1_OCTET_STRING), 0, 0, 0, 0, i2s_ASN1_OCTET_STRING_cb, s2i_skey_id, 0, 0, 0, 0, NULL, }; ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/v3_utl.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ /* X509 v3 extension utilities */ #include #include #include #include #include #include #include #include #include #include #include "../conf/internal.h" #include "../internal.h" #include "internal.h" static char *strip_spaces(char *name); static int sk_strcmp(const char *const *a, const char *const *b); static STACK_OF(OPENSSL_STRING) *get_email(const X509_NAME *name, const GENERAL_NAMES *gens); static void str_free(OPENSSL_STRING str); static int append_ia5(STACK_OF(OPENSSL_STRING) **sk, const ASN1_IA5STRING *email); static int ipv4_from_asc(uint8_t v4[4], const char *in); static int ipv6_from_asc(uint8_t v6[16], const char *in); static int ipv6_cb(const char *elem, size_t len, void *usr); static int ipv6_hex(uint8_t *out, const char *in, size_t inlen); // Add a CONF_VALUE name value pair to stack static int x509V3_add_len_value(const char *name, const char *value, size_t value_len, int omit_value, STACK_OF(CONF_VALUE) **extlist) { CONF_VALUE *vtmp = NULL; char *tname = NULL, *tvalue = NULL; int extlist_was_null = *extlist == NULL; if (name && !(tname = OPENSSL_strdup(name))) { goto err; } if (!omit_value) { // |CONF_VALUE| cannot represent strings with NULs. 
if (OPENSSL_memchr(value, 0, value_len)) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_VALUE); goto err; } tvalue = OPENSSL_strndup(value, value_len); if (tvalue == NULL) { goto err; } } if (!(vtmp = CONF_VALUE_new())) { goto err; } if (!*extlist && !(*extlist = sk_CONF_VALUE_new_null())) { goto err; } vtmp->section = NULL; vtmp->name = tname; vtmp->value = tvalue; if (!sk_CONF_VALUE_push(*extlist, vtmp)) { goto err; } return 1; err: if (extlist_was_null) { sk_CONF_VALUE_free(*extlist); *extlist = NULL; } OPENSSL_free(vtmp); OPENSSL_free(tname); OPENSSL_free(tvalue); return 0; } int X509V3_add_value(const char *name, const char *value, STACK_OF(CONF_VALUE) **extlist) { return x509V3_add_len_value(name, value, value != NULL ? strlen(value) : 0, /*omit_value=*/value == NULL, extlist); } int x509V3_add_value_asn1_string(const char *name, const ASN1_STRING *value, STACK_OF(CONF_VALUE) **extlist) { return x509V3_add_len_value(name, (const char *)value->data, value->length, /*omit_value=*/0, extlist); } // Free function for STACK_OF(CONF_VALUE) void X509V3_conf_free(CONF_VALUE *conf) { if (!conf) { return; } OPENSSL_free(conf->name); OPENSSL_free(conf->value); OPENSSL_free(conf->section); OPENSSL_free(conf); } int X509V3_add_value_bool(const char *name, int asn1_bool, STACK_OF(CONF_VALUE) **extlist) { if (asn1_bool) { return X509V3_add_value(name, "TRUE", extlist); } return X509V3_add_value(name, "FALSE", extlist); } static char *bignum_to_string(const BIGNUM *bn) { char *tmp, *ret; // Display large numbers in hex and small numbers in decimal. Converting to // decimal takes quadratic time and is no more useful than hex for large // numbers. if (BN_num_bits(bn) < 32) { return BN_bn2dec(bn); } tmp = BN_bn2hex(bn); if (tmp == NULL) { return NULL; } // Prepend "0x", but place it after the "-" if negative. if (OPENSSL_asprintf(&ret, "%s0x%s", (tmp[0] == '-') ? "-" : "", (tmp[0] == '-') ? 
tmp + 1 : tmp) == -1) { ret = nullptr; } OPENSSL_free(tmp); return ret; } char *i2s_ASN1_ENUMERATED(const X509V3_EXT_METHOD *method, const ASN1_ENUMERATED *a) { BIGNUM *bntmp = NULL; char *strtmp = NULL; if (!a) { return NULL; } if (!(bntmp = ASN1_ENUMERATED_to_BN(a, NULL)) || !(strtmp = bignum_to_string(bntmp))) { } BN_free(bntmp); return strtmp; } char *i2s_ASN1_INTEGER(const X509V3_EXT_METHOD *method, const ASN1_INTEGER *a) { BIGNUM *bntmp = NULL; char *strtmp = NULL; if (!a) { return NULL; } if (!(bntmp = ASN1_INTEGER_to_BN(a, NULL)) || !(strtmp = bignum_to_string(bntmp))) { } BN_free(bntmp); return strtmp; } ASN1_INTEGER *s2i_ASN1_INTEGER(const X509V3_EXT_METHOD *method, const char *value) { BIGNUM *bn = NULL; ASN1_INTEGER *aint; int isneg, ishex; int ret; if (!value) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NULL_VALUE); return 0; } bn = BN_new(); if (value[0] == '-') { value++; isneg = 1; } else { isneg = 0; } if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X'))) { value += 2; ishex = 1; } else { ishex = 0; } if (ishex) { ret = BN_hex2bn(&bn, value); } else { // Decoding from decimal scales quadratically in the input length. Bound the // largest decimal input we accept in the config parser. 8,192 decimal // digits allows values up to 27,213 bits. Ths exceeds the largest RSA, DSA, // or DH modulus we support, and those are not usefully represented in // decimal. 
if (strlen(value) > 8192) { BN_free(bn); OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NUMBER); return 0; } ret = BN_dec2bn(&bn, value); } if (!ret || value[ret]) { BN_free(bn); OPENSSL_PUT_ERROR(X509V3, X509V3_R_BN_DEC2BN_ERROR); return 0; } if (isneg && BN_is_zero(bn)) { isneg = 0; } aint = BN_to_ASN1_INTEGER(bn, NULL); BN_free(bn); if (!aint) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_BN_TO_ASN1_INTEGER_ERROR); return 0; } if (isneg) { aint->type |= V_ASN1_NEG; } return aint; } int X509V3_add_value_int(const char *name, const ASN1_INTEGER *aint, STACK_OF(CONF_VALUE) **extlist) { char *strtmp; int ret; if (!aint) { return 1; } if (!(strtmp = i2s_ASN1_INTEGER(NULL, aint))) { return 0; } ret = X509V3_add_value(name, strtmp, extlist); OPENSSL_free(strtmp); return ret; } int X509V3_bool_from_string(const char *str, ASN1_BOOLEAN *out_bool) { if (!strcmp(str, "TRUE") || !strcmp(str, "true") || !strcmp(str, "Y") || !strcmp(str, "y") || !strcmp(str, "YES") || !strcmp(str, "yes")) { *out_bool = ASN1_BOOLEAN_TRUE; return 1; } if (!strcmp(str, "FALSE") || !strcmp(str, "false") || !strcmp(str, "N") || !strcmp(str, "n") || !strcmp(str, "NO") || !strcmp(str, "no")) { *out_bool = ASN1_BOOLEAN_FALSE; return 1; } OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_BOOLEAN_STRING); return 0; } int X509V3_get_value_bool(const CONF_VALUE *value, ASN1_BOOLEAN *out_bool) { const char *btmp = value->value; if (btmp == NULL) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_BOOLEAN_STRING); goto err; } if (!X509V3_bool_from_string(btmp, out_bool)) { goto err; } return 1; err: X509V3_conf_err(value); return 0; } int X509V3_get_value_int(const CONF_VALUE *value, ASN1_INTEGER **aint) { ASN1_INTEGER *itmp; if (!(itmp = s2i_ASN1_INTEGER(NULL, value->value))) { X509V3_conf_err(value); return 0; } ASN1_INTEGER_free(*aint); *aint = itmp; return 1; } #define HDR_NAME 1 #define HDR_VALUE 2 // #define DEBUG STACK_OF(CONF_VALUE) *X509V3_parse_list(const char *line) { char *p, *q, c; char *ntmp, *vtmp; 
STACK_OF(CONF_VALUE) *values = NULL; char *linebuf; int state; // We are going to modify the line so copy it first linebuf = OPENSSL_strdup(line); if (linebuf == NULL) { goto err; } state = HDR_NAME; ntmp = NULL; // Go through all characters for (p = linebuf, q = linebuf; (c = *p) && (c != '\r') && (c != '\n'); p++) { switch (state) { case HDR_NAME: if (c == ':') { state = HDR_VALUE; *p = 0; ntmp = strip_spaces(q); if (!ntmp) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NULL_NAME); goto err; } q = p + 1; } else if (c == ',') { *p = 0; ntmp = strip_spaces(q); q = p + 1; #if 0 printf("%s\n", ntmp); #endif if (!ntmp) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NULL_NAME); goto err; } X509V3_add_value(ntmp, NULL, &values); } break; case HDR_VALUE: if (c == ',') { state = HDR_NAME; *p = 0; vtmp = strip_spaces(q); #if 0 printf("%s\n", ntmp); #endif if (!vtmp) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NULL_VALUE); goto err; } X509V3_add_value(ntmp, vtmp, &values); ntmp = NULL; q = p + 1; } } } if (state == HDR_VALUE) { vtmp = strip_spaces(q); #if 0 printf("%s=%s\n", ntmp, vtmp); #endif if (!vtmp) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NULL_VALUE); goto err; } X509V3_add_value(ntmp, vtmp, &values); } else { ntmp = strip_spaces(q); #if 0 printf("%s\n", ntmp); #endif if (!ntmp) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NULL_NAME); goto err; } X509V3_add_value(ntmp, NULL, &values); } OPENSSL_free(linebuf); return values; err: OPENSSL_free(linebuf); sk_CONF_VALUE_pop_free(values, X509V3_conf_free); return NULL; } // Delete leading and trailing spaces from a string static char *strip_spaces(char *name) { char *p, *q; // Skip over leading spaces p = name; while (*p && OPENSSL_isspace((unsigned char)*p)) { p++; } if (!*p) { return NULL; } q = p + strlen(p) - 1; while ((q != p) && OPENSSL_isspace((unsigned char)*q)) { q--; } if (p != q) { q[1] = 0; } if (!*p) { return NULL; } return p; } // hex string utilities char *x509v3_bytes_to_hex(const uint8_t *in, size_t len) 
{ CBB cbb; if (!CBB_init(&cbb, len * 3 + 1)) { goto err; } for (size_t i = 0; i < len; i++) { static const char hex[] = "0123456789ABCDEF"; if ((i > 0 && !CBB_add_u8(&cbb, ':')) || !CBB_add_u8(&cbb, hex[in[i] >> 4]) || !CBB_add_u8(&cbb, hex[in[i] & 0xf])) { goto err; } } uint8_t *ret; size_t unused_len; if (!CBB_add_u8(&cbb, 0) || !CBB_finish(&cbb, &ret, &unused_len)) { goto err; } return (char *)ret; err: CBB_cleanup(&cbb); return NULL; } unsigned char *x509v3_hex_to_bytes(const char *str, size_t *len) { unsigned char *hexbuf, *q; unsigned char ch, cl, *p; uint8_t high, low; if (!str) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_NULL_ARGUMENT); return NULL; } if (!(hexbuf = reinterpret_cast(OPENSSL_malloc(strlen(str) >> 1)))) { goto err; } for (p = (unsigned char *)str, q = hexbuf; *p;) { ch = *p++; if (ch == ':') { continue; } cl = *p++; if (!cl) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_ODD_NUMBER_OF_DIGITS); OPENSSL_free(hexbuf); return NULL; } if (!OPENSSL_fromxdigit(&high, ch)) { goto badhex; } if (!OPENSSL_fromxdigit(&low, cl)) { goto badhex; } *q++ = (high << 4) | low; } if (len) { *len = q - hexbuf; } return hexbuf; err: OPENSSL_free(hexbuf); return NULL; badhex: OPENSSL_free(hexbuf); OPENSSL_PUT_ERROR(X509V3, X509V3_R_ILLEGAL_HEX_DIGIT); return NULL; } int x509v3_conf_name_matches(const char *name, const char *cmp) { // |name| must begin with |cmp|. size_t len = strlen(cmp); if (strncmp(name, cmp, len) != 0) { return 0; } // |name| must either be equal to |cmp| or begin with |cmp|, followed by '.'. 
return name[len] == '\0' || name[len] == '.'; } static int sk_strcmp(const char *const *a, const char *const *b) { return strcmp(*a, *b); } STACK_OF(OPENSSL_STRING) *X509_get1_email(const X509 *x) { GENERAL_NAMES *gens; STACK_OF(OPENSSL_STRING) *ret; gens = reinterpret_cast( X509_get_ext_d2i(x, NID_subject_alt_name, NULL, NULL)); ret = get_email(X509_get_subject_name(x), gens); sk_GENERAL_NAME_pop_free(gens, GENERAL_NAME_free); return ret; } STACK_OF(OPENSSL_STRING) *X509_get1_ocsp(const X509 *x) { AUTHORITY_INFO_ACCESS *info; STACK_OF(OPENSSL_STRING) *ret = NULL; size_t i; info = reinterpret_cast( X509_get_ext_d2i(x, NID_info_access, NULL, NULL)); if (!info) { return NULL; } for (i = 0; i < sk_ACCESS_DESCRIPTION_num(info); i++) { ACCESS_DESCRIPTION *ad = sk_ACCESS_DESCRIPTION_value(info, i); if (OBJ_obj2nid(ad->method) == NID_ad_OCSP) { if (ad->location->type == GEN_URI) { if (!append_ia5(&ret, ad->location->d.uniformResourceIdentifier)) { break; } } } } AUTHORITY_INFO_ACCESS_free(info); return ret; } STACK_OF(OPENSSL_STRING) *X509_REQ_get1_email(const X509_REQ *x) { GENERAL_NAMES *gens; STACK_OF(X509_EXTENSION) *exts; STACK_OF(OPENSSL_STRING) *ret; exts = X509_REQ_get_extensions(x); gens = reinterpret_cast( X509V3_get_d2i(exts, NID_subject_alt_name, NULL, NULL)); ret = get_email(X509_REQ_get_subject_name(x), gens); sk_GENERAL_NAME_pop_free(gens, GENERAL_NAME_free); sk_X509_EXTENSION_pop_free(exts, X509_EXTENSION_free); return ret; } static STACK_OF(OPENSSL_STRING) *get_email(const X509_NAME *name, const GENERAL_NAMES *gens) { STACK_OF(OPENSSL_STRING) *ret = NULL; // Now add any email address(es) to STACK int i = -1; // First supplied X509_NAME while ((i = X509_NAME_get_index_by_NID(name, NID_pkcs9_emailAddress, i)) >= 0) { const X509_NAME_ENTRY *ne = X509_NAME_get_entry(name, i); const ASN1_IA5STRING *email = X509_NAME_ENTRY_get_data(ne); if (!append_ia5(&ret, email)) { return NULL; } } for (size_t j = 0; j < sk_GENERAL_NAME_num(gens); j++) { const GENERAL_NAME 
*gen = sk_GENERAL_NAME_value(gens, j); if (gen->type != GEN_EMAIL) { continue; } if (!append_ia5(&ret, gen->d.ia5)) { return NULL; } } return ret; } static void str_free(OPENSSL_STRING str) { OPENSSL_free(str); } static int append_ia5(STACK_OF(OPENSSL_STRING) **sk, const ASN1_IA5STRING *email) { // First some sanity checks if (email->type != V_ASN1_IA5STRING) { return 1; } if (email->data == NULL || email->length == 0) { return 1; } // |OPENSSL_STRING| cannot represent strings with embedded NULs. Do not // report them as outputs. if (OPENSSL_memchr(email->data, 0, email->length) != NULL) { return 1; } char *emtmp = NULL; if (!*sk) { *sk = sk_OPENSSL_STRING_new(sk_strcmp); } if (!*sk) { goto err; } emtmp = OPENSSL_strndup((char *)email->data, email->length); if (emtmp == NULL) { goto err; } // Don't add duplicates sk_OPENSSL_STRING_sort(*sk); if (sk_OPENSSL_STRING_find(*sk, NULL, emtmp)) { OPENSSL_free(emtmp); return 1; } if (!sk_OPENSSL_STRING_push(*sk, emtmp)) { goto err; } return 1; err: // TODO(davidben): Fix the error-handling in this file. It currently relies // on |append_ia5| leaving |*sk| at NULL on error. OPENSSL_free(emtmp); X509_email_free(*sk); *sk = NULL; return 0; } void X509_email_free(STACK_OF(OPENSSL_STRING) *sk) { sk_OPENSSL_STRING_pop_free(sk, str_free); } typedef int (*equal_fn)(const unsigned char *pattern, size_t pattern_len, const unsigned char *subject, size_t subject_len, unsigned int flags); // Compare while ASCII ignoring case. static int equal_nocase(const unsigned char *pattern, size_t pattern_len, const unsigned char *subject, size_t subject_len, unsigned int flags) { if (pattern_len != subject_len) { return 0; } while (pattern_len) { unsigned char l = *pattern; unsigned char r = *subject; // The pattern must not contain NUL characters. if (l == 0) { return 0; } if (l != r) { if (OPENSSL_tolower(l) != OPENSSL_tolower(r)) { return 0; } } ++pattern; ++subject; --pattern_len; } return 1; } // Compare using OPENSSL_memcmp. 
// Compare |pattern| and |subject| byte-for-byte via OPENSSL_memcmp. Returns
// one when they have the same length and identical contents, zero otherwise.
static int equal_case(const unsigned char *pattern, size_t pattern_len,
                      const unsigned char *subject, size_t subject_len,
                      unsigned int flags) {
  if (pattern_len != subject_len) {
    return 0;
  }
  return !OPENSSL_memcmp(pattern, subject, pattern_len);
}

// RFC 5280, section 7.5, requires that only the domain is compared in a
// case-insensitive manner.
static int equal_email(const unsigned char *a, size_t a_len,
                       const unsigned char *b, size_t b_len,
                       unsigned int unused_flags) {
  size_t i = a_len;
  if (a_len != b_len) {
    return 0;
  }
  // We search backwards for the '@' character, so that we do not have to
  // deal with quoted local-parts. The domain part is compared in a
  // case-insensitive manner.
  while (i > 0) {
    --i;
    if (a[i] == '@' || b[i] == '@') {
      if (!equal_nocase(a + i, a_len - i, b + i, a_len - i, 0)) {
        return 0;
      }
      break;
    }
  }
  // If no '@' was found in either string, compare the whole strings
  // case-sensitively instead.
  if (i == 0) {
    i = a_len;
  }
  // The local-part (everything before the '@') is compared case-sensitively.
  return equal_case(a, i, b, i, 0);
}

// Compare the prefix and suffix with the subject, and check that the
// characters in-between are valid.
static int wildcard_match(const unsigned char *prefix, size_t prefix_len,
                          const unsigned char *suffix, size_t suffix_len,
                          const unsigned char *subject, size_t subject_len,
                          unsigned int flags) {
  const unsigned char *wildcard_start;
  const unsigned char *wildcard_end;
  const unsigned char *p;
  int allow_idna = 0;

  if (subject_len < prefix_len + suffix_len) {
    return 0;
  }
  if (!equal_nocase(prefix, prefix_len, subject, prefix_len, flags)) {
    return 0;
  }
  wildcard_start = subject + prefix_len;
  wildcard_end = subject + (subject_len - suffix_len);
  if (!equal_nocase(wildcard_end, suffix_len, suffix, suffix_len, flags)) {
    return 0;
  }
  // If the wildcard makes up the entire first label, it must match at
  // least one character.
  if (prefix_len == 0 && *suffix == '.') {
    if (wildcard_start == wildcard_end) {
      return 0;
    }
    allow_idna = 1;
  }
  // IDNA labels cannot match partial wildcards
  if (!allow_idna && subject_len >= 4 &&
      OPENSSL_strncasecmp((char *)subject, "xn--", 4) == 0) {
    return 0;
  }
  // The wildcard may match a literal '*'
  if (wildcard_end == wildcard_start + 1 && *wildcard_start == '*') {
    return 1;
  }
  // Check that the part matched by the wildcard contains only
  // permitted characters and only matches a single label.
  for (p = wildcard_start; p != wildcard_end; ++p) {
    if (!OPENSSL_isalnum(*p) && *p != '-') {
      return 0;
    }
  }
  return 1;
}

// Bit flags describing the parser state within the current DNS label while
// scanning a candidate wildcard pattern in |valid_star|.
#define LABEL_START (1 << 0)
#define LABEL_END (1 << 1)
#define LABEL_HYPHEN (1 << 2)
#define LABEL_IDNA (1 << 3)

// valid_star returns a pointer to the single legal '*' wildcard in the
// |len|-byte pattern |p|, or NULL if the pattern contains no acceptable
// wildcard. Only a full-label wildcard in the first (non-IDNA) label of a
// name with at least two further labels is accepted.
static const unsigned char *valid_star(const unsigned char *p, size_t len,
                                       unsigned int flags) {
  const unsigned char *star = 0;
  size_t i;
  int state = LABEL_START;
  int dots = 0;
  for (i = 0; i < len; ++i) {
    // Locate first and only legal wildcard, either at the start
    // or end of a non-IDNA first and not final label.
    if (p[i] == '*') {
      int atstart = (state & LABEL_START);
      int atend = (i == len - 1 || p[i + 1] == '.');
      // At most one wildcard per pattern.
      // No wildcards in IDNA labels.
      // No wildcards after the first label.
      if (star != NULL || (state & LABEL_IDNA) != 0 || dots) {
        return NULL;
      }
      // Only full-label '*.example.com' wildcards.
      if (!atstart || !atend) {
        return NULL;
      }
      star = &p[i];
      state &= ~LABEL_START;
    } else if (OPENSSL_isalnum(p[i])) {
      if ((state & LABEL_START) != 0 && len - i >= 4 &&
          OPENSSL_strncasecmp((char *)&p[i], "xn--", 4) == 0) {
        state |= LABEL_IDNA;
      }
      state &= ~(LABEL_HYPHEN | LABEL_START);
    } else if (p[i] == '.') {
      // Empty labels ("..") and labels ending in '-' are rejected.
      if ((state & (LABEL_HYPHEN | LABEL_START)) != 0) {
        return NULL;
      }
      state = LABEL_START;
      ++dots;
    } else if (p[i] == '-') {
      // no domain/subdomain starts with '-'
      if ((state & LABEL_START) != 0) {
        return NULL;
      }
      state |= LABEL_HYPHEN;
    } else {
      return NULL;
    }
  }
  // The final label must not end in a hyphen or ".", and
  // there must be at least two dots after the star.
  if ((state & (LABEL_START | LABEL_HYPHEN)) != 0 || dots < 2) {
    return NULL;
  }
  return star;
}

// Compare using wildcards.
static int equal_wildcard(const unsigned char *pattern, size_t pattern_len,
                          const unsigned char *subject, size_t subject_len,
                          unsigned int flags) {
  const unsigned char *star = NULL;
  // Subject names starting with '.' can only match a wildcard pattern
  // via a subject sub-domain pattern suffix match.
  if (!(subject_len > 1 && subject[0] == '.')) {
    star = valid_star(pattern, pattern_len, flags);
  }
  if (star == NULL) {
    // No acceptable wildcard: plain case-insensitive comparison.
    return equal_nocase(pattern, pattern_len, subject, subject_len, flags);
  }
  return wildcard_match(pattern, star - pattern, star + 1,
                        (pattern + pattern_len) - star - 1, subject,
                        subject_len, flags);
}

int x509v3_looks_like_dns_name(const unsigned char *in, size_t len) {
  // This function is used as a heuristic for whether a common name is a
  // hostname to be matched, or merely a decorative name to describe the
  // subject. This heuristic must be applied to both name constraints and the
  // common name fallback, so it must be loose enough to accept hostname
  // common names, and tight enough to reject decorative common names.
  //
  // A trailing dot (absolute DNS name) is tolerated.
  if (len > 0 && in[len - 1] == '.') {
    len--;
  }
  // Wildcards are allowed in front.
  if (len >= 2 && in[0] == '*' && in[1] == '.') {
    in += 2;
    len -= 2;
  }
  if (len == 0) {
    return 0;
  }
  size_t label_start = 0;
  for (size_t i = 0; i < len; i++) {
    unsigned char c = in[i];
    if (OPENSSL_isalnum(c) || (c == '-' && i > label_start) ||
        // These are not valid characters in hostnames, but commonly found
        // in deployments outside the Web PKI.
        c == '_' || c == ':') {
      continue;
    }
    // Labels must not be empty.
    if (c == '.' && i > label_start && i < len - 1) {
      label_start = i + 1;
      continue;
    }
    return 0;
  }
  return 1;
}

// Compare an ASN1_STRING to a supplied string. If they match return 1. If
// cmp_type > 0 only compare if string matches the type, otherwise convert it
// to UTF8. Returns a negative value on allocation/conversion failure; on a
// positive match with |peername| non-NULL, a NUL-terminated copy of the
// matched name is stored in |*peername|.
static int do_check_string(const ASN1_STRING *a, int cmp_type, equal_fn equal,
                           unsigned int flags, int check_type, const char *b,
                           size_t blen, char **peername) {
  int rv = 0;

  if (!a->data || !a->length) {
    return 0;
  }
  if (cmp_type > 0) {
    if (cmp_type != a->type) {
      return 0;
    }
    if (cmp_type == V_ASN1_IA5STRING) {
      rv = equal(a->data, a->length, (unsigned char *)b, blen, flags);
    } else if (a->length == (int)blen && !OPENSSL_memcmp(a->data, b, blen)) {
      rv = 1;
    }
    if (rv > 0 && peername) {
      *peername = OPENSSL_strndup((char *)a->data, a->length);
      if (*peername == NULL) {
        return -1;
      }
    }
  } else {
    int astrlen;
    unsigned char *astr;
    astrlen = ASN1_STRING_to_UTF8(&astr, a);
    if (astrlen < 0) {
      return -1;
    }
    // We check the common name against DNS name constraints if it passes
    // |x509v3_looks_like_dns_name|. Thus we must not consider common names
    // for DNS fallbacks if they fail this check.
    if (check_type == GEN_DNS && !x509v3_looks_like_dns_name(astr, astrlen)) {
      rv = 0;
    } else {
      rv = equal(astr, astrlen, (unsigned char *)b, blen, flags);
    }
    if (rv > 0 && peername) {
      *peername = OPENSSL_strndup((char *)astr, astrlen);
      if (*peername == NULL) {
        // NOTE(review): |astr| appears to leak on this error path (it is only
        // freed below) — confirm against upstream BoringSSL before changing.
        return -1;
      }
    }
    OPENSSL_free(astr);
  }
  return rv;
}

// do_x509_check matches |chk| (of length |chklen|) against the names of type
// |check_type| (GEN_DNS, GEN_EMAIL, or GEN_IPADD) in |x|. If a subjectAltName
// extension is present it is authoritative; otherwise, for DNS and email
// checks, the subject common name / emailAddress is used as a fallback unless
// X509_CHECK_FLAG_NEVER_CHECK_SUBJECT is set. Returns positive on match, zero
// on mismatch, and negative on error.
static int do_x509_check(const X509 *x, const char *chk, size_t chklen,
                         unsigned int flags, int check_type, char **peername) {
  int cnid = NID_undef;
  int alt_type;
  int rv = 0;
  equal_fn equal;
  // Select the subject-name fallback attribute, the expected ASN.1 string
  // type in subjectAltName, and the comparison function for this check type.
  if (check_type == GEN_EMAIL) {
    cnid = NID_pkcs9_emailAddress;
    alt_type = V_ASN1_IA5STRING;
    equal = equal_email;
  } else if (check_type == GEN_DNS) {
    cnid = NID_commonName;
    alt_type = V_ASN1_IA5STRING;
    if (flags & X509_CHECK_FLAG_NO_WILDCARDS) {
      equal = equal_nocase;
    } else {
      equal = equal_wildcard;
    }
  } else {
    // IP addresses are compared as raw octet strings.
    alt_type = V_ASN1_OCTET_STRING;
    equal = equal_case;
  }

  // NOTE(review): the template argument of this cast (upstream:
  // |GENERAL_NAMES *|) appears to have been lost in extraction; left
  // byte-identical here.
  GENERAL_NAMES *gens = reinterpret_cast(
      X509_get_ext_d2i(x, NID_subject_alt_name, NULL, NULL));
  if (gens) {
    for (size_t i = 0; i < sk_GENERAL_NAME_num(gens); i++) {
      const GENERAL_NAME *gen = sk_GENERAL_NAME_value(gens, i);
      if (gen->type != check_type) {
        continue;
      }
      const ASN1_STRING *cstr;
      if (check_type == GEN_EMAIL) {
        cstr = gen->d.rfc822Name;
      } else if (check_type == GEN_DNS) {
        cstr = gen->d.dNSName;
      } else {
        cstr = gen->d.iPAddress;
      }
      // Positive on success, negative on error!
      if ((rv = do_check_string(cstr, alt_type, equal, flags, check_type, chk,
                                chklen, peername)) != 0) {
        break;
      }
    }
    GENERAL_NAMES_free(gens);
    return rv;
  }

  // We're done if CN-ID is not pertinent
  if (cnid == NID_undef || (flags & X509_CHECK_FLAG_NEVER_CHECK_SUBJECT)) {
    return 0;
  }

  int j = -1;
  const X509_NAME *name = X509_get_subject_name(x);
  while ((j = X509_NAME_get_index_by_NID(name, cnid, j)) >= 0) {
    const X509_NAME_ENTRY *ne = X509_NAME_get_entry(name, j);
    const ASN1_STRING *str = X509_NAME_ENTRY_get_data(ne);
    // Positive on success, negative on error!
    if ((rv = do_check_string(str, -1, equal, flags, check_type, chk, chklen,
                              peername)) != 0) {
      return rv;
    }
  }
  return 0;
}

int X509_check_host(const X509 *x, const char *chk, size_t chklen,
                    unsigned int flags, char **peername) {
  if (chk == NULL) {
    return -2;
  }
  // Embedded NULs in the query string are rejected outright.
  if (OPENSSL_memchr(chk, '\0', chklen)) {
    return -2;
  }
  return do_x509_check(x, chk, chklen, flags, GEN_DNS, peername);
}

int X509_check_email(const X509 *x, const char *chk, size_t chklen,
                     unsigned int flags) {
  if (chk == NULL) {
    return -2;
  }
  // Embedded NULs in the query string are rejected outright.
  if (OPENSSL_memchr(chk, '\0', chklen)) {
    return -2;
  }
  return do_x509_check(x, chk, chklen, flags, GEN_EMAIL, NULL);
}

int X509_check_ip(const X509 *x, const unsigned char *chk, size_t chklen,
                  unsigned int flags) {
  if (chk == NULL) {
    return -2;
  }
  return do_x509_check(x, (const char *)chk, chklen, flags, GEN_IPADD, NULL);
}

int X509_check_ip_asc(const X509 *x, const char *ipasc, unsigned int flags) {
  unsigned char ipout[16];
  size_t iplen;

  if (ipasc == NULL) {
    return -2;
  }
  // Parse the textual address into 4 (IPv4) or 16 (IPv6) raw bytes first.
  iplen = (size_t)x509v3_a2i_ipadd(ipout, ipasc);
  if (iplen == 0) {
    return -2;
  }
  return do_x509_check(x, (const char *)ipout, iplen, flags, GEN_IPADD, NULL);
}

// Convert IP addresses both IPv4 and IPv6 into an OCTET STRING compatible
// with RFC 3280.
ASN1_OCTET_STRING *a2i_IPADDRESS(const char *ipasc) {
  unsigned char ipout[16];
  ASN1_OCTET_STRING *ret;
  int iplen;

  iplen = x509v3_a2i_ipadd(ipout, ipasc);
  if (!iplen) {
    return NULL;
  }

  ret = ASN1_OCTET_STRING_new();
  if (!ret) {
    return NULL;
  }
  if (!ASN1_OCTET_STRING_set(ret, ipout, iplen)) {
    ASN1_OCTET_STRING_free(ret);
    return NULL;
  }
  return ret;
}

// a2i_IPADDRESS_NC parses an "address/mask" pair (as used in name
// constraints) into a single OCTET STRING holding the address followed by the
// mask. Both halves must parse to the same length (both IPv4 or both IPv6).
ASN1_OCTET_STRING *a2i_IPADDRESS_NC(const char *ipasc) {
  ASN1_OCTET_STRING *ret = NULL;
  unsigned char ipout[32];
  char *iptmp = NULL, *p;
  int iplen1, iplen2;
  // FIXME: yes, this function takes a const pointer and writes to it!
  //
  // NOTE(review): the template argument of this cast (upstream: |char *|)
  // appears to have been lost in extraction; left byte-identical here.
  p = const_cast(strchr(ipasc, '/'));
  if (!p) {
    return NULL;
  }
  // Work on a copy so the '/' separator can be replaced with a NUL.
  iptmp = OPENSSL_strdup(ipasc);
  if (!iptmp) {
    return NULL;
  }
  p = iptmp + (p - ipasc);
  *p++ = 0;

  iplen1 = x509v3_a2i_ipadd(ipout, iptmp);
  if (!iplen1) {
    goto err;
  }

  iplen2 = x509v3_a2i_ipadd(ipout + iplen1, p);

  OPENSSL_free(iptmp);
  iptmp = NULL;

  if (!iplen2 || (iplen1 != iplen2)) {
    goto err;
  }

  ret = ASN1_OCTET_STRING_new();
  if (!ret) {
    goto err;
  }
  if (!ASN1_OCTET_STRING_set(ret, ipout, iplen1 + iplen2)) {
    goto err;
  }

  return ret;

err:
  OPENSSL_free(iptmp);
  ASN1_OCTET_STRING_free(ret);
  return NULL;
}

// x509v3_a2i_ipadd parses the textual IP address |ipasc| into |ipout|,
// returning the number of bytes written (4 for IPv4, 16 for IPv6) or zero on
// parse failure.
int x509v3_a2i_ipadd(uint8_t ipout[16], const char *ipasc) {
  // If string contains a ':' assume IPv6
  if (strchr(ipasc, ':')) {
    if (!ipv6_from_asc(ipout, ipasc)) {
      return 0;
    }
    return 16;
  } else {
    if (!ipv4_from_asc(ipout, ipasc)) {
      return 0;
    }
    return 4;
  }
}

// get_ipv4_component consumes one IPv4 component, terminated by either '.' or
// the end of the string, from |*str|. On success, it returns one, sets |*out|
// to the component, and advances |*str| to the first unconsumed character. On
// invalid input, it returns zero.
static int get_ipv4_component(uint8_t *out_byte, const char **str) {
  // Store a slightly larger intermediary so the overflow check is easier.
  uint32_t out = 0;
  for (;;) {
    if (!OPENSSL_isdigit(**str)) {
      return 0;
    }
    out = (out * 10) + (**str - '0');
    if (out > 255) {
      // Components must be 8-bit.
      return 0;
    }
    (*str)++;
    if ((**str) == '.' || (**str) == '\0') {
      *out_byte = (uint8_t)out;
      return 1;
    }
    if (out == 0) {
      // Reject extra leading zeros. Parsers sometimes treat them as octal, so
      // accepting them would misinterpret input.
      return 0;
    }
  }
}

// get_ipv4_dot consumes a '.' from |*str| and advances it. It returns one on
// success and zero if |*str| does not point to a '.'.
static int get_ipv4_dot(const char **str) { if (**str != '.') { return 0; } (*str)++; return 1; } static int ipv4_from_asc(uint8_t v4[4], const char *in) { if (!get_ipv4_component(&v4[0], &in) || !get_ipv4_dot(&in) || !get_ipv4_component(&v4[1], &in) || !get_ipv4_dot(&in) || !get_ipv4_component(&v4[2], &in) || !get_ipv4_dot(&in) || !get_ipv4_component(&v4[3], &in) || *in != '\0') { return 0; } return 1; } typedef struct { // Temporary store for IPV6 output uint8_t tmp[16]; // Total number of bytes in tmp int total; // The position of a zero (corresponding to '::') int zero_pos; // Number of zeroes int zero_cnt; } IPV6_STAT; static int ipv6_from_asc(uint8_t v6[16], const char *in) { IPV6_STAT v6stat; v6stat.total = 0; v6stat.zero_pos = -1; v6stat.zero_cnt = 0; // Treat the IPv6 representation as a list of values separated by ':'. // The presence of a '::' will parse as one, two or three zero length // elements. if (!CONF_parse_list(in, ':', 0, ipv6_cb, &v6stat)) { return 0; } if (v6stat.zero_pos == -1) { // If no '::' must have exactly 16 bytes if (v6stat.total != 16) { return 0; } } else { // If '::' must have less than 16 bytes if (v6stat.total >= 16) { return 0; } if (v6stat.zero_cnt > 3) { // More than three zeroes is an error return 0; } else if (v6stat.zero_cnt == 3) { // Can only have three zeroes if nothing else present if (v6stat.total > 0) { return 0; } } else if (v6stat.zero_cnt == 2) { // Can only have two zeroes if at start or end if (v6stat.zero_pos != 0 && v6stat.zero_pos != v6stat.total) { return 0; } } else { // Can only have one zero if *not* start or end if (v6stat.zero_pos == 0 || v6stat.zero_pos == v6stat.total) { return 0; } } } // Format the result. 
if (v6stat.zero_pos >= 0) { // Copy initial part OPENSSL_memcpy(v6, v6stat.tmp, v6stat.zero_pos); // Zero middle OPENSSL_memset(v6 + v6stat.zero_pos, 0, 16 - v6stat.total); // Copy final part if (v6stat.total != v6stat.zero_pos) { OPENSSL_memcpy(v6 + v6stat.zero_pos + 16 - v6stat.total, v6stat.tmp + v6stat.zero_pos, v6stat.total - v6stat.zero_pos); } } else { OPENSSL_memcpy(v6, v6stat.tmp, 16); } return 1; } static int ipv6_cb(const char *elem, size_t len, void *usr) { IPV6_STAT *s = reinterpret_cast(usr); // Error if 16 bytes written if (s->total == 16) { return 0; } if (len == 0) { // Zero length element, corresponds to '::' if (s->zero_pos == -1) { s->zero_pos = s->total; } else if (s->zero_pos != s->total) { // If we've already got a :: its an error return 0; } if (s->zero_cnt >= 3) { // More than three zeros is an error. return 0; } s->zero_cnt++; } else { // If more than 4 characters could be final a.b.c.d form if (len > 4) { // Need at least 4 bytes left if (s->total > 12) { return 0; } // Must be end of string if (elem[len]) { return 0; } if (!ipv4_from_asc(s->tmp + s->total, elem)) { return 0; } s->total += 4; } else { if (!ipv6_hex(s->tmp + s->total, elem, len)) { return 0; } s->total += 2; } } return 1; } // Convert a string of up to 4 hex digits into the corresponding IPv6 form. static int ipv6_hex(uint8_t *out, const char *in, size_t inlen) { if (inlen > 4) { return 0; } uint16_t num = 0; while (inlen--) { uint8_t val; if (!OPENSSL_fromxdigit(&val, *in++)) { return 0; } num = (num << 4) | val; } out[0] = num >> 8; out[1] = num & 0xff; return 1; } int X509V3_NAME_from_section(X509_NAME *nm, const STACK_OF(CONF_VALUE) *dn_sk, int chtype) { if (!nm) { return 0; } for (size_t i = 0; i < sk_CONF_VALUE_num(dn_sk); i++) { const CONF_VALUE *v = sk_CONF_VALUE_value(dn_sk, i); const char *type = v->name; // Skip past any leading X. 
X: X, etc to allow for multiple instances for (const char *p = type; *p; p++) { if ((*p == ':') || (*p == ',') || (*p == '.')) { p++; if (*p) { type = p; } break; } } int mval; if (*type == '+') { mval = -1; type++; } else { mval = 0; } if (!X509_NAME_add_entry_by_txt(nm, type, chtype, (unsigned char *)v->value, -1, -1, mval)) { return 0; } } return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x509.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include // |X509_R_UNSUPPORTED_ALGORITHM| is no longer emitted, but continue to define // it to avoid downstream churn. OPENSSL_DECLARE_ERROR_REASON(X509, UNSUPPORTED_ALGORITHM) int X509_signature_dump(BIO *bp, const ASN1_STRING *sig, int indent) { const uint8_t *s; int i, n; n = sig->length; s = sig->data; for (i = 0; i < n; i++) { if ((i % 18) == 0) { if (BIO_write(bp, "\n", 1) <= 0 || BIO_indent(bp, indent, indent) <= 0) { return 0; } } if (BIO_printf(bp, "%02x%s", s[i], ((i + 1) == n) ? "" : ":") <= 0) { return 0; } } if (BIO_write(bp, "\n", 1) != 1) { return 0; } return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x509_att.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "../asn1/internal.h" #include "internal.h" X509_ATTRIBUTE *X509_ATTRIBUTE_create_by_NID(X509_ATTRIBUTE **attr, int nid, int attrtype, const void *data, int len) { const ASN1_OBJECT *obj; obj = OBJ_nid2obj(nid); if (obj == NULL) { OPENSSL_PUT_ERROR(X509, X509_R_UNKNOWN_NID); return NULL; } return X509_ATTRIBUTE_create_by_OBJ(attr, obj, attrtype, data, len); } X509_ATTRIBUTE *X509_ATTRIBUTE_create_by_OBJ(X509_ATTRIBUTE **attr, const ASN1_OBJECT *obj, int attrtype, const void *data, int len) { X509_ATTRIBUTE *ret; if ((attr == NULL) || (*attr == NULL)) { if ((ret = X509_ATTRIBUTE_new()) == NULL) { return NULL; } } else { ret = *attr; } if (!X509_ATTRIBUTE_set1_object(ret, obj)) { goto err; } if (!X509_ATTRIBUTE_set1_data(ret, attrtype, data, len)) { goto err; } if ((attr != NULL) && (*attr == NULL)) { *attr = ret; } return ret; err: if ((attr == NULL) || (ret != *attr)) { X509_ATTRIBUTE_free(ret); } return NULL; } X509_ATTRIBUTE *X509_ATTRIBUTE_create_by_txt(X509_ATTRIBUTE **attr, const char *attrname, int type, const unsigned char *bytes, int len) { ASN1_OBJECT *obj; X509_ATTRIBUTE *nattr; obj = OBJ_txt2obj(attrname, 0); if (obj == NULL) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_FIELD_NAME); ERR_add_error_data(2, "name=", attrname); return NULL; } nattr = X509_ATTRIBUTE_create_by_OBJ(attr, obj, type, bytes, len); ASN1_OBJECT_free(obj); return nattr; } int X509_ATTRIBUTE_set1_object(X509_ATTRIBUTE *attr, const ASN1_OBJECT *obj) { if ((attr == NULL) || (obj == NULL)) { return 0; } ASN1_OBJECT_free(attr->object); attr->object = OBJ_dup(obj); return attr->object != NULL; } int X509_ATTRIBUTE_set1_data(X509_ATTRIBUTE *attr, int attrtype, const void *data, int len) { if (!attr) { return 0; } if (attrtype == 0) { // Do nothing. This is used to create an empty value set in // |X509_ATTRIBUTE_create_by_*|. 
This is invalid, but supported by OpenSSL. return 1; } ASN1_TYPE *typ = ASN1_TYPE_new(); if (typ == NULL) { return 0; } // This function is several functions in one. if (attrtype & MBSTRING_FLAG) { // |data| is an encoded string. We must decode and re-encode it to |attr|'s // preferred ASN.1 type. Note |len| may be -1, in which case // |ASN1_STRING_set_by_NID| calls |strlen| automatically. ASN1_STRING *str = ASN1_STRING_set_by_NID(NULL, reinterpret_cast(data), len, attrtype, OBJ_obj2nid(attr->object)); if (str == NULL) { OPENSSL_PUT_ERROR(X509, ERR_R_ASN1_LIB); goto err; } asn1_type_set0_string(typ, str); } else if (len != -1) { // |attrtype| must be a valid |ASN1_STRING| type. |data| and |len| is a // value in the corresponding |ASN1_STRING| representation. ASN1_STRING *str = ASN1_STRING_type_new(attrtype); if (str == NULL || !ASN1_STRING_set(str, data, len)) { ASN1_STRING_free(str); goto err; } asn1_type_set0_string(typ, str); } else { // |attrtype| must be a valid |ASN1_TYPE| type. |data| is a pointer to an // object of the corresponding type. 
if (!ASN1_TYPE_set1(typ, attrtype, data)) { goto err; } } if (!sk_ASN1_TYPE_push(attr->set, typ)) { goto err; } return 1; err: ASN1_TYPE_free(typ); return 0; } int X509_ATTRIBUTE_count(const X509_ATTRIBUTE *attr) { return (int)sk_ASN1_TYPE_num(attr->set); } ASN1_OBJECT *X509_ATTRIBUTE_get0_object(X509_ATTRIBUTE *attr) { if (attr == NULL) { return NULL; } return attr->object; } void *X509_ATTRIBUTE_get0_data(X509_ATTRIBUTE *attr, int idx, int attrtype, void *unused) { ASN1_TYPE *ttmp; ttmp = X509_ATTRIBUTE_get0_type(attr, idx); if (!ttmp) { return NULL; } if (attrtype != ASN1_TYPE_get(ttmp)) { OPENSSL_PUT_ERROR(X509, X509_R_WRONG_TYPE); return NULL; } return (void *)asn1_type_value_as_pointer(ttmp); } ASN1_TYPE *X509_ATTRIBUTE_get0_type(X509_ATTRIBUTE *attr, int idx) { if (attr == NULL) { return NULL; } if (idx >= X509_ATTRIBUTE_count(attr)) { return NULL; } return sk_ASN1_TYPE_value(attr->set, idx); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x509_cmp.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */

// NOTE(review): the header names of the bare includes below were stripped by
// extraction — restore from upstream BoringSSL x509_cmp.cc before building.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "../internal.h"
#include "internal.h"

// Issuer-name comparison via canonical |X509_NAME_cmp|.
int X509_issuer_name_cmp(const X509 *a, const X509 *b) {
  return (X509_NAME_cmp(a->cert_info->issuer, b->cert_info->issuer));
}

// Subject-name comparison via canonical |X509_NAME_cmp|.
int X509_subject_name_cmp(const X509 *a, const X509 *b) {
  return (X509_NAME_cmp(a->cert_info->subject, b->cert_info->subject));
}

// CRLs compare by issuer name only.
int X509_CRL_cmp(const X509_CRL *a, const X509_CRL *b) {
  return (X509_NAME_cmp(a->crl->issuer, b->crl->issuer));
}

// Exact CRL identity: compares the cached SHA-256 hashes.
int X509_CRL_match(const X509_CRL *a, const X509_CRL *b) {
  return OPENSSL_memcmp(a->crl_hash, b->crl_hash, SHA256_DIGEST_LENGTH);
}

X509_NAME *X509_get_issuer_name(const X509 *a) { return a->cert_info->issuer; }

uint32_t X509_issuer_name_hash(X509 *x) {
  return X509_NAME_hash(x->cert_info->issuer);
}

uint32_t X509_issuer_name_hash_old(X509 *x) {
  return (X509_NAME_hash_old(x->cert_info->issuer));
}

X509_NAME *X509_get_subject_name(const X509 *a) {
  return a->cert_info->subject;
}

ASN1_INTEGER *X509_get_serialNumber(X509 *a) {
  return a->cert_info->serialNumber;
}

const ASN1_INTEGER *X509_get0_serialNumber(const X509 *x509) {
  return x509->cert_info->serialNumber;
}

uint32_t X509_subject_name_hash(X509 *x) {
  return X509_NAME_hash(x->cert_info->subject);
}

uint32_t X509_subject_name_hash_old(X509 *x) {
  return X509_NAME_hash_old(x->cert_info->subject);
}

// Compare two certificates: they must be identical for this to work. NB:
// Although "cmp" operations are generally prototyped to take "const"
// arguments (eg. for use in STACKs), the way X509 handling is - these
// operations may involve ensuring the hashes are up-to-date and ensuring
// certain cert information is cached. So this is the point where the
// "depth-first" constification tree has to halt with an evil cast.
int X509_cmp(const X509 *a, const X509 *b) {
  // Fill in the |cert_hash| fields.
  //
  // TODO(davidben): This may fail, in which case the the hash will be all
  // zeros. This produces a consistent comparison (failures are sticky), but
  // not a good one. OpenSSL now returns -2, but this is not a consistent
  // comparison and may cause misbehaving sorts by transitivity. For now, we
  // retain the old OpenSSL behavior, which was to ignore the error. See
  // https://crbug.com/boringssl/355.
  x509v3_cache_extensions((X509 *)a);
  x509v3_cache_extensions((X509 *)b);

  return OPENSSL_memcmp(a->cert_hash, b->cert_hash, SHA256_DIGEST_LENGTH);
}

// X509_NAME_cmp compares the canonical (RFC 4518-style) encodings of two
// names: first by length, then bytewise. Returns -2 if an encoding cannot be
// produced.
int X509_NAME_cmp(const X509_NAME *a, const X509_NAME *b) {
  int ret;

  // Ensure canonical encoding is present and up to date
  if (!a->canon_enc || a->modified) {
    ret = i2d_X509_NAME((X509_NAME *)a, NULL);
    if (ret < 0) {
      return -2;
    }
  }
  if (!b->canon_enc || b->modified) {
    ret = i2d_X509_NAME((X509_NAME *)b, NULL);
    if (ret < 0) {
      return -2;
    }
  }
  ret = a->canon_enclen - b->canon_enclen;
  if (ret) {
    return ret;
  }
  return OPENSSL_memcmp(a->canon_enc, b->canon_enc, a->canon_enclen);
}

// X509_NAME_hash returns the first 32 bits (little-endian) of the SHA-1 of
// the name's canonical encoding, or 0 if the encoding fails.
uint32_t X509_NAME_hash(X509_NAME *x) {
  // Make sure the X509_NAME structure contains a valid cached encoding.
  if (i2d_X509_NAME(x, NULL) < 0) {
    return 0;
  }

  uint8_t md[SHA_DIGEST_LENGTH];
  SHA1(x->canon_enc, x->canon_enclen, md);
  return CRYPTO_load_u32_le(md);
}

// I now DER encode the name and hash it. Since I cache the DER encoding,
// this is reasonably efficient.
uint32_t X509_NAME_hash_old(X509_NAME *x) {
  // Make sure the X509_NAME structure contains a valid cached encoding.
  if (i2d_X509_NAME(x, NULL) < 0) {
    return 0;
  }

  // Legacy hash: first 32 bits of the MD5 of the cached DER bytes.
  uint8_t md[SHA_DIGEST_LENGTH];
  MD5((const uint8_t *)x->bytes->data, x->bytes->length, md);
  return CRYPTO_load_u32_le(md);
}

// Linear scan for a certificate matching both |name| (issuer) and |serial|.
X509 *X509_find_by_issuer_and_serial(const STACK_OF(X509) *sk, X509_NAME *name,
                                     const ASN1_INTEGER *serial) {
  if (serial->type != V_ASN1_INTEGER && serial->type != V_ASN1_NEG_INTEGER) {
    return NULL;
  }
  for (size_t i = 0; i < sk_X509_num(sk); i++) {
    X509 *x509 = sk_X509_value(sk, i);
    if (ASN1_INTEGER_cmp(X509_get0_serialNumber(x509), serial) == 0 &&
        X509_NAME_cmp(X509_get_issuer_name(x509), name) == 0) {
      return x509;
    }
  }
  return NULL;
}

// Linear scan for a certificate whose subject equals |name|.
X509 *X509_find_by_subject(const STACK_OF(X509) *sk, X509_NAME *name) {
  for (size_t i = 0; i < sk_X509_num(sk); i++) {
    X509 *x509 = sk_X509_value(sk, i);
    if (X509_NAME_cmp(X509_get_subject_name(x509), name) == 0) {
      return x509;
    }
  }
  return NULL;
}

EVP_PKEY *X509_get0_pubkey(const X509 *x) {
  if (x == NULL) {
    return NULL;
  }
  return X509_PUBKEY_get0(x->cert_info->key);
}

EVP_PKEY *X509_get_pubkey(const X509 *x) {
  if (x == NULL) {
    return NULL;
  }
  return X509_PUBKEY_get(x->cert_info->key);
}

ASN1_BIT_STRING *X509_get0_pubkey_bitstr(const X509 *x) {
  if (!x) {
    return NULL;
  }
  return x->cert_info->key->public_key;
}

// X509_check_private_key returns 1 iff |k| matches |x|'s public key, pushing
// a specific X509 error for each failure mode of |EVP_PKEY_cmp|.
int X509_check_private_key(const X509 *x, const EVP_PKEY *k) {
  const EVP_PKEY *xk = X509_get0_pubkey(x);
  if (xk == NULL) {
    return 0;
  }

  int ret = EVP_PKEY_cmp(xk, k);
  if (ret > 0) {
    return 1;
  }

  switch (ret) {
    case 0:
      OPENSSL_PUT_ERROR(X509, X509_R_KEY_VALUES_MISMATCH);
      return 0;
    case -1:
      OPENSSL_PUT_ERROR(X509, X509_R_KEY_TYPE_MISMATCH);
      return 0;
    case -2:
      OPENSSL_PUT_ERROR(X509, X509_R_UNKNOWN_KEY_TYPE);
      return 0;
  }

  return 0;
}

// Not strictly speaking an "up_ref" as a STACK doesn't have a reference
// count but it has the same effect by duping the STACK and upping the ref of
// each X509 structure.
STACK_OF(X509) *X509_chain_up_ref(STACK_OF(X509) *chain) {
  STACK_OF(X509) *ret = sk_X509_dup(chain);
  if (ret == NULL) {
    return NULL;
  }
  for (size_t i = 0; i < sk_X509_num(ret); i++) {
    X509_up_ref(sk_X509_value(ret, i));
  }
  return ret;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x509_d2.cc
================================================

/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the header names of the includes below were stripped by
// extraction — restore from upstream BoringSSL x509_d2.cc before building.
#include
#include

// Installs the file and hash-dir lookups with their compiled-in default
// locations. Load failures of the defaults are deliberately swallowed
// (errors are cleared before returning).
int X509_STORE_set_default_paths(X509_STORE *ctx) {
  X509_LOOKUP *lookup;

  lookup = X509_STORE_add_lookup(ctx, X509_LOOKUP_file());
  if (lookup == NULL) {
    return 0;
  }
  X509_LOOKUP_load_file(lookup, NULL, X509_FILETYPE_DEFAULT);

  lookup = X509_STORE_add_lookup(ctx, X509_LOOKUP_hash_dir());
  if (lookup == NULL) {
    return 0;
  }
  X509_LOOKUP_add_dir(lookup, NULL, X509_FILETYPE_DEFAULT);

  // clear any errors
  ERR_clear_error();

  return 1;
}

// Loads trusted material from a PEM |file| and/or a hashed |path| directory.
// At least one of the two must be non-NULL.
int X509_STORE_load_locations(X509_STORE *ctx, const char *file,
                              const char *path) {
  X509_LOOKUP *lookup;

  if (file != NULL) {
    lookup = X509_STORE_add_lookup(ctx, X509_LOOKUP_file());
    if (lookup == NULL) {
      return 0;
    }
    if (X509_LOOKUP_load_file(lookup, file, X509_FILETYPE_PEM) != 1) {
      return 0;
    }
  }

  if (path != NULL) {
    lookup = X509_STORE_add_lookup(ctx, X509_LOOKUP_hash_dir());
    if (lookup == NULL) {
      return 0;
    }
    if (X509_LOOKUP_add_dir(lookup, path, X509_FILETYPE_PEM) != 1) {
      return 0;
    }
  }

  if ((path == NULL) && (file == NULL)) {
    return 0;
  }
  return 1;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x509_def.cc
================================================

/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the header name of the include below was stripped by
// extraction — restore from upstream BoringSSL x509_def.cc before building.
#include

// TODO(fork): cleanup

#if defined(OPENSSL_FUCHSIA)
#define OPENSSLDIR "/config/ssl"
#else
#define OPENSSLDIR "/etc/ssl"
#endif

#define X509_CERT_AREA OPENSSLDIR
#define X509_CERT_DIR OPENSSLDIR "/certs"
#define X509_CERT_FILE OPENSSLDIR "/cert.pem"
#define X509_PRIVATE_DIR OPENSSLDIR "/private"
#define X509_CERT_DIR_EVP "SSL_CERT_DIR"
#define X509_CERT_FILE_EVP "SSL_CERT_FILE"

// Accessors for the compiled-in default certificate locations above.
const char *X509_get_default_private_dir(void) { return X509_PRIVATE_DIR; }

const char *X509_get_default_cert_area(void) { return X509_CERT_AREA; }

const char *X509_get_default_cert_dir(void) { return X509_CERT_DIR; }

const char *X509_get_default_cert_file(void) { return X509_CERT_FILE; }

const char *X509_get_default_cert_dir_env(void) { return X509_CERT_DIR_EVP; }

const char *X509_get_default_cert_file_env(void) { return X509_CERT_FILE_EVP; }

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x509_ext.cc
================================================

/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */

// NOTE(review): the header names of the bare includes below were stripped by
// extraction — restore from upstream BoringSSL x509_ext.cc before building.
#include
#include
#include
#include
#include
#include "internal.h"

// The functions below are thin wrappers that forward CRL, certificate, and
// revoked-entry extension operations to the generic X509v3_* / X509V3_*
// helpers, passing the appropriate extension stack.

int X509_CRL_get_ext_count(const X509_CRL *x) {
  return (X509v3_get_ext_count(x->crl->extensions));
}

int X509_CRL_get_ext_by_NID(const X509_CRL *x, int nid, int lastpos) {
  return (X509v3_get_ext_by_NID(x->crl->extensions, nid, lastpos));
}

int X509_CRL_get_ext_by_OBJ(const X509_CRL *x, const ASN1_OBJECT *obj,
                            int lastpos) {
  return (X509v3_get_ext_by_OBJ(x->crl->extensions, obj, lastpos));
}

int X509_CRL_get_ext_by_critical(const X509_CRL *x, int crit, int lastpos) {
  return (X509v3_get_ext_by_critical(x->crl->extensions, crit, lastpos));
}

X509_EXTENSION *X509_CRL_get_ext(const X509_CRL *x, int loc) {
  return (X509v3_get_ext(x->crl->extensions, loc));
}

X509_EXTENSION *X509_CRL_delete_ext(X509_CRL *x, int loc) {
  return (X509v3_delete_ext(x->crl->extensions, loc));
}

void *X509_CRL_get_ext_d2i(const X509_CRL *crl, int nid, int *out_critical,
                           int *out_idx) {
  return X509V3_get_d2i(crl->crl->extensions, nid, out_critical, out_idx);
}

int X509_CRL_add1_ext_i2d(X509_CRL *x, int nid, void *value, int crit,
                          unsigned long flags) {
  return X509V3_add1_i2d(&x->crl->extensions, nid, value, crit, flags);
}

int X509_CRL_add_ext(X509_CRL *x, const X509_EXTENSION *ex, int loc) {
  return (X509v3_add_ext(&(x->crl->extensions), ex, loc) != NULL);
}

int X509_get_ext_count(const X509 *x) {
  return (X509v3_get_ext_count(x->cert_info->extensions));
}

int X509_get_ext_by_NID(const X509 *x, int nid, int lastpos) {
  return (X509v3_get_ext_by_NID(x->cert_info->extensions, nid, lastpos));
}

int X509_get_ext_by_OBJ(const X509 *x, const ASN1_OBJECT *obj, int lastpos) {
  return (X509v3_get_ext_by_OBJ(x->cert_info->extensions, obj, lastpos));
}

int X509_get_ext_by_critical(const X509 *x, int crit, int lastpos) {
  return (X509v3_get_ext_by_critical(x->cert_info->extensions, crit, lastpos));
}

X509_EXTENSION *X509_get_ext(const X509 *x, int loc) {
  return (X509v3_get_ext(x->cert_info->extensions, loc));
}

X509_EXTENSION *X509_delete_ext(X509 *x, int loc) {
  return (X509v3_delete_ext(x->cert_info->extensions, loc));
}

int X509_add_ext(X509 *x, const X509_EXTENSION *ex, int loc) {
  return (X509v3_add_ext(&(x->cert_info->extensions), ex, loc) != NULL);
}

void *X509_get_ext_d2i(const X509 *x509, int nid, int *out_critical,
                       int *out_idx) {
  return X509V3_get_d2i(x509->cert_info->extensions, nid, out_critical,
                        out_idx);
}

int X509_add1_ext_i2d(X509 *x, int nid, void *value, int crit,
                      unsigned long flags) {
  return X509V3_add1_i2d(&x->cert_info->extensions, nid, value, crit, flags);
}

int X509_REVOKED_get_ext_count(const X509_REVOKED *x) {
  return (X509v3_get_ext_count(x->extensions));
}

int X509_REVOKED_get_ext_by_NID(const X509_REVOKED *x, int nid, int lastpos) {
  return (X509v3_get_ext_by_NID(x->extensions, nid, lastpos));
}

int X509_REVOKED_get_ext_by_OBJ(const X509_REVOKED *x, const ASN1_OBJECT *obj,
                                int lastpos) {
  return (X509v3_get_ext_by_OBJ(x->extensions, obj, lastpos));
}

int X509_REVOKED_get_ext_by_critical(const X509_REVOKED *x, int crit,
                                     int lastpos) {
  return (X509v3_get_ext_by_critical(x->extensions, crit, lastpos));
}

X509_EXTENSION *X509_REVOKED_get_ext(const X509_REVOKED *x, int loc) {
  return (X509v3_get_ext(x->extensions, loc));
}

X509_EXTENSION *X509_REVOKED_delete_ext(X509_REVOKED *x, int loc) {
  return (X509v3_delete_ext(x->extensions, loc));
}

int X509_REVOKED_add_ext(X509_REVOKED *x, const X509_EXTENSION *ex, int loc) {
  return (X509v3_add_ext(&(x->extensions), ex, loc) != NULL);
}

void *X509_REVOKED_get_ext_d2i(const X509_REVOKED *revoked, int nid,
                               int *out_critical, int *out_idx) {
  return X509V3_get_d2i(revoked->extensions, nid, out_critical, out_idx);
}

int X509_REVOKED_add1_ext_i2d(X509_REVOKED *x, int nid, void *value, int crit,
                              unsigned long flags) {
  return X509V3_add1_i2d(&x->extensions, nid, value, crit, flags);
}
================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x509_lu.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../internal.h" #include "internal.h" static int X509_OBJECT_idx_by_subject(STACK_OF(X509_OBJECT) *h, int type, X509_NAME *name); static X509_OBJECT *X509_OBJECT_retrieve_by_subject(STACK_OF(X509_OBJECT) *h, int type, X509_NAME *name); static X509_OBJECT *X509_OBJECT_retrieve_match(STACK_OF(X509_OBJECT) *h, X509_OBJECT *x); static int X509_OBJECT_up_ref_count(X509_OBJECT *a); static X509_LOOKUP *X509_LOOKUP_new(const X509_LOOKUP_METHOD *method, X509_STORE *store); static int X509_LOOKUP_by_subject(X509_LOOKUP *ctx, int type, X509_NAME *name, X509_OBJECT *ret); static X509_LOOKUP *X509_LOOKUP_new(const X509_LOOKUP_METHOD *method, X509_STORE *store) { X509_LOOKUP *ret = reinterpret_cast(OPENSSL_zalloc(sizeof(X509_LOOKUP))); if (ret == NULL) { return NULL; } ret->method = method; ret->store_ctx = store; if (method->new_item != NULL && !method->new_item(ret)) { OPENSSL_free(ret); return NULL; } return ret; } void X509_LOOKUP_free(X509_LOOKUP *ctx) { if (ctx == NULL) { return; } if (ctx->method != NULL && ctx->method->free != NULL) { (*ctx->method->free)(ctx); } OPENSSL_free(ctx); } int X509_LOOKUP_ctrl(X509_LOOKUP *ctx, int cmd, const char *argc, long argl, char **ret) { if (ctx->method == NULL) { return -1; } if (ctx->method->ctrl != NULL) { return ctx->method->ctrl(ctx, cmd, argc, argl, ret); } else { return 1; } } static int X509_LOOKUP_by_subject(X509_LOOKUP *ctx, int type, X509_NAME *name, X509_OBJECT *ret) { if (ctx->method == NULL || 
ctx->method->get_by_subject == NULL) { return 0; } // Note |get_by_subject| leaves |ret| in an inconsistent state. It has // pointers to an |X509| or |X509_CRL|, but has not bumped the refcount yet. // For now, the caller is expected to fix this, but ideally we'd fix the // |X509_LOOKUP| convention itself. return ctx->method->get_by_subject(ctx, type, name, ret) > 0; } static int x509_object_cmp(const X509_OBJECT *a, const X509_OBJECT *b) { int ret = a->type - b->type; if (ret) { return ret; } switch (a->type) { case X509_LU_X509: return X509_subject_name_cmp(a->data.x509, b->data.x509); case X509_LU_CRL: return X509_CRL_cmp(a->data.crl, b->data.crl); default: // abort(); return 0; } } static int x509_object_cmp_sk(const X509_OBJECT *const *a, const X509_OBJECT *const *b) { return x509_object_cmp(*a, *b); } X509_STORE *X509_STORE_new(void) { X509_STORE *ret = reinterpret_cast(OPENSSL_zalloc(sizeof(X509_STORE))); if (ret == NULL) { return NULL; } ret->references = 1; CRYPTO_MUTEX_init(&ret->objs_lock); ret->objs = sk_X509_OBJECT_new(x509_object_cmp_sk); ret->get_cert_methods = sk_X509_LOOKUP_new_null(); ret->param = X509_VERIFY_PARAM_new(); if (ret->objs == NULL || ret->get_cert_methods == NULL || ret->param == NULL) { X509_STORE_free(ret); return NULL; } return ret; } int X509_STORE_up_ref(X509_STORE *store) { CRYPTO_refcount_inc(&store->references); return 1; } void X509_STORE_free(X509_STORE *vfy) { if (vfy == nullptr || !CRYPTO_refcount_dec_and_test_zero(&vfy->references)) { return; } CRYPTO_MUTEX_cleanup(&vfy->objs_lock); sk_X509_LOOKUP_pop_free(vfy->get_cert_methods, X509_LOOKUP_free); sk_X509_OBJECT_pop_free(vfy->objs, X509_OBJECT_free); X509_VERIFY_PARAM_free(vfy->param); OPENSSL_free(vfy); } X509_LOOKUP *X509_STORE_add_lookup(X509_STORE *v, const X509_LOOKUP_METHOD *m) { STACK_OF(X509_LOOKUP) *sk = v->get_cert_methods; for (size_t i = 0; i < sk_X509_LOOKUP_num(sk); i++) { X509_LOOKUP *lu = sk_X509_LOOKUP_value(sk, i); if (m == lu->method) { return lu; } } 
X509_LOOKUP *lu = X509_LOOKUP_new(m, v); if (lu == NULL || !sk_X509_LOOKUP_push(v->get_cert_methods, lu)) { X509_LOOKUP_free(lu); return NULL; } return lu; } int X509_STORE_CTX_get_by_subject(X509_STORE_CTX *vs, int type, X509_NAME *name, X509_OBJECT *ret) { X509_STORE *ctx = vs->ctx; X509_OBJECT stmp; CRYPTO_MUTEX_lock_write(&ctx->objs_lock); X509_OBJECT *tmp = X509_OBJECT_retrieve_by_subject(ctx->objs, type, name); CRYPTO_MUTEX_unlock_write(&ctx->objs_lock); if (tmp == NULL || type == X509_LU_CRL) { for (size_t i = 0; i < sk_X509_LOOKUP_num(ctx->get_cert_methods); i++) { X509_LOOKUP *lu = sk_X509_LOOKUP_value(ctx->get_cert_methods, i); if (X509_LOOKUP_by_subject(lu, type, name, &stmp)) { tmp = &stmp; break; } } if (tmp == NULL) { return 0; } } // TODO(crbug.com/boringssl/685): This should call // |X509_OBJECT_free_contents|. ret->type = tmp->type; ret->data = tmp->data; X509_OBJECT_up_ref_count(ret); return 1; } static int x509_store_add(X509_STORE *ctx, void *x, int is_crl) { if (x == NULL) { return 0; } X509_OBJECT *const obj = X509_OBJECT_new(); if (obj == NULL) { return 0; } if (is_crl) { obj->type = X509_LU_CRL; obj->data.crl = (X509_CRL *)x; } else { obj->type = X509_LU_X509; obj->data.x509 = (X509 *)x; } X509_OBJECT_up_ref_count(obj); CRYPTO_MUTEX_lock_write(&ctx->objs_lock); int ret = 1; int added = 0; // Duplicates are silently ignored if (!X509_OBJECT_retrieve_match(ctx->objs, obj)) { ret = added = (sk_X509_OBJECT_push(ctx->objs, obj) != 0); } CRYPTO_MUTEX_unlock_write(&ctx->objs_lock); if (!added) { X509_OBJECT_free(obj); } return ret; } int X509_STORE_add_cert(X509_STORE *ctx, X509 *x) { return x509_store_add(ctx, x, /*is_crl=*/0); } int X509_STORE_add_crl(X509_STORE *ctx, X509_CRL *x) { return x509_store_add(ctx, x, /*is_crl=*/1); } X509_OBJECT *X509_OBJECT_new(void) { return reinterpret_cast(OPENSSL_zalloc(sizeof(X509_OBJECT))); } void X509_OBJECT_free(X509_OBJECT *obj) { if (obj == NULL) { return; } X509_OBJECT_free_contents(obj); 
OPENSSL_free(obj); } static int X509_OBJECT_up_ref_count(X509_OBJECT *a) { switch (a->type) { case X509_LU_X509: X509_up_ref(a->data.x509); break; case X509_LU_CRL: X509_CRL_up_ref(a->data.crl); break; } return 1; } void X509_OBJECT_free_contents(X509_OBJECT *a) { switch (a->type) { case X509_LU_X509: X509_free(a->data.x509); break; case X509_LU_CRL: X509_CRL_free(a->data.crl); break; } OPENSSL_memset(a, 0, sizeof(X509_OBJECT)); } int X509_OBJECT_get_type(const X509_OBJECT *a) { return a->type; } X509 *X509_OBJECT_get0_X509(const X509_OBJECT *a) { if (a == NULL || a->type != X509_LU_X509) { return NULL; } return a->data.x509; } static int x509_object_idx_cnt(STACK_OF(X509_OBJECT) *h, int type, X509_NAME *name, int *pnmatch) { X509_OBJECT stmp; X509 x509_s; X509_CINF cinf_s; X509_CRL crl_s; X509_CRL_INFO crl_info_s; stmp.type = type; switch (type) { case X509_LU_X509: stmp.data.x509 = &x509_s; x509_s.cert_info = &cinf_s; cinf_s.subject = name; break; case X509_LU_CRL: stmp.data.crl = &crl_s; crl_s.crl = &crl_info_s; crl_info_s.issuer = name; break; default: // abort(); return -1; } size_t idx; sk_X509_OBJECT_sort(h); if (!sk_X509_OBJECT_find(h, &idx, &stmp)) { return -1; } if (pnmatch != NULL) { *pnmatch = 1; for (size_t tidx = idx + 1; tidx < sk_X509_OBJECT_num(h); tidx++) { const X509_OBJECT *tobj = sk_X509_OBJECT_value(h, tidx); if (x509_object_cmp(tobj, &stmp)) { break; } (*pnmatch)++; } } return (int)idx; } static int X509_OBJECT_idx_by_subject(STACK_OF(X509_OBJECT) *h, int type, X509_NAME *name) { return x509_object_idx_cnt(h, type, name, NULL); } static X509_OBJECT *X509_OBJECT_retrieve_by_subject(STACK_OF(X509_OBJECT) *h, int type, X509_NAME *name) { int idx; idx = X509_OBJECT_idx_by_subject(h, type, name); if (idx == -1) { return NULL; } return sk_X509_OBJECT_value(h, idx); } static X509_OBJECT *x509_object_dup(const X509_OBJECT *obj) { X509_OBJECT *ret = X509_OBJECT_new(); if (ret == NULL) { return NULL; } ret->type = obj->type; ret->data = obj->data; 
X509_OBJECT_up_ref_count(ret); return ret; } STACK_OF(X509_OBJECT) *X509_STORE_get1_objects(X509_STORE *store) { CRYPTO_MUTEX_lock_read(&store->objs_lock); STACK_OF(X509_OBJECT) *ret = sk_X509_OBJECT_deep_copy(store->objs, x509_object_dup, X509_OBJECT_free); CRYPTO_MUTEX_unlock_read(&store->objs_lock); return ret; } STACK_OF(X509_OBJECT) *X509_STORE_get0_objects(X509_STORE *store) { return store->objs; } STACK_OF(X509) *X509_STORE_CTX_get1_certs(X509_STORE_CTX *ctx, X509_NAME *nm) { int cnt; STACK_OF(X509) *sk = sk_X509_new_null(); if (sk == NULL) { return NULL; } CRYPTO_MUTEX_lock_write(&ctx->ctx->objs_lock); int idx = x509_object_idx_cnt(ctx->ctx->objs, X509_LU_X509, nm, &cnt); if (idx < 0) { // Nothing found in cache: do lookup to possibly add new objects to // cache X509_OBJECT xobj; CRYPTO_MUTEX_unlock_write(&ctx->ctx->objs_lock); if (!X509_STORE_CTX_get_by_subject(ctx, X509_LU_X509, nm, &xobj)) { sk_X509_free(sk); return NULL; } X509_OBJECT_free_contents(&xobj); CRYPTO_MUTEX_lock_write(&ctx->ctx->objs_lock); idx = x509_object_idx_cnt(ctx->ctx->objs, X509_LU_X509, nm, &cnt); if (idx < 0) { CRYPTO_MUTEX_unlock_write(&ctx->ctx->objs_lock); sk_X509_free(sk); return NULL; } } for (int i = 0; i < cnt; i++, idx++) { X509_OBJECT *obj = sk_X509_OBJECT_value(ctx->ctx->objs, idx); X509 *x = obj->data.x509; if (!sk_X509_push(sk, x)) { CRYPTO_MUTEX_unlock_write(&ctx->ctx->objs_lock); sk_X509_pop_free(sk, X509_free); return NULL; } X509_up_ref(x); } CRYPTO_MUTEX_unlock_write(&ctx->ctx->objs_lock); return sk; } STACK_OF(X509_CRL) *X509_STORE_CTX_get1_crls(X509_STORE_CTX *ctx, X509_NAME *nm) { int cnt; X509_OBJECT xobj; STACK_OF(X509_CRL) *sk = sk_X509_CRL_new_null(); if (sk == NULL) { return NULL; } // Always do lookup to possibly add new CRLs to cache. 
if (!X509_STORE_CTX_get_by_subject(ctx, X509_LU_CRL, nm, &xobj)) { sk_X509_CRL_free(sk); return NULL; } X509_OBJECT_free_contents(&xobj); CRYPTO_MUTEX_lock_write(&ctx->ctx->objs_lock); int idx = x509_object_idx_cnt(ctx->ctx->objs, X509_LU_CRL, nm, &cnt); if (idx < 0) { CRYPTO_MUTEX_unlock_write(&ctx->ctx->objs_lock); sk_X509_CRL_free(sk); return NULL; } for (int i = 0; i < cnt; i++, idx++) { X509_OBJECT *obj = sk_X509_OBJECT_value(ctx->ctx->objs, idx); X509_CRL *x = obj->data.crl; X509_CRL_up_ref(x); if (!sk_X509_CRL_push(sk, x)) { CRYPTO_MUTEX_unlock_write(&ctx->ctx->objs_lock); X509_CRL_free(x); sk_X509_CRL_pop_free(sk, X509_CRL_free); return NULL; } } CRYPTO_MUTEX_unlock_write(&ctx->ctx->objs_lock); return sk; } static X509_OBJECT *X509_OBJECT_retrieve_match(STACK_OF(X509_OBJECT) *h, X509_OBJECT *x) { sk_X509_OBJECT_sort(h); size_t idx; if (!sk_X509_OBJECT_find(h, &idx, x)) { return NULL; } if ((x->type != X509_LU_X509) && (x->type != X509_LU_CRL)) { return sk_X509_OBJECT_value(h, idx); } for (size_t i = idx; i < sk_X509_OBJECT_num(h); i++) { X509_OBJECT *obj = sk_X509_OBJECT_value(h, i); if (x509_object_cmp(obj, x)) { return NULL; } if (x->type == X509_LU_X509) { if (!X509_cmp(obj->data.x509, x->data.x509)) { return obj; } } else if (x->type == X509_LU_CRL) { if (!X509_CRL_match(obj->data.crl, x->data.crl)) { return obj; } } else { return obj; } } return NULL; } int X509_STORE_CTX_get1_issuer(X509 **out_issuer, X509_STORE_CTX *ctx, X509 *x) { X509_NAME *xn; X509_OBJECT obj, *pobj; int idx, ret; size_t i; xn = X509_get_issuer_name(x); if (!X509_STORE_CTX_get_by_subject(ctx, X509_LU_X509, xn, &obj)) { return 0; } // If certificate matches all OK if (x509_check_issued_with_callback(ctx, x, obj.data.x509)) { *out_issuer = obj.data.x509; return 1; } X509_OBJECT_free_contents(&obj); // Else find index of first cert accepted by // |x509_check_issued_with_callback|. 
ret = 0; CRYPTO_MUTEX_lock_write(&ctx->ctx->objs_lock); idx = X509_OBJECT_idx_by_subject(ctx->ctx->objs, X509_LU_X509, xn); if (idx != -1) { // should be true as we've had at least one // match // Look through all matching certs for suitable issuer for (i = idx; i < sk_X509_OBJECT_num(ctx->ctx->objs); i++) { pobj = sk_X509_OBJECT_value(ctx->ctx->objs, i); // See if we've run past the matches if (pobj->type != X509_LU_X509) { break; } if (X509_NAME_cmp(xn, X509_get_subject_name(pobj->data.x509))) { break; } if (x509_check_issued_with_callback(ctx, x, pobj->data.x509)) { *out_issuer = pobj->data.x509; X509_OBJECT_up_ref_count(pobj); ret = 1; break; } } } CRYPTO_MUTEX_unlock_write(&ctx->ctx->objs_lock); return ret; } int X509_STORE_set_flags(X509_STORE *ctx, unsigned long flags) { return X509_VERIFY_PARAM_set_flags(ctx->param, flags); } int X509_STORE_set_depth(X509_STORE *ctx, int depth) { X509_VERIFY_PARAM_set_depth(ctx->param, depth); return 1; } int X509_STORE_set_purpose(X509_STORE *ctx, int purpose) { return X509_VERIFY_PARAM_set_purpose(ctx->param, purpose); } int X509_STORE_set_trust(X509_STORE *ctx, int trust) { return X509_VERIFY_PARAM_set_trust(ctx->param, trust); } int X509_STORE_set1_param(X509_STORE *ctx, const X509_VERIFY_PARAM *param) { return X509_VERIFY_PARAM_set1(ctx->param, param); } X509_VERIFY_PARAM *X509_STORE_get0_param(X509_STORE *ctx) { return ctx->param; } void X509_STORE_set_verify_cb(X509_STORE *ctx, X509_STORE_CTX_verify_cb verify_cb) { ctx->verify_cb = verify_cb; } X509_STORE *X509_STORE_CTX_get0_store(const X509_STORE_CTX *ctx) { return ctx->ctx; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x509_obj.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "../internal.h" #include "internal.h" // Limit to ensure we don't overflow: much greater than // anything enountered in practice. #define NAME_ONELINE_MAX (1024 * 1024) char *X509_NAME_oneline(const X509_NAME *a, char *buf, int len) { X509_NAME_ENTRY *ne; size_t i; int n, lold, l, l1, l2, num, j, type; const char *s; char *p; unsigned char *q; BUF_MEM *b = NULL; static const char hex[17] = "0123456789ABCDEF"; int gs_doit[4]; char tmp_buf[80]; if (buf == NULL) { if ((b = BUF_MEM_new()) == NULL) { goto err; } if (!BUF_MEM_grow(b, 200)) { goto err; } b->data[0] = '\0'; len = 200; } else if (len <= 0) { return NULL; } if (a == NULL) { if (b) { buf = b->data; OPENSSL_free(b); } OPENSSL_strlcpy(buf, "NO X509_NAME", len); return buf; } len--; // space for '\0' l = 0; for (i = 0; i < sk_X509_NAME_ENTRY_num(a->entries); i++) { ne = sk_X509_NAME_ENTRY_value(a->entries, i); n = OBJ_obj2nid(ne->object); if ((n == NID_undef) || ((s = OBJ_nid2sn(n)) == NULL)) { i2t_ASN1_OBJECT(tmp_buf, sizeof(tmp_buf), ne->object); s = tmp_buf; } l1 = strlen(s); type = ne->value->type; num = ne->value->length; if (num > NAME_ONELINE_MAX) { OPENSSL_PUT_ERROR(X509, X509_R_NAME_TOO_LONG); goto err; } q = ne->value->data; if ((type == V_ASN1_GENERALSTRING) && ((num % 4) == 0)) { gs_doit[0] = gs_doit[1] = gs_doit[2] = gs_doit[3] = 0; for (j = 0; j < num; j++) { if (q[j] != 0) { gs_doit[j & 3] = 1; } } if (gs_doit[0] | gs_doit[1] | gs_doit[2]) { gs_doit[0] = gs_doit[1] = gs_doit[2] = gs_doit[3] = 1; } else { gs_doit[0] = gs_doit[1] = gs_doit[2] = 0; gs_doit[3] = 1; } } else { gs_doit[0] = gs_doit[1] = gs_doit[2] = gs_doit[3] = 1; } for (l2 = j = 0; j < num; j++) { if (!gs_doit[j & 3]) { continue; } l2++; if ((q[j] < ' ') || (q[j] > '~')) { l2 += 3; } } lold = l; l += 1 + l1 + 1 + l2; if (l > NAME_ONELINE_MAX) { 
OPENSSL_PUT_ERROR(X509, X509_R_NAME_TOO_LONG); goto err; } if (b != NULL) { if (!BUF_MEM_grow(b, l + 1)) { goto err; } p = &(b->data[lold]); } else if (l > len) { break; } else { p = &(buf[lold]); } *(p++) = '/'; OPENSSL_memcpy(p, s, (unsigned int)l1); p += l1; *(p++) = '='; q = ne->value->data; for (j = 0; j < num; j++) { if (!gs_doit[j & 3]) { continue; } n = q[j]; if ((n < ' ') || (n > '~')) { *(p++) = '\\'; *(p++) = 'x'; *(p++) = hex[(n >> 4) & 0x0f]; *(p++) = hex[n & 0x0f]; } else { *(p++) = n; } } *p = '\0'; } if (b != NULL) { p = b->data; OPENSSL_free(b); } else { p = buf; } if (i == 0) { *p = '\0'; } return p; err: BUF_MEM_free(b); return NULL; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x509_req.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include "../asn1/internal.h" #include "internal.h" long X509_REQ_get_version(const X509_REQ *req) { return ASN1_INTEGER_get(req->req_info->version); } X509_NAME *X509_REQ_get_subject_name(const X509_REQ *req) { return req->req_info->subject; } EVP_PKEY *X509_REQ_get_pubkey(const X509_REQ *req) { if (req == NULL) { return NULL; } return X509_PUBKEY_get(req->req_info->pubkey); } EVP_PKEY *X509_REQ_get0_pubkey(const X509_REQ *req) { if (req == NULL) { return NULL; } return X509_PUBKEY_get0(req->req_info->pubkey); } int X509_REQ_check_private_key(const X509_REQ *x, const EVP_PKEY *k) { const EVP_PKEY *xk = X509_REQ_get0_pubkey(x); if (xk == NULL) { return 0; } int ret = EVP_PKEY_cmp(xk, k); if (ret > 0) { return 1; } switch (ret) { case 0: OPENSSL_PUT_ERROR(X509, X509_R_KEY_VALUES_MISMATCH); return 0; case -1: OPENSSL_PUT_ERROR(X509, X509_R_KEY_TYPE_MISMATCH); return 0; case -2: if (EVP_PKEY_id(k) == EVP_PKEY_EC) { OPENSSL_PUT_ERROR(X509, ERR_R_EC_LIB); } else { OPENSSL_PUT_ERROR(X509, X509_R_UNKNOWN_KEY_TYPE); } return 0; } return 0; } int X509_REQ_extension_nid(int req_nid) { return req_nid == NID_ext_req || req_nid == NID_ms_ext_req; } STACK_OF(X509_EXTENSION) *X509_REQ_get_extensions(const X509_REQ *req) { if (req == NULL || req->req_info == NULL) { return NULL; } int idx = X509_REQ_get_attr_by_NID(req, NID_ext_req, -1); if (idx == -1) { idx = X509_REQ_get_attr_by_NID(req, NID_ms_ext_req, -1); } if (idx == -1) { return NULL; } const X509_ATTRIBUTE *attr = X509_REQ_get_attr(req, idx); // TODO(davidben): |X509_ATTRIBUTE_get0_type| is not const-correct. It should // take and return a const pointer. 
const ASN1_TYPE *ext = X509_ATTRIBUTE_get0_type((X509_ATTRIBUTE *)attr, 0); if (!ext || ext->type != V_ASN1_SEQUENCE) { return NULL; } const unsigned char *p = ext->value.sequence->data; return (STACK_OF(X509_EXTENSION) *)ASN1_item_d2i( NULL, &p, ext->value.sequence->length, ASN1_ITEM_rptr(X509_EXTENSIONS)); } // Add a STACK_OF extensions to a certificate request: allow alternative OIDs // in case we want to create a non standard one. int X509_REQ_add_extensions_nid(X509_REQ *req, const STACK_OF(X509_EXTENSION) *exts, int nid) { // Generate encoding of extensions unsigned char *ext = NULL; int ext_len = ASN1_item_i2d((ASN1_VALUE *)exts, &ext, ASN1_ITEM_rptr(X509_EXTENSIONS)); if (ext_len <= 0) { return 0; } int ret = X509_REQ_add1_attr_by_NID(req, nid, V_ASN1_SEQUENCE, ext, ext_len); OPENSSL_free(ext); return ret; } // This is the normal usage: use the "official" OID int X509_REQ_add_extensions(X509_REQ *req, const STACK_OF(X509_EXTENSION) *exts) { return X509_REQ_add_extensions_nid(req, exts, NID_ext_req); } int X509_REQ_get_attr_count(const X509_REQ *req) { return (int)sk_X509_ATTRIBUTE_num(req->req_info->attributes); } int X509_REQ_get_attr_by_NID(const X509_REQ *req, int nid, int lastpos) { const ASN1_OBJECT *obj = OBJ_nid2obj(nid); if (obj == NULL) { return -1; } return X509_REQ_get_attr_by_OBJ(req, obj, lastpos); } int X509_REQ_get_attr_by_OBJ(const X509_REQ *req, const ASN1_OBJECT *obj, int lastpos) { if (req->req_info->attributes == NULL) { return -1; } lastpos++; if (lastpos < 0) { lastpos = 0; } int n = (int)sk_X509_ATTRIBUTE_num(req->req_info->attributes); for (; lastpos < n; lastpos++) { const X509_ATTRIBUTE *attr = sk_X509_ATTRIBUTE_value(req->req_info->attributes, lastpos); if (OBJ_cmp(attr->object, obj) == 0) { return lastpos; } } return -1; } X509_ATTRIBUTE *X509_REQ_get_attr(const X509_REQ *req, int loc) { if (req->req_info->attributes == NULL || loc < 0 || sk_X509_ATTRIBUTE_num(req->req_info->attributes) <= (size_t)loc) { return NULL; } return 
sk_X509_ATTRIBUTE_value(req->req_info->attributes, loc); } X509_ATTRIBUTE *X509_REQ_delete_attr(X509_REQ *req, int loc) { if (req->req_info->attributes == NULL || loc < 0 || sk_X509_ATTRIBUTE_num(req->req_info->attributes) <= (size_t)loc) { return NULL; } return sk_X509_ATTRIBUTE_delete(req->req_info->attributes, loc); } static int X509_REQ_add0_attr(X509_REQ *req, X509_ATTRIBUTE *attr) { if (req->req_info->attributes == NULL) { req->req_info->attributes = sk_X509_ATTRIBUTE_new_null(); } if (req->req_info->attributes == NULL || !sk_X509_ATTRIBUTE_push(req->req_info->attributes, attr)) { return 0; } return 1; } int X509_REQ_add1_attr(X509_REQ *req, const X509_ATTRIBUTE *attr) { X509_ATTRIBUTE *new_attr = X509_ATTRIBUTE_dup(attr); if (new_attr == NULL || !X509_REQ_add0_attr(req, new_attr)) { X509_ATTRIBUTE_free(new_attr); return 0; } return 1; } int X509_REQ_add1_attr_by_OBJ(X509_REQ *req, const ASN1_OBJECT *obj, int attrtype, const unsigned char *data, int len) { X509_ATTRIBUTE *attr = X509_ATTRIBUTE_create_by_OBJ(NULL, obj, attrtype, data, len); if (attr == NULL || !X509_REQ_add0_attr(req, attr)) { X509_ATTRIBUTE_free(attr); return 0; } return 1; } int X509_REQ_add1_attr_by_NID(X509_REQ *req, int nid, int attrtype, const unsigned char *data, int len) { X509_ATTRIBUTE *attr = X509_ATTRIBUTE_create_by_NID(NULL, nid, attrtype, data, len); if (attr == NULL || !X509_REQ_add0_attr(req, attr)) { X509_ATTRIBUTE_free(attr); return 0; } return 1; } int X509_REQ_add1_attr_by_txt(X509_REQ *req, const char *attrname, int attrtype, const unsigned char *data, int len) { X509_ATTRIBUTE *attr = X509_ATTRIBUTE_create_by_txt(NULL, attrname, attrtype, data, len); if (attr == NULL || !X509_REQ_add0_attr(req, attr)) { X509_ATTRIBUTE_free(attr); return 0; } return 1; } void X509_REQ_get0_signature(const X509_REQ *req, const ASN1_BIT_STRING **psig, const X509_ALGOR **palg) { if (psig != NULL) { *psig = req->signature; } if (palg != NULL) { *palg = req->sig_alg; } } int 
X509_REQ_get_signature_nid(const X509_REQ *req) { return OBJ_obj2nid(req->sig_alg->algorithm); } int i2d_re_X509_REQ_tbs(X509_REQ *req, unsigned char **pp) { asn1_encoding_clear(&req->req_info->enc); return i2d_X509_REQ_INFO(req->req_info, pp); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x509_set.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "internal.h" long X509_get_version(const X509 *x509) { // The default version is v1(0). if (x509->cert_info->version == NULL) { return X509_VERSION_1; } return ASN1_INTEGER_get(x509->cert_info->version); } int X509_set_version(X509 *x, long version) { if (x == NULL) { return 0; } if (version < X509_VERSION_1 || version > X509_VERSION_3) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_VERSION); return 0; } // v1(0) is default and is represented by omitting the version. 
if (version == X509_VERSION_1) {
    ASN1_INTEGER_free(x->cert_info->version);
    x->cert_info->version = NULL;
    return 1;
  }
  if (x->cert_info->version == NULL) {
    x->cert_info->version = ASN1_INTEGER_new();
    if (x->cert_info->version == NULL) {
      return 0;
    }
  }
  return ASN1_INTEGER_set_int64(x->cert_info->version, version);
}

// X509_set_serialNumber stores a copy of |serial| as |x|'s serial number.
// Only V_ASN1_INTEGER and V_ASN1_NEG_INTEGER values are accepted. Returns one
// on success and zero on error.
int X509_set_serialNumber(X509 *x, const ASN1_INTEGER *serial) {
  if (serial->type != V_ASN1_INTEGER && serial->type != V_ASN1_NEG_INTEGER) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_WRONG_TYPE);
    return 0;
  }
  ASN1_INTEGER *in;
  if (x == NULL) {
    return 0;
  }
  in = x->cert_info->serialNumber;
  if (in != serial) {
    // Replace the stored serial with a copy; |in| then doubles as the
    // success flag for the return value below.
    in = ASN1_INTEGER_dup(serial);
    if (in != NULL) {
      ASN1_INTEGER_free(x->cert_info->serialNumber);
      x->cert_info->serialNumber = in;
    }
  }
  return in != NULL;
}

int X509_set_issuer_name(X509 *x, X509_NAME *name) {
  if ((x == NULL) || (x->cert_info == NULL)) {
    return 0;
  }
  return (X509_NAME_set(&x->cert_info->issuer, name));
}

int X509_set_subject_name(X509 *x, X509_NAME *name) {
  if ((x == NULL) || (x->cert_info == NULL)) {
    return 0;
  }
  return (X509_NAME_set(&x->cert_info->subject, name));
}

// X509_set1_notBefore stores a copy of |tm| as |x|'s notBefore time. Returns
// one on success and zero on error. Setting the time to itself is a no-op
// success.
int X509_set1_notBefore(X509 *x, const ASN1_TIME *tm) {
  ASN1_TIME *in;
  if ((x == NULL) || (x->cert_info->validity == NULL)) {
    return 0;
  }
  in = x->cert_info->validity->notBefore;
  if (in != tm) {
    in = ASN1_STRING_dup(tm);
    if (in != NULL) {
      ASN1_TIME_free(x->cert_info->validity->notBefore);
      x->cert_info->validity->notBefore = in;
    }
  }
  return in != NULL;
}

// X509_set_notBefore is a deprecated alias for |X509_set1_notBefore|.
int X509_set_notBefore(X509 *x, const ASN1_TIME *tm) {
  return X509_set1_notBefore(x, tm);
}

const ASN1_TIME *X509_get0_notBefore(const X509 *x) {
  return x->cert_info->validity->notBefore;
}

ASN1_TIME *X509_getm_notBefore(X509 *x) {
  // Note this function takes a const |X509| pointer in OpenSSL. We require
  // non-const as this allows mutating |x|. If it comes up for compatibility,
  // we can relax this.
  return x->cert_info->validity->notBefore;
}

ASN1_TIME *X509_get_notBefore(const X509 *x509) {
  // In OpenSSL, this function is an alias for |X509_getm_notBefore|, but our
  // |X509_getm_notBefore| is const-correct. |X509_get_notBefore| was
  // originally a macro, so it needs to capture both get0 and getm use cases.
  return x509->cert_info->validity->notBefore;
}

// X509_set1_notAfter stores a copy of |tm| as |x|'s notAfter time. Returns
// one on success and zero on error. Mirrors |X509_set1_notBefore|.
int X509_set1_notAfter(X509 *x, const ASN1_TIME *tm) {
  ASN1_TIME *in;
  if ((x == NULL) || (x->cert_info->validity == NULL)) {
    return 0;
  }
  in = x->cert_info->validity->notAfter;
  if (in != tm) {
    in = ASN1_STRING_dup(tm);
    if (in != NULL) {
      ASN1_TIME_free(x->cert_info->validity->notAfter);
      x->cert_info->validity->notAfter = in;
    }
  }
  return in != NULL;
}

// X509_set_notAfter is a deprecated alias for |X509_set1_notAfter|.
int X509_set_notAfter(X509 *x, const ASN1_TIME *tm) {
  return X509_set1_notAfter(x, tm);
}

const ASN1_TIME *X509_get0_notAfter(const X509 *x) {
  return x->cert_info->validity->notAfter;
}

ASN1_TIME *X509_getm_notAfter(X509 *x) {
  // Note this function takes a const |X509| pointer in OpenSSL. We require
  // non-const as this allows mutating |x|. If it comes up for compatibility,
  // we can relax this.
  return x->cert_info->validity->notAfter;
}

ASN1_TIME *X509_get_notAfter(const X509 *x509) {
  // In OpenSSL, this function is an alias for |X509_getm_notAfter|, but our
  // |X509_getm_notAfter| is const-correct. |X509_get_notAfter| was
  // originally a macro, so it needs to capture both get0 and getm use cases.
  return x509->cert_info->validity->notAfter;
}

// X509_get0_uids writes the optional issuer and subject unique IDs of |x509|
// (which may be NULL) to |*out_issuer_uid| and |*out_subject_uid|. Either
// output pointer may be NULL to skip that field.
void X509_get0_uids(const X509 *x509, const ASN1_BIT_STRING **out_issuer_uid,
                    const ASN1_BIT_STRING **out_subject_uid) {
  if (out_issuer_uid != NULL) {
    *out_issuer_uid = x509->cert_info->issuerUID;
  }
  if (out_subject_uid != NULL) {
    *out_subject_uid = x509->cert_info->subjectUID;
  }
}

int X509_set_pubkey(X509 *x, EVP_PKEY *pkey) {
  if ((x == NULL) || (x->cert_info == NULL)) {
    return 0;
  }
  return (X509_PUBKEY_set(&(x->cert_info->key), pkey));
}

const STACK_OF(X509_EXTENSION) *X509_get0_extensions(const X509 *x) {
  return x->cert_info->extensions;
}

const X509_ALGOR *X509_get0_tbs_sigalg(const X509 *x) {
  return x->cert_info->signature;
}

X509_PUBKEY *X509_get_X509_PUBKEY(const X509 *x509) {
  return x509->cert_info->key;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x509_trs.cc
================================================
/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include
#include
#include
#include

#include "../internal.h"
#include "internal.h"

typedef struct x509_trust_st X509_TRUST;

// An X509_TRUST maps a trust identifier to the callback that evaluates it for
// a certificate, along with the extended-key-usage NID the callback consults.
struct x509_trust_st {
  int trust;
  int (*check_trust)(const X509_TRUST *, X509 *);
  int nid;
} /* X509_TRUST */;

static int trust_1oidany(const X509_TRUST *trust, X509 *x);
static int trust_compat(const X509_TRUST *trust, X509 *x);
static int obj_trust(int id, X509 *x);

// trstandard is the table of built-in trust identifiers.
static const X509_TRUST trstandard[] = {
    {X509_TRUST_COMPAT, trust_compat, 0},
    {X509_TRUST_SSL_CLIENT, trust_1oidany, NID_client_auth},
    {X509_TRUST_SSL_SERVER, trust_1oidany, NID_server_auth},
    {X509_TRUST_EMAIL, trust_1oidany, NID_email_protect},
    {X509_TRUST_OBJECT_SIGN, trust_1oidany, NID_code_sign},
    {X509_TRUST_TSA, trust_1oidany, NID_time_stamp}};

// X509_TRUST_get0 returns the entry in |trstandard| whose trust identifier is
// |id|, or NULL if there is none.
static const X509_TRUST *X509_TRUST_get0(int id) {
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(trstandard); i++) {
    if (trstandard[i].trust == id) {
      return &trstandard[i];
    }
  }
  return NULL;
}

// X509_check_trust evaluates trust identifier |id| for certificate |x| and
// returns one of X509_TRUST_TRUSTED, X509_TRUST_REJECTED, or
// X509_TRUST_UNTRUSTED. |flags| is unused in this implementation.
int X509_check_trust(X509 *x, int id, int flags) {
  if (id == -1) {
    return X509_TRUST_TRUSTED;
  }
  // We get this as a default value
  if (id == 0) {
    int rv = obj_trust(NID_anyExtendedKeyUsage, x);
    if (rv != X509_TRUST_UNTRUSTED) {
      return rv;
    }
    return trust_compat(NULL, x);
  }
  const X509_TRUST *pt = X509_TRUST_get0(id);
  if (pt == NULL) {
    // Unknown trust IDs are silently reinterpreted as NIDs. This is
    // unreachable from the certificate verifier itself, but wpa_supplicant
    // relies on it. Note this relies on commonly-used NIDs and trust IDs not
    // colliding.
    return obj_trust(id, x);
  }
  return pt->check_trust(pt, x);
}

int X509_is_valid_trust_id(int trust) { return X509_TRUST_get0(trust) != NULL; }

// trust_1oidany checks |x|'s auxiliary trust/reject OID lists against
// |trust->nid| when any such settings are present, and otherwise falls back
// to the self-signed compatibility check.
static int trust_1oidany(const X509_TRUST *trust, X509 *x) {
  if (x->aux && (x->aux->trust || x->aux->reject)) {
    return obj_trust(trust->nid, x);
  }
  // we don't have any trust settings: for compatibility we return trusted
  // if it is self signed
  return trust_compat(trust, x);
}

// trust_compat reports |x| as trusted iff its cached extension flags mark it
// self-signed (EXFLAG_SS). |trust| is unused. Returns untrusted if the
// extension cache cannot be populated.
static int trust_compat(const X509_TRUST *trust, X509 *x) {
  if (!x509v3_cache_extensions(x)) {
    return X509_TRUST_UNTRUSTED;
  }
  if (x->ex_flags & EXFLAG_SS) {
    return X509_TRUST_TRUSTED;
  } else {
    return X509_TRUST_UNTRUSTED;
  }
}

// obj_trust consults |x|'s auxiliary trust settings for the OID with NID
// |id|. Explicit rejections take precedence over explicit trust; with no
// auxiliary data or no match the result is untrusted.
static int obj_trust(int id, X509 *x) {
  X509_CERT_AUX *ax = x->aux;
  if (!ax) {
    return X509_TRUST_UNTRUSTED;
  }
  for (size_t i = 0; i < sk_ASN1_OBJECT_num(ax->reject); i++) {
    const ASN1_OBJECT *obj = sk_ASN1_OBJECT_value(ax->reject, i);
    if (OBJ_obj2nid(obj) == id) {
      return X509_TRUST_REJECTED;
    }
  }
  for (size_t i = 0; i < sk_ASN1_OBJECT_num(ax->trust); i++) {
    const ASN1_OBJECT *obj = sk_ASN1_OBJECT_value(ax->trust, i);
    if (OBJ_obj2nid(obj) == id) {
      return X509_TRUST_TRUSTED;
    }
  }
  return X509_TRUST_UNTRUSTED;
}


================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x509_txt.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include const char *X509_verify_cert_error_string(long err) { switch (err) { case X509_V_OK: return "ok"; case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT: return "unable to get issuer certificate"; case X509_V_ERR_UNABLE_TO_GET_CRL: return "unable to get certificate CRL"; case X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE: return "unable to decrypt certificate's signature"; case X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE: return "unable to decrypt CRL's signature"; case X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY: return "unable to decode issuer public key"; case X509_V_ERR_CERT_SIGNATURE_FAILURE: return "certificate signature failure"; case X509_V_ERR_CRL_SIGNATURE_FAILURE: return "CRL signature failure"; case X509_V_ERR_CERT_NOT_YET_VALID: return "certificate is not yet valid"; case X509_V_ERR_CRL_NOT_YET_VALID: return "CRL is not yet valid"; case X509_V_ERR_CERT_HAS_EXPIRED: return "certificate has expired"; case X509_V_ERR_CRL_HAS_EXPIRED: return "CRL has expired"; case X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD: return "format error in certificate's notBefore field"; case X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD: return "format error in certificate's notAfter field"; case X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD: return "format error in CRL's lastUpdate field"; case X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD: return "format error in CRL's nextUpdate field"; case X509_V_ERR_OUT_OF_MEM: return "out of memory"; case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT: return "self signed certificate"; case X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN: return "self signed certificate in certificate chain"; case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY: return "unable to get local issuer certificate"; case X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE: return "unable to verify the first certificate"; case X509_V_ERR_CERT_CHAIN_TOO_LONG: return "certificate chain too 
long"; case X509_V_ERR_CERT_REVOKED: return "certificate revoked"; case X509_V_ERR_INVALID_CA: return "invalid CA certificate"; case X509_V_ERR_INVALID_NON_CA: return "invalid non-CA certificate (has CA markings)"; case X509_V_ERR_PATH_LENGTH_EXCEEDED: return "path length constraint exceeded"; case X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED: return "proxy path length constraint exceeded"; case X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED: return "proxy certificates not allowed, please set the appropriate flag"; case X509_V_ERR_INVALID_PURPOSE: return "unsupported certificate purpose"; case X509_V_ERR_CERT_UNTRUSTED: return "certificate not trusted"; case X509_V_ERR_CERT_REJECTED: return "certificate rejected"; case X509_V_ERR_APPLICATION_VERIFICATION: return "application verification failure"; case X509_V_ERR_SUBJECT_ISSUER_MISMATCH: return "subject issuer mismatch"; case X509_V_ERR_AKID_SKID_MISMATCH: return "authority and subject key identifier mismatch"; case X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH: return "authority and issuer serial number mismatch"; case X509_V_ERR_KEYUSAGE_NO_CERTSIGN: return "key usage does not include certificate signing"; case X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER: return "unable to get CRL issuer certificate"; case X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION: return "unhandled critical extension"; case X509_V_ERR_KEYUSAGE_NO_CRL_SIGN: return "key usage does not include CRL signing"; case X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE: return "key usage does not include digital signature"; case X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION: return "unhandled critical CRL extension"; case X509_V_ERR_INVALID_EXTENSION: return "invalid or inconsistent certificate extension"; case X509_V_ERR_INVALID_POLICY_EXTENSION: return "invalid or inconsistent certificate policy extension"; case X509_V_ERR_NO_EXPLICIT_POLICY: return "no explicit policy"; case X509_V_ERR_DIFFERENT_CRL_SCOPE: return "Different CRL scope"; case X509_V_ERR_UNSUPPORTED_EXTENSION_FEATURE: return 
"Unsupported extension feature"; case X509_V_ERR_UNNESTED_RESOURCE: return "RFC 3779 resource not subset of parent's resources"; case X509_V_ERR_PERMITTED_VIOLATION: return "permitted subtree violation"; case X509_V_ERR_EXCLUDED_VIOLATION: return "excluded subtree violation"; case X509_V_ERR_SUBTREE_MINMAX: return "name constraints minimum and maximum not supported"; case X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE: return "unsupported name constraint type"; case X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX: return "unsupported or invalid name constraint syntax"; case X509_V_ERR_UNSUPPORTED_NAME_SYNTAX: return "unsupported or invalid name syntax"; case X509_V_ERR_CRL_PATH_VALIDATION_ERROR: return "CRL path validation error"; case X509_V_ERR_HOSTNAME_MISMATCH: return "Hostname mismatch"; case X509_V_ERR_EMAIL_MISMATCH: return "Email address mismatch"; case X509_V_ERR_IP_ADDRESS_MISMATCH: return "IP address mismatch"; case X509_V_ERR_INVALID_CALL: return "Invalid certificate verification context"; case X509_V_ERR_STORE_LOOKUP: return "Issuer certificate lookup error"; case X509_V_ERR_NAME_CONSTRAINTS_WITHOUT_SANS: return "Issuer has name constraints but leaf has no SANs"; default: return "unknown certificate verification error"; } } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x509_v3.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include "internal.h" int X509v3_get_ext_count(const STACK_OF(X509_EXTENSION) *x) { if (x == NULL) { return 0; } return (int)sk_X509_EXTENSION_num(x); } int X509v3_get_ext_by_NID(const STACK_OF(X509_EXTENSION) *x, int nid, int lastpos) { const ASN1_OBJECT *obj = OBJ_nid2obj(nid); if (obj == NULL) { return -1; } return X509v3_get_ext_by_OBJ(x, obj, lastpos); } int X509v3_get_ext_by_OBJ(const STACK_OF(X509_EXTENSION) *sk, const ASN1_OBJECT *obj, int lastpos) { if (sk == NULL) { return -1; } lastpos++; if (lastpos < 0) { lastpos = 0; } int n = (int)sk_X509_EXTENSION_num(sk); for (; lastpos < n; lastpos++) { const X509_EXTENSION *ex = sk_X509_EXTENSION_value(sk, lastpos); if (OBJ_cmp(ex->object, obj) == 0) { return lastpos; } } return -1; } int X509v3_get_ext_by_critical(const STACK_OF(X509_EXTENSION) *sk, int crit, int lastpos) { if (sk == NULL) { return -1; } lastpos++; if (lastpos < 0) { lastpos = 0; } crit = !!crit; int n = (int)sk_X509_EXTENSION_num(sk); for (; lastpos < n; lastpos++) { const X509_EXTENSION *ex = sk_X509_EXTENSION_value(sk, lastpos); if (X509_EXTENSION_get_critical(ex) == crit) { return lastpos; } } return -1; } X509_EXTENSION *X509v3_get_ext(const STACK_OF(X509_EXTENSION) *x, int loc) { if (x == NULL || loc < 0 || sk_X509_EXTENSION_num(x) <= (size_t)loc) { return NULL; } else { return sk_X509_EXTENSION_value(x, loc); } } X509_EXTENSION *X509v3_delete_ext(STACK_OF(X509_EXTENSION) *x, int loc) { X509_EXTENSION *ret; if (x == NULL || loc < 0 || sk_X509_EXTENSION_num(x) <= (size_t)loc) { return NULL; } ret = sk_X509_EXTENSION_delete(x, loc); return ret; } STACK_OF(X509_EXTENSION) *X509v3_add_ext(STACK_OF(X509_EXTENSION) **x, const X509_EXTENSION *ex, int loc) { X509_EXTENSION *new_ex = NULL; STACK_OF(X509_EXTENSION) *sk = NULL; int free_sk = 0, n; if (x == NULL) { 
OPENSSL_PUT_ERROR(X509, ERR_R_PASSED_NULL_PARAMETER); goto err; } if (*x == NULL) { if ((sk = sk_X509_EXTENSION_new_null()) == NULL) { goto err; } free_sk = 1; } else { sk = *x; } n = (int)sk_X509_EXTENSION_num(sk); if (loc > n) { loc = n; } else if (loc < 0) { loc = n; } if ((new_ex = X509_EXTENSION_dup(ex)) == NULL) { goto err; } if (!sk_X509_EXTENSION_insert(sk, new_ex, loc)) { goto err; } if (*x == NULL) { *x = sk; } return sk; err: X509_EXTENSION_free(new_ex); if (free_sk) { sk_X509_EXTENSION_free(sk); } return NULL; } X509_EXTENSION *X509_EXTENSION_create_by_NID(X509_EXTENSION **ex, int nid, int crit, const ASN1_OCTET_STRING *data) { const ASN1_OBJECT *obj; X509_EXTENSION *ret; obj = OBJ_nid2obj(nid); if (obj == NULL) { OPENSSL_PUT_ERROR(X509, X509_R_UNKNOWN_NID); return NULL; } ret = X509_EXTENSION_create_by_OBJ(ex, obj, crit, data); return ret; } X509_EXTENSION *X509_EXTENSION_create_by_OBJ(X509_EXTENSION **ex, const ASN1_OBJECT *obj, int crit, const ASN1_OCTET_STRING *data) { X509_EXTENSION *ret; if ((ex == NULL) || (*ex == NULL)) { if ((ret = X509_EXTENSION_new()) == NULL) { return NULL; } } else { ret = *ex; } if (!X509_EXTENSION_set_object(ret, obj)) { goto err; } if (!X509_EXTENSION_set_critical(ret, crit)) { goto err; } if (!X509_EXTENSION_set_data(ret, data)) { goto err; } if ((ex != NULL) && (*ex == NULL)) { *ex = ret; } return ret; err: if ((ex == NULL) || (ret != *ex)) { X509_EXTENSION_free(ret); } return NULL; } int X509_EXTENSION_set_object(X509_EXTENSION *ex, const ASN1_OBJECT *obj) { if ((ex == NULL) || (obj == NULL)) { return 0; } ASN1_OBJECT_free(ex->object); ex->object = OBJ_dup(obj); return ex->object != NULL; } int X509_EXTENSION_set_critical(X509_EXTENSION *ex, int crit) { if (ex == NULL) { return 0; } // The critical field is DEFAULT FALSE, so non-critical extensions should omit // the value. ex->critical = crit ? 
ASN1_BOOLEAN_TRUE : ASN1_BOOLEAN_NONE; return 1; } int X509_EXTENSION_set_data(X509_EXTENSION *ex, const ASN1_OCTET_STRING *data) { int i; if (ex == NULL) { return 0; } i = ASN1_OCTET_STRING_set(ex->value, data->data, data->length); if (!i) { return 0; } return 1; } ASN1_OBJECT *X509_EXTENSION_get_object(const X509_EXTENSION *ex) { if (ex == NULL) { return NULL; } return ex->object; } ASN1_OCTET_STRING *X509_EXTENSION_get_data(const X509_EXTENSION *ex) { if (ex == NULL) { return NULL; } return ex->value; } int X509_EXTENSION_get_critical(const X509_EXTENSION *ex) { if (ex == NULL) { return 0; } if (ex->critical > 0) { return 1; } return 0; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x509_vfy.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include #include "../internal.h" #include "internal.h" static CRYPTO_EX_DATA_CLASS g_ex_data_class = CRYPTO_EX_DATA_CLASS_INIT_WITH_APP_DATA; // CRL score values // No unhandled critical extensions #define CRL_SCORE_NOCRITICAL 0x100 // certificate is within CRL scope #define CRL_SCORE_SCOPE 0x080 // CRL times valid #define CRL_SCORE_TIME 0x040 // Issuer name matches certificate #define CRL_SCORE_ISSUER_NAME 0x020 // If this score or above CRL is probably valid #define CRL_SCORE_VALID \ (CRL_SCORE_NOCRITICAL | CRL_SCORE_TIME | CRL_SCORE_SCOPE) // CRL issuer is certificate issuer #define CRL_SCORE_ISSUER_CERT 0x018 // CRL issuer is on certificate path #define CRL_SCORE_SAME_PATH 0x008 // CRL issuer matches CRL AKID #define CRL_SCORE_AKID 0x004 static int null_callback(int ok, 
X509_STORE_CTX *e);
static X509 *find_issuer(X509_STORE_CTX *ctx, STACK_OF(X509) *sk, X509 *x);
static int check_chain_extensions(X509_STORE_CTX *ctx);
static int check_name_constraints(X509_STORE_CTX *ctx);
static int check_id(X509_STORE_CTX *ctx);
static int check_trust(X509_STORE_CTX *ctx);
static int check_revocation(X509_STORE_CTX *ctx);
static int check_cert(X509_STORE_CTX *ctx);
static int check_policy(X509_STORE_CTX *ctx);
static X509 *get_trusted_issuer(X509_STORE_CTX *ctx, X509 *x);
static int get_crl_score(X509_STORE_CTX *ctx, X509 **pissuer, X509_CRL *crl,
                         X509 *x);
static int get_crl(X509_STORE_CTX *ctx, X509_CRL **pcrl, X509 *x);
static int crl_akid_check(X509_STORE_CTX *ctx, X509_CRL *crl, X509 **pissuer,
                          int *pcrl_score);
static int crl_crldp_check(X509 *x, X509_CRL *crl, int crl_score);
static int check_crl(X509_STORE_CTX *ctx, X509_CRL *crl);
static int cert_crl(X509_STORE_CTX *ctx, X509_CRL *crl, X509 *x);
static int internal_verify(X509_STORE_CTX *ctx);

// null_callback is the default verify callback. It passes the verification
// status |ok| through unchanged.
static int null_callback(int ok, X509_STORE_CTX *e) { return ok; }

// cert_self_signed checks if |x| is self-signed. If |x| is valid, it returns
// one and sets |*out_is_self_signed| to the result. If |x| is invalid, it
// returns zero.
static int cert_self_signed(X509 *x, int *out_is_self_signed) {
  if (!x509v3_cache_extensions(x)) {
    return 0;
  }
  *out_is_self_signed = (x->ex_flags & EXFLAG_SS) != 0;
  return 1;
}

// call_verify_cb invokes the configured verify callback with the candidate
// result |ok| and returns the callback's (possibly overridden) verdict.
static int call_verify_cb(int ok, X509_STORE_CTX *ctx) {
  ok = ctx->verify_cb(ok, ctx);
  // Historically, callbacks returning values like -1 would be treated as a mix
  // of success or failure. Ensure that callers check correctly.
  //
  // TODO(davidben): Also use this wrapper to constrain which errors may be
  // suppressed, and ensure all |verify_cb| calls remember to fill in an error.
  BSSL_CHECK(ok == 0 || ok == 1);
  return ok;
}

// Given a certificate try and find an exact match in the store
static X509 *lookup_cert_match(X509_STORE_CTX *ctx, X509 *x) {
  STACK_OF(X509) *certs;
  X509 *xtmp = NULL;
  size_t i;
  // Lookup all certs with matching subject name
  certs = X509_STORE_CTX_get1_certs(ctx, X509_get_subject_name(x));
  if (certs == NULL) {
    return NULL;
  }
  // Look for exact match
  for (i = 0; i < sk_X509_num(certs); i++) {
    xtmp = sk_X509_value(certs, i);
    if (!X509_cmp(xtmp, x)) {
      break;
    }
  }
  if (i < sk_X509_num(certs)) {
    X509_up_ref(xtmp);
  } else {
    xtmp = NULL;
  }
  sk_X509_pop_free(certs, X509_free);
  return xtmp;
}

int X509_verify_cert(X509_STORE_CTX *ctx) {
  X509 *chain_ss = NULL;
  int bad_chain = 0;
  X509_VERIFY_PARAM *param = ctx->param;
  int i, ok = 0;
  int j, retry, trust;
  STACK_OF(X509) *sktmp = NULL;
  {
    if (ctx->cert == NULL) {
      OPENSSL_PUT_ERROR(X509, X509_R_NO_CERT_SET_FOR_US_TO_VERIFY);
      ctx->error = X509_V_ERR_INVALID_CALL;
      return 0;
    }
    if (ctx->chain != NULL) {
      // This X509_STORE_CTX has already been used to verify a cert. We
      // cannot do another one.
      OPENSSL_PUT_ERROR(X509, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
      ctx->error = X509_V_ERR_INVALID_CALL;
      return 0;
    }
    if (ctx->param->flags &
        (X509_V_FLAG_EXTENDED_CRL_SUPPORT | X509_V_FLAG_USE_DELTAS)) {
      // We do not support indirect or delta CRLs. The flags still exist for
      // compatibility with bindings libraries, but to ensure we do not
      // inadvertently skip a CRL check that the caller expects, fail closed.
      OPENSSL_PUT_ERROR(X509, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
      ctx->error = X509_V_ERR_INVALID_CALL;
      return 0;
    }

    // first we make sure the chain we are going to build is present and that
    // the first entry is in place
    ctx->chain = sk_X509_new_null();
    if (ctx->chain == NULL || !sk_X509_push(ctx->chain, ctx->cert)) {
      ctx->error = X509_V_ERR_OUT_OF_MEM;
      goto end;
    }
    X509_up_ref(ctx->cert);
    ctx->last_untrusted = 1;

    // We use a temporary STACK so we can chop and hack at it.
    if (ctx->untrusted != NULL &&
        (sktmp = sk_X509_dup(ctx->untrusted)) == NULL) {
      ctx->error = X509_V_ERR_OUT_OF_MEM;
      goto end;
    }

    int num = (int)sk_X509_num(ctx->chain);
    X509 *x = sk_X509_value(ctx->chain, num - 1);
    // |param->depth| does not include the leaf certificate or the trust
    // anchor, so the maximum size is 2 more.
    int max_chain = param->depth >= INT_MAX - 2 ? INT_MAX : param->depth + 2;

    // Phase one: extend the chain from the leaf using untrusted certificates
    // (and, with TRUSTED_FIRST, stop as soon as a trusted issuer exists).
    for (;;) {
      if (num >= max_chain) {
        // FIXME: If this happens, we should take note of it and, if
        // appropriate, use the X509_V_ERR_CERT_CHAIN_TOO_LONG error code
        // later.
        break;
      }

      int is_self_signed;
      if (!cert_self_signed(x, &is_self_signed)) {
        ctx->error = X509_V_ERR_INVALID_EXTENSION;
        goto end;
      }

      // If we are self signed, we break
      if (is_self_signed) {
        break;
      }

      // If asked see if we can find issuer in trusted store first
      if (ctx->param->flags & X509_V_FLAG_TRUSTED_FIRST) {
        X509 *issuer = get_trusted_issuer(ctx, x);
        if (issuer != NULL) {
          // Free the certificate. It will be picked up again later.
          X509_free(issuer);
          break;
        }
      }

      // If we were passed a cert chain, use it first
      if (sktmp != NULL) {
        X509 *issuer = find_issuer(ctx, sktmp, x);
        if (issuer != NULL) {
          if (!sk_X509_push(ctx->chain, issuer)) {
            ctx->error = X509_V_ERR_OUT_OF_MEM;
            goto end;
          }
          X509_up_ref(issuer);
          (void)sk_X509_delete_ptr(sktmp, issuer);
          ctx->last_untrusted++;
          x = issuer;
          num++;
          // reparse the full chain for the next one
          continue;
        }
      }
      break;
    }

    // Remember how many untrusted certs we have
    j = num;

    // at this point, chain should contain a list of untrusted certificates.
    // We now need to add at least one trusted one, if possible, otherwise we
    // complain.
    do {
      // Examine last certificate in chain and see if it is self signed.
      i = (int)sk_X509_num(ctx->chain);
      x = sk_X509_value(ctx->chain, i - 1);

      int is_self_signed;
      if (!cert_self_signed(x, &is_self_signed)) {
        ctx->error = X509_V_ERR_INVALID_EXTENSION;
        goto end;
      }

      if (is_self_signed) {
        // we have a self signed certificate
        if (sk_X509_num(ctx->chain) == 1) {
          // We have a single self signed certificate: see if we can
          // find it in the store. We must have an exact match to avoid
          // possible impersonation.
          X509 *issuer = get_trusted_issuer(ctx, x);
          if (issuer == NULL || X509_cmp(x, issuer) != 0) {
            X509_free(issuer);
            ctx->error = X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT;
            ctx->current_cert = x;
            ctx->error_depth = i - 1;
            bad_chain = 1;
            if (!call_verify_cb(0, ctx)) {
              goto end;
            }
          } else {
            // We have a match: replace certificate with store
            // version so we get any trust settings.
            X509_free(x);
            x = issuer;
            (void)sk_X509_set(ctx->chain, i - 1, x);
            ctx->last_untrusted = 0;
          }
        } else {
          // extract and save self signed certificate for later use
          chain_ss = sk_X509_pop(ctx->chain);
          ctx->last_untrusted--;
          num--;
          j--;
          x = sk_X509_value(ctx->chain, num - 1);
        }
      }

      // We now lookup certs from the certificate store
      for (;;) {
        if (num >= max_chain) {
          // FIXME: If this happens, we should take note of it and, if
          // appropriate, use the X509_V_ERR_CERT_CHAIN_TOO_LONG error code
          // later.
          break;
        }
        if (!cert_self_signed(x, &is_self_signed)) {
          ctx->error = X509_V_ERR_INVALID_EXTENSION;
          goto end;
        }
        // If we are self signed, we break
        if (is_self_signed) {
          break;
        }
        X509 *issuer = get_trusted_issuer(ctx, x);
        if (issuer == NULL) {
          break;
        }
        x = issuer;
        if (!sk_X509_push(ctx->chain, x)) {
          X509_free(issuer);
          ctx->error = X509_V_ERR_OUT_OF_MEM;
          goto end;
        }
        num++;
      }

      // we now have our chain, lets check it...
      trust = check_trust(ctx);

      // If explicitly rejected error
      if (trust == X509_TRUST_REJECTED) {
        goto end;
      }

      // If it's not explicitly trusted then check if there is an alternative
      // chain that could be used. We only do this if we haven't already
      // checked via TRUSTED_FIRST and the user hasn't switched off alternate
      // chain checking
      retry = 0;
      if (trust != X509_TRUST_TRUSTED &&
          !(ctx->param->flags & X509_V_FLAG_TRUSTED_FIRST) &&
          !(ctx->param->flags & X509_V_FLAG_NO_ALT_CHAINS)) {
        while (j-- > 1) {
          X509 *issuer =
              get_trusted_issuer(ctx, sk_X509_value(ctx->chain, j - 1));
          // Check if we found an alternate chain
          if (issuer != NULL) {
            // Free up the found cert we'll add it again later
            X509_free(issuer);

            // Dump all the certs above this point - we've found an
            // alternate chain
            while (num > j) {
              X509_free(sk_X509_pop(ctx->chain));
              num--;
            }
            ctx->last_untrusted = (int)sk_X509_num(ctx->chain);
            retry = 1;
            break;
          }
        }
      }
    } while (retry);

    // If not explicitly trusted then indicate error unless it's a single
    // self signed certificate in which case we've indicated an error already
    // and set bad_chain == 1
    if (trust != X509_TRUST_TRUSTED && !bad_chain) {
      if (chain_ss == NULL ||
          !x509_check_issued_with_callback(ctx, x, chain_ss)) {
        if (ctx->last_untrusted >= num) {
          ctx->error = X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY;
        } else {
          ctx->error = X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT;
        }
        ctx->current_cert = x;
      } else {
        if (!sk_X509_push(ctx->chain, chain_ss)) {
          ctx->error = X509_V_ERR_OUT_OF_MEM;
          goto end;
        }
        num++;
        ctx->last_untrusted = num;
        ctx->current_cert = chain_ss;
        ctx->error = X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN;
        chain_ss = NULL;
      }

      ctx->error_depth = num - 1;
      bad_chain = 1;
      if (!call_verify_cb(0, ctx)) {
        goto end;
      }
    }

    // We have the chain complete: now we need to check its purpose
    if (!check_chain_extensions(ctx) ||  //
        !check_id(ctx) ||                //
        // We check revocation status after copying parameters because they may
        // be needed for CRL signature verification.
        !check_revocation(ctx) ||        //
        !internal_verify(ctx) ||         //
        !check_name_constraints(ctx) ||  //
        // TODO(davidben): Does |check_policy| still need to be conditioned on
        // |!bad_chain|? DoS concerns have been resolved.
        (!bad_chain && !check_policy(ctx))) {
      goto end;
    }

    ok = 1;
  }

end:
  sk_X509_free(sktmp);
  X509_free(chain_ss);

  // Safety net, error returns must set ctx->error
  if (!ok && ctx->error == X509_V_OK) {
    ctx->error = X509_V_ERR_UNSPECIFIED;
  }
  return ok;
}

// Given a STACK_OF(X509) find the issuer of cert (if any)
static X509 *find_issuer(X509_STORE_CTX *ctx, STACK_OF(X509) *sk, X509 *x) {
  size_t i;
  X509 *issuer;
  for (i = 0; i < sk_X509_num(sk); i++) {
    issuer = sk_X509_value(sk, i);
    if (x509_check_issued_with_callback(ctx, x, issuer)) {
      return issuer;
    }
  }
  return NULL;
}

// Given a possible certificate and issuer check them
int x509_check_issued_with_callback(X509_STORE_CTX *ctx, X509 *x,
                                    X509 *issuer) {
  int ret;
  ret = X509_check_issued(issuer, x);
  if (ret == X509_V_OK) {
    return 1;
  }
  // If we haven't asked for issuer errors don't set ctx
  if (!(ctx->param->flags & X509_V_FLAG_CB_ISSUER_CHECK)) {
    return 0;
  }
  ctx->error = ret;
  ctx->current_cert = x;
  return call_verify_cb(0, ctx);
}

// get_trusted_issuer looks up a trusted issuer of |x|, either in the
// explicitly configured trusted stack or, failing that, in the store. The
// caller owns the returned reference.
static X509 *get_trusted_issuer(X509_STORE_CTX *ctx, X509 *x) {
  X509 *issuer;
  if (ctx->trusted_stack != NULL) {
    // Ignore the store and use the configured stack instead.
    issuer = find_issuer(ctx, ctx->trusted_stack, x);
    if (issuer != NULL) {
      X509_up_ref(issuer);
    }
    return issuer;
  }
  if (!X509_STORE_CTX_get1_issuer(&issuer, ctx, x)) {
    return NULL;
  }
  return issuer;
}

// Check a certificate chains extensions for consistency with the supplied
// purpose
static int check_chain_extensions(X509_STORE_CTX *ctx) {
  int plen = 0;
  int purpose = ctx->param->purpose;

  // Check all untrusted certificates
  for (int i = 0; i < ctx->last_untrusted; i++) {
    X509 *x = sk_X509_value(ctx->chain, i);
    if (!(ctx->param->flags & X509_V_FLAG_IGNORE_CRITICAL) &&
        (x->ex_flags & EXFLAG_CRITICAL)) {
      ctx->error = X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION;
      ctx->error_depth = i;
      ctx->current_cert = x;
      if (!call_verify_cb(0, ctx)) {
        return 0;
      }
    }
    int must_be_ca = i > 0;
    if (must_be_ca && !X509_check_ca(x)) {
      ctx->error = X509_V_ERR_INVALID_CA;
      ctx->error_depth = i;
      ctx->current_cert = x;
      if (!call_verify_cb(0, ctx)) {
        return 0;
      }
    }
    if (ctx->param->purpose > 0 &&
        X509_check_purpose(x, purpose, must_be_ca) != 1) {
      ctx->error = X509_V_ERR_INVALID_PURPOSE;
      ctx->error_depth = i;
      ctx->current_cert = x;
      if (!call_verify_cb(0, ctx)) {
        return 0;
      }
    }
    // Check pathlen if not self issued
    if (i > 1 && !(x->ex_flags & EXFLAG_SI) && x->ex_pathlen != -1 &&
        plen > x->ex_pathlen + 1) {
      ctx->error = X509_V_ERR_PATH_LENGTH_EXCEEDED;
      ctx->error_depth = i;
      ctx->current_cert = x;
      if (!call_verify_cb(0, ctx)) {
        return 0;
      }
    }
    // Increment path length if not self issued
    if (!(x->ex_flags & EXFLAG_SI)) {
      plen++;
    }
  }
  return 1;
}

// reject_dns_name_in_common_name returns |X509_V_OK| if none of |x509|'s
// commonName attributes look like DNS names, and an error code otherwise.
static int reject_dns_name_in_common_name(X509 *x509) {
  const X509_NAME *name = X509_get_subject_name(x509);
  int i = -1;
  for (;;) {
    i = X509_NAME_get_index_by_NID(name, NID_commonName, i);
    if (i == -1) {
      return X509_V_OK;
    }

    const X509_NAME_ENTRY *entry = X509_NAME_get_entry(name, i);
    const ASN1_STRING *common_name = X509_NAME_ENTRY_get_data(entry);
    unsigned char *idval;
    int idlen = ASN1_STRING_to_UTF8(&idval, common_name);
    if (idlen < 0) {
      return X509_V_ERR_OUT_OF_MEM;
    }
    // Only process attributes that look like host names. Note it is
    // important that this check be mirrored in |X509_check_host|.
    int looks_like_dns = x509v3_looks_like_dns_name(idval, (size_t)idlen);
    OPENSSL_free(idval);
    if (looks_like_dns) {
      return X509_V_ERR_NAME_CONSTRAINTS_WITHOUT_SANS;
    }
  }
}

static int check_name_constraints(X509_STORE_CTX *ctx) {
  int i, j, rv;
  int has_name_constraints = 0;
  // Check name constraints for all certificates
  for (i = (int)sk_X509_num(ctx->chain) - 1; i >= 0; i--) {
    X509 *x = sk_X509_value(ctx->chain, i);
    // Ignore self issued certs unless last in chain
    if (i && (x->ex_flags & EXFLAG_SI)) {
      continue;
    }
    // Check against constraints for all certificates higher in chain
    // including trust anchor. Trust anchor not strictly speaking needed
    // but if it includes constraints it is to be assumed it expects them
    // to be obeyed.
    for (j = (int)sk_X509_num(ctx->chain) - 1; j > i; j--) {
      NAME_CONSTRAINTS *nc = sk_X509_value(ctx->chain, j)->nc;
      if (nc) {
        has_name_constraints = 1;
        rv = NAME_CONSTRAINTS_check(x, nc);
        switch (rv) {
          case X509_V_OK:
            continue;
          case X509_V_ERR_OUT_OF_MEM:
            ctx->error = rv;
            return 0;
          default:
            ctx->error = rv;
            ctx->error_depth = i;
            ctx->current_cert = x;
            if (!call_verify_cb(0, ctx)) {
              return 0;
            }
            break;
        }
      }
    }
  }

  // Name constraints do not match against the common name, but
  // |X509_check_host| still implements the legacy behavior where, on
  // certificates lacking a SAN list, DNS-like names in the common name are
  // checked instead.
  //
  // While we could apply the name constraints to the common name, name
  // constraints are rare enough that can hold such certificates to a higher
  // standard. Note this does not make "DNS-like" heuristic failures any
  // worse. A decorative common-name misidentified as a DNS name would fail
  // the name constraint anyway.
  X509 *leaf = sk_X509_value(ctx->chain, 0);
  if (has_name_constraints && leaf->altname == NULL) {
    rv = reject_dns_name_in_common_name(leaf);
    switch (rv) {
      case X509_V_OK:
        break;
      case X509_V_ERR_OUT_OF_MEM:
        ctx->error = rv;
        return 0;
      default:
        ctx->error = rv;
        ctx->error_depth = i;
        ctx->current_cert = leaf;
        if (!call_verify_cb(0, ctx)) {
          return 0;
        }
        break;
    }
  }
  return 1;
}

// check_id_error records an identity-check failure |errcode| against the leaf
// certificate and defers to the verify callback.
static int check_id_error(X509_STORE_CTX *ctx, int errcode) {
  ctx->error = errcode;
  ctx->current_cert = ctx->cert;
  ctx->error_depth = 0;
  return call_verify_cb(0, ctx);
}

// check_hosts returns one if |x| matches any of the reference hostnames in
// |param->hosts|, or if no hostnames were configured.
static int check_hosts(X509 *x, X509_VERIFY_PARAM *param) {
  size_t i;
  size_t n = sk_OPENSSL_STRING_num(param->hosts);
  char *name;

  for (i = 0; i < n; ++i) {
    name = sk_OPENSSL_STRING_value(param->hosts, i);
    if (X509_check_host(x, name, strlen(name), param->hostflags, NULL) > 0) {
      return 1;
    }
  }
  return n == 0;
}

// check_id checks the leaf certificate against any configured host, email,
// and IP identity in the verify parameters.
static int check_id(X509_STORE_CTX *ctx) {
  X509_VERIFY_PARAM *vpm = ctx->param;
  X509 *x = ctx->cert;
  if (vpm->poison) {
    if (!check_id_error(ctx, X509_V_ERR_INVALID_CALL)) {
      return 0;
    }
  }
  if (vpm->hosts && check_hosts(x, vpm) <= 0) {
    if (!check_id_error(ctx, X509_V_ERR_HOSTNAME_MISMATCH)) {
      return 0;
    }
  }
  if (vpm->email && X509_check_email(x, vpm->email, vpm->emaillen, 0) <= 0) {
    if (!check_id_error(ctx, X509_V_ERR_EMAIL_MISMATCH)) {
      return 0;
    }
  }
  if (vpm->ip && X509_check_ip(x, vpm->ip, vpm->iplen, 0) <= 0) {
    if (!check_id_error(ctx, X509_V_ERR_IP_ADDRESS_MISMATCH)) {
      return 0;
    }
  }
  return 1;
}

static int check_trust(X509_STORE_CTX *ctx) {
  X509 *x = NULL;
  // Check all trusted certificates in chain
  for (size_t i = ctx->last_untrusted; i < sk_X509_num(ctx->chain); i++) {
    x = sk_X509_value(ctx->chain, i);
    int trust = X509_check_trust(x, ctx->param->trust, 0);
    // If explicitly trusted return trusted
    if (trust == X509_TRUST_TRUSTED) {
      return X509_TRUST_TRUSTED;
    }
    // If explicitly rejected notify callback and reject if not
    // overridden.
    if (trust == X509_TRUST_REJECTED) {
      ctx->error_depth = (int)i;
      ctx->current_cert = x;
      ctx->error = X509_V_ERR_CERT_REJECTED;
      if (!call_verify_cb(0, ctx)) {
        return X509_TRUST_REJECTED;
      }
    }
  }
  // If we accept partial chains and have at least one trusted certificate
  // return success.
  if (ctx->param->flags & X509_V_FLAG_PARTIAL_CHAIN) {
    X509 *mx;
    if (ctx->last_untrusted < (int)sk_X509_num(ctx->chain)) {
      return X509_TRUST_TRUSTED;
    }
    x = sk_X509_value(ctx->chain, 0);
    mx = lookup_cert_match(ctx, x);
    if (mx) {
      (void)sk_X509_set(ctx->chain, 0, mx);
      X509_free(x);
      ctx->last_untrusted = 0;
      return X509_TRUST_TRUSTED;
    }
  }

  // If no trusted certs in chain at all return untrusted and allow
  // standard (no issuer cert) etc errors to be indicated.
  return X509_TRUST_UNTRUSTED;
}

// check_revocation performs CRL checks on the leaf certificate, or on the
// whole chain when CRL_CHECK_ALL is set. Returns one on success.
static int check_revocation(X509_STORE_CTX *ctx) {
  if (!(ctx->param->flags & X509_V_FLAG_CRL_CHECK)) {
    return 1;
  }
  int last;
  if (ctx->param->flags & X509_V_FLAG_CRL_CHECK_ALL) {
    last = (int)sk_X509_num(ctx->chain) - 1;
  } else {
    last = 0;
  }
  for (int i = 0; i <= last; i++) {
    ctx->error_depth = i;
    if (!check_cert(ctx)) {
      return 0;
    }
  }
  return 1;
}

// check_cert performs the CRL-based revocation check for the certificate at
// depth |ctx->error_depth| in the chain.
static int check_cert(X509_STORE_CTX *ctx) {
  X509_CRL *crl = NULL;
  int ok = 0, cnum = ctx->error_depth;
  X509 *x = sk_X509_value(ctx->chain, cnum);
  ctx->current_cert = x;
  ctx->current_crl_issuer = NULL;
  ctx->current_crl_score = 0;

  // Try to retrieve the relevant CRL. Note that |get_crl| sets
  // |current_crl_issuer| and |current_crl_score|, which |check_crl| then
  // reads.
  //
  // TODO(davidben): The awkward internal calling convention is a historical
  // artifact of when these functions were user-overridable callbacks, even
  // though there was no way to set them correctly. These callbacks have since
  // been removed, so we can pass input and output parameters more directly.
  if (!get_crl(ctx, &crl, x)) {
    ctx->error = X509_V_ERR_UNABLE_TO_GET_CRL;
    ok = call_verify_cb(0, ctx);
    goto err;
  }

  ctx->current_crl = crl;
  if (!check_crl(ctx, crl) ||  //
      !cert_crl(ctx, crl, x)) {
    goto err;
  }

  ok = 1;

err:
  X509_CRL_free(crl);
  ctx->current_crl = NULL;
  return ok;
}

// Check CRL times against values in X509_STORE_CTX
static int check_crl_time(X509_STORE_CTX *ctx, X509_CRL *crl, int notify) {
  if (ctx->param->flags & X509_V_FLAG_NO_CHECK_TIME) {
    return 1;
  }

  if (notify) {
    ctx->current_crl = crl;
  }
  int64_t ptime;
  if (ctx->param->flags & X509_V_FLAG_USE_CHECK_TIME) {
    ptime = ctx->param->check_time;
  } else {
    ptime = time(NULL);
  }

  int i = X509_cmp_time_posix(X509_CRL_get0_lastUpdate(crl), ptime);
  if (i == 0) {
    if (!notify) {
      return 0;
    }
    ctx->error = X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD;
    if (!call_verify_cb(0, ctx)) {
      return 0;
    }
  }

  if (i > 0) {
    if (!notify) {
      return 0;
    }
    ctx->error = X509_V_ERR_CRL_NOT_YET_VALID;
    if (!call_verify_cb(0, ctx)) {
      return 0;
    }
  }

  if (X509_CRL_get0_nextUpdate(crl)) {
    i = X509_cmp_time_posix(X509_CRL_get0_nextUpdate(crl), ptime);

    if (i == 0) {
      if (!notify) {
        return 0;
      }
      ctx->error = X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD;
      if (!call_verify_cb(0, ctx)) {
        return 0;
      }
    }

    if (i < 0) {
      if (!notify) {
        return 0;
      }
      ctx->error = X509_V_ERR_CRL_HAS_EXPIRED;
      if (!call_verify_cb(0, ctx)) {
        return 0;
      }
    }
  }

  if (notify) {
    ctx->current_crl = NULL;
  }

  return 1;
}

// get_crl_sk scans |crls| for the best-scoring CRL for the current
// certificate, updating |*pcrl|, |*pissuer| and |*pscore| when a better
// candidate is found. Returns one if the best score is high enough that the
// CRL is probably valid.
static int get_crl_sk(X509_STORE_CTX *ctx, X509_CRL **pcrl, X509 **pissuer,
                      int *pscore, STACK_OF(X509_CRL) *crls) {
  int crl_score, best_score = *pscore;
  X509 *x = ctx->current_cert;
  X509_CRL *best_crl = NULL;
  X509 *crl_issuer = NULL, *best_crl_issuer = NULL;

  for (size_t i = 0; i < sk_X509_CRL_num(crls); i++) {
    X509_CRL *crl = sk_X509_CRL_value(crls, i);
    crl_score = get_crl_score(ctx, &crl_issuer, crl, x);
    if (crl_score < best_score || crl_score == 0) {
      continue;
    }
    // If current CRL is equivalent use it if it is newer
    if (crl_score == best_score && best_crl != NULL) {
      int day, sec;
      if (ASN1_TIME_diff(&day, &sec, X509_CRL_get0_lastUpdate(best_crl),
                         X509_CRL_get0_lastUpdate(crl)) == 0) {
        continue;
      }
      // ASN1_TIME_diff never returns inconsistent signs for |day|
      // and |sec|.
      if (day <= 0 && sec <= 0) {
        continue;
      }
    }
    best_crl = crl;
    best_crl_issuer = crl_issuer;
    best_score = crl_score;
  }

  if (best_crl) {
    if (*pcrl) {
      X509_CRL_free(*pcrl);
    }
    *pcrl = best_crl;
    *pissuer = best_crl_issuer;
    *pscore = best_score;
    X509_CRL_up_ref(best_crl);
  }

  if (best_score >= CRL_SCORE_VALID) {
    return 1;
  }

  return 0;
}

// For a given CRL return how suitable it is for the supplied certificate
// 'x'. The return value is a mask of several criteria. If the issuer is not
// the certificate issuer this is returned in *pissuer.
static int get_crl_score(X509_STORE_CTX *ctx, X509 **pissuer, X509_CRL *crl,
                         X509 *x) {
  int crl_score = 0;

  // First see if we can reject CRL straight away

  // Invalid IDP cannot be processed
  if (crl->idp_flags & IDP_INVALID) {
    return 0;
  }
  // Reason codes and indirect CRLs are not supported.
  if (crl->idp_flags & (IDP_INDIRECT | IDP_REASONS)) {
    return 0;
  }
  // We do not support indirect CRLs, so the issuer names must match.
  if (X509_NAME_cmp(X509_get_issuer_name(x), X509_CRL_get_issuer(crl))) {
    return 0;
  }
  crl_score |= CRL_SCORE_ISSUER_NAME;

  if (!(crl->flags & EXFLAG_CRITICAL)) {
    crl_score |= CRL_SCORE_NOCRITICAL;
  }

  // Check expiry
  if (check_crl_time(ctx, crl, 0)) {
    crl_score |= CRL_SCORE_TIME;
  }

  // Check authority key ID and locate certificate issuer
  if (!crl_akid_check(ctx, crl, pissuer, &crl_score)) {
    // If we can't locate certificate issuer at this point forget it
    return 0;
  }

  // Check cert for matching CRL distribution points
  if (crl_crldp_check(x, crl, crl_score)) {
    crl_score |= CRL_SCORE_SCOPE;
  }

  return crl_score;
}

// crl_akid_check locates the certificate in the chain that issued |crl| by
// checking the CRL's authority key identifier, recording it in |*pissuer| and
// updating |*pcrl_score|. Returns one if an issuer was found.
static int crl_akid_check(X509_STORE_CTX *ctx, X509_CRL *crl, X509 **pissuer,
                          int *pcrl_score) {
  X509 *crl_issuer = NULL;
  X509_NAME *cnm = X509_CRL_get_issuer(crl);
  int cidx = ctx->error_depth;

  if ((size_t)cidx != sk_X509_num(ctx->chain) - 1) {
    cidx++;
  }

  crl_issuer = sk_X509_value(ctx->chain, cidx);

  if (X509_check_akid(crl_issuer, crl->akid) == X509_V_OK) {
    *pcrl_score |= CRL_SCORE_AKID | CRL_SCORE_ISSUER_CERT;
    *pissuer = crl_issuer;
    return 1;
  }

  for (cidx++; cidx < (int)sk_X509_num(ctx->chain); cidx++) {
    crl_issuer = sk_X509_value(ctx->chain, cidx);
    if (X509_NAME_cmp(X509_get_subject_name(crl_issuer), cnm)) {
      continue;
    }
    if (X509_check_akid(crl_issuer, crl->akid) == X509_V_OK) {
      *pcrl_score |= CRL_SCORE_AKID | CRL_SCORE_SAME_PATH;
      *pissuer = crl_issuer;
      return 1;
    }
  }

  return 0;
}

// Check for match between two dist point names: three separate cases. 1.
// Both are relative names and compare X509_NAME types. 2. One full, one
// relative. Compare X509_NAME to GENERAL_NAMES. 3. Both are full names and
// compare two GENERAL_NAMES. 4. One is NULL: automatic match.
static int idp_check_dp(DIST_POINT_NAME *a, DIST_POINT_NAME *b) { X509_NAME *nm = NULL; GENERAL_NAMES *gens = NULL; GENERAL_NAME *gena, *genb; size_t i, j; if (!a || !b) { return 1; } if (a->type == 1) { if (!a->dpname) { return 0; } // Case 1: two X509_NAME if (b->type == 1) { if (!b->dpname) { return 0; } if (!X509_NAME_cmp(a->dpname, b->dpname)) { return 1; } else { return 0; } } // Case 2: set name and GENERAL_NAMES appropriately nm = a->dpname; gens = b->name.fullname; } else if (b->type == 1) { if (!b->dpname) { return 0; } // Case 2: set name and GENERAL_NAMES appropriately gens = a->name.fullname; nm = b->dpname; } // Handle case 2 with one GENERAL_NAMES and one X509_NAME if (nm) { for (i = 0; i < sk_GENERAL_NAME_num(gens); i++) { gena = sk_GENERAL_NAME_value(gens, i); if (gena->type != GEN_DIRNAME) { continue; } if (!X509_NAME_cmp(nm, gena->d.directoryName)) { return 1; } } return 0; } // Else case 3: two GENERAL_NAMES for (i = 0; i < sk_GENERAL_NAME_num(a->name.fullname); i++) { gena = sk_GENERAL_NAME_value(a->name.fullname, i); for (j = 0; j < sk_GENERAL_NAME_num(b->name.fullname); j++) { genb = sk_GENERAL_NAME_value(b->name.fullname, j); if (!GENERAL_NAME_cmp(gena, genb)) { return 1; } } } return 0; } // Check CRLDP and IDP static int crl_crldp_check(X509 *x, X509_CRL *crl, int crl_score) { if (crl->idp_flags & IDP_ONLYATTR) { return 0; } if (x->ex_flags & EXFLAG_CA) { if (crl->idp_flags & IDP_ONLYUSER) { return 0; } } else { if (crl->idp_flags & IDP_ONLYCA) { return 0; } } for (size_t i = 0; i < sk_DIST_POINT_num(x->crldp); i++) { DIST_POINT *dp = sk_DIST_POINT_value(x->crldp, i); // Skip distribution points with a reasons field or a CRL issuer: // // We do not support CRLs partitioned by reason code. RFC 5280 requires CAs // include at least one DistributionPoint that covers all reasons. // // We also do not support indirect CRLs, and a CRL issuer can only match // indirect CRLs (RFC 5280, section 6.3.3, step b.1). // support. 
if (dp->reasons != NULL && dp->CRLissuer != NULL && (!crl->idp || idp_check_dp(dp->distpoint, crl->idp->distpoint))) { return 1; } } // If the CRL does not specify an issuing distribution point, allow it to // match anything. // // TODO(davidben): Does this match RFC 5280? It's hard to follow because RFC // 5280 starts from distribution points, while this starts from CRLs. return !crl->idp || !crl->idp->distpoint; } // Retrieve CRL corresponding to current certificate. static int get_crl(X509_STORE_CTX *ctx, X509_CRL **pcrl, X509 *x) { X509 *issuer = NULL; int crl_score = 0; X509_CRL *crl = NULL; STACK_OF(X509_CRL) *skcrl = NULL; if (get_crl_sk(ctx, &crl, &issuer, &crl_score, ctx->crls)) { goto done; } // Lookup CRLs from store skcrl = X509_STORE_CTX_get1_crls(ctx, X509_get_issuer_name(x)); // If no CRLs found and a near match from get_crl_sk use that if (!skcrl && crl) { goto done; } get_crl_sk(ctx, &crl, &issuer, &crl_score, skcrl); sk_X509_CRL_pop_free(skcrl, X509_CRL_free); done: // If we got any kind of CRL use it and return success if (crl) { ctx->current_crl_issuer = issuer; ctx->current_crl_score = crl_score; *pcrl = crl; return 1; } return 0; } // Check CRL validity static int check_crl(X509_STORE_CTX *ctx, X509_CRL *crl) { X509 *issuer = NULL; int cnum = ctx->error_depth; int chnum = (int)sk_X509_num(ctx->chain) - 1; // If we have an alternative CRL issuer cert use that. Otherwise, it is the // issuer of the current certificate. 
if (ctx->current_crl_issuer) { issuer = ctx->current_crl_issuer; } else if (cnum < chnum) { issuer = sk_X509_value(ctx->chain, cnum + 1); } else { issuer = sk_X509_value(ctx->chain, chnum); // If not self signed, can't check signature if (!x509_check_issued_with_callback(ctx, issuer, issuer)) { ctx->error = X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER; if (!call_verify_cb(0, ctx)) { return 0; } } } if (issuer) { // Check for cRLSign bit if keyUsage present if ((issuer->ex_flags & EXFLAG_KUSAGE) && !(issuer->ex_kusage & X509v3_KU_CRL_SIGN)) { ctx->error = X509_V_ERR_KEYUSAGE_NO_CRL_SIGN; if (!call_verify_cb(0, ctx)) { return 0; } } if (!(ctx->current_crl_score & CRL_SCORE_SCOPE)) { ctx->error = X509_V_ERR_DIFFERENT_CRL_SCOPE; if (!call_verify_cb(0, ctx)) { return 0; } } if (crl->idp_flags & IDP_INVALID) { ctx->error = X509_V_ERR_INVALID_EXTENSION; if (!call_verify_cb(0, ctx)) { return 0; } } if (!(ctx->current_crl_score & CRL_SCORE_TIME)) { if (!check_crl_time(ctx, crl, 1)) { return 0; } } // Attempt to get issuer certificate public key EVP_PKEY *ikey = X509_get0_pubkey(issuer); if (!ikey) { ctx->error = X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY; if (!call_verify_cb(0, ctx)) { return 0; } } else { // Verify CRL signature if (X509_CRL_verify(crl, ikey) <= 0) { ctx->error = X509_V_ERR_CRL_SIGNATURE_FAILURE; if (!call_verify_cb(0, ctx)) { return 0; } } } } return 1; } // Check certificate against CRL static int cert_crl(X509_STORE_CTX *ctx, X509_CRL *crl, X509 *x) { // The rules changed for this... previously if a CRL contained unhandled // critical extensions it could still be used to indicate a certificate // was revoked. This has since been changed since critical extension can // change the meaning of CRL entries. if (!(ctx->param->flags & X509_V_FLAG_IGNORE_CRITICAL) && (crl->flags & EXFLAG_CRITICAL)) { ctx->error = X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION; if (!call_verify_cb(0, ctx)) { return 0; } } // Look for serial number of certificate in CRL. 
X509_REVOKED *rev; if (X509_CRL_get0_by_cert(crl, &rev, x)) { ctx->error = X509_V_ERR_CERT_REVOKED; if (!call_verify_cb(0, ctx)) { return 0; } } return 1; } static int check_policy(X509_STORE_CTX *ctx) { X509 *current_cert = NULL; int ret = X509_policy_check(ctx->chain, ctx->param->policies, ctx->param->flags, ¤t_cert); if (ret != X509_V_OK) { ctx->current_cert = current_cert; ctx->error = ret; if (ret == X509_V_ERR_OUT_OF_MEM) { return 0; } return call_verify_cb(0, ctx); } return 1; } static int check_cert_time(X509_STORE_CTX *ctx, X509 *x) { if (ctx->param->flags & X509_V_FLAG_NO_CHECK_TIME) { return 1; } int64_t ptime; if (ctx->param->flags & X509_V_FLAG_USE_CHECK_TIME) { ptime = ctx->param->check_time; } else { ptime = time(NULL); } int i = X509_cmp_time_posix(X509_get_notBefore(x), ptime); if (i == 0) { ctx->error = X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD; ctx->current_cert = x; if (!call_verify_cb(0, ctx)) { return 0; } } if (i > 0) { ctx->error = X509_V_ERR_CERT_NOT_YET_VALID; ctx->current_cert = x; if (!call_verify_cb(0, ctx)) { return 0; } } i = X509_cmp_time_posix(X509_get_notAfter(x), ptime); if (i == 0) { ctx->error = X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD; ctx->current_cert = x; if (!call_verify_cb(0, ctx)) { return 0; } } if (i < 0) { ctx->error = X509_V_ERR_CERT_HAS_EXPIRED; ctx->current_cert = x; if (!call_verify_cb(0, ctx)) { return 0; } } return 1; } static int internal_verify(X509_STORE_CTX *ctx) { // TODO(davidben): This logic is incredibly confusing. Rewrite this: // // First, don't allow the verify callback to suppress // X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY, which will simplify the // signature check. Then replace jumping into the middle of the loop. It's // trying to ensure that all certificates see |check_cert_time|, then checking // the root's self signature when requested, but not breaking partial chains // in the process. 
int n = (int)sk_X509_num(ctx->chain); ctx->error_depth = n - 1; n--; X509 *xi = sk_X509_value(ctx->chain, n); X509 *xs; if (x509_check_issued_with_callback(ctx, xi, xi)) { xs = xi; } else { if (ctx->param->flags & X509_V_FLAG_PARTIAL_CHAIN) { xs = xi; goto check_cert; } if (n <= 0) { ctx->error = X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE; ctx->current_cert = xi; return call_verify_cb(0, ctx); } n--; ctx->error_depth = n; xs = sk_X509_value(ctx->chain, n); } // ctx->error=0; not needed while (n >= 0) { ctx->error_depth = n; // Skip signature check for self signed certificates unless // explicitly asked for. It doesn't add any security and just wastes // time. if (xs != xi || (ctx->param->flags & X509_V_FLAG_CHECK_SS_SIGNATURE)) { EVP_PKEY *pkey = X509_get0_pubkey(xi); if (pkey == NULL) { ctx->error = X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY; ctx->current_cert = xi; if (!call_verify_cb(0, ctx)) { return 0; } } else if (X509_verify(xs, pkey) <= 0) { ctx->error = X509_V_ERR_CERT_SIGNATURE_FAILURE; ctx->current_cert = xs; if (!call_verify_cb(0, ctx)) { return 0; } } } check_cert: if (!check_cert_time(ctx, xs)) { return 0; } // The last error (if any) is still in the error value ctx->current_cert = xs; if (!call_verify_cb(1, ctx)) { return 0; } n--; if (n >= 0) { xi = xs; xs = sk_X509_value(ctx->chain, n); } } return 1; } int X509_cmp_current_time(const ASN1_TIME *ctm) { return X509_cmp_time_posix(ctm, time(NULL)); } int X509_cmp_time(const ASN1_TIME *ctm, const time_t *cmp_time) { int64_t compare_time = (cmp_time == NULL) ? time(NULL) : *cmp_time; return X509_cmp_time_posix(ctm, compare_time); } int X509_cmp_time_posix(const ASN1_TIME *ctm, int64_t cmp_time) { int64_t ctm_time; if (!ASN1_TIME_to_posix(ctm, &ctm_time)) { return 0; } // The return value 0 is reserved for errors. return (ctm_time - cmp_time <= 0) ? 
-1 : 1; } ASN1_TIME *X509_gmtime_adj(ASN1_TIME *s, long offset_sec) { return X509_time_adj(s, offset_sec, NULL); } ASN1_TIME *X509_time_adj(ASN1_TIME *s, long offset_sec, const time_t *in_tm) { return X509_time_adj_ex(s, 0, offset_sec, in_tm); } ASN1_TIME *X509_time_adj_ex(ASN1_TIME *s, int offset_day, long offset_sec, const time_t *in_tm) { int64_t t = 0; if (in_tm) { t = *in_tm; } else { t = time(NULL); } return ASN1_TIME_adj(s, t, offset_day, offset_sec); } int X509_STORE_CTX_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func) { return CRYPTO_get_ex_new_index_ex(&g_ex_data_class, argl, argp, free_func); } int X509_STORE_CTX_set_ex_data(X509_STORE_CTX *ctx, int idx, void *data) { return CRYPTO_set_ex_data(&ctx->ex_data, idx, data); } void *X509_STORE_CTX_get_ex_data(X509_STORE_CTX *ctx, int idx) { return CRYPTO_get_ex_data(&ctx->ex_data, idx); } int X509_STORE_CTX_get_error(const X509_STORE_CTX *ctx) { return ctx->error; } void X509_STORE_CTX_set_error(X509_STORE_CTX *ctx, int err) { ctx->error = err; } int X509_STORE_CTX_get_error_depth(const X509_STORE_CTX *ctx) { return ctx->error_depth; } X509 *X509_STORE_CTX_get_current_cert(const X509_STORE_CTX *ctx) { return ctx->current_cert; } STACK_OF(X509) *X509_STORE_CTX_get_chain(const X509_STORE_CTX *ctx) { return ctx->chain; } STACK_OF(X509) *X509_STORE_CTX_get0_chain(const X509_STORE_CTX *ctx) { return ctx->chain; } STACK_OF(X509) *X509_STORE_CTX_get1_chain(const X509_STORE_CTX *ctx) { if (!ctx->chain) { return NULL; } return X509_chain_up_ref(ctx->chain); } X509_CRL *X509_STORE_CTX_get0_current_crl(const X509_STORE_CTX *ctx) { return ctx->current_crl; } X509_STORE_CTX *X509_STORE_CTX_get0_parent_ctx(const X509_STORE_CTX *ctx) { // In OpenSSL, an |X509_STORE_CTX| sometimes has a parent context during CRL // path validation for indirect CRLs. We require the CRL to be issued // somewhere along the certificate path, so this is always NULL. 
return NULL; } void X509_STORE_CTX_set_chain(X509_STORE_CTX *ctx, STACK_OF(X509) *sk) { ctx->untrusted = sk; } STACK_OF(X509) *X509_STORE_CTX_get0_untrusted(const X509_STORE_CTX *ctx) { return ctx->untrusted; } void X509_STORE_CTX_set0_crls(X509_STORE_CTX *ctx, STACK_OF(X509_CRL) *sk) { ctx->crls = sk; } int X509_STORE_CTX_set_purpose(X509_STORE_CTX *ctx, int purpose) { // If |purpose| is zero, this function historically silently did nothing. if (purpose == 0) { return 1; } const X509_PURPOSE *pobj = X509_PURPOSE_get0(purpose); if (pobj == NULL) { OPENSSL_PUT_ERROR(X509, X509_R_UNKNOWN_PURPOSE_ID); return 0; } int trust = X509_PURPOSE_get_trust(pobj); if (!X509_STORE_CTX_set_trust(ctx, trust)) { return 0; } if (ctx->param->purpose == 0) { ctx->param->purpose = purpose; } return 1; } int X509_STORE_CTX_set_trust(X509_STORE_CTX *ctx, int trust) { // If |trust| is zero, this function historically silently did nothing. if (trust == 0) { return 1; } if (!X509_is_valid_trust_id(trust)) { OPENSSL_PUT_ERROR(X509, X509_R_UNKNOWN_TRUST_ID); return 0; } if (ctx->param->trust == 0) { ctx->param->trust = trust; } return 1; } X509_STORE_CTX *X509_STORE_CTX_new(void) { return reinterpret_cast( OPENSSL_zalloc(sizeof(X509_STORE_CTX))); } void X509_STORE_CTX_free(X509_STORE_CTX *ctx) { if (ctx == NULL) { return; } X509_STORE_CTX_cleanup(ctx); OPENSSL_free(ctx); } int X509_STORE_CTX_init(X509_STORE_CTX *ctx, X509_STORE *store, X509 *x509, STACK_OF(X509) *chain) { X509_STORE_CTX_cleanup(ctx); ctx->ctx = store; ctx->cert = x509; ctx->untrusted = chain; CRYPTO_new_ex_data(&ctx->ex_data); if (store == NULL) { OPENSSL_PUT_ERROR(X509, ERR_R_PASSED_NULL_PARAMETER); goto err; } ctx->param = X509_VERIFY_PARAM_new(); if (!ctx->param) { goto err; } // Inherit callbacks and flags from X509_STORE. 
ctx->verify_cb = store->verify_cb; if (!X509_VERIFY_PARAM_inherit(ctx->param, store->param) || !X509_VERIFY_PARAM_inherit(ctx->param, X509_VERIFY_PARAM_lookup("default"))) { goto err; } if (store->verify_cb) { ctx->verify_cb = store->verify_cb; } else { ctx->verify_cb = null_callback; } return 1; err: CRYPTO_free_ex_data(&g_ex_data_class, ctx, &ctx->ex_data); if (ctx->param != NULL) { X509_VERIFY_PARAM_free(ctx->param); } OPENSSL_memset(ctx, 0, sizeof(X509_STORE_CTX)); return 0; } // Set alternative lookup method: just a STACK of trusted certificates. This // avoids X509_STORE nastiness where it isn't needed. void X509_STORE_CTX_set0_trusted_stack(X509_STORE_CTX *ctx, STACK_OF(X509) *sk) { ctx->trusted_stack = sk; } void X509_STORE_CTX_trusted_stack(X509_STORE_CTX *ctx, STACK_OF(X509) *sk) { X509_STORE_CTX_set0_trusted_stack(ctx, sk); } void X509_STORE_CTX_cleanup(X509_STORE_CTX *ctx) { CRYPTO_free_ex_data(&g_ex_data_class, ctx, &(ctx->ex_data)); X509_VERIFY_PARAM_free(ctx->param); sk_X509_pop_free(ctx->chain, X509_free); OPENSSL_memset(ctx, 0, sizeof(X509_STORE_CTX)); } void X509_STORE_CTX_set_depth(X509_STORE_CTX *ctx, int depth) { X509_VERIFY_PARAM_set_depth(ctx->param, depth); } void X509_STORE_CTX_set_flags(X509_STORE_CTX *ctx, unsigned long flags) { X509_VERIFY_PARAM_set_flags(ctx->param, flags); } void X509_STORE_CTX_set_time_posix(X509_STORE_CTX *ctx, unsigned long flags, int64_t t) { X509_VERIFY_PARAM_set_time_posix(ctx->param, t); } void X509_STORE_CTX_set_time(X509_STORE_CTX *ctx, unsigned long flags, time_t t) { X509_STORE_CTX_set_time_posix(ctx, flags, t); } X509 *X509_STORE_CTX_get0_cert(const X509_STORE_CTX *ctx) { return ctx->cert; } void X509_STORE_CTX_set_verify_cb(X509_STORE_CTX *ctx, int (*verify_cb)(int, X509_STORE_CTX *)) { ctx->verify_cb = verify_cb; } int X509_STORE_CTX_set_default(X509_STORE_CTX *ctx, const char *name) { const X509_VERIFY_PARAM *param = X509_VERIFY_PARAM_lookup(name); if (!param) { return 0; } return 
X509_VERIFY_PARAM_inherit(ctx->param, param); } X509_VERIFY_PARAM *X509_STORE_CTX_get0_param(X509_STORE_CTX *ctx) { return ctx->param; } void X509_STORE_CTX_set0_param(X509_STORE_CTX *ctx, X509_VERIFY_PARAM *param) { if (ctx->param) { X509_VERIFY_PARAM_free(ctx->param); } ctx->param = param; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x509_vpm.cc ================================================ /* * Copyright 2004-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../internal.h" #include "internal.h" // X509_VERIFY_PARAM functions #define SET_HOST 0 #define ADD_HOST 1 static void str_free(char *s) { OPENSSL_free(s); } static int int_x509_param_set_hosts(X509_VERIFY_PARAM *param, int mode, const char *name, size_t namelen) { char *copy; if (name == NULL || namelen == 0) { // Unlike OpenSSL, we reject trying to set or add an empty name. return 0; } // Refuse names with embedded NUL bytes. // XXX: Do we need to push an error onto the error stack? 
if (name && OPENSSL_memchr(name, '\0', namelen)) { return 0; } if (mode == SET_HOST && param->hosts) { sk_OPENSSL_STRING_pop_free(param->hosts, str_free); param->hosts = NULL; } copy = OPENSSL_strndup(name, namelen); if (copy == NULL) { return 0; } if (param->hosts == NULL && (param->hosts = sk_OPENSSL_STRING_new_null()) == NULL) { OPENSSL_free(copy); return 0; } if (!sk_OPENSSL_STRING_push(param->hosts, copy)) { OPENSSL_free(copy); if (sk_OPENSSL_STRING_num(param->hosts) == 0) { sk_OPENSSL_STRING_free(param->hosts); param->hosts = NULL; } return 0; } return 1; } X509_VERIFY_PARAM *X509_VERIFY_PARAM_new(void) { X509_VERIFY_PARAM *param = reinterpret_cast( OPENSSL_zalloc(sizeof(X509_VERIFY_PARAM))); if (!param) { return NULL; } param->depth = -1; return param; } void X509_VERIFY_PARAM_free(X509_VERIFY_PARAM *param) { if (param == NULL) { return; } sk_ASN1_OBJECT_pop_free(param->policies, ASN1_OBJECT_free); sk_OPENSSL_STRING_pop_free(param->hosts, str_free); OPENSSL_free(param->email); OPENSSL_free(param->ip); OPENSSL_free(param); } static int should_copy(int dest_is_set, int src_is_set, int prefer_src) { if (prefer_src) { // We prefer the source, so as long as there is a value to copy, copy it. return src_is_set; } // We prefer the destination, so only copy if the destination is unset. return src_is_set && !dest_is_set; } static void copy_int_param(int *dest, const int *src, int default_val, int prefer_src) { if (should_copy(*dest != default_val, *src != default_val, prefer_src)) { *dest = *src; } } // x509_verify_param_copy copies fields from |src| to |dest|. If both |src| and // |dest| have some field set, |prefer_src| determines whether |src| or |dest|'s // version is used. 
static int x509_verify_param_copy(X509_VERIFY_PARAM *dest, const X509_VERIFY_PARAM *src, int prefer_src) { if (src == NULL) { return 1; } copy_int_param(&dest->purpose, &src->purpose, /*default_val=*/0, prefer_src); copy_int_param(&dest->trust, &src->trust, /*default_val=*/0, prefer_src); copy_int_param(&dest->depth, &src->depth, /*default_val=*/-1, prefer_src); // |check_time|, unlike all other parameters, does not honor |prefer_src|. // This means |X509_VERIFY_PARAM_set1| will not overwrite it. This behavior // comes from OpenSSL but may have been a bug. if (!(dest->flags & X509_V_FLAG_USE_CHECK_TIME)) { dest->check_time = src->check_time; // The source |X509_V_FLAG_USE_CHECK_TIME| flag, if set, is copied below. } dest->flags |= src->flags; if (should_copy(dest->policies != NULL, src->policies != NULL, prefer_src)) { if (!X509_VERIFY_PARAM_set1_policies(dest, src->policies)) { return 0; } } if (should_copy(dest->hosts != NULL, src->hosts != NULL, prefer_src)) { sk_OPENSSL_STRING_pop_free(dest->hosts, str_free); dest->hosts = NULL; if (src->hosts) { dest->hosts = sk_OPENSSL_STRING_deep_copy(src->hosts, OPENSSL_strdup, str_free); if (dest->hosts == NULL) { return 0; } // Copy the host flags if and only if we're copying the host list. Note // this means mechanisms like |X509_STORE_CTX_set_default| cannot be used // to set host flags. E.g. we cannot change the defaults using // |kDefaultParam| below. dest->hostflags = src->hostflags; } } if (should_copy(dest->email != NULL, src->email != NULL, prefer_src)) { if (!X509_VERIFY_PARAM_set1_email(dest, src->email, src->emaillen)) { return 0; } } if (should_copy(dest->ip != NULL, src->ip != NULL, prefer_src)) { if (!X509_VERIFY_PARAM_set1_ip(dest, src->ip, src->iplen)) { return 0; } } dest->poison = src->poison; return 1; } int X509_VERIFY_PARAM_inherit(X509_VERIFY_PARAM *dest, const X509_VERIFY_PARAM *src) { // Prefer the destination. That is, this function only changes unset // parameters in |dest|. 
return x509_verify_param_copy(dest, src, /*prefer_src=*/0); } int X509_VERIFY_PARAM_set1(X509_VERIFY_PARAM *to, const X509_VERIFY_PARAM *from) { // Prefer the source. That is, values in |to| are only preserved if they were // unset in |from|. return x509_verify_param_copy(to, from, /*prefer_src=*/1); } static int int_x509_param_set1(char **pdest, size_t *pdestlen, const char *src, size_t srclen) { void *tmp; if (src == NULL || srclen == 0) { // Unlike OpenSSL, we do not allow an empty string to disable previously // configured checks. return 0; } tmp = OPENSSL_memdup(src, srclen); if (!tmp) { return 0; } if (*pdest) { OPENSSL_free(*pdest); } *pdest = reinterpret_cast(tmp); if (pdestlen) { *pdestlen = srclen; } return 1; } int X509_VERIFY_PARAM_set_flags(X509_VERIFY_PARAM *param, unsigned long flags) { param->flags |= flags; return 1; } int X509_VERIFY_PARAM_clear_flags(X509_VERIFY_PARAM *param, unsigned long flags) { param->flags &= ~flags; return 1; } unsigned long X509_VERIFY_PARAM_get_flags(const X509_VERIFY_PARAM *param) { return param->flags; } int X509_VERIFY_PARAM_set_purpose(X509_VERIFY_PARAM *param, int purpose) { if (X509_PURPOSE_get0(purpose) == NULL) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_INVALID_PURPOSE); return 0; } param->purpose = purpose; return 1; } int X509_VERIFY_PARAM_set_trust(X509_VERIFY_PARAM *param, int trust) { if (!X509_is_valid_trust_id(trust)) { OPENSSL_PUT_ERROR(X509, X509_R_UNKNOWN_TRUST_ID); return 0; } param->trust = trust; return 1; } void X509_VERIFY_PARAM_set_depth(X509_VERIFY_PARAM *param, int depth) { param->depth = depth; } void X509_VERIFY_PARAM_set_time_posix(X509_VERIFY_PARAM *param, int64_t t) { param->check_time = t; param->flags |= X509_V_FLAG_USE_CHECK_TIME; } void X509_VERIFY_PARAM_set_time(X509_VERIFY_PARAM *param, time_t t) { X509_VERIFY_PARAM_set_time_posix(param, t); } int X509_VERIFY_PARAM_add0_policy(X509_VERIFY_PARAM *param, ASN1_OBJECT *policy) { if (!param->policies) { param->policies = 
sk_ASN1_OBJECT_new_null(); if (!param->policies) { return 0; } } if (!sk_ASN1_OBJECT_push(param->policies, policy)) { return 0; } return 1; } int X509_VERIFY_PARAM_set1_policies(X509_VERIFY_PARAM *param, const STACK_OF(ASN1_OBJECT) *policies) { if (!param) { return 0; } sk_ASN1_OBJECT_pop_free(param->policies, ASN1_OBJECT_free); if (!policies) { param->policies = NULL; return 1; } param->policies = sk_ASN1_OBJECT_deep_copy(policies, OBJ_dup, ASN1_OBJECT_free); if (!param->policies) { return 0; } return 1; } int X509_VERIFY_PARAM_set1_host(X509_VERIFY_PARAM *param, const char *name, size_t namelen) { if (!int_x509_param_set_hosts(param, SET_HOST, name, namelen)) { param->poison = 1; return 0; } return 1; } int X509_VERIFY_PARAM_add1_host(X509_VERIFY_PARAM *param, const char *name, size_t namelen) { if (!int_x509_param_set_hosts(param, ADD_HOST, name, namelen)) { param->poison = 1; return 0; } return 1; } void X509_VERIFY_PARAM_set_hostflags(X509_VERIFY_PARAM *param, unsigned int flags) { param->hostflags = flags; } int X509_VERIFY_PARAM_set1_email(X509_VERIFY_PARAM *param, const char *email, size_t emaillen) { if (OPENSSL_memchr(email, '\0', emaillen) != NULL || !int_x509_param_set1(¶m->email, ¶m->emaillen, email, emaillen)) { param->poison = 1; return 0; } return 1; } int X509_VERIFY_PARAM_set1_ip(X509_VERIFY_PARAM *param, const unsigned char *ip, size_t iplen) { if ((iplen != 4 && iplen != 16) || !int_x509_param_set1((char **)¶m->ip, ¶m->iplen, (char *)ip, iplen)) { param->poison = 1; return 0; } return 1; } int X509_VERIFY_PARAM_set1_ip_asc(X509_VERIFY_PARAM *param, const char *ipasc) { unsigned char ipout[16]; size_t iplen; iplen = (size_t)x509v3_a2i_ipadd(ipout, ipasc); if (iplen == 0) { return 0; } return X509_VERIFY_PARAM_set1_ip(param, ipout, iplen); } int X509_VERIFY_PARAM_get_depth(const X509_VERIFY_PARAM *param) { return param->depth; } static const X509_VERIFY_PARAM kDefaultParam = { /*check_time=*/0, /*flags=*/X509_V_FLAG_TRUSTED_FIRST, /*purpose=*/0, 
/*trust=*/0, /*depth=*/100, /*policies=*/nullptr, /*hosts=*/nullptr, /*hostflags=*/0, /*email=*/nullptr, /*emaillen=*/0, /*ip=*/nullptr, /*iplen=*/0, /*poison=*/0, }; static const X509_VERIFY_PARAM kSMIMESignParam = { /*check_time=*/0, /*flags=*/0, /*purpose=*/X509_PURPOSE_SMIME_SIGN, /*trust=*/X509_TRUST_EMAIL, /*depth=*/-1, /*policies=*/nullptr, /*hosts=*/nullptr, /*hostflags=*/0, /*email=*/nullptr, /*emaillen=*/0, /*ip=*/nullptr, /*iplen=*/0, /*poison=*/0, }; static const X509_VERIFY_PARAM kSSLClientParam = { /*check_time=*/0, /*flags=*/0, /*purpose=*/X509_PURPOSE_SSL_CLIENT, /*trust=*/X509_TRUST_SSL_CLIENT, /*depth=*/-1, /*policies=*/nullptr, /*hosts=*/nullptr, /*hostflags=*/0, /*email=*/nullptr, /*emaillen=*/0, /*ip=*/nullptr, /*iplen=*/0, /*poison=*/0, }; static const X509_VERIFY_PARAM kSSLServerParam = { /*check_time=*/0, /*flags=*/0, /*purpose=*/X509_PURPOSE_SSL_SERVER, /*trust=*/X509_TRUST_SSL_SERVER, /*depth=*/-1, /*policies=*/nullptr, /*hosts=*/nullptr, /*hostflags=*/0, /*email=*/nullptr, /*emaillen=*/0, /*ip=*/nullptr, /*iplen=*/0, /*poison=*/0, }; const X509_VERIFY_PARAM *X509_VERIFY_PARAM_lookup(const char *name) { if (strcmp(name, "default") == 0) { return &kDefaultParam; } if (strcmp(name, "pkcs7") == 0) { // PKCS#7 and S/MIME signing use the same defaults. return &kSMIMESignParam; } if (strcmp(name, "smime_sign") == 0) { return &kSMIMESignParam; } if (strcmp(name, "ssl_client") == 0) { return &kSSLClientParam; } if (strcmp(name, "ssl_server") == 0) { return &kSSLServerParam; } return NULL; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x509cset.cc ================================================ /* * Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "../asn1/internal.h" #include "../internal.h" #include "internal.h" int X509_CRL_set_version(X509_CRL *x, long version) { if (x == NULL) { return 0; } if (version < X509_CRL_VERSION_1 || version > X509_CRL_VERSION_2) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_VERSION); return 0; } // v1(0) is default and is represented by omitting the version. if (version == X509_CRL_VERSION_1) { ASN1_INTEGER_free(x->crl->version); x->crl->version = NULL; return 1; } if (x->crl->version == NULL) { x->crl->version = ASN1_INTEGER_new(); if (x->crl->version == NULL) { return 0; } } return ASN1_INTEGER_set_int64(x->crl->version, version); } int X509_CRL_set_issuer_name(X509_CRL *x, X509_NAME *name) { if ((x == NULL) || (x->crl == NULL)) { return 0; } return (X509_NAME_set(&x->crl->issuer, name)); } int X509_CRL_set1_lastUpdate(X509_CRL *x, const ASN1_TIME *tm) { ASN1_TIME *in; if (x == NULL) { return 0; } in = x->crl->lastUpdate; if (in != tm) { in = ASN1_STRING_dup(tm); if (in != NULL) { ASN1_TIME_free(x->crl->lastUpdate); x->crl->lastUpdate = in; } } return in != NULL; } int X509_CRL_set1_nextUpdate(X509_CRL *x, const ASN1_TIME *tm) { ASN1_TIME *in; if (x == NULL) { return 0; } in = x->crl->nextUpdate; if (in != tm) { in = ASN1_STRING_dup(tm); if (in != NULL) { ASN1_TIME_free(x->crl->nextUpdate); x->crl->nextUpdate = in; } } return in != NULL; } int X509_CRL_sort(X509_CRL *c) { // Sort the data so it will be written in serial number order. 
sk_X509_REVOKED_sort(c->crl->revoked); asn1_encoding_clear(&c->crl->enc); return 1; } int X509_CRL_up_ref(X509_CRL *crl) { CRYPTO_refcount_inc(&crl->references); return 1; } long X509_CRL_get_version(const X509_CRL *crl) { return ASN1_INTEGER_get(crl->crl->version); } const ASN1_TIME *X509_CRL_get0_lastUpdate(const X509_CRL *crl) { return crl->crl->lastUpdate; } const ASN1_TIME *X509_CRL_get0_nextUpdate(const X509_CRL *crl) { return crl->crl->nextUpdate; } ASN1_TIME *X509_CRL_get_lastUpdate(X509_CRL *crl) { return crl->crl->lastUpdate; } ASN1_TIME *X509_CRL_get_nextUpdate(X509_CRL *crl) { return crl->crl->nextUpdate; } X509_NAME *X509_CRL_get_issuer(const X509_CRL *crl) { return crl->crl->issuer; } STACK_OF(X509_REVOKED) *X509_CRL_get_REVOKED(X509_CRL *crl) { return crl->crl->revoked; } const STACK_OF(X509_EXTENSION) *X509_CRL_get0_extensions(const X509_CRL *crl) { return crl->crl->extensions; } void X509_CRL_get0_signature(const X509_CRL *crl, const ASN1_BIT_STRING **psig, const X509_ALGOR **palg) { if (psig != NULL) { *psig = crl->signature; } if (palg != NULL) { *palg = crl->sig_alg; } } int X509_CRL_get_signature_nid(const X509_CRL *crl) { return OBJ_obj2nid(crl->sig_alg->algorithm); } const ASN1_TIME *X509_REVOKED_get0_revocationDate(const X509_REVOKED *revoked) { return revoked->revocationDate; } int X509_REVOKED_set_revocationDate(X509_REVOKED *revoked, const ASN1_TIME *tm) { ASN1_TIME *in; if (revoked == NULL) { return 0; } in = revoked->revocationDate; if (in != tm) { in = ASN1_STRING_dup(tm); if (in != NULL) { ASN1_TIME_free(revoked->revocationDate); revoked->revocationDate = in; } } return in != NULL; } const ASN1_INTEGER *X509_REVOKED_get0_serialNumber( const X509_REVOKED *revoked) { return revoked->serialNumber; } int X509_REVOKED_set_serialNumber(X509_REVOKED *revoked, const ASN1_INTEGER *serial) { ASN1_INTEGER *in; if (serial->type != V_ASN1_INTEGER && serial->type != V_ASN1_NEG_INTEGER) { OPENSSL_PUT_ERROR(ASN1, ASN1_R_WRONG_TYPE); return 0; } if 
(revoked == NULL) { return 0; } in = revoked->serialNumber; if (in != serial) { in = ASN1_INTEGER_dup(serial); if (in != NULL) { ASN1_INTEGER_free(revoked->serialNumber); revoked->serialNumber = in; } } return in != NULL; } const STACK_OF(X509_EXTENSION) *X509_REVOKED_get0_extensions( const X509_REVOKED *r) { return r->extensions; } int i2d_re_X509_CRL_tbs(X509_CRL *crl, unsigned char **outp) { asn1_encoding_clear(&crl->crl->enc); return i2d_X509_CRL_INFO(crl->crl, outp); } int i2d_X509_CRL_tbs(X509_CRL *crl, unsigned char **outp) { return i2d_X509_CRL_INFO(crl->crl, outp); } int X509_CRL_set1_signature_algo(X509_CRL *crl, const X509_ALGOR *algo) { X509_ALGOR *copy1 = X509_ALGOR_dup(algo); X509_ALGOR *copy2 = X509_ALGOR_dup(algo); if (copy1 == NULL || copy2 == NULL) { X509_ALGOR_free(copy1); X509_ALGOR_free(copy2); return 0; } X509_ALGOR_free(crl->sig_alg); crl->sig_alg = copy1; X509_ALGOR_free(crl->crl->sig_alg); crl->crl->sig_alg = copy2; return 1; } int X509_CRL_set1_signature_value(X509_CRL *crl, const uint8_t *sig, size_t sig_len) { if (!ASN1_STRING_set(crl->signature, sig, sig_len)) { return 0; } crl->signature->flags &= ~(ASN1_STRING_FLAG_BITS_LEFT | 0x07); crl->signature->flags |= ASN1_STRING_FLAG_BITS_LEFT; return 1; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x509name.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../internal.h" #include "internal.h" int X509_NAME_get_text_by_NID(const X509_NAME *name, int nid, char *buf, int len) { const ASN1_OBJECT *obj; obj = OBJ_nid2obj(nid); if (obj == NULL) { return -1; } return (X509_NAME_get_text_by_OBJ(name, obj, buf, len)); } int X509_NAME_get_text_by_OBJ(const X509_NAME *name, const ASN1_OBJECT *obj, char *buf, int len) { int i = X509_NAME_get_index_by_OBJ(name, obj, -1); if (i < 0) { return -1; } const ASN1_STRING *data = X509_NAME_ENTRY_get_data(X509_NAME_get_entry(name, i)); unsigned char *text = NULL; int ret = -1; int text_len = ASN1_STRING_to_UTF8(&text, data); // Fail if we could not encode as UTF-8. if (text_len < 0) { goto out; } CBS cbs; CBS_init(&cbs, text, text_len); // Fail if the UTF-8 encoding constains a 0 byte because this is // returned as a C string and callers very often do not check. 
if (CBS_contains_zero_byte(&cbs)) { goto out; } // We still support the "pass NULL to find out how much" API if (buf != NULL) { if (text_len >= len || len <= 0 || !CBS_copy_bytes(&cbs, (uint8_t *)buf, text_len)) { goto out; } // It must be a C string buf[text_len] = '\0'; } ret = text_len; out: OPENSSL_free(text); return ret; } int X509_NAME_entry_count(const X509_NAME *name) { if (name == NULL) { return 0; } return (int)sk_X509_NAME_ENTRY_num(name->entries); } int X509_NAME_get_index_by_NID(const X509_NAME *name, int nid, int lastpos) { const ASN1_OBJECT *obj; obj = OBJ_nid2obj(nid); if (obj == NULL) { return -2; } return X509_NAME_get_index_by_OBJ(name, obj, lastpos); } // NOTE: you should be passsing -1, not 0 as lastpos int X509_NAME_get_index_by_OBJ(const X509_NAME *name, const ASN1_OBJECT *obj, int lastpos) { if (name == NULL) { return -1; } if (lastpos < 0) { lastpos = -1; } const STACK_OF(X509_NAME_ENTRY) *sk = name->entries; int n = (int)sk_X509_NAME_ENTRY_num(sk); for (lastpos++; lastpos < n; lastpos++) { const X509_NAME_ENTRY *ne = sk_X509_NAME_ENTRY_value(sk, lastpos); if (OBJ_cmp(ne->object, obj) == 0) { return lastpos; } } return -1; } X509_NAME_ENTRY *X509_NAME_get_entry(const X509_NAME *name, int loc) { if (name == NULL || loc < 0 || sk_X509_NAME_ENTRY_num(name->entries) <= (size_t)loc) { return NULL; } else { return (sk_X509_NAME_ENTRY_value(name->entries, loc)); } } X509_NAME_ENTRY *X509_NAME_delete_entry(X509_NAME *name, int loc) { if (name == NULL || loc < 0 || sk_X509_NAME_ENTRY_num(name->entries) <= (size_t)loc) { return NULL; } STACK_OF(X509_NAME_ENTRY) *sk = name->entries; X509_NAME_ENTRY *ret = sk_X509_NAME_ENTRY_delete(sk, loc); size_t n = sk_X509_NAME_ENTRY_num(sk); name->modified = 1; if ((size_t)loc == n) { return ret; } int set_prev; if (loc != 0) { set_prev = sk_X509_NAME_ENTRY_value(sk, loc - 1)->set; } else { set_prev = ret->set - 1; } int set_next = sk_X509_NAME_ENTRY_value(sk, loc)->set; // If we removed a singleton RDN, update 
the RDN indices so they are // consecutive again. if (set_prev + 1 < set_next) { for (size_t i = loc; i < n; i++) { sk_X509_NAME_ENTRY_value(sk, i)->set--; } } return ret; } int X509_NAME_add_entry_by_OBJ(X509_NAME *name, const ASN1_OBJECT *obj, int type, const unsigned char *bytes, ossl_ssize_t len, int loc, int set) { X509_NAME_ENTRY *ne = X509_NAME_ENTRY_create_by_OBJ(NULL, obj, type, bytes, len); if (!ne) { return 0; } int ret = X509_NAME_add_entry(name, ne, loc, set); X509_NAME_ENTRY_free(ne); return ret; } int X509_NAME_add_entry_by_NID(X509_NAME *name, int nid, int type, const unsigned char *bytes, ossl_ssize_t len, int loc, int set) { X509_NAME_ENTRY *ne = X509_NAME_ENTRY_create_by_NID(NULL, nid, type, bytes, len); if (!ne) { return 0; } int ret = X509_NAME_add_entry(name, ne, loc, set); X509_NAME_ENTRY_free(ne); return ret; } int X509_NAME_add_entry_by_txt(X509_NAME *name, const char *field, int type, const unsigned char *bytes, ossl_ssize_t len, int loc, int set) { X509_NAME_ENTRY *ne = X509_NAME_ENTRY_create_by_txt(NULL, field, type, bytes, len); if (!ne) { return 0; } int ret = X509_NAME_add_entry(name, ne, loc, set); X509_NAME_ENTRY_free(ne); return ret; } // if set is -1, append to previous set, 0 'a new one', and 1, prepend to the // guy we are about to stomp on. 
// X509_NAME_add_entry inserts a copy of |entry| into |name| at index |loc|
// (a negative or out-of-range |loc| means append). |set| selects the RDN
// placement as described in the comment above this function. Returns one on
// success and zero on error.
int X509_NAME_add_entry(X509_NAME *name, const X509_NAME_ENTRY *entry, int loc,
                        int set) {
  X509_NAME_ENTRY *new_name = NULL;
  int i, inc;
  STACK_OF(X509_NAME_ENTRY) *sk;
  if (name == NULL) {
    return 0;
  }
  sk = name->entries;
  int n = (int)sk_X509_NAME_ENTRY_num(sk);
  // Clamp |loc|: anything out of range means "append".
  if (loc > n) {
    loc = n;
  } else if (loc < 0) {
    loc = n;
  }
  // A brand-new RDN (set == 0) shifts all later RDN indices up by one.
  inc = (set == 0);
  name->modified = 1;
  if (set == -1) {
    if (loc == 0) {
      set = 0;
      inc = 1;
    } else {
      set = sk_X509_NAME_ENTRY_value(sk, loc - 1)->set;
    }
  } else {  // if (set >= 0)
    if (loc >= n) {
      if (loc != 0) {
        set = sk_X509_NAME_ENTRY_value(sk, loc - 1)->set + 1;
      } else {
        set = 0;
      }
    } else {
      set = sk_X509_NAME_ENTRY_value(sk, loc)->set;
    }
  }
  if ((new_name = X509_NAME_ENTRY_dup(entry)) == NULL) {
    goto err;
  }
  new_name->set = set;
  if (!sk_X509_NAME_ENTRY_insert(sk, new_name, loc)) {
    goto err;
  }
  if (inc) {
    n = (int)sk_X509_NAME_ENTRY_num(sk);
    for (i = loc + 1; i < n; i++) {
      sk_X509_NAME_ENTRY_value(sk, i)->set += 1;
    }
  }
  return 1;

err:
  if (new_name != NULL) {
    X509_NAME_ENTRY_free(new_name);
  }
  return 0;
}

// X509_NAME_ENTRY_create_by_txt is like |X509_NAME_ENTRY_create_by_OBJ| but
// looks the object up from a textual |field| name.
X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_txt(X509_NAME_ENTRY **ne,
                                               const char *field, int type,
                                               const unsigned char *bytes,
                                               ossl_ssize_t len) {
  ASN1_OBJECT *obj;
  X509_NAME_ENTRY *nentry;
  obj = OBJ_txt2obj(field, 0);
  if (obj == NULL) {
    OPENSSL_PUT_ERROR(X509, X509_R_INVALID_FIELD_NAME);
    ERR_add_error_data(2, "name=", field);
    return NULL;
  }
  nentry = X509_NAME_ENTRY_create_by_OBJ(ne, obj, type, bytes, len);
  ASN1_OBJECT_free(obj);
  return nentry;
}

// X509_NAME_ENTRY_create_by_NID is like |X509_NAME_ENTRY_create_by_OBJ| but
// looks the object up by NID.
X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_NID(X509_NAME_ENTRY **ne, int nid,
                                               int type,
                                               const unsigned char *bytes,
                                               ossl_ssize_t len) {
  const ASN1_OBJECT *obj = OBJ_nid2obj(nid);
  if (obj == NULL) {
    OPENSSL_PUT_ERROR(X509, X509_R_UNKNOWN_NID);
    return NULL;
  }
  return X509_NAME_ENTRY_create_by_OBJ(ne, obj, type, bytes, len);
}

// X509_NAME_ENTRY_create_by_OBJ fills in an entry with |obj| and the
// |type|/|bytes|/|len| value. If |ne| or |*ne| is NULL a fresh entry is
// allocated (and, when |ne| is non-NULL, stored back into |*ne|); otherwise
// |*ne| is updated in place.
X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_OBJ(X509_NAME_ENTRY **ne,
                                               const ASN1_OBJECT *obj,
                                               int type,
                                               const unsigned char *bytes,
                                               ossl_ssize_t len) {
  X509_NAME_ENTRY *ret;
  if ((ne == NULL) || (*ne == NULL)) {
    if ((ret = X509_NAME_ENTRY_new()) == NULL) {
      return NULL;
    }
  } else {
    ret = *ne;
  }
  if (!X509_NAME_ENTRY_set_object(ret, obj)) {
    goto err;
  }
  if (!X509_NAME_ENTRY_set_data(ret, type, bytes, len)) {
    goto err;
  }
  if ((ne != NULL) && (*ne == NULL)) {
    *ne = ret;
  }
  return ret;

err:
  // Only free |ret| if it was allocated here rather than supplied in |*ne|.
  if ((ne == NULL) || (ret != *ne)) {
    X509_NAME_ENTRY_free(ret);
  }
  return NULL;
}

// X509_NAME_ENTRY_set_object replaces the entry's attribute type with a copy
// of |obj|. Returns one on success and zero on error.
int X509_NAME_ENTRY_set_object(X509_NAME_ENTRY *ne, const ASN1_OBJECT *obj) {
  if ((ne == NULL) || (obj == NULL)) {
    OPENSSL_PUT_ERROR(X509, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }
  ASN1_OBJECT_free(ne->object);
  ne->object = OBJ_dup(obj);
  return ((ne->object == NULL) ? 0 : 1);
}

// X509_NAME_ENTRY_set_data sets the entry's value. If |type| has
// |MBSTRING_FLAG| set, |bytes| is interpreted as a multibyte string and
// converted based on the entry's object; otherwise the bytes are stored
// as-is, with ASN.1 type |type| (the existing type is kept when |type| is
// |V_ASN1_UNDEF|). A negative |len| means |bytes| is NUL-terminated.
int X509_NAME_ENTRY_set_data(X509_NAME_ENTRY *ne, int type,
                             const unsigned char *bytes, ossl_ssize_t len) {
  if ((ne == NULL) || ((bytes == NULL) && (len != 0))) {
    return 0;
  }
  if ((type > 0) && (type & MBSTRING_FLAG)) {
    return ASN1_STRING_set_by_NID(&ne->value, bytes, len, type,
                                  OBJ_obj2nid(ne->object))
               ? 1
               : 0;
  }
  if (len < 0) {
    len = strlen((const char *)bytes);
  }
  if (!ASN1_STRING_set(ne->value, bytes, len)) {
    return 0;
  }
  if (type != V_ASN1_UNDEF) {
    ne->value->type = type;
  }
  return 1;
}

// X509_NAME_ENTRY_get_object returns the entry's attribute type, or NULL if
// |ne| is NULL.
ASN1_OBJECT *X509_NAME_ENTRY_get_object(const X509_NAME_ENTRY *ne) {
  if (ne == NULL) {
    return NULL;
  }
  return ne->object;
}

// X509_NAME_ENTRY_get_data returns the entry's value, or NULL if |ne| is
// NULL.
ASN1_STRING *X509_NAME_ENTRY_get_data(const X509_NAME_ENTRY *ne) {
  if (ne == NULL) {
    return NULL;
  }
  return ne->value;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x509rset.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the header names inside the #include directives below were
// stripped by the text extraction; restore them from the upstream file
// before compiling.
#include
#include
#include
#include
#include "internal.h"

// X509_REQ_set_version sets the CSR version. Only |X509_REQ_VERSION_1| (the
// sole version defined for PKCS#10) is accepted; other values fail with
// |X509_R_INVALID_VERSION|.
int X509_REQ_set_version(X509_REQ *x, long version) {
  if (x == NULL) {
    return 0;
  }
  if (version != X509_REQ_VERSION_1) {
    OPENSSL_PUT_ERROR(X509, X509_R_INVALID_VERSION);
    return 0;
  }
  return ASN1_INTEGER_set_int64(x->req_info->version, version);
}

// X509_REQ_set_subject_name copies |name| into the request's subject.
// Returns one on success and zero on error.
int X509_REQ_set_subject_name(X509_REQ *x, X509_NAME *name) {
  if ((x == NULL) || (x->req_info == NULL)) {
    return 0;
  }
  return (X509_NAME_set(&x->req_info->subject, name));
}

// X509_REQ_set_pubkey encodes |pkey| into the request's
// SubjectPublicKeyInfo. Returns one on success and zero on error.
int X509_REQ_set_pubkey(X509_REQ *x, EVP_PKEY *pkey) {
  if ((x == NULL) || (x->req_info == NULL)) {
    return 0;
  }
  return (X509_PUBKEY_set(&x->req_info->pubkey, pkey));
}

// X509_REQ_set1_signature_algo replaces the request's outer signature
// algorithm with a copy of |algo|. Returns one on success and zero on error.
int X509_REQ_set1_signature_algo(X509_REQ *req, const X509_ALGOR *algo) {
  X509_ALGOR *copy = X509_ALGOR_dup(algo);
  if (copy == NULL) {
    return 0;
  }
  X509_ALGOR_free(req->sig_alg);
  req->sig_alg = copy;
  return 1;
}

// X509_REQ_set1_signature_value copies |sig| into the request's signature
// BIT STRING. Returns one on success and zero on error.
int X509_REQ_set1_signature_value(X509_REQ *req, const uint8_t *sig,
                                  size_t sig_len) {
  if (!ASN1_STRING_set(req->signature, sig, sig_len)) {
    return 0;
  }
  // Clear any stale unused-bits count in the low flag bits, then record that
  // the BIT STRING has zero unused bits.
  req->signature->flags &= ~(ASN1_STRING_FLAG_BITS_LEFT | 0x07);
  req->signature->flags |= ASN1_STRING_FLAG_BITS_LEFT;
  return 1;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x509spki.cc
================================================
/*
 * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the header names inside the #include directives below were
// stripped by the text extraction; restore them from the upstream file
// before compiling.
#include
#include
#include
#include
#include

// NETSCAPE_SPKI_set_pubkey encodes |pkey| into the SPKAC's public-key field.
// Returns one on success and zero on error.
int NETSCAPE_SPKI_set_pubkey(NETSCAPE_SPKI *x, EVP_PKEY *pkey) {
  if ((x == NULL) || (x->spkac == NULL)) {
    return 0;
  }
  return (X509_PUBKEY_set(&(x->spkac->pubkey), pkey));
}

// NETSCAPE_SPKI_get_pubkey decodes and returns the SPKAC's public key, or
// NULL on error.
EVP_PKEY *NETSCAPE_SPKI_get_pubkey(const NETSCAPE_SPKI *x) {
  if ((x == NULL) || (x->spkac == NULL)) {
    return NULL;
  }
  return (X509_PUBKEY_get(x->spkac->pubkey));
}

// Load a Netscape SPKI from a base64 encoded string
NETSCAPE_SPKI *NETSCAPE_SPKI_b64_decode(const char *str, ossl_ssize_t len) {
  unsigned char *spki_der;
  const unsigned char *p;
  size_t spki_len;
  NETSCAPE_SPKI *spki;
  // A non-positive |len| means |str| is NUL-terminated.
  if (len <= 0) {
    len = strlen(str);
  }
  if (!EVP_DecodedLength(&spki_len, len)) {
    OPENSSL_PUT_ERROR(X509, X509_R_BASE64_DECODE_ERROR);
    return NULL;
  }
  // NOTE(review): the template arguments of the reinterpret_casts in this
  // file were stripped by the text extraction; restore them from the
  // upstream source before compiling.
  if (!(spki_der = reinterpret_cast(OPENSSL_malloc(spki_len)))) {
    return NULL;
  }
  if (!EVP_DecodeBase64(spki_der, &spki_len, spki_len, (const uint8_t *)str,
                        len)) {
    OPENSSL_PUT_ERROR(X509, X509_R_BASE64_DECODE_ERROR);
    OPENSSL_free(spki_der);
    return NULL;
  }
  p = spki_der;
  spki = d2i_NETSCAPE_SPKI(NULL, &p, spki_len);
  OPENSSL_free(spki_der);
  return spki;
}

// Generate a base64 encoded string from an SPKI. The caller owns the
// returned buffer; returns NULL on error.
char *NETSCAPE_SPKI_b64_encode(NETSCAPE_SPKI *spki) {
  unsigned char *der_spki, *p;
  char *b64_str;
  size_t b64_len;
  int der_len;
  der_len = i2d_NETSCAPE_SPKI(spki, NULL);
  if (!EVP_EncodedLength(&b64_len, der_len)) {
    OPENSSL_PUT_ERROR(X509, ERR_R_OVERFLOW);
    return NULL;
  }
  der_spki = reinterpret_cast(OPENSSL_malloc(der_len));
  if (der_spki == NULL) {
    return NULL;
  }
  b64_str = reinterpret_cast(OPENSSL_malloc(b64_len));
  if (b64_str == NULL) {
    OPENSSL_free(der_spki);
    return NULL;
  }
  p = der_spki;
  i2d_NETSCAPE_SPKI(spki, &p);
  EVP_EncodeBlock((unsigned char *)b64_str, der_spki, der_len);
  OPENSSL_free(der_spki);
  return b64_str;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x_algor.cc
================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "../asn1/internal.h" ASN1_SEQUENCE(X509_ALGOR) = { ASN1_SIMPLE(X509_ALGOR, algorithm, ASN1_OBJECT), ASN1_OPT(X509_ALGOR, parameter, ASN1_ANY), } ASN1_SEQUENCE_END(X509_ALGOR) IMPLEMENT_ASN1_FUNCTIONS_const(X509_ALGOR) IMPLEMENT_ASN1_DUP_FUNCTION_const(X509_ALGOR) int X509_ALGOR_set0(X509_ALGOR *alg, ASN1_OBJECT *aobj, int ptype, void *pval) { if (!alg) { return 0; } if (ptype != V_ASN1_UNDEF) { if (alg->parameter == NULL) { alg->parameter = ASN1_TYPE_new(); } if (alg->parameter == NULL) { return 0; } } if (alg) { ASN1_OBJECT_free(alg->algorithm); alg->algorithm = aobj; } if (ptype == 0) { return 1; } if (ptype == V_ASN1_UNDEF) { if (alg->parameter) { ASN1_TYPE_free(alg->parameter); alg->parameter = NULL; } } else { ASN1_TYPE_set(alg->parameter, ptype, pval); } return 1; } void X509_ALGOR_get0(const ASN1_OBJECT **out_obj, int *out_param_type, const void **out_param_value, const X509_ALGOR *alg) { if (out_obj != NULL) { *out_obj = alg->algorithm; } if (out_param_type != NULL) { int type = V_ASN1_UNDEF; const void *value = NULL; if (alg->parameter != NULL) { type = alg->parameter->type; value = asn1_type_value_as_pointer(alg->parameter); } *out_param_type = type; if (out_param_value != NULL) { *out_param_value = value; } } } // Set up an X509_ALGOR DigestAlgorithmIdentifier from an EVP_MD int X509_ALGOR_set_md(X509_ALGOR *alg, const EVP_MD *md) { int param_type; if (EVP_MD_flags(md) & EVP_MD_FLAG_DIGALGID_ABSENT) { param_type = V_ASN1_UNDEF; } else { param_type = V_ASN1_NULL; } return X509_ALGOR_set0(alg, OBJ_nid2obj(EVP_MD_type(md)), param_type, 
NULL); } // X509_ALGOR_cmp returns 0 if |a| and |b| are equal and non-zero otherwise. int X509_ALGOR_cmp(const X509_ALGOR *a, const X509_ALGOR *b) { int rv; rv = OBJ_cmp(a->algorithm, b->algorithm); if (rv) { return rv; } if (!a->parameter && !b->parameter) { return 0; } return ASN1_TYPE_cmp(a->parameter, b->parameter); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x_all.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include "../asn1/internal.h" #include "internal.h" int X509_verify(X509 *x509, EVP_PKEY *pkey) { if (X509_ALGOR_cmp(x509->sig_alg, x509->cert_info->signature)) { OPENSSL_PUT_ERROR(X509, X509_R_SIGNATURE_ALGORITHM_MISMATCH); return 0; } return ASN1_item_verify(ASN1_ITEM_rptr(X509_CINF), x509->sig_alg, x509->signature, x509->cert_info, pkey); } int X509_REQ_verify(X509_REQ *req, EVP_PKEY *pkey) { return ASN1_item_verify(ASN1_ITEM_rptr(X509_REQ_INFO), req->sig_alg, req->signature, req->req_info, pkey); } int X509_sign(X509 *x, EVP_PKEY *pkey, const EVP_MD *md) { asn1_encoding_clear(&x->cert_info->enc); return (ASN1_item_sign(ASN1_ITEM_rptr(X509_CINF), x->cert_info->signature, x->sig_alg, x->signature, x->cert_info, pkey, md)); } int X509_sign_ctx(X509 *x, EVP_MD_CTX *ctx) { asn1_encoding_clear(&x->cert_info->enc); return ASN1_item_sign_ctx(ASN1_ITEM_rptr(X509_CINF), x->cert_info->signature, x->sig_alg, x->signature, x->cert_info, ctx); } int X509_REQ_sign(X509_REQ *x, EVP_PKEY *pkey, const EVP_MD *md) { asn1_encoding_clear(&x->req_info->enc); return (ASN1_item_sign(ASN1_ITEM_rptr(X509_REQ_INFO), x->sig_alg, 
NULL, x->signature, x->req_info, pkey, md)); } int X509_REQ_sign_ctx(X509_REQ *x, EVP_MD_CTX *ctx) { asn1_encoding_clear(&x->req_info->enc); return ASN1_item_sign_ctx(ASN1_ITEM_rptr(X509_REQ_INFO), x->sig_alg, NULL, x->signature, x->req_info, ctx); } int X509_CRL_sign(X509_CRL *x, EVP_PKEY *pkey, const EVP_MD *md) { asn1_encoding_clear(&x->crl->enc); return (ASN1_item_sign(ASN1_ITEM_rptr(X509_CRL_INFO), x->crl->sig_alg, x->sig_alg, x->signature, x->crl, pkey, md)); } int X509_CRL_sign_ctx(X509_CRL *x, EVP_MD_CTX *ctx) { asn1_encoding_clear(&x->crl->enc); return ASN1_item_sign_ctx(ASN1_ITEM_rptr(X509_CRL_INFO), x->crl->sig_alg, x->sig_alg, x->signature, x->crl, ctx); } int NETSCAPE_SPKI_sign(NETSCAPE_SPKI *x, EVP_PKEY *pkey, const EVP_MD *md) { return (ASN1_item_sign(ASN1_ITEM_rptr(NETSCAPE_SPKAC), x->sig_algor, NULL, x->signature, x->spkac, pkey, md)); } int NETSCAPE_SPKI_verify(NETSCAPE_SPKI *spki, EVP_PKEY *pkey) { return (ASN1_item_verify(ASN1_ITEM_rptr(NETSCAPE_SPKAC), spki->sig_algor, spki->signature, spki->spkac, pkey)); } X509_CRL *d2i_X509_CRL_fp(FILE *fp, X509_CRL **crl) { return reinterpret_cast( ASN1_item_d2i_fp(ASN1_ITEM_rptr(X509_CRL), fp, crl)); } int i2d_X509_CRL_fp(FILE *fp, X509_CRL *crl) { return ASN1_item_i2d_fp(ASN1_ITEM_rptr(X509_CRL), fp, crl); } X509_CRL *d2i_X509_CRL_bio(BIO *bp, X509_CRL **crl) { return reinterpret_cast( ASN1_item_d2i_bio(ASN1_ITEM_rptr(X509_CRL), bp, crl)); } int i2d_X509_CRL_bio(BIO *bp, X509_CRL *crl) { return ASN1_item_i2d_bio(ASN1_ITEM_rptr(X509_CRL), bp, crl); } X509_REQ *d2i_X509_REQ_fp(FILE *fp, X509_REQ **req) { return reinterpret_cast( ASN1_item_d2i_fp(ASN1_ITEM_rptr(X509_REQ), fp, req)); } int i2d_X509_REQ_fp(FILE *fp, X509_REQ *req) { return ASN1_item_i2d_fp(ASN1_ITEM_rptr(X509_REQ), fp, req); } X509_REQ *d2i_X509_REQ_bio(BIO *bp, X509_REQ **req) { return reinterpret_cast( ASN1_item_d2i_bio(ASN1_ITEM_rptr(X509_REQ), bp, req)); } int i2d_X509_REQ_bio(BIO *bp, X509_REQ *req) { return 
ASN1_item_i2d_bio(ASN1_ITEM_rptr(X509_REQ), bp, req); } #define IMPLEMENT_D2I_FP(type, name, bio_func) \ type *name(FILE *fp, type **obj) { \ BIO *bio = BIO_new_fp(fp, BIO_NOCLOSE); \ if (bio == NULL) { \ return NULL; \ } \ type *ret = bio_func(bio, obj); \ BIO_free(bio); \ return ret; \ } #define IMPLEMENT_I2D_FP(type, name, bio_func) \ int name(FILE *fp, type *obj) { \ BIO *bio = BIO_new_fp(fp, BIO_NOCLOSE); \ if (bio == NULL) { \ return 0; \ } \ int ret = bio_func(bio, obj); \ BIO_free(bio); \ return ret; \ } IMPLEMENT_D2I_FP(X509, d2i_X509_fp, d2i_X509_bio) IMPLEMENT_I2D_FP(X509, i2d_X509_fp, i2d_X509_bio) IMPLEMENT_D2I_FP(RSA, d2i_RSAPrivateKey_fp, d2i_RSAPrivateKey_bio) IMPLEMENT_I2D_FP(RSA, i2d_RSAPrivateKey_fp, i2d_RSAPrivateKey_bio) IMPLEMENT_D2I_FP(RSA, d2i_RSAPublicKey_fp, d2i_RSAPublicKey_bio) IMPLEMENT_I2D_FP(RSA, i2d_RSAPublicKey_fp, i2d_RSAPublicKey_bio) IMPLEMENT_D2I_FP(RSA, d2i_RSA_PUBKEY_fp, d2i_RSA_PUBKEY_bio) IMPLEMENT_I2D_FP(RSA, i2d_RSA_PUBKEY_fp, i2d_RSA_PUBKEY_bio) #define IMPLEMENT_D2I_BIO(type, name, d2i_func) \ type *name(BIO *bio, type **obj) { \ uint8_t *data; \ size_t len; \ if (!BIO_read_asn1(bio, &data, &len, 100 * 1024)) { \ return NULL; \ } \ const uint8_t *ptr = data; \ type *ret = d2i_func(obj, &ptr, (long)len); \ OPENSSL_free(data); \ return ret; \ } #define IMPLEMENT_I2D_BIO(type, name, i2d_func) \ int name(BIO *bio, type *obj) { \ uint8_t *data = NULL; \ int len = i2d_func(obj, &data); \ if (len < 0) { \ return 0; \ } \ int ret = BIO_write_all(bio, data, len); \ OPENSSL_free(data); \ return ret; \ } IMPLEMENT_D2I_BIO(X509, d2i_X509_bio, d2i_X509) IMPLEMENT_I2D_BIO(X509, i2d_X509_bio, i2d_X509) IMPLEMENT_D2I_BIO(RSA, d2i_RSAPrivateKey_bio, d2i_RSAPrivateKey) IMPLEMENT_I2D_BIO(RSA, i2d_RSAPrivateKey_bio, i2d_RSAPrivateKey) IMPLEMENT_D2I_BIO(RSA, d2i_RSAPublicKey_bio, d2i_RSAPublicKey) IMPLEMENT_I2D_BIO(RSA, i2d_RSAPublicKey_bio, i2d_RSAPublicKey) IMPLEMENT_D2I_BIO(RSA, d2i_RSA_PUBKEY_bio, d2i_RSA_PUBKEY) IMPLEMENT_I2D_BIO(RSA, 
i2d_RSA_PUBKEY_bio, i2d_RSA_PUBKEY) IMPLEMENT_D2I_FP(DSA, d2i_DSAPrivateKey_fp, d2i_DSAPrivateKey_bio) IMPLEMENT_I2D_FP(DSA, i2d_DSAPrivateKey_fp, i2d_DSAPrivateKey_bio) IMPLEMENT_D2I_FP(DSA, d2i_DSA_PUBKEY_fp, d2i_DSA_PUBKEY_bio) IMPLEMENT_I2D_FP(DSA, i2d_DSA_PUBKEY_fp, i2d_DSA_PUBKEY_bio) IMPLEMENT_D2I_BIO(DSA, d2i_DSAPrivateKey_bio, d2i_DSAPrivateKey) IMPLEMENT_I2D_BIO(DSA, i2d_DSAPrivateKey_bio, i2d_DSAPrivateKey) IMPLEMENT_D2I_BIO(DSA, d2i_DSA_PUBKEY_bio, d2i_DSA_PUBKEY) IMPLEMENT_I2D_BIO(DSA, i2d_DSA_PUBKEY_bio, i2d_DSA_PUBKEY) IMPLEMENT_D2I_FP(EC_KEY, d2i_ECPrivateKey_fp, d2i_ECPrivateKey_bio) IMPLEMENT_I2D_FP(EC_KEY, i2d_ECPrivateKey_fp, i2d_ECPrivateKey_bio) IMPLEMENT_D2I_FP(EC_KEY, d2i_EC_PUBKEY_fp, d2i_EC_PUBKEY_bio) IMPLEMENT_I2D_FP(EC_KEY, i2d_EC_PUBKEY_fp, i2d_EC_PUBKEY_bio) IMPLEMENT_D2I_BIO(EC_KEY, d2i_ECPrivateKey_bio, d2i_ECPrivateKey) IMPLEMENT_I2D_BIO(EC_KEY, i2d_ECPrivateKey_bio, i2d_ECPrivateKey) IMPLEMENT_D2I_BIO(EC_KEY, d2i_EC_PUBKEY_bio, d2i_EC_PUBKEY) IMPLEMENT_I2D_BIO(EC_KEY, i2d_EC_PUBKEY_bio, i2d_EC_PUBKEY) int X509_pubkey_digest(const X509 *data, const EVP_MD *type, unsigned char *md, unsigned int *len) { ASN1_BIT_STRING *key; key = X509_get0_pubkey_bitstr(data); if (!key) { return 0; } return EVP_Digest(key->data, key->length, md, len, type, NULL); } int X509_digest(const X509 *x509, const EVP_MD *md, uint8_t *out, unsigned *out_len) { uint8_t *der = NULL; // TODO(https://crbug.com/boringssl/407): This function is not const-correct. 
int der_len = i2d_X509((X509 *)x509, &der); if (der_len < 0) { return 0; } int ret = EVP_Digest(der, der_len, out, out_len, md, NULL); OPENSSL_free(der); return ret; } int X509_CRL_digest(const X509_CRL *data, const EVP_MD *type, unsigned char *md, unsigned int *len) { return ( ASN1_item_digest(ASN1_ITEM_rptr(X509_CRL), type, (char *)data, md, len)); } int X509_REQ_digest(const X509_REQ *data, const EVP_MD *type, unsigned char *md, unsigned int *len) { return ( ASN1_item_digest(ASN1_ITEM_rptr(X509_REQ), type, (char *)data, md, len)); } int X509_NAME_digest(const X509_NAME *data, const EVP_MD *type, unsigned char *md, unsigned int *len) { return ( ASN1_item_digest(ASN1_ITEM_rptr(X509_NAME), type, (char *)data, md, len)); } IMPLEMENT_D2I_FP(X509_SIG, d2i_PKCS8_fp, d2i_PKCS8_bio) IMPLEMENT_I2D_FP(X509_SIG, i2d_PKCS8_fp, i2d_PKCS8_bio) IMPLEMENT_D2I_BIO(X509_SIG, d2i_PKCS8_bio, d2i_X509_SIG) IMPLEMENT_I2D_BIO(X509_SIG, i2d_PKCS8_bio, i2d_X509_SIG) IMPLEMENT_D2I_FP(PKCS8_PRIV_KEY_INFO, d2i_PKCS8_PRIV_KEY_INFO_fp, d2i_PKCS8_PRIV_KEY_INFO_bio) IMPLEMENT_I2D_FP(PKCS8_PRIV_KEY_INFO, i2d_PKCS8_PRIV_KEY_INFO_fp, i2d_PKCS8_PRIV_KEY_INFO_bio) int i2d_PKCS8PrivateKeyInfo_fp(FILE *fp, EVP_PKEY *key) { PKCS8_PRIV_KEY_INFO *p8inf; int ret; p8inf = EVP_PKEY2PKCS8(key); if (!p8inf) { return 0; } ret = i2d_PKCS8_PRIV_KEY_INFO_fp(fp, p8inf); PKCS8_PRIV_KEY_INFO_free(p8inf); return ret; } IMPLEMENT_D2I_FP(EVP_PKEY, d2i_PrivateKey_fp, d2i_PrivateKey_bio) IMPLEMENT_I2D_FP(EVP_PKEY, i2d_PrivateKey_fp, i2d_PrivateKey_bio) IMPLEMENT_D2I_FP(EVP_PKEY, d2i_PUBKEY_fp, d2i_PUBKEY_bio) IMPLEMENT_I2D_FP(EVP_PKEY, i2d_PUBKEY_fp, i2d_PUBKEY_bio) IMPLEMENT_D2I_BIO(PKCS8_PRIV_KEY_INFO, d2i_PKCS8_PRIV_KEY_INFO_bio, d2i_PKCS8_PRIV_KEY_INFO) IMPLEMENT_I2D_BIO(PKCS8_PRIV_KEY_INFO, i2d_PKCS8_PRIV_KEY_INFO_bio, i2d_PKCS8_PRIV_KEY_INFO) int i2d_PKCS8PrivateKeyInfo_bio(BIO *bp, EVP_PKEY *key) { PKCS8_PRIV_KEY_INFO *p8inf; int ret; p8inf = EVP_PKEY2PKCS8(key); if (!p8inf) { return 0; } ret = 
i2d_PKCS8_PRIV_KEY_INFO_bio(bp, p8inf); PKCS8_PRIV_KEY_INFO_free(p8inf); return ret; } IMPLEMENT_D2I_BIO(EVP_PKEY, d2i_PrivateKey_bio, d2i_AutoPrivateKey) IMPLEMENT_I2D_BIO(EVP_PKEY, i2d_PrivateKey_bio, i2d_PrivateKey) IMPLEMENT_D2I_BIO(EVP_PKEY, d2i_PUBKEY_bio, d2i_PUBKEY) IMPLEMENT_I2D_BIO(EVP_PKEY, i2d_PUBKEY_bio, i2d_PUBKEY) IMPLEMENT_D2I_BIO(DH, d2i_DHparams_bio, d2i_DHparams) IMPLEMENT_I2D_BIO(const DH, i2d_DHparams_bio, i2d_DHparams) ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x_attrib.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "internal.h" ASN1_SEQUENCE(X509_ATTRIBUTE) = { ASN1_SIMPLE(X509_ATTRIBUTE, object, ASN1_OBJECT), ASN1_SET_OF(X509_ATTRIBUTE, set, ASN1_ANY), } ASN1_SEQUENCE_END(X509_ATTRIBUTE) IMPLEMENT_ASN1_FUNCTIONS_const(X509_ATTRIBUTE) IMPLEMENT_ASN1_DUP_FUNCTION_const(X509_ATTRIBUTE) X509_ATTRIBUTE *X509_ATTRIBUTE_create(int nid, int attrtype, void *value) { ASN1_OBJECT *obj = OBJ_nid2obj(nid); if (obj == NULL) { return NULL; } X509_ATTRIBUTE *ret = X509_ATTRIBUTE_new(); ASN1_TYPE *val = ASN1_TYPE_new(); if (ret == NULL || val == NULL) { goto err; } ret->object = obj; if (!sk_ASN1_TYPE_push(ret->set, val)) { goto err; } ASN1_TYPE_set(val, attrtype, value); return ret; err: X509_ATTRIBUTE_free(ret); ASN1_TYPE_free(val); return NULL; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x_crl.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). 
You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include #include "../asn1/internal.h" #include "../internal.h" #include "internal.h" static int X509_REVOKED_cmp(const X509_REVOKED *const *a, const X509_REVOKED *const *b); static int setup_idp(X509_CRL *crl, ISSUING_DIST_POINT *idp); ASN1_SEQUENCE(X509_REVOKED) = { ASN1_SIMPLE(X509_REVOKED, serialNumber, ASN1_INTEGER), ASN1_SIMPLE(X509_REVOKED, revocationDate, ASN1_TIME), ASN1_SEQUENCE_OF_OPT(X509_REVOKED, extensions, X509_EXTENSION), } ASN1_SEQUENCE_END(X509_REVOKED) static int crl_lookup(X509_CRL *crl, X509_REVOKED **ret, const ASN1_INTEGER *serial, X509_NAME *issuer); // The X509_CRL_INFO structure needs a bit of customisation. Since we cache // the original encoding the signature wont be affected by reordering of the // revoked field. static int crl_inf_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it, void *exarg) { X509_CRL_INFO *a = (X509_CRL_INFO *)*pval; if (!a || !a->revoked) { return 1; } switch (operation) { // Just set cmp function here. We don't sort because that would // affect the output of X509_CRL_print(). 
case ASN1_OP_D2I_POST: (void)sk_X509_REVOKED_set_cmp_func(a->revoked, X509_REVOKED_cmp); break; } return 1; } ASN1_SEQUENCE_enc(X509_CRL_INFO, enc, crl_inf_cb) = { ASN1_OPT(X509_CRL_INFO, version, ASN1_INTEGER), ASN1_SIMPLE(X509_CRL_INFO, sig_alg, X509_ALGOR), ASN1_SIMPLE(X509_CRL_INFO, issuer, X509_NAME), ASN1_SIMPLE(X509_CRL_INFO, lastUpdate, ASN1_TIME), ASN1_OPT(X509_CRL_INFO, nextUpdate, ASN1_TIME), ASN1_SEQUENCE_OF_OPT(X509_CRL_INFO, revoked, X509_REVOKED), ASN1_EXP_SEQUENCE_OF_OPT(X509_CRL_INFO, extensions, X509_EXTENSION, 0), } ASN1_SEQUENCE_END_enc(X509_CRL_INFO, X509_CRL_INFO) static int crl_parse_entry_extensions(X509_CRL *crl) { STACK_OF(X509_REVOKED) *revoked = X509_CRL_get_REVOKED(crl); for (size_t i = 0; i < sk_X509_REVOKED_num(revoked); i++) { X509_REVOKED *rev = sk_X509_REVOKED_value(revoked, i); int crit; ASN1_ENUMERATED *reason = reinterpret_cast( X509_REVOKED_get_ext_d2i(rev, NID_crl_reason, &crit, NULL)); if (!reason && crit != -1) { crl->flags |= EXFLAG_INVALID; return 1; } if (reason) { rev->reason = ASN1_ENUMERATED_get(reason); ASN1_ENUMERATED_free(reason); } else { rev->reason = CRL_REASON_NONE; } // We do not support any critical CRL entry extensions. const STACK_OF(X509_EXTENSION) *exts = rev->extensions; for (size_t j = 0; j < sk_X509_EXTENSION_num(exts); j++) { const X509_EXTENSION *ext = sk_X509_EXTENSION_value(exts, j); if (X509_EXTENSION_get_critical(ext)) { crl->flags |= EXFLAG_CRITICAL; break; } } } return 1; } // The X509_CRL structure needs a bit of customisation. Cache some extensions // and hash of the whole CRL. static int crl_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it, void *exarg) { X509_CRL *crl = (X509_CRL *)*pval; int i; switch (operation) { case ASN1_OP_NEW_POST: crl->idp = NULL; crl->akid = NULL; crl->flags = 0; crl->idp_flags = 0; break; case ASN1_OP_D2I_POST: { // The version must be one of v1(0) or v2(1). 
long version = X509_CRL_VERSION_1; if (crl->crl->version != NULL) { version = ASN1_INTEGER_get(crl->crl->version); // TODO(https://crbug.com/boringssl/364): |X509_CRL_VERSION_1| // should also be rejected. This means an explicitly-encoded X.509v1 // version. v1 is DEFAULT, so DER requires it be omitted. if (version < X509_CRL_VERSION_1 || version > X509_CRL_VERSION_2) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_VERSION); return 0; } } // Per RFC 5280, section 5.1.2.1, extensions require v2. if (version != X509_CRL_VERSION_2 && crl->crl->extensions != NULL) { OPENSSL_PUT_ERROR(X509, X509_R_INVALID_FIELD_FOR_VERSION); return 0; } if (!X509_CRL_digest(crl, EVP_sha256(), crl->crl_hash, NULL)) { return 0; } crl->idp = reinterpret_cast( X509_CRL_get_ext_d2i(crl, NID_issuing_distribution_point, &i, NULL)); if (crl->idp != NULL) { if (!setup_idp(crl, crl->idp)) { return 0; } } else if (i != -1) { return 0; } crl->akid = reinterpret_cast( X509_CRL_get_ext_d2i(crl, NID_authority_key_identifier, &i, NULL)); if (crl->akid == NULL && i != -1) { return 0; } // See if we have any unhandled critical CRL extensions and indicate // this in a flag. We only currently handle IDP so anything else // critical sets the flag. This code accesses the X509_CRL structure // directly: applications shouldn't do this. 
const STACK_OF(X509_EXTENSION) *exts = crl->crl->extensions; for (size_t idx = 0; idx < sk_X509_EXTENSION_num(exts); idx++) { const X509_EXTENSION *ext = sk_X509_EXTENSION_value(exts, idx); int nid = OBJ_obj2nid(X509_EXTENSION_get_object(ext)); if (X509_EXTENSION_get_critical(ext)) { if (nid == NID_issuing_distribution_point || nid == NID_authority_key_identifier) { continue; } crl->flags |= EXFLAG_CRITICAL; break; } } if (!crl_parse_entry_extensions(crl)) { return 0; } break; } case ASN1_OP_FREE_POST: AUTHORITY_KEYID_free(crl->akid); ISSUING_DIST_POINT_free(crl->idp); break; } return 1; } // Convert IDP into a more convenient form // // TODO(davidben): Each of these flags are already booleans, so this is not // really more convenient. We can probably remove |idp_flags|. static int setup_idp(X509_CRL *crl, ISSUING_DIST_POINT *idp) { int idp_only = 0; // Set various flags according to IDP crl->idp_flags |= IDP_PRESENT; if (idp->onlyuser > 0) { idp_only++; crl->idp_flags |= IDP_ONLYUSER; } if (idp->onlyCA > 0) { idp_only++; crl->idp_flags |= IDP_ONLYCA; } if (idp->onlyattr > 0) { idp_only++; crl->idp_flags |= IDP_ONLYATTR; } // Per RFC 5280, section 5.2.5, at most one of onlyContainsUserCerts, // onlyContainsCACerts, and onlyContainsAttributeCerts may be true. // // TODO(crbug.com/boringssl/443): Move this check to the |ISSUING_DIST_POINT| // parser. if (idp_only > 1) { crl->idp_flags |= IDP_INVALID; } if (idp->indirectCRL > 0) { crl->idp_flags |= IDP_INDIRECT; } if (idp->onlysomereasons) { crl->idp_flags |= IDP_REASONS; } // TODO(davidben): The new verifier does not support nameRelativeToCRLIssuer. // Remove this? 
return DIST_POINT_set_dpname(idp->distpoint, X509_CRL_get_issuer(crl)); } ASN1_SEQUENCE_ref(X509_CRL, crl_cb) = { ASN1_SIMPLE(X509_CRL, crl, X509_CRL_INFO), ASN1_SIMPLE(X509_CRL, sig_alg, X509_ALGOR), ASN1_SIMPLE(X509_CRL, signature, ASN1_BIT_STRING), } ASN1_SEQUENCE_END_ref(X509_CRL, X509_CRL) // Although |X509_REVOKED| contains an |X509_NAME|, it can be const. It is not // affected by https://crbug.com/boringssl/407 because the |X509_NAME| does // not participate in serialization. IMPLEMENT_ASN1_FUNCTIONS_const(X509_REVOKED) IMPLEMENT_ASN1_DUP_FUNCTION_const(X509_REVOKED) IMPLEMENT_ASN1_FUNCTIONS(X509_CRL_INFO) IMPLEMENT_ASN1_FUNCTIONS(X509_CRL) IMPLEMENT_ASN1_DUP_FUNCTION(X509_CRL) static int X509_REVOKED_cmp(const X509_REVOKED *const *a, const X509_REVOKED *const *b) { return ASN1_STRING_cmp((*a)->serialNumber, (*b)->serialNumber); } int X509_CRL_add0_revoked(X509_CRL *crl, X509_REVOKED *rev) { X509_CRL_INFO *inf; inf = crl->crl; if (!inf->revoked) { inf->revoked = sk_X509_REVOKED_new(X509_REVOKED_cmp); } if (!inf->revoked || !sk_X509_REVOKED_push(inf->revoked, rev)) { return 0; } asn1_encoding_clear(&inf->enc); return 1; } int X509_CRL_verify(X509_CRL *crl, EVP_PKEY *pkey) { if (X509_ALGOR_cmp(crl->sig_alg, crl->crl->sig_alg) != 0) { OPENSSL_PUT_ERROR(X509, X509_R_SIGNATURE_ALGORITHM_MISMATCH); return 0; } return ASN1_item_verify(ASN1_ITEM_rptr(X509_CRL_INFO), crl->sig_alg, crl->signature, crl->crl, pkey); } int X509_CRL_get0_by_serial(X509_CRL *crl, X509_REVOKED **ret, const ASN1_INTEGER *serial) { return crl_lookup(crl, ret, serial, NULL); } int X509_CRL_get0_by_cert(X509_CRL *crl, X509_REVOKED **ret, X509 *x) { return crl_lookup(crl, ret, X509_get_serialNumber(x), X509_get_issuer_name(x)); } static int crl_revoked_issuer_match(X509_CRL *crl, X509_NAME *nm, X509_REVOKED *rev) { return nm == NULL || X509_NAME_cmp(nm, X509_CRL_get_issuer(crl)) == 0; } static CRYPTO_MUTEX g_crl_sort_lock = CRYPTO_MUTEX_INIT; static int crl_lookup(X509_CRL *crl, X509_REVOKED 
**ret, const ASN1_INTEGER *serial, X509_NAME *issuer) { // Use an assert, rather than a runtime error, because returning nothing for a // CRL is arguably failing open, rather than closed. assert(serial->type == V_ASN1_INTEGER || serial->type == V_ASN1_NEG_INTEGER); X509_REVOKED rtmp, *rev; size_t idx; rtmp.serialNumber = (ASN1_INTEGER *)serial; // Sort revoked into serial number order if not already sorted. Do this // under a lock to avoid race condition. CRYPTO_MUTEX_lock_read(&g_crl_sort_lock); const int is_sorted = sk_X509_REVOKED_is_sorted(crl->crl->revoked); CRYPTO_MUTEX_unlock_read(&g_crl_sort_lock); if (!is_sorted) { CRYPTO_MUTEX_lock_write(&g_crl_sort_lock); if (!sk_X509_REVOKED_is_sorted(crl->crl->revoked)) { sk_X509_REVOKED_sort(crl->crl->revoked); } CRYPTO_MUTEX_unlock_write(&g_crl_sort_lock); } if (!sk_X509_REVOKED_find(crl->crl->revoked, &idx, &rtmp)) { return 0; } // Need to look for matching name for (; idx < sk_X509_REVOKED_num(crl->crl->revoked); idx++) { rev = sk_X509_REVOKED_value(crl->crl->revoked, idx); if (ASN1_INTEGER_cmp(rev->serialNumber, serial)) { return 0; } if (crl_revoked_issuer_match(crl, issuer, rev)) { if (ret) { *ret = rev; } return 1; } } return 0; } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x_exten.cc ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include "internal.h" ASN1_SEQUENCE(X509_EXTENSION) = { ASN1_SIMPLE(X509_EXTENSION, object, ASN1_OBJECT), ASN1_OPT(X509_EXTENSION, critical, ASN1_BOOLEAN), ASN1_SIMPLE(X509_EXTENSION, value, ASN1_OCTET_STRING), } ASN1_SEQUENCE_END(X509_EXTENSION) ASN1_ITEM_TEMPLATE(X509_EXTENSIONS) = ASN1_EX_TEMPLATE_TYPE(ASN1_TFLG_SEQUENCE_OF, 0, Extension, X509_EXTENSION) ASN1_ITEM_TEMPLATE_END(X509_EXTENSIONS) IMPLEMENT_ASN1_FUNCTIONS_const(X509_EXTENSION) IMPLEMENT_ASN1_ENCODE_FUNCTIONS_const_fname(X509_EXTENSIONS, X509_EXTENSIONS, X509_EXTENSIONS) IMPLEMENT_ASN1_DUP_FUNCTION_const(X509_EXTENSION) ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x_name.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include #include "../asn1/internal.h" #include "../internal.h" #include "internal.h" typedef STACK_OF(X509_NAME_ENTRY) STACK_OF_X509_NAME_ENTRY; DEFINE_STACK_OF(STACK_OF_X509_NAME_ENTRY) // Maximum length of X509_NAME: much larger than anything we should // ever see in practice. 
#define X509_NAME_MAX (1024 * 1024) static int x509_name_ex_d2i(ASN1_VALUE **val, const unsigned char **in, long len, const ASN1_ITEM *it, int opt, ASN1_TLC *ctx); static int x509_name_ex_i2d(ASN1_VALUE **val, unsigned char **out, const ASN1_ITEM *it); static int x509_name_ex_new(ASN1_VALUE **val, const ASN1_ITEM *it); static void x509_name_ex_free(ASN1_VALUE **val, const ASN1_ITEM *it); static int x509_name_encode(X509_NAME *a); static int x509_name_canon(X509_NAME *a); static int asn1_string_canon(ASN1_STRING *out, ASN1_STRING *in); static int i2d_name_canon(STACK_OF(STACK_OF_X509_NAME_ENTRY) *intname, unsigned char **in); ASN1_SEQUENCE(X509_NAME_ENTRY) = { ASN1_SIMPLE(X509_NAME_ENTRY, object, ASN1_OBJECT), ASN1_SIMPLE(X509_NAME_ENTRY, value, ASN1_PRINTABLE), } ASN1_SEQUENCE_END(X509_NAME_ENTRY) IMPLEMENT_ASN1_ALLOC_FUNCTIONS(X509_NAME_ENTRY) IMPLEMENT_ASN1_DUP_FUNCTION_const(X509_NAME_ENTRY) // For the "Name" type we need a SEQUENCE OF { SET OF X509_NAME_ENTRY } so // declare two template wrappers for this ASN1_ITEM_TEMPLATE(X509_NAME_ENTRIES) = ASN1_EX_TEMPLATE_TYPE(ASN1_TFLG_SET_OF, 0, RDNS, X509_NAME_ENTRY) ASN1_ITEM_TEMPLATE_END(X509_NAME_ENTRIES) ASN1_ITEM_TEMPLATE(X509_NAME_INTERNAL) = ASN1_EX_TEMPLATE_TYPE(ASN1_TFLG_SEQUENCE_OF, 0, Name, X509_NAME_ENTRIES) ASN1_ITEM_TEMPLATE_END(X509_NAME_INTERNAL) // Normally that's where it would end: we'd have two nested STACK structures // representing the ASN1. Unfortunately X509_NAME uses a completely different // form and caches encodings so we have to process the internal form and // convert to the external form. 
// Custom new/free/d2i/i2d hooks that plug X509_NAME into the generic ASN.1
// machinery in place of the template-generated ones.
static const ASN1_EXTERN_FUNCS x509_name_ff = {
    x509_name_ex_new,
    x509_name_ex_free,
    x509_name_ex_d2i,
    x509_name_ex_i2d,
};

IMPLEMENT_EXTERN_ASN1(X509_NAME, V_ASN1_SEQUENCE, x509_name_ff)
IMPLEMENT_ASN1_FUNCTIONS(X509_NAME)
IMPLEMENT_ASN1_DUP_FUNCTION(X509_NAME)

// Allocates an empty X509_NAME with an empty entry stack and encoding buffer.
// |modified| starts at 1 so the first i2d re-encodes rather than serving a
// stale cache. Returns 1 on success, 0 on allocation failure.
// NOTE(review): the reinterpret_cast<...> template argument was stripped by
// extraction; upstream casts the OPENSSL_malloc result to X509_NAME *.
static int x509_name_ex_new(ASN1_VALUE **val, const ASN1_ITEM *it) {
  X509_NAME *ret = NULL;
  ret = reinterpret_cast(OPENSSL_malloc(sizeof(X509_NAME)));
  if (!ret) {
    goto memerr;
  }
  if ((ret->entries = sk_X509_NAME_ENTRY_new_null()) == NULL) {
    goto memerr;
  }
  if ((ret->bytes = BUF_MEM_new()) == NULL) {
    goto memerr;
  }
  ret->canon_enc = NULL;
  ret->canon_enclen = 0;
  ret->modified = 1;
  *val = (ASN1_VALUE *)ret;
  return 1;

memerr:
  // Partial-construction cleanup: only |entries| may have been allocated.
  if (ret) {
    if (ret->entries) {
      sk_X509_NAME_ENTRY_free(ret->entries);
    }
    OPENSSL_free(ret);
  }
  return 0;
}

// Frees an X509_NAME and everything it owns; nulls *pval afterwards.
static void x509_name_ex_free(ASN1_VALUE **pval, const ASN1_ITEM *it) {
  X509_NAME *a;
  if (!pval || !*pval) {
    return;
  }
  a = (X509_NAME *)*pval;
  BUF_MEM_free(a->bytes);
  sk_X509_NAME_ENTRY_pop_free(a->entries, X509_NAME_ENTRY_free);
  if (a->canon_enc) {
    OPENSSL_free(a->canon_enc);
  }
  OPENSSL_free(a);
  *pval = NULL;
}

// Adapters with the exact signature sk_..._pop_free expects: free only the
// stack shell vs. the stack plus its entries.
static void local_sk_X509_NAME_ENTRY_free(STACK_OF(X509_NAME_ENTRY) *ne) {
  sk_X509_NAME_ENTRY_free(ne);
}

static void local_sk_X509_NAME_ENTRY_pop_free(STACK_OF(X509_NAME_ENTRY) *ne) {
  sk_X509_NAME_ENTRY_pop_free(ne, X509_NAME_ENTRY_free);
}

// Decodes a Name: parses the nested stacks via X509_NAME_INTERNAL, then
// flattens them into an X509_NAME, caching the consumed DER in |nm->bytes|
// and computing the canonical encoding. Returns the ASN1_item_ex_d2i result
// (<= 0 on failure) and advances *in past the Name on success.
static int x509_name_ex_d2i(ASN1_VALUE **val, const unsigned char **in,
                            long len, const ASN1_ITEM *it, int opt,
                            ASN1_TLC *ctx) {
  const unsigned char *p = *in, *q;
  STACK_OF(STACK_OF_X509_NAME_ENTRY) *intname = NULL;
  X509_NAME *nm = NULL;
  size_t i, j;
  int ret;
  STACK_OF(X509_NAME_ENTRY) *entries;
  X509_NAME_ENTRY *entry;
  // Bound the size of an X509_NAME we are willing to parse.
  if (len > X509_NAME_MAX) {
    len = X509_NAME_MAX;
  }
  q = p;
  // Get internal representation of Name
  ASN1_VALUE *intname_val = NULL;
  ret = ASN1_item_ex_d2i(&intname_val, &p, len,
                         ASN1_ITEM_rptr(X509_NAME_INTERNAL),
                         /*tag=*/-1, /*aclass=*/0, opt, /*buf=*/NULL);
  if (ret <= 0) {
    return ret;
  }
  intname = (STACK_OF(STACK_OF_X509_NAME_ENTRY) *)intname_val;
  if (*val) {
    x509_name_ex_free(val, NULL);
  }
  ASN1_VALUE *nm_val = NULL;
  if (!x509_name_ex_new(&nm_val, NULL)) {
    goto err;
  }
  nm = (X509_NAME *)nm_val;
  // We've decoded it: now cache encoding
  if (!BUF_MEM_grow(nm->bytes, p - q)) {
    goto err;
  }
  OPENSSL_memcpy(nm->bytes->data, q, p - q);
  // Convert internal representation to X509_NAME structure
  for (i = 0; i < sk_STACK_OF_X509_NAME_ENTRY_num(intname); i++) {
    entries = sk_STACK_OF_X509_NAME_ENTRY_value(intname, i);
    for (j = 0; j < sk_X509_NAME_ENTRY_num(entries); j++) {
      entry = sk_X509_NAME_ENTRY_value(entries, j);
      // Record which RDN (set) this entry belongs to in the flat list.
      entry->set = (int)i;
      if (!sk_X509_NAME_ENTRY_push(nm->entries, entry)) {
        goto err;
      }
      // Ownership moved to nm->entries; null the slot so the error path's
      // pop_free cannot double-free it.
      (void)sk_X509_NAME_ENTRY_set(entries, j, NULL);
    }
  }
  ret = x509_name_canon(nm);
  if (!ret) {
    goto err;
  }
  // Success: only the stack shells remain owned by |intname|.
  sk_STACK_OF_X509_NAME_ENTRY_pop_free(intname, local_sk_X509_NAME_ENTRY_free);
  nm->modified = 0;
  *val = (ASN1_VALUE *)nm;
  *in = p;
  return ret;

err:
  X509_NAME_free(nm);
  sk_STACK_OF_X509_NAME_ENTRY_pop_free(intname,
                                       local_sk_X509_NAME_ENTRY_pop_free);
  OPENSSL_PUT_ERROR(X509, ERR_R_ASN1_LIB);
  return 0;
}

// Serializes a Name from the cached encoding, refreshing the cache (and the
// canonical form) first if the entry list was modified. Returns the encoded
// length, or -1 on encoding failure.
static int x509_name_ex_i2d(ASN1_VALUE **val, unsigned char **out,
                            const ASN1_ITEM *it) {
  X509_NAME *a = (X509_NAME *)*val;
  if (a->modified && (!x509_name_encode(a) || !x509_name_canon(a))) {
    return -1;
  }
  int ret = a->bytes->length;
  if (out != NULL) {
    OPENSSL_memcpy(*out, a->bytes->data, ret);
    *out += ret;
  }
  return ret;
}

// Rebuilds the cached DER in |a->bytes| from |a->entries|: regroups the flat
// entry list into per-|set| inner stacks (entries are borrowed, not copied),
// then encodes via X509_NAME_INTERNAL. Returns 1 on success, 0 on failure.
static int x509_name_encode(X509_NAME *a) {
  int len;
  unsigned char *p;
  STACK_OF(X509_NAME_ENTRY) *entries = NULL;
  X509_NAME_ENTRY *entry;
  int set = -1;
  size_t i;
  STACK_OF(STACK_OF_X509_NAME_ENTRY) *intname =
      sk_STACK_OF_X509_NAME_ENTRY_new_null();
  {
    if (!intname) {
      goto err;
    }
    for (i = 0; i < sk_X509_NAME_ENTRY_num(a->entries); i++) {
      entry = sk_X509_NAME_ENTRY_value(a->entries, i);
      // A new |set| value starts a new RDN (inner stack).
      if (entry->set != set) {
        entries = sk_X509_NAME_ENTRY_new_null();
        if (!entries) {
          goto err;
        }
        if (!sk_STACK_OF_X509_NAME_ENTRY_push(intname, entries)) {
          sk_X509_NAME_ENTRY_free(entries);
          goto err;
        }
        set = entry->set;
      }
      if (!sk_X509_NAME_ENTRY_push(entries, entry)) {
        goto err;
      }
    }
    ASN1_VALUE *intname_val = (ASN1_VALUE *)intname;
    // First pass computes the length, second pass writes the bytes.
    len = ASN1_item_ex_i2d(&intname_val, NULL,
                           ASN1_ITEM_rptr(X509_NAME_INTERNAL),
                           /*tag=*/-1, /*aclass=*/0);
    if (len <= 0) {
      goto err;
    }
    if (!BUF_MEM_grow(a->bytes, len)) {
      goto err;
    }
    p = (unsigned char *)a->bytes->data;
    if (ASN1_item_ex_i2d(&intname_val, &p, ASN1_ITEM_rptr(X509_NAME_INTERNAL),
                         /*tag=*/-1, /*aclass=*/0) <= 0) {
      goto err;
    }
    // Free only the shells; the entries are still owned by a->entries.
    sk_STACK_OF_X509_NAME_ENTRY_pop_free(intname,
                                         local_sk_X509_NAME_ENTRY_free);
    a->modified = 0;
    return 1;
  }

err:
  sk_STACK_OF_X509_NAME_ENTRY_pop_free(intname, local_sk_X509_NAME_ENTRY_free);
  return 0;
}

// This function generates the canonical encoding of the Name structure. In
// it all strings are converted to UTF8, leading, trailing and multiple
// spaces collapsed, converted to lower case and the leading SEQUENCE header
// removed. In future we could also normalize the UTF8 too. By doing this
// comparison of Name structures can be rapidly perfomed by just using
// OPENSSL_memcmp() of the canonical encoding. By omitting the leading SEQUENCE
// name constraints of type dirName can also be checked with a simple
// OPENSSL_memcmp().
// Recomputes |a->canon_enc|/|a->canon_enclen| (see the comment above): builds
// a deep copy of the entries with canonicalized string values, then encodes
// the RDN sequence *without* the outer SEQUENCE header. Returns 1 on success,
// 0 on failure.
static int x509_name_canon(X509_NAME *a) {
  unsigned char *p;
  STACK_OF(STACK_OF_X509_NAME_ENTRY) *intname = NULL;
  STACK_OF(X509_NAME_ENTRY) *entries = NULL;
  X509_NAME_ENTRY *entry, *tmpentry = NULL;
  int set = -1, ret = 0, len;
  size_t i;
  // Drop any previous canonical encoding before rebuilding.
  if (a->canon_enc) {
    OPENSSL_free(a->canon_enc);
    a->canon_enc = NULL;
  }
  // Special case: empty X509_NAME => null encoding
  if (sk_X509_NAME_ENTRY_num(a->entries) == 0) {
    a->canon_enclen = 0;
    return 1;
  }
  intname = sk_STACK_OF_X509_NAME_ENTRY_new_null();
  if (!intname) {
    goto err;
  }
  for (i = 0; i < sk_X509_NAME_ENTRY_num(a->entries); i++) {
    entry = sk_X509_NAME_ENTRY_value(a->entries, i);
    // A new |set| value starts a new RDN (inner stack).
    if (entry->set != set) {
      entries = sk_X509_NAME_ENTRY_new_null();
      if (!entries) {
        goto err;
      }
      if (!sk_STACK_OF_X509_NAME_ENTRY_push(intname, entries)) {
        sk_X509_NAME_ENTRY_free(entries);
        goto err;
      }
      set = entry->set;
    }
    // Copy each entry, canonicalizing its string value.
    tmpentry = X509_NAME_ENTRY_new();
    if (tmpentry == NULL) {
      goto err;
    }
    tmpentry->object = OBJ_dup(entry->object);
    if (!asn1_string_canon(tmpentry->value, entry->value)) {
      goto err;
    }
    if (!sk_X509_NAME_ENTRY_push(entries, tmpentry)) {
      goto err;
    }
    tmpentry = NULL;
  }
  // Finally generate encoding
  len = i2d_name_canon(intname, NULL);
  if (len < 0) {
    goto err;
  }
  a->canon_enclen = len;
  // NOTE(review): the reinterpret_cast<...> template argument was stripped
  // by extraction; upstream casts to unsigned char *.
  p = reinterpret_cast(OPENSSL_malloc(a->canon_enclen));
  if (!p) {
    goto err;
  }
  a->canon_enc = p;
  i2d_name_canon(intname, &p);
  ret = 1;

err:
  if (tmpentry) {
    X509_NAME_ENTRY_free(tmpentry);
  }
  if (intname) {
    // The copies (and shells) built here are always owned by |intname|.
    sk_STACK_OF_X509_NAME_ENTRY_pop_free(intname,
                                         local_sk_X509_NAME_ENTRY_pop_free);
  }
  return ret;
}

// Bitmap of all the types of string that will be canonicalized.
#define ASN1_MASK_CANON                                          \
  (B_ASN1_UTF8STRING | B_ASN1_BMPSTRING | B_ASN1_UNIVERSALSTRING | \
   B_ASN1_PRINTABLESTRING | B_ASN1_T61STRING | B_ASN1_IA5STRING |  \
   B_ASN1_VISIBLESTRING)

// Canonicalizes |in| into |out|: string types in ASN1_MASK_CANON are
// converted to UTF-8, stripped of leading/trailing whitespace, inner runs of
// whitespace collapsed to one space, and lower-cased (via OPENSSL_tolower);
// other types are copied unchanged. Returns 1 on success, 0 on failure.
static int asn1_string_canon(ASN1_STRING *out, ASN1_STRING *in) {
  unsigned char *to, *from;
  int len, i;
  // If type not in bitmask just copy string across
  if (!(ASN1_tag2bit(in->type) & ASN1_MASK_CANON)) {
    if (!ASN1_STRING_copy(out, in)) {
      return 0;
    }
    return 1;
  }
  out->type = V_ASN1_UTF8STRING;
  out->length = ASN1_STRING_to_UTF8(&out->data, in);
  if (out->length == -1) {
    return 0;
  }
  to = out->data;
  from = to;
  len = out->length;
  // Convert string in place to canonical form.
  // Ignore leading spaces
  while ((len > 0) && OPENSSL_isspace(*from)) {
    from++;
    len--;
  }
  to = from + len;
  // Ignore trailing spaces
  while ((len > 0) && OPENSSL_isspace(to[-1])) {
    to--;
    len--;
  }
  to = out->data;
  i = 0;
  while (i < len) {
    // Collapse multiple spaces
    if (OPENSSL_isspace(*from)) {
      // Copy one space across
      *to++ = ' ';
      // Ignore subsequent spaces. Note: don't need to check len here
      // because we know the last character is a non-space so we can't
      // overflow.
      do {
        from++;
        i++;
      } while (OPENSSL_isspace(*from));
    } else {
      *to++ = OPENSSL_tolower(*from);
      from++;
      i++;
    }
  }
  out->length = to - out->data;
  return 1;
}

// Encodes (or measures, when |in| is NULL) each RDN in turn WITHOUT an outer
// SEQUENCE wrapper, producing the canonical Name encoding. Returns the total
// length, or a negative value on error.
static int i2d_name_canon(STACK_OF(STACK_OF_X509_NAME_ENTRY) *_intname,
                          unsigned char **in) {
  int len, ltmp;
  size_t i;
  ASN1_VALUE *v;
  STACK_OF(ASN1_VALUE) *intname = (STACK_OF(ASN1_VALUE) *)_intname;
  len = 0;
  for (i = 0; i < sk_ASN1_VALUE_num(intname); i++) {
    v = sk_ASN1_VALUE_value(intname, i);
    ltmp = ASN1_item_ex_i2d(&v, in, ASN1_ITEM_rptr(X509_NAME_ENTRIES),
                            /*tag=*/-1, /*aclass=*/0);
    if (ltmp < 0) {
      return ltmp;
    }
    len += ltmp;
  }
  return len;
}

// Replaces *xn with a duplicate of |name|. Returns 1 on success, 0 if the
// duplication fails (in which case *xn is untouched).
int X509_NAME_set(X509_NAME **xn, X509_NAME *name) {
  if ((name = X509_NAME_dup(name)) == NULL) {
    return 0;
  }
  X509_NAME_free(*xn);
  *xn = name;
  return 1;
}

// Returns the RDN (set) index this entry belongs to.
int X509_NAME_ENTRY_set(const X509_NAME_ENTRY *ne) { return ne->set; }

// Exposes the cached DER encoding of |nm|. The i2d call first refreshes the
// cache if the name was modified. The returned pointer aliases |nm|'s
// internal buffer — it is not a copy.
int X509_NAME_get0_der(X509_NAME *nm, const unsigned char **out_der,
                       size_t *out_der_len) {
  // Make sure encoding is valid
  if (i2d_X509_NAME(nm, NULL) <= 0) {
    return 0;
  }
  if (out_der != NULL) {
    *out_der = (unsigned char *)nm->bytes->data;
  }
  if (out_der_len != NULL) {
    *out_der_len = nm->bytes->length;
  }
  return 1;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x_pubkey.cc
================================================

/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the <...> header names below were lost in extraction;
// restore them from upstream x_pubkey.cc.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "../internal.h"
#include "../internal.h"
#include "internal.h"

// Refreshes the cached |pub->pkey| after the SPKI contents change, by
// round-tripping the structure through its DER encoding.
static void x509_pubkey_changed(X509_PUBKEY *pub) {
  EVP_PKEY_free(pub->pkey);
  pub->pkey = NULL;
  // Re-encode the |X509_PUBKEY| to DER and parse it with EVP's APIs.
  uint8_t *spki = NULL;
  int spki_len = i2d_X509_PUBKEY(pub, &spki);
  EVP_PKEY *pkey;
  if (spki_len < 0) {
    goto err;
  }
  CBS cbs;
  CBS_init(&cbs, spki, (size_t)spki_len);
  pkey = EVP_parse_public_key(&cbs);
  // Reject trailing data after the parsed key.
  if (pkey == NULL || CBS_len(&cbs) != 0) {
    EVP_PKEY_free(pkey);
    goto err;
  }
  pub->pkey = pkey;

err:
  OPENSSL_free(spki);
  // If the operation failed, clear errors. An |X509_PUBKEY| whose key we cannot
  // parse is still a valid SPKI. It just cannot be converted to an |EVP_PKEY|.
  ERR_clear_error();
}

// ASN.1 lifecycle callback: free the cached pkey on destruction, and refresh
// it after decoding.
static int pubkey_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it,
                     void *exarg) {
  X509_PUBKEY *pubkey = (X509_PUBKEY *)*pval;
  if (operation == ASN1_OP_FREE_POST) {
    EVP_PKEY_free(pubkey->pkey);
  } else if (operation == ASN1_OP_D2I_POST) {
    x509_pubkey_changed(pubkey);
  }
  return 1;
}

// SubjectPublicKeyInfo: AlgorithmIdentifier plus the key as a BIT STRING.
ASN1_SEQUENCE_cb(X509_PUBKEY, pubkey_cb) = {
    ASN1_SIMPLE(X509_PUBKEY, algor, X509_ALGOR),
    ASN1_SIMPLE(X509_PUBKEY, public_key, ASN1_BIT_STRING),
} ASN1_SEQUENCE_END_cb(X509_PUBKEY, X509_PUBKEY)

IMPLEMENT_ASN1_FUNCTIONS_const(X509_PUBKEY)

// Replaces *x with a fresh X509_PUBKEY built from |pkey|, by marshalling the
// key to SPKI DER and reparsing it. Returns 1 on success, 0 on failure
// (in which case *x is untouched).
int X509_PUBKEY_set(X509_PUBKEY **x, EVP_PKEY *pkey) {
  X509_PUBKEY *pk = NULL;
  uint8_t *spki = NULL;
  size_t spki_len;
  if (x == NULL) {
    return 0;
  }
  CBB cbb;
  const uint8_t *p;
  if (!CBB_init(&cbb, 0) ||  //
      !EVP_marshal_public_key(&cbb, pkey) ||
      !CBB_finish(&cbb, &spki, &spki_len) ||  //
      spki_len > LONG_MAX) {
    CBB_cleanup(&cbb);
    OPENSSL_PUT_ERROR(X509, X509_R_PUBLIC_KEY_ENCODE_ERROR);
    goto error;
  }
  p = spki;
  pk = d2i_X509_PUBKEY(NULL, &p, (long)spki_len);
  // Require the whole buffer to have been consumed.
  if (pk == NULL || p != spki + spki_len) {
    OPENSSL_PUT_ERROR(X509, X509_R_PUBLIC_KEY_DECODE_ERROR);
    goto error;
  }
  OPENSSL_free(spki);
  X509_PUBKEY_free(*x);
  *x = pk;
  return 1;

error:
  X509_PUBKEY_free(pk);
  OPENSSL_free(spki);
  return 0;
}

// Returns the cached EVP_PKEY without adding a reference, or NULL (with an
// error pushed) if the key could not be parsed.
EVP_PKEY *X509_PUBKEY_get0(const X509_PUBKEY *key) {
  if (key == NULL) {
    return NULL;
  }
  if (key->pkey == NULL) {
    OPENSSL_PUT_ERROR(X509, X509_R_PUBLIC_KEY_DECODE_ERROR);
    return NULL;
  }
  return key->pkey;
}

// As X509_PUBKEY_get0, but returns a new reference the caller must free.
EVP_PKEY *X509_PUBKEY_get(const X509_PUBKEY *key) {
  EVP_PKEY *pkey = X509_PUBKEY_get0(key);
  if (pkey != NULL) {
    EVP_PKEY_up_ref(pkey);
  }
  return pkey;
}

// Installs the algorithm parameters and raw key bytes (taking ownership of
// |key|), then refreshes the cached EVP_PKEY. Returns 1 on success.
int X509_PUBKEY_set0_param(X509_PUBKEY *pub, ASN1_OBJECT *obj, int param_type,
                           void *param_value, uint8_t *key, int key_len) {
  if (!X509_ALGOR_set0(pub->algor, obj, param_type, param_value)) {
    return 0;
  }
  ASN1_STRING_set0(pub->public_key, key, key_len);
  // Set the number of unused bits to zero.
  pub->public_key->flags &= ~(ASN1_STRING_FLAG_BITS_LEFT | 0x07);
  pub->public_key->flags |= ASN1_STRING_FLAG_BITS_LEFT;
  x509_pubkey_changed(pub);
  return 1;
}

// Exposes the algorithm OID, raw key bytes, and/or AlgorithmIdentifier.
// All out-pointers are optional; no ownership is transferred.
int X509_PUBKEY_get0_param(ASN1_OBJECT **out_obj, const uint8_t **out_key,
                           int *out_key_len, X509_ALGOR **out_alg,
                           X509_PUBKEY *pub) {
  if (out_obj != NULL) {
    *out_obj = pub->algor->algorithm;
  }
  if (out_key != NULL) {
    *out_key = pub->public_key->data;
    *out_key_len = pub->public_key->length;
  }
  if (out_alg != NULL) {
    *out_alg = pub->algor;
  }
  return 1;
}

const ASN1_BIT_STRING *X509_PUBKEY_get0_public_key(const X509_PUBKEY *pub) {
  return pub->public_key;
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x_req.cc
================================================

/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): header names lost in extraction.
#include
#include
#include
#include
#include "internal.h"

// X509_REQ_INFO is handled in an unusual way to get round invalid encodings.
// Some broken certificate requests don't encode the attributes field if it
// is empty. This is in violation of PKCS#10 but we need to tolerate it. We
// do this by making the attributes field OPTIONAL then using the callback to
// initialise it to an empty STACK. This means that the field will be
// correctly encoded unless we NULL out the field.
// ASN.1 lifecycle callback for X509_REQ_INFO: seeds the attributes stack on
// creation (see the comment above) and validates the CSR version after
// decoding.
static int rinf_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it,
                   void *exarg) {
  X509_REQ_INFO *rinf = (X509_REQ_INFO *)*pval;
  if (operation == ASN1_OP_NEW_POST) {
    rinf->attributes = sk_X509_ATTRIBUTE_new_null();
    if (!rinf->attributes) {
      return 0;
    }
  }
  if (operation == ASN1_OP_D2I_POST) {
    // The only defined CSR version is v1(0). For compatibility, we also accept
    // a hypothetical v3(2). Although not defined, older versions of certbot
    // use it. See https://github.com/certbot/certbot/pull/9334.
    long version = ASN1_INTEGER_get(rinf->version);
    if (version != X509_REQ_VERSION_1 && version != 2) {
      OPENSSL_PUT_ERROR(X509, X509_R_INVALID_VERSION);
      return 0;
    }
  }
  return 1;
}

// CertificationRequestInfo (PKCS#10) with a cached encoding (|enc|).
ASN1_SEQUENCE_enc(X509_REQ_INFO, enc, rinf_cb) = {
    ASN1_SIMPLE(X509_REQ_INFO, version, ASN1_INTEGER),
    ASN1_SIMPLE(X509_REQ_INFO, subject, X509_NAME),
    ASN1_SIMPLE(X509_REQ_INFO, pubkey, X509_PUBKEY),
    // This isn't really OPTIONAL but it gets around invalid encodings.
    ASN1_IMP_SET_OF_OPT(X509_REQ_INFO, attributes, X509_ATTRIBUTE, 0),
} ASN1_SEQUENCE_END_enc(X509_REQ_INFO, X509_REQ_INFO)

IMPLEMENT_ASN1_FUNCTIONS(X509_REQ_INFO)

// CertificationRequest: the info, the signature algorithm, and the signature.
ASN1_SEQUENCE(X509_REQ) = {
    ASN1_SIMPLE(X509_REQ, req_info, X509_REQ_INFO),
    ASN1_SIMPLE(X509_REQ, sig_alg, X509_ALGOR),
    ASN1_SIMPLE(X509_REQ, signature, ASN1_BIT_STRING),
} ASN1_SEQUENCE_END(X509_REQ)

IMPLEMENT_ASN1_FUNCTIONS(X509_REQ)
IMPLEMENT_ASN1_DUP_FUNCTION(X509_REQ)

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x_sig.cc
================================================

/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
 You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html */

// NOTE(review): header names lost in extraction.
#include
#include
#include

// Internal layout of X509_SIG: an AlgorithmIdentifier plus the digest bytes.
struct X509_sig_st {
  X509_ALGOR *algor;
  ASN1_OCTET_STRING *digest;
} /* X509_SIG */;

ASN1_SEQUENCE(X509_SIG) = {
    ASN1_SIMPLE(X509_SIG, algor, X509_ALGOR),
    ASN1_SIMPLE(X509_SIG, digest, ASN1_OCTET_STRING),
} ASN1_SEQUENCE_END(X509_SIG)

IMPLEMENT_ASN1_FUNCTIONS_const(X509_SIG)

// Const accessors: expose the algorithm and/or digest without transferring
// ownership. Either out-pointer may be NULL.
void X509_SIG_get0(const X509_SIG *sig, const X509_ALGOR **out_alg,
                   const ASN1_OCTET_STRING **out_digest) {
  if (out_alg != NULL) {
    *out_alg = sig->algor;
  }
  if (out_digest != NULL) {
    *out_digest = sig->digest;
  }
}

// Mutable accessors with the same contract as X509_SIG_get0.
void X509_SIG_getm(X509_SIG *sig, X509_ALGOR **out_alg,
                   ASN1_OCTET_STRING **out_digest) {
  if (out_alg != NULL) {
    *out_alg = sig->algor;
  }
  if (out_digest != NULL) {
    *out_digest = sig->digest;
  }
}

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x_spki.cc
================================================

/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
 You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html */

// NOTE(review): header names lost in extraction.
#include
#include
#include "internal.h"

// Netscape SPKAC: an SPKI public key together with a challenge string.
ASN1_SEQUENCE(NETSCAPE_SPKAC) = {
    ASN1_SIMPLE(NETSCAPE_SPKAC, pubkey, X509_PUBKEY),
    ASN1_SIMPLE(NETSCAPE_SPKAC, challenge, ASN1_IA5STRING),
} ASN1_SEQUENCE_END(NETSCAPE_SPKAC)

IMPLEMENT_ASN1_FUNCTIONS_const(NETSCAPE_SPKAC)

// Netscape SPKI: the SPKAC plus its signature algorithm and signature.
ASN1_SEQUENCE(NETSCAPE_SPKI) = {
    ASN1_SIMPLE(NETSCAPE_SPKI, spkac, NETSCAPE_SPKAC),
    ASN1_SIMPLE(NETSCAPE_SPKI, sig_algor, X509_ALGOR),
    ASN1_SIMPLE(NETSCAPE_SPKI, signature, ASN1_BIT_STRING),
} ASN1_SEQUENCE_END(NETSCAPE_SPKI)

IMPLEMENT_ASN1_FUNCTIONS_const(NETSCAPE_SPKI)

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x_val.cc
================================================

/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): header names lost in extraction.
#include
#include
#include
#include "internal.h"

// Validity: the certificate's notBefore/notAfter window.
ASN1_SEQUENCE(X509_VAL) = {
    ASN1_SIMPLE(X509_VAL, notBefore, ASN1_TIME),
    ASN1_SIMPLE(X509_VAL, notAfter, ASN1_TIME),
} ASN1_SEQUENCE_END(X509_VAL)

IMPLEMENT_ASN1_FUNCTIONS_const(X509_VAL)

================================================
FILE: Sources/CNIOBoringSSL/crypto/x509/x_x509.cc
================================================

/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
 You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html */

// NOTE(review): header names lost in extraction.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "../asn1/internal.h"
#include "../bytestring/internal.h"
#include "../internal.h"
#include "internal.h"

// Registry for the X509 ex_data slots used by X509_get_ex_new_index et al.
static CRYPTO_EX_DATA_CLASS g_ex_data_class = CRYPTO_EX_DATA_CLASS_INIT;

// TBSCertificate template, with a cached encoding (|enc|) so re-serialization
// preserves the original bytes. Tags [1]/[2] are the issuer/subject unique
// IDs; [3] wraps the extension list.
ASN1_SEQUENCE_enc(X509_CINF, enc, 0) = {
    ASN1_EXP_OPT(X509_CINF, version, ASN1_INTEGER, 0),
    ASN1_SIMPLE(X509_CINF, serialNumber, ASN1_INTEGER),
    ASN1_SIMPLE(X509_CINF, signature, X509_ALGOR),
    ASN1_SIMPLE(X509_CINF, issuer, X509_NAME),
    ASN1_SIMPLE(X509_CINF, validity, X509_VAL),
    ASN1_SIMPLE(X509_CINF, subject, X509_NAME),
    ASN1_SIMPLE(X509_CINF, key, X509_PUBKEY),
    ASN1_IMP_OPT(X509_CINF, issuerUID, ASN1_BIT_STRING, 1),
    ASN1_IMP_OPT(X509_CINF, subjectUID, ASN1_BIT_STRING, 2),
    ASN1_EXP_SEQUENCE_OF_OPT(X509_CINF, extensions, X509_EXTENSION, 3),
} ASN1_SEQUENCE_END_enc(X509_CINF, X509_CINF)

IMPLEMENT_ASN1_FUNCTIONS(X509_CINF)

// x509_new_null returns a new |X509| object where the |cert_info|, |sig_alg|,
// and |signature| fields are not yet filled in.
// Zero-allocates the X509 shell and initializes refcount, ex_data, and lock.
// NOTE(review): the reinterpret_cast<...> template argument was stripped by
// extraction; upstream casts the OPENSSL_zalloc result to X509 *.
static X509 *x509_new_null(void) {
  X509 *ret = reinterpret_cast(OPENSSL_zalloc(sizeof(X509)));
  if (ret == NULL) {
    return NULL;
  }
  ret->references = 1;
  ret->ex_pathlen = -1;
  CRYPTO_new_ex_data(&ret->ex_data);
  CRYPTO_MUTEX_init(&ret->lock);
  return ret;
}

// Public constructor: the shell plus freshly-allocated cert_info, sig_alg,
// and signature. Returns NULL on allocation failure.
X509 *X509_new(void) {
  X509 *ret = x509_new_null();
  if (ret == NULL) {
    return NULL;
  }
  ret->cert_info = X509_CINF_new();
  ret->sig_alg = X509_ALGOR_new();
  ret->signature = ASN1_BIT_STRING_new();
  if (ret->cert_info == NULL || ret->sig_alg == NULL ||
      ret->signature == NULL) {
    X509_free(ret);
    return NULL;
  }
  return ret;
}

// Drops one reference; frees the certificate and all cached extension
// decodings when the count reaches zero. NULL is a no-op.
void X509_free(X509 *x509) {
  if (x509 == NULL || !CRYPTO_refcount_dec_and_test_zero(&x509->references)) {
    return;
  }
  CRYPTO_free_ex_data(&g_ex_data_class, x509, &x509->ex_data);
  X509_CINF_free(x509->cert_info);
  X509_ALGOR_free(x509->sig_alg);
  ASN1_BIT_STRING_free(x509->signature);
  ASN1_OCTET_STRING_free(x509->skid);
  AUTHORITY_KEYID_free(x509->akid);
  CRL_DIST_POINTS_free(x509->crldp);
  GENERAL_NAMES_free(x509->altname);
  NAME_CONSTRAINTS_free(x509->nc);
  X509_CERT_AUX_free(x509->aux);
  CRYPTO_MUTEX_cleanup(&x509->lock);
  OPENSSL_free(x509);
}

// Parses one Certificate from |cbs|: splits the outer SEQUENCE into
// tbsCertificate / signatureAlgorithm / signatureValue, decodes each field,
// and enforces the version-dependent field rules from RFC 5280. |buf|, if
// non-NULL, backs the parsed tbs so it can be retained without copying.
// Returns NULL on any parse or validation error.
static X509 *x509_parse(CBS *cbs, CRYPTO_BUFFER *buf) {
  CBS cert, tbs, sigalg, sig;
  if (!CBS_get_asn1(cbs, &cert, CBS_ASN1_SEQUENCE) ||
      // Bound the length to comfortably fit in an int. Lengths in this
      // module often omit overflow checks.
      CBS_len(&cert) > INT_MAX / 2 ||
      !CBS_get_asn1_element(&cert, &tbs, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1_element(&cert, &sigalg, CBS_ASN1_SEQUENCE)) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_DECODE_ERROR);
    return NULL;
  }
  // For just the signature field, we accept non-minimal BER lengths, though not
  // indefinite-length encoding. See b/18228011.
  //
  // TODO(crbug.com/boringssl/354): Switch the affected callers to convert the
  // certificate before parsing and then remove this workaround.
  CBS_ASN1_TAG tag;
  size_t header_len;
  int indefinite;
  if (!CBS_get_any_ber_asn1_element(&cert, &sig, &tag, &header_len,
                                    /*out_ber_found=*/NULL, &indefinite) ||
      tag != CBS_ASN1_BITSTRING || indefinite ||  //
      !CBS_skip(&sig, header_len) ||  //
      CBS_len(&cert) != 0) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_DECODE_ERROR);
    return NULL;
  }
  X509 *ret = x509_new_null();
  if (ret == NULL) {
    return NULL;
  }
  {
    // TODO(crbug.com/boringssl/443): When the rest of the library is decoupled
    // from the tasn_*.c implementation, replace this with |CBS|-based
    // functions.
    const uint8_t *inp = CBS_data(&tbs);
    if (ASN1_item_ex_d2i((ASN1_VALUE **)&ret->cert_info, &inp, CBS_len(&tbs),
                         ASN1_ITEM_rptr(X509_CINF), /*tag=*/-1, /*aclass=*/0,
                         /*opt=*/0, buf) <= 0 ||
        inp != CBS_data(&tbs) + CBS_len(&tbs)) {
      goto err;
    }
    inp = CBS_data(&sigalg);
    ret->sig_alg = d2i_X509_ALGOR(NULL, &inp, CBS_len(&sigalg));
    if (ret->sig_alg == NULL || inp != CBS_data(&sigalg) + CBS_len(&sigalg)) {
      goto err;
    }
    inp = CBS_data(&sig);
    ret->signature = c2i_ASN1_BIT_STRING(NULL, &inp, CBS_len(&sig));
    if (ret->signature == NULL || inp != CBS_data(&sig) + CBS_len(&sig)) {
      goto err;
    }
    // The version must be one of v1(0), v2(1), or v3(2).
    long version = X509_VERSION_1;
    if (ret->cert_info->version != NULL) {
      version = ASN1_INTEGER_get(ret->cert_info->version);
      // TODO(https://crbug.com/boringssl/364): |X509_VERSION_1| should
      // also be rejected here. This means an explicitly-encoded X.509v1
      // version. v1 is DEFAULT, so DER requires it be omitted.
      if (version < X509_VERSION_1 || version > X509_VERSION_3) {
        OPENSSL_PUT_ERROR(X509, X509_R_INVALID_VERSION);
        goto err;
      }
    }
    // Per RFC 5280, section 4.1.2.8, these fields require v2 or v3.
    if (version == X509_VERSION_1 && (ret->cert_info->issuerUID != NULL ||
                                      ret->cert_info->subjectUID != NULL)) {
      OPENSSL_PUT_ERROR(X509, X509_R_INVALID_FIELD_FOR_VERSION);
      goto err;
    }
    // Per RFC 5280, section 4.1.2.9, extensions require v3.
    if (version != X509_VERSION_3 && ret->cert_info->extensions != NULL) {
      OPENSSL_PUT_ERROR(X509, X509_R_INVALID_FIELD_FOR_VERSION);
      goto err;
    }
    return ret;
  }

err:
  X509_free(ret);
  return NULL;
}

// Legacy d2i entry point wrapping x509_parse. On success advances *inp past
// the certificate; if |out| is non-NULL, replaces *out (even on failure,
// matching d2i semantics: *out becomes NULL when parsing fails).
X509 *d2i_X509(X509 **out, const uint8_t **inp, long len) {
  X509 *ret = NULL;
  if (len < 0) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_BUFFER_TOO_SMALL);
    goto err;
  }
  CBS cbs;
  CBS_init(&cbs, *inp, (size_t)len);
  ret = x509_parse(&cbs, NULL);
  if (ret == NULL) {
    goto err;
  }
  *inp = CBS_data(&cbs);

err:
  if (out != NULL) {
    X509_free(*out);
    *out = ret;
  }
  return ret;
}

// Serializes the certificate by concatenating the three i2d'd components
// inside one SEQUENCE. Returns the encoded length or -1 on error.
int i2d_X509(X509 *x509, uint8_t **outp) {
  if (x509 == NULL) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_MISSING_VALUE);
    return -1;
  }
  CBB cbb, cert;
  int len;
  if (!CBB_init(&cbb, 64) ||  //
      !CBB_add_asn1(&cbb, &cert, CBS_ASN1_SEQUENCE)) {
    goto err;
  }
  // TODO(crbug.com/boringssl/443): When the rest of the library is decoupled
  // from the tasn_*.c implementation, replace this with |CBS|-based functions.
  uint8_t *out;
  len = i2d_X509_CINF(x509->cert_info, NULL);
  if (len < 0 ||  //
      !CBB_add_space(&cert, &out, (size_t)len) ||
      i2d_X509_CINF(x509->cert_info, &out) != len) {
    goto err;
  }
  len = i2d_X509_ALGOR(x509->sig_alg, NULL);
  if (len < 0 ||  //
      !CBB_add_space(&cert, &out, (size_t)len) ||
      i2d_X509_ALGOR(x509->sig_alg, &out) != len) {
    goto err;
  }
  len = i2d_ASN1_BIT_STRING(x509->signature, NULL);
  if (len < 0 ||  //
      !CBB_add_space(&cert, &out, (size_t)len) ||
      i2d_ASN1_BIT_STRING(x509->signature, &out) != len) {
    goto err;
  }
  return CBB_finish_i2d(&cbb, outp);

err:
  CBB_cleanup(&cbb);
  return -1;
}

// ASN1_EXTERN_FUNCS adapters so templated structures can embed X509.
static int x509_new_cb(ASN1_VALUE **pval, const ASN1_ITEM *it) {
  *pval = (ASN1_VALUE *)X509_new();
  return *pval != NULL;
}

static void x509_free_cb(ASN1_VALUE **pval, const ASN1_ITEM *it) {
  X509_free((X509 *)*pval);
  *pval = NULL;
}

static int x509_d2i_cb(ASN1_VALUE **pval, const unsigned char **in, long len,
                       const ASN1_ITEM *it, int opt, ASN1_TLC *ctx) {
  if (len < 0) {
    OPENSSL_PUT_ERROR(ASN1, ASN1_R_BUFFER_TOO_SMALL);
    return 0;
  }
  CBS cbs;
  CBS_init(&cbs, *in, len);
  // In OPTIONAL contexts, a non-SEQUENCE tag means "field absent", not error.
  if (opt && !CBS_peek_asn1_tag(&cbs, CBS_ASN1_SEQUENCE)) {
    return -1;
  }
  X509 *ret = x509_parse(&cbs, NULL);
  if (ret == NULL) {
    return 0;
  }
  *in = CBS_data(&cbs);
  X509_free((X509 *)*pval);
  *pval = (ASN1_VALUE *)ret;
  return 1;
}

static int x509_i2d_cb(ASN1_VALUE **pval, unsigned char **out,
                       const ASN1_ITEM *it) {
  return i2d_X509((X509 *)*pval, out);
}

static const ASN1_EXTERN_FUNCS x509_extern_funcs = {
    x509_new_cb,
    x509_free_cb,
    x509_d2i_cb,
    x509_i2d_cb,
};

IMPLEMENT_EXTERN_ASN1(X509, V_ASN1_SEQUENCE, x509_extern_funcs)

// Deep-copies a certificate by round-tripping through DER.
X509 *X509_dup(X509 *x509) {
  uint8_t *der = NULL;
  int len = i2d_X509(x509, &der);
  if (len < 0) {
    return NULL;
  }
  const uint8_t *inp = der;
  X509 *ret = d2i_X509(NULL, &inp, len);
  OPENSSL_free(der);
  return ret;
}

// Parses a certificate that must span exactly the whole buffer; the buffer
// backs the result so the tbs bytes need not be copied.
X509 *X509_parse_from_buffer(CRYPTO_BUFFER *buf) {
  CBS cbs;
  CBS_init(&cbs, CRYPTO_BUFFER_data(buf), CRYPTO_BUFFER_len(buf));
  X509 *ret = x509_parse(&cbs, buf);
  if (ret == NULL || CBS_len(&cbs) != 0) {
    X509_free(ret);
    return NULL;
  }
  return ret;
}

// Adds a reference; always returns 1 (OpenSSL-compatible signature).
int X509_up_ref(X509 *x) {
  CRYPTO_refcount_inc(&x->references);
  return 1;
}

// ex_data plumbing; |unused|/|dup_unused| exist only for API compatibility.
int X509_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused,
                          CRYPTO_EX_dup *dup_unused,
                          CRYPTO_EX_free *free_func) {
  return CRYPTO_get_ex_new_index_ex(&g_ex_data_class, argl, argp, free_func);
}

int X509_set_ex_data(X509 *r, int idx, void *arg) {
  return (CRYPTO_set_ex_data(&r->ex_data, idx, arg));
}

void *X509_get_ex_data(X509 *r, int idx) {
  return (CRYPTO_get_ex_data(&r->ex_data, idx));
}

// X509_AUX ASN1 routines. X509_AUX is the name given to a certificate with
// extra info tagged on the end. Since these functions set how a certificate
// is trusted they should only be used when the certificate comes from a
// reliable source such as local storage.
// Parses a certificate followed by optional X509_CERT_AUX trust data. On
// failure after the certificate was parsed, the certificate is only freed if
// it was allocated here (|freeret|) rather than supplied via *a.
X509 *d2i_X509_AUX(X509 **a, const unsigned char **pp, long length) {
  const unsigned char *q = *pp;
  X509 *ret;
  int freeret = 0;
  if (!a || *a == NULL) {
    freeret = 1;
  }
  ret = d2i_X509(a, &q, length);
  // If certificate unreadable then forget it
  if (!ret) {
    return NULL;
  }
  // update length
  length -= q - *pp;
  // Parse auxiliary information if there is any.
  if (length > 0 && !d2i_X509_CERT_AUX(&ret->aux, &q, length)) {
    goto err;
  }
  *pp = q;
  return ret;

err:
  if (freeret) {
    X509_free(ret);
    if (a) {
      *a = NULL;
    }
  }
  return NULL;
}

// Serialize trusted certificate to *pp or just return the required buffer
// length if pp == NULL. We ultimately want to avoid modifying *pp in the
// error path, but that depends on similar hygiene in lower-level functions.
// Here we avoid compounding the problem.
static int i2d_x509_aux_internal(X509 *a, unsigned char **pp) {
  int length, tmplen;
  unsigned char *start = pp != NULL ? *pp : NULL;
  assert(pp == NULL || *pp != NULL);
  // This might perturb *pp on error, but fixing that belongs in i2d_X509()
  // not here. It should be that if a == NULL length is zero, but we check
  // both just in case.
  length = i2d_X509(a, pp);
  if (length <= 0 || a == NULL) {
    return length;
  }
  // Append the auxiliary trust data, if present, directly after the cert.
  if (a->aux != NULL) {
    tmplen = i2d_X509_CERT_AUX(a->aux, pp);
    if (tmplen < 0) {
      // Rewind *pp to where we started so the caller's pointer is unchanged.
      if (start != NULL) {
        *pp = start;
      }
      return tmplen;
    }
    length += tmplen;
  }
  return length;
}

// Serialize trusted certificate to *pp, or just return the required buffer
// length if pp == NULL.
//
// When pp is not NULL, but *pp == NULL, we allocate the buffer, but since
// we're writing two ASN.1 objects back to back, we can't have i2d_X509() do
// the allocation, nor can we allow i2d_X509_CERT_AUX() to increment the
// allocated buffer.
int i2d_X509_AUX(X509 *a, unsigned char **pp) { int length; unsigned char *tmp; // Buffer provided by caller if (pp == NULL || *pp != NULL) { return i2d_x509_aux_internal(a, pp); } // Obtain the combined length if ((length = i2d_x509_aux_internal(a, NULL)) <= 0) { return length; } // Allocate requisite combined storage *pp = tmp = reinterpret_cast(OPENSSL_malloc(length)); if (tmp == NULL) { return -1; // Push error onto error stack? } // Encode, but keep *pp at the originally malloced pointer length = i2d_x509_aux_internal(a, &tmp); if (length <= 0) { OPENSSL_free(*pp); *pp = NULL; } return length; } int i2d_re_X509_tbs(X509 *x509, unsigned char **outp) { asn1_encoding_clear(&x509->cert_info->enc); return i2d_X509_CINF(x509->cert_info, outp); } int i2d_X509_tbs(X509 *x509, unsigned char **outp) { return i2d_X509_CINF(x509->cert_info, outp); } int X509_set1_signature_algo(X509 *x509, const X509_ALGOR *algo) { X509_ALGOR *copy1 = X509_ALGOR_dup(algo); X509_ALGOR *copy2 = X509_ALGOR_dup(algo); if (copy1 == NULL || copy2 == NULL) { X509_ALGOR_free(copy1); X509_ALGOR_free(copy2); return 0; } X509_ALGOR_free(x509->sig_alg); x509->sig_alg = copy1; X509_ALGOR_free(x509->cert_info->signature); x509->cert_info->signature = copy2; return 1; } int X509_set1_signature_value(X509 *x509, const uint8_t *sig, size_t sig_len) { if (!ASN1_STRING_set(x509->signature, sig, sig_len)) { return 0; } x509->signature->flags &= ~(ASN1_STRING_FLAG_BITS_LEFT | 0x07); x509->signature->flags |= ASN1_STRING_FLAG_BITS_LEFT; return 1; } void X509_get0_signature(const ASN1_BIT_STRING **psig, const X509_ALGOR **palg, const X509 *x) { if (psig) { *psig = x->signature; } if (palg) { *palg = x->sig_alg; } } int X509_get_signature_nid(const X509 *x) { return OBJ_obj2nid(x->sig_alg->algorithm); } ================================================ FILE: Sources/CNIOBoringSSL/crypto/x509/x_x509a.cc ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. 
All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "internal.h" // X509_CERT_AUX routines. These are used to encode additional user // modifiable data about a certificate. This data is appended to the X509 // encoding when the *_X509_AUX routines are used. This means that the // "traditional" X509 routines will simply ignore the extra data. static X509_CERT_AUX *aux_get(X509 *x); ASN1_SEQUENCE(X509_CERT_AUX) = { ASN1_SEQUENCE_OF_OPT(X509_CERT_AUX, trust, ASN1_OBJECT), ASN1_IMP_SEQUENCE_OF_OPT(X509_CERT_AUX, reject, ASN1_OBJECT, 0), ASN1_OPT(X509_CERT_AUX, alias, ASN1_UTF8STRING), ASN1_OPT(X509_CERT_AUX, keyid, ASN1_OCTET_STRING), } ASN1_SEQUENCE_END(X509_CERT_AUX) IMPLEMENT_ASN1_FUNCTIONS_const(X509_CERT_AUX) static X509_CERT_AUX *aux_get(X509 *x) { if (!x) { return NULL; } if (!x->aux && !(x->aux = X509_CERT_AUX_new())) { return NULL; } return x->aux; } int X509_alias_set1(X509 *x, const uint8_t *name, ossl_ssize_t len) { X509_CERT_AUX *aux; // TODO(davidben): Empty aliases are not meaningful in PKCS#12, and the // getters cannot quite represent them. Also erase the object if |len| is // zero. if (!name) { if (!x || !x->aux || !x->aux->alias) { return 1; } ASN1_UTF8STRING_free(x->aux->alias); x->aux->alias = NULL; return 1; } if (!(aux = aux_get(x))) { return 0; } if (!aux->alias && !(aux->alias = ASN1_UTF8STRING_new())) { return 0; } return ASN1_STRING_set(aux->alias, name, len); } int X509_keyid_set1(X509 *x, const uint8_t *id, ossl_ssize_t len) { X509_CERT_AUX *aux; // TODO(davidben): Empty key IDs are not meaningful in PKCS#12, and the // getters cannot quite represent them. Also erase the object if |len| is // zero. 
if (!id) { if (!x || !x->aux || !x->aux->keyid) { return 1; } ASN1_OCTET_STRING_free(x->aux->keyid); x->aux->keyid = NULL; return 1; } if (!(aux = aux_get(x))) { return 0; } if (!aux->keyid && !(aux->keyid = ASN1_OCTET_STRING_new())) { return 0; } return ASN1_STRING_set(aux->keyid, id, len); } const uint8_t *X509_alias_get0(const X509 *x, int *out_len) { const ASN1_UTF8STRING *alias = x->aux != NULL ? x->aux->alias : NULL; if (out_len != NULL) { *out_len = alias != NULL ? alias->length : 0; } return alias != NULL ? alias->data : NULL; } const uint8_t *X509_keyid_get0(const X509 *x, int *out_len) { const ASN1_OCTET_STRING *keyid = x->aux != NULL ? x->aux->keyid : NULL; if (out_len != NULL) { *out_len = keyid != NULL ? keyid->length : 0; } return keyid != NULL ? keyid->data : NULL; } int X509_add1_trust_object(X509 *x, const ASN1_OBJECT *obj) { X509_CERT_AUX *aux; ASN1_OBJECT *objtmp = OBJ_dup(obj); if (objtmp == NULL) { goto err; } aux = aux_get(x); if (aux->trust == NULL) { aux->trust = sk_ASN1_OBJECT_new_null(); if (aux->trust == NULL) { goto err; } } if (!sk_ASN1_OBJECT_push(aux->trust, objtmp)) { goto err; } return 1; err: ASN1_OBJECT_free(objtmp); return 0; } int X509_add1_reject_object(X509 *x, const ASN1_OBJECT *obj) { X509_CERT_AUX *aux; ASN1_OBJECT *objtmp = OBJ_dup(obj); if (objtmp == NULL) { goto err; } aux = aux_get(x); if (aux->reject == NULL) { aux->reject = sk_ASN1_OBJECT_new_null(); if (aux->reject == NULL) { goto err; } } if (!sk_ASN1_OBJECT_push(aux->reject, objtmp)) { goto err; } return 1; err: ASN1_OBJECT_free(objtmp); return 0; } void X509_trust_clear(X509 *x) { if (x->aux && x->aux->trust) { sk_ASN1_OBJECT_pop_free(x->aux->trust, ASN1_OBJECT_free); x->aux->trust = NULL; } } void X509_reject_clear(X509 *x) { if (x->aux && x->aux->reject) { sk_ASN1_OBJECT_pop_free(x->aux->reject, ASN1_OBJECT_free); x->aux->reject = NULL; } } ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aes-gcm-avx10-x86_64-apple.S 
================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .section __DATA,__const .p2align 6 L$bswap_mask: .quad 0x08090a0b0c0d0e0f, 0x0001020304050607 L$gfpoly: .quad 1, 0xc200000000000000 L$gfpoly_and_internal_carrybit: .quad 1, 0xc200000000000001 L$ctr_pattern: .quad 0, 0 .quad 1, 0 L$inc_2blocks: .quad 2, 0 .quad 3, 0 L$inc_4blocks: .quad 4, 0 .text .globl _gcm_gmult_vpclmulqdq_avx10 .private_extern _gcm_gmult_vpclmulqdq_avx10 .p2align 5 _gcm_gmult_vpclmulqdq_avx10: _CET_ENDBR vmovdqu (%rdi),%xmm0 vmovdqu L$bswap_mask(%rip),%xmm1 vmovdqu 256-16(%rsi),%xmm2 vmovdqu L$gfpoly(%rip),%xmm3 vpshufb %xmm1,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm4 vpclmulqdq $0x01,%xmm2,%xmm0,%xmm5 vpclmulqdq $0x10,%xmm2,%xmm0,%xmm6 vpxord %xmm6,%xmm5,%xmm5 vpclmulqdq $0x01,%xmm4,%xmm3,%xmm6 vpshufd $0x4e,%xmm4,%xmm4 vpternlogd $0x96,%xmm6,%xmm4,%xmm5 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm0 vpclmulqdq $0x01,%xmm5,%xmm3,%xmm4 vpshufd $0x4e,%xmm5,%xmm5 vpternlogd $0x96,%xmm4,%xmm5,%xmm0 vpshufb %xmm1,%xmm0,%xmm0 vmovdqu %xmm0,(%rdi) ret .globl _gcm_init_vpclmulqdq_avx10_512 .private_extern _gcm_init_vpclmulqdq_avx10_512 .p2align 5 _gcm_init_vpclmulqdq_avx10_512: _CET_ENDBR leaq 256-64(%rdi),%r8 vpshufd $0x4e,(%rsi),%xmm3 vpshufd $0xd3,%xmm3,%xmm0 vpsrad $31,%xmm0,%xmm0 vpaddq %xmm3,%xmm3,%xmm3 vpternlogd $0x78,L$gfpoly_and_internal_carrybit(%rip),%xmm0,%xmm3 vbroadcasti32x4 L$gfpoly(%rip),%zmm5 vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0 vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1 vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2 vpxord %xmm2,%xmm1,%xmm1 vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2 vpshufd $0x4e,%xmm0,%xmm0 vpternlogd $0x96,%xmm2,%xmm0,%xmm1 vpclmulqdq $0x11,%xmm3,%xmm3,%xmm4 vpclmulqdq $0x01,%xmm1,%xmm5,%xmm0 vpshufd $0x4e,%xmm1,%xmm1 vpternlogd $0x96,%xmm0,%xmm1,%xmm4 vinserti128 
$1,%xmm3,%ymm4,%ymm3 vinserti128 $1,%xmm4,%ymm4,%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm0 vpclmulqdq $0x01,%ymm4,%ymm3,%ymm1 vpclmulqdq $0x10,%ymm4,%ymm3,%ymm2 vpxord %ymm2,%ymm1,%ymm1 vpclmulqdq $0x01,%ymm0,%ymm5,%ymm2 vpshufd $0x4e,%ymm0,%ymm0 vpternlogd $0x96,%ymm2,%ymm0,%ymm1 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm4 vpclmulqdq $0x01,%ymm1,%ymm5,%ymm0 vpshufd $0x4e,%ymm1,%ymm1 vpternlogd $0x96,%ymm0,%ymm1,%ymm4 vinserti64x4 $1,%ymm3,%zmm4,%zmm3 vshufi64x2 $0,%zmm4,%zmm4,%zmm4 vmovdqu8 %zmm3,(%r8) movl $3,%eax L$precompute_next__func1: subq $64,%r8 vpclmulqdq $0x00,%zmm4,%zmm3,%zmm0 vpclmulqdq $0x01,%zmm4,%zmm3,%zmm1 vpclmulqdq $0x10,%zmm4,%zmm3,%zmm2 vpxord %zmm2,%zmm1,%zmm1 vpclmulqdq $0x01,%zmm0,%zmm5,%zmm2 vpshufd $0x4e,%zmm0,%zmm0 vpternlogd $0x96,%zmm2,%zmm0,%zmm1 vpclmulqdq $0x11,%zmm4,%zmm3,%zmm3 vpclmulqdq $0x01,%zmm1,%zmm5,%zmm0 vpshufd $0x4e,%zmm1,%zmm1 vpternlogd $0x96,%zmm0,%zmm1,%zmm3 vmovdqu8 %zmm3,(%r8) decl %eax jnz L$precompute_next__func1 vzeroupper ret .globl _gcm_ghash_vpclmulqdq_avx10_512 .private_extern _gcm_ghash_vpclmulqdq_avx10_512 .p2align 5 _gcm_ghash_vpclmulqdq_avx10_512: _CET_ENDBR vmovdqu L$bswap_mask(%rip),%xmm4 vmovdqu L$gfpoly(%rip),%xmm10 vmovdqu (%rdi),%xmm5 vpshufb %xmm4,%xmm5,%xmm5 cmpq $64,%rcx jb L$aad_blockbyblock__func1 vshufi64x2 $0,%zmm4,%zmm4,%zmm4 vshufi64x2 $0,%zmm10,%zmm10,%zmm10 vmovdqu8 256-64(%rsi),%zmm9 cmpq $256-1,%rcx jbe L$aad_loop_1x__func1 vmovdqu8 256-256(%rsi),%zmm6 vmovdqu8 256-192(%rsi),%zmm7 vmovdqu8 256-128(%rsi),%zmm8 L$aad_loop_4x__func1: vmovdqu8 0(%rdx),%zmm0 vmovdqu8 64(%rdx),%zmm1 vmovdqu8 128(%rdx),%zmm2 vmovdqu8 192(%rdx),%zmm3 vpshufb %zmm4,%zmm0,%zmm0 vpxord %zmm5,%zmm0,%zmm0 vpshufb %zmm4,%zmm1,%zmm1 vpshufb %zmm4,%zmm2,%zmm2 vpshufb %zmm4,%zmm3,%zmm3 vpclmulqdq $0x00,%zmm6,%zmm0,%zmm5 vpclmulqdq $0x00,%zmm7,%zmm1,%zmm11 vpclmulqdq $0x00,%zmm8,%zmm2,%zmm12 vpxord %zmm11,%zmm5,%zmm5 vpclmulqdq $0x00,%zmm9,%zmm3,%zmm13 vpternlogd $0x96,%zmm13,%zmm12,%zmm5 vpclmulqdq $0x01,%zmm6,%zmm0,%zmm11 
vpclmulqdq $0x01,%zmm7,%zmm1,%zmm12 vpclmulqdq $0x01,%zmm8,%zmm2,%zmm13 vpternlogd $0x96,%zmm13,%zmm12,%zmm11 vpclmulqdq $0x01,%zmm9,%zmm3,%zmm12 vpclmulqdq $0x10,%zmm6,%zmm0,%zmm13 vpternlogd $0x96,%zmm13,%zmm12,%zmm11 vpclmulqdq $0x10,%zmm7,%zmm1,%zmm12 vpclmulqdq $0x10,%zmm8,%zmm2,%zmm13 vpternlogd $0x96,%zmm13,%zmm12,%zmm11 vpclmulqdq $0x01,%zmm5,%zmm10,%zmm13 vpclmulqdq $0x10,%zmm9,%zmm3,%zmm12 vpxord %zmm12,%zmm11,%zmm11 vpshufd $0x4e,%zmm5,%zmm5 vpclmulqdq $0x11,%zmm6,%zmm0,%zmm0 vpclmulqdq $0x11,%zmm7,%zmm1,%zmm1 vpclmulqdq $0x11,%zmm8,%zmm2,%zmm2 vpternlogd $0x96,%zmm13,%zmm5,%zmm11 vpclmulqdq $0x11,%zmm9,%zmm3,%zmm3 vpternlogd $0x96,%zmm2,%zmm1,%zmm0 vpclmulqdq $0x01,%zmm11,%zmm10,%zmm12 vpxord %zmm3,%zmm0,%zmm5 vpshufd $0x4e,%zmm11,%zmm11 vpternlogd $0x96,%zmm12,%zmm11,%zmm5 vextracti32x4 $1,%zmm5,%xmm0 vextracti32x4 $2,%zmm5,%xmm1 vextracti32x4 $3,%zmm5,%xmm2 vpxord %xmm0,%xmm5,%xmm5 vpternlogd $0x96,%xmm1,%xmm2,%xmm5 subq $-256,%rdx addq $-256,%rcx cmpq $256-1,%rcx ja L$aad_loop_4x__func1 cmpq $64,%rcx jb L$aad_large_done__func1 L$aad_loop_1x__func1: vmovdqu8 (%rdx),%zmm0 vpshufb %zmm4,%zmm0,%zmm0 vpxord %zmm0,%zmm5,%zmm5 vpclmulqdq $0x00,%zmm9,%zmm5,%zmm0 vpclmulqdq $0x01,%zmm9,%zmm5,%zmm1 vpclmulqdq $0x10,%zmm9,%zmm5,%zmm2 vpxord %zmm2,%zmm1,%zmm1 vpclmulqdq $0x01,%zmm0,%zmm10,%zmm2 vpshufd $0x4e,%zmm0,%zmm0 vpternlogd $0x96,%zmm2,%zmm0,%zmm1 vpclmulqdq $0x11,%zmm9,%zmm5,%zmm5 vpclmulqdq $0x01,%zmm1,%zmm10,%zmm0 vpshufd $0x4e,%zmm1,%zmm1 vpternlogd $0x96,%zmm0,%zmm1,%zmm5 vextracti32x4 $1,%zmm5,%xmm0 vextracti32x4 $2,%zmm5,%xmm1 vextracti32x4 $3,%zmm5,%xmm2 vpxord %xmm0,%xmm5,%xmm5 vpternlogd $0x96,%xmm1,%xmm2,%xmm5 addq $64,%rdx subq $64,%rcx cmpq $64,%rcx jae L$aad_loop_1x__func1 L$aad_large_done__func1: vzeroupper L$aad_blockbyblock__func1: testq %rcx,%rcx jz L$aad_done__func1 vmovdqu 256-16(%rsi),%xmm9 L$aad_loop_blockbyblock__func1: vmovdqu (%rdx),%xmm0 vpshufb %xmm4,%xmm0,%xmm0 vpxor %xmm0,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm9,%xmm5,%xmm0 
vpclmulqdq $0x01,%xmm9,%xmm5,%xmm1 vpclmulqdq $0x10,%xmm9,%xmm5,%xmm2 vpxord %xmm2,%xmm1,%xmm1 vpclmulqdq $0x01,%xmm0,%xmm10,%xmm2 vpshufd $0x4e,%xmm0,%xmm0 vpternlogd $0x96,%xmm2,%xmm0,%xmm1 vpclmulqdq $0x11,%xmm9,%xmm5,%xmm5 vpclmulqdq $0x01,%xmm1,%xmm10,%xmm0 vpshufd $0x4e,%xmm1,%xmm1 vpternlogd $0x96,%xmm0,%xmm1,%xmm5 addq $16,%rdx subq $16,%rcx jnz L$aad_loop_blockbyblock__func1 L$aad_done__func1: vpshufb %xmm4,%xmm5,%xmm5 vmovdqu %xmm5,(%rdi) ret .globl _aes_gcm_enc_update_vaes_avx10_512 .private_extern _aes_gcm_enc_update_vaes_avx10_512 .p2align 5 _aes_gcm_enc_update_vaes_avx10_512: _CET_ENDBR pushq %r12 movq 16(%rsp),%r12 #ifdef BORINGSSL_DISPATCH_TEST movb $1,_BORINGSSL_function_hit+7(%rip) #endif vbroadcasti32x4 L$bswap_mask(%rip),%zmm8 vbroadcasti32x4 L$gfpoly(%rip),%zmm31 vmovdqu (%r12),%xmm10 vpshufb %xmm8,%xmm10,%xmm10 vbroadcasti32x4 (%r8),%zmm12 vpshufb %zmm8,%zmm12,%zmm12 movl 240(%rcx),%r10d leal -20(,%r10,4),%r10d leaq 96(%rcx,%r10,4),%r11 vbroadcasti32x4 (%rcx),%zmm13 vbroadcasti32x4 (%r11),%zmm14 vpaddd L$ctr_pattern(%rip),%zmm12,%zmm12 vbroadcasti32x4 L$inc_4blocks(%rip),%zmm11 cmpq $256-1,%rdx jbe L$crypt_loop_4x_done__func1 vmovdqu8 256-256(%r9),%zmm27 vmovdqu8 256-192(%r9),%zmm28 vmovdqu8 256-128(%r9),%zmm29 vmovdqu8 256-64(%r9),%zmm30 vpshufb %zmm8,%zmm12,%zmm0 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm1 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm2 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm3 vpaddd %zmm11,%zmm12,%zmm12 vpxord %zmm13,%zmm0,%zmm0 vpxord %zmm13,%zmm1,%zmm1 vpxord %zmm13,%zmm2,%zmm2 vpxord %zmm13,%zmm3,%zmm3 leaq 16(%rcx),%rax L$vaesenc_loop_first_4_vecs__func1: vbroadcasti32x4 (%rax),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_first_4_vecs__func1 vpxord 0(%rdi),%zmm14,%zmm4 vpxord 64(%rdi),%zmm14,%zmm5 vpxord 128(%rdi),%zmm14,%zmm6 vpxord 192(%rdi),%zmm14,%zmm7 vaesenclast 
%zmm4,%zmm0,%zmm4 vaesenclast %zmm5,%zmm1,%zmm5 vaesenclast %zmm6,%zmm2,%zmm6 vaesenclast %zmm7,%zmm3,%zmm7 vmovdqu8 %zmm4,0(%rsi) vmovdqu8 %zmm5,64(%rsi) vmovdqu8 %zmm6,128(%rsi) vmovdqu8 %zmm7,192(%rsi) subq $-256,%rdi subq $-256,%rsi addq $-256,%rdx cmpq $256-1,%rdx jbe L$ghash_last_ciphertext_4x__func1 vbroadcasti32x4 -144(%r11),%zmm15 vbroadcasti32x4 -128(%r11),%zmm16 vbroadcasti32x4 -112(%r11),%zmm17 vbroadcasti32x4 -96(%r11),%zmm18 vbroadcasti32x4 -80(%r11),%zmm19 vbroadcasti32x4 -64(%r11),%zmm20 vbroadcasti32x4 -48(%r11),%zmm21 vbroadcasti32x4 -32(%r11),%zmm22 vbroadcasti32x4 -16(%r11),%zmm23 L$crypt_loop_4x__func1: vpshufb %zmm8,%zmm12,%zmm0 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm1 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm2 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm3 vpaddd %zmm11,%zmm12,%zmm12 vpxord %zmm13,%zmm0,%zmm0 vpxord %zmm13,%zmm1,%zmm1 vpxord %zmm13,%zmm2,%zmm2 vpxord %zmm13,%zmm3,%zmm3 cmpl $24,%r10d jl L$aes128__func1 je L$aes192__func1 vbroadcasti32x4 -208(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 vbroadcasti32x4 -192(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 L$aes192__func1: vbroadcasti32x4 -176(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 vbroadcasti32x4 -160(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 L$aes128__func1: prefetcht0 512+0(%rdi) prefetcht0 512+64(%rdi) prefetcht0 512+128(%rdi) prefetcht0 512+192(%rdi) vpshufb %zmm8,%zmm4,%zmm4 vpxord %zmm10,%zmm4,%zmm4 vpshufb %zmm8,%zmm5,%zmm5 vpshufb %zmm8,%zmm6,%zmm6 vaesenc %zmm15,%zmm0,%zmm0 vaesenc %zmm15,%zmm1,%zmm1 vaesenc %zmm15,%zmm2,%zmm2 vaesenc %zmm15,%zmm3,%zmm3 vpshufb %zmm8,%zmm7,%zmm7 vpclmulqdq $0x00,%zmm27,%zmm4,%zmm10 vpclmulqdq $0x00,%zmm28,%zmm5,%zmm24 
vpclmulqdq $0x00,%zmm29,%zmm6,%zmm25 vaesenc %zmm16,%zmm0,%zmm0 vaesenc %zmm16,%zmm1,%zmm1 vaesenc %zmm16,%zmm2,%zmm2 vaesenc %zmm16,%zmm3,%zmm3 vpxord %zmm24,%zmm10,%zmm10 vpclmulqdq $0x00,%zmm30,%zmm7,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm10 vpclmulqdq $0x01,%zmm27,%zmm4,%zmm24 vaesenc %zmm17,%zmm0,%zmm0 vaesenc %zmm17,%zmm1,%zmm1 vaesenc %zmm17,%zmm2,%zmm2 vaesenc %zmm17,%zmm3,%zmm3 vpclmulqdq $0x01,%zmm28,%zmm5,%zmm25 vpclmulqdq $0x01,%zmm29,%zmm6,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x01,%zmm30,%zmm7,%zmm25 vaesenc %zmm18,%zmm0,%zmm0 vaesenc %zmm18,%zmm1,%zmm1 vaesenc %zmm18,%zmm2,%zmm2 vaesenc %zmm18,%zmm3,%zmm3 vpclmulqdq $0x10,%zmm27,%zmm4,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x10,%zmm28,%zmm5,%zmm25 vpclmulqdq $0x10,%zmm29,%zmm6,%zmm26 vaesenc %zmm19,%zmm0,%zmm0 vaesenc %zmm19,%zmm1,%zmm1 vaesenc %zmm19,%zmm2,%zmm2 vaesenc %zmm19,%zmm3,%zmm3 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x01,%zmm10,%zmm31,%zmm26 vpclmulqdq $0x10,%zmm30,%zmm7,%zmm25 vpxord %zmm25,%zmm24,%zmm24 vaesenc %zmm20,%zmm0,%zmm0 vaesenc %zmm20,%zmm1,%zmm1 vaesenc %zmm20,%zmm2,%zmm2 vaesenc %zmm20,%zmm3,%zmm3 vpshufd $0x4e,%zmm10,%zmm10 vpclmulqdq $0x11,%zmm27,%zmm4,%zmm4 vpclmulqdq $0x11,%zmm28,%zmm5,%zmm5 vpclmulqdq $0x11,%zmm29,%zmm6,%zmm6 vaesenc %zmm21,%zmm0,%zmm0 vaesenc %zmm21,%zmm1,%zmm1 vaesenc %zmm21,%zmm2,%zmm2 vaesenc %zmm21,%zmm3,%zmm3 vpternlogd $0x96,%zmm26,%zmm10,%zmm24 vpclmulqdq $0x11,%zmm30,%zmm7,%zmm7 vpternlogd $0x96,%zmm6,%zmm5,%zmm4 vpclmulqdq $0x01,%zmm24,%zmm31,%zmm25 vaesenc %zmm22,%zmm0,%zmm0 vaesenc %zmm22,%zmm1,%zmm1 vaesenc %zmm22,%zmm2,%zmm2 vaesenc %zmm22,%zmm3,%zmm3 vpxord %zmm7,%zmm4,%zmm10 vpshufd $0x4e,%zmm24,%zmm24 vpternlogd $0x96,%zmm25,%zmm24,%zmm10 vaesenc %zmm23,%zmm0,%zmm0 vaesenc %zmm23,%zmm1,%zmm1 vaesenc %zmm23,%zmm2,%zmm2 vaesenc %zmm23,%zmm3,%zmm3 vextracti32x4 $1,%zmm10,%xmm4 vextracti32x4 $2,%zmm10,%xmm5 vextracti32x4 $3,%zmm10,%xmm6 vpxord %xmm4,%xmm10,%xmm10 vpternlogd 
$0x96,%xmm5,%xmm6,%xmm10 vpxord 0(%rdi),%zmm14,%zmm4 vpxord 64(%rdi),%zmm14,%zmm5 vpxord 128(%rdi),%zmm14,%zmm6 vpxord 192(%rdi),%zmm14,%zmm7 vaesenclast %zmm4,%zmm0,%zmm4 vaesenclast %zmm5,%zmm1,%zmm5 vaesenclast %zmm6,%zmm2,%zmm6 vaesenclast %zmm7,%zmm3,%zmm7 vmovdqu8 %zmm4,0(%rsi) vmovdqu8 %zmm5,64(%rsi) vmovdqu8 %zmm6,128(%rsi) vmovdqu8 %zmm7,192(%rsi) subq $-256,%rdi subq $-256,%rsi addq $-256,%rdx cmpq $256-1,%rdx ja L$crypt_loop_4x__func1 L$ghash_last_ciphertext_4x__func1: vpshufb %zmm8,%zmm4,%zmm4 vpxord %zmm10,%zmm4,%zmm4 vpshufb %zmm8,%zmm5,%zmm5 vpshufb %zmm8,%zmm6,%zmm6 vpshufb %zmm8,%zmm7,%zmm7 vpclmulqdq $0x00,%zmm27,%zmm4,%zmm10 vpclmulqdq $0x00,%zmm28,%zmm5,%zmm24 vpclmulqdq $0x00,%zmm29,%zmm6,%zmm25 vpxord %zmm24,%zmm10,%zmm10 vpclmulqdq $0x00,%zmm30,%zmm7,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm10 vpclmulqdq $0x01,%zmm27,%zmm4,%zmm24 vpclmulqdq $0x01,%zmm28,%zmm5,%zmm25 vpclmulqdq $0x01,%zmm29,%zmm6,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x01,%zmm30,%zmm7,%zmm25 vpclmulqdq $0x10,%zmm27,%zmm4,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x10,%zmm28,%zmm5,%zmm25 vpclmulqdq $0x10,%zmm29,%zmm6,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x01,%zmm10,%zmm31,%zmm26 vpclmulqdq $0x10,%zmm30,%zmm7,%zmm25 vpxord %zmm25,%zmm24,%zmm24 vpshufd $0x4e,%zmm10,%zmm10 vpclmulqdq $0x11,%zmm27,%zmm4,%zmm4 vpclmulqdq $0x11,%zmm28,%zmm5,%zmm5 vpclmulqdq $0x11,%zmm29,%zmm6,%zmm6 vpternlogd $0x96,%zmm26,%zmm10,%zmm24 vpclmulqdq $0x11,%zmm30,%zmm7,%zmm7 vpternlogd $0x96,%zmm6,%zmm5,%zmm4 vpclmulqdq $0x01,%zmm24,%zmm31,%zmm25 vpxord %zmm7,%zmm4,%zmm10 vpshufd $0x4e,%zmm24,%zmm24 vpternlogd $0x96,%zmm25,%zmm24,%zmm10 vextracti32x4 $1,%zmm10,%xmm4 vextracti32x4 $2,%zmm10,%xmm5 vextracti32x4 $3,%zmm10,%xmm6 vpxord %xmm4,%xmm10,%xmm10 vpternlogd $0x96,%xmm5,%xmm6,%xmm10 L$crypt_loop_4x_done__func1: testq %rdx,%rdx jz L$done__func1 movq %rdx,%rax negq %rax andq $-16,%rax leaq 256(%r9,%rax,1),%r8 vpxor %xmm4,%xmm4,%xmm4 vpxor 
%xmm5,%xmm5,%xmm5 vpxor %xmm6,%xmm6,%xmm6 cmpq $64,%rdx jb L$partial_vec__func1 L$crypt_loop_1x__func1: vpshufb %zmm8,%zmm12,%zmm0 vpaddd %zmm11,%zmm12,%zmm12 vpxord %zmm13,%zmm0,%zmm0 leaq 16(%rcx),%rax L$vaesenc_loop_tail_full_vec__func1: vbroadcasti32x4 (%rax),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_tail_full_vec__func1 vaesenclast %zmm14,%zmm0,%zmm0 vmovdqu8 (%rdi),%zmm1 vpxord %zmm1,%zmm0,%zmm0 vmovdqu8 %zmm0,(%rsi) vmovdqu8 (%r8),%zmm30 vpshufb %zmm8,%zmm0,%zmm0 vpxord %zmm10,%zmm0,%zmm0 vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 vpxord %zmm7,%zmm4,%zmm4 vpternlogd $0x96,%zmm2,%zmm1,%zmm5 vpxord %zmm3,%zmm6,%zmm6 vpxor %xmm10,%xmm10,%xmm10 addq $64,%r8 addq $64,%rdi addq $64,%rsi subq $64,%rdx cmpq $64,%rdx jae L$crypt_loop_1x__func1 testq %rdx,%rdx jz L$reduce__func1 L$partial_vec__func1: movq $-1,%rax bzhiq %rdx,%rax,%rax kmovq %rax,%k1 addq $15,%rdx andq $-16,%rdx movq $-1,%rax bzhiq %rdx,%rax,%rax kmovq %rax,%k2 vpshufb %zmm8,%zmm12,%zmm0 vpxord %zmm13,%zmm0,%zmm0 leaq 16(%rcx),%rax L$vaesenc_loop_tail_partialvec__func1: vbroadcasti32x4 (%rax),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_tail_partialvec__func1 vaesenclast %zmm14,%zmm0,%zmm0 vmovdqu8 (%rdi),%zmm1{%k1}{z} vpxord %zmm1,%zmm0,%zmm0 vmovdqu8 %zmm0,(%rsi){%k1} vmovdqu8 (%r8),%zmm30{%k2}{z} vmovdqu8 %zmm0,%zmm1{%k1}{z} vpshufb %zmm8,%zmm1,%zmm0 vpxord %zmm10,%zmm0,%zmm0 vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 vpxord %zmm7,%zmm4,%zmm4 vpternlogd $0x96,%zmm2,%zmm1,%zmm5 vpxord %zmm3,%zmm6,%zmm6 L$reduce__func1: vpclmulqdq $0x01,%zmm4,%zmm31,%zmm0 vpshufd $0x4e,%zmm4,%zmm4 vpternlogd $0x96,%zmm0,%zmm4,%zmm5 vpclmulqdq $0x01,%zmm5,%zmm31,%zmm0 vpshufd $0x4e,%zmm5,%zmm5 vpternlogd $0x96,%zmm0,%zmm5,%zmm6 vextracti32x4 
$1,%zmm6,%xmm0 vextracti32x4 $2,%zmm6,%xmm1 vextracti32x4 $3,%zmm6,%xmm2 vpxord %xmm0,%xmm6,%xmm10 vpternlogd $0x96,%xmm1,%xmm2,%xmm10 L$done__func1: vpshufb %xmm8,%xmm10,%xmm10 vmovdqu %xmm10,(%r12) vzeroupper popq %r12 ret .globl _aes_gcm_dec_update_vaes_avx10_512 .private_extern _aes_gcm_dec_update_vaes_avx10_512 .p2align 5 _aes_gcm_dec_update_vaes_avx10_512: _CET_ENDBR pushq %r12 movq 16(%rsp),%r12 vbroadcasti32x4 L$bswap_mask(%rip),%zmm8 vbroadcasti32x4 L$gfpoly(%rip),%zmm31 vmovdqu (%r12),%xmm10 vpshufb %xmm8,%xmm10,%xmm10 vbroadcasti32x4 (%r8),%zmm12 vpshufb %zmm8,%zmm12,%zmm12 movl 240(%rcx),%r10d leal -20(,%r10,4),%r10d leaq 96(%rcx,%r10,4),%r11 vbroadcasti32x4 (%rcx),%zmm13 vbroadcasti32x4 (%r11),%zmm14 vpaddd L$ctr_pattern(%rip),%zmm12,%zmm12 vbroadcasti32x4 L$inc_4blocks(%rip),%zmm11 cmpq $256-1,%rdx jbe L$crypt_loop_4x_done__func2 vmovdqu8 256-256(%r9),%zmm27 vmovdqu8 256-192(%r9),%zmm28 vmovdqu8 256-128(%r9),%zmm29 vmovdqu8 256-64(%r9),%zmm30 vbroadcasti32x4 -144(%r11),%zmm15 vbroadcasti32x4 -128(%r11),%zmm16 vbroadcasti32x4 -112(%r11),%zmm17 vbroadcasti32x4 -96(%r11),%zmm18 vbroadcasti32x4 -80(%r11),%zmm19 vbroadcasti32x4 -64(%r11),%zmm20 vbroadcasti32x4 -48(%r11),%zmm21 vbroadcasti32x4 -32(%r11),%zmm22 vbroadcasti32x4 -16(%r11),%zmm23 L$crypt_loop_4x__func2: vmovdqu8 0(%rdi),%zmm4 vmovdqu8 64(%rdi),%zmm5 vmovdqu8 128(%rdi),%zmm6 vmovdqu8 192(%rdi),%zmm7 vpshufb %zmm8,%zmm12,%zmm0 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm1 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm2 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm3 vpaddd %zmm11,%zmm12,%zmm12 vpxord %zmm13,%zmm0,%zmm0 vpxord %zmm13,%zmm1,%zmm1 vpxord %zmm13,%zmm2,%zmm2 vpxord %zmm13,%zmm3,%zmm3 cmpl $24,%r10d jl L$aes128__func2 je L$aes192__func2 vbroadcasti32x4 -208(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 vbroadcasti32x4 -192(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc 
%zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 L$aes192__func2: vbroadcasti32x4 -176(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 vbroadcasti32x4 -160(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 L$aes128__func2: prefetcht0 512+0(%rdi) prefetcht0 512+64(%rdi) prefetcht0 512+128(%rdi) prefetcht0 512+192(%rdi) vpshufb %zmm8,%zmm4,%zmm4 vpxord %zmm10,%zmm4,%zmm4 vpshufb %zmm8,%zmm5,%zmm5 vpshufb %zmm8,%zmm6,%zmm6 vaesenc %zmm15,%zmm0,%zmm0 vaesenc %zmm15,%zmm1,%zmm1 vaesenc %zmm15,%zmm2,%zmm2 vaesenc %zmm15,%zmm3,%zmm3 vpshufb %zmm8,%zmm7,%zmm7 vpclmulqdq $0x00,%zmm27,%zmm4,%zmm10 vpclmulqdq $0x00,%zmm28,%zmm5,%zmm24 vpclmulqdq $0x00,%zmm29,%zmm6,%zmm25 vaesenc %zmm16,%zmm0,%zmm0 vaesenc %zmm16,%zmm1,%zmm1 vaesenc %zmm16,%zmm2,%zmm2 vaesenc %zmm16,%zmm3,%zmm3 vpxord %zmm24,%zmm10,%zmm10 vpclmulqdq $0x00,%zmm30,%zmm7,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm10 vpclmulqdq $0x01,%zmm27,%zmm4,%zmm24 vaesenc %zmm17,%zmm0,%zmm0 vaesenc %zmm17,%zmm1,%zmm1 vaesenc %zmm17,%zmm2,%zmm2 vaesenc %zmm17,%zmm3,%zmm3 vpclmulqdq $0x01,%zmm28,%zmm5,%zmm25 vpclmulqdq $0x01,%zmm29,%zmm6,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x01,%zmm30,%zmm7,%zmm25 vaesenc %zmm18,%zmm0,%zmm0 vaesenc %zmm18,%zmm1,%zmm1 vaesenc %zmm18,%zmm2,%zmm2 vaesenc %zmm18,%zmm3,%zmm3 vpclmulqdq $0x10,%zmm27,%zmm4,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x10,%zmm28,%zmm5,%zmm25 vpclmulqdq $0x10,%zmm29,%zmm6,%zmm26 vaesenc %zmm19,%zmm0,%zmm0 vaesenc %zmm19,%zmm1,%zmm1 vaesenc %zmm19,%zmm2,%zmm2 vaesenc %zmm19,%zmm3,%zmm3 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x01,%zmm10,%zmm31,%zmm26 vpclmulqdq $0x10,%zmm30,%zmm7,%zmm25 vpxord %zmm25,%zmm24,%zmm24 vaesenc %zmm20,%zmm0,%zmm0 vaesenc %zmm20,%zmm1,%zmm1 vaesenc %zmm20,%zmm2,%zmm2 vaesenc %zmm20,%zmm3,%zmm3 vpshufd $0x4e,%zmm10,%zmm10 vpclmulqdq $0x11,%zmm27,%zmm4,%zmm4 vpclmulqdq 
$0x11,%zmm28,%zmm5,%zmm5 vpclmulqdq $0x11,%zmm29,%zmm6,%zmm6 vaesenc %zmm21,%zmm0,%zmm0 vaesenc %zmm21,%zmm1,%zmm1 vaesenc %zmm21,%zmm2,%zmm2 vaesenc %zmm21,%zmm3,%zmm3 vpternlogd $0x96,%zmm26,%zmm10,%zmm24 vpclmulqdq $0x11,%zmm30,%zmm7,%zmm7 vpternlogd $0x96,%zmm6,%zmm5,%zmm4 vpclmulqdq $0x01,%zmm24,%zmm31,%zmm25 vaesenc %zmm22,%zmm0,%zmm0 vaesenc %zmm22,%zmm1,%zmm1 vaesenc %zmm22,%zmm2,%zmm2 vaesenc %zmm22,%zmm3,%zmm3 vpxord %zmm7,%zmm4,%zmm10 vpshufd $0x4e,%zmm24,%zmm24 vpternlogd $0x96,%zmm25,%zmm24,%zmm10 vaesenc %zmm23,%zmm0,%zmm0 vaesenc %zmm23,%zmm1,%zmm1 vaesenc %zmm23,%zmm2,%zmm2 vaesenc %zmm23,%zmm3,%zmm3 vextracti32x4 $1,%zmm10,%xmm4 vextracti32x4 $2,%zmm10,%xmm5 vextracti32x4 $3,%zmm10,%xmm6 vpxord %xmm4,%xmm10,%xmm10 vpternlogd $0x96,%xmm5,%xmm6,%xmm10 vpxord 0(%rdi),%zmm14,%zmm4 vpxord 64(%rdi),%zmm14,%zmm5 vpxord 128(%rdi),%zmm14,%zmm6 vpxord 192(%rdi),%zmm14,%zmm7 vaesenclast %zmm4,%zmm0,%zmm4 vaesenclast %zmm5,%zmm1,%zmm5 vaesenclast %zmm6,%zmm2,%zmm6 vaesenclast %zmm7,%zmm3,%zmm7 vmovdqu8 %zmm4,0(%rsi) vmovdqu8 %zmm5,64(%rsi) vmovdqu8 %zmm6,128(%rsi) vmovdqu8 %zmm7,192(%rsi) subq $-256,%rdi subq $-256,%rsi addq $-256,%rdx cmpq $256-1,%rdx ja L$crypt_loop_4x__func2 L$crypt_loop_4x_done__func2: testq %rdx,%rdx jz L$done__func2 movq %rdx,%rax negq %rax andq $-16,%rax leaq 256(%r9,%rax,1),%r8 vpxor %xmm4,%xmm4,%xmm4 vpxor %xmm5,%xmm5,%xmm5 vpxor %xmm6,%xmm6,%xmm6 cmpq $64,%rdx jb L$partial_vec__func2 L$crypt_loop_1x__func2: vpshufb %zmm8,%zmm12,%zmm0 vpaddd %zmm11,%zmm12,%zmm12 vpxord %zmm13,%zmm0,%zmm0 leaq 16(%rcx),%rax L$vaesenc_loop_tail_full_vec__func2: vbroadcasti32x4 (%rax),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_tail_full_vec__func2 vaesenclast %zmm14,%zmm0,%zmm0 vmovdqu8 (%rdi),%zmm1 vpxord %zmm1,%zmm0,%zmm0 vmovdqu8 %zmm0,(%rsi) vmovdqu8 (%r8),%zmm30 vpshufb %zmm8,%zmm1,%zmm0 vpxord %zmm10,%zmm0,%zmm0 vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 vpclmulqdq 
$0x10,%zmm30,%zmm0,%zmm2 vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 vpxord %zmm7,%zmm4,%zmm4 vpternlogd $0x96,%zmm2,%zmm1,%zmm5 vpxord %zmm3,%zmm6,%zmm6 vpxor %xmm10,%xmm10,%xmm10 addq $64,%r8 addq $64,%rdi addq $64,%rsi subq $64,%rdx cmpq $64,%rdx jae L$crypt_loop_1x__func2 testq %rdx,%rdx jz L$reduce__func2 L$partial_vec__func2: movq $-1,%rax bzhiq %rdx,%rax,%rax kmovq %rax,%k1 addq $15,%rdx andq $-16,%rdx movq $-1,%rax bzhiq %rdx,%rax,%rax kmovq %rax,%k2 vpshufb %zmm8,%zmm12,%zmm0 vpxord %zmm13,%zmm0,%zmm0 leaq 16(%rcx),%rax L$vaesenc_loop_tail_partialvec__func2: vbroadcasti32x4 (%rax),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_tail_partialvec__func2 vaesenclast %zmm14,%zmm0,%zmm0 vmovdqu8 (%rdi),%zmm1{%k1}{z} vpxord %zmm1,%zmm0,%zmm0 vmovdqu8 %zmm0,(%rsi){%k1} vmovdqu8 (%r8),%zmm30{%k2}{z} vpshufb %zmm8,%zmm1,%zmm0 vpxord %zmm10,%zmm0,%zmm0 vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 vpxord %zmm7,%zmm4,%zmm4 vpternlogd $0x96,%zmm2,%zmm1,%zmm5 vpxord %zmm3,%zmm6,%zmm6 L$reduce__func2: vpclmulqdq $0x01,%zmm4,%zmm31,%zmm0 vpshufd $0x4e,%zmm4,%zmm4 vpternlogd $0x96,%zmm0,%zmm4,%zmm5 vpclmulqdq $0x01,%zmm5,%zmm31,%zmm0 vpshufd $0x4e,%zmm5,%zmm5 vpternlogd $0x96,%zmm0,%zmm5,%zmm6 vextracti32x4 $1,%zmm6,%xmm0 vextracti32x4 $2,%zmm6,%xmm1 vextracti32x4 $3,%zmm6,%xmm2 vpxord %xmm0,%xmm6,%xmm10 vpternlogd $0x96,%xmm1,%xmm2,%xmm10 L$done__func2: vpshufb %xmm8,%xmm10,%xmm10 vmovdqu %xmm10,(%r12) vzeroupper popq %r12 ret #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aes-gcm-avx10-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .section .rodata .align 64 .Lbswap_mask: .quad 0x08090a0b0c0d0e0f, 0x0001020304050607 .Lgfpoly: .quad 1, 0xc200000000000000 .Lgfpoly_and_internal_carrybit: .quad 1, 0xc200000000000001 .Lctr_pattern: .quad 0, 0 .quad 1, 0 .Linc_2blocks: .quad 2, 0 .quad 3, 0 .Linc_4blocks: .quad 4, 0 .text .globl gcm_gmult_vpclmulqdq_avx10 .hidden gcm_gmult_vpclmulqdq_avx10 .type gcm_gmult_vpclmulqdq_avx10,@function .align 32 gcm_gmult_vpclmulqdq_avx10: .cfi_startproc _CET_ENDBR vmovdqu (%rdi),%xmm0 vmovdqu .Lbswap_mask(%rip),%xmm1 vmovdqu 256-16(%rsi),%xmm2 vmovdqu .Lgfpoly(%rip),%xmm3 vpshufb %xmm1,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm4 vpclmulqdq $0x01,%xmm2,%xmm0,%xmm5 vpclmulqdq $0x10,%xmm2,%xmm0,%xmm6 vpxord %xmm6,%xmm5,%xmm5 vpclmulqdq $0x01,%xmm4,%xmm3,%xmm6 vpshufd $0x4e,%xmm4,%xmm4 vpternlogd $0x96,%xmm6,%xmm4,%xmm5 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm0 vpclmulqdq $0x01,%xmm5,%xmm3,%xmm4 vpshufd $0x4e,%xmm5,%xmm5 vpternlogd $0x96,%xmm4,%xmm5,%xmm0 vpshufb %xmm1,%xmm0,%xmm0 vmovdqu %xmm0,(%rdi) ret .cfi_endproc .size gcm_gmult_vpclmulqdq_avx10, . 
- gcm_gmult_vpclmulqdq_avx10 .globl gcm_init_vpclmulqdq_avx10_512 .hidden gcm_init_vpclmulqdq_avx10_512 .type gcm_init_vpclmulqdq_avx10_512,@function .align 32 gcm_init_vpclmulqdq_avx10_512: .cfi_startproc _CET_ENDBR leaq 256-64(%rdi),%r8 vpshufd $0x4e,(%rsi),%xmm3 vpshufd $0xd3,%xmm3,%xmm0 vpsrad $31,%xmm0,%xmm0 vpaddq %xmm3,%xmm3,%xmm3 vpternlogd $0x78,.Lgfpoly_and_internal_carrybit(%rip),%xmm0,%xmm3 vbroadcasti32x4 .Lgfpoly(%rip),%zmm5 vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0 vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1 vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2 vpxord %xmm2,%xmm1,%xmm1 vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2 vpshufd $0x4e,%xmm0,%xmm0 vpternlogd $0x96,%xmm2,%xmm0,%xmm1 vpclmulqdq $0x11,%xmm3,%xmm3,%xmm4 vpclmulqdq $0x01,%xmm1,%xmm5,%xmm0 vpshufd $0x4e,%xmm1,%xmm1 vpternlogd $0x96,%xmm0,%xmm1,%xmm4 vinserti128 $1,%xmm3,%ymm4,%ymm3 vinserti128 $1,%xmm4,%ymm4,%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm0 vpclmulqdq $0x01,%ymm4,%ymm3,%ymm1 vpclmulqdq $0x10,%ymm4,%ymm3,%ymm2 vpxord %ymm2,%ymm1,%ymm1 vpclmulqdq $0x01,%ymm0,%ymm5,%ymm2 vpshufd $0x4e,%ymm0,%ymm0 vpternlogd $0x96,%ymm2,%ymm0,%ymm1 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm4 vpclmulqdq $0x01,%ymm1,%ymm5,%ymm0 vpshufd $0x4e,%ymm1,%ymm1 vpternlogd $0x96,%ymm0,%ymm1,%ymm4 vinserti64x4 $1,%ymm3,%zmm4,%zmm3 vshufi64x2 $0,%zmm4,%zmm4,%zmm4 vmovdqu8 %zmm3,(%r8) movl $3,%eax .Lprecompute_next__func1: subq $64,%r8 vpclmulqdq $0x00,%zmm4,%zmm3,%zmm0 vpclmulqdq $0x01,%zmm4,%zmm3,%zmm1 vpclmulqdq $0x10,%zmm4,%zmm3,%zmm2 vpxord %zmm2,%zmm1,%zmm1 vpclmulqdq $0x01,%zmm0,%zmm5,%zmm2 vpshufd $0x4e,%zmm0,%zmm0 vpternlogd $0x96,%zmm2,%zmm0,%zmm1 vpclmulqdq $0x11,%zmm4,%zmm3,%zmm3 vpclmulqdq $0x01,%zmm1,%zmm5,%zmm0 vpshufd $0x4e,%zmm1,%zmm1 vpternlogd $0x96,%zmm0,%zmm1,%zmm3 vmovdqu8 %zmm3,(%r8) decl %eax jnz .Lprecompute_next__func1 vzeroupper ret .cfi_endproc .size gcm_init_vpclmulqdq_avx10_512, . 
- gcm_init_vpclmulqdq_avx10_512 .globl gcm_ghash_vpclmulqdq_avx10_512 .hidden gcm_ghash_vpclmulqdq_avx10_512 .type gcm_ghash_vpclmulqdq_avx10_512,@function .align 32 gcm_ghash_vpclmulqdq_avx10_512: .cfi_startproc _CET_ENDBR vmovdqu .Lbswap_mask(%rip),%xmm4 vmovdqu .Lgfpoly(%rip),%xmm10 vmovdqu (%rdi),%xmm5 vpshufb %xmm4,%xmm5,%xmm5 cmpq $64,%rcx jb .Laad_blockbyblock__func1 vshufi64x2 $0,%zmm4,%zmm4,%zmm4 vshufi64x2 $0,%zmm10,%zmm10,%zmm10 vmovdqu8 256-64(%rsi),%zmm9 cmpq $256-1,%rcx jbe .Laad_loop_1x__func1 vmovdqu8 256-256(%rsi),%zmm6 vmovdqu8 256-192(%rsi),%zmm7 vmovdqu8 256-128(%rsi),%zmm8 .Laad_loop_4x__func1: vmovdqu8 0(%rdx),%zmm0 vmovdqu8 64(%rdx),%zmm1 vmovdqu8 128(%rdx),%zmm2 vmovdqu8 192(%rdx),%zmm3 vpshufb %zmm4,%zmm0,%zmm0 vpxord %zmm5,%zmm0,%zmm0 vpshufb %zmm4,%zmm1,%zmm1 vpshufb %zmm4,%zmm2,%zmm2 vpshufb %zmm4,%zmm3,%zmm3 vpclmulqdq $0x00,%zmm6,%zmm0,%zmm5 vpclmulqdq $0x00,%zmm7,%zmm1,%zmm11 vpclmulqdq $0x00,%zmm8,%zmm2,%zmm12 vpxord %zmm11,%zmm5,%zmm5 vpclmulqdq $0x00,%zmm9,%zmm3,%zmm13 vpternlogd $0x96,%zmm13,%zmm12,%zmm5 vpclmulqdq $0x01,%zmm6,%zmm0,%zmm11 vpclmulqdq $0x01,%zmm7,%zmm1,%zmm12 vpclmulqdq $0x01,%zmm8,%zmm2,%zmm13 vpternlogd $0x96,%zmm13,%zmm12,%zmm11 vpclmulqdq $0x01,%zmm9,%zmm3,%zmm12 vpclmulqdq $0x10,%zmm6,%zmm0,%zmm13 vpternlogd $0x96,%zmm13,%zmm12,%zmm11 vpclmulqdq $0x10,%zmm7,%zmm1,%zmm12 vpclmulqdq $0x10,%zmm8,%zmm2,%zmm13 vpternlogd $0x96,%zmm13,%zmm12,%zmm11 vpclmulqdq $0x01,%zmm5,%zmm10,%zmm13 vpclmulqdq $0x10,%zmm9,%zmm3,%zmm12 vpxord %zmm12,%zmm11,%zmm11 vpshufd $0x4e,%zmm5,%zmm5 vpclmulqdq $0x11,%zmm6,%zmm0,%zmm0 vpclmulqdq $0x11,%zmm7,%zmm1,%zmm1 vpclmulqdq $0x11,%zmm8,%zmm2,%zmm2 vpternlogd $0x96,%zmm13,%zmm5,%zmm11 vpclmulqdq $0x11,%zmm9,%zmm3,%zmm3 vpternlogd $0x96,%zmm2,%zmm1,%zmm0 vpclmulqdq $0x01,%zmm11,%zmm10,%zmm12 vpxord %zmm3,%zmm0,%zmm5 vpshufd $0x4e,%zmm11,%zmm11 vpternlogd $0x96,%zmm12,%zmm11,%zmm5 vextracti32x4 $1,%zmm5,%xmm0 vextracti32x4 $2,%zmm5,%xmm1 vextracti32x4 $3,%zmm5,%xmm2 vpxord %xmm0,%xmm5,%xmm5 
vpternlogd $0x96,%xmm1,%xmm2,%xmm5 subq $-256,%rdx addq $-256,%rcx cmpq $256-1,%rcx ja .Laad_loop_4x__func1 cmpq $64,%rcx jb .Laad_large_done__func1 .Laad_loop_1x__func1: vmovdqu8 (%rdx),%zmm0 vpshufb %zmm4,%zmm0,%zmm0 vpxord %zmm0,%zmm5,%zmm5 vpclmulqdq $0x00,%zmm9,%zmm5,%zmm0 vpclmulqdq $0x01,%zmm9,%zmm5,%zmm1 vpclmulqdq $0x10,%zmm9,%zmm5,%zmm2 vpxord %zmm2,%zmm1,%zmm1 vpclmulqdq $0x01,%zmm0,%zmm10,%zmm2 vpshufd $0x4e,%zmm0,%zmm0 vpternlogd $0x96,%zmm2,%zmm0,%zmm1 vpclmulqdq $0x11,%zmm9,%zmm5,%zmm5 vpclmulqdq $0x01,%zmm1,%zmm10,%zmm0 vpshufd $0x4e,%zmm1,%zmm1 vpternlogd $0x96,%zmm0,%zmm1,%zmm5 vextracti32x4 $1,%zmm5,%xmm0 vextracti32x4 $2,%zmm5,%xmm1 vextracti32x4 $3,%zmm5,%xmm2 vpxord %xmm0,%xmm5,%xmm5 vpternlogd $0x96,%xmm1,%xmm2,%xmm5 addq $64,%rdx subq $64,%rcx cmpq $64,%rcx jae .Laad_loop_1x__func1 .Laad_large_done__func1: vzeroupper .Laad_blockbyblock__func1: testq %rcx,%rcx jz .Laad_done__func1 vmovdqu 256-16(%rsi),%xmm9 .Laad_loop_blockbyblock__func1: vmovdqu (%rdx),%xmm0 vpshufb %xmm4,%xmm0,%xmm0 vpxor %xmm0,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm9,%xmm5,%xmm0 vpclmulqdq $0x01,%xmm9,%xmm5,%xmm1 vpclmulqdq $0x10,%xmm9,%xmm5,%xmm2 vpxord %xmm2,%xmm1,%xmm1 vpclmulqdq $0x01,%xmm0,%xmm10,%xmm2 vpshufd $0x4e,%xmm0,%xmm0 vpternlogd $0x96,%xmm2,%xmm0,%xmm1 vpclmulqdq $0x11,%xmm9,%xmm5,%xmm5 vpclmulqdq $0x01,%xmm1,%xmm10,%xmm0 vpshufd $0x4e,%xmm1,%xmm1 vpternlogd $0x96,%xmm0,%xmm1,%xmm5 addq $16,%rdx subq $16,%rcx jnz .Laad_loop_blockbyblock__func1 .Laad_done__func1: vpshufb %xmm4,%xmm5,%xmm5 vmovdqu %xmm5,(%rdi) ret .cfi_endproc .size gcm_ghash_vpclmulqdq_avx10_512, . 
- gcm_ghash_vpclmulqdq_avx10_512 .globl aes_gcm_enc_update_vaes_avx10_512 .hidden aes_gcm_enc_update_vaes_avx10_512 .type aes_gcm_enc_update_vaes_avx10_512,@function .align 32 aes_gcm_enc_update_vaes_avx10_512: .cfi_startproc _CET_ENDBR pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-16 movq 16(%rsp),%r12 #ifdef BORINGSSL_DISPATCH_TEST .extern BORINGSSL_function_hit .hidden BORINGSSL_function_hit movb $1,BORINGSSL_function_hit+7(%rip) #endif vbroadcasti32x4 .Lbswap_mask(%rip),%zmm8 vbroadcasti32x4 .Lgfpoly(%rip),%zmm31 vmovdqu (%r12),%xmm10 vpshufb %xmm8,%xmm10,%xmm10 vbroadcasti32x4 (%r8),%zmm12 vpshufb %zmm8,%zmm12,%zmm12 movl 240(%rcx),%r10d leal -20(,%r10,4),%r10d leaq 96(%rcx,%r10,4),%r11 vbroadcasti32x4 (%rcx),%zmm13 vbroadcasti32x4 (%r11),%zmm14 vpaddd .Lctr_pattern(%rip),%zmm12,%zmm12 vbroadcasti32x4 .Linc_4blocks(%rip),%zmm11 cmpq $256-1,%rdx jbe .Lcrypt_loop_4x_done__func1 vmovdqu8 256-256(%r9),%zmm27 vmovdqu8 256-192(%r9),%zmm28 vmovdqu8 256-128(%r9),%zmm29 vmovdqu8 256-64(%r9),%zmm30 vpshufb %zmm8,%zmm12,%zmm0 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm1 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm2 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm3 vpaddd %zmm11,%zmm12,%zmm12 vpxord %zmm13,%zmm0,%zmm0 vpxord %zmm13,%zmm1,%zmm1 vpxord %zmm13,%zmm2,%zmm2 vpxord %zmm13,%zmm3,%zmm3 leaq 16(%rcx),%rax .Lvaesenc_loop_first_4_vecs__func1: vbroadcasti32x4 (%rax),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 addq $16,%rax cmpq %rax,%r11 jne .Lvaesenc_loop_first_4_vecs__func1 vpxord 0(%rdi),%zmm14,%zmm4 vpxord 64(%rdi),%zmm14,%zmm5 vpxord 128(%rdi),%zmm14,%zmm6 vpxord 192(%rdi),%zmm14,%zmm7 vaesenclast %zmm4,%zmm0,%zmm4 vaesenclast %zmm5,%zmm1,%zmm5 vaesenclast %zmm6,%zmm2,%zmm6 vaesenclast %zmm7,%zmm3,%zmm7 vmovdqu8 %zmm4,0(%rsi) vmovdqu8 %zmm5,64(%rsi) vmovdqu8 %zmm6,128(%rsi) vmovdqu8 %zmm7,192(%rsi) subq $-256,%rdi subq $-256,%rsi addq $-256,%rdx cmpq $256-1,%rdx jbe 
.Lghash_last_ciphertext_4x__func1 vbroadcasti32x4 -144(%r11),%zmm15 vbroadcasti32x4 -128(%r11),%zmm16 vbroadcasti32x4 -112(%r11),%zmm17 vbroadcasti32x4 -96(%r11),%zmm18 vbroadcasti32x4 -80(%r11),%zmm19 vbroadcasti32x4 -64(%r11),%zmm20 vbroadcasti32x4 -48(%r11),%zmm21 vbroadcasti32x4 -32(%r11),%zmm22 vbroadcasti32x4 -16(%r11),%zmm23 .Lcrypt_loop_4x__func1: vpshufb %zmm8,%zmm12,%zmm0 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm1 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm2 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm3 vpaddd %zmm11,%zmm12,%zmm12 vpxord %zmm13,%zmm0,%zmm0 vpxord %zmm13,%zmm1,%zmm1 vpxord %zmm13,%zmm2,%zmm2 vpxord %zmm13,%zmm3,%zmm3 cmpl $24,%r10d jl .Laes128__func1 je .Laes192__func1 vbroadcasti32x4 -208(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 vbroadcasti32x4 -192(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 .Laes192__func1: vbroadcasti32x4 -176(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 vbroadcasti32x4 -160(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 .Laes128__func1: prefetcht0 512+0(%rdi) prefetcht0 512+64(%rdi) prefetcht0 512+128(%rdi) prefetcht0 512+192(%rdi) vpshufb %zmm8,%zmm4,%zmm4 vpxord %zmm10,%zmm4,%zmm4 vpshufb %zmm8,%zmm5,%zmm5 vpshufb %zmm8,%zmm6,%zmm6 vaesenc %zmm15,%zmm0,%zmm0 vaesenc %zmm15,%zmm1,%zmm1 vaesenc %zmm15,%zmm2,%zmm2 vaesenc %zmm15,%zmm3,%zmm3 vpshufb %zmm8,%zmm7,%zmm7 vpclmulqdq $0x00,%zmm27,%zmm4,%zmm10 vpclmulqdq $0x00,%zmm28,%zmm5,%zmm24 vpclmulqdq $0x00,%zmm29,%zmm6,%zmm25 vaesenc %zmm16,%zmm0,%zmm0 vaesenc %zmm16,%zmm1,%zmm1 vaesenc %zmm16,%zmm2,%zmm2 vaesenc %zmm16,%zmm3,%zmm3 vpxord %zmm24,%zmm10,%zmm10 vpclmulqdq $0x00,%zmm30,%zmm7,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm10 vpclmulqdq 
$0x01,%zmm27,%zmm4,%zmm24 vaesenc %zmm17,%zmm0,%zmm0 vaesenc %zmm17,%zmm1,%zmm1 vaesenc %zmm17,%zmm2,%zmm2 vaesenc %zmm17,%zmm3,%zmm3 vpclmulqdq $0x01,%zmm28,%zmm5,%zmm25 vpclmulqdq $0x01,%zmm29,%zmm6,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x01,%zmm30,%zmm7,%zmm25 vaesenc %zmm18,%zmm0,%zmm0 vaesenc %zmm18,%zmm1,%zmm1 vaesenc %zmm18,%zmm2,%zmm2 vaesenc %zmm18,%zmm3,%zmm3 vpclmulqdq $0x10,%zmm27,%zmm4,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x10,%zmm28,%zmm5,%zmm25 vpclmulqdq $0x10,%zmm29,%zmm6,%zmm26 vaesenc %zmm19,%zmm0,%zmm0 vaesenc %zmm19,%zmm1,%zmm1 vaesenc %zmm19,%zmm2,%zmm2 vaesenc %zmm19,%zmm3,%zmm3 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x01,%zmm10,%zmm31,%zmm26 vpclmulqdq $0x10,%zmm30,%zmm7,%zmm25 vpxord %zmm25,%zmm24,%zmm24 vaesenc %zmm20,%zmm0,%zmm0 vaesenc %zmm20,%zmm1,%zmm1 vaesenc %zmm20,%zmm2,%zmm2 vaesenc %zmm20,%zmm3,%zmm3 vpshufd $0x4e,%zmm10,%zmm10 vpclmulqdq $0x11,%zmm27,%zmm4,%zmm4 vpclmulqdq $0x11,%zmm28,%zmm5,%zmm5 vpclmulqdq $0x11,%zmm29,%zmm6,%zmm6 vaesenc %zmm21,%zmm0,%zmm0 vaesenc %zmm21,%zmm1,%zmm1 vaesenc %zmm21,%zmm2,%zmm2 vaesenc %zmm21,%zmm3,%zmm3 vpternlogd $0x96,%zmm26,%zmm10,%zmm24 vpclmulqdq $0x11,%zmm30,%zmm7,%zmm7 vpternlogd $0x96,%zmm6,%zmm5,%zmm4 vpclmulqdq $0x01,%zmm24,%zmm31,%zmm25 vaesenc %zmm22,%zmm0,%zmm0 vaesenc %zmm22,%zmm1,%zmm1 vaesenc %zmm22,%zmm2,%zmm2 vaesenc %zmm22,%zmm3,%zmm3 vpxord %zmm7,%zmm4,%zmm10 vpshufd $0x4e,%zmm24,%zmm24 vpternlogd $0x96,%zmm25,%zmm24,%zmm10 vaesenc %zmm23,%zmm0,%zmm0 vaesenc %zmm23,%zmm1,%zmm1 vaesenc %zmm23,%zmm2,%zmm2 vaesenc %zmm23,%zmm3,%zmm3 vextracti32x4 $1,%zmm10,%xmm4 vextracti32x4 $2,%zmm10,%xmm5 vextracti32x4 $3,%zmm10,%xmm6 vpxord %xmm4,%xmm10,%xmm10 vpternlogd $0x96,%xmm5,%xmm6,%xmm10 vpxord 0(%rdi),%zmm14,%zmm4 vpxord 64(%rdi),%zmm14,%zmm5 vpxord 128(%rdi),%zmm14,%zmm6 vpxord 192(%rdi),%zmm14,%zmm7 vaesenclast %zmm4,%zmm0,%zmm4 vaesenclast %zmm5,%zmm1,%zmm5 vaesenclast %zmm6,%zmm2,%zmm6 vaesenclast %zmm7,%zmm3,%zmm7 vmovdqu8 
%zmm4,0(%rsi) vmovdqu8 %zmm5,64(%rsi) vmovdqu8 %zmm6,128(%rsi) vmovdqu8 %zmm7,192(%rsi) subq $-256,%rdi subq $-256,%rsi addq $-256,%rdx cmpq $256-1,%rdx ja .Lcrypt_loop_4x__func1 .Lghash_last_ciphertext_4x__func1: vpshufb %zmm8,%zmm4,%zmm4 vpxord %zmm10,%zmm4,%zmm4 vpshufb %zmm8,%zmm5,%zmm5 vpshufb %zmm8,%zmm6,%zmm6 vpshufb %zmm8,%zmm7,%zmm7 vpclmulqdq $0x00,%zmm27,%zmm4,%zmm10 vpclmulqdq $0x00,%zmm28,%zmm5,%zmm24 vpclmulqdq $0x00,%zmm29,%zmm6,%zmm25 vpxord %zmm24,%zmm10,%zmm10 vpclmulqdq $0x00,%zmm30,%zmm7,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm10 vpclmulqdq $0x01,%zmm27,%zmm4,%zmm24 vpclmulqdq $0x01,%zmm28,%zmm5,%zmm25 vpclmulqdq $0x01,%zmm29,%zmm6,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x01,%zmm30,%zmm7,%zmm25 vpclmulqdq $0x10,%zmm27,%zmm4,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x10,%zmm28,%zmm5,%zmm25 vpclmulqdq $0x10,%zmm29,%zmm6,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x01,%zmm10,%zmm31,%zmm26 vpclmulqdq $0x10,%zmm30,%zmm7,%zmm25 vpxord %zmm25,%zmm24,%zmm24 vpshufd $0x4e,%zmm10,%zmm10 vpclmulqdq $0x11,%zmm27,%zmm4,%zmm4 vpclmulqdq $0x11,%zmm28,%zmm5,%zmm5 vpclmulqdq $0x11,%zmm29,%zmm6,%zmm6 vpternlogd $0x96,%zmm26,%zmm10,%zmm24 vpclmulqdq $0x11,%zmm30,%zmm7,%zmm7 vpternlogd $0x96,%zmm6,%zmm5,%zmm4 vpclmulqdq $0x01,%zmm24,%zmm31,%zmm25 vpxord %zmm7,%zmm4,%zmm10 vpshufd $0x4e,%zmm24,%zmm24 vpternlogd $0x96,%zmm25,%zmm24,%zmm10 vextracti32x4 $1,%zmm10,%xmm4 vextracti32x4 $2,%zmm10,%xmm5 vextracti32x4 $3,%zmm10,%xmm6 vpxord %xmm4,%xmm10,%xmm10 vpternlogd $0x96,%xmm5,%xmm6,%xmm10 .Lcrypt_loop_4x_done__func1: testq %rdx,%rdx jz .Ldone__func1 movq %rdx,%rax negq %rax andq $-16,%rax leaq 256(%r9,%rax,1),%r8 vpxor %xmm4,%xmm4,%xmm4 vpxor %xmm5,%xmm5,%xmm5 vpxor %xmm6,%xmm6,%xmm6 cmpq $64,%rdx jb .Lpartial_vec__func1 .Lcrypt_loop_1x__func1: vpshufb %zmm8,%zmm12,%zmm0 vpaddd %zmm11,%zmm12,%zmm12 vpxord %zmm13,%zmm0,%zmm0 leaq 16(%rcx),%rax .Lvaesenc_loop_tail_full_vec__func1: vbroadcasti32x4 (%rax),%zmm9 vaesenc 
%zmm9,%zmm0,%zmm0 addq $16,%rax cmpq %rax,%r11 jne .Lvaesenc_loop_tail_full_vec__func1 vaesenclast %zmm14,%zmm0,%zmm0 vmovdqu8 (%rdi),%zmm1 vpxord %zmm1,%zmm0,%zmm0 vmovdqu8 %zmm0,(%rsi) vmovdqu8 (%r8),%zmm30 vpshufb %zmm8,%zmm0,%zmm0 vpxord %zmm10,%zmm0,%zmm0 vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 vpxord %zmm7,%zmm4,%zmm4 vpternlogd $0x96,%zmm2,%zmm1,%zmm5 vpxord %zmm3,%zmm6,%zmm6 vpxor %xmm10,%xmm10,%xmm10 addq $64,%r8 addq $64,%rdi addq $64,%rsi subq $64,%rdx cmpq $64,%rdx jae .Lcrypt_loop_1x__func1 testq %rdx,%rdx jz .Lreduce__func1 .Lpartial_vec__func1: movq $-1,%rax bzhiq %rdx,%rax,%rax kmovq %rax,%k1 addq $15,%rdx andq $-16,%rdx movq $-1,%rax bzhiq %rdx,%rax,%rax kmovq %rax,%k2 vpshufb %zmm8,%zmm12,%zmm0 vpxord %zmm13,%zmm0,%zmm0 leaq 16(%rcx),%rax .Lvaesenc_loop_tail_partialvec__func1: vbroadcasti32x4 (%rax),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 addq $16,%rax cmpq %rax,%r11 jne .Lvaesenc_loop_tail_partialvec__func1 vaesenclast %zmm14,%zmm0,%zmm0 vmovdqu8 (%rdi),%zmm1{%k1}{z} vpxord %zmm1,%zmm0,%zmm0 vmovdqu8 %zmm0,(%rsi){%k1} vmovdqu8 (%r8),%zmm30{%k2}{z} vmovdqu8 %zmm0,%zmm1{%k1}{z} vpshufb %zmm8,%zmm1,%zmm0 vpxord %zmm10,%zmm0,%zmm0 vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 vpxord %zmm7,%zmm4,%zmm4 vpternlogd $0x96,%zmm2,%zmm1,%zmm5 vpxord %zmm3,%zmm6,%zmm6 .Lreduce__func1: vpclmulqdq $0x01,%zmm4,%zmm31,%zmm0 vpshufd $0x4e,%zmm4,%zmm4 vpternlogd $0x96,%zmm0,%zmm4,%zmm5 vpclmulqdq $0x01,%zmm5,%zmm31,%zmm0 vpshufd $0x4e,%zmm5,%zmm5 vpternlogd $0x96,%zmm0,%zmm5,%zmm6 vextracti32x4 $1,%zmm6,%xmm0 vextracti32x4 $2,%zmm6,%xmm1 vextracti32x4 $3,%zmm6,%xmm2 vpxord %xmm0,%xmm6,%xmm10 vpternlogd $0x96,%xmm1,%xmm2,%xmm10 .Ldone__func1: vpshufb %xmm8,%xmm10,%xmm10 vmovdqu %xmm10,(%r12) vzeroupper popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 ret .cfi_endproc 
.size aes_gcm_enc_update_vaes_avx10_512, . - aes_gcm_enc_update_vaes_avx10_512 .globl aes_gcm_dec_update_vaes_avx10_512 .hidden aes_gcm_dec_update_vaes_avx10_512 .type aes_gcm_dec_update_vaes_avx10_512,@function .align 32 aes_gcm_dec_update_vaes_avx10_512: .cfi_startproc _CET_ENDBR pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-16 movq 16(%rsp),%r12 vbroadcasti32x4 .Lbswap_mask(%rip),%zmm8 vbroadcasti32x4 .Lgfpoly(%rip),%zmm31 vmovdqu (%r12),%xmm10 vpshufb %xmm8,%xmm10,%xmm10 vbroadcasti32x4 (%r8),%zmm12 vpshufb %zmm8,%zmm12,%zmm12 movl 240(%rcx),%r10d leal -20(,%r10,4),%r10d leaq 96(%rcx,%r10,4),%r11 vbroadcasti32x4 (%rcx),%zmm13 vbroadcasti32x4 (%r11),%zmm14 vpaddd .Lctr_pattern(%rip),%zmm12,%zmm12 vbroadcasti32x4 .Linc_4blocks(%rip),%zmm11 cmpq $256-1,%rdx jbe .Lcrypt_loop_4x_done__func2 vmovdqu8 256-256(%r9),%zmm27 vmovdqu8 256-192(%r9),%zmm28 vmovdqu8 256-128(%r9),%zmm29 vmovdqu8 256-64(%r9),%zmm30 vbroadcasti32x4 -144(%r11),%zmm15 vbroadcasti32x4 -128(%r11),%zmm16 vbroadcasti32x4 -112(%r11),%zmm17 vbroadcasti32x4 -96(%r11),%zmm18 vbroadcasti32x4 -80(%r11),%zmm19 vbroadcasti32x4 -64(%r11),%zmm20 vbroadcasti32x4 -48(%r11),%zmm21 vbroadcasti32x4 -32(%r11),%zmm22 vbroadcasti32x4 -16(%r11),%zmm23 .Lcrypt_loop_4x__func2: vmovdqu8 0(%rdi),%zmm4 vmovdqu8 64(%rdi),%zmm5 vmovdqu8 128(%rdi),%zmm6 vmovdqu8 192(%rdi),%zmm7 vpshufb %zmm8,%zmm12,%zmm0 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm1 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm2 vpaddd %zmm11,%zmm12,%zmm12 vpshufb %zmm8,%zmm12,%zmm3 vpaddd %zmm11,%zmm12,%zmm12 vpxord %zmm13,%zmm0,%zmm0 vpxord %zmm13,%zmm1,%zmm1 vpxord %zmm13,%zmm2,%zmm2 vpxord %zmm13,%zmm3,%zmm3 cmpl $24,%r10d jl .Laes128__func2 je .Laes192__func2 vbroadcasti32x4 -208(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 vbroadcasti32x4 -192(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 
.Laes192__func2: vbroadcasti32x4 -176(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 vbroadcasti32x4 -160(%r11),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 vaesenc %zmm9,%zmm1,%zmm1 vaesenc %zmm9,%zmm2,%zmm2 vaesenc %zmm9,%zmm3,%zmm3 .Laes128__func2: prefetcht0 512+0(%rdi) prefetcht0 512+64(%rdi) prefetcht0 512+128(%rdi) prefetcht0 512+192(%rdi) vpshufb %zmm8,%zmm4,%zmm4 vpxord %zmm10,%zmm4,%zmm4 vpshufb %zmm8,%zmm5,%zmm5 vpshufb %zmm8,%zmm6,%zmm6 vaesenc %zmm15,%zmm0,%zmm0 vaesenc %zmm15,%zmm1,%zmm1 vaesenc %zmm15,%zmm2,%zmm2 vaesenc %zmm15,%zmm3,%zmm3 vpshufb %zmm8,%zmm7,%zmm7 vpclmulqdq $0x00,%zmm27,%zmm4,%zmm10 vpclmulqdq $0x00,%zmm28,%zmm5,%zmm24 vpclmulqdq $0x00,%zmm29,%zmm6,%zmm25 vaesenc %zmm16,%zmm0,%zmm0 vaesenc %zmm16,%zmm1,%zmm1 vaesenc %zmm16,%zmm2,%zmm2 vaesenc %zmm16,%zmm3,%zmm3 vpxord %zmm24,%zmm10,%zmm10 vpclmulqdq $0x00,%zmm30,%zmm7,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm10 vpclmulqdq $0x01,%zmm27,%zmm4,%zmm24 vaesenc %zmm17,%zmm0,%zmm0 vaesenc %zmm17,%zmm1,%zmm1 vaesenc %zmm17,%zmm2,%zmm2 vaesenc %zmm17,%zmm3,%zmm3 vpclmulqdq $0x01,%zmm28,%zmm5,%zmm25 vpclmulqdq $0x01,%zmm29,%zmm6,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x01,%zmm30,%zmm7,%zmm25 vaesenc %zmm18,%zmm0,%zmm0 vaesenc %zmm18,%zmm1,%zmm1 vaesenc %zmm18,%zmm2,%zmm2 vaesenc %zmm18,%zmm3,%zmm3 vpclmulqdq $0x10,%zmm27,%zmm4,%zmm26 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x10,%zmm28,%zmm5,%zmm25 vpclmulqdq $0x10,%zmm29,%zmm6,%zmm26 vaesenc %zmm19,%zmm0,%zmm0 vaesenc %zmm19,%zmm1,%zmm1 vaesenc %zmm19,%zmm2,%zmm2 vaesenc %zmm19,%zmm3,%zmm3 vpternlogd $0x96,%zmm26,%zmm25,%zmm24 vpclmulqdq $0x01,%zmm10,%zmm31,%zmm26 vpclmulqdq $0x10,%zmm30,%zmm7,%zmm25 vpxord %zmm25,%zmm24,%zmm24 vaesenc %zmm20,%zmm0,%zmm0 vaesenc %zmm20,%zmm1,%zmm1 vaesenc %zmm20,%zmm2,%zmm2 vaesenc %zmm20,%zmm3,%zmm3 vpshufd $0x4e,%zmm10,%zmm10 vpclmulqdq $0x11,%zmm27,%zmm4,%zmm4 vpclmulqdq $0x11,%zmm28,%zmm5,%zmm5 vpclmulqdq 
$0x11,%zmm29,%zmm6,%zmm6 vaesenc %zmm21,%zmm0,%zmm0 vaesenc %zmm21,%zmm1,%zmm1 vaesenc %zmm21,%zmm2,%zmm2 vaesenc %zmm21,%zmm3,%zmm3 vpternlogd $0x96,%zmm26,%zmm10,%zmm24 vpclmulqdq $0x11,%zmm30,%zmm7,%zmm7 vpternlogd $0x96,%zmm6,%zmm5,%zmm4 vpclmulqdq $0x01,%zmm24,%zmm31,%zmm25 vaesenc %zmm22,%zmm0,%zmm0 vaesenc %zmm22,%zmm1,%zmm1 vaesenc %zmm22,%zmm2,%zmm2 vaesenc %zmm22,%zmm3,%zmm3 vpxord %zmm7,%zmm4,%zmm10 vpshufd $0x4e,%zmm24,%zmm24 vpternlogd $0x96,%zmm25,%zmm24,%zmm10 vaesenc %zmm23,%zmm0,%zmm0 vaesenc %zmm23,%zmm1,%zmm1 vaesenc %zmm23,%zmm2,%zmm2 vaesenc %zmm23,%zmm3,%zmm3 vextracti32x4 $1,%zmm10,%xmm4 vextracti32x4 $2,%zmm10,%xmm5 vextracti32x4 $3,%zmm10,%xmm6 vpxord %xmm4,%xmm10,%xmm10 vpternlogd $0x96,%xmm5,%xmm6,%xmm10 vpxord 0(%rdi),%zmm14,%zmm4 vpxord 64(%rdi),%zmm14,%zmm5 vpxord 128(%rdi),%zmm14,%zmm6 vpxord 192(%rdi),%zmm14,%zmm7 vaesenclast %zmm4,%zmm0,%zmm4 vaesenclast %zmm5,%zmm1,%zmm5 vaesenclast %zmm6,%zmm2,%zmm6 vaesenclast %zmm7,%zmm3,%zmm7 vmovdqu8 %zmm4,0(%rsi) vmovdqu8 %zmm5,64(%rsi) vmovdqu8 %zmm6,128(%rsi) vmovdqu8 %zmm7,192(%rsi) subq $-256,%rdi subq $-256,%rsi addq $-256,%rdx cmpq $256-1,%rdx ja .Lcrypt_loop_4x__func2 .Lcrypt_loop_4x_done__func2: testq %rdx,%rdx jz .Ldone__func2 movq %rdx,%rax negq %rax andq $-16,%rax leaq 256(%r9,%rax,1),%r8 vpxor %xmm4,%xmm4,%xmm4 vpxor %xmm5,%xmm5,%xmm5 vpxor %xmm6,%xmm6,%xmm6 cmpq $64,%rdx jb .Lpartial_vec__func2 .Lcrypt_loop_1x__func2: vpshufb %zmm8,%zmm12,%zmm0 vpaddd %zmm11,%zmm12,%zmm12 vpxord %zmm13,%zmm0,%zmm0 leaq 16(%rcx),%rax .Lvaesenc_loop_tail_full_vec__func2: vbroadcasti32x4 (%rax),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 addq $16,%rax cmpq %rax,%r11 jne .Lvaesenc_loop_tail_full_vec__func2 vaesenclast %zmm14,%zmm0,%zmm0 vmovdqu8 (%rdi),%zmm1 vpxord %zmm1,%zmm0,%zmm0 vmovdqu8 %zmm0,(%rsi) vmovdqu8 (%r8),%zmm30 vpshufb %zmm8,%zmm1,%zmm0 vpxord %zmm10,%zmm0,%zmm0 vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 vpclmulqdq 
$0x11,%zmm30,%zmm0,%zmm3 vpxord %zmm7,%zmm4,%zmm4 vpternlogd $0x96,%zmm2,%zmm1,%zmm5 vpxord %zmm3,%zmm6,%zmm6 vpxor %xmm10,%xmm10,%xmm10 addq $64,%r8 addq $64,%rdi addq $64,%rsi subq $64,%rdx cmpq $64,%rdx jae .Lcrypt_loop_1x__func2 testq %rdx,%rdx jz .Lreduce__func2 .Lpartial_vec__func2: movq $-1,%rax bzhiq %rdx,%rax,%rax kmovq %rax,%k1 addq $15,%rdx andq $-16,%rdx movq $-1,%rax bzhiq %rdx,%rax,%rax kmovq %rax,%k2 vpshufb %zmm8,%zmm12,%zmm0 vpxord %zmm13,%zmm0,%zmm0 leaq 16(%rcx),%rax .Lvaesenc_loop_tail_partialvec__func2: vbroadcasti32x4 (%rax),%zmm9 vaesenc %zmm9,%zmm0,%zmm0 addq $16,%rax cmpq %rax,%r11 jne .Lvaesenc_loop_tail_partialvec__func2 vaesenclast %zmm14,%zmm0,%zmm0 vmovdqu8 (%rdi),%zmm1{%k1}{z} vpxord %zmm1,%zmm0,%zmm0 vmovdqu8 %zmm0,(%rsi){%k1} vmovdqu8 (%r8),%zmm30{%k2}{z} vpshufb %zmm8,%zmm1,%zmm0 vpxord %zmm10,%zmm0,%zmm0 vpclmulqdq $0x00,%zmm30,%zmm0,%zmm7 vpclmulqdq $0x01,%zmm30,%zmm0,%zmm1 vpclmulqdq $0x10,%zmm30,%zmm0,%zmm2 vpclmulqdq $0x11,%zmm30,%zmm0,%zmm3 vpxord %zmm7,%zmm4,%zmm4 vpternlogd $0x96,%zmm2,%zmm1,%zmm5 vpxord %zmm3,%zmm6,%zmm6 .Lreduce__func2: vpclmulqdq $0x01,%zmm4,%zmm31,%zmm0 vpshufd $0x4e,%zmm4,%zmm4 vpternlogd $0x96,%zmm0,%zmm4,%zmm5 vpclmulqdq $0x01,%zmm5,%zmm31,%zmm0 vpshufd $0x4e,%zmm5,%zmm5 vpternlogd $0x96,%zmm0,%zmm5,%zmm6 vextracti32x4 $1,%zmm6,%xmm0 vextracti32x4 $2,%zmm6,%xmm1 vextracti32x4 $3,%zmm6,%xmm2 vpxord %xmm0,%xmm6,%xmm10 vpternlogd $0x96,%xmm1,%xmm2,%xmm10 .Ldone__func2: vpshufb %xmm8,%xmm10,%xmm10 vmovdqu %xmm10,(%r12) vzeroupper popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 ret .cfi_endproc .size aes_gcm_dec_update_vaes_avx10_512, . 
- aes_gcm_dec_update_vaes_avx10_512 #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aes-gcm-avx2-x86_64-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .section __DATA,__const .p2align 4 L$bswap_mask: .quad 0x08090a0b0c0d0e0f, 0x0001020304050607 L$gfpoly: .quad 1, 0xc200000000000000 L$gfpoly_and_internal_carrybit: .quad 1, 0xc200000000000001 .p2align 5 L$ctr_pattern: .quad 0, 0 .quad 1, 0 L$inc_2blocks: .quad 2, 0 .quad 2, 0 .text .globl _gcm_init_vpclmulqdq_avx2 .private_extern _gcm_init_vpclmulqdq_avx2 .p2align 5 _gcm_init_vpclmulqdq_avx2: _CET_ENDBR vpshufd $0x4e,(%rsi),%xmm3 vpshufd $0xd3,%xmm3,%xmm0 vpsrad $31,%xmm0,%xmm0 vpaddq %xmm3,%xmm3,%xmm3 vpand L$gfpoly_and_internal_carrybit(%rip),%xmm0,%xmm0 vpxor %xmm0,%xmm3,%xmm3 vbroadcasti128 L$gfpoly(%rip),%ymm6 vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0 vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1 vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2 vpxor %xmm2,%xmm1,%xmm1 vpclmulqdq $0x01,%xmm0,%xmm6,%xmm2 vpshufd $0x4e,%xmm0,%xmm0 vpxor %xmm0,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm3,%xmm3,%xmm5 vpclmulqdq $0x01,%xmm1,%xmm6,%xmm0 vpshufd $0x4e,%xmm1,%xmm1 vpxor %xmm1,%xmm5,%xmm5 vpxor %xmm0,%xmm5,%xmm5 vinserti128 $1,%xmm3,%ymm5,%ymm3 vinserti128 $1,%xmm5,%ymm5,%ymm5 vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0 vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1 vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2 vpshufd $0x4e,%ymm0,%ymm0 vpxor %ymm0,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4 vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0 vpshufd $0x4e,%ymm1,%ymm1 vpxor %ymm1,%ymm4,%ymm4 vpxor %ymm0,%ymm4,%ymm4 vmovdqu 
%ymm3,96(%rdi) vmovdqu %ymm4,64(%rdi) vpunpcklqdq %ymm3,%ymm4,%ymm0 vpunpckhqdq %ymm3,%ymm4,%ymm1 vpxor %ymm1,%ymm0,%ymm0 vmovdqu %ymm0,128+32(%rdi) vpclmulqdq $0x00,%ymm5,%ymm4,%ymm0 vpclmulqdq $0x01,%ymm5,%ymm4,%ymm1 vpclmulqdq $0x10,%ymm5,%ymm4,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2 vpshufd $0x4e,%ymm0,%ymm0 vpxor %ymm0,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vpclmulqdq $0x11,%ymm5,%ymm4,%ymm3 vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0 vpshufd $0x4e,%ymm1,%ymm1 vpxor %ymm1,%ymm3,%ymm3 vpxor %ymm0,%ymm3,%ymm3 vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0 vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1 vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2 vpshufd $0x4e,%ymm0,%ymm0 vpxor %ymm0,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4 vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0 vpshufd $0x4e,%ymm1,%ymm1 vpxor %ymm1,%ymm4,%ymm4 vpxor %ymm0,%ymm4,%ymm4 vmovdqu %ymm3,32(%rdi) vmovdqu %ymm4,0(%rdi) vpunpcklqdq %ymm3,%ymm4,%ymm0 vpunpckhqdq %ymm3,%ymm4,%ymm1 vpxor %ymm1,%ymm0,%ymm0 vmovdqu %ymm0,128(%rdi) vzeroupper ret .globl _gcm_gmult_vpclmulqdq_avx2 .private_extern _gcm_gmult_vpclmulqdq_avx2 .p2align 5 _gcm_gmult_vpclmulqdq_avx2: _CET_ENDBR vmovdqu (%rdi),%xmm0 vmovdqu L$bswap_mask(%rip),%xmm1 vmovdqu 128-16(%rsi),%xmm2 vmovdqu L$gfpoly(%rip),%xmm3 vpshufb %xmm1,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm4 vpclmulqdq $0x01,%xmm2,%xmm0,%xmm5 vpclmulqdq $0x10,%xmm2,%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x01,%xmm4,%xmm3,%xmm6 vpshufd $0x4e,%xmm4,%xmm4 vpxor %xmm4,%xmm5,%xmm5 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm0 vpclmulqdq $0x01,%xmm5,%xmm3,%xmm4 vpshufd $0x4e,%xmm5,%xmm5 vpxor %xmm5,%xmm0,%xmm0 vpxor %xmm4,%xmm0,%xmm0 vpshufb %xmm1,%xmm0,%xmm0 vmovdqu %xmm0,(%rdi) ret .globl _gcm_ghash_vpclmulqdq_avx2 .private_extern _gcm_ghash_vpclmulqdq_avx2 .p2align 5 _gcm_ghash_vpclmulqdq_avx2: _CET_ENDBR vbroadcasti128 L$bswap_mask(%rip),%ymm6 vmovdqu (%rdi),%xmm5 vpshufb %xmm6,%xmm5,%xmm5 
vbroadcasti128 L$gfpoly(%rip),%ymm7 cmpq $32,%rcx jb L$ghash_lastblock cmpq $127,%rcx jbe L$ghash_loop_1x vmovdqu 128(%rsi),%ymm8 vmovdqu 128+32(%rsi),%ymm9 L$ghash_loop_4x: vmovdqu 0(%rdx),%ymm1 vpshufb %ymm6,%ymm1,%ymm1 vmovdqu 0(%rsi),%ymm2 vpxor %ymm5,%ymm1,%ymm1 vpclmulqdq $0x00,%ymm2,%ymm1,%ymm3 vpclmulqdq $0x11,%ymm2,%ymm1,%ymm5 vpunpckhqdq %ymm1,%ymm1,%ymm0 vpxor %ymm1,%ymm0,%ymm0 vpclmulqdq $0x00,%ymm8,%ymm0,%ymm4 vmovdqu 32(%rdx),%ymm1 vpshufb %ymm6,%ymm1,%ymm1 vmovdqu 32(%rsi),%ymm2 vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0 vpxor %ymm0,%ymm5,%ymm5 vpunpckhqdq %ymm1,%ymm1,%ymm0 vpxor %ymm1,%ymm0,%ymm0 vpclmulqdq $0x10,%ymm8,%ymm0,%ymm0 vpxor %ymm0,%ymm4,%ymm4 vmovdqu 64(%rdx),%ymm1 vpshufb %ymm6,%ymm1,%ymm1 vmovdqu 64(%rsi),%ymm2 vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0 vpxor %ymm0,%ymm5,%ymm5 vpunpckhqdq %ymm1,%ymm1,%ymm0 vpxor %ymm1,%ymm0,%ymm0 vpclmulqdq $0x00,%ymm9,%ymm0,%ymm0 vpxor %ymm0,%ymm4,%ymm4 vmovdqu 96(%rdx),%ymm1 vpshufb %ymm6,%ymm1,%ymm1 vmovdqu 96(%rsi),%ymm2 vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0 vpxor %ymm0,%ymm5,%ymm5 vpunpckhqdq %ymm1,%ymm1,%ymm0 vpxor %ymm1,%ymm0,%ymm0 vpclmulqdq $0x10,%ymm9,%ymm0,%ymm0 vpxor %ymm0,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpxor %ymm5,%ymm4,%ymm4 vbroadcasti128 L$gfpoly(%rip),%ymm2 vpclmulqdq $0x01,%ymm3,%ymm2,%ymm0 vpshufd $0x4e,%ymm3,%ymm3 vpxor %ymm3,%ymm4,%ymm4 vpxor %ymm0,%ymm4,%ymm4 vpclmulqdq $0x01,%ymm4,%ymm2,%ymm0 vpshufd $0x4e,%ymm4,%ymm4 vpxor %ymm4,%ymm5,%ymm5 vpxor %ymm0,%ymm5,%ymm5 vextracti128 $1,%ymm5,%xmm0 vpxor %xmm0,%xmm5,%xmm5 subq $-128,%rdx addq $-128,%rcx cmpq $127,%rcx ja L$ghash_loop_4x cmpq $32,%rcx jb L$ghash_loop_1x_done L$ghash_loop_1x: vmovdqu (%rdx),%ymm0 vpshufb %ymm6,%ymm0,%ymm0 vpxor %ymm0,%ymm5,%ymm5 vmovdqu 128-32(%rsi),%ymm0 vpclmulqdq $0x00,%ymm0,%ymm5,%ymm1 vpclmulqdq $0x01,%ymm0,%ymm5,%ymm2 vpclmulqdq 
$0x10,%ymm0,%ymm5,%ymm3 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x01,%ymm1,%ymm7,%ymm3 vpshufd $0x4e,%ymm1,%ymm1 vpxor %ymm1,%ymm2,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x11,%ymm0,%ymm5,%ymm5 vpclmulqdq $0x01,%ymm2,%ymm7,%ymm1 vpshufd $0x4e,%ymm2,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpxor %ymm1,%ymm5,%ymm5 vextracti128 $1,%ymm5,%xmm0 vpxor %xmm0,%xmm5,%xmm5 addq $32,%rdx subq $32,%rcx cmpq $32,%rcx jae L$ghash_loop_1x L$ghash_loop_1x_done: vzeroupper L$ghash_lastblock: testq %rcx,%rcx jz L$ghash_done vmovdqu (%rdx),%xmm0 vpshufb %xmm6,%xmm0,%xmm0 vpxor %xmm0,%xmm5,%xmm5 vmovdqu 128-16(%rsi),%xmm0 vpclmulqdq $0x00,%xmm0,%xmm5,%xmm1 vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2 vpclmulqdq $0x10,%xmm0,%xmm5,%xmm3 vpxor %xmm3,%xmm2,%xmm2 vpclmulqdq $0x01,%xmm1,%xmm7,%xmm3 vpshufd $0x4e,%xmm1,%xmm1 vpxor %xmm1,%xmm2,%xmm2 vpxor %xmm3,%xmm2,%xmm2 vpclmulqdq $0x11,%xmm0,%xmm5,%xmm5 vpclmulqdq $0x01,%xmm2,%xmm7,%xmm1 vpshufd $0x4e,%xmm2,%xmm2 vpxor %xmm2,%xmm5,%xmm5 vpxor %xmm1,%xmm5,%xmm5 L$ghash_done: vpshufb %xmm6,%xmm5,%xmm5 vmovdqu %xmm5,(%rdi) ret .globl _aes_gcm_enc_update_vaes_avx2 .private_extern _aes_gcm_enc_update_vaes_avx2 .p2align 5 _aes_gcm_enc_update_vaes_avx2: _CET_ENDBR pushq %r12 movq 16(%rsp),%r12 #ifdef BORINGSSL_DISPATCH_TEST movb $1,_BORINGSSL_function_hit+8(%rip) #endif vbroadcasti128 L$bswap_mask(%rip),%ymm0 vmovdqu (%r12),%xmm1 vpshufb %xmm0,%xmm1,%xmm1 vbroadcasti128 (%r8),%ymm11 vpshufb %ymm0,%ymm11,%ymm11 movl 240(%rcx),%r10d leal -20(,%r10,4),%r10d leaq 96(%rcx,%r10,4),%r11 vbroadcasti128 (%rcx),%ymm9 vbroadcasti128 (%r11),%ymm10 vpaddd L$ctr_pattern(%rip),%ymm11,%ymm11 cmpq $127,%rdx jbe L$crypt_loop_4x_done__func1 vmovdqu 128(%r9),%ymm7 vmovdqu 128+32(%r9),%ymm8 vmovdqu L$inc_2blocks(%rip),%ymm2 vpshufb %ymm0,%ymm11,%ymm12 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm14 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm15 vpaddd %ymm2,%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor 
%ymm9,%ymm13,%ymm13 vpxor %ymm9,%ymm14,%ymm14 vpxor %ymm9,%ymm15,%ymm15 leaq 16(%rcx),%rax L$vaesenc_loop_first_4_vecs__func1: vbroadcasti128 (%rax),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_first_4_vecs__func1 vpxor 0(%rdi),%ymm10,%ymm2 vpxor 32(%rdi),%ymm10,%ymm3 vpxor 64(%rdi),%ymm10,%ymm5 vpxor 96(%rdi),%ymm10,%ymm6 vaesenclast %ymm2,%ymm12,%ymm12 vaesenclast %ymm3,%ymm13,%ymm13 vaesenclast %ymm5,%ymm14,%ymm14 vaesenclast %ymm6,%ymm15,%ymm15 vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vmovdqu %ymm14,64(%rsi) vmovdqu %ymm15,96(%rsi) subq $-128,%rdi addq $-128,%rdx cmpq $127,%rdx jbe L$ghash_last_ciphertext_4x__func1 .p2align 4 L$crypt_loop_4x__func1: vmovdqu L$inc_2blocks(%rip),%ymm2 vpshufb %ymm0,%ymm11,%ymm12 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm14 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm15 vpaddd %ymm2,%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 vpxor %ymm9,%ymm14,%ymm14 vpxor %ymm9,%ymm15,%ymm15 cmpl $24,%r10d jl L$aes128__func1 je L$aes192__func1 vbroadcasti128 -208(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vbroadcasti128 -192(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 L$aes192__func1: vbroadcasti128 -176(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vbroadcasti128 -160(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 L$aes128__func1: prefetcht0 512(%rdi) prefetcht0 512+64(%rdi) vmovdqu 0(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 0(%r9),%ymm4 vpxor %ymm1,%ymm3,%ymm3 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5 vpclmulqdq 
$0x11,%ymm4,%ymm3,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6 vbroadcasti128 -144(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vbroadcasti128 -128(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vmovdqu 32(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 32(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -112(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vmovdqu 64(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 64(%r9),%ymm4 vbroadcasti128 -96(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vbroadcasti128 -80(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vmovdqu 96(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vbroadcasti128 -64(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vmovdqu 96(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -48(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 
vaesenc %ymm2,%ymm15,%ymm15 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm1,%ymm6,%ymm6 vbroadcasti128 L$gfpoly(%rip),%ymm4 vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -32(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vbroadcasti128 -16(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vextracti128 $1,%ymm1,%xmm2 vpxor %xmm2,%xmm1,%xmm1 subq $-128,%rsi vpxor 0(%rdi),%ymm10,%ymm2 vpxor 32(%rdi),%ymm10,%ymm3 vpxor 64(%rdi),%ymm10,%ymm5 vpxor 96(%rdi),%ymm10,%ymm6 vaesenclast %ymm2,%ymm12,%ymm12 vaesenclast %ymm3,%ymm13,%ymm13 vaesenclast %ymm5,%ymm14,%ymm14 vaesenclast %ymm6,%ymm15,%ymm15 vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vmovdqu %ymm14,64(%rsi) vmovdqu %ymm15,96(%rsi) subq $-128,%rdi addq $-128,%rdx cmpq $127,%rdx ja L$crypt_loop_4x__func1 L$ghash_last_ciphertext_4x__func1: vmovdqu 0(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 0(%r9),%ymm4 vpxor %ymm1,%ymm3,%ymm3 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6 vmovdqu 32(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 32(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vmovdqu 64(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 64(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vmovdqu 
96(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 96(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm1,%ymm6,%ymm6 vbroadcasti128 L$gfpoly(%rip),%ymm4 vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm2,%ymm6,%ymm6 vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vextracti128 $1,%ymm1,%xmm2 vpxor %xmm2,%xmm1,%xmm1 subq $-128,%rsi L$crypt_loop_4x_done__func1: testq %rdx,%rdx jz L$done__func1 leaq 128(%r9),%r8 subq %rdx,%r8 vpxor %xmm5,%xmm5,%xmm5 vpxor %xmm6,%xmm6,%xmm6 vpxor %xmm7,%xmm7,%xmm7 cmpq $64,%rdx jb L$lessthan64bytes__func1 vpshufb %ymm0,%ymm11,%ymm12 vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 leaq 16(%rcx),%rax L$vaesenc_loop_tail_1__func1: vbroadcasti128 (%rax),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_tail_1__func1 vaesenclast %ymm10,%ymm12,%ymm12 vaesenclast %ymm10,%ymm13,%ymm13 vmovdqu 0(%rdi),%ymm2 vmovdqu 32(%rdi),%ymm3 vpxor %ymm2,%ymm12,%ymm12 vpxor %ymm3,%ymm13,%ymm13 vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vpshufb %ymm0,%ymm12,%ymm12 vpshufb %ymm0,%ymm13,%ymm13 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 vmovdqu 32(%r8),%ymm3 vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5 vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6 vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7 vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm7,%ymm7 
addq $64,%r8 addq $64,%rdi addq $64,%rsi subq $64,%rdx jz L$reduce__func1 vpxor %xmm1,%xmm1,%xmm1 L$lessthan64bytes__func1: vpshufb %ymm0,%ymm11,%ymm12 vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 leaq 16(%rcx),%rax L$vaesenc_loop_tail_2__func1: vbroadcasti128 (%rax),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_tail_2__func1 vaesenclast %ymm10,%ymm12,%ymm12 vaesenclast %ymm10,%ymm13,%ymm13 cmpq $32,%rdx jb L$xor_one_block__func1 je L$xor_two_blocks__func1 L$xor_three_blocks__func1: vmovdqu 0(%rdi),%ymm2 vmovdqu 32(%rdi),%xmm3 vpxor %ymm2,%ymm12,%ymm12 vpxor %xmm3,%xmm13,%xmm13 vmovdqu %ymm12,0(%rsi) vmovdqu %xmm13,32(%rsi) vpshufb %ymm0,%ymm12,%ymm12 vpshufb %xmm0,%xmm13,%xmm13 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 vmovdqu 32(%r8),%xmm3 vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm7,%ymm7 jmp L$ghash_mul_one_vec_unreduced__func1 L$xor_two_blocks__func1: vmovdqu (%rdi),%ymm2 vpxor %ymm2,%ymm12,%ymm12 vmovdqu %ymm12,(%rsi) vpshufb %ymm0,%ymm12,%ymm12 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 jmp L$ghash_mul_one_vec_unreduced__func1 L$xor_one_block__func1: vmovdqu (%rdi),%xmm2 vpxor %xmm2,%xmm12,%xmm12 vmovdqu %xmm12,(%rsi) vpshufb %xmm0,%xmm12,%xmm12 vpxor %xmm1,%xmm12,%xmm12 vmovdqu (%r8),%xmm2 L$ghash_mul_one_vec_unreduced__func1: vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm7,%ymm7 L$reduce__func1: vbroadcasti128 L$gfpoly(%rip),%ymm2 vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 
vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm7,%ymm7 vpxor %ymm3,%ymm7,%ymm7 vextracti128 $1,%ymm7,%xmm1 vpxor %xmm7,%xmm1,%xmm1 L$done__func1: vpshufb %xmm0,%xmm1,%xmm1 vmovdqu %xmm1,(%r12) vzeroupper popq %r12 ret .globl _aes_gcm_dec_update_vaes_avx2 .private_extern _aes_gcm_dec_update_vaes_avx2 .p2align 5 _aes_gcm_dec_update_vaes_avx2: _CET_ENDBR pushq %r12 movq 16(%rsp),%r12 vbroadcasti128 L$bswap_mask(%rip),%ymm0 vmovdqu (%r12),%xmm1 vpshufb %xmm0,%xmm1,%xmm1 vbroadcasti128 (%r8),%ymm11 vpshufb %ymm0,%ymm11,%ymm11 movl 240(%rcx),%r10d leal -20(,%r10,4),%r10d leaq 96(%rcx,%r10,4),%r11 vbroadcasti128 (%rcx),%ymm9 vbroadcasti128 (%r11),%ymm10 vpaddd L$ctr_pattern(%rip),%ymm11,%ymm11 cmpq $127,%rdx jbe L$crypt_loop_4x_done__func2 vmovdqu 128(%r9),%ymm7 vmovdqu 128+32(%r9),%ymm8 .p2align 4 L$crypt_loop_4x__func2: vmovdqu L$inc_2blocks(%rip),%ymm2 vpshufb %ymm0,%ymm11,%ymm12 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm14 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm15 vpaddd %ymm2,%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 vpxor %ymm9,%ymm14,%ymm14 vpxor %ymm9,%ymm15,%ymm15 cmpl $24,%r10d jl L$aes128__func2 je L$aes192__func2 vbroadcasti128 -208(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vbroadcasti128 -192(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 L$aes192__func2: vbroadcasti128 -176(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vbroadcasti128 -160(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 L$aes128__func2: prefetcht0 512(%rdi) prefetcht0 512+64(%rdi) vmovdqu 0(%rdi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 0(%r9),%ymm4 vpxor 
%ymm1,%ymm3,%ymm3 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6 vbroadcasti128 -144(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vbroadcasti128 -128(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vmovdqu 32(%rdi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 32(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -112(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vmovdqu 64(%rdi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 64(%r9),%ymm4 vbroadcasti128 -96(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vbroadcasti128 -80(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vmovdqu 96(%rdi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vbroadcasti128 -64(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vmovdqu 96(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -48(%r11),%ymm2 vaesenc 
%ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm1,%ymm6,%ymm6 vbroadcasti128 L$gfpoly(%rip),%ymm4 vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -32(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vbroadcasti128 -16(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vextracti128 $1,%ymm1,%xmm2 vpxor %xmm2,%xmm1,%xmm1 vpxor 0(%rdi),%ymm10,%ymm2 vpxor 32(%rdi),%ymm10,%ymm3 vpxor 64(%rdi),%ymm10,%ymm5 vpxor 96(%rdi),%ymm10,%ymm6 vaesenclast %ymm2,%ymm12,%ymm12 vaesenclast %ymm3,%ymm13,%ymm13 vaesenclast %ymm5,%ymm14,%ymm14 vaesenclast %ymm6,%ymm15,%ymm15 vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vmovdqu %ymm14,64(%rsi) vmovdqu %ymm15,96(%rsi) subq $-128,%rdi subq $-128,%rsi addq $-128,%rdx cmpq $127,%rdx ja L$crypt_loop_4x__func2 L$crypt_loop_4x_done__func2: testq %rdx,%rdx jz L$done__func2 leaq 128(%r9),%r8 subq %rdx,%r8 vpxor %xmm5,%xmm5,%xmm5 vpxor %xmm6,%xmm6,%xmm6 vpxor %xmm7,%xmm7,%xmm7 cmpq $64,%rdx jb L$lessthan64bytes__func2 vpshufb %ymm0,%ymm11,%ymm12 vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 leaq 16(%rcx),%rax L$vaesenc_loop_tail_1__func2: vbroadcasti128 (%rax),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_tail_1__func2 vaesenclast %ymm10,%ymm12,%ymm12 vaesenclast %ymm10,%ymm13,%ymm13 vmovdqu 0(%rdi),%ymm2 vmovdqu 32(%rdi),%ymm3 vpxor %ymm2,%ymm12,%ymm12 vpxor %ymm3,%ymm13,%ymm13 vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vpshufb %ymm0,%ymm2,%ymm12 
vpshufb %ymm0,%ymm3,%ymm13 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 vmovdqu 32(%r8),%ymm3 vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5 vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6 vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7 vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm7,%ymm7 addq $64,%r8 addq $64,%rdi addq $64,%rsi subq $64,%rdx jz L$reduce__func2 vpxor %xmm1,%xmm1,%xmm1 L$lessthan64bytes__func2: vpshufb %ymm0,%ymm11,%ymm12 vpaddd L$inc_2blocks(%rip),%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 leaq 16(%rcx),%rax L$vaesenc_loop_tail_2__func2: vbroadcasti128 (%rax),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 addq $16,%rax cmpq %rax,%r11 jne L$vaesenc_loop_tail_2__func2 vaesenclast %ymm10,%ymm12,%ymm12 vaesenclast %ymm10,%ymm13,%ymm13 cmpq $32,%rdx jb L$xor_one_block__func2 je L$xor_two_blocks__func2 L$xor_three_blocks__func2: vmovdqu 0(%rdi),%ymm2 vmovdqu 32(%rdi),%xmm3 vpxor %ymm2,%ymm12,%ymm12 vpxor %xmm3,%xmm13,%xmm13 vmovdqu %ymm12,0(%rsi) vmovdqu %xmm13,32(%rsi) vpshufb %ymm0,%ymm2,%ymm12 vpshufb %xmm0,%xmm3,%xmm13 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 vmovdqu 32(%r8),%xmm3 vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm7,%ymm7 jmp L$ghash_mul_one_vec_unreduced__func2 L$xor_two_blocks__func2: vmovdqu (%rdi),%ymm2 vpxor %ymm2,%ymm12,%ymm12 vmovdqu %ymm12,(%rsi) vpshufb %ymm0,%ymm2,%ymm12 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 jmp L$ghash_mul_one_vec_unreduced__func2 L$xor_one_block__func2: vmovdqu (%rdi),%xmm2 vpxor %xmm2,%xmm12,%xmm12 vmovdqu %xmm12,(%rsi) vpshufb 
%xmm0,%xmm2,%xmm12 vpxor %xmm1,%xmm12,%xmm12 vmovdqu (%r8),%xmm2 L$ghash_mul_one_vec_unreduced__func2: vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm7,%ymm7 L$reduce__func2: vbroadcasti128 L$gfpoly(%rip),%ymm2 vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm7,%ymm7 vpxor %ymm3,%ymm7,%ymm7 vextracti128 $1,%ymm7,%xmm1 vpxor %xmm7,%xmm1,%xmm1 L$done__func2: vpshufb %xmm0,%xmm1,%xmm1 vmovdqu %xmm1,(%r12) vzeroupper popq %r12 ret #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aes-gcm-avx2-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .section .rodata .align 16 .Lbswap_mask: .quad 0x08090a0b0c0d0e0f, 0x0001020304050607 .Lgfpoly: .quad 1, 0xc200000000000000 .Lgfpoly_and_internal_carrybit: .quad 1, 0xc200000000000001 .align 32 .Lctr_pattern: .quad 0, 0 .quad 1, 0 .Linc_2blocks: .quad 2, 0 .quad 2, 0 .text .globl gcm_init_vpclmulqdq_avx2 .hidden gcm_init_vpclmulqdq_avx2 .type gcm_init_vpclmulqdq_avx2,@function .align 32 gcm_init_vpclmulqdq_avx2: .cfi_startproc _CET_ENDBR vpshufd $0x4e,(%rsi),%xmm3 vpshufd $0xd3,%xmm3,%xmm0 vpsrad $31,%xmm0,%xmm0 vpaddq %xmm3,%xmm3,%xmm3 vpand .Lgfpoly_and_internal_carrybit(%rip),%xmm0,%xmm0 vpxor %xmm0,%xmm3,%xmm3 vbroadcasti128 .Lgfpoly(%rip),%ymm6 vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0 vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1 vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2 vpxor %xmm2,%xmm1,%xmm1 vpclmulqdq $0x01,%xmm0,%xmm6,%xmm2 vpshufd $0x4e,%xmm0,%xmm0 vpxor %xmm0,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm3,%xmm3,%xmm5 vpclmulqdq $0x01,%xmm1,%xmm6,%xmm0 vpshufd $0x4e,%xmm1,%xmm1 vpxor %xmm1,%xmm5,%xmm5 vpxor %xmm0,%xmm5,%xmm5 vinserti128 $1,%xmm3,%ymm5,%ymm3 vinserti128 $1,%xmm5,%ymm5,%ymm5 vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0 vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1 vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2 vpshufd $0x4e,%ymm0,%ymm0 vpxor %ymm0,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4 vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0 vpshufd $0x4e,%ymm1,%ymm1 vpxor %ymm1,%ymm4,%ymm4 vpxor %ymm0,%ymm4,%ymm4 vmovdqu %ymm3,96(%rdi) vmovdqu %ymm4,64(%rdi) vpunpcklqdq %ymm3,%ymm4,%ymm0 vpunpckhqdq %ymm3,%ymm4,%ymm1 vpxor %ymm1,%ymm0,%ymm0 vmovdqu %ymm0,128+32(%rdi) vpclmulqdq $0x00,%ymm5,%ymm4,%ymm0 vpclmulqdq $0x01,%ymm5,%ymm4,%ymm1 vpclmulqdq $0x10,%ymm5,%ymm4,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2 vpshufd $0x4e,%ymm0,%ymm0 vpxor %ymm0,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vpclmulqdq 
$0x11,%ymm5,%ymm4,%ymm3 vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0 vpshufd $0x4e,%ymm1,%ymm1 vpxor %ymm1,%ymm3,%ymm3 vpxor %ymm0,%ymm3,%ymm3 vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0 vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1 vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2 vpshufd $0x4e,%ymm0,%ymm0 vpxor %ymm0,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4 vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0 vpshufd $0x4e,%ymm1,%ymm1 vpxor %ymm1,%ymm4,%ymm4 vpxor %ymm0,%ymm4,%ymm4 vmovdqu %ymm3,32(%rdi) vmovdqu %ymm4,0(%rdi) vpunpcklqdq %ymm3,%ymm4,%ymm0 vpunpckhqdq %ymm3,%ymm4,%ymm1 vpxor %ymm1,%ymm0,%ymm0 vmovdqu %ymm0,128(%rdi) vzeroupper ret .cfi_endproc .size gcm_init_vpclmulqdq_avx2, . - gcm_init_vpclmulqdq_avx2 .globl gcm_gmult_vpclmulqdq_avx2 .hidden gcm_gmult_vpclmulqdq_avx2 .type gcm_gmult_vpclmulqdq_avx2,@function .align 32 gcm_gmult_vpclmulqdq_avx2: .cfi_startproc _CET_ENDBR vmovdqu (%rdi),%xmm0 vmovdqu .Lbswap_mask(%rip),%xmm1 vmovdqu 128-16(%rsi),%xmm2 vmovdqu .Lgfpoly(%rip),%xmm3 vpshufb %xmm1,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm4 vpclmulqdq $0x01,%xmm2,%xmm0,%xmm5 vpclmulqdq $0x10,%xmm2,%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x01,%xmm4,%xmm3,%xmm6 vpshufd $0x4e,%xmm4,%xmm4 vpxor %xmm4,%xmm5,%xmm5 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm0 vpclmulqdq $0x01,%xmm5,%xmm3,%xmm4 vpshufd $0x4e,%xmm5,%xmm5 vpxor %xmm5,%xmm0,%xmm0 vpxor %xmm4,%xmm0,%xmm0 vpshufb %xmm1,%xmm0,%xmm0 vmovdqu %xmm0,(%rdi) ret .cfi_endproc .size gcm_gmult_vpclmulqdq_avx2, . 
- gcm_gmult_vpclmulqdq_avx2 .globl gcm_ghash_vpclmulqdq_avx2 .hidden gcm_ghash_vpclmulqdq_avx2 .type gcm_ghash_vpclmulqdq_avx2,@function .align 32 gcm_ghash_vpclmulqdq_avx2: .cfi_startproc _CET_ENDBR vbroadcasti128 .Lbswap_mask(%rip),%ymm6 vmovdqu (%rdi),%xmm5 vpshufb %xmm6,%xmm5,%xmm5 vbroadcasti128 .Lgfpoly(%rip),%ymm7 cmpq $32,%rcx jb .Lghash_lastblock cmpq $127,%rcx jbe .Lghash_loop_1x vmovdqu 128(%rsi),%ymm8 vmovdqu 128+32(%rsi),%ymm9 .Lghash_loop_4x: vmovdqu 0(%rdx),%ymm1 vpshufb %ymm6,%ymm1,%ymm1 vmovdqu 0(%rsi),%ymm2 vpxor %ymm5,%ymm1,%ymm1 vpclmulqdq $0x00,%ymm2,%ymm1,%ymm3 vpclmulqdq $0x11,%ymm2,%ymm1,%ymm5 vpunpckhqdq %ymm1,%ymm1,%ymm0 vpxor %ymm1,%ymm0,%ymm0 vpclmulqdq $0x00,%ymm8,%ymm0,%ymm4 vmovdqu 32(%rdx),%ymm1 vpshufb %ymm6,%ymm1,%ymm1 vmovdqu 32(%rsi),%ymm2 vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0 vpxor %ymm0,%ymm5,%ymm5 vpunpckhqdq %ymm1,%ymm1,%ymm0 vpxor %ymm1,%ymm0,%ymm0 vpclmulqdq $0x10,%ymm8,%ymm0,%ymm0 vpxor %ymm0,%ymm4,%ymm4 vmovdqu 64(%rdx),%ymm1 vpshufb %ymm6,%ymm1,%ymm1 vmovdqu 64(%rsi),%ymm2 vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0 vpxor %ymm0,%ymm5,%ymm5 vpunpckhqdq %ymm1,%ymm1,%ymm0 vpxor %ymm1,%ymm0,%ymm0 vpclmulqdq $0x00,%ymm9,%ymm0,%ymm0 vpxor %ymm0,%ymm4,%ymm4 vmovdqu 96(%rdx),%ymm1 vpshufb %ymm6,%ymm1,%ymm1 vmovdqu 96(%rsi),%ymm2 vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0 vpxor %ymm0,%ymm5,%ymm5 vpunpckhqdq %ymm1,%ymm1,%ymm0 vpxor %ymm1,%ymm0,%ymm0 vpclmulqdq $0x10,%ymm9,%ymm0,%ymm0 vpxor %ymm0,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpxor %ymm5,%ymm4,%ymm4 vbroadcasti128 .Lgfpoly(%rip),%ymm2 vpclmulqdq $0x01,%ymm3,%ymm2,%ymm0 vpshufd $0x4e,%ymm3,%ymm3 vpxor %ymm3,%ymm4,%ymm4 vpxor %ymm0,%ymm4,%ymm4 vpclmulqdq $0x01,%ymm4,%ymm2,%ymm0 vpshufd $0x4e,%ymm4,%ymm4 vpxor %ymm4,%ymm5,%ymm5 vpxor %ymm0,%ymm5,%ymm5 vextracti128 $1,%ymm5,%xmm0 vpxor %xmm0,%xmm5,%xmm5 subq $-128,%rdx addq 
$-128,%rcx cmpq $127,%rcx ja .Lghash_loop_4x cmpq $32,%rcx jb .Lghash_loop_1x_done .Lghash_loop_1x: vmovdqu (%rdx),%ymm0 vpshufb %ymm6,%ymm0,%ymm0 vpxor %ymm0,%ymm5,%ymm5 vmovdqu 128-32(%rsi),%ymm0 vpclmulqdq $0x00,%ymm0,%ymm5,%ymm1 vpclmulqdq $0x01,%ymm0,%ymm5,%ymm2 vpclmulqdq $0x10,%ymm0,%ymm5,%ymm3 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x01,%ymm1,%ymm7,%ymm3 vpshufd $0x4e,%ymm1,%ymm1 vpxor %ymm1,%ymm2,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x11,%ymm0,%ymm5,%ymm5 vpclmulqdq $0x01,%ymm2,%ymm7,%ymm1 vpshufd $0x4e,%ymm2,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpxor %ymm1,%ymm5,%ymm5 vextracti128 $1,%ymm5,%xmm0 vpxor %xmm0,%xmm5,%xmm5 addq $32,%rdx subq $32,%rcx cmpq $32,%rcx jae .Lghash_loop_1x .Lghash_loop_1x_done: vzeroupper .Lghash_lastblock: testq %rcx,%rcx jz .Lghash_done vmovdqu (%rdx),%xmm0 vpshufb %xmm6,%xmm0,%xmm0 vpxor %xmm0,%xmm5,%xmm5 vmovdqu 128-16(%rsi),%xmm0 vpclmulqdq $0x00,%xmm0,%xmm5,%xmm1 vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2 vpclmulqdq $0x10,%xmm0,%xmm5,%xmm3 vpxor %xmm3,%xmm2,%xmm2 vpclmulqdq $0x01,%xmm1,%xmm7,%xmm3 vpshufd $0x4e,%xmm1,%xmm1 vpxor %xmm1,%xmm2,%xmm2 vpxor %xmm3,%xmm2,%xmm2 vpclmulqdq $0x11,%xmm0,%xmm5,%xmm5 vpclmulqdq $0x01,%xmm2,%xmm7,%xmm1 vpshufd $0x4e,%xmm2,%xmm2 vpxor %xmm2,%xmm5,%xmm5 vpxor %xmm1,%xmm5,%xmm5 .Lghash_done: vpshufb %xmm6,%xmm5,%xmm5 vmovdqu %xmm5,(%rdi) ret .cfi_endproc .size gcm_ghash_vpclmulqdq_avx2, . 
- gcm_ghash_vpclmulqdq_avx2 .globl aes_gcm_enc_update_vaes_avx2 .hidden aes_gcm_enc_update_vaes_avx2 .type aes_gcm_enc_update_vaes_avx2,@function .align 32 aes_gcm_enc_update_vaes_avx2: .cfi_startproc _CET_ENDBR pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-16 movq 16(%rsp),%r12 #ifdef BORINGSSL_DISPATCH_TEST .extern BORINGSSL_function_hit .hidden BORINGSSL_function_hit movb $1,BORINGSSL_function_hit+8(%rip) #endif vbroadcasti128 .Lbswap_mask(%rip),%ymm0 vmovdqu (%r12),%xmm1 vpshufb %xmm0,%xmm1,%xmm1 vbroadcasti128 (%r8),%ymm11 vpshufb %ymm0,%ymm11,%ymm11 movl 240(%rcx),%r10d leal -20(,%r10,4),%r10d leaq 96(%rcx,%r10,4),%r11 vbroadcasti128 (%rcx),%ymm9 vbroadcasti128 (%r11),%ymm10 vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11 cmpq $127,%rdx jbe .Lcrypt_loop_4x_done__func1 vmovdqu 128(%r9),%ymm7 vmovdqu 128+32(%r9),%ymm8 vmovdqu .Linc_2blocks(%rip),%ymm2 vpshufb %ymm0,%ymm11,%ymm12 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm14 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm15 vpaddd %ymm2,%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 vpxor %ymm9,%ymm14,%ymm14 vpxor %ymm9,%ymm15,%ymm15 leaq 16(%rcx),%rax .Lvaesenc_loop_first_4_vecs__func1: vbroadcasti128 (%rax),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 addq $16,%rax cmpq %rax,%r11 jne .Lvaesenc_loop_first_4_vecs__func1 vpxor 0(%rdi),%ymm10,%ymm2 vpxor 32(%rdi),%ymm10,%ymm3 vpxor 64(%rdi),%ymm10,%ymm5 vpxor 96(%rdi),%ymm10,%ymm6 vaesenclast %ymm2,%ymm12,%ymm12 vaesenclast %ymm3,%ymm13,%ymm13 vaesenclast %ymm5,%ymm14,%ymm14 vaesenclast %ymm6,%ymm15,%ymm15 vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vmovdqu %ymm14,64(%rsi) vmovdqu %ymm15,96(%rsi) subq $-128,%rdi addq $-128,%rdx cmpq $127,%rdx jbe .Lghash_last_ciphertext_4x__func1 .align 16 .Lcrypt_loop_4x__func1: vmovdqu .Linc_2blocks(%rip),%ymm2 vpshufb %ymm0,%ymm11,%ymm12 vpaddd %ymm2,%ymm11,%ymm11 
vpshufb %ymm0,%ymm11,%ymm13 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm14 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm15 vpaddd %ymm2,%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 vpxor %ymm9,%ymm14,%ymm14 vpxor %ymm9,%ymm15,%ymm15 cmpl $24,%r10d jl .Laes128__func1 je .Laes192__func1 vbroadcasti128 -208(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vbroadcasti128 -192(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 .Laes192__func1: vbroadcasti128 -176(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vbroadcasti128 -160(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 .Laes128__func1: prefetcht0 512(%rdi) prefetcht0 512+64(%rdi) vmovdqu 0(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 0(%r9),%ymm4 vpxor %ymm1,%ymm3,%ymm3 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6 vbroadcasti128 -144(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vbroadcasti128 -128(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vmovdqu 32(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 32(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -112(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vmovdqu 64(%rsi),%ymm3 vpshufb 
%ymm0,%ymm3,%ymm3 vmovdqu 64(%r9),%ymm4 vbroadcasti128 -96(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vbroadcasti128 -80(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vmovdqu 96(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vbroadcasti128 -64(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vmovdqu 96(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -48(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm1,%ymm6,%ymm6 vbroadcasti128 .Lgfpoly(%rip),%ymm4 vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -32(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vbroadcasti128 -16(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vextracti128 $1,%ymm1,%xmm2 vpxor %xmm2,%xmm1,%xmm1 subq $-128,%rsi vpxor 0(%rdi),%ymm10,%ymm2 vpxor 32(%rdi),%ymm10,%ymm3 vpxor 64(%rdi),%ymm10,%ymm5 vpxor 96(%rdi),%ymm10,%ymm6 vaesenclast %ymm2,%ymm12,%ymm12 vaesenclast %ymm3,%ymm13,%ymm13 vaesenclast 
%ymm5,%ymm14,%ymm14 vaesenclast %ymm6,%ymm15,%ymm15 vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vmovdqu %ymm14,64(%rsi) vmovdqu %ymm15,96(%rsi) subq $-128,%rdi addq $-128,%rdx cmpq $127,%rdx ja .Lcrypt_loop_4x__func1 .Lghash_last_ciphertext_4x__func1: vmovdqu 0(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 0(%r9),%ymm4 vpxor %ymm1,%ymm3,%ymm3 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6 vmovdqu 32(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 32(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vmovdqu 64(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 64(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vmovdqu 96(%rsi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 96(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm1,%ymm6,%ymm6 vbroadcasti128 .Lgfpoly(%rip),%ymm4 vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm2,%ymm6,%ymm6 vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vextracti128 $1,%ymm1,%xmm2 vpxor %xmm2,%xmm1,%xmm1 subq $-128,%rsi .Lcrypt_loop_4x_done__func1: testq %rdx,%rdx jz .Ldone__func1 leaq 128(%r9),%r8 subq %rdx,%r8 vpxor %xmm5,%xmm5,%xmm5 vpxor %xmm6,%xmm6,%xmm6 vpxor %xmm7,%xmm7,%xmm7 cmpq $64,%rdx jb .Llessthan64bytes__func1 vpshufb 
%ymm0,%ymm11,%ymm12 vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 leaq 16(%rcx),%rax .Lvaesenc_loop_tail_1__func1: vbroadcasti128 (%rax),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 addq $16,%rax cmpq %rax,%r11 jne .Lvaesenc_loop_tail_1__func1 vaesenclast %ymm10,%ymm12,%ymm12 vaesenclast %ymm10,%ymm13,%ymm13 vmovdqu 0(%rdi),%ymm2 vmovdqu 32(%rdi),%ymm3 vpxor %ymm2,%ymm12,%ymm12 vpxor %ymm3,%ymm13,%ymm13 vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vpshufb %ymm0,%ymm12,%ymm12 vpshufb %ymm0,%ymm13,%ymm13 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 vmovdqu 32(%r8),%ymm3 vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5 vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6 vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7 vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm7,%ymm7 addq $64,%r8 addq $64,%rdi addq $64,%rsi subq $64,%rdx jz .Lreduce__func1 vpxor %xmm1,%xmm1,%xmm1 .Llessthan64bytes__func1: vpshufb %ymm0,%ymm11,%ymm12 vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 leaq 16(%rcx),%rax .Lvaesenc_loop_tail_2__func1: vbroadcasti128 (%rax),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 addq $16,%rax cmpq %rax,%r11 jne .Lvaesenc_loop_tail_2__func1 vaesenclast %ymm10,%ymm12,%ymm12 vaesenclast %ymm10,%ymm13,%ymm13 cmpq $32,%rdx jb .Lxor_one_block__func1 je .Lxor_two_blocks__func1 .Lxor_three_blocks__func1: vmovdqu 0(%rdi),%ymm2 vmovdqu 32(%rdi),%xmm3 vpxor %ymm2,%ymm12,%ymm12 vpxor %xmm3,%xmm13,%xmm13 vmovdqu %ymm12,0(%rsi) vmovdqu %xmm13,32(%rsi) vpshufb %ymm0,%ymm12,%ymm12 vpshufb %xmm0,%xmm13,%xmm13 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 
vmovdqu 32(%r8),%xmm3 vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm7,%ymm7 jmp .Lghash_mul_one_vec_unreduced__func1 .Lxor_two_blocks__func1: vmovdqu (%rdi),%ymm2 vpxor %ymm2,%ymm12,%ymm12 vmovdqu %ymm12,(%rsi) vpshufb %ymm0,%ymm12,%ymm12 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 jmp .Lghash_mul_one_vec_unreduced__func1 .Lxor_one_block__func1: vmovdqu (%rdi),%xmm2 vpxor %xmm2,%xmm12,%xmm12 vmovdqu %xmm12,(%rsi) vpshufb %xmm0,%xmm12,%xmm12 vpxor %xmm1,%xmm12,%xmm12 vmovdqu (%r8),%xmm2 .Lghash_mul_one_vec_unreduced__func1: vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm7,%ymm7 .Lreduce__func1: vbroadcasti128 .Lgfpoly(%rip),%ymm2 vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm7,%ymm7 vpxor %ymm3,%ymm7,%ymm7 vextracti128 $1,%ymm7,%xmm1 vpxor %xmm7,%xmm1,%xmm1 .Ldone__func1: vpshufb %xmm0,%xmm1,%xmm1 vmovdqu %xmm1,(%r12) vzeroupper popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 ret .cfi_endproc .size aes_gcm_enc_update_vaes_avx2, . 
- aes_gcm_enc_update_vaes_avx2 .globl aes_gcm_dec_update_vaes_avx2 .hidden aes_gcm_dec_update_vaes_avx2 .type aes_gcm_dec_update_vaes_avx2,@function .align 32 aes_gcm_dec_update_vaes_avx2: .cfi_startproc _CET_ENDBR pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-16 movq 16(%rsp),%r12 vbroadcasti128 .Lbswap_mask(%rip),%ymm0 vmovdqu (%r12),%xmm1 vpshufb %xmm0,%xmm1,%xmm1 vbroadcasti128 (%r8),%ymm11 vpshufb %ymm0,%ymm11,%ymm11 movl 240(%rcx),%r10d leal -20(,%r10,4),%r10d leaq 96(%rcx,%r10,4),%r11 vbroadcasti128 (%rcx),%ymm9 vbroadcasti128 (%r11),%ymm10 vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11 cmpq $127,%rdx jbe .Lcrypt_loop_4x_done__func2 vmovdqu 128(%r9),%ymm7 vmovdqu 128+32(%r9),%ymm8 .align 16 .Lcrypt_loop_4x__func2: vmovdqu .Linc_2blocks(%rip),%ymm2 vpshufb %ymm0,%ymm11,%ymm12 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm14 vpaddd %ymm2,%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm15 vpaddd %ymm2,%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 vpxor %ymm9,%ymm14,%ymm14 vpxor %ymm9,%ymm15,%ymm15 cmpl $24,%r10d jl .Laes128__func2 je .Laes192__func2 vbroadcasti128 -208(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vbroadcasti128 -192(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 .Laes192__func2: vbroadcasti128 -176(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vbroadcasti128 -160(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 .Laes128__func2: prefetcht0 512(%rdi) prefetcht0 512+64(%rdi) vmovdqu 0(%rdi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 0(%r9),%ymm4 vpxor %ymm1,%ymm3,%ymm3 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor 
%ymm3,%ymm2,%ymm2 vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6 vbroadcasti128 -144(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vbroadcasti128 -128(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vmovdqu 32(%rdi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 32(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -112(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vmovdqu 64(%rdi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vmovdqu 64(%r9),%ymm4 vbroadcasti128 -96(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vbroadcasti128 -80(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vmovdqu 96(%rdi),%ymm3 vpshufb %ymm0,%ymm3,%ymm3 vbroadcasti128 -64(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vmovdqu 96(%r9),%ymm4 vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm5,%ymm5 vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vpunpckhqdq %ymm3,%ymm3,%ymm2 vpxor %ymm3,%ymm2,%ymm2 vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -48(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpxor %ymm5,%ymm6,%ymm6 vpxor 
%ymm1,%ymm6,%ymm6 vbroadcasti128 .Lgfpoly(%rip),%ymm4 vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm2,%ymm6,%ymm6 vbroadcasti128 -32(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm1,%ymm1 vpxor %ymm2,%ymm1,%ymm1 vbroadcasti128 -16(%r11),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 vaesenc %ymm2,%ymm14,%ymm14 vaesenc %ymm2,%ymm15,%ymm15 vextracti128 $1,%ymm1,%xmm2 vpxor %xmm2,%xmm1,%xmm1 vpxor 0(%rdi),%ymm10,%ymm2 vpxor 32(%rdi),%ymm10,%ymm3 vpxor 64(%rdi),%ymm10,%ymm5 vpxor 96(%rdi),%ymm10,%ymm6 vaesenclast %ymm2,%ymm12,%ymm12 vaesenclast %ymm3,%ymm13,%ymm13 vaesenclast %ymm5,%ymm14,%ymm14 vaesenclast %ymm6,%ymm15,%ymm15 vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vmovdqu %ymm14,64(%rsi) vmovdqu %ymm15,96(%rsi) subq $-128,%rdi subq $-128,%rsi addq $-128,%rdx cmpq $127,%rdx ja .Lcrypt_loop_4x__func2 .Lcrypt_loop_4x_done__func2: testq %rdx,%rdx jz .Ldone__func2 leaq 128(%r9),%r8 subq %rdx,%r8 vpxor %xmm5,%xmm5,%xmm5 vpxor %xmm6,%xmm6,%xmm6 vpxor %xmm7,%xmm7,%xmm7 cmpq $64,%rdx jb .Llessthan64bytes__func2 vpshufb %ymm0,%ymm11,%ymm12 vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 leaq 16(%rcx),%rax .Lvaesenc_loop_tail_1__func2: vbroadcasti128 (%rax),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 addq $16,%rax cmpq %rax,%r11 jne .Lvaesenc_loop_tail_1__func2 vaesenclast %ymm10,%ymm12,%ymm12 vaesenclast %ymm10,%ymm13,%ymm13 vmovdqu 0(%rdi),%ymm2 vmovdqu 32(%rdi),%ymm3 vpxor %ymm2,%ymm12,%ymm12 vpxor %ymm3,%ymm13,%ymm13 vmovdqu %ymm12,0(%rsi) vmovdqu %ymm13,32(%rsi) vpshufb %ymm0,%ymm2,%ymm12 vpshufb %ymm0,%ymm3,%ymm13 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 vmovdqu 32(%r8),%ymm3 vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5 
vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6 vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7 vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4 vpxor %ymm4,%ymm7,%ymm7 addq $64,%r8 addq $64,%rdi addq $64,%rsi subq $64,%rdx jz .Lreduce__func2 vpxor %xmm1,%xmm1,%xmm1 .Llessthan64bytes__func2: vpshufb %ymm0,%ymm11,%ymm12 vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11 vpshufb %ymm0,%ymm11,%ymm13 vpxor %ymm9,%ymm12,%ymm12 vpxor %ymm9,%ymm13,%ymm13 leaq 16(%rcx),%rax .Lvaesenc_loop_tail_2__func2: vbroadcasti128 (%rax),%ymm2 vaesenc %ymm2,%ymm12,%ymm12 vaesenc %ymm2,%ymm13,%ymm13 addq $16,%rax cmpq %rax,%r11 jne .Lvaesenc_loop_tail_2__func2 vaesenclast %ymm10,%ymm12,%ymm12 vaesenclast %ymm10,%ymm13,%ymm13 cmpq $32,%rdx jb .Lxor_one_block__func2 je .Lxor_two_blocks__func2 .Lxor_three_blocks__func2: vmovdqu 0(%rdi),%ymm2 vmovdqu 32(%rdi),%xmm3 vpxor %ymm2,%ymm12,%ymm12 vpxor %xmm3,%xmm13,%xmm13 vmovdqu %ymm12,0(%rsi) vmovdqu %xmm13,32(%rsi) vpshufb %ymm0,%ymm2,%ymm12 vpshufb %xmm0,%xmm3,%xmm13 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 vmovdqu 32(%r8),%xmm3 vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4 vpxor %ymm4,%ymm7,%ymm7 jmp .Lghash_mul_one_vec_unreduced__func2 .Lxor_two_blocks__func2: vmovdqu (%rdi),%ymm2 vpxor %ymm2,%ymm12,%ymm12 vmovdqu %ymm12,(%rsi) vpshufb %ymm0,%ymm2,%ymm12 vpxor %ymm1,%ymm12,%ymm12 vmovdqu (%r8),%ymm2 jmp .Lghash_mul_one_vec_unreduced__func2 .Lxor_one_block__func2: vmovdqu (%rdi),%xmm2 vpxor %xmm2,%xmm12,%xmm12 vmovdqu %xmm12,(%rsi) vpshufb %xmm0,%xmm2,%xmm12 vpxor %xmm1,%xmm12,%xmm12 vmovdqu (%r8),%xmm2 .Lghash_mul_one_vec_unreduced__func2: vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4 
vpxor %ymm4,%ymm5,%ymm5 vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm6,%ymm6 vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4 vpxor %ymm4,%ymm7,%ymm7 .Lreduce__func2: vbroadcasti128 .Lgfpoly(%rip),%ymm2 vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3 vpshufd $0x4e,%ymm5,%ymm5 vpxor %ymm5,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3 vpshufd $0x4e,%ymm6,%ymm6 vpxor %ymm6,%ymm7,%ymm7 vpxor %ymm3,%ymm7,%ymm7 vextracti128 $1,%ymm7,%xmm1 vpxor %xmm7,%xmm1,%xmm1 .Ldone__func2: vpshufb %xmm0,%xmm1,%xmm1 vmovdqu %xmm1,(%r12) vzeroupper popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 ret .cfi_endproc .size aes_gcm_dec_update_vaes_avx2, . - aes_gcm_dec_update_vaes_avx2 #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesni-gcm-x86_64-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .p2align 5 _aesni_ctr32_ghash_6x: vmovdqu 32(%r11),%xmm2 subq $6,%rdx vpxor %xmm4,%xmm4,%xmm4 vmovdqu 0-128(%rcx),%xmm15 vpaddb %xmm2,%xmm1,%xmm10 vpaddb %xmm2,%xmm10,%xmm11 vpaddb %xmm2,%xmm11,%xmm12 vpaddb %xmm2,%xmm12,%xmm13 vpaddb %xmm2,%xmm13,%xmm14 vpxor %xmm15,%xmm1,%xmm9 vmovdqu %xmm4,16+8(%rsp) jmp L$oop6x .p2align 5 L$oop6x: addl $100663296,%ebx jc L$handle_ctr32 vmovdqu 0-32(%r9),%xmm3 vpaddb %xmm2,%xmm14,%xmm1 vpxor %xmm15,%xmm10,%xmm10 vpxor %xmm15,%xmm11,%xmm11 L$resume_ctr32: vmovdqu %xmm1,(%r8) vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5 vpxor %xmm15,%xmm12,%xmm12 vmovups 16-128(%rcx),%xmm2 vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6 xorq %r12,%r12 cmpq %r14,%r15 vaesenc %xmm2,%xmm9,%xmm9 vmovdqu 48+8(%rsp),%xmm0 vpxor %xmm15,%xmm13,%xmm13 vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1 vaesenc %xmm2,%xmm10,%xmm10 vpxor %xmm15,%xmm14,%xmm14 setnc %r12b vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vmovdqu 16-32(%r9),%xmm3 negq %r12 vaesenc %xmm2,%xmm12,%xmm12 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5 vpxor %xmm4,%xmm8,%xmm8 vaesenc %xmm2,%xmm13,%xmm13 vpxor %xmm5,%xmm1,%xmm4 andq $0x60,%r12 vmovups 32-128(%rcx),%xmm15 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1 vaesenc %xmm2,%xmm14,%xmm14 vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2 leaq (%r14,%r12,1),%r14 vaesenc %xmm15,%xmm9,%xmm9 vpxor 16+8(%rsp),%xmm8,%xmm8 vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3 vmovdqu 64+8(%rsp),%xmm0 vaesenc %xmm15,%xmm10,%xmm10 movbeq 88(%r14),%r13 vaesenc %xmm15,%xmm11,%xmm11 movbeq 80(%r14),%r12 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,32+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,40+8(%rsp) vmovdqu 48-32(%r9),%xmm5 vaesenc %xmm15,%xmm14,%xmm14 vmovups 48-128(%rcx),%xmm15 vpxor %xmm1,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm2,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2 vaesenc %xmm15,%xmm10,%xmm10 vpxor %xmm3,%xmm7,%xmm7 vpclmulqdq 
$0x01,%xmm5,%xmm0,%xmm3 vaesenc %xmm15,%xmm11,%xmm11 vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5 vmovdqu 80+8(%rsp),%xmm0 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vpxor %xmm1,%xmm4,%xmm4 vmovdqu 64-32(%r9),%xmm1 vaesenc %xmm15,%xmm14,%xmm14 vmovups 64-128(%rcx),%xmm15 vpxor %xmm2,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm3,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 vaesenc %xmm15,%xmm10,%xmm10 movbeq 72(%r14),%r13 vpxor %xmm5,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5 vaesenc %xmm15,%xmm11,%xmm11 movbeq 64(%r14),%r12 vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1 vmovdqu 96+8(%rsp),%xmm0 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,48+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,56+8(%rsp) vpxor %xmm2,%xmm4,%xmm4 vmovdqu 96-32(%r9),%xmm2 vaesenc %xmm15,%xmm14,%xmm14 vmovups 80-128(%rcx),%xmm15 vpxor %xmm3,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5 vaesenc %xmm15,%xmm10,%xmm10 movbeq 56(%r14),%r13 vpxor %xmm1,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1 vpxor 112+8(%rsp),%xmm8,%xmm8 vaesenc %xmm15,%xmm11,%xmm11 movbeq 48(%r14),%r12 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,64+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,72+8(%rsp) vpxor %xmm3,%xmm4,%xmm4 vmovdqu 112-32(%r9),%xmm3 vaesenc %xmm15,%xmm14,%xmm14 vmovups 96-128(%rcx),%xmm15 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm1,%xmm6,%xmm6 vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1 vaesenc %xmm15,%xmm10,%xmm10 movbeq 40(%r14),%r13 vpxor %xmm2,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2 vaesenc %xmm15,%xmm11,%xmm11 movbeq 32(%r14),%r12 vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,80+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,88+8(%rsp) vpxor %xmm5,%xmm6,%xmm6 vaesenc %xmm15,%xmm14,%xmm14 vpxor %xmm1,%xmm6,%xmm6 vmovups 112-128(%rcx),%xmm15 vpslldq $8,%xmm6,%xmm5 vpxor 
%xmm2,%xmm4,%xmm4 vmovdqu 16(%r11),%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm8,%xmm7,%xmm7 vaesenc %xmm15,%xmm10,%xmm10 vpxor %xmm5,%xmm4,%xmm4 movbeq 24(%r14),%r13 vaesenc %xmm15,%xmm11,%xmm11 movbeq 16(%r14),%r12 vpalignr $8,%xmm4,%xmm4,%xmm0 vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 movq %r13,96+8(%rsp) vaesenc %xmm15,%xmm12,%xmm12 movq %r12,104+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 vmovups 128-128(%rcx),%xmm1 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vmovups 144-128(%rcx),%xmm15 vaesenc %xmm1,%xmm10,%xmm10 vpsrldq $8,%xmm6,%xmm6 vaesenc %xmm1,%xmm11,%xmm11 vpxor %xmm6,%xmm7,%xmm7 vaesenc %xmm1,%xmm12,%xmm12 vpxor %xmm0,%xmm4,%xmm4 movbeq 8(%r14),%r13 vaesenc %xmm1,%xmm13,%xmm13 movbeq 0(%r14),%r12 vaesenc %xmm1,%xmm14,%xmm14 vmovups 160-128(%rcx),%xmm1 cmpl $11,%r10d jb L$enc_tail vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovups 176-128(%rcx),%xmm15 vaesenc %xmm1,%xmm14,%xmm14 vmovups 192-128(%rcx),%xmm1 je L$enc_tail vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovups 208-128(%rcx),%xmm15 vaesenc %xmm1,%xmm14,%xmm14 vmovups 224-128(%rcx),%xmm1 jmp L$enc_tail .p2align 5 L$handle_ctr32: vmovdqu (%r11),%xmm0 vpshufb %xmm0,%xmm1,%xmm6 vmovdqu 48(%r11),%xmm5 vpaddd 64(%r11),%xmm6,%xmm10 vpaddd %xmm5,%xmm6,%xmm11 vmovdqu 0-32(%r9),%xmm3 vpaddd %xmm5,%xmm10,%xmm12 vpshufb %xmm0,%xmm10,%xmm10 vpaddd %xmm5,%xmm11,%xmm13 vpshufb %xmm0,%xmm11,%xmm11 vpxor %xmm15,%xmm10,%xmm10 vpaddd %xmm5,%xmm12,%xmm14 vpshufb %xmm0,%xmm12,%xmm12 vpxor 
%xmm15,%xmm11,%xmm11 vpaddd %xmm5,%xmm13,%xmm1 vpshufb %xmm0,%xmm13,%xmm13 vpshufb %xmm0,%xmm14,%xmm14 vpshufb %xmm0,%xmm1,%xmm1 jmp L$resume_ctr32 .p2align 5 L$enc_tail: vaesenc %xmm15,%xmm9,%xmm9 vmovdqu %xmm7,16+8(%rsp) vpalignr $8,%xmm4,%xmm4,%xmm8 vaesenc %xmm15,%xmm10,%xmm10 vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 vpxor 0(%rdi),%xmm1,%xmm2 vaesenc %xmm15,%xmm11,%xmm11 vpxor 16(%rdi),%xmm1,%xmm0 vaesenc %xmm15,%xmm12,%xmm12 vpxor 32(%rdi),%xmm1,%xmm5 vaesenc %xmm15,%xmm13,%xmm13 vpxor 48(%rdi),%xmm1,%xmm6 vaesenc %xmm15,%xmm14,%xmm14 vpxor 64(%rdi),%xmm1,%xmm7 vpxor 80(%rdi),%xmm1,%xmm3 vmovdqu (%r8),%xmm1 vaesenclast %xmm2,%xmm9,%xmm9 vmovdqu 32(%r11),%xmm2 vaesenclast %xmm0,%xmm10,%xmm10 vpaddb %xmm2,%xmm1,%xmm0 movq %r13,112+8(%rsp) leaq 96(%rdi),%rdi prefetcht0 512(%rdi) prefetcht0 576(%rdi) vaesenclast %xmm5,%xmm11,%xmm11 vpaddb %xmm2,%xmm0,%xmm5 movq %r12,120+8(%rsp) leaq 96(%rsi),%rsi vmovdqu 0-128(%rcx),%xmm15 vaesenclast %xmm6,%xmm12,%xmm12 vpaddb %xmm2,%xmm5,%xmm6 vaesenclast %xmm7,%xmm13,%xmm13 vpaddb %xmm2,%xmm6,%xmm7 vaesenclast %xmm3,%xmm14,%xmm14 vpaddb %xmm2,%xmm7,%xmm3 addq $0x60,%rax subq $0x6,%rdx jc L$6x_done vmovups %xmm9,-96(%rsi) vpxor %xmm15,%xmm1,%xmm9 vmovups %xmm10,-80(%rsi) vmovdqa %xmm0,%xmm10 vmovups %xmm11,-64(%rsi) vmovdqa %xmm5,%xmm11 vmovups %xmm12,-48(%rsi) vmovdqa %xmm6,%xmm12 vmovups %xmm13,-32(%rsi) vmovdqa %xmm7,%xmm13 vmovups %xmm14,-16(%rsi) vmovdqa %xmm3,%xmm14 vmovdqu 32+8(%rsp),%xmm7 jmp L$oop6x L$6x_done: vpxor 16+8(%rsp),%xmm8,%xmm8 vpxor %xmm4,%xmm8,%xmm8 ret .globl _aesni_gcm_decrypt .private_extern _aesni_gcm_decrypt .p2align 5 _aesni_gcm_decrypt: _CET_ENDBR xorq %rax,%rax cmpq $0x60,%rdx jb L$gcm_dec_abort pushq %rbp movq %rsp,%rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 vzeroupper movq 16(%rbp),%r12 vmovdqu (%r8),%xmm1 addq $-128,%rsp movl 12(%r8),%ebx leaq L$bswap_mask(%rip),%r11 leaq -128(%rcx),%r14 movq $0xf80,%r15 vmovdqu (%r12),%xmm8 andq $-128,%rsp vmovdqu (%r11),%xmm0 leaq 128(%rcx),%rcx leaq 
32(%r9),%r9 movl 240-128(%rcx),%r10d vpshufb %xmm0,%xmm8,%xmm8 andq %r15,%r14 andq %rsp,%r15 subq %r14,%r15 jc L$dec_no_key_aliasing cmpq $768,%r15 jnc L$dec_no_key_aliasing subq %r15,%rsp L$dec_no_key_aliasing: vmovdqu 80(%rdi),%xmm7 movq %rdi,%r14 vmovdqu 64(%rdi),%xmm4 leaq -192(%rdi,%rdx,1),%r15 vmovdqu 48(%rdi),%xmm5 shrq $4,%rdx xorq %rax,%rax vmovdqu 32(%rdi),%xmm6 vpshufb %xmm0,%xmm7,%xmm7 vmovdqu 16(%rdi),%xmm2 vpshufb %xmm0,%xmm4,%xmm4 vmovdqu (%rdi),%xmm3 vpshufb %xmm0,%xmm5,%xmm5 vmovdqu %xmm4,48(%rsp) vpshufb %xmm0,%xmm6,%xmm6 vmovdqu %xmm5,64(%rsp) vpshufb %xmm0,%xmm2,%xmm2 vmovdqu %xmm6,80(%rsp) vpshufb %xmm0,%xmm3,%xmm3 vmovdqu %xmm2,96(%rsp) vmovdqu %xmm3,112(%rsp) call _aesni_ctr32_ghash_6x movq 16(%rbp),%r12 vmovups %xmm9,-96(%rsi) vmovups %xmm10,-80(%rsi) vmovups %xmm11,-64(%rsi) vmovups %xmm12,-48(%rsi) vmovups %xmm13,-32(%rsi) vmovups %xmm14,-16(%rsi) vpshufb (%r11),%xmm8,%xmm8 vmovdqu %xmm8,(%r12) vzeroupper leaq -40(%rbp),%rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx popq %rbp L$gcm_dec_abort: ret .p2align 5 _aesni_ctr32_6x: vmovdqu 0-128(%rcx),%xmm4 vmovdqu 32(%r11),%xmm2 leaq -1(%r10),%r13 vmovups 16-128(%rcx),%xmm15 leaq 32-128(%rcx),%r12 vpxor %xmm4,%xmm1,%xmm9 addl $100663296,%ebx jc L$handle_ctr32_2 vpaddb %xmm2,%xmm1,%xmm10 vpaddb %xmm2,%xmm10,%xmm11 vpxor %xmm4,%xmm10,%xmm10 vpaddb %xmm2,%xmm11,%xmm12 vpxor %xmm4,%xmm11,%xmm11 vpaddb %xmm2,%xmm12,%xmm13 vpxor %xmm4,%xmm12,%xmm12 vpaddb %xmm2,%xmm13,%xmm14 vpxor %xmm4,%xmm13,%xmm13 vpaddb %xmm2,%xmm14,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp L$oop_ctr32 .p2align 4 L$oop_ctr32: vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vmovups (%r12),%xmm15 leaq 16(%r12),%r12 decl %r13d jnz L$oop_ctr32 vmovdqu (%r12),%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor 0(%rdi),%xmm3,%xmm4 vaesenc %xmm15,%xmm10,%xmm10 vpxor 16(%rdi),%xmm3,%xmm5 vaesenc %xmm15,%xmm11,%xmm11 vpxor 
32(%rdi),%xmm3,%xmm6 vaesenc %xmm15,%xmm12,%xmm12 vpxor 48(%rdi),%xmm3,%xmm8 vaesenc %xmm15,%xmm13,%xmm13 vpxor 64(%rdi),%xmm3,%xmm2 vaesenc %xmm15,%xmm14,%xmm14 vpxor 80(%rdi),%xmm3,%xmm3 leaq 96(%rdi),%rdi vaesenclast %xmm4,%xmm9,%xmm9 vaesenclast %xmm5,%xmm10,%xmm10 vaesenclast %xmm6,%xmm11,%xmm11 vaesenclast %xmm8,%xmm12,%xmm12 vaesenclast %xmm2,%xmm13,%xmm13 vaesenclast %xmm3,%xmm14,%xmm14 vmovups %xmm9,0(%rsi) vmovups %xmm10,16(%rsi) vmovups %xmm11,32(%rsi) vmovups %xmm12,48(%rsi) vmovups %xmm13,64(%rsi) vmovups %xmm14,80(%rsi) leaq 96(%rsi),%rsi ret .p2align 5 L$handle_ctr32_2: vpshufb %xmm0,%xmm1,%xmm6 vmovdqu 48(%r11),%xmm5 vpaddd 64(%r11),%xmm6,%xmm10 vpaddd %xmm5,%xmm6,%xmm11 vpaddd %xmm5,%xmm10,%xmm12 vpshufb %xmm0,%xmm10,%xmm10 vpaddd %xmm5,%xmm11,%xmm13 vpshufb %xmm0,%xmm11,%xmm11 vpxor %xmm4,%xmm10,%xmm10 vpaddd %xmm5,%xmm12,%xmm14 vpshufb %xmm0,%xmm12,%xmm12 vpxor %xmm4,%xmm11,%xmm11 vpaddd %xmm5,%xmm13,%xmm1 vpshufb %xmm0,%xmm13,%xmm13 vpxor %xmm4,%xmm12,%xmm12 vpshufb %xmm0,%xmm14,%xmm14 vpxor %xmm4,%xmm13,%xmm13 vpshufb %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp L$oop_ctr32 .globl _aesni_gcm_encrypt .private_extern _aesni_gcm_encrypt .p2align 5 _aesni_gcm_encrypt: _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,_BORINGSSL_function_hit+2(%rip) #endif xorq %rax,%rax cmpq $288,%rdx jb L$gcm_enc_abort pushq %rbp movq %rsp,%rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 vzeroupper vmovdqu (%r8),%xmm1 addq $-128,%rsp movl 12(%r8),%ebx leaq L$bswap_mask(%rip),%r11 leaq -128(%rcx),%r14 movq $0xf80,%r15 leaq 128(%rcx),%rcx vmovdqu (%r11),%xmm0 andq $-128,%rsp movl 240-128(%rcx),%r10d andq %r15,%r14 andq %rsp,%r15 subq %r14,%r15 jc L$enc_no_key_aliasing cmpq $768,%r15 jnc L$enc_no_key_aliasing subq %r15,%rsp L$enc_no_key_aliasing: movq %rsi,%r14 leaq -192(%rsi,%rdx,1),%r15 shrq $4,%rdx call _aesni_ctr32_6x vpshufb %xmm0,%xmm9,%xmm8 vpshufb %xmm0,%xmm10,%xmm2 vmovdqu %xmm8,112(%rsp) vpshufb %xmm0,%xmm11,%xmm4 vmovdqu %xmm2,96(%rsp) 
vpshufb %xmm0,%xmm12,%xmm5 vmovdqu %xmm4,80(%rsp) vpshufb %xmm0,%xmm13,%xmm6 vmovdqu %xmm5,64(%rsp) vpshufb %xmm0,%xmm14,%xmm7 vmovdqu %xmm6,48(%rsp) call _aesni_ctr32_6x movq 16(%rbp),%r12 leaq 32(%r9),%r9 vmovdqu (%r12),%xmm8 subq $12,%rdx movq $192,%rax vpshufb %xmm0,%xmm8,%xmm8 call _aesni_ctr32_ghash_6x vmovdqu 32(%rsp),%xmm7 vmovdqu (%r11),%xmm0 vmovdqu 0-32(%r9),%xmm3 vpunpckhqdq %xmm7,%xmm7,%xmm1 vmovdqu 32-32(%r9),%xmm15 vmovups %xmm9,-96(%rsi) vpshufb %xmm0,%xmm9,%xmm9 vpxor %xmm7,%xmm1,%xmm1 vmovups %xmm10,-80(%rsi) vpshufb %xmm0,%xmm10,%xmm10 vmovups %xmm11,-64(%rsi) vpshufb %xmm0,%xmm11,%xmm11 vmovups %xmm12,-48(%rsi) vpshufb %xmm0,%xmm12,%xmm12 vmovups %xmm13,-32(%rsi) vpshufb %xmm0,%xmm13,%xmm13 vmovups %xmm14,-16(%rsi) vpshufb %xmm0,%xmm14,%xmm14 vmovdqu %xmm9,16(%rsp) vmovdqu 48(%rsp),%xmm6 vmovdqu 16-32(%r9),%xmm0 vpunpckhqdq %xmm6,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5 vpxor %xmm6,%xmm2,%xmm2 vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 vmovdqu 64(%rsp),%xmm9 vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4 vmovdqu 48-32(%r9),%xmm3 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm9,%xmm9,%xmm5 vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6 vpxor %xmm9,%xmm5,%xmm5 vpxor %xmm7,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 vmovdqu 80-32(%r9),%xmm15 vpxor %xmm1,%xmm2,%xmm2 vmovdqu 80(%rsp),%xmm1 vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7 vmovdqu 64-32(%r9),%xmm0 vpxor %xmm4,%xmm7,%xmm7 vpunpckhqdq %xmm1,%xmm1,%xmm4 vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpxor %xmm6,%xmm9,%xmm9 vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 96(%rsp),%xmm2 vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6 vmovdqu 96-32(%r9),%xmm3 vpxor %xmm7,%xmm6,%xmm6 vpunpckhqdq %xmm2,%xmm2,%xmm7 vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1 vpxor %xmm2,%xmm7,%xmm7 vpxor %xmm9,%xmm1,%xmm1 vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4 vmovdqu 128-32(%r9),%xmm15 vpxor %xmm5,%xmm4,%xmm4 vpxor 112(%rsp),%xmm8,%xmm8 vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5 vmovdqu 112-32(%r9),%xmm0 vpunpckhqdq 
%xmm8,%xmm8,%xmm9 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2 vpxor %xmm8,%xmm9,%xmm9 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7 vpxor %xmm4,%xmm7,%xmm4 vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6 vmovdqu 0-32(%r9),%xmm3 vpunpckhqdq %xmm14,%xmm14,%xmm1 vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8 vpxor %xmm14,%xmm1,%xmm1 vpxor %xmm5,%xmm6,%xmm5 vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9 vmovdqu 32-32(%r9),%xmm15 vpxor %xmm2,%xmm8,%xmm7 vpxor %xmm4,%xmm9,%xmm6 vmovdqu 16-32(%r9),%xmm0 vpxor %xmm5,%xmm7,%xmm9 vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4 vpxor %xmm9,%xmm6,%xmm6 vpunpckhqdq %xmm13,%xmm13,%xmm2 vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14 vpxor %xmm13,%xmm2,%xmm2 vpslldq $8,%xmm6,%xmm9 vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 vpxor %xmm9,%xmm5,%xmm8 vpsrldq $8,%xmm6,%xmm6 vpxor %xmm6,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5 vmovdqu 48-32(%r9),%xmm3 vpxor %xmm4,%xmm5,%xmm5 vpunpckhqdq %xmm12,%xmm12,%xmm9 vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13 vpxor %xmm12,%xmm9,%xmm9 vpxor %xmm14,%xmm13,%xmm13 vpalignr $8,%xmm8,%xmm8,%xmm14 vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 vmovdqu 80-32(%r9),%xmm15 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4 vmovdqu 64-32(%r9),%xmm0 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm11,%xmm11,%xmm1 vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12 vpxor %xmm11,%xmm1,%xmm1 vpxor %xmm13,%xmm12,%xmm12 vxorps 16(%rsp),%xmm7,%xmm7 vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9 vpxor %xmm2,%xmm9,%xmm9 vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 vxorps %xmm14,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5 vmovdqu 96-32(%r9),%xmm3 vpxor %xmm4,%xmm5,%xmm5 vpunpckhqdq %xmm10,%xmm10,%xmm2 vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11 vpxor %xmm10,%xmm2,%xmm2 vpalignr $8,%xmm8,%xmm8,%xmm14 vpxor %xmm12,%xmm11,%xmm11 vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1 vmovdqu 128-32(%r9),%xmm15 vpxor %xmm9,%xmm1,%xmm1 vxorps %xmm7,%xmm14,%xmm14 vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 vxorps %xmm14,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4 vmovdqu 112-32(%r9),%xmm0 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq 
%xmm8,%xmm8,%xmm9 vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10 vpxor %xmm8,%xmm9,%xmm9 vpxor %xmm11,%xmm10,%xmm10 vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5 vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7 vpxor %xmm4,%xmm5,%xmm5 vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6 vpxor %xmm10,%xmm7,%xmm7 vpxor %xmm2,%xmm6,%xmm6 vpxor %xmm5,%xmm7,%xmm4 vpxor %xmm4,%xmm6,%xmm6 vpslldq $8,%xmm6,%xmm1 vmovdqu 16(%r11),%xmm3 vpsrldq $8,%xmm6,%xmm6 vpxor %xmm1,%xmm5,%xmm8 vpxor %xmm6,%xmm7,%xmm7 vpalignr $8,%xmm8,%xmm8,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 vpxor %xmm2,%xmm8,%xmm8 vpalignr $8,%xmm8,%xmm8,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 vpxor %xmm7,%xmm2,%xmm2 vpxor %xmm2,%xmm8,%xmm8 movq 16(%rbp),%r12 vpshufb (%r11),%xmm8,%xmm8 vmovdqu %xmm8,(%r12) vzeroupper leaq -40(%rbp),%rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx popq %rbp L$gcm_enc_abort: ret .section __DATA,__const .p2align 6 L$bswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 L$poly: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 L$one_msb: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 L$two_lsb: .byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 L$one_lsb: .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 .byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .p2align 6 .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesni-gcm-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .type _aesni_ctr32_ghash_6x,@function .align 32 _aesni_ctr32_ghash_6x: .cfi_startproc vmovdqu 32(%r11),%xmm2 subq $6,%rdx vpxor %xmm4,%xmm4,%xmm4 vmovdqu 0-128(%rcx),%xmm15 vpaddb %xmm2,%xmm1,%xmm10 vpaddb %xmm2,%xmm10,%xmm11 vpaddb %xmm2,%xmm11,%xmm12 vpaddb %xmm2,%xmm12,%xmm13 vpaddb %xmm2,%xmm13,%xmm14 vpxor %xmm15,%xmm1,%xmm9 vmovdqu %xmm4,16+8(%rsp) jmp .Loop6x .align 32 .Loop6x: addl $100663296,%ebx jc .Lhandle_ctr32 vmovdqu 0-32(%r9),%xmm3 vpaddb %xmm2,%xmm14,%xmm1 vpxor %xmm15,%xmm10,%xmm10 vpxor %xmm15,%xmm11,%xmm11 .Lresume_ctr32: vmovdqu %xmm1,(%r8) vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5 vpxor %xmm15,%xmm12,%xmm12 vmovups 16-128(%rcx),%xmm2 vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6 xorq %r12,%r12 cmpq %r14,%r15 vaesenc %xmm2,%xmm9,%xmm9 vmovdqu 48+8(%rsp),%xmm0 vpxor %xmm15,%xmm13,%xmm13 vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1 vaesenc %xmm2,%xmm10,%xmm10 vpxor %xmm15,%xmm14,%xmm14 setnc %r12b vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vmovdqu 16-32(%r9),%xmm3 negq %r12 vaesenc %xmm2,%xmm12,%xmm12 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5 vpxor %xmm4,%xmm8,%xmm8 vaesenc %xmm2,%xmm13,%xmm13 vpxor %xmm5,%xmm1,%xmm4 andq $0x60,%r12 vmovups 32-128(%rcx),%xmm15 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1 vaesenc %xmm2,%xmm14,%xmm14 vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2 leaq (%r14,%r12,1),%r14 vaesenc %xmm15,%xmm9,%xmm9 vpxor 16+8(%rsp),%xmm8,%xmm8 vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3 vmovdqu 64+8(%rsp),%xmm0 vaesenc %xmm15,%xmm10,%xmm10 movbeq 88(%r14),%r13 vaesenc %xmm15,%xmm11,%xmm11 movbeq 80(%r14),%r12 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,32+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,40+8(%rsp) vmovdqu 48-32(%r9),%xmm5 vaesenc %xmm15,%xmm14,%xmm14 vmovups 48-128(%rcx),%xmm15 vpxor %xmm1,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm2,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2 vaesenc %xmm15,%xmm10,%xmm10 vpxor 
%xmm3,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3 vaesenc %xmm15,%xmm11,%xmm11 vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5 vmovdqu 80+8(%rsp),%xmm0 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vpxor %xmm1,%xmm4,%xmm4 vmovdqu 64-32(%r9),%xmm1 vaesenc %xmm15,%xmm14,%xmm14 vmovups 64-128(%rcx),%xmm15 vpxor %xmm2,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm3,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 vaesenc %xmm15,%xmm10,%xmm10 movbeq 72(%r14),%r13 vpxor %xmm5,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5 vaesenc %xmm15,%xmm11,%xmm11 movbeq 64(%r14),%r12 vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1 vmovdqu 96+8(%rsp),%xmm0 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,48+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,56+8(%rsp) vpxor %xmm2,%xmm4,%xmm4 vmovdqu 96-32(%r9),%xmm2 vaesenc %xmm15,%xmm14,%xmm14 vmovups 80-128(%rcx),%xmm15 vpxor %xmm3,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5 vaesenc %xmm15,%xmm10,%xmm10 movbeq 56(%r14),%r13 vpxor %xmm1,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1 vpxor 112+8(%rsp),%xmm8,%xmm8 vaesenc %xmm15,%xmm11,%xmm11 movbeq 48(%r14),%r12 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,64+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,72+8(%rsp) vpxor %xmm3,%xmm4,%xmm4 vmovdqu 112-32(%r9),%xmm3 vaesenc %xmm15,%xmm14,%xmm14 vmovups 96-128(%rcx),%xmm15 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm1,%xmm6,%xmm6 vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1 vaesenc %xmm15,%xmm10,%xmm10 movbeq 40(%r14),%r13 vpxor %xmm2,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2 vaesenc %xmm15,%xmm11,%xmm11 movbeq 32(%r14),%r12 vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,80+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,88+8(%rsp) vpxor %xmm5,%xmm6,%xmm6 vaesenc %xmm15,%xmm14,%xmm14 vpxor %xmm1,%xmm6,%xmm6 vmovups 112-128(%rcx),%xmm15 
vpslldq $8,%xmm6,%xmm5 vpxor %xmm2,%xmm4,%xmm4 vmovdqu 16(%r11),%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm8,%xmm7,%xmm7 vaesenc %xmm15,%xmm10,%xmm10 vpxor %xmm5,%xmm4,%xmm4 movbeq 24(%r14),%r13 vaesenc %xmm15,%xmm11,%xmm11 movbeq 16(%r14),%r12 vpalignr $8,%xmm4,%xmm4,%xmm0 vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 movq %r13,96+8(%rsp) vaesenc %xmm15,%xmm12,%xmm12 movq %r12,104+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 vmovups 128-128(%rcx),%xmm1 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vmovups 144-128(%rcx),%xmm15 vaesenc %xmm1,%xmm10,%xmm10 vpsrldq $8,%xmm6,%xmm6 vaesenc %xmm1,%xmm11,%xmm11 vpxor %xmm6,%xmm7,%xmm7 vaesenc %xmm1,%xmm12,%xmm12 vpxor %xmm0,%xmm4,%xmm4 movbeq 8(%r14),%r13 vaesenc %xmm1,%xmm13,%xmm13 movbeq 0(%r14),%r12 vaesenc %xmm1,%xmm14,%xmm14 vmovups 160-128(%rcx),%xmm1 cmpl $11,%r10d jb .Lenc_tail vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovups 176-128(%rcx),%xmm15 vaesenc %xmm1,%xmm14,%xmm14 vmovups 192-128(%rcx),%xmm1 je .Lenc_tail vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovups 208-128(%rcx),%xmm15 vaesenc %xmm1,%xmm14,%xmm14 vmovups 224-128(%rcx),%xmm1 jmp .Lenc_tail .align 32 .Lhandle_ctr32: vmovdqu (%r11),%xmm0 vpshufb %xmm0,%xmm1,%xmm6 vmovdqu 48(%r11),%xmm5 vpaddd 64(%r11),%xmm6,%xmm10 vpaddd %xmm5,%xmm6,%xmm11 vmovdqu 0-32(%r9),%xmm3 vpaddd %xmm5,%xmm10,%xmm12 vpshufb %xmm0,%xmm10,%xmm10 vpaddd %xmm5,%xmm11,%xmm13 vpshufb %xmm0,%xmm11,%xmm11 vpxor %xmm15,%xmm10,%xmm10 vpaddd %xmm5,%xmm12,%xmm14 vpshufb 
%xmm0,%xmm12,%xmm12 vpxor %xmm15,%xmm11,%xmm11 vpaddd %xmm5,%xmm13,%xmm1 vpshufb %xmm0,%xmm13,%xmm13 vpshufb %xmm0,%xmm14,%xmm14 vpshufb %xmm0,%xmm1,%xmm1 jmp .Lresume_ctr32 .align 32 .Lenc_tail: vaesenc %xmm15,%xmm9,%xmm9 vmovdqu %xmm7,16+8(%rsp) vpalignr $8,%xmm4,%xmm4,%xmm8 vaesenc %xmm15,%xmm10,%xmm10 vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 vpxor 0(%rdi),%xmm1,%xmm2 vaesenc %xmm15,%xmm11,%xmm11 vpxor 16(%rdi),%xmm1,%xmm0 vaesenc %xmm15,%xmm12,%xmm12 vpxor 32(%rdi),%xmm1,%xmm5 vaesenc %xmm15,%xmm13,%xmm13 vpxor 48(%rdi),%xmm1,%xmm6 vaesenc %xmm15,%xmm14,%xmm14 vpxor 64(%rdi),%xmm1,%xmm7 vpxor 80(%rdi),%xmm1,%xmm3 vmovdqu (%r8),%xmm1 vaesenclast %xmm2,%xmm9,%xmm9 vmovdqu 32(%r11),%xmm2 vaesenclast %xmm0,%xmm10,%xmm10 vpaddb %xmm2,%xmm1,%xmm0 movq %r13,112+8(%rsp) leaq 96(%rdi),%rdi prefetcht0 512(%rdi) prefetcht0 576(%rdi) vaesenclast %xmm5,%xmm11,%xmm11 vpaddb %xmm2,%xmm0,%xmm5 movq %r12,120+8(%rsp) leaq 96(%rsi),%rsi vmovdqu 0-128(%rcx),%xmm15 vaesenclast %xmm6,%xmm12,%xmm12 vpaddb %xmm2,%xmm5,%xmm6 vaesenclast %xmm7,%xmm13,%xmm13 vpaddb %xmm2,%xmm6,%xmm7 vaesenclast %xmm3,%xmm14,%xmm14 vpaddb %xmm2,%xmm7,%xmm3 addq $0x60,%rax subq $0x6,%rdx jc .L6x_done vmovups %xmm9,-96(%rsi) vpxor %xmm15,%xmm1,%xmm9 vmovups %xmm10,-80(%rsi) vmovdqa %xmm0,%xmm10 vmovups %xmm11,-64(%rsi) vmovdqa %xmm5,%xmm11 vmovups %xmm12,-48(%rsi) vmovdqa %xmm6,%xmm12 vmovups %xmm13,-32(%rsi) vmovdqa %xmm7,%xmm13 vmovups %xmm14,-16(%rsi) vmovdqa %xmm3,%xmm14 vmovdqu 32+8(%rsp),%xmm7 jmp .Loop6x .L6x_done: vpxor 16+8(%rsp),%xmm8,%xmm8 vpxor %xmm4,%xmm8,%xmm8 ret .cfi_endproc .size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x .globl aesni_gcm_decrypt .hidden aesni_gcm_decrypt .type aesni_gcm_decrypt,@function .align 32 aesni_gcm_decrypt: .cfi_startproc _CET_ENDBR xorq %rax,%rax cmpq $0x60,%rdx jb .Lgcm_dec_abort pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 movq %rsp,%rbp .cfi_def_cfa_register %rbp pushq %rbx .cfi_offset %rbx,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset 
%r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 vzeroupper movq 16(%rbp),%r12 vmovdqu (%r8),%xmm1 addq $-128,%rsp movl 12(%r8),%ebx leaq .Lbswap_mask(%rip),%r11 leaq -128(%rcx),%r14 movq $0xf80,%r15 vmovdqu (%r12),%xmm8 andq $-128,%rsp vmovdqu (%r11),%xmm0 leaq 128(%rcx),%rcx leaq 32(%r9),%r9 movl 240-128(%rcx),%r10d vpshufb %xmm0,%xmm8,%xmm8 andq %r15,%r14 andq %rsp,%r15 subq %r14,%r15 jc .Ldec_no_key_aliasing cmpq $768,%r15 jnc .Ldec_no_key_aliasing subq %r15,%rsp .Ldec_no_key_aliasing: vmovdqu 80(%rdi),%xmm7 movq %rdi,%r14 vmovdqu 64(%rdi),%xmm4 leaq -192(%rdi,%rdx,1),%r15 vmovdqu 48(%rdi),%xmm5 shrq $4,%rdx xorq %rax,%rax vmovdqu 32(%rdi),%xmm6 vpshufb %xmm0,%xmm7,%xmm7 vmovdqu 16(%rdi),%xmm2 vpshufb %xmm0,%xmm4,%xmm4 vmovdqu (%rdi),%xmm3 vpshufb %xmm0,%xmm5,%xmm5 vmovdqu %xmm4,48(%rsp) vpshufb %xmm0,%xmm6,%xmm6 vmovdqu %xmm5,64(%rsp) vpshufb %xmm0,%xmm2,%xmm2 vmovdqu %xmm6,80(%rsp) vpshufb %xmm0,%xmm3,%xmm3 vmovdqu %xmm2,96(%rsp) vmovdqu %xmm3,112(%rsp) call _aesni_ctr32_ghash_6x movq 16(%rbp),%r12 vmovups %xmm9,-96(%rsi) vmovups %xmm10,-80(%rsi) vmovups %xmm11,-64(%rsi) vmovups %xmm12,-48(%rsi) vmovups %xmm13,-32(%rsi) vmovups %xmm14,-16(%rsi) vpshufb (%r11),%xmm8,%xmm8 vmovdqu %xmm8,(%r12) vzeroupper leaq -40(%rbp),%rsp .cfi_def_cfa %rsp, 0x38 popq %r15 .cfi_adjust_cfa_offset -8 .cfi_restore %r15 popq %r14 .cfi_adjust_cfa_offset -8 .cfi_restore %r14 popq %r13 .cfi_adjust_cfa_offset -8 .cfi_restore %r13 popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 popq %rbx .cfi_adjust_cfa_offset -8 .cfi_restore %rbx popq %rbp .cfi_adjust_cfa_offset -8 .cfi_restore %rbp .Lgcm_dec_abort: ret .cfi_endproc .size aesni_gcm_decrypt,.-aesni_gcm_decrypt .type _aesni_ctr32_6x,@function .align 32 _aesni_ctr32_6x: .cfi_startproc vmovdqu 0-128(%rcx),%xmm4 vmovdqu 32(%r11),%xmm2 leaq -1(%r10),%r13 vmovups 16-128(%rcx),%xmm15 leaq 32-128(%rcx),%r12 vpxor %xmm4,%xmm1,%xmm9 addl $100663296,%ebx jc .Lhandle_ctr32_2 vpaddb %xmm2,%xmm1,%xmm10 vpaddb 
%xmm2,%xmm10,%xmm11 vpxor %xmm4,%xmm10,%xmm10 vpaddb %xmm2,%xmm11,%xmm12 vpxor %xmm4,%xmm11,%xmm11 vpaddb %xmm2,%xmm12,%xmm13 vpxor %xmm4,%xmm12,%xmm12 vpaddb %xmm2,%xmm13,%xmm14 vpxor %xmm4,%xmm13,%xmm13 vpaddb %xmm2,%xmm14,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp .Loop_ctr32 .align 16 .Loop_ctr32: vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vmovups (%r12),%xmm15 leaq 16(%r12),%r12 decl %r13d jnz .Loop_ctr32 vmovdqu (%r12),%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor 0(%rdi),%xmm3,%xmm4 vaesenc %xmm15,%xmm10,%xmm10 vpxor 16(%rdi),%xmm3,%xmm5 vaesenc %xmm15,%xmm11,%xmm11 vpxor 32(%rdi),%xmm3,%xmm6 vaesenc %xmm15,%xmm12,%xmm12 vpxor 48(%rdi),%xmm3,%xmm8 vaesenc %xmm15,%xmm13,%xmm13 vpxor 64(%rdi),%xmm3,%xmm2 vaesenc %xmm15,%xmm14,%xmm14 vpxor 80(%rdi),%xmm3,%xmm3 leaq 96(%rdi),%rdi vaesenclast %xmm4,%xmm9,%xmm9 vaesenclast %xmm5,%xmm10,%xmm10 vaesenclast %xmm6,%xmm11,%xmm11 vaesenclast %xmm8,%xmm12,%xmm12 vaesenclast %xmm2,%xmm13,%xmm13 vaesenclast %xmm3,%xmm14,%xmm14 vmovups %xmm9,0(%rsi) vmovups %xmm10,16(%rsi) vmovups %xmm11,32(%rsi) vmovups %xmm12,48(%rsi) vmovups %xmm13,64(%rsi) vmovups %xmm14,80(%rsi) leaq 96(%rsi),%rsi ret .align 32 .Lhandle_ctr32_2: vpshufb %xmm0,%xmm1,%xmm6 vmovdqu 48(%r11),%xmm5 vpaddd 64(%r11),%xmm6,%xmm10 vpaddd %xmm5,%xmm6,%xmm11 vpaddd %xmm5,%xmm10,%xmm12 vpshufb %xmm0,%xmm10,%xmm10 vpaddd %xmm5,%xmm11,%xmm13 vpshufb %xmm0,%xmm11,%xmm11 vpxor %xmm4,%xmm10,%xmm10 vpaddd %xmm5,%xmm12,%xmm14 vpshufb %xmm0,%xmm12,%xmm12 vpxor %xmm4,%xmm11,%xmm11 vpaddd %xmm5,%xmm13,%xmm1 vpshufb %xmm0,%xmm13,%xmm13 vpxor %xmm4,%xmm12,%xmm12 vpshufb %xmm0,%xmm14,%xmm14 vpxor %xmm4,%xmm13,%xmm13 vpshufb %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp .Loop_ctr32 .cfi_endproc .size _aesni_ctr32_6x,.-_aesni_ctr32_6x .globl aesni_gcm_encrypt .hidden aesni_gcm_encrypt .type aesni_gcm_encrypt,@function .align 32 aesni_gcm_encrypt: .cfi_startproc 
_CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST .extern BORINGSSL_function_hit .hidden BORINGSSL_function_hit movb $1,BORINGSSL_function_hit+2(%rip) #endif xorq %rax,%rax cmpq $288,%rdx jb .Lgcm_enc_abort pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 movq %rsp,%rbp .cfi_def_cfa_register %rbp pushq %rbx .cfi_offset %rbx,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 vzeroupper vmovdqu (%r8),%xmm1 addq $-128,%rsp movl 12(%r8),%ebx leaq .Lbswap_mask(%rip),%r11 leaq -128(%rcx),%r14 movq $0xf80,%r15 leaq 128(%rcx),%rcx vmovdqu (%r11),%xmm0 andq $-128,%rsp movl 240-128(%rcx),%r10d andq %r15,%r14 andq %rsp,%r15 subq %r14,%r15 jc .Lenc_no_key_aliasing cmpq $768,%r15 jnc .Lenc_no_key_aliasing subq %r15,%rsp .Lenc_no_key_aliasing: movq %rsi,%r14 leaq -192(%rsi,%rdx,1),%r15 shrq $4,%rdx call _aesni_ctr32_6x vpshufb %xmm0,%xmm9,%xmm8 vpshufb %xmm0,%xmm10,%xmm2 vmovdqu %xmm8,112(%rsp) vpshufb %xmm0,%xmm11,%xmm4 vmovdqu %xmm2,96(%rsp) vpshufb %xmm0,%xmm12,%xmm5 vmovdqu %xmm4,80(%rsp) vpshufb %xmm0,%xmm13,%xmm6 vmovdqu %xmm5,64(%rsp) vpshufb %xmm0,%xmm14,%xmm7 vmovdqu %xmm6,48(%rsp) call _aesni_ctr32_6x movq 16(%rbp),%r12 leaq 32(%r9),%r9 vmovdqu (%r12),%xmm8 subq $12,%rdx movq $192,%rax vpshufb %xmm0,%xmm8,%xmm8 call _aesni_ctr32_ghash_6x vmovdqu 32(%rsp),%xmm7 vmovdqu (%r11),%xmm0 vmovdqu 0-32(%r9),%xmm3 vpunpckhqdq %xmm7,%xmm7,%xmm1 vmovdqu 32-32(%r9),%xmm15 vmovups %xmm9,-96(%rsi) vpshufb %xmm0,%xmm9,%xmm9 vpxor %xmm7,%xmm1,%xmm1 vmovups %xmm10,-80(%rsi) vpshufb %xmm0,%xmm10,%xmm10 vmovups %xmm11,-64(%rsi) vpshufb %xmm0,%xmm11,%xmm11 vmovups %xmm12,-48(%rsi) vpshufb %xmm0,%xmm12,%xmm12 vmovups %xmm13,-32(%rsi) vpshufb %xmm0,%xmm13,%xmm13 vmovups %xmm14,-16(%rsi) vpshufb %xmm0,%xmm14,%xmm14 vmovdqu %xmm9,16(%rsp) vmovdqu 48(%rsp),%xmm6 vmovdqu 16-32(%r9),%xmm0 vpunpckhqdq %xmm6,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5 vpxor %xmm6,%xmm2,%xmm2 vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 vmovdqu 64(%rsp),%xmm9 vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4 vmovdqu 48-32(%r9),%xmm3 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm9,%xmm9,%xmm5 vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6 vpxor %xmm9,%xmm5,%xmm5 vpxor %xmm7,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 vmovdqu 80-32(%r9),%xmm15 vpxor %xmm1,%xmm2,%xmm2 vmovdqu 80(%rsp),%xmm1 vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7 vmovdqu 64-32(%r9),%xmm0 vpxor %xmm4,%xmm7,%xmm7 vpunpckhqdq %xmm1,%xmm1,%xmm4 vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpxor %xmm6,%xmm9,%xmm9 vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 96(%rsp),%xmm2 vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6 vmovdqu 96-32(%r9),%xmm3 vpxor %xmm7,%xmm6,%xmm6 vpunpckhqdq %xmm2,%xmm2,%xmm7 vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1 vpxor %xmm2,%xmm7,%xmm7 vpxor %xmm9,%xmm1,%xmm1 vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4 vmovdqu 128-32(%r9),%xmm15 vpxor %xmm5,%xmm4,%xmm4 vpxor 112(%rsp),%xmm8,%xmm8 vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5 vmovdqu 112-32(%r9),%xmm0 vpunpckhqdq %xmm8,%xmm8,%xmm9 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2 vpxor %xmm8,%xmm9,%xmm9 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7 vpxor %xmm4,%xmm7,%xmm4 vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6 vmovdqu 0-32(%r9),%xmm3 vpunpckhqdq %xmm14,%xmm14,%xmm1 vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8 vpxor %xmm14,%xmm1,%xmm1 vpxor %xmm5,%xmm6,%xmm5 vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9 vmovdqu 32-32(%r9),%xmm15 vpxor %xmm2,%xmm8,%xmm7 vpxor %xmm4,%xmm9,%xmm6 vmovdqu 16-32(%r9),%xmm0 vpxor %xmm5,%xmm7,%xmm9 vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4 vpxor %xmm9,%xmm6,%xmm6 vpunpckhqdq %xmm13,%xmm13,%xmm2 vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14 vpxor %xmm13,%xmm2,%xmm2 vpslldq $8,%xmm6,%xmm9 vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 vpxor %xmm9,%xmm5,%xmm8 vpsrldq $8,%xmm6,%xmm6 vpxor %xmm6,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5 vmovdqu 48-32(%r9),%xmm3 vpxor %xmm4,%xmm5,%xmm5 vpunpckhqdq %xmm12,%xmm12,%xmm9 vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13 vpxor 
%xmm12,%xmm9,%xmm9 vpxor %xmm14,%xmm13,%xmm13 vpalignr $8,%xmm8,%xmm8,%xmm14 vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 vmovdqu 80-32(%r9),%xmm15 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4 vmovdqu 64-32(%r9),%xmm0 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm11,%xmm11,%xmm1 vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12 vpxor %xmm11,%xmm1,%xmm1 vpxor %xmm13,%xmm12,%xmm12 vxorps 16(%rsp),%xmm7,%xmm7 vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9 vpxor %xmm2,%xmm9,%xmm9 vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 vxorps %xmm14,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5 vmovdqu 96-32(%r9),%xmm3 vpxor %xmm4,%xmm5,%xmm5 vpunpckhqdq %xmm10,%xmm10,%xmm2 vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11 vpxor %xmm10,%xmm2,%xmm2 vpalignr $8,%xmm8,%xmm8,%xmm14 vpxor %xmm12,%xmm11,%xmm11 vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1 vmovdqu 128-32(%r9),%xmm15 vpxor %xmm9,%xmm1,%xmm1 vxorps %xmm7,%xmm14,%xmm14 vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 vxorps %xmm14,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4 vmovdqu 112-32(%r9),%xmm0 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm8,%xmm8,%xmm9 vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10 vpxor %xmm8,%xmm9,%xmm9 vpxor %xmm11,%xmm10,%xmm10 vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5 vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7 vpxor %xmm4,%xmm5,%xmm5 vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6 vpxor %xmm10,%xmm7,%xmm7 vpxor %xmm2,%xmm6,%xmm6 vpxor %xmm5,%xmm7,%xmm4 vpxor %xmm4,%xmm6,%xmm6 vpslldq $8,%xmm6,%xmm1 vmovdqu 16(%r11),%xmm3 vpsrldq $8,%xmm6,%xmm6 vpxor %xmm1,%xmm5,%xmm8 vpxor %xmm6,%xmm7,%xmm7 vpalignr $8,%xmm8,%xmm8,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 vpxor %xmm2,%xmm8,%xmm8 vpalignr $8,%xmm8,%xmm8,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 vpxor %xmm7,%xmm2,%xmm2 vpxor %xmm2,%xmm8,%xmm8 movq 16(%rbp),%r12 vpshufb (%r11),%xmm8,%xmm8 vmovdqu %xmm8,(%r12) vzeroupper leaq -40(%rbp),%rsp .cfi_def_cfa %rsp, 0x38 popq %r15 .cfi_adjust_cfa_offset -8 .cfi_restore %r15 popq %r14 .cfi_adjust_cfa_offset -8 .cfi_restore %r14 popq %r13 
.cfi_adjust_cfa_offset -8 .cfi_restore %r13 popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 popq %rbx .cfi_adjust_cfa_offset -8 .cfi_restore %rbx popq %rbp .cfi_adjust_cfa_offset -8 .cfi_restore %rbp .Lgcm_enc_abort: ret .cfi_endproc .size aesni_gcm_encrypt,.-aesni_gcm_encrypt .section .rodata .align 64 .Lbswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .Lpoly: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 .Lone_msb: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 .Ltwo_lsb: .byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 .Lone_lsb: .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 .byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 64 .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesni-x86-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text #ifdef BORINGSSL_DISPATCH_TEST #endif .globl _aes_hw_encrypt .private_extern _aes_hw_encrypt .align 4 _aes_hw_encrypt: L_aes_hw_encrypt_begin: #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call L000pic_for_function_hit L000pic_for_function_hit: popl %ebx leal _BORINGSSL_function_hit+1-L000pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 4(%esp),%eax movl 12(%esp),%edx movups (%eax),%xmm2 movl 240(%edx),%ecx movl 8(%esp),%eax movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L001enc1_loop_1: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L001enc1_loop_1 .byte 102,15,56,221,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%eax) pxor %xmm2,%xmm2 ret .globl _aes_hw_decrypt .private_extern _aes_hw_decrypt .align 4 _aes_hw_decrypt: L_aes_hw_decrypt_begin: movl 4(%esp),%eax movl 12(%esp),%edx movups (%eax),%xmm2 movl 240(%edx),%ecx movl 8(%esp),%eax movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L002dec1_loop_2: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L002dec1_loop_2 .byte 102,15,56,223,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%eax) pxor %xmm2,%xmm2 ret .private_extern __aesni_encrypt2 .align 4 __aesni_encrypt2: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx L003enc2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz L003enc2_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 ret .private_extern __aesni_decrypt2 .align 4 __aesni_decrypt2: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps 
%xmm0,%xmm2 pxor %xmm0,%xmm3 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx L004dec2_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 movups -16(%edx,%ecx,1),%xmm0 jnz L004dec2_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,223,208 .byte 102,15,56,223,216 ret .private_extern __aesni_encrypt3 .align 4 __aesni_encrypt3: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx L005enc3_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 movups -16(%edx,%ecx,1),%xmm0 jnz L005enc3_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 ret .private_extern __aesni_decrypt3 .align 4 __aesni_decrypt3: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx L006dec3_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 movups -16(%edx,%ecx,1),%xmm0 jnz L006dec3_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 ret .private_extern __aesni_encrypt4 .align 4 __aesni_encrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 shll $4,%ecx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 15,31,64,0 addl $16,%ecx L007enc4_loop: .byte 102,15,56,220,209 .byte 
102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movups -16(%edx,%ecx,1),%xmm0 jnz L007enc4_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 ret .private_extern __aesni_decrypt4 .align 4 __aesni_decrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 shll $4,%ecx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 15,31,64,0 addl $16,%ecx L008dec4_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 movups -16(%edx,%ecx,1),%xmm0 jnz L008dec4_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 ret .private_extern __aesni_encrypt6 .align 4 __aesni_encrypt6: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,220,209 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,220,217 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 102,15,56,220,225 pxor %xmm0,%xmm7 movups (%edx,%ecx,1),%xmm0 addl $16,%ecx jmp L009_aesni_encrypt6_inner .align 4,0x90 L010enc6_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 L009_aesni_encrypt6_inner: .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 L_aesni_encrypt6_enter: movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 
102,15,56,220,248 movups -16(%edx,%ecx,1),%xmm0 jnz L010enc6_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 ret .private_extern __aesni_decrypt6 .align 4 __aesni_decrypt6: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,222,209 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,222,217 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 102,15,56,222,225 pxor %xmm0,%xmm7 movups (%edx,%ecx,1),%xmm0 addl $16,%ecx jmp L011_aesni_decrypt6_inner .align 4,0x90 L012dec6_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 L011_aesni_decrypt6_inner: .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 L_aesni_decrypt6_enter: movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 movups -16(%edx,%ecx,1),%xmm0 jnz L012dec6_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 .byte 102,15,56,223,240 .byte 102,15,56,223,248 ret .globl _aes_hw_ecb_encrypt .private_extern _aes_hw_ecb_encrypt .align 4 _aes_hw_ecb_encrypt: L_aes_hw_ecb_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx andl $-16,%eax jz L013ecb_ret movl 240(%edx),%ecx testl %ebx,%ebx jz L014ecb_decrypt movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax jb L015ecb_enc_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 movdqu 48(%esi),%xmm5 
movdqu 64(%esi),%xmm6 movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax jmp L016ecb_enc_loop6_enter .align 4,0x90 L017ecb_enc_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) movdqu 16(%esi),%xmm3 movups %xmm4,32(%edi) movdqu 32(%esi),%xmm4 movups %xmm5,48(%edi) movdqu 48(%esi),%xmm5 movups %xmm6,64(%edi) movdqu 64(%esi),%xmm6 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi L016ecb_enc_loop6_enter: call __aesni_encrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax jnc L017ecb_enc_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax jz L013ecb_ret L015ecb_enc_tail: movups (%esi),%xmm2 cmpl $32,%eax jb L018ecb_enc_one movups 16(%esi),%xmm3 je L019ecb_enc_two movups 32(%esi),%xmm4 cmpl $64,%eax jb L020ecb_enc_three movups 48(%esi),%xmm5 je L021ecb_enc_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call __aesni_encrypt6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp L013ecb_ret .align 4,0x90 L018ecb_enc_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L022enc1_loop_3: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L022enc1_loop_3 .byte 102,15,56,221,209 movups %xmm2,(%edi) jmp L013ecb_ret .align 4,0x90 L019ecb_enc_two: call __aesni_encrypt2 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp L013ecb_ret .align 4,0x90 L020ecb_enc_three: call __aesni_encrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp L013ecb_ret .align 4,0x90 L021ecb_enc_four: call __aesni_encrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) jmp L013ecb_ret .align 4,0x90 L014ecb_decrypt: movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax jb L023ecb_dec_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 movdqu 48(%esi),%xmm5 movdqu 
64(%esi),%xmm6 movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax jmp L024ecb_dec_loop6_enter .align 4,0x90 L025ecb_dec_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) movdqu 16(%esi),%xmm3 movups %xmm4,32(%edi) movdqu 32(%esi),%xmm4 movups %xmm5,48(%edi) movdqu 48(%esi),%xmm5 movups %xmm6,64(%edi) movdqu 64(%esi),%xmm6 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi L024ecb_dec_loop6_enter: call __aesni_decrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax jnc L025ecb_dec_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax jz L013ecb_ret L023ecb_dec_tail: movups (%esi),%xmm2 cmpl $32,%eax jb L026ecb_dec_one movups 16(%esi),%xmm3 je L027ecb_dec_two movups 32(%esi),%xmm4 cmpl $64,%eax jb L028ecb_dec_three movups 48(%esi),%xmm5 je L029ecb_dec_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call __aesni_decrypt6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp L013ecb_ret .align 4,0x90 L026ecb_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L030dec1_loop_4: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L030dec1_loop_4 .byte 102,15,56,223,209 movups %xmm2,(%edi) jmp L013ecb_ret .align 4,0x90 L027ecb_dec_two: call __aesni_decrypt2 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp L013ecb_ret .align 4,0x90 L028ecb_dec_three: call __aesni_decrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp L013ecb_ret .align 4,0x90 L029ecb_dec_four: call __aesni_decrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) L013ecb_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx popl %ebp ret .globl 
_aes_hw_ccm64_encrypt_blocks .private_extern _aes_hw_ccm64_encrypt_blocks .align 4 _aes_hw_ccm64_encrypt_blocks: L_aes_hw_ccm64_encrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl 40(%esp),%ecx movl %esp,%ebp subl $60,%esp andl $-16,%esp movl %ebp,48(%esp) movdqu (%ebx),%xmm7 movdqu (%ecx),%xmm3 movl 240(%edx),%ecx movl $202182159,(%esp) movl $134810123,4(%esp) movl $67438087,8(%esp) movl $66051,12(%esp) movl $1,%ebx xorl %ebp,%ebp movl %ebx,16(%esp) movl %ebp,20(%esp) movl %ebp,24(%esp) movl %ebp,28(%esp) shll $4,%ecx movl $16,%ebx leal (%edx),%ebp movdqa (%esp),%xmm5 movdqa %xmm7,%xmm2 leal 32(%edx,%ecx,1),%edx subl %ecx,%ebx .byte 102,15,56,0,253 L031ccm64_enc_outer: movups (%ebp),%xmm0 movl %ebx,%ecx movups (%esi),%xmm6 xorps %xmm0,%xmm2 movups 16(%ebp),%xmm1 xorps %xmm6,%xmm0 xorps %xmm0,%xmm3 movups 32(%ebp),%xmm0 L032ccm64_enc2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz L032ccm64_enc2_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 paddq 16(%esp),%xmm7 decl %eax .byte 102,15,56,221,208 .byte 102,15,56,221,216 leal 16(%esi),%esi xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 movups %xmm6,(%edi) .byte 102,15,56,0,213 leal 16(%edi),%edi jnz L031ccm64_enc_outer movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx popl %ebp ret .globl _aes_hw_ccm64_decrypt_blocks .private_extern _aes_hw_ccm64_decrypt_blocks .align 4 _aes_hw_ccm64_decrypt_blocks: L_aes_hw_ccm64_decrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl 40(%esp),%ecx movl %esp,%ebp subl 
$60,%esp andl $-16,%esp movl %ebp,48(%esp) movdqu (%ebx),%xmm7 movdqu (%ecx),%xmm3 movl 240(%edx),%ecx movl $202182159,(%esp) movl $134810123,4(%esp) movl $67438087,8(%esp) movl $66051,12(%esp) movl $1,%ebx xorl %ebp,%ebp movl %ebx,16(%esp) movl %ebp,20(%esp) movl %ebp,24(%esp) movl %ebp,28(%esp) movdqa (%esp),%xmm5 movdqa %xmm7,%xmm2 movl %edx,%ebp movl %ecx,%ebx .byte 102,15,56,0,253 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L033enc1_loop_5: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L033enc1_loop_5 .byte 102,15,56,221,209 shll $4,%ebx movl $16,%ecx movups (%esi),%xmm6 paddq 16(%esp),%xmm7 leal 16(%esi),%esi subl %ebx,%ecx leal 32(%ebp,%ebx,1),%edx movl %ecx,%ebx jmp L034ccm64_dec_outer .align 4,0x90 L034ccm64_dec_outer: xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 movups %xmm6,(%edi) leal 16(%edi),%edi .byte 102,15,56,0,213 subl $1,%eax jz L035ccm64_dec_break movups (%ebp),%xmm0 movl %ebx,%ecx movups 16(%ebp),%xmm1 xorps %xmm0,%xmm6 xorps %xmm0,%xmm2 xorps %xmm6,%xmm3 movups 32(%ebp),%xmm0 L036ccm64_dec2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz L036ccm64_dec2_loop movups (%esi),%xmm6 paddq 16(%esp),%xmm7 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 leal 16(%esi),%esi jmp L034ccm64_dec_outer .align 4,0x90 L035ccm64_dec_break: movl 240(%ebp),%ecx movl %ebp,%edx movups (%edx),%xmm0 movups 16(%edx),%xmm1 xorps %xmm0,%xmm6 leal 32(%edx),%edx xorps %xmm6,%xmm3 L037enc1_loop_6: .byte 102,15,56,220,217 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L037enc1_loop_6 .byte 102,15,56,221,217 movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx popl %ebp 
ret .globl _aes_hw_ctr32_encrypt_blocks .private_extern _aes_hw_ctr32_encrypt_blocks .align 4 _aes_hw_ctr32_encrypt_blocks: L_aes_hw_ctr32_encrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call L038pic_for_function_hit L038pic_for_function_hit: popl %ebx leal _BORINGSSL_function_hit+0-L038pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl %esp,%ebp subl $88,%esp andl $-16,%esp movl %ebp,80(%esp) cmpl $1,%eax je L039ctr32_one_shortcut movdqu (%ebx),%xmm7 movl $202182159,(%esp) movl $134810123,4(%esp) movl $67438087,8(%esp) movl $66051,12(%esp) movl $6,%ecx xorl %ebp,%ebp movl %ecx,16(%esp) movl %ecx,20(%esp) movl %ecx,24(%esp) movl %ebp,28(%esp) .byte 102,15,58,22,251,3 .byte 102,15,58,34,253,3 movl 240(%edx),%ecx bswap %ebx pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movdqa (%esp),%xmm2 .byte 102,15,58,34,195,0 leal 3(%ebx),%ebp .byte 102,15,58,34,205,0 incl %ebx .byte 102,15,58,34,195,1 incl %ebp .byte 102,15,58,34,205,1 incl %ebx .byte 102,15,58,34,195,2 incl %ebp .byte 102,15,58,34,205,2 movdqa %xmm0,48(%esp) .byte 102,15,56,0,194 movdqu (%edx),%xmm6 movdqa %xmm1,64(%esp) .byte 102,15,56,0,202 pshufd $192,%xmm0,%xmm2 pshufd $128,%xmm0,%xmm3 cmpl $6,%eax jb L040ctr32_tail pxor %xmm6,%xmm7 shll $4,%ecx movl $16,%ebx movdqa %xmm7,32(%esp) movl %edx,%ebp subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx subl $6,%eax jmp L041ctr32_loop6 .align 4,0x90 L041ctr32_loop6: pshufd $64,%xmm0,%xmm4 movdqa 32(%esp),%xmm0 pshufd $192,%xmm1,%xmm5 pxor %xmm0,%xmm2 pshufd $128,%xmm1,%xmm6 pxor %xmm0,%xmm3 pshufd $64,%xmm1,%xmm7 movups 16(%ebp),%xmm1 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 .byte 102,15,56,220,209 pxor %xmm0,%xmm6 pxor %xmm0,%xmm7 .byte 102,15,56,220,217 movups 32(%ebp),%xmm0 movl %ebx,%ecx .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 
call L_aesni_encrypt6_enter movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps %xmm1,%xmm2 movups 32(%esi),%xmm1 xorps %xmm0,%xmm3 movups %xmm2,(%edi) movdqa 16(%esp),%xmm0 xorps %xmm1,%xmm4 movdqa 64(%esp),%xmm1 movups %xmm3,16(%edi) movups %xmm4,32(%edi) paddd %xmm0,%xmm1 paddd 48(%esp),%xmm0 movdqa (%esp),%xmm2 movups 48(%esi),%xmm3 movups 64(%esi),%xmm4 xorps %xmm3,%xmm5 movups 80(%esi),%xmm3 leal 96(%esi),%esi movdqa %xmm0,48(%esp) .byte 102,15,56,0,194 xorps %xmm4,%xmm6 movups %xmm5,48(%edi) xorps %xmm3,%xmm7 movdqa %xmm1,64(%esp) .byte 102,15,56,0,202 movups %xmm6,64(%edi) pshufd $192,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi pshufd $128,%xmm0,%xmm3 subl $6,%eax jnc L041ctr32_loop6 addl $6,%eax jz L042ctr32_ret movdqu (%ebp),%xmm7 movl %ebp,%edx pxor 32(%esp),%xmm7 movl 240(%ebp),%ecx L040ctr32_tail: por %xmm7,%xmm2 cmpl $2,%eax jb L043ctr32_one pshufd $64,%xmm0,%xmm4 por %xmm7,%xmm3 je L044ctr32_two pshufd $192,%xmm1,%xmm5 por %xmm7,%xmm4 cmpl $4,%eax jb L045ctr32_three pshufd $128,%xmm1,%xmm6 por %xmm7,%xmm5 je L046ctr32_four por %xmm7,%xmm6 call __aesni_encrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps %xmm1,%xmm2 movups 32(%esi),%xmm1 xorps %xmm0,%xmm3 movups 48(%esi),%xmm0 xorps %xmm1,%xmm4 movups 64(%esi),%xmm1 xorps %xmm0,%xmm5 movups %xmm2,(%edi) xorps %xmm1,%xmm6 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp L042ctr32_ret .align 4,0x90 L039ctr32_one_shortcut: movups (%ebx),%xmm2 movl 240(%edx),%ecx L043ctr32_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L047enc1_loop_7: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L047enc1_loop_7 .byte 102,15,56,221,209 movups (%esi),%xmm6 xorps %xmm2,%xmm6 movups %xmm6,(%edi) jmp L042ctr32_ret .align 4,0x90 L044ctr32_two: call __aesni_encrypt2 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp L042ctr32_ret .align 
4,0x90 L045ctr32_three: call __aesni_encrypt3 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 movups 32(%esi),%xmm7 xorps %xmm6,%xmm3 movups %xmm2,(%edi) xorps %xmm7,%xmm4 movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp L042ctr32_ret .align 4,0x90 L046ctr32_four: call __aesni_encrypt4 movups (%esi),%xmm6 movups 16(%esi),%xmm7 movups 32(%esi),%xmm1 xorps %xmm6,%xmm2 movups 48(%esi),%xmm0 xorps %xmm7,%xmm3 movups %xmm2,(%edi) xorps %xmm1,%xmm4 movups %xmm3,16(%edi) xorps %xmm0,%xmm5 movups %xmm4,32(%edi) movups %xmm5,48(%edi) L042ctr32_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movl 80(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _aes_hw_xts_encrypt .private_extern _aes_hw_xts_encrypt .align 4 _aes_hw_xts_encrypt: L_aes_hw_xts_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 36(%esp),%edx movl 40(%esp),%esi movl 240(%edx),%ecx movups (%esi),%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L048enc1_loop_8: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L048enc1_loop_8 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl %esp,%ebp subl $120,%esp movl 240(%edx),%ecx andl $-16,%esp movl $135,96(%esp) movl $0,100(%esp) movl $1,104(%esp) movl $0,108(%esp) movl %eax,112(%esp) movl %ebp,116(%esp) movdqa %xmm2,%xmm1 pxor %xmm0,%xmm0 movdqa 96(%esp),%xmm3 pcmpgtd %xmm1,%xmm0 andl $-16,%eax movl %edx,%ebp movl %ecx,%ebx subl $96,%eax jc L049xts_enc_short shll $4,%ecx movl $16,%ebx subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx jmp L050xts_enc_loop6 .align 4,0x90 L050xts_enc_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa 
%xmm1,16(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,32(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm7 movdqa %xmm1,64(%esp) paddq %xmm1,%xmm1 movups (%ebp),%xmm0 pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 pxor %xmm0,%xmm3 movdqu 48(%esi),%xmm5 pxor %xmm0,%xmm4 movdqu 64(%esi),%xmm6 pxor %xmm0,%xmm5 movdqu 80(%esi),%xmm1 pxor %xmm0,%xmm6 leal 96(%esi),%esi pxor (%esp),%xmm2 movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 pxor 16(%esp),%xmm3 pxor 32(%esp),%xmm4 .byte 102,15,56,220,209 pxor 48(%esp),%xmm5 pxor 64(%esp),%xmm6 .byte 102,15,56,220,217 pxor %xmm0,%xmm7 movups 32(%ebp),%xmm0 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 call L_aesni_encrypt6_enter movdqa 80(%esp),%xmm1 pxor %xmm0,%xmm0 xorps (%esp),%xmm2 pcmpgtd %xmm1,%xmm0 xorps 16(%esp),%xmm3 movups %xmm2,(%edi) xorps 32(%esp),%xmm4 movups %xmm3,16(%edi) xorps 48(%esp),%xmm5 movups %xmm4,32(%edi) xorps 64(%esp),%xmm6 movups %xmm5,48(%edi) xorps %xmm1,%xmm7 movups %xmm6,64(%edi) pshufd $19,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqa 96(%esp),%xmm3 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 subl $96,%eax jnc L050xts_enc_loop6 movl 240(%ebp),%ecx movl %ebp,%edx movl %ecx,%ebx L049xts_enc_short: addl $96,%eax jz L051xts_enc_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax jb L052xts_enc_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 je L053xts_enc_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor 
%xmm2,%xmm1 cmpl $64,%eax jb L054xts_enc_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) je L055xts_enc_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm7 pxor %xmm1,%xmm7 movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 pxor (%esp),%xmm2 movdqu 48(%esi),%xmm5 pxor 16(%esp),%xmm3 movdqu 64(%esi),%xmm6 pxor 32(%esp),%xmm4 leal 80(%esi),%esi pxor 48(%esp),%xmm5 movdqa %xmm7,64(%esp) pxor %xmm7,%xmm6 call __aesni_encrypt6 movaps 64(%esp),%xmm1 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps 32(%esp),%xmm4 movups %xmm2,(%edi) xorps 48(%esp),%xmm5 movups %xmm3,16(%edi) xorps %xmm1,%xmm6 movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi jmp L056xts_enc_done .align 4,0x90 L052xts_enc_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L057enc1_loop_9: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L057enc1_loop_9 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 jmp L056xts_enc_done .align 4,0x90 L053xts_enc_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 call __aesni_encrypt2 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 jmp L056xts_enc_done .align 4,0x90 L054xts_enc_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 leal 48(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 call __aesni_encrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 jmp L056xts_enc_done .align 
4,0x90 L055xts_enc_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 xorps (%esp),%xmm2 movups 48(%esi),%xmm5 leal 64(%esi),%esi xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 xorps %xmm6,%xmm5 call __aesni_encrypt4 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) xorps %xmm6,%xmm5 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 jmp L056xts_enc_done .align 4,0x90 L051xts_enc_done6x: movl 112(%esp),%eax andl $15,%eax jz L058xts_enc_ret movdqa %xmm1,%xmm5 movl %eax,112(%esp) jmp L059xts_enc_steal .align 4,0x90 L056xts_enc_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax jz L058xts_enc_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm5 paddq %xmm1,%xmm1 pand 96(%esp),%xmm5 pxor %xmm1,%xmm5 L059xts_enc_steal: movzbl (%esi),%ecx movzbl -16(%edi),%edx leal 1(%esi),%esi movb %cl,-16(%edi) movb %dl,(%edi) leal 1(%edi),%edi subl $1,%eax jnz L059xts_enc_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx movups -16(%edi),%xmm2 xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L060enc1_loop_10: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L060enc1_loop_10 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,-16(%edi) L058xts_enc_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 movdqa %xmm0,(%esp) pxor %xmm3,%xmm3 movdqa %xmm0,16(%esp) pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movdqa %xmm0,80(%esp) movl 116(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _aes_hw_xts_decrypt .private_extern _aes_hw_xts_decrypt .align 4 _aes_hw_xts_decrypt: L_aes_hw_xts_decrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 36(%esp),%edx movl 40(%esp),%esi movl 240(%edx),%ecx movups (%esi),%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx 
xorps %xmm0,%xmm2 L061enc1_loop_11: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L061enc1_loop_11 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl %esp,%ebp subl $120,%esp andl $-16,%esp xorl %ebx,%ebx testl $15,%eax setnz %bl shll $4,%ebx subl %ebx,%eax movl $135,96(%esp) movl $0,100(%esp) movl $1,104(%esp) movl $0,108(%esp) movl %eax,112(%esp) movl %ebp,116(%esp) movl 240(%edx),%ecx movl %edx,%ebp movl %ecx,%ebx movdqa %xmm2,%xmm1 pxor %xmm0,%xmm0 movdqa 96(%esp),%xmm3 pcmpgtd %xmm1,%xmm0 andl $-16,%eax subl $96,%eax jc L062xts_dec_short shll $4,%ecx movl $16,%ebx subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx jmp L063xts_dec_loop6 .align 4,0x90 L063xts_dec_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,16(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,32(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm7 movdqa %xmm1,64(%esp) paddq %xmm1,%xmm1 movups (%ebp),%xmm0 pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 pxor %xmm0,%xmm3 movdqu 48(%esi),%xmm5 pxor %xmm0,%xmm4 movdqu 64(%esi),%xmm6 pxor %xmm0,%xmm5 movdqu 80(%esi),%xmm1 pxor %xmm0,%xmm6 leal 96(%esi),%esi pxor (%esp),%xmm2 movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 pxor 16(%esp),%xmm3 pxor 32(%esp),%xmm4 .byte 102,15,56,222,209 pxor 48(%esp),%xmm5 pxor 64(%esp),%xmm6 .byte 102,15,56,222,217 pxor %xmm0,%xmm7 movups 32(%ebp),%xmm0 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 
call L_aesni_decrypt6_enter movdqa 80(%esp),%xmm1 pxor %xmm0,%xmm0 xorps (%esp),%xmm2 pcmpgtd %xmm1,%xmm0 xorps 16(%esp),%xmm3 movups %xmm2,(%edi) xorps 32(%esp),%xmm4 movups %xmm3,16(%edi) xorps 48(%esp),%xmm5 movups %xmm4,32(%edi) xorps 64(%esp),%xmm6 movups %xmm5,48(%edi) xorps %xmm1,%xmm7 movups %xmm6,64(%edi) pshufd $19,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqa 96(%esp),%xmm3 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 subl $96,%eax jnc L063xts_dec_loop6 movl 240(%ebp),%ecx movl %ebp,%edx movl %ecx,%ebx L062xts_dec_short: addl $96,%eax jz L064xts_dec_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax jb L065xts_dec_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 je L066xts_dec_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax jb L067xts_dec_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) je L068xts_dec_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm7 pxor %xmm1,%xmm7 movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 pxor (%esp),%xmm2 movdqu 48(%esi),%xmm5 pxor 16(%esp),%xmm3 movdqu 64(%esi),%xmm6 pxor 32(%esp),%xmm4 leal 80(%esi),%esi pxor 48(%esp),%xmm5 movdqa %xmm7,64(%esp) pxor %xmm7,%xmm6 call __aesni_decrypt6 movaps 64(%esp),%xmm1 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps 32(%esp),%xmm4 movups %xmm2,(%edi) xorps 48(%esp),%xmm5 movups %xmm3,16(%edi) xorps %xmm1,%xmm6 movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi jmp L069xts_dec_done .align 4,0x90 L065xts_dec_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 
L070dec1_loop_12: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L070dec1_loop_12 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 jmp L069xts_dec_done .align 4,0x90 L066xts_dec_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 call __aesni_decrypt2 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 jmp L069xts_dec_done .align 4,0x90 L067xts_dec_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 leal 48(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 call __aesni_decrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 jmp L069xts_dec_done .align 4,0x90 L068xts_dec_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 xorps (%esp),%xmm2 movups 48(%esi),%xmm5 leal 64(%esi),%esi xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 xorps %xmm6,%xmm5 call __aesni_decrypt4 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) xorps %xmm6,%xmm5 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 jmp L069xts_dec_done .align 4,0x90 L064xts_dec_done6x: movl 112(%esp),%eax andl $15,%eax jz L071xts_dec_ret movl %eax,112(%esp) jmp L072xts_dec_only_one_more .align 4,0x90 L069xts_dec_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax jz L071xts_dec_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa 96(%esp),%xmm3 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 L072xts_dec_only_one_more: pshufd $19,%xmm0,%xmm5 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 pand %xmm3,%xmm5 pxor %xmm1,%xmm5 movl %ebp,%edx movl %ebx,%ecx movups (%esi),%xmm2 xorps 
%xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L073dec1_loop_13: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L073dec1_loop_13 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) L074xts_dec_steal: movzbl 16(%esi),%ecx movzbl (%edi),%edx leal 1(%esi),%esi movb %cl,(%edi) movb %dl,16(%edi) leal 1(%edi),%edi subl $1,%eax jnz L074xts_dec_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx movups (%edi),%xmm2 xorps %xmm6,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L075dec1_loop_14: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L075dec1_loop_14 .byte 102,15,56,223,209 xorps %xmm6,%xmm2 movups %xmm2,(%edi) L071xts_dec_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 movdqa %xmm0,(%esp) pxor %xmm3,%xmm3 movdqa %xmm0,16(%esp) pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movdqa %xmm0,80(%esp) movl 116(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _aes_hw_cbc_encrypt .private_extern _aes_hw_cbc_encrypt .align 4 _aes_hw_cbc_encrypt: L_aes_hw_cbc_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl %esp,%ebx movl 24(%esp),%edi subl $24,%ebx movl 28(%esp),%eax andl $-16,%ebx movl 32(%esp),%edx movl 36(%esp),%ebp testl %eax,%eax jz L076cbc_abort cmpl $0,40(%esp) xchgl %esp,%ebx movups (%ebp),%xmm7 movl 240(%edx),%ecx movl %edx,%ebp movl %ebx,16(%esp) movl %ecx,%ebx je L077cbc_decrypt movaps %xmm7,%xmm2 cmpl $16,%eax jb L078cbc_enc_tail subl $16,%eax jmp L079cbc_enc_loop .align 4,0x90 L079cbc_enc_loop: movups (%esi),%xmm7 leal 16(%esi),%esi movups (%edx),%xmm0 movups 16(%edx),%xmm1 xorps %xmm0,%xmm7 leal 32(%edx),%edx xorps %xmm7,%xmm2 L080enc1_loop_15: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L080enc1_loop_15 .byte 102,15,56,221,209 movl 
%ebx,%ecx movl %ebp,%edx movups %xmm2,(%edi) leal 16(%edi),%edi subl $16,%eax jnc L079cbc_enc_loop addl $16,%eax jnz L078cbc_enc_tail movaps %xmm2,%xmm7 pxor %xmm2,%xmm2 jmp L081cbc_ret L078cbc_enc_tail: movl %eax,%ecx .long 2767451785 movl $16,%ecx subl %eax,%ecx xorl %eax,%eax .long 2868115081 leal -16(%edi),%edi movl %ebx,%ecx movl %edi,%esi movl %ebp,%edx jmp L079cbc_enc_loop .align 4,0x90 L077cbc_decrypt: cmpl $80,%eax jbe L082cbc_dec_tail movaps %xmm7,(%esp) subl $80,%eax jmp L083cbc_dec_loop6_enter .align 4,0x90 L084cbc_dec_loop6: movaps %xmm0,(%esp) movups %xmm7,(%edi) leal 16(%edi),%edi L083cbc_dec_loop6_enter: movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 movdqu 48(%esi),%xmm5 movdqu 64(%esi),%xmm6 movdqu 80(%esi),%xmm7 call __aesni_decrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps (%esp),%xmm2 xorps %xmm1,%xmm3 movups 32(%esi),%xmm1 xorps %xmm0,%xmm4 movups 48(%esi),%xmm0 xorps %xmm1,%xmm5 movups 64(%esi),%xmm1 xorps %xmm0,%xmm6 movups 80(%esi),%xmm0 xorps %xmm1,%xmm7 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 96(%esi),%esi movups %xmm4,32(%edi) movl %ebx,%ecx movups %xmm5,48(%edi) movl %ebp,%edx movups %xmm6,64(%edi) leal 80(%edi),%edi subl $96,%eax ja L084cbc_dec_loop6 movaps %xmm7,%xmm2 movaps %xmm0,%xmm7 addl $80,%eax jle L085cbc_dec_clear_tail_collected movups %xmm2,(%edi) leal 16(%edi),%edi L082cbc_dec_tail: movups (%esi),%xmm2 movaps %xmm2,%xmm6 cmpl $16,%eax jbe L086cbc_dec_one movups 16(%esi),%xmm3 movaps %xmm3,%xmm5 cmpl $32,%eax jbe L087cbc_dec_two movups 32(%esi),%xmm4 cmpl $48,%eax jbe L088cbc_dec_three movups 48(%esi),%xmm5 cmpl $64,%eax jbe L089cbc_dec_four movups 64(%esi),%xmm6 movaps %xmm7,(%esp) movups (%esi),%xmm2 xorps %xmm7,%xmm7 call __aesni_decrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps (%esp),%xmm2 xorps %xmm1,%xmm3 movups 32(%esi),%xmm1 xorps %xmm0,%xmm4 movups 48(%esi),%xmm0 xorps %xmm1,%xmm5 movups 64(%esi),%xmm7 xorps %xmm0,%xmm6 movups %xmm2,(%edi) movups %xmm3,16(%edi) pxor %xmm3,%xmm3 
movups %xmm4,32(%edi) pxor %xmm4,%xmm4 movups %xmm5,48(%edi) pxor %xmm5,%xmm5 leal 64(%edi),%edi movaps %xmm6,%xmm2 pxor %xmm6,%xmm6 subl $80,%eax jmp L090cbc_dec_tail_collected .align 4,0x90 L086cbc_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 L091dec1_loop_16: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz L091dec1_loop_16 .byte 102,15,56,223,209 xorps %xmm7,%xmm2 movaps %xmm6,%xmm7 subl $16,%eax jmp L090cbc_dec_tail_collected .align 4,0x90 L087cbc_dec_two: call __aesni_decrypt2 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movaps %xmm3,%xmm2 pxor %xmm3,%xmm3 leal 16(%edi),%edi movaps %xmm5,%xmm7 subl $32,%eax jmp L090cbc_dec_tail_collected .align 4,0x90 L088cbc_dec_three: call __aesni_decrypt3 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 xorps %xmm5,%xmm4 movups %xmm2,(%edi) movaps %xmm4,%xmm2 pxor %xmm4,%xmm4 movups %xmm3,16(%edi) pxor %xmm3,%xmm3 leal 32(%edi),%edi movups 32(%esi),%xmm7 subl $48,%eax jmp L090cbc_dec_tail_collected .align 4,0x90 L089cbc_dec_four: call __aesni_decrypt4 movups 16(%esi),%xmm1 movups 32(%esi),%xmm0 xorps %xmm7,%xmm2 movups 48(%esi),%xmm7 xorps %xmm6,%xmm3 movups %xmm2,(%edi) xorps %xmm1,%xmm4 movups %xmm3,16(%edi) pxor %xmm3,%xmm3 xorps %xmm0,%xmm5 movups %xmm4,32(%edi) pxor %xmm4,%xmm4 leal 48(%edi),%edi movaps %xmm5,%xmm2 pxor %xmm5,%xmm5 subl $64,%eax jmp L090cbc_dec_tail_collected .align 4,0x90 L085cbc_dec_clear_tail_collected: pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 L090cbc_dec_tail_collected: andl $15,%eax jnz L092cbc_dec_tail_partial movups %xmm2,(%edi) pxor %xmm0,%xmm0 jmp L081cbc_ret .align 4,0x90 L092cbc_dec_tail_partial: movaps %xmm2,(%esp) pxor %xmm0,%xmm0 movl $16,%ecx movl %esp,%esi subl %eax,%ecx .long 2767451785 movdqa %xmm2,(%esp) L081cbc_ret: movl 16(%esp),%esp movl 36(%esp),%ebp pxor %xmm2,%xmm2 pxor %xmm1,%xmm1 movups %xmm7,(%ebp) pxor %xmm7,%xmm7 L076cbc_abort: popl %edi popl %esi popl %ebx popl %ebp ret .globl 
_aes_hw_set_encrypt_key_base .private_extern _aes_hw_set_encrypt_key_base .align 4 _aes_hw_set_encrypt_key_base: L_aes_hw_set_encrypt_key_base_begin: #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call L093pic_for_function_hit L093pic_for_function_hit: popl %ebx leal _BORINGSSL_function_hit+3-L093pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 4(%esp),%eax movl 8(%esp),%ecx movl 12(%esp),%edx pushl %ebx call L094pic L094pic: popl %ebx leal Lkey_const-L094pic(%ebx),%ebx movups (%eax),%xmm0 xorps %xmm4,%xmm4 leal 16(%edx),%edx cmpl $256,%ecx je L09514rounds cmpl $192,%ecx je L09612rounds cmpl $128,%ecx jne L097bad_keybits .align 4,0x90 L09810rounds: movl $9,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,200,1 call L099key_128_cold .byte 102,15,58,223,200,2 call L100key_128 .byte 102,15,58,223,200,4 call L100key_128 .byte 102,15,58,223,200,8 call L100key_128 .byte 102,15,58,223,200,16 call L100key_128 .byte 102,15,58,223,200,32 call L100key_128 .byte 102,15,58,223,200,64 call L100key_128 .byte 102,15,58,223,200,128 call L100key_128 .byte 102,15,58,223,200,27 call L100key_128 .byte 102,15,58,223,200,54 call L100key_128 movups %xmm0,(%edx) movl %ecx,80(%edx) jmp L101good_key .align 4,0x90 L100key_128: movups %xmm0,(%edx) leal 16(%edx),%edx L099key_128_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .align 4,0x90 L09612rounds: movq 16(%eax),%xmm2 movl $11,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,202,1 call L102key_192a_cold .byte 102,15,58,223,202,2 call L103key_192b .byte 102,15,58,223,202,4 call L104key_192a .byte 102,15,58,223,202,8 call L103key_192b .byte 102,15,58,223,202,16 call L104key_192a .byte 102,15,58,223,202,32 call L103key_192b .byte 102,15,58,223,202,64 call L104key_192a .byte 102,15,58,223,202,128 call L103key_192b movups %xmm0,(%edx) movl %ecx,48(%edx) jmp L101good_key .align 4,0x90 L104key_192a: movups 
%xmm0,(%edx) leal 16(%edx),%edx .align 4,0x90 L102key_192a_cold: movaps %xmm2,%xmm5 L105key_192b_warm: shufps $16,%xmm0,%xmm4 movdqa %xmm2,%xmm3 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 pslldq $4,%xmm3 xorps %xmm4,%xmm0 pshufd $85,%xmm1,%xmm1 pxor %xmm3,%xmm2 pxor %xmm1,%xmm0 pshufd $255,%xmm0,%xmm3 pxor %xmm3,%xmm2 ret .align 4,0x90 L103key_192b: movaps %xmm0,%xmm3 shufps $68,%xmm0,%xmm5 movups %xmm5,(%edx) shufps $78,%xmm2,%xmm3 movups %xmm3,16(%edx) leal 32(%edx),%edx jmp L105key_192b_warm .align 4,0x90 L09514rounds: movups 16(%eax),%xmm2 leal 16(%edx),%edx movl $13,%ecx movups %xmm0,-32(%edx) movups %xmm2,-16(%edx) .byte 102,15,58,223,202,1 call L106key_256a_cold .byte 102,15,58,223,200,1 call L107key_256b .byte 102,15,58,223,202,2 call L108key_256a .byte 102,15,58,223,200,2 call L107key_256b .byte 102,15,58,223,202,4 call L108key_256a .byte 102,15,58,223,200,4 call L107key_256b .byte 102,15,58,223,202,8 call L108key_256a .byte 102,15,58,223,200,8 call L107key_256b .byte 102,15,58,223,202,16 call L108key_256a .byte 102,15,58,223,200,16 call L107key_256b .byte 102,15,58,223,202,32 call L108key_256a .byte 102,15,58,223,200,32 call L107key_256b .byte 102,15,58,223,202,64 call L108key_256a movups %xmm0,(%edx) movl %ecx,16(%edx) xorl %eax,%eax jmp L101good_key .align 4,0x90 L108key_256a: movups %xmm2,(%edx) leal 16(%edx),%edx L106key_256a_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .align 4,0x90 L107key_256b: movups %xmm0,(%edx) leal 16(%edx),%edx shufps $16,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $140,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 ret L101good_key: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 xorl %eax,%eax popl %ebx ret .align 2,0x90 L097bad_keybits: pxor %xmm0,%xmm0 movl $-2,%eax popl %ebx ret .globl _aes_hw_set_encrypt_key_alt .private_extern _aes_hw_set_encrypt_key_alt .align 
4 _aes_hw_set_encrypt_key_alt: L_aes_hw_set_encrypt_key_alt_begin: #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call L109pic_for_function_hit L109pic_for_function_hit: popl %ebx leal _BORINGSSL_function_hit+3-L109pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 4(%esp),%eax movl 8(%esp),%ecx movl 12(%esp),%edx pushl %ebx call L110pic L110pic: popl %ebx leal Lkey_const-L110pic(%ebx),%ebx movups (%eax),%xmm0 xorps %xmm4,%xmm4 leal 16(%edx),%edx cmpl $256,%ecx je L11114rounds_alt cmpl $192,%ecx je L11212rounds_alt cmpl $128,%ecx jne L113bad_keybits .align 4,0x90 L11410rounds_alt: movdqa (%ebx),%xmm5 movl $8,%ecx movdqa 32(%ebx),%xmm4 movdqa %xmm0,%xmm2 movdqu %xmm0,-16(%edx) L115loop_key128: .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 leal 16(%edx),%edx movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,-16(%edx) movdqa %xmm0,%xmm2 decl %ecx jnz L115loop_key128 movdqa 48(%ebx),%xmm4 .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,(%edx) movdqa %xmm0,%xmm2 .byte 102,15,56,0,197 .byte 102,15,56,221,196 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,16(%edx) movl $9,%ecx movl %ecx,96(%edx) jmp L116good_key .align 4,0x90 L11212rounds_alt: movq 16(%eax),%xmm2 movdqa 16(%ebx),%xmm5 movdqa 32(%ebx),%xmm4 movl $8,%ecx movdqu %xmm0,-16(%edx) L117loop_key192: movq %xmm2,(%edx) movdqa %xmm2,%xmm1 .byte 102,15,56,0,213 .byte 102,15,56,221,212 pslld $1,%xmm4 leal 24(%edx),%edx movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pshufd $255,%xmm0,%xmm3 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor 
%xmm1,%xmm3 pxor %xmm2,%xmm0 pxor %xmm3,%xmm2 movdqu %xmm0,-16(%edx) decl %ecx jnz L117loop_key192 movl $11,%ecx movl %ecx,32(%edx) jmp L116good_key .align 4,0x90 L11114rounds_alt: movups 16(%eax),%xmm2 leal 16(%edx),%edx movdqa (%ebx),%xmm5 movdqa 32(%ebx),%xmm4 movl $7,%ecx movdqu %xmm0,-32(%edx) movdqa %xmm2,%xmm1 movdqu %xmm2,-16(%edx) L118loop_key256: .byte 102,15,56,0,213 .byte 102,15,56,221,212 movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pslld $1,%xmm4 pxor %xmm2,%xmm0 movdqu %xmm0,(%edx) decl %ecx jz L119done_key256 pshufd $255,%xmm0,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,221,211 movdqa %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm3,%xmm1 pxor %xmm1,%xmm2 movdqu %xmm2,16(%edx) leal 32(%edx),%edx movdqa %xmm2,%xmm1 jmp L118loop_key256 L119done_key256: movl $13,%ecx movl %ecx,16(%edx) L116good_key: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 xorl %eax,%eax popl %ebx ret .align 2,0x90 L113bad_keybits: pxor %xmm0,%xmm0 movl $-2,%eax popl %ebx ret .globl _aes_hw_encrypt_key_to_decrypt_key .private_extern _aes_hw_encrypt_key_to_decrypt_key .align 4 _aes_hw_encrypt_key_to_decrypt_key: L_aes_hw_encrypt_key_to_decrypt_key_begin: movl 4(%esp),%edx movl 240(%edx),%ecx shll $4,%ecx leal 16(%edx,%ecx,1),%eax movups (%edx),%xmm0 movups (%eax),%xmm1 movups %xmm0,(%eax) movups %xmm1,(%edx) leal 16(%edx),%edx leal -16(%eax),%eax L120dec_key_inverse: movups (%edx),%xmm0 movups (%eax),%xmm1 .byte 102,15,56,219,192 .byte 102,15,56,219,201 leal 16(%edx),%edx leal -16(%eax),%eax movups %xmm0,16(%eax) movups %xmm1,-16(%edx) cmpl %edx,%eax ja L120dec_key_inverse movups (%edx),%xmm0 .byte 102,15,56,219,192 movups %xmm0,(%edx) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 ret .align 6,0x90 Lkey_const: .long 202313229,202313229,202313229,202313229 .long 67569157,67569157,67569157,67569157 .long 1,1,1,1 .long 
27,27,27,27 .byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69 .byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83 .byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 .byte 115,108,46,111,114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesni-x86-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text #ifdef BORINGSSL_DISPATCH_TEST #endif .globl aes_hw_encrypt .hidden aes_hw_encrypt .type aes_hw_encrypt,@function .align 16 aes_hw_encrypt: .L_aes_hw_encrypt_begin: #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L000pic_for_function_hit .L000pic_for_function_hit: popl %ebx leal BORINGSSL_function_hit+1-.L000pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 4(%esp),%eax movl 12(%esp),%edx movups (%eax),%xmm2 movl 240(%edx),%ecx movl 8(%esp),%eax movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L001enc1_loop_1: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L001enc1_loop_1 .byte 102,15,56,221,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%eax) pxor %xmm2,%xmm2 ret .size aes_hw_encrypt,.-.L_aes_hw_encrypt_begin .globl aes_hw_decrypt .hidden aes_hw_decrypt .type aes_hw_decrypt,@function .align 16 aes_hw_decrypt: .L_aes_hw_decrypt_begin: movl 4(%esp),%eax movl 12(%esp),%edx movups (%eax),%xmm2 movl 240(%edx),%ecx movl 8(%esp),%eax movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L002dec1_loop_2: .byte 102,15,56,222,209 decl %ecx movups 
(%edx),%xmm1 leal 16(%edx),%edx jnz .L002dec1_loop_2 .byte 102,15,56,223,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%eax) pxor %xmm2,%xmm2 ret .size aes_hw_decrypt,.-.L_aes_hw_decrypt_begin .hidden _aesni_encrypt2 .type _aesni_encrypt2,@function .align 16 _aesni_encrypt2: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx .L003enc2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz .L003enc2_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 ret .size _aesni_encrypt2,.-_aesni_encrypt2 .hidden _aesni_decrypt2 .type _aesni_decrypt2,@function .align 16 _aesni_decrypt2: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx .L004dec2_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 movups -16(%edx,%ecx,1),%xmm0 jnz .L004dec2_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,223,208 .byte 102,15,56,223,216 ret .size _aesni_decrypt2,.-_aesni_decrypt2 .hidden _aesni_encrypt3 .type _aesni_encrypt3,@function .align 16 _aesni_encrypt3: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx .L005enc3_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 movups -16(%edx,%ecx,1),%xmm0 jnz .L005enc3_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,221,208 .byte 
102,15,56,221,216 .byte 102,15,56,221,224 ret .size _aesni_encrypt3,.-_aesni_encrypt3 .hidden _aesni_decrypt3 .type _aesni_decrypt3,@function .align 16 _aesni_decrypt3: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx .L006dec3_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 movups -16(%edx,%ecx,1),%xmm0 jnz .L006dec3_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 ret .size _aesni_decrypt3,.-_aesni_decrypt3 .hidden _aesni_encrypt4 .type _aesni_encrypt4,@function .align 16 _aesni_encrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 shll $4,%ecx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 15,31,64,0 addl $16,%ecx .L007enc4_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movups -16(%edx,%ecx,1),%xmm0 jnz .L007enc4_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 ret .size _aesni_encrypt4,.-_aesni_encrypt4 .hidden _aesni_decrypt4 .type _aesni_decrypt4,@function .align 16 _aesni_decrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 shll $4,%ecx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 15,31,64,0 addl $16,%ecx .L008dec4_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 
.byte 102,15,56,222,233 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 movups -16(%edx,%ecx,1),%xmm0 jnz .L008dec4_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 ret .size _aesni_decrypt4,.-_aesni_decrypt4 .hidden _aesni_encrypt6 .type _aesni_encrypt6,@function .align 16 _aesni_encrypt6: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,220,209 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,220,217 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 102,15,56,220,225 pxor %xmm0,%xmm7 movups (%edx,%ecx,1),%xmm0 addl $16,%ecx jmp .L009_aesni_encrypt6_inner .align 16 .L010enc6_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .L009_aesni_encrypt6_inner: .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .L_aesni_encrypt6_enter: movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 movups -16(%edx,%ecx,1),%xmm0 jnz .L010enc6_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 ret .size _aesni_encrypt6,.-_aesni_encrypt6 .hidden _aesni_decrypt6 .type _aesni_decrypt6,@function .align 16 _aesni_decrypt6: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,222,209 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,222,217 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 102,15,56,222,225 pxor 
%xmm0,%xmm7 movups (%edx,%ecx,1),%xmm0 addl $16,%ecx jmp .L011_aesni_decrypt6_inner .align 16 .L012dec6_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .L011_aesni_decrypt6_inner: .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .L_aesni_decrypt6_enter: movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 movups -16(%edx,%ecx,1),%xmm0 jnz .L012dec6_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 .byte 102,15,56,223,240 .byte 102,15,56,223,248 ret .size _aesni_decrypt6,.-_aesni_decrypt6 .globl aes_hw_ecb_encrypt .hidden aes_hw_ecb_encrypt .type aes_hw_ecb_encrypt,@function .align 16 aes_hw_ecb_encrypt: .L_aes_hw_ecb_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx andl $-16,%eax jz .L013ecb_ret movl 240(%edx),%ecx testl %ebx,%ebx jz .L014ecb_decrypt movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax jb .L015ecb_enc_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 movdqu 48(%esi),%xmm5 movdqu 64(%esi),%xmm6 movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax jmp .L016ecb_enc_loop6_enter .align 16 .L017ecb_enc_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) movdqu 16(%esi),%xmm3 movups %xmm4,32(%edi) movdqu 32(%esi),%xmm4 movups %xmm5,48(%edi) movdqu 48(%esi),%xmm5 movups %xmm6,64(%edi) movdqu 64(%esi),%xmm6 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi .L016ecb_enc_loop6_enter: call _aesni_encrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax jnc .L017ecb_enc_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups 
%xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax jz .L013ecb_ret .L015ecb_enc_tail: movups (%esi),%xmm2 cmpl $32,%eax jb .L018ecb_enc_one movups 16(%esi),%xmm3 je .L019ecb_enc_two movups 32(%esi),%xmm4 cmpl $64,%eax jb .L020ecb_enc_three movups 48(%esi),%xmm5 je .L021ecb_enc_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call _aesni_encrypt6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp .L013ecb_ret .align 16 .L018ecb_enc_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L022enc1_loop_3: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L022enc1_loop_3 .byte 102,15,56,221,209 movups %xmm2,(%edi) jmp .L013ecb_ret .align 16 .L019ecb_enc_two: call _aesni_encrypt2 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp .L013ecb_ret .align 16 .L020ecb_enc_three: call _aesni_encrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp .L013ecb_ret .align 16 .L021ecb_enc_four: call _aesni_encrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) jmp .L013ecb_ret .align 16 .L014ecb_decrypt: movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax jb .L023ecb_dec_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 movdqu 48(%esi),%xmm5 movdqu 64(%esi),%xmm6 movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax jmp .L024ecb_dec_loop6_enter .align 16 .L025ecb_dec_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) movdqu 16(%esi),%xmm3 movups %xmm4,32(%edi) movdqu 32(%esi),%xmm4 movups %xmm5,48(%edi) movdqu 48(%esi),%xmm5 movups %xmm6,64(%edi) movdqu 64(%esi),%xmm6 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi .L024ecb_dec_loop6_enter: call _aesni_decrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax jnc .L025ecb_dec_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) 
movups %xmm5,48(%edi) movups %xmm6,64(%edi) movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax jz .L013ecb_ret .L023ecb_dec_tail: movups (%esi),%xmm2 cmpl $32,%eax jb .L026ecb_dec_one movups 16(%esi),%xmm3 je .L027ecb_dec_two movups 32(%esi),%xmm4 cmpl $64,%eax jb .L028ecb_dec_three movups 48(%esi),%xmm5 je .L029ecb_dec_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call _aesni_decrypt6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp .L013ecb_ret .align 16 .L026ecb_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L030dec1_loop_4: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L030dec1_loop_4 .byte 102,15,56,223,209 movups %xmm2,(%edi) jmp .L013ecb_ret .align 16 .L027ecb_dec_two: call _aesni_decrypt2 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp .L013ecb_ret .align 16 .L028ecb_dec_three: call _aesni_decrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp .L013ecb_ret .align 16 .L029ecb_dec_four: call _aesni_decrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) .L013ecb_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_ecb_encrypt,.-.L_aes_hw_ecb_encrypt_begin .globl aes_hw_ccm64_encrypt_blocks .hidden aes_hw_ccm64_encrypt_blocks .type aes_hw_ccm64_encrypt_blocks,@function .align 16 aes_hw_ccm64_encrypt_blocks: .L_aes_hw_ccm64_encrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl 40(%esp),%ecx movl %esp,%ebp subl $60,%esp andl $-16,%esp movl %ebp,48(%esp) movdqu (%ebx),%xmm7 movdqu (%ecx),%xmm3 movl 240(%edx),%ecx movl $202182159,(%esp) movl $134810123,4(%esp) movl $67438087,8(%esp) movl $66051,12(%esp) movl $1,%ebx 
xorl %ebp,%ebp movl %ebx,16(%esp) movl %ebp,20(%esp) movl %ebp,24(%esp) movl %ebp,28(%esp) shll $4,%ecx movl $16,%ebx leal (%edx),%ebp movdqa (%esp),%xmm5 movdqa %xmm7,%xmm2 leal 32(%edx,%ecx,1),%edx subl %ecx,%ebx .byte 102,15,56,0,253 .L031ccm64_enc_outer: movups (%ebp),%xmm0 movl %ebx,%ecx movups (%esi),%xmm6 xorps %xmm0,%xmm2 movups 16(%ebp),%xmm1 xorps %xmm6,%xmm0 xorps %xmm0,%xmm3 movups 32(%ebp),%xmm0 .L032ccm64_enc2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz .L032ccm64_enc2_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 paddq 16(%esp),%xmm7 decl %eax .byte 102,15,56,221,208 .byte 102,15,56,221,216 leal 16(%esi),%esi xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 movups %xmm6,(%edi) .byte 102,15,56,0,213 leal 16(%edi),%edi jnz .L031ccm64_enc_outer movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_ccm64_encrypt_blocks,.-.L_aes_hw_ccm64_encrypt_blocks_begin .globl aes_hw_ccm64_decrypt_blocks .hidden aes_hw_ccm64_decrypt_blocks .type aes_hw_ccm64_decrypt_blocks,@function .align 16 aes_hw_ccm64_decrypt_blocks: .L_aes_hw_ccm64_decrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl 40(%esp),%ecx movl %esp,%ebp subl $60,%esp andl $-16,%esp movl %ebp,48(%esp) movdqu (%ebx),%xmm7 movdqu (%ecx),%xmm3 movl 240(%edx),%ecx movl $202182159,(%esp) movl $134810123,4(%esp) movl $67438087,8(%esp) movl $66051,12(%esp) movl $1,%ebx xorl %ebp,%ebp movl %ebx,16(%esp) movl %ebp,20(%esp) movl %ebp,24(%esp) movl %ebp,28(%esp) movdqa (%esp),%xmm5 movdqa %xmm7,%xmm2 movl %edx,%ebp movl %ecx,%ebx .byte 102,15,56,0,253 movups (%edx),%xmm0 movups 
16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L033enc1_loop_5: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L033enc1_loop_5 .byte 102,15,56,221,209 shll $4,%ebx movl $16,%ecx movups (%esi),%xmm6 paddq 16(%esp),%xmm7 leal 16(%esi),%esi subl %ebx,%ecx leal 32(%ebp,%ebx,1),%edx movl %ecx,%ebx jmp .L034ccm64_dec_outer .align 16 .L034ccm64_dec_outer: xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 movups %xmm6,(%edi) leal 16(%edi),%edi .byte 102,15,56,0,213 subl $1,%eax jz .L035ccm64_dec_break movups (%ebp),%xmm0 movl %ebx,%ecx movups 16(%ebp),%xmm1 xorps %xmm0,%xmm6 xorps %xmm0,%xmm2 xorps %xmm6,%xmm3 movups 32(%ebp),%xmm0 .L036ccm64_dec2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz .L036ccm64_dec2_loop movups (%esi),%xmm6 paddq 16(%esp),%xmm7 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 leal 16(%esi),%esi jmp .L034ccm64_dec_outer .align 16 .L035ccm64_dec_break: movl 240(%ebp),%ecx movl %ebp,%edx movups (%edx),%xmm0 movups 16(%edx),%xmm1 xorps %xmm0,%xmm6 leal 32(%edx),%edx xorps %xmm6,%xmm3 .L037enc1_loop_6: .byte 102,15,56,220,217 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L037enc1_loop_6 .byte 102,15,56,221,217 movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_ccm64_decrypt_blocks,.-.L_aes_hw_ccm64_decrypt_blocks_begin .globl aes_hw_ctr32_encrypt_blocks .hidden aes_hw_ctr32_encrypt_blocks .type aes_hw_ctr32_encrypt_blocks,@function .align 16 aes_hw_ctr32_encrypt_blocks: .L_aes_hw_ctr32_encrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L038pic_for_function_hit 
.L038pic_for_function_hit: popl %ebx leal BORINGSSL_function_hit+0-.L038pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl %esp,%ebp subl $88,%esp andl $-16,%esp movl %ebp,80(%esp) cmpl $1,%eax je .L039ctr32_one_shortcut movdqu (%ebx),%xmm7 movl $202182159,(%esp) movl $134810123,4(%esp) movl $67438087,8(%esp) movl $66051,12(%esp) movl $6,%ecx xorl %ebp,%ebp movl %ecx,16(%esp) movl %ecx,20(%esp) movl %ecx,24(%esp) movl %ebp,28(%esp) .byte 102,15,58,22,251,3 .byte 102,15,58,34,253,3 movl 240(%edx),%ecx bswap %ebx pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movdqa (%esp),%xmm2 .byte 102,15,58,34,195,0 leal 3(%ebx),%ebp .byte 102,15,58,34,205,0 incl %ebx .byte 102,15,58,34,195,1 incl %ebp .byte 102,15,58,34,205,1 incl %ebx .byte 102,15,58,34,195,2 incl %ebp .byte 102,15,58,34,205,2 movdqa %xmm0,48(%esp) .byte 102,15,56,0,194 movdqu (%edx),%xmm6 movdqa %xmm1,64(%esp) .byte 102,15,56,0,202 pshufd $192,%xmm0,%xmm2 pshufd $128,%xmm0,%xmm3 cmpl $6,%eax jb .L040ctr32_tail pxor %xmm6,%xmm7 shll $4,%ecx movl $16,%ebx movdqa %xmm7,32(%esp) movl %edx,%ebp subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx subl $6,%eax jmp .L041ctr32_loop6 .align 16 .L041ctr32_loop6: pshufd $64,%xmm0,%xmm4 movdqa 32(%esp),%xmm0 pshufd $192,%xmm1,%xmm5 pxor %xmm0,%xmm2 pshufd $128,%xmm1,%xmm6 pxor %xmm0,%xmm3 pshufd $64,%xmm1,%xmm7 movups 16(%ebp),%xmm1 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 .byte 102,15,56,220,209 pxor %xmm0,%xmm6 pxor %xmm0,%xmm7 .byte 102,15,56,220,217 movups 32(%ebp),%xmm0 movl %ebx,%ecx .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 call .L_aesni_encrypt6_enter movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps %xmm1,%xmm2 movups 32(%esi),%xmm1 xorps %xmm0,%xmm3 movups %xmm2,(%edi) movdqa 16(%esp),%xmm0 xorps %xmm1,%xmm4 movdqa 64(%esp),%xmm1 movups %xmm3,16(%edi) movups %xmm4,32(%edi) paddd %xmm0,%xmm1 paddd 
48(%esp),%xmm0 movdqa (%esp),%xmm2 movups 48(%esi),%xmm3 movups 64(%esi),%xmm4 xorps %xmm3,%xmm5 movups 80(%esi),%xmm3 leal 96(%esi),%esi movdqa %xmm0,48(%esp) .byte 102,15,56,0,194 xorps %xmm4,%xmm6 movups %xmm5,48(%edi) xorps %xmm3,%xmm7 movdqa %xmm1,64(%esp) .byte 102,15,56,0,202 movups %xmm6,64(%edi) pshufd $192,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi pshufd $128,%xmm0,%xmm3 subl $6,%eax jnc .L041ctr32_loop6 addl $6,%eax jz .L042ctr32_ret movdqu (%ebp),%xmm7 movl %ebp,%edx pxor 32(%esp),%xmm7 movl 240(%ebp),%ecx .L040ctr32_tail: por %xmm7,%xmm2 cmpl $2,%eax jb .L043ctr32_one pshufd $64,%xmm0,%xmm4 por %xmm7,%xmm3 je .L044ctr32_two pshufd $192,%xmm1,%xmm5 por %xmm7,%xmm4 cmpl $4,%eax jb .L045ctr32_three pshufd $128,%xmm1,%xmm6 por %xmm7,%xmm5 je .L046ctr32_four por %xmm7,%xmm6 call _aesni_encrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps %xmm1,%xmm2 movups 32(%esi),%xmm1 xorps %xmm0,%xmm3 movups 48(%esi),%xmm0 xorps %xmm1,%xmm4 movups 64(%esi),%xmm1 xorps %xmm0,%xmm5 movups %xmm2,(%edi) xorps %xmm1,%xmm6 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp .L042ctr32_ret .align 16 .L039ctr32_one_shortcut: movups (%ebx),%xmm2 movl 240(%edx),%ecx .L043ctr32_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L047enc1_loop_7: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L047enc1_loop_7 .byte 102,15,56,221,209 movups (%esi),%xmm6 xorps %xmm2,%xmm6 movups %xmm6,(%edi) jmp .L042ctr32_ret .align 16 .L044ctr32_two: call _aesni_encrypt2 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp .L042ctr32_ret .align 16 .L045ctr32_three: call _aesni_encrypt3 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 movups 32(%esi),%xmm7 xorps %xmm6,%xmm3 movups %xmm2,(%edi) xorps %xmm7,%xmm4 movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp .L042ctr32_ret .align 16 .L046ctr32_four: 
call _aesni_encrypt4 movups (%esi),%xmm6 movups 16(%esi),%xmm7 movups 32(%esi),%xmm1 xorps %xmm6,%xmm2 movups 48(%esi),%xmm0 xorps %xmm7,%xmm3 movups %xmm2,(%edi) xorps %xmm1,%xmm4 movups %xmm3,16(%edi) xorps %xmm0,%xmm5 movups %xmm4,32(%edi) movups %xmm5,48(%edi) .L042ctr32_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movl 80(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_ctr32_encrypt_blocks,.-.L_aes_hw_ctr32_encrypt_blocks_begin .globl aes_hw_xts_encrypt .hidden aes_hw_xts_encrypt .type aes_hw_xts_encrypt,@function .align 16 aes_hw_xts_encrypt: .L_aes_hw_xts_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 36(%esp),%edx movl 40(%esp),%esi movl 240(%edx),%ecx movups (%esi),%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L048enc1_loop_8: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L048enc1_loop_8 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl %esp,%ebp subl $120,%esp movl 240(%edx),%ecx andl $-16,%esp movl $135,96(%esp) movl $0,100(%esp) movl $1,104(%esp) movl $0,108(%esp) movl %eax,112(%esp) movl %ebp,116(%esp) movdqa %xmm2,%xmm1 pxor %xmm0,%xmm0 movdqa 96(%esp),%xmm3 pcmpgtd %xmm1,%xmm0 andl $-16,%eax movl %edx,%ebp movl %ecx,%ebx subl $96,%eax jc .L049xts_enc_short shll $4,%ecx movl $16,%ebx subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx jmp .L050xts_enc_loop6 .align 16 .L050xts_enc_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,16(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,32(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 
pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm7 movdqa %xmm1,64(%esp) paddq %xmm1,%xmm1 movups (%ebp),%xmm0 pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 pxor %xmm0,%xmm3 movdqu 48(%esi),%xmm5 pxor %xmm0,%xmm4 movdqu 64(%esi),%xmm6 pxor %xmm0,%xmm5 movdqu 80(%esi),%xmm1 pxor %xmm0,%xmm6 leal 96(%esi),%esi pxor (%esp),%xmm2 movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 pxor 16(%esp),%xmm3 pxor 32(%esp),%xmm4 .byte 102,15,56,220,209 pxor 48(%esp),%xmm5 pxor 64(%esp),%xmm6 .byte 102,15,56,220,217 pxor %xmm0,%xmm7 movups 32(%ebp),%xmm0 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 call .L_aesni_encrypt6_enter movdqa 80(%esp),%xmm1 pxor %xmm0,%xmm0 xorps (%esp),%xmm2 pcmpgtd %xmm1,%xmm0 xorps 16(%esp),%xmm3 movups %xmm2,(%edi) xorps 32(%esp),%xmm4 movups %xmm3,16(%edi) xorps 48(%esp),%xmm5 movups %xmm4,32(%edi) xorps 64(%esp),%xmm6 movups %xmm5,48(%edi) xorps %xmm1,%xmm7 movups %xmm6,64(%edi) pshufd $19,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqa 96(%esp),%xmm3 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 subl $96,%eax jnc .L050xts_enc_loop6 movl 240(%ebp),%ecx movl %ebp,%edx movl %ecx,%ebx .L049xts_enc_short: addl $96,%eax jz .L051xts_enc_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax jb .L052xts_enc_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 je .L053xts_enc_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax jb .L054xts_enc_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 
movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) je .L055xts_enc_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm7 pxor %xmm1,%xmm7 movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 pxor (%esp),%xmm2 movdqu 48(%esi),%xmm5 pxor 16(%esp),%xmm3 movdqu 64(%esi),%xmm6 pxor 32(%esp),%xmm4 leal 80(%esi),%esi pxor 48(%esp),%xmm5 movdqa %xmm7,64(%esp) pxor %xmm7,%xmm6 call _aesni_encrypt6 movaps 64(%esp),%xmm1 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps 32(%esp),%xmm4 movups %xmm2,(%edi) xorps 48(%esp),%xmm5 movups %xmm3,16(%edi) xorps %xmm1,%xmm6 movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi jmp .L056xts_enc_done .align 16 .L052xts_enc_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L057enc1_loop_9: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L057enc1_loop_9 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 jmp .L056xts_enc_done .align 16 .L053xts_enc_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 call _aesni_encrypt2 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 jmp .L056xts_enc_done .align 16 .L054xts_enc_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 leal 48(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 call _aesni_encrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 jmp .L056xts_enc_done .align 16 .L055xts_enc_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 xorps (%esp),%xmm2 movups 48(%esi),%xmm5 leal 64(%esi),%esi xorps 16(%esp),%xmm3 
xorps %xmm7,%xmm4 xorps %xmm6,%xmm5 call _aesni_encrypt4 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) xorps %xmm6,%xmm5 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 jmp .L056xts_enc_done .align 16 .L051xts_enc_done6x: movl 112(%esp),%eax andl $15,%eax jz .L058xts_enc_ret movdqa %xmm1,%xmm5 movl %eax,112(%esp) jmp .L059xts_enc_steal .align 16 .L056xts_enc_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax jz .L058xts_enc_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm5 paddq %xmm1,%xmm1 pand 96(%esp),%xmm5 pxor %xmm1,%xmm5 .L059xts_enc_steal: movzbl (%esi),%ecx movzbl -16(%edi),%edx leal 1(%esi),%esi movb %cl,-16(%edi) movb %dl,(%edi) leal 1(%edi),%edi subl $1,%eax jnz .L059xts_enc_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx movups -16(%edi),%xmm2 xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L060enc1_loop_10: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L060enc1_loop_10 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,-16(%edi) .L058xts_enc_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 movdqa %xmm0,(%esp) pxor %xmm3,%xmm3 movdqa %xmm0,16(%esp) pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movdqa %xmm0,80(%esp) movl 116(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_xts_encrypt,.-.L_aes_hw_xts_encrypt_begin .globl aes_hw_xts_decrypt .hidden aes_hw_xts_decrypt .type aes_hw_xts_decrypt,@function .align 16 aes_hw_xts_decrypt: .L_aes_hw_xts_decrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 36(%esp),%edx movl 40(%esp),%esi movl 240(%edx),%ecx movups (%esi),%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L061enc1_loop_11: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 
16(%edx),%edx jnz .L061enc1_loop_11 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl %esp,%ebp subl $120,%esp andl $-16,%esp xorl %ebx,%ebx testl $15,%eax setnz %bl shll $4,%ebx subl %ebx,%eax movl $135,96(%esp) movl $0,100(%esp) movl $1,104(%esp) movl $0,108(%esp) movl %eax,112(%esp) movl %ebp,116(%esp) movl 240(%edx),%ecx movl %edx,%ebp movl %ecx,%ebx movdqa %xmm2,%xmm1 pxor %xmm0,%xmm0 movdqa 96(%esp),%xmm3 pcmpgtd %xmm1,%xmm0 andl $-16,%eax subl $96,%eax jc .L062xts_dec_short shll $4,%ecx movl $16,%ebx subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx jmp .L063xts_dec_loop6 .align 16 .L063xts_dec_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,16(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,32(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm7 movdqa %xmm1,64(%esp) paddq %xmm1,%xmm1 movups (%ebp),%xmm0 pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 pxor %xmm0,%xmm3 movdqu 48(%esi),%xmm5 pxor %xmm0,%xmm4 movdqu 64(%esi),%xmm6 pxor %xmm0,%xmm5 movdqu 80(%esi),%xmm1 pxor %xmm0,%xmm6 leal 96(%esi),%esi pxor (%esp),%xmm2 movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 pxor 16(%esp),%xmm3 pxor 32(%esp),%xmm4 .byte 102,15,56,222,209 pxor 48(%esp),%xmm5 pxor 64(%esp),%xmm6 .byte 102,15,56,222,217 pxor %xmm0,%xmm7 movups 32(%ebp),%xmm0 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 call .L_aesni_decrypt6_enter movdqa 80(%esp),%xmm1 pxor %xmm0,%xmm0 xorps (%esp),%xmm2 pcmpgtd 
%xmm1,%xmm0 xorps 16(%esp),%xmm3 movups %xmm2,(%edi) xorps 32(%esp),%xmm4 movups %xmm3,16(%edi) xorps 48(%esp),%xmm5 movups %xmm4,32(%edi) xorps 64(%esp),%xmm6 movups %xmm5,48(%edi) xorps %xmm1,%xmm7 movups %xmm6,64(%edi) pshufd $19,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqa 96(%esp),%xmm3 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 subl $96,%eax jnc .L063xts_dec_loop6 movl 240(%ebp),%ecx movl %ebp,%edx movl %ecx,%ebx .L062xts_dec_short: addl $96,%eax jz .L064xts_dec_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax jb .L065xts_dec_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 je .L066xts_dec_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax jb .L067xts_dec_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) je .L068xts_dec_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm7 pxor %xmm1,%xmm7 movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 pxor (%esp),%xmm2 movdqu 48(%esi),%xmm5 pxor 16(%esp),%xmm3 movdqu 64(%esi),%xmm6 pxor 32(%esp),%xmm4 leal 80(%esi),%esi pxor 48(%esp),%xmm5 movdqa %xmm7,64(%esp) pxor %xmm7,%xmm6 call _aesni_decrypt6 movaps 64(%esp),%xmm1 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps 32(%esp),%xmm4 movups %xmm2,(%edi) xorps 48(%esp),%xmm5 movups %xmm3,16(%edi) xorps %xmm1,%xmm6 movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi jmp .L069xts_dec_done .align 16 .L065xts_dec_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L070dec1_loop_12: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx 
jnz .L070dec1_loop_12 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 jmp .L069xts_dec_done .align 16 .L066xts_dec_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 call _aesni_decrypt2 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 jmp .L069xts_dec_done .align 16 .L067xts_dec_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 leal 48(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 call _aesni_decrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 jmp .L069xts_dec_done .align 16 .L068xts_dec_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 xorps (%esp),%xmm2 movups 48(%esi),%xmm5 leal 64(%esi),%esi xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 xorps %xmm6,%xmm5 call _aesni_decrypt4 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) xorps %xmm6,%xmm5 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 jmp .L069xts_dec_done .align 16 .L064xts_dec_done6x: movl 112(%esp),%eax andl $15,%eax jz .L071xts_dec_ret movl %eax,112(%esp) jmp .L072xts_dec_only_one_more .align 16 .L069xts_dec_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax jz .L071xts_dec_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa 96(%esp),%xmm3 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 .L072xts_dec_only_one_more: pshufd $19,%xmm0,%xmm5 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 pand %xmm3,%xmm5 pxor %xmm1,%xmm5 movl %ebp,%edx movl %ebx,%ecx movups (%esi),%xmm2 xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 
.L073dec1_loop_13: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L073dec1_loop_13 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) .L074xts_dec_steal: movzbl 16(%esi),%ecx movzbl (%edi),%edx leal 1(%esi),%esi movb %cl,(%edi) movb %dl,16(%edi) leal 1(%edi),%edi subl $1,%eax jnz .L074xts_dec_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx movups (%edi),%xmm2 xorps %xmm6,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L075dec1_loop_14: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L075dec1_loop_14 .byte 102,15,56,223,209 xorps %xmm6,%xmm2 movups %xmm2,(%edi) .L071xts_dec_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 movdqa %xmm0,(%esp) pxor %xmm3,%xmm3 movdqa %xmm0,16(%esp) pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movdqa %xmm0,80(%esp) movl 116(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_xts_decrypt,.-.L_aes_hw_xts_decrypt_begin .globl aes_hw_cbc_encrypt .hidden aes_hw_cbc_encrypt .type aes_hw_cbc_encrypt,@function .align 16 aes_hw_cbc_encrypt: .L_aes_hw_cbc_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl %esp,%ebx movl 24(%esp),%edi subl $24,%ebx movl 28(%esp),%eax andl $-16,%ebx movl 32(%esp),%edx movl 36(%esp),%ebp testl %eax,%eax jz .L076cbc_abort cmpl $0,40(%esp) xchgl %esp,%ebx movups (%ebp),%xmm7 movl 240(%edx),%ecx movl %edx,%ebp movl %ebx,16(%esp) movl %ecx,%ebx je .L077cbc_decrypt movaps %xmm7,%xmm2 cmpl $16,%eax jb .L078cbc_enc_tail subl $16,%eax jmp .L079cbc_enc_loop .align 16 .L079cbc_enc_loop: movups (%esi),%xmm7 leal 16(%esi),%esi movups (%edx),%xmm0 movups 16(%edx),%xmm1 xorps %xmm0,%xmm7 leal 32(%edx),%edx xorps %xmm7,%xmm2 .L080enc1_loop_15: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L080enc1_loop_15 .byte 102,15,56,221,209 movl 
%ebx,%ecx movl %ebp,%edx movups %xmm2,(%edi) leal 16(%edi),%edi subl $16,%eax jnc .L079cbc_enc_loop addl $16,%eax jnz .L078cbc_enc_tail movaps %xmm2,%xmm7 pxor %xmm2,%xmm2 jmp .L081cbc_ret .L078cbc_enc_tail: movl %eax,%ecx .long 2767451785 movl $16,%ecx subl %eax,%ecx xorl %eax,%eax .long 2868115081 leal -16(%edi),%edi movl %ebx,%ecx movl %edi,%esi movl %ebp,%edx jmp .L079cbc_enc_loop .align 16 .L077cbc_decrypt: cmpl $80,%eax jbe .L082cbc_dec_tail movaps %xmm7,(%esp) subl $80,%eax jmp .L083cbc_dec_loop6_enter .align 16 .L084cbc_dec_loop6: movaps %xmm0,(%esp) movups %xmm7,(%edi) leal 16(%edi),%edi .L083cbc_dec_loop6_enter: movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 movdqu 48(%esi),%xmm5 movdqu 64(%esi),%xmm6 movdqu 80(%esi),%xmm7 call _aesni_decrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps (%esp),%xmm2 xorps %xmm1,%xmm3 movups 32(%esi),%xmm1 xorps %xmm0,%xmm4 movups 48(%esi),%xmm0 xorps %xmm1,%xmm5 movups 64(%esi),%xmm1 xorps %xmm0,%xmm6 movups 80(%esi),%xmm0 xorps %xmm1,%xmm7 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 96(%esi),%esi movups %xmm4,32(%edi) movl %ebx,%ecx movups %xmm5,48(%edi) movl %ebp,%edx movups %xmm6,64(%edi) leal 80(%edi),%edi subl $96,%eax ja .L084cbc_dec_loop6 movaps %xmm7,%xmm2 movaps %xmm0,%xmm7 addl $80,%eax jle .L085cbc_dec_clear_tail_collected movups %xmm2,(%edi) leal 16(%edi),%edi .L082cbc_dec_tail: movups (%esi),%xmm2 movaps %xmm2,%xmm6 cmpl $16,%eax jbe .L086cbc_dec_one movups 16(%esi),%xmm3 movaps %xmm3,%xmm5 cmpl $32,%eax jbe .L087cbc_dec_two movups 32(%esi),%xmm4 cmpl $48,%eax jbe .L088cbc_dec_three movups 48(%esi),%xmm5 cmpl $64,%eax jbe .L089cbc_dec_four movups 64(%esi),%xmm6 movaps %xmm7,(%esp) movups (%esi),%xmm2 xorps %xmm7,%xmm7 call _aesni_decrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps (%esp),%xmm2 xorps %xmm1,%xmm3 movups 32(%esi),%xmm1 xorps %xmm0,%xmm4 movups 48(%esi),%xmm0 xorps %xmm1,%xmm5 movups 64(%esi),%xmm7 xorps %xmm0,%xmm6 movups %xmm2,(%edi) movups %xmm3,16(%edi) pxor 
%xmm3,%xmm3 movups %xmm4,32(%edi) pxor %xmm4,%xmm4 movups %xmm5,48(%edi) pxor %xmm5,%xmm5 leal 64(%edi),%edi movaps %xmm6,%xmm2 pxor %xmm6,%xmm6 subl $80,%eax jmp .L090cbc_dec_tail_collected .align 16 .L086cbc_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L091dec1_loop_16: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L091dec1_loop_16 .byte 102,15,56,223,209 xorps %xmm7,%xmm2 movaps %xmm6,%xmm7 subl $16,%eax jmp .L090cbc_dec_tail_collected .align 16 .L087cbc_dec_two: call _aesni_decrypt2 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movaps %xmm3,%xmm2 pxor %xmm3,%xmm3 leal 16(%edi),%edi movaps %xmm5,%xmm7 subl $32,%eax jmp .L090cbc_dec_tail_collected .align 16 .L088cbc_dec_three: call _aesni_decrypt3 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 xorps %xmm5,%xmm4 movups %xmm2,(%edi) movaps %xmm4,%xmm2 pxor %xmm4,%xmm4 movups %xmm3,16(%edi) pxor %xmm3,%xmm3 leal 32(%edi),%edi movups 32(%esi),%xmm7 subl $48,%eax jmp .L090cbc_dec_tail_collected .align 16 .L089cbc_dec_four: call _aesni_decrypt4 movups 16(%esi),%xmm1 movups 32(%esi),%xmm0 xorps %xmm7,%xmm2 movups 48(%esi),%xmm7 xorps %xmm6,%xmm3 movups %xmm2,(%edi) xorps %xmm1,%xmm4 movups %xmm3,16(%edi) pxor %xmm3,%xmm3 xorps %xmm0,%xmm5 movups %xmm4,32(%edi) pxor %xmm4,%xmm4 leal 48(%edi),%edi movaps %xmm5,%xmm2 pxor %xmm5,%xmm5 subl $64,%eax jmp .L090cbc_dec_tail_collected .align 16 .L085cbc_dec_clear_tail_collected: pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 .L090cbc_dec_tail_collected: andl $15,%eax jnz .L092cbc_dec_tail_partial movups %xmm2,(%edi) pxor %xmm0,%xmm0 jmp .L081cbc_ret .align 16 .L092cbc_dec_tail_partial: movaps %xmm2,(%esp) pxor %xmm0,%xmm0 movl $16,%ecx movl %esp,%esi subl %eax,%ecx .long 2767451785 movdqa %xmm2,(%esp) .L081cbc_ret: movl 16(%esp),%esp movl 36(%esp),%ebp pxor %xmm2,%xmm2 pxor %xmm1,%xmm1 movups %xmm7,(%ebp) pxor %xmm7,%xmm7 .L076cbc_abort: popl %edi popl %esi popl %ebx popl %ebp ret 
.size aes_hw_cbc_encrypt,.-.L_aes_hw_cbc_encrypt_begin .globl aes_hw_set_encrypt_key_base .hidden aes_hw_set_encrypt_key_base .type aes_hw_set_encrypt_key_base,@function .align 16 aes_hw_set_encrypt_key_base: .L_aes_hw_set_encrypt_key_base_begin: #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L093pic_for_function_hit .L093pic_for_function_hit: popl %ebx leal BORINGSSL_function_hit+3-.L093pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 4(%esp),%eax movl 8(%esp),%ecx movl 12(%esp),%edx pushl %ebx call .L094pic .L094pic: popl %ebx leal .Lkey_const-.L094pic(%ebx),%ebx movups (%eax),%xmm0 xorps %xmm4,%xmm4 leal 16(%edx),%edx cmpl $256,%ecx je .L09514rounds cmpl $192,%ecx je .L09612rounds cmpl $128,%ecx jne .L097bad_keybits .align 16 .L09810rounds: movl $9,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,200,1 call .L099key_128_cold .byte 102,15,58,223,200,2 call .L100key_128 .byte 102,15,58,223,200,4 call .L100key_128 .byte 102,15,58,223,200,8 call .L100key_128 .byte 102,15,58,223,200,16 call .L100key_128 .byte 102,15,58,223,200,32 call .L100key_128 .byte 102,15,58,223,200,64 call .L100key_128 .byte 102,15,58,223,200,128 call .L100key_128 .byte 102,15,58,223,200,27 call .L100key_128 .byte 102,15,58,223,200,54 call .L100key_128 movups %xmm0,(%edx) movl %ecx,80(%edx) jmp .L101good_key .align 16 .L100key_128: movups %xmm0,(%edx) leal 16(%edx),%edx .L099key_128_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .align 16 .L09612rounds: movq 16(%eax),%xmm2 movl $11,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,202,1 call .L102key_192a_cold .byte 102,15,58,223,202,2 call .L103key_192b .byte 102,15,58,223,202,4 call .L104key_192a .byte 102,15,58,223,202,8 call .L103key_192b .byte 102,15,58,223,202,16 call .L104key_192a .byte 102,15,58,223,202,32 call .L103key_192b .byte 102,15,58,223,202,64 call .L104key_192a .byte 102,15,58,223,202,128 
call .L103key_192b movups %xmm0,(%edx) movl %ecx,48(%edx) jmp .L101good_key .align 16 .L104key_192a: movups %xmm0,(%edx) leal 16(%edx),%edx .align 16 .L102key_192a_cold: movaps %xmm2,%xmm5 .L105key_192b_warm: shufps $16,%xmm0,%xmm4 movdqa %xmm2,%xmm3 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 pslldq $4,%xmm3 xorps %xmm4,%xmm0 pshufd $85,%xmm1,%xmm1 pxor %xmm3,%xmm2 pxor %xmm1,%xmm0 pshufd $255,%xmm0,%xmm3 pxor %xmm3,%xmm2 ret .align 16 .L103key_192b: movaps %xmm0,%xmm3 shufps $68,%xmm0,%xmm5 movups %xmm5,(%edx) shufps $78,%xmm2,%xmm3 movups %xmm3,16(%edx) leal 32(%edx),%edx jmp .L105key_192b_warm .align 16 .L09514rounds: movups 16(%eax),%xmm2 leal 16(%edx),%edx movl $13,%ecx movups %xmm0,-32(%edx) movups %xmm2,-16(%edx) .byte 102,15,58,223,202,1 call .L106key_256a_cold .byte 102,15,58,223,200,1 call .L107key_256b .byte 102,15,58,223,202,2 call .L108key_256a .byte 102,15,58,223,200,2 call .L107key_256b .byte 102,15,58,223,202,4 call .L108key_256a .byte 102,15,58,223,200,4 call .L107key_256b .byte 102,15,58,223,202,8 call .L108key_256a .byte 102,15,58,223,200,8 call .L107key_256b .byte 102,15,58,223,202,16 call .L108key_256a .byte 102,15,58,223,200,16 call .L107key_256b .byte 102,15,58,223,202,32 call .L108key_256a .byte 102,15,58,223,200,32 call .L107key_256b .byte 102,15,58,223,202,64 call .L108key_256a movups %xmm0,(%edx) movl %ecx,16(%edx) xorl %eax,%eax jmp .L101good_key .align 16 .L108key_256a: movups %xmm2,(%edx) leal 16(%edx),%edx .L106key_256a_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .align 16 .L107key_256b: movups %xmm0,(%edx) leal 16(%edx),%edx shufps $16,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $140,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 ret .L101good_key: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 xorl %eax,%eax popl %ebx ret .align 4 .L097bad_keybits: pxor %xmm0,%xmm0 movl 
$-2,%eax popl %ebx ret .size aes_hw_set_encrypt_key_base,.-.L_aes_hw_set_encrypt_key_base_begin .globl aes_hw_set_encrypt_key_alt .hidden aes_hw_set_encrypt_key_alt .type aes_hw_set_encrypt_key_alt,@function .align 16 aes_hw_set_encrypt_key_alt: .L_aes_hw_set_encrypt_key_alt_begin: #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L109pic_for_function_hit .L109pic_for_function_hit: popl %ebx leal BORINGSSL_function_hit+3-.L109pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 4(%esp),%eax movl 8(%esp),%ecx movl 12(%esp),%edx pushl %ebx call .L110pic .L110pic: popl %ebx leal .Lkey_const-.L110pic(%ebx),%ebx movups (%eax),%xmm0 xorps %xmm4,%xmm4 leal 16(%edx),%edx cmpl $256,%ecx je .L11114rounds_alt cmpl $192,%ecx je .L11212rounds_alt cmpl $128,%ecx jne .L113bad_keybits .align 16 .L11410rounds_alt: movdqa (%ebx),%xmm5 movl $8,%ecx movdqa 32(%ebx),%xmm4 movdqa %xmm0,%xmm2 movdqu %xmm0,-16(%edx) .L115loop_key128: .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 leal 16(%edx),%edx movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,-16(%edx) movdqa %xmm0,%xmm2 decl %ecx jnz .L115loop_key128 movdqa 48(%ebx),%xmm4 .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,(%edx) movdqa %xmm0,%xmm2 .byte 102,15,56,0,197 .byte 102,15,56,221,196 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,16(%edx) movl $9,%ecx movl %ecx,96(%edx) jmp .L116good_key .align 16 .L11212rounds_alt: movq 16(%eax),%xmm2 movdqa 16(%ebx),%xmm5 movdqa 32(%ebx),%xmm4 movl $8,%ecx movdqu %xmm0,-16(%edx) .L117loop_key192: movq %xmm2,(%edx) movdqa %xmm2,%xmm1 .byte 102,15,56,0,213 .byte 
102,15,56,221,212 pslld $1,%xmm4 leal 24(%edx),%edx movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pshufd $255,%xmm0,%xmm3 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pxor %xmm2,%xmm0 pxor %xmm3,%xmm2 movdqu %xmm0,-16(%edx) decl %ecx jnz .L117loop_key192 movl $11,%ecx movl %ecx,32(%edx) jmp .L116good_key .align 16 .L11114rounds_alt: movups 16(%eax),%xmm2 leal 16(%edx),%edx movdqa (%ebx),%xmm5 movdqa 32(%ebx),%xmm4 movl $7,%ecx movdqu %xmm0,-32(%edx) movdqa %xmm2,%xmm1 movdqu %xmm2,-16(%edx) .L118loop_key256: .byte 102,15,56,0,213 .byte 102,15,56,221,212 movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pslld $1,%xmm4 pxor %xmm2,%xmm0 movdqu %xmm0,(%edx) decl %ecx jz .L119done_key256 pshufd $255,%xmm0,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,221,211 movdqa %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm3,%xmm1 pxor %xmm1,%xmm2 movdqu %xmm2,16(%edx) leal 32(%edx),%edx movdqa %xmm2,%xmm1 jmp .L118loop_key256 .L119done_key256: movl $13,%ecx movl %ecx,16(%edx) .L116good_key: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 xorl %eax,%eax popl %ebx ret .align 4 .L113bad_keybits: pxor %xmm0,%xmm0 movl $-2,%eax popl %ebx ret .size aes_hw_set_encrypt_key_alt,.-.L_aes_hw_set_encrypt_key_alt_begin .globl aes_hw_encrypt_key_to_decrypt_key .hidden aes_hw_encrypt_key_to_decrypt_key .type aes_hw_encrypt_key_to_decrypt_key,@function .align 16 aes_hw_encrypt_key_to_decrypt_key: .L_aes_hw_encrypt_key_to_decrypt_key_begin: movl 4(%esp),%edx movl 240(%edx),%ecx shll $4,%ecx leal 16(%edx,%ecx,1),%eax movups (%edx),%xmm0 movups (%eax),%xmm1 movups %xmm0,(%eax) movups %xmm1,(%edx) leal 16(%edx),%edx leal -16(%eax),%eax .L120dec_key_inverse: movups (%edx),%xmm0 movups (%eax),%xmm1 .byte 102,15,56,219,192 .byte 102,15,56,219,201 leal 
16(%edx),%edx leal -16(%eax),%eax movups %xmm0,16(%eax) movups %xmm1,-16(%edx) cmpl %edx,%eax ja .L120dec_key_inverse movups (%edx),%xmm0 .byte 102,15,56,219,192 movups %xmm0,(%edx) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 ret .size aes_hw_encrypt_key_to_decrypt_key,.-.L_aes_hw_encrypt_key_to_decrypt_key_begin .align 64 .Lkey_const: .long 202313229,202313229,202313229,202313229 .long 67569157,67569157,67569157,67569157 .long 1,1,1,1 .long 27,27,27,27 .byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69 .byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83 .byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 .byte 115,108,46,111,114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesni-x86_64-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _aes_hw_encrypt .private_extern _aes_hw_encrypt .p2align 4 _aes_hw_encrypt: _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,_BORINGSSL_function_hit+1(%rip) #endif movups (%rdi),%xmm2 movl 240(%rdx),%eax movups (%rdx),%xmm0 movups 16(%rdx),%xmm1 leaq 32(%rdx),%rdx xorps %xmm0,%xmm2 L$oop_enc1_1: .byte 102,15,56,220,209 decl %eax movups (%rdx),%xmm1 leaq 16(%rdx),%rdx jnz L$oop_enc1_1 .byte 102,15,56,221,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 ret .globl _aes_hw_decrypt .private_extern _aes_hw_decrypt .p2align 4 _aes_hw_decrypt: _CET_ENDBR movups (%rdi),%xmm2 movl 240(%rdx),%eax movups (%rdx),%xmm0 movups 16(%rdx),%xmm1 leaq 32(%rdx),%rdx xorps %xmm0,%xmm2 L$oop_dec1_2: .byte 102,15,56,222,209 decl %eax movups (%rdx),%xmm1 leaq 16(%rdx),%rdx jnz L$oop_dec1_2 .byte 102,15,56,223,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 ret .p2align 4 _aesni_encrypt2: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax addq $16,%rax L$enc_loop2: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%rcx,%rax,1),%xmm0 jnz L$enc_loop2 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 ret .p2align 4 _aesni_decrypt2: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax addq $16,%rax L$dec_loop2: .byte 102,15,56,222,209 .byte 102,15,56,222,217 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,222,208 .byte 102,15,56,222,216 movups -16(%rcx,%rax,1),%xmm0 jnz L$dec_loop2 .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,223,208 .byte 102,15,56,223,216 ret .p2align 4 
_aesni_encrypt3: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 xorps %xmm0,%xmm4 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax addq $16,%rax L$enc_loop3: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 movups -16(%rcx,%rax,1),%xmm0 jnz L$enc_loop3 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 ret .p2align 4 _aesni_decrypt3: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 xorps %xmm0,%xmm4 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax addq $16,%rax L$dec_loop3: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 movups -16(%rcx,%rax,1),%xmm0 jnz L$dec_loop3 .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 ret .p2align 4 _aesni_encrypt4: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 xorps %xmm0,%xmm4 xorps %xmm0,%xmm5 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 0x0f,0x1f,0x00 addq $16,%rax L$enc_loop4: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movups -16(%rcx,%rax,1),%xmm0 jnz L$enc_loop4 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 ret .p2align 4 _aesni_decrypt4: movups (%rcx),%xmm0 shll $4,%eax movups 
16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 xorps %xmm0,%xmm4 xorps %xmm0,%xmm5 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 0x0f,0x1f,0x00 addq $16,%rax L$dec_loop4: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 movups -16(%rcx,%rax,1),%xmm0 jnz L$dec_loop4 .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 ret .p2align 4 _aesni_encrypt6: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,220,209 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,220,217 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,220,225 pxor %xmm0,%xmm7 movups (%rcx,%rax,1),%xmm0 addq $16,%rax jmp L$enc_loop6_enter .p2align 4 L$enc_loop6: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 L$enc_loop6_enter: .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 movups -16(%rcx,%rax,1),%xmm0 jnz L$enc_loop6 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 ret .p2align 4 _aesni_decrypt6: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,222,209 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,222,217 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 
102,15,56,222,225 pxor %xmm0,%xmm7 movups (%rcx,%rax,1),%xmm0 addq $16,%rax jmp L$dec_loop6_enter .p2align 4 L$dec_loop6: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 L$dec_loop6_enter: .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 movups -16(%rcx,%rax,1),%xmm0 jnz L$dec_loop6 .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 .byte 102,15,56,223,240 .byte 102,15,56,223,248 ret .p2align 4 _aesni_encrypt8: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,220,209 pxor %xmm0,%xmm7 pxor %xmm0,%xmm8 .byte 102,15,56,220,217 pxor %xmm0,%xmm9 movups (%rcx,%rax,1),%xmm0 addq $16,%rax jmp L$enc_loop8_inner .p2align 4 L$enc_loop8: .byte 102,15,56,220,209 .byte 102,15,56,220,217 L$enc_loop8_inner: .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 L$enc_loop8_enter: movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups -16(%rcx,%rax,1),%xmm0 jnz L$enc_loop8 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 
102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 .byte 102,68,15,56,221,192 .byte 102,68,15,56,221,200 ret .p2align 4 _aesni_decrypt8: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,222,209 pxor %xmm0,%xmm7 pxor %xmm0,%xmm8 .byte 102,15,56,222,217 pxor %xmm0,%xmm9 movups (%rcx,%rax,1),%xmm0 addq $16,%rax jmp L$dec_loop8_inner .p2align 4 L$dec_loop8: .byte 102,15,56,222,209 .byte 102,15,56,222,217 L$dec_loop8_inner: .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 L$dec_loop8_enter: movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups -16(%rcx,%rax,1),%xmm0 jnz L$dec_loop8 .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 .byte 102,15,56,223,240 .byte 102,15,56,223,248 .byte 102,68,15,56,223,192 .byte 102,68,15,56,223,200 ret .globl _aes_hw_ecb_encrypt .private_extern _aes_hw_ecb_encrypt .p2align 4 _aes_hw_ecb_encrypt: _CET_ENDBR andq $-16,%rdx jz L$ecb_ret movl 240(%rcx),%eax movups (%rcx),%xmm0 movq %rcx,%r11 movl %eax,%r10d testl %r8d,%r8d jz L$ecb_decrypt cmpq $0x80,%rdx jb L$ecb_enc_tail movdqu (%rdi),%xmm2 movdqu 16(%rdi),%xmm3 movdqu 32(%rdi),%xmm4 movdqu 48(%rdi),%xmm5 movdqu 64(%rdi),%xmm6 movdqu 80(%rdi),%xmm7 movdqu 96(%rdi),%xmm8 movdqu 112(%rdi),%xmm9 leaq 128(%rdi),%rdi subq $0x80,%rdx jmp L$ecb_enc_loop8_enter .p2align 4 L$ecb_enc_loop8: movups %xmm2,(%rsi) movq 
%r11,%rcx movdqu (%rdi),%xmm2 movl %r10d,%eax movups %xmm3,16(%rsi) movdqu 16(%rdi),%xmm3 movups %xmm4,32(%rsi) movdqu 32(%rdi),%xmm4 movups %xmm5,48(%rsi) movdqu 48(%rdi),%xmm5 movups %xmm6,64(%rsi) movdqu 64(%rdi),%xmm6 movups %xmm7,80(%rsi) movdqu 80(%rdi),%xmm7 movups %xmm8,96(%rsi) movdqu 96(%rdi),%xmm8 movups %xmm9,112(%rsi) leaq 128(%rsi),%rsi movdqu 112(%rdi),%xmm9 leaq 128(%rdi),%rdi L$ecb_enc_loop8_enter: call _aesni_encrypt8 subq $0x80,%rdx jnc L$ecb_enc_loop8 movups %xmm2,(%rsi) movq %r11,%rcx movups %xmm3,16(%rsi) movl %r10d,%eax movups %xmm4,32(%rsi) movups %xmm5,48(%rsi) movups %xmm6,64(%rsi) movups %xmm7,80(%rsi) movups %xmm8,96(%rsi) movups %xmm9,112(%rsi) leaq 128(%rsi),%rsi addq $0x80,%rdx jz L$ecb_ret L$ecb_enc_tail: movups (%rdi),%xmm2 cmpq $0x20,%rdx jb L$ecb_enc_one movups 16(%rdi),%xmm3 je L$ecb_enc_two movups 32(%rdi),%xmm4 cmpq $0x40,%rdx jb L$ecb_enc_three movups 48(%rdi),%xmm5 je L$ecb_enc_four movups 64(%rdi),%xmm6 cmpq $0x60,%rdx jb L$ecb_enc_five movups 80(%rdi),%xmm7 je L$ecb_enc_six movdqu 96(%rdi),%xmm8 xorps %xmm9,%xmm9 call _aesni_encrypt8 movups %xmm2,(%rsi) movups %xmm3,16(%rsi) movups %xmm4,32(%rsi) movups %xmm5,48(%rsi) movups %xmm6,64(%rsi) movups %xmm7,80(%rsi) movups %xmm8,96(%rsi) jmp L$ecb_ret .p2align 4 L$ecb_enc_one: movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 leaq 32(%rcx),%rcx xorps %xmm0,%xmm2 L$oop_enc1_3: .byte 102,15,56,220,209 decl %eax movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz L$oop_enc1_3 .byte 102,15,56,221,209 movups %xmm2,(%rsi) jmp L$ecb_ret .p2align 4 L$ecb_enc_two: call _aesni_encrypt2 movups %xmm2,(%rsi) movups %xmm3,16(%rsi) jmp L$ecb_ret .p2align 4 L$ecb_enc_three: call _aesni_encrypt3 movups %xmm2,(%rsi) movups %xmm3,16(%rsi) movups %xmm4,32(%rsi) jmp L$ecb_ret .p2align 4 L$ecb_enc_four: call _aesni_encrypt4 movups %xmm2,(%rsi) movups %xmm3,16(%rsi) movups %xmm4,32(%rsi) movups %xmm5,48(%rsi) jmp L$ecb_ret .p2align 4 L$ecb_enc_five: xorps %xmm7,%xmm7 call _aesni_encrypt6 movups %xmm2,(%rsi) movups 
%xmm3,16(%rsi) movups %xmm4,32(%rsi) movups %xmm5,48(%rsi) movups %xmm6,64(%rsi) jmp L$ecb_ret .p2align 4 L$ecb_enc_six: call _aesni_encrypt6 movups %xmm2,(%rsi) movups %xmm3,16(%rsi) movups %xmm4,32(%rsi) movups %xmm5,48(%rsi) movups %xmm6,64(%rsi) movups %xmm7,80(%rsi) jmp L$ecb_ret .p2align 4 L$ecb_decrypt: cmpq $0x80,%rdx jb L$ecb_dec_tail movdqu (%rdi),%xmm2 movdqu 16(%rdi),%xmm3 movdqu 32(%rdi),%xmm4 movdqu 48(%rdi),%xmm5 movdqu 64(%rdi),%xmm6 movdqu 80(%rdi),%xmm7 movdqu 96(%rdi),%xmm8 movdqu 112(%rdi),%xmm9 leaq 128(%rdi),%rdi subq $0x80,%rdx jmp L$ecb_dec_loop8_enter .p2align 4 L$ecb_dec_loop8: movups %xmm2,(%rsi) movq %r11,%rcx movdqu (%rdi),%xmm2 movl %r10d,%eax movups %xmm3,16(%rsi) movdqu 16(%rdi),%xmm3 movups %xmm4,32(%rsi) movdqu 32(%rdi),%xmm4 movups %xmm5,48(%rsi) movdqu 48(%rdi),%xmm5 movups %xmm6,64(%rsi) movdqu 64(%rdi),%xmm6 movups %xmm7,80(%rsi) movdqu 80(%rdi),%xmm7 movups %xmm8,96(%rsi) movdqu 96(%rdi),%xmm8 movups %xmm9,112(%rsi) leaq 128(%rsi),%rsi movdqu 112(%rdi),%xmm9 leaq 128(%rdi),%rdi L$ecb_dec_loop8_enter: call _aesni_decrypt8 movups (%r11),%xmm0 subq $0x80,%rdx jnc L$ecb_dec_loop8 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movq %r11,%rcx movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 movl %r10d,%eax movups %xmm4,32(%rsi) pxor %xmm4,%xmm4 movups %xmm5,48(%rsi) pxor %xmm5,%xmm5 movups %xmm6,64(%rsi) pxor %xmm6,%xmm6 movups %xmm7,80(%rsi) pxor %xmm7,%xmm7 movups %xmm8,96(%rsi) pxor %xmm8,%xmm8 movups %xmm9,112(%rsi) pxor %xmm9,%xmm9 leaq 128(%rsi),%rsi addq $0x80,%rdx jz L$ecb_ret L$ecb_dec_tail: movups (%rdi),%xmm2 cmpq $0x20,%rdx jb L$ecb_dec_one movups 16(%rdi),%xmm3 je L$ecb_dec_two movups 32(%rdi),%xmm4 cmpq $0x40,%rdx jb L$ecb_dec_three movups 48(%rdi),%xmm5 je L$ecb_dec_four movups 64(%rdi),%xmm6 cmpq $0x60,%rdx jb L$ecb_dec_five movups 80(%rdi),%xmm7 je L$ecb_dec_six movups 96(%rdi),%xmm8 movups (%rcx),%xmm0 xorps %xmm9,%xmm9 call _aesni_decrypt8 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 movups 
%xmm4,32(%rsi) pxor %xmm4,%xmm4 movups %xmm5,48(%rsi) pxor %xmm5,%xmm5 movups %xmm6,64(%rsi) pxor %xmm6,%xmm6 movups %xmm7,80(%rsi) pxor %xmm7,%xmm7 movups %xmm8,96(%rsi) pxor %xmm8,%xmm8 pxor %xmm9,%xmm9 jmp L$ecb_ret .p2align 4 L$ecb_dec_one: movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 leaq 32(%rcx),%rcx xorps %xmm0,%xmm2 L$oop_dec1_4: .byte 102,15,56,222,209 decl %eax movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz L$oop_dec1_4 .byte 102,15,56,223,209 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 jmp L$ecb_ret .p2align 4 L$ecb_dec_two: call _aesni_decrypt2 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 jmp L$ecb_ret .p2align 4 L$ecb_dec_three: call _aesni_decrypt3 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 movups %xmm4,32(%rsi) pxor %xmm4,%xmm4 jmp L$ecb_ret .p2align 4 L$ecb_dec_four: call _aesni_decrypt4 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 movups %xmm4,32(%rsi) pxor %xmm4,%xmm4 movups %xmm5,48(%rsi) pxor %xmm5,%xmm5 jmp L$ecb_ret .p2align 4 L$ecb_dec_five: xorps %xmm7,%xmm7 call _aesni_decrypt6 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 movups %xmm4,32(%rsi) pxor %xmm4,%xmm4 movups %xmm5,48(%rsi) pxor %xmm5,%xmm5 movups %xmm6,64(%rsi) pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 jmp L$ecb_ret .p2align 4 L$ecb_dec_six: call _aesni_decrypt6 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 movups %xmm4,32(%rsi) pxor %xmm4,%xmm4 movups %xmm5,48(%rsi) pxor %xmm5,%xmm5 movups %xmm6,64(%rsi) pxor %xmm6,%xmm6 movups %xmm7,80(%rsi) pxor %xmm7,%xmm7 L$ecb_ret: xorps %xmm0,%xmm0 pxor %xmm1,%xmm1 ret .globl _aes_hw_ctr32_encrypt_blocks .private_extern _aes_hw_ctr32_encrypt_blocks .p2align 4 _aes_hw_ctr32_encrypt_blocks: _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,_BORINGSSL_function_hit(%rip) #endif cmpq $1,%rdx jne L$ctr32_bulk movups (%r8),%xmm2 movups (%rdi),%xmm3 movl 240(%rcx),%edx movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 leaq 
32(%rcx),%rcx xorps %xmm0,%xmm2 L$oop_enc1_5: .byte 102,15,56,220,209 decl %edx movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz L$oop_enc1_5 .byte 102,15,56,221,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 xorps %xmm3,%xmm2 pxor %xmm3,%xmm3 movups %xmm2,(%rsi) xorps %xmm2,%xmm2 jmp L$ctr32_epilogue .p2align 4 L$ctr32_bulk: leaq (%rsp),%r11 pushq %rbp subq $128,%rsp andq $-16,%rsp movdqu (%r8),%xmm2 movdqu (%rcx),%xmm0 movl 12(%r8),%r8d pxor %xmm0,%xmm2 movl 12(%rcx),%ebp movdqa %xmm2,0(%rsp) bswapl %r8d movdqa %xmm2,%xmm3 movdqa %xmm2,%xmm4 movdqa %xmm2,%xmm5 movdqa %xmm2,64(%rsp) movdqa %xmm2,80(%rsp) movdqa %xmm2,96(%rsp) movq %rdx,%r10 movdqa %xmm2,112(%rsp) leaq 1(%r8),%rax leaq 2(%r8),%rdx bswapl %eax bswapl %edx xorl %ebp,%eax xorl %ebp,%edx .byte 102,15,58,34,216,3 leaq 3(%r8),%rax movdqa %xmm3,16(%rsp) .byte 102,15,58,34,226,3 bswapl %eax movq %r10,%rdx leaq 4(%r8),%r10 movdqa %xmm4,32(%rsp) xorl %ebp,%eax bswapl %r10d .byte 102,15,58,34,232,3 xorl %ebp,%r10d movdqa %xmm5,48(%rsp) leaq 5(%r8),%r9 movl %r10d,64+12(%rsp) bswapl %r9d leaq 6(%r8),%r10 movl 240(%rcx),%eax xorl %ebp,%r9d bswapl %r10d movl %r9d,80+12(%rsp) xorl %ebp,%r10d leaq 7(%r8),%r9 movl %r10d,96+12(%rsp) bswapl %r9d xorl %ebp,%r9d movl %r9d,112+12(%rsp) movups 16(%rcx),%xmm1 movdqa 64(%rsp),%xmm6 movdqa 80(%rsp),%xmm7 cmpq $8,%rdx jb L$ctr32_tail leaq 128(%rcx),%rcx subq $8,%rdx jmp L$ctr32_loop8 .p2align 5 L$ctr32_loop8: addl $8,%r8d movdqa 96(%rsp),%xmm8 .byte 102,15,56,220,209 movl %r8d,%r9d movdqa 112(%rsp),%xmm9 .byte 102,15,56,220,217 bswapl %r9d movups 32-128(%rcx),%xmm0 .byte 102,15,56,220,225 xorl %ebp,%r9d nop .byte 102,15,56,220,233 movl %r9d,0+12(%rsp) leaq 1(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 48-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movl %r9d,16+12(%rsp) leaq 2(%r8),%r9 .byte 102,15,56,220,240 
.byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 64-128(%rcx),%xmm0 bswapl %r9d .byte 102,15,56,220,209 .byte 102,15,56,220,217 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movl %r9d,32+12(%rsp) leaq 3(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 80-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movl %r9d,48+12(%rsp) leaq 4(%r8),%r9 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 96-128(%rcx),%xmm0 bswapl %r9d .byte 102,15,56,220,209 .byte 102,15,56,220,217 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movl %r9d,64+12(%rsp) leaq 5(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 112-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movl %r9d,80+12(%rsp) leaq 6(%r8),%r9 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 128-128(%rcx),%xmm0 bswapl %r9d .byte 102,15,56,220,209 .byte 102,15,56,220,217 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movl %r9d,96+12(%rsp) leaq 7(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 144-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 xorl %ebp,%r9d movdqu 0(%rdi),%xmm10 .byte 102,15,56,220,232 movl %r9d,112+12(%rsp) cmpl $11,%eax .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 160-128(%rcx),%xmm0 jb L$ctr32_enc_done .byte 102,15,56,220,209 .byte 
102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 176-128(%rcx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 192-128(%rcx),%xmm0 je L$ctr32_enc_done .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 208-128(%rcx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 224-128(%rcx),%xmm0 jmp L$ctr32_enc_done .p2align 4 L$ctr32_enc_done: movdqu 16(%rdi),%xmm11 pxor %xmm0,%xmm10 movdqu 32(%rdi),%xmm12 pxor %xmm0,%xmm11 movdqu 48(%rdi),%xmm13 pxor %xmm0,%xmm12 movdqu 64(%rdi),%xmm14 pxor %xmm0,%xmm13 movdqu 80(%rdi),%xmm15 pxor %xmm0,%xmm14 prefetcht0 448(%rdi) prefetcht0 512(%rdi) pxor %xmm0,%xmm15 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movdqu 96(%rdi),%xmm1 leaq 128(%rdi),%rdi .byte 102,65,15,56,221,210 pxor %xmm0,%xmm1 movdqu 112-128(%rdi),%xmm10 .byte 102,65,15,56,221,219 pxor %xmm0,%xmm10 movdqa 0(%rsp),%xmm11 .byte 102,65,15,56,221,228 .byte 102,65,15,56,221,237 movdqa 16(%rsp),%xmm12 movdqa 32(%rsp),%xmm13 .byte 102,65,15,56,221,246 .byte 102,65,15,56,221,255 movdqa 48(%rsp),%xmm14 movdqa 64(%rsp),%xmm15 .byte 102,68,15,56,221,193 movdqa 80(%rsp),%xmm0 movups 16-128(%rcx),%xmm1 .byte 102,69,15,56,221,202 movups %xmm2,(%rsi) movdqa %xmm11,%xmm2 movups %xmm3,16(%rsi) movdqa %xmm12,%xmm3 movups %xmm4,32(%rsi) movdqa 
%xmm13,%xmm4 movups %xmm5,48(%rsi) movdqa %xmm14,%xmm5 movups %xmm6,64(%rsi) movdqa %xmm15,%xmm6 movups %xmm7,80(%rsi) movdqa %xmm0,%xmm7 movups %xmm8,96(%rsi) movups %xmm9,112(%rsi) leaq 128(%rsi),%rsi subq $8,%rdx jnc L$ctr32_loop8 addq $8,%rdx jz L$ctr32_done leaq -128(%rcx),%rcx L$ctr32_tail: leaq 16(%rcx),%rcx cmpq $4,%rdx jb L$ctr32_loop3 je L$ctr32_loop4 shll $4,%eax movdqa 96(%rsp),%xmm8 pxor %xmm9,%xmm9 movups 16(%rcx),%xmm0 .byte 102,15,56,220,209 .byte 102,15,56,220,217 leaq 32-16(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,220,225 addq $16,%rax movups (%rdi),%xmm10 .byte 102,15,56,220,233 .byte 102,15,56,220,241 movups 16(%rdi),%xmm11 movups 32(%rdi),%xmm12 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 call L$enc_loop8_enter movdqu 48(%rdi),%xmm13 pxor %xmm10,%xmm2 movdqu 64(%rdi),%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm13,%xmm5 movdqu %xmm4,32(%rsi) pxor %xmm10,%xmm6 movdqu %xmm5,48(%rsi) movdqu %xmm6,64(%rsi) cmpq $6,%rdx jb L$ctr32_done movups 80(%rdi),%xmm11 xorps %xmm11,%xmm7 movups %xmm7,80(%rsi) je L$ctr32_done movups 96(%rdi),%xmm12 xorps %xmm12,%xmm8 movups %xmm8,96(%rsi) jmp L$ctr32_done .p2align 5 L$ctr32_loop4: .byte 102,15,56,220,209 leaq 16(%rcx),%rcx decl %eax .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%rcx),%xmm1 jnz L$ctr32_loop4 .byte 102,15,56,221,209 .byte 102,15,56,221,217 movups (%rdi),%xmm10 movups 16(%rdi),%xmm11 .byte 102,15,56,221,225 .byte 102,15,56,221,233 movups 32(%rdi),%xmm12 movups 48(%rdi),%xmm13 xorps %xmm10,%xmm2 movups %xmm2,(%rsi) xorps %xmm11,%xmm3 movups %xmm3,16(%rsi) pxor %xmm12,%xmm4 movdqu %xmm4,32(%rsi) pxor %xmm13,%xmm5 movdqu %xmm5,48(%rsi) jmp L$ctr32_done .p2align 5 L$ctr32_loop3: .byte 102,15,56,220,209 leaq 16(%rcx),%rcx decl %eax .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%rcx),%xmm1 jnz L$ctr32_loop3 .byte 102,15,56,221,209 .byte 102,15,56,221,217 .byte 102,15,56,221,225 movups (%rdi),%xmm10 
xorps %xmm10,%xmm2 movups %xmm2,(%rsi) cmpq $2,%rdx jb L$ctr32_done movups 16(%rdi),%xmm11 xorps %xmm11,%xmm3 movups %xmm3,16(%rsi) je L$ctr32_done movups 32(%rdi),%xmm12 xorps %xmm12,%xmm4 movups %xmm4,32(%rsi) L$ctr32_done: xorps %xmm0,%xmm0 xorl %ebp,%ebp pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 movaps %xmm0,0(%rsp) pxor %xmm8,%xmm8 movaps %xmm0,16(%rsp) pxor %xmm9,%xmm9 movaps %xmm0,32(%rsp) pxor %xmm10,%xmm10 movaps %xmm0,48(%rsp) pxor %xmm11,%xmm11 movaps %xmm0,64(%rsp) pxor %xmm12,%xmm12 movaps %xmm0,80(%rsp) pxor %xmm13,%xmm13 movaps %xmm0,96(%rsp) pxor %xmm14,%xmm14 movaps %xmm0,112(%rsp) pxor %xmm15,%xmm15 movq -8(%r11),%rbp leaq (%r11),%rsp L$ctr32_epilogue: ret .globl _aes_hw_cbc_encrypt .private_extern _aes_hw_cbc_encrypt .p2align 4 _aes_hw_cbc_encrypt: _CET_ENDBR testq %rdx,%rdx jz L$cbc_ret movl 240(%rcx),%r10d movq %rcx,%r11 testl %r9d,%r9d jz L$cbc_decrypt movups (%r8),%xmm2 movl %r10d,%eax cmpq $16,%rdx jb L$cbc_enc_tail subq $16,%rdx jmp L$cbc_enc_loop .p2align 4 L$cbc_enc_loop: movups (%rdi),%xmm3 leaq 16(%rdi),%rdi movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 xorps %xmm0,%xmm3 leaq 32(%rcx),%rcx xorps %xmm3,%xmm2 L$oop_enc1_6: .byte 102,15,56,220,209 decl %eax movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz L$oop_enc1_6 .byte 102,15,56,221,209 movl %r10d,%eax movq %r11,%rcx movups %xmm2,0(%rsi) leaq 16(%rsi),%rsi subq $16,%rdx jnc L$cbc_enc_loop addq $16,%rdx jnz L$cbc_enc_tail pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%r8) pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 jmp L$cbc_ret L$cbc_enc_tail: movq %rdx,%rcx xchgq %rdi,%rsi .long 0x9066A4F3 movl $16,%ecx subq %rdx,%rcx xorl %eax,%eax .long 0x9066AAF3 leaq -16(%rdi),%rdi movl %r10d,%eax movq %rdi,%rsi movq %r11,%rcx xorq %rdx,%rdx jmp L$cbc_enc_loop .p2align 4 L$cbc_decrypt: cmpq $16,%rdx jne L$cbc_decrypt_bulk movdqu (%rdi),%xmm2 movdqu (%r8),%xmm3 movdqa %xmm2,%xmm4 movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 leaq 32(%rcx),%rcx 
xorps %xmm0,%xmm2 L$oop_dec1_7: .byte 102,15,56,222,209 decl %r10d movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz L$oop_dec1_7 .byte 102,15,56,223,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movdqu %xmm4,(%r8) xorps %xmm3,%xmm2 pxor %xmm3,%xmm3 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 jmp L$cbc_ret .p2align 4 L$cbc_decrypt_bulk: leaq (%rsp),%r11 pushq %rbp subq $16,%rsp andq $-16,%rsp movq %rcx,%rbp movups (%r8),%xmm10 movl %r10d,%eax cmpq $0x50,%rdx jbe L$cbc_dec_tail movups (%rcx),%xmm0 movdqu 0(%rdi),%xmm2 movdqu 16(%rdi),%xmm3 movdqa %xmm2,%xmm11 movdqu 32(%rdi),%xmm4 movdqa %xmm3,%xmm12 movdqu 48(%rdi),%xmm5 movdqa %xmm4,%xmm13 movdqu 64(%rdi),%xmm6 movdqa %xmm5,%xmm14 movdqu 80(%rdi),%xmm7 movdqa %xmm6,%xmm15 cmpq $0x70,%rdx jbe L$cbc_dec_six_or_seven subq $0x70,%rdx leaq 112(%rcx),%rcx jmp L$cbc_dec_loop8_enter .p2align 4 L$cbc_dec_loop8: movups %xmm9,(%rsi) leaq 16(%rsi),%rsi L$cbc_dec_loop8_enter: movdqu 96(%rdi),%xmm8 pxor %xmm0,%xmm2 movdqu 112(%rdi),%xmm9 pxor %xmm0,%xmm3 movups 16-112(%rcx),%xmm1 pxor %xmm0,%xmm4 movq $-1,%rbp cmpq $0x70,%rdx pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 pxor %xmm0,%xmm7 pxor %xmm0,%xmm8 .byte 102,15,56,222,209 pxor %xmm0,%xmm9 movups 32-112(%rcx),%xmm0 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 adcq $0,%rbp andq $128,%rbp .byte 102,68,15,56,222,201 addq %rdi,%rbp movups 48-112(%rcx),%xmm1 .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups 64-112(%rcx),%xmm0 nop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 movups 80-112(%rcx),%xmm1 nop .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 
102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups 96-112(%rcx),%xmm0 nop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 movups 112-112(%rcx),%xmm1 nop .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups 128-112(%rcx),%xmm0 nop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 movups 144-112(%rcx),%xmm1 cmpl $11,%eax .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups 160-112(%rcx),%xmm0 jb L$cbc_dec_done .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 movups 176-112(%rcx),%xmm1 nop .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups 192-112(%rcx),%xmm0 je L$cbc_dec_done .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 movups 208-112(%rcx),%xmm1 nop .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups 224-112(%rcx),%xmm0 jmp 
L$cbc_dec_done .p2align 4 L$cbc_dec_done: .byte 102,15,56,222,209 .byte 102,15,56,222,217 pxor %xmm0,%xmm10 pxor %xmm0,%xmm11 .byte 102,15,56,222,225 .byte 102,15,56,222,233 pxor %xmm0,%xmm12 pxor %xmm0,%xmm13 .byte 102,15,56,222,241 .byte 102,15,56,222,249 pxor %xmm0,%xmm14 pxor %xmm0,%xmm15 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 movdqu 80(%rdi),%xmm1 .byte 102,65,15,56,223,210 movdqu 96(%rdi),%xmm10 pxor %xmm0,%xmm1 .byte 102,65,15,56,223,219 pxor %xmm0,%xmm10 movdqu 112(%rdi),%xmm0 .byte 102,65,15,56,223,228 leaq 128(%rdi),%rdi movdqu 0(%rbp),%xmm11 .byte 102,65,15,56,223,237 .byte 102,65,15,56,223,246 movdqu 16(%rbp),%xmm12 movdqu 32(%rbp),%xmm13 .byte 102,65,15,56,223,255 .byte 102,68,15,56,223,193 movdqu 48(%rbp),%xmm14 movdqu 64(%rbp),%xmm15 .byte 102,69,15,56,223,202 movdqa %xmm0,%xmm10 movdqu 80(%rbp),%xmm1 movups -112(%rcx),%xmm0 movups %xmm2,(%rsi) movdqa %xmm11,%xmm2 movups %xmm3,16(%rsi) movdqa %xmm12,%xmm3 movups %xmm4,32(%rsi) movdqa %xmm13,%xmm4 movups %xmm5,48(%rsi) movdqa %xmm14,%xmm5 movups %xmm6,64(%rsi) movdqa %xmm15,%xmm6 movups %xmm7,80(%rsi) movdqa %xmm1,%xmm7 movups %xmm8,96(%rsi) leaq 112(%rsi),%rsi subq $0x80,%rdx ja L$cbc_dec_loop8 movaps %xmm9,%xmm2 leaq -112(%rcx),%rcx addq $0x70,%rdx jle L$cbc_dec_clear_tail_collected movups %xmm9,(%rsi) leaq 16(%rsi),%rsi cmpq $0x50,%rdx jbe L$cbc_dec_tail movaps %xmm11,%xmm2 L$cbc_dec_six_or_seven: cmpq $0x60,%rdx ja L$cbc_dec_seven movaps %xmm7,%xmm8 call _aesni_decrypt6 pxor %xmm10,%xmm2 movaps %xmm8,%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm3,%xmm3 pxor %xmm13,%xmm5 movdqu %xmm4,32(%rsi) pxor %xmm4,%xmm4 pxor %xmm14,%xmm6 movdqu %xmm5,48(%rsi) pxor %xmm5,%xmm5 pxor %xmm15,%xmm7 movdqu %xmm6,64(%rsi) pxor %xmm6,%xmm6 leaq 80(%rsi),%rsi movdqa %xmm7,%xmm2 pxor %xmm7,%xmm7 jmp L$cbc_dec_tail_collected .p2align 4 L$cbc_dec_seven: movups 96(%rdi),%xmm8 xorps %xmm9,%xmm9 call _aesni_decrypt8 movups 80(%rdi),%xmm9 pxor %xmm10,%xmm2 
movups 96(%rdi),%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm3,%xmm3 pxor %xmm13,%xmm5 movdqu %xmm4,32(%rsi) pxor %xmm4,%xmm4 pxor %xmm14,%xmm6 movdqu %xmm5,48(%rsi) pxor %xmm5,%xmm5 pxor %xmm15,%xmm7 movdqu %xmm6,64(%rsi) pxor %xmm6,%xmm6 pxor %xmm9,%xmm8 movdqu %xmm7,80(%rsi) pxor %xmm7,%xmm7 leaq 96(%rsi),%rsi movdqa %xmm8,%xmm2 pxor %xmm8,%xmm8 pxor %xmm9,%xmm9 jmp L$cbc_dec_tail_collected L$cbc_dec_tail: movups (%rdi),%xmm2 subq $0x10,%rdx jbe L$cbc_dec_one movups 16(%rdi),%xmm3 movaps %xmm2,%xmm11 subq $0x10,%rdx jbe L$cbc_dec_two movups 32(%rdi),%xmm4 movaps %xmm3,%xmm12 subq $0x10,%rdx jbe L$cbc_dec_three movups 48(%rdi),%xmm5 movaps %xmm4,%xmm13 subq $0x10,%rdx jbe L$cbc_dec_four movups 64(%rdi),%xmm6 movaps %xmm5,%xmm14 movaps %xmm6,%xmm15 xorps %xmm7,%xmm7 call _aesni_decrypt6 pxor %xmm10,%xmm2 movaps %xmm15,%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm3,%xmm3 pxor %xmm13,%xmm5 movdqu %xmm4,32(%rsi) pxor %xmm4,%xmm4 pxor %xmm14,%xmm6 movdqu %xmm5,48(%rsi) pxor %xmm5,%xmm5 leaq 64(%rsi),%rsi movdqa %xmm6,%xmm2 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 subq $0x10,%rdx jmp L$cbc_dec_tail_collected .p2align 4 L$cbc_dec_one: movaps %xmm2,%xmm11 movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 leaq 32(%rcx),%rcx xorps %xmm0,%xmm2 L$oop_dec1_8: .byte 102,15,56,222,209 decl %eax movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz L$oop_dec1_8 .byte 102,15,56,223,209 xorps %xmm10,%xmm2 movaps %xmm11,%xmm10 jmp L$cbc_dec_tail_collected .p2align 4 L$cbc_dec_two: movaps %xmm3,%xmm12 call _aesni_decrypt2 pxor %xmm10,%xmm2 movaps %xmm12,%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) movdqa %xmm3,%xmm2 pxor %xmm3,%xmm3 leaq 16(%rsi),%rsi jmp L$cbc_dec_tail_collected .p2align 4 L$cbc_dec_three: movaps %xmm4,%xmm13 call _aesni_decrypt3 pxor %xmm10,%xmm2 movaps %xmm13,%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm3,%xmm3 movdqa %xmm4,%xmm2 pxor 
%xmm4,%xmm4 leaq 32(%rsi),%rsi jmp L$cbc_dec_tail_collected .p2align 4 L$cbc_dec_four: movaps %xmm5,%xmm14 call _aesni_decrypt4 pxor %xmm10,%xmm2 movaps %xmm14,%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm3,%xmm3 pxor %xmm13,%xmm5 movdqu %xmm4,32(%rsi) pxor %xmm4,%xmm4 movdqa %xmm5,%xmm2 pxor %xmm5,%xmm5 leaq 48(%rsi),%rsi jmp L$cbc_dec_tail_collected .p2align 4 L$cbc_dec_clear_tail_collected: pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 pxor %xmm8,%xmm8 pxor %xmm9,%xmm9 L$cbc_dec_tail_collected: movups %xmm10,(%r8) andq $15,%rdx jnz L$cbc_dec_tail_partial movups %xmm2,(%rsi) pxor %xmm2,%xmm2 jmp L$cbc_dec_ret .p2align 4 L$cbc_dec_tail_partial: movaps %xmm2,(%rsp) pxor %xmm2,%xmm2 movq $16,%rcx movq %rsi,%rdi subq %rdx,%rcx leaq (%rsp),%rsi .long 0x9066A4F3 movdqa %xmm2,(%rsp) L$cbc_dec_ret: xorps %xmm0,%xmm0 pxor %xmm1,%xmm1 movq -8(%r11),%rbp leaq (%r11),%rsp L$cbc_ret: ret .globl _aes_hw_encrypt_key_to_decrypt_key .private_extern _aes_hw_encrypt_key_to_decrypt_key .p2align 4 _aes_hw_encrypt_key_to_decrypt_key: _CET_ENDBR movl 240(%rdi),%esi shll $4,%esi leaq 16(%rdi,%rsi,1),%rdx movups (%rdi),%xmm0 movups (%rdx),%xmm1 movups %xmm0,(%rdx) movups %xmm1,(%rdi) leaq 16(%rdi),%rdi leaq -16(%rdx),%rdx L$dec_key_inverse: movups (%rdi),%xmm0 movups (%rdx),%xmm1 .byte 102,15,56,219,192 .byte 102,15,56,219,201 leaq 16(%rdi),%rdi leaq -16(%rdx),%rdx movups %xmm0,16(%rdx) movups %xmm1,-16(%rdi) cmpq %rdi,%rdx ja L$dec_key_inverse movups (%rdi),%xmm0 .byte 102,15,56,219,192 pxor %xmm1,%xmm1 movups %xmm0,(%rdx) pxor %xmm0,%xmm0 ret .globl _aes_hw_set_encrypt_key_base .private_extern _aes_hw_set_encrypt_key_base .p2align 4 _aes_hw_set_encrypt_key_base: _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,_BORINGSSL_function_hit+3(%rip) #endif subq $8,%rsp movups (%rdi),%xmm0 xorps %xmm4,%xmm4 leaq 16(%rdx),%rax cmpl $256,%esi je L$14rounds cmpl $192,%esi je L$12rounds cmpl $128,%esi jne 
L$bad_keybits L$10rounds: movl $9,%esi movups %xmm0,(%rdx) .byte 102,15,58,223,200,1 call L$key_expansion_128_cold .byte 102,15,58,223,200,2 call L$key_expansion_128 .byte 102,15,58,223,200,4 call L$key_expansion_128 .byte 102,15,58,223,200,8 call L$key_expansion_128 .byte 102,15,58,223,200,16 call L$key_expansion_128 .byte 102,15,58,223,200,32 call L$key_expansion_128 .byte 102,15,58,223,200,64 call L$key_expansion_128 .byte 102,15,58,223,200,128 call L$key_expansion_128 .byte 102,15,58,223,200,27 call L$key_expansion_128 .byte 102,15,58,223,200,54 call L$key_expansion_128 movups %xmm0,(%rax) movl %esi,80(%rax) xorl %eax,%eax jmp L$enc_key_ret .p2align 4 L$12rounds: movq 16(%rdi),%xmm2 movl $11,%esi movups %xmm0,(%rdx) .byte 102,15,58,223,202,1 call L$key_expansion_192a_cold .byte 102,15,58,223,202,2 call L$key_expansion_192b .byte 102,15,58,223,202,4 call L$key_expansion_192a .byte 102,15,58,223,202,8 call L$key_expansion_192b .byte 102,15,58,223,202,16 call L$key_expansion_192a .byte 102,15,58,223,202,32 call L$key_expansion_192b .byte 102,15,58,223,202,64 call L$key_expansion_192a .byte 102,15,58,223,202,128 call L$key_expansion_192b movups %xmm0,(%rax) movl %esi,48(%rax) xorq %rax,%rax jmp L$enc_key_ret .p2align 4 L$14rounds: movups 16(%rdi),%xmm2 movl $13,%esi leaq 16(%rax),%rax movups %xmm0,(%rdx) movups %xmm2,16(%rdx) .byte 102,15,58,223,202,1 call L$key_expansion_256a_cold .byte 102,15,58,223,200,1 call L$key_expansion_256b .byte 102,15,58,223,202,2 call L$key_expansion_256a .byte 102,15,58,223,200,2 call L$key_expansion_256b .byte 102,15,58,223,202,4 call L$key_expansion_256a .byte 102,15,58,223,200,4 call L$key_expansion_256b .byte 102,15,58,223,202,8 call L$key_expansion_256a .byte 102,15,58,223,200,8 call L$key_expansion_256b .byte 102,15,58,223,202,16 call L$key_expansion_256a .byte 102,15,58,223,200,16 call L$key_expansion_256b .byte 102,15,58,223,202,32 call L$key_expansion_256a .byte 102,15,58,223,200,32 call L$key_expansion_256b .byte 
102,15,58,223,202,64 call L$key_expansion_256a movups %xmm0,(%rax) movl %esi,16(%rax) xorq %rax,%rax jmp L$enc_key_ret .p2align 4 L$bad_keybits: movq $-2,%rax L$enc_key_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 addq $8,%rsp ret .p2align 4 L$key_expansion_128: movups %xmm0,(%rax) leaq 16(%rax),%rax L$key_expansion_128_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .p2align 4 L$key_expansion_192a: movups %xmm0,(%rax) leaq 16(%rax),%rax L$key_expansion_192a_cold: movaps %xmm2,%xmm5 L$key_expansion_192b_warm: shufps $16,%xmm0,%xmm4 movdqa %xmm2,%xmm3 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 pslldq $4,%xmm3 xorps %xmm4,%xmm0 pshufd $85,%xmm1,%xmm1 pxor %xmm3,%xmm2 pxor %xmm1,%xmm0 pshufd $255,%xmm0,%xmm3 pxor %xmm3,%xmm2 ret .p2align 4 L$key_expansion_192b: movaps %xmm0,%xmm3 shufps $68,%xmm0,%xmm5 movups %xmm5,(%rax) shufps $78,%xmm2,%xmm3 movups %xmm3,16(%rax) leaq 32(%rax),%rax jmp L$key_expansion_192b_warm .p2align 4 L$key_expansion_256a: movups %xmm2,(%rax) leaq 16(%rax),%rax L$key_expansion_256a_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .p2align 4 L$key_expansion_256b: movups %xmm0,(%rax) leaq 16(%rax),%rax shufps $16,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $140,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 ret .globl _aes_hw_set_encrypt_key_alt .private_extern _aes_hw_set_encrypt_key_alt .p2align 4 _aes_hw_set_encrypt_key_alt: _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,_BORINGSSL_function_hit+3(%rip) #endif subq $8,%rsp movups (%rdi),%xmm0 xorps %xmm4,%xmm4 leaq 16(%rdx),%rax cmpl $256,%esi je L$14rounds_alt cmpl $192,%esi je L$12rounds_alt cmpl $128,%esi jne L$bad_keybits_alt movl $9,%esi movdqa L$key_rotate(%rip),%xmm5 movl $8,%r10d movdqa L$key_rcon1(%rip),%xmm4 movdqa %xmm0,%xmm2 movdqu 
%xmm0,(%rdx) jmp L$oop_key128 .p2align 4 L$oop_key128: .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 leaq 16(%rax),%rax movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,-16(%rax) movdqa %xmm0,%xmm2 decl %r10d jnz L$oop_key128 movdqa L$key_rcon1b(%rip),%xmm4 .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,(%rax) movdqa %xmm0,%xmm2 .byte 102,15,56,0,197 .byte 102,15,56,221,196 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,16(%rax) movl %esi,96(%rax) xorl %eax,%eax jmp L$enc_key_ret_alt .p2align 4 L$12rounds_alt: movq 16(%rdi),%xmm2 movl $11,%esi movdqa L$key_rotate192(%rip),%xmm5 movdqa L$key_rcon1(%rip),%xmm4 movl $8,%r10d movdqu %xmm0,(%rdx) jmp L$oop_key192 .p2align 4 L$oop_key192: movq %xmm2,0(%rax) movdqa %xmm2,%xmm1 .byte 102,15,56,0,213 .byte 102,15,56,221,212 pslld $1,%xmm4 leaq 24(%rax),%rax movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pshufd $0xff,%xmm0,%xmm3 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pxor %xmm2,%xmm0 pxor %xmm3,%xmm2 movdqu %xmm0,-16(%rax) decl %r10d jnz L$oop_key192 movl %esi,32(%rax) xorl %eax,%eax jmp L$enc_key_ret_alt .p2align 4 L$14rounds_alt: movups 16(%rdi),%xmm2 movl $13,%esi leaq 16(%rax),%rax movdqa L$key_rotate(%rip),%xmm5 movdqa L$key_rcon1(%rip),%xmm4 movl $7,%r10d movdqu %xmm0,0(%rdx) movdqa %xmm2,%xmm1 movdqu %xmm2,16(%rdx) jmp L$oop_key256 .p2align 4 L$oop_key256: .byte 102,15,56,0,213 .byte 102,15,56,221,212 movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pslld $1,%xmm4 pxor %xmm2,%xmm0 movdqu %xmm0,(%rax) 
decl %r10d jz L$done_key256 pshufd $0xff,%xmm0,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,221,211 movdqa %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm3,%xmm1 pxor %xmm1,%xmm2 movdqu %xmm2,16(%rax) leaq 32(%rax),%rax movdqa %xmm2,%xmm1 jmp L$oop_key256 L$done_key256: movl %esi,16(%rax) xorl %eax,%eax jmp L$enc_key_ret_alt .p2align 4 L$bad_keybits_alt: movq $-2,%rax L$enc_key_ret_alt: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 addq $8,%rsp ret .section __DATA,__const .p2align 6 L$bswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 L$increment32: .long 6,6,6,0 L$increment64: .long 1,0,0,0 L$xts_magic: .long 0x87,0,1,0 L$increment1: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 L$key_rotate: .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d L$key_rotate192: .long 0x04070605,0x04070605,0x04070605,0x04070605 L$key_rcon1: .long 1,1,1,1 L$key_rcon1b: .long 0x1b,0x1b,0x1b,0x1b .byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .p2align 6 .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesni-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl aes_hw_encrypt .hidden aes_hw_encrypt .type aes_hw_encrypt,@function .align 16 aes_hw_encrypt: .cfi_startproc _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST .extern BORINGSSL_function_hit .hidden BORINGSSL_function_hit movb $1,BORINGSSL_function_hit+1(%rip) #endif movups (%rdi),%xmm2 movl 240(%rdx),%eax movups (%rdx),%xmm0 movups 16(%rdx),%xmm1 leaq 32(%rdx),%rdx xorps %xmm0,%xmm2 .Loop_enc1_1: .byte 102,15,56,220,209 decl %eax movups (%rdx),%xmm1 leaq 16(%rdx),%rdx jnz .Loop_enc1_1 .byte 102,15,56,221,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 ret .cfi_endproc .size aes_hw_encrypt,.-aes_hw_encrypt .globl aes_hw_decrypt .hidden aes_hw_decrypt .type aes_hw_decrypt,@function .align 16 aes_hw_decrypt: .cfi_startproc _CET_ENDBR movups (%rdi),%xmm2 movl 240(%rdx),%eax movups (%rdx),%xmm0 movups 16(%rdx),%xmm1 leaq 32(%rdx),%rdx xorps %xmm0,%xmm2 .Loop_dec1_2: .byte 102,15,56,222,209 decl %eax movups (%rdx),%xmm1 leaq 16(%rdx),%rdx jnz .Loop_dec1_2 .byte 102,15,56,223,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 ret .cfi_endproc .size aes_hw_decrypt, .-aes_hw_decrypt .type _aesni_encrypt2,@function .align 16 _aesni_encrypt2: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax addq $16,%rax .Lenc_loop2: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%rcx,%rax,1),%xmm0 jnz .Lenc_loop2 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 ret .cfi_endproc .size _aesni_encrypt2,.-_aesni_encrypt2 .type _aesni_decrypt2,@function .align 16 _aesni_decrypt2: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 movups 
32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax addq $16,%rax .Ldec_loop2: .byte 102,15,56,222,209 .byte 102,15,56,222,217 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,222,208 .byte 102,15,56,222,216 movups -16(%rcx,%rax,1),%xmm0 jnz .Ldec_loop2 .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,223,208 .byte 102,15,56,223,216 ret .cfi_endproc .size _aesni_decrypt2,.-_aesni_decrypt2 .type _aesni_encrypt3,@function .align 16 _aesni_encrypt3: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 xorps %xmm0,%xmm4 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax addq $16,%rax .Lenc_loop3: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 movups -16(%rcx,%rax,1),%xmm0 jnz .Lenc_loop3 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 ret .cfi_endproc .size _aesni_encrypt3,.-_aesni_encrypt3 .type _aesni_decrypt3,@function .align 16 _aesni_decrypt3: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 xorps %xmm0,%xmm4 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax addq $16,%rax .Ldec_loop3: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 movups -16(%rcx,%rax,1),%xmm0 jnz .Ldec_loop3 .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 ret .cfi_endproc .size _aesni_decrypt3,.-_aesni_decrypt3 .type _aesni_encrypt4,@function .align 16 _aesni_encrypt4: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 xorps %xmm0,%xmm4 
xorps %xmm0,%xmm5 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 0x0f,0x1f,0x00 addq $16,%rax .Lenc_loop4: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movups -16(%rcx,%rax,1),%xmm0 jnz .Lenc_loop4 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 ret .cfi_endproc .size _aesni_encrypt4,.-_aesni_encrypt4 .type _aesni_decrypt4,@function .align 16 _aesni_decrypt4: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 xorps %xmm0,%xmm4 xorps %xmm0,%xmm5 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 0x0f,0x1f,0x00 addq $16,%rax .Ldec_loop4: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 movups -16(%rcx,%rax,1),%xmm0 jnz .Ldec_loop4 .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 ret .cfi_endproc .size _aesni_decrypt4,.-_aesni_decrypt4 .type _aesni_encrypt6,@function .align 16 _aesni_encrypt6: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,220,209 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,220,217 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,220,225 pxor %xmm0,%xmm7 movups (%rcx,%rax,1),%xmm0 addq $16,%rax jmp .Lenc_loop6_enter .align 16 .Lenc_loop6: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 
.Lenc_loop6_enter: .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 movups -16(%rcx,%rax,1),%xmm0 jnz .Lenc_loop6 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 ret .cfi_endproc .size _aesni_encrypt6,.-_aesni_encrypt6 .type _aesni_decrypt6,@function .align 16 _aesni_decrypt6: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,222,209 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,222,217 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,222,225 pxor %xmm0,%xmm7 movups (%rcx,%rax,1),%xmm0 addq $16,%rax jmp .Ldec_loop6_enter .align 16 .Ldec_loop6: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .Ldec_loop6_enter: .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 movups -16(%rcx,%rax,1),%xmm0 jnz .Ldec_loop6 .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 .byte 102,15,56,223,240 .byte 102,15,56,223,248 ret .cfi_endproc .size _aesni_decrypt6,.-_aesni_decrypt6 .type _aesni_encrypt8,@function .align 16 _aesni_encrypt8: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 
pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,220,209 pxor %xmm0,%xmm7 pxor %xmm0,%xmm8 .byte 102,15,56,220,217 pxor %xmm0,%xmm9 movups (%rcx,%rax,1),%xmm0 addq $16,%rax jmp .Lenc_loop8_inner .align 16 .Lenc_loop8: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .Lenc_loop8_inner: .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 .Lenc_loop8_enter: movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups -16(%rcx,%rax,1),%xmm0 jnz .Lenc_loop8 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 .byte 102,68,15,56,221,192 .byte 102,68,15,56,221,200 ret .cfi_endproc .size _aesni_encrypt8,.-_aesni_encrypt8 .type _aesni_decrypt8,@function .align 16 _aesni_decrypt8: .cfi_startproc movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,222,209 pxor %xmm0,%xmm7 pxor %xmm0,%xmm8 .byte 102,15,56,222,217 pxor %xmm0,%xmm9 movups (%rcx,%rax,1),%xmm0 addq $16,%rax jmp .Ldec_loop8_inner .align 16 .Ldec_loop8: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .Ldec_loop8_inner: .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 .Ldec_loop8_enter: movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,222,208 .byte 
102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups -16(%rcx,%rax,1),%xmm0 jnz .Ldec_loop8 .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 .byte 102,15,56,223,240 .byte 102,15,56,223,248 .byte 102,68,15,56,223,192 .byte 102,68,15,56,223,200 ret .cfi_endproc .size _aesni_decrypt8,.-_aesni_decrypt8 .globl aes_hw_ecb_encrypt .hidden aes_hw_ecb_encrypt .type aes_hw_ecb_encrypt,@function .align 16 aes_hw_ecb_encrypt: .cfi_startproc _CET_ENDBR andq $-16,%rdx jz .Lecb_ret movl 240(%rcx),%eax movups (%rcx),%xmm0 movq %rcx,%r11 movl %eax,%r10d testl %r8d,%r8d jz .Lecb_decrypt cmpq $0x80,%rdx jb .Lecb_enc_tail movdqu (%rdi),%xmm2 movdqu 16(%rdi),%xmm3 movdqu 32(%rdi),%xmm4 movdqu 48(%rdi),%xmm5 movdqu 64(%rdi),%xmm6 movdqu 80(%rdi),%xmm7 movdqu 96(%rdi),%xmm8 movdqu 112(%rdi),%xmm9 leaq 128(%rdi),%rdi subq $0x80,%rdx jmp .Lecb_enc_loop8_enter .align 16 .Lecb_enc_loop8: movups %xmm2,(%rsi) movq %r11,%rcx movdqu (%rdi),%xmm2 movl %r10d,%eax movups %xmm3,16(%rsi) movdqu 16(%rdi),%xmm3 movups %xmm4,32(%rsi) movdqu 32(%rdi),%xmm4 movups %xmm5,48(%rsi) movdqu 48(%rdi),%xmm5 movups %xmm6,64(%rsi) movdqu 64(%rdi),%xmm6 movups %xmm7,80(%rsi) movdqu 80(%rdi),%xmm7 movups %xmm8,96(%rsi) movdqu 96(%rdi),%xmm8 movups %xmm9,112(%rsi) leaq 128(%rsi),%rsi movdqu 112(%rdi),%xmm9 leaq 128(%rdi),%rdi .Lecb_enc_loop8_enter: call _aesni_encrypt8 subq $0x80,%rdx jnc .Lecb_enc_loop8 movups %xmm2,(%rsi) movq %r11,%rcx movups %xmm3,16(%rsi) movl %r10d,%eax movups %xmm4,32(%rsi) movups %xmm5,48(%rsi) movups %xmm6,64(%rsi) movups %xmm7,80(%rsi) movups %xmm8,96(%rsi) movups %xmm9,112(%rsi) leaq 128(%rsi),%rsi addq $0x80,%rdx jz .Lecb_ret 
.Lecb_enc_tail: movups (%rdi),%xmm2 cmpq $0x20,%rdx jb .Lecb_enc_one movups 16(%rdi),%xmm3 je .Lecb_enc_two movups 32(%rdi),%xmm4 cmpq $0x40,%rdx jb .Lecb_enc_three movups 48(%rdi),%xmm5 je .Lecb_enc_four movups 64(%rdi),%xmm6 cmpq $0x60,%rdx jb .Lecb_enc_five movups 80(%rdi),%xmm7 je .Lecb_enc_six movdqu 96(%rdi),%xmm8 xorps %xmm9,%xmm9 call _aesni_encrypt8 movups %xmm2,(%rsi) movups %xmm3,16(%rsi) movups %xmm4,32(%rsi) movups %xmm5,48(%rsi) movups %xmm6,64(%rsi) movups %xmm7,80(%rsi) movups %xmm8,96(%rsi) jmp .Lecb_ret .align 16 .Lecb_enc_one: movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 leaq 32(%rcx),%rcx xorps %xmm0,%xmm2 .Loop_enc1_3: .byte 102,15,56,220,209 decl %eax movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz .Loop_enc1_3 .byte 102,15,56,221,209 movups %xmm2,(%rsi) jmp .Lecb_ret .align 16 .Lecb_enc_two: call _aesni_encrypt2 movups %xmm2,(%rsi) movups %xmm3,16(%rsi) jmp .Lecb_ret .align 16 .Lecb_enc_three: call _aesni_encrypt3 movups %xmm2,(%rsi) movups %xmm3,16(%rsi) movups %xmm4,32(%rsi) jmp .Lecb_ret .align 16 .Lecb_enc_four: call _aesni_encrypt4 movups %xmm2,(%rsi) movups %xmm3,16(%rsi) movups %xmm4,32(%rsi) movups %xmm5,48(%rsi) jmp .Lecb_ret .align 16 .Lecb_enc_five: xorps %xmm7,%xmm7 call _aesni_encrypt6 movups %xmm2,(%rsi) movups %xmm3,16(%rsi) movups %xmm4,32(%rsi) movups %xmm5,48(%rsi) movups %xmm6,64(%rsi) jmp .Lecb_ret .align 16 .Lecb_enc_six: call _aesni_encrypt6 movups %xmm2,(%rsi) movups %xmm3,16(%rsi) movups %xmm4,32(%rsi) movups %xmm5,48(%rsi) movups %xmm6,64(%rsi) movups %xmm7,80(%rsi) jmp .Lecb_ret .align 16 .Lecb_decrypt: cmpq $0x80,%rdx jb .Lecb_dec_tail movdqu (%rdi),%xmm2 movdqu 16(%rdi),%xmm3 movdqu 32(%rdi),%xmm4 movdqu 48(%rdi),%xmm5 movdqu 64(%rdi),%xmm6 movdqu 80(%rdi),%xmm7 movdqu 96(%rdi),%xmm8 movdqu 112(%rdi),%xmm9 leaq 128(%rdi),%rdi subq $0x80,%rdx jmp .Lecb_dec_loop8_enter .align 16 .Lecb_dec_loop8: movups %xmm2,(%rsi) movq %r11,%rcx movdqu (%rdi),%xmm2 movl %r10d,%eax movups %xmm3,16(%rsi) movdqu 16(%rdi),%xmm3 movups 
%xmm4,32(%rsi) movdqu 32(%rdi),%xmm4 movups %xmm5,48(%rsi) movdqu 48(%rdi),%xmm5 movups %xmm6,64(%rsi) movdqu 64(%rdi),%xmm6 movups %xmm7,80(%rsi) movdqu 80(%rdi),%xmm7 movups %xmm8,96(%rsi) movdqu 96(%rdi),%xmm8 movups %xmm9,112(%rsi) leaq 128(%rsi),%rsi movdqu 112(%rdi),%xmm9 leaq 128(%rdi),%rdi .Lecb_dec_loop8_enter: call _aesni_decrypt8 movups (%r11),%xmm0 subq $0x80,%rdx jnc .Lecb_dec_loop8 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movq %r11,%rcx movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 movl %r10d,%eax movups %xmm4,32(%rsi) pxor %xmm4,%xmm4 movups %xmm5,48(%rsi) pxor %xmm5,%xmm5 movups %xmm6,64(%rsi) pxor %xmm6,%xmm6 movups %xmm7,80(%rsi) pxor %xmm7,%xmm7 movups %xmm8,96(%rsi) pxor %xmm8,%xmm8 movups %xmm9,112(%rsi) pxor %xmm9,%xmm9 leaq 128(%rsi),%rsi addq $0x80,%rdx jz .Lecb_ret .Lecb_dec_tail: movups (%rdi),%xmm2 cmpq $0x20,%rdx jb .Lecb_dec_one movups 16(%rdi),%xmm3 je .Lecb_dec_two movups 32(%rdi),%xmm4 cmpq $0x40,%rdx jb .Lecb_dec_three movups 48(%rdi),%xmm5 je .Lecb_dec_four movups 64(%rdi),%xmm6 cmpq $0x60,%rdx jb .Lecb_dec_five movups 80(%rdi),%xmm7 je .Lecb_dec_six movups 96(%rdi),%xmm8 movups (%rcx),%xmm0 xorps %xmm9,%xmm9 call _aesni_decrypt8 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 movups %xmm4,32(%rsi) pxor %xmm4,%xmm4 movups %xmm5,48(%rsi) pxor %xmm5,%xmm5 movups %xmm6,64(%rsi) pxor %xmm6,%xmm6 movups %xmm7,80(%rsi) pxor %xmm7,%xmm7 movups %xmm8,96(%rsi) pxor %xmm8,%xmm8 pxor %xmm9,%xmm9 jmp .Lecb_ret .align 16 .Lecb_dec_one: movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 leaq 32(%rcx),%rcx xorps %xmm0,%xmm2 .Loop_dec1_4: .byte 102,15,56,222,209 decl %eax movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz .Loop_dec1_4 .byte 102,15,56,223,209 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 jmp .Lecb_ret .align 16 .Lecb_dec_two: call _aesni_decrypt2 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 jmp .Lecb_ret .align 16 .Lecb_dec_three: call _aesni_decrypt3 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movups %xmm3,16(%rsi) 
pxor %xmm3,%xmm3 movups %xmm4,32(%rsi) pxor %xmm4,%xmm4 jmp .Lecb_ret .align 16 .Lecb_dec_four: call _aesni_decrypt4 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 movups %xmm4,32(%rsi) pxor %xmm4,%xmm4 movups %xmm5,48(%rsi) pxor %xmm5,%xmm5 jmp .Lecb_ret .align 16 .Lecb_dec_five: xorps %xmm7,%xmm7 call _aesni_decrypt6 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 movups %xmm4,32(%rsi) pxor %xmm4,%xmm4 movups %xmm5,48(%rsi) pxor %xmm5,%xmm5 movups %xmm6,64(%rsi) pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 jmp .Lecb_ret .align 16 .Lecb_dec_six: call _aesni_decrypt6 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 movups %xmm3,16(%rsi) pxor %xmm3,%xmm3 movups %xmm4,32(%rsi) pxor %xmm4,%xmm4 movups %xmm5,48(%rsi) pxor %xmm5,%xmm5 movups %xmm6,64(%rsi) pxor %xmm6,%xmm6 movups %xmm7,80(%rsi) pxor %xmm7,%xmm7 .Lecb_ret: xorps %xmm0,%xmm0 pxor %xmm1,%xmm1 ret .cfi_endproc .size aes_hw_ecb_encrypt,.-aes_hw_ecb_encrypt .globl aes_hw_ctr32_encrypt_blocks .hidden aes_hw_ctr32_encrypt_blocks .type aes_hw_ctr32_encrypt_blocks,@function .align 16 aes_hw_ctr32_encrypt_blocks: .cfi_startproc _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,BORINGSSL_function_hit(%rip) #endif cmpq $1,%rdx jne .Lctr32_bulk movups (%r8),%xmm2 movups (%rdi),%xmm3 movl 240(%rcx),%edx movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 leaq 32(%rcx),%rcx xorps %xmm0,%xmm2 .Loop_enc1_5: .byte 102,15,56,220,209 decl %edx movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz .Loop_enc1_5 .byte 102,15,56,221,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 xorps %xmm3,%xmm2 pxor %xmm3,%xmm3 movups %xmm2,(%rsi) xorps %xmm2,%xmm2 jmp .Lctr32_epilogue .align 16 .Lctr32_bulk: leaq (%rsp),%r11 .cfi_def_cfa_register %r11 pushq %rbp .cfi_offset %rbp,-16 subq $128,%rsp andq $-16,%rsp movdqu (%r8),%xmm2 movdqu (%rcx),%xmm0 movl 12(%r8),%r8d pxor %xmm0,%xmm2 movl 12(%rcx),%ebp movdqa %xmm2,0(%rsp) bswapl %r8d movdqa %xmm2,%xmm3 movdqa %xmm2,%xmm4 movdqa %xmm2,%xmm5 movdqa %xmm2,64(%rsp) movdqa %xmm2,80(%rsp) 
movdqa %xmm2,96(%rsp) movq %rdx,%r10 movdqa %xmm2,112(%rsp) leaq 1(%r8),%rax leaq 2(%r8),%rdx bswapl %eax bswapl %edx xorl %ebp,%eax xorl %ebp,%edx .byte 102,15,58,34,216,3 leaq 3(%r8),%rax movdqa %xmm3,16(%rsp) .byte 102,15,58,34,226,3 bswapl %eax movq %r10,%rdx leaq 4(%r8),%r10 movdqa %xmm4,32(%rsp) xorl %ebp,%eax bswapl %r10d .byte 102,15,58,34,232,3 xorl %ebp,%r10d movdqa %xmm5,48(%rsp) leaq 5(%r8),%r9 movl %r10d,64+12(%rsp) bswapl %r9d leaq 6(%r8),%r10 movl 240(%rcx),%eax xorl %ebp,%r9d bswapl %r10d movl %r9d,80+12(%rsp) xorl %ebp,%r10d leaq 7(%r8),%r9 movl %r10d,96+12(%rsp) bswapl %r9d xorl %ebp,%r9d movl %r9d,112+12(%rsp) movups 16(%rcx),%xmm1 movdqa 64(%rsp),%xmm6 movdqa 80(%rsp),%xmm7 cmpq $8,%rdx jb .Lctr32_tail leaq 128(%rcx),%rcx subq $8,%rdx jmp .Lctr32_loop8 .align 32 .Lctr32_loop8: addl $8,%r8d movdqa 96(%rsp),%xmm8 .byte 102,15,56,220,209 movl %r8d,%r9d movdqa 112(%rsp),%xmm9 .byte 102,15,56,220,217 bswapl %r9d movups 32-128(%rcx),%xmm0 .byte 102,15,56,220,225 xorl %ebp,%r9d nop .byte 102,15,56,220,233 movl %r9d,0+12(%rsp) leaq 1(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 48-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movl %r9d,16+12(%rsp) leaq 2(%r8),%r9 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 64-128(%rcx),%xmm0 bswapl %r9d .byte 102,15,56,220,209 .byte 102,15,56,220,217 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movl %r9d,32+12(%rsp) leaq 3(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 80-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movl %r9d,48+12(%rsp) leaq 4(%r8),%r9 .byte 
102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 96-128(%rcx),%xmm0 bswapl %r9d .byte 102,15,56,220,209 .byte 102,15,56,220,217 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movl %r9d,64+12(%rsp) leaq 5(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 112-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movl %r9d,80+12(%rsp) leaq 6(%r8),%r9 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 128-128(%rcx),%xmm0 bswapl %r9d .byte 102,15,56,220,209 .byte 102,15,56,220,217 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movl %r9d,96+12(%rsp) leaq 7(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 144-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 xorl %ebp,%r9d movdqu 0(%rdi),%xmm10 .byte 102,15,56,220,232 movl %r9d,112+12(%rsp) cmpl $11,%eax .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 160-128(%rcx),%xmm0 jb .Lctr32_enc_done .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 176-128(%rcx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 192-128(%rcx),%xmm0 je .Lctr32_enc_done .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 
102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 208-128(%rcx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 224-128(%rcx),%xmm0 jmp .Lctr32_enc_done .align 16 .Lctr32_enc_done: movdqu 16(%rdi),%xmm11 pxor %xmm0,%xmm10 movdqu 32(%rdi),%xmm12 pxor %xmm0,%xmm11 movdqu 48(%rdi),%xmm13 pxor %xmm0,%xmm12 movdqu 64(%rdi),%xmm14 pxor %xmm0,%xmm13 movdqu 80(%rdi),%xmm15 pxor %xmm0,%xmm14 prefetcht0 448(%rdi) prefetcht0 512(%rdi) pxor %xmm0,%xmm15 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movdqu 96(%rdi),%xmm1 leaq 128(%rdi),%rdi .byte 102,65,15,56,221,210 pxor %xmm0,%xmm1 movdqu 112-128(%rdi),%xmm10 .byte 102,65,15,56,221,219 pxor %xmm0,%xmm10 movdqa 0(%rsp),%xmm11 .byte 102,65,15,56,221,228 .byte 102,65,15,56,221,237 movdqa 16(%rsp),%xmm12 movdqa 32(%rsp),%xmm13 .byte 102,65,15,56,221,246 .byte 102,65,15,56,221,255 movdqa 48(%rsp),%xmm14 movdqa 64(%rsp),%xmm15 .byte 102,68,15,56,221,193 movdqa 80(%rsp),%xmm0 movups 16-128(%rcx),%xmm1 .byte 102,69,15,56,221,202 movups %xmm2,(%rsi) movdqa %xmm11,%xmm2 movups %xmm3,16(%rsi) movdqa %xmm12,%xmm3 movups %xmm4,32(%rsi) movdqa %xmm13,%xmm4 movups %xmm5,48(%rsi) movdqa %xmm14,%xmm5 movups %xmm6,64(%rsi) movdqa %xmm15,%xmm6 movups %xmm7,80(%rsi) movdqa %xmm0,%xmm7 movups %xmm8,96(%rsi) movups %xmm9,112(%rsi) leaq 128(%rsi),%rsi subq $8,%rdx jnc .Lctr32_loop8 addq $8,%rdx jz .Lctr32_done leaq -128(%rcx),%rcx .Lctr32_tail: leaq 16(%rcx),%rcx cmpq $4,%rdx jb .Lctr32_loop3 je .Lctr32_loop4 shll $4,%eax movdqa 96(%rsp),%xmm8 pxor %xmm9,%xmm9 movups 16(%rcx),%xmm0 .byte 102,15,56,220,209 .byte 102,15,56,220,217 leaq 32-16(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,220,225 addq $16,%rax movups (%rdi),%xmm10 .byte 
102,15,56,220,233 .byte 102,15,56,220,241 movups 16(%rdi),%xmm11 movups 32(%rdi),%xmm12 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 call .Lenc_loop8_enter movdqu 48(%rdi),%xmm13 pxor %xmm10,%xmm2 movdqu 64(%rdi),%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm13,%xmm5 movdqu %xmm4,32(%rsi) pxor %xmm10,%xmm6 movdqu %xmm5,48(%rsi) movdqu %xmm6,64(%rsi) cmpq $6,%rdx jb .Lctr32_done movups 80(%rdi),%xmm11 xorps %xmm11,%xmm7 movups %xmm7,80(%rsi) je .Lctr32_done movups 96(%rdi),%xmm12 xorps %xmm12,%xmm8 movups %xmm8,96(%rsi) jmp .Lctr32_done .align 32 .Lctr32_loop4: .byte 102,15,56,220,209 leaq 16(%rcx),%rcx decl %eax .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%rcx),%xmm1 jnz .Lctr32_loop4 .byte 102,15,56,221,209 .byte 102,15,56,221,217 movups (%rdi),%xmm10 movups 16(%rdi),%xmm11 .byte 102,15,56,221,225 .byte 102,15,56,221,233 movups 32(%rdi),%xmm12 movups 48(%rdi),%xmm13 xorps %xmm10,%xmm2 movups %xmm2,(%rsi) xorps %xmm11,%xmm3 movups %xmm3,16(%rsi) pxor %xmm12,%xmm4 movdqu %xmm4,32(%rsi) pxor %xmm13,%xmm5 movdqu %xmm5,48(%rsi) jmp .Lctr32_done .align 32 .Lctr32_loop3: .byte 102,15,56,220,209 leaq 16(%rcx),%rcx decl %eax .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%rcx),%xmm1 jnz .Lctr32_loop3 .byte 102,15,56,221,209 .byte 102,15,56,221,217 .byte 102,15,56,221,225 movups (%rdi),%xmm10 xorps %xmm10,%xmm2 movups %xmm2,(%rsi) cmpq $2,%rdx jb .Lctr32_done movups 16(%rdi),%xmm11 xorps %xmm11,%xmm3 movups %xmm3,16(%rsi) je .Lctr32_done movups 32(%rdi),%xmm12 xorps %xmm12,%xmm4 movups %xmm4,32(%rsi) .Lctr32_done: xorps %xmm0,%xmm0 xorl %ebp,%ebp pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 movaps %xmm0,0(%rsp) pxor %xmm8,%xmm8 movaps %xmm0,16(%rsp) pxor %xmm9,%xmm9 movaps %xmm0,32(%rsp) pxor %xmm10,%xmm10 movaps %xmm0,48(%rsp) pxor %xmm11,%xmm11 movaps %xmm0,64(%rsp) pxor %xmm12,%xmm12 movaps 
%xmm0,80(%rsp) pxor %xmm13,%xmm13 movaps %xmm0,96(%rsp) pxor %xmm14,%xmm14 movaps %xmm0,112(%rsp) pxor %xmm15,%xmm15 movq -8(%r11),%rbp .cfi_restore %rbp leaq (%r11),%rsp .cfi_def_cfa_register %rsp .Lctr32_epilogue: ret .cfi_endproc .size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks .globl aes_hw_cbc_encrypt .hidden aes_hw_cbc_encrypt .type aes_hw_cbc_encrypt,@function .align 16 aes_hw_cbc_encrypt: .cfi_startproc _CET_ENDBR testq %rdx,%rdx jz .Lcbc_ret movl 240(%rcx),%r10d movq %rcx,%r11 testl %r9d,%r9d jz .Lcbc_decrypt movups (%r8),%xmm2 movl %r10d,%eax cmpq $16,%rdx jb .Lcbc_enc_tail subq $16,%rdx jmp .Lcbc_enc_loop .align 16 .Lcbc_enc_loop: movups (%rdi),%xmm3 leaq 16(%rdi),%rdi movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 xorps %xmm0,%xmm3 leaq 32(%rcx),%rcx xorps %xmm3,%xmm2 .Loop_enc1_6: .byte 102,15,56,220,209 decl %eax movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz .Loop_enc1_6 .byte 102,15,56,221,209 movl %r10d,%eax movq %r11,%rcx movups %xmm2,0(%rsi) leaq 16(%rsi),%rsi subq $16,%rdx jnc .Lcbc_enc_loop addq $16,%rdx jnz .Lcbc_enc_tail pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%r8) pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 jmp .Lcbc_ret .Lcbc_enc_tail: movq %rdx,%rcx xchgq %rdi,%rsi .long 0x9066A4F3 movl $16,%ecx subq %rdx,%rcx xorl %eax,%eax .long 0x9066AAF3 leaq -16(%rdi),%rdi movl %r10d,%eax movq %rdi,%rsi movq %r11,%rcx xorq %rdx,%rdx jmp .Lcbc_enc_loop .align 16 .Lcbc_decrypt: cmpq $16,%rdx jne .Lcbc_decrypt_bulk movdqu (%rdi),%xmm2 movdqu (%r8),%xmm3 movdqa %xmm2,%xmm4 movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 leaq 32(%rcx),%rcx xorps %xmm0,%xmm2 .Loop_dec1_7: .byte 102,15,56,222,209 decl %r10d movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz .Loop_dec1_7 .byte 102,15,56,223,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movdqu %xmm4,(%r8) xorps %xmm3,%xmm2 pxor %xmm3,%xmm3 movups %xmm2,(%rsi) pxor %xmm2,%xmm2 jmp .Lcbc_ret .align 16 .Lcbc_decrypt_bulk: leaq (%rsp),%r11 .cfi_def_cfa_register %r11 pushq %rbp .cfi_offset %rbp,-16 subq $16,%rsp andq $-16,%rsp movq 
%rcx,%rbp movups (%r8),%xmm10 movl %r10d,%eax cmpq $0x50,%rdx jbe .Lcbc_dec_tail movups (%rcx),%xmm0 movdqu 0(%rdi),%xmm2 movdqu 16(%rdi),%xmm3 movdqa %xmm2,%xmm11 movdqu 32(%rdi),%xmm4 movdqa %xmm3,%xmm12 movdqu 48(%rdi),%xmm5 movdqa %xmm4,%xmm13 movdqu 64(%rdi),%xmm6 movdqa %xmm5,%xmm14 movdqu 80(%rdi),%xmm7 movdqa %xmm6,%xmm15 cmpq $0x70,%rdx jbe .Lcbc_dec_six_or_seven subq $0x70,%rdx leaq 112(%rcx),%rcx jmp .Lcbc_dec_loop8_enter .align 16 .Lcbc_dec_loop8: movups %xmm9,(%rsi) leaq 16(%rsi),%rsi .Lcbc_dec_loop8_enter: movdqu 96(%rdi),%xmm8 pxor %xmm0,%xmm2 movdqu 112(%rdi),%xmm9 pxor %xmm0,%xmm3 movups 16-112(%rcx),%xmm1 pxor %xmm0,%xmm4 movq $-1,%rbp cmpq $0x70,%rdx pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 pxor %xmm0,%xmm7 pxor %xmm0,%xmm8 .byte 102,15,56,222,209 pxor %xmm0,%xmm9 movups 32-112(%rcx),%xmm0 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 adcq $0,%rbp andq $128,%rbp .byte 102,68,15,56,222,201 addq %rdi,%rbp movups 48-112(%rcx),%xmm1 .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups 64-112(%rcx),%xmm0 nop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 movups 80-112(%rcx),%xmm1 nop .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups 96-112(%rcx),%xmm0 nop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 movups 112-112(%rcx),%xmm1 nop .byte 102,15,56,222,208 .byte 
102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups 128-112(%rcx),%xmm0 nop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 movups 144-112(%rcx),%xmm1 cmpl $11,%eax .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups 160-112(%rcx),%xmm0 jb .Lcbc_dec_done .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 movups 176-112(%rcx),%xmm1 nop .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups 192-112(%rcx),%xmm0 je .Lcbc_dec_done .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 movups 208-112(%rcx),%xmm1 nop .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 .byte 102,68,15,56,222,192 .byte 102,68,15,56,222,200 movups 224-112(%rcx),%xmm0 jmp .Lcbc_dec_done .align 16 .Lcbc_dec_done: .byte 102,15,56,222,209 .byte 102,15,56,222,217 pxor %xmm0,%xmm10 pxor %xmm0,%xmm11 .byte 102,15,56,222,225 .byte 102,15,56,222,233 pxor %xmm0,%xmm12 pxor %xmm0,%xmm13 .byte 102,15,56,222,241 .byte 102,15,56,222,249 pxor %xmm0,%xmm14 pxor %xmm0,%xmm15 .byte 102,68,15,56,222,193 .byte 102,68,15,56,222,201 movdqu 80(%rdi),%xmm1 .byte 
102,65,15,56,223,210 movdqu 96(%rdi),%xmm10 pxor %xmm0,%xmm1 .byte 102,65,15,56,223,219 pxor %xmm0,%xmm10 movdqu 112(%rdi),%xmm0 .byte 102,65,15,56,223,228 leaq 128(%rdi),%rdi movdqu 0(%rbp),%xmm11 .byte 102,65,15,56,223,237 .byte 102,65,15,56,223,246 movdqu 16(%rbp),%xmm12 movdqu 32(%rbp),%xmm13 .byte 102,65,15,56,223,255 .byte 102,68,15,56,223,193 movdqu 48(%rbp),%xmm14 movdqu 64(%rbp),%xmm15 .byte 102,69,15,56,223,202 movdqa %xmm0,%xmm10 movdqu 80(%rbp),%xmm1 movups -112(%rcx),%xmm0 movups %xmm2,(%rsi) movdqa %xmm11,%xmm2 movups %xmm3,16(%rsi) movdqa %xmm12,%xmm3 movups %xmm4,32(%rsi) movdqa %xmm13,%xmm4 movups %xmm5,48(%rsi) movdqa %xmm14,%xmm5 movups %xmm6,64(%rsi) movdqa %xmm15,%xmm6 movups %xmm7,80(%rsi) movdqa %xmm1,%xmm7 movups %xmm8,96(%rsi) leaq 112(%rsi),%rsi subq $0x80,%rdx ja .Lcbc_dec_loop8 movaps %xmm9,%xmm2 leaq -112(%rcx),%rcx addq $0x70,%rdx jle .Lcbc_dec_clear_tail_collected movups %xmm9,(%rsi) leaq 16(%rsi),%rsi cmpq $0x50,%rdx jbe .Lcbc_dec_tail movaps %xmm11,%xmm2 .Lcbc_dec_six_or_seven: cmpq $0x60,%rdx ja .Lcbc_dec_seven movaps %xmm7,%xmm8 call _aesni_decrypt6 pxor %xmm10,%xmm2 movaps %xmm8,%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm3,%xmm3 pxor %xmm13,%xmm5 movdqu %xmm4,32(%rsi) pxor %xmm4,%xmm4 pxor %xmm14,%xmm6 movdqu %xmm5,48(%rsi) pxor %xmm5,%xmm5 pxor %xmm15,%xmm7 movdqu %xmm6,64(%rsi) pxor %xmm6,%xmm6 leaq 80(%rsi),%rsi movdqa %xmm7,%xmm2 pxor %xmm7,%xmm7 jmp .Lcbc_dec_tail_collected .align 16 .Lcbc_dec_seven: movups 96(%rdi),%xmm8 xorps %xmm9,%xmm9 call _aesni_decrypt8 movups 80(%rdi),%xmm9 pxor %xmm10,%xmm2 movups 96(%rdi),%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm3,%xmm3 pxor %xmm13,%xmm5 movdqu %xmm4,32(%rsi) pxor %xmm4,%xmm4 pxor %xmm14,%xmm6 movdqu %xmm5,48(%rsi) pxor %xmm5,%xmm5 pxor %xmm15,%xmm7 movdqu %xmm6,64(%rsi) pxor %xmm6,%xmm6 pxor %xmm9,%xmm8 movdqu %xmm7,80(%rsi) pxor %xmm7,%xmm7 leaq 96(%rsi),%rsi movdqa %xmm8,%xmm2 
pxor %xmm8,%xmm8 pxor %xmm9,%xmm9 jmp .Lcbc_dec_tail_collected .Lcbc_dec_tail: movups (%rdi),%xmm2 subq $0x10,%rdx jbe .Lcbc_dec_one movups 16(%rdi),%xmm3 movaps %xmm2,%xmm11 subq $0x10,%rdx jbe .Lcbc_dec_two movups 32(%rdi),%xmm4 movaps %xmm3,%xmm12 subq $0x10,%rdx jbe .Lcbc_dec_three movups 48(%rdi),%xmm5 movaps %xmm4,%xmm13 subq $0x10,%rdx jbe .Lcbc_dec_four movups 64(%rdi),%xmm6 movaps %xmm5,%xmm14 movaps %xmm6,%xmm15 xorps %xmm7,%xmm7 call _aesni_decrypt6 pxor %xmm10,%xmm2 movaps %xmm15,%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm3,%xmm3 pxor %xmm13,%xmm5 movdqu %xmm4,32(%rsi) pxor %xmm4,%xmm4 pxor %xmm14,%xmm6 movdqu %xmm5,48(%rsi) pxor %xmm5,%xmm5 leaq 64(%rsi),%rsi movdqa %xmm6,%xmm2 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 subq $0x10,%rdx jmp .Lcbc_dec_tail_collected .align 16 .Lcbc_dec_one: movaps %xmm2,%xmm11 movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 leaq 32(%rcx),%rcx xorps %xmm0,%xmm2 .Loop_dec1_8: .byte 102,15,56,222,209 decl %eax movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz .Loop_dec1_8 .byte 102,15,56,223,209 xorps %xmm10,%xmm2 movaps %xmm11,%xmm10 jmp .Lcbc_dec_tail_collected .align 16 .Lcbc_dec_two: movaps %xmm3,%xmm12 call _aesni_decrypt2 pxor %xmm10,%xmm2 movaps %xmm12,%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) movdqa %xmm3,%xmm2 pxor %xmm3,%xmm3 leaq 16(%rsi),%rsi jmp .Lcbc_dec_tail_collected .align 16 .Lcbc_dec_three: movaps %xmm4,%xmm13 call _aesni_decrypt3 pxor %xmm10,%xmm2 movaps %xmm13,%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm3,%xmm3 movdqa %xmm4,%xmm2 pxor %xmm4,%xmm4 leaq 32(%rsi),%rsi jmp .Lcbc_dec_tail_collected .align 16 .Lcbc_dec_four: movaps %xmm5,%xmm14 call _aesni_decrypt4 pxor %xmm10,%xmm2 movaps %xmm14,%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm3,%xmm3 pxor %xmm13,%xmm5 movdqu %xmm4,32(%rsi) pxor %xmm4,%xmm4 movdqa %xmm5,%xmm2 pxor %xmm5,%xmm5 leaq 48(%rsi),%rsi jmp 
.Lcbc_dec_tail_collected .align 16 .Lcbc_dec_clear_tail_collected: pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 pxor %xmm8,%xmm8 pxor %xmm9,%xmm9 .Lcbc_dec_tail_collected: movups %xmm10,(%r8) andq $15,%rdx jnz .Lcbc_dec_tail_partial movups %xmm2,(%rsi) pxor %xmm2,%xmm2 jmp .Lcbc_dec_ret .align 16 .Lcbc_dec_tail_partial: movaps %xmm2,(%rsp) pxor %xmm2,%xmm2 movq $16,%rcx movq %rsi,%rdi subq %rdx,%rcx leaq (%rsp),%rsi .long 0x9066A4F3 movdqa %xmm2,(%rsp) .Lcbc_dec_ret: xorps %xmm0,%xmm0 pxor %xmm1,%xmm1 movq -8(%r11),%rbp .cfi_restore %rbp leaq (%r11),%rsp .cfi_def_cfa_register %rsp .Lcbc_ret: ret .cfi_endproc .size aes_hw_cbc_encrypt,.-aes_hw_cbc_encrypt .globl aes_hw_encrypt_key_to_decrypt_key .hidden aes_hw_encrypt_key_to_decrypt_key .type aes_hw_encrypt_key_to_decrypt_key,@function .align 16 aes_hw_encrypt_key_to_decrypt_key: .cfi_startproc _CET_ENDBR movl 240(%rdi),%esi shll $4,%esi leaq 16(%rdi,%rsi,1),%rdx movups (%rdi),%xmm0 movups (%rdx),%xmm1 movups %xmm0,(%rdx) movups %xmm1,(%rdi) leaq 16(%rdi),%rdi leaq -16(%rdx),%rdx .Ldec_key_inverse: movups (%rdi),%xmm0 movups (%rdx),%xmm1 .byte 102,15,56,219,192 .byte 102,15,56,219,201 leaq 16(%rdi),%rdi leaq -16(%rdx),%rdx movups %xmm0,16(%rdx) movups %xmm1,-16(%rdi) cmpq %rdi,%rdx ja .Ldec_key_inverse movups (%rdi),%xmm0 .byte 102,15,56,219,192 pxor %xmm1,%xmm1 movups %xmm0,(%rdx) pxor %xmm0,%xmm0 ret .cfi_endproc .size aes_hw_encrypt_key_to_decrypt_key,.-aes_hw_encrypt_key_to_decrypt_key .globl aes_hw_set_encrypt_key_base .hidden aes_hw_set_encrypt_key_base .type aes_hw_set_encrypt_key_base,@function .align 16 aes_hw_set_encrypt_key_base: .cfi_startproc _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,BORINGSSL_function_hit+3(%rip) #endif subq $8,%rsp .cfi_adjust_cfa_offset 8 movups (%rdi),%xmm0 xorps %xmm4,%xmm4 leaq 16(%rdx),%rax cmpl $256,%esi je .L14rounds cmpl $192,%esi je .L12rounds cmpl $128,%esi jne .Lbad_keybits .L10rounds: movl $9,%esi movups %xmm0,(%rdx) .byte 
102,15,58,223,200,1 call .Lkey_expansion_128_cold .byte 102,15,58,223,200,2 call .Lkey_expansion_128 .byte 102,15,58,223,200,4 call .Lkey_expansion_128 .byte 102,15,58,223,200,8 call .Lkey_expansion_128 .byte 102,15,58,223,200,16 call .Lkey_expansion_128 .byte 102,15,58,223,200,32 call .Lkey_expansion_128 .byte 102,15,58,223,200,64 call .Lkey_expansion_128 .byte 102,15,58,223,200,128 call .Lkey_expansion_128 .byte 102,15,58,223,200,27 call .Lkey_expansion_128 .byte 102,15,58,223,200,54 call .Lkey_expansion_128 movups %xmm0,(%rax) movl %esi,80(%rax) xorl %eax,%eax jmp .Lenc_key_ret .align 16 .L12rounds: movq 16(%rdi),%xmm2 movl $11,%esi movups %xmm0,(%rdx) .byte 102,15,58,223,202,1 call .Lkey_expansion_192a_cold .byte 102,15,58,223,202,2 call .Lkey_expansion_192b .byte 102,15,58,223,202,4 call .Lkey_expansion_192a .byte 102,15,58,223,202,8 call .Lkey_expansion_192b .byte 102,15,58,223,202,16 call .Lkey_expansion_192a .byte 102,15,58,223,202,32 call .Lkey_expansion_192b .byte 102,15,58,223,202,64 call .Lkey_expansion_192a .byte 102,15,58,223,202,128 call .Lkey_expansion_192b movups %xmm0,(%rax) movl %esi,48(%rax) xorq %rax,%rax jmp .Lenc_key_ret .align 16 .L14rounds: movups 16(%rdi),%xmm2 movl $13,%esi leaq 16(%rax),%rax movups %xmm0,(%rdx) movups %xmm2,16(%rdx) .byte 102,15,58,223,202,1 call .Lkey_expansion_256a_cold .byte 102,15,58,223,200,1 call .Lkey_expansion_256b .byte 102,15,58,223,202,2 call .Lkey_expansion_256a .byte 102,15,58,223,200,2 call .Lkey_expansion_256b .byte 102,15,58,223,202,4 call .Lkey_expansion_256a .byte 102,15,58,223,200,4 call .Lkey_expansion_256b .byte 102,15,58,223,202,8 call .Lkey_expansion_256a .byte 102,15,58,223,200,8 call .Lkey_expansion_256b .byte 102,15,58,223,202,16 call .Lkey_expansion_256a .byte 102,15,58,223,200,16 call .Lkey_expansion_256b .byte 102,15,58,223,202,32 call .Lkey_expansion_256a .byte 102,15,58,223,200,32 call .Lkey_expansion_256b .byte 102,15,58,223,202,64 call .Lkey_expansion_256a movups %xmm0,(%rax) movl 
%esi,16(%rax) xorq %rax,%rax jmp .Lenc_key_ret .align 16 .Lbad_keybits: movq $-2,%rax .Lenc_key_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 addq $8,%rsp .cfi_adjust_cfa_offset -8 ret .cfi_endproc .align 16 .Lkey_expansion_128: .cfi_startproc movups %xmm0,(%rax) leaq 16(%rax),%rax .Lkey_expansion_128_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .cfi_endproc .align 16 .Lkey_expansion_192a: .cfi_startproc movups %xmm0,(%rax) leaq 16(%rax),%rax .Lkey_expansion_192a_cold: movaps %xmm2,%xmm5 .Lkey_expansion_192b_warm: shufps $16,%xmm0,%xmm4 movdqa %xmm2,%xmm3 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 pslldq $4,%xmm3 xorps %xmm4,%xmm0 pshufd $85,%xmm1,%xmm1 pxor %xmm3,%xmm2 pxor %xmm1,%xmm0 pshufd $255,%xmm0,%xmm3 pxor %xmm3,%xmm2 ret .cfi_endproc .align 16 .Lkey_expansion_192b: .cfi_startproc movaps %xmm0,%xmm3 shufps $68,%xmm0,%xmm5 movups %xmm5,(%rax) shufps $78,%xmm2,%xmm3 movups %xmm3,16(%rax) leaq 32(%rax),%rax jmp .Lkey_expansion_192b_warm .cfi_endproc .align 16 .Lkey_expansion_256a: .cfi_startproc movups %xmm2,(%rax) leaq 16(%rax),%rax .Lkey_expansion_256a_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .cfi_endproc .align 16 .Lkey_expansion_256b: .cfi_startproc movups %xmm0,(%rax) leaq 16(%rax),%rax shufps $16,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $140,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 ret .cfi_endproc .size aes_hw_set_encrypt_key_base,.-aes_hw_set_encrypt_key_base .globl aes_hw_set_encrypt_key_alt .hidden aes_hw_set_encrypt_key_alt .type aes_hw_set_encrypt_key_alt,@function .align 16 aes_hw_set_encrypt_key_alt: .cfi_startproc _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,BORINGSSL_function_hit+3(%rip) #endif subq $8,%rsp .cfi_adjust_cfa_offset 8 movups (%rdi),%xmm0 xorps %xmm4,%xmm4 
leaq 16(%rdx),%rax cmpl $256,%esi je .L14rounds_alt cmpl $192,%esi je .L12rounds_alt cmpl $128,%esi jne .Lbad_keybits_alt movl $9,%esi movdqa .Lkey_rotate(%rip),%xmm5 movl $8,%r10d movdqa .Lkey_rcon1(%rip),%xmm4 movdqa %xmm0,%xmm2 movdqu %xmm0,(%rdx) jmp .Loop_key128 .align 16 .Loop_key128: .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 leaq 16(%rax),%rax movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,-16(%rax) movdqa %xmm0,%xmm2 decl %r10d jnz .Loop_key128 movdqa .Lkey_rcon1b(%rip),%xmm4 .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,(%rax) movdqa %xmm0,%xmm2 .byte 102,15,56,0,197 .byte 102,15,56,221,196 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,16(%rax) movl %esi,96(%rax) xorl %eax,%eax jmp .Lenc_key_ret_alt .align 16 .L12rounds_alt: movq 16(%rdi),%xmm2 movl $11,%esi movdqa .Lkey_rotate192(%rip),%xmm5 movdqa .Lkey_rcon1(%rip),%xmm4 movl $8,%r10d movdqu %xmm0,(%rdx) jmp .Loop_key192 .align 16 .Loop_key192: movq %xmm2,0(%rax) movdqa %xmm2,%xmm1 .byte 102,15,56,0,213 .byte 102,15,56,221,212 pslld $1,%xmm4 leaq 24(%rax),%rax movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pshufd $0xff,%xmm0,%xmm3 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pxor %xmm2,%xmm0 pxor %xmm3,%xmm2 movdqu %xmm0,-16(%rax) decl %r10d jnz .Loop_key192 movl %esi,32(%rax) xorl %eax,%eax jmp .Lenc_key_ret_alt .align 16 .L14rounds_alt: movups 16(%rdi),%xmm2 movl $13,%esi leaq 16(%rax),%rax movdqa .Lkey_rotate(%rip),%xmm5 movdqa .Lkey_rcon1(%rip),%xmm4 movl $7,%r10d movdqu %xmm0,0(%rdx) movdqa %xmm2,%xmm1 movdqu %xmm2,16(%rdx) jmp .Loop_key256 .align 
16 .Loop_key256: .byte 102,15,56,0,213 .byte 102,15,56,221,212 movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pslld $1,%xmm4 pxor %xmm2,%xmm0 movdqu %xmm0,(%rax) decl %r10d jz .Ldone_key256 pshufd $0xff,%xmm0,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,221,211 movdqa %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm3,%xmm1 pxor %xmm1,%xmm2 movdqu %xmm2,16(%rax) leaq 32(%rax),%rax movdqa %xmm2,%xmm1 jmp .Loop_key256 .Ldone_key256: movl %esi,16(%rax) xorl %eax,%eax jmp .Lenc_key_ret_alt .align 16 .Lbad_keybits_alt: movq $-2,%rax .Lenc_key_ret_alt: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 addq $8,%rsp .cfi_adjust_cfa_offset -8 ret .cfi_endproc .size aes_hw_set_encrypt_key_alt,.-aes_hw_set_encrypt_key_alt .section .rodata .align 64 .Lbswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .Lincrement32: .long 6,6,6,0 .Lincrement64: .long 1,0,0,0 .Lxts_magic: .long 0x87,0,1,0 .Lincrement1: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 .Lkey_rotate: .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d .Lkey_rotate192: .long 0x04070605,0x04070605,0x04070605,0x04070605 .Lkey_rcon1: .long 1,1,1,1 .Lkey_rcon1b: .long 0x1b,0x1b,0x1b,0x1b .byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 64 .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesv8-armv7-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) #include #if __ARM_MAX_ARCH__>=7 .text .arch armv7-a @ don't confuse not-so-latest binutils with argv8 :-) .fpu neon .code 32 #undef __thumb2__ .align 5 .Lrcon: .long 0x01,0x01,0x01,0x01 .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d @ rotate-n-splat .long 0x1b,0x1b,0x1b,0x1b .text .globl aes_hw_set_encrypt_key .hidden aes_hw_set_encrypt_key .type aes_hw_set_encrypt_key,%function .align 5 aes_hw_set_encrypt_key: .Lenc_key: mov r3,#-2 cmp r1,#128 blt .Lenc_key_abort cmp r1,#256 bgt .Lenc_key_abort tst r1,#0x3f bne .Lenc_key_abort adr r3,.Lrcon cmp r1,#192 veor q0,q0,q0 vld1.8 {q3},[r0]! mov r1,#8 @ reuse r1 vld1.32 {q1,q2},[r3]! blt .Loop128 beq .L192 b .L256 .align 4 .Loop128: vtbl.8 d20,{q3},d4 vtbl.8 d21,{q3},d5 vext.8 q9,q0,q3,#12 vst1.32 {q3},[r2]! .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 subs r1,r1,#1 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q10,q10,q1 veor q3,q3,q9 vshl.u8 q1,q1,#1 veor q3,q3,q10 bne .Loop128 vld1.32 {q1},[r3] vtbl.8 d20,{q3},d4 vtbl.8 d21,{q3},d5 vext.8 q9,q0,q3,#12 vst1.32 {q3},[r2]! .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q10,q10,q1 veor q3,q3,q9 vshl.u8 q1,q1,#1 veor q3,q3,q10 vtbl.8 d20,{q3},d4 vtbl.8 d21,{q3},d5 vext.8 q9,q0,q3,#12 vst1.32 {q3},[r2]! .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q10,q10,q1 veor q3,q3,q9 veor q3,q3,q10 vst1.32 {q3},[r2] add r2,r2,#0x50 mov r12,#10 b .Ldone .align 4 .L192: vld1.8 {d16},[r0]! vmov.i8 q10,#8 @ borrow q10 vst1.32 {q3},[r2]! vsub.i8 q2,q2,q10 @ adjust the mask .Loop192: vtbl.8 d20,{q8},d4 vtbl.8 d21,{q8},d5 vext.8 q9,q0,q3,#12 vst1.32 {d16},[r2]! 
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 subs r1,r1,#1 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q3,q3,q9 vdup.32 q9,d7[1] veor q9,q9,q8 veor q10,q10,q1 vext.8 q8,q0,q8,#12 vshl.u8 q1,q1,#1 veor q8,q8,q9 veor q3,q3,q10 veor q8,q8,q10 vst1.32 {q3},[r2]! bne .Loop192 mov r12,#12 add r2,r2,#0x20 b .Ldone .align 4 .L256: vld1.8 {q8},[r0] mov r1,#7 mov r12,#14 vst1.32 {q3},[r2]! .Loop256: vtbl.8 d20,{q8},d4 vtbl.8 d21,{q8},d5 vext.8 q9,q0,q3,#12 vst1.32 {q8},[r2]! .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 subs r1,r1,#1 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q3,q3,q9 vext.8 q9,q0,q9,#12 veor q10,q10,q1 veor q3,q3,q9 vshl.u8 q1,q1,#1 veor q3,q3,q10 vst1.32 {q3},[r2]! beq .Ldone vdup.32 q10,d7[1] vext.8 q9,q0,q8,#12 .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0 veor q8,q8,q9 vext.8 q9,q0,q9,#12 veor q8,q8,q9 vext.8 q9,q0,q9,#12 veor q8,q8,q9 veor q8,q8,q10 b .Loop256 .Ldone: str r12,[r2] mov r3,#0 .Lenc_key_abort: mov r0,r3 @ return value bx lr .size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key .globl aes_hw_set_decrypt_key .hidden aes_hw_set_decrypt_key .type aes_hw_set_decrypt_key,%function .align 5 aes_hw_set_decrypt_key: stmdb sp!,{r4,lr} bl .Lenc_key cmp r0,#0 bne .Ldec_key_abort sub r2,r2,#240 @ restore original r2 mov r4,#-16 add r0,r2,r12,lsl#4 @ end of key schedule vld1.32 {q0},[r2] vld1.32 {q1},[r0] vst1.32 {q0},[r0],r4 vst1.32 {q1},[r2]! .Loop_imc: vld1.32 {q0},[r2] vld1.32 {q1},[r0] .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 vst1.32 {q0},[r0],r4 vst1.32 {q1},[r2]! cmp r0,r2 bhi .Loop_imc vld1.32 {q0},[r2] .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 vst1.32 {q0},[r0] eor r0,r0,r0 @ return value .Ldec_key_abort: ldmia sp!,{r4,pc} .size aes_hw_set_decrypt_key,.-aes_hw_set_decrypt_key .globl aes_hw_encrypt .hidden aes_hw_encrypt .type aes_hw_encrypt,%function .align 5 aes_hw_encrypt: AARCH64_VALID_CALL_TARGET ldr r3,[r2,#240] vld1.32 {q0},[r2]! vld1.8 {q2},[r0] sub r3,r3,#2 vld1.32 {q1},[r2]! 
.Loop_enc: .byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0 .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2 vld1.32 {q0},[r2]! subs r3,r3,#2 .byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1 .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2 vld1.32 {q1},[r2]! bgt .Loop_enc .byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0 .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2 vld1.32 {q0},[r2] .byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1 veor q2,q2,q0 vst1.8 {q2},[r1] bx lr .size aes_hw_encrypt,.-aes_hw_encrypt .globl aes_hw_decrypt .hidden aes_hw_decrypt .type aes_hw_decrypt,%function .align 5 aes_hw_decrypt: AARCH64_VALID_CALL_TARGET ldr r3,[r2,#240] vld1.32 {q0},[r2]! vld1.8 {q2},[r0] sub r3,r3,#2 vld1.32 {q1},[r2]! .Loop_dec: .byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0 .byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2 vld1.32 {q0},[r2]! subs r3,r3,#2 .byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1 .byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2 vld1.32 {q1},[r2]! bgt .Loop_dec .byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0 .byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2 vld1.32 {q0},[r2] .byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1 veor q2,q2,q0 vst1.8 {q2},[r1] bx lr .size aes_hw_decrypt,.-aes_hw_decrypt .globl aes_hw_cbc_encrypt .hidden aes_hw_cbc_encrypt .type aes_hw_cbc_encrypt,%function .align 5 aes_hw_cbc_encrypt: mov ip,sp stmdb sp!,{r4,r5,r6,r7,r8,lr} vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so ldmia ip,{r4,r5} @ load remaining args subs r2,r2,#16 mov r8,#16 blo .Lcbc_abort moveq r8,#0 cmp r5,#0 @ en- or decrypting? ldr r5,[r3,#240] and r2,r2,#-16 vld1.8 {q6},[r4] vld1.8 {q0},[r0],r8 vld1.32 {q8,q9},[r3] @ load key schedule... sub r5,r5,#6 add r7,r3,r5,lsl#4 @ pointer to last 7 round keys sub r5,r5,#2 vld1.32 {q10,q11},[r7]! vld1.32 {q12,q13},[r7]! vld1.32 {q14,q15},[r7]! 
vld1.32 {q7},[r7] add r7,r3,#32 mov r6,r5 beq .Lcbc_dec cmp r5,#2 veor q0,q0,q6 veor q5,q8,q7 beq .Lcbc_enc128 vld1.32 {q2,q3},[r7] add r7,r3,#16 add r6,r3,#16*4 add r12,r3,#16*5 .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 add r14,r3,#16*6 add r3,r3,#16*7 b .Lenter_cbc_enc .align 4 .Loop_cbc_enc: .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vst1.8 {q6},[r1]! .Lenter_cbc_enc: .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.32 {q8},[r6] cmp r5,#4 .byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.32 {q9},[r12] beq .Lcbc_enc192 .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.32 {q8},[r14] .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.32 {q9},[r3] nop .Lcbc_enc192: .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 subs r2,r2,#16 .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 moveq r8,#0 .byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.8 {q8},[r0],r8 .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 veor q8,q8,q5 .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.32 {q9},[r7] @ re-pre-load rndkey[1] .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15 veor q6,q0,q7 bhs .Loop_cbc_enc vst1.8 {q6},[r1]! b .Lcbc_done .align 5 .Lcbc_enc128: vld1.32 {q2,q3},[r7] .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 b .Lenter_cbc_enc128 .Loop_cbc_enc128: .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vst1.8 {q6},[r1]! 
.Lenter_cbc_enc128: .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 subs r2,r2,#16 .byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 moveq r8,#0 .byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 vld1.8 {q8},[r0],r8 .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 veor q8,q8,q5 .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15 veor q6,q0,q7 bhs .Loop_cbc_enc128 vst1.8 {q6},[r1]! b .Lcbc_done .align 5 .Lcbc_dec: vld1.8 {q10},[r0]! subs r2,r2,#32 @ bias add r6,r5,#2 vorr q3,q0,q0 vorr q1,q0,q0 vorr q11,q10,q10 blo .Lcbc_dec_tail vorr q1,q10,q10 vld1.8 {q10},[r0]! vorr q2,q0,q0 vorr q3,q1,q1 vorr q11,q10,q10 .Loop3x_cbc_dec: .byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.32 {q8},[r7]! subs r6,r6,#2 .byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.32 {q9},[r7]! 
bgt .Loop3x_cbc_dec .byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 veor q4,q6,q7 subs r2,r2,#0x30 veor q5,q2,q7 movlo r6,r2 @ r6, r6, is zero at this point .byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 veor q9,q3,q7 add r0,r0,r6 @ r0 is adjusted in such way that @ at exit from the loop q1-q10 @ are loaded with last "words" vorr q6,q11,q11 mov r7,r3 .byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.8 {q2},[r0]! .byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.8 {q3},[r0]! .byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14 .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0 .byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.8 {q11},[r0]! .byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15 .byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15 .byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15 vld1.32 {q8},[r7]! @ re-pre-load rndkey[0] add r6,r5,#2 veor q4,q4,q0 veor q5,q5,q1 veor q10,q10,q9 vld1.32 {q9},[r7]! @ re-pre-load rndkey[1] vst1.8 {q4},[r1]! vorr q0,q2,q2 vst1.8 {q5},[r1]! vorr q1,q3,q3 vst1.8 {q10},[r1]! 
vorr q10,q11,q11 bhs .Loop3x_cbc_dec cmn r2,#0x30 beq .Lcbc_done nop .Lcbc_dec_tail: .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.32 {q8},[r7]! subs r6,r6,#2 .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 vld1.32 {q9},[r7]! bgt .Lcbc_dec_tail .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 .byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 cmn r2,#0x20 .byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 veor q5,q6,q7 .byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14 .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1 .byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14 .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10 veor q9,q3,q7 .byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15 .byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15 beq .Lcbc_dec_one veor q5,q5,q1 veor q9,q9,q10 vorr q6,q11,q11 vst1.8 {q5},[r1]! vst1.8 {q9},[r1]! b .Lcbc_done .Lcbc_dec_one: veor q5,q5,q10 vorr q6,q11,q11 vst1.8 {q5},[r1]! 
.Lcbc_done: vst1.8 {q6},[r4] .Lcbc_abort: vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} ldmia sp!,{r4,r5,r6,r7,r8,pc} .size aes_hw_cbc_encrypt,.-aes_hw_cbc_encrypt .globl aes_hw_ctr32_encrypt_blocks .hidden aes_hw_ctr32_encrypt_blocks .type aes_hw_ctr32_encrypt_blocks,%function .align 5 aes_hw_ctr32_encrypt_blocks: mov ip,sp stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,lr} vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so ldr r4, [ip] @ load remaining arg ldr r5,[r3,#240] ldr r8, [r4, #12] vld1.32 {q0},[r4] vld1.32 {q8,q9},[r3] @ load key schedule... sub r5,r5,#4 mov r12,#16 cmp r2,#2 add r7,r3,r5,lsl#4 @ pointer to last 5 round keys sub r5,r5,#2 vld1.32 {q12,q13},[r7]! vld1.32 {q14,q15},[r7]! vld1.32 {q7},[r7] add r7,r3,#32 mov r6,r5 movlo r12,#0 @ ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are @ affected by silicon errata #1742098 [0] and #1655431 [1], @ respectively, where the second instruction of an aese/aesmc @ instruction pair may execute twice if an interrupt is taken right @ after the first instruction consumes an input register of which a @ single 32-bit lane has been updated the last time it was modified. @ @ This function uses a counter in one 32-bit lane. The @ could write to q1 and q10 directly, but that trips this bugs. @ We write to q6 and copy to the final register as a workaround. 
@ @ [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice @ [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __ARMEB__ rev r8, r8 #endif add r10, r8, #1 vorr q6,q0,q0 rev r10, r10 vmov.32 d13[1],r10 add r8, r8, #2 vorr q1,q6,q6 bls .Lctr32_tail rev r12, r8 vmov.32 d13[1],r12 sub r2,r2,#3 @ bias vorr q10,q6,q6 b .Loop3x_ctr32 .align 4 .Loop3x_ctr32: .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 .byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8 .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 vld1.32 {q8},[r7]! subs r6,r6,#2 .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 .byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9 .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 vld1.32 {q9},[r7]! bgt .Loop3x_ctr32 .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0 .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 .byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1 vld1.8 {q2},[r0]! add r9,r8,#1 .byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8 .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 vld1.8 {q3},[r0]! rev r9,r9 .byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 .byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 vld1.8 {q11},[r0]! 
mov r7,r3 .byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9 .byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10 .byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 .byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 veor q2,q2,q7 add r10,r8,#2 .byte 0x28,0x23,0xf0,0xf3 @ aese q9,q12 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 veor q3,q3,q7 add r8,r8,#3 .byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 .byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 @ Note the logic to update q0, q1, and q1 is written to work @ around a bug in ARM Cortex-A57 and Cortex-A72 cores running in @ 32-bit mode. See the comment above. veor q11,q11,q7 vmov.32 d13[1], r9 .byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 vorr q0,q6,q6 rev r10,r10 .byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 vmov.32 d13[1], r10 rev r12,r8 .byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 vorr q1,q6,q6 vmov.32 d13[1], r12 .byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 vorr q10,q6,q6 subs r2,r2,#3 .byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15 .byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15 .byte 0x2e,0x23,0xf0,0xf3 @ aese q9,q15 veor q2,q2,q4 vld1.32 {q8},[r7]! @ re-pre-load rndkey[0] vst1.8 {q2},[r1]! veor q3,q3,q5 mov r6,r5 vst1.8 {q3},[r1]! veor q11,q11,q9 vld1.32 {q9},[r7]! @ re-pre-load rndkey[1] vst1.8 {q11},[r1]! bhs .Loop3x_ctr32 adds r2,r2,#3 beq .Lctr32_done cmp r2,#1 mov r12,#16 moveq r12,#0 .Lctr32_tail: .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 vld1.32 {q8},[r7]! subs r6,r6,#2 .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 vld1.32 {q9},[r7]! 
bgt .Lctr32_tail .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 vld1.8 {q2},[r0],r12 .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 vld1.8 {q3},[r0] .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 veor q2,q2,q7 .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14 .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0 .byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14 .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1 veor q3,q3,q7 .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15 .byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15 cmp r2,#1 veor q2,q2,q0 veor q3,q3,q1 vst1.8 {q2},[r1]! beq .Lctr32_done vst1.8 {q3},[r1] .Lctr32_done: vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc} .size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesv8-armv8-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include #if __ARM_MAX_ARCH__>=7 .text .section __TEXT,__const .align 5 Lrcon: .long 0x01,0x01,0x01,0x01 .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat .long 0x1b,0x1b,0x1b,0x1b .text .globl _aes_hw_set_encrypt_key .private_extern _aes_hw_set_encrypt_key .align 5 _aes_hw_set_encrypt_key: Lenc_key: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 mov x3,#-2 cmp w1,#128 b.lt Lenc_key_abort cmp w1,#256 b.gt Lenc_key_abort tst w1,#0x3f b.ne Lenc_key_abort adrp x3,Lrcon@PAGE add x3,x3,Lrcon@PAGEOFF cmp w1,#192 eor v0.16b,v0.16b,v0.16b ld1 {v3.16b},[x0],#16 mov w1,#8 // reuse w1 ld1 {v1.4s,v2.4s},[x3],#32 b.lt Loop128 b.eq L192 b L256 .align 4 Loop128: tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b b.ne Loop128 ld1 {v1.4s},[x3] tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2] add x2,x2,#0x50 mov w12,#10 b Ldone .align 4 L192: ld1 {v4.8b},[x0],#8 movi v6.16b,#8 // borrow v6.16b st1 {v3.4s},[x2],#16 sub v2.16b,v2.16b,v6.16b // adjust the mask Loop192: tbl 
v6.16b,{v4.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v4.8b},[x2],#8 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b dup v5.4s,v3.s[3] eor v5.16b,v5.16b,v4.16b eor v6.16b,v6.16b,v1.16b ext v4.16b,v0.16b,v4.16b,#12 shl v1.16b,v1.16b,#1 eor v4.16b,v4.16b,v5.16b eor v3.16b,v3.16b,v6.16b eor v4.16b,v4.16b,v6.16b st1 {v3.4s},[x2],#16 b.ne Loop192 mov w12,#12 add x2,x2,#0x20 b Ldone .align 4 L256: ld1 {v4.16b},[x0] mov w1,#7 mov w12,#14 st1 {v3.4s},[x2],#16 Loop256: tbl v6.16b,{v4.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v4.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2],#16 b.eq Ldone dup v6.4s,v3.s[3] // just splat ext v5.16b,v0.16b,v4.16b,#12 aese v6.16b,v0.16b eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b eor v4.16b,v4.16b,v6.16b b Loop256 Ldone: str w12,[x2] mov x3,#0 Lenc_key_abort: mov x0,x3 // return value ldr x29,[sp],#16 ret .globl _aes_hw_set_decrypt_key .private_extern _aes_hw_set_decrypt_key .align 5 _aes_hw_set_decrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 bl Lenc_key cmp x0,#0 b.ne Ldec_key_abort sub x2,x2,#240 // restore original x2 mov x4,#-16 add x0,x2,x12,lsl#4 // end of key schedule ld1 {v0.4s},[x2] ld1 {v1.4s},[x0] st1 {v0.4s},[x0],x4 st1 {v1.4s},[x2],#16 Loop_imc: ld1 {v0.4s},[x2] ld1 {v1.4s},[x0] aesimc v0.16b,v0.16b aesimc v1.16b,v1.16b st1 {v0.4s},[x0],x4 st1 {v1.4s},[x2],#16 cmp x0,x2 b.hi Loop_imc ld1 {v0.4s},[x2] aesimc v0.16b,v0.16b st1 {v0.4s},[x0] eor x0,x0,x0 // return value Ldec_key_abort: ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl _aes_hw_encrypt .private_extern _aes_hw_encrypt .align 5 _aes_hw_encrypt: AARCH64_VALID_CALL_TARGET ldr w3,[x2,#240] ld1 {v0.4s},[x2],#16 ld1 {v2.16b},[x0] sub w3,w3,#2 ld1 {v1.4s},[x2],#16 Loop_enc: aese v2.16b,v0.16b aesmc v2.16b,v2.16b ld1 {v0.4s},[x2],#16 subs w3,w3,#2 aese v2.16b,v1.16b aesmc v2.16b,v2.16b ld1 {v1.4s},[x2],#16 b.gt Loop_enc aese v2.16b,v0.16b aesmc v2.16b,v2.16b ld1 {v0.4s},[x2] aese v2.16b,v1.16b eor v2.16b,v2.16b,v0.16b st1 {v2.16b},[x1] ret .globl _aes_hw_decrypt .private_extern _aes_hw_decrypt .align 5 _aes_hw_decrypt: AARCH64_VALID_CALL_TARGET ldr w3,[x2,#240] ld1 {v0.4s},[x2],#16 ld1 {v2.16b},[x0] sub w3,w3,#2 ld1 {v1.4s},[x2],#16 Loop_dec: aesd v2.16b,v0.16b aesimc v2.16b,v2.16b ld1 {v0.4s},[x2],#16 subs w3,w3,#2 aesd v2.16b,v1.16b aesimc v2.16b,v2.16b ld1 {v1.4s},[x2],#16 b.gt Loop_dec aesd v2.16b,v0.16b aesimc v2.16b,v2.16b ld1 {v0.4s},[x2] aesd v2.16b,v1.16b eor v2.16b,v2.16b,v0.16b st1 {v2.16b},[x1] ret .globl _aes_hw_cbc_encrypt .private_extern _aes_hw_cbc_encrypt .align 5 _aes_hw_cbc_encrypt: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 subs x2,x2,#16 mov x8,#16 b.lo Lcbc_abort csel x8,xzr,x8,eq cmp w5,#0 // en- or decrypting? ldr w5,[x3,#240] and x2,x2,#-16 ld1 {v6.16b},[x4] ld1 {v0.16b},[x0],x8 ld1 {v16.4s,v17.4s},[x3] // load key schedule... 
sub w5,w5,#6 add x7,x3,x5,lsl#4 // pointer to last 7 round keys sub w5,w5,#2 ld1 {v18.4s,v19.4s},[x7],#32 ld1 {v20.4s,v21.4s},[x7],#32 ld1 {v22.4s,v23.4s},[x7],#32 ld1 {v7.4s},[x7] add x7,x3,#32 mov w6,w5 b.eq Lcbc_dec cmp w5,#2 eor v0.16b,v0.16b,v6.16b eor v5.16b,v16.16b,v7.16b b.eq Lcbc_enc128 ld1 {v2.4s,v3.4s},[x7] add x7,x3,#16 add x6,x3,#16*4 add x12,x3,#16*5 aese v0.16b,v16.16b aesmc v0.16b,v0.16b add x14,x3,#16*6 add x3,x3,#16*7 b Lenter_cbc_enc .align 4 Loop_cbc_enc: aese v0.16b,v16.16b aesmc v0.16b,v0.16b st1 {v6.16b},[x1],#16 Lenter_cbc_enc: aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v0.16b,v2.16b aesmc v0.16b,v0.16b ld1 {v16.4s},[x6] cmp w5,#4 aese v0.16b,v3.16b aesmc v0.16b,v0.16b ld1 {v17.4s},[x12] b.eq Lcbc_enc192 aese v0.16b,v16.16b aesmc v0.16b,v0.16b ld1 {v16.4s},[x14] aese v0.16b,v17.16b aesmc v0.16b,v0.16b ld1 {v17.4s},[x3] nop Lcbc_enc192: aese v0.16b,v16.16b aesmc v0.16b,v0.16b subs x2,x2,#16 aese v0.16b,v17.16b aesmc v0.16b,v0.16b csel x8,xzr,x8,eq aese v0.16b,v18.16b aesmc v0.16b,v0.16b aese v0.16b,v19.16b aesmc v0.16b,v0.16b ld1 {v16.16b},[x0],x8 aese v0.16b,v20.16b aesmc v0.16b,v0.16b eor v16.16b,v16.16b,v5.16b aese v0.16b,v21.16b aesmc v0.16b,v0.16b ld1 {v17.4s},[x7] // re-pre-load rndkey[1] aese v0.16b,v22.16b aesmc v0.16b,v0.16b aese v0.16b,v23.16b eor v6.16b,v0.16b,v7.16b b.hs Loop_cbc_enc st1 {v6.16b},[x1],#16 b Lcbc_done .align 5 Lcbc_enc128: ld1 {v2.4s,v3.4s},[x7] aese v0.16b,v16.16b aesmc v0.16b,v0.16b b Lenter_cbc_enc128 Loop_cbc_enc128: aese v0.16b,v16.16b aesmc v0.16b,v0.16b st1 {v6.16b},[x1],#16 Lenter_cbc_enc128: aese v0.16b,v17.16b aesmc v0.16b,v0.16b subs x2,x2,#16 aese v0.16b,v2.16b aesmc v0.16b,v0.16b csel x8,xzr,x8,eq aese v0.16b,v3.16b aesmc v0.16b,v0.16b aese v0.16b,v18.16b aesmc v0.16b,v0.16b aese v0.16b,v19.16b aesmc v0.16b,v0.16b ld1 {v16.16b},[x0],x8 aese v0.16b,v20.16b aesmc v0.16b,v0.16b aese v0.16b,v21.16b aesmc v0.16b,v0.16b aese v0.16b,v22.16b aesmc v0.16b,v0.16b eor v16.16b,v16.16b,v5.16b aese v0.16b,v23.16b 
eor v6.16b,v0.16b,v7.16b b.hs Loop_cbc_enc128 st1 {v6.16b},[x1],#16 b Lcbc_done .align 5 Lcbc_dec: ld1 {v18.16b},[x0],#16 subs x2,x2,#32 // bias add w6,w5,#2 orr v3.16b,v0.16b,v0.16b orr v1.16b,v0.16b,v0.16b orr v19.16b,v18.16b,v18.16b b.lo Lcbc_dec_tail orr v1.16b,v18.16b,v18.16b ld1 {v18.16b},[x0],#16 orr v2.16b,v0.16b,v0.16b orr v3.16b,v1.16b,v1.16b orr v19.16b,v18.16b,v18.16b Loop3x_cbc_dec: aesd v0.16b,v16.16b aesimc v0.16b,v0.16b aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aesd v0.16b,v17.16b aesimc v0.16b,v0.16b aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt Loop3x_cbc_dec aesd v0.16b,v16.16b aesimc v0.16b,v0.16b aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b eor v4.16b,v6.16b,v7.16b subs x2,x2,#0x30 eor v5.16b,v2.16b,v7.16b csel x6,x2,x6,lo // x6, w6, is zero at this point aesd v0.16b,v17.16b aesimc v0.16b,v0.16b aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b eor v17.16b,v3.16b,v7.16b add x0,x0,x6 // x0 is adjusted in such way that // at exit from the loop v1.16b-v18.16b // are loaded with last "words" orr v6.16b,v19.16b,v19.16b mov x7,x3 aesd v0.16b,v20.16b aesimc v0.16b,v0.16b aesd v1.16b,v20.16b aesimc v1.16b,v1.16b aesd v18.16b,v20.16b aesimc v18.16b,v18.16b ld1 {v2.16b},[x0],#16 aesd v0.16b,v21.16b aesimc v0.16b,v0.16b aesd v1.16b,v21.16b aesimc v1.16b,v1.16b aesd v18.16b,v21.16b aesimc v18.16b,v18.16b ld1 {v3.16b},[x0],#16 aesd v0.16b,v22.16b aesimc v0.16b,v0.16b aesd v1.16b,v22.16b aesimc v1.16b,v1.16b aesd v18.16b,v22.16b aesimc v18.16b,v18.16b ld1 {v19.16b},[x0],#16 aesd v0.16b,v23.16b aesd v1.16b,v23.16b aesd v18.16b,v23.16b ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] add w6,w5,#2 eor v4.16b,v4.16b,v0.16b eor v5.16b,v5.16b,v1.16b eor v18.16b,v18.16b,v17.16b ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] st1 {v4.16b},[x1],#16 orr 
v0.16b,v2.16b,v2.16b st1 {v5.16b},[x1],#16 orr v1.16b,v3.16b,v3.16b st1 {v18.16b},[x1],#16 orr v18.16b,v19.16b,v19.16b b.hs Loop3x_cbc_dec cmn x2,#0x30 b.eq Lcbc_done nop Lcbc_dec_tail: aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt Lcbc_dec_tail aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b aesd v1.16b,v20.16b aesimc v1.16b,v1.16b aesd v18.16b,v20.16b aesimc v18.16b,v18.16b cmn x2,#0x20 aesd v1.16b,v21.16b aesimc v1.16b,v1.16b aesd v18.16b,v21.16b aesimc v18.16b,v18.16b eor v5.16b,v6.16b,v7.16b aesd v1.16b,v22.16b aesimc v1.16b,v1.16b aesd v18.16b,v22.16b aesimc v18.16b,v18.16b eor v17.16b,v3.16b,v7.16b aesd v1.16b,v23.16b aesd v18.16b,v23.16b b.eq Lcbc_dec_one eor v5.16b,v5.16b,v1.16b eor v17.16b,v17.16b,v18.16b orr v6.16b,v19.16b,v19.16b st1 {v5.16b},[x1],#16 st1 {v17.16b},[x1],#16 b Lcbc_done Lcbc_dec_one: eor v5.16b,v5.16b,v18.16b orr v6.16b,v19.16b,v19.16b st1 {v5.16b},[x1],#16 Lcbc_done: st1 {v6.16b},[x4] Lcbc_abort: ldr x29,[sp],#16 ret .globl _aes_hw_ctr32_encrypt_blocks .private_extern _aes_hw_ctr32_encrypt_blocks .align 5 _aes_hw_ctr32_encrypt_blocks: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ldr w5,[x3,#240] ldr w8, [x4, #12] ld1 {v0.4s},[x4] ld1 {v16.4s,v17.4s},[x3] // load key schedule... 
sub w5,w5,#4 mov x12,#16 cmp x2,#2 add x7,x3,x5,lsl#4 // pointer to last 5 round keys sub w5,w5,#2 ld1 {v20.4s,v21.4s},[x7],#32 ld1 {v22.4s,v23.4s},[x7],#32 ld1 {v7.4s},[x7] add x7,x3,#32 mov w6,w5 csel x12,xzr,x12,lo // ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are // affected by silicon errata #1742098 [0] and #1655431 [1], // respectively, where the second instruction of an aese/aesmc // instruction pair may execute twice if an interrupt is taken right // after the first instruction consumes an input register of which a // single 32-bit lane has been updated the last time it was modified. // // This function uses a counter in one 32-bit lane. The vmov lines // could write to v1.16b and v18.16b directly, but that trips this bugs. // We write to v6.16b and copy to the final register as a workaround. // // [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice // [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __AARCH64EB__ rev w8, w8 #endif add w10, w8, #1 orr v6.16b,v0.16b,v0.16b rev w10, w10 mov v6.s[3],w10 add w8, w8, #2 orr v1.16b,v6.16b,v6.16b b.ls Lctr32_tail rev w12, w8 mov v6.s[3],w12 sub x2,x2,#3 // bias orr v18.16b,v6.16b,v6.16b b Loop3x_ctr32 .align 4 Loop3x_ctr32: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b aese v18.16b,v17.16b aesmc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt Loop3x_ctr32 aese v0.16b,v16.16b aesmc v4.16b,v0.16b aese v1.16b,v16.16b aesmc v5.16b,v1.16b ld1 {v2.16b},[x0],#16 add w9,w8,#1 aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v3.16b},[x0],#16 rev w9,w9 aese v4.16b,v17.16b aesmc v4.16b,v4.16b aese v5.16b,v17.16b aesmc v5.16b,v5.16b ld1 {v19.16b},[x0],#16 mov x7,x3 aese v18.16b,v17.16b aesmc v17.16b,v18.16b aese v4.16b,v20.16b aesmc v4.16b,v4.16b aese v5.16b,v20.16b 
aesmc v5.16b,v5.16b eor v2.16b,v2.16b,v7.16b add w10,w8,#2 aese v17.16b,v20.16b aesmc v17.16b,v17.16b eor v3.16b,v3.16b,v7.16b add w8,w8,#3 aese v4.16b,v21.16b aesmc v4.16b,v4.16b aese v5.16b,v21.16b aesmc v5.16b,v5.16b // Note the logic to update v0.16b, v1.16b, and v1.16b is written to work // around a bug in ARM Cortex-A57 and Cortex-A72 cores running in // 32-bit mode. See the comment above. eor v19.16b,v19.16b,v7.16b mov v6.s[3], w9 aese v17.16b,v21.16b aesmc v17.16b,v17.16b orr v0.16b,v6.16b,v6.16b rev w10,w10 aese v4.16b,v22.16b aesmc v4.16b,v4.16b mov v6.s[3], w10 rev w12,w8 aese v5.16b,v22.16b aesmc v5.16b,v5.16b orr v1.16b,v6.16b,v6.16b mov v6.s[3], w12 aese v17.16b,v22.16b aesmc v17.16b,v17.16b orr v18.16b,v6.16b,v6.16b subs x2,x2,#3 aese v4.16b,v23.16b aese v5.16b,v23.16b aese v17.16b,v23.16b eor v2.16b,v2.16b,v4.16b ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] st1 {v2.16b},[x1],#16 eor v3.16b,v3.16b,v5.16b mov w6,w5 st1 {v3.16b},[x1],#16 eor v19.16b,v19.16b,v17.16b ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] st1 {v19.16b},[x1],#16 b.hs Loop3x_ctr32 adds x2,x2,#3 b.eq Lctr32_done cmp x2,#1 mov x12,#16 csel x12,xzr,x12,eq Lctr32_tail: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v17.4s},[x7],#16 b.gt Lctr32_tail aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v2.16b},[x0],x12 aese v0.16b,v20.16b aesmc v0.16b,v0.16b aese v1.16b,v20.16b aesmc v1.16b,v1.16b ld1 {v3.16b},[x0] aese v0.16b,v21.16b aesmc v0.16b,v0.16b aese v1.16b,v21.16b aesmc v1.16b,v1.16b eor v2.16b,v2.16b,v7.16b aese v0.16b,v22.16b aesmc v0.16b,v0.16b aese v1.16b,v22.16b aesmc v1.16b,v1.16b eor v3.16b,v3.16b,v7.16b aese v0.16b,v23.16b aese v1.16b,v23.16b cmp x2,#1 eor v2.16b,v2.16b,v0.16b eor v3.16b,v3.16b,v1.16b st1 
{v2.16b},[x1],#16 b.eq Lctr32_done st1 {v3.16b},[x1] Lctr32_done: ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesv8-armv8-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include #if __ARM_MAX_ARCH__>=7 .text .arch armv8-a+crypto .section .rodata .align 5 .Lrcon: .long 0x01,0x01,0x01,0x01 .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat .long 0x1b,0x1b,0x1b,0x1b .text .globl aes_hw_set_encrypt_key .hidden aes_hw_set_encrypt_key .type aes_hw_set_encrypt_key,%function .align 5 aes_hw_set_encrypt_key: .Lenc_key: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! 
add x29,sp,#0 mov x3,#-2 cmp w1,#128 b.lt .Lenc_key_abort cmp w1,#256 b.gt .Lenc_key_abort tst w1,#0x3f b.ne .Lenc_key_abort adrp x3,.Lrcon add x3,x3,:lo12:.Lrcon cmp w1,#192 eor v0.16b,v0.16b,v0.16b ld1 {v3.16b},[x0],#16 mov w1,#8 // reuse w1 ld1 {v1.4s,v2.4s},[x3],#32 b.lt .Loop128 b.eq .L192 b .L256 .align 4 .Loop128: tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b b.ne .Loop128 ld1 {v1.4s},[x3] tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2] add x2,x2,#0x50 mov w12,#10 b .Ldone .align 4 .L192: ld1 {v4.8b},[x0],#8 movi v6.16b,#8 // borrow v6.16b st1 {v3.4s},[x2],#16 sub v2.16b,v2.16b,v6.16b // adjust the mask .Loop192: tbl v6.16b,{v4.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v4.8b},[x2],#8 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b dup v5.4s,v3.s[3] eor v5.16b,v5.16b,v4.16b eor v6.16b,v6.16b,v1.16b ext v4.16b,v0.16b,v4.16b,#12 shl v1.16b,v1.16b,#1 eor v4.16b,v4.16b,v5.16b eor v3.16b,v3.16b,v6.16b eor v4.16b,v4.16b,v6.16b st1 {v3.4s},[x2],#16 b.ne .Loop192 mov w12,#12 add x2,x2,#0x20 b .Ldone .align 4 .L256: ld1 {v4.16b},[x0] mov w1,#7 mov 
w12,#14 st1 {v3.4s},[x2],#16 .Loop256: tbl v6.16b,{v4.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v4.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2],#16 b.eq .Ldone dup v6.4s,v3.s[3] // just splat ext v5.16b,v0.16b,v4.16b,#12 aese v6.16b,v0.16b eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b eor v4.16b,v4.16b,v6.16b b .Loop256 .Ldone: str w12,[x2] mov x3,#0 .Lenc_key_abort: mov x0,x3 // return value ldr x29,[sp],#16 ret .size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key .globl aes_hw_set_decrypt_key .hidden aes_hw_set_decrypt_key .type aes_hw_set_decrypt_key,%function .align 5 aes_hw_set_decrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 bl .Lenc_key cmp x0,#0 b.ne .Ldec_key_abort sub x2,x2,#240 // restore original x2 mov x4,#-16 add x0,x2,x12,lsl#4 // end of key schedule ld1 {v0.4s},[x2] ld1 {v1.4s},[x0] st1 {v0.4s},[x0],x4 st1 {v1.4s},[x2],#16 .Loop_imc: ld1 {v0.4s},[x2] ld1 {v1.4s},[x0] aesimc v0.16b,v0.16b aesimc v1.16b,v1.16b st1 {v0.4s},[x0],x4 st1 {v1.4s},[x2],#16 cmp x0,x2 b.hi .Loop_imc ld1 {v0.4s},[x2] aesimc v0.16b,v0.16b st1 {v0.4s},[x0] eor x0,x0,x0 // return value .Ldec_key_abort: ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size aes_hw_set_decrypt_key,.-aes_hw_set_decrypt_key .globl aes_hw_encrypt .hidden aes_hw_encrypt .type aes_hw_encrypt,%function .align 5 aes_hw_encrypt: AARCH64_VALID_CALL_TARGET ldr w3,[x2,#240] ld1 {v0.4s},[x2],#16 ld1 {v2.16b},[x0] sub w3,w3,#2 ld1 {v1.4s},[x2],#16 .Loop_enc: aese v2.16b,v0.16b aesmc v2.16b,v2.16b ld1 {v0.4s},[x2],#16 subs w3,w3,#2 aese v2.16b,v1.16b aesmc v2.16b,v2.16b ld1 {v1.4s},[x2],#16 b.gt .Loop_enc aese v2.16b,v0.16b aesmc v2.16b,v2.16b ld1 {v0.4s},[x2] aese 
v2.16b,v1.16b eor v2.16b,v2.16b,v0.16b st1 {v2.16b},[x1] ret .size aes_hw_encrypt,.-aes_hw_encrypt .globl aes_hw_decrypt .hidden aes_hw_decrypt .type aes_hw_decrypt,%function .align 5 aes_hw_decrypt: AARCH64_VALID_CALL_TARGET ldr w3,[x2,#240] ld1 {v0.4s},[x2],#16 ld1 {v2.16b},[x0] sub w3,w3,#2 ld1 {v1.4s},[x2],#16 .Loop_dec: aesd v2.16b,v0.16b aesimc v2.16b,v2.16b ld1 {v0.4s},[x2],#16 subs w3,w3,#2 aesd v2.16b,v1.16b aesimc v2.16b,v2.16b ld1 {v1.4s},[x2],#16 b.gt .Loop_dec aesd v2.16b,v0.16b aesimc v2.16b,v2.16b ld1 {v0.4s},[x2] aesd v2.16b,v1.16b eor v2.16b,v2.16b,v0.16b st1 {v2.16b},[x1] ret .size aes_hw_decrypt,.-aes_hw_decrypt .globl aes_hw_cbc_encrypt .hidden aes_hw_cbc_encrypt .type aes_hw_cbc_encrypt,%function .align 5 aes_hw_cbc_encrypt: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 subs x2,x2,#16 mov x8,#16 b.lo .Lcbc_abort csel x8,xzr,x8,eq cmp w5,#0 // en- or decrypting? ldr w5,[x3,#240] and x2,x2,#-16 ld1 {v6.16b},[x4] ld1 {v0.16b},[x0],x8 ld1 {v16.4s,v17.4s},[x3] // load key schedule... 
sub w5,w5,#6 add x7,x3,x5,lsl#4 // pointer to last 7 round keys sub w5,w5,#2 ld1 {v18.4s,v19.4s},[x7],#32 ld1 {v20.4s,v21.4s},[x7],#32 ld1 {v22.4s,v23.4s},[x7],#32 ld1 {v7.4s},[x7] add x7,x3,#32 mov w6,w5 b.eq .Lcbc_dec cmp w5,#2 eor v0.16b,v0.16b,v6.16b eor v5.16b,v16.16b,v7.16b b.eq .Lcbc_enc128 ld1 {v2.4s,v3.4s},[x7] add x7,x3,#16 add x6,x3,#16*4 add x12,x3,#16*5 aese v0.16b,v16.16b aesmc v0.16b,v0.16b add x14,x3,#16*6 add x3,x3,#16*7 b .Lenter_cbc_enc .align 4 .Loop_cbc_enc: aese v0.16b,v16.16b aesmc v0.16b,v0.16b st1 {v6.16b},[x1],#16 .Lenter_cbc_enc: aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v0.16b,v2.16b aesmc v0.16b,v0.16b ld1 {v16.4s},[x6] cmp w5,#4 aese v0.16b,v3.16b aesmc v0.16b,v0.16b ld1 {v17.4s},[x12] b.eq .Lcbc_enc192 aese v0.16b,v16.16b aesmc v0.16b,v0.16b ld1 {v16.4s},[x14] aese v0.16b,v17.16b aesmc v0.16b,v0.16b ld1 {v17.4s},[x3] nop .Lcbc_enc192: aese v0.16b,v16.16b aesmc v0.16b,v0.16b subs x2,x2,#16 aese v0.16b,v17.16b aesmc v0.16b,v0.16b csel x8,xzr,x8,eq aese v0.16b,v18.16b aesmc v0.16b,v0.16b aese v0.16b,v19.16b aesmc v0.16b,v0.16b ld1 {v16.16b},[x0],x8 aese v0.16b,v20.16b aesmc v0.16b,v0.16b eor v16.16b,v16.16b,v5.16b aese v0.16b,v21.16b aesmc v0.16b,v0.16b ld1 {v17.4s},[x7] // re-pre-load rndkey[1] aese v0.16b,v22.16b aesmc v0.16b,v0.16b aese v0.16b,v23.16b eor v6.16b,v0.16b,v7.16b b.hs .Loop_cbc_enc st1 {v6.16b},[x1],#16 b .Lcbc_done .align 5 .Lcbc_enc128: ld1 {v2.4s,v3.4s},[x7] aese v0.16b,v16.16b aesmc v0.16b,v0.16b b .Lenter_cbc_enc128 .Loop_cbc_enc128: aese v0.16b,v16.16b aesmc v0.16b,v0.16b st1 {v6.16b},[x1],#16 .Lenter_cbc_enc128: aese v0.16b,v17.16b aesmc v0.16b,v0.16b subs x2,x2,#16 aese v0.16b,v2.16b aesmc v0.16b,v0.16b csel x8,xzr,x8,eq aese v0.16b,v3.16b aesmc v0.16b,v0.16b aese v0.16b,v18.16b aesmc v0.16b,v0.16b aese v0.16b,v19.16b aesmc v0.16b,v0.16b ld1 {v16.16b},[x0],x8 aese v0.16b,v20.16b aesmc v0.16b,v0.16b aese v0.16b,v21.16b aesmc v0.16b,v0.16b aese v0.16b,v22.16b aesmc v0.16b,v0.16b eor v16.16b,v16.16b,v5.16b aese 
v0.16b,v23.16b eor v6.16b,v0.16b,v7.16b b.hs .Loop_cbc_enc128 st1 {v6.16b},[x1],#16 b .Lcbc_done .align 5 .Lcbc_dec: ld1 {v18.16b},[x0],#16 subs x2,x2,#32 // bias add w6,w5,#2 orr v3.16b,v0.16b,v0.16b orr v1.16b,v0.16b,v0.16b orr v19.16b,v18.16b,v18.16b b.lo .Lcbc_dec_tail orr v1.16b,v18.16b,v18.16b ld1 {v18.16b},[x0],#16 orr v2.16b,v0.16b,v0.16b orr v3.16b,v1.16b,v1.16b orr v19.16b,v18.16b,v18.16b .Loop3x_cbc_dec: aesd v0.16b,v16.16b aesimc v0.16b,v0.16b aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aesd v0.16b,v17.16b aesimc v0.16b,v0.16b aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt .Loop3x_cbc_dec aesd v0.16b,v16.16b aesimc v0.16b,v0.16b aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b eor v4.16b,v6.16b,v7.16b subs x2,x2,#0x30 eor v5.16b,v2.16b,v7.16b csel x6,x2,x6,lo // x6, w6, is zero at this point aesd v0.16b,v17.16b aesimc v0.16b,v0.16b aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b eor v17.16b,v3.16b,v7.16b add x0,x0,x6 // x0 is adjusted in such way that // at exit from the loop v1.16b-v18.16b // are loaded with last "words" orr v6.16b,v19.16b,v19.16b mov x7,x3 aesd v0.16b,v20.16b aesimc v0.16b,v0.16b aesd v1.16b,v20.16b aesimc v1.16b,v1.16b aesd v18.16b,v20.16b aesimc v18.16b,v18.16b ld1 {v2.16b},[x0],#16 aesd v0.16b,v21.16b aesimc v0.16b,v0.16b aesd v1.16b,v21.16b aesimc v1.16b,v1.16b aesd v18.16b,v21.16b aesimc v18.16b,v18.16b ld1 {v3.16b},[x0],#16 aesd v0.16b,v22.16b aesimc v0.16b,v0.16b aesd v1.16b,v22.16b aesimc v1.16b,v1.16b aesd v18.16b,v22.16b aesimc v18.16b,v18.16b ld1 {v19.16b},[x0],#16 aesd v0.16b,v23.16b aesd v1.16b,v23.16b aesd v18.16b,v23.16b ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] add w6,w5,#2 eor v4.16b,v4.16b,v0.16b eor v5.16b,v5.16b,v1.16b eor v18.16b,v18.16b,v17.16b ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] st1 
{v4.16b},[x1],#16 orr v0.16b,v2.16b,v2.16b st1 {v5.16b},[x1],#16 orr v1.16b,v3.16b,v3.16b st1 {v18.16b},[x1],#16 orr v18.16b,v19.16b,v19.16b b.hs .Loop3x_cbc_dec cmn x2,#0x30 b.eq .Lcbc_done nop .Lcbc_dec_tail: aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt .Lcbc_dec_tail aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b aesd v1.16b,v20.16b aesimc v1.16b,v1.16b aesd v18.16b,v20.16b aesimc v18.16b,v18.16b cmn x2,#0x20 aesd v1.16b,v21.16b aesimc v1.16b,v1.16b aesd v18.16b,v21.16b aesimc v18.16b,v18.16b eor v5.16b,v6.16b,v7.16b aesd v1.16b,v22.16b aesimc v1.16b,v1.16b aesd v18.16b,v22.16b aesimc v18.16b,v18.16b eor v17.16b,v3.16b,v7.16b aesd v1.16b,v23.16b aesd v18.16b,v23.16b b.eq .Lcbc_dec_one eor v5.16b,v5.16b,v1.16b eor v17.16b,v17.16b,v18.16b orr v6.16b,v19.16b,v19.16b st1 {v5.16b},[x1],#16 st1 {v17.16b},[x1],#16 b .Lcbc_done .Lcbc_dec_one: eor v5.16b,v5.16b,v18.16b orr v6.16b,v19.16b,v19.16b st1 {v5.16b},[x1],#16 .Lcbc_done: st1 {v6.16b},[x4] .Lcbc_abort: ldr x29,[sp],#16 ret .size aes_hw_cbc_encrypt,.-aes_hw_cbc_encrypt .globl aes_hw_ctr32_encrypt_blocks .hidden aes_hw_ctr32_encrypt_blocks .type aes_hw_ctr32_encrypt_blocks,%function .align 5 aes_hw_ctr32_encrypt_blocks: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ldr w5,[x3,#240] ldr w8, [x4, #12] ld1 {v0.4s},[x4] ld1 {v16.4s,v17.4s},[x3] // load key schedule... 
sub w5,w5,#4 mov x12,#16 cmp x2,#2 add x7,x3,x5,lsl#4 // pointer to last 5 round keys sub w5,w5,#2 ld1 {v20.4s,v21.4s},[x7],#32 ld1 {v22.4s,v23.4s},[x7],#32 ld1 {v7.4s},[x7] add x7,x3,#32 mov w6,w5 csel x12,xzr,x12,lo // ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are // affected by silicon errata #1742098 [0] and #1655431 [1], // respectively, where the second instruction of an aese/aesmc // instruction pair may execute twice if an interrupt is taken right // after the first instruction consumes an input register of which a // single 32-bit lane has been updated the last time it was modified. // // This function uses a counter in one 32-bit lane. The vmov lines // could write to v1.16b and v18.16b directly, but that trips this bugs. // We write to v6.16b and copy to the final register as a workaround. // // [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice // [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __AARCH64EB__ rev w8, w8 #endif add w10, w8, #1 orr v6.16b,v0.16b,v0.16b rev w10, w10 mov v6.s[3],w10 add w8, w8, #2 orr v1.16b,v6.16b,v6.16b b.ls .Lctr32_tail rev w12, w8 mov v6.s[3],w12 sub x2,x2,#3 // bias orr v18.16b,v6.16b,v6.16b b .Loop3x_ctr32 .align 4 .Loop3x_ctr32: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b aese v18.16b,v17.16b aesmc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt .Loop3x_ctr32 aese v0.16b,v16.16b aesmc v4.16b,v0.16b aese v1.16b,v16.16b aesmc v5.16b,v1.16b ld1 {v2.16b},[x0],#16 add w9,w8,#1 aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v3.16b},[x0],#16 rev w9,w9 aese v4.16b,v17.16b aesmc v4.16b,v4.16b aese v5.16b,v17.16b aesmc v5.16b,v5.16b ld1 {v19.16b},[x0],#16 mov x7,x3 aese v18.16b,v17.16b aesmc v17.16b,v18.16b aese v4.16b,v20.16b aesmc v4.16b,v4.16b aese 
v5.16b,v20.16b aesmc v5.16b,v5.16b eor v2.16b,v2.16b,v7.16b add w10,w8,#2 aese v17.16b,v20.16b aesmc v17.16b,v17.16b eor v3.16b,v3.16b,v7.16b add w8,w8,#3 aese v4.16b,v21.16b aesmc v4.16b,v4.16b aese v5.16b,v21.16b aesmc v5.16b,v5.16b // Note the logic to update v0.16b, v1.16b, and v1.16b is written to work // around a bug in ARM Cortex-A57 and Cortex-A72 cores running in // 32-bit mode. See the comment above. eor v19.16b,v19.16b,v7.16b mov v6.s[3], w9 aese v17.16b,v21.16b aesmc v17.16b,v17.16b orr v0.16b,v6.16b,v6.16b rev w10,w10 aese v4.16b,v22.16b aesmc v4.16b,v4.16b mov v6.s[3], w10 rev w12,w8 aese v5.16b,v22.16b aesmc v5.16b,v5.16b orr v1.16b,v6.16b,v6.16b mov v6.s[3], w12 aese v17.16b,v22.16b aesmc v17.16b,v17.16b orr v18.16b,v6.16b,v6.16b subs x2,x2,#3 aese v4.16b,v23.16b aese v5.16b,v23.16b aese v17.16b,v23.16b eor v2.16b,v2.16b,v4.16b ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] st1 {v2.16b},[x1],#16 eor v3.16b,v3.16b,v5.16b mov w6,w5 st1 {v3.16b},[x1],#16 eor v19.16b,v19.16b,v17.16b ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] st1 {v19.16b},[x1],#16 b.hs .Loop3x_ctr32 adds x2,x2,#3 b.eq .Lctr32_done cmp x2,#1 mov x12,#16 csel x12,xzr,x12,eq .Lctr32_tail: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v17.4s},[x7],#16 b.gt .Lctr32_tail aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v2.16b},[x0],x12 aese v0.16b,v20.16b aesmc v0.16b,v0.16b aese v1.16b,v20.16b aesmc v1.16b,v1.16b ld1 {v3.16b},[x0] aese v0.16b,v21.16b aesmc v0.16b,v0.16b aese v1.16b,v21.16b aesmc v1.16b,v1.16b eor v2.16b,v2.16b,v7.16b aese v0.16b,v22.16b aesmc v0.16b,v0.16b aese v1.16b,v22.16b aesmc v1.16b,v1.16b eor v3.16b,v3.16b,v7.16b aese v0.16b,v23.16b aese v1.16b,v23.16b cmp x2,#1 eor v2.16b,v2.16b,v0.16b eor 
v3.16b,v3.16b,v1.16b st1 {v2.16b},[x1],#16 b.eq .Lctr32_done st1 {v3.16b},[x1] .Lctr32_done: ldr x29,[sp],#16 ret .size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesv8-armv8-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include #if __ARM_MAX_ARCH__>=7 .text .arch armv8-a+crypto .section .rodata .align 5 Lrcon: .long 0x01,0x01,0x01,0x01 .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat .long 0x1b,0x1b,0x1b,0x1b .text .globl aes_hw_set_encrypt_key .def aes_hw_set_encrypt_key .type 32 .endef .align 5 aes_hw_set_encrypt_key: Lenc_key: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! 
add x29,sp,#0 mov x3,#-2 cmp w1,#128 b.lt Lenc_key_abort cmp w1,#256 b.gt Lenc_key_abort tst w1,#0x3f b.ne Lenc_key_abort adrp x3,Lrcon add x3,x3,:lo12:Lrcon cmp w1,#192 eor v0.16b,v0.16b,v0.16b ld1 {v3.16b},[x0],#16 mov w1,#8 // reuse w1 ld1 {v1.4s,v2.4s},[x3],#32 b.lt Loop128 b.eq L192 b L256 .align 4 Loop128: tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b b.ne Loop128 ld1 {v1.4s},[x3] tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2] add x2,x2,#0x50 mov w12,#10 b Ldone .align 4 L192: ld1 {v4.8b},[x0],#8 movi v6.16b,#8 // borrow v6.16b st1 {v3.4s},[x2],#16 sub v2.16b,v2.16b,v6.16b // adjust the mask Loop192: tbl v6.16b,{v4.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v4.8b},[x2],#8 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b dup v5.4s,v3.s[3] eor v5.16b,v5.16b,v4.16b eor v6.16b,v6.16b,v1.16b ext v4.16b,v0.16b,v4.16b,#12 shl v1.16b,v1.16b,#1 eor v4.16b,v4.16b,v5.16b eor v3.16b,v3.16b,v6.16b eor v4.16b,v4.16b,v6.16b st1 {v3.4s},[x2],#16 b.ne Loop192 mov w12,#12 add x2,x2,#0x20 b Ldone .align 4 L256: ld1 {v4.16b},[x0] mov w1,#7 mov w12,#14 st1 
{v3.4s},[x2],#16 Loop256: tbl v6.16b,{v4.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v4.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2],#16 b.eq Ldone dup v6.4s,v3.s[3] // just splat ext v5.16b,v0.16b,v4.16b,#12 aese v6.16b,v0.16b eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b eor v4.16b,v4.16b,v6.16b b Loop256 Ldone: str w12,[x2] mov x3,#0 Lenc_key_abort: mov x0,x3 // return value ldr x29,[sp],#16 ret .globl aes_hw_set_decrypt_key .def aes_hw_set_decrypt_key .type 32 .endef .align 5 aes_hw_set_decrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 bl Lenc_key cmp x0,#0 b.ne Ldec_key_abort sub x2,x2,#240 // restore original x2 mov x4,#-16 add x0,x2,x12,lsl#4 // end of key schedule ld1 {v0.4s},[x2] ld1 {v1.4s},[x0] st1 {v0.4s},[x0],x4 st1 {v1.4s},[x2],#16 Loop_imc: ld1 {v0.4s},[x2] ld1 {v1.4s},[x0] aesimc v0.16b,v0.16b aesimc v1.16b,v1.16b st1 {v0.4s},[x0],x4 st1 {v1.4s},[x2],#16 cmp x0,x2 b.hi Loop_imc ld1 {v0.4s},[x2] aesimc v0.16b,v0.16b st1 {v0.4s},[x0] eor x0,x0,x0 // return value Ldec_key_abort: ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl aes_hw_encrypt .def aes_hw_encrypt .type 32 .endef .align 5 aes_hw_encrypt: AARCH64_VALID_CALL_TARGET ldr w3,[x2,#240] ld1 {v0.4s},[x2],#16 ld1 {v2.16b},[x0] sub w3,w3,#2 ld1 {v1.4s},[x2],#16 Loop_enc: aese v2.16b,v0.16b aesmc v2.16b,v2.16b ld1 {v0.4s},[x2],#16 subs w3,w3,#2 aese v2.16b,v1.16b aesmc v2.16b,v2.16b ld1 {v1.4s},[x2],#16 b.gt Loop_enc aese v2.16b,v0.16b aesmc v2.16b,v2.16b ld1 {v0.4s},[x2] aese v2.16b,v1.16b eor v2.16b,v2.16b,v0.16b st1 {v2.16b},[x1] ret .globl aes_hw_decrypt .def aes_hw_decrypt .type 32 .endef .align 5 aes_hw_decrypt: AARCH64_VALID_CALL_TARGET ldr 
w3,[x2,#240] ld1 {v0.4s},[x2],#16 ld1 {v2.16b},[x0] sub w3,w3,#2 ld1 {v1.4s},[x2],#16 Loop_dec: aesd v2.16b,v0.16b aesimc v2.16b,v2.16b ld1 {v0.4s},[x2],#16 subs w3,w3,#2 aesd v2.16b,v1.16b aesimc v2.16b,v2.16b ld1 {v1.4s},[x2],#16 b.gt Loop_dec aesd v2.16b,v0.16b aesimc v2.16b,v2.16b ld1 {v0.4s},[x2] aesd v2.16b,v1.16b eor v2.16b,v2.16b,v0.16b st1 {v2.16b},[x1] ret .globl aes_hw_cbc_encrypt .def aes_hw_cbc_encrypt .type 32 .endef .align 5 aes_hw_cbc_encrypt: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 subs x2,x2,#16 mov x8,#16 b.lo Lcbc_abort csel x8,xzr,x8,eq cmp w5,#0 // en- or decrypting? ldr w5,[x3,#240] and x2,x2,#-16 ld1 {v6.16b},[x4] ld1 {v0.16b},[x0],x8 ld1 {v16.4s,v17.4s},[x3] // load key schedule... sub w5,w5,#6 add x7,x3,x5,lsl#4 // pointer to last 7 round keys sub w5,w5,#2 ld1 {v18.4s,v19.4s},[x7],#32 ld1 {v20.4s,v21.4s},[x7],#32 ld1 {v22.4s,v23.4s},[x7],#32 ld1 {v7.4s},[x7] add x7,x3,#32 mov w6,w5 b.eq Lcbc_dec cmp w5,#2 eor v0.16b,v0.16b,v6.16b eor v5.16b,v16.16b,v7.16b b.eq Lcbc_enc128 ld1 {v2.4s,v3.4s},[x7] add x7,x3,#16 add x6,x3,#16*4 add x12,x3,#16*5 aese v0.16b,v16.16b aesmc v0.16b,v0.16b add x14,x3,#16*6 add x3,x3,#16*7 b Lenter_cbc_enc .align 4 Loop_cbc_enc: aese v0.16b,v16.16b aesmc v0.16b,v0.16b st1 {v6.16b},[x1],#16 Lenter_cbc_enc: aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v0.16b,v2.16b aesmc v0.16b,v0.16b ld1 {v16.4s},[x6] cmp w5,#4 aese v0.16b,v3.16b aesmc v0.16b,v0.16b ld1 {v17.4s},[x12] b.eq Lcbc_enc192 aese v0.16b,v16.16b aesmc v0.16b,v0.16b ld1 {v16.4s},[x14] aese v0.16b,v17.16b aesmc v0.16b,v0.16b ld1 {v17.4s},[x3] nop Lcbc_enc192: aese v0.16b,v16.16b aesmc v0.16b,v0.16b subs x2,x2,#16 aese v0.16b,v17.16b aesmc v0.16b,v0.16b csel x8,xzr,x8,eq aese v0.16b,v18.16b aesmc v0.16b,v0.16b aese v0.16b,v19.16b aesmc v0.16b,v0.16b ld1 {v16.16b},[x0],x8 aese v0.16b,v20.16b aesmc v0.16b,v0.16b eor v16.16b,v16.16b,v5.16b aese v0.16b,v21.16b aesmc 
v0.16b,v0.16b ld1 {v17.4s},[x7] // re-pre-load rndkey[1] aese v0.16b,v22.16b aesmc v0.16b,v0.16b aese v0.16b,v23.16b eor v6.16b,v0.16b,v7.16b b.hs Loop_cbc_enc st1 {v6.16b},[x1],#16 b Lcbc_done .align 5 Lcbc_enc128: ld1 {v2.4s,v3.4s},[x7] aese v0.16b,v16.16b aesmc v0.16b,v0.16b b Lenter_cbc_enc128 Loop_cbc_enc128: aese v0.16b,v16.16b aesmc v0.16b,v0.16b st1 {v6.16b},[x1],#16 Lenter_cbc_enc128: aese v0.16b,v17.16b aesmc v0.16b,v0.16b subs x2,x2,#16 aese v0.16b,v2.16b aesmc v0.16b,v0.16b csel x8,xzr,x8,eq aese v0.16b,v3.16b aesmc v0.16b,v0.16b aese v0.16b,v18.16b aesmc v0.16b,v0.16b aese v0.16b,v19.16b aesmc v0.16b,v0.16b ld1 {v16.16b},[x0],x8 aese v0.16b,v20.16b aesmc v0.16b,v0.16b aese v0.16b,v21.16b aesmc v0.16b,v0.16b aese v0.16b,v22.16b aesmc v0.16b,v0.16b eor v16.16b,v16.16b,v5.16b aese v0.16b,v23.16b eor v6.16b,v0.16b,v7.16b b.hs Loop_cbc_enc128 st1 {v6.16b},[x1],#16 b Lcbc_done .align 5 Lcbc_dec: ld1 {v18.16b},[x0],#16 subs x2,x2,#32 // bias add w6,w5,#2 orr v3.16b,v0.16b,v0.16b orr v1.16b,v0.16b,v0.16b orr v19.16b,v18.16b,v18.16b b.lo Lcbc_dec_tail orr v1.16b,v18.16b,v18.16b ld1 {v18.16b},[x0],#16 orr v2.16b,v0.16b,v0.16b orr v3.16b,v1.16b,v1.16b orr v19.16b,v18.16b,v18.16b Loop3x_cbc_dec: aesd v0.16b,v16.16b aesimc v0.16b,v0.16b aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aesd v0.16b,v17.16b aesimc v0.16b,v0.16b aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt Loop3x_cbc_dec aesd v0.16b,v16.16b aesimc v0.16b,v0.16b aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b eor v4.16b,v6.16b,v7.16b subs x2,x2,#0x30 eor v5.16b,v2.16b,v7.16b csel x6,x2,x6,lo // x6, w6, is zero at this point aesd v0.16b,v17.16b aesimc v0.16b,v0.16b aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b eor v17.16b,v3.16b,v7.16b add x0,x0,x6 // x0 is adjusted in such way that // at exit 
from the loop v1.16b-v18.16b // are loaded with last "words" orr v6.16b,v19.16b,v19.16b mov x7,x3 aesd v0.16b,v20.16b aesimc v0.16b,v0.16b aesd v1.16b,v20.16b aesimc v1.16b,v1.16b aesd v18.16b,v20.16b aesimc v18.16b,v18.16b ld1 {v2.16b},[x0],#16 aesd v0.16b,v21.16b aesimc v0.16b,v0.16b aesd v1.16b,v21.16b aesimc v1.16b,v1.16b aesd v18.16b,v21.16b aesimc v18.16b,v18.16b ld1 {v3.16b},[x0],#16 aesd v0.16b,v22.16b aesimc v0.16b,v0.16b aesd v1.16b,v22.16b aesimc v1.16b,v1.16b aesd v18.16b,v22.16b aesimc v18.16b,v18.16b ld1 {v19.16b},[x0],#16 aesd v0.16b,v23.16b aesd v1.16b,v23.16b aesd v18.16b,v23.16b ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] add w6,w5,#2 eor v4.16b,v4.16b,v0.16b eor v5.16b,v5.16b,v1.16b eor v18.16b,v18.16b,v17.16b ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] st1 {v4.16b},[x1],#16 orr v0.16b,v2.16b,v2.16b st1 {v5.16b},[x1],#16 orr v1.16b,v3.16b,v3.16b st1 {v18.16b},[x1],#16 orr v18.16b,v19.16b,v19.16b b.hs Loop3x_cbc_dec cmn x2,#0x30 b.eq Lcbc_done nop Lcbc_dec_tail: aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt Lcbc_dec_tail aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b aesd v1.16b,v20.16b aesimc v1.16b,v1.16b aesd v18.16b,v20.16b aesimc v18.16b,v18.16b cmn x2,#0x20 aesd v1.16b,v21.16b aesimc v1.16b,v1.16b aesd v18.16b,v21.16b aesimc v18.16b,v18.16b eor v5.16b,v6.16b,v7.16b aesd v1.16b,v22.16b aesimc v1.16b,v1.16b aesd v18.16b,v22.16b aesimc v18.16b,v18.16b eor v17.16b,v3.16b,v7.16b aesd v1.16b,v23.16b aesd v18.16b,v23.16b b.eq Lcbc_dec_one eor v5.16b,v5.16b,v1.16b eor v17.16b,v17.16b,v18.16b orr v6.16b,v19.16b,v19.16b st1 {v5.16b},[x1],#16 st1 {v17.16b},[x1],#16 b Lcbc_done Lcbc_dec_one: eor v5.16b,v5.16b,v18.16b orr v6.16b,v19.16b,v19.16b 
st1 {v5.16b},[x1],#16 Lcbc_done: st1 {v6.16b},[x4] Lcbc_abort: ldr x29,[sp],#16 ret .globl aes_hw_ctr32_encrypt_blocks .def aes_hw_ctr32_encrypt_blocks .type 32 .endef .align 5 aes_hw_ctr32_encrypt_blocks: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ldr w5,[x3,#240] ldr w8, [x4, #12] ld1 {v0.4s},[x4] ld1 {v16.4s,v17.4s},[x3] // load key schedule... sub w5,w5,#4 mov x12,#16 cmp x2,#2 add x7,x3,x5,lsl#4 // pointer to last 5 round keys sub w5,w5,#2 ld1 {v20.4s,v21.4s},[x7],#32 ld1 {v22.4s,v23.4s},[x7],#32 ld1 {v7.4s},[x7] add x7,x3,#32 mov w6,w5 csel x12,xzr,x12,lo // ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are // affected by silicon errata #1742098 [0] and #1655431 [1], // respectively, where the second instruction of an aese/aesmc // instruction pair may execute twice if an interrupt is taken right // after the first instruction consumes an input register of which a // single 32-bit lane has been updated the last time it was modified. // // This function uses a counter in one 32-bit lane. The vmov lines // could write to v1.16b and v18.16b directly, but that trips this bugs. // We write to v6.16b and copy to the final register as a workaround. 
// // [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice // [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __AARCH64EB__ rev w8, w8 #endif add w10, w8, #1 orr v6.16b,v0.16b,v0.16b rev w10, w10 mov v6.s[3],w10 add w8, w8, #2 orr v1.16b,v6.16b,v6.16b b.ls Lctr32_tail rev w12, w8 mov v6.s[3],w12 sub x2,x2,#3 // bias orr v18.16b,v6.16b,v6.16b b Loop3x_ctr32 .align 4 Loop3x_ctr32: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b aese v18.16b,v17.16b aesmc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt Loop3x_ctr32 aese v0.16b,v16.16b aesmc v4.16b,v0.16b aese v1.16b,v16.16b aesmc v5.16b,v1.16b ld1 {v2.16b},[x0],#16 add w9,w8,#1 aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v3.16b},[x0],#16 rev w9,w9 aese v4.16b,v17.16b aesmc v4.16b,v4.16b aese v5.16b,v17.16b aesmc v5.16b,v5.16b ld1 {v19.16b},[x0],#16 mov x7,x3 aese v18.16b,v17.16b aesmc v17.16b,v18.16b aese v4.16b,v20.16b aesmc v4.16b,v4.16b aese v5.16b,v20.16b aesmc v5.16b,v5.16b eor v2.16b,v2.16b,v7.16b add w10,w8,#2 aese v17.16b,v20.16b aesmc v17.16b,v17.16b eor v3.16b,v3.16b,v7.16b add w8,w8,#3 aese v4.16b,v21.16b aesmc v4.16b,v4.16b aese v5.16b,v21.16b aesmc v5.16b,v5.16b // Note the logic to update v0.16b, v1.16b, and v1.16b is written to work // around a bug in ARM Cortex-A57 and Cortex-A72 cores running in // 32-bit mode. See the comment above. 
eor v19.16b,v19.16b,v7.16b mov v6.s[3], w9 aese v17.16b,v21.16b aesmc v17.16b,v17.16b orr v0.16b,v6.16b,v6.16b rev w10,w10 aese v4.16b,v22.16b aesmc v4.16b,v4.16b mov v6.s[3], w10 rev w12,w8 aese v5.16b,v22.16b aesmc v5.16b,v5.16b orr v1.16b,v6.16b,v6.16b mov v6.s[3], w12 aese v17.16b,v22.16b aesmc v17.16b,v17.16b orr v18.16b,v6.16b,v6.16b subs x2,x2,#3 aese v4.16b,v23.16b aese v5.16b,v23.16b aese v17.16b,v23.16b eor v2.16b,v2.16b,v4.16b ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] st1 {v2.16b},[x1],#16 eor v3.16b,v3.16b,v5.16b mov w6,w5 st1 {v3.16b},[x1],#16 eor v19.16b,v19.16b,v17.16b ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] st1 {v19.16b},[x1],#16 b.hs Loop3x_ctr32 adds x2,x2,#3 b.eq Lctr32_done cmp x2,#1 mov x12,#16 csel x12,xzr,x12,eq Lctr32_tail: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v17.4s},[x7],#16 b.gt Lctr32_tail aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v2.16b},[x0],x12 aese v0.16b,v20.16b aesmc v0.16b,v0.16b aese v1.16b,v20.16b aesmc v1.16b,v1.16b ld1 {v3.16b},[x0] aese v0.16b,v21.16b aesmc v0.16b,v0.16b aese v1.16b,v21.16b aesmc v1.16b,v1.16b eor v2.16b,v2.16b,v7.16b aese v0.16b,v22.16b aesmc v0.16b,v0.16b aese v1.16b,v22.16b aesmc v1.16b,v1.16b eor v3.16b,v3.16b,v7.16b aese v0.16b,v23.16b aese v1.16b,v23.16b cmp x2,#1 eor v2.16b,v2.16b,v0.16b eor v3.16b,v3.16b,v1.16b st1 {v2.16b},[x1],#16 b.eq Lctr32_done st1 {v3.16b},[x1] Lctr32_done: ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesv8-gcm-armv8-apple.S 
================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include #if __ARM_MAX_ARCH__ >= 8 .text .globl _aes_gcm_enc_kernel .private_extern _aes_gcm_enc_kernel .align 4 _aes_gcm_enc_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys add x4, x0, x1, lsr #3 // end_input_ptr lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible sub x5, x5, #1 // byte_len - 1 ldr q18, [x8, #0] // load rk0 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) ldr q25, [x8, #112] // load rk7 add x5, x5, x0 lsr x12, x11, #32 fmov d2, x10 // CTR block 2 orr w11, w11, w11 rev w12, w12 // rev_ctr32 fmov d1, x10 // CTR block 1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 add w12, w12, #1 // increment rev_ctr32 rev w9, w12 // CTR block 1 fmov d3, x10 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 1 add w12, w12, #1 // CTR block 1 ldr q19, [x8, #16] // load rk1 fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 ldr q20, [x8, #32] // load rk2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 orr x9, x11, 
x9, lsl #32 // CTR block 3 fmov v3.d[1], x9 // CTR block 3 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q21, [x8, #48] // load rk3 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q24, [x8, #96] // load rk6 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q23, [x8, #80] // load rk5 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q22, [x8, #64] // load rk4 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 ldr q29, [x8, #176] // load rk11 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 ldr q26, [x8, #128] // load rk8 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 add w12, w12, #1 // CTR block 3 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 
aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 trn2 v17.2d, v14.2d, v15.2d // h4l | h3l aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 ldr q27, [x8, #144] // load rk9 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 ldr q28, [x8, #160] // load rk10 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 trn1 v9.2d, v14.2d, v15.2d // h4h | h3h aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 trn2 v16.2d, v12.2d, v13.2d // h2l | h1l aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 b.lt Lenc_finish_first_blocks // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 b.eq Lenc_finish_first_blocks // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v0.16b, v29.16b aesmc 
v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 Lenc_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v2.16b, v31.16b // AES block 2 - round N-1 trn1 v8.2d, v12.2d, v13.2d // h2h | h1h aese v1.16b, v31.16b // AES block 1 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 aese v3.16b, v31.16b // AES block 3 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k b.ge Lenc_tail // handle tail ldp x19, x20, [x0, #16] // AES block 1 - load plaintext rev w9, w12 // CTR block 4 ldp x6, x7, [x0, #0] // AES block 0 - load plaintext ldp x23, x24, [x0, #48] // AES block 3 - load plaintext ldp x21, x22, [x0, #32] // AES block 2 - load plaintext add x0, x0, #64 // AES input_ptr update eor x19, x19, x13 // AES block 1 - round N low eor x20, x20, x14 // AES block 1 - round N high fmov d5, x19 // AES block 1 - mov low eor x6, x6, x13 // AES block 0 - round N low eor x7, x7, x14 // AES block 0 - round N high eor x24, x24, x14 // AES block 3 - round N high fmov d4, x6 // AES block 0 - mov low cmp x0, x5 // check if we have <= 8 blocks fmov v4.d[1], x7 // AES block 0 - mov high eor x23, x23, x13 // AES block 3 - round N low eor x21, x21, x13 // AES block 2 - round N low fmov v5.d[1], x20 // AES block 1 - mov high fmov d6, x21 // AES block 2 - mov low add w12, w12, #1 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov d7, x23 // AES block 3 - mov low eor x22, x22, x14 // AES block 2 - round N high fmov v6.d[1], x22 // AES block 2 - mov high eor v4.16b, v4.16b, v0.16b // AES block 0 - result fmov d0, x10 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add 
w12, w12, #1 // CTR block 5 eor v5.16b, v5.16b, v1.16b // AES block 1 - result fmov d1, x10 // CTR block 5 orr x9, x11, x9, lsl #32 // CTR block 5 fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 st1 { v4.16b}, [x2], #16 // AES block 0 - store result fmov v7.d[1], x24 // AES block 3 - mov high orr x9, x11, x9, lsl #32 // CTR block 6 eor v6.16b, v6.16b, v2.16b // AES block 2 - result st1 { v5.16b}, [x2], #16 // AES block 1 - store result add w12, w12, #1 // CTR block 6 fmov d2, x10 // CTR block 6 fmov v2.d[1], x9 // CTR block 6 st1 { v6.16b}, [x2], #16 // AES block 2 - store result rev w9, w12 // CTR block 7 orr x9, x11, x9, lsl #32 // CTR block 7 eor v7.16b, v7.16b, v3.16b // AES block 3 - result st1 { v7.16b}, [x2], #16 // AES block 3 - store result b.ge Lenc_prepretail // do prepretail Lenc_main_loop: // main loop start aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d3, x10 // CTR block 4k+3 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 fmov v3.d[1], x9 // CTR block 4k+3 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 eor v4.16b, v4.16b, v11.16b // PRE 1 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x23, x23, x13 // AES block 4k+7 - round N low aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d10, v17.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 
4k - high eor x22, x22, x14 // AES block 4k+6 - round N high mov d8, v4.d[1] // GHASH block 4k - mid aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese 
v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor x19, x19, x13 // AES block 4k+5 - round N low aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 eor x21, x21, x13 // AES block 4k+6 - round N low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 movi v8.8b, #0xc2 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check fmov d5, x19 // AES block 4k+5 - mov low ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext b.lt Lenc_main_loop_continue // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc 
v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq Lenc_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Lenc_main_loop_continue: shl d8, d8, #56 // mod_constant eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid add w12, w12, #1 // CTR block 4k+3 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up add x0, x0, #64 // AES input_ptr update pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid rev w9, w12 // CTR block 4k+8 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor x6, x6, x13 // AES block 4k+4 - round N low eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up eor x7, x7, x14 // AES block 4k+4 - round N high fmov d4, x6 // AES block 4k+4 - mov low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid eor x20, x20, x14 // AES block 4k+5 - round N high eor x24, x24, x14 // AES block 4k+7 - round N high add w12, w12, #1 // CTR block 4k+8 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 fmov v4.d[1], x7 // AES block 4k+4 - mov high eor 
v10.16b, v10.16b, v7.16b // MODULO - fold into mid fmov d7, x23 // AES block 4k+7 - mov low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 fmov v5.d[1], x20 // AES block 4k+5 - mov high fmov d6, x21 // AES block 4k+6 - mov low cmp x0, x5 // LOOP CONTROL fmov v6.d[1], x22 // AES block 4k+6 - mov high pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 rev w9, w12 // CTR block 4k+9 add w12, w12, #1 // CTR block 4k+9 eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result fmov d1, x10 // CTR block 4k+9 orr x9, x11, x9, lsl #32 // CTR block 4k+9 fmov v1.d[1], x9 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 rev w9, w12 // CTR block 4k+10 st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result orr x9, x11, x9, lsl #32 // CTR block 4k+10 eor v11.16b, v11.16b, v9.16b // MODULO - fold into low fmov v7.d[1], x24 // AES block 4k+7 - mov high ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result fmov d2, x10 // CTR block 4k+10 st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result fmov v2.d[1], x9 // CTR block 4k+10 rev w9, w12 // CTR block 4k+11 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low orr x9, x11, x9, lsl #32 // CTR block 4k+11 eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result b.lt Lenc_main_loop Lenc_prepretail: // PREPRETAIL aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov d3, x10 // CTR block 4k+3 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 
v4.16b, v4.16b // GHASH block 4k (only t0 is free) fmov v3.d[1], x9 // CTR block 4k+3 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, 
v17.1d // GHASH block 4k+1 - mid eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid add w12, w12, #1 // CTR block 4k+3 pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - 
round 8 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v10.16b, v10.16b, v9.16b // karatsuba tidy up aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 pmull v4.1q, v9.1d, v8.1d ext v9.16b, v9.16b, v9.16b, #8 eor v10.16b, v10.16b, v11.16b b.lt Lenc_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 b.eq Lenc_finish_prepretail // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 Lenc_finish_prepretail: eor v10.16b, v10.16b, v4.16b eor v10.16b, v10.16b, v9.16b pmull v4.1q, v10.1d, v8.1d ext v10.16b, v10.16b, v10.16b, #8 aese v1.16b, v31.16b // AES block 4k+5 - round N-1 eor v11.16b, v11.16b, v4.16b aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v0.16b, v31.16b // AES 
block 4k+4 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b Lenc_tail: // TAIL ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high cmp x5, #48 fmov d4, x6 // AES block 4k+4 - mov low fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result b.gt Lenc_blocks_more_than_3 cmp x5, #32 mov v3.16b, v2.16b movi v11.8b, #0 movi v9.8b, #0 sub w12, w12, #1 mov v2.16b, v1.16b movi v10.8b, #0 b.gt Lenc_blocks_more_than_2 mov v3.16b, v1.16b sub w12, w12, #1 cmp x5, #16 b.gt Lenc_blocks_more_than_1 sub w12, w12, #1 b Lenc_blocks_less_than_1 Lenc_blocks_more_than_3: // blocks left > 3 st1 { v5.16b}, [x2], #16 // AES final-3 block - store result ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-3 block eor x6, x6, x13 // AES final-2 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag eor x7, x7, x14 // AES final-2 block - round N high mov d22, v4.d[1] // GHASH final-3 block - mid fmov d5, x6 // AES final-2 block - mov low fmov v5.d[1], x7 // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in mov d10, v17.d[1] // GHASH final-3 block - mid pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor v5.16b, v5.16b, v1.16b // AES final-2 block - result Lenc_blocks_more_than_2: // blocks left > 2 st1 { v5.16b}, [x2], #16 // AES final-2 block - store result ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-2 block eor x6, x6, x13 // AES final-1 
block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag fmov d5, x6 // AES final-1 block - mov low eor x7, x7, x14 // AES final-1 block - round N high fmov v5.d[1], x7 // AES final-1 block - mov high movi v8.8b, #0 // suppress further partial tag feed in pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid eor v5.16b, v5.16b, v2.16b // AES final-1 block - result eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid Lenc_blocks_more_than_1: // blocks left > 1 st1 { v5.16b}, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ldp x6, x7, [x0], #16 // AES final block - load input low & high eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in eor x6, x6, x13 // AES final block - round N low mov d22, v4.d[1] // GHASH final-1 block - mid pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor x7, x7, x14 // AES final block - round N high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high ins v22.d[1], v22.d[0] // GHASH final-1 block - mid fmov d5, x6 // AES final block - mov low fmov v5.d[1], x7 // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low eor v5.16b, v5.16b, v3.16b // AES final block - result eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low Lenc_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff sub x1, x1, #128 // bit_length 
-= 128 neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored mvn x14, xzr // rkN_h = 0xffffffffffffffff and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x6, x13, x14, lt csel x7, x14, xzr, lt fmov d0, x6 // ctr0b is mask for last block fmov v0.d[1], x7 and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high mov d8, v4.d[1] // GHASH final block - mid rev w9, w12 pmull v21.1q, v4.1d, v12.1d // GHASH final block - low eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment str w9, [x16, #12] // store the updated counter st1 { v5.16b}, [x2] // store all 16B eor v11.16b, v11.16b, v9.16b // MODULO - fold into low eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, 
#32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret .globl _aes_gcm_dec_kernel .private_extern _aes_gcm_dec_kernel .align 4 _aes_gcm_dec_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ldr q26, [x8, #128] // load rk8 sub x5, x5, #1 // byte_len - 1 ldr q25, [x8, #112] // load rk7 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) add x4, x0, x1, lsr #3 // end_input_ptr ldr q24, [x8, #96] // load rk6 lsr x12, x11, #32 ldr q23, [x8, #80] // load rk5 orr w11, w11, w11 ldr q21, [x8, #48] // load rk3 add x5, x5, x0 rev w12, w12 // rev_ctr32 add w12, w12, #1 // increment rev_ctr32 fmov d3, x10 // CTR block 3 rev w9, w12 // CTR block 1 add w12, w12, #1 // CTR block 1 fmov d1, x10 // CTR block 1 orr x9, x11, x9, lsl #32 // CTR block 1 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 fmov d2, x10 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 3 ldr q18, [x8, #0] // load rk0 fmov v3.d[1], x9 // CTR block 3 add w12, w12, #1 // CTR block 3 ldr q22, [x8, #64] // load rk4 ldr q19, [x8, #16] // load rk1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 
0 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q20, [x8, #32] // load rk2 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q27, [x8, #144] // load rk9 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q28, [x8, #160] // load rk10 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v2.16b, v23.16b aesmc v2.16b, 
v2.16b // AES block 2 - round 5 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 cmp x17, #12 // setup flags for AES-128/192/256 check aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 ldr q29, [x8, #176] // load rk11 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 b.lt Ldec_finish_first_blocks // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 b.eq Ldec_finish_first_blocks // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v2.16b, v30.16b aesmc 
v2.16b, v2.16b // AES block 2 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 Ldec_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks trn1 v9.2d, v14.2d, v15.2d // h4h | h3h trn2 v17.2d, v14.2d, v15.2d // h4l | h3l trn1 v8.2d, v12.2d, v13.2d // h2h | h1h trn2 v16.2d, v12.2d, v13.2d // h2l | h1l eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v1.16b, v31.16b // AES block 1 - round N-1 aese v2.16b, v31.16b // AES block 2 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k aese v3.16b, v31.16b // AES block 3 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 b.ge Ldec_tail // handle tail ldr q4, [x0, #0] // AES block 0 - load ciphertext ldr q5, [x0, #16] // AES block 1 - load ciphertext rev w9, w12 // CTR block 4 eor v0.16b, v4.16b, v0.16b // AES block 0 - result eor v1.16b, v5.16b, v1.16b // AES block 1 - result rev64 v5.16b, v5.16b // GHASH block 1 ldr q7, [x0, #48] // AES block 3 - load ciphertext mov x7, v0.d[1] // AES block 0 - mov high mov x6, v0.d[0] // AES block 0 - mov low rev64 v4.16b, v4.16b // GHASH block 0 add w12, w12, #1 // CTR block 4 fmov d0, x10 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 mov x19, v1.d[0] // AES block 1 - mov low orr x9, x11, x9, lsl #32 // CTR block 5 mov x20, v1.d[1] // AES block 1 - mov high eor x7, x7, x14 // AES block 0 - round N high eor x6, x6, x13 // AES block 0 - round N low stp x6, x7, [x2], #16 // AES block 0 - store result fmov d1, x10 // CTR block 5 ldr q6, [x0, #32] // AES block 2 - load ciphertext add x0, x0, #64 // AES input_ptr update fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 add w12, w12, #1 // CTR block 6 eor x19, x19, x13 // AES block 1 - round N low orr x9, x11, x9, lsl #32 // CTR block 6 eor x20, x20, x14 // AES block 1 - round N high stp x19, x20, [x2], #16 // AES block 1 - store result eor v2.16b, v6.16b, v2.16b // AES block 2 - result cmp 
x0, x5 // check if we have <= 8 blocks b.ge Ldec_prepretail // do prepretail Ldec_main_loop: // main loop start mov x21, v2.d[0] // AES block 4k+2 - mov low ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 eor v4.16b, v4.16b, v11.16b // PRE 1 rev w9, w12 // CTR block 4k+7 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x23, v3.d[0] // AES block 4k+3 - mov low pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov v3.d[1], x9 // CTR block 4k+7 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor x22, x22, x14 // AES block 4k+2 - round N high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 rev64 v6.16b, v6.16b // GHASH block 4k+2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x21, x21, x13 // AES block 4k+2 - round N low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 stp x21, x22, [x2], #16 // AES block 4k+2 - store result pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v21.16b aesmc v2.16b, 
v2.16b // AES block 4k+6 - round 3 rev64 v7.16b, v7.16b // GHASH block 4k+3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor x23, x23, x13 // AES block 4k+3 - round N low pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor x24, x24, x14 // AES block 4k+3 - round N high eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 add w12, w12, #1 // CTR block 4k+7 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid rev w9, w12 // CTR block 4k+8 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 add w12, w12, #1 // CTR block 4k+8 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high mov d6, v7.d[1] // GHASH 
block 4k+3 - mid aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid movi v8.8b, #0xc2 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 b.lt Ldec_main_loop_continue // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq Ldec_main_loop_continue // branch if AES-192 
aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Ldec_main_loop_continue: pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext aese v0.16b, v31.16b // AES block 4k+4 - round N-1 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext mov x7, v0.d[1] // AES block 4k+4 - mov high eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid aese v1.16b, v31.16b // AES block 4k+5 - round N-1 add x0, x0, #64 // AES input_ptr update mov x6, v0.d[0] // AES block 4k+4 - mov low fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result rev w9, w12 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+9 cmp x0, x5 // LOOP CONTROL add w12, w12, #1 // CTR block 4k+9 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high mov x20, v1.d[1] // AES block 
4k+5 - mov high eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low mov x19, v1.d[0] // AES block 4k+5 - mov low fmov d1, x10 // CTR block 4k+9 ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment fmov v1.d[1], x9 // CTR block 4k+9 rev w9, w12 // CTR block 4k+10 add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+10 rev64 v5.16b, v5.16b // GHASH block 4k+5 eor x20, x20, x14 // AES block 4k+5 - round N high stp x6, x7, [x2], #16 // AES block 4k+4 - store result eor x19, x19, x13 // AES block 4k+5 - round N low stp x19, x20, [x2], #16 // AES block 4k+5 - store result rev64 v4.16b, v4.16b // GHASH block 4k+4 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low b.lt Ldec_main_loop Ldec_prepretail: // PREPRETAIL ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 mov x21, v2.d[0] // AES block 4k+2 - mov low eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 rev w9, w12 // CTR block 4k+7 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v6.16b, v6.16b // GHASH block 4k+2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 mov x23, v3.d[0] // AES block 4k+3 - mov low aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high fmov v3.d[1], x9 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v8.8b, v8.8b, v4.8b // GHASH 
block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 rev64 v7.16b, v7.16b // GHASH block 4k+3 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high pmull v4.1q, 
v7.1d, v12.1d // GHASH block 4k+3 - low aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 cmp x17, #12 // setup flags for AES-128/192/256 check eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 shl d8, d8, #56 // mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 b.lt Ldec_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 
aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 b.eq Ldec_finish_prepretail // branch if AES-192 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Ldec_finish_prepretail: eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor x22, x22, x14 // AES block 4k+2 - round N high eor x23, x23, x13 // AES block 4k+3 - round N low eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid add w12, w12, #1 // CTR block 4k+7 eor x21, x21, x13 // AES block 4k+2 - round N low pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor x24, x24, x14 // AES block 4k+3 - round N high stp x21, x22, [x2], #16 // AES block 4k+2 - store result ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v2.16b, v31.16b // 
AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low Ldec_tail: // TAIL sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result mov x6, v0.d[0] // AES block 4k+4 - mov low mov x7, v0.d[1] // AES block 4k+4 - mov high ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag cmp x5, #48 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high b.gt Ldec_blocks_more_than_3 sub w12, w12, #1 mov v3.16b, v2.16b movi v10.8b, #0 movi v11.8b, #0 cmp x5, #32 movi v9.8b, #0 mov v2.16b, v1.16b b.gt Ldec_blocks_more_than_2 sub w12, w12, #1 mov v3.16b, v1.16b cmp x5, #16 b.gt Ldec_blocks_more_than_1 sub w12, w12, #1 b Ldec_blocks_less_than_1 Ldec_blocks_more_than_3: // blocks left > 3 rev64 v4.16b, v5.16b // GHASH final-3 block ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext stp x6, x7, [x2], #16 // AES final-3 block - store result mov d10, v17.d[1] // GHASH final-3 block - mid eor v4.16b, v4.16b, v8.16b // feed in partial tag eor v0.16b, v5.16b, v1.16b // AES final-2 block - result mov d22, v4.d[1] // GHASH final-3 block - mid mov x6, v0.d[0] // AES final-2 block - mov low mov x7, v0.d[1] // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor x6, x6, x13 // AES final-2 block - round N low pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low eor x7, x7, x14 // AES final-2 block - round N high Ldec_blocks_more_than_2: // blocks left > 2 rev64 v4.16b, v5.16b // GHASH final-2 block ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag stp x6, x7, [x2], #16 // AES final-2 block - store result eor 
v0.16b, v5.16b, v2.16b // AES final-1 block - result mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid mov x6, v0.d[0] // AES final-1 block - mov low mov x7, v0.d[1] // AES final-1 block - mov high eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low movi v8.8b, #0 // suppress further partial tag feed in pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high eor x6, x6, x13 // AES final-1 block - round N low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid eor x7, x7, x14 // AES final-1 block - round N high Ldec_blocks_more_than_1: // blocks left > 1 stp x6, x7, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in mov d22, v4.d[1] // GHASH final-1 block - mid eor v0.16b, v5.16b, v3.16b // AES final block - result pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low mov x6, v0.d[0] // AES final block - mov low ins v22.d[1], v22.d[0] // GHASH final-1 block - mid mov x7, v0.d[1] // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid eor x6, x6, x13 // AES final block - round N low eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor x7, x7, x14 // AES final block - round N high Ldec_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x14, xzr // rkN_h = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 mvn x13, xzr // 
rkN_l = 0xffffffffffffffff ldp x4, x5, [x2] // load existing bytes we need to not overwrite neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x9, x13, x14, lt csel x10, x14, xzr, lt fmov d0, x9 // ctr0b is mask for last block and x6, x6, x9 mov v0.d[1], x10 bic x4, x4, x9 // mask out low existing bytes rev w9, w12 bic x5, x5, x10 // mask out high existing bytes orr x6, x6, x4 and x7, x7, x10 orr x7, x7, x5 and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag pmull v21.1q, v4.1d, v12.1d // GHASH final block - low mov d8, v4.d[1] // GHASH final block - mid eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment eor v11.16b, v11.16b, v8.16b // MODULO - fold into low stp x6, x7, [x2] str w9, [x16, #12] // store the updated counter eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp 
x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesv8-gcm-armv8-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include #if __ARM_MAX_ARCH__ >= 8 .arch armv8-a+crypto .text .globl aes_gcm_enc_kernel .hidden aes_gcm_enc_kernel .type aes_gcm_enc_kernel,%function .align 4 aes_gcm_enc_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys add x4, x0, x1, lsr #3 // end_input_ptr lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible sub x5, x5, #1 // byte_len - 1 ldr q18, [x8, #0] // load rk0 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) ldr q25, [x8, #112] // load rk7 add x5, x5, x0 lsr x12, x11, #32 fmov d2, x10 // CTR block 2 orr w11, w11, w11 rev w12, w12 // rev_ctr32 fmov d1, x10 // CTR block 1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 
add w12, w12, #1 // increment rev_ctr32 rev w9, w12 // CTR block 1 fmov d3, x10 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 1 add w12, w12, #1 // CTR block 1 ldr q19, [x8, #16] // load rk1 fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 ldr q20, [x8, #32] // load rk2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 orr x9, x11, x9, lsl #32 // CTR block 3 fmov v3.d[1], x9 // CTR block 3 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q21, [x8, #48] // load rk3 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q24, [x8, #96] // load rk6 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q23, [x8, #80] // load rk5 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q22, [x8, #64] // load rk4 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 ldr q29, [x8, #176] // load rk11 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 ldr q26, [x8, #128] // load rk8 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 add w12, w12, #1 // CTR block 3 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 ld1 { v11.16b}, [x3] ext v11.16b, 
v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 trn2 v17.2d, v14.2d, v15.2d // h4l | h3l aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 ldr q27, [x8, #144] // load rk9 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 ldr q28, [x8, #160] // load rk10 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 trn1 v9.2d, v14.2d, v15.2d // h4h | h3h aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 trn2 v16.2d, v12.2d, v13.2d // h2l | h1l aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 b.lt .Lenc_finish_first_blocks // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 
0 - round 9 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 b.eq .Lenc_finish_first_blocks // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 .Lenc_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v2.16b, v31.16b // AES block 2 - round N-1 trn1 v8.2d, v12.2d, v13.2d // h2h | h1h aese v1.16b, v31.16b // AES block 1 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 aese v3.16b, v31.16b // AES block 3 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k b.ge .Lenc_tail // handle tail ldp x19, x20, [x0, #16] // AES block 1 - load plaintext rev w9, w12 // CTR block 4 ldp x6, x7, [x0, #0] // AES block 0 - load plaintext ldp x23, x24, [x0, #48] // AES block 3 - load plaintext ldp x21, x22, [x0, #32] // AES block 2 - load plaintext add x0, x0, #64 // AES input_ptr update eor x19, x19, x13 // AES block 1 - round N low eor x20, x20, x14 // AES block 1 - round N high fmov d5, x19 // AES block 1 - mov low eor x6, x6, x13 // AES block 0 - round N low eor x7, x7, x14 // AES block 0 - round N high eor x24, x24, x14 // AES block 3 - round N high fmov d4, x6 // AES block 0 - mov low cmp x0, x5 // check if we have <= 8 blocks fmov v4.d[1], x7 // AES block 0 - mov high eor x23, x23, x13 
// AES block 3 - round N low eor x21, x21, x13 // AES block 2 - round N low fmov v5.d[1], x20 // AES block 1 - mov high fmov d6, x21 // AES block 2 - mov low add w12, w12, #1 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov d7, x23 // AES block 3 - mov low eor x22, x22, x14 // AES block 2 - round N high fmov v6.d[1], x22 // AES block 2 - mov high eor v4.16b, v4.16b, v0.16b // AES block 0 - result fmov d0, x10 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 eor v5.16b, v5.16b, v1.16b // AES block 1 - result fmov d1, x10 // CTR block 5 orr x9, x11, x9, lsl #32 // CTR block 5 fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 st1 { v4.16b}, [x2], #16 // AES block 0 - store result fmov v7.d[1], x24 // AES block 3 - mov high orr x9, x11, x9, lsl #32 // CTR block 6 eor v6.16b, v6.16b, v2.16b // AES block 2 - result st1 { v5.16b}, [x2], #16 // AES block 1 - store result add w12, w12, #1 // CTR block 6 fmov d2, x10 // CTR block 6 fmov v2.d[1], x9 // CTR block 6 st1 { v6.16b}, [x2], #16 // AES block 2 - store result rev w9, w12 // CTR block 7 orr x9, x11, x9, lsl #32 // CTR block 7 eor v7.16b, v7.16b, v3.16b // AES block 3 - result st1 { v7.16b}, [x2], #16 // AES block 3 - store result b.ge .Lenc_prepretail // do prepretail .Lenc_main_loop: // main loop start aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d3, x10 // CTR block 4k+3 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 fmov v3.d[1], x9 // CTR block 4k+3 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 ldp 
x21, x22, [x0, #32] // AES block 4k+6 - load plaintext aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 eor v4.16b, v4.16b, v11.16b // PRE 1 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x23, x23, x13 // AES block 4k+7 - round N low aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d10, v17.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high eor x22, x22, x14 // AES block 4k+6 - round N high mov d8, v4.d[1] // GHASH block 4k - mid aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 
aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor x19, x19, x13 // AES block 4k+5 - round N low aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 eor x21, x21, x13 // AES block 4k+6 - round N low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 
- round 8 movi v8.8b, #0xc2 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check fmov d5, x19 // AES block 4k+5 - mov low ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext b.lt .Lenc_main_loop_continue // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq .Lenc_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 .Lenc_main_loop_continue: shl d8, d8, #56 // mod_constant eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid add w12, w12, #1 // CTR block 4k+3 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up add x0, x0, #64 // AES input_ptr update pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid rev w9, w12 // CTR block 4k+8 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor x6, x6, x13 // AES 
block 4k+4 - round N low eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up eor x7, x7, x14 // AES block 4k+4 - round N high fmov d4, x6 // AES block 4k+4 - mov low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid eor x20, x20, x14 // AES block 4k+5 - round N high eor x24, x24, x14 // AES block 4k+7 - round N high add w12, w12, #1 // CTR block 4k+8 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid fmov d7, x23 // AES block 4k+7 - mov low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 fmov v5.d[1], x20 // AES block 4k+5 - mov high fmov d6, x21 // AES block 4k+6 - mov low cmp x0, x5 // .LOOP CONTROL fmov v6.d[1], x22 // AES block 4k+6 - mov high pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 rev w9, w12 // CTR block 4k+9 add w12, w12, #1 // CTR block 4k+9 eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result fmov d1, x10 // CTR block 4k+9 orr x9, x11, x9, lsl #32 // CTR block 4k+9 fmov v1.d[1], x9 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 rev w9, w12 // CTR block 4k+10 st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result orr x9, x11, x9, lsl #32 // CTR block 4k+10 eor v11.16b, v11.16b, v9.16b // MODULO - fold into low fmov v7.d[1], x24 // AES block 4k+7 - mov high ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result fmov d2, x10 // CTR block 4k+10 st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result fmov v2.d[1], x9 // CTR block 4k+10 rev w9, w12 // CTR block 4k+11 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low orr x9, x11, 
x9, lsl #32 // CTR block 4k+11 eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result b.lt .Lenc_main_loop .Lenc_prepretail: // PREPRETAIL aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov d3, x10 // CTR block 4k+3 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) fmov v3.d[1], x9 // CTR block 4k+3 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese 
v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid add w12, w12, #1 // CTR block 4k+3 pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 aese v1.16b, v25.16b aesmc 
v1.16b, v1.16b // AES block 4k+5 - round 7 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v10.16b, v10.16b, v9.16b // karatsuba tidy up aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 pmull v4.1q, v9.1d, v8.1d ext v9.16b, v9.16b, v9.16b, #8 eor v10.16b, v10.16b, v11.16b b.lt .Lenc_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 b.eq .Lenc_finish_prepretail // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese 
v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 .Lenc_finish_prepretail: eor v10.16b, v10.16b, v4.16b eor v10.16b, v10.16b, v9.16b pmull v4.1q, v10.1d, v8.1d ext v10.16b, v10.16b, v10.16b, #8 aese v1.16b, v31.16b // AES block 4k+5 - round N-1 eor v11.16b, v11.16b, v4.16b aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b .Lenc_tail: // TAIL ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high cmp x5, #48 fmov d4, x6 // AES block 4k+4 - mov low fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result b.gt .Lenc_blocks_more_than_3 cmp x5, #32 mov v3.16b, v2.16b movi v11.8b, #0 movi v9.8b, #0 sub w12, w12, #1 mov v2.16b, v1.16b movi v10.8b, #0 b.gt .Lenc_blocks_more_than_2 mov v3.16b, v1.16b sub w12, w12, #1 cmp x5, #16 b.gt .Lenc_blocks_more_than_1 sub w12, w12, #1 b .Lenc_blocks_less_than_1 .Lenc_blocks_more_than_3: // blocks left > 3 st1 { v5.16b}, [x2], #16 // AES final-3 block - store result ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-3 block eor x6, x6, x13 // AES final-2 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag eor x7, x7, x14 // AES final-2 block - round N high mov d22, v4.d[1] // GHASH final-3 block - mid fmov d5, x6 // AES final-2 block - mov low fmov v5.d[1], x7 // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in mov d10, 
v17.d[1] // GHASH final-3 block - mid pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor v5.16b, v5.16b, v1.16b // AES final-2 block - result .Lenc_blocks_more_than_2: // blocks left > 2 st1 { v5.16b}, [x2], #16 // AES final-2 block - store result ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-2 block eor x6, x6, x13 // AES final-1 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag fmov d5, x6 // AES final-1 block - mov low eor x7, x7, x14 // AES final-1 block - round N high fmov v5.d[1], x7 // AES final-1 block - mov high movi v8.8b, #0 // suppress further partial tag feed in pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid eor v5.16b, v5.16b, v2.16b // AES final-1 block - result eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid .Lenc_blocks_more_than_1: // blocks left > 1 st1 { v5.16b}, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ldp x6, x7, [x0], #16 // AES final block - load input low & high eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in eor x6, x6, x13 // AES final block - round N low mov d22, v4.d[1] // GHASH final-1 block - mid pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor x7, x7, x14 // AES final block - round N high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high ins v22.d[1], v22.d[0] // GHASH final-1 block - mid fmov d5, x6 // 
AES final block - mov low fmov v5.d[1], x7 // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low eor v5.16b, v5.16b, v3.16b // AES final block - result eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low .Lenc_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored mvn x14, xzr // rkN_h = 0xffffffffffffffff and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x6, x13, x14, lt csel x7, x14, xzr, lt fmov d0, x6 // ctr0b is mask for last block fmov v0.d[1], x7 and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high mov d8, v4.d[1] // GHASH final block - mid rev w9, w12 pmull v21.1q, v4.1d, v12.1d // GHASH final block - low eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into 
mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment str w9, [x16, #12] // store the updated counter st1 { v5.16b}, [x2] // store all 16B eor v11.16b, v11.16b, v9.16b // MODULO - fold into low eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret .size aes_gcm_enc_kernel,.-aes_gcm_enc_kernel .globl aes_gcm_dec_kernel .hidden aes_gcm_dec_kernel .type aes_gcm_dec_kernel,%function .align 4 aes_gcm_dec_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ldr q26, [x8, #128] // load rk8 sub x5, x5, #1 // byte_len - 1 ldr q25, [x8, #112] // load rk7 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) add x4, x0, x1, lsr #3 // end_input_ptr ldr q24, [x8, #96] // load rk6 lsr x12, x11, #32 ldr q23, [x8, #80] // load rk5 orr w11, w11, w11 ldr q21, [x8, #48] // load rk3 add x5, x5, x0 rev w12, w12 // rev_ctr32 add w12, w12, #1 // increment rev_ctr32 fmov d3, x10 // CTR block 3 rev w9, w12 // CTR block 1 add w12, w12, #1 // CTR block 1 fmov d1, x10 // CTR block 1 orr x9, x11, x9, lsl #32 // CTR block 1 ld1 { 
v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 fmov d2, x10 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 3 ldr q18, [x8, #0] // load rk0 fmov v3.d[1], x9 // CTR block 3 add w12, w12, #1 // CTR block 3 ldr q22, [x8, #64] // load rk4 ldr q19, [x8, #16] // load rk1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q20, [x8, #32] // load rk2 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q27, [x8, #144] // load rk9 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q28, [x8, #160] // load rk10 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // 
AES block 0 - round 4 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 cmp x17, #12 // setup flags for AES-128/192/256 check aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 ldr q29, [x8, #176] // load rk11 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 b.lt .Ldec_finish_first_blocks // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v3.16b, v28.16b aesmc 
v3.16b, v3.16b // AES block 3 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 b.eq .Ldec_finish_first_blocks // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 .Ldec_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks trn1 v9.2d, v14.2d, v15.2d // h4h | h3h trn2 v17.2d, v14.2d, v15.2d // h4l | h3l trn1 v8.2d, v12.2d, v13.2d // h2h | h1h trn2 v16.2d, v12.2d, v13.2d // h2l | h1l eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v1.16b, v31.16b // AES block 1 - round N-1 aese v2.16b, v31.16b // AES block 2 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k aese v3.16b, v31.16b // AES block 3 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 b.ge .Ldec_tail // handle tail ldr q4, [x0, #0] // AES block 0 - load ciphertext ldr q5, [x0, #16] // AES block 1 - load ciphertext rev w9, w12 // CTR block 4 eor v0.16b, v4.16b, v0.16b // AES block 0 - result eor v1.16b, v5.16b, v1.16b // AES block 1 - result rev64 v5.16b, v5.16b // GHASH block 1 ldr q7, [x0, #48] // AES block 3 - load ciphertext mov x7, v0.d[1] // AES block 0 - mov high mov x6, v0.d[0] // AES block 0 - mov low rev64 v4.16b, v4.16b // GHASH block 0 add w12, w12, #1 // CTR block 4 fmov d0, x10 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 mov x19, v1.d[0] // AES block 1 - mov low orr x9, x11, x9, lsl #32 // CTR block 5 mov x20, v1.d[1] // AES block 1 - mov high 
eor x7, x7, x14 // AES block 0 - round N high eor x6, x6, x13 // AES block 0 - round N low stp x6, x7, [x2], #16 // AES block 0 - store result fmov d1, x10 // CTR block 5 ldr q6, [x0, #32] // AES block 2 - load ciphertext add x0, x0, #64 // AES input_ptr update fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 add w12, w12, #1 // CTR block 6 eor x19, x19, x13 // AES block 1 - round N low orr x9, x11, x9, lsl #32 // CTR block 6 eor x20, x20, x14 // AES block 1 - round N high stp x19, x20, [x2], #16 // AES block 1 - store result eor v2.16b, v6.16b, v2.16b // AES block 2 - result cmp x0, x5 // check if we have <= 8 blocks b.ge .Ldec_prepretail // do prepretail .Ldec_main_loop: // main loop start mov x21, v2.d[0] // AES block 4k+2 - mov low ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 eor v4.16b, v4.16b, v11.16b // PRE 1 rev w9, w12 // CTR block 4k+7 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x23, v3.d[0] // AES block 4k+3 - mov low pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov v3.d[1], x9 // CTR block 4k+7 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor x22, x22, x14 // AES block 4k+2 - round N high aese v2.16b, 
v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 rev64 v6.16b, v6.16b // GHASH block 4k+2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x21, x21, x13 // AES block 4k+2 - round N low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 stp x21, x22, [x2], #16 // AES block 4k+2 - store result pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 rev64 v7.16b, v7.16b // GHASH block 4k+3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor x23, x23, x13 // AES block 4k+3 - round N low pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor x24, x24, x14 // AES block 4k+3 - round N high eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 add w12, w12, #1 // CTR block 4k+7 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid 
rev w9, w12 // CTR block 4k+8 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 add w12, w12, #1 // CTR block 4k+8 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid movi v8.8b, #0xc2 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 b.lt .Ldec_main_loop_continue // branch if AES-128 aese v0.16b, 
v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq .Ldec_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 .Ldec_main_loop_continue: pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext aese v0.16b, v31.16b // AES block 4k+4 - round N-1 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext mov x7, v0.d[1] // AES block 4k+4 - mov high eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid aese v1.16b, v31.16b // AES 
block 4k+5 - round N-1 add x0, x0, #64 // AES input_ptr update mov x6, v0.d[0] // AES block 4k+4 - mov low fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result rev w9, w12 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+9 cmp x0, x5 // .LOOP CONTROL add w12, w12, #1 // CTR block 4k+9 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high mov x20, v1.d[1] // AES block 4k+5 - mov high eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low mov x19, v1.d[0] // AES block 4k+5 - mov low fmov d1, x10 // CTR block 4k+9 ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment fmov v1.d[1], x9 // CTR block 4k+9 rev w9, w12 // CTR block 4k+10 add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+10 rev64 v5.16b, v5.16b // GHASH block 4k+5 eor x20, x20, x14 // AES block 4k+5 - round N high stp x6, x7, [x2], #16 // AES block 4k+4 - store result eor x19, x19, x13 // AES block 4k+5 - round N low stp x19, x20, [x2], #16 // AES block 4k+5 - store result rev64 v4.16b, v4.16b // GHASH block 4k+4 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low b.lt .Ldec_main_loop .Ldec_prepretail: // PREPRETAIL ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 mov x21, v2.d[0] // AES block 4k+2 - mov low eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 rev w9, w12 // CTR block 4k+7 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v6.16b, v6.16b // GHASH block 4k+2 orr x9, x11, x9, lsl #32 
// CTR block 4k+7 mov x23, v3.d[0] // AES block 4k+3 - mov low aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high fmov v3.d[1], x9 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 rev64 v7.16b, v7.16b // GHASH block 4k+3 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor 
v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 cmp x17, #12 // setup flags for AES-128/192/256 check eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 aese v2.16b, 
v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 shl d8, d8, #56 // mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 b.lt .Ldec_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 b.eq .Ldec_finish_prepretail // branch if AES-192 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 .Ldec_finish_prepretail: eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor x22, x22, x14 // AES block 4k+2 - round N high eor x23, x23, x13 // AES block 4k+3 - round N low eor v10.16b, v10.16b, v9.16b // MODULO - fold 
into mid add w12, w12, #1 // CTR block 4k+7 eor x21, x21, x13 // AES block 4k+2 - round N low pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor x24, x24, x14 // AES block 4k+3 - round N high stp x21, x22, [x2], #16 // AES block 4k+2 - store result ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low .Ldec_tail: // TAIL sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result mov x6, v0.d[0] // AES block 4k+4 - mov low mov x7, v0.d[1] // AES block 4k+4 - mov high ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag cmp x5, #48 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high b.gt .Ldec_blocks_more_than_3 sub w12, w12, #1 mov v3.16b, v2.16b movi v10.8b, #0 movi v11.8b, #0 cmp x5, #32 movi v9.8b, #0 mov v2.16b, v1.16b b.gt .Ldec_blocks_more_than_2 sub w12, w12, #1 mov v3.16b, v1.16b cmp x5, #16 b.gt .Ldec_blocks_more_than_1 sub w12, w12, #1 b .Ldec_blocks_less_than_1 .Ldec_blocks_more_than_3: // blocks left > 3 rev64 v4.16b, v5.16b // GHASH final-3 block ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext stp x6, x7, [x2], #16 // AES final-3 block - store result mov d10, v17.d[1] // GHASH final-3 block - mid eor v4.16b, v4.16b, v8.16b // feed in partial tag eor v0.16b, v5.16b, v1.16b // AES final-2 block - result mov d22, v4.d[1] // GHASH final-3 block - mid mov x6, v0.d[0] // AES final-2 block - mov low mov x7, v0.d[1] // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // 
GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor x6, x6, x13 // AES final-2 block - round N low pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low eor x7, x7, x14 // AES final-2 block - round N high .Ldec_blocks_more_than_2: // blocks left > 2 rev64 v4.16b, v5.16b // GHASH final-2 block ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag stp x6, x7, [x2], #16 // AES final-2 block - store result eor v0.16b, v5.16b, v2.16b // AES final-1 block - result mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid mov x6, v0.d[0] // AES final-1 block - mov low mov x7, v0.d[1] // AES final-1 block - mov high eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low movi v8.8b, #0 // suppress further partial tag feed in pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high eor x6, x6, x13 // AES final-1 block - round N low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid eor x7, x7, x14 // AES final-1 block - round N high .Ldec_blocks_more_than_1: // blocks left > 1 stp x6, x7, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in mov d22, v4.d[1] // GHASH final-1 block - mid eor v0.16b, v5.16b, v3.16b // AES final block - result pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low mov x6, v0.d[0] // AES final block 
- mov low ins v22.d[1], v22.d[0] // GHASH final-1 block - mid mov x7, v0.d[1] // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid eor x6, x6, x13 // AES final block - round N low eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor x7, x7, x14 // AES final block - round N high .Ldec_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x14, xzr // rkN_h = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff ldp x4, x5, [x2] // load existing bytes we need to not overwrite neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x9, x13, x14, lt csel x10, x14, xzr, lt fmov d0, x9 // ctr0b is mask for last block and x6, x6, x9 mov v0.d[1], x10 bic x4, x4, x9 // mask out low existing bytes rev w9, w12 bic x5, x5, x10 // mask out high existing bytes orr x6, x6, x4 and x7, x7, x10 orr x7, x7, x5 and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag pmull v21.1q, v4.1d, v12.1d // GHASH final block - low mov d8, v4.d[1] // GHASH final block - mid eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid 
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment eor v11.16b, v11.16b, v8.16b // MODULO - fold into low stp x6, x7, [x2] str w9, [x16, #12] // store the updated counter eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret .size aes_gcm_dec_kernel,.-aes_gcm_dec_kernel #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/aesv8-gcm-armv8-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include #if __ARM_MAX_ARCH__ >= 8 .arch armv8-a+crypto .text .globl aes_gcm_enc_kernel .def aes_gcm_enc_kernel .type 32 .endef .align 4 aes_gcm_enc_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! 
mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys add x4, x0, x1, lsr #3 // end_input_ptr lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible sub x5, x5, #1 // byte_len - 1 ldr q18, [x8, #0] // load rk0 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) ldr q25, [x8, #112] // load rk7 add x5, x5, x0 lsr x12, x11, #32 fmov d2, x10 // CTR block 2 orr w11, w11, w11 rev w12, w12 // rev_ctr32 fmov d1, x10 // CTR block 1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 add w12, w12, #1 // increment rev_ctr32 rev w9, w12 // CTR block 1 fmov d3, x10 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 1 add w12, w12, #1 // CTR block 1 ldr q19, [x8, #16] // load rk1 fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 ldr q20, [x8, #32] // load rk2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 orr x9, x11, x9, lsl #32 // CTR block 3 fmov v3.d[1], x9 // CTR block 3 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q21, [x8, #48] // load rk3 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q24, [x8, #96] // load rk6 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q23, [x8, #80] // load rk5 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, 
v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q22, [x8, #64] // load rk4 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 ldr q29, [x8, #176] // load rk11 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 ldr q26, [x8, #128] // load rk8 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 add w12, w12, #1 // CTR block 3 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 trn2 v17.2d, v14.2d, v15.2d // h4l | h3l aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 ldr q27, [x8, #144] // load rk9 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese 
v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 ldr q28, [x8, #160] // load rk10 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 trn1 v9.2d, v14.2d, v15.2d // h4h | h3h aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 trn2 v16.2d, v12.2d, v13.2d // h2l | h1l aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 b.lt Lenc_finish_first_blocks // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 b.eq Lenc_finish_first_blocks // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 Lenc_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks eor v17.16b, v17.16b, v9.16b // 
h4k | h3k aese v2.16b, v31.16b // AES block 2 - round N-1 trn1 v8.2d, v12.2d, v13.2d // h2h | h1h aese v1.16b, v31.16b // AES block 1 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 aese v3.16b, v31.16b // AES block 3 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k b.ge Lenc_tail // handle tail ldp x19, x20, [x0, #16] // AES block 1 - load plaintext rev w9, w12 // CTR block 4 ldp x6, x7, [x0, #0] // AES block 0 - load plaintext ldp x23, x24, [x0, #48] // AES block 3 - load plaintext ldp x21, x22, [x0, #32] // AES block 2 - load plaintext add x0, x0, #64 // AES input_ptr update eor x19, x19, x13 // AES block 1 - round N low eor x20, x20, x14 // AES block 1 - round N high fmov d5, x19 // AES block 1 - mov low eor x6, x6, x13 // AES block 0 - round N low eor x7, x7, x14 // AES block 0 - round N high eor x24, x24, x14 // AES block 3 - round N high fmov d4, x6 // AES block 0 - mov low cmp x0, x5 // check if we have <= 8 blocks fmov v4.d[1], x7 // AES block 0 - mov high eor x23, x23, x13 // AES block 3 - round N low eor x21, x21, x13 // AES block 2 - round N low fmov v5.d[1], x20 // AES block 1 - mov high fmov d6, x21 // AES block 2 - mov low add w12, w12, #1 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov d7, x23 // AES block 3 - mov low eor x22, x22, x14 // AES block 2 - round N high fmov v6.d[1], x22 // AES block 2 - mov high eor v4.16b, v4.16b, v0.16b // AES block 0 - result fmov d0, x10 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 eor v5.16b, v5.16b, v1.16b // AES block 1 - result fmov d1, x10 // CTR block 5 orr x9, x11, x9, lsl #32 // CTR block 5 fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 st1 { v4.16b}, [x2], #16 // AES block 0 - store result fmov v7.d[1], x24 // AES block 3 - mov high orr x9, x11, x9, lsl #32 // CTR block 6 eor v6.16b, v6.16b, v2.16b // AES block 2 - result st1 { v5.16b}, [x2], #16 // AES block 1 - store result add w12, w12, #1 // CTR block 6 
fmov d2, x10 // CTR block 6 fmov v2.d[1], x9 // CTR block 6 st1 { v6.16b}, [x2], #16 // AES block 2 - store result rev w9, w12 // CTR block 7 orr x9, x11, x9, lsl #32 // CTR block 7 eor v7.16b, v7.16b, v3.16b // AES block 3 - result st1 { v7.16b}, [x2], #16 // AES block 3 - store result b.ge Lenc_prepretail // do prepretail Lenc_main_loop: // main loop start aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d3, x10 // CTR block 4k+3 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 fmov v3.d[1], x9 // CTR block 4k+3 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 eor v4.16b, v4.16b, v11.16b // PRE 1 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x23, x23, x13 // AES block 4k+7 - round N low aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d10, v17.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high eor x22, x22, x14 // AES block 4k+6 - round N high mov d8, v4.d[1] // GHASH block 4k - mid aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, 
v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 
- low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor x19, x19, x13 // AES block 4k+5 - round N low aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 eor x21, x21, x13 // AES block 4k+6 - round N low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 movi v8.8b, #0xc2 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check fmov d5, x19 // AES block 4k+5 - mov low ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext b.lt Lenc_main_loop_continue // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq Lenc_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, 
v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Lenc_main_loop_continue: shl d8, d8, #56 // mod_constant eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid add w12, w12, #1 // CTR block 4k+3 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up add x0, x0, #64 // AES input_ptr update pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid rev w9, w12 // CTR block 4k+8 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor x6, x6, x13 // AES block 4k+4 - round N low eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up eor x7, x7, x14 // AES block 4k+4 - round N high fmov d4, x6 // AES block 4k+4 - mov low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid eor x20, x20, x14 // AES block 4k+5 - round N high eor x24, x24, x14 // AES block 4k+7 - round N high add w12, w12, #1 // CTR block 4k+8 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid fmov d7, x23 // AES block 4k+7 - mov low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 fmov v5.d[1], x20 // AES block 4k+5 - mov high fmov d6, x21 // AES block 4k+6 - mov low cmp x0, x5 // LOOP CONTROL fmov v6.d[1], x22 // AES block 4k+6 - mov high pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 
4k+8 rev w9, w12 // CTR block 4k+9 add w12, w12, #1 // CTR block 4k+9 eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result fmov d1, x10 // CTR block 4k+9 orr x9, x11, x9, lsl #32 // CTR block 4k+9 fmov v1.d[1], x9 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 rev w9, w12 // CTR block 4k+10 st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result orr x9, x11, x9, lsl #32 // CTR block 4k+10 eor v11.16b, v11.16b, v9.16b // MODULO - fold into low fmov v7.d[1], x24 // AES block 4k+7 - mov high ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result fmov d2, x10 // CTR block 4k+10 st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result fmov v2.d[1], x9 // CTR block 4k+10 rev w9, w12 // CTR block 4k+11 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low orr x9, x11, x9, lsl #32 // CTR block 4k+11 eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result b.lt Lenc_main_loop Lenc_prepretail: // PREPRETAIL aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov d3, x10 // CTR block 4k+3 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) fmov v3.d[1], x9 // CTR block 4k+3 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES 
block 4k+7 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid add w12, w12, #1 // CTR block 4k+3 pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low ins 
v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v10.16b, v10.16b, v9.16b // karatsuba tidy up aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 pmull v4.1q, v9.1d, v8.1d ext v9.16b, v9.16b, v9.16b, #8 eor v10.16b, v10.16b, v11.16b b.lt 
Lenc_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 b.eq Lenc_finish_prepretail // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 Lenc_finish_prepretail: eor v10.16b, v10.16b, v4.16b eor v10.16b, v10.16b, v9.16b pmull v4.1q, v10.1d, v8.1d ext v10.16b, v10.16b, v10.16b, #8 aese v1.16b, v31.16b // AES block 4k+5 - round N-1 eor v11.16b, v11.16b, v4.16b aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b Lenc_tail: // TAIL ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high cmp x5, #48 fmov d4, x6 // AES block 4k+4 - mov low fmov 
v4.d[1], x7 // AES block 4k+4 - mov high eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result b.gt Lenc_blocks_more_than_3 cmp x5, #32 mov v3.16b, v2.16b movi v11.8b, #0 movi v9.8b, #0 sub w12, w12, #1 mov v2.16b, v1.16b movi v10.8b, #0 b.gt Lenc_blocks_more_than_2 mov v3.16b, v1.16b sub w12, w12, #1 cmp x5, #16 b.gt Lenc_blocks_more_than_1 sub w12, w12, #1 b Lenc_blocks_less_than_1 Lenc_blocks_more_than_3: // blocks left > 3 st1 { v5.16b}, [x2], #16 // AES final-3 block - store result ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-3 block eor x6, x6, x13 // AES final-2 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag eor x7, x7, x14 // AES final-2 block - round N high mov d22, v4.d[1] // GHASH final-3 block - mid fmov d5, x6 // AES final-2 block - mov low fmov v5.d[1], x7 // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in mov d10, v17.d[1] // GHASH final-3 block - mid pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor v5.16b, v5.16b, v1.16b // AES final-2 block - result Lenc_blocks_more_than_2: // blocks left > 2 st1 { v5.16b}, [x2], #16 // AES final-2 block - store result ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-2 block eor x6, x6, x13 // AES final-1 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag fmov d5, x6 // AES final-1 block - mov low eor x7, x7, x14 // AES final-1 block - round N high fmov v5.d[1], x7 // AES final-1 block - mov high movi v8.8b, #0 // suppress further partial tag feed in pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low eor v22.8b, v22.8b, v4.8b // GHASH 
final-2 block - mid eor v5.16b, v5.16b, v2.16b // AES final-1 block - result eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid Lenc_blocks_more_than_1: // blocks left > 1 st1 { v5.16b}, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ldp x6, x7, [x0], #16 // AES final block - load input low & high eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in eor x6, x6, x13 // AES final block - round N low mov d22, v4.d[1] // GHASH final-1 block - mid pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor x7, x7, x14 // AES final block - round N high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high ins v22.d[1], v22.d[0] // GHASH final-1 block - mid fmov d5, x6 // AES final block - mov low fmov v5.d[1], x7 // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low eor v5.16b, v5.16b, v3.16b // AES final block - result eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low Lenc_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored mvn x14, xzr // rkN_h = 0xffffffffffffffff and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x6, x13, x14, lt csel x7, x14, xzr, lt fmov d0, x6 // ctr0b is mask for last block fmov v0.d[1], x7 and v5.16b, v5.16b, v0.16b // 
possibly partial last block has zeroes in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high mov d8, v4.d[1] // GHASH final block - mid rev w9, w12 pmull v21.1q, v4.1d, v12.1d // GHASH final block - low eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment str w9, [x16, #12] // store the updated counter st1 { v5.16b}, [x2] // store all 16B eor v11.16b, v11.16b, v9.16b // MODULO - fold into low eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret .globl aes_gcm_dec_kernel .def aes_gcm_dec_kernel .type 32 .endef .align 4 aes_gcm_dec_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! 
mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ldr q26, [x8, #128] // load rk8 sub x5, x5, #1 // byte_len - 1 ldr q25, [x8, #112] // load rk7 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) add x4, x0, x1, lsr #3 // end_input_ptr ldr q24, [x8, #96] // load rk6 lsr x12, x11, #32 ldr q23, [x8, #80] // load rk5 orr w11, w11, w11 ldr q21, [x8, #48] // load rk3 add x5, x5, x0 rev w12, w12 // rev_ctr32 add w12, w12, #1 // increment rev_ctr32 fmov d3, x10 // CTR block 3 rev w9, w12 // CTR block 1 add w12, w12, #1 // CTR block 1 fmov d1, x10 // CTR block 1 orr x9, x11, x9, lsl #32 // CTR block 1 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 fmov d2, x10 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 3 ldr q18, [x8, #0] // load rk0 fmov v3.d[1], x9 // CTR block 3 add w12, w12, #1 // CTR block 3 ldr q22, [x8, #64] // load rk4 ldr q19, [x8, #16] // load rk1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, 
v13.16b, v13.16b, #8 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q20, [x8, #32] // load rk2 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q27, [x8, #144] // load rk9 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q28, [x8, #160] // load rk10 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 cmp x17, #12 // setup flags for AES-128/192/256 check aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b 
// AES block 2 - round 6 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 ldr q29, [x8, #176] // load rk11 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 b.lt Ldec_finish_first_blocks // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 b.eq Ldec_finish_first_blocks // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 Ldec_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks trn1 v9.2d, v14.2d, v15.2d // h4h | h3h trn2 v17.2d, v14.2d, v15.2d // h4l | h3l trn1 v8.2d, v12.2d, v13.2d // h2h | h1h trn2 v16.2d, v12.2d, v13.2d 
// h2l | h1l eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v1.16b, v31.16b // AES block 1 - round N-1 aese v2.16b, v31.16b // AES block 2 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k aese v3.16b, v31.16b // AES block 3 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 b.ge Ldec_tail // handle tail ldr q4, [x0, #0] // AES block 0 - load ciphertext ldr q5, [x0, #16] // AES block 1 - load ciphertext rev w9, w12 // CTR block 4 eor v0.16b, v4.16b, v0.16b // AES block 0 - result eor v1.16b, v5.16b, v1.16b // AES block 1 - result rev64 v5.16b, v5.16b // GHASH block 1 ldr q7, [x0, #48] // AES block 3 - load ciphertext mov x7, v0.d[1] // AES block 0 - mov high mov x6, v0.d[0] // AES block 0 - mov low rev64 v4.16b, v4.16b // GHASH block 0 add w12, w12, #1 // CTR block 4 fmov d0, x10 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 mov x19, v1.d[0] // AES block 1 - mov low orr x9, x11, x9, lsl #32 // CTR block 5 mov x20, v1.d[1] // AES block 1 - mov high eor x7, x7, x14 // AES block 0 - round N high eor x6, x6, x13 // AES block 0 - round N low stp x6, x7, [x2], #16 // AES block 0 - store result fmov d1, x10 // CTR block 5 ldr q6, [x0, #32] // AES block 2 - load ciphertext add x0, x0, #64 // AES input_ptr update fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 add w12, w12, #1 // CTR block 6 eor x19, x19, x13 // AES block 1 - round N low orr x9, x11, x9, lsl #32 // CTR block 6 eor x20, x20, x14 // AES block 1 - round N high stp x19, x20, [x2], #16 // AES block 1 - store result eor v2.16b, v6.16b, v2.16b // AES block 2 - result cmp x0, x5 // check if we have <= 8 blocks b.ge Ldec_prepretail // do prepretail Ldec_main_loop: // main loop start mov x21, v2.d[0] // AES block 4k+2 - mov low ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov 
x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 eor v4.16b, v4.16b, v11.16b // PRE 1 rev w9, w12 // CTR block 4k+7 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x23, v3.d[0] // AES block 4k+3 - mov low pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov v3.d[1], x9 // CTR block 4k+7 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor x22, x22, x14 // AES block 4k+2 - round N high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 rev64 v6.16b, v6.16b // GHASH block 4k+2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x21, x21, x13 // AES block 4k+2 - round N low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 stp x21, x22, [x2], #16 // AES block 4k+2 - store result pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 rev64 v7.16b, v7.16b // GHASH block 4k+3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor x23, x23, x13 // AES block 4k+3 - round N low pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor x24, x24, x14 // AES block 4k+3 - round N high eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - 
high aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 add w12, w12, #1 // CTR block 4k+7 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid rev w9, w12 // CTR block 4k+8 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 add w12, w12, #1 // CTR block 4k+8 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 
6 pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid movi v8.8b, #0xc2 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 b.lt Ldec_main_loop_continue // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq Ldec_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES 
block 4k+4 - round 12 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Ldec_main_loop_continue: pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext aese v0.16b, v31.16b // AES block 4k+4 - round N-1 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext mov x7, v0.d[1] // AES block 4k+4 - mov high eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid aese v1.16b, v31.16b // AES block 4k+5 - round N-1 add x0, x0, #64 // AES input_ptr update mov x6, v0.d[0] // AES block 4k+4 - mov low fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result rev w9, w12 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+9 cmp x0, x5 // LOOP CONTROL add w12, w12, #1 // CTR block 4k+9 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high mov x20, v1.d[1] // AES block 4k+5 - mov high eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low mov x19, v1.d[0] // AES block 4k+5 - mov low fmov d1, x10 // CTR block 4k+9 ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment fmov v1.d[1], x9 // CTR block 4k+9 rev w9, w12 // CTR block 4k+10 add 
w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+10 rev64 v5.16b, v5.16b // GHASH block 4k+5 eor x20, x20, x14 // AES block 4k+5 - round N high stp x6, x7, [x2], #16 // AES block 4k+4 - store result eor x19, x19, x13 // AES block 4k+5 - round N low stp x19, x20, [x2], #16 // AES block 4k+5 - store result rev64 v4.16b, v4.16b // GHASH block 4k+4 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low b.lt Ldec_main_loop Ldec_prepretail: // PREPRETAIL ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 mov x21, v2.d[0] // AES block 4k+2 - mov low eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 rev w9, w12 // CTR block 4k+7 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v6.16b, v6.16b // GHASH block 4k+2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 mov x23, v3.d[0] // AES block 4k+3 - mov low aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high fmov v3.d[1], x9 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 rev64 v7.16b, v7.16b // GHASH block 4k+3 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor v9.16b, v9.16b, v4.16b // GHASH 
block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - 
round 5 eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 cmp x17, #12 // setup flags for AES-128/192/256 check eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 shl d8, d8, #56 // mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 b.lt Ldec_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 
4k+5 - round 10 b.eq Ldec_finish_prepretail // branch if AES-192 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Ldec_finish_prepretail: eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor x22, x22, x14 // AES block 4k+2 - round N high eor x23, x23, x13 // AES block 4k+3 - round N low eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid add w12, w12, #1 // CTR block 4k+7 eor x21, x21, x13 // AES block 4k+2 - round N low pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor x24, x24, x14 // AES block 4k+3 - round N high stp x21, x22, [x2], #16 // AES block 4k+2 - store result ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low Ldec_tail: // TAIL sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result mov x6, v0.d[0] // AES block 4k+4 - mov low mov 
x7, v0.d[1] // AES block 4k+4 - mov high ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag cmp x5, #48 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high b.gt Ldec_blocks_more_than_3 sub w12, w12, #1 mov v3.16b, v2.16b movi v10.8b, #0 movi v11.8b, #0 cmp x5, #32 movi v9.8b, #0 mov v2.16b, v1.16b b.gt Ldec_blocks_more_than_2 sub w12, w12, #1 mov v3.16b, v1.16b cmp x5, #16 b.gt Ldec_blocks_more_than_1 sub w12, w12, #1 b Ldec_blocks_less_than_1 Ldec_blocks_more_than_3: // blocks left > 3 rev64 v4.16b, v5.16b // GHASH final-3 block ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext stp x6, x7, [x2], #16 // AES final-3 block - store result mov d10, v17.d[1] // GHASH final-3 block - mid eor v4.16b, v4.16b, v8.16b // feed in partial tag eor v0.16b, v5.16b, v1.16b // AES final-2 block - result mov d22, v4.d[1] // GHASH final-3 block - mid mov x6, v0.d[0] // AES final-2 block - mov low mov x7, v0.d[1] // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor x6, x6, x13 // AES final-2 block - round N low pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low eor x7, x7, x14 // AES final-2 block - round N high Ldec_blocks_more_than_2: // blocks left > 2 rev64 v4.16b, v5.16b // GHASH final-2 block ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag stp x6, x7, [x2], #16 // AES final-2 block - store result eor v0.16b, v5.16b, v2.16b // AES final-1 block - result mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid mov x6, v0.d[0] // AES final-1 block - mov low mov x7, v0.d[1] // AES 
final-1 block - mov high eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low movi v8.8b, #0 // suppress further partial tag feed in pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high eor x6, x6, x13 // AES final-1 block - round N low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid eor x7, x7, x14 // AES final-1 block - round N high Ldec_blocks_more_than_1: // blocks left > 1 stp x6, x7, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in mov d22, v4.d[1] // GHASH final-1 block - mid eor v0.16b, v5.16b, v3.16b // AES final block - result pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low mov x6, v0.d[0] // AES final block - mov low ins v22.d[1], v22.d[0] // GHASH final-1 block - mid mov x7, v0.d[1] // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid eor x6, x6, x13 // AES final block - round N low eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor x7, x7, x14 // AES final block - round N high Ldec_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x14, xzr // rkN_h = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff ldp x4, x5, [x2] // load existing bytes we need to not overwrite neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x9, x13, x14, lt csel x10, x14, xzr, lt fmov d0, x9 // ctr0b is 
mask for last block and x6, x6, x9 mov v0.d[1], x10 bic x4, x4, x9 // mask out low existing bytes rev w9, w12 bic x5, x5, x10 // mask out high existing bytes orr x6, x6, x4 and x7, x7, x10 orr x7, x7, x5 and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag pmull v21.1q, v4.1d, v12.1d // GHASH final block - low mov d8, v4.d[1] // GHASH final block - mid eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment eor v11.16b, v11.16b, v8.16b // MODULO - fold into low stp x6, x7, [x2] str w9, [x16, #12] // store the updated counter eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits 
#endif
================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/armv4-mont-linux.S ================================================
#define BORINGSSL_PREFIX CNIOBoringSSL
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
#include
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch armv7-a
.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif

@ bn_mul_mont_nohw(rp, ap, bp, np, &n0, num)
@
@ Word-serial Montgomery multiplication over num 32-bit limbs.  Per the
@ inline comments below: r0=rp, r1=ap, r2=bp, r3=np, with &n0 and num
@ passed on the stack (num is loaded from [sp,#4]).  Returns 1 on
@ success, or 0 via the .Labrt early exit when num < 2.  A temporary
@ product tp[num+1] is alloca'd on the stack and zapped (overwritten)
@ before return.
@ NOTE(review): the rp = ap*bp*R^-1 mod np contract is the standard
@ bn_mul_mont one — inferred from the name and structure; confirm against
@ the generating Perl script.
.globl bn_mul_mont_nohw
.hidden bn_mul_mont_nohw
.type bn_mul_mont_nohw,%function
.align 5
bn_mul_mont_nohw:
	ldr ip,[sp,#4] @ load num
	stmdb sp!,{r0,r2} @ sp points at argument block
	cmp ip,#2
	mov r0,ip @ load num
#ifdef __thumb2__
	ittt lt
#endif
	movlt r0,#0
	addlt sp,sp,#2*4
	blt .Labrt

	stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers

	mov r0,r0,lsl#2 @ rescale r0 for byte count
	sub sp,sp,r0 @ alloca(4*num)
	sub sp,sp,#4 @ +extra dword
	sub r0,r0,#4 @ "num=num-1"
	add r4,r2,r0 @ &bp[num-1]
	add r0,sp,r0 @ r0 to point at &tp[num-1]
	ldr r8,[r0,#14*4] @ &n0
	ldr r2,[r2] @ bp[0]
	ldr r5,[r1],#4 @ ap[0],ap++
	ldr r6,[r3],#4 @ np[0],np++
	ldr r8,[r8] @ *n0
	str r4,[r0,#15*4] @ save &bp[num]

	umull r10,r11,r5,r2 @ ap[0]*bp[0]
	str r8,[r0,#14*4] @ save n0 value
	mul r8,r10,r8 @ "tp[0]"*n0
	mov r12,#0
	umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]"
	mov r4,sp

@ First outer iteration (i == 0): accumulate ap[j]*bp[0] plus the
@ Montgomery reduction term np[j]*n0 into tp[].
.L1st:
	ldr r5,[r1],#4 @ ap[j],ap++
	mov r10,r11
	ldr r6,[r3],#4 @ np[j],np++
	mov r11,#0
	umlal r10,r11,r5,r2 @ ap[j]*bp[0]
	mov r14,#0
	umlal r12,r14,r6,r8 @ np[j]*n0
	adds r12,r12,r10
	str r12,[r4],#4 @ tp[j-1]=,tp++
	adc r12,r14,#0
	cmp r4,r0
	bne .L1st

	adds r12,r12,r11
	ldr r4,[r0,#13*4] @ restore bp
	mov r14,#0
	ldr r8,[r0,#14*4] @ restore n0
	adc r14,r14,#0
	str r12,[r0] @ tp[num-1]=
	mov r7,sp
	str r14,[r0,#4] @ tp[num]=

@ Remaining outer iterations (i = 1..num-1): same inner product, with the
@ previous tp[] contents folded in via r7 (tp[j+1] look-ahead in .Linner).
.Louter:
	sub r7,r0,r7 @ "original" r0-1 value
	sub r1,r1,r7 @ "rewind" ap to &ap[1]
	ldr r2,[r4,#4]! @ *(++bp)
	sub r3,r3,r7 @ "rewind" np to &np[1]
	ldr r5,[r1,#-4] @ ap[0]
	ldr r10,[sp] @ tp[0]
	ldr r6,[r3,#-4] @ np[0]
	ldr r7,[sp,#4] @ tp[1]

	mov r11,#0
	umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0]
	str r4,[r0,#13*4] @ save bp
	mul r8,r10,r8
	mov r12,#0
	umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]"
	mov r4,sp

.Linner:
	ldr r5,[r1],#4 @ ap[j],ap++
	adds r10,r11,r7 @ +=tp[j]
	ldr r6,[r3],#4 @ np[j],np++
	mov r11,#0
	umlal r10,r11,r5,r2 @ ap[j]*bp[i]
	mov r14,#0
	umlal r12,r14,r6,r8 @ np[j]*n0
	adc r11,r11,#0
	ldr r7,[r4,#8] @ tp[j+1]
	adds r12,r12,r10
	str r12,[r4],#4 @ tp[j-1]=,tp++
	adc r12,r14,#0
	cmp r4,r0
	bne .Linner

	adds r12,r12,r11
	mov r14,#0
	ldr r4,[r0,#13*4] @ restore bp
	adc r14,r14,#0
	ldr r8,[r0,#14*4] @ restore n0
	adds r12,r12,r7
	ldr r7,[r0,#15*4] @ restore &bp[num]
	adc r14,r14,#0
	str r12,[r0] @ tp[num-1]=
	str r14,[r0,#4] @ tp[num]=

	cmp r4,r7
#ifdef __thumb2__
	itt ne
#endif
	movne r7,sp
	bne .Louter

	ldr r2,[r0,#12*4] @ pull rp
	mov r5,sp
	add r0,r0,#4 @ r0 to point at &tp[num]
	sub r5,r0,r5 @ "original" num value
	mov r4,sp @ "rewind" r4
	mov r1,r4 @ "borrow" r1
	sub r3,r3,r5 @ "rewind" r3 to &np[0]

	subs r7,r7,r7 @ "clear" carry flag
@ Trial subtraction rp[] = tp[] - np[]; the final borrow (in r14 via sbcs
@ below) decides whether the subtracted value or tp[] itself is kept.
.Lsub:
	ldr r7,[r4],#4
	ldr r6,[r3],#4
	sbcs r7,r7,r6 @ tp[j]-np[j]
	str r7,[r2],#4 @ rp[j]=
	teq r4,r0 @ preserve carry
	bne .Lsub
	sbcs r14,r14,#0 @ upmost carry
	mov r4,sp @ "rewind" r4
	sub r2,r2,r5 @ "rewind" r2

@ Borrow-selected copy back into rp[], wiping tp[] as we go (str sp below
@ overwrites each tp word with the stack-pointer value).
.Lcopy:
	ldr r7,[r4] @ conditional copy
	ldr r5,[r2]
	str sp,[r4],#4 @ zap tp
#ifdef __thumb2__
	it cc
#endif
	movcc r5,r7
	str r5,[r2],#4
	teq r4,r0 @ preserve carry
	bne .Lcopy

	mov sp,r0
	add sp,sp,#4 @ skip over tp[num+1]
	ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ restore registers
	add sp,sp,#2*4 @ skip over {r0,r2}
	mov r0,#1
.Labrt:
#if __ARM_ARCH>=5
	bx lr @ bx lr
#else
	tst lr,#1
	moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size bn_mul_mont_nohw,.-bn_mul_mont_nohw
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon

.globl bn_mul8x_mont_neon
.hidden bn_mul8x_mont_neon
.type
bn_mul8x_mont_neon,%function .align 5 bn_mul8x_mont_neon: mov ip,sp stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so ldmia ip,{r4,r5} @ load rest of parameter block mov ip,sp cmp r5,#8 bhi .LNEON_8n @ special case for r5==8, everything is in register bank... vld1.32 {d28[0]}, [r2,:32]! veor d8,d8,d8 sub r7,sp,r5,lsl#4 vld1.32 {d0,d1,d2,d3}, [r1]! @ can't specify :32 :-( and r7,r7,#-64 vld1.32 {d30[0]}, [r4,:32] mov sp,r7 @ alloca vzip.16 d28,d8 vmull.u32 q6,d28,d0[0] vmull.u32 q7,d28,d0[1] vmull.u32 q8,d28,d1[0] vshl.i64 d29,d13,#16 vmull.u32 q9,d28,d1[1] vadd.u64 d29,d29,d12 veor d8,d8,d8 vmul.u32 d29,d29,d30 vmull.u32 q10,d28,d2[0] vld1.32 {d4,d5,d6,d7}, [r3]! vmull.u32 q11,d28,d2[1] vmull.u32 q12,d28,d3[0] vzip.16 d29,d8 vmull.u32 q13,d28,d3[1] vmlal.u32 q6,d29,d4[0] sub r9,r5,#1 vmlal.u32 q7,d29,d4[1] vmlal.u32 q8,d29,d5[0] vmlal.u32 q9,d29,d5[1] vmlal.u32 q10,d29,d6[0] vmov q5,q6 vmlal.u32 q11,d29,d6[1] vmov q6,q7 vmlal.u32 q12,d29,d7[0] vmov q7,q8 vmlal.u32 q13,d29,d7[1] vmov q8,q9 vmov q9,q10 vshr.u64 d10,d10,#16 vmov q10,q11 vmov q11,q12 vadd.u64 d10,d10,d11 vmov q12,q13 veor q13,q13 vshr.u64 d10,d10,#16 b .LNEON_outer8 .align 4 .LNEON_outer8: vld1.32 {d28[0]}, [r2,:32]! 
veor d8,d8,d8 vzip.16 d28,d8 vadd.u64 d12,d12,d10 vmlal.u32 q6,d28,d0[0] vmlal.u32 q7,d28,d0[1] vmlal.u32 q8,d28,d1[0] vshl.i64 d29,d13,#16 vmlal.u32 q9,d28,d1[1] vadd.u64 d29,d29,d12 veor d8,d8,d8 subs r9,r9,#1 vmul.u32 d29,d29,d30 vmlal.u32 q10,d28,d2[0] vmlal.u32 q11,d28,d2[1] vmlal.u32 q12,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q13,d28,d3[1] vmlal.u32 q6,d29,d4[0] vmlal.u32 q7,d29,d4[1] vmlal.u32 q8,d29,d5[0] vmlal.u32 q9,d29,d5[1] vmlal.u32 q10,d29,d6[0] vmov q5,q6 vmlal.u32 q11,d29,d6[1] vmov q6,q7 vmlal.u32 q12,d29,d7[0] vmov q7,q8 vmlal.u32 q13,d29,d7[1] vmov q8,q9 vmov q9,q10 vshr.u64 d10,d10,#16 vmov q10,q11 vmov q11,q12 vadd.u64 d10,d10,d11 vmov q12,q13 veor q13,q13 vshr.u64 d10,d10,#16 bne .LNEON_outer8 vadd.u64 d12,d12,d10 mov r7,sp vshr.u64 d10,d12,#16 mov r8,r5 vadd.u64 d13,d13,d10 add r6,sp,#96 vshr.u64 d10,d13,#16 vzip.16 d12,d13 b .LNEON_tail_entry .align 4 .LNEON_8n: veor q6,q6,q6 sub r7,sp,#128 veor q7,q7,q7 sub r7,r7,r5,lsl#4 veor q8,q8,q8 and r7,r7,#-64 veor q9,q9,q9 mov sp,r7 @ alloca veor q10,q10,q10 add r7,r7,#256 veor q11,q11,q11 sub r8,r5,#8 veor q12,q12,q12 veor q13,q13,q13 .LNEON_8n_init: vst1.64 {q6,q7},[r7,:256]! subs r8,r8,#8 vst1.64 {q8,q9},[r7,:256]! vst1.64 {q10,q11},[r7,:256]! vst1.64 {q12,q13},[r7,:256]! bne .LNEON_8n_init add r6,sp,#256 vld1.32 {d0,d1,d2,d3},[r1]! add r10,sp,#8 vld1.32 {d30[0]},[r4,:32] mov r9,r5 b .LNEON_8n_outer .align 4 .LNEON_8n_outer: vld1.32 {d28[0]},[r2,:32]! @ *b++ veor d8,d8,d8 vzip.16 d28,d8 add r7,sp,#128 vld1.32 {d4,d5,d6,d7},[r3]! vmlal.u32 q6,d28,d0[0] vmlal.u32 q7,d28,d0[1] veor d8,d8,d8 vmlal.u32 q8,d28,d1[0] vshl.i64 d29,d13,#16 vmlal.u32 q9,d28,d1[1] vadd.u64 d29,d29,d12 vmlal.u32 q10,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q11,d28,d2[1] vst1.32 {d28},[sp,:64] @ put aside smashed b[8*i+0] vmlal.u32 q12,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q13,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! 
@ *b++ vmlal.u32 q6,d29,d4[0] veor d10,d10,d10 vmlal.u32 q7,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q8,d29,d5[0] vshr.u64 d12,d12,#16 vmlal.u32 q9,d29,d5[1] vmlal.u32 q10,d29,d6[0] vadd.u64 d12,d12,d13 vmlal.u32 q11,d29,d6[1] vshr.u64 d12,d12,#16 vmlal.u32 q12,d29,d7[0] vmlal.u32 q13,d29,d7[1] vadd.u64 d14,d14,d12 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+0] vmlal.u32 q7,d28,d0[0] vld1.64 {q6},[r6,:128]! vmlal.u32 q8,d28,d0[1] veor d8,d8,d8 vmlal.u32 q9,d28,d1[0] vshl.i64 d29,d15,#16 vmlal.u32 q10,d28,d1[1] vadd.u64 d29,d29,d14 vmlal.u32 q11,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q12,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+1] vmlal.u32 q13,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q6,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q7,d29,d4[0] veor d10,d10,d10 vmlal.u32 q8,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q9,d29,d5[0] vshr.u64 d14,d14,#16 vmlal.u32 q10,d29,d5[1] vmlal.u32 q11,d29,d6[0] vadd.u64 d14,d14,d15 vmlal.u32 q12,d29,d6[1] vshr.u64 d14,d14,#16 vmlal.u32 q13,d29,d7[0] vmlal.u32 q6,d29,d7[1] vadd.u64 d16,d16,d14 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+1] vmlal.u32 q8,d28,d0[0] vld1.64 {q7},[r6,:128]! vmlal.u32 q9,d28,d0[1] veor d8,d8,d8 vmlal.u32 q10,d28,d1[0] vshl.i64 d29,d17,#16 vmlal.u32 q11,d28,d1[1] vadd.u64 d29,d29,d16 vmlal.u32 q12,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q13,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+2] vmlal.u32 q6,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q7,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q8,d29,d4[0] veor d10,d10,d10 vmlal.u32 q9,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q10,d29,d5[0] vshr.u64 d16,d16,#16 vmlal.u32 q11,d29,d5[1] vmlal.u32 q12,d29,d6[0] vadd.u64 d16,d16,d17 vmlal.u32 q13,d29,d6[1] vshr.u64 d16,d16,#16 vmlal.u32 q6,d29,d7[0] vmlal.u32 q7,d29,d7[1] vadd.u64 d18,d18,d16 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+2] vmlal.u32 q9,d28,d0[0] vld1.64 {q8},[r6,:128]! 
vmlal.u32 q10,d28,d0[1] veor d8,d8,d8 vmlal.u32 q11,d28,d1[0] vshl.i64 d29,d19,#16 vmlal.u32 q12,d28,d1[1] vadd.u64 d29,d29,d18 vmlal.u32 q13,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q6,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+3] vmlal.u32 q7,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q8,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q9,d29,d4[0] veor d10,d10,d10 vmlal.u32 q10,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q11,d29,d5[0] vshr.u64 d18,d18,#16 vmlal.u32 q12,d29,d5[1] vmlal.u32 q13,d29,d6[0] vadd.u64 d18,d18,d19 vmlal.u32 q6,d29,d6[1] vshr.u64 d18,d18,#16 vmlal.u32 q7,d29,d7[0] vmlal.u32 q8,d29,d7[1] vadd.u64 d20,d20,d18 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+3] vmlal.u32 q10,d28,d0[0] vld1.64 {q9},[r6,:128]! vmlal.u32 q11,d28,d0[1] veor d8,d8,d8 vmlal.u32 q12,d28,d1[0] vshl.i64 d29,d21,#16 vmlal.u32 q13,d28,d1[1] vadd.u64 d29,d29,d20 vmlal.u32 q6,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q7,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+4] vmlal.u32 q8,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q9,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q10,d29,d4[0] veor d10,d10,d10 vmlal.u32 q11,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q12,d29,d5[0] vshr.u64 d20,d20,#16 vmlal.u32 q13,d29,d5[1] vmlal.u32 q6,d29,d6[0] vadd.u64 d20,d20,d21 vmlal.u32 q7,d29,d6[1] vshr.u64 d20,d20,#16 vmlal.u32 q8,d29,d7[0] vmlal.u32 q9,d29,d7[1] vadd.u64 d22,d22,d20 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+4] vmlal.u32 q11,d28,d0[0] vld1.64 {q10},[r6,:128]! vmlal.u32 q12,d28,d0[1] veor d8,d8,d8 vmlal.u32 q13,d28,d1[0] vshl.i64 d29,d23,#16 vmlal.u32 q6,d28,d1[1] vadd.u64 d29,d29,d22 vmlal.u32 q7,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q8,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+5] vmlal.u32 q9,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q10,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! 
@ *b++ vmlal.u32 q11,d29,d4[0] veor d10,d10,d10 vmlal.u32 q12,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q13,d29,d5[0] vshr.u64 d22,d22,#16 vmlal.u32 q6,d29,d5[1] vmlal.u32 q7,d29,d6[0] vadd.u64 d22,d22,d23 vmlal.u32 q8,d29,d6[1] vshr.u64 d22,d22,#16 vmlal.u32 q9,d29,d7[0] vmlal.u32 q10,d29,d7[1] vadd.u64 d24,d24,d22 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+5] vmlal.u32 q12,d28,d0[0] vld1.64 {q11},[r6,:128]! vmlal.u32 q13,d28,d0[1] veor d8,d8,d8 vmlal.u32 q6,d28,d1[0] vshl.i64 d29,d25,#16 vmlal.u32 q7,d28,d1[1] vadd.u64 d29,d29,d24 vmlal.u32 q8,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q9,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+6] vmlal.u32 q10,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q11,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q12,d29,d4[0] veor d10,d10,d10 vmlal.u32 q13,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q6,d29,d5[0] vshr.u64 d24,d24,#16 vmlal.u32 q7,d29,d5[1] vmlal.u32 q8,d29,d6[0] vadd.u64 d24,d24,d25 vmlal.u32 q9,d29,d6[1] vshr.u64 d24,d24,#16 vmlal.u32 q10,d29,d7[0] vmlal.u32 q11,d29,d7[1] vadd.u64 d26,d26,d24 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+6] vmlal.u32 q13,d28,d0[0] vld1.64 {q12},[r6,:128]! vmlal.u32 q6,d28,d0[1] veor d8,d8,d8 vmlal.u32 q7,d28,d1[0] vshl.i64 d29,d27,#16 vmlal.u32 q8,d28,d1[1] vadd.u64 d29,d29,d26 vmlal.u32 q9,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q10,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+7] vmlal.u32 q11,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q12,d28,d3[1] vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0] vmlal.u32 q13,d29,d4[0] vld1.32 {d0,d1,d2,d3},[r1]! 
vmlal.u32 q6,d29,d4[1] vmlal.u32 q7,d29,d5[0] vshr.u64 d26,d26,#16 vmlal.u32 q8,d29,d5[1] vmlal.u32 q9,d29,d6[0] vadd.u64 d26,d26,d27 vmlal.u32 q10,d29,d6[1] vshr.u64 d26,d26,#16 vmlal.u32 q11,d29,d7[0] vmlal.u32 q12,d29,d7[1] vadd.u64 d12,d12,d26 vst1.32 {d29},[r10,:64] @ put aside smashed m[8*i+7] add r10,sp,#8 @ rewind sub r8,r5,#8 b .LNEON_8n_inner .align 4 .LNEON_8n_inner: subs r8,r8,#8 vmlal.u32 q6,d28,d0[0] vld1.64 {q13},[r6,:128] vmlal.u32 q7,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+0] vmlal.u32 q8,d28,d1[0] vld1.32 {d4,d5,d6,d7},[r3]! vmlal.u32 q9,d28,d1[1] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q10,d28,d2[0] vmlal.u32 q11,d28,d2[1] vmlal.u32 q12,d28,d3[0] vmlal.u32 q13,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+1] vmlal.u32 q6,d29,d4[0] vmlal.u32 q7,d29,d4[1] vmlal.u32 q8,d29,d5[0] vmlal.u32 q9,d29,d5[1] vmlal.u32 q10,d29,d6[0] vmlal.u32 q11,d29,d6[1] vmlal.u32 q12,d29,d7[0] vmlal.u32 q13,d29,d7[1] vst1.64 {q6},[r7,:128]! vmlal.u32 q7,d28,d0[0] vld1.64 {q6},[r6,:128] vmlal.u32 q8,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+1] vmlal.u32 q9,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q10,d28,d1[1] vmlal.u32 q11,d28,d2[0] vmlal.u32 q12,d28,d2[1] vmlal.u32 q13,d28,d3[0] vmlal.u32 q6,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+2] vmlal.u32 q7,d29,d4[0] vmlal.u32 q8,d29,d4[1] vmlal.u32 q9,d29,d5[0] vmlal.u32 q10,d29,d5[1] vmlal.u32 q11,d29,d6[0] vmlal.u32 q12,d29,d6[1] vmlal.u32 q13,d29,d7[0] vmlal.u32 q6,d29,d7[1] vst1.64 {q7},[r7,:128]! vmlal.u32 q8,d28,d0[0] vld1.64 {q7},[r6,:128] vmlal.u32 q9,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+2] vmlal.u32 q10,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q11,d28,d1[1] vmlal.u32 q12,d28,d2[0] vmlal.u32 q13,d28,d2[1] vmlal.u32 q6,d28,d3[0] vmlal.u32 q7,d28,d3[1] vld1.32 {d28},[r10,:64]! 
@ pull smashed b[8*i+3] vmlal.u32 q8,d29,d4[0] vmlal.u32 q9,d29,d4[1] vmlal.u32 q10,d29,d5[0] vmlal.u32 q11,d29,d5[1] vmlal.u32 q12,d29,d6[0] vmlal.u32 q13,d29,d6[1] vmlal.u32 q6,d29,d7[0] vmlal.u32 q7,d29,d7[1] vst1.64 {q8},[r7,:128]! vmlal.u32 q9,d28,d0[0] vld1.64 {q8},[r6,:128] vmlal.u32 q10,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+3] vmlal.u32 q11,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q12,d28,d1[1] vmlal.u32 q13,d28,d2[0] vmlal.u32 q6,d28,d2[1] vmlal.u32 q7,d28,d3[0] vmlal.u32 q8,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+4] vmlal.u32 q9,d29,d4[0] vmlal.u32 q10,d29,d4[1] vmlal.u32 q11,d29,d5[0] vmlal.u32 q12,d29,d5[1] vmlal.u32 q13,d29,d6[0] vmlal.u32 q6,d29,d6[1] vmlal.u32 q7,d29,d7[0] vmlal.u32 q8,d29,d7[1] vst1.64 {q9},[r7,:128]! vmlal.u32 q10,d28,d0[0] vld1.64 {q9},[r6,:128] vmlal.u32 q11,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+4] vmlal.u32 q12,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q13,d28,d1[1] vmlal.u32 q6,d28,d2[0] vmlal.u32 q7,d28,d2[1] vmlal.u32 q8,d28,d3[0] vmlal.u32 q9,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+5] vmlal.u32 q10,d29,d4[0] vmlal.u32 q11,d29,d4[1] vmlal.u32 q12,d29,d5[0] vmlal.u32 q13,d29,d5[1] vmlal.u32 q6,d29,d6[0] vmlal.u32 q7,d29,d6[1] vmlal.u32 q8,d29,d7[0] vmlal.u32 q9,d29,d7[1] vst1.64 {q10},[r7,:128]! vmlal.u32 q11,d28,d0[0] vld1.64 {q10},[r6,:128] vmlal.u32 q12,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+5] vmlal.u32 q13,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q6,d28,d1[1] vmlal.u32 q7,d28,d2[0] vmlal.u32 q8,d28,d2[1] vmlal.u32 q9,d28,d3[0] vmlal.u32 q10,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+6] vmlal.u32 q11,d29,d4[0] vmlal.u32 q12,d29,d4[1] vmlal.u32 q13,d29,d5[0] vmlal.u32 q6,d29,d5[1] vmlal.u32 q7,d29,d6[0] vmlal.u32 q8,d29,d6[1] vmlal.u32 q9,d29,d7[0] vmlal.u32 q10,d29,d7[1] vst1.64 {q11},[r7,:128]! 
vmlal.u32 q12,d28,d0[0] vld1.64 {q11},[r6,:128] vmlal.u32 q13,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+6] vmlal.u32 q6,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q7,d28,d1[1] vmlal.u32 q8,d28,d2[0] vmlal.u32 q9,d28,d2[1] vmlal.u32 q10,d28,d3[0] vmlal.u32 q11,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+7] vmlal.u32 q12,d29,d4[0] vmlal.u32 q13,d29,d4[1] vmlal.u32 q6,d29,d5[0] vmlal.u32 q7,d29,d5[1] vmlal.u32 q8,d29,d6[0] vmlal.u32 q9,d29,d6[1] vmlal.u32 q10,d29,d7[0] vmlal.u32 q11,d29,d7[1] vst1.64 {q12},[r7,:128]! vmlal.u32 q13,d28,d0[0] vld1.64 {q12},[r6,:128] vmlal.u32 q6,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+7] vmlal.u32 q7,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q8,d28,d1[1] vmlal.u32 q9,d28,d2[0] vmlal.u32 q10,d28,d2[1] vmlal.u32 q11,d28,d3[0] vmlal.u32 q12,d28,d3[1] it eq subeq r1,r1,r5,lsl#2 @ rewind vmlal.u32 q13,d29,d4[0] vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0] vmlal.u32 q6,d29,d4[1] vld1.32 {d0,d1,d2,d3},[r1]! vmlal.u32 q7,d29,d5[0] add r10,sp,#8 @ rewind vmlal.u32 q8,d29,d5[1] vmlal.u32 q9,d29,d6[0] vmlal.u32 q10,d29,d6[1] vmlal.u32 q11,d29,d7[0] vst1.64 {q13},[r7,:128]! vmlal.u32 q12,d29,d7[1] bne .LNEON_8n_inner add r6,sp,#128 vst1.64 {q6,q7},[r7,:256]! veor q2,q2,q2 @ d4-d5 vst1.64 {q8,q9},[r7,:256]! veor q3,q3,q3 @ d6-d7 vst1.64 {q10,q11},[r7,:256]! vst1.64 {q12},[r7,:128] subs r9,r9,#8 vld1.64 {q6,q7},[r6,:256]! vld1.64 {q8,q9},[r6,:256]! vld1.64 {q10,q11},[r6,:256]! vld1.64 {q12,q13},[r6,:256]! itt ne subne r3,r3,r5,lsl#2 @ rewind bne .LNEON_8n_outer add r7,sp,#128 vst1.64 {q2,q3}, [sp,:256]! @ start wiping stack frame vshr.u64 d10,d12,#16 vst1.64 {q2,q3},[sp,:256]! vadd.u64 d13,d13,d10 vst1.64 {q2,q3}, [sp,:256]! vshr.u64 d10,d13,#16 vst1.64 {q2,q3}, [sp,:256]! vzip.16 d12,d13 mov r8,r5 b .LNEON_tail_entry .align 4 .LNEON_tail: vadd.u64 d12,d12,d10 vshr.u64 d10,d12,#16 vld1.64 {q8,q9}, [r6, :256]! 
vadd.u64 d13,d13,d10 vld1.64 {q10,q11}, [r6, :256]! vshr.u64 d10,d13,#16 vld1.64 {q12,q13}, [r6, :256]! vzip.16 d12,d13 .LNEON_tail_entry: vadd.u64 d14,d14,d10 vst1.32 {d12[0]}, [r7, :32]! vshr.u64 d10,d14,#16 vadd.u64 d15,d15,d10 vshr.u64 d10,d15,#16 vzip.16 d14,d15 vadd.u64 d16,d16,d10 vst1.32 {d14[0]}, [r7, :32]! vshr.u64 d10,d16,#16 vadd.u64 d17,d17,d10 vshr.u64 d10,d17,#16 vzip.16 d16,d17 vadd.u64 d18,d18,d10 vst1.32 {d16[0]}, [r7, :32]! vshr.u64 d10,d18,#16 vadd.u64 d19,d19,d10 vshr.u64 d10,d19,#16 vzip.16 d18,d19 vadd.u64 d20,d20,d10 vst1.32 {d18[0]}, [r7, :32]! vshr.u64 d10,d20,#16 vadd.u64 d21,d21,d10 vshr.u64 d10,d21,#16 vzip.16 d20,d21 vadd.u64 d22,d22,d10 vst1.32 {d20[0]}, [r7, :32]! vshr.u64 d10,d22,#16 vadd.u64 d23,d23,d10 vshr.u64 d10,d23,#16 vzip.16 d22,d23 vadd.u64 d24,d24,d10 vst1.32 {d22[0]}, [r7, :32]! vshr.u64 d10,d24,#16 vadd.u64 d25,d25,d10 vshr.u64 d10,d25,#16 vzip.16 d24,d25 vadd.u64 d26,d26,d10 vst1.32 {d24[0]}, [r7, :32]! vshr.u64 d10,d26,#16 vadd.u64 d27,d27,d10 vshr.u64 d10,d27,#16 vzip.16 d26,d27 vld1.64 {q6,q7}, [r6, :256]! subs r8,r8,#8 vst1.32 {d26[0]}, [r7, :32]! bne .LNEON_tail vst1.32 {d10[0]}, [r7, :32] @ top-most bit sub r3,r3,r5,lsl#2 @ rewind r3 subs r1,sp,#0 @ clear carry flag add r2,sp,r5,lsl#2 .LNEON_sub: ldmia r1!, {r4,r5,r6,r7} ldmia r3!, {r8,r9,r10,r11} sbcs r8, r4,r8 sbcs r9, r5,r9 sbcs r10,r6,r10 sbcs r11,r7,r11 teq r1,r2 @ preserves carry stmia r0!, {r8,r9,r10,r11} bne .LNEON_sub ldr r10, [r1] @ load top-most bit mov r11,sp veor q0,q0,q0 sub r11,r2,r11 @ this is num*4 veor q1,q1,q1 mov r1,sp sub r0,r0,r11 @ rewind r0 mov r3,r2 @ second 3/4th of frame sbcs r10,r10,#0 @ result is carry flag .LNEON_copy_n_zap: ldmia r1!, {r4,r5,r6,r7} ldmia r0, {r8,r9,r10,r11} it cc movcc r8, r4 vst1.64 {q0,q1}, [r3,:256]! @ wipe itt cc movcc r9, r5 movcc r10,r6 vst1.64 {q0,q1}, [r3,:256]! 
@ wipe it cc movcc r11,r7 ldmia r1, {r4,r5,r6,r7} stmia r0!, {r8,r9,r10,r11} sub r1,r1,#16 ldmia r0, {r8,r9,r10,r11} it cc movcc r8, r4 vst1.64 {q0,q1}, [r1,:256]! @ wipe itt cc movcc r9, r5 movcc r10,r6 vst1.64 {q0,q1}, [r3,:256]! @ wipe it cc movcc r11,r7 teq r1,r2 @ preserves carry stmia r0!, {r8,r9,r10,r11} bne .LNEON_copy_n_zap mov sp,ip vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11} bx lr @ bx lr .size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon #endif .byte 77,111,110,116,103,111,109,101,114,121,32,109,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/armv8-mont-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include .text .globl _bn_mul_mont .private_extern _bn_mul_mont .align 5 _bn_mul_mont: AARCH64_SIGN_LINK_REGISTER tst x5,#7 b.eq __bn_sqr8x_mont tst x5,#3 b.eq __bn_mul4x_mont Lmul_mont: stp x29,x30,[sp,#-64]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] ldr x9,[x2],#8 // bp[0] sub x22,sp,x5,lsl#3 ldp x7,x8,[x1],#16 // ap[0..1] lsl x5,x5,#3 ldr x4,[x4] // *n0 and x22,x22,#-16 // ABI says so ldp x13,x14,[x3],#16 // np[0..1] mul x6,x7,x9 // ap[0]*bp[0] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 mul x10,x8,x9 // ap[1]*bp[0] umulh x11,x8,x9 mul x15,x6,x4 // "tp[0]"*n0 mov sp,x22 // alloca // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 // discarded // (*) As for removal of first multiplication and addition // instructions. The outcome of first addition is // guaranteed to be zero, which leaves two computationally // significant outcomes: it either carries or not. Then // question is when does it carry? Is there alternative // way to deduce it? If you follow operations, you can // observe that condition for carry is quite simple: // x6 being non-zero. So that carry can be calculated // by adding -1 to x6. That's what next instruction does. 
subs xzr,x6,#1 // (*) umulh x17,x14,x15 adc x13,x13,xzr cbz x21,L1st_skip L1st: ldr x8,[x1],#8 adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr ldr x14,[x3],#8 adds x12,x16,x13 mul x10,x8,x9 // ap[j]*bp[0] adc x13,x17,xzr umulh x11,x8,x9 adds x12,x12,x6 mul x16,x14,x15 // np[j]*m1 adc x13,x13,xzr umulh x17,x14,x15 str x12,[x22],#8 // tp[j-1] cbnz x21,L1st L1st_skip: adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adc x13,x17,xzr adds x12,x12,x6 sub x20,x5,#8 // i=num-1 adcs x13,x13,x7 adc x19,xzr,xzr // upmost overflow bit stp x12,x13,[x22] Louter: ldr x9,[x2],#8 // bp[i] ldp x7,x8,[x1],#16 ldr x23,[sp] // tp[0] add x22,sp,#8 mul x6,x7,x9 // ap[0]*bp[i] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 ldp x13,x14,[x3],#16 mul x10,x8,x9 // ap[1]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x15,x6,x4 sub x20,x20,#8 // i-- // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 subs xzr,x6,#1 // (*) umulh x17,x14,x15 cbz x21,Linner_skip Linner: ldr x8,[x1],#8 adc x13,x13,xzr ldr x23,[x22],#8 // tp[j] adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr adds x12,x16,x13 ldr x14,[x3],#8 adc x13,x17,xzr mul x10,x8,x9 // ap[j]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x16,x14,x15 // np[j]*m1 adds x12,x12,x6 umulh x17,x14,x15 str x12,[x22,#-16] // tp[j-1] cbnz x21,Linner Linner_skip: ldr x23,[x22],#8 // tp[j] adc x13,x13,xzr adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adcs x13,x17,x19 adc x19,xzr,xzr adds x6,x6,x23 adc x7,x7,xzr adds x12,x12,x6 adcs x13,x13,x7 adc x19,x19,xzr // upmost overflow bit stp x12,x13,[x22,#-16] cbnz x20,Louter // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x14,[x3],#8 // np[0] subs x21,x5,#8 // j=num-1 and clear borrow mov x1,x0 Lsub: sbcs x8,x23,x14 // tp[j]-np[j] ldr x23,[x22],#8 sub x21,x21,#8 // j-- ldr x14,[x3],#8 str x8,[x1],#8 // rp[j]=tp[j]-np[j] cbnz x21,Lsub sbcs x8,x23,x14 sbcs x19,x19,xzr // did it borrow? str x8,[x1],#8 // rp[num-1] ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x8,[x0],#8 // rp[0] sub x5,x5,#8 // num-- nop Lcond_copy: sub x5,x5,#8 // num-- csel x14,x23,x8,lo // did it borrow? ldr x23,[x22],#8 ldr x8,[x0],#8 str xzr,[x22,#-16] // wipe tp str x14,[x0,#-16] cbnz x5,Lcond_copy csel x14,x23,x8,lo str xzr,[x22,#-8] // wipe tp str x14,[x0,#-8] ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldr x29,[sp],#64 AARCH64_VALIDATE_LINK_REGISTER ret .align 5 __bn_sqr8x_mont: // Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_sqr8x_mont is jumped to // only from bn_mul_mont which has already signed the return address. cmp x1,x2 b.ne __bn_mul4x_mont Lsqr8x_mont: stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] stp x0,x3,[sp,#96] // offload rp and np ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] ldp x12,x13,[x1,#8*6] sub x2,sp,x5,lsl#4 lsl x5,x5,#3 ldr x4,[x4] // *n0 mov sp,x2 // alloca sub x27,x5,#8*8 b Lsqr8x_zero_start Lsqr8x_zero: sub x27,x27,#8*8 stp xzr,xzr,[x2,#8*0] stp xzr,xzr,[x2,#8*2] stp xzr,xzr,[x2,#8*4] stp xzr,xzr,[x2,#8*6] Lsqr8x_zero_start: stp xzr,xzr,[x2,#8*8] stp xzr,xzr,[x2,#8*10] stp xzr,xzr,[x2,#8*12] stp xzr,xzr,[x2,#8*14] add x2,x2,#8*16 cbnz x27,Lsqr8x_zero add x3,x1,x5 add x1,x1,#8*8 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr mov x23,xzr mov x24,xzr mov x25,xzr mov x26,xzr mov x2,sp str x4,[x29,#112] // offload n0 // Multiply everything but a[i]*a[i] .align 4 Lsqr8x_outer_loop: // a[1]a[0] (i) // a[2]a[0] // a[3]a[0] // a[4]a[0] // a[5]a[0] // a[6]a[0] // a[7]a[0] // a[2]a[1] (ii) // a[3]a[1] // a[4]a[1] // a[5]a[1] // a[6]a[1] // a[7]a[1] // a[3]a[2] (iii) // a[4]a[2] // a[5]a[2] // a[6]a[2] // a[7]a[2] // a[4]a[3] (iv) // a[5]a[3] // a[6]a[3] // a[7]a[3] // a[5]a[4] (v) // a[6]a[4] // a[7]a[4] // a[6]a[5] (vi) // a[7]a[5] // a[7]a[6] (vii) mul x14,x7,x6 // lo(a[1..7]*a[0]) (i) mul x15,x8,x6 mul x16,x9,x6 mul x17,x10,x6 adds x20,x20,x14 // t[1]+lo(a[1]*a[0]) mul x14,x11,x6 adcs x21,x21,x15 mul x15,x12,x6 adcs x22,x22,x16 mul x16,x13,x6 adcs x23,x23,x17 umulh x17,x7,x6 // hi(a[1..7]*a[0]) adcs x24,x24,x14 umulh x14,x8,x6 adcs x25,x25,x15 umulh x15,x9,x6 adcs x26,x26,x16 umulh x16,x10,x6 stp x19,x20,[x2],#8*2 // t[0..1] adc x19,xzr,xzr // t[8] adds x21,x21,x17 // t[2]+lo(a[1]*a[0]) umulh x17,x11,x6 adcs x22,x22,x14 umulh x14,x12,x6 adcs x23,x23,x15 umulh x15,x13,x6 adcs x24,x24,x16 mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii) adcs x25,x25,x17 mul x17,x9,x7 adcs x26,x26,x14 mul x14,x10,x7 adc x19,x19,x15 mul x15,x11,x7 adds x22,x22,x16 mul x16,x12,x7 adcs x23,x23,x17 mul x17,x13,x7 adcs x24,x24,x14 umulh x14,x8,x7 // 
hi(a[2..7]*a[1]) adcs x25,x25,x15 umulh x15,x9,x7 adcs x26,x26,x16 umulh x16,x10,x7 adcs x19,x19,x17 umulh x17,x11,x7 stp x21,x22,[x2],#8*2 // t[2..3] adc x20,xzr,xzr // t[9] adds x23,x23,x14 umulh x14,x12,x7 adcs x24,x24,x15 umulh x15,x13,x7 adcs x25,x25,x16 mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii) adcs x26,x26,x17 mul x17,x10,x8 adcs x19,x19,x14 mul x14,x11,x8 adc x20,x20,x15 mul x15,x12,x8 adds x24,x24,x16 mul x16,x13,x8 adcs x25,x25,x17 umulh x17,x9,x8 // hi(a[3..7]*a[2]) adcs x26,x26,x14 umulh x14,x10,x8 adcs x19,x19,x15 umulh x15,x11,x8 adcs x20,x20,x16 umulh x16,x12,x8 stp x23,x24,[x2],#8*2 // t[4..5] adc x21,xzr,xzr // t[10] adds x25,x25,x17 umulh x17,x13,x8 adcs x26,x26,x14 mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv) adcs x19,x19,x15 mul x15,x11,x9 adcs x20,x20,x16 mul x16,x12,x9 adc x21,x21,x17 mul x17,x13,x9 adds x26,x26,x14 umulh x14,x10,x9 // hi(a[4..7]*a[3]) adcs x19,x19,x15 umulh x15,x11,x9 adcs x20,x20,x16 umulh x16,x12,x9 adcs x21,x21,x17 umulh x17,x13,x9 stp x25,x26,[x2],#8*2 // t[6..7] adc x22,xzr,xzr // t[11] adds x19,x19,x14 mul x14,x11,x10 // lo(a[5..7]*a[4]) (v) adcs x20,x20,x15 mul x15,x12,x10 adcs x21,x21,x16 mul x16,x13,x10 adc x22,x22,x17 umulh x17,x11,x10 // hi(a[5..7]*a[4]) adds x20,x20,x14 umulh x14,x12,x10 adcs x21,x21,x15 umulh x15,x13,x10 adcs x22,x22,x16 mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi) adc x23,xzr,xzr // t[12] adds x21,x21,x17 mul x17,x13,x11 adcs x22,x22,x14 umulh x14,x12,x11 // hi(a[6..7]*a[5]) adc x23,x23,x15 umulh x15,x13,x11 adds x22,x22,x16 mul x16,x13,x12 // lo(a[7]*a[6]) (vii) adcs x23,x23,x17 umulh x17,x13,x12 // hi(a[7]*a[6]) adc x24,xzr,xzr // t[13] adds x23,x23,x14 sub x27,x3,x1 // done yet? 
adc x24,x24,x15 adds x24,x24,x16 sub x14,x3,x5 // rewinded ap adc x25,xzr,xzr // t[14] add x25,x25,x17 cbz x27,Lsqr8x_outer_break mov x4,x6 ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x0,x1 adcs x26,xzr,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved below mov x27,#-8*8 // a[8]a[0] // a[9]a[0] // a[a]a[0] // a[b]a[0] // a[c]a[0] // a[d]a[0] // a[e]a[0] // a[f]a[0] // a[8]a[1] // a[f]a[1]........................ // a[8]a[2] // a[f]a[2]........................ // a[8]a[3] // a[f]a[3]........................ // a[8]a[4] // a[f]a[4]........................ // a[8]a[5] // a[f]a[5]........................ // a[8]a[6] // a[f]a[6]........................ // a[8]a[7] // a[f]a[7]........................ Lsqr8x_mul: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,Lsqr8x_mul // note that carry flag is guaranteed // to be zero at this point cmp x1,x3 // done yet? 
b.eq Lsqr8x_break ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 ldr x4,[x0,#-8*8] adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b Lsqr8x_mul .align 4 Lsqr8x_break: ldp x6,x7,[x0,#8*0] add x1,x0,#8*8 ldp x8,x9,[x0,#8*2] sub x14,x3,x1 // is it last iteration? ldp x10,x11,[x0,#8*4] sub x15,x2,x14 ldp x12,x13,[x0,#8*6] cbz x14,Lsqr8x_outer_loop stp x19,x20,[x2,#8*0] ldp x19,x20,[x15,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x15,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x15,#8*4] stp x25,x26,[x2,#8*6] mov x2,x15 ldp x25,x26,[x15,#8*6] b Lsqr8x_outer_loop .align 4 Lsqr8x_outer_break: // Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0] ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0] ldp x15,x16,[sp,#8*1] ldp x11,x13,[x14,#8*2] add x1,x14,#8*4 ldp x17,x14,[sp,#8*3] stp x19,x20,[x2,#8*0] mul x19,x7,x7 stp x21,x22,[x2,#8*2] umulh x7,x7,x7 stp x23,x24,[x2,#8*4] mul x8,x9,x9 stp x25,x26,[x2,#8*6] mov x2,sp umulh x9,x9,x9 adds x20,x7,x15,lsl#1 extr x15,x16,x15,#63 sub x27,x5,#8*4 Lsqr4x_shift_n_add: adcs x21,x8,x15 extr x16,x17,x16,#63 sub x27,x27,#8*4 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 ldp x7,x9,[x1],#8*2 umulh x11,x11,x11 mul x12,x13,x13 umulh x13,x13,x13 extr x17,x14,x17,#63 stp x19,x20,[x2,#8*0] adcs x23,x10,x17 extr x14,x15,x14,#63 stp x21,x22,[x2,#8*2] adcs x24,x11,x14 ldp x17,x14,[x2,#8*7] extr x15,x16,x15,#63 adcs x25,x12,x15 extr x16,x17,x16,#63 adcs x26,x13,x16 ldp x15,x16,[x2,#8*9] mul x6,x7,x7 ldp x11,x13,[x1],#8*2 umulh x7,x7,x7 mul x8,x9,x9 umulh x9,x9,x9 stp x23,x24,[x2,#8*4] extr x17,x14,x17,#63 stp x25,x26,[x2,#8*6] add x2,x2,#8*8 adcs x19,x6,x17 extr x14,x15,x14,#63 adcs x20,x7,x14 ldp x17,x14,[x2,#8*3] extr x15,x16,x15,#63 cbnz x27,Lsqr4x_shift_n_add ldp 
x1,x4,[x29,#104] // pull np and n0 adcs x21,x8,x15 extr x16,x17,x16,#63 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 umulh x11,x11,x11 stp x19,x20,[x2,#8*0] mul x12,x13,x13 umulh x13,x13,x13 stp x21,x22,[x2,#8*2] extr x17,x14,x17,#63 adcs x23,x10,x17 extr x14,x15,x14,#63 ldp x19,x20,[sp,#8*0] adcs x24,x11,x14 extr x15,x16,x15,#63 ldp x6,x7,[x1,#8*0] adcs x25,x12,x15 extr x16,xzr,x16,#63 ldp x8,x9,[x1,#8*2] adc x26,x13,x16 ldp x10,x11,[x1,#8*4] // Reduce by 512 bits per iteration mul x28,x4,x19 // t[0]*n0 ldp x12,x13,[x1,#8*6] add x3,x1,x5 ldp x21,x22,[sp,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[sp,#8*4] stp x25,x26,[x2,#8*6] ldp x25,x26,[sp,#8*6] add x1,x1,#8*8 mov x30,xzr // initial top-most carry mov x2,sp mov x27,#8 Lsqr8x_reduction: // (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0) mul x15,x7,x28 sub x27,x27,#1 mul x16,x8,x28 str x28,[x2],#8 // put aside t[0]*n0 for tail processing mul x17,x9,x28 // (*) adds xzr,x19,x14 subs xzr,x19,#1 // (*) mul x14,x10,x28 adcs x19,x20,x15 mul x15,x11,x28 adcs x20,x21,x16 mul x16,x12,x28 adcs x21,x22,x17 mul x17,x13,x28 adcs x22,x23,x14 umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0) adcs x23,x24,x15 umulh x15,x7,x28 adcs x24,x25,x16 umulh x16,x8,x28 adcs x25,x26,x17 umulh x17,x9,x28 adc x26,xzr,xzr adds x19,x19,x14 umulh x14,x10,x28 adcs x20,x20,x15 umulh x15,x11,x28 adcs x21,x21,x16 umulh x16,x12,x28 adcs x22,x22,x17 umulh x17,x13,x28 mul x28,x4,x19 // next t[0]*n0 adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adc x26,x26,x17 cbnz x27,Lsqr8x_reduction ldp x14,x15,[x2,#8*0] ldp x16,x17,[x2,#8*2] mov x0,x2 sub x27,x3,x1 // done yet? 
adds x19,x19,x14 adcs x20,x20,x15 ldp x14,x15,[x2,#8*4] adcs x21,x21,x16 adcs x22,x22,x17 ldp x16,x17,[x2,#8*6] adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adcs x26,x26,x17 //adc x28,xzr,xzr // moved below cbz x27,Lsqr8x8_post_condition ldr x4,[x2,#-8*8] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] mov x27,#-8*8 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 Lsqr8x_tail: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,Lsqr8x_tail // note that carry flag is guaranteed // to be zero at this point ldp x6,x7,[x2,#8*0] sub x27,x3,x1 // done yet? 
sub x16,x3,x5 // rewinded np ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] cbz x27,Lsqr8x_tail_break ldr x4,[x0,#-8*8] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b Lsqr8x_tail .align 4 Lsqr8x_tail_break: ldr x4,[x29,#112] // pull n0 add x27,x2,#8*8 // end of current t[num] window subs xzr,x30,#1 // "move" top-most carry to carry bit adcs x14,x19,x6 adcs x15,x20,x7 ldp x19,x20,[x0,#8*0] adcs x21,x21,x8 ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0] adcs x22,x22,x9 ldp x8,x9,[x16,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x16,#8*4] adcs x25,x25,x12 adcs x26,x26,x13 ldp x12,x13,[x16,#8*6] add x1,x16,#8*8 adc x30,xzr,xzr // top-most carry mul x28,x4,x19 stp x14,x15,[x2,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x0,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x0,#8*4] cmp x27,x29 // did we hit the bottom? stp x25,x26,[x2,#8*6] mov x2,x0 // slide the window ldp x25,x26,[x0,#8*6] mov x27,#8 b.ne Lsqr8x_reduction // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
ldr x0,[x29,#96] // pull rp add x2,x2,#8*8 subs x14,x19,x6 sbcs x15,x20,x7 sub x27,x5,#8*8 mov x3,x0 // x0 copy Lsqr8x_sub: sbcs x16,x21,x8 ldp x6,x7,[x1,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x1,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x10,x11,[x1,#8*4] sbcs x17,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 ldp x19,x20,[x2,#8*0] sub x27,x27,#8*8 ldp x21,x22,[x2,#8*2] ldp x23,x24,[x2,#8*4] ldp x25,x26,[x2,#8*6] add x2,x2,#8*8 stp x14,x15,[x0,#8*4] sbcs x14,x19,x6 stp x16,x17,[x0,#8*6] add x0,x0,#8*8 sbcs x15,x20,x7 cbnz x27,Lsqr8x_sub sbcs x16,x21,x8 mov x2,sp add x1,sp,x5 ldp x6,x7,[x3,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x3,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x19,x20,[x1,#8*0] sbcs x17,x26,x13 ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? ldr x30,[x29,#8] // pull return address stp x14,x15,[x0,#8*4] stp x16,x17,[x0,#8*6] sub x27,x5,#8*4 Lsqr4x_cond_copy: sub x27,x27,#8*4 csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo ldp x6,x7,[x3,#8*4] ldp x19,x20,[x1,#8*4] csel x16,x21,x8,lo stp xzr,xzr,[x2,#8*2] add x2,x2,#8*4 csel x17,x22,x9,lo ldp x8,x9,[x3,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] add x3,x3,#8*4 stp xzr,xzr,[x1,#8*0] stp xzr,xzr,[x1,#8*2] cbnz x27,Lsqr4x_cond_copy csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo stp xzr,xzr,[x2,#8*2] csel x16,x21,x8,lo csel x17,x22,x9,lo stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] b Lsqr8x_done .align 4 Lsqr8x8_post_condition: adc x28,xzr,xzr ldr x30,[x29,#8] // pull return address // x19-7,x28 hold result, x6-7 hold modulus subs x6,x19,x6 ldr x1,[x29,#96] // pull rp sbcs x7,x20,x7 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x8 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x9 stp xzr,xzr,[sp,#8*4] sbcs x10,x23,x10 stp xzr,xzr,[sp,#8*6] sbcs x11,x24,x11 stp xzr,xzr,[sp,#8*8] sbcs x12,x25,x12 stp xzr,xzr,[sp,#8*10] sbcs x13,x26,x13 stp 
xzr,xzr,[sp,#8*12] sbcs x28,x28,xzr // did it borrow? stp xzr,xzr,[sp,#8*14] // x6-7 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] csel x10,x23,x10,lo csel x11,x24,x11,lo stp x8,x9,[x1,#8*2] csel x12,x25,x12,lo csel x13,x26,x13,lo stp x10,x11,[x1,#8*4] stp x12,x13,[x1,#8*6] Lsqr8x_done: ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .align 5 __bn_mul4x_mont: // Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_mul4x_mont is jumped to // only from bn_mul_mont or __bn_mul8x_mont which have already signed the // return address. stp x29,x30,[sp,#-128]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub x26,sp,x5,lsl#3 lsl x5,x5,#3 ldr x4,[x4] // *n0 sub sp,x26,#8*4 // alloca add x10,x2,x5 add x27,x1,x5 stp x0,x10,[x29,#96] // offload rp and &b[num] ldr x24,[x2,#8*0] // b[0] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr ldp x14,x15,[x3,#8*0] // n[0..3] ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr mov x28,#0 mov x26,sp Loop_mul4x_1st_reduction: mul x10,x6,x24 // lo(a[0..3]*b[0]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[0]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 // (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0) str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs 
xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0) adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 sub x10,x27,x1 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_1st_reduction cbz x10,Lmul4x4_post_condition ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldr x25,[sp] // a[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 Loop_mul4x_1st_tail: mul x10,x6,x24 // lo(a[4..7]*b[i]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[i]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*a[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 adcs x23,x23,x0 umulh x13,x17,x25 adc x0,xzr,xzr ldr x25,[sp,x28] // next t[0]*n0 str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_1st_tail sub x11,x27,x5 // rewinded x1 cbz x10,Lmul4x_proceed ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b Loop_mul4x_1st_tail .align 5 Lmul4x_proceed: ldr x24,[x2,#8*4]! // *++b adc x30,x0,xzr ldp x6,x7,[x11,#8*0] // a[0..3] sub x3,x3,x5 // rewind np ldp x8,x9,[x11,#8*2] add x1,x11,#8*4 stp x19,x20,[x26,#8*0] // result!!! ldp x19,x20,[sp,#8*4] // t[0..3] stp x21,x22,[x26,#8*2] // result!!! 
// NOTE(review): Lmul4x_proceed continues — reload t[0..3] and n[0..3], reset the
// tp cursor (x26) to sp, and run the next reduction pass over four more b[] words.
ldp x21,x22,[sp,#8*6] ldp x14,x15,[x3,#8*0] // n[0..3] mov x26,sp ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr .align 4 Loop_mul4x_reduction: mul x10,x6,x24 // lo(a[0..3]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[4]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 // (*) mul x10,x14,x25 str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 // lo(n[0..3]*t[0]*n0 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0 adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_reduction adc x0,x0,xzr ldp x10,x11,[x26,#8*4] // t[4..7] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldr x25,[sp] // t[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 .align 4 Loop_mul4x_tail: mul x10,x6,x24 // lo(a[4..7]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[4]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*t[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh 
x10,x14,x25 // hi(n[4..7]*t[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 umulh x13,x17,x25 adcs x23,x23,x0 ldr x25,[sp,x28] // next a[0]*n0 adc x0,xzr,xzr str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_tail sub x11,x3,x5 // rewinded np? adc x0,x0,xzr cbz x10,Loop_mul4x_break ldp x10,x11,[x26,#8*4] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b Loop_mul4x_tail .align 4 Loop_mul4x_break: ldp x12,x13,[x29,#96] // pull rp and &b[num] adds x19,x19,x30 add x2,x2,#8*4 // bp++ adcs x20,x20,xzr sub x1,x1,x5 // rewind ap adcs x21,x21,xzr stp x19,x20,[x26,#8*0] // result!!! adcs x22,x22,xzr ldp x19,x20,[sp,#8*4] // t[0..3] adc x30,x0,xzr stp x21,x22,[x26,#8*2] // result!!! cmp x2,x13 // done yet? ldp x21,x22,[sp,#8*6] ldp x14,x15,[x11,#8*0] // n[0..3] ldp x16,x17,[x11,#8*2] add x3,x11,#8*4 b.eq Lmul4x_post ldr x24,[x2] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] adds x1,x1,#8*4 // clear carry bit mov x0,xzr mov x26,sp b Loop_mul4x_reduction .align 4 Lmul4x_post: // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
// Lmul4x_post: x12 is rp (pulled from [x29,#96] above); x27 keeps a copy for the
// constant-time copy-back pass. Lmul4x_sub subtracts the modulus into rp,
// Lmul4x_cond_copy then selects (csel on the borrow flag, 'lo') between the
// difference and the original t[] without branching on secret data, wiping the
// stack scratch with xzr stores as it goes.
mov x0,x12 mov x27,x12 // x0 copy subs x10,x19,x14 add x26,sp,#8*8 sbcs x11,x20,x15 sub x28,x5,#8*4 Lmul4x_sub: sbcs x12,x21,x16 ldp x14,x15,[x3,#8*0] sub x28,x28,#8*4 ldp x19,x20,[x26,#8*0] sbcs x13,x22,x17 ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 ldp x21,x22,[x26,#8*2] add x26,x26,#8*4 stp x10,x11,[x0,#8*0] sbcs x10,x19,x14 stp x12,x13,[x0,#8*2] add x0,x0,#8*4 sbcs x11,x20,x15 cbnz x28,Lmul4x_sub sbcs x12,x21,x16 mov x26,sp add x1,sp,#8*4 ldp x6,x7,[x27,#8*0] sbcs x13,x22,x17 stp x10,x11,[x0,#8*0] ldp x8,x9,[x27,#8*2] stp x12,x13,[x0,#8*2] ldp x19,x20,[x1,#8*0] ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? ldr x30,[x29,#8] // pull return address sub x28,x5,#8*4 Lmul4x_cond_copy: sub x28,x28,#8*4 csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo ldp x6,x7,[x27,#8*4] ldp x19,x20,[x1,#8*4] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*2] add x26,x26,#8*4 csel x13,x22,x9,lo ldp x8,x9,[x27,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] add x27,x27,#8*4 cbnz x28,Lmul4x_cond_copy csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo stp xzr,xzr,[x26,#8*2] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*3] csel x13,x22,x9,lo stp xzr,xzr,[x26,#8*4] stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] b Lmul4x_done .align 4 Lmul4x4_post_condition: adc x0,x0,xzr ldr x1,[x29,#96] // pull rp // x19-3,x0 hold result, x14-7 hold modulus subs x6,x19,x14 ldr x30,[x29,#8] // pull return address sbcs x7,x20,x15 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x16 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x17 stp xzr,xzr,[sp,#8*4] sbcs xzr,x0,xzr // did it borrow? 
// Select the final result words (borrow => keep t[]), store them to rp, wipe the
// remaining stack scratch, then restore callee-saved registers and return 1.
stp xzr,xzr,[sp,#8*6] // x6-3 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] stp x8,x9,[x1,#8*2] Lmul4x_done: ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 4 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/armv8-mont-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include .text .globl bn_mul_mont .hidden bn_mul_mont .type bn_mul_mont,%function .align 5 bn_mul_mont: AARCH64_SIGN_LINK_REGISTER tst x5,#7 b.eq __bn_sqr8x_mont tst x5,#3 b.eq __bn_mul4x_mont .Lmul_mont: stp x29,x30,[sp,#-64]! 
// .Lmul_mont scalar path (taken when num is not a multiple of 4): classic
// word-by-word Montgomery multiplication. Below: save callee-saved registers,
// allocate num*8 bytes of t[] ("tp") on the stack (16-byte aligned, per ABI),
// then compute the first outer iteration with b[0] and m1 = t[0]*n0.
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] ldr x9,[x2],#8 // bp[0] sub x22,sp,x5,lsl#3 ldp x7,x8,[x1],#16 // ap[0..1] lsl x5,x5,#3 ldr x4,[x4] // *n0 and x22,x22,#-16 // ABI says so ldp x13,x14,[x3],#16 // np[0..1] mul x6,x7,x9 // ap[0]*bp[0] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 mul x10,x8,x9 // ap[1]*bp[0] umulh x11,x8,x9 mul x15,x6,x4 // "tp[0]"*n0 mov sp,x22 // alloca // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 // discarded // (*) As for removal of first multiplication and addition // instructions. The outcome of first addition is // guaranteed to be zero, which leaves two computationally // significant outcomes: it either carries or not. Then // question is when does it carry? Is there alternative // way to deduce it? If you follow operations, you can // observe that condition for carry is quite simple: // x6 being non-zero. So that carry can be calculated // by adding -1 to x6. That's what next instruction does. 
subs xzr,x6,#1 // (*) umulh x17,x14,x15 adc x13,x13,xzr cbz x21,.L1st_skip .L1st: ldr x8,[x1],#8 adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr ldr x14,[x3],#8 adds x12,x16,x13 mul x10,x8,x9 // ap[j]*bp[0] adc x13,x17,xzr umulh x11,x8,x9 adds x12,x12,x6 mul x16,x14,x15 // np[j]*m1 adc x13,x13,xzr umulh x17,x14,x15 str x12,[x22],#8 // tp[j-1] cbnz x21,.L1st .L1st_skip: adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adc x13,x17,xzr adds x12,x12,x6 sub x20,x5,#8 // i=num-1 adcs x13,x13,x7 adc x19,xzr,xzr // upmost overflow bit stp x12,x13,[x22] .Louter: ldr x9,[x2],#8 // bp[i] ldp x7,x8,[x1],#16 ldr x23,[sp] // tp[0] add x22,sp,#8 mul x6,x7,x9 // ap[0]*bp[i] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 ldp x13,x14,[x3],#16 mul x10,x8,x9 // ap[1]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x15,x6,x4 sub x20,x20,#8 // i-- // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 subs xzr,x6,#1 // (*) umulh x17,x14,x15 cbz x21,.Linner_skip .Linner: ldr x8,[x1],#8 adc x13,x13,xzr ldr x23,[x22],#8 // tp[j] adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr adds x12,x16,x13 ldr x14,[x3],#8 adc x13,x17,xzr mul x10,x8,x9 // ap[j]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x16,x14,x15 // np[j]*m1 adds x12,x12,x6 umulh x17,x14,x15 str x12,[x22,#-16] // tp[j-1] cbnz x21,.Linner .Linner_skip: ldr x23,[x22],#8 // tp[j] adc x13,x13,xzr adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adcs x13,x17,x19 adc x19,xzr,xzr adds x6,x6,x23 adc x7,x7,xzr adds x12,x12,x6 adcs x13,x13,x7 adc x19,x19,xzr // upmost overflow bit stp x12,x13,[x22,#-16] cbnz x20,.Louter // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
// .Lsub writes tp[] - np[] to rp; .Lcond_copy then selects (csel on the borrow
// flag, 'lo') between that difference and the original tp[] without branching
// on secret data, wiping tp[] with xzr stores as it is consumed.
ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x14,[x3],#8 // np[0] subs x21,x5,#8 // j=num-1 and clear borrow mov x1,x0 .Lsub: sbcs x8,x23,x14 // tp[j]-np[j] ldr x23,[x22],#8 sub x21,x21,#8 // j-- ldr x14,[x3],#8 str x8,[x1],#8 // rp[j]=tp[j]-np[j] cbnz x21,.Lsub sbcs x8,x23,x14 sbcs x19,x19,xzr // did it borrow? str x8,[x1],#8 // rp[num-1] ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x8,[x0],#8 // rp[0] sub x5,x5,#8 // num-- nop .Lcond_copy: sub x5,x5,#8 // num-- csel x14,x23,x8,lo // did it borrow? ldr x23,[x22],#8 ldr x8,[x0],#8 str xzr,[x22,#-16] // wipe tp str x14,[x0,#-16] cbnz x5,.Lcond_copy csel x14,x23,x8,lo str xzr,[x22,#-8] // wipe tp str x14,[x0,#-8] ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldr x29,[sp],#64 AARCH64_VALIDATE_LINK_REGISTER ret .size bn_mul_mont,.-bn_mul_mont .type __bn_sqr8x_mont,%function .align 5 __bn_sqr8x_mont: // Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_sqr8x_mont is jumped to // only from bn_mul_mont which has already signed the return address. cmp x1,x2 b.ne __bn_mul4x_mont .Lsqr8x_mont: stp x29,x30,[sp,#-128]! 
// __bn_sqr8x_mont body (ELF build; taken when ap == bp and num % 8 == 0).
// Prologue continues below: spill callee-saved registers, stash rp/np/n0 in the
// frame, allocate a num*16-byte t[] scratch area on the stack (sub x2,sp,x5,lsl#4
// with x5 still the word count) and zero it before the squaring pass.
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] stp x0,x3,[sp,#96] // offload rp and np ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] ldp x12,x13,[x1,#8*6] sub x2,sp,x5,lsl#4 lsl x5,x5,#3 ldr x4,[x4] // *n0 mov sp,x2 // alloca sub x27,x5,#8*8 b .Lsqr8x_zero_start .Lsqr8x_zero: sub x27,x27,#8*8 stp xzr,xzr,[x2,#8*0] stp xzr,xzr,[x2,#8*2] stp xzr,xzr,[x2,#8*4] stp xzr,xzr,[x2,#8*6] .Lsqr8x_zero_start: stp xzr,xzr,[x2,#8*8] stp xzr,xzr,[x2,#8*10] stp xzr,xzr,[x2,#8*12] stp xzr,xzr,[x2,#8*14] add x2,x2,#8*16 cbnz x27,.Lsqr8x_zero add x3,x1,x5 add x1,x1,#8*8 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr mov x23,xzr mov x24,xzr mov x25,xzr mov x26,xzr mov x2,sp str x4,[x29,#112] // offload n0 // Multiply everything but a[i]*a[i] .align 4 .Lsqr8x_outer_loop: // a[1]a[0] (i) // a[2]a[0] // a[3]a[0] // a[4]a[0] // a[5]a[0] // a[6]a[0] // a[7]a[0] // a[2]a[1] (ii) // a[3]a[1] // a[4]a[1] // a[5]a[1] // a[6]a[1] // a[7]a[1] // a[3]a[2] (iii) // a[4]a[2] // a[5]a[2] // a[6]a[2] // a[7]a[2] // a[4]a[3] (iv) // a[5]a[3] // a[6]a[3] // a[7]a[3] // a[5]a[4] (v) // a[6]a[4] // a[7]a[4] // a[6]a[5] (vi) // a[7]a[5] // a[7]a[6] (vii) mul x14,x7,x6 // lo(a[1..7]*a[0]) (i) mul x15,x8,x6 mul x16,x9,x6 mul x17,x10,x6 adds x20,x20,x14 // t[1]+lo(a[1]*a[0]) mul x14,x11,x6 adcs x21,x21,x15 mul x15,x12,x6 adcs x22,x22,x16 mul x16,x13,x6 adcs x23,x23,x17 umulh x17,x7,x6 // hi(a[1..7]*a[0]) adcs x24,x24,x14 umulh x14,x8,x6 adcs x25,x25,x15 umulh x15,x9,x6 adcs x26,x26,x16 umulh x16,x10,x6 stp x19,x20,[x2],#8*2 // t[0..1] adc x19,xzr,xzr // t[8] adds x21,x21,x17 // t[2]+lo(a[1]*a[0]) umulh x17,x11,x6 adcs x22,x22,x14 umulh x14,x12,x6 adcs x23,x23,x15 umulh x15,x13,x6 adcs x24,x24,x16 mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii) adcs x25,x25,x17 mul x17,x9,x7 adcs x26,x26,x14 mul x14,x10,x7 adc x19,x19,x15 mul x15,x11,x7 adds x22,x22,x16 mul x16,x12,x7 adcs x23,x23,x17 mul x17,x13,x7 adcs x24,x24,x14 umulh 
x14,x8,x7 // hi(a[2..7]*a[1]) adcs x25,x25,x15 umulh x15,x9,x7 adcs x26,x26,x16 umulh x16,x10,x7 adcs x19,x19,x17 umulh x17,x11,x7 stp x21,x22,[x2],#8*2 // t[2..3] adc x20,xzr,xzr // t[9] adds x23,x23,x14 umulh x14,x12,x7 adcs x24,x24,x15 umulh x15,x13,x7 adcs x25,x25,x16 mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii) adcs x26,x26,x17 mul x17,x10,x8 adcs x19,x19,x14 mul x14,x11,x8 adc x20,x20,x15 mul x15,x12,x8 adds x24,x24,x16 mul x16,x13,x8 adcs x25,x25,x17 umulh x17,x9,x8 // hi(a[3..7]*a[2]) adcs x26,x26,x14 umulh x14,x10,x8 adcs x19,x19,x15 umulh x15,x11,x8 adcs x20,x20,x16 umulh x16,x12,x8 stp x23,x24,[x2],#8*2 // t[4..5] adc x21,xzr,xzr // t[10] adds x25,x25,x17 umulh x17,x13,x8 adcs x26,x26,x14 mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv) adcs x19,x19,x15 mul x15,x11,x9 adcs x20,x20,x16 mul x16,x12,x9 adc x21,x21,x17 mul x17,x13,x9 adds x26,x26,x14 umulh x14,x10,x9 // hi(a[4..7]*a[3]) adcs x19,x19,x15 umulh x15,x11,x9 adcs x20,x20,x16 umulh x16,x12,x9 adcs x21,x21,x17 umulh x17,x13,x9 stp x25,x26,[x2],#8*2 // t[6..7] adc x22,xzr,xzr // t[11] adds x19,x19,x14 mul x14,x11,x10 // lo(a[5..7]*a[4]) (v) adcs x20,x20,x15 mul x15,x12,x10 adcs x21,x21,x16 mul x16,x13,x10 adc x22,x22,x17 umulh x17,x11,x10 // hi(a[5..7]*a[4]) adds x20,x20,x14 umulh x14,x12,x10 adcs x21,x21,x15 umulh x15,x13,x10 adcs x22,x22,x16 mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi) adc x23,xzr,xzr // t[12] adds x21,x21,x17 mul x17,x13,x11 adcs x22,x22,x14 umulh x14,x12,x11 // hi(a[6..7]*a[5]) adc x23,x23,x15 umulh x15,x13,x11 adds x22,x22,x16 mul x16,x13,x12 // lo(a[7]*a[6]) (vii) adcs x23,x23,x17 umulh x17,x13,x12 // hi(a[7]*a[6]) adc x24,xzr,xzr // t[13] adds x23,x23,x14 sub x27,x3,x1 // done yet? 
// End of one cross-product pass over an 8-word a[] window; x27 == &a[num]-ap
// tells whether the outer loop just consumed the last window.
adc x24,x24,x15 adds x24,x24,x16 sub x14,x3,x5 // rewinded ap adc x25,xzr,xzr // t[14] add x25,x25,x17 cbz x27,.Lsqr8x_outer_break mov x4,x6 ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x0,x1 adcs x26,xzr,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved below mov x27,#-8*8 // a[8]a[0] // a[9]a[0] // a[a]a[0] // a[b]a[0] // a[c]a[0] // a[d]a[0] // a[e]a[0] // a[f]a[0] // a[8]a[1] // a[f]a[1]........................ // a[8]a[2] // a[f]a[2]........................ // a[8]a[3] // a[f]a[3]........................ // a[8]a[4] // a[f]a[4]........................ // a[8]a[5] // a[f]a[5]........................ // a[8]a[6] // a[f]a[6]........................ // a[8]a[7] // a[f]a[7]........................ .Lsqr8x_mul: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,.Lsqr8x_mul // note that carry flag is guaranteed // to be zero at this point cmp x1,x3 // done yet? 
// Either reload the next eight t[]/a[] words and repeat .Lsqr8x_mul, or take
// .Lsqr8x_break to slide the t[] window and restart the outer loop.
b.eq .Lsqr8x_break ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 ldr x4,[x0,#-8*8] adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b .Lsqr8x_mul .align 4 .Lsqr8x_break: ldp x6,x7,[x0,#8*0] add x1,x0,#8*8 ldp x8,x9,[x0,#8*2] sub x14,x3,x1 // is it last iteration? ldp x10,x11,[x0,#8*4] sub x15,x2,x14 ldp x12,x13,[x0,#8*6] cbz x14,.Lsqr8x_outer_loop stp x19,x20,[x2,#8*0] ldp x19,x20,[x15,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x15,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x15,#8*4] stp x25,x26,[x2,#8*6] mov x2,x15 ldp x25,x26,[x15,#8*6] b .Lsqr8x_outer_loop .align 4 .Lsqr8x_outer_break: // Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0] ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0] ldp x15,x16,[sp,#8*1] ldp x11,x13,[x14,#8*2] add x1,x14,#8*4 ldp x17,x14,[sp,#8*3] stp x19,x20,[x2,#8*0] mul x19,x7,x7 stp x21,x22,[x2,#8*2] umulh x7,x7,x7 stp x23,x24,[x2,#8*4] mul x8,x9,x9 stp x25,x26,[x2,#8*6] mov x2,sp umulh x9,x9,x9 adds x20,x7,x15,lsl#1 extr x15,x16,x15,#63 sub x27,x5,#8*4 .Lsqr4x_shift_n_add: adcs x21,x8,x15 extr x16,x17,x16,#63 sub x27,x27,#8*4 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 ldp x7,x9,[x1],#8*2 umulh x11,x11,x11 mul x12,x13,x13 umulh x13,x13,x13 extr x17,x14,x17,#63 stp x19,x20,[x2,#8*0] adcs x23,x10,x17 extr x14,x15,x14,#63 stp x21,x22,[x2,#8*2] adcs x24,x11,x14 ldp x17,x14,[x2,#8*7] extr x15,x16,x15,#63 adcs x25,x12,x15 extr x16,x17,x16,#63 adcs x26,x13,x16 ldp x15,x16,[x2,#8*9] mul x6,x7,x7 ldp x11,x13,[x1],#8*2 umulh x7,x7,x7 mul x8,x9,x9 umulh x9,x9,x9 stp x23,x24,[x2,#8*4] extr x17,x14,x17,#63 stp x25,x26,[x2,#8*6] add x2,x2,#8*8 adcs x19,x6,x17 extr x14,x15,x14,#63 adcs x20,x7,x14 ldp x17,x14,[x2,#8*3] extr x15,x16,x15,#63 cbnz x27,.Lsqr4x_shift_n_add 
// .Lsqr4x_shift_n_add doubled the off-diagonal sum (extr ..,#63 is a 1-bit
// left shift across words) while adding the a[i]*a[i] diagonal squares. Below:
// pull np and n0 from the frame and start the 512-bits-per-iteration reduction.
ldp x1,x4,[x29,#104] // pull np and n0 adcs x21,x8,x15 extr x16,x17,x16,#63 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 umulh x11,x11,x11 stp x19,x20,[x2,#8*0] mul x12,x13,x13 umulh x13,x13,x13 stp x21,x22,[x2,#8*2] extr x17,x14,x17,#63 adcs x23,x10,x17 extr x14,x15,x14,#63 ldp x19,x20,[sp,#8*0] adcs x24,x11,x14 extr x15,x16,x15,#63 ldp x6,x7,[x1,#8*0] adcs x25,x12,x15 extr x16,xzr,x16,#63 ldp x8,x9,[x1,#8*2] adc x26,x13,x16 ldp x10,x11,[x1,#8*4] // Reduce by 512 bits per iteration mul x28,x4,x19 // t[0]*n0 ldp x12,x13,[x1,#8*6] add x3,x1,x5 ldp x21,x22,[sp,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[sp,#8*4] stp x25,x26,[x2,#8*6] ldp x25,x26,[sp,#8*6] add x1,x1,#8*8 mov x30,xzr // initial top-most carry mov x2,sp mov x27,#8 .Lsqr8x_reduction: // (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0) mul x15,x7,x28 sub x27,x27,#1 mul x16,x8,x28 str x28,[x2],#8 // put aside t[0]*n0 for tail processing mul x17,x9,x28 // (*) adds xzr,x19,x14 subs xzr,x19,#1 // (*) mul x14,x10,x28 adcs x19,x20,x15 mul x15,x11,x28 adcs x20,x21,x16 mul x16,x12,x28 adcs x21,x22,x17 mul x17,x13,x28 adcs x22,x23,x14 umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0) adcs x23,x24,x15 umulh x15,x7,x28 adcs x24,x25,x16 umulh x16,x8,x28 adcs x25,x26,x17 umulh x17,x9,x28 adc x26,xzr,xzr adds x19,x19,x14 umulh x14,x10,x28 adcs x20,x20,x15 umulh x15,x11,x28 adcs x21,x21,x16 umulh x16,x12,x28 adcs x22,x22,x17 umulh x17,x13,x28 mul x28,x4,x19 // next t[0]*n0 adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adc x26,x26,x17 cbnz x27,.Lsqr8x_reduction ldp x14,x15,[x2,#8*0] ldp x16,x17,[x2,#8*2] mov x0,x2 sub x27,x3,x1 // done yet? 
// Fold the eight reduced words back into the t[] window; when the whole number
// has been processed (x27 == 0) finish in .Lsqr8x8_post_condition.
adds x19,x19,x14 adcs x20,x20,x15 ldp x14,x15,[x2,#8*4] adcs x21,x21,x16 adcs x22,x22,x17 ldp x16,x17,[x2,#8*6] adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adcs x26,x26,x17 //adc x28,xzr,xzr // moved below cbz x27,.Lsqr8x8_post_condition ldr x4,[x2,#-8*8] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] mov x27,#-8*8 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 .Lsqr8x_tail: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,.Lsqr8x_tail // note that carry flag is guaranteed // to be zero at this point ldp x6,x7,[x2,#8*0] sub x27,x3,x1 // done yet? 
// .Lsqr8x_tail continues propagating the reduction through the upper words;
// on break, x16 below is rewound to &n[0], the top-most carry is folded in and
// the t[] window slides down for the next .Lsqr8x_reduction pass.
sub x16,x3,x5 // rewinded np ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] cbz x27,.Lsqr8x_tail_break ldr x4,[x0,#-8*8] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b .Lsqr8x_tail .align 4 .Lsqr8x_tail_break: ldr x4,[x29,#112] // pull n0 add x27,x2,#8*8 // end of current t[num] window subs xzr,x30,#1 // "move" top-most carry to carry bit adcs x14,x19,x6 adcs x15,x20,x7 ldp x19,x20,[x0,#8*0] adcs x21,x21,x8 ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0] adcs x22,x22,x9 ldp x8,x9,[x16,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x16,#8*4] adcs x25,x25,x12 adcs x26,x26,x13 ldp x12,x13,[x16,#8*6] add x1,x16,#8*8 adc x30,xzr,xzr // top-most carry mul x28,x4,x19 stp x14,x15,[x2,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x0,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x0,#8*4] cmp x27,x29 // did we hit the bottom? stp x25,x26,[x2,#8*6] mov x2,x0 // slide the window ldp x25,x26,[x0,#8*6] mov x27,#8 b.ne .Lsqr8x_reduction // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
// .Lsqr8x_sub subtracts the modulus into rp; .Lsqr4x_cond_copy then keeps either
// the difference or the original t[] depending on the borrow (csel on 'lo', no
// secret-dependent branch), wiping the stack scratch with xzr stores as it goes.
ldr x0,[x29,#96] // pull rp add x2,x2,#8*8 subs x14,x19,x6 sbcs x15,x20,x7 sub x27,x5,#8*8 mov x3,x0 // x0 copy .Lsqr8x_sub: sbcs x16,x21,x8 ldp x6,x7,[x1,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x1,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x10,x11,[x1,#8*4] sbcs x17,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 ldp x19,x20,[x2,#8*0] sub x27,x27,#8*8 ldp x21,x22,[x2,#8*2] ldp x23,x24,[x2,#8*4] ldp x25,x26,[x2,#8*6] add x2,x2,#8*8 stp x14,x15,[x0,#8*4] sbcs x14,x19,x6 stp x16,x17,[x0,#8*6] add x0,x0,#8*8 sbcs x15,x20,x7 cbnz x27,.Lsqr8x_sub sbcs x16,x21,x8 mov x2,sp add x1,sp,x5 ldp x6,x7,[x3,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x3,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x19,x20,[x1,#8*0] sbcs x17,x26,x13 ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? ldr x30,[x29,#8] // pull return address stp x14,x15,[x0,#8*4] stp x16,x17,[x0,#8*6] sub x27,x5,#8*4 .Lsqr4x_cond_copy: sub x27,x27,#8*4 csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo ldp x6,x7,[x3,#8*4] ldp x19,x20,[x1,#8*4] csel x16,x21,x8,lo stp xzr,xzr,[x2,#8*2] add x2,x2,#8*4 csel x17,x22,x9,lo ldp x8,x9,[x3,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] add x3,x3,#8*4 stp xzr,xzr,[x1,#8*0] stp xzr,xzr,[x1,#8*2] cbnz x27,.Lsqr4x_cond_copy csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo stp xzr,xzr,[x2,#8*2] csel x16,x21,x8,lo csel x17,x22,x9,lo stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] b .Lsqr8x_done .align 4 .Lsqr8x8_post_condition: adc x28,xzr,xzr ldr x30,[x29,#8] // pull return address // x19-7,x28 hold result, x6-7 hold modulus subs x6,x19,x6 ldr x1,[x29,#96] // pull rp sbcs x7,x20,x7 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x8 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x9 stp xzr,xzr,[sp,#8*4] sbcs x10,x23,x10 stp xzr,xzr,[sp,#8*6] sbcs x11,x24,x11 stp xzr,xzr,[sp,#8*8] sbcs x12,x25,x12 stp xzr,xzr,[sp,#8*10] sbcs x13,x26,x13 stp 
xzr,xzr,[sp,#8*12] sbcs x28,x28,xzr // did it borrow? stp xzr,xzr,[sp,#8*14] // x6-7 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] csel x10,x23,x10,lo csel x11,x24,x11,lo stp x8,x9,[x1,#8*2] csel x12,x25,x12,lo csel x13,x26,x13,lo stp x10,x11,[x1,#8*4] stp x12,x13,[x1,#8*6] .Lsqr8x_done: ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .size __bn_sqr8x_mont,.-__bn_sqr8x_mont .type __bn_mul4x_mont,%function .align 5 __bn_mul4x_mont: // Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_mul4x_mont is jumped to // only from bn_mul_mont or __bn_mul8x_mont which have already signed the // return address. stp x29,x30,[sp,#-128]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub x26,sp,x5,lsl#3 lsl x5,x5,#3 ldr x4,[x4] // *n0 sub sp,x26,#8*4 // alloca add x10,x2,x5 add x27,x1,x5 stp x0,x10,[x29,#96] // offload rp and &b[num] ldr x24,[x2,#8*0] // b[0] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr ldp x14,x15,[x3,#8*0] // n[0..3] ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr mov x28,#0 mov x26,sp .Loop_mul4x_1st_reduction: mul x10,x6,x24 // lo(a[0..3]*b[0]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[0]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 // (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0) str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc 
x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0) adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 sub x10,x27,x1 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,.Loop_mul4x_1st_reduction cbz x10,.Lmul4x4_post_condition ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldr x25,[sp] // a[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 .Loop_mul4x_1st_tail: mul x10,x6,x24 // lo(a[4..7]*b[i]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[i]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*a[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 adcs x23,x23,x0 umulh x13,x17,x25 adc x0,xzr,xzr ldr x25,[sp,x28] // next t[0]*n0 str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,.Loop_mul4x_1st_tail sub x11,x27,x5 // rewinded x1 cbz x10,.Lmul4x_proceed ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b .Loop_mul4x_1st_tail .align 5 .Lmul4x_proceed: ldr x24,[x2,#8*4]! // *++b adc x30,x0,xzr ldp x6,x7,[x11,#8*0] // a[0..3] sub x3,x3,x5 // rewind np ldp x8,x9,[x11,#8*2] add x1,x11,#8*4 stp x19,x20,[x26,#8*0] // result!!! 
ldp x19,x20,[sp,#8*4] // t[0..3] stp x21,x22,[x26,#8*2] // result!!! ldp x21,x22,[sp,#8*6] ldp x14,x15,[x3,#8*0] // n[0..3] mov x26,sp ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr .align 4 .Loop_mul4x_reduction: mul x10,x6,x24 // lo(a[0..3]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[4]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 // (*) mul x10,x14,x25 str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 // lo(n[0..3]*t[0]*n0 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0 adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,.Loop_mul4x_reduction adc x0,x0,xzr ldp x10,x11,[x26,#8*4] // t[4..7] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldr x25,[sp] // t[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 .align 4 .Loop_mul4x_tail: mul x10,x6,x24 // lo(a[4..7]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[4]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*t[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc 
x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 umulh x13,x17,x25 adcs x23,x23,x0 ldr x25,[sp,x28] // next a[0]*n0 adc x0,xzr,xzr str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,.Loop_mul4x_tail sub x11,x3,x5 // rewinded np? adc x0,x0,xzr cbz x10,.Loop_mul4x_break ldp x10,x11,[x26,#8*4] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b .Loop_mul4x_tail .align 4 .Loop_mul4x_break: ldp x12,x13,[x29,#96] // pull rp and &b[num] adds x19,x19,x30 add x2,x2,#8*4 // bp++ adcs x20,x20,xzr sub x1,x1,x5 // rewind ap adcs x21,x21,xzr stp x19,x20,[x26,#8*0] // result!!! adcs x22,x22,xzr ldp x19,x20,[sp,#8*4] // t[0..3] adc x30,x0,xzr stp x21,x22,[x26,#8*2] // result!!! cmp x2,x13 // done yet? ldp x21,x22,[sp,#8*6] ldp x14,x15,[x11,#8*0] // n[0..3] ldp x16,x17,[x11,#8*2] add x3,x11,#8*4 b.eq .Lmul4x_post ldr x24,[x2] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] adds x1,x1,#8*4 // clear carry bit mov x0,xzr mov x26,sp b .Loop_mul4x_reduction .align 4 .Lmul4x_post: // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
mov x0,x12 mov x27,x12 // x0 copy subs x10,x19,x14 add x26,sp,#8*8 sbcs x11,x20,x15 sub x28,x5,#8*4 .Lmul4x_sub: sbcs x12,x21,x16 ldp x14,x15,[x3,#8*0] sub x28,x28,#8*4 ldp x19,x20,[x26,#8*0] sbcs x13,x22,x17 ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 ldp x21,x22,[x26,#8*2] add x26,x26,#8*4 stp x10,x11,[x0,#8*0] sbcs x10,x19,x14 stp x12,x13,[x0,#8*2] add x0,x0,#8*4 sbcs x11,x20,x15 cbnz x28,.Lmul4x_sub sbcs x12,x21,x16 mov x26,sp add x1,sp,#8*4 ldp x6,x7,[x27,#8*0] sbcs x13,x22,x17 stp x10,x11,[x0,#8*0] ldp x8,x9,[x27,#8*2] stp x12,x13,[x0,#8*2] ldp x19,x20,[x1,#8*0] ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? ldr x30,[x29,#8] // pull return address sub x28,x5,#8*4 .Lmul4x_cond_copy: sub x28,x28,#8*4 csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo ldp x6,x7,[x27,#8*4] ldp x19,x20,[x1,#8*4] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*2] add x26,x26,#8*4 csel x13,x22,x9,lo ldp x8,x9,[x27,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] add x27,x27,#8*4 cbnz x28,.Lmul4x_cond_copy csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo stp xzr,xzr,[x26,#8*2] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*3] csel x13,x22,x9,lo stp xzr,xzr,[x26,#8*4] stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] b .Lmul4x_done .align 4 .Lmul4x4_post_condition: adc x0,x0,xzr ldr x1,[x29,#96] // pull rp // x19-3,x0 hold result, x14-7 hold modulus subs x6,x19,x14 ldr x30,[x29,#8] // pull return address sbcs x7,x20,x15 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x16 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x17 stp xzr,xzr,[sp,#8*4] sbcs xzr,x0,xzr // did it borrow? 
stp xzr,xzr,[sp,#8*6] // x6-3 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] stp x8,x9,[x1,#8*2] .Lmul4x_done: ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .size __bn_mul4x_mont,.-__bn_mul4x_mont .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 4 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/armv8-mont-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include .text .globl bn_mul_mont .def bn_mul_mont .type 32 .endef .align 5 bn_mul_mont: AARCH64_SIGN_LINK_REGISTER tst x5,#7 b.eq __bn_sqr8x_mont tst x5,#3 b.eq __bn_mul4x_mont Lmul_mont: stp x29,x30,[sp,#-64]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] ldr x9,[x2],#8 // bp[0] sub x22,sp,x5,lsl#3 ldp x7,x8,[x1],#16 // ap[0..1] lsl x5,x5,#3 ldr x4,[x4] // *n0 and x22,x22,#-16 // ABI says so ldp x13,x14,[x3],#16 // np[0..1] mul x6,x7,x9 // ap[0]*bp[0] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 mul x10,x8,x9 // ap[1]*bp[0] umulh x11,x8,x9 mul x15,x6,x4 // "tp[0]"*n0 mov sp,x22 // alloca // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 // discarded // (*) As for removal of first multiplication and addition // instructions. The outcome of first addition is // guaranteed to be zero, which leaves two computationally // significant outcomes: it either carries or not. Then // question is when does it carry? Is there alternative // way to deduce it? If you follow operations, you can // observe that condition for carry is quite simple: // x6 being non-zero. So that carry can be calculated // by adding -1 to x6. That's what next instruction does. 
subs xzr,x6,#1 // (*) umulh x17,x14,x15 adc x13,x13,xzr cbz x21,L1st_skip L1st: ldr x8,[x1],#8 adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr ldr x14,[x3],#8 adds x12,x16,x13 mul x10,x8,x9 // ap[j]*bp[0] adc x13,x17,xzr umulh x11,x8,x9 adds x12,x12,x6 mul x16,x14,x15 // np[j]*m1 adc x13,x13,xzr umulh x17,x14,x15 str x12,[x22],#8 // tp[j-1] cbnz x21,L1st L1st_skip: adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adc x13,x17,xzr adds x12,x12,x6 sub x20,x5,#8 // i=num-1 adcs x13,x13,x7 adc x19,xzr,xzr // upmost overflow bit stp x12,x13,[x22] Louter: ldr x9,[x2],#8 // bp[i] ldp x7,x8,[x1],#16 ldr x23,[sp] // tp[0] add x22,sp,#8 mul x6,x7,x9 // ap[0]*bp[i] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 ldp x13,x14,[x3],#16 mul x10,x8,x9 // ap[1]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x15,x6,x4 sub x20,x20,#8 // i-- // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 subs xzr,x6,#1 // (*) umulh x17,x14,x15 cbz x21,Linner_skip Linner: ldr x8,[x1],#8 adc x13,x13,xzr ldr x23,[x22],#8 // tp[j] adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr adds x12,x16,x13 ldr x14,[x3],#8 adc x13,x17,xzr mul x10,x8,x9 // ap[j]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x16,x14,x15 // np[j]*m1 adds x12,x12,x6 umulh x17,x14,x15 str x12,[x22,#-16] // tp[j-1] cbnz x21,Linner Linner_skip: ldr x23,[x22],#8 // tp[j] adc x13,x13,xzr adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adcs x13,x17,x19 adc x19,xzr,xzr adds x6,x6,x23 adc x7,x7,xzr adds x12,x12,x6 adcs x13,x13,x7 adc x19,x19,xzr // upmost overflow bit stp x12,x13,[x22,#-16] cbnz x20,Louter // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x14,[x3],#8 // np[0] subs x21,x5,#8 // j=num-1 and clear borrow mov x1,x0 Lsub: sbcs x8,x23,x14 // tp[j]-np[j] ldr x23,[x22],#8 sub x21,x21,#8 // j-- ldr x14,[x3],#8 str x8,[x1],#8 // rp[j]=tp[j]-np[j] cbnz x21,Lsub sbcs x8,x23,x14 sbcs x19,x19,xzr // did it borrow? str x8,[x1],#8 // rp[num-1] ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x8,[x0],#8 // rp[0] sub x5,x5,#8 // num-- nop Lcond_copy: sub x5,x5,#8 // num-- csel x14,x23,x8,lo // did it borrow? ldr x23,[x22],#8 ldr x8,[x0],#8 str xzr,[x22,#-16] // wipe tp str x14,[x0,#-16] cbnz x5,Lcond_copy csel x14,x23,x8,lo str xzr,[x22,#-8] // wipe tp str x14,[x0,#-8] ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldr x29,[sp],#64 AARCH64_VALIDATE_LINK_REGISTER ret .def __bn_sqr8x_mont .type 32 .endef .align 5 __bn_sqr8x_mont: // Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_sqr8x_mont is jumped to // only from bn_mul_mont which has already signed the return address. cmp x1,x2 b.ne __bn_mul4x_mont Lsqr8x_mont: stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] stp x0,x3,[sp,#96] // offload rp and np ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] ldp x12,x13,[x1,#8*6] sub x2,sp,x5,lsl#4 lsl x5,x5,#3 ldr x4,[x4] // *n0 mov sp,x2 // alloca sub x27,x5,#8*8 b Lsqr8x_zero_start Lsqr8x_zero: sub x27,x27,#8*8 stp xzr,xzr,[x2,#8*0] stp xzr,xzr,[x2,#8*2] stp xzr,xzr,[x2,#8*4] stp xzr,xzr,[x2,#8*6] Lsqr8x_zero_start: stp xzr,xzr,[x2,#8*8] stp xzr,xzr,[x2,#8*10] stp xzr,xzr,[x2,#8*12] stp xzr,xzr,[x2,#8*14] add x2,x2,#8*16 cbnz x27,Lsqr8x_zero add x3,x1,x5 add x1,x1,#8*8 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr mov x23,xzr mov x24,xzr mov x25,xzr mov x26,xzr mov x2,sp str x4,[x29,#112] // offload n0 // Multiply everything but a[i]*a[i] .align 4 Lsqr8x_outer_loop: // a[1]a[0] (i) // a[2]a[0] // a[3]a[0] // a[4]a[0] // a[5]a[0] // a[6]a[0] // a[7]a[0] // a[2]a[1] (ii) // a[3]a[1] // a[4]a[1] // a[5]a[1] // a[6]a[1] // a[7]a[1] // a[3]a[2] (iii) // a[4]a[2] // a[5]a[2] // a[6]a[2] // a[7]a[2] // a[4]a[3] (iv) // a[5]a[3] // a[6]a[3] // a[7]a[3] // a[5]a[4] (v) // a[6]a[4] // a[7]a[4] // a[6]a[5] (vi) // a[7]a[5] // a[7]a[6] (vii) mul x14,x7,x6 // lo(a[1..7]*a[0]) (i) mul x15,x8,x6 mul x16,x9,x6 mul x17,x10,x6 adds x20,x20,x14 // t[1]+lo(a[1]*a[0]) mul x14,x11,x6 adcs x21,x21,x15 mul x15,x12,x6 adcs x22,x22,x16 mul x16,x13,x6 adcs x23,x23,x17 umulh x17,x7,x6 // hi(a[1..7]*a[0]) adcs x24,x24,x14 umulh x14,x8,x6 adcs x25,x25,x15 umulh x15,x9,x6 adcs x26,x26,x16 umulh x16,x10,x6 stp x19,x20,[x2],#8*2 // t[0..1] adc x19,xzr,xzr // t[8] adds x21,x21,x17 // t[2]+lo(a[1]*a[0]) umulh x17,x11,x6 adcs x22,x22,x14 umulh x14,x12,x6 adcs x23,x23,x15 umulh x15,x13,x6 adcs x24,x24,x16 mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii) adcs x25,x25,x17 mul x17,x9,x7 adcs x26,x26,x14 mul x14,x10,x7 adc x19,x19,x15 mul x15,x11,x7 adds x22,x22,x16 mul x16,x12,x7 adcs x23,x23,x17 mul x17,x13,x7 adcs x24,x24,x14 umulh x14,x8,x7 // 
hi(a[2..7]*a[1]) adcs x25,x25,x15 umulh x15,x9,x7 adcs x26,x26,x16 umulh x16,x10,x7 adcs x19,x19,x17 umulh x17,x11,x7 stp x21,x22,[x2],#8*2 // t[2..3] adc x20,xzr,xzr // t[9] adds x23,x23,x14 umulh x14,x12,x7 adcs x24,x24,x15 umulh x15,x13,x7 adcs x25,x25,x16 mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii) adcs x26,x26,x17 mul x17,x10,x8 adcs x19,x19,x14 mul x14,x11,x8 adc x20,x20,x15 mul x15,x12,x8 adds x24,x24,x16 mul x16,x13,x8 adcs x25,x25,x17 umulh x17,x9,x8 // hi(a[3..7]*a[2]) adcs x26,x26,x14 umulh x14,x10,x8 adcs x19,x19,x15 umulh x15,x11,x8 adcs x20,x20,x16 umulh x16,x12,x8 stp x23,x24,[x2],#8*2 // t[4..5] adc x21,xzr,xzr // t[10] adds x25,x25,x17 umulh x17,x13,x8 adcs x26,x26,x14 mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv) adcs x19,x19,x15 mul x15,x11,x9 adcs x20,x20,x16 mul x16,x12,x9 adc x21,x21,x17 mul x17,x13,x9 adds x26,x26,x14 umulh x14,x10,x9 // hi(a[4..7]*a[3]) adcs x19,x19,x15 umulh x15,x11,x9 adcs x20,x20,x16 umulh x16,x12,x9 adcs x21,x21,x17 umulh x17,x13,x9 stp x25,x26,[x2],#8*2 // t[6..7] adc x22,xzr,xzr // t[11] adds x19,x19,x14 mul x14,x11,x10 // lo(a[5..7]*a[4]) (v) adcs x20,x20,x15 mul x15,x12,x10 adcs x21,x21,x16 mul x16,x13,x10 adc x22,x22,x17 umulh x17,x11,x10 // hi(a[5..7]*a[4]) adds x20,x20,x14 umulh x14,x12,x10 adcs x21,x21,x15 umulh x15,x13,x10 adcs x22,x22,x16 mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi) adc x23,xzr,xzr // t[12] adds x21,x21,x17 mul x17,x13,x11 adcs x22,x22,x14 umulh x14,x12,x11 // hi(a[6..7]*a[5]) adc x23,x23,x15 umulh x15,x13,x11 adds x22,x22,x16 mul x16,x13,x12 // lo(a[7]*a[6]) (vii) adcs x23,x23,x17 umulh x17,x13,x12 // hi(a[7]*a[6]) adc x24,xzr,xzr // t[13] adds x23,x23,x14 sub x27,x3,x1 // done yet? 
adc x24,x24,x15 adds x24,x24,x16 sub x14,x3,x5 // rewinded ap adc x25,xzr,xzr // t[14] add x25,x25,x17 cbz x27,Lsqr8x_outer_break mov x4,x6 ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x0,x1 adcs x26,xzr,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved below mov x27,#-8*8 // a[8]a[0] // a[9]a[0] // a[a]a[0] // a[b]a[0] // a[c]a[0] // a[d]a[0] // a[e]a[0] // a[f]a[0] // a[8]a[1] // a[f]a[1]........................ // a[8]a[2] // a[f]a[2]........................ // a[8]a[3] // a[f]a[3]........................ // a[8]a[4] // a[f]a[4]........................ // a[8]a[5] // a[f]a[5]........................ // a[8]a[6] // a[f]a[6]........................ // a[8]a[7] // a[f]a[7]........................ Lsqr8x_mul: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,Lsqr8x_mul // note that carry flag is guaranteed // to be zero at this point cmp x1,x3 // done yet? 
b.eq Lsqr8x_break ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 ldr x4,[x0,#-8*8] adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b Lsqr8x_mul .align 4 Lsqr8x_break: ldp x6,x7,[x0,#8*0] add x1,x0,#8*8 ldp x8,x9,[x0,#8*2] sub x14,x3,x1 // is it last iteration? ldp x10,x11,[x0,#8*4] sub x15,x2,x14 ldp x12,x13,[x0,#8*6] cbz x14,Lsqr8x_outer_loop stp x19,x20,[x2,#8*0] ldp x19,x20,[x15,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x15,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x15,#8*4] stp x25,x26,[x2,#8*6] mov x2,x15 ldp x25,x26,[x15,#8*6] b Lsqr8x_outer_loop .align 4 Lsqr8x_outer_break: // Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0] ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0] ldp x15,x16,[sp,#8*1] ldp x11,x13,[x14,#8*2] add x1,x14,#8*4 ldp x17,x14,[sp,#8*3] stp x19,x20,[x2,#8*0] mul x19,x7,x7 stp x21,x22,[x2,#8*2] umulh x7,x7,x7 stp x23,x24,[x2,#8*4] mul x8,x9,x9 stp x25,x26,[x2,#8*6] mov x2,sp umulh x9,x9,x9 adds x20,x7,x15,lsl#1 extr x15,x16,x15,#63 sub x27,x5,#8*4 Lsqr4x_shift_n_add: adcs x21,x8,x15 extr x16,x17,x16,#63 sub x27,x27,#8*4 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 ldp x7,x9,[x1],#8*2 umulh x11,x11,x11 mul x12,x13,x13 umulh x13,x13,x13 extr x17,x14,x17,#63 stp x19,x20,[x2,#8*0] adcs x23,x10,x17 extr x14,x15,x14,#63 stp x21,x22,[x2,#8*2] adcs x24,x11,x14 ldp x17,x14,[x2,#8*7] extr x15,x16,x15,#63 adcs x25,x12,x15 extr x16,x17,x16,#63 adcs x26,x13,x16 ldp x15,x16,[x2,#8*9] mul x6,x7,x7 ldp x11,x13,[x1],#8*2 umulh x7,x7,x7 mul x8,x9,x9 umulh x9,x9,x9 stp x23,x24,[x2,#8*4] extr x17,x14,x17,#63 stp x25,x26,[x2,#8*6] add x2,x2,#8*8 adcs x19,x6,x17 extr x14,x15,x14,#63 adcs x20,x7,x14 ldp x17,x14,[x2,#8*3] extr x15,x16,x15,#63 cbnz x27,Lsqr4x_shift_n_add ldp 
x1,x4,[x29,#104] // pull np and n0 adcs x21,x8,x15 extr x16,x17,x16,#63 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 umulh x11,x11,x11 stp x19,x20,[x2,#8*0] mul x12,x13,x13 umulh x13,x13,x13 stp x21,x22,[x2,#8*2] extr x17,x14,x17,#63 adcs x23,x10,x17 extr x14,x15,x14,#63 ldp x19,x20,[sp,#8*0] adcs x24,x11,x14 extr x15,x16,x15,#63 ldp x6,x7,[x1,#8*0] adcs x25,x12,x15 extr x16,xzr,x16,#63 ldp x8,x9,[x1,#8*2] adc x26,x13,x16 ldp x10,x11,[x1,#8*4] // Reduce by 512 bits per iteration mul x28,x4,x19 // t[0]*n0 ldp x12,x13,[x1,#8*6] add x3,x1,x5 ldp x21,x22,[sp,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[sp,#8*4] stp x25,x26,[x2,#8*6] ldp x25,x26,[sp,#8*6] add x1,x1,#8*8 mov x30,xzr // initial top-most carry mov x2,sp mov x27,#8 Lsqr8x_reduction: // (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0) mul x15,x7,x28 sub x27,x27,#1 mul x16,x8,x28 str x28,[x2],#8 // put aside t[0]*n0 for tail processing mul x17,x9,x28 // (*) adds xzr,x19,x14 subs xzr,x19,#1 // (*) mul x14,x10,x28 adcs x19,x20,x15 mul x15,x11,x28 adcs x20,x21,x16 mul x16,x12,x28 adcs x21,x22,x17 mul x17,x13,x28 adcs x22,x23,x14 umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0) adcs x23,x24,x15 umulh x15,x7,x28 adcs x24,x25,x16 umulh x16,x8,x28 adcs x25,x26,x17 umulh x17,x9,x28 adc x26,xzr,xzr adds x19,x19,x14 umulh x14,x10,x28 adcs x20,x20,x15 umulh x15,x11,x28 adcs x21,x21,x16 umulh x16,x12,x28 adcs x22,x22,x17 umulh x17,x13,x28 mul x28,x4,x19 // next t[0]*n0 adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adc x26,x26,x17 cbnz x27,Lsqr8x_reduction ldp x14,x15,[x2,#8*0] ldp x16,x17,[x2,#8*2] mov x0,x2 sub x27,x3,x1 // done yet? 
adds x19,x19,x14 adcs x20,x20,x15 ldp x14,x15,[x2,#8*4] adcs x21,x21,x16 adcs x22,x22,x17 ldp x16,x17,[x2,#8*6] adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adcs x26,x26,x17 //adc x28,xzr,xzr // moved below cbz x27,Lsqr8x8_post_condition ldr x4,[x2,#-8*8] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] mov x27,#-8*8 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 Lsqr8x_tail: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,Lsqr8x_tail // note that carry flag is guaranteed // to be zero at this point ldp x6,x7,[x2,#8*0] sub x27,x3,x1 // done yet? 
sub x16,x3,x5 // rewinded np ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] cbz x27,Lsqr8x_tail_break ldr x4,[x0,#-8*8] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b Lsqr8x_tail .align 4 Lsqr8x_tail_break: ldr x4,[x29,#112] // pull n0 add x27,x2,#8*8 // end of current t[num] window subs xzr,x30,#1 // "move" top-most carry to carry bit adcs x14,x19,x6 adcs x15,x20,x7 ldp x19,x20,[x0,#8*0] adcs x21,x21,x8 ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0] adcs x22,x22,x9 ldp x8,x9,[x16,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x16,#8*4] adcs x25,x25,x12 adcs x26,x26,x13 ldp x12,x13,[x16,#8*6] add x1,x16,#8*8 adc x30,xzr,xzr // top-most carry mul x28,x4,x19 stp x14,x15,[x2,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x0,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x0,#8*4] cmp x27,x29 // did we hit the bottom? stp x25,x26,[x2,#8*6] mov x2,x0 // slide the window ldp x25,x26,[x0,#8*6] mov x27,#8 b.ne Lsqr8x_reduction // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
ldr x0,[x29,#96] // pull rp add x2,x2,#8*8 subs x14,x19,x6 sbcs x15,x20,x7 sub x27,x5,#8*8 mov x3,x0 // x0 copy Lsqr8x_sub: sbcs x16,x21,x8 ldp x6,x7,[x1,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x1,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x10,x11,[x1,#8*4] sbcs x17,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 ldp x19,x20,[x2,#8*0] sub x27,x27,#8*8 ldp x21,x22,[x2,#8*2] ldp x23,x24,[x2,#8*4] ldp x25,x26,[x2,#8*6] add x2,x2,#8*8 stp x14,x15,[x0,#8*4] sbcs x14,x19,x6 stp x16,x17,[x0,#8*6] add x0,x0,#8*8 sbcs x15,x20,x7 cbnz x27,Lsqr8x_sub sbcs x16,x21,x8 mov x2,sp add x1,sp,x5 ldp x6,x7,[x3,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x3,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x19,x20,[x1,#8*0] sbcs x17,x26,x13 ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? ldr x30,[x29,#8] // pull return address stp x14,x15,[x0,#8*4] stp x16,x17,[x0,#8*6] sub x27,x5,#8*4 Lsqr4x_cond_copy: sub x27,x27,#8*4 csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo ldp x6,x7,[x3,#8*4] ldp x19,x20,[x1,#8*4] csel x16,x21,x8,lo stp xzr,xzr,[x2,#8*2] add x2,x2,#8*4 csel x17,x22,x9,lo ldp x8,x9,[x3,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] add x3,x3,#8*4 stp xzr,xzr,[x1,#8*0] stp xzr,xzr,[x1,#8*2] cbnz x27,Lsqr4x_cond_copy csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo stp xzr,xzr,[x2,#8*2] csel x16,x21,x8,lo csel x17,x22,x9,lo stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] b Lsqr8x_done .align 4 Lsqr8x8_post_condition: adc x28,xzr,xzr ldr x30,[x29,#8] // pull return address // x19-7,x28 hold result, x6-7 hold modulus subs x6,x19,x6 ldr x1,[x29,#96] // pull rp sbcs x7,x20,x7 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x8 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x9 stp xzr,xzr,[sp,#8*4] sbcs x10,x23,x10 stp xzr,xzr,[sp,#8*6] sbcs x11,x24,x11 stp xzr,xzr,[sp,#8*8] sbcs x12,x25,x12 stp xzr,xzr,[sp,#8*10] sbcs x13,x26,x13 stp 
xzr,xzr,[sp,#8*12] sbcs x28,x28,xzr // did it borrow? stp xzr,xzr,[sp,#8*14] // x6-7 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] csel x10,x23,x10,lo csel x11,x24,x11,lo stp x8,x9,[x1,#8*2] csel x12,x25,x12,lo csel x13,x26,x13,lo stp x10,x11,[x1,#8*4] stp x12,x13,[x1,#8*6] Lsqr8x_done: ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .def __bn_mul4x_mont .type 32 .endef .align 5 __bn_mul4x_mont: // Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_mul4x_mont is jumped to // only from bn_mul_mont or __bn_mul8x_mont which have already signed the // return address. stp x29,x30,[sp,#-128]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub x26,sp,x5,lsl#3 lsl x5,x5,#3 ldr x4,[x4] // *n0 sub sp,x26,#8*4 // alloca add x10,x2,x5 add x27,x1,x5 stp x0,x10,[x29,#96] // offload rp and &b[num] ldr x24,[x2,#8*0] // b[0] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr ldp x14,x15,[x3,#8*0] // n[0..3] ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr mov x28,#0 mov x26,sp Loop_mul4x_1st_reduction: mul x10,x6,x24 // lo(a[0..3]*b[0]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[0]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 // (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0) str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul 
x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0) adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 sub x10,x27,x1 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_1st_reduction cbz x10,Lmul4x4_post_condition ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldr x25,[sp] // a[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 Loop_mul4x_1st_tail: mul x10,x6,x24 // lo(a[4..7]*b[i]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[i]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*a[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 adcs x23,x23,x0 umulh x13,x17,x25 adc x0,xzr,xzr ldr x25,[sp,x28] // next t[0]*n0 str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_1st_tail sub x11,x27,x5 // rewinded x1 cbz x10,Lmul4x_proceed ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b Loop_mul4x_1st_tail .align 5 Lmul4x_proceed: ldr x24,[x2,#8*4]! // *++b adc x30,x0,xzr ldp x6,x7,[x11,#8*0] // a[0..3] sub x3,x3,x5 // rewind np ldp x8,x9,[x11,#8*2] add x1,x11,#8*4 stp x19,x20,[x26,#8*0] // result!!! ldp x19,x20,[sp,#8*4] // t[0..3] stp x21,x22,[x26,#8*2] // result!!! 
ldp x21,x22,[sp,#8*6] ldp x14,x15,[x3,#8*0] // n[0..3] mov x26,sp ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr .align 4 Loop_mul4x_reduction: mul x10,x6,x24 // lo(a[0..3]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[4]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 // (*) mul x10,x14,x25 str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 // lo(n[0..3]*t[0]*n0 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0 adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_reduction adc x0,x0,xzr ldp x10,x11,[x26,#8*4] // t[4..7] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldr x25,[sp] // t[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 .align 4 Loop_mul4x_tail: mul x10,x6,x24 // lo(a[4..7]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[4]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*t[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh 
x10,x14,x25 // hi(n[4..7]*t[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 umulh x13,x17,x25 adcs x23,x23,x0 ldr x25,[sp,x28] // next a[0]*n0 adc x0,xzr,xzr str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_tail sub x11,x3,x5 // rewinded np? adc x0,x0,xzr cbz x10,Loop_mul4x_break ldp x10,x11,[x26,#8*4] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b Loop_mul4x_tail .align 4 Loop_mul4x_break: ldp x12,x13,[x29,#96] // pull rp and &b[num] adds x19,x19,x30 add x2,x2,#8*4 // bp++ adcs x20,x20,xzr sub x1,x1,x5 // rewind ap adcs x21,x21,xzr stp x19,x20,[x26,#8*0] // result!!! adcs x22,x22,xzr ldp x19,x20,[sp,#8*4] // t[0..3] adc x30,x0,xzr stp x21,x22,[x26,#8*2] // result!!! cmp x2,x13 // done yet? ldp x21,x22,[sp,#8*6] ldp x14,x15,[x11,#8*0] // n[0..3] ldp x16,x17,[x11,#8*2] add x3,x11,#8*4 b.eq Lmul4x_post ldr x24,[x2] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] adds x1,x1,#8*4 // clear carry bit mov x0,xzr mov x26,sp b Loop_mul4x_reduction .align 4 Lmul4x_post: // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
mov x0,x12 mov x27,x12 // x0 copy subs x10,x19,x14 add x26,sp,#8*8 sbcs x11,x20,x15 sub x28,x5,#8*4 Lmul4x_sub: sbcs x12,x21,x16 ldp x14,x15,[x3,#8*0] sub x28,x28,#8*4 ldp x19,x20,[x26,#8*0] sbcs x13,x22,x17 ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 ldp x21,x22,[x26,#8*2] add x26,x26,#8*4 stp x10,x11,[x0,#8*0] sbcs x10,x19,x14 stp x12,x13,[x0,#8*2] add x0,x0,#8*4 sbcs x11,x20,x15 cbnz x28,Lmul4x_sub sbcs x12,x21,x16 mov x26,sp add x1,sp,#8*4 ldp x6,x7,[x27,#8*0] sbcs x13,x22,x17 stp x10,x11,[x0,#8*0] ldp x8,x9,[x27,#8*2] stp x12,x13,[x0,#8*2] ldp x19,x20,[x1,#8*0] ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? ldr x30,[x29,#8] // pull return address sub x28,x5,#8*4 Lmul4x_cond_copy: sub x28,x28,#8*4 csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo ldp x6,x7,[x27,#8*4] ldp x19,x20,[x1,#8*4] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*2] add x26,x26,#8*4 csel x13,x22,x9,lo ldp x8,x9,[x27,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] add x27,x27,#8*4 cbnz x28,Lmul4x_cond_copy csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo stp xzr,xzr,[x26,#8*2] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*3] csel x13,x22,x9,lo stp xzr,xzr,[x26,#8*4] stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] b Lmul4x_done .align 4 Lmul4x4_post_condition: adc x0,x0,xzr ldr x1,[x29,#96] // pull rp // x19-3,x0 hold result, x14-7 hold modulus subs x6,x19,x14 ldr x30,[x29,#8] // pull return address sbcs x7,x20,x15 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x16 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x17 stp xzr,xzr,[sp,#8*4] sbcs xzr,x0,xzr // did it borrow? 
stp xzr,xzr,[sp,#8*6] // x6-3 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] stp x8,x9,[x1,#8*2] Lmul4x_done: ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 4 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/bn-586-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _bn_mul_add_words .private_extern _bn_mul_add_words .align 4 _bn_mul_add_words: L_bn_mul_add_words_begin: movl 4(%esp),%eax movl 8(%esp),%edx movl 12(%esp),%ecx movd 16(%esp),%mm0 pxor %mm1,%mm1 jmp L000maw_sse2_entry .align 4,0x90 L001maw_sse2_unrolled: movd (%eax),%mm3 paddq %mm3,%mm1 movd (%edx),%mm2 pmuludq %mm0,%mm2 movd 4(%edx),%mm4 pmuludq %mm0,%mm4 movd 8(%edx),%mm6 pmuludq %mm0,%mm6 movd 12(%edx),%mm7 pmuludq %mm0,%mm7 paddq %mm2,%mm1 movd 4(%eax),%mm3 paddq %mm4,%mm3 movd 8(%eax),%mm5 paddq %mm6,%mm5 movd 12(%eax),%mm4 paddq %mm4,%mm7 movd %mm1,(%eax) movd 16(%edx),%mm2 pmuludq %mm0,%mm2 psrlq $32,%mm1 movd 20(%edx),%mm4 pmuludq %mm0,%mm4 paddq %mm3,%mm1 movd 24(%edx),%mm6 pmuludq %mm0,%mm6 movd %mm1,4(%eax) psrlq $32,%mm1 movd 28(%edx),%mm3 addl $32,%edx pmuludq %mm0,%mm3 paddq %mm5,%mm1 movd 16(%eax),%mm5 paddq %mm5,%mm2 movd %mm1,8(%eax) psrlq $32,%mm1 paddq %mm7,%mm1 movd 20(%eax),%mm5 paddq %mm5,%mm4 movd %mm1,12(%eax) psrlq $32,%mm1 paddq %mm2,%mm1 movd 24(%eax),%mm5 paddq %mm5,%mm6 movd %mm1,16(%eax) psrlq $32,%mm1 paddq %mm4,%mm1 movd 28(%eax),%mm5 paddq %mm5,%mm3 movd %mm1,20(%eax) psrlq $32,%mm1 paddq %mm6,%mm1 movd %mm1,24(%eax) psrlq $32,%mm1 paddq %mm3,%mm1 movd %mm1,28(%eax) leal 32(%eax),%eax psrlq $32,%mm1 subl $8,%ecx jz L002maw_sse2_exit L000maw_sse2_entry: testl $4294967288,%ecx jnz L001maw_sse2_unrolled .align 2,0x90 L003maw_sse2_loop: movd (%edx),%mm2 movd (%eax),%mm3 pmuludq %mm0,%mm2 leal 4(%edx),%edx paddq %mm3,%mm1 paddq %mm2,%mm1 movd %mm1,(%eax) subl $1,%ecx psrlq $32,%mm1 leal 4(%eax),%eax jnz L003maw_sse2_loop L002maw_sse2_exit: movd %mm1,%eax emms ret popl %edi popl %esi popl %ebx popl %ebp ret .globl _bn_mul_words .private_extern _bn_mul_words .align 4 _bn_mul_words: L_bn_mul_words_begin: movl 4(%esp),%eax movl 8(%esp),%edx movl 12(%esp),%ecx movd 16(%esp),%mm0 pxor %mm1,%mm1 .align 4,0x90 L004mw_sse2_loop: movd (%edx),%mm2 
pmuludq %mm0,%mm2 leal 4(%edx),%edx paddq %mm2,%mm1 movd %mm1,(%eax) subl $1,%ecx psrlq $32,%mm1 leal 4(%eax),%eax jnz L004mw_sse2_loop movd %mm1,%eax emms ret popl %edi popl %esi popl %ebx popl %ebp ret .globl _bn_sqr_words .private_extern _bn_sqr_words .align 4 _bn_sqr_words: L_bn_sqr_words_begin: movl 4(%esp),%eax movl 8(%esp),%edx movl 12(%esp),%ecx .align 4,0x90 L005sqr_sse2_loop: movd (%edx),%mm0 pmuludq %mm0,%mm0 leal 4(%edx),%edx movq %mm0,(%eax) subl $1,%ecx leal 8(%eax),%eax jnz L005sqr_sse2_loop emms ret popl %edi popl %esi popl %ebx popl %ebp ret .globl _bn_div_words .private_extern _bn_div_words .align 4 _bn_div_words: L_bn_div_words_begin: movl 4(%esp),%edx movl 8(%esp),%eax movl 12(%esp),%ecx divl %ecx ret .globl _bn_add_words .private_extern _bn_add_words .align 4 _bn_add_words: L_bn_add_words_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%ebx movl 24(%esp),%esi movl 28(%esp),%edi movl 32(%esp),%ebp xorl %eax,%eax andl $4294967288,%ebp jz L006aw_finish L007aw_loop: # Round 0 movl (%esi),%ecx movl (%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,(%ebx) # Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,4(%ebx) # Round 2 movl 8(%esi),%ecx movl 8(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,8(%ebx) # Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,12(%ebx) # Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,16(%ebx) # Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,20(%ebx) # Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) # Round 7 movl 28(%esi),%ecx 
movl 28(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,28(%ebx) addl $32,%esi addl $32,%edi addl $32,%ebx subl $8,%ebp jnz L007aw_loop L006aw_finish: movl 32(%esp),%ebp andl $7,%ebp jz L008aw_end # Tail Round 0 movl (%esi),%ecx movl (%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,(%ebx) jz L008aw_end # Tail Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,4(%ebx) jz L008aw_end # Tail Round 2 movl 8(%esi),%ecx movl 8(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,8(%ebx) jz L008aw_end # Tail Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,12(%ebx) jz L008aw_end # Tail Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,16(%ebx) jz L008aw_end # Tail Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,20(%ebx) jz L008aw_end # Tail Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) L008aw_end: popl %edi popl %esi popl %ebx popl %ebp ret .globl _bn_sub_words .private_extern _bn_sub_words .align 4 _bn_sub_words: L_bn_sub_words_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%ebx movl 24(%esp),%esi movl 28(%esp),%edi movl 32(%esp),%ebp xorl %eax,%eax andl $4294967288,%ebp jz L009aw_finish L010aw_loop: # Round 0 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,(%ebx) # Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,4(%ebx) # Round 2 movl 
8(%esi),%ecx movl 8(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,8(%ebx) # Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,12(%ebx) # Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,16(%ebx) # Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,20(%ebx) # Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) # Round 7 movl 28(%esi),%ecx movl 28(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,28(%ebx) addl $32,%esi addl $32,%edi addl $32,%ebx subl $8,%ebp jnz L010aw_loop L009aw_finish: movl 32(%esp),%ebp andl $7,%ebp jz L011aw_end # Tail Round 0 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,(%ebx) jz L011aw_end # Tail Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,4(%ebx) jz L011aw_end # Tail Round 2 movl 8(%esi),%ecx movl 8(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,8(%ebx) jz L011aw_end # Tail Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,12(%ebx) jz L011aw_end # Tail Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,16(%ebx) jz L011aw_end # Tail Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,20(%ebx) jz L011aw_end # Tail Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx subl %eax,%ecx 
movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) L011aw_end: popl %edi popl %esi popl %ebx popl %ebp ret #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/bn-586-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl bn_mul_add_words .hidden bn_mul_add_words .type bn_mul_add_words,@function .align 16 bn_mul_add_words: .L_bn_mul_add_words_begin: movl 4(%esp),%eax movl 8(%esp),%edx movl 12(%esp),%ecx movd 16(%esp),%mm0 pxor %mm1,%mm1 jmp .L000maw_sse2_entry .align 16 .L001maw_sse2_unrolled: movd (%eax),%mm3 paddq %mm3,%mm1 movd (%edx),%mm2 pmuludq %mm0,%mm2 movd 4(%edx),%mm4 pmuludq %mm0,%mm4 movd 8(%edx),%mm6 pmuludq %mm0,%mm6 movd 12(%edx),%mm7 pmuludq %mm0,%mm7 paddq %mm2,%mm1 movd 4(%eax),%mm3 paddq %mm4,%mm3 movd 8(%eax),%mm5 paddq %mm6,%mm5 movd 12(%eax),%mm4 paddq %mm4,%mm7 movd %mm1,(%eax) movd 16(%edx),%mm2 pmuludq %mm0,%mm2 psrlq $32,%mm1 movd 20(%edx),%mm4 pmuludq %mm0,%mm4 paddq %mm3,%mm1 movd 24(%edx),%mm6 pmuludq %mm0,%mm6 movd %mm1,4(%eax) psrlq $32,%mm1 movd 28(%edx),%mm3 addl $32,%edx pmuludq %mm0,%mm3 paddq %mm5,%mm1 movd 16(%eax),%mm5 paddq %mm5,%mm2 movd %mm1,8(%eax) psrlq $32,%mm1 paddq %mm7,%mm1 movd 20(%eax),%mm5 paddq %mm5,%mm4 movd %mm1,12(%eax) psrlq $32,%mm1 paddq %mm2,%mm1 movd 24(%eax),%mm5 paddq %mm5,%mm6 movd %mm1,16(%eax) psrlq $32,%mm1 paddq %mm4,%mm1 movd 28(%eax),%mm5 paddq %mm5,%mm3 movd %mm1,20(%eax) psrlq $32,%mm1 paddq %mm6,%mm1 movd %mm1,24(%eax) psrlq $32,%mm1 paddq %mm3,%mm1 movd %mm1,28(%eax) leal 32(%eax),%eax psrlq $32,%mm1 subl $8,%ecx jz 
.L002maw_sse2_exit .L000maw_sse2_entry: testl $4294967288,%ecx jnz .L001maw_sse2_unrolled .align 4 .L003maw_sse2_loop: movd (%edx),%mm2 movd (%eax),%mm3 pmuludq %mm0,%mm2 leal 4(%edx),%edx paddq %mm3,%mm1 paddq %mm2,%mm1 movd %mm1,(%eax) subl $1,%ecx psrlq $32,%mm1 leal 4(%eax),%eax jnz .L003maw_sse2_loop .L002maw_sse2_exit: movd %mm1,%eax emms ret popl %edi popl %esi popl %ebx popl %ebp ret .size bn_mul_add_words,.-.L_bn_mul_add_words_begin .globl bn_mul_words .hidden bn_mul_words .type bn_mul_words,@function .align 16 bn_mul_words: .L_bn_mul_words_begin: movl 4(%esp),%eax movl 8(%esp),%edx movl 12(%esp),%ecx movd 16(%esp),%mm0 pxor %mm1,%mm1 .align 16 .L004mw_sse2_loop: movd (%edx),%mm2 pmuludq %mm0,%mm2 leal 4(%edx),%edx paddq %mm2,%mm1 movd %mm1,(%eax) subl $1,%ecx psrlq $32,%mm1 leal 4(%eax),%eax jnz .L004mw_sse2_loop movd %mm1,%eax emms ret popl %edi popl %esi popl %ebx popl %ebp ret .size bn_mul_words,.-.L_bn_mul_words_begin .globl bn_sqr_words .hidden bn_sqr_words .type bn_sqr_words,@function .align 16 bn_sqr_words: .L_bn_sqr_words_begin: movl 4(%esp),%eax movl 8(%esp),%edx movl 12(%esp),%ecx .align 16 .L005sqr_sse2_loop: movd (%edx),%mm0 pmuludq %mm0,%mm0 leal 4(%edx),%edx movq %mm0,(%eax) subl $1,%ecx leal 8(%eax),%eax jnz .L005sqr_sse2_loop emms ret popl %edi popl %esi popl %ebx popl %ebp ret .size bn_sqr_words,.-.L_bn_sqr_words_begin .globl bn_div_words .hidden bn_div_words .type bn_div_words,@function .align 16 bn_div_words: .L_bn_div_words_begin: movl 4(%esp),%edx movl 8(%esp),%eax movl 12(%esp),%ecx divl %ecx ret .size bn_div_words,.-.L_bn_div_words_begin .globl bn_add_words .hidden bn_add_words .type bn_add_words,@function .align 16 bn_add_words: .L_bn_add_words_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%ebx movl 24(%esp),%esi movl 28(%esp),%edi movl 32(%esp),%ebp xorl %eax,%eax andl $4294967288,%ebp jz .L006aw_finish .L007aw_loop: movl (%esi),%ecx movl (%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl 
$0,%eax movl %ecx,(%ebx) movl 4(%esi),%ecx movl 4(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,4(%ebx) movl 8(%esi),%ecx movl 8(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,8(%ebx) movl 12(%esi),%ecx movl 12(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,12(%ebx) movl 16(%esi),%ecx movl 16(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,16(%ebx) movl 20(%esi),%ecx movl 20(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,20(%ebx) movl 24(%esi),%ecx movl 24(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) movl 28(%esi),%ecx movl 28(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,28(%ebx) addl $32,%esi addl $32,%edi addl $32,%ebx subl $8,%ebp jnz .L007aw_loop .L006aw_finish: movl 32(%esp),%ebp andl $7,%ebp jz .L008aw_end movl (%esi),%ecx movl (%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,(%ebx) jz .L008aw_end movl 4(%esi),%ecx movl 4(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,4(%ebx) jz .L008aw_end movl 8(%esi),%ecx movl 8(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,8(%ebx) jz .L008aw_end movl 12(%esi),%ecx movl 12(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,12(%ebx) jz .L008aw_end movl 16(%esi),%ecx movl 16(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,16(%ebx) jz .L008aw_end movl 20(%esi),%ecx movl 20(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,20(%ebx) jz .L008aw_end movl 24(%esi),%ecx movl 24(%edi),%edx addl 
%eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) .L008aw_end: popl %edi popl %esi popl %ebx popl %ebp ret .size bn_add_words,.-.L_bn_add_words_begin .globl bn_sub_words .hidden bn_sub_words .type bn_sub_words,@function .align 16 bn_sub_words: .L_bn_sub_words_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%ebx movl 24(%esp),%esi movl 28(%esp),%edi movl 32(%esp),%ebp xorl %eax,%eax andl $4294967288,%ebp jz .L009aw_finish .L010aw_loop: movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,(%ebx) movl 4(%esi),%ecx movl 4(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,4(%ebx) movl 8(%esi),%ecx movl 8(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,8(%ebx) movl 12(%esi),%ecx movl 12(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,12(%ebx) movl 16(%esi),%ecx movl 16(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,16(%ebx) movl 20(%esi),%ecx movl 20(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,20(%ebx) movl 24(%esi),%ecx movl 24(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) movl 28(%esi),%ecx movl 28(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,28(%ebx) addl $32,%esi addl $32,%edi addl $32,%ebx subl $8,%ebp jnz .L010aw_loop .L009aw_finish: movl 32(%esp),%ebp andl $7,%ebp jz .L011aw_end movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,(%ebx) jz .L011aw_end movl 4(%esi),%ecx movl 4(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,4(%ebx) jz .L011aw_end movl 8(%esi),%ecx movl 8(%edi),%edx subl %eax,%ecx movl $0,%eax 
adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,8(%ebx) jz .L011aw_end movl 12(%esi),%ecx movl 12(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,12(%ebx) jz .L011aw_end movl 16(%esi),%ecx movl 16(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,16(%ebx) jz .L011aw_end movl 20(%esi),%ecx movl 20(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,20(%ebx) jz .L011aw_end movl 24(%esi),%ecx movl 24(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) .L011aw_end: popl %edi popl %esi popl %ebx popl %ebp ret .size bn_sub_words,.-.L_bn_sub_words_begin #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/bn-armv8-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include .text // BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, // size_t num); .globl _bn_add_words .private_extern _bn_add_words .align 4 _bn_add_words: AARCH64_VALID_CALL_TARGET # Clear the carry flag. cmn xzr, xzr # aarch64 can load two registers at a time, so we do two loop iterations at # at a time. Split x3 = 2 * x8 + x3. This allows loop # operations to use CBNZ without clobbering the carry flag. 
lsr x8, x3, #1 and x3, x3, #1 cbz x8, Ladd_tail Ladd_loop: ldp x4, x5, [x1], #16 ldp x6, x7, [x2], #16 sub x8, x8, #1 adcs x4, x4, x6 adcs x5, x5, x7 stp x4, x5, [x0], #16 cbnz x8, Ladd_loop Ladd_tail: cbz x3, Ladd_exit ldr x4, [x1], #8 ldr x6, [x2], #8 adcs x4, x4, x6 str x4, [x0], #8 Ladd_exit: cset x0, cs ret // BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, // size_t num); .globl _bn_sub_words .private_extern _bn_sub_words .align 4 _bn_sub_words: AARCH64_VALID_CALL_TARGET # Set the carry flag. Arm's borrow bit is flipped from the carry flag, # so we want C = 1 here. cmp xzr, xzr # aarch64 can load two registers at a time, so we do two loop iterations at # at a time. Split x3 = 2 * x8 + x3. This allows loop # operations to use CBNZ without clobbering the carry flag. lsr x8, x3, #1 and x3, x3, #1 cbz x8, Lsub_tail Lsub_loop: ldp x4, x5, [x1], #16 ldp x6, x7, [x2], #16 sub x8, x8, #1 sbcs x4, x4, x6 sbcs x5, x5, x7 stp x4, x5, [x0], #16 cbnz x8, Lsub_loop Lsub_tail: cbz x3, Lsub_exit ldr x4, [x1], #8 ldr x6, [x2], #8 sbcs x4, x4, x6 str x4, [x0], #8 Lsub_exit: cset x0, cc ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/bn-armv8-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include .text // BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, // size_t num); .type bn_add_words, %function .globl bn_add_words .hidden bn_add_words .align 4 bn_add_words: AARCH64_VALID_CALL_TARGET # Clear the carry flag. 
cmn xzr, xzr # aarch64 can load two registers at a time, so we do two loop iterations at # at a time. Split x3 = 2 * x8 + x3. This allows loop # operations to use CBNZ without clobbering the carry flag. lsr x8, x3, #1 and x3, x3, #1 cbz x8, .Ladd_tail .Ladd_loop: ldp x4, x5, [x1], #16 ldp x6, x7, [x2], #16 sub x8, x8, #1 adcs x4, x4, x6 adcs x5, x5, x7 stp x4, x5, [x0], #16 cbnz x8, .Ladd_loop .Ladd_tail: cbz x3, .Ladd_exit ldr x4, [x1], #8 ldr x6, [x2], #8 adcs x4, x4, x6 str x4, [x0], #8 .Ladd_exit: cset x0, cs ret .size bn_add_words,.-bn_add_words // BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, // size_t num); .type bn_sub_words, %function .globl bn_sub_words .hidden bn_sub_words .align 4 bn_sub_words: AARCH64_VALID_CALL_TARGET # Set the carry flag. Arm's borrow bit is flipped from the carry flag, # so we want C = 1 here. cmp xzr, xzr # aarch64 can load two registers at a time, so we do two loop iterations at # at a time. Split x3 = 2 * x8 + x3. This allows loop # operations to use CBNZ without clobbering the carry flag. lsr x8, x3, #1 and x3, x3, #1 cbz x8, .Lsub_tail .Lsub_loop: ldp x4, x5, [x1], #16 ldp x6, x7, [x2], #16 sub x8, x8, #1 sbcs x4, x4, x6 sbcs x5, x5, x7 stp x4, x5, [x0], #16 cbnz x8, .Lsub_loop .Lsub_tail: cbz x3, .Lsub_exit ldr x4, [x1], #8 ldr x6, [x2], #8 sbcs x4, x4, x6 str x4, [x0], #8 .Lsub_exit: cset x0, cc ret .size bn_sub_words,.-bn_sub_words #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/bn-armv8-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include .text // BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, // size_t num); .globl bn_add_words .align 4 bn_add_words: AARCH64_VALID_CALL_TARGET # Clear the carry flag. cmn xzr, xzr # aarch64 can load two registers at a time, so we do two loop iterations at # at a time. Split x3 = 2 * x8 + x3. This allows loop # operations to use CBNZ without clobbering the carry flag. lsr x8, x3, #1 and x3, x3, #1 cbz x8, Ladd_tail Ladd_loop: ldp x4, x5, [x1], #16 ldp x6, x7, [x2], #16 sub x8, x8, #1 adcs x4, x4, x6 adcs x5, x5, x7 stp x4, x5, [x0], #16 cbnz x8, Ladd_loop Ladd_tail: cbz x3, Ladd_exit ldr x4, [x1], #8 ldr x6, [x2], #8 adcs x4, x4, x6 str x4, [x0], #8 Ladd_exit: cset x0, cs ret // BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, // size_t num); .globl bn_sub_words .align 4 bn_sub_words: AARCH64_VALID_CALL_TARGET # Set the carry flag. Arm's borrow bit is flipped from the carry flag, # so we want C = 1 here. cmp xzr, xzr # aarch64 can load two registers at a time, so we do two loop iterations at # at a time. Split x3 = 2 * x8 + x3. This allows loop # operations to use CBNZ without clobbering the carry flag. 
lsr x8, x3, #1 and x3, x3, #1 cbz x8, Lsub_tail Lsub_loop: ldp x4, x5, [x1], #16 ldp x6, x7, [x2], #16 sub x8, x8, #1 sbcs x4, x4, x6 sbcs x5, x5, x7 stp x4, x5, [x0], #16 cbnz x8, Lsub_loop Lsub_tail: cbz x3, Lsub_exit ldr x4, [x1], #8 ldr x6, [x2], #8 sbcs x4, x4, x6 str x4, [x0], #8 Lsub_exit: cset x0, cc ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/bsaes-armv7-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) @ Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved. @ @ Licensed under the OpenSSL license (the "License"). You may not use @ this file except in compliance with the License. You can obtain a copy @ in the file LICENSE in the source distribution or at @ https://www.openssl.org/source/license.html @ ==================================================================== @ Written by Andy Polyakov for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further @ details see http://www.openssl.org/~appro/cryptogams/. @ @ Specific modes and adaptation for Linux kernel by Ard Biesheuvel @ of Linaro. Permission to use under GPL terms is granted. @ ==================================================================== @ Bit-sliced AES for ARM NEON @ @ February 2012. @ @ This implementation is direct adaptation of bsaes-x86_64 module for @ ARM NEON. Except that this module is endian-neutral [in sense that @ it can be compiled for either endianness] by courtesy of vld1.8's @ neutrality. 
Initial version doesn't implement interface to OpenSSL, @ only low-level primitives and unsupported entry points, just enough @ to collect performance results, which for Cortex-A8 core are: @ @ encrypt 19.5 cycles per byte processed with 128-bit key @ decrypt 22.1 cycles per byte processed with 128-bit key @ key conv. 440 cycles per 128-bit key/0.18 of 8x block @ @ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7, @ which is [much] worse than anticipated (for further details see @ http://www.openssl.org/~appro/Snapdragon-S4.html). @ @ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code @ manages in 20.0 cycles]. @ @ When comparing to x86_64 results keep in mind that NEON unit is @ [mostly] single-issue and thus can't [fully] benefit from @ instruction-level parallelism. And when comparing to aes-armv4 @ results keep in mind key schedule conversion overhead (see @ bsaes-x86_64.pl for further details)... @ @ @ April-August 2013 @ Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard. #ifndef __KERNEL__ # include # define VFP_ABI_PUSH vstmdb sp!,{d8-d15} # define VFP_ABI_POP vldmia sp!,{d8-d15} # define VFP_ABI_FRAME 0x40 #else # define VFP_ABI_PUSH # define VFP_ABI_POP # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define XTS_CHAIN_TWEAK # define __ARM_MAX_ARCH__ 7 #endif #ifdef __thumb__ # define adrl adr #endif #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .text .syntax unified @ ARMv7-capable assembler is expected to handle this #if defined(__thumb2__) && !defined(__APPLE__) .thumb #else .code 32 # undef __thumb2__ #endif .type _bsaes_decrypt8,%function .align 4 _bsaes_decrypt8: adr r6,. 
vldmia r4!, {q9} @ round 0 key #if defined(__thumb2__) || defined(__APPLE__) adr r6,.LM0ISR #else add r6,r6,#.LM0ISR-_bsaes_decrypt8 #endif vldmia r6!, {q8} @ .LM0ISR veor q10, q0, q9 @ xor with round0 key veor q11, q1, q9 vtbl.8 d0, {q10}, d16 vtbl.8 d1, {q10}, d17 veor q12, q2, q9 vtbl.8 d2, {q11}, d16 vtbl.8 d3, {q11}, d17 veor q13, q3, q9 vtbl.8 d4, {q12}, d16 vtbl.8 d5, {q12}, d17 veor q14, q4, q9 vtbl.8 d6, {q13}, d16 vtbl.8 d7, {q13}, d17 veor q15, q5, q9 vtbl.8 d8, {q14}, d16 vtbl.8 d9, {q14}, d17 veor q10, q6, q9 vtbl.8 d10, {q15}, d16 vtbl.8 d11, {q15}, d17 veor q11, q7, q9 vtbl.8 d12, {q10}, d16 vtbl.8 d13, {q10}, d17 vtbl.8 d14, {q11}, d16 vtbl.8 d15, {q11}, d17 vmov.i8 q8,#0x55 @ compose .LBS0 vmov.i8 q9,#0x33 @ compose .LBS1 vshr.u64 q10, q6, #1 vshr.u64 q11, q4, #1 veor q10, q10, q7 veor q11, q11, q5 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #1 veor q5, q5, q11 vshl.u64 q11, q11, #1 veor q6, q6, q10 veor q4, q4, q11 vshr.u64 q10, q2, #1 vshr.u64 q11, q0, #1 veor q10, q10, q3 veor q11, q11, q1 vand q10, q10, q8 vand q11, q11, q8 veor q3, q3, q10 vshl.u64 q10, q10, #1 veor q1, q1, q11 vshl.u64 q11, q11, #1 veor q2, q2, q10 veor q0, q0, q11 vmov.i8 q8,#0x0f @ compose .LBS2 vshr.u64 q10, q5, #2 vshr.u64 q11, q4, #2 veor q10, q10, q7 veor q11, q11, q6 vand q10, q10, q9 vand q11, q11, q9 veor q7, q7, q10 vshl.u64 q10, q10, #2 veor q6, q6, q11 vshl.u64 q11, q11, #2 veor q5, q5, q10 veor q4, q4, q11 vshr.u64 q10, q1, #2 vshr.u64 q11, q0, #2 veor q10, q10, q3 veor q11, q11, q2 vand q10, q10, q9 vand q11, q11, q9 veor q3, q3, q10 vshl.u64 q10, q10, #2 veor q2, q2, q11 vshl.u64 q11, q11, #2 veor q1, q1, q10 veor q0, q0, q11 vshr.u64 q10, q3, #4 vshr.u64 q11, q2, #4 veor q10, q10, q7 veor q11, q11, q6 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #4 veor q6, q6, q11 vshl.u64 q11, q11, #4 veor q3, q3, q10 veor q2, q2, q11 vshr.u64 q10, q1, #4 vshr.u64 q11, q0, #4 veor q10, q10, q5 veor q11, q11, q4 vand q10, 
q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #4 veor q4, q4, q11 vshl.u64 q11, q11, #4 veor q1, q1, q10 veor q0, q0, q11 sub r5,r5,#1 b .Ldec_sbox .align 4 .Ldec_loop: vldmia r4!, {q8,q9,q10,q11} veor q8, q8, q0 veor q9, q9, q1 vtbl.8 d0, {q8}, d24 vtbl.8 d1, {q8}, d25 vldmia r4!, {q8} veor q10, q10, q2 vtbl.8 d2, {q9}, d24 vtbl.8 d3, {q9}, d25 vldmia r4!, {q9} veor q11, q11, q3 vtbl.8 d4, {q10}, d24 vtbl.8 d5, {q10}, d25 vldmia r4!, {q10} vtbl.8 d6, {q11}, d24 vtbl.8 d7, {q11}, d25 vldmia r4!, {q11} veor q8, q8, q4 veor q9, q9, q5 vtbl.8 d8, {q8}, d24 vtbl.8 d9, {q8}, d25 veor q10, q10, q6 vtbl.8 d10, {q9}, d24 vtbl.8 d11, {q9}, d25 veor q11, q11, q7 vtbl.8 d12, {q10}, d24 vtbl.8 d13, {q10}, d25 vtbl.8 d14, {q11}, d24 vtbl.8 d15, {q11}, d25 .Ldec_sbox: veor q1, q1, q4 veor q3, q3, q4 veor q4, q4, q7 veor q1, q1, q6 veor q2, q2, q7 veor q6, q6, q4 veor q0, q0, q1 veor q2, q2, q5 veor q7, q7, q6 veor q3, q3, q0 veor q5, q5, q0 veor q1, q1, q3 veor q11, q3, q0 veor q10, q7, q4 veor q9, q1, q6 veor q13, q4, q0 vmov q8, q10 veor q12, q5, q2 vorr q10, q10, q9 veor q15, q11, q8 vand q14, q11, q12 vorr q11, q11, q12 veor q12, q12, q9 vand q8, q8, q9 veor q9, q6, q2 vand q15, q15, q12 vand q13, q13, q9 veor q9, q3, q7 veor q12, q1, q5 veor q11, q11, q13 veor q10, q10, q13 vand q13, q9, q12 vorr q9, q9, q12 veor q11, q11, q15 veor q8, q8, q13 veor q10, q10, q14 veor q9, q9, q15 veor q8, q8, q14 vand q12, q4, q6 veor q9, q9, q14 vand q13, q0, q2 vand q14, q7, q1 vorr q15, q3, q5 veor q11, q11, q12 veor q9, q9, q14 veor q8, q8, q15 veor q10, q10, q13 @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3 @ new smaller inversion vand q14, q11, q9 vmov q12, q8 veor q13, q10, q14 veor q15, q8, q14 veor q14, q8, q14 @ q14=q15 vbsl q13, q9, q8 vbsl q15, q11, q10 veor q11, q11, q10 vbsl q12, q13, q14 vbsl q8, q14, q13 vand q14, q12, q15 veor q9, q9, q8 veor q14, q14, q11 veor q12, q5, q2 veor q8, q1, q6 veor q10, q15, q14 vand q10, q10, q5 veor q5, q5, q1 vand q11, q1, q15 vand q5, 
q5, q14 veor q1, q11, q10 veor q5, q5, q11 veor q15, q15, q13 veor q14, q14, q9 veor q11, q15, q14 veor q10, q13, q9 vand q11, q11, q12 vand q10, q10, q2 veor q12, q12, q8 veor q2, q2, q6 vand q8, q8, q15 vand q6, q6, q13 vand q12, q12, q14 vand q2, q2, q9 veor q8, q8, q12 veor q2, q2, q6 veor q12, q12, q11 veor q6, q6, q10 veor q5, q5, q12 veor q2, q2, q12 veor q1, q1, q8 veor q6, q6, q8 veor q12, q3, q0 veor q8, q7, q4 veor q11, q15, q14 veor q10, q13, q9 vand q11, q11, q12 vand q10, q10, q0 veor q12, q12, q8 veor q0, q0, q4 vand q8, q8, q15 vand q4, q4, q13 vand q12, q12, q14 vand q0, q0, q9 veor q8, q8, q12 veor q0, q0, q4 veor q12, q12, q11 veor q4, q4, q10 veor q15, q15, q13 veor q14, q14, q9 veor q10, q15, q14 vand q10, q10, q3 veor q3, q3, q7 vand q11, q7, q15 vand q3, q3, q14 veor q7, q11, q10 veor q3, q3, q11 veor q3, q3, q12 veor q0, q0, q12 veor q7, q7, q8 veor q4, q4, q8 veor q1, q1, q7 veor q6, q6, q5 veor q4, q4, q1 veor q2, q2, q7 veor q5, q5, q7 veor q4, q4, q2 veor q7, q7, q0 veor q4, q4, q5 veor q3, q3, q6 veor q6, q6, q1 veor q3, q3, q4 veor q4, q4, q0 veor q7, q7, q3 subs r5,r5,#1 bcc .Ldec_done @ multiplication by 0x05-0x00-0x04-0x00 vext.8 q8, q0, q0, #8 vext.8 q14, q3, q3, #8 vext.8 q15, q5, q5, #8 veor q8, q8, q0 vext.8 q9, q1, q1, #8 veor q14, q14, q3 vext.8 q10, q6, q6, #8 veor q15, q15, q5 vext.8 q11, q4, q4, #8 veor q9, q9, q1 vext.8 q12, q2, q2, #8 veor q10, q10, q6 vext.8 q13, q7, q7, #8 veor q11, q11, q4 veor q12, q12, q2 veor q13, q13, q7 veor q0, q0, q14 veor q1, q1, q14 veor q6, q6, q8 veor q2, q2, q10 veor q4, q4, q9 veor q1, q1, q15 veor q6, q6, q15 veor q2, q2, q14 veor q7, q7, q11 veor q4, q4, q14 veor q3, q3, q12 veor q2, q2, q15 veor q7, q7, q15 veor q5, q5, q13 vext.8 q8, q0, q0, #12 @ x0 <<< 32 vext.8 q9, q1, q1, #12 veor q0, q0, q8 @ x0 ^ (x0 <<< 32) vext.8 q10, q6, q6, #12 veor q1, q1, q9 vext.8 q11, q4, q4, #12 veor q6, q6, q10 vext.8 q12, q2, q2, #12 veor q4, q4, q11 vext.8 q13, q7, q7, #12 veor q2, q2, q12 vext.8 q14, 
q3, q3, #12 veor q7, q7, q13 vext.8 q15, q5, q5, #12 veor q3, q3, q14 veor q9, q9, q0 veor q5, q5, q15 vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64) veor q10, q10, q1 veor q8, q8, q5 veor q9, q9, q5 vext.8 q1, q1, q1, #8 veor q13, q13, q2 veor q0, q0, q8 veor q14, q14, q7 veor q1, q1, q9 vext.8 q8, q2, q2, #8 veor q12, q12, q4 vext.8 q9, q7, q7, #8 veor q15, q15, q3 vext.8 q2, q4, q4, #8 veor q11, q11, q6 vext.8 q7, q5, q5, #8 veor q12, q12, q5 vext.8 q4, q3, q3, #8 veor q11, q11, q5 vext.8 q3, q6, q6, #8 veor q5, q9, q13 veor q11, q11, q2 veor q7, q7, q15 veor q6, q4, q14 veor q4, q8, q12 veor q2, q3, q10 vmov q3, q11 @ vmov q5, q9 vldmia r6, {q12} @ .LISR ite eq @ Thumb2 thing, sanity check in ARM addeq r6,r6,#0x10 bne .Ldec_loop vldmia r6, {q12} @ .LISRM0 b .Ldec_loop .align 4 .Ldec_done: vmov.i8 q8,#0x55 @ compose .LBS0 vmov.i8 q9,#0x33 @ compose .LBS1 vshr.u64 q10, q3, #1 vshr.u64 q11, q2, #1 veor q10, q10, q5 veor q11, q11, q7 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #1 veor q7, q7, q11 vshl.u64 q11, q11, #1 veor q3, q3, q10 veor q2, q2, q11 vshr.u64 q10, q6, #1 vshr.u64 q11, q0, #1 veor q10, q10, q4 veor q11, q11, q1 vand q10, q10, q8 vand q11, q11, q8 veor q4, q4, q10 vshl.u64 q10, q10, #1 veor q1, q1, q11 vshl.u64 q11, q11, #1 veor q6, q6, q10 veor q0, q0, q11 vmov.i8 q8,#0x0f @ compose .LBS2 vshr.u64 q10, q7, #2 vshr.u64 q11, q2, #2 veor q10, q10, q5 veor q11, q11, q3 vand q10, q10, q9 vand q11, q11, q9 veor q5, q5, q10 vshl.u64 q10, q10, #2 veor q3, q3, q11 vshl.u64 q11, q11, #2 veor q7, q7, q10 veor q2, q2, q11 vshr.u64 q10, q1, #2 vshr.u64 q11, q0, #2 veor q10, q10, q4 veor q11, q11, q6 vand q10, q10, q9 vand q11, q11, q9 veor q4, q4, q10 vshl.u64 q10, q10, #2 veor q6, q6, q11 vshl.u64 q11, q11, #2 veor q1, q1, q10 veor q0, q0, q11 vshr.u64 q10, q4, #4 vshr.u64 q11, q6, #4 veor q10, q10, q5 veor q11, q11, q3 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #4 veor q3, q3, q11 vshl.u64 q11, q11, 
#4 veor q4, q4, q10 veor q6, q6, q11 vshr.u64 q10, q1, #4 vshr.u64 q11, q0, #4 veor q10, q10, q7 veor q11, q11, q2 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #4 veor q2, q2, q11 vshl.u64 q11, q11, #4 veor q1, q1, q10 veor q0, q0, q11 vldmia r4, {q8} @ last round key veor q6, q6, q8 veor q4, q4, q8 veor q2, q2, q8 veor q7, q7, q8 veor q3, q3, q8 veor q5, q5, q8 veor q0, q0, q8 veor q1, q1, q8 bx lr .size _bsaes_decrypt8,.-_bsaes_decrypt8 .type _bsaes_const,%object .align 6 _bsaes_const: .LM0ISR:@ InvShiftRows constants .quad 0x0a0e0206070b0f03, 0x0004080c0d010509 .LISR: .quad 0x0504070602010003, 0x0f0e0d0c080b0a09 .LISRM0: .quad 0x01040b0e0205080f, 0x0306090c00070a0d .LM0SR:@ ShiftRows constants .quad 0x0a0e02060f03070b, 0x0004080c05090d01 .LSR: .quad 0x0504070600030201, 0x0f0e0d0c0a09080b .LSRM0: .quad 0x0304090e00050a0f, 0x01060b0c0207080d .LM0: .quad 0x02060a0e03070b0f, 0x0004080c0105090d .LREVM0SR: .quad 0x090d01050c000408, 0x03070b0f060a0e02 .byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 6 .size _bsaes_const,.-_bsaes_const .type _bsaes_encrypt8,%function .align 4 _bsaes_encrypt8: adr r6,. 
vldmia r4!, {q9} @ round 0 key #if defined(__thumb2__) || defined(__APPLE__) adr r6,.LM0SR #else sub r6,r6,#_bsaes_encrypt8-.LM0SR #endif vldmia r6!, {q8} @ .LM0SR _bsaes_encrypt8_alt: veor q10, q0, q9 @ xor with round0 key veor q11, q1, q9 vtbl.8 d0, {q10}, d16 vtbl.8 d1, {q10}, d17 veor q12, q2, q9 vtbl.8 d2, {q11}, d16 vtbl.8 d3, {q11}, d17 veor q13, q3, q9 vtbl.8 d4, {q12}, d16 vtbl.8 d5, {q12}, d17 veor q14, q4, q9 vtbl.8 d6, {q13}, d16 vtbl.8 d7, {q13}, d17 veor q15, q5, q9 vtbl.8 d8, {q14}, d16 vtbl.8 d9, {q14}, d17 veor q10, q6, q9 vtbl.8 d10, {q15}, d16 vtbl.8 d11, {q15}, d17 veor q11, q7, q9 vtbl.8 d12, {q10}, d16 vtbl.8 d13, {q10}, d17 vtbl.8 d14, {q11}, d16 vtbl.8 d15, {q11}, d17 _bsaes_encrypt8_bitslice: vmov.i8 q8,#0x55 @ compose .LBS0 vmov.i8 q9,#0x33 @ compose .LBS1 vshr.u64 q10, q6, #1 vshr.u64 q11, q4, #1 veor q10, q10, q7 veor q11, q11, q5 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #1 veor q5, q5, q11 vshl.u64 q11, q11, #1 veor q6, q6, q10 veor q4, q4, q11 vshr.u64 q10, q2, #1 vshr.u64 q11, q0, #1 veor q10, q10, q3 veor q11, q11, q1 vand q10, q10, q8 vand q11, q11, q8 veor q3, q3, q10 vshl.u64 q10, q10, #1 veor q1, q1, q11 vshl.u64 q11, q11, #1 veor q2, q2, q10 veor q0, q0, q11 vmov.i8 q8,#0x0f @ compose .LBS2 vshr.u64 q10, q5, #2 vshr.u64 q11, q4, #2 veor q10, q10, q7 veor q11, q11, q6 vand q10, q10, q9 vand q11, q11, q9 veor q7, q7, q10 vshl.u64 q10, q10, #2 veor q6, q6, q11 vshl.u64 q11, q11, #2 veor q5, q5, q10 veor q4, q4, q11 vshr.u64 q10, q1, #2 vshr.u64 q11, q0, #2 veor q10, q10, q3 veor q11, q11, q2 vand q10, q10, q9 vand q11, q11, q9 veor q3, q3, q10 vshl.u64 q10, q10, #2 veor q2, q2, q11 vshl.u64 q11, q11, #2 veor q1, q1, q10 veor q0, q0, q11 vshr.u64 q10, q3, #4 vshr.u64 q11, q2, #4 veor q10, q10, q7 veor q11, q11, q6 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #4 veor q6, q6, q11 vshl.u64 q11, q11, #4 veor q3, q3, q10 veor q2, q2, q11 vshr.u64 q10, q1, #4 vshr.u64 q11, q0, #4 
veor q10, q10, q5 veor q11, q11, q4 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #4 veor q4, q4, q11 vshl.u64 q11, q11, #4 veor q1, q1, q10 veor q0, q0, q11 sub r5,r5,#1 b .Lenc_sbox .align 4 .Lenc_loop: vldmia r4!, {q8,q9,q10,q11} veor q8, q8, q0 veor q9, q9, q1 vtbl.8 d0, {q8}, d24 vtbl.8 d1, {q8}, d25 vldmia r4!, {q8} veor q10, q10, q2 vtbl.8 d2, {q9}, d24 vtbl.8 d3, {q9}, d25 vldmia r4!, {q9} veor q11, q11, q3 vtbl.8 d4, {q10}, d24 vtbl.8 d5, {q10}, d25 vldmia r4!, {q10} vtbl.8 d6, {q11}, d24 vtbl.8 d7, {q11}, d25 vldmia r4!, {q11} veor q8, q8, q4 veor q9, q9, q5 vtbl.8 d8, {q8}, d24 vtbl.8 d9, {q8}, d25 veor q10, q10, q6 vtbl.8 d10, {q9}, d24 vtbl.8 d11, {q9}, d25 veor q11, q11, q7 vtbl.8 d12, {q10}, d24 vtbl.8 d13, {q10}, d25 vtbl.8 d14, {q11}, d24 vtbl.8 d15, {q11}, d25 .Lenc_sbox: veor q2, q2, q1 veor q5, q5, q6 veor q3, q3, q0 veor q6, q6, q2 veor q5, q5, q0 veor q6, q6, q3 veor q3, q3, q7 veor q7, q7, q5 veor q3, q3, q4 veor q4, q4, q5 veor q2, q2, q7 veor q3, q3, q1 veor q1, q1, q5 veor q11, q7, q4 veor q10, q1, q2 veor q9, q5, q3 veor q13, q2, q4 vmov q8, q10 veor q12, q6, q0 vorr q10, q10, q9 veor q15, q11, q8 vand q14, q11, q12 vorr q11, q11, q12 veor q12, q12, q9 vand q8, q8, q9 veor q9, q3, q0 vand q15, q15, q12 vand q13, q13, q9 veor q9, q7, q1 veor q12, q5, q6 veor q11, q11, q13 veor q10, q10, q13 vand q13, q9, q12 vorr q9, q9, q12 veor q11, q11, q15 veor q8, q8, q13 veor q10, q10, q14 veor q9, q9, q15 veor q8, q8, q14 vand q12, q2, q3 veor q9, q9, q14 vand q13, q4, q0 vand q14, q1, q5 vorr q15, q7, q6 veor q11, q11, q12 veor q9, q9, q14 veor q8, q8, q15 veor q10, q10, q13 @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3 @ new smaller inversion vand q14, q11, q9 vmov q12, q8 veor q13, q10, q14 veor q15, q8, q14 veor q14, q8, q14 @ q14=q15 vbsl q13, q9, q8 vbsl q15, q11, q10 veor q11, q11, q10 vbsl q12, q13, q14 vbsl q8, q14, q13 vand q14, q12, q15 veor q9, q9, q8 veor q14, q14, q11 veor q12, q6, q0 veor q8, q5, q3 veor q10, q15, 
q14 vand q10, q10, q6 veor q6, q6, q5 vand q11, q5, q15 vand q6, q6, q14 veor q5, q11, q10 veor q6, q6, q11 veor q15, q15, q13 veor q14, q14, q9 veor q11, q15, q14 veor q10, q13, q9 vand q11, q11, q12 vand q10, q10, q0 veor q12, q12, q8 veor q0, q0, q3 vand q8, q8, q15 vand q3, q3, q13 vand q12, q12, q14 vand q0, q0, q9 veor q8, q8, q12 veor q0, q0, q3 veor q12, q12, q11 veor q3, q3, q10 veor q6, q6, q12 veor q0, q0, q12 veor q5, q5, q8 veor q3, q3, q8 veor q12, q7, q4 veor q8, q1, q2 veor q11, q15, q14 veor q10, q13, q9 vand q11, q11, q12 vand q10, q10, q4 veor q12, q12, q8 veor q4, q4, q2 vand q8, q8, q15 vand q2, q2, q13 vand q12, q12, q14 vand q4, q4, q9 veor q8, q8, q12 veor q4, q4, q2 veor q12, q12, q11 veor q2, q2, q10 veor q15, q15, q13 veor q14, q14, q9 veor q10, q15, q14 vand q10, q10, q7 veor q7, q7, q1 vand q11, q1, q15 vand q7, q7, q14 veor q1, q11, q10 veor q7, q7, q11 veor q7, q7, q12 veor q4, q4, q12 veor q1, q1, q8 veor q2, q2, q8 veor q7, q7, q0 veor q1, q1, q6 veor q6, q6, q0 veor q4, q4, q7 veor q0, q0, q1 veor q1, q1, q5 veor q5, q5, q2 veor q2, q2, q3 veor q3, q3, q5 veor q4, q4, q5 veor q6, q6, q3 subs r5,r5,#1 bcc .Lenc_done vext.8 q8, q0, q0, #12 @ x0 <<< 32 vext.8 q9, q1, q1, #12 veor q0, q0, q8 @ x0 ^ (x0 <<< 32) vext.8 q10, q4, q4, #12 veor q1, q1, q9 vext.8 q11, q6, q6, #12 veor q4, q4, q10 vext.8 q12, q3, q3, #12 veor q6, q6, q11 vext.8 q13, q7, q7, #12 veor q3, q3, q12 vext.8 q14, q2, q2, #12 veor q7, q7, q13 vext.8 q15, q5, q5, #12 veor q2, q2, q14 veor q9, q9, q0 veor q5, q5, q15 vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64) veor q10, q10, q1 veor q8, q8, q5 veor q9, q9, q5 vext.8 q1, q1, q1, #8 veor q13, q13, q3 veor q0, q0, q8 veor q14, q14, q7 veor q1, q1, q9 vext.8 q8, q3, q3, #8 veor q12, q12, q6 vext.8 q9, q7, q7, #8 veor q15, q15, q2 vext.8 q3, q6, q6, #8 veor q11, q11, q4 vext.8 q7, q5, q5, #8 veor q12, q12, q5 vext.8 q6, q2, q2, #8 veor q11, q11, q5 vext.8 q2, q4, q4, #8 veor q5, q9, q13 veor q4, q8, q12 veor q3, q3, 
q11 veor q7, q7, q15 veor q6, q6, q14 @ vmov q4, q8 veor q2, q2, q10 @ vmov q5, q9 vldmia r6, {q12} @ .LSR ite eq @ Thumb2 thing, samity check in ARM addeq r6,r6,#0x10 bne .Lenc_loop vldmia r6, {q12} @ .LSRM0 b .Lenc_loop .align 4 .Lenc_done: vmov.i8 q8,#0x55 @ compose .LBS0 vmov.i8 q9,#0x33 @ compose .LBS1 vshr.u64 q10, q2, #1 vshr.u64 q11, q3, #1 veor q10, q10, q5 veor q11, q11, q7 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #1 veor q7, q7, q11 vshl.u64 q11, q11, #1 veor q2, q2, q10 veor q3, q3, q11 vshr.u64 q10, q4, #1 vshr.u64 q11, q0, #1 veor q10, q10, q6 veor q11, q11, q1 vand q10, q10, q8 vand q11, q11, q8 veor q6, q6, q10 vshl.u64 q10, q10, #1 veor q1, q1, q11 vshl.u64 q11, q11, #1 veor q4, q4, q10 veor q0, q0, q11 vmov.i8 q8,#0x0f @ compose .LBS2 vshr.u64 q10, q7, #2 vshr.u64 q11, q3, #2 veor q10, q10, q5 veor q11, q11, q2 vand q10, q10, q9 vand q11, q11, q9 veor q5, q5, q10 vshl.u64 q10, q10, #2 veor q2, q2, q11 vshl.u64 q11, q11, #2 veor q7, q7, q10 veor q3, q3, q11 vshr.u64 q10, q1, #2 vshr.u64 q11, q0, #2 veor q10, q10, q6 veor q11, q11, q4 vand q10, q10, q9 vand q11, q11, q9 veor q6, q6, q10 vshl.u64 q10, q10, #2 veor q4, q4, q11 vshl.u64 q11, q11, #2 veor q1, q1, q10 veor q0, q0, q11 vshr.u64 q10, q6, #4 vshr.u64 q11, q4, #4 veor q10, q10, q5 veor q11, q11, q2 vand q10, q10, q8 vand q11, q11, q8 veor q5, q5, q10 vshl.u64 q10, q10, #4 veor q2, q2, q11 vshl.u64 q11, q11, #4 veor q6, q6, q10 veor q4, q4, q11 vshr.u64 q10, q1, #4 vshr.u64 q11, q0, #4 veor q10, q10, q7 veor q11, q11, q3 vand q10, q10, q8 vand q11, q11, q8 veor q7, q7, q10 vshl.u64 q10, q10, #4 veor q3, q3, q11 vshl.u64 q11, q11, #4 veor q1, q1, q10 veor q0, q0, q11 vldmia r4, {q8} @ last round key veor q4, q4, q8 veor q6, q6, q8 veor q3, q3, q8 veor q7, q7, q8 veor q2, q2, q8 veor q5, q5, q8 veor q0, q0, q8 veor q1, q1, q8 bx lr .size _bsaes_encrypt8,.-_bsaes_encrypt8 .type _bsaes_key_convert,%function .align 4 _bsaes_key_convert: adr r6,. vld1.8 {q7}, [r4]! 
@ load round 0 key #if defined(__thumb2__) || defined(__APPLE__) adr r6,.LM0 #else sub r6,r6,#_bsaes_key_convert-.LM0 #endif vld1.8 {q15}, [r4]! @ load round 1 key vmov.i8 q8, #0x01 @ bit masks vmov.i8 q9, #0x02 vmov.i8 q10, #0x04 vmov.i8 q11, #0x08 vmov.i8 q12, #0x10 vmov.i8 q13, #0x20 vldmia r6, {q14} @ .LM0 #ifdef __ARMEL__ vrev32.8 q7, q7 vrev32.8 q15, q15 #endif sub r5,r5,#1 vstmia r12!, {q7} @ save round 0 key b .Lkey_loop .align 4 .Lkey_loop: vtbl.8 d14,{q15},d28 vtbl.8 d15,{q15},d29 vmov.i8 q6, #0x40 vmov.i8 q15, #0x80 vtst.8 q0, q7, q8 vtst.8 q1, q7, q9 vtst.8 q2, q7, q10 vtst.8 q3, q7, q11 vtst.8 q4, q7, q12 vtst.8 q5, q7, q13 vtst.8 q6, q7, q6 vtst.8 q7, q7, q15 vld1.8 {q15}, [r4]! @ load next round key vmvn q0, q0 @ "pnot" vmvn q1, q1 vmvn q5, q5 vmvn q6, q6 #ifdef __ARMEL__ vrev32.8 q15, q15 #endif subs r5,r5,#1 vstmia r12!,{q0,q1,q2,q3,q4,q5,q6,q7} @ write bit-sliced round key bne .Lkey_loop vmov.i8 q7,#0x63 @ compose .L63 @ don't save last round key bx lr .size _bsaes_key_convert,.-_bsaes_key_convert .globl bsaes_cbc_encrypt .hidden bsaes_cbc_encrypt .type bsaes_cbc_encrypt,%function .align 5 bsaes_cbc_encrypt: @ In OpenSSL, this function had a fallback to aes_nohw_cbc_encrypt for @ short inputs. We patch this out, using bsaes for all input sizes. 
@ it is up to the caller to make sure we are called with enc == 0 mov ip, sp stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr} VFP_ABI_PUSH ldr r8, [ip] @ IV is 1st arg on the stack mov r2, r2, lsr#4 @ len in 16 byte blocks sub sp, #0x10 @ scratch space to carry over the IV mov r9, sp @ save sp ldr r10, [r3, #240] @ get # of rounds #ifndef BSAES_ASM_EXTENDED_KEY @ allocate the key schedule on the stack sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key add r12, #96 @ sifze of bit-slices key schedule @ populate the key schedule mov r4, r3 @ pass key mov r5, r10 @ pass # of rounds mov sp, r12 @ sp is sp bl _bsaes_key_convert vldmia sp, {q6} vstmia r12, {q15} @ save last round key veor q7, q7, q6 @ fix up round 0 key vstmia sp, {q7} #else ldr r12, [r3, #244] eors r12, #1 beq 0f @ populate the key schedule str r12, [r3, #244] mov r4, r3 @ pass key mov r5, r10 @ pass # of rounds add r12, r3, #248 @ pass key schedule bl _bsaes_key_convert add r4, r3, #248 vldmia r4, {q6} vstmia r12, {q15} @ save last round key veor q7, q7, q6 @ fix up round 0 key vstmia r4, {q7} .align 2 #endif vld1.8 {q15}, [r8] @ load IV b .Lcbc_dec_loop .align 4 .Lcbc_dec_loop: subs r2, r2, #0x8 bmi .Lcbc_dec_loop_finish vld1.8 {q0,q1}, [r0]! @ load input vld1.8 {q2,q3}, [r0]! #ifndef BSAES_ASM_EXTENDED_KEY mov r4, sp @ pass the key #else add r4, r3, #248 #endif vld1.8 {q4,q5}, [r0]! mov r5, r10 vld1.8 {q6,q7}, [r0] sub r0, r0, #0x60 vstmia r9, {q15} @ put aside IV bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q8,q9}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q10,q11}, [r0]! veor q1, q1, q8 veor q6, q6, q9 vld1.8 {q12,q13}, [r0]! veor q4, q4, q10 veor q2, q2, q11 vld1.8 {q14,q15}, [r0]! veor q7, q7, q12 vst1.8 {q0,q1}, [r1]! @ write output veor q3, q3, q13 vst1.8 {q6}, [r1]! veor q5, q5, q14 vst1.8 {q4}, [r1]! vst1.8 {q2}, [r1]! vst1.8 {q7}, [r1]! vst1.8 {q3}, [r1]! vst1.8 {q5}, [r1]! 
b .Lcbc_dec_loop .Lcbc_dec_loop_finish: adds r2, r2, #8 beq .Lcbc_dec_done @ Set up most parameters for the _bsaes_decrypt8 call. #ifndef BSAES_ASM_EXTENDED_KEY mov r4, sp @ pass the key #else add r4, r3, #248 #endif mov r5, r10 vstmia r9, {q15} @ put aside IV vld1.8 {q0}, [r0]! @ load input cmp r2, #2 blo .Lcbc_dec_one vld1.8 {q1}, [r0]! beq .Lcbc_dec_two vld1.8 {q2}, [r0]! cmp r2, #4 blo .Lcbc_dec_three vld1.8 {q3}, [r0]! beq .Lcbc_dec_four vld1.8 {q4}, [r0]! cmp r2, #6 blo .Lcbc_dec_five vld1.8 {q5}, [r0]! beq .Lcbc_dec_six vld1.8 {q6}, [r0]! sub r0, r0, #0x70 bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q8,q9}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q10,q11}, [r0]! veor q1, q1, q8 veor q6, q6, q9 vld1.8 {q12,q13}, [r0]! veor q4, q4, q10 veor q2, q2, q11 vld1.8 {q15}, [r0]! veor q7, q7, q12 vst1.8 {q0,q1}, [r1]! @ write output veor q3, q3, q13 vst1.8 {q6}, [r1]! vst1.8 {q4}, [r1]! vst1.8 {q2}, [r1]! vst1.8 {q7}, [r1]! vst1.8 {q3}, [r1]! b .Lcbc_dec_done .align 4 .Lcbc_dec_six: sub r0, r0, #0x60 bl _bsaes_decrypt8 vldmia r9,{q14} @ reload IV vld1.8 {q8,q9}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q10,q11}, [r0]! veor q1, q1, q8 veor q6, q6, q9 vld1.8 {q12}, [r0]! veor q4, q4, q10 veor q2, q2, q11 vld1.8 {q15}, [r0]! veor q7, q7, q12 vst1.8 {q0,q1}, [r1]! @ write output vst1.8 {q6}, [r1]! vst1.8 {q4}, [r1]! vst1.8 {q2}, [r1]! vst1.8 {q7}, [r1]! b .Lcbc_dec_done .align 4 .Lcbc_dec_five: sub r0, r0, #0x50 bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q8,q9}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q10,q11}, [r0]! veor q1, q1, q8 veor q6, q6, q9 vld1.8 {q15}, [r0]! veor q4, q4, q10 vst1.8 {q0,q1}, [r1]! @ write output veor q2, q2, q11 vst1.8 {q6}, [r1]! vst1.8 {q4}, [r1]! vst1.8 {q2}, [r1]! b .Lcbc_dec_done .align 4 .Lcbc_dec_four: sub r0, r0, #0x40 bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q8,q9}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q10}, [r0]! 
veor q1, q1, q8 veor q6, q6, q9 vld1.8 {q15}, [r0]! veor q4, q4, q10 vst1.8 {q0,q1}, [r1]! @ write output vst1.8 {q6}, [r1]! vst1.8 {q4}, [r1]! b .Lcbc_dec_done .align 4 .Lcbc_dec_three: sub r0, r0, #0x30 bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q8,q9}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q15}, [r0]! veor q1, q1, q8 veor q6, q6, q9 vst1.8 {q0,q1}, [r1]! @ write output vst1.8 {q6}, [r1]! b .Lcbc_dec_done .align 4 .Lcbc_dec_two: sub r0, r0, #0x20 bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q8}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vld1.8 {q15}, [r0]! @ reload input veor q1, q1, q8 vst1.8 {q0,q1}, [r1]! @ write output b .Lcbc_dec_done .align 4 .Lcbc_dec_one: sub r0, r0, #0x10 bl _bsaes_decrypt8 vldmia r9, {q14} @ reload IV vld1.8 {q15}, [r0]! @ reload input veor q0, q0, q14 @ ^= IV vst1.8 {q0}, [r1]! @ write output .Lcbc_dec_done: #ifndef BSAES_ASM_EXTENDED_KEY vmov.i32 q0, #0 vmov.i32 q1, #0 .Lcbc_dec_bzero:@ wipe key schedule [if any] vstmia sp!, {q0,q1} cmp sp, r9 bne .Lcbc_dec_bzero #endif mov sp, r9 add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb vst1.8 {q15}, [r8] @ return IV VFP_ABI_POP ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} .size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt .globl bsaes_ctr32_encrypt_blocks .hidden bsaes_ctr32_encrypt_blocks .type bsaes_ctr32_encrypt_blocks,%function .align 5 bsaes_ctr32_encrypt_blocks: @ In OpenSSL, short inputs fall back to aes_nohw_* here. We patch this @ out to retain a constant-time implementation. 
mov ip, sp stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr} VFP_ABI_PUSH ldr r8, [ip] @ ctr is 1st arg on the stack sub sp, sp, #0x10 @ scratch space to carry over the ctr mov r9, sp @ save sp ldr r10, [r3, #240] @ get # of rounds #ifndef BSAES_ASM_EXTENDED_KEY @ allocate the key schedule on the stack sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key add r12, #96 @ size of bit-sliced key schedule @ populate the key schedule mov r4, r3 @ pass key mov r5, r10 @ pass # of rounds mov sp, r12 @ sp is sp bl _bsaes_key_convert veor q7,q7,q15 @ fix up last round key vstmia r12, {q7} @ save last round key vld1.8 {q0}, [r8] @ load counter #ifdef __APPLE__ mov r8, #:lower16:(.LREVM0SR-.LM0) add r8, r6, r8 #else add r8, r6, #.LREVM0SR-.LM0 @ borrow r8 #endif vldmia sp, {q4} @ load round0 key #else ldr r12, [r3, #244] eors r12, #1 beq 0f @ populate the key schedule str r12, [r3, #244] mov r4, r3 @ pass key mov r5, r10 @ pass # of rounds add r12, r3, #248 @ pass key schedule bl _bsaes_key_convert veor q7,q7,q15 @ fix up last round key vstmia r12, {q7} @ save last round key .align 2 add r12, r3, #248 vld1.8 {q0}, [r8] @ load counter adrl r8, .LREVM0SR @ borrow r8 vldmia r12, {q4} @ load round0 key sub sp, #0x10 @ place for adjusted round0 key #endif vmov.i32 q8,#1 @ compose 1<<96 veor q9,q9,q9 vrev32.8 q0,q0 vext.8 q8,q9,q8,#4 vrev32.8 q4,q4 vadd.u32 q9,q8,q8 @ compose 2<<96 vstmia sp, {q4} @ save adjusted round0 key b .Lctr_enc_loop .align 4 .Lctr_enc_loop: vadd.u32 q10, q8, q9 @ compose 3<<96 vadd.u32 q1, q0, q8 @ +1 vadd.u32 q2, q0, q9 @ +2 vadd.u32 q3, q0, q10 @ +3 vadd.u32 q4, q1, q10 vadd.u32 q5, q2, q10 vadd.u32 q6, q3, q10 vadd.u32 q7, q4, q10 vadd.u32 q10, q5, q10 @ next counter @ Borrow prologue from _bsaes_encrypt8 to use the opportunity @ to flip byte order in 32-bit counter vldmia sp, {q9} @ load round0 key #ifndef BSAES_ASM_EXTENDED_KEY add r4, sp, #0x10 @ pass next round key #else add r4, r3, #264 #endif vldmia r8, {q8} @ .LREVM0SR mov r5, r10 @ pass rounds vstmia r9, 
{q10} @ save next counter #ifdef __APPLE__ mov r6, #:lower16:(.LREVM0SR-.LSR) sub r6, r8, r6 #else sub r6, r8, #.LREVM0SR-.LSR @ pass constants #endif bl _bsaes_encrypt8_alt subs r2, r2, #8 blo .Lctr_enc_loop_done vld1.8 {q8,q9}, [r0]! @ load input vld1.8 {q10,q11}, [r0]! veor q0, q8 veor q1, q9 vld1.8 {q12,q13}, [r0]! veor q4, q10 veor q6, q11 vld1.8 {q14,q15}, [r0]! veor q3, q12 vst1.8 {q0,q1}, [r1]! @ write output veor q7, q13 veor q2, q14 vst1.8 {q4}, [r1]! veor q5, q15 vst1.8 {q6}, [r1]! vmov.i32 q8, #1 @ compose 1<<96 vst1.8 {q3}, [r1]! veor q9, q9, q9 vst1.8 {q7}, [r1]! vext.8 q8, q9, q8, #4 vst1.8 {q2}, [r1]! vadd.u32 q9,q8,q8 @ compose 2<<96 vst1.8 {q5}, [r1]! vldmia r9, {q0} @ load counter bne .Lctr_enc_loop b .Lctr_enc_done .align 4 .Lctr_enc_loop_done: add r2, r2, #8 vld1.8 {q8}, [r0]! @ load input veor q0, q8 vst1.8 {q0}, [r1]! @ write output cmp r2, #2 blo .Lctr_enc_done vld1.8 {q9}, [r0]! veor q1, q9 vst1.8 {q1}, [r1]! beq .Lctr_enc_done vld1.8 {q10}, [r0]! veor q4, q10 vst1.8 {q4}, [r1]! cmp r2, #4 blo .Lctr_enc_done vld1.8 {q11}, [r0]! veor q6, q11 vst1.8 {q6}, [r1]! beq .Lctr_enc_done vld1.8 {q12}, [r0]! veor q3, q12 vst1.8 {q3}, [r1]! cmp r2, #6 blo .Lctr_enc_done vld1.8 {q13}, [r0]! veor q7, q13 vst1.8 {q7}, [r1]! beq .Lctr_enc_done vld1.8 {q14}, [r0] veor q2, q14 vst1.8 {q2}, [r1]! .Lctr_enc_done: vmov.i32 q0, #0 vmov.i32 q1, #0 #ifndef BSAES_ASM_EXTENDED_KEY .Lctr_enc_bzero:@ wipe key schedule [if any] vstmia sp!, {q0,q1} cmp sp, r9 bne .Lctr_enc_bzero #else vstmia sp, {q0,q1} #endif mov sp, r9 add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb VFP_ABI_POP ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return @ OpenSSL contains aes_nohw_* fallback code here. We patch this @ out to retain a constant-time implementation. 
.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/co-586-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _bn_mul_comba8 .private_extern _bn_mul_comba8 .align 4 _bn_mul_comba8: L_bn_mul_comba8_begin: pushl %esi movl 12(%esp),%esi pushl %edi movl 20(%esp),%edi pushl %ebp pushl %ebx xorl %ebx,%ebx movl (%esi),%eax xorl %ecx,%ecx movl (%edi),%edx # ################## Calculate word 0 xorl %ebp,%ebp # mul a[0]*b[0] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,(%eax) movl 4(%esi),%eax # saved r[0] # ################## Calculate word 1 xorl %ebx,%ebx # mul a[1]*b[0] mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx # mul a[0]*b[1] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl (%edi),%edx adcl $0,%ebx movl %ecx,4(%eax) movl 8(%esi),%eax # saved r[1] # ################## Calculate word 2 xorl %ecx,%ecx # mul a[2]*b[0] mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx # mul a[1]*b[1] mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx # mul a[0]*b[2] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl (%edi),%edx adcl $0,%ecx movl %ebp,8(%eax) movl 12(%esi),%eax # saved r[2] # ################## Calculate word 3 xorl %ebp,%ebp # mul a[3]*b[0] mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp # mul a[2]*b[1] mull %edx addl %eax,%ebx 
movl 4(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp # mul a[1]*b[2] mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp # mul a[0]*b[3] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,12(%eax) movl 16(%esi),%eax # saved r[3] # ################## Calculate word 4 xorl %ebx,%ebx # mul a[4]*b[0] mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx # mul a[3]*b[1] mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx # mul a[2]*b[2] mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx # mul a[1]*b[3] mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx # mul a[0]*b[4] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl (%edi),%edx adcl $0,%ebx movl %ecx,16(%eax) movl 20(%esi),%eax # saved r[4] # ################## Calculate word 5 xorl %ecx,%ecx # mul a[5]*b[0] mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx # mul a[4]*b[1] mull %edx addl %eax,%ebp movl 12(%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx # mul a[3]*b[2] mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx # mul a[2]*b[3] mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 16(%edi),%edx adcl $0,%ecx # mul a[1]*b[4] mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx # mul a[0]*b[5] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl (%edi),%edx adcl $0,%ecx movl %ebp,20(%eax) movl 24(%esi),%eax # saved r[5] # ################## Calculate word 6 xorl %ebp,%ebp # mul a[6]*b[0] mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp # mul a[5]*b[1] mull %edx addl %eax,%ebx movl 16(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp # mul a[4]*b[2] mull %edx addl %eax,%ebx 
movl 12(%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp # mul a[3]*b[3] mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 16(%edi),%edx adcl $0,%ebp # mul a[2]*b[4] mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 20(%edi),%edx adcl $0,%ebp # mul a[1]*b[5] mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp # mul a[0]*b[6] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,24(%eax) movl 28(%esi),%eax # saved r[6] # ################## Calculate word 7 xorl %ebx,%ebx # mul a[7]*b[0] mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx # mul a[6]*b[1] mull %edx addl %eax,%ecx movl 20(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx # mul a[5]*b[2] mull %edx addl %eax,%ecx movl 16(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx # mul a[4]*b[3] mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx # mul a[3]*b[4] mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 20(%edi),%edx adcl $0,%ebx # mul a[2]*b[5] mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 24(%edi),%edx adcl $0,%ebx # mul a[1]*b[6] mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx # mul a[0]*b[7] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx movl %ecx,28(%eax) movl 28(%esi),%eax # saved r[7] # ################## Calculate word 8 xorl %ecx,%ecx # mul a[7]*b[1] mull %edx addl %eax,%ebp movl 24(%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx # mul a[6]*b[2] mull %edx addl %eax,%ebp movl 20(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx # mul a[5]*b[3] mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 16(%edi),%edx adcl $0,%ecx # mul a[4]*b[4] mull %edx addl %eax,%ebp movl 12(%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx # mul a[3]*b[5] mull %edx 
addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 24(%edi),%edx adcl $0,%ecx # mul a[2]*b[6] mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 28(%edi),%edx adcl $0,%ecx # mul a[1]*b[7] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx movl %ebp,32(%eax) movl 28(%esi),%eax # saved r[8] # ################## Calculate word 9 xorl %ebp,%ebp # mul a[7]*b[2] mull %edx addl %eax,%ebx movl 24(%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp # mul a[6]*b[3] mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 16(%edi),%edx adcl $0,%ebp # mul a[5]*b[4] mull %edx addl %eax,%ebx movl 16(%esi),%eax adcl %edx,%ecx movl 20(%edi),%edx adcl $0,%ebp # mul a[4]*b[5] mull %edx addl %eax,%ebx movl 12(%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp # mul a[3]*b[6] mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 28(%edi),%edx adcl $0,%ebp # mul a[2]*b[7] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp movl %ebx,36(%eax) movl 28(%esi),%eax # saved r[9] # ################## Calculate word 10 xorl %ebx,%ebx # mul a[7]*b[3] mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx # mul a[6]*b[4] mull %edx addl %eax,%ecx movl 20(%esi),%eax adcl %edx,%ebp movl 20(%edi),%edx adcl $0,%ebx # mul a[5]*b[5] mull %edx addl %eax,%ecx movl 16(%esi),%eax adcl %edx,%ebp movl 24(%edi),%edx adcl $0,%ebx # mul a[4]*b[6] mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx # mul a[3]*b[7] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx movl %ecx,40(%eax) movl 28(%esi),%eax # saved r[10] # ################## Calculate word 11 xorl %ecx,%ecx # mul a[7]*b[4] mull %edx addl %eax,%ebp movl 24(%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx # mul a[6]*b[5] mull %edx addl %eax,%ebp movl 20(%esi),%eax adcl %edx,%ebx movl 24(%edi),%edx adcl $0,%ecx # 
mul a[5]*b[6] mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 28(%edi),%edx adcl $0,%ecx # mul a[4]*b[7] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx movl %ebp,44(%eax) movl 28(%esi),%eax # saved r[11] # ################## Calculate word 12 xorl %ebp,%ebp # mul a[7]*b[5] mull %edx addl %eax,%ebx movl 24(%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp # mul a[6]*b[6] mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 28(%edi),%edx adcl $0,%ebp # mul a[5]*b[7] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp movl %ebx,48(%eax) movl 28(%esi),%eax # saved r[12] # ################## Calculate word 13 xorl %ebx,%ebx # mul a[7]*b[6] mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx # mul a[6]*b[7] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx movl %ecx,52(%eax) movl 28(%esi),%eax # saved r[13] # ################## Calculate word 14 xorl %ecx,%ecx # mul a[7]*b[7] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx adcl $0,%ecx movl %ebp,56(%eax) # saved r[14] # save r[15] movl %ebx,60(%eax) popl %ebx popl %ebp popl %edi popl %esi ret .globl _bn_mul_comba4 .private_extern _bn_mul_comba4 .align 4 _bn_mul_comba4: L_bn_mul_comba4_begin: pushl %esi movl 12(%esp),%esi pushl %edi movl 20(%esp),%edi pushl %ebp pushl %ebx xorl %ebx,%ebx movl (%esi),%eax xorl %ecx,%ecx movl (%edi),%edx # ################## Calculate word 0 xorl %ebp,%ebp # mul a[0]*b[0] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,(%eax) movl 4(%esi),%eax # saved r[0] # ################## Calculate word 1 xorl %ebx,%ebx # mul a[1]*b[0] mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx # mul a[0]*b[1] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl (%edi),%edx adcl $0,%ebx movl %ecx,4(%eax) movl 
8(%esi),%eax # saved r[1] # ################## Calculate word 2 xorl %ecx,%ecx # mul a[2]*b[0] mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx # mul a[1]*b[1] mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx # mul a[0]*b[2] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl (%edi),%edx adcl $0,%ecx movl %ebp,8(%eax) movl 12(%esi),%eax # saved r[2] # ################## Calculate word 3 xorl %ebp,%ebp # mul a[3]*b[0] mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp # mul a[2]*b[1] mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp # mul a[1]*b[2] mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp # mul a[0]*b[3] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp movl %ebx,12(%eax) movl 12(%esi),%eax # saved r[3] # ################## Calculate word 4 xorl %ebx,%ebx # mul a[3]*b[1] mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx # mul a[2]*b[2] mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx # mul a[1]*b[3] mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx movl %ecx,16(%eax) movl 12(%esi),%eax # saved r[4] # ################## Calculate word 5 xorl %ecx,%ecx # mul a[3]*b[2] mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx # mul a[2]*b[3] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx movl %ebp,20(%eax) movl 12(%esi),%eax # saved r[5] # ################## Calculate word 6 xorl %ebp,%ebp # mul a[3]*b[3] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx adcl $0,%ebp movl %ebx,24(%eax) # saved r[6] # save r[7] movl %ecx,28(%eax) popl %ebx popl %ebp popl %edi popl %esi ret .globl _bn_sqr_comba8 .private_extern 
_bn_sqr_comba8 .align 4 _bn_sqr_comba8: L_bn_sqr_comba8_begin: pushl %esi pushl %edi pushl %ebp pushl %ebx movl 20(%esp),%edi movl 24(%esp),%esi xorl %ebx,%ebx xorl %ecx,%ecx movl (%esi),%eax # ############### Calculate word 0 xorl %ebp,%ebp # sqr a[0]*a[0] mull %eax addl %eax,%ebx adcl %edx,%ecx movl (%esi),%edx adcl $0,%ebp movl %ebx,(%edi) movl 4(%esi),%eax # saved r[0] # ############### Calculate word 1 xorl %ebx,%ebx # sqr a[1]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx movl %ecx,4(%edi) movl (%esi),%edx # saved r[1] # ############### Calculate word 2 xorl %ecx,%ecx # sqr a[2]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 4(%esi),%eax adcl $0,%ecx # sqr a[1]*a[1] mull %eax addl %eax,%ebp adcl %edx,%ebx movl (%esi),%edx adcl $0,%ecx movl %ebp,8(%edi) movl 12(%esi),%eax # saved r[2] # ############### Calculate word 3 xorl %ebp,%ebp # sqr a[3]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 8(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx # sqr a[2]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 16(%esi),%eax adcl $0,%ebp movl %ebx,12(%edi) movl (%esi),%edx # saved r[3] # ############### Calculate word 4 xorl %ebx,%ebx # sqr a[4]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 12(%esi),%eax adcl $0,%ebx movl 4(%esi),%edx # sqr a[3]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx # sqr a[2]*a[2] mull %eax addl %eax,%ecx adcl %edx,%ebp movl (%esi),%edx adcl $0,%ebx movl %ecx,16(%edi) movl 20(%esi),%eax # saved r[4] # ############### Calculate word 5 xorl %ecx,%ecx # sqr a[5]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 16(%esi),%eax adcl $0,%ecx movl 4(%esi),%edx # sqr a[4]*a[1] mull %edx addl 
%eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 12(%esi),%eax adcl $0,%ecx movl 8(%esi),%edx # sqr a[3]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 24(%esi),%eax adcl $0,%ecx movl %ebp,20(%edi) movl (%esi),%edx # saved r[5] # ############### Calculate word 6 xorl %ebp,%ebp # sqr a[6]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 20(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx # sqr a[5]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 16(%esi),%eax adcl $0,%ebp movl 8(%esi),%edx # sqr a[4]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 12(%esi),%eax adcl $0,%ebp # sqr a[3]*a[3] mull %eax addl %eax,%ebx adcl %edx,%ecx movl (%esi),%edx adcl $0,%ebp movl %ebx,24(%edi) movl 28(%esi),%eax # saved r[6] # ############### Calculate word 7 xorl %ebx,%ebx # sqr a[7]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 24(%esi),%eax adcl $0,%ebx movl 4(%esi),%edx # sqr a[6]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 20(%esi),%eax adcl $0,%ebx movl 8(%esi),%edx # sqr a[5]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 16(%esi),%eax adcl $0,%ebx movl 12(%esi),%edx # sqr a[4]*a[3] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 28(%esi),%eax adcl $0,%ebx movl %ecx,28(%edi) movl 4(%esi),%edx # saved r[7] # ############### Calculate word 8 xorl %ecx,%ecx # sqr a[7]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 24(%esi),%eax adcl $0,%ecx movl 8(%esi),%edx # sqr a[6]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 20(%esi),%eax adcl $0,%ecx movl 12(%esi),%edx # sqr a[5]*a[3] mull %edx addl %eax,%eax adcl %edx,%edx 
adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 16(%esi),%eax adcl $0,%ecx # sqr a[4]*a[4] mull %eax addl %eax,%ebp adcl %edx,%ebx movl 8(%esi),%edx adcl $0,%ecx movl %ebp,32(%edi) movl 28(%esi),%eax # saved r[8] # ############### Calculate word 9 xorl %ebp,%ebp # sqr a[7]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 24(%esi),%eax adcl $0,%ebp movl 12(%esi),%edx # sqr a[6]*a[3] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 20(%esi),%eax adcl $0,%ebp movl 16(%esi),%edx # sqr a[5]*a[4] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 28(%esi),%eax adcl $0,%ebp movl %ebx,36(%edi) movl 12(%esi),%edx # saved r[9] # ############### Calculate word 10 xorl %ebx,%ebx # sqr a[7]*a[3] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 24(%esi),%eax adcl $0,%ebx movl 16(%esi),%edx # sqr a[6]*a[4] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 20(%esi),%eax adcl $0,%ebx # sqr a[5]*a[5] mull %eax addl %eax,%ecx adcl %edx,%ebp movl 16(%esi),%edx adcl $0,%ebx movl %ecx,40(%edi) movl 28(%esi),%eax # saved r[10] # ############### Calculate word 11 xorl %ecx,%ecx # sqr a[7]*a[4] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 24(%esi),%eax adcl $0,%ecx movl 20(%esi),%edx # sqr a[6]*a[5] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 28(%esi),%eax adcl $0,%ecx movl %ebp,44(%edi) movl 20(%esi),%edx # saved r[11] # ############### Calculate word 12 xorl %ebp,%ebp # sqr a[7]*a[5] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 24(%esi),%eax adcl $0,%ebp # sqr a[6]*a[6] mull %eax addl %eax,%ebx adcl %edx,%ecx movl 24(%esi),%edx adcl $0,%ebp movl %ebx,48(%edi) movl 28(%esi),%eax # saved r[12] # ############### Calculate word 13 xorl %ebx,%ebx # sqr a[7]*a[6] mull %edx 
addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 28(%esi),%eax adcl $0,%ebx movl %ecx,52(%edi) # saved r[13] # ############### Calculate word 14 xorl %ecx,%ecx # sqr a[7]*a[7] mull %eax addl %eax,%ebp adcl %edx,%ebx adcl $0,%ecx movl %ebp,56(%edi) # saved r[14] movl %ebx,60(%edi) popl %ebx popl %ebp popl %edi popl %esi ret .globl _bn_sqr_comba4 .private_extern _bn_sqr_comba4 .align 4 _bn_sqr_comba4: L_bn_sqr_comba4_begin: pushl %esi pushl %edi pushl %ebp pushl %ebx movl 20(%esp),%edi movl 24(%esp),%esi xorl %ebx,%ebx xorl %ecx,%ecx movl (%esi),%eax # ############### Calculate word 0 xorl %ebp,%ebp # sqr a[0]*a[0] mull %eax addl %eax,%ebx adcl %edx,%ecx movl (%esi),%edx adcl $0,%ebp movl %ebx,(%edi) movl 4(%esi),%eax # saved r[0] # ############### Calculate word 1 xorl %ebx,%ebx # sqr a[1]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx movl %ecx,4(%edi) movl (%esi),%edx # saved r[1] # ############### Calculate word 2 xorl %ecx,%ecx # sqr a[2]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 4(%esi),%eax adcl $0,%ecx # sqr a[1]*a[1] mull %eax addl %eax,%ebp adcl %edx,%ebx movl (%esi),%edx adcl $0,%ecx movl %ebp,8(%edi) movl 12(%esi),%eax # saved r[2] # ############### Calculate word 3 xorl %ebp,%ebp # sqr a[3]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 8(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx # sqr a[2]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 12(%esi),%eax adcl $0,%ebp movl %ebx,12(%edi) movl 4(%esi),%edx # saved r[3] # ############### Calculate word 4 xorl %ebx,%ebx # sqr a[3]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx # sqr a[2]*a[2] mull %eax addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%edx adcl $0,%ebx movl %ecx,16(%edi) movl 12(%esi),%eax # 
saved r[4] # ############### Calculate word 5 xorl %ecx,%ecx # sqr a[3]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 12(%esi),%eax adcl $0,%ecx movl %ebp,20(%edi) # saved r[5] # ############### Calculate word 6 xorl %ebp,%ebp # sqr a[3]*a[3] mull %eax addl %eax,%ebx adcl %edx,%ecx adcl $0,%ebp movl %ebx,24(%edi) # saved r[6] movl %ecx,28(%edi) popl %ebx popl %ebp popl %edi popl %esi ret #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/co-586-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl bn_mul_comba8 .hidden bn_mul_comba8 .type bn_mul_comba8,@function .align 16 bn_mul_comba8: .L_bn_mul_comba8_begin: pushl %esi movl 12(%esp),%esi pushl %edi movl 20(%esp),%edi pushl %ebp pushl %ebx xorl %ebx,%ebx movl (%esi),%eax xorl %ecx,%ecx movl (%edi),%edx xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,(%eax) movl 4(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl (%edi),%edx adcl $0,%ebx movl %ecx,4(%eax) movl 8(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl (%edi),%edx adcl $0,%ecx movl %ebp,8(%eax) movl 12(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 
8(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,12(%eax) movl 16(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl (%edi),%edx adcl $0,%ebx movl %ecx,16(%eax) movl 20(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 12(%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 16(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl (%edi),%edx adcl $0,%ecx movl %ebp,20(%eax) movl 24(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 16(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 12(%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 16(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 20(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx 
adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,24(%eax) movl 28(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 16(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 20(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 24(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx movl %ecx,28(%eax) movl 28(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 24(%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 16(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 12(%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 24(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 28(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx movl %ebp,32(%eax) movl 28(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 24(%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 16(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 16(%esi),%eax adcl %edx,%ecx movl 20(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 12(%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp mull %edx addl 
%eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 28(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp movl %ebx,36(%eax) movl 28(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esi),%eax adcl %edx,%ebp movl 20(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 16(%esi),%eax adcl %edx,%ebp movl 24(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx movl %ecx,40(%eax) movl 28(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 24(%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esi),%eax adcl %edx,%ebx movl 24(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 28(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx movl %ebp,44(%eax) movl 28(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 24(%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 28(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp movl %ebx,48(%eax) movl 28(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx movl %ecx,52(%eax) movl 28(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx adcl $0,%ecx movl %ebp,56(%eax) movl %ebx,60(%eax) popl %ebx popl %ebp popl %edi popl %esi ret .size bn_mul_comba8,.-.L_bn_mul_comba8_begin .globl bn_mul_comba4 .hidden bn_mul_comba4 .type bn_mul_comba4,@function .align 16 
bn_mul_comba4: .L_bn_mul_comba4_begin: pushl %esi movl 12(%esp),%esi pushl %edi movl 20(%esp),%edi pushl %ebp pushl %ebx xorl %ebx,%ebx movl (%esi),%eax xorl %ecx,%ecx movl (%edi),%edx xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,(%eax) movl 4(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl (%edi),%edx adcl $0,%ebx movl %ecx,4(%eax) movl 8(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl (%edi),%edx adcl $0,%ecx movl %ebp,8(%eax) movl 12(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp movl %ebx,12(%eax) movl 12(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx movl %ecx,16(%eax) movl 12(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx movl %ebp,20(%eax) movl 12(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx adcl $0,%ebp movl %ebx,24(%eax) movl %ecx,28(%eax) popl %ebx popl %ebp popl %edi popl 
%esi ret .size bn_mul_comba4,.-.L_bn_mul_comba4_begin .globl bn_sqr_comba8 .hidden bn_sqr_comba8 .type bn_sqr_comba8,@function .align 16 bn_sqr_comba8: .L_bn_sqr_comba8_begin: pushl %esi pushl %edi pushl %ebp pushl %ebx movl 20(%esp),%edi movl 24(%esp),%esi xorl %ebx,%ebx xorl %ecx,%ecx movl (%esi),%eax xorl %ebp,%ebp mull %eax addl %eax,%ebx adcl %edx,%ecx movl (%esi),%edx adcl $0,%ebp movl %ebx,(%edi) movl 4(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx movl %ecx,4(%edi) movl (%esi),%edx xorl %ecx,%ecx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 4(%esi),%eax adcl $0,%ecx mull %eax addl %eax,%ebp adcl %edx,%ebx movl (%esi),%edx adcl $0,%ecx movl %ebp,8(%edi) movl 12(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 8(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 16(%esi),%eax adcl $0,%ebp movl %ebx,12(%edi) movl (%esi),%edx xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 12(%esi),%eax adcl $0,%ebx movl 4(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx mull %eax addl %eax,%ecx adcl %edx,%ebp movl (%esi),%edx adcl $0,%ebx movl %ecx,16(%edi) movl 20(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 16(%esi),%eax adcl $0,%ecx movl 4(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 12(%esi),%eax adcl $0,%ecx movl 8(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 24(%esi),%eax adcl $0,%ecx movl %ebp,20(%edi) movl (%esi),%edx xorl %ebp,%ebp mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl 
%eax,%ebx adcl %edx,%ecx movl 20(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 16(%esi),%eax adcl $0,%ebp movl 8(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 12(%esi),%eax adcl $0,%ebp mull %eax addl %eax,%ebx adcl %edx,%ecx movl (%esi),%edx adcl $0,%ebp movl %ebx,24(%edi) movl 28(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 24(%esi),%eax adcl $0,%ebx movl 4(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 20(%esi),%eax adcl $0,%ebx movl 8(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 16(%esi),%eax adcl $0,%ebx movl 12(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 28(%esi),%eax adcl $0,%ebx movl %ecx,28(%edi) movl 4(%esi),%edx xorl %ecx,%ecx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 24(%esi),%eax adcl $0,%ecx movl 8(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 20(%esi),%eax adcl $0,%ecx movl 12(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 16(%esi),%eax adcl $0,%ecx mull %eax addl %eax,%ebp adcl %edx,%ebx movl 8(%esi),%edx adcl $0,%ecx movl %ebp,32(%edi) movl 28(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 24(%esi),%eax adcl $0,%ebp movl 12(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 20(%esi),%eax adcl $0,%ebp movl 16(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 28(%esi),%eax adcl $0,%ebp movl %ebx,36(%edi) movl 12(%esi),%edx xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl 
$0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 24(%esi),%eax adcl $0,%ebx movl 16(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 20(%esi),%eax adcl $0,%ebx mull %eax addl %eax,%ecx adcl %edx,%ebp movl 16(%esi),%edx adcl $0,%ebx movl %ecx,40(%edi) movl 28(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 24(%esi),%eax adcl $0,%ecx movl 20(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 28(%esi),%eax adcl $0,%ecx movl %ebp,44(%edi) movl 20(%esi),%edx xorl %ebp,%ebp mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 24(%esi),%eax adcl $0,%ebp mull %eax addl %eax,%ebx adcl %edx,%ecx movl 24(%esi),%edx adcl $0,%ebp movl %ebx,48(%edi) movl 28(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 28(%esi),%eax adcl $0,%ebx movl %ecx,52(%edi) xorl %ecx,%ecx mull %eax addl %eax,%ebp adcl %edx,%ebx adcl $0,%ecx movl %ebp,56(%edi) movl %ebx,60(%edi) popl %ebx popl %ebp popl %edi popl %esi ret .size bn_sqr_comba8,.-.L_bn_sqr_comba8_begin .globl bn_sqr_comba4 .hidden bn_sqr_comba4 .type bn_sqr_comba4,@function .align 16 bn_sqr_comba4: .L_bn_sqr_comba4_begin: pushl %esi pushl %edi pushl %ebp pushl %ebx movl 20(%esp),%edi movl 24(%esp),%esi xorl %ebx,%ebx xorl %ecx,%ecx movl (%esi),%eax xorl %ebp,%ebp mull %eax addl %eax,%ebx adcl %edx,%ecx movl (%esi),%edx adcl $0,%ebp movl %ebx,(%edi) movl 4(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx movl %ecx,4(%edi) movl (%esi),%edx xorl %ecx,%ecx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 4(%esi),%eax adcl $0,%ecx mull %eax addl %eax,%ebp adcl %edx,%ebx movl (%esi),%edx adcl $0,%ecx movl %ebp,8(%edi) movl 12(%esi),%eax xorl %ebp,%ebp mull %edx 
addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 8(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 12(%esi),%eax adcl $0,%ebp movl %ebx,12(%edi) movl 4(%esi),%edx xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx mull %eax addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%edx adcl $0,%ebx movl %ecx,16(%edi) movl 12(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 12(%esi),%eax adcl $0,%ecx movl %ebp,20(%edi) xorl %ebp,%ebp mull %eax addl %eax,%ebx adcl %edx,%ecx adcl $0,%ebp movl %ebx,24(%edi) movl %ecx,28(%edi) popl %ebx popl %ebp popl %edi popl %esi ret .size bn_sqr_comba4,.-.L_bn_sqr_comba4_begin #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghash-armv4-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) #include @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 PMULL @ instructions are in aesv8-armx.pl.) .arch armv7-a .text #if defined(__thumb2__) || defined(__clang__) .syntax unified #define ldrplb ldrbpl #define ldrneb ldrbne #endif #if defined(__thumb2__) .thumb #else .code 32 #endif #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .globl gcm_init_neon .hidden gcm_init_neon .type gcm_init_neon,%function .align 4 gcm_init_neon: vld1.64 d7,[r1]! 
@ load H vmov.i8 q8,#0xe1 vld1.64 d6,[r1] vshl.i64 d17,#57 vshr.u64 d16,#63 @ t0=0xc2....01 vdup.8 q9,d7[7] vshr.u64 d26,d6,#63 vshr.s8 q9,#7 @ broadcast carry bit vshl.i64 q3,q3,#1 vand q8,q8,q9 vorr d7,d26 @ H<<<=1 veor q3,q3,q8 @ twisted H vstmia r0,{q3} bx lr @ bx lr .size gcm_init_neon,.-gcm_init_neon .globl gcm_gmult_neon .hidden gcm_gmult_neon .type gcm_gmult_neon,%function .align 4 gcm_gmult_neon: vld1.64 d7,[r0]! @ load Xi vld1.64 d6,[r0]! vmov.i64 d29,#0x0000ffffffffffff vldmia r1,{d26,d27} @ load twisted H vmov.i64 d30,#0x00000000ffffffff #ifdef __ARMEL__ vrev64.8 q3,q3 #endif vmov.i64 d31,#0x000000000000ffff veor d28,d26,d27 @ Karatsuba pre-processing mov r3,#16 b .Lgmult_neon .size gcm_gmult_neon,.-gcm_gmult_neon .globl gcm_ghash_neon .hidden gcm_ghash_neon .type gcm_ghash_neon,%function .align 4 gcm_ghash_neon: vld1.64 d1,[r0]! @ load Xi vld1.64 d0,[r0]! vmov.i64 d29,#0x0000ffffffffffff vldmia r1,{d26,d27} @ load twisted H vmov.i64 d30,#0x00000000ffffffff #ifdef __ARMEL__ vrev64.8 q0,q0 #endif vmov.i64 d31,#0x000000000000ffff veor d28,d26,d27 @ Karatsuba pre-processing .Loop_neon: vld1.64 d7,[r2]! @ load inp vld1.64 d6,[r2]! 
#ifdef __ARMEL__ vrev64.8 q3,q3 #endif veor q3,q0 @ inp^=Xi .Lgmult_neon: vext.8 d16, d26, d26, #1 @ A1 vmull.p8 q8, d16, d6 @ F = A1*B vext.8 d0, d6, d6, #1 @ B1 vmull.p8 q0, d26, d0 @ E = A*B1 vext.8 d18, d26, d26, #2 @ A2 vmull.p8 q9, d18, d6 @ H = A2*B vext.8 d22, d6, d6, #2 @ B2 vmull.p8 q11, d26, d22 @ G = A*B2 vext.8 d20, d26, d26, #3 @ A3 veor q8, q8, q0 @ L = E + F vmull.p8 q10, d20, d6 @ J = A3*B vext.8 d0, d6, d6, #3 @ B3 veor q9, q9, q11 @ M = G + H vmull.p8 q0, d26, d0 @ I = A*B3 veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 vand d17, d17, d29 vext.8 d22, d6, d6, #4 @ B4 veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 vand d19, d19, d30 vmull.p8 q11, d26, d22 @ K = A*B4 veor q10, q10, q0 @ N = I + J veor d16, d16, d17 veor d18, d18, d19 veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 vand d21, d21, d31 vext.8 q8, q8, q8, #15 veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 vmov.i64 d23, #0 vext.8 q9, q9, q9, #14 veor d20, d20, d21 vmull.p8 q0, d26, d6 @ D = A*B vext.8 q11, q11, q11, #12 vext.8 q10, q10, q10, #13 veor q8, q8, q9 veor q10, q10, q11 veor q0, q0, q8 veor q0, q0, q10 veor d6,d6,d7 @ Karatsuba pre-processing vext.8 d16, d28, d28, #1 @ A1 vmull.p8 q8, d16, d6 @ F = A1*B vext.8 d2, d6, d6, #1 @ B1 vmull.p8 q1, d28, d2 @ E = A*B1 vext.8 d18, d28, d28, #2 @ A2 vmull.p8 q9, d18, d6 @ H = A2*B vext.8 d22, d6, d6, #2 @ B2 vmull.p8 q11, d28, d22 @ G = A*B2 vext.8 d20, d28, d28, #3 @ A3 veor q8, q8, q1 @ L = E + F vmull.p8 q10, d20, d6 @ J = A3*B vext.8 d2, d6, d6, #3 @ B3 veor q9, q9, q11 @ M = G + H vmull.p8 q1, d28, d2 @ I = A*B3 veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 vand d17, d17, d29 vext.8 d22, d6, d6, #4 @ B4 veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 vand d19, d19, d30 vmull.p8 q11, d28, d22 @ K = A*B4 veor q10, q10, q1 @ N = I + J veor d16, d16, d17 veor d18, d18, d19 veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 vand d21, d21, d31 vext.8 q8, q8, q8, #15 veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 vmov.i64 d23, #0 vext.8 q9, q9, q9, #14 
veor d20, d20, d21 vmull.p8 q1, d28, d6 @ D = A*B vext.8 q11, q11, q11, #12 vext.8 q10, q10, q10, #13 veor q8, q8, q9 veor q10, q10, q11 veor q1, q1, q8 veor q1, q1, q10 vext.8 d16, d27, d27, #1 @ A1 vmull.p8 q8, d16, d7 @ F = A1*B vext.8 d4, d7, d7, #1 @ B1 vmull.p8 q2, d27, d4 @ E = A*B1 vext.8 d18, d27, d27, #2 @ A2 vmull.p8 q9, d18, d7 @ H = A2*B vext.8 d22, d7, d7, #2 @ B2 vmull.p8 q11, d27, d22 @ G = A*B2 vext.8 d20, d27, d27, #3 @ A3 veor q8, q8, q2 @ L = E + F vmull.p8 q10, d20, d7 @ J = A3*B vext.8 d4, d7, d7, #3 @ B3 veor q9, q9, q11 @ M = G + H vmull.p8 q2, d27, d4 @ I = A*B3 veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 vand d17, d17, d29 vext.8 d22, d7, d7, #4 @ B4 veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 vand d19, d19, d30 vmull.p8 q11, d27, d22 @ K = A*B4 veor q10, q10, q2 @ N = I + J veor d16, d16, d17 veor d18, d18, d19 veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 vand d21, d21, d31 vext.8 q8, q8, q8, #15 veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 vmov.i64 d23, #0 vext.8 q9, q9, q9, #14 veor d20, d20, d21 vmull.p8 q2, d27, d7 @ D = A*B vext.8 q11, q11, q11, #12 vext.8 q10, q10, q10, #13 veor q8, q8, q9 veor q10, q10, q11 veor q2, q2, q8 veor q2, q2, q10 veor q1,q1,q0 @ Karatsuba post-processing veor q1,q1,q2 veor d1,d1,d2 veor d4,d4,d3 @ Xh|Xl - 256-bit result @ equivalent of reduction_avx from ghash-x86_64.pl vshl.i64 q9,q0,#57 @ 1st phase vshl.i64 q10,q0,#62 veor q10,q10,q9 @ vshl.i64 q9,q0,#63 veor q10, q10, q9 @ veor d1,d1,d20 @ veor d4,d4,d21 vshr.u64 q10,q0,#1 @ 2nd phase veor q2,q2,q0 veor q0,q0,q10 @ vshr.u64 q10,q10,#6 vshr.u64 q0,q0,#1 @ veor q0,q0,q2 @ veor q0,q0,q10 @ subs r3,#16 bne .Loop_neon #ifdef __ARMEL__ vrev64.8 q0,q0 #endif sub r0,#16 vst1.64 d1,[r0]! 
@ write out Xi vst1.64 d0,[r0] bx lr @ bx lr .size gcm_ghash_neon,.-gcm_ghash_neon #endif .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghash-neon-armv8-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include .text .globl _gcm_init_neon .private_extern _gcm_init_neon .align 4 _gcm_init_neon: AARCH64_VALID_CALL_TARGET // This function is adapted from gcm_init_v8. xC2 is t3. 
ld1 {v17.2d}, [x1] // load H movi v19.16b, #0xe1 shl v19.2d, v19.2d, #57 // 0xc2.0 ext v3.16b, v17.16b, v17.16b, #8 ushr v18.2d, v19.2d, #63 dup v17.4s, v17.s[1] ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01 ushr v18.2d, v3.2d, #63 sshr v17.4s, v17.4s, #31 // broadcast carry bit and v18.16b, v18.16b, v16.16b shl v3.2d, v3.2d, #1 ext v18.16b, v18.16b, v18.16b, #8 and v16.16b, v16.16b, v17.16b orr v3.16b, v3.16b, v18.16b // H<<<=1 eor v5.16b, v3.16b, v16.16b // twisted H st1 {v5.2d}, [x0] // store Htable[0] ret .globl _gcm_gmult_neon .private_extern _gcm_gmult_neon .align 4 _gcm_gmult_neon: AARCH64_VALID_CALL_TARGET ld1 {v3.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, Lmasks@PAGE // load constants add x9, x9, Lmasks@PAGEOFF ld1 {v24.2d, v25.2d}, [x9] rev64 v3.16b, v3.16b // byteswap Xi ext v3.16b, v3.16b, v3.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing mov x3, #16 b Lgmult_neon .globl _gcm_ghash_neon .private_extern _gcm_ghash_neon .align 4 _gcm_ghash_neon: AARCH64_VALID_CALL_TARGET ld1 {v0.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, Lmasks@PAGE // load constants add x9, x9, Lmasks@PAGEOFF ld1 {v24.2d, v25.2d}, [x9] rev64 v0.16b, v0.16b // byteswap Xi ext v0.16b, v0.16b, v0.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing Loop_neon: ld1 {v3.16b}, [x2], #16 // load inp rev64 v3.16b, v3.16b // byteswap inp ext v3.16b, v3.16b, v3.16b, #8 eor v3.16b, v3.16b, v0.16b // inp ^= Xi Lgmult_neon: // Split the input into v3 and v4. (The upper halves are unused, // so it is okay to leave them alone.) 
ins v4.d[0], v3.d[1] ext v16.8b, v5.8b, v5.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v0.8b, v3.8b, v3.8b, #1 // B1 pmull v0.8h, v5.8b, v0.8b // E = A*B1 ext v17.8b, v5.8b, v5.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v5.8b, v19.8b // G = A*B2 ext v18.8b, v5.8b, v5.8b, #3 // A3 eor v16.16b, v16.16b, v0.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v0.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v0.8h, v5.8b, v0.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v0.16b // N = I + J pmull v19.8h, v5.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. 
zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v0.8h, v5.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v0.16b, v0.16b, v16.16b eor v0.16b, v0.16b, v18.16b eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing ext v16.8b, v7.8b, v7.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v1.8b, v3.8b, v3.8b, #1 // B1 pmull v1.8h, v7.8b, v1.8b // E = A*B1 ext v17.8b, v7.8b, v7.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v7.8b, v19.8b // G = A*B2 ext v18.8b, v7.8b, v7.8b, #3 // A3 eor v16.16b, v16.16b, v1.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v1.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v1.8h, v7.8b, v1.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. 
AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v1.16b // N = I + J pmull v19.8h, v7.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v1.8h, v7.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v1.16b, v1.16b, v16.16b eor v1.16b, v1.16b, v18.16b ext v16.8b, v6.8b, v6.8b, #1 // A1 pmull v16.8h, v16.8b, v4.8b // F = A1*B ext v2.8b, v4.8b, v4.8b, #1 // B1 pmull v2.8h, v6.8b, v2.8b // E = A*B1 ext v17.8b, v6.8b, v6.8b, #2 // A2 pmull v17.8h, v17.8b, v4.8b // H = A2*B ext v19.8b, v4.8b, v4.8b, #2 // B2 pmull v19.8h, v6.8b, v19.8b // G = A*B2 ext v18.8b, v6.8b, v6.8b, #3 // A3 eor v16.16b, v16.16b, v2.16b // L = E + F pmull v18.8h, v18.8b, v4.8b // J = A3*B ext v2.8b, v4.8b, v4.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v2.8h, v6.8b, v2.8b // I = A*B3 // Here we diverge from the 32-bit version. 
It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v4.8b, v4.8b, #4 // B4 eor v18.16b, v18.16b, v2.16b // N = I + J pmull v19.8h, v6.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v2.8h, v6.8b, v4.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v2.16b, v2.16b, v16.16b eor v2.16b, v2.16b, v18.16b ext v16.16b, v0.16b, v2.16b, #8 eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing eor v1.16b, v1.16b, v2.16b eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result // This is a no-op due to the ins instruction below. 
// ins v2.d[0], v1.d[1] // equivalent of reduction_avx from ghash-x86_64.pl shl v17.2d, v0.2d, #57 // 1st phase shl v18.2d, v0.2d, #62 eor v18.16b, v18.16b, v17.16b // shl v17.2d, v0.2d, #63 eor v18.16b, v18.16b, v17.16b // // Note Xm contains {Xl.d[1], Xh.d[0]}. eor v18.16b, v18.16b, v1.16b ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0] ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1] ushr v18.2d, v0.2d, #1 // 2nd phase eor v2.16b, v2.16b,v0.16b eor v0.16b, v0.16b,v18.16b // ushr v18.2d, v18.2d, #6 ushr v0.2d, v0.2d, #1 // eor v0.16b, v0.16b, v2.16b // eor v0.16b, v0.16b, v18.16b // subs x3, x3, #16 bne Loop_neon rev64 v0.16b, v0.16b // byteswap Xi and write ext v0.16b, v0.16b, v0.16b, #8 st1 {v0.16b}, [x0] ret .section __TEXT,__const .align 4 Lmasks: .quad 0x0000ffffffffffff // k48 .quad 0x00000000ffffffff // k32 .quad 0x000000000000ffff // k16 .quad 0x0000000000000000 // k0 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghash-neon-armv8-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include .text .globl gcm_init_neon .hidden gcm_init_neon .type gcm_init_neon,%function .align 4 gcm_init_neon: AARCH64_VALID_CALL_TARGET // This function is adapted from gcm_init_v8. xC2 is t3. 
ld1 {v17.2d}, [x1] // load H movi v19.16b, #0xe1 shl v19.2d, v19.2d, #57 // 0xc2.0 ext v3.16b, v17.16b, v17.16b, #8 ushr v18.2d, v19.2d, #63 dup v17.4s, v17.s[1] ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01 ushr v18.2d, v3.2d, #63 sshr v17.4s, v17.4s, #31 // broadcast carry bit and v18.16b, v18.16b, v16.16b shl v3.2d, v3.2d, #1 ext v18.16b, v18.16b, v18.16b, #8 and v16.16b, v16.16b, v17.16b orr v3.16b, v3.16b, v18.16b // H<<<=1 eor v5.16b, v3.16b, v16.16b // twisted H st1 {v5.2d}, [x0] // store Htable[0] ret .size gcm_init_neon,.-gcm_init_neon .globl gcm_gmult_neon .hidden gcm_gmult_neon .type gcm_gmult_neon,%function .align 4 gcm_gmult_neon: AARCH64_VALID_CALL_TARGET ld1 {v3.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, .Lmasks // load constants add x9, x9, :lo12:.Lmasks ld1 {v24.2d, v25.2d}, [x9] rev64 v3.16b, v3.16b // byteswap Xi ext v3.16b, v3.16b, v3.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing mov x3, #16 b .Lgmult_neon .size gcm_gmult_neon,.-gcm_gmult_neon .globl gcm_ghash_neon .hidden gcm_ghash_neon .type gcm_ghash_neon,%function .align 4 gcm_ghash_neon: AARCH64_VALID_CALL_TARGET ld1 {v0.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, .Lmasks // load constants add x9, x9, :lo12:.Lmasks ld1 {v24.2d, v25.2d}, [x9] rev64 v0.16b, v0.16b // byteswap Xi ext v0.16b, v0.16b, v0.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing .Loop_neon: ld1 {v3.16b}, [x2], #16 // load inp rev64 v3.16b, v3.16b // byteswap inp ext v3.16b, v3.16b, v3.16b, #8 eor v3.16b, v3.16b, v0.16b // inp ^= Xi .Lgmult_neon: // Split the input into v3 and v4. (The upper halves are unused, // so it is okay to leave them alone.) 
ins v4.d[0], v3.d[1] ext v16.8b, v5.8b, v5.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v0.8b, v3.8b, v3.8b, #1 // B1 pmull v0.8h, v5.8b, v0.8b // E = A*B1 ext v17.8b, v5.8b, v5.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v5.8b, v19.8b // G = A*B2 ext v18.8b, v5.8b, v5.8b, #3 // A3 eor v16.16b, v16.16b, v0.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v0.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v0.8h, v5.8b, v0.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v0.16b // N = I + J pmull v19.8h, v5.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. 
zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v0.8h, v5.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v0.16b, v0.16b, v16.16b eor v0.16b, v0.16b, v18.16b eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing ext v16.8b, v7.8b, v7.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v1.8b, v3.8b, v3.8b, #1 // B1 pmull v1.8h, v7.8b, v1.8b // E = A*B1 ext v17.8b, v7.8b, v7.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v7.8b, v19.8b // G = A*B2 ext v18.8b, v7.8b, v7.8b, #3 // A3 eor v16.16b, v16.16b, v1.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v1.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v1.8h, v7.8b, v1.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. 
AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v1.16b // N = I + J pmull v19.8h, v7.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v1.8h, v7.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v1.16b, v1.16b, v16.16b eor v1.16b, v1.16b, v18.16b ext v16.8b, v6.8b, v6.8b, #1 // A1 pmull v16.8h, v16.8b, v4.8b // F = A1*B ext v2.8b, v4.8b, v4.8b, #1 // B1 pmull v2.8h, v6.8b, v2.8b // E = A*B1 ext v17.8b, v6.8b, v6.8b, #2 // A2 pmull v17.8h, v17.8b, v4.8b // H = A2*B ext v19.8b, v4.8b, v4.8b, #2 // B2 pmull v19.8h, v6.8b, v19.8b // G = A*B2 ext v18.8b, v6.8b, v6.8b, #3 // A3 eor v16.16b, v16.16b, v2.16b // L = E + F pmull v18.8h, v18.8b, v4.8b // J = A3*B ext v2.8b, v4.8b, v4.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v2.8h, v6.8b, v2.8b // I = A*B3 // Here we diverge from the 32-bit version. 
It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v4.8b, v4.8b, #4 // B4 eor v18.16b, v18.16b, v2.16b // N = I + J pmull v19.8h, v6.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v2.8h, v6.8b, v4.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v2.16b, v2.16b, v16.16b eor v2.16b, v2.16b, v18.16b ext v16.16b, v0.16b, v2.16b, #8 eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing eor v1.16b, v1.16b, v2.16b eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result // This is a no-op due to the ins instruction below. 
// ins v2.d[0], v1.d[1] // equivalent of reduction_avx from ghash-x86_64.pl shl v17.2d, v0.2d, #57 // 1st phase shl v18.2d, v0.2d, #62 eor v18.16b, v18.16b, v17.16b // shl v17.2d, v0.2d, #63 eor v18.16b, v18.16b, v17.16b // // Note Xm contains {Xl.d[1], Xh.d[0]}. eor v18.16b, v18.16b, v1.16b ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0] ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1] ushr v18.2d, v0.2d, #1 // 2nd phase eor v2.16b, v2.16b,v0.16b eor v0.16b, v0.16b,v18.16b // ushr v18.2d, v18.2d, #6 ushr v0.2d, v0.2d, #1 // eor v0.16b, v0.16b, v2.16b // eor v0.16b, v0.16b, v18.16b // subs x3, x3, #16 bne .Loop_neon rev64 v0.16b, v0.16b // byteswap Xi and write ext v0.16b, v0.16b, v0.16b, #8 st1 {v0.16b}, [x0] ret .size gcm_ghash_neon,.-gcm_ghash_neon .section .rodata .align 4 .Lmasks: .quad 0x0000ffffffffffff // k48 .quad 0x00000000ffffffff // k32 .quad 0x000000000000ffff // k16 .quad 0x0000000000000000 // k0 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghash-neon-armv8-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include .text .globl gcm_init_neon .def gcm_init_neon .type 32 .endef .align 4 gcm_init_neon: AARCH64_VALID_CALL_TARGET // This function is adapted from gcm_init_v8. xC2 is t3. 
ld1 {v17.2d}, [x1] // load H movi v19.16b, #0xe1 shl v19.2d, v19.2d, #57 // 0xc2.0 ext v3.16b, v17.16b, v17.16b, #8 ushr v18.2d, v19.2d, #63 dup v17.4s, v17.s[1] ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01 ushr v18.2d, v3.2d, #63 sshr v17.4s, v17.4s, #31 // broadcast carry bit and v18.16b, v18.16b, v16.16b shl v3.2d, v3.2d, #1 ext v18.16b, v18.16b, v18.16b, #8 and v16.16b, v16.16b, v17.16b orr v3.16b, v3.16b, v18.16b // H<<<=1 eor v5.16b, v3.16b, v16.16b // twisted H st1 {v5.2d}, [x0] // store Htable[0] ret .globl gcm_gmult_neon .def gcm_gmult_neon .type 32 .endef .align 4 gcm_gmult_neon: AARCH64_VALID_CALL_TARGET ld1 {v3.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, Lmasks // load constants add x9, x9, :lo12:Lmasks ld1 {v24.2d, v25.2d}, [x9] rev64 v3.16b, v3.16b // byteswap Xi ext v3.16b, v3.16b, v3.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing mov x3, #16 b Lgmult_neon .globl gcm_ghash_neon .def gcm_ghash_neon .type 32 .endef .align 4 gcm_ghash_neon: AARCH64_VALID_CALL_TARGET ld1 {v0.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, Lmasks // load constants add x9, x9, :lo12:Lmasks ld1 {v24.2d, v25.2d}, [x9] rev64 v0.16b, v0.16b // byteswap Xi ext v0.16b, v0.16b, v0.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing Loop_neon: ld1 {v3.16b}, [x2], #16 // load inp rev64 v3.16b, v3.16b // byteswap inp ext v3.16b, v3.16b, v3.16b, #8 eor v3.16b, v3.16b, v0.16b // inp ^= Xi Lgmult_neon: // Split the input into v3 and v4. (The upper halves are unused, // so it is okay to leave them alone.) 
ins v4.d[0], v3.d[1] ext v16.8b, v5.8b, v5.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v0.8b, v3.8b, v3.8b, #1 // B1 pmull v0.8h, v5.8b, v0.8b // E = A*B1 ext v17.8b, v5.8b, v5.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v5.8b, v19.8b // G = A*B2 ext v18.8b, v5.8b, v5.8b, #3 // A3 eor v16.16b, v16.16b, v0.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v0.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v0.8h, v5.8b, v0.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v0.16b // N = I + J pmull v19.8h, v5.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. 
zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v0.8h, v5.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v0.16b, v0.16b, v16.16b eor v0.16b, v0.16b, v18.16b eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing ext v16.8b, v7.8b, v7.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v1.8b, v3.8b, v3.8b, #1 // B1 pmull v1.8h, v7.8b, v1.8b // E = A*B1 ext v17.8b, v7.8b, v7.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v7.8b, v19.8b // G = A*B2 ext v18.8b, v7.8b, v7.8b, #3 // A3 eor v16.16b, v16.16b, v1.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v1.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v1.8h, v7.8b, v1.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. 
AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v1.16b // N = I + J pmull v19.8h, v7.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v1.8h, v7.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v1.16b, v1.16b, v16.16b eor v1.16b, v1.16b, v18.16b ext v16.8b, v6.8b, v6.8b, #1 // A1 pmull v16.8h, v16.8b, v4.8b // F = A1*B ext v2.8b, v4.8b, v4.8b, #1 // B1 pmull v2.8h, v6.8b, v2.8b // E = A*B1 ext v17.8b, v6.8b, v6.8b, #2 // A2 pmull v17.8h, v17.8b, v4.8b // H = A2*B ext v19.8b, v4.8b, v4.8b, #2 // B2 pmull v19.8h, v6.8b, v19.8b // G = A*B2 ext v18.8b, v6.8b, v6.8b, #3 // A3 eor v16.16b, v16.16b, v2.16b // L = E + F pmull v18.8h, v18.8b, v4.8b // J = A3*B ext v2.8b, v4.8b, v4.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v2.8h, v6.8b, v2.8b // I = A*B3 // Here we diverge from the 32-bit version. 
It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v4.8b, v4.8b, #4 // B4 eor v18.16b, v18.16b, v2.16b // N = I + J pmull v19.8h, v6.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v2.8h, v6.8b, v4.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v2.16b, v2.16b, v16.16b eor v2.16b, v2.16b, v18.16b ext v16.16b, v0.16b, v2.16b, #8 eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing eor v1.16b, v1.16b, v2.16b eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result // This is a no-op due to the ins instruction below. 
// ins v2.d[0], v1.d[1] // equivalent of reduction_avx from ghash-x86_64.pl shl v17.2d, v0.2d, #57 // 1st phase shl v18.2d, v0.2d, #62 eor v18.16b, v18.16b, v17.16b // shl v17.2d, v0.2d, #63 eor v18.16b, v18.16b, v17.16b // // Note Xm contains {Xl.d[1], Xh.d[0]}. eor v18.16b, v18.16b, v1.16b ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0] ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1] ushr v18.2d, v0.2d, #1 // 2nd phase eor v2.16b, v2.16b,v0.16b eor v0.16b, v0.16b,v18.16b // ushr v18.2d, v18.2d, #6 ushr v0.2d, v0.2d, #1 // eor v0.16b, v0.16b, v2.16b // eor v0.16b, v0.16b, v18.16b // subs x3, x3, #16 bne Loop_neon rev64 v0.16b, v0.16b // byteswap Xi and write ext v0.16b, v0.16b, v0.16b, #8 st1 {v0.16b}, [x0] ret .section .rodata .align 4 Lmasks: .quad 0x0000ffffffffffff // k48 .quad 0x00000000ffffffff // k32 .quad 0x000000000000ffff // k16 .quad 0x0000000000000000 // k0 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghash-ssse3-x86-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _gcm_gmult_ssse3 .private_extern _gcm_gmult_ssse3 .align 4 _gcm_gmult_ssse3: L_gcm_gmult_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%edi movl 24(%esp),%esi movdqu (%edi),%xmm0 call L000pic_point L000pic_point: popl %eax movdqa Lreverse_bytes-L000pic_point(%eax),%xmm7 movdqa Llow4_mask-L000pic_point(%eax),%xmm2 .byte 102,15,56,0,199 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 movl $5,%eax L001loop_row_1: movdqu (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz L001loop_row_1 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $5,%eax L002loop_row_2: movdqu (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz L002loop_row_2 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $6,%eax L003loop_row_3: movdqu (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq 
$4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz L003loop_row_3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,0,215 movdqu %xmm2,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 popl %edi popl %esi popl %ebx popl %ebp ret .globl _gcm_ghash_ssse3 .private_extern _gcm_ghash_ssse3 .align 4 _gcm_ghash_ssse3: L_gcm_ghash_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%edi movl 24(%esp),%esi movl 28(%esp),%edx movl 32(%esp),%ecx movdqu (%edi),%xmm0 call L004pic_point L004pic_point: popl %ebx movdqa Lreverse_bytes-L004pic_point(%ebx),%xmm7 andl $-16,%ecx .byte 102,15,56,0,199 pxor %xmm3,%xmm3 L005loop_ghash: movdqa Llow4_mask-L004pic_point(%ebx),%xmm2 movdqu (%edx),%xmm1 .byte 102,15,56,0,207 pxor %xmm1,%xmm0 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 pxor %xmm2,%xmm2 movl $5,%eax L006loop_row_4: movdqu (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz L006loop_row_4 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $5,%eax L007loop_row_5: movdqu (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz L007loop_row_5 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq 
$1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $6,%eax L008loop_row_6: movdqu (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz L008loop_row_6 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movdqa %xmm2,%xmm0 leal -256(%esi),%esi leal 16(%edx),%edx subl $16,%ecx jnz L005loop_ghash .byte 102,15,56,0,199 movdqu %xmm0,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 popl %edi popl %esi popl %ebx popl %ebp ret .align 4,0x90 Lreverse_bytes: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .align 4,0x90 Llow4_mask: .long 252645135,252645135,252645135,252645135 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghash-ssse3-x86-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl gcm_gmult_ssse3 .hidden gcm_gmult_ssse3 .type gcm_gmult_ssse3,@function .align 16 gcm_gmult_ssse3: .L_gcm_gmult_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%edi movl 24(%esp),%esi movdqu (%edi),%xmm0 call .L000pic_point .L000pic_point: popl %eax movdqa .Lreverse_bytes-.L000pic_point(%eax),%xmm7 movdqa .Llow4_mask-.L000pic_point(%eax),%xmm2 .byte 102,15,56,0,199 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 movl $5,%eax .L001loop_row_1: movdqu (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz .L001loop_row_1 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $5,%eax .L002loop_row_2: movdqu (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz .L002loop_row_2 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $6,%eax .L003loop_row_3: movdqu (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq 
$8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz .L003loop_row_3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,0,215 movdqu %xmm2,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 popl %edi popl %esi popl %ebx popl %ebp ret .size gcm_gmult_ssse3,.-.L_gcm_gmult_ssse3_begin .globl gcm_ghash_ssse3 .hidden gcm_ghash_ssse3 .type gcm_ghash_ssse3,@function .align 16 gcm_ghash_ssse3: .L_gcm_ghash_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%edi movl 24(%esp),%esi movl 28(%esp),%edx movl 32(%esp),%ecx movdqu (%edi),%xmm0 call .L004pic_point .L004pic_point: popl %ebx movdqa .Lreverse_bytes-.L004pic_point(%ebx),%xmm7 andl $-16,%ecx .byte 102,15,56,0,199 pxor %xmm3,%xmm3 .L005loop_ghash: movdqa .Llow4_mask-.L004pic_point(%ebx),%xmm2 movdqu (%edx),%xmm1 .byte 102,15,56,0,207 pxor %xmm1,%xmm0 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 pxor %xmm2,%xmm2 movl $5,%eax .L006loop_row_4: movdqu (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz .L006loop_row_4 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $5,%eax .L007loop_row_5: movdqu (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 
psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz .L007loop_row_5 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $6,%eax .L008loop_row_6: movdqu (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz .L008loop_row_6 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movdqa %xmm2,%xmm0 leal -256(%esi),%esi leal 16(%edx),%edx subl $16,%ecx jnz .L005loop_ghash .byte 102,15,56,0,199 movdqu %xmm0,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 popl %edi popl %esi popl %ebx popl %ebp ret .size gcm_ghash_ssse3,.-.L_gcm_ghash_ssse3_begin .align 16 .Lreverse_bytes: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .align 16 .Llow4_mask: .long 252645135,252645135,252645135,252645135 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghash-ssse3-x86_64-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _gcm_gmult_ssse3 .private_extern _gcm_gmult_ssse3 .p2align 4 _gcm_gmult_ssse3: _CET_ENDBR movdqu (%rdi),%xmm0 movdqa L$reverse_bytes(%rip),%xmm10 movdqa L$low4_mask(%rip),%xmm2 .byte 102,65,15,56,0,194 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 movq $5,%rax L$oop_row_1: movdqu (%rsi),%xmm4 leaq 16(%rsi),%rsi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subq $1,%rax jnz L$oop_row_1 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movq $5,%rax L$oop_row_2: movdqu (%rsi),%xmm4 leaq 16(%rsi),%rsi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subq $1,%rax jnz L$oop_row_2 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movq $6,%rax L$oop_row_3: movdqu (%rsi),%xmm4 leaq 16(%rsi),%rsi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subq $1,%rax jnz L$oop_row_3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor 
%xmm3,%xmm3 .byte 102,65,15,56,0,210 movdqu %xmm2,(%rdi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 ret .globl _gcm_ghash_ssse3 .private_extern _gcm_ghash_ssse3 .p2align 4 _gcm_ghash_ssse3: _CET_ENDBR movdqu (%rdi),%xmm0 movdqa L$reverse_bytes(%rip),%xmm10 movdqa L$low4_mask(%rip),%xmm11 andq $-16,%rcx .byte 102,65,15,56,0,194 pxor %xmm3,%xmm3 L$oop_ghash: movdqu (%rdx),%xmm1 .byte 102,65,15,56,0,202 pxor %xmm1,%xmm0 movdqa %xmm11,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm11,%xmm0 pxor %xmm2,%xmm2 movq $5,%rax L$oop_row_4: movdqu (%rsi),%xmm4 leaq 16(%rsi),%rsi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subq $1,%rax jnz L$oop_row_4 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movq $5,%rax L$oop_row_5: movdqu (%rsi),%xmm4 leaq 16(%rsi),%rsi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subq $1,%rax jnz L$oop_row_5 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movq $6,%rax L$oop_row_6: movdqu (%rsi),%xmm4 leaq 16(%rsi),%rsi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 
psrlq $4,%xmm4 pxor %xmm4,%xmm2 subq $1,%rax jnz L$oop_row_6 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movdqa %xmm2,%xmm0 leaq -256(%rsi),%rsi leaq 16(%rdx),%rdx subq $16,%rcx jnz L$oop_ghash .byte 102,65,15,56,0,194 movdqu %xmm0,(%rdi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 ret .section __DATA,__const .p2align 4 L$reverse_bytes: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 L$low4_mask: .quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghash-ssse3-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .type gcm_gmult_ssse3, @function .globl gcm_gmult_ssse3 .hidden gcm_gmult_ssse3 .align 16 gcm_gmult_ssse3: .cfi_startproc _CET_ENDBR movdqu (%rdi),%xmm0 movdqa .Lreverse_bytes(%rip),%xmm10 movdqa .Llow4_mask(%rip),%xmm2 .byte 102,65,15,56,0,194 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 movq $5,%rax .Loop_row_1: movdqu (%rsi),%xmm4 leaq 16(%rsi),%rsi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subq $1,%rax jnz .Loop_row_1 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movq $5,%rax .Loop_row_2: movdqu (%rsi),%xmm4 leaq 16(%rsi),%rsi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subq $1,%rax jnz .Loop_row_2 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movq $6,%rax .Loop_row_3: movdqu (%rsi),%xmm4 leaq 16(%rsi),%rsi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subq $1,%rax jnz .Loop_row_3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 
psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 .byte 102,65,15,56,0,210 movdqu %xmm2,(%rdi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 ret .cfi_endproc .size gcm_gmult_ssse3,.-gcm_gmult_ssse3 .type gcm_ghash_ssse3, @function .globl gcm_ghash_ssse3 .hidden gcm_ghash_ssse3 .align 16 gcm_ghash_ssse3: .cfi_startproc _CET_ENDBR movdqu (%rdi),%xmm0 movdqa .Lreverse_bytes(%rip),%xmm10 movdqa .Llow4_mask(%rip),%xmm11 andq $-16,%rcx .byte 102,65,15,56,0,194 pxor %xmm3,%xmm3 .Loop_ghash: movdqu (%rdx),%xmm1 .byte 102,65,15,56,0,202 pxor %xmm1,%xmm0 movdqa %xmm11,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm11,%xmm0 pxor %xmm2,%xmm2 movq $5,%rax .Loop_row_4: movdqu (%rsi),%xmm4 leaq 16(%rsi),%rsi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subq $1,%rax jnz .Loop_row_4 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movq $5,%rax .Loop_row_5: movdqu (%rsi),%xmm4 leaq 16(%rsi),%rsi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subq $1,%rax jnz .Loop_row_5 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movq $6,%rax .Loop_row_6: movdqu (%rsi),%xmm4 leaq 16(%rsi),%rsi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor 
%xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subq $1,%rax jnz .Loop_row_6 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movdqa %xmm2,%xmm0 leaq -256(%rsi),%rsi leaq 16(%rdx),%rdx subq $16,%rcx jnz .Loop_ghash .byte 102,65,15,56,0,194 movdqu %xmm0,(%rdi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 ret .cfi_endproc .size gcm_ghash_ssse3,.-gcm_ghash_ssse3 .section .rodata .align 16 .Lreverse_bytes: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .Llow4_mask: .quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghash-x86-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _gcm_init_clmul .private_extern _gcm_init_clmul .align 4 _gcm_init_clmul: L_gcm_init_clmul_begin: movl 4(%esp),%edx movl 8(%esp),%eax call L000pic L000pic: popl %ecx leal Lbswap-L000pic(%ecx),%ecx movdqu (%eax),%xmm2 pshufd $78,%xmm2,%xmm2 pshufd $255,%xmm2,%xmm4 movdqa %xmm2,%xmm3 psllq $1,%xmm2 pxor %xmm5,%xmm5 psrlq $63,%xmm3 pcmpgtd %xmm4,%xmm5 pslldq $8,%xmm3 por %xmm3,%xmm2 pand 16(%ecx),%xmm5 pxor %xmm5,%xmm2 movdqa %xmm2,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pshufd $78,%xmm2,%xmm4 pxor %xmm0,%xmm3 pxor %xmm2,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm2,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm2,%xmm3 movdqu %xmm2,(%edx) pxor %xmm0,%xmm4 movdqu %xmm0,16(%edx) .byte 102,15,58,15,227,8 movdqu %xmm4,32(%edx) ret .globl _gcm_gmult_clmul .private_extern _gcm_gmult_clmul .align 4 _gcm_gmult_clmul: L_gcm_gmult_clmul_begin: movl 4(%esp),%eax movl 8(%esp),%edx call L001pic L001pic: popl %ecx leal Lbswap-L001pic(%ecx),%ecx movdqu (%eax),%xmm0 movdqa (%ecx),%xmm5 movups (%edx),%xmm2 .byte 102,15,56,0,197 movups 32(%edx),%xmm4 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor 
%xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .byte 102,15,56,0,197 movdqu %xmm0,(%eax) ret .globl _gcm_ghash_clmul .private_extern _gcm_ghash_clmul .align 4 _gcm_ghash_clmul: L_gcm_ghash_clmul_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%eax movl 24(%esp),%edx movl 28(%esp),%esi movl 32(%esp),%ebx call L002pic L002pic: popl %ecx leal Lbswap-L002pic(%ecx),%ecx movdqu (%eax),%xmm0 movdqa (%ecx),%xmm5 movdqu (%edx),%xmm2 .byte 102,15,56,0,197 subl $16,%ebx jz L003odd_tail movdqu (%esi),%xmm3 movdqu 16(%esi),%xmm6 .byte 102,15,56,0,221 .byte 102,15,56,0,245 movdqu 32(%edx),%xmm5 pxor %xmm3,%xmm0 pshufd $78,%xmm6,%xmm3 movdqa %xmm6,%xmm7 pxor %xmm6,%xmm3 leal 32(%esi),%esi .byte 102,15,58,68,242,0 .byte 102,15,58,68,250,17 .byte 102,15,58,68,221,0 movups 16(%edx),%xmm2 nop subl $32,%ebx jbe L004even_tail jmp L005mod_loop .align 5,0x90 L005mod_loop: pshufd $78,%xmm0,%xmm4 movdqa %xmm0,%xmm1 pxor %xmm0,%xmm4 nop .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,229,16 movups (%edx),%xmm2 xorps %xmm6,%xmm0 movdqa (%ecx),%xmm5 xorps %xmm7,%xmm1 movdqu (%esi),%xmm7 pxor %xmm0,%xmm3 movdqu 16(%esi),%xmm6 pxor %xmm1,%xmm3 .byte 102,15,56,0,253 pxor %xmm3,%xmm4 movdqa %xmm4,%xmm3 psrldq $8,%xmm4 pslldq $8,%xmm3 pxor %xmm4,%xmm1 pxor %xmm3,%xmm0 .byte 102,15,56,0,245 pxor %xmm7,%xmm1 movdqa %xmm6,%xmm7 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 .byte 102,15,58,68,242,0 movups 32(%edx),%xmm5 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 pshufd $78,%xmm7,%xmm3 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm7,%xmm3 pxor %xmm4,%xmm1 .byte 102,15,58,68,250,17 movups 16(%edx),%xmm2 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq 
$1,%xmm0 pxor %xmm1,%xmm0 .byte 102,15,58,68,221,0 leal 32(%esi),%esi subl $32,%ebx ja L005mod_loop L004even_tail: pshufd $78,%xmm0,%xmm4 movdqa %xmm0,%xmm1 pxor %xmm0,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,229,16 movdqa (%ecx),%xmm5 xorps %xmm6,%xmm0 xorps %xmm7,%xmm1 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 pxor %xmm3,%xmm4 movdqa %xmm4,%xmm3 psrldq $8,%xmm4 pslldq $8,%xmm3 pxor %xmm4,%xmm1 pxor %xmm3,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 testl %ebx,%ebx jnz L006done movups (%edx),%xmm2 L003odd_tail: movdqu (%esi),%xmm3 .byte 102,15,56,0,221 pxor %xmm3,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pshufd $78,%xmm2,%xmm4 pxor %xmm0,%xmm3 pxor %xmm2,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 L006done: .byte 102,15,56,0,197 movdqu %xmm0,(%eax) popl %edi popl %esi popl %ebx popl %ebp ret .align 6,0x90 Lbswap: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194 .byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67 .byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112 .byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62 .byte 0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && 
defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghash-x86-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl gcm_init_clmul .hidden gcm_init_clmul .type gcm_init_clmul,@function .align 16 gcm_init_clmul: .L_gcm_init_clmul_begin: movl 4(%esp),%edx movl 8(%esp),%eax call .L000pic .L000pic: popl %ecx leal .Lbswap-.L000pic(%ecx),%ecx movdqu (%eax),%xmm2 pshufd $78,%xmm2,%xmm2 pshufd $255,%xmm2,%xmm4 movdqa %xmm2,%xmm3 psllq $1,%xmm2 pxor %xmm5,%xmm5 psrlq $63,%xmm3 pcmpgtd %xmm4,%xmm5 pslldq $8,%xmm3 por %xmm3,%xmm2 pand 16(%ecx),%xmm5 pxor %xmm5,%xmm2 movdqa %xmm2,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pshufd $78,%xmm2,%xmm4 pxor %xmm0,%xmm3 pxor %xmm2,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm2,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm2,%xmm3 movdqu %xmm2,(%edx) pxor %xmm0,%xmm4 movdqu %xmm0,16(%edx) .byte 102,15,58,15,227,8 movdqu %xmm4,32(%edx) ret .size gcm_init_clmul,.-.L_gcm_init_clmul_begin .globl gcm_gmult_clmul .hidden gcm_gmult_clmul .type gcm_gmult_clmul,@function .align 16 gcm_gmult_clmul: .L_gcm_gmult_clmul_begin: movl 4(%esp),%eax movl 8(%esp),%edx call 
.L001pic .L001pic: popl %ecx leal .Lbswap-.L001pic(%ecx),%ecx movdqu (%eax),%xmm0 movdqa (%ecx),%xmm5 movups (%edx),%xmm2 .byte 102,15,56,0,197 movups 32(%edx),%xmm4 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .byte 102,15,56,0,197 movdqu %xmm0,(%eax) ret .size gcm_gmult_clmul,.-.L_gcm_gmult_clmul_begin .globl gcm_ghash_clmul .hidden gcm_ghash_clmul .type gcm_ghash_clmul,@function .align 16 gcm_ghash_clmul: .L_gcm_ghash_clmul_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%eax movl 24(%esp),%edx movl 28(%esp),%esi movl 32(%esp),%ebx call .L002pic .L002pic: popl %ecx leal .Lbswap-.L002pic(%ecx),%ecx movdqu (%eax),%xmm0 movdqa (%ecx),%xmm5 movdqu (%edx),%xmm2 .byte 102,15,56,0,197 subl $16,%ebx jz .L003odd_tail movdqu (%esi),%xmm3 movdqu 16(%esi),%xmm6 .byte 102,15,56,0,221 .byte 102,15,56,0,245 movdqu 32(%edx),%xmm5 pxor %xmm3,%xmm0 pshufd $78,%xmm6,%xmm3 movdqa %xmm6,%xmm7 pxor %xmm6,%xmm3 leal 32(%esi),%esi .byte 102,15,58,68,242,0 .byte 102,15,58,68,250,17 .byte 102,15,58,68,221,0 movups 16(%edx),%xmm2 nop subl $32,%ebx jbe .L004even_tail jmp .L005mod_loop .align 32 .L005mod_loop: pshufd $78,%xmm0,%xmm4 movdqa %xmm0,%xmm1 pxor %xmm0,%xmm4 nop .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,229,16 movups (%edx),%xmm2 xorps %xmm6,%xmm0 movdqa (%ecx),%xmm5 xorps %xmm7,%xmm1 movdqu (%esi),%xmm7 pxor %xmm0,%xmm3 movdqu 16(%esi),%xmm6 pxor %xmm1,%xmm3 .byte 102,15,56,0,253 pxor %xmm3,%xmm4 
movdqa %xmm4,%xmm3 psrldq $8,%xmm4 pslldq $8,%xmm3 pxor %xmm4,%xmm1 pxor %xmm3,%xmm0 .byte 102,15,56,0,245 pxor %xmm7,%xmm1 movdqa %xmm6,%xmm7 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 .byte 102,15,58,68,242,0 movups 32(%edx),%xmm5 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 pshufd $78,%xmm7,%xmm3 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm7,%xmm3 pxor %xmm4,%xmm1 .byte 102,15,58,68,250,17 movups 16(%edx),%xmm2 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .byte 102,15,58,68,221,0 leal 32(%esi),%esi subl $32,%ebx ja .L005mod_loop .L004even_tail: pshufd $78,%xmm0,%xmm4 movdqa %xmm0,%xmm1 pxor %xmm0,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,229,16 movdqa (%ecx),%xmm5 xorps %xmm6,%xmm0 xorps %xmm7,%xmm1 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 pxor %xmm3,%xmm4 movdqa %xmm4,%xmm3 psrldq $8,%xmm4 pslldq $8,%xmm3 pxor %xmm4,%xmm1 pxor %xmm3,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 testl %ebx,%ebx jnz .L006done movups (%edx),%xmm2 .L003odd_tail: movdqu (%esi),%xmm3 .byte 102,15,56,0,221 pxor %xmm3,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pshufd $78,%xmm2,%xmm4 pxor %xmm0,%xmm3 pxor %xmm2,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa 
%xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .L006done: .byte 102,15,56,0,197 movdqu %xmm0,(%eax) popl %edi popl %esi popl %ebx popl %ebp ret .size gcm_ghash_clmul,.-.L_gcm_ghash_clmul_begin .align 64 .Lbswap: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194 .byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67 .byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112 .byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62 .byte 0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghash-x86_64-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _gcm_init_clmul .private_extern _gcm_init_clmul .p2align 4 _gcm_init_clmul: _CET_ENDBR L$_init_clmul: movdqu (%rsi),%xmm2 pshufd $78,%xmm2,%xmm2 pshufd $255,%xmm2,%xmm4 movdqa %xmm2,%xmm3 psllq $1,%xmm2 pxor %xmm5,%xmm5 psrlq $63,%xmm3 pcmpgtd %xmm4,%xmm5 pslldq $8,%xmm3 por %xmm3,%xmm2 pand L$0x1c2_polynomial(%rip),%xmm5 pxor %xmm5,%xmm2 pshufd $78,%xmm2,%xmm6 movdqa %xmm2,%xmm0 pxor %xmm2,%xmm6 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,222,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm2,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm2,%xmm3 movdqu %xmm2,0(%rdi) pxor %xmm0,%xmm4 movdqu %xmm0,16(%rdi) .byte 102,15,58,15,227,8 movdqu %xmm4,32(%rdi) movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,222,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 movdqa %xmm0,%xmm5 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 
102,15,58,68,222,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm5,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm5,%xmm3 movdqu %xmm5,48(%rdi) pxor %xmm0,%xmm4 movdqu %xmm0,64(%rdi) .byte 102,15,58,15,227,8 movdqu %xmm4,80(%rdi) ret .globl _gcm_gmult_clmul .private_extern _gcm_gmult_clmul .p2align 4 _gcm_gmult_clmul: _CET_ENDBR L$_gmult_clmul: movdqu (%rdi),%xmm0 movdqa L$bswap_mask(%rip),%xmm5 movdqu (%rsi),%xmm2 movdqu 32(%rsi),%xmm4 .byte 102,15,56,0,197 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .byte 102,15,56,0,197 movdqu %xmm0,(%rdi) ret .globl _gcm_ghash_clmul .private_extern _gcm_ghash_clmul .p2align 5 _gcm_ghash_clmul: _CET_ENDBR L$_ghash_clmul: movdqa L$bswap_mask(%rip),%xmm10 movdqu (%rdi),%xmm0 movdqu (%rsi),%xmm2 movdqu 32(%rsi),%xmm7 .byte 102,65,15,56,0,194 subq $0x10,%rcx jz L$odd_tail movdqu 16(%rsi),%xmm6 cmpq $0x30,%rcx jb L$skip4x subq $0x30,%rcx movq $0xA040608020C0E000,%rax movdqu 48(%rsi),%xmm14 movdqu 64(%rsi),%xmm15 movdqu 48(%rdx),%xmm3 movdqu 32(%rdx),%xmm11 .byte 102,65,15,56,0,218 .byte 
102,69,15,56,0,218 movdqa %xmm3,%xmm5 pshufd $78,%xmm3,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,68,218,0 .byte 102,15,58,68,234,17 .byte 102,15,58,68,231,0 movdqa %xmm11,%xmm13 pshufd $78,%xmm11,%xmm12 pxor %xmm11,%xmm12 .byte 102,68,15,58,68,222,0 .byte 102,68,15,58,68,238,17 .byte 102,68,15,58,68,231,16 xorps %xmm11,%xmm3 xorps %xmm13,%xmm5 movups 80(%rsi),%xmm7 xorps %xmm12,%xmm4 movdqu 16(%rdx),%xmm11 movdqu 0(%rdx),%xmm8 .byte 102,69,15,56,0,218 .byte 102,69,15,56,0,194 movdqa %xmm11,%xmm13 pshufd $78,%xmm11,%xmm12 pxor %xmm8,%xmm0 pxor %xmm11,%xmm12 .byte 102,69,15,58,68,222,0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm8 pxor %xmm0,%xmm8 .byte 102,69,15,58,68,238,17 .byte 102,68,15,58,68,231,0 xorps %xmm11,%xmm3 xorps %xmm13,%xmm5 leaq 64(%rdx),%rdx subq $0x40,%rcx jc L$tail4x jmp L$mod4_loop .p2align 5 L$mod4_loop: .byte 102,65,15,58,68,199,0 xorps %xmm12,%xmm4 movdqu 48(%rdx),%xmm11 .byte 102,69,15,56,0,218 .byte 102,65,15,58,68,207,17 xorps %xmm3,%xmm0 movdqu 32(%rdx),%xmm3 movdqa %xmm11,%xmm13 .byte 102,68,15,58,68,199,16 pshufd $78,%xmm11,%xmm12 xorps %xmm5,%xmm1 pxor %xmm11,%xmm12 .byte 102,65,15,56,0,218 movups 32(%rsi),%xmm7 xorps %xmm4,%xmm8 .byte 102,68,15,58,68,218,0 pshufd $78,%xmm3,%xmm4 pxor %xmm0,%xmm8 movdqa %xmm3,%xmm5 pxor %xmm1,%xmm8 pxor %xmm3,%xmm4 movdqa %xmm8,%xmm9 .byte 102,68,15,58,68,234,17 pslldq $8,%xmm8 psrldq $8,%xmm9 pxor %xmm8,%xmm0 movdqa L$7_mask(%rip),%xmm8 pxor %xmm9,%xmm1 .byte 102,76,15,110,200 pand %xmm0,%xmm8 .byte 102,69,15,56,0,200 pxor %xmm0,%xmm9 .byte 102,68,15,58,68,231,0 psllq $57,%xmm9 movdqa %xmm9,%xmm8 pslldq $8,%xmm9 .byte 102,15,58,68,222,0 psrldq $8,%xmm8 pxor %xmm9,%xmm0 pxor %xmm8,%xmm1 movdqu 0(%rdx),%xmm8 movdqa %xmm0,%xmm9 psrlq $1,%xmm0 .byte 102,15,58,68,238,17 xorps %xmm11,%xmm3 movdqu 16(%rdx),%xmm11 .byte 102,69,15,56,0,218 .byte 102,15,58,68,231,16 xorps %xmm13,%xmm5 movups 80(%rsi),%xmm7 .byte 102,69,15,56,0,194 pxor %xmm9,%xmm1 pxor %xmm0,%xmm9 psrlq $5,%xmm0 movdqa %xmm11,%xmm13 pxor %xmm12,%xmm4 
pshufd $78,%xmm11,%xmm12 pxor %xmm9,%xmm0 pxor %xmm8,%xmm1 pxor %xmm11,%xmm12 .byte 102,69,15,58,68,222,0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 movdqa %xmm0,%xmm1 .byte 102,69,15,58,68,238,17 xorps %xmm11,%xmm3 pshufd $78,%xmm0,%xmm8 pxor %xmm0,%xmm8 .byte 102,68,15,58,68,231,0 xorps %xmm13,%xmm5 leaq 64(%rdx),%rdx subq $0x40,%rcx jnc L$mod4_loop L$tail4x: .byte 102,65,15,58,68,199,0 .byte 102,65,15,58,68,207,17 .byte 102,68,15,58,68,199,16 xorps %xmm12,%xmm4 xorps %xmm3,%xmm0 xorps %xmm5,%xmm1 pxor %xmm0,%xmm1 pxor %xmm4,%xmm8 pxor %xmm1,%xmm8 pxor %xmm0,%xmm1 movdqa %xmm8,%xmm9 psrldq $8,%xmm8 pslldq $8,%xmm9 pxor %xmm8,%xmm1 pxor %xmm9,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 addq $0x40,%rcx jz L$done movdqu 32(%rsi),%xmm7 subq $0x10,%rcx jz L$odd_tail L$skip4x: movdqu (%rdx),%xmm8 movdqu 16(%rdx),%xmm3 .byte 102,69,15,56,0,194 .byte 102,65,15,56,0,218 pxor %xmm8,%xmm0 movdqa %xmm3,%xmm5 pshufd $78,%xmm3,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,68,218,0 .byte 102,15,58,68,234,17 .byte 102,15,58,68,231,0 leaq 32(%rdx),%rdx nop subq $0x20,%rcx jbe L$even_tail nop jmp L$mod_loop .p2align 5 L$mod_loop: movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm8 pshufd $78,%xmm0,%xmm4 pxor %xmm0,%xmm4 .byte 102,15,58,68,198,0 .byte 102,15,58,68,206,17 .byte 102,15,58,68,231,16 pxor %xmm3,%xmm0 pxor %xmm5,%xmm1 movdqu (%rdx),%xmm9 pxor %xmm0,%xmm8 .byte 102,69,15,56,0,202 movdqu 16(%rdx),%xmm3 pxor %xmm1,%xmm8 pxor %xmm9,%xmm1 pxor %xmm8,%xmm4 .byte 102,65,15,56,0,218 movdqa %xmm4,%xmm8 psrldq $8,%xmm8 pslldq $8,%xmm4 pxor %xmm8,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm3,%xmm5 movdqa %xmm0,%xmm9 movdqa %xmm0,%xmm8 psllq $5,%xmm0 pxor %xmm0,%xmm8 .byte 102,15,58,68,218,0 psllq $1,%xmm0 pxor %xmm8,%xmm0 psllq 
$57,%xmm0 movdqa %xmm0,%xmm8 pslldq $8,%xmm0 psrldq $8,%xmm8 pxor %xmm9,%xmm0 pshufd $78,%xmm5,%xmm4 pxor %xmm8,%xmm1 pxor %xmm5,%xmm4 movdqa %xmm0,%xmm9 psrlq $1,%xmm0 .byte 102,15,58,68,234,17 pxor %xmm9,%xmm1 pxor %xmm0,%xmm9 psrlq $5,%xmm0 pxor %xmm9,%xmm0 leaq 32(%rdx),%rdx psrlq $1,%xmm0 .byte 102,15,58,68,231,0 pxor %xmm1,%xmm0 subq $0x20,%rcx ja L$mod_loop L$even_tail: movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm8 pshufd $78,%xmm0,%xmm4 pxor %xmm0,%xmm4 .byte 102,15,58,68,198,0 .byte 102,15,58,68,206,17 .byte 102,15,58,68,231,16 pxor %xmm3,%xmm0 pxor %xmm5,%xmm1 pxor %xmm0,%xmm8 pxor %xmm1,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm8 psrldq $8,%xmm8 pslldq $8,%xmm4 pxor %xmm8,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 testq %rcx,%rcx jnz L$done L$odd_tail: movdqu (%rdx),%xmm8 .byte 102,69,15,56,0,194 pxor %xmm8,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,223,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 L$done: .byte 102,65,15,56,0,194 movdqu %xmm0,(%rdi) ret .globl _gcm_init_avx .private_extern _gcm_init_avx .p2align 5 _gcm_init_avx: _CET_ENDBR vzeroupper vmovdqu (%rsi),%xmm2 vpshufd $78,%xmm2,%xmm2 vpshufd $255,%xmm2,%xmm4 vpsrlq $63,%xmm2,%xmm3 vpsllq 
$1,%xmm2,%xmm2 vpxor %xmm5,%xmm5,%xmm5 vpcmpgtd %xmm4,%xmm5,%xmm5 vpslldq $8,%xmm3,%xmm3 vpor %xmm3,%xmm2,%xmm2 vpand L$0x1c2_polynomial(%rip),%xmm5,%xmm5 vpxor %xmm5,%xmm2,%xmm2 vpunpckhqdq %xmm2,%xmm2,%xmm6 vmovdqa %xmm2,%xmm0 vpxor %xmm2,%xmm6,%xmm6 movq $4,%r10 jmp L$init_start_avx .p2align 5 L$init_loop_avx: vpalignr $8,%xmm3,%xmm4,%xmm5 vmovdqu %xmm5,-16(%rdi) vpunpckhqdq %xmm0,%xmm0,%xmm3 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3 vpxor %xmm0,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $8,%xmm3,%xmm4 vpsrldq $8,%xmm3,%xmm3 vpxor %xmm4,%xmm0,%xmm0 vpxor %xmm3,%xmm1,%xmm1 vpsllq $57,%xmm0,%xmm3 vpsllq $62,%xmm0,%xmm4 vpxor %xmm3,%xmm4,%xmm4 vpsllq $63,%xmm0,%xmm3 vpxor %xmm3,%xmm4,%xmm4 vpslldq $8,%xmm4,%xmm3 vpsrldq $8,%xmm4,%xmm4 vpxor %xmm3,%xmm0,%xmm0 vpxor %xmm4,%xmm1,%xmm1 vpsrlq $1,%xmm0,%xmm4 vpxor %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $5,%xmm4,%xmm4 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $1,%xmm0,%xmm0 vpxor %xmm1,%xmm0,%xmm0 L$init_start_avx: vmovdqa %xmm0,%xmm5 vpunpckhqdq %xmm0,%xmm0,%xmm3 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3 vpxor %xmm0,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $8,%xmm3,%xmm4 vpsrldq $8,%xmm3,%xmm3 vpxor %xmm4,%xmm0,%xmm0 vpxor %xmm3,%xmm1,%xmm1 vpsllq $57,%xmm0,%xmm3 vpsllq $62,%xmm0,%xmm4 vpxor %xmm3,%xmm4,%xmm4 vpsllq $63,%xmm0,%xmm3 vpxor %xmm3,%xmm4,%xmm4 vpslldq $8,%xmm4,%xmm3 vpsrldq $8,%xmm4,%xmm4 vpxor %xmm3,%xmm0,%xmm0 vpxor %xmm4,%xmm1,%xmm1 vpsrlq $1,%xmm0,%xmm4 vpxor %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $5,%xmm4,%xmm4 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $1,%xmm0,%xmm0 vpxor %xmm1,%xmm0,%xmm0 vpshufd $78,%xmm5,%xmm3 vpshufd $78,%xmm0,%xmm4 vpxor %xmm5,%xmm3,%xmm3 vmovdqu %xmm5,0(%rdi) vpxor %xmm0,%xmm4,%xmm4 vmovdqu %xmm0,16(%rdi) leaq 48(%rdi),%rdi subq $1,%r10 jnz L$init_loop_avx vpalignr $8,%xmm4,%xmm3,%xmm5 vmovdqu 
%xmm5,-16(%rdi) vzeroupper ret .globl _gcm_gmult_avx .private_extern _gcm_gmult_avx .p2align 5 _gcm_gmult_avx: _CET_ENDBR jmp L$_gmult_clmul .globl _gcm_ghash_avx .private_extern _gcm_ghash_avx .p2align 5 _gcm_ghash_avx: _CET_ENDBR vzeroupper vmovdqu (%rdi),%xmm10 leaq L$0x1c2_polynomial(%rip),%r10 leaq 64(%rsi),%rsi vmovdqu L$bswap_mask(%rip),%xmm13 vpshufb %xmm13,%xmm10,%xmm10 cmpq $0x80,%rcx jb L$short_avx subq $0x80,%rcx vmovdqu 112(%rdx),%xmm14 vmovdqu 0-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm14 vmovdqu 32-64(%rsi),%xmm7 vpunpckhqdq %xmm14,%xmm14,%xmm9 vmovdqu 96(%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm14,%xmm9,%xmm9 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 16-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vmovdqu 80(%rdx),%xmm14 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vpshufb %xmm13,%xmm14,%xmm14 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 48-64(%rsi),%xmm6 vpxor %xmm14,%xmm9,%xmm9 vmovdqu 64(%rdx),%xmm15 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 80-64(%rsi),%xmm7 vpshufb %xmm13,%xmm15,%xmm15 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm1,%xmm4,%xmm4 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 64-64(%rsi),%xmm6 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vmovdqu 48(%rdx),%xmm14 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpxor %xmm4,%xmm1,%xmm1 vpshufb %xmm13,%xmm14,%xmm14 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 96-64(%rsi),%xmm6 vpxor %xmm5,%xmm2,%xmm2 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 128-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vmovdqu 32(%rdx),%xmm15 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm1,%xmm4,%xmm4 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 112-64(%rsi),%xmm6 vpxor %xmm2,%xmm5,%xmm5 vpunpckhqdq 
%xmm15,%xmm15,%xmm8 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vmovdqu 16(%rdx),%xmm14 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpxor %xmm4,%xmm1,%xmm1 vpshufb %xmm13,%xmm14,%xmm14 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 144-64(%rsi),%xmm6 vpxor %xmm5,%xmm2,%xmm2 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 176-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vmovdqu (%rdx),%xmm15 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm1,%xmm4,%xmm4 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 160-64(%rsi),%xmm6 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2 leaq 128(%rdx),%rdx cmpq $0x80,%rcx jb L$tail_avx vpxor %xmm10,%xmm15,%xmm15 subq $0x80,%rcx jmp L$oop8x_avx .p2align 5 L$oop8x_avx: vpunpckhqdq %xmm15,%xmm15,%xmm8 vmovdqu 112(%rdx),%xmm14 vpxor %xmm0,%xmm3,%xmm3 vpxor %xmm15,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm10 vpshufb %xmm13,%xmm14,%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm11 vmovdqu 0-64(%rsi),%xmm6 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm12 vmovdqu 32-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vmovdqu 96(%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm3,%xmm10,%xmm10 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vxorps %xmm4,%xmm11,%xmm11 vmovdqu 16-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm5,%xmm12,%xmm12 vxorps %xmm15,%xmm8,%xmm8 vmovdqu 80(%rdx),%xmm14 vpxor %xmm10,%xmm12,%xmm12 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpxor %xmm11,%xmm12,%xmm12 vpslldq $8,%xmm12,%xmm9 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vpsrldq $8,%xmm12,%xmm12 vpxor %xmm9,%xmm10,%xmm10 vmovdqu 48-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm14 vxorps %xmm12,%xmm11,%xmm11 vpxor %xmm1,%xmm4,%xmm4 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 80-64(%rsi),%xmm7 vpxor 
%xmm14,%xmm9,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 64(%rdx),%xmm15 vpalignr $8,%xmm10,%xmm10,%xmm12 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpshufb %xmm13,%xmm15,%xmm15 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 64-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vxorps %xmm15,%xmm8,%xmm8 vpxor %xmm5,%xmm2,%xmm2 vmovdqu 48(%rdx),%xmm14 vpclmulqdq $0x10,(%r10),%xmm10,%xmm10 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpshufb %xmm13,%xmm14,%xmm14 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 96-64(%rsi),%xmm6 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 128-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 32(%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpshufb %xmm13,%xmm15,%xmm15 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 112-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vpxor %xmm5,%xmm2,%xmm2 vxorps %xmm12,%xmm10,%xmm10 vmovdqu 16(%rdx),%xmm14 vpalignr $8,%xmm10,%xmm10,%xmm12 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpshufb %xmm13,%xmm14,%xmm14 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 144-64(%rsi),%xmm6 vpclmulqdq $0x10,(%r10),%xmm10,%xmm10 vxorps %xmm11,%xmm12,%xmm12 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 176-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vmovdqu (%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 160-64(%rsi),%xmm6 vpxor %xmm12,%xmm15,%xmm15 vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2 vpxor %xmm10,%xmm15,%xmm15 leaq 128(%rdx),%rdx subq $0x80,%rcx jnc L$oop8x_avx addq $0x80,%rcx jmp L$tail_no_xor_avx .p2align 5 L$short_avx: vmovdqu -16(%rdx,%rcx,1),%xmm14 leaq (%rdx,%rcx,1),%rdx vmovdqu 0-64(%rsi),%xmm6 vmovdqu 
32-64(%rsi),%xmm7 vpshufb %xmm13,%xmm14,%xmm15 vmovdqa %xmm0,%xmm3 vmovdqa %xmm1,%xmm4 vmovdqa %xmm2,%xmm5 subq $0x10,%rcx jz L$tail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -32(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 16-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vpsrldq $8,%xmm7,%xmm7 subq $0x10,%rcx jz L$tail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -48(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 48-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovdqu 80-64(%rsi),%xmm7 subq $0x10,%rcx jz L$tail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -64(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 64-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vpsrldq $8,%xmm7,%xmm7 subq $0x10,%rcx jz L$tail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -80(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 96-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovdqu 128-64(%rsi),%xmm7 subq $0x10,%rcx jz L$tail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -96(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 112-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vpsrldq $8,%xmm7,%xmm7 subq $0x10,%rcx jz L$tail_avx 
vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -112(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 144-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovq 184-64(%rsi),%xmm7 subq $0x10,%rcx jmp L$tail_avx .p2align 5 L$tail_avx: vpxor %xmm10,%xmm15,%xmm15 L$tail_no_xor_avx: vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovdqu (%r10),%xmm12 vpxor %xmm0,%xmm3,%xmm10 vpxor %xmm1,%xmm4,%xmm11 vpxor %xmm2,%xmm5,%xmm5 vpxor %xmm10,%xmm5,%xmm5 vpxor %xmm11,%xmm5,%xmm5 vpslldq $8,%xmm5,%xmm9 vpsrldq $8,%xmm5,%xmm5 vpxor %xmm9,%xmm10,%xmm10 vpxor %xmm5,%xmm11,%xmm11 vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9 vpalignr $8,%xmm10,%xmm10,%xmm10 vpxor %xmm9,%xmm10,%xmm10 vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9 vpalignr $8,%xmm10,%xmm10,%xmm10 vpxor %xmm11,%xmm10,%xmm10 vpxor %xmm9,%xmm10,%xmm10 cmpq $0,%rcx jne L$short_avx vpshufb %xmm13,%xmm10,%xmm10 vmovdqu %xmm10,(%rdi) vzeroupper ret .section __DATA,__const .p2align 6 L$bswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 L$0x1c2_polynomial: .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 L$7_mask: .long 7,0,7,0 .p2align 6 .byte 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .p2align 6 .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghash-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source 
tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl gcm_init_clmul .hidden gcm_init_clmul .type gcm_init_clmul,@function .align 16 gcm_init_clmul: .cfi_startproc _CET_ENDBR .L_init_clmul: movdqu (%rsi),%xmm2 pshufd $78,%xmm2,%xmm2 pshufd $255,%xmm2,%xmm4 movdqa %xmm2,%xmm3 psllq $1,%xmm2 pxor %xmm5,%xmm5 psrlq $63,%xmm3 pcmpgtd %xmm4,%xmm5 pslldq $8,%xmm3 por %xmm3,%xmm2 pand .L0x1c2_polynomial(%rip),%xmm5 pxor %xmm5,%xmm2 pshufd $78,%xmm2,%xmm6 movdqa %xmm2,%xmm0 pxor %xmm2,%xmm6 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,222,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm2,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm2,%xmm3 movdqu %xmm2,0(%rdi) pxor %xmm0,%xmm4 movdqu %xmm0,16(%rdi) .byte 102,15,58,15,227,8 movdqu %xmm4,32(%rdi) movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,222,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 movdqa %xmm0,%xmm5 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 
102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,222,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm5,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm5,%xmm3 movdqu %xmm5,48(%rdi) pxor %xmm0,%xmm4 movdqu %xmm0,64(%rdi) .byte 102,15,58,15,227,8 movdqu %xmm4,80(%rdi) ret .cfi_endproc .size gcm_init_clmul,.-gcm_init_clmul .globl gcm_gmult_clmul .hidden gcm_gmult_clmul .type gcm_gmult_clmul,@function .align 16 gcm_gmult_clmul: .cfi_startproc _CET_ENDBR .L_gmult_clmul: movdqu (%rdi),%xmm0 movdqa .Lbswap_mask(%rip),%xmm5 movdqu (%rsi),%xmm2 movdqu 32(%rsi),%xmm4 .byte 102,15,56,0,197 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .byte 102,15,56,0,197 movdqu %xmm0,(%rdi) ret .cfi_endproc .size gcm_gmult_clmul,.-gcm_gmult_clmul .globl gcm_ghash_clmul .hidden gcm_ghash_clmul .type gcm_ghash_clmul,@function .align 32 gcm_ghash_clmul: .cfi_startproc _CET_ENDBR .L_ghash_clmul: movdqa .Lbswap_mask(%rip),%xmm10 movdqu (%rdi),%xmm0 movdqu (%rsi),%xmm2 movdqu 32(%rsi),%xmm7 .byte 102,65,15,56,0,194 subq $0x10,%rcx jz .Lodd_tail 
movdqu 16(%rsi),%xmm6 cmpq $0x30,%rcx jb .Lskip4x subq $0x30,%rcx movq $0xA040608020C0E000,%rax movdqu 48(%rsi),%xmm14 movdqu 64(%rsi),%xmm15 movdqu 48(%rdx),%xmm3 movdqu 32(%rdx),%xmm11 .byte 102,65,15,56,0,218 .byte 102,69,15,56,0,218 movdqa %xmm3,%xmm5 pshufd $78,%xmm3,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,68,218,0 .byte 102,15,58,68,234,17 .byte 102,15,58,68,231,0 movdqa %xmm11,%xmm13 pshufd $78,%xmm11,%xmm12 pxor %xmm11,%xmm12 .byte 102,68,15,58,68,222,0 .byte 102,68,15,58,68,238,17 .byte 102,68,15,58,68,231,16 xorps %xmm11,%xmm3 xorps %xmm13,%xmm5 movups 80(%rsi),%xmm7 xorps %xmm12,%xmm4 movdqu 16(%rdx),%xmm11 movdqu 0(%rdx),%xmm8 .byte 102,69,15,56,0,218 .byte 102,69,15,56,0,194 movdqa %xmm11,%xmm13 pshufd $78,%xmm11,%xmm12 pxor %xmm8,%xmm0 pxor %xmm11,%xmm12 .byte 102,69,15,58,68,222,0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm8 pxor %xmm0,%xmm8 .byte 102,69,15,58,68,238,17 .byte 102,68,15,58,68,231,0 xorps %xmm11,%xmm3 xorps %xmm13,%xmm5 leaq 64(%rdx),%rdx subq $0x40,%rcx jc .Ltail4x jmp .Lmod4_loop .align 32 .Lmod4_loop: .byte 102,65,15,58,68,199,0 xorps %xmm12,%xmm4 movdqu 48(%rdx),%xmm11 .byte 102,69,15,56,0,218 .byte 102,65,15,58,68,207,17 xorps %xmm3,%xmm0 movdqu 32(%rdx),%xmm3 movdqa %xmm11,%xmm13 .byte 102,68,15,58,68,199,16 pshufd $78,%xmm11,%xmm12 xorps %xmm5,%xmm1 pxor %xmm11,%xmm12 .byte 102,65,15,56,0,218 movups 32(%rsi),%xmm7 xorps %xmm4,%xmm8 .byte 102,68,15,58,68,218,0 pshufd $78,%xmm3,%xmm4 pxor %xmm0,%xmm8 movdqa %xmm3,%xmm5 pxor %xmm1,%xmm8 pxor %xmm3,%xmm4 movdqa %xmm8,%xmm9 .byte 102,68,15,58,68,234,17 pslldq $8,%xmm8 psrldq $8,%xmm9 pxor %xmm8,%xmm0 movdqa .L7_mask(%rip),%xmm8 pxor %xmm9,%xmm1 .byte 102,76,15,110,200 pand %xmm0,%xmm8 .byte 102,69,15,56,0,200 pxor %xmm0,%xmm9 .byte 102,68,15,58,68,231,0 psllq $57,%xmm9 movdqa %xmm9,%xmm8 pslldq $8,%xmm9 .byte 102,15,58,68,222,0 psrldq $8,%xmm8 pxor %xmm9,%xmm0 pxor %xmm8,%xmm1 movdqu 0(%rdx),%xmm8 movdqa %xmm0,%xmm9 psrlq $1,%xmm0 .byte 102,15,58,68,238,17 xorps %xmm11,%xmm3 movdqu 
16(%rdx),%xmm11 .byte 102,69,15,56,0,218 .byte 102,15,58,68,231,16 xorps %xmm13,%xmm5 movups 80(%rsi),%xmm7 .byte 102,69,15,56,0,194 pxor %xmm9,%xmm1 pxor %xmm0,%xmm9 psrlq $5,%xmm0 movdqa %xmm11,%xmm13 pxor %xmm12,%xmm4 pshufd $78,%xmm11,%xmm12 pxor %xmm9,%xmm0 pxor %xmm8,%xmm1 pxor %xmm11,%xmm12 .byte 102,69,15,58,68,222,0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 movdqa %xmm0,%xmm1 .byte 102,69,15,58,68,238,17 xorps %xmm11,%xmm3 pshufd $78,%xmm0,%xmm8 pxor %xmm0,%xmm8 .byte 102,68,15,58,68,231,0 xorps %xmm13,%xmm5 leaq 64(%rdx),%rdx subq $0x40,%rcx jnc .Lmod4_loop .Ltail4x: .byte 102,65,15,58,68,199,0 .byte 102,65,15,58,68,207,17 .byte 102,68,15,58,68,199,16 xorps %xmm12,%xmm4 xorps %xmm3,%xmm0 xorps %xmm5,%xmm1 pxor %xmm0,%xmm1 pxor %xmm4,%xmm8 pxor %xmm1,%xmm8 pxor %xmm0,%xmm1 movdqa %xmm8,%xmm9 psrldq $8,%xmm8 pslldq $8,%xmm9 pxor %xmm8,%xmm1 pxor %xmm9,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 addq $0x40,%rcx jz .Ldone movdqu 32(%rsi),%xmm7 subq $0x10,%rcx jz .Lodd_tail .Lskip4x: movdqu (%rdx),%xmm8 movdqu 16(%rdx),%xmm3 .byte 102,69,15,56,0,194 .byte 102,65,15,56,0,218 pxor %xmm8,%xmm0 movdqa %xmm3,%xmm5 pshufd $78,%xmm3,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,68,218,0 .byte 102,15,58,68,234,17 .byte 102,15,58,68,231,0 leaq 32(%rdx),%rdx nop subq $0x20,%rcx jbe .Leven_tail nop jmp .Lmod_loop .align 32 .Lmod_loop: movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm8 pshufd $78,%xmm0,%xmm4 pxor %xmm0,%xmm4 .byte 102,15,58,68,198,0 .byte 102,15,58,68,206,17 .byte 102,15,58,68,231,16 pxor %xmm3,%xmm0 pxor %xmm5,%xmm1 movdqu (%rdx),%xmm9 pxor %xmm0,%xmm8 .byte 102,69,15,56,0,202 movdqu 16(%rdx),%xmm3 pxor %xmm1,%xmm8 pxor %xmm9,%xmm1 pxor %xmm8,%xmm4 .byte 102,65,15,56,0,218 movdqa %xmm4,%xmm8 
psrldq $8,%xmm8 pslldq $8,%xmm4 pxor %xmm8,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm3,%xmm5 movdqa %xmm0,%xmm9 movdqa %xmm0,%xmm8 psllq $5,%xmm0 pxor %xmm0,%xmm8 .byte 102,15,58,68,218,0 psllq $1,%xmm0 pxor %xmm8,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm8 pslldq $8,%xmm0 psrldq $8,%xmm8 pxor %xmm9,%xmm0 pshufd $78,%xmm5,%xmm4 pxor %xmm8,%xmm1 pxor %xmm5,%xmm4 movdqa %xmm0,%xmm9 psrlq $1,%xmm0 .byte 102,15,58,68,234,17 pxor %xmm9,%xmm1 pxor %xmm0,%xmm9 psrlq $5,%xmm0 pxor %xmm9,%xmm0 leaq 32(%rdx),%rdx psrlq $1,%xmm0 .byte 102,15,58,68,231,0 pxor %xmm1,%xmm0 subq $0x20,%rcx ja .Lmod_loop .Leven_tail: movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm8 pshufd $78,%xmm0,%xmm4 pxor %xmm0,%xmm4 .byte 102,15,58,68,198,0 .byte 102,15,58,68,206,17 .byte 102,15,58,68,231,16 pxor %xmm3,%xmm0 pxor %xmm5,%xmm1 pxor %xmm0,%xmm8 pxor %xmm1,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm8 psrldq $8,%xmm8 pslldq $8,%xmm4 pxor %xmm8,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 testq %rcx,%rcx jnz .Ldone .Lodd_tail: movdqu (%rdx),%xmm8 .byte 102,69,15,56,0,194 pxor %xmm8,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,223,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .Ldone: .byte 102,65,15,56,0,194 movdqu 
%xmm0,(%rdi) ret .cfi_endproc .size gcm_ghash_clmul,.-gcm_ghash_clmul .globl gcm_init_avx .hidden gcm_init_avx .type gcm_init_avx,@function .align 32 gcm_init_avx: .cfi_startproc _CET_ENDBR vzeroupper vmovdqu (%rsi),%xmm2 vpshufd $78,%xmm2,%xmm2 vpshufd $255,%xmm2,%xmm4 vpsrlq $63,%xmm2,%xmm3 vpsllq $1,%xmm2,%xmm2 vpxor %xmm5,%xmm5,%xmm5 vpcmpgtd %xmm4,%xmm5,%xmm5 vpslldq $8,%xmm3,%xmm3 vpor %xmm3,%xmm2,%xmm2 vpand .L0x1c2_polynomial(%rip),%xmm5,%xmm5 vpxor %xmm5,%xmm2,%xmm2 vpunpckhqdq %xmm2,%xmm2,%xmm6 vmovdqa %xmm2,%xmm0 vpxor %xmm2,%xmm6,%xmm6 movq $4,%r10 jmp .Linit_start_avx .align 32 .Linit_loop_avx: vpalignr $8,%xmm3,%xmm4,%xmm5 vmovdqu %xmm5,-16(%rdi) vpunpckhqdq %xmm0,%xmm0,%xmm3 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3 vpxor %xmm0,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $8,%xmm3,%xmm4 vpsrldq $8,%xmm3,%xmm3 vpxor %xmm4,%xmm0,%xmm0 vpxor %xmm3,%xmm1,%xmm1 vpsllq $57,%xmm0,%xmm3 vpsllq $62,%xmm0,%xmm4 vpxor %xmm3,%xmm4,%xmm4 vpsllq $63,%xmm0,%xmm3 vpxor %xmm3,%xmm4,%xmm4 vpslldq $8,%xmm4,%xmm3 vpsrldq $8,%xmm4,%xmm4 vpxor %xmm3,%xmm0,%xmm0 vpxor %xmm4,%xmm1,%xmm1 vpsrlq $1,%xmm0,%xmm4 vpxor %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $5,%xmm4,%xmm4 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $1,%xmm0,%xmm0 vpxor %xmm1,%xmm0,%xmm0 .Linit_start_avx: vmovdqa %xmm0,%xmm5 vpunpckhqdq %xmm0,%xmm0,%xmm3 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3 vpxor %xmm0,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $8,%xmm3,%xmm4 vpsrldq $8,%xmm3,%xmm3 vpxor %xmm4,%xmm0,%xmm0 vpxor %xmm3,%xmm1,%xmm1 vpsllq $57,%xmm0,%xmm3 vpsllq $62,%xmm0,%xmm4 vpxor %xmm3,%xmm4,%xmm4 vpsllq $63,%xmm0,%xmm3 vpxor %xmm3,%xmm4,%xmm4 vpslldq $8,%xmm4,%xmm3 vpsrldq $8,%xmm4,%xmm4 vpxor %xmm3,%xmm0,%xmm0 vpxor %xmm4,%xmm1,%xmm1 vpsrlq $1,%xmm0,%xmm4 vpxor %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $5,%xmm4,%xmm4 vpxor 
%xmm4,%xmm0,%xmm0 vpsrlq $1,%xmm0,%xmm0 vpxor %xmm1,%xmm0,%xmm0 vpshufd $78,%xmm5,%xmm3 vpshufd $78,%xmm0,%xmm4 vpxor %xmm5,%xmm3,%xmm3 vmovdqu %xmm5,0(%rdi) vpxor %xmm0,%xmm4,%xmm4 vmovdqu %xmm0,16(%rdi) leaq 48(%rdi),%rdi subq $1,%r10 jnz .Linit_loop_avx vpalignr $8,%xmm4,%xmm3,%xmm5 vmovdqu %xmm5,-16(%rdi) vzeroupper ret .cfi_endproc .size gcm_init_avx,.-gcm_init_avx .globl gcm_gmult_avx .hidden gcm_gmult_avx .type gcm_gmult_avx,@function .align 32 gcm_gmult_avx: .cfi_startproc _CET_ENDBR jmp .L_gmult_clmul .cfi_endproc .size gcm_gmult_avx,.-gcm_gmult_avx .globl gcm_ghash_avx .hidden gcm_ghash_avx .type gcm_ghash_avx,@function .align 32 gcm_ghash_avx: .cfi_startproc _CET_ENDBR vzeroupper vmovdqu (%rdi),%xmm10 leaq .L0x1c2_polynomial(%rip),%r10 leaq 64(%rsi),%rsi vmovdqu .Lbswap_mask(%rip),%xmm13 vpshufb %xmm13,%xmm10,%xmm10 cmpq $0x80,%rcx jb .Lshort_avx subq $0x80,%rcx vmovdqu 112(%rdx),%xmm14 vmovdqu 0-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm14 vmovdqu 32-64(%rsi),%xmm7 vpunpckhqdq %xmm14,%xmm14,%xmm9 vmovdqu 96(%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm14,%xmm9,%xmm9 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 16-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vmovdqu 80(%rdx),%xmm14 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vpshufb %xmm13,%xmm14,%xmm14 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 48-64(%rsi),%xmm6 vpxor %xmm14,%xmm9,%xmm9 vmovdqu 64(%rdx),%xmm15 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 80-64(%rsi),%xmm7 vpshufb %xmm13,%xmm15,%xmm15 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm1,%xmm4,%xmm4 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 64-64(%rsi),%xmm6 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vmovdqu 48(%rdx),%xmm14 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpxor %xmm4,%xmm1,%xmm1 vpshufb 
%xmm13,%xmm14,%xmm14 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 96-64(%rsi),%xmm6 vpxor %xmm5,%xmm2,%xmm2 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 128-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vmovdqu 32(%rdx),%xmm15 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm1,%xmm4,%xmm4 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 112-64(%rsi),%xmm6 vpxor %xmm2,%xmm5,%xmm5 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vmovdqu 16(%rdx),%xmm14 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpxor %xmm4,%xmm1,%xmm1 vpshufb %xmm13,%xmm14,%xmm14 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 144-64(%rsi),%xmm6 vpxor %xmm5,%xmm2,%xmm2 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 176-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vmovdqu (%rdx),%xmm15 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm1,%xmm4,%xmm4 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 160-64(%rsi),%xmm6 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2 leaq 128(%rdx),%rdx cmpq $0x80,%rcx jb .Ltail_avx vpxor %xmm10,%xmm15,%xmm15 subq $0x80,%rcx jmp .Loop8x_avx .align 32 .Loop8x_avx: vpunpckhqdq %xmm15,%xmm15,%xmm8 vmovdqu 112(%rdx),%xmm14 vpxor %xmm0,%xmm3,%xmm3 vpxor %xmm15,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm10 vpshufb %xmm13,%xmm14,%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm11 vmovdqu 0-64(%rsi),%xmm6 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm12 vmovdqu 32-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vmovdqu 96(%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm3,%xmm10,%xmm10 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vxorps %xmm4,%xmm11,%xmm11 vmovdqu 16-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm5,%xmm12,%xmm12 vxorps %xmm15,%xmm8,%xmm8 
vmovdqu 80(%rdx),%xmm14 vpxor %xmm10,%xmm12,%xmm12 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpxor %xmm11,%xmm12,%xmm12 vpslldq $8,%xmm12,%xmm9 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vpsrldq $8,%xmm12,%xmm12 vpxor %xmm9,%xmm10,%xmm10 vmovdqu 48-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm14 vxorps %xmm12,%xmm11,%xmm11 vpxor %xmm1,%xmm4,%xmm4 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 80-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 64(%rdx),%xmm15 vpalignr $8,%xmm10,%xmm10,%xmm12 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpshufb %xmm13,%xmm15,%xmm15 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 64-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vxorps %xmm15,%xmm8,%xmm8 vpxor %xmm5,%xmm2,%xmm2 vmovdqu 48(%rdx),%xmm14 vpclmulqdq $0x10,(%r10),%xmm10,%xmm10 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpshufb %xmm13,%xmm14,%xmm14 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 96-64(%rsi),%xmm6 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 128-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 32(%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpshufb %xmm13,%xmm15,%xmm15 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 112-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vpxor %xmm5,%xmm2,%xmm2 vxorps %xmm12,%xmm10,%xmm10 vmovdqu 16(%rdx),%xmm14 vpalignr $8,%xmm10,%xmm10,%xmm12 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpshufb %xmm13,%xmm14,%xmm14 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 144-64(%rsi),%xmm6 vpclmulqdq $0x10,(%r10),%xmm10,%xmm10 vxorps %xmm11,%xmm12,%xmm12 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 176-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vpxor 
%xmm2,%xmm5,%xmm5 vmovdqu (%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 160-64(%rsi),%xmm6 vpxor %xmm12,%xmm15,%xmm15 vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2 vpxor %xmm10,%xmm15,%xmm15 leaq 128(%rdx),%rdx subq $0x80,%rcx jnc .Loop8x_avx addq $0x80,%rcx jmp .Ltail_no_xor_avx .align 32 .Lshort_avx: vmovdqu -16(%rdx,%rcx,1),%xmm14 leaq (%rdx,%rcx,1),%rdx vmovdqu 0-64(%rsi),%xmm6 vmovdqu 32-64(%rsi),%xmm7 vpshufb %xmm13,%xmm14,%xmm15 vmovdqa %xmm0,%xmm3 vmovdqa %xmm1,%xmm4 vmovdqa %xmm2,%xmm5 subq $0x10,%rcx jz .Ltail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -32(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 16-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vpsrldq $8,%xmm7,%xmm7 subq $0x10,%rcx jz .Ltail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -48(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 48-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovdqu 80-64(%rsi),%xmm7 subq $0x10,%rcx jz .Ltail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -64(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 64-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vpsrldq $8,%xmm7,%xmm7 subq $0x10,%rcx jz .Ltail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -80(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 96-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq 
$0x00,%xmm7,%xmm8,%xmm2 vmovdqu 128-64(%rsi),%xmm7 subq $0x10,%rcx jz .Ltail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -96(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 112-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vpsrldq $8,%xmm7,%xmm7 subq $0x10,%rcx jz .Ltail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -112(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 144-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovq 184-64(%rsi),%xmm7 subq $0x10,%rcx jmp .Ltail_avx .align 32 .Ltail_avx: vpxor %xmm10,%xmm15,%xmm15 .Ltail_no_xor_avx: vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovdqu (%r10),%xmm12 vpxor %xmm0,%xmm3,%xmm10 vpxor %xmm1,%xmm4,%xmm11 vpxor %xmm2,%xmm5,%xmm5 vpxor %xmm10,%xmm5,%xmm5 vpxor %xmm11,%xmm5,%xmm5 vpslldq $8,%xmm5,%xmm9 vpsrldq $8,%xmm5,%xmm5 vpxor %xmm9,%xmm10,%xmm10 vpxor %xmm5,%xmm11,%xmm11 vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9 vpalignr $8,%xmm10,%xmm10,%xmm10 vpxor %xmm9,%xmm10,%xmm10 vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9 vpalignr $8,%xmm10,%xmm10,%xmm10 vpxor %xmm11,%xmm10,%xmm10 vpxor %xmm9,%xmm10,%xmm10 cmpq $0,%rcx jne .Lshort_avx vpshufb %xmm13,%xmm10,%xmm10 vmovdqu %xmm10,(%rdi) vzeroupper ret .cfi_endproc .size gcm_ghash_avx,.-gcm_ghash_avx .section .rodata .align 64 .Lbswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .L0x1c2_polynomial: .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 .L7_mask: .long 7,0,7,0 .align 64 .byte 
71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 64 .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghashv8-armv7-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) #include #if __ARM_MAX_ARCH__>=7 .text .fpu neon .code 32 #undef __thumb2__ .globl gcm_init_v8 .hidden gcm_init_v8 .type gcm_init_v8,%function .align 4 gcm_init_v8: AARCH64_VALID_CALL_TARGET vld1.64 {q9},[r1] @ load input H vmov.i8 q11,#0xe1 vshl.i64 q11,q11,#57 @ 0xc2.0 vext.8 q3,q9,q9,#8 vshr.u64 q10,q11,#63 vdup.32 q9,d18[1] vext.8 q8,q10,q11,#8 @ t0=0xc2....01 vshr.u64 q10,q3,#63 vshr.s32 q9,q9,#31 @ broadcast carry bit vand q10,q10,q8 vshl.i64 q3,q3,#1 vext.8 q10,q10,q10,#8 vand q8,q8,q9 vorr q3,q3,q10 @ H<<<=1 veor q12,q3,q8 @ twisted H vst1.64 {q12},[r0]! @ store Htable[0] @ calculate H^2 vext.8 q8,q12,q12,#8 @ Karatsuba pre-processing .byte 0xa8,0x0e,0xa8,0xf2 @ pmull q0,q12,q12 veor q8,q8,q12 .byte 0xa9,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q12 .byte 0xa0,0x2e,0xa0,0xf2 @ pmull q1,q8,q8 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing veor q10,q0,q2 veor q1,q1,q9 veor q1,q1,q10 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase vmov d4,d3 @ Xh|Xm - 256-bit result vmov d3,d0 @ Xm is rotated Xl veor q0,q1,q10 vext.8 q10,q0,q0,#8 @ 2nd phase .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 veor q10,q10,q2 veor q14,q0,q10 vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing veor q9,q9,q14 vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed vst1.64 {q13,q14},[r0]! 
@ store Htable[1..2] bx lr .size gcm_init_v8,.-gcm_init_v8 .globl gcm_gmult_v8 .hidden gcm_gmult_v8 .type gcm_gmult_v8,%function .align 4 gcm_gmult_v8: AARCH64_VALID_CALL_TARGET vld1.64 {q9},[r0] @ load Xi vmov.i8 q11,#0xe1 vld1.64 {q12,q13},[r1] @ load twisted H, ... vshl.u64 q11,q11,#57 #ifndef __ARMEB__ vrev64.8 q9,q9 #endif vext.8 q3,q9,q9,#8 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo veor q9,q9,q3 @ Karatsuba pre-processing .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) vext.8 q9,q0,q2,#8 @ Karatsuba post-processing veor q10,q0,q2 veor q1,q1,q9 veor q1,q1,q10 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction vmov d4,d3 @ Xh|Xm - 256-bit result vmov d3,d0 @ Xm is rotated Xl veor q0,q1,q10 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 veor q10,q10,q2 veor q0,q0,q10 #ifndef __ARMEB__ vrev64.8 q0,q0 #endif vext.8 q0,q0,q0,#8 vst1.64 {q0},[r0] @ write out Xi bx lr .size gcm_gmult_v8,.-gcm_gmult_v8 .globl gcm_ghash_v8 .hidden gcm_ghash_v8 .type gcm_ghash_v8,%function .align 4 gcm_ghash_v8: AARCH64_VALID_CALL_TARGET vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so vld1.64 {q0},[r0] @ load [rotated] Xi @ "[rotated]" means that @ loaded value would have @ to be rotated in order to @ make it appear as in @ algorithm specification subs r3,r3,#32 @ see if r3 is 32 or larger mov r12,#16 @ r12 is used as post- @ increment for input pointer; @ as loop is modulo-scheduled @ r12 is zeroed just in time @ to preclude overstepping @ inp[len], which means that @ last block[s] are actually @ loaded twice, but last @ copy is not processed vld1.64 {q12,q13},[r1]! @ load twisted H, ..., H^2 vmov.i8 q11,#0xe1 vld1.64 {q14},[r1] moveq r12,#0 @ is it time to zero r12? vext.8 q0,q0,q0,#8 @ rotate Xi vld1.64 {q8},[r2]! 
@ load [rotated] I[0] vshl.u64 q11,q11,#57 @ compose 0xc2.0 constant #ifndef __ARMEB__ vrev64.8 q8,q8 vrev64.8 q0,q0 #endif vext.8 q3,q8,q8,#8 @ rotate I[0] blo .Lodd_tail_v8 @ r3 was less than 32 vld1.64 {q9},[r2],r12 @ load [rotated] I[1] #ifndef __ARMEB__ vrev64.8 q9,q9 #endif vext.8 q7,q9,q9,#8 veor q3,q3,q0 @ I[i]^=Xi .byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 veor q9,q9,q7 @ Karatsuba pre-processing .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 b .Loop_mod2x_v8 .align 4 .Loop_mod2x_v8: vext.8 q10,q3,q3,#8 subs r3,r3,#32 @ is there more data? .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo movlo r12,#0 @ is it time to zero r12? .byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9 veor q10,q10,q3 @ Karatsuba pre-processing .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi veor q0,q0,q4 @ accumulate .byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2] veor q2,q2,q6 moveq r12,#0 @ is it time to zero r12? veor q1,q1,q5 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing veor q10,q0,q2 veor q1,q1,q9 vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3] #ifndef __ARMEB__ vrev64.8 q8,q8 #endif veor q1,q1,q10 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction #ifndef __ARMEB__ vrev64.8 q9,q9 #endif vmov d4,d3 @ Xh|Xm - 256-bit result vmov d3,d0 @ Xm is rotated Xl vext.8 q7,q9,q9,#8 vext.8 q3,q8,q8,#8 veor q0,q1,q10 .byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 veor q3,q3,q2 @ accumulate q3 early vext.8 q10,q0,q0,#8 @ 2nd phase of reduction .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 veor q3,q3,q10 veor q9,q9,q7 @ Karatsuba pre-processing veor q3,q3,q0 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 bhs .Loop_mod2x_v8 @ there was at least 32 more bytes veor q2,q2,q10 vext.8 q3,q8,q8,#8 @ re-construct q3 adds r3,r3,#32 @ re-construct r3 veor q0,q0,q2 @ re-construct q0 beq .Ldone_v8 @ is r3 zero? 
.Lodd_tail_v8: vext.8 q10,q0,q0,#8 veor q3,q3,q0 @ inp^=Xi veor q9,q8,q10 @ q9 is rotated inp^Xi .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo veor q9,q9,q3 @ Karatsuba pre-processing .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) vext.8 q9,q0,q2,#8 @ Karatsuba post-processing veor q10,q0,q2 veor q1,q1,q9 veor q1,q1,q10 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction vmov d4,d3 @ Xh|Xm - 256-bit result vmov d3,d0 @ Xm is rotated Xl veor q0,q1,q10 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 veor q10,q10,q2 veor q0,q0,q10 .Ldone_v8: #ifndef __ARMEB__ vrev64.8 q0,q0 #endif vext.8 q0,q0,q0,#8 vst1.64 {q0},[r0] @ write out Xi vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so bx lr .size gcm_ghash_v8,.-gcm_ghash_v8 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghashv8-armv8-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include #if __ARM_MAX_ARCH__>=7 .text .globl _gcm_init_v8 .private_extern _gcm_init_v8 .align 4 _gcm_init_v8: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x1] //load input H movi v19.16b,#0xe1 shl v19.2d,v19.2d,#57 //0xc2.0 ext v3.16b,v17.16b,v17.16b,#8 ushr v18.2d,v19.2d,#63 dup v17.4s,v17.s[1] ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01 ushr v18.2d,v3.2d,#63 sshr v17.4s,v17.4s,#31 //broadcast carry bit and v18.16b,v18.16b,v16.16b shl v3.2d,v3.2d,#1 ext v18.16b,v18.16b,v18.16b,#8 and v16.16b,v16.16b,v17.16b orr v3.16b,v3.16b,v18.16b //H<<<=1 eor v20.16b,v3.16b,v16.16b //twisted H st1 {v20.2d},[x0],#16 //store Htable[0] //calculate H^2 ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing pmull v0.1q,v20.1d,v20.1d eor v16.16b,v16.16b,v20.16b pmull2 v2.1q,v20.2d,v20.2d pmull v1.1q,v16.1d,v16.1d ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v22.16b,v0.16b,v18.16b ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2] //calculate H^3 and H^4 pmull v0.1q,v20.1d, v22.1d pmull v5.1q,v22.1d,v22.1d pmull2 v2.1q,v20.2d, v22.2d pmull2 v7.1q,v22.2d,v22.2d pmull v1.1q,v16.1d,v17.1d pmull v6.1q,v17.1d,v17.1d ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing ext v17.16b,v5.16b,v7.16b,#8 eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v16.16b eor v4.16b,v5.16b,v7.16b eor v6.16b,v6.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase eor v6.16b,v6.16b,v4.16b pmull v4.1q,v5.1d,v19.1d ins v2.d[0],v1.d[1] ins v7.d[0],v6.d[1] ins v1.d[1],v0.d[0] 
ins v6.d[1],v5.d[0] eor v0.16b,v1.16b,v18.16b eor v5.16b,v6.16b,v4.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase ext v4.16b,v5.16b,v5.16b,#8 pmull v0.1q,v0.1d,v19.1d pmull v5.1q,v5.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v4.16b,v4.16b,v7.16b eor v20.16b, v0.16b,v18.16b //H^3 eor v22.16b,v5.16b,v4.16b //H^4 ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing ext v17.16b,v22.16b,v22.16b,#8 eor v16.16b,v16.16b,v20.16b eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5] ret .globl _gcm_gmult_v8 .private_extern _gcm_gmult_v8 .align 4 _gcm_gmult_v8: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x0] //load Xi movi v19.16b,#0xe1 ld1 {v20.2d,v21.2d},[x1] //load twisted H, ... shl v19.2d,v19.2d,#57 #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ext v3.16b,v17.16b,v17.16b,#8 pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif ext v0.16b,v0.16b,v0.16b,#8 st1 {v0.2d},[x0] //write out Xi ret .globl _gcm_ghash_v8 .private_extern _gcm_ghash_v8 .align 4 _gcm_ghash_v8: AARCH64_VALID_CALL_TARGET cmp x3,#64 b.hs Lgcm_ghash_v8_4x ld1 {v0.2d},[x0] //load [rotated] Xi //"[rotated]" means that //loaded value would have //to be rotated in order to //make it appear as in //algorithm specification subs x3,x3,#32 //see if x3 is 32 or larger mov x12,#16 //x12 is used as post- //increment for input pointer; //as loop is modulo-scheduled //x12 is 
zeroed just in time //to preclude overstepping //inp[len], which means that //last block[s] are actually //loaded twice, but last //copy is not processed ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2 movi v19.16b,#0xe1 ld1 {v22.2d},[x1] csel x12,xzr,x12,eq //is it time to zero x12? ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi ld1 {v16.2d},[x2],#16 //load [rotated] I[0] shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant #ifndef __AARCH64EB__ rev64 v16.16b,v16.16b rev64 v0.16b,v0.16b #endif ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0] b.lo Lodd_tail_v8 //x3 was less than 32 ld1 {v17.2d},[x2],x12 //load [rotated] I[1] #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ext v7.16b,v17.16b,v17.16b,#8 eor v3.16b,v3.16b,v0.16b //I[i]^=Xi pmull v4.1q,v20.1d,v7.1d //H·Ii+1 eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing pmull2 v6.1q,v20.2d,v7.2d b Loop_mod2x_v8 .align 4 Loop_mod2x_v8: ext v18.16b,v3.16b,v3.16b,#8 subs x3,x3,#32 //is there more data? pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo csel x12,xzr,x12,lo //is it time to zero x12? pmull v5.1q,v21.1d,v17.1d eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi eor v0.16b,v0.16b,v4.16b //accumulate pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2] eor v2.16b,v2.16b,v6.16b csel x12,xzr,x12,eq //is it time to zero x12? 
eor v1.16b,v1.16b,v5.16b ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3] #ifndef __AARCH64EB__ rev64 v16.16b,v16.16b #endif eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] ext v7.16b,v17.16b,v17.16b,#8 ext v3.16b,v16.16b,v16.16b,#8 eor v0.16b,v1.16b,v18.16b pmull v4.1q,v20.1d,v7.1d //H·Ii+1 eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v3.16b,v3.16b,v18.16b eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing eor v3.16b,v3.16b,v0.16b pmull2 v6.1q,v20.2d,v7.2d b.hs Loop_mod2x_v8 //there was at least 32 more bytes eor v2.16b,v2.16b,v18.16b ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b adds x3,x3,#32 //re-construct x3 eor v0.16b,v0.16b,v2.16b //re-construct v0.16b b.eq Ldone_v8 //is x3 zero? 
Lodd_tail_v8: ext v18.16b,v0.16b,v0.16b,#8 eor v3.16b,v3.16b,v0.16b //inp^=Xi eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b Ldone_v8: #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif ext v0.16b,v0.16b,v0.16b,#8 st1 {v0.2d},[x0] //write out Xi ret .align 4 gcm_ghash_v8_4x: Lgcm_ghash_v8_4x: ld1 {v0.2d},[x0] //load [rotated] Xi ld1 {v20.2d,v21.2d,v22.2d},[x1],#48 //load twisted H, ..., H^2 movi v19.16b,#0xe1 ld1 {v26.2d,v27.2d,v28.2d},[x1] //load twisted H^3, ..., H^4 shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64 #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b rev64 v5.16b,v5.16b rev64 v6.16b,v6.16b rev64 v7.16b,v7.16b rev64 v4.16b,v4.16b #endif ext v25.16b,v7.16b,v7.16b,#8 ext v24.16b,v6.16b,v6.16b,#8 ext v23.16b,v5.16b,v5.16b,#8 pmull v29.1q,v20.1d,v25.1d //H·Ii+3 eor v7.16b,v7.16b,v25.16b pmull2 v31.1q,v20.2d,v25.2d pmull v30.1q,v21.1d,v7.1d pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2 eor v6.16b,v6.16b,v24.16b pmull2 v24.1q,v22.2d,v24.2d pmull2 v6.1q,v21.2d,v6.2d eor v29.16b,v29.16b,v16.16b eor v31.16b,v31.16b,v24.16b eor v30.16b,v30.16b,v6.16b pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1 eor v5.16b,v5.16b,v23.16b pmull2 v23.1q,v26.2d,v23.2d pmull v5.1q,v27.1d,v5.1d eor v29.16b,v29.16b,v7.16b eor v31.16b,v31.16b,v23.16b eor v30.16b,v30.16b,v5.16b subs x3,x3,#128 b.lo Ltail4x b Loop4x .align 4 Loop4x: eor v16.16b,v4.16b,v0.16b ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64 ext 
v3.16b,v16.16b,v16.16b,#8 #ifndef __AARCH64EB__ rev64 v5.16b,v5.16b rev64 v6.16b,v6.16b rev64 v7.16b,v7.16b rev64 v4.16b,v4.16b #endif pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v28.2d,v3.2d ext v25.16b,v7.16b,v7.16b,#8 pmull2 v1.1q,v27.2d,v16.2d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b ext v24.16b,v6.16b,v6.16b,#8 eor v1.16b,v1.16b,v30.16b ext v23.16b,v5.16b,v5.16b,#8 ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b pmull v29.1q,v20.1d,v25.1d //H·Ii+3 eor v7.16b,v7.16b,v25.16b eor v1.16b,v1.16b,v17.16b pmull2 v31.1q,v20.2d,v25.2d eor v1.16b,v1.16b,v18.16b pmull v30.1q,v21.1d,v7.1d pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2 eor v6.16b,v6.16b,v24.16b pmull2 v24.1q,v22.2d,v24.2d eor v0.16b,v1.16b,v18.16b pmull2 v6.1q,v21.2d,v6.2d eor v29.16b,v29.16b,v16.16b eor v31.16b,v31.16b,v24.16b eor v30.16b,v30.16b,v6.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1 eor v5.16b,v5.16b,v23.16b eor v18.16b,v18.16b,v2.16b pmull2 v23.1q,v26.2d,v23.2d pmull v5.1q,v27.1d,v5.1d eor v0.16b,v0.16b,v18.16b eor v29.16b,v29.16b,v7.16b eor v31.16b,v31.16b,v23.16b ext v0.16b,v0.16b,v0.16b,#8 eor v30.16b,v30.16b,v5.16b subs x3,x3,#64 b.hs Loop4x Ltail4x: eor v16.16b,v4.16b,v0.16b ext v3.16b,v16.16b,v16.16b,#8 pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v28.2d,v3.2d pmull2 v1.1q,v27.2d,v16.2d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b eor v1.16b,v1.16b,v30.16b adds x3,x3,#64 b.eq Ldone4x cmp x3,#32 b.lo Lone b.eq Ltwo Lthree: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v4.2d,v5.2d,v6.2d},[x2] eor v1.16b,v1.16b,v18.16b #ifndef __AARCH64EB__ rev64 v5.16b,v5.16b rev64 v6.16b,v6.16b rev64 v4.16b,v4.16b #endif pmull v18.1q,v0.1d,v19.1d //1st phase of 
reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] ext v24.16b,v6.16b,v6.16b,#8 ext v23.16b,v5.16b,v5.16b,#8 eor v0.16b,v1.16b,v18.16b pmull v29.1q,v20.1d,v24.1d //H·Ii+2 eor v6.16b,v6.16b,v24.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b pmull2 v31.1q,v20.2d,v24.2d pmull v30.1q,v21.1d,v6.1d eor v0.16b,v0.16b,v18.16b pmull v7.1q,v22.1d,v23.1d //H^2·Ii+1 eor v5.16b,v5.16b,v23.16b ext v0.16b,v0.16b,v0.16b,#8 pmull2 v23.1q,v22.2d,v23.2d eor v16.16b,v4.16b,v0.16b pmull2 v5.1q,v21.2d,v5.2d ext v3.16b,v16.16b,v16.16b,#8 eor v29.16b,v29.16b,v7.16b eor v31.16b,v31.16b,v23.16b eor v30.16b,v30.16b,v5.16b pmull v0.1q,v26.1d,v3.1d //H^3·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v26.2d,v3.2d pmull v1.1q,v27.1d,v16.1d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b eor v1.16b,v1.16b,v30.16b b Ldone4x .align 4 Ltwo: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v4.2d,v5.2d},[x2] eor v1.16b,v1.16b,v18.16b #ifndef __AARCH64EB__ rev64 v5.16b,v5.16b rev64 v4.16b,v4.16b #endif pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] ext v23.16b,v5.16b,v5.16b,#8 eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b ext v0.16b,v0.16b,v0.16b,#8 pmull v29.1q,v20.1d,v23.1d //H·Ii+1 eor v5.16b,v5.16b,v23.16b eor v16.16b,v4.16b,v0.16b ext v3.16b,v16.16b,v16.16b,#8 pmull2 v31.1q,v20.2d,v23.2d pmull v30.1q,v21.1d,v5.1d pmull v0.1q,v22.1d,v3.1d //H^2·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v22.2d,v3.2d pmull2 v1.1q,v21.2d,v16.2d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b eor v1.16b,v1.16b,v30.16b b Ldone4x .align 4 Lone: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v4.2d},[x2] eor v1.16b,v1.16b,v18.16b #ifndef __AARCH64EB__ rev64 
v4.16b,v4.16b #endif pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b ext v0.16b,v0.16b,v0.16b,#8 eor v16.16b,v4.16b,v0.16b ext v3.16b,v16.16b,v16.16b,#8 pmull v0.1q,v20.1d,v3.1d eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v20.2d,v3.2d pmull v1.1q,v21.1d,v16.1d Ldone4x: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b ext v0.16b,v0.16b,v0.16b,#8 #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif st1 {v0.2d},[x0] //write out Xi ret .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghashv8-armv8-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include #if __ARM_MAX_ARCH__>=7 .text .arch armv8-a+crypto .globl gcm_init_v8 .hidden gcm_init_v8 .type gcm_init_v8,%function .align 4 gcm_init_v8: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x1] //load input H movi v19.16b,#0xe1 shl v19.2d,v19.2d,#57 //0xc2.0 ext v3.16b,v17.16b,v17.16b,#8 ushr v18.2d,v19.2d,#63 dup v17.4s,v17.s[1] ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01 ushr v18.2d,v3.2d,#63 sshr v17.4s,v17.4s,#31 //broadcast carry bit and v18.16b,v18.16b,v16.16b shl v3.2d,v3.2d,#1 ext v18.16b,v18.16b,v18.16b,#8 and v16.16b,v16.16b,v17.16b orr v3.16b,v3.16b,v18.16b //H<<<=1 eor v20.16b,v3.16b,v16.16b //twisted H st1 {v20.2d},[x0],#16 //store Htable[0] //calculate H^2 ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing pmull v0.1q,v20.1d,v20.1d eor v16.16b,v16.16b,v20.16b pmull2 v2.1q,v20.2d,v20.2d pmull v1.1q,v16.1d,v16.1d ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v22.16b,v0.16b,v18.16b ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2] //calculate H^3 and H^4 pmull v0.1q,v20.1d, v22.1d pmull v5.1q,v22.1d,v22.1d pmull2 v2.1q,v20.2d, v22.2d pmull2 v7.1q,v22.2d,v22.2d pmull v1.1q,v16.1d,v17.1d pmull v6.1q,v17.1d,v17.1d ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing ext v17.16b,v5.16b,v7.16b,#8 eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v16.16b eor v4.16b,v5.16b,v7.16b eor v6.16b,v6.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase eor v6.16b,v6.16b,v4.16b pmull v4.1q,v5.1d,v19.1d ins v2.d[0],v1.d[1] ins 
v7.d[0],v6.d[1] ins v1.d[1],v0.d[0] ins v6.d[1],v5.d[0] eor v0.16b,v1.16b,v18.16b eor v5.16b,v6.16b,v4.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase ext v4.16b,v5.16b,v5.16b,#8 pmull v0.1q,v0.1d,v19.1d pmull v5.1q,v5.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v4.16b,v4.16b,v7.16b eor v20.16b, v0.16b,v18.16b //H^3 eor v22.16b,v5.16b,v4.16b //H^4 ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing ext v17.16b,v22.16b,v22.16b,#8 eor v16.16b,v16.16b,v20.16b eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5] ret .size gcm_init_v8,.-gcm_init_v8 .globl gcm_gmult_v8 .hidden gcm_gmult_v8 .type gcm_gmult_v8,%function .align 4 gcm_gmult_v8: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x0] //load Xi movi v19.16b,#0xe1 ld1 {v20.2d,v21.2d},[x1] //load twisted H, ... shl v19.2d,v19.2d,#57 #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ext v3.16b,v17.16b,v17.16b,#8 pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif ext v0.16b,v0.16b,v0.16b,#8 st1 {v0.2d},[x0] //write out Xi ret .size gcm_gmult_v8,.-gcm_gmult_v8 .globl gcm_ghash_v8 .hidden gcm_ghash_v8 .type gcm_ghash_v8,%function .align 4 gcm_ghash_v8: AARCH64_VALID_CALL_TARGET cmp x3,#64 b.hs .Lgcm_ghash_v8_4x ld1 {v0.2d},[x0] //load [rotated] Xi //"[rotated]" means that //loaded value would have //to be rotated in order to //make it appear as in //algorithm specification subs 
x3,x3,#32 //see if x3 is 32 or larger mov x12,#16 //x12 is used as post- //increment for input pointer; //as loop is modulo-scheduled //x12 is zeroed just in time //to preclude overstepping //inp[len], which means that //last block[s] are actually //loaded twice, but last //copy is not processed ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2 movi v19.16b,#0xe1 ld1 {v22.2d},[x1] csel x12,xzr,x12,eq //is it time to zero x12? ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi ld1 {v16.2d},[x2],#16 //load [rotated] I[0] shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant #ifndef __AARCH64EB__ rev64 v16.16b,v16.16b rev64 v0.16b,v0.16b #endif ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0] b.lo .Lodd_tail_v8 //x3 was less than 32 ld1 {v17.2d},[x2],x12 //load [rotated] I[1] #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ext v7.16b,v17.16b,v17.16b,#8 eor v3.16b,v3.16b,v0.16b //I[i]^=Xi pmull v4.1q,v20.1d,v7.1d //H·Ii+1 eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing pmull2 v6.1q,v20.2d,v7.2d b .Loop_mod2x_v8 .align 4 .Loop_mod2x_v8: ext v18.16b,v3.16b,v3.16b,#8 subs x3,x3,#32 //is there more data? pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo csel x12,xzr,x12,lo //is it time to zero x12? pmull v5.1q,v21.1d,v17.1d eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi eor v0.16b,v0.16b,v4.16b //accumulate pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2] eor v2.16b,v2.16b,v6.16b csel x12,xzr,x12,eq //is it time to zero x12? 
eor v1.16b,v1.16b,v5.16b ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3] #ifndef __AARCH64EB__ rev64 v16.16b,v16.16b #endif eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] ext v7.16b,v17.16b,v17.16b,#8 ext v3.16b,v16.16b,v16.16b,#8 eor v0.16b,v1.16b,v18.16b pmull v4.1q,v20.1d,v7.1d //H·Ii+1 eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v3.16b,v3.16b,v18.16b eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing eor v3.16b,v3.16b,v0.16b pmull2 v6.1q,v20.2d,v7.2d b.hs .Loop_mod2x_v8 //there was at least 32 more bytes eor v2.16b,v2.16b,v18.16b ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b adds x3,x3,#32 //re-construct x3 eor v0.16b,v0.16b,v2.16b //re-construct v0.16b b.eq .Ldone_v8 //is x3 zero? 
.Lodd_tail_v8: ext v18.16b,v0.16b,v0.16b,#8 eor v3.16b,v3.16b,v0.16b //inp^=Xi eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b .Ldone_v8: #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif ext v0.16b,v0.16b,v0.16b,#8 st1 {v0.2d},[x0] //write out Xi ret .size gcm_ghash_v8,.-gcm_ghash_v8 .type gcm_ghash_v8_4x,%function .align 4 gcm_ghash_v8_4x: .Lgcm_ghash_v8_4x: ld1 {v0.2d},[x0] //load [rotated] Xi ld1 {v20.2d,v21.2d,v22.2d},[x1],#48 //load twisted H, ..., H^2 movi v19.16b,#0xe1 ld1 {v26.2d,v27.2d,v28.2d},[x1] //load twisted H^3, ..., H^4 shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64 #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b rev64 v5.16b,v5.16b rev64 v6.16b,v6.16b rev64 v7.16b,v7.16b rev64 v4.16b,v4.16b #endif ext v25.16b,v7.16b,v7.16b,#8 ext v24.16b,v6.16b,v6.16b,#8 ext v23.16b,v5.16b,v5.16b,#8 pmull v29.1q,v20.1d,v25.1d //H·Ii+3 eor v7.16b,v7.16b,v25.16b pmull2 v31.1q,v20.2d,v25.2d pmull v30.1q,v21.1d,v7.1d pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2 eor v6.16b,v6.16b,v24.16b pmull2 v24.1q,v22.2d,v24.2d pmull2 v6.1q,v21.2d,v6.2d eor v29.16b,v29.16b,v16.16b eor v31.16b,v31.16b,v24.16b eor v30.16b,v30.16b,v6.16b pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1 eor v5.16b,v5.16b,v23.16b pmull2 v23.1q,v26.2d,v23.2d pmull v5.1q,v27.1d,v5.1d eor v29.16b,v29.16b,v7.16b eor v31.16b,v31.16b,v23.16b eor v30.16b,v30.16b,v5.16b subs x3,x3,#128 b.lo .Ltail4x b .Loop4x .align 4 .Loop4x: eor 
v16.16b,v4.16b,v0.16b ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64 ext v3.16b,v16.16b,v16.16b,#8 #ifndef __AARCH64EB__ rev64 v5.16b,v5.16b rev64 v6.16b,v6.16b rev64 v7.16b,v7.16b rev64 v4.16b,v4.16b #endif pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v28.2d,v3.2d ext v25.16b,v7.16b,v7.16b,#8 pmull2 v1.1q,v27.2d,v16.2d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b ext v24.16b,v6.16b,v6.16b,#8 eor v1.16b,v1.16b,v30.16b ext v23.16b,v5.16b,v5.16b,#8 ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b pmull v29.1q,v20.1d,v25.1d //H·Ii+3 eor v7.16b,v7.16b,v25.16b eor v1.16b,v1.16b,v17.16b pmull2 v31.1q,v20.2d,v25.2d eor v1.16b,v1.16b,v18.16b pmull v30.1q,v21.1d,v7.1d pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2 eor v6.16b,v6.16b,v24.16b pmull2 v24.1q,v22.2d,v24.2d eor v0.16b,v1.16b,v18.16b pmull2 v6.1q,v21.2d,v6.2d eor v29.16b,v29.16b,v16.16b eor v31.16b,v31.16b,v24.16b eor v30.16b,v30.16b,v6.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1 eor v5.16b,v5.16b,v23.16b eor v18.16b,v18.16b,v2.16b pmull2 v23.1q,v26.2d,v23.2d pmull v5.1q,v27.1d,v5.1d eor v0.16b,v0.16b,v18.16b eor v29.16b,v29.16b,v7.16b eor v31.16b,v31.16b,v23.16b ext v0.16b,v0.16b,v0.16b,#8 eor v30.16b,v30.16b,v5.16b subs x3,x3,#64 b.hs .Loop4x .Ltail4x: eor v16.16b,v4.16b,v0.16b ext v3.16b,v16.16b,v16.16b,#8 pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v28.2d,v3.2d pmull2 v1.1q,v27.2d,v16.2d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b eor v1.16b,v1.16b,v30.16b adds x3,x3,#64 b.eq .Ldone4x cmp x3,#32 b.lo .Lone b.eq .Ltwo .Lthree: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v4.2d,v5.2d,v6.2d},[x2] eor v1.16b,v1.16b,v18.16b #ifndef __AARCH64EB__ rev64 v5.16b,v5.16b rev64 v6.16b,v6.16b 
rev64 v4.16b,v4.16b #endif pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] ext v24.16b,v6.16b,v6.16b,#8 ext v23.16b,v5.16b,v5.16b,#8 eor v0.16b,v1.16b,v18.16b pmull v29.1q,v20.1d,v24.1d //H·Ii+2 eor v6.16b,v6.16b,v24.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b pmull2 v31.1q,v20.2d,v24.2d pmull v30.1q,v21.1d,v6.1d eor v0.16b,v0.16b,v18.16b pmull v7.1q,v22.1d,v23.1d //H^2·Ii+1 eor v5.16b,v5.16b,v23.16b ext v0.16b,v0.16b,v0.16b,#8 pmull2 v23.1q,v22.2d,v23.2d eor v16.16b,v4.16b,v0.16b pmull2 v5.1q,v21.2d,v5.2d ext v3.16b,v16.16b,v16.16b,#8 eor v29.16b,v29.16b,v7.16b eor v31.16b,v31.16b,v23.16b eor v30.16b,v30.16b,v5.16b pmull v0.1q,v26.1d,v3.1d //H^3·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v26.2d,v3.2d pmull v1.1q,v27.1d,v16.1d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b eor v1.16b,v1.16b,v30.16b b .Ldone4x .align 4 .Ltwo: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v4.2d,v5.2d},[x2] eor v1.16b,v1.16b,v18.16b #ifndef __AARCH64EB__ rev64 v5.16b,v5.16b rev64 v4.16b,v4.16b #endif pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] ext v23.16b,v5.16b,v5.16b,#8 eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b ext v0.16b,v0.16b,v0.16b,#8 pmull v29.1q,v20.1d,v23.1d //H·Ii+1 eor v5.16b,v5.16b,v23.16b eor v16.16b,v4.16b,v0.16b ext v3.16b,v16.16b,v16.16b,#8 pmull2 v31.1q,v20.2d,v23.2d pmull v30.1q,v21.1d,v5.1d pmull v0.1q,v22.1d,v3.1d //H^2·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v22.2d,v3.2d pmull2 v1.1q,v21.2d,v16.2d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b eor v1.16b,v1.16b,v30.16b b .Ldone4x .align 4 .Lone: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 
{v4.2d},[x2] eor v1.16b,v1.16b,v18.16b #ifndef __AARCH64EB__ rev64 v4.16b,v4.16b #endif pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b ext v0.16b,v0.16b,v0.16b,#8 eor v16.16b,v4.16b,v0.16b ext v3.16b,v16.16b,v16.16b,#8 pmull v0.1q,v20.1d,v3.1d eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v20.2d,v3.2d pmull v1.1q,v21.1d,v16.1d .Ldone4x: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b ext v0.16b,v0.16b,v0.16b,#8 #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif st1 {v0.2d},[x0] //write out Xi ret .size gcm_ghash_v8_4x,.-gcm_ghash_v8_4x .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/ghashv8-armv8-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include #if __ARM_MAX_ARCH__>=7 .text .arch armv8-a+crypto .globl gcm_init_v8 .def gcm_init_v8 .type 32 .endef .align 4 gcm_init_v8: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x1] //load input H movi v19.16b,#0xe1 shl v19.2d,v19.2d,#57 //0xc2.0 ext v3.16b,v17.16b,v17.16b,#8 ushr v18.2d,v19.2d,#63 dup v17.4s,v17.s[1] ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01 ushr v18.2d,v3.2d,#63 sshr v17.4s,v17.4s,#31 //broadcast carry bit and v18.16b,v18.16b,v16.16b shl v3.2d,v3.2d,#1 ext v18.16b,v18.16b,v18.16b,#8 and v16.16b,v16.16b,v17.16b orr v3.16b,v3.16b,v18.16b //H<<<=1 eor v20.16b,v3.16b,v16.16b //twisted H st1 {v20.2d},[x0],#16 //store Htable[0] //calculate H^2 ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing pmull v0.1q,v20.1d,v20.1d eor v16.16b,v16.16b,v20.16b pmull2 v2.1q,v20.2d,v20.2d pmull v1.1q,v16.1d,v16.1d ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v22.16b,v0.16b,v18.16b ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2] //calculate H^3 and H^4 pmull v0.1q,v20.1d, v22.1d pmull v5.1q,v22.1d,v22.1d pmull2 v2.1q,v20.2d, v22.2d pmull2 v7.1q,v22.2d,v22.2d pmull v1.1q,v16.1d,v17.1d pmull v6.1q,v17.1d,v17.1d ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing ext v17.16b,v5.16b,v7.16b,#8 eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v16.16b eor v4.16b,v5.16b,v7.16b eor v6.16b,v6.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase eor v6.16b,v6.16b,v4.16b pmull v4.1q,v5.1d,v19.1d ins v2.d[0],v1.d[1] ins v7.d[0],v6.d[1] 
ins v1.d[1],v0.d[0] ins v6.d[1],v5.d[0] eor v0.16b,v1.16b,v18.16b eor v5.16b,v6.16b,v4.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase ext v4.16b,v5.16b,v5.16b,#8 pmull v0.1q,v0.1d,v19.1d pmull v5.1q,v5.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v4.16b,v4.16b,v7.16b eor v20.16b, v0.16b,v18.16b //H^3 eor v22.16b,v5.16b,v4.16b //H^4 ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing ext v17.16b,v22.16b,v22.16b,#8 eor v16.16b,v16.16b,v20.16b eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5] ret .globl gcm_gmult_v8 .def gcm_gmult_v8 .type 32 .endef .align 4 gcm_gmult_v8: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x0] //load Xi movi v19.16b,#0xe1 ld1 {v20.2d,v21.2d},[x1] //load twisted H, ... shl v19.2d,v19.2d,#57 #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ext v3.16b,v17.16b,v17.16b,#8 pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif ext v0.16b,v0.16b,v0.16b,#8 st1 {v0.2d},[x0] //write out Xi ret .globl gcm_ghash_v8 .def gcm_ghash_v8 .type 32 .endef .align 4 gcm_ghash_v8: AARCH64_VALID_CALL_TARGET cmp x3,#64 b.hs Lgcm_ghash_v8_4x ld1 {v0.2d},[x0] //load [rotated] Xi //"[rotated]" means that //loaded value would have //to be rotated in order to //make it appear as in //algorithm specification subs x3,x3,#32 //see if x3 is 32 or larger mov x12,#16 //x12 is used as post- //increment for input pointer; //as loop is 
modulo-scheduled //x12 is zeroed just in time //to preclude overstepping //inp[len], which means that //last block[s] are actually //loaded twice, but last //copy is not processed ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2 movi v19.16b,#0xe1 ld1 {v22.2d},[x1] csel x12,xzr,x12,eq //is it time to zero x12? ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi ld1 {v16.2d},[x2],#16 //load [rotated] I[0] shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant #ifndef __AARCH64EB__ rev64 v16.16b,v16.16b rev64 v0.16b,v0.16b #endif ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0] b.lo Lodd_tail_v8 //x3 was less than 32 ld1 {v17.2d},[x2],x12 //load [rotated] I[1] #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ext v7.16b,v17.16b,v17.16b,#8 eor v3.16b,v3.16b,v0.16b //I[i]^=Xi pmull v4.1q,v20.1d,v7.1d //H·Ii+1 eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing pmull2 v6.1q,v20.2d,v7.2d b Loop_mod2x_v8 .align 4 Loop_mod2x_v8: ext v18.16b,v3.16b,v3.16b,#8 subs x3,x3,#32 //is there more data? pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo csel x12,xzr,x12,lo //is it time to zero x12? pmull v5.1q,v21.1d,v17.1d eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi eor v0.16b,v0.16b,v4.16b //accumulate pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2] eor v2.16b,v2.16b,v6.16b csel x12,xzr,x12,eq //is it time to zero x12? 
eor v1.16b,v1.16b,v5.16b ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3] #ifndef __AARCH64EB__ rev64 v16.16b,v16.16b #endif eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] ext v7.16b,v17.16b,v17.16b,#8 ext v3.16b,v16.16b,v16.16b,#8 eor v0.16b,v1.16b,v18.16b pmull v4.1q,v20.1d,v7.1d //H·Ii+1 eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v3.16b,v3.16b,v18.16b eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing eor v3.16b,v3.16b,v0.16b pmull2 v6.1q,v20.2d,v7.2d b.hs Loop_mod2x_v8 //there was at least 32 more bytes eor v2.16b,v2.16b,v18.16b ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b adds x3,x3,#32 //re-construct x3 eor v0.16b,v0.16b,v2.16b //re-construct v0.16b b.eq Ldone_v8 //is x3 zero? 
Lodd_tail_v8: ext v18.16b,v0.16b,v0.16b,#8 eor v3.16b,v3.16b,v0.16b //inp^=Xi eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b Ldone_v8: #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif ext v0.16b,v0.16b,v0.16b,#8 st1 {v0.2d},[x0] //write out Xi ret .def gcm_ghash_v8_4x .type 32 .endef .align 4 gcm_ghash_v8_4x: Lgcm_ghash_v8_4x: ld1 {v0.2d},[x0] //load [rotated] Xi ld1 {v20.2d,v21.2d,v22.2d},[x1],#48 //load twisted H, ..., H^2 movi v19.16b,#0xe1 ld1 {v26.2d,v27.2d,v28.2d},[x1] //load twisted H^3, ..., H^4 shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64 #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b rev64 v5.16b,v5.16b rev64 v6.16b,v6.16b rev64 v7.16b,v7.16b rev64 v4.16b,v4.16b #endif ext v25.16b,v7.16b,v7.16b,#8 ext v24.16b,v6.16b,v6.16b,#8 ext v23.16b,v5.16b,v5.16b,#8 pmull v29.1q,v20.1d,v25.1d //H·Ii+3 eor v7.16b,v7.16b,v25.16b pmull2 v31.1q,v20.2d,v25.2d pmull v30.1q,v21.1d,v7.1d pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2 eor v6.16b,v6.16b,v24.16b pmull2 v24.1q,v22.2d,v24.2d pmull2 v6.1q,v21.2d,v6.2d eor v29.16b,v29.16b,v16.16b eor v31.16b,v31.16b,v24.16b eor v30.16b,v30.16b,v6.16b pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1 eor v5.16b,v5.16b,v23.16b pmull2 v23.1q,v26.2d,v23.2d pmull v5.1q,v27.1d,v5.1d eor v29.16b,v29.16b,v7.16b eor v31.16b,v31.16b,v23.16b eor v30.16b,v30.16b,v5.16b subs x3,x3,#128 b.lo Ltail4x b Loop4x .align 4 Loop4x: eor v16.16b,v4.16b,v0.16b ld1 
{v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64 ext v3.16b,v16.16b,v16.16b,#8 #ifndef __AARCH64EB__ rev64 v5.16b,v5.16b rev64 v6.16b,v6.16b rev64 v7.16b,v7.16b rev64 v4.16b,v4.16b #endif pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v28.2d,v3.2d ext v25.16b,v7.16b,v7.16b,#8 pmull2 v1.1q,v27.2d,v16.2d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b ext v24.16b,v6.16b,v6.16b,#8 eor v1.16b,v1.16b,v30.16b ext v23.16b,v5.16b,v5.16b,#8 ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b pmull v29.1q,v20.1d,v25.1d //H·Ii+3 eor v7.16b,v7.16b,v25.16b eor v1.16b,v1.16b,v17.16b pmull2 v31.1q,v20.2d,v25.2d eor v1.16b,v1.16b,v18.16b pmull v30.1q,v21.1d,v7.1d pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2 eor v6.16b,v6.16b,v24.16b pmull2 v24.1q,v22.2d,v24.2d eor v0.16b,v1.16b,v18.16b pmull2 v6.1q,v21.2d,v6.2d eor v29.16b,v29.16b,v16.16b eor v31.16b,v31.16b,v24.16b eor v30.16b,v30.16b,v6.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1 eor v5.16b,v5.16b,v23.16b eor v18.16b,v18.16b,v2.16b pmull2 v23.1q,v26.2d,v23.2d pmull v5.1q,v27.1d,v5.1d eor v0.16b,v0.16b,v18.16b eor v29.16b,v29.16b,v7.16b eor v31.16b,v31.16b,v23.16b ext v0.16b,v0.16b,v0.16b,#8 eor v30.16b,v30.16b,v5.16b subs x3,x3,#64 b.hs Loop4x Ltail4x: eor v16.16b,v4.16b,v0.16b ext v3.16b,v16.16b,v16.16b,#8 pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v28.2d,v3.2d pmull2 v1.1q,v27.2d,v16.2d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b eor v1.16b,v1.16b,v30.16b adds x3,x3,#64 b.eq Ldone4x cmp x3,#32 b.lo Lone b.eq Ltwo Lthree: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v4.2d,v5.2d,v6.2d},[x2] eor v1.16b,v1.16b,v18.16b #ifndef __AARCH64EB__ rev64 v5.16b,v5.16b rev64 v6.16b,v6.16b rev64 v4.16b,v4.16b #endif pmull 
v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] ext v24.16b,v6.16b,v6.16b,#8 ext v23.16b,v5.16b,v5.16b,#8 eor v0.16b,v1.16b,v18.16b pmull v29.1q,v20.1d,v24.1d //H·Ii+2 eor v6.16b,v6.16b,v24.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b pmull2 v31.1q,v20.2d,v24.2d pmull v30.1q,v21.1d,v6.1d eor v0.16b,v0.16b,v18.16b pmull v7.1q,v22.1d,v23.1d //H^2·Ii+1 eor v5.16b,v5.16b,v23.16b ext v0.16b,v0.16b,v0.16b,#8 pmull2 v23.1q,v22.2d,v23.2d eor v16.16b,v4.16b,v0.16b pmull2 v5.1q,v21.2d,v5.2d ext v3.16b,v16.16b,v16.16b,#8 eor v29.16b,v29.16b,v7.16b eor v31.16b,v31.16b,v23.16b eor v30.16b,v30.16b,v5.16b pmull v0.1q,v26.1d,v3.1d //H^3·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v26.2d,v3.2d pmull v1.1q,v27.1d,v16.1d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b eor v1.16b,v1.16b,v30.16b b Ldone4x .align 4 Ltwo: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v4.2d,v5.2d},[x2] eor v1.16b,v1.16b,v18.16b #ifndef __AARCH64EB__ rev64 v5.16b,v5.16b rev64 v4.16b,v4.16b #endif pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] ext v23.16b,v5.16b,v5.16b,#8 eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b ext v0.16b,v0.16b,v0.16b,#8 pmull v29.1q,v20.1d,v23.1d //H·Ii+1 eor v5.16b,v5.16b,v23.16b eor v16.16b,v4.16b,v0.16b ext v3.16b,v16.16b,v16.16b,#8 pmull2 v31.1q,v20.2d,v23.2d pmull v30.1q,v21.1d,v5.1d pmull v0.1q,v22.1d,v3.1d //H^2·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v22.2d,v3.2d pmull2 v1.1q,v21.2d,v16.2d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b eor v1.16b,v1.16b,v30.16b b Ldone4x .align 4 Lone: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v4.2d},[x2] eor v1.16b,v1.16b,v18.16b 
#ifndef __AARCH64EB__ rev64 v4.16b,v4.16b #endif pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b ext v0.16b,v0.16b,v0.16b,#8 eor v16.16b,v4.16b,v0.16b ext v3.16b,v16.16b,v16.16b,#8 pmull v0.1q,v20.1d,v3.1d eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v20.2d,v3.2d pmull v1.1q,v21.1d,v16.1d Ldone4x: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b ext v0.16b,v0.16b,v0.16b,#8 #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif st1 {v0.2d},[x0] //write out Xi ret .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/p256-armv8-asm-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include "CNIOBoringSSL_arm_arch.h" .section __TEXT,__const .align 5 Lpoly: .quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001 LRR: // 2^512 mod P precomputed for NIST P256 polynomial .quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd Lone_mont: .quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe Lone: .quad 1,0,0,0 Lord: .quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000 LordK: .quad 0xccd1c8aaee00bc4f .byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text // void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4], // const BN_ULONG x2[4]); .globl _ecp_nistz256_mul_mont .private_extern _ecp_nistz256_mul_mont .align 4 _ecp_nistz256_mul_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! add x29,sp,#0 stp x19,x20,[sp,#16] ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_mul_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl _ecp_nistz256_sqr_mont .private_extern _ecp_nistz256_sqr_mont .align 4 _ecp_nistz256_sqr_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! 
add x29,sp,#0 stp x19,x20,[sp,#16] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sqr_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_div_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl _ecp_nistz256_div_by_2 .private_extern _ecp_nistz256_div_by_2 .align 4 _ecp_nistz256_div_by_2: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_div_by_2 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_mul_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl _ecp_nistz256_mul_by_2 .private_extern _ecp_nistz256_mul_by_2 .align 4 _ecp_nistz256_mul_by_2: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 bl __ecp_nistz256_add_to // ret = a+a // 2*a ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_mul_by_3(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl _ecp_nistz256_mul_by_3 .private_extern _ecp_nistz256_mul_by_3 .align 4 _ecp_nistz256_mul_by_3: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 mov x4,x14 mov x5,x15 mov x6,x16 mov x7,x17 bl __ecp_nistz256_add_to // ret = a+a // 2*a mov x8,x4 mov x9,x5 mov x10,x6 mov x11,x7 bl __ecp_nistz256_add_to // ret += a // 2*a+a=3*a ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_sub(BN_ULONG x0[4],const BN_ULONG x1[4], // const BN_ULONG x2[4]); .globl _ecp_nistz256_sub .private_extern _ecp_nistz256_sub .align 4 _ecp_nistz256_sub: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sub_from ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl _ecp_nistz256_neg .private_extern _ecp_nistz256_neg .align 4 _ecp_nistz256_neg: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 mov x2,x1 mov x14,xzr // a = 0 mov x15,xzr mov x16,xzr mov x17,xzr adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sub_from ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded // to x4-x7 and b[0] - to x3 .align 4 __ecp_nistz256_mul_mont: mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x11,x7,x3 ldr x3,[x2,#8] // b[1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adc x19,xzr,x11 mov x20,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(1+1)] // b[1+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // 
hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(2+1)] // b[2+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr // last reduction subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adcs x17,x19,x11 adc x19,x20,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret // note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded // to x4-x7 .align 4 __ecp_nistz256_sqr_mont: // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. 
mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x2,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 lsl x8,x14,#32 adcs x1,x1,x11 lsr x9,x14,#32 adc x2,x2,x7 subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adc x17,x11,xzr // can't overflow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x2 adc x19,xzr,xzr adds x8,x14,#1 // subs 
x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret // Note that __ecp_nistz256_add_to expects both input vectors pre-loaded to // x4-x7 and x8-x11. This is done because it's used in multiple // contexts, e.g. in multiplication by 2 and 3... .align 4 __ecp_nistz256_add_to: adds x14,x14,x8 // ret = a+b adcs x15,x15,x9 adcs x16,x16,x10 adcs x17,x17,x11 adc x1,xzr,xzr // zap x1 adds x8,x14,#1 // subs x8,x4,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x1,xzr // did subtraction borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret .align 4 __ecp_nistz256_sub_from: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x14,x8 // ret = a-b sbcs x15,x15,x9 sbcs x16,x16,x10 sbcs x17,x17,x11 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .align 4 __ecp_nistz256_sub_morf: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x8,x14 // ret = b-a sbcs x15,x9,x15 sbcs x16,x10,x16 sbcs x17,x11,x17 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? 
ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .align 4 __ecp_nistz256_div_by_2: subs x8,x14,#1 // adds x8,x4,#-1 // tmp = a+modulus adcs x9,x15,x12 adcs x10,x16,xzr adcs x11,x17,x13 adc x1,xzr,xzr // zap x1 tst x14,#1 // is a even? csel x14,x14,x8,eq // ret = even ? a : a+modulus csel x15,x15,x9,eq csel x16,x16,x10,eq csel x17,x17,x11,eq csel x1,xzr,x1,eq lsr x14,x14,#1 // ret >>= 1 orr x14,x14,x15,lsl#63 lsr x15,x15,#1 orr x15,x15,x16,lsl#63 lsr x16,x16,#1 orr x16,x16,x17,lsl#63 lsr x17,x17,#1 stp x14,x15,[x0] orr x17,x17,x1,lsl#63 stp x16,x17,[x0,#16] ret .globl _ecp_nistz256_point_double .private_extern _ecp_nistz256_point_double .align 5 _ecp_nistz256_point_double: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] sub sp,sp,#32*4 Ldouble_shortcut: ldp x14,x15,[x1,#32] mov x21,x0 ldp x16,x17,[x1,#48] mov x22,x1 adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] mov x8,x14 ldr x13,[x13,#24] mov x9,x15 ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[x22,#64+16] add x0,sp,#0 bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y); add x0,sp,#64 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z); ldp x8,x9,[x22] ldp x10,x11,[x22,#16] mov x4,x14 // put Zsqr aside for p256_sub mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x); add x2,x22,#0 mov x14,x4 // restore Zsqr mov x15,x5 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x16,x6 mov x17,x7 ldp x6,x7,[sp,#0+16] add x0,sp,#64 bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr); add x0,sp,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S); ldr x3,[x22,#32] ldp x4,x5,[x22,#64] ldp x6,x7,[x22,#64+16] add x2,x22,#32 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x10,x16 mov 
x11,x17 ldp x6,x7,[sp,#0+16] add x0,x21,#64 bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0); add x0,sp,#96 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S); ldr x3,[sp,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x0,x21,#32 bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0); add x2,sp,#64 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr); mov x8,x14 // duplicate M mov x9,x15 mov x10,x16 mov x11,x17 mov x4,x14 // put M aside mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to mov x8,x4 // restore M mov x9,x5 ldr x3,[x22] // forward load for p256_mul_mont mov x10,x6 ldp x4,x5,[sp,#0] mov x11,x7 ldp x6,x7,[sp,#0+16] bl __ecp_nistz256_add_to // p256_mul_by_3(M, M); add x2,x22,#0 add x0,sp,#0 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#32+16] add x0,sp,#96 bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S); add x0,x21,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M); add x2,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0); add x2,sp,#0 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x); ldr x3,[sp,#32] mov x4,x14 // copy S mov x5,x15 mov x6,x16 mov x7,x17 add x2,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M); add x2,x21,#32 add x0,x21,#32 bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y); add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl _ecp_nistz256_point_add .private_extern _ecp_nistz256_point_add .align 5 _ecp_nistz256_point_add: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#32*12 ldp x4,x5,[x2,#64] // in2_z ldp x6,x7,[x2,#64+16] mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] orr x8,x4,x5 orr x10,x6,x7 orr x25,x8,x10 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z); ldp x4,x5,[x22,#64] // in1_z ldp x6,x7,[x22,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); ldr x3,[x23,#64] ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x2,x23,#64 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x22,#64 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#32] ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x2,x22,#32 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y); ldr x3,[x23,#32] ldp x4,x5,[sp,#352] ldp x6,x7,[sp,#352+16] add x2,x23,#32 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,sp,#320 ldr x3,[sp,#192] // forward load for p256_mul_mont ldp x4,x5,[x22] ldp x6,x7,[x22,#16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x26,x14,x16 // ~is_equal(S1,S2) add x2,sp,#192 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr); ldr x3,[sp,#128] ldp x4,x5,[x23] ldp x6,x7,[x23,#16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr); add x2,sp,#256 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x14,x14,x16 // 
~is_equal(U1,U2) mvn x27,x24 // -1/0 -> 0/-1 mvn x28,x25 // -1/0 -> 0/-1 orr x14,x14,x27 orr x14,x14,x28 orr x14,x14,x26 cbnz x14,Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2)) Ladd_double: mov x1,x22 mov x0,x21 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] add sp,sp,#256 // #256 is from #32*(12-4). difference in stack frames b Ldouble_shortcut .align 4 Ladd_proceed: add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[x22,#64] ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldr x3,[x23,#64] ldp x4,x5,[sp,#64] ldp x6,x7,[sp,#64+16] add x2,x23,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z); ldr x3,[sp,#96] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,sp,#96 add x0,sp,#224 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[sp,#128] ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#128 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#192 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#224 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#288 ldr x3,[sp,#224] // forward load for p256_mul_mont ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,sp,#224 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub); ldr x3,[sp,#160] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#160 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#352 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp 
x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] Ladd_done: add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl _ecp_nistz256_point_add_affine .private_extern _ecp_nistz256_point_add_affine .align 5 _ecp_nistz256_point_add_affine: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-80]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] sub sp,sp,#32*10 mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] ldp x4,x5,[x1,#64] // in1_z ldp x6,x7,[x1,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty ldp x14,x15,[x2] // in2_x ldp x16,x17,[x2,#16] ldp x8,x9,[x2,#32] // in2_y ldp x10,x11,[x2,#48] orr x14,x14,x15 orr x16,x16,x17 orr x8,x8,x9 orr x10,x10,x11 orr x14,x14,x16 orr x8,x8,x10 orr x25,x14,x8 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); mov x4,x14 mov x5,x15 mov x6,x16 mov x7,x17 ldr x3,[x23] add x2,x23,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x); add x2,x22,#0 ldr x3,[x22,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x); add x2,x22,#64 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#160] ldp x6,x7,[sp,#160+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldr x3,[x23,#32] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x23,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,x22,#32 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#192 bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y); add x0,sp,#224 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x0,sp,#288 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[sp,#160] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,sp,#160 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[x22] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,x22,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // 
p256_mul_mont(U2, in1_x, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#224 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#288 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#256 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#96 ldr x3,[x22,#32] // forward load for p256_mul_mont ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,x22,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub); ldr x3,[sp,#192] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#192 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#128 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] adrp x23,Lone_mont@PAGE-64 add x23,x23,Lone_mont@PAGEOFF-64 ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? 
csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x29,x30,[sp],#80 AARCH64_VALIDATE_LINK_REGISTER ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4], // uint64_t b[4]); .globl _ecp_nistz256_ord_mul_mont .private_extern _ecp_nistz256_ord_mul_mont .align 4 _ecp_nistz256_ord_mul_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,Lord@PAGE add x23,x23,Lord@PAGEOFF ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x19,x7,x3 mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts of multiplication adcs x16,x16,x9 adcs x17,x17,x10 adc x19,x19,xzr mov x20,xzr ldr x3,[x2,#8*1] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*2] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc 
x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*3] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr lsl x8,x24,#32 // last reduction subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x14,x14,x8,lo // ret = borrow ? 
ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4], // uint64_t rep); .globl _ecp_nistz256_ord_sqr_mont .private_extern _ecp_nistz256_ord_sqr_mont .align 4 _ecp_nistz256_ord_sqr_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,Lord@PAGE add x23,x23,Lord@PAGEOFF ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] b Loop_ord_sqr .align 4 Loop_ord_sqr: sub x2,x2,#1 //////////////////////////////////////////////////////////////// // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. 
mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x3,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] mul x24,x14,x23 adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 adcs x1,x1,x11 adc x3,x3,x7 subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow mul x24,x14,x23 lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds 
x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x3 adc x19,xzr,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x5,x15,x9,lo csel x6,x16,x10,lo csel x7,x17,x11,lo cbnz x2,Loop_ord_sqr stp x4,x5,[x0] stp x6,x7,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index); .globl _ecp_nistz256_select_w5 .private_extern _ecp_nistz256_select_w5 .align 4 _ecp_nistz256_select_w5: AARCH64_VALID_CALL_TARGET // x10 := x0 // w9 := 0; loop counter and incremented internal index mov x10, x0 mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 movi v20.16b, #0 movi v21.16b, #0 Lselect_w5_loop: // Loop 16 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // continue loading ... ld1 {v26.2d, v27.2d}, [x1],#32 // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b bit v20.16b, v26.16b, v3.16b bit v21.16b, v27.16b, v3.16b // If bit #4 is not 0 (i.e. 
idx_ctr < 16) loop back tbz w9, #4, Lselect_w5_loop // Write [v16-v21] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64 st1 {v20.2d, v21.2d}, [x10] ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index); .globl _ecp_nistz256_select_w7 .private_extern _ecp_nistz256_select_w7 .align 4 _ecp_nistz256_select_w7: AARCH64_VALID_CALL_TARGET // w9 := 0; loop counter and incremented internal index mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 Lselect_w7_loop: // Loop 64 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b // If bit #6 is not 0 (i.e. idx_ctr < 64) loop back tbz w9, #6, Lselect_w7_loop // Write [v16-v19] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0] ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/p256-armv8-asm-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include "CNIOBoringSSL_arm_arch.h" .section .rodata .align 5 .Lpoly: .quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001 .LRR: // 2^512 mod P precomputed for NIST P256 polynomial .quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd .Lone_mont: .quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe .Lone: .quad 1,0,0,0 .Lord: .quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000 .LordK: .quad 0xccd1c8aaee00bc4f .byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text // void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4], // const BN_ULONG x2[4]); .globl ecp_nistz256_mul_mont .hidden ecp_nistz256_mul_mont .type ecp_nistz256_mul_mont,%function .align 4 ecp_nistz256_mul_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! add x29,sp,#0 stp x19,x20,[sp,#16] ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_mul_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont // void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_sqr_mont .hidden ecp_nistz256_sqr_mont .type ecp_nistz256_sqr_mont,%function .align 4 ecp_nistz256_sqr_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! 
add x29,sp,#0 stp x19,x20,[sp,#16] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sqr_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont // void ecp_nistz256_div_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_div_by_2 .hidden ecp_nistz256_div_by_2 .type ecp_nistz256_div_by_2,%function .align 4 ecp_nistz256_div_by_2: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_div_by_2 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2 // void ecp_nistz256_mul_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_mul_by_2 .hidden ecp_nistz256_mul_by_2 .type ecp_nistz256_mul_by_2,%function .align 4 ecp_nistz256_mul_by_2: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 bl __ecp_nistz256_add_to // ret = a+a // 2*a ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2 // void ecp_nistz256_mul_by_3(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_mul_by_3 .hidden ecp_nistz256_mul_by_3 .type ecp_nistz256_mul_by_3,%function .align 4 ecp_nistz256_mul_by_3: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 mov x4,x14 mov x5,x15 mov x6,x16 mov x7,x17 bl __ecp_nistz256_add_to // ret = a+a // 2*a mov x8,x4 mov x9,x5 mov x10,x6 mov x11,x7 bl __ecp_nistz256_add_to // ret += a // 2*a+a=3*a ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3 // void ecp_nistz256_sub(BN_ULONG x0[4],const BN_ULONG x1[4], // const BN_ULONG x2[4]); .globl ecp_nistz256_sub .hidden ecp_nistz256_sub .type ecp_nistz256_sub,%function .align 4 ecp_nistz256_sub: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sub_from ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_sub,.-ecp_nistz256_sub // void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_neg .hidden ecp_nistz256_neg .type ecp_nistz256_neg,%function .align 4 ecp_nistz256_neg: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 mov x2,x1 mov x14,xzr // a = 0 mov x15,xzr mov x16,xzr mov x17,xzr adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sub_from ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_neg,.-ecp_nistz256_neg // note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded // to x4-x7 and b[0] - to x3 .type __ecp_nistz256_mul_mont,%function .align 4 __ecp_nistz256_mul_mont: mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x11,x7,x3 ldr x3,[x2,#8] // b[1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adc x19,xzr,x11 mov x20,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(1+1)] // b[1+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs 
x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(2+1)] // b[2+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr // last reduction subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adcs x17,x19,x11 adc x19,x20,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret .size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont // note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded // to x4-x7 .type __ecp_nistz256_sqr_mont,%function .align 4 __ecp_nistz256_sqr_mont: // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. 
follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x2,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 lsl x8,x14,#32 adcs x1,x1,x11 lsr x9,x14,#32 adc x2,x2,x7 subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adc x17,x11,xzr // 
can't overflow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x2 adc x19,xzr,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret .size __ecp_nistz256_sqr_mont,.-__ecp_nistz256_sqr_mont // Note that __ecp_nistz256_add_to expects both input vectors pre-loaded to // x4-x7 and x8-x11. This is done because it's used in multiple // contexts, e.g. in multiplication by 2 and 3... .type __ecp_nistz256_add_to,%function .align 4 __ecp_nistz256_add_to: adds x14,x14,x8 // ret = a+b adcs x15,x15,x9 adcs x16,x16,x10 adcs x17,x17,x11 adc x1,xzr,xzr // zap x1 adds x8,x14,#1 // subs x8,x4,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x1,xzr // did subtraction borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret .size __ecp_nistz256_add_to,.-__ecp_nistz256_add_to .type __ecp_nistz256_sub_from,%function .align 4 __ecp_nistz256_sub_from: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x14,x8 // ret = a-b sbcs x15,x15,x9 sbcs x16,x16,x10 sbcs x17,x17,x11 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? 
ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from .type __ecp_nistz256_sub_morf,%function .align 4 __ecp_nistz256_sub_morf: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x8,x14 // ret = b-a sbcs x15,x9,x15 sbcs x16,x10,x16 sbcs x17,x11,x17 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf .type __ecp_nistz256_div_by_2,%function .align 4 __ecp_nistz256_div_by_2: subs x8,x14,#1 // adds x8,x4,#-1 // tmp = a+modulus adcs x9,x15,x12 adcs x10,x16,xzr adcs x11,x17,x13 adc x1,xzr,xzr // zap x1 tst x14,#1 // is a even? csel x14,x14,x8,eq // ret = even ? a : a+modulus csel x15,x15,x9,eq csel x16,x16,x10,eq csel x17,x17,x11,eq csel x1,xzr,x1,eq lsr x14,x14,#1 // ret >>= 1 orr x14,x14,x15,lsl#63 lsr x15,x15,#1 orr x15,x15,x16,lsl#63 lsr x16,x16,#1 orr x16,x16,x17,lsl#63 lsr x17,x17,#1 stp x14,x15,[x0] orr x17,x17,x1,lsl#63 stp x16,x17,[x0,#16] ret .size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2 .globl ecp_nistz256_point_double .hidden ecp_nistz256_point_double .type ecp_nistz256_point_double,%function .align 5 ecp_nistz256_point_double: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] sub sp,sp,#32*4 .Ldouble_shortcut: ldp x14,x15,[x1,#32] mov x21,x0 ldp x16,x17,[x1,#48] mov x22,x1 adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] mov x8,x14 ldr x13,[x13,#24] mov x9,x15 ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[x22,#64+16] add x0,sp,#0 bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y); add x0,sp,#64 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z); ldp x8,x9,[x22] ldp x10,x11,[x22,#16] mov x4,x14 // put Zsqr aside for p256_sub mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x); add x2,x22,#0 mov x14,x4 // restore Zsqr mov x15,x5 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x16,x6 mov x17,x7 ldp x6,x7,[sp,#0+16] add x0,sp,#64 bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr); add x0,sp,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S); ldr x3,[x22,#32] ldp x4,x5,[x22,#64] ldp x6,x7,[x22,#64+16] add x2,x22,#32 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#0+16] add x0,x21,#64 bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0); add x0,sp,#96 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S); ldr x3,[sp,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x0,x21,#32 bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0); add x2,sp,#64 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr); mov x8,x14 // duplicate M mov x9,x15 mov x10,x16 mov x11,x17 mov x4,x14 // put M aside mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to mov x8,x4 // restore M mov x9,x5 ldr x3,[x22] // forward load for p256_mul_mont mov x10,x6 ldp x4,x5,[sp,#0] mov x11,x7 ldp x6,x7,[sp,#0+16] bl __ecp_nistz256_add_to // p256_mul_by_3(M, M); add x2,x22,#0 add x0,sp,#0 bl 
__ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#32+16] add x0,sp,#96 bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S); add x0,x21,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M); add x2,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0); add x2,sp,#0 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x); ldr x3,[sp,#32] mov x4,x14 // copy S mov x5,x15 mov x6,x16 mov x7,x17 add x2,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M); add x2,x21,#32 add x0,x21,#32 bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y); add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_point_double,.-ecp_nistz256_point_double .globl ecp_nistz256_point_add .hidden ecp_nistz256_point_add .type ecp_nistz256_point_add,%function .align 5 ecp_nistz256_point_add: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#32*12 ldp x4,x5,[x2,#64] // in2_z ldp x6,x7,[x2,#64+16] mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] orr x8,x4,x5 orr x10,x6,x7 orr x25,x8,x10 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z); ldp x4,x5,[x22,#64] // in1_z ldp x6,x7,[x22,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); ldr x3,[x23,#64] ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x2,x23,#64 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x22,#64 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#32] ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x2,x22,#32 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y); ldr x3,[x23,#32] ldp x4,x5,[sp,#352] ldp x6,x7,[sp,#352+16] add x2,x23,#32 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,sp,#320 ldr x3,[sp,#192] // forward load for p256_mul_mont ldp x4,x5,[x22] ldp x6,x7,[x22,#16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x26,x14,x16 // ~is_equal(S1,S2) add x2,sp,#192 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr); ldr x3,[sp,#128] ldp x4,x5,[x23] ldp x6,x7,[x23,#16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr); add x2,sp,#256 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x14,x14,x16 // 
~is_equal(U1,U2) mvn x27,x24 // -1/0 -> 0/-1 mvn x28,x25 // -1/0 -> 0/-1 orr x14,x14,x27 orr x14,x14,x28 orr x14,x14,x26 cbnz x14,.Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2)) .Ladd_double: mov x1,x22 mov x0,x21 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] add sp,sp,#256 // #256 is from #32*(12-4). difference in stack frames b .Ldouble_shortcut .align 4 .Ladd_proceed: add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[x22,#64] ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldr x3,[x23,#64] ldp x4,x5,[sp,#64] ldp x6,x7,[sp,#64+16] add x2,x23,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z); ldr x3,[sp,#96] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,sp,#96 add x0,sp,#224 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[sp,#128] ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#128 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#192 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#224 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#288 ldr x3,[sp,#224] // forward load for p256_mul_mont ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,sp,#224 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub); ldr x3,[sp,#160] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#160 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#352 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp 
x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] .Ladd_done: add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_point_add,.-ecp_nistz256_point_add .globl ecp_nistz256_point_add_affine .hidden ecp_nistz256_point_add_affine .type ecp_nistz256_point_add_affine,%function .align 5 ecp_nistz256_point_add_affine: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-80]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] sub sp,sp,#32*10 mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] ldp x4,x5,[x1,#64] // in1_z ldp x6,x7,[x1,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty ldp x14,x15,[x2] // in2_x ldp x16,x17,[x2,#16] ldp x8,x9,[x2,#32] // in2_y ldp x10,x11,[x2,#48] orr x14,x14,x15 orr x16,x16,x17 orr x8,x8,x9 orr x10,x10,x11 orr x14,x14,x16 orr x8,x8,x10 orr x25,x14,x8 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); mov x4,x14 mov x5,x15 mov x6,x16 mov x7,x17 ldr x3,[x23] add x2,x23,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x); add x2,x22,#0 ldr x3,[x22,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x); add x2,x22,#64 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#160] ldp x6,x7,[sp,#160+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldr x3,[x23,#32] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x23,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,x22,#32 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#192 bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y); add x0,sp,#224 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x0,sp,#288 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[sp,#160] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,sp,#160 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[x22] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,x22,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // 
p256_mul_mont(U2, in1_x, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#224 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#288 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#256 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#96 ldr x3,[x22,#32] // forward load for p256_mul_mont ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,x22,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub); ldr x3,[sp,#192] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#192 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#128 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] adrp x23,.Lone_mont-64 add x23,x23,:lo12:.Lone_mont-64 ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? 
csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x29,x30,[sp],#80 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4], // uint64_t b[4]); .globl ecp_nistz256_ord_mul_mont .hidden ecp_nistz256_ord_mul_mont .type ecp_nistz256_ord_mul_mont,%function .align 4 ecp_nistz256_ord_mul_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,.Lord add x23,x23,:lo12:.Lord ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x19,x7,x3 mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts of multiplication adcs x16,x16,x9 adcs x17,x17,x10 adc x19,x19,xzr mov x20,xzr ldr x3,[x2,#8*1] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr 
x3,[x2,#8*2] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*3] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr lsl x8,x24,#32 // last reduction subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x14,x14,x8,lo // ret = borrow ? 
ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret .size ecp_nistz256_ord_mul_mont,.-ecp_nistz256_ord_mul_mont //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4], // uint64_t rep); .globl ecp_nistz256_ord_sqr_mont .hidden ecp_nistz256_ord_sqr_mont .type ecp_nistz256_ord_sqr_mont,%function .align 4 ecp_nistz256_ord_sqr_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,.Lord add x23,x23,:lo12:.Lord ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] b .Loop_ord_sqr .align 4 .Loop_ord_sqr: sub x2,x2,#1 //////////////////////////////////////////////////////////////// // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. 
mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x3,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] mul x24,x14,x23 adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 adcs x1,x1,x11 adc x3,x3,x7 subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow mul x24,x14,x23 lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds 
x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x3 adc x19,xzr,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x5,x15,x9,lo csel x6,x16,x10,lo csel x7,x17,x11,lo cbnz x2,.Loop_ord_sqr stp x4,x5,[x0] stp x6,x7,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret .size ecp_nistz256_ord_sqr_mont,.-ecp_nistz256_ord_sqr_mont //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index); .globl ecp_nistz256_select_w5 .hidden ecp_nistz256_select_w5 .type ecp_nistz256_select_w5,%function .align 4 ecp_nistz256_select_w5: AARCH64_VALID_CALL_TARGET // x10 := x0 // w9 := 0; loop counter and incremented internal index mov x10, x0 mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 movi v20.16b, #0 movi v21.16b, #0 .Lselect_w5_loop: // Loop 16 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // continue loading ... ld1 {v26.2d, v27.2d}, [x1],#32 // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? 
[v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b bit v20.16b, v26.16b, v3.16b bit v21.16b, v27.16b, v3.16b // If bit #4 is not 0 (i.e. idx_ctr < 16) loop back tbz w9, #4, .Lselect_w5_loop // Write [v16-v21] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64 st1 {v20.2d, v21.2d}, [x10] ret .size ecp_nistz256_select_w5,.-ecp_nistz256_select_w5 //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index); .globl ecp_nistz256_select_w7 .hidden ecp_nistz256_select_w7 .type ecp_nistz256_select_w7,%function .align 4 ecp_nistz256_select_w7: AARCH64_VALID_CALL_TARGET // w9 := 0; loop counter and incremented internal index mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 .Lselect_w7_loop: // Loop 64 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b // If bit #6 is not 0 (i.e. 
idx_ctr < 64) loop back tbz w9, #6, .Lselect_w7_loop // Write [v16-v19] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0] ret .size ecp_nistz256_select_w7,.-ecp_nistz256_select_w7 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/p256-armv8-asm-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include "CNIOBoringSSL_arm_arch.h" .section .rodata .align 5 Lpoly: .quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001 LRR: // 2^512 mod P precomputed for NIST P256 polynomial .quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd Lone_mont: .quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe Lone: .quad 1,0,0,0 Lord: .quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000 LordK: .quad 0xccd1c8aaee00bc4f .byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text // void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4], // const BN_ULONG x2[4]); .globl ecp_nistz256_mul_mont .def ecp_nistz256_mul_mont .type 32 .endef .align 4 ecp_nistz256_mul_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! 
add x29,sp,#0 stp x19,x20,[sp,#16] ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_mul_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_sqr_mont .def ecp_nistz256_sqr_mont .type 32 .endef .align 4 ecp_nistz256_sqr_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! add x29,sp,#0 stp x19,x20,[sp,#16] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sqr_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_div_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_div_by_2 .def ecp_nistz256_div_by_2 .type 32 .endef .align 4 ecp_nistz256_div_by_2: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_div_by_2 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_mul_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_mul_by_2 .def ecp_nistz256_mul_by_2 .type 32 .endef .align 4 ecp_nistz256_mul_by_2: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 bl __ecp_nistz256_add_to // ret = a+a // 2*a ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_mul_by_3(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_mul_by_3 .def ecp_nistz256_mul_by_3 .type 32 .endef .align 4 ecp_nistz256_mul_by_3: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 mov x4,x14 mov x5,x15 mov x6,x16 mov x7,x17 bl __ecp_nistz256_add_to // ret = a+a // 2*a mov x8,x4 mov x9,x5 mov x10,x6 mov x11,x7 bl __ecp_nistz256_add_to // ret += a // 2*a+a=3*a ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_sub(BN_ULONG x0[4],const BN_ULONG x1[4], // const BN_ULONG x2[4]); .globl ecp_nistz256_sub .def ecp_nistz256_sub .type 32 .endef .align 4 ecp_nistz256_sub: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sub_from ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_neg .def ecp_nistz256_neg .type 32 .endef .align 4 ecp_nistz256_neg: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 mov x2,x1 mov x14,xzr // a = 0 mov x15,xzr mov x16,xzr mov x17,xzr adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sub_from ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded // to x4-x7 and b[0] - to x3 .def __ecp_nistz256_mul_mont .type 32 .endef .align 4 __ecp_nistz256_mul_mont: mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x11,x7,x3 ldr x3,[x2,#8] // b[1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adc x19,xzr,x11 mov x20,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(1+1)] // b[1+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) 
adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(2+1)] // b[2+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr // last reduction subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adcs x17,x19,x11 adc x19,x20,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret // note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded // to x4-x7 .def __ecp_nistz256_sqr_mont .type 32 .endef .align 4 __ecp_nistz256_sqr_mont: // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. 
follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x2,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 lsl x8,x14,#32 adcs x1,x1,x11 lsr x9,x14,#32 adc x2,x2,x7 subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adc x17,x11,xzr // 
can't overflow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x2 adc x19,xzr,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret // Note that __ecp_nistz256_add_to expects both input vectors pre-loaded to // x4-x7 and x8-x11. This is done because it's used in multiple // contexts, e.g. in multiplication by 2 and 3... .def __ecp_nistz256_add_to .type 32 .endef .align 4 __ecp_nistz256_add_to: adds x14,x14,x8 // ret = a+b adcs x15,x15,x9 adcs x16,x16,x10 adcs x17,x17,x11 adc x1,xzr,xzr // zap x1 adds x8,x14,#1 // subs x8,x4,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x1,xzr // did subtraction borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret .def __ecp_nistz256_sub_from .type 32 .endef .align 4 __ecp_nistz256_sub_from: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x14,x8 // ret = a-b sbcs x15,x15,x9 sbcs x16,x16,x10 sbcs x17,x17,x11 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .def __ecp_nistz256_sub_morf .type 32 .endef .align 4 __ecp_nistz256_sub_morf: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x8,x14 // ret = b-a sbcs x15,x9,x15 sbcs x16,x10,x16 sbcs x17,x11,x17 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? 
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .def __ecp_nistz256_div_by_2 .type 32 .endef .align 4 __ecp_nistz256_div_by_2: subs x8,x14,#1 // adds x8,x4,#-1 // tmp = a+modulus adcs x9,x15,x12 adcs x10,x16,xzr adcs x11,x17,x13 adc x1,xzr,xzr // zap x1 tst x14,#1 // is a even? csel x14,x14,x8,eq // ret = even ? a : a+modulus csel x15,x15,x9,eq csel x16,x16,x10,eq csel x17,x17,x11,eq csel x1,xzr,x1,eq lsr x14,x14,#1 // ret >>= 1 orr x14,x14,x15,lsl#63 lsr x15,x15,#1 orr x15,x15,x16,lsl#63 lsr x16,x16,#1 orr x16,x16,x17,lsl#63 lsr x17,x17,#1 stp x14,x15,[x0] orr x17,x17,x1,lsl#63 stp x16,x17,[x0,#16] ret .globl ecp_nistz256_point_double .def ecp_nistz256_point_double .type 32 .endef .align 5 ecp_nistz256_point_double: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] sub sp,sp,#32*4 Ldouble_shortcut: ldp x14,x15,[x1,#32] mov x21,x0 ldp x16,x17,[x1,#48] mov x22,x1 adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] mov x8,x14 ldr x13,[x13,#24] mov x9,x15 ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[x22,#64+16] add x0,sp,#0 bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y); add x0,sp,#64 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z); ldp x8,x9,[x22] ldp x10,x11,[x22,#16] mov x4,x14 // put Zsqr aside for p256_sub mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x); add x2,x22,#0 mov x14,x4 // restore Zsqr mov x15,x5 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x16,x6 mov x17,x7 ldp x6,x7,[sp,#0+16] add x0,sp,#64 bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr); add x0,sp,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S); ldr x3,[x22,#32] ldp x4,x5,[x22,#64] ldp x6,x7,[x22,#64+16] add x2,x22,#32 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y); mov x8,x14 
mov x9,x15 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#0+16] add x0,x21,#64 bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0); add x0,sp,#96 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S); ldr x3,[sp,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x0,x21,#32 bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0); add x2,sp,#64 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr); mov x8,x14 // duplicate M mov x9,x15 mov x10,x16 mov x11,x17 mov x4,x14 // put M aside mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to mov x8,x4 // restore M mov x9,x5 ldr x3,[x22] // forward load for p256_mul_mont mov x10,x6 ldp x4,x5,[sp,#0] mov x11,x7 ldp x6,x7,[sp,#0+16] bl __ecp_nistz256_add_to // p256_mul_by_3(M, M); add x2,x22,#0 add x0,sp,#0 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#32+16] add x0,sp,#96 bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S); add x0,x21,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M); add x2,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0); add x2,sp,#0 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x); ldr x3,[sp,#32] mov x4,x14 // copy S mov x5,x15 mov x6,x16 mov x7,x17 add x2,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M); add x2,x21,#32 add x0,x21,#32 bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y); add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl ecp_nistz256_point_add .def ecp_nistz256_point_add .type 32 .endef .align 5 ecp_nistz256_point_add: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#32*12 ldp x4,x5,[x2,#64] // in2_z ldp x6,x7,[x2,#64+16] mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] orr x8,x4,x5 orr x10,x6,x7 orr x25,x8,x10 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z); ldp x4,x5,[x22,#64] // in1_z ldp x6,x7,[x22,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); ldr x3,[x23,#64] ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x2,x23,#64 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x22,#64 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#32] ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x2,x22,#32 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y); ldr x3,[x23,#32] ldp x4,x5,[sp,#352] ldp x6,x7,[sp,#352+16] add x2,x23,#32 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,sp,#320 ldr x3,[sp,#192] // forward load for p256_mul_mont ldp x4,x5,[x22] ldp x6,x7,[x22,#16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x26,x14,x16 // ~is_equal(S1,S2) add x2,sp,#192 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr); ldr x3,[sp,#128] ldp x4,x5,[x23] ldp x6,x7,[x23,#16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr); add x2,sp,#256 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x14,x14,x16 // 
~is_equal(U1,U2) mvn x27,x24 // -1/0 -> 0/-1 mvn x28,x25 // -1/0 -> 0/-1 orr x14,x14,x27 orr x14,x14,x28 orr x14,x14,x26 cbnz x14,Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2)) Ladd_double: mov x1,x22 mov x0,x21 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] add sp,sp,#256 // #256 is from #32*(12-4). difference in stack frames b Ldouble_shortcut .align 4 Ladd_proceed: add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[x22,#64] ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldr x3,[x23,#64] ldp x4,x5,[sp,#64] ldp x6,x7,[sp,#64+16] add x2,x23,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z); ldr x3,[sp,#96] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,sp,#96 add x0,sp,#224 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[sp,#128] ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#128 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#192 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#224 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#288 ldr x3,[sp,#224] // forward load for p256_mul_mont ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,sp,#224 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub); ldr x3,[sp,#160] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#160 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#352 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp 
x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] Ladd_done: add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl ecp_nistz256_point_add_affine .def ecp_nistz256_point_add_affine .type 32 .endef .align 5 ecp_nistz256_point_add_affine: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-80]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] sub sp,sp,#32*10 mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] ldp x4,x5,[x1,#64] // in1_z ldp x6,x7,[x1,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty ldp x14,x15,[x2] // in2_x ldp x16,x17,[x2,#16] ldp x8,x9,[x2,#32] // in2_y ldp x10,x11,[x2,#48] orr x14,x14,x15 orr x16,x16,x17 orr x8,x8,x9 orr x10,x10,x11 orr x14,x14,x16 orr x8,x8,x10 orr x25,x14,x8 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); mov x4,x14 mov x5,x15 mov x6,x16 mov x7,x17 ldr x3,[x23] add x2,x23,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x); add x2,x22,#0 ldr x3,[x22,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x); add x2,x22,#64 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#160] ldp x6,x7,[sp,#160+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldr x3,[x23,#32] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x23,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,x22,#32 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#192 bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y); add x0,sp,#224 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x0,sp,#288 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[sp,#160] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,sp,#160 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[x22] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,x22,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // 
p256_mul_mont(U2, in1_x, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#224 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#288 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#256 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#96 ldr x3,[x22,#32] // forward load for p256_mul_mont ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,x22,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub); ldr x3,[sp,#192] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#192 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#128 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] adrp x23,Lone_mont-64 add x23,x23,:lo12:Lone_mont-64 ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? 
csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x29,x30,[sp],#80 AARCH64_VALIDATE_LINK_REGISTER ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4], // uint64_t b[4]); .globl ecp_nistz256_ord_mul_mont .def ecp_nistz256_ord_mul_mont .type 32 .endef .align 4 ecp_nistz256_ord_mul_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,Lord add x23,x23,:lo12:Lord ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x19,x7,x3 mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts of multiplication adcs x16,x16,x9 adcs x17,x17,x10 adc x19,x19,xzr mov x20,xzr ldr x3,[x2,#8*1] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*2] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc 
x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*3] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr lsl x8,x24,#32 // last reduction subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x14,x14,x8,lo // ret = borrow ? 
ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4], // uint64_t rep); .globl ecp_nistz256_ord_sqr_mont .def ecp_nistz256_ord_sqr_mont .type 32 .endef .align 4 ecp_nistz256_ord_sqr_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,Lord add x23,x23,:lo12:Lord ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] b Loop_ord_sqr .align 4 Loop_ord_sqr: sub x2,x2,#1 //////////////////////////////////////////////////////////////// // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. 
mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x3,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] mul x24,x14,x23 adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 adcs x1,x1,x11 adc x3,x3,x7 subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow mul x24,x14,x23 lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds 
x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x3 adc x19,xzr,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x5,x15,x9,lo csel x6,x16,x10,lo csel x7,x17,x11,lo cbnz x2,Loop_ord_sqr stp x4,x5,[x0] stp x6,x7,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index); .globl ecp_nistz256_select_w5 .def ecp_nistz256_select_w5 .type 32 .endef .align 4 ecp_nistz256_select_w5: AARCH64_VALID_CALL_TARGET // x10 := x0 // w9 := 0; loop counter and incremented internal index mov x10, x0 mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 movi v20.16b, #0 movi v21.16b, #0 Lselect_w5_loop: // Loop 16 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // continue loading ... ld1 {v26.2d, v27.2d}, [x1],#32 // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b bit v20.16b, v26.16b, v3.16b bit v21.16b, v27.16b, v3.16b // If bit #4 is not 0 (i.e. 
idx_ctr < 16) loop back tbz w9, #4, Lselect_w5_loop // Write [v16-v21] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64 st1 {v20.2d, v21.2d}, [x10] ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index); .globl ecp_nistz256_select_w7 .def ecp_nistz256_select_w7 .type 32 .endef .align 4 ecp_nistz256_select_w7: AARCH64_VALID_CALL_TARGET // w9 := 0; loop counter and incremented internal index mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 Lselect_w7_loop: // Loop 64 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b // If bit #6 is not 0 (i.e. idx_ctr < 64) loop back tbz w9, #6, Lselect_w7_loop // Write [v16-v19] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0] ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/p256-x86_64-asm-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .section __DATA,__const .p2align 6 L$poly: .quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001 L$One: .long 1,1,1,1,1,1,1,1 L$Two: .long 2,2,2,2,2,2,2,2 L$Three: .long 3,3,3,3,3,3,3,3 L$ONE_mont: .quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe L$ord: .quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000 L$ordK: .quad 0xccd1c8aaee00bc4f .text .globl _ecp_nistz256_neg .private_extern _ecp_nistz256_neg .p2align 5 _ecp_nistz256_neg: _CET_ENDBR pushq %r12 pushq %r13 L$neg_body: xorq %r8,%r8 xorq %r9,%r9 xorq %r10,%r10 xorq %r11,%r11 xorq %r13,%r13 subq 0(%rsi),%r8 sbbq 8(%rsi),%r9 sbbq 16(%rsi),%r10 movq %r8,%rax sbbq 24(%rsi),%r11 leaq L$poly(%rip),%rsi movq %r9,%rdx sbbq $0,%r13 addq 0(%rsi),%r8 movq %r10,%rcx adcq 8(%rsi),%r9 adcq 16(%rsi),%r10 movq %r11,%r12 adcq 24(%rsi),%r11 testq %r13,%r13 cmovzq %rax,%r8 cmovzq %rdx,%r9 movq %r8,0(%rdi) cmovzq %rcx,%r10 movq %r9,8(%rdi) cmovzq %r12,%r11 movq %r10,16(%rdi) movq %r11,24(%rdi) movq 0(%rsp),%r13 movq 8(%rsp),%r12 leaq 16(%rsp),%rsp L$neg_epilogue: ret .globl _ecp_nistz256_ord_mul_mont_nohw .private_extern _ecp_nistz256_ord_mul_mont_nohw .p2align 5 _ecp_nistz256_ord_mul_mont_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$ord_mul_body: movq 0(%rdx),%rax movq %rdx,%rbx leaq L$ord(%rip),%r14 movq L$ordK(%rip),%r15 movq %rax,%rcx mulq 0(%rsi) movq %rax,%r8 movq %rcx,%rax movq %rdx,%r9 mulq 8(%rsi) addq %rax,%r9 movq %rcx,%rax adcq $0,%rdx movq %rdx,%r10 mulq 16(%rsi) addq %rax,%r10 movq %rcx,%rax adcq $0,%rdx movq %r8,%r13 imulq %r15,%r8 movq %rdx,%r11 mulq 24(%rsi) addq %rax,%r11 movq %r8,%rax adcq $0,%rdx movq %rdx,%r12 mulq 0(%r14) movq %r8,%rbp addq %rax,%r13 movq %r8,%rax adcq $0,%rdx movq %rdx,%rcx subq %r8,%r10 sbbq $0,%r8 mulq 8(%r14) addq %rcx,%r9 adcq $0,%rdx addq %rax,%r9 movq 
%rbp,%rax adcq %rdx,%r10 movq %rbp,%rdx adcq $0,%r8 shlq $32,%rax shrq $32,%rdx subq %rax,%r11 movq 8(%rbx),%rax sbbq %rdx,%rbp addq %r8,%r11 adcq %rbp,%r12 adcq $0,%r13 movq %rax,%rcx mulq 0(%rsi) addq %rax,%r9 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 8(%rsi) addq %rbp,%r10 adcq $0,%rdx addq %rax,%r10 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 16(%rsi) addq %rbp,%r11 adcq $0,%rdx addq %rax,%r11 movq %rcx,%rax adcq $0,%rdx movq %r9,%rcx imulq %r15,%r9 movq %rdx,%rbp mulq 24(%rsi) addq %rbp,%r12 adcq $0,%rdx xorq %r8,%r8 addq %rax,%r12 movq %r9,%rax adcq %rdx,%r13 adcq $0,%r8 mulq 0(%r14) movq %r9,%rbp addq %rax,%rcx movq %r9,%rax adcq %rdx,%rcx subq %r9,%r11 sbbq $0,%r9 mulq 8(%r14) addq %rcx,%r10 adcq $0,%rdx addq %rax,%r10 movq %rbp,%rax adcq %rdx,%r11 movq %rbp,%rdx adcq $0,%r9 shlq $32,%rax shrq $32,%rdx subq %rax,%r12 movq 16(%rbx),%rax sbbq %rdx,%rbp addq %r9,%r12 adcq %rbp,%r13 adcq $0,%r8 movq %rax,%rcx mulq 0(%rsi) addq %rax,%r10 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 8(%rsi) addq %rbp,%r11 adcq $0,%rdx addq %rax,%r11 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 16(%rsi) addq %rbp,%r12 adcq $0,%rdx addq %rax,%r12 movq %rcx,%rax adcq $0,%rdx movq %r10,%rcx imulq %r15,%r10 movq %rdx,%rbp mulq 24(%rsi) addq %rbp,%r13 adcq $0,%rdx xorq %r9,%r9 addq %rax,%r13 movq %r10,%rax adcq %rdx,%r8 adcq $0,%r9 mulq 0(%r14) movq %r10,%rbp addq %rax,%rcx movq %r10,%rax adcq %rdx,%rcx subq %r10,%r12 sbbq $0,%r10 mulq 8(%r14) addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %rbp,%rax adcq %rdx,%r12 movq %rbp,%rdx adcq $0,%r10 shlq $32,%rax shrq $32,%rdx subq %rax,%r13 movq 24(%rbx),%rax sbbq %rdx,%rbp addq %r10,%r13 adcq %rbp,%r8 adcq $0,%r9 movq %rax,%rcx mulq 0(%rsi) addq %rax,%r11 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 8(%rsi) addq %rbp,%r12 adcq $0,%rdx addq %rax,%r12 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 16(%rsi) addq %rbp,%r13 adcq $0,%rdx addq %rax,%r13 movq %rcx,%rax adcq $0,%rdx movq %r11,%rcx imulq %r15,%r11 movq %rdx,%rbp 
mulq 24(%rsi) addq %rbp,%r8 adcq $0,%rdx xorq %r10,%r10 addq %rax,%r8 movq %r11,%rax adcq %rdx,%r9 adcq $0,%r10 mulq 0(%r14) movq %r11,%rbp addq %rax,%rcx movq %r11,%rax adcq %rdx,%rcx subq %r11,%r13 sbbq $0,%r11 mulq 8(%r14) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %rbp,%rax adcq %rdx,%r13 movq %rbp,%rdx adcq $0,%r11 shlq $32,%rax shrq $32,%rdx subq %rax,%r8 sbbq %rdx,%rbp addq %r11,%r8 adcq %rbp,%r9 adcq $0,%r10 movq %r12,%rsi subq 0(%r14),%r12 movq %r13,%r11 sbbq 8(%r14),%r13 movq %r8,%rcx sbbq 16(%r14),%r8 movq %r9,%rbp sbbq 24(%r14),%r9 sbbq $0,%r10 cmovcq %rsi,%r12 cmovcq %r11,%r13 cmovcq %rcx,%r8 cmovcq %rbp,%r9 movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$ord_mul_epilogue: ret .globl _ecp_nistz256_ord_sqr_mont_nohw .private_extern _ecp_nistz256_ord_sqr_mont_nohw .p2align 5 _ecp_nistz256_ord_sqr_mont_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$ord_sqr_body: movq 0(%rsi),%r8 movq 8(%rsi),%rax movq 16(%rsi),%r14 movq 24(%rsi),%r15 leaq L$ord(%rip),%rsi movq %rdx,%rbx jmp L$oop_ord_sqr .p2align 5 L$oop_ord_sqr: movq %rax,%rbp mulq %r8 movq %rax,%r9 .byte 102,72,15,110,205 movq %r14,%rax movq %rdx,%r10 mulq %r8 addq %rax,%r10 movq %r15,%rax .byte 102,73,15,110,214 adcq $0,%rdx movq %rdx,%r11 mulq %r8 addq %rax,%r11 movq %r15,%rax .byte 102,73,15,110,223 adcq $0,%rdx movq %rdx,%r12 mulq %r14 movq %rax,%r13 movq %r14,%rax movq %rdx,%r14 mulq %rbp addq %rax,%r11 movq %r15,%rax adcq $0,%rdx movq %rdx,%r15 mulq %rbp addq %rax,%r12 adcq $0,%rdx addq %r15,%r12 adcq %rdx,%r13 adcq $0,%r14 xorq %r15,%r15 movq %r8,%rax addq %r9,%r9 adcq %r10,%r10 adcq %r11,%r11 adcq %r12,%r12 adcq %r13,%r13 adcq %r14,%r14 adcq $0,%r15 mulq %rax movq %rax,%r8 .byte 102,72,15,126,200 movq %rdx,%rbp mulq %rax addq %rbp,%r9 adcq %rax,%r10 .byte 102,72,15,126,208 adcq $0,%rdx movq 
%rdx,%rbp mulq %rax addq %rbp,%r11 adcq %rax,%r12 .byte 102,72,15,126,216 adcq $0,%rdx movq %rdx,%rbp movq %r8,%rcx imulq 32(%rsi),%r8 mulq %rax addq %rbp,%r13 adcq %rax,%r14 movq 0(%rsi),%rax adcq %rdx,%r15 mulq %r8 movq %r8,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r8,%r10 sbbq $0,%rbp mulq %r8 addq %rcx,%r9 adcq $0,%rdx addq %rax,%r9 movq %r8,%rax adcq %rdx,%r10 movq %r8,%rdx adcq $0,%rbp movq %r9,%rcx imulq 32(%rsi),%r9 shlq $32,%rax shrq $32,%rdx subq %rax,%r11 movq 0(%rsi),%rax sbbq %rdx,%r8 addq %rbp,%r11 adcq $0,%r8 mulq %r9 movq %r9,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r9,%r11 sbbq $0,%rbp mulq %r9 addq %rcx,%r10 adcq $0,%rdx addq %rax,%r10 movq %r9,%rax adcq %rdx,%r11 movq %r9,%rdx adcq $0,%rbp movq %r10,%rcx imulq 32(%rsi),%r10 shlq $32,%rax shrq $32,%rdx subq %rax,%r8 movq 0(%rsi),%rax sbbq %rdx,%r9 addq %rbp,%r8 adcq $0,%r9 mulq %r10 movq %r10,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r10,%r8 sbbq $0,%rbp mulq %r10 addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %r10,%rax adcq %rdx,%r8 movq %r10,%rdx adcq $0,%rbp movq %r11,%rcx imulq 32(%rsi),%r11 shlq $32,%rax shrq $32,%rdx subq %rax,%r9 movq 0(%rsi),%rax sbbq %rdx,%r10 addq %rbp,%r9 adcq $0,%r10 mulq %r11 movq %r11,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r11,%r9 sbbq $0,%rbp mulq %r11 addq %rcx,%r8 adcq $0,%rdx addq %rax,%r8 movq %r11,%rax adcq %rdx,%r9 movq %r11,%rdx adcq $0,%rbp shlq $32,%rax shrq $32,%rdx subq %rax,%r10 sbbq %rdx,%r11 addq %rbp,%r10 adcq $0,%r11 xorq %rdx,%rdx addq %r12,%r8 adcq %r13,%r9 movq %r8,%r12 adcq %r14,%r10 adcq %r15,%r11 movq %r9,%rax adcq $0,%rdx subq 0(%rsi),%r8 movq %r10,%r14 sbbq 8(%rsi),%r9 sbbq 16(%rsi),%r10 movq %r11,%r15 sbbq 24(%rsi),%r11 sbbq $0,%rdx cmovcq %r12,%r8 cmovncq %r9,%rax cmovncq %r10,%r14 cmovncq %r11,%r15 decq %rbx jnz L$oop_ord_sqr movq %r8,0(%rdi) movq %rax,8(%rdi) pxor %xmm1,%xmm1 movq %r14,16(%rdi) pxor %xmm2,%xmm2 movq %r15,24(%rdi) pxor %xmm3,%xmm3 movq 0(%rsp),%r15 
movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$ord_sqr_epilogue: ret .globl _ecp_nistz256_ord_mul_mont_adx .private_extern _ecp_nistz256_ord_mul_mont_adx .p2align 5 _ecp_nistz256_ord_mul_mont_adx: L$ecp_nistz256_ord_mul_mont_adx: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$ord_mulx_body: movq %rdx,%rbx movq 0(%rdx),%rdx movq 0(%rsi),%r9 movq 8(%rsi),%r10 movq 16(%rsi),%r11 movq 24(%rsi),%r12 leaq -128(%rsi),%rsi leaq L$ord-128(%rip),%r14 movq L$ordK(%rip),%r15 mulxq %r9,%r8,%r9 mulxq %r10,%rcx,%r10 mulxq %r11,%rbp,%r11 addq %rcx,%r9 mulxq %r12,%rcx,%r12 movq %r8,%rdx mulxq %r15,%rdx,%rax adcq %rbp,%r10 adcq %rcx,%r11 adcq $0,%r12 xorq %r13,%r13 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r8 adoxq %rbp,%r9 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 24+128(%r14),%rcx,%rbp movq 8(%rbx),%rdx adcxq %rcx,%r11 adoxq %rbp,%r12 adcxq %r8,%r12 adoxq %r8,%r13 adcq $0,%r13 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 24+128(%rsi),%rcx,%rbp movq %r9,%rdx mulxq %r15,%rdx,%rax adcxq %rcx,%r12 adoxq %rbp,%r13 adcxq %r8,%r13 adoxq %r8,%r8 adcq $0,%r8 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 24+128(%r14),%rcx,%rbp movq 16(%rbx),%rdx adcxq %rcx,%r12 adoxq %rbp,%r13 adcxq %r9,%r13 adoxq %r9,%r8 adcq $0,%r8 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 24+128(%rsi),%rcx,%rbp movq %r10,%rdx mulxq %r15,%rdx,%rax adcxq %rcx,%r13 adoxq %rbp,%r8 adcxq %r9,%r8 adoxq %r9,%r9 adcq $0,%r9 mulxq 
0+128(%r14),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 24+128(%r14),%rcx,%rbp movq 24(%rbx),%rdx adcxq %rcx,%r13 adoxq %rbp,%r8 adcxq %r10,%r8 adoxq %r10,%r9 adcq $0,%r9 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r13 adoxq %rbp,%r8 mulxq 24+128(%rsi),%rcx,%rbp movq %r11,%rdx mulxq %r15,%rdx,%rax adcxq %rcx,%r8 adoxq %rbp,%r9 adcxq %r10,%r9 adoxq %r10,%r10 adcq $0,%r10 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r13 adoxq %rbp,%r8 mulxq 24+128(%r14),%rcx,%rbp leaq 128(%r14),%r14 movq %r12,%rbx adcxq %rcx,%r8 adoxq %rbp,%r9 movq %r13,%rdx adcxq %r11,%r9 adoxq %r11,%r10 adcq $0,%r10 movq %r8,%rcx subq 0(%r14),%r12 sbbq 8(%r14),%r13 sbbq 16(%r14),%r8 movq %r9,%rbp sbbq 24(%r14),%r9 sbbq $0,%r10 cmovcq %rbx,%r12 cmovcq %rdx,%r13 cmovcq %rcx,%r8 cmovcq %rbp,%r9 movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$ord_mulx_epilogue: ret .globl _ecp_nistz256_ord_sqr_mont_adx .private_extern _ecp_nistz256_ord_sqr_mont_adx .p2align 5 _ecp_nistz256_ord_sqr_mont_adx: _CET_ENDBR L$ecp_nistz256_ord_sqr_mont_adx: pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$ord_sqrx_body: movq %rdx,%rbx movq 0(%rsi),%rdx movq 8(%rsi),%r14 movq 16(%rsi),%r15 movq 24(%rsi),%r8 leaq L$ord(%rip),%rsi jmp L$oop_ord_sqrx .p2align 5 L$oop_ord_sqrx: mulxq %r14,%r9,%r10 mulxq %r15,%rcx,%r11 movq %rdx,%rax .byte 102,73,15,110,206 mulxq %r8,%rbp,%r12 movq %r14,%rdx addq %rcx,%r10 .byte 102,73,15,110,215 adcq %rbp,%r11 adcq $0,%r12 xorq %r13,%r13 mulxq %r15,%rcx,%rbp adcxq %rcx,%r11 
adoxq %rbp,%r12 mulxq %r8,%rcx,%rbp movq %r15,%rdx adcxq %rcx,%r12 adoxq %rbp,%r13 adcq $0,%r13 mulxq %r8,%rcx,%r14 movq %rax,%rdx .byte 102,73,15,110,216 xorq %r15,%r15 adcxq %r9,%r9 adoxq %rcx,%r13 adcxq %r10,%r10 adoxq %r15,%r14 mulxq %rdx,%r8,%rbp .byte 102,72,15,126,202 adcxq %r11,%r11 adoxq %rbp,%r9 adcxq %r12,%r12 mulxq %rdx,%rcx,%rax .byte 102,72,15,126,210 adcxq %r13,%r13 adoxq %rcx,%r10 adcxq %r14,%r14 mulxq %rdx,%rcx,%rbp .byte 0x67 .byte 102,72,15,126,218 adoxq %rax,%r11 adcxq %r15,%r15 adoxq %rcx,%r12 adoxq %rbp,%r13 mulxq %rdx,%rcx,%rax adoxq %rcx,%r14 adoxq %rax,%r15 movq %r8,%rdx mulxq 32(%rsi),%rdx,%rcx xorq %rax,%rax mulxq 0(%rsi),%rcx,%rbp adcxq %rcx,%r8 adoxq %rbp,%r9 mulxq 8(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 16(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 24(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r8 adcxq %rax,%r8 movq %r9,%rdx mulxq 32(%rsi),%rdx,%rcx mulxq 0(%rsi),%rcx,%rbp adoxq %rcx,%r9 adcxq %rbp,%r10 mulxq 8(%rsi),%rcx,%rbp adoxq %rcx,%r10 adcxq %rbp,%r11 mulxq 16(%rsi),%rcx,%rbp adoxq %rcx,%r11 adcxq %rbp,%r8 mulxq 24(%rsi),%rcx,%rbp adoxq %rcx,%r8 adcxq %rbp,%r9 adoxq %rax,%r9 movq %r10,%rdx mulxq 32(%rsi),%rdx,%rcx mulxq 0(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r8 mulxq 16(%rsi),%rcx,%rbp adcxq %rcx,%r8 adoxq %rbp,%r9 mulxq 24(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 adcxq %rax,%r10 movq %r11,%rdx mulxq 32(%rsi),%rdx,%rcx mulxq 0(%rsi),%rcx,%rbp adoxq %rcx,%r11 adcxq %rbp,%r8 mulxq 8(%rsi),%rcx,%rbp adoxq %rcx,%r8 adcxq %rbp,%r9 mulxq 16(%rsi),%rcx,%rbp adoxq %rcx,%r9 adcxq %rbp,%r10 mulxq 24(%rsi),%rcx,%rbp adoxq %rcx,%r10 adcxq %rbp,%r11 adoxq %rax,%r11 addq %r8,%r12 adcq %r13,%r9 movq %r12,%rdx adcq %r14,%r10 adcq %r15,%r11 movq %r9,%r14 adcq $0,%rax subq 0(%rsi),%r12 movq %r10,%r15 sbbq 8(%rsi),%r9 sbbq 16(%rsi),%r10 movq %r11,%r8 sbbq 24(%rsi),%r11 sbbq $0,%rax cmovncq %r12,%rdx cmovncq %r9,%r14 cmovncq %r10,%r15 cmovncq %r11,%r8 
decq %rbx jnz L$oop_ord_sqrx movq %rdx,0(%rdi) movq %r14,8(%rdi) pxor %xmm1,%xmm1 movq %r15,16(%rdi) pxor %xmm2,%xmm2 movq %r8,24(%rdi) pxor %xmm3,%xmm3 movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$ord_sqrx_epilogue: ret .globl _ecp_nistz256_mul_mont_nohw .private_extern _ecp_nistz256_mul_mont_nohw .p2align 5 _ecp_nistz256_mul_mont_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$mul_body: movq %rdx,%rbx movq 0(%rdx),%rax movq 0(%rsi),%r9 movq 8(%rsi),%r10 movq 16(%rsi),%r11 movq 24(%rsi),%r12 call __ecp_nistz256_mul_montq movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$mul_epilogue: ret .p2align 5 __ecp_nistz256_mul_montq: movq %rax,%rbp mulq %r9 movq L$poly+8(%rip),%r14 movq %rax,%r8 movq %rbp,%rax movq %rdx,%r9 mulq %r10 movq L$poly+24(%rip),%r15 addq %rax,%r9 movq %rbp,%rax adcq $0,%rdx movq %rdx,%r10 mulq %r11 addq %rax,%r10 movq %rbp,%rax adcq $0,%rdx movq %rdx,%r11 mulq %r12 addq %rax,%r11 movq %r8,%rax adcq $0,%rdx xorq %r13,%r13 movq %rdx,%r12 movq %r8,%rbp shlq $32,%r8 mulq %r15 shrq $32,%rbp addq %r8,%r9 adcq %rbp,%r10 adcq %rax,%r11 movq 8(%rbx),%rax adcq %rdx,%r12 adcq $0,%r13 xorq %r8,%r8 movq %rax,%rbp mulq 0(%rsi) addq %rax,%r9 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 8(%rsi) addq %rcx,%r10 adcq $0,%rdx addq %rax,%r10 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 16(%rsi) addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 24(%rsi) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %r9,%rax adcq %rdx,%r13 adcq $0,%r8 movq %r9,%rbp shlq $32,%r9 mulq %r15 shrq $32,%rbp addq %r9,%r10 adcq %rbp,%r11 adcq %rax,%r12 movq 16(%rbx),%rax adcq %rdx,%r13 adcq $0,%r8 xorq %r9,%r9 movq %rax,%rbp mulq 0(%rsi) addq %rax,%r10 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 8(%rsi) addq %rcx,%r11 adcq $0,%rdx addq 
%rax,%r11 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 16(%rsi) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 24(%rsi) addq %rcx,%r13 adcq $0,%rdx addq %rax,%r13 movq %r10,%rax adcq %rdx,%r8 adcq $0,%r9 movq %r10,%rbp shlq $32,%r10 mulq %r15 shrq $32,%rbp addq %r10,%r11 adcq %rbp,%r12 adcq %rax,%r13 movq 24(%rbx),%rax adcq %rdx,%r8 adcq $0,%r9 xorq %r10,%r10 movq %rax,%rbp mulq 0(%rsi) addq %rax,%r11 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 8(%rsi) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 16(%rsi) addq %rcx,%r13 adcq $0,%rdx addq %rax,%r13 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 24(%rsi) addq %rcx,%r8 adcq $0,%rdx addq %rax,%r8 movq %r11,%rax adcq %rdx,%r9 adcq $0,%r10 movq %r11,%rbp shlq $32,%r11 mulq %r15 shrq $32,%rbp addq %r11,%r12 adcq %rbp,%r13 movq %r12,%rcx adcq %rax,%r8 adcq %rdx,%r9 movq %r13,%rbp adcq $0,%r10 subq $-1,%r12 movq %r8,%rbx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%rdx sbbq %r15,%r9 sbbq $0,%r10 cmovcq %rcx,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rbx,%r8 movq %r13,8(%rdi) cmovcq %rdx,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .globl _ecp_nistz256_sqr_mont_nohw .private_extern _ecp_nistz256_sqr_mont_nohw .p2align 5 _ecp_nistz256_sqr_mont_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$sqr_body: movq 0(%rsi),%rax movq 8(%rsi),%r14 movq 16(%rsi),%r15 movq 24(%rsi),%r8 call __ecp_nistz256_sqr_montq movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$sqr_epilogue: ret .p2align 5 __ecp_nistz256_sqr_montq: movq %rax,%r13 mulq %r14 movq %rax,%r9 movq %r15,%rax movq %rdx,%r10 mulq %r13 addq %rax,%r10 movq %r8,%rax adcq $0,%rdx movq %rdx,%r11 mulq %r13 addq %rax,%r11 movq %r15,%rax adcq $0,%rdx movq %rdx,%r12 mulq %r14 addq %rax,%r11 movq %r8,%rax adcq $0,%rdx movq %rdx,%rbp mulq %r14 addq %rax,%r12 movq %r8,%rax adcq 
$0,%rdx addq %rbp,%r12 movq %rdx,%r13 adcq $0,%r13 mulq %r15 xorq %r15,%r15 addq %rax,%r13 movq 0(%rsi),%rax movq %rdx,%r14 adcq $0,%r14 addq %r9,%r9 adcq %r10,%r10 adcq %r11,%r11 adcq %r12,%r12 adcq %r13,%r13 adcq %r14,%r14 adcq $0,%r15 mulq %rax movq %rax,%r8 movq 8(%rsi),%rax movq %rdx,%rcx mulq %rax addq %rcx,%r9 adcq %rax,%r10 movq 16(%rsi),%rax adcq $0,%rdx movq %rdx,%rcx mulq %rax addq %rcx,%r11 adcq %rax,%r12 movq 24(%rsi),%rax adcq $0,%rdx movq %rdx,%rcx mulq %rax addq %rcx,%r13 adcq %rax,%r14 movq %r8,%rax adcq %rdx,%r15 movq L$poly+8(%rip),%rsi movq L$poly+24(%rip),%rbp movq %r8,%rcx shlq $32,%r8 mulq %rbp shrq $32,%rcx addq %r8,%r9 adcq %rcx,%r10 adcq %rax,%r11 movq %r9,%rax adcq $0,%rdx movq %r9,%rcx shlq $32,%r9 movq %rdx,%r8 mulq %rbp shrq $32,%rcx addq %r9,%r10 adcq %rcx,%r11 adcq %rax,%r8 movq %r10,%rax adcq $0,%rdx movq %r10,%rcx shlq $32,%r10 movq %rdx,%r9 mulq %rbp shrq $32,%rcx addq %r10,%r11 adcq %rcx,%r8 adcq %rax,%r9 movq %r11,%rax adcq $0,%rdx movq %r11,%rcx shlq $32,%r11 movq %rdx,%r10 mulq %rbp shrq $32,%rcx addq %r11,%r8 adcq %rcx,%r9 adcq %rax,%r10 adcq $0,%rdx xorq %r11,%r11 addq %r8,%r12 adcq %r9,%r13 movq %r12,%r8 adcq %r10,%r14 adcq %rdx,%r15 movq %r13,%r9 adcq $0,%r11 subq $-1,%r12 movq %r14,%r10 sbbq %rsi,%r13 sbbq $0,%r14 movq %r15,%rcx sbbq %rbp,%r15 sbbq $0,%r11 cmovcq %r8,%r12 cmovcq %r9,%r13 movq %r12,0(%rdi) cmovcq %r10,%r14 movq %r13,8(%rdi) cmovcq %rcx,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) ret .globl _ecp_nistz256_mul_mont_adx .private_extern _ecp_nistz256_mul_mont_adx .p2align 5 _ecp_nistz256_mul_mont_adx: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$mulx_body: movq %rdx,%rbx movq 0(%rdx),%rdx movq 0(%rsi),%r9 movq 8(%rsi),%r10 movq 16(%rsi),%r11 movq 24(%rsi),%r12 leaq -128(%rsi),%rsi call __ecp_nistz256_mul_montx movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$mulx_epilogue: ret .p2align 5 
__ecp_nistz256_mul_montx: mulxq %r9,%r8,%r9 mulxq %r10,%rcx,%r10 movq $32,%r14 xorq %r13,%r13 mulxq %r11,%rbp,%r11 movq L$poly+24(%rip),%r15 adcq %rcx,%r9 mulxq %r12,%rcx,%r12 movq %r8,%rdx adcq %rbp,%r10 shlxq %r14,%r8,%rbp adcq %rcx,%r11 shrxq %r14,%r8,%rcx adcq $0,%r12 addq %rbp,%r9 adcq %rcx,%r10 mulxq %r15,%rcx,%rbp movq 8(%rbx),%rdx adcq %rcx,%r11 adcq %rbp,%r12 adcq $0,%r13 xorq %r8,%r8 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 24+128(%rsi),%rcx,%rbp movq %r9,%rdx adcxq %rcx,%r12 shlxq %r14,%r9,%rcx adoxq %rbp,%r13 shrxq %r14,%r9,%rbp adcxq %r8,%r13 adoxq %r8,%r8 adcq $0,%r8 addq %rcx,%r10 adcq %rbp,%r11 mulxq %r15,%rcx,%rbp movq 16(%rbx),%rdx adcq %rcx,%r12 adcq %rbp,%r13 adcq $0,%r8 xorq %r9,%r9 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 24+128(%rsi),%rcx,%rbp movq %r10,%rdx adcxq %rcx,%r13 shlxq %r14,%r10,%rcx adoxq %rbp,%r8 shrxq %r14,%r10,%rbp adcxq %r9,%r8 adoxq %r9,%r9 adcq $0,%r9 addq %rcx,%r11 adcq %rbp,%r12 mulxq %r15,%rcx,%rbp movq 24(%rbx),%rdx adcq %rcx,%r13 adcq %rbp,%r8 adcq $0,%r9 xorq %r10,%r10 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r13 adoxq %rbp,%r8 mulxq 24+128(%rsi),%rcx,%rbp movq %r11,%rdx adcxq %rcx,%r8 shlxq %r14,%r11,%rcx adoxq %rbp,%r9 shrxq %r14,%r11,%rbp adcxq %r10,%r9 adoxq %r10,%r10 adcq $0,%r10 addq %rcx,%r12 adcq %rbp,%r13 mulxq %r15,%rcx,%rbp movq %r12,%rbx movq L$poly+8(%rip),%r14 adcq %rcx,%r8 movq %r13,%rdx adcq %rbp,%r9 adcq $0,%r10 xorl %eax,%eax movq %r8,%rcx sbbq $-1,%r12 sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%rbp sbbq %r15,%r9 sbbq $0,%r10 cmovcq %rbx,%r12 cmovcq %rdx,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq 
%r13,8(%rdi) cmovcq %rbp,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .globl _ecp_nistz256_sqr_mont_adx .private_extern _ecp_nistz256_sqr_mont_adx .p2align 5 _ecp_nistz256_sqr_mont_adx: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$sqrx_body: movq 0(%rsi),%rdx movq 8(%rsi),%r14 movq 16(%rsi),%r15 movq 24(%rsi),%r8 leaq -128(%rsi),%rsi call __ecp_nistz256_sqr_montx movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$sqrx_epilogue: ret .p2align 5 __ecp_nistz256_sqr_montx: mulxq %r14,%r9,%r10 mulxq %r15,%rcx,%r11 xorl %eax,%eax adcq %rcx,%r10 mulxq %r8,%rbp,%r12 movq %r14,%rdx adcq %rbp,%r11 adcq $0,%r12 xorq %r13,%r13 mulxq %r15,%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq %r8,%rcx,%rbp movq %r15,%rdx adcxq %rcx,%r12 adoxq %rbp,%r13 adcq $0,%r13 mulxq %r8,%rcx,%r14 movq 0+128(%rsi),%rdx xorq %r15,%r15 adcxq %r9,%r9 adoxq %rcx,%r13 adcxq %r10,%r10 adoxq %r15,%r14 mulxq %rdx,%r8,%rbp movq 8+128(%rsi),%rdx adcxq %r11,%r11 adoxq %rbp,%r9 adcxq %r12,%r12 mulxq %rdx,%rcx,%rax movq 16+128(%rsi),%rdx adcxq %r13,%r13 adoxq %rcx,%r10 adcxq %r14,%r14 .byte 0x67 mulxq %rdx,%rcx,%rbp movq 24+128(%rsi),%rdx adoxq %rax,%r11 adcxq %r15,%r15 adoxq %rcx,%r12 movq $32,%rsi adoxq %rbp,%r13 .byte 0x67,0x67 mulxq %rdx,%rcx,%rax movq L$poly+24(%rip),%rdx adoxq %rcx,%r14 shlxq %rsi,%r8,%rcx adoxq %rax,%r15 shrxq %rsi,%r8,%rax movq %rdx,%rbp addq %rcx,%r9 adcq %rax,%r10 mulxq %r8,%rcx,%r8 adcq %rcx,%r11 shlxq %rsi,%r9,%rcx adcq $0,%r8 shrxq %rsi,%r9,%rax addq %rcx,%r10 adcq %rax,%r11 mulxq %r9,%rcx,%r9 adcq %rcx,%r8 shlxq %rsi,%r10,%rcx adcq $0,%r9 shrxq %rsi,%r10,%rax addq %rcx,%r11 adcq %rax,%r8 mulxq %r10,%rcx,%r10 adcq %rcx,%r9 shlxq %rsi,%r11,%rcx adcq $0,%r10 shrxq %rsi,%r11,%rax addq %rcx,%r8 adcq %rax,%r9 mulxq %r11,%rcx,%r11 adcq %rcx,%r10 adcq $0,%r11 xorq %rdx,%rdx addq %r8,%r12 movq L$poly+8(%rip),%rsi adcq %r9,%r13 movq %r12,%r8 adcq %r10,%r14 adcq %r11,%r15 movq 
%r13,%r9 adcq $0,%rdx subq $-1,%r12 movq %r14,%r10 sbbq %rsi,%r13 sbbq $0,%r14 movq %r15,%r11 sbbq %rbp,%r15 sbbq $0,%rdx cmovcq %r8,%r12 cmovcq %r9,%r13 movq %r12,0(%rdi) cmovcq %r10,%r14 movq %r13,8(%rdi) cmovcq %r11,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) ret .globl _ecp_nistz256_select_w5_nohw .private_extern _ecp_nistz256_select_w5_nohw .p2align 5 _ecp_nistz256_select_w5_nohw: _CET_ENDBR movdqa L$One(%rip),%xmm0 movd %edx,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 movdqa %xmm0,%xmm8 pshufd $0,%xmm1,%xmm1 movq $16,%rax L$select_loop_sse_w5: movdqa %xmm8,%xmm15 paddd %xmm0,%xmm8 pcmpeqd %xmm1,%xmm15 movdqa 0(%rsi),%xmm9 movdqa 16(%rsi),%xmm10 movdqa 32(%rsi),%xmm11 movdqa 48(%rsi),%xmm12 movdqa 64(%rsi),%xmm13 movdqa 80(%rsi),%xmm14 leaq 96(%rsi),%rsi pand %xmm15,%xmm9 pand %xmm15,%xmm10 por %xmm9,%xmm2 pand %xmm15,%xmm11 por %xmm10,%xmm3 pand %xmm15,%xmm12 por %xmm11,%xmm4 pand %xmm15,%xmm13 por %xmm12,%xmm5 pand %xmm15,%xmm14 por %xmm13,%xmm6 por %xmm14,%xmm7 decq %rax jnz L$select_loop_sse_w5 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqu %xmm4,32(%rdi) movdqu %xmm5,48(%rdi) movdqu %xmm6,64(%rdi) movdqu %xmm7,80(%rdi) ret L$SEH_end_ecp_nistz256_select_w5_nohw: .globl _ecp_nistz256_select_w7_nohw .private_extern _ecp_nistz256_select_w7_nohw .p2align 5 _ecp_nistz256_select_w7_nohw: _CET_ENDBR movdqa L$One(%rip),%xmm8 movd %edx,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa %xmm8,%xmm0 pshufd $0,%xmm1,%xmm1 movq $64,%rax L$select_loop_sse_w7: movdqa %xmm8,%xmm15 paddd %xmm0,%xmm8 movdqa 0(%rsi),%xmm9 movdqa 16(%rsi),%xmm10 pcmpeqd %xmm1,%xmm15 movdqa 32(%rsi),%xmm11 movdqa 48(%rsi),%xmm12 leaq 64(%rsi),%rsi pand %xmm15,%xmm9 pand %xmm15,%xmm10 por %xmm9,%xmm2 pand %xmm15,%xmm11 por %xmm10,%xmm3 pand %xmm15,%xmm12 por %xmm11,%xmm4 prefetcht0 255(%rsi) por %xmm12,%xmm5 decq %rax jnz L$select_loop_sse_w7 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqu 
%xmm4,32(%rdi) movdqu %xmm5,48(%rdi) ret L$SEH_end_ecp_nistz256_select_w7_nohw: .globl _ecp_nistz256_select_w5_avx2 .private_extern _ecp_nistz256_select_w5_avx2 .p2align 5 _ecp_nistz256_select_w5_avx2: _CET_ENDBR vzeroupper vmovdqa L$Two(%rip),%ymm0 vpxor %ymm2,%ymm2,%ymm2 vpxor %ymm3,%ymm3,%ymm3 vpxor %ymm4,%ymm4,%ymm4 vmovdqa L$One(%rip),%ymm5 vmovdqa L$Two(%rip),%ymm10 vmovd %edx,%xmm1 vpermd %ymm1,%ymm2,%ymm1 movq $8,%rax L$select_loop_avx2_w5: vmovdqa 0(%rsi),%ymm6 vmovdqa 32(%rsi),%ymm7 vmovdqa 64(%rsi),%ymm8 vmovdqa 96(%rsi),%ymm11 vmovdqa 128(%rsi),%ymm12 vmovdqa 160(%rsi),%ymm13 vpcmpeqd %ymm1,%ymm5,%ymm9 vpcmpeqd %ymm1,%ymm10,%ymm14 vpaddd %ymm0,%ymm5,%ymm5 vpaddd %ymm0,%ymm10,%ymm10 leaq 192(%rsi),%rsi vpand %ymm9,%ymm6,%ymm6 vpand %ymm9,%ymm7,%ymm7 vpand %ymm9,%ymm8,%ymm8 vpand %ymm14,%ymm11,%ymm11 vpand %ymm14,%ymm12,%ymm12 vpand %ymm14,%ymm13,%ymm13 vpxor %ymm6,%ymm2,%ymm2 vpxor %ymm7,%ymm3,%ymm3 vpxor %ymm8,%ymm4,%ymm4 vpxor %ymm11,%ymm2,%ymm2 vpxor %ymm12,%ymm3,%ymm3 vpxor %ymm13,%ymm4,%ymm4 decq %rax jnz L$select_loop_avx2_w5 vmovdqu %ymm2,0(%rdi) vmovdqu %ymm3,32(%rdi) vmovdqu %ymm4,64(%rdi) vzeroupper ret L$SEH_end_ecp_nistz256_select_w5_avx2: .globl _ecp_nistz256_select_w7_avx2 .private_extern _ecp_nistz256_select_w7_avx2 .p2align 5 _ecp_nistz256_select_w7_avx2: _CET_ENDBR vzeroupper vmovdqa L$Three(%rip),%ymm0 vpxor %ymm2,%ymm2,%ymm2 vpxor %ymm3,%ymm3,%ymm3 vmovdqa L$One(%rip),%ymm4 vmovdqa L$Two(%rip),%ymm8 vmovdqa L$Three(%rip),%ymm12 vmovd %edx,%xmm1 vpermd %ymm1,%ymm2,%ymm1 movq $21,%rax L$select_loop_avx2_w7: vmovdqa 0(%rsi),%ymm5 vmovdqa 32(%rsi),%ymm6 vmovdqa 64(%rsi),%ymm9 vmovdqa 96(%rsi),%ymm10 vmovdqa 128(%rsi),%ymm13 vmovdqa 160(%rsi),%ymm14 vpcmpeqd %ymm1,%ymm4,%ymm7 vpcmpeqd %ymm1,%ymm8,%ymm11 vpcmpeqd %ymm1,%ymm12,%ymm15 vpaddd %ymm0,%ymm4,%ymm4 vpaddd %ymm0,%ymm8,%ymm8 vpaddd %ymm0,%ymm12,%ymm12 leaq 192(%rsi),%rsi vpand %ymm7,%ymm5,%ymm5 vpand %ymm7,%ymm6,%ymm6 vpand %ymm11,%ymm9,%ymm9 vpand %ymm11,%ymm10,%ymm10 vpand 
%ymm15,%ymm13,%ymm13 vpand %ymm15,%ymm14,%ymm14 vpxor %ymm5,%ymm2,%ymm2 vpxor %ymm6,%ymm3,%ymm3 vpxor %ymm9,%ymm2,%ymm2 vpxor %ymm10,%ymm3,%ymm3 vpxor %ymm13,%ymm2,%ymm2 vpxor %ymm14,%ymm3,%ymm3 decq %rax jnz L$select_loop_avx2_w7 vmovdqa 0(%rsi),%ymm5 vmovdqa 32(%rsi),%ymm6 vpcmpeqd %ymm1,%ymm4,%ymm7 vpand %ymm7,%ymm5,%ymm5 vpand %ymm7,%ymm6,%ymm6 vpxor %ymm5,%ymm2,%ymm2 vpxor %ymm6,%ymm3,%ymm3 vmovdqu %ymm2,0(%rdi) vmovdqu %ymm3,32(%rdi) vzeroupper ret L$SEH_end_ecp_nistz256_select_w7_avx2: /* __ecp_nistz256_add_toq: 256-bit modular add. (%r12,%r13,%r8,%r9) += value at (%rbx), carry-out collected in %r11, then a trial subtract of the modulus limbs (-1, %r14, 0, %r15) — callers preload %r14/%r15 from L$poly+8/L$poly+24 — and cmovc restores the pre-subtract value on borrow; no data-dependent branches. Result stored to 0..24(%rdi). */ .p2align 5 __ecp_nistz256_add_toq: xorq %r11,%r11 addq 0(%rbx),%r12 adcq 8(%rbx),%r13 movq %r12,%rax adcq 16(%rbx),%r8 adcq 24(%rbx),%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret /* __ecp_nistz256_sub_fromq: 256-bit modular subtract. (%r12,%r13,%r8,%r9) -= value at (%rbx); "sbbq %r11,%r11" turns the borrow into an all-ones/zero mask, the modulus (-1, %r14, 0, %r15) is then added unconditionally, and cmovz keeps the raw difference when %r11 is zero (no borrow). Result stored to 0..24(%rdi). */ .p2align 5 __ecp_nistz256_sub_fromq: subq 0(%rbx),%r12 sbbq 8(%rbx),%r13 movq %r12,%rax sbbq 16(%rbx),%r8 sbbq 24(%rbx),%r9 movq %r13,%rbp sbbq %r11,%r11 addq $-1,%r12 movq %r8,%rcx adcq %r14,%r13 adcq $0,%r8 movq %r9,%r10 adcq %r15,%r9 testq %r11,%r11 cmovzq %rax,%r12 cmovzq %rbp,%r13 movq %r12,0(%rdi) cmovzq %rcx,%r8 movq %r13,8(%rdi) cmovzq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret /* __ecp_nistz256_subq: (%r12,%r13,%r8,%r9) = (%rax,%rbp,%rcx,%r10) - (%r12,%r13,%r8,%r9) with the same borrow-mask-then-conditional-modulus-add pattern (cmovnz selects the corrected value when a borrow occurred). Result is left in registers; the caller stores it. */ .p2align 5 __ecp_nistz256_subq: subq %r12,%rax sbbq %r13,%rbp movq %rax,%r12 sbbq %r8,%rcx sbbq %r9,%r10 movq %rbp,%r13 sbbq %r11,%r11 addq $-1,%rax movq %rcx,%r8 adcq %r14,%rbp adcq $0,%rcx movq %r10,%r9 adcq %r15,%r10 testq %r11,%r11 cmovnzq %rax,%r12 cmovnzq %rbp,%r13 cmovnzq %rcx,%r8 cmovnzq %r10,%r9 ret /* __ecp_nistz256_mul_by_2q: modular doubling — (%r12,%r13,%r8,%r9) += itself with carry collected in %r11, followed by the same trial subtract of (-1, %r14, 0, %r15) and cmovc restore on borrow. Result stored to 0..24(%rdi). */ .p2align 5 __ecp_nistz256_mul_by_2q: xorq %r11,%r11 addq %r12,%r12 adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret 
.globl _ecp_nistz256_point_double_nohw .private_extern _ecp_nistz256_point_double_nohw .p2align 5 _ecp_nistz256_point_double_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $160+8,%rsp L$point_doubleq_body: L$point_double_shortcutq: movdqu 0(%rsi),%xmm0 movq %rsi,%rbx movdqu 16(%rsi),%xmm1 movq 32+0(%rsi),%r12 movq 32+8(%rsi),%r13 movq 32+16(%rsi),%r8 movq 32+24(%rsi),%r9 movq L$poly+8(%rip),%r14 movq L$poly+24(%rip),%r15 movdqa %xmm0,96(%rsp) movdqa %xmm1,96+16(%rsp) leaq 32(%rdi),%r10 leaq 64(%rdi),%r11 .byte 102,72,15,110,199 .byte 102,73,15,110,202 .byte 102,73,15,110,211 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_by_2q movq 64+0(%rsi),%rax movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 leaq 64-0(%rsi),%rsi leaq 64(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 0+0(%rsp),%rax movq 8+0(%rsp),%r14 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 0(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 32(%rbx),%rax movq 64+0(%rbx),%r9 movq 64+8(%rbx),%r10 movq 64+16(%rbx),%r11 movq 64+24(%rbx),%r12 leaq 64-0(%rbx),%rsi leaq 32(%rbx),%rbx .byte 102,72,15,126,215 call __ecp_nistz256_mul_montq call __ecp_nistz256_mul_by_2q movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 32(%rsp),%rdi call __ecp_nistz256_add_toq movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 0+0(%rsp),%rax movq 8+0(%rsp),%r14 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 .byte 102,72,15,126,207 call __ecp_nistz256_sqr_montq xorq %r9,%r9 movq %r12,%rax addq $-1,%r12 movq %r13,%r10 adcq %rsi,%r13 movq %r14,%rcx adcq $0,%r14 movq %r15,%r8 adcq %rbp,%r15 adcq $0,%r9 xorq %rsi,%rsi testq $1,%rax cmovzq %rax,%r12 cmovzq %r10,%r13 cmovzq %rcx,%r14 cmovzq %r8,%r15 cmovzq %rsi,%r9 movq %r13,%rax shrq $1,%r12 shlq $63,%rax movq %r14,%r10 shrq $1,%r13 orq %rax,%r12 
shlq $63,%r10 movq %r15,%rcx shrq $1,%r14 orq %r10,%r13 shlq $63,%rcx movq %r12,0(%rdi) shrq $1,%r15 movq %r13,8(%rdi) shlq $63,%r9 orq %rcx,%r14 orq %r9,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) movq 64(%rsp),%rax leaq 64(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2q leaq 32(%rsp),%rbx leaq 32(%rsp),%rdi call __ecp_nistz256_add_toq movq 96(%rsp),%rax leaq 96(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2q movq 0+32(%rsp),%rax movq 8+32(%rsp),%r14 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r15 movq 24+32(%rsp),%r8 .byte 102,72,15,126,199 call __ecp_nistz256_sqr_montq leaq 128(%rsp),%rbx movq %r14,%r8 movq %r15,%r9 movq %rsi,%r14 movq %rbp,%r15 call __ecp_nistz256_sub_fromq movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 0(%rsp),%rdi call __ecp_nistz256_subq movq 32(%rsp),%rax leaq 32(%rsp),%rbx movq %r12,%r14 xorl %ecx,%ecx movq %r12,0+0(%rsp) movq %r13,%r10 movq %r13,0+8(%rsp) cmovzq %r8,%r11 movq %r8,0+16(%rsp) leaq 0-0(%rsp),%rsi cmovzq %r9,%r12 movq %r9,0+24(%rsp) movq %r14,%r9 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq .byte 102,72,15,126,203 .byte 102,72,15,126,207 call __ecp_nistz256_sub_fromq leaq 160+56(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbx movq -8(%rsi),%rbp leaq (%rsi),%rsp L$point_doubleq_epilogue: ret .globl _ecp_nistz256_point_add_nohw .private_extern _ecp_nistz256_point_add_nohw .p2align 5 _ecp_nistz256_point_add_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $576+8,%rsp L$point_addq_body: movdqu 0(%rsi),%xmm0 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 
48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq %rsi,%rbx movq %rdx,%rsi movdqa %xmm0,384(%rsp) movdqa %xmm1,384+16(%rsp) movdqa %xmm2,416(%rsp) movdqa %xmm3,416+16(%rsp) movdqa %xmm4,448(%rsp) movdqa %xmm5,448+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rsi),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 por %xmm3,%xmm5 movdqu 48(%rsi),%xmm3 movq 64+0(%rsi),%rax movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,480(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,480+16(%rsp) movdqu 64(%rsi),%xmm0 movdqu 80(%rsi),%xmm1 movdqa %xmm2,512(%rsp) movdqa %xmm3,512+16(%rsp) por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm0,%xmm1 .byte 102,72,15,110,199 leaq 64-0(%rsi),%rsi movq %rax,544+0(%rsp) movq %r14,544+8(%rsp) movq %r15,544+16(%rsp) movq %r8,544+24(%rsp) leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montq pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm1,%xmm4 por %xmm1,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 por %xmm3,%xmm4 pxor %xmm3,%xmm3 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 movq 64+0(%rbx),%rax movq 64+8(%rbx),%r14 movq 64+16(%rbx),%r15 movq 64+24(%rbx),%r8 .byte 102,72,15,110,203 leaq 64-0(%rbx),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 544(%rsp),%rax leaq 544(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq 0+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montq movq 448(%rsp),%rax leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montq movq 416(%rsp),%rax leaq 416(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq 0+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montq movq 512(%rsp),%rax leaq 512(%rsp),%rbx movq 0+256(%rsp),%r9 movq 8+256(%rsp),%r10 leaq 0+256(%rsp),%rsi movq 16+256(%rsp),%r11 movq 24+256(%rsp),%r12 leaq 256(%rsp),%rdi call 
__ecp_nistz256_mul_montq leaq 224(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromq orq %r13,%r12 movdqa %xmm4,%xmm2 orq %r8,%r12 orq %r9,%r12 por %xmm5,%xmm2 .byte 102,73,15,110,220 movq 384(%rsp),%rax leaq 384(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq 0+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montq movq 480(%rsp),%rax leaq 480(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 160(%rsp),%rbx leaq 0(%rsp),%rdi call __ecp_nistz256_sub_fromq orq %r13,%r12 orq %r8,%r12 orq %r9,%r12 .byte 102,73,15,126,208 .byte 102,73,15,126,217 orq %r8,%r12 .byte 0x3e jnz L$add_proceedq testq %r9,%r9 jz L$add_doubleq .byte 102,72,15,126,199 pxor %xmm0,%xmm0 movdqu %xmm0,0(%rdi) movdqu %xmm0,16(%rdi) movdqu %xmm0,32(%rdi) movdqu %xmm0,48(%rdi) movdqu %xmm0,64(%rdi) movdqu %xmm0,80(%rdi) jmp L$add_doneq .p2align 5 L$add_doubleq: .byte 102,72,15,126,206 .byte 102,72,15,126,199 addq $416,%rsp jmp L$point_double_shortcutq .p2align 5 L$add_proceedq: movq 0+64(%rsp),%rax movq 8+64(%rsp),%r14 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 448(%rsp),%rax leaq 448(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montq movq 0+0(%rsp),%rax movq 8+0(%rsp),%r14 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 544(%rsp),%rax leaq 544(%rsp),%rbx movq 0+352(%rsp),%r9 movq 8+352(%rsp),%r10 leaq 0+352(%rsp),%rsi movq 16+352(%rsp),%r11 movq 24+352(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montq movq 0(%rsp),%rax leaq 0(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 
128(%rsp),%rdi call __ecp_nistz256_mul_montq movq 160(%rsp),%rax leaq 160(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montq xorq %r11,%r11 addq %r12,%r12 leaq 96(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subq leaq 128(%rsp),%rbx leaq 288(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 192+0(%rsp),%rax movq 192+8(%rsp),%rbp movq 192+16(%rsp),%rcx movq 192+24(%rsp),%r10 leaq 320(%rsp),%rdi call __ecp_nistz256_subq movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 128(%rsp),%rax leaq 128(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq 0+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montq movq 320(%rsp),%rax leaq 320(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 320(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 256(%rsp),%rbx leaq 320(%rsp),%rdi call __ecp_nistz256_sub_fromq .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 352(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 352+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 544(%rsp),%xmm2 pand 544+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 480(%rsp),%xmm2 pand 
480+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 320(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 320+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 512(%rsp),%xmm2 pand 512+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) L$add_doneq: leaq 576+56(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbx movq -8(%rsi),%rbp leaq (%rsi),%rsp L$point_addq_epilogue: ret .globl _ecp_nistz256_point_add_affine_nohw .private_extern _ecp_nistz256_point_add_affine_nohw .p2align 5 _ecp_nistz256_point_add_affine_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $480+8,%rsp L$add_affineq_body: movdqu 0(%rsi),%xmm0 movq %rdx,%rbx movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq 64+0(%rsi),%rax movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,320(%rsp) movdqa %xmm1,320+16(%rsp) movdqa %xmm2,352(%rsp) movdqa %xmm3,352+16(%rsp) movdqa %xmm4,384(%rsp) movdqa %xmm5,384+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rbx),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rbx),%xmm1 movdqu 32(%rbx),%xmm2 por %xmm3,%xmm5 movdqu 48(%rbx),%xmm3 movdqa %xmm0,416(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,416+16(%rsp) por %xmm0,%xmm1 .byte 102,72,15,110,199 movdqa %xmm2,448(%rsp) movdqa %xmm3,448+16(%rsp) por %xmm2,%xmm3 por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm1,%xmm3 leaq 64-0(%rsi),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montq pcmpeqd 
%xmm4,%xmm5 pshufd $0xb1,%xmm3,%xmm4 movq 0(%rbx),%rax movq %r12,%r9 por %xmm3,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 movq %r13,%r10 por %xmm3,%xmm4 pxor %xmm3,%xmm3 movq %r14,%r11 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 leaq 32-0(%rsp),%rsi movq %r15,%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 320(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 384(%rsp),%rax leaq 384(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq movq 384(%rsp),%rax leaq 384(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 288(%rsp),%rdi call __ecp_nistz256_mul_montq movq 448(%rsp),%rax leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 352(%rsp),%rbx leaq 96(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 0+64(%rsp),%rax movq 8+64(%rsp),%r14 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 128(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 0+96(%rsp),%rax movq 8+96(%rsp),%r14 leaq 0+96(%rsp),%rsi movq 16+96(%rsp),%r15 movq 24+96(%rsp),%r8 leaq 192(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 128(%rsp),%rax leaq 128(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montq movq 320(%rsp),%rax leaq 320(%rsp),%rbx movq 0+128(%rsp),%r9 movq 8+128(%rsp),%r10 leaq 0+128(%rsp),%rsi movq 16+128(%rsp),%r11 movq 24+128(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq xorq %r11,%r11 addq %r12,%r12 leaq 192(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 
movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subq leaq 160(%rsp),%rbx leaq 224(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 64(%rsp),%rdi call __ecp_nistz256_subq movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 352(%rsp),%rax leaq 352(%rsp),%rbx movq 0+160(%rsp),%r9 movq 8+160(%rsp),%r10 leaq 0+160(%rsp),%rsi movq 16+160(%rsp),%r11 movq 24+160(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq movq 96(%rsp),%rax leaq 96(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 64(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 32(%rsp),%rbx leaq 256(%rsp),%rdi call __ecp_nistz256_sub_fromq .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand L$ONE_mont(%rip),%xmm2 pand L$ONE_mont+16(%rip),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 224(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 224+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 320(%rsp),%xmm2 pand 320+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 256(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 256+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 
movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 352(%rsp),%xmm2 pand 352+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) leaq 480+56(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbx movq -8(%rsi),%rbp leaq (%rsi),%rsp L$add_affineq_epilogue: ret /* __ecp_nistz256_add_tox: ADX-path counterpart of __ecp_nistz256_add_toq — "xorq %r11,%r11" clears CF so the following adcq chain starts clean. Adds the value at (%rbx) into (%r12,%r13,%r8,%r9), then trial-subtracts the modulus limbs (-1, %r14, 0, %r15) and cmovc restores the pre-subtract value on borrow; no data-dependent branches. Result stored to 0..24(%rdi). */ .p2align 5 __ecp_nistz256_add_tox: xorq %r11,%r11 adcq 0(%rbx),%r12 adcq 8(%rbx),%r13 movq %r12,%rax adcq 16(%rbx),%r8 adcq 24(%rbx),%r9 movq %r13,%rbp adcq $0,%r11 xorq %r10,%r10 sbbq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret /* __ecp_nistz256_sub_fromx: (%r12,%r13,%r8,%r9) -= value at (%rbx) via an sbbq chain (CF cleared by the leading xorq); the final borrow lands in bit 0 of %r11, the modulus is then added unconditionally, and "btq $0,%r11" + cmovnc keep the raw difference when there was no borrow. Result stored to 0..24(%rdi). */ .p2align 5 __ecp_nistz256_sub_fromx: xorq %r11,%r11 sbbq 0(%rbx),%r12 sbbq 8(%rbx),%r13 movq %r12,%rax sbbq 16(%rbx),%r8 sbbq 24(%rbx),%r9 movq %r13,%rbp sbbq $0,%r11 xorq %r10,%r10 adcq $-1,%r12 movq %r8,%rcx adcq %r14,%r13 adcq $0,%r8 movq %r9,%r10 adcq %r15,%r9 btq $0,%r11 cmovncq %rax,%r12 cmovncq %rbp,%r13 movq %r12,0(%rdi) cmovncq %rcx,%r8 movq %r13,8(%rdi) cmovncq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret /* __ecp_nistz256_subx: (%r12,%r13,%r8,%r9) = (%rax,%rbp,%rcx,%r10) - (%r12,%r13,%r8,%r9), same borrow-in-%r11 / conditional-modulus-add pattern (cmovc selects the corrected value when the borrow bit is set). Result is left in registers; the caller stores it. */ .p2align 5 __ecp_nistz256_subx: xorq %r11,%r11 sbbq %r12,%rax sbbq %r13,%rbp movq %rax,%r12 sbbq %r8,%rcx sbbq %r9,%r10 movq %rbp,%r13 sbbq $0,%r11 xorq %r9,%r9 adcq $-1,%rax movq %rcx,%r8 adcq %r14,%rbp adcq $0,%rcx movq %r10,%r9 adcq %r15,%r10 btq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 cmovcq %rcx,%r8 cmovcq %r10,%r9 ret /* __ecp_nistz256_mul_by_2x: modular doubling via an adcq chain (CF cleared by the leading xorq), carry collected in %r11, then the trial subtract of (-1, %r14, 0, %r15) with cmovc restore on borrow. Result stored to 0..24(%rdi). */ .p2align 5 __ecp_nistz256_mul_by_2x: xorq %r11,%r11 adcq %r12,%r12 adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 xorq %r10,%r10 sbbq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .globl 
_ecp_nistz256_point_double_adx .private_extern _ecp_nistz256_point_double_adx .p2align 5 _ecp_nistz256_point_double_adx: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $160+8,%rsp L$point_doublex_body: L$point_double_shortcutx: movdqu 0(%rsi),%xmm0 movq %rsi,%rbx movdqu 16(%rsi),%xmm1 movq 32+0(%rsi),%r12 movq 32+8(%rsi),%r13 movq 32+16(%rsi),%r8 movq 32+24(%rsi),%r9 movq L$poly+8(%rip),%r14 movq L$poly+24(%rip),%r15 movdqa %xmm0,96(%rsp) movdqa %xmm1,96+16(%rsp) leaq 32(%rdi),%r10 leaq 64(%rdi),%r11 .byte 102,72,15,110,199 .byte 102,73,15,110,202 .byte 102,73,15,110,211 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_by_2x movq 64+0(%rsi),%rdx movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 leaq 64-128(%rsi),%rsi leaq 64(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 0+0(%rsp),%rdx movq 8+0(%rsp),%r14 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 0(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 32(%rbx),%rdx movq 64+0(%rbx),%r9 movq 64+8(%rbx),%r10 movq 64+16(%rbx),%r11 movq 64+24(%rbx),%r12 leaq 64-128(%rbx),%rsi leaq 32(%rbx),%rbx .byte 102,72,15,126,215 call __ecp_nistz256_mul_montx call __ecp_nistz256_mul_by_2x movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 32(%rsp),%rdi call __ecp_nistz256_add_tox movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 0+0(%rsp),%rdx movq 8+0(%rsp),%r14 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 .byte 102,72,15,126,207 call __ecp_nistz256_sqr_montx xorq %r9,%r9 movq %r12,%rax addq $-1,%r12 movq %r13,%r10 adcq %rsi,%r13 movq %r14,%rcx adcq $0,%r14 movq %r15,%r8 adcq %rbp,%r15 adcq $0,%r9 xorq %rsi,%rsi testq $1,%rax cmovzq %rax,%r12 cmovzq %r10,%r13 cmovzq %rcx,%r14 cmovzq %r8,%r15 cmovzq %rsi,%r9 movq %r13,%rax shrq $1,%r12 shlq $63,%rax movq %r14,%r10 shrq $1,%r13 orq %rax,%r12 
shlq $63,%r10 movq %r15,%rcx shrq $1,%r14 orq %r10,%r13 shlq $63,%rcx movq %r12,0(%rdi) shrq $1,%r15 movq %r13,8(%rdi) shlq $63,%r9 orq %rcx,%r14 orq %r9,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) movq 64(%rsp),%rdx leaq 64(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2x leaq 32(%rsp),%rbx leaq 32(%rsp),%rdi call __ecp_nistz256_add_tox movq 96(%rsp),%rdx leaq 96(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2x movq 0+32(%rsp),%rdx movq 8+32(%rsp),%r14 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r15 movq 24+32(%rsp),%r8 .byte 102,72,15,126,199 call __ecp_nistz256_sqr_montx leaq 128(%rsp),%rbx movq %r14,%r8 movq %r15,%r9 movq %rsi,%r14 movq %rbp,%r15 call __ecp_nistz256_sub_fromx movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 0(%rsp),%rdi call __ecp_nistz256_subx movq 32(%rsp),%rdx leaq 32(%rsp),%rbx movq %r12,%r14 xorl %ecx,%ecx movq %r12,0+0(%rsp) movq %r13,%r10 movq %r13,0+8(%rsp) cmovzq %r8,%r11 movq %r8,0+16(%rsp) leaq 0-128(%rsp),%rsi cmovzq %r9,%r12 movq %r9,0+24(%rsp) movq %r14,%r9 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx .byte 102,72,15,126,203 .byte 102,72,15,126,207 call __ecp_nistz256_sub_fromx leaq 160+56(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbx movq -8(%rsi),%rbp leaq (%rsi),%rsp L$point_doublex_epilogue: ret .globl _ecp_nistz256_point_add_adx .private_extern _ecp_nistz256_point_add_adx .p2align 5 _ecp_nistz256_point_add_adx: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $576+8,%rsp L$point_addx_body: movdqu 0(%rsi),%xmm0 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 
48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq %rsi,%rbx movq %rdx,%rsi movdqa %xmm0,384(%rsp) movdqa %xmm1,384+16(%rsp) movdqa %xmm2,416(%rsp) movdqa %xmm3,416+16(%rsp) movdqa %xmm4,448(%rsp) movdqa %xmm5,448+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rsi),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 por %xmm3,%xmm5 movdqu 48(%rsi),%xmm3 movq 64+0(%rsi),%rdx movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,480(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,480+16(%rsp) movdqu 64(%rsi),%xmm0 movdqu 80(%rsi),%xmm1 movdqa %xmm2,512(%rsp) movdqa %xmm3,512+16(%rsp) por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm0,%xmm1 .byte 102,72,15,110,199 leaq 64-128(%rsi),%rsi movq %rdx,544+0(%rsp) movq %r14,544+8(%rsp) movq %r15,544+16(%rsp) movq %r8,544+24(%rsp) leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montx pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm1,%xmm4 por %xmm1,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 por %xmm3,%xmm4 pxor %xmm3,%xmm3 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 movq 64+0(%rbx),%rdx movq 64+8(%rbx),%r14 movq 64+16(%rbx),%r15 movq 64+24(%rbx),%r8 .byte 102,72,15,110,203 leaq 64-128(%rbx),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 544(%rsp),%rdx leaq 544(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq -128+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montx movq 448(%rsp),%rdx leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montx movq 416(%rsp),%rdx leaq 416(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq -128+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montx movq 512(%rsp),%rdx leaq 512(%rsp),%rbx movq 0+256(%rsp),%r9 movq 8+256(%rsp),%r10 leaq -128+256(%rsp),%rsi movq 16+256(%rsp),%r11 movq 24+256(%rsp),%r12 leaq 
256(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 224(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromx orq %r13,%r12 movdqa %xmm4,%xmm2 orq %r8,%r12 orq %r9,%r12 por %xmm5,%xmm2 .byte 102,73,15,110,220 movq 384(%rsp),%rdx leaq 384(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq -128+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montx movq 480(%rsp),%rdx leaq 480(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 160(%rsp),%rbx leaq 0(%rsp),%rdi call __ecp_nistz256_sub_fromx orq %r13,%r12 orq %r8,%r12 orq %r9,%r12 .byte 102,73,15,126,208 .byte 102,73,15,126,217 orq %r8,%r12 .byte 0x3e jnz L$add_proceedx testq %r9,%r9 jz L$add_doublex .byte 102,72,15,126,199 pxor %xmm0,%xmm0 movdqu %xmm0,0(%rdi) movdqu %xmm0,16(%rdi) movdqu %xmm0,32(%rdi) movdqu %xmm0,48(%rdi) movdqu %xmm0,64(%rdi) movdqu %xmm0,80(%rdi) jmp L$add_donex .p2align 5 L$add_doublex: .byte 102,72,15,126,206 .byte 102,72,15,126,199 addq $416,%rsp jmp L$point_double_shortcutx .p2align 5 L$add_proceedx: movq 0+64(%rsp),%rdx movq 8+64(%rsp),%r14 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 448(%rsp),%rdx leaq 448(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montx movq 0+0(%rsp),%rdx movq 8+0(%rsp),%r14 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 544(%rsp),%rdx leaq 544(%rsp),%rbx movq 0+352(%rsp),%r9 movq 8+352(%rsp),%r10 leaq -128+352(%rsp),%rsi movq 16+352(%rsp),%r11 movq 24+352(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montx movq 0(%rsp),%rdx leaq 0(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 
16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 128(%rsp),%rdi call __ecp_nistz256_mul_montx movq 160(%rsp),%rdx leaq 160(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montx xorq %r11,%r11 addq %r12,%r12 leaq 96(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subx leaq 128(%rsp),%rbx leaq 288(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 192+0(%rsp),%rax movq 192+8(%rsp),%rbp movq 192+16(%rsp),%rcx movq 192+24(%rsp),%r10 leaq 320(%rsp),%rdi call __ecp_nistz256_subx movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 128(%rsp),%rdx leaq 128(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq -128+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montx movq 320(%rsp),%rdx leaq 320(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 320(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 256(%rsp),%rbx leaq 320(%rsp),%rdi call __ecp_nistz256_sub_fromx .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 352(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 352+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 544(%rsp),%xmm2 pand 544+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa 
%xmm5,%xmm3 pand 480(%rsp),%xmm2 pand 480+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 320(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 320+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 512(%rsp),%xmm2 pand 512+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) L$add_donex: leaq 576+56(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbx movq -8(%rsi),%rbp leaq (%rsi),%rsp L$point_addx_epilogue: ret .globl _ecp_nistz256_point_add_affine_adx .private_extern _ecp_nistz256_point_add_affine_adx .p2align 5 _ecp_nistz256_point_add_affine_adx: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $480+8,%rsp L$add_affinex_body: movdqu 0(%rsi),%xmm0 movq %rdx,%rbx movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq 64+0(%rsi),%rdx movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,320(%rsp) movdqa %xmm1,320+16(%rsp) movdqa %xmm2,352(%rsp) movdqa %xmm3,352+16(%rsp) movdqa %xmm4,384(%rsp) movdqa %xmm5,384+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rbx),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rbx),%xmm1 movdqu 32(%rbx),%xmm2 por %xmm3,%xmm5 movdqu 48(%rbx),%xmm3 movdqa %xmm0,416(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,416+16(%rsp) por %xmm0,%xmm1 .byte 102,72,15,110,199 movdqa %xmm2,448(%rsp) movdqa %xmm3,448+16(%rsp) por %xmm2,%xmm3 por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm1,%xmm3 leaq 64-128(%rsi),%rsi leaq 32(%rsp),%rdi 
call __ecp_nistz256_sqr_montx pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm3,%xmm4 movq 0(%rbx),%rdx movq %r12,%r9 por %xmm3,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 movq %r13,%r10 por %xmm3,%xmm4 pxor %xmm3,%xmm3 movq %r14,%r11 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 leaq 32-128(%rsp),%rsi movq %r15,%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 320(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 384(%rsp),%rdx leaq 384(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx movq 384(%rsp),%rdx leaq 384(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 288(%rsp),%rdi call __ecp_nistz256_mul_montx movq 448(%rsp),%rdx leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 352(%rsp),%rbx leaq 96(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 0+64(%rsp),%rdx movq 8+64(%rsp),%r14 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 128(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 0+96(%rsp),%rdx movq 8+96(%rsp),%r14 leaq -128+96(%rsp),%rsi movq 16+96(%rsp),%r15 movq 24+96(%rsp),%r8 leaq 192(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 128(%rsp),%rdx leaq 128(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montx movq 320(%rsp),%rdx leaq 320(%rsp),%rbx movq 0+128(%rsp),%r9 movq 8+128(%rsp),%r10 leaq -128+128(%rsp),%rsi movq 16+128(%rsp),%r11 movq 24+128(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx xorq %r11,%r11 addq %r12,%r12 leaq 192(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq 
$0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subx leaq 160(%rsp),%rbx leaq 224(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 64(%rsp),%rdi call __ecp_nistz256_subx movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 352(%rsp),%rdx leaq 352(%rsp),%rbx movq 0+160(%rsp),%r9 movq 8+160(%rsp),%r10 leaq -128+160(%rsp),%rsi movq 16+160(%rsp),%r11 movq 24+160(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx movq 96(%rsp),%rdx leaq 96(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 64(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 32(%rsp),%rbx leaq 256(%rsp),%rdi call __ecp_nistz256_sub_fromx .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand L$ONE_mont(%rip),%xmm2 pand L$ONE_mont+16(%rip),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 224(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 224+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 320(%rsp),%xmm2 pand 320+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 256(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 256+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 448(%rsp),%xmm2 
pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 352(%rsp),%xmm2 pand 352+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) leaq 480+56(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbx movq -8(%rsi),%rbp leaq (%rsi),%rsp L$add_affinex_epilogue: ret #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/p256-x86_64-asm-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .section .rodata .align 64 .Lpoly: .quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001 .LOne: .long 1,1,1,1,1,1,1,1 .LTwo: .long 2,2,2,2,2,2,2,2 .LThree: .long 3,3,3,3,3,3,3,3 .LONE_mont: .quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe .Lord: .quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000 .LordK: .quad 0xccd1c8aaee00bc4f .text .globl ecp_nistz256_neg .hidden ecp_nistz256_neg .type ecp_nistz256_neg,@function .align 32 ecp_nistz256_neg: .cfi_startproc _CET_ENDBR pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-16 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-24 .Lneg_body: xorq %r8,%r8 xorq %r9,%r9 xorq %r10,%r10 xorq %r11,%r11 xorq %r13,%r13 subq 0(%rsi),%r8 sbbq 8(%rsi),%r9 sbbq 16(%rsi),%r10 movq %r8,%rax sbbq 24(%rsi),%r11 leaq .Lpoly(%rip),%rsi movq %r9,%rdx sbbq $0,%r13 addq 0(%rsi),%r8 movq %r10,%rcx adcq 8(%rsi),%r9 adcq 16(%rsi),%r10 movq %r11,%r12 adcq 24(%rsi),%r11 testq %r13,%r13 
cmovzq %rax,%r8 cmovzq %rdx,%r9 movq %r8,0(%rdi) cmovzq %rcx,%r10 movq %r9,8(%rdi) cmovzq %r12,%r11 movq %r10,16(%rdi) movq %r11,24(%rdi) movq 0(%rsp),%r13 .cfi_restore %r13 movq 8(%rsp),%r12 .cfi_restore %r12 leaq 16(%rsp),%rsp .cfi_adjust_cfa_offset -16 .Lneg_epilogue: ret .cfi_endproc .size ecp_nistz256_neg,.-ecp_nistz256_neg .globl ecp_nistz256_ord_mul_mont_nohw .hidden ecp_nistz256_ord_mul_mont_nohw .type ecp_nistz256_ord_mul_mont_nohw,@function .align 32 ecp_nistz256_ord_mul_mont_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lord_mul_body: movq 0(%rdx),%rax movq %rdx,%rbx leaq .Lord(%rip),%r14 movq .LordK(%rip),%r15 movq %rax,%rcx mulq 0(%rsi) movq %rax,%r8 movq %rcx,%rax movq %rdx,%r9 mulq 8(%rsi) addq %rax,%r9 movq %rcx,%rax adcq $0,%rdx movq %rdx,%r10 mulq 16(%rsi) addq %rax,%r10 movq %rcx,%rax adcq $0,%rdx movq %r8,%r13 imulq %r15,%r8 movq %rdx,%r11 mulq 24(%rsi) addq %rax,%r11 movq %r8,%rax adcq $0,%rdx movq %rdx,%r12 mulq 0(%r14) movq %r8,%rbp addq %rax,%r13 movq %r8,%rax adcq $0,%rdx movq %rdx,%rcx subq %r8,%r10 sbbq $0,%r8 mulq 8(%r14) addq %rcx,%r9 adcq $0,%rdx addq %rax,%r9 movq %rbp,%rax adcq %rdx,%r10 movq %rbp,%rdx adcq $0,%r8 shlq $32,%rax shrq $32,%rdx subq %rax,%r11 movq 8(%rbx),%rax sbbq %rdx,%rbp addq %r8,%r11 adcq %rbp,%r12 adcq $0,%r13 movq %rax,%rcx mulq 0(%rsi) addq %rax,%r9 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 8(%rsi) addq %rbp,%r10 adcq $0,%rdx addq %rax,%r10 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 16(%rsi) addq %rbp,%r11 adcq $0,%rdx addq %rax,%r11 movq %rcx,%rax adcq $0,%rdx movq %r9,%rcx imulq %r15,%r9 movq %rdx,%rbp mulq 24(%rsi) addq %rbp,%r12 adcq $0,%rdx xorq %r8,%r8 addq %rax,%r12 movq 
%r9,%rax adcq %rdx,%r13 adcq $0,%r8 mulq 0(%r14) movq %r9,%rbp addq %rax,%rcx movq %r9,%rax adcq %rdx,%rcx subq %r9,%r11 sbbq $0,%r9 mulq 8(%r14) addq %rcx,%r10 adcq $0,%rdx addq %rax,%r10 movq %rbp,%rax adcq %rdx,%r11 movq %rbp,%rdx adcq $0,%r9 shlq $32,%rax shrq $32,%rdx subq %rax,%r12 movq 16(%rbx),%rax sbbq %rdx,%rbp addq %r9,%r12 adcq %rbp,%r13 adcq $0,%r8 movq %rax,%rcx mulq 0(%rsi) addq %rax,%r10 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 8(%rsi) addq %rbp,%r11 adcq $0,%rdx addq %rax,%r11 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 16(%rsi) addq %rbp,%r12 adcq $0,%rdx addq %rax,%r12 movq %rcx,%rax adcq $0,%rdx movq %r10,%rcx imulq %r15,%r10 movq %rdx,%rbp mulq 24(%rsi) addq %rbp,%r13 adcq $0,%rdx xorq %r9,%r9 addq %rax,%r13 movq %r10,%rax adcq %rdx,%r8 adcq $0,%r9 mulq 0(%r14) movq %r10,%rbp addq %rax,%rcx movq %r10,%rax adcq %rdx,%rcx subq %r10,%r12 sbbq $0,%r10 mulq 8(%r14) addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %rbp,%rax adcq %rdx,%r12 movq %rbp,%rdx adcq $0,%r10 shlq $32,%rax shrq $32,%rdx subq %rax,%r13 movq 24(%rbx),%rax sbbq %rdx,%rbp addq %r10,%r13 adcq %rbp,%r8 adcq $0,%r9 movq %rax,%rcx mulq 0(%rsi) addq %rax,%r11 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 8(%rsi) addq %rbp,%r12 adcq $0,%rdx addq %rax,%r12 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 16(%rsi) addq %rbp,%r13 adcq $0,%rdx addq %rax,%r13 movq %rcx,%rax adcq $0,%rdx movq %r11,%rcx imulq %r15,%r11 movq %rdx,%rbp mulq 24(%rsi) addq %rbp,%r8 adcq $0,%rdx xorq %r10,%r10 addq %rax,%r8 movq %r11,%rax adcq %rdx,%r9 adcq $0,%r10 mulq 0(%r14) movq %r11,%rbp addq %rax,%rcx movq %r11,%rax adcq %rdx,%rcx subq %r11,%r13 sbbq $0,%r11 mulq 8(%r14) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %rbp,%rax adcq %rdx,%r13 movq %rbp,%rdx adcq $0,%r11 shlq $32,%rax shrq $32,%rdx subq %rax,%r8 sbbq %rdx,%rbp addq %r11,%r8 adcq %rbp,%r9 adcq $0,%r10 movq %r12,%rsi subq 0(%r14),%r12 movq %r13,%r11 sbbq 8(%r14),%r13 movq %r8,%rcx sbbq 16(%r14),%r8 movq %r9,%rbp sbbq 24(%r14),%r9 sbbq 
$0,%r10 cmovcq %rsi,%r12 cmovcq %r11,%r13 cmovcq %rcx,%r8 cmovcq %rbp,%r9 movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lord_mul_epilogue: ret .cfi_endproc .size ecp_nistz256_ord_mul_mont_nohw,.-ecp_nistz256_ord_mul_mont_nohw .globl ecp_nistz256_ord_sqr_mont_nohw .hidden ecp_nistz256_ord_sqr_mont_nohw .type ecp_nistz256_ord_sqr_mont_nohw,@function .align 32 ecp_nistz256_ord_sqr_mont_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lord_sqr_body: movq 0(%rsi),%r8 movq 8(%rsi),%rax movq 16(%rsi),%r14 movq 24(%rsi),%r15 leaq .Lord(%rip),%rsi movq %rdx,%rbx jmp .Loop_ord_sqr .align 32 .Loop_ord_sqr: movq %rax,%rbp mulq %r8 movq %rax,%r9 .byte 102,72,15,110,205 movq %r14,%rax movq %rdx,%r10 mulq %r8 addq %rax,%r10 movq %r15,%rax .byte 102,73,15,110,214 adcq $0,%rdx movq %rdx,%r11 mulq %r8 addq %rax,%r11 movq %r15,%rax .byte 102,73,15,110,223 adcq $0,%rdx movq %rdx,%r12 mulq %r14 movq %rax,%r13 movq %r14,%rax movq %rdx,%r14 mulq %rbp addq %rax,%r11 movq %r15,%rax adcq $0,%rdx movq %rdx,%r15 mulq %rbp addq %rax,%r12 adcq $0,%rdx addq %r15,%r12 adcq %rdx,%r13 adcq $0,%r14 xorq %r15,%r15 movq %r8,%rax addq %r9,%r9 adcq %r10,%r10 adcq %r11,%r11 adcq %r12,%r12 adcq %r13,%r13 adcq %r14,%r14 adcq $0,%r15 mulq %rax movq %rax,%r8 .byte 102,72,15,126,200 movq %rdx,%rbp mulq %rax addq %rbp,%r9 adcq %rax,%r10 .byte 102,72,15,126,208 adcq $0,%rdx movq %rdx,%rbp mulq %rax addq 
%rbp,%r11 adcq %rax,%r12 .byte 102,72,15,126,216 adcq $0,%rdx movq %rdx,%rbp movq %r8,%rcx imulq 32(%rsi),%r8 mulq %rax addq %rbp,%r13 adcq %rax,%r14 movq 0(%rsi),%rax adcq %rdx,%r15 mulq %r8 movq %r8,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r8,%r10 sbbq $0,%rbp mulq %r8 addq %rcx,%r9 adcq $0,%rdx addq %rax,%r9 movq %r8,%rax adcq %rdx,%r10 movq %r8,%rdx adcq $0,%rbp movq %r9,%rcx imulq 32(%rsi),%r9 shlq $32,%rax shrq $32,%rdx subq %rax,%r11 movq 0(%rsi),%rax sbbq %rdx,%r8 addq %rbp,%r11 adcq $0,%r8 mulq %r9 movq %r9,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r9,%r11 sbbq $0,%rbp mulq %r9 addq %rcx,%r10 adcq $0,%rdx addq %rax,%r10 movq %r9,%rax adcq %rdx,%r11 movq %r9,%rdx adcq $0,%rbp movq %r10,%rcx imulq 32(%rsi),%r10 shlq $32,%rax shrq $32,%rdx subq %rax,%r8 movq 0(%rsi),%rax sbbq %rdx,%r9 addq %rbp,%r8 adcq $0,%r9 mulq %r10 movq %r10,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r10,%r8 sbbq $0,%rbp mulq %r10 addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %r10,%rax adcq %rdx,%r8 movq %r10,%rdx adcq $0,%rbp movq %r11,%rcx imulq 32(%rsi),%r11 shlq $32,%rax shrq $32,%rdx subq %rax,%r9 movq 0(%rsi),%rax sbbq %rdx,%r10 addq %rbp,%r9 adcq $0,%r10 mulq %r11 movq %r11,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r11,%r9 sbbq $0,%rbp mulq %r11 addq %rcx,%r8 adcq $0,%rdx addq %rax,%r8 movq %r11,%rax adcq %rdx,%r9 movq %r11,%rdx adcq $0,%rbp shlq $32,%rax shrq $32,%rdx subq %rax,%r10 sbbq %rdx,%r11 addq %rbp,%r10 adcq $0,%r11 xorq %rdx,%rdx addq %r12,%r8 adcq %r13,%r9 movq %r8,%r12 adcq %r14,%r10 adcq %r15,%r11 movq %r9,%rax adcq $0,%rdx subq 0(%rsi),%r8 movq %r10,%r14 sbbq 8(%rsi),%r9 sbbq 16(%rsi),%r10 movq %r11,%r15 sbbq 24(%rsi),%r11 sbbq $0,%rdx cmovcq %r12,%r8 cmovncq %r9,%rax cmovncq %r10,%r14 cmovncq %r11,%r15 decq %rbx jnz .Loop_ord_sqr movq %r8,0(%rdi) movq %rax,8(%rdi) pxor %xmm1,%xmm1 movq %r14,16(%rdi) pxor %xmm2,%xmm2 movq %r15,24(%rdi) pxor %xmm3,%xmm3 movq 0(%rsp),%r15 .cfi_restore %r15 movq 
8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lord_sqr_epilogue: ret .cfi_endproc .size ecp_nistz256_ord_sqr_mont_nohw,.-ecp_nistz256_ord_sqr_mont_nohw .globl ecp_nistz256_ord_mul_mont_adx .hidden ecp_nistz256_ord_mul_mont_adx .type ecp_nistz256_ord_mul_mont_adx,@function .align 32 ecp_nistz256_ord_mul_mont_adx: .cfi_startproc .Lecp_nistz256_ord_mul_mont_adx: _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lord_mulx_body: movq %rdx,%rbx movq 0(%rdx),%rdx movq 0(%rsi),%r9 movq 8(%rsi),%r10 movq 16(%rsi),%r11 movq 24(%rsi),%r12 leaq -128(%rsi),%rsi leaq .Lord-128(%rip),%r14 movq .LordK(%rip),%r15 mulxq %r9,%r8,%r9 mulxq %r10,%rcx,%r10 mulxq %r11,%rbp,%r11 addq %rcx,%r9 mulxq %r12,%rcx,%r12 movq %r8,%rdx mulxq %r15,%rdx,%rax adcq %rbp,%r10 adcq %rcx,%r11 adcq $0,%r12 xorq %r13,%r13 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r8 adoxq %rbp,%r9 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 24+128(%r14),%rcx,%rbp movq 8(%rbx),%rdx adcxq %rcx,%r11 adoxq %rbp,%r12 adcxq %r8,%r12 adoxq %r8,%r13 adcq $0,%r13 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 24+128(%rsi),%rcx,%rbp movq %r9,%rdx mulxq %r15,%rdx,%rax adcxq %rcx,%r12 adoxq %rbp,%r13 adcxq %r8,%r13 adoxq %r8,%r8 adcq $0,%r8 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r10 
adoxq %rbp,%r11 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 24+128(%r14),%rcx,%rbp movq 16(%rbx),%rdx adcxq %rcx,%r12 adoxq %rbp,%r13 adcxq %r9,%r13 adoxq %r9,%r8 adcq $0,%r8 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 24+128(%rsi),%rcx,%rbp movq %r10,%rdx mulxq %r15,%rdx,%rax adcxq %rcx,%r13 adoxq %rbp,%r8 adcxq %r9,%r8 adoxq %r9,%r9 adcq $0,%r9 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 24+128(%r14),%rcx,%rbp movq 24(%rbx),%rdx adcxq %rcx,%r13 adoxq %rbp,%r8 adcxq %r10,%r8 adoxq %r10,%r9 adcq $0,%r9 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r13 adoxq %rbp,%r8 mulxq 24+128(%rsi),%rcx,%rbp movq %r11,%rdx mulxq %r15,%rdx,%rax adcxq %rcx,%r8 adoxq %rbp,%r9 adcxq %r10,%r9 adoxq %r10,%r10 adcq $0,%r10 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r13 adoxq %rbp,%r8 mulxq 24+128(%r14),%rcx,%rbp leaq 128(%r14),%r14 movq %r12,%rbx adcxq %rcx,%r8 adoxq %rbp,%r9 movq %r13,%rdx adcxq %r11,%r9 adoxq %r11,%r10 adcq $0,%r10 movq %r8,%rcx subq 0(%r14),%r12 sbbq 8(%r14),%r13 sbbq 16(%r14),%r8 movq %r9,%rbp sbbq 24(%r14),%r9 sbbq $0,%r10 cmovcq %rbx,%r12 cmovcq %rdx,%r13 cmovcq %rcx,%r8 cmovcq %rbp,%r9 movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 
.Lord_mulx_epilogue: ret .cfi_endproc .size ecp_nistz256_ord_mul_mont_adx,.-ecp_nistz256_ord_mul_mont_adx .globl ecp_nistz256_ord_sqr_mont_adx .hidden ecp_nistz256_ord_sqr_mont_adx .type ecp_nistz256_ord_sqr_mont_adx,@function .align 32 ecp_nistz256_ord_sqr_mont_adx: .cfi_startproc _CET_ENDBR .Lecp_nistz256_ord_sqr_mont_adx: pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lord_sqrx_body: movq %rdx,%rbx movq 0(%rsi),%rdx movq 8(%rsi),%r14 movq 16(%rsi),%r15 movq 24(%rsi),%r8 leaq .Lord(%rip),%rsi jmp .Loop_ord_sqrx .align 32 .Loop_ord_sqrx: mulxq %r14,%r9,%r10 mulxq %r15,%rcx,%r11 movq %rdx,%rax .byte 102,73,15,110,206 mulxq %r8,%rbp,%r12 movq %r14,%rdx addq %rcx,%r10 .byte 102,73,15,110,215 adcq %rbp,%r11 adcq $0,%r12 xorq %r13,%r13 mulxq %r15,%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq %r8,%rcx,%rbp movq %r15,%rdx adcxq %rcx,%r12 adoxq %rbp,%r13 adcq $0,%r13 mulxq %r8,%rcx,%r14 movq %rax,%rdx .byte 102,73,15,110,216 xorq %r15,%r15 adcxq %r9,%r9 adoxq %rcx,%r13 adcxq %r10,%r10 adoxq %r15,%r14 mulxq %rdx,%r8,%rbp .byte 102,72,15,126,202 adcxq %r11,%r11 adoxq %rbp,%r9 adcxq %r12,%r12 mulxq %rdx,%rcx,%rax .byte 102,72,15,126,210 adcxq %r13,%r13 adoxq %rcx,%r10 adcxq %r14,%r14 mulxq %rdx,%rcx,%rbp .byte 0x67 .byte 102,72,15,126,218 adoxq %rax,%r11 adcxq %r15,%r15 adoxq %rcx,%r12 adoxq %rbp,%r13 mulxq %rdx,%rcx,%rax adoxq %rcx,%r14 adoxq %rax,%r15 movq %r8,%rdx mulxq 32(%rsi),%rdx,%rcx xorq %rax,%rax mulxq 0(%rsi),%rcx,%rbp adcxq %rcx,%r8 adoxq %rbp,%r9 mulxq 8(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 16(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 24(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r8 adcxq %rax,%r8 movq %r9,%rdx mulxq 32(%rsi),%rdx,%rcx mulxq 
0(%rsi),%rcx,%rbp adoxq %rcx,%r9 adcxq %rbp,%r10 mulxq 8(%rsi),%rcx,%rbp adoxq %rcx,%r10 adcxq %rbp,%r11 mulxq 16(%rsi),%rcx,%rbp adoxq %rcx,%r11 adcxq %rbp,%r8 mulxq 24(%rsi),%rcx,%rbp adoxq %rcx,%r8 adcxq %rbp,%r9 adoxq %rax,%r9 movq %r10,%rdx mulxq 32(%rsi),%rdx,%rcx mulxq 0(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r8 mulxq 16(%rsi),%rcx,%rbp adcxq %rcx,%r8 adoxq %rbp,%r9 mulxq 24(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 adcxq %rax,%r10 movq %r11,%rdx mulxq 32(%rsi),%rdx,%rcx mulxq 0(%rsi),%rcx,%rbp adoxq %rcx,%r11 adcxq %rbp,%r8 mulxq 8(%rsi),%rcx,%rbp adoxq %rcx,%r8 adcxq %rbp,%r9 mulxq 16(%rsi),%rcx,%rbp adoxq %rcx,%r9 adcxq %rbp,%r10 mulxq 24(%rsi),%rcx,%rbp adoxq %rcx,%r10 adcxq %rbp,%r11 adoxq %rax,%r11 addq %r8,%r12 adcq %r13,%r9 movq %r12,%rdx adcq %r14,%r10 adcq %r15,%r11 movq %r9,%r14 adcq $0,%rax subq 0(%rsi),%r12 movq %r10,%r15 sbbq 8(%rsi),%r9 sbbq 16(%rsi),%r10 movq %r11,%r8 sbbq 24(%rsi),%r11 sbbq $0,%rax cmovncq %r12,%rdx cmovncq %r9,%r14 cmovncq %r10,%r15 cmovncq %r11,%r8 decq %rbx jnz .Loop_ord_sqrx movq %rdx,0(%rdi) movq %r14,8(%rdi) pxor %xmm1,%xmm1 movq %r15,16(%rdi) pxor %xmm2,%xmm2 movq %r8,24(%rdi) pxor %xmm3,%xmm3 movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lord_sqrx_epilogue: ret .cfi_endproc .size ecp_nistz256_ord_sqr_mont_adx,.-ecp_nistz256_ord_sqr_mont_adx .globl ecp_nistz256_mul_mont_nohw .hidden ecp_nistz256_mul_mont_nohw .type ecp_nistz256_mul_mont_nohw,@function .align 32 ecp_nistz256_mul_mont_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset 
%r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lmul_body: movq %rdx,%rbx movq 0(%rdx),%rax movq 0(%rsi),%r9 movq 8(%rsi),%r10 movq 16(%rsi),%r11 movq 24(%rsi),%r12 call __ecp_nistz256_mul_montq movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lmul_epilogue: ret .cfi_endproc .size ecp_nistz256_mul_mont_nohw,.-ecp_nistz256_mul_mont_nohw .type __ecp_nistz256_mul_montq,@function .align 32 __ecp_nistz256_mul_montq: .cfi_startproc movq %rax,%rbp mulq %r9 movq .Lpoly+8(%rip),%r14 movq %rax,%r8 movq %rbp,%rax movq %rdx,%r9 mulq %r10 movq .Lpoly+24(%rip),%r15 addq %rax,%r9 movq %rbp,%rax adcq $0,%rdx movq %rdx,%r10 mulq %r11 addq %rax,%r10 movq %rbp,%rax adcq $0,%rdx movq %rdx,%r11 mulq %r12 addq %rax,%r11 movq %r8,%rax adcq $0,%rdx xorq %r13,%r13 movq %rdx,%r12 movq %r8,%rbp shlq $32,%r8 mulq %r15 shrq $32,%rbp addq %r8,%r9 adcq %rbp,%r10 adcq %rax,%r11 movq 8(%rbx),%rax adcq %rdx,%r12 adcq $0,%r13 xorq %r8,%r8 movq %rax,%rbp mulq 0(%rsi) addq %rax,%r9 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 8(%rsi) addq %rcx,%r10 adcq $0,%rdx addq %rax,%r10 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 16(%rsi) addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 24(%rsi) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %r9,%rax adcq %rdx,%r13 adcq $0,%r8 movq %r9,%rbp shlq $32,%r9 mulq %r15 shrq $32,%rbp addq %r9,%r10 adcq %rbp,%r11 adcq %rax,%r12 movq 16(%rbx),%rax adcq %rdx,%r13 adcq $0,%r8 xorq %r9,%r9 movq %rax,%rbp mulq 0(%rsi) addq %rax,%r10 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 8(%rsi) addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 16(%rsi) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq 
%rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 24(%rsi) addq %rcx,%r13 adcq $0,%rdx addq %rax,%r13 movq %r10,%rax adcq %rdx,%r8 adcq $0,%r9 movq %r10,%rbp shlq $32,%r10 mulq %r15 shrq $32,%rbp addq %r10,%r11 adcq %rbp,%r12 adcq %rax,%r13 movq 24(%rbx),%rax adcq %rdx,%r8 adcq $0,%r9 xorq %r10,%r10 movq %rax,%rbp mulq 0(%rsi) addq %rax,%r11 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 8(%rsi) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 16(%rsi) addq %rcx,%r13 adcq $0,%rdx addq %rax,%r13 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 24(%rsi) addq %rcx,%r8 adcq $0,%rdx addq %rax,%r8 movq %r11,%rax adcq %rdx,%r9 adcq $0,%r10 movq %r11,%rbp shlq $32,%r11 mulq %r15 shrq $32,%rbp addq %r11,%r12 adcq %rbp,%r13 movq %r12,%rcx adcq %rax,%r8 adcq %rdx,%r9 movq %r13,%rbp adcq $0,%r10 subq $-1,%r12 movq %r8,%rbx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%rdx sbbq %r15,%r9 sbbq $0,%r10 cmovcq %rcx,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rbx,%r8 movq %r13,8(%rdi) cmovcq %rdx,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq .globl ecp_nistz256_sqr_mont_nohw .hidden ecp_nistz256_sqr_mont_nohw .type ecp_nistz256_sqr_mont_nohw,@function .align 32 ecp_nistz256_sqr_mont_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lsqr_body: movq 0(%rsi),%rax movq 8(%rsi),%r14 movq 16(%rsi),%r15 movq 24(%rsi),%r8 call __ecp_nistz256_sqr_montq movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 
48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lsqr_epilogue: ret .cfi_endproc .size ecp_nistz256_sqr_mont_nohw,.-ecp_nistz256_sqr_mont_nohw .type __ecp_nistz256_sqr_montq,@function .align 32 __ecp_nistz256_sqr_montq: .cfi_startproc movq %rax,%r13 mulq %r14 movq %rax,%r9 movq %r15,%rax movq %rdx,%r10 mulq %r13 addq %rax,%r10 movq %r8,%rax adcq $0,%rdx movq %rdx,%r11 mulq %r13 addq %rax,%r11 movq %r15,%rax adcq $0,%rdx movq %rdx,%r12 mulq %r14 addq %rax,%r11 movq %r8,%rax adcq $0,%rdx movq %rdx,%rbp mulq %r14 addq %rax,%r12 movq %r8,%rax adcq $0,%rdx addq %rbp,%r12 movq %rdx,%r13 adcq $0,%r13 mulq %r15 xorq %r15,%r15 addq %rax,%r13 movq 0(%rsi),%rax movq %rdx,%r14 adcq $0,%r14 addq %r9,%r9 adcq %r10,%r10 adcq %r11,%r11 adcq %r12,%r12 adcq %r13,%r13 adcq %r14,%r14 adcq $0,%r15 mulq %rax movq %rax,%r8 movq 8(%rsi),%rax movq %rdx,%rcx mulq %rax addq %rcx,%r9 adcq %rax,%r10 movq 16(%rsi),%rax adcq $0,%rdx movq %rdx,%rcx mulq %rax addq %rcx,%r11 adcq %rax,%r12 movq 24(%rsi),%rax adcq $0,%rdx movq %rdx,%rcx mulq %rax addq %rcx,%r13 adcq %rax,%r14 movq %r8,%rax adcq %rdx,%r15 movq .Lpoly+8(%rip),%rsi movq .Lpoly+24(%rip),%rbp movq %r8,%rcx shlq $32,%r8 mulq %rbp shrq $32,%rcx addq %r8,%r9 adcq %rcx,%r10 adcq %rax,%r11 movq %r9,%rax adcq $0,%rdx movq %r9,%rcx shlq $32,%r9 movq %rdx,%r8 mulq %rbp shrq $32,%rcx addq %r9,%r10 adcq %rcx,%r11 adcq %rax,%r8 movq %r10,%rax adcq $0,%rdx movq %r10,%rcx shlq $32,%r10 movq %rdx,%r9 mulq %rbp shrq $32,%rcx addq %r10,%r11 adcq %rcx,%r8 adcq %rax,%r9 movq %r11,%rax adcq $0,%rdx movq %r11,%rcx shlq $32,%r11 movq %rdx,%r10 mulq %rbp shrq $32,%rcx addq %r11,%r8 adcq %rcx,%r9 adcq %rax,%r10 adcq $0,%rdx xorq %r11,%r11 addq %r8,%r12 adcq %r9,%r13 movq %r12,%r8 adcq %r10,%r14 adcq %rdx,%r15 movq %r13,%r9 adcq $0,%r11 subq $-1,%r12 movq %r14,%r10 sbbq %rsi,%r13 sbbq $0,%r14 movq %r15,%rcx sbbq %rbp,%r15 sbbq $0,%r11 cmovcq %r8,%r12 cmovcq %r9,%r13 movq %r12,0(%rdi) cmovcq %r10,%r14 movq %r13,8(%rdi) cmovcq %rcx,%r15 movq %r14,16(%rdi) movq 
%r15,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq .globl ecp_nistz256_mul_mont_adx .hidden ecp_nistz256_mul_mont_adx .type ecp_nistz256_mul_mont_adx,@function .align 32 ecp_nistz256_mul_mont_adx: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lmulx_body: movq %rdx,%rbx movq 0(%rdx),%rdx movq 0(%rsi),%r9 movq 8(%rsi),%r10 movq 16(%rsi),%r11 movq 24(%rsi),%r12 leaq -128(%rsi),%rsi call __ecp_nistz256_mul_montx movq 0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lmulx_epilogue: ret .cfi_endproc .size ecp_nistz256_mul_mont_adx,.-ecp_nistz256_mul_mont_adx .type __ecp_nistz256_mul_montx,@function .align 32 __ecp_nistz256_mul_montx: .cfi_startproc mulxq %r9,%r8,%r9 mulxq %r10,%rcx,%r10 movq $32,%r14 xorq %r13,%r13 mulxq %r11,%rbp,%r11 movq .Lpoly+24(%rip),%r15 adcq %rcx,%r9 mulxq %r12,%rcx,%r12 movq %r8,%rdx adcq %rbp,%r10 shlxq %r14,%r8,%rbp adcq %rcx,%r11 shrxq %r14,%r8,%rcx adcq $0,%r12 addq %rbp,%r9 adcq %rcx,%r10 mulxq %r15,%rcx,%rbp movq 8(%rbx),%rdx adcq %rcx,%r11 adcq %rbp,%r12 adcq $0,%r13 xorq %r8,%r8 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 24+128(%rsi),%rcx,%rbp movq %r9,%rdx adcxq %rcx,%r12 shlxq %r14,%r9,%rcx adoxq %rbp,%r13 shrxq %r14,%r9,%rbp adcxq %r8,%r13 adoxq %r8,%r8 adcq $0,%r8 addq %rcx,%r10 adcq %rbp,%r11 mulxq %r15,%rcx,%rbp movq 
16(%rbx),%rdx adcq %rcx,%r12 adcq %rbp,%r13 adcq $0,%r8 xorq %r9,%r9 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 24+128(%rsi),%rcx,%rbp movq %r10,%rdx adcxq %rcx,%r13 shlxq %r14,%r10,%rcx adoxq %rbp,%r8 shrxq %r14,%r10,%rbp adcxq %r9,%r8 adoxq %r9,%r9 adcq $0,%r9 addq %rcx,%r11 adcq %rbp,%r12 mulxq %r15,%rcx,%rbp movq 24(%rbx),%rdx adcq %rcx,%r13 adcq %rbp,%r8 adcq $0,%r9 xorq %r10,%r10 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r13 adoxq %rbp,%r8 mulxq 24+128(%rsi),%rcx,%rbp movq %r11,%rdx adcxq %rcx,%r8 shlxq %r14,%r11,%rcx adoxq %rbp,%r9 shrxq %r14,%r11,%rbp adcxq %r10,%r9 adoxq %r10,%r10 adcq $0,%r10 addq %rcx,%r12 adcq %rbp,%r13 mulxq %r15,%rcx,%rbp movq %r12,%rbx movq .Lpoly+8(%rip),%r14 adcq %rcx,%r8 movq %r13,%rdx adcq %rbp,%r9 adcq $0,%r10 xorl %eax,%eax movq %r8,%rcx sbbq $-1,%r12 sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%rbp sbbq %r15,%r9 sbbq $0,%r10 cmovcq %rbx,%r12 cmovcq %rdx,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %rbp,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx .globl ecp_nistz256_sqr_mont_adx .hidden ecp_nistz256_sqr_mont_adx .type ecp_nistz256_sqr_mont_adx,@function .align 32 ecp_nistz256_sqr_mont_adx: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 .Lsqrx_body: movq 0(%rsi),%rdx movq 8(%rsi),%r14 movq 16(%rsi),%r15 movq 24(%rsi),%r8 leaq -128(%rsi),%rsi call __ecp_nistz256_sqr_montx movq 
0(%rsp),%r15 .cfi_restore %r15 movq 8(%rsp),%r14 .cfi_restore %r14 movq 16(%rsp),%r13 .cfi_restore %r13 movq 24(%rsp),%r12 .cfi_restore %r12 movq 32(%rsp),%rbx .cfi_restore %rbx movq 40(%rsp),%rbp .cfi_restore %rbp leaq 48(%rsp),%rsp .cfi_adjust_cfa_offset -48 .Lsqrx_epilogue: ret .cfi_endproc .size ecp_nistz256_sqr_mont_adx,.-ecp_nistz256_sqr_mont_adx .type __ecp_nistz256_sqr_montx,@function .align 32 __ecp_nistz256_sqr_montx: .cfi_startproc mulxq %r14,%r9,%r10 mulxq %r15,%rcx,%r11 xorl %eax,%eax adcq %rcx,%r10 mulxq %r8,%rbp,%r12 movq %r14,%rdx adcq %rbp,%r11 adcq $0,%r12 xorq %r13,%r13 mulxq %r15,%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq %r8,%rcx,%rbp movq %r15,%rdx adcxq %rcx,%r12 adoxq %rbp,%r13 adcq $0,%r13 mulxq %r8,%rcx,%r14 movq 0+128(%rsi),%rdx xorq %r15,%r15 adcxq %r9,%r9 adoxq %rcx,%r13 adcxq %r10,%r10 adoxq %r15,%r14 mulxq %rdx,%r8,%rbp movq 8+128(%rsi),%rdx adcxq %r11,%r11 adoxq %rbp,%r9 adcxq %r12,%r12 mulxq %rdx,%rcx,%rax movq 16+128(%rsi),%rdx adcxq %r13,%r13 adoxq %rcx,%r10 adcxq %r14,%r14 .byte 0x67 mulxq %rdx,%rcx,%rbp movq 24+128(%rsi),%rdx adoxq %rax,%r11 adcxq %r15,%r15 adoxq %rcx,%r12 movq $32,%rsi adoxq %rbp,%r13 .byte 0x67,0x67 mulxq %rdx,%rcx,%rax movq .Lpoly+24(%rip),%rdx adoxq %rcx,%r14 shlxq %rsi,%r8,%rcx adoxq %rax,%r15 shrxq %rsi,%r8,%rax movq %rdx,%rbp addq %rcx,%r9 adcq %rax,%r10 mulxq %r8,%rcx,%r8 adcq %rcx,%r11 shlxq %rsi,%r9,%rcx adcq $0,%r8 shrxq %rsi,%r9,%rax addq %rcx,%r10 adcq %rax,%r11 mulxq %r9,%rcx,%r9 adcq %rcx,%r8 shlxq %rsi,%r10,%rcx adcq $0,%r9 shrxq %rsi,%r10,%rax addq %rcx,%r11 adcq %rax,%r8 mulxq %r10,%rcx,%r10 adcq %rcx,%r9 shlxq %rsi,%r11,%rcx adcq $0,%r10 shrxq %rsi,%r11,%rax addq %rcx,%r8 adcq %rax,%r9 mulxq %r11,%rcx,%r11 adcq %rcx,%r10 adcq $0,%r11 xorq %rdx,%rdx addq %r8,%r12 movq .Lpoly+8(%rip),%rsi adcq %r9,%r13 movq %r12,%r8 adcq %r10,%r14 adcq %r11,%r15 movq %r13,%r9 adcq $0,%rdx subq $-1,%r12 movq %r14,%r10 sbbq %rsi,%r13 sbbq $0,%r14 movq %r15,%r11 sbbq %rbp,%r15 sbbq $0,%rdx cmovcq %r8,%r12 
cmovcq %r9,%r13 movq %r12,0(%rdi) cmovcq %r10,%r14 movq %r13,8(%rdi) cmovcq %r11,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx .globl ecp_nistz256_select_w5_nohw .hidden ecp_nistz256_select_w5_nohw .type ecp_nistz256_select_w5_nohw,@function .align 32 ecp_nistz256_select_w5_nohw: .cfi_startproc _CET_ENDBR movdqa .LOne(%rip),%xmm0 movd %edx,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 movdqa %xmm0,%xmm8 pshufd $0,%xmm1,%xmm1 movq $16,%rax .Lselect_loop_sse_w5: movdqa %xmm8,%xmm15 paddd %xmm0,%xmm8 pcmpeqd %xmm1,%xmm15 movdqa 0(%rsi),%xmm9 movdqa 16(%rsi),%xmm10 movdqa 32(%rsi),%xmm11 movdqa 48(%rsi),%xmm12 movdqa 64(%rsi),%xmm13 movdqa 80(%rsi),%xmm14 leaq 96(%rsi),%rsi pand %xmm15,%xmm9 pand %xmm15,%xmm10 por %xmm9,%xmm2 pand %xmm15,%xmm11 por %xmm10,%xmm3 pand %xmm15,%xmm12 por %xmm11,%xmm4 pand %xmm15,%xmm13 por %xmm12,%xmm5 pand %xmm15,%xmm14 por %xmm13,%xmm6 por %xmm14,%xmm7 decq %rax jnz .Lselect_loop_sse_w5 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqu %xmm4,32(%rdi) movdqu %xmm5,48(%rdi) movdqu %xmm6,64(%rdi) movdqu %xmm7,80(%rdi) ret .cfi_endproc .LSEH_end_ecp_nistz256_select_w5_nohw: .size ecp_nistz256_select_w5_nohw,.-ecp_nistz256_select_w5_nohw .globl ecp_nistz256_select_w7_nohw .hidden ecp_nistz256_select_w7_nohw .type ecp_nistz256_select_w7_nohw,@function .align 32 ecp_nistz256_select_w7_nohw: .cfi_startproc _CET_ENDBR movdqa .LOne(%rip),%xmm8 movd %edx,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa %xmm8,%xmm0 pshufd $0,%xmm1,%xmm1 movq $64,%rax .Lselect_loop_sse_w7: movdqa %xmm8,%xmm15 paddd %xmm0,%xmm8 movdqa 0(%rsi),%xmm9 movdqa 16(%rsi),%xmm10 pcmpeqd %xmm1,%xmm15 movdqa 32(%rsi),%xmm11 movdqa 48(%rsi),%xmm12 leaq 64(%rsi),%rsi pand %xmm15,%xmm9 pand %xmm15,%xmm10 por %xmm9,%xmm2 pand %xmm15,%xmm11 por %xmm10,%xmm3 pand %xmm15,%xmm12 por %xmm11,%xmm4 prefetcht0 255(%rsi) por 
%xmm12,%xmm5 decq %rax jnz .Lselect_loop_sse_w7 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqu %xmm4,32(%rdi) movdqu %xmm5,48(%rdi) ret .cfi_endproc .LSEH_end_ecp_nistz256_select_w7_nohw: .size ecp_nistz256_select_w7_nohw,.-ecp_nistz256_select_w7_nohw .globl ecp_nistz256_select_w5_avx2 .hidden ecp_nistz256_select_w5_avx2 .type ecp_nistz256_select_w5_avx2,@function .align 32 ecp_nistz256_select_w5_avx2: .cfi_startproc _CET_ENDBR vzeroupper vmovdqa .LTwo(%rip),%ymm0 vpxor %ymm2,%ymm2,%ymm2 vpxor %ymm3,%ymm3,%ymm3 vpxor %ymm4,%ymm4,%ymm4 vmovdqa .LOne(%rip),%ymm5 vmovdqa .LTwo(%rip),%ymm10 vmovd %edx,%xmm1 vpermd %ymm1,%ymm2,%ymm1 movq $8,%rax .Lselect_loop_avx2_w5: vmovdqa 0(%rsi),%ymm6 vmovdqa 32(%rsi),%ymm7 vmovdqa 64(%rsi),%ymm8 vmovdqa 96(%rsi),%ymm11 vmovdqa 128(%rsi),%ymm12 vmovdqa 160(%rsi),%ymm13 vpcmpeqd %ymm1,%ymm5,%ymm9 vpcmpeqd %ymm1,%ymm10,%ymm14 vpaddd %ymm0,%ymm5,%ymm5 vpaddd %ymm0,%ymm10,%ymm10 leaq 192(%rsi),%rsi vpand %ymm9,%ymm6,%ymm6 vpand %ymm9,%ymm7,%ymm7 vpand %ymm9,%ymm8,%ymm8 vpand %ymm14,%ymm11,%ymm11 vpand %ymm14,%ymm12,%ymm12 vpand %ymm14,%ymm13,%ymm13 vpxor %ymm6,%ymm2,%ymm2 vpxor %ymm7,%ymm3,%ymm3 vpxor %ymm8,%ymm4,%ymm4 vpxor %ymm11,%ymm2,%ymm2 vpxor %ymm12,%ymm3,%ymm3 vpxor %ymm13,%ymm4,%ymm4 decq %rax jnz .Lselect_loop_avx2_w5 vmovdqu %ymm2,0(%rdi) vmovdqu %ymm3,32(%rdi) vmovdqu %ymm4,64(%rdi) vzeroupper ret .cfi_endproc .LSEH_end_ecp_nistz256_select_w5_avx2: .size ecp_nistz256_select_w5_avx2,.-ecp_nistz256_select_w5_avx2 .globl ecp_nistz256_select_w7_avx2 .hidden ecp_nistz256_select_w7_avx2 .type ecp_nistz256_select_w7_avx2,@function .align 32 ecp_nistz256_select_w7_avx2: .cfi_startproc _CET_ENDBR vzeroupper vmovdqa .LThree(%rip),%ymm0 vpxor %ymm2,%ymm2,%ymm2 vpxor %ymm3,%ymm3,%ymm3 vmovdqa .LOne(%rip),%ymm4 vmovdqa .LTwo(%rip),%ymm8 vmovdqa .LThree(%rip),%ymm12 vmovd %edx,%xmm1 vpermd %ymm1,%ymm2,%ymm1 movq $21,%rax .Lselect_loop_avx2_w7: vmovdqa 0(%rsi),%ymm5 vmovdqa 32(%rsi),%ymm6 vmovdqa 64(%rsi),%ymm9 vmovdqa 96(%rsi),%ymm10 
vmovdqa 128(%rsi),%ymm13 vmovdqa 160(%rsi),%ymm14 vpcmpeqd %ymm1,%ymm4,%ymm7 vpcmpeqd %ymm1,%ymm8,%ymm11 vpcmpeqd %ymm1,%ymm12,%ymm15 vpaddd %ymm0,%ymm4,%ymm4 vpaddd %ymm0,%ymm8,%ymm8 vpaddd %ymm0,%ymm12,%ymm12 leaq 192(%rsi),%rsi vpand %ymm7,%ymm5,%ymm5 vpand %ymm7,%ymm6,%ymm6 vpand %ymm11,%ymm9,%ymm9 vpand %ymm11,%ymm10,%ymm10 vpand %ymm15,%ymm13,%ymm13 vpand %ymm15,%ymm14,%ymm14 vpxor %ymm5,%ymm2,%ymm2 vpxor %ymm6,%ymm3,%ymm3 vpxor %ymm9,%ymm2,%ymm2 vpxor %ymm10,%ymm3,%ymm3 vpxor %ymm13,%ymm2,%ymm2 vpxor %ymm14,%ymm3,%ymm3 decq %rax jnz .Lselect_loop_avx2_w7 vmovdqa 0(%rsi),%ymm5 vmovdqa 32(%rsi),%ymm6 vpcmpeqd %ymm1,%ymm4,%ymm7 vpand %ymm7,%ymm5,%ymm5 vpand %ymm7,%ymm6,%ymm6 vpxor %ymm5,%ymm2,%ymm2 vpxor %ymm6,%ymm3,%ymm3 vmovdqu %ymm2,0(%rdi) vmovdqu %ymm3,32(%rdi) vzeroupper ret .cfi_endproc .LSEH_end_ecp_nistz256_select_w7_avx2: .size ecp_nistz256_select_w7_avx2,.-ecp_nistz256_select_w7_avx2 .type __ecp_nistz256_add_toq,@function .align 32 __ecp_nistz256_add_toq: .cfi_startproc xorq %r11,%r11 addq 0(%rbx),%r12 adcq 8(%rbx),%r13 movq %r12,%rax adcq 16(%rbx),%r8 adcq 24(%rbx),%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq .type __ecp_nistz256_sub_fromq,@function .align 32 __ecp_nistz256_sub_fromq: .cfi_startproc subq 0(%rbx),%r12 sbbq 8(%rbx),%r13 movq %r12,%rax sbbq 16(%rbx),%r8 sbbq 24(%rbx),%r9 movq %r13,%rbp sbbq %r11,%r11 addq $-1,%r12 movq %r8,%rcx adcq %r14,%r13 adcq $0,%r8 movq %r9,%r10 adcq %r15,%r9 testq %r11,%r11 cmovzq %rax,%r12 cmovzq %rbp,%r13 movq %r12,0(%rdi) cmovzq %rcx,%r8 movq %r13,8(%rdi) cmovzq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq .type __ecp_nistz256_subq,@function .align 
32 __ecp_nistz256_subq: .cfi_startproc subq %r12,%rax sbbq %r13,%rbp movq %rax,%r12 sbbq %r8,%rcx sbbq %r9,%r10 movq %rbp,%r13 sbbq %r11,%r11 addq $-1,%rax movq %rcx,%r8 adcq %r14,%rbp adcq $0,%rcx movq %r10,%r9 adcq %r15,%r10 testq %r11,%r11 cmovnzq %rax,%r12 cmovnzq %rbp,%r13 cmovnzq %rcx,%r8 cmovnzq %r10,%r9 ret .cfi_endproc .size __ecp_nistz256_subq,.-__ecp_nistz256_subq .type __ecp_nistz256_mul_by_2q,@function .align 32 __ecp_nistz256_mul_by_2q: .cfi_startproc xorq %r11,%r11 addq %r12,%r12 adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q .globl ecp_nistz256_point_double_nohw .hidden ecp_nistz256_point_double_nohw .type ecp_nistz256_point_double_nohw,@function .align 32 ecp_nistz256_point_double_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 subq $160+8,%rsp .cfi_adjust_cfa_offset 32*5+8 .Lpoint_doubleq_body: .Lpoint_double_shortcutq: movdqu 0(%rsi),%xmm0 movq %rsi,%rbx movdqu 16(%rsi),%xmm1 movq 32+0(%rsi),%r12 movq 32+8(%rsi),%r13 movq 32+16(%rsi),%r8 movq 32+24(%rsi),%r9 movq .Lpoly+8(%rip),%r14 movq .Lpoly+24(%rip),%r15 movdqa %xmm0,96(%rsp) movdqa %xmm1,96+16(%rsp) leaq 32(%rdi),%r10 leaq 64(%rdi),%r11 .byte 102,72,15,110,199 .byte 102,73,15,110,202 .byte 102,73,15,110,211 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_by_2q movq 64+0(%rsi),%rax movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 
64+24(%rsi),%r8 leaq 64-0(%rsi),%rsi leaq 64(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 0+0(%rsp),%rax movq 8+0(%rsp),%r14 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 0(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 32(%rbx),%rax movq 64+0(%rbx),%r9 movq 64+8(%rbx),%r10 movq 64+16(%rbx),%r11 movq 64+24(%rbx),%r12 leaq 64-0(%rbx),%rsi leaq 32(%rbx),%rbx .byte 102,72,15,126,215 call __ecp_nistz256_mul_montq call __ecp_nistz256_mul_by_2q movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 32(%rsp),%rdi call __ecp_nistz256_add_toq movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 0+0(%rsp),%rax movq 8+0(%rsp),%r14 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 .byte 102,72,15,126,207 call __ecp_nistz256_sqr_montq xorq %r9,%r9 movq %r12,%rax addq $-1,%r12 movq %r13,%r10 adcq %rsi,%r13 movq %r14,%rcx adcq $0,%r14 movq %r15,%r8 adcq %rbp,%r15 adcq $0,%r9 xorq %rsi,%rsi testq $1,%rax cmovzq %rax,%r12 cmovzq %r10,%r13 cmovzq %rcx,%r14 cmovzq %r8,%r15 cmovzq %rsi,%r9 movq %r13,%rax shrq $1,%r12 shlq $63,%rax movq %r14,%r10 shrq $1,%r13 orq %rax,%r12 shlq $63,%r10 movq %r15,%rcx shrq $1,%r14 orq %r10,%r13 shlq $63,%rcx movq %r12,0(%rdi) shrq $1,%r15 movq %r13,8(%rdi) shlq $63,%r9 orq %rcx,%r14 orq %r9,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) movq 64(%rsp),%rax leaq 64(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2q leaq 32(%rsp),%rbx leaq 32(%rsp),%rdi call __ecp_nistz256_add_toq movq 96(%rsp),%rax leaq 96(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2q 
movq 0+32(%rsp),%rax movq 8+32(%rsp),%r14 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r15 movq 24+32(%rsp),%r8 .byte 102,72,15,126,199 call __ecp_nistz256_sqr_montq leaq 128(%rsp),%rbx movq %r14,%r8 movq %r15,%r9 movq %rsi,%r14 movq %rbp,%r15 call __ecp_nistz256_sub_fromq movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 0(%rsp),%rdi call __ecp_nistz256_subq movq 32(%rsp),%rax leaq 32(%rsp),%rbx movq %r12,%r14 xorl %ecx,%ecx movq %r12,0+0(%rsp) movq %r13,%r10 movq %r13,0+8(%rsp) cmovzq %r8,%r11 movq %r8,0+16(%rsp) leaq 0-0(%rsp),%rsi cmovzq %r9,%r12 movq %r9,0+24(%rsp) movq %r14,%r9 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq .byte 102,72,15,126,203 .byte 102,72,15,126,207 call __ecp_nistz256_sub_fromq leaq 160+56(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbx .cfi_restore %rbx movq -8(%rsi),%rbp .cfi_restore %rbp leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lpoint_doubleq_epilogue: ret .cfi_endproc .size ecp_nistz256_point_double_nohw,.-ecp_nistz256_point_double_nohw .globl ecp_nistz256_point_add_nohw .hidden ecp_nistz256_point_add_nohw .type ecp_nistz256_point_add_nohw,@function .align 32 ecp_nistz256_point_add_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 subq $576+8,%rsp .cfi_adjust_cfa_offset 32*18+8 .Lpoint_addq_body: movdqu 0(%rsi),%xmm0 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq %rsi,%rbx movq %rdx,%rsi movdqa %xmm0,384(%rsp) movdqa %xmm1,384+16(%rsp) movdqa %xmm2,416(%rsp) 
movdqa %xmm3,416+16(%rsp) movdqa %xmm4,448(%rsp) movdqa %xmm5,448+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rsi),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 por %xmm3,%xmm5 movdqu 48(%rsi),%xmm3 movq 64+0(%rsi),%rax movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,480(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,480+16(%rsp) movdqu 64(%rsi),%xmm0 movdqu 80(%rsi),%xmm1 movdqa %xmm2,512(%rsp) movdqa %xmm3,512+16(%rsp) por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm0,%xmm1 .byte 102,72,15,110,199 leaq 64-0(%rsi),%rsi movq %rax,544+0(%rsp) movq %r14,544+8(%rsp) movq %r15,544+16(%rsp) movq %r8,544+24(%rsp) leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montq pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm1,%xmm4 por %xmm1,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 por %xmm3,%xmm4 pxor %xmm3,%xmm3 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 movq 64+0(%rbx),%rax movq 64+8(%rbx),%r14 movq 64+16(%rbx),%r15 movq 64+24(%rbx),%r8 .byte 102,72,15,110,203 leaq 64-0(%rbx),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 544(%rsp),%rax leaq 544(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq 0+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montq movq 448(%rsp),%rax leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montq movq 416(%rsp),%rax leaq 416(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq 0+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montq movq 512(%rsp),%rax leaq 512(%rsp),%rbx movq 0+256(%rsp),%r9 movq 8+256(%rsp),%r10 leaq 0+256(%rsp),%rsi movq 16+256(%rsp),%r11 movq 24+256(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 224(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromq orq %r13,%r12 movdqa %xmm4,%xmm2 orq %r8,%r12 orq %r9,%r12 por 
%xmm5,%xmm2 .byte 102,73,15,110,220 movq 384(%rsp),%rax leaq 384(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq 0+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montq movq 480(%rsp),%rax leaq 480(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 160(%rsp),%rbx leaq 0(%rsp),%rdi call __ecp_nistz256_sub_fromq orq %r13,%r12 orq %r8,%r12 orq %r9,%r12 .byte 102,73,15,126,208 .byte 102,73,15,126,217 orq %r8,%r12 .byte 0x3e jnz .Ladd_proceedq testq %r9,%r9 jz .Ladd_doubleq .byte 102,72,15,126,199 pxor %xmm0,%xmm0 movdqu %xmm0,0(%rdi) movdqu %xmm0,16(%rdi) movdqu %xmm0,32(%rdi) movdqu %xmm0,48(%rdi) movdqu %xmm0,64(%rdi) movdqu %xmm0,80(%rdi) jmp .Ladd_doneq .align 32 .Ladd_doubleq: .byte 102,72,15,126,206 .byte 102,72,15,126,199 addq $416,%rsp .cfi_adjust_cfa_offset -416 jmp .Lpoint_double_shortcutq .cfi_adjust_cfa_offset 416 .align 32 .Ladd_proceedq: movq 0+64(%rsp),%rax movq 8+64(%rsp),%r14 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 448(%rsp),%rax leaq 448(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montq movq 0+0(%rsp),%rax movq 8+0(%rsp),%r14 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 544(%rsp),%rax leaq 544(%rsp),%rbx movq 0+352(%rsp),%r9 movq 8+352(%rsp),%r10 leaq 0+352(%rsp),%rsi movq 16+352(%rsp),%r11 movq 24+352(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montq movq 0(%rsp),%rax leaq 0(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 128(%rsp),%rdi call __ecp_nistz256_mul_montq movq 160(%rsp),%rax leaq 160(%rsp),%rbx movq 0+32(%rsp),%r9 movq 
8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montq xorq %r11,%r11 addq %r12,%r12 leaq 96(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subq leaq 128(%rsp),%rbx leaq 288(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 192+0(%rsp),%rax movq 192+8(%rsp),%rbp movq 192+16(%rsp),%rcx movq 192+24(%rsp),%r10 leaq 320(%rsp),%rdi call __ecp_nistz256_subq movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 128(%rsp),%rax leaq 128(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq 0+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montq movq 320(%rsp),%rax leaq 320(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 320(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 256(%rsp),%rbx leaq 320(%rsp),%rdi call __ecp_nistz256_sub_fromq .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 352(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 352+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 544(%rsp),%xmm2 pand 544+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 480(%rsp),%xmm2 pand 480+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa 
%xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 320(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 320+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 512(%rsp),%xmm2 pand 512+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) .Ladd_doneq: leaq 576+56(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbx .cfi_restore %rbx movq -8(%rsi),%rbp .cfi_restore %rbp leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lpoint_addq_epilogue: ret .cfi_endproc .size ecp_nistz256_point_add_nohw,.-ecp_nistz256_point_add_nohw .globl ecp_nistz256_point_add_affine_nohw .hidden ecp_nistz256_point_add_affine_nohw .type ecp_nistz256_point_add_affine_nohw,@function .align 32 ecp_nistz256_point_add_affine_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 subq $480+8,%rsp .cfi_adjust_cfa_offset 32*15+8 .Ladd_affineq_body: movdqu 0(%rsi),%xmm0 movq %rdx,%rbx movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq 64+0(%rsi),%rax movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,320(%rsp) movdqa %xmm1,320+16(%rsp) movdqa %xmm2,352(%rsp) movdqa %xmm3,352+16(%rsp) movdqa 
%xmm4,384(%rsp) movdqa %xmm5,384+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rbx),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rbx),%xmm1 movdqu 32(%rbx),%xmm2 por %xmm3,%xmm5 movdqu 48(%rbx),%xmm3 movdqa %xmm0,416(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,416+16(%rsp) por %xmm0,%xmm1 .byte 102,72,15,110,199 movdqa %xmm2,448(%rsp) movdqa %xmm3,448+16(%rsp) por %xmm2,%xmm3 por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm1,%xmm3 leaq 64-0(%rsi),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montq pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm3,%xmm4 movq 0(%rbx),%rax movq %r12,%r9 por %xmm3,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 movq %r13,%r10 por %xmm3,%xmm4 pxor %xmm3,%xmm3 movq %r14,%r11 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 leaq 32-0(%rsp),%rsi movq %r15,%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 320(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 384(%rsp),%rax leaq 384(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq movq 384(%rsp),%rax leaq 384(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 288(%rsp),%rdi call __ecp_nistz256_mul_montq movq 448(%rsp),%rax leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 352(%rsp),%rbx leaq 96(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 0+64(%rsp),%rax movq 8+64(%rsp),%r14 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 128(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 0+96(%rsp),%rax movq 8+96(%rsp),%r14 leaq 0+96(%rsp),%rsi movq 16+96(%rsp),%r15 movq 24+96(%rsp),%r8 leaq 192(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 128(%rsp),%rax leaq 128(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 
160(%rsp),%rdi call __ecp_nistz256_mul_montq movq 320(%rsp),%rax leaq 320(%rsp),%rbx movq 0+128(%rsp),%r9 movq 8+128(%rsp),%r10 leaq 0+128(%rsp),%rsi movq 16+128(%rsp),%r11 movq 24+128(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq xorq %r11,%r11 addq %r12,%r12 leaq 192(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subq leaq 160(%rsp),%rbx leaq 224(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 64(%rsp),%rdi call __ecp_nistz256_subq movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 352(%rsp),%rax leaq 352(%rsp),%rbx movq 0+160(%rsp),%r9 movq 8+160(%rsp),%r10 leaq 0+160(%rsp),%rsi movq 16+160(%rsp),%r11 movq 24+160(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq movq 96(%rsp),%rax leaq 96(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 64(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 32(%rsp),%rbx leaq 256(%rsp),%rdi call __ecp_nistz256_sub_fromq .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand .LONE_mont(%rip),%xmm2 pand .LONE_mont+16(%rip),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 224(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 224+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 416(%rsp),%xmm2 pand 
416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 320(%rsp),%xmm2 pand 320+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 256(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 256+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 352(%rsp),%xmm2 pand 352+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) leaq 480+56(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbx .cfi_restore %rbx movq -8(%rsi),%rbp .cfi_restore %rbp leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Ladd_affineq_epilogue: ret .cfi_endproc .size ecp_nistz256_point_add_affine_nohw,.-ecp_nistz256_point_add_affine_nohw .type __ecp_nistz256_add_tox,@function .align 32 __ecp_nistz256_add_tox: .cfi_startproc xorq %r11,%r11 adcq 0(%rbx),%r12 adcq 8(%rbx),%r13 movq %r12,%rax adcq 16(%rbx),%r8 adcq 24(%rbx),%r9 movq %r13,%rbp adcq $0,%r11 xorq %r10,%r10 sbbq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox .type __ecp_nistz256_sub_fromx,@function .align 32 __ecp_nistz256_sub_fromx: .cfi_startproc xorq %r11,%r11 sbbq 0(%rbx),%r12 sbbq 8(%rbx),%r13 movq %r12,%rax sbbq 16(%rbx),%r8 sbbq 24(%rbx),%r9 movq %r13,%rbp sbbq $0,%r11 xorq %r10,%r10 adcq $-1,%r12 movq %r8,%rcx adcq %r14,%r13 adcq $0,%r8 movq %r9,%r10 adcq 
%r15,%r9 btq $0,%r11 cmovncq %rax,%r12 cmovncq %rbp,%r13 movq %r12,0(%rdi) cmovncq %rcx,%r8 movq %r13,8(%rdi) cmovncq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx .type __ecp_nistz256_subx,@function .align 32 __ecp_nistz256_subx: .cfi_startproc xorq %r11,%r11 sbbq %r12,%rax sbbq %r13,%rbp movq %rax,%r12 sbbq %r8,%rcx sbbq %r9,%r10 movq %rbp,%r13 sbbq $0,%r11 xorq %r9,%r9 adcq $-1,%rax movq %rcx,%r8 adcq %r14,%rbp adcq $0,%rcx movq %r10,%r9 adcq %r15,%r10 btq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 cmovcq %rcx,%r8 cmovcq %r10,%r9 ret .cfi_endproc .size __ecp_nistz256_subx,.-__ecp_nistz256_subx .type __ecp_nistz256_mul_by_2x,@function .align 32 __ecp_nistz256_mul_by_2x: .cfi_startproc xorq %r11,%r11 adcq %r12,%r12 adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 xorq %r10,%r10 sbbq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .cfi_endproc .size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x .globl ecp_nistz256_point_double_adx .hidden ecp_nistz256_point_double_adx .type ecp_nistz256_point_double_adx,@function .align 32 ecp_nistz256_point_double_adx: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 subq $160+8,%rsp .cfi_adjust_cfa_offset 32*5+8 .Lpoint_doublex_body: .Lpoint_double_shortcutx: movdqu 0(%rsi),%xmm0 movq %rsi,%rbx movdqu 16(%rsi),%xmm1 movq 32+0(%rsi),%r12 movq 32+8(%rsi),%r13 movq 32+16(%rsi),%r8 movq 32+24(%rsi),%r9 movq .Lpoly+8(%rip),%r14 
movq .Lpoly+24(%rip),%r15 movdqa %xmm0,96(%rsp) movdqa %xmm1,96+16(%rsp) leaq 32(%rdi),%r10 leaq 64(%rdi),%r11 .byte 102,72,15,110,199 .byte 102,73,15,110,202 .byte 102,73,15,110,211 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_by_2x movq 64+0(%rsi),%rdx movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 leaq 64-128(%rsi),%rsi leaq 64(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 0+0(%rsp),%rdx movq 8+0(%rsp),%r14 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 0(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 32(%rbx),%rdx movq 64+0(%rbx),%r9 movq 64+8(%rbx),%r10 movq 64+16(%rbx),%r11 movq 64+24(%rbx),%r12 leaq 64-128(%rbx),%rsi leaq 32(%rbx),%rbx .byte 102,72,15,126,215 call __ecp_nistz256_mul_montx call __ecp_nistz256_mul_by_2x movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 32(%rsp),%rdi call __ecp_nistz256_add_tox movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 0+0(%rsp),%rdx movq 8+0(%rsp),%r14 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 .byte 102,72,15,126,207 call __ecp_nistz256_sqr_montx xorq %r9,%r9 movq %r12,%rax addq $-1,%r12 movq %r13,%r10 adcq %rsi,%r13 movq %r14,%rcx adcq $0,%r14 movq %r15,%r8 adcq %rbp,%r15 adcq $0,%r9 xorq %rsi,%rsi testq $1,%rax cmovzq %rax,%r12 cmovzq %r10,%r13 cmovzq %rcx,%r14 cmovzq %r8,%r15 cmovzq %rsi,%r9 movq %r13,%rax shrq $1,%r12 shlq $63,%rax movq %r14,%r10 shrq $1,%r13 orq %rax,%r12 shlq $63,%r10 movq %r15,%rcx shrq $1,%r14 orq %r10,%r13 shlq $63,%rcx movq %r12,0(%rdi) shrq $1,%r15 movq %r13,8(%rdi) shlq $63,%r9 orq %rcx,%r14 orq %r9,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) movq 64(%rsp),%rdx leaq 64(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 128(%rsp),%rdi call 
__ecp_nistz256_mul_by_2x leaq 32(%rsp),%rbx leaq 32(%rsp),%rdi call __ecp_nistz256_add_tox movq 96(%rsp),%rdx leaq 96(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2x movq 0+32(%rsp),%rdx movq 8+32(%rsp),%r14 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r15 movq 24+32(%rsp),%r8 .byte 102,72,15,126,199 call __ecp_nistz256_sqr_montx leaq 128(%rsp),%rbx movq %r14,%r8 movq %r15,%r9 movq %rsi,%r14 movq %rbp,%r15 call __ecp_nistz256_sub_fromx movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 0(%rsp),%rdi call __ecp_nistz256_subx movq 32(%rsp),%rdx leaq 32(%rsp),%rbx movq %r12,%r14 xorl %ecx,%ecx movq %r12,0+0(%rsp) movq %r13,%r10 movq %r13,0+8(%rsp) cmovzq %r8,%r11 movq %r8,0+16(%rsp) leaq 0-128(%rsp),%rsi cmovzq %r9,%r12 movq %r9,0+24(%rsp) movq %r14,%r9 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx .byte 102,72,15,126,203 .byte 102,72,15,126,207 call __ecp_nistz256_sub_fromx leaq 160+56(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbx .cfi_restore %rbx movq -8(%rsi),%rbp .cfi_restore %rbp leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lpoint_doublex_epilogue: ret .cfi_endproc .size ecp_nistz256_point_double_adx,.-ecp_nistz256_point_double_adx .globl ecp_nistz256_point_add_adx .hidden ecp_nistz256_point_add_adx .type ecp_nistz256_point_add_adx,@function .align 32 ecp_nistz256_point_add_adx: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 
.cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 subq $576+8,%rsp .cfi_adjust_cfa_offset 32*18+8 .Lpoint_addx_body: movdqu 0(%rsi),%xmm0 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq %rsi,%rbx movq %rdx,%rsi movdqa %xmm0,384(%rsp) movdqa %xmm1,384+16(%rsp) movdqa %xmm2,416(%rsp) movdqa %xmm3,416+16(%rsp) movdqa %xmm4,448(%rsp) movdqa %xmm5,448+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rsi),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 por %xmm3,%xmm5 movdqu 48(%rsi),%xmm3 movq 64+0(%rsi),%rdx movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,480(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,480+16(%rsp) movdqu 64(%rsi),%xmm0 movdqu 80(%rsi),%xmm1 movdqa %xmm2,512(%rsp) movdqa %xmm3,512+16(%rsp) por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm0,%xmm1 .byte 102,72,15,110,199 leaq 64-128(%rsi),%rsi movq %rdx,544+0(%rsp) movq %r14,544+8(%rsp) movq %r15,544+16(%rsp) movq %r8,544+24(%rsp) leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montx pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm1,%xmm4 por %xmm1,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 por %xmm3,%xmm4 pxor %xmm3,%xmm3 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 movq 64+0(%rbx),%rdx movq 64+8(%rbx),%r14 movq 64+16(%rbx),%r15 movq 64+24(%rbx),%r8 .byte 102,72,15,110,203 leaq 64-128(%rbx),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 544(%rsp),%rdx leaq 544(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq -128+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montx movq 448(%rsp),%rdx leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montx movq 416(%rsp),%rdx leaq 416(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq -128+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 224(%rsp),%rdi call 
__ecp_nistz256_mul_montx movq 512(%rsp),%rdx leaq 512(%rsp),%rbx movq 0+256(%rsp),%r9 movq 8+256(%rsp),%r10 leaq -128+256(%rsp),%rsi movq 16+256(%rsp),%r11 movq 24+256(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 224(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromx orq %r13,%r12 movdqa %xmm4,%xmm2 orq %r8,%r12 orq %r9,%r12 por %xmm5,%xmm2 .byte 102,73,15,110,220 movq 384(%rsp),%rdx leaq 384(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq -128+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montx movq 480(%rsp),%rdx leaq 480(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 160(%rsp),%rbx leaq 0(%rsp),%rdi call __ecp_nistz256_sub_fromx orq %r13,%r12 orq %r8,%r12 orq %r9,%r12 .byte 102,73,15,126,208 .byte 102,73,15,126,217 orq %r8,%r12 .byte 0x3e jnz .Ladd_proceedx testq %r9,%r9 jz .Ladd_doublex .byte 102,72,15,126,199 pxor %xmm0,%xmm0 movdqu %xmm0,0(%rdi) movdqu %xmm0,16(%rdi) movdqu %xmm0,32(%rdi) movdqu %xmm0,48(%rdi) movdqu %xmm0,64(%rdi) movdqu %xmm0,80(%rdi) jmp .Ladd_donex .align 32 .Ladd_doublex: .byte 102,72,15,126,206 .byte 102,72,15,126,199 addq $416,%rsp .cfi_adjust_cfa_offset -416 jmp .Lpoint_double_shortcutx .cfi_adjust_cfa_offset 416 .align 32 .Ladd_proceedx: movq 0+64(%rsp),%rdx movq 8+64(%rsp),%r14 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 448(%rsp),%rdx leaq 448(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montx movq 0+0(%rsp),%rdx movq 8+0(%rsp),%r14 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 544(%rsp),%rdx leaq 544(%rsp),%rbx movq 0+352(%rsp),%r9 movq 8+352(%rsp),%r10 
leaq -128+352(%rsp),%rsi movq 16+352(%rsp),%r11 movq 24+352(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montx movq 0(%rsp),%rdx leaq 0(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 128(%rsp),%rdi call __ecp_nistz256_mul_montx movq 160(%rsp),%rdx leaq 160(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montx xorq %r11,%r11 addq %r12,%r12 leaq 96(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subx leaq 128(%rsp),%rbx leaq 288(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 192+0(%rsp),%rax movq 192+8(%rsp),%rbp movq 192+16(%rsp),%rcx movq 192+24(%rsp),%r10 leaq 320(%rsp),%rdi call __ecp_nistz256_subx movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 128(%rsp),%rdx leaq 128(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq -128+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montx movq 320(%rsp),%rdx leaq 320(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 320(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 256(%rsp),%rbx leaq 320(%rsp),%rdi call __ecp_nistz256_sub_fromx .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 352(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 352+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 544(%rsp),%xmm2 pand 544+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 
448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 480(%rsp),%xmm2 pand 480+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 320(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 320+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 512(%rsp),%xmm2 pand 512+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) .Ladd_donex: leaq 576+56(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbx .cfi_restore %rbx movq -8(%rsi),%rbp .cfi_restore %rbp leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lpoint_addx_epilogue: ret .cfi_endproc .size ecp_nistz256_point_add_adx,.-ecp_nistz256_point_add_adx .globl ecp_nistz256_point_add_affine_adx .hidden ecp_nistz256_point_add_affine_adx .type ecp_nistz256_point_add_affine_adx,@function .align 32 ecp_nistz256_point_add_affine_adx: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 subq $480+8,%rsp .cfi_adjust_cfa_offset 
32*15+8 .Ladd_affinex_body: movdqu 0(%rsi),%xmm0 movq %rdx,%rbx movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq 64+0(%rsi),%rdx movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,320(%rsp) movdqa %xmm1,320+16(%rsp) movdqa %xmm2,352(%rsp) movdqa %xmm3,352+16(%rsp) movdqa %xmm4,384(%rsp) movdqa %xmm5,384+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rbx),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rbx),%xmm1 movdqu 32(%rbx),%xmm2 por %xmm3,%xmm5 movdqu 48(%rbx),%xmm3 movdqa %xmm0,416(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,416+16(%rsp) por %xmm0,%xmm1 .byte 102,72,15,110,199 movdqa %xmm2,448(%rsp) movdqa %xmm3,448+16(%rsp) por %xmm2,%xmm3 por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm1,%xmm3 leaq 64-128(%rsi),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montx pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm3,%xmm4 movq 0(%rbx),%rdx movq %r12,%r9 por %xmm3,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 movq %r13,%r10 por %xmm3,%xmm4 pxor %xmm3,%xmm3 movq %r14,%r11 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 leaq 32-128(%rsp),%rsi movq %r15,%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 320(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 384(%rsp),%rdx leaq 384(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx movq 384(%rsp),%rdx leaq 384(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 288(%rsp),%rdi call __ecp_nistz256_mul_montx movq 448(%rsp),%rdx leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 352(%rsp),%rbx leaq 96(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 0+64(%rsp),%rdx movq 8+64(%rsp),%r14 leaq -128+64(%rsp),%rsi movq 
16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 128(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 0+96(%rsp),%rdx movq 8+96(%rsp),%r14 leaq -128+96(%rsp),%rsi movq 16+96(%rsp),%r15 movq 24+96(%rsp),%r8 leaq 192(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 128(%rsp),%rdx leaq 128(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montx movq 320(%rsp),%rdx leaq 320(%rsp),%rbx movq 0+128(%rsp),%r9 movq 8+128(%rsp),%r10 leaq -128+128(%rsp),%rsi movq 16+128(%rsp),%r11 movq 24+128(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx xorq %r11,%r11 addq %r12,%r12 leaq 192(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subx leaq 160(%rsp),%rbx leaq 224(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 64(%rsp),%rdi call __ecp_nistz256_subx movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 352(%rsp),%rdx leaq 352(%rsp),%rbx movq 0+160(%rsp),%r9 movq 8+160(%rsp),%r10 leaq -128+160(%rsp),%rsi movq 16+160(%rsp),%r11 movq 24+160(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx movq 96(%rsp),%rdx leaq 96(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 64(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 32(%rsp),%rbx leaq 256(%rsp),%rdi call __ecp_nistz256_sub_fromx .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand .LONE_mont(%rip),%xmm2 pand .LONE_mont+16(%rip),%xmm3 por 
%xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 224(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 224+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 320(%rsp),%xmm2 pand 320+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 256(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 256+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 352(%rsp),%xmm2 pand 352+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) leaq 480+56(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbx .cfi_restore %rbx movq -8(%rsi),%rbp .cfi_restore %rbp leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Ladd_affinex_epilogue: ret .cfi_endproc .size ecp_nistz256_point_add_affine_adx,.-ecp_nistz256_point_add_affine_adx #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/p256_beeu-armv8-asm-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include "CNIOBoringSSL_arm_arch.h" .text .globl _beeu_mod_inverse_vartime .private_extern _beeu_mod_inverse_vartime .align 4 _beeu_mod_inverse_vartime: // Reserve enough space for 14 8-byte registers on the stack // in the first stp call for x29, x30. // Then store the remaining callee-saved registers. // // | x29 | x30 | x19 | x20 | ... | x27 | x28 | x0 | x2 | // ^ ^ // sp <------------------- 112 bytes ----------------> old sp // x29 (FP) // AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-112]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] stp x0,x2,[sp,#96] // B = b3..b0 := a ldp x25,x26,[x1] ldp x27,x28,[x1,#16] // n3..n0 := n // Note: the value of input params are changed in the following. ldp x0,x1,[x2] ldp x2,x30,[x2,#16] // A = a3..a0 := n mov x21, x0 mov x22, x1 mov x23, x2 mov x24, x30 // X = x4..x0 := 1 mov x3, #1 eor x4, x4, x4 eor x5, x5, x5 eor x6, x6, x6 eor x7, x7, x7 // Y = y4..y0 := 0 eor x8, x8, x8 eor x9, x9, x9 eor x10, x10, x10 eor x11, x11, x11 eor x12, x12, x12 Lbeeu_loop: // if B == 0, jump to .Lbeeu_loop_end orr x14, x25, x26 orr x14, x14, x27 // reverse the bit order of x25. This is needed for clz after this macro rbit x15, x25 orr x14, x14, x28 cbz x14,Lbeeu_loop_end // 0 < B < |n|, // 0 < A <= |n|, // (1) X*a == B (mod |n|), // (2) (-1)*Y*a == A (mod |n|) // Now divide B by the maximum possible power of two in the // integers, and divide X by the same value mod |n|. // When we're done, (1) still holds. 
// shift := number of trailing 0s in x25 // ( = number of leading 0s in x15; see the "rbit" instruction in TEST_B_ZERO) clz x13, x15 // If there is no shift, goto shift_A_Y cbz x13, Lbeeu_shift_A_Y // Shift B right by "x13" bits neg x14, x13 lsr x25, x25, x13 lsl x15, x26, x14 lsr x26, x26, x13 lsl x19, x27, x14 orr x25, x25, x15 lsr x27, x27, x13 lsl x20, x28, x14 orr x26, x26, x19 lsr x28, x28, x13 orr x27, x27, x20 // Shift X right by "x13" bits, adding n whenever X becomes odd. // x13--; // x14 := 0; needed in the addition to the most significant word in SHIFT1 eor x14, x14, x14 Lbeeu_shift_loop_X: tbz x3, #0, Lshift1_0 adds x3, x3, x0 adcs x4, x4, x1 adcs x5, x5, x2 adcs x6, x6, x30 adc x7, x7, x14 Lshift1_0: // var0 := [var1|var0]<64..1>; // i.e. concatenate var1 and var0, // extract bits <64..1> from the resulting 128-bit value // and put them in var0 extr x3, x4, x3, #1 extr x4, x5, x4, #1 extr x5, x6, x5, #1 extr x6, x7, x6, #1 lsr x7, x7, #1 subs x13, x13, #1 bne Lbeeu_shift_loop_X // Note: the steps above perform the same sequence as in p256_beeu-x86_64-asm.pl // with the following differences: // - "x13" is set directly to the number of trailing 0s in B // (using rbit and clz instructions) // - The loop is only used to call SHIFT1(X) // and x13 is decreased while executing the X loop. // - SHIFT256(B, x13) is performed before right-shifting X; they are independent Lbeeu_shift_A_Y: // Same for A and Y. // Afterwards, (2) still holds. // Reverse the bit order of x21 // x13 := number of trailing 0s in x21 (= number of leading 0s in x15) rbit x15, x21 clz x13, x15 // If there is no shift, goto |B-A|, X+Y update cbz x13, Lbeeu_update_B_X_or_A_Y // Shift A right by "x13" bits neg x14, x13 lsr x21, x21, x13 lsl x15, x22, x14 lsr x22, x22, x13 lsl x19, x23, x14 orr x21, x21, x15 lsr x23, x23, x13 lsl x20, x24, x14 orr x22, x22, x19 lsr x24, x24, x13 orr x23, x23, x20 // Shift Y right by "x13" bits, adding n whenever Y becomes odd. 
// x13--; // x14 := 0; needed in the addition to the most significant word in SHIFT1 eor x14, x14, x14 Lbeeu_shift_loop_Y: tbz x8, #0, Lshift1_1 adds x8, x8, x0 adcs x9, x9, x1 adcs x10, x10, x2 adcs x11, x11, x30 adc x12, x12, x14 Lshift1_1: // var0 := [var1|var0]<64..1>; // i.e. concatenate var1 and var0, // extract bits <64..1> from the resulting 128-bit value // and put them in var0 extr x8, x9, x8, #1 extr x9, x10, x9, #1 extr x10, x11, x10, #1 extr x11, x12, x11, #1 lsr x12, x12, #1 subs x13, x13, #1 bne Lbeeu_shift_loop_Y Lbeeu_update_B_X_or_A_Y: // Try T := B - A; if cs, continue with B > A (cs: carry set = no borrow) // Note: this is a case of unsigned arithmetic, where T fits in 4 64-bit words // without taking a sign bit if generated. The lack of a carry would // indicate a negative result. See, for example, // https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/condition-codes-1-condition-flags-and-codes subs x14, x25, x21 sbcs x15, x26, x22 sbcs x19, x27, x23 sbcs x20, x28, x24 bcs Lbeeu_B_greater_than_A // Else A > B => // A := A - B; Y := Y + X; goto beginning of the loop subs x21, x21, x25 sbcs x22, x22, x26 sbcs x23, x23, x27 sbcs x24, x24, x28 adds x8, x8, x3 adcs x9, x9, x4 adcs x10, x10, x5 adcs x11, x11, x6 adc x12, x12, x7 b Lbeeu_loop Lbeeu_B_greater_than_A: // Continue with B > A => // B := B - A; X := X + Y; goto beginning of the loop mov x25, x14 mov x26, x15 mov x27, x19 mov x28, x20 adds x3, x3, x8 adcs x4, x4, x9 adcs x5, x5, x10 adcs x6, x6, x11 adc x7, x7, x12 b Lbeeu_loop Lbeeu_loop_end: // The Euclid's algorithm loop ends when A == gcd(a,n); // this would be 1, when a and n are co-prime (i.e. do not have a common factor). // Since (-1)*Y*a == A (mod |n|), Y>0 // then out = -Y mod n // Verify that A = 1 ==> (-1)*Y*a = A = 1 (mod |n|) // Is A-1 == 0? // If not, fail. 
sub x14, x21, #1 orr x14, x14, x22 orr x14, x14, x23 orr x14, x14, x24 cbnz x14, Lbeeu_err // If Y>n ==> Y:=Y-n Lbeeu_reduction_loop: // x_i := y_i - n_i (X is no longer needed, use it as temp) // (x14 = 0 from above) subs x3, x8, x0 sbcs x4, x9, x1 sbcs x5, x10, x2 sbcs x6, x11, x30 sbcs x7, x12, x14 // If result is non-negative (i.e., cs = carry set = no borrow), // y_i := x_i; goto reduce again // else // y_i := y_i; continue csel x8, x3, x8, cs csel x9, x4, x9, cs csel x10, x5, x10, cs csel x11, x6, x11, cs csel x12, x7, x12, cs bcs Lbeeu_reduction_loop // Now Y < n (Y cannot be equal to n, since the inverse cannot be 0) // out = -Y = n-Y subs x8, x0, x8 sbcs x9, x1, x9 sbcs x10, x2, x10 sbcs x11, x30, x11 // Save Y in output (out (x0) was saved on the stack) ldr x3, [sp,#96] stp x8, x9, [x3] stp x10, x11, [x3,#16] // return 1 (success) mov x0, #1 b Lbeeu_finish Lbeeu_err: // return 0 (error) eor x0, x0, x0 Lbeeu_finish: // Restore callee-saved registers, except x0, x2 add sp,x29,#0 ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldp x25,x26,[sp,#64] ldp x27,x28,[sp,#80] ldp x29,x30,[sp],#112 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/p256_beeu-armv8-asm-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include "CNIOBoringSSL_arm_arch.h" .text .globl beeu_mod_inverse_vartime .hidden beeu_mod_inverse_vartime .type beeu_mod_inverse_vartime, %function .align 4 beeu_mod_inverse_vartime: // Reserve enough space for 14 8-byte registers on the stack // in the first stp call for x29, x30. // Then store the remaining callee-saved registers. // // | x29 | x30 | x19 | x20 | ... | x27 | x28 | x0 | x2 | // ^ ^ // sp <------------------- 112 bytes ----------------> old sp // x29 (FP) // AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-112]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] stp x0,x2,[sp,#96] // B = b3..b0 := a ldp x25,x26,[x1] ldp x27,x28,[x1,#16] // n3..n0 := n // Note: the value of input params are changed in the following. ldp x0,x1,[x2] ldp x2,x30,[x2,#16] // A = a3..a0 := n mov x21, x0 mov x22, x1 mov x23, x2 mov x24, x30 // X = x4..x0 := 1 mov x3, #1 eor x4, x4, x4 eor x5, x5, x5 eor x6, x6, x6 eor x7, x7, x7 // Y = y4..y0 := 0 eor x8, x8, x8 eor x9, x9, x9 eor x10, x10, x10 eor x11, x11, x11 eor x12, x12, x12 .Lbeeu_loop: // if B == 0, jump to .Lbeeu_loop_end orr x14, x25, x26 orr x14, x14, x27 // reverse the bit order of x25. This is needed for clz after this macro rbit x15, x25 orr x14, x14, x28 cbz x14,.Lbeeu_loop_end // 0 < B < |n|, // 0 < A <= |n|, // (1) X*a == B (mod |n|), // (2) (-1)*Y*a == A (mod |n|) // Now divide B by the maximum possible power of two in the // integers, and divide X by the same value mod |n|. // When we're done, (1) still holds. 
// shift := number of trailing 0s in x25 // ( = number of leading 0s in x15; see the "rbit" instruction in TEST_B_ZERO) clz x13, x15 // If there is no shift, goto shift_A_Y cbz x13, .Lbeeu_shift_A_Y // Shift B right by "x13" bits neg x14, x13 lsr x25, x25, x13 lsl x15, x26, x14 lsr x26, x26, x13 lsl x19, x27, x14 orr x25, x25, x15 lsr x27, x27, x13 lsl x20, x28, x14 orr x26, x26, x19 lsr x28, x28, x13 orr x27, x27, x20 // Shift X right by "x13" bits, adding n whenever X becomes odd. // x13--; // x14 := 0; needed in the addition to the most significant word in SHIFT1 eor x14, x14, x14 .Lbeeu_shift_loop_X: tbz x3, #0, .Lshift1_0 adds x3, x3, x0 adcs x4, x4, x1 adcs x5, x5, x2 adcs x6, x6, x30 adc x7, x7, x14 .Lshift1_0: // var0 := [var1|var0]<64..1>; // i.e. concatenate var1 and var0, // extract bits <64..1> from the resulting 128-bit value // and put them in var0 extr x3, x4, x3, #1 extr x4, x5, x4, #1 extr x5, x6, x5, #1 extr x6, x7, x6, #1 lsr x7, x7, #1 subs x13, x13, #1 bne .Lbeeu_shift_loop_X // Note: the steps above perform the same sequence as in p256_beeu-x86_64-asm.pl // with the following differences: // - "x13" is set directly to the number of trailing 0s in B // (using rbit and clz instructions) // - The loop is only used to call SHIFT1(X) // and x13 is decreased while executing the X loop. // - SHIFT256(B, x13) is performed before right-shifting X; they are independent .Lbeeu_shift_A_Y: // Same for A and Y. // Afterwards, (2) still holds. // Reverse the bit order of x21 // x13 := number of trailing 0s in x21 (= number of leading 0s in x15) rbit x15, x21 clz x13, x15 // If there is no shift, goto |B-A|, X+Y update cbz x13, .Lbeeu_update_B_X_or_A_Y // Shift A right by "x13" bits neg x14, x13 lsr x21, x21, x13 lsl x15, x22, x14 lsr x22, x22, x13 lsl x19, x23, x14 orr x21, x21, x15 lsr x23, x23, x13 lsl x20, x24, x14 orr x22, x22, x19 lsr x24, x24, x13 orr x23, x23, x20 // Shift Y right by "x13" bits, adding n whenever Y becomes odd. 
// x13--; // x14 := 0; needed in the addition to the most significant word in SHIFT1 eor x14, x14, x14 .Lbeeu_shift_loop_Y: tbz x8, #0, .Lshift1_1 adds x8, x8, x0 adcs x9, x9, x1 adcs x10, x10, x2 adcs x11, x11, x30 adc x12, x12, x14 .Lshift1_1: // var0 := [var1|var0]<64..1>; // i.e. concatenate var1 and var0, // extract bits <64..1> from the resulting 128-bit value // and put them in var0 extr x8, x9, x8, #1 extr x9, x10, x9, #1 extr x10, x11, x10, #1 extr x11, x12, x11, #1 lsr x12, x12, #1 subs x13, x13, #1 bne .Lbeeu_shift_loop_Y .Lbeeu_update_B_X_or_A_Y: // Try T := B - A; if cs, continue with B > A (cs: carry set = no borrow) // Note: this is a case of unsigned arithmetic, where T fits in 4 64-bit words // without taking a sign bit if generated. The lack of a carry would // indicate a negative result. See, for example, // https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/condition-codes-1-condition-flags-and-codes subs x14, x25, x21 sbcs x15, x26, x22 sbcs x19, x27, x23 sbcs x20, x28, x24 bcs .Lbeeu_B_greater_than_A // Else A > B => // A := A - B; Y := Y + X; goto beginning of the loop subs x21, x21, x25 sbcs x22, x22, x26 sbcs x23, x23, x27 sbcs x24, x24, x28 adds x8, x8, x3 adcs x9, x9, x4 adcs x10, x10, x5 adcs x11, x11, x6 adc x12, x12, x7 b .Lbeeu_loop .Lbeeu_B_greater_than_A: // Continue with B > A => // B := B - A; X := X + Y; goto beginning of the loop mov x25, x14 mov x26, x15 mov x27, x19 mov x28, x20 adds x3, x3, x8 adcs x4, x4, x9 adcs x5, x5, x10 adcs x6, x6, x11 adc x7, x7, x12 b .Lbeeu_loop .Lbeeu_loop_end: // The Euclid's algorithm loop ends when A == gcd(a,n); // this would be 1, when a and n are co-prime (i.e. do not have a common factor). // Since (-1)*Y*a == A (mod |n|), Y>0 // then out = -Y mod n // Verify that A = 1 ==> (-1)*Y*a = A = 1 (mod |n|) // Is A-1 == 0? // If not, fail. 
sub x14, x21, #1 orr x14, x14, x22 orr x14, x14, x23 orr x14, x14, x24 cbnz x14, .Lbeeu_err // If Y>n ==> Y:=Y-n .Lbeeu_reduction_loop: // x_i := y_i - n_i (X is no longer needed, use it as temp) // (x14 = 0 from above) subs x3, x8, x0 sbcs x4, x9, x1 sbcs x5, x10, x2 sbcs x6, x11, x30 sbcs x7, x12, x14 // If result is non-negative (i.e., cs = carry set = no borrow), // y_i := x_i; goto reduce again // else // y_i := y_i; continue csel x8, x3, x8, cs csel x9, x4, x9, cs csel x10, x5, x10, cs csel x11, x6, x11, cs csel x12, x7, x12, cs bcs .Lbeeu_reduction_loop // Now Y < n (Y cannot be equal to n, since the inverse cannot be 0) // out = -Y = n-Y subs x8, x0, x8 sbcs x9, x1, x9 sbcs x10, x2, x10 sbcs x11, x30, x11 // Save Y in output (out (x0) was saved on the stack) ldr x3, [sp,#96] stp x8, x9, [x3] stp x10, x11, [x3,#16] // return 1 (success) mov x0, #1 b .Lbeeu_finish .Lbeeu_err: // return 0 (error) eor x0, x0, x0 .Lbeeu_finish: // Restore callee-saved registers, except x0, x2 add sp,x29,#0 ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldp x25,x26,[sp,#64] ldp x27,x28,[sp,#80] ldp x29,x30,[sp],#112 AARCH64_VALIDATE_LINK_REGISTER ret .size beeu_mod_inverse_vartime,.-beeu_mod_inverse_vartime #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/p256_beeu-armv8-asm-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include "CNIOBoringSSL_arm_arch.h" .text .globl beeu_mod_inverse_vartime .align 4 beeu_mod_inverse_vartime: // Reserve enough space for 14 8-byte registers on the stack // in the first stp call for x29, x30. 
// Then store the remaining callee-saved registers. // // | x29 | x30 | x19 | x20 | ... | x27 | x28 | x0 | x2 | // ^ ^ // sp <------------------- 112 bytes ----------------> old sp // x29 (FP) // AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-112]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] stp x0,x2,[sp,#96] // B = b3..b0 := a ldp x25,x26,[x1] ldp x27,x28,[x1,#16] // n3..n0 := n // Note: the value of input params are changed in the following. ldp x0,x1,[x2] ldp x2,x30,[x2,#16] // A = a3..a0 := n mov x21, x0 mov x22, x1 mov x23, x2 mov x24, x30 // X = x4..x0 := 1 mov x3, #1 eor x4, x4, x4 eor x5, x5, x5 eor x6, x6, x6 eor x7, x7, x7 // Y = y4..y0 := 0 eor x8, x8, x8 eor x9, x9, x9 eor x10, x10, x10 eor x11, x11, x11 eor x12, x12, x12 Lbeeu_loop: // if B == 0, jump to .Lbeeu_loop_end orr x14, x25, x26 orr x14, x14, x27 // reverse the bit order of x25. This is needed for clz after this macro rbit x15, x25 orr x14, x14, x28 cbz x14,Lbeeu_loop_end // 0 < B < |n|, // 0 < A <= |n|, // (1) X*a == B (mod |n|), // (2) (-1)*Y*a == A (mod |n|) // Now divide B by the maximum possible power of two in the // integers, and divide X by the same value mod |n|. // When we're done, (1) still holds. // shift := number of trailing 0s in x25 // ( = number of leading 0s in x15; see the "rbit" instruction in TEST_B_ZERO) clz x13, x15 // If there is no shift, goto shift_A_Y cbz x13, Lbeeu_shift_A_Y // Shift B right by "x13" bits neg x14, x13 lsr x25, x25, x13 lsl x15, x26, x14 lsr x26, x26, x13 lsl x19, x27, x14 orr x25, x25, x15 lsr x27, x27, x13 lsl x20, x28, x14 orr x26, x26, x19 lsr x28, x28, x13 orr x27, x27, x20 // Shift X right by "x13" bits, adding n whenever X becomes odd. 
// x13--; // x14 := 0; needed in the addition to the most significant word in SHIFT1 eor x14, x14, x14 Lbeeu_shift_loop_X: tbz x3, #0, Lshift1_0 adds x3, x3, x0 adcs x4, x4, x1 adcs x5, x5, x2 adcs x6, x6, x30 adc x7, x7, x14 Lshift1_0: // var0 := [var1|var0]<64..1>; // i.e. concatenate var1 and var0, // extract bits <64..1> from the resulting 128-bit value // and put them in var0 extr x3, x4, x3, #1 extr x4, x5, x4, #1 extr x5, x6, x5, #1 extr x6, x7, x6, #1 lsr x7, x7, #1 subs x13, x13, #1 bne Lbeeu_shift_loop_X // Note: the steps above perform the same sequence as in p256_beeu-x86_64-asm.pl // with the following differences: // - "x13" is set directly to the number of trailing 0s in B // (using rbit and clz instructions) // - The loop is only used to call SHIFT1(X) // and x13 is decreased while executing the X loop. // - SHIFT256(B, x13) is performed before right-shifting X; they are independent Lbeeu_shift_A_Y: // Same for A and Y. // Afterwards, (2) still holds. // Reverse the bit order of x21 // x13 := number of trailing 0s in x21 (= number of leading 0s in x15) rbit x15, x21 clz x13, x15 // If there is no shift, goto |B-A|, X+Y update cbz x13, Lbeeu_update_B_X_or_A_Y // Shift A right by "x13" bits neg x14, x13 lsr x21, x21, x13 lsl x15, x22, x14 lsr x22, x22, x13 lsl x19, x23, x14 orr x21, x21, x15 lsr x23, x23, x13 lsl x20, x24, x14 orr x22, x22, x19 lsr x24, x24, x13 orr x23, x23, x20 // Shift Y right by "x13" bits, adding n whenever Y becomes odd. // x13--; // x14 := 0; needed in the addition to the most significant word in SHIFT1 eor x14, x14, x14 Lbeeu_shift_loop_Y: tbz x8, #0, Lshift1_1 adds x8, x8, x0 adcs x9, x9, x1 adcs x10, x10, x2 adcs x11, x11, x30 adc x12, x12, x14 Lshift1_1: // var0 := [var1|var0]<64..1>; // i.e. 
concatenate var1 and var0, // extract bits <64..1> from the resulting 128-bit value // and put them in var0 extr x8, x9, x8, #1 extr x9, x10, x9, #1 extr x10, x11, x10, #1 extr x11, x12, x11, #1 lsr x12, x12, #1 subs x13, x13, #1 bne Lbeeu_shift_loop_Y Lbeeu_update_B_X_or_A_Y: // Try T := B - A; if cs, continue with B > A (cs: carry set = no borrow) // Note: this is a case of unsigned arithmetic, where T fits in 4 64-bit words // without taking a sign bit if generated. The lack of a carry would // indicate a negative result. See, for example, // https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/condition-codes-1-condition-flags-and-codes subs x14, x25, x21 sbcs x15, x26, x22 sbcs x19, x27, x23 sbcs x20, x28, x24 bcs Lbeeu_B_greater_than_A // Else A > B => // A := A - B; Y := Y + X; goto beginning of the loop subs x21, x21, x25 sbcs x22, x22, x26 sbcs x23, x23, x27 sbcs x24, x24, x28 adds x8, x8, x3 adcs x9, x9, x4 adcs x10, x10, x5 adcs x11, x11, x6 adc x12, x12, x7 b Lbeeu_loop Lbeeu_B_greater_than_A: // Continue with B > A => // B := B - A; X := X + Y; goto beginning of the loop mov x25, x14 mov x26, x15 mov x27, x19 mov x28, x20 adds x3, x3, x8 adcs x4, x4, x9 adcs x5, x5, x10 adcs x6, x6, x11 adc x7, x7, x12 b Lbeeu_loop Lbeeu_loop_end: // The Euclid's algorithm loop ends when A == gcd(a,n); // this would be 1, when a and n are co-prime (i.e. do not have a common factor). // Since (-1)*Y*a == A (mod |n|), Y>0 // then out = -Y mod n // Verify that A = 1 ==> (-1)*Y*a = A = 1 (mod |n|) // Is A-1 == 0? // If not, fail. 
sub x14, x21, #1 orr x14, x14, x22 orr x14, x14, x23 orr x14, x14, x24 cbnz x14, Lbeeu_err // If Y>n ==> Y:=Y-n Lbeeu_reduction_loop: // x_i := y_i - n_i (X is no longer needed, use it as temp) // (x14 = 0 from above) subs x3, x8, x0 sbcs x4, x9, x1 sbcs x5, x10, x2 sbcs x6, x11, x30 sbcs x7, x12, x14 // If result is non-negative (i.e., cs = carry set = no borrow), // y_i := x_i; goto reduce again // else // y_i := y_i; continue csel x8, x3, x8, cs csel x9, x4, x9, cs csel x10, x5, x10, cs csel x11, x6, x11, cs csel x12, x7, x12, cs bcs Lbeeu_reduction_loop // Now Y < n (Y cannot be equal to n, since the inverse cannot be 0) // out = -Y = n-Y subs x8, x0, x8 sbcs x9, x1, x9 sbcs x10, x2, x10 sbcs x11, x30, x11 // Save Y in output (out (x0) was saved on the stack) ldr x3, [sp,#96] stp x8, x9, [x3] stp x10, x11, [x3,#16] // return 1 (success) mov x0, #1 b Lbeeu_finish Lbeeu_err: // return 0 (error) eor x0, x0, x0 Lbeeu_finish: // Restore callee-saved registers, except x0, x2 add sp,x29,#0 ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldp x25,x26,[sp,#64] ldp x27,x28,[sp,#80] ldp x29,x30,[sp],#112 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/p256_beeu-x86_64-asm-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include
// NOTE(review): the generated file has a header path after #include above;
// it was lost in extraction — confirm against the upstream generated source.
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text

// beeu_mod_inverse_vartime(uint64_t out[4], uint64_t a[4], uint64_t n[4])
//
// Binary Extended Euclidean algorithm, variable time (macOS x86-64 build).
// SysV arguments: rdi = out, rsi = a, rdx = n (rdx stays live throughout).
// Working state:
//   X = r8..r11 plus a fifth carry word in rdi   (initialised to 1)
//   Y = r12..r15 plus a fifth carry word in rbp  (initialised to 0)
//   B = 48..72(%rsp), loaded from a
//   A = 16..40(%rsp), loaded from n
//   0(%rsp) keeps the original out pointer (rdi is reused for X's top word).
// The AArch64 build of this routine (same label structure) documents the
// invariant (-1)*Y*a == A (mod |n|); when the loop ends with
// A == gcd(a, n) == 1 the inverse is written as out = n - (Y mod n).
// Returns rax = 1 on success, rax = 0 when a is not invertible mod n.
.private_extern _beeu_mod_inverse_vartime
.globl _beeu_mod_inverse_vartime
.private_extern _beeu_mod_inverse_vartime
.p2align 5
_beeu_mod_inverse_vartime:
_CET_ENDBR
// Prologue: save callee-saved registers (plus rsi) and carve an 80-byte frame.
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %rbx
pushq %rsi
subq $80,%rsp
movq %rdi,0(%rsp)
// X := 1, Y := 0.
movq $1,%r8
xorq %r9,%r9
xorq %r10,%r10
xorq %r11,%r11
xorq %rdi,%rdi
xorq %r12,%r12
xorq %r13,%r13
xorq %r14,%r14
xorq %r15,%r15
xorq %rbp,%rbp
// B := a (48..72(%rsp)), A := n (16..40(%rsp)).
vmovdqu 0(%rsi),%xmm0
vmovdqu 16(%rsi),%xmm1
vmovdqu %xmm0,48(%rsp)
vmovdqu %xmm1,64(%rsp)
vmovdqu 0(%rdx),%xmm0
vmovdqu 16(%rdx),%xmm1
vmovdqu %xmm0,16(%rsp)
vmovdqu %xmm1,32(%rsp)

L$beeu_loop:
// Main Euclid loop: run while B != 0 (OR of all four words of B).
xorq %rbx,%rbx
orq 48(%rsp),%rbx
orq 56(%rsp),%rbx
orq 64(%rsp),%rbx
orq 72(%rsp),%rbx
jz L$beeu_loop_end

// Strip trailing zero bits of B (rcx is a walking one-bit probe, at most
// 27 bits per pass). For every bit stripped, halve X:
//   X := X/2 when X is even, else X := (X + n)/2.
movq $1,%rcx
L$beeu_shift_loop_XB:
movq %rcx,%rbx
andq 48(%rsp),%rbx
jnz L$beeu_shift_loop_end_XB
movq $1,%rbx
andq %r8,%rbx
jz L$shift1_0
// X is odd: add n first so the halving stays exact mod n.
addq 0(%rdx),%r8
adcq 8(%rdx),%r9
adcq 16(%rdx),%r10
adcq 24(%rdx),%r11
adcq $0,%rdi
L$shift1_0:
// X >>= 1 across the five-word value.
shrdq $1,%r9,%r8
shrdq $1,%r10,%r9
shrdq $1,%r11,%r10
shrdq $1,%rdi,%r11
shrq $1,%rdi
shlq $1,%rcx
cmpq $0x8000000,%rcx
jne L$beeu_shift_loop_XB
L$beeu_shift_loop_end_XB:
// bsf recovers how many bits were stripped; shift B right by that count.
bsfq %rcx,%rcx
testq %rcx,%rcx
jz L$beeu_no_shift_XB
movq 8+48(%rsp),%rax
movq 16+48(%rsp),%rbx
movq 24+48(%rsp),%rsi
shrdq %cl,%rax,0+48(%rsp)
shrdq %cl,%rbx,8+48(%rsp)
shrdq %cl,%rsi,16+48(%rsp)
shrq %cl,%rsi
movq %rsi,24+48(%rsp)
L$beeu_no_shift_XB:

// Same treatment for the (A, Y) pair: strip trailing zeros of A, halving Y.
movq $1,%rcx
L$beeu_shift_loop_YA:
movq %rcx,%rbx
andq 16(%rsp),%rbx
jnz L$beeu_shift_loop_end_YA
movq $1,%rbx
andq %r12,%rbx
jz L$shift1_1
// Y is odd: Y := (Y + n)/2.
addq 0(%rdx),%r12
adcq 8(%rdx),%r13
adcq 16(%rdx),%r14
adcq 24(%rdx),%r15
adcq $0,%rbp
L$shift1_1:
shrdq $1,%r13,%r12
shrdq $1,%r14,%r13
shrdq $1,%r15,%r14
shrdq $1,%rbp,%r15
shrq $1,%rbp
shlq $1,%rcx
cmpq $0x8000000,%rcx
jne L$beeu_shift_loop_YA
L$beeu_shift_loop_end_YA:
bsfq %rcx,%rcx
testq %rcx,%rcx
jz L$beeu_no_shift_YA
movq 8+16(%rsp),%rax
movq 16+16(%rsp),%rbx
movq 24+16(%rsp),%rsi
shrdq %cl,%rax,0+16(%rsp)
shrdq %cl,%rbx,8+16(%rsp)
shrdq %cl,%rsi,16+16(%rsp)
shrq %cl,%rsi
movq %rsi,24+16(%rsp)
L$beeu_no_shift_YA:
// Try T := B - A; no borrow (jnc) means B >= A.
movq 48(%rsp),%rax
movq 56(%rsp),%rbx
movq 64(%rsp),%rsi
movq 72(%rsp),%rcx
subq 16(%rsp),%rax
sbbq 24(%rsp),%rbx
sbbq 32(%rsp),%rsi
sbbq 40(%rsp),%rcx
jnc L$beeu_B_bigger_than_A
// A > B: A := A - B; Y := Y + X; back to the top of the loop.
movq 16(%rsp),%rax
movq 24(%rsp),%rbx
movq 32(%rsp),%rsi
movq 40(%rsp),%rcx
subq 48(%rsp),%rax
sbbq 56(%rsp),%rbx
sbbq 64(%rsp),%rsi
sbbq 72(%rsp),%rcx
movq %rax,16(%rsp)
movq %rbx,24(%rsp)
movq %rsi,32(%rsp)
movq %rcx,40(%rsp)
addq %r8,%r12
adcq %r9,%r13
adcq %r10,%r14
adcq %r11,%r15
adcq %rdi,%rbp
jmp L$beeu_loop
L$beeu_B_bigger_than_A:
// B >= A: B := B - A (difference already in rax..rcx); X := X + Y.
movq %rax,48(%rsp)
movq %rbx,56(%rsp)
movq %rsi,64(%rsp)
movq %rcx,72(%rsp)
addq %r12,%r8
adcq %r13,%r9
adcq %r14,%r10
adcq %r15,%r11
adcq %rbp,%rdi
jmp L$beeu_loop

L$beeu_loop_end:
// B == 0, so A holds gcd(a, n); fail unless A == 1 (a, n coprime).
movq 16(%rsp),%rbx
subq $1,%rbx
orq 24(%rsp),%rbx
orq 32(%rsp),%rbx
orq 40(%rsp),%rbx
jnz L$beeu_err
// Reduce Y below n: subtract n from the five-word Y while no borrow occurs.
movq 0(%rdx),%r8
movq 8(%rdx),%r9
movq 16(%rdx),%r10
movq 24(%rdx),%r11
xorq %rdi,%rdi
L$beeu_reduction_loop:
movq %r12,16(%rsp)
movq %r13,24(%rsp)
movq %r14,32(%rsp)
movq %r15,40(%rsp)
movq %rbp,48(%rsp)
subq %r8,%r12
sbbq %r9,%r13
sbbq %r10,%r14
sbbq %r11,%r15
sbbq $0,%rbp
// On borrow, put back the saved copies of r12..r15 and leave the loop.
cmovcq 16(%rsp),%r12
cmovcq 24(%rsp),%r13
cmovcq 32(%rsp),%r14
cmovcq 40(%rsp),%r15
jnc L$beeu_reduction_loop
// out = n - Y: the inverse is -Y mod n (Y cannot be 0 here).
subq %r12,%r8
sbbq %r13,%r9
sbbq %r14,%r10
sbbq %r15,%r11
L$beeu_save:
// Store the result through the saved out pointer and return 1.
movq 0(%rsp),%rdi
movq %r8,0(%rdi)
movq %r9,8(%rdi)
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq $1,%rax
jmp L$beeu_finish
L$beeu_err:
// gcd(a, n) != 1: no inverse exists, return 0.
xorq %rax,%rax
L$beeu_finish:
// Epilogue: release the frame and restore the saved registers.
addq $80,%rsp
popq %rsi
popq %rbx
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
ret

#endif
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif


================================================
FILE: Sources/CNIOBoringSSL/gen/bcm/p256_beeu-x86_64-asm-linux.S
================================================
#define BORINGSSL_PREFIX CNIOBoringSSL
// This file is generated from a
// similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include
// NOTE(review): the header path after #include was lost in extraction.
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text

// beeu_mod_inverse_vartime(uint64_t out[4], uint64_t a[4], uint64_t n[4])
//
// Linux/ELF build of the variable-time binary extended Euclidean modular
// inverse; same instruction stream as the __APPLE__ build in the sibling
// p256_beeu-x86_64-asm-apple.S file, plus DWARF CFI unwind annotations.
// rdi = out (saved at 0(%rsp)), rsi = a, rdx = n (rdx stays live).
// X = r8..r11 + rdi (init 1), Y = r12..r15 + rbp (init 0),
// B = 48..72(%rsp) from a, A = 16..40(%rsp) from n.
// Returns rax = 1 on success, 0 when gcd(a, n) != 1.
.type beeu_mod_inverse_vartime,@function
.hidden beeu_mod_inverse_vartime
.globl beeu_mod_inverse_vartime
.hidden beeu_mod_inverse_vartime
.align 32
beeu_mod_inverse_vartime:
.cfi_startproc
_CET_ENDBR
// Prologue: save callee-saved registers (plus rsi), each paired with its
// CFI bookkeeping for the unwinder.
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset rbp,-16
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset r12,-24
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset r13,-32
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset r14,-40
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset r15,-48
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset rbx,-56
pushq %rsi
.cfi_adjust_cfa_offset 8
.cfi_offset rsi,-64
subq $80,%rsp
.cfi_adjust_cfa_offset 80
movq %rdi,0(%rsp)
// X := 1, Y := 0.
movq $1,%r8
xorq %r9,%r9
xorq %r10,%r10
xorq %r11,%r11
xorq %rdi,%rdi
xorq %r12,%r12
xorq %r13,%r13
xorq %r14,%r14
xorq %r15,%r15
xorq %rbp,%rbp
// B := a, A := n.
vmovdqu 0(%rsi),%xmm0
vmovdqu 16(%rsi),%xmm1
vmovdqu %xmm0,48(%rsp)
vmovdqu %xmm1,64(%rsp)
vmovdqu 0(%rdx),%xmm0
vmovdqu 16(%rdx),%xmm1
vmovdqu %xmm0,16(%rsp)
vmovdqu %xmm1,32(%rsp)

.Lbeeu_loop:
// Main loop: run while B != 0.
xorq %rbx,%rbx
orq 48(%rsp),%rbx
orq 56(%rsp),%rbx
orq 64(%rsp),%rbx
orq 72(%rsp),%rbx
jz .Lbeeu_loop_end

// Strip up to 27 trailing zero bits of B, halving X per stripped bit:
// X := X/2 if even, else (X + n)/2.
movq $1,%rcx
.Lbeeu_shift_loop_XB:
movq %rcx,%rbx
andq 48(%rsp),%rbx
jnz .Lbeeu_shift_loop_end_XB
movq $1,%rbx
andq %r8,%rbx
jz .Lshift1_0
addq 0(%rdx),%r8
adcq 8(%rdx),%r9
adcq 16(%rdx),%r10
adcq 24(%rdx),%r11
adcq $0,%rdi
.Lshift1_0:
shrdq $1,%r9,%r8
shrdq $1,%r10,%r9
shrdq $1,%r11,%r10
shrdq $1,%rdi,%r11
shrq $1,%rdi
shlq $1,%rcx
cmpq $0x8000000,%rcx
jne .Lbeeu_shift_loop_XB
.Lbeeu_shift_loop_end_XB:
// Shift B right by the number of bits just stripped (bsf of the probe).
bsfq %rcx,%rcx
testq %rcx,%rcx
jz .Lbeeu_no_shift_XB
movq 8+48(%rsp),%rax
movq 16+48(%rsp),%rbx
movq 24+48(%rsp),%rsi
shrdq %cl,%rax,0+48(%rsp)
shrdq %cl,%rbx,8+48(%rsp)
shrdq %cl,%rsi,16+48(%rsp)
shrq %cl,%rsi
movq %rsi,24+48(%rsp)
.Lbeeu_no_shift_XB:

// Same treatment for the (A, Y) pair.
movq $1,%rcx
.Lbeeu_shift_loop_YA:
movq %rcx,%rbx
andq 16(%rsp),%rbx
jnz .Lbeeu_shift_loop_end_YA
movq $1,%rbx
andq %r12,%rbx
jz .Lshift1_1
addq 0(%rdx),%r12
adcq 8(%rdx),%r13
adcq 16(%rdx),%r14
adcq 24(%rdx),%r15
adcq $0,%rbp
.Lshift1_1:
shrdq $1,%r13,%r12
shrdq $1,%r14,%r13
shrdq $1,%r15,%r14
shrdq $1,%rbp,%r15
shrq $1,%rbp
shlq $1,%rcx
cmpq $0x8000000,%rcx
jne .Lbeeu_shift_loop_YA
.Lbeeu_shift_loop_end_YA:
bsfq %rcx,%rcx
testq %rcx,%rcx
jz .Lbeeu_no_shift_YA
movq 8+16(%rsp),%rax
movq 16+16(%rsp),%rbx
movq 24+16(%rsp),%rsi
shrdq %cl,%rax,0+16(%rsp)
shrdq %cl,%rbx,8+16(%rsp)
shrdq %cl,%rsi,16+16(%rsp)
shrq %cl,%rsi
movq %rsi,24+16(%rsp)
.Lbeeu_no_shift_YA:
// T := B - A; no borrow (jnc) means B >= A.
movq 48(%rsp),%rax
movq 56(%rsp),%rbx
movq 64(%rsp),%rsi
movq 72(%rsp),%rcx
subq 16(%rsp),%rax
sbbq 24(%rsp),%rbx
sbbq 32(%rsp),%rsi
sbbq 40(%rsp),%rcx
jnc .Lbeeu_B_bigger_than_A
// A > B: A := A - B; Y := Y + X.
movq 16(%rsp),%rax
movq 24(%rsp),%rbx
movq 32(%rsp),%rsi
movq 40(%rsp),%rcx
subq 48(%rsp),%rax
sbbq 56(%rsp),%rbx
sbbq 64(%rsp),%rsi
sbbq 72(%rsp),%rcx
movq %rax,16(%rsp)
movq %rbx,24(%rsp)
movq %rsi,32(%rsp)
movq %rcx,40(%rsp)
addq %r8,%r12
adcq %r9,%r13
adcq %r10,%r14
adcq %r11,%r15
adcq %rdi,%rbp
jmp .Lbeeu_loop
.Lbeeu_B_bigger_than_A:
// B >= A: B := B - A (difference already in rax..rcx); X := X + Y.
movq %rax,48(%rsp)
movq %rbx,56(%rsp)
movq %rsi,64(%rsp)
movq %rcx,72(%rsp)
addq %r12,%r8
adcq %r13,%r9
adcq %r14,%r10
adcq %r15,%r11
adcq %rbp,%rdi
jmp .Lbeeu_loop

.Lbeeu_loop_end:
// A now holds gcd(a, n); fail unless it is exactly 1.
movq 16(%rsp),%rbx
subq $1,%rbx
orq 24(%rsp),%rbx
orq 32(%rsp),%rbx
orq 40(%rsp),%rbx
jnz .Lbeeu_err
// Reduce Y below n, then emit out = n - Y.
movq 0(%rdx),%r8
movq 8(%rdx),%r9
movq 16(%rdx),%r10
movq 24(%rdx),%r11
xorq %rdi,%rdi
.Lbeeu_reduction_loop:
movq %r12,16(%rsp)
movq %r13,24(%rsp)
movq %r14,32(%rsp)
movq %r15,40(%rsp)
movq %rbp,48(%rsp)
subq %r8,%r12
sbbq %r9,%r13
sbbq %r10,%r14
sbbq %r11,%r15
sbbq $0,%rbp
// On borrow, restore the saved copies of r12..r15 and stop subtracting.
cmovcq 16(%rsp),%r12
cmovcq 24(%rsp),%r13
cmovcq 32(%rsp),%r14
cmovcq 40(%rsp),%r15
jnc .Lbeeu_reduction_loop
subq %r12,%r8
sbbq %r13,%r9
sbbq %r14,%r10
sbbq %r15,%r11
.Lbeeu_save:
// Write the inverse through the saved out pointer; return 1.
movq 0(%rsp),%rdi
movq %r8,0(%rdi)
movq %r9,8(%rdi)
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq $1,%rax
jmp .Lbeeu_finish
.Lbeeu_err:
// Not invertible: return 0.
xorq %rax,%rax
.Lbeeu_finish:
// Epilogue with matching CFI restores.
addq $80,%rsp
.cfi_adjust_cfa_offset -80
popq %rsi
.cfi_adjust_cfa_offset -8
.cfi_restore rsi
popq %rbx
.cfi_adjust_cfa_offset -8
.cfi_restore rbx
popq %r15
.cfi_adjust_cfa_offset -8
.cfi_restore r15
popq %r14
.cfi_adjust_cfa_offset -8
.cfi_restore r14
popq %r13
.cfi_adjust_cfa_offset -8
.cfi_restore r13
popq %r12
.cfi_adjust_cfa_offset -8
.cfi_restore r12
popq %rbp
.cfi_adjust_cfa_offset -8
.cfi_restore rbp
ret
.cfi_endproc
.size beeu_mod_inverse_vartime, .-beeu_mod_inverse_vartime
#endif
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif


================================================
FILE: Sources/CNIOBoringSSL/gen/bcm/rdrand-x86_64-apple.S
================================================
#define BORINGSSL_PREFIX CNIOBoringSSL
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include
// NOTE(review): the header path after #include was lost in extraction.
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text

// CRYPTO_rdrand(uint8_t out[8]) — fill 8 bytes at *rdi from the CPU's
// RDRAND instruction. Returns rax = 1 when RDRAND reported success
// (carry flag set), rax = 0 otherwise.
.globl _CRYPTO_rdrand
.private_extern _CRYPTO_rdrand
.p2align 4
_CRYPTO_rdrand:
_CET_ENDBR
xorq %rax,%rax
// rdrand %rdx, emitted as raw bytes (48 0F C7 F2) — presumably so it
// assembles on toolchains that predate the mnemonic; confirm upstream.
.byte 72,15,199,242
// RDRAND sets CF on success; rax was zeroed, so adc folds CF into 0/1.
adcq %rax,%rax
movq %rdx,0(%rdi)
ret

// CRYPTO_rdrand_multiple8_buf(uint8_t *buf, size_t len) — fill the buffer
// 8 bytes at a time. The name promises len is a multiple of 8; the loop
// itself only checks len for zero (NOTE(review): callers must guarantee
// the multiple-of-8 contract). Returns 1 on success (including len == 0),
// 0 as soon as a single RDRAND attempt fails.
.globl _CRYPTO_rdrand_multiple8_buf
.private_extern _CRYPTO_rdrand_multiple8_buf
.p2align 4
_CRYPTO_rdrand_multiple8_buf:
_CET_ENDBR
testq %rsi,%rsi
jz L$out
movq $8,%rdx
L$loop:
// rdrand %rcx (48 0F C7 F1); CF clear means the generator had no data.
.byte 72,15,199,241
jnc L$err
movq %rcx,0(%rdi)
addq %rdx,%rdi
subq %rdx,%rsi
jnz L$loop
L$out:
movq $1,%rax
ret
L$err:
xorq %rax,%rax
ret

#endif
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif


================================================
FILE: Sources/CNIOBoringSSL/gen/bcm/rdrand-x86_64-linux.S
================================================
#define BORINGSSL_PREFIX CNIOBoringSSL
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include
// NOTE(review): the header path after #include was lost in extraction.
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text

// CRYPTO_rdrand(uint8_t out[8]) — Linux/ELF build: fill 8 bytes at *rdi
// from the CPU's RDRAND instruction. Returns rax = 1 when RDRAND
// reported success (carry flag set), rax = 0 otherwise.
.globl CRYPTO_rdrand
.hidden CRYPTO_rdrand
.type CRYPTO_rdrand,@function
.align 16
CRYPTO_rdrand:
.cfi_startproc
_CET_ENDBR
xorq %rax,%rax
// rdrand %rdx, emitted as raw bytes (48 0F C7 F2) — presumably for
// old-assembler compatibility; confirm upstream.
.byte 72,15,199,242
// RDRAND sets CF on success; rax was zeroed, so adc folds CF into 0/1.
adcq %rax,%rax
movq %rdx,0(%rdi)
ret
.cfi_endproc
.size CRYPTO_rdrand,.-CRYPTO_rdrand

// CRYPTO_rdrand_multiple8_buf(uint8_t *buf, size_t len) — fill the buffer
// 8 bytes at a time. The name promises len is a multiple of 8; the loop
// itself only checks len for zero (NOTE(review): callers must guarantee
// the multiple-of-8 contract). Returns 1 on success (including len == 0),
// 0 as soon as a single RDRAND attempt fails.
.globl CRYPTO_rdrand_multiple8_buf
.hidden CRYPTO_rdrand_multiple8_buf
.type CRYPTO_rdrand_multiple8_buf,@function
.align 16
CRYPTO_rdrand_multiple8_buf:
.cfi_startproc
_CET_ENDBR
testq %rsi,%rsi
jz .Lout
movq $8,%rdx
.Lloop:
// rdrand %rcx (48 0F C7 F1); CF clear means the generator had no data.
.byte 72,15,199,241
jnc .Lerr
movq %rcx,0(%rdi)
addq %rdx,%rdi
subq %rdx,%rsi
jnz .Lloop
.Lout:
movq $1,%rax
ret
.Lerr:
xorq %rax,%rax
ret
.cfi_endproc
.size CRYPTO_rdrand_multiple8_buf,.-CRYPTO_rdrand_multiple8_buf

#endif
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif


================================================
FILE: Sources/CNIOBoringSSL/gen/bcm/rsaz-avx2-apple.S
================================================
#define BORINGSSL_PREFIX CNIOBoringSSL
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _rsaz_1024_sqr_avx2 .private_extern _rsaz_1024_sqr_avx2 .p2align 6 _rsaz_1024_sqr_avx2: _CET_ENDBR leaq (%rsp),%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 vzeroupper movq %rax,%rbp movq %rdx,%r13 subq $832,%rsp movq %r13,%r15 subq $-128,%rdi subq $-128,%rsi subq $-128,%r13 andq $4095,%r15 addq $320,%r15 shrq $12,%r15 vpxor %ymm9,%ymm9,%ymm9 jz L$sqr_1024_no_n_copy subq $320,%rsp vmovdqu 0-128(%r13),%ymm0 andq $-2048,%rsp vmovdqu 32-128(%r13),%ymm1 vmovdqu 64-128(%r13),%ymm2 vmovdqu 96-128(%r13),%ymm3 vmovdqu 128-128(%r13),%ymm4 vmovdqu 160-128(%r13),%ymm5 vmovdqu 192-128(%r13),%ymm6 vmovdqu 224-128(%r13),%ymm7 vmovdqu 256-128(%r13),%ymm8 leaq 832+128(%rsp),%r13 vmovdqu %ymm0,0-128(%r13) vmovdqu %ymm1,32-128(%r13) vmovdqu %ymm2,64-128(%r13) vmovdqu %ymm3,96-128(%r13) vmovdqu %ymm4,128-128(%r13) vmovdqu %ymm5,160-128(%r13) vmovdqu %ymm6,192-128(%r13) vmovdqu %ymm7,224-128(%r13) vmovdqu %ymm8,256-128(%r13) vmovdqu %ymm9,288-128(%r13) L$sqr_1024_no_n_copy: andq $-1024,%rsp vmovdqu 32-128(%rsi),%ymm1 vmovdqu 64-128(%rsi),%ymm2 vmovdqu 96-128(%rsi),%ymm3 vmovdqu 128-128(%rsi),%ymm4 vmovdqu 160-128(%rsi),%ymm5 vmovdqu 192-128(%rsi),%ymm6 vmovdqu 224-128(%rsi),%ymm7 vmovdqu 256-128(%rsi),%ymm8 leaq 192(%rsp),%rbx vmovdqu L$and_mask(%rip),%ymm15 jmp L$OOP_GRANDE_SQR_1024 .p2align 5 L$OOP_GRANDE_SQR_1024: leaq 576+128(%rsp),%r9 leaq 448(%rsp),%r12 vpaddq %ymm1,%ymm1,%ymm1 vpbroadcastq 0-128(%rsi),%ymm10 vpaddq %ymm2,%ymm2,%ymm2 vmovdqa %ymm1,0-128(%r9) vpaddq %ymm3,%ymm3,%ymm3 vmovdqa %ymm2,32-128(%r9) vpaddq %ymm4,%ymm4,%ymm4 vmovdqa %ymm3,64-128(%r9) vpaddq %ymm5,%ymm5,%ymm5 vmovdqa %ymm4,96-128(%r9) vpaddq %ymm6,%ymm6,%ymm6 vmovdqa %ymm5,128-128(%r9) vpaddq %ymm7,%ymm7,%ymm7 vmovdqa %ymm6,160-128(%r9) vpaddq %ymm8,%ymm8,%ymm8 vmovdqa %ymm7,192-128(%r9) vpxor %ymm9,%ymm9,%ymm9 vmovdqa %ymm8,224-128(%r9) vpmuludq 0-128(%rsi),%ymm10,%ymm0 
vpbroadcastq 32-128(%rsi),%ymm11 vmovdqu %ymm9,288-192(%rbx) vpmuludq %ymm10,%ymm1,%ymm1 vmovdqu %ymm9,320-448(%r12) vpmuludq %ymm10,%ymm2,%ymm2 vmovdqu %ymm9,352-448(%r12) vpmuludq %ymm10,%ymm3,%ymm3 vmovdqu %ymm9,384-448(%r12) vpmuludq %ymm10,%ymm4,%ymm4 vmovdqu %ymm9,416-448(%r12) vpmuludq %ymm10,%ymm5,%ymm5 vmovdqu %ymm9,448-448(%r12) vpmuludq %ymm10,%ymm6,%ymm6 vmovdqu %ymm9,480-448(%r12) vpmuludq %ymm10,%ymm7,%ymm7 vmovdqu %ymm9,512-448(%r12) vpmuludq %ymm10,%ymm8,%ymm8 vpbroadcastq 64-128(%rsi),%ymm10 vmovdqu %ymm9,544-448(%r12) movq %rsi,%r15 movl $4,%r14d jmp L$sqr_entry_1024 .p2align 5 L$OOP_SQR_1024: vpbroadcastq 32-128(%r15),%ymm11 vpmuludq 0-128(%rsi),%ymm10,%ymm0 vpaddq 0-192(%rbx),%ymm0,%ymm0 vpmuludq 0-128(%r9),%ymm10,%ymm1 vpaddq 32-192(%rbx),%ymm1,%ymm1 vpmuludq 32-128(%r9),%ymm10,%ymm2 vpaddq 64-192(%rbx),%ymm2,%ymm2 vpmuludq 64-128(%r9),%ymm10,%ymm3 vpaddq 96-192(%rbx),%ymm3,%ymm3 vpmuludq 96-128(%r9),%ymm10,%ymm4 vpaddq 128-192(%rbx),%ymm4,%ymm4 vpmuludq 128-128(%r9),%ymm10,%ymm5 vpaddq 160-192(%rbx),%ymm5,%ymm5 vpmuludq 160-128(%r9),%ymm10,%ymm6 vpaddq 192-192(%rbx),%ymm6,%ymm6 vpmuludq 192-128(%r9),%ymm10,%ymm7 vpaddq 224-192(%rbx),%ymm7,%ymm7 vpmuludq 224-128(%r9),%ymm10,%ymm8 vpbroadcastq 64-128(%r15),%ymm10 vpaddq 256-192(%rbx),%ymm8,%ymm8 L$sqr_entry_1024: vmovdqu %ymm0,0-192(%rbx) vmovdqu %ymm1,32-192(%rbx) vpmuludq 32-128(%rsi),%ymm11,%ymm12 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq 32-128(%r9),%ymm11,%ymm14 vpaddq %ymm14,%ymm3,%ymm3 vpmuludq 64-128(%r9),%ymm11,%ymm13 vpaddq %ymm13,%ymm4,%ymm4 vpmuludq 96-128(%r9),%ymm11,%ymm12 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq 128-128(%r9),%ymm11,%ymm14 vpaddq %ymm14,%ymm6,%ymm6 vpmuludq 160-128(%r9),%ymm11,%ymm13 vpaddq %ymm13,%ymm7,%ymm7 vpmuludq 192-128(%r9),%ymm11,%ymm12 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq 224-128(%r9),%ymm11,%ymm0 vpbroadcastq 96-128(%r15),%ymm11 vpaddq 288-192(%rbx),%ymm0,%ymm0 vmovdqu %ymm2,64-192(%rbx) vmovdqu %ymm3,96-192(%rbx) vpmuludq 64-128(%rsi),%ymm10,%ymm13 vpaddq 
%ymm13,%ymm4,%ymm4 vpmuludq 64-128(%r9),%ymm10,%ymm12 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq 96-128(%r9),%ymm10,%ymm14 vpaddq %ymm14,%ymm6,%ymm6 vpmuludq 128-128(%r9),%ymm10,%ymm13 vpaddq %ymm13,%ymm7,%ymm7 vpmuludq 160-128(%r9),%ymm10,%ymm12 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq 192-128(%r9),%ymm10,%ymm14 vpaddq %ymm14,%ymm0,%ymm0 vpmuludq 224-128(%r9),%ymm10,%ymm1 vpbroadcastq 128-128(%r15),%ymm10 vpaddq 320-448(%r12),%ymm1,%ymm1 vmovdqu %ymm4,128-192(%rbx) vmovdqu %ymm5,160-192(%rbx) vpmuludq 96-128(%rsi),%ymm11,%ymm12 vpaddq %ymm12,%ymm6,%ymm6 vpmuludq 96-128(%r9),%ymm11,%ymm14 vpaddq %ymm14,%ymm7,%ymm7 vpmuludq 128-128(%r9),%ymm11,%ymm13 vpaddq %ymm13,%ymm8,%ymm8 vpmuludq 160-128(%r9),%ymm11,%ymm12 vpaddq %ymm12,%ymm0,%ymm0 vpmuludq 192-128(%r9),%ymm11,%ymm14 vpaddq %ymm14,%ymm1,%ymm1 vpmuludq 224-128(%r9),%ymm11,%ymm2 vpbroadcastq 160-128(%r15),%ymm11 vpaddq 352-448(%r12),%ymm2,%ymm2 vmovdqu %ymm6,192-192(%rbx) vmovdqu %ymm7,224-192(%rbx) vpmuludq 128-128(%rsi),%ymm10,%ymm12 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq 128-128(%r9),%ymm10,%ymm14 vpaddq %ymm14,%ymm0,%ymm0 vpmuludq 160-128(%r9),%ymm10,%ymm13 vpaddq %ymm13,%ymm1,%ymm1 vpmuludq 192-128(%r9),%ymm10,%ymm12 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq 224-128(%r9),%ymm10,%ymm3 vpbroadcastq 192-128(%r15),%ymm10 vpaddq 384-448(%r12),%ymm3,%ymm3 vmovdqu %ymm8,256-192(%rbx) vmovdqu %ymm0,288-192(%rbx) leaq 8(%rbx),%rbx vpmuludq 160-128(%rsi),%ymm11,%ymm13 vpaddq %ymm13,%ymm1,%ymm1 vpmuludq 160-128(%r9),%ymm11,%ymm12 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq 192-128(%r9),%ymm11,%ymm14 vpaddq %ymm14,%ymm3,%ymm3 vpmuludq 224-128(%r9),%ymm11,%ymm4 vpbroadcastq 224-128(%r15),%ymm11 vpaddq 416-448(%r12),%ymm4,%ymm4 vmovdqu %ymm1,320-448(%r12) vmovdqu %ymm2,352-448(%r12) vpmuludq 192-128(%rsi),%ymm10,%ymm12 vpaddq %ymm12,%ymm3,%ymm3 vpmuludq 192-128(%r9),%ymm10,%ymm14 vpbroadcastq 256-128(%r15),%ymm0 vpaddq %ymm14,%ymm4,%ymm4 vpmuludq 224-128(%r9),%ymm10,%ymm5 vpbroadcastq 0+8-128(%r15),%ymm10 vpaddq 448-448(%r12),%ymm5,%ymm5 vmovdqu 
%ymm3,384-448(%r12) vmovdqu %ymm4,416-448(%r12) leaq 8(%r15),%r15 vpmuludq 224-128(%rsi),%ymm11,%ymm12 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq 224-128(%r9),%ymm11,%ymm6 vpaddq 480-448(%r12),%ymm6,%ymm6 vpmuludq 256-128(%rsi),%ymm0,%ymm7 vmovdqu %ymm5,448-448(%r12) vpaddq 512-448(%r12),%ymm7,%ymm7 vmovdqu %ymm6,480-448(%r12) vmovdqu %ymm7,512-448(%r12) leaq 8(%r12),%r12 decl %r14d jnz L$OOP_SQR_1024 vmovdqu 256(%rsp),%ymm8 vmovdqu 288(%rsp),%ymm1 vmovdqu 320(%rsp),%ymm2 leaq 192(%rsp),%rbx vpsrlq $29,%ymm8,%ymm14 vpand %ymm15,%ymm8,%ymm8 vpsrlq $29,%ymm1,%ymm11 vpand %ymm15,%ymm1,%ymm1 vpermq $0x93,%ymm14,%ymm14 vpxor %ymm9,%ymm9,%ymm9 vpermq $0x93,%ymm11,%ymm11 vpblendd $3,%ymm9,%ymm14,%ymm10 vpblendd $3,%ymm14,%ymm11,%ymm14 vpaddq %ymm10,%ymm8,%ymm8 vpblendd $3,%ymm11,%ymm9,%ymm11 vpaddq %ymm14,%ymm1,%ymm1 vpaddq %ymm11,%ymm2,%ymm2 vmovdqu %ymm1,288-192(%rbx) vmovdqu %ymm2,320-192(%rbx) movq (%rsp),%rax movq 8(%rsp),%r10 movq 16(%rsp),%r11 movq 24(%rsp),%r12 vmovdqu 32(%rsp),%ymm1 vmovdqu 64-192(%rbx),%ymm2 vmovdqu 96-192(%rbx),%ymm3 vmovdqu 128-192(%rbx),%ymm4 vmovdqu 160-192(%rbx),%ymm5 vmovdqu 192-192(%rbx),%ymm6 vmovdqu 224-192(%rbx),%ymm7 movq %rax,%r9 imull %ecx,%eax andl $0x1fffffff,%eax vmovd %eax,%xmm12 movq %rax,%rdx imulq -128(%r13),%rax vpbroadcastq %xmm12,%ymm12 addq %rax,%r9 movq %rdx,%rax imulq 8-128(%r13),%rax shrq $29,%r9 addq %rax,%r10 movq %rdx,%rax imulq 16-128(%r13),%rax addq %r9,%r10 addq %rax,%r11 imulq 24-128(%r13),%rdx addq %rdx,%r12 movq %r10,%rax imull %ecx,%eax andl $0x1fffffff,%eax movl $9,%r14d jmp L$OOP_REDUCE_1024 .p2align 5 L$OOP_REDUCE_1024: vmovd %eax,%xmm13 vpbroadcastq %xmm13,%ymm13 vpmuludq 32-128(%r13),%ymm12,%ymm10 movq %rax,%rdx imulq -128(%r13),%rax vpaddq %ymm10,%ymm1,%ymm1 addq %rax,%r10 vpmuludq 64-128(%r13),%ymm12,%ymm14 movq %rdx,%rax imulq 8-128(%r13),%rax vpaddq %ymm14,%ymm2,%ymm2 vpmuludq 96-128(%r13),%ymm12,%ymm11 .byte 0x67 addq %rax,%r11 .byte 0x67 movq %rdx,%rax imulq 16-128(%r13),%rax shrq $29,%r10 vpaddq 
%ymm11,%ymm3,%ymm3 vpmuludq 128-128(%r13),%ymm12,%ymm10 addq %rax,%r12 addq %r10,%r11 vpaddq %ymm10,%ymm4,%ymm4 vpmuludq 160-128(%r13),%ymm12,%ymm14 movq %r11,%rax imull %ecx,%eax vpaddq %ymm14,%ymm5,%ymm5 vpmuludq 192-128(%r13),%ymm12,%ymm11 andl $0x1fffffff,%eax vpaddq %ymm11,%ymm6,%ymm6 vpmuludq 224-128(%r13),%ymm12,%ymm10 vpaddq %ymm10,%ymm7,%ymm7 vpmuludq 256-128(%r13),%ymm12,%ymm14 vmovd %eax,%xmm12 vpaddq %ymm14,%ymm8,%ymm8 vpbroadcastq %xmm12,%ymm12 vpmuludq 32-8-128(%r13),%ymm13,%ymm11 vmovdqu 96-8-128(%r13),%ymm14 movq %rax,%rdx imulq -128(%r13),%rax vpaddq %ymm11,%ymm1,%ymm1 vpmuludq 64-8-128(%r13),%ymm13,%ymm10 vmovdqu 128-8-128(%r13),%ymm11 addq %rax,%r11 movq %rdx,%rax imulq 8-128(%r13),%rax vpaddq %ymm10,%ymm2,%ymm2 addq %r12,%rax shrq $29,%r11 vpmuludq %ymm13,%ymm14,%ymm14 vmovdqu 160-8-128(%r13),%ymm10 addq %r11,%rax vpaddq %ymm14,%ymm3,%ymm3 vpmuludq %ymm13,%ymm11,%ymm11 vmovdqu 192-8-128(%r13),%ymm14 .byte 0x67 movq %rax,%r12 imull %ecx,%eax vpaddq %ymm11,%ymm4,%ymm4 vpmuludq %ymm13,%ymm10,%ymm10 .byte 0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00 andl $0x1fffffff,%eax vpaddq %ymm10,%ymm5,%ymm5 vpmuludq %ymm13,%ymm14,%ymm14 vmovdqu 256-8-128(%r13),%ymm10 vpaddq %ymm14,%ymm6,%ymm6 vpmuludq %ymm13,%ymm11,%ymm11 vmovdqu 288-8-128(%r13),%ymm9 vmovd %eax,%xmm0 imulq -128(%r13),%rax vpaddq %ymm11,%ymm7,%ymm7 vpmuludq %ymm13,%ymm10,%ymm10 vmovdqu 32-16-128(%r13),%ymm14 vpbroadcastq %xmm0,%ymm0 vpaddq %ymm10,%ymm8,%ymm8 vpmuludq %ymm13,%ymm9,%ymm9 vmovdqu 64-16-128(%r13),%ymm11 addq %rax,%r12 vmovdqu 32-24-128(%r13),%ymm13 vpmuludq %ymm12,%ymm14,%ymm14 vmovdqu 96-16-128(%r13),%ymm10 vpaddq %ymm14,%ymm1,%ymm1 vpmuludq %ymm0,%ymm13,%ymm13 vpmuludq %ymm12,%ymm11,%ymm11 .byte 0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff vpaddq %ymm1,%ymm13,%ymm13 vpaddq %ymm11,%ymm2,%ymm2 vpmuludq %ymm12,%ymm10,%ymm10 vmovdqu 160-16-128(%r13),%ymm11 .byte 0x67 vmovq %xmm13,%rax vmovdqu %ymm13,(%rsp) vpaddq %ymm10,%ymm3,%ymm3 vpmuludq %ymm12,%ymm14,%ymm14 vmovdqu 
192-16-128(%r13),%ymm10 vpaddq %ymm14,%ymm4,%ymm4 vpmuludq %ymm12,%ymm11,%ymm11 vmovdqu 224-16-128(%r13),%ymm14 vpaddq %ymm11,%ymm5,%ymm5 vpmuludq %ymm12,%ymm10,%ymm10 vmovdqu 256-16-128(%r13),%ymm11 vpaddq %ymm10,%ymm6,%ymm6 vpmuludq %ymm12,%ymm14,%ymm14 shrq $29,%r12 vmovdqu 288-16-128(%r13),%ymm10 addq %r12,%rax vpaddq %ymm14,%ymm7,%ymm7 vpmuludq %ymm12,%ymm11,%ymm11 movq %rax,%r9 imull %ecx,%eax vpaddq %ymm11,%ymm8,%ymm8 vpmuludq %ymm12,%ymm10,%ymm10 andl $0x1fffffff,%eax vmovd %eax,%xmm12 vmovdqu 96-24-128(%r13),%ymm11 .byte 0x67 vpaddq %ymm10,%ymm9,%ymm9 vpbroadcastq %xmm12,%ymm12 vpmuludq 64-24-128(%r13),%ymm0,%ymm14 vmovdqu 128-24-128(%r13),%ymm10 movq %rax,%rdx imulq -128(%r13),%rax movq 8(%rsp),%r10 vpaddq %ymm14,%ymm2,%ymm1 vpmuludq %ymm0,%ymm11,%ymm11 vmovdqu 160-24-128(%r13),%ymm14 addq %rax,%r9 movq %rdx,%rax imulq 8-128(%r13),%rax .byte 0x67 shrq $29,%r9 movq 16(%rsp),%r11 vpaddq %ymm11,%ymm3,%ymm2 vpmuludq %ymm0,%ymm10,%ymm10 vmovdqu 192-24-128(%r13),%ymm11 addq %rax,%r10 movq %rdx,%rax imulq 16-128(%r13),%rax vpaddq %ymm10,%ymm4,%ymm3 vpmuludq %ymm0,%ymm14,%ymm14 vmovdqu 224-24-128(%r13),%ymm10 imulq 24-128(%r13),%rdx addq %rax,%r11 leaq (%r9,%r10,1),%rax vpaddq %ymm14,%ymm5,%ymm4 vpmuludq %ymm0,%ymm11,%ymm11 vmovdqu 256-24-128(%r13),%ymm14 movq %rax,%r10 imull %ecx,%eax vpmuludq %ymm0,%ymm10,%ymm10 vpaddq %ymm11,%ymm6,%ymm5 vmovdqu 288-24-128(%r13),%ymm11 andl $0x1fffffff,%eax vpaddq %ymm10,%ymm7,%ymm6 vpmuludq %ymm0,%ymm14,%ymm14 addq 24(%rsp),%rdx vpaddq %ymm14,%ymm8,%ymm7 vpmuludq %ymm0,%ymm11,%ymm11 vpaddq %ymm11,%ymm9,%ymm8 vmovq %r12,%xmm9 movq %rdx,%r12 decl %r14d jnz L$OOP_REDUCE_1024 leaq 448(%rsp),%r12 vpaddq %ymm9,%ymm13,%ymm0 vpxor %ymm9,%ymm9,%ymm9 vpaddq 288-192(%rbx),%ymm0,%ymm0 vpaddq 320-448(%r12),%ymm1,%ymm1 vpaddq 352-448(%r12),%ymm2,%ymm2 vpaddq 384-448(%r12),%ymm3,%ymm3 vpaddq 416-448(%r12),%ymm4,%ymm4 vpaddq 448-448(%r12),%ymm5,%ymm5 vpaddq 480-448(%r12),%ymm6,%ymm6 vpaddq 512-448(%r12),%ymm7,%ymm7 vpaddq 
544-448(%r12),%ymm8,%ymm8 vpsrlq $29,%ymm0,%ymm14 vpand %ymm15,%ymm0,%ymm0 vpsrlq $29,%ymm1,%ymm11 vpand %ymm15,%ymm1,%ymm1 vpsrlq $29,%ymm2,%ymm12 vpermq $0x93,%ymm14,%ymm14 vpand %ymm15,%ymm2,%ymm2 vpsrlq $29,%ymm3,%ymm13 vpermq $0x93,%ymm11,%ymm11 vpand %ymm15,%ymm3,%ymm3 vpermq $0x93,%ymm12,%ymm12 vpblendd $3,%ymm9,%ymm14,%ymm10 vpermq $0x93,%ymm13,%ymm13 vpblendd $3,%ymm14,%ymm11,%ymm14 vpaddq %ymm10,%ymm0,%ymm0 vpblendd $3,%ymm11,%ymm12,%ymm11 vpaddq %ymm14,%ymm1,%ymm1 vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm11,%ymm2,%ymm2 vpblendd $3,%ymm13,%ymm9,%ymm13 vpaddq %ymm12,%ymm3,%ymm3 vpaddq %ymm13,%ymm4,%ymm4 vpsrlq $29,%ymm0,%ymm14 vpand %ymm15,%ymm0,%ymm0 vpsrlq $29,%ymm1,%ymm11 vpand %ymm15,%ymm1,%ymm1 vpsrlq $29,%ymm2,%ymm12 vpermq $0x93,%ymm14,%ymm14 vpand %ymm15,%ymm2,%ymm2 vpsrlq $29,%ymm3,%ymm13 vpermq $0x93,%ymm11,%ymm11 vpand %ymm15,%ymm3,%ymm3 vpermq $0x93,%ymm12,%ymm12 vpblendd $3,%ymm9,%ymm14,%ymm10 vpermq $0x93,%ymm13,%ymm13 vpblendd $3,%ymm14,%ymm11,%ymm14 vpaddq %ymm10,%ymm0,%ymm0 vpblendd $3,%ymm11,%ymm12,%ymm11 vpaddq %ymm14,%ymm1,%ymm1 vmovdqu %ymm0,0-128(%rdi) vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm11,%ymm2,%ymm2 vmovdqu %ymm1,32-128(%rdi) vpblendd $3,%ymm13,%ymm9,%ymm13 vpaddq %ymm12,%ymm3,%ymm3 vmovdqu %ymm2,64-128(%rdi) vpaddq %ymm13,%ymm4,%ymm4 vmovdqu %ymm3,96-128(%rdi) vpsrlq $29,%ymm4,%ymm14 vpand %ymm15,%ymm4,%ymm4 vpsrlq $29,%ymm5,%ymm11 vpand %ymm15,%ymm5,%ymm5 vpsrlq $29,%ymm6,%ymm12 vpermq $0x93,%ymm14,%ymm14 vpand %ymm15,%ymm6,%ymm6 vpsrlq $29,%ymm7,%ymm13 vpermq $0x93,%ymm11,%ymm11 vpand %ymm15,%ymm7,%ymm7 vpsrlq $29,%ymm8,%ymm0 vpermq $0x93,%ymm12,%ymm12 vpand %ymm15,%ymm8,%ymm8 vpermq $0x93,%ymm13,%ymm13 vpblendd $3,%ymm9,%ymm14,%ymm10 vpermq $0x93,%ymm0,%ymm0 vpblendd $3,%ymm14,%ymm11,%ymm14 vpaddq %ymm10,%ymm4,%ymm4 vpblendd $3,%ymm11,%ymm12,%ymm11 vpaddq %ymm14,%ymm5,%ymm5 vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm11,%ymm6,%ymm6 vpblendd $3,%ymm13,%ymm0,%ymm13 vpaddq %ymm12,%ymm7,%ymm7 vpaddq %ymm13,%ymm8,%ymm8 
vpsrlq $29,%ymm4,%ymm14 vpand %ymm15,%ymm4,%ymm4 vpsrlq $29,%ymm5,%ymm11 vpand %ymm15,%ymm5,%ymm5 vpsrlq $29,%ymm6,%ymm12 vpermq $0x93,%ymm14,%ymm14 vpand %ymm15,%ymm6,%ymm6 vpsrlq $29,%ymm7,%ymm13 vpermq $0x93,%ymm11,%ymm11 vpand %ymm15,%ymm7,%ymm7 vpsrlq $29,%ymm8,%ymm0 vpermq $0x93,%ymm12,%ymm12 vpand %ymm15,%ymm8,%ymm8 vpermq $0x93,%ymm13,%ymm13 vpblendd $3,%ymm9,%ymm14,%ymm10 vpermq $0x93,%ymm0,%ymm0 vpblendd $3,%ymm14,%ymm11,%ymm14 vpaddq %ymm10,%ymm4,%ymm4 vpblendd $3,%ymm11,%ymm12,%ymm11 vpaddq %ymm14,%ymm5,%ymm5 vmovdqu %ymm4,128-128(%rdi) vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm11,%ymm6,%ymm6 vmovdqu %ymm5,160-128(%rdi) vpblendd $3,%ymm13,%ymm0,%ymm13 vpaddq %ymm12,%ymm7,%ymm7 vmovdqu %ymm6,192-128(%rdi) vpaddq %ymm13,%ymm8,%ymm8 vmovdqu %ymm7,224-128(%rdi) vmovdqu %ymm8,256-128(%rdi) movq %rdi,%rsi decl %r8d jne L$OOP_GRANDE_SQR_1024 vzeroall movq %rbp,%rax movq -48(%rax),%r15 movq -40(%rax),%r14 movq -32(%rax),%r13 movq -24(%rax),%r12 movq -16(%rax),%rbp movq -8(%rax),%rbx leaq (%rax),%rsp L$sqr_1024_epilogue: ret .globl _rsaz_1024_mul_avx2 .private_extern _rsaz_1024_mul_avx2 .p2align 6 _rsaz_1024_mul_avx2: _CET_ENDBR leaq (%rsp),%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 movq %rax,%rbp vzeroall movq %rdx,%r13 subq $64,%rsp .byte 0x67,0x67 movq %rsi,%r15 andq $4095,%r15 addq $320,%r15 shrq $12,%r15 movq %rsi,%r15 cmovnzq %r13,%rsi cmovnzq %r15,%r13 movq %rcx,%r15 subq $-128,%rsi subq $-128,%rcx subq $-128,%rdi andq $4095,%r15 addq $320,%r15 .byte 0x67,0x67 shrq $12,%r15 jz L$mul_1024_no_n_copy subq $320,%rsp vmovdqu 0-128(%rcx),%ymm0 andq $-512,%rsp vmovdqu 32-128(%rcx),%ymm1 vmovdqu 64-128(%rcx),%ymm2 vmovdqu 96-128(%rcx),%ymm3 vmovdqu 128-128(%rcx),%ymm4 vmovdqu 160-128(%rcx),%ymm5 vmovdqu 192-128(%rcx),%ymm6 vmovdqu 224-128(%rcx),%ymm7 vmovdqu 256-128(%rcx),%ymm8 leaq 64+128(%rsp),%rcx vmovdqu %ymm0,0-128(%rcx) vpxor %ymm0,%ymm0,%ymm0 vmovdqu %ymm1,32-128(%rcx) vpxor %ymm1,%ymm1,%ymm1 vmovdqu %ymm2,64-128(%rcx) vpxor 
%ymm2,%ymm2,%ymm2 vmovdqu %ymm3,96-128(%rcx) vpxor %ymm3,%ymm3,%ymm3 vmovdqu %ymm4,128-128(%rcx) vpxor %ymm4,%ymm4,%ymm4 vmovdqu %ymm5,160-128(%rcx) vpxor %ymm5,%ymm5,%ymm5 vmovdqu %ymm6,192-128(%rcx) vpxor %ymm6,%ymm6,%ymm6 vmovdqu %ymm7,224-128(%rcx) vpxor %ymm7,%ymm7,%ymm7 vmovdqu %ymm8,256-128(%rcx) vmovdqa %ymm0,%ymm8 vmovdqu %ymm9,288-128(%rcx) L$mul_1024_no_n_copy: andq $-64,%rsp movq (%r13),%rbx vpbroadcastq (%r13),%ymm10 vmovdqu %ymm0,(%rsp) xorq %r9,%r9 .byte 0x67 xorq %r10,%r10 xorq %r11,%r11 xorq %r12,%r12 vmovdqu L$and_mask(%rip),%ymm15 movl $9,%r14d vmovdqu %ymm9,288-128(%rdi) jmp L$oop_mul_1024 .p2align 5 L$oop_mul_1024: vpsrlq $29,%ymm3,%ymm9 movq %rbx,%rax imulq -128(%rsi),%rax addq %r9,%rax movq %rbx,%r10 imulq 8-128(%rsi),%r10 addq 8(%rsp),%r10 movq %rax,%r9 imull %r8d,%eax andl $0x1fffffff,%eax movq %rbx,%r11 imulq 16-128(%rsi),%r11 addq 16(%rsp),%r11 movq %rbx,%r12 imulq 24-128(%rsi),%r12 addq 24(%rsp),%r12 vpmuludq 32-128(%rsi),%ymm10,%ymm0 vmovd %eax,%xmm11 vpaddq %ymm0,%ymm1,%ymm1 vpmuludq 64-128(%rsi),%ymm10,%ymm12 vpbroadcastq %xmm11,%ymm11 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq 96-128(%rsi),%ymm10,%ymm13 vpand %ymm15,%ymm3,%ymm3 vpaddq %ymm13,%ymm3,%ymm3 vpmuludq 128-128(%rsi),%ymm10,%ymm0 vpaddq %ymm0,%ymm4,%ymm4 vpmuludq 160-128(%rsi),%ymm10,%ymm12 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq 192-128(%rsi),%ymm10,%ymm13 vpaddq %ymm13,%ymm6,%ymm6 vpmuludq 224-128(%rsi),%ymm10,%ymm0 vpermq $0x93,%ymm9,%ymm9 vpaddq %ymm0,%ymm7,%ymm7 vpmuludq 256-128(%rsi),%ymm10,%ymm12 vpbroadcastq 8(%r13),%ymm10 vpaddq %ymm12,%ymm8,%ymm8 movq %rax,%rdx imulq -128(%rcx),%rax addq %rax,%r9 movq %rdx,%rax imulq 8-128(%rcx),%rax addq %rax,%r10 movq %rdx,%rax imulq 16-128(%rcx),%rax addq %rax,%r11 shrq $29,%r9 imulq 24-128(%rcx),%rdx addq %rdx,%r12 addq %r9,%r10 vpmuludq 32-128(%rcx),%ymm11,%ymm13 vmovq %xmm10,%rbx vpaddq %ymm13,%ymm1,%ymm1 vpmuludq 64-128(%rcx),%ymm11,%ymm0 vpaddq %ymm0,%ymm2,%ymm2 vpmuludq 96-128(%rcx),%ymm11,%ymm12 vpaddq %ymm12,%ymm3,%ymm3 vpmuludq 
128-128(%rcx),%ymm11,%ymm13 vpaddq %ymm13,%ymm4,%ymm4 vpmuludq 160-128(%rcx),%ymm11,%ymm0 vpaddq %ymm0,%ymm5,%ymm5 vpmuludq 192-128(%rcx),%ymm11,%ymm12 vpaddq %ymm12,%ymm6,%ymm6 vpmuludq 224-128(%rcx),%ymm11,%ymm13 vpblendd $3,%ymm14,%ymm9,%ymm12 vpaddq %ymm13,%ymm7,%ymm7 vpmuludq 256-128(%rcx),%ymm11,%ymm0 vpaddq %ymm12,%ymm3,%ymm3 vpaddq %ymm0,%ymm8,%ymm8 movq %rbx,%rax imulq -128(%rsi),%rax addq %rax,%r10 vmovdqu -8+32-128(%rsi),%ymm12 movq %rbx,%rax imulq 8-128(%rsi),%rax addq %rax,%r11 vmovdqu -8+64-128(%rsi),%ymm13 movq %r10,%rax vpblendd $0xfc,%ymm14,%ymm9,%ymm9 imull %r8d,%eax vpaddq %ymm9,%ymm4,%ymm4 andl $0x1fffffff,%eax imulq 16-128(%rsi),%rbx addq %rbx,%r12 vpmuludq %ymm10,%ymm12,%ymm12 vmovd %eax,%xmm11 vmovdqu -8+96-128(%rsi),%ymm0 vpaddq %ymm12,%ymm1,%ymm1 vpmuludq %ymm10,%ymm13,%ymm13 vpbroadcastq %xmm11,%ymm11 vmovdqu -8+128-128(%rsi),%ymm12 vpaddq %ymm13,%ymm2,%ymm2 vpmuludq %ymm10,%ymm0,%ymm0 vmovdqu -8+160-128(%rsi),%ymm13 vpaddq %ymm0,%ymm3,%ymm3 vpmuludq %ymm10,%ymm12,%ymm12 vmovdqu -8+192-128(%rsi),%ymm0 vpaddq %ymm12,%ymm4,%ymm4 vpmuludq %ymm10,%ymm13,%ymm13 vmovdqu -8+224-128(%rsi),%ymm12 vpaddq %ymm13,%ymm5,%ymm5 vpmuludq %ymm10,%ymm0,%ymm0 vmovdqu -8+256-128(%rsi),%ymm13 vpaddq %ymm0,%ymm6,%ymm6 vpmuludq %ymm10,%ymm12,%ymm12 vmovdqu -8+288-128(%rsi),%ymm9 vpaddq %ymm12,%ymm7,%ymm7 vpmuludq %ymm10,%ymm13,%ymm13 vpaddq %ymm13,%ymm8,%ymm8 vpmuludq %ymm10,%ymm9,%ymm9 vpbroadcastq 16(%r13),%ymm10 movq %rax,%rdx imulq -128(%rcx),%rax addq %rax,%r10 vmovdqu -8+32-128(%rcx),%ymm0 movq %rdx,%rax imulq 8-128(%rcx),%rax addq %rax,%r11 vmovdqu -8+64-128(%rcx),%ymm12 shrq $29,%r10 imulq 16-128(%rcx),%rdx addq %rdx,%r12 addq %r10,%r11 vpmuludq %ymm11,%ymm0,%ymm0 vmovq %xmm10,%rbx vmovdqu -8+96-128(%rcx),%ymm13 vpaddq %ymm0,%ymm1,%ymm1 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu -8+128-128(%rcx),%ymm0 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -8+160-128(%rcx),%ymm12 vpaddq %ymm13,%ymm3,%ymm3 vpmuludq %ymm11,%ymm0,%ymm0 vmovdqu 
-8+192-128(%rcx),%ymm13 vpaddq %ymm0,%ymm4,%ymm4 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu -8+224-128(%rcx),%ymm0 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -8+256-128(%rcx),%ymm12 vpaddq %ymm13,%ymm6,%ymm6 vpmuludq %ymm11,%ymm0,%ymm0 vmovdqu -8+288-128(%rcx),%ymm13 vpaddq %ymm0,%ymm7,%ymm7 vpmuludq %ymm11,%ymm12,%ymm12 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq %ymm11,%ymm13,%ymm13 vpaddq %ymm13,%ymm9,%ymm9 vmovdqu -16+32-128(%rsi),%ymm0 movq %rbx,%rax imulq -128(%rsi),%rax addq %r11,%rax vmovdqu -16+64-128(%rsi),%ymm12 movq %rax,%r11 imull %r8d,%eax andl $0x1fffffff,%eax imulq 8-128(%rsi),%rbx addq %rbx,%r12 vpmuludq %ymm10,%ymm0,%ymm0 vmovd %eax,%xmm11 vmovdqu -16+96-128(%rsi),%ymm13 vpaddq %ymm0,%ymm1,%ymm1 vpmuludq %ymm10,%ymm12,%ymm12 vpbroadcastq %xmm11,%ymm11 vmovdqu -16+128-128(%rsi),%ymm0 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq %ymm10,%ymm13,%ymm13 vmovdqu -16+160-128(%rsi),%ymm12 vpaddq %ymm13,%ymm3,%ymm3 vpmuludq %ymm10,%ymm0,%ymm0 vmovdqu -16+192-128(%rsi),%ymm13 vpaddq %ymm0,%ymm4,%ymm4 vpmuludq %ymm10,%ymm12,%ymm12 vmovdqu -16+224-128(%rsi),%ymm0 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq %ymm10,%ymm13,%ymm13 vmovdqu -16+256-128(%rsi),%ymm12 vpaddq %ymm13,%ymm6,%ymm6 vpmuludq %ymm10,%ymm0,%ymm0 vmovdqu -16+288-128(%rsi),%ymm13 vpaddq %ymm0,%ymm7,%ymm7 vpmuludq %ymm10,%ymm12,%ymm12 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq %ymm10,%ymm13,%ymm13 vpbroadcastq 24(%r13),%ymm10 vpaddq %ymm13,%ymm9,%ymm9 vmovdqu -16+32-128(%rcx),%ymm0 movq %rax,%rdx imulq -128(%rcx),%rax addq %rax,%r11 vmovdqu -16+64-128(%rcx),%ymm12 imulq 8-128(%rcx),%rdx addq %rdx,%r12 shrq $29,%r11 vpmuludq %ymm11,%ymm0,%ymm0 vmovq %xmm10,%rbx vmovdqu -16+96-128(%rcx),%ymm13 vpaddq %ymm0,%ymm1,%ymm1 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu -16+128-128(%rcx),%ymm0 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -16+160-128(%rcx),%ymm12 vpaddq %ymm13,%ymm3,%ymm3 vpmuludq %ymm11,%ymm0,%ymm0 vmovdqu -16+192-128(%rcx),%ymm13 vpaddq %ymm0,%ymm4,%ymm4 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu 
-16+224-128(%rcx),%ymm0 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -16+256-128(%rcx),%ymm12 vpaddq %ymm13,%ymm6,%ymm6 vpmuludq %ymm11,%ymm0,%ymm0 vmovdqu -16+288-128(%rcx),%ymm13 vpaddq %ymm0,%ymm7,%ymm7 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu -24+32-128(%rsi),%ymm0 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -24+64-128(%rsi),%ymm12 vpaddq %ymm13,%ymm9,%ymm9 addq %r11,%r12 imulq -128(%rsi),%rbx addq %rbx,%r12 movq %r12,%rax imull %r8d,%eax andl $0x1fffffff,%eax vpmuludq %ymm10,%ymm0,%ymm0 vmovd %eax,%xmm11 vmovdqu -24+96-128(%rsi),%ymm13 vpaddq %ymm0,%ymm1,%ymm1 vpmuludq %ymm10,%ymm12,%ymm12 vpbroadcastq %xmm11,%ymm11 vmovdqu -24+128-128(%rsi),%ymm0 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq %ymm10,%ymm13,%ymm13 vmovdqu -24+160-128(%rsi),%ymm12 vpaddq %ymm13,%ymm3,%ymm3 vpmuludq %ymm10,%ymm0,%ymm0 vmovdqu -24+192-128(%rsi),%ymm13 vpaddq %ymm0,%ymm4,%ymm4 vpmuludq %ymm10,%ymm12,%ymm12 vmovdqu -24+224-128(%rsi),%ymm0 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq %ymm10,%ymm13,%ymm13 vmovdqu -24+256-128(%rsi),%ymm12 vpaddq %ymm13,%ymm6,%ymm6 vpmuludq %ymm10,%ymm0,%ymm0 vmovdqu -24+288-128(%rsi),%ymm13 vpaddq %ymm0,%ymm7,%ymm7 vpmuludq %ymm10,%ymm12,%ymm12 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq %ymm10,%ymm13,%ymm13 vpbroadcastq 32(%r13),%ymm10 vpaddq %ymm13,%ymm9,%ymm9 addq $32,%r13 vmovdqu -24+32-128(%rcx),%ymm0 imulq -128(%rcx),%rax addq %rax,%r12 shrq $29,%r12 vmovdqu -24+64-128(%rcx),%ymm12 vpmuludq %ymm11,%ymm0,%ymm0 vmovq %xmm10,%rbx vmovdqu -24+96-128(%rcx),%ymm13 vpaddq %ymm0,%ymm1,%ymm0 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu %ymm0,(%rsp) vpaddq %ymm12,%ymm2,%ymm1 vmovdqu -24+128-128(%rcx),%ymm0 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -24+160-128(%rcx),%ymm12 vpaddq %ymm13,%ymm3,%ymm2 vpmuludq %ymm11,%ymm0,%ymm0 vmovdqu -24+192-128(%rcx),%ymm13 vpaddq %ymm0,%ymm4,%ymm3 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu -24+224-128(%rcx),%ymm0 vpaddq %ymm12,%ymm5,%ymm4 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -24+256-128(%rcx),%ymm12 vpaddq %ymm13,%ymm6,%ymm5 
vpmuludq %ymm11,%ymm0,%ymm0 vmovdqu -24+288-128(%rcx),%ymm13 movq %r12,%r9 vpaddq %ymm0,%ymm7,%ymm6 vpmuludq %ymm11,%ymm12,%ymm12 addq (%rsp),%r9 vpaddq %ymm12,%ymm8,%ymm7 vpmuludq %ymm11,%ymm13,%ymm13 vmovq %r12,%xmm12 vpaddq %ymm13,%ymm9,%ymm8 decl %r14d jnz L$oop_mul_1024 vpaddq (%rsp),%ymm12,%ymm0 vpsrlq $29,%ymm0,%ymm12 vpand %ymm15,%ymm0,%ymm0 vpsrlq $29,%ymm1,%ymm13 vpand %ymm15,%ymm1,%ymm1 vpsrlq $29,%ymm2,%ymm10 vpermq $0x93,%ymm12,%ymm12 vpand %ymm15,%ymm2,%ymm2 vpsrlq $29,%ymm3,%ymm11 vpermq $0x93,%ymm13,%ymm13 vpand %ymm15,%ymm3,%ymm3 vpblendd $3,%ymm14,%ymm12,%ymm9 vpermq $0x93,%ymm10,%ymm10 vpblendd $3,%ymm12,%ymm13,%ymm12 vpermq $0x93,%ymm11,%ymm11 vpaddq %ymm9,%ymm0,%ymm0 vpblendd $3,%ymm13,%ymm10,%ymm13 vpaddq %ymm12,%ymm1,%ymm1 vpblendd $3,%ymm10,%ymm11,%ymm10 vpaddq %ymm13,%ymm2,%ymm2 vpblendd $3,%ymm11,%ymm14,%ymm11 vpaddq %ymm10,%ymm3,%ymm3 vpaddq %ymm11,%ymm4,%ymm4 vpsrlq $29,%ymm0,%ymm12 vpand %ymm15,%ymm0,%ymm0 vpsrlq $29,%ymm1,%ymm13 vpand %ymm15,%ymm1,%ymm1 vpsrlq $29,%ymm2,%ymm10 vpermq $0x93,%ymm12,%ymm12 vpand %ymm15,%ymm2,%ymm2 vpsrlq $29,%ymm3,%ymm11 vpermq $0x93,%ymm13,%ymm13 vpand %ymm15,%ymm3,%ymm3 vpermq $0x93,%ymm10,%ymm10 vpblendd $3,%ymm14,%ymm12,%ymm9 vpermq $0x93,%ymm11,%ymm11 vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm9,%ymm0,%ymm0 vpblendd $3,%ymm13,%ymm10,%ymm13 vpaddq %ymm12,%ymm1,%ymm1 vpblendd $3,%ymm10,%ymm11,%ymm10 vpaddq %ymm13,%ymm2,%ymm2 vpblendd $3,%ymm11,%ymm14,%ymm11 vpaddq %ymm10,%ymm3,%ymm3 vpaddq %ymm11,%ymm4,%ymm4 vmovdqu %ymm0,0-128(%rdi) vmovdqu %ymm1,32-128(%rdi) vmovdqu %ymm2,64-128(%rdi) vmovdqu %ymm3,96-128(%rdi) vpsrlq $29,%ymm4,%ymm12 vpand %ymm15,%ymm4,%ymm4 vpsrlq $29,%ymm5,%ymm13 vpand %ymm15,%ymm5,%ymm5 vpsrlq $29,%ymm6,%ymm10 vpermq $0x93,%ymm12,%ymm12 vpand %ymm15,%ymm6,%ymm6 vpsrlq $29,%ymm7,%ymm11 vpermq $0x93,%ymm13,%ymm13 vpand %ymm15,%ymm7,%ymm7 vpsrlq $29,%ymm8,%ymm0 vpermq $0x93,%ymm10,%ymm10 vpand %ymm15,%ymm8,%ymm8 vpermq $0x93,%ymm11,%ymm11 vpblendd $3,%ymm14,%ymm12,%ymm9 vpermq 
$0x93,%ymm0,%ymm0 vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm9,%ymm4,%ymm4 vpblendd $3,%ymm13,%ymm10,%ymm13 vpaddq %ymm12,%ymm5,%ymm5 vpblendd $3,%ymm10,%ymm11,%ymm10 vpaddq %ymm13,%ymm6,%ymm6 vpblendd $3,%ymm11,%ymm0,%ymm11 vpaddq %ymm10,%ymm7,%ymm7 vpaddq %ymm11,%ymm8,%ymm8 vpsrlq $29,%ymm4,%ymm12 vpand %ymm15,%ymm4,%ymm4 vpsrlq $29,%ymm5,%ymm13 vpand %ymm15,%ymm5,%ymm5 vpsrlq $29,%ymm6,%ymm10 vpermq $0x93,%ymm12,%ymm12 vpand %ymm15,%ymm6,%ymm6 vpsrlq $29,%ymm7,%ymm11 vpermq $0x93,%ymm13,%ymm13 vpand %ymm15,%ymm7,%ymm7 vpsrlq $29,%ymm8,%ymm0 vpermq $0x93,%ymm10,%ymm10 vpand %ymm15,%ymm8,%ymm8 vpermq $0x93,%ymm11,%ymm11 vpblendd $3,%ymm14,%ymm12,%ymm9 vpermq $0x93,%ymm0,%ymm0 vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm9,%ymm4,%ymm4 vpblendd $3,%ymm13,%ymm10,%ymm13 vpaddq %ymm12,%ymm5,%ymm5 vpblendd $3,%ymm10,%ymm11,%ymm10 vpaddq %ymm13,%ymm6,%ymm6 vpblendd $3,%ymm11,%ymm0,%ymm11 vpaddq %ymm10,%ymm7,%ymm7 vpaddq %ymm11,%ymm8,%ymm8 vmovdqu %ymm4,128-128(%rdi) vmovdqu %ymm5,160-128(%rdi) vmovdqu %ymm6,192-128(%rdi) vmovdqu %ymm7,224-128(%rdi) vmovdqu %ymm8,256-128(%rdi) vzeroupper movq %rbp,%rax movq -48(%rax),%r15 movq -40(%rax),%r14 movq -32(%rax),%r13 movq -24(%rax),%r12 movq -16(%rax),%rbp movq -8(%rax),%rbx leaq (%rax),%rsp L$mul_1024_epilogue: ret .globl _rsaz_1024_red2norm_avx2 .private_extern _rsaz_1024_red2norm_avx2 .p2align 5 _rsaz_1024_red2norm_avx2: _CET_ENDBR subq $-128,%rsi xorq %rax,%rax movq -128(%rsi),%r8 movq -120(%rsi),%r9 movq -112(%rsi),%r10 shlq $0,%r8 shlq $29,%r9 movq %r10,%r11 shlq $58,%r10 shrq $6,%r11 addq %r8,%rax addq %r9,%rax addq %r10,%rax adcq $0,%r11 movq %rax,0(%rdi) movq %r11,%rax movq -104(%rsi),%r8 movq -96(%rsi),%r9 shlq $23,%r8 movq %r9,%r10 shlq $52,%r9 shrq $12,%r10 addq %r8,%rax addq %r9,%rax adcq $0,%r10 movq %rax,8(%rdi) movq %r10,%rax movq -88(%rsi),%r11 movq -80(%rsi),%r8 shlq $17,%r11 movq %r8,%r9 shlq $46,%r8 shrq $18,%r9 addq %r11,%rax addq %r8,%rax adcq $0,%r9 movq %rax,16(%rdi) movq %r9,%rax movq -72(%rsi),%r10 
movq -64(%rsi),%r11 shlq $11,%r10 movq %r11,%r8 shlq $40,%r11 shrq $24,%r8 addq %r10,%rax addq %r11,%rax adcq $0,%r8 movq %rax,24(%rdi) movq %r8,%rax movq -56(%rsi),%r9 movq -48(%rsi),%r10 movq -40(%rsi),%r11 shlq $5,%r9 shlq $34,%r10 movq %r11,%r8 shlq $63,%r11 shrq $1,%r8 addq %r9,%rax addq %r10,%rax addq %r11,%rax adcq $0,%r8 movq %rax,32(%rdi) movq %r8,%rax movq -32(%rsi),%r9 movq -24(%rsi),%r10 shlq $28,%r9 movq %r10,%r11 shlq $57,%r10 shrq $7,%r11 addq %r9,%rax addq %r10,%rax adcq $0,%r11 movq %rax,40(%rdi) movq %r11,%rax movq -16(%rsi),%r8 movq -8(%rsi),%r9 shlq $22,%r8 movq %r9,%r10 shlq $51,%r9 shrq $13,%r10 addq %r8,%rax addq %r9,%rax adcq $0,%r10 movq %rax,48(%rdi) movq %r10,%rax movq 0(%rsi),%r11 movq 8(%rsi),%r8 shlq $16,%r11 movq %r8,%r9 shlq $45,%r8 shrq $19,%r9 addq %r11,%rax addq %r8,%rax adcq $0,%r9 movq %rax,56(%rdi) movq %r9,%rax movq 16(%rsi),%r10 movq 24(%rsi),%r11 shlq $10,%r10 movq %r11,%r8 shlq $39,%r11 shrq $25,%r8 addq %r10,%rax addq %r11,%rax adcq $0,%r8 movq %rax,64(%rdi) movq %r8,%rax movq 32(%rsi),%r9 movq 40(%rsi),%r10 movq 48(%rsi),%r11 shlq $4,%r9 shlq $33,%r10 movq %r11,%r8 shlq $62,%r11 shrq $2,%r8 addq %r9,%rax addq %r10,%rax addq %r11,%rax adcq $0,%r8 movq %rax,72(%rdi) movq %r8,%rax movq 56(%rsi),%r9 movq 64(%rsi),%r10 shlq $27,%r9 movq %r10,%r11 shlq $56,%r10 shrq $8,%r11 addq %r9,%rax addq %r10,%rax adcq $0,%r11 movq %rax,80(%rdi) movq %r11,%rax movq 72(%rsi),%r8 movq 80(%rsi),%r9 shlq $21,%r8 movq %r9,%r10 shlq $50,%r9 shrq $14,%r10 addq %r8,%rax addq %r9,%rax adcq $0,%r10 movq %rax,88(%rdi) movq %r10,%rax movq 88(%rsi),%r11 movq 96(%rsi),%r8 shlq $15,%r11 movq %r8,%r9 shlq $44,%r8 shrq $20,%r9 addq %r11,%rax addq %r8,%rax adcq $0,%r9 movq %rax,96(%rdi) movq %r9,%rax movq 104(%rsi),%r10 movq 112(%rsi),%r11 shlq $9,%r10 movq %r11,%r8 shlq $38,%r11 shrq $26,%r8 addq %r10,%rax addq %r11,%rax adcq $0,%r8 movq %rax,104(%rdi) movq %r8,%rax movq 120(%rsi),%r9 movq 128(%rsi),%r10 movq 136(%rsi),%r11 shlq $3,%r9 shlq $32,%r10 movq 
%r11,%r8 shlq $61,%r11 shrq $3,%r8 addq %r9,%rax addq %r10,%rax addq %r11,%rax adcq $0,%r8 movq %rax,112(%rdi) movq %r8,%rax movq 144(%rsi),%r9 movq 152(%rsi),%r10 shlq $26,%r9 movq %r10,%r11 shlq $55,%r10 shrq $9,%r11 addq %r9,%rax addq %r10,%rax adcq $0,%r11 movq %rax,120(%rdi) movq %r11,%rax ret .globl _rsaz_1024_norm2red_avx2 .private_extern _rsaz_1024_norm2red_avx2 .p2align 5 _rsaz_1024_norm2red_avx2: _CET_ENDBR subq $-128,%rdi movq (%rsi),%r8 movl $0x1fffffff,%eax movq 8(%rsi),%r9 movq %r8,%r11 shrq $0,%r11 andq %rax,%r11 movq %r11,-128(%rdi) movq %r8,%r10 shrq $29,%r10 andq %rax,%r10 movq %r10,-120(%rdi) shrdq $58,%r9,%r8 andq %rax,%r8 movq %r8,-112(%rdi) movq 16(%rsi),%r10 movq %r9,%r8 shrq $23,%r8 andq %rax,%r8 movq %r8,-104(%rdi) shrdq $52,%r10,%r9 andq %rax,%r9 movq %r9,-96(%rdi) movq 24(%rsi),%r11 movq %r10,%r9 shrq $17,%r9 andq %rax,%r9 movq %r9,-88(%rdi) shrdq $46,%r11,%r10 andq %rax,%r10 movq %r10,-80(%rdi) movq 32(%rsi),%r8 movq %r11,%r10 shrq $11,%r10 andq %rax,%r10 movq %r10,-72(%rdi) shrdq $40,%r8,%r11 andq %rax,%r11 movq %r11,-64(%rdi) movq 40(%rsi),%r9 movq %r8,%r11 shrq $5,%r11 andq %rax,%r11 movq %r11,-56(%rdi) movq %r8,%r10 shrq $34,%r10 andq %rax,%r10 movq %r10,-48(%rdi) shrdq $63,%r9,%r8 andq %rax,%r8 movq %r8,-40(%rdi) movq 48(%rsi),%r10 movq %r9,%r8 shrq $28,%r8 andq %rax,%r8 movq %r8,-32(%rdi) shrdq $57,%r10,%r9 andq %rax,%r9 movq %r9,-24(%rdi) movq 56(%rsi),%r11 movq %r10,%r9 shrq $22,%r9 andq %rax,%r9 movq %r9,-16(%rdi) shrdq $51,%r11,%r10 andq %rax,%r10 movq %r10,-8(%rdi) movq 64(%rsi),%r8 movq %r11,%r10 shrq $16,%r10 andq %rax,%r10 movq %r10,0(%rdi) shrdq $45,%r8,%r11 andq %rax,%r11 movq %r11,8(%rdi) movq 72(%rsi),%r9 movq %r8,%r11 shrq $10,%r11 andq %rax,%r11 movq %r11,16(%rdi) shrdq $39,%r9,%r8 andq %rax,%r8 movq %r8,24(%rdi) movq 80(%rsi),%r10 movq %r9,%r8 shrq $4,%r8 andq %rax,%r8 movq %r8,32(%rdi) movq %r9,%r11 shrq $33,%r11 andq %rax,%r11 movq %r11,40(%rdi) shrdq $62,%r10,%r9 andq %rax,%r9 movq %r9,48(%rdi) movq 88(%rsi),%r11 
movq %r10,%r9 shrq $27,%r9 andq %rax,%r9 movq %r9,56(%rdi) shrdq $56,%r11,%r10 andq %rax,%r10 movq %r10,64(%rdi) movq 96(%rsi),%r8 movq %r11,%r10 shrq $21,%r10 andq %rax,%r10 movq %r10,72(%rdi) shrdq $50,%r8,%r11 andq %rax,%r11 movq %r11,80(%rdi) movq 104(%rsi),%r9 movq %r8,%r11 shrq $15,%r11 andq %rax,%r11 movq %r11,88(%rdi) shrdq $44,%r9,%r8 andq %rax,%r8 movq %r8,96(%rdi) movq 112(%rsi),%r10 movq %r9,%r8 shrq $9,%r8 andq %rax,%r8 movq %r8,104(%rdi) shrdq $38,%r10,%r9 andq %rax,%r9 movq %r9,112(%rdi) movq 120(%rsi),%r11 movq %r10,%r9 shrq $3,%r9 andq %rax,%r9 movq %r9,120(%rdi) movq %r10,%r8 shrq $32,%r8 andq %rax,%r8 movq %r8,128(%rdi) shrdq $61,%r11,%r10 andq %rax,%r10 movq %r10,136(%rdi) xorq %r8,%r8 movq %r11,%r10 shrq $26,%r10 andq %rax,%r10 movq %r10,144(%rdi) shrdq $55,%r8,%r11 andq %rax,%r11 movq %r11,152(%rdi) movq %r8,160(%rdi) movq %r8,168(%rdi) movq %r8,176(%rdi) movq %r8,184(%rdi) ret .globl _rsaz_1024_scatter5_avx2 .private_extern _rsaz_1024_scatter5_avx2 .p2align 5 _rsaz_1024_scatter5_avx2: _CET_ENDBR vzeroupper vmovdqu L$scatter_permd(%rip),%ymm5 shll $4,%edx leaq (%rdi,%rdx,1),%rdi movl $9,%eax jmp L$oop_scatter_1024 .p2align 5 L$oop_scatter_1024: vmovdqu (%rsi),%ymm0 leaq 32(%rsi),%rsi vpermd %ymm0,%ymm5,%ymm0 vmovdqu %xmm0,(%rdi) leaq 512(%rdi),%rdi decl %eax jnz L$oop_scatter_1024 vzeroupper ret .globl _rsaz_1024_gather5_avx2 .private_extern _rsaz_1024_gather5_avx2 .p2align 5 _rsaz_1024_gather5_avx2: _CET_ENDBR vzeroupper movq %rsp,%r11 leaq -256(%rsp),%rsp andq $-32,%rsp leaq L$inc(%rip),%r10 leaq -128(%rsp),%rax vmovd %edx,%xmm4 vmovdqa (%r10),%ymm0 vmovdqa 32(%r10),%ymm1 vmovdqa 64(%r10),%ymm5 vpbroadcastd %xmm4,%ymm4 vpaddd %ymm5,%ymm0,%ymm2 vpcmpeqd %ymm4,%ymm0,%ymm0 vpaddd %ymm5,%ymm1,%ymm3 vpcmpeqd %ymm4,%ymm1,%ymm1 vmovdqa %ymm0,0+128(%rax) vpaddd %ymm5,%ymm2,%ymm0 vpcmpeqd %ymm4,%ymm2,%ymm2 vmovdqa %ymm1,32+128(%rax) vpaddd %ymm5,%ymm3,%ymm1 vpcmpeqd %ymm4,%ymm3,%ymm3 vmovdqa %ymm2,64+128(%rax) vpaddd %ymm5,%ymm0,%ymm2 vpcmpeqd 
%ymm4,%ymm0,%ymm0 vmovdqa %ymm3,96+128(%rax) vpaddd %ymm5,%ymm1,%ymm3 vpcmpeqd %ymm4,%ymm1,%ymm1 vmovdqa %ymm0,128+128(%rax) vpaddd %ymm5,%ymm2,%ymm8 vpcmpeqd %ymm4,%ymm2,%ymm2 vmovdqa %ymm1,160+128(%rax) vpaddd %ymm5,%ymm3,%ymm9 vpcmpeqd %ymm4,%ymm3,%ymm3 vmovdqa %ymm2,192+128(%rax) vpaddd %ymm5,%ymm8,%ymm10 vpcmpeqd %ymm4,%ymm8,%ymm8 vmovdqa %ymm3,224+128(%rax) vpaddd %ymm5,%ymm9,%ymm11 vpcmpeqd %ymm4,%ymm9,%ymm9 vpaddd %ymm5,%ymm10,%ymm12 vpcmpeqd %ymm4,%ymm10,%ymm10 vpaddd %ymm5,%ymm11,%ymm13 vpcmpeqd %ymm4,%ymm11,%ymm11 vpaddd %ymm5,%ymm12,%ymm14 vpcmpeqd %ymm4,%ymm12,%ymm12 vpaddd %ymm5,%ymm13,%ymm15 vpcmpeqd %ymm4,%ymm13,%ymm13 vpcmpeqd %ymm4,%ymm14,%ymm14 vpcmpeqd %ymm4,%ymm15,%ymm15 vmovdqa -32(%r10),%ymm7 leaq 128(%rsi),%rsi movl $9,%edx L$oop_gather_1024: vmovdqa 0-128(%rsi),%ymm0 vmovdqa 32-128(%rsi),%ymm1 vmovdqa 64-128(%rsi),%ymm2 vmovdqa 96-128(%rsi),%ymm3 vpand 0+128(%rax),%ymm0,%ymm0 vpand 32+128(%rax),%ymm1,%ymm1 vpand 64+128(%rax),%ymm2,%ymm2 vpor %ymm0,%ymm1,%ymm4 vpand 96+128(%rax),%ymm3,%ymm3 vmovdqa 128-128(%rsi),%ymm0 vmovdqa 160-128(%rsi),%ymm1 vpor %ymm2,%ymm3,%ymm5 vmovdqa 192-128(%rsi),%ymm2 vmovdqa 224-128(%rsi),%ymm3 vpand 128+128(%rax),%ymm0,%ymm0 vpand 160+128(%rax),%ymm1,%ymm1 vpand 192+128(%rax),%ymm2,%ymm2 vpor %ymm0,%ymm4,%ymm4 vpand 224+128(%rax),%ymm3,%ymm3 vpand 256-128(%rsi),%ymm8,%ymm0 vpor %ymm1,%ymm5,%ymm5 vpand 288-128(%rsi),%ymm9,%ymm1 vpor %ymm2,%ymm4,%ymm4 vpand 320-128(%rsi),%ymm10,%ymm2 vpor %ymm3,%ymm5,%ymm5 vpand 352-128(%rsi),%ymm11,%ymm3 vpor %ymm0,%ymm4,%ymm4 vpand 384-128(%rsi),%ymm12,%ymm0 vpor %ymm1,%ymm5,%ymm5 vpand 416-128(%rsi),%ymm13,%ymm1 vpor %ymm2,%ymm4,%ymm4 vpand 448-128(%rsi),%ymm14,%ymm2 vpor %ymm3,%ymm5,%ymm5 vpand 480-128(%rsi),%ymm15,%ymm3 leaq 512(%rsi),%rsi vpor %ymm0,%ymm4,%ymm4 vpor %ymm1,%ymm5,%ymm5 vpor %ymm2,%ymm4,%ymm4 vpor %ymm3,%ymm5,%ymm5 vpor %ymm5,%ymm4,%ymm4 vextracti128 $1,%ymm4,%xmm5 vpor %xmm4,%xmm5,%xmm5 vpermd %ymm5,%ymm7,%ymm5 vmovdqu %ymm5,(%rdi) leaq 32(%rdi),%rdi decl %edx 
jnz L$oop_gather_1024 vpxor %ymm0,%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) vzeroupper leaq (%r11),%rsp ret L$SEH_end_rsaz_1024_gather5: .section __DATA,__const .p2align 6 L$and_mask: .quad 0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff L$scatter_permd: .long 0,2,4,6,7,7,7,7 L$gather_permd: .long 0,7,1,7,2,7,3,7 L$inc: .long 0,0,0,0, 1,1,1,1 .long 2,2,2,2, 3,3,3,3 .long 4,4,4,4, 4,4,4,4 .p2align 6 .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/rsaz-avx2-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl rsaz_1024_sqr_avx2 .hidden rsaz_1024_sqr_avx2 .type rsaz_1024_sqr_avx2,@function .align 64 rsaz_1024_sqr_avx2: .cfi_startproc _CET_ENDBR leaq (%rsp),%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 vzeroupper movq %rax,%rbp .cfi_def_cfa_register %rbp movq %rdx,%r13 subq $832,%rsp movq %r13,%r15 subq $-128,%rdi subq $-128,%rsi subq $-128,%r13 andq $4095,%r15 addq $320,%r15 shrq $12,%r15 vpxor %ymm9,%ymm9,%ymm9 jz .Lsqr_1024_no_n_copy subq $320,%rsp vmovdqu 0-128(%r13),%ymm0 andq $-2048,%rsp vmovdqu 32-128(%r13),%ymm1 vmovdqu 64-128(%r13),%ymm2 vmovdqu 96-128(%r13),%ymm3 vmovdqu 128-128(%r13),%ymm4 vmovdqu 160-128(%r13),%ymm5 vmovdqu 192-128(%r13),%ymm6 vmovdqu 224-128(%r13),%ymm7 vmovdqu 256-128(%r13),%ymm8 leaq 832+128(%rsp),%r13 vmovdqu %ymm0,0-128(%r13) vmovdqu %ymm1,32-128(%r13) vmovdqu %ymm2,64-128(%r13) vmovdqu %ymm3,96-128(%r13) vmovdqu %ymm4,128-128(%r13) vmovdqu %ymm5,160-128(%r13) vmovdqu %ymm6,192-128(%r13) 
vmovdqu %ymm7,224-128(%r13) vmovdqu %ymm8,256-128(%r13) vmovdqu %ymm9,288-128(%r13) .Lsqr_1024_no_n_copy: andq $-1024,%rsp vmovdqu 32-128(%rsi),%ymm1 vmovdqu 64-128(%rsi),%ymm2 vmovdqu 96-128(%rsi),%ymm3 vmovdqu 128-128(%rsi),%ymm4 vmovdqu 160-128(%rsi),%ymm5 vmovdqu 192-128(%rsi),%ymm6 vmovdqu 224-128(%rsi),%ymm7 vmovdqu 256-128(%rsi),%ymm8 leaq 192(%rsp),%rbx vmovdqu .Land_mask(%rip),%ymm15 jmp .LOOP_GRANDE_SQR_1024 .align 32 .LOOP_GRANDE_SQR_1024: leaq 576+128(%rsp),%r9 leaq 448(%rsp),%r12 vpaddq %ymm1,%ymm1,%ymm1 vpbroadcastq 0-128(%rsi),%ymm10 vpaddq %ymm2,%ymm2,%ymm2 vmovdqa %ymm1,0-128(%r9) vpaddq %ymm3,%ymm3,%ymm3 vmovdqa %ymm2,32-128(%r9) vpaddq %ymm4,%ymm4,%ymm4 vmovdqa %ymm3,64-128(%r9) vpaddq %ymm5,%ymm5,%ymm5 vmovdqa %ymm4,96-128(%r9) vpaddq %ymm6,%ymm6,%ymm6 vmovdqa %ymm5,128-128(%r9) vpaddq %ymm7,%ymm7,%ymm7 vmovdqa %ymm6,160-128(%r9) vpaddq %ymm8,%ymm8,%ymm8 vmovdqa %ymm7,192-128(%r9) vpxor %ymm9,%ymm9,%ymm9 vmovdqa %ymm8,224-128(%r9) vpmuludq 0-128(%rsi),%ymm10,%ymm0 vpbroadcastq 32-128(%rsi),%ymm11 vmovdqu %ymm9,288-192(%rbx) vpmuludq %ymm10,%ymm1,%ymm1 vmovdqu %ymm9,320-448(%r12) vpmuludq %ymm10,%ymm2,%ymm2 vmovdqu %ymm9,352-448(%r12) vpmuludq %ymm10,%ymm3,%ymm3 vmovdqu %ymm9,384-448(%r12) vpmuludq %ymm10,%ymm4,%ymm4 vmovdqu %ymm9,416-448(%r12) vpmuludq %ymm10,%ymm5,%ymm5 vmovdqu %ymm9,448-448(%r12) vpmuludq %ymm10,%ymm6,%ymm6 vmovdqu %ymm9,480-448(%r12) vpmuludq %ymm10,%ymm7,%ymm7 vmovdqu %ymm9,512-448(%r12) vpmuludq %ymm10,%ymm8,%ymm8 vpbroadcastq 64-128(%rsi),%ymm10 vmovdqu %ymm9,544-448(%r12) movq %rsi,%r15 movl $4,%r14d jmp .Lsqr_entry_1024 .align 32 .LOOP_SQR_1024: vpbroadcastq 32-128(%r15),%ymm11 vpmuludq 0-128(%rsi),%ymm10,%ymm0 vpaddq 0-192(%rbx),%ymm0,%ymm0 vpmuludq 0-128(%r9),%ymm10,%ymm1 vpaddq 32-192(%rbx),%ymm1,%ymm1 vpmuludq 32-128(%r9),%ymm10,%ymm2 vpaddq 64-192(%rbx),%ymm2,%ymm2 vpmuludq 64-128(%r9),%ymm10,%ymm3 vpaddq 96-192(%rbx),%ymm3,%ymm3 vpmuludq 96-128(%r9),%ymm10,%ymm4 vpaddq 128-192(%rbx),%ymm4,%ymm4 vpmuludq 
128-128(%r9),%ymm10,%ymm5 vpaddq 160-192(%rbx),%ymm5,%ymm5 vpmuludq 160-128(%r9),%ymm10,%ymm6 vpaddq 192-192(%rbx),%ymm6,%ymm6 vpmuludq 192-128(%r9),%ymm10,%ymm7 vpaddq 224-192(%rbx),%ymm7,%ymm7 vpmuludq 224-128(%r9),%ymm10,%ymm8 vpbroadcastq 64-128(%r15),%ymm10 vpaddq 256-192(%rbx),%ymm8,%ymm8 .Lsqr_entry_1024: vmovdqu %ymm0,0-192(%rbx) vmovdqu %ymm1,32-192(%rbx) vpmuludq 32-128(%rsi),%ymm11,%ymm12 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq 32-128(%r9),%ymm11,%ymm14 vpaddq %ymm14,%ymm3,%ymm3 vpmuludq 64-128(%r9),%ymm11,%ymm13 vpaddq %ymm13,%ymm4,%ymm4 vpmuludq 96-128(%r9),%ymm11,%ymm12 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq 128-128(%r9),%ymm11,%ymm14 vpaddq %ymm14,%ymm6,%ymm6 vpmuludq 160-128(%r9),%ymm11,%ymm13 vpaddq %ymm13,%ymm7,%ymm7 vpmuludq 192-128(%r9),%ymm11,%ymm12 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq 224-128(%r9),%ymm11,%ymm0 vpbroadcastq 96-128(%r15),%ymm11 vpaddq 288-192(%rbx),%ymm0,%ymm0 vmovdqu %ymm2,64-192(%rbx) vmovdqu %ymm3,96-192(%rbx) vpmuludq 64-128(%rsi),%ymm10,%ymm13 vpaddq %ymm13,%ymm4,%ymm4 vpmuludq 64-128(%r9),%ymm10,%ymm12 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq 96-128(%r9),%ymm10,%ymm14 vpaddq %ymm14,%ymm6,%ymm6 vpmuludq 128-128(%r9),%ymm10,%ymm13 vpaddq %ymm13,%ymm7,%ymm7 vpmuludq 160-128(%r9),%ymm10,%ymm12 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq 192-128(%r9),%ymm10,%ymm14 vpaddq %ymm14,%ymm0,%ymm0 vpmuludq 224-128(%r9),%ymm10,%ymm1 vpbroadcastq 128-128(%r15),%ymm10 vpaddq 320-448(%r12),%ymm1,%ymm1 vmovdqu %ymm4,128-192(%rbx) vmovdqu %ymm5,160-192(%rbx) vpmuludq 96-128(%rsi),%ymm11,%ymm12 vpaddq %ymm12,%ymm6,%ymm6 vpmuludq 96-128(%r9),%ymm11,%ymm14 vpaddq %ymm14,%ymm7,%ymm7 vpmuludq 128-128(%r9),%ymm11,%ymm13 vpaddq %ymm13,%ymm8,%ymm8 vpmuludq 160-128(%r9),%ymm11,%ymm12 vpaddq %ymm12,%ymm0,%ymm0 vpmuludq 192-128(%r9),%ymm11,%ymm14 vpaddq %ymm14,%ymm1,%ymm1 vpmuludq 224-128(%r9),%ymm11,%ymm2 vpbroadcastq 160-128(%r15),%ymm11 vpaddq 352-448(%r12),%ymm2,%ymm2 vmovdqu %ymm6,192-192(%rbx) vmovdqu %ymm7,224-192(%rbx) vpmuludq 128-128(%rsi),%ymm10,%ymm12 vpaddq 
%ymm12,%ymm8,%ymm8 vpmuludq 128-128(%r9),%ymm10,%ymm14 vpaddq %ymm14,%ymm0,%ymm0 vpmuludq 160-128(%r9),%ymm10,%ymm13 vpaddq %ymm13,%ymm1,%ymm1 vpmuludq 192-128(%r9),%ymm10,%ymm12 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq 224-128(%r9),%ymm10,%ymm3 vpbroadcastq 192-128(%r15),%ymm10 vpaddq 384-448(%r12),%ymm3,%ymm3 vmovdqu %ymm8,256-192(%rbx) vmovdqu %ymm0,288-192(%rbx) leaq 8(%rbx),%rbx vpmuludq 160-128(%rsi),%ymm11,%ymm13 vpaddq %ymm13,%ymm1,%ymm1 vpmuludq 160-128(%r9),%ymm11,%ymm12 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq 192-128(%r9),%ymm11,%ymm14 vpaddq %ymm14,%ymm3,%ymm3 vpmuludq 224-128(%r9),%ymm11,%ymm4 vpbroadcastq 224-128(%r15),%ymm11 vpaddq 416-448(%r12),%ymm4,%ymm4 vmovdqu %ymm1,320-448(%r12) vmovdqu %ymm2,352-448(%r12) vpmuludq 192-128(%rsi),%ymm10,%ymm12 vpaddq %ymm12,%ymm3,%ymm3 vpmuludq 192-128(%r9),%ymm10,%ymm14 vpbroadcastq 256-128(%r15),%ymm0 vpaddq %ymm14,%ymm4,%ymm4 vpmuludq 224-128(%r9),%ymm10,%ymm5 vpbroadcastq 0+8-128(%r15),%ymm10 vpaddq 448-448(%r12),%ymm5,%ymm5 vmovdqu %ymm3,384-448(%r12) vmovdqu %ymm4,416-448(%r12) leaq 8(%r15),%r15 vpmuludq 224-128(%rsi),%ymm11,%ymm12 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq 224-128(%r9),%ymm11,%ymm6 vpaddq 480-448(%r12),%ymm6,%ymm6 vpmuludq 256-128(%rsi),%ymm0,%ymm7 vmovdqu %ymm5,448-448(%r12) vpaddq 512-448(%r12),%ymm7,%ymm7 vmovdqu %ymm6,480-448(%r12) vmovdqu %ymm7,512-448(%r12) leaq 8(%r12),%r12 decl %r14d jnz .LOOP_SQR_1024 vmovdqu 256(%rsp),%ymm8 vmovdqu 288(%rsp),%ymm1 vmovdqu 320(%rsp),%ymm2 leaq 192(%rsp),%rbx vpsrlq $29,%ymm8,%ymm14 vpand %ymm15,%ymm8,%ymm8 vpsrlq $29,%ymm1,%ymm11 vpand %ymm15,%ymm1,%ymm1 vpermq $0x93,%ymm14,%ymm14 vpxor %ymm9,%ymm9,%ymm9 vpermq $0x93,%ymm11,%ymm11 vpblendd $3,%ymm9,%ymm14,%ymm10 vpblendd $3,%ymm14,%ymm11,%ymm14 vpaddq %ymm10,%ymm8,%ymm8 vpblendd $3,%ymm11,%ymm9,%ymm11 vpaddq %ymm14,%ymm1,%ymm1 vpaddq %ymm11,%ymm2,%ymm2 vmovdqu %ymm1,288-192(%rbx) vmovdqu %ymm2,320-192(%rbx) movq (%rsp),%rax movq 8(%rsp),%r10 movq 16(%rsp),%r11 movq 24(%rsp),%r12 vmovdqu 32(%rsp),%ymm1 vmovdqu 
64-192(%rbx),%ymm2 vmovdqu 96-192(%rbx),%ymm3 vmovdqu 128-192(%rbx),%ymm4 vmovdqu 160-192(%rbx),%ymm5 vmovdqu 192-192(%rbx),%ymm6 vmovdqu 224-192(%rbx),%ymm7 movq %rax,%r9 imull %ecx,%eax andl $0x1fffffff,%eax vmovd %eax,%xmm12 movq %rax,%rdx imulq -128(%r13),%rax vpbroadcastq %xmm12,%ymm12 addq %rax,%r9 movq %rdx,%rax imulq 8-128(%r13),%rax shrq $29,%r9 addq %rax,%r10 movq %rdx,%rax imulq 16-128(%r13),%rax addq %r9,%r10 addq %rax,%r11 imulq 24-128(%r13),%rdx addq %rdx,%r12 movq %r10,%rax imull %ecx,%eax andl $0x1fffffff,%eax movl $9,%r14d jmp .LOOP_REDUCE_1024 .align 32 .LOOP_REDUCE_1024: vmovd %eax,%xmm13 vpbroadcastq %xmm13,%ymm13 vpmuludq 32-128(%r13),%ymm12,%ymm10 movq %rax,%rdx imulq -128(%r13),%rax vpaddq %ymm10,%ymm1,%ymm1 addq %rax,%r10 vpmuludq 64-128(%r13),%ymm12,%ymm14 movq %rdx,%rax imulq 8-128(%r13),%rax vpaddq %ymm14,%ymm2,%ymm2 vpmuludq 96-128(%r13),%ymm12,%ymm11 .byte 0x67 addq %rax,%r11 .byte 0x67 movq %rdx,%rax imulq 16-128(%r13),%rax shrq $29,%r10 vpaddq %ymm11,%ymm3,%ymm3 vpmuludq 128-128(%r13),%ymm12,%ymm10 addq %rax,%r12 addq %r10,%r11 vpaddq %ymm10,%ymm4,%ymm4 vpmuludq 160-128(%r13),%ymm12,%ymm14 movq %r11,%rax imull %ecx,%eax vpaddq %ymm14,%ymm5,%ymm5 vpmuludq 192-128(%r13),%ymm12,%ymm11 andl $0x1fffffff,%eax vpaddq %ymm11,%ymm6,%ymm6 vpmuludq 224-128(%r13),%ymm12,%ymm10 vpaddq %ymm10,%ymm7,%ymm7 vpmuludq 256-128(%r13),%ymm12,%ymm14 vmovd %eax,%xmm12 vpaddq %ymm14,%ymm8,%ymm8 vpbroadcastq %xmm12,%ymm12 vpmuludq 32-8-128(%r13),%ymm13,%ymm11 vmovdqu 96-8-128(%r13),%ymm14 movq %rax,%rdx imulq -128(%r13),%rax vpaddq %ymm11,%ymm1,%ymm1 vpmuludq 64-8-128(%r13),%ymm13,%ymm10 vmovdqu 128-8-128(%r13),%ymm11 addq %rax,%r11 movq %rdx,%rax imulq 8-128(%r13),%rax vpaddq %ymm10,%ymm2,%ymm2 addq %r12,%rax shrq $29,%r11 vpmuludq %ymm13,%ymm14,%ymm14 vmovdqu 160-8-128(%r13),%ymm10 addq %r11,%rax vpaddq %ymm14,%ymm3,%ymm3 vpmuludq %ymm13,%ymm11,%ymm11 vmovdqu 192-8-128(%r13),%ymm14 .byte 0x67 movq %rax,%r12 imull %ecx,%eax vpaddq %ymm11,%ymm4,%ymm4 vpmuludq 
%ymm13,%ymm10,%ymm10 .byte 0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00 andl $0x1fffffff,%eax vpaddq %ymm10,%ymm5,%ymm5 vpmuludq %ymm13,%ymm14,%ymm14 vmovdqu 256-8-128(%r13),%ymm10 vpaddq %ymm14,%ymm6,%ymm6 vpmuludq %ymm13,%ymm11,%ymm11 vmovdqu 288-8-128(%r13),%ymm9 vmovd %eax,%xmm0 imulq -128(%r13),%rax vpaddq %ymm11,%ymm7,%ymm7 vpmuludq %ymm13,%ymm10,%ymm10 vmovdqu 32-16-128(%r13),%ymm14 vpbroadcastq %xmm0,%ymm0 vpaddq %ymm10,%ymm8,%ymm8 vpmuludq %ymm13,%ymm9,%ymm9 vmovdqu 64-16-128(%r13),%ymm11 addq %rax,%r12 vmovdqu 32-24-128(%r13),%ymm13 vpmuludq %ymm12,%ymm14,%ymm14 vmovdqu 96-16-128(%r13),%ymm10 vpaddq %ymm14,%ymm1,%ymm1 vpmuludq %ymm0,%ymm13,%ymm13 vpmuludq %ymm12,%ymm11,%ymm11 .byte 0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff vpaddq %ymm1,%ymm13,%ymm13 vpaddq %ymm11,%ymm2,%ymm2 vpmuludq %ymm12,%ymm10,%ymm10 vmovdqu 160-16-128(%r13),%ymm11 .byte 0x67 vmovq %xmm13,%rax vmovdqu %ymm13,(%rsp) vpaddq %ymm10,%ymm3,%ymm3 vpmuludq %ymm12,%ymm14,%ymm14 vmovdqu 192-16-128(%r13),%ymm10 vpaddq %ymm14,%ymm4,%ymm4 vpmuludq %ymm12,%ymm11,%ymm11 vmovdqu 224-16-128(%r13),%ymm14 vpaddq %ymm11,%ymm5,%ymm5 vpmuludq %ymm12,%ymm10,%ymm10 vmovdqu 256-16-128(%r13),%ymm11 vpaddq %ymm10,%ymm6,%ymm6 vpmuludq %ymm12,%ymm14,%ymm14 shrq $29,%r12 vmovdqu 288-16-128(%r13),%ymm10 addq %r12,%rax vpaddq %ymm14,%ymm7,%ymm7 vpmuludq %ymm12,%ymm11,%ymm11 movq %rax,%r9 imull %ecx,%eax vpaddq %ymm11,%ymm8,%ymm8 vpmuludq %ymm12,%ymm10,%ymm10 andl $0x1fffffff,%eax vmovd %eax,%xmm12 vmovdqu 96-24-128(%r13),%ymm11 .byte 0x67 vpaddq %ymm10,%ymm9,%ymm9 vpbroadcastq %xmm12,%ymm12 vpmuludq 64-24-128(%r13),%ymm0,%ymm14 vmovdqu 128-24-128(%r13),%ymm10 movq %rax,%rdx imulq -128(%r13),%rax movq 8(%rsp),%r10 vpaddq %ymm14,%ymm2,%ymm1 vpmuludq %ymm0,%ymm11,%ymm11 vmovdqu 160-24-128(%r13),%ymm14 addq %rax,%r9 movq %rdx,%rax imulq 8-128(%r13),%rax .byte 0x67 shrq $29,%r9 movq 16(%rsp),%r11 vpaddq %ymm11,%ymm3,%ymm2 vpmuludq %ymm0,%ymm10,%ymm10 vmovdqu 192-24-128(%r13),%ymm11 addq %rax,%r10 movq %rdx,%rax imulq 
16-128(%r13),%rax vpaddq %ymm10,%ymm4,%ymm3 vpmuludq %ymm0,%ymm14,%ymm14 vmovdqu 224-24-128(%r13),%ymm10 imulq 24-128(%r13),%rdx addq %rax,%r11 leaq (%r9,%r10,1),%rax vpaddq %ymm14,%ymm5,%ymm4 vpmuludq %ymm0,%ymm11,%ymm11 vmovdqu 256-24-128(%r13),%ymm14 movq %rax,%r10 imull %ecx,%eax vpmuludq %ymm0,%ymm10,%ymm10 vpaddq %ymm11,%ymm6,%ymm5 vmovdqu 288-24-128(%r13),%ymm11 andl $0x1fffffff,%eax vpaddq %ymm10,%ymm7,%ymm6 vpmuludq %ymm0,%ymm14,%ymm14 addq 24(%rsp),%rdx vpaddq %ymm14,%ymm8,%ymm7 vpmuludq %ymm0,%ymm11,%ymm11 vpaddq %ymm11,%ymm9,%ymm8 vmovq %r12,%xmm9 movq %rdx,%r12 decl %r14d jnz .LOOP_REDUCE_1024 leaq 448(%rsp),%r12 vpaddq %ymm9,%ymm13,%ymm0 vpxor %ymm9,%ymm9,%ymm9 vpaddq 288-192(%rbx),%ymm0,%ymm0 vpaddq 320-448(%r12),%ymm1,%ymm1 vpaddq 352-448(%r12),%ymm2,%ymm2 vpaddq 384-448(%r12),%ymm3,%ymm3 vpaddq 416-448(%r12),%ymm4,%ymm4 vpaddq 448-448(%r12),%ymm5,%ymm5 vpaddq 480-448(%r12),%ymm6,%ymm6 vpaddq 512-448(%r12),%ymm7,%ymm7 vpaddq 544-448(%r12),%ymm8,%ymm8 vpsrlq $29,%ymm0,%ymm14 vpand %ymm15,%ymm0,%ymm0 vpsrlq $29,%ymm1,%ymm11 vpand %ymm15,%ymm1,%ymm1 vpsrlq $29,%ymm2,%ymm12 vpermq $0x93,%ymm14,%ymm14 vpand %ymm15,%ymm2,%ymm2 vpsrlq $29,%ymm3,%ymm13 vpermq $0x93,%ymm11,%ymm11 vpand %ymm15,%ymm3,%ymm3 vpermq $0x93,%ymm12,%ymm12 vpblendd $3,%ymm9,%ymm14,%ymm10 vpermq $0x93,%ymm13,%ymm13 vpblendd $3,%ymm14,%ymm11,%ymm14 vpaddq %ymm10,%ymm0,%ymm0 vpblendd $3,%ymm11,%ymm12,%ymm11 vpaddq %ymm14,%ymm1,%ymm1 vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm11,%ymm2,%ymm2 vpblendd $3,%ymm13,%ymm9,%ymm13 vpaddq %ymm12,%ymm3,%ymm3 vpaddq %ymm13,%ymm4,%ymm4 vpsrlq $29,%ymm0,%ymm14 vpand %ymm15,%ymm0,%ymm0 vpsrlq $29,%ymm1,%ymm11 vpand %ymm15,%ymm1,%ymm1 vpsrlq $29,%ymm2,%ymm12 vpermq $0x93,%ymm14,%ymm14 vpand %ymm15,%ymm2,%ymm2 vpsrlq $29,%ymm3,%ymm13 vpermq $0x93,%ymm11,%ymm11 vpand %ymm15,%ymm3,%ymm3 vpermq $0x93,%ymm12,%ymm12 vpblendd $3,%ymm9,%ymm14,%ymm10 vpermq $0x93,%ymm13,%ymm13 vpblendd $3,%ymm14,%ymm11,%ymm14 vpaddq %ymm10,%ymm0,%ymm0 vpblendd 
$3,%ymm11,%ymm12,%ymm11 vpaddq %ymm14,%ymm1,%ymm1 vmovdqu %ymm0,0-128(%rdi) vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm11,%ymm2,%ymm2 vmovdqu %ymm1,32-128(%rdi) vpblendd $3,%ymm13,%ymm9,%ymm13 vpaddq %ymm12,%ymm3,%ymm3 vmovdqu %ymm2,64-128(%rdi) vpaddq %ymm13,%ymm4,%ymm4 vmovdqu %ymm3,96-128(%rdi) vpsrlq $29,%ymm4,%ymm14 vpand %ymm15,%ymm4,%ymm4 vpsrlq $29,%ymm5,%ymm11 vpand %ymm15,%ymm5,%ymm5 vpsrlq $29,%ymm6,%ymm12 vpermq $0x93,%ymm14,%ymm14 vpand %ymm15,%ymm6,%ymm6 vpsrlq $29,%ymm7,%ymm13 vpermq $0x93,%ymm11,%ymm11 vpand %ymm15,%ymm7,%ymm7 vpsrlq $29,%ymm8,%ymm0 vpermq $0x93,%ymm12,%ymm12 vpand %ymm15,%ymm8,%ymm8 vpermq $0x93,%ymm13,%ymm13 vpblendd $3,%ymm9,%ymm14,%ymm10 vpermq $0x93,%ymm0,%ymm0 vpblendd $3,%ymm14,%ymm11,%ymm14 vpaddq %ymm10,%ymm4,%ymm4 vpblendd $3,%ymm11,%ymm12,%ymm11 vpaddq %ymm14,%ymm5,%ymm5 vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm11,%ymm6,%ymm6 vpblendd $3,%ymm13,%ymm0,%ymm13 vpaddq %ymm12,%ymm7,%ymm7 vpaddq %ymm13,%ymm8,%ymm8 vpsrlq $29,%ymm4,%ymm14 vpand %ymm15,%ymm4,%ymm4 vpsrlq $29,%ymm5,%ymm11 vpand %ymm15,%ymm5,%ymm5 vpsrlq $29,%ymm6,%ymm12 vpermq $0x93,%ymm14,%ymm14 vpand %ymm15,%ymm6,%ymm6 vpsrlq $29,%ymm7,%ymm13 vpermq $0x93,%ymm11,%ymm11 vpand %ymm15,%ymm7,%ymm7 vpsrlq $29,%ymm8,%ymm0 vpermq $0x93,%ymm12,%ymm12 vpand %ymm15,%ymm8,%ymm8 vpermq $0x93,%ymm13,%ymm13 vpblendd $3,%ymm9,%ymm14,%ymm10 vpermq $0x93,%ymm0,%ymm0 vpblendd $3,%ymm14,%ymm11,%ymm14 vpaddq %ymm10,%ymm4,%ymm4 vpblendd $3,%ymm11,%ymm12,%ymm11 vpaddq %ymm14,%ymm5,%ymm5 vmovdqu %ymm4,128-128(%rdi) vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm11,%ymm6,%ymm6 vmovdqu %ymm5,160-128(%rdi) vpblendd $3,%ymm13,%ymm0,%ymm13 vpaddq %ymm12,%ymm7,%ymm7 vmovdqu %ymm6,192-128(%rdi) vpaddq %ymm13,%ymm8,%ymm8 vmovdqu %ymm7,224-128(%rdi) vmovdqu %ymm8,256-128(%rdi) movq %rdi,%rsi decl %r8d jne .LOOP_GRANDE_SQR_1024 vzeroall movq %rbp,%rax .cfi_def_cfa_register %rax movq -48(%rax),%r15 .cfi_restore %r15 movq -40(%rax),%r14 .cfi_restore %r14 movq -32(%rax),%r13 .cfi_restore %r13 
# --- tail of rsaz_1024_sqr_avx2 (function begins earlier in the file): epilogue
# restoring callee-saved r12/rbp/rbx from the frame anchored at %rax, rewinding
# %rsp, and closing the CFI region.
# Then rsaz_1024_mul_avx2(ret=%rdi, a=%rsi, b=%rdx, n=%rcx, k0=%r8):
# prologue saves rbx/rbp/r12-r15 with CFI offsets, anchors the frame in %rbp,
# clears all ymm state (vzeroall), and moves b into %r13.  The
# andq $4095 / addq $320 / shrq $12 sequence tests whether a 320-byte window at
# %rsi crosses a 4K page; cmovnzq then swaps the two multiplicand pointers
# (NOTE(review): presumably so the strided vector loads stay within one page —
# confirm against the rsaz-avx2 perlasm source).  All three I/O pointers are
# biased by +128 (subq $-128) so later 0..288 offsets use the -128..+160 range.
# The same page-cross test on the modulus pointer %rcx decides whether to copy
# the 9x32-byte modulus into a 512-byte-aligned stack area (the vmovdqu/vpxor
# pairs below), redirecting %rcx at the copy; the vpxor side effect also zeroes
# ymm0-ymm7 for the accumulator.  .byte 0x67,0x67 are address-size padding
# bytes emitted by the generator for alignment/decoder reasons.
# .Lmul_1024_no_n_copy: align %rsp to 64, load the first 64-bit word of b into
# %rbx and broadcast it (%ymm10), zero the scalar carry registers r9-r12, load
# the 29-bit limb mask .Land_mask into %ymm15, and set the loop counter to 9
# (the 1024-bit operands are held as 36 limbs of 29 bits = 9 ymm vectors).
movq -24(%rax),%r12 .cfi_restore %r12 movq -16(%rax),%rbp .cfi_restore %rbp movq -8(%rax),%rbx .cfi_restore %rbx leaq (%rax),%rsp .cfi_def_cfa_register %rsp .Lsqr_1024_epilogue: ret .cfi_endproc .size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2 .globl rsaz_1024_mul_avx2 .hidden rsaz_1024_mul_avx2 .type rsaz_1024_mul_avx2,@function .align 64 rsaz_1024_mul_avx2: .cfi_startproc _CET_ENDBR leaq (%rsp),%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 movq %rax,%rbp .cfi_def_cfa_register %rbp vzeroall movq %rdx,%r13 subq $64,%rsp .byte 0x67,0x67 movq %rsi,%r15 andq $4095,%r15 addq $320,%r15 shrq $12,%r15 movq %rsi,%r15 cmovnzq %r13,%rsi cmovnzq %r15,%r13 movq %rcx,%r15 subq $-128,%rsi subq $-128,%rcx subq $-128,%rdi andq $4095,%r15 addq $320,%r15 .byte 0x67,0x67 shrq $12,%r15 jz .Lmul_1024_no_n_copy subq $320,%rsp vmovdqu 0-128(%rcx),%ymm0 andq $-512,%rsp vmovdqu 32-128(%rcx),%ymm1 vmovdqu 64-128(%rcx),%ymm2 vmovdqu 96-128(%rcx),%ymm3 vmovdqu 128-128(%rcx),%ymm4 vmovdqu 160-128(%rcx),%ymm5 vmovdqu 192-128(%rcx),%ymm6 vmovdqu 224-128(%rcx),%ymm7 vmovdqu 256-128(%rcx),%ymm8 leaq 64+128(%rsp),%rcx vmovdqu %ymm0,0-128(%rcx) vpxor %ymm0,%ymm0,%ymm0 vmovdqu %ymm1,32-128(%rcx) vpxor %ymm1,%ymm1,%ymm1 vmovdqu %ymm2,64-128(%rcx) vpxor %ymm2,%ymm2,%ymm2 vmovdqu %ymm3,96-128(%rcx) vpxor %ymm3,%ymm3,%ymm3 vmovdqu %ymm4,128-128(%rcx) vpxor %ymm4,%ymm4,%ymm4 vmovdqu %ymm5,160-128(%rcx) vpxor %ymm5,%ymm5,%ymm5 vmovdqu %ymm6,192-128(%rcx) vpxor %ymm6,%ymm6,%ymm6 vmovdqu %ymm7,224-128(%rcx) vpxor %ymm7,%ymm7,%ymm7 vmovdqu %ymm8,256-128(%rcx) vmovdqa %ymm0,%ymm8 vmovdqu %ymm9,288-128(%rcx) .Lmul_1024_no_n_copy: andq $-64,%rsp movq (%r13),%rbx vpbroadcastq (%r13),%ymm10 vmovdqu %ymm0,(%rsp) xorq %r9,%r9 .byte 0x67 xorq %r10,%r10 xorq %r11,%r11 xorq %r12,%r12 vmovdqu .Land_mask(%rip),%ymm15 movl $9,%r14d vmovdqu
# .Loop_mul_1024: main multiply-and-reduce loop, executed 9 times (%r14d).
# Each iteration consumes four 64-bit words of the multiplier b (vpbroadcastq
# 0/8/16/24(%r13), advanced by addq $32,%r13 near the loop bottom) and has two
# interleaved halves per word: scalar imulq against the low limbs of a/n to
# drive carries in r9-r12, and vpmuludq/vpaddq against the 29-bit vector limbs
# accumulated in ymm1-ymm9.  The recurring pattern
#   imull %r8d,%eax ; andl $0x1fffffff,%eax ; vmovd/vpbroadcastq -> %ymm11
# computes the per-word reduction multiplier from k0 (%r8d) masked to 29 bits
# (NOTE(review): this looks like the Montgomery n0' step of RSAZ — confirm
# against Gueron & Krasnov's RSAZ description).  vpand %ymm15 re-masks limbs to
# 29 bits and vpermq $0x93 rotates a vector's lanes to propagate the top lane's
# overflow into the next vector (lazy carry).
%ymm9,288-128(%rdi) jmp .Loop_mul_1024 .align 32 .Loop_mul_1024: vpsrlq $29,%ymm3,%ymm9 movq %rbx,%rax imulq -128(%rsi),%rax addq %r9,%rax movq %rbx,%r10 imulq 8-128(%rsi),%r10 addq 8(%rsp),%r10 movq %rax,%r9 imull %r8d,%eax andl $0x1fffffff,%eax movq %rbx,%r11 imulq 16-128(%rsi),%r11 addq 16(%rsp),%r11 movq %rbx,%r12 imulq 24-128(%rsi),%r12 addq 24(%rsp),%r12 vpmuludq 32-128(%rsi),%ymm10,%ymm0 vmovd %eax,%xmm11 vpaddq %ymm0,%ymm1,%ymm1 vpmuludq 64-128(%rsi),%ymm10,%ymm12 vpbroadcastq %xmm11,%ymm11 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq 96-128(%rsi),%ymm10,%ymm13 vpand %ymm15,%ymm3,%ymm3 vpaddq %ymm13,%ymm3,%ymm3 vpmuludq 128-128(%rsi),%ymm10,%ymm0 vpaddq %ymm0,%ymm4,%ymm4 vpmuludq 160-128(%rsi),%ymm10,%ymm12 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq 192-128(%rsi),%ymm10,%ymm13 vpaddq %ymm13,%ymm6,%ymm6 vpmuludq 224-128(%rsi),%ymm10,%ymm0 vpermq $0x93,%ymm9,%ymm9 vpaddq %ymm0,%ymm7,%ymm7 vpmuludq 256-128(%rsi),%ymm10,%ymm12 vpbroadcastq 8(%r13),%ymm10 vpaddq %ymm12,%ymm8,%ymm8 movq %rax,%rdx imulq -128(%rcx),%rax addq %rax,%r9 movq %rdx,%rax imulq 8-128(%rcx),%rax addq %rax,%r10 movq %rdx,%rax imulq 16-128(%rcx),%rax addq %rax,%r11 shrq $29,%r9 imulq 24-128(%rcx),%rdx addq %rdx,%r12 addq %r9,%r10 vpmuludq 32-128(%rcx),%ymm11,%ymm13 vmovq %xmm10,%rbx vpaddq %ymm13,%ymm1,%ymm1 vpmuludq 64-128(%rcx),%ymm11,%ymm0 vpaddq %ymm0,%ymm2,%ymm2 vpmuludq 96-128(%rcx),%ymm11,%ymm12 vpaddq %ymm12,%ymm3,%ymm3 vpmuludq 128-128(%rcx),%ymm11,%ymm13 vpaddq %ymm13,%ymm4,%ymm4 vpmuludq 160-128(%rcx),%ymm11,%ymm0 vpaddq %ymm0,%ymm5,%ymm5 vpmuludq 192-128(%rcx),%ymm11,%ymm12 vpaddq %ymm12,%ymm6,%ymm6 vpmuludq 224-128(%rcx),%ymm11,%ymm13 vpblendd $3,%ymm14,%ymm9,%ymm12 vpaddq %ymm13,%ymm7,%ymm7 vpmuludq 256-128(%rcx),%ymm11,%ymm0 vpaddq %ymm12,%ymm3,%ymm3 vpaddq %ymm0,%ymm8,%ymm8 movq %rbx,%rax imulq -128(%rsi),%rax addq %rax,%r10 vmovdqu -8+32-128(%rsi),%ymm12 movq %rbx,%rax imulq 8-128(%rsi),%rax addq %rax,%r11 vmovdqu -8+64-128(%rsi),%ymm13 movq %r10,%rax vpblendd $0xfc,%ymm14,%ymm9,%ymm9 imull
# Second word of the iteration (b[4k+1]): same scalar/vector interleave, but
# the vector operands of a and n are now loaded from byte offset -8 so the
# 29-bit limb columns line up with the shifted multiplier word.
%r8d,%eax vpaddq %ymm9,%ymm4,%ymm4 andl $0x1fffffff,%eax imulq 16-128(%rsi),%rbx addq %rbx,%r12 vpmuludq %ymm10,%ymm12,%ymm12 vmovd %eax,%xmm11 vmovdqu -8+96-128(%rsi),%ymm0 vpaddq %ymm12,%ymm1,%ymm1 vpmuludq %ymm10,%ymm13,%ymm13 vpbroadcastq %xmm11,%ymm11 vmovdqu -8+128-128(%rsi),%ymm12 vpaddq %ymm13,%ymm2,%ymm2 vpmuludq %ymm10,%ymm0,%ymm0 vmovdqu -8+160-128(%rsi),%ymm13 vpaddq %ymm0,%ymm3,%ymm3 vpmuludq %ymm10,%ymm12,%ymm12 vmovdqu -8+192-128(%rsi),%ymm0 vpaddq %ymm12,%ymm4,%ymm4 vpmuludq %ymm10,%ymm13,%ymm13 vmovdqu -8+224-128(%rsi),%ymm12 vpaddq %ymm13,%ymm5,%ymm5 vpmuludq %ymm10,%ymm0,%ymm0 vmovdqu -8+256-128(%rsi),%ymm13 vpaddq %ymm0,%ymm6,%ymm6 vpmuludq %ymm10,%ymm12,%ymm12 vmovdqu -8+288-128(%rsi),%ymm9 vpaddq %ymm12,%ymm7,%ymm7 vpmuludq %ymm10,%ymm13,%ymm13 vpaddq %ymm13,%ymm8,%ymm8 vpmuludq %ymm10,%ymm9,%ymm9 vpbroadcastq 16(%r13),%ymm10 movq %rax,%rdx imulq -128(%rcx),%rax addq %rax,%r10 vmovdqu -8+32-128(%rcx),%ymm0 movq %rdx,%rax imulq 8-128(%rcx),%rax addq %rax,%r11 vmovdqu -8+64-128(%rcx),%ymm12 shrq $29,%r10 imulq 16-128(%rcx),%rdx addq %rdx,%r12 addq %r10,%r11 vpmuludq %ymm11,%ymm0,%ymm0 vmovq %xmm10,%rbx vmovdqu -8+96-128(%rcx),%ymm13 vpaddq %ymm0,%ymm1,%ymm1 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu -8+128-128(%rcx),%ymm0 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -8+160-128(%rcx),%ymm12 vpaddq %ymm13,%ymm3,%ymm3 vpmuludq %ymm11,%ymm0,%ymm0 vmovdqu -8+192-128(%rcx),%ymm13 vpaddq %ymm0,%ymm4,%ymm4 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu -8+224-128(%rcx),%ymm0 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -8+256-128(%rcx),%ymm12 vpaddq %ymm13,%ymm6,%ymm6 vpmuludq %ymm11,%ymm0,%ymm0 vmovdqu -8+288-128(%rcx),%ymm13 vpaddq %ymm0,%ymm7,%ymm7 vpmuludq %ymm11,%ymm12,%ymm12 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq %ymm11,%ymm13,%ymm13 vpaddq %ymm13,%ymm9,%ymm9 vmovdqu -16+32-128(%rsi),%ymm0 movq %rbx,%rax imulq -128(%rsi),%rax addq %r11,%rax vmovdqu -16+64-128(%rsi),%ymm12 movq %rax,%r11 imull %r8d,%eax andl $0x1fffffff,%eax
# Third word of the iteration (b[4k+2]): vector operands loaded from byte
# offset -16; scalar carry now flows r11 -> r12.
imulq 8-128(%rsi),%rbx addq %rbx,%r12 vpmuludq %ymm10,%ymm0,%ymm0 vmovd %eax,%xmm11 vmovdqu -16+96-128(%rsi),%ymm13 vpaddq %ymm0,%ymm1,%ymm1 vpmuludq %ymm10,%ymm12,%ymm12 vpbroadcastq %xmm11,%ymm11 vmovdqu -16+128-128(%rsi),%ymm0 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq %ymm10,%ymm13,%ymm13 vmovdqu -16+160-128(%rsi),%ymm12 vpaddq %ymm13,%ymm3,%ymm3 vpmuludq %ymm10,%ymm0,%ymm0 vmovdqu -16+192-128(%rsi),%ymm13 vpaddq %ymm0,%ymm4,%ymm4 vpmuludq %ymm10,%ymm12,%ymm12 vmovdqu -16+224-128(%rsi),%ymm0 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq %ymm10,%ymm13,%ymm13 vmovdqu -16+256-128(%rsi),%ymm12 vpaddq %ymm13,%ymm6,%ymm6 vpmuludq %ymm10,%ymm0,%ymm0 vmovdqu -16+288-128(%rsi),%ymm13 vpaddq %ymm0,%ymm7,%ymm7 vpmuludq %ymm10,%ymm12,%ymm12 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq %ymm10,%ymm13,%ymm13 vpbroadcastq 24(%r13),%ymm10 vpaddq %ymm13,%ymm9,%ymm9 vmovdqu -16+32-128(%rcx),%ymm0 movq %rax,%rdx imulq -128(%rcx),%rax addq %rax,%r11 vmovdqu -16+64-128(%rcx),%ymm12 imulq 8-128(%rcx),%rdx addq %rdx,%r12 shrq $29,%r11 vpmuludq %ymm11,%ymm0,%ymm0 vmovq %xmm10,%rbx vmovdqu -16+96-128(%rcx),%ymm13 vpaddq %ymm0,%ymm1,%ymm1 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu -16+128-128(%rcx),%ymm0 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -16+160-128(%rcx),%ymm12 vpaddq %ymm13,%ymm3,%ymm3 vpmuludq %ymm11,%ymm0,%ymm0 vmovdqu -16+192-128(%rcx),%ymm13 vpaddq %ymm0,%ymm4,%ymm4 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu -16+224-128(%rcx),%ymm0 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -16+256-128(%rcx),%ymm12 vpaddq %ymm13,%ymm6,%ymm6 vpmuludq %ymm11,%ymm0,%ymm0 vmovdqu -16+288-128(%rcx),%ymm13 vpaddq %ymm0,%ymm7,%ymm7 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu -24+32-128(%rsi),%ymm0 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -24+64-128(%rsi),%ymm12 vpaddq %ymm13,%ymm9,%ymm9 addq %r11,%r12 imulq -128(%rsi),%rbx addq %rbx,%r12 movq %r12,%rax imull %r8d,%eax andl $0x1fffffff,%eax vpmuludq %ymm10,%ymm0,%ymm0 vmovd %eax,%xmm11 vmovdqu -24+96-128(%rsi),%ymm13 vpaddq
# Fourth word of the iteration (b[4k+3], offset -24): at its end the
# accumulator vectors are shifted down one slot (ymm1..ymm9 -> ymm0..ymm8,
# with the lowest vector spilled to (%rsp)), the next multiplier word is
# broadcast, %r13 advances by 32 bytes, and the loop counter is decremented.
%ymm0,%ymm1,%ymm1 vpmuludq %ymm10,%ymm12,%ymm12 vpbroadcastq %xmm11,%ymm11 vmovdqu -24+128-128(%rsi),%ymm0 vpaddq %ymm12,%ymm2,%ymm2 vpmuludq %ymm10,%ymm13,%ymm13 vmovdqu -24+160-128(%rsi),%ymm12 vpaddq %ymm13,%ymm3,%ymm3 vpmuludq %ymm10,%ymm0,%ymm0 vmovdqu -24+192-128(%rsi),%ymm13 vpaddq %ymm0,%ymm4,%ymm4 vpmuludq %ymm10,%ymm12,%ymm12 vmovdqu -24+224-128(%rsi),%ymm0 vpaddq %ymm12,%ymm5,%ymm5 vpmuludq %ymm10,%ymm13,%ymm13 vmovdqu -24+256-128(%rsi),%ymm12 vpaddq %ymm13,%ymm6,%ymm6 vpmuludq %ymm10,%ymm0,%ymm0 vmovdqu -24+288-128(%rsi),%ymm13 vpaddq %ymm0,%ymm7,%ymm7 vpmuludq %ymm10,%ymm12,%ymm12 vpaddq %ymm12,%ymm8,%ymm8 vpmuludq %ymm10,%ymm13,%ymm13 vpbroadcastq 32(%r13),%ymm10 vpaddq %ymm13,%ymm9,%ymm9 addq $32,%r13 vmovdqu -24+32-128(%rcx),%ymm0 imulq -128(%rcx),%rax addq %rax,%r12 shrq $29,%r12 vmovdqu -24+64-128(%rcx),%ymm12 vpmuludq %ymm11,%ymm0,%ymm0 vmovq %xmm10,%rbx vmovdqu -24+96-128(%rcx),%ymm13 vpaddq %ymm0,%ymm1,%ymm0 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu %ymm0,(%rsp) vpaddq %ymm12,%ymm2,%ymm1 vmovdqu -24+128-128(%rcx),%ymm0 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -24+160-128(%rcx),%ymm12 vpaddq %ymm13,%ymm3,%ymm2 vpmuludq %ymm11,%ymm0,%ymm0 vmovdqu -24+192-128(%rcx),%ymm13 vpaddq %ymm0,%ymm4,%ymm3 vpmuludq %ymm11,%ymm12,%ymm12 vmovdqu -24+224-128(%rcx),%ymm0 vpaddq %ymm12,%ymm5,%ymm4 vpmuludq %ymm11,%ymm13,%ymm13 vmovdqu -24+256-128(%rcx),%ymm12 vpaddq %ymm13,%ymm6,%ymm5 vpmuludq %ymm11,%ymm0,%ymm0 vmovdqu -24+288-128(%rcx),%ymm13 movq %r12,%r9 vpaddq %ymm0,%ymm7,%ymm6 vpmuludq %ymm11,%ymm12,%ymm12 addq (%rsp),%r9 vpaddq %ymm12,%ymm8,%ymm7 vpmuludq %ymm11,%ymm13,%ymm13 vmovq %r12,%xmm12 vpaddq %ymm13,%ymm9,%ymm8 decl %r14d jnz .Loop_mul_1024 vpaddq (%rsp),%ymm12,%ymm0 vpsrlq $29,%ymm0,%ymm12 vpand %ymm15,%ymm0,%ymm0 vpsrlq $29,%ymm1,%ymm13 vpand %ymm15,%ymm1,%ymm1 vpsrlq $29,%ymm2,%ymm10 vpermq $0x93,%ymm12,%ymm12 vpand %ymm15,%ymm2,%ymm2 vpsrlq $29,%ymm3,%ymm11 vpermq $0x93,%ymm13,%ymm13 vpand %ymm15,%ymm3,%ymm3 vpblendd $3,%ymm14,%ymm12,%ymm9
# Post-loop carry normalization, first half (limb vectors 0-4): two rounds of
# "shift-out 29-bit overflow / mask / rotate lanes (vpermq $0x93) / splice
# across vectors (vpblendd $3)" then add back, after which ymm0-ymm3 are
# stored to the output at 0..96(-128)(%rdi).
$0x93,%ymm10,%ymm10 vpblendd $3,%ymm12,%ymm13,%ymm12 vpermq $0x93,%ymm11,%ymm11 vpaddq %ymm9,%ymm0,%ymm0 vpblendd $3,%ymm13,%ymm10,%ymm13 vpaddq %ymm12,%ymm1,%ymm1 vpblendd $3,%ymm10,%ymm11,%ymm10 vpaddq %ymm13,%ymm2,%ymm2 vpblendd $3,%ymm11,%ymm14,%ymm11 vpaddq %ymm10,%ymm3,%ymm3 vpaddq %ymm11,%ymm4,%ymm4 vpsrlq $29,%ymm0,%ymm12 vpand %ymm15,%ymm0,%ymm0 vpsrlq $29,%ymm1,%ymm13 vpand %ymm15,%ymm1,%ymm1 vpsrlq $29,%ymm2,%ymm10 vpermq $0x93,%ymm12,%ymm12 vpand %ymm15,%ymm2,%ymm2 vpsrlq $29,%ymm3,%ymm11 vpermq $0x93,%ymm13,%ymm13 vpand %ymm15,%ymm3,%ymm3 vpermq $0x93,%ymm10,%ymm10 vpblendd $3,%ymm14,%ymm12,%ymm9 vpermq $0x93,%ymm11,%ymm11 vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm9,%ymm0,%ymm0 vpblendd $3,%ymm13,%ymm10,%ymm13 vpaddq %ymm12,%ymm1,%ymm1 vpblendd $3,%ymm10,%ymm11,%ymm10 vpaddq %ymm13,%ymm2,%ymm2 vpblendd $3,%ymm11,%ymm14,%ymm11 vpaddq %ymm10,%ymm3,%ymm3 vpaddq %ymm11,%ymm4,%ymm4 vmovdqu %ymm0,0-128(%rdi) vmovdqu %ymm1,32-128(%rdi) vmovdqu %ymm2,64-128(%rdi) vmovdqu %ymm3,96-128(%rdi) vpsrlq $29,%ymm4,%ymm12 vpand %ymm15,%ymm4,%ymm4 vpsrlq $29,%ymm5,%ymm13 vpand %ymm15,%ymm5,%ymm5 vpsrlq $29,%ymm6,%ymm10 vpermq $0x93,%ymm12,%ymm12 vpand %ymm15,%ymm6,%ymm6 vpsrlq $29,%ymm7,%ymm11 vpermq $0x93,%ymm13,%ymm13 vpand %ymm15,%ymm7,%ymm7 vpsrlq $29,%ymm8,%ymm0 vpermq $0x93,%ymm10,%ymm10 vpand %ymm15,%ymm8,%ymm8 vpermq $0x93,%ymm11,%ymm11 vpblendd $3,%ymm14,%ymm12,%ymm9 vpermq $0x93,%ymm0,%ymm0 vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm9,%ymm4,%ymm4 vpblendd $3,%ymm13,%ymm10,%ymm13 vpaddq %ymm12,%ymm5,%ymm5 vpblendd $3,%ymm10,%ymm11,%ymm10 vpaddq %ymm13,%ymm6,%ymm6 vpblendd $3,%ymm11,%ymm0,%ymm11 vpaddq %ymm10,%ymm7,%ymm7 vpaddq %ymm11,%ymm8,%ymm8 vpsrlq $29,%ymm4,%ymm12 vpand %ymm15,%ymm4,%ymm4 vpsrlq $29,%ymm5,%ymm13 vpand %ymm15,%ymm5,%ymm5 vpsrlq $29,%ymm6,%ymm10 vpermq $0x93,%ymm12,%ymm12 vpand %ymm15,%ymm6,%ymm6 vpsrlq $29,%ymm7,%ymm11 vpermq $0x93,%ymm13,%ymm13 vpand %ymm15,%ymm7,%ymm7 vpsrlq $29,%ymm8,%ymm0 vpermq $0x93,%ymm10,%ymm10 vpand
# Second half of carry normalization (limb vectors 4-8), store of ymm4-ymm8
# to 128..256(-128)(%rdi), vzeroupper to avoid AVX/SSE transition penalties,
# and the standard epilogue restoring r15-r12/rbp/rbx via the %rbp frame
# anchor.  After the .size for rsaz_1024_mul_avx2 the next function begins:
# rsaz_1024_red2norm_avx2(out=%rdi, in=%rsi) — converts the redundant
# 29-bit-limb representation back to 16 contiguous 64-bit words by shifting
# each limb into place (shlq by 0,29,58,23,52,...) and adding with carry
# (adcq $0) into the running word; words 0..32 of the output are produced
# here.
%ymm15,%ymm8,%ymm8 vpermq $0x93,%ymm11,%ymm11 vpblendd $3,%ymm14,%ymm12,%ymm9 vpermq $0x93,%ymm0,%ymm0 vpblendd $3,%ymm12,%ymm13,%ymm12 vpaddq %ymm9,%ymm4,%ymm4 vpblendd $3,%ymm13,%ymm10,%ymm13 vpaddq %ymm12,%ymm5,%ymm5 vpblendd $3,%ymm10,%ymm11,%ymm10 vpaddq %ymm13,%ymm6,%ymm6 vpblendd $3,%ymm11,%ymm0,%ymm11 vpaddq %ymm10,%ymm7,%ymm7 vpaddq %ymm11,%ymm8,%ymm8 vmovdqu %ymm4,128-128(%rdi) vmovdqu %ymm5,160-128(%rdi) vmovdqu %ymm6,192-128(%rdi) vmovdqu %ymm7,224-128(%rdi) vmovdqu %ymm8,256-128(%rdi) vzeroupper movq %rbp,%rax .cfi_def_cfa_register %rax movq -48(%rax),%r15 .cfi_restore %r15 movq -40(%rax),%r14 .cfi_restore %r14 movq -32(%rax),%r13 .cfi_restore %r13 movq -24(%rax),%r12 .cfi_restore %r12 movq -16(%rax),%rbp .cfi_restore %rbp movq -8(%rax),%rbx .cfi_restore %rbx leaq (%rax),%rsp .cfi_def_cfa_register %rsp .Lmul_1024_epilogue: ret .cfi_endproc .size rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2 .globl rsaz_1024_red2norm_avx2 .hidden rsaz_1024_red2norm_avx2 .type rsaz_1024_red2norm_avx2,@function .align 32 rsaz_1024_red2norm_avx2: .cfi_startproc _CET_ENDBR subq $-128,%rsi xorq %rax,%rax movq -128(%rsi),%r8 movq -120(%rsi),%r9 movq -112(%rsi),%r10 shlq $0,%r8 shlq $29,%r9 movq %r10,%r11 shlq $58,%r10 shrq $6,%r11 addq %r8,%rax addq %r9,%rax addq %r10,%rax adcq $0,%r11 movq %rax,0(%rdi) movq %r11,%rax movq -104(%rsi),%r8 movq -96(%rsi),%r9 shlq $23,%r8 movq %r9,%r10 shlq $52,%r9 shrq $12,%r10 addq %r8,%rax addq %r9,%rax adcq $0,%r10 movq %rax,8(%rdi) movq %r10,%rax movq -88(%rsi),%r11 movq -80(%rsi),%r8 shlq $17,%r11 movq %r8,%r9 shlq $46,%r8 shrq $18,%r9 addq %r11,%rax addq %r8,%rax adcq $0,%r9 movq %rax,16(%rdi) movq %r9,%rax movq -72(%rsi),%r10 movq -64(%rsi),%r11 shlq $11,%r10 movq %r11,%r8 shlq $40,%r11 shrq $24,%r8 addq %r10,%rax addq %r11,%rax adcq $0,%r8 movq %rax,24(%rdi) movq %r8,%rax movq -56(%rsi),%r9 movq -48(%rsi),%r10 movq -40(%rsi),%r11 shlq $5,%r9 shlq $34,%r10 movq %r11,%r8 shlq $63,%r11 shrq $1,%r8 addq %r9,%rax addq %r10,%rax addq %r11,%rax adcq
# Continuation of rsaz_1024_red2norm_avx2: the same unrolled shift/add/carry
# pattern produces output words 32..120 of the 1024-bit normal-form result.
# Each group reads two or three 29-bit limbs from (%rsi), aligns them with
# shlq/shrq, sums them into %rax with the carry caught by adcq $0, stores the
# completed 64-bit word to (%rdi), and seeds the next word with the spill-over
# bits.  The final word (120(%rdi)) needs no trailing carry; the function
# then returns.
$0,%r8 movq %rax,32(%rdi) movq %r8,%rax movq -32(%rsi),%r9 movq -24(%rsi),%r10 shlq $28,%r9 movq %r10,%r11 shlq $57,%r10 shrq $7,%r11 addq %r9,%rax addq %r10,%rax adcq $0,%r11 movq %rax,40(%rdi) movq %r11,%rax movq -16(%rsi),%r8 movq -8(%rsi),%r9 shlq $22,%r8 movq %r9,%r10 shlq $51,%r9 shrq $13,%r10 addq %r8,%rax addq %r9,%rax adcq $0,%r10 movq %rax,48(%rdi) movq %r10,%rax movq 0(%rsi),%r11 movq 8(%rsi),%r8 shlq $16,%r11 movq %r8,%r9 shlq $45,%r8 shrq $19,%r9 addq %r11,%rax addq %r8,%rax adcq $0,%r9 movq %rax,56(%rdi) movq %r9,%rax movq 16(%rsi),%r10 movq 24(%rsi),%r11 shlq $10,%r10 movq %r11,%r8 shlq $39,%r11 shrq $25,%r8 addq %r10,%rax addq %r11,%rax adcq $0,%r8 movq %rax,64(%rdi) movq %r8,%rax movq 32(%rsi),%r9 movq 40(%rsi),%r10 movq 48(%rsi),%r11 shlq $4,%r9 shlq $33,%r10 movq %r11,%r8 shlq $62,%r11 shrq $2,%r8 addq %r9,%rax addq %r10,%rax addq %r11,%rax adcq $0,%r8 movq %rax,72(%rdi) movq %r8,%rax movq 56(%rsi),%r9 movq 64(%rsi),%r10 shlq $27,%r9 movq %r10,%r11 shlq $56,%r10 shrq $8,%r11 addq %r9,%rax addq %r10,%rax adcq $0,%r11 movq %rax,80(%rdi) movq %r11,%rax movq 72(%rsi),%r8 movq 80(%rsi),%r9 shlq $21,%r8 movq %r9,%r10 shlq $50,%r9 shrq $14,%r10 addq %r8,%rax addq %r9,%rax adcq $0,%r10 movq %rax,88(%rdi) movq %r10,%rax movq 88(%rsi),%r11 movq 96(%rsi),%r8 shlq $15,%r11 movq %r8,%r9 shlq $44,%r8 shrq $20,%r9 addq %r11,%rax addq %r8,%rax adcq $0,%r9 movq %rax,96(%rdi) movq %r9,%rax movq 104(%rsi),%r10 movq 112(%rsi),%r11 shlq $9,%r10 movq %r11,%r8 shlq $38,%r11 shrq $26,%r8 addq %r10,%rax addq %r11,%rax adcq $0,%r8 movq %rax,104(%rdi) movq %r8,%rax movq 120(%rsi),%r9 movq 128(%rsi),%r10 movq 136(%rsi),%r11 shlq $3,%r9 shlq $32,%r10 movq %r11,%r8 shlq $61,%r11 shrq $3,%r8 addq %r9,%rax addq %r10,%rax addq %r11,%rax adcq $0,%r8 movq %rax,112(%rdi) movq %r8,%rax movq 144(%rsi),%r9 movq 152(%rsi),%r10 shlq $26,%r9 movq %r10,%r11 shlq $55,%r10 shrq $9,%r11 addq %r9,%rax addq %r10,%rax adcq $0,%r11 movq %rax,120(%rdi) movq %r11,%rax ret .cfi_endproc .size
# .size trailer of red2norm, then the inverse routine:
# rsaz_1024_norm2red_avx2(out=%rdi, in=%rsi) — splits 16 contiguous 64-bit
# words into 29-bit limbs.  %rax holds the 0x1fffffff limb mask throughout;
# limbs fully inside one word are extracted with shrq+andq, limbs straddling
# two words with shrdq (double-precision shift) + andq.  The output pointer is
# pre-biased by +128 (subq $-128,%rdi) so stores run from -128(%rdi) upward,
# mirroring the layout the AVX2 multiply/square routines consume.
rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2 .globl rsaz_1024_norm2red_avx2 .hidden rsaz_1024_norm2red_avx2 .type rsaz_1024_norm2red_avx2,@function .align 32 rsaz_1024_norm2red_avx2: .cfi_startproc _CET_ENDBR subq $-128,%rdi movq (%rsi),%r8 movl $0x1fffffff,%eax movq 8(%rsi),%r9 movq %r8,%r11 shrq $0,%r11 andq %rax,%r11 movq %r11,-128(%rdi) movq %r8,%r10 shrq $29,%r10 andq %rax,%r10 movq %r10,-120(%rdi) shrdq $58,%r9,%r8 andq %rax,%r8 movq %r8,-112(%rdi) movq 16(%rsi),%r10 movq %r9,%r8 shrq $23,%r8 andq %rax,%r8 movq %r8,-104(%rdi) shrdq $52,%r10,%r9 andq %rax,%r9 movq %r9,-96(%rdi) movq 24(%rsi),%r11 movq %r10,%r9 shrq $17,%r9 andq %rax,%r9 movq %r9,-88(%rdi) shrdq $46,%r11,%r10 andq %rax,%r10 movq %r10,-80(%rdi) movq 32(%rsi),%r8 movq %r11,%r10 shrq $11,%r10 andq %rax,%r10 movq %r10,-72(%rdi) shrdq $40,%r8,%r11 andq %rax,%r11 movq %r11,-64(%rdi) movq 40(%rsi),%r9 movq %r8,%r11 shrq $5,%r11 andq %rax,%r11 movq %r11,-56(%rdi) movq %r8,%r10 shrq $34,%r10 andq %rax,%r10 movq %r10,-48(%rdi) shrdq $63,%r9,%r8 andq %rax,%r8 movq %r8,-40(%rdi) movq 48(%rsi),%r10 movq %r9,%r8 shrq $28,%r8 andq %rax,%r8 movq %r8,-32(%rdi) shrdq $57,%r10,%r9 andq %rax,%r9 movq %r9,-24(%rdi) movq 56(%rsi),%r11 movq %r10,%r9 shrq $22,%r9 andq %rax,%r9 movq %r9,-16(%rdi) shrdq $51,%r11,%r10 andq %rax,%r10 movq %r10,-8(%rdi) movq 64(%rsi),%r8 movq %r11,%r10 shrq $16,%r10 andq %rax,%r10 movq %r10,0(%rdi) shrdq $45,%r8,%r11 andq %rax,%r11 movq %r11,8(%rdi) movq 72(%rsi),%r9 movq %r8,%r11 shrq $10,%r11 andq %rax,%r11 movq %r11,16(%rdi) shrdq $39,%r9,%r8 andq %rax,%r8 movq %r8,24(%rdi) movq 80(%rsi),%r10 movq %r9,%r8 shrq $4,%r8 andq %rax,%r8 movq %r8,32(%rdi) movq %r9,%r11 shrq $33,%r11 andq %rax,%r11 movq %r11,40(%rdi) shrdq $62,%r10,%r9 andq %rax,%r9 movq %r9,48(%rdi) movq 88(%rsi),%r11 movq %r10,%r9 shrq $27,%r9 andq %rax,%r9 movq %r9,56(%rdi) shrdq $56,%r11,%r10 andq %rax,%r10 movq %r10,64(%rdi) movq 96(%rsi),%r8 movq %r11,%r10 shrq $21,%r10 andq %rax,%r10 movq %r10,72(%rdi) shrdq
# Tail of rsaz_1024_norm2red_avx2: extracts the remaining limbs (80..152 of
# the output), using a zeroed %r8 as the high half of the final shrdq, and
# zero-fills limbs 160..184 so the redundant form always spans a whole number
# of 32-byte vectors.
# Then rsaz_1024_scatter5_avx2(tbl=%rdi, val=%rsi, idx=%edx): stores one
# redundant-form value into column idx of a power table.  idx is scaled by 16
# (shll $4) into the table; each of the 9 iterations compacts a 32-byte vector
# via vpermd with .Lscatter_permd and writes one 16-byte row, advancing
# 512 bytes per row — i.e. entries are interleaved so a later gather touches
# every column, a cache-timing defense (NOTE(review): layout inferred from the
# 512-byte stride and the constant-time gather below — confirm against the
# perlasm source).
# Then rsaz_1024_gather5_avx2(out=%rdi, tbl=%rsi, idx=%edx) begins: after
# aligning a 256-byte scratch area, it broadcasts idx (%ymm4) and uses
# .Linc-derived counters with vpcmpeqd to build one all-ones/all-zeros
# selection mask per possible index value, spilling the first eight masks to
# the stack and keeping the rest in ymm8-ymm15.
$50,%r8,%r11 andq %rax,%r11 movq %r11,80(%rdi) movq 104(%rsi),%r9 movq %r8,%r11 shrq $15,%r11 andq %rax,%r11 movq %r11,88(%rdi) shrdq $44,%r9,%r8 andq %rax,%r8 movq %r8,96(%rdi) movq 112(%rsi),%r10 movq %r9,%r8 shrq $9,%r8 andq %rax,%r8 movq %r8,104(%rdi) shrdq $38,%r10,%r9 andq %rax,%r9 movq %r9,112(%rdi) movq 120(%rsi),%r11 movq %r10,%r9 shrq $3,%r9 andq %rax,%r9 movq %r9,120(%rdi) movq %r10,%r8 shrq $32,%r8 andq %rax,%r8 movq %r8,128(%rdi) shrdq $61,%r11,%r10 andq %rax,%r10 movq %r10,136(%rdi) xorq %r8,%r8 movq %r11,%r10 shrq $26,%r10 andq %rax,%r10 movq %r10,144(%rdi) shrdq $55,%r8,%r11 andq %rax,%r11 movq %r11,152(%rdi) movq %r8,160(%rdi) movq %r8,168(%rdi) movq %r8,176(%rdi) movq %r8,184(%rdi) ret .cfi_endproc .size rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2 .globl rsaz_1024_scatter5_avx2 .hidden rsaz_1024_scatter5_avx2 .type rsaz_1024_scatter5_avx2,@function .align 32 rsaz_1024_scatter5_avx2: .cfi_startproc _CET_ENDBR vzeroupper vmovdqu .Lscatter_permd(%rip),%ymm5 shll $4,%edx leaq (%rdi,%rdx,1),%rdi movl $9,%eax jmp .Loop_scatter_1024 .align 32 .Loop_scatter_1024: vmovdqu (%rsi),%ymm0 leaq 32(%rsi),%rsi vpermd %ymm0,%ymm5,%ymm0 vmovdqu %xmm0,(%rdi) leaq 512(%rdi),%rdi decl %eax jnz .Loop_scatter_1024 vzeroupper ret .cfi_endproc .size rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2 .globl rsaz_1024_gather5_avx2 .hidden rsaz_1024_gather5_avx2 .type rsaz_1024_gather5_avx2,@function .align 32 rsaz_1024_gather5_avx2: .cfi_startproc _CET_ENDBR vzeroupper movq %rsp,%r11 .cfi_def_cfa_register %r11 leaq -256(%rsp),%rsp andq $-32,%rsp leaq .Linc(%rip),%r10 leaq -128(%rsp),%rax vmovd %edx,%xmm4 vmovdqa (%r10),%ymm0 vmovdqa 32(%r10),%ymm1 vmovdqa 64(%r10),%ymm5 vpbroadcastd %xmm4,%ymm4 vpaddd %ymm5,%ymm0,%ymm2 vpcmpeqd %ymm4,%ymm0,%ymm0 vpaddd %ymm5,%ymm1,%ymm3 vpcmpeqd %ymm4,%ymm1,%ymm1 vmovdqa %ymm0,0+128(%rax) vpaddd %ymm5,%ymm2,%ymm0 vpcmpeqd %ymm4,%ymm2,%ymm2 vmovdqa %ymm1,32+128(%rax) vpaddd %ymm5,%ymm3,%ymm1 vpcmpeqd %ymm4,%ymm3,%ymm3 vmovdqa
# Continuation of rsaz_1024_gather5_avx2 mask construction: the remaining
# vpaddd/vpcmpeqd chain finishes the per-index equality masks (eight stored at
# 0..224+128(%rax), the rest live in ymm8-ymm15), loads the output
# permutation .Lgather_permd into %ymm7, biases the table pointer by +128,
# and sets the row counter to 9.
# .Loop_gather_1024: constant-time table lookup — every one of the table
# columns in a row is read and ANDed with its selection mask (vpand against
# the stacked masks for the first eight columns, against ymm8-ymm15 for the
# rest), and the results are OR-folded through ymm4/ymm5 down to one 128-bit
# lane (vextracti128 + vpor), so memory access patterns are independent of
# the secret index.
%ymm2,64+128(%rax) vpaddd %ymm5,%ymm0,%ymm2 vpcmpeqd %ymm4,%ymm0,%ymm0 vmovdqa %ymm3,96+128(%rax) vpaddd %ymm5,%ymm1,%ymm3 vpcmpeqd %ymm4,%ymm1,%ymm1 vmovdqa %ymm0,128+128(%rax) vpaddd %ymm5,%ymm2,%ymm8 vpcmpeqd %ymm4,%ymm2,%ymm2 vmovdqa %ymm1,160+128(%rax) vpaddd %ymm5,%ymm3,%ymm9 vpcmpeqd %ymm4,%ymm3,%ymm3 vmovdqa %ymm2,192+128(%rax) vpaddd %ymm5,%ymm8,%ymm10 vpcmpeqd %ymm4,%ymm8,%ymm8 vmovdqa %ymm3,224+128(%rax) vpaddd %ymm5,%ymm9,%ymm11 vpcmpeqd %ymm4,%ymm9,%ymm9 vpaddd %ymm5,%ymm10,%ymm12 vpcmpeqd %ymm4,%ymm10,%ymm10 vpaddd %ymm5,%ymm11,%ymm13 vpcmpeqd %ymm4,%ymm11,%ymm11 vpaddd %ymm5,%ymm12,%ymm14 vpcmpeqd %ymm4,%ymm12,%ymm12 vpaddd %ymm5,%ymm13,%ymm15 vpcmpeqd %ymm4,%ymm13,%ymm13 vpcmpeqd %ymm4,%ymm14,%ymm14 vpcmpeqd %ymm4,%ymm15,%ymm15 vmovdqa -32(%r10),%ymm7 leaq 128(%rsi),%rsi movl $9,%edx .Loop_gather_1024: vmovdqa 0-128(%rsi),%ymm0 vmovdqa 32-128(%rsi),%ymm1 vmovdqa 64-128(%rsi),%ymm2 vmovdqa 96-128(%rsi),%ymm3 vpand 0+128(%rax),%ymm0,%ymm0 vpand 32+128(%rax),%ymm1,%ymm1 vpand 64+128(%rax),%ymm2,%ymm2 vpor %ymm0,%ymm1,%ymm4 vpand 96+128(%rax),%ymm3,%ymm3 vmovdqa 128-128(%rsi),%ymm0 vmovdqa 160-128(%rsi),%ymm1 vpor %ymm2,%ymm3,%ymm5 vmovdqa 192-128(%rsi),%ymm2 vmovdqa 224-128(%rsi),%ymm3 vpand 128+128(%rax),%ymm0,%ymm0 vpand 160+128(%rax),%ymm1,%ymm1 vpand 192+128(%rax),%ymm2,%ymm2 vpor %ymm0,%ymm4,%ymm4 vpand 224+128(%rax),%ymm3,%ymm3 vpand 256-128(%rsi),%ymm8,%ymm0 vpor %ymm1,%ymm5,%ymm5 vpand 288-128(%rsi),%ymm9,%ymm1 vpor %ymm2,%ymm4,%ymm4 vpand 320-128(%rsi),%ymm10,%ymm2 vpor %ymm3,%ymm5,%ymm5 vpand 352-128(%rsi),%ymm11,%ymm3 vpor %ymm0,%ymm4,%ymm4 vpand 384-128(%rsi),%ymm12,%ymm0 vpor %ymm1,%ymm5,%ymm5 vpand 416-128(%rsi),%ymm13,%ymm1 vpor %ymm2,%ymm4,%ymm4 vpand 448-128(%rsi),%ymm14,%ymm2 vpor %ymm3,%ymm5,%ymm5 vpand 480-128(%rsi),%ymm15,%ymm3 leaq 512(%rsi),%rsi vpor %ymm0,%ymm4,%ymm4 vpor %ymm1,%ymm5,%ymm5 vpor %ymm2,%ymm4,%ymm4 vpor %ymm3,%ymm5,%ymm5 vpor %ymm5,%ymm4,%ymm4 vextracti128 $1,%ymm4,%xmm5 vpor %xmm4,%xmm5,%xmm5 vpermd
# Gather loop tail: the selected 128-bit row is expanded back to redundant
# 29-bit layout via vpermd with .Lgather_permd (%ymm7) and written out, 32
# bytes per iteration for 9 iterations; a trailing zero vector is stored
# after the last row, registers are cleared (vzeroupper) and %rsp restored
# from %r11.
# Read-only data for all routines above: .Land_mask (four 29-bit limb masks),
# .Lscatter_permd/.Lgather_permd (vpermd lane tables for packing/unpacking
# rows), .Linc (0..3 lane counters plus the +4 increment used when building
# gather masks), followed by the non-executable-stack ELF note closing this
# generated file.
# Everything after the "FILE:" banner below is repo-dump scaffolding and the
# start of the next generated source (sha1-586-apple.S, 32-bit macOS SHA-1);
# _sha1_block_data_order_nohw continues beyond this excerpt and is left
# untouched here.
%ymm5,%ymm7,%ymm5 vmovdqu %ymm5,(%rdi) leaq 32(%rdi),%rdi decl %edx jnz .Loop_gather_1024 vpxor %ymm0,%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) vzeroupper leaq (%r11),%rsp .cfi_def_cfa_register %rsp ret .cfi_endproc .LSEH_end_rsaz_1024_gather5: .size rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2 .section .rodata .align 64 .Land_mask: .quad 0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff .Lscatter_permd: .long 0,2,4,6,7,7,7,7 .Lgather_permd: .long 0,7,1,7,2,7,3,7 .Linc: .long 0,0,0,0, 1,1,1,1 .long 2,2,2,2, 3,3,3,3 .long 4,4,4,4, 4,4,4,4 .align 64 .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha1-586-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _sha1_block_data_order_nohw .private_extern _sha1_block_data_order_nohw .align 4 _sha1_block_data_order_nohw: L_sha1_block_data_order_nohw_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%ebp movl 24(%esp),%esi movl 28(%esp),%eax subl $76,%esp shll $6,%eax addl %esi,%eax movl %eax,104(%esp) movl 16(%ebp),%edi jmp L000loop .align 4,0x90 L000loop: movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx movl %eax,(%esp) movl %ebx,4(%esp) movl %ecx,8(%esp) movl %edx,12(%esp) movl 16(%esi),%eax movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx movl %eax,16(%esp) movl %ebx,20(%esp) movl %ecx,24(%esp) movl %edx,28(%esp) movl 32(%esi),%eax movl 36(%esi),%ebx movl 40(%esi),%ecx movl 44(%esi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx movl %eax,32(%esp) movl %ebx,36(%esp) movl %ecx,40(%esp) movl %edx,44(%esp) movl
48(%esi),%eax movl 52(%esi),%ebx movl 56(%esi),%ecx movl 60(%esi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx movl %eax,48(%esp) movl %ebx,52(%esp) movl %ecx,56(%esp) movl %edx,60(%esp) movl %esi,100(%esp) movl (%ebp),%eax movl 4(%ebp),%ebx movl 8(%ebp),%ecx movl 12(%ebp),%edx # 00_15 0 movl %ecx,%esi movl %eax,%ebp roll $5,%ebp xorl %edx,%esi addl %edi,%ebp movl (%esp),%edi andl %ebx,%esi rorl $2,%ebx xorl %edx,%esi leal 1518500249(%ebp,%edi,1),%ebp addl %esi,%ebp # 00_15 1 movl %ebx,%edi movl %ebp,%esi roll $5,%ebp xorl %ecx,%edi addl %edx,%ebp movl 4(%esp),%edx andl %eax,%edi rorl $2,%eax xorl %ecx,%edi leal 1518500249(%ebp,%edx,1),%ebp addl %edi,%ebp # 00_15 2 movl %eax,%edx movl %ebp,%edi roll $5,%ebp xorl %ebx,%edx addl %ecx,%ebp movl 8(%esp),%ecx andl %esi,%edx rorl $2,%esi xorl %ebx,%edx leal 1518500249(%ebp,%ecx,1),%ebp addl %edx,%ebp # 00_15 3 movl %esi,%ecx movl %ebp,%edx roll $5,%ebp xorl %eax,%ecx addl %ebx,%ebp movl 12(%esp),%ebx andl %edi,%ecx rorl $2,%edi xorl %eax,%ecx leal 1518500249(%ebp,%ebx,1),%ebp addl %ecx,%ebp # 00_15 4 movl %edi,%ebx movl %ebp,%ecx roll $5,%ebp xorl %esi,%ebx addl %eax,%ebp movl 16(%esp),%eax andl %edx,%ebx rorl $2,%edx xorl %esi,%ebx leal 1518500249(%ebp,%eax,1),%ebp addl %ebx,%ebp # 00_15 5 movl %edx,%eax movl %ebp,%ebx roll $5,%ebp xorl %edi,%eax addl %esi,%ebp movl 20(%esp),%esi andl %ecx,%eax rorl $2,%ecx xorl %edi,%eax leal 1518500249(%ebp,%esi,1),%ebp addl %eax,%ebp # 00_15 6 movl %ecx,%esi movl %ebp,%eax roll $5,%ebp xorl %edx,%esi addl %edi,%ebp movl 24(%esp),%edi andl %ebx,%esi rorl $2,%ebx xorl %edx,%esi leal 1518500249(%ebp,%edi,1),%ebp addl %esi,%ebp # 00_15 7 movl %ebx,%edi movl %ebp,%esi roll $5,%ebp xorl %ecx,%edi addl %edx,%ebp movl 28(%esp),%edx andl %eax,%edi rorl $2,%eax xorl %ecx,%edi leal 1518500249(%ebp,%edx,1),%ebp addl %edi,%ebp # 00_15 8 movl %eax,%edx movl %ebp,%edi roll $5,%ebp xorl %ebx,%edx addl %ecx,%ebp movl 32(%esp),%ecx andl %esi,%edx rorl $2,%esi xorl %ebx,%edx leal 
1518500249(%ebp,%ecx,1),%ebp addl %edx,%ebp # 00_15 9 movl %esi,%ecx movl %ebp,%edx roll $5,%ebp xorl %eax,%ecx addl %ebx,%ebp movl 36(%esp),%ebx andl %edi,%ecx rorl $2,%edi xorl %eax,%ecx leal 1518500249(%ebp,%ebx,1),%ebp addl %ecx,%ebp # 00_15 10 movl %edi,%ebx movl %ebp,%ecx roll $5,%ebp xorl %esi,%ebx addl %eax,%ebp movl 40(%esp),%eax andl %edx,%ebx rorl $2,%edx xorl %esi,%ebx leal 1518500249(%ebp,%eax,1),%ebp addl %ebx,%ebp # 00_15 11 movl %edx,%eax movl %ebp,%ebx roll $5,%ebp xorl %edi,%eax addl %esi,%ebp movl 44(%esp),%esi andl %ecx,%eax rorl $2,%ecx xorl %edi,%eax leal 1518500249(%ebp,%esi,1),%ebp addl %eax,%ebp # 00_15 12 movl %ecx,%esi movl %ebp,%eax roll $5,%ebp xorl %edx,%esi addl %edi,%ebp movl 48(%esp),%edi andl %ebx,%esi rorl $2,%ebx xorl %edx,%esi leal 1518500249(%ebp,%edi,1),%ebp addl %esi,%ebp # 00_15 13 movl %ebx,%edi movl %ebp,%esi roll $5,%ebp xorl %ecx,%edi addl %edx,%ebp movl 52(%esp),%edx andl %eax,%edi rorl $2,%eax xorl %ecx,%edi leal 1518500249(%ebp,%edx,1),%ebp addl %edi,%ebp # 00_15 14 movl %eax,%edx movl %ebp,%edi roll $5,%ebp xorl %ebx,%edx addl %ecx,%ebp movl 56(%esp),%ecx andl %esi,%edx rorl $2,%esi xorl %ebx,%edx leal 1518500249(%ebp,%ecx,1),%ebp addl %edx,%ebp # 00_15 15 movl %esi,%ecx movl %ebp,%edx roll $5,%ebp xorl %eax,%ecx addl %ebx,%ebp movl 60(%esp),%ebx andl %edi,%ecx rorl $2,%edi xorl %eax,%ecx leal 1518500249(%ebp,%ebx,1),%ebp movl (%esp),%ebx addl %ebp,%ecx # 16_19 16 movl %edi,%ebp xorl 8(%esp),%ebx xorl %esi,%ebp xorl 32(%esp),%ebx andl %edx,%ebp xorl 52(%esp),%ebx roll $1,%ebx xorl %esi,%ebp addl %ebp,%eax movl %ecx,%ebp rorl $2,%edx movl %ebx,(%esp) roll $5,%ebp leal 1518500249(%ebx,%eax,1),%ebx movl 4(%esp),%eax addl %ebp,%ebx # 16_19 17 movl %edx,%ebp xorl 12(%esp),%eax xorl %edi,%ebp xorl 36(%esp),%eax andl %ecx,%ebp xorl 56(%esp),%eax roll $1,%eax xorl %edi,%ebp addl %ebp,%esi movl %ebx,%ebp rorl $2,%ecx movl %eax,4(%esp) roll $5,%ebp leal 1518500249(%eax,%esi,1),%eax movl 8(%esp),%esi addl %ebp,%eax # 16_19 18 
movl %ecx,%ebp xorl 16(%esp),%esi xorl %edx,%ebp xorl 40(%esp),%esi andl %ebx,%ebp xorl 60(%esp),%esi roll $1,%esi xorl %edx,%ebp addl %ebp,%edi movl %eax,%ebp rorl $2,%ebx movl %esi,8(%esp) roll $5,%ebp leal 1518500249(%esi,%edi,1),%esi movl 12(%esp),%edi addl %ebp,%esi # 16_19 19 movl %ebx,%ebp xorl 20(%esp),%edi xorl %ecx,%ebp xorl 44(%esp),%edi andl %eax,%ebp xorl (%esp),%edi roll $1,%edi xorl %ecx,%ebp addl %ebp,%edx movl %esi,%ebp rorl $2,%eax movl %edi,12(%esp) roll $5,%ebp leal 1518500249(%edi,%edx,1),%edi movl 16(%esp),%edx addl %ebp,%edi # 20_39 20 movl %esi,%ebp xorl 24(%esp),%edx xorl %eax,%ebp xorl 48(%esp),%edx xorl %ebx,%ebp xorl 4(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,16(%esp) leal 1859775393(%edx,%ecx,1),%edx movl 20(%esp),%ecx addl %ebp,%edx # 20_39 21 movl %edi,%ebp xorl 28(%esp),%ecx xorl %esi,%ebp xorl 52(%esp),%ecx xorl %eax,%ebp xorl 8(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,20(%esp) leal 1859775393(%ecx,%ebx,1),%ecx movl 24(%esp),%ebx addl %ebp,%ecx # 20_39 22 movl %edx,%ebp xorl 32(%esp),%ebx xorl %edi,%ebp xorl 56(%esp),%ebx xorl %esi,%ebp xorl 12(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,24(%esp) leal 1859775393(%ebx,%eax,1),%ebx movl 28(%esp),%eax addl %ebp,%ebx # 20_39 23 movl %ecx,%ebp xorl 36(%esp),%eax xorl %edx,%ebp xorl 60(%esp),%eax xorl %edi,%ebp xorl 16(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,28(%esp) leal 1859775393(%eax,%esi,1),%eax movl 32(%esp),%esi addl %ebp,%eax # 20_39 24 movl %ebx,%ebp xorl 40(%esp),%esi xorl %ecx,%ebp xorl (%esp),%esi xorl %edx,%ebp xorl 20(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,32(%esp) leal 1859775393(%esi,%edi,1),%esi movl 36(%esp),%edi addl %ebp,%esi # 20_39 25 movl %eax,%ebp xorl 44(%esp),%edi xorl %ebx,%ebp xorl 4(%esp),%edi xorl %ecx,%ebp xorl 
24(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,36(%esp) leal 1859775393(%edi,%edx,1),%edi movl 40(%esp),%edx addl %ebp,%edi # 20_39 26 movl %esi,%ebp xorl 48(%esp),%edx xorl %eax,%ebp xorl 8(%esp),%edx xorl %ebx,%ebp xorl 28(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,40(%esp) leal 1859775393(%edx,%ecx,1),%edx movl 44(%esp),%ecx addl %ebp,%edx # 20_39 27 movl %edi,%ebp xorl 52(%esp),%ecx xorl %esi,%ebp xorl 12(%esp),%ecx xorl %eax,%ebp xorl 32(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,44(%esp) leal 1859775393(%ecx,%ebx,1),%ecx movl 48(%esp),%ebx addl %ebp,%ecx # 20_39 28 movl %edx,%ebp xorl 56(%esp),%ebx xorl %edi,%ebp xorl 16(%esp),%ebx xorl %esi,%ebp xorl 36(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,48(%esp) leal 1859775393(%ebx,%eax,1),%ebx movl 52(%esp),%eax addl %ebp,%ebx # 20_39 29 movl %ecx,%ebp xorl 60(%esp),%eax xorl %edx,%ebp xorl 20(%esp),%eax xorl %edi,%ebp xorl 40(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,52(%esp) leal 1859775393(%eax,%esi,1),%eax movl 56(%esp),%esi addl %ebp,%eax # 20_39 30 movl %ebx,%ebp xorl (%esp),%esi xorl %ecx,%ebp xorl 24(%esp),%esi xorl %edx,%ebp xorl 44(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,56(%esp) leal 1859775393(%esi,%edi,1),%esi movl 60(%esp),%edi addl %ebp,%esi # 20_39 31 movl %eax,%ebp xorl 4(%esp),%edi xorl %ebx,%ebp xorl 28(%esp),%edi xorl %ecx,%ebp xorl 48(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,60(%esp) leal 1859775393(%edi,%edx,1),%edi movl (%esp),%edx addl %ebp,%edi # 20_39 32 movl %esi,%ebp xorl 8(%esp),%edx xorl %eax,%ebp xorl 32(%esp),%edx xorl %ebx,%ebp xorl 52(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,(%esp) leal 
1859775393(%edx,%ecx,1),%edx movl 4(%esp),%ecx addl %ebp,%edx # 20_39 33 movl %edi,%ebp xorl 12(%esp),%ecx xorl %esi,%ebp xorl 36(%esp),%ecx xorl %eax,%ebp xorl 56(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,4(%esp) leal 1859775393(%ecx,%ebx,1),%ecx movl 8(%esp),%ebx addl %ebp,%ecx # 20_39 34 movl %edx,%ebp xorl 16(%esp),%ebx xorl %edi,%ebp xorl 40(%esp),%ebx xorl %esi,%ebp xorl 60(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,8(%esp) leal 1859775393(%ebx,%eax,1),%ebx movl 12(%esp),%eax addl %ebp,%ebx # 20_39 35 movl %ecx,%ebp xorl 20(%esp),%eax xorl %edx,%ebp xorl 44(%esp),%eax xorl %edi,%ebp xorl (%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,12(%esp) leal 1859775393(%eax,%esi,1),%eax movl 16(%esp),%esi addl %ebp,%eax # 20_39 36 movl %ebx,%ebp xorl 24(%esp),%esi xorl %ecx,%ebp xorl 48(%esp),%esi xorl %edx,%ebp xorl 4(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,16(%esp) leal 1859775393(%esi,%edi,1),%esi movl 20(%esp),%edi addl %ebp,%esi # 20_39 37 movl %eax,%ebp xorl 28(%esp),%edi xorl %ebx,%ebp xorl 52(%esp),%edi xorl %ecx,%ebp xorl 8(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,20(%esp) leal 1859775393(%edi,%edx,1),%edi movl 24(%esp),%edx addl %ebp,%edi # 20_39 38 movl %esi,%ebp xorl 32(%esp),%edx xorl %eax,%ebp xorl 56(%esp),%edx xorl %ebx,%ebp xorl 12(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,24(%esp) leal 1859775393(%edx,%ecx,1),%edx movl 28(%esp),%ecx addl %ebp,%edx # 20_39 39 movl %edi,%ebp xorl 36(%esp),%ecx xorl %esi,%ebp xorl 60(%esp),%ecx xorl %eax,%ebp xorl 16(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,28(%esp) leal 1859775393(%ecx,%ebx,1),%ecx movl 32(%esp),%ebx addl %ebp,%ecx # 40_59 40 movl %edi,%ebp xorl 40(%esp),%ebx xorl %esi,%ebp 
xorl (%esp),%ebx andl %edx,%ebp xorl 20(%esp),%ebx roll $1,%ebx addl %eax,%ebp rorl $2,%edx movl %ecx,%eax roll $5,%eax movl %ebx,32(%esp) leal 2400959708(%ebx,%ebp,1),%ebx movl %edi,%ebp addl %eax,%ebx andl %esi,%ebp movl 36(%esp),%eax addl %ebp,%ebx # 40_59 41 movl %edx,%ebp xorl 44(%esp),%eax xorl %edi,%ebp xorl 4(%esp),%eax andl %ecx,%ebp xorl 24(%esp),%eax roll $1,%eax addl %esi,%ebp rorl $2,%ecx movl %ebx,%esi roll $5,%esi movl %eax,36(%esp) leal 2400959708(%eax,%ebp,1),%eax movl %edx,%ebp addl %esi,%eax andl %edi,%ebp movl 40(%esp),%esi addl %ebp,%eax # 40_59 42 movl %ecx,%ebp xorl 48(%esp),%esi xorl %edx,%ebp xorl 8(%esp),%esi andl %ebx,%ebp xorl 28(%esp),%esi roll $1,%esi addl %edi,%ebp rorl $2,%ebx movl %eax,%edi roll $5,%edi movl %esi,40(%esp) leal 2400959708(%esi,%ebp,1),%esi movl %ecx,%ebp addl %edi,%esi andl %edx,%ebp movl 44(%esp),%edi addl %ebp,%esi # 40_59 43 movl %ebx,%ebp xorl 52(%esp),%edi xorl %ecx,%ebp xorl 12(%esp),%edi andl %eax,%ebp xorl 32(%esp),%edi roll $1,%edi addl %edx,%ebp rorl $2,%eax movl %esi,%edx roll $5,%edx movl %edi,44(%esp) leal 2400959708(%edi,%ebp,1),%edi movl %ebx,%ebp addl %edx,%edi andl %ecx,%ebp movl 48(%esp),%edx addl %ebp,%edi # 40_59 44 movl %eax,%ebp xorl 56(%esp),%edx xorl %ebx,%ebp xorl 16(%esp),%edx andl %esi,%ebp xorl 36(%esp),%edx roll $1,%edx addl %ecx,%ebp rorl $2,%esi movl %edi,%ecx roll $5,%ecx movl %edx,48(%esp) leal 2400959708(%edx,%ebp,1),%edx movl %eax,%ebp addl %ecx,%edx andl %ebx,%ebp movl 52(%esp),%ecx addl %ebp,%edx # 40_59 45 movl %esi,%ebp xorl 60(%esp),%ecx xorl %eax,%ebp xorl 20(%esp),%ecx andl %edi,%ebp xorl 40(%esp),%ecx roll $1,%ecx addl %ebx,%ebp rorl $2,%edi movl %edx,%ebx roll $5,%ebx movl %ecx,52(%esp) leal 2400959708(%ecx,%ebp,1),%ecx movl %esi,%ebp addl %ebx,%ecx andl %eax,%ebp movl 56(%esp),%ebx addl %ebp,%ecx # 40_59 46 movl %edi,%ebp xorl (%esp),%ebx xorl %esi,%ebp xorl 24(%esp),%ebx andl %edx,%ebp xorl 44(%esp),%ebx roll $1,%ebx addl %eax,%ebp rorl $2,%edx movl %ecx,%eax roll $5,%eax 
movl %ebx,56(%esp) leal 2400959708(%ebx,%ebp,1),%ebx movl %edi,%ebp addl %eax,%ebx andl %esi,%ebp movl 60(%esp),%eax addl %ebp,%ebx # 40_59 47 movl %edx,%ebp xorl 4(%esp),%eax xorl %edi,%ebp xorl 28(%esp),%eax andl %ecx,%ebp xorl 48(%esp),%eax roll $1,%eax addl %esi,%ebp rorl $2,%ecx movl %ebx,%esi roll $5,%esi movl %eax,60(%esp) leal 2400959708(%eax,%ebp,1),%eax movl %edx,%ebp addl %esi,%eax andl %edi,%ebp movl (%esp),%esi addl %ebp,%eax # 40_59 48 movl %ecx,%ebp xorl 8(%esp),%esi xorl %edx,%ebp xorl 32(%esp),%esi andl %ebx,%ebp xorl 52(%esp),%esi roll $1,%esi addl %edi,%ebp rorl $2,%ebx movl %eax,%edi roll $5,%edi movl %esi,(%esp) leal 2400959708(%esi,%ebp,1),%esi movl %ecx,%ebp addl %edi,%esi andl %edx,%ebp movl 4(%esp),%edi addl %ebp,%esi # 40_59 49 movl %ebx,%ebp xorl 12(%esp),%edi xorl %ecx,%ebp xorl 36(%esp),%edi andl %eax,%ebp xorl 56(%esp),%edi roll $1,%edi addl %edx,%ebp rorl $2,%eax movl %esi,%edx roll $5,%edx movl %edi,4(%esp) leal 2400959708(%edi,%ebp,1),%edi movl %ebx,%ebp addl %edx,%edi andl %ecx,%ebp movl 8(%esp),%edx addl %ebp,%edi # 40_59 50 movl %eax,%ebp xorl 16(%esp),%edx xorl %ebx,%ebp xorl 40(%esp),%edx andl %esi,%ebp xorl 60(%esp),%edx roll $1,%edx addl %ecx,%ebp rorl $2,%esi movl %edi,%ecx roll $5,%ecx movl %edx,8(%esp) leal 2400959708(%edx,%ebp,1),%edx movl %eax,%ebp addl %ecx,%edx andl %ebx,%ebp movl 12(%esp),%ecx addl %ebp,%edx # 40_59 51 movl %esi,%ebp xorl 20(%esp),%ecx xorl %eax,%ebp xorl 44(%esp),%ecx andl %edi,%ebp xorl (%esp),%ecx roll $1,%ecx addl %ebx,%ebp rorl $2,%edi movl %edx,%ebx roll $5,%ebx movl %ecx,12(%esp) leal 2400959708(%ecx,%ebp,1),%ecx movl %esi,%ebp addl %ebx,%ecx andl %eax,%ebp movl 16(%esp),%ebx addl %ebp,%ecx # 40_59 52 movl %edi,%ebp xorl 24(%esp),%ebx xorl %esi,%ebp xorl 48(%esp),%ebx andl %edx,%ebp xorl 4(%esp),%ebx roll $1,%ebx addl %eax,%ebp rorl $2,%edx movl %ecx,%eax roll $5,%eax movl %ebx,16(%esp) leal 2400959708(%ebx,%ebp,1),%ebx movl %edi,%ebp addl %eax,%ebx andl %esi,%ebp movl 20(%esp),%eax addl 
%ebp,%ebx # 40_59 53 movl %edx,%ebp xorl 28(%esp),%eax xorl %edi,%ebp xorl 52(%esp),%eax andl %ecx,%ebp xorl 8(%esp),%eax roll $1,%eax addl %esi,%ebp rorl $2,%ecx movl %ebx,%esi roll $5,%esi movl %eax,20(%esp) leal 2400959708(%eax,%ebp,1),%eax movl %edx,%ebp addl %esi,%eax andl %edi,%ebp movl 24(%esp),%esi addl %ebp,%eax # 40_59 54 movl %ecx,%ebp xorl 32(%esp),%esi xorl %edx,%ebp xorl 56(%esp),%esi andl %ebx,%ebp xorl 12(%esp),%esi roll $1,%esi addl %edi,%ebp rorl $2,%ebx movl %eax,%edi roll $5,%edi movl %esi,24(%esp) leal 2400959708(%esi,%ebp,1),%esi movl %ecx,%ebp addl %edi,%esi andl %edx,%ebp movl 28(%esp),%edi addl %ebp,%esi # 40_59 55 movl %ebx,%ebp xorl 36(%esp),%edi xorl %ecx,%ebp xorl 60(%esp),%edi andl %eax,%ebp xorl 16(%esp),%edi roll $1,%edi addl %edx,%ebp rorl $2,%eax movl %esi,%edx roll $5,%edx movl %edi,28(%esp) leal 2400959708(%edi,%ebp,1),%edi movl %ebx,%ebp addl %edx,%edi andl %ecx,%ebp movl 32(%esp),%edx addl %ebp,%edi # 40_59 56 movl %eax,%ebp xorl 40(%esp),%edx xorl %ebx,%ebp xorl (%esp),%edx andl %esi,%ebp xorl 20(%esp),%edx roll $1,%edx addl %ecx,%ebp rorl $2,%esi movl %edi,%ecx roll $5,%ecx movl %edx,32(%esp) leal 2400959708(%edx,%ebp,1),%edx movl %eax,%ebp addl %ecx,%edx andl %ebx,%ebp movl 36(%esp),%ecx addl %ebp,%edx # 40_59 57 movl %esi,%ebp xorl 44(%esp),%ecx xorl %eax,%ebp xorl 4(%esp),%ecx andl %edi,%ebp xorl 24(%esp),%ecx roll $1,%ecx addl %ebx,%ebp rorl $2,%edi movl %edx,%ebx roll $5,%ebx movl %ecx,36(%esp) leal 2400959708(%ecx,%ebp,1),%ecx movl %esi,%ebp addl %ebx,%ecx andl %eax,%ebp movl 40(%esp),%ebx addl %ebp,%ecx # 40_59 58 movl %edi,%ebp xorl 48(%esp),%ebx xorl %esi,%ebp xorl 8(%esp),%ebx andl %edx,%ebp xorl 28(%esp),%ebx roll $1,%ebx addl %eax,%ebp rorl $2,%edx movl %ecx,%eax roll $5,%eax movl %ebx,40(%esp) leal 2400959708(%ebx,%ebp,1),%ebx movl %edi,%ebp addl %eax,%ebx andl %esi,%ebp movl 44(%esp),%eax addl %ebp,%ebx # 40_59 59 movl %edx,%ebp xorl 52(%esp),%eax xorl %edi,%ebp xorl 12(%esp),%eax andl %ecx,%ebp xorl 
32(%esp),%eax roll $1,%eax addl %esi,%ebp rorl $2,%ecx movl %ebx,%esi roll $5,%esi movl %eax,44(%esp) leal 2400959708(%eax,%ebp,1),%eax movl %edx,%ebp addl %esi,%eax andl %edi,%ebp movl 48(%esp),%esi addl %ebp,%eax # 20_39 60 movl %ebx,%ebp xorl 56(%esp),%esi xorl %ecx,%ebp xorl 16(%esp),%esi xorl %edx,%ebp xorl 36(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,48(%esp) leal 3395469782(%esi,%edi,1),%esi movl 52(%esp),%edi addl %ebp,%esi # 20_39 61 movl %eax,%ebp xorl 60(%esp),%edi xorl %ebx,%ebp xorl 20(%esp),%edi xorl %ecx,%ebp xorl 40(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,52(%esp) leal 3395469782(%edi,%edx,1),%edi movl 56(%esp),%edx addl %ebp,%edi # 20_39 62 movl %esi,%ebp xorl (%esp),%edx xorl %eax,%ebp xorl 24(%esp),%edx xorl %ebx,%ebp xorl 44(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,56(%esp) leal 3395469782(%edx,%ecx,1),%edx movl 60(%esp),%ecx addl %ebp,%edx # 20_39 63 movl %edi,%ebp xorl 4(%esp),%ecx xorl %esi,%ebp xorl 28(%esp),%ecx xorl %eax,%ebp xorl 48(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,60(%esp) leal 3395469782(%ecx,%ebx,1),%ecx movl (%esp),%ebx addl %ebp,%ecx # 20_39 64 movl %edx,%ebp xorl 8(%esp),%ebx xorl %edi,%ebp xorl 32(%esp),%ebx xorl %esi,%ebp xorl 52(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,(%esp) leal 3395469782(%ebx,%eax,1),%ebx movl 4(%esp),%eax addl %ebp,%ebx # 20_39 65 movl %ecx,%ebp xorl 12(%esp),%eax xorl %edx,%ebp xorl 36(%esp),%eax xorl %edi,%ebp xorl 56(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,4(%esp) leal 3395469782(%eax,%esi,1),%eax movl 8(%esp),%esi addl %ebp,%eax # 20_39 66 movl %ebx,%ebp xorl 16(%esp),%esi xorl %ecx,%ebp xorl 40(%esp),%esi xorl %edx,%ebp xorl 60(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp 
movl %esi,8(%esp) leal 3395469782(%esi,%edi,1),%esi movl 12(%esp),%edi addl %ebp,%esi # 20_39 67 movl %eax,%ebp xorl 20(%esp),%edi xorl %ebx,%ebp xorl 44(%esp),%edi xorl %ecx,%ebp xorl (%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,12(%esp) leal 3395469782(%edi,%edx,1),%edi movl 16(%esp),%edx addl %ebp,%edi # 20_39 68 movl %esi,%ebp xorl 24(%esp),%edx xorl %eax,%ebp xorl 48(%esp),%edx xorl %ebx,%ebp xorl 4(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,16(%esp) leal 3395469782(%edx,%ecx,1),%edx movl 20(%esp),%ecx addl %ebp,%edx # 20_39 69 movl %edi,%ebp xorl 28(%esp),%ecx xorl %esi,%ebp xorl 52(%esp),%ecx xorl %eax,%ebp xorl 8(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,20(%esp) leal 3395469782(%ecx,%ebx,1),%ecx movl 24(%esp),%ebx addl %ebp,%ecx # 20_39 70 movl %edx,%ebp xorl 32(%esp),%ebx xorl %edi,%ebp xorl 56(%esp),%ebx xorl %esi,%ebp xorl 12(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,24(%esp) leal 3395469782(%ebx,%eax,1),%ebx movl 28(%esp),%eax addl %ebp,%ebx # 20_39 71 movl %ecx,%ebp xorl 36(%esp),%eax xorl %edx,%ebp xorl 60(%esp),%eax xorl %edi,%ebp xorl 16(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,28(%esp) leal 3395469782(%eax,%esi,1),%eax movl 32(%esp),%esi addl %ebp,%eax # 20_39 72 movl %ebx,%ebp xorl 40(%esp),%esi xorl %ecx,%ebp xorl (%esp),%esi xorl %edx,%ebp xorl 20(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,32(%esp) leal 3395469782(%esi,%edi,1),%esi movl 36(%esp),%edi addl %ebp,%esi # 20_39 73 movl %eax,%ebp xorl 44(%esp),%edi xorl %ebx,%ebp xorl 4(%esp),%edi xorl %ecx,%ebp xorl 24(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,36(%esp) leal 3395469782(%edi,%edx,1),%edi movl 40(%esp),%edx addl %ebp,%edi # 20_39 74 movl %esi,%ebp xorl 
48(%esp),%edx xorl %eax,%ebp xorl 8(%esp),%edx xorl %ebx,%ebp xorl 28(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,40(%esp) leal 3395469782(%edx,%ecx,1),%edx movl 44(%esp),%ecx addl %ebp,%edx # 20_39 75 movl %edi,%ebp xorl 52(%esp),%ecx xorl %esi,%ebp xorl 12(%esp),%ecx xorl %eax,%ebp xorl 32(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,44(%esp) leal 3395469782(%ecx,%ebx,1),%ecx movl 48(%esp),%ebx addl %ebp,%ecx # 20_39 76 movl %edx,%ebp xorl 56(%esp),%ebx xorl %edi,%ebp xorl 16(%esp),%ebx xorl %esi,%ebp xorl 36(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,48(%esp) leal 3395469782(%ebx,%eax,1),%ebx movl 52(%esp),%eax addl %ebp,%ebx # 20_39 77 movl %ecx,%ebp xorl 60(%esp),%eax xorl %edx,%ebp xorl 20(%esp),%eax xorl %edi,%ebp xorl 40(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp leal 3395469782(%eax,%esi,1),%eax movl 56(%esp),%esi addl %ebp,%eax # 20_39 78 movl %ebx,%ebp xorl (%esp),%esi xorl %ecx,%ebp xorl 24(%esp),%esi xorl %edx,%ebp xorl 44(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp leal 3395469782(%esi,%edi,1),%esi movl 60(%esp),%edi addl %ebp,%esi # 20_39 79 movl %eax,%ebp xorl 4(%esp),%edi xorl %ebx,%ebp xorl 28(%esp),%edi xorl %ecx,%ebp xorl 48(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp leal 3395469782(%edi,%edx,1),%edi addl %ebp,%edi movl 96(%esp),%ebp movl 100(%esp),%edx addl (%ebp),%edi addl 4(%ebp),%esi addl 8(%ebp),%eax addl 12(%ebp),%ebx addl 16(%ebp),%ecx movl %edi,(%ebp) addl $64,%edx movl %esi,4(%ebp) cmpl 104(%esp),%edx movl %eax,8(%ebp) movl %ecx,%edi movl %ebx,12(%ebp) movl %edx,%esi movl %ecx,16(%ebp) jb L000loop addl $76,%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _sha1_block_data_order_ssse3 .private_extern _sha1_block_data_order_ssse3 .align 4 _sha1_block_data_order_ssse3:
# ---------------------------------------------------------------------------
# _sha1_block_data_order_ssse3(state, data, num) - SSSE3 SHA-1 compression.
# NOTE(review): this is machine-generated assembly (perlasm-style output; see
# the "# 20_39 NN" round markers above) - fix the generator, not this file.
# cdecl args: 20(%esp) = pointer to the five 32-bit chaining words (loaded
# into %eax/%ebx/%ecx/%edx/%edi below), 24(%esp) = input pointer, 28(%esp) =
# block count ("shll $6,%edx" turns it into a byte length which, added to the
# input pointer, forms the end-of-input sentinel kept at 200(%esp)).
# The prologue realigns the stack to 64 bytes (andl $-64,%esp) after saving
# the caller's %esp; the original %esp is restored from 204(%esp) on exit.
# "call L001pic_point / popl %ebp / leal LK_XX_XX-L001pic_point(%ebp)" is the
# PIC idiom: constants (round Ks and the byte-swap mask) are addressed
# relative to the popped return address and cached at 112..176(%esp).
# The ".byte 102,15,56,0,NN" runs are hand-encoded pshufb (66 0F 38 00 /r)
# byte-swapping the input words - the AVX variant below uses vpshufb at the
# corresponding spots.
L_sha1_block_data_order_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi call L001pic_point L001pic_point: popl %ebp leal LK_XX_XX-L001pic_point(%ebp),%ebp movdqa (%ebp),%xmm7 movdqa 16(%ebp),%xmm0 movdqa 32(%ebp),%xmm1 movdqa 48(%ebp),%xmm2 movdqa 64(%ebp),%xmm6 movl 20(%esp),%edi movl 24(%esp),%ebp movl 28(%esp),%edx movl %esp,%esi subl $208,%esp andl $-64,%esp movdqa %xmm0,112(%esp) movdqa %xmm1,128(%esp) movdqa %xmm2,144(%esp) shll $6,%edx movdqa %xmm7,160(%esp) addl %ebp,%edx movdqa %xmm6,176(%esp) addl $64,%ebp movl %edi,192(%esp) movl %ebp,196(%esp) movl %edx,200(%esp) movl %esi,204(%esp) movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx movl 16(%edi),%edi movl %ebx,%esi movdqu -64(%ebp),%xmm0 movdqu -48(%ebp),%xmm1 movdqu -32(%ebp),%xmm2 movdqu -16(%ebp),%xmm3 .byte 102,15,56,0,198 .byte 102,15,56,0,206 .byte 102,15,56,0,214 movdqa %xmm7,96(%esp) .byte 102,15,56,0,222 paddd %xmm7,%xmm0 paddd %xmm7,%xmm1 paddd %xmm7,%xmm2 movdqa %xmm0,(%esp) psubd %xmm7,%xmm0 movdqa %xmm1,16(%esp) psubd %xmm7,%xmm1 movdqa %xmm2,32(%esp) movl %ecx,%ebp psubd %xmm7,%xmm2 xorl %edx,%ebp pshufd $238,%xmm0,%xmm4 andl %ebp,%esi jmp L002loop .align 4,0x90 L002loop: rorl $2,%ebx xorl %edx,%esi movl %eax,%ebp punpcklqdq %xmm1,%xmm4 movdqa %xmm3,%xmm6 addl (%esp),%edi xorl %ecx,%ebx paddd %xmm3,%xmm7 movdqa %xmm0,64(%esp) roll $5,%eax addl %esi,%edi psrldq $4,%xmm6 andl %ebx,%ebp xorl %ecx,%ebx pxor %xmm0,%xmm4 addl %eax,%edi rorl $7,%eax pxor %xmm2,%xmm6 xorl %ecx,%ebp movl %edi,%esi addl 4(%esp),%edx pxor %xmm6,%xmm4 xorl %ebx,%eax roll $5,%edi movdqa %xmm7,48(%esp) addl %ebp,%edx andl %eax,%esi movdqa %xmm4,%xmm0 xorl %ebx,%eax addl %edi,%edx rorl $7,%edi movdqa %xmm4,%xmm6 xorl %ebx,%esi pslldq $12,%xmm0 paddd %xmm4,%xmm4 movl %edx,%ebp addl 8(%esp),%ecx psrld $31,%xmm6 xorl %eax,%edi roll $5,%edx movdqa %xmm0,%xmm7 addl %esi,%ecx andl %edi,%ebp xorl %eax,%edi psrld $30,%xmm0 addl %edx,%ecx rorl $7,%edx por %xmm6,%xmm4 xorl %eax,%ebp movl %ecx,%esi
# L002loop body: one 64-byte block per iteration.  Scalar SHA-1 rounds are
# interleaved with the SSE message-schedule update: the W[t]+K values live in
# the 0..60(%esp) slots (pre-added with the constant held in %xmm7) while
# %xmm0-%xmm3 carry the rolling 16-word schedule.
addl 12(%esp),%ebx pslld $2,%xmm7 xorl %edi,%edx roll $5,%ecx pxor %xmm0,%xmm4 movdqa 96(%esp),%xmm0 addl %ebp,%ebx andl %edx,%esi pxor %xmm7,%xmm4 pshufd $238,%xmm1,%xmm5 xorl %edi,%edx addl %ecx,%ebx rorl $7,%ecx xorl %edi,%esi movl %ebx,%ebp punpcklqdq %xmm2,%xmm5 movdqa %xmm4,%xmm7 addl 16(%esp),%eax xorl %edx,%ecx paddd %xmm4,%xmm0 movdqa %xmm1,80(%esp) roll $5,%ebx addl %esi,%eax psrldq $4,%xmm7 andl %ecx,%ebp xorl %edx,%ecx pxor %xmm1,%xmm5 addl %ebx,%eax rorl $7,%ebx pxor %xmm3,%xmm7 xorl %edx,%ebp movl %eax,%esi addl 20(%esp),%edi pxor %xmm7,%xmm5 xorl %ecx,%ebx roll $5,%eax movdqa %xmm0,(%esp) addl %ebp,%edi andl %ebx,%esi movdqa %xmm5,%xmm1 xorl %ecx,%ebx addl %eax,%edi rorl $7,%eax movdqa %xmm5,%xmm7 xorl %ecx,%esi pslldq $12,%xmm1 paddd %xmm5,%xmm5 movl %edi,%ebp addl 24(%esp),%edx psrld $31,%xmm7 xorl %ebx,%eax roll $5,%edi movdqa %xmm1,%xmm0 addl %esi,%edx andl %eax,%ebp xorl %ebx,%eax psrld $30,%xmm1 addl %edi,%edx rorl $7,%edi por %xmm7,%xmm5 xorl %ebx,%ebp movl %edx,%esi addl 28(%esp),%ecx pslld $2,%xmm0 xorl %eax,%edi roll $5,%edx pxor %xmm1,%xmm5 movdqa 112(%esp),%xmm1 addl %ebp,%ecx andl %edi,%esi pxor %xmm0,%xmm5 pshufd $238,%xmm2,%xmm6 xorl %eax,%edi addl %edx,%ecx rorl $7,%edx xorl %eax,%esi movl %ecx,%ebp punpcklqdq %xmm3,%xmm6 movdqa %xmm5,%xmm0 addl 32(%esp),%ebx xorl %edi,%edx paddd %xmm5,%xmm1 movdqa %xmm2,96(%esp) roll $5,%ecx addl %esi,%ebx psrldq $4,%xmm0 andl %edx,%ebp xorl %edi,%edx pxor %xmm2,%xmm6 addl %ecx,%ebx rorl $7,%ecx pxor %xmm4,%xmm0 xorl %edi,%ebp movl %ebx,%esi addl 36(%esp),%eax pxor %xmm0,%xmm6 xorl %edx,%ecx roll $5,%ebx movdqa %xmm1,16(%esp) addl %ebp,%eax andl %ecx,%esi movdqa %xmm6,%xmm2 xorl %edx,%ecx addl %ebx,%eax rorl $7,%ebx movdqa %xmm6,%xmm0 xorl %edx,%esi pslldq $12,%xmm2 paddd %xmm6,%xmm6 movl %eax,%ebp addl 40(%esp),%edi psrld $31,%xmm0 xorl %ecx,%ebx roll $5,%eax movdqa %xmm2,%xmm1 addl %esi,%edi andl %ebx,%ebp xorl %ecx,%ebx psrld $30,%xmm2 addl %eax,%edi rorl $7,%eax por %xmm0,%xmm6 xorl %ecx,%ebp
# Schedule expansion continues: each 4-round group derives the next W
# quadword with the pxor/pslldq/psrld/pslld sequence (the SHA-1 recurrence
# W[t] = rol32(W[t-3]^W[t-8]^W[t-14]^W[t-16], 1), four lanes at a time).
movdqa 64(%esp),%xmm0 movl %edi,%esi addl 44(%esp),%edx pslld $2,%xmm1 xorl %ebx,%eax roll $5,%edi pxor %xmm2,%xmm6 movdqa 112(%esp),%xmm2 addl %ebp,%edx andl %eax,%esi pxor %xmm1,%xmm6 pshufd $238,%xmm3,%xmm7 xorl %ebx,%eax addl %edi,%edx rorl $7,%edi xorl %ebx,%esi movl %edx,%ebp punpcklqdq %xmm4,%xmm7 movdqa %xmm6,%xmm1 addl 48(%esp),%ecx xorl %eax,%edi paddd %xmm6,%xmm2 movdqa %xmm3,64(%esp) roll $5,%edx addl %esi,%ecx psrldq $4,%xmm1 andl %edi,%ebp xorl %eax,%edi pxor %xmm3,%xmm7 addl %edx,%ecx rorl $7,%edx pxor %xmm5,%xmm1 xorl %eax,%ebp movl %ecx,%esi addl 52(%esp),%ebx pxor %xmm1,%xmm7 xorl %edi,%edx roll $5,%ecx movdqa %xmm2,32(%esp) addl %ebp,%ebx andl %edx,%esi movdqa %xmm7,%xmm3 xorl %edi,%edx addl %ecx,%ebx rorl $7,%ecx movdqa %xmm7,%xmm1 xorl %edi,%esi pslldq $12,%xmm3 paddd %xmm7,%xmm7 movl %ebx,%ebp addl 56(%esp),%eax psrld $31,%xmm1 xorl %edx,%ecx roll $5,%ebx movdqa %xmm3,%xmm2 addl %esi,%eax andl %ecx,%ebp xorl %edx,%ecx psrld $30,%xmm3 addl %ebx,%eax rorl $7,%ebx por %xmm1,%xmm7 xorl %edx,%ebp movdqa 80(%esp),%xmm1 movl %eax,%esi addl 60(%esp),%edi pslld $2,%xmm2 xorl %ecx,%ebx roll $5,%eax pxor %xmm3,%xmm7 movdqa 112(%esp),%xmm3 addl %ebp,%edi andl %ebx,%esi pxor %xmm2,%xmm7 pshufd $238,%xmm6,%xmm2 xorl %ecx,%ebx addl %eax,%edi rorl $7,%eax pxor %xmm4,%xmm0 punpcklqdq %xmm7,%xmm2 xorl %ecx,%esi movl %edi,%ebp addl (%esp),%edx pxor %xmm1,%xmm0 movdqa %xmm4,80(%esp) xorl %ebx,%eax roll $5,%edi movdqa %xmm3,%xmm4 addl %esi,%edx paddd %xmm7,%xmm3 andl %eax,%ebp pxor %xmm2,%xmm0 xorl %ebx,%eax addl %edi,%edx rorl $7,%edi xorl %ebx,%ebp movdqa %xmm0,%xmm2 movdqa %xmm3,48(%esp) movl %edx,%esi addl 4(%esp),%ecx xorl %eax,%edi roll $5,%edx pslld $2,%xmm0 addl %ebp,%ecx andl %edi,%esi psrld $30,%xmm2 xorl %eax,%edi addl %edx,%ecx rorl $7,%edx xorl %eax,%esi movl %ecx,%ebp addl 8(%esp),%ebx xorl %edi,%edx roll $5,%ecx por %xmm2,%xmm0 addl %esi,%ebx andl %edx,%ebp movdqa 96(%esp),%xmm2 xorl %edi,%edx addl %ecx,%ebx addl 12(%esp),%eax xorl %edi,%ebp movl
%ebx,%esi pshufd $238,%xmm7,%xmm3 roll $5,%ebx addl %ebp,%eax xorl %edx,%esi rorl $7,%ecx addl %ebx,%eax addl 16(%esp),%edi pxor %xmm5,%xmm1 punpcklqdq %xmm0,%xmm3 xorl %ecx,%esi movl %eax,%ebp roll $5,%eax pxor %xmm2,%xmm1 movdqa %xmm5,96(%esp) addl %esi,%edi xorl %ecx,%ebp movdqa %xmm4,%xmm5 rorl $7,%ebx paddd %xmm0,%xmm4 addl %eax,%edi pxor %xmm3,%xmm1 addl 20(%esp),%edx xorl %ebx,%ebp movl %edi,%esi roll $5,%edi movdqa %xmm1,%xmm3 movdqa %xmm4,(%esp) addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax addl %edi,%edx pslld $2,%xmm1 addl 24(%esp),%ecx xorl %eax,%esi psrld $30,%xmm3 movl %edx,%ebp roll $5,%edx addl %esi,%ecx xorl %eax,%ebp rorl $7,%edi addl %edx,%ecx por %xmm3,%xmm1 addl 28(%esp),%ebx xorl %edi,%ebp movdqa 64(%esp),%xmm3 movl %ecx,%esi roll $5,%ecx addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx pshufd $238,%xmm0,%xmm4 addl %ecx,%ebx addl 32(%esp),%eax pxor %xmm6,%xmm2 punpcklqdq %xmm1,%xmm4 xorl %edx,%esi movl %ebx,%ebp roll $5,%ebx pxor %xmm3,%xmm2 movdqa %xmm6,64(%esp) addl %esi,%eax xorl %edx,%ebp movdqa 128(%esp),%xmm6 rorl $7,%ecx paddd %xmm1,%xmm5 addl %ebx,%eax pxor %xmm4,%xmm2 addl 36(%esp),%edi xorl %ecx,%ebp movl %eax,%esi roll $5,%eax movdqa %xmm2,%xmm4 movdqa %xmm5,16(%esp) addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx addl %eax,%edi pslld $2,%xmm2 addl 40(%esp),%edx xorl %ebx,%esi psrld $30,%xmm4 movl %edi,%ebp roll $5,%edi addl %esi,%edx xorl %ebx,%ebp rorl $7,%eax addl %edi,%edx por %xmm4,%xmm2 addl 44(%esp),%ecx xorl %eax,%ebp movdqa 80(%esp),%xmm4 movl %edx,%esi roll $5,%edx addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi pshufd $238,%xmm1,%xmm5 addl %edx,%ecx addl 48(%esp),%ebx pxor %xmm7,%xmm3 punpcklqdq %xmm2,%xmm5 xorl %edi,%esi movl %ecx,%ebp roll $5,%ecx pxor %xmm4,%xmm3 movdqa %xmm7,80(%esp) addl %esi,%ebx xorl %edi,%ebp movdqa %xmm6,%xmm7 rorl $7,%edx paddd %xmm2,%xmm6 addl %ecx,%ebx pxor %xmm5,%xmm3 addl 52(%esp),%eax xorl %edx,%ebp movl %ebx,%esi roll $5,%ebx movdqa %xmm3,%xmm5 movdqa %xmm6,32(%esp) addl %ebp,%eax xorl %edx,%esi rorl $7,%ecx
# Middle rounds: the round-constant bank cached at 112..144(%esp) during the
# prologue is reloaded (movdqa 128(%esp)/144(%esp)) as the round group - and
# therefore the K value folded into the stacked W words - changes.
addl %ebx,%eax pslld $2,%xmm3 addl 56(%esp),%edi xorl %ecx,%esi psrld $30,%xmm5 movl %eax,%ebp roll $5,%eax addl %esi,%edi xorl %ecx,%ebp rorl $7,%ebx addl %eax,%edi por %xmm5,%xmm3 addl 60(%esp),%edx xorl %ebx,%ebp movdqa 96(%esp),%xmm5 movl %edi,%esi roll $5,%edi addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax pshufd $238,%xmm2,%xmm6 addl %edi,%edx addl (%esp),%ecx pxor %xmm0,%xmm4 punpcklqdq %xmm3,%xmm6 xorl %eax,%esi movl %edx,%ebp roll $5,%edx pxor %xmm5,%xmm4 movdqa %xmm0,96(%esp) addl %esi,%ecx xorl %eax,%ebp movdqa %xmm7,%xmm0 rorl $7,%edi paddd %xmm3,%xmm7 addl %edx,%ecx pxor %xmm6,%xmm4 addl 4(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi roll $5,%ecx movdqa %xmm4,%xmm6 movdqa %xmm7,48(%esp) addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx addl %ecx,%ebx pslld $2,%xmm4 addl 8(%esp),%eax xorl %edx,%esi psrld $30,%xmm6 movl %ebx,%ebp roll $5,%ebx addl %esi,%eax xorl %edx,%ebp rorl $7,%ecx addl %ebx,%eax por %xmm6,%xmm4 addl 12(%esp),%edi xorl %ecx,%ebp movdqa 64(%esp),%xmm6 movl %eax,%esi roll $5,%eax addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx pshufd $238,%xmm3,%xmm7 addl %eax,%edi addl 16(%esp),%edx pxor %xmm1,%xmm5 punpcklqdq %xmm4,%xmm7 xorl %ebx,%esi movl %edi,%ebp roll $5,%edi pxor %xmm6,%xmm5 movdqa %xmm1,64(%esp) addl %esi,%edx xorl %ebx,%ebp movdqa %xmm0,%xmm1 rorl $7,%eax paddd %xmm4,%xmm0 addl %edi,%edx pxor %xmm7,%xmm5 addl 20(%esp),%ecx xorl %eax,%ebp movl %edx,%esi roll $5,%edx movdqa %xmm5,%xmm7 movdqa %xmm0,(%esp) addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi addl %edx,%ecx pslld $2,%xmm5 addl 24(%esp),%ebx xorl %edi,%esi psrld $30,%xmm7 movl %ecx,%ebp roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx addl %ecx,%ebx por %xmm7,%xmm5 addl 28(%esp),%eax movdqa 80(%esp),%xmm7 rorl $7,%ecx movl %ebx,%esi xorl %edx,%ebp roll $5,%ebx pshufd $238,%xmm4,%xmm0 addl %ebp,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax addl 32(%esp),%edi pxor %xmm2,%xmm6 punpcklqdq %xmm5,%xmm0 andl %ecx,%esi xorl %edx,%ecx rorl $7,%ebx pxor %xmm7,%xmm6 movdqa %xmm2,80(%esp) movl
%eax,%ebp xorl %ecx,%esi roll $5,%eax movdqa %xmm1,%xmm2 addl %esi,%edi paddd %xmm5,%xmm1 xorl %ebx,%ebp pxor %xmm0,%xmm6 xorl %ecx,%ebx addl %eax,%edi addl 36(%esp),%edx andl %ebx,%ebp movdqa %xmm6,%xmm0 movdqa %xmm1,16(%esp) xorl %ecx,%ebx rorl $7,%eax movl %edi,%esi xorl %ebx,%ebp roll $5,%edi pslld $2,%xmm6 addl %ebp,%edx xorl %eax,%esi psrld $30,%xmm0 xorl %ebx,%eax addl %edi,%edx addl 40(%esp),%ecx andl %eax,%esi xorl %ebx,%eax rorl $7,%edi por %xmm0,%xmm6 movl %edx,%ebp xorl %eax,%esi movdqa 96(%esp),%xmm0 roll $5,%edx addl %esi,%ecx xorl %edi,%ebp xorl %eax,%edi addl %edx,%ecx pshufd $238,%xmm5,%xmm1 addl 44(%esp),%ebx andl %edi,%ebp xorl %eax,%edi rorl $7,%edx movl %ecx,%esi xorl %edi,%ebp roll $5,%ecx addl %ebp,%ebx xorl %edx,%esi xorl %edi,%edx addl %ecx,%ebx addl 48(%esp),%eax pxor %xmm3,%xmm7 punpcklqdq %xmm6,%xmm1 andl %edx,%esi xorl %edi,%edx rorl $7,%ecx pxor %xmm0,%xmm7 movdqa %xmm3,96(%esp) movl %ebx,%ebp xorl %edx,%esi roll $5,%ebx movdqa 144(%esp),%xmm3 addl %esi,%eax paddd %xmm6,%xmm2 xorl %ecx,%ebp pxor %xmm1,%xmm7 xorl %edx,%ecx addl %ebx,%eax addl 52(%esp),%edi andl %ecx,%ebp movdqa %xmm7,%xmm1 movdqa %xmm2,32(%esp) xorl %edx,%ecx rorl $7,%ebx movl %eax,%esi xorl %ecx,%ebp roll $5,%eax pslld $2,%xmm7 addl %ebp,%edi xorl %ebx,%esi psrld $30,%xmm1 xorl %ecx,%ebx addl %eax,%edi addl 56(%esp),%edx andl %ebx,%esi xorl %ecx,%ebx rorl $7,%eax por %xmm1,%xmm7 movl %edi,%ebp xorl %ebx,%esi movdqa 64(%esp),%xmm1 roll $5,%edi addl %esi,%edx xorl %eax,%ebp xorl %ebx,%eax addl %edi,%edx pshufd $238,%xmm6,%xmm2 addl 60(%esp),%ecx andl %eax,%ebp xorl %ebx,%eax rorl $7,%edi movl %edx,%esi xorl %eax,%ebp roll $5,%edx addl %ebp,%ecx xorl %edi,%esi xorl %eax,%edi addl %edx,%ecx addl (%esp),%ebx pxor %xmm4,%xmm0 punpcklqdq %xmm7,%xmm2 andl %edi,%esi xorl %eax,%edi rorl $7,%edx pxor %xmm1,%xmm0 movdqa %xmm4,64(%esp) movl %ecx,%ebp xorl %edi,%esi roll $5,%ecx movdqa %xmm3,%xmm4 addl %esi,%ebx paddd %xmm7,%xmm3 xorl %edx,%ebp pxor %xmm2,%xmm0 xorl %edi,%edx addl
%ecx,%ebx addl 4(%esp),%eax andl %edx,%ebp movdqa %xmm0,%xmm2 movdqa %xmm3,48(%esp) xorl %edi,%edx rorl $7,%ecx movl %ebx,%esi xorl %edx,%ebp roll $5,%ebx pslld $2,%xmm0 addl %ebp,%eax xorl %ecx,%esi psrld $30,%xmm2 xorl %edx,%ecx addl %ebx,%eax addl 8(%esp),%edi andl %ecx,%esi xorl %edx,%ecx rorl $7,%ebx por %xmm2,%xmm0 movl %eax,%ebp xorl %ecx,%esi movdqa 80(%esp),%xmm2 roll $5,%eax addl %esi,%edi xorl %ebx,%ebp xorl %ecx,%ebx addl %eax,%edi pshufd $238,%xmm7,%xmm3 addl 12(%esp),%edx andl %ebx,%ebp xorl %ecx,%ebx rorl $7,%eax movl %edi,%esi xorl %ebx,%ebp roll $5,%edi addl %ebp,%edx xorl %eax,%esi xorl %ebx,%eax addl %edi,%edx addl 16(%esp),%ecx pxor %xmm5,%xmm1 punpcklqdq %xmm0,%xmm3 andl %eax,%esi xorl %ebx,%eax rorl $7,%edi pxor %xmm2,%xmm1 movdqa %xmm5,80(%esp) movl %edx,%ebp xorl %eax,%esi roll $5,%edx movdqa %xmm4,%xmm5 addl %esi,%ecx paddd %xmm0,%xmm4 xorl %edi,%ebp pxor %xmm3,%xmm1 xorl %eax,%edi addl %edx,%ecx addl 20(%esp),%ebx andl %edi,%ebp movdqa %xmm1,%xmm3 movdqa %xmm4,(%esp) xorl %eax,%edi rorl $7,%edx movl %ecx,%esi xorl %edi,%ebp roll $5,%ecx pslld $2,%xmm1 addl %ebp,%ebx xorl %edx,%esi psrld $30,%xmm3 xorl %edi,%edx addl %ecx,%ebx addl 24(%esp),%eax andl %edx,%esi xorl %edi,%edx rorl $7,%ecx por %xmm3,%xmm1 movl %ebx,%ebp xorl %edx,%esi movdqa 96(%esp),%xmm3 roll $5,%ebx addl %esi,%eax xorl %ecx,%ebp xorl %edx,%ecx addl %ebx,%eax pshufd $238,%xmm0,%xmm4 addl 28(%esp),%edi andl %ecx,%ebp xorl %edx,%ecx rorl $7,%ebx movl %eax,%esi xorl %ecx,%ebp roll $5,%eax addl %ebp,%edi xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%edi addl 32(%esp),%edx pxor %xmm6,%xmm2 punpcklqdq %xmm1,%xmm4 andl %ebx,%esi xorl %ecx,%ebx rorl $7,%eax pxor %xmm3,%xmm2 movdqa %xmm6,96(%esp) movl %edi,%ebp xorl %ebx,%esi roll $5,%edi movdqa %xmm5,%xmm6 addl %esi,%edx paddd %xmm1,%xmm5 xorl %eax,%ebp pxor %xmm4,%xmm2 xorl %ebx,%eax addl %edi,%edx addl 36(%esp),%ecx andl %eax,%ebp movdqa %xmm2,%xmm4 movdqa %xmm5,16(%esp) xorl %ebx,%eax rorl $7,%edi movl %edx,%esi xorl %eax,%ebp roll
$5,%edx pslld $2,%xmm2 addl %ebp,%ecx xorl %edi,%esi psrld $30,%xmm4 xorl %eax,%edi addl %edx,%ecx addl 40(%esp),%ebx andl %edi,%esi xorl %eax,%edi rorl $7,%edx por %xmm4,%xmm2 movl %ecx,%ebp xorl %edi,%esi movdqa 64(%esp),%xmm4 roll $5,%ecx addl %esi,%ebx xorl %edx,%ebp xorl %edi,%edx addl %ecx,%ebx pshufd $238,%xmm1,%xmm5 addl 44(%esp),%eax andl %edx,%ebp xorl %edi,%edx rorl $7,%ecx movl %ebx,%esi xorl %edx,%ebp roll $5,%ebx addl %ebp,%eax xorl %edx,%esi addl %ebx,%eax addl 48(%esp),%edi pxor %xmm7,%xmm3 punpcklqdq %xmm2,%xmm5 xorl %ecx,%esi movl %eax,%ebp roll $5,%eax pxor %xmm4,%xmm3 movdqa %xmm7,64(%esp) addl %esi,%edi xorl %ecx,%ebp movdqa %xmm6,%xmm7 rorl $7,%ebx paddd %xmm2,%xmm6 addl %eax,%edi pxor %xmm5,%xmm3 addl 52(%esp),%edx xorl %ebx,%ebp movl %edi,%esi roll $5,%edi movdqa %xmm3,%xmm5 movdqa %xmm6,32(%esp) addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax addl %edi,%edx pslld $2,%xmm3 addl 56(%esp),%ecx xorl %eax,%esi psrld $30,%xmm5 movl %edx,%ebp roll $5,%edx addl %esi,%ecx xorl %eax,%ebp rorl $7,%edi addl %edx,%ecx por %xmm5,%xmm3 addl 60(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi roll $5,%ecx addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx addl %ecx,%ebx addl (%esp),%eax xorl %edx,%esi movl %ebx,%ebp roll $5,%ebx addl %esi,%eax xorl %edx,%ebp rorl $7,%ecx paddd %xmm3,%xmm7 addl %ebx,%eax addl 4(%esp),%edi xorl %ecx,%ebp movl %eax,%esi movdqa %xmm7,48(%esp) roll $5,%eax addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx addl %eax,%edi addl 8(%esp),%edx xorl %ebx,%esi movl %edi,%ebp roll $5,%edi addl %esi,%edx xorl %ebx,%ebp rorl $7,%eax addl %edi,%edx addl 12(%esp),%ecx xorl %eax,%ebp movl %edx,%esi roll $5,%edx addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi addl %edx,%ecx movl 196(%esp),%ebp cmpl 200(%esp),%ebp je L003done movdqa 160(%esp),%xmm7 movdqa 176(%esp),%xmm6 movdqu (%ebp),%xmm0 movdqu 16(%ebp),%xmm1 movdqu 32(%ebp),%xmm2 movdqu 48(%ebp),%xmm3 addl $64,%ebp .byte 102,15,56,0,198 movl %ebp,196(%esp) movdqa %xmm7,96(%esp) addl 16(%esp),%ebx xorl %edi,%esi movl
%ecx,%ebp roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx .byte 102,15,56,0,206 addl %ecx,%ebx addl 20(%esp),%eax xorl %edx,%ebp movl %ebx,%esi paddd %xmm7,%xmm0 roll $5,%ebx addl %ebp,%eax xorl %edx,%esi rorl $7,%ecx movdqa %xmm0,(%esp) addl %ebx,%eax addl 24(%esp),%edi xorl %ecx,%esi movl %eax,%ebp psubd %xmm7,%xmm0 roll $5,%eax addl %esi,%edi xorl %ecx,%ebp rorl $7,%ebx addl %eax,%edi addl 28(%esp),%edx xorl %ebx,%ebp movl %edi,%esi roll $5,%edi addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax addl %edi,%edx addl 32(%esp),%ecx xorl %eax,%esi movl %edx,%ebp roll $5,%edx addl %esi,%ecx xorl %eax,%ebp rorl $7,%edi .byte 102,15,56,0,214 addl %edx,%ecx addl 36(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi paddd %xmm7,%xmm1 roll $5,%ecx addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx movdqa %xmm1,16(%esp) addl %ecx,%ebx addl 40(%esp),%eax xorl %edx,%esi movl %ebx,%ebp psubd %xmm7,%xmm1 roll $5,%ebx addl %esi,%eax xorl %edx,%ebp rorl $7,%ecx addl %ebx,%eax addl 44(%esp),%edi xorl %ecx,%ebp movl %eax,%esi roll $5,%eax addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx addl %eax,%edi addl 48(%esp),%edx xorl %ebx,%esi movl %edi,%ebp roll $5,%edi addl %esi,%edx xorl %ebx,%ebp rorl $7,%eax .byte 102,15,56,0,222 addl %edi,%edx addl 52(%esp),%ecx xorl %eax,%ebp movl %edx,%esi paddd %xmm7,%xmm2 roll $5,%edx addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi movdqa %xmm2,32(%esp) addl %edx,%ecx addl 56(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp psubd %xmm7,%xmm2 roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx addl %ecx,%ebx addl 60(%esp),%eax xorl %edx,%ebp movl %ebx,%esi roll $5,%ebx addl %ebp,%eax rorl $7,%ecx addl %ebx,%eax movl 192(%esp),%ebp addl (%ebp),%eax addl 4(%ebp),%esi addl 8(%ebp),%ecx movl %eax,(%ebp) addl 12(%ebp),%edx movl %esi,4(%ebp) addl 16(%ebp),%edi movl %ecx,8(%ebp) movl %ecx,%ebx movl %edx,12(%ebp) xorl %edx,%ebx movl %edi,16(%ebp) movl %esi,%ebp pshufd $238,%xmm0,%xmm4 andl %ebx,%esi movl %ebp,%ebx jmp L002loop .align 4,0x90 L003done: addl 16(%esp),%ebx xorl %edi,%esi
# Final-block path (L003done, entered via the cmpl 200(%esp)/je above): no
# next block to pre-schedule, so only the remaining scalar rounds run; the
# working registers are then added back into the state words at 192(%esp),
# the caller's %esp is restored from 204(%esp), and the function returns.
movl %ecx,%ebp roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx addl %ecx,%ebx addl 20(%esp),%eax xorl %edx,%ebp movl %ebx,%esi roll $5,%ebx addl %ebp,%eax xorl %edx,%esi rorl $7,%ecx addl %ebx,%eax addl 24(%esp),%edi xorl %ecx,%esi movl %eax,%ebp roll $5,%eax addl %esi,%edi xorl %ecx,%ebp rorl $7,%ebx addl %eax,%edi addl 28(%esp),%edx xorl %ebx,%ebp movl %edi,%esi roll $5,%edi addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax addl %edi,%edx addl 32(%esp),%ecx xorl %eax,%esi movl %edx,%ebp roll $5,%edx addl %esi,%ecx xorl %eax,%ebp rorl $7,%edi addl %edx,%ecx addl 36(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi roll $5,%ecx addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx addl %ecx,%ebx addl 40(%esp),%eax xorl %edx,%esi movl %ebx,%ebp roll $5,%ebx addl %esi,%eax xorl %edx,%ebp rorl $7,%ecx addl %ebx,%eax addl 44(%esp),%edi xorl %ecx,%ebp movl %eax,%esi roll $5,%eax addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx addl %eax,%edi addl 48(%esp),%edx xorl %ebx,%esi movl %edi,%ebp roll $5,%edi addl %esi,%edx xorl %ebx,%ebp rorl $7,%eax addl %edi,%edx addl 52(%esp),%ecx xorl %eax,%ebp movl %edx,%esi roll $5,%edx addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi addl %edx,%ecx addl 56(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx addl %ecx,%ebx addl 60(%esp),%eax xorl %edx,%ebp movl %ebx,%esi roll $5,%ebx addl %ebp,%eax rorl $7,%ecx addl %ebx,%eax movl 192(%esp),%ebp addl (%ebp),%eax movl 204(%esp),%esp addl 4(%ebp),%esi addl 8(%ebp),%ecx movl %eax,(%ebp) addl 12(%ebp),%edx movl %esi,4(%ebp) addl 16(%ebp),%edi movl %ecx,8(%ebp) movl %edx,12(%ebp) movl %edi,16(%ebp) popl %edi popl %esi popl %ebx popl %ebp ret .globl _sha1_block_data_order_avx .private_extern _sha1_block_data_order_avx .align 4 _sha1_block_data_order_avx: L_sha1_block_data_order_avx_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi call L004pic_point L004pic_point: popl %ebp leal LK_XX_XX-L004pic_point(%ebp),%ebp vzeroall vmovdqa (%ebp),%xmm7 vmovdqa 16(%ebp),%xmm0 vmovdqa
32(%ebp),%xmm1 vmovdqa 48(%ebp),%xmm2 vmovdqa 64(%ebp),%xmm6 movl 20(%esp),%edi movl 24(%esp),%ebp movl 28(%esp),%edx movl %esp,%esi subl $208,%esp andl $-64,%esp vmovdqa %xmm0,112(%esp) vmovdqa %xmm1,128(%esp) vmovdqa %xmm2,144(%esp) shll $6,%edx vmovdqa %xmm7,160(%esp) addl %ebp,%edx vmovdqa %xmm6,176(%esp) addl $64,%ebp movl %edi,192(%esp) movl %ebp,196(%esp) movl %edx,200(%esp) movl %esi,204(%esp) movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx movl 16(%edi),%edi movl %ebx,%esi vmovdqu -64(%ebp),%xmm0 vmovdqu -48(%ebp),%xmm1 vmovdqu -32(%ebp),%xmm2 vmovdqu -16(%ebp),%xmm3 vpshufb %xmm6,%xmm0,%xmm0 vpshufb %xmm6,%xmm1,%xmm1 vpshufb %xmm6,%xmm2,%xmm2 vmovdqa %xmm7,96(%esp) vpshufb %xmm6,%xmm3,%xmm3 vpaddd %xmm7,%xmm0,%xmm4 vpaddd %xmm7,%xmm1,%xmm5 vpaddd %xmm7,%xmm2,%xmm6 vmovdqa %xmm4,(%esp) movl %ecx,%ebp vmovdqa %xmm5,16(%esp) xorl %edx,%ebp vmovdqa %xmm6,32(%esp) andl %ebp,%esi jmp L005loop .align 4,0x90 L005loop: shrdl $2,%ebx,%ebx xorl %edx,%esi vpalignr $8,%xmm0,%xmm1,%xmm4 movl %eax,%ebp addl (%esp),%edi vpaddd %xmm3,%xmm7,%xmm7 vmovdqa %xmm0,64(%esp) xorl %ecx,%ebx shldl $5,%eax,%eax vpsrldq $4,%xmm3,%xmm6 addl %esi,%edi andl %ebx,%ebp vpxor %xmm0,%xmm4,%xmm4 xorl %ecx,%ebx addl %eax,%edi vpxor %xmm2,%xmm6,%xmm6 shrdl $7,%eax,%eax xorl %ecx,%ebp vmovdqa %xmm7,48(%esp) movl %edi,%esi addl 4(%esp),%edx vpxor %xmm6,%xmm4,%xmm4 xorl %ebx,%eax shldl $5,%edi,%edi addl %ebp,%edx andl %eax,%esi vpsrld $31,%xmm4,%xmm6 xorl %ebx,%eax addl %edi,%edx shrdl $7,%edi,%edi xorl %ebx,%esi vpslldq $12,%xmm4,%xmm0 vpaddd %xmm4,%xmm4,%xmm4 movl %edx,%ebp addl 8(%esp),%ecx xorl %eax,%edi shldl $5,%edx,%edx vpsrld $30,%xmm0,%xmm7 vpor %xmm6,%xmm4,%xmm4 addl %esi,%ecx andl %edi,%ebp xorl %eax,%edi addl %edx,%ecx vpslld $2,%xmm0,%xmm0 shrdl $7,%edx,%edx xorl %eax,%ebp vpxor %xmm7,%xmm4,%xmm4 movl %ecx,%esi addl 12(%esp),%ebx xorl %edi,%edx shldl $5,%ecx,%ecx vpxor %xmm0,%xmm4,%xmm4 addl %ebp,%ebx andl %edx,%esi vmovdqa 96(%esp),%xmm0 xorl %edi,%edx addl 
%ecx,%ebx shrdl $7,%ecx,%ecx xorl %edi,%esi vpalignr $8,%xmm1,%xmm2,%xmm5 movl %ebx,%ebp addl 16(%esp),%eax vpaddd %xmm4,%xmm0,%xmm0 vmovdqa %xmm1,80(%esp) xorl %edx,%ecx shldl $5,%ebx,%ebx vpsrldq $4,%xmm4,%xmm7 addl %esi,%eax andl %ecx,%ebp vpxor %xmm1,%xmm5,%xmm5 xorl %edx,%ecx addl %ebx,%eax vpxor %xmm3,%xmm7,%xmm7 shrdl $7,%ebx,%ebx xorl %edx,%ebp vmovdqa %xmm0,(%esp) movl %eax,%esi addl 20(%esp),%edi vpxor %xmm7,%xmm5,%xmm5 xorl %ecx,%ebx shldl $5,%eax,%eax addl %ebp,%edi andl %ebx,%esi vpsrld $31,%xmm5,%xmm7 xorl %ecx,%ebx addl %eax,%edi shrdl $7,%eax,%eax xorl %ecx,%esi vpslldq $12,%xmm5,%xmm1 vpaddd %xmm5,%xmm5,%xmm5 movl %edi,%ebp addl 24(%esp),%edx xorl %ebx,%eax shldl $5,%edi,%edi vpsrld $30,%xmm1,%xmm0 vpor %xmm7,%xmm5,%xmm5 addl %esi,%edx andl %eax,%ebp xorl %ebx,%eax addl %edi,%edx vpslld $2,%xmm1,%xmm1 shrdl $7,%edi,%edi xorl %ebx,%ebp vpxor %xmm0,%xmm5,%xmm5 movl %edx,%esi addl 28(%esp),%ecx xorl %eax,%edi shldl $5,%edx,%edx vpxor %xmm1,%xmm5,%xmm5 addl %ebp,%ecx andl %edi,%esi vmovdqa 112(%esp),%xmm1 xorl %eax,%edi addl %edx,%ecx shrdl $7,%edx,%edx xorl %eax,%esi vpalignr $8,%xmm2,%xmm3,%xmm6 movl %ecx,%ebp addl 32(%esp),%ebx vpaddd %xmm5,%xmm1,%xmm1 vmovdqa %xmm2,96(%esp) xorl %edi,%edx shldl $5,%ecx,%ecx vpsrldq $4,%xmm5,%xmm0 addl %esi,%ebx andl %edx,%ebp vpxor %xmm2,%xmm6,%xmm6 xorl %edi,%edx addl %ecx,%ebx vpxor %xmm4,%xmm0,%xmm0 shrdl $7,%ecx,%ecx xorl %edi,%ebp vmovdqa %xmm1,16(%esp) movl %ebx,%esi addl 36(%esp),%eax vpxor %xmm0,%xmm6,%xmm6 xorl %edx,%ecx shldl $5,%ebx,%ebx addl %ebp,%eax andl %ecx,%esi vpsrld $31,%xmm6,%xmm0 xorl %edx,%ecx addl %ebx,%eax shrdl $7,%ebx,%ebx xorl %edx,%esi vpslldq $12,%xmm6,%xmm2 vpaddd %xmm6,%xmm6,%xmm6 movl %eax,%ebp addl 40(%esp),%edi xorl %ecx,%ebx shldl $5,%eax,%eax vpsrld $30,%xmm2,%xmm1 vpor %xmm0,%xmm6,%xmm6 addl %esi,%edi andl %ebx,%ebp xorl %ecx,%ebx addl %eax,%edi vpslld $2,%xmm2,%xmm2 vmovdqa 64(%esp),%xmm0 shrdl $7,%eax,%eax xorl %ecx,%ebp vpxor %xmm1,%xmm6,%xmm6 movl %edi,%esi addl 
44(%esp),%edx xorl %ebx,%eax shldl $5,%edi,%edi vpxor %xmm2,%xmm6,%xmm6 addl %ebp,%edx andl %eax,%esi vmovdqa 112(%esp),%xmm2 xorl %ebx,%eax addl %edi,%edx shrdl $7,%edi,%edi xorl %ebx,%esi vpalignr $8,%xmm3,%xmm4,%xmm7 movl %edx,%ebp addl 48(%esp),%ecx vpaddd %xmm6,%xmm2,%xmm2 vmovdqa %xmm3,64(%esp) xorl %eax,%edi shldl $5,%edx,%edx vpsrldq $4,%xmm6,%xmm1 addl %esi,%ecx andl %edi,%ebp vpxor %xmm3,%xmm7,%xmm7 xorl %eax,%edi addl %edx,%ecx vpxor %xmm5,%xmm1,%xmm1 shrdl $7,%edx,%edx xorl %eax,%ebp vmovdqa %xmm2,32(%esp) movl %ecx,%esi addl 52(%esp),%ebx vpxor %xmm1,%xmm7,%xmm7 xorl %edi,%edx shldl $5,%ecx,%ecx addl %ebp,%ebx andl %edx,%esi vpsrld $31,%xmm7,%xmm1 xorl %edi,%edx addl %ecx,%ebx shrdl $7,%ecx,%ecx xorl %edi,%esi vpslldq $12,%xmm7,%xmm3 vpaddd %xmm7,%xmm7,%xmm7 movl %ebx,%ebp addl 56(%esp),%eax xorl %edx,%ecx shldl $5,%ebx,%ebx vpsrld $30,%xmm3,%xmm2 vpor %xmm1,%xmm7,%xmm7 addl %esi,%eax andl %ecx,%ebp xorl %edx,%ecx addl %ebx,%eax vpslld $2,%xmm3,%xmm3 vmovdqa 80(%esp),%xmm1 shrdl $7,%ebx,%ebx xorl %edx,%ebp vpxor %xmm2,%xmm7,%xmm7 movl %eax,%esi addl 60(%esp),%edi xorl %ecx,%ebx shldl $5,%eax,%eax vpxor %xmm3,%xmm7,%xmm7 addl %ebp,%edi andl %ebx,%esi vmovdqa 112(%esp),%xmm3 xorl %ecx,%ebx addl %eax,%edi vpalignr $8,%xmm6,%xmm7,%xmm2 vpxor %xmm4,%xmm0,%xmm0 shrdl $7,%eax,%eax xorl %ecx,%esi movl %edi,%ebp addl (%esp),%edx vpxor %xmm1,%xmm0,%xmm0 vmovdqa %xmm4,80(%esp) xorl %ebx,%eax shldl $5,%edi,%edi vmovdqa %xmm3,%xmm4 vpaddd %xmm7,%xmm3,%xmm3 addl %esi,%edx andl %eax,%ebp vpxor %xmm2,%xmm0,%xmm0 xorl %ebx,%eax addl %edi,%edx shrdl $7,%edi,%edi xorl %ebx,%ebp vpsrld $30,%xmm0,%xmm2 vmovdqa %xmm3,48(%esp) movl %edx,%esi addl 4(%esp),%ecx xorl %eax,%edi shldl $5,%edx,%edx vpslld $2,%xmm0,%xmm0 addl %ebp,%ecx andl %edi,%esi xorl %eax,%edi addl %edx,%ecx shrdl $7,%edx,%edx xorl %eax,%esi movl %ecx,%ebp addl 8(%esp),%ebx vpor %xmm2,%xmm0,%xmm0 xorl %edi,%edx shldl $5,%ecx,%ecx vmovdqa 96(%esp),%xmm2 addl %esi,%ebx andl %edx,%ebp xorl %edi,%edx addl 
%ecx,%ebx addl 12(%esp),%eax xorl %edi,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax vpalignr $8,%xmm7,%xmm0,%xmm3 vpxor %xmm5,%xmm1,%xmm1 addl 16(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm5,96(%esp) addl %esi,%edi xorl %ecx,%ebp vmovdqa %xmm4,%xmm5 vpaddd %xmm0,%xmm4,%xmm4 shrdl $7,%ebx,%ebx addl %eax,%edi vpxor %xmm3,%xmm1,%xmm1 addl 20(%esp),%edx xorl %ebx,%ebp movl %edi,%esi shldl $5,%edi,%edi vpsrld $30,%xmm1,%xmm3 vmovdqa %xmm4,(%esp) addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx vpslld $2,%xmm1,%xmm1 addl 24(%esp),%ecx xorl %eax,%esi movl %edx,%ebp shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%ebp shrdl $7,%edi,%edi addl %edx,%ecx vpor %xmm3,%xmm1,%xmm1 addl 28(%esp),%ebx xorl %edi,%ebp vmovdqa 64(%esp),%xmm3 movl %ecx,%esi shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx vpalignr $8,%xmm0,%xmm1,%xmm4 vpxor %xmm6,%xmm2,%xmm2 addl 32(%esp),%eax xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx vpxor %xmm3,%xmm2,%xmm2 vmovdqa %xmm6,64(%esp) addl %esi,%eax xorl %edx,%ebp vmovdqa 128(%esp),%xmm6 vpaddd %xmm1,%xmm5,%xmm5 shrdl $7,%ecx,%ecx addl %ebx,%eax vpxor %xmm4,%xmm2,%xmm2 addl 36(%esp),%edi xorl %ecx,%ebp movl %eax,%esi shldl $5,%eax,%eax vpsrld $30,%xmm2,%xmm4 vmovdqa %xmm5,16(%esp) addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi vpslld $2,%xmm2,%xmm2 addl 40(%esp),%edx xorl %ebx,%esi movl %edi,%ebp shldl $5,%edi,%edi addl %esi,%edx xorl %ebx,%ebp shrdl $7,%eax,%eax addl %edi,%edx vpor %xmm4,%xmm2,%xmm2 addl 44(%esp),%ecx xorl %eax,%ebp vmovdqa 80(%esp),%xmm4 movl %edx,%esi shldl $5,%edx,%edx addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx vpalignr $8,%xmm1,%xmm2,%xmm5 vpxor %xmm7,%xmm3,%xmm3 addl 48(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx vpxor %xmm4,%xmm3,%xmm3 vmovdqa %xmm7,80(%esp) addl %esi,%ebx xorl %edi,%ebp vmovdqa %xmm6,%xmm7 vpaddd 
%xmm2,%xmm6,%xmm6 shrdl $7,%edx,%edx addl %ecx,%ebx vpxor %xmm5,%xmm3,%xmm3 addl 52(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx vpsrld $30,%xmm3,%xmm5 vmovdqa %xmm6,32(%esp) addl %ebp,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax vpslld $2,%xmm3,%xmm3 addl 56(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax addl %esi,%edi xorl %ecx,%ebp shrdl $7,%ebx,%ebx addl %eax,%edi vpor %xmm5,%xmm3,%xmm3 addl 60(%esp),%edx xorl %ebx,%ebp vmovdqa 96(%esp),%xmm5 movl %edi,%esi shldl $5,%edi,%edi addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx vpalignr $8,%xmm2,%xmm3,%xmm6 vpxor %xmm0,%xmm4,%xmm4 addl (%esp),%ecx xorl %eax,%esi movl %edx,%ebp shldl $5,%edx,%edx vpxor %xmm5,%xmm4,%xmm4 vmovdqa %xmm0,96(%esp) addl %esi,%ecx xorl %eax,%ebp vmovdqa %xmm7,%xmm0 vpaddd %xmm3,%xmm7,%xmm7 shrdl $7,%edi,%edi addl %edx,%ecx vpxor %xmm6,%xmm4,%xmm4 addl 4(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi shldl $5,%ecx,%ecx vpsrld $30,%xmm4,%xmm6 vmovdqa %xmm7,48(%esp) addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx vpslld $2,%xmm4,%xmm4 addl 8(%esp),%eax xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%ebp shrdl $7,%ecx,%ecx addl %ebx,%eax vpor %xmm6,%xmm4,%xmm4 addl 12(%esp),%edi xorl %ecx,%ebp vmovdqa 64(%esp),%xmm6 movl %eax,%esi shldl $5,%eax,%eax addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi vpalignr $8,%xmm3,%xmm4,%xmm7 vpxor %xmm1,%xmm5,%xmm5 addl 16(%esp),%edx xorl %ebx,%esi movl %edi,%ebp shldl $5,%edi,%edi vpxor %xmm6,%xmm5,%xmm5 vmovdqa %xmm1,64(%esp) addl %esi,%edx xorl %ebx,%ebp vmovdqa %xmm0,%xmm1 vpaddd %xmm4,%xmm0,%xmm0 shrdl $7,%eax,%eax addl %edi,%edx vpxor %xmm7,%xmm5,%xmm5 addl 20(%esp),%ecx xorl %eax,%ebp movl %edx,%esi shldl $5,%edx,%edx vpsrld $30,%xmm5,%xmm7 vmovdqa %xmm0,(%esp) addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx vpslld $2,%xmm5,%xmm5 addl 24(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edi,%ebp shrdl 
$7,%edx,%edx addl %ecx,%ebx vpor %xmm7,%xmm5,%xmm5 addl 28(%esp),%eax vmovdqa 80(%esp),%xmm7 shrdl $7,%ecx,%ecx movl %ebx,%esi xorl %edx,%ebp shldl $5,%ebx,%ebx addl %ebp,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax vpalignr $8,%xmm4,%xmm5,%xmm0 vpxor %xmm2,%xmm6,%xmm6 addl 32(%esp),%edi andl %ecx,%esi xorl %edx,%ecx shrdl $7,%ebx,%ebx vpxor %xmm7,%xmm6,%xmm6 vmovdqa %xmm2,80(%esp) movl %eax,%ebp xorl %ecx,%esi vmovdqa %xmm1,%xmm2 vpaddd %xmm5,%xmm1,%xmm1 shldl $5,%eax,%eax addl %esi,%edi vpxor %xmm0,%xmm6,%xmm6 xorl %ebx,%ebp xorl %ecx,%ebx addl %eax,%edi addl 36(%esp),%edx vpsrld $30,%xmm6,%xmm0 vmovdqa %xmm1,16(%esp) andl %ebx,%ebp xorl %ecx,%ebx shrdl $7,%eax,%eax movl %edi,%esi vpslld $2,%xmm6,%xmm6 xorl %ebx,%ebp shldl $5,%edi,%edi addl %ebp,%edx xorl %eax,%esi xorl %ebx,%eax addl %edi,%edx addl 40(%esp),%ecx andl %eax,%esi vpor %xmm0,%xmm6,%xmm6 xorl %ebx,%eax shrdl $7,%edi,%edi vmovdqa 96(%esp),%xmm0 movl %edx,%ebp xorl %eax,%esi shldl $5,%edx,%edx addl %esi,%ecx xorl %edi,%ebp xorl %eax,%edi addl %edx,%ecx addl 44(%esp),%ebx andl %edi,%ebp xorl %eax,%edi shrdl $7,%edx,%edx movl %ecx,%esi xorl %edi,%ebp shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edx,%esi xorl %edi,%edx addl %ecx,%ebx vpalignr $8,%xmm5,%xmm6,%xmm1 vpxor %xmm3,%xmm7,%xmm7 addl 48(%esp),%eax andl %edx,%esi xorl %edi,%edx shrdl $7,%ecx,%ecx vpxor %xmm0,%xmm7,%xmm7 vmovdqa %xmm3,96(%esp) movl %ebx,%ebp xorl %edx,%esi vmovdqa 144(%esp),%xmm3 vpaddd %xmm6,%xmm2,%xmm2 shldl $5,%ebx,%ebx addl %esi,%eax vpxor %xmm1,%xmm7,%xmm7 xorl %ecx,%ebp xorl %edx,%ecx addl %ebx,%eax addl 52(%esp),%edi vpsrld $30,%xmm7,%xmm1 vmovdqa %xmm2,32(%esp) andl %ecx,%ebp xorl %edx,%ecx shrdl $7,%ebx,%ebx movl %eax,%esi vpslld $2,%xmm7,%xmm7 xorl %ecx,%ebp shldl $5,%eax,%eax addl %ebp,%edi xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%edi addl 56(%esp),%edx andl %ebx,%esi vpor %xmm1,%xmm7,%xmm7 xorl %ecx,%ebx shrdl $7,%eax,%eax vmovdqa 64(%esp),%xmm1 movl %edi,%ebp xorl %ebx,%esi shldl $5,%edi,%edi addl %esi,%edx xorl 
%eax,%ebp xorl %ebx,%eax addl %edi,%edx addl 60(%esp),%ecx andl %eax,%ebp xorl %ebx,%eax shrdl $7,%edi,%edi movl %edx,%esi xorl %eax,%ebp shldl $5,%edx,%edx addl %ebp,%ecx xorl %edi,%esi xorl %eax,%edi addl %edx,%ecx vpalignr $8,%xmm6,%xmm7,%xmm2 vpxor %xmm4,%xmm0,%xmm0 addl (%esp),%ebx andl %edi,%esi xorl %eax,%edi shrdl $7,%edx,%edx vpxor %xmm1,%xmm0,%xmm0 vmovdqa %xmm4,64(%esp) movl %ecx,%ebp xorl %edi,%esi vmovdqa %xmm3,%xmm4 vpaddd %xmm7,%xmm3,%xmm3 shldl $5,%ecx,%ecx addl %esi,%ebx vpxor %xmm2,%xmm0,%xmm0 xorl %edx,%ebp xorl %edi,%edx addl %ecx,%ebx addl 4(%esp),%eax vpsrld $30,%xmm0,%xmm2 vmovdqa %xmm3,48(%esp) andl %edx,%ebp xorl %edi,%edx shrdl $7,%ecx,%ecx movl %ebx,%esi vpslld $2,%xmm0,%xmm0 xorl %edx,%ebp shldl $5,%ebx,%ebx addl %ebp,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax addl 8(%esp),%edi andl %ecx,%esi vpor %xmm2,%xmm0,%xmm0 xorl %edx,%ecx shrdl $7,%ebx,%ebx vmovdqa 80(%esp),%xmm2 movl %eax,%ebp xorl %ecx,%esi shldl $5,%eax,%eax addl %esi,%edi xorl %ebx,%ebp xorl %ecx,%ebx addl %eax,%edi addl 12(%esp),%edx andl %ebx,%ebp xorl %ecx,%ebx shrdl $7,%eax,%eax movl %edi,%esi xorl %ebx,%ebp shldl $5,%edi,%edi addl %ebp,%edx xorl %eax,%esi xorl %ebx,%eax addl %edi,%edx vpalignr $8,%xmm7,%xmm0,%xmm3 vpxor %xmm5,%xmm1,%xmm1 addl 16(%esp),%ecx andl %eax,%esi xorl %ebx,%eax shrdl $7,%edi,%edi vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm5,80(%esp) movl %edx,%ebp xorl %eax,%esi vmovdqa %xmm4,%xmm5 vpaddd %xmm0,%xmm4,%xmm4 shldl $5,%edx,%edx addl %esi,%ecx vpxor %xmm3,%xmm1,%xmm1 xorl %edi,%ebp xorl %eax,%edi addl %edx,%ecx addl 20(%esp),%ebx vpsrld $30,%xmm1,%xmm3 vmovdqa %xmm4,(%esp) andl %edi,%ebp xorl %eax,%edi shrdl $7,%edx,%edx movl %ecx,%esi vpslld $2,%xmm1,%xmm1 xorl %edi,%ebp shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edx,%esi xorl %edi,%edx addl %ecx,%ebx addl 24(%esp),%eax andl %edx,%esi vpor %xmm3,%xmm1,%xmm1 xorl %edi,%edx shrdl $7,%ecx,%ecx vmovdqa 96(%esp),%xmm3 movl %ebx,%ebp xorl %edx,%esi shldl $5,%ebx,%ebx addl %esi,%eax xorl %ecx,%ebp xorl 
%edx,%ecx addl %ebx,%eax addl 28(%esp),%edi andl %ecx,%ebp xorl %edx,%ecx shrdl $7,%ebx,%ebx movl %eax,%esi xorl %ecx,%ebp shldl $5,%eax,%eax addl %ebp,%edi xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%edi vpalignr $8,%xmm0,%xmm1,%xmm4 vpxor %xmm6,%xmm2,%xmm2 addl 32(%esp),%edx andl %ebx,%esi xorl %ecx,%ebx shrdl $7,%eax,%eax vpxor %xmm3,%xmm2,%xmm2 vmovdqa %xmm6,96(%esp) movl %edi,%ebp xorl %ebx,%esi vmovdqa %xmm5,%xmm6 vpaddd %xmm1,%xmm5,%xmm5 shldl $5,%edi,%edi addl %esi,%edx vpxor %xmm4,%xmm2,%xmm2 xorl %eax,%ebp xorl %ebx,%eax addl %edi,%edx addl 36(%esp),%ecx vpsrld $30,%xmm2,%xmm4 vmovdqa %xmm5,16(%esp) andl %eax,%ebp xorl %ebx,%eax shrdl $7,%edi,%edi movl %edx,%esi vpslld $2,%xmm2,%xmm2 xorl %eax,%ebp shldl $5,%edx,%edx addl %ebp,%ecx xorl %edi,%esi xorl %eax,%edi addl %edx,%ecx addl 40(%esp),%ebx andl %edi,%esi vpor %xmm4,%xmm2,%xmm2 xorl %eax,%edi shrdl $7,%edx,%edx vmovdqa 64(%esp),%xmm4 movl %ecx,%ebp xorl %edi,%esi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edx,%ebp xorl %edi,%edx addl %ecx,%ebx addl 44(%esp),%eax andl %edx,%ebp xorl %edi,%edx shrdl $7,%ecx,%ecx movl %ebx,%esi xorl %edx,%ebp shldl $5,%ebx,%ebx addl %ebp,%eax xorl %edx,%esi addl %ebx,%eax vpalignr $8,%xmm1,%xmm2,%xmm5 vpxor %xmm7,%xmm3,%xmm3 addl 48(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax vpxor %xmm4,%xmm3,%xmm3 vmovdqa %xmm7,64(%esp) addl %esi,%edi xorl %ecx,%ebp vmovdqa %xmm6,%xmm7 vpaddd %xmm2,%xmm6,%xmm6 shrdl $7,%ebx,%ebx addl %eax,%edi vpxor %xmm5,%xmm3,%xmm3 addl 52(%esp),%edx xorl %ebx,%ebp movl %edi,%esi shldl $5,%edi,%edi vpsrld $30,%xmm3,%xmm5 vmovdqa %xmm6,32(%esp) addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx vpslld $2,%xmm3,%xmm3 addl 56(%esp),%ecx xorl %eax,%esi movl %edx,%ebp shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%ebp shrdl $7,%edi,%edi addl %edx,%ecx vpor %xmm5,%xmm3,%xmm3 addl 60(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl (%esp),%eax vpaddd 
%xmm3,%xmm7,%xmm7 xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx addl %esi,%eax vmovdqa %xmm7,48(%esp) xorl %edx,%ebp shrdl $7,%ecx,%ecx addl %ebx,%eax addl 4(%esp),%edi xorl %ecx,%ebp movl %eax,%esi shldl $5,%eax,%eax addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi addl 8(%esp),%edx xorl %ebx,%esi movl %edi,%ebp shldl $5,%edi,%edi addl %esi,%edx xorl %ebx,%ebp shrdl $7,%eax,%eax addl %edi,%edx addl 12(%esp),%ecx xorl %eax,%ebp movl %edx,%esi shldl $5,%edx,%edx addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx movl 196(%esp),%ebp cmpl 200(%esp),%ebp je L006done vmovdqa 160(%esp),%xmm7 vmovdqa 176(%esp),%xmm6 vmovdqu (%ebp),%xmm0 vmovdqu 16(%ebp),%xmm1 vmovdqu 32(%ebp),%xmm2 vmovdqu 48(%ebp),%xmm3 addl $64,%ebp vpshufb %xmm6,%xmm0,%xmm0 movl %ebp,196(%esp) vmovdqa %xmm7,96(%esp) addl 16(%esp),%ebx xorl %edi,%esi vpshufb %xmm6,%xmm1,%xmm1 movl %ecx,%ebp shldl $5,%ecx,%ecx vpaddd %xmm7,%xmm0,%xmm4 addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx vmovdqa %xmm4,(%esp) addl 20(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 24(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax addl %esi,%edi xorl %ecx,%ebp shrdl $7,%ebx,%ebx addl %eax,%edi addl 28(%esp),%edx xorl %ebx,%ebp movl %edi,%esi shldl $5,%edi,%edi addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx addl 32(%esp),%ecx xorl %eax,%esi vpshufb %xmm6,%xmm2,%xmm2 movl %edx,%ebp shldl $5,%edx,%edx vpaddd %xmm7,%xmm1,%xmm5 addl %esi,%ecx xorl %eax,%ebp shrdl $7,%edi,%edi addl %edx,%ecx vmovdqa %xmm5,16(%esp) addl 36(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl 40(%esp),%eax xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%ebp shrdl $7,%ecx,%ecx addl %ebx,%eax addl 44(%esp),%edi xorl %ecx,%ebp movl %eax,%esi shldl $5,%eax,%eax addl %ebp,%edi xorl %ecx,%esi shrdl 
$7,%ebx,%ebx addl %eax,%edi addl 48(%esp),%edx xorl %ebx,%esi vpshufb %xmm6,%xmm3,%xmm3 movl %edi,%ebp shldl $5,%edi,%edi vpaddd %xmm7,%xmm2,%xmm6 addl %esi,%edx xorl %ebx,%ebp shrdl $7,%eax,%eax addl %edi,%edx vmovdqa %xmm6,32(%esp) addl 52(%esp),%ecx xorl %eax,%ebp movl %edx,%esi shldl $5,%edx,%edx addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx addl 56(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx addl 60(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax shrdl $7,%ecx,%ecx addl %ebx,%eax movl 192(%esp),%ebp addl (%ebp),%eax addl 4(%ebp),%esi addl 8(%ebp),%ecx movl %eax,(%ebp) addl 12(%ebp),%edx movl %esi,4(%ebp) addl 16(%ebp),%edi movl %ecx,%ebx movl %ecx,8(%ebp) xorl %edx,%ebx movl %edx,12(%ebp) movl %edi,16(%ebp) movl %esi,%ebp andl %ebx,%esi movl %ebp,%ebx jmp L005loop .align 4,0x90 L006done: addl 16(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx addl 20(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 24(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax addl %esi,%edi xorl %ecx,%ebp shrdl $7,%ebx,%ebx addl %eax,%edi addl 28(%esp),%edx xorl %ebx,%ebp movl %edi,%esi shldl $5,%edi,%edi addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx addl 32(%esp),%ecx xorl %eax,%esi movl %edx,%ebp shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%ebp shrdl $7,%edi,%edi addl %edx,%ecx addl 36(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl 40(%esp),%eax xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%ebp shrdl $7,%ecx,%ecx addl %ebx,%eax addl 44(%esp),%edi xorl %ecx,%ebp movl %eax,%esi shldl $5,%eax,%eax addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi addl 
48(%esp),%edx xorl %ebx,%esi movl %edi,%ebp shldl $5,%edi,%edi addl %esi,%edx xorl %ebx,%ebp shrdl $7,%eax,%eax addl %edi,%edx addl 52(%esp),%ecx xorl %eax,%ebp movl %edx,%esi shldl $5,%edx,%edx addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx addl 56(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx addl 60(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax shrdl $7,%ecx,%ecx addl %ebx,%eax vzeroall movl 192(%esp),%ebp addl (%ebp),%eax movl 204(%esp),%esp addl 4(%ebp),%esi addl 8(%ebp),%ecx movl %eax,(%ebp) addl 12(%ebp),%edx movl %esi,4(%ebp) addl 16(%ebp),%edi movl %ecx,8(%ebp) movl %edx,12(%ebp) movl %edi,16(%ebp) popl %edi popl %esi popl %ebx popl %ebp ret .align 6,0x90 LK_XX_XX: .long 1518500249,1518500249,1518500249,1518500249 .long 1859775393,1859775393,1859775393,1859775393 .long 2400959708,2400959708,2400959708,2400959708 .long 3395469782,3395469782,3395469782,3395469782 .long 66051,67438087,134810123,202182159 .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115 .byte 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82 .byte 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112 .byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha1-586-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl sha1_block_data_order_nohw .hidden sha1_block_data_order_nohw .type sha1_block_data_order_nohw,@function .align 16 sha1_block_data_order_nohw: .L_sha1_block_data_order_nohw_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%ebp movl 24(%esp),%esi movl 28(%esp),%eax subl $76,%esp shll $6,%eax addl %esi,%eax movl %eax,104(%esp) movl 16(%ebp),%edi jmp .L000loop .align 16 .L000loop: movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx movl %eax,(%esp) movl %ebx,4(%esp) movl %ecx,8(%esp) movl %edx,12(%esp) movl 16(%esi),%eax movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx movl %eax,16(%esp) movl %ebx,20(%esp) movl %ecx,24(%esp) movl %edx,28(%esp) movl 32(%esi),%eax movl 36(%esi),%ebx movl 40(%esi),%ecx movl 44(%esi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx movl %eax,32(%esp) movl %ebx,36(%esp) movl %ecx,40(%esp) movl %edx,44(%esp) movl 48(%esi),%eax movl 52(%esi),%ebx movl 56(%esi),%ecx movl 60(%esi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx movl %eax,48(%esp) movl %ebx,52(%esp) movl %ecx,56(%esp) movl %edx,60(%esp) movl %esi,100(%esp) movl (%ebp),%eax movl 4(%ebp),%ebx movl 8(%ebp),%ecx movl 12(%ebp),%edx movl %ecx,%esi movl %eax,%ebp roll $5,%ebp xorl %edx,%esi addl %edi,%ebp movl (%esp),%edi andl %ebx,%esi rorl $2,%ebx xorl %edx,%esi leal 1518500249(%ebp,%edi,1),%ebp addl %esi,%ebp movl %ebx,%edi movl %ebp,%esi roll $5,%ebp xorl %ecx,%edi addl %edx,%ebp movl 4(%esp),%edx andl %eax,%edi rorl $2,%eax xorl %ecx,%edi leal 1518500249(%ebp,%edx,1),%ebp addl %edi,%ebp movl %eax,%edx movl %ebp,%edi roll $5,%ebp xorl %ebx,%edx addl %ecx,%ebp movl 8(%esp),%ecx andl %esi,%edx rorl $2,%esi xorl %ebx,%edx leal 1518500249(%ebp,%ecx,1),%ebp addl %edx,%ebp movl %esi,%ecx movl %ebp,%edx roll $5,%ebp xorl %eax,%ecx addl %ebx,%ebp movl 12(%esp),%ebx 
andl %edi,%ecx rorl $2,%edi xorl %eax,%ecx leal 1518500249(%ebp,%ebx,1),%ebp addl %ecx,%ebp movl %edi,%ebx movl %ebp,%ecx roll $5,%ebp xorl %esi,%ebx addl %eax,%ebp movl 16(%esp),%eax andl %edx,%ebx rorl $2,%edx xorl %esi,%ebx leal 1518500249(%ebp,%eax,1),%ebp addl %ebx,%ebp movl %edx,%eax movl %ebp,%ebx roll $5,%ebp xorl %edi,%eax addl %esi,%ebp movl 20(%esp),%esi andl %ecx,%eax rorl $2,%ecx xorl %edi,%eax leal 1518500249(%ebp,%esi,1),%ebp addl %eax,%ebp movl %ecx,%esi movl %ebp,%eax roll $5,%ebp xorl %edx,%esi addl %edi,%ebp movl 24(%esp),%edi andl %ebx,%esi rorl $2,%ebx xorl %edx,%esi leal 1518500249(%ebp,%edi,1),%ebp addl %esi,%ebp movl %ebx,%edi movl %ebp,%esi roll $5,%ebp xorl %ecx,%edi addl %edx,%ebp movl 28(%esp),%edx andl %eax,%edi rorl $2,%eax xorl %ecx,%edi leal 1518500249(%ebp,%edx,1),%ebp addl %edi,%ebp movl %eax,%edx movl %ebp,%edi roll $5,%ebp xorl %ebx,%edx addl %ecx,%ebp movl 32(%esp),%ecx andl %esi,%edx rorl $2,%esi xorl %ebx,%edx leal 1518500249(%ebp,%ecx,1),%ebp addl %edx,%ebp movl %esi,%ecx movl %ebp,%edx roll $5,%ebp xorl %eax,%ecx addl %ebx,%ebp movl 36(%esp),%ebx andl %edi,%ecx rorl $2,%edi xorl %eax,%ecx leal 1518500249(%ebp,%ebx,1),%ebp addl %ecx,%ebp movl %edi,%ebx movl %ebp,%ecx roll $5,%ebp xorl %esi,%ebx addl %eax,%ebp movl 40(%esp),%eax andl %edx,%ebx rorl $2,%edx xorl %esi,%ebx leal 1518500249(%ebp,%eax,1),%ebp addl %ebx,%ebp movl %edx,%eax movl %ebp,%ebx roll $5,%ebp xorl %edi,%eax addl %esi,%ebp movl 44(%esp),%esi andl %ecx,%eax rorl $2,%ecx xorl %edi,%eax leal 1518500249(%ebp,%esi,1),%ebp addl %eax,%ebp movl %ecx,%esi movl %ebp,%eax roll $5,%ebp xorl %edx,%esi addl %edi,%ebp movl 48(%esp),%edi andl %ebx,%esi rorl $2,%ebx xorl %edx,%esi leal 1518500249(%ebp,%edi,1),%ebp addl %esi,%ebp movl %ebx,%edi movl %ebp,%esi roll $5,%ebp xorl %ecx,%edi addl %edx,%ebp movl 52(%esp),%edx andl %eax,%edi rorl $2,%eax xorl %ecx,%edi leal 1518500249(%ebp,%edx,1),%ebp addl %edi,%ebp movl %eax,%edx movl %ebp,%edi roll $5,%ebp xorl %ebx,%edx addl 
%ecx,%ebp movl 56(%esp),%ecx andl %esi,%edx rorl $2,%esi xorl %ebx,%edx leal 1518500249(%ebp,%ecx,1),%ebp addl %edx,%ebp movl %esi,%ecx movl %ebp,%edx roll $5,%ebp xorl %eax,%ecx addl %ebx,%ebp movl 60(%esp),%ebx andl %edi,%ecx rorl $2,%edi xorl %eax,%ecx leal 1518500249(%ebp,%ebx,1),%ebp movl (%esp),%ebx addl %ebp,%ecx movl %edi,%ebp xorl 8(%esp),%ebx xorl %esi,%ebp xorl 32(%esp),%ebx andl %edx,%ebp xorl 52(%esp),%ebx roll $1,%ebx xorl %esi,%ebp addl %ebp,%eax movl %ecx,%ebp rorl $2,%edx movl %ebx,(%esp) roll $5,%ebp leal 1518500249(%ebx,%eax,1),%ebx movl 4(%esp),%eax addl %ebp,%ebx movl %edx,%ebp xorl 12(%esp),%eax xorl %edi,%ebp xorl 36(%esp),%eax andl %ecx,%ebp xorl 56(%esp),%eax roll $1,%eax xorl %edi,%ebp addl %ebp,%esi movl %ebx,%ebp rorl $2,%ecx movl %eax,4(%esp) roll $5,%ebp leal 1518500249(%eax,%esi,1),%eax movl 8(%esp),%esi addl %ebp,%eax movl %ecx,%ebp xorl 16(%esp),%esi xorl %edx,%ebp xorl 40(%esp),%esi andl %ebx,%ebp xorl 60(%esp),%esi roll $1,%esi xorl %edx,%ebp addl %ebp,%edi movl %eax,%ebp rorl $2,%ebx movl %esi,8(%esp) roll $5,%ebp leal 1518500249(%esi,%edi,1),%esi movl 12(%esp),%edi addl %ebp,%esi movl %ebx,%ebp xorl 20(%esp),%edi xorl %ecx,%ebp xorl 44(%esp),%edi andl %eax,%ebp xorl (%esp),%edi roll $1,%edi xorl %ecx,%ebp addl %ebp,%edx movl %esi,%ebp rorl $2,%eax movl %edi,12(%esp) roll $5,%ebp leal 1518500249(%edi,%edx,1),%edi movl 16(%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl 24(%esp),%edx xorl %eax,%ebp xorl 48(%esp),%edx xorl %ebx,%ebp xorl 4(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,16(%esp) leal 1859775393(%edx,%ecx,1),%edx movl 20(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 28(%esp),%ecx xorl %esi,%ebp xorl 52(%esp),%ecx xorl %eax,%ebp xorl 8(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,20(%esp) leal 1859775393(%ecx,%ebx,1),%ecx movl 24(%esp),%ebx addl %ebp,%ecx movl %edx,%ebp xorl 32(%esp),%ebx xorl %edi,%ebp xorl 56(%esp),%ebx xorl 
%esi,%ebp xorl 12(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,24(%esp) leal 1859775393(%ebx,%eax,1),%ebx movl 28(%esp),%eax addl %ebp,%ebx movl %ecx,%ebp xorl 36(%esp),%eax xorl %edx,%ebp xorl 60(%esp),%eax xorl %edi,%ebp xorl 16(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,28(%esp) leal 1859775393(%eax,%esi,1),%eax movl 32(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl 40(%esp),%esi xorl %ecx,%ebp xorl (%esp),%esi xorl %edx,%ebp xorl 20(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,32(%esp) leal 1859775393(%esi,%edi,1),%esi movl 36(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 44(%esp),%edi xorl %ebx,%ebp xorl 4(%esp),%edi xorl %ecx,%ebp xorl 24(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,36(%esp) leal 1859775393(%edi,%edx,1),%edi movl 40(%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl 48(%esp),%edx xorl %eax,%ebp xorl 8(%esp),%edx xorl %ebx,%ebp xorl 28(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,40(%esp) leal 1859775393(%edx,%ecx,1),%edx movl 44(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 52(%esp),%ecx xorl %esi,%ebp xorl 12(%esp),%ecx xorl %eax,%ebp xorl 32(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,44(%esp) leal 1859775393(%ecx,%ebx,1),%ecx movl 48(%esp),%ebx addl %ebp,%ecx movl %edx,%ebp xorl 56(%esp),%ebx xorl %edi,%ebp xorl 16(%esp),%ebx xorl %esi,%ebp xorl 36(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,48(%esp) leal 1859775393(%ebx,%eax,1),%ebx movl 52(%esp),%eax addl %ebp,%ebx movl %ecx,%ebp xorl 60(%esp),%eax xorl %edx,%ebp xorl 20(%esp),%eax xorl %edi,%ebp xorl 40(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,52(%esp) leal 1859775393(%eax,%esi,1),%eax movl 56(%esp),%esi addl %ebp,%eax movl 
%ebx,%ebp xorl (%esp),%esi xorl %ecx,%ebp xorl 24(%esp),%esi xorl %edx,%ebp xorl 44(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,56(%esp) leal 1859775393(%esi,%edi,1),%esi movl 60(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 4(%esp),%edi xorl %ebx,%ebp xorl 28(%esp),%edi xorl %ecx,%ebp xorl 48(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,60(%esp) leal 1859775393(%edi,%edx,1),%edi movl (%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl 8(%esp),%edx xorl %eax,%ebp xorl 32(%esp),%edx xorl %ebx,%ebp xorl 52(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,(%esp) leal 1859775393(%edx,%ecx,1),%edx movl 4(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 12(%esp),%ecx xorl %esi,%ebp xorl 36(%esp),%ecx xorl %eax,%ebp xorl 56(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,4(%esp) leal 1859775393(%ecx,%ebx,1),%ecx movl 8(%esp),%ebx addl %ebp,%ecx movl %edx,%ebp xorl 16(%esp),%ebx xorl %edi,%ebp xorl 40(%esp),%ebx xorl %esi,%ebp xorl 60(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,8(%esp) leal 1859775393(%ebx,%eax,1),%ebx movl 12(%esp),%eax addl %ebp,%ebx movl %ecx,%ebp xorl 20(%esp),%eax xorl %edx,%ebp xorl 44(%esp),%eax xorl %edi,%ebp xorl (%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,12(%esp) leal 1859775393(%eax,%esi,1),%eax movl 16(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl 24(%esp),%esi xorl %ecx,%ebp xorl 48(%esp),%esi xorl %edx,%ebp xorl 4(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,16(%esp) leal 1859775393(%esi,%edi,1),%esi movl 20(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 28(%esp),%edi xorl %ebx,%ebp xorl 52(%esp),%edi xorl %ecx,%ebp xorl 8(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,20(%esp) leal 
1859775393(%edi,%edx,1),%edi movl 24(%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl 32(%esp),%edx xorl %eax,%ebp xorl 56(%esp),%edx xorl %ebx,%ebp xorl 12(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,24(%esp) leal 1859775393(%edx,%ecx,1),%edx movl 28(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 36(%esp),%ecx xorl %esi,%ebp xorl 60(%esp),%ecx xorl %eax,%ebp xorl 16(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,28(%esp) leal 1859775393(%ecx,%ebx,1),%ecx movl 32(%esp),%ebx addl %ebp,%ecx movl %edi,%ebp xorl 40(%esp),%ebx xorl %esi,%ebp xorl (%esp),%ebx andl %edx,%ebp xorl 20(%esp),%ebx roll $1,%ebx addl %eax,%ebp rorl $2,%edx movl %ecx,%eax roll $5,%eax movl %ebx,32(%esp) leal 2400959708(%ebx,%ebp,1),%ebx movl %edi,%ebp addl %eax,%ebx andl %esi,%ebp movl 36(%esp),%eax addl %ebp,%ebx movl %edx,%ebp xorl 44(%esp),%eax xorl %edi,%ebp xorl 4(%esp),%eax andl %ecx,%ebp xorl 24(%esp),%eax roll $1,%eax addl %esi,%ebp rorl $2,%ecx movl %ebx,%esi roll $5,%esi movl %eax,36(%esp) leal 2400959708(%eax,%ebp,1),%eax movl %edx,%ebp addl %esi,%eax andl %edi,%ebp movl 40(%esp),%esi addl %ebp,%eax movl %ecx,%ebp xorl 48(%esp),%esi xorl %edx,%ebp xorl 8(%esp),%esi andl %ebx,%ebp xorl 28(%esp),%esi roll $1,%esi addl %edi,%ebp rorl $2,%ebx movl %eax,%edi roll $5,%edi movl %esi,40(%esp) leal 2400959708(%esi,%ebp,1),%esi movl %ecx,%ebp addl %edi,%esi andl %edx,%ebp movl 44(%esp),%edi addl %ebp,%esi movl %ebx,%ebp xorl 52(%esp),%edi xorl %ecx,%ebp xorl 12(%esp),%edi andl %eax,%ebp xorl 32(%esp),%edi roll $1,%edi addl %edx,%ebp rorl $2,%eax movl %esi,%edx roll $5,%edx movl %edi,44(%esp) leal 2400959708(%edi,%ebp,1),%edi movl %ebx,%ebp addl %edx,%edi andl %ecx,%ebp movl 48(%esp),%edx addl %ebp,%edi movl %eax,%ebp xorl 56(%esp),%edx xorl %ebx,%ebp xorl 16(%esp),%edx andl %esi,%ebp xorl 36(%esp),%edx roll $1,%edx addl %ecx,%ebp rorl $2,%esi movl %edi,%ecx roll $5,%ecx movl %edx,48(%esp) leal 
2400959708(%edx,%ebp,1),%edx movl %eax,%ebp addl %ecx,%edx andl %ebx,%ebp movl 52(%esp),%ecx addl %ebp,%edx movl %esi,%ebp xorl 60(%esp),%ecx xorl %eax,%ebp xorl 20(%esp),%ecx andl %edi,%ebp xorl 40(%esp),%ecx roll $1,%ecx addl %ebx,%ebp rorl $2,%edi movl %edx,%ebx roll $5,%ebx movl %ecx,52(%esp) leal 2400959708(%ecx,%ebp,1),%ecx movl %esi,%ebp addl %ebx,%ecx andl %eax,%ebp movl 56(%esp),%ebx addl %ebp,%ecx movl %edi,%ebp xorl (%esp),%ebx xorl %esi,%ebp xorl 24(%esp),%ebx andl %edx,%ebp xorl 44(%esp),%ebx roll $1,%ebx addl %eax,%ebp rorl $2,%edx movl %ecx,%eax roll $5,%eax movl %ebx,56(%esp) leal 2400959708(%ebx,%ebp,1),%ebx movl %edi,%ebp addl %eax,%ebx andl %esi,%ebp movl 60(%esp),%eax addl %ebp,%ebx movl %edx,%ebp xorl 4(%esp),%eax xorl %edi,%ebp xorl 28(%esp),%eax andl %ecx,%ebp xorl 48(%esp),%eax roll $1,%eax addl %esi,%ebp rorl $2,%ecx movl %ebx,%esi roll $5,%esi movl %eax,60(%esp) leal 2400959708(%eax,%ebp,1),%eax movl %edx,%ebp addl %esi,%eax andl %edi,%ebp movl (%esp),%esi addl %ebp,%eax movl %ecx,%ebp xorl 8(%esp),%esi xorl %edx,%ebp xorl 32(%esp),%esi andl %ebx,%ebp xorl 52(%esp),%esi roll $1,%esi addl %edi,%ebp rorl $2,%ebx movl %eax,%edi roll $5,%edi movl %esi,(%esp) leal 2400959708(%esi,%ebp,1),%esi movl %ecx,%ebp addl %edi,%esi andl %edx,%ebp movl 4(%esp),%edi addl %ebp,%esi movl %ebx,%ebp xorl 12(%esp),%edi xorl %ecx,%ebp xorl 36(%esp),%edi andl %eax,%ebp xorl 56(%esp),%edi roll $1,%edi addl %edx,%ebp rorl $2,%eax movl %esi,%edx roll $5,%edx movl %edi,4(%esp) leal 2400959708(%edi,%ebp,1),%edi movl %ebx,%ebp addl %edx,%edi andl %ecx,%ebp movl 8(%esp),%edx addl %ebp,%edi movl %eax,%ebp xorl 16(%esp),%edx xorl %ebx,%ebp xorl 40(%esp),%edx andl %esi,%ebp xorl 60(%esp),%edx roll $1,%edx addl %ecx,%ebp rorl $2,%esi movl %edi,%ecx roll $5,%ecx movl %edx,8(%esp) leal 2400959708(%edx,%ebp,1),%edx movl %eax,%ebp addl %ecx,%edx andl %ebx,%ebp movl 12(%esp),%ecx addl %ebp,%edx movl %esi,%ebp xorl 20(%esp),%ecx xorl %eax,%ebp xorl 44(%esp),%ecx andl %edi,%ebp 
xorl (%esp),%ecx roll $1,%ecx addl %ebx,%ebp rorl $2,%edi movl %edx,%ebx roll $5,%ebx movl %ecx,12(%esp) leal 2400959708(%ecx,%ebp,1),%ecx movl %esi,%ebp addl %ebx,%ecx andl %eax,%ebp movl 16(%esp),%ebx addl %ebp,%ecx movl %edi,%ebp xorl 24(%esp),%ebx xorl %esi,%ebp xorl 48(%esp),%ebx andl %edx,%ebp xorl 4(%esp),%ebx roll $1,%ebx addl %eax,%ebp rorl $2,%edx movl %ecx,%eax roll $5,%eax movl %ebx,16(%esp) leal 2400959708(%ebx,%ebp,1),%ebx movl %edi,%ebp addl %eax,%ebx andl %esi,%ebp movl 20(%esp),%eax addl %ebp,%ebx movl %edx,%ebp xorl 28(%esp),%eax xorl %edi,%ebp xorl 52(%esp),%eax andl %ecx,%ebp xorl 8(%esp),%eax roll $1,%eax addl %esi,%ebp rorl $2,%ecx movl %ebx,%esi roll $5,%esi movl %eax,20(%esp) leal 2400959708(%eax,%ebp,1),%eax movl %edx,%ebp addl %esi,%eax andl %edi,%ebp movl 24(%esp),%esi addl %ebp,%eax movl %ecx,%ebp xorl 32(%esp),%esi xorl %edx,%ebp xorl 56(%esp),%esi andl %ebx,%ebp xorl 12(%esp),%esi roll $1,%esi addl %edi,%ebp rorl $2,%ebx movl %eax,%edi roll $5,%edi movl %esi,24(%esp) leal 2400959708(%esi,%ebp,1),%esi movl %ecx,%ebp addl %edi,%esi andl %edx,%ebp movl 28(%esp),%edi addl %ebp,%esi movl %ebx,%ebp xorl 36(%esp),%edi xorl %ecx,%ebp xorl 60(%esp),%edi andl %eax,%ebp xorl 16(%esp),%edi roll $1,%edi addl %edx,%ebp rorl $2,%eax movl %esi,%edx roll $5,%edx movl %edi,28(%esp) leal 2400959708(%edi,%ebp,1),%edi movl %ebx,%ebp addl %edx,%edi andl %ecx,%ebp movl 32(%esp),%edx addl %ebp,%edi movl %eax,%ebp xorl 40(%esp),%edx xorl %ebx,%ebp xorl (%esp),%edx andl %esi,%ebp xorl 20(%esp),%edx roll $1,%edx addl %ecx,%ebp rorl $2,%esi movl %edi,%ecx roll $5,%ecx movl %edx,32(%esp) leal 2400959708(%edx,%ebp,1),%edx movl %eax,%ebp addl %ecx,%edx andl %ebx,%ebp movl 36(%esp),%ecx addl %ebp,%edx movl %esi,%ebp xorl 44(%esp),%ecx xorl %eax,%ebp xorl 4(%esp),%ecx andl %edi,%ebp xorl 24(%esp),%ecx roll $1,%ecx addl %ebx,%ebp rorl $2,%edi movl %edx,%ebx roll $5,%ebx movl %ecx,36(%esp) leal 2400959708(%ecx,%ebp,1),%ecx movl %esi,%ebp addl %ebx,%ecx andl %eax,%ebp 
movl 40(%esp),%ebx addl %ebp,%ecx movl %edi,%ebp xorl 48(%esp),%ebx xorl %esi,%ebp xorl 8(%esp),%ebx andl %edx,%ebp xorl 28(%esp),%ebx roll $1,%ebx addl %eax,%ebp rorl $2,%edx movl %ecx,%eax roll $5,%eax movl %ebx,40(%esp) leal 2400959708(%ebx,%ebp,1),%ebx movl %edi,%ebp addl %eax,%ebx andl %esi,%ebp movl 44(%esp),%eax addl %ebp,%ebx movl %edx,%ebp xorl 52(%esp),%eax xorl %edi,%ebp xorl 12(%esp),%eax andl %ecx,%ebp xorl 32(%esp),%eax roll $1,%eax addl %esi,%ebp rorl $2,%ecx movl %ebx,%esi roll $5,%esi movl %eax,44(%esp) leal 2400959708(%eax,%ebp,1),%eax movl %edx,%ebp addl %esi,%eax andl %edi,%ebp movl 48(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl 56(%esp),%esi xorl %ecx,%ebp xorl 16(%esp),%esi xorl %edx,%ebp xorl 36(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,48(%esp) leal 3395469782(%esi,%edi,1),%esi movl 52(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 60(%esp),%edi xorl %ebx,%ebp xorl 20(%esp),%edi xorl %ecx,%ebp xorl 40(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,52(%esp) leal 3395469782(%edi,%edx,1),%edi movl 56(%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl (%esp),%edx xorl %eax,%ebp xorl 24(%esp),%edx xorl %ebx,%ebp xorl 44(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,56(%esp) leal 3395469782(%edx,%ecx,1),%edx movl 60(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 4(%esp),%ecx xorl %esi,%ebp xorl 28(%esp),%ecx xorl %eax,%ebp xorl 48(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,60(%esp) leal 3395469782(%ecx,%ebx,1),%ecx movl (%esp),%ebx addl %ebp,%ecx movl %edx,%ebp xorl 8(%esp),%ebx xorl %edi,%ebp xorl 32(%esp),%ebx xorl %esi,%ebp xorl 52(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,(%esp) leal 3395469782(%ebx,%eax,1),%ebx movl 4(%esp),%eax addl %ebp,%ebx movl %ecx,%ebp xorl 12(%esp),%eax xorl %edx,%ebp xorl 36(%esp),%eax xorl 
%edi,%ebp xorl 56(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,4(%esp) leal 3395469782(%eax,%esi,1),%eax movl 8(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl 16(%esp),%esi xorl %ecx,%ebp xorl 40(%esp),%esi xorl %edx,%ebp xorl 60(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,8(%esp) leal 3395469782(%esi,%edi,1),%esi movl 12(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 20(%esp),%edi xorl %ebx,%ebp xorl 44(%esp),%edi xorl %ecx,%ebp xorl (%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,12(%esp) leal 3395469782(%edi,%edx,1),%edi movl 16(%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl 24(%esp),%edx xorl %eax,%ebp xorl 48(%esp),%edx xorl %ebx,%ebp xorl 4(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,16(%esp) leal 3395469782(%edx,%ecx,1),%edx movl 20(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 28(%esp),%ecx xorl %esi,%ebp xorl 52(%esp),%ecx xorl %eax,%ebp xorl 8(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,20(%esp) leal 3395469782(%ecx,%ebx,1),%ecx movl 24(%esp),%ebx addl %ebp,%ecx movl %edx,%ebp xorl 32(%esp),%ebx xorl %edi,%ebp xorl 56(%esp),%ebx xorl %esi,%ebp xorl 12(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,24(%esp) leal 3395469782(%ebx,%eax,1),%ebx movl 28(%esp),%eax addl %ebp,%ebx movl %ecx,%ebp xorl 36(%esp),%eax xorl %edx,%ebp xorl 60(%esp),%eax xorl %edi,%ebp xorl 16(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,28(%esp) leal 3395469782(%eax,%esi,1),%eax movl 32(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl 40(%esp),%esi xorl %ecx,%ebp xorl (%esp),%esi xorl %edx,%ebp xorl 20(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,32(%esp) leal 3395469782(%esi,%edi,1),%esi movl 36(%esp),%edi addl %ebp,%esi movl %eax,%ebp 
xorl 44(%esp),%edi xorl %ebx,%ebp xorl 4(%esp),%edi xorl %ecx,%ebp xorl 24(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,36(%esp) leal 3395469782(%edi,%edx,1),%edi movl 40(%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl 48(%esp),%edx xorl %eax,%ebp xorl 8(%esp),%edx xorl %ebx,%ebp xorl 28(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,40(%esp) leal 3395469782(%edx,%ecx,1),%edx movl 44(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 52(%esp),%ecx xorl %esi,%ebp xorl 12(%esp),%ecx xorl %eax,%ebp xorl 32(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,44(%esp) leal 3395469782(%ecx,%ebx,1),%ecx movl 48(%esp),%ebx addl %ebp,%ecx movl %edx,%ebp xorl 56(%esp),%ebx xorl %edi,%ebp xorl 16(%esp),%ebx xorl %esi,%ebp xorl 36(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,48(%esp) leal 3395469782(%ebx,%eax,1),%ebx movl 52(%esp),%eax addl %ebp,%ebx movl %ecx,%ebp xorl 60(%esp),%eax xorl %edx,%ebp xorl 20(%esp),%eax xorl %edi,%ebp xorl 40(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp leal 3395469782(%eax,%esi,1),%eax movl 56(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl (%esp),%esi xorl %ecx,%ebp xorl 24(%esp),%esi xorl %edx,%ebp xorl 44(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp leal 3395469782(%esi,%edi,1),%esi movl 60(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 4(%esp),%edi xorl %ebx,%ebp xorl 28(%esp),%edi xorl %ecx,%ebp xorl 48(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp leal 3395469782(%edi,%edx,1),%edi addl %ebp,%edi movl 96(%esp),%ebp movl 100(%esp),%edx addl (%ebp),%edi addl 4(%ebp),%esi addl 8(%ebp),%eax addl 12(%ebp),%ebx addl 16(%ebp),%ecx movl %edi,(%ebp) addl $64,%edx movl %esi,4(%ebp) cmpl 104(%esp),%edx movl %eax,8(%ebp) movl %ecx,%edi movl %ebx,12(%ebp) movl %edx,%esi movl %ecx,16(%ebp) jb 
# NOTE(review): machine-generated SHA-1 assembly (perlasm output); the original
# one-instruction-per-line layout was collapsed during extraction.  Code below is
# byte-identical to the original; only these comment lines were added.
#
# This span contains: (1) the tail of sha1_block_data_order_nohw -- loop back via
# jb .L000loop, release the 76-byte frame, restore callee-saved registers, return;
# then (2) sha1_block_data_order_ssse3(state, data, num_blocks): prologue pushes
# %ebp/%ebx/%esi/%edi, uses a call/pop pair (.L001pic_point) to address .LK_XX_XX
# PIC-relatively, loads the constant table into %xmm0-%xmm2/%xmm6/%xmm7, carves a
# 64-byte-aligned frame (subl $208,%esp; andl $-64,%esp; old %esp kept at
# 204(%esp)), computes the input end pointer (shll $6,%edx = blocks*64), saves
# state ptr/data ptr/end ptr at 192/196/200(%esp), loads the 5-word state into
# %eax,%ebx,%ecx,%edx,%edi, and preloads the first 64-byte block via movdqu.
# The ".byte 102,15,56,0,xx" sequences are hand-encoded SSSE3 instructions --
# presumably pshufb for byte-order swapping, since the AVX variant later in this
# file has vpshufb at the corresponding positions (TODO confirm encoding).
# K is pre-added to the schedule words (paddd %xmm7,...) before they are parked
# in the frame at (%esp)..32(%esp), then subtracted back (psubd) to keep raw W.
.L000loop addl $76,%esp popl %edi popl %esi popl %ebx popl %ebp ret .size sha1_block_data_order_nohw,.-.L_sha1_block_data_order_nohw_begin .globl sha1_block_data_order_ssse3 .hidden sha1_block_data_order_ssse3 .type sha1_block_data_order_ssse3,@function .align 16 sha1_block_data_order_ssse3: .L_sha1_block_data_order_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi call .L001pic_point .L001pic_point: popl %ebp leal .LK_XX_XX-.L001pic_point(%ebp),%ebp movdqa (%ebp),%xmm7 movdqa 16(%ebp),%xmm0 movdqa 32(%ebp),%xmm1 movdqa 48(%ebp),%xmm2 movdqa 64(%ebp),%xmm6 movl 20(%esp),%edi movl 24(%esp),%ebp movl 28(%esp),%edx movl %esp,%esi subl $208,%esp andl $-64,%esp movdqa %xmm0,112(%esp) movdqa %xmm1,128(%esp) movdqa %xmm2,144(%esp) shll $6,%edx movdqa %xmm7,160(%esp) addl %ebp,%edx movdqa %xmm6,176(%esp) addl $64,%ebp movl %edi,192(%esp) movl %ebp,196(%esp) movl %edx,200(%esp) movl %esi,204(%esp) movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx movl 16(%edi),%edi movl %ebx,%esi movdqu -64(%ebp),%xmm0 movdqu -48(%ebp),%xmm1 movdqu -32(%ebp),%xmm2 movdqu -16(%ebp),%xmm3 .byte 102,15,56,0,198 .byte 102,15,56,0,206 .byte 102,15,56,0,214 movdqa %xmm7,96(%esp) .byte 102,15,56,0,222 paddd %xmm7,%xmm0 paddd %xmm7,%xmm1 paddd %xmm7,%xmm2 movdqa %xmm0,(%esp) psubd %xmm7,%xmm0 movdqa %xmm1,16(%esp) psubd %xmm7,%xmm1 movdqa %xmm2,32(%esp) movl %ecx,%ebp psubd %xmm7,%xmm2 xorl %edx,%ebp pshufd $238,%xmm0,%xmm4 andl %ebp,%esi jmp .L002loop .align 16 .L002loop: rorl $2,%ebx xorl %edx,%esi movl %eax,%ebp punpcklqdq %xmm1,%xmm4 movdqa %xmm3,%xmm6 addl (%esp),%edi xorl %ecx,%ebx paddd %xmm3,%xmm7 movdqa %xmm0,64(%esp) roll $5,%eax addl %esi,%edi psrldq $4,%xmm6 andl %ebx,%ebp xorl %ecx,%ebx pxor %xmm0,%xmm4 addl %eax,%edi rorl $7,%eax pxor %xmm2,%xmm6 xorl %ecx,%ebp movl %edi,%esi addl 4(%esp),%edx pxor %xmm6,%xmm4 xorl %ebx,%eax roll $5,%edi movdqa %xmm7,48(%esp) addl %ebp,%edx andl %eax,%esi movdqa %xmm4,%xmm0 xorl %ebx,%eax addl %edi,%edx rorl $7,%edi movdqa
# .L002loop body: scalar SHA-1 rounds (roll $5 = a<<<5; rorl $7 / rorl $2
# producing b<<<30) interleaved with SSSE3 message-schedule expansion --
# punpcklqdq/psrldq/pslldq/pxor/psrld/pslld/por chains build the next W words
# in %xmm0-%xmm7 while paddd adds K and the K+W values are staged in the frame
# at (%esp)..48(%esp) for the scalar rounds to consume via addl N(%esp),reg.
%xmm4,%xmm6 xorl %ebx,%esi pslldq $12,%xmm0 paddd %xmm4,%xmm4 movl %edx,%ebp addl 8(%esp),%ecx psrld $31,%xmm6 xorl %eax,%edi roll $5,%edx movdqa %xmm0,%xmm7 addl %esi,%ecx andl %edi,%ebp xorl %eax,%edi psrld $30,%xmm0 addl %edx,%ecx rorl $7,%edx por %xmm6,%xmm4 xorl %eax,%ebp movl %ecx,%esi addl 12(%esp),%ebx pslld $2,%xmm7 xorl %edi,%edx roll $5,%ecx pxor %xmm0,%xmm4 movdqa 96(%esp),%xmm0 addl %ebp,%ebx andl %edx,%esi pxor %xmm7,%xmm4 pshufd $238,%xmm1,%xmm5 xorl %edi,%edx addl %ecx,%ebx rorl $7,%ecx xorl %edi,%esi movl %ebx,%ebp punpcklqdq %xmm2,%xmm5 movdqa %xmm4,%xmm7 addl 16(%esp),%eax xorl %edx,%ecx paddd %xmm4,%xmm0 movdqa %xmm1,80(%esp) roll $5,%ebx addl %esi,%eax psrldq $4,%xmm7 andl %ecx,%ebp xorl %edx,%ecx pxor %xmm1,%xmm5 addl %ebx,%eax rorl $7,%ebx pxor %xmm3,%xmm7 xorl %edx,%ebp movl %eax,%esi addl 20(%esp),%edi pxor %xmm7,%xmm5 xorl %ecx,%ebx roll $5,%eax movdqa %xmm0,(%esp) addl %ebp,%edi andl %ebx,%esi movdqa %xmm5,%xmm1 xorl %ecx,%ebx addl %eax,%edi rorl $7,%eax movdqa %xmm5,%xmm7 xorl %ecx,%esi pslldq $12,%xmm1 paddd %xmm5,%xmm5 movl %edi,%ebp addl 24(%esp),%edx psrld $31,%xmm7 xorl %ebx,%eax roll $5,%edi movdqa %xmm1,%xmm0 addl %esi,%edx andl %eax,%ebp xorl %ebx,%eax psrld $30,%xmm1 addl %edi,%edx rorl $7,%edi por %xmm7,%xmm5 xorl %ebx,%ebp movl %edx,%esi addl 28(%esp),%ecx pslld $2,%xmm0 xorl %eax,%edi roll $5,%edx pxor %xmm1,%xmm5 movdqa 112(%esp),%xmm1 addl %ebp,%ecx andl %edi,%esi pxor %xmm0,%xmm5 pshufd $238,%xmm2,%xmm6 xorl %eax,%edi addl %edx,%ecx rorl $7,%edx xorl %eax,%esi movl %ecx,%ebp punpcklqdq %xmm3,%xmm6 movdqa %xmm5,%xmm0 addl 32(%esp),%ebx xorl %edi,%edx paddd %xmm5,%xmm1 movdqa %xmm2,96(%esp) roll $5,%ecx addl %esi,%ebx psrldq $4,%xmm0 andl %edx,%ebp xorl %edi,%edx pxor %xmm2,%xmm6 addl %ecx,%ebx rorl $7,%ecx pxor %xmm4,%xmm0 xorl %edi,%ebp movl %ebx,%esi addl 36(%esp),%eax pxor %xmm0,%xmm6 xorl %edx,%ecx roll $5,%ebx movdqa %xmm1,16(%esp) addl %ebp,%eax andl %ecx,%esi movdqa %xmm6,%xmm2 xorl %edx,%ecx addl %ebx,%eax rorl
# Schedule expansion continues; each group rotates the roles of %xmm0-%xmm7
# while the scalar rounds rotate the working variables through
# %eax/%ebx/%ecx/%edx/%edi (with %ebp/%esi as the F-function scratch pair).
$7,%ebx movdqa %xmm6,%xmm0 xorl %edx,%esi pslldq $12,%xmm2 paddd %xmm6,%xmm6 movl %eax,%ebp addl 40(%esp),%edi psrld $31,%xmm0 xorl %ecx,%ebx roll $5,%eax movdqa %xmm2,%xmm1 addl %esi,%edi andl %ebx,%ebp xorl %ecx,%ebx psrld $30,%xmm2 addl %eax,%edi rorl $7,%eax por %xmm0,%xmm6 xorl %ecx,%ebp movdqa 64(%esp),%xmm0 movl %edi,%esi addl 44(%esp),%edx pslld $2,%xmm1 xorl %ebx,%eax roll $5,%edi pxor %xmm2,%xmm6 movdqa 112(%esp),%xmm2 addl %ebp,%edx andl %eax,%esi pxor %xmm1,%xmm6 pshufd $238,%xmm3,%xmm7 xorl %ebx,%eax addl %edi,%edx rorl $7,%edi xorl %ebx,%esi movl %edx,%ebp punpcklqdq %xmm4,%xmm7 movdqa %xmm6,%xmm1 addl 48(%esp),%ecx xorl %eax,%edi paddd %xmm6,%xmm2 movdqa %xmm3,64(%esp) roll $5,%edx addl %esi,%ecx psrldq $4,%xmm1 andl %edi,%ebp xorl %eax,%edi pxor %xmm3,%xmm7 addl %edx,%ecx rorl $7,%edx pxor %xmm5,%xmm1 xorl %eax,%ebp movl %ecx,%esi addl 52(%esp),%ebx pxor %xmm1,%xmm7 xorl %edi,%edx roll $5,%ecx movdqa %xmm2,32(%esp) addl %ebp,%ebx andl %edx,%esi movdqa %xmm7,%xmm3 xorl %edi,%edx addl %ecx,%ebx rorl $7,%ecx movdqa %xmm7,%xmm1 xorl %edi,%esi pslldq $12,%xmm3 paddd %xmm7,%xmm7 movl %ebx,%ebp addl 56(%esp),%eax psrld $31,%xmm1 xorl %edx,%ecx roll $5,%ebx movdqa %xmm3,%xmm2 addl %esi,%eax andl %ecx,%ebp xorl %edx,%ecx psrld $30,%xmm3 addl %ebx,%eax rorl $7,%ebx por %xmm1,%xmm7 xorl %edx,%ebp movdqa 80(%esp),%xmm1 movl %eax,%esi addl 60(%esp),%edi pslld $2,%xmm2 xorl %ecx,%ebx roll $5,%eax pxor %xmm3,%xmm7 movdqa 112(%esp),%xmm3 addl %ebp,%edi andl %ebx,%esi pxor %xmm2,%xmm7 pshufd $238,%xmm6,%xmm2 xorl %ecx,%ebx addl %eax,%edi rorl $7,%eax pxor %xmm4,%xmm0 punpcklqdq %xmm7,%xmm2 xorl %ecx,%esi movl %edi,%ebp addl (%esp),%edx pxor %xmm1,%xmm0 movdqa %xmm4,80(%esp) xorl %ebx,%eax roll $5,%edi movdqa %xmm3,%xmm4 addl %esi,%edx paddd %xmm7,%xmm3 andl %eax,%ebp pxor %xmm2,%xmm0 xorl %ebx,%eax addl %edi,%edx rorl $7,%edi xorl %ebx,%ebp movdqa %xmm0,%xmm2 movdqa %xmm3,48(%esp) movl %edx,%esi addl 4(%esp),%ecx xorl %eax,%edi roll $5,%edx pslld $2,%xmm0 addl
# Rounds here use the xor-only F function (pairs of xorl, no andl between the
# round additions), i.e. the parity-style round group.
%ebp,%ecx andl %edi,%esi psrld $30,%xmm2 xorl %eax,%edi addl %edx,%ecx rorl $7,%edx xorl %eax,%esi movl %ecx,%ebp addl 8(%esp),%ebx xorl %edi,%edx roll $5,%ecx por %xmm2,%xmm0 addl %esi,%ebx andl %edx,%ebp movdqa 96(%esp),%xmm2 xorl %edi,%edx addl %ecx,%ebx addl 12(%esp),%eax xorl %edi,%ebp movl %ebx,%esi pshufd $238,%xmm7,%xmm3 roll $5,%ebx addl %ebp,%eax xorl %edx,%esi rorl $7,%ecx addl %ebx,%eax addl 16(%esp),%edi pxor %xmm5,%xmm1 punpcklqdq %xmm0,%xmm3 xorl %ecx,%esi movl %eax,%ebp roll $5,%eax pxor %xmm2,%xmm1 movdqa %xmm5,96(%esp) addl %esi,%edi xorl %ecx,%ebp movdqa %xmm4,%xmm5 rorl $7,%ebx paddd %xmm0,%xmm4 addl %eax,%edi pxor %xmm3,%xmm1 addl 20(%esp),%edx xorl %ebx,%ebp movl %edi,%esi roll $5,%edi movdqa %xmm1,%xmm3 movdqa %xmm4,(%esp) addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax addl %edi,%edx pslld $2,%xmm1 addl 24(%esp),%ecx xorl %eax,%esi psrld $30,%xmm3 movl %edx,%ebp roll $5,%edx addl %esi,%ecx xorl %eax,%ebp rorl $7,%edi addl %edx,%ecx por %xmm3,%xmm1 addl 28(%esp),%ebx xorl %edi,%ebp movdqa 64(%esp),%xmm3 movl %ecx,%esi roll $5,%ecx addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx pshufd $238,%xmm0,%xmm4 addl %ecx,%ebx addl 32(%esp),%eax pxor %xmm6,%xmm2 punpcklqdq %xmm1,%xmm4 xorl %edx,%esi movl %ebx,%ebp roll $5,%ebx pxor %xmm3,%xmm2 movdqa %xmm6,64(%esp) addl %esi,%eax xorl %edx,%ebp movdqa 128(%esp),%xmm6 rorl $7,%ecx paddd %xmm1,%xmm5 addl %ebx,%eax pxor %xmm4,%xmm2 addl 36(%esp),%edi xorl %ecx,%ebp movl %eax,%esi roll $5,%eax movdqa %xmm2,%xmm4 movdqa %xmm5,16(%esp) addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx addl %eax,%edi pslld $2,%xmm2 addl 40(%esp),%edx xorl %ebx,%esi psrld $30,%xmm4 movl %edi,%ebp roll $5,%edi addl %esi,%edx xorl %ebx,%ebp rorl $7,%eax addl %edi,%edx por %xmm4,%xmm2 addl 44(%esp),%ecx xorl %eax,%ebp movdqa 80(%esp),%xmm4 movl %edx,%esi roll $5,%edx addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi pshufd $238,%xmm1,%xmm5 addl %edx,%ecx addl 48(%esp),%ebx pxor %xmm7,%xmm3 punpcklqdq %xmm2,%xmm5 xorl %edi,%esi movl %ecx,%ebp roll $5,%ecx
# A new K constant block is loaded mid-stream below (movdqa 128(%esp) above /
# 144(%esp) further down reload staged constants from the frame).
pxor %xmm4,%xmm3 movdqa %xmm7,80(%esp) addl %esi,%ebx xorl %edi,%ebp movdqa %xmm6,%xmm7 rorl $7,%edx paddd %xmm2,%xmm6 addl %ecx,%ebx pxor %xmm5,%xmm3 addl 52(%esp),%eax xorl %edx,%ebp movl %ebx,%esi roll $5,%ebx movdqa %xmm3,%xmm5 movdqa %xmm6,32(%esp) addl %ebp,%eax xorl %edx,%esi rorl $7,%ecx addl %ebx,%eax pslld $2,%xmm3 addl 56(%esp),%edi xorl %ecx,%esi psrld $30,%xmm5 movl %eax,%ebp roll $5,%eax addl %esi,%edi xorl %ecx,%ebp rorl $7,%ebx addl %eax,%edi por %xmm5,%xmm3 addl 60(%esp),%edx xorl %ebx,%ebp movdqa 96(%esp),%xmm5 movl %edi,%esi roll $5,%edi addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax pshufd $238,%xmm2,%xmm6 addl %edi,%edx addl (%esp),%ecx pxor %xmm0,%xmm4 punpcklqdq %xmm3,%xmm6 xorl %eax,%esi movl %edx,%ebp roll $5,%edx pxor %xmm5,%xmm4 movdqa %xmm0,96(%esp) addl %esi,%ecx xorl %eax,%ebp movdqa %xmm7,%xmm0 rorl $7,%edi paddd %xmm3,%xmm7 addl %edx,%ecx pxor %xmm6,%xmm4 addl 4(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi roll $5,%ecx movdqa %xmm4,%xmm6 movdqa %xmm7,48(%esp) addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx addl %ecx,%ebx pslld $2,%xmm4 addl 8(%esp),%eax xorl %edx,%esi movl %ebx,%ebp roll $5,%ebx addl %esi,%eax xorl %edx,%ebp rorl $7,%ecx addl %ebx,%eax por %xmm6,%xmm4 addl 12(%esp),%edi xorl %ecx,%ebp movdqa 64(%esp),%xmm6 movl %eax,%esi roll $5,%eax addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx pshufd $238,%xmm3,%xmm7 addl %eax,%edi addl 16(%esp),%edx pxor %xmm1,%xmm5 punpcklqdq %xmm4,%xmm7 xorl %ebx,%esi movl %edi,%ebp roll $5,%edi pxor %xmm6,%xmm5 movdqa %xmm1,64(%esp) addl %esi,%edx xorl %ebx,%ebp movdqa %xmm0,%xmm1 rorl $7,%eax paddd %xmm4,%xmm0 addl %edi,%edx pxor %xmm7,%xmm5 addl 20(%esp),%ecx xorl %eax,%ebp movl %edx,%esi roll $5,%edx movdqa %xmm5,%xmm7 movdqa %xmm0,(%esp) addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi addl %edx,%ecx pslld $2,%xmm5 addl 24(%esp),%ebx xorl %edi,%esi psrld $30,%xmm7 movl %ecx,%ebp roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx addl %ecx,%ebx por %xmm7,%xmm5 addl 28(%esp),%eax movdqa
# The andl/xorl mix below (two-input andl plus xorl per round) is the
# majority-style F function round group.
80(%esp),%xmm7 rorl $7,%ecx movl %ebx,%esi xorl %edx,%ebp roll $5,%ebx pshufd $238,%xmm4,%xmm0 addl %ebp,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax addl 32(%esp),%edi pxor %xmm2,%xmm6 punpcklqdq %xmm5,%xmm0 andl %ecx,%esi xorl %edx,%ecx rorl $7,%ebx pxor %xmm7,%xmm6 movdqa %xmm2,80(%esp) movl %eax,%ebp xorl %ecx,%esi roll $5,%eax movdqa %xmm1,%xmm2 addl %esi,%edi paddd %xmm5,%xmm1 xorl %ebx,%ebp pxor %xmm0,%xmm6 xorl %ecx,%ebx addl %eax,%edi addl 36(%esp),%edx andl %ebx,%ebp movdqa %xmm6,%xmm0 movdqa %xmm1,16(%esp) xorl %ecx,%ebx rorl $7,%eax movl %edi,%esi xorl %ebx,%ebp roll $5,%edi pslld $2,%xmm6 addl %ebp,%edx xorl %eax,%esi psrld $30,%xmm0 xorl %ebx,%eax addl %edi,%edx addl 40(%esp),%ecx andl %eax,%esi xorl %ebx,%eax rorl $7,%edi por %xmm0,%xmm6 movl %edx,%ebp xorl %eax,%esi movdqa 96(%esp),%xmm0 roll $5,%edx addl %esi,%ecx xorl %edi,%ebp xorl %eax,%edi addl %edx,%ecx pshufd $238,%xmm5,%xmm1 addl 44(%esp),%ebx andl %edi,%ebp xorl %eax,%edi rorl $7,%edx movl %ecx,%esi xorl %edi,%ebp roll $5,%ecx addl %ebp,%ebx xorl %edx,%esi xorl %edi,%edx addl %ecx,%ebx addl 48(%esp),%eax pxor %xmm3,%xmm7 punpcklqdq %xmm6,%xmm1 andl %edx,%esi xorl %edi,%edx rorl $7,%ecx pxor %xmm0,%xmm7 movdqa %xmm3,96(%esp) movl %ebx,%ebp xorl %edx,%esi roll $5,%ebx movdqa 144(%esp),%xmm3 addl %esi,%eax paddd %xmm6,%xmm2 xorl %ecx,%ebp pxor %xmm1,%xmm7 xorl %edx,%ecx addl %ebx,%eax addl 52(%esp),%edi andl %ecx,%ebp movdqa %xmm7,%xmm1 movdqa %xmm2,32(%esp) xorl %edx,%ecx rorl $7,%ebx movl %eax,%esi xorl %ecx,%ebp roll $5,%eax pslld $2,%xmm7 addl %ebp,%edi xorl %ebx,%esi psrld $30,%xmm1 xorl %ecx,%ebx addl %eax,%edi addl 56(%esp),%edx andl %ebx,%esi xorl %ecx,%ebx rorl $7,%eax por %xmm1,%xmm7 movl %edi,%ebp xorl %ebx,%esi movdqa 64(%esp),%xmm1 roll $5,%edi addl %esi,%edx xorl %eax,%ebp xorl %ebx,%eax addl %edi,%edx pshufd $238,%xmm6,%xmm2 addl 60(%esp),%ecx andl %eax,%ebp xorl %ebx,%eax rorl $7,%edi movl %edx,%esi xorl %eax,%ebp roll $5,%edx addl %ebp,%ecx xorl %edi,%esi xorl %eax,%edi
# Majority-function rounds continue, still overlapped with schedule expansion.
addl %edx,%ecx addl (%esp),%ebx pxor %xmm4,%xmm0 punpcklqdq %xmm7,%xmm2 andl %edi,%esi xorl %eax,%edi rorl $7,%edx pxor %xmm1,%xmm0 movdqa %xmm4,64(%esp) movl %ecx,%ebp xorl %edi,%esi roll $5,%ecx movdqa %xmm3,%xmm4 addl %esi,%ebx paddd %xmm7,%xmm3 xorl %edx,%ebp pxor %xmm2,%xmm0 xorl %edi,%edx addl %ecx,%ebx addl 4(%esp),%eax andl %edx,%ebp movdqa %xmm0,%xmm2 movdqa %xmm3,48(%esp) xorl %edi,%edx rorl $7,%ecx movl %ebx,%esi xorl %edx,%ebp roll $5,%ebx pslld $2,%xmm0 addl %ebp,%eax xorl %ecx,%esi psrld $30,%xmm2 xorl %edx,%ecx addl %ebx,%eax addl 8(%esp),%edi andl %ecx,%esi xorl %edx,%ecx rorl $7,%ebx por %xmm2,%xmm0 movl %eax,%ebp xorl %ecx,%esi movdqa 80(%esp),%xmm2 roll $5,%eax addl %esi,%edi xorl %ebx,%ebp xorl %ecx,%ebx addl %eax,%edi pshufd $238,%xmm7,%xmm3 addl 12(%esp),%edx andl %ebx,%ebp xorl %ecx,%ebx rorl $7,%eax movl %edi,%esi xorl %ebx,%ebp roll $5,%edi addl %ebp,%edx xorl %eax,%esi xorl %ebx,%eax addl %edi,%edx addl 16(%esp),%ecx pxor %xmm5,%xmm1 punpcklqdq %xmm0,%xmm3 andl %eax,%esi xorl %ebx,%eax rorl $7,%edi pxor %xmm2,%xmm1 movdqa %xmm5,80(%esp) movl %edx,%ebp xorl %eax,%esi roll $5,%edx movdqa %xmm4,%xmm5 addl %esi,%ecx paddd %xmm0,%xmm4 xorl %edi,%ebp pxor %xmm3,%xmm1 xorl %eax,%edi addl %edx,%ecx addl 20(%esp),%ebx andl %edi,%ebp movdqa %xmm1,%xmm3 movdqa %xmm4,(%esp) xorl %eax,%edi rorl $7,%edx movl %ecx,%esi xorl %edi,%ebp roll $5,%ecx pslld $2,%xmm1 addl %ebp,%ebx xorl %edx,%esi psrld $30,%xmm3 xorl %edi,%edx addl %ecx,%ebx addl 24(%esp),%eax andl %edx,%esi xorl %edi,%edx rorl $7,%ecx por %xmm3,%xmm1 movl %ebx,%ebp xorl %edx,%esi movdqa 96(%esp),%xmm3 roll $5,%ebx addl %esi,%eax xorl %ecx,%ebp xorl %edx,%ecx addl %ebx,%eax pshufd $238,%xmm0,%xmm4 addl 28(%esp),%edi andl %ecx,%ebp xorl %edx,%ecx rorl $7,%ebx movl %eax,%esi xorl %ecx,%ebp roll $5,%eax addl %ebp,%edi xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%edi addl 32(%esp),%edx pxor %xmm6,%xmm2 punpcklqdq %xmm1,%xmm4 andl %ebx,%esi xorl %ecx,%ebx rorl $7,%eax pxor %xmm3,%xmm2 movdqa
# Final schedule-expansion groups; once W is exhausted the remaining rounds
# below run scalar-only (no more xmm schedule work interleaved).
%xmm6,96(%esp) movl %edi,%ebp xorl %ebx,%esi roll $5,%edi movdqa %xmm5,%xmm6 addl %esi,%edx paddd %xmm1,%xmm5 xorl %eax,%ebp pxor %xmm4,%xmm2 xorl %ebx,%eax addl %edi,%edx addl 36(%esp),%ecx andl %eax,%ebp movdqa %xmm2,%xmm4 movdqa %xmm5,16(%esp) xorl %ebx,%eax rorl $7,%edi movl %edx,%esi xorl %eax,%ebp roll $5,%edx pslld $2,%xmm2 addl %ebp,%ecx xorl %edi,%esi psrld $30,%xmm4 xorl %eax,%edi addl %edx,%ecx addl 40(%esp),%ebx andl %edi,%esi xorl %eax,%edi rorl $7,%edx por %xmm4,%xmm2 movl %ecx,%ebp xorl %edi,%esi movdqa 64(%esp),%xmm4 roll $5,%ecx addl %esi,%ebx xorl %edx,%ebp xorl %edi,%edx addl %ecx,%ebx pshufd $238,%xmm1,%xmm5 addl 44(%esp),%eax andl %edx,%ebp xorl %edi,%edx rorl $7,%ecx movl %ebx,%esi xorl %edx,%ebp roll $5,%ebx addl %ebp,%eax xorl %edx,%esi addl %ebx,%eax addl 48(%esp),%edi pxor %xmm7,%xmm3 punpcklqdq %xmm2,%xmm5 xorl %ecx,%esi movl %eax,%ebp roll $5,%eax pxor %xmm4,%xmm3 movdqa %xmm7,64(%esp) addl %esi,%edi xorl %ecx,%ebp movdqa %xmm6,%xmm7 rorl $7,%ebx paddd %xmm2,%xmm6 addl %eax,%edi pxor %xmm5,%xmm3 addl 52(%esp),%edx xorl %ebx,%ebp movl %edi,%esi roll $5,%edi movdqa %xmm3,%xmm5 movdqa %xmm6,32(%esp) addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax addl %edi,%edx pslld $2,%xmm3 addl 56(%esp),%ecx xorl %eax,%esi psrld $30,%xmm5 movl %edx,%ebp roll $5,%edx addl %esi,%ecx xorl %eax,%ebp rorl $7,%edi addl %edx,%ecx por %xmm5,%xmm3 addl 60(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi roll $5,%ecx addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx addl %ecx,%ebx addl (%esp),%eax xorl %edx,%esi movl %ebx,%ebp roll $5,%ebx addl %esi,%eax xorl %edx,%ebp rorl $7,%ecx paddd %xmm3,%xmm7 addl %ebx,%eax addl 4(%esp),%edi xorl %ecx,%ebp movl %eax,%esi movdqa %xmm7,48(%esp) roll $5,%eax addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx addl %eax,%edi addl 8(%esp),%edx xorl %ebx,%esi movl %edi,%ebp roll $5,%edi addl %esi,%edx xorl %ebx,%ebp rorl $7,%eax addl %edi,%edx addl 12(%esp),%ecx xorl %eax,%ebp movl %edx,%esi roll $5,%edx addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi addl
# End of this 64-byte block: if the data pointer (196(%esp)) has not reached the
# end pointer (200(%esp)), the next block is loaded and byte-swapped
# (.byte 102,15,56,0,xx -- see prologue note) while the final rounds of the
# current block are still being computed; otherwise control jumps to .L003done.
%edx,%ecx movl 196(%esp),%ebp cmpl 200(%esp),%ebp je .L003done movdqa 160(%esp),%xmm7 movdqa 176(%esp),%xmm6 movdqu (%ebp),%xmm0 movdqu 16(%ebp),%xmm1 movdqu 32(%ebp),%xmm2 movdqu 48(%ebp),%xmm3 addl $64,%ebp .byte 102,15,56,0,198 movl %ebp,196(%esp) movdqa %xmm7,96(%esp) addl 16(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx .byte 102,15,56,0,206 addl %ecx,%ebx addl 20(%esp),%eax xorl %edx,%ebp movl %ebx,%esi paddd %xmm7,%xmm0 roll $5,%ebx addl %ebp,%eax xorl %edx,%esi rorl $7,%ecx movdqa %xmm0,(%esp) addl %ebx,%eax addl 24(%esp),%edi xorl %ecx,%esi movl %eax,%ebp psubd %xmm7,%xmm0 roll $5,%eax addl %esi,%edi xorl %ecx,%ebp rorl $7,%ebx addl %eax,%edi addl 28(%esp),%edx xorl %ebx,%ebp movl %edi,%esi roll $5,%edi addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax addl %edi,%edx addl 32(%esp),%ecx xorl %eax,%esi movl %edx,%ebp roll $5,%edx addl %esi,%ecx xorl %eax,%ebp rorl $7,%edi .byte 102,15,56,0,214 addl %edx,%ecx addl 36(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi paddd %xmm7,%xmm1 roll $5,%ecx addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx movdqa %xmm1,16(%esp) addl %ecx,%ebx addl 40(%esp),%eax xorl %edx,%esi movl %ebx,%ebp psubd %xmm7,%xmm1 roll $5,%ebx addl %esi,%eax xorl %edx,%ebp rorl $7,%ecx addl %ebx,%eax addl 44(%esp),%edi xorl %ecx,%ebp movl %eax,%esi roll $5,%eax addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx addl %eax,%edi addl 48(%esp),%edx xorl %ebx,%esi movl %edi,%ebp roll $5,%edi addl %esi,%edx xorl %ebx,%ebp rorl $7,%eax .byte 102,15,56,0,222 addl %edi,%edx addl 52(%esp),%ecx xorl %eax,%ebp movl %edx,%esi paddd %xmm7,%xmm2 roll $5,%edx addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi movdqa %xmm2,32(%esp) addl %edx,%ecx addl 56(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp psubd %xmm7,%xmm2 roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx addl %ecx,%ebx addl 60(%esp),%eax xorl %edx,%ebp movl %ebx,%esi roll $5,%ebx addl %ebp,%eax rorl $7,%ecx addl %ebx,%eax movl 192(%esp),%ebp addl (%ebp),%eax addl 4(%ebp),%esi addl
# Add the working variables into the 5-word state at 192(%esp) and loop back
# (jmp .L002loop).  .L003done: finish the last block's remaining rounds
# scalar-only, fold into the state, restore the caller's %esp from 204(%esp),
# pop callee-saved registers, and return.
8(%ebp),%ecx movl %eax,(%ebp) addl 12(%ebp),%edx movl %esi,4(%ebp) addl 16(%ebp),%edi movl %ecx,8(%ebp) movl %ecx,%ebx movl %edx,12(%ebp) xorl %edx,%ebx movl %edi,16(%ebp) movl %esi,%ebp pshufd $238,%xmm0,%xmm4 andl %ebx,%esi movl %ebp,%ebx jmp .L002loop .align 16 .L003done: addl 16(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx addl %ecx,%ebx addl 20(%esp),%eax xorl %edx,%ebp movl %ebx,%esi roll $5,%ebx addl %ebp,%eax xorl %edx,%esi rorl $7,%ecx addl %ebx,%eax addl 24(%esp),%edi xorl %ecx,%esi movl %eax,%ebp roll $5,%eax addl %esi,%edi xorl %ecx,%ebp rorl $7,%ebx addl %eax,%edi addl 28(%esp),%edx xorl %ebx,%ebp movl %edi,%esi roll $5,%edi addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax addl %edi,%edx addl 32(%esp),%ecx xorl %eax,%esi movl %edx,%ebp roll $5,%edx addl %esi,%ecx xorl %eax,%ebp rorl $7,%edi addl %edx,%ecx addl 36(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi roll $5,%ecx addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx addl %ecx,%ebx addl 40(%esp),%eax xorl %edx,%esi movl %ebx,%ebp roll $5,%ebx addl %esi,%eax xorl %edx,%ebp rorl $7,%ecx addl %ebx,%eax addl 44(%esp),%edi xorl %ecx,%ebp movl %eax,%esi roll $5,%eax addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx addl %eax,%edi addl 48(%esp),%edx xorl %ebx,%esi movl %edi,%ebp roll $5,%edi addl %esi,%edx xorl %ebx,%ebp rorl $7,%eax addl %edi,%edx addl 52(%esp),%ecx xorl %eax,%ebp movl %edx,%esi roll $5,%edx addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi addl %edx,%ecx addl 56(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx addl %ecx,%ebx addl 60(%esp),%eax xorl %edx,%ebp movl %ebx,%esi roll $5,%ebx addl %ebp,%eax rorl $7,%ecx addl %ebx,%eax movl 192(%esp),%ebp addl (%ebp),%eax movl 204(%esp),%esp addl 4(%ebp),%esi addl 8(%ebp),%ecx movl %eax,(%ebp) addl 12(%ebp),%edx movl %esi,4(%ebp) addl 16(%ebp),%edi movl %ecx,8(%ebp) movl %edx,12(%ebp) movl %edi,16(%ebp) popl %edi popl %esi popl %ebx popl %ebp ret .size
sha1_block_data_order_ssse3,.-.L_sha1_block_data_order_ssse3_begin .globl sha1_block_data_order_avx .hidden sha1_block_data_order_avx .type sha1_block_data_order_avx,@function .align 16 sha1_block_data_order_avx: .L_sha1_block_data_order_avx_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi call .L004pic_point .L004pic_point: popl %ebp leal .LK_XX_XX-.L004pic_point(%ebp),%ebp vzeroall vmovdqa (%ebp),%xmm7 vmovdqa 16(%ebp),%xmm0 vmovdqa 32(%ebp),%xmm1 vmovdqa 48(%ebp),%xmm2 vmovdqa 64(%ebp),%xmm6 movl 20(%esp),%edi movl 24(%esp),%ebp movl 28(%esp),%edx movl %esp,%esi subl $208,%esp andl $-64,%esp vmovdqa %xmm0,112(%esp) vmovdqa %xmm1,128(%esp) vmovdqa %xmm2,144(%esp) shll $6,%edx vmovdqa %xmm7,160(%esp) addl %ebp,%edx vmovdqa %xmm6,176(%esp) addl $64,%ebp movl %edi,192(%esp) movl %ebp,196(%esp) movl %edx,200(%esp) movl %esi,204(%esp) movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx movl 16(%edi),%edi movl %ebx,%esi vmovdqu -64(%ebp),%xmm0 vmovdqu -48(%ebp),%xmm1 vmovdqu -32(%ebp),%xmm2 vmovdqu -16(%ebp),%xmm3 vpshufb %xmm6,%xmm0,%xmm0 vpshufb %xmm6,%xmm1,%xmm1 vpshufb %xmm6,%xmm2,%xmm2 vmovdqa %xmm7,96(%esp) vpshufb %xmm6,%xmm3,%xmm3 vpaddd %xmm7,%xmm0,%xmm4 vpaddd %xmm7,%xmm1,%xmm5 vpaddd %xmm7,%xmm2,%xmm6 vmovdqa %xmm4,(%esp) movl %ecx,%ebp vmovdqa %xmm5,16(%esp) xorl %edx,%ebp vmovdqa %xmm6,32(%esp) andl %ebp,%esi jmp .L005loop .align 16 .L005loop: shrdl $2,%ebx,%ebx xorl %edx,%esi vpalignr $8,%xmm0,%xmm1,%xmm4 movl %eax,%ebp addl (%esp),%edi vpaddd %xmm3,%xmm7,%xmm7 vmovdqa %xmm0,64(%esp) xorl %ecx,%ebx shldl $5,%eax,%eax vpsrldq $4,%xmm3,%xmm6 addl %esi,%edi andl %ebx,%ebp vpxor %xmm0,%xmm4,%xmm4 xorl %ecx,%ebx addl %eax,%edi vpxor %xmm2,%xmm6,%xmm6 shrdl $7,%eax,%eax xorl %ecx,%ebp vmovdqa %xmm7,48(%esp) movl %edi,%esi addl 4(%esp),%edx vpxor %xmm6,%xmm4,%xmm4 xorl %ebx,%eax shldl $5,%edi,%edi addl %ebp,%edx andl %eax,%esi vpsrld $31,%xmm4,%xmm6 xorl %ebx,%eax addl %edi,%edx shrdl $7,%edi,%edi xorl %ebx,%esi vpslldq $12,%xmm4,%xmm0 
vpaddd %xmm4,%xmm4,%xmm4 movl %edx,%ebp addl 8(%esp),%ecx xorl %eax,%edi shldl $5,%edx,%edx vpsrld $30,%xmm0,%xmm7 vpor %xmm6,%xmm4,%xmm4 addl %esi,%ecx andl %edi,%ebp xorl %eax,%edi addl %edx,%ecx vpslld $2,%xmm0,%xmm0 shrdl $7,%edx,%edx xorl %eax,%ebp vpxor %xmm7,%xmm4,%xmm4 movl %ecx,%esi addl 12(%esp),%ebx xorl %edi,%edx shldl $5,%ecx,%ecx vpxor %xmm0,%xmm4,%xmm4 addl %ebp,%ebx andl %edx,%esi vmovdqa 96(%esp),%xmm0 xorl %edi,%edx addl %ecx,%ebx shrdl $7,%ecx,%ecx xorl %edi,%esi vpalignr $8,%xmm1,%xmm2,%xmm5 movl %ebx,%ebp addl 16(%esp),%eax vpaddd %xmm4,%xmm0,%xmm0 vmovdqa %xmm1,80(%esp) xorl %edx,%ecx shldl $5,%ebx,%ebx vpsrldq $4,%xmm4,%xmm7 addl %esi,%eax andl %ecx,%ebp vpxor %xmm1,%xmm5,%xmm5 xorl %edx,%ecx addl %ebx,%eax vpxor %xmm3,%xmm7,%xmm7 shrdl $7,%ebx,%ebx xorl %edx,%ebp vmovdqa %xmm0,(%esp) movl %eax,%esi addl 20(%esp),%edi vpxor %xmm7,%xmm5,%xmm5 xorl %ecx,%ebx shldl $5,%eax,%eax addl %ebp,%edi andl %ebx,%esi vpsrld $31,%xmm5,%xmm7 xorl %ecx,%ebx addl %eax,%edi shrdl $7,%eax,%eax xorl %ecx,%esi vpslldq $12,%xmm5,%xmm1 vpaddd %xmm5,%xmm5,%xmm5 movl %edi,%ebp addl 24(%esp),%edx xorl %ebx,%eax shldl $5,%edi,%edi vpsrld $30,%xmm1,%xmm0 vpor %xmm7,%xmm5,%xmm5 addl %esi,%edx andl %eax,%ebp xorl %ebx,%eax addl %edi,%edx vpslld $2,%xmm1,%xmm1 shrdl $7,%edi,%edi xorl %ebx,%ebp vpxor %xmm0,%xmm5,%xmm5 movl %edx,%esi addl 28(%esp),%ecx xorl %eax,%edi shldl $5,%edx,%edx vpxor %xmm1,%xmm5,%xmm5 addl %ebp,%ecx andl %edi,%esi vmovdqa 112(%esp),%xmm1 xorl %eax,%edi addl %edx,%ecx shrdl $7,%edx,%edx xorl %eax,%esi vpalignr $8,%xmm2,%xmm3,%xmm6 movl %ecx,%ebp addl 32(%esp),%ebx vpaddd %xmm5,%xmm1,%xmm1 vmovdqa %xmm2,96(%esp) xorl %edi,%edx shldl $5,%ecx,%ecx vpsrldq $4,%xmm5,%xmm0 addl %esi,%ebx andl %edx,%ebp vpxor %xmm2,%xmm6,%xmm6 xorl %edi,%edx addl %ecx,%ebx vpxor %xmm4,%xmm0,%xmm0 shrdl $7,%ecx,%ecx xorl %edi,%ebp vmovdqa %xmm1,16(%esp) movl %ebx,%esi addl 36(%esp),%eax vpxor %xmm0,%xmm6,%xmm6 xorl %edx,%ecx shldl $5,%ebx,%ebx addl %ebp,%eax andl %ecx,%esi 
vpsrld $31,%xmm6,%xmm0 xorl %edx,%ecx addl %ebx,%eax shrdl $7,%ebx,%ebx xorl %edx,%esi vpslldq $12,%xmm6,%xmm2 vpaddd %xmm6,%xmm6,%xmm6 movl %eax,%ebp addl 40(%esp),%edi xorl %ecx,%ebx shldl $5,%eax,%eax vpsrld $30,%xmm2,%xmm1 vpor %xmm0,%xmm6,%xmm6 addl %esi,%edi andl %ebx,%ebp xorl %ecx,%ebx addl %eax,%edi vpslld $2,%xmm2,%xmm2 vmovdqa 64(%esp),%xmm0 shrdl $7,%eax,%eax xorl %ecx,%ebp vpxor %xmm1,%xmm6,%xmm6 movl %edi,%esi addl 44(%esp),%edx xorl %ebx,%eax shldl $5,%edi,%edi vpxor %xmm2,%xmm6,%xmm6 addl %ebp,%edx andl %eax,%esi vmovdqa 112(%esp),%xmm2 xorl %ebx,%eax addl %edi,%edx shrdl $7,%edi,%edi xorl %ebx,%esi vpalignr $8,%xmm3,%xmm4,%xmm7 movl %edx,%ebp addl 48(%esp),%ecx vpaddd %xmm6,%xmm2,%xmm2 vmovdqa %xmm3,64(%esp) xorl %eax,%edi shldl $5,%edx,%edx vpsrldq $4,%xmm6,%xmm1 addl %esi,%ecx andl %edi,%ebp vpxor %xmm3,%xmm7,%xmm7 xorl %eax,%edi addl %edx,%ecx vpxor %xmm5,%xmm1,%xmm1 shrdl $7,%edx,%edx xorl %eax,%ebp vmovdqa %xmm2,32(%esp) movl %ecx,%esi addl 52(%esp),%ebx vpxor %xmm1,%xmm7,%xmm7 xorl %edi,%edx shldl $5,%ecx,%ecx addl %ebp,%ebx andl %edx,%esi vpsrld $31,%xmm7,%xmm1 xorl %edi,%edx addl %ecx,%ebx shrdl $7,%ecx,%ecx xorl %edi,%esi vpslldq $12,%xmm7,%xmm3 vpaddd %xmm7,%xmm7,%xmm7 movl %ebx,%ebp addl 56(%esp),%eax xorl %edx,%ecx shldl $5,%ebx,%ebx vpsrld $30,%xmm3,%xmm2 vpor %xmm1,%xmm7,%xmm7 addl %esi,%eax andl %ecx,%ebp xorl %edx,%ecx addl %ebx,%eax vpslld $2,%xmm3,%xmm3 vmovdqa 80(%esp),%xmm1 shrdl $7,%ebx,%ebx xorl %edx,%ebp vpxor %xmm2,%xmm7,%xmm7 movl %eax,%esi addl 60(%esp),%edi xorl %ecx,%ebx shldl $5,%eax,%eax vpxor %xmm3,%xmm7,%xmm7 addl %ebp,%edi andl %ebx,%esi vmovdqa 112(%esp),%xmm3 xorl %ecx,%ebx addl %eax,%edi vpalignr $8,%xmm6,%xmm7,%xmm2 vpxor %xmm4,%xmm0,%xmm0 shrdl $7,%eax,%eax xorl %ecx,%esi movl %edi,%ebp addl (%esp),%edx vpxor %xmm1,%xmm0,%xmm0 vmovdqa %xmm4,80(%esp) xorl %ebx,%eax shldl $5,%edi,%edi vmovdqa %xmm3,%xmm4 vpaddd %xmm7,%xmm3,%xmm3 addl %esi,%edx andl %eax,%ebp vpxor %xmm2,%xmm0,%xmm0 xorl %ebx,%eax addl %edi,%edx 
shrdl $7,%edi,%edi xorl %ebx,%ebp vpsrld $30,%xmm0,%xmm2 vmovdqa %xmm3,48(%esp) movl %edx,%esi addl 4(%esp),%ecx xorl %eax,%edi shldl $5,%edx,%edx vpslld $2,%xmm0,%xmm0 addl %ebp,%ecx andl %edi,%esi xorl %eax,%edi addl %edx,%ecx shrdl $7,%edx,%edx xorl %eax,%esi movl %ecx,%ebp addl 8(%esp),%ebx vpor %xmm2,%xmm0,%xmm0 xorl %edi,%edx shldl $5,%ecx,%ecx vmovdqa 96(%esp),%xmm2 addl %esi,%ebx andl %edx,%ebp xorl %edi,%edx addl %ecx,%ebx addl 12(%esp),%eax xorl %edi,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax vpalignr $8,%xmm7,%xmm0,%xmm3 vpxor %xmm5,%xmm1,%xmm1 addl 16(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm5,96(%esp) addl %esi,%edi xorl %ecx,%ebp vmovdqa %xmm4,%xmm5 vpaddd %xmm0,%xmm4,%xmm4 shrdl $7,%ebx,%ebx addl %eax,%edi vpxor %xmm3,%xmm1,%xmm1 addl 20(%esp),%edx xorl %ebx,%ebp movl %edi,%esi shldl $5,%edi,%edi vpsrld $30,%xmm1,%xmm3 vmovdqa %xmm4,(%esp) addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx vpslld $2,%xmm1,%xmm1 addl 24(%esp),%ecx xorl %eax,%esi movl %edx,%ebp shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%ebp shrdl $7,%edi,%edi addl %edx,%ecx vpor %xmm3,%xmm1,%xmm1 addl 28(%esp),%ebx xorl %edi,%ebp vmovdqa 64(%esp),%xmm3 movl %ecx,%esi shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx vpalignr $8,%xmm0,%xmm1,%xmm4 vpxor %xmm6,%xmm2,%xmm2 addl 32(%esp),%eax xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx vpxor %xmm3,%xmm2,%xmm2 vmovdqa %xmm6,64(%esp) addl %esi,%eax xorl %edx,%ebp vmovdqa 128(%esp),%xmm6 vpaddd %xmm1,%xmm5,%xmm5 shrdl $7,%ecx,%ecx addl %ebx,%eax vpxor %xmm4,%xmm2,%xmm2 addl 36(%esp),%edi xorl %ecx,%ebp movl %eax,%esi shldl $5,%eax,%eax vpsrld $30,%xmm2,%xmm4 vmovdqa %xmm5,16(%esp) addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi vpslld $2,%xmm2,%xmm2 addl 40(%esp),%edx xorl %ebx,%esi movl %edi,%ebp shldl $5,%edi,%edi addl %esi,%edx xorl %ebx,%ebp shrdl 
$7,%eax,%eax addl %edi,%edx vpor %xmm4,%xmm2,%xmm2 addl 44(%esp),%ecx xorl %eax,%ebp vmovdqa 80(%esp),%xmm4 movl %edx,%esi shldl $5,%edx,%edx addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx vpalignr $8,%xmm1,%xmm2,%xmm5 vpxor %xmm7,%xmm3,%xmm3 addl 48(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx vpxor %xmm4,%xmm3,%xmm3 vmovdqa %xmm7,80(%esp) addl %esi,%ebx xorl %edi,%ebp vmovdqa %xmm6,%xmm7 vpaddd %xmm2,%xmm6,%xmm6 shrdl $7,%edx,%edx addl %ecx,%ebx vpxor %xmm5,%xmm3,%xmm3 addl 52(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx vpsrld $30,%xmm3,%xmm5 vmovdqa %xmm6,32(%esp) addl %ebp,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax vpslld $2,%xmm3,%xmm3 addl 56(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax addl %esi,%edi xorl %ecx,%ebp shrdl $7,%ebx,%ebx addl %eax,%edi vpor %xmm5,%xmm3,%xmm3 addl 60(%esp),%edx xorl %ebx,%ebp vmovdqa 96(%esp),%xmm5 movl %edi,%esi shldl $5,%edi,%edi addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx vpalignr $8,%xmm2,%xmm3,%xmm6 vpxor %xmm0,%xmm4,%xmm4 addl (%esp),%ecx xorl %eax,%esi movl %edx,%ebp shldl $5,%edx,%edx vpxor %xmm5,%xmm4,%xmm4 vmovdqa %xmm0,96(%esp) addl %esi,%ecx xorl %eax,%ebp vmovdqa %xmm7,%xmm0 vpaddd %xmm3,%xmm7,%xmm7 shrdl $7,%edi,%edi addl %edx,%ecx vpxor %xmm6,%xmm4,%xmm4 addl 4(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi shldl $5,%ecx,%ecx vpsrld $30,%xmm4,%xmm6 vmovdqa %xmm7,48(%esp) addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx vpslld $2,%xmm4,%xmm4 addl 8(%esp),%eax xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%ebp shrdl $7,%ecx,%ecx addl %ebx,%eax vpor %xmm6,%xmm4,%xmm4 addl 12(%esp),%edi xorl %ecx,%ebp vmovdqa 64(%esp),%xmm6 movl %eax,%esi shldl $5,%eax,%eax addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi vpalignr $8,%xmm3,%xmm4,%xmm7 vpxor %xmm1,%xmm5,%xmm5 addl 16(%esp),%edx xorl %ebx,%esi movl %edi,%ebp shldl $5,%edi,%edi vpxor %xmm6,%xmm5,%xmm5 vmovdqa %xmm1,64(%esp) addl 
%esi,%edx xorl %ebx,%ebp vmovdqa %xmm0,%xmm1 vpaddd %xmm4,%xmm0,%xmm0 shrdl $7,%eax,%eax addl %edi,%edx vpxor %xmm7,%xmm5,%xmm5 addl 20(%esp),%ecx xorl %eax,%ebp movl %edx,%esi shldl $5,%edx,%edx vpsrld $30,%xmm5,%xmm7 vmovdqa %xmm0,(%esp) addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx vpslld $2,%xmm5,%xmm5 addl 24(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx vpor %xmm7,%xmm5,%xmm5 addl 28(%esp),%eax vmovdqa 80(%esp),%xmm7 shrdl $7,%ecx,%ecx movl %ebx,%esi xorl %edx,%ebp shldl $5,%ebx,%ebx addl %ebp,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax vpalignr $8,%xmm4,%xmm5,%xmm0 vpxor %xmm2,%xmm6,%xmm6 addl 32(%esp),%edi andl %ecx,%esi xorl %edx,%ecx shrdl $7,%ebx,%ebx vpxor %xmm7,%xmm6,%xmm6 vmovdqa %xmm2,80(%esp) movl %eax,%ebp xorl %ecx,%esi vmovdqa %xmm1,%xmm2 vpaddd %xmm5,%xmm1,%xmm1 shldl $5,%eax,%eax addl %esi,%edi vpxor %xmm0,%xmm6,%xmm6 xorl %ebx,%ebp xorl %ecx,%ebx addl %eax,%edi addl 36(%esp),%edx vpsrld $30,%xmm6,%xmm0 vmovdqa %xmm1,16(%esp) andl %ebx,%ebp xorl %ecx,%ebx shrdl $7,%eax,%eax movl %edi,%esi vpslld $2,%xmm6,%xmm6 xorl %ebx,%ebp shldl $5,%edi,%edi addl %ebp,%edx xorl %eax,%esi xorl %ebx,%eax addl %edi,%edx addl 40(%esp),%ecx andl %eax,%esi vpor %xmm0,%xmm6,%xmm6 xorl %ebx,%eax shrdl $7,%edi,%edi vmovdqa 96(%esp),%xmm0 movl %edx,%ebp xorl %eax,%esi shldl $5,%edx,%edx addl %esi,%ecx xorl %edi,%ebp xorl %eax,%edi addl %edx,%ecx addl 44(%esp),%ebx andl %edi,%ebp xorl %eax,%edi shrdl $7,%edx,%edx movl %ecx,%esi xorl %edi,%ebp shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edx,%esi xorl %edi,%edx addl %ecx,%ebx vpalignr $8,%xmm5,%xmm6,%xmm1 vpxor %xmm3,%xmm7,%xmm7 addl 48(%esp),%eax andl %edx,%esi xorl %edi,%edx shrdl $7,%ecx,%ecx vpxor %xmm0,%xmm7,%xmm7 vmovdqa %xmm3,96(%esp) movl %ebx,%ebp xorl %edx,%esi vmovdqa 144(%esp),%xmm3 vpaddd %xmm6,%xmm2,%xmm2 shldl $5,%ebx,%ebx addl %esi,%eax vpxor %xmm1,%xmm7,%xmm7 xorl %ecx,%ebp xorl %edx,%ecx addl %ebx,%eax addl 
52(%esp),%edi vpsrld $30,%xmm7,%xmm1 vmovdqa %xmm2,32(%esp) andl %ecx,%ebp xorl %edx,%ecx shrdl $7,%ebx,%ebx movl %eax,%esi vpslld $2,%xmm7,%xmm7 xorl %ecx,%ebp shldl $5,%eax,%eax addl %ebp,%edi xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%edi addl 56(%esp),%edx andl %ebx,%esi vpor %xmm1,%xmm7,%xmm7 xorl %ecx,%ebx shrdl $7,%eax,%eax vmovdqa 64(%esp),%xmm1 movl %edi,%ebp xorl %ebx,%esi shldl $5,%edi,%edi addl %esi,%edx xorl %eax,%ebp xorl %ebx,%eax addl %edi,%edx addl 60(%esp),%ecx andl %eax,%ebp xorl %ebx,%eax shrdl $7,%edi,%edi movl %edx,%esi xorl %eax,%ebp shldl $5,%edx,%edx addl %ebp,%ecx xorl %edi,%esi xorl %eax,%edi addl %edx,%ecx vpalignr $8,%xmm6,%xmm7,%xmm2 vpxor %xmm4,%xmm0,%xmm0 addl (%esp),%ebx andl %edi,%esi xorl %eax,%edi shrdl $7,%edx,%edx vpxor %xmm1,%xmm0,%xmm0 vmovdqa %xmm4,64(%esp) movl %ecx,%ebp xorl %edi,%esi vmovdqa %xmm3,%xmm4 vpaddd %xmm7,%xmm3,%xmm3 shldl $5,%ecx,%ecx addl %esi,%ebx vpxor %xmm2,%xmm0,%xmm0 xorl %edx,%ebp xorl %edi,%edx addl %ecx,%ebx addl 4(%esp),%eax vpsrld $30,%xmm0,%xmm2 vmovdqa %xmm3,48(%esp) andl %edx,%ebp xorl %edi,%edx shrdl $7,%ecx,%ecx movl %ebx,%esi vpslld $2,%xmm0,%xmm0 xorl %edx,%ebp shldl $5,%ebx,%ebx addl %ebp,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax addl 8(%esp),%edi andl %ecx,%esi vpor %xmm2,%xmm0,%xmm0 xorl %edx,%ecx shrdl $7,%ebx,%ebx vmovdqa 80(%esp),%xmm2 movl %eax,%ebp xorl %ecx,%esi shldl $5,%eax,%eax addl %esi,%edi xorl %ebx,%ebp xorl %ecx,%ebx addl %eax,%edi addl 12(%esp),%edx andl %ebx,%ebp xorl %ecx,%ebx shrdl $7,%eax,%eax movl %edi,%esi xorl %ebx,%ebp shldl $5,%edi,%edi addl %ebp,%edx xorl %eax,%esi xorl %ebx,%eax addl %edi,%edx vpalignr $8,%xmm7,%xmm0,%xmm3 vpxor %xmm5,%xmm1,%xmm1 addl 16(%esp),%ecx andl %eax,%esi xorl %ebx,%eax shrdl $7,%edi,%edi vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm5,80(%esp) movl %edx,%ebp xorl %eax,%esi vmovdqa %xmm4,%xmm5 vpaddd %xmm0,%xmm4,%xmm4 shldl $5,%edx,%edx addl %esi,%ecx vpxor %xmm3,%xmm1,%xmm1 xorl %edi,%ebp xorl %eax,%edi addl %edx,%ecx addl 20(%esp),%ebx 
vpsrld $30,%xmm1,%xmm3 vmovdqa %xmm4,(%esp) andl %edi,%ebp xorl %eax,%edi shrdl $7,%edx,%edx movl %ecx,%esi vpslld $2,%xmm1,%xmm1 xorl %edi,%ebp shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edx,%esi xorl %edi,%edx addl %ecx,%ebx addl 24(%esp),%eax andl %edx,%esi vpor %xmm3,%xmm1,%xmm1 xorl %edi,%edx shrdl $7,%ecx,%ecx vmovdqa 96(%esp),%xmm3 movl %ebx,%ebp xorl %edx,%esi shldl $5,%ebx,%ebx addl %esi,%eax xorl %ecx,%ebp xorl %edx,%ecx addl %ebx,%eax addl 28(%esp),%edi andl %ecx,%ebp xorl %edx,%ecx shrdl $7,%ebx,%ebx movl %eax,%esi xorl %ecx,%ebp shldl $5,%eax,%eax addl %ebp,%edi xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%edi vpalignr $8,%xmm0,%xmm1,%xmm4 vpxor %xmm6,%xmm2,%xmm2 addl 32(%esp),%edx andl %ebx,%esi xorl %ecx,%ebx shrdl $7,%eax,%eax vpxor %xmm3,%xmm2,%xmm2 vmovdqa %xmm6,96(%esp) movl %edi,%ebp xorl %ebx,%esi vmovdqa %xmm5,%xmm6 vpaddd %xmm1,%xmm5,%xmm5 shldl $5,%edi,%edi addl %esi,%edx vpxor %xmm4,%xmm2,%xmm2 xorl %eax,%ebp xorl %ebx,%eax addl %edi,%edx addl 36(%esp),%ecx vpsrld $30,%xmm2,%xmm4 vmovdqa %xmm5,16(%esp) andl %eax,%ebp xorl %ebx,%eax shrdl $7,%edi,%edi movl %edx,%esi vpslld $2,%xmm2,%xmm2 xorl %eax,%ebp shldl $5,%edx,%edx addl %ebp,%ecx xorl %edi,%esi xorl %eax,%edi addl %edx,%ecx addl 40(%esp),%ebx andl %edi,%esi vpor %xmm4,%xmm2,%xmm2 xorl %eax,%edi shrdl $7,%edx,%edx vmovdqa 64(%esp),%xmm4 movl %ecx,%ebp xorl %edi,%esi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edx,%ebp xorl %edi,%edx addl %ecx,%ebx addl 44(%esp),%eax andl %edx,%ebp xorl %edi,%edx shrdl $7,%ecx,%ecx movl %ebx,%esi xorl %edx,%ebp shldl $5,%ebx,%ebx addl %ebp,%eax xorl %edx,%esi addl %ebx,%eax vpalignr $8,%xmm1,%xmm2,%xmm5 vpxor %xmm7,%xmm3,%xmm3 addl 48(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax vpxor %xmm4,%xmm3,%xmm3 vmovdqa %xmm7,64(%esp) addl %esi,%edi xorl %ecx,%ebp vmovdqa %xmm6,%xmm7 vpaddd %xmm2,%xmm6,%xmm6 shrdl $7,%ebx,%ebx addl %eax,%edi vpxor %xmm5,%xmm3,%xmm3 addl 52(%esp),%edx xorl %ebx,%ebp movl %edi,%esi shldl $5,%edi,%edi vpsrld $30,%xmm3,%xmm5 
vmovdqa %xmm6,32(%esp) addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx vpslld $2,%xmm3,%xmm3 addl 56(%esp),%ecx xorl %eax,%esi movl %edx,%ebp shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%ebp shrdl $7,%edi,%edi addl %edx,%ecx vpor %xmm5,%xmm3,%xmm3 addl 60(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl (%esp),%eax vpaddd %xmm3,%xmm7,%xmm7 xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx addl %esi,%eax vmovdqa %xmm7,48(%esp) xorl %edx,%ebp shrdl $7,%ecx,%ecx addl %ebx,%eax addl 4(%esp),%edi xorl %ecx,%ebp movl %eax,%esi shldl $5,%eax,%eax addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi addl 8(%esp),%edx xorl %ebx,%esi movl %edi,%ebp shldl $5,%edi,%edi addl %esi,%edx xorl %ebx,%ebp shrdl $7,%eax,%eax addl %edi,%edx addl 12(%esp),%ecx xorl %eax,%ebp movl %edx,%esi shldl $5,%edx,%edx addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx movl 196(%esp),%ebp cmpl 200(%esp),%ebp je .L006done vmovdqa 160(%esp),%xmm7 vmovdqa 176(%esp),%xmm6 vmovdqu (%ebp),%xmm0 vmovdqu 16(%ebp),%xmm1 vmovdqu 32(%ebp),%xmm2 vmovdqu 48(%ebp),%xmm3 addl $64,%ebp vpshufb %xmm6,%xmm0,%xmm0 movl %ebp,196(%esp) vmovdqa %xmm7,96(%esp) addl 16(%esp),%ebx xorl %edi,%esi vpshufb %xmm6,%xmm1,%xmm1 movl %ecx,%ebp shldl $5,%ecx,%ecx vpaddd %xmm7,%xmm0,%xmm4 addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx vmovdqa %xmm4,(%esp) addl 20(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 24(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax addl %esi,%edi xorl %ecx,%ebp shrdl $7,%ebx,%ebx addl %eax,%edi addl 28(%esp),%edx xorl %ebx,%ebp movl %edi,%esi shldl $5,%edi,%edi addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx addl 32(%esp),%ecx xorl %eax,%esi vpshufb %xmm6,%xmm2,%xmm2 movl %edx,%ebp shldl $5,%edx,%edx vpaddd %xmm7,%xmm1,%xmm5 addl %esi,%ecx xorl %eax,%ebp shrdl 
$7,%edi,%edi addl %edx,%ecx vmovdqa %xmm5,16(%esp) addl 36(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl 40(%esp),%eax xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%ebp shrdl $7,%ecx,%ecx addl %ebx,%eax addl 44(%esp),%edi xorl %ecx,%ebp movl %eax,%esi shldl $5,%eax,%eax addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi addl 48(%esp),%edx xorl %ebx,%esi vpshufb %xmm6,%xmm3,%xmm3 movl %edi,%ebp shldl $5,%edi,%edi vpaddd %xmm7,%xmm2,%xmm6 addl %esi,%edx xorl %ebx,%ebp shrdl $7,%eax,%eax addl %edi,%edx vmovdqa %xmm6,32(%esp) addl 52(%esp),%ecx xorl %eax,%ebp movl %edx,%esi shldl $5,%edx,%edx addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx addl 56(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx addl 60(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax shrdl $7,%ecx,%ecx addl %ebx,%eax movl 192(%esp),%ebp addl (%ebp),%eax addl 4(%ebp),%esi addl 8(%ebp),%ecx movl %eax,(%ebp) addl 12(%ebp),%edx movl %esi,4(%ebp) addl 16(%ebp),%edi movl %ecx,%ebx movl %ecx,8(%ebp) xorl %edx,%ebx movl %edx,12(%ebp) movl %edi,16(%ebp) movl %esi,%ebp andl %ebx,%esi movl %ebp,%ebx jmp .L005loop .align 16 .L006done: addl 16(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx addl 20(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 24(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax addl %esi,%edi xorl %ecx,%ebp shrdl $7,%ebx,%ebx addl %eax,%edi addl 28(%esp),%edx xorl %ebx,%ebp movl %edi,%esi shldl $5,%edi,%edi addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx addl 32(%esp),%ecx xorl %eax,%esi movl %edx,%ebp shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%ebp shrdl $7,%edi,%edi addl 
%edx,%ecx addl 36(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl 40(%esp),%eax xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%ebp shrdl $7,%ecx,%ecx addl %ebx,%eax addl 44(%esp),%edi xorl %ecx,%ebp movl %eax,%esi shldl $5,%eax,%eax addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi addl 48(%esp),%edx xorl %ebx,%esi movl %edi,%ebp shldl $5,%edi,%edi addl %esi,%edx xorl %ebx,%ebp shrdl $7,%eax,%eax addl %edi,%edx addl 52(%esp),%ecx xorl %eax,%ebp movl %edx,%esi shldl $5,%edx,%edx addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx addl 56(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx addl 60(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax shrdl $7,%ecx,%ecx addl %ebx,%eax vzeroall movl 192(%esp),%ebp addl (%ebp),%eax movl 204(%esp),%esp addl 4(%ebp),%esi addl 8(%ebp),%ecx movl %eax,(%ebp) addl 12(%ebp),%edx movl %esi,4(%ebp) addl 16(%ebp),%edi movl %ecx,8(%ebp) movl %edx,12(%ebp) movl %edi,16(%ebp) popl %edi popl %esi popl %ebx popl %ebp ret .size sha1_block_data_order_avx,.-.L_sha1_block_data_order_avx_begin .align 64 .LK_XX_XX: .long 1518500249,1518500249,1518500249,1518500249 .long 1859775393,1859775393,1859775393,1859775393 .long 2400959708,2400959708,2400959708,2400959708 .long 3395469782,3395469782,3395469782,3395469782 .long 66051,67438087,134810123,202182159 .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115 .byte 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82 .byte 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112 .byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif 
================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha1-armv4-large-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) #include .text #if defined(__thumb2__) .syntax unified .thumb #else .code 32 #endif .globl sha1_block_data_order_nohw .hidden sha1_block_data_order_nohw .type sha1_block_data_order_nohw,%function .align 5 sha1_block_data_order_nohw: stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 ldmia r0,{r3,r4,r5,r6,r7} .Lloop: ldr r8,.LK_00_19 mov r14,sp sub sp,sp,#15*4 mov r5,r5,ror#30 mov r6,r6,ror#30 mov r7,r7,ror#30 @ [6] .L_00_15: #if __ARM_ARCH<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] ldrb r11,[r1,#1] add r7,r8,r7,ror#2 @ E+=K_00_19 ldrb r12,[r1],#4 orr r9,r9,r10,lsl#8 eor r10,r5,r6 @ F_xx_xx orr r9,r9,r11,lsl#16 add r7,r7,r3,ror#27 @ E+=ROR(A,27) orr r9,r9,r12,lsl#24 #else ldr r9,[r1],#4 @ handles unaligned add r7,r8,r7,ror#2 @ E+=K_00_19 eor r10,r5,r6 @ F_xx_xx add r7,r7,r3,ror#27 @ E+=ROR(A,27) #ifdef __ARMEL__ rev r9,r9 @ byte swap #endif #endif and r10,r4,r10,ror#2 add r7,r7,r9 @ E+=X[i] eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! add r7,r7,r10 @ E+=F_00_19(B,C,D) #if __ARM_ARCH<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] ldrb r11,[r1,#1] add r6,r8,r6,ror#2 @ E+=K_00_19 ldrb r12,[r1],#4 orr r9,r9,r10,lsl#8 eor r10,r4,r5 @ F_xx_xx orr r9,r9,r11,lsl#16 add r6,r6,r7,ror#27 @ E+=ROR(A,27) orr r9,r9,r12,lsl#24 #else ldr r9,[r1],#4 @ handles unaligned add r6,r8,r6,ror#2 @ E+=K_00_19 eor r10,r4,r5 @ F_xx_xx add r6,r6,r7,ror#27 @ E+=ROR(A,27) #ifdef __ARMEL__ rev r9,r9 @ byte swap #endif #endif and r10,r3,r10,ror#2 add r6,r6,r9 @ E+=X[i] eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! 
add r6,r6,r10 @ E+=F_00_19(B,C,D) #if __ARM_ARCH<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] ldrb r11,[r1,#1] add r5,r8,r5,ror#2 @ E+=K_00_19 ldrb r12,[r1],#4 orr r9,r9,r10,lsl#8 eor r10,r3,r4 @ F_xx_xx orr r9,r9,r11,lsl#16 add r5,r5,r6,ror#27 @ E+=ROR(A,27) orr r9,r9,r12,lsl#24 #else ldr r9,[r1],#4 @ handles unaligned add r5,r8,r5,ror#2 @ E+=K_00_19 eor r10,r3,r4 @ F_xx_xx add r5,r5,r6,ror#27 @ E+=ROR(A,27) #ifdef __ARMEL__ rev r9,r9 @ byte swap #endif #endif and r10,r7,r10,ror#2 add r5,r5,r9 @ E+=X[i] eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! add r5,r5,r10 @ E+=F_00_19(B,C,D) #if __ARM_ARCH<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] ldrb r11,[r1,#1] add r4,r8,r4,ror#2 @ E+=K_00_19 ldrb r12,[r1],#4 orr r9,r9,r10,lsl#8 eor r10,r7,r3 @ F_xx_xx orr r9,r9,r11,lsl#16 add r4,r4,r5,ror#27 @ E+=ROR(A,27) orr r9,r9,r12,lsl#24 #else ldr r9,[r1],#4 @ handles unaligned add r4,r8,r4,ror#2 @ E+=K_00_19 eor r10,r7,r3 @ F_xx_xx add r4,r4,r5,ror#27 @ E+=ROR(A,27) #ifdef __ARMEL__ rev r9,r9 @ byte swap #endif #endif and r10,r6,r10,ror#2 add r4,r4,r9 @ E+=X[i] eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! add r4,r4,r10 @ E+=F_00_19(B,C,D) #if __ARM_ARCH<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] ldrb r11,[r1,#1] add r3,r8,r3,ror#2 @ E+=K_00_19 ldrb r12,[r1],#4 orr r9,r9,r10,lsl#8 eor r10,r6,r7 @ F_xx_xx orr r9,r9,r11,lsl#16 add r3,r3,r4,ror#27 @ E+=ROR(A,27) orr r9,r9,r12,lsl#24 #else ldr r9,[r1],#4 @ handles unaligned add r3,r8,r3,ror#2 @ E+=K_00_19 eor r10,r6,r7 @ F_xx_xx add r3,r3,r4,ror#27 @ E+=ROR(A,27) #ifdef __ARMEL__ rev r9,r9 @ byte swap #endif #endif and r10,r5,r10,ror#2 add r3,r3,r9 @ E+=X[i] eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! 
add r3,r3,r10 @ E+=F_00_19(B,C,D) #if defined(__thumb2__) mov r12,sp teq r14,r12 #else teq r14,sp #endif bne .L_00_15 @ [((11+4)*5+2)*3] sub sp,sp,#25*4 #if __ARM_ARCH<7 ldrb r10,[r1,#2] ldrb r9,[r1,#3] ldrb r11,[r1,#1] add r7,r8,r7,ror#2 @ E+=K_00_19 ldrb r12,[r1],#4 orr r9,r9,r10,lsl#8 eor r10,r5,r6 @ F_xx_xx orr r9,r9,r11,lsl#16 add r7,r7,r3,ror#27 @ E+=ROR(A,27) orr r9,r9,r12,lsl#24 #else ldr r9,[r1],#4 @ handles unaligned add r7,r8,r7,ror#2 @ E+=K_00_19 eor r10,r5,r6 @ F_xx_xx add r7,r7,r3,ror#27 @ E+=ROR(A,27) #ifdef __ARMEL__ rev r9,r9 @ byte swap #endif #endif and r10,r4,r10,ror#2 add r7,r7,r9 @ E+=X[i] eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! add r7,r7,r10 @ E+=F_00_19(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r6,r8,r6,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r4,r5 @ F_xx_xx mov r9,r9,ror#31 add r6,r6,r7,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r3,r10,ror#2 @ F_xx_xx @ F_xx_xx add r6,r6,r9 @ E+=X[i] eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) add r6,r6,r10 @ E+=F_00_19(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r5,r8,r5,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r3,r4 @ F_xx_xx mov r9,r9,ror#31 add r5,r5,r6,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r7,r10,ror#2 @ F_xx_xx @ F_xx_xx add r5,r5,r9 @ E+=X[i] eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) add r5,r5,r10 @ E+=F_00_19(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r4,r8,r4,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r7,r3 @ F_xx_xx mov r9,r9,ror#31 add r4,r4,r5,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! 
and r10,r6,r10,ror#2 @ F_xx_xx @ F_xx_xx add r4,r4,r9 @ E+=X[i] eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) add r4,r4,r10 @ E+=F_00_19(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r3,r8,r3,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r6,r7 @ F_xx_xx mov r9,r9,ror#31 add r3,r3,r4,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r5,r10,ror#2 @ F_xx_xx @ F_xx_xx add r3,r3,r9 @ E+=X[i] eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) add r3,r3,r10 @ E+=F_00_19(B,C,D) ldr r8,.LK_20_39 @ [+15+16*4] cmn sp,#0 @ [+3], clear carry to denote 20_39 .L_20_39_or_60_79: ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r7,r8,r7,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r5,r6 @ F_xx_xx mov r9,r9,ror#31 add r7,r7,r3,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! eor r10,r4,r10,ror#2 @ F_xx_xx @ F_xx_xx add r7,r7,r9 @ E+=X[i] add r7,r7,r10 @ E+=F_20_39(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r6,r8,r6,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r4,r5 @ F_xx_xx mov r9,r9,ror#31 add r6,r6,r7,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! eor r10,r3,r10,ror#2 @ F_xx_xx @ F_xx_xx add r6,r6,r9 @ E+=X[i] add r6,r6,r10 @ E+=F_20_39(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r5,r8,r5,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r3,r4 @ F_xx_xx mov r9,r9,ror#31 add r5,r5,r6,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! 
eor r10,r7,r10,ror#2 @ F_xx_xx @ F_xx_xx add r5,r5,r9 @ E+=X[i] add r5,r5,r10 @ E+=F_20_39(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r4,r8,r4,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r7,r3 @ F_xx_xx mov r9,r9,ror#31 add r4,r4,r5,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! eor r10,r6,r10,ror#2 @ F_xx_xx @ F_xx_xx add r4,r4,r9 @ E+=X[i] add r4,r4,r10 @ E+=F_20_39(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r3,r8,r3,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r6,r7 @ F_xx_xx mov r9,r9,ror#31 add r3,r3,r4,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! eor r10,r5,r10,ror#2 @ F_xx_xx @ F_xx_xx add r3,r3,r9 @ E+=X[i] add r3,r3,r10 @ E+=F_20_39(B,C,D) #if defined(__thumb2__) mov r12,sp teq r14,r12 #else teq r14,sp @ preserve carry #endif bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4] bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes ldr r8,.LK_40_59 sub sp,sp,#20*4 @ [+2] .L_40_59: ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r7,r8,r7,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r5,r6 @ F_xx_xx mov r9,r9,ror#31 add r7,r7,r3,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r4,r10,ror#2 @ F_xx_xx and r11,r5,r6 @ F_xx_xx add r7,r7,r9 @ E+=X[i] add r7,r7,r10 @ E+=F_40_59(B,C,D) add r7,r7,r11,ror#2 ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r6,r8,r6,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r4,r5 @ F_xx_xx mov r9,r9,ror#31 add r6,r6,r7,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! 
and r10,r3,r10,ror#2 @ F_xx_xx and r11,r4,r5 @ F_xx_xx add r6,r6,r9 @ E+=X[i] add r6,r6,r10 @ E+=F_40_59(B,C,D) add r6,r6,r11,ror#2 ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r5,r8,r5,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r3,r4 @ F_xx_xx mov r9,r9,ror#31 add r5,r5,r6,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r7,r10,ror#2 @ F_xx_xx and r11,r3,r4 @ F_xx_xx add r5,r5,r9 @ E+=X[i] add r5,r5,r10 @ E+=F_40_59(B,C,D) add r5,r5,r11,ror#2 ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r4,r8,r4,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r7,r3 @ F_xx_xx mov r9,r9,ror#31 add r4,r4,r5,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! and r10,r6,r10,ror#2 @ F_xx_xx and r11,r7,r3 @ F_xx_xx add r4,r4,r9 @ E+=X[i] add r4,r4,r10 @ E+=F_40_59(B,C,D) add r4,r4,r11,ror#2 ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] ldr r11,[r14,#7*4] add r3,r8,r3,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 eor r11,r11,r12 @ 1 cycle stall eor r10,r6,r7 @ F_xx_xx mov r9,r9,ror#31 add r3,r3,r4,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 str r9,[r14,#-4]! 
and r10,r5,r10,ror#2 @ F_xx_xx and r11,r6,r7 @ F_xx_xx add r3,r3,r9 @ E+=X[i] add r3,r3,r10 @ E+=F_40_59(B,C,D) add r3,r3,r11,ror#2 #if defined(__thumb2__) mov r12,sp teq r14,r12 #else teq r14,sp #endif bne .L_40_59 @ [+((12+5)*5+2)*4] ldr r8,.LK_60_79 sub sp,sp,#20*4 cmp sp,#0 @ set carry to denote 60_79 b .L_20_39_or_60_79 @ [+4], spare 300 bytes .L_done: add sp,sp,#80*4 @ "deallocate" stack frame ldmia r0,{r8,r9,r10,r11,r12} add r3,r8,r3 add r4,r9,r4 add r5,r10,r5,ror#2 add r6,r11,r6,ror#2 add r7,r12,r7,ror#2 stmia r0,{r3,r4,r5,r6,r7} teq r1,r2 bne .Lloop @ [+18], total 1307 #if __ARM_ARCH>=5 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} #else ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif .size sha1_block_data_order_nohw,.-sha1_block_data_order_nohw .align 5 .LK_00_19:.word 0x5a827999 .LK_20_39:.word 0x6ed9eba1 .LK_40_59:.word 0x8f1bbcdc .LK_60_79:.word 0xca62c1d6 .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 5 #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .globl sha1_block_data_order_neon .hidden sha1_block_data_order_neon .type sha1_block_data_order_neon,%function .align 4 sha1_block_data_order_neon: stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 @ dmb @ errata #451034 on early Cortex A8 @ vstmdb sp!,{d8-d15} @ ABI specification says so mov r14,sp sub r12,sp,#64 adr r8,.LK_00_19 bic r12,r12,#15 @ align for 128-bit stores ldmia r0,{r3,r4,r5,r6,r7} @ load context mov sp,r12 @ alloca vld1.8 {q0,q1},[r1]! @ handles unaligned veor q15,q15,q15 vld1.8 {q2,q3},[r1]! vld1.32 {d28[],d29[]},[r8,:32]! @ load K_00_19 vrev32.8 q0,q0 @ yes, even on vrev32.8 q1,q1 @ big-endian... 
vrev32.8 q2,q2 vadd.i32 q8,q0,q14 vrev32.8 q3,q3 vadd.i32 q9,q1,q14 vst1.32 {q8},[r12,:128]! vadd.i32 q10,q2,q14 vst1.32 {q9},[r12,:128]! vst1.32 {q10},[r12,:128]! ldr r9,[sp] @ big RAW stall .Loop_neon: vext.8 q8,q0,q1,#8 bic r10,r6,r4 add r7,r7,r9 and r11,r5,r4 vadd.i32 q13,q3,q14 ldr r9,[sp,#4] add r7,r7,r3,ror#27 vext.8 q12,q3,q15,#4 eor r11,r11,r10 mov r4,r4,ror#2 add r7,r7,r11 veor q8,q8,q0 bic r10,r5,r3 add r6,r6,r9 veor q12,q12,q2 and r11,r4,r3 ldr r9,[sp,#8] veor q12,q12,q8 add r6,r6,r7,ror#27 eor r11,r11,r10 vst1.32 {q13},[r12,:128]! sub r12,r12,#64 mov r3,r3,ror#2 add r6,r6,r11 vext.8 q13,q15,q12,#4 bic r10,r4,r7 add r5,r5,r9 vadd.i32 q8,q12,q12 and r11,r3,r7 ldr r9,[sp,#12] vsri.32 q8,q12,#31 add r5,r5,r6,ror#27 eor r11,r11,r10 mov r7,r7,ror#2 vshr.u32 q12,q13,#30 add r5,r5,r11 bic r10,r3,r6 vshl.u32 q13,q13,#2 add r4,r4,r9 and r11,r7,r6 veor q8,q8,q12 ldr r9,[sp,#16] add r4,r4,r5,ror#27 veor q8,q8,q13 eor r11,r11,r10 mov r6,r6,ror#2 add r4,r4,r11 vext.8 q9,q1,q2,#8 bic r10,r7,r5 add r3,r3,r9 and r11,r6,r5 vadd.i32 q13,q8,q14 ldr r9,[sp,#20] vld1.32 {d28[],d29[]},[r8,:32]! add r3,r3,r4,ror#27 vext.8 q12,q8,q15,#4 eor r11,r11,r10 mov r5,r5,ror#2 add r3,r3,r11 veor q9,q9,q1 bic r10,r6,r4 add r7,r7,r9 veor q12,q12,q3 and r11,r5,r4 ldr r9,[sp,#24] veor q12,q12,q9 add r7,r7,r3,ror#27 eor r11,r11,r10 vst1.32 {q13},[r12,:128]! 
mov r4,r4,ror#2 add r7,r7,r11 vext.8 q13,q15,q12,#4 bic r10,r5,r3 add r6,r6,r9 vadd.i32 q9,q12,q12 and r11,r4,r3 ldr r9,[sp,#28] vsri.32 q9,q12,#31 add r6,r6,r7,ror#27 eor r11,r11,r10 mov r3,r3,ror#2 vshr.u32 q12,q13,#30 add r6,r6,r11 bic r10,r4,r7 vshl.u32 q13,q13,#2 add r5,r5,r9 and r11,r3,r7 veor q9,q9,q12 ldr r9,[sp,#32] add r5,r5,r6,ror#27 veor q9,q9,q13 eor r11,r11,r10 mov r7,r7,ror#2 add r5,r5,r11 vext.8 q10,q2,q3,#8 bic r10,r3,r6 add r4,r4,r9 and r11,r7,r6 vadd.i32 q13,q9,q14 ldr r9,[sp,#36] add r4,r4,r5,ror#27 vext.8 q12,q9,q15,#4 eor r11,r11,r10 mov r6,r6,ror#2 add r4,r4,r11 veor q10,q10,q2 bic r10,r7,r5 add r3,r3,r9 veor q12,q12,q8 and r11,r6,r5 ldr r9,[sp,#40] veor q12,q12,q10 add r3,r3,r4,ror#27 eor r11,r11,r10 vst1.32 {q13},[r12,:128]! mov r5,r5,ror#2 add r3,r3,r11 vext.8 q13,q15,q12,#4 bic r10,r6,r4 add r7,r7,r9 vadd.i32 q10,q12,q12 and r11,r5,r4 ldr r9,[sp,#44] vsri.32 q10,q12,#31 add r7,r7,r3,ror#27 eor r11,r11,r10 mov r4,r4,ror#2 vshr.u32 q12,q13,#30 add r7,r7,r11 bic r10,r5,r3 vshl.u32 q13,q13,#2 add r6,r6,r9 and r11,r4,r3 veor q10,q10,q12 ldr r9,[sp,#48] add r6,r6,r7,ror#27 veor q10,q10,q13 eor r11,r11,r10 mov r3,r3,ror#2 add r6,r6,r11 vext.8 q11,q3,q8,#8 bic r10,r4,r7 add r5,r5,r9 and r11,r3,r7 vadd.i32 q13,q10,q14 ldr r9,[sp,#52] add r5,r5,r6,ror#27 vext.8 q12,q10,q15,#4 eor r11,r11,r10 mov r7,r7,ror#2 add r5,r5,r11 veor q11,q11,q3 bic r10,r3,r6 add r4,r4,r9 veor q12,q12,q9 and r11,r7,r6 ldr r9,[sp,#56] veor q12,q12,q11 add r4,r4,r5,ror#27 eor r11,r11,r10 vst1.32 {q13},[r12,:128]! 
mov r6,r6,ror#2 add r4,r4,r11 vext.8 q13,q15,q12,#4 bic r10,r7,r5 add r3,r3,r9 vadd.i32 q11,q12,q12 and r11,r6,r5 ldr r9,[sp,#60] vsri.32 q11,q12,#31 add r3,r3,r4,ror#27 eor r11,r11,r10 mov r5,r5,ror#2 vshr.u32 q12,q13,#30 add r3,r3,r11 bic r10,r6,r4 vshl.u32 q13,q13,#2 add r7,r7,r9 and r11,r5,r4 veor q11,q11,q12 ldr r9,[sp,#0] add r7,r7,r3,ror#27 veor q11,q11,q13 eor r11,r11,r10 mov r4,r4,ror#2 add r7,r7,r11 vext.8 q12,q10,q11,#8 bic r10,r5,r3 add r6,r6,r9 and r11,r4,r3 veor q0,q0,q8 ldr r9,[sp,#4] add r6,r6,r7,ror#27 veor q0,q0,q1 eor r11,r11,r10 mov r3,r3,ror#2 vadd.i32 q13,q11,q14 add r6,r6,r11 bic r10,r4,r7 veor q12,q12,q0 add r5,r5,r9 and r11,r3,r7 vshr.u32 q0,q12,#30 ldr r9,[sp,#8] add r5,r5,r6,ror#27 vst1.32 {q13},[r12,:128]! sub r12,r12,#64 eor r11,r11,r10 mov r7,r7,ror#2 vsli.32 q0,q12,#2 add r5,r5,r11 bic r10,r3,r6 add r4,r4,r9 and r11,r7,r6 ldr r9,[sp,#12] add r4,r4,r5,ror#27 eor r11,r11,r10 mov r6,r6,ror#2 add r4,r4,r11 bic r10,r7,r5 add r3,r3,r9 and r11,r6,r5 ldr r9,[sp,#16] add r3,r3,r4,ror#27 eor r11,r11,r10 mov r5,r5,ror#2 add r3,r3,r11 vext.8 q12,q11,q0,#8 eor r10,r4,r6 add r7,r7,r9 ldr r9,[sp,#20] veor q1,q1,q9 eor r11,r10,r5 add r7,r7,r3,ror#27 veor q1,q1,q2 mov r4,r4,ror#2 add r7,r7,r11 vadd.i32 q13,q0,q14 eor r10,r3,r5 add r6,r6,r9 veor q12,q12,q1 ldr r9,[sp,#24] eor r11,r10,r4 vshr.u32 q1,q12,#30 add r6,r6,r7,ror#27 mov r3,r3,ror#2 vst1.32 {q13},[r12,:128]! add r6,r6,r11 eor r10,r7,r4 vsli.32 q1,q12,#2 add r5,r5,r9 ldr r9,[sp,#28] eor r11,r10,r3 add r5,r5,r6,ror#27 mov r7,r7,ror#2 add r5,r5,r11 eor r10,r6,r3 add r4,r4,r9 ldr r9,[sp,#32] eor r11,r10,r7 add r4,r4,r5,ror#27 mov r6,r6,ror#2 add r4,r4,r11 vext.8 q12,q0,q1,#8 eor r10,r5,r7 add r3,r3,r9 ldr r9,[sp,#36] veor q2,q2,q10 eor r11,r10,r6 add r3,r3,r4,ror#27 veor q2,q2,q3 mov r5,r5,ror#2 add r3,r3,r11 vadd.i32 q13,q1,q14 eor r10,r4,r6 vld1.32 {d28[],d29[]},[r8,:32]! 
add r7,r7,r9 veor q12,q12,q2 ldr r9,[sp,#40] eor r11,r10,r5 vshr.u32 q2,q12,#30 add r7,r7,r3,ror#27 mov r4,r4,ror#2 vst1.32 {q13},[r12,:128]! add r7,r7,r11 eor r10,r3,r5 vsli.32 q2,q12,#2 add r6,r6,r9 ldr r9,[sp,#44] eor r11,r10,r4 add r6,r6,r7,ror#27 mov r3,r3,ror#2 add r6,r6,r11 eor r10,r7,r4 add r5,r5,r9 ldr r9,[sp,#48] eor r11,r10,r3 add r5,r5,r6,ror#27 mov r7,r7,ror#2 add r5,r5,r11 vext.8 q12,q1,q2,#8 eor r10,r6,r3 add r4,r4,r9 ldr r9,[sp,#52] veor q3,q3,q11 eor r11,r10,r7 add r4,r4,r5,ror#27 veor q3,q3,q8 mov r6,r6,ror#2 add r4,r4,r11 vadd.i32 q13,q2,q14 eor r10,r5,r7 add r3,r3,r9 veor q12,q12,q3 ldr r9,[sp,#56] eor r11,r10,r6 vshr.u32 q3,q12,#30 add r3,r3,r4,ror#27 mov r5,r5,ror#2 vst1.32 {q13},[r12,:128]! add r3,r3,r11 eor r10,r4,r6 vsli.32 q3,q12,#2 add r7,r7,r9 ldr r9,[sp,#60] eor r11,r10,r5 add r7,r7,r3,ror#27 mov r4,r4,ror#2 add r7,r7,r11 eor r10,r3,r5 add r6,r6,r9 ldr r9,[sp,#0] eor r11,r10,r4 add r6,r6,r7,ror#27 mov r3,r3,ror#2 add r6,r6,r11 vext.8 q12,q2,q3,#8 eor r10,r7,r4 add r5,r5,r9 ldr r9,[sp,#4] veor q8,q8,q0 eor r11,r10,r3 add r5,r5,r6,ror#27 veor q8,q8,q9 mov r7,r7,ror#2 add r5,r5,r11 vadd.i32 q13,q3,q14 eor r10,r6,r3 add r4,r4,r9 veor q12,q12,q8 ldr r9,[sp,#8] eor r11,r10,r7 vshr.u32 q8,q12,#30 add r4,r4,r5,ror#27 mov r6,r6,ror#2 vst1.32 {q13},[r12,:128]! sub r12,r12,#64 add r4,r4,r11 eor r10,r5,r7 vsli.32 q8,q12,#2 add r3,r3,r9 ldr r9,[sp,#12] eor r11,r10,r6 add r3,r3,r4,ror#27 mov r5,r5,ror#2 add r3,r3,r11 eor r10,r4,r6 add r7,r7,r9 ldr r9,[sp,#16] eor r11,r10,r5 add r7,r7,r3,ror#27 mov r4,r4,ror#2 add r7,r7,r11 vext.8 q12,q3,q8,#8 eor r10,r3,r5 add r6,r6,r9 ldr r9,[sp,#20] veor q9,q9,q1 eor r11,r10,r4 add r6,r6,r7,ror#27 veor q9,q9,q10 mov r3,r3,ror#2 add r6,r6,r11 vadd.i32 q13,q8,q14 eor r10,r7,r4 add r5,r5,r9 veor q12,q12,q9 ldr r9,[sp,#24] eor r11,r10,r3 vshr.u32 q9,q12,#30 add r5,r5,r6,ror#27 mov r7,r7,ror#2 vst1.32 {q13},[r12,:128]! 
add r5,r5,r11 eor r10,r6,r3 vsli.32 q9,q12,#2 add r4,r4,r9 ldr r9,[sp,#28] eor r11,r10,r7 add r4,r4,r5,ror#27 mov r6,r6,ror#2 add r4,r4,r11 eor r10,r5,r7 add r3,r3,r9 ldr r9,[sp,#32] eor r11,r10,r6 add r3,r3,r4,ror#27 mov r5,r5,ror#2 add r3,r3,r11 vext.8 q12,q8,q9,#8 add r7,r7,r9 and r10,r5,r6 ldr r9,[sp,#36] veor q10,q10,q2 add r7,r7,r3,ror#27 eor r11,r5,r6 veor q10,q10,q11 add r7,r7,r10 and r11,r11,r4 vadd.i32 q13,q9,q14 mov r4,r4,ror#2 add r7,r7,r11 veor q12,q12,q10 add r6,r6,r9 and r10,r4,r5 vshr.u32 q10,q12,#30 ldr r9,[sp,#40] add r6,r6,r7,ror#27 vst1.32 {q13},[r12,:128]! eor r11,r4,r5 add r6,r6,r10 vsli.32 q10,q12,#2 and r11,r11,r3 mov r3,r3,ror#2 add r6,r6,r11 add r5,r5,r9 and r10,r3,r4 ldr r9,[sp,#44] add r5,r5,r6,ror#27 eor r11,r3,r4 add r5,r5,r10 and r11,r11,r7 mov r7,r7,ror#2 add r5,r5,r11 add r4,r4,r9 and r10,r7,r3 ldr r9,[sp,#48] add r4,r4,r5,ror#27 eor r11,r7,r3 add r4,r4,r10 and r11,r11,r6 mov r6,r6,ror#2 add r4,r4,r11 vext.8 q12,q9,q10,#8 add r3,r3,r9 and r10,r6,r7 ldr r9,[sp,#52] veor q11,q11,q3 add r3,r3,r4,ror#27 eor r11,r6,r7 veor q11,q11,q0 add r3,r3,r10 and r11,r11,r5 vadd.i32 q13,q10,q14 mov r5,r5,ror#2 vld1.32 {d28[],d29[]},[r8,:32]! add r3,r3,r11 veor q12,q12,q11 add r7,r7,r9 and r10,r5,r6 vshr.u32 q11,q12,#30 ldr r9,[sp,#56] add r7,r7,r3,ror#27 vst1.32 {q13},[r12,:128]! 
eor r11,r5,r6 add r7,r7,r10 vsli.32 q11,q12,#2 and r11,r11,r4 mov r4,r4,ror#2 add r7,r7,r11 add r6,r6,r9 and r10,r4,r5 ldr r9,[sp,#60] add r6,r6,r7,ror#27 eor r11,r4,r5 add r6,r6,r10 and r11,r11,r3 mov r3,r3,ror#2 add r6,r6,r11 add r5,r5,r9 and r10,r3,r4 ldr r9,[sp,#0] add r5,r5,r6,ror#27 eor r11,r3,r4 add r5,r5,r10 and r11,r11,r7 mov r7,r7,ror#2 add r5,r5,r11 vext.8 q12,q10,q11,#8 add r4,r4,r9 and r10,r7,r3 ldr r9,[sp,#4] veor q0,q0,q8 add r4,r4,r5,ror#27 eor r11,r7,r3 veor q0,q0,q1 add r4,r4,r10 and r11,r11,r6 vadd.i32 q13,q11,q14 mov r6,r6,ror#2 add r4,r4,r11 veor q12,q12,q0 add r3,r3,r9 and r10,r6,r7 vshr.u32 q0,q12,#30 ldr r9,[sp,#8] add r3,r3,r4,ror#27 vst1.32 {q13},[r12,:128]! sub r12,r12,#64 eor r11,r6,r7 add r3,r3,r10 vsli.32 q0,q12,#2 and r11,r11,r5 mov r5,r5,ror#2 add r3,r3,r11 add r7,r7,r9 and r10,r5,r6 ldr r9,[sp,#12] add r7,r7,r3,ror#27 eor r11,r5,r6 add r7,r7,r10 and r11,r11,r4 mov r4,r4,ror#2 add r7,r7,r11 add r6,r6,r9 and r10,r4,r5 ldr r9,[sp,#16] add r6,r6,r7,ror#27 eor r11,r4,r5 add r6,r6,r10 and r11,r11,r3 mov r3,r3,ror#2 add r6,r6,r11 vext.8 q12,q11,q0,#8 add r5,r5,r9 and r10,r3,r4 ldr r9,[sp,#20] veor q1,q1,q9 add r5,r5,r6,ror#27 eor r11,r3,r4 veor q1,q1,q2 add r5,r5,r10 and r11,r11,r7 vadd.i32 q13,q0,q14 mov r7,r7,ror#2 add r5,r5,r11 veor q12,q12,q1 add r4,r4,r9 and r10,r7,r3 vshr.u32 q1,q12,#30 ldr r9,[sp,#24] add r4,r4,r5,ror#27 vst1.32 {q13},[r12,:128]! 
eor r11,r7,r3 add r4,r4,r10 vsli.32 q1,q12,#2 and r11,r11,r6 mov r6,r6,ror#2 add r4,r4,r11 add r3,r3,r9 and r10,r6,r7 ldr r9,[sp,#28] add r3,r3,r4,ror#27 eor r11,r6,r7 add r3,r3,r10 and r11,r11,r5 mov r5,r5,ror#2 add r3,r3,r11 add r7,r7,r9 and r10,r5,r6 ldr r9,[sp,#32] add r7,r7,r3,ror#27 eor r11,r5,r6 add r7,r7,r10 and r11,r11,r4 mov r4,r4,ror#2 add r7,r7,r11 vext.8 q12,q0,q1,#8 add r6,r6,r9 and r10,r4,r5 ldr r9,[sp,#36] veor q2,q2,q10 add r6,r6,r7,ror#27 eor r11,r4,r5 veor q2,q2,q3 add r6,r6,r10 and r11,r11,r3 vadd.i32 q13,q1,q14 mov r3,r3,ror#2 add r6,r6,r11 veor q12,q12,q2 add r5,r5,r9 and r10,r3,r4 vshr.u32 q2,q12,#30 ldr r9,[sp,#40] add r5,r5,r6,ror#27 vst1.32 {q13},[r12,:128]! eor r11,r3,r4 add r5,r5,r10 vsli.32 q2,q12,#2 and r11,r11,r7 mov r7,r7,ror#2 add r5,r5,r11 add r4,r4,r9 and r10,r7,r3 ldr r9,[sp,#44] add r4,r4,r5,ror#27 eor r11,r7,r3 add r4,r4,r10 and r11,r11,r6 mov r6,r6,ror#2 add r4,r4,r11 add r3,r3,r9 and r10,r6,r7 ldr r9,[sp,#48] add r3,r3,r4,ror#27 eor r11,r6,r7 add r3,r3,r10 and r11,r11,r5 mov r5,r5,ror#2 add r3,r3,r11 vext.8 q12,q1,q2,#8 eor r10,r4,r6 add r7,r7,r9 ldr r9,[sp,#52] veor q3,q3,q11 eor r11,r10,r5 add r7,r7,r3,ror#27 veor q3,q3,q8 mov r4,r4,ror#2 add r7,r7,r11 vadd.i32 q13,q2,q14 eor r10,r3,r5 add r6,r6,r9 veor q12,q12,q3 ldr r9,[sp,#56] eor r11,r10,r4 vshr.u32 q3,q12,#30 add r6,r6,r7,ror#27 mov r3,r3,ror#2 vst1.32 {q13},[r12,:128]! add r6,r6,r11 eor r10,r7,r4 vsli.32 q3,q12,#2 add r5,r5,r9 ldr r9,[sp,#60] eor r11,r10,r3 add r5,r5,r6,ror#27 mov r7,r7,ror#2 add r5,r5,r11 eor r10,r6,r3 add r4,r4,r9 ldr r9,[sp,#0] eor r11,r10,r7 add r4,r4,r5,ror#27 mov r6,r6,ror#2 add r4,r4,r11 vadd.i32 q13,q3,q14 eor r10,r5,r7 add r3,r3,r9 vst1.32 {q13},[r12,:128]! sub r12,r12,#64 teq r1,r2 sub r8,r8,#16 it eq subeq r1,r1,#64 vld1.8 {q0,q1},[r1]! ldr r9,[sp,#4] eor r11,r10,r6 vld1.8 {q2,q3},[r1]! add r3,r3,r4,ror#27 mov r5,r5,ror#2 vld1.32 {d28[],d29[]},[r8,:32]! 
add r3,r3,r11 eor r10,r4,r6 vrev32.8 q0,q0 add r7,r7,r9 ldr r9,[sp,#8] eor r11,r10,r5 add r7,r7,r3,ror#27 mov r4,r4,ror#2 add r7,r7,r11 eor r10,r3,r5 add r6,r6,r9 ldr r9,[sp,#12] eor r11,r10,r4 add r6,r6,r7,ror#27 mov r3,r3,ror#2 add r6,r6,r11 eor r10,r7,r4 add r5,r5,r9 ldr r9,[sp,#16] eor r11,r10,r3 add r5,r5,r6,ror#27 mov r7,r7,ror#2 add r5,r5,r11 vrev32.8 q1,q1 eor r10,r6,r3 add r4,r4,r9 vadd.i32 q8,q0,q14 ldr r9,[sp,#20] eor r11,r10,r7 vst1.32 {q8},[r12,:128]! add r4,r4,r5,ror#27 mov r6,r6,ror#2 add r4,r4,r11 eor r10,r5,r7 add r3,r3,r9 ldr r9,[sp,#24] eor r11,r10,r6 add r3,r3,r4,ror#27 mov r5,r5,ror#2 add r3,r3,r11 eor r10,r4,r6 add r7,r7,r9 ldr r9,[sp,#28] eor r11,r10,r5 add r7,r7,r3,ror#27 mov r4,r4,ror#2 add r7,r7,r11 eor r10,r3,r5 add r6,r6,r9 ldr r9,[sp,#32] eor r11,r10,r4 add r6,r6,r7,ror#27 mov r3,r3,ror#2 add r6,r6,r11 vrev32.8 q2,q2 eor r10,r7,r4 add r5,r5,r9 vadd.i32 q9,q1,q14 ldr r9,[sp,#36] eor r11,r10,r3 vst1.32 {q9},[r12,:128]! add r5,r5,r6,ror#27 mov r7,r7,ror#2 add r5,r5,r11 eor r10,r6,r3 add r4,r4,r9 ldr r9,[sp,#40] eor r11,r10,r7 add r4,r4,r5,ror#27 mov r6,r6,ror#2 add r4,r4,r11 eor r10,r5,r7 add r3,r3,r9 ldr r9,[sp,#44] eor r11,r10,r6 add r3,r3,r4,ror#27 mov r5,r5,ror#2 add r3,r3,r11 eor r10,r4,r6 add r7,r7,r9 ldr r9,[sp,#48] eor r11,r10,r5 add r7,r7,r3,ror#27 mov r4,r4,ror#2 add r7,r7,r11 vrev32.8 q3,q3 eor r10,r3,r5 add r6,r6,r9 vadd.i32 q10,q2,q14 ldr r9,[sp,#52] eor r11,r10,r4 vst1.32 {q10},[r12,:128]! 
add r6,r6,r7,ror#27 mov r3,r3,ror#2 add r6,r6,r11 eor r10,r7,r4 add r5,r5,r9 ldr r9,[sp,#56] eor r11,r10,r3 add r5,r5,r6,ror#27 mov r7,r7,ror#2 add r5,r5,r11 eor r10,r6,r3 add r4,r4,r9 ldr r9,[sp,#60] eor r11,r10,r7 add r4,r4,r5,ror#27 mov r6,r6,ror#2 add r4,r4,r11 eor r10,r5,r7 add r3,r3,r9 eor r11,r10,r6 add r3,r3,r4,ror#27 mov r5,r5,ror#2 add r3,r3,r11 ldmia r0,{r9,r10,r11,r12} @ accumulate context add r3,r3,r9 ldr r9,[r0,#16] add r4,r4,r10 add r5,r5,r11 add r6,r6,r12 it eq moveq sp,r14 add r7,r7,r9 it ne ldrne r9,[sp] stmia r0,{r3,r4,r5,r6,r7} itt ne addne r12,sp,#3*16 bne .Loop_neon @ vldmia sp!,{d8-d15} ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} .size sha1_block_data_order_neon,.-sha1_block_data_order_neon #endif #if __ARM_MAX_ARCH__>=7 # if defined(__thumb2__) # define INST(a,b,c,d) .byte c,d|0xf,a,b # else # define INST(a,b,c,d) .byte a,b,c,d|0x10 # endif .globl sha1_block_data_order_hw .hidden sha1_block_data_order_hw .type sha1_block_data_order_hw,%function .align 5 sha1_block_data_order_hw: vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so veor q1,q1,q1 adr r3,.LK_00_19 vld1.32 {q0},[r0]! vld1.32 {d2[0]},[r0] sub r0,r0,#16 vld1.32 {d16[],d17[]},[r3,:32]! vld1.32 {d18[],d19[]},[r3,:32]! vld1.32 {d20[],d21[]},[r3,:32]! vld1.32 {d22[],d23[]},[r3,:32] .Loop_v8: vld1.8 {q4,q5},[r1]! vld1.8 {q6,q7},[r1]! 
vrev32.8 q4,q4 vrev32.8 q5,q5 vadd.i32 q12,q8,q4 vrev32.8 q6,q6 vmov q14,q0 @ offload subs r2,r2,#1 vadd.i32 q13,q8,q5 vrev32.8 q7,q7 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 0 INST(0x68,0x0c,0x02,0xe2) @ sha1c q0,q1,q12 vadd.i32 q12,q8,q6 INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 1 INST(0x6a,0x0c,0x06,0xe2) @ sha1c q0,q3,q13 vadd.i32 q13,q8,q7 INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 2 INST(0x68,0x0c,0x04,0xe2) @ sha1c q0,q2,q12 vadd.i32 q12,q8,q4 INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 3 INST(0x6a,0x0c,0x06,0xe2) @ sha1c q0,q3,q13 vadd.i32 q13,q9,q5 INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 4 INST(0x68,0x0c,0x04,0xe2) @ sha1c q0,q2,q12 vadd.i32 q12,q9,q6 INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 5 INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 vadd.i32 q13,q9,q7 INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 6 INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 vadd.i32 q12,q9,q4 INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 7 INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 vadd.i32 q13,q9,q5 INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 8 INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 vadd.i32 q12,q10,q6 INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 9 INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 vadd.i32 q13,q10,q7 INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 
INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 10 INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12 vadd.i32 q12,q10,q4 INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 11 INST(0x6a,0x0c,0x26,0xe2) @ sha1m q0,q3,q13 vadd.i32 q13,q10,q5 INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 12 INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12 vadd.i32 q12,q10,q6 INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 13 INST(0x6a,0x0c,0x26,0xe2) @ sha1m q0,q3,q13 vadd.i32 q13,q11,q7 INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7 INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 14 INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12 vadd.i32 q12,q11,q4 INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4 INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 15 INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 vadd.i32 q13,q11,q5 INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5 INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 16 INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 vadd.i32 q12,q11,q6 INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 17 INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 vadd.i32 q13,q11,q7 INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 18 INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12 INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 19 INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13 vadd.i32 q1,q1,q2 vadd.i32 q0,q0,q14 bne .Loop_v8 vst1.32 {q0},[r0]! 
vst1.32 {d2[0]},[r0] vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} bx lr @ bx lr .size sha1_block_data_order_hw,.-sha1_block_data_order_hw #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha1-armv8-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include .text .globl _sha1_block_data_order_nohw .private_extern _sha1_block_data_order_nohw .align 6 _sha1_block_data_order_nohw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-96]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] ldp w20,w21,[x0] ldp w22,w23,[x0,#8] ldr w24,[x0,#16] Loop: ldr x3,[x1],#64 movz w28,#0x7999 sub x2,x2,#1 movk w28,#0x5a82,lsl#16 #ifdef __AARCH64EB__ ror x3,x3,#32 #else rev32 x3,x3 #endif add w24,w24,w28 // warm it up add w24,w24,w3 lsr x4,x3,#32 ldr x5,[x1,#-56] bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 add w23,w23,w4 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x5,x5,#32 #else rev32 x5,x5 #endif bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 add w22,w22,w5 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) lsr x6,x5,#32 ldr x7,[x1,#-48] bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 add w21,w21,w6 
// future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x7,x7,#32 #else rev32 x7,x7 #endif bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w7 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) lsr x8,x7,#32 ldr x9,[x1,#-40] bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 add w24,w24,w8 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x9,x9,#32 #else rev32 x9,x9 #endif bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 add w23,w23,w9 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) lsr x10,x9,#32 ldr x11,[x1,#-32] bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 add w22,w22,w10 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x11,x11,#32 #else rev32 x11,x11 #endif bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 add w21,w21,w11 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) lsr x12,x11,#32 ldr x13,[x1,#-24] bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w12 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x13,x13,#32 #else rev32 x13,x13 #endif bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 add w24,w24,w13 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) lsr x14,x13,#32 ldr x15,[x1,#-16] bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) ror 
w21,w21,#2 add w23,w23,w14 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x15,x15,#32 #else rev32 x15,x15 #endif bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 add w22,w22,w15 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) lsr x16,x15,#32 ldr x17,[x1,#-8] bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 add w21,w21,w16 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x17,x17,#32 #else rev32 x17,x17 #endif bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w17 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) lsr x19,x17,#32 eor w3,w3,w5 bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 eor w3,w3,w11 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) eor w3,w3,w16 ror w22,w22,#2 add w24,w24,w19 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w3,w3,#31 eor w4,w4,w6 bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 eor w4,w4,w12 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) eor w4,w4,w17 ror w21,w21,#2 add w23,w23,w3 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w4,w4,#31 eor w5,w5,w7 bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 eor w5,w5,w13 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) eor w5,w5,w19 ror w20,w20,#2 add w22,w22,w4 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w5,w5,#31 eor w6,w6,w8 bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 eor w6,w6,w14 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) eor w6,w6,w3 ror w24,w24,#2 add w21,w21,w5 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w6,w6,#31 eor w7,w7,w9 bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 eor w7,w7,w15 add 
w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) eor w7,w7,w4 ror w23,w23,#2 add w20,w20,w6 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w7,w7,#31 movz w28,#0xeba1 movk w28,#0x6ed9,lsl#16 eor w8,w8,w10 bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 eor w8,w8,w16 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) eor w8,w8,w5 ror w22,w22,#2 add w24,w24,w7 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w8,w8,#31 eor w9,w9,w11 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w9,w9,w17 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w9,w9,w6 add w23,w23,w8 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w9,w9,#31 eor w10,w10,w12 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w10,w10,w19 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w10,w10,w7 add w22,w22,w9 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w10,w10,#31 eor w11,w11,w13 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w11,w11,w3 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w11,w11,w8 add w21,w21,w10 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w11,w11,#31 eor w12,w12,w14 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w12,w12,w4 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w12,w12,w9 add w20,w20,w11 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w12,w12,#31 eor w13,w13,w15 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w13,w13,w5 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w13,w13,w10 add w24,w24,w12 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w13,w13,#31 eor w14,w14,w16 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w14,w14,w6 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w14,w14,w11 add w23,w23,w13 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w14,w14,#31 eor 
w15,w15,w17 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w15,w15,w7 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w15,w15,w12 add w22,w22,w14 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w15,w15,#31 eor w16,w16,w19 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w16,w16,w8 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w16,w16,w13 add w21,w21,w15 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w16,w16,#31 eor w17,w17,w3 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w17,w17,w9 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w17,w17,w14 add w20,w20,w16 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w17,w17,#31 eor w19,w19,w4 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w19,w19,w10 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w19,w19,w15 add w24,w24,w17 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w19,w19,#31 eor w3,w3,w5 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w3,w3,w11 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w3,w3,w16 add w23,w23,w19 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w3,w3,#31 eor w4,w4,w6 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w4,w4,w12 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w4,w4,w17 add w22,w22,w3 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w4,w4,#31 eor w5,w5,w7 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w5,w5,w13 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w5,w5,w19 add w21,w21,w4 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w5,w5,#31 eor w6,w6,w8 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w6,w6,w14 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w6,w6,w3 add w20,w20,w5 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w6,w6,#31 eor w7,w7,w9 eor 
w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w7,w7,w15 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w7,w7,w4 add w24,w24,w6 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w7,w7,#31 eor w8,w8,w10 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w8,w8,w16 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w8,w8,w5 add w23,w23,w7 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w8,w8,#31 eor w9,w9,w11 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w9,w9,w17 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w9,w9,w6 add w22,w22,w8 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w9,w9,#31 eor w10,w10,w12 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w10,w10,w19 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w10,w10,w7 add w21,w21,w9 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w10,w10,#31 eor w11,w11,w13 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w11,w11,w3 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w11,w11,w8 add w20,w20,w10 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w11,w11,#31 movz w28,#0xbcdc movk w28,#0x8f1b,lsl#16 eor w12,w12,w14 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w12,w12,w4 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w12,w12,w9 add w24,w24,w11 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w12,w12,#31 orr w25,w21,w22 and w26,w21,w22 eor w13,w13,w15 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w13,w13,w5 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w13,w13,w10 add w23,w23,w12 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w13,w13,#31 orr w25,w20,w21 and w26,w20,w21 eor w14,w14,w16 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w14,w14,w6 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w14,w14,w11 
add w22,w22,w13 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w14,w14,#31 orr w25,w24,w20 and w26,w24,w20 eor w15,w15,w17 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w15,w15,w7 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w15,w15,w12 add w21,w21,w14 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w15,w15,#31 orr w25,w23,w24 and w26,w23,w24 eor w16,w16,w19 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w16,w16,w8 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w16,w16,w13 add w20,w20,w15 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w16,w16,#31 orr w25,w22,w23 and w26,w22,w23 eor w17,w17,w3 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w17,w17,w9 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w17,w17,w14 add w24,w24,w16 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w17,w17,#31 orr w25,w21,w22 and w26,w21,w22 eor w19,w19,w4 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w19,w19,w10 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w19,w19,w15 add w23,w23,w17 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w19,w19,#31 orr w25,w20,w21 and w26,w20,w21 eor w3,w3,w5 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w3,w3,w11 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w3,w3,w16 add w22,w22,w19 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w3,w3,#31 orr w25,w24,w20 and w26,w24,w20 eor w4,w4,w6 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w4,w4,w12 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w4,w4,w17 add w21,w21,w3 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w4,w4,#31 orr w25,w23,w24 and w26,w23,w24 eor w5,w5,w7 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w5,w5,w13 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w5,w5,w19 add w20,w20,w4 // future 
e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w5,w5,#31 orr w25,w22,w23 and w26,w22,w23 eor w6,w6,w8 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w6,w6,w14 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w6,w6,w3 add w24,w24,w5 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w6,w6,#31 orr w25,w21,w22 and w26,w21,w22 eor w7,w7,w9 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w7,w7,w15 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w7,w7,w4 add w23,w23,w6 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w7,w7,#31 orr w25,w20,w21 and w26,w20,w21 eor w8,w8,w10 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w8,w8,w16 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w8,w8,w5 add w22,w22,w7 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w8,w8,#31 orr w25,w24,w20 and w26,w24,w20 eor w9,w9,w11 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w9,w9,w17 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w9,w9,w6 add w21,w21,w8 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w9,w9,#31 orr w25,w23,w24 and w26,w23,w24 eor w10,w10,w12 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w10,w10,w19 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w10,w10,w7 add w20,w20,w9 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w10,w10,#31 orr w25,w22,w23 and w26,w22,w23 eor w11,w11,w13 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w11,w11,w3 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w11,w11,w8 add w24,w24,w10 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w11,w11,#31 orr w25,w21,w22 and w26,w21,w22 eor w12,w12,w14 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w12,w12,w4 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w12,w12,w9 add w23,w23,w11 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror 
w12,w12,#31 orr w25,w20,w21 and w26,w20,w21 eor w13,w13,w15 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w13,w13,w5 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w13,w13,w10 add w22,w22,w12 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w13,w13,#31 orr w25,w24,w20 and w26,w24,w20 eor w14,w14,w16 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w14,w14,w6 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w14,w14,w11 add w21,w21,w13 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w14,w14,#31 orr w25,w23,w24 and w26,w23,w24 eor w15,w15,w17 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w15,w15,w7 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w15,w15,w12 add w20,w20,w14 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w15,w15,#31 movz w28,#0xc1d6 movk w28,#0xca62,lsl#16 orr w25,w22,w23 and w26,w22,w23 eor w16,w16,w19 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w16,w16,w8 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w16,w16,w13 add w24,w24,w15 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w16,w16,#31 eor w17,w17,w3 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w17,w17,w9 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w17,w17,w14 add w23,w23,w16 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w17,w17,#31 eor w19,w19,w4 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w19,w19,w10 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w19,w19,w15 add w22,w22,w17 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w19,w19,#31 eor w3,w3,w5 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w3,w3,w11 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w3,w3,w16 add w21,w21,w19 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w3,w3,#31 eor w4,w4,w6 eor w25,w20,w23 ror w27,w22,#27 add 
w20,w20,w28 // future e+=K eor w4,w4,w12 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w4,w4,w17 add w20,w20,w3 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w4,w4,#31 eor w5,w5,w7 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w5,w5,w13 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w5,w5,w19 add w24,w24,w4 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w5,w5,#31 eor w6,w6,w8 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w6,w6,w14 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w6,w6,w3 add w23,w23,w5 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w6,w6,#31 eor w7,w7,w9 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w7,w7,w15 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w7,w7,w4 add w22,w22,w6 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w7,w7,#31 eor w8,w8,w10 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w8,w8,w16 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w8,w8,w5 add w21,w21,w7 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w8,w8,#31 eor w9,w9,w11 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w9,w9,w17 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w9,w9,w6 add w20,w20,w8 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w9,w9,#31 eor w10,w10,w12 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w10,w10,w19 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w10,w10,w7 add w24,w24,w9 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w10,w10,#31 eor w11,w11,w13 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w11,w11,w3 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w11,w11,w8 add w23,w23,w10 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w11,w11,#31 eor w12,w12,w14 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor 
w12,w12,w4 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w12,w12,w9 add w22,w22,w11 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w12,w12,#31 eor w13,w13,w15 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w13,w13,w5 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w13,w13,w10 add w21,w21,w12 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w13,w13,#31 eor w14,w14,w16 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w14,w14,w6 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w14,w14,w11 add w20,w20,w13 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w14,w14,#31 eor w15,w15,w17 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w15,w15,w7 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w15,w15,w12 add w24,w24,w14 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w15,w15,#31 eor w16,w16,w19 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w16,w16,w8 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w16,w16,w13 add w23,w23,w15 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w16,w16,#31 eor w17,w17,w3 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w17,w17,w9 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w17,w17,w14 add w22,w22,w16 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w17,w17,#31 eor w19,w19,w4 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w19,w19,w10 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w19,w19,w15 add w21,w21,w17 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w19,w19,#31 ldp w4,w5,[x0] eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w19 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ldp w6,w7,[x0,#8] eor w25,w24,w22 ror w27,w21,#27 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror 
w22,w22,#2 ldr w8,[x0,#16] add w20,w20,w25 // e+=F(b,c,d) add w21,w21,w5 add w22,w22,w6 add w20,w20,w4 add w23,w23,w7 add w24,w24,w8 stp w20,w21,[x0] stp w22,w23,[x0,#8] str w24,[x0,#16] cbnz x2,Loop ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldp x25,x26,[sp,#64] ldp x27,x28,[sp,#80] ldr x29,[sp],#96 ret .globl _sha1_block_data_order_hw .private_extern _sha1_block_data_order_hw .align 6 _sha1_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 adrp x4,Lconst@PAGE add x4,x4,Lconst@PAGEOFF eor v1.16b,v1.16b,v1.16b ld1 {v0.4s},[x0],#16 ld1 {v1.s}[0],[x0] sub x0,x0,#16 ld1 {v16.4s,v17.4s,v18.4s,v19.4s},[x4] Loop_hw: ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 sub x2,x2,#1 rev32 v4.16b,v4.16b rev32 v5.16b,v5.16b add v20.4s,v16.4s,v4.4s rev32 v6.16b,v6.16b orr v22.16b,v0.16b,v0.16b // offload add v21.4s,v16.4s,v5.4s rev32 v7.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b .long 0x5e140020 //sha1c v0.16b,v1.16b,v20.4s // 0 add v20.4s,v16.4s,v6.4s .long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 1 .long 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s add v21.4s,v16.4s,v7.4s .long 0x5e2818e4 //sha1su1 v4.16b,v7.16b .long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 2 .long 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s add v20.4s,v16.4s,v4.4s .long 0x5e281885 //sha1su1 v5.16b,v4.16b .long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 3 .long 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s add v21.4s,v17.4s,v5.4s .long 0x5e2818a6 //sha1su1 v6.16b,v5.16b .long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 4 .long 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s add v20.4s,v17.4s,v6.4s .long 0x5e2818c7 //sha1su1 v7.16b,v6.16b .long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 5 .long 
0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v17.4s,v7.4s .long 0x5e2818e4 //sha1su1 v4.16b,v7.16b .long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 6 .long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s add v20.4s,v17.4s,v4.4s .long 0x5e281885 //sha1su1 v5.16b,v4.16b .long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 7 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v17.4s,v5.4s .long 0x5e2818a6 //sha1su1 v6.16b,v5.16b .long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 8 .long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s add v20.4s,v18.4s,v6.4s .long 0x5e2818c7 //sha1su1 v7.16b,v6.16b .long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 9 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v18.4s,v7.4s .long 0x5e2818e4 //sha1su1 v4.16b,v7.16b .long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 10 .long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s add v20.4s,v18.4s,v4.4s .long 0x5e281885 //sha1su1 v5.16b,v4.16b .long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 11 .long 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s add v21.4s,v18.4s,v5.4s .long 0x5e2818a6 //sha1su1 v6.16b,v5.16b .long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 12 .long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s add v20.4s,v18.4s,v6.4s .long 0x5e2818c7 //sha1su1 v7.16b,v6.16b .long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 13 .long 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s add v21.4s,v19.4s,v7.4s .long 0x5e2818e4 //sha1su1 v4.16b,v7.16b .long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 14 .long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s add v20.4s,v19.4s,v4.4s .long 0x5e281885 //sha1su1 v5.16b,v4.16b .long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .long 
0x5e280802 //sha1h v2.16b,v0.16b // 15 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v19.4s,v5.4s .long 0x5e2818a6 //sha1su1 v6.16b,v5.16b .long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 16 .long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s add v20.4s,v19.4s,v6.4s .long 0x5e2818c7 //sha1su1 v7.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 17 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v19.4s,v7.4s .long 0x5e280803 //sha1h v3.16b,v0.16b // 18 .long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s .long 0x5e280802 //sha1h v2.16b,v0.16b // 19 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v1.4s,v1.4s,v2.4s add v0.4s,v0.4s,v22.4s cbnz x2,Loop_hw st1 {v0.4s},[x0],#16 st1 {v1.s}[0],[x0] ldr x29,[sp],#16 ret .section __TEXT,__const .align 6 Lconst: .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 //K_00_19 .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 //K_20_39 .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc //K_40_59 .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 //K_60_79 .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha1-armv8-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include .text .globl sha1_block_data_order_nohw .hidden sha1_block_data_order_nohw .type sha1_block_data_order_nohw,%function .align 6 sha1_block_data_order_nohw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-96]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] ldp w20,w21,[x0] ldp w22,w23,[x0,#8] ldr w24,[x0,#16] .Loop: ldr x3,[x1],#64 movz w28,#0x7999 sub x2,x2,#1 movk w28,#0x5a82,lsl#16 #ifdef __AARCH64EB__ ror x3,x3,#32 #else rev32 x3,x3 #endif add w24,w24,w28 // warm it up add w24,w24,w3 lsr x4,x3,#32 ldr x5,[x1,#-56] bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 add w23,w23,w4 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x5,x5,#32 #else rev32 x5,x5 #endif bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 add w22,w22,w5 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) lsr x6,x5,#32 ldr x7,[x1,#-48] bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 add w21,w21,w6 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x7,x7,#32 #else rev32 x7,x7 #endif bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w7 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) lsr x8,x7,#32 ldr x9,[x1,#-40] bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 add w24,w24,w8 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror 
x9,x9,#32 #else rev32 x9,x9 #endif bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 add w23,w23,w9 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) lsr x10,x9,#32 ldr x11,[x1,#-32] bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 add w22,w22,w10 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x11,x11,#32 #else rev32 x11,x11 #endif bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 add w21,w21,w11 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) lsr x12,x11,#32 ldr x13,[x1,#-24] bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w12 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x13,x13,#32 #else rev32 x13,x13 #endif bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 add w24,w24,w13 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) lsr x14,x13,#32 ldr x15,[x1,#-16] bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 add w23,w23,w14 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x15,x15,#32 #else rev32 x15,x15 #endif bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 add w22,w22,w15 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) lsr x16,x15,#32 ldr x17,[x1,#-8] bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 add w21,w21,w16 // future e+=X[i] add w22,w22,w25 // 
e+=F(b,c,d) #ifdef __AARCH64EB__ ror x17,x17,#32 #else rev32 x17,x17 #endif bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w17 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) lsr x19,x17,#32 eor w3,w3,w5 bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 eor w3,w3,w11 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) eor w3,w3,w16 ror w22,w22,#2 add w24,w24,w19 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w3,w3,#31 eor w4,w4,w6 bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 eor w4,w4,w12 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) eor w4,w4,w17 ror w21,w21,#2 add w23,w23,w3 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w4,w4,#31 eor w5,w5,w7 bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 eor w5,w5,w13 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) eor w5,w5,w19 ror w20,w20,#2 add w22,w22,w4 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w5,w5,#31 eor w6,w6,w8 bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 eor w6,w6,w14 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) eor w6,w6,w3 ror w24,w24,#2 add w21,w21,w5 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w6,w6,#31 eor w7,w7,w9 bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 eor w7,w7,w15 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) eor w7,w7,w4 ror w23,w23,#2 add w20,w20,w6 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w7,w7,#31 movz w28,#0xeba1 movk w28,#0x6ed9,lsl#16 eor w8,w8,w10 bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 eor w8,w8,w16 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) eor w8,w8,w5 ror w22,w22,#2 add w24,w24,w7 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w8,w8,#31 eor w9,w9,w11 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w9,w9,w17 eor 
w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w9,w9,w6 add w23,w23,w8 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w9,w9,#31 eor w10,w10,w12 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w10,w10,w19 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w10,w10,w7 add w22,w22,w9 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w10,w10,#31 eor w11,w11,w13 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w11,w11,w3 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w11,w11,w8 add w21,w21,w10 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w11,w11,#31 eor w12,w12,w14 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w12,w12,w4 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w12,w12,w9 add w20,w20,w11 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w12,w12,#31 eor w13,w13,w15 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w13,w13,w5 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w13,w13,w10 add w24,w24,w12 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w13,w13,#31 eor w14,w14,w16 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w14,w14,w6 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w14,w14,w11 add w23,w23,w13 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w14,w14,#31 eor w15,w15,w17 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w15,w15,w7 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w15,w15,w12 add w22,w22,w14 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w15,w15,#31 eor w16,w16,w19 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w16,w16,w8 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w16,w16,w13 add w21,w21,w15 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w16,w16,#31 eor w17,w17,w3 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor 
w17,w17,w9 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w17,w17,w14 add w20,w20,w16 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w17,w17,#31 eor w19,w19,w4 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w19,w19,w10 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w19,w19,w15 add w24,w24,w17 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w19,w19,#31 eor w3,w3,w5 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w3,w3,w11 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w3,w3,w16 add w23,w23,w19 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w3,w3,#31 eor w4,w4,w6 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w4,w4,w12 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w4,w4,w17 add w22,w22,w3 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w4,w4,#31 eor w5,w5,w7 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w5,w5,w13 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w5,w5,w19 add w21,w21,w4 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w5,w5,#31 eor w6,w6,w8 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w6,w6,w14 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w6,w6,w3 add w20,w20,w5 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w6,w6,#31 eor w7,w7,w9 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w7,w7,w15 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w7,w7,w4 add w24,w24,w6 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w7,w7,#31 eor w8,w8,w10 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w8,w8,w16 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w8,w8,w5 add w23,w23,w7 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w8,w8,#31 eor w9,w9,w11 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w9,w9,w17 eor w25,w25,w21 add 
w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w9,w9,w6 add w22,w22,w8 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w9,w9,#31 eor w10,w10,w12 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w10,w10,w19 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w10,w10,w7 add w21,w21,w9 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w10,w10,#31 eor w11,w11,w13 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w11,w11,w3 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w11,w11,w8 add w20,w20,w10 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w11,w11,#31 movz w28,#0xbcdc movk w28,#0x8f1b,lsl#16 eor w12,w12,w14 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w12,w12,w4 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w12,w12,w9 add w24,w24,w11 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w12,w12,#31 orr w25,w21,w22 and w26,w21,w22 eor w13,w13,w15 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w13,w13,w5 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w13,w13,w10 add w23,w23,w12 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w13,w13,#31 orr w25,w20,w21 and w26,w20,w21 eor w14,w14,w16 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w14,w14,w6 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w14,w14,w11 add w22,w22,w13 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w14,w14,#31 orr w25,w24,w20 and w26,w24,w20 eor w15,w15,w17 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w15,w15,w7 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w15,w15,w12 add w21,w21,w14 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w15,w15,#31 orr w25,w23,w24 and w26,w23,w24 eor w16,w16,w19 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w16,w16,w8 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w16,w16,w13 add 
w20,w20,w15 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w16,w16,#31 orr w25,w22,w23 and w26,w22,w23 eor w17,w17,w3 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w17,w17,w9 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w17,w17,w14 add w24,w24,w16 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w17,w17,#31 orr w25,w21,w22 and w26,w21,w22 eor w19,w19,w4 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w19,w19,w10 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w19,w19,w15 add w23,w23,w17 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w19,w19,#31 orr w25,w20,w21 and w26,w20,w21 eor w3,w3,w5 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w3,w3,w11 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w3,w3,w16 add w22,w22,w19 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w3,w3,#31 orr w25,w24,w20 and w26,w24,w20 eor w4,w4,w6 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w4,w4,w12 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w4,w4,w17 add w21,w21,w3 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w4,w4,#31 orr w25,w23,w24 and w26,w23,w24 eor w5,w5,w7 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w5,w5,w13 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w5,w5,w19 add w20,w20,w4 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w5,w5,#31 orr w25,w22,w23 and w26,w22,w23 eor w6,w6,w8 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w6,w6,w14 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w6,w6,w3 add w24,w24,w5 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w6,w6,#31 orr w25,w21,w22 and w26,w21,w22 eor w7,w7,w9 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w7,w7,w15 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w7,w7,w4 add w23,w23,w6 // future e+=X[i] add w24,w24,w25 // 
e+=F(b,c,d) ror w7,w7,#31 orr w25,w20,w21 and w26,w20,w21 eor w8,w8,w10 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w8,w8,w16 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w8,w8,w5 add w22,w22,w7 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w8,w8,#31 orr w25,w24,w20 and w26,w24,w20 eor w9,w9,w11 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w9,w9,w17 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w9,w9,w6 add w21,w21,w8 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w9,w9,#31 orr w25,w23,w24 and w26,w23,w24 eor w10,w10,w12 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w10,w10,w19 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w10,w10,w7 add w20,w20,w9 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w10,w10,#31 orr w25,w22,w23 and w26,w22,w23 eor w11,w11,w13 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w11,w11,w3 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w11,w11,w8 add w24,w24,w10 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w11,w11,#31 orr w25,w21,w22 and w26,w21,w22 eor w12,w12,w14 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w12,w12,w4 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w12,w12,w9 add w23,w23,w11 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w12,w12,#31 orr w25,w20,w21 and w26,w20,w21 eor w13,w13,w15 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w13,w13,w5 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w13,w13,w10 add w22,w22,w12 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w13,w13,#31 orr w25,w24,w20 and w26,w24,w20 eor w14,w14,w16 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w14,w14,w6 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w14,w14,w11 add w21,w21,w13 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w14,w14,#31 
orr w25,w23,w24 and w26,w23,w24 eor w15,w15,w17 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w15,w15,w7 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w15,w15,w12 add w20,w20,w14 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w15,w15,#31 movz w28,#0xc1d6 movk w28,#0xca62,lsl#16 orr w25,w22,w23 and w26,w22,w23 eor w16,w16,w19 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w16,w16,w8 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w16,w16,w13 add w24,w24,w15 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w16,w16,#31 eor w17,w17,w3 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w17,w17,w9 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w17,w17,w14 add w23,w23,w16 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w17,w17,#31 eor w19,w19,w4 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w19,w19,w10 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w19,w19,w15 add w22,w22,w17 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w19,w19,#31 eor w3,w3,w5 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w3,w3,w11 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w3,w3,w16 add w21,w21,w19 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w3,w3,#31 eor w4,w4,w6 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w4,w4,w12 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w4,w4,w17 add w20,w20,w3 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w4,w4,#31 eor w5,w5,w7 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w5,w5,w13 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w5,w5,w19 add w24,w24,w4 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w5,w5,#31 eor w6,w6,w8 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w6,w6,w14 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror 
w21,w21,#2 eor w6,w6,w3 add w23,w23,w5 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w6,w6,#31 eor w7,w7,w9 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w7,w7,w15 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w7,w7,w4 add w22,w22,w6 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w7,w7,#31 eor w8,w8,w10 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w8,w8,w16 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w8,w8,w5 add w21,w21,w7 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w8,w8,#31 eor w9,w9,w11 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w9,w9,w17 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w9,w9,w6 add w20,w20,w8 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w9,w9,#31 eor w10,w10,w12 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w10,w10,w19 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w10,w10,w7 add w24,w24,w9 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w10,w10,#31 eor w11,w11,w13 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w11,w11,w3 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w11,w11,w8 add w23,w23,w10 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w11,w11,#31 eor w12,w12,w14 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w12,w12,w4 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w12,w12,w9 add w22,w22,w11 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w12,w12,#31 eor w13,w13,w15 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w13,w13,w5 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w13,w13,w10 add w21,w21,w12 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w13,w13,#31 eor w14,w14,w16 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w14,w14,w6 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 
eor w14,w14,w11 add w20,w20,w13 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w14,w14,#31 eor w15,w15,w17 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w15,w15,w7 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w15,w15,w12 add w24,w24,w14 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w15,w15,#31 eor w16,w16,w19 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w16,w16,w8 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w16,w16,w13 add w23,w23,w15 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w16,w16,#31 eor w17,w17,w3 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w17,w17,w9 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w17,w17,w14 add w22,w22,w16 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w17,w17,#31 eor w19,w19,w4 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w19,w19,w10 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w19,w19,w15 add w21,w21,w17 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w19,w19,#31 ldp w4,w5,[x0] eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w19 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ldp w6,w7,[x0,#8] eor w25,w24,w22 ror w27,w21,#27 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 ldr w8,[x0,#16] add w20,w20,w25 // e+=F(b,c,d) add w21,w21,w5 add w22,w22,w6 add w20,w20,w4 add w23,w23,w7 add w24,w24,w8 stp w20,w21,[x0] stp w22,w23,[x0,#8] str w24,[x0,#16] cbnz x2,.Loop ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldp x25,x26,[sp,#64] ldp x27,x28,[sp,#80] ldr x29,[sp],#96 ret .size sha1_block_data_order_nohw,.-sha1_block_data_order_nohw .globl sha1_block_data_order_hw .hidden sha1_block_data_order_hw .type sha1_block_data_order_hw,%function .align 6 sha1_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack 
it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 adrp x4,.Lconst add x4,x4,:lo12:.Lconst eor v1.16b,v1.16b,v1.16b ld1 {v0.4s},[x0],#16 ld1 {v1.s}[0],[x0] sub x0,x0,#16 ld1 {v16.4s,v17.4s,v18.4s,v19.4s},[x4] .Loop_hw: ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 sub x2,x2,#1 rev32 v4.16b,v4.16b rev32 v5.16b,v5.16b add v20.4s,v16.4s,v4.4s rev32 v6.16b,v6.16b orr v22.16b,v0.16b,v0.16b // offload add v21.4s,v16.4s,v5.4s rev32 v7.16b,v7.16b .inst 0x5e280803 //sha1h v3.16b,v0.16b .inst 0x5e140020 //sha1c v0.16b,v1.16b,v20.4s // 0 add v20.4s,v16.4s,v6.4s .inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .inst 0x5e280802 //sha1h v2.16b,v0.16b // 1 .inst 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s add v21.4s,v16.4s,v7.4s .inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b .inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .inst 0x5e280803 //sha1h v3.16b,v0.16b // 2 .inst 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s add v20.4s,v16.4s,v4.4s .inst 0x5e281885 //sha1su1 v5.16b,v4.16b .inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .inst 0x5e280802 //sha1h v2.16b,v0.16b // 3 .inst 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s add v21.4s,v17.4s,v5.4s .inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b .inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .inst 0x5e280803 //sha1h v3.16b,v0.16b // 4 .inst 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s add v20.4s,v17.4s,v6.4s .inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b .inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .inst 0x5e280802 //sha1h v2.16b,v0.16b // 5 .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v17.4s,v7.4s .inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b .inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .inst 0x5e280803 //sha1h v3.16b,v0.16b // 6 .inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s add v20.4s,v17.4s,v4.4s .inst 0x5e281885 //sha1su1 v5.16b,v4.16b .inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .inst 0x5e280802 //sha1h v2.16b,v0.16b // 7 .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v17.4s,v5.4s .inst 0x5e2818a6 //sha1su1 
v6.16b,v5.16b .inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .inst 0x5e280803 //sha1h v3.16b,v0.16b // 8 .inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s add v20.4s,v18.4s,v6.4s .inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b .inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .inst 0x5e280802 //sha1h v2.16b,v0.16b // 9 .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v18.4s,v7.4s .inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b .inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .inst 0x5e280803 //sha1h v3.16b,v0.16b // 10 .inst 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s add v20.4s,v18.4s,v4.4s .inst 0x5e281885 //sha1su1 v5.16b,v4.16b .inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .inst 0x5e280802 //sha1h v2.16b,v0.16b // 11 .inst 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s add v21.4s,v18.4s,v5.4s .inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b .inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .inst 0x5e280803 //sha1h v3.16b,v0.16b // 12 .inst 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s add v20.4s,v18.4s,v6.4s .inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b .inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .inst 0x5e280802 //sha1h v2.16b,v0.16b // 13 .inst 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s add v21.4s,v19.4s,v7.4s .inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b .inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .inst 0x5e280803 //sha1h v3.16b,v0.16b // 14 .inst 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s add v20.4s,v19.4s,v4.4s .inst 0x5e281885 //sha1su1 v5.16b,v4.16b .inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .inst 0x5e280802 //sha1h v2.16b,v0.16b // 15 .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v19.4s,v5.4s .inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b .inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .inst 0x5e280803 //sha1h v3.16b,v0.16b // 16 .inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s add v20.4s,v19.4s,v6.4s .inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b .inst 0x5e280802 //sha1h v2.16b,v0.16b // 17 .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v19.4s,v7.4s .inst 0x5e280803 //sha1h 
v3.16b,v0.16b // 18 .inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s .inst 0x5e280802 //sha1h v2.16b,v0.16b // 19 .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v1.4s,v1.4s,v2.4s add v0.4s,v0.4s,v22.4s cbnz x2,.Loop_hw st1 {v0.4s},[x0],#16 st1 {v1.s}[0],[x0] ldr x29,[sp],#16 ret .size sha1_block_data_order_hw,.-sha1_block_data_order_hw .section .rodata .align 6 .Lconst: .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 //K_00_19 .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 //K_20_39 .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc //K_40_59 .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 //K_60_79 .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha1-armv8-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include .text .globl sha1_block_data_order_nohw .def sha1_block_data_order_nohw .type 32 .endef .align 6 sha1_block_data_order_nohw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-96]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] ldp w20,w21,[x0] ldp w22,w23,[x0,#8] ldr w24,[x0,#16] Loop: ldr x3,[x1],#64 movz w28,#0x7999 sub x2,x2,#1 movk w28,#0x5a82,lsl#16 #ifdef __AARCH64EB__ ror x3,x3,#32 #else rev32 x3,x3 #endif add w24,w24,w28 // warm it up add w24,w24,w3 lsr x4,x3,#32 ldr x5,[x1,#-56] bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 add w23,w23,w4 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x5,x5,#32 #else rev32 x5,x5 #endif bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 add w22,w22,w5 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) lsr x6,x5,#32 ldr x7,[x1,#-48] bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 add w21,w21,w6 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x7,x7,#32 #else rev32 x7,x7 #endif bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w7 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) lsr x8,x7,#32 ldr x9,[x1,#-40] bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 add w24,w24,w8 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x9,x9,#32 #else rev32 x9,x9 #endif bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 add w23,w23,w9 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) lsr x10,x9,#32 ldr x11,[x1,#-32] bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K orr w25,w25,w26 add 
w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 add w22,w22,w10 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x11,x11,#32 #else rev32 x11,x11 #endif bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 add w21,w21,w11 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) lsr x12,x11,#32 ldr x13,[x1,#-24] bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w12 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x13,x13,#32 #else rev32 x13,x13 #endif bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 add w24,w24,w13 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) lsr x14,x13,#32 ldr x15,[x1,#-16] bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 add w23,w23,w14 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x15,x15,#32 #else rev32 x15,x15 #endif bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 add w22,w22,w15 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) lsr x16,x15,#32 ldr x17,[x1,#-8] bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 add w21,w21,w16 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x17,x17,#32 #else rev32 x17,x17 #endif bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w17 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) lsr x19,x17,#32 eor w3,w3,w5 bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 eor w3,w3,w11 
add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) eor w3,w3,w16 ror w22,w22,#2 add w24,w24,w19 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w3,w3,#31 eor w4,w4,w6 bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 eor w4,w4,w12 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) eor w4,w4,w17 ror w21,w21,#2 add w23,w23,w3 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w4,w4,#31 eor w5,w5,w7 bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 eor w5,w5,w13 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) eor w5,w5,w19 ror w20,w20,#2 add w22,w22,w4 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w5,w5,#31 eor w6,w6,w8 bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 eor w6,w6,w14 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) eor w6,w6,w3 ror w24,w24,#2 add w21,w21,w5 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w6,w6,#31 eor w7,w7,w9 bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 eor w7,w7,w15 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) eor w7,w7,w4 ror w23,w23,#2 add w20,w20,w6 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w7,w7,#31 movz w28,#0xeba1 movk w28,#0x6ed9,lsl#16 eor w8,w8,w10 bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 eor w8,w8,w16 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) eor w8,w8,w5 ror w22,w22,#2 add w24,w24,w7 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w8,w8,#31 eor w9,w9,w11 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w9,w9,w17 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w9,w9,w6 add w23,w23,w8 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w9,w9,#31 eor w10,w10,w12 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w10,w10,w19 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w10,w10,w7 add w22,w22,w9 // future e+=X[i] add w23,w23,w25 // 
e+=F(b,c,d) ror w10,w10,#31 eor w11,w11,w13 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w11,w11,w3 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w11,w11,w8 add w21,w21,w10 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w11,w11,#31 eor w12,w12,w14 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w12,w12,w4 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w12,w12,w9 add w20,w20,w11 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w12,w12,#31 eor w13,w13,w15 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w13,w13,w5 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w13,w13,w10 add w24,w24,w12 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w13,w13,#31 eor w14,w14,w16 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w14,w14,w6 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w14,w14,w11 add w23,w23,w13 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w14,w14,#31 eor w15,w15,w17 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w15,w15,w7 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w15,w15,w12 add w22,w22,w14 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w15,w15,#31 eor w16,w16,w19 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w16,w16,w8 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w16,w16,w13 add w21,w21,w15 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w16,w16,#31 eor w17,w17,w3 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w17,w17,w9 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w17,w17,w14 add w20,w20,w16 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w17,w17,#31 eor w19,w19,w4 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w19,w19,w10 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w19,w19,w15 add w24,w24,w17 // future 
e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w19,w19,#31 eor w3,w3,w5 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w3,w3,w11 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w3,w3,w16 add w23,w23,w19 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w3,w3,#31 eor w4,w4,w6 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w4,w4,w12 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w4,w4,w17 add w22,w22,w3 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w4,w4,#31 eor w5,w5,w7 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w5,w5,w13 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w5,w5,w19 add w21,w21,w4 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w5,w5,#31 eor w6,w6,w8 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w6,w6,w14 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w6,w6,w3 add w20,w20,w5 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w6,w6,#31 eor w7,w7,w9 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w7,w7,w15 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w7,w7,w4 add w24,w24,w6 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w7,w7,#31 eor w8,w8,w10 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w8,w8,w16 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w8,w8,w5 add w23,w23,w7 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w8,w8,#31 eor w9,w9,w11 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w9,w9,w17 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w9,w9,w6 add w22,w22,w8 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w9,w9,#31 eor w10,w10,w12 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w10,w10,w19 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w10,w10,w7 add w21,w21,w9 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) 
ror w10,w10,#31 eor w11,w11,w13 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w11,w11,w3 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w11,w11,w8 add w20,w20,w10 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w11,w11,#31 movz w28,#0xbcdc movk w28,#0x8f1b,lsl#16 eor w12,w12,w14 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w12,w12,w4 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w12,w12,w9 add w24,w24,w11 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w12,w12,#31 orr w25,w21,w22 and w26,w21,w22 eor w13,w13,w15 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w13,w13,w5 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w13,w13,w10 add w23,w23,w12 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w13,w13,#31 orr w25,w20,w21 and w26,w20,w21 eor w14,w14,w16 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w14,w14,w6 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w14,w14,w11 add w22,w22,w13 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w14,w14,#31 orr w25,w24,w20 and w26,w24,w20 eor w15,w15,w17 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w15,w15,w7 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w15,w15,w12 add w21,w21,w14 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w15,w15,#31 orr w25,w23,w24 and w26,w23,w24 eor w16,w16,w19 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w16,w16,w8 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w16,w16,w13 add w20,w20,w15 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w16,w16,#31 orr w25,w22,w23 and w26,w22,w23 eor w17,w17,w3 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w17,w17,w9 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w17,w17,w14 add w24,w24,w16 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w17,w17,#31 orr 
w25,w21,w22 and w26,w21,w22 eor w19,w19,w4 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w19,w19,w10 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w19,w19,w15 add w23,w23,w17 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w19,w19,#31 orr w25,w20,w21 and w26,w20,w21 eor w3,w3,w5 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w3,w3,w11 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w3,w3,w16 add w22,w22,w19 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w3,w3,#31 orr w25,w24,w20 and w26,w24,w20 eor w4,w4,w6 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w4,w4,w12 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w4,w4,w17 add w21,w21,w3 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w4,w4,#31 orr w25,w23,w24 and w26,w23,w24 eor w5,w5,w7 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w5,w5,w13 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w5,w5,w19 add w20,w20,w4 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w5,w5,#31 orr w25,w22,w23 and w26,w22,w23 eor w6,w6,w8 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w6,w6,w14 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w6,w6,w3 add w24,w24,w5 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w6,w6,#31 orr w25,w21,w22 and w26,w21,w22 eor w7,w7,w9 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w7,w7,w15 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w7,w7,w4 add w23,w23,w6 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w7,w7,#31 orr w25,w20,w21 and w26,w20,w21 eor w8,w8,w10 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w8,w8,w16 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w8,w8,w5 add w22,w22,w7 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w8,w8,#31 orr w25,w24,w20 and w26,w24,w20 eor w9,w9,w11 ror w27,w23,#27 
and w25,w25,w21 add w21,w21,w28 // future e+=K eor w9,w9,w17 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w9,w9,w6 add w21,w21,w8 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w9,w9,#31 orr w25,w23,w24 and w26,w23,w24 eor w10,w10,w12 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w10,w10,w19 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w10,w10,w7 add w20,w20,w9 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w10,w10,#31 orr w25,w22,w23 and w26,w22,w23 eor w11,w11,w13 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w11,w11,w3 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w11,w11,w8 add w24,w24,w10 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w11,w11,#31 orr w25,w21,w22 and w26,w21,w22 eor w12,w12,w14 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w12,w12,w4 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w12,w12,w9 add w23,w23,w11 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w12,w12,#31 orr w25,w20,w21 and w26,w20,w21 eor w13,w13,w15 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w13,w13,w5 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w13,w13,w10 add w22,w22,w12 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w13,w13,#31 orr w25,w24,w20 and w26,w24,w20 eor w14,w14,w16 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w14,w14,w6 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w14,w14,w11 add w21,w21,w13 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w14,w14,#31 orr w25,w23,w24 and w26,w23,w24 eor w15,w15,w17 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w15,w15,w7 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w15,w15,w12 add w20,w20,w14 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w15,w15,#31 movz w28,#0xc1d6 movk w28,#0xca62,lsl#16 orr w25,w22,w23 and w26,w22,w23 eor 
w16,w16,w19 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w16,w16,w8 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w16,w16,w13 add w24,w24,w15 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w16,w16,#31 eor w17,w17,w3 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w17,w17,w9 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w17,w17,w14 add w23,w23,w16 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w17,w17,#31 eor w19,w19,w4 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w19,w19,w10 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w19,w19,w15 add w22,w22,w17 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w19,w19,#31 eor w3,w3,w5 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w3,w3,w11 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w3,w3,w16 add w21,w21,w19 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w3,w3,#31 eor w4,w4,w6 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w4,w4,w12 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w4,w4,w17 add w20,w20,w3 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w4,w4,#31 eor w5,w5,w7 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w5,w5,w13 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w5,w5,w19 add w24,w24,w4 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w5,w5,#31 eor w6,w6,w8 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w6,w6,w14 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w6,w6,w3 add w23,w23,w5 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w6,w6,#31 eor w7,w7,w9 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w7,w7,w15 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w7,w7,w4 add w22,w22,w6 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w7,w7,#31 eor w8,w8,w10 eor 
w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w8,w8,w16 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w8,w8,w5 add w21,w21,w7 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w8,w8,#31 eor w9,w9,w11 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w9,w9,w17 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w9,w9,w6 add w20,w20,w8 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w9,w9,#31 eor w10,w10,w12 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w10,w10,w19 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w10,w10,w7 add w24,w24,w9 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w10,w10,#31 eor w11,w11,w13 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w11,w11,w3 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w11,w11,w8 add w23,w23,w10 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w11,w11,#31 eor w12,w12,w14 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w12,w12,w4 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w12,w12,w9 add w22,w22,w11 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w12,w12,#31 eor w13,w13,w15 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w13,w13,w5 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w13,w13,w10 add w21,w21,w12 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w13,w13,#31 eor w14,w14,w16 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w14,w14,w6 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w14,w14,w11 add w20,w20,w13 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w14,w14,#31 eor w15,w15,w17 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w15,w15,w7 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w15,w15,w12 add w24,w24,w14 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w15,w15,#31 eor 
w16,w16,w19 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w16,w16,w8 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w16,w16,w13 add w23,w23,w15 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w16,w16,#31 eor w17,w17,w3 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w17,w17,w9 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w17,w17,w14 add w22,w22,w16 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w17,w17,#31 eor w19,w19,w4 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w19,w19,w10 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w19,w19,w15 add w21,w21,w17 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w19,w19,#31 ldp w4,w5,[x0] eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w19 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ldp w6,w7,[x0,#8] eor w25,w24,w22 ror w27,w21,#27 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 ldr w8,[x0,#16] add w20,w20,w25 // e+=F(b,c,d) add w21,w21,w5 add w22,w22,w6 add w20,w20,w4 add w23,w23,w7 add w24,w24,w8 stp w20,w21,[x0] stp w22,w23,[x0,#8] str w24,[x0,#16] cbnz x2,Loop ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldp x25,x26,[sp,#64] ldp x27,x28,[sp,#80] ldr x29,[sp],#96 ret .globl sha1_block_data_order_hw .def sha1_block_data_order_hw .type 32 .endef .align 6 sha1_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! 
add x29,sp,#0 adrp x4,Lconst add x4,x4,:lo12:Lconst eor v1.16b,v1.16b,v1.16b ld1 {v0.4s},[x0],#16 ld1 {v1.s}[0],[x0] sub x0,x0,#16 ld1 {v16.4s,v17.4s,v18.4s,v19.4s},[x4] Loop_hw: ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 sub x2,x2,#1 rev32 v4.16b,v4.16b rev32 v5.16b,v5.16b add v20.4s,v16.4s,v4.4s rev32 v6.16b,v6.16b orr v22.16b,v0.16b,v0.16b // offload add v21.4s,v16.4s,v5.4s rev32 v7.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b .long 0x5e140020 //sha1c v0.16b,v1.16b,v20.4s // 0 add v20.4s,v16.4s,v6.4s .long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 1 .long 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s add v21.4s,v16.4s,v7.4s .long 0x5e2818e4 //sha1su1 v4.16b,v7.16b .long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 2 .long 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s add v20.4s,v16.4s,v4.4s .long 0x5e281885 //sha1su1 v5.16b,v4.16b .long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 3 .long 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s add v21.4s,v17.4s,v5.4s .long 0x5e2818a6 //sha1su1 v6.16b,v5.16b .long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 4 .long 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s add v20.4s,v17.4s,v6.4s .long 0x5e2818c7 //sha1su1 v7.16b,v6.16b .long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 5 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v17.4s,v7.4s .long 0x5e2818e4 //sha1su1 v4.16b,v7.16b .long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 6 .long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s add v20.4s,v17.4s,v4.4s .long 0x5e281885 //sha1su1 v5.16b,v4.16b .long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 7 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v17.4s,v5.4s .long 0x5e2818a6 //sha1su1 v6.16b,v5.16b .long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .long 0x5e280803 
//sha1h v3.16b,v0.16b // 8 .long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s add v20.4s,v18.4s,v6.4s .long 0x5e2818c7 //sha1su1 v7.16b,v6.16b .long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 9 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v18.4s,v7.4s .long 0x5e2818e4 //sha1su1 v4.16b,v7.16b .long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 10 .long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s add v20.4s,v18.4s,v4.4s .long 0x5e281885 //sha1su1 v5.16b,v4.16b .long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 11 .long 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s add v21.4s,v18.4s,v5.4s .long 0x5e2818a6 //sha1su1 v6.16b,v5.16b .long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 12 .long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s add v20.4s,v18.4s,v6.4s .long 0x5e2818c7 //sha1su1 v7.16b,v6.16b .long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 13 .long 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s add v21.4s,v19.4s,v7.4s .long 0x5e2818e4 //sha1su1 v4.16b,v7.16b .long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 14 .long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s add v20.4s,v19.4s,v4.4s .long 0x5e281885 //sha1su1 v5.16b,v4.16b .long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 15 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v19.4s,v5.4s .long 0x5e2818a6 //sha1su1 v6.16b,v5.16b .long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 16 .long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s add v20.4s,v19.4s,v6.4s .long 0x5e2818c7 //sha1su1 v7.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 17 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v19.4s,v7.4s .long 0x5e280803 //sha1h v3.16b,v0.16b // 18 .long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s .long 0x5e280802 //sha1h 
v2.16b,v0.16b // 19 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v1.4s,v1.4s,v2.4s add v0.4s,v0.4s,v22.4s cbnz x2,Loop_hw st1 {v0.4s},[x0],#16 st1 {v1.s}[0],[x0] ldr x29,[sp],#16 ret .section .rodata .align 6 Lconst: .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 //K_00_19 .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 //K_20_39 .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc //K_40_59 .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 //K_60_79 .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha1-x86_64-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _sha1_block_data_order_nohw .private_extern _sha1_block_data_order_nohw .p2align 4 _sha1_block_data_order_nohw: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 movq %rdi,%r8 subq $72,%rsp movq %rsi,%r9 andq $-64,%rsp movq %rdx,%r10 movq %rax,64(%rsp) L$prologue: movl 0(%r8),%esi movl 4(%r8),%edi movl 8(%r8),%r11d movl 12(%r8),%r12d movl 16(%r8),%r13d jmp L$loop .p2align 4 L$loop: movl 0(%r9),%edx bswapl %edx movl 4(%r9),%ebp movl %r12d,%eax movl %edx,0(%rsp) movl %esi,%ecx bswapl %ebp xorl %r11d,%eax roll $5,%ecx andl %edi,%eax leal 1518500249(%rdx,%r13,1),%r13d addl %ecx,%r13d xorl %r12d,%eax roll $30,%edi addl %eax,%r13d movl 8(%r9),%r14d movl %r11d,%eax movl %ebp,4(%rsp) movl %r13d,%ecx bswapl %r14d xorl %edi,%eax roll $5,%ecx andl %esi,%eax leal 1518500249(%rbp,%r12,1),%r12d addl %ecx,%r12d xorl %r11d,%eax roll $30,%esi addl %eax,%r12d movl 12(%r9),%edx movl %edi,%eax movl %r14d,8(%rsp) movl %r12d,%ecx bswapl %edx xorl %esi,%eax roll $5,%ecx andl %r13d,%eax leal 1518500249(%r14,%r11,1),%r11d addl %ecx,%r11d xorl %edi,%eax roll $30,%r13d addl %eax,%r11d movl 16(%r9),%ebp movl %esi,%eax movl %edx,12(%rsp) movl %r11d,%ecx bswapl %ebp xorl %r13d,%eax roll $5,%ecx andl %r12d,%eax leal 1518500249(%rdx,%rdi,1),%edi addl %ecx,%edi xorl %esi,%eax roll $30,%r12d addl %eax,%edi movl 20(%r9),%r14d movl %r13d,%eax movl %ebp,16(%rsp) movl %edi,%ecx bswapl %r14d xorl %r12d,%eax roll $5,%ecx andl %r11d,%eax leal 1518500249(%rbp,%rsi,1),%esi addl %ecx,%esi xorl %r13d,%eax roll $30,%r11d addl %eax,%esi movl 24(%r9),%edx movl %r12d,%eax movl %r14d,20(%rsp) movl %esi,%ecx bswapl %edx xorl %r11d,%eax roll $5,%ecx andl %edi,%eax leal 1518500249(%r14,%r13,1),%r13d addl %ecx,%r13d xorl %r12d,%eax roll $30,%edi addl %eax,%r13d movl 28(%r9),%ebp movl %r11d,%eax movl %edx,24(%rsp) movl %r13d,%ecx bswapl %ebp xorl %edi,%eax roll $5,%ecx andl %esi,%eax leal 
1518500249(%rdx,%r12,1),%r12d addl %ecx,%r12d xorl %r11d,%eax roll $30,%esi addl %eax,%r12d movl 32(%r9),%r14d movl %edi,%eax movl %ebp,28(%rsp) movl %r12d,%ecx bswapl %r14d xorl %esi,%eax roll $5,%ecx andl %r13d,%eax leal 1518500249(%rbp,%r11,1),%r11d addl %ecx,%r11d xorl %edi,%eax roll $30,%r13d addl %eax,%r11d movl 36(%r9),%edx movl %esi,%eax movl %r14d,32(%rsp) movl %r11d,%ecx bswapl %edx xorl %r13d,%eax roll $5,%ecx andl %r12d,%eax leal 1518500249(%r14,%rdi,1),%edi addl %ecx,%edi xorl %esi,%eax roll $30,%r12d addl %eax,%edi movl 40(%r9),%ebp movl %r13d,%eax movl %edx,36(%rsp) movl %edi,%ecx bswapl %ebp xorl %r12d,%eax roll $5,%ecx andl %r11d,%eax leal 1518500249(%rdx,%rsi,1),%esi addl %ecx,%esi xorl %r13d,%eax roll $30,%r11d addl %eax,%esi movl 44(%r9),%r14d movl %r12d,%eax movl %ebp,40(%rsp) movl %esi,%ecx bswapl %r14d xorl %r11d,%eax roll $5,%ecx andl %edi,%eax leal 1518500249(%rbp,%r13,1),%r13d addl %ecx,%r13d xorl %r12d,%eax roll $30,%edi addl %eax,%r13d movl 48(%r9),%edx movl %r11d,%eax movl %r14d,44(%rsp) movl %r13d,%ecx bswapl %edx xorl %edi,%eax roll $5,%ecx andl %esi,%eax leal 1518500249(%r14,%r12,1),%r12d addl %ecx,%r12d xorl %r11d,%eax roll $30,%esi addl %eax,%r12d movl 52(%r9),%ebp movl %edi,%eax movl %edx,48(%rsp) movl %r12d,%ecx bswapl %ebp xorl %esi,%eax roll $5,%ecx andl %r13d,%eax leal 1518500249(%rdx,%r11,1),%r11d addl %ecx,%r11d xorl %edi,%eax roll $30,%r13d addl %eax,%r11d movl 56(%r9),%r14d movl %esi,%eax movl %ebp,52(%rsp) movl %r11d,%ecx bswapl %r14d xorl %r13d,%eax roll $5,%ecx andl %r12d,%eax leal 1518500249(%rbp,%rdi,1),%edi addl %ecx,%edi xorl %esi,%eax roll $30,%r12d addl %eax,%edi movl 60(%r9),%edx movl %r13d,%eax movl %r14d,56(%rsp) movl %edi,%ecx bswapl %edx xorl %r12d,%eax roll $5,%ecx andl %r11d,%eax leal 1518500249(%r14,%rsi,1),%esi addl %ecx,%esi xorl %r13d,%eax roll $30,%r11d addl %eax,%esi xorl 0(%rsp),%ebp movl %r12d,%eax movl %edx,60(%rsp) movl %esi,%ecx xorl 8(%rsp),%ebp xorl %r11d,%eax roll $5,%ecx xorl 32(%rsp),%ebp 
andl %edi,%eax leal 1518500249(%rdx,%r13,1),%r13d roll $30,%edi xorl %r12d,%eax addl %ecx,%r13d roll $1,%ebp addl %eax,%r13d xorl 4(%rsp),%r14d movl %r11d,%eax movl %ebp,0(%rsp) movl %r13d,%ecx xorl 12(%rsp),%r14d xorl %edi,%eax roll $5,%ecx xorl 36(%rsp),%r14d andl %esi,%eax leal 1518500249(%rbp,%r12,1),%r12d roll $30,%esi xorl %r11d,%eax addl %ecx,%r12d roll $1,%r14d addl %eax,%r12d xorl 8(%rsp),%edx movl %edi,%eax movl %r14d,4(%rsp) movl %r12d,%ecx xorl 16(%rsp),%edx xorl %esi,%eax roll $5,%ecx xorl 40(%rsp),%edx andl %r13d,%eax leal 1518500249(%r14,%r11,1),%r11d roll $30,%r13d xorl %edi,%eax addl %ecx,%r11d roll $1,%edx addl %eax,%r11d xorl 12(%rsp),%ebp movl %esi,%eax movl %edx,8(%rsp) movl %r11d,%ecx xorl 20(%rsp),%ebp xorl %r13d,%eax roll $5,%ecx xorl 44(%rsp),%ebp andl %r12d,%eax leal 1518500249(%rdx,%rdi,1),%edi roll $30,%r12d xorl %esi,%eax addl %ecx,%edi roll $1,%ebp addl %eax,%edi xorl 16(%rsp),%r14d movl %r13d,%eax movl %ebp,12(%rsp) movl %edi,%ecx xorl 24(%rsp),%r14d xorl %r12d,%eax roll $5,%ecx xorl 48(%rsp),%r14d andl %r11d,%eax leal 1518500249(%rbp,%rsi,1),%esi roll $30,%r11d xorl %r13d,%eax addl %ecx,%esi roll $1,%r14d addl %eax,%esi xorl 20(%rsp),%edx movl %edi,%eax movl %r14d,16(%rsp) movl %esi,%ecx xorl 28(%rsp),%edx xorl %r12d,%eax roll $5,%ecx xorl 52(%rsp),%edx leal 1859775393(%r14,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%edx xorl 24(%rsp),%ebp movl %esi,%eax movl %edx,20(%rsp) movl %r13d,%ecx xorl 32(%rsp),%ebp xorl %r11d,%eax roll $5,%ecx xorl 56(%rsp),%ebp leal 1859775393(%rdx,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%ebp xorl 28(%rsp),%r14d movl %r13d,%eax movl %ebp,24(%rsp) movl %r12d,%ecx xorl 36(%rsp),%r14d xorl %edi,%eax roll $5,%ecx xorl 60(%rsp),%r14d leal 1859775393(%rbp,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%r14d xorl 32(%rsp),%edx movl %r12d,%eax movl %r14d,28(%rsp) movl %r11d,%ecx xorl 
40(%rsp),%edx xorl %esi,%eax roll $5,%ecx xorl 0(%rsp),%edx leal 1859775393(%r14,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%edx xorl 36(%rsp),%ebp movl %r11d,%eax movl %edx,32(%rsp) movl %edi,%ecx xorl 44(%rsp),%ebp xorl %r13d,%eax roll $5,%ecx xorl 4(%rsp),%ebp leal 1859775393(%rdx,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%ebp xorl 40(%rsp),%r14d movl %edi,%eax movl %ebp,36(%rsp) movl %esi,%ecx xorl 48(%rsp),%r14d xorl %r12d,%eax roll $5,%ecx xorl 8(%rsp),%r14d leal 1859775393(%rbp,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%r14d xorl 44(%rsp),%edx movl %esi,%eax movl %r14d,40(%rsp) movl %r13d,%ecx xorl 52(%rsp),%edx xorl %r11d,%eax roll $5,%ecx xorl 12(%rsp),%edx leal 1859775393(%r14,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%edx xorl 48(%rsp),%ebp movl %r13d,%eax movl %edx,44(%rsp) movl %r12d,%ecx xorl 56(%rsp),%ebp xorl %edi,%eax roll $5,%ecx xorl 16(%rsp),%ebp leal 1859775393(%rdx,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%ebp xorl 52(%rsp),%r14d movl %r12d,%eax movl %ebp,48(%rsp) movl %r11d,%ecx xorl 60(%rsp),%r14d xorl %esi,%eax roll $5,%ecx xorl 20(%rsp),%r14d leal 1859775393(%rbp,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%r14d xorl 56(%rsp),%edx movl %r11d,%eax movl %r14d,52(%rsp) movl %edi,%ecx xorl 0(%rsp),%edx xorl %r13d,%eax roll $5,%ecx xorl 24(%rsp),%edx leal 1859775393(%r14,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%edx xorl 60(%rsp),%ebp movl %edi,%eax movl %edx,56(%rsp) movl %esi,%ecx xorl 4(%rsp),%ebp xorl %r12d,%eax roll $5,%ecx xorl 28(%rsp),%ebp leal 1859775393(%rdx,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%ebp xorl 0(%rsp),%r14d movl %esi,%eax movl %ebp,60(%rsp) movl %r13d,%ecx xorl 8(%rsp),%r14d xorl %r11d,%eax roll 
$5,%ecx xorl 32(%rsp),%r14d leal 1859775393(%rbp,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%r14d xorl 4(%rsp),%edx movl %r13d,%eax movl %r14d,0(%rsp) movl %r12d,%ecx xorl 12(%rsp),%edx xorl %edi,%eax roll $5,%ecx xorl 36(%rsp),%edx leal 1859775393(%r14,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%edx xorl 8(%rsp),%ebp movl %r12d,%eax movl %edx,4(%rsp) movl %r11d,%ecx xorl 16(%rsp),%ebp xorl %esi,%eax roll $5,%ecx xorl 40(%rsp),%ebp leal 1859775393(%rdx,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%ebp xorl 12(%rsp),%r14d movl %r11d,%eax movl %ebp,8(%rsp) movl %edi,%ecx xorl 20(%rsp),%r14d xorl %r13d,%eax roll $5,%ecx xorl 44(%rsp),%r14d leal 1859775393(%rbp,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%r14d xorl 16(%rsp),%edx movl %edi,%eax movl %r14d,12(%rsp) movl %esi,%ecx xorl 24(%rsp),%edx xorl %r12d,%eax roll $5,%ecx xorl 48(%rsp),%edx leal 1859775393(%r14,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%edx xorl 20(%rsp),%ebp movl %esi,%eax movl %edx,16(%rsp) movl %r13d,%ecx xorl 28(%rsp),%ebp xorl %r11d,%eax roll $5,%ecx xorl 52(%rsp),%ebp leal 1859775393(%rdx,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%ebp xorl 24(%rsp),%r14d movl %r13d,%eax movl %ebp,20(%rsp) movl %r12d,%ecx xorl 32(%rsp),%r14d xorl %edi,%eax roll $5,%ecx xorl 56(%rsp),%r14d leal 1859775393(%rbp,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%r14d xorl 28(%rsp),%edx movl %r12d,%eax movl %r14d,24(%rsp) movl %r11d,%ecx xorl 36(%rsp),%edx xorl %esi,%eax roll $5,%ecx xorl 60(%rsp),%edx leal 1859775393(%r14,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%edx xorl 32(%rsp),%ebp movl %r11d,%eax movl %edx,28(%rsp) movl %edi,%ecx xorl 40(%rsp),%ebp xorl %r13d,%eax roll $5,%ecx xorl 0(%rsp),%ebp leal 
1859775393(%rdx,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%ebp xorl 36(%rsp),%r14d movl %r12d,%eax movl %ebp,32(%rsp) movl %r12d,%ebx xorl 44(%rsp),%r14d andl %r11d,%eax movl %esi,%ecx xorl 4(%rsp),%r14d leal -1894007588(%rbp,%r13,1),%r13d xorl %r11d,%ebx roll $5,%ecx addl %eax,%r13d roll $1,%r14d andl %edi,%ebx addl %ecx,%r13d roll $30,%edi addl %ebx,%r13d xorl 40(%rsp),%edx movl %r11d,%eax movl %r14d,36(%rsp) movl %r11d,%ebx xorl 48(%rsp),%edx andl %edi,%eax movl %r13d,%ecx xorl 8(%rsp),%edx leal -1894007588(%r14,%r12,1),%r12d xorl %edi,%ebx roll $5,%ecx addl %eax,%r12d roll $1,%edx andl %esi,%ebx addl %ecx,%r12d roll $30,%esi addl %ebx,%r12d xorl 44(%rsp),%ebp movl %edi,%eax movl %edx,40(%rsp) movl %edi,%ebx xorl 52(%rsp),%ebp andl %esi,%eax movl %r12d,%ecx xorl 12(%rsp),%ebp leal -1894007588(%rdx,%r11,1),%r11d xorl %esi,%ebx roll $5,%ecx addl %eax,%r11d roll $1,%ebp andl %r13d,%ebx addl %ecx,%r11d roll $30,%r13d addl %ebx,%r11d xorl 48(%rsp),%r14d movl %esi,%eax movl %ebp,44(%rsp) movl %esi,%ebx xorl 56(%rsp),%r14d andl %r13d,%eax movl %r11d,%ecx xorl 16(%rsp),%r14d leal -1894007588(%rbp,%rdi,1),%edi xorl %r13d,%ebx roll $5,%ecx addl %eax,%edi roll $1,%r14d andl %r12d,%ebx addl %ecx,%edi roll $30,%r12d addl %ebx,%edi xorl 52(%rsp),%edx movl %r13d,%eax movl %r14d,48(%rsp) movl %r13d,%ebx xorl 60(%rsp),%edx andl %r12d,%eax movl %edi,%ecx xorl 20(%rsp),%edx leal -1894007588(%r14,%rsi,1),%esi xorl %r12d,%ebx roll $5,%ecx addl %eax,%esi roll $1,%edx andl %r11d,%ebx addl %ecx,%esi roll $30,%r11d addl %ebx,%esi xorl 56(%rsp),%ebp movl %r12d,%eax movl %edx,52(%rsp) movl %r12d,%ebx xorl 0(%rsp),%ebp andl %r11d,%eax movl %esi,%ecx xorl 24(%rsp),%ebp leal -1894007588(%rdx,%r13,1),%r13d xorl %r11d,%ebx roll $5,%ecx addl %eax,%r13d roll $1,%ebp andl %edi,%ebx addl %ecx,%r13d roll $30,%edi addl %ebx,%r13d xorl 60(%rsp),%r14d movl %r11d,%eax movl %ebp,56(%rsp) movl %r11d,%ebx xorl 4(%rsp),%r14d andl %edi,%eax movl %r13d,%ecx xorl 
28(%rsp),%r14d leal -1894007588(%rbp,%r12,1),%r12d xorl %edi,%ebx roll $5,%ecx addl %eax,%r12d roll $1,%r14d andl %esi,%ebx addl %ecx,%r12d roll $30,%esi addl %ebx,%r12d xorl 0(%rsp),%edx movl %edi,%eax movl %r14d,60(%rsp) movl %edi,%ebx xorl 8(%rsp),%edx andl %esi,%eax movl %r12d,%ecx xorl 32(%rsp),%edx leal -1894007588(%r14,%r11,1),%r11d xorl %esi,%ebx roll $5,%ecx addl %eax,%r11d roll $1,%edx andl %r13d,%ebx addl %ecx,%r11d roll $30,%r13d addl %ebx,%r11d xorl 4(%rsp),%ebp movl %esi,%eax movl %edx,0(%rsp) movl %esi,%ebx xorl 12(%rsp),%ebp andl %r13d,%eax movl %r11d,%ecx xorl 36(%rsp),%ebp leal -1894007588(%rdx,%rdi,1),%edi xorl %r13d,%ebx roll $5,%ecx addl %eax,%edi roll $1,%ebp andl %r12d,%ebx addl %ecx,%edi roll $30,%r12d addl %ebx,%edi xorl 8(%rsp),%r14d movl %r13d,%eax movl %ebp,4(%rsp) movl %r13d,%ebx xorl 16(%rsp),%r14d andl %r12d,%eax movl %edi,%ecx xorl 40(%rsp),%r14d leal -1894007588(%rbp,%rsi,1),%esi xorl %r12d,%ebx roll $5,%ecx addl %eax,%esi roll $1,%r14d andl %r11d,%ebx addl %ecx,%esi roll $30,%r11d addl %ebx,%esi xorl 12(%rsp),%edx movl %r12d,%eax movl %r14d,8(%rsp) movl %r12d,%ebx xorl 20(%rsp),%edx andl %r11d,%eax movl %esi,%ecx xorl 44(%rsp),%edx leal -1894007588(%r14,%r13,1),%r13d xorl %r11d,%ebx roll $5,%ecx addl %eax,%r13d roll $1,%edx andl %edi,%ebx addl %ecx,%r13d roll $30,%edi addl %ebx,%r13d xorl 16(%rsp),%ebp movl %r11d,%eax movl %edx,12(%rsp) movl %r11d,%ebx xorl 24(%rsp),%ebp andl %edi,%eax movl %r13d,%ecx xorl 48(%rsp),%ebp leal -1894007588(%rdx,%r12,1),%r12d xorl %edi,%ebx roll $5,%ecx addl %eax,%r12d roll $1,%ebp andl %esi,%ebx addl %ecx,%r12d roll $30,%esi addl %ebx,%r12d xorl 20(%rsp),%r14d movl %edi,%eax movl %ebp,16(%rsp) movl %edi,%ebx xorl 28(%rsp),%r14d andl %esi,%eax movl %r12d,%ecx xorl 52(%rsp),%r14d leal -1894007588(%rbp,%r11,1),%r11d xorl %esi,%ebx roll $5,%ecx addl %eax,%r11d roll $1,%r14d andl %r13d,%ebx addl %ecx,%r11d roll $30,%r13d addl %ebx,%r11d xorl 24(%rsp),%edx movl %esi,%eax movl %r14d,20(%rsp) movl %esi,%ebx 
xorl 32(%rsp),%edx andl %r13d,%eax movl %r11d,%ecx xorl 56(%rsp),%edx leal -1894007588(%r14,%rdi,1),%edi xorl %r13d,%ebx roll $5,%ecx addl %eax,%edi roll $1,%edx andl %r12d,%ebx addl %ecx,%edi roll $30,%r12d addl %ebx,%edi xorl 28(%rsp),%ebp movl %r13d,%eax movl %edx,24(%rsp) movl %r13d,%ebx xorl 36(%rsp),%ebp andl %r12d,%eax movl %edi,%ecx xorl 60(%rsp),%ebp leal -1894007588(%rdx,%rsi,1),%esi xorl %r12d,%ebx roll $5,%ecx addl %eax,%esi roll $1,%ebp andl %r11d,%ebx addl %ecx,%esi roll $30,%r11d addl %ebx,%esi xorl 32(%rsp),%r14d movl %r12d,%eax movl %ebp,28(%rsp) movl %r12d,%ebx xorl 40(%rsp),%r14d andl %r11d,%eax movl %esi,%ecx xorl 0(%rsp),%r14d leal -1894007588(%rbp,%r13,1),%r13d xorl %r11d,%ebx roll $5,%ecx addl %eax,%r13d roll $1,%r14d andl %edi,%ebx addl %ecx,%r13d roll $30,%edi addl %ebx,%r13d xorl 36(%rsp),%edx movl %r11d,%eax movl %r14d,32(%rsp) movl %r11d,%ebx xorl 44(%rsp),%edx andl %edi,%eax movl %r13d,%ecx xorl 4(%rsp),%edx leal -1894007588(%r14,%r12,1),%r12d xorl %edi,%ebx roll $5,%ecx addl %eax,%r12d roll $1,%edx andl %esi,%ebx addl %ecx,%r12d roll $30,%esi addl %ebx,%r12d xorl 40(%rsp),%ebp movl %edi,%eax movl %edx,36(%rsp) movl %edi,%ebx xorl 48(%rsp),%ebp andl %esi,%eax movl %r12d,%ecx xorl 8(%rsp),%ebp leal -1894007588(%rdx,%r11,1),%r11d xorl %esi,%ebx roll $5,%ecx addl %eax,%r11d roll $1,%ebp andl %r13d,%ebx addl %ecx,%r11d roll $30,%r13d addl %ebx,%r11d xorl 44(%rsp),%r14d movl %esi,%eax movl %ebp,40(%rsp) movl %esi,%ebx xorl 52(%rsp),%r14d andl %r13d,%eax movl %r11d,%ecx xorl 12(%rsp),%r14d leal -1894007588(%rbp,%rdi,1),%edi xorl %r13d,%ebx roll $5,%ecx addl %eax,%edi roll $1,%r14d andl %r12d,%ebx addl %ecx,%edi roll $30,%r12d addl %ebx,%edi xorl 48(%rsp),%edx movl %r13d,%eax movl %r14d,44(%rsp) movl %r13d,%ebx xorl 56(%rsp),%edx andl %r12d,%eax movl %edi,%ecx xorl 16(%rsp),%edx leal -1894007588(%r14,%rsi,1),%esi xorl %r12d,%ebx roll $5,%ecx addl %eax,%esi roll $1,%edx andl %r11d,%ebx addl %ecx,%esi roll $30,%r11d addl %ebx,%esi xorl 
52(%rsp),%ebp movl %edi,%eax movl %edx,48(%rsp) movl %esi,%ecx xorl 60(%rsp),%ebp xorl %r12d,%eax roll $5,%ecx xorl 20(%rsp),%ebp leal -899497514(%rdx,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%ebp xorl 56(%rsp),%r14d movl %esi,%eax movl %ebp,52(%rsp) movl %r13d,%ecx xorl 0(%rsp),%r14d xorl %r11d,%eax roll $5,%ecx xorl 24(%rsp),%r14d leal -899497514(%rbp,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%r14d xorl 60(%rsp),%edx movl %r13d,%eax movl %r14d,56(%rsp) movl %r12d,%ecx xorl 4(%rsp),%edx xorl %edi,%eax roll $5,%ecx xorl 28(%rsp),%edx leal -899497514(%r14,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%edx xorl 0(%rsp),%ebp movl %r12d,%eax movl %edx,60(%rsp) movl %r11d,%ecx xorl 8(%rsp),%ebp xorl %esi,%eax roll $5,%ecx xorl 32(%rsp),%ebp leal -899497514(%rdx,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%ebp xorl 4(%rsp),%r14d movl %r11d,%eax movl %ebp,0(%rsp) movl %edi,%ecx xorl 12(%rsp),%r14d xorl %r13d,%eax roll $5,%ecx xorl 36(%rsp),%r14d leal -899497514(%rbp,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%r14d xorl 8(%rsp),%edx movl %edi,%eax movl %r14d,4(%rsp) movl %esi,%ecx xorl 16(%rsp),%edx xorl %r12d,%eax roll $5,%ecx xorl 40(%rsp),%edx leal -899497514(%r14,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%edx xorl 12(%rsp),%ebp movl %esi,%eax movl %edx,8(%rsp) movl %r13d,%ecx xorl 20(%rsp),%ebp xorl %r11d,%eax roll $5,%ecx xorl 44(%rsp),%ebp leal -899497514(%rdx,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%ebp xorl 16(%rsp),%r14d movl %r13d,%eax movl %ebp,12(%rsp) movl %r12d,%ecx xorl 24(%rsp),%r14d xorl %edi,%eax roll $5,%ecx xorl 48(%rsp),%r14d leal -899497514(%rbp,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%r14d xorl 20(%rsp),%edx movl %r12d,%eax movl 
%r14d,16(%rsp) movl %r11d,%ecx xorl 28(%rsp),%edx xorl %esi,%eax roll $5,%ecx xorl 52(%rsp),%edx leal -899497514(%r14,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%edx xorl 24(%rsp),%ebp movl %r11d,%eax movl %edx,20(%rsp) movl %edi,%ecx xorl 32(%rsp),%ebp xorl %r13d,%eax roll $5,%ecx xorl 56(%rsp),%ebp leal -899497514(%rdx,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%ebp xorl 28(%rsp),%r14d movl %edi,%eax movl %ebp,24(%rsp) movl %esi,%ecx xorl 36(%rsp),%r14d xorl %r12d,%eax roll $5,%ecx xorl 60(%rsp),%r14d leal -899497514(%rbp,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%r14d xorl 32(%rsp),%edx movl %esi,%eax movl %r14d,28(%rsp) movl %r13d,%ecx xorl 40(%rsp),%edx xorl %r11d,%eax roll $5,%ecx xorl 0(%rsp),%edx leal -899497514(%r14,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%edx xorl 36(%rsp),%ebp movl %r13d,%eax movl %r12d,%ecx xorl 44(%rsp),%ebp xorl %edi,%eax roll $5,%ecx xorl 4(%rsp),%ebp leal -899497514(%rdx,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%ebp xorl 40(%rsp),%r14d movl %r12d,%eax movl %r11d,%ecx xorl 48(%rsp),%r14d xorl %esi,%eax roll $5,%ecx xorl 8(%rsp),%r14d leal -899497514(%rbp,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%r14d xorl 44(%rsp),%edx movl %r11d,%eax movl %edi,%ecx xorl 52(%rsp),%edx xorl %r13d,%eax roll $5,%ecx xorl 12(%rsp),%edx leal -899497514(%r14,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%edx xorl 48(%rsp),%ebp movl %edi,%eax movl %esi,%ecx xorl 56(%rsp),%ebp xorl %r12d,%eax roll $5,%ecx xorl 16(%rsp),%ebp leal -899497514(%rdx,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%ebp xorl 52(%rsp),%r14d movl %esi,%eax movl %r13d,%ecx xorl 60(%rsp),%r14d xorl %r11d,%eax roll $5,%ecx xorl 20(%rsp),%r14d leal 
-899497514(%rbp,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%r14d xorl 56(%rsp),%edx movl %r13d,%eax movl %r12d,%ecx xorl 0(%rsp),%edx xorl %edi,%eax roll $5,%ecx xorl 24(%rsp),%edx leal -899497514(%r14,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%edx xorl 60(%rsp),%ebp movl %r12d,%eax movl %r11d,%ecx xorl 4(%rsp),%ebp xorl %esi,%eax roll $5,%ecx xorl 28(%rsp),%ebp leal -899497514(%rdx,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%ebp movl %r11d,%eax movl %edi,%ecx xorl %r13d,%eax leal -899497514(%rbp,%rsi,1),%esi roll $5,%ecx xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi addl 0(%r8),%esi addl 4(%r8),%edi addl 8(%r8),%r11d addl 12(%r8),%r12d addl 16(%r8),%r13d movl %esi,0(%r8) movl %edi,4(%r8) movl %r11d,8(%r8) movl %r12d,12(%r8) movl %r13d,16(%r8) subq $1,%r10 leaq 64(%r9),%r9 jnz L$loop movq 64(%rsp),%rsi movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$epilogue: ret .globl _sha1_block_data_order_hw .private_extern _sha1_block_data_order_hw .p2align 5 _sha1_block_data_order_hw: _CET_ENDBR movdqu (%rdi),%xmm0 movd 16(%rdi),%xmm1 movdqa K_XX_XX+160(%rip),%xmm3 movdqu (%rsi),%xmm4 pshufd $27,%xmm0,%xmm0 movdqu 16(%rsi),%xmm5 pshufd $27,%xmm1,%xmm1 movdqu 32(%rsi),%xmm6 .byte 102,15,56,0,227 movdqu 48(%rsi),%xmm7 .byte 102,15,56,0,235 .byte 102,15,56,0,243 movdqa %xmm1,%xmm9 .byte 102,15,56,0,251 jmp L$oop_shaext .p2align 4 L$oop_shaext: decq %rdx leaq 64(%rsi),%r8 paddd %xmm4,%xmm1 cmovneq %r8,%rsi prefetcht0 512(%rsi) movdqa %xmm0,%xmm8 .byte 15,56,201,229 movdqa %xmm0,%xmm2 .byte 15,58,204,193,0 .byte 15,56,200,213 pxor %xmm6,%xmm4 .byte 15,56,201,238 .byte 15,56,202,231 movdqa %xmm0,%xmm1 .byte 15,58,204,194,0 .byte 15,56,200,206 pxor %xmm7,%xmm5 .byte 15,56,202,236 .byte 15,56,201,247 movdqa %xmm0,%xmm2 .byte 15,58,204,193,0 .byte 15,56,200,215 pxor %xmm4,%xmm6 
.byte 15,56,201,252 .byte 15,56,202,245 movdqa %xmm0,%xmm1 .byte 15,58,204,194,0 .byte 15,56,200,204 pxor %xmm5,%xmm7 .byte 15,56,202,254 .byte 15,56,201,229 movdqa %xmm0,%xmm2 .byte 15,58,204,193,0 .byte 15,56,200,213 pxor %xmm6,%xmm4 .byte 15,56,201,238 .byte 15,56,202,231 movdqa %xmm0,%xmm1 .byte 15,58,204,194,1 .byte 15,56,200,206 pxor %xmm7,%xmm5 .byte 15,56,202,236 .byte 15,56,201,247 movdqa %xmm0,%xmm2 .byte 15,58,204,193,1 .byte 15,56,200,215 pxor %xmm4,%xmm6 .byte 15,56,201,252 .byte 15,56,202,245 movdqa %xmm0,%xmm1 .byte 15,58,204,194,1 .byte 15,56,200,204 pxor %xmm5,%xmm7 .byte 15,56,202,254 .byte 15,56,201,229 movdqa %xmm0,%xmm2 .byte 15,58,204,193,1 .byte 15,56,200,213 pxor %xmm6,%xmm4 .byte 15,56,201,238 .byte 15,56,202,231 movdqa %xmm0,%xmm1 .byte 15,58,204,194,1 .byte 15,56,200,206 pxor %xmm7,%xmm5 .byte 15,56,202,236 .byte 15,56,201,247 movdqa %xmm0,%xmm2 .byte 15,58,204,193,2 .byte 15,56,200,215 pxor %xmm4,%xmm6 .byte 15,56,201,252 .byte 15,56,202,245 movdqa %xmm0,%xmm1 .byte 15,58,204,194,2 .byte 15,56,200,204 pxor %xmm5,%xmm7 .byte 15,56,202,254 .byte 15,56,201,229 movdqa %xmm0,%xmm2 .byte 15,58,204,193,2 .byte 15,56,200,213 pxor %xmm6,%xmm4 .byte 15,56,201,238 .byte 15,56,202,231 movdqa %xmm0,%xmm1 .byte 15,58,204,194,2 .byte 15,56,200,206 pxor %xmm7,%xmm5 .byte 15,56,202,236 .byte 15,56,201,247 movdqa %xmm0,%xmm2 .byte 15,58,204,193,2 .byte 15,56,200,215 pxor %xmm4,%xmm6 .byte 15,56,201,252 .byte 15,56,202,245 movdqa %xmm0,%xmm1 .byte 15,58,204,194,3 .byte 15,56,200,204 pxor %xmm5,%xmm7 .byte 15,56,202,254 movdqu (%rsi),%xmm4 movdqa %xmm0,%xmm2 .byte 15,58,204,193,3 .byte 15,56,200,213 movdqu 16(%rsi),%xmm5 .byte 102,15,56,0,227 movdqa %xmm0,%xmm1 .byte 15,58,204,194,3 .byte 15,56,200,206 movdqu 32(%rsi),%xmm6 .byte 102,15,56,0,235 movdqa %xmm0,%xmm2 .byte 15,58,204,193,3 .byte 15,56,200,215 movdqu 48(%rsi),%xmm7 .byte 102,15,56,0,243 movdqa %xmm0,%xmm1 .byte 15,58,204,194,3 .byte 65,15,56,200,201 .byte 102,15,56,0,251 paddd %xmm8,%xmm0 movdqa 
%xmm1,%xmm9 jnz L$oop_shaext pshufd $27,%xmm0,%xmm0 pshufd $27,%xmm1,%xmm1 movdqu %xmm0,(%rdi) movd %xmm1,16(%rdi) ret .globl _sha1_block_data_order_ssse3 .private_extern _sha1_block_data_order_ssse3 .p2align 4 _sha1_block_data_order_ssse3: _CET_ENDBR movq %rsp,%r11 pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 leaq -64(%rsp),%rsp andq $-64,%rsp movq %rdi,%r8 movq %rsi,%r9 movq %rdx,%r10 shlq $6,%r10 addq %r9,%r10 leaq K_XX_XX+64(%rip),%r14 movl 0(%r8),%eax movl 4(%r8),%ebx movl 8(%r8),%ecx movl 12(%r8),%edx movl %ebx,%esi movl 16(%r8),%ebp movl %ecx,%edi xorl %edx,%edi andl %edi,%esi movdqa 64(%r14),%xmm6 movdqa -64(%r14),%xmm9 movdqu 0(%r9),%xmm0 movdqu 16(%r9),%xmm1 movdqu 32(%r9),%xmm2 movdqu 48(%r9),%xmm3 .byte 102,15,56,0,198 .byte 102,15,56,0,206 .byte 102,15,56,0,214 addq $64,%r9 paddd %xmm9,%xmm0 .byte 102,15,56,0,222 paddd %xmm9,%xmm1 paddd %xmm9,%xmm2 movdqa %xmm0,0(%rsp) psubd %xmm9,%xmm0 movdqa %xmm1,16(%rsp) psubd %xmm9,%xmm1 movdqa %xmm2,32(%rsp) psubd %xmm9,%xmm2 jmp L$oop_ssse3 .p2align 4 L$oop_ssse3: rorl $2,%ebx pshufd $238,%xmm0,%xmm4 xorl %edx,%esi movdqa %xmm3,%xmm8 paddd %xmm3,%xmm9 movl %eax,%edi addl 0(%rsp),%ebp punpcklqdq %xmm1,%xmm4 xorl %ecx,%ebx roll $5,%eax addl %esi,%ebp psrldq $4,%xmm8 andl %ebx,%edi xorl %ecx,%ebx pxor %xmm0,%xmm4 addl %eax,%ebp rorl $7,%eax pxor %xmm2,%xmm8 xorl %ecx,%edi movl %ebp,%esi addl 4(%rsp),%edx pxor %xmm8,%xmm4 xorl %ebx,%eax roll $5,%ebp movdqa %xmm9,48(%rsp) addl %edi,%edx andl %eax,%esi movdqa %xmm4,%xmm10 xorl %ebx,%eax addl %ebp,%edx rorl $7,%ebp movdqa %xmm4,%xmm8 xorl %ebx,%esi pslldq $12,%xmm10 paddd %xmm4,%xmm4 movl %edx,%edi addl 8(%rsp),%ecx psrld $31,%xmm8 xorl %eax,%ebp roll $5,%edx addl %esi,%ecx movdqa %xmm10,%xmm9 andl %ebp,%edi xorl %eax,%ebp psrld $30,%xmm10 addl %edx,%ecx rorl $7,%edx por %xmm8,%xmm4 xorl %eax,%edi movl %ecx,%esi addl 12(%rsp),%ebx pslld $2,%xmm9 pxor %xmm10,%xmm4 xorl %ebp,%edx movdqa -64(%r14),%xmm10 roll $5,%ecx addl %edi,%ebx andl %edx,%esi pxor 
%xmm9,%xmm4 xorl %ebp,%edx addl %ecx,%ebx rorl $7,%ecx pshufd $238,%xmm1,%xmm5 xorl %ebp,%esi movdqa %xmm4,%xmm9 paddd %xmm4,%xmm10 movl %ebx,%edi addl 16(%rsp),%eax punpcklqdq %xmm2,%xmm5 xorl %edx,%ecx roll $5,%ebx addl %esi,%eax psrldq $4,%xmm9 andl %ecx,%edi xorl %edx,%ecx pxor %xmm1,%xmm5 addl %ebx,%eax rorl $7,%ebx pxor %xmm3,%xmm9 xorl %edx,%edi movl %eax,%esi addl 20(%rsp),%ebp pxor %xmm9,%xmm5 xorl %ecx,%ebx roll $5,%eax movdqa %xmm10,0(%rsp) addl %edi,%ebp andl %ebx,%esi movdqa %xmm5,%xmm8 xorl %ecx,%ebx addl %eax,%ebp rorl $7,%eax movdqa %xmm5,%xmm9 xorl %ecx,%esi pslldq $12,%xmm8 paddd %xmm5,%xmm5 movl %ebp,%edi addl 24(%rsp),%edx psrld $31,%xmm9 xorl %ebx,%eax roll $5,%ebp addl %esi,%edx movdqa %xmm8,%xmm10 andl %eax,%edi xorl %ebx,%eax psrld $30,%xmm8 addl %ebp,%edx rorl $7,%ebp por %xmm9,%xmm5 xorl %ebx,%edi movl %edx,%esi addl 28(%rsp),%ecx pslld $2,%xmm10 pxor %xmm8,%xmm5 xorl %eax,%ebp movdqa -32(%r14),%xmm8 roll $5,%edx addl %edi,%ecx andl %ebp,%esi pxor %xmm10,%xmm5 xorl %eax,%ebp addl %edx,%ecx rorl $7,%edx pshufd $238,%xmm2,%xmm6 xorl %eax,%esi movdqa %xmm5,%xmm10 paddd %xmm5,%xmm8 movl %ecx,%edi addl 32(%rsp),%ebx punpcklqdq %xmm3,%xmm6 xorl %ebp,%edx roll $5,%ecx addl %esi,%ebx psrldq $4,%xmm10 andl %edx,%edi xorl %ebp,%edx pxor %xmm2,%xmm6 addl %ecx,%ebx rorl $7,%ecx pxor %xmm4,%xmm10 xorl %ebp,%edi movl %ebx,%esi addl 36(%rsp),%eax pxor %xmm10,%xmm6 xorl %edx,%ecx roll $5,%ebx movdqa %xmm8,16(%rsp) addl %edi,%eax andl %ecx,%esi movdqa %xmm6,%xmm9 xorl %edx,%ecx addl %ebx,%eax rorl $7,%ebx movdqa %xmm6,%xmm10 xorl %edx,%esi pslldq $12,%xmm9 paddd %xmm6,%xmm6 movl %eax,%edi addl 40(%rsp),%ebp psrld $31,%xmm10 xorl %ecx,%ebx roll $5,%eax addl %esi,%ebp movdqa %xmm9,%xmm8 andl %ebx,%edi xorl %ecx,%ebx psrld $30,%xmm9 addl %eax,%ebp rorl $7,%eax por %xmm10,%xmm6 xorl %ecx,%edi movl %ebp,%esi addl 44(%rsp),%edx pslld $2,%xmm8 pxor %xmm9,%xmm6 xorl %ebx,%eax movdqa -32(%r14),%xmm9 roll $5,%ebp addl %edi,%edx andl %eax,%esi pxor %xmm8,%xmm6 xorl 
%ebx,%eax addl %ebp,%edx rorl $7,%ebp pshufd $238,%xmm3,%xmm7 xorl %ebx,%esi movdqa %xmm6,%xmm8 paddd %xmm6,%xmm9 movl %edx,%edi addl 48(%rsp),%ecx punpcklqdq %xmm4,%xmm7 xorl %eax,%ebp roll $5,%edx addl %esi,%ecx psrldq $4,%xmm8 andl %ebp,%edi xorl %eax,%ebp pxor %xmm3,%xmm7 addl %edx,%ecx rorl $7,%edx pxor %xmm5,%xmm8 xorl %eax,%edi movl %ecx,%esi addl 52(%rsp),%ebx pxor %xmm8,%xmm7 xorl %ebp,%edx roll $5,%ecx movdqa %xmm9,32(%rsp) addl %edi,%ebx andl %edx,%esi movdqa %xmm7,%xmm10 xorl %ebp,%edx addl %ecx,%ebx rorl $7,%ecx movdqa %xmm7,%xmm8 xorl %ebp,%esi pslldq $12,%xmm10 paddd %xmm7,%xmm7 movl %ebx,%edi addl 56(%rsp),%eax psrld $31,%xmm8 xorl %edx,%ecx roll $5,%ebx addl %esi,%eax movdqa %xmm10,%xmm9 andl %ecx,%edi xorl %edx,%ecx psrld $30,%xmm10 addl %ebx,%eax rorl $7,%ebx por %xmm8,%xmm7 xorl %edx,%edi movl %eax,%esi addl 60(%rsp),%ebp pslld $2,%xmm9 pxor %xmm10,%xmm7 xorl %ecx,%ebx movdqa -32(%r14),%xmm10 roll $5,%eax addl %edi,%ebp andl %ebx,%esi pxor %xmm9,%xmm7 pshufd $238,%xmm6,%xmm9 xorl %ecx,%ebx addl %eax,%ebp rorl $7,%eax pxor %xmm4,%xmm0 xorl %ecx,%esi movl %ebp,%edi addl 0(%rsp),%edx punpcklqdq %xmm7,%xmm9 xorl %ebx,%eax roll $5,%ebp pxor %xmm1,%xmm0 addl %esi,%edx andl %eax,%edi movdqa %xmm10,%xmm8 xorl %ebx,%eax paddd %xmm7,%xmm10 addl %ebp,%edx pxor %xmm9,%xmm0 rorl $7,%ebp xorl %ebx,%edi movl %edx,%esi addl 4(%rsp),%ecx movdqa %xmm0,%xmm9 xorl %eax,%ebp roll $5,%edx movdqa %xmm10,48(%rsp) addl %edi,%ecx andl %ebp,%esi xorl %eax,%ebp pslld $2,%xmm0 addl %edx,%ecx rorl $7,%edx psrld $30,%xmm9 xorl %eax,%esi movl %ecx,%edi addl 8(%rsp),%ebx por %xmm9,%xmm0 xorl %ebp,%edx roll $5,%ecx pshufd $238,%xmm7,%xmm10 addl %esi,%ebx andl %edx,%edi xorl %ebp,%edx addl %ecx,%ebx addl 12(%rsp),%eax xorl %ebp,%edi movl %ebx,%esi roll $5,%ebx addl %edi,%eax xorl %edx,%esi rorl $7,%ecx addl %ebx,%eax pxor %xmm5,%xmm1 addl 16(%rsp),%ebp xorl %ecx,%esi punpcklqdq %xmm0,%xmm10 movl %eax,%edi roll $5,%eax pxor %xmm2,%xmm1 addl %esi,%ebp xorl %ecx,%edi movdqa 
%xmm8,%xmm9 rorl $7,%ebx paddd %xmm0,%xmm8 addl %eax,%ebp pxor %xmm10,%xmm1 addl 20(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi roll $5,%ebp movdqa %xmm1,%xmm10 addl %edi,%edx xorl %ebx,%esi movdqa %xmm8,0(%rsp) rorl $7,%eax addl %ebp,%edx addl 24(%rsp),%ecx pslld $2,%xmm1 xorl %eax,%esi movl %edx,%edi psrld $30,%xmm10 roll $5,%edx addl %esi,%ecx xorl %eax,%edi rorl $7,%ebp por %xmm10,%xmm1 addl %edx,%ecx addl 28(%rsp),%ebx pshufd $238,%xmm0,%xmm8 xorl %ebp,%edi movl %ecx,%esi roll $5,%ecx addl %edi,%ebx xorl %ebp,%esi rorl $7,%edx addl %ecx,%ebx pxor %xmm6,%xmm2 addl 32(%rsp),%eax xorl %edx,%esi punpcklqdq %xmm1,%xmm8 movl %ebx,%edi roll $5,%ebx pxor %xmm3,%xmm2 addl %esi,%eax xorl %edx,%edi movdqa 0(%r14),%xmm10 rorl $7,%ecx paddd %xmm1,%xmm9 addl %ebx,%eax pxor %xmm8,%xmm2 addl 36(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi roll $5,%eax movdqa %xmm2,%xmm8 addl %edi,%ebp xorl %ecx,%esi movdqa %xmm9,16(%rsp) rorl $7,%ebx addl %eax,%ebp addl 40(%rsp),%edx pslld $2,%xmm2 xorl %ebx,%esi movl %ebp,%edi psrld $30,%xmm8 roll $5,%ebp addl %esi,%edx xorl %ebx,%edi rorl $7,%eax por %xmm8,%xmm2 addl %ebp,%edx addl 44(%rsp),%ecx pshufd $238,%xmm1,%xmm9 xorl %eax,%edi movl %edx,%esi roll $5,%edx addl %edi,%ecx xorl %eax,%esi rorl $7,%ebp addl %edx,%ecx pxor %xmm7,%xmm3 addl 48(%rsp),%ebx xorl %ebp,%esi punpcklqdq %xmm2,%xmm9 movl %ecx,%edi roll $5,%ecx pxor %xmm4,%xmm3 addl %esi,%ebx xorl %ebp,%edi movdqa %xmm10,%xmm8 rorl $7,%edx paddd %xmm2,%xmm10 addl %ecx,%ebx pxor %xmm9,%xmm3 addl 52(%rsp),%eax xorl %edx,%edi movl %ebx,%esi roll $5,%ebx movdqa %xmm3,%xmm9 addl %edi,%eax xorl %edx,%esi movdqa %xmm10,32(%rsp) rorl $7,%ecx addl %ebx,%eax addl 56(%rsp),%ebp pslld $2,%xmm3 xorl %ecx,%esi movl %eax,%edi psrld $30,%xmm9 roll $5,%eax addl %esi,%ebp xorl %ecx,%edi rorl $7,%ebx por %xmm9,%xmm3 addl %eax,%ebp addl 60(%rsp),%edx pshufd $238,%xmm2,%xmm10 xorl %ebx,%edi movl %ebp,%esi roll $5,%ebp addl %edi,%edx xorl %ebx,%esi rorl $7,%eax addl %ebp,%edx pxor %xmm0,%xmm4 addl 0(%rsp),%ecx 
xorl %eax,%esi punpcklqdq %xmm3,%xmm10 movl %edx,%edi roll $5,%edx pxor %xmm5,%xmm4 addl %esi,%ecx xorl %eax,%edi movdqa %xmm8,%xmm9 rorl $7,%ebp paddd %xmm3,%xmm8 addl %edx,%ecx pxor %xmm10,%xmm4 addl 4(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi roll $5,%ecx movdqa %xmm4,%xmm10 addl %edi,%ebx xorl %ebp,%esi movdqa %xmm8,48(%rsp) rorl $7,%edx addl %ecx,%ebx addl 8(%rsp),%eax pslld $2,%xmm4 xorl %edx,%esi movl %ebx,%edi psrld $30,%xmm10 roll $5,%ebx addl %esi,%eax xorl %edx,%edi rorl $7,%ecx por %xmm10,%xmm4 addl %ebx,%eax addl 12(%rsp),%ebp pshufd $238,%xmm3,%xmm8 xorl %ecx,%edi movl %eax,%esi roll $5,%eax addl %edi,%ebp xorl %ecx,%esi rorl $7,%ebx addl %eax,%ebp pxor %xmm1,%xmm5 addl 16(%rsp),%edx xorl %ebx,%esi punpcklqdq %xmm4,%xmm8 movl %ebp,%edi roll $5,%ebp pxor %xmm6,%xmm5 addl %esi,%edx xorl %ebx,%edi movdqa %xmm9,%xmm10 rorl $7,%eax paddd %xmm4,%xmm9 addl %ebp,%edx pxor %xmm8,%xmm5 addl 20(%rsp),%ecx xorl %eax,%edi movl %edx,%esi roll $5,%edx movdqa %xmm5,%xmm8 addl %edi,%ecx xorl %eax,%esi movdqa %xmm9,0(%rsp) rorl $7,%ebp addl %edx,%ecx addl 24(%rsp),%ebx pslld $2,%xmm5 xorl %ebp,%esi movl %ecx,%edi psrld $30,%xmm8 roll $5,%ecx addl %esi,%ebx xorl %ebp,%edi rorl $7,%edx por %xmm8,%xmm5 addl %ecx,%ebx addl 28(%rsp),%eax pshufd $238,%xmm4,%xmm9 rorl $7,%ecx movl %ebx,%esi xorl %edx,%edi roll $5,%ebx addl %edi,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax pxor %xmm2,%xmm6 addl 32(%rsp),%ebp andl %ecx,%esi xorl %edx,%ecx rorl $7,%ebx punpcklqdq %xmm5,%xmm9 movl %eax,%edi xorl %ecx,%esi pxor %xmm7,%xmm6 roll $5,%eax addl %esi,%ebp movdqa %xmm10,%xmm8 xorl %ebx,%edi paddd %xmm5,%xmm10 xorl %ecx,%ebx pxor %xmm9,%xmm6 addl %eax,%ebp addl 36(%rsp),%edx andl %ebx,%edi xorl %ecx,%ebx rorl $7,%eax movdqa %xmm6,%xmm9 movl %ebp,%esi xorl %ebx,%edi movdqa %xmm10,16(%rsp) roll $5,%ebp addl %edi,%edx xorl %eax,%esi pslld $2,%xmm6 xorl %ebx,%eax addl %ebp,%edx psrld $30,%xmm9 addl 40(%rsp),%ecx andl %eax,%esi xorl %ebx,%eax por %xmm9,%xmm6 rorl $7,%ebp movl %edx,%edi 
xorl %eax,%esi roll $5,%edx pshufd $238,%xmm5,%xmm10 addl %esi,%ecx xorl %ebp,%edi xorl %eax,%ebp addl %edx,%ecx addl 44(%rsp),%ebx andl %ebp,%edi xorl %eax,%ebp rorl $7,%edx movl %ecx,%esi xorl %ebp,%edi roll $5,%ecx addl %edi,%ebx xorl %edx,%esi xorl %ebp,%edx addl %ecx,%ebx pxor %xmm3,%xmm7 addl 48(%rsp),%eax andl %edx,%esi xorl %ebp,%edx rorl $7,%ecx punpcklqdq %xmm6,%xmm10 movl %ebx,%edi xorl %edx,%esi pxor %xmm0,%xmm7 roll $5,%ebx addl %esi,%eax movdqa 32(%r14),%xmm9 xorl %ecx,%edi paddd %xmm6,%xmm8 xorl %edx,%ecx pxor %xmm10,%xmm7 addl %ebx,%eax addl 52(%rsp),%ebp andl %ecx,%edi xorl %edx,%ecx rorl $7,%ebx movdqa %xmm7,%xmm10 movl %eax,%esi xorl %ecx,%edi movdqa %xmm8,32(%rsp) roll $5,%eax addl %edi,%ebp xorl %ebx,%esi pslld $2,%xmm7 xorl %ecx,%ebx addl %eax,%ebp psrld $30,%xmm10 addl 56(%rsp),%edx andl %ebx,%esi xorl %ecx,%ebx por %xmm10,%xmm7 rorl $7,%eax movl %ebp,%edi xorl %ebx,%esi roll $5,%ebp pshufd $238,%xmm6,%xmm8 addl %esi,%edx xorl %eax,%edi xorl %ebx,%eax addl %ebp,%edx addl 60(%rsp),%ecx andl %eax,%edi xorl %ebx,%eax rorl $7,%ebp movl %edx,%esi xorl %eax,%edi roll $5,%edx addl %edi,%ecx xorl %ebp,%esi xorl %eax,%ebp addl %edx,%ecx pxor %xmm4,%xmm0 addl 0(%rsp),%ebx andl %ebp,%esi xorl %eax,%ebp rorl $7,%edx punpcklqdq %xmm7,%xmm8 movl %ecx,%edi xorl %ebp,%esi pxor %xmm1,%xmm0 roll $5,%ecx addl %esi,%ebx movdqa %xmm9,%xmm10 xorl %edx,%edi paddd %xmm7,%xmm9 xorl %ebp,%edx pxor %xmm8,%xmm0 addl %ecx,%ebx addl 4(%rsp),%eax andl %edx,%edi xorl %ebp,%edx rorl $7,%ecx movdqa %xmm0,%xmm8 movl %ebx,%esi xorl %edx,%edi movdqa %xmm9,48(%rsp) roll $5,%ebx addl %edi,%eax xorl %ecx,%esi pslld $2,%xmm0 xorl %edx,%ecx addl %ebx,%eax psrld $30,%xmm8 addl 8(%rsp),%ebp andl %ecx,%esi xorl %edx,%ecx por %xmm8,%xmm0 rorl $7,%ebx movl %eax,%edi xorl %ecx,%esi roll $5,%eax pshufd $238,%xmm7,%xmm9 addl %esi,%ebp xorl %ebx,%edi xorl %ecx,%ebx addl %eax,%ebp addl 12(%rsp),%edx andl %ebx,%edi xorl %ecx,%ebx rorl $7,%eax movl %ebp,%esi xorl %ebx,%edi roll $5,%ebp addl 
%edi,%edx xorl %eax,%esi xorl %ebx,%eax addl %ebp,%edx pxor %xmm5,%xmm1 addl 16(%rsp),%ecx andl %eax,%esi xorl %ebx,%eax rorl $7,%ebp punpcklqdq %xmm0,%xmm9 movl %edx,%edi xorl %eax,%esi pxor %xmm2,%xmm1 roll $5,%edx addl %esi,%ecx movdqa %xmm10,%xmm8 xorl %ebp,%edi paddd %xmm0,%xmm10 xorl %eax,%ebp pxor %xmm9,%xmm1 addl %edx,%ecx addl 20(%rsp),%ebx andl %ebp,%edi xorl %eax,%ebp rorl $7,%edx movdqa %xmm1,%xmm9 movl %ecx,%esi xorl %ebp,%edi movdqa %xmm10,0(%rsp) roll $5,%ecx addl %edi,%ebx xorl %edx,%esi pslld $2,%xmm1 xorl %ebp,%edx addl %ecx,%ebx psrld $30,%xmm9 addl 24(%rsp),%eax andl %edx,%esi xorl %ebp,%edx por %xmm9,%xmm1 rorl $7,%ecx movl %ebx,%edi xorl %edx,%esi roll $5,%ebx pshufd $238,%xmm0,%xmm10 addl %esi,%eax xorl %ecx,%edi xorl %edx,%ecx addl %ebx,%eax addl 28(%rsp),%ebp andl %ecx,%edi xorl %edx,%ecx rorl $7,%ebx movl %eax,%esi xorl %ecx,%edi roll $5,%eax addl %edi,%ebp xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%ebp pxor %xmm6,%xmm2 addl 32(%rsp),%edx andl %ebx,%esi xorl %ecx,%ebx rorl $7,%eax punpcklqdq %xmm1,%xmm10 movl %ebp,%edi xorl %ebx,%esi pxor %xmm3,%xmm2 roll $5,%ebp addl %esi,%edx movdqa %xmm8,%xmm9 xorl %eax,%edi paddd %xmm1,%xmm8 xorl %ebx,%eax pxor %xmm10,%xmm2 addl %ebp,%edx addl 36(%rsp),%ecx andl %eax,%edi xorl %ebx,%eax rorl $7,%ebp movdqa %xmm2,%xmm10 movl %edx,%esi xorl %eax,%edi movdqa %xmm8,16(%rsp) roll $5,%edx addl %edi,%ecx xorl %ebp,%esi pslld $2,%xmm2 xorl %eax,%ebp addl %edx,%ecx psrld $30,%xmm10 addl 40(%rsp),%ebx andl %ebp,%esi xorl %eax,%ebp por %xmm10,%xmm2 rorl $7,%edx movl %ecx,%edi xorl %ebp,%esi roll $5,%ecx pshufd $238,%xmm1,%xmm8 addl %esi,%ebx xorl %edx,%edi xorl %ebp,%edx addl %ecx,%ebx addl 44(%rsp),%eax andl %edx,%edi xorl %ebp,%edx rorl $7,%ecx movl %ebx,%esi xorl %edx,%edi roll $5,%ebx addl %edi,%eax xorl %edx,%esi addl %ebx,%eax pxor %xmm7,%xmm3 addl 48(%rsp),%ebp xorl %ecx,%esi punpcklqdq %xmm2,%xmm8 movl %eax,%edi roll $5,%eax pxor %xmm4,%xmm3 addl %esi,%ebp xorl %ecx,%edi movdqa %xmm9,%xmm10 rorl $7,%ebx 
paddd %xmm2,%xmm9 addl %eax,%ebp pxor %xmm8,%xmm3 addl 52(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi roll $5,%ebp movdqa %xmm3,%xmm8 addl %edi,%edx xorl %ebx,%esi movdqa %xmm9,32(%rsp) rorl $7,%eax addl %ebp,%edx addl 56(%rsp),%ecx pslld $2,%xmm3 xorl %eax,%esi movl %edx,%edi psrld $30,%xmm8 roll $5,%edx addl %esi,%ecx xorl %eax,%edi rorl $7,%ebp por %xmm8,%xmm3 addl %edx,%ecx addl 60(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi roll $5,%ecx addl %edi,%ebx xorl %ebp,%esi rorl $7,%edx addl %ecx,%ebx addl 0(%rsp),%eax xorl %edx,%esi movl %ebx,%edi roll $5,%ebx paddd %xmm3,%xmm10 addl %esi,%eax xorl %edx,%edi movdqa %xmm10,48(%rsp) rorl $7,%ecx addl %ebx,%eax addl 4(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi roll $5,%eax addl %edi,%ebp xorl %ecx,%esi rorl $7,%ebx addl %eax,%ebp addl 8(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi roll $5,%ebp addl %esi,%edx xorl %ebx,%edi rorl $7,%eax addl %ebp,%edx addl 12(%rsp),%ecx xorl %eax,%edi movl %edx,%esi roll $5,%edx addl %edi,%ecx xorl %eax,%esi rorl $7,%ebp addl %edx,%ecx cmpq %r10,%r9 je L$done_ssse3 movdqa 64(%r14),%xmm6 movdqa -64(%r14),%xmm9 movdqu 0(%r9),%xmm0 movdqu 16(%r9),%xmm1 movdqu 32(%r9),%xmm2 movdqu 48(%r9),%xmm3 .byte 102,15,56,0,198 addq $64,%r9 addl 16(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi .byte 102,15,56,0,206 roll $5,%ecx addl %esi,%ebx xorl %ebp,%edi rorl $7,%edx paddd %xmm9,%xmm0 addl %ecx,%ebx addl 20(%rsp),%eax xorl %edx,%edi movl %ebx,%esi movdqa %xmm0,0(%rsp) roll $5,%ebx addl %edi,%eax xorl %edx,%esi rorl $7,%ecx psubd %xmm9,%xmm0 addl %ebx,%eax addl 24(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi roll $5,%eax addl %esi,%ebp xorl %ecx,%edi rorl $7,%ebx addl %eax,%ebp addl 28(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi roll $5,%ebp addl %edi,%edx xorl %ebx,%esi rorl $7,%eax addl %ebp,%edx addl 32(%rsp),%ecx xorl %eax,%esi movl %edx,%edi .byte 102,15,56,0,214 roll $5,%edx addl %esi,%ecx xorl %eax,%edi rorl $7,%ebp paddd %xmm9,%xmm1 addl %edx,%ecx addl 36(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi movdqa %xmm1,16(%rsp) 
roll $5,%ecx addl %edi,%ebx xorl %ebp,%esi rorl $7,%edx psubd %xmm9,%xmm1 addl %ecx,%ebx addl 40(%rsp),%eax xorl %edx,%esi movl %ebx,%edi roll $5,%ebx addl %esi,%eax xorl %edx,%edi rorl $7,%ecx addl %ebx,%eax addl 44(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi roll $5,%eax addl %edi,%ebp xorl %ecx,%esi rorl $7,%ebx addl %eax,%ebp addl 48(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi .byte 102,15,56,0,222 roll $5,%ebp addl %esi,%edx xorl %ebx,%edi rorl $7,%eax paddd %xmm9,%xmm2 addl %ebp,%edx addl 52(%rsp),%ecx xorl %eax,%edi movl %edx,%esi movdqa %xmm2,32(%rsp) roll $5,%edx addl %edi,%ecx xorl %eax,%esi rorl $7,%ebp psubd %xmm9,%xmm2 addl %edx,%ecx addl 56(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi roll $5,%ecx addl %esi,%ebx xorl %ebp,%edi rorl $7,%edx addl %ecx,%ebx addl 60(%rsp),%eax xorl %edx,%edi movl %ebx,%esi roll $5,%ebx addl %edi,%eax rorl $7,%ecx addl %ebx,%eax addl 0(%r8),%eax addl 4(%r8),%esi addl 8(%r8),%ecx addl 12(%r8),%edx movl %eax,0(%r8) addl 16(%r8),%ebp movl %esi,4(%r8) movl %esi,%ebx movl %ecx,8(%r8) movl %ecx,%edi movl %edx,12(%r8) xorl %edx,%edi movl %ebp,16(%r8) andl %edi,%esi jmp L$oop_ssse3 .p2align 4 L$done_ssse3: addl 16(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi roll $5,%ecx addl %esi,%ebx xorl %ebp,%edi rorl $7,%edx addl %ecx,%ebx addl 20(%rsp),%eax xorl %edx,%edi movl %ebx,%esi roll $5,%ebx addl %edi,%eax xorl %edx,%esi rorl $7,%ecx addl %ebx,%eax addl 24(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi roll $5,%eax addl %esi,%ebp xorl %ecx,%edi rorl $7,%ebx addl %eax,%ebp addl 28(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi roll $5,%ebp addl %edi,%edx xorl %ebx,%esi rorl $7,%eax addl %ebp,%edx addl 32(%rsp),%ecx xorl %eax,%esi movl %edx,%edi roll $5,%edx addl %esi,%ecx xorl %eax,%edi rorl $7,%ebp addl %edx,%ecx addl 36(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi roll $5,%ecx addl %edi,%ebx xorl %ebp,%esi rorl $7,%edx addl %ecx,%ebx addl 40(%rsp),%eax xorl %edx,%esi movl %ebx,%edi roll $5,%ebx addl %esi,%eax xorl %edx,%edi rorl $7,%ecx addl %ebx,%eax addl 
44(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi roll $5,%eax addl %edi,%ebp xorl %ecx,%esi rorl $7,%ebx addl %eax,%ebp addl 48(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi roll $5,%ebp addl %esi,%edx xorl %ebx,%edi rorl $7,%eax addl %ebp,%edx addl 52(%rsp),%ecx xorl %eax,%edi movl %edx,%esi roll $5,%edx addl %edi,%ecx xorl %eax,%esi rorl $7,%ebp addl %edx,%ecx addl 56(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi roll $5,%ecx addl %esi,%ebx xorl %ebp,%edi rorl $7,%edx addl %ecx,%ebx addl 60(%rsp),%eax xorl %edx,%edi movl %ebx,%esi roll $5,%ebx addl %edi,%eax rorl $7,%ecx addl %ebx,%eax addl 0(%r8),%eax addl 4(%r8),%esi addl 8(%r8),%ecx movl %eax,0(%r8) addl 12(%r8),%edx movl %esi,4(%r8) addl 16(%r8),%ebp movl %ecx,8(%r8) movl %edx,12(%r8) movl %ebp,16(%r8) movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp L$epilogue_ssse3: ret .globl _sha1_block_data_order_avx .private_extern _sha1_block_data_order_avx .p2align 4 _sha1_block_data_order_avx: _CET_ENDBR movq %rsp,%r11 pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 leaq -64(%rsp),%rsp vzeroupper andq $-64,%rsp movq %rdi,%r8 movq %rsi,%r9 movq %rdx,%r10 shlq $6,%r10 addq %r9,%r10 leaq K_XX_XX+64(%rip),%r14 movl 0(%r8),%eax movl 4(%r8),%ebx movl 8(%r8),%ecx movl 12(%r8),%edx movl %ebx,%esi movl 16(%r8),%ebp movl %ecx,%edi xorl %edx,%edi andl %edi,%esi vmovdqa 64(%r14),%xmm6 vmovdqa -64(%r14),%xmm11 vmovdqu 0(%r9),%xmm0 vmovdqu 16(%r9),%xmm1 vmovdqu 32(%r9),%xmm2 vmovdqu 48(%r9),%xmm3 vpshufb %xmm6,%xmm0,%xmm0 addq $64,%r9 vpshufb %xmm6,%xmm1,%xmm1 vpshufb %xmm6,%xmm2,%xmm2 vpshufb %xmm6,%xmm3,%xmm3 vpaddd %xmm11,%xmm0,%xmm4 vpaddd %xmm11,%xmm1,%xmm5 vpaddd %xmm11,%xmm2,%xmm6 vmovdqa %xmm4,0(%rsp) vmovdqa %xmm5,16(%rsp) vmovdqa %xmm6,32(%rsp) jmp L$oop_avx .p2align 4 L$oop_avx: shrdl $2,%ebx,%ebx xorl %edx,%esi vpalignr $8,%xmm0,%xmm1,%xmm4 movl %eax,%edi addl 0(%rsp),%ebp vpaddd %xmm3,%xmm11,%xmm9 xorl %ecx,%ebx shldl $5,%eax,%eax vpsrldq $4,%xmm3,%xmm8 addl 
%esi,%ebp andl %ebx,%edi vpxor %xmm0,%xmm4,%xmm4 xorl %ecx,%ebx addl %eax,%ebp vpxor %xmm2,%xmm8,%xmm8 shrdl $7,%eax,%eax xorl %ecx,%edi movl %ebp,%esi addl 4(%rsp),%edx vpxor %xmm8,%xmm4,%xmm4 xorl %ebx,%eax shldl $5,%ebp,%ebp vmovdqa %xmm9,48(%rsp) addl %edi,%edx andl %eax,%esi vpsrld $31,%xmm4,%xmm8 xorl %ebx,%eax addl %ebp,%edx shrdl $7,%ebp,%ebp xorl %ebx,%esi vpslldq $12,%xmm4,%xmm10 vpaddd %xmm4,%xmm4,%xmm4 movl %edx,%edi addl 8(%rsp),%ecx xorl %eax,%ebp shldl $5,%edx,%edx vpsrld $30,%xmm10,%xmm9 vpor %xmm8,%xmm4,%xmm4 addl %esi,%ecx andl %ebp,%edi xorl %eax,%ebp addl %edx,%ecx vpslld $2,%xmm10,%xmm10 vpxor %xmm9,%xmm4,%xmm4 shrdl $7,%edx,%edx xorl %eax,%edi movl %ecx,%esi addl 12(%rsp),%ebx vpxor %xmm10,%xmm4,%xmm4 xorl %ebp,%edx shldl $5,%ecx,%ecx addl %edi,%ebx andl %edx,%esi xorl %ebp,%edx addl %ecx,%ebx shrdl $7,%ecx,%ecx xorl %ebp,%esi vpalignr $8,%xmm1,%xmm2,%xmm5 movl %ebx,%edi addl 16(%rsp),%eax vpaddd %xmm4,%xmm11,%xmm9 xorl %edx,%ecx shldl $5,%ebx,%ebx vpsrldq $4,%xmm4,%xmm8 addl %esi,%eax andl %ecx,%edi vpxor %xmm1,%xmm5,%xmm5 xorl %edx,%ecx addl %ebx,%eax vpxor %xmm3,%xmm8,%xmm8 shrdl $7,%ebx,%ebx xorl %edx,%edi movl %eax,%esi addl 20(%rsp),%ebp vpxor %xmm8,%xmm5,%xmm5 xorl %ecx,%ebx shldl $5,%eax,%eax vmovdqa %xmm9,0(%rsp) addl %edi,%ebp andl %ebx,%esi vpsrld $31,%xmm5,%xmm8 xorl %ecx,%ebx addl %eax,%ebp shrdl $7,%eax,%eax xorl %ecx,%esi vpslldq $12,%xmm5,%xmm10 vpaddd %xmm5,%xmm5,%xmm5 movl %ebp,%edi addl 24(%rsp),%edx xorl %ebx,%eax shldl $5,%ebp,%ebp vpsrld $30,%xmm10,%xmm9 vpor %xmm8,%xmm5,%xmm5 addl %esi,%edx andl %eax,%edi xorl %ebx,%eax addl %ebp,%edx vpslld $2,%xmm10,%xmm10 vpxor %xmm9,%xmm5,%xmm5 shrdl $7,%ebp,%ebp xorl %ebx,%edi movl %edx,%esi addl 28(%rsp),%ecx vpxor %xmm10,%xmm5,%xmm5 xorl %eax,%ebp shldl $5,%edx,%edx vmovdqa -32(%r14),%xmm11 addl %edi,%ecx andl %ebp,%esi xorl %eax,%ebp addl %edx,%ecx shrdl $7,%edx,%edx xorl %eax,%esi vpalignr $8,%xmm2,%xmm3,%xmm6 movl %ecx,%edi addl 32(%rsp),%ebx vpaddd %xmm5,%xmm11,%xmm9 xorl 
%ebp,%edx shldl $5,%ecx,%ecx vpsrldq $4,%xmm5,%xmm8 addl %esi,%ebx andl %edx,%edi vpxor %xmm2,%xmm6,%xmm6 xorl %ebp,%edx addl %ecx,%ebx vpxor %xmm4,%xmm8,%xmm8 shrdl $7,%ecx,%ecx xorl %ebp,%edi movl %ebx,%esi addl 36(%rsp),%eax vpxor %xmm8,%xmm6,%xmm6 xorl %edx,%ecx shldl $5,%ebx,%ebx vmovdqa %xmm9,16(%rsp) addl %edi,%eax andl %ecx,%esi vpsrld $31,%xmm6,%xmm8 xorl %edx,%ecx addl %ebx,%eax shrdl $7,%ebx,%ebx xorl %edx,%esi vpslldq $12,%xmm6,%xmm10 vpaddd %xmm6,%xmm6,%xmm6 movl %eax,%edi addl 40(%rsp),%ebp xorl %ecx,%ebx shldl $5,%eax,%eax vpsrld $30,%xmm10,%xmm9 vpor %xmm8,%xmm6,%xmm6 addl %esi,%ebp andl %ebx,%edi xorl %ecx,%ebx addl %eax,%ebp vpslld $2,%xmm10,%xmm10 vpxor %xmm9,%xmm6,%xmm6 shrdl $7,%eax,%eax xorl %ecx,%edi movl %ebp,%esi addl 44(%rsp),%edx vpxor %xmm10,%xmm6,%xmm6 xorl %ebx,%eax shldl $5,%ebp,%ebp addl %edi,%edx andl %eax,%esi xorl %ebx,%eax addl %ebp,%edx shrdl $7,%ebp,%ebp xorl %ebx,%esi vpalignr $8,%xmm3,%xmm4,%xmm7 movl %edx,%edi addl 48(%rsp),%ecx vpaddd %xmm6,%xmm11,%xmm9 xorl %eax,%ebp shldl $5,%edx,%edx vpsrldq $4,%xmm6,%xmm8 addl %esi,%ecx andl %ebp,%edi vpxor %xmm3,%xmm7,%xmm7 xorl %eax,%ebp addl %edx,%ecx vpxor %xmm5,%xmm8,%xmm8 shrdl $7,%edx,%edx xorl %eax,%edi movl %ecx,%esi addl 52(%rsp),%ebx vpxor %xmm8,%xmm7,%xmm7 xorl %ebp,%edx shldl $5,%ecx,%ecx vmovdqa %xmm9,32(%rsp) addl %edi,%ebx andl %edx,%esi vpsrld $31,%xmm7,%xmm8 xorl %ebp,%edx addl %ecx,%ebx shrdl $7,%ecx,%ecx xorl %ebp,%esi vpslldq $12,%xmm7,%xmm10 vpaddd %xmm7,%xmm7,%xmm7 movl %ebx,%edi addl 56(%rsp),%eax xorl %edx,%ecx shldl $5,%ebx,%ebx vpsrld $30,%xmm10,%xmm9 vpor %xmm8,%xmm7,%xmm7 addl %esi,%eax andl %ecx,%edi xorl %edx,%ecx addl %ebx,%eax vpslld $2,%xmm10,%xmm10 vpxor %xmm9,%xmm7,%xmm7 shrdl $7,%ebx,%ebx xorl %edx,%edi movl %eax,%esi addl 60(%rsp),%ebp vpxor %xmm10,%xmm7,%xmm7 xorl %ecx,%ebx shldl $5,%eax,%eax addl %edi,%ebp andl %ebx,%esi xorl %ecx,%ebx addl %eax,%ebp vpalignr $8,%xmm6,%xmm7,%xmm8 vpxor %xmm4,%xmm0,%xmm0 shrdl $7,%eax,%eax xorl %ecx,%esi movl 
%ebp,%edi addl 0(%rsp),%edx vpxor %xmm1,%xmm0,%xmm0 xorl %ebx,%eax shldl $5,%ebp,%ebp vpaddd %xmm7,%xmm11,%xmm9 addl %esi,%edx andl %eax,%edi vpxor %xmm8,%xmm0,%xmm0 xorl %ebx,%eax addl %ebp,%edx shrdl $7,%ebp,%ebp xorl %ebx,%edi vpsrld $30,%xmm0,%xmm8 vmovdqa %xmm9,48(%rsp) movl %edx,%esi addl 4(%rsp),%ecx xorl %eax,%ebp shldl $5,%edx,%edx vpslld $2,%xmm0,%xmm0 addl %edi,%ecx andl %ebp,%esi xorl %eax,%ebp addl %edx,%ecx shrdl $7,%edx,%edx xorl %eax,%esi movl %ecx,%edi addl 8(%rsp),%ebx vpor %xmm8,%xmm0,%xmm0 xorl %ebp,%edx shldl $5,%ecx,%ecx addl %esi,%ebx andl %edx,%edi xorl %ebp,%edx addl %ecx,%ebx addl 12(%rsp),%eax xorl %ebp,%edi movl %ebx,%esi shldl $5,%ebx,%ebx addl %edi,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax vpalignr $8,%xmm7,%xmm0,%xmm8 vpxor %xmm5,%xmm1,%xmm1 addl 16(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi shldl $5,%eax,%eax vpxor %xmm2,%xmm1,%xmm1 addl %esi,%ebp xorl %ecx,%edi vpaddd %xmm0,%xmm11,%xmm9 shrdl $7,%ebx,%ebx addl %eax,%ebp vpxor %xmm8,%xmm1,%xmm1 addl 20(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi shldl $5,%ebp,%ebp vpsrld $30,%xmm1,%xmm8 vmovdqa %xmm9,0(%rsp) addl %edi,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %ebp,%edx vpslld $2,%xmm1,%xmm1 addl 24(%rsp),%ecx xorl %eax,%esi movl %edx,%edi shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%edi shrdl $7,%ebp,%ebp addl %edx,%ecx vpor %xmm8,%xmm1,%xmm1 addl 28(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi shldl $5,%ecx,%ecx addl %edi,%ebx xorl %ebp,%esi shrdl $7,%edx,%edx addl %ecx,%ebx vpalignr $8,%xmm0,%xmm1,%xmm8 vpxor %xmm6,%xmm2,%xmm2 addl 32(%rsp),%eax xorl %edx,%esi movl %ebx,%edi shldl $5,%ebx,%ebx vpxor %xmm3,%xmm2,%xmm2 addl %esi,%eax xorl %edx,%edi vpaddd %xmm1,%xmm11,%xmm9 vmovdqa 0(%r14),%xmm11 shrdl $7,%ecx,%ecx addl %ebx,%eax vpxor %xmm8,%xmm2,%xmm2 addl 36(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi shldl $5,%eax,%eax vpsrld $30,%xmm2,%xmm8 vmovdqa %xmm9,16(%rsp) addl %edi,%ebp xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%ebp vpslld $2,%xmm2,%xmm2 addl 40(%rsp),%edx xorl 
%ebx,%esi movl %ebp,%edi shldl $5,%ebp,%ebp addl %esi,%edx xorl %ebx,%edi shrdl $7,%eax,%eax addl %ebp,%edx vpor %xmm8,%xmm2,%xmm2 addl 44(%rsp),%ecx xorl %eax,%edi movl %edx,%esi shldl $5,%edx,%edx addl %edi,%ecx xorl %eax,%esi shrdl $7,%ebp,%ebp addl %edx,%ecx vpalignr $8,%xmm1,%xmm2,%xmm8 vpxor %xmm7,%xmm3,%xmm3 addl 48(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi shldl $5,%ecx,%ecx vpxor %xmm4,%xmm3,%xmm3 addl %esi,%ebx xorl %ebp,%edi vpaddd %xmm2,%xmm11,%xmm9 shrdl $7,%edx,%edx addl %ecx,%ebx vpxor %xmm8,%xmm3,%xmm3 addl 52(%rsp),%eax xorl %edx,%edi movl %ebx,%esi shldl $5,%ebx,%ebx vpsrld $30,%xmm3,%xmm8 vmovdqa %xmm9,32(%rsp) addl %edi,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax vpslld $2,%xmm3,%xmm3 addl 56(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi shldl $5,%eax,%eax addl %esi,%ebp xorl %ecx,%edi shrdl $7,%ebx,%ebx addl %eax,%ebp vpor %xmm8,%xmm3,%xmm3 addl 60(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi shldl $5,%ebp,%ebp addl %edi,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %ebp,%edx vpalignr $8,%xmm2,%xmm3,%xmm8 vpxor %xmm0,%xmm4,%xmm4 addl 0(%rsp),%ecx xorl %eax,%esi movl %edx,%edi shldl $5,%edx,%edx vpxor %xmm5,%xmm4,%xmm4 addl %esi,%ecx xorl %eax,%edi vpaddd %xmm3,%xmm11,%xmm9 shrdl $7,%ebp,%ebp addl %edx,%ecx vpxor %xmm8,%xmm4,%xmm4 addl 4(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi shldl $5,%ecx,%ecx vpsrld $30,%xmm4,%xmm8 vmovdqa %xmm9,48(%rsp) addl %edi,%ebx xorl %ebp,%esi shrdl $7,%edx,%edx addl %ecx,%ebx vpslld $2,%xmm4,%xmm4 addl 8(%rsp),%eax xorl %edx,%esi movl %ebx,%edi shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%edi shrdl $7,%ecx,%ecx addl %ebx,%eax vpor %xmm8,%xmm4,%xmm4 addl 12(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi shldl $5,%eax,%eax addl %edi,%ebp xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%ebp vpalignr $8,%xmm3,%xmm4,%xmm8 vpxor %xmm1,%xmm5,%xmm5 addl 16(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi shldl $5,%ebp,%ebp vpxor %xmm6,%xmm5,%xmm5 addl %esi,%edx xorl %ebx,%edi vpaddd %xmm4,%xmm11,%xmm9 shrdl $7,%eax,%eax addl %ebp,%edx vpxor 
%xmm8,%xmm5,%xmm5 addl 20(%rsp),%ecx xorl %eax,%edi movl %edx,%esi shldl $5,%edx,%edx vpsrld $30,%xmm5,%xmm8 vmovdqa %xmm9,0(%rsp) addl %edi,%ecx xorl %eax,%esi shrdl $7,%ebp,%ebp addl %edx,%ecx vpslld $2,%xmm5,%xmm5 addl 24(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %ebp,%edi shrdl $7,%edx,%edx addl %ecx,%ebx vpor %xmm8,%xmm5,%xmm5 addl 28(%rsp),%eax shrdl $7,%ecx,%ecx movl %ebx,%esi xorl %edx,%edi shldl $5,%ebx,%ebx addl %edi,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax vpalignr $8,%xmm4,%xmm5,%xmm8 vpxor %xmm2,%xmm6,%xmm6 addl 32(%rsp),%ebp andl %ecx,%esi xorl %edx,%ecx shrdl $7,%ebx,%ebx vpxor %xmm7,%xmm6,%xmm6 movl %eax,%edi xorl %ecx,%esi vpaddd %xmm5,%xmm11,%xmm9 shldl $5,%eax,%eax addl %esi,%ebp vpxor %xmm8,%xmm6,%xmm6 xorl %ebx,%edi xorl %ecx,%ebx addl %eax,%ebp addl 36(%rsp),%edx vpsrld $30,%xmm6,%xmm8 vmovdqa %xmm9,16(%rsp) andl %ebx,%edi xorl %ecx,%ebx shrdl $7,%eax,%eax movl %ebp,%esi vpslld $2,%xmm6,%xmm6 xorl %ebx,%edi shldl $5,%ebp,%ebp addl %edi,%edx xorl %eax,%esi xorl %ebx,%eax addl %ebp,%edx addl 40(%rsp),%ecx andl %eax,%esi vpor %xmm8,%xmm6,%xmm6 xorl %ebx,%eax shrdl $7,%ebp,%ebp movl %edx,%edi xorl %eax,%esi shldl $5,%edx,%edx addl %esi,%ecx xorl %ebp,%edi xorl %eax,%ebp addl %edx,%ecx addl 44(%rsp),%ebx andl %ebp,%edi xorl %eax,%ebp shrdl $7,%edx,%edx movl %ecx,%esi xorl %ebp,%edi shldl $5,%ecx,%ecx addl %edi,%ebx xorl %edx,%esi xorl %ebp,%edx addl %ecx,%ebx vpalignr $8,%xmm5,%xmm6,%xmm8 vpxor %xmm3,%xmm7,%xmm7 addl 48(%rsp),%eax andl %edx,%esi xorl %ebp,%edx shrdl $7,%ecx,%ecx vpxor %xmm0,%xmm7,%xmm7 movl %ebx,%edi xorl %edx,%esi vpaddd %xmm6,%xmm11,%xmm9 vmovdqa 32(%r14),%xmm11 shldl $5,%ebx,%ebx addl %esi,%eax vpxor %xmm8,%xmm7,%xmm7 xorl %ecx,%edi xorl %edx,%ecx addl %ebx,%eax addl 52(%rsp),%ebp vpsrld $30,%xmm7,%xmm8 vmovdqa %xmm9,32(%rsp) andl %ecx,%edi xorl %edx,%ecx shrdl $7,%ebx,%ebx movl %eax,%esi vpslld $2,%xmm7,%xmm7 xorl %ecx,%edi shldl $5,%eax,%eax addl %edi,%ebp xorl %ebx,%esi xorl 
%ecx,%ebx addl %eax,%ebp addl 56(%rsp),%edx andl %ebx,%esi vpor %xmm8,%xmm7,%xmm7 xorl %ecx,%ebx shrdl $7,%eax,%eax movl %ebp,%edi xorl %ebx,%esi shldl $5,%ebp,%ebp addl %esi,%edx xorl %eax,%edi xorl %ebx,%eax addl %ebp,%edx addl 60(%rsp),%ecx andl %eax,%edi xorl %ebx,%eax shrdl $7,%ebp,%ebp movl %edx,%esi xorl %eax,%edi shldl $5,%edx,%edx addl %edi,%ecx xorl %ebp,%esi xorl %eax,%ebp addl %edx,%ecx vpalignr $8,%xmm6,%xmm7,%xmm8 vpxor %xmm4,%xmm0,%xmm0 addl 0(%rsp),%ebx andl %ebp,%esi xorl %eax,%ebp shrdl $7,%edx,%edx vpxor %xmm1,%xmm0,%xmm0 movl %ecx,%edi xorl %ebp,%esi vpaddd %xmm7,%xmm11,%xmm9 shldl $5,%ecx,%ecx addl %esi,%ebx vpxor %xmm8,%xmm0,%xmm0 xorl %edx,%edi xorl %ebp,%edx addl %ecx,%ebx addl 4(%rsp),%eax vpsrld $30,%xmm0,%xmm8 vmovdqa %xmm9,48(%rsp) andl %edx,%edi xorl %ebp,%edx shrdl $7,%ecx,%ecx movl %ebx,%esi vpslld $2,%xmm0,%xmm0 xorl %edx,%edi shldl $5,%ebx,%ebx addl %edi,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax addl 8(%rsp),%ebp andl %ecx,%esi vpor %xmm8,%xmm0,%xmm0 xorl %edx,%ecx shrdl $7,%ebx,%ebx movl %eax,%edi xorl %ecx,%esi shldl $5,%eax,%eax addl %esi,%ebp xorl %ebx,%edi xorl %ecx,%ebx addl %eax,%ebp addl 12(%rsp),%edx andl %ebx,%edi xorl %ecx,%ebx shrdl $7,%eax,%eax movl %ebp,%esi xorl %ebx,%edi shldl $5,%ebp,%ebp addl %edi,%edx xorl %eax,%esi xorl %ebx,%eax addl %ebp,%edx vpalignr $8,%xmm7,%xmm0,%xmm8 vpxor %xmm5,%xmm1,%xmm1 addl 16(%rsp),%ecx andl %eax,%esi xorl %ebx,%eax shrdl $7,%ebp,%ebp vpxor %xmm2,%xmm1,%xmm1 movl %edx,%edi xorl %eax,%esi vpaddd %xmm0,%xmm11,%xmm9 shldl $5,%edx,%edx addl %esi,%ecx vpxor %xmm8,%xmm1,%xmm1 xorl %ebp,%edi xorl %eax,%ebp addl %edx,%ecx addl 20(%rsp),%ebx vpsrld $30,%xmm1,%xmm8 vmovdqa %xmm9,0(%rsp) andl %ebp,%edi xorl %eax,%ebp shrdl $7,%edx,%edx movl %ecx,%esi vpslld $2,%xmm1,%xmm1 xorl %ebp,%edi shldl $5,%ecx,%ecx addl %edi,%ebx xorl %edx,%esi xorl %ebp,%edx addl %ecx,%ebx addl 24(%rsp),%eax andl %edx,%esi vpor %xmm8,%xmm1,%xmm1 xorl %ebp,%edx shrdl $7,%ecx,%ecx movl %ebx,%edi xorl %edx,%esi 
shldl $5,%ebx,%ebx addl %esi,%eax xorl %ecx,%edi xorl %edx,%ecx addl %ebx,%eax addl 28(%rsp),%ebp andl %ecx,%edi xorl %edx,%ecx shrdl $7,%ebx,%ebx movl %eax,%esi xorl %ecx,%edi shldl $5,%eax,%eax addl %edi,%ebp xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%ebp vpalignr $8,%xmm0,%xmm1,%xmm8 vpxor %xmm6,%xmm2,%xmm2 addl 32(%rsp),%edx andl %ebx,%esi xorl %ecx,%ebx shrdl $7,%eax,%eax vpxor %xmm3,%xmm2,%xmm2 movl %ebp,%edi xorl %ebx,%esi vpaddd %xmm1,%xmm11,%xmm9 shldl $5,%ebp,%ebp addl %esi,%edx vpxor %xmm8,%xmm2,%xmm2 xorl %eax,%edi xorl %ebx,%eax addl %ebp,%edx addl 36(%rsp),%ecx vpsrld $30,%xmm2,%xmm8 vmovdqa %xmm9,16(%rsp) andl %eax,%edi xorl %ebx,%eax shrdl $7,%ebp,%ebp movl %edx,%esi vpslld $2,%xmm2,%xmm2 xorl %eax,%edi shldl $5,%edx,%edx addl %edi,%ecx xorl %ebp,%esi xorl %eax,%ebp addl %edx,%ecx addl 40(%rsp),%ebx andl %ebp,%esi vpor %xmm8,%xmm2,%xmm2 xorl %eax,%ebp shrdl $7,%edx,%edx movl %ecx,%edi xorl %ebp,%esi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edx,%edi xorl %ebp,%edx addl %ecx,%ebx addl 44(%rsp),%eax andl %edx,%edi xorl %ebp,%edx shrdl $7,%ecx,%ecx movl %ebx,%esi xorl %edx,%edi shldl $5,%ebx,%ebx addl %edi,%eax xorl %edx,%esi addl %ebx,%eax vpalignr $8,%xmm1,%xmm2,%xmm8 vpxor %xmm7,%xmm3,%xmm3 addl 48(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi shldl $5,%eax,%eax vpxor %xmm4,%xmm3,%xmm3 addl %esi,%ebp xorl %ecx,%edi vpaddd %xmm2,%xmm11,%xmm9 shrdl $7,%ebx,%ebx addl %eax,%ebp vpxor %xmm8,%xmm3,%xmm3 addl 52(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi shldl $5,%ebp,%ebp vpsrld $30,%xmm3,%xmm8 vmovdqa %xmm9,32(%rsp) addl %edi,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %ebp,%edx vpslld $2,%xmm3,%xmm3 addl 56(%rsp),%ecx xorl %eax,%esi movl %edx,%edi shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%edi shrdl $7,%ebp,%ebp addl %edx,%ecx vpor %xmm8,%xmm3,%xmm3 addl 60(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi shldl $5,%ecx,%ecx addl %edi,%ebx xorl %ebp,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl 0(%rsp),%eax vpaddd %xmm3,%xmm11,%xmm9 xorl %edx,%esi movl %ebx,%edi shldl 
$5,%ebx,%ebx addl %esi,%eax vmovdqa %xmm9,48(%rsp) xorl %edx,%edi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 4(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi shldl $5,%eax,%eax addl %edi,%ebp xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%ebp addl 8(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi shldl $5,%ebp,%ebp addl %esi,%edx xorl %ebx,%edi shrdl $7,%eax,%eax addl %ebp,%edx addl 12(%rsp),%ecx xorl %eax,%edi movl %edx,%esi shldl $5,%edx,%edx addl %edi,%ecx xorl %eax,%esi shrdl $7,%ebp,%ebp addl %edx,%ecx cmpq %r10,%r9 je L$done_avx vmovdqa 64(%r14),%xmm6 vmovdqa -64(%r14),%xmm11 vmovdqu 0(%r9),%xmm0 vmovdqu 16(%r9),%xmm1 vmovdqu 32(%r9),%xmm2 vmovdqu 48(%r9),%xmm3 vpshufb %xmm6,%xmm0,%xmm0 addq $64,%r9 addl 16(%rsp),%ebx xorl %ebp,%esi vpshufb %xmm6,%xmm1,%xmm1 movl %ecx,%edi shldl $5,%ecx,%ecx vpaddd %xmm11,%xmm0,%xmm4 addl %esi,%ebx xorl %ebp,%edi shrdl $7,%edx,%edx addl %ecx,%ebx vmovdqa %xmm4,0(%rsp) addl 20(%rsp),%eax xorl %edx,%edi movl %ebx,%esi shldl $5,%ebx,%ebx addl %edi,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 24(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi shldl $5,%eax,%eax addl %esi,%ebp xorl %ecx,%edi shrdl $7,%ebx,%ebx addl %eax,%ebp addl 28(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi shldl $5,%ebp,%ebp addl %edi,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %ebp,%edx addl 32(%rsp),%ecx xorl %eax,%esi vpshufb %xmm6,%xmm2,%xmm2 movl %edx,%edi shldl $5,%edx,%edx vpaddd %xmm11,%xmm1,%xmm5 addl %esi,%ecx xorl %eax,%edi shrdl $7,%ebp,%ebp addl %edx,%ecx vmovdqa %xmm5,16(%rsp) addl 36(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi shldl $5,%ecx,%ecx addl %edi,%ebx xorl %ebp,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl 40(%rsp),%eax xorl %edx,%esi movl %ebx,%edi shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%edi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 44(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi shldl $5,%eax,%eax addl %edi,%ebp xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%ebp addl 48(%rsp),%edx xorl %ebx,%esi vpshufb %xmm6,%xmm3,%xmm3 movl %ebp,%edi shldl $5,%ebp,%ebp 
vpaddd %xmm11,%xmm2,%xmm6 addl %esi,%edx xorl %ebx,%edi shrdl $7,%eax,%eax addl %ebp,%edx vmovdqa %xmm6,32(%rsp) addl 52(%rsp),%ecx xorl %eax,%edi movl %edx,%esi shldl $5,%edx,%edx addl %edi,%ecx xorl %eax,%esi shrdl $7,%ebp,%ebp addl %edx,%ecx addl 56(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %ebp,%edi shrdl $7,%edx,%edx addl %ecx,%ebx addl 60(%rsp),%eax xorl %edx,%edi movl %ebx,%esi shldl $5,%ebx,%ebx addl %edi,%eax shrdl $7,%ecx,%ecx addl %ebx,%eax addl 0(%r8),%eax addl 4(%r8),%esi addl 8(%r8),%ecx addl 12(%r8),%edx movl %eax,0(%r8) addl 16(%r8),%ebp movl %esi,4(%r8) movl %esi,%ebx movl %ecx,8(%r8) movl %ecx,%edi movl %edx,12(%r8) xorl %edx,%edi movl %ebp,16(%r8) andl %edi,%esi jmp L$oop_avx .p2align 4 L$done_avx: addl 16(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %ebp,%edi shrdl $7,%edx,%edx addl %ecx,%ebx addl 20(%rsp),%eax xorl %edx,%edi movl %ebx,%esi shldl $5,%ebx,%ebx addl %edi,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 24(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi shldl $5,%eax,%eax addl %esi,%ebp xorl %ecx,%edi shrdl $7,%ebx,%ebx addl %eax,%ebp addl 28(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi shldl $5,%ebp,%ebp addl %edi,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %ebp,%edx addl 32(%rsp),%ecx xorl %eax,%esi movl %edx,%edi shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%edi shrdl $7,%ebp,%ebp addl %edx,%ecx addl 36(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi shldl $5,%ecx,%ecx addl %edi,%ebx xorl %ebp,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl 40(%rsp),%eax xorl %edx,%esi movl %ebx,%edi shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%edi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 44(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi shldl $5,%eax,%eax addl %edi,%ebp xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%ebp addl 48(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi shldl $5,%ebp,%ebp addl %esi,%edx xorl %ebx,%edi shrdl $7,%eax,%eax addl %ebp,%edx addl 52(%rsp),%ecx xorl %eax,%edi movl 
%edx,%esi shldl $5,%edx,%edx addl %edi,%ecx xorl %eax,%esi shrdl $7,%ebp,%ebp addl %edx,%ecx addl 56(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %ebp,%edi shrdl $7,%edx,%edx addl %ecx,%ebx addl 60(%rsp),%eax xorl %edx,%edi movl %ebx,%esi shldl $5,%ebx,%ebx addl %edi,%eax shrdl $7,%ecx,%ecx addl %ebx,%eax vzeroupper addl 0(%r8),%eax addl 4(%r8),%esi addl 8(%r8),%ecx movl %eax,0(%r8) addl 12(%r8),%edx movl %esi,4(%r8) addl 16(%r8),%ebp movl %ecx,8(%r8) movl %edx,12(%r8) movl %ebp,16(%r8) movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp L$epilogue_avx: ret .globl _sha1_block_data_order_avx2 .private_extern _sha1_block_data_order_avx2 .p2align 4 _sha1_block_data_order_avx2: _CET_ENDBR movq %rsp,%r11 pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 vzeroupper movq %rdi,%r8 movq %rsi,%r9 movq %rdx,%r10 leaq -640(%rsp),%rsp shlq $6,%r10 leaq 64(%r9),%r13 andq $-128,%rsp addq %r9,%r10 leaq K_XX_XX+64(%rip),%r14 movl 0(%r8),%eax cmpq %r10,%r13 cmovaeq %r9,%r13 movl 4(%r8),%ebp movl 8(%r8),%ecx movl 12(%r8),%edx movl 16(%r8),%esi vmovdqu 64(%r14),%ymm6 vmovdqu (%r9),%xmm0 vmovdqu 16(%r9),%xmm1 vmovdqu 32(%r9),%xmm2 vmovdqu 48(%r9),%xmm3 leaq 64(%r9),%r9 vinserti128 $1,(%r13),%ymm0,%ymm0 vinserti128 $1,16(%r13),%ymm1,%ymm1 vpshufb %ymm6,%ymm0,%ymm0 vinserti128 $1,32(%r13),%ymm2,%ymm2 vpshufb %ymm6,%ymm1,%ymm1 vinserti128 $1,48(%r13),%ymm3,%ymm3 vpshufb %ymm6,%ymm2,%ymm2 vmovdqu -64(%r14),%ymm11 vpshufb %ymm6,%ymm3,%ymm3 vpaddd %ymm11,%ymm0,%ymm4 vpaddd %ymm11,%ymm1,%ymm5 vmovdqu %ymm4,0(%rsp) vpaddd %ymm11,%ymm2,%ymm6 vmovdqu %ymm5,32(%rsp) vpaddd %ymm11,%ymm3,%ymm7 vmovdqu %ymm6,64(%rsp) vmovdqu %ymm7,96(%rsp) vpalignr $8,%ymm0,%ymm1,%ymm4 vpsrldq $4,%ymm3,%ymm8 vpxor %ymm0,%ymm4,%ymm4 vpxor %ymm2,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $31,%ymm4,%ymm8 vpslldq $12,%ymm4,%ymm10 vpaddd %ymm4,%ymm4,%ymm4 vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm4,%ymm4 vpslld 
$2,%ymm10,%ymm10 vpxor %ymm9,%ymm4,%ymm4 vpxor %ymm10,%ymm4,%ymm4 vpaddd %ymm11,%ymm4,%ymm9 vmovdqu %ymm9,128(%rsp) vpalignr $8,%ymm1,%ymm2,%ymm5 vpsrldq $4,%ymm4,%ymm8 vpxor %ymm1,%ymm5,%ymm5 vpxor %ymm3,%ymm8,%ymm8 vpxor %ymm8,%ymm5,%ymm5 vpsrld $31,%ymm5,%ymm8 vmovdqu -32(%r14),%ymm11 vpslldq $12,%ymm5,%ymm10 vpaddd %ymm5,%ymm5,%ymm5 vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm5,%ymm5 vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm10,%ymm5,%ymm5 vpaddd %ymm11,%ymm5,%ymm9 vmovdqu %ymm9,160(%rsp) vpalignr $8,%ymm2,%ymm3,%ymm6 vpsrldq $4,%ymm5,%ymm8 vpxor %ymm2,%ymm6,%ymm6 vpxor %ymm4,%ymm8,%ymm8 vpxor %ymm8,%ymm6,%ymm6 vpsrld $31,%ymm6,%ymm8 vpslldq $12,%ymm6,%ymm10 vpaddd %ymm6,%ymm6,%ymm6 vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm6,%ymm6 vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm6,%ymm6 vpxor %ymm10,%ymm6,%ymm6 vpaddd %ymm11,%ymm6,%ymm9 vmovdqu %ymm9,192(%rsp) vpalignr $8,%ymm3,%ymm4,%ymm7 vpsrldq $4,%ymm6,%ymm8 vpxor %ymm3,%ymm7,%ymm7 vpxor %ymm5,%ymm8,%ymm8 vpxor %ymm8,%ymm7,%ymm7 vpsrld $31,%ymm7,%ymm8 vpslldq $12,%ymm7,%ymm10 vpaddd %ymm7,%ymm7,%ymm7 vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm7,%ymm7 vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm7,%ymm7 vpxor %ymm10,%ymm7,%ymm7 vpaddd %ymm11,%ymm7,%ymm9 vmovdqu %ymm9,224(%rsp) leaq 128(%rsp),%r13 jmp L$oop_avx2 .p2align 5 L$oop_avx2: rorxl $2,%ebp,%ebx andnl %edx,%ebp,%edi andl %ecx,%ebp xorl %edi,%ebp jmp L$align32_1 .p2align 5 L$align32_1: vpalignr $8,%ymm6,%ymm7,%ymm8 vpxor %ymm4,%ymm0,%ymm0 addl -128(%r13),%esi andnl %ecx,%eax,%edi vpxor %ymm1,%ymm0,%ymm0 addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp vpxor %ymm8,%ymm0,%ymm0 andl %ebx,%eax addl %r12d,%esi xorl %edi,%eax vpsrld $30,%ymm0,%ymm8 vpslld $2,%ymm0,%ymm0 addl -124(%r13),%edx andnl %ebx,%esi,%edi addl %eax,%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax andl %ebp,%esi vpor %ymm8,%ymm0,%ymm0 addl %r12d,%edx xorl %edi,%esi addl -120(%r13),%ecx andnl %ebp,%edx,%edi vpaddd %ymm11,%ymm0,%ymm9 addl %esi,%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi andl 
%eax,%edx vmovdqu %ymm9,256(%rsp) addl %r12d,%ecx xorl %edi,%edx addl -116(%r13),%ebx andnl %eax,%ecx,%edi addl %edx,%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx andl %esi,%ecx addl %r12d,%ebx xorl %edi,%ecx addl -96(%r13),%ebp andnl %esi,%ebx,%edi addl %ecx,%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx andl %edx,%ebx addl %r12d,%ebp xorl %edi,%ebx vpalignr $8,%ymm7,%ymm0,%ymm8 vpxor %ymm5,%ymm1,%ymm1 addl -92(%r13),%eax andnl %edx,%ebp,%edi vpxor %ymm2,%ymm1,%ymm1 addl %ebx,%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx vpxor %ymm8,%ymm1,%ymm1 andl %ecx,%ebp addl %r12d,%eax xorl %edi,%ebp vpsrld $30,%ymm1,%ymm8 vpslld $2,%ymm1,%ymm1 addl -88(%r13),%esi andnl %ecx,%eax,%edi addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax vpor %ymm8,%ymm1,%ymm1 addl %r12d,%esi xorl %edi,%eax addl -84(%r13),%edx andnl %ebx,%esi,%edi vpaddd %ymm11,%ymm1,%ymm9 addl %eax,%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax andl %ebp,%esi vmovdqu %ymm9,288(%rsp) addl %r12d,%edx xorl %edi,%esi addl -64(%r13),%ecx andnl %ebp,%edx,%edi addl %esi,%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi andl %eax,%edx addl %r12d,%ecx xorl %edi,%edx addl -60(%r13),%ebx andnl %eax,%ecx,%edi addl %edx,%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx andl %esi,%ecx addl %r12d,%ebx xorl %edi,%ecx vpalignr $8,%ymm0,%ymm1,%ymm8 vpxor %ymm6,%ymm2,%ymm2 addl -56(%r13),%ebp andnl %esi,%ebx,%edi vpxor %ymm3,%ymm2,%ymm2 vmovdqu 0(%r14),%ymm11 addl %ecx,%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx vpxor %ymm8,%ymm2,%ymm2 andl %edx,%ebx addl %r12d,%ebp xorl %edi,%ebx vpsrld $30,%ymm2,%ymm8 vpslld $2,%ymm2,%ymm2 addl -52(%r13),%eax andnl %edx,%ebp,%edi addl %ebx,%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx andl %ecx,%ebp vpor %ymm8,%ymm2,%ymm2 addl %r12d,%eax xorl %edi,%ebp addl -32(%r13),%esi andnl %ecx,%eax,%edi vpaddd %ymm11,%ymm2,%ymm9 addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax vmovdqu %ymm9,320(%rsp) addl %r12d,%esi xorl %edi,%eax addl -28(%r13),%edx andnl %ebx,%esi,%edi addl 
%eax,%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax andl %ebp,%esi addl %r12d,%edx xorl %edi,%esi addl -24(%r13),%ecx andnl %ebp,%edx,%edi addl %esi,%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi andl %eax,%edx addl %r12d,%ecx xorl %edi,%edx vpalignr $8,%ymm1,%ymm2,%ymm8 vpxor %ymm7,%ymm3,%ymm3 addl -20(%r13),%ebx andnl %eax,%ecx,%edi vpxor %ymm4,%ymm3,%ymm3 addl %edx,%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx vpxor %ymm8,%ymm3,%ymm3 andl %esi,%ecx addl %r12d,%ebx xorl %edi,%ecx vpsrld $30,%ymm3,%ymm8 vpslld $2,%ymm3,%ymm3 addl 0(%r13),%ebp andnl %esi,%ebx,%edi addl %ecx,%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx andl %edx,%ebx vpor %ymm8,%ymm3,%ymm3 addl %r12d,%ebp xorl %edi,%ebx addl 4(%r13),%eax andnl %edx,%ebp,%edi vpaddd %ymm11,%ymm3,%ymm9 addl %ebx,%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx andl %ecx,%ebp vmovdqu %ymm9,352(%rsp) addl %r12d,%eax xorl %edi,%ebp addl 8(%r13),%esi andnl %ecx,%eax,%edi addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax addl %r12d,%esi xorl %edi,%eax addl 12(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi vpalignr $8,%ymm2,%ymm3,%ymm8 vpxor %ymm0,%ymm4,%ymm4 addl 32(%r13),%ecx leal (%rcx,%rsi,1),%ecx vpxor %ymm5,%ymm4,%ymm4 rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx vpxor %ymm8,%ymm4,%ymm4 addl %r12d,%ecx xorl %ebp,%edx addl 36(%r13),%ebx vpsrld $30,%ymm4,%ymm8 vpslld $2,%ymm4,%ymm4 leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx vpor %ymm8,%ymm4,%ymm4 addl 40(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx vpaddd %ymm11,%ymm4,%ymm9 xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl 44(%r13),%eax vmovdqu %ymm9,384(%rsp) leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl 64(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi 
xorl %ecx,%eax vpalignr $8,%ymm3,%ymm4,%ymm8 vpxor %ymm1,%ymm5,%ymm5 addl 68(%r13),%edx leal (%rdx,%rax,1),%edx vpxor %ymm6,%ymm5,%ymm5 rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi vpxor %ymm8,%ymm5,%ymm5 addl %r12d,%edx xorl %ebx,%esi addl 72(%r13),%ecx vpsrld $30,%ymm5,%ymm8 vpslld $2,%ymm5,%ymm5 leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx vpor %ymm8,%ymm5,%ymm5 addl 76(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx vpaddd %ymm11,%ymm5,%ymm9 xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl 96(%r13),%ebp vmovdqu %ymm9,416(%rsp) leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl 100(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp vpalignr $8,%ymm4,%ymm5,%ymm8 vpxor %ymm2,%ymm6,%ymm6 addl 104(%r13),%esi leal (%rsi,%rbp,1),%esi vpxor %ymm7,%ymm6,%ymm6 rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax vpxor %ymm8,%ymm6,%ymm6 addl %r12d,%esi xorl %ecx,%eax addl 108(%r13),%edx leaq 256(%r13),%r13 vpsrld $30,%ymm6,%ymm8 vpslld $2,%ymm6,%ymm6 leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi vpor %ymm8,%ymm6,%ymm6 addl -128(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi vpaddd %ymm11,%ymm6,%ymm9 xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl -124(%r13),%ebx vmovdqu %ymm9,448(%rsp) leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl -120(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx vpalignr $8,%ymm5,%ymm6,%ymm8 vpxor %ymm3,%ymm7,%ymm7 addl -116(%r13),%eax leal (%rax,%rbx,1),%eax vpxor %ymm0,%ymm7,%ymm7 vmovdqu 32(%r14),%ymm11 rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp vpxor %ymm8,%ymm7,%ymm7 
addl %r12d,%eax xorl %edx,%ebp addl -96(%r13),%esi vpsrld $30,%ymm7,%ymm8 vpslld $2,%ymm7,%ymm7 leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax vpor %ymm8,%ymm7,%ymm7 addl -92(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax vpaddd %ymm11,%ymm7,%ymm9 xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl -88(%r13),%ecx vmovdqu %ymm9,480(%rsp) leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl -84(%r13),%ebx movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx andl %edi,%ecx jmp L$align32_2 .p2align 5 L$align32_2: vpalignr $8,%ymm6,%ymm7,%ymm8 vpxor %ymm4,%ymm0,%ymm0 addl -64(%r13),%ebp xorl %esi,%ecx vpxor %ymm1,%ymm0,%ymm0 movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp vpxor %ymm8,%ymm0,%ymm0 rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx vpsrld $30,%ymm0,%ymm8 vpslld $2,%ymm0,%ymm0 addl %r12d,%ebp andl %edi,%ebx addl -60(%r13),%eax xorl %edx,%ebx movl %ecx,%edi xorl %edx,%edi vpor %ymm8,%ymm0,%ymm0 leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp vpaddd %ymm11,%ymm0,%ymm9 addl %r12d,%eax andl %edi,%ebp addl -56(%r13),%esi xorl %ecx,%ebp vmovdqu %ymm9,512(%rsp) movl %ebx,%edi xorl %ecx,%edi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi andl %edi,%eax addl -52(%r13),%edx xorl %ebx,%eax movl %ebp,%edi xorl %ebx,%edi leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx andl %edi,%esi addl -32(%r13),%ecx xorl %ebp,%esi movl %eax,%edi xorl %ebp,%edi leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx andl %edi,%edx vpalignr $8,%ymm7,%ymm0,%ymm8 vpxor %ymm5,%ymm1,%ymm1 addl -28(%r13),%ebx xorl %eax,%edx vpxor %ymm2,%ymm1,%ymm1 movl %esi,%edi xorl %eax,%edi leal 
(%rbx,%rdx,1),%ebx vpxor %ymm8,%ymm1,%ymm1 rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx vpsrld $30,%ymm1,%ymm8 vpslld $2,%ymm1,%ymm1 addl %r12d,%ebx andl %edi,%ecx addl -24(%r13),%ebp xorl %esi,%ecx movl %edx,%edi xorl %esi,%edi vpor %ymm8,%ymm1,%ymm1 leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx vpaddd %ymm11,%ymm1,%ymm9 addl %r12d,%ebp andl %edi,%ebx addl -20(%r13),%eax xorl %edx,%ebx vmovdqu %ymm9,544(%rsp) movl %ecx,%edi xorl %edx,%edi leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax andl %edi,%ebp addl 0(%r13),%esi xorl %ecx,%ebp movl %ebx,%edi xorl %ecx,%edi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi andl %edi,%eax addl 4(%r13),%edx xorl %ebx,%eax movl %ebp,%edi xorl %ebx,%edi leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx andl %edi,%esi vpalignr $8,%ymm0,%ymm1,%ymm8 vpxor %ymm6,%ymm2,%ymm2 addl 8(%r13),%ecx xorl %ebp,%esi vpxor %ymm3,%ymm2,%ymm2 movl %eax,%edi xorl %ebp,%edi leal (%rcx,%rsi,1),%ecx vpxor %ymm8,%ymm2,%ymm2 rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx vpsrld $30,%ymm2,%ymm8 vpslld $2,%ymm2,%ymm2 addl %r12d,%ecx andl %edi,%edx addl 12(%r13),%ebx xorl %eax,%edx movl %esi,%edi xorl %eax,%edi vpor %ymm8,%ymm2,%ymm2 leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx vpaddd %ymm11,%ymm2,%ymm9 addl %r12d,%ebx andl %edi,%ecx addl 32(%r13),%ebp xorl %esi,%ecx vmovdqu %ymm9,576(%rsp) movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp andl %edi,%ebx addl 36(%r13),%eax xorl %edx,%ebx movl %ecx,%edi xorl %edx,%edi leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax andl %edi,%ebp addl 40(%r13),%esi xorl %ecx,%ebp movl %ebx,%edi xorl %ecx,%edi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl 
%ebx,%eax addl %r12d,%esi andl %edi,%eax vpalignr $8,%ymm1,%ymm2,%ymm8 vpxor %ymm7,%ymm3,%ymm3 addl 44(%r13),%edx xorl %ebx,%eax vpxor %ymm4,%ymm3,%ymm3 movl %ebp,%edi xorl %ebx,%edi leal (%rdx,%rax,1),%edx vpxor %ymm8,%ymm3,%ymm3 rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi vpsrld $30,%ymm3,%ymm8 vpslld $2,%ymm3,%ymm3 addl %r12d,%edx andl %edi,%esi addl 64(%r13),%ecx xorl %ebp,%esi movl %eax,%edi xorl %ebp,%edi vpor %ymm8,%ymm3,%ymm3 leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx vpaddd %ymm11,%ymm3,%ymm9 addl %r12d,%ecx andl %edi,%edx addl 68(%r13),%ebx xorl %eax,%edx vmovdqu %ymm9,608(%rsp) movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx andl %edi,%ecx addl 72(%r13),%ebp xorl %esi,%ecx movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp andl %edi,%ebx addl 76(%r13),%eax xorl %edx,%ebx leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl 96(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl 100(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl 104(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl 108(%r13),%ebx leaq 256(%r13),%r13 leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl -128(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl -124(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl -120(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl 
%ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl -116(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl -96(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl -92(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl -88(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl -84(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl -64(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl -60(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl -56(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl -52(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl -32(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl -28(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl -24(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl -20(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d addl %r12d,%edx leaq 128(%r9),%r13 leaq 128(%r9),%rdi cmpq %r10,%r13 cmovaeq %r9,%r13 addl 0(%r8),%edx addl 4(%r8),%esi addl 8(%r8),%ebp movl %edx,0(%r8) addl 12(%r8),%ebx movl %esi,4(%r8) movl %edx,%eax addl 16(%r8),%ecx movl %ebp,%r12d movl %ebp,8(%r8) movl %ebx,%edx movl %ebx,12(%r8) movl %esi,%ebp movl %ecx,16(%r8) movl 
%ecx,%esi movl %r12d,%ecx cmpq %r10,%r9 je L$done_avx2 vmovdqu 64(%r14),%ymm6 cmpq %r10,%rdi ja L$ast_avx2 vmovdqu -64(%rdi),%xmm0 vmovdqu -48(%rdi),%xmm1 vmovdqu -32(%rdi),%xmm2 vmovdqu -16(%rdi),%xmm3 vinserti128 $1,0(%r13),%ymm0,%ymm0 vinserti128 $1,16(%r13),%ymm1,%ymm1 vinserti128 $1,32(%r13),%ymm2,%ymm2 vinserti128 $1,48(%r13),%ymm3,%ymm3 jmp L$ast_avx2 .p2align 5 L$ast_avx2: leaq 128+16(%rsp),%r13 rorxl $2,%ebp,%ebx andnl %edx,%ebp,%edi andl %ecx,%ebp xorl %edi,%ebp subq $-128,%r9 addl -128(%r13),%esi andnl %ecx,%eax,%edi addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax addl %r12d,%esi xorl %edi,%eax addl -124(%r13),%edx andnl %ebx,%esi,%edi addl %eax,%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax andl %ebp,%esi addl %r12d,%edx xorl %edi,%esi addl -120(%r13),%ecx andnl %ebp,%edx,%edi addl %esi,%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi andl %eax,%edx addl %r12d,%ecx xorl %edi,%edx addl -116(%r13),%ebx andnl %eax,%ecx,%edi addl %edx,%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx andl %esi,%ecx addl %r12d,%ebx xorl %edi,%ecx addl -96(%r13),%ebp andnl %esi,%ebx,%edi addl %ecx,%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx andl %edx,%ebx addl %r12d,%ebp xorl %edi,%ebx addl -92(%r13),%eax andnl %edx,%ebp,%edi addl %ebx,%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx andl %ecx,%ebp addl %r12d,%eax xorl %edi,%ebp addl -88(%r13),%esi andnl %ecx,%eax,%edi addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax addl %r12d,%esi xorl %edi,%eax addl -84(%r13),%edx andnl %ebx,%esi,%edi addl %eax,%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax andl %ebp,%esi addl %r12d,%edx xorl %edi,%esi addl -64(%r13),%ecx andnl %ebp,%edx,%edi addl %esi,%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi andl %eax,%edx addl %r12d,%ecx xorl %edi,%edx addl -60(%r13),%ebx andnl %eax,%ecx,%edi addl %edx,%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx andl %esi,%ecx addl %r12d,%ebx xorl %edi,%ecx addl -56(%r13),%ebp andnl %esi,%ebx,%edi addl %ecx,%ebp rorxl $27,%ebx,%r12d rorxl 
$2,%ebx,%ecx andl %edx,%ebx addl %r12d,%ebp xorl %edi,%ebx addl -52(%r13),%eax andnl %edx,%ebp,%edi addl %ebx,%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx andl %ecx,%ebp addl %r12d,%eax xorl %edi,%ebp addl -32(%r13),%esi andnl %ecx,%eax,%edi addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax addl %r12d,%esi xorl %edi,%eax addl -28(%r13),%edx andnl %ebx,%esi,%edi addl %eax,%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax andl %ebp,%esi addl %r12d,%edx xorl %edi,%esi addl -24(%r13),%ecx andnl %ebp,%edx,%edi addl %esi,%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi andl %eax,%edx addl %r12d,%ecx xorl %edi,%edx addl -20(%r13),%ebx andnl %eax,%ecx,%edi addl %edx,%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx andl %esi,%ecx addl %r12d,%ebx xorl %edi,%ecx addl 0(%r13),%ebp andnl %esi,%ebx,%edi addl %ecx,%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx andl %edx,%ebx addl %r12d,%ebp xorl %edi,%ebx addl 4(%r13),%eax andnl %edx,%ebp,%edi addl %ebx,%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx andl %ecx,%ebp addl %r12d,%eax xorl %edi,%ebp addl 8(%r13),%esi andnl %ecx,%eax,%edi addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax addl %r12d,%esi xorl %edi,%eax addl 12(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl 32(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl 36(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl 40(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl 44(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl 64(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax vmovdqu -64(%r14),%ymm11 vpshufb 
%ymm6,%ymm0,%ymm0 addl 68(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl 72(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl 76(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl 96(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl 100(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp vpshufb %ymm6,%ymm1,%ymm1 vpaddd %ymm11,%ymm0,%ymm8 addl 104(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl 108(%r13),%edx leaq 256(%r13),%r13 leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl -128(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl -124(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl -120(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx vmovdqu %ymm8,0(%rsp) vpshufb %ymm6,%ymm2,%ymm2 vpaddd %ymm11,%ymm1,%ymm9 addl -116(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl -96(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl -92(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl -88(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl 
-84(%r13),%ebx movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx andl %edi,%ecx vmovdqu %ymm9,32(%rsp) vpshufb %ymm6,%ymm3,%ymm3 vpaddd %ymm11,%ymm2,%ymm6 addl -64(%r13),%ebp xorl %esi,%ecx movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp andl %edi,%ebx addl -60(%r13),%eax xorl %edx,%ebx movl %ecx,%edi xorl %edx,%edi leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax andl %edi,%ebp addl -56(%r13),%esi xorl %ecx,%ebp movl %ebx,%edi xorl %ecx,%edi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi andl %edi,%eax addl -52(%r13),%edx xorl %ebx,%eax movl %ebp,%edi xorl %ebx,%edi leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx andl %edi,%esi addl -32(%r13),%ecx xorl %ebp,%esi movl %eax,%edi xorl %ebp,%edi leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx andl %edi,%edx jmp L$align32_3 .p2align 5 L$align32_3: vmovdqu %ymm6,64(%rsp) vpaddd %ymm11,%ymm3,%ymm7 addl -28(%r13),%ebx xorl %eax,%edx movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx andl %edi,%ecx addl -24(%r13),%ebp xorl %esi,%ecx movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp andl %edi,%ebx addl -20(%r13),%eax xorl %edx,%ebx movl %ecx,%edi xorl %edx,%edi leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax andl %edi,%ebp addl 0(%r13),%esi xorl %ecx,%ebp movl %ebx,%edi xorl %ecx,%edi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi andl %edi,%eax addl 4(%r13),%edx xorl %ebx,%eax movl %ebp,%edi xorl %ebx,%edi leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d 
rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx andl %edi,%esi vmovdqu %ymm7,96(%rsp) addl 8(%r13),%ecx xorl %ebp,%esi movl %eax,%edi xorl %ebp,%edi leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx andl %edi,%edx addl 12(%r13),%ebx xorl %eax,%edx movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx andl %edi,%ecx addl 32(%r13),%ebp xorl %esi,%ecx movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp andl %edi,%ebx addl 36(%r13),%eax xorl %edx,%ebx movl %ecx,%edi xorl %edx,%edi leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax andl %edi,%ebp addl 40(%r13),%esi xorl %ecx,%ebp movl %ebx,%edi xorl %ecx,%edi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi andl %edi,%eax vpalignr $8,%ymm0,%ymm1,%ymm4 addl 44(%r13),%edx xorl %ebx,%eax movl %ebp,%edi xorl %ebx,%edi vpsrldq $4,%ymm3,%ymm8 leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax vpxor %ymm0,%ymm4,%ymm4 vpxor %ymm2,%ymm8,%ymm8 xorl %ebp,%esi addl %r12d,%edx vpxor %ymm8,%ymm4,%ymm4 andl %edi,%esi addl 64(%r13),%ecx xorl %ebp,%esi movl %eax,%edi vpsrld $31,%ymm4,%ymm8 xorl %ebp,%edi leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d vpslldq $12,%ymm4,%ymm10 vpaddd %ymm4,%ymm4,%ymm4 rorxl $2,%edx,%esi xorl %eax,%edx vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm4,%ymm4 addl %r12d,%ecx andl %edi,%edx vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm4,%ymm4 addl 68(%r13),%ebx xorl %eax,%edx vpxor %ymm10,%ymm4,%ymm4 movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx vpaddd %ymm11,%ymm4,%ymm9 rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx vmovdqu %ymm9,128(%rsp) addl %r12d,%ebx andl %edi,%ecx addl 72(%r13),%ebp xorl %esi,%ecx movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl 
%r12d,%ebp andl %edi,%ebx addl 76(%r13),%eax xorl %edx,%ebx leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp vpalignr $8,%ymm1,%ymm2,%ymm5 addl 96(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp vpsrldq $4,%ymm4,%ymm8 xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax vpxor %ymm1,%ymm5,%ymm5 vpxor %ymm3,%ymm8,%ymm8 addl 100(%r13),%edx leal (%rdx,%rax,1),%edx vpxor %ymm8,%ymm5,%ymm5 rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx vpsrld $31,%ymm5,%ymm8 vmovdqu -32(%r14),%ymm11 xorl %ebx,%esi addl 104(%r13),%ecx leal (%rcx,%rsi,1),%ecx vpslldq $12,%ymm5,%ymm10 vpaddd %ymm5,%ymm5,%ymm5 rorxl $27,%edx,%r12d rorxl $2,%edx,%esi vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm5,%ymm5 xorl %eax,%edx addl %r12d,%ecx vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm5,%ymm5 xorl %ebp,%edx addl 108(%r13),%ebx leaq 256(%r13),%r13 vpxor %ymm10,%ymm5,%ymm5 leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx vpaddd %ymm11,%ymm5,%ymm9 xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx vmovdqu %ymm9,160(%rsp) addl -128(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx vpalignr $8,%ymm2,%ymm3,%ymm6 addl -124(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx vpsrldq $4,%ymm5,%ymm8 xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp vpxor %ymm2,%ymm6,%ymm6 vpxor %ymm4,%ymm8,%ymm8 addl -120(%r13),%esi leal (%rsi,%rbp,1),%esi vpxor %ymm8,%ymm6,%ymm6 rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi vpsrld $31,%ymm6,%ymm8 xorl %ecx,%eax addl -116(%r13),%edx leal (%rdx,%rax,1),%edx vpslldq $12,%ymm6,%ymm10 vpaddd %ymm6,%ymm6,%ymm6 rorxl $27,%esi,%r12d rorxl $2,%esi,%eax vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm6,%ymm6 xorl %ebp,%esi addl %r12d,%edx vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm6,%ymm6 xorl %ebx,%esi addl -96(%r13),%ecx vpxor %ymm10,%ymm6,%ymm6 leal (%rcx,%rsi,1),%ecx rorxl 
$27,%edx,%r12d rorxl $2,%edx,%esi vpaddd %ymm11,%ymm6,%ymm9 xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx vmovdqu %ymm9,192(%rsp) addl -92(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx vpalignr $8,%ymm3,%ymm4,%ymm7 addl -88(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx vpsrldq $4,%ymm6,%ymm8 xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx vpxor %ymm3,%ymm7,%ymm7 vpxor %ymm5,%ymm8,%ymm8 addl -84(%r13),%eax leal (%rax,%rbx,1),%eax vpxor %ymm8,%ymm7,%ymm7 rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax vpsrld $31,%ymm7,%ymm8 xorl %edx,%ebp addl -64(%r13),%esi leal (%rsi,%rbp,1),%esi vpslldq $12,%ymm7,%ymm10 vpaddd %ymm7,%ymm7,%ymm7 rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm7,%ymm7 xorl %ebx,%eax addl %r12d,%esi vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm7,%ymm7 xorl %ecx,%eax addl -60(%r13),%edx vpxor %ymm10,%ymm7,%ymm7 leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax vpaddd %ymm11,%ymm7,%ymm9 xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi vmovdqu %ymm9,224(%rsp) addl -56(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl -52(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl -32(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl -28(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl -24(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl -20(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d addl %r12d,%edx leaq 128(%rsp),%r13 addl 0(%r8),%edx addl 4(%r8),%esi addl 8(%r8),%ebp movl %edx,0(%r8) addl 12(%r8),%ebx movl 
%esi,4(%r8) movl %edx,%eax addl 16(%r8),%ecx movl %ebp,%r12d movl %ebp,8(%r8) movl %ebx,%edx movl %ebx,12(%r8) movl %esi,%ebp movl %ecx,16(%r8) movl %ecx,%esi movl %r12d,%ecx cmpq %r10,%r9 jbe L$oop_avx2 L$done_avx2: vzeroupper movq -40(%r11),%r14 movq -32(%r11),%r13 movq -24(%r11),%r12 movq -16(%r11),%rbp movq -8(%r11),%rbx leaq (%r11),%rsp L$epilogue_avx2: ret .section __DATA,__const .p2align 6 K_XX_XX: .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0 .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .p2align 6 .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha1-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl sha1_block_data_order_nohw .hidden sha1_block_data_order_nohw .type sha1_block_data_order_nohw,@function .align 16 sha1_block_data_order_nohw: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 movq %rdi,%r8 subq $72,%rsp movq %rsi,%r9 andq $-64,%rsp movq %rdx,%r10 movq %rax,64(%rsp) .cfi_escape 0x0f,0x06,0x77,0xc0,0x00,0x06,0x23,0x08 .Lprologue: movl 0(%r8),%esi movl 4(%r8),%edi movl 8(%r8),%r11d movl 12(%r8),%r12d movl 16(%r8),%r13d jmp .Lloop .align 16 .Lloop: movl 0(%r9),%edx bswapl %edx movl 4(%r9),%ebp movl %r12d,%eax movl %edx,0(%rsp) movl %esi,%ecx bswapl %ebp xorl %r11d,%eax roll $5,%ecx andl %edi,%eax leal 1518500249(%rdx,%r13,1),%r13d addl %ecx,%r13d xorl %r12d,%eax roll $30,%edi addl %eax,%r13d movl 8(%r9),%r14d movl %r11d,%eax movl %ebp,4(%rsp) movl %r13d,%ecx bswapl %r14d xorl %edi,%eax roll $5,%ecx andl %esi,%eax leal 1518500249(%rbp,%r12,1),%r12d addl %ecx,%r12d xorl %r11d,%eax roll $30,%esi addl %eax,%r12d movl 12(%r9),%edx movl %edi,%eax movl %r14d,8(%rsp) movl %r12d,%ecx bswapl %edx xorl %esi,%eax roll $5,%ecx andl %r13d,%eax leal 1518500249(%r14,%r11,1),%r11d addl %ecx,%r11d xorl %edi,%eax roll $30,%r13d addl %eax,%r11d movl 16(%r9),%ebp movl %esi,%eax movl %edx,12(%rsp) movl %r11d,%ecx bswapl %ebp xorl %r13d,%eax roll $5,%ecx andl %r12d,%eax leal 1518500249(%rdx,%rdi,1),%edi addl %ecx,%edi xorl %esi,%eax roll $30,%r12d addl %eax,%edi movl 20(%r9),%r14d movl %r13d,%eax movl %ebp,16(%rsp) movl %edi,%ecx bswapl %r14d xorl %r12d,%eax roll $5,%ecx andl %r11d,%eax leal 1518500249(%rbp,%rsi,1),%esi addl %ecx,%esi xorl %r13d,%eax roll $30,%r11d addl %eax,%esi movl 24(%r9),%edx movl %r12d,%eax movl %r14d,20(%rsp) movl %esi,%ecx bswapl %edx xorl %r11d,%eax roll $5,%ecx andl %edi,%eax 
leal 1518500249(%r14,%r13,1),%r13d addl %ecx,%r13d xorl %r12d,%eax roll $30,%edi addl %eax,%r13d movl 28(%r9),%ebp movl %r11d,%eax movl %edx,24(%rsp) movl %r13d,%ecx bswapl %ebp xorl %edi,%eax roll $5,%ecx andl %esi,%eax leal 1518500249(%rdx,%r12,1),%r12d addl %ecx,%r12d xorl %r11d,%eax roll $30,%esi addl %eax,%r12d movl 32(%r9),%r14d movl %edi,%eax movl %ebp,28(%rsp) movl %r12d,%ecx bswapl %r14d xorl %esi,%eax roll $5,%ecx andl %r13d,%eax leal 1518500249(%rbp,%r11,1),%r11d addl %ecx,%r11d xorl %edi,%eax roll $30,%r13d addl %eax,%r11d movl 36(%r9),%edx movl %esi,%eax movl %r14d,32(%rsp) movl %r11d,%ecx bswapl %edx xorl %r13d,%eax roll $5,%ecx andl %r12d,%eax leal 1518500249(%r14,%rdi,1),%edi addl %ecx,%edi xorl %esi,%eax roll $30,%r12d addl %eax,%edi movl 40(%r9),%ebp movl %r13d,%eax movl %edx,36(%rsp) movl %edi,%ecx bswapl %ebp xorl %r12d,%eax roll $5,%ecx andl %r11d,%eax leal 1518500249(%rdx,%rsi,1),%esi addl %ecx,%esi xorl %r13d,%eax roll $30,%r11d addl %eax,%esi movl 44(%r9),%r14d movl %r12d,%eax movl %ebp,40(%rsp) movl %esi,%ecx bswapl %r14d xorl %r11d,%eax roll $5,%ecx andl %edi,%eax leal 1518500249(%rbp,%r13,1),%r13d addl %ecx,%r13d xorl %r12d,%eax roll $30,%edi addl %eax,%r13d movl 48(%r9),%edx movl %r11d,%eax movl %r14d,44(%rsp) movl %r13d,%ecx bswapl %edx xorl %edi,%eax roll $5,%ecx andl %esi,%eax leal 1518500249(%r14,%r12,1),%r12d addl %ecx,%r12d xorl %r11d,%eax roll $30,%esi addl %eax,%r12d movl 52(%r9),%ebp movl %edi,%eax movl %edx,48(%rsp) movl %r12d,%ecx bswapl %ebp xorl %esi,%eax roll $5,%ecx andl %r13d,%eax leal 1518500249(%rdx,%r11,1),%r11d addl %ecx,%r11d xorl %edi,%eax roll $30,%r13d addl %eax,%r11d movl 56(%r9),%r14d movl %esi,%eax movl %ebp,52(%rsp) movl %r11d,%ecx bswapl %r14d xorl %r13d,%eax roll $5,%ecx andl %r12d,%eax leal 1518500249(%rbp,%rdi,1),%edi addl %ecx,%edi xorl %esi,%eax roll $30,%r12d addl %eax,%edi movl 60(%r9),%edx movl %r13d,%eax movl %r14d,56(%rsp) movl %edi,%ecx bswapl %edx xorl %r12d,%eax roll $5,%ecx andl %r11d,%eax leal 
1518500249(%r14,%rsi,1),%esi addl %ecx,%esi xorl %r13d,%eax roll $30,%r11d addl %eax,%esi xorl 0(%rsp),%ebp movl %r12d,%eax movl %edx,60(%rsp) movl %esi,%ecx xorl 8(%rsp),%ebp xorl %r11d,%eax roll $5,%ecx xorl 32(%rsp),%ebp andl %edi,%eax leal 1518500249(%rdx,%r13,1),%r13d roll $30,%edi xorl %r12d,%eax addl %ecx,%r13d roll $1,%ebp addl %eax,%r13d xorl 4(%rsp),%r14d movl %r11d,%eax movl %ebp,0(%rsp) movl %r13d,%ecx xorl 12(%rsp),%r14d xorl %edi,%eax roll $5,%ecx xorl 36(%rsp),%r14d andl %esi,%eax leal 1518500249(%rbp,%r12,1),%r12d roll $30,%esi xorl %r11d,%eax addl %ecx,%r12d roll $1,%r14d addl %eax,%r12d xorl 8(%rsp),%edx movl %edi,%eax movl %r14d,4(%rsp) movl %r12d,%ecx xorl 16(%rsp),%edx xorl %esi,%eax roll $5,%ecx xorl 40(%rsp),%edx andl %r13d,%eax leal 1518500249(%r14,%r11,1),%r11d roll $30,%r13d xorl %edi,%eax addl %ecx,%r11d roll $1,%edx addl %eax,%r11d xorl 12(%rsp),%ebp movl %esi,%eax movl %edx,8(%rsp) movl %r11d,%ecx xorl 20(%rsp),%ebp xorl %r13d,%eax roll $5,%ecx xorl 44(%rsp),%ebp andl %r12d,%eax leal 1518500249(%rdx,%rdi,1),%edi roll $30,%r12d xorl %esi,%eax addl %ecx,%edi roll $1,%ebp addl %eax,%edi xorl 16(%rsp),%r14d movl %r13d,%eax movl %ebp,12(%rsp) movl %edi,%ecx xorl 24(%rsp),%r14d xorl %r12d,%eax roll $5,%ecx xorl 48(%rsp),%r14d andl %r11d,%eax leal 1518500249(%rbp,%rsi,1),%esi roll $30,%r11d xorl %r13d,%eax addl %ecx,%esi roll $1,%r14d addl %eax,%esi xorl 20(%rsp),%edx movl %edi,%eax movl %r14d,16(%rsp) movl %esi,%ecx xorl 28(%rsp),%edx xorl %r12d,%eax roll $5,%ecx xorl 52(%rsp),%edx leal 1859775393(%r14,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%edx xorl 24(%rsp),%ebp movl %esi,%eax movl %edx,20(%rsp) movl %r13d,%ecx xorl 32(%rsp),%ebp xorl %r11d,%eax roll $5,%ecx xorl 56(%rsp),%ebp leal 1859775393(%rdx,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%ebp xorl 28(%rsp),%r14d movl %r13d,%eax movl %ebp,24(%rsp) movl %r12d,%ecx xorl 36(%rsp),%r14d xorl %edi,%eax roll 
$5,%ecx xorl 60(%rsp),%r14d leal 1859775393(%rbp,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%r14d xorl 32(%rsp),%edx movl %r12d,%eax movl %r14d,28(%rsp) movl %r11d,%ecx xorl 40(%rsp),%edx xorl %esi,%eax roll $5,%ecx xorl 0(%rsp),%edx leal 1859775393(%r14,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%edx xorl 36(%rsp),%ebp movl %r11d,%eax movl %edx,32(%rsp) movl %edi,%ecx xorl 44(%rsp),%ebp xorl %r13d,%eax roll $5,%ecx xorl 4(%rsp),%ebp leal 1859775393(%rdx,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%ebp xorl 40(%rsp),%r14d movl %edi,%eax movl %ebp,36(%rsp) movl %esi,%ecx xorl 48(%rsp),%r14d xorl %r12d,%eax roll $5,%ecx xorl 8(%rsp),%r14d leal 1859775393(%rbp,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%r14d xorl 44(%rsp),%edx movl %esi,%eax movl %r14d,40(%rsp) movl %r13d,%ecx xorl 52(%rsp),%edx xorl %r11d,%eax roll $5,%ecx xorl 12(%rsp),%edx leal 1859775393(%r14,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%edx xorl 48(%rsp),%ebp movl %r13d,%eax movl %edx,44(%rsp) movl %r12d,%ecx xorl 56(%rsp),%ebp xorl %edi,%eax roll $5,%ecx xorl 16(%rsp),%ebp leal 1859775393(%rdx,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%ebp xorl 52(%rsp),%r14d movl %r12d,%eax movl %ebp,48(%rsp) movl %r11d,%ecx xorl 60(%rsp),%r14d xorl %esi,%eax roll $5,%ecx xorl 20(%rsp),%r14d leal 1859775393(%rbp,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%r14d xorl 56(%rsp),%edx movl %r11d,%eax movl %r14d,52(%rsp) movl %edi,%ecx xorl 0(%rsp),%edx xorl %r13d,%eax roll $5,%ecx xorl 24(%rsp),%edx leal 1859775393(%r14,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%edx xorl 60(%rsp),%ebp movl %edi,%eax movl %edx,56(%rsp) movl %esi,%ecx xorl 4(%rsp),%ebp xorl %r12d,%eax roll $5,%ecx xorl 28(%rsp),%ebp leal 
1859775393(%rdx,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%ebp xorl 0(%rsp),%r14d movl %esi,%eax movl %ebp,60(%rsp) movl %r13d,%ecx xorl 8(%rsp),%r14d xorl %r11d,%eax roll $5,%ecx xorl 32(%rsp),%r14d leal 1859775393(%rbp,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%r14d xorl 4(%rsp),%edx movl %r13d,%eax movl %r14d,0(%rsp) movl %r12d,%ecx xorl 12(%rsp),%edx xorl %edi,%eax roll $5,%ecx xorl 36(%rsp),%edx leal 1859775393(%r14,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%edx xorl 8(%rsp),%ebp movl %r12d,%eax movl %edx,4(%rsp) movl %r11d,%ecx xorl 16(%rsp),%ebp xorl %esi,%eax roll $5,%ecx xorl 40(%rsp),%ebp leal 1859775393(%rdx,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%ebp xorl 12(%rsp),%r14d movl %r11d,%eax movl %ebp,8(%rsp) movl %edi,%ecx xorl 20(%rsp),%r14d xorl %r13d,%eax roll $5,%ecx xorl 44(%rsp),%r14d leal 1859775393(%rbp,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%r14d xorl 16(%rsp),%edx movl %edi,%eax movl %r14d,12(%rsp) movl %esi,%ecx xorl 24(%rsp),%edx xorl %r12d,%eax roll $5,%ecx xorl 48(%rsp),%edx leal 1859775393(%r14,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%edx xorl 20(%rsp),%ebp movl %esi,%eax movl %edx,16(%rsp) movl %r13d,%ecx xorl 28(%rsp),%ebp xorl %r11d,%eax roll $5,%ecx xorl 52(%rsp),%ebp leal 1859775393(%rdx,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%ebp xorl 24(%rsp),%r14d movl %r13d,%eax movl %ebp,20(%rsp) movl %r12d,%ecx xorl 32(%rsp),%r14d xorl %edi,%eax roll $5,%ecx xorl 56(%rsp),%r14d leal 1859775393(%rbp,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%r14d xorl 28(%rsp),%edx movl %r12d,%eax movl %r14d,24(%rsp) movl %r11d,%ecx xorl 36(%rsp),%edx xorl %esi,%eax roll $5,%ecx xorl 60(%rsp),%edx leal 1859775393(%r14,%rdi,1),%edi 
xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%edx xorl 32(%rsp),%ebp movl %r11d,%eax movl %edx,28(%rsp) movl %edi,%ecx xorl 40(%rsp),%ebp xorl %r13d,%eax roll $5,%ecx xorl 0(%rsp),%ebp leal 1859775393(%rdx,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%ebp xorl 36(%rsp),%r14d movl %r12d,%eax movl %ebp,32(%rsp) movl %r12d,%ebx xorl 44(%rsp),%r14d andl %r11d,%eax movl %esi,%ecx xorl 4(%rsp),%r14d leal -1894007588(%rbp,%r13,1),%r13d xorl %r11d,%ebx roll $5,%ecx addl %eax,%r13d roll $1,%r14d andl %edi,%ebx addl %ecx,%r13d roll $30,%edi addl %ebx,%r13d xorl 40(%rsp),%edx movl %r11d,%eax movl %r14d,36(%rsp) movl %r11d,%ebx xorl 48(%rsp),%edx andl %edi,%eax movl %r13d,%ecx xorl 8(%rsp),%edx leal -1894007588(%r14,%r12,1),%r12d xorl %edi,%ebx roll $5,%ecx addl %eax,%r12d roll $1,%edx andl %esi,%ebx addl %ecx,%r12d roll $30,%esi addl %ebx,%r12d xorl 44(%rsp),%ebp movl %edi,%eax movl %edx,40(%rsp) movl %edi,%ebx xorl 52(%rsp),%ebp andl %esi,%eax movl %r12d,%ecx xorl 12(%rsp),%ebp leal -1894007588(%rdx,%r11,1),%r11d xorl %esi,%ebx roll $5,%ecx addl %eax,%r11d roll $1,%ebp andl %r13d,%ebx addl %ecx,%r11d roll $30,%r13d addl %ebx,%r11d xorl 48(%rsp),%r14d movl %esi,%eax movl %ebp,44(%rsp) movl %esi,%ebx xorl 56(%rsp),%r14d andl %r13d,%eax movl %r11d,%ecx xorl 16(%rsp),%r14d leal -1894007588(%rbp,%rdi,1),%edi xorl %r13d,%ebx roll $5,%ecx addl %eax,%edi roll $1,%r14d andl %r12d,%ebx addl %ecx,%edi roll $30,%r12d addl %ebx,%edi xorl 52(%rsp),%edx movl %r13d,%eax movl %r14d,48(%rsp) movl %r13d,%ebx xorl 60(%rsp),%edx andl %r12d,%eax movl %edi,%ecx xorl 20(%rsp),%edx leal -1894007588(%r14,%rsi,1),%esi xorl %r12d,%ebx roll $5,%ecx addl %eax,%esi roll $1,%edx andl %r11d,%ebx addl %ecx,%esi roll $30,%r11d addl %ebx,%esi xorl 56(%rsp),%ebp movl %r12d,%eax movl %edx,52(%rsp) movl %r12d,%ebx xorl 0(%rsp),%ebp andl %r11d,%eax movl %esi,%ecx xorl 24(%rsp),%ebp leal -1894007588(%rdx,%r13,1),%r13d xorl %r11d,%ebx roll $5,%ecx addl 
%eax,%r13d roll $1,%ebp andl %edi,%ebx addl %ecx,%r13d roll $30,%edi addl %ebx,%r13d xorl 60(%rsp),%r14d movl %r11d,%eax movl %ebp,56(%rsp) movl %r11d,%ebx xorl 4(%rsp),%r14d andl %edi,%eax movl %r13d,%ecx xorl 28(%rsp),%r14d leal -1894007588(%rbp,%r12,1),%r12d xorl %edi,%ebx roll $5,%ecx addl %eax,%r12d roll $1,%r14d andl %esi,%ebx addl %ecx,%r12d roll $30,%esi addl %ebx,%r12d xorl 0(%rsp),%edx movl %edi,%eax movl %r14d,60(%rsp) movl %edi,%ebx xorl 8(%rsp),%edx andl %esi,%eax movl %r12d,%ecx xorl 32(%rsp),%edx leal -1894007588(%r14,%r11,1),%r11d xorl %esi,%ebx roll $5,%ecx addl %eax,%r11d roll $1,%edx andl %r13d,%ebx addl %ecx,%r11d roll $30,%r13d addl %ebx,%r11d xorl 4(%rsp),%ebp movl %esi,%eax movl %edx,0(%rsp) movl %esi,%ebx xorl 12(%rsp),%ebp andl %r13d,%eax movl %r11d,%ecx xorl 36(%rsp),%ebp leal -1894007588(%rdx,%rdi,1),%edi xorl %r13d,%ebx roll $5,%ecx addl %eax,%edi roll $1,%ebp andl %r12d,%ebx addl %ecx,%edi roll $30,%r12d addl %ebx,%edi xorl 8(%rsp),%r14d movl %r13d,%eax movl %ebp,4(%rsp) movl %r13d,%ebx xorl 16(%rsp),%r14d andl %r12d,%eax movl %edi,%ecx xorl 40(%rsp),%r14d leal -1894007588(%rbp,%rsi,1),%esi xorl %r12d,%ebx roll $5,%ecx addl %eax,%esi roll $1,%r14d andl %r11d,%ebx addl %ecx,%esi roll $30,%r11d addl %ebx,%esi xorl 12(%rsp),%edx movl %r12d,%eax movl %r14d,8(%rsp) movl %r12d,%ebx xorl 20(%rsp),%edx andl %r11d,%eax movl %esi,%ecx xorl 44(%rsp),%edx leal -1894007588(%r14,%r13,1),%r13d xorl %r11d,%ebx roll $5,%ecx addl %eax,%r13d roll $1,%edx andl %edi,%ebx addl %ecx,%r13d roll $30,%edi addl %ebx,%r13d xorl 16(%rsp),%ebp movl %r11d,%eax movl %edx,12(%rsp) movl %r11d,%ebx xorl 24(%rsp),%ebp andl %edi,%eax movl %r13d,%ecx xorl 48(%rsp),%ebp leal -1894007588(%rdx,%r12,1),%r12d xorl %edi,%ebx roll $5,%ecx addl %eax,%r12d roll $1,%ebp andl %esi,%ebx addl %ecx,%r12d roll $30,%esi addl %ebx,%r12d xorl 20(%rsp),%r14d movl %edi,%eax movl %ebp,16(%rsp) movl %edi,%ebx xorl 28(%rsp),%r14d andl %esi,%eax movl %r12d,%ecx xorl 52(%rsp),%r14d leal 
-1894007588(%rbp,%r11,1),%r11d xorl %esi,%ebx roll $5,%ecx addl %eax,%r11d roll $1,%r14d andl %r13d,%ebx addl %ecx,%r11d roll $30,%r13d addl %ebx,%r11d xorl 24(%rsp),%edx movl %esi,%eax movl %r14d,20(%rsp) movl %esi,%ebx xorl 32(%rsp),%edx andl %r13d,%eax movl %r11d,%ecx xorl 56(%rsp),%edx leal -1894007588(%r14,%rdi,1),%edi xorl %r13d,%ebx roll $5,%ecx addl %eax,%edi roll $1,%edx andl %r12d,%ebx addl %ecx,%edi roll $30,%r12d addl %ebx,%edi xorl 28(%rsp),%ebp movl %r13d,%eax movl %edx,24(%rsp) movl %r13d,%ebx xorl 36(%rsp),%ebp andl %r12d,%eax movl %edi,%ecx xorl 60(%rsp),%ebp leal -1894007588(%rdx,%rsi,1),%esi xorl %r12d,%ebx roll $5,%ecx addl %eax,%esi roll $1,%ebp andl %r11d,%ebx addl %ecx,%esi roll $30,%r11d addl %ebx,%esi xorl 32(%rsp),%r14d movl %r12d,%eax movl %ebp,28(%rsp) movl %r12d,%ebx xorl 40(%rsp),%r14d andl %r11d,%eax movl %esi,%ecx xorl 0(%rsp),%r14d leal -1894007588(%rbp,%r13,1),%r13d xorl %r11d,%ebx roll $5,%ecx addl %eax,%r13d roll $1,%r14d andl %edi,%ebx addl %ecx,%r13d roll $30,%edi addl %ebx,%r13d xorl 36(%rsp),%edx movl %r11d,%eax movl %r14d,32(%rsp) movl %r11d,%ebx xorl 44(%rsp),%edx andl %edi,%eax movl %r13d,%ecx xorl 4(%rsp),%edx leal -1894007588(%r14,%r12,1),%r12d xorl %edi,%ebx roll $5,%ecx addl %eax,%r12d roll $1,%edx andl %esi,%ebx addl %ecx,%r12d roll $30,%esi addl %ebx,%r12d xorl 40(%rsp),%ebp movl %edi,%eax movl %edx,36(%rsp) movl %edi,%ebx xorl 48(%rsp),%ebp andl %esi,%eax movl %r12d,%ecx xorl 8(%rsp),%ebp leal -1894007588(%rdx,%r11,1),%r11d xorl %esi,%ebx roll $5,%ecx addl %eax,%r11d roll $1,%ebp andl %r13d,%ebx addl %ecx,%r11d roll $30,%r13d addl %ebx,%r11d xorl 44(%rsp),%r14d movl %esi,%eax movl %ebp,40(%rsp) movl %esi,%ebx xorl 52(%rsp),%r14d andl %r13d,%eax movl %r11d,%ecx xorl 12(%rsp),%r14d leal -1894007588(%rbp,%rdi,1),%edi xorl %r13d,%ebx roll $5,%ecx addl %eax,%edi roll $1,%r14d andl %r12d,%ebx addl %ecx,%edi roll $30,%r12d addl %ebx,%edi xorl 48(%rsp),%edx movl %r13d,%eax movl %r14d,44(%rsp) movl %r13d,%ebx xorl 
56(%rsp),%edx andl %r12d,%eax movl %edi,%ecx xorl 16(%rsp),%edx leal -1894007588(%r14,%rsi,1),%esi xorl %r12d,%ebx roll $5,%ecx addl %eax,%esi roll $1,%edx andl %r11d,%ebx addl %ecx,%esi roll $30,%r11d addl %ebx,%esi xorl 52(%rsp),%ebp movl %edi,%eax movl %edx,48(%rsp) movl %esi,%ecx xorl 60(%rsp),%ebp xorl %r12d,%eax roll $5,%ecx xorl 20(%rsp),%ebp leal -899497514(%rdx,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%ebp xorl 56(%rsp),%r14d movl %esi,%eax movl %ebp,52(%rsp) movl %r13d,%ecx xorl 0(%rsp),%r14d xorl %r11d,%eax roll $5,%ecx xorl 24(%rsp),%r14d leal -899497514(%rbp,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%r14d xorl 60(%rsp),%edx movl %r13d,%eax movl %r14d,56(%rsp) movl %r12d,%ecx xorl 4(%rsp),%edx xorl %edi,%eax roll $5,%ecx xorl 28(%rsp),%edx leal -899497514(%r14,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%edx xorl 0(%rsp),%ebp movl %r12d,%eax movl %edx,60(%rsp) movl %r11d,%ecx xorl 8(%rsp),%ebp xorl %esi,%eax roll $5,%ecx xorl 32(%rsp),%ebp leal -899497514(%rdx,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%ebp xorl 4(%rsp),%r14d movl %r11d,%eax movl %ebp,0(%rsp) movl %edi,%ecx xorl 12(%rsp),%r14d xorl %r13d,%eax roll $5,%ecx xorl 36(%rsp),%r14d leal -899497514(%rbp,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%r14d xorl 8(%rsp),%edx movl %edi,%eax movl %r14d,4(%rsp) movl %esi,%ecx xorl 16(%rsp),%edx xorl %r12d,%eax roll $5,%ecx xorl 40(%rsp),%edx leal -899497514(%r14,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%edx xorl 12(%rsp),%ebp movl %esi,%eax movl %edx,8(%rsp) movl %r13d,%ecx xorl 20(%rsp),%ebp xorl %r11d,%eax roll $5,%ecx xorl 44(%rsp),%ebp leal -899497514(%rdx,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%ebp xorl 16(%rsp),%r14d movl %r13d,%eax movl %ebp,12(%rsp) movl 
%r12d,%ecx xorl 24(%rsp),%r14d xorl %edi,%eax roll $5,%ecx xorl 48(%rsp),%r14d leal -899497514(%rbp,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%r14d xorl 20(%rsp),%edx movl %r12d,%eax movl %r14d,16(%rsp) movl %r11d,%ecx xorl 28(%rsp),%edx xorl %esi,%eax roll $5,%ecx xorl 52(%rsp),%edx leal -899497514(%r14,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%edx xorl 24(%rsp),%ebp movl %r11d,%eax movl %edx,20(%rsp) movl %edi,%ecx xorl 32(%rsp),%ebp xorl %r13d,%eax roll $5,%ecx xorl 56(%rsp),%ebp leal -899497514(%rdx,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%ebp xorl 28(%rsp),%r14d movl %edi,%eax movl %ebp,24(%rsp) movl %esi,%ecx xorl 36(%rsp),%r14d xorl %r12d,%eax roll $5,%ecx xorl 60(%rsp),%r14d leal -899497514(%rbp,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%r14d xorl 32(%rsp),%edx movl %esi,%eax movl %r14d,28(%rsp) movl %r13d,%ecx xorl 40(%rsp),%edx xorl %r11d,%eax roll $5,%ecx xorl 0(%rsp),%edx leal -899497514(%r14,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%edx xorl 36(%rsp),%ebp movl %r13d,%eax movl %r12d,%ecx xorl 44(%rsp),%ebp xorl %edi,%eax roll $5,%ecx xorl 4(%rsp),%ebp leal -899497514(%rdx,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%ebp xorl 40(%rsp),%r14d movl %r12d,%eax movl %r11d,%ecx xorl 48(%rsp),%r14d xorl %esi,%eax roll $5,%ecx xorl 8(%rsp),%r14d leal -899497514(%rbp,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%r14d xorl 44(%rsp),%edx movl %r11d,%eax movl %edi,%ecx xorl 52(%rsp),%edx xorl %r13d,%eax roll $5,%ecx xorl 12(%rsp),%edx leal -899497514(%r14,%rsi,1),%esi xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi roll $1,%edx xorl 48(%rsp),%ebp movl %edi,%eax movl %esi,%ecx xorl 56(%rsp),%ebp xorl %r12d,%eax roll $5,%ecx xorl 16(%rsp),%ebp leal 
-899497514(%rdx,%r13,1),%r13d xorl %r11d,%eax addl %ecx,%r13d roll $30,%edi addl %eax,%r13d roll $1,%ebp xorl 52(%rsp),%r14d movl %esi,%eax movl %r13d,%ecx xorl 60(%rsp),%r14d xorl %r11d,%eax roll $5,%ecx xorl 20(%rsp),%r14d leal -899497514(%rbp,%r12,1),%r12d xorl %edi,%eax addl %ecx,%r12d roll $30,%esi addl %eax,%r12d roll $1,%r14d xorl 56(%rsp),%edx movl %r13d,%eax movl %r12d,%ecx xorl 0(%rsp),%edx xorl %edi,%eax roll $5,%ecx xorl 24(%rsp),%edx leal -899497514(%r14,%r11,1),%r11d xorl %esi,%eax addl %ecx,%r11d roll $30,%r13d addl %eax,%r11d roll $1,%edx xorl 60(%rsp),%ebp movl %r12d,%eax movl %r11d,%ecx xorl 4(%rsp),%ebp xorl %esi,%eax roll $5,%ecx xorl 28(%rsp),%ebp leal -899497514(%rdx,%rdi,1),%edi xorl %r13d,%eax addl %ecx,%edi roll $30,%r12d addl %eax,%edi roll $1,%ebp movl %r11d,%eax movl %edi,%ecx xorl %r13d,%eax leal -899497514(%rbp,%rsi,1),%esi roll $5,%ecx xorl %r12d,%eax addl %ecx,%esi roll $30,%r11d addl %eax,%esi addl 0(%r8),%esi addl 4(%r8),%edi addl 8(%r8),%r11d addl 12(%r8),%r12d addl 16(%r8),%r13d movl %esi,0(%r8) movl %edi,4(%r8) movl %r11d,8(%r8) movl %r12d,12(%r8) movl %r13d,16(%r8) subq $1,%r10 leaq 64(%r9),%r9 jnz .Lloop movq 64(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lepilogue: ret .cfi_endproc .size sha1_block_data_order_nohw,.-sha1_block_data_order_nohw .globl sha1_block_data_order_hw .hidden sha1_block_data_order_hw .type sha1_block_data_order_hw,@function .align 32 sha1_block_data_order_hw: .cfi_startproc _CET_ENDBR movdqu (%rdi),%xmm0 movd 16(%rdi),%xmm1 movdqa K_XX_XX+160(%rip),%xmm3 movdqu (%rsi),%xmm4 pshufd $27,%xmm0,%xmm0 movdqu 16(%rsi),%xmm5 pshufd $27,%xmm1,%xmm1 movdqu 32(%rsi),%xmm6 .byte 102,15,56,0,227 movdqu 48(%rsi),%xmm7 .byte 102,15,56,0,235 .byte 102,15,56,0,243 movdqa %xmm1,%xmm9 .byte 
102,15,56,0,251 jmp .Loop_shaext .align 16 .Loop_shaext: decq %rdx leaq 64(%rsi),%r8 paddd %xmm4,%xmm1 cmovneq %r8,%rsi prefetcht0 512(%rsi) movdqa %xmm0,%xmm8 .byte 15,56,201,229 movdqa %xmm0,%xmm2 .byte 15,58,204,193,0 .byte 15,56,200,213 pxor %xmm6,%xmm4 .byte 15,56,201,238 .byte 15,56,202,231 movdqa %xmm0,%xmm1 .byte 15,58,204,194,0 .byte 15,56,200,206 pxor %xmm7,%xmm5 .byte 15,56,202,236 .byte 15,56,201,247 movdqa %xmm0,%xmm2 .byte 15,58,204,193,0 .byte 15,56,200,215 pxor %xmm4,%xmm6 .byte 15,56,201,252 .byte 15,56,202,245 movdqa %xmm0,%xmm1 .byte 15,58,204,194,0 .byte 15,56,200,204 pxor %xmm5,%xmm7 .byte 15,56,202,254 .byte 15,56,201,229 movdqa %xmm0,%xmm2 .byte 15,58,204,193,0 .byte 15,56,200,213 pxor %xmm6,%xmm4 .byte 15,56,201,238 .byte 15,56,202,231 movdqa %xmm0,%xmm1 .byte 15,58,204,194,1 .byte 15,56,200,206 pxor %xmm7,%xmm5 .byte 15,56,202,236 .byte 15,56,201,247 movdqa %xmm0,%xmm2 .byte 15,58,204,193,1 .byte 15,56,200,215 pxor %xmm4,%xmm6 .byte 15,56,201,252 .byte 15,56,202,245 movdqa %xmm0,%xmm1 .byte 15,58,204,194,1 .byte 15,56,200,204 pxor %xmm5,%xmm7 .byte 15,56,202,254 .byte 15,56,201,229 movdqa %xmm0,%xmm2 .byte 15,58,204,193,1 .byte 15,56,200,213 pxor %xmm6,%xmm4 .byte 15,56,201,238 .byte 15,56,202,231 movdqa %xmm0,%xmm1 .byte 15,58,204,194,1 .byte 15,56,200,206 pxor %xmm7,%xmm5 .byte 15,56,202,236 .byte 15,56,201,247 movdqa %xmm0,%xmm2 .byte 15,58,204,193,2 .byte 15,56,200,215 pxor %xmm4,%xmm6 .byte 15,56,201,252 .byte 15,56,202,245 movdqa %xmm0,%xmm1 .byte 15,58,204,194,2 .byte 15,56,200,204 pxor %xmm5,%xmm7 .byte 15,56,202,254 .byte 15,56,201,229 movdqa %xmm0,%xmm2 .byte 15,58,204,193,2 .byte 15,56,200,213 pxor %xmm6,%xmm4 .byte 15,56,201,238 .byte 15,56,202,231 movdqa %xmm0,%xmm1 .byte 15,58,204,194,2 .byte 15,56,200,206 pxor %xmm7,%xmm5 .byte 15,56,202,236 .byte 15,56,201,247 movdqa %xmm0,%xmm2 .byte 15,58,204,193,2 .byte 15,56,200,215 pxor %xmm4,%xmm6 .byte 15,56,201,252 .byte 15,56,202,245 movdqa %xmm0,%xmm1 .byte 15,58,204,194,3 .byte 
15,56,200,204 pxor %xmm5,%xmm7 .byte 15,56,202,254 movdqu (%rsi),%xmm4 movdqa %xmm0,%xmm2 .byte 15,58,204,193,3 .byte 15,56,200,213 movdqu 16(%rsi),%xmm5 .byte 102,15,56,0,227 movdqa %xmm0,%xmm1 .byte 15,58,204,194,3 .byte 15,56,200,206 movdqu 32(%rsi),%xmm6 .byte 102,15,56,0,235 movdqa %xmm0,%xmm2 .byte 15,58,204,193,3 .byte 15,56,200,215 movdqu 48(%rsi),%xmm7 .byte 102,15,56,0,243 movdqa %xmm0,%xmm1 .byte 15,58,204,194,3 .byte 65,15,56,200,201 .byte 102,15,56,0,251 paddd %xmm8,%xmm0 movdqa %xmm1,%xmm9 jnz .Loop_shaext pshufd $27,%xmm0,%xmm0 pshufd $27,%xmm1,%xmm1 movdqu %xmm0,(%rdi) movd %xmm1,16(%rdi) ret .cfi_endproc .size sha1_block_data_order_hw,.-sha1_block_data_order_hw .globl sha1_block_data_order_ssse3 .hidden sha1_block_data_order_ssse3 .type sha1_block_data_order_ssse3,@function .align 16 sha1_block_data_order_ssse3: .cfi_startproc _CET_ENDBR movq %rsp,%r11 .cfi_def_cfa_register %r11 pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 leaq -64(%rsp),%rsp andq $-64,%rsp movq %rdi,%r8 movq %rsi,%r9 movq %rdx,%r10 shlq $6,%r10 addq %r9,%r10 leaq K_XX_XX+64(%rip),%r14 movl 0(%r8),%eax movl 4(%r8),%ebx movl 8(%r8),%ecx movl 12(%r8),%edx movl %ebx,%esi movl 16(%r8),%ebp movl %ecx,%edi xorl %edx,%edi andl %edi,%esi movdqa 64(%r14),%xmm6 movdqa -64(%r14),%xmm9 movdqu 0(%r9),%xmm0 movdqu 16(%r9),%xmm1 movdqu 32(%r9),%xmm2 movdqu 48(%r9),%xmm3 .byte 102,15,56,0,198 .byte 102,15,56,0,206 .byte 102,15,56,0,214 addq $64,%r9 paddd %xmm9,%xmm0 .byte 102,15,56,0,222 paddd %xmm9,%xmm1 paddd %xmm9,%xmm2 movdqa %xmm0,0(%rsp) psubd %xmm9,%xmm0 movdqa %xmm1,16(%rsp) psubd %xmm9,%xmm1 movdqa %xmm2,32(%rsp) psubd %xmm9,%xmm2 jmp .Loop_ssse3 .align 16 .Loop_ssse3: rorl $2,%ebx pshufd $238,%xmm0,%xmm4 xorl %edx,%esi movdqa %xmm3,%xmm8 paddd %xmm3,%xmm9 movl %eax,%edi addl 0(%rsp),%ebp punpcklqdq %xmm1,%xmm4 xorl %ecx,%ebx roll $5,%eax addl %esi,%ebp psrldq $4,%xmm8 andl 
%ebx,%edi xorl %ecx,%ebx pxor %xmm0,%xmm4 addl %eax,%ebp rorl $7,%eax pxor %xmm2,%xmm8 xorl %ecx,%edi movl %ebp,%esi addl 4(%rsp),%edx pxor %xmm8,%xmm4 xorl %ebx,%eax roll $5,%ebp movdqa %xmm9,48(%rsp) addl %edi,%edx andl %eax,%esi movdqa %xmm4,%xmm10 xorl %ebx,%eax addl %ebp,%edx rorl $7,%ebp movdqa %xmm4,%xmm8 xorl %ebx,%esi pslldq $12,%xmm10 paddd %xmm4,%xmm4 movl %edx,%edi addl 8(%rsp),%ecx psrld $31,%xmm8 xorl %eax,%ebp roll $5,%edx addl %esi,%ecx movdqa %xmm10,%xmm9 andl %ebp,%edi xorl %eax,%ebp psrld $30,%xmm10 addl %edx,%ecx rorl $7,%edx por %xmm8,%xmm4 xorl %eax,%edi movl %ecx,%esi addl 12(%rsp),%ebx pslld $2,%xmm9 pxor %xmm10,%xmm4 xorl %ebp,%edx movdqa -64(%r14),%xmm10 roll $5,%ecx addl %edi,%ebx andl %edx,%esi pxor %xmm9,%xmm4 xorl %ebp,%edx addl %ecx,%ebx rorl $7,%ecx pshufd $238,%xmm1,%xmm5 xorl %ebp,%esi movdqa %xmm4,%xmm9 paddd %xmm4,%xmm10 movl %ebx,%edi addl 16(%rsp),%eax punpcklqdq %xmm2,%xmm5 xorl %edx,%ecx roll $5,%ebx addl %esi,%eax psrldq $4,%xmm9 andl %ecx,%edi xorl %edx,%ecx pxor %xmm1,%xmm5 addl %ebx,%eax rorl $7,%ebx pxor %xmm3,%xmm9 xorl %edx,%edi movl %eax,%esi addl 20(%rsp),%ebp pxor %xmm9,%xmm5 xorl %ecx,%ebx roll $5,%eax movdqa %xmm10,0(%rsp) addl %edi,%ebp andl %ebx,%esi movdqa %xmm5,%xmm8 xorl %ecx,%ebx addl %eax,%ebp rorl $7,%eax movdqa %xmm5,%xmm9 xorl %ecx,%esi pslldq $12,%xmm8 paddd %xmm5,%xmm5 movl %ebp,%edi addl 24(%rsp),%edx psrld $31,%xmm9 xorl %ebx,%eax roll $5,%ebp addl %esi,%edx movdqa %xmm8,%xmm10 andl %eax,%edi xorl %ebx,%eax psrld $30,%xmm8 addl %ebp,%edx rorl $7,%ebp por %xmm9,%xmm5 xorl %ebx,%edi movl %edx,%esi addl 28(%rsp),%ecx pslld $2,%xmm10 pxor %xmm8,%xmm5 xorl %eax,%ebp movdqa -32(%r14),%xmm8 roll $5,%edx addl %edi,%ecx andl %ebp,%esi pxor %xmm10,%xmm5 xorl %eax,%ebp addl %edx,%ecx rorl $7,%edx pshufd $238,%xmm2,%xmm6 xorl %eax,%esi movdqa %xmm5,%xmm10 paddd %xmm5,%xmm8 movl %ecx,%edi addl 32(%rsp),%ebx punpcklqdq %xmm3,%xmm6 xorl %ebp,%edx roll $5,%ecx addl %esi,%ebx psrldq $4,%xmm10 andl %edx,%edi xorl 
%ebp,%edx pxor %xmm2,%xmm6 addl %ecx,%ebx rorl $7,%ecx pxor %xmm4,%xmm10 xorl %ebp,%edi movl %ebx,%esi addl 36(%rsp),%eax pxor %xmm10,%xmm6 xorl %edx,%ecx roll $5,%ebx movdqa %xmm8,16(%rsp) addl %edi,%eax andl %ecx,%esi movdqa %xmm6,%xmm9 xorl %edx,%ecx addl %ebx,%eax rorl $7,%ebx movdqa %xmm6,%xmm10 xorl %edx,%esi pslldq $12,%xmm9 paddd %xmm6,%xmm6 movl %eax,%edi addl 40(%rsp),%ebp psrld $31,%xmm10 xorl %ecx,%ebx roll $5,%eax addl %esi,%ebp movdqa %xmm9,%xmm8 andl %ebx,%edi xorl %ecx,%ebx psrld $30,%xmm9 addl %eax,%ebp rorl $7,%eax por %xmm10,%xmm6 xorl %ecx,%edi movl %ebp,%esi addl 44(%rsp),%edx pslld $2,%xmm8 pxor %xmm9,%xmm6 xorl %ebx,%eax movdqa -32(%r14),%xmm9 roll $5,%ebp addl %edi,%edx andl %eax,%esi pxor %xmm8,%xmm6 xorl %ebx,%eax addl %ebp,%edx rorl $7,%ebp pshufd $238,%xmm3,%xmm7 xorl %ebx,%esi movdqa %xmm6,%xmm8 paddd %xmm6,%xmm9 movl %edx,%edi addl 48(%rsp),%ecx punpcklqdq %xmm4,%xmm7 xorl %eax,%ebp roll $5,%edx addl %esi,%ecx psrldq $4,%xmm8 andl %ebp,%edi xorl %eax,%ebp pxor %xmm3,%xmm7 addl %edx,%ecx rorl $7,%edx pxor %xmm5,%xmm8 xorl %eax,%edi movl %ecx,%esi addl 52(%rsp),%ebx pxor %xmm8,%xmm7 xorl %ebp,%edx roll $5,%ecx movdqa %xmm9,32(%rsp) addl %edi,%ebx andl %edx,%esi movdqa %xmm7,%xmm10 xorl %ebp,%edx addl %ecx,%ebx rorl $7,%ecx movdqa %xmm7,%xmm8 xorl %ebp,%esi pslldq $12,%xmm10 paddd %xmm7,%xmm7 movl %ebx,%edi addl 56(%rsp),%eax psrld $31,%xmm8 xorl %edx,%ecx roll $5,%ebx addl %esi,%eax movdqa %xmm10,%xmm9 andl %ecx,%edi xorl %edx,%ecx psrld $30,%xmm10 addl %ebx,%eax rorl $7,%ebx por %xmm8,%xmm7 xorl %edx,%edi movl %eax,%esi addl 60(%rsp),%ebp pslld $2,%xmm9 pxor %xmm10,%xmm7 xorl %ecx,%ebx movdqa -32(%r14),%xmm10 roll $5,%eax addl %edi,%ebp andl %ebx,%esi pxor %xmm9,%xmm7 pshufd $238,%xmm6,%xmm9 xorl %ecx,%ebx addl %eax,%ebp rorl $7,%eax pxor %xmm4,%xmm0 xorl %ecx,%esi movl %ebp,%edi addl 0(%rsp),%edx punpcklqdq %xmm7,%xmm9 xorl %ebx,%eax roll $5,%ebp pxor %xmm1,%xmm0 addl %esi,%edx andl %eax,%edi movdqa %xmm10,%xmm8 xorl %ebx,%eax paddd 
%xmm7,%xmm10 addl %ebp,%edx pxor %xmm9,%xmm0 rorl $7,%ebp xorl %ebx,%edi movl %edx,%esi addl 4(%rsp),%ecx movdqa %xmm0,%xmm9 xorl %eax,%ebp roll $5,%edx movdqa %xmm10,48(%rsp) addl %edi,%ecx andl %ebp,%esi xorl %eax,%ebp pslld $2,%xmm0 addl %edx,%ecx rorl $7,%edx psrld $30,%xmm9 xorl %eax,%esi movl %ecx,%edi addl 8(%rsp),%ebx por %xmm9,%xmm0 xorl %ebp,%edx roll $5,%ecx pshufd $238,%xmm7,%xmm10 addl %esi,%ebx andl %edx,%edi xorl %ebp,%edx addl %ecx,%ebx addl 12(%rsp),%eax xorl %ebp,%edi movl %ebx,%esi roll $5,%ebx addl %edi,%eax xorl %edx,%esi rorl $7,%ecx addl %ebx,%eax pxor %xmm5,%xmm1 addl 16(%rsp),%ebp xorl %ecx,%esi punpcklqdq %xmm0,%xmm10 movl %eax,%edi roll $5,%eax pxor %xmm2,%xmm1 addl %esi,%ebp xorl %ecx,%edi movdqa %xmm8,%xmm9 rorl $7,%ebx paddd %xmm0,%xmm8 addl %eax,%ebp pxor %xmm10,%xmm1 addl 20(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi roll $5,%ebp movdqa %xmm1,%xmm10 addl %edi,%edx xorl %ebx,%esi movdqa %xmm8,0(%rsp) rorl $7,%eax addl %ebp,%edx addl 24(%rsp),%ecx pslld $2,%xmm1 xorl %eax,%esi movl %edx,%edi psrld $30,%xmm10 roll $5,%edx addl %esi,%ecx xorl %eax,%edi rorl $7,%ebp por %xmm10,%xmm1 addl %edx,%ecx addl 28(%rsp),%ebx pshufd $238,%xmm0,%xmm8 xorl %ebp,%edi movl %ecx,%esi roll $5,%ecx addl %edi,%ebx xorl %ebp,%esi rorl $7,%edx addl %ecx,%ebx pxor %xmm6,%xmm2 addl 32(%rsp),%eax xorl %edx,%esi punpcklqdq %xmm1,%xmm8 movl %ebx,%edi roll $5,%ebx pxor %xmm3,%xmm2 addl %esi,%eax xorl %edx,%edi movdqa 0(%r14),%xmm10 rorl $7,%ecx paddd %xmm1,%xmm9 addl %ebx,%eax pxor %xmm8,%xmm2 addl 36(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi roll $5,%eax movdqa %xmm2,%xmm8 addl %edi,%ebp xorl %ecx,%esi movdqa %xmm9,16(%rsp) rorl $7,%ebx addl %eax,%ebp addl 40(%rsp),%edx pslld $2,%xmm2 xorl %ebx,%esi movl %ebp,%edi psrld $30,%xmm8 roll $5,%ebp addl %esi,%edx xorl %ebx,%edi rorl $7,%eax por %xmm8,%xmm2 addl %ebp,%edx addl 44(%rsp),%ecx pshufd $238,%xmm1,%xmm9 xorl %eax,%edi movl %edx,%esi roll $5,%edx addl %edi,%ecx xorl %eax,%esi rorl $7,%ebp addl %edx,%ecx pxor 
%xmm7,%xmm3 addl 48(%rsp),%ebx xorl %ebp,%esi punpcklqdq %xmm2,%xmm9 movl %ecx,%edi roll $5,%ecx pxor %xmm4,%xmm3 addl %esi,%ebx xorl %ebp,%edi movdqa %xmm10,%xmm8 rorl $7,%edx paddd %xmm2,%xmm10 addl %ecx,%ebx pxor %xmm9,%xmm3 addl 52(%rsp),%eax xorl %edx,%edi movl %ebx,%esi roll $5,%ebx movdqa %xmm3,%xmm9 addl %edi,%eax xorl %edx,%esi movdqa %xmm10,32(%rsp) rorl $7,%ecx addl %ebx,%eax addl 56(%rsp),%ebp pslld $2,%xmm3 xorl %ecx,%esi movl %eax,%edi psrld $30,%xmm9 roll $5,%eax addl %esi,%ebp xorl %ecx,%edi rorl $7,%ebx por %xmm9,%xmm3 addl %eax,%ebp addl 60(%rsp),%edx pshufd $238,%xmm2,%xmm10 xorl %ebx,%edi movl %ebp,%esi roll $5,%ebp addl %edi,%edx xorl %ebx,%esi rorl $7,%eax addl %ebp,%edx pxor %xmm0,%xmm4 addl 0(%rsp),%ecx xorl %eax,%esi punpcklqdq %xmm3,%xmm10 movl %edx,%edi roll $5,%edx pxor %xmm5,%xmm4 addl %esi,%ecx xorl %eax,%edi movdqa %xmm8,%xmm9 rorl $7,%ebp paddd %xmm3,%xmm8 addl %edx,%ecx pxor %xmm10,%xmm4 addl 4(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi roll $5,%ecx movdqa %xmm4,%xmm10 addl %edi,%ebx xorl %ebp,%esi movdqa %xmm8,48(%rsp) rorl $7,%edx addl %ecx,%ebx addl 8(%rsp),%eax pslld $2,%xmm4 xorl %edx,%esi movl %ebx,%edi psrld $30,%xmm10 roll $5,%ebx addl %esi,%eax xorl %edx,%edi rorl $7,%ecx por %xmm10,%xmm4 addl %ebx,%eax addl 12(%rsp),%ebp pshufd $238,%xmm3,%xmm8 xorl %ecx,%edi movl %eax,%esi roll $5,%eax addl %edi,%ebp xorl %ecx,%esi rorl $7,%ebx addl %eax,%ebp pxor %xmm1,%xmm5 addl 16(%rsp),%edx xorl %ebx,%esi punpcklqdq %xmm4,%xmm8 movl %ebp,%edi roll $5,%ebp pxor %xmm6,%xmm5 addl %esi,%edx xorl %ebx,%edi movdqa %xmm9,%xmm10 rorl $7,%eax paddd %xmm4,%xmm9 addl %ebp,%edx pxor %xmm8,%xmm5 addl 20(%rsp),%ecx xorl %eax,%edi movl %edx,%esi roll $5,%edx movdqa %xmm5,%xmm8 addl %edi,%ecx xorl %eax,%esi movdqa %xmm9,0(%rsp) rorl $7,%ebp addl %edx,%ecx addl 24(%rsp),%ebx pslld $2,%xmm5 xorl %ebp,%esi movl %ecx,%edi psrld $30,%xmm8 roll $5,%ecx addl %esi,%ebx xorl %ebp,%edi rorl $7,%edx por %xmm8,%xmm5 addl %ecx,%ebx addl 28(%rsp),%eax pshufd 
$238,%xmm4,%xmm9 rorl $7,%ecx movl %ebx,%esi xorl %edx,%edi roll $5,%ebx addl %edi,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax pxor %xmm2,%xmm6 addl 32(%rsp),%ebp andl %ecx,%esi xorl %edx,%ecx rorl $7,%ebx punpcklqdq %xmm5,%xmm9 movl %eax,%edi xorl %ecx,%esi pxor %xmm7,%xmm6 roll $5,%eax addl %esi,%ebp movdqa %xmm10,%xmm8 xorl %ebx,%edi paddd %xmm5,%xmm10 xorl %ecx,%ebx pxor %xmm9,%xmm6 addl %eax,%ebp addl 36(%rsp),%edx andl %ebx,%edi xorl %ecx,%ebx rorl $7,%eax movdqa %xmm6,%xmm9 movl %ebp,%esi xorl %ebx,%edi movdqa %xmm10,16(%rsp) roll $5,%ebp addl %edi,%edx xorl %eax,%esi pslld $2,%xmm6 xorl %ebx,%eax addl %ebp,%edx psrld $30,%xmm9 addl 40(%rsp),%ecx andl %eax,%esi xorl %ebx,%eax por %xmm9,%xmm6 rorl $7,%ebp movl %edx,%edi xorl %eax,%esi roll $5,%edx pshufd $238,%xmm5,%xmm10 addl %esi,%ecx xorl %ebp,%edi xorl %eax,%ebp addl %edx,%ecx addl 44(%rsp),%ebx andl %ebp,%edi xorl %eax,%ebp rorl $7,%edx movl %ecx,%esi xorl %ebp,%edi roll $5,%ecx addl %edi,%ebx xorl %edx,%esi xorl %ebp,%edx addl %ecx,%ebx pxor %xmm3,%xmm7 addl 48(%rsp),%eax andl %edx,%esi xorl %ebp,%edx rorl $7,%ecx punpcklqdq %xmm6,%xmm10 movl %ebx,%edi xorl %edx,%esi pxor %xmm0,%xmm7 roll $5,%ebx addl %esi,%eax movdqa 32(%r14),%xmm9 xorl %ecx,%edi paddd %xmm6,%xmm8 xorl %edx,%ecx pxor %xmm10,%xmm7 addl %ebx,%eax addl 52(%rsp),%ebp andl %ecx,%edi xorl %edx,%ecx rorl $7,%ebx movdqa %xmm7,%xmm10 movl %eax,%esi xorl %ecx,%edi movdqa %xmm8,32(%rsp) roll $5,%eax addl %edi,%ebp xorl %ebx,%esi pslld $2,%xmm7 xorl %ecx,%ebx addl %eax,%ebp psrld $30,%xmm10 addl 56(%rsp),%edx andl %ebx,%esi xorl %ecx,%ebx por %xmm10,%xmm7 rorl $7,%eax movl %ebp,%edi xorl %ebx,%esi roll $5,%ebp pshufd $238,%xmm6,%xmm8 addl %esi,%edx xorl %eax,%edi xorl %ebx,%eax addl %ebp,%edx addl 60(%rsp),%ecx andl %eax,%edi xorl %ebx,%eax rorl $7,%ebp movl %edx,%esi xorl %eax,%edi roll $5,%edx addl %edi,%ecx xorl %ebp,%esi xorl %eax,%ebp addl %edx,%ecx pxor %xmm4,%xmm0 addl 0(%rsp),%ebx andl %ebp,%esi xorl %eax,%ebp rorl $7,%edx punpcklqdq 
%xmm7,%xmm8 movl %ecx,%edi xorl %ebp,%esi pxor %xmm1,%xmm0 roll $5,%ecx addl %esi,%ebx movdqa %xmm9,%xmm10 xorl %edx,%edi paddd %xmm7,%xmm9 xorl %ebp,%edx pxor %xmm8,%xmm0 addl %ecx,%ebx addl 4(%rsp),%eax andl %edx,%edi xorl %ebp,%edx rorl $7,%ecx movdqa %xmm0,%xmm8 movl %ebx,%esi xorl %edx,%edi movdqa %xmm9,48(%rsp) roll $5,%ebx addl %edi,%eax xorl %ecx,%esi pslld $2,%xmm0 xorl %edx,%ecx addl %ebx,%eax psrld $30,%xmm8 addl 8(%rsp),%ebp andl %ecx,%esi xorl %edx,%ecx por %xmm8,%xmm0 rorl $7,%ebx movl %eax,%edi xorl %ecx,%esi roll $5,%eax pshufd $238,%xmm7,%xmm9 addl %esi,%ebp xorl %ebx,%edi xorl %ecx,%ebx addl %eax,%ebp addl 12(%rsp),%edx andl %ebx,%edi xorl %ecx,%ebx rorl $7,%eax movl %ebp,%esi xorl %ebx,%edi roll $5,%ebp addl %edi,%edx xorl %eax,%esi xorl %ebx,%eax addl %ebp,%edx pxor %xmm5,%xmm1 addl 16(%rsp),%ecx andl %eax,%esi xorl %ebx,%eax rorl $7,%ebp punpcklqdq %xmm0,%xmm9 movl %edx,%edi xorl %eax,%esi pxor %xmm2,%xmm1 roll $5,%edx addl %esi,%ecx movdqa %xmm10,%xmm8 xorl %ebp,%edi paddd %xmm0,%xmm10 xorl %eax,%ebp pxor %xmm9,%xmm1 addl %edx,%ecx addl 20(%rsp),%ebx andl %ebp,%edi xorl %eax,%ebp rorl $7,%edx movdqa %xmm1,%xmm9 movl %ecx,%esi xorl %ebp,%edi movdqa %xmm10,0(%rsp) roll $5,%ecx addl %edi,%ebx xorl %edx,%esi pslld $2,%xmm1 xorl %ebp,%edx addl %ecx,%ebx psrld $30,%xmm9 addl 24(%rsp),%eax andl %edx,%esi xorl %ebp,%edx por %xmm9,%xmm1 rorl $7,%ecx movl %ebx,%edi xorl %edx,%esi roll $5,%ebx pshufd $238,%xmm0,%xmm10 addl %esi,%eax xorl %ecx,%edi xorl %edx,%ecx addl %ebx,%eax addl 28(%rsp),%ebp andl %ecx,%edi xorl %edx,%ecx rorl $7,%ebx movl %eax,%esi xorl %ecx,%edi roll $5,%eax addl %edi,%ebp xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%ebp pxor %xmm6,%xmm2 addl 32(%rsp),%edx andl %ebx,%esi xorl %ecx,%ebx rorl $7,%eax punpcklqdq %xmm1,%xmm10 movl %ebp,%edi xorl %ebx,%esi pxor %xmm3,%xmm2 roll $5,%ebp addl %esi,%edx movdqa %xmm8,%xmm9 xorl %eax,%edi paddd %xmm1,%xmm8 xorl %ebx,%eax pxor %xmm10,%xmm2 addl %ebp,%edx addl 36(%rsp),%ecx andl %eax,%edi xorl 
%ebx,%eax rorl $7,%ebp movdqa %xmm2,%xmm10 movl %edx,%esi xorl %eax,%edi movdqa %xmm8,16(%rsp) roll $5,%edx addl %edi,%ecx xorl %ebp,%esi pslld $2,%xmm2 xorl %eax,%ebp addl %edx,%ecx psrld $30,%xmm10 addl 40(%rsp),%ebx andl %ebp,%esi xorl %eax,%ebp por %xmm10,%xmm2 rorl $7,%edx movl %ecx,%edi xorl %ebp,%esi roll $5,%ecx pshufd $238,%xmm1,%xmm8 addl %esi,%ebx xorl %edx,%edi xorl %ebp,%edx addl %ecx,%ebx addl 44(%rsp),%eax andl %edx,%edi xorl %ebp,%edx rorl $7,%ecx movl %ebx,%esi xorl %edx,%edi roll $5,%ebx addl %edi,%eax xorl %edx,%esi addl %ebx,%eax pxor %xmm7,%xmm3 addl 48(%rsp),%ebp xorl %ecx,%esi punpcklqdq %xmm2,%xmm8 movl %eax,%edi roll $5,%eax pxor %xmm4,%xmm3 addl %esi,%ebp xorl %ecx,%edi movdqa %xmm9,%xmm10 rorl $7,%ebx paddd %xmm2,%xmm9 addl %eax,%ebp pxor %xmm8,%xmm3 addl 52(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi roll $5,%ebp movdqa %xmm3,%xmm8 addl %edi,%edx xorl %ebx,%esi movdqa %xmm9,32(%rsp) rorl $7,%eax addl %ebp,%edx addl 56(%rsp),%ecx pslld $2,%xmm3 xorl %eax,%esi movl %edx,%edi psrld $30,%xmm8 roll $5,%edx addl %esi,%ecx xorl %eax,%edi rorl $7,%ebp por %xmm8,%xmm3 addl %edx,%ecx addl 60(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi roll $5,%ecx addl %edi,%ebx xorl %ebp,%esi rorl $7,%edx addl %ecx,%ebx addl 0(%rsp),%eax xorl %edx,%esi movl %ebx,%edi roll $5,%ebx paddd %xmm3,%xmm10 addl %esi,%eax xorl %edx,%edi movdqa %xmm10,48(%rsp) rorl $7,%ecx addl %ebx,%eax addl 4(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi roll $5,%eax addl %edi,%ebp xorl %ecx,%esi rorl $7,%ebx addl %eax,%ebp addl 8(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi roll $5,%ebp addl %esi,%edx xorl %ebx,%edi rorl $7,%eax addl %ebp,%edx addl 12(%rsp),%ecx xorl %eax,%edi movl %edx,%esi roll $5,%edx addl %edi,%ecx xorl %eax,%esi rorl $7,%ebp addl %edx,%ecx cmpq %r10,%r9 je .Ldone_ssse3 movdqa 64(%r14),%xmm6 movdqa -64(%r14),%xmm9 movdqu 0(%r9),%xmm0 movdqu 16(%r9),%xmm1 movdqu 32(%r9),%xmm2 movdqu 48(%r9),%xmm3 .byte 102,15,56,0,198 addq $64,%r9 addl 16(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi 
.byte 102,15,56,0,206 roll $5,%ecx addl %esi,%ebx xorl %ebp,%edi rorl $7,%edx paddd %xmm9,%xmm0 addl %ecx,%ebx addl 20(%rsp),%eax xorl %edx,%edi movl %ebx,%esi movdqa %xmm0,0(%rsp) roll $5,%ebx addl %edi,%eax xorl %edx,%esi rorl $7,%ecx psubd %xmm9,%xmm0 addl %ebx,%eax addl 24(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi roll $5,%eax addl %esi,%ebp xorl %ecx,%edi rorl $7,%ebx addl %eax,%ebp addl 28(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi roll $5,%ebp addl %edi,%edx xorl %ebx,%esi rorl $7,%eax addl %ebp,%edx addl 32(%rsp),%ecx xorl %eax,%esi movl %edx,%edi .byte 102,15,56,0,214 roll $5,%edx addl %esi,%ecx xorl %eax,%edi rorl $7,%ebp paddd %xmm9,%xmm1 addl %edx,%ecx addl 36(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi movdqa %xmm1,16(%rsp) roll $5,%ecx addl %edi,%ebx xorl %ebp,%esi rorl $7,%edx psubd %xmm9,%xmm1 addl %ecx,%ebx addl 40(%rsp),%eax xorl %edx,%esi movl %ebx,%edi roll $5,%ebx addl %esi,%eax xorl %edx,%edi rorl $7,%ecx addl %ebx,%eax addl 44(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi roll $5,%eax addl %edi,%ebp xorl %ecx,%esi rorl $7,%ebx addl %eax,%ebp addl 48(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi .byte 102,15,56,0,222 roll $5,%ebp addl %esi,%edx xorl %ebx,%edi rorl $7,%eax paddd %xmm9,%xmm2 addl %ebp,%edx addl 52(%rsp),%ecx xorl %eax,%edi movl %edx,%esi movdqa %xmm2,32(%rsp) roll $5,%edx addl %edi,%ecx xorl %eax,%esi rorl $7,%ebp psubd %xmm9,%xmm2 addl %edx,%ecx addl 56(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi roll $5,%ecx addl %esi,%ebx xorl %ebp,%edi rorl $7,%edx addl %ecx,%ebx addl 60(%rsp),%eax xorl %edx,%edi movl %ebx,%esi roll $5,%ebx addl %edi,%eax rorl $7,%ecx addl %ebx,%eax addl 0(%r8),%eax addl 4(%r8),%esi addl 8(%r8),%ecx addl 12(%r8),%edx movl %eax,0(%r8) addl 16(%r8),%ebp movl %esi,4(%r8) movl %esi,%ebx movl %ecx,8(%r8) movl %ecx,%edi movl %edx,12(%r8) xorl %edx,%edi movl %ebp,16(%r8) andl %edi,%esi jmp .Loop_ssse3 .align 16 .Ldone_ssse3: addl 16(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi roll $5,%ecx addl %esi,%ebx xorl %ebp,%edi rorl $7,%edx addl 
%ecx,%ebx addl 20(%rsp),%eax xorl %edx,%edi movl %ebx,%esi roll $5,%ebx addl %edi,%eax xorl %edx,%esi rorl $7,%ecx addl %ebx,%eax addl 24(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi roll $5,%eax addl %esi,%ebp xorl %ecx,%edi rorl $7,%ebx addl %eax,%ebp addl 28(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi roll $5,%ebp addl %edi,%edx xorl %ebx,%esi rorl $7,%eax addl %ebp,%edx addl 32(%rsp),%ecx xorl %eax,%esi movl %edx,%edi roll $5,%edx addl %esi,%ecx xorl %eax,%edi rorl $7,%ebp addl %edx,%ecx addl 36(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi roll $5,%ecx addl %edi,%ebx xorl %ebp,%esi rorl $7,%edx addl %ecx,%ebx addl 40(%rsp),%eax xorl %edx,%esi movl %ebx,%edi roll $5,%ebx addl %esi,%eax xorl %edx,%edi rorl $7,%ecx addl %ebx,%eax addl 44(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi roll $5,%eax addl %edi,%ebp xorl %ecx,%esi rorl $7,%ebx addl %eax,%ebp addl 48(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi roll $5,%ebp addl %esi,%edx xorl %ebx,%edi rorl $7,%eax addl %ebp,%edx addl 52(%rsp),%ecx xorl %eax,%edi movl %edx,%esi roll $5,%edx addl %edi,%ecx xorl %eax,%esi rorl $7,%ebp addl %edx,%ecx addl 56(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi roll $5,%ecx addl %esi,%ebx xorl %ebp,%edi rorl $7,%edx addl %ecx,%ebx addl 60(%rsp),%eax xorl %edx,%edi movl %ebx,%esi roll $5,%ebx addl %edi,%eax rorl $7,%ecx addl %ebx,%eax addl 0(%r8),%eax addl 4(%r8),%esi addl 8(%r8),%ecx movl %eax,0(%r8) addl 12(%r8),%edx movl %esi,4(%r8) addl 16(%r8),%ebp movl %ecx,8(%r8) movl %edx,12(%r8) movl %ebp,16(%r8) movq -40(%r11),%r14 .cfi_restore %r14 movq -32(%r11),%r13 .cfi_restore %r13 movq -24(%r11),%r12 .cfi_restore %r12 movq -16(%r11),%rbp .cfi_restore %rbp movq -8(%r11),%rbx .cfi_restore %rbx leaq (%r11),%rsp .cfi_def_cfa_register %rsp .Lepilogue_ssse3: ret .cfi_endproc .size sha1_block_data_order_ssse3,.-sha1_block_data_order_ssse3 .globl sha1_block_data_order_avx .hidden sha1_block_data_order_avx .type sha1_block_data_order_avx,@function .align 16 sha1_block_data_order_avx: .cfi_startproc _CET_ENDBR movq 
%rsp,%r11 .cfi_def_cfa_register %r11 pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 leaq -64(%rsp),%rsp vzeroupper andq $-64,%rsp movq %rdi,%r8 movq %rsi,%r9 movq %rdx,%r10 shlq $6,%r10 addq %r9,%r10 leaq K_XX_XX+64(%rip),%r14 movl 0(%r8),%eax movl 4(%r8),%ebx movl 8(%r8),%ecx movl 12(%r8),%edx movl %ebx,%esi movl 16(%r8),%ebp movl %ecx,%edi xorl %edx,%edi andl %edi,%esi vmovdqa 64(%r14),%xmm6 vmovdqa -64(%r14),%xmm11 vmovdqu 0(%r9),%xmm0 vmovdqu 16(%r9),%xmm1 vmovdqu 32(%r9),%xmm2 vmovdqu 48(%r9),%xmm3 vpshufb %xmm6,%xmm0,%xmm0 addq $64,%r9 vpshufb %xmm6,%xmm1,%xmm1 vpshufb %xmm6,%xmm2,%xmm2 vpshufb %xmm6,%xmm3,%xmm3 vpaddd %xmm11,%xmm0,%xmm4 vpaddd %xmm11,%xmm1,%xmm5 vpaddd %xmm11,%xmm2,%xmm6 vmovdqa %xmm4,0(%rsp) vmovdqa %xmm5,16(%rsp) vmovdqa %xmm6,32(%rsp) jmp .Loop_avx .align 16 .Loop_avx: shrdl $2,%ebx,%ebx xorl %edx,%esi vpalignr $8,%xmm0,%xmm1,%xmm4 movl %eax,%edi addl 0(%rsp),%ebp vpaddd %xmm3,%xmm11,%xmm9 xorl %ecx,%ebx shldl $5,%eax,%eax vpsrldq $4,%xmm3,%xmm8 addl %esi,%ebp andl %ebx,%edi vpxor %xmm0,%xmm4,%xmm4 xorl %ecx,%ebx addl %eax,%ebp vpxor %xmm2,%xmm8,%xmm8 shrdl $7,%eax,%eax xorl %ecx,%edi movl %ebp,%esi addl 4(%rsp),%edx vpxor %xmm8,%xmm4,%xmm4 xorl %ebx,%eax shldl $5,%ebp,%ebp vmovdqa %xmm9,48(%rsp) addl %edi,%edx andl %eax,%esi vpsrld $31,%xmm4,%xmm8 xorl %ebx,%eax addl %ebp,%edx shrdl $7,%ebp,%ebp xorl %ebx,%esi vpslldq $12,%xmm4,%xmm10 vpaddd %xmm4,%xmm4,%xmm4 movl %edx,%edi addl 8(%rsp),%ecx xorl %eax,%ebp shldl $5,%edx,%edx vpsrld $30,%xmm10,%xmm9 vpor %xmm8,%xmm4,%xmm4 addl %esi,%ecx andl %ebp,%edi xorl %eax,%ebp addl %edx,%ecx vpslld $2,%xmm10,%xmm10 vpxor %xmm9,%xmm4,%xmm4 shrdl $7,%edx,%edx xorl %eax,%edi movl %ecx,%esi addl 12(%rsp),%ebx vpxor %xmm10,%xmm4,%xmm4 xorl %ebp,%edx shldl $5,%ecx,%ecx addl %edi,%ebx andl %edx,%esi xorl %ebp,%edx addl %ecx,%ebx shrdl $7,%ecx,%ecx xorl %ebp,%esi vpalignr $8,%xmm1,%xmm2,%xmm5 
movl %ebx,%edi addl 16(%rsp),%eax vpaddd %xmm4,%xmm11,%xmm9 xorl %edx,%ecx shldl $5,%ebx,%ebx vpsrldq $4,%xmm4,%xmm8 addl %esi,%eax andl %ecx,%edi vpxor %xmm1,%xmm5,%xmm5 xorl %edx,%ecx addl %ebx,%eax vpxor %xmm3,%xmm8,%xmm8 shrdl $7,%ebx,%ebx xorl %edx,%edi movl %eax,%esi addl 20(%rsp),%ebp vpxor %xmm8,%xmm5,%xmm5 xorl %ecx,%ebx shldl $5,%eax,%eax vmovdqa %xmm9,0(%rsp) addl %edi,%ebp andl %ebx,%esi vpsrld $31,%xmm5,%xmm8 xorl %ecx,%ebx addl %eax,%ebp shrdl $7,%eax,%eax xorl %ecx,%esi vpslldq $12,%xmm5,%xmm10 vpaddd %xmm5,%xmm5,%xmm5 movl %ebp,%edi addl 24(%rsp),%edx xorl %ebx,%eax shldl $5,%ebp,%ebp vpsrld $30,%xmm10,%xmm9 vpor %xmm8,%xmm5,%xmm5 addl %esi,%edx andl %eax,%edi xorl %ebx,%eax addl %ebp,%edx vpslld $2,%xmm10,%xmm10 vpxor %xmm9,%xmm5,%xmm5 shrdl $7,%ebp,%ebp xorl %ebx,%edi movl %edx,%esi addl 28(%rsp),%ecx vpxor %xmm10,%xmm5,%xmm5 xorl %eax,%ebp shldl $5,%edx,%edx vmovdqa -32(%r14),%xmm11 addl %edi,%ecx andl %ebp,%esi xorl %eax,%ebp addl %edx,%ecx shrdl $7,%edx,%edx xorl %eax,%esi vpalignr $8,%xmm2,%xmm3,%xmm6 movl %ecx,%edi addl 32(%rsp),%ebx vpaddd %xmm5,%xmm11,%xmm9 xorl %ebp,%edx shldl $5,%ecx,%ecx vpsrldq $4,%xmm5,%xmm8 addl %esi,%ebx andl %edx,%edi vpxor %xmm2,%xmm6,%xmm6 xorl %ebp,%edx addl %ecx,%ebx vpxor %xmm4,%xmm8,%xmm8 shrdl $7,%ecx,%ecx xorl %ebp,%edi movl %ebx,%esi addl 36(%rsp),%eax vpxor %xmm8,%xmm6,%xmm6 xorl %edx,%ecx shldl $5,%ebx,%ebx vmovdqa %xmm9,16(%rsp) addl %edi,%eax andl %ecx,%esi vpsrld $31,%xmm6,%xmm8 xorl %edx,%ecx addl %ebx,%eax shrdl $7,%ebx,%ebx xorl %edx,%esi vpslldq $12,%xmm6,%xmm10 vpaddd %xmm6,%xmm6,%xmm6 movl %eax,%edi addl 40(%rsp),%ebp xorl %ecx,%ebx shldl $5,%eax,%eax vpsrld $30,%xmm10,%xmm9 vpor %xmm8,%xmm6,%xmm6 addl %esi,%ebp andl %ebx,%edi xorl %ecx,%ebx addl %eax,%ebp vpslld $2,%xmm10,%xmm10 vpxor %xmm9,%xmm6,%xmm6 shrdl $7,%eax,%eax xorl %ecx,%edi movl %ebp,%esi addl 44(%rsp),%edx vpxor %xmm10,%xmm6,%xmm6 xorl %ebx,%eax shldl $5,%ebp,%ebp addl %edi,%edx andl %eax,%esi xorl %ebx,%eax addl %ebp,%edx shrdl 
$7,%ebp,%ebp xorl %ebx,%esi vpalignr $8,%xmm3,%xmm4,%xmm7 movl %edx,%edi addl 48(%rsp),%ecx vpaddd %xmm6,%xmm11,%xmm9 xorl %eax,%ebp shldl $5,%edx,%edx vpsrldq $4,%xmm6,%xmm8 addl %esi,%ecx andl %ebp,%edi vpxor %xmm3,%xmm7,%xmm7 xorl %eax,%ebp addl %edx,%ecx vpxor %xmm5,%xmm8,%xmm8 shrdl $7,%edx,%edx xorl %eax,%edi movl %ecx,%esi addl 52(%rsp),%ebx vpxor %xmm8,%xmm7,%xmm7 xorl %ebp,%edx shldl $5,%ecx,%ecx vmovdqa %xmm9,32(%rsp) addl %edi,%ebx andl %edx,%esi vpsrld $31,%xmm7,%xmm8 xorl %ebp,%edx addl %ecx,%ebx shrdl $7,%ecx,%ecx xorl %ebp,%esi vpslldq $12,%xmm7,%xmm10 vpaddd %xmm7,%xmm7,%xmm7 movl %ebx,%edi addl 56(%rsp),%eax xorl %edx,%ecx shldl $5,%ebx,%ebx vpsrld $30,%xmm10,%xmm9 vpor %xmm8,%xmm7,%xmm7 addl %esi,%eax andl %ecx,%edi xorl %edx,%ecx addl %ebx,%eax vpslld $2,%xmm10,%xmm10 vpxor %xmm9,%xmm7,%xmm7 shrdl $7,%ebx,%ebx xorl %edx,%edi movl %eax,%esi addl 60(%rsp),%ebp vpxor %xmm10,%xmm7,%xmm7 xorl %ecx,%ebx shldl $5,%eax,%eax addl %edi,%ebp andl %ebx,%esi xorl %ecx,%ebx addl %eax,%ebp vpalignr $8,%xmm6,%xmm7,%xmm8 vpxor %xmm4,%xmm0,%xmm0 shrdl $7,%eax,%eax xorl %ecx,%esi movl %ebp,%edi addl 0(%rsp),%edx vpxor %xmm1,%xmm0,%xmm0 xorl %ebx,%eax shldl $5,%ebp,%ebp vpaddd %xmm7,%xmm11,%xmm9 addl %esi,%edx andl %eax,%edi vpxor %xmm8,%xmm0,%xmm0 xorl %ebx,%eax addl %ebp,%edx shrdl $7,%ebp,%ebp xorl %ebx,%edi vpsrld $30,%xmm0,%xmm8 vmovdqa %xmm9,48(%rsp) movl %edx,%esi addl 4(%rsp),%ecx xorl %eax,%ebp shldl $5,%edx,%edx vpslld $2,%xmm0,%xmm0 addl %edi,%ecx andl %ebp,%esi xorl %eax,%ebp addl %edx,%ecx shrdl $7,%edx,%edx xorl %eax,%esi movl %ecx,%edi addl 8(%rsp),%ebx vpor %xmm8,%xmm0,%xmm0 xorl %ebp,%edx shldl $5,%ecx,%ecx addl %esi,%ebx andl %edx,%edi xorl %ebp,%edx addl %ecx,%ebx addl 12(%rsp),%eax xorl %ebp,%edi movl %ebx,%esi shldl $5,%ebx,%ebx addl %edi,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax vpalignr $8,%xmm7,%xmm0,%xmm8 vpxor %xmm5,%xmm1,%xmm1 addl 16(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi shldl $5,%eax,%eax vpxor %xmm2,%xmm1,%xmm1 addl 
%esi,%ebp xorl %ecx,%edi vpaddd %xmm0,%xmm11,%xmm9 shrdl $7,%ebx,%ebx addl %eax,%ebp vpxor %xmm8,%xmm1,%xmm1 addl 20(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi shldl $5,%ebp,%ebp vpsrld $30,%xmm1,%xmm8 vmovdqa %xmm9,0(%rsp) addl %edi,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %ebp,%edx vpslld $2,%xmm1,%xmm1 addl 24(%rsp),%ecx xorl %eax,%esi movl %edx,%edi shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%edi shrdl $7,%ebp,%ebp addl %edx,%ecx vpor %xmm8,%xmm1,%xmm1 addl 28(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi shldl $5,%ecx,%ecx addl %edi,%ebx xorl %ebp,%esi shrdl $7,%edx,%edx addl %ecx,%ebx vpalignr $8,%xmm0,%xmm1,%xmm8 vpxor %xmm6,%xmm2,%xmm2 addl 32(%rsp),%eax xorl %edx,%esi movl %ebx,%edi shldl $5,%ebx,%ebx vpxor %xmm3,%xmm2,%xmm2 addl %esi,%eax xorl %edx,%edi vpaddd %xmm1,%xmm11,%xmm9 vmovdqa 0(%r14),%xmm11 shrdl $7,%ecx,%ecx addl %ebx,%eax vpxor %xmm8,%xmm2,%xmm2 addl 36(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi shldl $5,%eax,%eax vpsrld $30,%xmm2,%xmm8 vmovdqa %xmm9,16(%rsp) addl %edi,%ebp xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%ebp vpslld $2,%xmm2,%xmm2 addl 40(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi shldl $5,%ebp,%ebp addl %esi,%edx xorl %ebx,%edi shrdl $7,%eax,%eax addl %ebp,%edx vpor %xmm8,%xmm2,%xmm2 addl 44(%rsp),%ecx xorl %eax,%edi movl %edx,%esi shldl $5,%edx,%edx addl %edi,%ecx xorl %eax,%esi shrdl $7,%ebp,%ebp addl %edx,%ecx vpalignr $8,%xmm1,%xmm2,%xmm8 vpxor %xmm7,%xmm3,%xmm3 addl 48(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi shldl $5,%ecx,%ecx vpxor %xmm4,%xmm3,%xmm3 addl %esi,%ebx xorl %ebp,%edi vpaddd %xmm2,%xmm11,%xmm9 shrdl $7,%edx,%edx addl %ecx,%ebx vpxor %xmm8,%xmm3,%xmm3 addl 52(%rsp),%eax xorl %edx,%edi movl %ebx,%esi shldl $5,%ebx,%ebx vpsrld $30,%xmm3,%xmm8 vmovdqa %xmm9,32(%rsp) addl %edi,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax vpslld $2,%xmm3,%xmm3 addl 56(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi shldl $5,%eax,%eax addl %esi,%ebp xorl %ecx,%edi shrdl $7,%ebx,%ebx addl %eax,%ebp vpor %xmm8,%xmm3,%xmm3 addl 60(%rsp),%edx 
xorl %ebx,%edi movl %ebp,%esi shldl $5,%ebp,%ebp addl %edi,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %ebp,%edx vpalignr $8,%xmm2,%xmm3,%xmm8 vpxor %xmm0,%xmm4,%xmm4 addl 0(%rsp),%ecx xorl %eax,%esi movl %edx,%edi shldl $5,%edx,%edx vpxor %xmm5,%xmm4,%xmm4 addl %esi,%ecx xorl %eax,%edi vpaddd %xmm3,%xmm11,%xmm9 shrdl $7,%ebp,%ebp addl %edx,%ecx vpxor %xmm8,%xmm4,%xmm4 addl 4(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi shldl $5,%ecx,%ecx vpsrld $30,%xmm4,%xmm8 vmovdqa %xmm9,48(%rsp) addl %edi,%ebx xorl %ebp,%esi shrdl $7,%edx,%edx addl %ecx,%ebx vpslld $2,%xmm4,%xmm4 addl 8(%rsp),%eax xorl %edx,%esi movl %ebx,%edi shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%edi shrdl $7,%ecx,%ecx addl %ebx,%eax vpor %xmm8,%xmm4,%xmm4 addl 12(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi shldl $5,%eax,%eax addl %edi,%ebp xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%ebp vpalignr $8,%xmm3,%xmm4,%xmm8 vpxor %xmm1,%xmm5,%xmm5 addl 16(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi shldl $5,%ebp,%ebp vpxor %xmm6,%xmm5,%xmm5 addl %esi,%edx xorl %ebx,%edi vpaddd %xmm4,%xmm11,%xmm9 shrdl $7,%eax,%eax addl %ebp,%edx vpxor %xmm8,%xmm5,%xmm5 addl 20(%rsp),%ecx xorl %eax,%edi movl %edx,%esi shldl $5,%edx,%edx vpsrld $30,%xmm5,%xmm8 vmovdqa %xmm9,0(%rsp) addl %edi,%ecx xorl %eax,%esi shrdl $7,%ebp,%ebp addl %edx,%ecx vpslld $2,%xmm5,%xmm5 addl 24(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %ebp,%edi shrdl $7,%edx,%edx addl %ecx,%ebx vpor %xmm8,%xmm5,%xmm5 addl 28(%rsp),%eax shrdl $7,%ecx,%ecx movl %ebx,%esi xorl %edx,%edi shldl $5,%ebx,%ebx addl %edi,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax vpalignr $8,%xmm4,%xmm5,%xmm8 vpxor %xmm2,%xmm6,%xmm6 addl 32(%rsp),%ebp andl %ecx,%esi xorl %edx,%ecx shrdl $7,%ebx,%ebx vpxor %xmm7,%xmm6,%xmm6 movl %eax,%edi xorl %ecx,%esi vpaddd %xmm5,%xmm11,%xmm9 shldl $5,%eax,%eax addl %esi,%ebp vpxor %xmm8,%xmm6,%xmm6 xorl %ebx,%edi xorl %ecx,%ebx addl %eax,%ebp addl 36(%rsp),%edx vpsrld $30,%xmm6,%xmm8 vmovdqa %xmm9,16(%rsp) andl 
%ebx,%edi xorl %ecx,%ebx shrdl $7,%eax,%eax movl %ebp,%esi vpslld $2,%xmm6,%xmm6 xorl %ebx,%edi shldl $5,%ebp,%ebp addl %edi,%edx xorl %eax,%esi xorl %ebx,%eax addl %ebp,%edx addl 40(%rsp),%ecx andl %eax,%esi vpor %xmm8,%xmm6,%xmm6 xorl %ebx,%eax shrdl $7,%ebp,%ebp movl %edx,%edi xorl %eax,%esi shldl $5,%edx,%edx addl %esi,%ecx xorl %ebp,%edi xorl %eax,%ebp addl %edx,%ecx addl 44(%rsp),%ebx andl %ebp,%edi xorl %eax,%ebp shrdl $7,%edx,%edx movl %ecx,%esi xorl %ebp,%edi shldl $5,%ecx,%ecx addl %edi,%ebx xorl %edx,%esi xorl %ebp,%edx addl %ecx,%ebx vpalignr $8,%xmm5,%xmm6,%xmm8 vpxor %xmm3,%xmm7,%xmm7 addl 48(%rsp),%eax andl %edx,%esi xorl %ebp,%edx shrdl $7,%ecx,%ecx vpxor %xmm0,%xmm7,%xmm7 movl %ebx,%edi xorl %edx,%esi vpaddd %xmm6,%xmm11,%xmm9 vmovdqa 32(%r14),%xmm11 shldl $5,%ebx,%ebx addl %esi,%eax vpxor %xmm8,%xmm7,%xmm7 xorl %ecx,%edi xorl %edx,%ecx addl %ebx,%eax addl 52(%rsp),%ebp vpsrld $30,%xmm7,%xmm8 vmovdqa %xmm9,32(%rsp) andl %ecx,%edi xorl %edx,%ecx shrdl $7,%ebx,%ebx movl %eax,%esi vpslld $2,%xmm7,%xmm7 xorl %ecx,%edi shldl $5,%eax,%eax addl %edi,%ebp xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%ebp addl 56(%rsp),%edx andl %ebx,%esi vpor %xmm8,%xmm7,%xmm7 xorl %ecx,%ebx shrdl $7,%eax,%eax movl %ebp,%edi xorl %ebx,%esi shldl $5,%ebp,%ebp addl %esi,%edx xorl %eax,%edi xorl %ebx,%eax addl %ebp,%edx addl 60(%rsp),%ecx andl %eax,%edi xorl %ebx,%eax shrdl $7,%ebp,%ebp movl %edx,%esi xorl %eax,%edi shldl $5,%edx,%edx addl %edi,%ecx xorl %ebp,%esi xorl %eax,%ebp addl %edx,%ecx vpalignr $8,%xmm6,%xmm7,%xmm8 vpxor %xmm4,%xmm0,%xmm0 addl 0(%rsp),%ebx andl %ebp,%esi xorl %eax,%ebp shrdl $7,%edx,%edx vpxor %xmm1,%xmm0,%xmm0 movl %ecx,%edi xorl %ebp,%esi vpaddd %xmm7,%xmm11,%xmm9 shldl $5,%ecx,%ecx addl %esi,%ebx vpxor %xmm8,%xmm0,%xmm0 xorl %edx,%edi xorl %ebp,%edx addl %ecx,%ebx addl 4(%rsp),%eax vpsrld $30,%xmm0,%xmm8 vmovdqa %xmm9,48(%rsp) andl %edx,%edi xorl %ebp,%edx shrdl $7,%ecx,%ecx movl %ebx,%esi vpslld $2,%xmm0,%xmm0 xorl %edx,%edi shldl $5,%ebx,%ebx addl 
%edi,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax addl 8(%rsp),%ebp andl %ecx,%esi vpor %xmm8,%xmm0,%xmm0 xorl %edx,%ecx shrdl $7,%ebx,%ebx movl %eax,%edi xorl %ecx,%esi shldl $5,%eax,%eax addl %esi,%ebp xorl %ebx,%edi xorl %ecx,%ebx addl %eax,%ebp addl 12(%rsp),%edx andl %ebx,%edi xorl %ecx,%ebx shrdl $7,%eax,%eax movl %ebp,%esi xorl %ebx,%edi shldl $5,%ebp,%ebp addl %edi,%edx xorl %eax,%esi xorl %ebx,%eax addl %ebp,%edx vpalignr $8,%xmm7,%xmm0,%xmm8 vpxor %xmm5,%xmm1,%xmm1 addl 16(%rsp),%ecx andl %eax,%esi xorl %ebx,%eax shrdl $7,%ebp,%ebp vpxor %xmm2,%xmm1,%xmm1 movl %edx,%edi xorl %eax,%esi vpaddd %xmm0,%xmm11,%xmm9 shldl $5,%edx,%edx addl %esi,%ecx vpxor %xmm8,%xmm1,%xmm1 xorl %ebp,%edi xorl %eax,%ebp addl %edx,%ecx addl 20(%rsp),%ebx vpsrld $30,%xmm1,%xmm8 vmovdqa %xmm9,0(%rsp) andl %ebp,%edi xorl %eax,%ebp shrdl $7,%edx,%edx movl %ecx,%esi vpslld $2,%xmm1,%xmm1 xorl %ebp,%edi shldl $5,%ecx,%ecx addl %edi,%ebx xorl %edx,%esi xorl %ebp,%edx addl %ecx,%ebx addl 24(%rsp),%eax andl %edx,%esi vpor %xmm8,%xmm1,%xmm1 xorl %ebp,%edx shrdl $7,%ecx,%ecx movl %ebx,%edi xorl %edx,%esi shldl $5,%ebx,%ebx addl %esi,%eax xorl %ecx,%edi xorl %edx,%ecx addl %ebx,%eax addl 28(%rsp),%ebp andl %ecx,%edi xorl %edx,%ecx shrdl $7,%ebx,%ebx movl %eax,%esi xorl %ecx,%edi shldl $5,%eax,%eax addl %edi,%ebp xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%ebp vpalignr $8,%xmm0,%xmm1,%xmm8 vpxor %xmm6,%xmm2,%xmm2 addl 32(%rsp),%edx andl %ebx,%esi xorl %ecx,%ebx shrdl $7,%eax,%eax vpxor %xmm3,%xmm2,%xmm2 movl %ebp,%edi xorl %ebx,%esi vpaddd %xmm1,%xmm11,%xmm9 shldl $5,%ebp,%ebp addl %esi,%edx vpxor %xmm8,%xmm2,%xmm2 xorl %eax,%edi xorl %ebx,%eax addl %ebp,%edx addl 36(%rsp),%ecx vpsrld $30,%xmm2,%xmm8 vmovdqa %xmm9,16(%rsp) andl %eax,%edi xorl %ebx,%eax shrdl $7,%ebp,%ebp movl %edx,%esi vpslld $2,%xmm2,%xmm2 xorl %eax,%edi shldl $5,%edx,%edx addl %edi,%ecx xorl %ebp,%esi xorl %eax,%ebp addl %edx,%ecx addl 40(%rsp),%ebx andl %ebp,%esi vpor %xmm8,%xmm2,%xmm2 xorl %eax,%ebp shrdl $7,%edx,%edx 
movl %ecx,%edi xorl %ebp,%esi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edx,%edi xorl %ebp,%edx addl %ecx,%ebx addl 44(%rsp),%eax andl %edx,%edi xorl %ebp,%edx shrdl $7,%ecx,%ecx movl %ebx,%esi xorl %edx,%edi shldl $5,%ebx,%ebx addl %edi,%eax xorl %edx,%esi addl %ebx,%eax vpalignr $8,%xmm1,%xmm2,%xmm8 vpxor %xmm7,%xmm3,%xmm3 addl 48(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi shldl $5,%eax,%eax vpxor %xmm4,%xmm3,%xmm3 addl %esi,%ebp xorl %ecx,%edi vpaddd %xmm2,%xmm11,%xmm9 shrdl $7,%ebx,%ebx addl %eax,%ebp vpxor %xmm8,%xmm3,%xmm3 addl 52(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi shldl $5,%ebp,%ebp vpsrld $30,%xmm3,%xmm8 vmovdqa %xmm9,32(%rsp) addl %edi,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %ebp,%edx vpslld $2,%xmm3,%xmm3 addl 56(%rsp),%ecx xorl %eax,%esi movl %edx,%edi shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%edi shrdl $7,%ebp,%ebp addl %edx,%ecx vpor %xmm8,%xmm3,%xmm3 addl 60(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi shldl $5,%ecx,%ecx addl %edi,%ebx xorl %ebp,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl 0(%rsp),%eax vpaddd %xmm3,%xmm11,%xmm9 xorl %edx,%esi movl %ebx,%edi shldl $5,%ebx,%ebx addl %esi,%eax vmovdqa %xmm9,48(%rsp) xorl %edx,%edi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 4(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi shldl $5,%eax,%eax addl %edi,%ebp xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%ebp addl 8(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi shldl $5,%ebp,%ebp addl %esi,%edx xorl %ebx,%edi shrdl $7,%eax,%eax addl %ebp,%edx addl 12(%rsp),%ecx xorl %eax,%edi movl %edx,%esi shldl $5,%edx,%edx addl %edi,%ecx xorl %eax,%esi shrdl $7,%ebp,%ebp addl %edx,%ecx cmpq %r10,%r9 je .Ldone_avx vmovdqa 64(%r14),%xmm6 vmovdqa -64(%r14),%xmm11 vmovdqu 0(%r9),%xmm0 vmovdqu 16(%r9),%xmm1 vmovdqu 32(%r9),%xmm2 vmovdqu 48(%r9),%xmm3 vpshufb %xmm6,%xmm0,%xmm0 addq $64,%r9 addl 16(%rsp),%ebx xorl %ebp,%esi vpshufb %xmm6,%xmm1,%xmm1 movl %ecx,%edi shldl $5,%ecx,%ecx vpaddd %xmm11,%xmm0,%xmm4 addl %esi,%ebx xorl %ebp,%edi shrdl $7,%edx,%edx addl %ecx,%ebx vmovdqa %xmm4,0(%rsp) 
addl 20(%rsp),%eax xorl %edx,%edi movl %ebx,%esi shldl $5,%ebx,%ebx addl %edi,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 24(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi shldl $5,%eax,%eax addl %esi,%ebp xorl %ecx,%edi shrdl $7,%ebx,%ebx addl %eax,%ebp addl 28(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi shldl $5,%ebp,%ebp addl %edi,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %ebp,%edx addl 32(%rsp),%ecx xorl %eax,%esi vpshufb %xmm6,%xmm2,%xmm2 movl %edx,%edi shldl $5,%edx,%edx vpaddd %xmm11,%xmm1,%xmm5 addl %esi,%ecx xorl %eax,%edi shrdl $7,%ebp,%ebp addl %edx,%ecx vmovdqa %xmm5,16(%rsp) addl 36(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi shldl $5,%ecx,%ecx addl %edi,%ebx xorl %ebp,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl 40(%rsp),%eax xorl %edx,%esi movl %ebx,%edi shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%edi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 44(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi shldl $5,%eax,%eax addl %edi,%ebp xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%ebp addl 48(%rsp),%edx xorl %ebx,%esi vpshufb %xmm6,%xmm3,%xmm3 movl %ebp,%edi shldl $5,%ebp,%ebp vpaddd %xmm11,%xmm2,%xmm6 addl %esi,%edx xorl %ebx,%edi shrdl $7,%eax,%eax addl %ebp,%edx vmovdqa %xmm6,32(%rsp) addl 52(%rsp),%ecx xorl %eax,%edi movl %edx,%esi shldl $5,%edx,%edx addl %edi,%ecx xorl %eax,%esi shrdl $7,%ebp,%ebp addl %edx,%ecx addl 56(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %ebp,%edi shrdl $7,%edx,%edx addl %ecx,%ebx addl 60(%rsp),%eax xorl %edx,%edi movl %ebx,%esi shldl $5,%ebx,%ebx addl %edi,%eax shrdl $7,%ecx,%ecx addl %ebx,%eax addl 0(%r8),%eax addl 4(%r8),%esi addl 8(%r8),%ecx addl 12(%r8),%edx movl %eax,0(%r8) addl 16(%r8),%ebp movl %esi,4(%r8) movl %esi,%ebx movl %ecx,8(%r8) movl %ecx,%edi movl %edx,12(%r8) xorl %edx,%edi movl %ebp,16(%r8) andl %edi,%esi jmp .Loop_avx .align 16 .Ldone_avx: addl 16(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %ebp,%edi shrdl $7,%edx,%edx addl %ecx,%ebx addl 
20(%rsp),%eax xorl %edx,%edi movl %ebx,%esi shldl $5,%ebx,%ebx addl %edi,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 24(%rsp),%ebp xorl %ecx,%esi movl %eax,%edi shldl $5,%eax,%eax addl %esi,%ebp xorl %ecx,%edi shrdl $7,%ebx,%ebx addl %eax,%ebp addl 28(%rsp),%edx xorl %ebx,%edi movl %ebp,%esi shldl $5,%ebp,%ebp addl %edi,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %ebp,%edx addl 32(%rsp),%ecx xorl %eax,%esi movl %edx,%edi shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%edi shrdl $7,%ebp,%ebp addl %edx,%ecx addl 36(%rsp),%ebx xorl %ebp,%edi movl %ecx,%esi shldl $5,%ecx,%ecx addl %edi,%ebx xorl %ebp,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl 40(%rsp),%eax xorl %edx,%esi movl %ebx,%edi shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%edi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 44(%rsp),%ebp xorl %ecx,%edi movl %eax,%esi shldl $5,%eax,%eax addl %edi,%ebp xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%ebp addl 48(%rsp),%edx xorl %ebx,%esi movl %ebp,%edi shldl $5,%ebp,%ebp addl %esi,%edx xorl %ebx,%edi shrdl $7,%eax,%eax addl %ebp,%edx addl 52(%rsp),%ecx xorl %eax,%edi movl %edx,%esi shldl $5,%edx,%edx addl %edi,%ecx xorl %eax,%esi shrdl $7,%ebp,%ebp addl %edx,%ecx addl 56(%rsp),%ebx xorl %ebp,%esi movl %ecx,%edi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %ebp,%edi shrdl $7,%edx,%edx addl %ecx,%ebx addl 60(%rsp),%eax xorl %edx,%edi movl %ebx,%esi shldl $5,%ebx,%ebx addl %edi,%eax shrdl $7,%ecx,%ecx addl %ebx,%eax vzeroupper addl 0(%r8),%eax addl 4(%r8),%esi addl 8(%r8),%ecx movl %eax,0(%r8) addl 12(%r8),%edx movl %esi,4(%r8) addl 16(%r8),%ebp movl %ecx,8(%r8) movl %edx,12(%r8) movl %ebp,16(%r8) movq -40(%r11),%r14 .cfi_restore %r14 movq -32(%r11),%r13 .cfi_restore %r13 movq -24(%r11),%r12 .cfi_restore %r12 movq -16(%r11),%rbp .cfi_restore %rbp movq -8(%r11),%rbx .cfi_restore %rbx leaq (%r11),%rsp .cfi_def_cfa_register %rsp .Lepilogue_avx: ret .cfi_endproc .size sha1_block_data_order_avx,.-sha1_block_data_order_avx .globl sha1_block_data_order_avx2 .hidden 
sha1_block_data_order_avx2 .type sha1_block_data_order_avx2,@function .align 16 sha1_block_data_order_avx2: .cfi_startproc _CET_ENDBR movq %rsp,%r11 .cfi_def_cfa_register %r11 pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 vzeroupper movq %rdi,%r8 movq %rsi,%r9 movq %rdx,%r10 leaq -640(%rsp),%rsp shlq $6,%r10 leaq 64(%r9),%r13 andq $-128,%rsp addq %r9,%r10 leaq K_XX_XX+64(%rip),%r14 movl 0(%r8),%eax cmpq %r10,%r13 cmovaeq %r9,%r13 movl 4(%r8),%ebp movl 8(%r8),%ecx movl 12(%r8),%edx movl 16(%r8),%esi vmovdqu 64(%r14),%ymm6 vmovdqu (%r9),%xmm0 vmovdqu 16(%r9),%xmm1 vmovdqu 32(%r9),%xmm2 vmovdqu 48(%r9),%xmm3 leaq 64(%r9),%r9 vinserti128 $1,(%r13),%ymm0,%ymm0 vinserti128 $1,16(%r13),%ymm1,%ymm1 vpshufb %ymm6,%ymm0,%ymm0 vinserti128 $1,32(%r13),%ymm2,%ymm2 vpshufb %ymm6,%ymm1,%ymm1 vinserti128 $1,48(%r13),%ymm3,%ymm3 vpshufb %ymm6,%ymm2,%ymm2 vmovdqu -64(%r14),%ymm11 vpshufb %ymm6,%ymm3,%ymm3 vpaddd %ymm11,%ymm0,%ymm4 vpaddd %ymm11,%ymm1,%ymm5 vmovdqu %ymm4,0(%rsp) vpaddd %ymm11,%ymm2,%ymm6 vmovdqu %ymm5,32(%rsp) vpaddd %ymm11,%ymm3,%ymm7 vmovdqu %ymm6,64(%rsp) vmovdqu %ymm7,96(%rsp) vpalignr $8,%ymm0,%ymm1,%ymm4 vpsrldq $4,%ymm3,%ymm8 vpxor %ymm0,%ymm4,%ymm4 vpxor %ymm2,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $31,%ymm4,%ymm8 vpslldq $12,%ymm4,%ymm10 vpaddd %ymm4,%ymm4,%ymm4 vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm4,%ymm4 vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm4,%ymm4 vpxor %ymm10,%ymm4,%ymm4 vpaddd %ymm11,%ymm4,%ymm9 vmovdqu %ymm9,128(%rsp) vpalignr $8,%ymm1,%ymm2,%ymm5 vpsrldq $4,%ymm4,%ymm8 vpxor %ymm1,%ymm5,%ymm5 vpxor %ymm3,%ymm8,%ymm8 vpxor %ymm8,%ymm5,%ymm5 vpsrld $31,%ymm5,%ymm8 vmovdqu -32(%r14),%ymm11 vpslldq $12,%ymm5,%ymm10 vpaddd %ymm5,%ymm5,%ymm5 vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm5,%ymm5 vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm10,%ymm5,%ymm5 vpaddd %ymm11,%ymm5,%ymm9 vmovdqu %ymm9,160(%rsp) vpalignr 
$8,%ymm2,%ymm3,%ymm6 vpsrldq $4,%ymm5,%ymm8 vpxor %ymm2,%ymm6,%ymm6 vpxor %ymm4,%ymm8,%ymm8 vpxor %ymm8,%ymm6,%ymm6 vpsrld $31,%ymm6,%ymm8 vpslldq $12,%ymm6,%ymm10 vpaddd %ymm6,%ymm6,%ymm6 vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm6,%ymm6 vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm6,%ymm6 vpxor %ymm10,%ymm6,%ymm6 vpaddd %ymm11,%ymm6,%ymm9 vmovdqu %ymm9,192(%rsp) vpalignr $8,%ymm3,%ymm4,%ymm7 vpsrldq $4,%ymm6,%ymm8 vpxor %ymm3,%ymm7,%ymm7 vpxor %ymm5,%ymm8,%ymm8 vpxor %ymm8,%ymm7,%ymm7 vpsrld $31,%ymm7,%ymm8 vpslldq $12,%ymm7,%ymm10 vpaddd %ymm7,%ymm7,%ymm7 vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm7,%ymm7 vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm7,%ymm7 vpxor %ymm10,%ymm7,%ymm7 vpaddd %ymm11,%ymm7,%ymm9 vmovdqu %ymm9,224(%rsp) leaq 128(%rsp),%r13 jmp .Loop_avx2 .align 32 .Loop_avx2: rorxl $2,%ebp,%ebx andnl %edx,%ebp,%edi andl %ecx,%ebp xorl %edi,%ebp jmp .Lalign32_1 .align 32 .Lalign32_1: vpalignr $8,%ymm6,%ymm7,%ymm8 vpxor %ymm4,%ymm0,%ymm0 addl -128(%r13),%esi andnl %ecx,%eax,%edi vpxor %ymm1,%ymm0,%ymm0 addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp vpxor %ymm8,%ymm0,%ymm0 andl %ebx,%eax addl %r12d,%esi xorl %edi,%eax vpsrld $30,%ymm0,%ymm8 vpslld $2,%ymm0,%ymm0 addl -124(%r13),%edx andnl %ebx,%esi,%edi addl %eax,%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax andl %ebp,%esi vpor %ymm8,%ymm0,%ymm0 addl %r12d,%edx xorl %edi,%esi addl -120(%r13),%ecx andnl %ebp,%edx,%edi vpaddd %ymm11,%ymm0,%ymm9 addl %esi,%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi andl %eax,%edx vmovdqu %ymm9,256(%rsp) addl %r12d,%ecx xorl %edi,%edx addl -116(%r13),%ebx andnl %eax,%ecx,%edi addl %edx,%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx andl %esi,%ecx addl %r12d,%ebx xorl %edi,%ecx addl -96(%r13),%ebp andnl %esi,%ebx,%edi addl %ecx,%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx andl %edx,%ebx addl %r12d,%ebp xorl %edi,%ebx vpalignr $8,%ymm7,%ymm0,%ymm8 vpxor %ymm5,%ymm1,%ymm1 addl -92(%r13),%eax andnl %edx,%ebp,%edi vpxor %ymm2,%ymm1,%ymm1 addl %ebx,%eax rorxl $27,%ebp,%r12d rorxl 
$2,%ebp,%ebx vpxor %ymm8,%ymm1,%ymm1 andl %ecx,%ebp addl %r12d,%eax xorl %edi,%ebp vpsrld $30,%ymm1,%ymm8 vpslld $2,%ymm1,%ymm1 addl -88(%r13),%esi andnl %ecx,%eax,%edi addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax vpor %ymm8,%ymm1,%ymm1 addl %r12d,%esi xorl %edi,%eax addl -84(%r13),%edx andnl %ebx,%esi,%edi vpaddd %ymm11,%ymm1,%ymm9 addl %eax,%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax andl %ebp,%esi vmovdqu %ymm9,288(%rsp) addl %r12d,%edx xorl %edi,%esi addl -64(%r13),%ecx andnl %ebp,%edx,%edi addl %esi,%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi andl %eax,%edx addl %r12d,%ecx xorl %edi,%edx addl -60(%r13),%ebx andnl %eax,%ecx,%edi addl %edx,%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx andl %esi,%ecx addl %r12d,%ebx xorl %edi,%ecx vpalignr $8,%ymm0,%ymm1,%ymm8 vpxor %ymm6,%ymm2,%ymm2 addl -56(%r13),%ebp andnl %esi,%ebx,%edi vpxor %ymm3,%ymm2,%ymm2 vmovdqu 0(%r14),%ymm11 addl %ecx,%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx vpxor %ymm8,%ymm2,%ymm2 andl %edx,%ebx addl %r12d,%ebp xorl %edi,%ebx vpsrld $30,%ymm2,%ymm8 vpslld $2,%ymm2,%ymm2 addl -52(%r13),%eax andnl %edx,%ebp,%edi addl %ebx,%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx andl %ecx,%ebp vpor %ymm8,%ymm2,%ymm2 addl %r12d,%eax xorl %edi,%ebp addl -32(%r13),%esi andnl %ecx,%eax,%edi vpaddd %ymm11,%ymm2,%ymm9 addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax vmovdqu %ymm9,320(%rsp) addl %r12d,%esi xorl %edi,%eax addl -28(%r13),%edx andnl %ebx,%esi,%edi addl %eax,%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax andl %ebp,%esi addl %r12d,%edx xorl %edi,%esi addl -24(%r13),%ecx andnl %ebp,%edx,%edi addl %esi,%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi andl %eax,%edx addl %r12d,%ecx xorl %edi,%edx vpalignr $8,%ymm1,%ymm2,%ymm8 vpxor %ymm7,%ymm3,%ymm3 addl -20(%r13),%ebx andnl %eax,%ecx,%edi vpxor %ymm4,%ymm3,%ymm3 addl %edx,%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx vpxor %ymm8,%ymm3,%ymm3 andl %esi,%ecx addl %r12d,%ebx xorl %edi,%ecx vpsrld $30,%ymm3,%ymm8 vpslld 
$2,%ymm3,%ymm3 addl 0(%r13),%ebp andnl %esi,%ebx,%edi addl %ecx,%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx andl %edx,%ebx vpor %ymm8,%ymm3,%ymm3 addl %r12d,%ebp xorl %edi,%ebx addl 4(%r13),%eax andnl %edx,%ebp,%edi vpaddd %ymm11,%ymm3,%ymm9 addl %ebx,%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx andl %ecx,%ebp vmovdqu %ymm9,352(%rsp) addl %r12d,%eax xorl %edi,%ebp addl 8(%r13),%esi andnl %ecx,%eax,%edi addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax addl %r12d,%esi xorl %edi,%eax addl 12(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi vpalignr $8,%ymm2,%ymm3,%ymm8 vpxor %ymm0,%ymm4,%ymm4 addl 32(%r13),%ecx leal (%rcx,%rsi,1),%ecx vpxor %ymm5,%ymm4,%ymm4 rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx vpxor %ymm8,%ymm4,%ymm4 addl %r12d,%ecx xorl %ebp,%edx addl 36(%r13),%ebx vpsrld $30,%ymm4,%ymm8 vpslld $2,%ymm4,%ymm4 leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx vpor %ymm8,%ymm4,%ymm4 addl 40(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx vpaddd %ymm11,%ymm4,%ymm9 xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl 44(%r13),%eax vmovdqu %ymm9,384(%rsp) leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl 64(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax vpalignr $8,%ymm3,%ymm4,%ymm8 vpxor %ymm1,%ymm5,%ymm5 addl 68(%r13),%edx leal (%rdx,%rax,1),%edx vpxor %ymm6,%ymm5,%ymm5 rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi vpxor %ymm8,%ymm5,%ymm5 addl %r12d,%edx xorl %ebx,%esi addl 72(%r13),%ecx vpsrld $30,%ymm5,%ymm8 vpslld $2,%ymm5,%ymm5 leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx vpor %ymm8,%ymm5,%ymm5 addl 76(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl 
$2,%ecx,%edx vpaddd %ymm11,%ymm5,%ymm9 xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl 96(%r13),%ebp vmovdqu %ymm9,416(%rsp) leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl 100(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp vpalignr $8,%ymm4,%ymm5,%ymm8 vpxor %ymm2,%ymm6,%ymm6 addl 104(%r13),%esi leal (%rsi,%rbp,1),%esi vpxor %ymm7,%ymm6,%ymm6 rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax vpxor %ymm8,%ymm6,%ymm6 addl %r12d,%esi xorl %ecx,%eax addl 108(%r13),%edx leaq 256(%r13),%r13 vpsrld $30,%ymm6,%ymm8 vpslld $2,%ymm6,%ymm6 leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi vpor %ymm8,%ymm6,%ymm6 addl -128(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi vpaddd %ymm11,%ymm6,%ymm9 xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl -124(%r13),%ebx vmovdqu %ymm9,448(%rsp) leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl -120(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx vpalignr $8,%ymm5,%ymm6,%ymm8 vpxor %ymm3,%ymm7,%ymm7 addl -116(%r13),%eax leal (%rax,%rbx,1),%eax vpxor %ymm0,%ymm7,%ymm7 vmovdqu 32(%r14),%ymm11 rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp vpxor %ymm8,%ymm7,%ymm7 addl %r12d,%eax xorl %edx,%ebp addl -96(%r13),%esi vpsrld $30,%ymm7,%ymm8 vpslld $2,%ymm7,%ymm7 leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax vpor %ymm8,%ymm7,%ymm7 addl -92(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax vpaddd %ymm11,%ymm7,%ymm9 xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl -88(%r13),%ecx vmovdqu %ymm9,480(%rsp) leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl 
%r12d,%ecx xorl %ebp,%edx addl -84(%r13),%ebx movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx andl %edi,%ecx jmp .Lalign32_2 .align 32 .Lalign32_2: vpalignr $8,%ymm6,%ymm7,%ymm8 vpxor %ymm4,%ymm0,%ymm0 addl -64(%r13),%ebp xorl %esi,%ecx vpxor %ymm1,%ymm0,%ymm0 movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp vpxor %ymm8,%ymm0,%ymm0 rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx vpsrld $30,%ymm0,%ymm8 vpslld $2,%ymm0,%ymm0 addl %r12d,%ebp andl %edi,%ebx addl -60(%r13),%eax xorl %edx,%ebx movl %ecx,%edi xorl %edx,%edi vpor %ymm8,%ymm0,%ymm0 leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp vpaddd %ymm11,%ymm0,%ymm9 addl %r12d,%eax andl %edi,%ebp addl -56(%r13),%esi xorl %ecx,%ebp vmovdqu %ymm9,512(%rsp) movl %ebx,%edi xorl %ecx,%edi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi andl %edi,%eax addl -52(%r13),%edx xorl %ebx,%eax movl %ebp,%edi xorl %ebx,%edi leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx andl %edi,%esi addl -32(%r13),%ecx xorl %ebp,%esi movl %eax,%edi xorl %ebp,%edi leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx andl %edi,%edx vpalignr $8,%ymm7,%ymm0,%ymm8 vpxor %ymm5,%ymm1,%ymm1 addl -28(%r13),%ebx xorl %eax,%edx vpxor %ymm2,%ymm1,%ymm1 movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx vpxor %ymm8,%ymm1,%ymm1 rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx vpsrld $30,%ymm1,%ymm8 vpslld $2,%ymm1,%ymm1 addl %r12d,%ebx andl %edi,%ecx addl -24(%r13),%ebp xorl %esi,%ecx movl %edx,%edi xorl %esi,%edi vpor %ymm8,%ymm1,%ymm1 leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx vpaddd %ymm11,%ymm1,%ymm9 addl %r12d,%ebp andl %edi,%ebx addl -20(%r13),%eax xorl %edx,%ebx vmovdqu %ymm9,544(%rsp) movl %ecx,%edi xorl %edx,%edi leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d 
rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax andl %edi,%ebp addl 0(%r13),%esi xorl %ecx,%ebp movl %ebx,%edi xorl %ecx,%edi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi andl %edi,%eax addl 4(%r13),%edx xorl %ebx,%eax movl %ebp,%edi xorl %ebx,%edi leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx andl %edi,%esi vpalignr $8,%ymm0,%ymm1,%ymm8 vpxor %ymm6,%ymm2,%ymm2 addl 8(%r13),%ecx xorl %ebp,%esi vpxor %ymm3,%ymm2,%ymm2 movl %eax,%edi xorl %ebp,%edi leal (%rcx,%rsi,1),%ecx vpxor %ymm8,%ymm2,%ymm2 rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx vpsrld $30,%ymm2,%ymm8 vpslld $2,%ymm2,%ymm2 addl %r12d,%ecx andl %edi,%edx addl 12(%r13),%ebx xorl %eax,%edx movl %esi,%edi xorl %eax,%edi vpor %ymm8,%ymm2,%ymm2 leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx vpaddd %ymm11,%ymm2,%ymm9 addl %r12d,%ebx andl %edi,%ecx addl 32(%r13),%ebp xorl %esi,%ecx vmovdqu %ymm9,576(%rsp) movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp andl %edi,%ebx addl 36(%r13),%eax xorl %edx,%ebx movl %ecx,%edi xorl %edx,%edi leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax andl %edi,%ebp addl 40(%r13),%esi xorl %ecx,%ebp movl %ebx,%edi xorl %ecx,%edi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi andl %edi,%eax vpalignr $8,%ymm1,%ymm2,%ymm8 vpxor %ymm7,%ymm3,%ymm3 addl 44(%r13),%edx xorl %ebx,%eax vpxor %ymm4,%ymm3,%ymm3 movl %ebp,%edi xorl %ebx,%edi leal (%rdx,%rax,1),%edx vpxor %ymm8,%ymm3,%ymm3 rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi vpsrld $30,%ymm3,%ymm8 vpslld $2,%ymm3,%ymm3 addl %r12d,%edx andl %edi,%esi addl 64(%r13),%ecx xorl %ebp,%esi movl %eax,%edi xorl %ebp,%edi vpor %ymm8,%ymm3,%ymm3 leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx vpaddd 
%ymm11,%ymm3,%ymm9 addl %r12d,%ecx andl %edi,%edx addl 68(%r13),%ebx xorl %eax,%edx vmovdqu %ymm9,608(%rsp) movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx andl %edi,%ecx addl 72(%r13),%ebp xorl %esi,%ecx movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp andl %edi,%ebx addl 76(%r13),%eax xorl %edx,%ebx leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl 96(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl 100(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl 104(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl 108(%r13),%ebx leaq 256(%r13),%r13 leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl -128(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl -124(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl -120(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl -116(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl -96(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl -92(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl -88(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl 
%r12d,%ebp xorl %esi,%ebx addl -84(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl -64(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl -60(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl -56(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl -52(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl -32(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl -28(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl -24(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl -20(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d addl %r12d,%edx leaq 128(%r9),%r13 leaq 128(%r9),%rdi cmpq %r10,%r13 cmovaeq %r9,%r13 addl 0(%r8),%edx addl 4(%r8),%esi addl 8(%r8),%ebp movl %edx,0(%r8) addl 12(%r8),%ebx movl %esi,4(%r8) movl %edx,%eax addl 16(%r8),%ecx movl %ebp,%r12d movl %ebp,8(%r8) movl %ebx,%edx movl %ebx,12(%r8) movl %esi,%ebp movl %ecx,16(%r8) movl %ecx,%esi movl %r12d,%ecx cmpq %r10,%r9 je .Ldone_avx2 vmovdqu 64(%r14),%ymm6 cmpq %r10,%rdi ja .Last_avx2 vmovdqu -64(%rdi),%xmm0 vmovdqu -48(%rdi),%xmm1 vmovdqu -32(%rdi),%xmm2 vmovdqu -16(%rdi),%xmm3 vinserti128 $1,0(%r13),%ymm0,%ymm0 vinserti128 $1,16(%r13),%ymm1,%ymm1 vinserti128 $1,32(%r13),%ymm2,%ymm2 vinserti128 $1,48(%r13),%ymm3,%ymm3 jmp .Last_avx2 .align 32 .Last_avx2: leaq 128+16(%rsp),%r13 rorxl $2,%ebp,%ebx andnl %edx,%ebp,%edi andl %ecx,%ebp xorl %edi,%ebp subq $-128,%r9 addl -128(%r13),%esi andnl %ecx,%eax,%edi addl 
%ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax addl %r12d,%esi xorl %edi,%eax addl -124(%r13),%edx andnl %ebx,%esi,%edi addl %eax,%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax andl %ebp,%esi addl %r12d,%edx xorl %edi,%esi addl -120(%r13),%ecx andnl %ebp,%edx,%edi addl %esi,%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi andl %eax,%edx addl %r12d,%ecx xorl %edi,%edx addl -116(%r13),%ebx andnl %eax,%ecx,%edi addl %edx,%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx andl %esi,%ecx addl %r12d,%ebx xorl %edi,%ecx addl -96(%r13),%ebp andnl %esi,%ebx,%edi addl %ecx,%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx andl %edx,%ebx addl %r12d,%ebp xorl %edi,%ebx addl -92(%r13),%eax andnl %edx,%ebp,%edi addl %ebx,%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx andl %ecx,%ebp addl %r12d,%eax xorl %edi,%ebp addl -88(%r13),%esi andnl %ecx,%eax,%edi addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax addl %r12d,%esi xorl %edi,%eax addl -84(%r13),%edx andnl %ebx,%esi,%edi addl %eax,%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax andl %ebp,%esi addl %r12d,%edx xorl %edi,%esi addl -64(%r13),%ecx andnl %ebp,%edx,%edi addl %esi,%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi andl %eax,%edx addl %r12d,%ecx xorl %edi,%edx addl -60(%r13),%ebx andnl %eax,%ecx,%edi addl %edx,%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx andl %esi,%ecx addl %r12d,%ebx xorl %edi,%ecx addl -56(%r13),%ebp andnl %esi,%ebx,%edi addl %ecx,%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx andl %edx,%ebx addl %r12d,%ebp xorl %edi,%ebx addl -52(%r13),%eax andnl %edx,%ebp,%edi addl %ebx,%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx andl %ecx,%ebp addl %r12d,%eax xorl %edi,%ebp addl -32(%r13),%esi andnl %ecx,%eax,%edi addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax addl %r12d,%esi xorl %edi,%eax addl -28(%r13),%edx andnl %ebx,%esi,%edi addl %eax,%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax andl %ebp,%esi addl %r12d,%edx xorl %edi,%esi addl -24(%r13),%ecx andnl %ebp,%edx,%edi addl 
%esi,%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi andl %eax,%edx addl %r12d,%ecx xorl %edi,%edx addl -20(%r13),%ebx andnl %eax,%ecx,%edi addl %edx,%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx andl %esi,%ecx addl %r12d,%ebx xorl %edi,%ecx addl 0(%r13),%ebp andnl %esi,%ebx,%edi addl %ecx,%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx andl %edx,%ebx addl %r12d,%ebp xorl %edi,%ebx addl 4(%r13),%eax andnl %edx,%ebp,%edi addl %ebx,%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx andl %ecx,%ebp addl %r12d,%eax xorl %edi,%ebp addl 8(%r13),%esi andnl %ecx,%eax,%edi addl %ebp,%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp andl %ebx,%eax addl %r12d,%esi xorl %edi,%eax addl 12(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl 32(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl 36(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl 40(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl 44(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl 64(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax vmovdqu -64(%r14),%ymm11 vpshufb %ymm6,%ymm0,%ymm0 addl 68(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl 72(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl 76(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl 96(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx 
addl 100(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp vpshufb %ymm6,%ymm1,%ymm1 vpaddd %ymm11,%ymm0,%ymm8 addl 104(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl 108(%r13),%edx leaq 256(%r13),%r13 leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl -128(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl -124(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl -120(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx vmovdqu %ymm8,0(%rsp) vpshufb %ymm6,%ymm2,%ymm2 vpaddd %ymm11,%ymm1,%ymm9 addl -116(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl -96(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl -92(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi addl -88(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl -84(%r13),%ebx movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx andl %edi,%ecx vmovdqu %ymm9,32(%rsp) vpshufb %ymm6,%ymm3,%ymm3 vpaddd %ymm11,%ymm2,%ymm6 addl -64(%r13),%ebp xorl %esi,%ecx movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp andl %edi,%ebx addl -60(%r13),%eax xorl %edx,%ebx movl %ecx,%edi xorl %edx,%edi leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl 
%ecx,%ebp addl %r12d,%eax andl %edi,%ebp addl -56(%r13),%esi xorl %ecx,%ebp movl %ebx,%edi xorl %ecx,%edi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi andl %edi,%eax addl -52(%r13),%edx xorl %ebx,%eax movl %ebp,%edi xorl %ebx,%edi leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx andl %edi,%esi addl -32(%r13),%ecx xorl %ebp,%esi movl %eax,%edi xorl %ebp,%edi leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx andl %edi,%edx jmp .Lalign32_3 .align 32 .Lalign32_3: vmovdqu %ymm6,64(%rsp) vpaddd %ymm11,%ymm3,%ymm7 addl -28(%r13),%ebx xorl %eax,%edx movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx andl %edi,%ecx addl -24(%r13),%ebp xorl %esi,%ecx movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp andl %edi,%ebx addl -20(%r13),%eax xorl %edx,%ebx movl %ecx,%edi xorl %edx,%edi leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax andl %edi,%ebp addl 0(%r13),%esi xorl %ecx,%ebp movl %ebx,%edi xorl %ecx,%edi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi andl %edi,%eax addl 4(%r13),%edx xorl %ebx,%eax movl %ebp,%edi xorl %ebx,%edi leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl %r12d,%edx andl %edi,%esi vmovdqu %ymm7,96(%rsp) addl 8(%r13),%ecx xorl %ebp,%esi movl %eax,%edi xorl %ebp,%edi leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx andl %edi,%edx addl 12(%r13),%ebx xorl %eax,%edx movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx andl %edi,%ecx addl 32(%r13),%ebp xorl %esi,%ecx movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp rorxl 
$27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp andl %edi,%ebx addl 36(%r13),%eax xorl %edx,%ebx movl %ecx,%edi xorl %edx,%edi leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax andl %edi,%ebp addl 40(%r13),%esi xorl %ecx,%ebp movl %ebx,%edi xorl %ecx,%edi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi andl %edi,%eax vpalignr $8,%ymm0,%ymm1,%ymm4 addl 44(%r13),%edx xorl %ebx,%eax movl %ebp,%edi xorl %ebx,%edi vpsrldq $4,%ymm3,%ymm8 leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax vpxor %ymm0,%ymm4,%ymm4 vpxor %ymm2,%ymm8,%ymm8 xorl %ebp,%esi addl %r12d,%edx vpxor %ymm8,%ymm4,%ymm4 andl %edi,%esi addl 64(%r13),%ecx xorl %ebp,%esi movl %eax,%edi vpsrld $31,%ymm4,%ymm8 xorl %ebp,%edi leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d vpslldq $12,%ymm4,%ymm10 vpaddd %ymm4,%ymm4,%ymm4 rorxl $2,%edx,%esi xorl %eax,%edx vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm4,%ymm4 addl %r12d,%ecx andl %edi,%edx vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm4,%ymm4 addl 68(%r13),%ebx xorl %eax,%edx vpxor %ymm10,%ymm4,%ymm4 movl %esi,%edi xorl %eax,%edi leal (%rbx,%rdx,1),%ebx vpaddd %ymm11,%ymm4,%ymm9 rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx vmovdqu %ymm9,128(%rsp) addl %r12d,%ebx andl %edi,%ecx addl 72(%r13),%ebp xorl %esi,%ecx movl %edx,%edi xorl %esi,%edi leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp andl %edi,%ebx addl 76(%r13),%eax xorl %edx,%ebx leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp vpalignr $8,%ymm1,%ymm2,%ymm5 addl 96(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp vpsrldq $4,%ymm4,%ymm8 xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax vpxor %ymm1,%ymm5,%ymm5 vpxor %ymm3,%ymm8,%ymm8 addl 100(%r13),%edx leal (%rdx,%rax,1),%edx vpxor %ymm8,%ymm5,%ymm5 rorxl $27,%esi,%r12d rorxl $2,%esi,%eax xorl %ebp,%esi addl 
%r12d,%edx vpsrld $31,%ymm5,%ymm8 vmovdqu -32(%r14),%ymm11 xorl %ebx,%esi addl 104(%r13),%ecx leal (%rcx,%rsi,1),%ecx vpslldq $12,%ymm5,%ymm10 vpaddd %ymm5,%ymm5,%ymm5 rorxl $27,%edx,%r12d rorxl $2,%edx,%esi vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm5,%ymm5 xorl %eax,%edx addl %r12d,%ecx vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm5,%ymm5 xorl %ebp,%edx addl 108(%r13),%ebx leaq 256(%r13),%r13 vpxor %ymm10,%ymm5,%ymm5 leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx vpaddd %ymm11,%ymm5,%ymm9 xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx vmovdqu %ymm9,160(%rsp) addl -128(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx vpalignr $8,%ymm2,%ymm3,%ymm6 addl -124(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx vpsrldq $4,%ymm5,%ymm8 xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp vpxor %ymm2,%ymm6,%ymm6 vpxor %ymm4,%ymm8,%ymm8 addl -120(%r13),%esi leal (%rsi,%rbp,1),%esi vpxor %ymm8,%ymm6,%ymm6 rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi vpsrld $31,%ymm6,%ymm8 xorl %ecx,%eax addl -116(%r13),%edx leal (%rdx,%rax,1),%edx vpslldq $12,%ymm6,%ymm10 vpaddd %ymm6,%ymm6,%ymm6 rorxl $27,%esi,%r12d rorxl $2,%esi,%eax vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm6,%ymm6 xorl %ebp,%esi addl %r12d,%edx vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm6,%ymm6 xorl %ebx,%esi addl -96(%r13),%ecx vpxor %ymm10,%ymm6,%ymm6 leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi vpaddd %ymm11,%ymm6,%ymm9 xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx vmovdqu %ymm9,192(%rsp) addl -92(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx vpalignr $8,%ymm3,%ymm4,%ymm7 addl -88(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx vpsrldq $4,%ymm6,%ymm8 xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx vpxor %ymm3,%ymm7,%ymm7 vpxor %ymm5,%ymm8,%ymm8 addl -84(%r13),%eax leal 
(%rax,%rbx,1),%eax vpxor %ymm8,%ymm7,%ymm7 rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax vpsrld $31,%ymm7,%ymm8 xorl %edx,%ebp addl -64(%r13),%esi leal (%rsi,%rbp,1),%esi vpslldq $12,%ymm7,%ymm10 vpaddd %ymm7,%ymm7,%ymm7 rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp vpsrld $30,%ymm10,%ymm9 vpor %ymm8,%ymm7,%ymm7 xorl %ebx,%eax addl %r12d,%esi vpslld $2,%ymm10,%ymm10 vpxor %ymm9,%ymm7,%ymm7 xorl %ecx,%eax addl -60(%r13),%edx vpxor %ymm10,%ymm7,%ymm7 leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d rorxl $2,%esi,%eax vpaddd %ymm11,%ymm7,%ymm9 xorl %ebp,%esi addl %r12d,%edx xorl %ebx,%esi vmovdqu %ymm9,224(%rsp) addl -56(%r13),%ecx leal (%rcx,%rsi,1),%ecx rorxl $27,%edx,%r12d rorxl $2,%edx,%esi xorl %eax,%edx addl %r12d,%ecx xorl %ebp,%edx addl -52(%r13),%ebx leal (%rbx,%rdx,1),%ebx rorxl $27,%ecx,%r12d rorxl $2,%ecx,%edx xorl %esi,%ecx addl %r12d,%ebx xorl %eax,%ecx addl -32(%r13),%ebp leal (%rcx,%rbp,1),%ebp rorxl $27,%ebx,%r12d rorxl $2,%ebx,%ecx xorl %edx,%ebx addl %r12d,%ebp xorl %esi,%ebx addl -28(%r13),%eax leal (%rax,%rbx,1),%eax rorxl $27,%ebp,%r12d rorxl $2,%ebp,%ebx xorl %ecx,%ebp addl %r12d,%eax xorl %edx,%ebp addl -24(%r13),%esi leal (%rsi,%rbp,1),%esi rorxl $27,%eax,%r12d rorxl $2,%eax,%ebp xorl %ebx,%eax addl %r12d,%esi xorl %ecx,%eax addl -20(%r13),%edx leal (%rdx,%rax,1),%edx rorxl $27,%esi,%r12d addl %r12d,%edx leaq 128(%rsp),%r13 addl 0(%r8),%edx addl 4(%r8),%esi addl 8(%r8),%ebp movl %edx,0(%r8) addl 12(%r8),%ebx movl %esi,4(%r8) movl %edx,%eax addl 16(%r8),%ecx movl %ebp,%r12d movl %ebp,8(%r8) movl %ebx,%edx movl %ebx,12(%r8) movl %esi,%ebp movl %ecx,16(%r8) movl %ecx,%esi movl %r12d,%ecx cmpq %r10,%r9 jbe .Loop_avx2 .Ldone_avx2: vzeroupper movq -40(%r11),%r14 .cfi_restore %r14 movq -32(%r11),%r13 .cfi_restore %r13 movq -24(%r11),%r12 .cfi_restore %r12 movq -16(%r11),%rbp .cfi_restore %rbp movq -8(%r11),%rbx .cfi_restore %rbx leaq (%r11),%rsp .cfi_def_cfa_register %rsp .Lepilogue_avx2: ret .cfi_endproc .size 
sha1_block_data_order_avx2,.-sha1_block_data_order_avx2 .section .rodata .align 64 K_XX_XX: .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0 .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 64 .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha256-586-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _sha256_block_data_order_nohw .private_extern _sha256_block_data_order_nohw .align 4 _sha256_block_data_order_nohw: L_sha256_block_data_order_nohw_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call L000pic_point L000pic_point: popl %ebp leal LK256-L000pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $6,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) L001no_xmm: subl %edi,%eax cmpl $256,%eax jae L002unrolled jmp L003loop .align 4,0x90 L003loop: movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx bswap %eax movl 12(%edi),%edx bswap %ebx pushl %eax bswap %ecx pushl %ebx bswap %edx pushl %ecx pushl %edx movl 16(%edi),%eax movl 20(%edi),%ebx movl 24(%edi),%ecx bswap %eax movl 28(%edi),%edx bswap %ebx pushl %eax bswap %ecx pushl %ebx bswap %edx pushl %ecx pushl %edx movl 32(%edi),%eax movl 36(%edi),%ebx movl 40(%edi),%ecx bswap %eax movl 44(%edi),%edx bswap %ebx pushl %eax bswap %ecx pushl %ebx bswap %edx pushl %ecx pushl %edx movl 48(%edi),%eax movl 52(%edi),%ebx movl 56(%edi),%ecx bswap %eax movl 60(%edi),%edx bswap %ebx pushl %eax bswap %ecx pushl %ebx bswap %edx pushl %ecx pushl %edx addl $64,%edi leal -36(%esp),%esp movl %edi,104(%esp) movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edi movl %ebx,8(%esp) xorl %ecx,%ebx movl %ecx,12(%esp) movl %edi,16(%esp) movl %ebx,(%esp) movl 16(%esi),%edx movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%edi movl %ebx,24(%esp) movl %ecx,28(%esp) movl %edi,32(%esp) .align 4,0x90 L00400_15: movl %edx,%ecx movl 24(%esp),%esi rorl $14,%ecx movl 28(%esp),%edi xorl %edx,%ecx xorl %edi,%esi movl 96(%esp),%ebx rorl $5,%ecx andl %edx,%esi movl %edx,20(%esp) xorl %ecx,%edx addl 32(%esp),%ebx xorl %edi,%esi rorl $6,%edx movl %eax,%ecx addl %esi,%ebx rorl $9,%ecx addl %edx,%ebx movl 8(%esp),%edi 
xorl %eax,%ecx movl %eax,4(%esp) leal -4(%esp),%esp rorl $11,%ecx movl (%ebp),%esi xorl %eax,%ecx movl 20(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %esi,%ebx movl %eax,(%esp) addl %ebx,%edx andl 4(%esp),%eax addl %ecx,%ebx xorl %edi,%eax addl $4,%ebp addl %ebx,%eax cmpl $3248222580,%esi jne L00400_15 movl 156(%esp),%ecx jmp L00516_63 .align 4,0x90 L00516_63: movl %ecx,%ebx movl 104(%esp),%esi rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 160(%esp),%ebx shrl $10,%edi addl 124(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 24(%esp),%esi rorl $14,%ecx addl %edi,%ebx movl 28(%esp),%edi xorl %edx,%ecx xorl %edi,%esi movl %ebx,96(%esp) rorl $5,%ecx andl %edx,%esi movl %edx,20(%esp) xorl %ecx,%edx addl 32(%esp),%ebx xorl %edi,%esi rorl $6,%edx movl %eax,%ecx addl %esi,%ebx rorl $9,%ecx addl %edx,%ebx movl 8(%esp),%edi xorl %eax,%ecx movl %eax,4(%esp) leal -4(%esp),%esp rorl $11,%ecx movl (%ebp),%esi xorl %eax,%ecx movl 20(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %esi,%ebx movl %eax,(%esp) addl %ebx,%edx andl 4(%esp),%eax addl %ecx,%ebx xorl %edi,%eax movl 156(%esp),%ecx addl $4,%ebp addl %ebx,%eax cmpl $3329325298,%esi jne L00516_63 movl 356(%esp),%esi movl 8(%esp),%ebx movl 16(%esp),%ecx addl (%esi),%eax addl 4(%esi),%ebx addl 8(%esi),%edi addl 12(%esi),%ecx movl %eax,(%esi) movl %ebx,4(%esi) movl %edi,8(%esi) movl %ecx,12(%esi) movl 24(%esp),%eax movl 28(%esp),%ebx movl 32(%esp),%ecx movl 360(%esp),%edi addl 16(%esi),%edx addl 20(%esi),%eax addl 24(%esi),%ebx addl 28(%esi),%ecx movl %edx,16(%esi) movl %eax,20(%esi) movl %ebx,24(%esi) movl %ecx,28(%esi) leal 356(%esp),%esp subl $256,%ebp cmpl 8(%esp),%edi jb L003loop movl 12(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .align 6,0x90 LK256: .long 
1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298 .long 66051,67438087,134810123,202182159 .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97 .byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 .byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 .byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 .byte 62,0 .align 4,0x90 L002unrolled: leal -96(%esp),%esp movl (%esi),%eax movl 4(%esi),%ebp movl 8(%esi),%ecx movl 12(%esi),%ebx movl %ebp,4(%esp) xorl %ecx,%ebp movl %ecx,8(%esp) movl %ebx,12(%esp) movl 16(%esi),%edx movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%esi movl %ebx,20(%esp) movl %ecx,24(%esp) movl %esi,28(%esp) jmp L006grand_loop .align 4,0x90 L006grand_loop: movl (%edi),%ebx movl 4(%edi),%ecx bswap %ebx movl 8(%edi),%esi bswap %ecx movl %ebx,32(%esp) bswap %esi movl %ecx,36(%esp) movl %esi,40(%esp) movl 12(%edi),%ebx movl 16(%edi),%ecx bswap %ebx movl 20(%edi),%esi bswap %ecx movl %ebx,44(%esp) bswap %esi movl %ecx,48(%esp) movl %esi,52(%esp) movl 24(%edi),%ebx movl 28(%edi),%ecx bswap %ebx movl 32(%edi),%esi bswap %ecx movl %ebx,56(%esp) bswap %esi movl %ecx,60(%esp) movl %esi,64(%esp) movl 36(%edi),%ebx movl 40(%edi),%ecx bswap %ebx movl 44(%edi),%esi bswap %ecx movl %ebx,68(%esp) bswap %esi movl %ecx,72(%esp) movl %esi,76(%esp) movl 48(%edi),%ebx movl 52(%edi),%ecx bswap %ebx movl 
56(%edi),%esi bswap %ecx movl %ebx,80(%esp) bswap %esi movl %ecx,84(%esp) movl %esi,88(%esp) movl 60(%edi),%ebx addl $64,%edi bswap %ebx movl %edi,100(%esp) movl %ebx,92(%esp) movl %edx,%ecx movl 20(%esp),%esi rorl $14,%edx movl 24(%esp),%edi xorl %ecx,%edx movl 32(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1116352408(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 16(%esp),%ecx rorl $14,%edx movl 20(%esp),%edi xorl %esi,%edx movl 36(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1899447441(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 12(%esp),%esi rorl $14,%edx movl 16(%esp),%edi xorl %ecx,%edx movl 40(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3049323471(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 8(%esp),%ecx rorl $14,%edx movl 12(%esp),%edi xorl %esi,%edx movl 44(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl 
%ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3921009573(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl %edx,%ecx movl 4(%esp),%esi rorl $14,%edx movl 8(%esp),%edi xorl %ecx,%edx movl 48(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 961987163(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl (%esp),%ecx rorl $14,%edx movl 4(%esp),%edi xorl %esi,%edx movl 52(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1508970993(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 28(%esp),%esi rorl $14,%edx movl (%esp),%edi xorl %ecx,%edx movl 56(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2453635748(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 24(%esp),%ecx rorl $14,%edx movl 28(%esp),%edi xorl %esi,%edx movl 60(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 
8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2870763221(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 20(%esp),%esi rorl $14,%edx movl 24(%esp),%edi xorl %ecx,%edx movl 64(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3624381080(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 16(%esp),%ecx rorl $14,%edx movl 20(%esp),%edi xorl %esi,%edx movl 68(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 310598401(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 12(%esp),%esi rorl $14,%edx movl 16(%esp),%edi xorl %ecx,%edx movl 72(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 607225278(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 8(%esp),%ecx rorl $14,%edx movl 12(%esp),%edi xorl %esi,%edx movl 76(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl 
%ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1426881987(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl %edx,%ecx movl 4(%esp),%esi rorl $14,%edx movl 8(%esp),%edi xorl %ecx,%edx movl 80(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1925078388(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl (%esp),%ecx rorl $14,%edx movl 4(%esp),%edi xorl %esi,%edx movl 84(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2162078206(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 28(%esp),%esi rorl $14,%edx movl (%esp),%edi xorl %ecx,%edx movl 88(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2614888103(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 24(%esp),%ecx rorl $14,%edx movl 28(%esp),%edi xorl %esi,%edx movl 92(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl 
$9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3248222580(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 36(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 88(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 32(%esp),%ebx shrl $10,%edi addl 68(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,32(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3835390401(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 40(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 92(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 36(%esp),%ebx shrl $10,%edi addl 72(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,36(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 4022224774(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 44(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 32(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 40(%esp),%ebx shrl $10,%edi addl 
76(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,40(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 264347078(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 48(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 36(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 44(%esp),%ebx shrl $10,%edi addl 80(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,44(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 604807628(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 52(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 40(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 48(%esp),%ebx shrl $10,%edi addl 84(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,48(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 770255983(%ebx,%edx,1),%edx xorl %esi,%ecx 
xorl %edi,%ebp movl 56(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 44(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 52(%esp),%ebx shrl $10,%edi addl 88(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,52(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1249150122(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 60(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 48(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 56(%esp),%ebx shrl $10,%edi addl 92(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,56(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1555081692(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 64(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 52(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 60(%esp),%ebx shrl $10,%edi addl 32(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,60(%esp) xorl %edi,%ecx rorl $5,%edx 
andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1996064986(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 68(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 56(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 64(%esp),%ebx shrl $10,%edi addl 36(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,64(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2554220882(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 72(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 60(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 68(%esp),%ebx shrl $10,%edi addl 40(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,68(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2821834349(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 76(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 64(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi 
xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 72(%esp),%ebx shrl $10,%edi addl 44(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,72(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2952996808(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 80(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 68(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 76(%esp),%ebx shrl $10,%edi addl 48(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,76(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3210313671(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 84(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 72(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 80(%esp),%ebx shrl $10,%edi addl 52(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,80(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl 
%eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3336571891(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 88(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 76(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 84(%esp),%ebx shrl $10,%edi addl 56(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,84(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3584528711(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 92(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 80(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 88(%esp),%ebx shrl $10,%edi addl 60(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,88(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 113926993(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 32(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 84(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 92(%esp),%ebx shrl $10,%edi addl 64(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 
24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,92(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 338241895(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 36(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 88(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 32(%esp),%ebx shrl $10,%edi addl 68(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,32(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 666307205(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 40(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 92(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 36(%esp),%ebx shrl $10,%edi addl 72(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,36(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 773529912(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 44(%esp),%ecx rorl $2,%esi addl 
%edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 32(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 40(%esp),%ebx shrl $10,%edi addl 76(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,40(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1294757372(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 48(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 36(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 44(%esp),%ebx shrl $10,%edi addl 80(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,44(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1396182291(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 52(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 40(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 48(%esp),%ebx shrl $10,%edi addl 84(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,48(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 
12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1695183700(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 56(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 44(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 52(%esp),%ebx shrl $10,%edi addl 88(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,52(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1986661051(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 60(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 48(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 56(%esp),%ebx shrl $10,%edi addl 92(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,56(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2177026350(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 64(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 52(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl 
%edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 60(%esp),%ebx shrl $10,%edi addl 32(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,60(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2456956037(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 68(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 56(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 64(%esp),%ebx shrl $10,%edi addl 36(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,64(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2730485921(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 72(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 60(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 68(%esp),%ebx shrl $10,%edi addl 40(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,68(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp 
rorl $11,%esi andl %ebp,%eax leal 2820302411(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 76(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 64(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 72(%esp),%ebx shrl $10,%edi addl 44(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,72(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3259730800(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 80(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 68(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 76(%esp),%ebx shrl $10,%edi addl 48(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,76(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3345764771(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 84(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 72(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 80(%esp),%ebx shrl $10,%edi addl 52(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 
8(%esp),%edi xorl %ecx,%edx movl %ebx,80(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3516065817(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 88(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 76(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 84(%esp),%ebx shrl $10,%edi addl 56(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,84(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3600352804(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 92(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 80(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 88(%esp),%ebx shrl $10,%edi addl 60(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,88(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 4094571909(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 32(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp 
movl 84(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 92(%esp),%ebx shrl $10,%edi addl 64(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,92(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 275423344(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 36(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 88(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 32(%esp),%ebx shrl $10,%edi addl 68(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,32(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 430227734(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 40(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 92(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 36(%esp),%ebx shrl $10,%edi addl 72(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,36(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl 
%ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 506948616(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 44(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 32(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 40(%esp),%ebx shrl $10,%edi addl 76(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,40(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 659060556(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 48(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 36(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 44(%esp),%ebx shrl $10,%edi addl 80(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,44(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 883997877(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 52(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 40(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 48(%esp),%ebx 
shrl $10,%edi addl 84(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,48(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 958139571(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 56(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 44(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 52(%esp),%ebx shrl $10,%edi addl 88(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,52(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1322822218(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 60(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 48(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 56(%esp),%ebx shrl $10,%edi addl 92(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,56(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1537002063(%ebx,%edx,1),%edx 
xorl %esi,%ecx xorl %edi,%ebp movl 64(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 52(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 60(%esp),%ebx shrl $10,%edi addl 32(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,60(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1747873779(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 68(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 56(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 64(%esp),%ebx shrl $10,%edi addl 36(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,64(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1955562222(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 72(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 60(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 68(%esp),%ebx shrl $10,%edi addl 40(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,68(%esp) xorl %edi,%ecx 
rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2024104815(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 76(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 64(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 72(%esp),%ebx shrl $10,%edi addl 44(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,72(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2227730452(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 80(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 68(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 76(%esp),%ebx shrl $10,%edi addl 48(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,76(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2361852424(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 84(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 72(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi 
rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 80(%esp),%ebx shrl $10,%edi addl 52(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,80(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2428436474(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 88(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 76(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 84(%esp),%ebx shrl $10,%edi addl 56(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,84(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2756734187(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 92(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 80(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 88(%esp),%ebx shrl $10,%edi addl 60(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl 
%eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3204031479(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 32(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 84(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 92(%esp),%ebx shrl $10,%edi addl 64(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3329325298(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 96(%esp),%esi xorl %edi,%ebp movl 12(%esp),%ecx addl (%esi),%eax addl 4(%esi),%ebp addl 8(%esi),%edi addl 12(%esi),%ecx movl %eax,(%esi) movl %ebp,4(%esi) movl %edi,8(%esi) movl %ecx,12(%esi) movl %ebp,4(%esp) xorl %edi,%ebp movl %edi,8(%esp) movl %ecx,12(%esp) movl 20(%esp),%edi movl 24(%esp),%ebx movl 28(%esp),%ecx addl 16(%esi),%edx addl 20(%esi),%edi addl 24(%esi),%ebx addl 28(%esi),%ecx movl %edx,16(%esi) movl %edi,20(%esi) movl %ebx,24(%esi) movl %ecx,28(%esi) movl %edi,20(%esp) movl 100(%esp),%edi movl %ebx,24(%esp) movl %ecx,28(%esp) cmpl 104(%esp),%edi jb L006grand_loop movl 108(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _sha256_block_data_order_ssse3 .private_extern _sha256_block_data_order_ssse3 .align 4 _sha256_block_data_order_ssse3: L_sha256_block_data_order_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call L007pic_point L007pic_point: popl %ebp leal LK256-L007pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll 
$6,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) leal -96(%esp),%esp movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edi movl %ebx,4(%esp) xorl %ecx,%ebx movl %ecx,8(%esp) movl %edi,12(%esp) movl 16(%esi),%edx movl 20(%esi),%edi movl 24(%esi),%ecx movl 28(%esi),%esi movl %edi,20(%esp) movl 100(%esp),%edi movl %ecx,24(%esp) movl %esi,28(%esp) movdqa 256(%ebp),%xmm7 jmp L008grand_ssse3 .align 4,0x90 L008grand_ssse3: movdqu (%edi),%xmm0 movdqu 16(%edi),%xmm1 movdqu 32(%edi),%xmm2 movdqu 48(%edi),%xmm3 addl $64,%edi .byte 102,15,56,0,199 movl %edi,100(%esp) .byte 102,15,56,0,207 movdqa (%ebp),%xmm4 .byte 102,15,56,0,215 movdqa 16(%ebp),%xmm5 paddd %xmm0,%xmm4 .byte 102,15,56,0,223 movdqa 32(%ebp),%xmm6 paddd %xmm1,%xmm5 movdqa 48(%ebp),%xmm7 movdqa %xmm4,32(%esp) paddd %xmm2,%xmm6 movdqa %xmm5,48(%esp) paddd %xmm3,%xmm7 movdqa %xmm6,64(%esp) movdqa %xmm7,80(%esp) jmp L009ssse3_00_47 .align 4,0x90 L009ssse3_00_47: addl $64,%ebp movl %edx,%ecx movdqa %xmm1,%xmm4 rorl $14,%edx movl 20(%esp),%esi movdqa %xmm3,%xmm7 xorl %ecx,%edx movl 24(%esp),%edi .byte 102,15,58,15,224,4 xorl %edi,%esi rorl $5,%edx andl %ecx,%esi .byte 102,15,58,15,250,4 movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi movdqa %xmm4,%xmm5 rorl $6,%edx movl %eax,%ecx movdqa %xmm4,%xmm6 addl %edi,%edx movl 4(%esp),%edi psrld $3,%xmm4 movl %eax,%esi rorl $9,%ecx paddd %xmm7,%xmm0 movl %eax,(%esp) xorl %eax,%ecx psrld $7,%xmm6 xorl %edi,%eax addl 28(%esp),%edx rorl $11,%ecx andl %eax,%ebx pshufd $250,%xmm3,%xmm7 xorl %esi,%ecx addl 32(%esp),%edx pslld $14,%xmm5 xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm4 addl %edx,%ebx addl 12(%esp),%edx psrld $11,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm5,%xmm4 movl 16(%esp),%esi xorl %ecx,%edx pslld $11,%xmm5 movl 20(%esp),%edi xorl %edi,%esi rorl $5,%edx pxor %xmm6,%xmm4 andl %ecx,%esi movl %ecx,12(%esp) movdqa %xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx pxor %xmm5,%xmm4 movl 
%ebx,%ecx addl %edi,%edx psrld $10,%xmm7 movl (%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm4,%xmm0 movl %ebx,28(%esp) xorl %ebx,%ecx psrlq $17,%xmm6 xorl %edi,%ebx addl 24(%esp),%edx rorl $11,%ecx pxor %xmm6,%xmm7 andl %ebx,%eax xorl %esi,%ecx psrlq $2,%xmm6 addl 36(%esp),%edx xorl %edi,%eax rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%eax addl 8(%esp),%edx pshufd $128,%xmm7,%xmm7 addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi psrldq $8,%xmm7 movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi paddd %xmm7,%xmm0 rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,24(%esp) pshufd $80,%xmm0,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx movdqa %xmm7,%xmm6 rorl $11,%ecx psrld $10,%xmm7 andl %eax,%ebx psrlq $17,%xmm6 xorl %esi,%ecx addl 40(%esp),%edx xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%ebx addl 4(%esp),%edx psrlq $2,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm6,%xmm7 movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi pshufd $8,%xmm7,%xmm7 xorl %edi,%esi rorl $5,%edx movdqa (%ebp),%xmm6 andl %ecx,%esi movl %ecx,4(%esp) pslldq $8,%xmm7 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm7,%xmm0 movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx paddd %xmm0,%xmm6 rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 44(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movdqa %xmm6,32(%esp) movl %edx,%ecx movdqa %xmm2,%xmm4 rorl $14,%edx movl 4(%esp),%esi movdqa %xmm0,%xmm7 xorl %ecx,%edx movl 8(%esp),%edi .byte 102,15,58,15,225,4 xorl %edi,%esi rorl $5,%edx andl %ecx,%esi .byte 102,15,58,15,251,4 movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi movdqa %xmm4,%xmm5 rorl $6,%edx movl %eax,%ecx movdqa %xmm4,%xmm6 addl %edi,%edx movl 20(%esp),%edi psrld $3,%xmm4 movl %eax,%esi 
rorl $9,%ecx paddd %xmm7,%xmm1 movl %eax,16(%esp) xorl %eax,%ecx psrld $7,%xmm6 xorl %edi,%eax addl 12(%esp),%edx rorl $11,%ecx andl %eax,%ebx pshufd $250,%xmm0,%xmm7 xorl %esi,%ecx addl 48(%esp),%edx pslld $14,%xmm5 xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm4 addl %edx,%ebx addl 28(%esp),%edx psrld $11,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm5,%xmm4 movl (%esp),%esi xorl %ecx,%edx pslld $11,%xmm5 movl 4(%esp),%edi xorl %edi,%esi rorl $5,%edx pxor %xmm6,%xmm4 andl %ecx,%esi movl %ecx,28(%esp) movdqa %xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx pxor %xmm5,%xmm4 movl %ebx,%ecx addl %edi,%edx psrld $10,%xmm7 movl 16(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm4,%xmm1 movl %ebx,12(%esp) xorl %ebx,%ecx psrlq $17,%xmm6 xorl %edi,%ebx addl 8(%esp),%edx rorl $11,%ecx pxor %xmm6,%xmm7 andl %ebx,%eax xorl %esi,%ecx psrlq $2,%xmm6 addl 52(%esp),%edx xorl %edi,%eax rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%eax addl 24(%esp),%edx pshufd $128,%xmm7,%xmm7 addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi psrldq $8,%xmm7 movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi paddd %xmm7,%xmm1 rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,8(%esp) pshufd $80,%xmm1,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx movdqa %xmm7,%xmm6 rorl $11,%ecx psrld $10,%xmm7 andl %eax,%ebx psrlq $17,%xmm6 xorl %esi,%ecx addl 56(%esp),%edx xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%ebx addl 20(%esp),%edx psrlq $2,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm6,%xmm7 movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi pshufd $8,%xmm7,%xmm7 xorl %edi,%esi rorl $5,%edx movdqa 16(%ebp),%xmm6 andl %ecx,%esi movl %ecx,20(%esp) pslldq $8,%xmm7 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm7,%xmm1 movl %ebx,4(%esp) xorl 
%ebx,%ecx xorl %edi,%ebx addl (%esp),%edx paddd %xmm1,%xmm6 rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 60(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movdqa %xmm6,48(%esp) movl %edx,%ecx movdqa %xmm3,%xmm4 rorl $14,%edx movl 20(%esp),%esi movdqa %xmm1,%xmm7 xorl %ecx,%edx movl 24(%esp),%edi .byte 102,15,58,15,226,4 xorl %edi,%esi rorl $5,%edx andl %ecx,%esi .byte 102,15,58,15,248,4 movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi movdqa %xmm4,%xmm5 rorl $6,%edx movl %eax,%ecx movdqa %xmm4,%xmm6 addl %edi,%edx movl 4(%esp),%edi psrld $3,%xmm4 movl %eax,%esi rorl $9,%ecx paddd %xmm7,%xmm2 movl %eax,(%esp) xorl %eax,%ecx psrld $7,%xmm6 xorl %edi,%eax addl 28(%esp),%edx rorl $11,%ecx andl %eax,%ebx pshufd $250,%xmm1,%xmm7 xorl %esi,%ecx addl 64(%esp),%edx pslld $14,%xmm5 xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm4 addl %edx,%ebx addl 12(%esp),%edx psrld $11,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm5,%xmm4 movl 16(%esp),%esi xorl %ecx,%edx pslld $11,%xmm5 movl 20(%esp),%edi xorl %edi,%esi rorl $5,%edx pxor %xmm6,%xmm4 andl %ecx,%esi movl %ecx,12(%esp) movdqa %xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx pxor %xmm5,%xmm4 movl %ebx,%ecx addl %edi,%edx psrld $10,%xmm7 movl (%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm4,%xmm2 movl %ebx,28(%esp) xorl %ebx,%ecx psrlq $17,%xmm6 xorl %edi,%ebx addl 24(%esp),%edx rorl $11,%ecx pxor %xmm6,%xmm7 andl %ebx,%eax xorl %esi,%ecx psrlq $2,%xmm6 addl 68(%esp),%edx xorl %edi,%eax rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%eax addl 8(%esp),%edx pshufd $128,%xmm7,%xmm7 addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi psrldq $8,%xmm7 movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi paddd %xmm7,%xmm2 rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,24(%esp) pshufd $80,%xmm2,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 
20(%esp),%edx movdqa %xmm7,%xmm6 rorl $11,%ecx psrld $10,%xmm7 andl %eax,%ebx psrlq $17,%xmm6 xorl %esi,%ecx addl 72(%esp),%edx xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%ebx addl 4(%esp),%edx psrlq $2,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm6,%xmm7 movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi pshufd $8,%xmm7,%xmm7 xorl %edi,%esi rorl $5,%edx movdqa 32(%ebp),%xmm6 andl %ecx,%esi movl %ecx,4(%esp) pslldq $8,%xmm7 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm7,%xmm2 movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx paddd %xmm2,%xmm6 rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 76(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movdqa %xmm6,64(%esp) movl %edx,%ecx movdqa %xmm0,%xmm4 rorl $14,%edx movl 4(%esp),%esi movdqa %xmm2,%xmm7 xorl %ecx,%edx movl 8(%esp),%edi .byte 102,15,58,15,227,4 xorl %edi,%esi rorl $5,%edx andl %ecx,%esi .byte 102,15,58,15,249,4 movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi movdqa %xmm4,%xmm5 rorl $6,%edx movl %eax,%ecx movdqa %xmm4,%xmm6 addl %edi,%edx movl 20(%esp),%edi psrld $3,%xmm4 movl %eax,%esi rorl $9,%ecx paddd %xmm7,%xmm3 movl %eax,16(%esp) xorl %eax,%ecx psrld $7,%xmm6 xorl %edi,%eax addl 12(%esp),%edx rorl $11,%ecx andl %eax,%ebx pshufd $250,%xmm2,%xmm7 xorl %esi,%ecx addl 80(%esp),%edx pslld $14,%xmm5 xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm4 addl %edx,%ebx addl 28(%esp),%edx psrld $11,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm5,%xmm4 movl (%esp),%esi xorl %ecx,%edx pslld $11,%xmm5 movl 4(%esp),%edi xorl %edi,%esi rorl $5,%edx pxor %xmm6,%xmm4 andl %ecx,%esi movl %ecx,28(%esp) movdqa %xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx pxor %xmm5,%xmm4 movl %ebx,%ecx addl %edi,%edx psrld $10,%xmm7 movl 16(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm4,%xmm3 movl %ebx,12(%esp) xorl %ebx,%ecx psrlq $17,%xmm6 xorl %edi,%ebx 
addl 8(%esp),%edx rorl $11,%ecx pxor %xmm6,%xmm7 andl %ebx,%eax xorl %esi,%ecx psrlq $2,%xmm6 addl 84(%esp),%edx xorl %edi,%eax rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%eax addl 24(%esp),%edx pshufd $128,%xmm7,%xmm7 addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi psrldq $8,%xmm7 movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi paddd %xmm7,%xmm3 rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,8(%esp) pshufd $80,%xmm3,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx movdqa %xmm7,%xmm6 rorl $11,%ecx psrld $10,%xmm7 andl %eax,%ebx psrlq $17,%xmm6 xorl %esi,%ecx addl 88(%esp),%edx xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%ebx addl 20(%esp),%edx psrlq $2,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm6,%xmm7 movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi pshufd $8,%xmm7,%xmm7 xorl %edi,%esi rorl $5,%edx movdqa 48(%ebp),%xmm6 andl %ecx,%esi movl %ecx,20(%esp) pslldq $8,%xmm7 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm7,%xmm3 movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx paddd %xmm3,%xmm6 rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 92(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movdqa %xmm6,80(%esp) cmpl $66051,64(%ebp) jne L009ssse3_00_47 movl %edx,%ecx rorl $14,%edx movl 20(%esp),%esi xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,(%esp) xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 32(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 16(%esp),%esi 
xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,12(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,28(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 36(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,24(%esp) xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 40(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 44(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 4(%esp),%esi xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,16(%esp) xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 48(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl (%esp),%esi xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,28(%esp) xorl %ecx,%edx xorl %esi,%edi 
rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,12(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 52(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,8(%esp) xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 56(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 60(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 20(%esp),%esi xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,(%esp) xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 64(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 16(%esp),%esi xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,12(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,28(%esp) xorl %ebx,%ecx 
xorl %edi,%ebx addl 24(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 68(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,24(%esp) xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 72(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 76(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 4(%esp),%esi xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,16(%esp) xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 80(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl (%esp),%esi xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,28(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,12(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 84(%esp),%edx xorl %edi,%eax rorl $2,%ecx 
addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,8(%esp) xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 88(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 92(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movl 96(%esp),%esi xorl %edi,%ebx movl 12(%esp),%ecx addl (%esi),%eax addl 4(%esi),%ebx addl 8(%esi),%edi addl 12(%esi),%ecx movl %eax,(%esi) movl %ebx,4(%esi) movl %edi,8(%esi) movl %ecx,12(%esi) movl %ebx,4(%esp) xorl %edi,%ebx movl %edi,8(%esp) movl %ecx,12(%esp) movl 20(%esp),%edi movl 24(%esp),%ecx addl 16(%esi),%edx addl 20(%esi),%edi addl 24(%esi),%ecx movl %edx,16(%esi) movl %edi,20(%esi) movl %edi,20(%esp) movl 28(%esp),%edi movl %ecx,24(%esi) addl 28(%esi),%edi movl %ecx,24(%esp) movl %edi,28(%esi) movl %edi,28(%esp) movl 100(%esp),%edi movdqa 64(%ebp),%xmm7 subl $192,%ebp cmpl 104(%esp),%edi jb L008grand_ssse3 movl 108(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _sha256_block_data_order_avx .private_extern _sha256_block_data_order_avx .align 4 _sha256_block_data_order_avx: L_sha256_block_data_order_avx_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call L010pic_point L010pic_point: popl %ebp leal 
LK256-L010pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $6,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) leal -96(%esp),%esp vzeroall movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edi movl %ebx,4(%esp) xorl %ecx,%ebx movl %ecx,8(%esp) movl %edi,12(%esp) movl 16(%esi),%edx movl 20(%esi),%edi movl 24(%esi),%ecx movl 28(%esi),%esi movl %edi,20(%esp) movl 100(%esp),%edi movl %ecx,24(%esp) movl %esi,28(%esp) vmovdqa 256(%ebp),%xmm7 jmp L011grand_avx .align 5,0x90 L011grand_avx: vmovdqu (%edi),%xmm0 vmovdqu 16(%edi),%xmm1 vmovdqu 32(%edi),%xmm2 vmovdqu 48(%edi),%xmm3 addl $64,%edi vpshufb %xmm7,%xmm0,%xmm0 movl %edi,100(%esp) vpshufb %xmm7,%xmm1,%xmm1 vpshufb %xmm7,%xmm2,%xmm2 vpaddd (%ebp),%xmm0,%xmm4 vpshufb %xmm7,%xmm3,%xmm3 vpaddd 16(%ebp),%xmm1,%xmm5 vpaddd 32(%ebp),%xmm2,%xmm6 vpaddd 48(%ebp),%xmm3,%xmm7 vmovdqa %xmm4,32(%esp) vmovdqa %xmm5,48(%esp) vmovdqa %xmm6,64(%esp) vmovdqa %xmm7,80(%esp) jmp L012avx_00_47 .align 4,0x90 L012avx_00_47: addl $64,%ebp vpalignr $4,%xmm0,%xmm1,%xmm4 movl %edx,%ecx shrdl $14,%edx,%edx movl 20(%esp),%esi vpalignr $4,%xmm2,%xmm3,%xmm7 xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi vpsrld $7,%xmm4,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,16(%esp) vpaddd %xmm7,%xmm0,%xmm0 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrld $3,%xmm4,%xmm7 movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi vpslld $14,%xmm4,%xmm5 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,(%esp) vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx vpshufd $250,%xmm3,%xmm7 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpsrld $11,%xmm6,%xmm6 addl 32(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpxor %xmm5,%xmm4,%xmm4 addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx vpslld $11,%xmm5,%xmm5 movl %edx,%ecx shrdl $14,%edx,%edx movl 16(%esp),%esi vpxor %xmm6,%xmm4,%xmm4 xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi vpsrld $10,%xmm7,%xmm6 shrdl $5,%edx,%edx andl 
%ecx,%esi movl %ecx,12(%esp) vpxor %xmm5,%xmm4,%xmm4 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi vpaddd %xmm4,%xmm0,%xmm0 movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,28(%esp) vpxor %xmm5,%xmm6,%xmm6 xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx vpsrlq $19,%xmm7,%xmm7 shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx vpxor %xmm7,%xmm6,%xmm6 addl 36(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx vpshufd $132,%xmm6,%xmm7 addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax vpsrldq $8,%xmm7,%xmm7 movl %edx,%ecx shrdl $14,%edx,%edx movl 12(%esp),%esi vpaddd %xmm7,%xmm0,%xmm0 xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi vpshufd $80,%xmm0,%xmm7 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,8(%esp) vpsrld $10,%xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi vpxor %xmm5,%xmm6,%xmm6 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,24(%esp) vpsrlq $19,%xmm7,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx vpxor %xmm7,%xmm6,%xmm6 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpshufd $232,%xmm6,%xmm7 addl 40(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpslldq $8,%xmm7,%xmm7 addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx vpaddd %xmm7,%xmm0,%xmm0 movl %edx,%ecx shrdl $14,%edx,%edx movl 8(%esp),%esi vpaddd (%ebp),%xmm0,%xmm6 xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 44(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax vmovdqa %xmm6,32(%esp) vpalignr $4,%xmm1,%xmm2,%xmm4 movl %edx,%ecx shrdl $14,%edx,%edx movl 4(%esp),%esi vpalignr $4,%xmm3,%xmm0,%xmm7 xorl %ecx,%edx movl 
8(%esp),%edi xorl %edi,%esi vpsrld $7,%xmm4,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,(%esp) vpaddd %xmm7,%xmm1,%xmm1 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrld $3,%xmm4,%xmm7 movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi vpslld $14,%xmm4,%xmm5 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,16(%esp) vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx vpshufd $250,%xmm0,%xmm7 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpsrld $11,%xmm6,%xmm6 addl 48(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpxor %xmm5,%xmm4,%xmm4 addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx vpslld $11,%xmm5,%xmm5 movl %edx,%ecx shrdl $14,%edx,%edx movl (%esp),%esi vpxor %xmm6,%xmm4,%xmm4 xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi vpsrld $10,%xmm7,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,28(%esp) vpxor %xmm5,%xmm4,%xmm4 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi vpaddd %xmm4,%xmm1,%xmm1 movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,12(%esp) vpxor %xmm5,%xmm6,%xmm6 xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx vpsrlq $19,%xmm7,%xmm7 shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx vpxor %xmm7,%xmm6,%xmm6 addl 52(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx vpshufd $132,%xmm6,%xmm7 addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax vpsrldq $8,%xmm7,%xmm7 movl %edx,%ecx shrdl $14,%edx,%edx movl 28(%esp),%esi vpaddd %xmm7,%xmm1,%xmm1 xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi vpshufd $80,%xmm1,%xmm7 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,24(%esp) vpsrld $10,%xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi vpxor %xmm5,%xmm6,%xmm6 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,8(%esp) vpsrlq $19,%xmm7,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx vpxor %xmm7,%xmm6,%xmm6 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpshufd $232,%xmm6,%xmm7 addl 56(%esp),%edx 
xorl %edi,%ebx shrdl $2,%ecx,%ecx vpslldq $8,%xmm7,%xmm7 addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx vpaddd %xmm7,%xmm1,%xmm1 movl %edx,%ecx shrdl $14,%edx,%edx movl 24(%esp),%esi vpaddd 16(%ebp),%xmm1,%xmm6 xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 60(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax vmovdqa %xmm6,48(%esp) vpalignr $4,%xmm2,%xmm3,%xmm4 movl %edx,%ecx shrdl $14,%edx,%edx movl 20(%esp),%esi vpalignr $4,%xmm0,%xmm1,%xmm7 xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi vpsrld $7,%xmm4,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,16(%esp) vpaddd %xmm7,%xmm2,%xmm2 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrld $3,%xmm4,%xmm7 movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi vpslld $14,%xmm4,%xmm5 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,(%esp) vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx vpshufd $250,%xmm1,%xmm7 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpsrld $11,%xmm6,%xmm6 addl 64(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpxor %xmm5,%xmm4,%xmm4 addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx vpslld $11,%xmm5,%xmm5 movl %edx,%ecx shrdl $14,%edx,%edx movl 16(%esp),%esi vpxor %xmm6,%xmm4,%xmm4 xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi vpsrld $10,%xmm7,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,12(%esp) vpxor %xmm5,%xmm4,%xmm4 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi vpaddd %xmm4,%xmm2,%xmm2 movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,28(%esp) vpxor %xmm5,%xmm6,%xmm6 xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx vpsrlq $19,%xmm7,%xmm7 shrdl 
$11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx vpxor %xmm7,%xmm6,%xmm6 addl 68(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx vpshufd $132,%xmm6,%xmm7 addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax vpsrldq $8,%xmm7,%xmm7 movl %edx,%ecx shrdl $14,%edx,%edx movl 12(%esp),%esi vpaddd %xmm7,%xmm2,%xmm2 xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi vpshufd $80,%xmm2,%xmm7 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,8(%esp) vpsrld $10,%xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi vpxor %xmm5,%xmm6,%xmm6 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,24(%esp) vpsrlq $19,%xmm7,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx vpxor %xmm7,%xmm6,%xmm6 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpshufd $232,%xmm6,%xmm7 addl 72(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpslldq $8,%xmm7,%xmm7 addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx vpaddd %xmm7,%xmm2,%xmm2 movl %edx,%ecx shrdl $14,%edx,%edx movl 8(%esp),%esi vpaddd 32(%ebp),%xmm2,%xmm6 xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 76(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax vmovdqa %xmm6,64(%esp) vpalignr $4,%xmm3,%xmm0,%xmm4 movl %edx,%ecx shrdl $14,%edx,%edx movl 4(%esp),%esi vpalignr $4,%xmm1,%xmm2,%xmm7 xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi vpsrld $7,%xmm4,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,(%esp) vpaddd %xmm7,%xmm3,%xmm3 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrld $3,%xmm4,%xmm7 movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi vpslld $14,%xmm4,%xmm5 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,16(%esp) vpxor %xmm6,%xmm7,%xmm4 
xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx vpshufd $250,%xmm2,%xmm7 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpsrld $11,%xmm6,%xmm6 addl 80(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpxor %xmm5,%xmm4,%xmm4 addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx vpslld $11,%xmm5,%xmm5 movl %edx,%ecx shrdl $14,%edx,%edx movl (%esp),%esi vpxor %xmm6,%xmm4,%xmm4 xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi vpsrld $10,%xmm7,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,28(%esp) vpxor %xmm5,%xmm4,%xmm4 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi vpaddd %xmm4,%xmm3,%xmm3 movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,12(%esp) vpxor %xmm5,%xmm6,%xmm6 xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx vpsrlq $19,%xmm7,%xmm7 shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx vpxor %xmm7,%xmm6,%xmm6 addl 84(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx vpshufd $132,%xmm6,%xmm7 addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax vpsrldq $8,%xmm7,%xmm7 movl %edx,%ecx shrdl $14,%edx,%edx movl 28(%esp),%esi vpaddd %xmm7,%xmm3,%xmm3 xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi vpshufd $80,%xmm3,%xmm7 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,24(%esp) vpsrld $10,%xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi vpxor %xmm5,%xmm6,%xmm6 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,8(%esp) vpsrlq $19,%xmm7,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx vpxor %xmm7,%xmm6,%xmm6 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpshufd $232,%xmm6,%xmm7 addl 88(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpslldq $8,%xmm7,%xmm7 addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx vpaddd %xmm7,%xmm3,%xmm3 movl %edx,%ecx shrdl $14,%edx,%edx movl 24(%esp),%esi vpaddd 48(%ebp),%xmm3,%xmm6 xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi 
shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 92(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax vmovdqa %xmm6,80(%esp) cmpl $66051,64(%ebp) jne L012avx_00_47 movl %edx,%ecx shrdl $14,%edx,%edx movl 20(%esp),%esi xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,(%esp) xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 32(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 16(%esp),%esi xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,12(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,28(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 36(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,24(%esp) xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 40(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi 
shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 44(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 4(%esp),%esi xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,16(%esp) xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 48(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl (%esp),%esi xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,28(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,12(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 52(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,8(%esp) xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 56(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 24(%esp),%esi xorl %ecx,%edx movl 
28(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 60(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 20(%esp),%esi xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,(%esp) xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 64(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 16(%esp),%esi xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,12(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,28(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 68(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,24(%esp) xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 72(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 8(%esp),%esi 
xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 76(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 4(%esp),%esi xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,16(%esp) xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 80(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl (%esp),%esi xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,28(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,12(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 84(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,8(%esp) xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 88(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx 
movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 92(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movl 96(%esp),%esi xorl %edi,%ebx movl 12(%esp),%ecx addl (%esi),%eax addl 4(%esi),%ebx addl 8(%esi),%edi addl 12(%esi),%ecx movl %eax,(%esi) movl %ebx,4(%esi) movl %edi,8(%esi) movl %ecx,12(%esi) movl %ebx,4(%esp) xorl %edi,%ebx movl %edi,8(%esp) movl %ecx,12(%esp) movl 20(%esp),%edi movl 24(%esp),%ecx addl 16(%esi),%edx addl 20(%esi),%edi addl 24(%esi),%ecx movl %edx,16(%esi) movl %edi,20(%esi) movl %edi,20(%esp) movl 28(%esp),%edi movl %ecx,24(%esi) addl 28(%esi),%edi movl %ecx,24(%esp) movl %edi,28(%esi) movl %edi,28(%esp) movl 100(%esp),%edi vmovdqa 64(%ebp),%xmm7 subl $192,%ebp cmpl 104(%esp),%edi jb L011grand_avx movl 108(%esp),%esp vzeroall popl %edi popl %esi popl %ebx popl %ebp ret #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha256-586-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl sha256_block_data_order_nohw .hidden sha256_block_data_order_nohw .type sha256_block_data_order_nohw,@function .align 16 sha256_block_data_order_nohw: .L_sha256_block_data_order_nohw_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call .L000pic_point .L000pic_point: popl %ebp leal .LK256-.L000pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $6,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) .L001no_xmm: subl %edi,%eax cmpl $256,%eax jae .L002unrolled jmp .L003loop .align 16 .L003loop: movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx bswap %eax movl 12(%edi),%edx bswap %ebx pushl %eax bswap %ecx pushl %ebx bswap %edx pushl %ecx pushl %edx movl 16(%edi),%eax movl 20(%edi),%ebx movl 24(%edi),%ecx bswap %eax movl 28(%edi),%edx bswap %ebx pushl %eax bswap %ecx pushl %ebx bswap %edx pushl %ecx pushl %edx movl 32(%edi),%eax movl 36(%edi),%ebx movl 40(%edi),%ecx bswap %eax movl 44(%edi),%edx bswap %ebx pushl %eax bswap %ecx pushl %ebx bswap %edx pushl %ecx pushl %edx movl 48(%edi),%eax movl 52(%edi),%ebx movl 56(%edi),%ecx bswap %eax movl 60(%edi),%edx bswap %ebx pushl %eax bswap %ecx pushl %ebx bswap %edx pushl %ecx pushl %edx addl $64,%edi leal -36(%esp),%esp movl %edi,104(%esp) movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edi movl %ebx,8(%esp) xorl %ecx,%ebx movl %ecx,12(%esp) movl %edi,16(%esp) movl %ebx,(%esp) movl 16(%esi),%edx movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%edi movl %ebx,24(%esp) movl %ecx,28(%esp) movl %edi,32(%esp) .align 16 .L00400_15: movl %edx,%ecx movl 24(%esp),%esi rorl $14,%ecx movl 28(%esp),%edi xorl %edx,%ecx xorl %edi,%esi movl 96(%esp),%ebx rorl $5,%ecx andl %edx,%esi movl %edx,20(%esp) xorl %ecx,%edx addl 32(%esp),%ebx xorl %edi,%esi rorl $6,%edx movl %eax,%ecx addl %esi,%ebx rorl $9,%ecx 
addl %edx,%ebx movl 8(%esp),%edi xorl %eax,%ecx movl %eax,4(%esp) leal -4(%esp),%esp rorl $11,%ecx movl (%ebp),%esi xorl %eax,%ecx movl 20(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %esi,%ebx movl %eax,(%esp) addl %ebx,%edx andl 4(%esp),%eax addl %ecx,%ebx xorl %edi,%eax addl $4,%ebp addl %ebx,%eax cmpl $3248222580,%esi jne .L00400_15 movl 156(%esp),%ecx jmp .L00516_63 .align 16 .L00516_63: movl %ecx,%ebx movl 104(%esp),%esi rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 160(%esp),%ebx shrl $10,%edi addl 124(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 24(%esp),%esi rorl $14,%ecx addl %edi,%ebx movl 28(%esp),%edi xorl %edx,%ecx xorl %edi,%esi movl %ebx,96(%esp) rorl $5,%ecx andl %edx,%esi movl %edx,20(%esp) xorl %ecx,%edx addl 32(%esp),%ebx xorl %edi,%esi rorl $6,%edx movl %eax,%ecx addl %esi,%ebx rorl $9,%ecx addl %edx,%ebx movl 8(%esp),%edi xorl %eax,%ecx movl %eax,4(%esp) leal -4(%esp),%esp rorl $11,%ecx movl (%ebp),%esi xorl %eax,%ecx movl 20(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %esi,%ebx movl %eax,(%esp) addl %ebx,%edx andl 4(%esp),%eax addl %ecx,%ebx xorl %edi,%eax movl 156(%esp),%ecx addl $4,%ebp addl %ebx,%eax cmpl $3329325298,%esi jne .L00516_63 movl 356(%esp),%esi movl 8(%esp),%ebx movl 16(%esp),%ecx addl (%esi),%eax addl 4(%esi),%ebx addl 8(%esi),%edi addl 12(%esi),%ecx movl %eax,(%esi) movl %ebx,4(%esi) movl %edi,8(%esi) movl %ecx,12(%esi) movl 24(%esp),%eax movl 28(%esp),%ebx movl 32(%esp),%ecx movl 360(%esp),%edi addl 16(%esi),%edx addl 20(%esi),%eax addl 24(%esi),%ebx addl 28(%esi),%ecx movl %edx,16(%esi) movl %eax,20(%esi) movl %ebx,24(%esi) movl %ecx,28(%esi) leal 356(%esp),%esp subl $256,%ebp cmpl 8(%esp),%edi jb .L003loop movl 12(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .align 64 .LK256: .long 
1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298 .long 66051,67438087,134810123,202182159 .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97 .byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 .byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 .byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 .byte 62,0 .align 16 .L002unrolled: leal -96(%esp),%esp movl (%esi),%eax movl 4(%esi),%ebp movl 8(%esi),%ecx movl 12(%esi),%ebx movl %ebp,4(%esp) xorl %ecx,%ebp movl %ecx,8(%esp) movl %ebx,12(%esp) movl 16(%esi),%edx movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%esi movl %ebx,20(%esp) movl %ecx,24(%esp) movl %esi,28(%esp) jmp .L006grand_loop .align 16 .L006grand_loop: movl (%edi),%ebx movl 4(%edi),%ecx bswap %ebx movl 8(%edi),%esi bswap %ecx movl %ebx,32(%esp) bswap %esi movl %ecx,36(%esp) movl %esi,40(%esp) movl 12(%edi),%ebx movl 16(%edi),%ecx bswap %ebx movl 20(%edi),%esi bswap %ecx movl %ebx,44(%esp) bswap %esi movl %ecx,48(%esp) movl %esi,52(%esp) movl 24(%edi),%ebx movl 28(%edi),%ecx bswap %ebx movl 32(%edi),%esi bswap %ecx movl %ebx,56(%esp) bswap %esi movl %ecx,60(%esp) movl %esi,64(%esp) movl 36(%edi),%ebx movl 40(%edi),%ecx bswap %ebx movl 44(%edi),%esi bswap %ecx movl %ebx,68(%esp) bswap %esi movl %ecx,72(%esp) movl %esi,76(%esp) movl 48(%edi),%ebx movl 52(%edi),%ecx bswap %ebx movl 
56(%edi),%esi bswap %ecx movl %ebx,80(%esp) bswap %esi movl %ecx,84(%esp) movl %esi,88(%esp) movl 60(%edi),%ebx addl $64,%edi bswap %ebx movl %edi,100(%esp) movl %ebx,92(%esp) movl %edx,%ecx movl 20(%esp),%esi rorl $14,%edx movl 24(%esp),%edi xorl %ecx,%edx movl 32(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1116352408(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 16(%esp),%ecx rorl $14,%edx movl 20(%esp),%edi xorl %esi,%edx movl 36(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1899447441(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 12(%esp),%esi rorl $14,%edx movl 16(%esp),%edi xorl %ecx,%edx movl 40(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3049323471(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 8(%esp),%ecx rorl $14,%edx movl 12(%esp),%edi xorl %esi,%edx movl 44(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl 
%ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3921009573(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl %edx,%ecx movl 4(%esp),%esi rorl $14,%edx movl 8(%esp),%edi xorl %ecx,%edx movl 48(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 961987163(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl (%esp),%ecx rorl $14,%edx movl 4(%esp),%edi xorl %esi,%edx movl 52(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1508970993(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 28(%esp),%esi rorl $14,%edx movl (%esp),%edi xorl %ecx,%edx movl 56(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2453635748(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 24(%esp),%ecx rorl $14,%edx movl 28(%esp),%edi xorl %esi,%edx movl 60(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 
8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2870763221(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 20(%esp),%esi rorl $14,%edx movl 24(%esp),%edi xorl %ecx,%edx movl 64(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3624381080(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 16(%esp),%ecx rorl $14,%edx movl 20(%esp),%edi xorl %esi,%edx movl 68(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 310598401(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 12(%esp),%esi rorl $14,%edx movl 16(%esp),%edi xorl %ecx,%edx movl 72(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 607225278(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 8(%esp),%ecx rorl $14,%edx movl 12(%esp),%edi xorl %esi,%edx movl 76(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl 
%ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1426881987(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl %edx,%ecx movl 4(%esp),%esi rorl $14,%edx movl 8(%esp),%edi xorl %ecx,%edx movl 80(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1925078388(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl (%esp),%ecx rorl $14,%edx movl 4(%esp),%edi xorl %esi,%edx movl 84(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2162078206(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 28(%esp),%esi rorl $14,%edx movl (%esp),%edi xorl %ecx,%edx movl 88(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2614888103(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 24(%esp),%ecx rorl $14,%edx movl 28(%esp),%edi xorl %esi,%edx movl 92(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl 
$9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3248222580(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 36(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 88(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 32(%esp),%ebx shrl $10,%edi addl 68(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,32(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3835390401(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 40(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 92(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 36(%esp),%ebx shrl $10,%edi addl 72(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,36(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 4022224774(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 44(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 32(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 40(%esp),%ebx shrl $10,%edi addl 
76(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,40(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 264347078(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 48(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 36(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 44(%esp),%ebx shrl $10,%edi addl 80(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,44(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 604807628(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 52(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 40(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 48(%esp),%ebx shrl $10,%edi addl 84(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,48(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 770255983(%ebx,%edx,1),%edx xorl %esi,%ecx 
xorl %edi,%ebp movl 56(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 44(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 52(%esp),%ebx shrl $10,%edi addl 88(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,52(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1249150122(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 60(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 48(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 56(%esp),%ebx shrl $10,%edi addl 92(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,56(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1555081692(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 64(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 52(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 60(%esp),%ebx shrl $10,%edi addl 32(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,60(%esp) xorl %edi,%ecx rorl $5,%edx 
andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1996064986(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 68(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 56(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 64(%esp),%ebx shrl $10,%edi addl 36(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,64(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2554220882(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 72(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 60(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 68(%esp),%ebx shrl $10,%edi addl 40(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,68(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2821834349(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 76(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 64(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi 
xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 72(%esp),%ebx shrl $10,%edi addl 44(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,72(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2952996808(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 80(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 68(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 76(%esp),%ebx shrl $10,%edi addl 48(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,76(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3210313671(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 84(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 72(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 80(%esp),%ebx shrl $10,%edi addl 52(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,80(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl 
%eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3336571891(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 88(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 76(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 84(%esp),%ebx shrl $10,%edi addl 56(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,84(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3584528711(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 92(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 80(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 88(%esp),%ebx shrl $10,%edi addl 60(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,88(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 113926993(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 32(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 84(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 92(%esp),%ebx shrl $10,%edi addl 64(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 
24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,92(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 338241895(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 36(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 88(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 32(%esp),%ebx shrl $10,%edi addl 68(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,32(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 666307205(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 40(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 92(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 36(%esp),%ebx shrl $10,%edi addl 72(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,36(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 773529912(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 44(%esp),%ecx rorl $2,%esi addl 
%edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 32(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 40(%esp),%ebx shrl $10,%edi addl 76(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,40(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1294757372(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 48(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 36(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 44(%esp),%ebx shrl $10,%edi addl 80(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,44(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1396182291(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 52(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 40(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 48(%esp),%ebx shrl $10,%edi addl 84(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,48(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 
12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1695183700(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 56(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 44(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 52(%esp),%ebx shrl $10,%edi addl 88(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,52(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1986661051(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 60(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 48(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 56(%esp),%ebx shrl $10,%edi addl 92(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,56(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2177026350(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 64(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 52(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl 
%edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 60(%esp),%ebx shrl $10,%edi addl 32(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,60(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2456956037(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 68(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 56(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 64(%esp),%ebx shrl $10,%edi addl 36(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,64(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2730485921(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 72(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 60(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 68(%esp),%ebx shrl $10,%edi addl 40(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,68(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp 
rorl $11,%esi andl %ebp,%eax leal 2820302411(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 76(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 64(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 72(%esp),%ebx shrl $10,%edi addl 44(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,72(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3259730800(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 80(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 68(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 76(%esp),%ebx shrl $10,%edi addl 48(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,76(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3345764771(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 84(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 72(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 80(%esp),%ebx shrl $10,%edi addl 52(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 
8(%esp),%edi xorl %ecx,%edx movl %ebx,80(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3516065817(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 88(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 76(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 84(%esp),%ebx shrl $10,%edi addl 56(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,84(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3600352804(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 92(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 80(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 88(%esp),%ebx shrl $10,%edi addl 60(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,88(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 4094571909(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 32(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp 
movl 84(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 92(%esp),%ebx shrl $10,%edi addl 64(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,92(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 275423344(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 36(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 88(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 32(%esp),%ebx shrl $10,%edi addl 68(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,32(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 430227734(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 40(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 92(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 36(%esp),%ebx shrl $10,%edi addl 72(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,36(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl 
%ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 506948616(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 44(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 32(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 40(%esp),%ebx shrl $10,%edi addl 76(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,40(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 659060556(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 48(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 36(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 44(%esp),%ebx shrl $10,%edi addl 80(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,44(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 883997877(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 52(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 40(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 48(%esp),%ebx 
shrl $10,%edi addl 84(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,48(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 958139571(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 56(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 44(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 52(%esp),%ebx shrl $10,%edi addl 88(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,52(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1322822218(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 60(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 48(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 56(%esp),%ebx shrl $10,%edi addl 92(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,56(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1537002063(%ebx,%edx,1),%edx 
xorl %esi,%ecx xorl %edi,%ebp movl 64(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 52(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 60(%esp),%ebx shrl $10,%edi addl 32(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,60(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1747873779(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 68(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 56(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 64(%esp),%ebx shrl $10,%edi addl 36(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,64(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1955562222(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 72(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 60(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 68(%esp),%ebx shrl $10,%edi addl 40(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,68(%esp) xorl %edi,%ecx 
rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2024104815(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 76(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 64(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 72(%esp),%ebx shrl $10,%edi addl 44(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,72(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2227730452(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 80(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 68(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 76(%esp),%ebx shrl $10,%edi addl 48(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,76(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2361852424(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 84(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 72(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi 
rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 80(%esp),%ebx shrl $10,%edi addl 52(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,80(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2428436474(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 88(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 76(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 84(%esp),%ebx shrl $10,%edi addl 56(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,84(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2756734187(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 92(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 80(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 88(%esp),%ebx shrl $10,%edi addl 60(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl 
%eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3204031479(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 32(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 84(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 92(%esp),%ebx shrl $10,%edi addl 64(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3329325298(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 96(%esp),%esi xorl %edi,%ebp movl 12(%esp),%ecx addl (%esi),%eax addl 4(%esi),%ebp addl 8(%esi),%edi addl 12(%esi),%ecx movl %eax,(%esi) movl %ebp,4(%esi) movl %edi,8(%esi) movl %ecx,12(%esi) movl %ebp,4(%esp) xorl %edi,%ebp movl %edi,8(%esp) movl %ecx,12(%esp) movl 20(%esp),%edi movl 24(%esp),%ebx movl 28(%esp),%ecx addl 16(%esi),%edx addl 20(%esi),%edi addl 24(%esi),%ebx addl 28(%esi),%ecx movl %edx,16(%esi) movl %edi,20(%esi) movl %ebx,24(%esi) movl %ecx,28(%esi) movl %edi,20(%esp) movl 100(%esp),%edi movl %ebx,24(%esp) movl %ecx,28(%esp) cmpl 104(%esp),%edi jb .L006grand_loop movl 108(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size sha256_block_data_order_nohw,.-.L_sha256_block_data_order_nohw_begin .globl sha256_block_data_order_ssse3 .hidden sha256_block_data_order_ssse3 .type sha256_block_data_order_ssse3,@function .align 16 sha256_block_data_order_ssse3: .L_sha256_block_data_order_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call 
.L007pic_point .L007pic_point: popl %ebp leal .LK256-.L007pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $6,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) leal -96(%esp),%esp movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edi movl %ebx,4(%esp) xorl %ecx,%ebx movl %ecx,8(%esp) movl %edi,12(%esp) movl 16(%esi),%edx movl 20(%esi),%edi movl 24(%esi),%ecx movl 28(%esi),%esi movl %edi,20(%esp) movl 100(%esp),%edi movl %ecx,24(%esp) movl %esi,28(%esp) movdqa 256(%ebp),%xmm7 jmp .L008grand_ssse3 .align 16 .L008grand_ssse3: movdqu (%edi),%xmm0 movdqu 16(%edi),%xmm1 movdqu 32(%edi),%xmm2 movdqu 48(%edi),%xmm3 addl $64,%edi .byte 102,15,56,0,199 movl %edi,100(%esp) .byte 102,15,56,0,207 movdqa (%ebp),%xmm4 .byte 102,15,56,0,215 movdqa 16(%ebp),%xmm5 paddd %xmm0,%xmm4 .byte 102,15,56,0,223 movdqa 32(%ebp),%xmm6 paddd %xmm1,%xmm5 movdqa 48(%ebp),%xmm7 movdqa %xmm4,32(%esp) paddd %xmm2,%xmm6 movdqa %xmm5,48(%esp) paddd %xmm3,%xmm7 movdqa %xmm6,64(%esp) movdqa %xmm7,80(%esp) jmp .L009ssse3_00_47 .align 16 .L009ssse3_00_47: addl $64,%ebp movl %edx,%ecx movdqa %xmm1,%xmm4 rorl $14,%edx movl 20(%esp),%esi movdqa %xmm3,%xmm7 xorl %ecx,%edx movl 24(%esp),%edi .byte 102,15,58,15,224,4 xorl %edi,%esi rorl $5,%edx andl %ecx,%esi .byte 102,15,58,15,250,4 movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi movdqa %xmm4,%xmm5 rorl $6,%edx movl %eax,%ecx movdqa %xmm4,%xmm6 addl %edi,%edx movl 4(%esp),%edi psrld $3,%xmm4 movl %eax,%esi rorl $9,%ecx paddd %xmm7,%xmm0 movl %eax,(%esp) xorl %eax,%ecx psrld $7,%xmm6 xorl %edi,%eax addl 28(%esp),%edx rorl $11,%ecx andl %eax,%ebx pshufd $250,%xmm3,%xmm7 xorl %esi,%ecx addl 32(%esp),%edx pslld $14,%xmm5 xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm4 addl %edx,%ebx addl 12(%esp),%edx psrld $11,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm5,%xmm4 movl 16(%esp),%esi xorl %ecx,%edx pslld $11,%xmm5 movl 20(%esp),%edi xorl %edi,%esi rorl $5,%edx pxor %xmm6,%xmm4 andl 
%ecx,%esi movl %ecx,12(%esp) movdqa %xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx pxor %xmm5,%xmm4 movl %ebx,%ecx addl %edi,%edx psrld $10,%xmm7 movl (%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm4,%xmm0 movl %ebx,28(%esp) xorl %ebx,%ecx psrlq $17,%xmm6 xorl %edi,%ebx addl 24(%esp),%edx rorl $11,%ecx pxor %xmm6,%xmm7 andl %ebx,%eax xorl %esi,%ecx psrlq $2,%xmm6 addl 36(%esp),%edx xorl %edi,%eax rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%eax addl 8(%esp),%edx pshufd $128,%xmm7,%xmm7 addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi psrldq $8,%xmm7 movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi paddd %xmm7,%xmm0 rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,24(%esp) pshufd $80,%xmm0,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx movdqa %xmm7,%xmm6 rorl $11,%ecx psrld $10,%xmm7 andl %eax,%ebx psrlq $17,%xmm6 xorl %esi,%ecx addl 40(%esp),%edx xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%ebx addl 4(%esp),%edx psrlq $2,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm6,%xmm7 movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi pshufd $8,%xmm7,%xmm7 xorl %edi,%esi rorl $5,%edx movdqa (%ebp),%xmm6 andl %ecx,%esi movl %ecx,4(%esp) pslldq $8,%xmm7 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm7,%xmm0 movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx paddd %xmm0,%xmm6 rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 44(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movdqa %xmm6,32(%esp) movl %edx,%ecx movdqa %xmm2,%xmm4 rorl $14,%edx movl 4(%esp),%esi movdqa %xmm0,%xmm7 xorl %ecx,%edx movl 8(%esp),%edi .byte 102,15,58,15,225,4 xorl %edi,%esi rorl $5,%edx andl %ecx,%esi .byte 102,15,58,15,251,4 movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi movdqa 
%xmm4,%xmm5 rorl $6,%edx movl %eax,%ecx movdqa %xmm4,%xmm6 addl %edi,%edx movl 20(%esp),%edi psrld $3,%xmm4 movl %eax,%esi rorl $9,%ecx paddd %xmm7,%xmm1 movl %eax,16(%esp) xorl %eax,%ecx psrld $7,%xmm6 xorl %edi,%eax addl 12(%esp),%edx rorl $11,%ecx andl %eax,%ebx pshufd $250,%xmm0,%xmm7 xorl %esi,%ecx addl 48(%esp),%edx pslld $14,%xmm5 xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm4 addl %edx,%ebx addl 28(%esp),%edx psrld $11,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm5,%xmm4 movl (%esp),%esi xorl %ecx,%edx pslld $11,%xmm5 movl 4(%esp),%edi xorl %edi,%esi rorl $5,%edx pxor %xmm6,%xmm4 andl %ecx,%esi movl %ecx,28(%esp) movdqa %xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx pxor %xmm5,%xmm4 movl %ebx,%ecx addl %edi,%edx psrld $10,%xmm7 movl 16(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm4,%xmm1 movl %ebx,12(%esp) xorl %ebx,%ecx psrlq $17,%xmm6 xorl %edi,%ebx addl 8(%esp),%edx rorl $11,%ecx pxor %xmm6,%xmm7 andl %ebx,%eax xorl %esi,%ecx psrlq $2,%xmm6 addl 52(%esp),%edx xorl %edi,%eax rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%eax addl 24(%esp),%edx pshufd $128,%xmm7,%xmm7 addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi psrldq $8,%xmm7 movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi paddd %xmm7,%xmm1 rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,8(%esp) pshufd $80,%xmm1,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx movdqa %xmm7,%xmm6 rorl $11,%ecx psrld $10,%xmm7 andl %eax,%ebx psrlq $17,%xmm6 xorl %esi,%ecx addl 56(%esp),%edx xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%ebx addl 20(%esp),%edx psrlq $2,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm6,%xmm7 movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi pshufd $8,%xmm7,%xmm7 xorl %edi,%esi rorl $5,%edx movdqa 16(%ebp),%xmm6 andl %ecx,%esi movl %ecx,20(%esp) pslldq $8,%xmm7 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx 
movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm7,%xmm1 movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx paddd %xmm1,%xmm6 rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 60(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movdqa %xmm6,48(%esp) movl %edx,%ecx movdqa %xmm3,%xmm4 rorl $14,%edx movl 20(%esp),%esi movdqa %xmm1,%xmm7 xorl %ecx,%edx movl 24(%esp),%edi .byte 102,15,58,15,226,4 xorl %edi,%esi rorl $5,%edx andl %ecx,%esi .byte 102,15,58,15,248,4 movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi movdqa %xmm4,%xmm5 rorl $6,%edx movl %eax,%ecx movdqa %xmm4,%xmm6 addl %edi,%edx movl 4(%esp),%edi psrld $3,%xmm4 movl %eax,%esi rorl $9,%ecx paddd %xmm7,%xmm2 movl %eax,(%esp) xorl %eax,%ecx psrld $7,%xmm6 xorl %edi,%eax addl 28(%esp),%edx rorl $11,%ecx andl %eax,%ebx pshufd $250,%xmm1,%xmm7 xorl %esi,%ecx addl 64(%esp),%edx pslld $14,%xmm5 xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm4 addl %edx,%ebx addl 12(%esp),%edx psrld $11,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm5,%xmm4 movl 16(%esp),%esi xorl %ecx,%edx pslld $11,%xmm5 movl 20(%esp),%edi xorl %edi,%esi rorl $5,%edx pxor %xmm6,%xmm4 andl %ecx,%esi movl %ecx,12(%esp) movdqa %xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx pxor %xmm5,%xmm4 movl %ebx,%ecx addl %edi,%edx psrld $10,%xmm7 movl (%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm4,%xmm2 movl %ebx,28(%esp) xorl %ebx,%ecx psrlq $17,%xmm6 xorl %edi,%ebx addl 24(%esp),%edx rorl $11,%ecx pxor %xmm6,%xmm7 andl %ebx,%eax xorl %esi,%ecx psrlq $2,%xmm6 addl 68(%esp),%edx xorl %edi,%eax rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%eax addl 8(%esp),%edx pshufd $128,%xmm7,%xmm7 addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi psrldq $8,%xmm7 movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi paddd %xmm7,%xmm2 rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 
28(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,24(%esp) pshufd $80,%xmm2,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx movdqa %xmm7,%xmm6 rorl $11,%ecx psrld $10,%xmm7 andl %eax,%ebx psrlq $17,%xmm6 xorl %esi,%ecx addl 72(%esp),%edx xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%ebx addl 4(%esp),%edx psrlq $2,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm6,%xmm7 movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi pshufd $8,%xmm7,%xmm7 xorl %edi,%esi rorl $5,%edx movdqa 32(%ebp),%xmm6 andl %ecx,%esi movl %ecx,4(%esp) pslldq $8,%xmm7 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm7,%xmm2 movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx paddd %xmm2,%xmm6 rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 76(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movdqa %xmm6,64(%esp) movl %edx,%ecx movdqa %xmm0,%xmm4 rorl $14,%edx movl 4(%esp),%esi movdqa %xmm2,%xmm7 xorl %ecx,%edx movl 8(%esp),%edi .byte 102,15,58,15,227,4 xorl %edi,%esi rorl $5,%edx andl %ecx,%esi .byte 102,15,58,15,249,4 movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi movdqa %xmm4,%xmm5 rorl $6,%edx movl %eax,%ecx movdqa %xmm4,%xmm6 addl %edi,%edx movl 20(%esp),%edi psrld $3,%xmm4 movl %eax,%esi rorl $9,%ecx paddd %xmm7,%xmm3 movl %eax,16(%esp) xorl %eax,%ecx psrld $7,%xmm6 xorl %edi,%eax addl 12(%esp),%edx rorl $11,%ecx andl %eax,%ebx pshufd $250,%xmm2,%xmm7 xorl %esi,%ecx addl 80(%esp),%edx pslld $14,%xmm5 xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm4 addl %edx,%ebx addl 28(%esp),%edx psrld $11,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm5,%xmm4 movl (%esp),%esi xorl %ecx,%edx pslld $11,%xmm5 movl 4(%esp),%edi xorl %edi,%esi rorl $5,%edx pxor %xmm6,%xmm4 andl %ecx,%esi movl %ecx,28(%esp) movdqa %xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx pxor %xmm5,%xmm4 movl %ebx,%ecx addl %edi,%edx psrld $10,%xmm7 movl 
16(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm4,%xmm3 movl %ebx,12(%esp) xorl %ebx,%ecx psrlq $17,%xmm6 xorl %edi,%ebx addl 8(%esp),%edx rorl $11,%ecx pxor %xmm6,%xmm7 andl %ebx,%eax xorl %esi,%ecx psrlq $2,%xmm6 addl 84(%esp),%edx xorl %edi,%eax rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%eax addl 24(%esp),%edx pshufd $128,%xmm7,%xmm7 addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi psrldq $8,%xmm7 movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi paddd %xmm7,%xmm3 rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,8(%esp) pshufd $80,%xmm3,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx movdqa %xmm7,%xmm6 rorl $11,%ecx psrld $10,%xmm7 andl %eax,%ebx psrlq $17,%xmm6 xorl %esi,%ecx addl 88(%esp),%edx xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%ebx addl 20(%esp),%edx psrlq $2,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm6,%xmm7 movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi pshufd $8,%xmm7,%xmm7 xorl %edi,%esi rorl $5,%edx movdqa 48(%ebp),%xmm6 andl %ecx,%esi movl %ecx,20(%esp) pslldq $8,%xmm7 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm7,%xmm3 movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx paddd %xmm3,%xmm6 rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 92(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movdqa %xmm6,80(%esp) cmpl $66051,64(%ebp) jne .L009ssse3_00_47 movl %edx,%ecx rorl $14,%edx movl 20(%esp),%esi xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,(%esp) xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 
32(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 16(%esp),%esi xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,12(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,28(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 36(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,24(%esp) xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 40(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 44(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 4(%esp),%esi xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,16(%esp) xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 48(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 
(%esp),%esi xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,28(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,12(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 52(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,8(%esp) xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 56(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 60(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 20(%esp),%esi xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,(%esp) xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 64(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 16(%esp),%esi xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,12(%esp) xorl %ecx,%edx 
xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,28(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 68(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,24(%esp) xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 72(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 76(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 4(%esp),%esi xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,16(%esp) xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 80(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl (%esp),%esi xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,28(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,12(%esp) 
xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 84(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,8(%esp) xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 88(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 92(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movl 96(%esp),%esi xorl %edi,%ebx movl 12(%esp),%ecx addl (%esi),%eax addl 4(%esi),%ebx addl 8(%esi),%edi addl 12(%esi),%ecx movl %eax,(%esi) movl %ebx,4(%esi) movl %edi,8(%esi) movl %ecx,12(%esi) movl %ebx,4(%esp) xorl %edi,%ebx movl %edi,8(%esp) movl %ecx,12(%esp) movl 20(%esp),%edi movl 24(%esp),%ecx addl 16(%esi),%edx addl 20(%esi),%edi addl 24(%esi),%ecx movl %edx,16(%esi) movl %edi,20(%esi) movl %edi,20(%esp) movl 28(%esp),%edi movl %ecx,24(%esi) addl 28(%esi),%edi movl %ecx,24(%esp) movl %edi,28(%esi) movl %edi,28(%esp) movl 100(%esp),%edi movdqa 64(%ebp),%xmm7 subl $192,%ebp cmpl 104(%esp),%edi jb .L008grand_ssse3 movl 108(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size sha256_block_data_order_ssse3,.-.L_sha256_block_data_order_ssse3_begin .globl sha256_block_data_order_avx .hidden sha256_block_data_order_avx .type 
sha256_block_data_order_avx,@function .align 16 sha256_block_data_order_avx: .L_sha256_block_data_order_avx_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call .L010pic_point .L010pic_point: popl %ebp leal .LK256-.L010pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $6,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) leal -96(%esp),%esp vzeroall movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edi movl %ebx,4(%esp) xorl %ecx,%ebx movl %ecx,8(%esp) movl %edi,12(%esp) movl 16(%esi),%edx movl 20(%esi),%edi movl 24(%esi),%ecx movl 28(%esi),%esi movl %edi,20(%esp) movl 100(%esp),%edi movl %ecx,24(%esp) movl %esi,28(%esp) vmovdqa 256(%ebp),%xmm7 jmp .L011grand_avx .align 32 .L011grand_avx: vmovdqu (%edi),%xmm0 vmovdqu 16(%edi),%xmm1 vmovdqu 32(%edi),%xmm2 vmovdqu 48(%edi),%xmm3 addl $64,%edi vpshufb %xmm7,%xmm0,%xmm0 movl %edi,100(%esp) vpshufb %xmm7,%xmm1,%xmm1 vpshufb %xmm7,%xmm2,%xmm2 vpaddd (%ebp),%xmm0,%xmm4 vpshufb %xmm7,%xmm3,%xmm3 vpaddd 16(%ebp),%xmm1,%xmm5 vpaddd 32(%ebp),%xmm2,%xmm6 vpaddd 48(%ebp),%xmm3,%xmm7 vmovdqa %xmm4,32(%esp) vmovdqa %xmm5,48(%esp) vmovdqa %xmm6,64(%esp) vmovdqa %xmm7,80(%esp) jmp .L012avx_00_47 .align 16 .L012avx_00_47: addl $64,%ebp vpalignr $4,%xmm0,%xmm1,%xmm4 movl %edx,%ecx shrdl $14,%edx,%edx movl 20(%esp),%esi vpalignr $4,%xmm2,%xmm3,%xmm7 xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi vpsrld $7,%xmm4,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,16(%esp) vpaddd %xmm7,%xmm0,%xmm0 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrld $3,%xmm4,%xmm7 movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi vpslld $14,%xmm4,%xmm5 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,(%esp) vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx vpshufd $250,%xmm3,%xmm7 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpsrld $11,%xmm6,%xmm6 addl 32(%esp),%edx xorl %edi,%ebx shrdl 
$2,%ecx,%ecx vpxor %xmm5,%xmm4,%xmm4 addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx vpslld $11,%xmm5,%xmm5 movl %edx,%ecx shrdl $14,%edx,%edx movl 16(%esp),%esi vpxor %xmm6,%xmm4,%xmm4 xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi vpsrld $10,%xmm7,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,12(%esp) vpxor %xmm5,%xmm4,%xmm4 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi vpaddd %xmm4,%xmm0,%xmm0 movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,28(%esp) vpxor %xmm5,%xmm6,%xmm6 xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx vpsrlq $19,%xmm7,%xmm7 shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx vpxor %xmm7,%xmm6,%xmm6 addl 36(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx vpshufd $132,%xmm6,%xmm7 addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax vpsrldq $8,%xmm7,%xmm7 movl %edx,%ecx shrdl $14,%edx,%edx movl 12(%esp),%esi vpaddd %xmm7,%xmm0,%xmm0 xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi vpshufd $80,%xmm0,%xmm7 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,8(%esp) vpsrld $10,%xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi vpxor %xmm5,%xmm6,%xmm6 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,24(%esp) vpsrlq $19,%xmm7,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx vpxor %xmm7,%xmm6,%xmm6 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpshufd $232,%xmm6,%xmm7 addl 40(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpslldq $8,%xmm7,%xmm7 addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx vpaddd %xmm7,%xmm0,%xmm0 movl %edx,%ecx shrdl $14,%edx,%edx movl 8(%esp),%esi vpaddd (%ebp),%xmm0,%xmm6 xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx shrdl $11,%ecx,%ecx 
andl %ebx,%eax xorl %esi,%ecx addl 44(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax vmovdqa %xmm6,32(%esp) vpalignr $4,%xmm1,%xmm2,%xmm4 movl %edx,%ecx shrdl $14,%edx,%edx movl 4(%esp),%esi vpalignr $4,%xmm3,%xmm0,%xmm7 xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi vpsrld $7,%xmm4,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,(%esp) vpaddd %xmm7,%xmm1,%xmm1 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrld $3,%xmm4,%xmm7 movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi vpslld $14,%xmm4,%xmm5 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,16(%esp) vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx vpshufd $250,%xmm0,%xmm7 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpsrld $11,%xmm6,%xmm6 addl 48(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpxor %xmm5,%xmm4,%xmm4 addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx vpslld $11,%xmm5,%xmm5 movl %edx,%ecx shrdl $14,%edx,%edx movl (%esp),%esi vpxor %xmm6,%xmm4,%xmm4 xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi vpsrld $10,%xmm7,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,28(%esp) vpxor %xmm5,%xmm4,%xmm4 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi vpaddd %xmm4,%xmm1,%xmm1 movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,12(%esp) vpxor %xmm5,%xmm6,%xmm6 xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx vpsrlq $19,%xmm7,%xmm7 shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx vpxor %xmm7,%xmm6,%xmm6 addl 52(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx vpshufd $132,%xmm6,%xmm7 addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax vpsrldq $8,%xmm7,%xmm7 movl %edx,%ecx shrdl $14,%edx,%edx movl 28(%esp),%esi vpaddd %xmm7,%xmm1,%xmm1 xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi vpshufd $80,%xmm1,%xmm7 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,24(%esp) vpsrld $10,%xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %eax,%ecx addl %edi,%edx 
movl 12(%esp),%edi vpxor %xmm5,%xmm6,%xmm6 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,8(%esp) vpsrlq $19,%xmm7,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx vpxor %xmm7,%xmm6,%xmm6 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpshufd $232,%xmm6,%xmm7 addl 56(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpslldq $8,%xmm7,%xmm7 addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx vpaddd %xmm7,%xmm1,%xmm1 movl %edx,%ecx shrdl $14,%edx,%edx movl 24(%esp),%esi vpaddd 16(%ebp),%xmm1,%xmm6 xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 60(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax vmovdqa %xmm6,48(%esp) vpalignr $4,%xmm2,%xmm3,%xmm4 movl %edx,%ecx shrdl $14,%edx,%edx movl 20(%esp),%esi vpalignr $4,%xmm0,%xmm1,%xmm7 xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi vpsrld $7,%xmm4,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,16(%esp) vpaddd %xmm7,%xmm2,%xmm2 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrld $3,%xmm4,%xmm7 movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi vpslld $14,%xmm4,%xmm5 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,(%esp) vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx vpshufd $250,%xmm1,%xmm7 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpsrld $11,%xmm6,%xmm6 addl 64(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpxor %xmm5,%xmm4,%xmm4 addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx vpslld $11,%xmm5,%xmm5 movl %edx,%ecx shrdl $14,%edx,%edx movl 16(%esp),%esi vpxor %xmm6,%xmm4,%xmm4 xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi vpsrld $10,%xmm7,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,12(%esp) vpxor %xmm5,%xmm4,%xmm4 xorl %ecx,%edx xorl 
%esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi vpaddd %xmm4,%xmm2,%xmm2 movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,28(%esp) vpxor %xmm5,%xmm6,%xmm6 xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx vpsrlq $19,%xmm7,%xmm7 shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx vpxor %xmm7,%xmm6,%xmm6 addl 68(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx vpshufd $132,%xmm6,%xmm7 addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax vpsrldq $8,%xmm7,%xmm7 movl %edx,%ecx shrdl $14,%edx,%edx movl 12(%esp),%esi vpaddd %xmm7,%xmm2,%xmm2 xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi vpshufd $80,%xmm2,%xmm7 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,8(%esp) vpsrld $10,%xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi vpxor %xmm5,%xmm6,%xmm6 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,24(%esp) vpsrlq $19,%xmm7,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx vpxor %xmm7,%xmm6,%xmm6 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpshufd $232,%xmm6,%xmm7 addl 72(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpslldq $8,%xmm7,%xmm7 addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx vpaddd %xmm7,%xmm2,%xmm2 movl %edx,%ecx shrdl $14,%edx,%edx movl 8(%esp),%esi vpaddd 32(%ebp),%xmm2,%xmm6 xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 76(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax vmovdqa %xmm6,64(%esp) vpalignr $4,%xmm3,%xmm0,%xmm4 movl %edx,%ecx shrdl $14,%edx,%edx movl 4(%esp),%esi vpalignr $4,%xmm1,%xmm2,%xmm7 xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi vpsrld $7,%xmm4,%xmm6 shrdl $5,%edx,%edx 
andl %ecx,%esi movl %ecx,(%esp) vpaddd %xmm7,%xmm3,%xmm3 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrld $3,%xmm4,%xmm7 movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi vpslld $14,%xmm4,%xmm5 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,16(%esp) vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx vpshufd $250,%xmm2,%xmm7 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpsrld $11,%xmm6,%xmm6 addl 80(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpxor %xmm5,%xmm4,%xmm4 addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx vpslld $11,%xmm5,%xmm5 movl %edx,%ecx shrdl $14,%edx,%edx movl (%esp),%esi vpxor %xmm6,%xmm4,%xmm4 xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi vpsrld $10,%xmm7,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,28(%esp) vpxor %xmm5,%xmm4,%xmm4 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi vpaddd %xmm4,%xmm3,%xmm3 movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,12(%esp) vpxor %xmm5,%xmm6,%xmm6 xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx vpsrlq $19,%xmm7,%xmm7 shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx vpxor %xmm7,%xmm6,%xmm6 addl 84(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx vpshufd $132,%xmm6,%xmm7 addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax vpsrldq $8,%xmm7,%xmm7 movl %edx,%ecx shrdl $14,%edx,%edx movl 28(%esp),%esi vpaddd %xmm7,%xmm3,%xmm3 xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi vpshufd $80,%xmm3,%xmm7 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,24(%esp) vpsrld $10,%xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi vpxor %xmm5,%xmm6,%xmm6 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,8(%esp) vpsrlq $19,%xmm7,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx vpxor %xmm7,%xmm6,%xmm6 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpshufd $232,%xmm6,%xmm7 addl 88(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpslldq $8,%xmm7,%xmm7 addl %edx,%ebx 
addl 20(%esp),%edx addl %ecx,%ebx vpaddd %xmm7,%xmm3,%xmm3 movl %edx,%ecx shrdl $14,%edx,%edx movl 24(%esp),%esi vpaddd 48(%ebp),%xmm3,%xmm6 xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 92(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax vmovdqa %xmm6,80(%esp) cmpl $66051,64(%ebp) jne .L012avx_00_47 movl %edx,%ecx shrdl $14,%edx,%edx movl 20(%esp),%esi xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,(%esp) xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 32(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 16(%esp),%esi xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,12(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,28(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 36(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,24(%esp) xorl %eax,%ecx xorl %edi,%eax 
addl 20(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 40(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 44(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 4(%esp),%esi xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,16(%esp) xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 48(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl (%esp),%esi xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,28(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,12(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 52(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,8(%esp) xorl 
%eax,%ecx xorl %edi,%eax addl 4(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 56(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 60(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 20(%esp),%esi xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,(%esp) xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 64(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 16(%esp),%esi xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,12(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,28(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 68(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx 
movl %eax,24(%esp) xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 72(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 76(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 4(%esp),%esi xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,16(%esp) xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 80(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl (%esp),%esi xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,28(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,12(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 84(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi 
shrdl $9,%ecx,%ecx movl %eax,8(%esp) xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 88(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 92(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movl 96(%esp),%esi xorl %edi,%ebx movl 12(%esp),%ecx addl (%esi),%eax addl 4(%esi),%ebx addl 8(%esi),%edi addl 12(%esi),%ecx movl %eax,(%esi) movl %ebx,4(%esi) movl %edi,8(%esi) movl %ecx,12(%esi) movl %ebx,4(%esp) xorl %edi,%ebx movl %edi,8(%esp) movl %ecx,12(%esp) movl 20(%esp),%edi movl 24(%esp),%ecx addl 16(%esi),%edx addl 20(%esi),%edi addl 24(%esi),%ecx movl %edx,16(%esi) movl %edi,20(%esi) movl %edi,20(%esp) movl 28(%esp),%edi movl %ecx,24(%esi) addl 28(%esi),%edi movl %ecx,24(%esp) movl %edi,28(%esi) movl %edi,28(%esp) movl 100(%esp),%edi vmovdqa 64(%ebp),%xmm7 subl $192,%ebp cmpl 104(%esp),%edi jb .L011grand_avx movl 108(%esp),%esp vzeroall popl %edi popl %esi popl %ebx popl %ebp ret .size sha256_block_data_order_avx,.-.L_sha256_block_data_order_avx_begin #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha256-armv4-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) @ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. @ @ Licensed under the OpenSSL license (the "License"). You may not use @ this file except in compliance with the License. You can obtain a copy @ in the file LICENSE in the source distribution or at @ https://www.openssl.org/source/license.html @ ==================================================================== @ Written by Andy Polyakov for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further @ details see http://www.openssl.org/~appro/cryptogams/. @ @ Permission to use under GPL terms is granted. @ ==================================================================== @ SHA256 block procedure for ARMv4. May 2007. @ Performance is ~2x better than gcc 3.4 generated code and in "abso- @ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per @ byte [on single-issue Xscale PXA250 core]. @ July 2010. @ @ Rescheduling for dual-issue pipeline resulted in 22% improvement on @ Cortex A8 core and ~20 cycles per processed byte. @ February 2011. @ @ Profiler-assisted and platform-specific optimization resulted in 16% @ improvement on Cortex A8 core and ~15.4 cycles per processed byte. @ September 2013. @ @ Add NEON implementation. On Cortex A8 it was measured to process one @ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon @ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only @ code (meaning that latter performs sub-optimally, nothing was done @ about it). @ May 2014. @ @ Add ARMv8 code path performing at 2.0 cpb on Apple A7. #ifndef __KERNEL__ # include #else # define __ARM_ARCH __LINUX_ARM_ARCH__ # define __ARM_MAX_ARCH__ 7 #endif @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors. 
It does have ARMv8-only code, but those @ instructions are manually-encoded. (See unsha256.) .arch armv7-a .text #if defined(__thumb2__) .syntax unified .thumb #else .code 32 #endif .type K256,%object .align 5 K256: .word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .size K256,.-K256 .word 0 @ terminator .align 5 .globl sha256_block_data_order_nohw .hidden sha256_block_data_order_nohw .type sha256_block_data_order_nohw,%function sha256_block_data_order_nohw: add r2,r1,r2,lsl#6 @ len to point at the end of inp stmdb sp!,{r0,r1,r2,r4-r11,lr} ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} adr r14,K256 sub sp,sp,#16*4 @ alloca(X[16]) .Loop: # if __ARM_ARCH>=7 ldr r2,[r1],#4 # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ magic eor r12,r12,r12 #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 0 # if 0==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 add r4,r4,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r8,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 0 add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 0==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r8,ror#19 @ 
Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#0*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 0==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 0<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#2*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#15*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 1 # if 1==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 add r11,r11,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r7,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 1 add r11,r11,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 1==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r7,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#1*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 1==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 1<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#3*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#0*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 2 # if 2==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 add r10,r10,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r6,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 2 add r10,r10,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 2==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r6,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#2*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 2==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 2<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#4*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#1*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 3 # if 3==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 add r9,r9,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r5,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 3 add r9,r9,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 3==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r5,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#3*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 3==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 3<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#5*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#2*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 4 # if 4==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 add r8,r8,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r4,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 4 add r8,r8,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 4==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r4,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#4*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 4==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 4<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#6*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#3*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 5 # if 5==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 add r7,r7,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r11,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 5 add r7,r7,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 5==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r11,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#5*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 5==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 5<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#7*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#4*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 6 # if 6==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 add r6,r6,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r10,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 6 add r6,r6,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 6==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r10,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#6*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 6==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 6<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#8*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#5*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 7 # if 7==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 add r5,r5,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r9,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 7 add r5,r5,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 7==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r9,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#7*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 7==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 7<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#9*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#6*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 8 # if 8==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 add r4,r4,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r8,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 8 add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 8==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r8,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#8*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 8==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 8<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#10*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#7*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 9 # if 9==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 add r11,r11,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r7,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 9 add r11,r11,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 9==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r7,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#9*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 9==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 9<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#11*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#8*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 10 # if 10==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 add r10,r10,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r6,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 10 add r10,r10,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 10==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r6,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#10*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 10==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 10<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#12*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#9*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 11 # if 11==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 add r9,r9,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r5,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 11 add r9,r9,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 11==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r5,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#11*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 11==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 11<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#13*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#10*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 12 # if 12==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 add r8,r8,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r4,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 12 add r8,r8,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 12==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r4,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#12*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 12==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 12<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#14*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#11*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 13 # if 13==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 add r7,r7,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r11,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 13 add r7,r7,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 13==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r11,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#13*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 13==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 13<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#15*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#12*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 14 # if 14==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 add r6,r6,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r10,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 14 add r6,r6,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 14==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r10,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#14*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 14==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 14<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#0*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#13*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 15 # if 15==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 add r5,r5,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r9,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 15 add r5,r5,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 15==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r9,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#15*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 15==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 15<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#1*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#14*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) .Lrounds_16_xx: @ ldr r2,[sp,#1*4] @ 16 @ ldr r1,[sp,#14*4] mov r0,r2,ror#7 add r4,r4,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#0*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#9*4] add r12,r12,r0 eor r0,r8,r8,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r8,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#0*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 16==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 16<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#2*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#15*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#2*4] @ 17 @ ldr r1,[sp,#15*4] mov r0,r2,ror#7 add r11,r11,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#1*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#10*4] add r3,r3,r0 eor r0,r7,r7,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r7,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#1*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 17==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 17<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#3*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#0*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#3*4] @ 18 @ ldr r1,[sp,#0*4] mov r0,r2,ror#7 add r10,r10,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#2*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#11*4] add r12,r12,r0 eor r0,r6,r6,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r6,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#2*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 18==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 18<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#4*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#1*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#4*4] @ 19 @ ldr r1,[sp,#1*4] mov r0,r2,ror#7 add r9,r9,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#3*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#12*4] add r3,r3,r0 eor r0,r5,r5,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r5,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#3*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 19==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 19<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#5*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#2*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#5*4] @ 20 @ ldr r1,[sp,#2*4] mov r0,r2,ror#7 add r8,r8,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#4*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#13*4] add r12,r12,r0 eor r0,r4,r4,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r4,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#4*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 20==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 20<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#6*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#3*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#6*4] @ 21 @ ldr r1,[sp,#3*4] mov r0,r2,ror#7 add r7,r7,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#5*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#14*4] add r3,r3,r0 eor r0,r11,r11,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r11,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#5*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 21==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 21<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#7*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#4*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#7*4] @ 22 @ ldr r1,[sp,#4*4] mov r0,r2,ror#7 add r6,r6,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#6*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#15*4] add r12,r12,r0 eor r0,r10,r10,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r10,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#6*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 22==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 22<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#8*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#5*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#8*4] @ 23 @ ldr r1,[sp,#5*4] mov r0,r2,ror#7 add r5,r5,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#7*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#0*4] add r3,r3,r0 eor r0,r9,r9,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r9,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#7*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 23==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 23<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#9*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#6*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#9*4] @ 24 @ ldr r1,[sp,#6*4] mov r0,r2,ror#7 add r4,r4,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#8*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#1*4] add r12,r12,r0 eor r0,r8,r8,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r8,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#8*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 24==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 24<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#10*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#7*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#10*4] @ 25 @ ldr r1,[sp,#7*4] mov r0,r2,ror#7 add r11,r11,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#9*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#2*4] add r3,r3,r0 eor r0,r7,r7,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r7,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#9*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 25==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 25<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#11*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#8*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#11*4] @ 26 @ ldr r1,[sp,#8*4] mov r0,r2,ror#7 add r10,r10,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#10*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#3*4] add r12,r12,r0 eor r0,r6,r6,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r6,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#10*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 26==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 26<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#12*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#9*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#12*4] @ 27 @ ldr r1,[sp,#9*4] mov r0,r2,ror#7 add r9,r9,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#11*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#4*4] add r3,r3,r0 eor r0,r5,r5,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r5,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#11*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 27==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 27<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#13*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#10*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#13*4] @ 28 @ ldr r1,[sp,#10*4] mov r0,r2,ror#7 add r8,r8,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#12*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#5*4] add r12,r12,r0 eor r0,r4,r4,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r4,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#12*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 28==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 28<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#14*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#11*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#14*4] @ 29 @ ldr r1,[sp,#11*4] mov r0,r2,ror#7 add r7,r7,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#13*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#6*4] add r3,r3,r0 eor r0,r11,r11,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r11,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#13*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 29==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 29<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#15*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#12*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#15*4] @ 30 @ ldr r1,[sp,#12*4] mov r0,r2,ror#7 add r6,r6,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#14*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#7*4] add r12,r12,r0 eor r0,r10,r10,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r10,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#14*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 30==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 30<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#0*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#13*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#0*4] @ 31 @ ldr r1,[sp,#13*4] mov r0,r2,ror#7 add r5,r5,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#15*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#8*4] add r3,r3,r0 eor r0,r9,r9,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r9,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#15*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 31==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 31<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#1*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#14*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 ite eq @ Thumb2 thing, sanity check in ARM #endif ldreq r3,[sp,#16*4] @ pull ctx bne .Lrounds_16_xx add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldr r0,[r3,#0] ldr r2,[r3,#4] ldr r12,[r3,#8] add r4,r4,r0 ldr r0,[r3,#12] add r5,r5,r2 ldr r2,[r3,#16] add r6,r6,r12 ldr r12,[r3,#20] add r7,r7,r0 ldr r0,[r3,#24] add r8,r8,r2 ldr r2,[r3,#28] add r9,r9,r12 ldr r1,[sp,#17*4] @ pull inp ldr r12,[sp,#18*4] @ pull inp+len add r10,r10,r0 add r11,r11,r2 stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} cmp r1,r12 sub r14,r14,#256 @ rewind Ktbl bne .Loop add sp,sp,#19*4 @ destroy frame #if __ARM_ARCH>=5 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} #else ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif .size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .LK256_shortcut_neon: @ PC is 8 bytes ahead in Arm mode and 4 bytes ahead in Thumb mode. #if defined(__thumb2__) .word K256-(.LK256_add_neon+4) #else .word K256-(.LK256_add_neon+8) #endif .globl sha256_block_data_order_neon .hidden sha256_block_data_order_neon .type sha256_block_data_order_neon,%function .align 5 .skip 16 sha256_block_data_order_neon: stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} sub r11,sp,#16*4+16 @ K256 is just at the boundary of being easily referenced by an ADR from @ this function. In Arm mode, when building with __ARM_ARCH=6, it does @ not fit. 
By moving code around, we could make it fit, but this is too @ fragile. For simplicity, just load the offset from @ .LK256_shortcut_neon. @ @ TODO(davidben): adrl would avoid a load, but clang-assembler does not @ support it. We might be able to emulate it with a macro, but Android's @ did not work when I tried it. @ https://android.googlesource.com/platform/ndk/+/refs/heads/main/docs/ClangMigration.md#arm ldr r14,.LK256_shortcut_neon .LK256_add_neon: add r14,pc,r14 bic r11,r11,#15 @ align for 128-bit stores mov r12,sp mov sp,r11 @ alloca add r2,r1,r2,lsl#6 @ len to point at the end of inp vld1.8 {q0},[r1]! vld1.8 {q1},[r1]! vld1.8 {q2},[r1]! vld1.8 {q3},[r1]! vld1.32 {q8},[r14,:128]! vld1.32 {q9},[r14,:128]! vld1.32 {q10},[r14,:128]! vld1.32 {q11},[r14,:128]! vrev32.8 q0,q0 @ yes, even on str r0,[sp,#64] vrev32.8 q1,q1 @ big-endian str r1,[sp,#68] mov r1,sp vrev32.8 q2,q2 str r2,[sp,#72] vrev32.8 q3,q3 str r12,[sp,#76] @ save original sp vadd.i32 q8,q8,q0 vadd.i32 q9,q9,q1 vst1.32 {q8},[r1,:128]! vadd.i32 q10,q10,q2 vst1.32 {q9},[r1,:128]! vadd.i32 q11,q11,q3 vst1.32 {q10},[r1,:128]! vst1.32 {q11},[r1,:128]! 
ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} sub r1,r1,#64 ldr r2,[sp,#0] eor r12,r12,r12 eor r3,r5,r6 b .L_00_48 .align 4 .L_00_48: vext.8 q8,q0,q1,#4 add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 vext.8 q9,q2,q3,#4 add r4,r4,r12 and r2,r2,r8 eor r12,r0,r8,ror#19 vshr.u32 q10,q8,#7 eor r0,r4,r4,ror#11 eor r2,r2,r10 vadd.i32 q0,q0,q9 add r11,r11,r12,ror#6 eor r12,r4,r5 vshr.u32 q9,q8,#3 eor r0,r0,r4,ror#20 add r11,r11,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#4] and r3,r3,r12 vshr.u32 q11,q8,#18 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 veor q9,q9,q10 add r10,r10,r2 vsli.32 q11,q8,#14 eor r2,r8,r9 eor r0,r7,r7,ror#5 vshr.u32 d24,d7,#17 add r11,r11,r3 and r2,r2,r7 veor q9,q9,q11 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 vsli.32 d24,d7,#15 eor r2,r2,r9 add r10,r10,r3,ror#6 vshr.u32 d25,d7,#10 eor r3,r11,r4 eor r0,r0,r11,ror#20 vadd.i32 q0,q0,q9 add r10,r10,r2 ldr r2,[sp,#8] veor d25,d25,d24 and r12,r12,r3 add r6,r6,r10 vshr.u32 d24,d7,#19 add r10,r10,r0,ror#2 eor r12,r12,r4 vsli.32 d24,d7,#13 add r9,r9,r2 eor r2,r7,r8 veor d25,d25,d24 eor r0,r6,r6,ror#5 add r10,r10,r12 vadd.i32 d0,d0,d25 and r2,r2,r6 eor r12,r0,r6,ror#19 vshr.u32 d24,d0,#17 eor r0,r10,r10,ror#11 eor r2,r2,r8 vsli.32 d24,d0,#15 add r9,r9,r12,ror#6 eor r12,r10,r11 vshr.u32 d25,d0,#10 eor r0,r0,r10,ror#20 add r9,r9,r2 veor d25,d25,d24 ldr r2,[sp,#12] and r3,r3,r12 vshr.u32 d24,d0,#19 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 vld1.32 {q8},[r14,:128]! add r8,r8,r2 vsli.32 d24,d0,#13 eor r2,r6,r7 eor r0,r5,r5,ror#5 veor d25,d25,d24 add r9,r9,r3 and r2,r2,r5 vadd.i32 d1,d1,d25 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 vadd.i32 q8,q8,q0 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#16] and r12,r12,r3 add r4,r4,r8 vst1.32 {q8},[r1,:128]! 
add r8,r8,r0,ror#2 eor r12,r12,r10 vext.8 q8,q1,q2,#4 add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 vext.8 q9,q3,q0,#4 add r8,r8,r12 and r2,r2,r4 eor r12,r0,r4,ror#19 vshr.u32 q10,q8,#7 eor r0,r8,r8,ror#11 eor r2,r2,r6 vadd.i32 q1,q1,q9 add r7,r7,r12,ror#6 eor r12,r8,r9 vshr.u32 q9,q8,#3 eor r0,r0,r8,ror#20 add r7,r7,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#20] and r3,r3,r12 vshr.u32 q11,q8,#18 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 veor q9,q9,q10 add r6,r6,r2 vsli.32 q11,q8,#14 eor r2,r4,r5 eor r0,r11,r11,ror#5 vshr.u32 d24,d1,#17 add r7,r7,r3 and r2,r2,r11 veor q9,q9,q11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 vsli.32 d24,d1,#15 eor r2,r2,r5 add r6,r6,r3,ror#6 vshr.u32 d25,d1,#10 eor r3,r7,r8 eor r0,r0,r7,ror#20 vadd.i32 q1,q1,q9 add r6,r6,r2 ldr r2,[sp,#24] veor d25,d25,d24 and r12,r12,r3 add r10,r10,r6 vshr.u32 d24,d1,#19 add r6,r6,r0,ror#2 eor r12,r12,r8 vsli.32 d24,d1,#13 add r5,r5,r2 eor r2,r11,r4 veor d25,d25,d24 eor r0,r10,r10,ror#5 add r6,r6,r12 vadd.i32 d2,d2,d25 and r2,r2,r10 eor r12,r0,r10,ror#19 vshr.u32 d24,d2,#17 eor r0,r6,r6,ror#11 eor r2,r2,r4 vsli.32 d24,d2,#15 add r5,r5,r12,ror#6 eor r12,r6,r7 vshr.u32 d25,d2,#10 eor r0,r0,r6,ror#20 add r5,r5,r2 veor d25,d25,d24 ldr r2,[sp,#28] and r3,r3,r12 vshr.u32 d24,d2,#19 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 vld1.32 {q8},[r14,:128]! add r4,r4,r2 vsli.32 d24,d2,#13 eor r2,r10,r11 eor r0,r9,r9,ror#5 veor d25,d25,d24 add r5,r5,r3 and r2,r2,r9 vadd.i32 d3,d3,d25 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 vadd.i32 q8,q8,q1 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[sp,#32] and r12,r12,r3 add r8,r8,r4 vst1.32 {q8},[r1,:128]! 
add r4,r4,r0,ror#2 eor r12,r12,r6 vext.8 q8,q2,q3,#4 add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 vext.8 q9,q0,q1,#4 add r4,r4,r12 and r2,r2,r8 eor r12,r0,r8,ror#19 vshr.u32 q10,q8,#7 eor r0,r4,r4,ror#11 eor r2,r2,r10 vadd.i32 q2,q2,q9 add r11,r11,r12,ror#6 eor r12,r4,r5 vshr.u32 q9,q8,#3 eor r0,r0,r4,ror#20 add r11,r11,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#36] and r3,r3,r12 vshr.u32 q11,q8,#18 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 veor q9,q9,q10 add r10,r10,r2 vsli.32 q11,q8,#14 eor r2,r8,r9 eor r0,r7,r7,ror#5 vshr.u32 d24,d3,#17 add r11,r11,r3 and r2,r2,r7 veor q9,q9,q11 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 vsli.32 d24,d3,#15 eor r2,r2,r9 add r10,r10,r3,ror#6 vshr.u32 d25,d3,#10 eor r3,r11,r4 eor r0,r0,r11,ror#20 vadd.i32 q2,q2,q9 add r10,r10,r2 ldr r2,[sp,#40] veor d25,d25,d24 and r12,r12,r3 add r6,r6,r10 vshr.u32 d24,d3,#19 add r10,r10,r0,ror#2 eor r12,r12,r4 vsli.32 d24,d3,#13 add r9,r9,r2 eor r2,r7,r8 veor d25,d25,d24 eor r0,r6,r6,ror#5 add r10,r10,r12 vadd.i32 d4,d4,d25 and r2,r2,r6 eor r12,r0,r6,ror#19 vshr.u32 d24,d4,#17 eor r0,r10,r10,ror#11 eor r2,r2,r8 vsli.32 d24,d4,#15 add r9,r9,r12,ror#6 eor r12,r10,r11 vshr.u32 d25,d4,#10 eor r0,r0,r10,ror#20 add r9,r9,r2 veor d25,d25,d24 ldr r2,[sp,#44] and r3,r3,r12 vshr.u32 d24,d4,#19 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 vld1.32 {q8},[r14,:128]! add r8,r8,r2 vsli.32 d24,d4,#13 eor r2,r6,r7 eor r0,r5,r5,ror#5 veor d25,d25,d24 add r9,r9,r3 and r2,r2,r5 vadd.i32 d5,d5,d25 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 vadd.i32 q8,q8,q2 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#48] and r12,r12,r3 add r4,r4,r8 vst1.32 {q8},[r1,:128]! 
add r8,r8,r0,ror#2 eor r12,r12,r10 vext.8 q8,q3,q0,#4 add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 vext.8 q9,q1,q2,#4 add r8,r8,r12 and r2,r2,r4 eor r12,r0,r4,ror#19 vshr.u32 q10,q8,#7 eor r0,r8,r8,ror#11 eor r2,r2,r6 vadd.i32 q3,q3,q9 add r7,r7,r12,ror#6 eor r12,r8,r9 vshr.u32 q9,q8,#3 eor r0,r0,r8,ror#20 add r7,r7,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#52] and r3,r3,r12 vshr.u32 q11,q8,#18 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 veor q9,q9,q10 add r6,r6,r2 vsli.32 q11,q8,#14 eor r2,r4,r5 eor r0,r11,r11,ror#5 vshr.u32 d24,d5,#17 add r7,r7,r3 and r2,r2,r11 veor q9,q9,q11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 vsli.32 d24,d5,#15 eor r2,r2,r5 add r6,r6,r3,ror#6 vshr.u32 d25,d5,#10 eor r3,r7,r8 eor r0,r0,r7,ror#20 vadd.i32 q3,q3,q9 add r6,r6,r2 ldr r2,[sp,#56] veor d25,d25,d24 and r12,r12,r3 add r10,r10,r6 vshr.u32 d24,d5,#19 add r6,r6,r0,ror#2 eor r12,r12,r8 vsli.32 d24,d5,#13 add r5,r5,r2 eor r2,r11,r4 veor d25,d25,d24 eor r0,r10,r10,ror#5 add r6,r6,r12 vadd.i32 d6,d6,d25 and r2,r2,r10 eor r12,r0,r10,ror#19 vshr.u32 d24,d6,#17 eor r0,r6,r6,ror#11 eor r2,r2,r4 vsli.32 d24,d6,#15 add r5,r5,r12,ror#6 eor r12,r6,r7 vshr.u32 d25,d6,#10 eor r0,r0,r6,ror#20 add r5,r5,r2 veor d25,d25,d24 ldr r2,[sp,#60] and r3,r3,r12 vshr.u32 d24,d6,#19 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 vld1.32 {q8},[r14,:128]! add r4,r4,r2 vsli.32 d24,d6,#13 eor r2,r10,r11 eor r0,r9,r9,ror#5 veor d25,d25,d24 add r5,r5,r3 and r2,r2,r9 vadd.i32 d7,d7,d25 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 vadd.i32 q8,q8,q3 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[r14] and r12,r12,r3 add r8,r8,r4 vst1.32 {q8},[r1,:128]! add r4,r4,r0,ror#2 eor r12,r12,r6 teq r2,#0 @ check for K256 terminator ldr r2,[sp,#0] sub r1,r1,#64 bne .L_00_48 ldr r1,[sp,#68] ldr r0,[sp,#72] sub r14,r14,#256 @ rewind r14 teq r1,r0 it eq subeq r1,r1,#64 @ avoid SEGV vld1.8 {q0},[r1]! @ load next input block vld1.8 {q1},[r1]! vld1.8 {q2},[r1]! vld1.8 {q3},[r1]! 
it ne strne r1,[sp,#68] mov r1,sp add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 add r4,r4,r12 vld1.32 {q8},[r14,:128]! and r2,r2,r8 eor r12,r0,r8,ror#19 eor r0,r4,r4,ror#11 eor r2,r2,r10 vrev32.8 q0,q0 add r11,r11,r12,ror#6 eor r12,r4,r5 eor r0,r0,r4,ror#20 add r11,r11,r2 vadd.i32 q8,q8,q0 ldr r2,[sp,#4] and r3,r3,r12 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 add r10,r10,r2 eor r2,r8,r9 eor r0,r7,r7,ror#5 add r11,r11,r3 and r2,r2,r7 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 eor r2,r2,r9 add r10,r10,r3,ror#6 eor r3,r11,r4 eor r0,r0,r11,ror#20 add r10,r10,r2 ldr r2,[sp,#8] and r12,r12,r3 add r6,r6,r10 add r10,r10,r0,ror#2 eor r12,r12,r4 add r9,r9,r2 eor r2,r7,r8 eor r0,r6,r6,ror#5 add r10,r10,r12 and r2,r2,r6 eor r12,r0,r6,ror#19 eor r0,r10,r10,ror#11 eor r2,r2,r8 add r9,r9,r12,ror#6 eor r12,r10,r11 eor r0,r0,r10,ror#20 add r9,r9,r2 ldr r2,[sp,#12] and r3,r3,r12 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 add r8,r8,r2 eor r2,r6,r7 eor r0,r5,r5,ror#5 add r9,r9,r3 and r2,r2,r5 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#16] and r12,r12,r3 add r4,r4,r8 add r8,r8,r0,ror#2 eor r12,r12,r10 vst1.32 {q8},[r1,:128]! add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 add r8,r8,r12 vld1.32 {q8},[r14,:128]! 
and r2,r2,r4 eor r12,r0,r4,ror#19 eor r0,r8,r8,ror#11 eor r2,r2,r6 vrev32.8 q1,q1 add r7,r7,r12,ror#6 eor r12,r8,r9 eor r0,r0,r8,ror#20 add r7,r7,r2 vadd.i32 q8,q8,q1 ldr r2,[sp,#20] and r3,r3,r12 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 add r6,r6,r2 eor r2,r4,r5 eor r0,r11,r11,ror#5 add r7,r7,r3 and r2,r2,r11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 eor r2,r2,r5 add r6,r6,r3,ror#6 eor r3,r7,r8 eor r0,r0,r7,ror#20 add r6,r6,r2 ldr r2,[sp,#24] and r12,r12,r3 add r10,r10,r6 add r6,r6,r0,ror#2 eor r12,r12,r8 add r5,r5,r2 eor r2,r11,r4 eor r0,r10,r10,ror#5 add r6,r6,r12 and r2,r2,r10 eor r12,r0,r10,ror#19 eor r0,r6,r6,ror#11 eor r2,r2,r4 add r5,r5,r12,ror#6 eor r12,r6,r7 eor r0,r0,r6,ror#20 add r5,r5,r2 ldr r2,[sp,#28] and r3,r3,r12 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 add r4,r4,r2 eor r2,r10,r11 eor r0,r9,r9,ror#5 add r5,r5,r3 and r2,r2,r9 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[sp,#32] and r12,r12,r3 add r8,r8,r4 add r4,r4,r0,ror#2 eor r12,r12,r6 vst1.32 {q8},[r1,:128]! add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 add r4,r4,r12 vld1.32 {q8},[r14,:128]! 
and r2,r2,r8 eor r12,r0,r8,ror#19 eor r0,r4,r4,ror#11 eor r2,r2,r10 vrev32.8 q2,q2 add r11,r11,r12,ror#6 eor r12,r4,r5 eor r0,r0,r4,ror#20 add r11,r11,r2 vadd.i32 q8,q8,q2 ldr r2,[sp,#36] and r3,r3,r12 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 add r10,r10,r2 eor r2,r8,r9 eor r0,r7,r7,ror#5 add r11,r11,r3 and r2,r2,r7 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 eor r2,r2,r9 add r10,r10,r3,ror#6 eor r3,r11,r4 eor r0,r0,r11,ror#20 add r10,r10,r2 ldr r2,[sp,#40] and r12,r12,r3 add r6,r6,r10 add r10,r10,r0,ror#2 eor r12,r12,r4 add r9,r9,r2 eor r2,r7,r8 eor r0,r6,r6,ror#5 add r10,r10,r12 and r2,r2,r6 eor r12,r0,r6,ror#19 eor r0,r10,r10,ror#11 eor r2,r2,r8 add r9,r9,r12,ror#6 eor r12,r10,r11 eor r0,r0,r10,ror#20 add r9,r9,r2 ldr r2,[sp,#44] and r3,r3,r12 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 add r8,r8,r2 eor r2,r6,r7 eor r0,r5,r5,ror#5 add r9,r9,r3 and r2,r2,r5 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#48] and r12,r12,r3 add r4,r4,r8 add r8,r8,r0,ror#2 eor r12,r12,r10 vst1.32 {q8},[r1,:128]! add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 add r8,r8,r12 vld1.32 {q8},[r14,:128]! 
and r2,r2,r4 eor r12,r0,r4,ror#19 eor r0,r8,r8,ror#11 eor r2,r2,r6 vrev32.8 q3,q3 add r7,r7,r12,ror#6 eor r12,r8,r9 eor r0,r0,r8,ror#20 add r7,r7,r2 vadd.i32 q8,q8,q3 ldr r2,[sp,#52] and r3,r3,r12 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 add r6,r6,r2 eor r2,r4,r5 eor r0,r11,r11,ror#5 add r7,r7,r3 and r2,r2,r11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 eor r2,r2,r5 add r6,r6,r3,ror#6 eor r3,r7,r8 eor r0,r0,r7,ror#20 add r6,r6,r2 ldr r2,[sp,#56] and r12,r12,r3 add r10,r10,r6 add r6,r6,r0,ror#2 eor r12,r12,r8 add r5,r5,r2 eor r2,r11,r4 eor r0,r10,r10,ror#5 add r6,r6,r12 and r2,r2,r10 eor r12,r0,r10,ror#19 eor r0,r6,r6,ror#11 eor r2,r2,r4 add r5,r5,r12,ror#6 eor r12,r6,r7 eor r0,r0,r6,ror#20 add r5,r5,r2 ldr r2,[sp,#60] and r3,r3,r12 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 add r4,r4,r2 eor r2,r10,r11 eor r0,r9,r9,ror#5 add r5,r5,r3 and r2,r2,r9 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[sp,#64] and r12,r12,r3 add r8,r8,r4 add r4,r4,r0,ror#2 eor r12,r12,r6 vst1.32 {q8},[r1,:128]! ldr r0,[r2,#0] add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldr r12,[r2,#4] ldr r3,[r2,#8] ldr r1,[r2,#12] add r4,r4,r0 @ accumulate ldr r0,[r2,#16] add r5,r5,r12 ldr r12,[r2,#20] add r6,r6,r3 ldr r3,[r2,#24] add r7,r7,r1 ldr r1,[r2,#28] add r8,r8,r0 str r4,[r2],#4 add r9,r9,r12 str r5,[r2],#4 add r10,r10,r3 str r6,[r2],#4 add r11,r11,r1 str r7,[r2],#4 stmia r2,{r8,r9,r10,r11} ittte ne movne r1,sp ldrne r2,[sp,#0] eorne r12,r12,r12 ldreq sp,[sp,#76] @ restore original sp itt ne eorne r3,r5,r6 bne .L_00_48 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} .size sha256_block_data_order_neon,.-sha256_block_data_order_neon #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) # if defined(__thumb2__) # define INST(a,b,c,d) .byte c,d|0xc,a,b # else # define INST(a,b,c,d) .byte a,b,c,d # endif .LK256_shortcut_hw: @ PC is 8 bytes ahead in Arm mode and 4 bytes ahead in Thumb mode. 
#if defined(__thumb2__) .word K256-(.LK256_add_hw+4) #else .word K256-(.LK256_add_hw+8) #endif .globl sha256_block_data_order_hw .hidden sha256_block_data_order_hw .type sha256_block_data_order_hw,%function .align 5 sha256_block_data_order_hw: @ K256 is too far to reference from one ADR command in Thumb mode. In @ Arm mode, we could make it fit by aligning the ADR offset to a 64-byte @ boundary. For simplicity, just load the offset from .LK256_shortcut_hw. ldr r3,.LK256_shortcut_hw .LK256_add_hw: add r3,pc,r3 vld1.32 {q0,q1},[r0] add r2,r1,r2,lsl#6 @ len to point at the end of inp b .Loop_v8 .align 4 .Loop_v8: vld1.8 {q8,q9},[r1]! vld1.8 {q10,q11},[r1]! vld1.32 {q12},[r3]! vrev32.8 q8,q8 vrev32.8 q9,q9 vrev32.8 q10,q10 vrev32.8 q11,q11 vmov q14,q0 @ offload vmov q15,q1 teq r1,r2 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q8 INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 vld1.32 {q12},[r3]! vadd.i32 q13,q13,q9 INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q10 INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 vld1.32 {q12},[r3]! vadd.i32 q13,q13,q11 INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q8 INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 vld1.32 {q12},[r3]! 
vadd.i32 q13,q13,q9 INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q10 INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 vld1.32 {q12},[r3]! vadd.i32 q13,q13,q11 INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q8 INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 vld1.32 {q12},[r3]! vadd.i32 q13,q13,q9 INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q10 INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 vld1.32 {q12},[r3]! vadd.i32 q13,q13,q11 INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 vld1.32 {q13},[r3]! vadd.i32 q12,q12,q8 vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 vld1.32 {q12},[r3]! 
vadd.i32 q13,q13,q9 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 vld1.32 {q13},[r3] vadd.i32 q12,q12,q10 sub r3,r3,#256-16 @ rewind vmov q2,q0 INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 vadd.i32 q13,q13,q11 vmov q2,q0 INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 vadd.i32 q0,q0,q14 vadd.i32 q1,q1,q15 it ne bne .Loop_v8 vst1.32 {q0,q1},[r0] bx lr @ bx lr .size sha256_block_data_order_hw,.-sha256_block_data_order_hw #endif .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha256-armv8-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. 
For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. // // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include #endif .text .globl _sha256_block_data_order_nohw .private_extern _sha256_block_data_order_nohw .align 6 _sha256_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*4 ldp w20,w21,[x0] // load context ldp w22,w23,[x0,#2*4] ldp w24,w25,[x0,#4*4] add x2,x1,x2,lsl#6 // end of input ldp w26,w27,[x0,#6*4] adrp x30,LK256@PAGE add x30,x30,LK256@PAGEOFF stp x0,x2,[x29,#96] Loop: ldp w3,w4,[x1],#2*4 ldr w19,[x30],#4 // *K++ eor w28,w21,w22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev w3,w3 // 0 #endif ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w6,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w3 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w4,w4 // 1 #endif ldp w5,w6,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w7,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w4 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w5,w5 // 2 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w8,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w5 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) 
ror w8,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w6,w6 // 3 #endif ldp w7,w8,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w9,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w6 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w7,w7 // 4 #endif add w24,w24,w17 // h+=Sigma0(a) ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w10,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w7 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w10,ror#11 // Sigma1(e) ror w10,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w10,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w8,w8 // 5 #endif ldp w9,w10,[x1],#2*4 add w23,w23,w17 // h+=Sigma0(a) ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w11,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w8 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w11,ror#11 // Sigma1(e) ror w11,w23,#2 add w22,w22,w17 // 
h+=Ch(e,f,g) eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w11,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w9,w9 // 6 #endif add w22,w22,w17 // h+=Sigma0(a) ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w12,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w9 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w12,ror#11 // Sigma1(e) ror w12,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w12,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w10,w10 // 7 #endif ldp w11,w12,[x1],#2*4 add w21,w21,w17 // h+=Sigma0(a) ror w16,w25,#6 add w20,w20,w28 // h+=K[i] eor w13,w25,w25,ror#14 and w17,w26,w25 bic w28,w27,w25 add w20,w20,w10 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w13,ror#11 // Sigma1(e) ror w13,w21,#2 add w20,w20,w17 // h+=Ch(e,f,g) eor w17,w21,w21,ror#9 add w20,w20,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w24,w24,w20 // d+=h eor w19,w19,w22 // Maj(a,b,c) eor w17,w13,w17,ror#13 // Sigma0(a) add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w20,w20,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w11,w11 // 8 #endif add w20,w20,w17 // h+=Sigma0(a) ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w14,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w11 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w14,ror#11 // Sigma1(e) ror w14,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 
// h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w14,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w12,w12 // 9 #endif ldp w13,w14,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w15,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w12 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w15,ror#11 // Sigma1(e) ror w15,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w15,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w13,w13 // 10 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w0,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w13 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w0,ror#11 // Sigma1(e) ror w0,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w0,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w14,w14 // 11 #endif ldp w15,w0,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w6,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w14 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) 
and w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w15,w15 // 12 #endif add w24,w24,w17 // h+=Sigma0(a) str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w7,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w15 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w0,w0 // 13 #endif ldp w1,w2,[x1] add w23,w23,w17 // h+=Sigma0(a) str w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w8,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w0 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror w8,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w1,w1 // 14 #endif ldr w6,[sp,#12] add w22,w22,w17 // h+=Sigma0(a) str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w9,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w1 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and 
w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w2,w2 // 15 #endif ldr w7,[sp,#0] add w21,w21,w17 // h+=Sigma0(a) str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 Loop_16_xx: ldr w8,[sp,#4] str w11,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w10,w5,#7 and w17,w25,w24 ror w9,w2,#17 bic w19,w26,w24 ror w11,w20,#2 add w27,w27,w3 // h+=X[i] eor w16,w16,w24,ror#11 eor w10,w10,w5,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w11,w11,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w9,w9,w2,ror#19 eor w10,w10,w5,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w11,w20,ror#22 // Sigma0(a) eor w9,w9,w2,lsr#10 // sigma1(X[i+14]) add w4,w4,w13 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w4,w4,w10 add w27,w27,w17 // h+=Sigma0(a) add w4,w4,w9 ldr w9,[sp,#8] str w12,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w11,w6,#7 and w17,w24,w23 ror w10,w3,#17 bic 
w28,w25,w23 ror w12,w27,#2 add w26,w26,w4 // h+=X[i] eor w16,w16,w23,ror#11 eor w11,w11,w6,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w12,w12,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w10,w10,w3,ror#19 eor w11,w11,w6,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w12,w27,ror#22 // Sigma0(a) eor w10,w10,w3,lsr#10 // sigma1(X[i+14]) add w5,w5,w14 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w5,w5,w11 add w26,w26,w17 // h+=Sigma0(a) add w5,w5,w10 ldr w10,[sp,#12] str w13,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w12,w7,#7 and w17,w23,w22 ror w11,w4,#17 bic w19,w24,w22 ror w13,w26,#2 add w25,w25,w5 // h+=X[i] eor w16,w16,w22,ror#11 eor w12,w12,w7,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w13,w13,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w11,w11,w4,ror#19 eor w12,w12,w7,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w13,w26,ror#22 // Sigma0(a) eor w11,w11,w4,lsr#10 // sigma1(X[i+14]) add w6,w6,w15 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w6,w6,w12 add w25,w25,w17 // h+=Sigma0(a) add w6,w6,w11 ldr w11,[sp,#0] str w14,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w13,w8,#7 and w17,w22,w21 ror w12,w5,#17 bic w28,w23,w21 ror w14,w25,#2 add w24,w24,w6 // h+=X[i] eor w16,w16,w21,ror#11 eor w13,w13,w8,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w14,w14,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w12,w12,w5,ror#19 eor w13,w13,w8,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) 
eor w17,w14,w25,ror#22 // Sigma0(a) eor w12,w12,w5,lsr#10 // sigma1(X[i+14]) add w7,w7,w0 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w7,w7,w13 add w24,w24,w17 // h+=Sigma0(a) add w7,w7,w12 ldr w12,[sp,#4] str w15,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w14,w9,#7 and w17,w21,w20 ror w13,w6,#17 bic w19,w22,w20 ror w15,w24,#2 add w23,w23,w7 // h+=X[i] eor w16,w16,w20,ror#11 eor w14,w14,w9,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w15,w15,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w13,w13,w6,ror#19 eor w14,w14,w9,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w15,w24,ror#22 // Sigma0(a) eor w13,w13,w6,lsr#10 // sigma1(X[i+14]) add w8,w8,w1 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w8,w8,w14 add w23,w23,w17 // h+=Sigma0(a) add w8,w8,w13 ldr w13,[sp,#8] str w0,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w15,w10,#7 and w17,w20,w27 ror w14,w7,#17 bic w28,w21,w27 ror w0,w23,#2 add w22,w22,w8 // h+=X[i] eor w16,w16,w27,ror#11 eor w15,w15,w10,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w0,w0,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w14,w14,w7,ror#19 eor w15,w15,w10,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w0,w23,ror#22 // Sigma0(a) eor w14,w14,w7,lsr#10 // sigma1(X[i+14]) add w9,w9,w2 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w9,w9,w15 add w22,w22,w17 // h+=Sigma0(a) add w9,w9,w14 ldr w14,[sp,#12] str w1,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w0,w11,#7 and w17,w27,w26 ror w15,w8,#17 bic w19,w20,w26 ror w1,w22,#2 add w21,w21,w9 // 
h+=X[i] eor w16,w16,w26,ror#11 eor w0,w0,w11,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w1,w1,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w15,w15,w8,ror#19 eor w0,w0,w11,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w1,w22,ror#22 // Sigma0(a) eor w15,w15,w8,lsr#10 // sigma1(X[i+14]) add w10,w10,w3 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w10,w10,w0 add w21,w21,w17 // h+=Sigma0(a) add w10,w10,w15 ldr w15,[sp,#0] str w2,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w1,w12,#7 and w17,w26,w25 ror w0,w9,#17 bic w28,w27,w25 ror w2,w21,#2 add w20,w20,w10 // h+=X[i] eor w16,w16,w25,ror#11 eor w1,w1,w12,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w2,w2,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w0,w0,w9,ror#19 eor w1,w1,w12,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w2,w21,ror#22 // Sigma0(a) eor w0,w0,w9,lsr#10 // sigma1(X[i+14]) add w11,w11,w4 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w11,w11,w1 add w20,w20,w17 // h+=Sigma0(a) add w11,w11,w0 ldr w0,[sp,#4] str w3,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w2,w13,#7 and w17,w25,w24 ror w1,w10,#17 bic w19,w26,w24 ror w3,w20,#2 add w27,w27,w11 // h+=X[i] eor w16,w16,w24,ror#11 eor w2,w2,w13,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w3,w3,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w1,w1,w10,ror#19 eor w2,w2,w13,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w3,w20,ror#22 // Sigma0(a) eor w1,w1,w10,lsr#10 // 
sigma1(X[i+14]) add w12,w12,w5 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w12,w12,w2 add w27,w27,w17 // h+=Sigma0(a) add w12,w12,w1 ldr w1,[sp,#8] str w4,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w3,w14,#7 and w17,w24,w23 ror w2,w11,#17 bic w28,w25,w23 ror w4,w27,#2 add w26,w26,w12 // h+=X[i] eor w16,w16,w23,ror#11 eor w3,w3,w14,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w4,w4,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w2,w2,w11,ror#19 eor w3,w3,w14,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w4,w27,ror#22 // Sigma0(a) eor w2,w2,w11,lsr#10 // sigma1(X[i+14]) add w13,w13,w6 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w13,w13,w3 add w26,w26,w17 // h+=Sigma0(a) add w13,w13,w2 ldr w2,[sp,#12] str w5,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w4,w15,#7 and w17,w23,w22 ror w3,w12,#17 bic w19,w24,w22 ror w5,w26,#2 add w25,w25,w13 // h+=X[i] eor w16,w16,w22,ror#11 eor w4,w4,w15,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w5,w5,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w3,w3,w12,ror#19 eor w4,w4,w15,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w5,w26,ror#22 // Sigma0(a) eor w3,w3,w12,lsr#10 // sigma1(X[i+14]) add w14,w14,w7 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w14,w14,w4 add w25,w25,w17 // h+=Sigma0(a) add w14,w14,w3 ldr w3,[sp,#0] str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w5,w0,#7 and w17,w22,w21 ror w4,w13,#17 bic w28,w23,w21 ror w6,w25,#2 add w24,w24,w14 // h+=X[i] eor w16,w16,w21,ror#11 eor w5,w5,w0,ror#18 orr w17,w17,w28 // 
Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w6,w6,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w4,w4,w13,ror#19 eor w5,w5,w0,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w25,ror#22 // Sigma0(a) eor w4,w4,w13,lsr#10 // sigma1(X[i+14]) add w15,w15,w8 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w15,w15,w5 add w24,w24,w17 // h+=Sigma0(a) add w15,w15,w4 ldr w4,[sp,#4] str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w6,w1,#7 and w17,w21,w20 ror w5,w14,#17 bic w19,w22,w20 ror w7,w24,#2 add w23,w23,w15 // h+=X[i] eor w16,w16,w20,ror#11 eor w6,w6,w1,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w7,w7,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w5,w5,w14,ror#19 eor w6,w6,w1,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w24,ror#22 // Sigma0(a) eor w5,w5,w14,lsr#10 // sigma1(X[i+14]) add w0,w0,w9 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w0,w0,w6 add w23,w23,w17 // h+=Sigma0(a) add w0,w0,w5 ldr w5,[sp,#8] str w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w7,w2,#7 and w17,w20,w27 ror w6,w15,#17 bic w28,w21,w27 ror w8,w23,#2 add w22,w22,w0 // h+=X[i] eor w16,w16,w27,ror#11 eor w7,w7,w2,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w8,w8,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w6,w6,w15,ror#19 eor w7,w7,w2,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w23,ror#22 // Sigma0(a) eor w6,w6,w15,lsr#10 // sigma1(X[i+14]) add w1,w1,w10 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr 
w19,[x30],#4 // *K++, w28 in next round add w1,w1,w7 add w22,w22,w17 // h+=Sigma0(a) add w1,w1,w6 ldr w6,[sp,#12] str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w8,w3,#7 and w17,w27,w26 ror w7,w0,#17 bic w19,w20,w26 ror w9,w22,#2 add w21,w21,w1 // h+=X[i] eor w16,w16,w26,ror#11 eor w8,w8,w3,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w9,w9,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w7,w7,w0,ror#19 eor w8,w8,w3,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w22,ror#22 // Sigma0(a) eor w7,w7,w0,lsr#10 // sigma1(X[i+14]) add w2,w2,w11 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w2,w2,w8 add w21,w21,w17 // h+=Sigma0(a) add w2,w2,w7 ldr w7,[sp,#0] str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 cbnz w19,Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#260 // rewind ldp w3,w4,[x0] ldp w5,w6,[x0,#2*4] add x1,x1,#14*4 // advance input pointer ldp w7,w8,[x0,#4*4] add w20,w20,w3 ldp w9,w10,[x0,#6*4] add w21,w21,w4 add w22,w22,w5 add w23,w23,w6 stp w20,w21,[x0] add w24,w24,w7 add w25,w25,w8 stp w22,w23,[x0,#2*4] add w26,w26,w9 add 
w27,w27,w10 cmp x1,x2 stp w24,w25,[x0,#4*4] stp w26,w27,[x0,#6*4] b.ne Loop ldp x19,x20,[x29,#16] add sp,sp,#4*4 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .section __TEXT,__const .align 6 LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0 //terminator .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl _sha256_block_data_order_hw .private_extern _sha256_block_data_order_hw .align 6 _sha256_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ld1 {v0.4s,v1.4s},[x0] adrp x3,LK256@PAGE add x3,x3,LK256@PAGEOFF Loop_hw: ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 sub x2,x2,#1 ld1 {v16.4s},[x3],#16 rev32 v4.16b,v4.16b rev32 v5.16b,v5.16b rev32 v6.16b,v6.16b rev32 v7.16b,v7.16b orr v18.16b,v0.16b,v0.16b // offload orr v19.16b,v1.16b,v1.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 
0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s ld1 {v17.4s},[x3] add v16.4s,v16.4s,v6.4s sub x3,x3,#64*4-16 // rewind orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s add v17.4s,v17.4s,v7.4s 
orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s add v0.4s,v0.4s,v18.4s add v1.4s,v1.4s,v19.4s cbnz x2,Loop_hw st1 {v0.4s,v1.4s},[x0] ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha256-armv8-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. 
// // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include #endif .text .globl sha256_block_data_order_nohw .hidden sha256_block_data_order_nohw .type sha256_block_data_order_nohw,%function .align 6 sha256_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*4 ldp w20,w21,[x0] // load context ldp w22,w23,[x0,#2*4] ldp w24,w25,[x0,#4*4] add x2,x1,x2,lsl#6 // end of input ldp w26,w27,[x0,#6*4] adrp x30,.LK256 add x30,x30,:lo12:.LK256 stp x0,x2,[x29,#96] .Loop: ldp w3,w4,[x1],#2*4 ldr w19,[x30],#4 // *K++ eor w28,w21,w22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev w3,w3 // 0 #endif ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w6,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w3 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w4,w4 // 1 #endif ldp w5,w6,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w7,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w4 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w5,w5 // 2 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w8,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w5 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror 
w8,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w6,w6 // 3 #endif ldp w7,w8,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w9,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w6 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w7,w7 // 4 #endif add w24,w24,w17 // h+=Sigma0(a) ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w10,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w7 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w10,ror#11 // Sigma1(e) ror w10,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w10,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w8,w8 // 5 #endif ldp w9,w10,[x1],#2*4 add w23,w23,w17 // h+=Sigma0(a) ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w11,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w8 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w11,ror#11 // Sigma1(e) ror w11,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) 
eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w11,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w9,w9 // 6 #endif add w22,w22,w17 // h+=Sigma0(a) ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w12,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w9 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w12,ror#11 // Sigma1(e) ror w12,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w12,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w10,w10 // 7 #endif ldp w11,w12,[x1],#2*4 add w21,w21,w17 // h+=Sigma0(a) ror w16,w25,#6 add w20,w20,w28 // h+=K[i] eor w13,w25,w25,ror#14 and w17,w26,w25 bic w28,w27,w25 add w20,w20,w10 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w13,ror#11 // Sigma1(e) ror w13,w21,#2 add w20,w20,w17 // h+=Ch(e,f,g) eor w17,w21,w21,ror#9 add w20,w20,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w24,w24,w20 // d+=h eor w19,w19,w22 // Maj(a,b,c) eor w17,w13,w17,ror#13 // Sigma0(a) add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w20,w20,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w11,w11 // 8 #endif add w20,w20,w17 // h+=Sigma0(a) ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w14,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w11 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w14,ror#11 // Sigma1(e) ror w14,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // 
h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w14,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w12,w12 // 9 #endif ldp w13,w14,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w15,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w12 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w15,ror#11 // Sigma1(e) ror w15,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w15,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w13,w13 // 10 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w0,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w13 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w0,ror#11 // Sigma1(e) ror w0,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w0,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w14,w14 // 11 #endif ldp w15,w0,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w6,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w14 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and 
w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w15,w15 // 12 #endif add w24,w24,w17 // h+=Sigma0(a) str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w7,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w15 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w0,w0 // 13 #endif ldp w1,w2,[x1] add w23,w23,w17 // h+=Sigma0(a) str w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w8,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w0 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror w8,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w1,w1 // 14 #endif ldr w6,[sp,#12] add w22,w22,w17 // h+=Sigma0(a) str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w9,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w1 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and 
w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w2,w2 // 15 #endif ldr w7,[sp,#0] add w21,w21,w17 // h+=Sigma0(a) str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 .Loop_16_xx: ldr w8,[sp,#4] str w11,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w10,w5,#7 and w17,w25,w24 ror w9,w2,#17 bic w19,w26,w24 ror w11,w20,#2 add w27,w27,w3 // h+=X[i] eor w16,w16,w24,ror#11 eor w10,w10,w5,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w11,w11,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w9,w9,w2,ror#19 eor w10,w10,w5,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w11,w20,ror#22 // Sigma0(a) eor w9,w9,w2,lsr#10 // sigma1(X[i+14]) add w4,w4,w13 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w4,w4,w10 add w27,w27,w17 // h+=Sigma0(a) add w4,w4,w9 ldr w9,[sp,#8] str w12,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w11,w6,#7 and w17,w24,w23 ror w10,w3,#17 
bic w28,w25,w23 ror w12,w27,#2 add w26,w26,w4 // h+=X[i] eor w16,w16,w23,ror#11 eor w11,w11,w6,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w12,w12,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w10,w10,w3,ror#19 eor w11,w11,w6,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w12,w27,ror#22 // Sigma0(a) eor w10,w10,w3,lsr#10 // sigma1(X[i+14]) add w5,w5,w14 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w5,w5,w11 add w26,w26,w17 // h+=Sigma0(a) add w5,w5,w10 ldr w10,[sp,#12] str w13,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w12,w7,#7 and w17,w23,w22 ror w11,w4,#17 bic w19,w24,w22 ror w13,w26,#2 add w25,w25,w5 // h+=X[i] eor w16,w16,w22,ror#11 eor w12,w12,w7,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w13,w13,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w11,w11,w4,ror#19 eor w12,w12,w7,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w13,w26,ror#22 // Sigma0(a) eor w11,w11,w4,lsr#10 // sigma1(X[i+14]) add w6,w6,w15 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w6,w6,w12 add w25,w25,w17 // h+=Sigma0(a) add w6,w6,w11 ldr w11,[sp,#0] str w14,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w13,w8,#7 and w17,w22,w21 ror w12,w5,#17 bic w28,w23,w21 ror w14,w25,#2 add w24,w24,w6 // h+=X[i] eor w16,w16,w21,ror#11 eor w13,w13,w8,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w14,w14,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w12,w12,w5,ror#19 eor w13,w13,w8,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // 
Maj(a,b,c) eor w17,w14,w25,ror#22 // Sigma0(a) eor w12,w12,w5,lsr#10 // sigma1(X[i+14]) add w7,w7,w0 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w7,w7,w13 add w24,w24,w17 // h+=Sigma0(a) add w7,w7,w12 ldr w12,[sp,#4] str w15,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w14,w9,#7 and w17,w21,w20 ror w13,w6,#17 bic w19,w22,w20 ror w15,w24,#2 add w23,w23,w7 // h+=X[i] eor w16,w16,w20,ror#11 eor w14,w14,w9,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w15,w15,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w13,w13,w6,ror#19 eor w14,w14,w9,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w15,w24,ror#22 // Sigma0(a) eor w13,w13,w6,lsr#10 // sigma1(X[i+14]) add w8,w8,w1 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w8,w8,w14 add w23,w23,w17 // h+=Sigma0(a) add w8,w8,w13 ldr w13,[sp,#8] str w0,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w15,w10,#7 and w17,w20,w27 ror w14,w7,#17 bic w28,w21,w27 ror w0,w23,#2 add w22,w22,w8 // h+=X[i] eor w16,w16,w27,ror#11 eor w15,w15,w10,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w0,w0,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w14,w14,w7,ror#19 eor w15,w15,w10,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w0,w23,ror#22 // Sigma0(a) eor w14,w14,w7,lsr#10 // sigma1(X[i+14]) add w9,w9,w2 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w9,w9,w15 add w22,w22,w17 // h+=Sigma0(a) add w9,w9,w14 ldr w14,[sp,#12] str w1,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w0,w11,#7 and w17,w27,w26 ror w15,w8,#17 bic w19,w20,w26 ror w1,w22,#2 add 
w21,w21,w9 // h+=X[i] eor w16,w16,w26,ror#11 eor w0,w0,w11,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w1,w1,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w15,w15,w8,ror#19 eor w0,w0,w11,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w1,w22,ror#22 // Sigma0(a) eor w15,w15,w8,lsr#10 // sigma1(X[i+14]) add w10,w10,w3 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w10,w10,w0 add w21,w21,w17 // h+=Sigma0(a) add w10,w10,w15 ldr w15,[sp,#0] str w2,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w1,w12,#7 and w17,w26,w25 ror w0,w9,#17 bic w28,w27,w25 ror w2,w21,#2 add w20,w20,w10 // h+=X[i] eor w16,w16,w25,ror#11 eor w1,w1,w12,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w2,w2,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w0,w0,w9,ror#19 eor w1,w1,w12,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w2,w21,ror#22 // Sigma0(a) eor w0,w0,w9,lsr#10 // sigma1(X[i+14]) add w11,w11,w4 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w11,w11,w1 add w20,w20,w17 // h+=Sigma0(a) add w11,w11,w0 ldr w0,[sp,#4] str w3,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w2,w13,#7 and w17,w25,w24 ror w1,w10,#17 bic w19,w26,w24 ror w3,w20,#2 add w27,w27,w11 // h+=X[i] eor w16,w16,w24,ror#11 eor w2,w2,w13,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w3,w3,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w1,w1,w10,ror#19 eor w2,w2,w13,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w3,w20,ror#22 // Sigma0(a) eor 
w1,w1,w10,lsr#10 // sigma1(X[i+14]) add w12,w12,w5 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w12,w12,w2 add w27,w27,w17 // h+=Sigma0(a) add w12,w12,w1 ldr w1,[sp,#8] str w4,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w3,w14,#7 and w17,w24,w23 ror w2,w11,#17 bic w28,w25,w23 ror w4,w27,#2 add w26,w26,w12 // h+=X[i] eor w16,w16,w23,ror#11 eor w3,w3,w14,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w4,w4,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w2,w2,w11,ror#19 eor w3,w3,w14,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w4,w27,ror#22 // Sigma0(a) eor w2,w2,w11,lsr#10 // sigma1(X[i+14]) add w13,w13,w6 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w13,w13,w3 add w26,w26,w17 // h+=Sigma0(a) add w13,w13,w2 ldr w2,[sp,#12] str w5,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w4,w15,#7 and w17,w23,w22 ror w3,w12,#17 bic w19,w24,w22 ror w5,w26,#2 add w25,w25,w13 // h+=X[i] eor w16,w16,w22,ror#11 eor w4,w4,w15,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w5,w5,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w3,w3,w12,ror#19 eor w4,w4,w15,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w5,w26,ror#22 // Sigma0(a) eor w3,w3,w12,lsr#10 // sigma1(X[i+14]) add w14,w14,w7 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w14,w14,w4 add w25,w25,w17 // h+=Sigma0(a) add w14,w14,w3 ldr w3,[sp,#0] str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w5,w0,#7 and w17,w22,w21 ror w4,w13,#17 bic w28,w23,w21 ror w6,w25,#2 add w24,w24,w14 // h+=X[i] eor w16,w16,w21,ror#11 eor w5,w5,w0,ror#18 
orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w6,w6,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w4,w4,w13,ror#19 eor w5,w5,w0,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w25,ror#22 // Sigma0(a) eor w4,w4,w13,lsr#10 // sigma1(X[i+14]) add w15,w15,w8 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w15,w15,w5 add w24,w24,w17 // h+=Sigma0(a) add w15,w15,w4 ldr w4,[sp,#4] str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w6,w1,#7 and w17,w21,w20 ror w5,w14,#17 bic w19,w22,w20 ror w7,w24,#2 add w23,w23,w15 // h+=X[i] eor w16,w16,w20,ror#11 eor w6,w6,w1,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w7,w7,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w5,w5,w14,ror#19 eor w6,w6,w1,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w24,ror#22 // Sigma0(a) eor w5,w5,w14,lsr#10 // sigma1(X[i+14]) add w0,w0,w9 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w0,w0,w6 add w23,w23,w17 // h+=Sigma0(a) add w0,w0,w5 ldr w5,[sp,#8] str w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w7,w2,#7 and w17,w20,w27 ror w6,w15,#17 bic w28,w21,w27 ror w8,w23,#2 add w22,w22,w0 // h+=X[i] eor w16,w16,w27,ror#11 eor w7,w7,w2,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w8,w8,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w6,w6,w15,ror#19 eor w7,w7,w2,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w23,ror#22 // Sigma0(a) eor w6,w6,w15,lsr#10 // sigma1(X[i+14]) add w1,w1,w10 add w26,w26,w22 // d+=h add w22,w22,w19 // 
h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w1,w1,w7 add w22,w22,w17 // h+=Sigma0(a) add w1,w1,w6 ldr w6,[sp,#12] str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w8,w3,#7 and w17,w27,w26 ror w7,w0,#17 bic w19,w20,w26 ror w9,w22,#2 add w21,w21,w1 // h+=X[i] eor w16,w16,w26,ror#11 eor w8,w8,w3,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w9,w9,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w7,w7,w0,ror#19 eor w8,w8,w3,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w22,ror#22 // Sigma0(a) eor w7,w7,w0,lsr#10 // sigma1(X[i+14]) add w2,w2,w11 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w2,w2,w8 add w21,w21,w17 // h+=Sigma0(a) add w2,w2,w7 ldr w7,[sp,#0] str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 cbnz w19,.Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#260 // rewind ldp w3,w4,[x0] ldp w5,w6,[x0,#2*4] add x1,x1,#14*4 // advance input pointer ldp w7,w8,[x0,#4*4] add w20,w20,w3 ldp w9,w10,[x0,#6*4] add w21,w21,w4 add w22,w22,w5 add w23,w23,w6 stp w20,w21,[x0] add w24,w24,w7 add w25,w25,w8 stp w22,w23,[x0,#2*4] 
add w26,w26,w9 add w27,w27,w10 cmp x1,x2 stp w24,w25,[x0,#4*4] stp w26,w27,[x0,#6*4] b.ne .Loop ldp x19,x20,[x29,#16] add sp,sp,#4*4 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw .section .rodata .align 6 .type .LK256,%object .LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0 //terminator .size .LK256,.-.LK256 .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl sha256_block_data_order_hw .hidden sha256_block_data_order_hw .type sha256_block_data_order_hw,%function .align 6 sha256_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ld1 {v0.4s,v1.4s},[x0] adrp x3,.LK256 add x3,x3,:lo12:.LK256 .Loop_hw: ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 sub x2,x2,#1 ld1 {v16.4s},[x3],#16 rev32 v4.16b,v4.16b rev32 v5.16b,v5.16b rev32 v6.16b,v6.16b rev32 v7.16b,v7.16b orr v18.16b,v0.16b,v0.16b // offload orr v19.16b,v1.16b,v1.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .inst 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .inst 
0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .inst 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .inst 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s ld1 {v17.4s},[x3] add v16.4s,v16.4s,v6.4s sub x3,x3,#64*4-16 // rewind orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s add v17.4s,v17.4s,v7.4s 
orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s add v0.4s,v0.4s,v18.4s add v1.4s,v1.4s,v19.4s cbnz x2,.Loop_hw st1 {v0.4s,v1.4s},[x0] ldr x29,[sp],#16 ret .size sha256_block_data_order_hw,.-sha256_block_data_order_hw #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha256-armv8-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. 
// // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include #endif .text .globl sha256_block_data_order_nohw .def sha256_block_data_order_nohw .type 32 .endef .align 6 sha256_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*4 ldp w20,w21,[x0] // load context ldp w22,w23,[x0,#2*4] ldp w24,w25,[x0,#4*4] add x2,x1,x2,lsl#6 // end of input ldp w26,w27,[x0,#6*4] adrp x30,LK256 add x30,x30,:lo12:LK256 stp x0,x2,[x29,#96] Loop: ldp w3,w4,[x1],#2*4 ldr w19,[x30],#4 // *K++ eor w28,w21,w22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev w3,w3 // 0 #endif ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w6,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w3 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w4,w4 // 1 #endif ldp w5,w6,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w7,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w4 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w5,w5 // 2 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w8,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w5 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror 
w8,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w6,w6 // 3 #endif ldp w7,w8,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w9,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w6 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w7,w7 // 4 #endif add w24,w24,w17 // h+=Sigma0(a) ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w10,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w7 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w10,ror#11 // Sigma1(e) ror w10,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w10,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w8,w8 // 5 #endif ldp w9,w10,[x1],#2*4 add w23,w23,w17 // h+=Sigma0(a) ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w11,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w8 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w11,ror#11 // Sigma1(e) ror w11,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) 
eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w11,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w9,w9 // 6 #endif add w22,w22,w17 // h+=Sigma0(a) ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w12,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w9 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w12,ror#11 // Sigma1(e) ror w12,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w12,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w10,w10 // 7 #endif ldp w11,w12,[x1],#2*4 add w21,w21,w17 // h+=Sigma0(a) ror w16,w25,#6 add w20,w20,w28 // h+=K[i] eor w13,w25,w25,ror#14 and w17,w26,w25 bic w28,w27,w25 add w20,w20,w10 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w13,ror#11 // Sigma1(e) ror w13,w21,#2 add w20,w20,w17 // h+=Ch(e,f,g) eor w17,w21,w21,ror#9 add w20,w20,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w24,w24,w20 // d+=h eor w19,w19,w22 // Maj(a,b,c) eor w17,w13,w17,ror#13 // Sigma0(a) add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w20,w20,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w11,w11 // 8 #endif add w20,w20,w17 // h+=Sigma0(a) ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w14,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w11 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w14,ror#11 // Sigma1(e) ror w14,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // 
h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w14,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w12,w12 // 9 #endif ldp w13,w14,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w15,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w12 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w15,ror#11 // Sigma1(e) ror w15,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w15,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w13,w13 // 10 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w0,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w13 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w0,ror#11 // Sigma1(e) ror w0,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w0,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w14,w14 // 11 #endif ldp w15,w0,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w6,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w14 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and 
w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w15,w15 // 12 #endif add w24,w24,w17 // h+=Sigma0(a) str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w7,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w15 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w0,w0 // 13 #endif ldp w1,w2,[x1] add w23,w23,w17 // h+=Sigma0(a) str w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w8,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w0 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror w8,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w1,w1 // 14 #endif ldr w6,[sp,#12] add w22,w22,w17 // h+=Sigma0(a) str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w9,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w1 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and 
w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w2,w2 // 15 #endif ldr w7,[sp,#0] add w21,w21,w17 // h+=Sigma0(a) str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 Loop_16_xx: ldr w8,[sp,#4] str w11,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w10,w5,#7 and w17,w25,w24 ror w9,w2,#17 bic w19,w26,w24 ror w11,w20,#2 add w27,w27,w3 // h+=X[i] eor w16,w16,w24,ror#11 eor w10,w10,w5,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w11,w11,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w9,w9,w2,ror#19 eor w10,w10,w5,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w11,w20,ror#22 // Sigma0(a) eor w9,w9,w2,lsr#10 // sigma1(X[i+14]) add w4,w4,w13 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w4,w4,w10 add w27,w27,w17 // h+=Sigma0(a) add w4,w4,w9 ldr w9,[sp,#8] str w12,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w11,w6,#7 and w17,w24,w23 ror w10,w3,#17 bic 
w28,w25,w23 ror w12,w27,#2 add w26,w26,w4 // h+=X[i] eor w16,w16,w23,ror#11 eor w11,w11,w6,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w12,w12,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w10,w10,w3,ror#19 eor w11,w11,w6,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w12,w27,ror#22 // Sigma0(a) eor w10,w10,w3,lsr#10 // sigma1(X[i+14]) add w5,w5,w14 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w5,w5,w11 add w26,w26,w17 // h+=Sigma0(a) add w5,w5,w10 ldr w10,[sp,#12] str w13,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w12,w7,#7 and w17,w23,w22 ror w11,w4,#17 bic w19,w24,w22 ror w13,w26,#2 add w25,w25,w5 // h+=X[i] eor w16,w16,w22,ror#11 eor w12,w12,w7,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w13,w13,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w11,w11,w4,ror#19 eor w12,w12,w7,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w13,w26,ror#22 // Sigma0(a) eor w11,w11,w4,lsr#10 // sigma1(X[i+14]) add w6,w6,w15 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w6,w6,w12 add w25,w25,w17 // h+=Sigma0(a) add w6,w6,w11 ldr w11,[sp,#0] str w14,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w13,w8,#7 and w17,w22,w21 ror w12,w5,#17 bic w28,w23,w21 ror w14,w25,#2 add w24,w24,w6 // h+=X[i] eor w16,w16,w21,ror#11 eor w13,w13,w8,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w14,w14,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w12,w12,w5,ror#19 eor w13,w13,w8,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) 
eor w17,w14,w25,ror#22 // Sigma0(a) eor w12,w12,w5,lsr#10 // sigma1(X[i+14]) add w7,w7,w0 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w7,w7,w13 add w24,w24,w17 // h+=Sigma0(a) add w7,w7,w12 ldr w12,[sp,#4] str w15,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w14,w9,#7 and w17,w21,w20 ror w13,w6,#17 bic w19,w22,w20 ror w15,w24,#2 add w23,w23,w7 // h+=X[i] eor w16,w16,w20,ror#11 eor w14,w14,w9,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w15,w15,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w13,w13,w6,ror#19 eor w14,w14,w9,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w15,w24,ror#22 // Sigma0(a) eor w13,w13,w6,lsr#10 // sigma1(X[i+14]) add w8,w8,w1 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w8,w8,w14 add w23,w23,w17 // h+=Sigma0(a) add w8,w8,w13 ldr w13,[sp,#8] str w0,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w15,w10,#7 and w17,w20,w27 ror w14,w7,#17 bic w28,w21,w27 ror w0,w23,#2 add w22,w22,w8 // h+=X[i] eor w16,w16,w27,ror#11 eor w15,w15,w10,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w0,w0,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w14,w14,w7,ror#19 eor w15,w15,w10,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w0,w23,ror#22 // Sigma0(a) eor w14,w14,w7,lsr#10 // sigma1(X[i+14]) add w9,w9,w2 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w9,w9,w15 add w22,w22,w17 // h+=Sigma0(a) add w9,w9,w14 ldr w14,[sp,#12] str w1,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w0,w11,#7 and w17,w27,w26 ror w15,w8,#17 bic w19,w20,w26 ror w1,w22,#2 add w21,w21,w9 // 
h+=X[i] eor w16,w16,w26,ror#11 eor w0,w0,w11,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w1,w1,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w15,w15,w8,ror#19 eor w0,w0,w11,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w1,w22,ror#22 // Sigma0(a) eor w15,w15,w8,lsr#10 // sigma1(X[i+14]) add w10,w10,w3 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w10,w10,w0 add w21,w21,w17 // h+=Sigma0(a) add w10,w10,w15 ldr w15,[sp,#0] str w2,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w1,w12,#7 and w17,w26,w25 ror w0,w9,#17 bic w28,w27,w25 ror w2,w21,#2 add w20,w20,w10 // h+=X[i] eor w16,w16,w25,ror#11 eor w1,w1,w12,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w2,w2,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w0,w0,w9,ror#19 eor w1,w1,w12,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w2,w21,ror#22 // Sigma0(a) eor w0,w0,w9,lsr#10 // sigma1(X[i+14]) add w11,w11,w4 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w11,w11,w1 add w20,w20,w17 // h+=Sigma0(a) add w11,w11,w0 ldr w0,[sp,#4] str w3,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w2,w13,#7 and w17,w25,w24 ror w1,w10,#17 bic w19,w26,w24 ror w3,w20,#2 add w27,w27,w11 // h+=X[i] eor w16,w16,w24,ror#11 eor w2,w2,w13,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w3,w3,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w1,w1,w10,ror#19 eor w2,w2,w13,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w3,w20,ror#22 // Sigma0(a) eor w1,w1,w10,lsr#10 // 
sigma1(X[i+14]) add w12,w12,w5 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w12,w12,w2 add w27,w27,w17 // h+=Sigma0(a) add w12,w12,w1 ldr w1,[sp,#8] str w4,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w3,w14,#7 and w17,w24,w23 ror w2,w11,#17 bic w28,w25,w23 ror w4,w27,#2 add w26,w26,w12 // h+=X[i] eor w16,w16,w23,ror#11 eor w3,w3,w14,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w4,w4,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w2,w2,w11,ror#19 eor w3,w3,w14,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w4,w27,ror#22 // Sigma0(a) eor w2,w2,w11,lsr#10 // sigma1(X[i+14]) add w13,w13,w6 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w13,w13,w3 add w26,w26,w17 // h+=Sigma0(a) add w13,w13,w2 ldr w2,[sp,#12] str w5,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w4,w15,#7 and w17,w23,w22 ror w3,w12,#17 bic w19,w24,w22 ror w5,w26,#2 add w25,w25,w13 // h+=X[i] eor w16,w16,w22,ror#11 eor w4,w4,w15,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w5,w5,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w3,w3,w12,ror#19 eor w4,w4,w15,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w5,w26,ror#22 // Sigma0(a) eor w3,w3,w12,lsr#10 // sigma1(X[i+14]) add w14,w14,w7 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w14,w14,w4 add w25,w25,w17 // h+=Sigma0(a) add w14,w14,w3 ldr w3,[sp,#0] str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w5,w0,#7 and w17,w22,w21 ror w4,w13,#17 bic w28,w23,w21 ror w6,w25,#2 add w24,w24,w14 // h+=X[i] eor w16,w16,w21,ror#11 eor w5,w5,w0,ror#18 orr w17,w17,w28 // 
Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w6,w6,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w4,w4,w13,ror#19 eor w5,w5,w0,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w25,ror#22 // Sigma0(a) eor w4,w4,w13,lsr#10 // sigma1(X[i+14]) add w15,w15,w8 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w15,w15,w5 add w24,w24,w17 // h+=Sigma0(a) add w15,w15,w4 ldr w4,[sp,#4] str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w6,w1,#7 and w17,w21,w20 ror w5,w14,#17 bic w19,w22,w20 ror w7,w24,#2 add w23,w23,w15 // h+=X[i] eor w16,w16,w20,ror#11 eor w6,w6,w1,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w7,w7,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w5,w5,w14,ror#19 eor w6,w6,w1,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w24,ror#22 // Sigma0(a) eor w5,w5,w14,lsr#10 // sigma1(X[i+14]) add w0,w0,w9 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w0,w0,w6 add w23,w23,w17 // h+=Sigma0(a) add w0,w0,w5 ldr w5,[sp,#8] str w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w7,w2,#7 and w17,w20,w27 ror w6,w15,#17 bic w28,w21,w27 ror w8,w23,#2 add w22,w22,w0 // h+=X[i] eor w16,w16,w27,ror#11 eor w7,w7,w2,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w8,w8,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w6,w6,w15,ror#19 eor w7,w7,w2,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w23,ror#22 // Sigma0(a) eor w6,w6,w15,lsr#10 // sigma1(X[i+14]) add w1,w1,w10 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr 
w19,[x30],#4 // *K++, w28 in next round add w1,w1,w7 add w22,w22,w17 // h+=Sigma0(a) add w1,w1,w6 ldr w6,[sp,#12] str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w8,w3,#7 and w17,w27,w26 ror w7,w0,#17 bic w19,w20,w26 ror w9,w22,#2 add w21,w21,w1 // h+=X[i] eor w16,w16,w26,ror#11 eor w8,w8,w3,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w9,w9,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w7,w7,w0,ror#19 eor w8,w8,w3,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w22,ror#22 // Sigma0(a) eor w7,w7,w0,lsr#10 // sigma1(X[i+14]) add w2,w2,w11 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w2,w2,w8 add w21,w21,w17 // h+=Sigma0(a) add w2,w2,w7 ldr w7,[sp,#0] str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 cbnz w19,Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#260 // rewind ldp w3,w4,[x0] ldp w5,w6,[x0,#2*4] add x1,x1,#14*4 // advance input pointer ldp w7,w8,[x0,#4*4] add w20,w20,w3 ldp w9,w10,[x0,#6*4] add w21,w21,w4 add w22,w22,w5 add w23,w23,w6 stp w20,w21,[x0] add w24,w24,w7 add w25,w25,w8 stp w22,w23,[x0,#2*4] add w26,w26,w9 add 
w27,w27,w10 cmp x1,x2 stp w24,w25,[x0,#4*4] stp w26,w27,[x0,#6*4] b.ne Loop ldp x19,x20,[x29,#16] add sp,sp,#4*4 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .section .rodata .align 6 LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0 //terminator .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl sha256_block_data_order_hw .def sha256_block_data_order_hw .type 32 .endef .align 6 sha256_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ld1 {v0.4s,v1.4s},[x0] adrp x3,LK256 add x3,x3,:lo12:LK256 Loop_hw: ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 sub x2,x2,#1 ld1 {v16.4s},[x3],#16 rev32 v4.16b,v4.16b rev32 v5.16b,v5.16b rev32 v6.16b,v6.16b rev32 v7.16b,v7.16b orr v18.16b,v0.16b,v0.16b // offload orr v19.16b,v1.16b,v1.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 
//sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s ld1 {v17.4s},[x3] add v16.4s,v16.4s,v6.4s sub x3,x3,#64*4-16 // rewind orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s add v17.4s,v17.4s,v7.4s orr 
v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s add v0.4s,v0.4s,v18.4s add v1.4s,v1.4s,v19.4s cbnz x2,Loop_hw st1 {v0.4s,v1.4s},[x0] ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha256-x86_64-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _sha256_block_data_order_nohw .private_extern _sha256_block_data_order_nohw .p2align 4 _sha256_block_data_order_nohw: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 shlq $4,%rdx subq $64+32,%rsp leaq (%rsi,%rdx,4),%rdx andq $-64,%rsp movq %rdi,64+0(%rsp) movq %rsi,64+8(%rsp) movq %rdx,64+16(%rsp) movq %rax,88(%rsp) L$prologue: movl 0(%rdi),%eax movl 4(%rdi),%ebx movl 8(%rdi),%ecx movl 12(%rdi),%edx movl 16(%rdi),%r8d movl 20(%rdi),%r9d movl 24(%rdi),%r10d movl 28(%rdi),%r11d jmp L$loop .p2align 4 L$loop: movl %ebx,%edi leaq K256(%rip),%rbp xorl %ecx,%edi movl 0(%rsi),%r12d movl %r8d,%r13d movl %eax,%r14d bswapl %r12d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,0(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp addl %r14d,%r11d movl 4(%rsi),%r12d movl %edx,%r13d movl %r11d,%r14d bswapl %r12d rorl 
$14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,4(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp addl %r14d,%r10d movl 8(%rsi),%r12d movl %ecx,%r13d movl %r10d,%r14d bswapl %r12d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,8(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp addl %r14d,%r9d movl 12(%rsi),%r12d movl %ebx,%r13d movl %r9d,%r14d bswapl %r12d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,12(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp addl %r14d,%r8d movl 16(%rsi),%r12d movl %eax,%r13d movl %r8d,%r14d bswapl %r12d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,16(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp addl %r14d,%edx movl 20(%rsi),%r12d 
movl %r11d,%r13d movl %edx,%r14d bswapl %r12d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,20(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp addl %r14d,%ecx movl 24(%rsi),%r12d movl %r10d,%r13d movl %ecx,%r14d bswapl %r12d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,24(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp addl %r14d,%ebx movl 28(%rsi),%r12d movl %r9d,%r13d movl %ebx,%r14d bswapl %r12d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,28(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp addl %r14d,%eax movl 32(%rsi),%r12d movl %r8d,%r13d movl %eax,%r14d bswapl %r12d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,32(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d 
leaq 4(%rbp),%rbp addl %r14d,%r11d movl 36(%rsi),%r12d movl %edx,%r13d movl %r11d,%r14d bswapl %r12d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,36(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp addl %r14d,%r10d movl 40(%rsi),%r12d movl %ecx,%r13d movl %r10d,%r14d bswapl %r12d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,40(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp addl %r14d,%r9d movl 44(%rsi),%r12d movl %ebx,%r13d movl %r9d,%r14d bswapl %r12d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,44(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp addl %r14d,%r8d movl 48(%rsi),%r12d movl %eax,%r13d movl %r8d,%r14d bswapl %r12d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,48(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl 
%r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp addl %r14d,%edx movl 52(%rsi),%r12d movl %r11d,%r13d movl %edx,%r14d bswapl %r12d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,52(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp addl %r14d,%ecx movl 56(%rsi),%r12d movl %r10d,%r13d movl %ecx,%r14d bswapl %r12d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,56(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp addl %r14d,%ebx movl 60(%rsi),%r12d movl %r9d,%r13d movl %ebx,%r14d bswapl %r12d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,60(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp jmp L$rounds_16_xx .p2align 4 L$rounds_16_xx: movl 4(%rsp),%r13d movl 56(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%eax movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 36(%rsp),%r12d addl 0(%rsp),%r12d movl %r8d,%r13d addl %r15d,%r12d movl %eax,%r14d rorl 
$14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,0(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp movl 8(%rsp),%r13d movl 60(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r11d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 40(%rsp),%r12d addl 4(%rsp),%r12d movl %edx,%r13d addl %edi,%r12d movl %r11d,%r14d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,4(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp movl 12(%rsp),%r13d movl 0(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r10d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 44(%rsp),%r12d addl 8(%rsp),%r12d movl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r14d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,8(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp movl 16(%rsp),%r13d movl 4(%rsp),%edi movl 
%r13d,%r12d rorl $11,%r13d addl %r14d,%r9d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 48(%rsp),%r12d addl 12(%rsp),%r12d movl %ebx,%r13d addl %edi,%r12d movl %r9d,%r14d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,12(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp movl 20(%rsp),%r13d movl 8(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r8d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 52(%rsp),%r12d addl 16(%rsp),%r12d movl %eax,%r13d addl %r15d,%r12d movl %r8d,%r14d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,16(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp movl 24(%rsp),%r13d movl 12(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%edx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 56(%rsp),%r12d addl 20(%rsp),%r12d movl %r11d,%r13d addl %edi,%r12d movl %edx,%r14d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,20(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl 
%r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp movl 28(%rsp),%r13d movl 16(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ecx movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 60(%rsp),%r12d addl 24(%rsp),%r12d movl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r14d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,24(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp movl 32(%rsp),%r13d movl 20(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ebx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 0(%rsp),%r12d addl 28(%rsp),%r12d movl %r9d,%r13d addl %edi,%r12d movl %ebx,%r14d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,28(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp movl 36(%rsp),%r13d movl 24(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%eax movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl 
%r14d,%r15d addl 4(%rsp),%r12d addl 32(%rsp),%r12d movl %r8d,%r13d addl %r15d,%r12d movl %eax,%r14d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,32(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp movl 40(%rsp),%r13d movl 28(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r11d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 8(%rsp),%r12d addl 36(%rsp),%r12d movl %edx,%r13d addl %edi,%r12d movl %r11d,%r14d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,36(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp movl 44(%rsp),%r13d movl 32(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r10d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 12(%rsp),%r12d addl 40(%rsp),%r12d movl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r14d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,40(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl 
%r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp movl 48(%rsp),%r13d movl 36(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r9d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 16(%rsp),%r12d addl 44(%rsp),%r12d movl %ebx,%r13d addl %edi,%r12d movl %r9d,%r14d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,44(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp movl 52(%rsp),%r13d movl 40(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r8d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 20(%rsp),%r12d addl 48(%rsp),%r12d movl %eax,%r13d addl %r15d,%r12d movl %r8d,%r14d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,48(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp movl 56(%rsp),%r13d movl 44(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%edx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 24(%rsp),%r12d addl 52(%rsp),%r12d movl %r11d,%r13d addl %edi,%r12d movl %edx,%r14d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi 
movl %r12d,52(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp movl 60(%rsp),%r13d movl 48(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ecx movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 28(%rsp),%r12d addl 56(%rsp),%r12d movl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r14d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,56(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp movl 0(%rsp),%r13d movl 52(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ebx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 32(%rsp),%r12d addl 60(%rsp),%r12d movl %r9d,%r13d addl %edi,%r12d movl %ebx,%r14d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,60(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp cmpb $0,3(%rbp) jnz L$rounds_16_xx movq 64+0(%rsp),%rdi addl %r14d,%eax leaq 64(%rsi),%rsi addl 0(%rdi),%eax addl 
4(%rdi),%ebx addl 8(%rdi),%ecx addl 12(%rdi),%edx addl 16(%rdi),%r8d addl 20(%rdi),%r9d addl 24(%rdi),%r10d addl 28(%rdi),%r11d cmpq 64+16(%rsp),%rsi movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) jb L$loop movq 88(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$epilogue: ret .section __DATA,__const .p2align 6 K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text .globl _sha256_block_data_order_hw .private_extern _sha256_block_data_order_hw .p2align 6 _sha256_block_data_order_hw: _CET_ENDBR leaq K256+128(%rip),%rcx movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 movdqa 512-128(%rcx),%xmm7 pshufd $0x1b,%xmm1,%xmm0 pshufd $0xb1,%xmm1,%xmm1 pshufd $0x1b,%xmm2,%xmm2 movdqa %xmm7,%xmm8 .byte 102,15,58,15,202,8 punpcklqdq %xmm0,%xmm2 jmp L$oop_shaext .p2align 4 L$oop_shaext: movdqu (%rsi),%xmm3 movdqu 16(%rsi),%xmm4 movdqu 32(%rsi),%xmm5 .byte 102,15,56,0,223 movdqu 48(%rsi),%xmm6 movdqa 0-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 102,15,56,0,231 movdqa %xmm2,%xmm10 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 nop movdqa %xmm1,%xmm9 .byte 15,56,203,202 movdqa 32-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 102,15,56,0,239 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 leaq 64(%rsi),%rsi .byte 15,56,204,220 .byte 15,56,203,202 movdqa 64-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 102,15,56,0,247 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm6,%xmm7 .byte 102,15,58,15,253,4 nop paddd %xmm7,%xmm3 .byte 15,56,204,229 .byte 15,56,203,202 movdqa 96-128(%rcx),%xmm0 paddd %xmm6,%xmm0 .byte 15,56,205,222 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm3,%xmm7 .byte 102,15,58,15,254,4 nop paddd %xmm7,%xmm4 .byte 15,56,204,238 .byte 15,56,203,202 movdqa 
128-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 15,56,205,227 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm4,%xmm7 .byte 102,15,58,15,251,4 nop paddd %xmm7,%xmm5 .byte 15,56,204,243 .byte 15,56,203,202 movdqa 160-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 15,56,205,236 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm5,%xmm7 .byte 102,15,58,15,252,4 nop paddd %xmm7,%xmm6 .byte 15,56,204,220 .byte 15,56,203,202 movdqa 192-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 15,56,205,245 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm6,%xmm7 .byte 102,15,58,15,253,4 nop paddd %xmm7,%xmm3 .byte 15,56,204,229 .byte 15,56,203,202 movdqa 224-128(%rcx),%xmm0 paddd %xmm6,%xmm0 .byte 15,56,205,222 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm3,%xmm7 .byte 102,15,58,15,254,4 nop paddd %xmm7,%xmm4 .byte 15,56,204,238 .byte 15,56,203,202 movdqa 256-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 15,56,205,227 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm4,%xmm7 .byte 102,15,58,15,251,4 nop paddd %xmm7,%xmm5 .byte 15,56,204,243 .byte 15,56,203,202 movdqa 288-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 15,56,205,236 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm5,%xmm7 .byte 102,15,58,15,252,4 nop paddd %xmm7,%xmm6 .byte 15,56,204,220 .byte 15,56,203,202 movdqa 320-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 15,56,205,245 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm6,%xmm7 .byte 102,15,58,15,253,4 nop paddd %xmm7,%xmm3 .byte 15,56,204,229 .byte 15,56,203,202 movdqa 352-128(%rcx),%xmm0 paddd %xmm6,%xmm0 .byte 15,56,205,222 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm3,%xmm7 .byte 102,15,58,15,254,4 nop paddd %xmm7,%xmm4 .byte 15,56,204,238 .byte 15,56,203,202 movdqa 384-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 15,56,205,227 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm4,%xmm7 .byte 102,15,58,15,251,4 nop paddd %xmm7,%xmm5 .byte 15,56,204,243 .byte 15,56,203,202 movdqa 416-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 
15,56,205,236 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm5,%xmm7 .byte 102,15,58,15,252,4 .byte 15,56,203,202 paddd %xmm7,%xmm6 movdqa 448-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 .byte 15,56,205,245 movdqa %xmm8,%xmm7 .byte 15,56,203,202 movdqa 480-128(%rcx),%xmm0 paddd %xmm6,%xmm0 nop .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 decq %rdx nop .byte 15,56,203,202 paddd %xmm10,%xmm2 paddd %xmm9,%xmm1 jnz L$oop_shaext pshufd $0xb1,%xmm2,%xmm2 pshufd $0x1b,%xmm1,%xmm7 pshufd $0xb1,%xmm1,%xmm1 punpckhqdq %xmm2,%xmm1 .byte 102,15,58,15,215,8 movdqu %xmm1,(%rdi) movdqu %xmm2,16(%rdi) ret .globl _sha256_block_data_order_ssse3 .private_extern _sha256_block_data_order_ssse3 .p2align 6 _sha256_block_data_order_ssse3: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 shlq $4,%rdx subq $96,%rsp leaq (%rsi,%rdx,4),%rdx andq $-64,%rsp movq %rdi,64+0(%rsp) movq %rsi,64+8(%rsp) movq %rdx,64+16(%rsp) movq %rax,88(%rsp) L$prologue_ssse3: movl 0(%rdi),%eax movl 4(%rdi),%ebx movl 8(%rdi),%ecx movl 12(%rdi),%edx movl 16(%rdi),%r8d movl 20(%rdi),%r9d movl 24(%rdi),%r10d movl 28(%rdi),%r11d jmp L$loop_ssse3 .p2align 4 L$loop_ssse3: movdqa K256+512(%rip),%xmm7 movdqu 0(%rsi),%xmm0 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 .byte 102,15,56,0,199 movdqu 48(%rsi),%xmm3 leaq K256(%rip),%rbp .byte 102,15,56,0,207 movdqa 0(%rbp),%xmm4 movdqa 32(%rbp),%xmm5 .byte 102,15,56,0,215 paddd %xmm0,%xmm4 movdqa 64(%rbp),%xmm6 .byte 102,15,56,0,223 movdqa 96(%rbp),%xmm7 paddd %xmm1,%xmm5 paddd %xmm2,%xmm6 paddd %xmm3,%xmm7 movdqa %xmm4,0(%rsp) movl %eax,%r14d movdqa %xmm5,16(%rsp) movl %ebx,%edi movdqa %xmm6,32(%rsp) xorl %ecx,%edi movdqa %xmm7,48(%rsp) movl %r8d,%r13d jmp L$ssse3_00_47 .p2align 4 L$ssse3_00_47: subq $-128,%rbp rorl $14,%r13d movdqa %xmm1,%xmm4 movl %r14d,%eax movl %r9d,%r12d movdqa %xmm3,%xmm7 rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d .byte 102,15,58,15,224,4 
andl %r8d,%r12d xorl %r8d,%r13d .byte 102,15,58,15,250,4 addl 0(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %ebx,%r15d addl %r12d,%r11d movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi paddd %xmm7,%xmm0 rorl $2,%r14d addl %r11d,%edx psrld $7,%xmm6 addl %edi,%r11d movl %edx,%r13d pshufd $250,%xmm3,%xmm7 addl %r11d,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%r11d movl %r8d,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %r11d,%r14d pxor %xmm5,%xmm4 andl %edx,%r12d xorl %edx,%r13d pslld $11,%xmm5 addl 4(%rsp),%r10d movl %r11d,%edi pxor %xmm6,%xmm4 xorl %r9d,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %eax,%edi addl %r12d,%r10d pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d psrld $10,%xmm7 addl %r13d,%r10d xorl %eax,%r15d paddd %xmm4,%xmm0 rorl $2,%r14d addl %r10d,%ecx psrlq $17,%xmm6 addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %ecx,%r13d xorl %r8d,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d pshufd $128,%xmm7,%xmm7 xorl %ecx,%r13d addl 8(%rsp),%r9d movl %r10d,%r15d psrldq $8,%xmm7 xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d paddd %xmm7,%xmm0 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d pshufd $80,%xmm0,%xmm7 xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx movdqa %xmm7,%xmm6 addl %edi,%r9d movl %ebx,%r13d psrld $10,%xmm7 addl %r9d,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%r9d movl %ecx,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d psrlq $2,%xmm6 andl %ebx,%r12d xorl %ebx,%r13d addl 12(%rsp),%r8d pxor %xmm6,%xmm7 movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %r10d,%edi addl %r12d,%r8d movdqa 0(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq 
$8,%xmm7 xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d paddd %xmm7,%xmm0 rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d paddd %xmm0,%xmm6 movl %eax,%r13d addl %r8d,%r14d movdqa %xmm6,0(%rsp) rorl $14,%r13d movdqa %xmm2,%xmm4 movl %r14d,%r8d movl %ebx,%r12d movdqa %xmm0,%xmm7 rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d .byte 102,15,58,15,225,4 andl %eax,%r12d xorl %eax,%r13d .byte 102,15,58,15,251,4 addl 16(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %r9d,%r15d addl %r12d,%edx movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi paddd %xmm7,%xmm1 rorl $2,%r14d addl %edx,%r11d psrld $7,%xmm6 addl %edi,%edx movl %r11d,%r13d pshufd $250,%xmm0,%xmm7 addl %edx,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%edx movl %eax,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %edx,%r14d pxor %xmm5,%xmm4 andl %r11d,%r12d xorl %r11d,%r13d pslld $11,%xmm5 addl 20(%rsp),%ecx movl %edx,%edi pxor %xmm6,%xmm4 xorl %ebx,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %r8d,%edi addl %r12d,%ecx pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d psrld $10,%xmm7 addl %r13d,%ecx xorl %r8d,%r15d paddd %xmm4,%xmm1 rorl $2,%r14d addl %ecx,%r10d psrlq $17,%xmm6 addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %r10d,%r13d xorl %eax,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d pshufd $128,%xmm7,%xmm7 xorl %r10d,%r13d addl 24(%rsp),%ebx movl %ecx,%r15d psrldq $8,%xmm7 xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d paddd %xmm7,%xmm1 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx pshufd $80,%xmm1,%xmm7 xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d movdqa %xmm7,%xmm6 addl %edi,%ebx movl %r9d,%r13d psrld $10,%xmm7 addl %ebx,%r14d rorl $14,%r13d psrlq $17,%xmm6 
movl %r14d,%ebx movl %r10d,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d psrlq $2,%xmm6 andl %r9d,%r12d xorl %r9d,%r13d addl 28(%rsp),%eax pxor %xmm6,%xmm7 movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %ecx,%edi addl %r12d,%eax movdqa 32(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d paddd %xmm7,%xmm1 rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax paddd %xmm1,%xmm6 movl %r8d,%r13d addl %eax,%r14d movdqa %xmm6,16(%rsp) rorl $14,%r13d movdqa %xmm3,%xmm4 movl %r14d,%eax movl %r9d,%r12d movdqa %xmm1,%xmm7 rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d .byte 102,15,58,15,226,4 andl %r8d,%r12d xorl %r8d,%r13d .byte 102,15,58,15,248,4 addl 32(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %ebx,%r15d addl %r12d,%r11d movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi paddd %xmm7,%xmm2 rorl $2,%r14d addl %r11d,%edx psrld $7,%xmm6 addl %edi,%r11d movl %edx,%r13d pshufd $250,%xmm1,%xmm7 addl %r11d,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%r11d movl %r8d,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %r11d,%r14d pxor %xmm5,%xmm4 andl %edx,%r12d xorl %edx,%r13d pslld $11,%xmm5 addl 36(%rsp),%r10d movl %r11d,%edi pxor %xmm6,%xmm4 xorl %r9d,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %eax,%edi addl %r12d,%r10d pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d psrld $10,%xmm7 addl %r13d,%r10d xorl %eax,%r15d paddd %xmm4,%xmm2 rorl $2,%r14d addl %r10d,%ecx psrlq $17,%xmm6 addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %ecx,%r13d xorl %r8d,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d pshufd $128,%xmm7,%xmm7 xorl 
%ecx,%r13d addl 40(%rsp),%r9d movl %r10d,%r15d psrldq $8,%xmm7 xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d paddd %xmm7,%xmm2 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d pshufd $80,%xmm2,%xmm7 xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx movdqa %xmm7,%xmm6 addl %edi,%r9d movl %ebx,%r13d psrld $10,%xmm7 addl %r9d,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%r9d movl %ecx,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d psrlq $2,%xmm6 andl %ebx,%r12d xorl %ebx,%r13d addl 44(%rsp),%r8d pxor %xmm6,%xmm7 movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %r10d,%edi addl %r12d,%r8d movdqa 64(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d paddd %xmm7,%xmm2 rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d paddd %xmm2,%xmm6 movl %eax,%r13d addl %r8d,%r14d movdqa %xmm6,32(%rsp) rorl $14,%r13d movdqa %xmm0,%xmm4 movl %r14d,%r8d movl %ebx,%r12d movdqa %xmm2,%xmm7 rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d .byte 102,15,58,15,227,4 andl %eax,%r12d xorl %eax,%r13d .byte 102,15,58,15,249,4 addl 48(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %r9d,%r15d addl %r12d,%edx movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi paddd %xmm7,%xmm3 rorl $2,%r14d addl %edx,%r11d psrld $7,%xmm6 addl %edi,%edx movl %r11d,%r13d pshufd $250,%xmm2,%xmm7 addl %edx,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%edx movl %eax,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %edx,%r14d pxor %xmm5,%xmm4 andl %r11d,%r12d xorl %r11d,%r13d pslld $11,%xmm5 addl 52(%rsp),%ecx movl %edx,%edi pxor %xmm6,%xmm4 xorl %ebx,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %r8d,%edi addl %r12d,%ecx pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d psrld 
$10,%xmm7 addl %r13d,%ecx xorl %r8d,%r15d paddd %xmm4,%xmm3 rorl $2,%r14d addl %ecx,%r10d psrlq $17,%xmm6 addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %r10d,%r13d xorl %eax,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d pshufd $128,%xmm7,%xmm7 xorl %r10d,%r13d addl 56(%rsp),%ebx movl %ecx,%r15d psrldq $8,%xmm7 xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d paddd %xmm7,%xmm3 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx pshufd $80,%xmm3,%xmm7 xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d movdqa %xmm7,%xmm6 addl %edi,%ebx movl %r9d,%r13d psrld $10,%xmm7 addl %ebx,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%ebx movl %r10d,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d psrlq $2,%xmm6 andl %r9d,%r12d xorl %r9d,%r13d addl 60(%rsp),%eax pxor %xmm6,%xmm7 movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %ecx,%edi addl %r12d,%eax movdqa 96(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d paddd %xmm7,%xmm3 rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax paddd %xmm3,%xmm6 movl %r8d,%r13d addl %eax,%r14d movdqa %xmm6,48(%rsp) cmpb $0,131(%rbp) jne L$ssse3_00_47 rorl $14,%r13d movl %r14d,%eax movl %r9d,%r12d rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 0(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d xorl %ebx,%r15d addl %r12d,%r11d rorl $6,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi rorl $2,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d rorl $14,%r13d movl %r14d,%r11d movl %r8d,%r12d rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 4(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d rorl $11,%r14d xorl 
%eax,%edi addl %r12d,%r10d rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d rorl $2,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d xorl %ecx,%r13d xorl %r8d,%r12d rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 8(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d rorl $14,%r13d movl %r14d,%r9d movl %ecx,%r12d rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 12(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d xorl %r10d,%edi addl %r12d,%r8d rorl $6,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d rorl $14,%r13d movl %r14d,%r8d movl %ebx,%r12d rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 16(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d xorl %r9d,%r15d addl %r12d,%edx rorl $6,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi rorl $2,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d rorl $14,%r13d movl %r14d,%edx movl %eax,%r12d rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 20(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d rorl $11,%r14d xorl %r8d,%edi addl %r12d,%ecx rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d rorl $2,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d xorl %r10d,%r13d xorl %eax,%r12d rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 
24(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d rorl $14,%r13d movl %r14d,%ebx movl %r10d,%r12d rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 28(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d xorl %ecx,%edi addl %r12d,%eax rorl $6,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d rorl $14,%r13d movl %r14d,%eax movl %r9d,%r12d rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 32(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d xorl %ebx,%r15d addl %r12d,%r11d rorl $6,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi rorl $2,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d rorl $14,%r13d movl %r14d,%r11d movl %r8d,%r12d rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 36(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d rorl $11,%r14d xorl %eax,%edi addl %r12d,%r10d rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d rorl $2,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d xorl %ecx,%r13d xorl %r8d,%r12d rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 40(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d rorl $14,%r13d movl %r14d,%r9d movl %ecx,%r12d rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl 
$5,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 44(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d xorl %r10d,%edi addl %r12d,%r8d rorl $6,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d rorl $14,%r13d movl %r14d,%r8d movl %ebx,%r12d rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 48(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d xorl %r9d,%r15d addl %r12d,%edx rorl $6,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi rorl $2,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d rorl $14,%r13d movl %r14d,%edx movl %eax,%r12d rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 52(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d rorl $11,%r14d xorl %r8d,%edi addl %r12d,%ecx rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d rorl $2,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d xorl %r10d,%r13d xorl %eax,%r12d rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 56(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d rorl $14,%r13d movl %r14d,%ebx movl %r10d,%r12d rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 60(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d xorl %ecx,%edi addl %r12d,%eax rorl $6,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d movq 64+0(%rsp),%rdi movl %r14d,%eax addl 
0(%rdi),%eax leaq 64(%rsi),%rsi addl 4(%rdi),%ebx addl 8(%rdi),%ecx addl 12(%rdi),%edx addl 16(%rdi),%r8d addl 20(%rdi),%r9d addl 24(%rdi),%r10d addl 28(%rdi),%r11d cmpq 64+16(%rsp),%rsi movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) jb L$loop_ssse3 movq 88(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$epilogue_ssse3: ret .globl _sha256_block_data_order_avx .private_extern _sha256_block_data_order_avx .p2align 6 _sha256_block_data_order_avx: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 shlq $4,%rdx subq $96,%rsp leaq (%rsi,%rdx,4),%rdx andq $-64,%rsp movq %rdi,64+0(%rsp) movq %rsi,64+8(%rsp) movq %rdx,64+16(%rsp) movq %rax,88(%rsp) L$prologue_avx: vzeroupper movl 0(%rdi),%eax movl 4(%rdi),%ebx movl 8(%rdi),%ecx movl 12(%rdi),%edx movl 16(%rdi),%r8d movl 20(%rdi),%r9d movl 24(%rdi),%r10d movl 28(%rdi),%r11d vmovdqa K256+512+32(%rip),%xmm8 vmovdqa K256+512+64(%rip),%xmm9 jmp L$loop_avx .p2align 4 L$loop_avx: vmovdqa K256+512(%rip),%xmm7 vmovdqu 0(%rsi),%xmm0 vmovdqu 16(%rsi),%xmm1 vmovdqu 32(%rsi),%xmm2 vmovdqu 48(%rsi),%xmm3 vpshufb %xmm7,%xmm0,%xmm0 leaq K256(%rip),%rbp vpshufb %xmm7,%xmm1,%xmm1 vpshufb %xmm7,%xmm2,%xmm2 vpaddd 0(%rbp),%xmm0,%xmm4 vpshufb %xmm7,%xmm3,%xmm3 vpaddd 32(%rbp),%xmm1,%xmm5 vpaddd 64(%rbp),%xmm2,%xmm6 vpaddd 96(%rbp),%xmm3,%xmm7 vmovdqa %xmm4,0(%rsp) movl %eax,%r14d vmovdqa %xmm5,16(%rsp) movl %ebx,%edi vmovdqa %xmm6,32(%rsp) xorl %ecx,%edi vmovdqa %xmm7,48(%rsp) movl %r8d,%r13d jmp L$avx_00_47 .p2align 4 L$avx_00_47: subq $-128,%rbp vpalignr $4,%xmm0,%xmm1,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d vpalignr $4,%xmm2,%xmm3,%xmm7 shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d vpaddd 
%xmm7,%xmm0,%xmm0 xorl %r8d,%r13d addl 0(%rsp),%r11d movl %eax,%r15d vpsrld $3,%xmm4,%xmm7 xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi vpshufd $250,%xmm3,%xmm7 shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d vpsrld $11,%xmm6,%xmm6 movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d vpsrld $10,%xmm7,%xmm6 addl 4(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d vpaddd %xmm4,%xmm0,%xmm0 addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d vpsrlq $2,%xmm7,%xmm7 addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d vpxor %xmm7,%xmm6,%xmm6 movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d vpaddd %xmm6,%xmm0,%xmm0 andl %ecx,%r12d xorl %ecx,%r13d addl 8(%rsp),%r9d vpshufd $80,%xmm0,%xmm7 movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d vpxor %xmm7,%xmm6,%xmm6 xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx vpsrlq $2,%xmm7,%xmm7 addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d vpaddd %xmm6,%xmm0,%xmm0 shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d vpaddd 
0(%rbp),%xmm0,%xmm6 xorl %ebx,%r13d addl 12(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d vmovdqa %xmm6,0(%rsp) vpalignr $4,%xmm1,%xmm2,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d vpalignr $4,%xmm3,%xmm0,%xmm7 shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d vpaddd %xmm7,%xmm1,%xmm1 xorl %eax,%r13d addl 16(%rsp),%edx movl %r8d,%r15d vpsrld $3,%xmm4,%xmm7 xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi vpshufd $250,%xmm0,%xmm7 shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx vpsrld $11,%xmm6,%xmm6 movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d vpsrld $10,%xmm7,%xmm6 addl 20(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d vpaddd %xmm4,%xmm1,%xmm1 addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d vpsrlq $2,%xmm7,%xmm7 addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx vpxor %xmm7,%xmm6,%xmm6 movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d vpaddd %xmm6,%xmm1,%xmm1 andl %r10d,%r12d xorl %r10d,%r13d addl 24(%rsp),%ebx vpshufd $80,%xmm1,%xmm7 movl %ecx,%r15d xorl 
%eax,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx vpxor %xmm7,%xmm6,%xmm6 xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d vpsrlq $2,%xmm7,%xmm7 addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d vpaddd %xmm6,%xmm1,%xmm1 shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d vpaddd 32(%rbp),%xmm1,%xmm6 xorl %r9d,%r13d addl 28(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d vmovdqa %xmm6,16(%rsp) vpalignr $4,%xmm2,%xmm3,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d vpalignr $4,%xmm0,%xmm1,%xmm7 shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d vpaddd %xmm7,%xmm2,%xmm2 xorl %r8d,%r13d addl 32(%rsp),%r11d movl %eax,%r15d vpsrld $3,%xmm4,%xmm7 xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi vpshufd $250,%xmm1,%xmm7 shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d vpsrld $11,%xmm6,%xmm6 movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d vpsrld $10,%xmm7,%xmm6 addl 36(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d vpsrlq 
$17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d vpaddd %xmm4,%xmm2,%xmm2 addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d vpsrlq $2,%xmm7,%xmm7 addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d vpxor %xmm7,%xmm6,%xmm6 movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d vpaddd %xmm6,%xmm2,%xmm2 andl %ecx,%r12d xorl %ecx,%r13d addl 40(%rsp),%r9d vpshufd $80,%xmm2,%xmm7 movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d vpxor %xmm7,%xmm6,%xmm6 xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx vpsrlq $2,%xmm7,%xmm7 addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d vpaddd %xmm6,%xmm2,%xmm2 shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d vpaddd 64(%rbp),%xmm2,%xmm6 xorl %ebx,%r13d addl 44(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d vmovdqa %xmm6,32(%rsp) vpalignr $4,%xmm3,%xmm0,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d vpalignr $4,%xmm1,%xmm2,%xmm7 shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d vpaddd %xmm7,%xmm3,%xmm3 xorl %eax,%r13d addl 48(%rsp),%edx movl %r8d,%r15d vpsrld $3,%xmm4,%xmm7 xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %r8d,%r14d addl 
%r13d,%edx xorl %r9d,%edi vpshufd $250,%xmm2,%xmm7 shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx vpsrld $11,%xmm6,%xmm6 movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d vpsrld $10,%xmm7,%xmm6 addl 52(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d vpaddd %xmm4,%xmm3,%xmm3 addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d vpsrlq $2,%xmm7,%xmm7 addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx vpxor %xmm7,%xmm6,%xmm6 movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d vpaddd %xmm6,%xmm3,%xmm3 andl %r10d,%r12d xorl %r10d,%r13d addl 56(%rsp),%ebx vpshufd $80,%xmm3,%xmm7 movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx vpxor %xmm7,%xmm6,%xmm6 xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d vpsrlq $2,%xmm7,%xmm7 addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d vpaddd %xmm6,%xmm3,%xmm3 shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d vpaddd 96(%rbp),%xmm3,%xmm6 xorl %r9d,%r13d addl 60(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl 
%r8d,%r13d addl %eax,%r14d vmovdqa %xmm6,48(%rsp) cmpb $0,131(%rbp) jne L$avx_00_47 shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 0(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 4(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 8(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 12(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl 
%ecx,%r12d shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 16(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 20(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 24(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 28(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 32(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d addl %r12d,%r11d 
shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 36(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 40(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 44(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 48(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d shrdl 
$14,%r13d,%r13d movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 52(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 56(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 60(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d movq 64+0(%rsp),%rdi movl %r14d,%eax addl 0(%rdi),%eax leaq 64(%rsi),%rsi addl 4(%rdi),%ebx addl 8(%rdi),%ecx addl 12(%rdi),%edx addl 16(%rdi),%r8d addl 20(%rdi),%r9d addl 24(%rdi),%r10d addl 28(%rdi),%r11d cmpq 64+16(%rsp),%rsi movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) jb L$loop_avx movq 88(%rsp),%rsi vzeroupper movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$epilogue_avx: ret #endif #if defined(__linux__) && defined(__ELF__) .section 
.note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha256-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl sha256_block_data_order_nohw .hidden sha256_block_data_order_nohw .type sha256_block_data_order_nohw,@function .align 16 sha256_block_data_order_nohw: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 shlq $4,%rdx subq $64+32,%rsp leaq (%rsi,%rdx,4),%rdx andq $-64,%rsp movq %rdi,64+0(%rsp) movq %rsi,64+8(%rsp) movq %rdx,64+16(%rsp) movq %rax,88(%rsp) .cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08 .Lprologue: movl 0(%rdi),%eax movl 4(%rdi),%ebx movl 8(%rdi),%ecx movl 12(%rdi),%edx movl 16(%rdi),%r8d movl 20(%rdi),%r9d movl 24(%rdi),%r10d movl 28(%rdi),%r11d jmp .Lloop .align 16 .Lloop: movl %ebx,%edi leaq K256(%rip),%rbp xorl %ecx,%edi movl 0(%rsi),%r12d movl %r8d,%r13d movl %eax,%r14d bswapl %r12d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,0(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp addl %r14d,%r11d movl 4(%rsi),%r12d movl %edx,%r13d movl %r11d,%r14d bswapl %r12d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,4(%rsp) xorl %r11d,%r14d andl 
%edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp addl %r14d,%r10d movl 8(%rsi),%r12d movl %ecx,%r13d movl %r10d,%r14d bswapl %r12d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,8(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp addl %r14d,%r9d movl 12(%rsi),%r12d movl %ebx,%r13d movl %r9d,%r14d bswapl %r12d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,12(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp addl %r14d,%r8d movl 16(%rsi),%r12d movl %eax,%r13d movl %r8d,%r14d bswapl %r12d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,16(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp addl %r14d,%edx movl 20(%rsi),%r12d movl %r11d,%r13d movl %edx,%r14d bswapl %r12d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl 
%ebx,%edi movl %r12d,20(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp addl %r14d,%ecx movl 24(%rsi),%r12d movl %r10d,%r13d movl %ecx,%r14d bswapl %r12d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,24(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp addl %r14d,%ebx movl 28(%rsi),%r12d movl %r9d,%r13d movl %ebx,%r14d bswapl %r12d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,28(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp addl %r14d,%eax movl 32(%rsi),%r12d movl %r8d,%r13d movl %eax,%r14d bswapl %r12d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,32(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp addl %r14d,%r11d movl 36(%rsi),%r12d movl %edx,%r13d movl %r11d,%r14d bswapl %r12d rorl 
$14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,36(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp addl %r14d,%r10d movl 40(%rsi),%r12d movl %ecx,%r13d movl %r10d,%r14d bswapl %r12d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,40(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp addl %r14d,%r9d movl 44(%rsi),%r12d movl %ebx,%r13d movl %r9d,%r14d bswapl %r12d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,44(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp addl %r14d,%r8d movl 48(%rsi),%r12d movl %eax,%r13d movl %r8d,%r14d bswapl %r12d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,48(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp addl %r14d,%edx movl 52(%rsi),%r12d 
movl %r11d,%r13d movl %edx,%r14d bswapl %r12d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,52(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp addl %r14d,%ecx movl 56(%rsi),%r12d movl %r10d,%r13d movl %ecx,%r14d bswapl %r12d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,56(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp addl %r14d,%ebx movl 60(%rsi),%r12d movl %r9d,%r13d movl %ebx,%r14d bswapl %r12d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,60(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp jmp .Lrounds_16_xx .align 16 .Lrounds_16_xx: movl 4(%rsp),%r13d movl 56(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%eax movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 36(%rsp),%r12d addl 0(%rsp),%r12d movl %r8d,%r13d addl %r15d,%r12d movl %eax,%r14d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,0(%rsp) xorl %eax,%r14d andl 
%r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp movl 8(%rsp),%r13d movl 60(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r11d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 40(%rsp),%r12d addl 4(%rsp),%r12d movl %edx,%r13d addl %edi,%r12d movl %r11d,%r14d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,4(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp movl 12(%rsp),%r13d movl 0(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r10d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 44(%rsp),%r12d addl 8(%rsp),%r12d movl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r14d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,8(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp movl 16(%rsp),%r13d movl 4(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r9d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl 
$7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 48(%rsp),%r12d addl 12(%rsp),%r12d movl %ebx,%r13d addl %edi,%r12d movl %r9d,%r14d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,12(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp movl 20(%rsp),%r13d movl 8(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r8d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 52(%rsp),%r12d addl 16(%rsp),%r12d movl %eax,%r13d addl %r15d,%r12d movl %r8d,%r14d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,16(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp movl 24(%rsp),%r13d movl 12(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%edx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 56(%rsp),%r12d addl 20(%rsp),%r12d movl %r11d,%r13d addl %edi,%r12d movl %edx,%r14d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,20(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl 
%r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp movl 28(%rsp),%r13d movl 16(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ecx movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 60(%rsp),%r12d addl 24(%rsp),%r12d movl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r14d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,24(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp movl 32(%rsp),%r13d movl 20(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ebx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 0(%rsp),%r12d addl 28(%rsp),%r12d movl %r9d,%r13d addl %edi,%r12d movl %ebx,%r14d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,28(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp movl 36(%rsp),%r13d movl 24(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%eax movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 4(%rsp),%r12d addl 32(%rsp),%r12d movl %r8d,%r13d addl %r15d,%r12d movl %eax,%r14d rorl $14,%r13d movl 
%r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,32(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp movl 40(%rsp),%r13d movl 28(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r11d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 8(%rsp),%r12d addl 36(%rsp),%r12d movl %edx,%r13d addl %edi,%r12d movl %r11d,%r14d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,36(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp movl 44(%rsp),%r13d movl 32(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r10d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 12(%rsp),%r12d addl 40(%rsp),%r12d movl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r14d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,40(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp movl 48(%rsp),%r13d movl 36(%rsp),%edi movl 
%r13d,%r12d rorl $11,%r13d addl %r14d,%r9d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 16(%rsp),%r12d addl 44(%rsp),%r12d movl %ebx,%r13d addl %edi,%r12d movl %r9d,%r14d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,44(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp movl 52(%rsp),%r13d movl 40(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r8d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 20(%rsp),%r12d addl 48(%rsp),%r12d movl %eax,%r13d addl %r15d,%r12d movl %r8d,%r14d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,48(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp movl 56(%rsp),%r13d movl 44(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%edx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 24(%rsp),%r12d addl 52(%rsp),%r12d movl %r11d,%r13d addl %edi,%r12d movl %edx,%r14d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,52(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl 
%r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp movl 60(%rsp),%r13d movl 48(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ecx movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 28(%rsp),%r12d addl 56(%rsp),%r12d movl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r14d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,56(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp movl 0(%rsp),%r13d movl 52(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ebx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 32(%rsp),%r12d addl 60(%rsp),%r12d movl %r9d,%r13d addl %edi,%r12d movl %ebx,%r14d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,60(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp cmpb $0,3(%rbp) jnz .Lrounds_16_xx movq 64+0(%rsp),%rdi addl %r14d,%eax leaq 64(%rsi),%rsi addl 0(%rdi),%eax addl 4(%rdi),%ebx addl 8(%rdi),%ecx addl 12(%rdi),%edx addl 16(%rdi),%r8d addl 20(%rdi),%r9d addl 24(%rdi),%r10d addl 28(%rdi),%r11d 
cmpq 64+16(%rsp),%rsi movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) jb .Lloop movq 88(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lepilogue: ret .cfi_endproc .size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw .section .rodata .align 64 .type K256,@object K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 
0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text .globl sha256_block_data_order_hw .hidden sha256_block_data_order_hw .type sha256_block_data_order_hw,@function .align 64 sha256_block_data_order_hw: .cfi_startproc _CET_ENDBR leaq K256+128(%rip),%rcx movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 movdqa 512-128(%rcx),%xmm7 pshufd $0x1b,%xmm1,%xmm0 pshufd $0xb1,%xmm1,%xmm1 pshufd $0x1b,%xmm2,%xmm2 movdqa %xmm7,%xmm8 .byte 102,15,58,15,202,8 punpcklqdq %xmm0,%xmm2 jmp .Loop_shaext .align 16 .Loop_shaext: movdqu (%rsi),%xmm3 movdqu 16(%rsi),%xmm4 movdqu 32(%rsi),%xmm5 .byte 102,15,56,0,223 movdqu 48(%rsi),%xmm6 movdqa 0-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 102,15,56,0,231 movdqa %xmm2,%xmm10 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 nop movdqa %xmm1,%xmm9 .byte 15,56,203,202 movdqa 32-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 102,15,56,0,239 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 leaq 64(%rsi),%rsi .byte 15,56,204,220 .byte 15,56,203,202 movdqa 64-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 102,15,56,0,247 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm6,%xmm7 .byte 102,15,58,15,253,4 nop paddd %xmm7,%xmm3 .byte 15,56,204,229 .byte 15,56,203,202 movdqa 96-128(%rcx),%xmm0 paddd %xmm6,%xmm0 .byte 15,56,205,222 .byte 15,56,203,209 
pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm3,%xmm7 .byte 102,15,58,15,254,4 nop paddd %xmm7,%xmm4 .byte 15,56,204,238 .byte 15,56,203,202 movdqa 128-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 15,56,205,227 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm4,%xmm7 .byte 102,15,58,15,251,4 nop paddd %xmm7,%xmm5 .byte 15,56,204,243 .byte 15,56,203,202 movdqa 160-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 15,56,205,236 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm5,%xmm7 .byte 102,15,58,15,252,4 nop paddd %xmm7,%xmm6 .byte 15,56,204,220 .byte 15,56,203,202 movdqa 192-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 15,56,205,245 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm6,%xmm7 .byte 102,15,58,15,253,4 nop paddd %xmm7,%xmm3 .byte 15,56,204,229 .byte 15,56,203,202 movdqa 224-128(%rcx),%xmm0 paddd %xmm6,%xmm0 .byte 15,56,205,222 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm3,%xmm7 .byte 102,15,58,15,254,4 nop paddd %xmm7,%xmm4 .byte 15,56,204,238 .byte 15,56,203,202 movdqa 256-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 15,56,205,227 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm4,%xmm7 .byte 102,15,58,15,251,4 nop paddd %xmm7,%xmm5 .byte 15,56,204,243 .byte 15,56,203,202 movdqa 288-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 15,56,205,236 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm5,%xmm7 .byte 102,15,58,15,252,4 nop paddd %xmm7,%xmm6 .byte 15,56,204,220 .byte 15,56,203,202 movdqa 320-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 15,56,205,245 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm6,%xmm7 .byte 102,15,58,15,253,4 nop paddd %xmm7,%xmm3 .byte 15,56,204,229 .byte 15,56,203,202 movdqa 352-128(%rcx),%xmm0 paddd %xmm6,%xmm0 .byte 15,56,205,222 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm3,%xmm7 .byte 102,15,58,15,254,4 nop paddd %xmm7,%xmm4 .byte 15,56,204,238 .byte 15,56,203,202 movdqa 384-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 15,56,205,227 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm4,%xmm7 .byte 
102,15,58,15,251,4 nop paddd %xmm7,%xmm5 .byte 15,56,204,243 .byte 15,56,203,202 movdqa 416-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 15,56,205,236 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm5,%xmm7 .byte 102,15,58,15,252,4 .byte 15,56,203,202 paddd %xmm7,%xmm6 movdqa 448-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 .byte 15,56,205,245 movdqa %xmm8,%xmm7 .byte 15,56,203,202 movdqa 480-128(%rcx),%xmm0 paddd %xmm6,%xmm0 nop .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 decq %rdx nop .byte 15,56,203,202 paddd %xmm10,%xmm2 paddd %xmm9,%xmm1 jnz .Loop_shaext pshufd $0xb1,%xmm2,%xmm2 pshufd $0x1b,%xmm1,%xmm7 pshufd $0xb1,%xmm1,%xmm1 punpckhqdq %xmm2,%xmm1 .byte 102,15,58,15,215,8 movdqu %xmm1,(%rdi) movdqu %xmm2,16(%rdi) ret .cfi_endproc .size sha256_block_data_order_hw,.-sha256_block_data_order_hw .globl sha256_block_data_order_ssse3 .hidden sha256_block_data_order_ssse3 .type sha256_block_data_order_ssse3,@function .align 64 sha256_block_data_order_ssse3: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 shlq $4,%rdx subq $96,%rsp leaq (%rsi,%rdx,4),%rdx andq $-64,%rsp movq %rdi,64+0(%rsp) movq %rsi,64+8(%rsp) movq %rdx,64+16(%rsp) movq %rax,88(%rsp) .cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08 .Lprologue_ssse3: movl 0(%rdi),%eax movl 4(%rdi),%ebx movl 8(%rdi),%ecx movl 12(%rdi),%edx movl 16(%rdi),%r8d movl 20(%rdi),%r9d movl 24(%rdi),%r10d movl 28(%rdi),%r11d jmp .Lloop_ssse3 .align 16 .Lloop_ssse3: movdqa K256+512(%rip),%xmm7 movdqu 0(%rsi),%xmm0 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 .byte 102,15,56,0,199 movdqu 48(%rsi),%xmm3 leaq K256(%rip),%rbp .byte 102,15,56,0,207 movdqa 0(%rbp),%xmm4 movdqa 32(%rbp),%xmm5 .byte 102,15,56,0,215 paddd %xmm0,%xmm4 movdqa 64(%rbp),%xmm6 .byte 102,15,56,0,223 movdqa 
96(%rbp),%xmm7 paddd %xmm1,%xmm5 paddd %xmm2,%xmm6 paddd %xmm3,%xmm7 movdqa %xmm4,0(%rsp) movl %eax,%r14d movdqa %xmm5,16(%rsp) movl %ebx,%edi movdqa %xmm6,32(%rsp) xorl %ecx,%edi movdqa %xmm7,48(%rsp) movl %r8d,%r13d jmp .Lssse3_00_47 .align 16 .Lssse3_00_47: subq $-128,%rbp rorl $14,%r13d movdqa %xmm1,%xmm4 movl %r14d,%eax movl %r9d,%r12d movdqa %xmm3,%xmm7 rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d .byte 102,15,58,15,224,4 andl %r8d,%r12d xorl %r8d,%r13d .byte 102,15,58,15,250,4 addl 0(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %ebx,%r15d addl %r12d,%r11d movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi paddd %xmm7,%xmm0 rorl $2,%r14d addl %r11d,%edx psrld $7,%xmm6 addl %edi,%r11d movl %edx,%r13d pshufd $250,%xmm3,%xmm7 addl %r11d,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%r11d movl %r8d,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %r11d,%r14d pxor %xmm5,%xmm4 andl %edx,%r12d xorl %edx,%r13d pslld $11,%xmm5 addl 4(%rsp),%r10d movl %r11d,%edi pxor %xmm6,%xmm4 xorl %r9d,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %eax,%edi addl %r12d,%r10d pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d psrld $10,%xmm7 addl %r13d,%r10d xorl %eax,%r15d paddd %xmm4,%xmm0 rorl $2,%r14d addl %r10d,%ecx psrlq $17,%xmm6 addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %ecx,%r13d xorl %r8d,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d pshufd $128,%xmm7,%xmm7 xorl %ecx,%r13d addl 8(%rsp),%r9d movl %r10d,%r15d psrldq $8,%xmm7 xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d paddd %xmm7,%xmm0 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d pshufd $80,%xmm0,%xmm7 xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx movdqa 
%xmm7,%xmm6 addl %edi,%r9d movl %ebx,%r13d psrld $10,%xmm7 addl %r9d,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%r9d movl %ecx,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d psrlq $2,%xmm6 andl %ebx,%r12d xorl %ebx,%r13d addl 12(%rsp),%r8d pxor %xmm6,%xmm7 movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %r10d,%edi addl %r12d,%r8d movdqa 0(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d paddd %xmm7,%xmm0 rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d paddd %xmm0,%xmm6 movl %eax,%r13d addl %r8d,%r14d movdqa %xmm6,0(%rsp) rorl $14,%r13d movdqa %xmm2,%xmm4 movl %r14d,%r8d movl %ebx,%r12d movdqa %xmm0,%xmm7 rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d .byte 102,15,58,15,225,4 andl %eax,%r12d xorl %eax,%r13d .byte 102,15,58,15,251,4 addl 16(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %r9d,%r15d addl %r12d,%edx movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi paddd %xmm7,%xmm1 rorl $2,%r14d addl %edx,%r11d psrld $7,%xmm6 addl %edi,%edx movl %r11d,%r13d pshufd $250,%xmm0,%xmm7 addl %edx,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%edx movl %eax,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %edx,%r14d pxor %xmm5,%xmm4 andl %r11d,%r12d xorl %r11d,%r13d pslld $11,%xmm5 addl 20(%rsp),%ecx movl %edx,%edi pxor %xmm6,%xmm4 xorl %ebx,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %r8d,%edi addl %r12d,%ecx pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d psrld $10,%xmm7 addl %r13d,%ecx xorl %r8d,%r15d paddd %xmm4,%xmm1 rorl $2,%r14d addl %ecx,%r10d psrlq $17,%xmm6 addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %r10d,%r13d xorl %eax,%r12d pxor 
%xmm6,%xmm7 rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d pshufd $128,%xmm7,%xmm7 xorl %r10d,%r13d addl 24(%rsp),%ebx movl %ecx,%r15d psrldq $8,%xmm7 xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d paddd %xmm7,%xmm1 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx pshufd $80,%xmm1,%xmm7 xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d movdqa %xmm7,%xmm6 addl %edi,%ebx movl %r9d,%r13d psrld $10,%xmm7 addl %ebx,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%ebx movl %r10d,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d psrlq $2,%xmm6 andl %r9d,%r12d xorl %r9d,%r13d addl 28(%rsp),%eax pxor %xmm6,%xmm7 movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %ecx,%edi addl %r12d,%eax movdqa 32(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d paddd %xmm7,%xmm1 rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax paddd %xmm1,%xmm6 movl %r8d,%r13d addl %eax,%r14d movdqa %xmm6,16(%rsp) rorl $14,%r13d movdqa %xmm3,%xmm4 movl %r14d,%eax movl %r9d,%r12d movdqa %xmm1,%xmm7 rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d .byte 102,15,58,15,226,4 andl %r8d,%r12d xorl %r8d,%r13d .byte 102,15,58,15,248,4 addl 32(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %ebx,%r15d addl %r12d,%r11d movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi paddd %xmm7,%xmm2 rorl $2,%r14d addl %r11d,%edx psrld $7,%xmm6 addl %edi,%r11d movl %edx,%r13d pshufd $250,%xmm1,%xmm7 addl %r11d,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%r11d movl %r8d,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %r11d,%r14d pxor %xmm5,%xmm4 andl %edx,%r12d xorl %edx,%r13d pslld $11,%xmm5 addl 36(%rsp),%r10d movl %r11d,%edi pxor %xmm6,%xmm4 xorl %r9d,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl 
%eax,%edi addl %r12d,%r10d pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d psrld $10,%xmm7 addl %r13d,%r10d xorl %eax,%r15d paddd %xmm4,%xmm2 rorl $2,%r14d addl %r10d,%ecx psrlq $17,%xmm6 addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %ecx,%r13d xorl %r8d,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d pshufd $128,%xmm7,%xmm7 xorl %ecx,%r13d addl 40(%rsp),%r9d movl %r10d,%r15d psrldq $8,%xmm7 xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d paddd %xmm7,%xmm2 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d pshufd $80,%xmm2,%xmm7 xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx movdqa %xmm7,%xmm6 addl %edi,%r9d movl %ebx,%r13d psrld $10,%xmm7 addl %r9d,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%r9d movl %ecx,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d psrlq $2,%xmm6 andl %ebx,%r12d xorl %ebx,%r13d addl 44(%rsp),%r8d pxor %xmm6,%xmm7 movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %r10d,%edi addl %r12d,%r8d movdqa 64(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d paddd %xmm7,%xmm2 rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d paddd %xmm2,%xmm6 movl %eax,%r13d addl %r8d,%r14d movdqa %xmm6,32(%rsp) rorl $14,%r13d movdqa %xmm0,%xmm4 movl %r14d,%r8d movl %ebx,%r12d movdqa %xmm2,%xmm7 rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d .byte 102,15,58,15,227,4 andl %eax,%r12d xorl %eax,%r13d .byte 102,15,58,15,249,4 addl 48(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %r9d,%r15d addl %r12d,%edx movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi paddd %xmm7,%xmm3 rorl $2,%r14d addl %edx,%r11d psrld $7,%xmm6 addl %edi,%edx movl %r11d,%r13d pshufd 
$250,%xmm2,%xmm7 addl %edx,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%edx movl %eax,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %edx,%r14d pxor %xmm5,%xmm4 andl %r11d,%r12d xorl %r11d,%r13d pslld $11,%xmm5 addl 52(%rsp),%ecx movl %edx,%edi pxor %xmm6,%xmm4 xorl %ebx,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %r8d,%edi addl %r12d,%ecx pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d psrld $10,%xmm7 addl %r13d,%ecx xorl %r8d,%r15d paddd %xmm4,%xmm3 rorl $2,%r14d addl %ecx,%r10d psrlq $17,%xmm6 addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %r10d,%r13d xorl %eax,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d pshufd $128,%xmm7,%xmm7 xorl %r10d,%r13d addl 56(%rsp),%ebx movl %ecx,%r15d psrldq $8,%xmm7 xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d paddd %xmm7,%xmm3 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx pshufd $80,%xmm3,%xmm7 xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d movdqa %xmm7,%xmm6 addl %edi,%ebx movl %r9d,%r13d psrld $10,%xmm7 addl %ebx,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%ebx movl %r10d,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d psrlq $2,%xmm6 andl %r9d,%r12d xorl %r9d,%r13d addl 60(%rsp),%eax pxor %xmm6,%xmm7 movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %ecx,%edi addl %r12d,%eax movdqa 96(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d paddd %xmm7,%xmm3 rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax paddd %xmm3,%xmm6 movl %r8d,%r13d addl %eax,%r14d movdqa %xmm6,48(%rsp) cmpb $0,131(%rbp) jne .Lssse3_00_47 rorl $14,%r13d movl %r14d,%eax movl %r9d,%r12d rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 
0(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d xorl %ebx,%r15d addl %r12d,%r11d rorl $6,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi rorl $2,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d rorl $14,%r13d movl %r14d,%r11d movl %r8d,%r12d rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 4(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d rorl $11,%r14d xorl %eax,%edi addl %r12d,%r10d rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d rorl $2,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d xorl %ecx,%r13d xorl %r8d,%r12d rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 8(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d rorl $14,%r13d movl %r14d,%r9d movl %ecx,%r12d rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 12(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d xorl %r10d,%edi addl %r12d,%r8d rorl $6,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d rorl $14,%r13d movl %r14d,%r8d movl %ebx,%r12d rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 16(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d xorl %r9d,%r15d addl %r12d,%edx rorl $6,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi rorl $2,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d rorl $14,%r13d movl %r14d,%edx movl %eax,%r12d rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl 
$5,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 20(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d rorl $11,%r14d xorl %r8d,%edi addl %r12d,%ecx rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d rorl $2,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d xorl %r10d,%r13d xorl %eax,%r12d rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 24(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d rorl $14,%r13d movl %r14d,%ebx movl %r10d,%r12d rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 28(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d xorl %ecx,%edi addl %r12d,%eax rorl $6,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d rorl $14,%r13d movl %r14d,%eax movl %r9d,%r12d rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 32(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d xorl %ebx,%r15d addl %r12d,%r11d rorl $6,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi rorl $2,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d rorl $14,%r13d movl %r14d,%r11d movl %r8d,%r12d rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 36(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d rorl $11,%r14d xorl %eax,%edi addl %r12d,%r10d rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d rorl $2,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d rorl $14,%r13d movl %r14d,%r10d movl 
%edx,%r12d rorl $9,%r14d xorl %ecx,%r13d xorl %r8d,%r12d rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 40(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d rorl $14,%r13d movl %r14d,%r9d movl %ecx,%r12d rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 44(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d xorl %r10d,%edi addl %r12d,%r8d rorl $6,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d rorl $14,%r13d movl %r14d,%r8d movl %ebx,%r12d rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 48(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d xorl %r9d,%r15d addl %r12d,%edx rorl $6,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi rorl $2,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d rorl $14,%r13d movl %r14d,%edx movl %eax,%r12d rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 52(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d rorl $11,%r14d xorl %r8d,%edi addl %r12d,%ecx rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d rorl $2,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d xorl %r10d,%r13d xorl %eax,%r12d rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 56(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d 
// NOTE(review): machine-generated SHA-256 assembly (x86-64, ELF flavour) produced by a
// BoringSSL Perl script -- the instruction stream below is intentionally left byte-identical;
// only these standalone `//` comment lines (stripped by the C preprocessor pass) were added.
// The first part of the next line is the tail of sha256_block_data_order_ssse3 (final scalar
// rounds, feed-back of the 8 state words into *%rdi, loop branch .Lloop_ssse3, register
// restore / epilogue). After its .size directive, sha256_block_data_order_avx begins:
// prologue saves the callee-saved registers, computes the input end pointer
// (%rdx = %rsi + num_blocks*64, via shlq $4 then leaq (%rsi,%rdx,4)), aligns %rsp to 64,
// stashes ctx/input/end/saved-rsp at 64..88(%rsp), then loads the eight 32-bit state words
// from 0..28(%rdi) into eax,ebx,ecx,edx,r8d..r11d.
addl %ebx,%r14d rorl $14,%r13d movl %r14d,%ebx movl %r10d,%r12d rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 60(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d xorl %ecx,%edi addl %r12d,%eax rorl $6,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d movq 64+0(%rsp),%rdi movl %r14d,%eax addl 0(%rdi),%eax leaq 64(%rsi),%rsi addl 4(%rdi),%ebx addl 8(%rdi),%ecx addl 12(%rdi),%edx addl 16(%rdi),%r8d addl 20(%rdi),%r9d addl 24(%rdi),%r10d addl 28(%rdi),%r11d cmpq 64+16(%rsp),%rsi movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) jb .Lloop_ssse3 movq 88(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lepilogue_ssse3: ret .cfi_endproc .size sha256_block_data_order_ssse3,.-sha256_block_data_order_ssse3 .globl sha256_block_data_order_avx .hidden sha256_block_data_order_avx .type sha256_block_data_order_avx,@function .align 64 sha256_block_data_order_avx: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 shlq $4,%rdx subq $96,%rsp leaq (%rsi,%rdx,4),%rdx andq $-64,%rsp movq %rdi,64+0(%rsp) movq %rsi,64+8(%rsp) movq %rdx,64+16(%rsp) movq %rax,88(%rsp) .cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08 .Lprologue_avx: vzeroupper movl 0(%rdi),%eax movl 4(%rdi),%ebx movl 8(%rdi),%ecx movl 12(%rdi),%edx movl 16(%rdi),%r8d movl 20(%rdi),%r9d movl
// Per 64-byte block (.Lloop_avx): load 16 message words into xmm0..xmm3 and vpshufb them
// through the mask at K256+512 (presumably the big-endian byte-swap mask appended to the
// round-constant table -- confirm against the K256 definition, not visible in this chunk);
// add the first four groups of round constants from (%rbp)=K256 and spill W+K to 0..48(%rsp).
// .Lavx_00_47 then interleaves the scalar SHA-256 round function (shrdl-based Sigma
// rotations on eax..r11d) with the message-schedule update on xmm0..xmm3
// (vpalignr/vpsrld/vpslld/vpxor/vpshufd/vpshufb sequences), advancing %rbp by 128 per pass.
24(%rdi),%r10d movl 28(%rdi),%r11d vmovdqa K256+512+32(%rip),%xmm8 vmovdqa K256+512+64(%rip),%xmm9 jmp .Lloop_avx .align 16 .Lloop_avx: vmovdqa K256+512(%rip),%xmm7 vmovdqu 0(%rsi),%xmm0 vmovdqu 16(%rsi),%xmm1 vmovdqu 32(%rsi),%xmm2 vmovdqu 48(%rsi),%xmm3 vpshufb %xmm7,%xmm0,%xmm0 leaq K256(%rip),%rbp vpshufb %xmm7,%xmm1,%xmm1 vpshufb %xmm7,%xmm2,%xmm2 vpaddd 0(%rbp),%xmm0,%xmm4 vpshufb %xmm7,%xmm3,%xmm3 vpaddd 32(%rbp),%xmm1,%xmm5 vpaddd 64(%rbp),%xmm2,%xmm6 vpaddd 96(%rbp),%xmm3,%xmm7 vmovdqa %xmm4,0(%rsp) movl %eax,%r14d vmovdqa %xmm5,16(%rsp) movl %ebx,%edi vmovdqa %xmm6,32(%rsp) xorl %ecx,%edi vmovdqa %xmm7,48(%rsp) movl %r8d,%r13d jmp .Lavx_00_47 .align 16 .Lavx_00_47: subq $-128,%rbp vpalignr $4,%xmm0,%xmm1,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d vpalignr $4,%xmm2,%xmm3,%xmm7 shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d vpaddd %xmm7,%xmm0,%xmm0 xorl %r8d,%r13d addl 0(%rsp),%r11d movl %eax,%r15d vpsrld $3,%xmm4,%xmm7 xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi vpshufd $250,%xmm3,%xmm7 shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d vpsrld $11,%xmm6,%xmm6 movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d vpsrld $10,%xmm7,%xmm6 addl 4(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d vpaddd %xmm4,%xmm0,%xmm0 addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %r10d,%ecx addl %r15d,%r10d movl
// Schedule+round interleave continues: xmm0 finished (sigma1 via vpsrlq $17/$2 + vpshufb
// %xmm8/%xmm9 lane fix-ups), next W+K group stored with vpaddd 0(%rbp) -> vmovdqa 0(%rsp),
// then the same pattern starts for xmm1.
%ecx,%r13d vpsrlq $2,%xmm7,%xmm7 addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d vpxor %xmm7,%xmm6,%xmm6 movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d vpaddd %xmm6,%xmm0,%xmm0 andl %ecx,%r12d xorl %ecx,%r13d addl 8(%rsp),%r9d vpshufd $80,%xmm0,%xmm7 movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d vpxor %xmm7,%xmm6,%xmm6 xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx vpsrlq $2,%xmm7,%xmm7 addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d vpaddd %xmm6,%xmm0,%xmm0 shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d vpaddd 0(%rbp),%xmm0,%xmm6 xorl %ebx,%r13d addl 12(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d vmovdqa %xmm6,0(%rsp) vpalignr $4,%xmm1,%xmm2,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d vpalignr $4,%xmm3,%xmm0,%xmm7 shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d vpaddd %xmm7,%xmm1,%xmm1 xorl %eax,%r13d addl 16(%rsp),%edx movl %r8d,%r15d vpsrld $3,%xmm4,%xmm7 xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi vpshufd $250,%xmm0,%xmm7 shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx vpsrld $11,%xmm6,%xmm6 movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl
// xmm1 schedule group: same round/schedule interleave, W+K stored via vpaddd 32(%rbp)
// -> vmovdqa 16(%rsp); then the xmm2 group begins.
%r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d vpsrld $10,%xmm7,%xmm6 addl 20(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d vpaddd %xmm4,%xmm1,%xmm1 addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d vpsrlq $2,%xmm7,%xmm7 addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx vpxor %xmm7,%xmm6,%xmm6 movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d vpaddd %xmm6,%xmm1,%xmm1 andl %r10d,%r12d xorl %r10d,%r13d addl 24(%rsp),%ebx vpshufd $80,%xmm1,%xmm7 movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx vpxor %xmm7,%xmm6,%xmm6 xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d vpsrlq $2,%xmm7,%xmm7 addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d vpaddd %xmm6,%xmm1,%xmm1 shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d vpaddd 32(%rbp),%xmm1,%xmm6 xorl %r9d,%r13d addl 28(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d vmovdqa %xmm6,16(%rsp) vpalignr $4,%xmm2,%xmm3,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d vpalignr $4,%xmm0,%xmm1,%xmm7 shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl
// xmm2 schedule group (rounds fed from 32..44(%rsp)); mirrors the xmm0/xmm1 pattern.
%r10d,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d vpaddd %xmm7,%xmm2,%xmm2 xorl %r8d,%r13d addl 32(%rsp),%r11d movl %eax,%r15d vpsrld $3,%xmm4,%xmm7 xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi vpshufd $250,%xmm1,%xmm7 shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d vpsrld $11,%xmm6,%xmm6 movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d vpsrld $10,%xmm7,%xmm6 addl 36(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d vpaddd %xmm4,%xmm2,%xmm2 addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d vpsrlq $2,%xmm7,%xmm7 addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d vpxor %xmm7,%xmm6,%xmm6 movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d vpaddd %xmm6,%xmm2,%xmm2 andl %ecx,%r12d xorl %ecx,%r13d addl 40(%rsp),%r9d vpshufd $80,%xmm2,%xmm7 movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d vpxor %xmm7,%xmm6,%xmm6 xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx vpsrlq $2,%xmm7,%xmm7 addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d
// xmm2 group W+K stored via vpaddd 64(%rbp) -> vmovdqa 32(%rsp); xmm3 group follows
// (rounds fed from 48..60(%rsp)).
vpaddd %xmm6,%xmm2,%xmm2 shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d vpaddd 64(%rbp),%xmm2,%xmm6 xorl %ebx,%r13d addl 44(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d vmovdqa %xmm6,32(%rsp) vpalignr $4,%xmm3,%xmm0,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d vpalignr $4,%xmm1,%xmm2,%xmm7 shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d vpaddd %xmm7,%xmm3,%xmm3 xorl %eax,%r13d addl 48(%rsp),%edx movl %r8d,%r15d vpsrld $3,%xmm4,%xmm7 xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi vpshufd $250,%xmm2,%xmm7 shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx vpsrld $11,%xmm6,%xmm6 movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d vpsrld $10,%xmm7,%xmm6 addl 52(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d vpaddd %xmm4,%xmm3,%xmm3 addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d vpsrlq $2,%xmm7,%xmm7 addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx vpxor %xmm7,%xmm6,%xmm6 movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d vpaddd %xmm6,%xmm3,%xmm3 andl %r10d,%r12d
// End of the schedule pass: xmm3 W+K stored via vpaddd 96(%rbp) -> vmovdqa 48(%rsp), then
// `cmpb $0,131(%rbp) / jne .Lavx_00_47` decides whether to run another pass (%rbp advances
// 128 per pass through the K256 table; the tested byte is presumably a sentinel stored
// alongside the constants -- confirm against the K256 table definition, not visible here).
// After the loop falls through, the remaining rounds are pure scalar code consuming the
// precomputed W+K values at 0..60(%rsp) with no vector work.
xorl %r10d,%r13d addl 56(%rsp),%ebx vpshufd $80,%xmm3,%xmm7 movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx vpxor %xmm7,%xmm6,%xmm6 xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d vpsrlq $2,%xmm7,%xmm7 addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d vpaddd %xmm6,%xmm3,%xmm3 shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d vpaddd 96(%rbp),%xmm3,%xmm6 xorl %r9d,%r13d addl 60(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d vmovdqa %xmm6,48(%rsp) cmpb $0,131(%rbp) jne .Lavx_00_47 shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 0(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 4(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d movl %edx,%r12d shrdl
// Scalar-only rounds continue, two rounds per repeated pattern, consuming
// 8..24(%rsp).
$9,%r14d,%r14d xorl %ecx,%r13d xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 8(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 12(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 16(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 20(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 24(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d
// Scalar rounds continue (28..40(%rsp) consumed here).
xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 28(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 32(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 36(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 40(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx addl %edi,%r9d movl
// Scalar rounds continue (44..56(%rsp) consumed here).
%ebx,%r13d addl %r9d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 44(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 48(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 52(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 56(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl
// Final round (60(%rsp)), then block epilogue: reload the ctx pointer from 64+0(%rsp),
// add the working registers back into the 8 state words at 0..28(%rdi), advance %rsi by 64,
// loop via `jb .Lloop_avx` while %rsi is below the end pointer at 64+16(%rsp); on exit,
// vzeroupper, restore callee-saved registers from the saved frame, and return.
// The tail of this line is not part of the function: `#endif`, the .note.GNU-stack section,
// then an extraction artifact (a `FILE:` banner) and the header of the next generated file
// (sha512-586-apple.S) -- left untouched.
%r9d,%r13d addl 60(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d movq 64+0(%rsp),%rdi movl %r14d,%eax addl 0(%rdi),%eax leaq 64(%rsi),%rsi addl 4(%rdi),%ebx addl 8(%rdi),%ecx addl 12(%rdi),%edx addl 16(%rdi),%r8d addl 20(%rdi),%r9d addl 24(%rdi),%r10d addl 28(%rdi),%r11d cmpq 64+16(%rsp),%rsi movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) jb .Lloop_avx movq 88(%rsp),%rsi .cfi_def_cfa %rsi,8 vzeroupper movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lepilogue_avx: ret .cfi_endproc .size sha256_block_data_order_avx,.-sha256_block_data_order_avx #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha512-586-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand.
/*
 * _sha512_block_data_order_nohw(uint64_t state[8], const uint8_t *in, size_t num)
 *
 * SHA-512 compression for 32-bit x86 using MMX/SSE2: the eight 64-bit hash
 * words live in %mm0-%mm7 (loaded from (%esi)..56(%esi), stored back before
 * return; `emms` clears MMX state before the epilogue).
 *
 * Calling convention / setup visible below:
 *   20(%esp) = state pointer (%esi), 24(%esp) = input pointer (%edi),
 *   28(%esp) = block count (%eax); `shll $7,%eax` + `addl %edi,%eax`
 *   computes the end-of-input pointer, so each block is 128 bytes.
 *   The original %esp is saved (%ebx) and the stack is realigned with
 *   `andl $-64,%esp`; position-independent access to the LK512 constant
 *   table uses the call/pop L000pic_point idiom into %ebp.
 *   Input words are loaded 32 bits at a time and `bswap`ed (big-endian
 *   message words on a little-endian machine).
 *
 * Rounds: L00200_14_sse2 runs rounds 0-14 (%edx = 15 counter) while
 * prefetching the next message word; L00316_79_sse2 runs the remaining
 * rounds two at a time (%edx = 32 counter) with the on-stack message
 * schedule (the 200(%esp)/128(%esp)/88(%esp) operands). The outer
 * L001loop_sse2 repeats until %edi reaches the end pointer (cmpl/jb).
 *
 * This file is generated from a BoringSSL perlasm script ("Do not edit by
 * hand"); the instruction stream below is annotation-only territory.
 * NOTE(review): the leading `#include` directive lost its argument during
 * text extraction (presumably <CNIOBoringSSL_asm_base.h> given the
 * BORINGSSL_PREFIX define in this package) -- confirm against the
 * upstream generated file before relying on this copy.
 * NOTE(review): line breaks in this copy fall mid-instruction in places
 * (extraction artifact); token sequence is preserved unchanged.
 */
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _sha512_block_data_order_nohw .private_extern _sha512_block_data_order_nohw .align 4 _sha512_block_data_order_nohw: L_sha512_block_data_order_nohw_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call L000pic_point L000pic_point: popl %ebp leal LK512-L000pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $7,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) movq (%esi),%mm0 movq 8(%esi),%mm1 movq 16(%esi),%mm2 movq 24(%esi),%mm3 movq 32(%esi),%mm4 movq 40(%esi),%mm5 movq 48(%esi),%mm6 movq 56(%esi),%mm7 subl $80,%esp jmp L001loop_sse2 .align 4,0x90 L001loop_sse2: movq %mm1,8(%esp) movq %mm2,16(%esp) movq %mm3,24(%esp) movq %mm5,40(%esp) movq %mm6,48(%esp) pxor %mm1,%mm2 movq %mm7,56(%esp) movq %mm0,%mm3 movl (%edi),%eax movl 4(%edi),%ebx addl $8,%edi movl $15,%edx bswap %eax bswap %ebx jmp L00200_14_sse2 .align 4,0x90 L00200_14_sse2: movd %eax,%mm1 movl (%edi),%eax movd %ebx,%mm7 movl 4(%edi),%ebx addl $8,%edi bswap %eax bswap %ebx punpckldq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm3,%mm0 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm2,%mm3 movq %mm0,%mm2 addl $8,%ebp paddq %mm6,%mm3 movq 48(%esp),%mm6 decl %edx jnz
L00200_14_sse2 movd %eax,%mm1 movd %ebx,%mm7 punpckldq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm3,%mm0 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 192(%esp),%mm7 paddq %mm2,%mm3 movq %mm0,%mm2 addl $8,%ebp paddq %mm6,%mm3 pxor %mm0,%mm0 movl $32,%edx jmp L00316_79_sse2 .align 4,0x90 L00316_79_sse2: movq 88(%esp),%mm5 movq %mm7,%mm1 psrlq $1,%mm7 movq %mm5,%mm6 psrlq $6,%mm5 psllq $56,%mm1 paddq %mm3,%mm0 movq %mm7,%mm3 psrlq $6,%mm7 pxor %mm1,%mm3 psllq $7,%mm1 pxor %mm7,%mm3 psrlq $1,%mm7 pxor %mm1,%mm3 movq %mm5,%mm1 psrlq $13,%mm5 pxor %mm3,%mm7 psllq $3,%mm6 pxor %mm5,%mm1 paddq 200(%esp),%mm7 pxor %mm6,%mm1 psrlq $42,%mm5 paddq 128(%esp),%mm7 pxor %mm5,%mm1 psllq $42,%mm6 movq 40(%esp),%mm5 pxor %mm6,%mm1 movq 48(%esp),%mm6 paddq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq
$6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 192(%esp),%mm7 paddq %mm6,%mm2 addl $8,%ebp movq 88(%esp),%mm5 movq %mm7,%mm1 psrlq $1,%mm7 movq %mm5,%mm6 psrlq $6,%mm5 psllq $56,%mm1 paddq %mm3,%mm2 movq %mm7,%mm3 psrlq $6,%mm7 pxor %mm1,%mm3 psllq $7,%mm1 pxor %mm7,%mm3 psrlq $1,%mm7 pxor %mm1,%mm3 movq %mm5,%mm1 psrlq $13,%mm5 pxor %mm3,%mm7 psllq $3,%mm6 pxor %mm5,%mm1 paddq 200(%esp),%mm7 pxor %mm6,%mm1 psrlq $42,%mm5 paddq 128(%esp),%mm7 pxor %mm5,%mm1 psllq $42,%mm6 movq 40(%esp),%mm5 pxor %mm6,%mm1 movq 48(%esp),%mm6 paddq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 192(%esp),%mm7 paddq %mm6,%mm0 addl $8,%ebp decl %edx jnz L00316_79_sse2 paddq %mm3,%mm0 movq 8(%esp),%mm1 movq 24(%esp),%mm3 movq 40(%esp),%mm5 movq 48(%esp),%mm6 movq 56(%esp),%mm7 pxor %mm1,%mm2 paddq (%esi),%mm0 paddq 8(%esi),%mm1 paddq 16(%esi),%mm2 paddq 24(%esi),%mm3 paddq 32(%esi),%mm4 paddq 40(%esi),%mm5 paddq 48(%esi),%mm6 paddq 56(%esi),%mm7 movl $640,%eax movq %mm0,(%esi) movq %mm1,8(%esi) movq %mm2,16(%esi) movq %mm3,24(%esi) movq %mm4,32(%esi) movq %mm5,40(%esi) movq %mm6,48(%esi) movq %mm7,56(%esi) leal (%esp,%eax,1),%esp subl %eax,%ebp cmpl 88(%esp),%edi jb L001loop_sse2 movl 92(%esp),%esp emms popl %edi popl %esi popl %ebx popl %ebp ret .globl _sha512_block_data_order_ssse3 .private_extern _sha512_block_data_order_ssse3 .align 4
/*
 * _sha512_block_data_order_ssse3(uint64_t state[8], const uint8_t *in, size_t num)
 *
 * SHA-512 compression for 32-bit x86 with the message schedule computed in
 * SSE registers (%xmm0-%xmm7) interleaved with the round computation, which
 * still runs in MMX registers (%mm0-%mm7) exactly as in the _nohw variant
 * above. Same argument layout: 20(%esp)=state, 24(%esp)=input,
 * 28(%esp)=block count (`shll $7` => 128 bytes per block); same call/pop
 * PIC idiom for LK512, same `andl $-64,%esp` stack realignment, and the
 * epilogue restores %esp from 76(%edx), clears MMX state with `emms`,
 * and pops the callee-saved registers.
 *
 * The raw `.byte 102,15,56,0,NNN` sequences encode `pshufb` (used with the
 * mask at 640(%ebp) to byte-swap the big-endian message words) and
 * `.byte 102,15,58,15,NNN,8` encode `palignr $8` for the schedule shifts --
 * emitted as raw bytes by the generator, presumably for old-assembler
 * compatibility; confirm against the perlasm source if it matters.
 *
 * Generated code ("Do not edit by hand"); comments here are annotations
 * only, and the instruction stream is preserved byte-for-byte.
 * NOTE(review): line breaks in this copy fall mid-instruction in places
 * (extraction artifact); token sequence is unchanged. The LK512 constant
 * table at the end is truncated at this chunk's boundary.
 */
_sha512_block_data_order_ssse3: L_sha512_block_data_order_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call L004pic_point L004pic_point: popl %ebp leal LK512-L004pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $7,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) movq (%esi),%mm0 movq 8(%esi),%mm1 movq 16(%esi),%mm2 movq 24(%esi),%mm3 movq 32(%esi),%mm4 movq 40(%esi),%mm5 movq 48(%esi),%mm6 movq 56(%esi),%mm7 leal -64(%esp),%edx subl $256,%esp movdqa 640(%ebp),%xmm1 movdqu (%edi),%xmm0 .byte 102,15,56,0,193 movdqa (%ebp),%xmm3 movdqa %xmm1,%xmm2 movdqu 16(%edi),%xmm1 paddq %xmm0,%xmm3 .byte 102,15,56,0,202 movdqa %xmm3,-128(%edx) movdqa 16(%ebp),%xmm4 movdqa %xmm2,%xmm3 movdqu 32(%edi),%xmm2 paddq %xmm1,%xmm4 .byte 102,15,56,0,211 movdqa %xmm4,-112(%edx) movdqa 32(%ebp),%xmm5 movdqa %xmm3,%xmm4 movdqu 48(%edi),%xmm3 paddq %xmm2,%xmm5 .byte 102,15,56,0,220 movdqa %xmm5,-96(%edx) movdqa 48(%ebp),%xmm6 movdqa %xmm4,%xmm5 movdqu 64(%edi),%xmm4 paddq %xmm3,%xmm6 .byte 102,15,56,0,229 movdqa %xmm6,-80(%edx) movdqa 64(%ebp),%xmm7 movdqa %xmm5,%xmm6 movdqu 80(%edi),%xmm5 paddq %xmm4,%xmm7 .byte 102,15,56,0,238 movdqa %xmm7,-64(%edx) movdqa %xmm0,(%edx) movdqa 80(%ebp),%xmm0 movdqa %xmm6,%xmm7 movdqu 96(%edi),%xmm6 paddq %xmm5,%xmm0 .byte 102,15,56,0,247 movdqa %xmm0,-48(%edx) movdqa %xmm1,16(%edx) movdqa 96(%ebp),%xmm1 movdqa %xmm7,%xmm0 movdqu 112(%edi),%xmm7 paddq %xmm6,%xmm1 .byte 102,15,56,0,248 movdqa %xmm1,-32(%edx) movdqa %xmm2,32(%edx) movdqa 112(%ebp),%xmm2 movdqa (%edx),%xmm0 paddq %xmm7,%xmm2 movdqa %xmm2,-16(%edx) nop .align 5,0x90 L005loop_ssse3: movdqa 16(%edx),%xmm2 movdqa %xmm3,48(%edx) leal 128(%ebp),%ebp movq %mm1,8(%esp) movl %edi,%ebx movq %mm2,16(%esp) leal 128(%edi),%edi movq %mm3,24(%esp) cmpl %eax,%edi movq %mm5,40(%esp) cmovbl %edi,%ebx movq %mm6,48(%esp) movl $4,%ecx pxor %mm1,%mm2 movq %mm7,56(%esp) pxor %mm3,%mm3
/* L00600_47_ssse3: fused loop -- each pass extends the xmm message
 * schedule (palignr/shift/xor sigma steps) while running the mm-register
 * rounds against the previously scheduled words at -128(%edx)..-16(%edx).
 * %ecx was set to 4 above and is decremented at the loop tail. */
jmp L00600_47_ssse3 .align 5,0x90 L00600_47_ssse3: movdqa %xmm5,%xmm3 movdqa %xmm2,%xmm1 .byte 102,15,58,15,208,8 movdqa %xmm4,(%edx) .byte 102,15,58,15,220,8 movdqa %xmm2,%xmm4 psrlq $7,%xmm2 paddq %xmm3,%xmm0 movdqa %xmm4,%xmm3 psrlq $1,%xmm4 psllq $56,%xmm3 pxor %xmm4,%xmm2 psrlq $7,%xmm4 pxor %xmm3,%xmm2 psllq $7,%xmm3 pxor %xmm4,%xmm2 movdqa %xmm7,%xmm4 pxor %xmm3,%xmm2 movdqa %xmm7,%xmm3 psrlq $6,%xmm4 paddq %xmm2,%xmm0 movdqa %xmm7,%xmm2 psrlq $19,%xmm3 psllq $3,%xmm2 pxor %xmm3,%xmm4 psrlq $42,%xmm3 pxor %xmm2,%xmm4 psllq $42,%xmm2 pxor %xmm3,%xmm4 movdqa 32(%edx),%xmm3 pxor %xmm2,%xmm4 movdqa (%ebp),%xmm2 movq %mm4,%mm1 paddq %xmm4,%xmm0 movq -128(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) paddq %xmm0,%xmm2 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -120(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq
$6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm2,-128(%edx) movdqa %xmm6,%xmm4 movdqa %xmm3,%xmm2 .byte 102,15,58,15,217,8 movdqa %xmm5,16(%edx) .byte 102,15,58,15,229,8 movdqa %xmm3,%xmm5 psrlq $7,%xmm3 paddq %xmm4,%xmm1 movdqa %xmm5,%xmm4 psrlq $1,%xmm5 psllq $56,%xmm4 pxor %xmm5,%xmm3 psrlq $7,%xmm5 pxor %xmm4,%xmm3 psllq $7,%xmm4 pxor %xmm5,%xmm3 movdqa %xmm0,%xmm5 pxor %xmm4,%xmm3 movdqa %xmm0,%xmm4 psrlq $6,%xmm5 paddq %xmm3,%xmm1 movdqa %xmm0,%xmm3 psrlq $19,%xmm4 psllq $3,%xmm3 pxor %xmm4,%xmm5 psrlq $42,%xmm4 pxor %xmm3,%xmm5 psllq $42,%xmm3 pxor %xmm4,%xmm5 movdqa 48(%edx),%xmm4 pxor %xmm3,%xmm5 movdqa 16(%ebp),%xmm3 movq %mm4,%mm1 paddq %xmm5,%xmm1 movq -112(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) paddq %xmm1,%xmm3 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -104(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6
pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm3,-112(%edx) movdqa %xmm7,%xmm5 movdqa %xmm4,%xmm3 .byte 102,15,58,15,226,8 movdqa %xmm6,32(%edx) .byte 102,15,58,15,238,8 movdqa %xmm4,%xmm6 psrlq $7,%xmm4 paddq %xmm5,%xmm2 movdqa %xmm6,%xmm5 psrlq $1,%xmm6 psllq $56,%xmm5 pxor %xmm6,%xmm4 psrlq $7,%xmm6 pxor %xmm5,%xmm4 psllq $7,%xmm5 pxor %xmm6,%xmm4 movdqa %xmm1,%xmm6 pxor %xmm5,%xmm4 movdqa %xmm1,%xmm5 psrlq $6,%xmm6 paddq %xmm4,%xmm2 movdqa %xmm1,%xmm4 psrlq $19,%xmm5 psllq $3,%xmm4 pxor %xmm5,%xmm6 psrlq $42,%xmm5 pxor %xmm4,%xmm6 psllq $42,%xmm4 pxor %xmm5,%xmm6 movdqa (%edx),%xmm5 pxor %xmm4,%xmm6 movdqa 32(%ebp),%xmm4 movq %mm4,%mm1 paddq %xmm6,%xmm2 movq -96(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) paddq %xmm2,%xmm4 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -88(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7
psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm4,-96(%edx) movdqa %xmm0,%xmm6 movdqa %xmm5,%xmm4 .byte 102,15,58,15,235,8 movdqa %xmm7,48(%edx) .byte 102,15,58,15,247,8 movdqa %xmm5,%xmm7 psrlq $7,%xmm5 paddq %xmm6,%xmm3 movdqa %xmm7,%xmm6 psrlq $1,%xmm7 psllq $56,%xmm6 pxor %xmm7,%xmm5 psrlq $7,%xmm7 pxor %xmm6,%xmm5 psllq $7,%xmm6 pxor %xmm7,%xmm5 movdqa %xmm2,%xmm7 pxor %xmm6,%xmm5 movdqa %xmm2,%xmm6 psrlq $6,%xmm7 paddq %xmm5,%xmm3 movdqa %xmm2,%xmm5 psrlq $19,%xmm6 psllq $3,%xmm5 pxor %xmm6,%xmm7 psrlq $42,%xmm6 pxor %xmm5,%xmm7 psllq $42,%xmm5 pxor %xmm6,%xmm7 movdqa 16(%edx),%xmm6 pxor %xmm5,%xmm7 movdqa 48(%ebp),%xmm5 movq %mm4,%mm1 paddq %xmm7,%xmm3 movq -80(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) paddq %xmm3,%xmm5 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -72(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3
movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm5,-80(%edx) movdqa %xmm1,%xmm7 movdqa %xmm6,%xmm5 .byte 102,15,58,15,244,8 movdqa %xmm0,(%edx) .byte 102,15,58,15,248,8 movdqa %xmm6,%xmm0 psrlq $7,%xmm6 paddq %xmm7,%xmm4 movdqa %xmm0,%xmm7 psrlq $1,%xmm0 psllq $56,%xmm7 pxor %xmm0,%xmm6 psrlq $7,%xmm0 pxor %xmm7,%xmm6 psllq $7,%xmm7 pxor %xmm0,%xmm6 movdqa %xmm3,%xmm0 pxor %xmm7,%xmm6 movdqa %xmm3,%xmm7 psrlq $6,%xmm0 paddq %xmm6,%xmm4 movdqa %xmm3,%xmm6 psrlq $19,%xmm7 psllq $3,%xmm6 pxor %xmm7,%xmm0 psrlq $42,%xmm7 pxor %xmm6,%xmm0 psllq $42,%xmm6 pxor %xmm7,%xmm0 movdqa 32(%edx),%xmm7 pxor %xmm6,%xmm0 movdqa 64(%ebp),%xmm6 movq %mm4,%mm1 paddq %xmm0,%xmm4 movq -64(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) paddq %xmm4,%xmm6 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -56(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7
pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm6,-64(%edx) movdqa %xmm2,%xmm0 movdqa %xmm7,%xmm6 .byte 102,15,58,15,253,8 movdqa %xmm1,16(%edx) .byte 102,15,58,15,193,8 movdqa %xmm7,%xmm1 psrlq $7,%xmm7 paddq %xmm0,%xmm5 movdqa %xmm1,%xmm0 psrlq $1,%xmm1 psllq $56,%xmm0 pxor %xmm1,%xmm7 psrlq $7,%xmm1 pxor %xmm0,%xmm7 psllq $7,%xmm0 pxor %xmm1,%xmm7 movdqa %xmm4,%xmm1 pxor %xmm0,%xmm7 movdqa %xmm4,%xmm0 psrlq $6,%xmm1 paddq %xmm7,%xmm5 movdqa %xmm4,%xmm7 psrlq $19,%xmm0 psllq $3,%xmm7 pxor %xmm0,%xmm1 psrlq $42,%xmm0 pxor %xmm7,%xmm1 psllq $42,%xmm7 pxor %xmm0,%xmm1 movdqa 48(%edx),%xmm0 pxor %xmm7,%xmm1 movdqa 80(%ebp),%xmm7 movq %mm4,%mm1 paddq %xmm1,%xmm5 movq -48(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) paddq %xmm5,%xmm7 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -40(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq
%mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm7,-48(%edx) movdqa %xmm3,%xmm1 movdqa %xmm0,%xmm7 .byte 102,15,58,15,198,8 movdqa %xmm2,32(%edx) .byte 102,15,58,15,202,8 movdqa %xmm0,%xmm2 psrlq $7,%xmm0 paddq %xmm1,%xmm6 movdqa %xmm2,%xmm1 psrlq $1,%xmm2 psllq $56,%xmm1 pxor %xmm2,%xmm0 psrlq $7,%xmm2 pxor %xmm1,%xmm0 psllq $7,%xmm1 pxor %xmm2,%xmm0 movdqa %xmm5,%xmm2 pxor %xmm1,%xmm0 movdqa %xmm5,%xmm1 psrlq $6,%xmm2 paddq %xmm0,%xmm6 movdqa %xmm5,%xmm0 psrlq $19,%xmm1 psllq $3,%xmm0 pxor %xmm1,%xmm2 psrlq $42,%xmm1 pxor %xmm0,%xmm2 psllq $42,%xmm0 pxor %xmm1,%xmm2 movdqa (%edx),%xmm1 pxor %xmm0,%xmm2 movdqa 96(%ebp),%xmm0 movq %mm4,%mm1 paddq %xmm2,%xmm6 movq -32(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) paddq %xmm6,%xmm0 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -24(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq
$4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm0,-32(%edx) movdqa %xmm4,%xmm2 movdqa %xmm1,%xmm0 .byte 102,15,58,15,207,8 movdqa %xmm3,48(%edx) .byte 102,15,58,15,211,8 movdqa %xmm1,%xmm3 psrlq $7,%xmm1 paddq %xmm2,%xmm7 movdqa %xmm3,%xmm2 psrlq $1,%xmm3 psllq $56,%xmm2 pxor %xmm3,%xmm1 psrlq $7,%xmm3 pxor %xmm2,%xmm1 psllq $7,%xmm2 pxor %xmm3,%xmm1 movdqa %xmm6,%xmm3 pxor %xmm2,%xmm1 movdqa %xmm6,%xmm2 psrlq $6,%xmm3 paddq %xmm1,%xmm7 movdqa %xmm6,%xmm1 psrlq $19,%xmm2 psllq $3,%xmm1 pxor %xmm2,%xmm3 psrlq $42,%xmm2 pxor %xmm1,%xmm3 psllq $42,%xmm1 pxor %xmm2,%xmm3 movdqa 16(%edx),%xmm2 pxor %xmm1,%xmm3 movdqa 112(%ebp),%xmm1 movq %mm4,%mm1 paddq %xmm3,%xmm7 movq -16(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) paddq %xmm7,%xmm1 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -8(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq
%mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm1,-16(%edx) leal 128(%ebp),%ebp decl %ecx jnz L00600_47_ssse3 movdqa (%ebp),%xmm1 leal -640(%ebp),%ebp movdqu (%ebx),%xmm0 .byte 102,15,56,0,193 movdqa (%ebp),%xmm3 movdqa %xmm1,%xmm2 movdqu 16(%ebx),%xmm1 paddq %xmm0,%xmm3 .byte 102,15,56,0,202 movq %mm4,%mm1 movq -128(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -120(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4
/* Final 16 rounds of the block: consume the precomputed schedule at
 * -128(%edx)..-16(%edx) while loading and pshufb-byte-swapping the next
 * input block from %ebx (which was cmov-clamped so the last block re-reads
 * valid memory instead of running past the end). */
paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm3,-128(%edx) movdqa 16(%ebp),%xmm4 movdqa %xmm2,%xmm3 movdqu 32(%ebx),%xmm2 paddq %xmm1,%xmm4 .byte 102,15,56,0,211 movq %mm4,%mm1 movq -112(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -104(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm4,-112(%edx) movdqa 32(%ebp),%xmm5 movdqa %xmm3,%xmm4 movdqu
48(%ebx),%xmm3 paddq %xmm2,%xmm5 .byte 102,15,56,0,220 movq %mm4,%mm1 movq -96(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -88(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm5,-96(%edx) movdqa 48(%ebp),%xmm6 movdqa %xmm4,%xmm5 movdqu 64(%ebx),%xmm4 paddq %xmm3,%xmm6 .byte 102,15,56,0,229 movq %mm4,%mm1 movq -80(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq
%mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -72(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm6,-80(%edx) movdqa 64(%ebp),%xmm7 movdqa %xmm5,%xmm6 movdqu 80(%ebx),%xmm5 paddq %xmm4,%xmm7 .byte 102,15,56,0,238 movq %mm4,%mm1 movq -64(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -56(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp)
pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm7,-64(%edx) movdqa %xmm0,(%edx) movdqa 80(%ebp),%xmm0 movdqa %xmm6,%xmm7 movdqu 96(%ebx),%xmm6 paddq %xmm5,%xmm0 .byte 102,15,56,0,247 movq %mm4,%mm1 movq -48(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -40(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1
psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm0,-48(%edx) movdqa %xmm1,16(%edx) movdqa 96(%ebp),%xmm1 movdqa %xmm7,%xmm0 movdqu 112(%ebx),%xmm7 paddq %xmm6,%xmm1 .byte 102,15,56,0,248 movq %mm4,%mm1 movq -32(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -24(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm1,-32(%edx) movdqa %xmm2,32(%edx) movdqa 112(%ebp),%xmm2 movdqa (%edx),%xmm0 paddq %xmm7,%xmm2 movq %mm4,%mm1 movq -16(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq
%mm4,48(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -8(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm2,-16(%edx) movq 8(%esp),%mm1 paddq %mm3,%mm0 movq 24(%esp),%mm3 movq 56(%esp),%mm7 pxor %mm1,%mm2 paddq (%esi),%mm0 paddq 8(%esi),%mm1 paddq 16(%esi),%mm2 paddq 24(%esi),%mm3 paddq 32(%esi),%mm4 paddq 40(%esi),%mm5 paddq 48(%esi),%mm6 paddq 56(%esi),%mm7 movq %mm0,(%esi) movq %mm1,8(%esi) movq %mm2,16(%esi) movq %mm3,24(%esi) movq %mm4,32(%esi) movq %mm5,40(%esi) movq %mm6,48(%esi) movq %mm7,56(%esi) cmpl %eax,%edi jb L005loop_ssse3 movl 76(%edx),%esp emms popl %edi popl %esi popl %ebx popl %ebp ret .align 6,0x90 LK512: .long 3609767458,1116352408 .long 602891725,1899447441 .long 3964484399,3049323471 .long
2173295548,3921009573 .long 4081628472,961987163 .long 3053834265,1508970993 .long 2937671579,2453635748 .long 3664609560,2870763221 .long 2734883394,3624381080 .long 1164996542,310598401 .long 1323610764,607225278 .long 3590304994,1426881987 .long 4068182383,1925078388 .long 991336113,2162078206 .long 633803317,2614888103 .long 3479774868,3248222580 .long 2666613458,3835390401 .long 944711139,4022224774 .long 2341262773,264347078 .long 2007800933,604807628 .long 1495990901,770255983 .long 1856431235,1249150122 .long 3175218132,1555081692 .long 2198950837,1996064986 .long 3999719339,2554220882 .long 766784016,2821834349 .long 2566594879,2952996808 .long 3203337956,3210313671 .long 1034457026,3336571891 .long 2466948901,3584528711 .long 3758326383,113926993 .long 168717936,338241895 .long 1188179964,666307205 .long 1546045734,773529912 .long 1522805485,1294757372 .long 2643833823,1396182291 .long 2343527390,1695183700 .long 1014477480,1986661051 .long 1206759142,2177026350 .long 344077627,2456956037 .long 1290863460,2730485921 .long 3158454273,2820302411 .long 3505952657,3259730800 .long 106217008,3345764771 .long 3606008344,3516065817 .long 1432725776,3600352804 .long 1467031594,4094571909 .long 851169720,275423344 .long 3100823752,430227734 .long 1363258195,506948616 .long 3750685593,659060556 .long 3785050280,883997877 .long 3318307427,958139571 .long 3812723403,1322822218 .long 2003034995,1537002063 .long 3602036899,1747873779 .long 1575990012,1955562222 .long 1125592928,2024104815 .long 2716904306,2227730452 .long 442776044,2361852424 .long 593698344,2428436474 .long 3733110249,2756734187 .long 2999351573,3204031479 .long 3815920427,3329325298 .long 3928383900,3391569614 .long 566280711,3515267271 .long 3454069534,3940187606 .long 4000239992,4118630271 .long 1914138554,116418474 .long 2731055270,174292421 .long 3203993006,289380356 .long 320620315,460393269 .long 587496836,685471733 .long 1086792851,852142971 .long 365543100,1017036298 .long 
2618297676,1126000580 .long 3409855158,1288033470 .long 4234509866,1501505948 .long 987167468,1607167915 .long 1246189591,1816402316 .long 67438087,66051 .long 202182159,134810123 .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97 .byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 .byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 .byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 .byte 62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha512-586-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl sha512_block_data_order_nohw .hidden sha512_block_data_order_nohw .type sha512_block_data_order_nohw,@function .align 16 sha512_block_data_order_nohw: .L_sha512_block_data_order_nohw_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call .L000pic_point .L000pic_point: popl %ebp leal .LK512-.L000pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $7,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) movq (%esi),%mm0 movq 8(%esi),%mm1 movq 16(%esi),%mm2 movq 24(%esi),%mm3 movq 32(%esi),%mm4 movq 40(%esi),%mm5 movq 48(%esi),%mm6 movq 56(%esi),%mm7 subl $80,%esp jmp .L001loop_sse2 .align 16 .L001loop_sse2: movq %mm1,8(%esp) movq %mm2,16(%esp) movq %mm3,24(%esp) movq %mm5,40(%esp) movq %mm6,48(%esp) pxor %mm1,%mm2 movq %mm7,56(%esp) movq %mm0,%mm3 movl (%edi),%eax movl 4(%edi),%ebx addl $8,%edi movl $15,%edx bswap %eax bswap %ebx jmp .L00200_14_sse2 .align 16 
.L00200_14_sse2: movd %eax,%mm1 movl (%edi),%eax movd %ebx,%mm7 movl 4(%edi),%ebx addl $8,%edi bswap %eax bswap %ebx punpckldq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm3,%mm0 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm2,%mm3 movq %mm0,%mm2 addl $8,%ebp paddq %mm6,%mm3 movq 48(%esp),%mm6 decl %edx jnz .L00200_14_sse2 movd %eax,%mm1 movd %ebx,%mm7 punpckldq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm3,%mm0 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 192(%esp),%mm7 paddq %mm2,%mm3 movq %mm0,%mm2 addl $8,%ebp paddq %mm6,%mm3 pxor %mm0,%mm0 movl $32,%edx jmp .L00316_79_sse2 .align 16 .L00316_79_sse2: movq 88(%esp),%mm5 movq %mm7,%mm1 psrlq $1,%mm7 movq %mm5,%mm6 psrlq $6,%mm5 psllq $56,%mm1 paddq %mm3,%mm0 movq %mm7,%mm3 psrlq $6,%mm7 
pxor %mm1,%mm3 psllq $7,%mm1 pxor %mm7,%mm3 psrlq $1,%mm7 pxor %mm1,%mm3 movq %mm5,%mm1 psrlq $13,%mm5 pxor %mm3,%mm7 psllq $3,%mm6 pxor %mm5,%mm1 paddq 200(%esp),%mm7 pxor %mm6,%mm1 psrlq $42,%mm5 paddq 128(%esp),%mm7 pxor %mm5,%mm1 psllq $42,%mm6 movq 40(%esp),%mm5 pxor %mm6,%mm1 movq 48(%esp),%mm6 paddq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 192(%esp),%mm7 paddq %mm6,%mm2 addl $8,%ebp movq 88(%esp),%mm5 movq %mm7,%mm1 psrlq $1,%mm7 movq %mm5,%mm6 psrlq $6,%mm5 psllq $56,%mm1 paddq %mm3,%mm2 movq %mm7,%mm3 psrlq $6,%mm7 pxor %mm1,%mm3 psllq $7,%mm1 pxor %mm7,%mm3 psrlq $1,%mm7 pxor %mm1,%mm3 movq %mm5,%mm1 psrlq $13,%mm5 pxor %mm3,%mm7 psllq $3,%mm6 pxor %mm5,%mm1 paddq 200(%esp),%mm7 pxor %mm6,%mm1 psrlq $42,%mm5 paddq 128(%esp),%mm7 pxor %mm5,%mm1 psllq $42,%mm6 movq 40(%esp),%mm5 pxor %mm6,%mm1 movq 48(%esp),%mm6 paddq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq 
$6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 192(%esp),%mm7 paddq %mm6,%mm0 addl $8,%ebp decl %edx jnz .L00316_79_sse2 paddq %mm3,%mm0 movq 8(%esp),%mm1 movq 24(%esp),%mm3 movq 40(%esp),%mm5 movq 48(%esp),%mm6 movq 56(%esp),%mm7 pxor %mm1,%mm2 paddq (%esi),%mm0 paddq 8(%esi),%mm1 paddq 16(%esi),%mm2 paddq 24(%esi),%mm3 paddq 32(%esi),%mm4 paddq 40(%esi),%mm5 paddq 48(%esi),%mm6 paddq 56(%esi),%mm7 movl $640,%eax movq %mm0,(%esi) movq %mm1,8(%esi) movq %mm2,16(%esi) movq %mm3,24(%esi) movq %mm4,32(%esi) movq %mm5,40(%esi) movq %mm6,48(%esi) movq %mm7,56(%esi) leal (%esp,%eax,1),%esp subl %eax,%ebp cmpl 88(%esp),%edi jb .L001loop_sse2 movl 92(%esp),%esp emms popl %edi popl %esi popl %ebx popl %ebp ret .size sha512_block_data_order_nohw,.-.L_sha512_block_data_order_nohw_begin .globl sha512_block_data_order_ssse3 .hidden sha512_block_data_order_ssse3 .type sha512_block_data_order_ssse3,@function .align 16 sha512_block_data_order_ssse3: .L_sha512_block_data_order_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call .L004pic_point .L004pic_point: popl %ebp leal .LK512-.L004pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $7,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) movq (%esi),%mm0 movq 8(%esi),%mm1 movq 16(%esi),%mm2 movq 24(%esi),%mm3 movq 32(%esi),%mm4 movq 40(%esi),%mm5 movq 48(%esi),%mm6 movq 56(%esi),%mm7 leal -64(%esp),%edx subl $256,%esp movdqa 640(%ebp),%xmm1 movdqu (%edi),%xmm0 .byte 102,15,56,0,193 movdqa (%ebp),%xmm3 movdqa %xmm1,%xmm2 movdqu 16(%edi),%xmm1 paddq %xmm0,%xmm3 .byte 102,15,56,0,202 movdqa %xmm3,-128(%edx) movdqa 16(%ebp),%xmm4 movdqa %xmm2,%xmm3 movdqu 32(%edi),%xmm2 paddq %xmm1,%xmm4 .byte 102,15,56,0,211 movdqa %xmm4,-112(%edx) movdqa 32(%ebp),%xmm5 movdqa %xmm3,%xmm4 
movdqu 48(%edi),%xmm3 paddq %xmm2,%xmm5 .byte 102,15,56,0,220 movdqa %xmm5,-96(%edx) movdqa 48(%ebp),%xmm6 movdqa %xmm4,%xmm5 movdqu 64(%edi),%xmm4 paddq %xmm3,%xmm6 .byte 102,15,56,0,229 movdqa %xmm6,-80(%edx) movdqa 64(%ebp),%xmm7 movdqa %xmm5,%xmm6 movdqu 80(%edi),%xmm5 paddq %xmm4,%xmm7 .byte 102,15,56,0,238 movdqa %xmm7,-64(%edx) movdqa %xmm0,(%edx) movdqa 80(%ebp),%xmm0 movdqa %xmm6,%xmm7 movdqu 96(%edi),%xmm6 paddq %xmm5,%xmm0 .byte 102,15,56,0,247 movdqa %xmm0,-48(%edx) movdqa %xmm1,16(%edx) movdqa 96(%ebp),%xmm1 movdqa %xmm7,%xmm0 movdqu 112(%edi),%xmm7 paddq %xmm6,%xmm1 .byte 102,15,56,0,248 movdqa %xmm1,-32(%edx) movdqa %xmm2,32(%edx) movdqa 112(%ebp),%xmm2 movdqa (%edx),%xmm0 paddq %xmm7,%xmm2 movdqa %xmm2,-16(%edx) nop .align 32 .L005loop_ssse3: movdqa 16(%edx),%xmm2 movdqa %xmm3,48(%edx) leal 128(%ebp),%ebp movq %mm1,8(%esp) movl %edi,%ebx movq %mm2,16(%esp) leal 128(%edi),%edi movq %mm3,24(%esp) cmpl %eax,%edi movq %mm5,40(%esp) cmovbl %edi,%ebx movq %mm6,48(%esp) movl $4,%ecx pxor %mm1,%mm2 movq %mm7,56(%esp) pxor %mm3,%mm3 jmp .L00600_47_ssse3 .align 32 .L00600_47_ssse3: movdqa %xmm5,%xmm3 movdqa %xmm2,%xmm1 .byte 102,15,58,15,208,8 movdqa %xmm4,(%edx) .byte 102,15,58,15,220,8 movdqa %xmm2,%xmm4 psrlq $7,%xmm2 paddq %xmm3,%xmm0 movdqa %xmm4,%xmm3 psrlq $1,%xmm4 psllq $56,%xmm3 pxor %xmm4,%xmm2 psrlq $7,%xmm4 pxor %xmm3,%xmm2 psllq $7,%xmm3 pxor %xmm4,%xmm2 movdqa %xmm7,%xmm4 pxor %xmm3,%xmm2 movdqa %xmm7,%xmm3 psrlq $6,%xmm4 paddq %xmm2,%xmm0 movdqa %xmm7,%xmm2 psrlq $19,%xmm3 psllq $3,%xmm2 pxor %xmm3,%xmm4 psrlq $42,%xmm3 pxor %xmm2,%xmm4 psllq $42,%xmm2 pxor %xmm3,%xmm4 movdqa 32(%edx),%xmm3 pxor %xmm2,%xmm4 movdqa (%ebp),%xmm2 movq %mm4,%mm1 paddq %xmm4,%xmm0 movq -128(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) paddq %xmm0,%xmm2 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 
paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -120(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm2,-128(%edx) movdqa %xmm6,%xmm4 movdqa %xmm3,%xmm2 .byte 102,15,58,15,217,8 movdqa %xmm5,16(%edx) .byte 102,15,58,15,229,8 movdqa %xmm3,%xmm5 psrlq $7,%xmm3 paddq %xmm4,%xmm1 movdqa %xmm5,%xmm4 psrlq $1,%xmm5 psllq $56,%xmm4 pxor %xmm5,%xmm3 psrlq $7,%xmm5 pxor %xmm4,%xmm3 psllq $7,%xmm4 pxor %xmm5,%xmm3 movdqa %xmm0,%xmm5 pxor %xmm4,%xmm3 movdqa %xmm0,%xmm4 psrlq $6,%xmm5 paddq %xmm3,%xmm1 movdqa %xmm0,%xmm3 psrlq $19,%xmm4 psllq $3,%xmm3 pxor %xmm4,%xmm5 psrlq $42,%xmm4 pxor %xmm3,%xmm5 psllq $42,%xmm3 pxor %xmm4,%xmm5 movdqa 48(%edx),%xmm4 pxor %xmm3,%xmm5 movdqa 16(%ebp),%xmm3 movq %mm4,%mm1 paddq %xmm5,%xmm1 movq -112(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) paddq %xmm1,%xmm3 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 
pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -104(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm3,-112(%edx) movdqa %xmm7,%xmm5 movdqa %xmm4,%xmm3 .byte 102,15,58,15,226,8 movdqa %xmm6,32(%edx) .byte 102,15,58,15,238,8 movdqa %xmm4,%xmm6 psrlq $7,%xmm4 paddq %xmm5,%xmm2 movdqa %xmm6,%xmm5 psrlq $1,%xmm6 psllq $56,%xmm5 pxor %xmm6,%xmm4 psrlq $7,%xmm6 pxor %xmm5,%xmm4 psllq $7,%xmm5 pxor %xmm6,%xmm4 movdqa %xmm1,%xmm6 pxor %xmm5,%xmm4 movdqa %xmm1,%xmm5 psrlq $6,%xmm6 paddq %xmm4,%xmm2 movdqa %xmm1,%xmm4 psrlq $19,%xmm5 psllq $3,%xmm4 pxor %xmm5,%xmm6 psrlq $42,%xmm5 pxor %xmm4,%xmm6 psllq $42,%xmm4 pxor %xmm5,%xmm6 movdqa (%edx),%xmm5 pxor %xmm4,%xmm6 movdqa 32(%ebp),%xmm4 movq %mm4,%mm1 paddq %xmm6,%xmm2 movq -96(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) paddq %xmm2,%xmm4 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 
movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -88(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm4,-96(%edx) movdqa %xmm0,%xmm6 movdqa %xmm5,%xmm4 .byte 102,15,58,15,235,8 movdqa %xmm7,48(%edx) .byte 102,15,58,15,247,8 movdqa %xmm5,%xmm7 psrlq $7,%xmm5 paddq %xmm6,%xmm3 movdqa %xmm7,%xmm6 psrlq $1,%xmm7 psllq $56,%xmm6 pxor %xmm7,%xmm5 psrlq $7,%xmm7 pxor %xmm6,%xmm5 psllq $7,%xmm6 pxor %xmm7,%xmm5 movdqa %xmm2,%xmm7 pxor %xmm6,%xmm5 movdqa %xmm2,%xmm6 psrlq $6,%xmm7 paddq %xmm5,%xmm3 movdqa %xmm2,%xmm5 psrlq $19,%xmm6 psllq $3,%xmm5 pxor %xmm6,%xmm7 psrlq $42,%xmm6 pxor %xmm5,%xmm7 psllq $42,%xmm5 pxor %xmm6,%xmm7 movdqa 16(%edx),%xmm6 pxor %xmm5,%xmm7 movdqa 48(%ebp),%xmm5 movq %mm4,%mm1 paddq %xmm7,%xmm3 movq -80(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq 
%mm4,48(%esp) paddq %xmm3,%xmm5 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -72(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm5,-80(%edx) movdqa %xmm1,%xmm7 movdqa %xmm6,%xmm5 .byte 102,15,58,15,244,8 movdqa %xmm0,(%edx) .byte 102,15,58,15,248,8 movdqa %xmm6,%xmm0 psrlq $7,%xmm6 paddq %xmm7,%xmm4 movdqa %xmm0,%xmm7 psrlq $1,%xmm0 psllq $56,%xmm7 pxor %xmm0,%xmm6 psrlq $7,%xmm0 pxor %xmm7,%xmm6 psllq $7,%xmm7 pxor %xmm0,%xmm6 movdqa %xmm3,%xmm0 pxor %xmm7,%xmm6 movdqa %xmm3,%xmm7 psrlq $6,%xmm0 paddq %xmm6,%xmm4 movdqa %xmm3,%xmm6 psrlq $19,%xmm7 psllq $3,%xmm6 pxor %xmm7,%xmm0 psrlq $42,%xmm7 pxor %xmm6,%xmm0 psllq $42,%xmm6 pxor %xmm7,%xmm0 movdqa 32(%edx),%xmm7 pxor %xmm6,%xmm0 movdqa 64(%ebp),%xmm6 movq 
%mm4,%mm1 paddq %xmm0,%xmm4 movq -64(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) paddq %xmm4,%xmm6 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -56(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm6,-64(%edx) movdqa %xmm2,%xmm0 movdqa %xmm7,%xmm6 .byte 102,15,58,15,253,8 movdqa %xmm1,16(%edx) .byte 102,15,58,15,193,8 movdqa %xmm7,%xmm1 psrlq $7,%xmm7 paddq %xmm0,%xmm5 movdqa %xmm1,%xmm0 psrlq $1,%xmm1 psllq $56,%xmm0 pxor %xmm1,%xmm7 psrlq $7,%xmm1 pxor %xmm0,%xmm7 psllq $7,%xmm0 pxor %xmm1,%xmm7 movdqa %xmm4,%xmm1 pxor %xmm0,%xmm7 movdqa %xmm4,%xmm0 psrlq $6,%xmm1 paddq %xmm7,%xmm5 movdqa %xmm4,%xmm7 psrlq $19,%xmm0 psllq $3,%xmm7 pxor %xmm0,%xmm1 psrlq $42,%xmm0 pxor %xmm7,%xmm1 psllq $42,%xmm7 pxor 
%xmm0,%xmm1 movdqa 48(%edx),%xmm0 pxor %xmm7,%xmm1 movdqa 80(%ebp),%xmm7 movq %mm4,%mm1 paddq %xmm1,%xmm5 movq -48(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) paddq %xmm5,%xmm7 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -40(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm7,-48(%edx) movdqa %xmm3,%xmm1 movdqa %xmm0,%xmm7 .byte 102,15,58,15,198,8 movdqa %xmm2,32(%edx) .byte 102,15,58,15,202,8 movdqa %xmm0,%xmm2 psrlq $7,%xmm0 paddq %xmm1,%xmm6 movdqa %xmm2,%xmm1 psrlq $1,%xmm2 psllq $56,%xmm1 pxor %xmm2,%xmm0 psrlq $7,%xmm2 pxor %xmm1,%xmm0 psllq $7,%xmm1 pxor %xmm2,%xmm0 movdqa %xmm5,%xmm2 pxor %xmm1,%xmm0 movdqa %xmm5,%xmm1 psrlq $6,%xmm2 paddq %xmm0,%xmm6 movdqa %xmm5,%xmm0 psrlq $19,%xmm1 psllq 
$3,%xmm0 pxor %xmm1,%xmm2 psrlq $42,%xmm1 pxor %xmm0,%xmm2 psllq $42,%xmm0 pxor %xmm1,%xmm2 movdqa (%edx),%xmm1 pxor %xmm0,%xmm2 movdqa 96(%ebp),%xmm0 movq %mm4,%mm1 paddq %xmm2,%xmm6 movq -32(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) paddq %xmm6,%xmm0 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -24(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm0,-32(%edx) movdqa %xmm4,%xmm2 movdqa %xmm1,%xmm0 .byte 102,15,58,15,207,8 movdqa %xmm3,48(%edx) .byte 102,15,58,15,211,8 movdqa %xmm1,%xmm3 psrlq $7,%xmm1 paddq %xmm2,%xmm7 movdqa %xmm3,%xmm2 psrlq $1,%xmm3 psllq $56,%xmm2 pxor %xmm3,%xmm1 psrlq $7,%xmm3 pxor %xmm2,%xmm1 psllq $7,%xmm2 pxor %xmm3,%xmm1 movdqa %xmm6,%xmm3 pxor %xmm2,%xmm1 movdqa %xmm6,%xmm2 
psrlq $6,%xmm3 paddq %xmm1,%xmm7 movdqa %xmm6,%xmm1 psrlq $19,%xmm2 psllq $3,%xmm1 pxor %xmm2,%xmm3 psrlq $42,%xmm2 pxor %xmm1,%xmm3 psllq $42,%xmm1 pxor %xmm2,%xmm3 movdqa 16(%edx),%xmm2 pxor %xmm1,%xmm3 movdqa 112(%ebp),%xmm1 movq %mm4,%mm1 paddq %xmm3,%xmm7 movq -16(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) paddq %xmm7,%xmm1 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -8(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm1,-16(%edx) leal 128(%ebp),%ebp decl %ecx jnz .L00600_47_ssse3 movdqa (%ebp),%xmm1 leal -640(%ebp),%ebp movdqu (%ebx),%xmm0 .byte 102,15,56,0,193 movdqa (%ebp),%xmm3 movdqa %xmm1,%xmm2 movdqu 16(%ebx),%xmm1 paddq %xmm0,%xmm3 .byte 102,15,56,0,202 movq %mm4,%mm1 movq -128(%edx),%mm7 
pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -120(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm3,-128(%edx) movdqa 16(%ebp),%xmm4 movdqa %xmm2,%xmm3 movdqu 32(%ebx),%xmm2 paddq %xmm1,%xmm4 .byte 102,15,56,0,211 movq %mm4,%mm1 movq -112(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq 
$25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -104(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm4,-112(%edx) movdqa 32(%ebp),%xmm5 movdqa %xmm3,%xmm4 movdqu 48(%ebx),%xmm3 paddq %xmm2,%xmm5 .byte 102,15,56,0,220 movq %mm4,%mm1 movq -96(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -88(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 
pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm5,-96(%edx) movdqa 48(%ebp),%xmm6 movdqa %xmm4,%xmm5 movdqu 64(%ebx),%xmm4 paddq %xmm3,%xmm6 .byte 102,15,56,0,229 movq %mm4,%mm1 movq -80(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -72(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand 
%mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm6,-80(%edx) movdqa 64(%ebp),%xmm7 movdqa %xmm5,%xmm6 movdqu 80(%ebx),%xmm5 paddq %xmm4,%xmm7 .byte 102,15,56,0,238 movq %mm4,%mm1 movq -64(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -56(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm7,-64(%edx) movdqa %xmm0,(%edx) movdqa 80(%ebp),%xmm0 movdqa %xmm6,%xmm7 movdqu 96(%ebx),%xmm6 paddq %xmm5,%xmm0 .byte 102,15,56,0,247 movq %mm4,%mm1 movq -48(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor 
%mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -40(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm0,-48(%edx) movdqa %xmm1,16(%edx) movdqa 96(%ebp),%xmm1 movdqa %xmm7,%xmm0 movdqu 112(%ebx),%xmm7 paddq %xmm6,%xmm1 .byte 102,15,56,0,248 movq %mm4,%mm1 movq -32(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq 
$5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -24(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm1,-32(%edx) movdqa %xmm2,32(%edx) movdqa 112(%ebp),%xmm2 movdqa (%edx),%xmm0 paddq %xmm7,%xmm2 movq %mm4,%mm1 movq -16(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -8(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 
pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm2,-16(%edx) movq 8(%esp),%mm1 paddq %mm3,%mm0 movq 24(%esp),%mm3 movq 56(%esp),%mm7 pxor %mm1,%mm2 paddq (%esi),%mm0 paddq 8(%esi),%mm1 paddq 16(%esi),%mm2 paddq 24(%esi),%mm3 paddq 32(%esi),%mm4 paddq 40(%esi),%mm5 paddq 48(%esi),%mm6 paddq 56(%esi),%mm7 movq %mm0,(%esi) movq %mm1,8(%esi) movq %mm2,16(%esi) movq %mm3,24(%esi) movq %mm4,32(%esi) movq %mm5,40(%esi) movq %mm6,48(%esi) movq %mm7,56(%esi) cmpl %eax,%edi jb .L005loop_ssse3 movl 76(%edx),%esp emms popl %edi popl %esi popl %ebx popl %ebp ret .size sha512_block_data_order_ssse3,.-.L_sha512_block_data_order_ssse3_begin .align 64 .LK512: .long 3609767458,1116352408 .long 602891725,1899447441 .long 3964484399,3049323471 .long 2173295548,3921009573 .long 4081628472,961987163 .long 3053834265,1508970993 .long 2937671579,2453635748 .long 3664609560,2870763221 .long 2734883394,3624381080 .long 1164996542,310598401 .long 1323610764,607225278 .long 3590304994,1426881987 .long 4068182383,1925078388 .long 991336113,2162078206 .long 633803317,2614888103 .long 3479774868,3248222580 .long 2666613458,3835390401 .long 944711139,4022224774 .long 2341262773,264347078 .long 2007800933,604807628 .long 1495990901,770255983 .long 1856431235,1249150122 .long 3175218132,1555081692 .long 2198950837,1996064986 .long 3999719339,2554220882 .long 766784016,2821834349 .long 2566594879,2952996808 .long 3203337956,3210313671 .long 1034457026,3336571891 .long 2466948901,3584528711 .long 3758326383,113926993 .long 168717936,338241895 .long 1188179964,666307205 .long 1546045734,773529912 .long 1522805485,1294757372 
.long 2643833823,1396182291 .long 2343527390,1695183700 .long 1014477480,1986661051 .long 1206759142,2177026350 .long 344077627,2456956037 .long 1290863460,2730485921 .long 3158454273,2820302411 .long 3505952657,3259730800 .long 106217008,3345764771 .long 3606008344,3516065817 .long 1432725776,3600352804 .long 1467031594,4094571909 .long 851169720,275423344 .long 3100823752,430227734 .long 1363258195,506948616 .long 3750685593,659060556 .long 3785050280,883997877 .long 3318307427,958139571 .long 3812723403,1322822218 .long 2003034995,1537002063 .long 3602036899,1747873779 .long 1575990012,1955562222 .long 1125592928,2024104815 .long 2716904306,2227730452 .long 442776044,2361852424 .long 593698344,2428436474 .long 3733110249,2756734187 .long 2999351573,3204031479 .long 3815920427,3329325298 .long 3928383900,3391569614 .long 566280711,3515267271 .long 3454069534,3940187606 .long 4000239992,4118630271 .long 1914138554,116418474 .long 2731055270,174292421 .long 3203993006,289380356 .long 320620315,460393269 .long 587496836,685471733 .long 1086792851,852142971 .long 365543100,1017036298 .long 2618297676,1126000580 .long 3409855158,1288033470 .long 4234509866,1501505948 .long 987167468,1607167915 .long 1246189591,1816402316 .long 67438087,66051 .long 202182159,134810123 .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97 .byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 .byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 .byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 .byte 62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha512-armv4-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. 
Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) @ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. @ @ Licensed under the OpenSSL license (the "License"). You may not use @ this file except in compliance with the License. You can obtain a copy @ in the file LICENSE in the source distribution or at @ https://www.openssl.org/source/license.html @ ==================================================================== @ Written by Andy Polyakov for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further @ details see http://www.openssl.org/~appro/cryptogams/. @ @ Permission to use under GPL terms is granted. @ ==================================================================== @ SHA512 block procedure for ARMv4. September 2007. @ This code is ~4.5 (four and a half) times faster than code generated @ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue @ Xscale PXA250 core]. @ @ July 2010. @ @ Rescheduling for dual-issue pipeline resulted in 6% improvement on @ Cortex A8 core and ~40 cycles per processed byte. @ February 2011. @ @ Profiler-assisted and platform-specific optimization resulted in 7% @ improvement on Coxtex A8 core and ~38 cycles per byte. @ March 2011. @ @ Add NEON implementation. On Cortex A8 it was measured to process @ one byte in 23.3 cycles or ~60% faster than integer-only code. @ August 2012. @ @ Improve NEON performance by 12% on Snapdragon S4. In absolute @ terms it's 22.6 cycles per byte, which is disappointing result. @ Technical writers asserted that 3-way S4 pipeline can sustain @ multiple NEON instructions per cycle, but dual NEON issue could @ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html @ for further details. On side note Cortex-A15 processes one byte in @ 16 cycles. @ Byte order [in]dependence. 
========================================= @ @ Originally caller was expected to maintain specific *dword* order in @ h[0-7], namely with most significant dword at *lower* address, which @ was reflected in below two parameters as 0 and 4. Now caller is @ expected to maintain native byte order for whole 64-bit values. #ifndef __KERNEL__ # include # define VFP_ABI_PUSH vstmdb sp!,{d8-d15} # define VFP_ABI_POP vldmia sp!,{d8-d15} #else # define __ARM_MAX_ARCH__ 7 # define VFP_ABI_PUSH # define VFP_ABI_POP #endif @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. .arch armv7-a #ifdef __ARMEL__ # define LO 0 # define HI 4 # define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1 #else # define HI 0 # define LO 4 # define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1 #endif .text #if defined(__thumb2__) .syntax unified .thumb # define adrl adr #else .code 32 #endif .type K512,%object .align 5 K512: WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd) WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc) WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019) WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118) WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe) WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2) WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1) WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694) WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3) WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65) WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483) WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5) WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210) WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4) WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725) WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70) WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926) WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df) WORD64(0x650a7354,0x8baf63de, 
0x766a0abb,0x3c77b2a8) WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b) WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001) WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30) WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910) WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8) WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53) WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8) WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb) WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3) WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60) WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec) WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9) WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b) WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207) WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178) WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6) WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b) WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493) WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c) WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a) WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .size K512,.-K512 .globl sha512_block_data_order_nohw .hidden sha512_block_data_order_nohw .type sha512_block_data_order_nohw,%function sha512_block_data_order_nohw: add r2,r1,r2,lsl#7 @ len to point at the end of inp stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} adr r14,K512 sub sp,sp,#9*8 ldr r7,[r0,#32+LO] ldr r8,[r0,#32+HI] ldr r9, [r0,#48+LO] ldr r10, [r0,#48+HI] ldr r11, [r0,#56+LO] ldr r12, [r0,#56+HI] .Loop: str r9, [sp,#48+0] str r10, [sp,#48+4] str r11, [sp,#56+0] str r12, [sp,#56+4] ldr r5,[r0,#0+LO] ldr r6,[r0,#0+HI] ldr r3,[r0,#8+LO] ldr r4,[r0,#8+HI] ldr r9, [r0,#16+LO] ldr r10, [r0,#16+HI] ldr r11, [r0,#24+LO] ldr r12, [r0,#24+HI] str r3,[sp,#8+0] str r4,[sp,#8+4] str r9, [sp,#16+0] str r10, [sp,#16+4] str r11, [sp,#24+0] str r12, [sp,#24+4] ldr r3,[r0,#40+LO] ldr r4,[r0,#40+HI] str r3,[sp,#40+0] str r4,[sp,#40+4] .L00_15: #if 
__ARM_ARCH<7 ldrb r3,[r1,#7] ldrb r9, [r1,#6] ldrb r10, [r1,#5] ldrb r11, [r1,#4] ldrb r4,[r1,#3] ldrb r12, [r1,#2] orr r3,r3,r9,lsl#8 ldrb r9, [r1,#1] orr r3,r3,r10,lsl#16 ldrb r10, [r1],#8 orr r3,r3,r11,lsl#24 orr r4,r4,r12,lsl#8 orr r4,r4,r9,lsl#16 orr r4,r4,r10,lsl#24 #else ldr r3,[r1,#4] ldr r4,[r1],#8 #ifdef __ARMEL__ rev r3,r3 rev r4,r4 #endif #endif @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 mov r9,r7,lsr#14 str r3,[sp,#64+0] mov r10,r8,lsr#14 str r4,[sp,#64+4] eor r9,r9,r8,lsl#18 ldr r11,[sp,#56+0] @ h.lo eor r10,r10,r7,lsl#18 ldr r12,[sp,#56+4] @ h.hi eor r9,r9,r7,lsr#18 eor r10,r10,r8,lsr#18 eor r9,r9,r8,lsl#14 eor r10,r10,r7,lsl#14 eor r9,r9,r8,lsr#9 eor r10,r10,r7,lsr#9 eor r9,r9,r7,lsl#23 eor r10,r10,r8,lsl#23 @ Sigma1(e) adds r3,r3,r9 ldr r9,[sp,#40+0] @ f.lo adc r4,r4,r10 @ T += Sigma1(e) ldr r10,[sp,#40+4] @ f.hi adds r3,r3,r11 ldr r11,[sp,#48+0] @ g.lo adc r4,r4,r12 @ T += h ldr r12,[sp,#48+4] @ g.hi eor r9,r9,r11 str r7,[sp,#32+0] eor r10,r10,r12 str r8,[sp,#32+4] and r9,r9,r7 str r5,[sp,#0+0] and r10,r10,r8 str r6,[sp,#0+4] eor r9,r9,r11 ldr r11,[r14,#LO] @ K[i].lo eor r10,r10,r12 @ Ch(e,f,g) ldr r12,[r14,#HI] @ K[i].hi adds r3,r3,r9 ldr r7,[sp,#24+0] @ d.lo adc r4,r4,r10 @ T += Ch(e,f,g) ldr r8,[sp,#24+4] @ d.hi adds r3,r3,r11 and r9,r11,#0xff adc r4,r4,r12 @ T += K[i] adds r7,r7,r3 ldr r11,[sp,#8+0] @ b.lo adc r8,r8,r4 @ d += T teq r9,#148 ldr r12,[sp,#16+0] @ c.lo #if __ARM_ARCH>=7 it eq @ Thumb2 thing, sanity check in ARM #endif orreq r14,r14,#1 @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 mov r9,r5,lsr#28 mov r10,r6,lsr#28 eor r9,r9,r6,lsl#4 eor r10,r10,r5,lsl#4 eor r9,r9,r6,lsr#2 eor r10,r10,r5,lsr#2 eor r9,r9,r5,lsl#30 eor r10,r10,r6,lsl#30 eor r9,r9,r6,lsr#7 eor r10,r10,r5,lsr#7 eor r9,r9,r5,lsl#25 eor r10,r10,r6,lsl#25 
@ Sigma0(a) adds r3,r3,r9 and r9,r5,r11 adc r4,r4,r10 @ T += Sigma0(a) ldr r10,[sp,#8+4] @ b.hi orr r5,r5,r11 ldr r11,[sp,#16+4] @ c.hi and r5,r5,r12 and r12,r6,r10 orr r6,r6,r10 orr r5,r5,r9 @ Maj(a,b,c).lo and r6,r6,r11 adds r5,r5,r3 orr r6,r6,r12 @ Maj(a,b,c).hi sub sp,sp,#8 adc r6,r6,r4 @ h += T tst r14,#1 add r14,r14,#8 tst r14,#1 beq .L00_15 ldr r9,[sp,#184+0] ldr r10,[sp,#184+4] bic r14,r14,#1 .L16_79: @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25 @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7 mov r3,r9,lsr#1 ldr r11,[sp,#80+0] mov r4,r10,lsr#1 ldr r12,[sp,#80+4] eor r3,r3,r10,lsl#31 eor r4,r4,r9,lsl#31 eor r3,r3,r9,lsr#8 eor r4,r4,r10,lsr#8 eor r3,r3,r10,lsl#24 eor r4,r4,r9,lsl#24 eor r3,r3,r9,lsr#7 eor r4,r4,r10,lsr#7 eor r3,r3,r10,lsl#25 @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26 @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6 mov r9,r11,lsr#19 mov r10,r12,lsr#19 eor r9,r9,r12,lsl#13 eor r10,r10,r11,lsl#13 eor r9,r9,r12,lsr#29 eor r10,r10,r11,lsr#29 eor r9,r9,r11,lsl#3 eor r10,r10,r12,lsl#3 eor r9,r9,r11,lsr#6 eor r10,r10,r12,lsr#6 ldr r11,[sp,#120+0] eor r9,r9,r12,lsl#26 ldr r12,[sp,#120+4] adds r3,r3,r9 ldr r9,[sp,#192+0] adc r4,r4,r10 ldr r10,[sp,#192+4] adds r3,r3,r11 adc r4,r4,r12 adds r3,r3,r9 adc r4,r4,r10 @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 mov r9,r7,lsr#14 str r3,[sp,#64+0] mov r10,r8,lsr#14 str r4,[sp,#64+4] eor r9,r9,r8,lsl#18 ldr r11,[sp,#56+0] @ h.lo eor r10,r10,r7,lsl#18 ldr r12,[sp,#56+4] @ h.hi eor r9,r9,r7,lsr#18 eor r10,r10,r8,lsr#18 eor r9,r9,r8,lsl#14 eor r10,r10,r7,lsl#14 eor r9,r9,r8,lsr#9 eor r10,r10,r7,lsr#9 eor r9,r9,r7,lsl#23 eor r10,r10,r8,lsl#23 @ Sigma1(e) adds r3,r3,r9 ldr r9,[sp,#40+0] @ f.lo adc r4,r4,r10 @ T += Sigma1(e) ldr r10,[sp,#40+4] @ f.hi adds r3,r3,r11 ldr r11,[sp,#48+0] @ g.lo adc r4,r4,r12 @ T 
+= h ldr r12,[sp,#48+4] @ g.hi eor r9,r9,r11 str r7,[sp,#32+0] eor r10,r10,r12 str r8,[sp,#32+4] and r9,r9,r7 str r5,[sp,#0+0] and r10,r10,r8 str r6,[sp,#0+4] eor r9,r9,r11 ldr r11,[r14,#LO] @ K[i].lo eor r10,r10,r12 @ Ch(e,f,g) ldr r12,[r14,#HI] @ K[i].hi adds r3,r3,r9 ldr r7,[sp,#24+0] @ d.lo adc r4,r4,r10 @ T += Ch(e,f,g) ldr r8,[sp,#24+4] @ d.hi adds r3,r3,r11 and r9,r11,#0xff adc r4,r4,r12 @ T += K[i] adds r7,r7,r3 ldr r11,[sp,#8+0] @ b.lo adc r8,r8,r4 @ d += T teq r9,#23 ldr r12,[sp,#16+0] @ c.lo #if __ARM_ARCH>=7 it eq @ Thumb2 thing, sanity check in ARM #endif orreq r14,r14,#1 @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 mov r9,r5,lsr#28 mov r10,r6,lsr#28 eor r9,r9,r6,lsl#4 eor r10,r10,r5,lsl#4 eor r9,r9,r6,lsr#2 eor r10,r10,r5,lsr#2 eor r9,r9,r5,lsl#30 eor r10,r10,r6,lsl#30 eor r9,r9,r6,lsr#7 eor r10,r10,r5,lsr#7 eor r9,r9,r5,lsl#25 eor r10,r10,r6,lsl#25 @ Sigma0(a) adds r3,r3,r9 and r9,r5,r11 adc r4,r4,r10 @ T += Sigma0(a) ldr r10,[sp,#8+4] @ b.hi orr r5,r5,r11 ldr r11,[sp,#16+4] @ c.hi and r5,r5,r12 and r12,r6,r10 orr r6,r6,r10 orr r5,r5,r9 @ Maj(a,b,c).lo and r6,r6,r11 adds r5,r5,r3 orr r6,r6,r12 @ Maj(a,b,c).hi sub sp,sp,#8 adc r6,r6,r4 @ h += T tst r14,#1 add r14,r14,#8 #if __ARM_ARCH>=7 ittt eq @ Thumb2 thing, sanity check in ARM #endif ldreq r9,[sp,#184+0] ldreq r10,[sp,#184+4] beq .L16_79 bic r14,r14,#1 ldr r3,[sp,#8+0] ldr r4,[sp,#8+4] ldr r9, [r0,#0+LO] ldr r10, [r0,#0+HI] ldr r11, [r0,#8+LO] ldr r12, [r0,#8+HI] adds r9,r5,r9 str r9, [r0,#0+LO] adc r10,r6,r10 str r10, [r0,#0+HI] adds r11,r3,r11 str r11, [r0,#8+LO] adc r12,r4,r12 str r12, [r0,#8+HI] ldr r5,[sp,#16+0] ldr r6,[sp,#16+4] ldr r3,[sp,#24+0] ldr r4,[sp,#24+4] ldr r9, [r0,#16+LO] ldr r10, [r0,#16+HI] ldr r11, [r0,#24+LO] ldr r12, [r0,#24+HI] adds r9,r5,r9 str r9, [r0,#16+LO] adc r10,r6,r10 str r10, [r0,#16+HI] adds r11,r3,r11 str r11, [r0,#24+LO] adc r12,r4,r12 str r12, [r0,#24+HI] 
ldr r3,[sp,#40+0] ldr r4,[sp,#40+4] ldr r9, [r0,#32+LO] ldr r10, [r0,#32+HI] ldr r11, [r0,#40+LO] ldr r12, [r0,#40+HI] adds r7,r7,r9 str r7,[r0,#32+LO] adc r8,r8,r10 str r8,[r0,#32+HI] adds r11,r3,r11 str r11, [r0,#40+LO] adc r12,r4,r12 str r12, [r0,#40+HI] ldr r5,[sp,#48+0] ldr r6,[sp,#48+4] ldr r3,[sp,#56+0] ldr r4,[sp,#56+4] ldr r9, [r0,#48+LO] ldr r10, [r0,#48+HI] ldr r11, [r0,#56+LO] ldr r12, [r0,#56+HI] adds r9,r5,r9 str r9, [r0,#48+LO] adc r10,r6,r10 str r10, [r0,#48+HI] adds r11,r3,r11 str r11, [r0,#56+LO] adc r12,r4,r12 str r12, [r0,#56+HI] add sp,sp,#640 sub r14,r14,#640 teq r1,r2 bne .Loop add sp,sp,#8*9 @ destroy frame #if __ARM_ARCH>=5 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} #else ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif .size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .globl sha512_block_data_order_neon .hidden sha512_block_data_order_neon .type sha512_block_data_order_neon,%function .align 4 sha512_block_data_order_neon: dmb @ errata #451034 on early Cortex A8 add r2,r1,r2,lsl#7 @ len to point at the end of inp adr r3,K512 VFP_ABI_PUSH vldmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context .Loop_neon: vshr.u64 d24,d20,#14 @ 0 #if 0<16 vld1.64 {d0},[r1]! @ handles unaligned #endif vshr.u64 d25,d20,#18 #if 0>0 vadd.i64 d16,d30 @ h+=Maj from the past #endif vshr.u64 d26,d20,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d20,#50 vsli.64 d25,d20,#46 vmov d29,d20 vsli.64 d26,d20,#23 #if 0<16 && defined(__ARMEL__) vrev64.8 d0,d0 #endif veor d25,d24 vbsl d29,d21,d22 @ Ch(e,f,g) vshr.u64 d24,d16,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d23 vshr.u64 d25,d16,#34 vsli.64 d24,d16,#36 vadd.i64 d27,d26 vshr.u64 d26,d16,#39 vadd.i64 d28,d0 vsli.64 d25,d16,#30 veor d30,d16,d17 vsli.64 d26,d16,#25 veor d23,d24,d25 vadd.i64 d27,d28 vbsl d30,d18,d17 @ Maj(a,b,c) veor d23,d26 @ Sigma0(a) vadd.i64 d19,d27 vadd.i64 d30,d27 @ vadd.i64 d23,d30 vshr.u64 d24,d19,#14 @ 1 #if 1<16 vld1.64 {d1},[r1]! @ handles unaligned #endif vshr.u64 d25,d19,#18 #if 1>0 vadd.i64 d23,d30 @ h+=Maj from the past #endif vshr.u64 d26,d19,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d19,#50 vsli.64 d25,d19,#46 vmov d29,d19 vsli.64 d26,d19,#23 #if 1<16 && defined(__ARMEL__) vrev64.8 d1,d1 #endif veor d25,d24 vbsl d29,d20,d21 @ Ch(e,f,g) vshr.u64 d24,d23,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d22 vshr.u64 d25,d23,#34 vsli.64 d24,d23,#36 vadd.i64 d27,d26 vshr.u64 d26,d23,#39 vadd.i64 d28,d1 vsli.64 d25,d23,#30 veor d30,d23,d16 vsli.64 d26,d23,#25 veor d22,d24,d25 vadd.i64 d27,d28 vbsl d30,d17,d16 @ Maj(a,b,c) veor d22,d26 @ Sigma0(a) vadd.i64 d18,d27 vadd.i64 d30,d27 @ vadd.i64 d22,d30 vshr.u64 d24,d18,#14 @ 2 #if 2<16 vld1.64 {d2},[r1]! @ handles unaligned #endif vshr.u64 d25,d18,#18 #if 2>0 vadd.i64 d22,d30 @ h+=Maj from the past #endif vshr.u64 d26,d18,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d18,#50 vsli.64 d25,d18,#46 vmov d29,d18 vsli.64 d26,d18,#23 #if 2<16 && defined(__ARMEL__) vrev64.8 d2,d2 #endif veor d25,d24 vbsl d29,d19,d20 @ Ch(e,f,g) vshr.u64 d24,d22,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d21 vshr.u64 d25,d22,#34 vsli.64 d24,d22,#36 vadd.i64 d27,d26 vshr.u64 d26,d22,#39 vadd.i64 d28,d2 vsli.64 d25,d22,#30 veor d30,d22,d23 vsli.64 d26,d22,#25 veor d21,d24,d25 vadd.i64 d27,d28 vbsl d30,d16,d23 @ Maj(a,b,c) veor d21,d26 @ Sigma0(a) vadd.i64 d17,d27 vadd.i64 d30,d27 @ vadd.i64 d21,d30 vshr.u64 d24,d17,#14 @ 3 #if 3<16 vld1.64 {d3},[r1]! @ handles unaligned #endif vshr.u64 d25,d17,#18 #if 3>0 vadd.i64 d21,d30 @ h+=Maj from the past #endif vshr.u64 d26,d17,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d17,#50 vsli.64 d25,d17,#46 vmov d29,d17 vsli.64 d26,d17,#23 #if 3<16 && defined(__ARMEL__) vrev64.8 d3,d3 #endif veor d25,d24 vbsl d29,d18,d19 @ Ch(e,f,g) vshr.u64 d24,d21,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d20 vshr.u64 d25,d21,#34 vsli.64 d24,d21,#36 vadd.i64 d27,d26 vshr.u64 d26,d21,#39 vadd.i64 d28,d3 vsli.64 d25,d21,#30 veor d30,d21,d22 vsli.64 d26,d21,#25 veor d20,d24,d25 vadd.i64 d27,d28 vbsl d30,d23,d22 @ Maj(a,b,c) veor d20,d26 @ Sigma0(a) vadd.i64 d16,d27 vadd.i64 d30,d27 @ vadd.i64 d20,d30 vshr.u64 d24,d16,#14 @ 4 #if 4<16 vld1.64 {d4},[r1]! @ handles unaligned #endif vshr.u64 d25,d16,#18 #if 4>0 vadd.i64 d20,d30 @ h+=Maj from the past #endif vshr.u64 d26,d16,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d16,#50 vsli.64 d25,d16,#46 vmov d29,d16 vsli.64 d26,d16,#23 #if 4<16 && defined(__ARMEL__) vrev64.8 d4,d4 #endif veor d25,d24 vbsl d29,d17,d18 @ Ch(e,f,g) vshr.u64 d24,d20,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d19 vshr.u64 d25,d20,#34 vsli.64 d24,d20,#36 vadd.i64 d27,d26 vshr.u64 d26,d20,#39 vadd.i64 d28,d4 vsli.64 d25,d20,#30 veor d30,d20,d21 vsli.64 d26,d20,#25 veor d19,d24,d25 vadd.i64 d27,d28 vbsl d30,d22,d21 @ Maj(a,b,c) veor d19,d26 @ Sigma0(a) vadd.i64 d23,d27 vadd.i64 d30,d27 @ vadd.i64 d19,d30 vshr.u64 d24,d23,#14 @ 5 #if 5<16 vld1.64 {d5},[r1]! @ handles unaligned #endif vshr.u64 d25,d23,#18 #if 5>0 vadd.i64 d19,d30 @ h+=Maj from the past #endif vshr.u64 d26,d23,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d23,#50 vsli.64 d25,d23,#46 vmov d29,d23 vsli.64 d26,d23,#23 #if 5<16 && defined(__ARMEL__) vrev64.8 d5,d5 #endif veor d25,d24 vbsl d29,d16,d17 @ Ch(e,f,g) vshr.u64 d24,d19,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d18 vshr.u64 d25,d19,#34 vsli.64 d24,d19,#36 vadd.i64 d27,d26 vshr.u64 d26,d19,#39 vadd.i64 d28,d5 vsli.64 d25,d19,#30 veor d30,d19,d20 vsli.64 d26,d19,#25 veor d18,d24,d25 vadd.i64 d27,d28 vbsl d30,d21,d20 @ Maj(a,b,c) veor d18,d26 @ Sigma0(a) vadd.i64 d22,d27 vadd.i64 d30,d27 @ vadd.i64 d18,d30 vshr.u64 d24,d22,#14 @ 6 #if 6<16 vld1.64 {d6},[r1]! @ handles unaligned #endif vshr.u64 d25,d22,#18 #if 6>0 vadd.i64 d18,d30 @ h+=Maj from the past #endif vshr.u64 d26,d22,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d22,#50 vsli.64 d25,d22,#46 vmov d29,d22 vsli.64 d26,d22,#23 #if 6<16 && defined(__ARMEL__) vrev64.8 d6,d6 #endif veor d25,d24 vbsl d29,d23,d16 @ Ch(e,f,g) vshr.u64 d24,d18,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d17 vshr.u64 d25,d18,#34 vsli.64 d24,d18,#36 vadd.i64 d27,d26 vshr.u64 d26,d18,#39 vadd.i64 d28,d6 vsli.64 d25,d18,#30 veor d30,d18,d19 vsli.64 d26,d18,#25 veor d17,d24,d25 vadd.i64 d27,d28 vbsl d30,d20,d19 @ Maj(a,b,c) veor d17,d26 @ Sigma0(a) vadd.i64 d21,d27 vadd.i64 d30,d27 @ vadd.i64 d17,d30 vshr.u64 d24,d21,#14 @ 7 #if 7<16 vld1.64 {d7},[r1]! @ handles unaligned #endif vshr.u64 d25,d21,#18 #if 7>0 vadd.i64 d17,d30 @ h+=Maj from the past #endif vshr.u64 d26,d21,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d21,#50 vsli.64 d25,d21,#46 vmov d29,d21 vsli.64 d26,d21,#23 #if 7<16 && defined(__ARMEL__) vrev64.8 d7,d7 #endif veor d25,d24 vbsl d29,d22,d23 @ Ch(e,f,g) vshr.u64 d24,d17,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d16 vshr.u64 d25,d17,#34 vsli.64 d24,d17,#36 vadd.i64 d27,d26 vshr.u64 d26,d17,#39 vadd.i64 d28,d7 vsli.64 d25,d17,#30 veor d30,d17,d18 vsli.64 d26,d17,#25 veor d16,d24,d25 vadd.i64 d27,d28 vbsl d30,d19,d18 @ Maj(a,b,c) veor d16,d26 @ Sigma0(a) vadd.i64 d20,d27 vadd.i64 d30,d27 @ vadd.i64 d16,d30 vshr.u64 d24,d20,#14 @ 8 #if 8<16 vld1.64 {d8},[r1]! @ handles unaligned #endif vshr.u64 d25,d20,#18 #if 8>0 vadd.i64 d16,d30 @ h+=Maj from the past #endif vshr.u64 d26,d20,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d20,#50 vsli.64 d25,d20,#46 vmov d29,d20 vsli.64 d26,d20,#23 #if 8<16 && defined(__ARMEL__) vrev64.8 d8,d8 #endif veor d25,d24 vbsl d29,d21,d22 @ Ch(e,f,g) vshr.u64 d24,d16,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d23 vshr.u64 d25,d16,#34 vsli.64 d24,d16,#36 vadd.i64 d27,d26 vshr.u64 d26,d16,#39 vadd.i64 d28,d8 vsli.64 d25,d16,#30 veor d30,d16,d17 vsli.64 d26,d16,#25 veor d23,d24,d25 vadd.i64 d27,d28 vbsl d30,d18,d17 @ Maj(a,b,c) veor d23,d26 @ Sigma0(a) vadd.i64 d19,d27 vadd.i64 d30,d27 @ vadd.i64 d23,d30 vshr.u64 d24,d19,#14 @ 9 #if 9<16 vld1.64 {d9},[r1]! @ handles unaligned #endif vshr.u64 d25,d19,#18 #if 9>0 vadd.i64 d23,d30 @ h+=Maj from the past #endif vshr.u64 d26,d19,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d19,#50 vsli.64 d25,d19,#46 vmov d29,d19 vsli.64 d26,d19,#23 #if 9<16 && defined(__ARMEL__) vrev64.8 d9,d9 #endif veor d25,d24 vbsl d29,d20,d21 @ Ch(e,f,g) vshr.u64 d24,d23,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d22 vshr.u64 d25,d23,#34 vsli.64 d24,d23,#36 vadd.i64 d27,d26 vshr.u64 d26,d23,#39 vadd.i64 d28,d9 vsli.64 d25,d23,#30 veor d30,d23,d16 vsli.64 d26,d23,#25 veor d22,d24,d25 vadd.i64 d27,d28 vbsl d30,d17,d16 @ Maj(a,b,c) veor d22,d26 @ Sigma0(a) vadd.i64 d18,d27 vadd.i64 d30,d27 @ vadd.i64 d22,d30 vshr.u64 d24,d18,#14 @ 10 #if 10<16 vld1.64 {d10},[r1]! @ handles unaligned #endif vshr.u64 d25,d18,#18 #if 10>0 vadd.i64 d22,d30 @ h+=Maj from the past #endif vshr.u64 d26,d18,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d18,#50 vsli.64 d25,d18,#46 vmov d29,d18 vsli.64 d26,d18,#23 #if 10<16 && defined(__ARMEL__) vrev64.8 d10,d10 #endif veor d25,d24 vbsl d29,d19,d20 @ Ch(e,f,g) vshr.u64 d24,d22,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d21 vshr.u64 d25,d22,#34 vsli.64 d24,d22,#36 vadd.i64 d27,d26 vshr.u64 d26,d22,#39 vadd.i64 d28,d10 vsli.64 d25,d22,#30 veor d30,d22,d23 vsli.64 d26,d22,#25 veor d21,d24,d25 vadd.i64 d27,d28 vbsl d30,d16,d23 @ Maj(a,b,c) veor d21,d26 @ Sigma0(a) vadd.i64 d17,d27 vadd.i64 d30,d27 @ vadd.i64 d21,d30 vshr.u64 d24,d17,#14 @ 11 #if 11<16 vld1.64 {d11},[r1]! @ handles unaligned #endif vshr.u64 d25,d17,#18 #if 11>0 vadd.i64 d21,d30 @ h+=Maj from the past #endif vshr.u64 d26,d17,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d17,#50 vsli.64 d25,d17,#46 vmov d29,d17 vsli.64 d26,d17,#23 #if 11<16 && defined(__ARMEL__) vrev64.8 d11,d11 #endif veor d25,d24 vbsl d29,d18,d19 @ Ch(e,f,g) vshr.u64 d24,d21,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d20 vshr.u64 d25,d21,#34 vsli.64 d24,d21,#36 vadd.i64 d27,d26 vshr.u64 d26,d21,#39 vadd.i64 d28,d11 vsli.64 d25,d21,#30 veor d30,d21,d22 vsli.64 d26,d21,#25 veor d20,d24,d25 vadd.i64 d27,d28 vbsl d30,d23,d22 @ Maj(a,b,c) veor d20,d26 @ Sigma0(a) vadd.i64 d16,d27 vadd.i64 d30,d27 @ vadd.i64 d20,d30 vshr.u64 d24,d16,#14 @ 12 #if 12<16 vld1.64 {d12},[r1]! @ handles unaligned #endif vshr.u64 d25,d16,#18 #if 12>0 vadd.i64 d20,d30 @ h+=Maj from the past #endif vshr.u64 d26,d16,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d16,#50 vsli.64 d25,d16,#46 vmov d29,d16 vsli.64 d26,d16,#23 #if 12<16 && defined(__ARMEL__) vrev64.8 d12,d12 #endif veor d25,d24 vbsl d29,d17,d18 @ Ch(e,f,g) vshr.u64 d24,d20,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d19 vshr.u64 d25,d20,#34 vsli.64 d24,d20,#36 vadd.i64 d27,d26 vshr.u64 d26,d20,#39 vadd.i64 d28,d12 vsli.64 d25,d20,#30 veor d30,d20,d21 vsli.64 d26,d20,#25 veor d19,d24,d25 vadd.i64 d27,d28 vbsl d30,d22,d21 @ Maj(a,b,c) veor d19,d26 @ Sigma0(a) vadd.i64 d23,d27 vadd.i64 d30,d27 @ vadd.i64 d19,d30 vshr.u64 d24,d23,#14 @ 13 #if 13<16 vld1.64 {d13},[r1]! @ handles unaligned #endif vshr.u64 d25,d23,#18 #if 13>0 vadd.i64 d19,d30 @ h+=Maj from the past #endif vshr.u64 d26,d23,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d23,#50 vsli.64 d25,d23,#46 vmov d29,d23 vsli.64 d26,d23,#23 #if 13<16 && defined(__ARMEL__) vrev64.8 d13,d13 #endif veor d25,d24 vbsl d29,d16,d17 @ Ch(e,f,g) vshr.u64 d24,d19,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d18 vshr.u64 d25,d19,#34 vsli.64 d24,d19,#36 vadd.i64 d27,d26 vshr.u64 d26,d19,#39 vadd.i64 d28,d13 vsli.64 d25,d19,#30 veor d30,d19,d20 vsli.64 d26,d19,#25 veor d18,d24,d25 vadd.i64 d27,d28 vbsl d30,d21,d20 @ Maj(a,b,c) veor d18,d26 @ Sigma0(a) vadd.i64 d22,d27 vadd.i64 d30,d27 @ vadd.i64 d18,d30 vshr.u64 d24,d22,#14 @ 14 #if 14<16 vld1.64 {d14},[r1]! @ handles unaligned #endif vshr.u64 d25,d22,#18 #if 14>0 vadd.i64 d18,d30 @ h+=Maj from the past #endif vshr.u64 d26,d22,#41 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d22,#50 vsli.64 d25,d22,#46 vmov d29,d22 vsli.64 d26,d22,#23 #if 14<16 && defined(__ARMEL__) vrev64.8 d14,d14 #endif veor d25,d24 vbsl d29,d23,d16 @ Ch(e,f,g) vshr.u64 d24,d18,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d17 vshr.u64 d25,d18,#34 vsli.64 d24,d18,#36 vadd.i64 d27,d26 vshr.u64 d26,d18,#39 vadd.i64 d28,d14 vsli.64 d25,d18,#30 veor d30,d18,d19 vsli.64 d26,d18,#25 veor d17,d24,d25 vadd.i64 d27,d28 vbsl d30,d20,d19 @ Maj(a,b,c) veor d17,d26 @ Sigma0(a) vadd.i64 d21,d27 vadd.i64 d30,d27 @ vadd.i64 d17,d30 vshr.u64 d24,d21,#14 @ 15 #if 15<16 vld1.64 {d15},[r1]! @ handles unaligned #endif vshr.u64 d25,d21,#18 #if 15>0 vadd.i64 d17,d30 @ h+=Maj from the past #endif vshr.u64 d26,d21,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d21,#50 vsli.64 d25,d21,#46 vmov d29,d21 vsli.64 d26,d21,#23 #if 15<16 && defined(__ARMEL__) vrev64.8 d15,d15 #endif veor d25,d24 vbsl d29,d22,d23 @ Ch(e,f,g) vshr.u64 d24,d17,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d16 vshr.u64 d25,d17,#34 vsli.64 d24,d17,#36 vadd.i64 d27,d26 vshr.u64 d26,d17,#39 vadd.i64 d28,d15 vsli.64 d25,d17,#30 veor d30,d17,d18 vsli.64 d26,d17,#25 veor d16,d24,d25 vadd.i64 d27,d28 vbsl d30,d19,d18 @ Maj(a,b,c) veor d16,d26 @ Sigma0(a) vadd.i64 d20,d27 vadd.i64 d30,d27 @ vadd.i64 d16,d30 mov r12,#4 .L16_79_neon: subs r12,#1 vshr.u64 q12,q7,#19 vshr.u64 q13,q7,#61 vadd.i64 d16,d30 @ h+=Maj from the past vshr.u64 q15,q7,#6 vsli.64 q12,q7,#45 vext.8 q14,q0,q1,#8 @ X[i+1] vsli.64 q13,q7,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q0,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q4,q5,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d20,#14 @ from NEON_00_15 vadd.i64 q0,q14 vshr.u64 d25,d20,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d20,#41 @ from NEON_00_15 vadd.i64 q0,q15 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d20,#50 vsli.64 d25,d20,#46 vmov d29,d20 vsli.64 d26,d20,#23 #if 16<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d21,d22 @ Ch(e,f,g) vshr.u64 d24,d16,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d23 vshr.u64 d25,d16,#34 vsli.64 d24,d16,#36 vadd.i64 d27,d26 vshr.u64 d26,d16,#39 vadd.i64 d28,d0 vsli.64 d25,d16,#30 veor d30,d16,d17 vsli.64 d26,d16,#25 veor d23,d24,d25 vadd.i64 d27,d28 vbsl d30,d18,d17 @ Maj(a,b,c) veor d23,d26 @ Sigma0(a) vadd.i64 d19,d27 vadd.i64 d30,d27 @ vadd.i64 d23,d30 vshr.u64 d24,d19,#14 @ 17 #if 17<16 vld1.64 {d1},[r1]! @ handles unaligned #endif vshr.u64 d25,d19,#18 #if 17>0 vadd.i64 d23,d30 @ h+=Maj from the past #endif vshr.u64 d26,d19,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d19,#50 vsli.64 d25,d19,#46 vmov d29,d19 vsli.64 d26,d19,#23 #if 17<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d20,d21 @ Ch(e,f,g) vshr.u64 d24,d23,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d22 vshr.u64 d25,d23,#34 vsli.64 d24,d23,#36 vadd.i64 d27,d26 vshr.u64 d26,d23,#39 vadd.i64 d28,d1 vsli.64 d25,d23,#30 veor d30,d23,d16 vsli.64 d26,d23,#25 veor d22,d24,d25 vadd.i64 d27,d28 vbsl d30,d17,d16 @ Maj(a,b,c) veor d22,d26 @ Sigma0(a) vadd.i64 d18,d27 vadd.i64 d30,d27 @ vadd.i64 d22,d30 vshr.u64 q12,q0,#19 vshr.u64 q13,q0,#61 vadd.i64 d22,d30 @ h+=Maj from the past vshr.u64 q15,q0,#6 vsli.64 q12,q0,#45 vext.8 q14,q1,q2,#8 @ X[i+1] vsli.64 q13,q0,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q1,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q5,q6,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d18,#14 @ from NEON_00_15 vadd.i64 q1,q14 vshr.u64 d25,d18,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d18,#41 @ from NEON_00_15 vadd.i64 q1,q15 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d18,#50 vsli.64 d25,d18,#46 vmov d29,d18 vsli.64 d26,d18,#23 #if 18<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d19,d20 @ Ch(e,f,g) vshr.u64 d24,d22,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d21 vshr.u64 d25,d22,#34 vsli.64 d24,d22,#36 vadd.i64 d27,d26 vshr.u64 d26,d22,#39 vadd.i64 d28,d2 vsli.64 d25,d22,#30 veor d30,d22,d23 vsli.64 d26,d22,#25 veor d21,d24,d25 vadd.i64 d27,d28 vbsl d30,d16,d23 @ Maj(a,b,c) veor d21,d26 @ Sigma0(a) vadd.i64 d17,d27 vadd.i64 d30,d27 @ vadd.i64 d21,d30 vshr.u64 d24,d17,#14 @ 19 #if 19<16 vld1.64 {d3},[r1]! @ handles unaligned #endif vshr.u64 d25,d17,#18 #if 19>0 vadd.i64 d21,d30 @ h+=Maj from the past #endif vshr.u64 d26,d17,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d17,#50 vsli.64 d25,d17,#46 vmov d29,d17 vsli.64 d26,d17,#23 #if 19<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d18,d19 @ Ch(e,f,g) vshr.u64 d24,d21,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d20 vshr.u64 d25,d21,#34 vsli.64 d24,d21,#36 vadd.i64 d27,d26 vshr.u64 d26,d21,#39 vadd.i64 d28,d3 vsli.64 d25,d21,#30 veor d30,d21,d22 vsli.64 d26,d21,#25 veor d20,d24,d25 vadd.i64 d27,d28 vbsl d30,d23,d22 @ Maj(a,b,c) veor d20,d26 @ Sigma0(a) vadd.i64 d16,d27 vadd.i64 d30,d27 @ vadd.i64 d20,d30 vshr.u64 q12,q1,#19 vshr.u64 q13,q1,#61 vadd.i64 d20,d30 @ h+=Maj from the past vshr.u64 q15,q1,#6 vsli.64 q12,q1,#45 vext.8 q14,q2,q3,#8 @ X[i+1] vsli.64 q13,q1,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q2,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q6,q7,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d16,#14 @ from NEON_00_15 vadd.i64 q2,q14 vshr.u64 d25,d16,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d16,#41 @ from NEON_00_15 vadd.i64 q2,q15 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d16,#50 vsli.64 d25,d16,#46 vmov d29,d16 vsli.64 d26,d16,#23 #if 20<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d17,d18 @ Ch(e,f,g) vshr.u64 d24,d20,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d19 vshr.u64 d25,d20,#34 vsli.64 d24,d20,#36 vadd.i64 d27,d26 vshr.u64 d26,d20,#39 vadd.i64 d28,d4 vsli.64 d25,d20,#30 veor d30,d20,d21 vsli.64 d26,d20,#25 veor d19,d24,d25 vadd.i64 d27,d28 vbsl d30,d22,d21 @ Maj(a,b,c) veor d19,d26 @ Sigma0(a) vadd.i64 d23,d27 vadd.i64 d30,d27 @ vadd.i64 d19,d30 vshr.u64 d24,d23,#14 @ 21 #if 21<16 vld1.64 {d5},[r1]! @ handles unaligned #endif vshr.u64 d25,d23,#18 #if 21>0 vadd.i64 d19,d30 @ h+=Maj from the past #endif vshr.u64 d26,d23,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d23,#50 vsli.64 d25,d23,#46 vmov d29,d23 vsli.64 d26,d23,#23 #if 21<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d16,d17 @ Ch(e,f,g) vshr.u64 d24,d19,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d18 vshr.u64 d25,d19,#34 vsli.64 d24,d19,#36 vadd.i64 d27,d26 vshr.u64 d26,d19,#39 vadd.i64 d28,d5 vsli.64 d25,d19,#30 veor d30,d19,d20 vsli.64 d26,d19,#25 veor d18,d24,d25 vadd.i64 d27,d28 vbsl d30,d21,d20 @ Maj(a,b,c) veor d18,d26 @ Sigma0(a) vadd.i64 d22,d27 vadd.i64 d30,d27 @ vadd.i64 d18,d30 vshr.u64 q12,q2,#19 vshr.u64 q13,q2,#61 vadd.i64 d18,d30 @ h+=Maj from the past vshr.u64 q15,q2,#6 vsli.64 q12,q2,#45 vext.8 q14,q3,q4,#8 @ X[i+1] vsli.64 q13,q2,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q3,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q7,q0,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d22,#14 @ from NEON_00_15 vadd.i64 q3,q14 vshr.u64 d25,d22,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d22,#41 @ from NEON_00_15 vadd.i64 q3,q15 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d22,#50 vsli.64 d25,d22,#46 vmov d29,d22 vsli.64 d26,d22,#23 #if 22<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d23,d16 @ Ch(e,f,g) vshr.u64 d24,d18,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d17 vshr.u64 d25,d18,#34 vsli.64 d24,d18,#36 vadd.i64 d27,d26 vshr.u64 d26,d18,#39 vadd.i64 d28,d6 vsli.64 d25,d18,#30 veor d30,d18,d19 vsli.64 d26,d18,#25 veor d17,d24,d25 vadd.i64 d27,d28 vbsl d30,d20,d19 @ Maj(a,b,c) veor d17,d26 @ Sigma0(a) vadd.i64 d21,d27 vadd.i64 d30,d27 @ vadd.i64 d17,d30 vshr.u64 d24,d21,#14 @ 23 #if 23<16 vld1.64 {d7},[r1]! @ handles unaligned #endif vshr.u64 d25,d21,#18 #if 23>0 vadd.i64 d17,d30 @ h+=Maj from the past #endif vshr.u64 d26,d21,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d21,#50 vsli.64 d25,d21,#46 vmov d29,d21 vsli.64 d26,d21,#23 #if 23<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d22,d23 @ Ch(e,f,g) vshr.u64 d24,d17,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d16 vshr.u64 d25,d17,#34 vsli.64 d24,d17,#36 vadd.i64 d27,d26 vshr.u64 d26,d17,#39 vadd.i64 d28,d7 vsli.64 d25,d17,#30 veor d30,d17,d18 vsli.64 d26,d17,#25 veor d16,d24,d25 vadd.i64 d27,d28 vbsl d30,d19,d18 @ Maj(a,b,c) veor d16,d26 @ Sigma0(a) vadd.i64 d20,d27 vadd.i64 d30,d27 @ vadd.i64 d16,d30 vshr.u64 q12,q3,#19 vshr.u64 q13,q3,#61 vadd.i64 d16,d30 @ h+=Maj from the past vshr.u64 q15,q3,#6 vsli.64 q12,q3,#45 vext.8 q14,q4,q5,#8 @ X[i+1] vsli.64 q13,q3,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q4,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q0,q1,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d20,#14 @ from NEON_00_15 vadd.i64 q4,q14 vshr.u64 d25,d20,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d20,#41 @ from NEON_00_15 vadd.i64 q4,q15 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d20,#50 vsli.64 d25,d20,#46 vmov d29,d20 vsli.64 d26,d20,#23 #if 24<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d21,d22 @ Ch(e,f,g) vshr.u64 d24,d16,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d23 vshr.u64 d25,d16,#34 vsli.64 d24,d16,#36 vadd.i64 d27,d26 vshr.u64 d26,d16,#39 vadd.i64 d28,d8 vsli.64 d25,d16,#30 veor d30,d16,d17 vsli.64 d26,d16,#25 veor d23,d24,d25 vadd.i64 d27,d28 vbsl d30,d18,d17 @ Maj(a,b,c) veor d23,d26 @ Sigma0(a) vadd.i64 d19,d27 vadd.i64 d30,d27 @ vadd.i64 d23,d30 vshr.u64 d24,d19,#14 @ 25 #if 25<16 vld1.64 {d9},[r1]! @ handles unaligned #endif vshr.u64 d25,d19,#18 #if 25>0 vadd.i64 d23,d30 @ h+=Maj from the past #endif vshr.u64 d26,d19,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d19,#50 vsli.64 d25,d19,#46 vmov d29,d19 vsli.64 d26,d19,#23 #if 25<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d20,d21 @ Ch(e,f,g) vshr.u64 d24,d23,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d22 vshr.u64 d25,d23,#34 vsli.64 d24,d23,#36 vadd.i64 d27,d26 vshr.u64 d26,d23,#39 vadd.i64 d28,d9 vsli.64 d25,d23,#30 veor d30,d23,d16 vsli.64 d26,d23,#25 veor d22,d24,d25 vadd.i64 d27,d28 vbsl d30,d17,d16 @ Maj(a,b,c) veor d22,d26 @ Sigma0(a) vadd.i64 d18,d27 vadd.i64 d30,d27 @ vadd.i64 d22,d30 vshr.u64 q12,q4,#19 vshr.u64 q13,q4,#61 vadd.i64 d22,d30 @ h+=Maj from the past vshr.u64 q15,q4,#6 vsli.64 q12,q4,#45 vext.8 q14,q5,q6,#8 @ X[i+1] vsli.64 q13,q4,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q5,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q1,q2,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d18,#14 @ from NEON_00_15 vadd.i64 q5,q14 vshr.u64 d25,d18,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d18,#41 @ from NEON_00_15 vadd.i64 q5,q15 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d18,#50 vsli.64 d25,d18,#46 vmov d29,d18 vsli.64 d26,d18,#23 #if 26<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d19,d20 @ Ch(e,f,g) vshr.u64 d24,d22,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d21 vshr.u64 d25,d22,#34 vsli.64 d24,d22,#36 vadd.i64 d27,d26 vshr.u64 d26,d22,#39 vadd.i64 d28,d10 vsli.64 d25,d22,#30 veor d30,d22,d23 vsli.64 d26,d22,#25 veor d21,d24,d25 vadd.i64 d27,d28 vbsl d30,d16,d23 @ Maj(a,b,c) veor d21,d26 @ Sigma0(a) vadd.i64 d17,d27 vadd.i64 d30,d27 @ vadd.i64 d21,d30 vshr.u64 d24,d17,#14 @ 27 #if 27<16 vld1.64 {d11},[r1]! @ handles unaligned #endif vshr.u64 d25,d17,#18 #if 27>0 vadd.i64 d21,d30 @ h+=Maj from the past #endif vshr.u64 d26,d17,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d17,#50 vsli.64 d25,d17,#46 vmov d29,d17 vsli.64 d26,d17,#23 #if 27<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d18,d19 @ Ch(e,f,g) vshr.u64 d24,d21,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d20 vshr.u64 d25,d21,#34 vsli.64 d24,d21,#36 vadd.i64 d27,d26 vshr.u64 d26,d21,#39 vadd.i64 d28,d11 vsli.64 d25,d21,#30 veor d30,d21,d22 vsli.64 d26,d21,#25 veor d20,d24,d25 vadd.i64 d27,d28 vbsl d30,d23,d22 @ Maj(a,b,c) veor d20,d26 @ Sigma0(a) vadd.i64 d16,d27 vadd.i64 d30,d27 @ vadd.i64 d20,d30 vshr.u64 q12,q5,#19 vshr.u64 q13,q5,#61 vadd.i64 d20,d30 @ h+=Maj from the past vshr.u64 q15,q5,#6 vsli.64 q12,q5,#45 vext.8 q14,q6,q7,#8 @ X[i+1] vsli.64 q13,q5,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q6,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q2,q3,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d16,#14 @ from NEON_00_15 vadd.i64 q6,q14 vshr.u64 d25,d16,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d16,#41 @ from NEON_00_15 vadd.i64 q6,q15 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d16,#50 vsli.64 d25,d16,#46 vmov d29,d16 vsli.64 d26,d16,#23 #if 28<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d17,d18 @ Ch(e,f,g) vshr.u64 d24,d20,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d19 vshr.u64 d25,d20,#34 vsli.64 d24,d20,#36 vadd.i64 d27,d26 vshr.u64 d26,d20,#39 vadd.i64 d28,d12 vsli.64 d25,d20,#30 veor d30,d20,d21 vsli.64 d26,d20,#25 veor d19,d24,d25 vadd.i64 d27,d28 vbsl d30,d22,d21 @ Maj(a,b,c) veor d19,d26 @ Sigma0(a) vadd.i64 d23,d27 vadd.i64 d30,d27 @ vadd.i64 d19,d30 vshr.u64 d24,d23,#14 @ 29 #if 29<16 vld1.64 {d13},[r1]! @ handles unaligned #endif vshr.u64 d25,d23,#18 #if 29>0 vadd.i64 d19,d30 @ h+=Maj from the past #endif vshr.u64 d26,d23,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d23,#50 vsli.64 d25,d23,#46 vmov d29,d23 vsli.64 d26,d23,#23 #if 29<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d16,d17 @ Ch(e,f,g) vshr.u64 d24,d19,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d18 vshr.u64 d25,d19,#34 vsli.64 d24,d19,#36 vadd.i64 d27,d26 vshr.u64 d26,d19,#39 vadd.i64 d28,d13 vsli.64 d25,d19,#30 veor d30,d19,d20 vsli.64 d26,d19,#25 veor d18,d24,d25 vadd.i64 d27,d28 vbsl d30,d21,d20 @ Maj(a,b,c) veor d18,d26 @ Sigma0(a) vadd.i64 d22,d27 vadd.i64 d30,d27 @ vadd.i64 d18,d30 vshr.u64 q12,q6,#19 vshr.u64 q13,q6,#61 vadd.i64 d18,d30 @ h+=Maj from the past vshr.u64 q15,q6,#6 vsli.64 q12,q6,#45 vext.8 q14,q7,q0,#8 @ X[i+1] vsli.64 q13,q6,#3 veor q15,q12 vshr.u64 q12,q14,#1 veor q15,q13 @ sigma1(X[i+14]) vshr.u64 q13,q14,#8 vadd.i64 q7,q15 vshr.u64 q15,q14,#7 vsli.64 q12,q14,#63 vsli.64 q13,q14,#56 vext.8 q14,q3,q4,#8 @ X[i+9] veor q15,q12 vshr.u64 d24,d22,#14 @ from NEON_00_15 vadd.i64 q7,q14 vshr.u64 d25,d22,#18 @ from NEON_00_15 veor q15,q13 @ sigma0(X[i+1]) vshr.u64 d26,d22,#41 @ from NEON_00_15 vadd.i64 q7,q15 vld1.64 {d28},[r3,:64]! 
@ K[i++] vsli.64 d24,d22,#50 vsli.64 d25,d22,#46 vmov d29,d22 vsli.64 d26,d22,#23 #if 30<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d23,d16 @ Ch(e,f,g) vshr.u64 d24,d18,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d17 vshr.u64 d25,d18,#34 vsli.64 d24,d18,#36 vadd.i64 d27,d26 vshr.u64 d26,d18,#39 vadd.i64 d28,d14 vsli.64 d25,d18,#30 veor d30,d18,d19 vsli.64 d26,d18,#25 veor d17,d24,d25 vadd.i64 d27,d28 vbsl d30,d20,d19 @ Maj(a,b,c) veor d17,d26 @ Sigma0(a) vadd.i64 d21,d27 vadd.i64 d30,d27 @ vadd.i64 d17,d30 vshr.u64 d24,d21,#14 @ 31 #if 31<16 vld1.64 {d15},[r1]! @ handles unaligned #endif vshr.u64 d25,d21,#18 #if 31>0 vadd.i64 d17,d30 @ h+=Maj from the past #endif vshr.u64 d26,d21,#41 vld1.64 {d28},[r3,:64]! @ K[i++] vsli.64 d24,d21,#50 vsli.64 d25,d21,#46 vmov d29,d21 vsli.64 d26,d21,#23 #if 31<16 && defined(__ARMEL__) vrev64.8 , #endif veor d25,d24 vbsl d29,d22,d23 @ Ch(e,f,g) vshr.u64 d24,d17,#28 veor d26,d25 @ Sigma1(e) vadd.i64 d27,d29,d16 vshr.u64 d25,d17,#34 vsli.64 d24,d17,#36 vadd.i64 d27,d26 vshr.u64 d26,d17,#39 vadd.i64 d28,d15 vsli.64 d25,d17,#30 veor d30,d17,d18 vsli.64 d26,d17,#25 veor d16,d24,d25 vadd.i64 d27,d28 vbsl d30,d19,d18 @ Maj(a,b,c) veor d16,d26 @ Sigma0(a) vadd.i64 d20,d27 vadd.i64 d30,d27 @ vadd.i64 d16,d30 bne .L16_79_neon vadd.i64 d16,d30 @ h+=Maj from the past vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp vadd.i64 q8,q12 @ vectorized accumulate vadd.i64 q9,q13 vadd.i64 q10,q14 vadd.i64 q11,q15 vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context teq r1,r2 sub r3,#640 @ rewind K512 bne .Loop_neon VFP_ABI_POP bx lr @ .word 0xe12fff1e .size sha512_block_data_order_neon,.-sha512_block_data_order_neon #endif .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // 
!OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha512-armv8-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. // // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. 
// (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include #endif .text .globl _sha512_block_data_order_nohw .private_extern _sha512_block_data_order_nohw .align 6 _sha512_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*8 ldp x20,x21,[x0] // load context ldp x22,x23,[x0,#2*8] ldp x24,x25,[x0,#4*8] add x2,x1,x2,lsl#7 // end of input ldp x26,x27,[x0,#6*8] adrp x30,LK512@PAGE add x30,x30,LK512@PAGEOFF stp x0,x2,[x29,#96] Loop: ldp x3,x4,[x1],#2*8 ldr x19,[x30],#8 // *K++ eor x28,x21,x22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev x3,x3 // 0 #endif ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x6,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x3 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x4,x4 // 1 #endif ldp x5,x6,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x7,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x4 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x27,#28 add 
x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x5,x5 // 2 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x8,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x5 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x6,x6 // 3 #endif ldp x7,x8,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x9,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x6 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x7,x7 // 4 #endif add x24,x24,x17 // h+=Sigma0(a) ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x10,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x7 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x10,ror#18 // Sigma1(e) ror x10,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add 
x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x10,x17,ror#34 // Sigma0(a) add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x8,x8 // 5 #endif ldp x9,x10,[x1],#2*8 add x23,x23,x17 // h+=Sigma0(a) ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x11,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x8 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x11,ror#18 // Sigma1(e) ror x11,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x11,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x9,x9 // 6 #endif add x22,x22,x17 // h+=Sigma0(a) ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x12,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x9 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x12,ror#18 // Sigma1(e) ror x12,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x12,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x10,x10 // 7 #endif ldp x11,x12,[x1],#2*8 add x21,x21,x17 // h+=Sigma0(a) ror x16,x25,#14 add x20,x20,x28 // h+=K[i] eor x13,x25,x25,ror#23 and x17,x26,x25 bic x28,x27,x25 add x20,x20,x10 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x13,ror#18 // Sigma1(e) ror x13,x21,#28 add x20,x20,x17 // h+=Ch(e,f,g) eor x17,x21,x21,ror#5 add x20,x20,x16 // h+=Sigma1(e) 
and x19,x19,x28 // (b^c)&=(a^b) add x24,x24,x20 // d+=h eor x19,x19,x22 // Maj(a,b,c) eor x17,x13,x17,ror#34 // Sigma0(a) add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x20,x20,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x11,x11 // 8 #endif add x20,x20,x17 // h+=Sigma0(a) ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x14,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x11 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x14,ror#18 // Sigma1(e) ror x14,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x14,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x12,x12 // 9 #endif ldp x13,x14,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x15,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x12 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x15,ror#18 // Sigma1(e) ror x15,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor x17,x15,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x13,x13 // 10 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x0,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x13 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x0,ror#18 // Sigma1(e) ror x0,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add 
x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x0,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x14,x14 // 11 #endif ldp x15,x0,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x6,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x14 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x15,x15 // 12 #endif add x24,x24,x17 // h+=Sigma0(a) str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x7,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x15 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x0,x0 // 13 #endif ldp x1,x2,[x1] add x23,x23,x17 // h+=Sigma0(a) str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x8,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x0 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // 
(b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x1,x1 // 14 #endif ldr x6,[sp,#24] add x22,x22,x17 // h+=Sigma0(a) str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x9,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x1 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x2,x2 // 15 #endif ldr x7,[sp,#0] add x21,x21,x17 // h+=Sigma0(a) str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 Loop_16_xx: ldr x8,[sp,#8] str x11,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x10,x5,#1 and x17,x25,x24 ror x9,x2,#19 bic x19,x26,x24 ror x11,x20,#28 add x27,x27,x3 // h+=X[i] eor x16,x16,x24,ror#18 eor x10,x10,x5,ror#8 orr x17,x17,x19 // Ch(e,f,g) 
eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x11,x11,x20,ror#34 add x27,x27,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x9,x9,x2,ror#61 eor x10,x10,x5,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x11,x20,ror#39 // Sigma0(a) eor x9,x9,x2,lsr#6 // sigma1(X[i+14]) add x4,x4,x13 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x4,x4,x10 add x27,x27,x17 // h+=Sigma0(a) add x4,x4,x9 ldr x9,[sp,#16] str x12,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x11,x6,#1 and x17,x24,x23 ror x10,x3,#19 bic x28,x25,x23 ror x12,x27,#28 add x26,x26,x4 // h+=X[i] eor x16,x16,x23,ror#18 eor x11,x11,x6,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x12,x12,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x10,x10,x3,ror#61 eor x11,x11,x6,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x12,x27,ror#39 // Sigma0(a) eor x10,x10,x3,lsr#6 // sigma1(X[i+14]) add x5,x5,x14 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x5,x5,x11 add x26,x26,x17 // h+=Sigma0(a) add x5,x5,x10 ldr x10,[sp,#24] str x13,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x12,x7,#1 and x17,x23,x22 ror x11,x4,#19 bic x19,x24,x22 ror x13,x26,#28 add x25,x25,x5 // h+=X[i] eor x16,x16,x22,ror#18 eor x12,x12,x7,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x13,x13,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x11,x11,x4,ror#61 eor x12,x12,x7,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 // Maj(a,b,c) eor x17,x13,x26,ror#39 // Sigma0(a) eor x11,x11,x4,lsr#6 // sigma1(X[i+14]) add x6,x6,x15 add x21,x21,x25 // d+=h add x25,x25,x28 // 
h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x6,x6,x12 add x25,x25,x17 // h+=Sigma0(a) add x6,x6,x11 ldr x11,[sp,#0] str x14,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x13,x8,#1 and x17,x22,x21 ror x12,x5,#19 bic x28,x23,x21 ror x14,x25,#28 add x24,x24,x6 // h+=X[i] eor x16,x16,x21,ror#18 eor x13,x13,x8,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x14,x14,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x12,x12,x5,ror#61 eor x13,x13,x8,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x14,x25,ror#39 // Sigma0(a) eor x12,x12,x5,lsr#6 // sigma1(X[i+14]) add x7,x7,x0 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x7,x7,x13 add x24,x24,x17 // h+=Sigma0(a) add x7,x7,x12 ldr x12,[sp,#8] str x15,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x14,x9,#1 and x17,x21,x20 ror x13,x6,#19 bic x19,x22,x20 ror x15,x24,#28 add x23,x23,x7 // h+=X[i] eor x16,x16,x20,ror#18 eor x14,x14,x9,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x15,x15,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x13,x13,x6,ror#61 eor x14,x14,x9,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x15,x24,ror#39 // Sigma0(a) eor x13,x13,x6,lsr#6 // sigma1(X[i+14]) add x8,x8,x1 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x8,x8,x14 add x23,x23,x17 // h+=Sigma0(a) add x8,x8,x13 ldr x13,[sp,#16] str x0,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x15,x10,#1 and x17,x20,x27 ror x14,x7,#19 bic x28,x21,x27 ror x0,x23,#28 add x22,x22,x8 // h+=X[i] eor x16,x16,x27,ror#18 eor x15,x15,x10,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor 
x16,x16,x27,ror#41 // Sigma1(e) eor x0,x0,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x14,x14,x7,ror#61 eor x15,x15,x10,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x0,x23,ror#39 // Sigma0(a) eor x14,x14,x7,lsr#6 // sigma1(X[i+14]) add x9,x9,x2 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x9,x9,x15 add x22,x22,x17 // h+=Sigma0(a) add x9,x9,x14 ldr x14,[sp,#24] str x1,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x0,x11,#1 and x17,x27,x26 ror x15,x8,#19 bic x19,x20,x26 ror x1,x22,#28 add x21,x21,x9 // h+=X[i] eor x16,x16,x26,ror#18 eor x0,x0,x11,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x1,x1,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x15,x15,x8,ror#61 eor x0,x0,x11,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x1,x22,ror#39 // Sigma0(a) eor x15,x15,x8,lsr#6 // sigma1(X[i+14]) add x10,x10,x3 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x10,x10,x0 add x21,x21,x17 // h+=Sigma0(a) add x10,x10,x15 ldr x15,[sp,#0] str x2,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x1,x12,#1 and x17,x26,x25 ror x0,x9,#19 bic x28,x27,x25 ror x2,x21,#28 add x20,x20,x10 // h+=X[i] eor x16,x16,x25,ror#18 eor x1,x1,x12,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x2,x2,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x0,x0,x9,ror#61 eor x1,x1,x12,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x2,x21,ror#39 // Sigma0(a) eor x0,x0,x9,lsr#6 // sigma1(X[i+14]) add x11,x11,x4 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round 
add x11,x11,x1 add x20,x20,x17 // h+=Sigma0(a) add x11,x11,x0 ldr x0,[sp,#8] str x3,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x2,x13,#1 and x17,x25,x24 ror x1,x10,#19 bic x19,x26,x24 ror x3,x20,#28 add x27,x27,x11 // h+=X[i] eor x16,x16,x24,ror#18 eor x2,x2,x13,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x3,x3,x20,ror#34 add x27,x27,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x1,x1,x10,ror#61 eor x2,x2,x13,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x3,x20,ror#39 // Sigma0(a) eor x1,x1,x10,lsr#6 // sigma1(X[i+14]) add x12,x12,x5 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x12,x12,x2 add x27,x27,x17 // h+=Sigma0(a) add x12,x12,x1 ldr x1,[sp,#16] str x4,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x3,x14,#1 and x17,x24,x23 ror x2,x11,#19 bic x28,x25,x23 ror x4,x27,#28 add x26,x26,x12 // h+=X[i] eor x16,x16,x23,ror#18 eor x3,x3,x14,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x4,x4,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x2,x2,x11,ror#61 eor x3,x3,x14,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x4,x27,ror#39 // Sigma0(a) eor x2,x2,x11,lsr#6 // sigma1(X[i+14]) add x13,x13,x6 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x13,x13,x3 add x26,x26,x17 // h+=Sigma0(a) add x13,x13,x2 ldr x2,[sp,#24] str x5,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x4,x15,#1 and x17,x23,x22 ror x3,x12,#19 bic x19,x24,x22 ror x5,x26,#28 add x25,x25,x13 // h+=X[i] eor x16,x16,x22,ror#18 eor x4,x4,x15,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x5,x5,x26,ror#34 add x25,x25,x17 // 
h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x3,x3,x12,ror#61 eor x4,x4,x15,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 // Maj(a,b,c) eor x17,x5,x26,ror#39 // Sigma0(a) eor x3,x3,x12,lsr#6 // sigma1(X[i+14]) add x14,x14,x7 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x14,x14,x4 add x25,x25,x17 // h+=Sigma0(a) add x14,x14,x3 ldr x3,[sp,#0] str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x5,x0,#1 and x17,x22,x21 ror x4,x13,#19 bic x28,x23,x21 ror x6,x25,#28 add x24,x24,x14 // h+=X[i] eor x16,x16,x21,ror#18 eor x5,x5,x0,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x6,x6,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x4,x4,x13,ror#61 eor x5,x5,x0,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x25,ror#39 // Sigma0(a) eor x4,x4,x13,lsr#6 // sigma1(X[i+14]) add x15,x15,x8 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x15,x15,x5 add x24,x24,x17 // h+=Sigma0(a) add x15,x15,x4 ldr x4,[sp,#8] str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x6,x1,#1 and x17,x21,x20 ror x5,x14,#19 bic x19,x22,x20 ror x7,x24,#28 add x23,x23,x15 // h+=X[i] eor x16,x16,x20,ror#18 eor x6,x6,x1,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x7,x7,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x5,x5,x14,ror#61 eor x6,x6,x1,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x24,ror#39 // Sigma0(a) eor x5,x5,x14,lsr#6 // sigma1(X[i+14]) add x0,x0,x9 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x0,x0,x6 add x23,x23,x17 // h+=Sigma0(a) add x0,x0,x5 ldr x5,[sp,#16] str 
x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x7,x2,#1 and x17,x20,x27 ror x6,x15,#19 bic x28,x21,x27 ror x8,x23,#28 add x22,x22,x0 // h+=X[i] eor x16,x16,x27,ror#18 eor x7,x7,x2,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x8,x8,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x6,x6,x15,ror#61 eor x7,x7,x2,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x23,ror#39 // Sigma0(a) eor x6,x6,x15,lsr#6 // sigma1(X[i+14]) add x1,x1,x10 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x1,x1,x7 add x22,x22,x17 // h+=Sigma0(a) add x1,x1,x6 ldr x6,[sp,#24] str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x8,x3,#1 and x17,x27,x26 ror x7,x0,#19 bic x19,x20,x26 ror x9,x22,#28 add x21,x21,x1 // h+=X[i] eor x16,x16,x26,ror#18 eor x8,x8,x3,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x9,x9,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x7,x7,x0,ror#61 eor x8,x8,x3,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x22,ror#39 // Sigma0(a) eor x7,x7,x0,lsr#6 // sigma1(X[i+14]) add x2,x2,x11 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x2,x2,x8 add x21,x21,x17 // h+=Sigma0(a) add x2,x2,x7 ldr x7,[sp,#0] str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) 
add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 cbnz x19,Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#648 // rewind ldp x3,x4,[x0] ldp x5,x6,[x0,#2*8] add x1,x1,#14*8 // advance input pointer ldp x7,x8,[x0,#4*8] add x20,x20,x3 ldp x9,x10,[x0,#6*8] add x21,x21,x4 add x22,x22,x5 add x23,x23,x6 stp x20,x21,[x0] add x24,x24,x7 add x25,x25,x8 stp x22,x23,[x0,#2*8] add x26,x26,x9 add x27,x27,x10 cmp x1,x2 stp x24,x25,[x0,#4*8] stp x26,x27,[x0,#6*8] b.ne Loop ldp x19,x20,[x29,#16] add sp,sp,#4*8 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .section __TEXT,__const .align 6 LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 
0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0 // terminator .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl _sha512_block_data_order_hw .private_extern _sha512_block_data_order_hw .align 6 _sha512_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context adrp x3,LK512@PAGE add x3,x3,LK512@PAGEOFF rev64 v16.16b,v16.16b rev64 v17.16b,v17.16b rev64 v18.16b,v18.16b rev64 v19.16b,v19.16b rev64 v20.16b,v20.16b rev64 v21.16b,v21.16b rev64 v22.16b,v22.16b rev64 v23.16b,v23.16b b Loop_hw .align 4 Loop_hw: ld1 {v24.2d},[x3],#16 subs x2,x2,#1 sub x4,x1,#128 orr v26.16b,v0.16b,v0.16b // offload orr v27.16b,v1.16b,v1.16b orr v28.16b,v2.16b,v2.16b orr v29.16b,v3.16b,v3.16b csel x1,x1,x4,ne // conditional rewind add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext 
v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b 
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext 
v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add 
v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + 
K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 
v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext 
v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v16.2d ld1 {v16.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b rev64 v16.16b,v16.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v17.2d ld1 {v17.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b rev64 v17.16b,v17.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v18.2d ld1 {v18.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b rev64 v18.16b,v18.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v19.2d ld1 {v19.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 
0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b rev64 v19.16b,v19.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v20.2d ld1 {v20.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b rev64 v20.16b,v20.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v21.2d ld1 {v21.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b rev64 v21.16b,v21.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v22.2d ld1 {v22.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b rev64 v22.16b,v22.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b sub x3,x3,#80*8 // rewind add v25.2d,v25.2d,v23.2d ld1 {v23.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b rev64 v23.16b,v23.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v0.2d,v0.2d,v26.2d // accumulate add v1.2d,v1.2d,v27.2d add v2.2d,v2.2d,v28.2d add v3.2d,v3.2d,v29.2d cbnz x2,Loop_hw st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section 
.note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha512-armv8-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. // // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). 
// (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include #endif .text .globl sha512_block_data_order_nohw .hidden sha512_block_data_order_nohw .type sha512_block_data_order_nohw,%function .align 6 sha512_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*8 ldp x20,x21,[x0] // load context ldp x22,x23,[x0,#2*8] ldp x24,x25,[x0,#4*8] add x2,x1,x2,lsl#7 // end of input ldp x26,x27,[x0,#6*8] adrp x30,.LK512 add x30,x30,:lo12:.LK512 stp x0,x2,[x29,#96] .Loop: ldp x3,x4,[x1],#2*8 ldr x19,[x30],#8 // *K++ eor x28,x21,x22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev x3,x3 // 0 #endif ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x6,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x3 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x4,x4 // 1 #endif ldp x5,x6,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x7,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x4 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor 
x19,x19,x20 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x5,x5 // 2 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x8,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x5 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x6,x6 // 3 #endif ldp x7,x8,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x9,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x6 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x7,x7 // 4 #endif add x24,x24,x17 // h+=Sigma0(a) ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x10,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x7 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x10,ror#18 // Sigma1(e) ror x10,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x10,x17,ror#34 // Sigma0(a) 
add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x8,x8 // 5 #endif ldp x9,x10,[x1],#2*8 add x23,x23,x17 // h+=Sigma0(a) ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x11,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x8 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x11,ror#18 // Sigma1(e) ror x11,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x11,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x9,x9 // 6 #endif add x22,x22,x17 // h+=Sigma0(a) ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x12,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x9 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x12,ror#18 // Sigma1(e) ror x12,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x12,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x10,x10 // 7 #endif ldp x11,x12,[x1],#2*8 add x21,x21,x17 // h+=Sigma0(a) ror x16,x25,#14 add x20,x20,x28 // h+=K[i] eor x13,x25,x25,ror#23 and x17,x26,x25 bic x28,x27,x25 add x20,x20,x10 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x13,ror#18 // Sigma1(e) ror x13,x21,#28 add x20,x20,x17 // h+=Ch(e,f,g) eor x17,x21,x21,ror#5 add x20,x20,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x24,x24,x20 // d+=h eor x19,x19,x22 // Maj(a,b,c) eor x17,x13,x17,ror#34 // Sigma0(a) add x20,x20,x19 // 
h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x20,x20,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x11,x11 // 8 #endif add x20,x20,x17 // h+=Sigma0(a) ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x14,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x11 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x14,ror#18 // Sigma1(e) ror x14,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x14,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x12,x12 // 9 #endif ldp x13,x14,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x15,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x12 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x15,ror#18 // Sigma1(e) ror x15,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor x17,x15,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x13,x13 // 10 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x0,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x13 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x0,ror#18 // Sigma1(e) ror x0,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x0,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 
in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x14,x14 // 11 #endif ldp x15,x0,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x6,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x14 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x15,x15 // 12 #endif add x24,x24,x17 // h+=Sigma0(a) str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x7,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x15 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x0,x0 // 13 #endif ldp x1,x2,[x1] add x23,x23,x17 // h+=Sigma0(a) str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x8,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x0 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // 
*K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x1,x1 // 14 #endif ldr x6,[sp,#24] add x22,x22,x17 // h+=Sigma0(a) str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x9,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x1 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x2,x2 // 15 #endif ldr x7,[sp,#0] add x21,x21,x17 // h+=Sigma0(a) str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 .Loop_16_xx: ldr x8,[sp,#8] str x11,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x10,x5,#1 and x17,x25,x24 ror x9,x2,#19 bic x19,x26,x24 ror x11,x20,#28 add x27,x27,x3 // h+=X[i] eor x16,x16,x24,ror#18 eor x10,x10,x5,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x11,x11,x20,ror#34 add x27,x27,x17 // h+=Ch(e,f,g) and x28,x28,x19 // 
(b^c)&=(a^b) eor x9,x9,x2,ror#61 eor x10,x10,x5,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x11,x20,ror#39 // Sigma0(a) eor x9,x9,x2,lsr#6 // sigma1(X[i+14]) add x4,x4,x13 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x4,x4,x10 add x27,x27,x17 // h+=Sigma0(a) add x4,x4,x9 ldr x9,[sp,#16] str x12,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x11,x6,#1 and x17,x24,x23 ror x10,x3,#19 bic x28,x25,x23 ror x12,x27,#28 add x26,x26,x4 // h+=X[i] eor x16,x16,x23,ror#18 eor x11,x11,x6,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x12,x12,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x10,x10,x3,ror#61 eor x11,x11,x6,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x12,x27,ror#39 // Sigma0(a) eor x10,x10,x3,lsr#6 // sigma1(X[i+14]) add x5,x5,x14 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x5,x5,x11 add x26,x26,x17 // h+=Sigma0(a) add x5,x5,x10 ldr x10,[sp,#24] str x13,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x12,x7,#1 and x17,x23,x22 ror x11,x4,#19 bic x19,x24,x22 ror x13,x26,#28 add x25,x25,x5 // h+=X[i] eor x16,x16,x22,ror#18 eor x12,x12,x7,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x13,x13,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x11,x11,x4,ror#61 eor x12,x12,x7,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 // Maj(a,b,c) eor x17,x13,x26,ror#39 // Sigma0(a) eor x11,x11,x4,lsr#6 // sigma1(X[i+14]) add x6,x6,x15 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x6,x6,x12 add x25,x25,x17 // h+=Sigma0(a) add x6,x6,x11 ldr x11,[sp,#0] str x14,[sp,#24] 
ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x13,x8,#1 and x17,x22,x21 ror x12,x5,#19 bic x28,x23,x21 ror x14,x25,#28 add x24,x24,x6 // h+=X[i] eor x16,x16,x21,ror#18 eor x13,x13,x8,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x14,x14,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x12,x12,x5,ror#61 eor x13,x13,x8,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x14,x25,ror#39 // Sigma0(a) eor x12,x12,x5,lsr#6 // sigma1(X[i+14]) add x7,x7,x0 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x7,x7,x13 add x24,x24,x17 // h+=Sigma0(a) add x7,x7,x12 ldr x12,[sp,#8] str x15,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x14,x9,#1 and x17,x21,x20 ror x13,x6,#19 bic x19,x22,x20 ror x15,x24,#28 add x23,x23,x7 // h+=X[i] eor x16,x16,x20,ror#18 eor x14,x14,x9,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x15,x15,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x13,x13,x6,ror#61 eor x14,x14,x9,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x15,x24,ror#39 // Sigma0(a) eor x13,x13,x6,lsr#6 // sigma1(X[i+14]) add x8,x8,x1 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x8,x8,x14 add x23,x23,x17 // h+=Sigma0(a) add x8,x8,x13 ldr x13,[sp,#16] str x0,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x15,x10,#1 and x17,x20,x27 ror x14,x7,#19 bic x28,x21,x27 ror x0,x23,#28 add x22,x22,x8 // h+=X[i] eor x16,x16,x27,ror#18 eor x15,x15,x10,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x0,x0,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x14,x14,x7,ror#61 eor 
x15,x15,x10,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x0,x23,ror#39 // Sigma0(a) eor x14,x14,x7,lsr#6 // sigma1(X[i+14]) add x9,x9,x2 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x9,x9,x15 add x22,x22,x17 // h+=Sigma0(a) add x9,x9,x14 ldr x14,[sp,#24] str x1,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x0,x11,#1 and x17,x27,x26 ror x15,x8,#19 bic x19,x20,x26 ror x1,x22,#28 add x21,x21,x9 // h+=X[i] eor x16,x16,x26,ror#18 eor x0,x0,x11,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x1,x1,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x15,x15,x8,ror#61 eor x0,x0,x11,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x1,x22,ror#39 // Sigma0(a) eor x15,x15,x8,lsr#6 // sigma1(X[i+14]) add x10,x10,x3 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x10,x10,x0 add x21,x21,x17 // h+=Sigma0(a) add x10,x10,x15 ldr x15,[sp,#0] str x2,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x1,x12,#1 and x17,x26,x25 ror x0,x9,#19 bic x28,x27,x25 ror x2,x21,#28 add x20,x20,x10 // h+=X[i] eor x16,x16,x25,ror#18 eor x1,x1,x12,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x2,x2,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x0,x0,x9,ror#61 eor x1,x1,x12,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x2,x21,ror#39 // Sigma0(a) eor x0,x0,x9,lsr#6 // sigma1(X[i+14]) add x11,x11,x4 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x11,x11,x1 add x20,x20,x17 // h+=Sigma0(a) add x11,x11,x0 ldr x0,[sp,#8] str x3,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror 
x2,x13,#1 and x17,x25,x24 ror x1,x10,#19 bic x19,x26,x24 ror x3,x20,#28 add x27,x27,x11 // h+=X[i] eor x16,x16,x24,ror#18 eor x2,x2,x13,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x3,x3,x20,ror#34 add x27,x27,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x1,x1,x10,ror#61 eor x2,x2,x13,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x3,x20,ror#39 // Sigma0(a) eor x1,x1,x10,lsr#6 // sigma1(X[i+14]) add x12,x12,x5 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x12,x12,x2 add x27,x27,x17 // h+=Sigma0(a) add x12,x12,x1 ldr x1,[sp,#16] str x4,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x3,x14,#1 and x17,x24,x23 ror x2,x11,#19 bic x28,x25,x23 ror x4,x27,#28 add x26,x26,x12 // h+=X[i] eor x16,x16,x23,ror#18 eor x3,x3,x14,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x4,x4,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x2,x2,x11,ror#61 eor x3,x3,x14,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x4,x27,ror#39 // Sigma0(a) eor x2,x2,x11,lsr#6 // sigma1(X[i+14]) add x13,x13,x6 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x13,x13,x3 add x26,x26,x17 // h+=Sigma0(a) add x13,x13,x2 ldr x2,[sp,#24] str x5,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x4,x15,#1 and x17,x23,x22 ror x3,x12,#19 bic x19,x24,x22 ror x5,x26,#28 add x25,x25,x13 // h+=X[i] eor x16,x16,x22,ror#18 eor x4,x4,x15,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x5,x5,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x3,x3,x12,ror#61 eor x4,x4,x15,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor 
x28,x28,x27 // Maj(a,b,c) eor x17,x5,x26,ror#39 // Sigma0(a) eor x3,x3,x12,lsr#6 // sigma1(X[i+14]) add x14,x14,x7 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x14,x14,x4 add x25,x25,x17 // h+=Sigma0(a) add x14,x14,x3 ldr x3,[sp,#0] str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x5,x0,#1 and x17,x22,x21 ror x4,x13,#19 bic x28,x23,x21 ror x6,x25,#28 add x24,x24,x14 // h+=X[i] eor x16,x16,x21,ror#18 eor x5,x5,x0,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x6,x6,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x4,x4,x13,ror#61 eor x5,x5,x0,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x25,ror#39 // Sigma0(a) eor x4,x4,x13,lsr#6 // sigma1(X[i+14]) add x15,x15,x8 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x15,x15,x5 add x24,x24,x17 // h+=Sigma0(a) add x15,x15,x4 ldr x4,[sp,#8] str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x6,x1,#1 and x17,x21,x20 ror x5,x14,#19 bic x19,x22,x20 ror x7,x24,#28 add x23,x23,x15 // h+=X[i] eor x16,x16,x20,ror#18 eor x6,x6,x1,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x7,x7,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x5,x5,x14,ror#61 eor x6,x6,x1,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x24,ror#39 // Sigma0(a) eor x5,x5,x14,lsr#6 // sigma1(X[i+14]) add x0,x0,x9 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x0,x0,x6 add x23,x23,x17 // h+=Sigma0(a) add x0,x0,x5 ldr x5,[sp,#16] str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x7,x2,#1 and x17,x20,x27 ror x6,x15,#19 bic x28,x21,x27 ror x8,x23,#28 add x22,x22,x0 
// h+=X[i] eor x16,x16,x27,ror#18 eor x7,x7,x2,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x8,x8,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x6,x6,x15,ror#61 eor x7,x7,x2,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x23,ror#39 // Sigma0(a) eor x6,x6,x15,lsr#6 // sigma1(X[i+14]) add x1,x1,x10 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x1,x1,x7 add x22,x22,x17 // h+=Sigma0(a) add x1,x1,x6 ldr x6,[sp,#24] str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x8,x3,#1 and x17,x27,x26 ror x7,x0,#19 bic x19,x20,x26 ror x9,x22,#28 add x21,x21,x1 // h+=X[i] eor x16,x16,x26,ror#18 eor x8,x8,x3,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x9,x9,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x7,x7,x0,ror#61 eor x8,x8,x3,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x22,ror#39 // Sigma0(a) eor x7,x7,x0,lsr#6 // sigma1(X[i+14]) add x2,x2,x11 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x2,x2,x8 add x21,x21,x17 // h+=Sigma0(a) add x2,x2,x7 ldr x7,[sp,#0] str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add 
x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 cbnz x19,.Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#648 // rewind ldp x3,x4,[x0] ldp x5,x6,[x0,#2*8] add x1,x1,#14*8 // advance input pointer ldp x7,x8,[x0,#4*8] add x20,x20,x3 ldp x9,x10,[x0,#6*8] add x21,x21,x4 add x22,x22,x5 add x23,x23,x6 stp x20,x21,[x0] add x24,x24,x7 add x25,x25,x8 stp x22,x23,[x0,#2*8] add x26,x26,x9 add x27,x27,x10 cmp x1,x2 stp x24,x25,[x0,#4*8] stp x26,x27,[x0,#6*8] b.ne .Loop ldp x19,x20,[x29,#16] add sp,sp,#4*8 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw .section .rodata .align 6 .type .LK512,%object .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 
// Remainder of the .LK512 SHA-512 round-constant table (K[51]..K[79]);
// the table starts earlier in the file. The final zero quad terminates it.
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0 // terminator
.size .LK512,.-.LK512
// ASCII: "SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
// sha512_block_data_order_hw: SHA-512 compression using the ARMv8.2
// SHA-512 Crypto Extensions (sha512h/sha512h2/sha512su0/sha512su1,
// emitted as raw .inst opcodes for older assemblers).
//   x0 = pointer to the 8x64-bit hash state (loaded/stored as v0-v3)
//   x1 = pointer to the input data (consumed 128 bytes per iteration)
//   x2 = number of 128-byte blocks to process (decremented per loop)
// x3 walks the .LK512 constant table; x4 holds the rewind target.
// NOTE(review): this is machine-generated code (see file header of the
// sibling files) — do not hand-edit the round sequence.
.globl sha512_block_data_order_hw
.hidden sha512_block_data_order_hw
.type sha512_block_data_order_hw,%function
.align 6
sha512_block_data_order_hw:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
	AARCH64_VALID_CALL_TARGET
	stp x29,x30,[sp,#-16]!
	add x29,sp,#0
	// Load one 128-byte message block (v16-v23) and the hash state (v0-v3).
	ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input
	ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
	ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context
	adrp x3,.LK512
	add x3,x3,:lo12:.LK512
	// Byte-swap the message words to big-endian lane order.
	rev64 v16.16b,v16.16b
	rev64 v17.16b,v17.16b
	rev64 v18.16b,v18.16b
	rev64 v19.16b,v19.16b
	rev64 v20.16b,v20.16b
	rev64 v21.16b,v21.16b
	rev64 v22.16b,v22.16b
	rev64 v23.16b,v23.16b
	b .Loop_hw
.align 4
.Loop_hw:
	ld1 {v24.2d},[x3],#16
	subs x2,x2,#1
	sub x4,x1,#128
	// Save the incoming state in v26-v29 so it can be accumulated at the end.
	orr v26.16b,v0.16b,v0.16b // offload
	orr v27.16b,v1.16b,v1.16b
	orr v28.16b,v2.16b,v2.16b
	orr v29.16b,v3.16b,v3.16b
	// On the last block (x2==0) rewind x1 so the speculative prefetch
	// below re-reads the current block instead of running past the input.
	csel x1,x1,x4,ne // conditional rewind
	// Rounds 0-63: each group below performs two SHA-512 rounds
	// (sha512h/sha512h2) and extends the message schedule
	// (sha512su0/sha512su1) for a later round.
	add v24.2d,v24.2d,v16.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v2.16b,v3.16b,#8
	ext v6.16b,v1.16b,v2.16b,#8
	add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
	ext v7.16b,v20.16b,v21.16b,#8
	.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
	.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
	add v4.2d,v1.2d,v3.2d // "D + T1"
	.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
	add v25.2d,v25.2d,v17.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v4.16b,v2.16b,#8
	ext v6.16b,v0.16b,v4.16b,#8
	add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
	ext v7.16b,v21.16b,v22.16b,#8
	.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
	.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
	add v1.2d,v0.2d,v2.2d // "D + T1"
	.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
	add v24.2d,v24.2d,v18.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v1.16b,v4.16b,#8
	ext v6.16b,v3.16b,v1.16b,#8
	add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
	ext v7.16b,v22.16b,v23.16b,#8
	.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
	.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
	add v0.2d,v3.2d,v4.2d // "D + T1"
	.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
	add v25.2d,v25.2d,v19.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v0.16b,v1.16b,#8
	ext v6.16b,v2.16b,v0.16b,#8
	add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
	ext v7.16b,v23.16b,v16.16b,#8
	.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
	.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
	add v3.2d,v2.2d,v1.2d // "D + T1"
	.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
	add v24.2d,v24.2d,v20.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v3.16b,v0.16b,#8
	ext v6.16b,v4.16b,v3.16b,#8
	add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
	ext v7.16b,v16.16b,v17.16b,#8
	.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
	.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
	add v2.2d,v4.2d,v0.2d // "D + T1"
	.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
	add v25.2d,v25.2d,v21.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v2.16b,v3.16b,#8
	ext v6.16b,v1.16b,v2.16b,#8
	add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
	ext v7.16b,v17.16b,v18.16b,#8
	.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
	.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
	add v4.2d,v1.2d,v3.2d // "D + T1"
	.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
	add v24.2d,v24.2d,v22.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v4.16b,v2.16b,#8
	ext v6.16b,v0.16b,v4.16b,#8
	add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
	ext v7.16b,v18.16b,v19.16b,#8
	.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
	.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
	add v1.2d,v0.2d,v2.2d // "D + T1"
	.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
	add v25.2d,v25.2d,v23.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v1.16b,v4.16b,#8
	ext v6.16b,v3.16b,v1.16b,#8
	add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
	ext v7.16b,v19.16b,v20.16b,#8
	.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
	.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
	add v0.2d,v3.2d,v4.2d // "D + T1"
	.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
	add v24.2d,v24.2d,v16.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v0.16b,v1.16b,#8
	ext v6.16b,v2.16b,v0.16b,#8
	add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
	ext v7.16b,v20.16b,v21.16b,#8
	.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
	.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
	add v3.2d,v2.2d,v1.2d // "D + T1"
	.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
	add v25.2d,v25.2d,v17.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v3.16b,v0.16b,#8
	ext v6.16b,v4.16b,v3.16b,#8
	add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
	ext v7.16b,v21.16b,v22.16b,#8
	.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
	.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
	add v2.2d,v4.2d,v0.2d // "D + T1"
	.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
	add v24.2d,v24.2d,v18.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v2.16b,v3.16b,#8
	ext v6.16b,v1.16b,v2.16b,#8
	add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
	ext v7.16b,v22.16b,v23.16b,#8
	.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
	.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
	add v4.2d,v1.2d,v3.2d // "D + T1"
	.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
	add v25.2d,v25.2d,v19.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v4.16b,v2.16b,#8
	ext v6.16b,v0.16b,v4.16b,#8
	add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
	ext v7.16b,v23.16b,v16.16b,#8
	.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
	.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
	add v1.2d,v0.2d,v2.2d // "D + T1"
	.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
	add v24.2d,v24.2d,v20.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v1.16b,v4.16b,#8
	ext v6.16b,v3.16b,v1.16b,#8
	add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
	ext v7.16b,v16.16b,v17.16b,#8
	.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
	.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
	add v0.2d,v3.2d,v4.2d // "D + T1"
	.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
	add v25.2d,v25.2d,v21.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v0.16b,v1.16b,#8
	ext v6.16b,v2.16b,v0.16b,#8
	add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
	ext v7.16b,v17.16b,v18.16b,#8
	.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
	.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
	add v3.2d,v2.2d,v1.2d // "D + T1"
	.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
	add v24.2d,v24.2d,v22.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v3.16b,v0.16b,#8
	ext v6.16b,v4.16b,v3.16b,#8
	add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
	ext v7.16b,v18.16b,v19.16b,#8
	.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
	.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
	add v2.2d,v4.2d,v0.2d // "D + T1"
	.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
	add v25.2d,v25.2d,v23.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v2.16b,v3.16b,#8
	ext v6.16b,v1.16b,v2.16b,#8
	add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
	ext v7.16b,v19.16b,v20.16b,#8
	.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
	.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
	add v4.2d,v1.2d,v3.2d // "D + T1"
	.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
	add v24.2d,v24.2d,v16.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v4.16b,v2.16b,#8
	ext v6.16b,v0.16b,v4.16b,#8
	add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
	ext v7.16b,v20.16b,v21.16b,#8
	.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
	.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
	add v1.2d,v0.2d,v2.2d // "D + T1"
	.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
	add v25.2d,v25.2d,v17.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v1.16b,v4.16b,#8
	ext v6.16b,v3.16b,v1.16b,#8
	add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
	ext v7.16b,v21.16b,v22.16b,#8
	.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
	.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
	add v0.2d,v3.2d,v4.2d // "D + T1"
	.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
	add v24.2d,v24.2d,v18.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v0.16b,v1.16b,#8
	ext v6.16b,v2.16b,v0.16b,#8
	add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
	ext v7.16b,v22.16b,v23.16b,#8
	.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
	.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
	add v3.2d,v2.2d,v1.2d // "D + T1"
	.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
	add v25.2d,v25.2d,v19.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v3.16b,v0.16b,#8
	ext v6.16b,v4.16b,v3.16b,#8
	add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
	ext v7.16b,v23.16b,v16.16b,#8
	.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
	.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
	add v2.2d,v4.2d,v0.2d // "D + T1"
	.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
	add v24.2d,v24.2d,v20.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v2.16b,v3.16b,#8
	ext v6.16b,v1.16b,v2.16b,#8
	add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
	ext v7.16b,v16.16b,v17.16b,#8
	.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
	.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
	add v4.2d,v1.2d,v3.2d // "D + T1"
	.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
	add v25.2d,v25.2d,v21.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v4.16b,v2.16b,#8
	ext v6.16b,v0.16b,v4.16b,#8
	add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
	ext v7.16b,v17.16b,v18.16b,#8
	.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
	.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
	add v1.2d,v0.2d,v2.2d // "D + T1"
	.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
	add v24.2d,v24.2d,v22.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v1.16b,v4.16b,#8
	ext v6.16b,v3.16b,v1.16b,#8
	add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
	ext v7.16b,v18.16b,v19.16b,#8
	.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
	.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
	add v0.2d,v3.2d,v4.2d // "D + T1"
	.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
	add v25.2d,v25.2d,v23.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v0.16b,v1.16b,#8
	ext v6.16b,v2.16b,v0.16b,#8
	add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
	ext v7.16b,v19.16b,v20.16b,#8
	.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
	.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
	add v3.2d,v2.2d,v1.2d // "D + T1"
	.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
	add v24.2d,v24.2d,v16.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v3.16b,v0.16b,#8
	ext v6.16b,v4.16b,v3.16b,#8
	add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec08230 //sha512su0 v16.16b,v17.16b
	ext v7.16b,v20.16b,v21.16b,#8
	.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
	.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
	add v2.2d,v4.2d,v0.2d // "D + T1"
	.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
	add v25.2d,v25.2d,v17.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v2.16b,v3.16b,#8
	ext v6.16b,v1.16b,v2.16b,#8
	add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec08251 //sha512su0 v17.16b,v18.16b
	ext v7.16b,v21.16b,v22.16b,#8
	.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
	.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
	add v4.2d,v1.2d,v3.2d // "D + T1"
	.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
	add v24.2d,v24.2d,v18.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v4.16b,v2.16b,#8
	ext v6.16b,v0.16b,v4.16b,#8
	add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec08272 //sha512su0 v18.16b,v19.16b
	ext v7.16b,v22.16b,v23.16b,#8
	.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
	.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
	add v1.2d,v0.2d,v2.2d // "D + T1"
	.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
	add v25.2d,v25.2d,v19.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v1.16b,v4.16b,#8
	ext v6.16b,v3.16b,v1.16b,#8
	add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec08293 //sha512su0 v19.16b,v20.16b
	ext v7.16b,v23.16b,v16.16b,#8
	.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
	.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
	add v0.2d,v3.2d,v4.2d // "D + T1"
	.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
	add v24.2d,v24.2d,v20.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v0.16b,v1.16b,#8
	ext v6.16b,v2.16b,v0.16b,#8
	add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b
	ext v7.16b,v16.16b,v17.16b,#8
	.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
	.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
	add v3.2d,v2.2d,v1.2d // "D + T1"
	.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
	add v25.2d,v25.2d,v21.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v3.16b,v0.16b,#8
	ext v6.16b,v4.16b,v3.16b,#8
	add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b
	ext v7.16b,v17.16b,v18.16b,#8
	.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
	.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
	add v2.2d,v4.2d,v0.2d // "D + T1"
	.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
	add v24.2d,v24.2d,v22.2d
	ld1 {v25.2d},[x3],#16
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v2.16b,v3.16b,#8
	ext v6.16b,v1.16b,v2.16b,#8
	add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b
	ext v7.16b,v18.16b,v19.16b,#8
	.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
	.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
	add v4.2d,v1.2d,v3.2d // "D + T1"
	.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
	add v25.2d,v25.2d,v23.2d
	ld1 {v24.2d},[x3],#16
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v4.16b,v2.16b,#8
	ext v6.16b,v0.16b,v4.16b,#8
	add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xcec08217 //sha512su0 v23.16b,v16.16b
	ext v7.16b,v19.16b,v20.16b,#8
	.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
	.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
	add v1.2d,v0.2d,v2.2d // "D + T1"
	.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
	// Final 16 rounds: the message schedule is complete, so no more
	// su0/su1; interleave loads of the next 128-byte block (speculative
	// on the last iteration thanks to the csel rewind above).
	ld1 {v25.2d},[x3],#16
	add v24.2d,v24.2d,v16.2d
	ld1 {v16.16b},[x1],#16 // load next input
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v1.16b,v4.16b,#8
	ext v6.16b,v3.16b,v1.16b,#8
	add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
	rev64 v16.16b,v16.16b
	add v0.2d,v3.2d,v4.2d // "D + T1"
	.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
	ld1 {v24.2d},[x3],#16
	add v25.2d,v25.2d,v17.2d
	ld1 {v17.16b},[x1],#16 // load next input
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v0.16b,v1.16b,#8
	ext v6.16b,v2.16b,v0.16b,#8
	add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
	rev64 v17.16b,v17.16b
	add v3.2d,v2.2d,v1.2d // "D + T1"
	.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
	ld1 {v25.2d},[x3],#16
	add v24.2d,v24.2d,v18.2d
	ld1 {v18.16b},[x1],#16 // load next input
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v3.16b,v0.16b,#8
	ext v6.16b,v4.16b,v3.16b,#8
	add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
	rev64 v18.16b,v18.16b
	add v2.2d,v4.2d,v0.2d // "D + T1"
	.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
	ld1 {v24.2d},[x3],#16
	add v25.2d,v25.2d,v19.2d
	ld1 {v19.16b},[x1],#16 // load next input
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v2.16b,v3.16b,#8
	ext v6.16b,v1.16b,v2.16b,#8
	add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
	rev64 v19.16b,v19.16b
	add v4.2d,v1.2d,v3.2d // "D + T1"
	.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
	ld1 {v25.2d},[x3],#16
	add v24.2d,v24.2d,v20.2d
	ld1 {v20.16b},[x1],#16 // load next input
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v4.16b,v2.16b,#8
	ext v6.16b,v0.16b,v4.16b,#8
	add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
	rev64 v20.16b,v20.16b
	add v1.2d,v0.2d,v2.2d // "D + T1"
	.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
	ld1 {v24.2d},[x3],#16
	add v25.2d,v25.2d,v21.2d
	ld1 {v21.16b},[x1],#16 // load next input
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v1.16b,v4.16b,#8
	ext v6.16b,v3.16b,v1.16b,#8
	add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
	rev64 v21.16b,v21.16b
	add v0.2d,v3.2d,v4.2d // "D + T1"
	.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
	ld1 {v25.2d},[x3],#16
	add v24.2d,v24.2d,v22.2d
	ld1 {v22.16b},[x1],#16 // load next input
	ext v24.16b,v24.16b,v24.16b,#8
	ext v5.16b,v0.16b,v1.16b,#8
	ext v6.16b,v2.16b,v0.16b,#8
	add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
	.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
	rev64 v22.16b,v22.16b
	add v3.2d,v2.2d,v1.2d // "D + T1"
	.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
	sub x3,x3,#80*8 // rewind
	add v25.2d,v25.2d,v23.2d
	ld1 {v23.16b},[x1],#16 // load next input
	ext v25.16b,v25.16b,v25.16b,#8
	ext v5.16b,v3.16b,v0.16b,#8
	ext v6.16b,v4.16b,v3.16b,#8
	add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
	.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
	rev64 v23.16b,v23.16b
	add v2.2d,v4.2d,v0.2d // "D + T1"
	.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
	// Add the saved input state (Davies-Meyer feed-forward), then loop
	// while blocks remain.
	add v0.2d,v0.2d,v26.2d // accumulate
	add v1.2d,v1.2d,v27.2d
	add v2.2d,v2.2d,v28.2d
	add v3.2d,v3.2d,v29.2d
	cbnz x2,.Loop_hw
	st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context
	ldr x29,[sp],#16
	ret
.size sha512_block_data_order_hw,.-sha512_block_data_order_hw
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
#if
defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha512-armv8-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. // // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). 
// (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include #endif .text .globl sha512_block_data_order_nohw .def sha512_block_data_order_nohw .type 32 .endef .align 6 sha512_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*8 ldp x20,x21,[x0] // load context ldp x22,x23,[x0,#2*8] ldp x24,x25,[x0,#4*8] add x2,x1,x2,lsl#7 // end of input ldp x26,x27,[x0,#6*8] adrp x30,LK512 add x30,x30,:lo12:LK512 stp x0,x2,[x29,#96] Loop: ldp x3,x4,[x1],#2*8 ldr x19,[x30],#8 // *K++ eor x28,x21,x22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev x3,x3 // 0 #endif ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x6,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x3 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x4,x4 // 1 #endif ldp x5,x6,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x7,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x4 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor 
x17,x7,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x5,x5 // 2 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x8,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x5 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x6,x6 // 3 #endif ldp x7,x8,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x9,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x6 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x7,x7 // 4 #endif add x24,x24,x17 // h+=Sigma0(a) ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x10,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x7 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x10,ror#18 // Sigma1(e) ror x10,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x10,x17,ror#34 // Sigma0(a) add x23,x23,x28 // 
h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x8,x8 // 5 #endif ldp x9,x10,[x1],#2*8 add x23,x23,x17 // h+=Sigma0(a) ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x11,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x8 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x11,ror#18 // Sigma1(e) ror x11,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x11,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x9,x9 // 6 #endif add x22,x22,x17 // h+=Sigma0(a) ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x12,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x9 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x12,ror#18 // Sigma1(e) ror x12,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x12,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x10,x10 // 7 #endif ldp x11,x12,[x1],#2*8 add x21,x21,x17 // h+=Sigma0(a) ror x16,x25,#14 add x20,x20,x28 // h+=K[i] eor x13,x25,x25,ror#23 and x17,x26,x25 bic x28,x27,x25 add x20,x20,x10 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x13,ror#18 // Sigma1(e) ror x13,x21,#28 add x20,x20,x17 // h+=Ch(e,f,g) eor x17,x21,x21,ror#5 add x20,x20,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x24,x24,x20 // d+=h eor x19,x19,x22 // Maj(a,b,c) eor x17,x13,x17,ror#34 // Sigma0(a) add x20,x20,x19 // h+=Maj(a,b,c) ldr 
x19,[x30],#8 // *K++, x28 in next round //add x20,x20,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x11,x11 // 8 #endif add x20,x20,x17 // h+=Sigma0(a) ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x14,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x11 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x14,ror#18 // Sigma1(e) ror x14,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x14,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x12,x12 // 9 #endif ldp x13,x14,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x15,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x12 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x15,ror#18 // Sigma1(e) ror x15,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor x17,x15,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x13,x13 // 10 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x0,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x13 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x0,ror#18 // Sigma1(e) ror x0,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x0,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add 
x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x14,x14 // 11 #endif ldp x15,x0,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x6,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x14 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x15,x15 // 12 #endif add x24,x24,x17 // h+=Sigma0(a) str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x7,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x15 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x0,x0 // 13 #endif ldp x1,x2,[x1] add x23,x23,x17 // h+=Sigma0(a) str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x8,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x0 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round 
//add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x1,x1 // 14 #endif ldr x6,[sp,#24] add x22,x22,x17 // h+=Sigma0(a) str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x9,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x1 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x2,x2 // 15 #endif ldr x7,[sp,#0] add x21,x21,x17 // h+=Sigma0(a) str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 Loop_16_xx: ldr x8,[sp,#8] str x11,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x10,x5,#1 and x17,x25,x24 ror x9,x2,#19 bic x19,x26,x24 ror x11,x20,#28 add x27,x27,x3 // h+=X[i] eor x16,x16,x24,ror#18 eor x10,x10,x5,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x11,x11,x20,ror#34 add x27,x27,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor 
x9,x9,x2,ror#61 eor x10,x10,x5,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x11,x20,ror#39 // Sigma0(a) eor x9,x9,x2,lsr#6 // sigma1(X[i+14]) add x4,x4,x13 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x4,x4,x10 add x27,x27,x17 // h+=Sigma0(a) add x4,x4,x9 ldr x9,[sp,#16] str x12,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x11,x6,#1 and x17,x24,x23 ror x10,x3,#19 bic x28,x25,x23 ror x12,x27,#28 add x26,x26,x4 // h+=X[i] eor x16,x16,x23,ror#18 eor x11,x11,x6,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x12,x12,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x10,x10,x3,ror#61 eor x11,x11,x6,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x12,x27,ror#39 // Sigma0(a) eor x10,x10,x3,lsr#6 // sigma1(X[i+14]) add x5,x5,x14 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x5,x5,x11 add x26,x26,x17 // h+=Sigma0(a) add x5,x5,x10 ldr x10,[sp,#24] str x13,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x12,x7,#1 and x17,x23,x22 ror x11,x4,#19 bic x19,x24,x22 ror x13,x26,#28 add x25,x25,x5 // h+=X[i] eor x16,x16,x22,ror#18 eor x12,x12,x7,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x13,x13,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x11,x11,x4,ror#61 eor x12,x12,x7,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 // Maj(a,b,c) eor x17,x13,x26,ror#39 // Sigma0(a) eor x11,x11,x4,lsr#6 // sigma1(X[i+14]) add x6,x6,x15 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x6,x6,x12 add x25,x25,x17 // h+=Sigma0(a) add x6,x6,x11 ldr x11,[sp,#0] str x14,[sp,#24] ror x16,x21,#14 add 
x24,x24,x28 // h+=K[i] ror x13,x8,#1 and x17,x22,x21 ror x12,x5,#19 bic x28,x23,x21 ror x14,x25,#28 add x24,x24,x6 // h+=X[i] eor x16,x16,x21,ror#18 eor x13,x13,x8,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x14,x14,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x12,x12,x5,ror#61 eor x13,x13,x8,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x14,x25,ror#39 // Sigma0(a) eor x12,x12,x5,lsr#6 // sigma1(X[i+14]) add x7,x7,x0 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x7,x7,x13 add x24,x24,x17 // h+=Sigma0(a) add x7,x7,x12 ldr x12,[sp,#8] str x15,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x14,x9,#1 and x17,x21,x20 ror x13,x6,#19 bic x19,x22,x20 ror x15,x24,#28 add x23,x23,x7 // h+=X[i] eor x16,x16,x20,ror#18 eor x14,x14,x9,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x15,x15,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x13,x13,x6,ror#61 eor x14,x14,x9,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x15,x24,ror#39 // Sigma0(a) eor x13,x13,x6,lsr#6 // sigma1(X[i+14]) add x8,x8,x1 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x8,x8,x14 add x23,x23,x17 // h+=Sigma0(a) add x8,x8,x13 ldr x13,[sp,#16] str x0,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x15,x10,#1 and x17,x20,x27 ror x14,x7,#19 bic x28,x21,x27 ror x0,x23,#28 add x22,x22,x8 // h+=X[i] eor x16,x16,x27,ror#18 eor x15,x15,x10,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x0,x0,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x14,x14,x7,ror#61 eor x15,x15,x10,lsr#7 // 
sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x0,x23,ror#39 // Sigma0(a) eor x14,x14,x7,lsr#6 // sigma1(X[i+14]) add x9,x9,x2 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x9,x9,x15 add x22,x22,x17 // h+=Sigma0(a) add x9,x9,x14 ldr x14,[sp,#24] str x1,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x0,x11,#1 and x17,x27,x26 ror x15,x8,#19 bic x19,x20,x26 ror x1,x22,#28 add x21,x21,x9 // h+=X[i] eor x16,x16,x26,ror#18 eor x0,x0,x11,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x1,x1,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x15,x15,x8,ror#61 eor x0,x0,x11,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x1,x22,ror#39 // Sigma0(a) eor x15,x15,x8,lsr#6 // sigma1(X[i+14]) add x10,x10,x3 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x10,x10,x0 add x21,x21,x17 // h+=Sigma0(a) add x10,x10,x15 ldr x15,[sp,#0] str x2,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x1,x12,#1 and x17,x26,x25 ror x0,x9,#19 bic x28,x27,x25 ror x2,x21,#28 add x20,x20,x10 // h+=X[i] eor x16,x16,x25,ror#18 eor x1,x1,x12,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x2,x2,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x0,x0,x9,ror#61 eor x1,x1,x12,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x2,x21,ror#39 // Sigma0(a) eor x0,x0,x9,lsr#6 // sigma1(X[i+14]) add x11,x11,x4 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x11,x11,x1 add x20,x20,x17 // h+=Sigma0(a) add x11,x11,x0 ldr x0,[sp,#8] str x3,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x2,x13,#1 and 
x17,x25,x24 ror x1,x10,#19 bic x19,x26,x24 ror x3,x20,#28 add x27,x27,x11 // h+=X[i] eor x16,x16,x24,ror#18 eor x2,x2,x13,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x3,x3,x20,ror#34 add x27,x27,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x1,x1,x10,ror#61 eor x2,x2,x13,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x3,x20,ror#39 // Sigma0(a) eor x1,x1,x10,lsr#6 // sigma1(X[i+14]) add x12,x12,x5 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x12,x12,x2 add x27,x27,x17 // h+=Sigma0(a) add x12,x12,x1 ldr x1,[sp,#16] str x4,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x3,x14,#1 and x17,x24,x23 ror x2,x11,#19 bic x28,x25,x23 ror x4,x27,#28 add x26,x26,x12 // h+=X[i] eor x16,x16,x23,ror#18 eor x3,x3,x14,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x4,x4,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x2,x2,x11,ror#61 eor x3,x3,x14,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x4,x27,ror#39 // Sigma0(a) eor x2,x2,x11,lsr#6 // sigma1(X[i+14]) add x13,x13,x6 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x13,x13,x3 add x26,x26,x17 // h+=Sigma0(a) add x13,x13,x2 ldr x2,[sp,#24] str x5,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x4,x15,#1 and x17,x23,x22 ror x3,x12,#19 bic x19,x24,x22 ror x5,x26,#28 add x25,x25,x13 // h+=X[i] eor x16,x16,x22,ror#18 eor x4,x4,x15,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x5,x5,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x3,x3,x12,ror#61 eor x4,x4,x15,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 
// Maj(a,b,c) eor x17,x5,x26,ror#39 // Sigma0(a) eor x3,x3,x12,lsr#6 // sigma1(X[i+14]) add x14,x14,x7 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x14,x14,x4 add x25,x25,x17 // h+=Sigma0(a) add x14,x14,x3 ldr x3,[sp,#0] str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x5,x0,#1 and x17,x22,x21 ror x4,x13,#19 bic x28,x23,x21 ror x6,x25,#28 add x24,x24,x14 // h+=X[i] eor x16,x16,x21,ror#18 eor x5,x5,x0,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x6,x6,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x4,x4,x13,ror#61 eor x5,x5,x0,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x25,ror#39 // Sigma0(a) eor x4,x4,x13,lsr#6 // sigma1(X[i+14]) add x15,x15,x8 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x15,x15,x5 add x24,x24,x17 // h+=Sigma0(a) add x15,x15,x4 ldr x4,[sp,#8] str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x6,x1,#1 and x17,x21,x20 ror x5,x14,#19 bic x19,x22,x20 ror x7,x24,#28 add x23,x23,x15 // h+=X[i] eor x16,x16,x20,ror#18 eor x6,x6,x1,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x7,x7,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x5,x5,x14,ror#61 eor x6,x6,x1,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x24,ror#39 // Sigma0(a) eor x5,x5,x14,lsr#6 // sigma1(X[i+14]) add x0,x0,x9 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x0,x0,x6 add x23,x23,x17 // h+=Sigma0(a) add x0,x0,x5 ldr x5,[sp,#16] str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x7,x2,#1 and x17,x20,x27 ror x6,x15,#19 bic x28,x21,x27 ror x8,x23,#28 add x22,x22,x0 // h+=X[i] 
eor x16,x16,x27,ror#18 eor x7,x7,x2,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x8,x8,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x6,x6,x15,ror#61 eor x7,x7,x2,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x23,ror#39 // Sigma0(a) eor x6,x6,x15,lsr#6 // sigma1(X[i+14]) add x1,x1,x10 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x1,x1,x7 add x22,x22,x17 // h+=Sigma0(a) add x1,x1,x6 ldr x6,[sp,#24] str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x8,x3,#1 and x17,x27,x26 ror x7,x0,#19 bic x19,x20,x26 ror x9,x22,#28 add x21,x21,x1 // h+=X[i] eor x16,x16,x26,ror#18 eor x8,x8,x3,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x9,x9,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x7,x7,x0,ror#61 eor x8,x8,x3,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x22,ror#39 // Sigma0(a) eor x7,x7,x0,lsr#6 // sigma1(X[i+14]) add x2,x2,x11 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x2,x2,x8 add x21,x21,x17 // h+=Sigma0(a) add x2,x2,x7 ldr x7,[sp,#0] str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add 
x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 cbnz x19,Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#648 // rewind ldp x3,x4,[x0] ldp x5,x6,[x0,#2*8] add x1,x1,#14*8 // advance input pointer ldp x7,x8,[x0,#4*8] add x20,x20,x3 ldp x9,x10,[x0,#6*8] add x21,x21,x4 add x22,x22,x5 add x23,x23,x6 stp x20,x21,[x0] add x24,x24,x7 add x25,x25,x8 stp x22,x23,[x0,#2*8] add x26,x26,x9 add x27,x27,x10 cmp x1,x2 stp x24,x25,[x0,#4*8] stp x26,x27,[x0,#6*8] b.ne Loop ldp x19,x20,[x29,#16] add sp,sp,#4*8 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .section .rodata .align 6 LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 
0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0 // terminator .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl sha512_block_data_order_hw .def sha512_block_data_order_hw .type 32 .endef .align 6 sha512_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context adrp x3,LK512 add x3,x3,:lo12:LK512 rev64 v16.16b,v16.16b rev64 v17.16b,v17.16b rev64 v18.16b,v18.16b rev64 v19.16b,v19.16b rev64 v20.16b,v20.16b rev64 v21.16b,v21.16b rev64 v22.16b,v22.16b rev64 v23.16b,v23.16b b Loop_hw .align 4 Loop_hw: ld1 {v24.2d},[x3],#16 subs x2,x2,#1 sub x4,x1,#128 orr v26.16b,v0.16b,v0.16b // offload orr v27.16b,v1.16b,v1.16b orr v28.16b,v2.16b,v2.16b orr v29.16b,v3.16b,v3.16b csel x1,x1,x4,ne // conditional rewind add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext 
v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b 
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext 
v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add 
v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + 
K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 
v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext 
v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v16.2d ld1 {v16.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b rev64 v16.16b,v16.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v17.2d ld1 {v17.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b rev64 v17.16b,v17.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v18.2d ld1 {v18.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b rev64 v18.16b,v18.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v19.2d ld1 {v19.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 
0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b rev64 v19.16b,v19.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v20.2d ld1 {v20.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b rev64 v20.16b,v20.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v21.2d ld1 {v21.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b rev64 v21.16b,v21.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v22.2d ld1 {v22.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b rev64 v22.16b,v22.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b sub x3,x3,#80*8 // rewind add v25.2d,v25.2d,v23.2d ld1 {v23.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b rev64 v23.16b,v23.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v0.2d,v0.2d,v26.2d // accumulate add v1.2d,v1.2d,v27.2d add v2.2d,v2.2d,v28.2d add v3.2d,v3.2d,v29.2d cbnz x2,Loop_hw st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section 
.note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha512-x86_64-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _sha512_block_data_order_nohw .private_extern _sha512_block_data_order_nohw .p2align 4 _sha512_block_data_order_nohw: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 shlq $4,%rdx subq $128+32,%rsp leaq (%rsi,%rdx,8),%rdx andq $-64,%rsp movq %rdi,128+0(%rsp) movq %rsi,128+8(%rsp) movq %rdx,128+16(%rsp) movq %rax,152(%rsp) L$prologue: movq 0(%rdi),%rax movq 8(%rdi),%rbx movq 16(%rdi),%rcx movq 24(%rdi),%rdx movq 32(%rdi),%r8 movq 40(%rdi),%r9 movq 48(%rdi),%r10 movq 56(%rdi),%r11 jmp L$loop .p2align 4 L$loop: movq %rbx,%rdi leaq K512(%rip),%rbp xorq %rcx,%rdi movq 0(%rsi),%r12 movq %r8,%r13 movq %rax,%r14 bswapq %r12 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,0(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp addq %r14,%r11 movq 8(%rsi),%r12 movq %rdx,%r13 movq %r11,%r14 bswapq %r12 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,8(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp addq 
%r14,%r10 movq 16(%rsi),%r12 movq %rcx,%r13 movq %r10,%r14 bswapq %r12 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,16(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp addq %r14,%r9 movq 24(%rsi),%r12 movq %rbx,%r13 movq %r9,%r14 bswapq %r12 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,24(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp addq %r14,%r8 movq 32(%rsi),%r12 movq %rax,%r13 movq %r8,%r14 bswapq %r12 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,32(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp addq %r14,%rdx movq 40(%rsi),%r12 movq %r11,%r13 movq %rdx,%r14 bswapq %r12 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,40(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp addq %r14,%rcx movq 48(%rsi),%r12 movq %r10,%r13 movq %rcx,%r14 bswapq %r12 rorq $23,%r13 movq 
%r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,48(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp addq %r14,%rbx movq 56(%rsi),%r12 movq %r9,%r13 movq %rbx,%r14 bswapq %r12 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,56(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp addq %r14,%rax movq 64(%rsi),%r12 movq %r8,%r13 movq %rax,%r14 bswapq %r12 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,64(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp addq %r14,%r11 movq 72(%rsi),%r12 movq %rdx,%r13 movq %r11,%r14 bswapq %r12 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,72(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp addq %r14,%r10 movq 80(%rsi),%r12 movq %rcx,%r13 movq %r10,%r14 bswapq %r12 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,80(%rsp) xorq %r10,%r14 
andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp addq %r14,%r9 movq 88(%rsi),%r12 movq %rbx,%r13 movq %r9,%r14 bswapq %r12 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,88(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp addq %r14,%r8 movq 96(%rsi),%r12 movq %rax,%r13 movq %r8,%r14 bswapq %r12 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,96(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp addq %r14,%rdx movq 104(%rsi),%r12 movq %r11,%r13 movq %rdx,%r14 bswapq %r12 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,104(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp addq %r14,%rcx movq 112(%rsi),%r12 movq %r10,%r13 movq %rcx,%r14 bswapq %r12 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,112(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq 
%r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp addq %r14,%rbx movq 120(%rsi),%r12 movq %r9,%r13 movq %rbx,%r14 bswapq %r12 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,120(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp jmp L$rounds_16_xx .p2align 4 L$rounds_16_xx: movq 8(%rsp),%r13 movq 112(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rax movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 72(%rsp),%r12 addq 0(%rsp),%r12 movq %r8,%r13 addq %r15,%r12 movq %rax,%r14 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,0(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp movq 16(%rsp),%r13 movq 120(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r11 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 80(%rsp),%r12 addq 8(%rsp),%r12 movq %rdx,%r13 addq %rdi,%r12 movq %r11,%r14 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,8(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq 
%rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp movq 24(%rsp),%r13 movq 0(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r10 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 88(%rsp),%r12 addq 16(%rsp),%r12 movq %rcx,%r13 addq %r15,%r12 movq %r10,%r14 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,16(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp movq 32(%rsp),%r13 movq 8(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r9 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 96(%rsp),%r12 addq 24(%rsp),%r12 movq %rbx,%r13 addq %rdi,%r12 movq %r9,%r14 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,24(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp movq 40(%rsp),%r13 movq 16(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r8 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 104(%rsp),%r12 addq 32(%rsp),%r12 movq %rax,%r13 addq %r15,%r12 movq %r8,%r14 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,32(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq 
%rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp movq 48(%rsp),%r13 movq 24(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rdx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 112(%rsp),%r12 addq 40(%rsp),%r12 movq %r11,%r13 addq %rdi,%r12 movq %rdx,%r14 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,40(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp movq 56(%rsp),%r13 movq 32(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rcx movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 120(%rsp),%r12 addq 48(%rsp),%r12 movq %r10,%r13 addq %r15,%r12 movq %rcx,%r14 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,48(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp movq 64(%rsp),%r13 movq 40(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rbx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 0(%rsp),%r12 addq 56(%rsp),%r12 movq %r9,%r13 addq %rdi,%r12 movq %rbx,%r14 rorq 
$23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,56(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp movq 72(%rsp),%r13 movq 48(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rax movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 8(%rsp),%r12 addq 64(%rsp),%r12 movq %r8,%r13 addq %r15,%r12 movq %rax,%r14 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,64(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp movq 80(%rsp),%r13 movq 56(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r11 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 16(%rsp),%r12 addq 72(%rsp),%r12 movq %rdx,%r13 addq %rdi,%r12 movq %r11,%r14 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,72(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp movq 88(%rsp),%r13 movq 64(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r10 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq 
$19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 24(%rsp),%r12 addq 80(%rsp),%r12 movq %rcx,%r13 addq %r15,%r12 movq %r10,%r14 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,80(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp movq 96(%rsp),%r13 movq 72(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r9 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 32(%rsp),%r12 addq 88(%rsp),%r12 movq %rbx,%r13 addq %rdi,%r12 movq %r9,%r14 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,88(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp movq 104(%rsp),%r13 movq 80(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r8 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 40(%rsp),%r12 addq 96(%rsp),%r12 movq %rax,%r13 addq %r15,%r12 movq %r8,%r14 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,96(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp movq 112(%rsp),%r13 movq 88(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 
addq %r14,%rdx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 48(%rsp),%r12 addq 104(%rsp),%r12 movq %r11,%r13 addq %rdi,%r12 movq %rdx,%r14 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,104(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp movq 120(%rsp),%r13 movq 96(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rcx movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 56(%rsp),%r12 addq 112(%rsp),%r12 movq %r10,%r13 addq %r15,%r12 movq %rcx,%r14 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,112(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp movq 0(%rsp),%r13 movq 104(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rbx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 64(%rsp),%r12 addq 120(%rsp),%r12 movq %r9,%r13 addq %rdi,%r12 movq %rbx,%r14 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,120(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq 
%r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp cmpb $0,7(%rbp) jnz L$rounds_16_xx movq 128+0(%rsp),%rdi addq %r14,%rax leaq 128(%rsi),%rsi addq 0(%rdi),%rax addq 8(%rdi),%rbx addq 16(%rdi),%rcx addq 24(%rdi),%rdx addq 32(%rdi),%r8 addq 40(%rdi),%r9 addq 48(%rdi),%r10 addq 56(%rdi),%r11 cmpq 128+16(%rsp),%rsi movq %rax,0(%rdi) movq %rbx,8(%rdi) movq %rcx,16(%rdi) movq %rdx,24(%rdi) movq %r8,32(%rdi) movq %r9,40(%rdi) movq %r10,48(%rdi) movq %r11,56(%rdi) jb L$loop movq 152(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$epilogue: ret .section __DATA,__const .p2align 6 K512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 
0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 
0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0x0001020304050607,0x08090a0b0c0d0e0f .quad 0x0001020304050607,0x08090a0b0c0d0e0f .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text .globl _sha512_block_data_order_avx .private_extern _sha512_block_data_order_avx .p2align 6 _sha512_block_data_order_avx: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 shlq $4,%rdx subq $160,%rsp leaq (%rsi,%rdx,8),%rdx andq $-64,%rsp movq %rdi,128+0(%rsp) movq %rsi,128+8(%rsp) movq %rdx,128+16(%rsp) movq %rax,152(%rsp) L$prologue_avx: vzeroupper movq 0(%rdi),%rax movq 8(%rdi),%rbx movq 16(%rdi),%rcx movq 24(%rdi),%rdx movq 32(%rdi),%r8 movq 40(%rdi),%r9 movq 48(%rdi),%r10 movq 56(%rdi),%r11 jmp L$loop_avx .p2align 4 L$loop_avx: vmovdqa K512+1280(%rip),%xmm11 vmovdqu 0(%rsi),%xmm0 leaq K512+128(%rip),%rbp vmovdqu 16(%rsi),%xmm1 vmovdqu 32(%rsi),%xmm2 vpshufb %xmm11,%xmm0,%xmm0 vmovdqu 48(%rsi),%xmm3 vpshufb %xmm11,%xmm1,%xmm1 vmovdqu 64(%rsi),%xmm4 vpshufb %xmm11,%xmm2,%xmm2 vmovdqu 80(%rsi),%xmm5 vpshufb %xmm11,%xmm3,%xmm3 vmovdqu 96(%rsi),%xmm6 vpshufb %xmm11,%xmm4,%xmm4 vmovdqu 112(%rsi),%xmm7 vpshufb %xmm11,%xmm5,%xmm5 vpaddq -128(%rbp),%xmm0,%xmm8 vpshufb %xmm11,%xmm6,%xmm6 vpaddq -96(%rbp),%xmm1,%xmm9 vpshufb %xmm11,%xmm7,%xmm7 vpaddq -64(%rbp),%xmm2,%xmm10 vpaddq -32(%rbp),%xmm3,%xmm11 vmovdqa %xmm8,0(%rsp) vpaddq 0(%rbp),%xmm4,%xmm8 vmovdqa %xmm9,16(%rsp) vpaddq 32(%rbp),%xmm5,%xmm9 vmovdqa %xmm10,32(%rsp) vpaddq 64(%rbp),%xmm6,%xmm10 vmovdqa %xmm11,48(%rsp) vpaddq 96(%rbp),%xmm7,%xmm11 vmovdqa %xmm8,64(%rsp) movq %rax,%r14 
vmovdqa %xmm9,80(%rsp) movq %rbx,%rdi vmovdqa %xmm10,96(%rsp) xorq %rcx,%rdi vmovdqa %xmm11,112(%rsp) movq %r8,%r13 jmp L$avx_00_47 .p2align 4 L$avx_00_47: addq $256,%rbp vpalignr $8,%xmm0,%xmm1,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rax vpalignr $8,%xmm4,%xmm5,%xmm11 movq %r9,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r8,%r13 xorq %r10,%r12 vpaddq %xmm11,%xmm0,%xmm0 shrdq $4,%r13,%r13 xorq %rax,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r8,%r12 xorq %r8,%r13 vpsllq $56,%xmm8,%xmm9 addq 0(%rsp),%r11 movq %rax,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r10,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rbx,%r15 addq %r12,%r11 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rax,%r14 addq %r13,%r11 vpxor %xmm10,%xmm8,%xmm8 xorq %rbx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm7,%xmm11 addq %r11,%rdx addq %rdi,%r11 vpxor %xmm9,%xmm8,%xmm8 movq %rdx,%r13 addq %r11,%r14 vpsllq $3,%xmm7,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r11 vpaddq %xmm8,%xmm0,%xmm0 movq %r8,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm7,%xmm9 xorq %rdx,%r13 xorq %r9,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r11,%r14 vpsllq $42,%xmm10,%xmm10 andq %rdx,%r12 xorq %rdx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 8(%rsp),%r10 movq %r11,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r9,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rax,%rdi addq %r12,%r10 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm0,%xmm0 xorq %r11,%r14 addq %r13,%r10 vpaddq -128(%rbp),%xmm0,%xmm10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 vmovdqa %xmm10,0(%rsp) vpalignr $8,%xmm1,%xmm2,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r10 vpalignr $8,%xmm5,%xmm6,%xmm11 movq %rdx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rcx,%r13 xorq %r8,%r12 vpaddq %xmm11,%xmm1,%xmm1 shrdq $4,%r13,%r13 xorq %r10,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rcx,%r12 xorq %rcx,%r13 vpsllq $56,%xmm8,%xmm9 addq 16(%rsp),%r9 movq %r10,%r15 vpxor 
%xmm10,%xmm11,%xmm8 xorq %r8,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r11,%r15 addq %r12,%r9 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r10,%r14 addq %r13,%r9 vpxor %xmm10,%xmm8,%xmm8 xorq %r11,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm0,%xmm11 addq %r9,%rbx addq %rdi,%r9 vpxor %xmm9,%xmm8,%xmm8 movq %rbx,%r13 addq %r9,%r14 vpsllq $3,%xmm0,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r9 vpaddq %xmm8,%xmm1,%xmm1 movq %rcx,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm0,%xmm9 xorq %rbx,%r13 xorq %rdx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r9,%r14 vpsllq $42,%xmm10,%xmm10 andq %rbx,%r12 xorq %rbx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 24(%rsp),%r8 movq %r9,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rdx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r10,%rdi addq %r12,%r8 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm1,%xmm1 xorq %r9,%r14 addq %r13,%r8 vpaddq -96(%rbp),%xmm1,%xmm10 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 vmovdqa %xmm10,16(%rsp) vpalignr $8,%xmm2,%xmm3,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r8 vpalignr $8,%xmm6,%xmm7,%xmm11 movq %rbx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rax,%r13 xorq %rcx,%r12 vpaddq %xmm11,%xmm2,%xmm2 shrdq $4,%r13,%r13 xorq %r8,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rax,%r12 xorq %rax,%r13 vpsllq $56,%xmm8,%xmm9 addq 32(%rsp),%rdx movq %r8,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rcx,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r9,%r15 addq %r12,%rdx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r8,%r14 addq %r13,%rdx vpxor %xmm10,%xmm8,%xmm8 xorq %r9,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm1,%xmm11 addq %rdx,%r11 addq %rdi,%rdx vpxor %xmm9,%xmm8,%xmm8 movq %r11,%r13 addq %rdx,%r14 vpsllq $3,%xmm1,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rdx vpaddq %xmm8,%xmm2,%xmm2 movq %rax,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm1,%xmm9 xorq %r11,%r13 xorq 
%rbx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rdx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r11,%r12 xorq %r11,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 40(%rsp),%rcx movq %rdx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rbx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r8,%rdi addq %r12,%rcx vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm2,%xmm2 xorq %rdx,%r14 addq %r13,%rcx vpaddq -64(%rbp),%xmm2,%xmm10 xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 vmovdqa %xmm10,32(%rsp) vpalignr $8,%xmm3,%xmm4,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rcx vpalignr $8,%xmm7,%xmm0,%xmm11 movq %r11,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r10,%r13 xorq %rax,%r12 vpaddq %xmm11,%xmm3,%xmm3 shrdq $4,%r13,%r13 xorq %rcx,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r10,%r12 xorq %r10,%r13 vpsllq $56,%xmm8,%xmm9 addq 48(%rsp),%rbx movq %rcx,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rax,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rdx,%r15 addq %r12,%rbx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rcx,%r14 addq %r13,%rbx vpxor %xmm10,%xmm8,%xmm8 xorq %rdx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm2,%xmm11 addq %rbx,%r9 addq %rdi,%rbx vpxor %xmm9,%xmm8,%xmm8 movq %r9,%r13 addq %rbx,%r14 vpsllq $3,%xmm2,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rbx vpaddq %xmm8,%xmm3,%xmm3 movq %r10,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm2,%xmm9 xorq %r9,%r13 xorq %r11,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rbx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r9,%r12 xorq %r9,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 56(%rsp),%rax movq %rbx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r11,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rcx,%rdi addq %r12,%rax vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm3,%xmm3 xorq %rbx,%r14 addq %r13,%rax vpaddq -32(%rbp),%xmm3,%xmm10 xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq 
%rax,%r14 vmovdqa %xmm10,48(%rsp) vpalignr $8,%xmm4,%xmm5,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rax vpalignr $8,%xmm0,%xmm1,%xmm11 movq %r9,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r8,%r13 xorq %r10,%r12 vpaddq %xmm11,%xmm4,%xmm4 shrdq $4,%r13,%r13 xorq %rax,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r8,%r12 xorq %r8,%r13 vpsllq $56,%xmm8,%xmm9 addq 64(%rsp),%r11 movq %rax,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r10,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rbx,%r15 addq %r12,%r11 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rax,%r14 addq %r13,%r11 vpxor %xmm10,%xmm8,%xmm8 xorq %rbx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm3,%xmm11 addq %r11,%rdx addq %rdi,%r11 vpxor %xmm9,%xmm8,%xmm8 movq %rdx,%r13 addq %r11,%r14 vpsllq $3,%xmm3,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r11 vpaddq %xmm8,%xmm4,%xmm4 movq %r8,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm3,%xmm9 xorq %rdx,%r13 xorq %r9,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r11,%r14 vpsllq $42,%xmm10,%xmm10 andq %rdx,%r12 xorq %rdx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 72(%rsp),%r10 movq %r11,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r9,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rax,%rdi addq %r12,%r10 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm4,%xmm4 xorq %r11,%r14 addq %r13,%r10 vpaddq 0(%rbp),%xmm4,%xmm10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 vmovdqa %xmm10,64(%rsp) vpalignr $8,%xmm5,%xmm6,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r10 vpalignr $8,%xmm1,%xmm2,%xmm11 movq %rdx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rcx,%r13 xorq %r8,%r12 vpaddq %xmm11,%xmm5,%xmm5 shrdq $4,%r13,%r13 xorq %r10,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rcx,%r12 xorq %rcx,%r13 vpsllq $56,%xmm8,%xmm9 addq 80(%rsp),%r9 movq %r10,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r8,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r11,%r15 addq %r12,%r9 vpxor %xmm9,%xmm8,%xmm8 shrdq 
$14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r10,%r14 addq %r13,%r9 vpxor %xmm10,%xmm8,%xmm8 xorq %r11,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm4,%xmm11 addq %r9,%rbx addq %rdi,%r9 vpxor %xmm9,%xmm8,%xmm8 movq %rbx,%r13 addq %r9,%r14 vpsllq $3,%xmm4,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r9 vpaddq %xmm8,%xmm5,%xmm5 movq %rcx,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm4,%xmm9 xorq %rbx,%r13 xorq %rdx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r9,%r14 vpsllq $42,%xmm10,%xmm10 andq %rbx,%r12 xorq %rbx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 88(%rsp),%r8 movq %r9,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rdx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r10,%rdi addq %r12,%r8 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm5,%xmm5 xorq %r9,%r14 addq %r13,%r8 vpaddq 32(%rbp),%xmm5,%xmm10 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 vmovdqa %xmm10,80(%rsp) vpalignr $8,%xmm6,%xmm7,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r8 vpalignr $8,%xmm2,%xmm3,%xmm11 movq %rbx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rax,%r13 xorq %rcx,%r12 vpaddq %xmm11,%xmm6,%xmm6 shrdq $4,%r13,%r13 xorq %r8,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rax,%r12 xorq %rax,%r13 vpsllq $56,%xmm8,%xmm9 addq 96(%rsp),%rdx movq %r8,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rcx,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r9,%r15 addq %r12,%rdx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r8,%r14 addq %r13,%rdx vpxor %xmm10,%xmm8,%xmm8 xorq %r9,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm5,%xmm11 addq %rdx,%r11 addq %rdi,%rdx vpxor %xmm9,%xmm8,%xmm8 movq %r11,%r13 addq %rdx,%r14 vpsllq $3,%xmm5,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rdx vpaddq %xmm8,%xmm6,%xmm6 movq %rax,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm5,%xmm9 xorq %r11,%r13 xorq %rbx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rdx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r11,%r12 xorq %r11,%r13 vpxor 
%xmm9,%xmm11,%xmm11 addq 104(%rsp),%rcx movq %rdx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rbx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r8,%rdi addq %r12,%rcx vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm6,%xmm6 xorq %rdx,%r14 addq %r13,%rcx vpaddq 64(%rbp),%xmm6,%xmm10 xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 vmovdqa %xmm10,96(%rsp) vpalignr $8,%xmm7,%xmm0,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rcx vpalignr $8,%xmm3,%xmm4,%xmm11 movq %r11,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r10,%r13 xorq %rax,%r12 vpaddq %xmm11,%xmm7,%xmm7 shrdq $4,%r13,%r13 xorq %rcx,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r10,%r12 xorq %r10,%r13 vpsllq $56,%xmm8,%xmm9 addq 112(%rsp),%rbx movq %rcx,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rax,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rdx,%r15 addq %r12,%rbx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rcx,%r14 addq %r13,%rbx vpxor %xmm10,%xmm8,%xmm8 xorq %rdx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm6,%xmm11 addq %rbx,%r9 addq %rdi,%rbx vpxor %xmm9,%xmm8,%xmm8 movq %r9,%r13 addq %rbx,%r14 vpsllq $3,%xmm6,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rbx vpaddq %xmm8,%xmm7,%xmm7 movq %r10,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm6,%xmm9 xorq %r9,%r13 xorq %r11,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rbx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r9,%r12 xorq %r9,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 120(%rsp),%rax movq %rbx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r11,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rcx,%rdi addq %r12,%rax vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm7,%xmm7 xorq %rbx,%r14 addq %r13,%rax vpaddq 96(%rbp),%xmm7,%xmm10 xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 vmovdqa %xmm10,112(%rsp) cmpb $0,135(%rbp) jne L$avx_00_47 shrdq $23,%r13,%r13 movq %r14,%rax movq %r9,%r12 shrdq $5,%r14,%r14 
xorq %r8,%r13 xorq %r10,%r12 shrdq $4,%r13,%r13 xorq %rax,%r14 andq %r8,%r12 xorq %r8,%r13 addq 0(%rsp),%r11 movq %rax,%r15 xorq %r10,%r12 shrdq $6,%r14,%r14 xorq %rbx,%r15 addq %r12,%r11 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rax,%r14 addq %r13,%r11 xorq %rbx,%rdi shrdq $28,%r14,%r14 addq %r11,%rdx addq %rdi,%r11 movq %rdx,%r13 addq %r11,%r14 shrdq $23,%r13,%r13 movq %r14,%r11 movq %r8,%r12 shrdq $5,%r14,%r14 xorq %rdx,%r13 xorq %r9,%r12 shrdq $4,%r13,%r13 xorq %r11,%r14 andq %rdx,%r12 xorq %rdx,%r13 addq 8(%rsp),%r10 movq %r11,%rdi xorq %r9,%r12 shrdq $6,%r14,%r14 xorq %rax,%rdi addq %r12,%r10 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r11,%r14 addq %r13,%r10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 shrdq $23,%r13,%r13 movq %r14,%r10 movq %rdx,%r12 shrdq $5,%r14,%r14 xorq %rcx,%r13 xorq %r8,%r12 shrdq $4,%r13,%r13 xorq %r10,%r14 andq %rcx,%r12 xorq %rcx,%r13 addq 16(%rsp),%r9 movq %r10,%r15 xorq %r8,%r12 shrdq $6,%r14,%r14 xorq %r11,%r15 addq %r12,%r9 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r10,%r14 addq %r13,%r9 xorq %r11,%rdi shrdq $28,%r14,%r14 addq %r9,%rbx addq %rdi,%r9 movq %rbx,%r13 addq %r9,%r14 shrdq $23,%r13,%r13 movq %r14,%r9 movq %rcx,%r12 shrdq $5,%r14,%r14 xorq %rbx,%r13 xorq %rdx,%r12 shrdq $4,%r13,%r13 xorq %r9,%r14 andq %rbx,%r12 xorq %rbx,%r13 addq 24(%rsp),%r8 movq %r9,%rdi xorq %rdx,%r12 shrdq $6,%r14,%r14 xorq %r10,%rdi addq %r12,%r8 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r9,%r14 addq %r13,%r8 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 shrdq $23,%r13,%r13 movq %r14,%r8 movq %rbx,%r12 shrdq $5,%r14,%r14 xorq %rax,%r13 xorq %rcx,%r12 shrdq $4,%r13,%r13 xorq %r8,%r14 andq %rax,%r12 xorq %rax,%r13 addq 32(%rsp),%rdx movq %r8,%r15 xorq %rcx,%r12 shrdq $6,%r14,%r14 xorq %r9,%r15 addq %r12,%rdx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r8,%r14 addq %r13,%rdx xorq %r9,%rdi shrdq $28,%r14,%r14 addq %rdx,%r11 addq %rdi,%rdx movq %r11,%r13 addq 
%rdx,%r14 shrdq $23,%r13,%r13 movq %r14,%rdx movq %rax,%r12 shrdq $5,%r14,%r14 xorq %r11,%r13 xorq %rbx,%r12 shrdq $4,%r13,%r13 xorq %rdx,%r14 andq %r11,%r12 xorq %r11,%r13 addq 40(%rsp),%rcx movq %rdx,%rdi xorq %rbx,%r12 shrdq $6,%r14,%r14 xorq %r8,%rdi addq %r12,%rcx shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rdx,%r14 addq %r13,%rcx xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 shrdq $23,%r13,%r13 movq %r14,%rcx movq %r11,%r12 shrdq $5,%r14,%r14 xorq %r10,%r13 xorq %rax,%r12 shrdq $4,%r13,%r13 xorq %rcx,%r14 andq %r10,%r12 xorq %r10,%r13 addq 48(%rsp),%rbx movq %rcx,%r15 xorq %rax,%r12 shrdq $6,%r14,%r14 xorq %rdx,%r15 addq %r12,%rbx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rcx,%r14 addq %r13,%rbx xorq %rdx,%rdi shrdq $28,%r14,%r14 addq %rbx,%r9 addq %rdi,%rbx movq %r9,%r13 addq %rbx,%r14 shrdq $23,%r13,%r13 movq %r14,%rbx movq %r10,%r12 shrdq $5,%r14,%r14 xorq %r9,%r13 xorq %r11,%r12 shrdq $4,%r13,%r13 xorq %rbx,%r14 andq %r9,%r12 xorq %r9,%r13 addq 56(%rsp),%rax movq %rbx,%rdi xorq %r11,%r12 shrdq $6,%r14,%r14 xorq %rcx,%rdi addq %r12,%rax shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rbx,%r14 addq %r13,%rax xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 shrdq $23,%r13,%r13 movq %r14,%rax movq %r9,%r12 shrdq $5,%r14,%r14 xorq %r8,%r13 xorq %r10,%r12 shrdq $4,%r13,%r13 xorq %rax,%r14 andq %r8,%r12 xorq %r8,%r13 addq 64(%rsp),%r11 movq %rax,%r15 xorq %r10,%r12 shrdq $6,%r14,%r14 xorq %rbx,%r15 addq %r12,%r11 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rax,%r14 addq %r13,%r11 xorq %rbx,%rdi shrdq $28,%r14,%r14 addq %r11,%rdx addq %rdi,%r11 movq %rdx,%r13 addq %r11,%r14 shrdq $23,%r13,%r13 movq %r14,%r11 movq %r8,%r12 shrdq $5,%r14,%r14 xorq %rdx,%r13 xorq %r9,%r12 shrdq $4,%r13,%r13 xorq %r11,%r14 andq %rdx,%r12 xorq %rdx,%r13 addq 72(%rsp),%r10 movq %r11,%rdi xorq %r9,%r12 shrdq $6,%r14,%r14 xorq %rax,%rdi addq %r12,%r10 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r11,%r14 addq 
%r13,%r10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 shrdq $23,%r13,%r13 movq %r14,%r10 movq %rdx,%r12 shrdq $5,%r14,%r14 xorq %rcx,%r13 xorq %r8,%r12 shrdq $4,%r13,%r13 xorq %r10,%r14 andq %rcx,%r12 xorq %rcx,%r13 addq 80(%rsp),%r9 movq %r10,%r15 xorq %r8,%r12 shrdq $6,%r14,%r14 xorq %r11,%r15 addq %r12,%r9 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r10,%r14 addq %r13,%r9 xorq %r11,%rdi shrdq $28,%r14,%r14 addq %r9,%rbx addq %rdi,%r9 movq %rbx,%r13 addq %r9,%r14 shrdq $23,%r13,%r13 movq %r14,%r9 movq %rcx,%r12 shrdq $5,%r14,%r14 xorq %rbx,%r13 xorq %rdx,%r12 shrdq $4,%r13,%r13 xorq %r9,%r14 andq %rbx,%r12 xorq %rbx,%r13 addq 88(%rsp),%r8 movq %r9,%rdi xorq %rdx,%r12 shrdq $6,%r14,%r14 xorq %r10,%rdi addq %r12,%r8 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r9,%r14 addq %r13,%r8 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 shrdq $23,%r13,%r13 movq %r14,%r8 movq %rbx,%r12 shrdq $5,%r14,%r14 xorq %rax,%r13 xorq %rcx,%r12 shrdq $4,%r13,%r13 xorq %r8,%r14 andq %rax,%r12 xorq %rax,%r13 addq 96(%rsp),%rdx movq %r8,%r15 xorq %rcx,%r12 shrdq $6,%r14,%r14 xorq %r9,%r15 addq %r12,%rdx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r8,%r14 addq %r13,%rdx xorq %r9,%rdi shrdq $28,%r14,%r14 addq %rdx,%r11 addq %rdi,%rdx movq %r11,%r13 addq %rdx,%r14 shrdq $23,%r13,%r13 movq %r14,%rdx movq %rax,%r12 shrdq $5,%r14,%r14 xorq %r11,%r13 xorq %rbx,%r12 shrdq $4,%r13,%r13 xorq %rdx,%r14 andq %r11,%r12 xorq %r11,%r13 addq 104(%rsp),%rcx movq %rdx,%rdi xorq %rbx,%r12 shrdq $6,%r14,%r14 xorq %r8,%rdi addq %r12,%rcx shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rdx,%r14 addq %r13,%rcx xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 shrdq $23,%r13,%r13 movq %r14,%rcx movq %r11,%r12 shrdq $5,%r14,%r14 xorq %r10,%r13 xorq %rax,%r12 shrdq $4,%r13,%r13 xorq %rcx,%r14 andq %r10,%r12 xorq %r10,%r13 addq 112(%rsp),%rbx movq %rcx,%r15 xorq %rax,%r12 shrdq $6,%r14,%r14 
xorq %rdx,%r15 addq %r12,%rbx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rcx,%r14 addq %r13,%rbx xorq %rdx,%rdi shrdq $28,%r14,%r14 addq %rbx,%r9 addq %rdi,%rbx movq %r9,%r13 addq %rbx,%r14 shrdq $23,%r13,%r13 movq %r14,%rbx movq %r10,%r12 shrdq $5,%r14,%r14 xorq %r9,%r13 xorq %r11,%r12 shrdq $4,%r13,%r13 xorq %rbx,%r14 andq %r9,%r12 xorq %r9,%r13 addq 120(%rsp),%rax movq %rbx,%rdi xorq %r11,%r12 shrdq $6,%r14,%r14 xorq %rcx,%rdi addq %r12,%rax shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rbx,%r14 addq %r13,%rax xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 movq 128+0(%rsp),%rdi movq %r14,%rax addq 0(%rdi),%rax leaq 128(%rsi),%rsi addq 8(%rdi),%rbx addq 16(%rdi),%rcx addq 24(%rdi),%rdx addq 32(%rdi),%r8 addq 40(%rdi),%r9 addq 48(%rdi),%r10 addq 56(%rdi),%r11 cmpq 128+16(%rsp),%rsi movq %rax,0(%rdi) movq %rbx,8(%rdi) movq %rcx,16(%rdi) movq %rdx,24(%rdi) movq %r8,32(%rdi) movq %r9,40(%rdi) movq %r10,48(%rdi) movq %r11,56(%rdi) jb L$loop_avx movq 152(%rsp),%rsi vzeroupper movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$epilogue_avx: ret #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/sha512-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl sha512_block_data_order_nohw .hidden sha512_block_data_order_nohw .type sha512_block_data_order_nohw,@function .align 16 sha512_block_data_order_nohw: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 shlq $4,%rdx subq $128+32,%rsp leaq (%rsi,%rdx,8),%rdx andq $-64,%rsp movq %rdi,128+0(%rsp) movq %rsi,128+8(%rsp) movq %rdx,128+16(%rsp) movq %rax,152(%rsp) .cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08 .Lprologue: movq 0(%rdi),%rax movq 8(%rdi),%rbx movq 16(%rdi),%rcx movq 24(%rdi),%rdx movq 32(%rdi),%r8 movq 40(%rdi),%r9 movq 48(%rdi),%r10 movq 56(%rdi),%r11 jmp .Lloop .align 16 .Lloop: movq %rbx,%rdi leaq K512(%rip),%rbp xorq %rcx,%rdi movq 0(%rsi),%r12 movq %r8,%r13 movq %rax,%r14 bswapq %r12 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,0(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp addq %r14,%r11 movq 8(%rsi),%r12 movq %rdx,%r13 movq %r11,%r14 bswapq %r12 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,8(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp addq %r14,%r10 movq 16(%rsi),%r12 movq %rcx,%r13 movq %r10,%r14 bswapq %r12 rorq $23,%r13 movq %rdx,%r15 
xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,16(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp addq %r14,%r9 movq 24(%rsi),%r12 movq %rbx,%r13 movq %r9,%r14 bswapq %r12 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,24(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp addq %r14,%r8 movq 32(%rsi),%r12 movq %rax,%r13 movq %r8,%r14 bswapq %r12 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,32(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp addq %r14,%rdx movq 40(%rsi),%r12 movq %r11,%r13 movq %rdx,%r14 bswapq %r12 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,40(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp addq %r14,%rcx movq 48(%rsi),%r12 movq %r10,%r13 movq %rcx,%r14 bswapq %r12 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,48(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq 
$4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp addq %r14,%rbx movq 56(%rsi),%r12 movq %r9,%r13 movq %rbx,%r14 bswapq %r12 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,56(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp addq %r14,%rax movq 64(%rsi),%r12 movq %r8,%r13 movq %rax,%r14 bswapq %r12 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,64(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp addq %r14,%r11 movq 72(%rsi),%r12 movq %rdx,%r13 movq %r11,%r14 bswapq %r12 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,72(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp addq %r14,%r10 movq 80(%rsi),%r12 movq %rcx,%r13 movq %r10,%r14 bswapq %r12 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,80(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq 
%r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp addq %r14,%r9 movq 88(%rsi),%r12 movq %rbx,%r13 movq %r9,%r14 bswapq %r12 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,88(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp addq %r14,%r8 movq 96(%rsi),%r12 movq %rax,%r13 movq %r8,%r14 bswapq %r12 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,96(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp addq %r14,%rdx movq 104(%rsi),%r12 movq %r11,%r13 movq %rdx,%r14 bswapq %r12 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,104(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp addq %r14,%rcx movq 112(%rsi),%r12 movq %r10,%r13 movq %rcx,%r14 bswapq %r12 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,112(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq 
%r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp addq %r14,%rbx movq 120(%rsi),%r12 movq %r9,%r13 movq %rbx,%r14 bswapq %r12 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,120(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp jmp .Lrounds_16_xx .align 16 .Lrounds_16_xx: movq 8(%rsp),%r13 movq 112(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rax movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 72(%rsp),%r12 addq 0(%rsp),%r12 movq %r8,%r13 addq %r15,%r12 movq %rax,%r14 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,0(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp movq 16(%rsp),%r13 movq 120(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r11 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 80(%rsp),%r12 addq 8(%rsp),%r12 movq %rdx,%r13 addq %rdi,%r12 movq %r11,%r14 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,8(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq 
%r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp movq 24(%rsp),%r13 movq 0(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r10 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 88(%rsp),%r12 addq 16(%rsp),%r12 movq %rcx,%r13 addq %r15,%r12 movq %r10,%r14 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,16(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp movq 32(%rsp),%r13 movq 8(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r9 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 96(%rsp),%r12 addq 24(%rsp),%r12 movq %rbx,%r13 addq %rdi,%r12 movq %r9,%r14 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,24(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp movq 40(%rsp),%r13 movq 16(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r8 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 104(%rsp),%r12 addq 32(%rsp),%r12 movq %rax,%r13 addq %r15,%r12 movq %r8,%r14 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,32(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq 
%r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp movq 48(%rsp),%r13 movq 24(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rdx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 112(%rsp),%r12 addq 40(%rsp),%r12 movq %r11,%r13 addq %rdi,%r12 movq %rdx,%r14 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,40(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp movq 56(%rsp),%r13 movq 32(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rcx movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 120(%rsp),%r12 addq 48(%rsp),%r12 movq %r10,%r13 addq %r15,%r12 movq %rcx,%r14 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,48(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp movq 64(%rsp),%r13 movq 40(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rbx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 0(%rsp),%r12 addq 56(%rsp),%r12 movq %r9,%r13 addq %rdi,%r12 movq %rbx,%r14 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,56(%rsp) xorq %rbx,%r14 andq 
%r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp movq 72(%rsp),%r13 movq 48(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rax movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 8(%rsp),%r12 addq 64(%rsp),%r12 movq %r8,%r13 addq %r15,%r12 movq %rax,%r14 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,64(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp movq 80(%rsp),%r13 movq 56(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r11 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 16(%rsp),%r12 addq 72(%rsp),%r12 movq %rdx,%r13 addq %rdi,%r12 movq %r11,%r14 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,72(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp movq 88(%rsp),%r13 movq 64(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r10 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 24(%rsp),%r12 addq 80(%rsp),%r12 movq %rcx,%r13 addq %r15,%r12 
movq %r10,%r14 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,80(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp movq 96(%rsp),%r13 movq 72(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r9 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 32(%rsp),%r12 addq 88(%rsp),%r12 movq %rbx,%r13 addq %rdi,%r12 movq %r9,%r14 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,88(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp movq 104(%rsp),%r13 movq 80(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r8 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 40(%rsp),%r12 addq 96(%rsp),%r12 movq %rax,%r13 addq %r15,%r12 movq %r8,%r14 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,96(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp movq 112(%rsp),%r13 movq 88(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rdx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq 
$6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 48(%rsp),%r12 addq 104(%rsp),%r12 movq %r11,%r13 addq %rdi,%r12 movq %rdx,%r14 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,104(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp movq 120(%rsp),%r13 movq 96(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rcx movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 56(%rsp),%r12 addq 112(%rsp),%r12 movq %r10,%r13 addq %r15,%r12 movq %rcx,%r14 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,112(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp movq 0(%rsp),%r13 movq 104(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rbx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 64(%rsp),%r12 addq 120(%rsp),%r12 movq %r9,%r13 addq %rdi,%r12 movq %rbx,%r14 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,120(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp cmpb $0,7(%rbp) jnz .Lrounds_16_xx 
movq 128+0(%rsp),%rdi addq %r14,%rax leaq 128(%rsi),%rsi addq 0(%rdi),%rax addq 8(%rdi),%rbx addq 16(%rdi),%rcx addq 24(%rdi),%rdx addq 32(%rdi),%r8 addq 40(%rdi),%r9 addq 48(%rdi),%r10 addq 56(%rdi),%r11 cmpq 128+16(%rsp),%rsi movq %rax,0(%rdi) movq %rbx,8(%rdi) movq %rcx,16(%rdi) movq %rdx,24(%rdi) movq %r8,32(%rdi) movq %r9,40(%rdi) movq %r10,48(%rdi) movq %r11,56(%rdi) jb .Lloop movq 152(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lepilogue: ret .cfi_endproc .size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw .section .rodata .align 64 .type K512,@object K512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 
0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 
0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0x0001020304050607,0x08090a0b0c0d0e0f .quad 0x0001020304050607,0x08090a0b0c0d0e0f .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text .globl sha512_block_data_order_avx .hidden sha512_block_data_order_avx .type sha512_block_data_order_avx,@function .align 64 sha512_block_data_order_avx: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 shlq $4,%rdx subq $160,%rsp leaq (%rsi,%rdx,8),%rdx andq $-64,%rsp movq %rdi,128+0(%rsp) movq %rsi,128+8(%rsp) movq %rdx,128+16(%rsp) movq %rax,152(%rsp) .cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08 .Lprologue_avx: vzeroupper movq 0(%rdi),%rax movq 8(%rdi),%rbx movq 16(%rdi),%rcx movq 24(%rdi),%rdx movq 32(%rdi),%r8 movq 40(%rdi),%r9 movq 48(%rdi),%r10 movq 56(%rdi),%r11 jmp .Lloop_avx .align 16 .Lloop_avx: vmovdqa K512+1280(%rip),%xmm11 vmovdqu 0(%rsi),%xmm0 leaq K512+128(%rip),%rbp vmovdqu 16(%rsi),%xmm1 vmovdqu 32(%rsi),%xmm2 vpshufb %xmm11,%xmm0,%xmm0 vmovdqu 48(%rsi),%xmm3 vpshufb %xmm11,%xmm1,%xmm1 vmovdqu 64(%rsi),%xmm4 vpshufb %xmm11,%xmm2,%xmm2 vmovdqu 80(%rsi),%xmm5 vpshufb %xmm11,%xmm3,%xmm3 vmovdqu 96(%rsi),%xmm6 vpshufb %xmm11,%xmm4,%xmm4 vmovdqu 112(%rsi),%xmm7 vpshufb %xmm11,%xmm5,%xmm5 vpaddq -128(%rbp),%xmm0,%xmm8 vpshufb 
%xmm11,%xmm6,%xmm6 vpaddq -96(%rbp),%xmm1,%xmm9 vpshufb %xmm11,%xmm7,%xmm7 vpaddq -64(%rbp),%xmm2,%xmm10 vpaddq -32(%rbp),%xmm3,%xmm11 vmovdqa %xmm8,0(%rsp) vpaddq 0(%rbp),%xmm4,%xmm8 vmovdqa %xmm9,16(%rsp) vpaddq 32(%rbp),%xmm5,%xmm9 vmovdqa %xmm10,32(%rsp) vpaddq 64(%rbp),%xmm6,%xmm10 vmovdqa %xmm11,48(%rsp) vpaddq 96(%rbp),%xmm7,%xmm11 vmovdqa %xmm8,64(%rsp) movq %rax,%r14 vmovdqa %xmm9,80(%rsp) movq %rbx,%rdi vmovdqa %xmm10,96(%rsp) xorq %rcx,%rdi vmovdqa %xmm11,112(%rsp) movq %r8,%r13 jmp .Lavx_00_47 .align 16 .Lavx_00_47: addq $256,%rbp vpalignr $8,%xmm0,%xmm1,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rax vpalignr $8,%xmm4,%xmm5,%xmm11 movq %r9,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r8,%r13 xorq %r10,%r12 vpaddq %xmm11,%xmm0,%xmm0 shrdq $4,%r13,%r13 xorq %rax,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r8,%r12 xorq %r8,%r13 vpsllq $56,%xmm8,%xmm9 addq 0(%rsp),%r11 movq %rax,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r10,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rbx,%r15 addq %r12,%r11 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rax,%r14 addq %r13,%r11 vpxor %xmm10,%xmm8,%xmm8 xorq %rbx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm7,%xmm11 addq %r11,%rdx addq %rdi,%r11 vpxor %xmm9,%xmm8,%xmm8 movq %rdx,%r13 addq %r11,%r14 vpsllq $3,%xmm7,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r11 vpaddq %xmm8,%xmm0,%xmm0 movq %r8,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm7,%xmm9 xorq %rdx,%r13 xorq %r9,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r11,%r14 vpsllq $42,%xmm10,%xmm10 andq %rdx,%r12 xorq %rdx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 8(%rsp),%r10 movq %r11,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r9,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rax,%rdi addq %r12,%r10 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm0,%xmm0 xorq %r11,%r14 addq %r13,%r10 vpaddq -128(%rbp),%xmm0,%xmm10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 vmovdqa 
%xmm10,0(%rsp) vpalignr $8,%xmm1,%xmm2,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r10 vpalignr $8,%xmm5,%xmm6,%xmm11 movq %rdx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rcx,%r13 xorq %r8,%r12 vpaddq %xmm11,%xmm1,%xmm1 shrdq $4,%r13,%r13 xorq %r10,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rcx,%r12 xorq %rcx,%r13 vpsllq $56,%xmm8,%xmm9 addq 16(%rsp),%r9 movq %r10,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r8,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r11,%r15 addq %r12,%r9 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r10,%r14 addq %r13,%r9 vpxor %xmm10,%xmm8,%xmm8 xorq %r11,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm0,%xmm11 addq %r9,%rbx addq %rdi,%r9 vpxor %xmm9,%xmm8,%xmm8 movq %rbx,%r13 addq %r9,%r14 vpsllq $3,%xmm0,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r9 vpaddq %xmm8,%xmm1,%xmm1 movq %rcx,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm0,%xmm9 xorq %rbx,%r13 xorq %rdx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r9,%r14 vpsllq $42,%xmm10,%xmm10 andq %rbx,%r12 xorq %rbx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 24(%rsp),%r8 movq %r9,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rdx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r10,%rdi addq %r12,%r8 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm1,%xmm1 xorq %r9,%r14 addq %r13,%r8 vpaddq -96(%rbp),%xmm1,%xmm10 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 vmovdqa %xmm10,16(%rsp) vpalignr $8,%xmm2,%xmm3,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r8 vpalignr $8,%xmm6,%xmm7,%xmm11 movq %rbx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rax,%r13 xorq %rcx,%r12 vpaddq %xmm11,%xmm2,%xmm2 shrdq $4,%r13,%r13 xorq %r8,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rax,%r12 xorq %rax,%r13 vpsllq $56,%xmm8,%xmm9 addq 32(%rsp),%rdx movq %r8,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rcx,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r9,%r15 addq %r12,%rdx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq 
$7,%xmm9,%xmm9 xorq %r8,%r14 addq %r13,%rdx vpxor %xmm10,%xmm8,%xmm8 xorq %r9,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm1,%xmm11 addq %rdx,%r11 addq %rdi,%rdx vpxor %xmm9,%xmm8,%xmm8 movq %r11,%r13 addq %rdx,%r14 vpsllq $3,%xmm1,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rdx vpaddq %xmm8,%xmm2,%xmm2 movq %rax,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm1,%xmm9 xorq %r11,%r13 xorq %rbx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rdx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r11,%r12 xorq %r11,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 40(%rsp),%rcx movq %rdx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rbx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r8,%rdi addq %r12,%rcx vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm2,%xmm2 xorq %rdx,%r14 addq %r13,%rcx vpaddq -64(%rbp),%xmm2,%xmm10 xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 vmovdqa %xmm10,32(%rsp) vpalignr $8,%xmm3,%xmm4,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rcx vpalignr $8,%xmm7,%xmm0,%xmm11 movq %r11,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r10,%r13 xorq %rax,%r12 vpaddq %xmm11,%xmm3,%xmm3 shrdq $4,%r13,%r13 xorq %rcx,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r10,%r12 xorq %r10,%r13 vpsllq $56,%xmm8,%xmm9 addq 48(%rsp),%rbx movq %rcx,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rax,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rdx,%r15 addq %r12,%rbx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rcx,%r14 addq %r13,%rbx vpxor %xmm10,%xmm8,%xmm8 xorq %rdx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm2,%xmm11 addq %rbx,%r9 addq %rdi,%rbx vpxor %xmm9,%xmm8,%xmm8 movq %r9,%r13 addq %rbx,%r14 vpsllq $3,%xmm2,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rbx vpaddq %xmm8,%xmm3,%xmm3 movq %r10,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm2,%xmm9 xorq %r9,%r13 xorq %r11,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rbx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r9,%r12 xorq %r9,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 
56(%rsp),%rax movq %rbx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r11,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rcx,%rdi addq %r12,%rax vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm3,%xmm3 xorq %rbx,%r14 addq %r13,%rax vpaddq -32(%rbp),%xmm3,%xmm10 xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 vmovdqa %xmm10,48(%rsp) vpalignr $8,%xmm4,%xmm5,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rax vpalignr $8,%xmm0,%xmm1,%xmm11 movq %r9,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r8,%r13 xorq %r10,%r12 vpaddq %xmm11,%xmm4,%xmm4 shrdq $4,%r13,%r13 xorq %rax,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r8,%r12 xorq %r8,%r13 vpsllq $56,%xmm8,%xmm9 addq 64(%rsp),%r11 movq %rax,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r10,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rbx,%r15 addq %r12,%r11 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rax,%r14 addq %r13,%r11 vpxor %xmm10,%xmm8,%xmm8 xorq %rbx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm3,%xmm11 addq %r11,%rdx addq %rdi,%r11 vpxor %xmm9,%xmm8,%xmm8 movq %rdx,%r13 addq %r11,%r14 vpsllq $3,%xmm3,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r11 vpaddq %xmm8,%xmm4,%xmm4 movq %r8,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm3,%xmm9 xorq %rdx,%r13 xorq %r9,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r11,%r14 vpsllq $42,%xmm10,%xmm10 andq %rdx,%r12 xorq %rdx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 72(%rsp),%r10 movq %r11,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r9,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rax,%rdi addq %r12,%r10 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm4,%xmm4 xorq %r11,%r14 addq %r13,%r10 vpaddq 0(%rbp),%xmm4,%xmm10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 vmovdqa %xmm10,64(%rsp) vpalignr $8,%xmm5,%xmm6,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r10 vpalignr $8,%xmm1,%xmm2,%xmm11 movq %rdx,%r12 shrdq $5,%r14,%r14 
vpsrlq $1,%xmm8,%xmm10 xorq %rcx,%r13 xorq %r8,%r12 vpaddq %xmm11,%xmm5,%xmm5 shrdq $4,%r13,%r13 xorq %r10,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rcx,%r12 xorq %rcx,%r13 vpsllq $56,%xmm8,%xmm9 addq 80(%rsp),%r9 movq %r10,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r8,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r11,%r15 addq %r12,%r9 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r10,%r14 addq %r13,%r9 vpxor %xmm10,%xmm8,%xmm8 xorq %r11,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm4,%xmm11 addq %r9,%rbx addq %rdi,%r9 vpxor %xmm9,%xmm8,%xmm8 movq %rbx,%r13 addq %r9,%r14 vpsllq $3,%xmm4,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r9 vpaddq %xmm8,%xmm5,%xmm5 movq %rcx,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm4,%xmm9 xorq %rbx,%r13 xorq %rdx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r9,%r14 vpsllq $42,%xmm10,%xmm10 andq %rbx,%r12 xorq %rbx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 88(%rsp),%r8 movq %r9,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rdx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r10,%rdi addq %r12,%r8 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm5,%xmm5 xorq %r9,%r14 addq %r13,%r8 vpaddq 32(%rbp),%xmm5,%xmm10 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 vmovdqa %xmm10,80(%rsp) vpalignr $8,%xmm6,%xmm7,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r8 vpalignr $8,%xmm2,%xmm3,%xmm11 movq %rbx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rax,%r13 xorq %rcx,%r12 vpaddq %xmm11,%xmm6,%xmm6 shrdq $4,%r13,%r13 xorq %r8,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rax,%r12 xorq %rax,%r13 vpsllq $56,%xmm8,%xmm9 addq 96(%rsp),%rdx movq %r8,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rcx,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r9,%r15 addq %r12,%rdx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r8,%r14 addq %r13,%rdx vpxor %xmm10,%xmm8,%xmm8 xorq %r9,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm5,%xmm11 addq %rdx,%r11 addq 
%rdi,%rdx vpxor %xmm9,%xmm8,%xmm8 movq %r11,%r13 addq %rdx,%r14 vpsllq $3,%xmm5,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rdx vpaddq %xmm8,%xmm6,%xmm6 movq %rax,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm5,%xmm9 xorq %r11,%r13 xorq %rbx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rdx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r11,%r12 xorq %r11,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 104(%rsp),%rcx movq %rdx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rbx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r8,%rdi addq %r12,%rcx vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm6,%xmm6 xorq %rdx,%r14 addq %r13,%rcx vpaddq 64(%rbp),%xmm6,%xmm10 xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 vmovdqa %xmm10,96(%rsp) vpalignr $8,%xmm7,%xmm0,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rcx vpalignr $8,%xmm3,%xmm4,%xmm11 movq %r11,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r10,%r13 xorq %rax,%r12 vpaddq %xmm11,%xmm7,%xmm7 shrdq $4,%r13,%r13 xorq %rcx,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r10,%r12 xorq %r10,%r13 vpsllq $56,%xmm8,%xmm9 addq 112(%rsp),%rbx movq %rcx,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rax,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rdx,%r15 addq %r12,%rbx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rcx,%r14 addq %r13,%rbx vpxor %xmm10,%xmm8,%xmm8 xorq %rdx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm6,%xmm11 addq %rbx,%r9 addq %rdi,%rbx vpxor %xmm9,%xmm8,%xmm8 movq %r9,%r13 addq %rbx,%r14 vpsllq $3,%xmm6,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rbx vpaddq %xmm8,%xmm7,%xmm7 movq %r10,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm6,%xmm9 xorq %r9,%r13 xorq %r11,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rbx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r9,%r12 xorq %r9,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 120(%rsp),%rax movq %rbx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r11,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rcx,%rdi addq %r12,%rax vpxor 
%xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm7,%xmm7 xorq %rbx,%r14 addq %r13,%rax vpaddq 96(%rbp),%xmm7,%xmm10 xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 vmovdqa %xmm10,112(%rsp) cmpb $0,135(%rbp) jne .Lavx_00_47 shrdq $23,%r13,%r13 movq %r14,%rax movq %r9,%r12 shrdq $5,%r14,%r14 xorq %r8,%r13 xorq %r10,%r12 shrdq $4,%r13,%r13 xorq %rax,%r14 andq %r8,%r12 xorq %r8,%r13 addq 0(%rsp),%r11 movq %rax,%r15 xorq %r10,%r12 shrdq $6,%r14,%r14 xorq %rbx,%r15 addq %r12,%r11 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rax,%r14 addq %r13,%r11 xorq %rbx,%rdi shrdq $28,%r14,%r14 addq %r11,%rdx addq %rdi,%r11 movq %rdx,%r13 addq %r11,%r14 shrdq $23,%r13,%r13 movq %r14,%r11 movq %r8,%r12 shrdq $5,%r14,%r14 xorq %rdx,%r13 xorq %r9,%r12 shrdq $4,%r13,%r13 xorq %r11,%r14 andq %rdx,%r12 xorq %rdx,%r13 addq 8(%rsp),%r10 movq %r11,%rdi xorq %r9,%r12 shrdq $6,%r14,%r14 xorq %rax,%rdi addq %r12,%r10 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r11,%r14 addq %r13,%r10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 shrdq $23,%r13,%r13 movq %r14,%r10 movq %rdx,%r12 shrdq $5,%r14,%r14 xorq %rcx,%r13 xorq %r8,%r12 shrdq $4,%r13,%r13 xorq %r10,%r14 andq %rcx,%r12 xorq %rcx,%r13 addq 16(%rsp),%r9 movq %r10,%r15 xorq %r8,%r12 shrdq $6,%r14,%r14 xorq %r11,%r15 addq %r12,%r9 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r10,%r14 addq %r13,%r9 xorq %r11,%rdi shrdq $28,%r14,%r14 addq %r9,%rbx addq %rdi,%r9 movq %rbx,%r13 addq %r9,%r14 shrdq $23,%r13,%r13 movq %r14,%r9 movq %rcx,%r12 shrdq $5,%r14,%r14 xorq %rbx,%r13 xorq %rdx,%r12 shrdq $4,%r13,%r13 xorq %r9,%r14 andq %rbx,%r12 xorq %rbx,%r13 addq 24(%rsp),%r8 movq %r9,%rdi xorq %rdx,%r12 shrdq $6,%r14,%r14 xorq %r10,%rdi addq %r12,%r8 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r9,%r14 addq %r13,%r8 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 shrdq $23,%r13,%r13 movq %r14,%r8 movq %rbx,%r12 
shrdq $5,%r14,%r14 xorq %rax,%r13 xorq %rcx,%r12 shrdq $4,%r13,%r13 xorq %r8,%r14 andq %rax,%r12 xorq %rax,%r13 addq 32(%rsp),%rdx movq %r8,%r15 xorq %rcx,%r12 shrdq $6,%r14,%r14 xorq %r9,%r15 addq %r12,%rdx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r8,%r14 addq %r13,%rdx xorq %r9,%rdi shrdq $28,%r14,%r14 addq %rdx,%r11 addq %rdi,%rdx movq %r11,%r13 addq %rdx,%r14 shrdq $23,%r13,%r13 movq %r14,%rdx movq %rax,%r12 shrdq $5,%r14,%r14 xorq %r11,%r13 xorq %rbx,%r12 shrdq $4,%r13,%r13 xorq %rdx,%r14 andq %r11,%r12 xorq %r11,%r13 addq 40(%rsp),%rcx movq %rdx,%rdi xorq %rbx,%r12 shrdq $6,%r14,%r14 xorq %r8,%rdi addq %r12,%rcx shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rdx,%r14 addq %r13,%rcx xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 shrdq $23,%r13,%r13 movq %r14,%rcx movq %r11,%r12 shrdq $5,%r14,%r14 xorq %r10,%r13 xorq %rax,%r12 shrdq $4,%r13,%r13 xorq %rcx,%r14 andq %r10,%r12 xorq %r10,%r13 addq 48(%rsp),%rbx movq %rcx,%r15 xorq %rax,%r12 shrdq $6,%r14,%r14 xorq %rdx,%r15 addq %r12,%rbx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rcx,%r14 addq %r13,%rbx xorq %rdx,%rdi shrdq $28,%r14,%r14 addq %rbx,%r9 addq %rdi,%rbx movq %r9,%r13 addq %rbx,%r14 shrdq $23,%r13,%r13 movq %r14,%rbx movq %r10,%r12 shrdq $5,%r14,%r14 xorq %r9,%r13 xorq %r11,%r12 shrdq $4,%r13,%r13 xorq %rbx,%r14 andq %r9,%r12 xorq %r9,%r13 addq 56(%rsp),%rax movq %rbx,%rdi xorq %r11,%r12 shrdq $6,%r14,%r14 xorq %rcx,%rdi addq %r12,%rax shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rbx,%r14 addq %r13,%rax xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 shrdq $23,%r13,%r13 movq %r14,%rax movq %r9,%r12 shrdq $5,%r14,%r14 xorq %r8,%r13 xorq %r10,%r12 shrdq $4,%r13,%r13 xorq %rax,%r14 andq %r8,%r12 xorq %r8,%r13 addq 64(%rsp),%r11 movq %rax,%r15 xorq %r10,%r12 shrdq $6,%r14,%r14 xorq %rbx,%r15 addq %r12,%r11 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rax,%r14 addq %r13,%r11 xorq %rbx,%rdi shrdq $28,%r14,%r14 addq %r11,%rdx addq 
%rdi,%r11 movq %rdx,%r13 addq %r11,%r14 shrdq $23,%r13,%r13 movq %r14,%r11 movq %r8,%r12 shrdq $5,%r14,%r14 xorq %rdx,%r13 xorq %r9,%r12 shrdq $4,%r13,%r13 xorq %r11,%r14 andq %rdx,%r12 xorq %rdx,%r13 addq 72(%rsp),%r10 movq %r11,%rdi xorq %r9,%r12 shrdq $6,%r14,%r14 xorq %rax,%rdi addq %r12,%r10 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r11,%r14 addq %r13,%r10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 shrdq $23,%r13,%r13 movq %r14,%r10 movq %rdx,%r12 shrdq $5,%r14,%r14 xorq %rcx,%r13 xorq %r8,%r12 shrdq $4,%r13,%r13 xorq %r10,%r14 andq %rcx,%r12 xorq %rcx,%r13 addq 80(%rsp),%r9 movq %r10,%r15 xorq %r8,%r12 shrdq $6,%r14,%r14 xorq %r11,%r15 addq %r12,%r9 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r10,%r14 addq %r13,%r9 xorq %r11,%rdi shrdq $28,%r14,%r14 addq %r9,%rbx addq %rdi,%r9 movq %rbx,%r13 addq %r9,%r14 shrdq $23,%r13,%r13 movq %r14,%r9 movq %rcx,%r12 shrdq $5,%r14,%r14 xorq %rbx,%r13 xorq %rdx,%r12 shrdq $4,%r13,%r13 xorq %r9,%r14 andq %rbx,%r12 xorq %rbx,%r13 addq 88(%rsp),%r8 movq %r9,%rdi xorq %rdx,%r12 shrdq $6,%r14,%r14 xorq %r10,%rdi addq %r12,%r8 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r9,%r14 addq %r13,%r8 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 shrdq $23,%r13,%r13 movq %r14,%r8 movq %rbx,%r12 shrdq $5,%r14,%r14 xorq %rax,%r13 xorq %rcx,%r12 shrdq $4,%r13,%r13 xorq %r8,%r14 andq %rax,%r12 xorq %rax,%r13 addq 96(%rsp),%rdx movq %r8,%r15 xorq %rcx,%r12 shrdq $6,%r14,%r14 xorq %r9,%r15 addq %r12,%rdx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r8,%r14 addq %r13,%rdx xorq %r9,%rdi shrdq $28,%r14,%r14 addq %rdx,%r11 addq %rdi,%rdx movq %r11,%r13 addq %rdx,%r14 shrdq $23,%r13,%r13 movq %r14,%rdx movq %rax,%r12 shrdq $5,%r14,%r14 xorq %r11,%r13 xorq %rbx,%r12 shrdq $4,%r13,%r13 xorq %rdx,%r14 andq %r11,%r12 xorq %r11,%r13 addq 104(%rsp),%rcx movq %rdx,%rdi xorq %rbx,%r12 shrdq $6,%r14,%r14 xorq %r8,%rdi addq %r12,%rcx shrdq $14,%r13,%r13 andq %rdi,%r15 xorq 
%rdx,%r14 addq %r13,%rcx xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 shrdq $23,%r13,%r13 movq %r14,%rcx movq %r11,%r12 shrdq $5,%r14,%r14 xorq %r10,%r13 xorq %rax,%r12 shrdq $4,%r13,%r13 xorq %rcx,%r14 andq %r10,%r12 xorq %r10,%r13 addq 112(%rsp),%rbx movq %rcx,%r15 xorq %rax,%r12 shrdq $6,%r14,%r14 xorq %rdx,%r15 addq %r12,%rbx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rcx,%r14 addq %r13,%rbx xorq %rdx,%rdi shrdq $28,%r14,%r14 addq %rbx,%r9 addq %rdi,%rbx movq %r9,%r13 addq %rbx,%r14 shrdq $23,%r13,%r13 movq %r14,%rbx movq %r10,%r12 shrdq $5,%r14,%r14 xorq %r9,%r13 xorq %r11,%r12 shrdq $4,%r13,%r13 xorq %rbx,%r14 andq %r9,%r12 xorq %r9,%r13 addq 120(%rsp),%rax movq %rbx,%rdi xorq %r11,%r12 shrdq $6,%r14,%r14 xorq %rcx,%rdi addq %r12,%rax shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rbx,%r14 addq %r13,%rax xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 movq 128+0(%rsp),%rdi movq %r14,%rax addq 0(%rdi),%rax leaq 128(%rsi),%rsi addq 8(%rdi),%rbx addq 16(%rdi),%rcx addq 24(%rdi),%rdx addq 32(%rdi),%r8 addq 40(%rdi),%r9 addq 48(%rdi),%r10 addq 56(%rdi),%r11 cmpq 128+16(%rsp),%rsi movq %rax,0(%rdi) movq %rbx,8(%rdi) movq %rcx,16(%rdi) movq %rdx,24(%rdi) movq %r8,32(%rdi) movq %r9,40(%rdi) movq %r10,48(%rdi) movq %r11,56(%rdi) jb .Lloop_avx movq 152(%rsp),%rsi .cfi_def_cfa %rsi,8 vzeroupper movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lepilogue_avx: ret .cfi_endproc .size sha512_block_data_order_avx,.-sha512_block_data_order_avx #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/vpaes-armv7-linux.S 
================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) .syntax unified .arch armv7-a .fpu neon #if defined(__thumb2__) .thumb #else .code 32 #endif .text .type _vpaes_consts,%object .align 7 @ totally strategic alignment _vpaes_consts: .Lk_mc_forward:@ mc_forward .quad 0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 .Lk_mc_backward:@ mc_backward .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F .Lk_sr:@ sr .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 @ @ "Hot" constants @ .Lk_inv:@ inv, inva .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 .Lk_ipt:@ input transform (lo, hi) .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 .Lk_sbo:@ sbou, sbot .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA .Lk_sb1:@ sb1u, sb1t .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 .Lk_sb2:@ sb2u, sb2t .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,55,32,78,69,79,78,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .align 2 .size _vpaes_consts,.-_vpaes_consts .align 6 @@ @@ 
_aes_preheat @@ @@ Fills q9-q15 as specified below. @@ .type _vpaes_preheat,%function .align 4 _vpaes_preheat: adr r10, .Lk_inv vmov.i8 q9, #0x0f @ .Lk_s0F vld1.64 {q10,q11}, [r10]! @ .Lk_inv add r10, r10, #64 @ Skip .Lk_ipt, .Lk_sbo vld1.64 {q12,q13}, [r10]! @ .Lk_sb1 vld1.64 {q14,q15}, [r10] @ .Lk_sb2 bx lr @@ @@ _aes_encrypt_core @@ @@ AES-encrypt q0. @@ @@ Inputs: @@ q0 = input @@ q9-q15 as in _vpaes_preheat @@ [r2] = scheduled keys @@ @@ Output in q0 @@ Clobbers q1-q5, r8-r11 @@ Preserves q6-q8 so you get some local vectors @@ @@ .type _vpaes_encrypt_core,%function .align 4 _vpaes_encrypt_core: mov r9, r2 ldr r8, [r2,#240] @ pull rounds adr r11, .Lk_ipt @ vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo @ vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi vld1.64 {q2, q3}, [r11] adr r11, .Lk_mc_forward+16 vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 # round0 key vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 vtbl.8 d2, {q2}, d2 @ vpshufb %xmm1, %xmm2, %xmm1 vtbl.8 d3, {q2}, d3 vtbl.8 d4, {q3}, d0 @ vpshufb %xmm0, %xmm3, %xmm2 vtbl.8 d5, {q3}, d1 veor q0, q1, q5 @ vpxor %xmm5, %xmm1, %xmm0 veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 @ .Lenc_entry ends with a bnz instruction which is normally paired with @ subs in .Lenc_loop. tst r8, r8 b .Lenc_entry .align 4 .Lenc_loop: @ middle of middle round add r10, r11, #0x40 vtbl.8 d8, {q13}, d4 @ vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u vtbl.8 d9, {q13}, d5 vld1.64 {q1}, [r11]! 
@ vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[] vtbl.8 d0, {q12}, d6 @ vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t vtbl.8 d1, {q12}, d7 veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k vtbl.8 d10, {q15}, d4 @ vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u vtbl.8 d11, {q15}, d5 veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A vtbl.8 d4, {q14}, d6 @ vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t vtbl.8 d5, {q14}, d7 vld1.64 {q4}, [r10] @ vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[] vtbl.8 d6, {q0}, d2 @ vpshufb %xmm1, %xmm0, %xmm3 # 0 = B vtbl.8 d7, {q0}, d3 veor q2, q2, q5 @ vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A @ Write to q5 instead of q0, so the table and destination registers do @ not overlap. vtbl.8 d10, {q0}, d8 @ vpshufb %xmm4, %xmm0, %xmm0 # 3 = D vtbl.8 d11, {q0}, d9 veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B vtbl.8 d8, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C vtbl.8 d9, {q3}, d3 @ Here we restore the original q0/q5 usage. veor q0, q5, q3 @ vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D and r11, r11, #~(1<<6) @ and $0x30, %r11 # ... 
mod 4 veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D subs r8, r8, #1 @ nr-- .Lenc_entry: @ top of round vand q1, q0, q9 @ vpand %xmm0, %xmm9, %xmm1 # 0 = k vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i vtbl.8 d10, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k vtbl.8 d11, {q11}, d3 veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i vtbl.8 d7, {q10}, d1 vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j vtbl.8 d9, {q10}, d3 veor q3, q3, q5 @ vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak vtbl.8 d5, {q10}, d7 vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak vtbl.8 d7, {q10}, d9 veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 bne .Lenc_loop @ middle of last round add r10, r11, #0x80 adr r11, .Lk_sbo @ Read to q1 instead of q4, so the vtbl.8 instruction below does not @ overlap table and destination registers. vld1.64 {q1}, [r11]! @ vmovdqa -0x60(%r10), %xmm4 # 3 : sbou vld1.64 {q0}, [r11] @ vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou vtbl.8 d9, {q1}, d5 vld1.64 {q1}, [r10] @ vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[] @ Write to q2 instead of q0 below, to avoid overlapping table and @ destination registers. vtbl.8 d4, {q0}, d6 @ vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t vtbl.8 d5, {q0}, d7 veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k veor q2, q2, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A @ Here we restore the original q0/q2 usage. 
vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 vtbl.8 d1, {q2}, d3 bx lr .size _vpaes_encrypt_core,.-_vpaes_encrypt_core .globl vpaes_encrypt .hidden vpaes_encrypt .type vpaes_encrypt,%function .align 4 vpaes_encrypt: @ _vpaes_encrypt_core uses r8-r11. Round up to r7-r11 to maintain stack @ alignment. stmdb sp!, {r7,r8,r9,r10,r11,lr} @ _vpaes_encrypt_core uses q4-q5 (d8-d11), which are callee-saved. vstmdb sp!, {d8,d9,d10,d11} vld1.64 {q0}, [r0] bl _vpaes_preheat bl _vpaes_encrypt_core vst1.64 {q0}, [r1] vldmia sp!, {d8,d9,d10,d11} ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return .size vpaes_encrypt,.-vpaes_encrypt @ @ Decryption stuff @ .type _vpaes_decrypt_consts,%object .align 4 _vpaes_decrypt_consts: .Lk_dipt:@ decryption input transform .quad 0x0F505B040B545F00, 0x154A411E114E451A .quad 0x86E383E660056500, 0x12771772F491F194 .Lk_dsbo:@ decryption sbox final output .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C .Lk_dsb9:@ decryption sbox output *9*u, *9*t .quad 0x851C03539A86D600, 0xCAD51F504F994CC9 .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 .Lk_dsbd:@ decryption sbox output *D*u, *D*t .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 .Lk_dsbb:@ decryption sbox output *B*u, *B*t .quad 0xD022649296B44200, 0x602646F6B0F2D404 .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B .Lk_dsbe:@ decryption sbox output *E*u, *E*t .quad 0x46F2929626D4D000, 0x2242600464B4F6B0 .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 .size _vpaes_decrypt_consts,.-_vpaes_decrypt_consts @@ @@ Decryption core @@ @@ Same API as encryption core, except it clobbers q12-q15 rather than using @@ the values from _vpaes_preheat. q9-q11 must still be set from @@ _vpaes_preheat. @@ .type _vpaes_decrypt_core,%function .align 4 _vpaes_decrypt_core: mov r9, r2 ldr r8, [r2,#240] @ pull rounds @ This function performs shuffles with various constants. The x86_64 @ version loads them on-demand into %xmm0-%xmm5. 
This does not work well @ for ARMv7 because those registers are shuffle destinations. The ARMv8 @ version preloads those constants into registers, but ARMv7 has half @ the registers to work with. Instead, we load them on-demand into @ q12-q15, registers normally use for preloaded constants. This is fine @ because decryption doesn't use those constants. The values are @ constant, so this does not interfere with potential 2x optimizations. adr r7, .Lk_dipt vld1.64 {q12,q13}, [r7] @ vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo lsl r11, r8, #4 @ mov %rax, %r11; shl $4, %r11 eor r11, r11, #0x30 @ xor $0x30, %r11 adr r10, .Lk_sr and r11, r11, #0x30 @ and $0x30, %r11 add r11, r11, r10 adr r10, .Lk_mc_forward+48 vld1.64 {q4}, [r9]! @ vmovdqu (%r9), %xmm4 # round0 key vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 vtbl.8 d4, {q12}, d2 @ vpshufb %xmm1, %xmm2, %xmm2 vtbl.8 d5, {q12}, d3 vld1.64 {q5}, [r10] @ vmovdqa .Lk_mc_forward+48(%rip), %xmm5 @ vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi vtbl.8 d0, {q13}, d0 @ vpshufb %xmm0, %xmm1, %xmm0 vtbl.8 d1, {q13}, d1 veor q2, q2, q4 @ vpxor %xmm4, %xmm2, %xmm2 veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 @ .Ldec_entry ends with a bnz instruction which is normally paired with @ subs in .Ldec_loop. tst r8, r8 b .Ldec_entry .align 4 .Ldec_loop: @ @ Inverse mix columns @ @ We load .Lk_dsb* into q12-q15 on-demand. See the comment at the top of @ the function. adr r10, .Lk_dsb9 vld1.64 {q12,q13}, [r10]! @ vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u @ vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t @ Load sbd* ahead of time. vld1.64 {q14,q15}, [r10]! @ vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu @ vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt vtbl.8 d8, {q12}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u vtbl.8 d9, {q12}, d5 vtbl.8 d2, {q13}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t vtbl.8 d3, {q13}, d7 veor q0, q4, q0 @ vpxor %xmm4, %xmm0, %xmm0 veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch @ Load sbb* ahead of time. 
vld1.64 {q12,q13}, [r10]! @ vmovdqa 0x20(%r10),%xmm4 # 4 : sbbu @ vmovdqa 0x30(%r10),%xmm1 # 0 : sbbt vtbl.8 d8, {q14}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu vtbl.8 d9, {q14}, d5 @ Write to q1 instead of q0, so the table and destination registers do @ not overlap. vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch vtbl.8 d3, {q0}, d11 @ Here we restore the original q0/q1 usage. This instruction is @ reordered from the ARMv8 version so we do not clobber the vtbl.8 @ below. veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch vtbl.8 d2, {q15}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt vtbl.8 d3, {q15}, d7 @ vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch @ vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt @ Load sbd* ahead of time. vld1.64 {q14,q15}, [r10]! @ vmovdqa 0x40(%r10),%xmm4 # 4 : sbeu @ vmovdqa 0x50(%r10),%xmm1 # 0 : sbet vtbl.8 d8, {q12}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu vtbl.8 d9, {q12}, d5 @ Write to q1 instead of q0, so the table and destination registers do @ not overlap. vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch vtbl.8 d3, {q0}, d11 @ Here we restore the original q0/q1 usage. This instruction is @ reordered from the ARMv8 version so we do not clobber the vtbl.8 @ below. veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch vtbl.8 d2, {q13}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt vtbl.8 d3, {q13}, d7 veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch vtbl.8 d8, {q14}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu vtbl.8 d9, {q14}, d5 @ Write to q1 instead of q0, so the table and destination registers do @ not overlap. vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch vtbl.8 d3, {q0}, d11 @ Here we restore the original q0/q1 usage. This instruction is @ reordered from the ARMv8 version so we do not clobber the vtbl.8 @ below. 
veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch vtbl.8 d2, {q15}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet vtbl.8 d3, {q15}, d7 vext.8 q5, q5, q5, #12 @ vpalignr $12, %xmm5, %xmm5, %xmm5 veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch subs r8, r8, #1 @ sub $1,%rax # nr-- .Ldec_entry: @ top of round vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k vtbl.8 d5, {q11}, d3 veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i vtbl.8 d7, {q10}, d1 vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j vtbl.8 d9, {q10}, d3 veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak vtbl.8 d5, {q10}, d7 vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak vtbl.8 d7, {q10}, d9 veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo vld1.64 {q0}, [r9]! @ vmovdqu (%r9), %xmm0 bne .Ldec_loop @ middle of last round adr r10, .Lk_dsbo @ Write to q1 rather than q4 to avoid overlapping table and destination. vld1.64 {q1}, [r10]! @ vmovdqa 0x60(%r10), %xmm4 # 3 : sbou vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou vtbl.8 d9, {q1}, d5 @ Write to q2 rather than q1 to avoid overlapping table and destination. vld1.64 {q2}, [r10] @ vmovdqa 0x70(%r10), %xmm1 # 0 : sbot vtbl.8 d2, {q2}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t vtbl.8 d3, {q2}, d7 vld1.64 {q2}, [r11] @ vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160 veor q4, q4, q0 @ vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k @ Write to q1 rather than q0 so the table and destination registers @ below do not overlap. 
veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm0 # 0 = A vtbl.8 d0, {q1}, d4 @ vpshufb %xmm2, %xmm0, %xmm0 vtbl.8 d1, {q1}, d5 bx lr .size _vpaes_decrypt_core,.-_vpaes_decrypt_core .globl vpaes_decrypt .hidden vpaes_decrypt .type vpaes_decrypt,%function .align 4 vpaes_decrypt: @ _vpaes_decrypt_core uses r7-r11. stmdb sp!, {r7,r8,r9,r10,r11,lr} @ _vpaes_decrypt_core uses q4-q5 (d8-d11), which are callee-saved. vstmdb sp!, {d8,d9,d10,d11} vld1.64 {q0}, [r0] bl _vpaes_preheat bl _vpaes_decrypt_core vst1.64 {q0}, [r1] vldmia sp!, {d8,d9,d10,d11} ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return .size vpaes_decrypt,.-vpaes_decrypt @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@ @@ @@ AES key schedule @@ @@ @@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @ This function diverges from both x86_64 and armv7 in which constants are @ pinned. x86_64 has a common preheat function for all operations. aarch64 @ separates them because it has enough registers to pin nearly all constants. @ armv7 does not have enough registers, but needing explicit loads and stores @ also complicates using x86_64's register allocation directly. @ @ We pin some constants for convenience and leave q14 and q15 free to load @ others on demand. 
@
@  Key schedule constants
@
.type	_vpaes_key_consts,%object
.align	4
_vpaes_key_consts:
.Lk_dksd:@ decryption key schedule: invskew x*D
.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb:@ decryption key schedule: invskew x*B
.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse:@ decryption key schedule: invskew x*E + 0x63
.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9:@ decryption key schedule: invskew x*9
.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE
.Lk_rcon:@ rcon
.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
.Lk_opt:@ output transform
.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew:@ deskew tables: inverts the sbox's "skew"
.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.size	_vpaes_key_consts,.-_vpaes_key_consts

@ Pins the constants the key schedule keeps resident in registers:
@   q8  = .Lk_rcon    (round constants)
@   q9  = 0x0f...     (.Lk_s0F nibble mask)
@   q10,q11 = .Lk_inv (inverse tables)
@   q12 = 0x5b...     (.Lk_s63)
@ q14/q15 are left free for on-demand table loads. Clobbers r10, r11.
.type	_vpaes_key_preheat,%function
.align	4
_vpaes_key_preheat:
	adr	r11, .Lk_rcon
	vmov.i8	q12, #0x5b		@ .Lk_s63
	adr	r10, .Lk_inv		@ Must be aligned to 8 mod 16.
	vmov.i8	q9, #0x0f		@ .Lk_s0F
	vld1.64	{q10,q11}, [r10]	@ .Lk_inv
	vld1.64	{q8}, [r11]		@ .Lk_rcon
	bx	lr
.size	_vpaes_key_preheat,.-_vpaes_key_preheat

@ Expands an AES key into the vpaes round-key schedule.
@ Inputs (as set up by vpaes_set_{en,de}crypt_key below):
@   r0 = user key pointer, r1 = key length in bits (128/192/256),
@   r2 = output round-key pointer, r3 = 0 for encrypt / nonzero for decrypt,
@   r8 = initial index into .Lk_sr (ShiftRows permutation table).
.type	_vpaes_schedule_core,%function
.align	4
_vpaes_schedule_core:
	@ We only need to save lr, but ARM requires an 8-byte stack alignment,
	@ so save an extra register.
	stmdb	sp!, {r3,lr}

	bl	_vpaes_key_preheat	@ load the tables
	adr	r11, .Lk_ipt		@ Must be aligned to 8 mod 16.
	vld1.64	{q0}, [r0]!		@ vmovdqu	(%rdi),	%xmm0		# load key (unaligned)

	@ input transform
	@ Use q4 here rather than q3 so .Lschedule_am_decrypting does not
	@ overlap table and destination.
	vmov	q4, q0			@ vmovdqa	%xmm0,	%xmm3
	bl	_vpaes_schedule_transform
	adr	r10, .Lk_sr		@ Must be aligned to 8 mod 16.
	vmov	q7, q0			@ vmovdqa	%xmm0,	%xmm7

	add	r8, r8, r10
	tst	r3, r3
	bne	.Lschedule_am_decrypting

	@ encrypting, output zeroth round key after transform
	vst1.64	{q0}, [r2]		@ vmovdqu	%xmm0,	(%rdx)
	b	.Lschedule_go

.Lschedule_am_decrypting:
	@ decrypting, output zeroth round key after shiftrows
	vld1.64	{q1}, [r8]		@ vmovdqa	(%r8,%r10),	%xmm1
	vtbl.8	d6, {q4}, d2		@ vpshufb	%xmm1,	%xmm3,	%xmm3
	vtbl.8	d7, {q4}, d3
	vst1.64	{q3}, [r2]		@ vmovdqu	%xmm3,	(%rdx)
	eor	r8, r8, #0x30		@ xor	$0x30, %r8

.Lschedule_go:
	cmp	r1, #192		@ cmp	$192,	%esi
	bhi	.Lschedule_256
	beq	.Lschedule_192
	@ 128: fall through

@@
@@  .schedule_128
@@
@@  128-bit specific part of key schedule.
@@
@@  This schedule is really simple, because all its parts
@@  are accomplished by the subroutines.
@@
.Lschedule_128:
	mov	r0, #10			@ mov	$10, %esi

.Loop_schedule_128:
	bl	_vpaes_schedule_round
	subs	r0, r0, #1		@ dec	%esi
	beq	.Lschedule_mangle_last
	bl	_vpaes_schedule_mangle	@ write output
	b	.Loop_schedule_128

@@
@@  .aes_schedule_192
@@
@@  192-bit specific part of key schedule.
@@
@@  The main body of this schedule is the same as the 128-bit
@@  schedule, but with more smearing.  The long, high side is
@@  stored in q7 as before, and the short, low side is in
@@  the high bits of q6.
@@
@@  This schedule is somewhat nastier, however, because each
@@  round produces 192 bits of key material, or 1.5 round keys.
@@  Therefore, on each cycle we do 2 rounds and produce 3 round
@@  keys.
@@
.align	4
.Lschedule_192:
	sub	r0, r0, #8
	vld1.64	{q0}, [r0]		@ vmovdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
	bl	_vpaes_schedule_transform	@ input transform
	vmov	q6, q0			@ vmovdqa	%xmm0,	%xmm6		# save short part
	vmov.i8	d12, #0			@ vpxor	%xmm4,	%xmm4, %xmm4	# clear 4
					@ vmovhlps	%xmm4,	%xmm6,	%xmm6	# clobber low side with zeros
	mov	r0, #4			@ mov	$4,	%esi

.Loop_schedule_192:
	bl	_vpaes_schedule_round
	vext.8	q0, q6, q0, #8		@ vpalignr	$8,%xmm6,%xmm0,%xmm0
	bl	_vpaes_schedule_mangle	@ save key n
	bl	_vpaes_schedule_192_smear
	bl	_vpaes_schedule_mangle	@ save key n+1
	bl	_vpaes_schedule_round
	subs	r0, r0, #1		@ dec	%esi
	beq	.Lschedule_mangle_last
	bl	_vpaes_schedule_mangle	@ save key n+2
	bl	_vpaes_schedule_192_smear
	b	.Loop_schedule_192

@@
@@  .aes_schedule_256
@@
@@  256-bit specific part of key schedule.
@@
@@  The structure here is very similar to the 128-bit
@@  schedule, but with an additional "low side" in
@@  q6.  The low side's rounds are the same as the
@@  high side's, except no rcon and no rotation.
@@
.align	4
.Lschedule_256:
	vld1.64	{q0}, [r0]		@ vmovdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
	bl	_vpaes_schedule_transform	@ input transform
	mov	r0, #7			@ mov	$7, %esi

.Loop_schedule_256:
	bl	_vpaes_schedule_mangle	@ output low result
	vmov	q6, q0			@ vmovdqa	%xmm0,	%xmm6		# save cur_lo in xmm6

	@ high round
	bl	_vpaes_schedule_round
	subs	r0, r0, #1		@ dec	%esi
	beq	.Lschedule_mangle_last
	bl	_vpaes_schedule_mangle

	@ low round. swap xmm7 and xmm6
	vdup.32	q0, d1[1]		@ vpshufd	$0xFF,	%xmm0,	%xmm0
	vmov.i8	q4, #0
	vmov	q5, q7			@ vmovdqa	%xmm7,	%xmm5
	vmov	q7, q6			@ vmovdqa	%xmm6,	%xmm7
	bl	_vpaes_schedule_low_round
	vmov	q7, q5			@ vmovdqa	%xmm5,	%xmm7

	b	.Loop_schedule_256

@@
@@  .aes_schedule_mangle_last
@@
@@  Mangler for last round of key schedule
@@  Mangles q0
@@     when encrypting, outputs out(q0) ^ 63
@@     when decrypting, outputs unskew(q0)
@@
@@  Always called right before return... jumps to cleanup and exits
@@
.align	4
.Lschedule_mangle_last:
	@ schedule last round key from xmm0
	adr	r11, .Lk_deskew		@ lea	.Lk_deskew(%rip),%r11	# prepare to deskew
	tst	r3, r3
	bne	.Lschedule_mangle_last_dec

	@ encrypting
	vld1.64	{q1}, [r8]		@ vmovdqa	(%r8,%r10),%xmm1
	adr	r11, .Lk_opt		@ lea	.Lk_opt(%rip),	%r11	# prepare to output transform
	add	r2, r2, #32		@ add	$32,	%rdx
	vmov	q2, q0
	vtbl.8	d0, {q2}, d2		@ vpshufb	%xmm1,	%xmm0,	%xmm0	# output permute
	vtbl.8	d1, {q2}, d3

.Lschedule_mangle_last_dec:
	sub	r2, r2, #16		@ add	$-16,	%rdx
	veor	q0, q0, q12		@ vpxor	.Lk_s63(%rip),	%xmm0,	%xmm0
	bl	_vpaes_schedule_transform	@ output transform
	vst1.64	{q0}, [r2]		@ vmovdqu	%xmm0,	(%rdx)		# save last key

	@ cleanup: wipe registers that held key material.
	veor	q0, q0, q0		@ vpxor	%xmm0,	%xmm0,	%xmm0
	veor	q1, q1, q1		@ vpxor	%xmm1,	%xmm1,	%xmm1
	veor	q2, q2, q2		@ vpxor	%xmm2,	%xmm2,	%xmm2
	veor	q3, q3, q3		@ vpxor	%xmm3,	%xmm3,	%xmm3
	veor	q4, q4, q4		@ vpxor	%xmm4,	%xmm4,	%xmm4
	veor	q5, q5, q5		@ vpxor	%xmm5,	%xmm5,	%xmm5
	veor	q6, q6, q6		@ vpxor	%xmm6,	%xmm6,	%xmm6
	veor	q7, q7, q7		@ vpxor	%xmm7,	%xmm7,	%xmm7
	ldmia	sp!, {r3,pc}		@ return
.size	_vpaes_schedule_core,.-_vpaes_schedule_core

@@
@@  .aes_schedule_192_smear
@@
@@  Smear the short, low side in the 192-bit key schedule.
@@
@@  Inputs:
@@    q7: high side, b  a  x  y
@@    q6: low side, d  c  0  0
@@
@@  Outputs:
@@    q6: b+c+d  b+c  0  0
@@    q0: b+c+d  b+c  b  a
@@
.type	_vpaes_schedule_192_smear,%function
.align	4
_vpaes_schedule_192_smear:
	vmov.i8	q1, #0
	vdup.32	q0, d15[1]
	vshl.i64	q1, q6, #32	@ vpshufd	$0x80,	%xmm6,	%xmm1	# d c 0 0 -> c 0 0 0
	vmov	d0, d15			@ vpshufd	$0xFE,	%xmm7,	%xmm0	# b a _ _ -> b b b a
	veor	q6, q6, q1		@ vpxor	%xmm1,	%xmm6,	%xmm6	# -> c+d c 0 0
	veor	q1, q1, q1		@ vpxor	%xmm1,	%xmm1,	%xmm1
	veor	q6, q6, q0		@ vpxor	%xmm0,	%xmm6,	%xmm6	# -> b+c+d b+c b a
	vmov	q0, q6			@ vmovdqa	%xmm6,	%xmm0
	vmov	d12, d2			@ vmovhlps	%xmm1,	%xmm6,	%xmm6	# clobber low side with zeros
	bx	lr
.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear

@@
@@  .aes_schedule_round
@@
@@  Runs one main round of the key schedule on q0, q7
@@
@@  Specifically, runs subbytes on the high dword of q0
@@  then rotates it by one byte and xors into the low dword of
@@  q7.
@@
@@  Adds rcon from low byte of q8, then rotates q8 for
@@  next rcon.
@@
@@  Smears the dwords of q7 by xoring the low into the
@@  second low, result into third, result into highest.
@@
@@  Returns results in q7 = q0.
@@  Clobbers q1-q4, r11.
@@
.type	_vpaes_schedule_round,%function
.align	4
_vpaes_schedule_round:
	@ extract rcon from xmm8
	vmov.i8	q4, #0			@ vpxor	%xmm4,	%xmm4,	%xmm4
	vext.8	q1, q8, q4, #15		@ vpalignr	$15,	%xmm8,	%xmm4,	%xmm1
	vext.8	q8, q8, q8, #15		@ vpalignr	$15,	%xmm8,	%xmm8,	%xmm8
	veor	q7, q7, q1		@ vpxor	%xmm1,	%xmm7,	%xmm7

	@ rotate
	vdup.32	q0, d1[1]		@ vpshufd	$0xFF,	%xmm0,	%xmm0
	vext.8	q0, q0, q0, #1		@ vpalignr	$1,	%xmm0,	%xmm0,	%xmm0

	@ fall through...

	@ low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
	@ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12.
	@ We pin other values in _vpaes_key_preheat, so load them now.
	adr	r11, .Lk_sb1
	vld1.64	{q14,q15}, [r11]

	@ smear xmm7
	vext.8	q1, q4, q7, #12		@ vpslldq	$4,	%xmm7,	%xmm1
	veor	q7, q7, q1		@ vpxor	%xmm1,	%xmm7,	%xmm7
	vext.8	q4, q4, q7, #8		@ vpslldq	$8,	%xmm7,	%xmm4

	@ subbytes
	vand	q1, q0, q9		@ vpand	%xmm9,	%xmm0,	%xmm1	# 0 = k
	vshr.u8	q0, q0, #4		@ vpsrlb	$4,	%xmm0,	%xmm0	# 1 = i
	veor	q7, q7, q4		@ vpxor	%xmm4,	%xmm7,	%xmm7
	vtbl.8	d4, {q11}, d2		@ vpshufb	%xmm1,	%xmm11,	%xmm2	# 2 = a/k
	vtbl.8	d5, {q11}, d3
	veor	q1, q1, q0		@ vpxor	%xmm0,	%xmm1,	%xmm1	# 0 = j
	vtbl.8	d6, {q10}, d0		@ vpshufb	%xmm0,	%xmm10,	%xmm3	# 3 = 1/i
	vtbl.8	d7, {q10}, d1
	veor	q3, q3, q2		@ vpxor	%xmm2,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
	vtbl.8	d8, {q10}, d2		@ vpshufb	%xmm1,	%xmm10,	%xmm4	# 4 = 1/j
	vtbl.8	d9, {q10}, d3
	veor	q7, q7, q12		@ vpxor	.Lk_s63(%rip),	%xmm7,	%xmm7
	vtbl.8	d6, {q10}, d6		@ vpshufb	%xmm3,	%xmm10,	%xmm3	# 2 = 1/iak
	vtbl.8	d7, {q10}, d7
	veor	q4, q4, q2		@ vpxor	%xmm2,	%xmm4,	%xmm4	# 4 = jak = 1/j + a/k
	vtbl.8	d4, {q10}, d8		@ vpshufb	%xmm4,	%xmm10,	%xmm2	# 3 = 1/jak
	vtbl.8	d5, {q10}, d9
	veor	q3, q3, q1		@ vpxor	%xmm1,	%xmm3,	%xmm3	# 2 = io
	veor	q2, q2, q0		@ vpxor	%xmm0,	%xmm2,	%xmm2	# 3 = jo
	vtbl.8	d8, {q15}, d6		@ vpshufb	%xmm3,	%xmm13,	%xmm4	# 4 = sbou
	vtbl.8	d9, {q15}, d7
	vtbl.8	d2, {q14}, d4		@ vpshufb	%xmm2,	%xmm12,	%xmm1	# 0 = sb1t
	vtbl.8	d3, {q14}, d5
	veor	q1, q1, q4		@ vpxor	%xmm4,	%xmm1,	%xmm1	# 0 = sbox output

	@ add in smeared stuff
	veor	q0, q1, q7		@ vpxor	%xmm7,	%xmm1,	%xmm0
	veor	q7, q1, q7		@ vmovdqa	%xmm0,	%xmm7
	bx	lr
.size	_vpaes_schedule_round,.-_vpaes_schedule_round

@@
@@  .aes_schedule_transform
@@
@@  Linear-transform q0 according to tables at [r11]
@@
@@  Requires that q9 = 0x0F0F...
@@  as in preheat
@@  Output in q0
@@  Clobbers q1, q2, q14, q15
@@
.type	_vpaes_schedule_transform,%function
.align	4
_vpaes_schedule_transform:
	vld1.64	{q14,q15}, [r11]	@ vmovdqa	(%r11),	%xmm2	# lo
					@ vmovdqa	16(%r11),	%xmm1	# hi
	vand	q1, q0, q9		@ vpand	%xmm9,	%xmm0,	%xmm1
	vshr.u8	q0, q0, #4		@ vpsrlb	$4,	%xmm0,	%xmm0
	vtbl.8	d4, {q14}, d2		@ vpshufb	%xmm1,	%xmm2,	%xmm2
	vtbl.8	d5, {q14}, d3
	vtbl.8	d0, {q15}, d0		@ vpshufb	%xmm0,	%xmm1,	%xmm0
	vtbl.8	d1, {q15}, d1
	veor	q0, q0, q2		@ vpxor	%xmm2,	%xmm0,	%xmm0
	bx	lr
.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform

@@
@@  .aes_schedule_mangle
@@
@@  Mangles q0 from (basis-transformed) standard version
@@  to our version.
@@
@@  On encrypt,
@@    xor with 0x63
@@    multiply by circulant 0,1,1,1
@@    apply shiftrows transform
@@
@@  On decrypt,
@@    xor with 0x63
@@    multiply by "inverse mixcolumns" circulant E,B,D,9
@@    deskew
@@    apply shiftrows transform
@@
@@
@@  Writes out to [r2], and increments or decrements it
@@  Keeps track of round number mod 4 in r8
@@  Preserves q0
@@  Clobbers q1-q5
@@
.type	_vpaes_schedule_mangle,%function
.align	4
_vpaes_schedule_mangle:
	tst	r3, r3
	vmov	q4, q0			@ vmovdqa	%xmm0,	%xmm4	# save xmm0 for later
	adr	r11, .Lk_mc_forward	@ Must be aligned to 8 mod 16.
	vld1.64	{q5}, [r11]		@ vmovdqa	.Lk_mc_forward(%rip),%xmm5
	bne	.Lschedule_mangle_dec

	@ encrypting
	@ Write to q2 so we do not overlap table and destination below.
	veor	q2, q0, q12		@ vpxor	.Lk_s63(%rip),	%xmm0,	%xmm4
	add	r2, r2, #16		@ add	$16,	%rdx
	vtbl.8	d8, {q2}, d10		@ vpshufb	%xmm5,	%xmm4,	%xmm4
	vtbl.8	d9, {q2}, d11
	vtbl.8	d2, {q4}, d10		@ vpshufb	%xmm5,	%xmm4,	%xmm1
	vtbl.8	d3, {q4}, d11
	vtbl.8	d6, {q1}, d10		@ vpshufb	%xmm5,	%xmm1,	%xmm3
	vtbl.8	d7, {q1}, d11
	veor	q4, q4, q1		@ vpxor	%xmm1,	%xmm4,	%xmm4
	vld1.64	{q1}, [r8]		@ vmovdqa	(%r8,%r10),	%xmm1
	veor	q3, q3, q4		@ vpxor	%xmm4,	%xmm3,	%xmm3
	b	.Lschedule_mangle_both

.align	4
.Lschedule_mangle_dec:
	@ inverse mix columns
	adr	r11, .Lk_dksd		@ lea	.Lk_dksd(%rip),%r11
	vshr.u8	q1, q4, #4		@ vpsrlb	$4,	%xmm4,	%xmm1	# 1 = hi
	vand	q4, q4, q9		@ vpand	%xmm9,	%xmm4,	%xmm4	# 4 = lo

	vld1.64	{q14,q15}, [r11]!	@ vmovdqa	0x00(%r11),	%xmm2
					@ vmovdqa	0x10(%r11),	%xmm3
	vtbl.8	d4, {q14}, d8		@ vpshufb	%xmm4,	%xmm2,	%xmm2
	vtbl.8	d5, {q14}, d9
	vtbl.8	d6, {q15}, d2		@ vpshufb	%xmm1,	%xmm3,	%xmm3
	vtbl.8	d7, {q15}, d3
	@ Load .Lk_dksb ahead of time.
	vld1.64	{q14,q15}, [r11]!	@ vmovdqa	0x20(%r11),	%xmm2
					@ vmovdqa	0x30(%r11),	%xmm3
	@ Write to q13 so we do not overlap table and destination.
	veor	q13, q3, q2		@ vpxor	%xmm2,	%xmm3,	%xmm3
	vtbl.8	d6, {q13}, d10		@ vpshufb	%xmm5,	%xmm3,	%xmm3
	vtbl.8	d7, {q13}, d11
	vtbl.8	d4, {q14}, d8		@ vpshufb	%xmm4,	%xmm2,	%xmm2
	vtbl.8	d5, {q14}, d9
	veor	q2, q2, q3		@ vpxor	%xmm3,	%xmm2,	%xmm2
	vtbl.8	d6, {q15}, d2		@ vpshufb	%xmm1,	%xmm3,	%xmm3
	vtbl.8	d7, {q15}, d3
	@ Load .Lk_dkse ahead of time.
	vld1.64	{q14,q15}, [r11]!	@ vmovdqa	0x40(%r11),	%xmm2
					@ vmovdqa	0x50(%r11),	%xmm3
	@ Write to q13 so we do not overlap table and destination.
	veor	q13, q3, q2		@ vpxor	%xmm2,	%xmm3,	%xmm3
	vtbl.8	d6, {q13}, d10		@ vpshufb	%xmm5,	%xmm3,	%xmm3
	vtbl.8	d7, {q13}, d11
	vtbl.8	d4, {q14}, d8		@ vpshufb	%xmm4,	%xmm2,	%xmm2
	vtbl.8	d5, {q14}, d9
	veor	q2, q2, q3		@ vpxor	%xmm3,	%xmm2,	%xmm2
	vtbl.8	d6, {q15}, d2		@ vpshufb	%xmm1,	%xmm3,	%xmm3
	vtbl.8	d7, {q15}, d3
	@ Load .Lk_dks9 ahead of time. (NOTE(review): this is the fourth table,
	@ .Lk_dks9 at +0x60 -- the old comment repeated ".Lk_dkse".)
	vld1.64	{q14,q15}, [r11]!	@ vmovdqa	0x60(%r11),	%xmm2
					@ vmovdqa	0x70(%r11),	%xmm4
	@ Write to q13 so we do not overlap table and destination.
	veor	q13, q3, q2		@ vpxor	%xmm2,	%xmm3,	%xmm3
	vtbl.8	d4, {q14}, d8		@ vpshufb	%xmm4,	%xmm2,	%xmm2
	vtbl.8	d5, {q14}, d9
	vtbl.8	d6, {q13}, d10		@ vpshufb	%xmm5,	%xmm3,	%xmm3
	vtbl.8	d7, {q13}, d11
	vtbl.8	d8, {q15}, d2		@ vpshufb	%xmm1,	%xmm4,	%xmm4
	vtbl.8	d9, {q15}, d3
	vld1.64	{q1}, [r8]		@ vmovdqa	(%r8,%r10),	%xmm1
	veor	q2, q2, q3		@ vpxor	%xmm3,	%xmm2,	%xmm2
	veor	q3, q4, q2		@ vpxor	%xmm2,	%xmm4,	%xmm3

	sub	r2, r2, #16		@ add	$-16,	%rdx

.Lschedule_mangle_both:
	@ Write to q2 so table and destination do not overlap.
	vtbl.8	d4, {q3}, d2		@ vpshufb	%xmm1,	%xmm3,	%xmm3
	vtbl.8	d5, {q3}, d3
	add	r8, r8, #64-16		@ add	$-16,	%r8
	and	r8, r8, #~(1<<6)	@ and	$0x30,	%r8
	vst1.64	{q2}, [r2]		@ vmovdqu	%xmm3,	(%rdx)
	bx	lr
.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle

.globl	vpaes_set_encrypt_key
.hidden	vpaes_set_encrypt_key
.type	vpaes_set_encrypt_key,%function
.align	4
vpaes_set_encrypt_key:
	stmdb	sp!, {r7,r8,r9,r10,r11, lr}
	vstmdb	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}

	lsr	r9, r1, #5		@ shr	$5,%eax
	add	r9, r9, #5		@ $5,%eax
	str	r9, [r2,#240]		@ mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;

	mov	r3, #0			@ mov	$0,%ecx
	mov	r8, #0x30		@ mov	$0x30,%r8d
	bl	_vpaes_schedule_core
	eor	r0, r0, r0		@ return 0

	vldmia	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return
.size	vpaes_set_encrypt_key,.-vpaes_set_encrypt_key

.globl	vpaes_set_decrypt_key
.hidden	vpaes_set_decrypt_key
.type	vpaes_set_decrypt_key,%function
.align	4
vpaes_set_decrypt_key:
	stmdb	sp!, {r7,r8,r9,r10,r11, lr}
	vstmdb	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}

	lsr	r9, r1, #5		@ shr	$5,%eax
	add	r9, r9, #5		@ $5,%eax
	str	r9, [r2,#240]		@ mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
	lsl	r9, r9, #4		@ shl	$4,%eax
	add	r2, r2, #16		@ lea	16(%rdx,%rax),%rdx
	add	r2, r2, r9

	mov	r3, #1			@ mov	$1,%ecx
	lsr	r8, r1, #1		@ shr	$1,%r8d
	and	r8, r8, #32		@ and	$32,%r8d
	eor	r8, r8, #32		@ xor	$32,%r8d	# nbits==192?0:32
	bl	_vpaes_schedule_core

	vldmia	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return
.size	vpaes_set_decrypt_key,.-vpaes_set_decrypt_key

@ Additional constants for converting to bsaes.
.type	_vpaes_convert_consts,%object
.align	4
_vpaes_convert_consts:
@ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear
@ transform in the AES S-box. 0x63 is incorporated into the low half of the
@ table. This was computed with the following script:
@
@   def u64s_to_u128(x, y):
@       return x | (y << 64)
@   def u128_to_u64s(w):
@       return w & ((1<<64)-1), w >> 64
@   def get_byte(w, i):
@       return (w >> (i*8)) & 0xff
@   def apply_table(table, b):
@       lo = b & 0xf
@       hi = b >> 4
@       return get_byte(table[0], lo) ^ get_byte(table[1], hi)
@   def opt(b):
@       table = [
@           u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808),
@           u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0),
@       ]
@       return apply_table(table, b)
@   def rot_byte(b, n):
@       return 0xff & ((b << n) | (b >> (8-n)))
@   def skew(x):
@       return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^
@               rot_byte(x, 4))
@   table = [0, 0]
@   for i in range(16):
@       table[0] |= (skew(opt(i)) ^ 0x63) << (i*8)
@       table[1] |= skew(opt(i<<4)) << (i*8)
@   print("	.quad	0x%016x, 0x%016x" % u128_to_u64s(table[0]))
@   print("	.quad	0x%016x, 0x%016x" % u128_to_u64s(table[1]))
.Lk_opt_then_skew:
.quad	0x9cb8436798bc4763, 0x6440bb9f6044bf9b
.quad	0x1f30062936192f00, 0xb49bad829db284ab
@ .Lk_decrypt_transform is a permutation which performs an 8-bit left-rotation
@ followed by a byte-swap on each 32-bit word of a vector. E.g., 0x11223344
@ becomes 0x22334411 and then 0x11443322.
.Lk_decrypt_transform:
.quad	0x0704050603000102, 0x0f0c0d0e0b08090a
.size	_vpaes_convert_consts,.-_vpaes_convert_consts

@ void vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes);
.globl	vpaes_encrypt_key_to_bsaes
.hidden	vpaes_encrypt_key_to_bsaes
.type	vpaes_encrypt_key_to_bsaes,%function
.align	4
vpaes_encrypt_key_to_bsaes:
	stmdb	sp!, {r11, lr}

	@ See _vpaes_schedule_core for the key schedule logic. In particular,
	@ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper),
	@ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last
	@ contain the transformations not in the bsaes representation. This
	@ function inverts those transforms.
	@
	@ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
	@ representation, which does not match the other aes_nohw_*
	@ implementations. The ARM aes_nohw_* stores each 32-bit word
	@ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
	@ cost of extra REV and VREV32 operations in little-endian ARM.

	vmov.i8	q9, #0x0f		@ Required by _vpaes_schedule_transform
	adr	r2, .Lk_mc_forward	@ Must be aligned to 8 mod 16.
	add	r3, r2, 0x90		@ .Lk_sr+0x10-.Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression)

	vld1.64	{q12}, [r2]
	vmov.i8	q10, #0x5b		@ .Lk_s63 from vpaes-x86_64
	adr	r11, .Lk_opt		@ Must be aligned to 8 mod 16.
	vmov.i8	q11, #0x63		@ .Lk_s63 without .Lk_ipt applied

	@ vpaes stores one fewer round count than bsaes, but the number of keys
	@ is the same.
	ldr	r2, [r1,#240]
	add	r2, r2, #1
	str	r2, [r0,#240]

	@ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt).
	@ Invert this with .Lk_opt.
	vld1.64	{q0}, [r1]!
	bl	_vpaes_schedule_transform
	vrev32.8	q0, q0
	vst1.64	{q0}, [r0]!

	@ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied,
	@ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63,
	@ multiplies by the circulant 0,1,1,1, then applies ShiftRows.
.Loop_enc_key_to_bsaes:
	vld1.64	{q0}, [r1]!

	@ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle
	@ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30.
	@ We use r3 rather than r8 to avoid a callee-saved register.
	vld1.64	{q1}, [r3]
	vtbl.8	d4, {q0}, d2
	vtbl.8	d5, {q0}, d3
	add	r3, r3, #16
	and	r3, r3, #~(1<<6)
	vmov	q0, q2

	@ Handle the last key differently.
	subs	r2, r2, #1
	beq	.Loop_enc_key_to_bsaes_last

	@ Multiply by the circulant. This is its own inverse.
	vtbl.8	d2, {q0}, d24
	vtbl.8	d3, {q0}, d25
	vmov	q0, q1
	vtbl.8	d4, {q1}, d24
	vtbl.8	d5, {q1}, d25
	veor	q0, q0, q2
	vtbl.8	d2, {q2}, d24
	vtbl.8	d3, {q2}, d25
	veor	q0, q0, q1

	@ XOR and finish.
	veor	q0, q0, q10
	bl	_vpaes_schedule_transform
	vrev32.8	q0, q0
	vst1.64	{q0}, [r0]!
	b	.Loop_enc_key_to_bsaes

.Loop_enc_key_to_bsaes_last:
	@ The final key does not have a basis transform (note
	@ .Lschedule_mangle_last inverts the original transform). It only XORs
	@ 0x63 and applies ShiftRows. The latter was already inverted in the
	@ loop. Note that, because we act on the original representation, we use
	@ q11, not q10.
	veor	q0, q0, q11
	vrev32.8	q0, q0
	vst1.64	{q0}, [r0]

	@ Wipe registers which contained key material.
	veor	q0, q0, q0
	veor	q1, q1, q1
	veor	q2, q2, q2
	ldmia	sp!, {r11, pc}	@ return
.size	vpaes_encrypt_key_to_bsaes,.-vpaes_encrypt_key_to_bsaes

@ void vpaes_decrypt_key_to_bsaes(AES_KEY *vpaes, const AES_KEY *bsaes);
.globl	vpaes_decrypt_key_to_bsaes
.hidden	vpaes_decrypt_key_to_bsaes
.type	vpaes_decrypt_key_to_bsaes,%function
.align	4
vpaes_decrypt_key_to_bsaes:
	stmdb	sp!, {r11, lr}

	@ See _vpaes_schedule_core for the key schedule logic. Note vpaes
	@ computes the decryption key schedule in reverse. Additionally,
	@ aes-x86_64.pl shares some transformations, so we must only partially
	@ invert vpaes's transformations. In general, vpaes computes in a
	@ different basis (.Lk_ipt and .Lk_opt) and applies the inverses of
	@ MixColumns, ShiftRows, and the affine part of the AES S-box (which is
	@ split into a linear skew and XOR of 0x63). We undo all but MixColumns.
	@
	@ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
	@ representation, which does not match the other aes_nohw_*
	@ implementations. The ARM aes_nohw_* stores each 32-bit word
	@ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
	@ cost of extra REV and VREV32 operations in little-endian ARM.

	adr	r2, .Lk_decrypt_transform
	adr	r3, .Lk_sr+0x30
	adr	r11, .Lk_opt_then_skew	@ Input to _vpaes_schedule_transform.
	vld1.64	{q12}, [r2]	@ Reuse q12 from encryption.
	vmov.i8	q9, #0x0f		@ Required by _vpaes_schedule_transform

	@ vpaes stores one fewer round count than bsaes, but the number of keys
	@ is the same.
	ldr	r2, [r1,#240]
	add	r2, r2, #1
	str	r2, [r0,#240]

	@ Undo the basis change and reapply the S-box affine transform. See
	@ .Lschedule_mangle_last.
	vld1.64	{q0}, [r1]!
	bl	_vpaes_schedule_transform
	vrev32.8	q0, q0
	vst1.64	{q0}, [r0]!

	@ See _vpaes_schedule_mangle for the transform on the middle keys. Note
	@ it simultaneously inverts MixColumns and the S-box affine transform.
	@ See .Lk_dksd through .Lk_dks9.
.Loop_dec_key_to_bsaes:
	vld1.64	{q0}, [r1]!

	@ Invert the ShiftRows step (see .Lschedule_mangle_both). Note going
	@ forwards cancels inverting for which direction we cycle r3. We use r3
	@ rather than r8 to avoid a callee-saved register.
	vld1.64	{q1}, [r3]
	vtbl.8	d4, {q0}, d2
	vtbl.8	d5, {q0}, d3
	add	r3, r3, #64-16
	and	r3, r3, #~(1<<6)
	vmov	q0, q2

	@ Handle the last key differently.
	subs	r2, r2, #1
	beq	.Loop_dec_key_to_bsaes_last

	@ Undo the basis change and reapply the S-box affine transform.
	bl	_vpaes_schedule_transform

	@ Rotate each word by 8 bytes (cycle the rows) and then byte-swap. We
	@ combine the two operations in .Lk_decrypt_transform.
	@
	@ TODO(davidben): Where does the rotation come from?
	vtbl.8	d2, {q0}, d24
	vtbl.8	d3, {q0}, d25
	vst1.64	{q1}, [r0]!
	b	.Loop_dec_key_to_bsaes

.Loop_dec_key_to_bsaes_last:
	@ The final key only inverts ShiftRows (already done in the loop). See
	@ .Lschedule_am_decrypting. Its basis is not transformed.
	vrev32.8	q0, q0
	vst1.64	{q0}, [r0]!

	@ Wipe registers which contained key material.
	veor	q0, q0, q0
	veor	q1, q1, q1
	veor	q2, q2, q2
	ldmia	sp!, {r11, pc}	@ return
.size	vpaes_decrypt_key_to_bsaes,.-vpaes_decrypt_key_to_bsaes

.globl	vpaes_ctr32_encrypt_blocks
.hidden	vpaes_ctr32_encrypt_blocks
.type	vpaes_ctr32_encrypt_blocks,%function
.align	4
vpaes_ctr32_encrypt_blocks:
	mov	ip, sp
	stmdb	sp!, {r7,r8,r9,r10,r11, lr}
	@ This function uses q4-q7 (d8-d15), which are callee-saved.
	vstmdb	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}

	cmp	r2, #0
	@ r8 is passed on the stack.
	ldr	r8, [ip]
	beq	.Lctr32_done

	@ _vpaes_encrypt_core expects the key in r2, so swap r2 and r3.
	mov	r9, r3
	mov	r3, r2
	mov	r2, r9

	@ Load the IV and counter portion.
	ldr	r7, [r8, #12]
	vld1.8	{q7}, [r8]

	bl	_vpaes_preheat
	rev	r7, r7		@ The counter is big-endian.

.Lctr32_loop:
	vmov	q0, q7
	vld1.8	{q6}, [r0]!		@ Load input ahead of time
	bl	_vpaes_encrypt_core
	veor	q0, q0, q6		@ XOR input and result
	vst1.8	{q0}, [r1]!
	subs	r3, r3, #1
	@ Update the counter.
	add	r7, r7, #1
	rev	r9, r7
	vmov.32	d15[1], r9
	bne	.Lctr32_loop

.Lctr32_done:
	vldmia	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return
.size	vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks
#endif  // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
#if defined(__linux__) && defined(__ELF__)
.section	.note.GNU-stack,"",%progbits
#endif


================================================
FILE: Sources/CNIOBoringSSL/gen/bcm/vpaes-armv8-apple.S
================================================
#define BORINGSSL_PREFIX CNIOBoringSSL
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
#include
.section	__TEXT,__const


.align	7	// totally strategic alignment
_vpaes_consts:
Lk_mc_forward:	//	mc_forward
.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
.quad	0x080B0A0904070605, 0x000302010C0F0E0D
.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
.quad	0x000302010C0F0E0D, 0x080B0A0904070605
Lk_mc_backward:	//	mc_backward
.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
.quad	0x020100030E0D0C0F, 0x0A09080B06050407
.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
.quad	0x0A09080B06050407, 0x020100030E0D0C0F
Lk_sr:	//	sr
.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad	0x030E09040F0A0500, 0x0B06010C07020D08
.quad	0x0F060D040B020900, 0x070E050C030A0108
.quad	0x0B0E0104070A0D00, 0x0306090C0F020508

//
// "Hot" constants
//
Lk_inv:	//	inv, inva
.quad	0x0E05060F0D080180, 0x040703090A0B0C02
.quad	0x01040A060F0B0780, 0x030D0E0C02050809
Lk_ipt:	//	input transform (lo, hi)
.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
Lk_sbo:	//	sbou, sbot
.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
Lk_sb1:	//	sb1u, sb1t
.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
Lk_sb2:	//	sb2u, sb2t
.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD

//
// Decryption stuff
//
Lk_dipt:	//	decryption input transform
.quad	0x0F505B040B545F00, 0x154A411E114E451A
.quad	0x86E383E660056500, 0x12771772F491F194
Lk_dsbo:	//	decryption sbox final output
.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
Lk_dsb9:	//	decryption sbox output *9*u, *9*t
.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
Lk_dsbd:	//	decryption sbox output *D*u, *D*t
.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
Lk_dsbb:	//	decryption sbox output *B*u, *B*t
.quad	0xD022649296B44200, 0x602646F6B0F2D404
.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
Lk_dsbe:	//	decryption sbox output *E*u, *E*t
.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32

//
// Key schedule constants
//
Lk_dksd:	//	decryption key schedule: invskew x*D
.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
Lk_dksb:	//	decryption key schedule: invskew x*B
.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
Lk_dkse:	//	decryption key schedule: invskew x*E + 0x63
.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
Lk_dks9:	//	decryption key schedule: invskew x*9
.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE

Lk_rcon:	//	rcon
.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

Lk_opt:	//	output transform
.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
Lk_deskew:	//	deskew tables: inverts the sbox's "skew"
.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

// "Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)"
.byte	86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align	2
.align	6

.text

##
##  _aes_preheat
##
##  Fills register %r10 -> .aes_consts (so you can -fPIC)
##  and %xmm9-%xmm15 as specified below.
##
// Pins the "hot" constants into v17-v27: v17 = 0x0f nibble mask,
// v18/v19 = Lk_inv, v20/v21 = Lk_ipt, v22/v23 = Lk_sbo,
// v24/v25 = Lk_sb1, v26/v27 = Lk_sb2. Clobbers x10.
.align	4
_vpaes_encrypt_preheat:
	adrp	x10, Lk_inv@PAGE
	add	x10, x10, Lk_inv@PAGEOFF
	movi	v17.16b, #0x0f
	ld1	{v18.2d,v19.2d}, [x10],#32	// Lk_inv
	ld1	{v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64	// Lk_ipt, Lk_sbo
	ld1	{v24.2d,v25.2d,v26.2d,v27.2d}, [x10]		// Lk_sb1, Lk_sb2
	ret


##
##  _aes_encrypt_core
##
##  AES-encrypt %xmm0.
## ## Inputs: ## %xmm0 = input ## %xmm9-%xmm15 as in _vpaes_preheat ## (%rdx) = scheduled keys ## ## Output in %xmm0 ## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax ## Preserves %xmm6 - %xmm8 so you get some local vectors ## ## .align 4 _vpaes_encrypt_core: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, Lk_mc_forward@PAGE+16 add x11, x11, Lk_mc_forward@PAGEOFF+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 b Lenc_entry .align 4 Lenc_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D and x11, x11, #~(1<<6) // and $0x30, %r11 # ... 
mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D sub w8, w8, #1 // nr-- Lenc_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, Lenc_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 ret .globl _vpaes_encrypt .private_extern _vpaes_encrypt .align 4 _vpaes_encrypt: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ld1 {v7.16b}, [x0] bl _vpaes_encrypt_preheat bl _vpaes_encrypt_core st1 {v0.16b}, [x1] ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .align 4 _vpaes_encrypt_2x: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, Lk_mc_forward@PAGE+16 add x11, x11, Lk_mc_forward@PAGEOFF+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 and v9.16b, v15.16b, v17.16b ushr v8.16b, v15.16b, #4 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 tbl v9.16b, {v20.16b}, v9.16b // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 tbl v10.16b, {v21.16b}, v8.16b eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v8.16b, v9.16b, v16.16b eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 eor v8.16b, v8.16b, v10.16b b Lenc_2x_entry .align 4 Lenc_2x_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u tbl v12.16b, {v25.16b}, v10.16b ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t tbl v8.16b, {v24.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u tbl v13.16b, {v27.16b}, v10.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t tbl v10.16b, {v26.16b}, v11.16b ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B tbl v11.16b, {v8.16b}, v1.16b eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A eor v10.16b, v10.16b, v13.16b tbl 
v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D tbl v8.16b, {v8.16b}, v4.16b eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B eor v11.16b, v11.16b, v10.16b tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C tbl v12.16b, {v11.16b},v1.16b eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D eor v8.16b, v8.16b, v11.16b and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D eor v8.16b, v8.16b, v12.16b sub w8, w8, #1 // nr-- Lenc_2x_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i and v9.16b, v8.16b, v17.16b ushr v8.16b, v8.16b, #4 tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k tbl v13.16b, {v19.16b},v9.16b eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j eor v9.16b, v9.16b, v8.16b tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v11.16b, {v18.16b},v8.16b tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j tbl v12.16b, {v18.16b},v9.16b eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v11.16b, v11.16b, v13.16b eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k eor v12.16b, v12.16b, v13.16b tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v10.16b, {v18.16b},v11.16b tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak tbl v11.16b, {v18.16b},v12.16b eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v10.16b, v10.16b, v9.16b eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo eor v11.16b, v11.16b, v8.16b ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, Lenc_2x_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl 
v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou tbl v12.16b, {v22.16b}, v10.16b ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t tbl v8.16b, {v23.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0 tbl v1.16b, {v8.16b},v1.16b ret .align 4 _vpaes_decrypt_preheat: adrp x10, Lk_inv@PAGE add x10, x10, Lk_inv@PAGEOFF movi v17.16b, #0x0f adrp x11, Lk_dipt@PAGE add x11, x11, Lk_dipt@PAGEOFF ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x11],#64 // Lk_dipt, Lk_dsbo ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x11],#64 // Lk_dsb9, Lk_dsbd ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x11] // Lk_dsbb, Lk_dsbe ret ## ## Decryption core ## ## Same API as encryption core. ## .align 4 _vpaes_decrypt_core: mov x9, x2 ldr w8, [x2,#240] // pull rounds // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11 eor x11, x11, #0x30 // xor $0x30, %r11 adrp x10, Lk_sr@PAGE add x10, x10, Lk_sr@PAGEOFF and x11, x11, #0x30 // and $0x30, %r11 add x11, x11, x10 adrp x10, Lk_mc_forward@PAGE+48 add x10, x10, Lk_mc_forward@PAGEOFF+48 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 ld1 {v5.2d}, [x10] // vmovdqa Lk_mc_forward+48(%rip), %xmm5 // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 b Ldec_entry .align 4 Ldec_loop: // // Inverse mix columns // // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u // 
vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0 // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch sub w8, w8, #1 // sub $1,%rax # nr-- Ldec_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // 
vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0 cbnz w8, Ldec_loop // middle of last round // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # Lk_sr-Lk_dsbd=-0x160 tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A tbl v0.16b, {v0.16b}, v2.16b // vpshufb %xmm2, %xmm0, %xmm0 ret .globl _vpaes_decrypt .private_extern _vpaes_decrypt .align 4 _vpaes_decrypt: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ld1 {v7.16b}, [x0] bl _vpaes_decrypt_preheat bl _vpaes_decrypt_core st1 {v0.16b}, [x1] ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // v14-v15 input, v0-v1 output .align 4 _vpaes_decrypt_2x: mov x9, x2 ldr w8, [x2,#240] // pull rounds // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11 eor x11, x11, #0x30 // xor $0x30, %r11 adrp x10, Lk_sr@PAGE add x10, x10, Lk_sr@PAGEOFF and x11, x11, #0x30 // and $0x30, %r11 add x11, x11, x10 adrp x10, Lk_mc_forward@PAGE+48 add x10, x10, Lk_mc_forward@PAGEOFF+48 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 and v9.16b, v15.16b, v17.16b ushr v8.16b, v15.16b, #4 tbl v2.16b, {v20.16b},v1.16b // vpshufb %xmm1, %xmm2, %xmm2 tbl v10.16b, {v20.16b},v9.16b ld1 {v5.2d}, [x10] // vmovdqa Lk_mc_forward+48(%rip), %xmm5 // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi tbl v0.16b, {v21.16b},v0.16b // vpshufb %xmm0, %xmm1, %xmm0 tbl v8.16b, {v21.16b},v8.16b eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2 eor v10.16b, v10.16b, v16.16b eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 eor v8.16b, v8.16b, v10.16b b Ldec_2x_entry .align 4 Ldec_2x_loop: // // Inverse mix columns // // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u tbl v12.16b, {v24.16b}, v10.16b tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t tbl v9.16b, {v25.16b}, v11.16b eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0 eor v8.16b, v12.16b, v16.16b // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu tbl v12.16b, {v26.16b}, v10.16b tbl 
v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v8.16b, {v8.16b},v5.16b tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt tbl v9.16b, {v27.16b}, v11.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch eor v8.16b, v8.16b, v12.16b // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu tbl v12.16b, {v28.16b}, v10.16b tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v8.16b, {v8.16b},v5.16b tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt tbl v9.16b, {v29.16b}, v11.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch eor v8.16b, v8.16b, v12.16b // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu tbl v12.16b, {v30.16b}, v10.16b tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v8.16b, {v8.16b},v5.16b tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet tbl v9.16b, {v31.16b}, v11.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch eor v8.16b, v8.16b, v12.16b ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b sub w8, w8, #1 // sub $1,%rax # nr-- Ldec_2x_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i and v9.16b, v8.16b, v17.16b ushr v8.16b, v8.16b, #4 tbl v2.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k tbl v10.16b, {v19.16b},v9.16b eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j eor 
v9.16b, v9.16b, v8.16b tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v11.16b, {v18.16b},v8.16b tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j tbl v12.16b, {v18.16b},v9.16b eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v11.16b, v11.16b, v10.16b eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k eor v12.16b, v12.16b, v10.16b tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v10.16b, {v18.16b},v11.16b tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak tbl v11.16b, {v18.16b},v12.16b eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v10.16b, v10.16b, v9.16b eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo eor v11.16b, v11.16b, v8.16b ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0 cbnz w8, Ldec_2x_loop // middle of last round // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou tbl v12.16b, {v22.16b}, v10.16b // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t tbl v9.16b, {v23.16b}, v11.16b ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # Lk_sr-Lk_dsbd=-0x160 eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A eor v8.16b, v9.16b, v12.16b tbl v0.16b, {v0.16b},v2.16b // vpshufb %xmm2, %xmm0, %xmm0 tbl v1.16b, {v8.16b},v2.16b ret ######################################################## ## ## ## AES key schedule ## ## ## ######################################################## .align 4 _vpaes_key_preheat: adrp x10, Lk_inv@PAGE add x10, x10, Lk_inv@PAGEOFF movi v16.16b, #0x5b // Lk_s63 adrp x11, Lk_sb1@PAGE add x11, x11, Lk_sb1@PAGEOFF movi v17.16b, #0x0f // Lk_s0F ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt adrp x10, Lk_dksd@PAGE 
add x10, x10, Lk_dksd@PAGEOFF ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1 adrp x11, Lk_mc_forward@PAGE add x11, x11, Lk_mc_forward@PAGEOFF ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9 ld1 {v8.2d}, [x10] // Lk_rcon ld1 {v9.2d}, [x11] // Lk_mc_forward[0] ret .align 4 _vpaes_schedule_core: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp,#-16]! add x29,sp,#0 bl _vpaes_key_preheat // load the tables ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned) // input transform mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3 bl _vpaes_schedule_transform mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7 adrp x10, Lk_sr@PAGE // lea Lk_sr(%rip),%r10 add x10, x10, Lk_sr@PAGEOFF add x8, x8, x10 cbnz w3, Lschedule_am_decrypting // encrypting, output zeroth round key after transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) b Lschedule_go Lschedule_am_decrypting: // decrypting, output zeroth round key after shiftrows ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) eor x8, x8, #0x30 // xor $0x30, %r8 Lschedule_go: cmp w1, #192 // cmp $192, %esi b.hi Lschedule_256 b.eq Lschedule_192 // 128: fall though ## ## .schedule_128 ## ## 128-bit specific part of key schedule. ## ## This schedule is really simple, because all its parts ## are accomplished by the subroutines. ## Lschedule_128: mov x0, #10 // mov $10, %esi Loop_schedule_128: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // write output b Loop_schedule_128 ## ## .aes_schedule_192 ## ## 192-bit specific part of key schedule. ## ## The main body of this schedule is the same as the 128-bit ## schedule, but with more smearing. The long, high side is ## stored in %xmm7 as before, and the short, low side is in ## the high bits of %xmm6. 
## ## This schedule is somewhat nastier, however, because each ## round produces 192 bits of key material, or 1.5 round keys. ## Therefore, on each cycle we do 2 rounds and produce 3 round ## keys. ## .align 4 Lschedule_192: sub x0, x0, #8 ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) bl _vpaes_schedule_transform // input transform mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4 ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros mov x0, #4 // mov $4, %esi Loop_schedule_192: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0 bl _vpaes_schedule_mangle // save key n bl _vpaes_schedule_192_smear bl _vpaes_schedule_mangle // save key n+1 bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // save key n+2 bl _vpaes_schedule_192_smear b Loop_schedule_192 ## ## .aes_schedule_256 ## ## 256-bit specific part of key schedule. ## ## The structure here is very similar to the 128-bit ## schedule, but with an additional "low side" in ## %xmm6. The low side's rounds are the same as the ## high side's, except no rcon and no rotation. ## .align 4 Lschedule_256: ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) bl _vpaes_schedule_transform // input transform mov x0, #7 // mov $7, %esi Loop_schedule_256: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_mangle // output low result mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 // high round bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // low round. 
swap xmm7 and xmm6 dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 movi v4.16b, #0 mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5 mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7 bl _vpaes_schedule_low_round mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7 b Loop_schedule_256 ## ## .aes_schedule_mangle_last ## ## Mangler for last round of key schedule ## Mangles %xmm0 ## when encrypting, outputs out(%xmm0) ^ 63 ## when decrypting, outputs unskew(%xmm0) ## ## Always called right before return... jumps to cleanup and exits ## .align 4 Lschedule_mangle_last: // schedule last round key from xmm0 adrp x11, Lk_deskew@PAGE // lea Lk_deskew(%rip),%r11 # prepare to deskew add x11, x11, Lk_deskew@PAGEOFF cbnz w3, Lschedule_mangle_last_dec // encrypting ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1 adrp x11, Lk_opt@PAGE // lea Lk_opt(%rip), %r11 # prepare to output transform add x11, x11, Lk_opt@PAGEOFF add x2, x2, #32 // add $32, %rdx tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute Lschedule_mangle_last_dec: ld1 {v20.2d,v21.2d}, [x11] // reload constants sub x2, x2, #16 // add $-16, %rdx eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0 bl _vpaes_schedule_transform // output transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key // cleanup eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2 eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3 eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5 eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6 eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7 ldp x29, x30, [sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret ## ## .aes_schedule_192_smear ## ## Smear the short, low side in the 192-bit key schedule. 
## ## Inputs: ## %xmm7: high side, b a x y ## %xmm6: low side, d c 0 0 ## %xmm13: 0 ## ## Outputs: ## %xmm6: b+c+d b+c 0 0 ## %xmm0: b+c+d b+c b a ## .align 4 _vpaes_schedule_192_smear: movi v1.16b, #0 dup v0.4s, v7.s[3] ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0 ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros ret ## ## .aes_schedule_round ## ## Runs one main round of the key schedule on %xmm0, %xmm7 ## ## Specifically, runs subbytes on the high dword of %xmm0 ## then rotates it by one byte and xors into the low dword of ## %xmm7. ## ## Adds rcon from low byte of %xmm8, then rotates %xmm8 for ## next rcon. ## ## Smears the dwords of %xmm7 by xoring the low into the ## second low, result into third, result into highest. ## ## Returns results in %xmm7 = %xmm0. ## Clobbers %xmm1-%xmm4, %r11. ## .align 4 _vpaes_schedule_round: // extract rcon from xmm8 movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4 ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1 ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 // rotate dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0 // fall through... // low round: same as high round, but no rotation and no rcon. 
_vpaes_schedule_low_round: // smear xmm7 ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4 // subbytes and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7 tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7 tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output // add in smeared stuff eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0 eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7 ret ## ## .aes_schedule_transform ## ## Linear-transform %xmm0 according to tables at (%r11) ## ## Requires that %xmm9 = 0x0F0F... 
as in preheat ## Output in %xmm0 ## Clobbers %xmm1, %xmm2 ## .align 4 _vpaes_schedule_transform: and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 // vmovdqa (%r11), %xmm2 # lo tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 // vmovdqa 16(%r11), %xmm1 # hi tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 ret ## ## .aes_schedule_mangle ## ## Mangle xmm0 from (basis-transformed) standard version ## to our version. ## ## On encrypt, ## xor with 0x63 ## multiply by circulant 0,1,1,1 ## apply shiftrows transform ## ## On decrypt, ## xor with 0x63 ## multiply by "inverse mixcolumns" circulant E,B,D,9 ## deskew ## apply shiftrows transform ## ## ## Writes out to (%rdx), and increments or decrements it ## Keeps track of round number mod 4 in %r8 ## Preserves xmm0 ## Clobbers xmm1-xmm5 ## .align 4 _vpaes_schedule_mangle: mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later // vmovdqa .Lk_mc_forward(%rip),%xmm5 cbnz w3, Lschedule_mangle_dec // encrypting eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4 add x2, x2, #16 // add $16, %rdx tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4 tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1 tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3 eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3 b Lschedule_mangle_both .align 4 Lschedule_mangle_dec: // inverse mix columns // lea .Lk_dksd(%rip),%r11 ushr v1.16b, v4.16b, #4 // vpsrlb $4, %xmm4, %xmm1 # 1 = hi and v4.16b, v4.16b, v17.16b // vpand %xmm9, %xmm4, %xmm4 # 4 = lo // vmovdqa 0x00(%r11), %xmm2 tbl v2.16b, {v24.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 // vmovdqa 0x10(%r11), %xmm3 tbl v3.16b, {v25.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, 
%xmm3, %xmm3 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 // vmovdqa 0x20(%r11), %xmm2 tbl v2.16b, {v26.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 // vmovdqa 0x30(%r11), %xmm3 tbl v3.16b, {v27.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 // vmovdqa 0x40(%r11), %xmm2 tbl v2.16b, {v28.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 // vmovdqa 0x50(%r11), %xmm3 tbl v3.16b, {v29.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 // vmovdqa 0x60(%r11), %xmm2 tbl v2.16b, {v30.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 // vmovdqa 0x70(%r11), %xmm4 tbl v4.16b, {v31.16b}, v1.16b // vpshufb %xmm1, %xmm4, %xmm4 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 eor v3.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm3 sub x2, x2, #16 // add $-16, %rdx Lschedule_mangle_both: tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 add x8, x8, #48 // add $-16, %r8 and x8, x8, #~(1<<6) // and $0x30, %r8 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) ret .globl _vpaes_set_encrypt_key .private_extern _vpaes_set_encrypt_key .align 4 _vpaes_set_encrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! 
// ABI spec says so lsr w9, w1, #5 // shr $5,%eax add w9, w9, #5 // $5,%eax str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; mov w3, #0 // mov $0,%ecx mov x8, #0x30 // mov $0x30,%r8d bl _vpaes_schedule_core eor x0, x0, x0 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl _vpaes_set_decrypt_key .private_extern _vpaes_set_decrypt_key .align 4 _vpaes_set_decrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so lsr w9, w1, #5 // shr $5,%eax add w9, w9, #5 // $5,%eax str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; lsl w9, w9, #4 // shl $4,%eax add x2, x2, #16 // lea 16(%rdx,%rax),%rdx add x2, x2, x9 mov w3, #1 // mov $1,%ecx lsr w8, w1, #1 // shr $1,%r8d and x8, x8, #32 // and $32,%r8d eor x8, x8, #32 // xor $32,%r8d # nbits==192?0:32 bl _vpaes_schedule_core ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl _vpaes_cbc_encrypt .private_extern _vpaes_cbc_encrypt .align 4 _vpaes_cbc_encrypt: AARCH64_SIGN_LINK_REGISTER cbz x2, Lcbc_abort cmp w5, #0 // check direction b.eq vpaes_cbc_decrypt stp x29,x30,[sp,#-16]! add x29,sp,#0 mov x17, x2 // reassign mov x2, x3 // reassign ld1 {v0.16b}, [x4] // load ivec bl _vpaes_encrypt_preheat b Lcbc_enc_loop .align 4 Lcbc_enc_loop: ld1 {v7.16b}, [x0],#16 // load input eor v7.16b, v7.16b, v0.16b // xor with ivec bl _vpaes_encrypt_core st1 {v0.16b}, [x1],#16 // save output subs x17, x17, #16 b.hi Lcbc_enc_loop st1 {v0.16b}, [x4] // write ivec ldp x29,x30,[sp],#16 Lcbc_abort: AARCH64_VALIDATE_LINK_REGISTER ret .align 4 vpaes_cbc_decrypt: // Not adding AARCH64_SIGN_LINK_REGISTER here because vpaes_cbc_decrypt is jumped to // only from vpaes_cbc_encrypt which has already signed the return address. stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so stp d10,d11,[sp,#-16]! stp d12,d13,[sp,#-16]! stp d14,d15,[sp,#-16]! 
mov x17, x2 // reassign mov x2, x3 // reassign ld1 {v6.16b}, [x4] // load ivec bl _vpaes_decrypt_preheat tst x17, #16 b.eq Lcbc_dec_loop2x ld1 {v7.16b}, [x0], #16 // load input bl _vpaes_decrypt_core eor v0.16b, v0.16b, v6.16b // xor with ivec orr v6.16b, v7.16b, v7.16b // next ivec value st1 {v0.16b}, [x1], #16 subs x17, x17, #16 b.ls Lcbc_dec_done .align 4 Lcbc_dec_loop2x: ld1 {v14.16b,v15.16b}, [x0], #32 bl _vpaes_decrypt_2x eor v0.16b, v0.16b, v6.16b // xor with ivec eor v1.16b, v1.16b, v14.16b orr v6.16b, v15.16b, v15.16b st1 {v0.16b,v1.16b}, [x1], #32 subs x17, x17, #32 b.hi Lcbc_dec_loop2x Lcbc_dec_done: st1 {v6.16b}, [x4] ldp d14,d15,[sp],#16 ldp d12,d13,[sp],#16 ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl _vpaes_ctr32_encrypt_blocks .private_extern _vpaes_ctr32_encrypt_blocks .align 4 _vpaes_ctr32_encrypt_blocks: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so stp d10,d11,[sp,#-16]! stp d12,d13,[sp,#-16]! stp d14,d15,[sp,#-16]! cbz x2, Lctr32_done // Note, unlike the other functions, x2 here is measured in blocks, // not bytes. mov x17, x2 mov x2, x3 // Load the IV and counter portion. ldr w6, [x4, #12] ld1 {v7.16b}, [x4] bl _vpaes_encrypt_preheat tst x17, #1 rev w6, w6 // The counter is big-endian. b.eq Lctr32_prep_loop // Handle one block so the remaining block count is even for // _vpaes_encrypt_2x. ld1 {v6.16b}, [x0], #16 // Load input ahead of time bl _vpaes_encrypt_core eor v0.16b, v0.16b, v6.16b // XOR input and result st1 {v0.16b}, [x1], #16 subs x17, x17, #1 // Update the counter. add w6, w6, #1 rev w7, w6 mov v7.s[3], w7 b.ls Lctr32_done Lctr32_prep_loop: // _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x // uses v14 and v15. 
mov v15.16b, v7.16b mov v14.16b, v7.16b add w6, w6, #1 rev w7, w6 mov v15.s[3], w7 Lctr32_loop: ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time bl _vpaes_encrypt_2x eor v0.16b, v0.16b, v6.16b // XOR input and result eor v1.16b, v1.16b, v7.16b // XOR input and result (#2) st1 {v0.16b,v1.16b}, [x1], #32 subs x17, x17, #2 // Update the counter. add w7, w6, #1 add w6, w6, #2 rev w7, w7 mov v14.s[3], w7 rev w7, w6 mov v15.s[3], w7 b.hi Lctr32_loop Lctr32_done: ldp d14,d15,[sp],#16 ldp d12,d13,[sp],#16 ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/vpaes-armv8-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include .section .rodata .type _vpaes_consts,%object .align 7 // totally strategic alignment _vpaes_consts: .Lk_mc_forward: // mc_forward .quad 0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 .Lk_mc_backward: // mc_backward .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F .Lk_sr: // sr .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 // // "Hot" constants // .Lk_inv: // inv, inva .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 .Lk_ipt: // input transform (lo, hi) .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 .Lk_sbo: // sbou, sbot .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA .Lk_sb1: // sb1u, sb1t .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 .Lk_sb2: // sb2u, sb2t .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD // // Decryption stuff // .Lk_dipt: // decryption input transform .quad 0x0F505B040B545F00, 0x154A411E114E451A .quad 0x86E383E660056500, 0x12771772F491F194 .Lk_dsbo: // decryption sbox final output .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C .Lk_dsb9: // decryption sbox output *9*u, *9*t .quad 0x851C03539A86D600, 0xCAD51F504F994CC9 .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 .Lk_dsbd: // decryption sbox output *D*u, *D*t .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 .Lk_dsbb: // decryption 
sbox output *B*u, *B*t .quad 0xD022649296B44200, 0x602646F6B0F2D404 .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B .Lk_dsbe: // decryption sbox output *E*u, *E*t .quad 0x46F2929626D4D000, 0x2242600464B4F6B0 .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 // // Key schedule constants // .Lk_dksd: // decryption key schedule: invskew x*D .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E .Lk_dksb: // decryption key schedule: invskew x*B .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 .Lk_dkse: // decryption key schedule: invskew x*E + 0x63 .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 .Lk_dks9: // decryption key schedule: invskew x*9 .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE .Lk_rcon: // rcon .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 .Lk_opt: // output transform .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 .Lk_deskew: // deskew tables: inverts the sbox's "skew" .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .align 2 .size _vpaes_consts,.-_vpaes_consts .align 6 .text ## ## _aes_preheat ## ## Fills register %r10 -> .aes_consts (so you can -fPIC) ## and %xmm9-%xmm15 as specified below. 
## .type _vpaes_encrypt_preheat,%function .align 4 _vpaes_encrypt_preheat: adrp x10, .Lk_inv add x10, x10, :lo12:.Lk_inv movi v17.16b, #0x0f ld1 {v18.2d,v19.2d}, [x10],#32 // .Lk_inv ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // .Lk_ipt, .Lk_sbo ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // .Lk_sb1, .Lk_sb2 ret .size _vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat ## ## _aes_encrypt_core ## ## AES-encrypt %xmm0. ## ## Inputs: ## %xmm0 = input ## %xmm9-%xmm15 as in _vpaes_preheat ## (%rdx) = scheduled keys ## ## Output in %xmm0 ## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax ## Preserves %xmm6 - %xmm8 so you get some local vectors ## ## .type _vpaes_encrypt_core,%function .align 4 _vpaes_encrypt_core: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, .Lk_mc_forward+16 add x11, x11, :lo12:.Lk_mc_forward+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 b .Lenc_entry .align 4 .Lenc_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // 
vpshufb %xmm1, %xmm0, %xmm3 # 0 = B eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D sub w8, w8, #1 // nr-- .Lenc_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, .Lenc_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, 
%xmm0, %xmm0 ret .size _vpaes_encrypt_core,.-_vpaes_encrypt_core .globl vpaes_encrypt .hidden vpaes_encrypt .type vpaes_encrypt,%function .align 4 vpaes_encrypt: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ld1 {v7.16b}, [x0] bl _vpaes_encrypt_preheat bl _vpaes_encrypt_core st1 {v0.16b}, [x1] ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_encrypt,.-vpaes_encrypt .type _vpaes_encrypt_2x,%function .align 4 _vpaes_encrypt_2x: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, .Lk_mc_forward+16 add x11, x11, :lo12:.Lk_mc_forward+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 and v9.16b, v15.16b, v17.16b ushr v8.16b, v15.16b, #4 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 tbl v9.16b, {v20.16b}, v9.16b // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 tbl v10.16b, {v21.16b}, v8.16b eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v8.16b, v9.16b, v16.16b eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 eor v8.16b, v8.16b, v10.16b b .Lenc_2x_entry .align 4 .Lenc_2x_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u tbl v12.16b, {v25.16b}, v10.16b ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t tbl v8.16b, {v24.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u tbl v13.16b, {v27.16b}, v10.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t tbl v10.16b, 
{v26.16b}, v11.16b ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B tbl v11.16b, {v8.16b}, v1.16b eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A eor v10.16b, v10.16b, v13.16b tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D tbl v8.16b, {v8.16b}, v4.16b eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B eor v11.16b, v11.16b, v10.16b tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C tbl v12.16b, {v11.16b},v1.16b eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D eor v8.16b, v8.16b, v11.16b and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D eor v8.16b, v8.16b, v12.16b sub w8, w8, #1 // nr-- .Lenc_2x_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i and v9.16b, v8.16b, v17.16b ushr v8.16b, v8.16b, #4 tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k tbl v13.16b, {v19.16b},v9.16b eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j eor v9.16b, v9.16b, v8.16b tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v11.16b, {v18.16b},v8.16b tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j tbl v12.16b, {v18.16b},v9.16b eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v11.16b, v11.16b, v13.16b eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k eor v12.16b, v12.16b, v13.16b tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v10.16b, {v18.16b},v11.16b tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak tbl v11.16b, {v18.16b},v12.16b eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v10.16b, v10.16b, v9.16b eor v3.16b, v3.16b, v0.16b // 
vpxor %xmm0, %xmm3, %xmm3 # 3 = jo eor v11.16b, v11.16b, v8.16b ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, .Lenc_2x_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou tbl v12.16b, {v22.16b}, v10.16b ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t tbl v8.16b, {v23.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0 tbl v1.16b, {v8.16b},v1.16b ret .size _vpaes_encrypt_2x,.-_vpaes_encrypt_2x .type _vpaes_decrypt_preheat,%function .align 4 _vpaes_decrypt_preheat: adrp x10, .Lk_inv add x10, x10, :lo12:.Lk_inv movi v17.16b, #0x0f adrp x11, .Lk_dipt add x11, x11, :lo12:.Lk_dipt ld1 {v18.2d,v19.2d}, [x10],#32 // .Lk_inv ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x11],#64 // .Lk_dipt, .Lk_dsbo ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x11],#64 // .Lk_dsb9, .Lk_dsbd ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x11] // .Lk_dsbb, .Lk_dsbe ret .size _vpaes_decrypt_preheat,.-_vpaes_decrypt_preheat ## ## Decryption core ## ## Same API as encryption core. 
## .type _vpaes_decrypt_core,%function .align 4 _vpaes_decrypt_core: mov x9, x2 ldr w8, [x2,#240] // pull rounds // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11 eor x11, x11, #0x30 // xor $0x30, %r11 adrp x10, .Lk_sr add x10, x10, :lo12:.Lk_sr and x11, x11, #0x30 // and $0x30, %r11 add x11, x11, x10 adrp x10, .Lk_mc_forward+48 add x10, x10, :lo12:.Lk_mc_forward+48 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 ld1 {v5.2d}, [x10] // vmovdqa .Lk_mc_forward+48(%rip), %xmm5 // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 b .Ldec_entry .align 4 .Ldec_loop: // // Inverse mix columns // // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0 // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v1.16b, {v29.16b}, v3.16b // 
vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch sub w8, w8, #1 // sub $1,%rax # nr-- .Ldec_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0 cbnz w8, .Ldec_loop // middle of last round // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160 tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, 
%xmm4, %xmm4 # 4 = sb1u + k eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A tbl v0.16b, {v0.16b}, v2.16b // vpshufb %xmm2, %xmm0, %xmm0 ret .size _vpaes_decrypt_core,.-_vpaes_decrypt_core .globl vpaes_decrypt .hidden vpaes_decrypt .type vpaes_decrypt,%function .align 4 vpaes_decrypt: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ld1 {v7.16b}, [x0] bl _vpaes_decrypt_preheat bl _vpaes_decrypt_core st1 {v0.16b}, [x1] ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_decrypt,.-vpaes_decrypt // v14-v15 input, v0-v1 output .type _vpaes_decrypt_2x,%function .align 4 _vpaes_decrypt_2x: mov x9, x2 ldr w8, [x2,#240] // pull rounds // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11 eor x11, x11, #0x30 // xor $0x30, %r11 adrp x10, .Lk_sr add x10, x10, :lo12:.Lk_sr and x11, x11, #0x30 // and $0x30, %r11 add x11, x11, x10 adrp x10, .Lk_mc_forward+48 add x10, x10, :lo12:.Lk_mc_forward+48 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 and v9.16b, v15.16b, v17.16b ushr v8.16b, v15.16b, #4 tbl v2.16b, {v20.16b},v1.16b // vpshufb %xmm1, %xmm2, %xmm2 tbl v10.16b, {v20.16b},v9.16b ld1 {v5.2d}, [x10] // vmovdqa .Lk_mc_forward+48(%rip), %xmm5 // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi tbl v0.16b, {v21.16b},v0.16b // vpshufb %xmm0, %xmm1, %xmm0 tbl v8.16b, {v21.16b},v8.16b eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2 eor v10.16b, v10.16b, v16.16b eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 eor v8.16b, v8.16b, v10.16b b .Ldec_2x_entry .align 4 .Ldec_2x_loop: // // Inverse mix columns // // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u tbl v12.16b, {v24.16b}, v10.16b tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t tbl v9.16b, 
{v25.16b}, v11.16b eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0 eor v8.16b, v12.16b, v16.16b // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu tbl v12.16b, {v26.16b}, v10.16b tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v8.16b, {v8.16b},v5.16b tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt tbl v9.16b, {v27.16b}, v11.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch eor v8.16b, v8.16b, v12.16b // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu tbl v12.16b, {v28.16b}, v10.16b tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v8.16b, {v8.16b},v5.16b tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt tbl v9.16b, {v29.16b}, v11.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch eor v8.16b, v8.16b, v12.16b // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu tbl v12.16b, {v30.16b}, v10.16b tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v8.16b, {v8.16b},v5.16b tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet tbl v9.16b, {v31.16b}, v11.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch eor v8.16b, v8.16b, v12.16b ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, 
v9.16b sub w8, w8, #1 // sub $1,%rax # nr-- .Ldec_2x_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i and v9.16b, v8.16b, v17.16b ushr v8.16b, v8.16b, #4 tbl v2.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k tbl v10.16b, {v19.16b},v9.16b eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j eor v9.16b, v9.16b, v8.16b tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v11.16b, {v18.16b},v8.16b tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j tbl v12.16b, {v18.16b},v9.16b eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v11.16b, v11.16b, v10.16b eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k eor v12.16b, v12.16b, v10.16b tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v10.16b, {v18.16b},v11.16b tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak tbl v11.16b, {v18.16b},v12.16b eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v10.16b, v10.16b, v9.16b eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo eor v11.16b, v11.16b, v8.16b ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0 cbnz w8, .Ldec_2x_loop // middle of last round // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou tbl v12.16b, {v22.16b}, v10.16b // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t tbl v9.16b, {v23.16b}, v11.16b ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160 eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A eor v8.16b, v9.16b, v12.16b tbl v0.16b, {v0.16b},v2.16b // vpshufb %xmm2, %xmm0, %xmm0 tbl v1.16b, {v8.16b},v2.16b ret 
.size _vpaes_decrypt_2x,.-_vpaes_decrypt_2x ######################################################## ## ## ## AES key schedule ## ## ## ######################################################## .type _vpaes_key_preheat,%function .align 4 _vpaes_key_preheat: adrp x10, .Lk_inv add x10, x10, :lo12:.Lk_inv movi v16.16b, #0x5b // .Lk_s63 adrp x11, .Lk_sb1 add x11, x11, :lo12:.Lk_sb1 movi v17.16b, #0x0f // .Lk_s0F ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // .Lk_inv, .Lk_ipt adrp x10, .Lk_dksd add x10, x10, :lo12:.Lk_dksd ld1 {v22.2d,v23.2d}, [x11] // .Lk_sb1 adrp x11, .Lk_mc_forward add x11, x11, :lo12:.Lk_mc_forward ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // .Lk_dksd, .Lk_dksb ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // .Lk_dkse, .Lk_dks9 ld1 {v8.2d}, [x10] // .Lk_rcon ld1 {v9.2d}, [x11] // .Lk_mc_forward[0] ret .size _vpaes_key_preheat,.-_vpaes_key_preheat .type _vpaes_schedule_core,%function .align 4 _vpaes_schedule_core: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp,#-16]! add x29,sp,#0 bl _vpaes_key_preheat // load the tables ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned) // input transform mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3 bl _vpaes_schedule_transform mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7 adrp x10, .Lk_sr // lea .Lk_sr(%rip),%r10 add x10, x10, :lo12:.Lk_sr add x8, x8, x10 cbnz w3, .Lschedule_am_decrypting // encrypting, output zeroth round key after transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) b .Lschedule_go .Lschedule_am_decrypting: // decrypting, output zeroth round key after shiftrows ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) eor x8, x8, #0x30 // xor $0x30, %r8 .Lschedule_go: cmp w1, #192 // cmp $192, %esi b.hi .Lschedule_256 b.eq .Lschedule_192 // 128: fall though ## ## .schedule_128 ## ## 128-bit specific part of key schedule. 
## ## This schedule is really simple, because all its parts ## are accomplished by the subroutines. ## .Lschedule_128: mov x0, #10 // mov $10, %esi .Loop_schedule_128: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round cbz x0, .Lschedule_mangle_last bl _vpaes_schedule_mangle // write output b .Loop_schedule_128 ## ## .aes_schedule_192 ## ## 192-bit specific part of key schedule. ## ## The main body of this schedule is the same as the 128-bit ## schedule, but with more smearing. The long, high side is ## stored in %xmm7 as before, and the short, low side is in ## the high bits of %xmm6. ## ## This schedule is somewhat nastier, however, because each ## round produces 192 bits of key material, or 1.5 round keys. ## Therefore, on each cycle we do 2 rounds and produce 3 round ## keys. ## .align 4 .Lschedule_192: sub x0, x0, #8 ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) bl _vpaes_schedule_transform // input transform mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4 ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros mov x0, #4 // mov $4, %esi .Loop_schedule_192: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0 bl _vpaes_schedule_mangle // save key n bl _vpaes_schedule_192_smear bl _vpaes_schedule_mangle // save key n+1 bl _vpaes_schedule_round cbz x0, .Lschedule_mangle_last bl _vpaes_schedule_mangle // save key n+2 bl _vpaes_schedule_192_smear b .Loop_schedule_192 ## ## .aes_schedule_256 ## ## 256-bit specific part of key schedule. ## ## The structure here is very similar to the 128-bit ## schedule, but with an additional "low side" in ## %xmm6. The low side's rounds are the same as the ## high side's, except no rcon and no rotation. 
## .align 4 .Lschedule_256: ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) bl _vpaes_schedule_transform // input transform mov x0, #7 // mov $7, %esi .Loop_schedule_256: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_mangle // output low result mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 // high round bl _vpaes_schedule_round cbz x0, .Lschedule_mangle_last bl _vpaes_schedule_mangle // low round. swap xmm7 and xmm6 dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 movi v4.16b, #0 mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5 mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7 bl _vpaes_schedule_low_round mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7 b .Loop_schedule_256 ## ## .aes_schedule_mangle_last ## ## Mangler for last round of key schedule ## Mangles %xmm0 ## when encrypting, outputs out(%xmm0) ^ 63 ## when decrypting, outputs unskew(%xmm0) ## ## Always called right before return... jumps to cleanup and exits ## .align 4 .Lschedule_mangle_last: // schedule last round key from xmm0 adrp x11, .Lk_deskew // lea .Lk_deskew(%rip),%r11 # prepare to deskew add x11, x11, :lo12:.Lk_deskew cbnz w3, .Lschedule_mangle_last_dec // encrypting ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1 adrp x11, .Lk_opt // lea .Lk_opt(%rip), %r11 # prepare to output transform add x11, x11, :lo12:.Lk_opt add x2, x2, #32 // add $32, %rdx tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute .Lschedule_mangle_last_dec: ld1 {v20.2d,v21.2d}, [x11] // reload constants sub x2, x2, #16 // add $-16, %rdx eor v0.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm0 bl _vpaes_schedule_transform // output transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key // cleanup eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2 eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3 eor v4.16b, v4.16b, v4.16b // vpxor 
%xmm4, %xmm4, %xmm4 eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5 eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6 eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7 ldp x29, x30, [sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size _vpaes_schedule_core,.-_vpaes_schedule_core ## ## .aes_schedule_192_smear ## ## Smear the short, low side in the 192-bit key schedule. ## ## Inputs: ## %xmm7: high side, b a x y ## %xmm6: low side, d c 0 0 ## %xmm13: 0 ## ## Outputs: ## %xmm6: b+c+d b+c 0 0 ## %xmm0: b+c+d b+c b a ## .type _vpaes_schedule_192_smear,%function .align 4 _vpaes_schedule_192_smear: movi v1.16b, #0 dup v0.4s, v7.s[3] ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0 ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros ret .size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear ## ## .aes_schedule_round ## ## Runs one main round of the key schedule on %xmm0, %xmm7 ## ## Specifically, runs subbytes on the high dword of %xmm0 ## then rotates it by one byte and xors into the low dword of ## %xmm7. ## ## Adds rcon from low byte of %xmm8, then rotates %xmm8 for ## next rcon. ## ## Smears the dwords of %xmm7 by xoring the low into the ## second low, result into third, result into highest. ## ## Returns results in %xmm7 = %xmm0. ## Clobbers %xmm1-%xmm4, %r11. 
## .type _vpaes_schedule_round,%function .align 4 _vpaes_schedule_round: // extract rcon from xmm8 movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4 ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1 ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 // rotate dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0 // fall through... // low round: same as high round, but no rotation and no rcon. _vpaes_schedule_low_round: // smear xmm7 ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4 // subbytes and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7 tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v7.16b, v7.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm7, %xmm7 tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output // add in smeared stuff eor v0.16b, v1.16b, 
v7.16b // vpxor %xmm7, %xmm1, %xmm0 eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7 ret .size _vpaes_schedule_round,.-_vpaes_schedule_round ## ## .aes_schedule_transform ## ## Linear-transform %xmm0 according to tables at (%r11) ## ## Requires that %xmm9 = 0x0F0F... as in preheat ## Output in %xmm0 ## Clobbers %xmm1, %xmm2 ## .type _vpaes_schedule_transform,%function .align 4 _vpaes_schedule_transform: and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 // vmovdqa (%r11), %xmm2 # lo tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 // vmovdqa 16(%r11), %xmm1 # hi tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 ret .size _vpaes_schedule_transform,.-_vpaes_schedule_transform ## ## .aes_schedule_mangle ## ## Mangle xmm0 from (basis-transformed) standard version ## to our version. ## ## On encrypt, ## xor with 0x63 ## multiply by circulant 0,1,1,1 ## apply shiftrows transform ## ## On decrypt, ## xor with 0x63 ## multiply by "inverse mixcolumns" circulant E,B,D,9 ## deskew ## apply shiftrows transform ## ## ## Writes out to (%rdx), and increments or decrements it ## Keeps track of round number mod 4 in %r8 ## Preserves xmm0 ## Clobbers xmm1-xmm5 ## .type _vpaes_schedule_mangle,%function .align 4 _vpaes_schedule_mangle: mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later // vmovdqa .Lk_mc_forward(%rip),%xmm5 cbnz w3, .Lschedule_mangle_dec // encrypting eor v4.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm4 add x2, x2, #16 // add $16, %rdx tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4 tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1 tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3 eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3 b .Lschedule_mangle_both .align 4 
.Lschedule_mangle_dec: // inverse mix columns // lea .Lk_dksd(%rip),%r11 ushr v1.16b, v4.16b, #4 // vpsrlb $4, %xmm4, %xmm1 # 1 = hi and v4.16b, v4.16b, v17.16b // vpand %xmm9, %xmm4, %xmm4 # 4 = lo // vmovdqa 0x00(%r11), %xmm2 tbl v2.16b, {v24.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 // vmovdqa 0x10(%r11), %xmm3 tbl v3.16b, {v25.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 // vmovdqa 0x20(%r11), %xmm2 tbl v2.16b, {v26.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 // vmovdqa 0x30(%r11), %xmm3 tbl v3.16b, {v27.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 // vmovdqa 0x40(%r11), %xmm2 tbl v2.16b, {v28.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 // vmovdqa 0x50(%r11), %xmm3 tbl v3.16b, {v29.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 // vmovdqa 0x60(%r11), %xmm2 tbl v2.16b, {v30.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 // vmovdqa 0x70(%r11), %xmm4 tbl v4.16b, {v31.16b}, v1.16b // vpshufb %xmm1, %xmm4, %xmm4 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 eor v3.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm3 sub x2, x2, #16 // add $-16, %rdx .Lschedule_mangle_both: tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 add x8, x8, #48 // add $-16, %r8 and x8, x8, #~(1<<6) // and $0x30, %r8 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) ret .size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle .globl vpaes_set_encrypt_key .hidden vpaes_set_encrypt_key .type vpaes_set_encrypt_key,%function .align 4 vpaes_set_encrypt_key: AARCH64_SIGN_LINK_REGISTER stp 
x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so lsr w9, w1, #5 // shr $5,%eax add w9, w9, #5 // $5,%eax str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; mov w3, #0 // mov $0,%ecx mov x8, #0x30 // mov $0x30,%r8d bl _vpaes_schedule_core eor x0, x0, x0 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key .globl vpaes_set_decrypt_key .hidden vpaes_set_decrypt_key .type vpaes_set_decrypt_key,%function .align 4 vpaes_set_decrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so lsr w9, w1, #5 // shr $5,%eax add w9, w9, #5 // $5,%eax str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; lsl w9, w9, #4 // shl $4,%eax add x2, x2, #16 // lea 16(%rdx,%rax),%rdx add x2, x2, x9 mov w3, #1 // mov $1,%ecx lsr w8, w1, #1 // shr $1,%r8d and x8, x8, #32 // and $32,%r8d eor x8, x8, #32 // xor $32,%r8d # nbits==192?0:32 bl _vpaes_schedule_core ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key .globl vpaes_cbc_encrypt .hidden vpaes_cbc_encrypt .type vpaes_cbc_encrypt,%function .align 4 vpaes_cbc_encrypt: AARCH64_SIGN_LINK_REGISTER cbz x2, .Lcbc_abort cmp w5, #0 // check direction b.eq vpaes_cbc_decrypt stp x29,x30,[sp,#-16]! 
add x29,sp,#0 mov x17, x2 // reassign mov x2, x3 // reassign ld1 {v0.16b}, [x4] // load ivec bl _vpaes_encrypt_preheat b .Lcbc_enc_loop .align 4 .Lcbc_enc_loop: ld1 {v7.16b}, [x0],#16 // load input eor v7.16b, v7.16b, v0.16b // xor with ivec bl _vpaes_encrypt_core st1 {v0.16b}, [x1],#16 // save output subs x17, x17, #16 b.hi .Lcbc_enc_loop st1 {v0.16b}, [x4] // write ivec ldp x29,x30,[sp],#16 .Lcbc_abort: AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_cbc_encrypt,.-vpaes_cbc_encrypt .type vpaes_cbc_decrypt,%function .align 4 vpaes_cbc_decrypt: // Not adding AARCH64_SIGN_LINK_REGISTER here because vpaes_cbc_decrypt is jumped to // only from vpaes_cbc_encrypt which has already signed the return address. stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so stp d10,d11,[sp,#-16]! stp d12,d13,[sp,#-16]! stp d14,d15,[sp,#-16]! mov x17, x2 // reassign mov x2, x3 // reassign ld1 {v6.16b}, [x4] // load ivec bl _vpaes_decrypt_preheat tst x17, #16 b.eq .Lcbc_dec_loop2x ld1 {v7.16b}, [x0], #16 // load input bl _vpaes_decrypt_core eor v0.16b, v0.16b, v6.16b // xor with ivec orr v6.16b, v7.16b, v7.16b // next ivec value st1 {v0.16b}, [x1], #16 subs x17, x17, #16 b.ls .Lcbc_dec_done .align 4 .Lcbc_dec_loop2x: ld1 {v14.16b,v15.16b}, [x0], #32 bl _vpaes_decrypt_2x eor v0.16b, v0.16b, v6.16b // xor with ivec eor v1.16b, v1.16b, v14.16b orr v6.16b, v15.16b, v15.16b st1 {v0.16b,v1.16b}, [x1], #32 subs x17, x17, #32 b.hi .Lcbc_dec_loop2x .Lcbc_dec_done: st1 {v6.16b}, [x4] ldp d14,d15,[sp],#16 ldp d12,d13,[sp],#16 ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_cbc_decrypt,.-vpaes_cbc_decrypt .globl vpaes_ctr32_encrypt_blocks .hidden vpaes_ctr32_encrypt_blocks .type vpaes_ctr32_encrypt_blocks,%function .align 4 vpaes_ctr32_encrypt_blocks: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so stp d10,d11,[sp,#-16]! stp d12,d13,[sp,#-16]! 
stp d14,d15,[sp,#-16]! cbz x2, .Lctr32_done // Note, unlike the other functions, x2 here is measured in blocks, // not bytes. mov x17, x2 mov x2, x3 // Load the IV and counter portion. ldr w6, [x4, #12] ld1 {v7.16b}, [x4] bl _vpaes_encrypt_preheat tst x17, #1 rev w6, w6 // The counter is big-endian. b.eq .Lctr32_prep_loop // Handle one block so the remaining block count is even for // _vpaes_encrypt_2x. ld1 {v6.16b}, [x0], #16 // .Load input ahead of time bl _vpaes_encrypt_core eor v0.16b, v0.16b, v6.16b // XOR input and result st1 {v0.16b}, [x1], #16 subs x17, x17, #1 // Update the counter. add w6, w6, #1 rev w7, w6 mov v7.s[3], w7 b.ls .Lctr32_done .Lctr32_prep_loop: // _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x // uses v14 and v15. mov v15.16b, v7.16b mov v14.16b, v7.16b add w6, w6, #1 rev w7, w6 mov v15.s[3], w7 .Lctr32_loop: ld1 {v6.16b,v7.16b}, [x0], #32 // .Load input ahead of time bl _vpaes_encrypt_2x eor v0.16b, v0.16b, v6.16b // XOR input and result eor v1.16b, v1.16b, v7.16b // XOR input and result (#2) st1 {v0.16b,v1.16b}, [x1], #32 subs x17, x17, #2 // Update the counter. add w7, w6, #1 add w6, w6, #2 rev w7, w7 mov v14.s[3], w7 rev w7, w6 mov v15.s[3], w7 b.hi .Lctr32_loop .Lctr32_done: ldp d14,d15,[sp],#16 ldp d12,d13,[sp],#16 ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/vpaes-armv8-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include .section .rodata .align 7 // totally strategic alignment _vpaes_consts: Lk_mc_forward: // mc_forward .quad 0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 Lk_mc_backward: // mc_backward .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F Lk_sr: // sr .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 // // "Hot" constants // Lk_inv: // inv, inva .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 Lk_ipt: // input transform (lo, hi) .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 Lk_sbo: // sbou, sbot .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA Lk_sb1: // sb1u, sb1t .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 Lk_sb2: // sb2u, sb2t .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD // // Decryption stuff // Lk_dipt: // decryption input transform .quad 0x0F505B040B545F00, 0x154A411E114E451A .quad 0x86E383E660056500, 0x12771772F491F194 Lk_dsbo: // decryption sbox final output .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C Lk_dsb9: // decryption sbox output *9*u, *9*t .quad 0x851C03539A86D600, 0xCAD51F504F994CC9 .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 Lk_dsbd: // decryption sbox output *D*u, *D*t .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 Lk_dsbb: // decryption sbox output *B*u, *B*t .quad 
0xD022649296B44200, 0x602646F6B0F2D404 .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B Lk_dsbe: // decryption sbox output *E*u, *E*t .quad 0x46F2929626D4D000, 0x2242600464B4F6B0 .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 // // Key schedule constants // Lk_dksd: // decryption key schedule: invskew x*D .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E Lk_dksb: // decryption key schedule: invskew x*B .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 Lk_dkse: // decryption key schedule: invskew x*E + 0x63 .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 Lk_dks9: // decryption key schedule: invskew x*9 .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE Lk_rcon: // rcon .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 Lk_opt: // output transform .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 Lk_deskew: // deskew tables: inverts the sbox's "skew" .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .align 2 .align 6 .text ## ## _aes_preheat ## ## Fills register %r10 -> .aes_consts (so you can -fPIC) ## and %xmm9-%xmm15 as specified below. ## .def _vpaes_encrypt_preheat .type 32 .endef .align 4 _vpaes_encrypt_preheat: adrp x10, Lk_inv add x10, x10, :lo12:Lk_inv movi v17.16b, #0x0f ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // Lk_ipt, Lk_sbo ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // Lk_sb1, Lk_sb2 ret ## ## _aes_encrypt_core ## ## AES-encrypt %xmm0. 
## ## Inputs: ## %xmm0 = input ## %xmm9-%xmm15 as in _vpaes_preheat ## (%rdx) = scheduled keys ## ## Output in %xmm0 ## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax ## Preserves %xmm6 - %xmm8 so you get some local vectors ## ## .def _vpaes_encrypt_core .type 32 .endef .align 4 _vpaes_encrypt_core: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, Lk_mc_forward+16 add x11, x11, :lo12:Lk_mc_forward+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 b Lenc_entry .align 4 Lenc_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D and x11, x11, #~(1<<6) // and $0x30, %r11 # 
... mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D sub w8, w8, #1 // nr-- Lenc_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, Lenc_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 ret .globl vpaes_encrypt .def vpaes_encrypt .type 32 .endef .align 4 vpaes_encrypt: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ld1 {v7.16b}, [x0] bl _vpaes_encrypt_preheat bl _vpaes_encrypt_core st1 {v0.16b}, [x1] ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .def _vpaes_encrypt_2x .type 32 .endef .align 4 _vpaes_encrypt_2x: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, Lk_mc_forward+16 add x11, x11, :lo12:Lk_mc_forward+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 and v9.16b, v15.16b, v17.16b ushr v8.16b, v15.16b, #4 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 tbl v9.16b, {v20.16b}, v9.16b // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 tbl v10.16b, {v21.16b}, v8.16b eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v8.16b, v9.16b, v16.16b eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 eor v8.16b, v8.16b, v10.16b b Lenc_2x_entry .align 4 Lenc_2x_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u tbl v12.16b, {v25.16b}, v10.16b ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t tbl v8.16b, {v24.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u tbl v13.16b, {v27.16b}, v10.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t tbl v10.16b, {v26.16b}, v11.16b ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B tbl v11.16b, {v8.16b}, v1.16b eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A 
eor v10.16b, v10.16b, v13.16b tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D tbl v8.16b, {v8.16b}, v4.16b eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B eor v11.16b, v11.16b, v10.16b tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C tbl v12.16b, {v11.16b},v1.16b eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D eor v8.16b, v8.16b, v11.16b and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D eor v8.16b, v8.16b, v12.16b sub w8, w8, #1 // nr-- Lenc_2x_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i and v9.16b, v8.16b, v17.16b ushr v8.16b, v8.16b, #4 tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k tbl v13.16b, {v19.16b},v9.16b eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j eor v9.16b, v9.16b, v8.16b tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v11.16b, {v18.16b},v8.16b tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j tbl v12.16b, {v18.16b},v9.16b eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v11.16b, v11.16b, v13.16b eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k eor v12.16b, v12.16b, v13.16b tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v10.16b, {v18.16b},v11.16b tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak tbl v11.16b, {v18.16b},v12.16b eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v10.16b, v10.16b, v9.16b eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo eor v11.16b, v11.16b, v8.16b ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, Lenc_2x_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), 
%xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou tbl v12.16b, {v22.16b}, v10.16b ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t tbl v8.16b, {v23.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0 tbl v1.16b, {v8.16b},v1.16b ret .def _vpaes_decrypt_preheat .type 32 .endef .align 4 _vpaes_decrypt_preheat: adrp x10, Lk_inv add x10, x10, :lo12:Lk_inv movi v17.16b, #0x0f adrp x11, Lk_dipt add x11, x11, :lo12:Lk_dipt ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x11],#64 // Lk_dipt, Lk_dsbo ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x11],#64 // Lk_dsb9, Lk_dsbd ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x11] // Lk_dsbb, Lk_dsbe ret ## ## Decryption core ## ## Same API as encryption core. 
## .def _vpaes_decrypt_core .type 32 .endef .align 4 _vpaes_decrypt_core: mov x9, x2 ldr w8, [x2,#240] // pull rounds // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11 eor x11, x11, #0x30 // xor $0x30, %r11 adrp x10, Lk_sr add x10, x10, :lo12:Lk_sr and x11, x11, #0x30 // and $0x30, %r11 add x11, x11, x10 adrp x10, Lk_mc_forward+48 add x10, x10, :lo12:Lk_mc_forward+48 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 ld1 {v5.2d}, [x10] // vmovdqa Lk_mc_forward+48(%rip), %xmm5 // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 b Ldec_entry .align 4 Ldec_loop: // // Inverse mix columns // // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0 // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v1.16b, {v29.16b}, v3.16b // 
vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch sub w8, w8, #1 // sub $1,%rax # nr-- Ldec_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0 cbnz w8, Ldec_loop // middle of last round // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # Lk_sr-Lk_dsbd=-0x160 tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, 
%xmm4, %xmm4 # 4 = sb1u + k eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A tbl v0.16b, {v0.16b}, v2.16b // vpshufb %xmm2, %xmm0, %xmm0 ret .globl vpaes_decrypt .def vpaes_decrypt .type 32 .endef .align 4 vpaes_decrypt: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ld1 {v7.16b}, [x0] bl _vpaes_decrypt_preheat bl _vpaes_decrypt_core st1 {v0.16b}, [x1] ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // v14-v15 input, v0-v1 output .def _vpaes_decrypt_2x .type 32 .endef .align 4 _vpaes_decrypt_2x: mov x9, x2 ldr w8, [x2,#240] // pull rounds // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11 eor x11, x11, #0x30 // xor $0x30, %r11 adrp x10, Lk_sr add x10, x10, :lo12:Lk_sr and x11, x11, #0x30 // and $0x30, %r11 add x11, x11, x10 adrp x10, Lk_mc_forward+48 add x10, x10, :lo12:Lk_mc_forward+48 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 and v9.16b, v15.16b, v17.16b ushr v8.16b, v15.16b, #4 tbl v2.16b, {v20.16b},v1.16b // vpshufb %xmm1, %xmm2, %xmm2 tbl v10.16b, {v20.16b},v9.16b ld1 {v5.2d}, [x10] // vmovdqa Lk_mc_forward+48(%rip), %xmm5 // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi tbl v0.16b, {v21.16b},v0.16b // vpshufb %xmm0, %xmm1, %xmm0 tbl v8.16b, {v21.16b},v8.16b eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2 eor v10.16b, v10.16b, v16.16b eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 eor v8.16b, v8.16b, v10.16b b Ldec_2x_entry .align 4 Ldec_2x_loop: // // Inverse mix columns // // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u tbl v12.16b, {v24.16b}, v10.16b tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t tbl v9.16b, {v25.16b}, v11.16b eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0 eor v8.16b, v12.16b, v16.16b // 
vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu tbl v12.16b, {v26.16b}, v10.16b tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v8.16b, {v8.16b},v5.16b tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt tbl v9.16b, {v27.16b}, v11.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch eor v8.16b, v8.16b, v12.16b // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu tbl v12.16b, {v28.16b}, v10.16b tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v8.16b, {v8.16b},v5.16b tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt tbl v9.16b, {v29.16b}, v11.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch eor v8.16b, v8.16b, v12.16b // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu tbl v12.16b, {v30.16b}, v10.16b tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v8.16b, {v8.16b},v5.16b tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet tbl v9.16b, {v31.16b}, v11.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch eor v8.16b, v8.16b, v12.16b ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b sub w8, w8, #1 // sub $1,%rax # nr-- Ldec_2x_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand 
%xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i and v9.16b, v8.16b, v17.16b ushr v8.16b, v8.16b, #4 tbl v2.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k tbl v10.16b, {v19.16b},v9.16b eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j eor v9.16b, v9.16b, v8.16b tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v11.16b, {v18.16b},v8.16b tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j tbl v12.16b, {v18.16b},v9.16b eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v11.16b, v11.16b, v10.16b eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k eor v12.16b, v12.16b, v10.16b tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v10.16b, {v18.16b},v11.16b tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak tbl v11.16b, {v18.16b},v12.16b eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v10.16b, v10.16b, v9.16b eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo eor v11.16b, v11.16b, v8.16b ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0 cbnz w8, Ldec_2x_loop // middle of last round // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou tbl v12.16b, {v22.16b}, v10.16b // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t tbl v9.16b, {v23.16b}, v11.16b ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # Lk_sr-Lk_dsbd=-0x160 eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A eor v8.16b, v9.16b, v12.16b tbl v0.16b, {v0.16b},v2.16b // vpshufb %xmm2, %xmm0, %xmm0 tbl v1.16b, {v8.16b},v2.16b ret ######################################################## ## ## ## AES key schedule ## ## ## 
######################################################## .def _vpaes_key_preheat .type 32 .endef .align 4 _vpaes_key_preheat: adrp x10, Lk_inv add x10, x10, :lo12:Lk_inv movi v16.16b, #0x5b // Lk_s63 adrp x11, Lk_sb1 add x11, x11, :lo12:Lk_sb1 movi v17.16b, #0x0f // Lk_s0F ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt adrp x10, Lk_dksd add x10, x10, :lo12:Lk_dksd ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1 adrp x11, Lk_mc_forward add x11, x11, :lo12:Lk_mc_forward ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9 ld1 {v8.2d}, [x10] // Lk_rcon ld1 {v9.2d}, [x11] // Lk_mc_forward[0] ret .def _vpaes_schedule_core .type 32 .endef .align 4 _vpaes_schedule_core: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp,#-16]! add x29,sp,#0 bl _vpaes_key_preheat // load the tables ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned) // input transform mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3 bl _vpaes_schedule_transform mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7 adrp x10, Lk_sr // lea Lk_sr(%rip),%r10 add x10, x10, :lo12:Lk_sr add x8, x8, x10 cbnz w3, Lschedule_am_decrypting // encrypting, output zeroth round key after transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) b Lschedule_go Lschedule_am_decrypting: // decrypting, output zeroth round key after shiftrows ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) eor x8, x8, #0x30 // xor $0x30, %r8 Lschedule_go: cmp w1, #192 // cmp $192, %esi b.hi Lschedule_256 b.eq Lschedule_192 // 128: fall though ## ## .schedule_128 ## ## 128-bit specific part of key schedule. ## ## This schedule is really simple, because all its parts ## are accomplished by the subroutines. 
## Lschedule_128: mov x0, #10 // mov $10, %esi Loop_schedule_128: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // write output b Loop_schedule_128 ## ## .aes_schedule_192 ## ## 192-bit specific part of key schedule. ## ## The main body of this schedule is the same as the 128-bit ## schedule, but with more smearing. The long, high side is ## stored in %xmm7 as before, and the short, low side is in ## the high bits of %xmm6. ## ## This schedule is somewhat nastier, however, because each ## round produces 192 bits of key material, or 1.5 round keys. ## Therefore, on each cycle we do 2 rounds and produce 3 round ## keys. ## .align 4 Lschedule_192: sub x0, x0, #8 ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) bl _vpaes_schedule_transform // input transform mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4 ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros mov x0, #4 // mov $4, %esi Loop_schedule_192: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0 bl _vpaes_schedule_mangle // save key n bl _vpaes_schedule_192_smear bl _vpaes_schedule_mangle // save key n+1 bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // save key n+2 bl _vpaes_schedule_192_smear b Loop_schedule_192 ## ## .aes_schedule_256 ## ## 256-bit specific part of key schedule. ## ## The structure here is very similar to the 128-bit ## schedule, but with an additional "low side" in ## %xmm6. The low side's rounds are the same as the ## high side's, except no rcon and no rotation. 
## .align 4 Lschedule_256: ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) bl _vpaes_schedule_transform // input transform mov x0, #7 // mov $7, %esi Loop_schedule_256: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_mangle // output low result mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 // high round bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // low round. swap xmm7 and xmm6 dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 movi v4.16b, #0 mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5 mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7 bl _vpaes_schedule_low_round mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7 b Loop_schedule_256 ## ## .aes_schedule_mangle_last ## ## Mangler for last round of key schedule ## Mangles %xmm0 ## when encrypting, outputs out(%xmm0) ^ 63 ## when decrypting, outputs unskew(%xmm0) ## ## Always called right before return... jumps to cleanup and exits ## .align 4 Lschedule_mangle_last: // schedule last round key from xmm0 adrp x11, Lk_deskew // lea Lk_deskew(%rip),%r11 # prepare to deskew add x11, x11, :lo12:Lk_deskew cbnz w3, Lschedule_mangle_last_dec // encrypting ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1 adrp x11, Lk_opt // lea Lk_opt(%rip), %r11 # prepare to output transform add x11, x11, :lo12:Lk_opt add x2, x2, #32 // add $32, %rdx tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute Lschedule_mangle_last_dec: ld1 {v20.2d,v21.2d}, [x11] // reload constants sub x2, x2, #16 // add $-16, %rdx eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0 bl _vpaes_schedule_transform // output transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key // cleanup eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2 eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3 eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, 
%xmm4 eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5 eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6 eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7 ldp x29, x30, [sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret ## ## .aes_schedule_192_smear ## ## Smear the short, low side in the 192-bit key schedule. ## ## Inputs: ## %xmm7: high side, b a x y ## %xmm6: low side, d c 0 0 ## %xmm13: 0 ## ## Outputs: ## %xmm6: b+c+d b+c 0 0 ## %xmm0: b+c+d b+c b a ## .def _vpaes_schedule_192_smear .type 32 .endef .align 4 _vpaes_schedule_192_smear: movi v1.16b, #0 dup v0.4s, v7.s[3] ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0 ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros ret ## ## .aes_schedule_round ## ## Runs one main round of the key schedule on %xmm0, %xmm7 ## ## Specifically, runs subbytes on the high dword of %xmm0 ## then rotates it by one byte and xors into the low dword of ## %xmm7. ## ## Adds rcon from low byte of %xmm8, then rotates %xmm8 for ## next rcon. ## ## Smears the dwords of %xmm7 by xoring the low into the ## second low, result into third, result into highest. ## ## Returns results in %xmm7 = %xmm0. ## Clobbers %xmm1-%xmm4, %r11. 
## .def _vpaes_schedule_round .type 32 .endef .align 4 _vpaes_schedule_round: // extract rcon from xmm8 movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4 ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1 ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 // rotate dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0 // fall through... // low round: same as high round, but no rotation and no rcon. _vpaes_schedule_low_round: // smear xmm7 ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4 // subbytes and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7 tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7 tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output // add in smeared stuff eor v0.16b, 
v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0 eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7 ret ## ## .aes_schedule_transform ## ## Linear-transform %xmm0 according to tables at (%r11) ## ## Requires that %xmm9 = 0x0F0F... as in preheat ## Output in %xmm0 ## Clobbers %xmm1, %xmm2 ## .def _vpaes_schedule_transform .type 32 .endef .align 4 _vpaes_schedule_transform: and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 // vmovdqa (%r11), %xmm2 # lo tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 // vmovdqa 16(%r11), %xmm1 # hi tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 ret ## ## .aes_schedule_mangle ## ## Mangle xmm0 from (basis-transformed) standard version ## to our version. ## ## On encrypt, ## xor with 0x63 ## multiply by circulant 0,1,1,1 ## apply shiftrows transform ## ## On decrypt, ## xor with 0x63 ## multiply by "inverse mixcolumns" circulant E,B,D,9 ## deskew ## apply shiftrows transform ## ## ## Writes out to (%rdx), and increments or decrements it ## Keeps track of round number mod 4 in %r8 ## Preserves xmm0 ## Clobbers xmm1-xmm5 ## .def _vpaes_schedule_mangle .type 32 .endef .align 4 _vpaes_schedule_mangle: mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later // vmovdqa .Lk_mc_forward(%rip),%xmm5 cbnz w3, Lschedule_mangle_dec // encrypting eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4 add x2, x2, #16 // add $16, %rdx tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4 tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1 tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3 eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3 b Lschedule_mangle_both .align 4 Lschedule_mangle_dec: // inverse mix columns // lea .Lk_dksd(%rip),%r11 ushr v1.16b, v4.16b, #4 // 
vpsrlb $4, %xmm4, %xmm1 # 1 = hi and v4.16b, v4.16b, v17.16b // vpand %xmm9, %xmm4, %xmm4 # 4 = lo // vmovdqa 0x00(%r11), %xmm2 tbl v2.16b, {v24.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 // vmovdqa 0x10(%r11), %xmm3 tbl v3.16b, {v25.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 // vmovdqa 0x20(%r11), %xmm2 tbl v2.16b, {v26.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 // vmovdqa 0x30(%r11), %xmm3 tbl v3.16b, {v27.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 // vmovdqa 0x40(%r11), %xmm2 tbl v2.16b, {v28.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 // vmovdqa 0x50(%r11), %xmm3 tbl v3.16b, {v29.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 // vmovdqa 0x60(%r11), %xmm2 tbl v2.16b, {v30.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 // vmovdqa 0x70(%r11), %xmm4 tbl v4.16b, {v31.16b}, v1.16b // vpshufb %xmm1, %xmm4, %xmm4 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 eor v3.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm3 sub x2, x2, #16 // add $-16, %rdx Lschedule_mangle_both: tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 add x8, x8, #48 // add $-16, %r8 and x8, x8, #~(1<<6) // and $0x30, %r8 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) ret .globl vpaes_set_encrypt_key .def vpaes_set_encrypt_key .type 32 .endef .align 4 vpaes_set_encrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! 
// ABI spec says so lsr w9, w1, #5 // shr $5,%eax add w9, w9, #5 // $5,%eax str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; mov w3, #0 // mov $0,%ecx mov x8, #0x30 // mov $0x30,%r8d bl _vpaes_schedule_core eor x0, x0, x0 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl vpaes_set_decrypt_key .def vpaes_set_decrypt_key .type 32 .endef .align 4 vpaes_set_decrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so lsr w9, w1, #5 // shr $5,%eax add w9, w9, #5 // $5,%eax str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; lsl w9, w9, #4 // shl $4,%eax add x2, x2, #16 // lea 16(%rdx,%rax),%rdx add x2, x2, x9 mov w3, #1 // mov $1,%ecx lsr w8, w1, #1 // shr $1,%r8d and x8, x8, #32 // and $32,%r8d eor x8, x8, #32 // xor $32,%r8d # nbits==192?0:32 bl _vpaes_schedule_core ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl vpaes_cbc_encrypt .def vpaes_cbc_encrypt .type 32 .endef .align 4 vpaes_cbc_encrypt: AARCH64_SIGN_LINK_REGISTER cbz x2, Lcbc_abort cmp w5, #0 // check direction b.eq vpaes_cbc_decrypt stp x29,x30,[sp,#-16]! add x29,sp,#0 mov x17, x2 // reassign mov x2, x3 // reassign ld1 {v0.16b}, [x4] // load ivec bl _vpaes_encrypt_preheat b Lcbc_enc_loop .align 4 Lcbc_enc_loop: ld1 {v7.16b}, [x0],#16 // load input eor v7.16b, v7.16b, v0.16b // xor with ivec bl _vpaes_encrypt_core st1 {v0.16b}, [x1],#16 // save output subs x17, x17, #16 b.hi Lcbc_enc_loop st1 {v0.16b}, [x4] // write ivec ldp x29,x30,[sp],#16 Lcbc_abort: AARCH64_VALIDATE_LINK_REGISTER ret .def vpaes_cbc_decrypt .type 32 .endef .align 4 vpaes_cbc_decrypt: // Not adding AARCH64_SIGN_LINK_REGISTER here because vpaes_cbc_decrypt is jumped to // only from vpaes_cbc_encrypt which has already signed the return address. stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so stp d10,d11,[sp,#-16]! stp d12,d13,[sp,#-16]! 
stp d14,d15,[sp,#-16]! mov x17, x2 // reassign mov x2, x3 // reassign ld1 {v6.16b}, [x4] // load ivec bl _vpaes_decrypt_preheat tst x17, #16 b.eq Lcbc_dec_loop2x ld1 {v7.16b}, [x0], #16 // load input bl _vpaes_decrypt_core eor v0.16b, v0.16b, v6.16b // xor with ivec orr v6.16b, v7.16b, v7.16b // next ivec value st1 {v0.16b}, [x1], #16 subs x17, x17, #16 b.ls Lcbc_dec_done .align 4 Lcbc_dec_loop2x: ld1 {v14.16b,v15.16b}, [x0], #32 bl _vpaes_decrypt_2x eor v0.16b, v0.16b, v6.16b // xor with ivec eor v1.16b, v1.16b, v14.16b orr v6.16b, v15.16b, v15.16b st1 {v0.16b,v1.16b}, [x1], #32 subs x17, x17, #32 b.hi Lcbc_dec_loop2x Lcbc_dec_done: st1 {v6.16b}, [x4] ldp d14,d15,[sp],#16 ldp d12,d13,[sp],#16 ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl vpaes_ctr32_encrypt_blocks .def vpaes_ctr32_encrypt_blocks .type 32 .endef .align 4 vpaes_ctr32_encrypt_blocks: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so stp d10,d11,[sp,#-16]! stp d12,d13,[sp,#-16]! stp d14,d15,[sp,#-16]! cbz x2, Lctr32_done // Note, unlike the other functions, x2 here is measured in blocks, // not bytes. mov x17, x2 mov x2, x3 // Load the IV and counter portion. ldr w6, [x4, #12] ld1 {v7.16b}, [x4] bl _vpaes_encrypt_preheat tst x17, #1 rev w6, w6 // The counter is big-endian. b.eq Lctr32_prep_loop // Handle one block so the remaining block count is even for // _vpaes_encrypt_2x. ld1 {v6.16b}, [x0], #16 // Load input ahead of time bl _vpaes_encrypt_core eor v0.16b, v0.16b, v6.16b // XOR input and result st1 {v0.16b}, [x1], #16 subs x17, x17, #1 // Update the counter. add w6, w6, #1 rev w7, w6 mov v7.s[3], w7 b.ls Lctr32_done Lctr32_prep_loop: // _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x // uses v14 and v15. 
mov v15.16b, v7.16b mov v14.16b, v7.16b add w6, w6, #1 rev w7, w6 mov v15.s[3], w7 Lctr32_loop: ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time bl _vpaes_encrypt_2x eor v0.16b, v0.16b, v6.16b // XOR input and result eor v1.16b, v1.16b, v7.16b // XOR input and result (#2) st1 {v0.16b,v1.16b}, [x1], #32 subs x17, x17, #2 // Update the counter. add w7, w6, #1 add w6, w6, #2 rev w7, w7 mov v14.s[3], w7 rev w7, w6 mov v15.s[3], w7 b.hi Lctr32_loop Lctr32_done: ldp d14,d15,[sp],#16 ldp d12,d13,[sp],#16 ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/vpaes-x86-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text #ifdef BORINGSSL_DISPATCH_TEST #endif .align 6,0x90 L_vpaes_consts: .long 218628480,235210255,168496130,67568393 .long 252381056,17041926,33884169,51187212 .long 252645135,252645135,252645135,252645135 .long 1512730624,3266504856,1377990664,3401244816 .long 830229760,1275146365,2969422977,3447763452 .long 3411033600,2979783055,338359620,2782886510 .long 4209124096,907596821,221174255,1006095553 .long 191964160,3799684038,3164090317,1589111125 .long 182528256,1777043520,2877432650,3265356744 .long 1874708224,3503451415,3305285752,363511674 .long 1606117888,3487855781,1093350906,2384367825 .long 197121,67569157,134941193,202313229 .long 67569157,134941193,202313229,197121 .long 134941193,202313229,197121,67569157 .long 202313229,197121,67569157,134941193 .long 33619971,100992007,168364043,235736079 .long 235736079,33619971,100992007,168364043 .long 168364043,235736079,33619971,100992007 .long 100992007,168364043,235736079,33619971 .long 50462976,117835012,185207048,252579084 .long 252314880,51251460,117574920,184942860 .long 184682752,252054788,50987272,118359308 .long 118099200,185467140,251790600,50727180 .long 2946363062,528716217,1300004225,1881839624 .long 1532713819,1532713819,1532713819,1532713819 .long 3602276352,4288629033,3737020424,4153884961 .long 1354558464,32357713,2958822624,3775749553 .long 1201988352,132424512,1572796698,503232858 .long 2213177600,1597421020,4103937655,675398315 .long 2749646592,4273543773,1511898873,121693092 .long 3040248576,1103263732,2871565598,1608280554 .long 2236667136,2588920351,482954393,64377734 .long 3069987328,291237287,2117370568,3650299247 .long 533321216,3573750986,2572112006,1401264716 .long 1339849704,2721158661,548607111,3445553514 .long 2128193280,3054596040,2183486460,1257083700 .long 655635200,1165381986,3923443150,2344132524 .long 190078720,256924420,290342170,357187870 .long 1610966272,2263057382,4103205268,309794674 .long 
2592527872,2233205587,1335446729,3402964816 .long 3973531904,3225098121,3002836325,1918774430 .long 3870401024,2102906079,2284471353,4117666579 .long 617007872,1021508343,366931923,691083277 .long 2528395776,3491914898,2968704004,1613121270 .long 3445188352,3247741094,844474987,4093578302 .long 651481088,1190302358,1689581232,574775300 .long 4289380608,206939853,2555985458,2489840491 .long 2130264064,327674451,3566485037,3349835193 .long 2470714624,316102159,3636825756,3393945945 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105 .byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83 .byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117 .byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105 .byte 118,101,114,115,105,116,121,41,0 .align 6,0x90 .private_extern __vpaes_preheat .align 4 __vpaes_preheat: addl (%esp),%ebp movdqa -48(%ebp),%xmm7 movdqa -16(%ebp),%xmm6 ret .private_extern __vpaes_encrypt_core .align 4 __vpaes_encrypt_core: movl $16,%ecx movl 240(%edx),%eax movdqa %xmm6,%xmm1 movdqa (%ebp),%xmm2 pandn %xmm0,%xmm1 pand %xmm6,%xmm0 movdqu (%edx),%xmm5 .byte 102,15,56,0,208 movdqa 16(%ebp),%xmm0 pxor %xmm5,%xmm2 psrld $4,%xmm1 addl $16,%edx .byte 102,15,56,0,193 leal 192(%ebp),%ebx pxor %xmm2,%xmm0 jmp L000enc_entry .align 4,0x90 L001enc_loop: movdqa 32(%ebp),%xmm4 movdqa 48(%ebp),%xmm0 .byte 102,15,56,0,226 .byte 102,15,56,0,195 pxor %xmm5,%xmm4 movdqa 64(%ebp),%xmm5 pxor %xmm4,%xmm0 movdqa -64(%ebx,%ecx,1),%xmm1 .byte 102,15,56,0,234 movdqa 80(%ebp),%xmm2 movdqa (%ebx,%ecx,1),%xmm4 .byte 102,15,56,0,211 movdqa %xmm0,%xmm3 pxor %xmm5,%xmm2 .byte 102,15,56,0,193 addl $16,%edx pxor %xmm2,%xmm0 .byte 102,15,56,0,220 addl $16,%ecx pxor %xmm0,%xmm3 .byte 102,15,56,0,193 andl $48,%ecx subl $1,%eax pxor %xmm3,%xmm0 L000enc_entry: movdqa %xmm6,%xmm1 movdqa -32(%ebp),%xmm5 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm6,%xmm0 .byte 102,15,56,0,232 movdqa %xmm7,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm7,%xmm4 pxor %xmm5,%xmm3 
.byte 102,15,56,0,224 movdqa %xmm7,%xmm2 pxor %xmm5,%xmm4 .byte 102,15,56,0,211 movdqa %xmm7,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu (%edx),%xmm5 pxor %xmm1,%xmm3 jnz L001enc_loop movdqa 96(%ebp),%xmm4 movdqa 112(%ebp),%xmm0 .byte 102,15,56,0,226 pxor %xmm5,%xmm4 .byte 102,15,56,0,195 movdqa 64(%ebx,%ecx,1),%xmm1 pxor %xmm4,%xmm0 .byte 102,15,56,0,193 ret .private_extern __vpaes_decrypt_core .align 4 __vpaes_decrypt_core: leal 608(%ebp),%ebx movl 240(%edx),%eax movdqa %xmm6,%xmm1 movdqa -64(%ebx),%xmm2 pandn %xmm0,%xmm1 movl %eax,%ecx psrld $4,%xmm1 movdqu (%edx),%xmm5 shll $4,%ecx pand %xmm6,%xmm0 .byte 102,15,56,0,208 movdqa -48(%ebx),%xmm0 xorl $48,%ecx .byte 102,15,56,0,193 andl $48,%ecx pxor %xmm5,%xmm2 movdqa 176(%ebp),%xmm5 pxor %xmm2,%xmm0 addl $16,%edx leal -352(%ebx,%ecx,1),%ecx jmp L002dec_entry .align 4,0x90 L003dec_loop: movdqa -32(%ebx),%xmm4 movdqa -16(%ebx),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa (%ebx),%xmm4 pxor %xmm1,%xmm0 movdqa 16(%ebx),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa 32(%ebx),%xmm4 pxor %xmm1,%xmm0 movdqa 48(%ebx),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa 64(%ebx),%xmm4 pxor %xmm1,%xmm0 movdqa 80(%ebx),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 addl $16,%edx .byte 102,15,58,15,237,12 pxor %xmm1,%xmm0 subl $1,%eax L002dec_entry: movdqa %xmm6,%xmm1 movdqa -32(%ebp),%xmm2 pandn %xmm0,%xmm1 pand %xmm6,%xmm0 psrld $4,%xmm1 .byte 102,15,56,0,208 movdqa %xmm7,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm7,%xmm4 pxor %xmm2,%xmm3 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm7,%xmm2 .byte 102,15,56,0,211 movdqa %xmm7,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu (%edx),%xmm0 pxor %xmm1,%xmm3 jnz L003dec_loop movdqa 96(%ebx),%xmm4 .byte 102,15,56,0,226 pxor %xmm0,%xmm4 movdqa 112(%ebx),%xmm0 movdqa (%ecx),%xmm2 .byte 
102,15,56,0,195 pxor %xmm4,%xmm0 .byte 102,15,56,0,194 ret .private_extern __vpaes_schedule_core .align 4 __vpaes_schedule_core: addl (%esp),%ebp movdqu (%esi),%xmm0 movdqa 320(%ebp),%xmm2 movdqa %xmm0,%xmm3 leal (%ebp),%ebx movdqa %xmm2,4(%esp) call __vpaes_schedule_transform movdqa %xmm0,%xmm7 testl %edi,%edi jnz L004schedule_am_decrypting movdqu %xmm0,(%edx) jmp L005schedule_go L004schedule_am_decrypting: movdqa 256(%ebp,%ecx,1),%xmm1 .byte 102,15,56,0,217 movdqu %xmm3,(%edx) xorl $48,%ecx L005schedule_go: cmpl $192,%eax ja L006schedule_256 je L007schedule_192 L008schedule_128: movl $10,%eax L009loop_schedule_128: call __vpaes_schedule_round decl %eax jz L010schedule_mangle_last call __vpaes_schedule_mangle jmp L009loop_schedule_128 .align 4,0x90 L007schedule_192: movdqu 8(%esi),%xmm0 call __vpaes_schedule_transform movdqa %xmm0,%xmm6 pxor %xmm4,%xmm4 movhlps %xmm4,%xmm6 movl $4,%eax L011loop_schedule_192: call __vpaes_schedule_round .byte 102,15,58,15,198,8 call __vpaes_schedule_mangle call __vpaes_schedule_192_smear call __vpaes_schedule_mangle call __vpaes_schedule_round decl %eax jz L010schedule_mangle_last call __vpaes_schedule_mangle call __vpaes_schedule_192_smear jmp L011loop_schedule_192 .align 4,0x90 L006schedule_256: movdqu 16(%esi),%xmm0 call __vpaes_schedule_transform movl $7,%eax L012loop_schedule_256: call __vpaes_schedule_mangle movdqa %xmm0,%xmm6 call __vpaes_schedule_round decl %eax jz L010schedule_mangle_last call __vpaes_schedule_mangle pshufd $255,%xmm0,%xmm0 movdqa %xmm7,20(%esp) movdqa %xmm6,%xmm7 call L_vpaes_schedule_low_round movdqa 20(%esp),%xmm7 jmp L012loop_schedule_256 .align 4,0x90 L010schedule_mangle_last: leal 384(%ebp),%ebx testl %edi,%edi jnz L013schedule_mangle_last_dec movdqa 256(%ebp,%ecx,1),%xmm1 .byte 102,15,56,0,193 leal 352(%ebp),%ebx addl $32,%edx L013schedule_mangle_last_dec: addl $-16,%edx pxor 336(%ebp),%xmm0 call __vpaes_schedule_transform movdqu %xmm0,(%edx) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor 
%xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 ret .private_extern __vpaes_schedule_192_smear .align 4 __vpaes_schedule_192_smear: pshufd $128,%xmm6,%xmm1 pshufd $254,%xmm7,%xmm0 pxor %xmm1,%xmm6 pxor %xmm1,%xmm1 pxor %xmm0,%xmm6 movdqa %xmm6,%xmm0 movhlps %xmm1,%xmm6 ret .private_extern __vpaes_schedule_round .align 4 __vpaes_schedule_round: movdqa 8(%esp),%xmm2 pxor %xmm1,%xmm1 .byte 102,15,58,15,202,15 .byte 102,15,58,15,210,15 pxor %xmm1,%xmm7 pshufd $255,%xmm0,%xmm0 .byte 102,15,58,15,192,1 movdqa %xmm2,8(%esp) L_vpaes_schedule_low_round: movdqa %xmm7,%xmm1 pslldq $4,%xmm7 pxor %xmm1,%xmm7 movdqa %xmm7,%xmm1 pslldq $8,%xmm7 pxor %xmm1,%xmm7 pxor 336(%ebp),%xmm7 movdqa -16(%ebp),%xmm4 movdqa -48(%ebp),%xmm5 movdqa %xmm4,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm4,%xmm0 movdqa -32(%ebp),%xmm2 .byte 102,15,56,0,208 pxor %xmm1,%xmm0 movdqa %xmm5,%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 movdqa %xmm5,%xmm4 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm5,%xmm2 .byte 102,15,56,0,211 pxor %xmm0,%xmm2 movdqa %xmm5,%xmm3 .byte 102,15,56,0,220 pxor %xmm1,%xmm3 movdqa 32(%ebp),%xmm4 .byte 102,15,56,0,226 movdqa 48(%ebp),%xmm0 .byte 102,15,56,0,195 pxor %xmm4,%xmm0 pxor %xmm7,%xmm0 movdqa %xmm0,%xmm7 ret .private_extern __vpaes_schedule_transform .align 4 __vpaes_schedule_transform: movdqa -16(%ebp),%xmm2 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 movdqa (%ebx),%xmm2 .byte 102,15,56,0,208 movdqa 16(%ebx),%xmm0 .byte 102,15,56,0,193 pxor %xmm2,%xmm0 ret .private_extern __vpaes_schedule_mangle .align 4 __vpaes_schedule_mangle: movdqa %xmm0,%xmm4 movdqa 128(%ebp),%xmm5 testl %edi,%edi jnz L014schedule_mangle_dec addl $16,%edx pxor 336(%ebp),%xmm4 .byte 102,15,56,0,229 movdqa %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 jmp L015schedule_mangle_both .align 4,0x90 L014schedule_mangle_dec: movdqa -16(%ebp),%xmm2 leal 416(%ebp),%esi movdqa %xmm2,%xmm1 pandn 
%xmm4,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm4 movdqa (%esi),%xmm2 .byte 102,15,56,0,212 movdqa 16(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 32(%esi),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 48(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 64(%esi),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 80(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 96(%esi),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 112(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 addl $-16,%edx L015schedule_mangle_both: movdqa 256(%ebp,%ecx,1),%xmm1 .byte 102,15,56,0,217 addl $-16,%ecx andl $48,%ecx movdqu %xmm3,(%edx) ret .globl _vpaes_set_encrypt_key .private_extern _vpaes_set_encrypt_key .align 4 _vpaes_set_encrypt_key: L_vpaes_set_encrypt_key_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call L016pic_for_function_hit L016pic_for_function_hit: popl %ebx leal _BORINGSSL_function_hit+5-L016pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%eax andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movl %eax,%ebx shrl $5,%ebx addl $5,%ebx movl %ebx,240(%edx) movl $48,%ecx movl $0,%edi leal L_vpaes_consts+0x30-L017pic_point,%ebp call __vpaes_schedule_core L017pic_point: movl 48(%esp),%esp xorl %eax,%eax popl %edi popl %esi popl %ebx popl %ebp ret .globl _vpaes_set_decrypt_key .private_extern _vpaes_set_decrypt_key .align 4 _vpaes_set_decrypt_key: L_vpaes_set_decrypt_key_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%eax andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movl %eax,%ebx shrl $5,%ebx addl $5,%ebx movl %ebx,240(%edx) shll $4,%ebx leal 16(%edx,%ebx,1),%edx movl $1,%edi movl %eax,%ecx shrl $1,%ecx andl $32,%ecx xorl $32,%ecx 
leal L_vpaes_consts+0x30-L018pic_point,%ebp call __vpaes_schedule_core L018pic_point: movl 48(%esp),%esp xorl %eax,%eax popl %edi popl %esi popl %ebx popl %ebp ret .globl _vpaes_encrypt .private_extern _vpaes_encrypt .align 4 _vpaes_encrypt: L_vpaes_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call L019pic_for_function_hit L019pic_for_function_hit: popl %ebx leal _BORINGSSL_function_hit+4-L019pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif leal L_vpaes_consts+0x30-L020pic_point,%ebp call __vpaes_preheat L020pic_point: movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%edi andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movdqu (%esi),%xmm0 call __vpaes_encrypt_core movdqu %xmm0,(%edi) movl 48(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _vpaes_decrypt .private_extern _vpaes_decrypt .align 4 _vpaes_decrypt: L_vpaes_decrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi leal L_vpaes_consts+0x30-L021pic_point,%ebp call __vpaes_preheat L021pic_point: movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%edi andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movdqu (%esi),%xmm0 call __vpaes_decrypt_core movdqu %xmm0,(%edi) movl 48(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _vpaes_cbc_encrypt .private_extern _vpaes_cbc_encrypt .align 4 _vpaes_cbc_encrypt: L_vpaes_cbc_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx subl $16,%eax jc L022cbc_abort leal -56(%esp),%ebx movl 36(%esp),%ebp andl $-16,%ebx movl 40(%esp),%ecx xchgl %esp,%ebx movdqu (%ebp),%xmm1 subl %esi,%edi movl %ebx,48(%esp) movl %edi,(%esp) movl %edx,4(%esp) movl %ebp,8(%esp) movl %eax,%edi leal L_vpaes_consts+0x30-L023pic_point,%ebp call __vpaes_preheat L023pic_point: cmpl $0,%ecx je L024cbc_dec_loop jmp L025cbc_enc_loop .align 4,0x90 
L025cbc_enc_loop: movdqu (%esi),%xmm0 pxor %xmm1,%xmm0 call __vpaes_encrypt_core movl (%esp),%ebx movl 4(%esp),%edx movdqa %xmm0,%xmm1 movdqu %xmm0,(%ebx,%esi,1) leal 16(%esi),%esi subl $16,%edi jnc L025cbc_enc_loop jmp L026cbc_done .align 4,0x90 L024cbc_dec_loop: movdqu (%esi),%xmm0 movdqa %xmm1,16(%esp) movdqa %xmm0,32(%esp) call __vpaes_decrypt_core movl (%esp),%ebx movl 4(%esp),%edx pxor 16(%esp),%xmm0 movdqa 32(%esp),%xmm1 movdqu %xmm0,(%ebx,%esi,1) leal 16(%esi),%esi subl $16,%edi jnc L024cbc_dec_loop L026cbc_done: movl 8(%esp),%ebx movl 48(%esp),%esp movdqu %xmm1,(%ebx) L022cbc_abort: popl %edi popl %esi popl %ebx popl %ebp ret #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/vpaes-x86-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text #ifdef BORINGSSL_DISPATCH_TEST #endif .align 64 .L_vpaes_consts: .long 218628480,235210255,168496130,67568393 .long 252381056,17041926,33884169,51187212 .long 252645135,252645135,252645135,252645135 .long 1512730624,3266504856,1377990664,3401244816 .long 830229760,1275146365,2969422977,3447763452 .long 3411033600,2979783055,338359620,2782886510 .long 4209124096,907596821,221174255,1006095553 .long 191964160,3799684038,3164090317,1589111125 .long 182528256,1777043520,2877432650,3265356744 .long 1874708224,3503451415,3305285752,363511674 .long 1606117888,3487855781,1093350906,2384367825 .long 197121,67569157,134941193,202313229 .long 67569157,134941193,202313229,197121 .long 134941193,202313229,197121,67569157 .long 202313229,197121,67569157,134941193 .long 33619971,100992007,168364043,235736079 .long 235736079,33619971,100992007,168364043 .long 168364043,235736079,33619971,100992007 .long 100992007,168364043,235736079,33619971 .long 50462976,117835012,185207048,252579084 .long 252314880,51251460,117574920,184942860 .long 184682752,252054788,50987272,118359308 .long 118099200,185467140,251790600,50727180 .long 2946363062,528716217,1300004225,1881839624 .long 1532713819,1532713819,1532713819,1532713819 .long 3602276352,4288629033,3737020424,4153884961 .long 1354558464,32357713,2958822624,3775749553 .long 1201988352,132424512,1572796698,503232858 .long 2213177600,1597421020,4103937655,675398315 .long 2749646592,4273543773,1511898873,121693092 .long 3040248576,1103263732,2871565598,1608280554 .long 2236667136,2588920351,482954393,64377734 .long 3069987328,291237287,2117370568,3650299247 .long 533321216,3573750986,2572112006,1401264716 .long 1339849704,2721158661,548607111,3445553514 .long 2128193280,3054596040,2183486460,1257083700 .long 655635200,1165381986,3923443150,2344132524 .long 190078720,256924420,290342170,357187870 .long 1610966272,2263057382,4103205268,309794674 .long 
2592527872,2233205587,1335446729,3402964816 .long 3973531904,3225098121,3002836325,1918774430 .long 3870401024,2102906079,2284471353,4117666579 .long 617007872,1021508343,366931923,691083277 .long 2528395776,3491914898,2968704004,1613121270 .long 3445188352,3247741094,844474987,4093578302 .long 651481088,1190302358,1689581232,574775300 .long 4289380608,206939853,2555985458,2489840491 .long 2130264064,327674451,3566485037,3349835193 .long 2470714624,316102159,3636825756,3393945945 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105 .byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83 .byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117 .byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105 .byte 118,101,114,115,105,116,121,41,0 .align 64 .hidden _vpaes_preheat .type _vpaes_preheat,@function .align 16 _vpaes_preheat: addl (%esp),%ebp movdqa -48(%ebp),%xmm7 movdqa -16(%ebp),%xmm6 ret .size _vpaes_preheat,.-_vpaes_preheat .hidden _vpaes_encrypt_core .type _vpaes_encrypt_core,@function .align 16 _vpaes_encrypt_core: movl $16,%ecx movl 240(%edx),%eax movdqa %xmm6,%xmm1 movdqa (%ebp),%xmm2 pandn %xmm0,%xmm1 pand %xmm6,%xmm0 movdqu (%edx),%xmm5 .byte 102,15,56,0,208 movdqa 16(%ebp),%xmm0 pxor %xmm5,%xmm2 psrld $4,%xmm1 addl $16,%edx .byte 102,15,56,0,193 leal 192(%ebp),%ebx pxor %xmm2,%xmm0 jmp .L000enc_entry .align 16 .L001enc_loop: movdqa 32(%ebp),%xmm4 movdqa 48(%ebp),%xmm0 .byte 102,15,56,0,226 .byte 102,15,56,0,195 pxor %xmm5,%xmm4 movdqa 64(%ebp),%xmm5 pxor %xmm4,%xmm0 movdqa -64(%ebx,%ecx,1),%xmm1 .byte 102,15,56,0,234 movdqa 80(%ebp),%xmm2 movdqa (%ebx,%ecx,1),%xmm4 .byte 102,15,56,0,211 movdqa %xmm0,%xmm3 pxor %xmm5,%xmm2 .byte 102,15,56,0,193 addl $16,%edx pxor %xmm2,%xmm0 .byte 102,15,56,0,220 addl $16,%ecx pxor %xmm0,%xmm3 .byte 102,15,56,0,193 andl $48,%ecx subl $1,%eax pxor %xmm3,%xmm0 .L000enc_entry: movdqa %xmm6,%xmm1 movdqa -32(%ebp),%xmm5 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm6,%xmm0 .byte 102,15,56,0,232 movdqa 
%xmm7,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm7,%xmm4 pxor %xmm5,%xmm3 .byte 102,15,56,0,224 movdqa %xmm7,%xmm2 pxor %xmm5,%xmm4 .byte 102,15,56,0,211 movdqa %xmm7,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu (%edx),%xmm5 pxor %xmm1,%xmm3 jnz .L001enc_loop movdqa 96(%ebp),%xmm4 movdqa 112(%ebp),%xmm0 .byte 102,15,56,0,226 pxor %xmm5,%xmm4 .byte 102,15,56,0,195 movdqa 64(%ebx,%ecx,1),%xmm1 pxor %xmm4,%xmm0 .byte 102,15,56,0,193 ret .size _vpaes_encrypt_core,.-_vpaes_encrypt_core .hidden _vpaes_decrypt_core .type _vpaes_decrypt_core,@function .align 16 _vpaes_decrypt_core: leal 608(%ebp),%ebx movl 240(%edx),%eax movdqa %xmm6,%xmm1 movdqa -64(%ebx),%xmm2 pandn %xmm0,%xmm1 movl %eax,%ecx psrld $4,%xmm1 movdqu (%edx),%xmm5 shll $4,%ecx pand %xmm6,%xmm0 .byte 102,15,56,0,208 movdqa -48(%ebx),%xmm0 xorl $48,%ecx .byte 102,15,56,0,193 andl $48,%ecx pxor %xmm5,%xmm2 movdqa 176(%ebp),%xmm5 pxor %xmm2,%xmm0 addl $16,%edx leal -352(%ebx,%ecx,1),%ecx jmp .L002dec_entry .align 16 .L003dec_loop: movdqa -32(%ebx),%xmm4 movdqa -16(%ebx),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa (%ebx),%xmm4 pxor %xmm1,%xmm0 movdqa 16(%ebx),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa 32(%ebx),%xmm4 pxor %xmm1,%xmm0 movdqa 48(%ebx),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa 64(%ebx),%xmm4 pxor %xmm1,%xmm0 movdqa 80(%ebx),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 addl $16,%edx .byte 102,15,58,15,237,12 pxor %xmm1,%xmm0 subl $1,%eax .L002dec_entry: movdqa %xmm6,%xmm1 movdqa -32(%ebp),%xmm2 pandn %xmm0,%xmm1 pand %xmm6,%xmm0 psrld $4,%xmm1 .byte 102,15,56,0,208 movdqa %xmm7,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm7,%xmm4 pxor %xmm2,%xmm3 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm7,%xmm2 .byte 102,15,56,0,211 movdqa %xmm7,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu 
(%edx),%xmm0 pxor %xmm1,%xmm3 jnz .L003dec_loop movdqa 96(%ebx),%xmm4 .byte 102,15,56,0,226 pxor %xmm0,%xmm4 movdqa 112(%ebx),%xmm0 movdqa (%ecx),%xmm2 .byte 102,15,56,0,195 pxor %xmm4,%xmm0 .byte 102,15,56,0,194 ret .size _vpaes_decrypt_core,.-_vpaes_decrypt_core .hidden _vpaes_schedule_core .type _vpaes_schedule_core,@function .align 16 _vpaes_schedule_core: addl (%esp),%ebp movdqu (%esi),%xmm0 movdqa 320(%ebp),%xmm2 movdqa %xmm0,%xmm3 leal (%ebp),%ebx movdqa %xmm2,4(%esp) call _vpaes_schedule_transform movdqa %xmm0,%xmm7 testl %edi,%edi jnz .L004schedule_am_decrypting movdqu %xmm0,(%edx) jmp .L005schedule_go .L004schedule_am_decrypting: movdqa 256(%ebp,%ecx,1),%xmm1 .byte 102,15,56,0,217 movdqu %xmm3,(%edx) xorl $48,%ecx .L005schedule_go: cmpl $192,%eax ja .L006schedule_256 je .L007schedule_192 .L008schedule_128: movl $10,%eax .L009loop_schedule_128: call _vpaes_schedule_round decl %eax jz .L010schedule_mangle_last call _vpaes_schedule_mangle jmp .L009loop_schedule_128 .align 16 .L007schedule_192: movdqu 8(%esi),%xmm0 call _vpaes_schedule_transform movdqa %xmm0,%xmm6 pxor %xmm4,%xmm4 movhlps %xmm4,%xmm6 movl $4,%eax .L011loop_schedule_192: call _vpaes_schedule_round .byte 102,15,58,15,198,8 call _vpaes_schedule_mangle call _vpaes_schedule_192_smear call _vpaes_schedule_mangle call _vpaes_schedule_round decl %eax jz .L010schedule_mangle_last call _vpaes_schedule_mangle call _vpaes_schedule_192_smear jmp .L011loop_schedule_192 .align 16 .L006schedule_256: movdqu 16(%esi),%xmm0 call _vpaes_schedule_transform movl $7,%eax .L012loop_schedule_256: call _vpaes_schedule_mangle movdqa %xmm0,%xmm6 call _vpaes_schedule_round decl %eax jz .L010schedule_mangle_last call _vpaes_schedule_mangle pshufd $255,%xmm0,%xmm0 movdqa %xmm7,20(%esp) movdqa %xmm6,%xmm7 call .L_vpaes_schedule_low_round movdqa 20(%esp),%xmm7 jmp .L012loop_schedule_256 .align 16 .L010schedule_mangle_last: leal 384(%ebp),%ebx testl %edi,%edi jnz .L013schedule_mangle_last_dec movdqa 256(%ebp,%ecx,1),%xmm1 
.byte 102,15,56,0,193 leal 352(%ebp),%ebx addl $32,%edx .L013schedule_mangle_last_dec: addl $-16,%edx pxor 336(%ebp),%xmm0 call _vpaes_schedule_transform movdqu %xmm0,(%edx) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 ret .size _vpaes_schedule_core,.-_vpaes_schedule_core .hidden _vpaes_schedule_192_smear .type _vpaes_schedule_192_smear,@function .align 16 _vpaes_schedule_192_smear: pshufd $128,%xmm6,%xmm1 pshufd $254,%xmm7,%xmm0 pxor %xmm1,%xmm6 pxor %xmm1,%xmm1 pxor %xmm0,%xmm6 movdqa %xmm6,%xmm0 movhlps %xmm1,%xmm6 ret .size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear .hidden _vpaes_schedule_round .type _vpaes_schedule_round,@function .align 16 _vpaes_schedule_round: movdqa 8(%esp),%xmm2 pxor %xmm1,%xmm1 .byte 102,15,58,15,202,15 .byte 102,15,58,15,210,15 pxor %xmm1,%xmm7 pshufd $255,%xmm0,%xmm0 .byte 102,15,58,15,192,1 movdqa %xmm2,8(%esp) .L_vpaes_schedule_low_round: movdqa %xmm7,%xmm1 pslldq $4,%xmm7 pxor %xmm1,%xmm7 movdqa %xmm7,%xmm1 pslldq $8,%xmm7 pxor %xmm1,%xmm7 pxor 336(%ebp),%xmm7 movdqa -16(%ebp),%xmm4 movdqa -48(%ebp),%xmm5 movdqa %xmm4,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm4,%xmm0 movdqa -32(%ebp),%xmm2 .byte 102,15,56,0,208 pxor %xmm1,%xmm0 movdqa %xmm5,%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 movdqa %xmm5,%xmm4 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm5,%xmm2 .byte 102,15,56,0,211 pxor %xmm0,%xmm2 movdqa %xmm5,%xmm3 .byte 102,15,56,0,220 pxor %xmm1,%xmm3 movdqa 32(%ebp),%xmm4 .byte 102,15,56,0,226 movdqa 48(%ebp),%xmm0 .byte 102,15,56,0,195 pxor %xmm4,%xmm0 pxor %xmm7,%xmm0 movdqa %xmm0,%xmm7 ret .size _vpaes_schedule_round,.-_vpaes_schedule_round .hidden _vpaes_schedule_transform .type _vpaes_schedule_transform,@function .align 16 _vpaes_schedule_transform: movdqa -16(%ebp),%xmm2 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 movdqa (%ebx),%xmm2 .byte 102,15,56,0,208 movdqa 16(%ebx),%xmm0 .byte 
102,15,56,0,193 pxor %xmm2,%xmm0 ret .size _vpaes_schedule_transform,.-_vpaes_schedule_transform .hidden _vpaes_schedule_mangle .type _vpaes_schedule_mangle,@function .align 16 _vpaes_schedule_mangle: movdqa %xmm0,%xmm4 movdqa 128(%ebp),%xmm5 testl %edi,%edi jnz .L014schedule_mangle_dec addl $16,%edx pxor 336(%ebp),%xmm4 .byte 102,15,56,0,229 movdqa %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 jmp .L015schedule_mangle_both .align 16 .L014schedule_mangle_dec: movdqa -16(%ebp),%xmm2 leal 416(%ebp),%esi movdqa %xmm2,%xmm1 pandn %xmm4,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm4 movdqa (%esi),%xmm2 .byte 102,15,56,0,212 movdqa 16(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 32(%esi),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 48(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 64(%esi),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 80(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 96(%esi),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 112(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 addl $-16,%edx .L015schedule_mangle_both: movdqa 256(%ebp,%ecx,1),%xmm1 .byte 102,15,56,0,217 addl $-16,%ecx andl $48,%ecx movdqu %xmm3,(%edx) ret .size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle .globl vpaes_set_encrypt_key .hidden vpaes_set_encrypt_key .type vpaes_set_encrypt_key,@function .align 16 vpaes_set_encrypt_key: .L_vpaes_set_encrypt_key_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L016pic_for_function_hit .L016pic_for_function_hit: popl %ebx leal BORINGSSL_function_hit+5-.L016pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%eax andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movl %eax,%ebx shrl $5,%ebx addl $5,%ebx movl %ebx,240(%edx) movl 
$48,%ecx movl $0,%edi leal .L_vpaes_consts+0x30-.L017pic_point,%ebp call _vpaes_schedule_core .L017pic_point: movl 48(%esp),%esp xorl %eax,%eax popl %edi popl %esi popl %ebx popl %ebp ret .size vpaes_set_encrypt_key,.-.L_vpaes_set_encrypt_key_begin .globl vpaes_set_decrypt_key .hidden vpaes_set_decrypt_key .type vpaes_set_decrypt_key,@function .align 16 vpaes_set_decrypt_key: .L_vpaes_set_decrypt_key_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%eax andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movl %eax,%ebx shrl $5,%ebx addl $5,%ebx movl %ebx,240(%edx) shll $4,%ebx leal 16(%edx,%ebx,1),%edx movl $1,%edi movl %eax,%ecx shrl $1,%ecx andl $32,%ecx xorl $32,%ecx leal .L_vpaes_consts+0x30-.L018pic_point,%ebp call _vpaes_schedule_core .L018pic_point: movl 48(%esp),%esp xorl %eax,%eax popl %edi popl %esi popl %ebx popl %ebp ret .size vpaes_set_decrypt_key,.-.L_vpaes_set_decrypt_key_begin .globl vpaes_encrypt .hidden vpaes_encrypt .type vpaes_encrypt,@function .align 16 vpaes_encrypt: .L_vpaes_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L019pic_for_function_hit .L019pic_for_function_hit: popl %ebx leal BORINGSSL_function_hit+4-.L019pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif leal .L_vpaes_consts+0x30-.L020pic_point,%ebp call _vpaes_preheat .L020pic_point: movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%edi andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movdqu (%esi),%xmm0 call _vpaes_encrypt_core movdqu %xmm0,(%edi) movl 48(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size vpaes_encrypt,.-.L_vpaes_encrypt_begin .globl vpaes_decrypt .hidden vpaes_decrypt .type vpaes_decrypt,@function .align 16 vpaes_decrypt: .L_vpaes_decrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi leal .L_vpaes_consts+0x30-.L021pic_point,%ebp call 
_vpaes_preheat .L021pic_point: movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%edi andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movdqu (%esi),%xmm0 call _vpaes_decrypt_core movdqu %xmm0,(%edi) movl 48(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size vpaes_decrypt,.-.L_vpaes_decrypt_begin .globl vpaes_cbc_encrypt .hidden vpaes_cbc_encrypt .type vpaes_cbc_encrypt,@function .align 16 vpaes_cbc_encrypt: .L_vpaes_cbc_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx subl $16,%eax jc .L022cbc_abort leal -56(%esp),%ebx movl 36(%esp),%ebp andl $-16,%ebx movl 40(%esp),%ecx xchgl %esp,%ebx movdqu (%ebp),%xmm1 subl %esi,%edi movl %ebx,48(%esp) movl %edi,(%esp) movl %edx,4(%esp) movl %ebp,8(%esp) movl %eax,%edi leal .L_vpaes_consts+0x30-.L023pic_point,%ebp call _vpaes_preheat .L023pic_point: cmpl $0,%ecx je .L024cbc_dec_loop jmp .L025cbc_enc_loop .align 16 .L025cbc_enc_loop: movdqu (%esi),%xmm0 pxor %xmm1,%xmm0 call _vpaes_encrypt_core movl (%esp),%ebx movl 4(%esp),%edx movdqa %xmm0,%xmm1 movdqu %xmm0,(%ebx,%esi,1) leal 16(%esi),%esi subl $16,%edi jnc .L025cbc_enc_loop jmp .L026cbc_done .align 16 .L024cbc_dec_loop: movdqu (%esi),%xmm0 movdqa %xmm1,16(%esp) movdqa %xmm0,32(%esp) call _vpaes_decrypt_core movl (%esp),%ebx movl 4(%esp),%edx pxor 16(%esp),%xmm0 movdqa 32(%esp),%xmm1 movdqu %xmm0,(%ebx,%esi,1) leal 16(%esi),%esi subl $16,%edi jnc .L024cbc_dec_loop .L026cbc_done: movl 8(%esp),%ebx movl 48(%esp),%esp movdqu %xmm1,(%ebx) .L022cbc_abort: popl %edi popl %esi popl %ebx popl %ebp ret .size vpaes_cbc_encrypt,.-.L_vpaes_cbc_encrypt_begin #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/vpaes-x86_64-apple.S 
================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .p2align 4 _vpaes_encrypt_core: movq %rdx,%r9 movq $16,%r11 movl 240(%rdx),%eax movdqa %xmm9,%xmm1 movdqa L$k_ipt(%rip),%xmm2 pandn %xmm0,%xmm1 movdqu (%r9),%xmm5 psrld $4,%xmm1 pand %xmm9,%xmm0 .byte 102,15,56,0,208 movdqa L$k_ipt+16(%rip),%xmm0 .byte 102,15,56,0,193 pxor %xmm5,%xmm2 addq $16,%r9 pxor %xmm2,%xmm0 leaq L$k_mc_backward(%rip),%r10 jmp L$enc_entry .p2align 4 L$enc_loop: movdqa %xmm13,%xmm4 movdqa %xmm12,%xmm0 .byte 102,15,56,0,226 .byte 102,15,56,0,195 pxor %xmm5,%xmm4 movdqa %xmm15,%xmm5 pxor %xmm4,%xmm0 movdqa -64(%r11,%r10,1),%xmm1 .byte 102,15,56,0,234 movdqa (%r11,%r10,1),%xmm4 movdqa %xmm14,%xmm2 .byte 102,15,56,0,211 movdqa %xmm0,%xmm3 pxor %xmm5,%xmm2 .byte 102,15,56,0,193 addq $16,%r9 pxor %xmm2,%xmm0 .byte 102,15,56,0,220 addq $16,%r11 pxor %xmm0,%xmm3 .byte 102,15,56,0,193 andq $0x30,%r11 subq $1,%rax pxor %xmm3,%xmm0 L$enc_entry: movdqa %xmm9,%xmm1 movdqa %xmm11,%xmm5 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm0 .byte 102,15,56,0,232 movdqa %xmm10,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm10,%xmm4 pxor %xmm5,%xmm3 .byte 102,15,56,0,224 movdqa %xmm10,%xmm2 pxor %xmm5,%xmm4 .byte 102,15,56,0,211 movdqa %xmm10,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu (%r9),%xmm5 pxor %xmm1,%xmm3 jnz L$enc_loop movdqa -96(%r10),%xmm4 movdqa -80(%r10),%xmm0 .byte 102,15,56,0,226 pxor %xmm5,%xmm4 .byte 102,15,56,0,195 movdqa 64(%r11,%r10,1),%xmm1 pxor %xmm4,%xmm0 .byte 102,15,56,0,193 ret .p2align 4 _vpaes_encrypt_core_2x: movq %rdx,%r9 movq $16,%r11 movl 240(%rdx),%eax movdqa %xmm9,%xmm1 movdqa %xmm9,%xmm7 movdqa L$k_ipt(%rip),%xmm2 movdqa %xmm2,%xmm8 pandn %xmm0,%xmm1 pandn %xmm6,%xmm7 movdqu (%r9),%xmm5 psrld $4,%xmm1 psrld $4,%xmm7 pand 
%xmm9,%xmm0 pand %xmm9,%xmm6 .byte 102,15,56,0,208 .byte 102,68,15,56,0,198 movdqa L$k_ipt+16(%rip),%xmm0 movdqa %xmm0,%xmm6 .byte 102,15,56,0,193 .byte 102,15,56,0,247 pxor %xmm5,%xmm2 pxor %xmm5,%xmm8 addq $16,%r9 pxor %xmm2,%xmm0 pxor %xmm8,%xmm6 leaq L$k_mc_backward(%rip),%r10 jmp L$enc2x_entry .p2align 4 L$enc2x_loop: movdqa L$k_sb1(%rip),%xmm4 movdqa L$k_sb1+16(%rip),%xmm0 movdqa %xmm4,%xmm12 movdqa %xmm0,%xmm6 .byte 102,15,56,0,226 .byte 102,69,15,56,0,224 .byte 102,15,56,0,195 .byte 102,65,15,56,0,243 pxor %xmm5,%xmm4 pxor %xmm5,%xmm12 movdqa L$k_sb2(%rip),%xmm5 movdqa %xmm5,%xmm13 pxor %xmm4,%xmm0 pxor %xmm12,%xmm6 movdqa -64(%r11,%r10,1),%xmm1 .byte 102,15,56,0,234 .byte 102,69,15,56,0,232 movdqa (%r11,%r10,1),%xmm4 movdqa L$k_sb2+16(%rip),%xmm2 movdqa %xmm2,%xmm8 .byte 102,15,56,0,211 .byte 102,69,15,56,0,195 movdqa %xmm0,%xmm3 movdqa %xmm6,%xmm11 pxor %xmm5,%xmm2 pxor %xmm13,%xmm8 .byte 102,15,56,0,193 .byte 102,15,56,0,241 addq $16,%r9 pxor %xmm2,%xmm0 pxor %xmm8,%xmm6 .byte 102,15,56,0,220 .byte 102,68,15,56,0,220 addq $16,%r11 pxor %xmm0,%xmm3 pxor %xmm6,%xmm11 .byte 102,15,56,0,193 .byte 102,15,56,0,241 andq $0x30,%r11 subq $1,%rax pxor %xmm3,%xmm0 pxor %xmm11,%xmm6 L$enc2x_entry: movdqa %xmm9,%xmm1 movdqa %xmm9,%xmm7 movdqa L$k_inv+16(%rip),%xmm5 movdqa %xmm5,%xmm13 pandn %xmm0,%xmm1 pandn %xmm6,%xmm7 psrld $4,%xmm1 psrld $4,%xmm7 pand %xmm9,%xmm0 pand %xmm9,%xmm6 .byte 102,15,56,0,232 .byte 102,68,15,56,0,238 movdqa %xmm10,%xmm3 movdqa %xmm10,%xmm11 pxor %xmm1,%xmm0 pxor %xmm7,%xmm6 .byte 102,15,56,0,217 .byte 102,68,15,56,0,223 movdqa %xmm10,%xmm4 movdqa %xmm10,%xmm12 pxor %xmm5,%xmm3 pxor %xmm13,%xmm11 .byte 102,15,56,0,224 .byte 102,68,15,56,0,230 movdqa %xmm10,%xmm2 movdqa %xmm10,%xmm8 pxor %xmm5,%xmm4 pxor %xmm13,%xmm12 .byte 102,15,56,0,211 .byte 102,69,15,56,0,195 movdqa %xmm10,%xmm3 movdqa %xmm10,%xmm11 pxor %xmm0,%xmm2 pxor %xmm6,%xmm8 .byte 102,15,56,0,220 .byte 102,69,15,56,0,220 movdqu (%r9),%xmm5 pxor %xmm1,%xmm3 pxor %xmm7,%xmm11 jnz 
L$enc2x_loop movdqa -96(%r10),%xmm4 movdqa -80(%r10),%xmm0 movdqa %xmm4,%xmm12 movdqa %xmm0,%xmm6 .byte 102,15,56,0,226 .byte 102,69,15,56,0,224 pxor %xmm5,%xmm4 pxor %xmm5,%xmm12 .byte 102,15,56,0,195 .byte 102,65,15,56,0,243 movdqa 64(%r11,%r10,1),%xmm1 pxor %xmm4,%xmm0 pxor %xmm12,%xmm6 .byte 102,15,56,0,193 .byte 102,15,56,0,241 ret .p2align 4 _vpaes_decrypt_core: movq %rdx,%r9 movl 240(%rdx),%eax movdqa %xmm9,%xmm1 movdqa L$k_dipt(%rip),%xmm2 pandn %xmm0,%xmm1 movq %rax,%r11 psrld $4,%xmm1 movdqu (%r9),%xmm5 shlq $4,%r11 pand %xmm9,%xmm0 .byte 102,15,56,0,208 movdqa L$k_dipt+16(%rip),%xmm0 xorq $0x30,%r11 leaq L$k_dsbd(%rip),%r10 .byte 102,15,56,0,193 andq $0x30,%r11 pxor %xmm5,%xmm2 movdqa L$k_mc_forward+48(%rip),%xmm5 pxor %xmm2,%xmm0 addq $16,%r9 addq %r10,%r11 jmp L$dec_entry .p2align 4 L$dec_loop: movdqa -32(%r10),%xmm4 movdqa -16(%r10),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa 0(%r10),%xmm4 pxor %xmm1,%xmm0 movdqa 16(%r10),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa 32(%r10),%xmm4 pxor %xmm1,%xmm0 movdqa 48(%r10),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa 64(%r10),%xmm4 pxor %xmm1,%xmm0 movdqa 80(%r10),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 addq $16,%r9 .byte 102,15,58,15,237,12 pxor %xmm1,%xmm0 subq $1,%rax L$dec_entry: movdqa %xmm9,%xmm1 pandn %xmm0,%xmm1 movdqa %xmm11,%xmm2 psrld $4,%xmm1 pand %xmm9,%xmm0 .byte 102,15,56,0,208 movdqa %xmm10,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm10,%xmm4 pxor %xmm2,%xmm3 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm10,%xmm2 .byte 102,15,56,0,211 movdqa %xmm10,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu (%r9),%xmm0 pxor %xmm1,%xmm3 jnz L$dec_loop movdqa 96(%r10),%xmm4 .byte 102,15,56,0,226 pxor %xmm0,%xmm4 movdqa 112(%r10),%xmm0 movdqa -352(%r11),%xmm2 .byte 102,15,56,0,195 pxor %xmm4,%xmm0 .byte 
102,15,56,0,194 ret .p2align 4 _vpaes_schedule_core: call _vpaes_preheat movdqa L$k_rcon(%rip),%xmm8 movdqu (%rdi),%xmm0 movdqa %xmm0,%xmm3 leaq L$k_ipt(%rip),%r11 call _vpaes_schedule_transform movdqa %xmm0,%xmm7 leaq L$k_sr(%rip),%r10 testq %rcx,%rcx jnz L$schedule_am_decrypting movdqu %xmm0,(%rdx) jmp L$schedule_go L$schedule_am_decrypting: movdqa (%r8,%r10,1),%xmm1 .byte 102,15,56,0,217 movdqu %xmm3,(%rdx) xorq $0x30,%r8 L$schedule_go: cmpl $192,%esi ja L$schedule_256 je L$schedule_192 L$schedule_128: movl $10,%esi L$oop_schedule_128: call _vpaes_schedule_round decq %rsi jz L$schedule_mangle_last call _vpaes_schedule_mangle jmp L$oop_schedule_128 .p2align 4 L$schedule_192: movdqu 8(%rdi),%xmm0 call _vpaes_schedule_transform movdqa %xmm0,%xmm6 pxor %xmm4,%xmm4 movhlps %xmm4,%xmm6 movl $4,%esi L$oop_schedule_192: call _vpaes_schedule_round .byte 102,15,58,15,198,8 call _vpaes_schedule_mangle call _vpaes_schedule_192_smear call _vpaes_schedule_mangle call _vpaes_schedule_round decq %rsi jz L$schedule_mangle_last call _vpaes_schedule_mangle call _vpaes_schedule_192_smear jmp L$oop_schedule_192 .p2align 4 L$schedule_256: movdqu 16(%rdi),%xmm0 call _vpaes_schedule_transform movl $7,%esi L$oop_schedule_256: call _vpaes_schedule_mangle movdqa %xmm0,%xmm6 call _vpaes_schedule_round decq %rsi jz L$schedule_mangle_last call _vpaes_schedule_mangle pshufd $0xFF,%xmm0,%xmm0 movdqa %xmm7,%xmm5 movdqa %xmm6,%xmm7 call _vpaes_schedule_low_round movdqa %xmm5,%xmm7 jmp L$oop_schedule_256 .p2align 4 L$schedule_mangle_last: leaq L$k_deskew(%rip),%r11 testq %rcx,%rcx jnz L$schedule_mangle_last_dec movdqa (%r8,%r10,1),%xmm1 .byte 102,15,56,0,193 leaq L$k_opt(%rip),%r11 addq $32,%rdx L$schedule_mangle_last_dec: addq $-16,%rdx pxor L$k_s63(%rip),%xmm0 call _vpaes_schedule_transform movdqu %xmm0,(%rdx) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 ret .p2align 4 _vpaes_schedule_192_smear: pshufd 
$0x80,%xmm6,%xmm1 pshufd $0xFE,%xmm7,%xmm0 pxor %xmm1,%xmm6 pxor %xmm1,%xmm1 pxor %xmm0,%xmm6 movdqa %xmm6,%xmm0 movhlps %xmm1,%xmm6 ret .p2align 4 _vpaes_schedule_round: pxor %xmm1,%xmm1 .byte 102,65,15,58,15,200,15 .byte 102,69,15,58,15,192,15 pxor %xmm1,%xmm7 pshufd $0xFF,%xmm0,%xmm0 .byte 102,15,58,15,192,1 _vpaes_schedule_low_round: movdqa %xmm7,%xmm1 pslldq $4,%xmm7 pxor %xmm1,%xmm7 movdqa %xmm7,%xmm1 pslldq $8,%xmm7 pxor %xmm1,%xmm7 pxor L$k_s63(%rip),%xmm7 movdqa %xmm9,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm0 movdqa %xmm11,%xmm2 .byte 102,15,56,0,208 pxor %xmm1,%xmm0 movdqa %xmm10,%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 movdqa %xmm10,%xmm4 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm10,%xmm2 .byte 102,15,56,0,211 pxor %xmm0,%xmm2 movdqa %xmm10,%xmm3 .byte 102,15,56,0,220 pxor %xmm1,%xmm3 movdqa %xmm13,%xmm4 .byte 102,15,56,0,226 movdqa %xmm12,%xmm0 .byte 102,15,56,0,195 pxor %xmm4,%xmm0 pxor %xmm7,%xmm0 movdqa %xmm0,%xmm7 ret .p2align 4 _vpaes_schedule_transform: movdqa %xmm9,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm0 movdqa (%r11),%xmm2 .byte 102,15,56,0,208 movdqa 16(%r11),%xmm0 .byte 102,15,56,0,193 pxor %xmm2,%xmm0 ret .p2align 4 _vpaes_schedule_mangle: movdqa %xmm0,%xmm4 movdqa L$k_mc_forward(%rip),%xmm5 testq %rcx,%rcx jnz L$schedule_mangle_dec addq $16,%rdx pxor L$k_s63(%rip),%xmm4 .byte 102,15,56,0,229 movdqa %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 jmp L$schedule_mangle_both .p2align 4 L$schedule_mangle_dec: leaq L$k_dksd(%rip),%r11 movdqa %xmm9,%xmm1 pandn %xmm4,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm4 movdqa 0(%r11),%xmm2 .byte 102,15,56,0,212 movdqa 16(%r11),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 32(%r11),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 48(%r11),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 64(%r11),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 80(%r11),%xmm3 .byte 
102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 96(%r11),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 112(%r11),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 addq $-16,%rdx L$schedule_mangle_both: movdqa (%r8,%r10,1),%xmm1 .byte 102,15,56,0,217 addq $-16,%r8 andq $0x30,%r8 movdqu %xmm3,(%rdx) ret .globl _vpaes_set_encrypt_key .private_extern _vpaes_set_encrypt_key .p2align 4 _vpaes_set_encrypt_key: _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,_BORINGSSL_function_hit+5(%rip) #endif movl %esi,%eax shrl $5,%eax addl $5,%eax movl %eax,240(%rdx) movl $0,%ecx movl $0x30,%r8d call _vpaes_schedule_core xorl %eax,%eax ret .globl _vpaes_set_decrypt_key .private_extern _vpaes_set_decrypt_key .p2align 4 _vpaes_set_decrypt_key: _CET_ENDBR movl %esi,%eax shrl $5,%eax addl $5,%eax movl %eax,240(%rdx) shll $4,%eax leaq 16(%rdx,%rax,1),%rdx movl $1,%ecx movl %esi,%r8d shrl $1,%r8d andl $32,%r8d xorl $32,%r8d call _vpaes_schedule_core xorl %eax,%eax ret .globl _vpaes_encrypt .private_extern _vpaes_encrypt .p2align 4 _vpaes_encrypt: _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,_BORINGSSL_function_hit+4(%rip) #endif movdqu (%rdi),%xmm0 call _vpaes_preheat call _vpaes_encrypt_core movdqu %xmm0,(%rsi) ret .globl _vpaes_decrypt .private_extern _vpaes_decrypt .p2align 4 _vpaes_decrypt: _CET_ENDBR movdqu (%rdi),%xmm0 call _vpaes_preheat call _vpaes_decrypt_core movdqu %xmm0,(%rsi) ret .globl _vpaes_cbc_encrypt .private_extern _vpaes_cbc_encrypt .p2align 4 _vpaes_cbc_encrypt: _CET_ENDBR xchgq %rcx,%rdx subq $16,%rcx jc L$cbc_abort movdqu (%r8),%xmm6 subq %rdi,%rsi call _vpaes_preheat cmpl $0,%r9d je L$cbc_dec_loop jmp L$cbc_enc_loop .p2align 4 L$cbc_enc_loop: movdqu (%rdi),%xmm0 pxor %xmm6,%xmm0 call _vpaes_encrypt_core movdqa %xmm0,%xmm6 movdqu %xmm0,(%rsi,%rdi,1) leaq 16(%rdi),%rdi subq $16,%rcx jnc L$cbc_enc_loop jmp L$cbc_done .p2align 4 L$cbc_dec_loop: movdqu (%rdi),%xmm0 movdqa %xmm0,%xmm7 call _vpaes_decrypt_core pxor %xmm6,%xmm0 movdqa %xmm7,%xmm6 
movdqu %xmm0,(%rsi,%rdi,1) leaq 16(%rdi),%rdi subq $16,%rcx jnc L$cbc_dec_loop L$cbc_done: movdqu %xmm6,(%r8) L$cbc_abort: ret .globl _vpaes_ctr32_encrypt_blocks .private_extern _vpaes_ctr32_encrypt_blocks .p2align 4 _vpaes_ctr32_encrypt_blocks: _CET_ENDBR xchgq %rcx,%rdx testq %rcx,%rcx jz L$ctr32_abort movdqu (%r8),%xmm0 movdqa L$ctr_add_one(%rip),%xmm8 subq %rdi,%rsi call _vpaes_preheat movdqa %xmm0,%xmm6 pshufb L$rev_ctr(%rip),%xmm6 testq $1,%rcx jz L$ctr32_prep_loop movdqu (%rdi),%xmm7 call _vpaes_encrypt_core pxor %xmm7,%xmm0 paddd %xmm8,%xmm6 movdqu %xmm0,(%rsi,%rdi,1) subq $1,%rcx leaq 16(%rdi),%rdi jz L$ctr32_done L$ctr32_prep_loop: movdqa %xmm6,%xmm14 movdqa %xmm6,%xmm15 paddd %xmm8,%xmm15 L$ctr32_loop: movdqa L$rev_ctr(%rip),%xmm1 movdqa %xmm14,%xmm0 movdqa %xmm15,%xmm6 .byte 102,15,56,0,193 .byte 102,15,56,0,241 call _vpaes_encrypt_core_2x movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 movdqa L$ctr_add_two(%rip),%xmm3 pxor %xmm1,%xmm0 pxor %xmm2,%xmm6 paddd %xmm3,%xmm14 paddd %xmm3,%xmm15 movdqu %xmm0,(%rsi,%rdi,1) movdqu %xmm6,16(%rsi,%rdi,1) subq $2,%rcx leaq 32(%rdi),%rdi jnz L$ctr32_loop L$ctr32_done: L$ctr32_abort: ret .p2align 4 _vpaes_preheat: leaq L$k_s0F(%rip),%r10 movdqa -32(%r10),%xmm10 movdqa -16(%r10),%xmm11 movdqa 0(%r10),%xmm9 movdqa 48(%r10),%xmm13 movdqa 64(%r10),%xmm12 movdqa 80(%r10),%xmm15 movdqa 96(%r10),%xmm14 ret .section __DATA,__const .p2align 6 _vpaes_consts: L$k_inv: .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 L$k_s0F: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F L$k_ipt: .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 L$k_sb1: .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF L$k_sb2: .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A L$k_sbo: .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA L$k_mc_forward: .quad 
0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 L$k_mc_backward: .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F L$k_sr: .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 L$k_rcon: .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 L$k_s63: .quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B L$k_opt: .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 L$k_deskew: .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 L$k_dksd: .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E L$k_dksb: .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 L$k_dkse: .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 L$k_dks9: .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE L$k_dipt: .quad 0x0F505B040B545F00, 0x154A411E114E451A .quad 0x86E383E660056500, 0x12771772F491F194 L$k_dsb9: .quad 0x851C03539A86D600, 0xCAD51F504F994CC9 .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 L$k_dsbd: .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 L$k_dsbb: .quad 0xD022649296B44200, 0x602646F6B0F2D404 .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B L$k_dsbe: .quad 0x46F2929626D4D000, 0x2242600464B4F6B0 .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 L$k_dsbo: .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C L$rev_ctr: .quad 0x0706050403020100, 0x0c0d0e0f0b0a0908 L$ctr_add_one: .quad 0x0000000000000000, 0x0000000100000000 L$ctr_add_two: 
.quad 0x0000000000000000, 0x0000000200000000 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .p2align 6 .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/vpaes-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .type _vpaes_encrypt_core,@function .align 16 _vpaes_encrypt_core: .cfi_startproc movq %rdx,%r9 movq $16,%r11 movl 240(%rdx),%eax movdqa %xmm9,%xmm1 movdqa .Lk_ipt(%rip),%xmm2 pandn %xmm0,%xmm1 movdqu (%r9),%xmm5 psrld $4,%xmm1 pand %xmm9,%xmm0 .byte 102,15,56,0,208 movdqa .Lk_ipt+16(%rip),%xmm0 .byte 102,15,56,0,193 pxor %xmm5,%xmm2 addq $16,%r9 pxor %xmm2,%xmm0 leaq .Lk_mc_backward(%rip),%r10 jmp .Lenc_entry .align 16 .Lenc_loop: movdqa %xmm13,%xmm4 movdqa %xmm12,%xmm0 .byte 102,15,56,0,226 .byte 102,15,56,0,195 pxor %xmm5,%xmm4 movdqa %xmm15,%xmm5 pxor %xmm4,%xmm0 movdqa -64(%r11,%r10,1),%xmm1 .byte 102,15,56,0,234 movdqa (%r11,%r10,1),%xmm4 movdqa %xmm14,%xmm2 .byte 102,15,56,0,211 movdqa %xmm0,%xmm3 pxor %xmm5,%xmm2 .byte 102,15,56,0,193 addq $16,%r9 pxor %xmm2,%xmm0 .byte 102,15,56,0,220 addq $16,%r11 pxor %xmm0,%xmm3 .byte 102,15,56,0,193 andq $0x30,%r11 subq $1,%rax pxor %xmm3,%xmm0 .Lenc_entry: movdqa %xmm9,%xmm1 movdqa %xmm11,%xmm5 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm0 .byte 102,15,56,0,232 movdqa %xmm10,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm10,%xmm4 pxor %xmm5,%xmm3 .byte 102,15,56,0,224 movdqa %xmm10,%xmm2 pxor 
%xmm5,%xmm4 .byte 102,15,56,0,211 movdqa %xmm10,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu (%r9),%xmm5 pxor %xmm1,%xmm3 jnz .Lenc_loop movdqa -96(%r10),%xmm4 movdqa -80(%r10),%xmm0 .byte 102,15,56,0,226 pxor %xmm5,%xmm4 .byte 102,15,56,0,195 movdqa 64(%r11,%r10,1),%xmm1 pxor %xmm4,%xmm0 .byte 102,15,56,0,193 ret .cfi_endproc .size _vpaes_encrypt_core,.-_vpaes_encrypt_core .type _vpaes_encrypt_core_2x,@function .align 16 _vpaes_encrypt_core_2x: .cfi_startproc movq %rdx,%r9 movq $16,%r11 movl 240(%rdx),%eax movdqa %xmm9,%xmm1 movdqa %xmm9,%xmm7 movdqa .Lk_ipt(%rip),%xmm2 movdqa %xmm2,%xmm8 pandn %xmm0,%xmm1 pandn %xmm6,%xmm7 movdqu (%r9),%xmm5 psrld $4,%xmm1 psrld $4,%xmm7 pand %xmm9,%xmm0 pand %xmm9,%xmm6 .byte 102,15,56,0,208 .byte 102,68,15,56,0,198 movdqa .Lk_ipt+16(%rip),%xmm0 movdqa %xmm0,%xmm6 .byte 102,15,56,0,193 .byte 102,15,56,0,247 pxor %xmm5,%xmm2 pxor %xmm5,%xmm8 addq $16,%r9 pxor %xmm2,%xmm0 pxor %xmm8,%xmm6 leaq .Lk_mc_backward(%rip),%r10 jmp .Lenc2x_entry .align 16 .Lenc2x_loop: movdqa .Lk_sb1(%rip),%xmm4 movdqa .Lk_sb1+16(%rip),%xmm0 movdqa %xmm4,%xmm12 movdqa %xmm0,%xmm6 .byte 102,15,56,0,226 .byte 102,69,15,56,0,224 .byte 102,15,56,0,195 .byte 102,65,15,56,0,243 pxor %xmm5,%xmm4 pxor %xmm5,%xmm12 movdqa .Lk_sb2(%rip),%xmm5 movdqa %xmm5,%xmm13 pxor %xmm4,%xmm0 pxor %xmm12,%xmm6 movdqa -64(%r11,%r10,1),%xmm1 .byte 102,15,56,0,234 .byte 102,69,15,56,0,232 movdqa (%r11,%r10,1),%xmm4 movdqa .Lk_sb2+16(%rip),%xmm2 movdqa %xmm2,%xmm8 .byte 102,15,56,0,211 .byte 102,69,15,56,0,195 movdqa %xmm0,%xmm3 movdqa %xmm6,%xmm11 pxor %xmm5,%xmm2 pxor %xmm13,%xmm8 .byte 102,15,56,0,193 .byte 102,15,56,0,241 addq $16,%r9 pxor %xmm2,%xmm0 pxor %xmm8,%xmm6 .byte 102,15,56,0,220 .byte 102,68,15,56,0,220 addq $16,%r11 pxor %xmm0,%xmm3 pxor %xmm6,%xmm11 .byte 102,15,56,0,193 .byte 102,15,56,0,241 andq $0x30,%r11 subq $1,%rax pxor %xmm3,%xmm0 pxor %xmm11,%xmm6 .Lenc2x_entry: movdqa %xmm9,%xmm1 movdqa %xmm9,%xmm7 movdqa .Lk_inv+16(%rip),%xmm5 movdqa %xmm5,%xmm13 
pandn %xmm0,%xmm1 pandn %xmm6,%xmm7 psrld $4,%xmm1 psrld $4,%xmm7 pand %xmm9,%xmm0 pand %xmm9,%xmm6 .byte 102,15,56,0,232 .byte 102,68,15,56,0,238 movdqa %xmm10,%xmm3 movdqa %xmm10,%xmm11 pxor %xmm1,%xmm0 pxor %xmm7,%xmm6 .byte 102,15,56,0,217 .byte 102,68,15,56,0,223 movdqa %xmm10,%xmm4 movdqa %xmm10,%xmm12 pxor %xmm5,%xmm3 pxor %xmm13,%xmm11 .byte 102,15,56,0,224 .byte 102,68,15,56,0,230 movdqa %xmm10,%xmm2 movdqa %xmm10,%xmm8 pxor %xmm5,%xmm4 pxor %xmm13,%xmm12 .byte 102,15,56,0,211 .byte 102,69,15,56,0,195 movdqa %xmm10,%xmm3 movdqa %xmm10,%xmm11 pxor %xmm0,%xmm2 pxor %xmm6,%xmm8 .byte 102,15,56,0,220 .byte 102,69,15,56,0,220 movdqu (%r9),%xmm5 pxor %xmm1,%xmm3 pxor %xmm7,%xmm11 jnz .Lenc2x_loop movdqa -96(%r10),%xmm4 movdqa -80(%r10),%xmm0 movdqa %xmm4,%xmm12 movdqa %xmm0,%xmm6 .byte 102,15,56,0,226 .byte 102,69,15,56,0,224 pxor %xmm5,%xmm4 pxor %xmm5,%xmm12 .byte 102,15,56,0,195 .byte 102,65,15,56,0,243 movdqa 64(%r11,%r10,1),%xmm1 pxor %xmm4,%xmm0 pxor %xmm12,%xmm6 .byte 102,15,56,0,193 .byte 102,15,56,0,241 ret .cfi_endproc .size _vpaes_encrypt_core_2x,.-_vpaes_encrypt_core_2x .type _vpaes_decrypt_core,@function .align 16 _vpaes_decrypt_core: .cfi_startproc movq %rdx,%r9 movl 240(%rdx),%eax movdqa %xmm9,%xmm1 movdqa .Lk_dipt(%rip),%xmm2 pandn %xmm0,%xmm1 movq %rax,%r11 psrld $4,%xmm1 movdqu (%r9),%xmm5 shlq $4,%r11 pand %xmm9,%xmm0 .byte 102,15,56,0,208 movdqa .Lk_dipt+16(%rip),%xmm0 xorq $0x30,%r11 leaq .Lk_dsbd(%rip),%r10 .byte 102,15,56,0,193 andq $0x30,%r11 pxor %xmm5,%xmm2 movdqa .Lk_mc_forward+48(%rip),%xmm5 pxor %xmm2,%xmm0 addq $16,%r9 addq %r10,%r11 jmp .Ldec_entry .align 16 .Ldec_loop: movdqa -32(%r10),%xmm4 movdqa -16(%r10),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa 0(%r10),%xmm4 pxor %xmm1,%xmm0 movdqa 16(%r10),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa 32(%r10),%xmm4 pxor %xmm1,%xmm0 movdqa 48(%r10),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 
102,15,56,0,203 pxor %xmm4,%xmm0 movdqa 64(%r10),%xmm4 pxor %xmm1,%xmm0 movdqa 80(%r10),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 addq $16,%r9 .byte 102,15,58,15,237,12 pxor %xmm1,%xmm0 subq $1,%rax .Ldec_entry: movdqa %xmm9,%xmm1 pandn %xmm0,%xmm1 movdqa %xmm11,%xmm2 psrld $4,%xmm1 pand %xmm9,%xmm0 .byte 102,15,56,0,208 movdqa %xmm10,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm10,%xmm4 pxor %xmm2,%xmm3 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm10,%xmm2 .byte 102,15,56,0,211 movdqa %xmm10,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu (%r9),%xmm0 pxor %xmm1,%xmm3 jnz .Ldec_loop movdqa 96(%r10),%xmm4 .byte 102,15,56,0,226 pxor %xmm0,%xmm4 movdqa 112(%r10),%xmm0 movdqa -352(%r11),%xmm2 .byte 102,15,56,0,195 pxor %xmm4,%xmm0 .byte 102,15,56,0,194 ret .cfi_endproc .size _vpaes_decrypt_core,.-_vpaes_decrypt_core .type _vpaes_schedule_core,@function .align 16 _vpaes_schedule_core: .cfi_startproc call _vpaes_preheat movdqa .Lk_rcon(%rip),%xmm8 movdqu (%rdi),%xmm0 movdqa %xmm0,%xmm3 leaq .Lk_ipt(%rip),%r11 call _vpaes_schedule_transform movdqa %xmm0,%xmm7 leaq .Lk_sr(%rip),%r10 testq %rcx,%rcx jnz .Lschedule_am_decrypting movdqu %xmm0,(%rdx) jmp .Lschedule_go .Lschedule_am_decrypting: movdqa (%r8,%r10,1),%xmm1 .byte 102,15,56,0,217 movdqu %xmm3,(%rdx) xorq $0x30,%r8 .Lschedule_go: cmpl $192,%esi ja .Lschedule_256 je .Lschedule_192 .Lschedule_128: movl $10,%esi .Loop_schedule_128: call _vpaes_schedule_round decq %rsi jz .Lschedule_mangle_last call _vpaes_schedule_mangle jmp .Loop_schedule_128 .align 16 .Lschedule_192: movdqu 8(%rdi),%xmm0 call _vpaes_schedule_transform movdqa %xmm0,%xmm6 pxor %xmm4,%xmm4 movhlps %xmm4,%xmm6 movl $4,%esi .Loop_schedule_192: call _vpaes_schedule_round .byte 102,15,58,15,198,8 call _vpaes_schedule_mangle call _vpaes_schedule_192_smear call _vpaes_schedule_mangle call _vpaes_schedule_round decq %rsi jz .Lschedule_mangle_last call _vpaes_schedule_mangle call 
_vpaes_schedule_192_smear jmp .Loop_schedule_192 .align 16 .Lschedule_256: movdqu 16(%rdi),%xmm0 call _vpaes_schedule_transform movl $7,%esi .Loop_schedule_256: call _vpaes_schedule_mangle movdqa %xmm0,%xmm6 call _vpaes_schedule_round decq %rsi jz .Lschedule_mangle_last call _vpaes_schedule_mangle pshufd $0xFF,%xmm0,%xmm0 movdqa %xmm7,%xmm5 movdqa %xmm6,%xmm7 call _vpaes_schedule_low_round movdqa %xmm5,%xmm7 jmp .Loop_schedule_256 .align 16 .Lschedule_mangle_last: leaq .Lk_deskew(%rip),%r11 testq %rcx,%rcx jnz .Lschedule_mangle_last_dec movdqa (%r8,%r10,1),%xmm1 .byte 102,15,56,0,193 leaq .Lk_opt(%rip),%r11 addq $32,%rdx .Lschedule_mangle_last_dec: addq $-16,%rdx pxor .Lk_s63(%rip),%xmm0 call _vpaes_schedule_transform movdqu %xmm0,(%rdx) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 ret .cfi_endproc .size _vpaes_schedule_core,.-_vpaes_schedule_core .type _vpaes_schedule_192_smear,@function .align 16 _vpaes_schedule_192_smear: .cfi_startproc pshufd $0x80,%xmm6,%xmm1 pshufd $0xFE,%xmm7,%xmm0 pxor %xmm1,%xmm6 pxor %xmm1,%xmm1 pxor %xmm0,%xmm6 movdqa %xmm6,%xmm0 movhlps %xmm1,%xmm6 ret .cfi_endproc .size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear .type _vpaes_schedule_round,@function .align 16 _vpaes_schedule_round: .cfi_startproc pxor %xmm1,%xmm1 .byte 102,65,15,58,15,200,15 .byte 102,69,15,58,15,192,15 pxor %xmm1,%xmm7 pshufd $0xFF,%xmm0,%xmm0 .byte 102,15,58,15,192,1 _vpaes_schedule_low_round: movdqa %xmm7,%xmm1 pslldq $4,%xmm7 pxor %xmm1,%xmm7 movdqa %xmm7,%xmm1 pslldq $8,%xmm7 pxor %xmm1,%xmm7 pxor .Lk_s63(%rip),%xmm7 movdqa %xmm9,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm0 movdqa %xmm11,%xmm2 .byte 102,15,56,0,208 pxor %xmm1,%xmm0 movdqa %xmm10,%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 movdqa %xmm10,%xmm4 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm10,%xmm2 .byte 102,15,56,0,211 pxor %xmm0,%xmm2 movdqa %xmm10,%xmm3 .byte 102,15,56,0,220 pxor 
%xmm1,%xmm3 movdqa %xmm13,%xmm4 .byte 102,15,56,0,226 movdqa %xmm12,%xmm0 .byte 102,15,56,0,195 pxor %xmm4,%xmm0 pxor %xmm7,%xmm0 movdqa %xmm0,%xmm7 ret .cfi_endproc .size _vpaes_schedule_round,.-_vpaes_schedule_round .type _vpaes_schedule_transform,@function .align 16 _vpaes_schedule_transform: .cfi_startproc movdqa %xmm9,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm0 movdqa (%r11),%xmm2 .byte 102,15,56,0,208 movdqa 16(%r11),%xmm0 .byte 102,15,56,0,193 pxor %xmm2,%xmm0 ret .cfi_endproc .size _vpaes_schedule_transform,.-_vpaes_schedule_transform .type _vpaes_schedule_mangle,@function .align 16 _vpaes_schedule_mangle: .cfi_startproc movdqa %xmm0,%xmm4 movdqa .Lk_mc_forward(%rip),%xmm5 testq %rcx,%rcx jnz .Lschedule_mangle_dec addq $16,%rdx pxor .Lk_s63(%rip),%xmm4 .byte 102,15,56,0,229 movdqa %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 jmp .Lschedule_mangle_both .align 16 .Lschedule_mangle_dec: leaq .Lk_dksd(%rip),%r11 movdqa %xmm9,%xmm1 pandn %xmm4,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm4 movdqa 0(%r11),%xmm2 .byte 102,15,56,0,212 movdqa 16(%r11),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 32(%r11),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 48(%r11),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 64(%r11),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 80(%r11),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 96(%r11),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 112(%r11),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 addq $-16,%rdx .Lschedule_mangle_both: movdqa (%r8,%r10,1),%xmm1 .byte 102,15,56,0,217 addq $-16,%r8 andq $0x30,%r8 movdqu %xmm3,(%rdx) ret .cfi_endproc .size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle .globl vpaes_set_encrypt_key .hidden vpaes_set_encrypt_key .type vpaes_set_encrypt_key,@function .align 16 vpaes_set_encrypt_key: .cfi_startproc _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST .extern 
BORINGSSL_function_hit .hidden BORINGSSL_function_hit movb $1,BORINGSSL_function_hit+5(%rip) #endif movl %esi,%eax shrl $5,%eax addl $5,%eax movl %eax,240(%rdx) movl $0,%ecx movl $0x30,%r8d call _vpaes_schedule_core xorl %eax,%eax ret .cfi_endproc .size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key .globl vpaes_set_decrypt_key .hidden vpaes_set_decrypt_key .type vpaes_set_decrypt_key,@function .align 16 vpaes_set_decrypt_key: .cfi_startproc _CET_ENDBR movl %esi,%eax shrl $5,%eax addl $5,%eax movl %eax,240(%rdx) shll $4,%eax leaq 16(%rdx,%rax,1),%rdx movl $1,%ecx movl %esi,%r8d shrl $1,%r8d andl $32,%r8d xorl $32,%r8d call _vpaes_schedule_core xorl %eax,%eax ret .cfi_endproc .size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key .globl vpaes_encrypt .hidden vpaes_encrypt .type vpaes_encrypt,@function .align 16 vpaes_encrypt: .cfi_startproc _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST .extern BORINGSSL_function_hit .hidden BORINGSSL_function_hit movb $1,BORINGSSL_function_hit+4(%rip) #endif movdqu (%rdi),%xmm0 call _vpaes_preheat call _vpaes_encrypt_core movdqu %xmm0,(%rsi) ret .cfi_endproc .size vpaes_encrypt,.-vpaes_encrypt .globl vpaes_decrypt .hidden vpaes_decrypt .type vpaes_decrypt,@function .align 16 vpaes_decrypt: .cfi_startproc _CET_ENDBR movdqu (%rdi),%xmm0 call _vpaes_preheat call _vpaes_decrypt_core movdqu %xmm0,(%rsi) ret .cfi_endproc .size vpaes_decrypt,.-vpaes_decrypt .globl vpaes_cbc_encrypt .hidden vpaes_cbc_encrypt .type vpaes_cbc_encrypt,@function .align 16 vpaes_cbc_encrypt: .cfi_startproc _CET_ENDBR xchgq %rcx,%rdx subq $16,%rcx jc .Lcbc_abort movdqu (%r8),%xmm6 subq %rdi,%rsi call _vpaes_preheat cmpl $0,%r9d je .Lcbc_dec_loop jmp .Lcbc_enc_loop .align 16 .Lcbc_enc_loop: movdqu (%rdi),%xmm0 pxor %xmm6,%xmm0 call _vpaes_encrypt_core movdqa %xmm0,%xmm6 movdqu %xmm0,(%rsi,%rdi,1) leaq 16(%rdi),%rdi subq $16,%rcx jnc .Lcbc_enc_loop jmp .Lcbc_done .align 16 .Lcbc_dec_loop: movdqu (%rdi),%xmm0 movdqa %xmm0,%xmm7 call _vpaes_decrypt_core pxor %xmm6,%xmm0 
movdqa %xmm7,%xmm6 movdqu %xmm0,(%rsi,%rdi,1) leaq 16(%rdi),%rdi subq $16,%rcx jnc .Lcbc_dec_loop .Lcbc_done: movdqu %xmm6,(%r8) .Lcbc_abort: ret .cfi_endproc .size vpaes_cbc_encrypt,.-vpaes_cbc_encrypt .globl vpaes_ctr32_encrypt_blocks .hidden vpaes_ctr32_encrypt_blocks .type vpaes_ctr32_encrypt_blocks,@function .align 16 vpaes_ctr32_encrypt_blocks: .cfi_startproc _CET_ENDBR xchgq %rcx,%rdx testq %rcx,%rcx jz .Lctr32_abort movdqu (%r8),%xmm0 movdqa .Lctr_add_one(%rip),%xmm8 subq %rdi,%rsi call _vpaes_preheat movdqa %xmm0,%xmm6 pshufb .Lrev_ctr(%rip),%xmm6 testq $1,%rcx jz .Lctr32_prep_loop movdqu (%rdi),%xmm7 call _vpaes_encrypt_core pxor %xmm7,%xmm0 paddd %xmm8,%xmm6 movdqu %xmm0,(%rsi,%rdi,1) subq $1,%rcx leaq 16(%rdi),%rdi jz .Lctr32_done .Lctr32_prep_loop: movdqa %xmm6,%xmm14 movdqa %xmm6,%xmm15 paddd %xmm8,%xmm15 .Lctr32_loop: movdqa .Lrev_ctr(%rip),%xmm1 movdqa %xmm14,%xmm0 movdqa %xmm15,%xmm6 .byte 102,15,56,0,193 .byte 102,15,56,0,241 call _vpaes_encrypt_core_2x movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 movdqa .Lctr_add_two(%rip),%xmm3 pxor %xmm1,%xmm0 pxor %xmm2,%xmm6 paddd %xmm3,%xmm14 paddd %xmm3,%xmm15 movdqu %xmm0,(%rsi,%rdi,1) movdqu %xmm6,16(%rsi,%rdi,1) subq $2,%rcx leaq 32(%rdi),%rdi jnz .Lctr32_loop .Lctr32_done: .Lctr32_abort: ret .cfi_endproc .size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks .type _vpaes_preheat,@function .align 16 _vpaes_preheat: .cfi_startproc leaq .Lk_s0F(%rip),%r10 movdqa -32(%r10),%xmm10 movdqa -16(%r10),%xmm11 movdqa 0(%r10),%xmm9 movdqa 48(%r10),%xmm13 movdqa 64(%r10),%xmm12 movdqa 80(%r10),%xmm15 movdqa 96(%r10),%xmm14 ret .cfi_endproc .size _vpaes_preheat,.-_vpaes_preheat .type _vpaes_consts,@object .section .rodata .align 64 _vpaes_consts: .Lk_inv: .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 .Lk_s0F: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F .Lk_ipt: .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 .Lk_sb1: .quad 
0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF .Lk_sb2: .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A .Lk_sbo: .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA .Lk_mc_forward: .quad 0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 .Lk_mc_backward: .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F .Lk_sr: .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 .Lk_rcon: .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 .Lk_s63: .quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B .Lk_opt: .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 .Lk_deskew: .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 .Lk_dksd: .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E .Lk_dksb: .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 .Lk_dkse: .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 .Lk_dks9: .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE .Lk_dipt: .quad 0x0F505B040B545F00, 0x154A411E114E451A .quad 0x86E383E660056500, 0x12771772F491F194 .Lk_dsb9: .quad 0x851C03539A86D600, 0xCAD51F504F994CC9 .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 .Lk_dsbd: .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 .Lk_dsbb: .quad 0xD022649296B44200, 0x602646F6B0F2D404 .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B .Lk_dsbe: .quad 
0x46F2929626D4D000, 0x2242600464B4F6B0 .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 .Lk_dsbo: .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C .Lrev_ctr: .quad 0x0706050403020100, 0x0c0d0e0f0b0a0908 .Lctr_add_one: .quad 0x0000000000000000, 0x0000000100000000 .Lctr_add_two: .quad 0x0000000000000000, 0x0000000200000000 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .align 64 .size _vpaes_consts,.-_vpaes_consts .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/x86-mont-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _bn_mul_mont .private_extern _bn_mul_mont .align 4 _bn_mul_mont: L_bn_mul_mont_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi xorl %eax,%eax movl 40(%esp),%edi cmpl $4,%edi jl L000just_leave leal 20(%esp),%esi leal 24(%esp),%edx addl $2,%edi negl %edi leal -32(%esp,%edi,4),%ebp negl %edi movl %ebp,%eax subl %edx,%eax andl $2047,%eax subl %eax,%ebp xorl %ebp,%edx andl $2048,%edx xorl $2048,%edx subl %edx,%ebp andl $-64,%ebp movl %esp,%eax subl %ebp,%eax andl $-4096,%eax movl %esp,%edx leal (%ebp,%eax,1),%esp movl (%esp),%eax cmpl %ebp,%esp ja L001page_walk jmp L002page_walk_done .align 4,0x90 L001page_walk: leal -4096(%esp),%esp movl (%esp),%eax cmpl %ebp,%esp ja L001page_walk L002page_walk_done: movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%ebp movl 16(%esi),%esi movl (%esi),%esi movl %eax,4(%esp) movl %ebx,8(%esp) movl %ecx,12(%esp) movl %ebp,16(%esp) movl %esi,20(%esp) leal -3(%edi),%ebx movl %edx,24(%esp) movl $-1,%eax movd %eax,%mm7 movl 8(%esp),%esi movl 12(%esp),%edi movl 16(%esp),%ebp xorl %edx,%edx xorl %ecx,%ecx movd (%edi),%mm4 movd (%esi),%mm5 movd (%ebp),%mm3 pmuludq %mm4,%mm5 movq %mm5,%mm2 movq %mm5,%mm0 pand %mm7,%mm0 pmuludq 20(%esp),%mm5 pmuludq %mm5,%mm3 paddq %mm0,%mm3 movd 4(%ebp),%mm1 movd 4(%esi),%mm0 psrlq $32,%mm2 psrlq $32,%mm3 incl %ecx .align 4,0x90 L0031st: pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 movd 4(%ebp,%ecx,4),%mm1 paddq %mm0,%mm3 movd 4(%esi,%ecx,4),%mm0 psrlq $32,%mm2 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm3 leal 1(%ecx),%ecx cmpl %ebx,%ecx jl L0031st pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 paddq %mm0,%mm3 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm2 psrlq $32,%mm3 paddq %mm2,%mm3 movq %mm3,32(%esp,%ebx,4) incl %edx L004outer: xorl %ecx,%ecx movd (%edi,%edx,4),%mm4 movd (%esi),%mm5 movd 
32(%esp),%mm6 movd (%ebp),%mm3 pmuludq %mm4,%mm5 paddq %mm6,%mm5 movq %mm5,%mm0 movq %mm5,%mm2 pand %mm7,%mm0 pmuludq 20(%esp),%mm5 pmuludq %mm5,%mm3 paddq %mm0,%mm3 movd 36(%esp),%mm6 movd 4(%ebp),%mm1 movd 4(%esi),%mm0 psrlq $32,%mm2 psrlq $32,%mm3 paddq %mm6,%mm2 incl %ecx decl %ebx L005inner: pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 movd 36(%esp,%ecx,4),%mm6 pand %mm7,%mm0 movd 4(%ebp,%ecx,4),%mm1 paddq %mm0,%mm3 movd 4(%esi,%ecx,4),%mm0 psrlq $32,%mm2 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm3 paddq %mm6,%mm2 decl %ebx leal 1(%ecx),%ecx jnz L005inner movl %ecx,%ebx pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 paddq %mm0,%mm3 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm2 psrlq $32,%mm3 movd 36(%esp,%ebx,4),%mm6 paddq %mm2,%mm3 paddq %mm6,%mm3 movq %mm3,32(%esp,%ebx,4) leal 1(%edx),%edx cmpl %ebx,%edx jle L004outer emms jmp L006common_tail .align 4,0x90 L006common_tail: movl 16(%esp),%ebp movl 4(%esp),%edi leal 32(%esp),%esi movl (%esi),%eax movl %ebx,%ecx xorl %edx,%edx .align 4,0x90 L007sub: sbbl (%ebp,%edx,4),%eax movl %eax,(%edi,%edx,4) decl %ecx movl 4(%esi,%edx,4),%eax leal 1(%edx),%edx jge L007sub sbbl $0,%eax movl $-1,%edx xorl %eax,%edx jmp L008copy .align 4,0x90 L008copy: movl 32(%esp,%ebx,4),%esi movl (%edi,%ebx,4),%ebp movl %ecx,32(%esp,%ebx,4) andl %eax,%esi andl %edx,%ebp orl %esi,%ebp movl %ebp,(%edi,%ebx,4) decl %ebx jge L008copy movl 24(%esp),%esp movl $1,%eax L000just_leave: popl %edi popl %esi popl %ebx popl %ebp ret .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105 .byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56 .byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121 .byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46 .byte 111,114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif 
================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/x86-mont-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl bn_mul_mont .hidden bn_mul_mont .type bn_mul_mont,@function .align 16 bn_mul_mont: .L_bn_mul_mont_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi xorl %eax,%eax movl 40(%esp),%edi cmpl $4,%edi jl .L000just_leave leal 20(%esp),%esi leal 24(%esp),%edx addl $2,%edi negl %edi leal -32(%esp,%edi,4),%ebp negl %edi movl %ebp,%eax subl %edx,%eax andl $2047,%eax subl %eax,%ebp xorl %ebp,%edx andl $2048,%edx xorl $2048,%edx subl %edx,%ebp andl $-64,%ebp movl %esp,%eax subl %ebp,%eax andl $-4096,%eax movl %esp,%edx leal (%ebp,%eax,1),%esp movl (%esp),%eax cmpl %ebp,%esp ja .L001page_walk jmp .L002page_walk_done .align 16 .L001page_walk: leal -4096(%esp),%esp movl (%esp),%eax cmpl %ebp,%esp ja .L001page_walk .L002page_walk_done: movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%ebp movl 16(%esi),%esi movl (%esi),%esi movl %eax,4(%esp) movl %ebx,8(%esp) movl %ecx,12(%esp) movl %ebp,16(%esp) movl %esi,20(%esp) leal -3(%edi),%ebx movl %edx,24(%esp) movl $-1,%eax movd %eax,%mm7 movl 8(%esp),%esi movl 12(%esp),%edi movl 16(%esp),%ebp xorl %edx,%edx xorl %ecx,%ecx movd (%edi),%mm4 movd (%esi),%mm5 movd (%ebp),%mm3 pmuludq %mm4,%mm5 movq %mm5,%mm2 movq %mm5,%mm0 pand %mm7,%mm0 pmuludq 20(%esp),%mm5 pmuludq %mm5,%mm3 paddq %mm0,%mm3 movd 4(%ebp),%mm1 movd 4(%esi),%mm0 psrlq $32,%mm2 psrlq $32,%mm3 incl %ecx .align 16 .L0031st: pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 movd 4(%ebp,%ecx,4),%mm1 paddq %mm0,%mm3 movd 4(%esi,%ecx,4),%mm0 psrlq $32,%mm2 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm3 leal 1(%ecx),%ecx cmpl 
%ebx,%ecx jl .L0031st pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 paddq %mm0,%mm3 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm2 psrlq $32,%mm3 paddq %mm2,%mm3 movq %mm3,32(%esp,%ebx,4) incl %edx .L004outer: xorl %ecx,%ecx movd (%edi,%edx,4),%mm4 movd (%esi),%mm5 movd 32(%esp),%mm6 movd (%ebp),%mm3 pmuludq %mm4,%mm5 paddq %mm6,%mm5 movq %mm5,%mm0 movq %mm5,%mm2 pand %mm7,%mm0 pmuludq 20(%esp),%mm5 pmuludq %mm5,%mm3 paddq %mm0,%mm3 movd 36(%esp),%mm6 movd 4(%ebp),%mm1 movd 4(%esi),%mm0 psrlq $32,%mm2 psrlq $32,%mm3 paddq %mm6,%mm2 incl %ecx decl %ebx .L005inner: pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 movd 36(%esp,%ecx,4),%mm6 pand %mm7,%mm0 movd 4(%ebp,%ecx,4),%mm1 paddq %mm0,%mm3 movd 4(%esi,%ecx,4),%mm0 psrlq $32,%mm2 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm3 paddq %mm6,%mm2 decl %ebx leal 1(%ecx),%ecx jnz .L005inner movl %ecx,%ebx pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 paddq %mm0,%mm3 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm2 psrlq $32,%mm3 movd 36(%esp,%ebx,4),%mm6 paddq %mm2,%mm3 paddq %mm6,%mm3 movq %mm3,32(%esp,%ebx,4) leal 1(%edx),%edx cmpl %ebx,%edx jle .L004outer emms jmp .L006common_tail .align 16 .L006common_tail: movl 16(%esp),%ebp movl 4(%esp),%edi leal 32(%esp),%esi movl (%esi),%eax movl %ebx,%ecx xorl %edx,%edx .align 16 .L007sub: sbbl (%ebp,%edx,4),%eax movl %eax,(%edi,%edx,4) decl %ecx movl 4(%esi,%edx,4),%eax leal 1(%edx),%edx jge .L007sub sbbl $0,%eax movl $-1,%edx xorl %eax,%edx jmp .L008copy .align 16 .L008copy: movl 32(%esp,%ebx,4),%esi movl (%edi,%ebx,4),%ebp movl %ecx,32(%esp,%ebx,4) andl %eax,%esi andl %edx,%ebp orl %esi,%ebp movl %ebp,(%edi,%ebx,4) decl %ebx jge .L008copy movl 24(%esp),%esp movl $1,%eax .L000just_leave: popl %edi popl %esi popl %ebx popl %ebp ret .size bn_mul_mont,.-.L_bn_mul_mont_begin .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105 .byte 
112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56 .byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121 .byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46 .byte 111,114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/x86_64-mont-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _bn_mul_mont_nohw .private_extern _bn_mul_mont_nohw .p2align 4 _bn_mul_mont_nohw: _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 negq %r9 movq %rsp,%r11 leaq -16(%rsp,%r9,8),%r10 negq %r9 andq $-1024,%r10 subq %r10,%r11 andq $-4096,%r11 leaq (%r10,%r11,1),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja L$mul_page_walk jmp L$mul_page_walk_done .p2align 4 L$mul_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja L$mul_page_walk L$mul_page_walk_done: movq %rax,8(%rsp,%r9,8) L$mul_body: movq %rdx,%r12 movq (%r8),%r8 movq (%r12),%rbx movq (%rsi),%rax xorq %r14,%r14 xorq %r15,%r15 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%r13 leaq 1(%r15),%r15 jmp L$1st_enter .p2align 4 L$1st: addq %rax,%r13 movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%r13 movq %r10,%r11 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 L$1st_enter: mulq %rbx addq %rax,%r11 movq (%rcx,%r15,8),%rax adcq $0,%rdx leaq 1(%r15),%r15 movq %rdx,%r10 mulq %rbp cmpq %r9,%r15 jne L$1st addq %rax,%r13 movq (%rsi),%rax adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq 
%r13,-16(%rsp,%r15,8) movq %rdx,%r13 movq %r10,%r11 xorq %rdx,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r9,8) movq %rdx,(%rsp,%r9,8) leaq 1(%r14),%r14 jmp L$outer .p2align 4 L$outer: movq (%r12,%r14,8),%rbx xorq %r15,%r15 movq %r8,%rbp movq (%rsp),%r10 mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq 8(%rsp),%r10 movq %rdx,%r13 leaq 1(%r15),%r15 jmp L$inner_enter .p2align 4 L$inner: addq %rax,%r13 movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 movq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 L$inner_enter: mulq %rbx addq %rax,%r11 movq (%rcx,%r15,8),%rax adcq $0,%rdx addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 leaq 1(%r15),%r15 mulq %rbp cmpq %r9,%r15 jne L$inner addq %rax,%r13 movq (%rsi),%rax adcq $0,%rdx addq %r10,%r13 movq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 xorq %rdx,%rdx addq %r11,%r13 adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r9,8) movq %rdx,(%rsp,%r9,8) leaq 1(%r14),%r14 cmpq %r9,%r14 jb L$outer xorq %r14,%r14 movq (%rsp),%rax movq %r9,%r15 .p2align 4 L$sub: sbbq (%rcx,%r14,8),%rax movq %rax,(%rdi,%r14,8) movq 8(%rsp,%r14,8),%rax leaq 1(%r14),%r14 decq %r15 jnz L$sub sbbq $0,%rax movq $-1,%rbx xorq %rax,%rbx xorq %r14,%r14 movq %r9,%r15 L$copy: movq (%rdi,%r14,8),%rcx movq (%rsp,%r14,8),%rdx andq %rbx,%rcx andq %rax,%rdx movq %r9,(%rsp,%r14,8) orq %rcx,%rdx movq %rdx,(%rdi,%r14,8) leaq 1(%r14),%r14 subq $1,%r15 jnz L$copy movq 8(%rsp,%r9,8),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$mul_epilogue: ret .globl _bn_mul4x_mont .private_extern _bn_mul4x_mont .p2align 4 _bn_mul4x_mont: _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 negq %r9 movq %rsp,%r11 leaq -32(%rsp,%r9,8),%r10 negq %r9 andq 
$-1024,%r10 subq %r10,%r11 andq $-4096,%r11 leaq (%r10,%r11,1),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja L$mul4x_page_walk jmp L$mul4x_page_walk_done L$mul4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja L$mul4x_page_walk L$mul4x_page_walk_done: movq %rax,8(%rsp,%r9,8) L$mul4x_body: movq %rdi,16(%rsp,%r9,8) movq %rdx,%r12 movq (%r8),%r8 movq (%r12),%rbx movq (%rsi),%rax xorq %r14,%r14 xorq %r15,%r15 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi),%rax adcq $0,%rdx addq %r11,%rdi leaq 4(%r15),%r15 adcq $0,%rdx movq %rdi,(%rsp) movq %rdx,%r13 jmp L$1st4x .p2align 4 L$1st4x: mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq (%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx,%r15,8),%rax adcq $0,%rdx leaq 4(%r15),%r15 movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq -16(%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-32(%rsp,%r15,8) movq %rdx,%r13 cmpq %r9,%r15 jb L$1st4x mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx movq 
%rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi movq %r13,-8(%rsp,%r15,8) movq %rdi,(%rsp,%r15,8) leaq 1(%r14),%r14 .p2align 2 L$outer4x: movq (%r12,%r14,8),%rbx xorq %r15,%r15 movq (%rsp),%r10 movq %r8,%rbp mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx addq 8(%rsp),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi),%rax adcq $0,%rdx addq %r11,%rdi leaq 4(%r15),%r15 adcq $0,%rdx movq %rdi,(%rsp) movq %rdx,%r13 jmp L$inner4x .p2align 4 L$inner4x: mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx addq -16(%rsp,%r15,8),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx addq -8(%rsp,%r15,8),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq (%rcx,%r15,8),%rax adcq $0,%rdx addq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx,%r15,8),%rax adcq $0,%rdx addq 8(%rsp,%r15,8),%r11 adcq $0,%rdx leaq 4(%r15),%r15 movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq -16(%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-32(%rsp,%r15,8) movq %rdx,%r13 cmpq %r9,%r15 jb L$inner4x mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx addq -16(%rsp,%r15,8),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx 
addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx addq -8(%rsp,%r15,8),%r11 adcq $0,%rdx leaq 1(%r14),%r14 movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi addq (%rsp,%r9,8),%r13 adcq $0,%rdi movq %r13,-8(%rsp,%r15,8) movq %rdi,(%rsp,%r15,8) cmpq %r9,%r14 jb L$outer4x movq 16(%rsp,%r9,8),%rdi leaq -4(%r9),%r15 movq 0(%rsp),%rax movq 8(%rsp),%rdx shrq $2,%r15 leaq (%rsp),%rsi xorq %r14,%r14 subq 0(%rcx),%rax movq 16(%rsi),%rbx movq 24(%rsi),%rbp sbbq 8(%rcx),%rdx L$sub4x: movq %rax,0(%rdi,%r14,8) movq %rdx,8(%rdi,%r14,8) sbbq 16(%rcx,%r14,8),%rbx movq 32(%rsi,%r14,8),%rax movq 40(%rsi,%r14,8),%rdx sbbq 24(%rcx,%r14,8),%rbp movq %rbx,16(%rdi,%r14,8) movq %rbp,24(%rdi,%r14,8) sbbq 32(%rcx,%r14,8),%rax movq 48(%rsi,%r14,8),%rbx movq 56(%rsi,%r14,8),%rbp sbbq 40(%rcx,%r14,8),%rdx leaq 4(%r14),%r14 decq %r15 jnz L$sub4x movq %rax,0(%rdi,%r14,8) movq 32(%rsi,%r14,8),%rax sbbq 16(%rcx,%r14,8),%rbx movq %rdx,8(%rdi,%r14,8) sbbq 24(%rcx,%r14,8),%rbp movq %rbx,16(%rdi,%r14,8) sbbq $0,%rax movq %rbp,24(%rdi,%r14,8) pxor %xmm0,%xmm0 .byte 102,72,15,110,224 pcmpeqd %xmm5,%xmm5 pshufd $0,%xmm4,%xmm4 movq %r9,%r15 pxor %xmm4,%xmm5 shrq $2,%r15 xorl %eax,%eax jmp L$copy4x .p2align 4 L$copy4x: movdqa (%rsp,%rax,1),%xmm1 movdqu (%rdi,%rax,1),%xmm2 pand %xmm4,%xmm1 pand %xmm5,%xmm2 movdqa 16(%rsp,%rax,1),%xmm3 movdqa %xmm0,(%rsp,%rax,1) por %xmm2,%xmm1 movdqu 16(%rdi,%rax,1),%xmm2 movdqu %xmm1,(%rdi,%rax,1) pand %xmm4,%xmm3 pand %xmm5,%xmm2 movdqa %xmm0,16(%rsp,%rax,1) por %xmm2,%xmm3 movdqu %xmm3,16(%rdi,%rax,1) leaq 32(%rax),%rax decq %r15 jnz L$copy4x movq 8(%rsp,%r9,8),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$mul4x_epilogue: ret .globl _bn_sqr8x_mont 
.private_extern _bn_sqr8x_mont .p2align 5 _bn_sqr8x_mont: _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$sqr8x_prologue: movl %r9d,%r10d shll $3,%r9d shlq $3+2,%r10 negq %r9 leaq -64(%rsp,%r9,2),%r11 movq %rsp,%rbp movq (%r8),%r8 subq %rsi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb L$sqr8x_sp_alt subq %r11,%rbp leaq -64(%rbp,%r9,2),%rbp jmp L$sqr8x_sp_done .p2align 5 L$sqr8x_sp_alt: leaq 4096-64(,%r9,2),%r10 leaq -64(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp L$sqr8x_sp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$sqr8x_page_walk jmp L$sqr8x_page_walk_done .p2align 4 L$sqr8x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$sqr8x_page_walk L$sqr8x_page_walk_done: movq %r9,%r10 negq %r9 movq %r8,32(%rsp) movq %rax,40(%rsp) L$sqr8x_body: .byte 102,72,15,110,209 pxor %xmm0,%xmm0 .byte 102,72,15,110,207 .byte 102,73,15,110,218 testq %rdx,%rdx jz L$sqr8x_nox call _bn_sqrx8x_internal leaq (%r8,%rcx,1),%rbx movq %rcx,%r9 movq %rcx,%rdx .byte 102,72,15,126,207 sarq $3+2,%rcx jmp L$sqr8x_sub .p2align 5 L$sqr8x_nox: call _bn_sqr8x_internal leaq (%rdi,%r9,1),%rbx movq %r9,%rcx movq %r9,%rdx .byte 102,72,15,126,207 sarq $3+2,%rcx jmp L$sqr8x_sub .p2align 5 L$sqr8x_sub: movq 0(%rbx),%r12 movq 8(%rbx),%r13 movq 16(%rbx),%r14 movq 24(%rbx),%r15 leaq 32(%rbx),%rbx sbbq 0(%rbp),%r12 sbbq 8(%rbp),%r13 sbbq 16(%rbp),%r14 sbbq 24(%rbp),%r15 leaq 32(%rbp),%rbp movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r14,16(%rdi) movq %r15,24(%rdi) leaq 32(%rdi),%rdi incq %rcx jnz L$sqr8x_sub sbbq $0,%rax leaq (%rbx,%r9,1),%rbx leaq (%rdi,%r9,1),%rdi .byte 102,72,15,110,200 pxor %xmm0,%xmm0 pshufd $0,%xmm1,%xmm1 movq 40(%rsp),%rsi jmp L$sqr8x_cond_copy .p2align 5 L$sqr8x_cond_copy: movdqa 0(%rbx),%xmm2 movdqa 16(%rbx),%xmm3 leaq 32(%rbx),%rbx movdqu 0(%rdi),%xmm4 movdqu 16(%rdi),%xmm5 leaq 32(%rdi),%rdi 
movdqa %xmm0,-32(%rbx) movdqa %xmm0,-16(%rbx) movdqa %xmm0,-32(%rbx,%rdx,1) movdqa %xmm0,-16(%rbx,%rdx,1) pcmpeqd %xmm1,%xmm0 pand %xmm1,%xmm2 pand %xmm1,%xmm3 pand %xmm0,%xmm4 pand %xmm0,%xmm5 pxor %xmm0,%xmm0 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqu %xmm4,-32(%rdi) movdqu %xmm5,-16(%rdi) addq $32,%r9 jnz L$sqr8x_cond_copy movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$sqr8x_epilogue: ret .globl _bn_mulx4x_mont .private_extern _bn_mulx4x_mont .p2align 5 _bn_mulx4x_mont: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$mulx4x_prologue: shll $3,%r9d xorq %r10,%r10 subq %r9,%r10 movq (%r8),%r8 leaq -72(%rsp,%r10,1),%rbp andq $-128,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$mulx4x_page_walk jmp L$mulx4x_page_walk_done .p2align 4 L$mulx4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$mulx4x_page_walk L$mulx4x_page_walk_done: leaq (%rdx,%r9,1),%r10 movq %r9,0(%rsp) shrq $5,%r9 movq %r10,16(%rsp) subq $1,%r9 movq %r8,24(%rsp) movq %rdi,32(%rsp) movq %rax,40(%rsp) movq %r9,48(%rsp) jmp L$mulx4x_body .p2align 5 L$mulx4x_body: leaq 8(%rdx),%rdi movq (%rdx),%rdx leaq 64+32(%rsp),%rbx movq %rdx,%r9 mulxq 0(%rsi),%r8,%rax mulxq 8(%rsi),%r11,%r14 addq %rax,%r11 movq %rdi,8(%rsp) mulxq 16(%rsi),%r12,%r13 adcq %r14,%r12 adcq $0,%r13 movq %r8,%rdi imulq 24(%rsp),%r8 xorq %rbp,%rbp mulxq 24(%rsi),%rax,%r14 movq %r8,%rdx leaq 32(%rsi),%rsi adcxq %rax,%r13 adcxq %rbp,%r14 mulxq 0(%rcx),%rax,%r10 adcxq %rax,%rdi adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00 movq 48(%rsp),%rdi movq %r10,-32(%rbx) adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-24(%rbx) adcxq %rax,%r12 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r12,-16(%rbx) jmp L$mulx4x_1st 
.p2align 5 L$mulx4x_1st: adcxq %rbp,%r15 mulxq 0(%rsi),%r10,%rax adcxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 .byte 0x67,0x67 movq %r8,%rdx adcxq %rax,%r13 adcxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 movq %r11,-32(%rbx) adoxq %r15,%r13 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r13,-16(%rbx) decq %rdi jnz L$mulx4x_1st movq 0(%rsp),%rax movq 8(%rsp),%rdi adcq %rbp,%r15 addq %r15,%r14 sbbq %r15,%r15 movq %r14,-8(%rbx) jmp L$mulx4x_outer .p2align 5 L$mulx4x_outer: movq (%rdi),%rdx leaq 8(%rdi),%rdi subq %rax,%rsi movq %r15,(%rbx) leaq 64+32(%rsp),%rbx subq %rax,%rcx mulxq 0(%rsi),%r8,%r11 xorl %ebp,%ebp movq %rdx,%r9 mulxq 8(%rsi),%r14,%r12 adoxq -32(%rbx),%r8 adcxq %r14,%r11 mulxq 16(%rsi),%r15,%r13 adoxq -24(%rbx),%r11 adcxq %r15,%r12 adoxq -16(%rbx),%r12 adcxq %rbp,%r13 adoxq %rbp,%r13 movq %rdi,8(%rsp) movq %r8,%r15 imulq 24(%rsp),%r8 xorl %ebp,%ebp mulxq 24(%rsi),%rax,%r14 movq %r8,%rdx adcxq %rax,%r13 adoxq -8(%rbx),%r13 adcxq %rbp,%r14 leaq 32(%rsi),%rsi adoxq %rbp,%r14 mulxq 0(%rcx),%rax,%r10 adcxq %rax,%r15 adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 mulxq 16(%rcx),%rax,%r12 movq %r10,-32(%rbx) adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-24(%rbx) leaq 32(%rcx),%rcx adcxq %rax,%r12 adoxq %rbp,%r15 movq 48(%rsp),%rdi movq %r12,-16(%rbx) jmp L$mulx4x_inner .p2align 5 L$mulx4x_inner: mulxq 0(%rsi),%r10,%rax adcxq %rbp,%r15 adoxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq 0(%rbx),%r10 adoxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq 8(%rbx),%r11 adoxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 movq %r8,%rdx adcxq 16(%rbx),%r12 adoxq %rax,%r13 adcxq 24(%rbx),%r13 adoxq 
%rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adcxq %rbp,%r14 adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 adoxq %r15,%r13 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-32(%rbx) movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r13,-16(%rbx) decq %rdi jnz L$mulx4x_inner movq 0(%rsp),%rax movq 8(%rsp),%rdi adcq %rbp,%r15 subq 0(%rbx),%rbp adcq %r15,%r14 sbbq %r15,%r15 movq %r14,-8(%rbx) cmpq 16(%rsp),%rdi jne L$mulx4x_outer leaq 64(%rsp),%rbx subq %rax,%rcx negq %r15 movq %rax,%rdx shrq $3+2,%rax movq 32(%rsp),%rdi jmp L$mulx4x_sub .p2align 5 L$mulx4x_sub: movq 0(%rbx),%r11 movq 8(%rbx),%r12 movq 16(%rbx),%r13 movq 24(%rbx),%r14 leaq 32(%rbx),%rbx sbbq 0(%rcx),%r11 sbbq 8(%rcx),%r12 sbbq 16(%rcx),%r13 sbbq 24(%rcx),%r14 leaq 32(%rcx),%rcx movq %r11,0(%rdi) movq %r12,8(%rdi) movq %r13,16(%rdi) movq %r14,24(%rdi) leaq 32(%rdi),%rdi decq %rax jnz L$mulx4x_sub sbbq $0,%r15 leaq 64(%rsp),%rbx subq %rdx,%rdi .byte 102,73,15,110,207 pxor %xmm0,%xmm0 pshufd $0,%xmm1,%xmm1 movq 40(%rsp),%rsi jmp L$mulx4x_cond_copy .p2align 5 L$mulx4x_cond_copy: movdqa 0(%rbx),%xmm2 movdqa 16(%rbx),%xmm3 leaq 32(%rbx),%rbx movdqu 0(%rdi),%xmm4 movdqu 16(%rdi),%xmm5 leaq 32(%rdi),%rdi movdqa %xmm0,-32(%rbx) movdqa %xmm0,-16(%rbx) pcmpeqd %xmm1,%xmm0 pand %xmm1,%xmm2 pand %xmm1,%xmm3 pand %xmm0,%xmm4 pand %xmm0,%xmm5 pxor %xmm0,%xmm0 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqu %xmm4,-32(%rdi) movdqu %xmm5,-16(%rdi) subq $32,%rdx jnz L$mulx4x_cond_copy movq %rdx,(%rbx) movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$mulx4x_epilogue: ret .byte 
77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .p2align 4 #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/x86_64-mont-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl bn_mul_mont_nohw .hidden bn_mul_mont_nohw .type bn_mul_mont_nohw,@function .align 16 bn_mul_mont_nohw: .cfi_startproc _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 negq %r9 movq %rsp,%r11 leaq -16(%rsp,%r9,8),%r10 negq %r9 andq $-1024,%r10 subq %r10,%r11 andq $-4096,%r11 leaq (%r10,%r11,1),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja .Lmul_page_walk jmp .Lmul_page_walk_done .align 16 .Lmul_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja .Lmul_page_walk .Lmul_page_walk_done: movq %rax,8(%rsp,%r9,8) .cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08 .Lmul_body: movq %rdx,%r12 movq (%r8),%r8 movq (%r12),%rbx movq (%rsi),%rax xorq %r14,%r14 xorq %r15,%r15 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%r13 leaq 1(%r15),%r15 jmp .L1st_enter .align 16 .L1st: addq %rax,%r13 movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%r13 movq %r10,%r11 adcq $0,%rdx movq 
%r13,-16(%rsp,%r15,8) movq %rdx,%r13 .L1st_enter: mulq %rbx addq %rax,%r11 movq (%rcx,%r15,8),%rax adcq $0,%rdx leaq 1(%r15),%r15 movq %rdx,%r10 mulq %rbp cmpq %r9,%r15 jne .L1st addq %rax,%r13 movq (%rsi),%rax adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 movq %r10,%r11 xorq %rdx,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r9,8) movq %rdx,(%rsp,%r9,8) leaq 1(%r14),%r14 jmp .Louter .align 16 .Louter: movq (%r12,%r14,8),%rbx xorq %r15,%r15 movq %r8,%rbp movq (%rsp),%r10 mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq 8(%rsp),%r10 movq %rdx,%r13 leaq 1(%r15),%r15 jmp .Linner_enter .align 16 .Linner: addq %rax,%r13 movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 movq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 .Linner_enter: mulq %rbx addq %rax,%r11 movq (%rcx,%r15,8),%rax adcq $0,%rdx addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 leaq 1(%r15),%r15 mulq %rbp cmpq %r9,%r15 jne .Linner addq %rax,%r13 movq (%rsi),%rax adcq $0,%rdx addq %r10,%r13 movq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 xorq %rdx,%rdx addq %r11,%r13 adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r9,8) movq %rdx,(%rsp,%r9,8) leaq 1(%r14),%r14 cmpq %r9,%r14 jb .Louter xorq %r14,%r14 movq (%rsp),%rax movq %r9,%r15 .align 16 .Lsub: sbbq (%rcx,%r14,8),%rax movq %rax,(%rdi,%r14,8) movq 8(%rsp,%r14,8),%rax leaq 1(%r14),%r14 decq %r15 jnz .Lsub sbbq $0,%rax movq $-1,%rbx xorq %rax,%rbx xorq %r14,%r14 movq %r9,%r15 .Lcopy: movq (%rdi,%r14,8),%rcx movq (%rsp,%r14,8),%rdx andq %rbx,%rcx andq %rax,%rdx movq %r9,(%rsp,%r14,8) orq %rcx,%rdx movq %rdx,(%rdi,%r14,8) leaq 1(%r14),%r14 subq $1,%r15 jnz .Lcopy movq 8(%rsp,%r9,8),%rsi .cfi_def_cfa %rsi,8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 
.cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lmul_epilogue: ret .cfi_endproc .size bn_mul_mont_nohw,.-bn_mul_mont_nohw .globl bn_mul4x_mont .hidden bn_mul4x_mont .type bn_mul4x_mont,@function .align 16 bn_mul4x_mont: .cfi_startproc _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 negq %r9 movq %rsp,%r11 leaq -32(%rsp,%r9,8),%r10 negq %r9 andq $-1024,%r10 subq %r10,%r11 andq $-4096,%r11 leaq (%r10,%r11,1),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja .Lmul4x_page_walk jmp .Lmul4x_page_walk_done .Lmul4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja .Lmul4x_page_walk .Lmul4x_page_walk_done: movq %rax,8(%rsp,%r9,8) .cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08 .Lmul4x_body: movq %rdi,16(%rsp,%r9,8) movq %rdx,%r12 movq (%r8),%r8 movq (%r12),%rbx movq (%rsi),%rax xorq %r14,%r14 xorq %r15,%r15 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi),%rax adcq $0,%rdx addq %r11,%rdi leaq 4(%r15),%r15 adcq $0,%rdx movq %rdi,(%rsp) movq %rdx,%r13 jmp .L1st4x .align 16 .L1st4x: mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq 
(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx,%r15,8),%rax adcq $0,%rdx leaq 4(%r15),%r15 movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq -16(%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-32(%rsp,%r15,8) movq %rdx,%r13 cmpq %r9,%r15 jb .L1st4x mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi movq %r13,-8(%rsp,%r15,8) movq %rdi,(%rsp,%r15,8) leaq 1(%r14),%r14 .align 4 .Louter4x: movq (%r12,%r14,8),%rbx xorq %r15,%r15 movq (%rsp),%r10 movq %r8,%rbp mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx addq 8(%rsp),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi),%rax adcq $0,%rdx addq %r11,%rdi leaq 4(%r15),%r15 adcq $0,%rdx movq %rdi,(%rsp) movq %rdx,%r13 jmp .Linner4x .align 16 .Linner4x: mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx addq -16(%rsp,%r15,8),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx addq -8(%rsp,%r15,8),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 mulq %rbx 
addq %rax,%r10 movq (%rcx,%r15,8),%rax adcq $0,%rdx addq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx,%r15,8),%rax adcq $0,%rdx addq 8(%rsp,%r15,8),%r11 adcq $0,%rdx leaq 4(%r15),%r15 movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq -16(%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-32(%rsp,%r15,8) movq %rdx,%r13 cmpq %r9,%r15 jb .Linner4x mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx addq -16(%rsp,%r15,8),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx addq -8(%rsp,%r15,8),%r11 adcq $0,%rdx leaq 1(%r14),%r14 movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi addq (%rsp,%r9,8),%r13 adcq $0,%rdi movq %r13,-8(%rsp,%r15,8) movq %rdi,(%rsp,%r15,8) cmpq %r9,%r14 jb .Louter4x movq 16(%rsp,%r9,8),%rdi leaq -4(%r9),%r15 movq 0(%rsp),%rax movq 8(%rsp),%rdx shrq $2,%r15 leaq (%rsp),%rsi xorq %r14,%r14 subq 0(%rcx),%rax movq 16(%rsi),%rbx movq 24(%rsi),%rbp sbbq 8(%rcx),%rdx .Lsub4x: movq %rax,0(%rdi,%r14,8) movq %rdx,8(%rdi,%r14,8) sbbq 16(%rcx,%r14,8),%rbx movq 32(%rsi,%r14,8),%rax movq 40(%rsi,%r14,8),%rdx sbbq 24(%rcx,%r14,8),%rbp movq %rbx,16(%rdi,%r14,8) movq %rbp,24(%rdi,%r14,8) sbbq 32(%rcx,%r14,8),%rax movq 48(%rsi,%r14,8),%rbx movq 56(%rsi,%r14,8),%rbp sbbq 40(%rcx,%r14,8),%rdx leaq 4(%r14),%r14 decq %r15 jnz .Lsub4x movq %rax,0(%rdi,%r14,8) movq 32(%rsi,%r14,8),%rax sbbq 16(%rcx,%r14,8),%rbx movq %rdx,8(%rdi,%r14,8) sbbq 24(%rcx,%r14,8),%rbp movq %rbx,16(%rdi,%r14,8) sbbq $0,%rax movq %rbp,24(%rdi,%r14,8) pxor %xmm0,%xmm0 .byte 102,72,15,110,224 
pcmpeqd %xmm5,%xmm5 pshufd $0,%xmm4,%xmm4 movq %r9,%r15 pxor %xmm4,%xmm5 shrq $2,%r15 xorl %eax,%eax jmp .Lcopy4x .align 16 .Lcopy4x: movdqa (%rsp,%rax,1),%xmm1 movdqu (%rdi,%rax,1),%xmm2 pand %xmm4,%xmm1 pand %xmm5,%xmm2 movdqa 16(%rsp,%rax,1),%xmm3 movdqa %xmm0,(%rsp,%rax,1) por %xmm2,%xmm1 movdqu 16(%rdi,%rax,1),%xmm2 movdqu %xmm1,(%rdi,%rax,1) pand %xmm4,%xmm3 pand %xmm5,%xmm2 movdqa %xmm0,16(%rsp,%rax,1) por %xmm2,%xmm3 movdqu %xmm3,16(%rdi,%rax,1) leaq 32(%rax),%rax decq %r15 jnz .Lcopy4x movq 8(%rsp,%r9,8),%rsi .cfi_def_cfa %rsi, 8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lmul4x_epilogue: ret .cfi_endproc .size bn_mul4x_mont,.-bn_mul4x_mont .extern bn_sqrx8x_internal .hidden bn_sqrx8x_internal .extern bn_sqr8x_internal .hidden bn_sqr8x_internal .globl bn_sqr8x_mont .hidden bn_sqr8x_mont .type bn_sqr8x_mont,@function .align 32 bn_sqr8x_mont: .cfi_startproc _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 .Lsqr8x_prologue: movl %r9d,%r10d shll $3,%r9d shlq $3+2,%r10 negq %r9 leaq -64(%rsp,%r9,2),%r11 movq %rsp,%rbp movq (%r8),%r8 subq %rsi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb .Lsqr8x_sp_alt subq %r11,%rbp leaq -64(%rbp,%r9,2),%rbp jmp .Lsqr8x_sp_done .align 32 .Lsqr8x_sp_alt: leaq 4096-64(,%r9,2),%r10 leaq -64(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp .Lsqr8x_sp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lsqr8x_page_walk jmp .Lsqr8x_page_walk_done .align 16 .Lsqr8x_page_walk: leaq 
-4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lsqr8x_page_walk .Lsqr8x_page_walk_done: movq %r9,%r10 negq %r9 movq %r8,32(%rsp) movq %rax,40(%rsp) .cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 .Lsqr8x_body: .byte 102,72,15,110,209 pxor %xmm0,%xmm0 .byte 102,72,15,110,207 .byte 102,73,15,110,218 testq %rdx,%rdx jz .Lsqr8x_nox call bn_sqrx8x_internal leaq (%r8,%rcx,1),%rbx movq %rcx,%r9 movq %rcx,%rdx .byte 102,72,15,126,207 sarq $3+2,%rcx jmp .Lsqr8x_sub .align 32 .Lsqr8x_nox: call bn_sqr8x_internal leaq (%rdi,%r9,1),%rbx movq %r9,%rcx movq %r9,%rdx .byte 102,72,15,126,207 sarq $3+2,%rcx jmp .Lsqr8x_sub .align 32 .Lsqr8x_sub: movq 0(%rbx),%r12 movq 8(%rbx),%r13 movq 16(%rbx),%r14 movq 24(%rbx),%r15 leaq 32(%rbx),%rbx sbbq 0(%rbp),%r12 sbbq 8(%rbp),%r13 sbbq 16(%rbp),%r14 sbbq 24(%rbp),%r15 leaq 32(%rbp),%rbp movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r14,16(%rdi) movq %r15,24(%rdi) leaq 32(%rdi),%rdi incq %rcx jnz .Lsqr8x_sub sbbq $0,%rax leaq (%rbx,%r9,1),%rbx leaq (%rdi,%r9,1),%rdi .byte 102,72,15,110,200 pxor %xmm0,%xmm0 pshufd $0,%xmm1,%xmm1 movq 40(%rsp),%rsi .cfi_def_cfa %rsi,8 jmp .Lsqr8x_cond_copy .align 32 .Lsqr8x_cond_copy: movdqa 0(%rbx),%xmm2 movdqa 16(%rbx),%xmm3 leaq 32(%rbx),%rbx movdqu 0(%rdi),%xmm4 movdqu 16(%rdi),%xmm5 leaq 32(%rdi),%rdi movdqa %xmm0,-32(%rbx) movdqa %xmm0,-16(%rbx) movdqa %xmm0,-32(%rbx,%rdx,1) movdqa %xmm0,-16(%rbx,%rdx,1) pcmpeqd %xmm1,%xmm0 pand %xmm1,%xmm2 pand %xmm1,%xmm3 pand %xmm0,%xmm4 pand %xmm0,%xmm5 pxor %xmm0,%xmm0 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqu %xmm4,-32(%rdi) movdqu %xmm5,-16(%rdi) addq $32,%r9 jnz .Lsqr8x_cond_copy movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lsqr8x_epilogue: ret .cfi_endproc .size bn_sqr8x_mont,.-bn_sqr8x_mont .globl bn_mulx4x_mont .hidden 
bn_mulx4x_mont .type bn_mulx4x_mont,@function .align 32 bn_mulx4x_mont: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 .Lmulx4x_prologue: shll $3,%r9d xorq %r10,%r10 subq %r9,%r10 movq (%r8),%r8 leaq -72(%rsp,%r10,1),%rbp andq $-128,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lmulx4x_page_walk jmp .Lmulx4x_page_walk_done .align 16 .Lmulx4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lmulx4x_page_walk .Lmulx4x_page_walk_done: leaq (%rdx,%r9,1),%r10 movq %r9,0(%rsp) shrq $5,%r9 movq %r10,16(%rsp) subq $1,%r9 movq %r8,24(%rsp) movq %rdi,32(%rsp) movq %rax,40(%rsp) .cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 movq %r9,48(%rsp) jmp .Lmulx4x_body .align 32 .Lmulx4x_body: leaq 8(%rdx),%rdi movq (%rdx),%rdx leaq 64+32(%rsp),%rbx movq %rdx,%r9 mulxq 0(%rsi),%r8,%rax mulxq 8(%rsi),%r11,%r14 addq %rax,%r11 movq %rdi,8(%rsp) mulxq 16(%rsi),%r12,%r13 adcq %r14,%r12 adcq $0,%r13 movq %r8,%rdi imulq 24(%rsp),%r8 xorq %rbp,%rbp mulxq 24(%rsi),%rax,%r14 movq %r8,%rdx leaq 32(%rsi),%rsi adcxq %rax,%r13 adcxq %rbp,%r14 mulxq 0(%rcx),%rax,%r10 adcxq %rax,%rdi adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00 movq 48(%rsp),%rdi movq %r10,-32(%rbx) adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-24(%rbx) adcxq %rax,%r12 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r12,-16(%rbx) jmp .Lmulx4x_1st .align 32 .Lmulx4x_1st: adcxq %rbp,%r15 mulxq 0(%rsi),%r10,%rax adcxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 .byte 0x67,0x67 movq %r8,%rdx adcxq %rax,%r13 adcxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adoxq 
%r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 movq %r11,-32(%rbx) adoxq %r15,%r13 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r13,-16(%rbx) decq %rdi jnz .Lmulx4x_1st movq 0(%rsp),%rax movq 8(%rsp),%rdi adcq %rbp,%r15 addq %r15,%r14 sbbq %r15,%r15 movq %r14,-8(%rbx) jmp .Lmulx4x_outer .align 32 .Lmulx4x_outer: movq (%rdi),%rdx leaq 8(%rdi),%rdi subq %rax,%rsi movq %r15,(%rbx) leaq 64+32(%rsp),%rbx subq %rax,%rcx mulxq 0(%rsi),%r8,%r11 xorl %ebp,%ebp movq %rdx,%r9 mulxq 8(%rsi),%r14,%r12 adoxq -32(%rbx),%r8 adcxq %r14,%r11 mulxq 16(%rsi),%r15,%r13 adoxq -24(%rbx),%r11 adcxq %r15,%r12 adoxq -16(%rbx),%r12 adcxq %rbp,%r13 adoxq %rbp,%r13 movq %rdi,8(%rsp) movq %r8,%r15 imulq 24(%rsp),%r8 xorl %ebp,%ebp mulxq 24(%rsi),%rax,%r14 movq %r8,%rdx adcxq %rax,%r13 adoxq -8(%rbx),%r13 adcxq %rbp,%r14 leaq 32(%rsi),%rsi adoxq %rbp,%r14 mulxq 0(%rcx),%rax,%r10 adcxq %rax,%r15 adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 mulxq 16(%rcx),%rax,%r12 movq %r10,-32(%rbx) adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-24(%rbx) leaq 32(%rcx),%rcx adcxq %rax,%r12 adoxq %rbp,%r15 movq 48(%rsp),%rdi movq %r12,-16(%rbx) jmp .Lmulx4x_inner .align 32 .Lmulx4x_inner: mulxq 0(%rsi),%r10,%rax adcxq %rbp,%r15 adoxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq 0(%rbx),%r10 adoxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq 8(%rbx),%r11 adoxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 movq %r8,%rdx adcxq 16(%rbx),%r12 adoxq %rax,%r13 adcxq 24(%rbx),%r13 adoxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adcxq %rbp,%r14 adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 adoxq %r15,%r13 mulxq 24(%rcx),%rax,%r15 
movq %r9,%rdx movq %r11,-32(%rbx) movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r13,-16(%rbx) decq %rdi jnz .Lmulx4x_inner movq 0(%rsp),%rax movq 8(%rsp),%rdi adcq %rbp,%r15 subq 0(%rbx),%rbp adcq %r15,%r14 sbbq %r15,%r15 movq %r14,-8(%rbx) cmpq 16(%rsp),%rdi jne .Lmulx4x_outer leaq 64(%rsp),%rbx subq %rax,%rcx negq %r15 movq %rax,%rdx shrq $3+2,%rax movq 32(%rsp),%rdi jmp .Lmulx4x_sub .align 32 .Lmulx4x_sub: movq 0(%rbx),%r11 movq 8(%rbx),%r12 movq 16(%rbx),%r13 movq 24(%rbx),%r14 leaq 32(%rbx),%rbx sbbq 0(%rcx),%r11 sbbq 8(%rcx),%r12 sbbq 16(%rcx),%r13 sbbq 24(%rcx),%r14 leaq 32(%rcx),%rcx movq %r11,0(%rdi) movq %r12,8(%rdi) movq %r13,16(%rdi) movq %r14,24(%rdi) leaq 32(%rdi),%rdi decq %rax jnz .Lmulx4x_sub sbbq $0,%r15 leaq 64(%rsp),%rbx subq %rdx,%rdi .byte 102,73,15,110,207 pxor %xmm0,%xmm0 pshufd $0,%xmm1,%xmm1 movq 40(%rsp),%rsi .cfi_def_cfa %rsi,8 jmp .Lmulx4x_cond_copy .align 32 .Lmulx4x_cond_copy: movdqa 0(%rbx),%xmm2 movdqa 16(%rbx),%xmm3 leaq 32(%rbx),%rbx movdqu 0(%rdi),%xmm4 movdqu 16(%rdi),%xmm5 leaq 32(%rdi),%rdi movdqa %xmm0,-32(%rbx) movdqa %xmm0,-16(%rbx) pcmpeqd %xmm1,%xmm0 pand %xmm1,%xmm2 pand %xmm1,%xmm3 pand %xmm0,%xmm4 pand %xmm0,%xmm5 pxor %xmm0,%xmm0 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqu %xmm4,-32(%rdi) movdqu %xmm5,-16(%rdi) subq $32,%rdx jnz .Lmulx4x_cond_copy movq %rdx,(%rbx) movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lmulx4x_epilogue: ret .cfi_endproc .size bn_mulx4x_mont,.-bn_mulx4x_mont .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 16 #endif #if 
defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/x86_64-mont5-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _bn_mul_mont_gather5_nohw .private_extern _bn_mul_mont_gather5_nohw .p2align 6 _bn_mul_mont_gather5_nohw: _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax movd 8(%rsp),%xmm5 pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 negq %r9 movq %rsp,%r11 leaq -280(%rsp,%r9,8),%r10 negq %r9 andq $-1024,%r10 subq %r10,%r11 andq $-4096,%r11 leaq (%r10,%r11,1),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja L$mul_page_walk jmp L$mul_page_walk_done L$mul_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja L$mul_page_walk L$mul_page_walk_done: leaq L$inc(%rip),%r10 movq %rax,8(%rsp,%r9,8) L$mul_body: leaq 128(%rdx),%r12 movdqa 0(%r10),%xmm0 movdqa 16(%r10),%xmm1 leaq 24-112(%rsp,%r9,8),%r10 andq $-16,%r10 pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 movdqa %xmm1,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 .byte 0x67 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,112(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,128(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,144(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,160(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,176(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,192(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,208(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,224(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 
pcmpeqd %xmm5,%xmm1 movdqa %xmm0,240(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,256(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,272(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,288(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,304(%r10) paddd %xmm2,%xmm3 .byte 0x67 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,320(%r10) pcmpeqd %xmm5,%xmm3 movdqa %xmm2,336(%r10) pand 64(%r12),%xmm0 pand 80(%r12),%xmm1 pand 96(%r12),%xmm2 movdqa %xmm3,352(%r10) pand 112(%r12),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -128(%r12),%xmm4 movdqa -112(%r12),%xmm5 movdqa -96(%r12),%xmm2 pand 112(%r10),%xmm4 movdqa -80(%r12),%xmm3 pand 128(%r10),%xmm5 por %xmm4,%xmm0 pand 144(%r10),%xmm2 por %xmm5,%xmm1 pand 160(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -64(%r12),%xmm4 movdqa -48(%r12),%xmm5 movdqa -32(%r12),%xmm2 pand 176(%r10),%xmm4 movdqa -16(%r12),%xmm3 pand 192(%r10),%xmm5 por %xmm4,%xmm0 pand 208(%r10),%xmm2 por %xmm5,%xmm1 pand 224(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa 0(%r12),%xmm4 movdqa 16(%r12),%xmm5 movdqa 32(%r12),%xmm2 pand 240(%r10),%xmm4 movdqa 48(%r12),%xmm3 pand 256(%r10),%xmm5 por %xmm4,%xmm0 pand 272(%r10),%xmm2 por %xmm5,%xmm1 pand 288(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 por %xmm1,%xmm0 pshufd $0x4e,%xmm0,%xmm1 por %xmm1,%xmm0 leaq 256(%r12),%r12 .byte 102,72,15,126,195 movq (%r8),%r8 movq (%rsi),%rax xorq %r14,%r14 xorq %r15,%r15 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%r13 leaq 1(%r15),%r15 jmp L$1st_enter .p2align 4 L$1st: addq %rax,%r13 movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%r13 movq %r10,%r11 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 L$1st_enter: mulq %rbx addq %rax,%r11 movq (%rcx,%r15,8),%rax adcq $0,%rdx leaq 1(%r15),%r15 movq %rdx,%r10 mulq %rbp cmpq %r9,%r15 jne L$1st addq %rax,%r13 
adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,-16(%rsp,%r9,8) movq %rdx,%r13 movq %r10,%r11 xorq %rdx,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r9,8) movq %rdx,(%rsp,%r9,8) leaq 1(%r14),%r14 jmp L$outer .p2align 4 L$outer: leaq 24+128(%rsp,%r9,8),%rdx andq $-16,%rdx pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa -128(%r12),%xmm0 movdqa -112(%r12),%xmm1 movdqa -96(%r12),%xmm2 movdqa -80(%r12),%xmm3 pand -128(%rdx),%xmm0 pand -112(%rdx),%xmm1 por %xmm0,%xmm4 pand -96(%rdx),%xmm2 por %xmm1,%xmm5 pand -80(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%r12),%xmm0 movdqa -48(%r12),%xmm1 movdqa -32(%r12),%xmm2 movdqa -16(%r12),%xmm3 pand -64(%rdx),%xmm0 pand -48(%rdx),%xmm1 por %xmm0,%xmm4 pand -32(%rdx),%xmm2 por %xmm1,%xmm5 pand -16(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%r12),%xmm0 movdqa 16(%r12),%xmm1 movdqa 32(%r12),%xmm2 movdqa 48(%r12),%xmm3 pand 0(%rdx),%xmm0 pand 16(%rdx),%xmm1 por %xmm0,%xmm4 pand 32(%rdx),%xmm2 por %xmm1,%xmm5 pand 48(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%r12),%xmm0 movdqa 80(%r12),%xmm1 movdqa 96(%r12),%xmm2 movdqa 112(%r12),%xmm3 pand 64(%rdx),%xmm0 pand 80(%rdx),%xmm1 por %xmm0,%xmm4 pand 96(%rdx),%xmm2 por %xmm1,%xmm5 pand 112(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 leaq 256(%r12),%r12 movq (%rsi),%rax .byte 102,72,15,126,195 xorq %r15,%r15 movq %r8,%rbp movq (%rsp),%r10 mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq 8(%rsp),%r10 movq %rdx,%r13 leaq 1(%r15),%r15 jmp L$inner_enter .p2align 4 L$inner: addq %rax,%r13 movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 movq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 L$inner_enter: mulq %rbx addq %rax,%r11 movq (%rcx,%r15,8),%rax adcq $0,%rdx addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 leaq 1(%r15),%r15 mulq %rbp cmpq %r9,%r15 jne L$inner addq %rax,%r13 adcq 
$0,%rdx addq %r10,%r13 movq (%rsp,%r9,8),%r10 adcq $0,%rdx movq %r13,-16(%rsp,%r9,8) movq %rdx,%r13 xorq %rdx,%rdx addq %r11,%r13 adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r9,8) movq %rdx,(%rsp,%r9,8) leaq 1(%r14),%r14 cmpq %r9,%r14 jb L$outer xorq %r14,%r14 movq (%rsp),%rax leaq (%rsp),%rsi movq %r9,%r15 jmp L$sub .p2align 4 L$sub: sbbq (%rcx,%r14,8),%rax movq %rax,(%rdi,%r14,8) movq 8(%rsi,%r14,8),%rax leaq 1(%r14),%r14 decq %r15 jnz L$sub sbbq $0,%rax movq $-1,%rbx xorq %rax,%rbx xorq %r14,%r14 movq %r9,%r15 L$copy: movq (%rdi,%r14,8),%rcx movq (%rsp,%r14,8),%rdx andq %rbx,%rcx andq %rax,%rdx movq %r14,(%rsp,%r14,8) orq %rcx,%rdx movq %rdx,(%rdi,%r14,8) leaq 1(%r14),%r14 subq $1,%r15 jnz L$copy movq 8(%rsp,%r9,8),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$mul_epilogue: ret .globl _bn_mul4x_mont_gather5 .private_extern _bn_mul4x_mont_gather5 .p2align 5 _bn_mul4x_mont_gather5: _CET_ENDBR .byte 0x67 movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$mul4x_prologue: .byte 0x67 shll $3,%r9d leaq (%r9,%r9,2),%r10 negq %r9 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb L$mul4xsp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp L$mul4xsp_done .p2align 5 L$mul4xsp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp L$mul4xsp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$mul4x_page_walk jmp L$mul4x_page_walk_done L$mul4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$mul4x_page_walk L$mul4x_page_walk_done: negq %r9 movq %rax,40(%rsp) L$mul4x_body: call mul4x_internal movq 40(%rsp),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp 
movq -8(%rsi),%rbx leaq (%rsi),%rsp L$mul4x_epilogue: ret .p2align 5 mul4x_internal: shlq $5,%r9 movd 8(%rax),%xmm5 leaq L$inc(%rip),%rax leaq 128(%rdx,%r9,1),%r13 shrq $5,%r9 movdqa 0(%rax),%xmm0 movdqa 16(%rax),%xmm1 leaq 88-112(%rsp,%r9,1),%r10 leaq 128(%rdx),%r12 pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 .byte 0x67,0x67 movdqa %xmm1,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 .byte 0x67 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,112(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,128(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,144(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,160(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,176(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,192(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,208(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,224(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,240(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,256(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,272(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,288(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,304(%r10) paddd %xmm2,%xmm3 .byte 0x67 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,320(%r10) pcmpeqd %xmm5,%xmm3 movdqa %xmm2,336(%r10) pand 64(%r12),%xmm0 pand 80(%r12),%xmm1 pand 96(%r12),%xmm2 movdqa %xmm3,352(%r10) pand 112(%r12),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -128(%r12),%xmm4 movdqa -112(%r12),%xmm5 movdqa -96(%r12),%xmm2 pand 112(%r10),%xmm4 movdqa -80(%r12),%xmm3 pand 128(%r10),%xmm5 por %xmm4,%xmm0 pand 144(%r10),%xmm2 por %xmm5,%xmm1 pand 160(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -64(%r12),%xmm4 movdqa -48(%r12),%xmm5 movdqa -32(%r12),%xmm2 pand 
176(%r10),%xmm4 movdqa -16(%r12),%xmm3 pand 192(%r10),%xmm5 por %xmm4,%xmm0 pand 208(%r10),%xmm2 por %xmm5,%xmm1 pand 224(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa 0(%r12),%xmm4 movdqa 16(%r12),%xmm5 movdqa 32(%r12),%xmm2 pand 240(%r10),%xmm4 movdqa 48(%r12),%xmm3 pand 256(%r10),%xmm5 por %xmm4,%xmm0 pand 272(%r10),%xmm2 por %xmm5,%xmm1 pand 288(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 por %xmm1,%xmm0 pshufd $0x4e,%xmm0,%xmm1 por %xmm1,%xmm0 leaq 256(%r12),%r12 .byte 102,72,15,126,195 movq %r13,16+8(%rsp) movq %rdi,56+8(%rsp) movq (%r8),%r8 movq (%rsi),%rax leaq (%rsi,%r9,1),%rsi negq %r9 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp leaq 64+8(%rsp),%r14 movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi,%r9,1),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%r9),%r15 leaq 32(%rcx),%rcx adcq $0,%rdx movq %rdi,(%r14) movq %rdx,%r13 jmp L$1st4x .p2align 5 L$1st4x: mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%r14) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq 0(%rcx),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%rcx),%rcx adcq $0,%rdx movq %rdi,(%r14) movq %rdx,%r13 addq $32,%r15 jnz L$1st4x mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 
mulq %rbp addq %rax,%r13 movq -8(%rsi),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%r14) movq %rdx,%r13 leaq (%rcx,%r9,1),%rcx xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi movq %r13,-8(%r14) jmp L$outer4x .p2align 5 L$outer4x: leaq 16+128(%r14),%rdx pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa -128(%r12),%xmm0 movdqa -112(%r12),%xmm1 movdqa -96(%r12),%xmm2 movdqa -80(%r12),%xmm3 pand -128(%rdx),%xmm0 pand -112(%rdx),%xmm1 por %xmm0,%xmm4 pand -96(%rdx),%xmm2 por %xmm1,%xmm5 pand -80(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%r12),%xmm0 movdqa -48(%r12),%xmm1 movdqa -32(%r12),%xmm2 movdqa -16(%r12),%xmm3 pand -64(%rdx),%xmm0 pand -48(%rdx),%xmm1 por %xmm0,%xmm4 pand -32(%rdx),%xmm2 por %xmm1,%xmm5 pand -16(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%r12),%xmm0 movdqa 16(%r12),%xmm1 movdqa 32(%r12),%xmm2 movdqa 48(%r12),%xmm3 pand 0(%rdx),%xmm0 pand 16(%rdx),%xmm1 por %xmm0,%xmm4 pand 32(%rdx),%xmm2 por %xmm1,%xmm5 pand 48(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%r12),%xmm0 movdqa 80(%r12),%xmm1 movdqa 96(%r12),%xmm2 movdqa 112(%r12),%xmm3 pand 64(%rdx),%xmm0 pand 80(%rdx),%xmm1 por %xmm0,%xmm4 pand 96(%rdx),%xmm2 por %xmm1,%xmm5 pand 112(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 leaq 256(%r12),%r12 .byte 102,72,15,126,195 movq (%r14,%r9,1),%r10 movq %r8,%rbp mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 movq %rdi,(%r14) leaq (%r14,%r9,1),%r14 mulq %rbp addq %rax,%r10 movq 8(%rsi,%r9,1),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx addq 8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%r9),%r15 leaq 32(%rcx),%rcx 
adcq $0,%rdx movq %rdx,%r13 jmp L$inner4x .p2align 5 L$inner4x: mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax adcq $0,%rdx addq 16(%r14),%r10 leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %rdi,-32(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx),%rax adcq $0,%rdx addq -8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq 0(%rcx),%rax adcq $0,%rdx addq (%r14),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %rdi,-16(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx addq 8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%rcx),%rcx adcq $0,%rdx movq %r13,-8(%r14) movq %rdx,%r13 addq $32,%r15 jnz L$inner4x mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax adcq $0,%rdx addq 16(%r14),%r10 leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %rdi,-32(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq %rbp,%rax movq -8(%rcx),%rbp adcq $0,%rdx addq -8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%r13 movq %rdi,-16(%r14) leaq (%rcx,%r9,1),%rcx xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi addq (%r14),%r13 adcq $0,%rdi movq %r13,-8(%r14) cmpq 16+8(%rsp),%r12 jb L$outer4x xorq %rax,%rax subq %r13,%rbp adcq %r15,%r15 orq %r15,%rdi subq %rdi,%rax leaq (%r14,%r9,1),%rbx movq (%rcx),%r12 leaq (%rcx),%rbp movq %r9,%rcx sarq $3+2,%rcx movq 56+8(%rsp),%rdi decq %r12 xorq %r10,%r10 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp L$sqr4x_sub_entry .globl 
_bn_power5_nohw .private_extern _bn_power5_nohw .p2align 5 _bn_power5_nohw: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$power5_prologue: shll $3,%r9d leal (%r9,%r9,2),%r10d negq %r9 movq (%r8),%r8 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb L$pwr_sp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp L$pwr_sp_done .p2align 5 L$pwr_sp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp L$pwr_sp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$pwr_page_walk jmp L$pwr_page_walk_done L$pwr_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$pwr_page_walk L$pwr_page_walk_done: movq %r9,%r10 negq %r9 movq %r8,32(%rsp) movq %rax,40(%rsp) L$power5_body: .byte 102,72,15,110,207 .byte 102,72,15,110,209 .byte 102,73,15,110,218 .byte 102,72,15,110,226 call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal .byte 102,72,15,126,209 .byte 102,72,15,126,226 movq %rsi,%rdi movq 40(%rsp),%rax leaq 32(%rsp),%r8 call mul4x_internal movq 40(%rsp),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$power5_epilogue: ret .globl _bn_sqr8x_internal .private_extern _bn_sqr8x_internal .private_extern _bn_sqr8x_internal .p2align 5 _bn_sqr8x_internal: __bn_sqr8x_internal: _CET_ENDBR leaq 32(%r10),%rbp leaq (%rsi,%r9,1),%rsi movq %r9,%rcx movq -32(%rsi,%rbp,1),%r14 leaq 48+8(%rsp,%r9,2),%rdi movq -24(%rsi,%rbp,1),%rax leaq -32(%rdi,%rbp,1),%rdi movq -16(%rsi,%rbp,1),%rbx movq %rax,%r15 mulq %r14 movq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 
movq %r10,-24(%rdi,%rbp,1) mulq %r14 addq %rax,%r11 movq %rbx,%rax adcq $0,%rdx movq %r11,-16(%rdi,%rbp,1) movq %rdx,%r10 movq -8(%rsi,%rbp,1),%rbx mulq %r15 movq %rax,%r12 movq %rbx,%rax movq %rdx,%r13 leaq (%rbp),%rcx mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 movq %r10,-8(%rdi,%rcx,1) jmp L$sqr4x_1st .p2align 5 L$sqr4x_1st: movq (%rsi,%rcx,1),%rbx mulq %r15 addq %rax,%r13 movq %rbx,%rax movq %rdx,%r12 adcq $0,%r12 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq 8(%rsi,%rcx,1),%rbx movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %rbx,%rax movq %r11,(%rdi,%rcx,1) movq %rdx,%r13 adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax movq 16(%rsi,%rcx,1),%rbx movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 mulq %r15 addq %rax,%r13 movq %rbx,%rax movq %r10,8(%rdi,%rcx,1) movq %rdx,%r12 adcq $0,%r12 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq 24(%rsi,%rcx,1),%rbx movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %rbx,%rax movq %r11,16(%rdi,%rcx,1) movq %rdx,%r13 adcq $0,%r13 leaq 32(%rcx),%rcx mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 movq %r10,-8(%rdi,%rcx,1) cmpq $0,%rcx jne L$sqr4x_1st mulq %r15 addq %rax,%r13 leaq 16(%rbp),%rbp adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,(%rdi) movq %rdx,%r12 movq %rdx,8(%rdi) jmp L$sqr4x_outer .p2align 5 L$sqr4x_outer: movq -32(%rsi,%rbp,1),%r14 leaq 48+8(%rsp,%r9,2),%rdi movq -24(%rsi,%rbp,1),%rax leaq -32(%rdi,%rbp,1),%rdi movq -16(%rsi,%rbp,1),%rbx movq %rax,%r15 mulq %r14 movq -24(%rdi,%rbp,1),%r10 addq %rax,%r10 movq %rbx,%rax adcq $0,%rdx movq %r10,-24(%rdi,%rbp,1) movq %rdx,%r11 mulq %r14 addq %rax,%r11 movq %rbx,%rax adcq $0,%rdx addq -16(%rdi,%rbp,1),%r11 movq %rdx,%r10 adcq $0,%r10 movq %r11,-16(%rdi,%rbp,1) xorq %r12,%r12 movq -8(%rsi,%rbp,1),%rbx mulq %r15 addq %rax,%r12 movq %rbx,%rax adcq $0,%rdx addq -8(%rdi,%rbp,1),%r12 movq 
%rdx,%r13 adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax adcq $0,%rdx addq %r12,%r10 movq %rdx,%r11 adcq $0,%r11 movq %r10,-8(%rdi,%rbp,1) leaq (%rbp),%rcx jmp L$sqr4x_inner .p2align 5 L$sqr4x_inner: movq (%rsi,%rcx,1),%rbx mulq %r15 addq %rax,%r13 movq %rbx,%rax movq %rdx,%r12 adcq $0,%r12 addq (%rdi,%rcx,1),%r13 adcq $0,%r12 .byte 0x67 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq 8(%rsi,%rcx,1),%rbx movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %r11,(%rdi,%rcx,1) movq %rbx,%rax movq %rdx,%r13 adcq $0,%r13 addq 8(%rdi,%rcx,1),%r12 leaq 16(%rcx),%rcx adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax adcq $0,%rdx addq %r12,%r10 movq %rdx,%r11 adcq $0,%r11 movq %r10,-8(%rdi,%rcx,1) cmpq $0,%rcx jne L$sqr4x_inner .byte 0x67 mulq %r15 addq %rax,%r13 adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,(%rdi) movq %rdx,%r12 movq %rdx,8(%rdi) addq $16,%rbp jnz L$sqr4x_outer movq -32(%rsi),%r14 leaq 48+8(%rsp,%r9,2),%rdi movq -24(%rsi),%rax leaq -32(%rdi,%rbp,1),%rdi movq -16(%rsi),%rbx movq %rax,%r15 mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq %r10,-24(%rdi) movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 movq -8(%rsi),%rbx adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %rbx,%rax movq %r11,-16(%rdi) movq %rdx,%r13 adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 movq %r10,-8(%rdi) mulq %r15 addq %rax,%r13 movq -16(%rsi),%rax adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,(%rdi) movq %rdx,%r12 movq %rdx,8(%rdi) mulq %rbx addq $16,%rbp xorq %r14,%r14 subq %r9,%rbp xorq %r15,%r15 addq %r12,%rax adcq $0,%rdx movq %rax,8(%rdi) movq %rdx,16(%rdi) movq %r15,24(%rdi) movq -16(%rsi,%rbp,1),%rax leaq 48+8(%rsp),%rdi xorq %r10,%r10 movq 8(%rdi),%r11 leaq (%r14,%r10,2),%r12 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq 16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 24(%rdi),%r11 adcq 
%rax,%r12 movq -8(%rsi,%rbp,1),%rax movq %r12,(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,8(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 movq 32(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 40(%rdi),%r11 adcq %rax,%rbx movq 0(%rsi,%rbp,1),%rax movq %rbx,16(%rdi) adcq %rdx,%r8 leaq 16(%rbp),%rbp movq %r8,24(%rdi) sbbq %r15,%r15 leaq 64(%rdi),%rdi jmp L$sqr4x_shift_n_add .p2align 5 L$sqr4x_shift_n_add: leaq (%r14,%r10,2),%r12 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq -16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq -8(%rdi),%r11 adcq %rax,%r12 movq -8(%rsi,%rbp,1),%rax movq %r12,-32(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,-24(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 movq 0(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 8(%rdi),%r11 adcq %rax,%rbx movq 0(%rsi,%rbp,1),%rax movq %rbx,-16(%rdi) adcq %rdx,%r8 leaq (%r14,%r10,2),%r12 movq %r8,-8(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq 16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 24(%rdi),%r11 adcq %rax,%r12 movq 8(%rsi,%rbp,1),%rax movq %r12,0(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,8(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 movq 32(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 40(%rdi),%r11 adcq %rax,%rbx movq 16(%rsi,%rbp,1),%rax movq %rbx,16(%rdi) adcq %rdx,%r8 movq %r8,24(%rdi) sbbq %r15,%r15 leaq 64(%rdi),%rdi addq $32,%rbp jnz L$sqr4x_shift_n_add leaq (%r14,%r10,2),%r12 .byte 0x67 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq -16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq -8(%rdi),%r11 adcq %rax,%r12 movq -8(%rsi),%rax movq %r12,-32(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,-24(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 mulq %rax negq %r15 adcq %rax,%rbx 
adcq %rdx,%r8 movq %rbx,-16(%rdi) movq %r8,-8(%rdi) .byte 102,72,15,126,213 __bn_sqr8x_reduction: xorq %rax,%rax leaq (%r9,%rbp,1),%rcx leaq 48+8(%rsp,%r9,2),%rdx movq %rcx,0+8(%rsp) leaq 48+8(%rsp,%r9,1),%rdi movq %rdx,8+8(%rsp) negq %r9 jmp L$8x_reduction_loop .p2align 5 L$8x_reduction_loop: leaq (%rdi,%r9,1),%rdi .byte 0x66 movq 0(%rdi),%rbx movq 8(%rdi),%r9 movq 16(%rdi),%r10 movq 24(%rdi),%r11 movq 32(%rdi),%r12 movq 40(%rdi),%r13 movq 48(%rdi),%r14 movq 56(%rdi),%r15 movq %rax,(%rdx) leaq 64(%rdi),%rdi .byte 0x67 movq %rbx,%r8 imulq 32+8(%rsp),%rbx movq 0(%rbp),%rax movl $8,%ecx jmp L$8x_reduce .p2align 5 L$8x_reduce: mulq %rbx movq 8(%rbp),%rax negq %r8 movq %rdx,%r8 adcq $0,%r8 mulq %rbx addq %rax,%r9 movq 16(%rbp),%rax adcq $0,%rdx addq %r9,%r8 movq %rbx,48-8+8(%rsp,%rcx,8) movq %rdx,%r9 adcq $0,%r9 mulq %rbx addq %rax,%r10 movq 24(%rbp),%rax adcq $0,%rdx addq %r10,%r9 movq 32+8(%rsp),%rsi movq %rdx,%r10 adcq $0,%r10 mulq %rbx addq %rax,%r11 movq 32(%rbp),%rax adcq $0,%rdx imulq %r8,%rsi addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 mulq %rbx addq %rax,%r12 movq 40(%rbp),%rax adcq $0,%rdx addq %r12,%r11 movq %rdx,%r12 adcq $0,%r12 mulq %rbx addq %rax,%r13 movq 48(%rbp),%rax adcq $0,%rdx addq %r13,%r12 movq %rdx,%r13 adcq $0,%r13 mulq %rbx addq %rax,%r14 movq 56(%rbp),%rax adcq $0,%rdx addq %r14,%r13 movq %rdx,%r14 adcq $0,%r14 mulq %rbx movq %rsi,%rbx addq %rax,%r15 movq 0(%rbp),%rax adcq $0,%rdx addq %r15,%r14 movq %rdx,%r15 adcq $0,%r15 decl %ecx jnz L$8x_reduce leaq 64(%rbp),%rbp xorq %rax,%rax movq 8+8(%rsp),%rdx cmpq 0+8(%rsp),%rbp jae L$8x_no_tail .byte 0x66 addq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 sbbq %rsi,%rsi movq 48+56+8(%rsp),%rbx movl $8,%ecx movq 0(%rbp),%rax jmp L$8x_tail .p2align 5 L$8x_tail: mulq %rbx addq %rax,%r8 movq 8(%rbp),%rax movq %r8,(%rdi) movq %rdx,%r8 adcq $0,%r8 mulq %rbx addq %rax,%r9 movq 16(%rbp),%rax adcq $0,%rdx addq 
%r9,%r8 leaq 8(%rdi),%rdi movq %rdx,%r9 adcq $0,%r9 mulq %rbx addq %rax,%r10 movq 24(%rbp),%rax adcq $0,%rdx addq %r10,%r9 movq %rdx,%r10 adcq $0,%r10 mulq %rbx addq %rax,%r11 movq 32(%rbp),%rax adcq $0,%rdx addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 mulq %rbx addq %rax,%r12 movq 40(%rbp),%rax adcq $0,%rdx addq %r12,%r11 movq %rdx,%r12 adcq $0,%r12 mulq %rbx addq %rax,%r13 movq 48(%rbp),%rax adcq $0,%rdx addq %r13,%r12 movq %rdx,%r13 adcq $0,%r13 mulq %rbx addq %rax,%r14 movq 56(%rbp),%rax adcq $0,%rdx addq %r14,%r13 movq %rdx,%r14 adcq $0,%r14 mulq %rbx movq 48-16+8(%rsp,%rcx,8),%rbx addq %rax,%r15 adcq $0,%rdx addq %r15,%r14 movq 0(%rbp),%rax movq %rdx,%r15 adcq $0,%r15 decl %ecx jnz L$8x_tail leaq 64(%rbp),%rbp movq 8+8(%rsp),%rdx cmpq 0+8(%rsp),%rbp jae L$8x_tail_done movq 48+56+8(%rsp),%rbx negq %rsi movq 0(%rbp),%rax adcq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 sbbq %rsi,%rsi movl $8,%ecx jmp L$8x_tail .p2align 5 L$8x_tail_done: xorq %rax,%rax addq (%rdx),%r8 adcq $0,%r9 adcq $0,%r10 adcq $0,%r11 adcq $0,%r12 adcq $0,%r13 adcq $0,%r14 adcq $0,%r15 adcq $0,%rax negq %rsi L$8x_no_tail: adcq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 adcq $0,%rax movq -8(%rbp),%rcx xorq %rsi,%rsi .byte 102,72,15,126,213 movq %r8,0(%rdi) movq %r9,8(%rdi) .byte 102,73,15,126,217 movq %r10,16(%rdi) movq %r11,24(%rdi) movq %r12,32(%rdi) movq %r13,40(%rdi) movq %r14,48(%rdi) movq %r15,56(%rdi) leaq 64(%rdi),%rdi cmpq %rdx,%rdi jb L$8x_reduction_loop ret .p2align 5 __bn_post4x_internal: movq 0(%rbp),%r12 leaq (%rdi,%r9,1),%rbx movq %r9,%rcx .byte 102,72,15,126,207 negq %rax .byte 102,72,15,126,206 sarq $3+2,%rcx decq %r12 xorq %r10,%r10 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp L$sqr4x_sub_entry .p2align 4 L$sqr4x_sub: movq 0(%rbp),%r12 movq 8(%rbp),%r13 movq 
16(%rbp),%r14 movq 24(%rbp),%r15 L$sqr4x_sub_entry: leaq 32(%rbp),%rbp notq %r12 notq %r13 notq %r14 notq %r15 andq %rax,%r12 andq %rax,%r13 andq %rax,%r14 andq %rax,%r15 negq %r10 adcq 0(%rbx),%r12 adcq 8(%rbx),%r13 adcq 16(%rbx),%r14 adcq 24(%rbx),%r15 movq %r12,0(%rdi) leaq 32(%rbx),%rbx movq %r13,8(%rdi) sbbq %r10,%r10 movq %r14,16(%rdi) movq %r15,24(%rdi) leaq 32(%rdi),%rdi incq %rcx jnz L$sqr4x_sub movq %r9,%r10 negq %r9 ret .globl _bn_mulx4x_mont_gather5 .private_extern _bn_mulx4x_mont_gather5 .p2align 5 _bn_mulx4x_mont_gather5: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$mulx4x_prologue: shll $3,%r9d leaq (%r9,%r9,2),%r10 negq %r9 movq (%r8),%r8 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb L$mulx4xsp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp L$mulx4xsp_done L$mulx4xsp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp L$mulx4xsp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$mulx4x_page_walk jmp L$mulx4x_page_walk_done L$mulx4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$mulx4x_page_walk L$mulx4x_page_walk_done: movq %r8,32(%rsp) movq %rax,40(%rsp) L$mulx4x_body: call mulx4x_internal movq 40(%rsp),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$mulx4x_epilogue: ret .p2align 5 mulx4x_internal: movq %r9,8(%rsp) movq %r9,%r10 negq %r9 shlq $5,%r9 negq %r10 leaq 128(%rdx,%r9,1),%r13 shrq $5+5,%r9 movd 8(%rax),%xmm5 subq $1,%r9 leaq L$inc(%rip),%rax movq %r13,16+8(%rsp) movq %r9,24+8(%rsp) movq %rdi,56+8(%rsp) movdqa 0(%rax),%xmm0 movdqa 16(%rax),%xmm1 leaq 88-112(%rsp,%r10,1),%r10 leaq 128(%rdx),%rdi pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 .byte 0x67 movdqa %xmm1,%xmm2 .byte 0x67 
paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,112(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,128(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,144(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,160(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,176(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,192(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,208(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,224(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,240(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,256(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,272(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,288(%r10) movdqa %xmm4,%xmm3 .byte 0x67 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,304(%r10) paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,320(%r10) pcmpeqd %xmm5,%xmm3 movdqa %xmm2,336(%r10) pand 64(%rdi),%xmm0 pand 80(%rdi),%xmm1 pand 96(%rdi),%xmm2 movdqa %xmm3,352(%r10) pand 112(%rdi),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -128(%rdi),%xmm4 movdqa -112(%rdi),%xmm5 movdqa -96(%rdi),%xmm2 pand 112(%r10),%xmm4 movdqa -80(%rdi),%xmm3 pand 128(%r10),%xmm5 por %xmm4,%xmm0 pand 144(%r10),%xmm2 por %xmm5,%xmm1 pand 160(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -64(%rdi),%xmm4 movdqa -48(%rdi),%xmm5 movdqa -32(%rdi),%xmm2 pand 176(%r10),%xmm4 movdqa -16(%rdi),%xmm3 pand 192(%r10),%xmm5 por %xmm4,%xmm0 pand 208(%r10),%xmm2 por %xmm5,%xmm1 pand 224(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa 0(%rdi),%xmm4 movdqa 16(%rdi),%xmm5 movdqa 32(%rdi),%xmm2 pand 240(%r10),%xmm4 movdqa 48(%rdi),%xmm3 pand 256(%r10),%xmm5 por %xmm4,%xmm0 pand 272(%r10),%xmm2 por %xmm5,%xmm1 pand 
288(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 pxor %xmm1,%xmm0 pshufd $0x4e,%xmm0,%xmm1 por %xmm1,%xmm0 leaq 256(%rdi),%rdi .byte 102,72,15,126,194 leaq 64+32+8(%rsp),%rbx movq %rdx,%r9 mulxq 0(%rsi),%r8,%rax mulxq 8(%rsi),%r11,%r12 addq %rax,%r11 mulxq 16(%rsi),%rax,%r13 adcq %rax,%r12 adcq $0,%r13 mulxq 24(%rsi),%rax,%r14 movq %r8,%r15 imulq 32+8(%rsp),%r8 xorq %rbp,%rbp movq %r8,%rdx movq %rdi,8+8(%rsp) leaq 32(%rsi),%rsi adcxq %rax,%r13 adcxq %rbp,%r14 mulxq 0(%rcx),%rax,%r10 adcxq %rax,%r15 adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 mulxq 16(%rcx),%rax,%r12 movq 24+8(%rsp),%rdi movq %r10,-32(%rbx) adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-24(%rbx) adcxq %rax,%r12 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r12,-16(%rbx) jmp L$mulx4x_1st .p2align 5 L$mulx4x_1st: adcxq %rbp,%r15 mulxq 0(%rsi),%r10,%rax adcxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 .byte 0x67,0x67 movq %r8,%rdx adcxq %rax,%r13 adcxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 movq %r11,-32(%rbx) adoxq %r15,%r13 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r13,-16(%rbx) decq %rdi jnz L$mulx4x_1st movq 8(%rsp),%rax adcq %rbp,%r15 leaq (%rsi,%rax,1),%rsi addq %r15,%r14 movq 8+8(%rsp),%rdi adcq %rbp,%rbp movq %r14,-8(%rbx) jmp L$mulx4x_outer .p2align 5 L$mulx4x_outer: leaq 16-256(%rbx),%r10 pxor %xmm4,%xmm4 .byte 0x67,0x67 pxor %xmm5,%xmm5 movdqa -128(%rdi),%xmm0 movdqa -112(%rdi),%xmm1 movdqa -96(%rdi),%xmm2 pand 256(%r10),%xmm0 movdqa -80(%rdi),%xmm3 pand 272(%r10),%xmm1 por %xmm0,%xmm4 pand 288(%r10),%xmm2 por %xmm1,%xmm5 pand 304(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%rdi),%xmm0 
movdqa -48(%rdi),%xmm1 movdqa -32(%rdi),%xmm2 pand 320(%r10),%xmm0 movdqa -16(%rdi),%xmm3 pand 336(%r10),%xmm1 por %xmm0,%xmm4 pand 352(%r10),%xmm2 por %xmm1,%xmm5 pand 368(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%rdi),%xmm0 movdqa 16(%rdi),%xmm1 movdqa 32(%rdi),%xmm2 pand 384(%r10),%xmm0 movdqa 48(%rdi),%xmm3 pand 400(%r10),%xmm1 por %xmm0,%xmm4 pand 416(%r10),%xmm2 por %xmm1,%xmm5 pand 432(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%rdi),%xmm0 movdqa 80(%rdi),%xmm1 movdqa 96(%rdi),%xmm2 pand 448(%r10),%xmm0 movdqa 112(%rdi),%xmm3 pand 464(%r10),%xmm1 por %xmm0,%xmm4 pand 480(%r10),%xmm2 por %xmm1,%xmm5 pand 496(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 leaq 256(%rdi),%rdi .byte 102,72,15,126,194 movq %rbp,(%rbx) leaq 32(%rbx,%rax,1),%rbx mulxq 0(%rsi),%r8,%r11 xorq %rbp,%rbp movq %rdx,%r9 mulxq 8(%rsi),%r14,%r12 adoxq -32(%rbx),%r8 adcxq %r14,%r11 mulxq 16(%rsi),%r15,%r13 adoxq -24(%rbx),%r11 adcxq %r15,%r12 mulxq 24(%rsi),%rdx,%r14 adoxq -16(%rbx),%r12 adcxq %rdx,%r13 leaq (%rcx,%rax,1),%rcx leaq 32(%rsi),%rsi adoxq -8(%rbx),%r13 adcxq %rbp,%r14 adoxq %rbp,%r14 movq %r8,%r15 imulq 32+8(%rsp),%r8 movq %r8,%rdx xorq %rbp,%rbp movq %rdi,8+8(%rsp) mulxq 0(%rcx),%rax,%r10 adcxq %rax,%r15 adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 mulxq 16(%rcx),%rax,%r12 adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq 24+8(%rsp),%rdi movq %r10,-32(%rbx) adcxq %rax,%r12 movq %r11,-24(%rbx) adoxq %rbp,%r15 movq %r12,-16(%rbx) leaq 32(%rcx),%rcx jmp L$mulx4x_inner .p2align 5 L$mulx4x_inner: mulxq 0(%rsi),%r10,%rax adcxq %rbp,%r15 adoxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq 0(%rbx),%r10 adoxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq 8(%rbx),%r11 adoxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 movq %r8,%rdx adcxq 16(%rbx),%r12 adoxq %rax,%r13 adcxq 24(%rbx),%r13 adoxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adcxq %rbp,%r14 adoxq %r15,%r10 mulxq 
0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 adoxq %r15,%r13 movq %r11,-32(%rbx) mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx leaq 32(%rcx),%rcx movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 movq %r13,-16(%rbx) decq %rdi jnz L$mulx4x_inner movq 0+8(%rsp),%rax adcq %rbp,%r15 subq 0(%rbx),%rdi movq 8+8(%rsp),%rdi movq 16+8(%rsp),%r10 adcq %r15,%r14 leaq (%rsi,%rax,1),%rsi adcq %rbp,%rbp movq %r14,-8(%rbx) cmpq %r10,%rdi jb L$mulx4x_outer movq -8(%rcx),%r10 movq %rbp,%r8 movq (%rcx,%rax,1),%r12 leaq (%rcx,%rax,1),%rbp movq %rax,%rcx leaq (%rbx,%rax,1),%rdi xorl %eax,%eax xorq %r15,%r15 subq %r14,%r10 adcq %r15,%r15 orq %r15,%r8 sarq $3+2,%rcx subq %r8,%rax movq 56+8(%rsp),%rdx decq %r12 movq 8(%rbp),%r13 xorq %r8,%r8 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp L$sqrx4x_sub_entry .globl _bn_powerx5 .private_extern _bn_powerx5 .p2align 5 _bn_powerx5: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$powerx5_prologue: shll $3,%r9d leaq (%r9,%r9,2),%r10 negq %r9 movq (%r8),%r8 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb L$pwrx_sp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp L$pwrx_sp_done .p2align 5 L$pwrx_sp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp L$pwrx_sp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$pwrx_page_walk jmp L$pwrx_page_walk_done L$pwrx_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$pwrx_page_walk L$pwrx_page_walk_done: movq %r9,%r10 negq %r9 pxor %xmm0,%xmm0 .byte 102,72,15,110,207 .byte 102,72,15,110,209 .byte 102,73,15,110,218 .byte 102,72,15,110,226 movq %r8,32(%rsp) movq %rax,40(%rsp) L$powerx5_body: call __bn_sqrx8x_internal call __bn_postx4x_internal call 
__bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal movq %r10,%r9 movq %rsi,%rdi .byte 102,72,15,126,209 .byte 102,72,15,126,226 movq 40(%rsp),%rax call mulx4x_internal movq 40(%rsp),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$powerx5_epilogue: ret .globl _bn_sqrx8x_internal .private_extern _bn_sqrx8x_internal .private_extern _bn_sqrx8x_internal .p2align 5 _bn_sqrx8x_internal: __bn_sqrx8x_internal: _CET_ENDBR leaq 48+8(%rsp),%rdi leaq (%rsi,%r9,1),%rbp movq %r9,0+8(%rsp) movq %rbp,8+8(%rsp) jmp L$sqr8x_zero_start .p2align 5 .byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00 L$sqrx8x_zero: .byte 0x3e movdqa %xmm0,0(%rdi) movdqa %xmm0,16(%rdi) movdqa %xmm0,32(%rdi) movdqa %xmm0,48(%rdi) L$sqr8x_zero_start: movdqa %xmm0,64(%rdi) movdqa %xmm0,80(%rdi) movdqa %xmm0,96(%rdi) movdqa %xmm0,112(%rdi) leaq 128(%rdi),%rdi subq $64,%r9 jnz L$sqrx8x_zero movq 0(%rsi),%rdx xorq %r10,%r10 xorq %r11,%r11 xorq %r12,%r12 xorq %r13,%r13 xorq %r14,%r14 xorq %r15,%r15 leaq 48+8(%rsp),%rdi xorq %rbp,%rbp jmp L$sqrx8x_outer_loop .p2align 5 L$sqrx8x_outer_loop: mulxq 8(%rsi),%r8,%rax adcxq %r9,%r8 adoxq %rax,%r10 mulxq 16(%rsi),%r9,%rax adcxq %r10,%r9 adoxq %rax,%r11 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 adcxq %r11,%r10 adoxq %rax,%r12 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 adcxq %r12,%r11 adoxq %rax,%r13 mulxq 40(%rsi),%r12,%rax adcxq %r13,%r12 adoxq %rax,%r14 mulxq 48(%rsi),%r13,%rax adcxq %r14,%r13 adoxq %r15,%rax mulxq 56(%rsi),%r14,%r15 movq 8(%rsi),%rdx adcxq %rax,%r14 adoxq %rbp,%r15 adcq 64(%rdi),%r15 movq %r8,8(%rdi) movq %r9,16(%rdi) sbbq %rcx,%rcx xorq %rbp,%rbp mulxq 16(%rsi),%r8,%rbx mulxq 24(%rsi),%r9,%rax adcxq %r10,%r8 adoxq %rbx,%r9 mulxq 32(%rsi),%r10,%rbx adcxq 
%r11,%r9 adoxq %rax,%r10 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 adcxq %r12,%r10 adoxq %rbx,%r11 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 adcxq %r13,%r11 adoxq %r14,%r12 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 movq 16(%rsi),%rdx adcxq %rax,%r12 adoxq %rbx,%r13 adcxq %r15,%r13 adoxq %rbp,%r14 adcxq %rbp,%r14 movq %r8,24(%rdi) movq %r9,32(%rdi) mulxq 24(%rsi),%r8,%rbx mulxq 32(%rsi),%r9,%rax adcxq %r10,%r8 adoxq %rbx,%r9 mulxq 40(%rsi),%r10,%rbx adcxq %r11,%r9 adoxq %rax,%r10 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 adcxq %r12,%r10 adoxq %r13,%r11 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 .byte 0x3e movq 24(%rsi),%rdx adcxq %rbx,%r11 adoxq %rax,%r12 adcxq %r14,%r12 movq %r8,40(%rdi) movq %r9,48(%rdi) mulxq 32(%rsi),%r8,%rax adoxq %rbp,%r13 adcxq %rbp,%r13 mulxq 40(%rsi),%r9,%rbx adcxq %r10,%r8 adoxq %rax,%r9 mulxq 48(%rsi),%r10,%rax adcxq %r11,%r9 adoxq %r12,%r10 mulxq 56(%rsi),%r11,%r12 movq 32(%rsi),%rdx movq 40(%rsi),%r14 adcxq %rbx,%r10 adoxq %rax,%r11 movq 48(%rsi),%r15 adcxq %r13,%r11 adoxq %rbp,%r12 adcxq %rbp,%r12 movq %r8,56(%rdi) movq %r9,64(%rdi) mulxq %r14,%r9,%rax movq 56(%rsi),%r8 adcxq %r10,%r9 mulxq %r15,%r10,%rbx adoxq %rax,%r10 adcxq %r11,%r10 mulxq %r8,%r11,%rax movq %r14,%rdx adoxq %rbx,%r11 adcxq %r12,%r11 adcxq %rbp,%rax mulxq %r15,%r14,%rbx mulxq %r8,%r12,%r13 movq %r15,%rdx leaq 64(%rsi),%rsi adcxq %r14,%r11 adoxq %rbx,%r12 adcxq %rax,%r12 adoxq %rbp,%r13 .byte 0x67,0x67 mulxq %r8,%r8,%r14 adcxq %r8,%r13 adcxq %rbp,%r14 cmpq 8+8(%rsp),%rsi je L$sqrx8x_outer_break negq %rcx movq $-8,%rcx movq %rbp,%r15 movq 64(%rdi),%r8 adcxq 72(%rdi),%r9 adcxq 80(%rdi),%r10 adcxq 88(%rdi),%r11 adcq 96(%rdi),%r12 adcq 104(%rdi),%r13 adcq 112(%rdi),%r14 adcq 120(%rdi),%r15 leaq (%rsi),%rbp leaq 128(%rdi),%rdi sbbq %rax,%rax movq -64(%rsi),%rdx movq %rax,16+8(%rsp) movq %rdi,24+8(%rsp) xorl %eax,%eax jmp L$sqrx8x_loop .p2align 5 L$sqrx8x_loop: movq %r8,%rbx mulxq 0(%rbp),%rax,%r8 adcxq %rax,%rbx adoxq %r9,%r8 
mulxq 8(%rbp),%rax,%r9 adcxq %rax,%r8 adoxq %r10,%r9 mulxq 16(%rbp),%rax,%r10 adcxq %rax,%r9 adoxq %r11,%r10 mulxq 24(%rbp),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 adcxq %rax,%r11 adoxq %r13,%r12 mulxq 40(%rbp),%rax,%r13 adcxq %rax,%r12 adoxq %r14,%r13 mulxq 48(%rbp),%rax,%r14 movq %rbx,(%rdi,%rcx,8) movl $0,%ebx adcxq %rax,%r13 adoxq %r15,%r14 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 movq 8(%rsi,%rcx,8),%rdx adcxq %rax,%r14 adoxq %rbx,%r15 adcxq %rbx,%r15 .byte 0x67 incq %rcx jnz L$sqrx8x_loop leaq 64(%rbp),%rbp movq $-8,%rcx cmpq 8+8(%rsp),%rbp je L$sqrx8x_break subq 16+8(%rsp),%rbx .byte 0x66 movq -64(%rsi),%rdx adcxq 0(%rdi),%r8 adcxq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 leaq 64(%rdi),%rdi .byte 0x67 sbbq %rax,%rax xorl %ebx,%ebx movq %rax,16+8(%rsp) jmp L$sqrx8x_loop .p2align 5 L$sqrx8x_break: xorq %rbp,%rbp subq 16+8(%rsp),%rbx adcxq %rbp,%r8 movq 24+8(%rsp),%rcx adcxq %rbp,%r9 movq 0(%rsi),%rdx adcq $0,%r10 movq %r8,0(%rdi) adcq $0,%r11 adcq $0,%r12 adcq $0,%r13 adcq $0,%r14 adcq $0,%r15 cmpq %rcx,%rdi je L$sqrx8x_outer_loop movq %r9,8(%rdi) movq 8(%rcx),%r9 movq %r10,16(%rdi) movq 16(%rcx),%r10 movq %r11,24(%rdi) movq 24(%rcx),%r11 movq %r12,32(%rdi) movq 32(%rcx),%r12 movq %r13,40(%rdi) movq 40(%rcx),%r13 movq %r14,48(%rdi) movq 48(%rcx),%r14 movq %r15,56(%rdi) movq 56(%rcx),%r15 movq %rcx,%rdi jmp L$sqrx8x_outer_loop .p2align 5 L$sqrx8x_outer_break: movq %r9,72(%rdi) .byte 102,72,15,126,217 movq %r10,80(%rdi) movq %r11,88(%rdi) movq %r12,96(%rdi) movq %r13,104(%rdi) movq %r14,112(%rdi) leaq 48+8(%rsp),%rdi movq (%rsi,%rcx,1),%rdx movq 8(%rdi),%r11 xorq %r10,%r10 movq 0+8(%rsp),%r9 adoxq %r11,%r11 movq 16(%rdi),%r12 movq 24(%rdi),%r13 .p2align 5 L$sqrx4x_shift_n_add: mulxq %rdx,%rax,%rbx adoxq %r12,%r12 adcxq %r10,%rax .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 
adoxq %r13,%r13 adcxq %r11,%rbx movq 40(%rdi),%r11 movq %rax,0(%rdi) movq %rbx,8(%rdi) mulxq %rdx,%rax,%rbx adoxq %r10,%r10 adcxq %r12,%rax movq 16(%rsi,%rcx,1),%rdx movq 48(%rdi),%r12 adoxq %r11,%r11 adcxq %r13,%rbx movq 56(%rdi),%r13 movq %rax,16(%rdi) movq %rbx,24(%rdi) mulxq %rdx,%rax,%rbx adoxq %r12,%r12 adcxq %r10,%rax movq 24(%rsi,%rcx,1),%rdx leaq 32(%rcx),%rcx movq 64(%rdi),%r10 adoxq %r13,%r13 adcxq %r11,%rbx movq 72(%rdi),%r11 movq %rax,32(%rdi) movq %rbx,40(%rdi) mulxq %rdx,%rax,%rbx adoxq %r10,%r10 adcxq %r12,%rax jrcxz L$sqrx4x_shift_n_add_break .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 adoxq %r11,%r11 adcxq %r13,%rbx movq 80(%rdi),%r12 movq 88(%rdi),%r13 movq %rax,48(%rdi) movq %rbx,56(%rdi) leaq 64(%rdi),%rdi nop jmp L$sqrx4x_shift_n_add .p2align 5 L$sqrx4x_shift_n_add_break: adcxq %r13,%rbx movq %rax,48(%rdi) movq %rbx,56(%rdi) leaq 64(%rdi),%rdi .byte 102,72,15,126,213 __bn_sqrx8x_reduction: xorl %eax,%eax movq 32+8(%rsp),%rbx movq 48+8(%rsp),%rdx leaq -64(%rbp,%r9,1),%rcx movq %rcx,0+8(%rsp) movq %rdi,8+8(%rsp) leaq 48+8(%rsp),%rdi jmp L$sqrx8x_reduction_loop .p2align 5 L$sqrx8x_reduction_loop: movq 8(%rdi),%r9 movq 16(%rdi),%r10 movq 24(%rdi),%r11 movq 32(%rdi),%r12 movq %rdx,%r8 imulq %rbx,%rdx movq 40(%rdi),%r13 movq 48(%rdi),%r14 movq 56(%rdi),%r15 movq %rax,24+8(%rsp) leaq 64(%rdi),%rdi xorq %rsi,%rsi movq $-8,%rcx jmp L$sqrx8x_reduce .p2align 5 L$sqrx8x_reduce: movq %r8,%rbx mulxq 0(%rbp),%rax,%r8 adcxq %rbx,%rax adoxq %r9,%r8 mulxq 8(%rbp),%rbx,%r9 adcxq %rbx,%r8 adoxq %r10,%r9 mulxq 16(%rbp),%rbx,%r10 adcxq %rbx,%r9 adoxq %r11,%r10 mulxq 24(%rbp),%rbx,%r11 adcxq %rbx,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 movq %rdx,%rax movq %r8,%rdx adcxq %rbx,%r11 adoxq %r13,%r12 mulxq 32+8(%rsp),%rbx,%rdx movq %rax,%rdx movq %rax,64+48+8(%rsp,%rcx,8) mulxq 40(%rbp),%rax,%r13 adcxq %rax,%r12 adoxq %r14,%r13 mulxq 48(%rbp),%rax,%r14 adcxq %rax,%r13 adoxq %r15,%r14 mulxq 56(%rbp),%rax,%r15 movq %rbx,%rdx adcxq 
%rax,%r14 adoxq %rsi,%r15 adcxq %rsi,%r15 .byte 0x67,0x67,0x67 incq %rcx jnz L$sqrx8x_reduce movq %rsi,%rax cmpq 0+8(%rsp),%rbp jae L$sqrx8x_no_tail movq 48+8(%rsp),%rdx addq 0(%rdi),%r8 leaq 64(%rbp),%rbp movq $-8,%rcx adcxq 8(%rdi),%r9 adcxq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 leaq 64(%rdi),%rdi sbbq %rax,%rax xorq %rsi,%rsi movq %rax,16+8(%rsp) jmp L$sqrx8x_tail .p2align 5 L$sqrx8x_tail: movq %r8,%rbx mulxq 0(%rbp),%rax,%r8 adcxq %rax,%rbx adoxq %r9,%r8 mulxq 8(%rbp),%rax,%r9 adcxq %rax,%r8 adoxq %r10,%r9 mulxq 16(%rbp),%rax,%r10 adcxq %rax,%r9 adoxq %r11,%r10 mulxq 24(%rbp),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 adcxq %rax,%r11 adoxq %r13,%r12 mulxq 40(%rbp),%rax,%r13 adcxq %rax,%r12 adoxq %r14,%r13 mulxq 48(%rbp),%rax,%r14 adcxq %rax,%r13 adoxq %r15,%r14 mulxq 56(%rbp),%rax,%r15 movq 72+48+8(%rsp,%rcx,8),%rdx adcxq %rax,%r14 adoxq %rsi,%r15 movq %rbx,(%rdi,%rcx,8) movq %r8,%rbx adcxq %rsi,%r15 incq %rcx jnz L$sqrx8x_tail cmpq 0+8(%rsp),%rbp jae L$sqrx8x_tail_done subq 16+8(%rsp),%rsi movq 48+8(%rsp),%rdx leaq 64(%rbp),%rbp adcq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 leaq 64(%rdi),%rdi sbbq %rax,%rax subq $8,%rcx xorq %rsi,%rsi movq %rax,16+8(%rsp) jmp L$sqrx8x_tail .p2align 5 L$sqrx8x_tail_done: xorq %rax,%rax addq 24+8(%rsp),%r8 adcq $0,%r9 adcq $0,%r10 adcq $0,%r11 adcq $0,%r12 adcq $0,%r13 adcq $0,%r14 adcq $0,%r15 adcq $0,%rax subq 16+8(%rsp),%rsi L$sqrx8x_no_tail: adcq 0(%rdi),%r8 .byte 102,72,15,126,217 adcq 8(%rdi),%r9 movq 56(%rbp),%rsi .byte 102,72,15,126,213 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 adcq $0,%rax movq 32+8(%rsp),%rbx movq 64(%rdi,%rcx,1),%rdx movq %r8,0(%rdi) leaq 64(%rdi),%r8 movq %r9,8(%rdi) movq %r10,16(%rdi) movq %r11,24(%rdi) movq 
%r12,32(%rdi) movq %r13,40(%rdi) movq %r14,48(%rdi) movq %r15,56(%rdi) leaq 64(%rdi,%rcx,1),%rdi cmpq 8+8(%rsp),%r8 jb L$sqrx8x_reduction_loop ret .p2align 5 __bn_postx4x_internal: movq 0(%rbp),%r12 movq %rcx,%r10 movq %rcx,%r9 negq %rax sarq $3+2,%rcx .byte 102,72,15,126,202 .byte 102,72,15,126,206 decq %r12 movq 8(%rbp),%r13 xorq %r8,%r8 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp L$sqrx4x_sub_entry .p2align 4 L$sqrx4x_sub: movq 0(%rbp),%r12 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 L$sqrx4x_sub_entry: andnq %rax,%r12,%r12 leaq 32(%rbp),%rbp andnq %rax,%r13,%r13 andnq %rax,%r14,%r14 andnq %rax,%r15,%r15 negq %r8 adcq 0(%rdi),%r12 adcq 8(%rdi),%r13 adcq 16(%rdi),%r14 adcq 24(%rdi),%r15 movq %r12,0(%rdx) leaq 32(%rdi),%rdi movq %r13,8(%rdx) sbbq %r8,%r8 movq %r14,16(%rdx) movq %r15,24(%rdx) leaq 32(%rdx),%rdx incq %rcx jnz L$sqrx4x_sub negq %r9 ret .globl _bn_scatter5 .private_extern _bn_scatter5 .p2align 4 _bn_scatter5: _CET_ENDBR cmpl $0,%esi jz L$scatter_epilogue leaq (%rdx,%rcx,8),%rdx L$scatter: movq (%rdi),%rax leaq 8(%rdi),%rdi movq %rax,(%rdx) leaq 256(%rdx),%rdx subl $1,%esi jnz L$scatter L$scatter_epilogue: ret .globl _bn_gather5 .private_extern _bn_gather5 .p2align 5 _bn_gather5: L$SEH_begin_bn_gather5: _CET_ENDBR .byte 0x4c,0x8d,0x14,0x24 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 leaq L$inc(%rip),%rax andq $-16,%rsp movd %ecx,%xmm5 movdqa 0(%rax),%xmm0 movdqa 16(%rax),%xmm1 leaq 128(%rdx),%r11 leaq 128(%rsp),%rax pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 movdqa %xmm1,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,-128(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,-112(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,-96(%rax) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,-80(%rax) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,-64(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 
pcmpeqd %xmm5,%xmm2 movdqa %xmm1,-48(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,-32(%rax) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,-16(%rax) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,0(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,16(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,32(%rax) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,48(%rax) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,64(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,80(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,96(%rax) movdqa %xmm4,%xmm2 movdqa %xmm3,112(%rax) jmp L$gather .p2align 5 L$gather: pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa -128(%r11),%xmm0 movdqa -112(%r11),%xmm1 movdqa -96(%r11),%xmm2 pand -128(%rax),%xmm0 movdqa -80(%r11),%xmm3 pand -112(%rax),%xmm1 por %xmm0,%xmm4 pand -96(%rax),%xmm2 por %xmm1,%xmm5 pand -80(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%r11),%xmm0 movdqa -48(%r11),%xmm1 movdqa -32(%r11),%xmm2 pand -64(%rax),%xmm0 movdqa -16(%r11),%xmm3 pand -48(%rax),%xmm1 por %xmm0,%xmm4 pand -32(%rax),%xmm2 por %xmm1,%xmm5 pand -16(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%r11),%xmm0 movdqa 16(%r11),%xmm1 movdqa 32(%r11),%xmm2 pand 0(%rax),%xmm0 movdqa 48(%r11),%xmm3 pand 16(%rax),%xmm1 por %xmm0,%xmm4 pand 32(%rax),%xmm2 por %xmm1,%xmm5 pand 48(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%r11),%xmm0 movdqa 80(%r11),%xmm1 movdqa 96(%r11),%xmm2 pand 64(%rax),%xmm0 movdqa 112(%r11),%xmm3 pand 80(%rax),%xmm1 por %xmm0,%xmm4 pand 96(%rax),%xmm2 por %xmm1,%xmm5 pand 112(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 leaq 256(%r11),%r11 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 movq %xmm0,(%rdi) leaq 8(%rdi),%rdi subl $1,%esi jnz L$gather leaq (%r10),%rsp ret L$SEH_end_bn_gather5: 
.section __DATA,__const .p2align 6 L$inc: .long 0,0, 1,1 .long 2,2, 2,2 .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,119,105,116,104,32,115,99,97,116,116,101,114,47,103,97,116,104,101,114,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/bcm/x86_64-mont5-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl bn_mul_mont_gather5_nohw .hidden bn_mul_mont_gather5_nohw .type bn_mul_mont_gather5_nohw,@function .align 64 bn_mul_mont_gather5_nohw: .cfi_startproc _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax .cfi_def_cfa_register %rax movd 8(%rsp),%xmm5 pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 negq %r9 movq %rsp,%r11 leaq -280(%rsp,%r9,8),%r10 negq %r9 andq $-1024,%r10 subq %r10,%r11 andq $-4096,%r11 leaq (%r10,%r11,1),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja .Lmul_page_walk jmp .Lmul_page_walk_done .Lmul_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja .Lmul_page_walk .Lmul_page_walk_done: leaq .Linc(%rip),%r10 movq %rax,8(%rsp,%r9,8) .cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08 .Lmul_body: leaq 128(%rdx),%r12 movdqa 0(%r10),%xmm0 movdqa 16(%r10),%xmm1 leaq 24-112(%rsp,%r9,8),%r10 andq $-16,%r10 pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 movdqa %xmm1,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 
.byte 0x67 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,112(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,128(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,144(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,160(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,176(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,192(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,208(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,224(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,240(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,256(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,272(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,288(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,304(%r10) paddd %xmm2,%xmm3 .byte 0x67 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,320(%r10) pcmpeqd %xmm5,%xmm3 movdqa %xmm2,336(%r10) pand 64(%r12),%xmm0 pand 80(%r12),%xmm1 pand 96(%r12),%xmm2 movdqa %xmm3,352(%r10) pand 112(%r12),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -128(%r12),%xmm4 movdqa -112(%r12),%xmm5 movdqa -96(%r12),%xmm2 pand 112(%r10),%xmm4 movdqa -80(%r12),%xmm3 pand 128(%r10),%xmm5 por %xmm4,%xmm0 pand 144(%r10),%xmm2 por %xmm5,%xmm1 pand 160(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -64(%r12),%xmm4 movdqa -48(%r12),%xmm5 movdqa -32(%r12),%xmm2 pand 176(%r10),%xmm4 movdqa -16(%r12),%xmm3 pand 192(%r10),%xmm5 por %xmm4,%xmm0 pand 208(%r10),%xmm2 por %xmm5,%xmm1 pand 224(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa 0(%r12),%xmm4 movdqa 16(%r12),%xmm5 movdqa 32(%r12),%xmm2 pand 240(%r10),%xmm4 movdqa 48(%r12),%xmm3 pand 256(%r10),%xmm5 por %xmm4,%xmm0 pand 272(%r10),%xmm2 por %xmm5,%xmm1 pand 288(%r10),%xmm3 por 
%xmm2,%xmm0 por %xmm3,%xmm1 por %xmm1,%xmm0 pshufd $0x4e,%xmm0,%xmm1 por %xmm1,%xmm0 leaq 256(%r12),%r12 .byte 102,72,15,126,195 movq (%r8),%r8 movq (%rsi),%rax xorq %r14,%r14 xorq %r15,%r15 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%r13 leaq 1(%r15),%r15 jmp .L1st_enter .align 16 .L1st: addq %rax,%r13 movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%r13 movq %r10,%r11 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 .L1st_enter: mulq %rbx addq %rax,%r11 movq (%rcx,%r15,8),%rax adcq $0,%rdx leaq 1(%r15),%r15 movq %rdx,%r10 mulq %rbp cmpq %r9,%r15 jne .L1st addq %rax,%r13 adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,-16(%rsp,%r9,8) movq %rdx,%r13 movq %r10,%r11 xorq %rdx,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r9,8) movq %rdx,(%rsp,%r9,8) leaq 1(%r14),%r14 jmp .Louter .align 16 .Louter: leaq 24+128(%rsp,%r9,8),%rdx andq $-16,%rdx pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa -128(%r12),%xmm0 movdqa -112(%r12),%xmm1 movdqa -96(%r12),%xmm2 movdqa -80(%r12),%xmm3 pand -128(%rdx),%xmm0 pand -112(%rdx),%xmm1 por %xmm0,%xmm4 pand -96(%rdx),%xmm2 por %xmm1,%xmm5 pand -80(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%r12),%xmm0 movdqa -48(%r12),%xmm1 movdqa -32(%r12),%xmm2 movdqa -16(%r12),%xmm3 pand -64(%rdx),%xmm0 pand -48(%rdx),%xmm1 por %xmm0,%xmm4 pand -32(%rdx),%xmm2 por %xmm1,%xmm5 pand -16(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%r12),%xmm0 movdqa 16(%r12),%xmm1 movdqa 32(%r12),%xmm2 movdqa 48(%r12),%xmm3 pand 0(%rdx),%xmm0 pand 16(%rdx),%xmm1 por %xmm0,%xmm4 pand 32(%rdx),%xmm2 por %xmm1,%xmm5 pand 48(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%r12),%xmm0 movdqa 80(%r12),%xmm1 movdqa 96(%r12),%xmm2 movdqa 112(%r12),%xmm3 pand 64(%rdx),%xmm0 pand 80(%rdx),%xmm1 por %xmm0,%xmm4 pand 96(%rdx),%xmm2 por %xmm1,%xmm5 pand 112(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 pshufd $0x4e,%xmm4,%xmm0 
por %xmm4,%xmm0 leaq 256(%r12),%r12 movq (%rsi),%rax .byte 102,72,15,126,195 xorq %r15,%r15 movq %r8,%rbp movq (%rsp),%r10 mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq 8(%rsp),%r10 movq %rdx,%r13 leaq 1(%r15),%r15 jmp .Linner_enter .align 16 .Linner: addq %rax,%r13 movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 movq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 .Linner_enter: mulq %rbx addq %rax,%r11 movq (%rcx,%r15,8),%rax adcq $0,%rdx addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 leaq 1(%r15),%r15 mulq %rbp cmpq %r9,%r15 jne .Linner addq %rax,%r13 adcq $0,%rdx addq %r10,%r13 movq (%rsp,%r9,8),%r10 adcq $0,%rdx movq %r13,-16(%rsp,%r9,8) movq %rdx,%r13 xorq %rdx,%rdx addq %r11,%r13 adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r9,8) movq %rdx,(%rsp,%r9,8) leaq 1(%r14),%r14 cmpq %r9,%r14 jb .Louter xorq %r14,%r14 movq (%rsp),%rax leaq (%rsp),%rsi movq %r9,%r15 jmp .Lsub .align 16 .Lsub: sbbq (%rcx,%r14,8),%rax movq %rax,(%rdi,%r14,8) movq 8(%rsi,%r14,8),%rax leaq 1(%r14),%r14 decq %r15 jnz .Lsub sbbq $0,%rax movq $-1,%rbx xorq %rax,%rbx xorq %r14,%r14 movq %r9,%r15 .Lcopy: movq (%rdi,%r14,8),%rcx movq (%rsp,%r14,8),%rdx andq %rbx,%rcx andq %rax,%rdx movq %r14,(%rsp,%r14,8) orq %rcx,%rdx movq %rdx,(%rdi,%r14,8) leaq 1(%r14),%r14 subq $1,%r15 jnz .Lcopy movq 8(%rsp,%r9,8),%rsi .cfi_def_cfa %rsi,8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lmul_epilogue: ret .cfi_endproc .size bn_mul_mont_gather5_nohw,.-bn_mul_mont_gather5_nohw .globl bn_mul4x_mont_gather5 .hidden bn_mul4x_mont_gather5 .type bn_mul4x_mont_gather5,@function .align 32 bn_mul4x_mont_gather5: .cfi_startproc _CET_ENDBR .byte 
0x67 movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 .Lmul4x_prologue: .byte 0x67 shll $3,%r9d leaq (%r9,%r9,2),%r10 negq %r9 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb .Lmul4xsp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp .Lmul4xsp_done .align 32 .Lmul4xsp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp .Lmul4xsp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lmul4x_page_walk jmp .Lmul4x_page_walk_done .Lmul4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lmul4x_page_walk .Lmul4x_page_walk_done: negq %r9 movq %rax,40(%rsp) .cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 .Lmul4x_body: call mul4x_internal movq 40(%rsp),%rsi .cfi_def_cfa %rsi,8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lmul4x_epilogue: ret .cfi_endproc .size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5 .type mul4x_internal,@function .align 32 mul4x_internal: .cfi_startproc shlq $5,%r9 movd 8(%rax),%xmm5 leaq .Linc(%rip),%rax leaq 128(%rdx,%r9,1),%r13 shrq $5,%r9 movdqa 0(%rax),%xmm0 movdqa 16(%rax),%xmm1 leaq 88-112(%rsp,%r9,1),%r10 leaq 128(%rdx),%r12 pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 .byte 0x67,0x67 movdqa %xmm1,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 .byte 0x67 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,112(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,128(%r10) movdqa %xmm4,%xmm1 
paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,144(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,160(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,176(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,192(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,208(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,224(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,240(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,256(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,272(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,288(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,304(%r10) paddd %xmm2,%xmm3 .byte 0x67 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,320(%r10) pcmpeqd %xmm5,%xmm3 movdqa %xmm2,336(%r10) pand 64(%r12),%xmm0 pand 80(%r12),%xmm1 pand 96(%r12),%xmm2 movdqa %xmm3,352(%r10) pand 112(%r12),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -128(%r12),%xmm4 movdqa -112(%r12),%xmm5 movdqa -96(%r12),%xmm2 pand 112(%r10),%xmm4 movdqa -80(%r12),%xmm3 pand 128(%r10),%xmm5 por %xmm4,%xmm0 pand 144(%r10),%xmm2 por %xmm5,%xmm1 pand 160(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -64(%r12),%xmm4 movdqa -48(%r12),%xmm5 movdqa -32(%r12),%xmm2 pand 176(%r10),%xmm4 movdqa -16(%r12),%xmm3 pand 192(%r10),%xmm5 por %xmm4,%xmm0 pand 208(%r10),%xmm2 por %xmm5,%xmm1 pand 224(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa 0(%r12),%xmm4 movdqa 16(%r12),%xmm5 movdqa 32(%r12),%xmm2 pand 240(%r10),%xmm4 movdqa 48(%r12),%xmm3 pand 256(%r10),%xmm5 por %xmm4,%xmm0 pand 272(%r10),%xmm2 por %xmm5,%xmm1 pand 288(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 por %xmm1,%xmm0 pshufd $0x4e,%xmm0,%xmm1 por %xmm1,%xmm0 leaq 256(%r12),%r12 .byte 102,72,15,126,195 movq %r13,16+8(%rsp) movq %rdi,56+8(%rsp) movq (%r8),%r8 movq 
(%rsi),%rax leaq (%rsi,%r9,1),%rsi negq %r9 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp leaq 64+8(%rsp),%r14 movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi,%r9,1),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%r9),%r15 leaq 32(%rcx),%rcx adcq $0,%rdx movq %rdi,(%r14) movq %rdx,%r13 jmp .L1st4x .align 32 .L1st4x: mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%r14) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq 0(%rcx),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%rcx),%rcx adcq $0,%rdx movq %rdi,(%r14) movq %rdx,%r13 addq $32,%r15 jnz .L1st4x mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%r14) movq %rdx,%r13 leaq (%rcx,%r9,1),%rcx xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi movq %r13,-8(%r14) jmp .Louter4x .align 32 .Louter4x: leaq 16+128(%r14),%rdx pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa -128(%r12),%xmm0 movdqa -112(%r12),%xmm1 movdqa -96(%r12),%xmm2 
movdqa -80(%r12),%xmm3 pand -128(%rdx),%xmm0 pand -112(%rdx),%xmm1 por %xmm0,%xmm4 pand -96(%rdx),%xmm2 por %xmm1,%xmm5 pand -80(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%r12),%xmm0 movdqa -48(%r12),%xmm1 movdqa -32(%r12),%xmm2 movdqa -16(%r12),%xmm3 pand -64(%rdx),%xmm0 pand -48(%rdx),%xmm1 por %xmm0,%xmm4 pand -32(%rdx),%xmm2 por %xmm1,%xmm5 pand -16(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%r12),%xmm0 movdqa 16(%r12),%xmm1 movdqa 32(%r12),%xmm2 movdqa 48(%r12),%xmm3 pand 0(%rdx),%xmm0 pand 16(%rdx),%xmm1 por %xmm0,%xmm4 pand 32(%rdx),%xmm2 por %xmm1,%xmm5 pand 48(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%r12),%xmm0 movdqa 80(%r12),%xmm1 movdqa 96(%r12),%xmm2 movdqa 112(%r12),%xmm3 pand 64(%rdx),%xmm0 pand 80(%rdx),%xmm1 por %xmm0,%xmm4 pand 96(%rdx),%xmm2 por %xmm1,%xmm5 pand 112(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 leaq 256(%r12),%r12 .byte 102,72,15,126,195 movq (%r14,%r9,1),%r10 movq %r8,%rbp mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 movq %rdi,(%r14) leaq (%r14,%r9,1),%r14 mulq %rbp addq %rax,%r10 movq 8(%rsi,%r9,1),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx addq 8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%r9),%r15 leaq 32(%rcx),%rcx adcq $0,%rdx movq %rdx,%r13 jmp .Linner4x .align 32 .Linner4x: mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax adcq $0,%rdx addq 16(%r14),%r10 leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %rdi,-32(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx),%rax adcq $0,%rdx addq -8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%r13 mulq %rbx addq 
%rax,%r10 movq 0(%rcx),%rax adcq $0,%rdx addq (%r14),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %rdi,-16(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx addq 8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%rcx),%rcx adcq $0,%rdx movq %r13,-8(%r14) movq %rdx,%r13 addq $32,%r15 jnz .Linner4x mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax adcq $0,%rdx addq 16(%r14),%r10 leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %rdi,-32(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq %rbp,%rax movq -8(%rcx),%rbp adcq $0,%rdx addq -8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%r13 movq %rdi,-16(%r14) leaq (%rcx,%r9,1),%rcx xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi addq (%r14),%r13 adcq $0,%rdi movq %r13,-8(%r14) cmpq 16+8(%rsp),%r12 jb .Louter4x xorq %rax,%rax subq %r13,%rbp adcq %r15,%r15 orq %r15,%rdi subq %rdi,%rax leaq (%r14,%r9,1),%rbx movq (%rcx),%r12 leaq (%rcx),%rbp movq %r9,%rcx sarq $3+2,%rcx movq 56+8(%rsp),%rdi decq %r12 xorq %r10,%r10 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqr4x_sub_entry .cfi_endproc .size mul4x_internal,.-mul4x_internal .globl bn_power5_nohw .hidden bn_power5_nohw .type bn_power5_nohw,@function .align 32 bn_power5_nohw: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 .Lpower5_prologue: shll $3,%r9d leal (%r9,%r9,2),%r10d negq %r9 movq (%r8),%r8 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq 
%r11,%r10 jb .Lpwr_sp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp .Lpwr_sp_done .align 32 .Lpwr_sp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp .Lpwr_sp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lpwr_page_walk jmp .Lpwr_page_walk_done .Lpwr_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lpwr_page_walk .Lpwr_page_walk_done: movq %r9,%r10 negq %r9 movq %r8,32(%rsp) movq %rax,40(%rsp) .cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 .Lpower5_body: .byte 102,72,15,110,207 .byte 102,72,15,110,209 .byte 102,73,15,110,218 .byte 102,72,15,110,226 call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal .byte 102,72,15,126,209 .byte 102,72,15,126,226 movq %rsi,%rdi movq 40(%rsp),%rax leaq 32(%rsp),%r8 call mul4x_internal movq 40(%rsp),%rsi .cfi_def_cfa %rsi,8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lpower5_epilogue: ret .cfi_endproc .size bn_power5_nohw,.-bn_power5_nohw .globl bn_sqr8x_internal .hidden bn_sqr8x_internal .hidden bn_sqr8x_internal .type bn_sqr8x_internal,@function .align 32 bn_sqr8x_internal: __bn_sqr8x_internal: .cfi_startproc _CET_ENDBR leaq 32(%r10),%rbp leaq (%rsi,%r9,1),%rsi movq %r9,%rcx movq -32(%rsi,%rbp,1),%r14 leaq 48+8(%rsp,%r9,2),%rdi movq -24(%rsi,%rbp,1),%rax leaq -32(%rdi,%rbp,1),%rdi movq -16(%rsi,%rbp,1),%rbx movq %rax,%r15 mulq %r14 movq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 movq %r10,-24(%rdi,%rbp,1) mulq %r14 addq 
%rax,%r11 movq %rbx,%rax adcq $0,%rdx movq %r11,-16(%rdi,%rbp,1) movq %rdx,%r10 movq -8(%rsi,%rbp,1),%rbx mulq %r15 movq %rax,%r12 movq %rbx,%rax movq %rdx,%r13 leaq (%rbp),%rcx mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 movq %r10,-8(%rdi,%rcx,1) jmp .Lsqr4x_1st .align 32 .Lsqr4x_1st: movq (%rsi,%rcx,1),%rbx mulq %r15 addq %rax,%r13 movq %rbx,%rax movq %rdx,%r12 adcq $0,%r12 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq 8(%rsi,%rcx,1),%rbx movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %rbx,%rax movq %r11,(%rdi,%rcx,1) movq %rdx,%r13 adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax movq 16(%rsi,%rcx,1),%rbx movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 mulq %r15 addq %rax,%r13 movq %rbx,%rax movq %r10,8(%rdi,%rcx,1) movq %rdx,%r12 adcq $0,%r12 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq 24(%rsi,%rcx,1),%rbx movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %rbx,%rax movq %r11,16(%rdi,%rcx,1) movq %rdx,%r13 adcq $0,%r13 leaq 32(%rcx),%rcx mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 movq %r10,-8(%rdi,%rcx,1) cmpq $0,%rcx jne .Lsqr4x_1st mulq %r15 addq %rax,%r13 leaq 16(%rbp),%rbp adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,(%rdi) movq %rdx,%r12 movq %rdx,8(%rdi) jmp .Lsqr4x_outer .align 32 .Lsqr4x_outer: movq -32(%rsi,%rbp,1),%r14 leaq 48+8(%rsp,%r9,2),%rdi movq -24(%rsi,%rbp,1),%rax leaq -32(%rdi,%rbp,1),%rdi movq -16(%rsi,%rbp,1),%rbx movq %rax,%r15 mulq %r14 movq -24(%rdi,%rbp,1),%r10 addq %rax,%r10 movq %rbx,%rax adcq $0,%rdx movq %r10,-24(%rdi,%rbp,1) movq %rdx,%r11 mulq %r14 addq %rax,%r11 movq %rbx,%rax adcq $0,%rdx addq -16(%rdi,%rbp,1),%r11 movq %rdx,%r10 adcq $0,%r10 movq %r11,-16(%rdi,%rbp,1) xorq %r12,%r12 movq -8(%rsi,%rbp,1),%rbx mulq %r15 addq %rax,%r12 movq %rbx,%rax adcq $0,%rdx addq -8(%rdi,%rbp,1),%r12 movq %rdx,%r13 adcq $0,%r13 mulq %r14 addq 
%rax,%r10 movq %rbx,%rax adcq $0,%rdx addq %r12,%r10 movq %rdx,%r11 adcq $0,%r11 movq %r10,-8(%rdi,%rbp,1) leaq (%rbp),%rcx jmp .Lsqr4x_inner .align 32 .Lsqr4x_inner: movq (%rsi,%rcx,1),%rbx mulq %r15 addq %rax,%r13 movq %rbx,%rax movq %rdx,%r12 adcq $0,%r12 addq (%rdi,%rcx,1),%r13 adcq $0,%r12 .byte 0x67 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq 8(%rsi,%rcx,1),%rbx movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %r11,(%rdi,%rcx,1) movq %rbx,%rax movq %rdx,%r13 adcq $0,%r13 addq 8(%rdi,%rcx,1),%r12 leaq 16(%rcx),%rcx adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax adcq $0,%rdx addq %r12,%r10 movq %rdx,%r11 adcq $0,%r11 movq %r10,-8(%rdi,%rcx,1) cmpq $0,%rcx jne .Lsqr4x_inner .byte 0x67 mulq %r15 addq %rax,%r13 adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,(%rdi) movq %rdx,%r12 movq %rdx,8(%rdi) addq $16,%rbp jnz .Lsqr4x_outer movq -32(%rsi),%r14 leaq 48+8(%rsp,%r9,2),%rdi movq -24(%rsi),%rax leaq -32(%rdi,%rbp,1),%rdi movq -16(%rsi),%rbx movq %rax,%r15 mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq %r10,-24(%rdi) movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 movq -8(%rsi),%rbx adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %rbx,%rax movq %r11,-16(%rdi) movq %rdx,%r13 adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 movq %r10,-8(%rdi) mulq %r15 addq %rax,%r13 movq -16(%rsi),%rax adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,(%rdi) movq %rdx,%r12 movq %rdx,8(%rdi) mulq %rbx addq $16,%rbp xorq %r14,%r14 subq %r9,%rbp xorq %r15,%r15 addq %r12,%rax adcq $0,%rdx movq %rax,8(%rdi) movq %rdx,16(%rdi) movq %r15,24(%rdi) movq -16(%rsi,%rbp,1),%rax leaq 48+8(%rsp),%rdi xorq %r10,%r10 movq 8(%rdi),%r11 leaq (%r14,%r10,2),%r12 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq 16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 24(%rdi),%r11 adcq %rax,%r12 movq -8(%rsi,%rbp,1),%rax 
movq %r12,(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,8(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 movq 32(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 40(%rdi),%r11 adcq %rax,%rbx movq 0(%rsi,%rbp,1),%rax movq %rbx,16(%rdi) adcq %rdx,%r8 leaq 16(%rbp),%rbp movq %r8,24(%rdi) sbbq %r15,%r15 leaq 64(%rdi),%rdi jmp .Lsqr4x_shift_n_add .align 32 .Lsqr4x_shift_n_add: leaq (%r14,%r10,2),%r12 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq -16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq -8(%rdi),%r11 adcq %rax,%r12 movq -8(%rsi,%rbp,1),%rax movq %r12,-32(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,-24(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 movq 0(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 8(%rdi),%r11 adcq %rax,%rbx movq 0(%rsi,%rbp,1),%rax movq %rbx,-16(%rdi) adcq %rdx,%r8 leaq (%r14,%r10,2),%r12 movq %r8,-8(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq 16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 24(%rdi),%r11 adcq %rax,%r12 movq 8(%rsi,%rbp,1),%rax movq %r12,0(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,8(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 movq 32(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 40(%rdi),%r11 adcq %rax,%rbx movq 16(%rsi,%rbp,1),%rax movq %rbx,16(%rdi) adcq %rdx,%r8 movq %r8,24(%rdi) sbbq %r15,%r15 leaq 64(%rdi),%rdi addq $32,%rbp jnz .Lsqr4x_shift_n_add leaq (%r14,%r10,2),%r12 .byte 0x67 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq -16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq -8(%rdi),%r11 adcq %rax,%r12 movq -8(%rsi),%rax movq %r12,-32(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,-24(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 mulq %rax negq %r15 adcq %rax,%rbx adcq %rdx,%r8 movq %rbx,-16(%rdi) 
movq %r8,-8(%rdi) .byte 102,72,15,126,213 __bn_sqr8x_reduction: xorq %rax,%rax leaq (%r9,%rbp,1),%rcx leaq 48+8(%rsp,%r9,2),%rdx movq %rcx,0+8(%rsp) leaq 48+8(%rsp,%r9,1),%rdi movq %rdx,8+8(%rsp) negq %r9 jmp .L8x_reduction_loop .align 32 .L8x_reduction_loop: leaq (%rdi,%r9,1),%rdi .byte 0x66 movq 0(%rdi),%rbx movq 8(%rdi),%r9 movq 16(%rdi),%r10 movq 24(%rdi),%r11 movq 32(%rdi),%r12 movq 40(%rdi),%r13 movq 48(%rdi),%r14 movq 56(%rdi),%r15 movq %rax,(%rdx) leaq 64(%rdi),%rdi .byte 0x67 movq %rbx,%r8 imulq 32+8(%rsp),%rbx movq 0(%rbp),%rax movl $8,%ecx jmp .L8x_reduce .align 32 .L8x_reduce: mulq %rbx movq 8(%rbp),%rax negq %r8 movq %rdx,%r8 adcq $0,%r8 mulq %rbx addq %rax,%r9 movq 16(%rbp),%rax adcq $0,%rdx addq %r9,%r8 movq %rbx,48-8+8(%rsp,%rcx,8) movq %rdx,%r9 adcq $0,%r9 mulq %rbx addq %rax,%r10 movq 24(%rbp),%rax adcq $0,%rdx addq %r10,%r9 movq 32+8(%rsp),%rsi movq %rdx,%r10 adcq $0,%r10 mulq %rbx addq %rax,%r11 movq 32(%rbp),%rax adcq $0,%rdx imulq %r8,%rsi addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 mulq %rbx addq %rax,%r12 movq 40(%rbp),%rax adcq $0,%rdx addq %r12,%r11 movq %rdx,%r12 adcq $0,%r12 mulq %rbx addq %rax,%r13 movq 48(%rbp),%rax adcq $0,%rdx addq %r13,%r12 movq %rdx,%r13 adcq $0,%r13 mulq %rbx addq %rax,%r14 movq 56(%rbp),%rax adcq $0,%rdx addq %r14,%r13 movq %rdx,%r14 adcq $0,%r14 mulq %rbx movq %rsi,%rbx addq %rax,%r15 movq 0(%rbp),%rax adcq $0,%rdx addq %r15,%r14 movq %rdx,%r15 adcq $0,%r15 decl %ecx jnz .L8x_reduce leaq 64(%rbp),%rbp xorq %rax,%rax movq 8+8(%rsp),%rdx cmpq 0+8(%rsp),%rbp jae .L8x_no_tail .byte 0x66 addq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 sbbq %rsi,%rsi movq 48+56+8(%rsp),%rbx movl $8,%ecx movq 0(%rbp),%rax jmp .L8x_tail .align 32 .L8x_tail: mulq %rbx addq %rax,%r8 movq 8(%rbp),%rax movq %r8,(%rdi) movq %rdx,%r8 adcq $0,%r8 mulq %rbx addq %rax,%r9 movq 16(%rbp),%rax adcq $0,%rdx addq %r9,%r8 leaq 8(%rdi),%rdi movq %rdx,%r9 
adcq $0,%r9 mulq %rbx addq %rax,%r10 movq 24(%rbp),%rax adcq $0,%rdx addq %r10,%r9 movq %rdx,%r10 adcq $0,%r10 mulq %rbx addq %rax,%r11 movq 32(%rbp),%rax adcq $0,%rdx addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 mulq %rbx addq %rax,%r12 movq 40(%rbp),%rax adcq $0,%rdx addq %r12,%r11 movq %rdx,%r12 adcq $0,%r12 mulq %rbx addq %rax,%r13 movq 48(%rbp),%rax adcq $0,%rdx addq %r13,%r12 movq %rdx,%r13 adcq $0,%r13 mulq %rbx addq %rax,%r14 movq 56(%rbp),%rax adcq $0,%rdx addq %r14,%r13 movq %rdx,%r14 adcq $0,%r14 mulq %rbx movq 48-16+8(%rsp,%rcx,8),%rbx addq %rax,%r15 adcq $0,%rdx addq %r15,%r14 movq 0(%rbp),%rax movq %rdx,%r15 adcq $0,%r15 decl %ecx jnz .L8x_tail leaq 64(%rbp),%rbp movq 8+8(%rsp),%rdx cmpq 0+8(%rsp),%rbp jae .L8x_tail_done movq 48+56+8(%rsp),%rbx negq %rsi movq 0(%rbp),%rax adcq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 sbbq %rsi,%rsi movl $8,%ecx jmp .L8x_tail .align 32 .L8x_tail_done: xorq %rax,%rax addq (%rdx),%r8 adcq $0,%r9 adcq $0,%r10 adcq $0,%r11 adcq $0,%r12 adcq $0,%r13 adcq $0,%r14 adcq $0,%r15 adcq $0,%rax negq %rsi .L8x_no_tail: adcq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 adcq $0,%rax movq -8(%rbp),%rcx xorq %rsi,%rsi .byte 102,72,15,126,213 movq %r8,0(%rdi) movq %r9,8(%rdi) .byte 102,73,15,126,217 movq %r10,16(%rdi) movq %r11,24(%rdi) movq %r12,32(%rdi) movq %r13,40(%rdi) movq %r14,48(%rdi) movq %r15,56(%rdi) leaq 64(%rdi),%rdi cmpq %rdx,%rdi jb .L8x_reduction_loop ret .cfi_endproc .size bn_sqr8x_internal,.-bn_sqr8x_internal .type __bn_post4x_internal,@function .align 32 __bn_post4x_internal: .cfi_startproc movq 0(%rbp),%r12 leaq (%rdi,%r9,1),%rbx movq %r9,%rcx .byte 102,72,15,126,207 negq %rax .byte 102,72,15,126,206 sarq $3+2,%rcx decq %r12 xorq %r10,%r10 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqr4x_sub_entry .align 
16 .Lsqr4x_sub: movq 0(%rbp),%r12 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 .Lsqr4x_sub_entry: leaq 32(%rbp),%rbp notq %r12 notq %r13 notq %r14 notq %r15 andq %rax,%r12 andq %rax,%r13 andq %rax,%r14 andq %rax,%r15 negq %r10 adcq 0(%rbx),%r12 adcq 8(%rbx),%r13 adcq 16(%rbx),%r14 adcq 24(%rbx),%r15 movq %r12,0(%rdi) leaq 32(%rbx),%rbx movq %r13,8(%rdi) sbbq %r10,%r10 movq %r14,16(%rdi) movq %r15,24(%rdi) leaq 32(%rdi),%rdi incq %rcx jnz .Lsqr4x_sub movq %r9,%r10 negq %r9 ret .cfi_endproc .size __bn_post4x_internal,.-__bn_post4x_internal .globl bn_mulx4x_mont_gather5 .hidden bn_mulx4x_mont_gather5 .type bn_mulx4x_mont_gather5,@function .align 32 bn_mulx4x_mont_gather5: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 .Lmulx4x_prologue: shll $3,%r9d leaq (%r9,%r9,2),%r10 negq %r9 movq (%r8),%r8 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb .Lmulx4xsp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp .Lmulx4xsp_done .Lmulx4xsp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp .Lmulx4xsp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lmulx4x_page_walk jmp .Lmulx4x_page_walk_done .Lmulx4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lmulx4x_page_walk .Lmulx4x_page_walk_done: movq %r8,32(%rsp) movq %rax,40(%rsp) .cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 .Lmulx4x_body: call mulx4x_internal movq 40(%rsp),%rsi .cfi_def_cfa %rsi,8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq 
-8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lmulx4x_epilogue: ret .cfi_endproc .size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5 .type mulx4x_internal,@function .align 32 mulx4x_internal: .cfi_startproc movq %r9,8(%rsp) movq %r9,%r10 negq %r9 shlq $5,%r9 negq %r10 leaq 128(%rdx,%r9,1),%r13 shrq $5+5,%r9 movd 8(%rax),%xmm5 subq $1,%r9 leaq .Linc(%rip),%rax movq %r13,16+8(%rsp) movq %r9,24+8(%rsp) movq %rdi,56+8(%rsp) movdqa 0(%rax),%xmm0 movdqa 16(%rax),%xmm1 leaq 88-112(%rsp,%r10,1),%r10 leaq 128(%rdx),%rdi pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 .byte 0x67 movdqa %xmm1,%xmm2 .byte 0x67 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,112(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,128(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,144(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,160(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,176(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,192(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,208(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,224(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,240(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,256(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,272(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,288(%r10) movdqa %xmm4,%xmm3 .byte 0x67 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,304(%r10) paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,320(%r10) pcmpeqd %xmm5,%xmm3 movdqa %xmm2,336(%r10) pand 64(%rdi),%xmm0 pand 80(%rdi),%xmm1 pand 96(%rdi),%xmm2 movdqa %xmm3,352(%r10) pand 112(%rdi),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -128(%rdi),%xmm4 movdqa 
-112(%rdi),%xmm5 movdqa -96(%rdi),%xmm2 pand 112(%r10),%xmm4 movdqa -80(%rdi),%xmm3 pand 128(%r10),%xmm5 por %xmm4,%xmm0 pand 144(%r10),%xmm2 por %xmm5,%xmm1 pand 160(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -64(%rdi),%xmm4 movdqa -48(%rdi),%xmm5 movdqa -32(%rdi),%xmm2 pand 176(%r10),%xmm4 movdqa -16(%rdi),%xmm3 pand 192(%r10),%xmm5 por %xmm4,%xmm0 pand 208(%r10),%xmm2 por %xmm5,%xmm1 pand 224(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa 0(%rdi),%xmm4 movdqa 16(%rdi),%xmm5 movdqa 32(%rdi),%xmm2 pand 240(%r10),%xmm4 movdqa 48(%rdi),%xmm3 pand 256(%r10),%xmm5 por %xmm4,%xmm0 pand 272(%r10),%xmm2 por %xmm5,%xmm1 pand 288(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 pxor %xmm1,%xmm0 pshufd $0x4e,%xmm0,%xmm1 por %xmm1,%xmm0 leaq 256(%rdi),%rdi .byte 102,72,15,126,194 leaq 64+32+8(%rsp),%rbx movq %rdx,%r9 mulxq 0(%rsi),%r8,%rax mulxq 8(%rsi),%r11,%r12 addq %rax,%r11 mulxq 16(%rsi),%rax,%r13 adcq %rax,%r12 adcq $0,%r13 mulxq 24(%rsi),%rax,%r14 movq %r8,%r15 imulq 32+8(%rsp),%r8 xorq %rbp,%rbp movq %r8,%rdx movq %rdi,8+8(%rsp) leaq 32(%rsi),%rsi adcxq %rax,%r13 adcxq %rbp,%r14 mulxq 0(%rcx),%rax,%r10 adcxq %rax,%r15 adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 mulxq 16(%rcx),%rax,%r12 movq 24+8(%rsp),%rdi movq %r10,-32(%rbx) adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-24(%rbx) adcxq %rax,%r12 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r12,-16(%rbx) jmp .Lmulx4x_1st .align 32 .Lmulx4x_1st: adcxq %rbp,%r15 mulxq 0(%rsi),%r10,%rax adcxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 .byte 0x67,0x67 movq %r8,%rdx adcxq %rax,%r13 adcxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 movq %r11,-32(%rbx) adoxq %r15,%r13 mulxq 24(%rcx),%rax,%r15 movq 
%r9,%rdx movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r13,-16(%rbx) decq %rdi jnz .Lmulx4x_1st movq 8(%rsp),%rax adcq %rbp,%r15 leaq (%rsi,%rax,1),%rsi addq %r15,%r14 movq 8+8(%rsp),%rdi adcq %rbp,%rbp movq %r14,-8(%rbx) jmp .Lmulx4x_outer .align 32 .Lmulx4x_outer: leaq 16-256(%rbx),%r10 pxor %xmm4,%xmm4 .byte 0x67,0x67 pxor %xmm5,%xmm5 movdqa -128(%rdi),%xmm0 movdqa -112(%rdi),%xmm1 movdqa -96(%rdi),%xmm2 pand 256(%r10),%xmm0 movdqa -80(%rdi),%xmm3 pand 272(%r10),%xmm1 por %xmm0,%xmm4 pand 288(%r10),%xmm2 por %xmm1,%xmm5 pand 304(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%rdi),%xmm0 movdqa -48(%rdi),%xmm1 movdqa -32(%rdi),%xmm2 pand 320(%r10),%xmm0 movdqa -16(%rdi),%xmm3 pand 336(%r10),%xmm1 por %xmm0,%xmm4 pand 352(%r10),%xmm2 por %xmm1,%xmm5 pand 368(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%rdi),%xmm0 movdqa 16(%rdi),%xmm1 movdqa 32(%rdi),%xmm2 pand 384(%r10),%xmm0 movdqa 48(%rdi),%xmm3 pand 400(%r10),%xmm1 por %xmm0,%xmm4 pand 416(%r10),%xmm2 por %xmm1,%xmm5 pand 432(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%rdi),%xmm0 movdqa 80(%rdi),%xmm1 movdqa 96(%rdi),%xmm2 pand 448(%r10),%xmm0 movdqa 112(%rdi),%xmm3 pand 464(%r10),%xmm1 por %xmm0,%xmm4 pand 480(%r10),%xmm2 por %xmm1,%xmm5 pand 496(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 leaq 256(%rdi),%rdi .byte 102,72,15,126,194 movq %rbp,(%rbx) leaq 32(%rbx,%rax,1),%rbx mulxq 0(%rsi),%r8,%r11 xorq %rbp,%rbp movq %rdx,%r9 mulxq 8(%rsi),%r14,%r12 adoxq -32(%rbx),%r8 adcxq %r14,%r11 mulxq 16(%rsi),%r15,%r13 adoxq -24(%rbx),%r11 adcxq %r15,%r12 mulxq 24(%rsi),%rdx,%r14 adoxq -16(%rbx),%r12 adcxq %rdx,%r13 leaq (%rcx,%rax,1),%rcx leaq 32(%rsi),%rsi adoxq -8(%rbx),%r13 adcxq %rbp,%r14 adoxq %rbp,%r14 movq %r8,%r15 imulq 32+8(%rsp),%r8 movq %r8,%rdx xorq %rbp,%rbp movq %rdi,8+8(%rsp) mulxq 0(%rcx),%rax,%r10 adcxq %rax,%r15 adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 
mulxq 16(%rcx),%rax,%r12 adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq 24+8(%rsp),%rdi movq %r10,-32(%rbx) adcxq %rax,%r12 movq %r11,-24(%rbx) adoxq %rbp,%r15 movq %r12,-16(%rbx) leaq 32(%rcx),%rcx jmp .Lmulx4x_inner .align 32 .Lmulx4x_inner: mulxq 0(%rsi),%r10,%rax adcxq %rbp,%r15 adoxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq 0(%rbx),%r10 adoxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq 8(%rbx),%r11 adoxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 movq %r8,%rdx adcxq 16(%rbx),%r12 adoxq %rax,%r13 adcxq 24(%rbx),%r13 adoxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adcxq %rbp,%r14 adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 adoxq %r15,%r13 movq %r11,-32(%rbx) mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx leaq 32(%rcx),%rcx movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 movq %r13,-16(%rbx) decq %rdi jnz .Lmulx4x_inner movq 0+8(%rsp),%rax adcq %rbp,%r15 subq 0(%rbx),%rdi movq 8+8(%rsp),%rdi movq 16+8(%rsp),%r10 adcq %r15,%r14 leaq (%rsi,%rax,1),%rsi adcq %rbp,%rbp movq %r14,-8(%rbx) cmpq %r10,%rdi jb .Lmulx4x_outer movq -8(%rcx),%r10 movq %rbp,%r8 movq (%rcx,%rax,1),%r12 leaq (%rcx,%rax,1),%rbp movq %rax,%rcx leaq (%rbx,%rax,1),%rdi xorl %eax,%eax xorq %r15,%r15 subq %r14,%r10 adcq %r15,%r15 orq %r15,%r8 sarq $3+2,%rcx subq %r8,%rax movq 56+8(%rsp),%rdx decq %r12 movq 8(%rbp),%r13 xorq %r8,%r8 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqrx4x_sub_entry .cfi_endproc .size mulx4x_internal,.-mulx4x_internal .globl bn_powerx5 .hidden bn_powerx5 .type bn_powerx5,@function .align 32 bn_powerx5: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 .Lpowerx5_prologue: shll $3,%r9d leaq (%r9,%r9,2),%r10 
negq %r9 movq (%r8),%r8 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb .Lpwrx_sp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp .Lpwrx_sp_done .align 32 .Lpwrx_sp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp .Lpwrx_sp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lpwrx_page_walk jmp .Lpwrx_page_walk_done .Lpwrx_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lpwrx_page_walk .Lpwrx_page_walk_done: movq %r9,%r10 negq %r9 pxor %xmm0,%xmm0 .byte 102,72,15,110,207 .byte 102,72,15,110,209 .byte 102,73,15,110,218 .byte 102,72,15,110,226 movq %r8,32(%rsp) movq %rax,40(%rsp) .cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 .Lpowerx5_body: call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal movq %r10,%r9 movq %rsi,%rdi .byte 102,72,15,126,209 .byte 102,72,15,126,226 movq 40(%rsp),%rax call mulx4x_internal movq 40(%rsp),%rsi .cfi_def_cfa %rsi,8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lpowerx5_epilogue: ret .cfi_endproc .size bn_powerx5,.-bn_powerx5 .globl bn_sqrx8x_internal .hidden bn_sqrx8x_internal .hidden bn_sqrx8x_internal .type bn_sqrx8x_internal,@function .align 32 bn_sqrx8x_internal: __bn_sqrx8x_internal: .cfi_startproc _CET_ENDBR leaq 48+8(%rsp),%rdi leaq (%rsi,%r9,1),%rbp movq %r9,0+8(%rsp) movq %rbp,8+8(%rsp) jmp .Lsqr8x_zero_start .align 32 .byte 
0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00 .Lsqrx8x_zero: .byte 0x3e movdqa %xmm0,0(%rdi) movdqa %xmm0,16(%rdi) movdqa %xmm0,32(%rdi) movdqa %xmm0,48(%rdi) .Lsqr8x_zero_start: movdqa %xmm0,64(%rdi) movdqa %xmm0,80(%rdi) movdqa %xmm0,96(%rdi) movdqa %xmm0,112(%rdi) leaq 128(%rdi),%rdi subq $64,%r9 jnz .Lsqrx8x_zero movq 0(%rsi),%rdx xorq %r10,%r10 xorq %r11,%r11 xorq %r12,%r12 xorq %r13,%r13 xorq %r14,%r14 xorq %r15,%r15 leaq 48+8(%rsp),%rdi xorq %rbp,%rbp jmp .Lsqrx8x_outer_loop .align 32 .Lsqrx8x_outer_loop: mulxq 8(%rsi),%r8,%rax adcxq %r9,%r8 adoxq %rax,%r10 mulxq 16(%rsi),%r9,%rax adcxq %r10,%r9 adoxq %rax,%r11 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 adcxq %r11,%r10 adoxq %rax,%r12 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 adcxq %r12,%r11 adoxq %rax,%r13 mulxq 40(%rsi),%r12,%rax adcxq %r13,%r12 adoxq %rax,%r14 mulxq 48(%rsi),%r13,%rax adcxq %r14,%r13 adoxq %r15,%rax mulxq 56(%rsi),%r14,%r15 movq 8(%rsi),%rdx adcxq %rax,%r14 adoxq %rbp,%r15 adcq 64(%rdi),%r15 movq %r8,8(%rdi) movq %r9,16(%rdi) sbbq %rcx,%rcx xorq %rbp,%rbp mulxq 16(%rsi),%r8,%rbx mulxq 24(%rsi),%r9,%rax adcxq %r10,%r8 adoxq %rbx,%r9 mulxq 32(%rsi),%r10,%rbx adcxq %r11,%r9 adoxq %rax,%r10 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 adcxq %r12,%r10 adoxq %rbx,%r11 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 adcxq %r13,%r11 adoxq %r14,%r12 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 movq 16(%rsi),%rdx adcxq %rax,%r12 adoxq %rbx,%r13 adcxq %r15,%r13 adoxq %rbp,%r14 adcxq %rbp,%r14 movq %r8,24(%rdi) movq %r9,32(%rdi) mulxq 24(%rsi),%r8,%rbx mulxq 32(%rsi),%r9,%rax adcxq %r10,%r8 adoxq %rbx,%r9 mulxq 40(%rsi),%r10,%rbx adcxq %r11,%r9 adoxq %rax,%r10 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 adcxq %r12,%r10 adoxq %r13,%r11 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 .byte 0x3e movq 24(%rsi),%rdx adcxq %rbx,%r11 adoxq %rax,%r12 adcxq %r14,%r12 movq %r8,40(%rdi) movq %r9,48(%rdi) mulxq 32(%rsi),%r8,%rax adoxq %rbp,%r13 adcxq 
%rbp,%r13 mulxq 40(%rsi),%r9,%rbx adcxq %r10,%r8 adoxq %rax,%r9 mulxq 48(%rsi),%r10,%rax adcxq %r11,%r9 adoxq %r12,%r10 mulxq 56(%rsi),%r11,%r12 movq 32(%rsi),%rdx movq 40(%rsi),%r14 adcxq %rbx,%r10 adoxq %rax,%r11 movq 48(%rsi),%r15 adcxq %r13,%r11 adoxq %rbp,%r12 adcxq %rbp,%r12 movq %r8,56(%rdi) movq %r9,64(%rdi) mulxq %r14,%r9,%rax movq 56(%rsi),%r8 adcxq %r10,%r9 mulxq %r15,%r10,%rbx adoxq %rax,%r10 adcxq %r11,%r10 mulxq %r8,%r11,%rax movq %r14,%rdx adoxq %rbx,%r11 adcxq %r12,%r11 adcxq %rbp,%rax mulxq %r15,%r14,%rbx mulxq %r8,%r12,%r13 movq %r15,%rdx leaq 64(%rsi),%rsi adcxq %r14,%r11 adoxq %rbx,%r12 adcxq %rax,%r12 adoxq %rbp,%r13 .byte 0x67,0x67 mulxq %r8,%r8,%r14 adcxq %r8,%r13 adcxq %rbp,%r14 cmpq 8+8(%rsp),%rsi je .Lsqrx8x_outer_break negq %rcx movq $-8,%rcx movq %rbp,%r15 movq 64(%rdi),%r8 adcxq 72(%rdi),%r9 adcxq 80(%rdi),%r10 adcxq 88(%rdi),%r11 adcq 96(%rdi),%r12 adcq 104(%rdi),%r13 adcq 112(%rdi),%r14 adcq 120(%rdi),%r15 leaq (%rsi),%rbp leaq 128(%rdi),%rdi sbbq %rax,%rax movq -64(%rsi),%rdx movq %rax,16+8(%rsp) movq %rdi,24+8(%rsp) xorl %eax,%eax jmp .Lsqrx8x_loop .align 32 .Lsqrx8x_loop: movq %r8,%rbx mulxq 0(%rbp),%rax,%r8 adcxq %rax,%rbx adoxq %r9,%r8 mulxq 8(%rbp),%rax,%r9 adcxq %rax,%r8 adoxq %r10,%r9 mulxq 16(%rbp),%rax,%r10 adcxq %rax,%r9 adoxq %r11,%r10 mulxq 24(%rbp),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 adcxq %rax,%r11 adoxq %r13,%r12 mulxq 40(%rbp),%rax,%r13 adcxq %rax,%r12 adoxq %r14,%r13 mulxq 48(%rbp),%rax,%r14 movq %rbx,(%rdi,%rcx,8) movl $0,%ebx adcxq %rax,%r13 adoxq %r15,%r14 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 movq 8(%rsi,%rcx,8),%rdx adcxq %rax,%r14 adoxq %rbx,%r15 adcxq %rbx,%r15 .byte 0x67 incq %rcx jnz .Lsqrx8x_loop leaq 64(%rbp),%rbp movq $-8,%rcx cmpq 8+8(%rsp),%rbp je .Lsqrx8x_break subq 16+8(%rsp),%rbx .byte 0x66 movq -64(%rsi),%rdx adcxq 0(%rdi),%r8 adcxq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 
48(%rdi),%r14 adcq 56(%rdi),%r15 leaq 64(%rdi),%rdi .byte 0x67 sbbq %rax,%rax xorl %ebx,%ebx movq %rax,16+8(%rsp) jmp .Lsqrx8x_loop .align 32 .Lsqrx8x_break: xorq %rbp,%rbp subq 16+8(%rsp),%rbx adcxq %rbp,%r8 movq 24+8(%rsp),%rcx adcxq %rbp,%r9 movq 0(%rsi),%rdx adcq $0,%r10 movq %r8,0(%rdi) adcq $0,%r11 adcq $0,%r12 adcq $0,%r13 adcq $0,%r14 adcq $0,%r15 cmpq %rcx,%rdi je .Lsqrx8x_outer_loop movq %r9,8(%rdi) movq 8(%rcx),%r9 movq %r10,16(%rdi) movq 16(%rcx),%r10 movq %r11,24(%rdi) movq 24(%rcx),%r11 movq %r12,32(%rdi) movq 32(%rcx),%r12 movq %r13,40(%rdi) movq 40(%rcx),%r13 movq %r14,48(%rdi) movq 48(%rcx),%r14 movq %r15,56(%rdi) movq 56(%rcx),%r15 movq %rcx,%rdi jmp .Lsqrx8x_outer_loop .align 32 .Lsqrx8x_outer_break: movq %r9,72(%rdi) .byte 102,72,15,126,217 movq %r10,80(%rdi) movq %r11,88(%rdi) movq %r12,96(%rdi) movq %r13,104(%rdi) movq %r14,112(%rdi) leaq 48+8(%rsp),%rdi movq (%rsi,%rcx,1),%rdx movq 8(%rdi),%r11 xorq %r10,%r10 movq 0+8(%rsp),%r9 adoxq %r11,%r11 movq 16(%rdi),%r12 movq 24(%rdi),%r13 .align 32 .Lsqrx4x_shift_n_add: mulxq %rdx,%rax,%rbx adoxq %r12,%r12 adcxq %r10,%rax .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 adoxq %r13,%r13 adcxq %r11,%rbx movq 40(%rdi),%r11 movq %rax,0(%rdi) movq %rbx,8(%rdi) mulxq %rdx,%rax,%rbx adoxq %r10,%r10 adcxq %r12,%rax movq 16(%rsi,%rcx,1),%rdx movq 48(%rdi),%r12 adoxq %r11,%r11 adcxq %r13,%rbx movq 56(%rdi),%r13 movq %rax,16(%rdi) movq %rbx,24(%rdi) mulxq %rdx,%rax,%rbx adoxq %r12,%r12 adcxq %r10,%rax movq 24(%rsi,%rcx,1),%rdx leaq 32(%rcx),%rcx movq 64(%rdi),%r10 adoxq %r13,%r13 adcxq %r11,%rbx movq 72(%rdi),%r11 movq %rax,32(%rdi) movq %rbx,40(%rdi) mulxq %rdx,%rax,%rbx adoxq %r10,%r10 adcxq %r12,%rax jrcxz .Lsqrx4x_shift_n_add_break .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 adoxq %r11,%r11 adcxq %r13,%rbx movq 80(%rdi),%r12 movq 88(%rdi),%r13 movq %rax,48(%rdi) movq %rbx,56(%rdi) leaq 64(%rdi),%rdi nop jmp .Lsqrx4x_shift_n_add .align 32 .Lsqrx4x_shift_n_add_break: 
adcxq %r13,%rbx movq %rax,48(%rdi) movq %rbx,56(%rdi) leaq 64(%rdi),%rdi .byte 102,72,15,126,213 __bn_sqrx8x_reduction: xorl %eax,%eax movq 32+8(%rsp),%rbx movq 48+8(%rsp),%rdx leaq -64(%rbp,%r9,1),%rcx movq %rcx,0+8(%rsp) movq %rdi,8+8(%rsp) leaq 48+8(%rsp),%rdi jmp .Lsqrx8x_reduction_loop .align 32 .Lsqrx8x_reduction_loop: movq 8(%rdi),%r9 movq 16(%rdi),%r10 movq 24(%rdi),%r11 movq 32(%rdi),%r12 movq %rdx,%r8 imulq %rbx,%rdx movq 40(%rdi),%r13 movq 48(%rdi),%r14 movq 56(%rdi),%r15 movq %rax,24+8(%rsp) leaq 64(%rdi),%rdi xorq %rsi,%rsi movq $-8,%rcx jmp .Lsqrx8x_reduce .align 32 .Lsqrx8x_reduce: movq %r8,%rbx mulxq 0(%rbp),%rax,%r8 adcxq %rbx,%rax adoxq %r9,%r8 mulxq 8(%rbp),%rbx,%r9 adcxq %rbx,%r8 adoxq %r10,%r9 mulxq 16(%rbp),%rbx,%r10 adcxq %rbx,%r9 adoxq %r11,%r10 mulxq 24(%rbp),%rbx,%r11 adcxq %rbx,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 movq %rdx,%rax movq %r8,%rdx adcxq %rbx,%r11 adoxq %r13,%r12 mulxq 32+8(%rsp),%rbx,%rdx movq %rax,%rdx movq %rax,64+48+8(%rsp,%rcx,8) mulxq 40(%rbp),%rax,%r13 adcxq %rax,%r12 adoxq %r14,%r13 mulxq 48(%rbp),%rax,%r14 adcxq %rax,%r13 adoxq %r15,%r14 mulxq 56(%rbp),%rax,%r15 movq %rbx,%rdx adcxq %rax,%r14 adoxq %rsi,%r15 adcxq %rsi,%r15 .byte 0x67,0x67,0x67 incq %rcx jnz .Lsqrx8x_reduce movq %rsi,%rax cmpq 0+8(%rsp),%rbp jae .Lsqrx8x_no_tail movq 48+8(%rsp),%rdx addq 0(%rdi),%r8 leaq 64(%rbp),%rbp movq $-8,%rcx adcxq 8(%rdi),%r9 adcxq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 leaq 64(%rdi),%rdi sbbq %rax,%rax xorq %rsi,%rsi movq %rax,16+8(%rsp) jmp .Lsqrx8x_tail .align 32 .Lsqrx8x_tail: movq %r8,%rbx mulxq 0(%rbp),%rax,%r8 adcxq %rax,%rbx adoxq %r9,%r8 mulxq 8(%rbp),%rax,%r9 adcxq %rax,%r8 adoxq %r10,%r9 mulxq 16(%rbp),%rax,%r10 adcxq %rax,%r9 adoxq %r11,%r10 mulxq 24(%rbp),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 adcxq %rax,%r11 adoxq %r13,%r12 mulxq 40(%rbp),%rax,%r13 adcxq 
%rax,%r12 adoxq %r14,%r13 mulxq 48(%rbp),%rax,%r14 adcxq %rax,%r13 adoxq %r15,%r14 mulxq 56(%rbp),%rax,%r15 movq 72+48+8(%rsp,%rcx,8),%rdx adcxq %rax,%r14 adoxq %rsi,%r15 movq %rbx,(%rdi,%rcx,8) movq %r8,%rbx adcxq %rsi,%r15 incq %rcx jnz .Lsqrx8x_tail cmpq 0+8(%rsp),%rbp jae .Lsqrx8x_tail_done subq 16+8(%rsp),%rsi movq 48+8(%rsp),%rdx leaq 64(%rbp),%rbp adcq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 leaq 64(%rdi),%rdi sbbq %rax,%rax subq $8,%rcx xorq %rsi,%rsi movq %rax,16+8(%rsp) jmp .Lsqrx8x_tail .align 32 .Lsqrx8x_tail_done: xorq %rax,%rax addq 24+8(%rsp),%r8 adcq $0,%r9 adcq $0,%r10 adcq $0,%r11 adcq $0,%r12 adcq $0,%r13 adcq $0,%r14 adcq $0,%r15 adcq $0,%rax subq 16+8(%rsp),%rsi .Lsqrx8x_no_tail: adcq 0(%rdi),%r8 .byte 102,72,15,126,217 adcq 8(%rdi),%r9 movq 56(%rbp),%rsi .byte 102,72,15,126,213 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 adcq $0,%rax movq 32+8(%rsp),%rbx movq 64(%rdi,%rcx,1),%rdx movq %r8,0(%rdi) leaq 64(%rdi),%r8 movq %r9,8(%rdi) movq %r10,16(%rdi) movq %r11,24(%rdi) movq %r12,32(%rdi) movq %r13,40(%rdi) movq %r14,48(%rdi) movq %r15,56(%rdi) leaq 64(%rdi,%rcx,1),%rdi cmpq 8+8(%rsp),%r8 jb .Lsqrx8x_reduction_loop ret .cfi_endproc .size bn_sqrx8x_internal,.-bn_sqrx8x_internal .align 32 .type __bn_postx4x_internal,@function __bn_postx4x_internal: .cfi_startproc movq 0(%rbp),%r12 movq %rcx,%r10 movq %rcx,%r9 negq %rax sarq $3+2,%rcx .byte 102,72,15,126,202 .byte 102,72,15,126,206 decq %r12 movq 8(%rbp),%r13 xorq %r8,%r8 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqrx4x_sub_entry .align 16 .Lsqrx4x_sub: movq 0(%rbp),%r12 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 .Lsqrx4x_sub_entry: andnq %rax,%r12,%r12 leaq 32(%rbp),%rbp andnq %rax,%r13,%r13 andnq %rax,%r14,%r14 andnq %rax,%r15,%r15 negq %r8 adcq 0(%rdi),%r12 adcq 8(%rdi),%r13 adcq 16(%rdi),%r14 adcq
24(%rdi),%r15 movq %r12,0(%rdx) leaq 32(%rdi),%rdi movq %r13,8(%rdx) sbbq %r8,%r8 movq %r14,16(%rdx) movq %r15,24(%rdx) leaq 32(%rdx),%rdx incq %rcx jnz .Lsqrx4x_sub negq %r9 ret .cfi_endproc .size __bn_postx4x_internal,.-__bn_postx4x_internal .globl bn_scatter5 .hidden bn_scatter5 .type bn_scatter5,@function .align 16 bn_scatter5: .cfi_startproc _CET_ENDBR cmpl $0,%esi jz .Lscatter_epilogue leaq (%rdx,%rcx,8),%rdx .Lscatter: movq (%rdi),%rax leaq 8(%rdi),%rdi movq %rax,(%rdx) leaq 256(%rdx),%rdx subl $1,%esi jnz .Lscatter .Lscatter_epilogue: ret .cfi_endproc .size bn_scatter5,.-bn_scatter5 .globl bn_gather5 .hidden bn_gather5 .type bn_gather5,@function .align 32 bn_gather5: .cfi_startproc .LSEH_begin_bn_gather5: _CET_ENDBR .byte 0x4c,0x8d,0x14,0x24 .cfi_def_cfa_register %r10 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 leaq .Linc(%rip),%rax andq $-16,%rsp movd %ecx,%xmm5 movdqa 0(%rax),%xmm0 movdqa 16(%rax),%xmm1 leaq 128(%rdx),%r11 leaq 128(%rsp),%rax pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 movdqa %xmm1,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,-128(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,-112(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,-96(%rax) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,-80(%rax) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,-64(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,-48(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,-32(%rax) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,-16(%rax) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,0(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,16(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,32(%rax) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0
// bn_gather5 (continued): finish writing the pcmpeqd-generated selection masks into the
// 256-byte stack table at %rax, then .Lgather reads ALL 16 table entries per output word,
// AND-ing each against its mask and OR-ing the results together — a constant-time
// (cache-timing resistant) table lookup. The trailing .byte string is the generator's
// "Montgomery Multiplication with scatter/gather for x86_64" banner.
movdqa %xmm3,48(%rax) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,64(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,80(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,96(%rax) movdqa %xmm4,%xmm2 movdqa %xmm3,112(%rax) jmp .Lgather .align 32 .Lgather: pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa -128(%r11),%xmm0 movdqa -112(%r11),%xmm1 movdqa -96(%r11),%xmm2 pand -128(%rax),%xmm0 movdqa -80(%r11),%xmm3 pand -112(%rax),%xmm1 por %xmm0,%xmm4 pand -96(%rax),%xmm2 por %xmm1,%xmm5 pand -80(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%r11),%xmm0 movdqa -48(%r11),%xmm1 movdqa -32(%r11),%xmm2 pand -64(%rax),%xmm0 movdqa -16(%r11),%xmm3 pand -48(%rax),%xmm1 por %xmm0,%xmm4 pand -32(%rax),%xmm2 por %xmm1,%xmm5 pand -16(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%r11),%xmm0 movdqa 16(%r11),%xmm1 movdqa 32(%r11),%xmm2 pand 0(%rax),%xmm0 movdqa 48(%r11),%xmm3 pand 16(%rax),%xmm1 por %xmm0,%xmm4 pand 32(%rax),%xmm2 por %xmm1,%xmm5 pand 48(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%r11),%xmm0 movdqa 80(%r11),%xmm1 movdqa 96(%r11),%xmm2 pand 64(%rax),%xmm0 movdqa 112(%r11),%xmm3 pand 80(%rax),%xmm1 por %xmm0,%xmm4 pand 96(%rax),%xmm2 por %xmm1,%xmm5 pand 112(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 leaq 256(%r11),%r11 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 movq %xmm0,(%rdi) leaq 8(%rdi),%rdi subl $1,%esi jnz .Lgather leaq (%r10),%rsp .cfi_def_cfa_register %rsp ret .LSEH_end_bn_gather5: .cfi_endproc .size bn_gather5,.-bn_gather5 .section .rodata .align 64 .Linc: .long 0,0, 1,1 .long 2,2, 2,2 .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,119,105,116,104,32,115,99,97,116,116,101,114,47,103,97,116,104,101,114,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text #endif #if
defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/aes128gcmsiv-x86_64-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .section __DATA,__const .p2align 4 one: .quad 1,0 two: .quad 2,0 three: .quad 3,0 four: .quad 4,0 five: .quad 5,0 six: .quad 6,0 seven: .quad 7,0 eight: .quad 8,0 OR_MASK: .long 0x00000000,0x00000000,0x00000000,0x80000000 poly: .quad 0x1, 0xc200000000000000 mask: .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d con1: .long 1,1,1,1 con2: .long 0x1b,0x1b,0x1b,0x1b con3: .byte -1,-1,-1,-1,-1,-1,-1,-1,4,5,6,7,4,5,6,7 and_mask: .long 0,0xffffffff, 0xffffffff, 0xffffffff .text .p2align 4 GFMUL: vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 vpclmulqdq $0x11,%xmm1,%xmm0,%xmm5 vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 vpclmulqdq $0x01,%xmm1,%xmm0,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $8,%xmm3,%xmm4 vpsrldq $8,%xmm3,%xmm3 vpxor %xmm4,%xmm2,%xmm2 vpxor %xmm3,%xmm5,%xmm5 vpclmulqdq $0x10,poly(%rip),%xmm2,%xmm3 vpshufd $78,%xmm2,%xmm4 vpxor %xmm4,%xmm3,%xmm2 vpclmulqdq $0x10,poly(%rip),%xmm2,%xmm3 vpshufd $78,%xmm2,%xmm4 vpxor %xmm4,%xmm3,%xmm2 vpxor %xmm5,%xmm2,%xmm0 ret .globl _aesgcmsiv_htable_init .private_extern _aesgcmsiv_htable_init .p2align 4 _aesgcmsiv_htable_init: _CET_ENDBR vmovdqa (%rsi),%xmm0 vmovdqa %xmm0,%xmm1 vmovdqa %xmm0,(%rdi) call GFMUL vmovdqa %xmm0,16(%rdi) call GFMUL vmovdqa %xmm0,32(%rdi) call GFMUL vmovdqa %xmm0,48(%rdi) call GFMUL vmovdqa %xmm0,64(%rdi) call GFMUL vmovdqa %xmm0,80(%rdi) call GFMUL vmovdqa %xmm0,96(%rdi) call GFMUL vmovdqa %xmm0,112(%rdi) ret .globl _aesgcmsiv_htable6_init .private_extern _aesgcmsiv_htable6_init .p2align 4 _aesgcmsiv_htable6_init: _CET_ENDBR vmovdqa
(%rsi),%xmm0 vmovdqa %xmm0,%xmm1 vmovdqa %xmm0,(%rdi) call GFMUL vmovdqa %xmm0,16(%rdi) call GFMUL vmovdqa %xmm0,32(%rdi) call GFMUL vmovdqa %xmm0,48(%rdi) call GFMUL vmovdqa %xmm0,64(%rdi) call GFMUL vmovdqa %xmm0,80(%rdi) ret .globl _aesgcmsiv_htable_polyval .private_extern _aesgcmsiv_htable_polyval .p2align 4 _aesgcmsiv_htable_polyval: _CET_ENDBR testq %rdx,%rdx jnz L$htable_polyval_start ret L$htable_polyval_start: vzeroall movq %rdx,%r11 andq $127,%r11 jz L$htable_polyval_no_prefix vpxor %xmm9,%xmm9,%xmm9 vmovdqa (%rcx),%xmm1 subq %r11,%rdx subq $16,%r11 vmovdqu (%rsi),%xmm0 vpxor %xmm1,%xmm0,%xmm0 vpclmulqdq $0x01,(%rdi,%r11,1),%xmm0,%xmm5 vpclmulqdq $0x00,(%rdi,%r11,1),%xmm0,%xmm3 vpclmulqdq $0x11,(%rdi,%r11,1),%xmm0,%xmm4 vpclmulqdq $0x10,(%rdi,%r11,1),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 leaq 16(%rsi),%rsi testq %r11,%r11 jnz L$htable_polyval_prefix_loop jmp L$htable_polyval_prefix_complete .p2align 6 L$htable_polyval_prefix_loop: subq $16,%r11 vmovdqu (%rsi),%xmm0 vpclmulqdq $0x00,(%rdi,%r11,1),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,(%rdi,%r11,1),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x01,(%rdi,%r11,1),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x10,(%rdi,%r11,1),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 testq %r11,%r11 leaq 16(%rsi),%rsi jnz L$htable_polyval_prefix_loop L$htable_polyval_prefix_complete: vpsrldq $8,%xmm5,%xmm6 vpslldq $8,%xmm5,%xmm5 vpxor %xmm6,%xmm4,%xmm9 vpxor %xmm5,%xmm3,%xmm1 jmp L$htable_polyval_main_loop L$htable_polyval_no_prefix: vpxor %xmm1,%xmm1,%xmm1 vmovdqa (%rcx),%xmm9 .p2align 6 L$htable_polyval_main_loop: subq $0x80,%rdx jb L$htable_polyval_out vmovdqu 112(%rsi),%xmm0 vpclmulqdq $0x01,(%rdi),%xmm0,%xmm5 vpclmulqdq $0x00,(%rdi),%xmm0,%xmm3 vpclmulqdq $0x11,(%rdi),%xmm0,%xmm4 vpclmulqdq $0x10,(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vmovdqu 96(%rsi),%xmm0 vpclmulqdq $0x01,16(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,16(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq
$0x11,16(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,16(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vmovdqu 80(%rsi),%xmm0 vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm7 vpalignr $8,%xmm1,%xmm1,%xmm1 vpclmulqdq $0x01,32(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,32(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,32(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,32(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpxor %xmm7,%xmm1,%xmm1 vmovdqu 64(%rsi),%xmm0 vpclmulqdq $0x01,48(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,48(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,48(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,48(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vmovdqu 48(%rsi),%xmm0 vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm7 vpalignr $8,%xmm1,%xmm1,%xmm1 vpclmulqdq $0x01,64(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,64(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,64(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,64(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpxor %xmm7,%xmm1,%xmm1 vmovdqu 32(%rsi),%xmm0 vpclmulqdq $0x01,80(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,80(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,80(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,80(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpxor %xmm9,%xmm1,%xmm1 vmovdqu 16(%rsi),%xmm0 vpclmulqdq $0x01,96(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,96(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,96(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,96(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vmovdqu 0(%rsi),%xmm0 vpxor %xmm1,%xmm0,%xmm0 vpclmulqdq $0x01,112(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,112(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,112(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,112(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpsrldq
$8,%xmm5,%xmm6 vpslldq $8,%xmm5,%xmm5 vpxor %xmm6,%xmm4,%xmm9 vpxor %xmm5,%xmm3,%xmm1 leaq 128(%rsi),%rsi jmp L$htable_polyval_main_loop L$htable_polyval_out: vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm6 vpalignr $8,%xmm1,%xmm1,%xmm1 vpxor %xmm6,%xmm1,%xmm1 vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm6 vpalignr $8,%xmm1,%xmm1,%xmm1 vpxor %xmm6,%xmm1,%xmm1 vpxor %xmm9,%xmm1,%xmm1 vmovdqu %xmm1,(%rcx) vzeroupper ret .globl _aesgcmsiv_polyval_horner .private_extern _aesgcmsiv_polyval_horner .p2align 4 _aesgcmsiv_polyval_horner: _CET_ENDBR testq %rcx,%rcx jnz L$polyval_horner_start ret L$polyval_horner_start: xorq %r10,%r10 shlq $4,%rcx vmovdqa (%rsi),%xmm1 vmovdqa (%rdi),%xmm0 L$polyval_horner_loop: vpxor (%rdx,%r10,1),%xmm0,%xmm0 call GFMUL addq $16,%r10 cmpq %r10,%rcx jne L$polyval_horner_loop vmovdqa %xmm0,(%rdi) ret .globl _aes128gcmsiv_aes_ks .private_extern _aes128gcmsiv_aes_ks .p2align 4 _aes128gcmsiv_aes_ks: _CET_ENDBR vmovdqu (%rdi),%xmm1 vmovdqa %xmm1,(%rsi) vmovdqa con1(%rip),%xmm0 vmovdqa mask(%rip),%xmm15 movq $8,%rax L$ks128_loop: addq $16,%rsi subq $1,%rax vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpslldq $4,%xmm3,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpslldq $4,%xmm3,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm1,(%rsi) jne L$ks128_loop vmovdqa con2(%rip),%xmm0 vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpslldq $4,%xmm3,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpslldq $4,%xmm3,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm1,16(%rsi) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslldq $4,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpslldq $4,%xmm3,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpslldq $4,%xmm3,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm1,32(%rsi) ret .globl _aes256gcmsiv_aes_ks .private_extern _aes256gcmsiv_aes_ks .p2align 4 _aes256gcmsiv_aes_ks:
// _aes256gcmsiv_aes_ks (continued): AES-256 key schedule — copies the two 16-byte key
// halves to (%rsi), then runs six iterations of L$ks256_loop, each deriving two round
// keys (vaesenclast + vpshufb/vpsllq mixing per the AES key-expansion recurrence) and
// advancing the output pointer by 32 bytes; a final half-iteration emits the last key.
_CET_ENDBR vmovdqu (%rdi),%xmm1 vmovdqu 16(%rdi),%xmm3 vmovdqa %xmm1,(%rsi) vmovdqa %xmm3,16(%rsi) vmovdqa con1(%rip),%xmm0 vmovdqa mask(%rip),%xmm15 vpxor %xmm14,%xmm14,%xmm14 movq $6,%rax L$ks256_loop: addq $32,%rsi subq $1,%rax vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm1,(%rsi) vpshufd $0xff,%xmm1,%xmm2 vaesenclast %xmm14,%xmm2,%xmm2 vpsllq $32,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpshufb con3(%rip),%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vmovdqa %xmm3,16(%rsi) jne L$ks256_loop vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpsllq $32,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm1,32(%rsi) ret .globl _aes128gcmsiv_aes_ks_enc_x1 .private_extern _aes128gcmsiv_aes_ks_enc_x1 .p2align 4 _aes128gcmsiv_aes_ks_enc_x1: _CET_ENDBR vmovdqa (%rcx),%xmm1 vmovdqa 0(%rdi),%xmm4 vmovdqa %xmm1,(%rdx) vpxor %xmm1,%xmm4,%xmm4 vmovdqa con1(%rip),%xmm0 vmovdqa mask(%rip),%xmm15 vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,16(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,32(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,48(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0
// _aes128gcmsiv_aes_ks_enc_x1 (continued): derives the remaining AES-128 round keys,
// storing each at 16-byte steps in (%rdx) while simultaneously encrypting the block in
// %xmm4 (vaesenc per round, vaesenclast for round 10); result written to 0(%rsi).
// Followed by _aes128gcmsiv_kdf (four counter blocks derived from (%rdi), masked with
// and_mask, run through the 11-round key schedule at (%rdx), outputs to (%rsi)) and the
// head of _aes128gcmsiv_enc_msg_x4 (sets up 4 CTR blocks from (%rdx) OR'd with OR_MASK).
vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,64(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,80(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,96(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,112(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,128(%rdx) vmovdqa con2(%rip),%xmm0 vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,144(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenclast %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,160(%rdx) vmovdqa %xmm4,0(%rsi) ret .globl _aes128gcmsiv_kdf .private_extern _aes128gcmsiv_kdf .p2align 4 _aes128gcmsiv_kdf: _CET_ENDBR vmovdqa (%rdx),%xmm1 vmovdqa 0(%rdi),%xmm9 vmovdqa and_mask(%rip),%xmm12 vmovdqa one(%rip),%xmm13 vpshufd $0x90,%xmm9,%xmm9 vpand %xmm12,%xmm9,%xmm9 vpaddd
%xmm13,%xmm9,%xmm10 vpaddd %xmm13,%xmm10,%xmm11 vpaddd %xmm13,%xmm11,%xmm12 vpxor %xmm1,%xmm9,%xmm9 vpxor %xmm1,%xmm10,%xmm10 vpxor %xmm1,%xmm11,%xmm11 vpxor %xmm1,%xmm12,%xmm12 vmovdqa 16(%rdx),%xmm1 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vmovdqa 32(%rdx),%xmm2 vaesenc %xmm2,%xmm9,%xmm9 vaesenc %xmm2,%xmm10,%xmm10 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vmovdqa 48(%rdx),%xmm1 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vmovdqa 64(%rdx),%xmm2 vaesenc %xmm2,%xmm9,%xmm9 vaesenc %xmm2,%xmm10,%xmm10 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vmovdqa 80(%rdx),%xmm1 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vmovdqa 96(%rdx),%xmm2 vaesenc %xmm2,%xmm9,%xmm9 vaesenc %xmm2,%xmm10,%xmm10 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vmovdqa 112(%rdx),%xmm1 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vmovdqa 128(%rdx),%xmm2 vaesenc %xmm2,%xmm9,%xmm9 vaesenc %xmm2,%xmm10,%xmm10 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vmovdqa 144(%rdx),%xmm1 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vmovdqa 160(%rdx),%xmm2 vaesenclast %xmm2,%xmm9,%xmm9 vaesenclast %xmm2,%xmm10,%xmm10 vaesenclast %xmm2,%xmm11,%xmm11 vaesenclast %xmm2,%xmm12,%xmm12 vmovdqa %xmm9,0(%rsi) vmovdqa %xmm10,16(%rsi) vmovdqa %xmm11,32(%rsi) vmovdqa %xmm12,48(%rsi) ret .globl _aes128gcmsiv_enc_msg_x4 .private_extern _aes128gcmsiv_enc_msg_x4 .p2align 4 _aes128gcmsiv_enc_msg_x4: _CET_ENDBR testq %r8,%r8 jnz L$128_enc_msg_x4_start ret L$128_enc_msg_x4_start: pushq %r12 pushq %r13 shrq $4,%r8 movq %r8,%r10 shlq $62,%r10 shrq $62,%r10 vmovdqa (%rdx),%xmm15 vpor OR_MASK(%rip),%xmm15,%xmm15 vmovdqu four(%rip),%xmm4 vmovdqa %xmm15,%xmm0 vpaddd
one(%rip),%xmm15,%xmm1 vpaddd two(%rip),%xmm15,%xmm2 vpaddd three(%rip),%xmm15,%xmm3 shrq $2,%r8 je L$128_enc_msg_x4_check_remainder subq $64,%rsi subq $64,%rdi L$128_enc_msg_x4_loop1: addq $64,%rsi addq $64,%rdi vmovdqa %xmm0,%xmm5 vmovdqa %xmm1,%xmm6 vmovdqa %xmm2,%xmm7 vmovdqa %xmm3,%xmm8 vpxor (%rcx),%xmm5,%xmm5 vpxor (%rcx),%xmm6,%xmm6 vpxor (%rcx),%xmm7,%xmm7 vpxor (%rcx),%xmm8,%xmm8 vmovdqu 16(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm0,%xmm0 vmovdqu 32(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm1,%xmm1 vmovdqu 48(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm2,%xmm2 vmovdqu 64(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm3,%xmm3 vmovdqu 80(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 96(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 112(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 128(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 144(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 160(%rcx),%xmm12 vaesenclast %xmm12,%xmm5,%xmm5 vaesenclast %xmm12,%xmm6,%xmm6 vaesenclast %xmm12,%xmm7,%xmm7 vaesenclast %xmm12,%xmm8,%xmm8 vpxor 0(%rdi),%xmm5,%xmm5 vpxor 16(%rdi),%xmm6,%xmm6 vpxor 32(%rdi),%xmm7,%xmm7 vpxor 48(%rdi),%xmm8,%xmm8 subq $1,%r8 vmovdqu %xmm5,0(%rsi) vmovdqu %xmm6,16(%rsi)
// _aes128gcmsiv_enc_msg_x4 (continued): store the remaining two ciphertext blocks of the
// 4-wide CTR iteration, then L$128_enc_msg_x4_loop2 handles the <4-block remainder one
// 16-byte block at a time (11 AES-128 rounds per block, XOR with plaintext at (%rdi)).
// Then _aes128gcmsiv_enc_msg_x8 begins: 8-wide AES-128 CTR encryption; counters are
// built from (%rdx) OR'd with OR_MASK, the 8th counter is kept spilled at (%rsp).
vmovdqu %xmm7,32(%rsi) vmovdqu %xmm8,48(%rsi) jne L$128_enc_msg_x4_loop1 addq $64,%rsi addq $64,%rdi L$128_enc_msg_x4_check_remainder: cmpq $0,%r10 je L$128_enc_msg_x4_out L$128_enc_msg_x4_loop2: vmovdqa %xmm0,%xmm5 vpaddd one(%rip),%xmm0,%xmm0 vpxor (%rcx),%xmm5,%xmm5 vaesenc 16(%rcx),%xmm5,%xmm5 vaesenc 32(%rcx),%xmm5,%xmm5 vaesenc 48(%rcx),%xmm5,%xmm5 vaesenc 64(%rcx),%xmm5,%xmm5 vaesenc 80(%rcx),%xmm5,%xmm5 vaesenc 96(%rcx),%xmm5,%xmm5 vaesenc 112(%rcx),%xmm5,%xmm5 vaesenc 128(%rcx),%xmm5,%xmm5 vaesenc 144(%rcx),%xmm5,%xmm5 vaesenclast 160(%rcx),%xmm5,%xmm5 vpxor (%rdi),%xmm5,%xmm5 vmovdqu %xmm5,(%rsi) addq $16,%rdi addq $16,%rsi subq $1,%r10 jne L$128_enc_msg_x4_loop2 L$128_enc_msg_x4_out: popq %r13 popq %r12 ret .globl _aes128gcmsiv_enc_msg_x8 .private_extern _aes128gcmsiv_enc_msg_x8 .p2align 4 _aes128gcmsiv_enc_msg_x8: _CET_ENDBR testq %r8,%r8 jnz L$128_enc_msg_x8_start ret L$128_enc_msg_x8_start: pushq %r12 pushq %r13 pushq %rbp movq %rsp,%rbp subq $128,%rsp andq $-64,%rsp shrq $4,%r8 movq %r8,%r10 shlq $61,%r10 shrq $61,%r10 vmovdqu (%rdx),%xmm1 vpor OR_MASK(%rip),%xmm1,%xmm1 vpaddd seven(%rip),%xmm1,%xmm0 vmovdqu %xmm0,(%rsp) vpaddd one(%rip),%xmm1,%xmm9 vpaddd two(%rip),%xmm1,%xmm10 vpaddd three(%rip),%xmm1,%xmm11 vpaddd four(%rip),%xmm1,%xmm12 vpaddd five(%rip),%xmm1,%xmm13 vpaddd six(%rip),%xmm1,%xmm14 vmovdqa %xmm1,%xmm0 shrq $3,%r8 je L$128_enc_msg_x8_check_remainder subq $128,%rsi subq $128,%rdi L$128_enc_msg_x8_loop1: addq $128,%rsi addq $128,%rdi vmovdqa %xmm0,%xmm1 vmovdqa %xmm9,%xmm2 vmovdqa %xmm10,%xmm3 vmovdqa %xmm11,%xmm4 vmovdqa %xmm12,%xmm5 vmovdqa %xmm13,%xmm6 vmovdqa %xmm14,%xmm7 vmovdqu (%rsp),%xmm8 vpxor (%rcx),%xmm1,%xmm1 vpxor (%rcx),%xmm2,%xmm2 vpxor (%rcx),%xmm3,%xmm3 vpxor (%rcx),%xmm4,%xmm4 vpxor (%rcx),%xmm5,%xmm5 vpxor (%rcx),%xmm6,%xmm6 vpxor (%rcx),%xmm7,%xmm7 vpxor (%rcx),%xmm8,%xmm8 vmovdqu 16(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc
%xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu (%rsp),%xmm14 vpaddd eight(%rip),%xmm14,%xmm14 vmovdqu %xmm14,(%rsp) vmovdqu 32(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpsubd one(%rip),%xmm14,%xmm14 vmovdqu 48(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm0,%xmm0 vmovdqu 64(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm9,%xmm9 vmovdqu 80(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm10,%xmm10 vmovdqu 96(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm11,%xmm11 vmovdqu 112(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm12,%xmm12 vmovdqu 128(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6
// _aes128gcmsiv_enc_msg_x8 (continued): finish the 8-wide round loop, XOR keystream with
// plaintext, store 128 bytes per iteration, then the single-block remainder loop and
// epilogue. Afterwards _aes128gcmsiv_dec begins: tag/counter setup from (%rdx), htable
// pointer at (%rcx), with a 6-block (96-byte) wide first pass when %r9 >= 96.
vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm13,%xmm13 vmovdqu 144(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu 160(%rcx),%xmm15 vaesenclast %xmm15,%xmm1,%xmm1 vaesenclast %xmm15,%xmm2,%xmm2 vaesenclast %xmm15,%xmm3,%xmm3 vaesenclast %xmm15,%xmm4,%xmm4 vaesenclast %xmm15,%xmm5,%xmm5 vaesenclast %xmm15,%xmm6,%xmm6 vaesenclast %xmm15,%xmm7,%xmm7 vaesenclast %xmm15,%xmm8,%xmm8 vpxor 0(%rdi),%xmm1,%xmm1 vpxor 16(%rdi),%xmm2,%xmm2 vpxor 32(%rdi),%xmm3,%xmm3 vpxor 48(%rdi),%xmm4,%xmm4 vpxor 64(%rdi),%xmm5,%xmm5 vpxor 80(%rdi),%xmm6,%xmm6 vpxor 96(%rdi),%xmm7,%xmm7 vpxor 112(%rdi),%xmm8,%xmm8 decq %r8 vmovdqu %xmm1,0(%rsi) vmovdqu %xmm2,16(%rsi) vmovdqu %xmm3,32(%rsi) vmovdqu %xmm4,48(%rsi) vmovdqu %xmm5,64(%rsi) vmovdqu %xmm6,80(%rsi) vmovdqu %xmm7,96(%rsi) vmovdqu %xmm8,112(%rsi) jne L$128_enc_msg_x8_loop1 addq $128,%rsi addq $128,%rdi L$128_enc_msg_x8_check_remainder: cmpq $0,%r10 je L$128_enc_msg_x8_out L$128_enc_msg_x8_loop2: vmovdqa %xmm0,%xmm1 vpaddd one(%rip),%xmm0,%xmm0 vpxor (%rcx),%xmm1,%xmm1 vaesenc 16(%rcx),%xmm1,%xmm1 vaesenc 32(%rcx),%xmm1,%xmm1 vaesenc 48(%rcx),%xmm1,%xmm1 vaesenc 64(%rcx),%xmm1,%xmm1 vaesenc 80(%rcx),%xmm1,%xmm1 vaesenc 96(%rcx),%xmm1,%xmm1 vaesenc 112(%rcx),%xmm1,%xmm1 vaesenc 128(%rcx),%xmm1,%xmm1 vaesenc 144(%rcx),%xmm1,%xmm1 vaesenclast 160(%rcx),%xmm1,%xmm1 vpxor (%rdi),%xmm1,%xmm1 vmovdqu %xmm1,(%rsi) addq $16,%rdi addq $16,%rsi decq %r10 jne L$128_enc_msg_x8_loop2 L$128_enc_msg_x8_out: movq %rbp,%rsp popq %rbp popq %r13 popq %r12 ret .globl _aes128gcmsiv_dec .private_extern _aes128gcmsiv_dec .p2align 4 _aes128gcmsiv_dec: _CET_ENDBR testq $~15,%r9 jnz L$128_dec_start ret L$128_dec_start: vzeroupper vmovdqa (%rdx),%xmm0 vmovdqu 16(%rdx),%xmm15 vpor OR_MASK(%rip),%xmm15,%xmm15 movq %rdx,%rax leaq 32(%rax),%rax leaq
32(%rcx),%rcx andq $~15,%r9 cmpq $96,%r9 jb L$128_dec_loop2 subq $96,%r9 vmovdqa %xmm15,%xmm7 vpaddd one(%rip),%xmm7,%xmm8 vpaddd two(%rip),%xmm7,%xmm9 vpaddd one(%rip),%xmm9,%xmm10 vpaddd two(%rip),%xmm9,%xmm11 vpaddd one(%rip),%xmm11,%xmm12 vpaddd two(%rip),%xmm11,%xmm15 vpxor (%r8),%xmm7,%xmm7 vpxor (%r8),%xmm8,%xmm8 vpxor (%r8),%xmm9,%xmm9 vpxor (%r8),%xmm10,%xmm10 vpxor (%r8),%xmm11,%xmm11 vpxor (%r8),%xmm12,%xmm12 vmovdqu 16(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 32(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 48(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 64(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 80(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 96(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 112(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 128(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 144(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9
// _aes128gcmsiv_dec (continued): finish the first 6-block CTR pass, then
// L$128_dec_loop1 decrypts 96 bytes per iteration while interleaving the AES rounds
// with carry-less multiplies (vpclmulqdq) of previously decrypted blocks (spilled at
// (%rax)) against the powers-of-H table at (%rcx), reducing with poly(%rip) — i.e. the
// POLYVAL accumulation is fused into the decryption loop. L$128_dec_finish_96 performs
// the final accumulation/reduction for the last six blocks.
vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 160(%r8),%xmm4 vaesenclast %xmm4,%xmm7,%xmm7 vaesenclast %xmm4,%xmm8,%xmm8 vaesenclast %xmm4,%xmm9,%xmm9 vaesenclast %xmm4,%xmm10,%xmm10 vaesenclast %xmm4,%xmm11,%xmm11 vaesenclast %xmm4,%xmm12,%xmm12 vpxor 0(%rdi),%xmm7,%xmm7 vpxor 16(%rdi),%xmm8,%xmm8 vpxor 32(%rdi),%xmm9,%xmm9 vpxor 48(%rdi),%xmm10,%xmm10 vpxor 64(%rdi),%xmm11,%xmm11 vpxor 80(%rdi),%xmm12,%xmm12 vmovdqu %xmm7,0(%rsi) vmovdqu %xmm8,16(%rsi) vmovdqu %xmm9,32(%rsi) vmovdqu %xmm10,48(%rsi) vmovdqu %xmm11,64(%rsi) vmovdqu %xmm12,80(%rsi) addq $96,%rdi addq $96,%rsi jmp L$128_dec_loop1 .p2align 6 L$128_dec_loop1: cmpq $96,%r9 jb L$128_dec_finish_96 subq $96,%r9 vmovdqa %xmm12,%xmm6 vmovdqa %xmm11,16-32(%rax) vmovdqa %xmm10,32-32(%rax) vmovdqa %xmm9,48-32(%rax) vmovdqa %xmm8,64-32(%rax) vmovdqa %xmm7,80-32(%rax) vmovdqa %xmm15,%xmm7 vpaddd one(%rip),%xmm7,%xmm8 vpaddd two(%rip),%xmm7,%xmm9 vpaddd one(%rip),%xmm9,%xmm10 vpaddd two(%rip),%xmm9,%xmm11 vpaddd one(%rip),%xmm11,%xmm12 vpaddd two(%rip),%xmm11,%xmm15 vmovdqa (%r8),%xmm4 vpxor %xmm4,%xmm7,%xmm7 vpxor %xmm4,%xmm8,%xmm8 vpxor %xmm4,%xmm9,%xmm9 vpxor %xmm4,%xmm10,%xmm10 vpxor %xmm4,%xmm11,%xmm11 vpxor %xmm4,%xmm12,%xmm12 vmovdqu 0-32(%rcx),%xmm4 vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 vpclmulqdq $0x01,%xmm4,%xmm6,%xmm1 vpclmulqdq $0x10,%xmm4,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 16(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu -16(%rax),%xmm6 vmovdqu -16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 32(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc
%xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 0(%rax),%xmm6 vmovdqu 0(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 48(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 16(%rax),%xmm6 vmovdqu 16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 64(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 32(%rax),%xmm6 vmovdqu 32(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 80(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 96(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 112(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqa 80-32(%rax),%xmm6 vpxor %xmm0,%xmm6,%xmm6 vmovdqu 80-32(%rcx),%xmm5 vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 vpxor
%xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 128(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vpsrldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm2,%xmm5 vpslldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm0 vmovdqa poly(%rip),%xmm3 vmovdqu 144(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 160(%r8),%xmm6 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vpxor 0(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm7,%xmm7 vpxor 16(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm8,%xmm8 vpxor 32(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm9,%xmm9 vpxor 48(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm10,%xmm10 vpxor 64(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm11,%xmm11 vpxor 80(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm12,%xmm12 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vmovdqu %xmm7,0(%rsi) vmovdqu %xmm8,16(%rsi) vmovdqu %xmm9,32(%rsi) vmovdqu %xmm10,48(%rsi) vmovdqu %xmm11,64(%rsi) vmovdqu %xmm12,80(%rsi) vpxor %xmm5,%xmm0,%xmm0 leaq 96(%rdi),%rdi leaq 96(%rsi),%rsi jmp L$128_dec_loop1 L$128_dec_finish_96: vmovdqa %xmm12,%xmm6 vmovdqa %xmm11,16-32(%rax) vmovdqa %xmm10,32-32(%rax) vmovdqa %xmm9,48-32(%rax) vmovdqa %xmm8,64-32(%rax) vmovdqa %xmm7,80-32(%rax) vmovdqu 0-32(%rcx),%xmm4 vpclmulqdq $0x10,%xmm4,%xmm6,%xmm1 vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 vpclmulqdq $0x01,%xmm4,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu -16(%rax),%xmm6 vmovdqu -16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq
$0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 0(%rax),%xmm6 vmovdqu 0(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 16(%rax),%xmm6 vmovdqu 16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 32(%rax),%xmm6 vmovdqu 32(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 80-32(%rax),%xmm6 vpxor %xmm0,%xmm6,%xmm6 vmovdqu 80-32(%rcx),%xmm5 vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpsrldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm2,%xmm5 vpslldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm0 vmovdqa poly(%rip),%xmm3 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vpxor %xmm5,%xmm0,%xmm0 L$128_dec_loop2: cmpq $16,%r9 jb L$128_dec_out subq $16,%r9 vmovdqa %xmm15,%xmm2 vpaddd one(%rip),%xmm15,%xmm15 vpxor 0(%r8),%xmm2,%xmm2 vaesenc 16(%r8),%xmm2,%xmm2 vaesenc 32(%r8),%xmm2,%xmm2 vaesenc 48(%r8),%xmm2,%xmm2 vaesenc 64(%r8),%xmm2,%xmm2 vaesenc 80(%r8),%xmm2,%xmm2 vaesenc 96(%r8),%xmm2,%xmm2 vaesenc 112(%r8),%xmm2,%xmm2 vaesenc 128(%r8),%xmm2,%xmm2 vaesenc 144(%r8),%xmm2,%xmm2
vaesenclast 160(%r8),%xmm2,%xmm2 vpxor (%rdi),%xmm2,%xmm2 vmovdqu %xmm2,(%rsi) addq $16,%rdi addq $16,%rsi vpxor %xmm2,%xmm0,%xmm0 vmovdqa -32(%rcx),%xmm1 call GFMUL jmp L$128_dec_loop2 L$128_dec_out: vmovdqu %xmm0,(%rdx) ret .globl _aes128gcmsiv_ecb_enc_block .private_extern _aes128gcmsiv_ecb_enc_block .p2align 4 _aes128gcmsiv_ecb_enc_block: _CET_ENDBR vmovdqa (%rdi),%xmm1 vpxor (%rdx),%xmm1,%xmm1 vaesenc 16(%rdx),%xmm1,%xmm1 vaesenc 32(%rdx),%xmm1,%xmm1 vaesenc 48(%rdx),%xmm1,%xmm1 vaesenc 64(%rdx),%xmm1,%xmm1 vaesenc 80(%rdx),%xmm1,%xmm1 vaesenc 96(%rdx),%xmm1,%xmm1 vaesenc 112(%rdx),%xmm1,%xmm1 vaesenc 128(%rdx),%xmm1,%xmm1 vaesenc 144(%rdx),%xmm1,%xmm1 vaesenclast 160(%rdx),%xmm1,%xmm1 vmovdqa %xmm1,(%rsi) ret .globl _aes256gcmsiv_aes_ks_enc_x1 .private_extern _aes256gcmsiv_aes_ks_enc_x1 .p2align 4 _aes256gcmsiv_aes_ks_enc_x1: _CET_ENDBR vmovdqa con1(%rip),%xmm0 vmovdqa mask(%rip),%xmm15 vmovdqa (%rdi),%xmm8 vmovdqa (%rcx),%xmm1 vmovdqa 16(%rcx),%xmm3 vpxor %xmm1,%xmm8,%xmm8 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm1,(%rdx) vmovdqu %xmm3,16(%rdx) vpxor %xmm14,%xmm14,%xmm14 vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,32(%rdx) vpshufd $0xff,%xmm1,%xmm2 vaesenclast %xmm14,%xmm2,%xmm2 vpslldq $4,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm3,48(%rdx) vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,64(%rdx) vpshufd $0xff,%xmm1,%xmm2 vaesenclast 
%xmm14,%xmm2,%xmm2 vpslldq $4,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm3,80(%rdx) vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,96(%rdx) vpshufd $0xff,%xmm1,%xmm2 vaesenclast %xmm14,%xmm2,%xmm2 vpslldq $4,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm3,112(%rdx) vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,128(%rdx) vpshufd $0xff,%xmm1,%xmm2 vaesenclast %xmm14,%xmm2,%xmm2 vpslldq $4,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm3,144(%rdx) vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,160(%rdx) vpshufd $0xff,%xmm1,%xmm2 vaesenclast %xmm14,%xmm2,%xmm2 vpslldq $4,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm3,176(%rdx) vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld 
$1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,192(%rdx) vpshufd $0xff,%xmm1,%xmm2 vaesenclast %xmm14,%xmm2,%xmm2 vpslldq $4,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm3,208(%rdx) vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenclast %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,224(%rdx) vmovdqa %xmm8,(%rsi) ret .globl _aes256gcmsiv_ecb_enc_block .private_extern _aes256gcmsiv_ecb_enc_block .p2align 4 _aes256gcmsiv_ecb_enc_block: _CET_ENDBR vmovdqa (%rdi),%xmm1 vpxor (%rdx),%xmm1,%xmm1 vaesenc 16(%rdx),%xmm1,%xmm1 vaesenc 32(%rdx),%xmm1,%xmm1 vaesenc 48(%rdx),%xmm1,%xmm1 vaesenc 64(%rdx),%xmm1,%xmm1 vaesenc 80(%rdx),%xmm1,%xmm1 vaesenc 96(%rdx),%xmm1,%xmm1 vaesenc 112(%rdx),%xmm1,%xmm1 vaesenc 128(%rdx),%xmm1,%xmm1 vaesenc 144(%rdx),%xmm1,%xmm1 vaesenc 160(%rdx),%xmm1,%xmm1 vaesenc 176(%rdx),%xmm1,%xmm1 vaesenc 192(%rdx),%xmm1,%xmm1 vaesenc 208(%rdx),%xmm1,%xmm1 vaesenclast 224(%rdx),%xmm1,%xmm1 vmovdqa %xmm1,(%rsi) ret .globl _aes256gcmsiv_enc_msg_x4 .private_extern _aes256gcmsiv_enc_msg_x4 .p2align 4 _aes256gcmsiv_enc_msg_x4: _CET_ENDBR testq %r8,%r8 jnz L$256_enc_msg_x4_start ret L$256_enc_msg_x4_start: movq %r8,%r10 shrq $4,%r8 shlq $60,%r10 jz L$256_enc_msg_x4_start2 addq $1,%r8 L$256_enc_msg_x4_start2: movq %r8,%r10 shlq $62,%r10 shrq $62,%r10 vmovdqa (%rdx),%xmm15 vpor OR_MASK(%rip),%xmm15,%xmm15 vmovdqa four(%rip),%xmm4 vmovdqa %xmm15,%xmm0 vpaddd one(%rip),%xmm15,%xmm1 vpaddd two(%rip),%xmm15,%xmm2 vpaddd three(%rip),%xmm15,%xmm3 shrq $2,%r8 je L$256_enc_msg_x4_check_remainder 
subq $64,%rsi subq $64,%rdi L$256_enc_msg_x4_loop1: addq $64,%rsi addq $64,%rdi vmovdqa %xmm0,%xmm5 vmovdqa %xmm1,%xmm6 vmovdqa %xmm2,%xmm7 vmovdqa %xmm3,%xmm8 vpxor (%rcx),%xmm5,%xmm5 vpxor (%rcx),%xmm6,%xmm6 vpxor (%rcx),%xmm7,%xmm7 vpxor (%rcx),%xmm8,%xmm8 vmovdqu 16(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm0,%xmm0 vmovdqu 32(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm1,%xmm1 vmovdqu 48(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm2,%xmm2 vmovdqu 64(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm3,%xmm3 vmovdqu 80(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 96(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 112(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 128(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 144(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 160(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 176(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 192(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 208(%rcx),%xmm12 vaesenc 
%xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 224(%rcx),%xmm12 vaesenclast %xmm12,%xmm5,%xmm5 vaesenclast %xmm12,%xmm6,%xmm6 vaesenclast %xmm12,%xmm7,%xmm7 vaesenclast %xmm12,%xmm8,%xmm8 vpxor 0(%rdi),%xmm5,%xmm5 vpxor 16(%rdi),%xmm6,%xmm6 vpxor 32(%rdi),%xmm7,%xmm7 vpxor 48(%rdi),%xmm8,%xmm8 subq $1,%r8 vmovdqu %xmm5,0(%rsi) vmovdqu %xmm6,16(%rsi) vmovdqu %xmm7,32(%rsi) vmovdqu %xmm8,48(%rsi) jne L$256_enc_msg_x4_loop1 addq $64,%rsi addq $64,%rdi L$256_enc_msg_x4_check_remainder: cmpq $0,%r10 je L$256_enc_msg_x4_out L$256_enc_msg_x4_loop2: vmovdqa %xmm0,%xmm5 vpaddd one(%rip),%xmm0,%xmm0 vpxor (%rcx),%xmm5,%xmm5 vaesenc 16(%rcx),%xmm5,%xmm5 vaesenc 32(%rcx),%xmm5,%xmm5 vaesenc 48(%rcx),%xmm5,%xmm5 vaesenc 64(%rcx),%xmm5,%xmm5 vaesenc 80(%rcx),%xmm5,%xmm5 vaesenc 96(%rcx),%xmm5,%xmm5 vaesenc 112(%rcx),%xmm5,%xmm5 vaesenc 128(%rcx),%xmm5,%xmm5 vaesenc 144(%rcx),%xmm5,%xmm5 vaesenc 160(%rcx),%xmm5,%xmm5 vaesenc 176(%rcx),%xmm5,%xmm5 vaesenc 192(%rcx),%xmm5,%xmm5 vaesenc 208(%rcx),%xmm5,%xmm5 vaesenclast 224(%rcx),%xmm5,%xmm5 vpxor (%rdi),%xmm5,%xmm5 vmovdqu %xmm5,(%rsi) addq $16,%rdi addq $16,%rsi subq $1,%r10 jne L$256_enc_msg_x4_loop2 L$256_enc_msg_x4_out: ret .globl _aes256gcmsiv_enc_msg_x8 .private_extern _aes256gcmsiv_enc_msg_x8 .p2align 4 _aes256gcmsiv_enc_msg_x8: _CET_ENDBR testq %r8,%r8 jnz L$256_enc_msg_x8_start ret L$256_enc_msg_x8_start: movq %rsp,%r11 subq $16,%r11 andq $-64,%r11 movq %r8,%r10 shrq $4,%r8 shlq $60,%r10 jz L$256_enc_msg_x8_start2 addq $1,%r8 L$256_enc_msg_x8_start2: movq %r8,%r10 shlq $61,%r10 shrq $61,%r10 vmovdqa (%rdx),%xmm1 vpor OR_MASK(%rip),%xmm1,%xmm1 vpaddd seven(%rip),%xmm1,%xmm0 vmovdqa %xmm0,(%r11) vpaddd one(%rip),%xmm1,%xmm9 vpaddd two(%rip),%xmm1,%xmm10 vpaddd three(%rip),%xmm1,%xmm11 vpaddd four(%rip),%xmm1,%xmm12 vpaddd five(%rip),%xmm1,%xmm13 vpaddd six(%rip),%xmm1,%xmm14 vmovdqa %xmm1,%xmm0 shrq $3,%r8 jz L$256_enc_msg_x8_check_remainder subq $128,%rsi subq 
$128,%rdi L$256_enc_msg_x8_loop1: addq $128,%rsi addq $128,%rdi vmovdqa %xmm0,%xmm1 vmovdqa %xmm9,%xmm2 vmovdqa %xmm10,%xmm3 vmovdqa %xmm11,%xmm4 vmovdqa %xmm12,%xmm5 vmovdqa %xmm13,%xmm6 vmovdqa %xmm14,%xmm7 vmovdqa (%r11),%xmm8 vpxor (%rcx),%xmm1,%xmm1 vpxor (%rcx),%xmm2,%xmm2 vpxor (%rcx),%xmm3,%xmm3 vpxor (%rcx),%xmm4,%xmm4 vpxor (%rcx),%xmm5,%xmm5 vpxor (%rcx),%xmm6,%xmm6 vpxor (%rcx),%xmm7,%xmm7 vpxor (%rcx),%xmm8,%xmm8 vmovdqu 16(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqa (%r11),%xmm14 vpaddd eight(%rip),%xmm14,%xmm14 vmovdqa %xmm14,(%r11) vmovdqu 32(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpsubd one(%rip),%xmm14,%xmm14 vmovdqu 48(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm0,%xmm0 vmovdqu 64(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm9,%xmm9 vmovdqu 80(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm10,%xmm10 vmovdqu 96(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc 
%xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm11,%xmm11 vmovdqu 112(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm12,%xmm12 vmovdqu 128(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm13,%xmm13 vmovdqu 144(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu 160(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu 176(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu 192(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu 208(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu 224(%rcx),%xmm15 vaesenclast %xmm15,%xmm1,%xmm1 vaesenclast %xmm15,%xmm2,%xmm2 vaesenclast %xmm15,%xmm3,%xmm3 vaesenclast 
%xmm15,%xmm4,%xmm4 vaesenclast %xmm15,%xmm5,%xmm5 vaesenclast %xmm15,%xmm6,%xmm6 vaesenclast %xmm15,%xmm7,%xmm7 vaesenclast %xmm15,%xmm8,%xmm8 vpxor 0(%rdi),%xmm1,%xmm1 vpxor 16(%rdi),%xmm2,%xmm2 vpxor 32(%rdi),%xmm3,%xmm3 vpxor 48(%rdi),%xmm4,%xmm4 vpxor 64(%rdi),%xmm5,%xmm5 vpxor 80(%rdi),%xmm6,%xmm6 vpxor 96(%rdi),%xmm7,%xmm7 vpxor 112(%rdi),%xmm8,%xmm8 subq $1,%r8 vmovdqu %xmm1,0(%rsi) vmovdqu %xmm2,16(%rsi) vmovdqu %xmm3,32(%rsi) vmovdqu %xmm4,48(%rsi) vmovdqu %xmm5,64(%rsi) vmovdqu %xmm6,80(%rsi) vmovdqu %xmm7,96(%rsi) vmovdqu %xmm8,112(%rsi) jne L$256_enc_msg_x8_loop1 addq $128,%rsi addq $128,%rdi L$256_enc_msg_x8_check_remainder: cmpq $0,%r10 je L$256_enc_msg_x8_out L$256_enc_msg_x8_loop2: vmovdqa %xmm0,%xmm1 vpaddd one(%rip),%xmm0,%xmm0 vpxor (%rcx),%xmm1,%xmm1 vaesenc 16(%rcx),%xmm1,%xmm1 vaesenc 32(%rcx),%xmm1,%xmm1 vaesenc 48(%rcx),%xmm1,%xmm1 vaesenc 64(%rcx),%xmm1,%xmm1 vaesenc 80(%rcx),%xmm1,%xmm1 vaesenc 96(%rcx),%xmm1,%xmm1 vaesenc 112(%rcx),%xmm1,%xmm1 vaesenc 128(%rcx),%xmm1,%xmm1 vaesenc 144(%rcx),%xmm1,%xmm1 vaesenc 160(%rcx),%xmm1,%xmm1 vaesenc 176(%rcx),%xmm1,%xmm1 vaesenc 192(%rcx),%xmm1,%xmm1 vaesenc 208(%rcx),%xmm1,%xmm1 vaesenclast 224(%rcx),%xmm1,%xmm1 vpxor (%rdi),%xmm1,%xmm1 vmovdqu %xmm1,(%rsi) addq $16,%rdi addq $16,%rsi subq $1,%r10 jnz L$256_enc_msg_x8_loop2 L$256_enc_msg_x8_out: ret .globl _aes256gcmsiv_dec .private_extern _aes256gcmsiv_dec .p2align 4 _aes256gcmsiv_dec: _CET_ENDBR testq $~15,%r9 jnz L$256_dec_start ret L$256_dec_start: vzeroupper vmovdqa (%rdx),%xmm0 vmovdqu 16(%rdx),%xmm15 vpor OR_MASK(%rip),%xmm15,%xmm15 movq %rdx,%rax leaq 32(%rax),%rax leaq 32(%rcx),%rcx andq $~15,%r9 cmpq $96,%r9 jb L$256_dec_loop2 subq $96,%r9 vmovdqa %xmm15,%xmm7 vpaddd one(%rip),%xmm7,%xmm8 vpaddd two(%rip),%xmm7,%xmm9 vpaddd one(%rip),%xmm9,%xmm10 vpaddd two(%rip),%xmm9,%xmm11 vpaddd one(%rip),%xmm11,%xmm12 vpaddd two(%rip),%xmm11,%xmm15 vpxor (%r8),%xmm7,%xmm7 vpxor (%r8),%xmm8,%xmm8 vpxor (%r8),%xmm9,%xmm9 vpxor (%r8),%xmm10,%xmm10 
vpxor (%r8),%xmm11,%xmm11 vpxor (%r8),%xmm12,%xmm12 vmovdqu 16(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 32(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 48(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 64(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 80(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 96(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 112(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 128(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 144(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 160(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 176(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 
vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 192(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 208(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 224(%r8),%xmm4 vaesenclast %xmm4,%xmm7,%xmm7 vaesenclast %xmm4,%xmm8,%xmm8 vaesenclast %xmm4,%xmm9,%xmm9 vaesenclast %xmm4,%xmm10,%xmm10 vaesenclast %xmm4,%xmm11,%xmm11 vaesenclast %xmm4,%xmm12,%xmm12 vpxor 0(%rdi),%xmm7,%xmm7 vpxor 16(%rdi),%xmm8,%xmm8 vpxor 32(%rdi),%xmm9,%xmm9 vpxor 48(%rdi),%xmm10,%xmm10 vpxor 64(%rdi),%xmm11,%xmm11 vpxor 80(%rdi),%xmm12,%xmm12 vmovdqu %xmm7,0(%rsi) vmovdqu %xmm8,16(%rsi) vmovdqu %xmm9,32(%rsi) vmovdqu %xmm10,48(%rsi) vmovdqu %xmm11,64(%rsi) vmovdqu %xmm12,80(%rsi) addq $96,%rdi addq $96,%rsi jmp L$256_dec_loop1 .p2align 6 L$256_dec_loop1: cmpq $96,%r9 jb L$256_dec_finish_96 subq $96,%r9 vmovdqa %xmm12,%xmm6 vmovdqa %xmm11,16-32(%rax) vmovdqa %xmm10,32-32(%rax) vmovdqa %xmm9,48-32(%rax) vmovdqa %xmm8,64-32(%rax) vmovdqa %xmm7,80-32(%rax) vmovdqa %xmm15,%xmm7 vpaddd one(%rip),%xmm7,%xmm8 vpaddd two(%rip),%xmm7,%xmm9 vpaddd one(%rip),%xmm9,%xmm10 vpaddd two(%rip),%xmm9,%xmm11 vpaddd one(%rip),%xmm11,%xmm12 vpaddd two(%rip),%xmm11,%xmm15 vmovdqa (%r8),%xmm4 vpxor %xmm4,%xmm7,%xmm7 vpxor %xmm4,%xmm8,%xmm8 vpxor %xmm4,%xmm9,%xmm9 vpxor %xmm4,%xmm10,%xmm10 vpxor %xmm4,%xmm11,%xmm11 vpxor %xmm4,%xmm12,%xmm12 vmovdqu 0-32(%rcx),%xmm4 vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 vpclmulqdq $0x01,%xmm4,%xmm6,%xmm1 vpclmulqdq $0x10,%xmm4,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 16(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 
-16(%rax),%xmm6 vmovdqu -16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 32(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 0(%rax),%xmm6 vmovdqu 0(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 48(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 16(%rax),%xmm6 vmovdqu 16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 64(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 32(%rax),%xmm6 vmovdqu 32(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 80(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 96(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc 
%xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 112(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqa 80-32(%rax),%xmm6 vpxor %xmm0,%xmm6,%xmm6 vmovdqu 80-32(%rcx),%xmm5 vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 128(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vpsrldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm2,%xmm5 vpslldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm0 vmovdqa poly(%rip),%xmm3 vmovdqu 144(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 160(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 176(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 192(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 208(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 224(%r8),%xmm6 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vpxor 0(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm7,%xmm7 vpxor 16(%rdi),%xmm6,%xmm4 
vaesenclast %xmm4,%xmm8,%xmm8 vpxor 32(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm9,%xmm9 vpxor 48(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm10,%xmm10 vpxor 64(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm11,%xmm11 vpxor 80(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm12,%xmm12 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vmovdqu %xmm7,0(%rsi) vmovdqu %xmm8,16(%rsi) vmovdqu %xmm9,32(%rsi) vmovdqu %xmm10,48(%rsi) vmovdqu %xmm11,64(%rsi) vmovdqu %xmm12,80(%rsi) vpxor %xmm5,%xmm0,%xmm0 leaq 96(%rdi),%rdi leaq 96(%rsi),%rsi jmp L$256_dec_loop1 L$256_dec_finish_96: vmovdqa %xmm12,%xmm6 vmovdqa %xmm11,16-32(%rax) vmovdqa %xmm10,32-32(%rax) vmovdqa %xmm9,48-32(%rax) vmovdqa %xmm8,64-32(%rax) vmovdqa %xmm7,80-32(%rax) vmovdqu 0-32(%rcx),%xmm4 vpclmulqdq $0x10,%xmm4,%xmm6,%xmm1 vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 vpclmulqdq $0x01,%xmm4,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu -16(%rax),%xmm6 vmovdqu -16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 0(%rax),%xmm6 vmovdqu 0(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 16(%rax),%xmm6 vmovdqu 16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 32(%rax),%xmm6 vmovdqu 32(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq 
$0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 80-32(%rax),%xmm6 vpxor %xmm0,%xmm6,%xmm6 vmovdqu 80-32(%rcx),%xmm5 vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpsrldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm2,%xmm5 vpslldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm0 vmovdqa poly(%rip),%xmm3 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vpxor %xmm5,%xmm0,%xmm0 L$256_dec_loop2: cmpq $16,%r9 jb L$256_dec_out subq $16,%r9 vmovdqa %xmm15,%xmm2 vpaddd one(%rip),%xmm15,%xmm15 vpxor 0(%r8),%xmm2,%xmm2 vaesenc 16(%r8),%xmm2,%xmm2 vaesenc 32(%r8),%xmm2,%xmm2 vaesenc 48(%r8),%xmm2,%xmm2 vaesenc 64(%r8),%xmm2,%xmm2 vaesenc 80(%r8),%xmm2,%xmm2 vaesenc 96(%r8),%xmm2,%xmm2 vaesenc 112(%r8),%xmm2,%xmm2 vaesenc 128(%r8),%xmm2,%xmm2 vaesenc 144(%r8),%xmm2,%xmm2 vaesenc 160(%r8),%xmm2,%xmm2 vaesenc 176(%r8),%xmm2,%xmm2 vaesenc 192(%r8),%xmm2,%xmm2 vaesenc 208(%r8),%xmm2,%xmm2 vaesenclast 224(%r8),%xmm2,%xmm2 vpxor (%rdi),%xmm2,%xmm2 vmovdqu %xmm2,(%rsi) addq $16,%rdi addq $16,%rsi vpxor %xmm2,%xmm0,%xmm0 vmovdqa -32(%rcx),%xmm1 call GFMUL jmp L$256_dec_loop2 L$256_dec_out: vmovdqu %xmm0,(%rdx) ret .globl _aes256gcmsiv_kdf .private_extern _aes256gcmsiv_kdf .p2align 4 _aes256gcmsiv_kdf: _CET_ENDBR vmovdqa (%rdx),%xmm1 vmovdqa 0(%rdi),%xmm4 vmovdqa and_mask(%rip),%xmm11 vmovdqa one(%rip),%xmm8 vpshufd $0x90,%xmm4,%xmm4 vpand %xmm11,%xmm4,%xmm4 vpaddd %xmm8,%xmm4,%xmm6 vpaddd %xmm8,%xmm6,%xmm7 vpaddd %xmm8,%xmm7,%xmm11 vpaddd %xmm8,%xmm11,%xmm12 vpaddd %xmm8,%xmm12,%xmm13 vpxor %xmm1,%xmm4,%xmm4 vpxor %xmm1,%xmm6,%xmm6 vpxor %xmm1,%xmm7,%xmm7 vpxor %xmm1,%xmm11,%xmm11 vpxor %xmm1,%xmm12,%xmm12 vpxor 
%xmm1,%xmm13,%xmm13 vmovdqa 16(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 32(%rdx),%xmm2 vaesenc %xmm2,%xmm4,%xmm4 vaesenc %xmm2,%xmm6,%xmm6 vaesenc %xmm2,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vaesenc %xmm2,%xmm13,%xmm13 vmovdqa 48(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 64(%rdx),%xmm2 vaesenc %xmm2,%xmm4,%xmm4 vaesenc %xmm2,%xmm6,%xmm6 vaesenc %xmm2,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vaesenc %xmm2,%xmm13,%xmm13 vmovdqa 80(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 96(%rdx),%xmm2 vaesenc %xmm2,%xmm4,%xmm4 vaesenc %xmm2,%xmm6,%xmm6 vaesenc %xmm2,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vaesenc %xmm2,%xmm13,%xmm13 vmovdqa 112(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 128(%rdx),%xmm2 vaesenc %xmm2,%xmm4,%xmm4 vaesenc %xmm2,%xmm6,%xmm6 vaesenc %xmm2,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vaesenc %xmm2,%xmm13,%xmm13 vmovdqa 144(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 160(%rdx),%xmm2 vaesenc %xmm2,%xmm4,%xmm4 vaesenc %xmm2,%xmm6,%xmm6 vaesenc %xmm2,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vaesenc %xmm2,%xmm13,%xmm13 vmovdqa 176(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc 
%xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 192(%rdx),%xmm2 vaesenc %xmm2,%xmm4,%xmm4 vaesenc %xmm2,%xmm6,%xmm6 vaesenc %xmm2,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vaesenc %xmm2,%xmm13,%xmm13 vmovdqa 208(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 224(%rdx),%xmm2 vaesenclast %xmm2,%xmm4,%xmm4 vaesenclast %xmm2,%xmm6,%xmm6 vaesenclast %xmm2,%xmm7,%xmm7 vaesenclast %xmm2,%xmm11,%xmm11 vaesenclast %xmm2,%xmm12,%xmm12 vaesenclast %xmm2,%xmm13,%xmm13 vmovdqa %xmm4,0(%rsi) vmovdqa %xmm6,16(%rsi) vmovdqa %xmm7,32(%rsi) vmovdqa %xmm11,48(%rsi) vmovdqa %xmm12,64(%rsi) vmovdqa %xmm13,80(%rsi) ret #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/aes128gcmsiv-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .section .rodata .align 16 one: .quad 1,0 two: .quad 2,0 three: .quad 3,0 four: .quad 4,0 five: .quad 5,0 six: .quad 6,0 seven: .quad 7,0 eight: .quad 8,0 OR_MASK: .long 0x00000000,0x00000000,0x00000000,0x80000000 poly: .quad 0x1, 0xc200000000000000 mask: .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d con1: .long 1,1,1,1 con2: .long 0x1b,0x1b,0x1b,0x1b con3: .byte -1,-1,-1,-1,-1,-1,-1,-1,4,5,6,7,4,5,6,7 and_mask: .long 0,0xffffffff, 0xffffffff, 0xffffffff .text .type GFMUL,@function .align 16 GFMUL: .cfi_startproc vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 vpclmulqdq $0x11,%xmm1,%xmm0,%xmm5 vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 vpclmulqdq $0x01,%xmm1,%xmm0,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $8,%xmm3,%xmm4 vpsrldq $8,%xmm3,%xmm3 vpxor %xmm4,%xmm2,%xmm2 vpxor %xmm3,%xmm5,%xmm5 vpclmulqdq $0x10,poly(%rip),%xmm2,%xmm3 vpshufd $78,%xmm2,%xmm4 vpxor %xmm4,%xmm3,%xmm2 vpclmulqdq $0x10,poly(%rip),%xmm2,%xmm3 vpshufd $78,%xmm2,%xmm4 vpxor %xmm4,%xmm3,%xmm2 vpxor %xmm5,%xmm2,%xmm0 ret .cfi_endproc .size GFMUL, .-GFMUL .globl aesgcmsiv_htable_init .hidden aesgcmsiv_htable_init .type aesgcmsiv_htable_init,@function .align 16 aesgcmsiv_htable_init: .cfi_startproc _CET_ENDBR vmovdqa (%rsi),%xmm0 vmovdqa %xmm0,%xmm1 vmovdqa %xmm0,(%rdi) call GFMUL vmovdqa %xmm0,16(%rdi) call GFMUL vmovdqa %xmm0,32(%rdi) call GFMUL vmovdqa %xmm0,48(%rdi) call GFMUL vmovdqa %xmm0,64(%rdi) call GFMUL vmovdqa %xmm0,80(%rdi) call GFMUL vmovdqa %xmm0,96(%rdi) call GFMUL vmovdqa %xmm0,112(%rdi) ret .cfi_endproc .size aesgcmsiv_htable_init, .-aesgcmsiv_htable_init .globl aesgcmsiv_htable6_init .hidden aesgcmsiv_htable6_init .type aesgcmsiv_htable6_init,@function .align 16 aesgcmsiv_htable6_init: .cfi_startproc _CET_ENDBR vmovdqa (%rsi),%xmm0 vmovdqa %xmm0,%xmm1 vmovdqa %xmm0,(%rdi) call GFMUL vmovdqa %xmm0,16(%rdi) call GFMUL vmovdqa %xmm0,32(%rdi) call GFMUL vmovdqa %xmm0,48(%rdi) call GFMUL vmovdqa %xmm0,64(%rdi) call GFMUL 
vmovdqa %xmm0,80(%rdi) ret .cfi_endproc .size aesgcmsiv_htable6_init, .-aesgcmsiv_htable6_init .globl aesgcmsiv_htable_polyval .hidden aesgcmsiv_htable_polyval .type aesgcmsiv_htable_polyval,@function .align 16 aesgcmsiv_htable_polyval: .cfi_startproc _CET_ENDBR testq %rdx,%rdx jnz .Lhtable_polyval_start ret .Lhtable_polyval_start: vzeroall movq %rdx,%r11 andq $127,%r11 jz .Lhtable_polyval_no_prefix vpxor %xmm9,%xmm9,%xmm9 vmovdqa (%rcx),%xmm1 subq %r11,%rdx subq $16,%r11 vmovdqu (%rsi),%xmm0 vpxor %xmm1,%xmm0,%xmm0 vpclmulqdq $0x01,(%rdi,%r11,1),%xmm0,%xmm5 vpclmulqdq $0x00,(%rdi,%r11,1),%xmm0,%xmm3 vpclmulqdq $0x11,(%rdi,%r11,1),%xmm0,%xmm4 vpclmulqdq $0x10,(%rdi,%r11,1),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 leaq 16(%rsi),%rsi testq %r11,%r11 jnz .Lhtable_polyval_prefix_loop jmp .Lhtable_polyval_prefix_complete .align 64 .Lhtable_polyval_prefix_loop: subq $16,%r11 vmovdqu (%rsi),%xmm0 vpclmulqdq $0x00,(%rdi,%r11,1),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,(%rdi,%r11,1),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x01,(%rdi,%r11,1),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x10,(%rdi,%r11,1),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 testq %r11,%r11 leaq 16(%rsi),%rsi jnz .Lhtable_polyval_prefix_loop .Lhtable_polyval_prefix_complete: vpsrldq $8,%xmm5,%xmm6 vpslldq $8,%xmm5,%xmm5 vpxor %xmm6,%xmm4,%xmm9 vpxor %xmm5,%xmm3,%xmm1 jmp .Lhtable_polyval_main_loop .Lhtable_polyval_no_prefix: vpxor %xmm1,%xmm1,%xmm1 vmovdqa (%rcx),%xmm9 .align 64 .Lhtable_polyval_main_loop: subq $0x80,%rdx jb .Lhtable_polyval_out vmovdqu 112(%rsi),%xmm0 vpclmulqdq $0x01,(%rdi),%xmm0,%xmm5 vpclmulqdq $0x00,(%rdi),%xmm0,%xmm3 vpclmulqdq $0x11,(%rdi),%xmm0,%xmm4 vpclmulqdq $0x10,(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vmovdqu 96(%rsi),%xmm0 vpclmulqdq $0x01,16(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,16(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,16(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,16(%rdi),%xmm0,%xmm6 vpxor 
%xmm6,%xmm5,%xmm5 vmovdqu 80(%rsi),%xmm0 vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm7 vpalignr $8,%xmm1,%xmm1,%xmm1 vpclmulqdq $0x01,32(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,32(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,32(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,32(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpxor %xmm7,%xmm1,%xmm1 vmovdqu 64(%rsi),%xmm0 vpclmulqdq $0x01,48(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,48(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,48(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,48(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vmovdqu 48(%rsi),%xmm0 vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm7 vpalignr $8,%xmm1,%xmm1,%xmm1 vpclmulqdq $0x01,64(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,64(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,64(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,64(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpxor %xmm7,%xmm1,%xmm1 vmovdqu 32(%rsi),%xmm0 vpclmulqdq $0x01,80(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,80(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,80(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,80(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpxor %xmm9,%xmm1,%xmm1 vmovdqu 16(%rsi),%xmm0 vpclmulqdq $0x01,96(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,96(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,96(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,96(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vmovdqu 0(%rsi),%xmm0 vpxor %xmm1,%xmm0,%xmm0 vpclmulqdq $0x01,112(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x00,112(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm3,%xmm3 vpclmulqdq $0x11,112(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm4,%xmm4 vpclmulqdq $0x10,112(%rdi),%xmm0,%xmm6 vpxor %xmm6,%xmm5,%xmm5 vpsrldq $8,%xmm5,%xmm6 vpslldq $8,%xmm5,%xmm5 vpxor %xmm6,%xmm4,%xmm9 vpxor %xmm5,%xmm3,%xmm1 leaq 128(%rsi),%rsi 
jmp .Lhtable_polyval_main_loop .Lhtable_polyval_out: vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm6 vpalignr $8,%xmm1,%xmm1,%xmm1 vpxor %xmm6,%xmm1,%xmm1 vpclmulqdq $0x10,poly(%rip),%xmm1,%xmm6 vpalignr $8,%xmm1,%xmm1,%xmm1 vpxor %xmm6,%xmm1,%xmm1 vpxor %xmm9,%xmm1,%xmm1 vmovdqu %xmm1,(%rcx) vzeroupper ret .cfi_endproc .size aesgcmsiv_htable_polyval,.-aesgcmsiv_htable_polyval .globl aesgcmsiv_polyval_horner .hidden aesgcmsiv_polyval_horner .type aesgcmsiv_polyval_horner,@function .align 16 aesgcmsiv_polyval_horner: .cfi_startproc _CET_ENDBR testq %rcx,%rcx jnz .Lpolyval_horner_start ret .Lpolyval_horner_start: xorq %r10,%r10 shlq $4,%rcx vmovdqa (%rsi),%xmm1 vmovdqa (%rdi),%xmm0 .Lpolyval_horner_loop: vpxor (%rdx,%r10,1),%xmm0,%xmm0 call GFMUL addq $16,%r10 cmpq %r10,%rcx jne .Lpolyval_horner_loop vmovdqa %xmm0,(%rdi) ret .cfi_endproc .size aesgcmsiv_polyval_horner,.-aesgcmsiv_polyval_horner .globl aes128gcmsiv_aes_ks .hidden aes128gcmsiv_aes_ks .type aes128gcmsiv_aes_ks,@function .align 16 aes128gcmsiv_aes_ks: .cfi_startproc _CET_ENDBR vmovdqu (%rdi),%xmm1 vmovdqa %xmm1,(%rsi) vmovdqa con1(%rip),%xmm0 vmovdqa mask(%rip),%xmm15 movq $8,%rax .Lks128_loop: addq $16,%rsi subq $1,%rax vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpslldq $4,%xmm3,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpslldq $4,%xmm3,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm1,(%rsi) jne .Lks128_loop vmovdqa con2(%rip),%xmm0 vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpslldq $4,%xmm3,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpslldq $4,%xmm3,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm1,16(%rsi) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslldq $4,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpslldq $4,%xmm3,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpslldq $4,%xmm3,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vmovdqa 
%xmm1,32(%rsi) ret .cfi_endproc .size aes128gcmsiv_aes_ks,.-aes128gcmsiv_aes_ks .globl aes256gcmsiv_aes_ks .hidden aes256gcmsiv_aes_ks .type aes256gcmsiv_aes_ks,@function .align 16 aes256gcmsiv_aes_ks: .cfi_startproc _CET_ENDBR vmovdqu (%rdi),%xmm1 vmovdqu 16(%rdi),%xmm3 vmovdqa %xmm1,(%rsi) vmovdqa %xmm3,16(%rsi) vmovdqa con1(%rip),%xmm0 vmovdqa mask(%rip),%xmm15 vpxor %xmm14,%xmm14,%xmm14 movq $6,%rax .Lks256_loop: addq $32,%rsi subq $1,%rax vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm1,(%rsi) vpshufd $0xff,%xmm1,%xmm2 vaesenclast %xmm14,%xmm2,%xmm2 vpsllq $32,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpshufb con3(%rip),%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vmovdqa %xmm3,16(%rsi) jne .Lks256_loop vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpsllq $32,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm1,32(%rsi) ret .cfi_endproc .globl aes128gcmsiv_aes_ks_enc_x1 .hidden aes128gcmsiv_aes_ks_enc_x1 .type aes128gcmsiv_aes_ks_enc_x1,@function .align 16 aes128gcmsiv_aes_ks_enc_x1: .cfi_startproc _CET_ENDBR vmovdqa (%rcx),%xmm1 vmovdqa 0(%rdi),%xmm4 vmovdqa %xmm1,(%rdx) vpxor %xmm1,%xmm4,%xmm4 vmovdqa con1(%rip),%xmm0 vmovdqa mask(%rip),%xmm15 vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,16(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,32(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 
vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,48(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,64(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,80(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,96(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,112(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,128(%rdx) vmovdqa con2(%rip),%xmm0 vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,144(%rdx) vpshufb %xmm15,%xmm1,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpsllq $32,%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpshufb con3(%rip),%xmm1,%xmm3 vpxor %xmm3,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenclast %xmm1,%xmm4,%xmm4 vmovdqa %xmm1,160(%rdx) vmovdqa 
%xmm4,0(%rsi) ret .cfi_endproc .size aes128gcmsiv_aes_ks_enc_x1,.-aes128gcmsiv_aes_ks_enc_x1 .globl aes128gcmsiv_kdf .hidden aes128gcmsiv_kdf .type aes128gcmsiv_kdf,@function .align 16 aes128gcmsiv_kdf: .cfi_startproc _CET_ENDBR vmovdqa (%rdx),%xmm1 vmovdqa 0(%rdi),%xmm9 vmovdqa and_mask(%rip),%xmm12 vmovdqa one(%rip),%xmm13 vpshufd $0x90,%xmm9,%xmm9 vpand %xmm12,%xmm9,%xmm9 vpaddd %xmm13,%xmm9,%xmm10 vpaddd %xmm13,%xmm10,%xmm11 vpaddd %xmm13,%xmm11,%xmm12 vpxor %xmm1,%xmm9,%xmm9 vpxor %xmm1,%xmm10,%xmm10 vpxor %xmm1,%xmm11,%xmm11 vpxor %xmm1,%xmm12,%xmm12 vmovdqa 16(%rdx),%xmm1 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vmovdqa 32(%rdx),%xmm2 vaesenc %xmm2,%xmm9,%xmm9 vaesenc %xmm2,%xmm10,%xmm10 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vmovdqa 48(%rdx),%xmm1 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vmovdqa 64(%rdx),%xmm2 vaesenc %xmm2,%xmm9,%xmm9 vaesenc %xmm2,%xmm10,%xmm10 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vmovdqa 80(%rdx),%xmm1 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vmovdqa 96(%rdx),%xmm2 vaesenc %xmm2,%xmm9,%xmm9 vaesenc %xmm2,%xmm10,%xmm10 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vmovdqa 112(%rdx),%xmm1 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vmovdqa 128(%rdx),%xmm2 vaesenc %xmm2,%xmm9,%xmm9 vaesenc %xmm2,%xmm10,%xmm10 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vmovdqa 144(%rdx),%xmm1 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vmovdqa 160(%rdx),%xmm2 vaesenclast %xmm2,%xmm9,%xmm9 vaesenclast %xmm2,%xmm10,%xmm10 vaesenclast %xmm2,%xmm11,%xmm11 vaesenclast %xmm2,%xmm12,%xmm12 vmovdqa %xmm9,0(%rsi) vmovdqa %xmm10,16(%rsi) vmovdqa %xmm11,32(%rsi) vmovdqa 
%xmm12,48(%rsi) ret .cfi_endproc .size aes128gcmsiv_kdf,.-aes128gcmsiv_kdf .globl aes128gcmsiv_enc_msg_x4 .hidden aes128gcmsiv_enc_msg_x4 .type aes128gcmsiv_enc_msg_x4,@function .align 16 aes128gcmsiv_enc_msg_x4: .cfi_startproc _CET_ENDBR testq %r8,%r8 jnz .L128_enc_msg_x4_start ret .L128_enc_msg_x4_start: pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-16 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-24 shrq $4,%r8 movq %r8,%r10 shlq $62,%r10 shrq $62,%r10 vmovdqa (%rdx),%xmm15 vpor OR_MASK(%rip),%xmm15,%xmm15 vmovdqu four(%rip),%xmm4 vmovdqa %xmm15,%xmm0 vpaddd one(%rip),%xmm15,%xmm1 vpaddd two(%rip),%xmm15,%xmm2 vpaddd three(%rip),%xmm15,%xmm3 shrq $2,%r8 je .L128_enc_msg_x4_check_remainder subq $64,%rsi subq $64,%rdi .L128_enc_msg_x4_loop1: addq $64,%rsi addq $64,%rdi vmovdqa %xmm0,%xmm5 vmovdqa %xmm1,%xmm6 vmovdqa %xmm2,%xmm7 vmovdqa %xmm3,%xmm8 vpxor (%rcx),%xmm5,%xmm5 vpxor (%rcx),%xmm6,%xmm6 vpxor (%rcx),%xmm7,%xmm7 vpxor (%rcx),%xmm8,%xmm8 vmovdqu 16(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm0,%xmm0 vmovdqu 32(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm1,%xmm1 vmovdqu 48(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm2,%xmm2 vmovdqu 64(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm3,%xmm3 vmovdqu 80(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 96(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 112(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc 
%xmm12,%xmm8,%xmm8 vmovdqu 128(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 144(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 160(%rcx),%xmm12 vaesenclast %xmm12,%xmm5,%xmm5 vaesenclast %xmm12,%xmm6,%xmm6 vaesenclast %xmm12,%xmm7,%xmm7 vaesenclast %xmm12,%xmm8,%xmm8 vpxor 0(%rdi),%xmm5,%xmm5 vpxor 16(%rdi),%xmm6,%xmm6 vpxor 32(%rdi),%xmm7,%xmm7 vpxor 48(%rdi),%xmm8,%xmm8 subq $1,%r8 vmovdqu %xmm5,0(%rsi) vmovdqu %xmm6,16(%rsi) vmovdqu %xmm7,32(%rsi) vmovdqu %xmm8,48(%rsi) jne .L128_enc_msg_x4_loop1 addq $64,%rsi addq $64,%rdi .L128_enc_msg_x4_check_remainder: cmpq $0,%r10 je .L128_enc_msg_x4_out .L128_enc_msg_x4_loop2: vmovdqa %xmm0,%xmm5 vpaddd one(%rip),%xmm0,%xmm0 vpxor (%rcx),%xmm5,%xmm5 vaesenc 16(%rcx),%xmm5,%xmm5 vaesenc 32(%rcx),%xmm5,%xmm5 vaesenc 48(%rcx),%xmm5,%xmm5 vaesenc 64(%rcx),%xmm5,%xmm5 vaesenc 80(%rcx),%xmm5,%xmm5 vaesenc 96(%rcx),%xmm5,%xmm5 vaesenc 112(%rcx),%xmm5,%xmm5 vaesenc 128(%rcx),%xmm5,%xmm5 vaesenc 144(%rcx),%xmm5,%xmm5 vaesenclast 160(%rcx),%xmm5,%xmm5 vpxor (%rdi),%xmm5,%xmm5 vmovdqu %xmm5,(%rsi) addq $16,%rdi addq $16,%rsi subq $1,%r10 jne .L128_enc_msg_x4_loop2 .L128_enc_msg_x4_out: popq %r13 .cfi_adjust_cfa_offset -8 .cfi_restore %r13 popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 ret .cfi_endproc .size aes128gcmsiv_enc_msg_x4,.-aes128gcmsiv_enc_msg_x4 .globl aes128gcmsiv_enc_msg_x8 .hidden aes128gcmsiv_enc_msg_x8 .type aes128gcmsiv_enc_msg_x8,@function .align 16 aes128gcmsiv_enc_msg_x8: .cfi_startproc _CET_ENDBR testq %r8,%r8 jnz .L128_enc_msg_x8_start ret .L128_enc_msg_x8_start: pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-16 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-24 pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-32 movq %rsp,%rbp .cfi_def_cfa_register rbp subq $128,%rsp andq $-64,%rsp shrq $4,%r8 movq %r8,%r10 shlq $61,%r10 
shrq $61,%r10 vmovdqu (%rdx),%xmm1 vpor OR_MASK(%rip),%xmm1,%xmm1 vpaddd seven(%rip),%xmm1,%xmm0 vmovdqu %xmm0,(%rsp) vpaddd one(%rip),%xmm1,%xmm9 vpaddd two(%rip),%xmm1,%xmm10 vpaddd three(%rip),%xmm1,%xmm11 vpaddd four(%rip),%xmm1,%xmm12 vpaddd five(%rip),%xmm1,%xmm13 vpaddd six(%rip),%xmm1,%xmm14 vmovdqa %xmm1,%xmm0 shrq $3,%r8 je .L128_enc_msg_x8_check_remainder subq $128,%rsi subq $128,%rdi .L128_enc_msg_x8_loop1: addq $128,%rsi addq $128,%rdi vmovdqa %xmm0,%xmm1 vmovdqa %xmm9,%xmm2 vmovdqa %xmm10,%xmm3 vmovdqa %xmm11,%xmm4 vmovdqa %xmm12,%xmm5 vmovdqa %xmm13,%xmm6 vmovdqa %xmm14,%xmm7 vmovdqu (%rsp),%xmm8 vpxor (%rcx),%xmm1,%xmm1 vpxor (%rcx),%xmm2,%xmm2 vpxor (%rcx),%xmm3,%xmm3 vpxor (%rcx),%xmm4,%xmm4 vpxor (%rcx),%xmm5,%xmm5 vpxor (%rcx),%xmm6,%xmm6 vpxor (%rcx),%xmm7,%xmm7 vpxor (%rcx),%xmm8,%xmm8 vmovdqu 16(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu (%rsp),%xmm14 vpaddd eight(%rip),%xmm14,%xmm14 vmovdqu %xmm14,(%rsp) vmovdqu 32(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpsubd one(%rip),%xmm14,%xmm14 vmovdqu 48(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm0,%xmm0 vmovdqu 64(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm9,%xmm9 vmovdqu 80(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 
vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm10,%xmm10 vmovdqu 96(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm11,%xmm11 vmovdqu 112(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm12,%xmm12 vmovdqu 128(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm13,%xmm13 vmovdqu 144(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu 160(%rcx),%xmm15 vaesenclast %xmm15,%xmm1,%xmm1 vaesenclast %xmm15,%xmm2,%xmm2 vaesenclast %xmm15,%xmm3,%xmm3 vaesenclast %xmm15,%xmm4,%xmm4 vaesenclast %xmm15,%xmm5,%xmm5 vaesenclast %xmm15,%xmm6,%xmm6 vaesenclast %xmm15,%xmm7,%xmm7 vaesenclast %xmm15,%xmm8,%xmm8 vpxor 0(%rdi),%xmm1,%xmm1 vpxor 16(%rdi),%xmm2,%xmm2 vpxor 32(%rdi),%xmm3,%xmm3 vpxor 48(%rdi),%xmm4,%xmm4 vpxor 64(%rdi),%xmm5,%xmm5 vpxor 80(%rdi),%xmm6,%xmm6 vpxor 96(%rdi),%xmm7,%xmm7 vpxor 112(%rdi),%xmm8,%xmm8 decq %r8 vmovdqu %xmm1,0(%rsi) vmovdqu %xmm2,16(%rsi) vmovdqu %xmm3,32(%rsi) vmovdqu %xmm4,48(%rsi) vmovdqu %xmm5,64(%rsi) vmovdqu %xmm6,80(%rsi) vmovdqu %xmm7,96(%rsi) vmovdqu %xmm8,112(%rsi) jne .L128_enc_msg_x8_loop1 addq 
$128,%rsi addq $128,%rdi .L128_enc_msg_x8_check_remainder: cmpq $0,%r10 je .L128_enc_msg_x8_out .L128_enc_msg_x8_loop2: vmovdqa %xmm0,%xmm1 vpaddd one(%rip),%xmm0,%xmm0 vpxor (%rcx),%xmm1,%xmm1 vaesenc 16(%rcx),%xmm1,%xmm1 vaesenc 32(%rcx),%xmm1,%xmm1 vaesenc 48(%rcx),%xmm1,%xmm1 vaesenc 64(%rcx),%xmm1,%xmm1 vaesenc 80(%rcx),%xmm1,%xmm1 vaesenc 96(%rcx),%xmm1,%xmm1 vaesenc 112(%rcx),%xmm1,%xmm1 vaesenc 128(%rcx),%xmm1,%xmm1 vaesenc 144(%rcx),%xmm1,%xmm1 vaesenclast 160(%rcx),%xmm1,%xmm1 vpxor (%rdi),%xmm1,%xmm1 vmovdqu %xmm1,(%rsi) addq $16,%rdi addq $16,%rsi decq %r10 jne .L128_enc_msg_x8_loop2 .L128_enc_msg_x8_out: movq %rbp,%rsp .cfi_def_cfa_register %rsp popq %rbp .cfi_adjust_cfa_offset -8 .cfi_restore %rbp popq %r13 .cfi_adjust_cfa_offset -8 .cfi_restore %r13 popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 ret .cfi_endproc .size aes128gcmsiv_enc_msg_x8,.-aes128gcmsiv_enc_msg_x8 .globl aes128gcmsiv_dec .hidden aes128gcmsiv_dec .type aes128gcmsiv_dec,@function .align 16 aes128gcmsiv_dec: .cfi_startproc _CET_ENDBR testq $~15,%r9 jnz .L128_dec_start ret .L128_dec_start: vzeroupper vmovdqa (%rdx),%xmm0 vmovdqu 16(%rdx),%xmm15 vpor OR_MASK(%rip),%xmm15,%xmm15 movq %rdx,%rax leaq 32(%rax),%rax leaq 32(%rcx),%rcx andq $~15,%r9 cmpq $96,%r9 jb .L128_dec_loop2 subq $96,%r9 vmovdqa %xmm15,%xmm7 vpaddd one(%rip),%xmm7,%xmm8 vpaddd two(%rip),%xmm7,%xmm9 vpaddd one(%rip),%xmm9,%xmm10 vpaddd two(%rip),%xmm9,%xmm11 vpaddd one(%rip),%xmm11,%xmm12 vpaddd two(%rip),%xmm11,%xmm15 vpxor (%r8),%xmm7,%xmm7 vpxor (%r8),%xmm8,%xmm8 vpxor (%r8),%xmm9,%xmm9 vpxor (%r8),%xmm10,%xmm10 vpxor (%r8),%xmm11,%xmm11 vpxor (%r8),%xmm12,%xmm12 vmovdqu 16(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 32(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc 
%xmm4,%xmm12,%xmm12 vmovdqu 48(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 64(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 80(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 96(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 112(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 128(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 144(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 160(%r8),%xmm4 vaesenclast %xmm4,%xmm7,%xmm7 vaesenclast %xmm4,%xmm8,%xmm8 vaesenclast %xmm4,%xmm9,%xmm9 vaesenclast %xmm4,%xmm10,%xmm10 vaesenclast %xmm4,%xmm11,%xmm11 vaesenclast %xmm4,%xmm12,%xmm12 vpxor 0(%rdi),%xmm7,%xmm7 vpxor 16(%rdi),%xmm8,%xmm8 vpxor 32(%rdi),%xmm9,%xmm9 vpxor 48(%rdi),%xmm10,%xmm10 vpxor 64(%rdi),%xmm11,%xmm11 vpxor 80(%rdi),%xmm12,%xmm12 vmovdqu %xmm7,0(%rsi) vmovdqu %xmm8,16(%rsi) vmovdqu %xmm9,32(%rsi) vmovdqu %xmm10,48(%rsi) vmovdqu %xmm11,64(%rsi) vmovdqu %xmm12,80(%rsi) addq $96,%rdi addq $96,%rsi jmp .L128_dec_loop1 .align 64 .L128_dec_loop1: cmpq $96,%r9 jb .L128_dec_finish_96 subq $96,%r9 vmovdqa %xmm12,%xmm6 vmovdqa %xmm11,16-32(%rax) 
vmovdqa %xmm10,32-32(%rax) vmovdqa %xmm9,48-32(%rax) vmovdqa %xmm8,64-32(%rax) vmovdqa %xmm7,80-32(%rax) vmovdqa %xmm15,%xmm7 vpaddd one(%rip),%xmm7,%xmm8 vpaddd two(%rip),%xmm7,%xmm9 vpaddd one(%rip),%xmm9,%xmm10 vpaddd two(%rip),%xmm9,%xmm11 vpaddd one(%rip),%xmm11,%xmm12 vpaddd two(%rip),%xmm11,%xmm15 vmovdqa (%r8),%xmm4 vpxor %xmm4,%xmm7,%xmm7 vpxor %xmm4,%xmm8,%xmm8 vpxor %xmm4,%xmm9,%xmm9 vpxor %xmm4,%xmm10,%xmm10 vpxor %xmm4,%xmm11,%xmm11 vpxor %xmm4,%xmm12,%xmm12 vmovdqu 0-32(%rcx),%xmm4 vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 vpclmulqdq $0x01,%xmm4,%xmm6,%xmm1 vpclmulqdq $0x10,%xmm4,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 16(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu -16(%rax),%xmm6 vmovdqu -16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 32(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 0(%rax),%xmm6 vmovdqu 0(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 48(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 16(%rax),%xmm6 vmovdqu 16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor 
%xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 64(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 32(%rax),%xmm6 vmovdqu 32(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 80(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 96(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 112(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqa 80-32(%rax),%xmm6 vpxor %xmm0,%xmm6,%xmm6 vmovdqu 80-32(%rcx),%xmm5 vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 128(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vpsrldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm2,%xmm5 vpslldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm0 vmovdqa poly(%rip),%xmm3 vmovdqu 144(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 160(%r8),%xmm6 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor 
%xmm0,%xmm2,%xmm0 vpxor 0(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm7,%xmm7 vpxor 16(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm8,%xmm8 vpxor 32(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm9,%xmm9 vpxor 48(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm10,%xmm10 vpxor 64(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm11,%xmm11 vpxor 80(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm12,%xmm12 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vmovdqu %xmm7,0(%rsi) vmovdqu %xmm8,16(%rsi) vmovdqu %xmm9,32(%rsi) vmovdqu %xmm10,48(%rsi) vmovdqu %xmm11,64(%rsi) vmovdqu %xmm12,80(%rsi) vpxor %xmm5,%xmm0,%xmm0 leaq 96(%rdi),%rdi leaq 96(%rsi),%rsi jmp .L128_dec_loop1 .L128_dec_finish_96: vmovdqa %xmm12,%xmm6 vmovdqa %xmm11,16-32(%rax) vmovdqa %xmm10,32-32(%rax) vmovdqa %xmm9,48-32(%rax) vmovdqa %xmm8,64-32(%rax) vmovdqa %xmm7,80-32(%rax) vmovdqu 0-32(%rcx),%xmm4 vpclmulqdq $0x10,%xmm4,%xmm6,%xmm1 vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 vpclmulqdq $0x01,%xmm4,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu -16(%rax),%xmm6 vmovdqu -16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 0(%rax),%xmm6 vmovdqu 0(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 16(%rax),%xmm6 vmovdqu 16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 32(%rax),%xmm6 vmovdqu 32(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor 
%xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 80-32(%rax),%xmm6 vpxor %xmm0,%xmm6,%xmm6 vmovdqu 80-32(%rcx),%xmm5 vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpsrldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm2,%xmm5 vpslldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm0 vmovdqa poly(%rip),%xmm3 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vpxor %xmm5,%xmm0,%xmm0 .L128_dec_loop2: cmpq $16,%r9 jb .L128_dec_out subq $16,%r9 vmovdqa %xmm15,%xmm2 vpaddd one(%rip),%xmm15,%xmm15 vpxor 0(%r8),%xmm2,%xmm2 vaesenc 16(%r8),%xmm2,%xmm2 vaesenc 32(%r8),%xmm2,%xmm2 vaesenc 48(%r8),%xmm2,%xmm2 vaesenc 64(%r8),%xmm2,%xmm2 vaesenc 80(%r8),%xmm2,%xmm2 vaesenc 96(%r8),%xmm2,%xmm2 vaesenc 112(%r8),%xmm2,%xmm2 vaesenc 128(%r8),%xmm2,%xmm2 vaesenc 144(%r8),%xmm2,%xmm2 vaesenclast 160(%r8),%xmm2,%xmm2 vpxor (%rdi),%xmm2,%xmm2 vmovdqu %xmm2,(%rsi) addq $16,%rdi addq $16,%rsi vpxor %xmm2,%xmm0,%xmm0 vmovdqa -32(%rcx),%xmm1 call GFMUL jmp .L128_dec_loop2 .L128_dec_out: vmovdqu %xmm0,(%rdx) ret .cfi_endproc .size aes128gcmsiv_dec, .-aes128gcmsiv_dec .globl aes128gcmsiv_ecb_enc_block .hidden aes128gcmsiv_ecb_enc_block .type aes128gcmsiv_ecb_enc_block,@function .align 16 aes128gcmsiv_ecb_enc_block: .cfi_startproc _CET_ENDBR vmovdqa (%rdi),%xmm1 vpxor (%rdx),%xmm1,%xmm1 vaesenc 16(%rdx),%xmm1,%xmm1 vaesenc 32(%rdx),%xmm1,%xmm1 vaesenc 48(%rdx),%xmm1,%xmm1 vaesenc 64(%rdx),%xmm1,%xmm1 vaesenc 80(%rdx),%xmm1,%xmm1 vaesenc 96(%rdx),%xmm1,%xmm1 vaesenc 112(%rdx),%xmm1,%xmm1 vaesenc 128(%rdx),%xmm1,%xmm1 vaesenc 144(%rdx),%xmm1,%xmm1 
vaesenclast 160(%rdx),%xmm1,%xmm1 vmovdqa %xmm1,(%rsi) ret .cfi_endproc .size aes128gcmsiv_ecb_enc_block,.-aes128gcmsiv_ecb_enc_block .globl aes256gcmsiv_aes_ks_enc_x1 .hidden aes256gcmsiv_aes_ks_enc_x1 .type aes256gcmsiv_aes_ks_enc_x1,@function .align 16 aes256gcmsiv_aes_ks_enc_x1: .cfi_startproc _CET_ENDBR vmovdqa con1(%rip),%xmm0 vmovdqa mask(%rip),%xmm15 vmovdqa (%rdi),%xmm8 vmovdqa (%rcx),%xmm1 vmovdqa 16(%rcx),%xmm3 vpxor %xmm1,%xmm8,%xmm8 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm1,(%rdx) vmovdqu %xmm3,16(%rdx) vpxor %xmm14,%xmm14,%xmm14 vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,32(%rdx) vpshufd $0xff,%xmm1,%xmm2 vaesenclast %xmm14,%xmm2,%xmm2 vpslldq $4,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm3,48(%rdx) vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,64(%rdx) vpshufd $0xff,%xmm1,%xmm2 vaesenclast %xmm14,%xmm2,%xmm2 vpslldq $4,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm3,80(%rdx) vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,96(%rdx) vpshufd $0xff,%xmm1,%xmm2 
vaesenclast %xmm14,%xmm2,%xmm2 vpslldq $4,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm3,112(%rdx) vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,128(%rdx) vpshufd $0xff,%xmm1,%xmm2 vaesenclast %xmm14,%xmm2,%xmm2 vpslldq $4,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm3,144(%rdx) vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,160(%rdx) vpshufd $0xff,%xmm1,%xmm2 vaesenclast %xmm14,%xmm2,%xmm2 vpslldq $4,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm3,176(%rdx) vpshufb %xmm15,%xmm3,%xmm2 vaesenclast %xmm0,%xmm2,%xmm2 vpslld $1,%xmm0,%xmm0 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenc %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,192(%rdx) vpshufd $0xff,%xmm1,%xmm2 vaesenclast %xmm14,%xmm2,%xmm2 vpslldq $4,%xmm3,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpxor %xmm2,%xmm3,%xmm3 vaesenc %xmm3,%xmm8,%xmm8 vmovdqu %xmm3,208(%rdx) vpshufb %xmm15,%xmm3,%xmm2 vaesenclast 
%xmm0,%xmm2,%xmm2 vpslldq $4,%xmm1,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpslldq $4,%xmm4,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpxor %xmm2,%xmm1,%xmm1 vaesenclast %xmm1,%xmm8,%xmm8 vmovdqu %xmm1,224(%rdx) vmovdqa %xmm8,(%rsi) ret .cfi_endproc .size aes256gcmsiv_aes_ks_enc_x1,.-aes256gcmsiv_aes_ks_enc_x1 .globl aes256gcmsiv_ecb_enc_block .hidden aes256gcmsiv_ecb_enc_block .type aes256gcmsiv_ecb_enc_block,@function .align 16 aes256gcmsiv_ecb_enc_block: .cfi_startproc _CET_ENDBR vmovdqa (%rdi),%xmm1 vpxor (%rdx),%xmm1,%xmm1 vaesenc 16(%rdx),%xmm1,%xmm1 vaesenc 32(%rdx),%xmm1,%xmm1 vaesenc 48(%rdx),%xmm1,%xmm1 vaesenc 64(%rdx),%xmm1,%xmm1 vaesenc 80(%rdx),%xmm1,%xmm1 vaesenc 96(%rdx),%xmm1,%xmm1 vaesenc 112(%rdx),%xmm1,%xmm1 vaesenc 128(%rdx),%xmm1,%xmm1 vaesenc 144(%rdx),%xmm1,%xmm1 vaesenc 160(%rdx),%xmm1,%xmm1 vaesenc 176(%rdx),%xmm1,%xmm1 vaesenc 192(%rdx),%xmm1,%xmm1 vaesenc 208(%rdx),%xmm1,%xmm1 vaesenclast 224(%rdx),%xmm1,%xmm1 vmovdqa %xmm1,(%rsi) ret .cfi_endproc .size aes256gcmsiv_ecb_enc_block,.-aes256gcmsiv_ecb_enc_block .globl aes256gcmsiv_enc_msg_x4 .hidden aes256gcmsiv_enc_msg_x4 .type aes256gcmsiv_enc_msg_x4,@function .align 16 aes256gcmsiv_enc_msg_x4: .cfi_startproc _CET_ENDBR testq %r8,%r8 jnz .L256_enc_msg_x4_start ret .L256_enc_msg_x4_start: movq %r8,%r10 shrq $4,%r8 shlq $60,%r10 jz .L256_enc_msg_x4_start2 addq $1,%r8 .L256_enc_msg_x4_start2: movq %r8,%r10 shlq $62,%r10 shrq $62,%r10 vmovdqa (%rdx),%xmm15 vpor OR_MASK(%rip),%xmm15,%xmm15 vmovdqa four(%rip),%xmm4 vmovdqa %xmm15,%xmm0 vpaddd one(%rip),%xmm15,%xmm1 vpaddd two(%rip),%xmm15,%xmm2 vpaddd three(%rip),%xmm15,%xmm3 shrq $2,%r8 je .L256_enc_msg_x4_check_remainder subq $64,%rsi subq $64,%rdi .L256_enc_msg_x4_loop1: addq $64,%rsi addq $64,%rdi vmovdqa %xmm0,%xmm5 vmovdqa %xmm1,%xmm6 vmovdqa %xmm2,%xmm7 vmovdqa %xmm3,%xmm8 vpxor (%rcx),%xmm5,%xmm5 vpxor (%rcx),%xmm6,%xmm6 vpxor (%rcx),%xmm7,%xmm7 vpxor (%rcx),%xmm8,%xmm8 vmovdqu 16(%rcx),%xmm12 vaesenc 
%xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm0,%xmm0 vmovdqu 32(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm1,%xmm1 vmovdqu 48(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm2,%xmm2 vmovdqu 64(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vpaddd %xmm4,%xmm3,%xmm3 vmovdqu 80(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 96(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 112(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 128(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 144(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 160(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 176(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 192(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 208(%rcx),%xmm12 vaesenc %xmm12,%xmm5,%xmm5 vaesenc %xmm12,%xmm6,%xmm6 vaesenc %xmm12,%xmm7,%xmm7 vaesenc %xmm12,%xmm8,%xmm8 vmovdqu 224(%rcx),%xmm12 vaesenclast %xmm12,%xmm5,%xmm5 vaesenclast %xmm12,%xmm6,%xmm6 vaesenclast %xmm12,%xmm7,%xmm7 vaesenclast %xmm12,%xmm8,%xmm8 vpxor 0(%rdi),%xmm5,%xmm5 vpxor 16(%rdi),%xmm6,%xmm6 vpxor 
32(%rdi),%xmm7,%xmm7 vpxor 48(%rdi),%xmm8,%xmm8 subq $1,%r8 vmovdqu %xmm5,0(%rsi) vmovdqu %xmm6,16(%rsi) vmovdqu %xmm7,32(%rsi) vmovdqu %xmm8,48(%rsi) jne .L256_enc_msg_x4_loop1 addq $64,%rsi addq $64,%rdi .L256_enc_msg_x4_check_remainder: cmpq $0,%r10 je .L256_enc_msg_x4_out .L256_enc_msg_x4_loop2: vmovdqa %xmm0,%xmm5 vpaddd one(%rip),%xmm0,%xmm0 vpxor (%rcx),%xmm5,%xmm5 vaesenc 16(%rcx),%xmm5,%xmm5 vaesenc 32(%rcx),%xmm5,%xmm5 vaesenc 48(%rcx),%xmm5,%xmm5 vaesenc 64(%rcx),%xmm5,%xmm5 vaesenc 80(%rcx),%xmm5,%xmm5 vaesenc 96(%rcx),%xmm5,%xmm5 vaesenc 112(%rcx),%xmm5,%xmm5 vaesenc 128(%rcx),%xmm5,%xmm5 vaesenc 144(%rcx),%xmm5,%xmm5 vaesenc 160(%rcx),%xmm5,%xmm5 vaesenc 176(%rcx),%xmm5,%xmm5 vaesenc 192(%rcx),%xmm5,%xmm5 vaesenc 208(%rcx),%xmm5,%xmm5 vaesenclast 224(%rcx),%xmm5,%xmm5 vpxor (%rdi),%xmm5,%xmm5 vmovdqu %xmm5,(%rsi) addq $16,%rdi addq $16,%rsi subq $1,%r10 jne .L256_enc_msg_x4_loop2 .L256_enc_msg_x4_out: ret .cfi_endproc .size aes256gcmsiv_enc_msg_x4,.-aes256gcmsiv_enc_msg_x4 .globl aes256gcmsiv_enc_msg_x8 .hidden aes256gcmsiv_enc_msg_x8 .type aes256gcmsiv_enc_msg_x8,@function .align 16 aes256gcmsiv_enc_msg_x8: .cfi_startproc _CET_ENDBR testq %r8,%r8 jnz .L256_enc_msg_x8_start ret .L256_enc_msg_x8_start: movq %rsp,%r11 subq $16,%r11 andq $-64,%r11 movq %r8,%r10 shrq $4,%r8 shlq $60,%r10 jz .L256_enc_msg_x8_start2 addq $1,%r8 .L256_enc_msg_x8_start2: movq %r8,%r10 shlq $61,%r10 shrq $61,%r10 vmovdqa (%rdx),%xmm1 vpor OR_MASK(%rip),%xmm1,%xmm1 vpaddd seven(%rip),%xmm1,%xmm0 vmovdqa %xmm0,(%r11) vpaddd one(%rip),%xmm1,%xmm9 vpaddd two(%rip),%xmm1,%xmm10 vpaddd three(%rip),%xmm1,%xmm11 vpaddd four(%rip),%xmm1,%xmm12 vpaddd five(%rip),%xmm1,%xmm13 vpaddd six(%rip),%xmm1,%xmm14 vmovdqa %xmm1,%xmm0 shrq $3,%r8 jz .L256_enc_msg_x8_check_remainder subq $128,%rsi subq $128,%rdi .L256_enc_msg_x8_loop1: addq $128,%rsi addq $128,%rdi vmovdqa %xmm0,%xmm1 vmovdqa %xmm9,%xmm2 vmovdqa %xmm10,%xmm3 vmovdqa %xmm11,%xmm4 vmovdqa %xmm12,%xmm5 vmovdqa %xmm13,%xmm6 vmovdqa 
%xmm14,%xmm7 vmovdqa (%r11),%xmm8 vpxor (%rcx),%xmm1,%xmm1 vpxor (%rcx),%xmm2,%xmm2 vpxor (%rcx),%xmm3,%xmm3 vpxor (%rcx),%xmm4,%xmm4 vpxor (%rcx),%xmm5,%xmm5 vpxor (%rcx),%xmm6,%xmm6 vpxor (%rcx),%xmm7,%xmm7 vpxor (%rcx),%xmm8,%xmm8 vmovdqu 16(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqa (%r11),%xmm14 vpaddd eight(%rip),%xmm14,%xmm14 vmovdqa %xmm14,(%r11) vmovdqu 32(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpsubd one(%rip),%xmm14,%xmm14 vmovdqu 48(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm0,%xmm0 vmovdqu 64(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm9,%xmm9 vmovdqu 80(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm10,%xmm10 vmovdqu 96(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm11,%xmm11 vmovdqu 112(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc 
%xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm12,%xmm12 vmovdqu 128(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vpaddd eight(%rip),%xmm13,%xmm13 vmovdqu 144(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu 160(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu 176(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu 192(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu 208(%rcx),%xmm15 vaesenc %xmm15,%xmm1,%xmm1 vaesenc %xmm15,%xmm2,%xmm2 vaesenc %xmm15,%xmm3,%xmm3 vaesenc %xmm15,%xmm4,%xmm4 vaesenc %xmm15,%xmm5,%xmm5 vaesenc %xmm15,%xmm6,%xmm6 vaesenc %xmm15,%xmm7,%xmm7 vaesenc %xmm15,%xmm8,%xmm8 vmovdqu 224(%rcx),%xmm15 vaesenclast %xmm15,%xmm1,%xmm1 vaesenclast %xmm15,%xmm2,%xmm2 vaesenclast %xmm15,%xmm3,%xmm3 vaesenclast %xmm15,%xmm4,%xmm4 vaesenclast %xmm15,%xmm5,%xmm5 vaesenclast %xmm15,%xmm6,%xmm6 vaesenclast %xmm15,%xmm7,%xmm7 vaesenclast %xmm15,%xmm8,%xmm8 vpxor 0(%rdi),%xmm1,%xmm1 vpxor 16(%rdi),%xmm2,%xmm2 vpxor 
32(%rdi),%xmm3,%xmm3 vpxor 48(%rdi),%xmm4,%xmm4 vpxor 64(%rdi),%xmm5,%xmm5 vpxor 80(%rdi),%xmm6,%xmm6 vpxor 96(%rdi),%xmm7,%xmm7 vpxor 112(%rdi),%xmm8,%xmm8 subq $1,%r8 vmovdqu %xmm1,0(%rsi) vmovdqu %xmm2,16(%rsi) vmovdqu %xmm3,32(%rsi) vmovdqu %xmm4,48(%rsi) vmovdqu %xmm5,64(%rsi) vmovdqu %xmm6,80(%rsi) vmovdqu %xmm7,96(%rsi) vmovdqu %xmm8,112(%rsi) jne .L256_enc_msg_x8_loop1 addq $128,%rsi addq $128,%rdi .L256_enc_msg_x8_check_remainder: cmpq $0,%r10 je .L256_enc_msg_x8_out .L256_enc_msg_x8_loop2: vmovdqa %xmm0,%xmm1 vpaddd one(%rip),%xmm0,%xmm0 vpxor (%rcx),%xmm1,%xmm1 vaesenc 16(%rcx),%xmm1,%xmm1 vaesenc 32(%rcx),%xmm1,%xmm1 vaesenc 48(%rcx),%xmm1,%xmm1 vaesenc 64(%rcx),%xmm1,%xmm1 vaesenc 80(%rcx),%xmm1,%xmm1 vaesenc 96(%rcx),%xmm1,%xmm1 vaesenc 112(%rcx),%xmm1,%xmm1 vaesenc 128(%rcx),%xmm1,%xmm1 vaesenc 144(%rcx),%xmm1,%xmm1 vaesenc 160(%rcx),%xmm1,%xmm1 vaesenc 176(%rcx),%xmm1,%xmm1 vaesenc 192(%rcx),%xmm1,%xmm1 vaesenc 208(%rcx),%xmm1,%xmm1 vaesenclast 224(%rcx),%xmm1,%xmm1 vpxor (%rdi),%xmm1,%xmm1 vmovdqu %xmm1,(%rsi) addq $16,%rdi addq $16,%rsi subq $1,%r10 jnz .L256_enc_msg_x8_loop2 .L256_enc_msg_x8_out: ret .cfi_endproc .size aes256gcmsiv_enc_msg_x8,.-aes256gcmsiv_enc_msg_x8 .globl aes256gcmsiv_dec .hidden aes256gcmsiv_dec .type aes256gcmsiv_dec,@function .align 16 aes256gcmsiv_dec: .cfi_startproc _CET_ENDBR testq $~15,%r9 jnz .L256_dec_start ret .L256_dec_start: vzeroupper vmovdqa (%rdx),%xmm0 vmovdqu 16(%rdx),%xmm15 vpor OR_MASK(%rip),%xmm15,%xmm15 movq %rdx,%rax leaq 32(%rax),%rax leaq 32(%rcx),%rcx andq $~15,%r9 cmpq $96,%r9 jb .L256_dec_loop2 subq $96,%r9 vmovdqa %xmm15,%xmm7 vpaddd one(%rip),%xmm7,%xmm8 vpaddd two(%rip),%xmm7,%xmm9 vpaddd one(%rip),%xmm9,%xmm10 vpaddd two(%rip),%xmm9,%xmm11 vpaddd one(%rip),%xmm11,%xmm12 vpaddd two(%rip),%xmm11,%xmm15 vpxor (%r8),%xmm7,%xmm7 vpxor (%r8),%xmm8,%xmm8 vpxor (%r8),%xmm9,%xmm9 vpxor (%r8),%xmm10,%xmm10 vpxor (%r8),%xmm11,%xmm11 vpxor (%r8),%xmm12,%xmm12 vmovdqu 16(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 
vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 32(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 48(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 64(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 80(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 96(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 112(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 128(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 144(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 160(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 176(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 
192(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 208(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 224(%r8),%xmm4 vaesenclast %xmm4,%xmm7,%xmm7 vaesenclast %xmm4,%xmm8,%xmm8 vaesenclast %xmm4,%xmm9,%xmm9 vaesenclast %xmm4,%xmm10,%xmm10 vaesenclast %xmm4,%xmm11,%xmm11 vaesenclast %xmm4,%xmm12,%xmm12 vpxor 0(%rdi),%xmm7,%xmm7 vpxor 16(%rdi),%xmm8,%xmm8 vpxor 32(%rdi),%xmm9,%xmm9 vpxor 48(%rdi),%xmm10,%xmm10 vpxor 64(%rdi),%xmm11,%xmm11 vpxor 80(%rdi),%xmm12,%xmm12 vmovdqu %xmm7,0(%rsi) vmovdqu %xmm8,16(%rsi) vmovdqu %xmm9,32(%rsi) vmovdqu %xmm10,48(%rsi) vmovdqu %xmm11,64(%rsi) vmovdqu %xmm12,80(%rsi) addq $96,%rdi addq $96,%rsi jmp .L256_dec_loop1 .align 64 .L256_dec_loop1: cmpq $96,%r9 jb .L256_dec_finish_96 subq $96,%r9 vmovdqa %xmm12,%xmm6 vmovdqa %xmm11,16-32(%rax) vmovdqa %xmm10,32-32(%rax) vmovdqa %xmm9,48-32(%rax) vmovdqa %xmm8,64-32(%rax) vmovdqa %xmm7,80-32(%rax) vmovdqa %xmm15,%xmm7 vpaddd one(%rip),%xmm7,%xmm8 vpaddd two(%rip),%xmm7,%xmm9 vpaddd one(%rip),%xmm9,%xmm10 vpaddd two(%rip),%xmm9,%xmm11 vpaddd one(%rip),%xmm11,%xmm12 vpaddd two(%rip),%xmm11,%xmm15 vmovdqa (%r8),%xmm4 vpxor %xmm4,%xmm7,%xmm7 vpxor %xmm4,%xmm8,%xmm8 vpxor %xmm4,%xmm9,%xmm9 vpxor %xmm4,%xmm10,%xmm10 vpxor %xmm4,%xmm11,%xmm11 vpxor %xmm4,%xmm12,%xmm12 vmovdqu 0-32(%rcx),%xmm4 vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 vpclmulqdq $0x01,%xmm4,%xmm6,%xmm1 vpclmulqdq $0x10,%xmm4,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 16(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu -16(%rax),%xmm6 vmovdqu -16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor 
%xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 32(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 0(%rax),%xmm6 vmovdqu 0(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 48(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 16(%rax),%xmm6 vmovdqu 16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 64(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 32(%rax),%xmm6 vmovdqu 32(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 80(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 96(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 112(%r8),%xmm4 
vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqa 80-32(%rax),%xmm6 vpxor %xmm0,%xmm6,%xmm6 vmovdqu 80-32(%rcx),%xmm5 vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 128(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vpsrldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm2,%xmm5 vpslldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm0 vmovdqa poly(%rip),%xmm3 vmovdqu 144(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 160(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 176(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 192(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 208(%r8),%xmm4 vaesenc %xmm4,%xmm7,%xmm7 vaesenc %xmm4,%xmm8,%xmm8 vaesenc %xmm4,%xmm9,%xmm9 vaesenc %xmm4,%xmm10,%xmm10 vaesenc %xmm4,%xmm11,%xmm11 vaesenc %xmm4,%xmm12,%xmm12 vmovdqu 224(%r8),%xmm6 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vpxor 0(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm7,%xmm7 vpxor 16(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm8,%xmm8 vpxor 32(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm9,%xmm9 vpxor 
48(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm10,%xmm10 vpxor 64(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm11,%xmm11 vpxor 80(%rdi),%xmm6,%xmm4 vaesenclast %xmm4,%xmm12,%xmm12 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vmovdqu %xmm7,0(%rsi) vmovdqu %xmm8,16(%rsi) vmovdqu %xmm9,32(%rsi) vmovdqu %xmm10,48(%rsi) vmovdqu %xmm11,64(%rsi) vmovdqu %xmm12,80(%rsi) vpxor %xmm5,%xmm0,%xmm0 leaq 96(%rdi),%rdi leaq 96(%rsi),%rsi jmp .L256_dec_loop1 .L256_dec_finish_96: vmovdqa %xmm12,%xmm6 vmovdqa %xmm11,16-32(%rax) vmovdqa %xmm10,32-32(%rax) vmovdqa %xmm9,48-32(%rax) vmovdqa %xmm8,64-32(%rax) vmovdqa %xmm7,80-32(%rax) vmovdqu 0-32(%rcx),%xmm4 vpclmulqdq $0x10,%xmm4,%xmm6,%xmm1 vpclmulqdq $0x11,%xmm4,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm4,%xmm6,%xmm3 vpclmulqdq $0x01,%xmm4,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu -16(%rax),%xmm6 vmovdqu -16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 0(%rax),%xmm6 vmovdqu 0(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 16(%rax),%xmm6 vmovdqu 16(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vmovdqu 32(%rax),%xmm6 vmovdqu 32(%rcx),%xmm13 vpclmulqdq $0x10,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x11,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x01,%xmm13,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 
vmovdqu 80-32(%rax),%xmm6 vpxor %xmm0,%xmm6,%xmm6 vmovdqu 80-32(%rcx),%xmm5 vpclmulqdq $0x11,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpclmulqdq $0x10,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x01,%xmm5,%xmm6,%xmm4 vpxor %xmm4,%xmm1,%xmm1 vpsrldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm2,%xmm5 vpslldq $8,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm0 vmovdqa poly(%rip),%xmm3 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vpalignr $8,%xmm0,%xmm0,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm0 vpxor %xmm0,%xmm2,%xmm0 vpxor %xmm5,%xmm0,%xmm0 .L256_dec_loop2: cmpq $16,%r9 jb .L256_dec_out subq $16,%r9 vmovdqa %xmm15,%xmm2 vpaddd one(%rip),%xmm15,%xmm15 vpxor 0(%r8),%xmm2,%xmm2 vaesenc 16(%r8),%xmm2,%xmm2 vaesenc 32(%r8),%xmm2,%xmm2 vaesenc 48(%r8),%xmm2,%xmm2 vaesenc 64(%r8),%xmm2,%xmm2 vaesenc 80(%r8),%xmm2,%xmm2 vaesenc 96(%r8),%xmm2,%xmm2 vaesenc 112(%r8),%xmm2,%xmm2 vaesenc 128(%r8),%xmm2,%xmm2 vaesenc 144(%r8),%xmm2,%xmm2 vaesenc 160(%r8),%xmm2,%xmm2 vaesenc 176(%r8),%xmm2,%xmm2 vaesenc 192(%r8),%xmm2,%xmm2 vaesenc 208(%r8),%xmm2,%xmm2 vaesenclast 224(%r8),%xmm2,%xmm2 vpxor (%rdi),%xmm2,%xmm2 vmovdqu %xmm2,(%rsi) addq $16,%rdi addq $16,%rsi vpxor %xmm2,%xmm0,%xmm0 vmovdqa -32(%rcx),%xmm1 call GFMUL jmp .L256_dec_loop2 .L256_dec_out: vmovdqu %xmm0,(%rdx) ret .cfi_endproc .size aes256gcmsiv_dec, .-aes256gcmsiv_dec .globl aes256gcmsiv_kdf .hidden aes256gcmsiv_kdf .type aes256gcmsiv_kdf,@function .align 16 aes256gcmsiv_kdf: .cfi_startproc _CET_ENDBR vmovdqa (%rdx),%xmm1 vmovdqa 0(%rdi),%xmm4 vmovdqa and_mask(%rip),%xmm11 vmovdqa one(%rip),%xmm8 vpshufd $0x90,%xmm4,%xmm4 vpand %xmm11,%xmm4,%xmm4 vpaddd %xmm8,%xmm4,%xmm6 vpaddd %xmm8,%xmm6,%xmm7 vpaddd %xmm8,%xmm7,%xmm11 vpaddd %xmm8,%xmm11,%xmm12 vpaddd %xmm8,%xmm12,%xmm13 vpxor %xmm1,%xmm4,%xmm4 vpxor %xmm1,%xmm6,%xmm6 vpxor %xmm1,%xmm7,%xmm7 vpxor %xmm1,%xmm11,%xmm11 vpxor %xmm1,%xmm12,%xmm12 vpxor %xmm1,%xmm13,%xmm13 vmovdqa 
16(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 32(%rdx),%xmm2 vaesenc %xmm2,%xmm4,%xmm4 vaesenc %xmm2,%xmm6,%xmm6 vaesenc %xmm2,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vaesenc %xmm2,%xmm13,%xmm13 vmovdqa 48(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 64(%rdx),%xmm2 vaesenc %xmm2,%xmm4,%xmm4 vaesenc %xmm2,%xmm6,%xmm6 vaesenc %xmm2,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vaesenc %xmm2,%xmm13,%xmm13 vmovdqa 80(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 96(%rdx),%xmm2 vaesenc %xmm2,%xmm4,%xmm4 vaesenc %xmm2,%xmm6,%xmm6 vaesenc %xmm2,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vaesenc %xmm2,%xmm13,%xmm13 vmovdqa 112(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 128(%rdx),%xmm2 vaesenc %xmm2,%xmm4,%xmm4 vaesenc %xmm2,%xmm6,%xmm6 vaesenc %xmm2,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vaesenc %xmm2,%xmm13,%xmm13 vmovdqa 144(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 160(%rdx),%xmm2 vaesenc %xmm2,%xmm4,%xmm4 vaesenc %xmm2,%xmm6,%xmm6 vaesenc %xmm2,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vaesenc %xmm2,%xmm13,%xmm13 vmovdqa 176(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc 
%xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 192(%rdx),%xmm2 vaesenc %xmm2,%xmm4,%xmm4 vaesenc %xmm2,%xmm6,%xmm6 vaesenc %xmm2,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vaesenc %xmm2,%xmm12,%xmm12 vaesenc %xmm2,%xmm13,%xmm13 vmovdqa 208(%rdx),%xmm1 vaesenc %xmm1,%xmm4,%xmm4 vaesenc %xmm1,%xmm6,%xmm6 vaesenc %xmm1,%xmm7,%xmm7 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovdqa 224(%rdx),%xmm2 vaesenclast %xmm2,%xmm4,%xmm4 vaesenclast %xmm2,%xmm6,%xmm6 vaesenclast %xmm2,%xmm7,%xmm7 vaesenclast %xmm2,%xmm11,%xmm11 vaesenclast %xmm2,%xmm12,%xmm12 vaesenclast %xmm2,%xmm13,%xmm13 vmovdqa %xmm4,0(%rsi) vmovdqa %xmm6,16(%rsi) vmovdqa %xmm7,32(%rsi) vmovdqa %xmm11,48(%rsi) vmovdqa %xmm12,64(%rsi) vmovdqa %xmm13,80(%rsi) ret .cfi_endproc .size aes256gcmsiv_kdf, .-aes256gcmsiv_kdf #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha-armv4-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) #include @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. 
.arch armv7-a .text #if defined(__thumb2__) || defined(__clang__) .syntax unified #endif #if defined(__thumb2__) .thumb #else .code 32 #endif #if defined(__thumb2__) || defined(__clang__) #define ldrhsb ldrbhs #endif .align 5 .Lsigma: .long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral .Lone: .long 1,0,0,0 .globl ChaCha20_ctr32_nohw .hidden ChaCha20_ctr32_nohw .type ChaCha20_ctr32_nohw,%function .align 5 ChaCha20_ctr32_nohw: ldr r12,[sp,#0] @ pull pointer to counter and nonce stmdb sp!,{r0,r1,r2,r4-r11,lr} adr r14,.Lsigma ldmia r12,{r4,r5,r6,r7} @ load counter and nonce sub sp,sp,#4*(16) @ off-load area stmdb sp!,{r4,r5,r6,r7} @ copy counter and nonce ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key ldmia r14,{r0,r1,r2,r3} @ load sigma stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy key stmdb sp!,{r0,r1,r2,r3} @ copy sigma str r10,[sp,#4*(16+10)] @ off-load "rx" str r11,[sp,#4*(16+11)] @ off-load "rx" b .Loop_outer_enter .align 4 .Loop_outer: ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material str r11,[sp,#4*(32+2)] @ save len str r12, [sp,#4*(32+1)] @ save inp str r14, [sp,#4*(32+0)] @ save out .Loop_outer_enter: ldr r11, [sp,#4*(15)] ldr r12,[sp,#4*(12)] @ modulo-scheduled load ldr r10, [sp,#4*(13)] ldr r14,[sp,#4*(14)] str r11, [sp,#4*(16+15)] mov r11,#10 b .Loop .align 4 .Loop: subs r11,r11,#1 add r0,r0,r4 mov r12,r12,ror#16 add r1,r1,r5 mov r10,r10,ror#16 eor r12,r12,r0,ror#16 eor r10,r10,r1,ror#16 add r8,r8,r12 mov r4,r4,ror#20 add r9,r9,r10 mov r5,r5,ror#20 eor r4,r4,r8,ror#20 eor r5,r5,r9,ror#20 add r0,r0,r4 mov r12,r12,ror#24 add r1,r1,r5 mov r10,r10,ror#24 eor r12,r12,r0,ror#24 eor r10,r10,r1,ror#24 add r8,r8,r12 mov r4,r4,ror#25 add r9,r9,r10 mov r5,r5,ror#25 str r10,[sp,#4*(16+13)] ldr r10,[sp,#4*(16+15)] eor r4,r4,r8,ror#25 eor r5,r5,r9,ror#25 str r8,[sp,#4*(16+8)] ldr r8,[sp,#4*(16+10)] add r2,r2,r6 mov r14,r14,ror#16 str r9,[sp,#4*(16+9)] ldr r9,[sp,#4*(16+11)] add r3,r3,r7 mov r10,r10,ror#16 eor r14,r14,r2,ror#16 eor 
r10,r10,r3,ror#16 add r8,r8,r14 mov r6,r6,ror#20 add r9,r9,r10 mov r7,r7,ror#20 eor r6,r6,r8,ror#20 eor r7,r7,r9,ror#20 add r2,r2,r6 mov r14,r14,ror#24 add r3,r3,r7 mov r10,r10,ror#24 eor r14,r14,r2,ror#24 eor r10,r10,r3,ror#24 add r8,r8,r14 mov r6,r6,ror#25 add r9,r9,r10 mov r7,r7,ror#25 eor r6,r6,r8,ror#25 eor r7,r7,r9,ror#25 add r0,r0,r5 mov r10,r10,ror#16 add r1,r1,r6 mov r12,r12,ror#16 eor r10,r10,r0,ror#16 eor r12,r12,r1,ror#16 add r8,r8,r10 mov r5,r5,ror#20 add r9,r9,r12 mov r6,r6,ror#20 eor r5,r5,r8,ror#20 eor r6,r6,r9,ror#20 add r0,r0,r5 mov r10,r10,ror#24 add r1,r1,r6 mov r12,r12,ror#24 eor r10,r10,r0,ror#24 eor r12,r12,r1,ror#24 add r8,r8,r10 mov r5,r5,ror#25 str r10,[sp,#4*(16+15)] ldr r10,[sp,#4*(16+13)] add r9,r9,r12 mov r6,r6,ror#25 eor r5,r5,r8,ror#25 eor r6,r6,r9,ror#25 str r8,[sp,#4*(16+10)] ldr r8,[sp,#4*(16+8)] add r2,r2,r7 mov r10,r10,ror#16 str r9,[sp,#4*(16+11)] ldr r9,[sp,#4*(16+9)] add r3,r3,r4 mov r14,r14,ror#16 eor r10,r10,r2,ror#16 eor r14,r14,r3,ror#16 add r8,r8,r10 mov r7,r7,ror#20 add r9,r9,r14 mov r4,r4,ror#20 eor r7,r7,r8,ror#20 eor r4,r4,r9,ror#20 add r2,r2,r7 mov r10,r10,ror#24 add r3,r3,r4 mov r14,r14,ror#24 eor r10,r10,r2,ror#24 eor r14,r14,r3,ror#24 add r8,r8,r10 mov r7,r7,ror#25 add r9,r9,r14 mov r4,r4,ror#25 eor r7,r7,r8,ror#25 eor r4,r4,r9,ror#25 bne .Loop ldr r11,[sp,#4*(32+2)] @ load len str r8, [sp,#4*(16+8)] @ modulo-scheduled store str r9, [sp,#4*(16+9)] str r12,[sp,#4*(16+12)] str r10, [sp,#4*(16+13)] str r14,[sp,#4*(16+14)] @ at this point we have first half of 512-bit result in @ rx and second half at sp+4*(16+8) cmp r11,#64 @ done yet? #ifdef __thumb2__ itete lo #endif addlo r12,sp,#4*(0) @ shortcut or ... ldrhs r12,[sp,#4*(32+1)] @ ... load inp addlo r14,sp,#4*(0) @ shortcut or ... ldrhs r14,[sp,#4*(32+0)] @ ... load out ldr r8,[sp,#4*(0)] @ load key material ldr r9,[sp,#4*(1)] #if __ARM_ARCH>=6 || !defined(__ARMEB__) # if __ARM_ARCH<7 orr r10,r12,r14 tst r10,#3 @ are input and output aligned? 
ldr r10,[sp,#4*(2)] bne .Lunaligned cmp r11,#64 @ restore flags # else ldr r10,[sp,#4*(2)] # endif ldr r11,[sp,#4*(3)] add r0,r0,r8 @ accumulate key material add r1,r1,r9 # ifdef __thumb2__ itt hs # endif ldrhs r8,[r12],#16 @ load input ldrhs r9,[r12,#-12] add r2,r2,r10 add r3,r3,r11 # ifdef __thumb2__ itt hs # endif ldrhs r10,[r12,#-8] ldrhs r11,[r12,#-4] # if __ARM_ARCH>=6 && defined(__ARMEB__) rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 # endif # ifdef __thumb2__ itt hs # endif eorhs r0,r0,r8 @ xor with input eorhs r1,r1,r9 add r8,sp,#4*(4) str r0,[r14],#16 @ store output # ifdef __thumb2__ itt hs # endif eorhs r2,r2,r10 eorhs r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material str r1,[r14,#-12] str r2,[r14,#-8] str r3,[r14,#-4] add r4,r4,r8 @ accumulate key material add r5,r5,r9 # ifdef __thumb2__ itt hs # endif ldrhs r8,[r12],#16 @ load input ldrhs r9,[r12,#-12] add r6,r6,r10 add r7,r7,r11 # ifdef __thumb2__ itt hs # endif ldrhs r10,[r12,#-8] ldrhs r11,[r12,#-4] # if __ARM_ARCH>=6 && defined(__ARMEB__) rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif # ifdef __thumb2__ itt hs # endif eorhs r4,r4,r8 eorhs r5,r5,r9 add r8,sp,#4*(8) str r4,[r14],#16 @ store output # ifdef __thumb2__ itt hs # endif eorhs r6,r6,r10 eorhs r7,r7,r11 str r5,[r14,#-12] ldmia r8,{r8,r9,r10,r11} @ load key material str r6,[r14,#-8] add r0,sp,#4*(16+8) str r7,[r14,#-4] ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half add r0,r0,r8 @ accumulate key material add r1,r1,r9 # ifdef __thumb2__ itt hs # endif ldrhs r8,[r12],#16 @ load input ldrhs r9,[r12,#-12] # ifdef __thumb2__ itt hi # endif strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it add r2,r2,r10 add r3,r3,r11 # ifdef __thumb2__ itt hs # endif ldrhs r10,[r12,#-8] ldrhs r11,[r12,#-4] # if __ARM_ARCH>=6 && defined(__ARMEB__) rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 # endif # ifdef __thumb2__ itt hs # endif eorhs r0,r0,r8 eorhs r1,r1,r9 add r8,sp,#4*(12) str r0,[r14],#16 @ store output # 
ifdef __thumb2__ itt hs # endif eorhs r2,r2,r10 eorhs r3,r3,r11 str r1,[r14,#-12] ldmia r8,{r8,r9,r10,r11} @ load key material str r2,[r14,#-8] str r3,[r14,#-4] add r4,r4,r8 @ accumulate key material add r5,r5,r9 # ifdef __thumb2__ itt hi # endif addhi r8,r8,#1 @ next counter value strhi r8,[sp,#4*(12)] @ save next counter value # ifdef __thumb2__ itt hs # endif ldrhs r8,[r12],#16 @ load input ldrhs r9,[r12,#-12] add r6,r6,r10 add r7,r7,r11 # ifdef __thumb2__ itt hs # endif ldrhs r10,[r12,#-8] ldrhs r11,[r12,#-4] # if __ARM_ARCH>=6 && defined(__ARMEB__) rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif # ifdef __thumb2__ itt hs # endif eorhs r4,r4,r8 eorhs r5,r5,r9 # ifdef __thumb2__ it ne # endif ldrne r8,[sp,#4*(32+2)] @ re-load len # ifdef __thumb2__ itt hs # endif eorhs r6,r6,r10 eorhs r7,r7,r11 str r4,[r14],#16 @ store output str r5,[r14,#-12] # ifdef __thumb2__ it hs # endif subhs r11,r8,#64 @ len-=64 str r6,[r14,#-8] str r7,[r14,#-4] bhi .Loop_outer beq .Ldone # if __ARM_ARCH<7 b .Ltail .align 4 .Lunaligned:@ unaligned endian-neutral path cmp r11,#64 @ restore flags # endif #endif #if __ARM_ARCH<7 ldr r11,[sp,#4*(3)] add r0,r0,r8 @ accumulate key material add r1,r1,r9 add r2,r2,r10 # ifdef __thumb2__ itete lo # endif eorlo r8,r8,r8 @ zero or ... ldrhsb r8,[r12],#16 @ ... 
load input eorlo r9,r9,r9 ldrhsb r9,[r12,#-12] add r3,r3,r11 # ifdef __thumb2__ itete lo # endif eorlo r10,r10,r10 ldrhsb r10,[r12,#-8] eorlo r11,r11,r11 ldrhsb r11,[r12,#-4] eor r0,r8,r0 @ xor with input (or zero) eor r1,r9,r1 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-15] @ load more input ldrhsb r9,[r12,#-11] eor r2,r10,r2 strb r0,[r14],#16 @ store output eor r3,r11,r3 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-7] ldrhsb r11,[r12,#-3] strb r1,[r14,#-12] eor r0,r8,r0,lsr#8 strb r2,[r14,#-8] eor r1,r9,r1,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-14] @ load more input ldrhsb r9,[r12,#-10] strb r3,[r14,#-4] eor r2,r10,r2,lsr#8 strb r0,[r14,#-15] eor r3,r11,r3,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-6] ldrhsb r11,[r12,#-2] strb r1,[r14,#-11] eor r0,r8,r0,lsr#8 strb r2,[r14,#-7] eor r1,r9,r1,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-13] @ load more input ldrhsb r9,[r12,#-9] strb r3,[r14,#-3] eor r2,r10,r2,lsr#8 strb r0,[r14,#-14] eor r3,r11,r3,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-5] ldrhsb r11,[r12,#-1] strb r1,[r14,#-10] strb r2,[r14,#-6] eor r0,r8,r0,lsr#8 strb r3,[r14,#-2] eor r1,r9,r1,lsr#8 strb r0,[r14,#-13] eor r2,r10,r2,lsr#8 strb r1,[r14,#-9] eor r3,r11,r3,lsr#8 strb r2,[r14,#-5] strb r3,[r14,#-1] add r8,sp,#4*(4+0) ldmia r8,{r8,r9,r10,r11} @ load key material add r0,sp,#4*(16+8) add r4,r4,r8 @ accumulate key material add r5,r5,r9 add r6,r6,r10 # ifdef __thumb2__ itete lo # endif eorlo r8,r8,r8 @ zero or ... ldrhsb r8,[r12],#16 @ ... 
load input eorlo r9,r9,r9 ldrhsb r9,[r12,#-12] add r7,r7,r11 # ifdef __thumb2__ itete lo # endif eorlo r10,r10,r10 ldrhsb r10,[r12,#-8] eorlo r11,r11,r11 ldrhsb r11,[r12,#-4] eor r4,r8,r4 @ xor with input (or zero) eor r5,r9,r5 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-15] @ load more input ldrhsb r9,[r12,#-11] eor r6,r10,r6 strb r4,[r14],#16 @ store output eor r7,r11,r7 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-7] ldrhsb r11,[r12,#-3] strb r5,[r14,#-12] eor r4,r8,r4,lsr#8 strb r6,[r14,#-8] eor r5,r9,r5,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-14] @ load more input ldrhsb r9,[r12,#-10] strb r7,[r14,#-4] eor r6,r10,r6,lsr#8 strb r4,[r14,#-15] eor r7,r11,r7,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-6] ldrhsb r11,[r12,#-2] strb r5,[r14,#-11] eor r4,r8,r4,lsr#8 strb r6,[r14,#-7] eor r5,r9,r5,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-13] @ load more input ldrhsb r9,[r12,#-9] strb r7,[r14,#-3] eor r6,r10,r6,lsr#8 strb r4,[r14,#-14] eor r7,r11,r7,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-5] ldrhsb r11,[r12,#-1] strb r5,[r14,#-10] strb r6,[r14,#-6] eor r4,r8,r4,lsr#8 strb r7,[r14,#-2] eor r5,r9,r5,lsr#8 strb r4,[r14,#-13] eor r6,r10,r6,lsr#8 strb r5,[r14,#-9] eor r7,r11,r7,lsr#8 strb r6,[r14,#-5] strb r7,[r14,#-1] add r8,sp,#4*(4+4) ldmia r8,{r8,r9,r10,r11} @ load key material ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half # ifdef __thumb2__ itt hi # endif strhi r10,[sp,#4*(16+10)] @ copy "rx" strhi r11,[sp,#4*(16+11)] @ copy "rx" add r0,r0,r8 @ accumulate key material add r1,r1,r9 add r2,r2,r10 # ifdef __thumb2__ itete lo # endif eorlo r8,r8,r8 @ zero or ... ldrhsb r8,[r12],#16 @ ... 
load input eorlo r9,r9,r9 ldrhsb r9,[r12,#-12] add r3,r3,r11 # ifdef __thumb2__ itete lo # endif eorlo r10,r10,r10 ldrhsb r10,[r12,#-8] eorlo r11,r11,r11 ldrhsb r11,[r12,#-4] eor r0,r8,r0 @ xor with input (or zero) eor r1,r9,r1 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-15] @ load more input ldrhsb r9,[r12,#-11] eor r2,r10,r2 strb r0,[r14],#16 @ store output eor r3,r11,r3 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-7] ldrhsb r11,[r12,#-3] strb r1,[r14,#-12] eor r0,r8,r0,lsr#8 strb r2,[r14,#-8] eor r1,r9,r1,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-14] @ load more input ldrhsb r9,[r12,#-10] strb r3,[r14,#-4] eor r2,r10,r2,lsr#8 strb r0,[r14,#-15] eor r3,r11,r3,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-6] ldrhsb r11,[r12,#-2] strb r1,[r14,#-11] eor r0,r8,r0,lsr#8 strb r2,[r14,#-7] eor r1,r9,r1,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-13] @ load more input ldrhsb r9,[r12,#-9] strb r3,[r14,#-3] eor r2,r10,r2,lsr#8 strb r0,[r14,#-14] eor r3,r11,r3,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-5] ldrhsb r11,[r12,#-1] strb r1,[r14,#-10] strb r2,[r14,#-6] eor r0,r8,r0,lsr#8 strb r3,[r14,#-2] eor r1,r9,r1,lsr#8 strb r0,[r14,#-13] eor r2,r10,r2,lsr#8 strb r1,[r14,#-9] eor r3,r11,r3,lsr#8 strb r2,[r14,#-5] strb r3,[r14,#-1] add r8,sp,#4*(4+8) ldmia r8,{r8,r9,r10,r11} @ load key material add r4,r4,r8 @ accumulate key material # ifdef __thumb2__ itt hi # endif addhi r8,r8,#1 @ next counter value strhi r8,[sp,#4*(12)] @ save next counter value add r5,r5,r9 add r6,r6,r10 # ifdef __thumb2__ itete lo # endif eorlo r8,r8,r8 @ zero or ... ldrhsb r8,[r12],#16 @ ... 
load input eorlo r9,r9,r9 ldrhsb r9,[r12,#-12] add r7,r7,r11 # ifdef __thumb2__ itete lo # endif eorlo r10,r10,r10 ldrhsb r10,[r12,#-8] eorlo r11,r11,r11 ldrhsb r11,[r12,#-4] eor r4,r8,r4 @ xor with input (or zero) eor r5,r9,r5 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-15] @ load more input ldrhsb r9,[r12,#-11] eor r6,r10,r6 strb r4,[r14],#16 @ store output eor r7,r11,r7 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-7] ldrhsb r11,[r12,#-3] strb r5,[r14,#-12] eor r4,r8,r4,lsr#8 strb r6,[r14,#-8] eor r5,r9,r5,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-14] @ load more input ldrhsb r9,[r12,#-10] strb r7,[r14,#-4] eor r6,r10,r6,lsr#8 strb r4,[r14,#-15] eor r7,r11,r7,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-6] ldrhsb r11,[r12,#-2] strb r5,[r14,#-11] eor r4,r8,r4,lsr#8 strb r6,[r14,#-7] eor r5,r9,r5,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-13] @ load more input ldrhsb r9,[r12,#-9] strb r7,[r14,#-3] eor r6,r10,r6,lsr#8 strb r4,[r14,#-14] eor r7,r11,r7,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-5] ldrhsb r11,[r12,#-1] strb r5,[r14,#-10] strb r6,[r14,#-6] eor r4,r8,r4,lsr#8 strb r7,[r14,#-2] eor r5,r9,r5,lsr#8 strb r4,[r14,#-13] eor r6,r10,r6,lsr#8 strb r5,[r14,#-9] eor r7,r11,r7,lsr#8 strb r6,[r14,#-5] strb r7,[r14,#-1] # ifdef __thumb2__ it ne # endif ldrne r8,[sp,#4*(32+2)] @ re-load len # ifdef __thumb2__ it hs # endif subhs r11,r8,#64 @ len-=64 bhi .Loop_outer beq .Ldone #endif .Ltail: ldr r12,[sp,#4*(32+1)] @ load inp add r9,sp,#4*(0) ldr r14,[sp,#4*(32+0)] @ load out .Loop_tail: ldrb r10,[r9],#1 @ read buffer on stack ldrb r11,[r12],#1 @ read input subs r8,r8,#1 eor r11,r11,r10 strb r11,[r14],#1 @ store output bne .Loop_tail .Ldone: add sp,sp,#4*(32+3) ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} .size ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .globl ChaCha20_ctr32_neon .hidden ChaCha20_ctr32_neon .type ChaCha20_ctr32_neon,%function .align 5 
ChaCha20_ctr32_neon: ldr r12,[sp,#0] @ pull pointer to counter and nonce stmdb sp!,{r0,r1,r2,r4-r11,lr} adr r14,.Lsigma vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI spec says so stmdb sp!,{r0,r1,r2,r3} vld1.32 {q1,q2},[r3] @ load key ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key sub sp,sp,#4*(16+16) vld1.32 {q3},[r12] @ load counter and nonce add r12,sp,#4*8 ldmia r14,{r0,r1,r2,r3} @ load sigma vld1.32 {q0},[r14]! @ load sigma vld1.32 {q12},[r14] @ one vst1.32 {q2,q3},[r12] @ copy 1/2key|counter|nonce vst1.32 {q0,q1},[sp] @ copy sigma|1/2key str r10,[sp,#4*(16+10)] @ off-load "rx" str r11,[sp,#4*(16+11)] @ off-load "rx" vshl.i32 d26,d24,#1 @ two vstr d24,[sp,#4*(16+0)] vshl.i32 d28,d24,#2 @ four vstr d26,[sp,#4*(16+2)] vmov q4,q0 vstr d28,[sp,#4*(16+4)] vmov q8,q0 vmov q5,q1 vmov q9,q1 b .Loop_neon_enter .align 4 .Loop_neon_outer: ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material cmp r11,#64*2 @ if len<=64*2 bls .Lbreak_neon @ switch to integer-only vmov q4,q0 str r11,[sp,#4*(32+2)] @ save len vmov q8,q0 str r12, [sp,#4*(32+1)] @ save inp vmov q5,q1 str r14, [sp,#4*(32+0)] @ save out vmov q9,q1 .Loop_neon_enter: ldr r11, [sp,#4*(15)] vadd.i32 q7,q3,q12 @ counter+1 ldr r12,[sp,#4*(12)] @ modulo-scheduled load vmov q6,q2 ldr r10, [sp,#4*(13)] vmov q10,q2 ldr r14,[sp,#4*(14)] vadd.i32 q11,q7,q12 @ counter+2 str r11, [sp,#4*(16+15)] mov r11,#10 add r12,r12,#3 @ counter+3 b .Loop_neon .align 4 .Loop_neon: subs r11,r11,#1 vadd.i32 q0,q0,q1 add r0,r0,r4 vadd.i32 q4,q4,q5 mov r12,r12,ror#16 vadd.i32 q8,q8,q9 add r1,r1,r5 veor q3,q3,q0 mov r10,r10,ror#16 veor q7,q7,q4 eor r12,r12,r0,ror#16 veor q11,q11,q8 eor r10,r10,r1,ror#16 vrev32.16 q3,q3 add r8,r8,r12 vrev32.16 q7,q7 mov r4,r4,ror#20 vrev32.16 q11,q11 add r9,r9,r10 vadd.i32 q2,q2,q3 mov r5,r5,ror#20 vadd.i32 q6,q6,q7 eor r4,r4,r8,ror#20 vadd.i32 q10,q10,q11 eor r5,r5,r9,ror#20 veor q12,q1,q2 add r0,r0,r4 veor q13,q5,q6 mov r12,r12,ror#24 veor q14,q9,q10 add r1,r1,r5 vshr.u32 q1,q12,#20 mov 
r10,r10,ror#24 vshr.u32 q5,q13,#20 eor r12,r12,r0,ror#24 vshr.u32 q9,q14,#20 eor r10,r10,r1,ror#24 vsli.32 q1,q12,#12 add r8,r8,r12 vsli.32 q5,q13,#12 mov r4,r4,ror#25 vsli.32 q9,q14,#12 add r9,r9,r10 vadd.i32 q0,q0,q1 mov r5,r5,ror#25 vadd.i32 q4,q4,q5 str r10,[sp,#4*(16+13)] vadd.i32 q8,q8,q9 ldr r10,[sp,#4*(16+15)] veor q12,q3,q0 eor r4,r4,r8,ror#25 veor q13,q7,q4 eor r5,r5,r9,ror#25 veor q14,q11,q8 str r8,[sp,#4*(16+8)] vshr.u32 q3,q12,#24 ldr r8,[sp,#4*(16+10)] vshr.u32 q7,q13,#24 add r2,r2,r6 vshr.u32 q11,q14,#24 mov r14,r14,ror#16 vsli.32 q3,q12,#8 str r9,[sp,#4*(16+9)] vsli.32 q7,q13,#8 ldr r9,[sp,#4*(16+11)] vsli.32 q11,q14,#8 add r3,r3,r7 vadd.i32 q2,q2,q3 mov r10,r10,ror#16 vadd.i32 q6,q6,q7 eor r14,r14,r2,ror#16 vadd.i32 q10,q10,q11 eor r10,r10,r3,ror#16 veor q12,q1,q2 add r8,r8,r14 veor q13,q5,q6 mov r6,r6,ror#20 veor q14,q9,q10 add r9,r9,r10 vshr.u32 q1,q12,#25 mov r7,r7,ror#20 vshr.u32 q5,q13,#25 eor r6,r6,r8,ror#20 vshr.u32 q9,q14,#25 eor r7,r7,r9,ror#20 vsli.32 q1,q12,#7 add r2,r2,r6 vsli.32 q5,q13,#7 mov r14,r14,ror#24 vsli.32 q9,q14,#7 add r3,r3,r7 vext.8 q2,q2,q2,#8 mov r10,r10,ror#24 vext.8 q6,q6,q6,#8 eor r14,r14,r2,ror#24 vext.8 q10,q10,q10,#8 eor r10,r10,r3,ror#24 vext.8 q1,q1,q1,#4 add r8,r8,r14 vext.8 q5,q5,q5,#4 mov r6,r6,ror#25 vext.8 q9,q9,q9,#4 add r9,r9,r10 vext.8 q3,q3,q3,#12 mov r7,r7,ror#25 vext.8 q7,q7,q7,#12 eor r6,r6,r8,ror#25 vext.8 q11,q11,q11,#12 eor r7,r7,r9,ror#25 vadd.i32 q0,q0,q1 add r0,r0,r5 vadd.i32 q4,q4,q5 mov r10,r10,ror#16 vadd.i32 q8,q8,q9 add r1,r1,r6 veor q3,q3,q0 mov r12,r12,ror#16 veor q7,q7,q4 eor r10,r10,r0,ror#16 veor q11,q11,q8 eor r12,r12,r1,ror#16 vrev32.16 q3,q3 add r8,r8,r10 vrev32.16 q7,q7 mov r5,r5,ror#20 vrev32.16 q11,q11 add r9,r9,r12 vadd.i32 q2,q2,q3 mov r6,r6,ror#20 vadd.i32 q6,q6,q7 eor r5,r5,r8,ror#20 vadd.i32 q10,q10,q11 eor r6,r6,r9,ror#20 veor q12,q1,q2 add r0,r0,r5 veor q13,q5,q6 mov r10,r10,ror#24 veor q14,q9,q10 add r1,r1,r6 vshr.u32 q1,q12,#20 mov r12,r12,ror#24 vshr.u32 q5,q13,#20 eor 
r10,r10,r0,ror#24 vshr.u32 q9,q14,#20 eor r12,r12,r1,ror#24 vsli.32 q1,q12,#12 add r8,r8,r10 vsli.32 q5,q13,#12 mov r5,r5,ror#25 vsli.32 q9,q14,#12 str r10,[sp,#4*(16+15)] vadd.i32 q0,q0,q1 ldr r10,[sp,#4*(16+13)] vadd.i32 q4,q4,q5 add r9,r9,r12 vadd.i32 q8,q8,q9 mov r6,r6,ror#25 veor q12,q3,q0 eor r5,r5,r8,ror#25 veor q13,q7,q4 eor r6,r6,r9,ror#25 veor q14,q11,q8 str r8,[sp,#4*(16+10)] vshr.u32 q3,q12,#24 ldr r8,[sp,#4*(16+8)] vshr.u32 q7,q13,#24 add r2,r2,r7 vshr.u32 q11,q14,#24 mov r10,r10,ror#16 vsli.32 q3,q12,#8 str r9,[sp,#4*(16+11)] vsli.32 q7,q13,#8 ldr r9,[sp,#4*(16+9)] vsli.32 q11,q14,#8 add r3,r3,r4 vadd.i32 q2,q2,q3 mov r14,r14,ror#16 vadd.i32 q6,q6,q7 eor r10,r10,r2,ror#16 vadd.i32 q10,q10,q11 eor r14,r14,r3,ror#16 veor q12,q1,q2 add r8,r8,r10 veor q13,q5,q6 mov r7,r7,ror#20 veor q14,q9,q10 add r9,r9,r14 vshr.u32 q1,q12,#25 mov r4,r4,ror#20 vshr.u32 q5,q13,#25 eor r7,r7,r8,ror#20 vshr.u32 q9,q14,#25 eor r4,r4,r9,ror#20 vsli.32 q1,q12,#7 add r2,r2,r7 vsli.32 q5,q13,#7 mov r10,r10,ror#24 vsli.32 q9,q14,#7 add r3,r3,r4 vext.8 q2,q2,q2,#8 mov r14,r14,ror#24 vext.8 q6,q6,q6,#8 eor r10,r10,r2,ror#24 vext.8 q10,q10,q10,#8 eor r14,r14,r3,ror#24 vext.8 q1,q1,q1,#12 add r8,r8,r10 vext.8 q5,q5,q5,#12 mov r7,r7,ror#25 vext.8 q9,q9,q9,#12 add r9,r9,r14 vext.8 q3,q3,q3,#4 mov r4,r4,ror#25 vext.8 q7,q7,q7,#4 eor r7,r7,r8,ror#25 vext.8 q11,q11,q11,#4 eor r4,r4,r9,ror#25 bne .Loop_neon add r11,sp,#32 vld1.32 {q12,q13},[sp] @ load key material vld1.32 {q14,q15},[r11] ldr r11,[sp,#4*(32+2)] @ load len str r8, [sp,#4*(16+8)] @ modulo-scheduled store str r9, [sp,#4*(16+9)] str r12,[sp,#4*(16+12)] str r10, [sp,#4*(16+13)] str r14,[sp,#4*(16+14)] @ at this point we have first half of 512-bit result in @ rx and second half at sp+4*(16+8) ldr r12,[sp,#4*(32+1)] @ load inp ldr r14,[sp,#4*(32+0)] @ load out vadd.i32 q0,q0,q12 @ accumulate key material vadd.i32 q4,q4,q12 vadd.i32 q8,q8,q12 vldr d24,[sp,#4*(16+0)] @ one vadd.i32 q1,q1,q13 vadd.i32 q5,q5,q13 vadd.i32 q9,q9,q13 vldr 
d26,[sp,#4*(16+2)] @ two vadd.i32 q2,q2,q14 vadd.i32 q6,q6,q14 vadd.i32 q10,q10,q14 vadd.i32 d14,d14,d24 @ counter+1 vadd.i32 d22,d22,d26 @ counter+2 vadd.i32 q3,q3,q15 vadd.i32 q7,q7,q15 vadd.i32 q11,q11,q15 cmp r11,#64*4 blo .Ltail_neon vld1.8 {q12,q13},[r12]! @ load input mov r11,sp vld1.8 {q14,q15},[r12]! veor q0,q0,q12 @ xor with input veor q1,q1,q13 vld1.8 {q12,q13},[r12]! veor q2,q2,q14 veor q3,q3,q15 vld1.8 {q14,q15},[r12]! veor q4,q4,q12 vst1.8 {q0,q1},[r14]! @ store output veor q5,q5,q13 vld1.8 {q12,q13},[r12]! veor q6,q6,q14 vst1.8 {q2,q3},[r14]! veor q7,q7,q15 vld1.8 {q14,q15},[r12]! veor q8,q8,q12 vld1.32 {q0,q1},[r11]! @ load for next iteration veor d25,d25,d25 vldr d24,[sp,#4*(16+4)] @ four veor q9,q9,q13 vld1.32 {q2,q3},[r11] veor q10,q10,q14 vst1.8 {q4,q5},[r14]! veor q11,q11,q15 vst1.8 {q6,q7},[r14]! vadd.i32 d6,d6,d24 @ next counter value vldr d24,[sp,#4*(16+0)] @ one ldmia sp,{r8,r9,r10,r11} @ load key material add r0,r0,r8 @ accumulate key material ldr r8,[r12],#16 @ load input vst1.8 {q8,q9},[r14]! add r1,r1,r9 ldr r9,[r12,#-12] vst1.8 {q10,q11},[r14]! 
add r2,r2,r10 ldr r10,[r12,#-8] add r3,r3,r11 ldr r11,[r12,#-4] # ifdef __ARMEB__ rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 # endif eor r0,r0,r8 @ xor with input add r8,sp,#4*(4) eor r1,r1,r9 str r0,[r14],#16 @ store output eor r2,r2,r10 str r1,[r14,#-12] eor r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material str r2,[r14,#-8] str r3,[r14,#-4] add r4,r4,r8 @ accumulate key material ldr r8,[r12],#16 @ load input add r5,r5,r9 ldr r9,[r12,#-12] add r6,r6,r10 ldr r10,[r12,#-8] add r7,r7,r11 ldr r11,[r12,#-4] # ifdef __ARMEB__ rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif eor r4,r4,r8 add r8,sp,#4*(8) eor r5,r5,r9 str r4,[r14],#16 @ store output eor r6,r6,r10 str r5,[r14,#-12] eor r7,r7,r11 ldmia r8,{r8,r9,r10,r11} @ load key material str r6,[r14,#-8] add r0,sp,#4*(16+8) str r7,[r14,#-4] ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half add r0,r0,r8 @ accumulate key material ldr r8,[r12],#16 @ load input add r1,r1,r9 ldr r9,[r12,#-12] # ifdef __thumb2__ it hi # endif strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it add r2,r2,r10 ldr r10,[r12,#-8] # ifdef __thumb2__ it hi # endif strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it add r3,r3,r11 ldr r11,[r12,#-4] # ifdef __ARMEB__ rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 # endif eor r0,r0,r8 add r8,sp,#4*(12) eor r1,r1,r9 str r0,[r14],#16 @ store output eor r2,r2,r10 str r1,[r14,#-12] eor r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material str r2,[r14,#-8] str r3,[r14,#-4] add r4,r4,r8 @ accumulate key material add r8,r8,#4 @ next counter value add r5,r5,r9 str r8,[sp,#4*(12)] @ save next counter value ldr r8,[r12],#16 @ load input add r6,r6,r10 add r4,r4,#3 @ counter+3 ldr r9,[r12,#-12] add r7,r7,r11 ldr r10,[r12,#-8] ldr r11,[r12,#-4] # ifdef __ARMEB__ rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif eor r4,r4,r8 # ifdef __thumb2__ it hi # endif ldrhi r8,[sp,#4*(32+2)] @ re-load len eor r5,r5,r9 eor r6,r6,r10 str r4,[r14],#16 @ store output eor r7,r7,r11 str r5,[r14,#-12] sub r11,r8,#64*4 @ len-=64*4 str 
r6,[r14,#-8] str r7,[r14,#-4] bhi .Loop_neon_outer b .Ldone_neon .align 4 .Lbreak_neon: @ harmonize NEON and integer-only stack frames: load data @ from NEON frame, but save to integer-only one; distance @ between the two is 4*(32+4+16-32)=4*(20). str r11, [sp,#4*(20+32+2)] @ save len add r11,sp,#4*(32+4) str r12, [sp,#4*(20+32+1)] @ save inp str r14, [sp,#4*(20+32+0)] @ save out ldr r12,[sp,#4*(16+10)] ldr r14,[sp,#4*(16+11)] vldmia r11,{d8,d9,d10,d11,d12,d13,d14,d15} @ fulfill ABI requirement str r12,[sp,#4*(20+16+10)] @ copy "rx" str r14,[sp,#4*(20+16+11)] @ copy "rx" ldr r11, [sp,#4*(15)] ldr r12,[sp,#4*(12)] @ modulo-scheduled load ldr r10, [sp,#4*(13)] ldr r14,[sp,#4*(14)] str r11, [sp,#4*(20+16+15)] add r11,sp,#4*(20) vst1.32 {q0,q1},[r11]! @ copy key add sp,sp,#4*(20) @ switch frame vst1.32 {q2,q3},[r11] mov r11,#10 b .Loop @ go integer-only .align 4 .Ltail_neon: cmp r11,#64*3 bhs .L192_or_more_neon cmp r11,#64*2 bhs .L128_or_more_neon cmp r11,#64*1 bhs .L64_or_more_neon add r8,sp,#4*(8) vst1.8 {q0,q1},[sp] add r10,sp,#4*(0) vst1.8 {q2,q3},[r8] b .Loop_tail_neon .align 4 .L64_or_more_neon: vld1.8 {q12,q13},[r12]! vld1.8 {q14,q15},[r12]! veor q0,q0,q12 veor q1,q1,q13 veor q2,q2,q14 veor q3,q3,q15 vst1.8 {q0,q1},[r14]! vst1.8 {q2,q3},[r14]! beq .Ldone_neon add r8,sp,#4*(8) vst1.8 {q4,q5},[sp] add r10,sp,#4*(0) vst1.8 {q6,q7},[r8] sub r11,r11,#64*1 @ len-=64*1 b .Loop_tail_neon .align 4 .L128_or_more_neon: vld1.8 {q12,q13},[r12]! vld1.8 {q14,q15},[r12]! veor q0,q0,q12 veor q1,q1,q13 vld1.8 {q12,q13},[r12]! veor q2,q2,q14 veor q3,q3,q15 vld1.8 {q14,q15},[r12]! veor q4,q4,q12 veor q5,q5,q13 vst1.8 {q0,q1},[r14]! veor q6,q6,q14 vst1.8 {q2,q3},[r14]! veor q7,q7,q15 vst1.8 {q4,q5},[r14]! vst1.8 {q6,q7},[r14]! beq .Ldone_neon add r8,sp,#4*(8) vst1.8 {q8,q9},[sp] add r10,sp,#4*(0) vst1.8 {q10,q11},[r8] sub r11,r11,#64*2 @ len-=64*2 b .Loop_tail_neon .align 4 .L192_or_more_neon: vld1.8 {q12,q13},[r12]! vld1.8 {q14,q15},[r12]! 
veor q0,q0,q12 veor q1,q1,q13 vld1.8 {q12,q13},[r12]! veor q2,q2,q14 veor q3,q3,q15 vld1.8 {q14,q15},[r12]! veor q4,q4,q12 veor q5,q5,q13 vld1.8 {q12,q13},[r12]! veor q6,q6,q14 vst1.8 {q0,q1},[r14]! veor q7,q7,q15 vld1.8 {q14,q15},[r12]! veor q8,q8,q12 vst1.8 {q2,q3},[r14]! veor q9,q9,q13 vst1.8 {q4,q5},[r14]! veor q10,q10,q14 vst1.8 {q6,q7},[r14]! veor q11,q11,q15 vst1.8 {q8,q9},[r14]! vst1.8 {q10,q11},[r14]! beq .Ldone_neon ldmia sp,{r8,r9,r10,r11} @ load key material add r0,r0,r8 @ accumulate key material add r8,sp,#4*(4) add r1,r1,r9 add r2,r2,r10 add r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material add r4,r4,r8 @ accumulate key material add r8,sp,#4*(8) add r5,r5,r9 add r6,r6,r10 add r7,r7,r11 ldmia r8,{r8,r9,r10,r11} @ load key material # ifdef __ARMEB__ rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif stmia sp,{r0,r1,r2,r3,r4,r5,r6,r7} add r0,sp,#4*(16+8) ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half add r0,r0,r8 @ accumulate key material add r8,sp,#4*(12) add r1,r1,r9 add r2,r2,r10 add r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material add r4,r4,r8 @ accumulate key material add r8,sp,#4*(8) add r5,r5,r9 add r4,r4,#3 @ counter+3 add r6,r6,r10 add r7,r7,r11 ldr r11,[sp,#4*(32+2)] @ re-load len # ifdef __ARMEB__ rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif stmia r8,{r0,r1,r2,r3,r4,r5,r6,r7} add r10,sp,#4*(0) sub r11,r11,#64*3 @ len-=64*3 .Loop_tail_neon: ldrb r8,[r10],#1 @ read buffer on stack ldrb r9,[r12],#1 @ read input subs r11,r11,#1 eor r8,r8,r9 strb r8,[r14],#1 @ store output bne .Loop_tail_neon .Ldone_neon: add sp,sp,#4*(32+4) vldmia sp,{d8,d9,d10,d11,d12,d13,d14,d15} add sp,sp,#4*(16+3) ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} .size ChaCha20_ctr32_neon,.-ChaCha20_ctr32_neon #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif 
================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha-armv8-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include .section __TEXT,__const .align 5 Lsigma: .quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral Lone: .long 1,0,0,0 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text .globl _ChaCha20_ctr32_nohw .private_extern _ChaCha20_ctr32_nohw .align 5 _ChaCha20_ctr32_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,Lsigma@PAGE add x5,x5,Lsigma@PAGEOFF stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ldp x28,x30,[x4] // load counter #ifdef __AARCH64EB__ ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif Loop_outer: mov w5,w22 // unpack key block lsr x6,x22,#32 mov w7,w23 lsr x8,x23,#32 mov w9,w24 lsr x10,x24,#32 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#10 subs x2,x2,#64 Loop: sub x4,x4,#1 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 ror w21,w21,#16 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#20 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 add w5,w5,w9 add w6,w6,w10 add 
w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 ror w21,w21,#24 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#25 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#16 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 ror w9,w9,#20 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#24 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 ror w9,w9,#25 cbnz x4,Loop add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 b.lo Ltail add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 
// increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.hi Loop_outer ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .align 4 Ltail: add x2,x2,#64 Less_than_64: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif stp x5,x7,[sp,#0] stp x9,x11,[sp,#16] stp x13,x15,[sp,#32] stp x17,x20,[sp,#48] Loop_tail: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb w10,[x0,x2] cbnz x2,Loop_tail stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl _ChaCha20_ctr32_neon .private_extern _ChaCha20_ctr32_neon .align 5 _ChaCha20_ctr32_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 adrp x5,Lsigma@PAGE add x5,x5,Lsigma@PAGEOFF stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] cmp x2,#512 b.hs L512_or_more_neon sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 Loop_outer_neon: mov w5,w22 // unpack key block lsr x6,x22,#32 mov v0.16b,v24.16b mov w7,w23 lsr x8,x23,#32 mov v4.16b,v24.16b mov w9,w24 lsr x10,x24,#32 mov v16.16b,v24.16b mov w11,w25 mov v1.16b,v25.16b lsr x12,x25,#32 mov v5.16b,v25.16b mov w13,w26 mov v17.16b,v25.16b lsr x14,x26,#32 mov v3.16b,v27.16b mov w15,w27 mov v7.16b,v28.16b lsr x16,x27,#32 mov v19.16b,v29.16b mov w17,w28 mov v2.16b,v26.16b lsr x19,x28,#32 mov v6.16b,v26.16b mov w20,w30 mov v18.16b,v26.16b lsr x21,x30,#32 mov x4,#10 subs x2,x2,#256 Loop_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v16.4s,v16.4s,v17.4s add w7,w7,w11 eor v3.16b,v3.16b,v0.16b add w8,w8,w12 eor v7.16b,v7.16b,v4.16b eor w17,w17,w5 eor v19.16b,v19.16b,v16.16b eor w19,w19,w6 rev32 v3.8h,v3.8h eor w20,w20,w7 rev32 v7.8h,v7.8h eor w21,w21,w8 rev32 v19.8h,v19.8h ror w17,w17,#16 add v2.4s,v2.4s,v3.4s ror w19,w19,#16 add v6.4s,v6.4s,v7.4s ror w20,w20,#16 add v18.4s,v18.4s,v19.4s ror w21,w21,#16 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#20 add w16,w16,w21 ushr v5.4s,v21.4s,#20 eor w9,w9,w13 ushr v17.4s,v22.4s,#20 eor w10,w10,w14 sli v1.4s,v20.4s,#12 eor w11,w11,w15 sli v5.4s,v21.4s,#12 eor w12,w12,w16 sli v17.4s,v22.4s,#12 ror 
w9,w9,#20 add v0.4s,v0.4s,v1.4s ror w10,w10,#20 add v4.4s,v4.4s,v5.4s ror w11,w11,#20 add v16.4s,v16.4s,v17.4s ror w12,w12,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w9 eor v21.16b,v7.16b,v4.16b add w6,w6,w10 eor v22.16b,v19.16b,v16.16b add w7,w7,w11 ushr v3.4s,v20.4s,#24 add w8,w8,w12 ushr v7.4s,v21.4s,#24 eor w17,w17,w5 ushr v19.4s,v22.4s,#24 eor w19,w19,w6 sli v3.4s,v20.4s,#8 eor w20,w20,w7 sli v7.4s,v21.4s,#8 eor w21,w21,w8 sli v19.4s,v22.4s,#8 ror w17,w17,#24 add v2.4s,v2.4s,v3.4s ror w19,w19,#24 add v6.4s,v6.4s,v7.4s ror w20,w20,#24 add v18.4s,v18.4s,v19.4s ror w21,w21,#24 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#25 add w16,w16,w21 ushr v5.4s,v21.4s,#25 eor w9,w9,w13 ushr v17.4s,v22.4s,#25 eor w10,w10,w14 sli v1.4s,v20.4s,#7 eor w11,w11,w15 sli v5.4s,v21.4s,#7 eor w12,w12,w16 sli v17.4s,v22.4s,#7 ror w9,w9,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w10,w10,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w10 add v4.4s,v4.4s,v5.4s add w6,w6,w11 add v16.4s,v16.4s,v17.4s add w7,w7,w12 eor v3.16b,v3.16b,v0.16b add w8,w8,w9 eor v7.16b,v7.16b,v4.16b eor w21,w21,w5 eor v19.16b,v19.16b,v16.16b eor w17,w17,w6 rev32 v3.8h,v3.8h eor w19,w19,w7 rev32 v7.8h,v7.8h eor w20,w20,w8 rev32 v19.8h,v19.8h ror w21,w21,#16 add v2.4s,v2.4s,v3.4s ror w17,w17,#16 add v6.4s,v6.4s,v7.4s ror w19,w19,#16 add v18.4s,v18.4s,v19.4s ror w20,w20,#16 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#20 add w14,w14,w20 ushr v5.4s,v21.4s,#20 eor w10,w10,w15 ushr v17.4s,v22.4s,#20 eor w11,w11,w16 sli v1.4s,v20.4s,#12 eor w12,w12,w13 sli v5.4s,v21.4s,#12 eor 
w9,w9,w14 sli v17.4s,v22.4s,#12 ror w10,w10,#20 add v0.4s,v0.4s,v1.4s ror w11,w11,#20 add v4.4s,v4.4s,v5.4s ror w12,w12,#20 add v16.4s,v16.4s,v17.4s ror w9,w9,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w10 eor v21.16b,v7.16b,v4.16b add w6,w6,w11 eor v22.16b,v19.16b,v16.16b add w7,w7,w12 ushr v3.4s,v20.4s,#24 add w8,w8,w9 ushr v7.4s,v21.4s,#24 eor w21,w21,w5 ushr v19.4s,v22.4s,#24 eor w17,w17,w6 sli v3.4s,v20.4s,#8 eor w19,w19,w7 sli v7.4s,v21.4s,#8 eor w20,w20,w8 sli v19.4s,v22.4s,#8 ror w21,w21,#24 add v2.4s,v2.4s,v3.4s ror w17,w17,#24 add v6.4s,v6.4s,v7.4s ror w19,w19,#24 add v18.4s,v18.4s,v19.4s ror w20,w20,#24 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#25 add w14,w14,w20 ushr v5.4s,v21.4s,#25 eor w10,w10,w15 ushr v17.4s,v22.4s,#25 eor w11,w11,w16 sli v1.4s,v20.4s,#7 eor w12,w12,w13 sli v5.4s,v21.4s,#7 eor w9,w9,w14 sli v17.4s,v22.4s,#7 ror w10,w10,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w11,w11,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w12,w12,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 cbnz x4,Loop_neon add w5,w5,w22 // accumulate key block add v0.4s,v0.4s,v24.4s add x6,x6,x22,lsr#32 add v4.4s,v4.4s,v24.4s add w7,w7,w23 add v16.4s,v16.4s,v24.4s add x8,x8,x23,lsr#32 add v2.4s,v2.4s,v26.4s add w9,w9,w24 add v6.4s,v6.4s,v26.4s add x10,x10,x24,lsr#32 add v18.4s,v18.4s,v26.4s add w11,w11,w25 add v3.4s,v3.4s,v27.4s add x12,x12,x25,lsr#32 add w13,w13,w26 add v7.4s,v7.4s,v28.4s add x14,x14,x26,lsr#32 add w15,w15,w27 add v19.4s,v19.4s,v29.4s add x16,x16,x27,lsr#32 add w17,w17,w28 add v1.4s,v1.4s,v25.4s add x19,x19,x28,lsr#32 add w20,w20,w30 add v5.4s,v5.4s,v25.4s add x21,x21,x30,lsr#32 add v17.4s,v17.4s,v25.4s b.lo Ltail_neon add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load 
input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v20.16b eor x15,x15,x16 eor v1.16b,v1.16b,v21.16b eor x17,x17,x19 eor v2.16b,v2.16b,v22.16b eor x20,x20,x21 eor v3.16b,v3.16b,v23.16b ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] add v27.4s,v27.4s,v31.4s // += 4 stp x13,x15,[x0,#32] add v28.4s,v28.4s,v31.4s stp x17,x20,[x0,#48] add v29.4s,v29.4s,v31.4s add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 eor v16.16b,v16.16b,v0.16b eor v17.16b,v17.16b,v1.16b eor v18.16b,v18.16b,v2.16b eor v19.16b,v19.16b,v3.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 b.hi Loop_outer_neon ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret Ltail_neon: add x2,x2,#256 cmp x2,#64 b.lo Less_than_64 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor 
x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.eq Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo Less_than_128 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v0.16b,v0.16b,v20.16b eor v1.16b,v1.16b,v21.16b eor v2.16b,v2.16b,v22.16b eor v3.16b,v3.16b,v23.16b st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 b.eq Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo Less_than_192 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 b.eq Ldone_neon sub x2,x2,#64 st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp] b Last_neon Less_than_128: st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp] b Last_neon Less_than_192: st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp] b Last_neon .align 4 Last_neon: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 Loop_tail_neon: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb w10,[x0,x2] cbnz x2,Loop_tail_neon stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] Ldone_neon: ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .align 5 ChaCha20_512_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 adrp x5,Lsigma@PAGE add x5,x5,Lsigma@PAGEOFF stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] L512_or_more_neon: sub sp,sp,#128+64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 stp q24,q25,[sp,#0] // off-load key block, invariant part add v27.4s,v27.4s,v31.4s // not typo str q26,[sp,#32] add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s add v30.4s,v29.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 stp d8,d9,[sp,#128+0] // meet ABI requirements stp d10,d11,[sp,#128+16] stp d12,d13,[sp,#128+32] stp d14,d15,[sp,#128+48] sub x2,x2,#512 // not typo Loop_outer_512_neon: mov v0.16b,v24.16b mov v4.16b,v24.16b mov v8.16b,v24.16b mov v12.16b,v24.16b mov v16.16b,v24.16b mov v20.16b,v24.16b mov v1.16b,v25.16b mov w5,w22 // unpack key block mov v5.16b,v25.16b lsr x6,x22,#32 mov v9.16b,v25.16b mov w7,w23 mov v13.16b,v25.16b lsr x8,x23,#32 mov v17.16b,v25.16b mov w9,w24 mov v21.16b,v25.16b lsr x10,x24,#32 mov v3.16b,v27.16b mov w11,w25 mov v7.16b,v28.16b lsr x12,x25,#32 mov v11.16b,v29.16b mov w13,w26 mov v15.16b,v30.16b lsr x14,x26,#32 mov v2.16b,v26.16b mov w15,w27 mov v6.16b,v26.16b lsr x16,x27,#32 add v19.4s,v3.4s,v31.4s // +4 mov w17,w28 add v23.4s,v7.4s,v31.4s // +4 lsr x19,x28,#32 mov v10.16b,v26.16b mov w20,w30 mov v14.16b,v26.16b lsr x21,x30,#32 mov v18.16b,v26.16b stp q27,q28,[sp,#48] // off-load key block, variable part mov v22.16b,v26.16b str q29,[sp,#80] mov x4,#5 subs x2,x2,#512 Loop_upper_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor 
w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor 
w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add 
v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor 
v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext 
v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,Loop_upper_neon add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter mov w5,w22 // unpack key block lsr x6,x22,#32 stp x9,x11,[x0,#16] mov w7,w23 lsr x8,x23,#32 stp x13,x15,[x0,#32] mov w9,w24 lsr x10,x24,#32 stp x17,x20,[x0,#48] add x0,x0,#64 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#5 Loop_lower_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add 
w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add 
v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 
eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 
add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,Loop_lower_neon add w5,w5,w22 // accumulate key block ldp q24,q25,[sp,#0] add x6,x6,x22,lsr#32 ldp q26,q27,[sp,#32] add w7,w7,w23 ldp q28,q29,[sp,#64] add x8,x8,x23,lsr#32 add v0.4s,v0.4s,v24.4s add w9,w9,w24 add v4.4s,v4.4s,v24.4s add x10,x10,x24,lsr#32 add v8.4s,v8.4s,v24.4s add w11,w11,w25 add 
v12.4s,v12.4s,v24.4s add x12,x12,x25,lsr#32 add v16.4s,v16.4s,v24.4s add w13,w13,w26 add v20.4s,v20.4s,v24.4s add x14,x14,x26,lsr#32 add v2.4s,v2.4s,v26.4s add w15,w15,w27 add v6.4s,v6.4s,v26.4s add x16,x16,x27,lsr#32 add v10.4s,v10.4s,v26.4s add w17,w17,w28 add v14.4s,v14.4s,v26.4s add x19,x19,x28,lsr#32 add v18.4s,v18.4s,v26.4s add w20,w20,w30 add v22.4s,v22.4s,v26.4s add x21,x21,x30,lsr#32 add v19.4s,v19.4s,v31.4s // +4 add x5,x5,x6,lsl#32 // pack add v23.4s,v23.4s,v31.4s // +4 add x7,x7,x8,lsl#32 add v3.4s,v3.4s,v27.4s ldp x6,x8,[x1,#0] // load input add v7.4s,v7.4s,v28.4s add x9,x9,x10,lsl#32 add v11.4s,v11.4s,v29.4s add x11,x11,x12,lsl#32 add v15.4s,v15.4s,v30.4s ldp x10,x12,[x1,#16] add v19.4s,v19.4s,v27.4s add x13,x13,x14,lsl#32 add v23.4s,v23.4s,v28.4s add x15,x15,x16,lsl#32 add v1.4s,v1.4s,v25.4s ldp x14,x16,[x1,#32] add v5.4s,v5.4s,v25.4s add x17,x17,x19,lsl#32 add v9.4s,v9.4s,v25.4s add x20,x20,x21,lsl#32 add v13.4s,v13.4s,v25.4s ldp x19,x21,[x1,#48] add v17.4s,v17.4s,v25.4s add x1,x1,#64 add v21.4s,v21.4s,v25.4s #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v24.16b eor x15,x15,x16 eor v1.16b,v1.16b,v25.16b eor x17,x17,x19 eor v2.16b,v2.16b,v26.16b eor x20,x20,x21 eor v3.16b,v3.16b,v27.16b ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#7 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v24.16b eor v5.16b,v5.16b,v25.16b eor v6.16b,v6.16b,v26.16b eor v7.16b,v7.16b,v27.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 eor v8.16b,v8.16b,v0.16b ldp q24,q25,[sp,#0] eor v9.16b,v9.16b,v1.16b ldp q26,q27,[sp,#32] eor 
v10.16b,v10.16b,v2.16b eor v11.16b,v11.16b,v3.16b st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64 ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64 eor v12.16b,v12.16b,v4.16b eor v13.16b,v13.16b,v5.16b eor v14.16b,v14.16b,v6.16b eor v15.16b,v15.16b,v7.16b st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64 ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64 eor v16.16b,v16.16b,v8.16b eor v17.16b,v17.16b,v9.16b eor v18.16b,v18.16b,v10.16b eor v19.16b,v19.16b,v11.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 shl v0.4s,v31.4s,#1 // 4 -> 8 eor v20.16b,v20.16b,v12.16b eor v21.16b,v21.16b,v13.16b eor v22.16b,v22.16b,v14.16b eor v23.16b,v23.16b,v15.16b st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64 add v27.4s,v27.4s,v0.4s // += 8 add v28.4s,v28.4s,v0.4s add v29.4s,v29.4s,v0.4s add v30.4s,v30.4s,v0.4s b.hs Loop_outer_512_neon adds x2,x2,#512 ushr v0.4s,v31.4s,#2 // 4 -> 1 ldp d8,d9,[sp,#128+0] // meet ABI requirements ldp d10,d11,[sp,#128+16] ldp d12,d13,[sp,#128+32] ldp d14,d15,[sp,#128+48] stp q24,q31,[sp,#0] // wipe off-load area stp q24,q31,[sp,#32] stp q24,q31,[sp,#64] b.eq Ldone_512_neon cmp x2,#192 sub v27.4s,v27.4s,v0.4s // -= 1 sub v28.4s,v28.4s,v0.4s sub v29.4s,v29.4s,v0.4s add sp,sp,#128 b.hs Loop_outer_neon eor v25.16b,v25.16b,v25.16b eor v26.16b,v26.16b,v26.16b eor v27.16b,v27.16b,v27.16b eor v28.16b,v28.16b,v28.16b eor v29.16b,v29.16b,v29.16b eor v30.16b,v30.16b,v30.16b b Loop_outer Ldone_512_neon: ldp x19,x20,[x29,#16] add sp,sp,#128+64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha-armv8-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a 
// NOTE(review): this region is machine-generated BoringSSL AArch64 assembly
// ("Do not edit by hand", below). The extraction that produced this view has
// collapsed the original one-instruction-per-line layout onto a few very long
// lines; the lines below are preserved byte-identical, with only these
// standalone comment lines added between them.
similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include .section .rodata .align 5 .Lsigma: .quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral .Lone: .long 1,0,0,0 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text .globl ChaCha20_ctr32_nohw .hidden ChaCha20_ctr32_nohw .type ChaCha20_ctr32_nohw,%function .align 5 ChaCha20_ctr32_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,.Lsigma add x5,x5,:lo12:.Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ldp x28,x30,[x4] // load counter #ifdef __AARCH64EB__ ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif .Loop_outer: mov w5,w22 // unpack key block lsr x6,x22,#32 mov w7,w23 lsr x8,x23,#32 mov w9,w24 lsr x10,x24,#32 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#10 subs x2,x2,#64 .Loop: sub x4,x4,#1 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 ror w21,w21,#16 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#20 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 ror w21,w21,#24 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor
// ChaCha20_ctr32_nohw — scalar fallback. Arguments (per the loads above):
// x0 = out, x1 = in, x2 = len, x3 = key (ldp x24,x25,[x3]), x4 = 32-bit
// counter block (ldp x28,x30,[x4]). The 16-word ChaCha state lives in
// w5..w21; .Loop runs 10 iterations (mov x4,#10; sub x4,x4,#1 ...
// cbnz x4,.Loop), each containing two sets of add/eor/ror quarter-round
// operations (rotations by #16/#20/#24/#25 — ChaCha20's 16/12/8/7-bit
// left-rotations expressed as right-rotations). Each .Loop_outer pass
// consumes 64 bytes (subs x2,x2,#64) and bumps the counter word by 1
// (add x28,x28,#1 below).
w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#25 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#16 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 ror w9,w9,#20 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#24 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 ror w9,w9,#25 cbnz x4,.Loop add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 b.lo .Ltail add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.hi .Loop_outer ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp
// Partial-block tail (.Ltail / .Less_than_64): the final keystream block is
// packed into x5..x20, staged on the stack (stp x5,x7,[sp,#0] ...), and
// .Loop_tail XORs input with it byte-by-byte (ldrb/eor/strb). The stack
// staging area is then wiped with stp xzr,xzr before registers are restored
// — keystream material must not be left on the stack.
x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .align 4 .Ltail: add x2,x2,#64 .Less_than_64: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif stp x5,x7,[sp,#0] stp x9,x11,[sp,#16] stp x13,x15,[sp,#32] stp x17,x20,[sp,#48] .Loop_tail: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb w10,[x0,x2] cbnz x2,.Loop_tail stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .size ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw .globl ChaCha20_ctr32_neon .hidden ChaCha20_ctr32_neon .type ChaCha20_ctr32_neon,%function .align 5 ChaCha20_ctr32_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]!
// ChaCha20_ctr32_neon — NEON variant, same argument registers as the scalar
// routine. Inputs of 512 bytes or more are diverted to the 512-byte kernel
// (cmp x2,#512; b.hs .L512_or_more_neon). Otherwise three vector state
// copies plus one scalar state are run in parallel: counters v27/v28/v29
// are counter+1/+2/+3 (add v27.4s,v27.4s,v31.4s // += 1, then chained
// adds), and v31 is scaled to a stride of 4 (shl v31.4s,v31.4s,#2).
add x29,sp,#0 adrp x5,.Lsigma add x5,x5,:lo12:.Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] cmp x2,#512 b.hs .L512_or_more_neon sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 .Loop_outer_neon: mov w5,w22 // unpack key block lsr x6,x22,#32 mov v0.16b,v24.16b mov w7,w23 lsr x8,x23,#32 mov v4.16b,v24.16b mov w9,w24 lsr x10,x24,#32 mov v16.16b,v24.16b mov w11,w25 mov v1.16b,v25.16b lsr x12,x25,#32 mov v5.16b,v25.16b mov w13,w26 mov v17.16b,v25.16b lsr x14,x26,#32 mov v3.16b,v27.16b mov w15,w27 mov v7.16b,v28.16b lsr x16,x27,#32 mov v19.16b,v29.16b mov w17,w28 mov v2.16b,v26.16b lsr x19,x28,#32 mov v6.16b,v26.16b mov w20,w30 mov v18.16b,v26.16b lsr x21,x30,#32 mov x4,#10 subs x2,x2,#256 .Loop_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v16.4s,v16.4s,v17.4s add w7,w7,w11 eor v3.16b,v3.16b,v0.16b add w8,w8,w12 eor v7.16b,v7.16b,v4.16b eor w17,w17,w5 eor v19.16b,v19.16b,v16.16b eor w19,w19,w6 rev32 v3.8h,v3.8h eor w20,w20,w7 rev32 v7.8h,v7.8h eor w21,w21,w8 rev32 v19.8h,v19.8h ror w17,w17,#16 add v2.4s,v2.4s,v3.4s ror w19,w19,#16 add v6.4s,v6.4s,v7.4s ror w20,w20,#16 add v18.4s,v18.4s,v19.4s ror w21,w21,#16 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#20 add w16,w16,w21 ushr v5.4s,v21.4s,#20 eor w9,w9,w13 ushr v17.4s,v22.4s,#20 eor w10,w10,w14 sli v1.4s,v20.4s,#12 eor w11,w11,w15 sli v5.4s,v21.4s,#12 eor w12,w12,w16 sli v17.4s,v22.4s,#12 ror
// .Loop_neon interleaves the scalar quarter-rounds (w5..w21) with three
// vector block computations. Vector rotations: by 16 via rev32 .8h, by
// 12/8/7 via ushr+sli pairs (shift-right #20/#24/#25 combined with
// shift-left-insert #12/#8/#7); the ext #8/#12/#4 instructions perform the
// in-register diagonalization between the column and diagonal half-rounds.
// Instruction order here is scheduling-critical — generated code, keep as is.
w9,w9,#20 add v0.4s,v0.4s,v1.4s ror w10,w10,#20 add v4.4s,v4.4s,v5.4s ror w11,w11,#20 add v16.4s,v16.4s,v17.4s ror w12,w12,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w9 eor v21.16b,v7.16b,v4.16b add w6,w6,w10 eor v22.16b,v19.16b,v16.16b add w7,w7,w11 ushr v3.4s,v20.4s,#24 add w8,w8,w12 ushr v7.4s,v21.4s,#24 eor w17,w17,w5 ushr v19.4s,v22.4s,#24 eor w19,w19,w6 sli v3.4s,v20.4s,#8 eor w20,w20,w7 sli v7.4s,v21.4s,#8 eor w21,w21,w8 sli v19.4s,v22.4s,#8 ror w17,w17,#24 add v2.4s,v2.4s,v3.4s ror w19,w19,#24 add v6.4s,v6.4s,v7.4s ror w20,w20,#24 add v18.4s,v18.4s,v19.4s ror w21,w21,#24 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#25 add w16,w16,w21 ushr v5.4s,v21.4s,#25 eor w9,w9,w13 ushr v17.4s,v22.4s,#25 eor w10,w10,w14 sli v1.4s,v20.4s,#7 eor w11,w11,w15 sli v5.4s,v21.4s,#7 eor w12,w12,w16 sli v17.4s,v22.4s,#7 ror w9,w9,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w10,w10,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w10 add v4.4s,v4.4s,v5.4s add w6,w6,w11 add v16.4s,v16.4s,v17.4s add w7,w7,w12 eor v3.16b,v3.16b,v0.16b add w8,w8,w9 eor v7.16b,v7.16b,v4.16b eor w21,w21,w5 eor v19.16b,v19.16b,v16.16b eor w17,w17,w6 rev32 v3.8h,v3.8h eor w19,w19,w7 rev32 v7.8h,v7.8h eor w20,w20,w8 rev32 v19.8h,v19.8h ror w21,w21,#16 add v2.4s,v2.4s,v3.4s ror w17,w17,#16 add v6.4s,v6.4s,v7.4s ror w19,w19,#16 add v18.4s,v18.4s,v19.4s ror w20,w20,#16 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#20 add w14,w14,w20 ushr v5.4s,v21.4s,#20 eor w10,w10,w15 ushr v17.4s,v22.4s,#20 eor w11,w11,w16 sli v1.4s,v20.4s,#12 eor w12,w12,w13 sli v5.4s,v21.4s,#12 eor
w9,w9,w14 sli v17.4s,v22.4s,#12 ror w10,w10,#20 add v0.4s,v0.4s,v1.4s ror w11,w11,#20 add v4.4s,v4.4s,v5.4s ror w12,w12,#20 add v16.4s,v16.4s,v17.4s ror w9,w9,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w10 eor v21.16b,v7.16b,v4.16b add w6,w6,w11 eor v22.16b,v19.16b,v16.16b add w7,w7,w12 ushr v3.4s,v20.4s,#24 add w8,w8,w9 ushr v7.4s,v21.4s,#24 eor w21,w21,w5 ushr v19.4s,v22.4s,#24 eor w17,w17,w6 sli v3.4s,v20.4s,#8 eor w19,w19,w7 sli v7.4s,v21.4s,#8 eor w20,w20,w8 sli v19.4s,v22.4s,#8 ror w21,w21,#24 add v2.4s,v2.4s,v3.4s ror w17,w17,#24 add v6.4s,v6.4s,v7.4s ror w19,w19,#24 add v18.4s,v18.4s,v19.4s ror w20,w20,#24 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#25 add w14,w14,w20 ushr v5.4s,v21.4s,#25 eor w10,w10,w15 ushr v17.4s,v22.4s,#25 eor w11,w11,w16 sli v1.4s,v20.4s,#7 eor w12,w12,w13 sli v5.4s,v21.4s,#7 eor w9,w9,w14 sli v17.4s,v22.4s,#7 ror w10,w10,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w11,w11,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w12,w12,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 cbnz x4,.Loop_neon add w5,w5,w22 // accumulate key block add v0.4s,v0.4s,v24.4s add x6,x6,x22,lsr#32 add v4.4s,v4.4s,v24.4s add w7,w7,w23 add v16.4s,v16.4s,v24.4s add x8,x8,x23,lsr#32 add v2.4s,v2.4s,v26.4s add w9,w9,w24 add v6.4s,v6.4s,v26.4s add x10,x10,x24,lsr#32 add v18.4s,v18.4s,v26.4s add w11,w11,w25 add v3.4s,v3.4s,v27.4s add x12,x12,x25,lsr#32 add w13,w13,w26 add v7.4s,v7.4s,v28.4s add x14,x14,x26,lsr#32 add w15,w15,w27 add v19.4s,v19.4s,v29.4s add x16,x16,x27,lsr#32 add w17,w17,w28 add v1.4s,v1.4s,v25.4s add x19,x19,x28,lsr#32 add w20,w20,w30 add v5.4s,v5.4s,v25.4s add x21,x21,x30,lsr#32 add v17.4s,v17.4s,v25.4s b.lo .Ltail_neon add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load
// Full-speed path: each .Loop_outer_neon iteration consumes 256 bytes
// (subs x2,x2,#256) — one scalar block plus three vector blocks — XORing
// input (ld1 {...},[x1],#64) into output (st1 {...},[x0],#64) and stepping
// the counters by 4 (add x28,x28,#4; add v27.4s,v27.4s,v31.4s // += 4).
input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v20.16b eor x15,x15,x16 eor v1.16b,v1.16b,v21.16b eor x17,x17,x19 eor v2.16b,v2.16b,v22.16b eor x20,x20,x21 eor v3.16b,v3.16b,v23.16b ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] add v27.4s,v27.4s,v31.4s // += 4 stp x13,x15,[x0,#32] add v28.4s,v28.4s,v31.4s stp x17,x20,[x0,#48] add v29.4s,v29.4s,v31.4s add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 eor v16.16b,v16.16b,v0.16b eor v17.16b,v17.16b,v1.16b eor v18.16b,v18.16b,v2.16b eor v19.16b,v19.16b,v3.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 b.hi .Loop_outer_neon ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .Ltail_neon: add x2,x2,#256 cmp x2,#64 b.lo .Less_than_64 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12
// NEON tail (.Ltail_neon): emits whole 64-byte blocks while 64 or more bytes
// remain (repeated cmp x2,#64 / b.lo dispatch to .Less_than_64/_128/_192),
// stages the last partial keystream block on the stack, XORs it
// byte-by-byte in .Loop_tail_neon, then wipes the staging area
// (stp xzr,xzr,...) before returning — same scrub discipline as the scalar
// tail. ChaCha20_512_neon (the >= 512-byte kernel) begins at the end of
// this line and continues beyond this region.
eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.eq .Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo .Less_than_128 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v0.16b,v0.16b,v20.16b eor v1.16b,v1.16b,v21.16b eor v2.16b,v2.16b,v22.16b eor v3.16b,v3.16b,v23.16b st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 b.eq .Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo .Less_than_192 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 b.eq .Ldone_neon sub x2,x2,#64 st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp] b .Last_neon .Less_than_128: st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp] b .Last_neon .Less_than_192: st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp] b .Last_neon .align 4 .Last_neon: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 .Loop_tail_neon: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb w10,[x0,x2] cbnz x2,.Loop_tail_neon stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] .Ldone_neon: ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .size ChaCha20_ctr32_neon,.-ChaCha20_ctr32_neon .type ChaCha20_512_neon,%function .align 5 ChaCha20_512_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]!
add x29,sp,#0 adrp x5,.Lsigma add x5,x5,:lo12:.Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] .L512_or_more_neon: sub sp,sp,#128+64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 stp q24,q25,[sp,#0] // off-load key block, invariant part add v27.4s,v27.4s,v31.4s // not typo str q26,[sp,#32] add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s add v30.4s,v29.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 stp d8,d9,[sp,#128+0] // meet ABI requirements stp d10,d11,[sp,#128+16] stp d12,d13,[sp,#128+32] stp d14,d15,[sp,#128+48] sub x2,x2,#512 // not typo .Loop_outer_512_neon: mov v0.16b,v24.16b mov v4.16b,v24.16b mov v8.16b,v24.16b mov v12.16b,v24.16b mov v16.16b,v24.16b mov v20.16b,v24.16b mov v1.16b,v25.16b mov w5,w22 // unpack key block mov v5.16b,v25.16b lsr x6,x22,#32 mov v9.16b,v25.16b mov w7,w23 mov v13.16b,v25.16b lsr x8,x23,#32 mov v17.16b,v25.16b mov w9,w24 mov v21.16b,v25.16b lsr x10,x24,#32 mov v3.16b,v27.16b mov w11,w25 mov v7.16b,v28.16b lsr x12,x25,#32 mov v11.16b,v29.16b mov w13,w26 mov v15.16b,v30.16b lsr x14,x26,#32 mov v2.16b,v26.16b mov w15,w27 mov v6.16b,v26.16b lsr x16,x27,#32 add v19.4s,v3.4s,v31.4s // +4 mov w17,w28 add v23.4s,v7.4s,v31.4s // +4 lsr x19,x28,#32 mov v10.16b,v26.16b mov w20,w30 mov v14.16b,v26.16b lsr x21,x30,#32 mov v18.16b,v26.16b stp q27,q28,[sp,#48] // off-load key block, variable part mov v22.16b,v26.16b str q29,[sp,#80] mov x4,#5 subs x2,x2,#512 .Loop_upper_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor 
w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor 
w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add 
v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor 
v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext 
v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,.Loop_upper_neon add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter mov w5,w22 // unpack key block lsr x6,x22,#32 stp x9,x11,[x0,#16] mov w7,w23 lsr x8,x23,#32 stp x13,x15,[x0,#32] mov w9,w24 lsr x10,x24,#32 stp x17,x20,[x0,#48] add x0,x0,#64 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#5 .Loop_lower_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h 
add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add 
v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 
eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 
add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,.Loop_lower_neon add w5,w5,w22 // accumulate key block ldp q24,q25,[sp,#0] add x6,x6,x22,lsr#32 ldp q26,q27,[sp,#32] add w7,w7,w23 ldp q28,q29,[sp,#64] add x8,x8,x23,lsr#32 add v0.4s,v0.4s,v24.4s add w9,w9,w24 add v4.4s,v4.4s,v24.4s add x10,x10,x24,lsr#32 add v8.4s,v8.4s,v24.4s add w11,w11,w25 
add v12.4s,v12.4s,v24.4s add x12,x12,x25,lsr#32 add v16.4s,v16.4s,v24.4s add w13,w13,w26 add v20.4s,v20.4s,v24.4s add x14,x14,x26,lsr#32 add v2.4s,v2.4s,v26.4s add w15,w15,w27 add v6.4s,v6.4s,v26.4s add x16,x16,x27,lsr#32 add v10.4s,v10.4s,v26.4s add w17,w17,w28 add v14.4s,v14.4s,v26.4s add x19,x19,x28,lsr#32 add v18.4s,v18.4s,v26.4s add w20,w20,w30 add v22.4s,v22.4s,v26.4s add x21,x21,x30,lsr#32 add v19.4s,v19.4s,v31.4s // +4 add x5,x5,x6,lsl#32 // pack add v23.4s,v23.4s,v31.4s // +4 add x7,x7,x8,lsl#32 add v3.4s,v3.4s,v27.4s ldp x6,x8,[x1,#0] // load input add v7.4s,v7.4s,v28.4s add x9,x9,x10,lsl#32 add v11.4s,v11.4s,v29.4s add x11,x11,x12,lsl#32 add v15.4s,v15.4s,v30.4s ldp x10,x12,[x1,#16] add v19.4s,v19.4s,v27.4s add x13,x13,x14,lsl#32 add v23.4s,v23.4s,v28.4s add x15,x15,x16,lsl#32 add v1.4s,v1.4s,v25.4s ldp x14,x16,[x1,#32] add v5.4s,v5.4s,v25.4s add x17,x17,x19,lsl#32 add v9.4s,v9.4s,v25.4s add x20,x20,x21,lsl#32 add v13.4s,v13.4s,v25.4s ldp x19,x21,[x1,#48] add v17.4s,v17.4s,v25.4s add x1,x1,#64 add v21.4s,v21.4s,v25.4s #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v24.16b eor x15,x15,x16 eor v1.16b,v1.16b,v25.16b eor x17,x17,x19 eor v2.16b,v2.16b,v26.16b eor x20,x20,x21 eor v3.16b,v3.16b,v27.16b ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#7 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v24.16b eor v5.16b,v5.16b,v25.16b eor v6.16b,v6.16b,v26.16b eor v7.16b,v7.16b,v27.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 eor v8.16b,v8.16b,v0.16b ldp q24,q25,[sp,#0] eor v9.16b,v9.16b,v1.16b ldp q26,q27,[sp,#32] eor 
v10.16b,v10.16b,v2.16b eor v11.16b,v11.16b,v3.16b st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64 ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64 eor v12.16b,v12.16b,v4.16b eor v13.16b,v13.16b,v5.16b eor v14.16b,v14.16b,v6.16b eor v15.16b,v15.16b,v7.16b st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64 ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64 eor v16.16b,v16.16b,v8.16b eor v17.16b,v17.16b,v9.16b eor v18.16b,v18.16b,v10.16b eor v19.16b,v19.16b,v11.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 shl v0.4s,v31.4s,#1 // 4 -> 8 eor v20.16b,v20.16b,v12.16b eor v21.16b,v21.16b,v13.16b eor v22.16b,v22.16b,v14.16b eor v23.16b,v23.16b,v15.16b st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64 add v27.4s,v27.4s,v0.4s // += 8 add v28.4s,v28.4s,v0.4s add v29.4s,v29.4s,v0.4s add v30.4s,v30.4s,v0.4s b.hs .Loop_outer_512_neon adds x2,x2,#512 ushr v0.4s,v31.4s,#2 // 4 -> 1 ldp d8,d9,[sp,#128+0] // meet ABI requirements ldp d10,d11,[sp,#128+16] ldp d12,d13,[sp,#128+32] ldp d14,d15,[sp,#128+48] stp q24,q31,[sp,#0] // wipe off-load area stp q24,q31,[sp,#32] stp q24,q31,[sp,#64] b.eq .Ldone_512_neon cmp x2,#192 sub v27.4s,v27.4s,v0.4s // -= 1 sub v28.4s,v28.4s,v0.4s sub v29.4s,v29.4s,v0.4s add sp,sp,#128 b.hs .Loop_outer_neon eor v25.16b,v25.16b,v25.16b eor v26.16b,v26.16b,v26.16b eor v27.16b,v27.16b,v27.16b eor v28.16b,v28.16b,v28.16b eor v29.16b,v29.16b,v29.16b eor v30.16b,v30.16b,v30.16b b .Loop_outer .Ldone_512_neon: ldp x19,x20,[x29,#16] add sp,sp,#128+64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .size ChaCha20_512_neon,.-ChaCha20_512_neon #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha-armv8-win.S ================================================ #define BORINGSSL_PREFIX 
CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include .section .rodata .align 5 Lsigma: .quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral Lone: .long 1,0,0,0 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text .globl ChaCha20_ctr32_nohw .def ChaCha20_ctr32_nohw .type 32 .endef .align 5 ChaCha20_ctr32_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,Lsigma add x5,x5,:lo12:Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ldp x28,x30,[x4] // load counter #ifdef __AARCH64EB__ ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif Loop_outer: mov w5,w22 // unpack key block lsr x6,x22,#32 mov w7,w23 lsr x8,x23,#32 mov w9,w24 lsr x10,x24,#32 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#10 subs x2,x2,#64 Loop: sub x4,x4,#1 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 ror w21,w21,#16 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#20 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 ror w21,w21,#24 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add 
w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#25 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#16 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 ror w9,w9,#20 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#24 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 ror w9,w9,#25 cbnz x4,Loop add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 b.lo Ltail add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.hi Loop_outer ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp 
x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .align 4 Ltail: add x2,x2,#64 Less_than_64: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif stp x5,x7,[sp,#0] stp x9,x11,[sp,#16] stp x13,x15,[sp,#32] stp x17,x20,[sp,#48] Loop_tail: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb w10,[x0,x2] cbnz x2,Loop_tail stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl ChaCha20_ctr32_neon .def ChaCha20_ctr32_neon .type 32 .endef .align 5 ChaCha20_ctr32_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 adrp x5,Lsigma add x5,x5,:lo12:Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] cmp x2,#512 b.hs L512_or_more_neon sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 Loop_outer_neon: mov w5,w22 // unpack key block lsr x6,x22,#32 mov v0.16b,v24.16b mov w7,w23 lsr x8,x23,#32 mov v4.16b,v24.16b mov w9,w24 lsr x10,x24,#32 mov v16.16b,v24.16b mov w11,w25 mov v1.16b,v25.16b lsr x12,x25,#32 mov v5.16b,v25.16b mov w13,w26 mov v17.16b,v25.16b lsr x14,x26,#32 mov v3.16b,v27.16b mov w15,w27 mov v7.16b,v28.16b lsr x16,x27,#32 mov v19.16b,v29.16b mov w17,w28 mov v2.16b,v26.16b lsr x19,x28,#32 mov v6.16b,v26.16b mov w20,w30 mov v18.16b,v26.16b lsr x21,x30,#32 mov x4,#10 subs x2,x2,#256 Loop_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v16.4s,v16.4s,v17.4s add w7,w7,w11 eor v3.16b,v3.16b,v0.16b add w8,w8,w12 eor v7.16b,v7.16b,v4.16b eor w17,w17,w5 eor v19.16b,v19.16b,v16.16b eor w19,w19,w6 rev32 v3.8h,v3.8h eor w20,w20,w7 rev32 v7.8h,v7.8h eor w21,w21,w8 rev32 v19.8h,v19.8h ror w17,w17,#16 add v2.4s,v2.4s,v3.4s ror w19,w19,#16 add v6.4s,v6.4s,v7.4s ror w20,w20,#16 add v18.4s,v18.4s,v19.4s ror w21,w21,#16 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#20 add w16,w16,w21 ushr v5.4s,v21.4s,#20 eor w9,w9,w13 ushr v17.4s,v22.4s,#20 eor w10,w10,w14 sli v1.4s,v20.4s,#12 eor w11,w11,w15 sli v5.4s,v21.4s,#12 eor w12,w12,w16 sli v17.4s,v22.4s,#12 ror 
w9,w9,#20 add v0.4s,v0.4s,v1.4s ror w10,w10,#20 add v4.4s,v4.4s,v5.4s ror w11,w11,#20 add v16.4s,v16.4s,v17.4s ror w12,w12,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w9 eor v21.16b,v7.16b,v4.16b add w6,w6,w10 eor v22.16b,v19.16b,v16.16b add w7,w7,w11 ushr v3.4s,v20.4s,#24 add w8,w8,w12 ushr v7.4s,v21.4s,#24 eor w17,w17,w5 ushr v19.4s,v22.4s,#24 eor w19,w19,w6 sli v3.4s,v20.4s,#8 eor w20,w20,w7 sli v7.4s,v21.4s,#8 eor w21,w21,w8 sli v19.4s,v22.4s,#8 ror w17,w17,#24 add v2.4s,v2.4s,v3.4s ror w19,w19,#24 add v6.4s,v6.4s,v7.4s ror w20,w20,#24 add v18.4s,v18.4s,v19.4s ror w21,w21,#24 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#25 add w16,w16,w21 ushr v5.4s,v21.4s,#25 eor w9,w9,w13 ushr v17.4s,v22.4s,#25 eor w10,w10,w14 sli v1.4s,v20.4s,#7 eor w11,w11,w15 sli v5.4s,v21.4s,#7 eor w12,w12,w16 sli v17.4s,v22.4s,#7 ror w9,w9,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w10,w10,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w10 add v4.4s,v4.4s,v5.4s add w6,w6,w11 add v16.4s,v16.4s,v17.4s add w7,w7,w12 eor v3.16b,v3.16b,v0.16b add w8,w8,w9 eor v7.16b,v7.16b,v4.16b eor w21,w21,w5 eor v19.16b,v19.16b,v16.16b eor w17,w17,w6 rev32 v3.8h,v3.8h eor w19,w19,w7 rev32 v7.8h,v7.8h eor w20,w20,w8 rev32 v19.8h,v19.8h ror w21,w21,#16 add v2.4s,v2.4s,v3.4s ror w17,w17,#16 add v6.4s,v6.4s,v7.4s ror w19,w19,#16 add v18.4s,v18.4s,v19.4s ror w20,w20,#16 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#20 add w14,w14,w20 ushr v5.4s,v21.4s,#20 eor w10,w10,w15 ushr v17.4s,v22.4s,#20 eor w11,w11,w16 sli v1.4s,v20.4s,#12 eor w12,w12,w13 sli v5.4s,v21.4s,#12 eor 
w9,w9,w14 sli v17.4s,v22.4s,#12 ror w10,w10,#20 add v0.4s,v0.4s,v1.4s ror w11,w11,#20 add v4.4s,v4.4s,v5.4s ror w12,w12,#20 add v16.4s,v16.4s,v17.4s ror w9,w9,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w10 eor v21.16b,v7.16b,v4.16b add w6,w6,w11 eor v22.16b,v19.16b,v16.16b add w7,w7,w12 ushr v3.4s,v20.4s,#24 add w8,w8,w9 ushr v7.4s,v21.4s,#24 eor w21,w21,w5 ushr v19.4s,v22.4s,#24 eor w17,w17,w6 sli v3.4s,v20.4s,#8 eor w19,w19,w7 sli v7.4s,v21.4s,#8 eor w20,w20,w8 sli v19.4s,v22.4s,#8 ror w21,w21,#24 add v2.4s,v2.4s,v3.4s ror w17,w17,#24 add v6.4s,v6.4s,v7.4s ror w19,w19,#24 add v18.4s,v18.4s,v19.4s ror w20,w20,#24 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#25 add w14,w14,w20 ushr v5.4s,v21.4s,#25 eor w10,w10,w15 ushr v17.4s,v22.4s,#25 eor w11,w11,w16 sli v1.4s,v20.4s,#7 eor w12,w12,w13 sli v5.4s,v21.4s,#7 eor w9,w9,w14 sli v17.4s,v22.4s,#7 ror w10,w10,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w11,w11,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w12,w12,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 cbnz x4,Loop_neon add w5,w5,w22 // accumulate key block add v0.4s,v0.4s,v24.4s add x6,x6,x22,lsr#32 add v4.4s,v4.4s,v24.4s add w7,w7,w23 add v16.4s,v16.4s,v24.4s add x8,x8,x23,lsr#32 add v2.4s,v2.4s,v26.4s add w9,w9,w24 add v6.4s,v6.4s,v26.4s add x10,x10,x24,lsr#32 add v18.4s,v18.4s,v26.4s add w11,w11,w25 add v3.4s,v3.4s,v27.4s add x12,x12,x25,lsr#32 add w13,w13,w26 add v7.4s,v7.4s,v28.4s add x14,x14,x26,lsr#32 add w15,w15,w27 add v19.4s,v19.4s,v29.4s add x16,x16,x27,lsr#32 add w17,w17,w28 add v1.4s,v1.4s,v25.4s add x19,x19,x28,lsr#32 add w20,w20,w30 add v5.4s,v5.4s,v25.4s add x21,x21,x30,lsr#32 add v17.4s,v17.4s,v25.4s b.lo Ltail_neon add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load 
input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v20.16b eor x15,x15,x16 eor v1.16b,v1.16b,v21.16b eor x17,x17,x19 eor v2.16b,v2.16b,v22.16b eor x20,x20,x21 eor v3.16b,v3.16b,v23.16b ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] add v27.4s,v27.4s,v31.4s // += 4 stp x13,x15,[x0,#32] add v28.4s,v28.4s,v31.4s stp x17,x20,[x0,#48] add v29.4s,v29.4s,v31.4s add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 eor v16.16b,v16.16b,v0.16b eor v17.16b,v17.16b,v1.16b eor v18.16b,v18.16b,v2.16b eor v19.16b,v19.16b,v3.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 b.hi Loop_outer_neon ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret Ltail_neon: add x2,x2,#256 cmp x2,#64 b.lo Less_than_64 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor 
x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.eq Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo Less_than_128 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v0.16b,v0.16b,v20.16b eor v1.16b,v1.16b,v21.16b eor v2.16b,v2.16b,v22.16b eor v3.16b,v3.16b,v23.16b st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 b.eq Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo Less_than_192 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 b.eq Ldone_neon sub x2,x2,#64 st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp] b Last_neon Less_than_128: st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp] b Last_neon Less_than_192: st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp] b Last_neon .align 4 Last_neon: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 Loop_tail_neon: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb w10,[x0,x2] cbnz x2,Loop_tail_neon stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] Ldone_neon: ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .def ChaCha20_512_neon .type 32 .endef .align 5 ChaCha20_512_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 adrp x5,Lsigma add x5,x5,:lo12:Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] L512_or_more_neon: sub sp,sp,#128+64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 stp q24,q25,[sp,#0] // off-load key block, invariant part add v27.4s,v27.4s,v31.4s // not typo str q26,[sp,#32] add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s add v30.4s,v29.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 stp d8,d9,[sp,#128+0] // meet ABI requirements stp d10,d11,[sp,#128+16] stp d12,d13,[sp,#128+32] stp d14,d15,[sp,#128+48] sub x2,x2,#512 // not typo Loop_outer_512_neon: mov v0.16b,v24.16b mov v4.16b,v24.16b mov v8.16b,v24.16b mov v12.16b,v24.16b mov v16.16b,v24.16b mov v20.16b,v24.16b mov v1.16b,v25.16b mov w5,w22 // unpack key block mov v5.16b,v25.16b lsr x6,x22,#32 mov v9.16b,v25.16b mov w7,w23 mov v13.16b,v25.16b lsr x8,x23,#32 mov v17.16b,v25.16b mov w9,w24 mov v21.16b,v25.16b lsr x10,x24,#32 mov v3.16b,v27.16b mov w11,w25 mov v7.16b,v28.16b lsr x12,x25,#32 mov v11.16b,v29.16b mov w13,w26 mov v15.16b,v30.16b lsr x14,x26,#32 mov v2.16b,v26.16b mov w15,w27 mov v6.16b,v26.16b lsr x16,x27,#32 add v19.4s,v3.4s,v31.4s // +4 mov w17,w28 add v23.4s,v7.4s,v31.4s // +4 lsr x19,x28,#32 mov v10.16b,v26.16b mov w20,w30 mov v14.16b,v26.16b lsr x21,x30,#32 mov v18.16b,v26.16b stp q27,q28,[sp,#48] // off-load key block, variable part mov v22.16b,v26.16b str q29,[sp,#80] mov x4,#5 subs x2,x2,#512 Loop_upper_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor 
w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor 
w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add 
v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor 
v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext 
v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,Loop_upper_neon add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter mov w5,w22 // unpack key block lsr x6,x22,#32 stp x9,x11,[x0,#16] mov w7,w23 lsr x8,x23,#32 stp x13,x15,[x0,#32] mov w9,w24 lsr x10,x24,#32 stp x17,x20,[x0,#48] add x0,x0,#64 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#5 Loop_lower_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add 
w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add 
v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 
eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 
add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,Loop_lower_neon add w5,w5,w22 // accumulate key block ldp q24,q25,[sp,#0] add x6,x6,x22,lsr#32 ldp q26,q27,[sp,#32] add w7,w7,w23 ldp q28,q29,[sp,#64] add x8,x8,x23,lsr#32 add v0.4s,v0.4s,v24.4s add w9,w9,w24 add v4.4s,v4.4s,v24.4s add x10,x10,x24,lsr#32 add v8.4s,v8.4s,v24.4s add w11,w11,w25 add 
v12.4s,v12.4s,v24.4s add x12,x12,x25,lsr#32 add v16.4s,v16.4s,v24.4s add w13,w13,w26 add v20.4s,v20.4s,v24.4s add x14,x14,x26,lsr#32 add v2.4s,v2.4s,v26.4s add w15,w15,w27 add v6.4s,v6.4s,v26.4s add x16,x16,x27,lsr#32 add v10.4s,v10.4s,v26.4s add w17,w17,w28 add v14.4s,v14.4s,v26.4s add x19,x19,x28,lsr#32 add v18.4s,v18.4s,v26.4s add w20,w20,w30 add v22.4s,v22.4s,v26.4s add x21,x21,x30,lsr#32 add v19.4s,v19.4s,v31.4s // +4 add x5,x5,x6,lsl#32 // pack add v23.4s,v23.4s,v31.4s // +4 add x7,x7,x8,lsl#32 add v3.4s,v3.4s,v27.4s ldp x6,x8,[x1,#0] // load input add v7.4s,v7.4s,v28.4s add x9,x9,x10,lsl#32 add v11.4s,v11.4s,v29.4s add x11,x11,x12,lsl#32 add v15.4s,v15.4s,v30.4s ldp x10,x12,[x1,#16] add v19.4s,v19.4s,v27.4s add x13,x13,x14,lsl#32 add v23.4s,v23.4s,v28.4s add x15,x15,x16,lsl#32 add v1.4s,v1.4s,v25.4s ldp x14,x16,[x1,#32] add v5.4s,v5.4s,v25.4s add x17,x17,x19,lsl#32 add v9.4s,v9.4s,v25.4s add x20,x20,x21,lsl#32 add v13.4s,v13.4s,v25.4s ldp x19,x21,[x1,#48] add v17.4s,v17.4s,v25.4s add x1,x1,#64 add v21.4s,v21.4s,v25.4s #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v24.16b eor x15,x15,x16 eor v1.16b,v1.16b,v25.16b eor x17,x17,x19 eor v2.16b,v2.16b,v26.16b eor x20,x20,x21 eor v3.16b,v3.16b,v27.16b ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#7 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v24.16b eor v5.16b,v5.16b,v25.16b eor v6.16b,v6.16b,v26.16b eor v7.16b,v7.16b,v27.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 eor v8.16b,v8.16b,v0.16b ldp q24,q25,[sp,#0] eor v9.16b,v9.16b,v1.16b ldp q26,q27,[sp,#32] eor 
v10.16b,v10.16b,v2.16b eor v11.16b,v11.16b,v3.16b st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64 ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64 eor v12.16b,v12.16b,v4.16b eor v13.16b,v13.16b,v5.16b eor v14.16b,v14.16b,v6.16b eor v15.16b,v15.16b,v7.16b st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64 ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64 eor v16.16b,v16.16b,v8.16b eor v17.16b,v17.16b,v9.16b eor v18.16b,v18.16b,v10.16b eor v19.16b,v19.16b,v11.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 shl v0.4s,v31.4s,#1 // 4 -> 8 eor v20.16b,v20.16b,v12.16b eor v21.16b,v21.16b,v13.16b eor v22.16b,v22.16b,v14.16b eor v23.16b,v23.16b,v15.16b st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64 add v27.4s,v27.4s,v0.4s // += 8 add v28.4s,v28.4s,v0.4s add v29.4s,v29.4s,v0.4s add v30.4s,v30.4s,v0.4s b.hs Loop_outer_512_neon adds x2,x2,#512 ushr v0.4s,v31.4s,#2 // 4 -> 1 ldp d8,d9,[sp,#128+0] // meet ABI requirements ldp d10,d11,[sp,#128+16] ldp d12,d13,[sp,#128+32] ldp d14,d15,[sp,#128+48] stp q24,q31,[sp,#0] // wipe off-load area stp q24,q31,[sp,#32] stp q24,q31,[sp,#64] b.eq Ldone_512_neon cmp x2,#192 sub v27.4s,v27.4s,v0.4s // -= 1 sub v28.4s,v28.4s,v0.4s sub v29.4s,v29.4s,v0.4s add sp,sp,#128 b.hs Loop_outer_neon eor v25.16b,v25.16b,v25.16b eor v26.16b,v26.16b,v26.16b eor v27.16b,v27.16b,v27.16b eor v28.16b,v28.16b,v28.16b eor v29.16b,v29.16b,v29.16b eor v30.16b,v30.16b,v30.16b b Loop_outer Ldone_512_neon: ldp x19,x20,[x29,#16] add sp,sp,#128+64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha-x86-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a 
similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _ChaCha20_ctr32_nohw .private_extern _ChaCha20_ctr32_nohw .align 4 _ChaCha20_ctr32_nohw: L_ChaCha20_ctr32_nohw_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 32(%esp),%esi movl 36(%esp),%edi subl $132,%esp movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edx movl %eax,80(%esp) movl %ebx,84(%esp) movl %ecx,88(%esp) movl %edx,92(%esp) movl 16(%esi),%eax movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%edx movl %eax,96(%esp) movl %ebx,100(%esp) movl %ecx,104(%esp) movl %edx,108(%esp) movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx subl $1,%eax movl %eax,112(%esp) movl %ebx,116(%esp) movl %ecx,120(%esp) movl %edx,124(%esp) jmp L000entry .align 4,0x90 L001outer_loop: movl %ebx,156(%esp) movl %eax,152(%esp) movl %ecx,160(%esp) L000entry: movl $1634760805,%eax movl $857760878,4(%esp) movl $2036477234,8(%esp) movl $1797285236,12(%esp) movl 84(%esp),%ebx movl 88(%esp),%ebp movl 104(%esp),%ecx movl 108(%esp),%esi movl 116(%esp),%edx movl 120(%esp),%edi movl %ebx,20(%esp) movl %ebp,24(%esp) movl %ecx,40(%esp) movl %esi,44(%esp) movl %edx,52(%esp) movl %edi,56(%esp) movl 92(%esp),%ebx movl 124(%esp),%edi movl 112(%esp),%edx movl 80(%esp),%ebp movl 96(%esp),%ecx movl 100(%esp),%esi addl $1,%edx movl %ebx,28(%esp) movl %edi,60(%esp) movl %edx,112(%esp) movl $10,%ebx jmp L002loop .align 4,0x90 L002loop: addl %ebp,%eax movl %ebx,128(%esp) movl %ebp,%ebx xorl %eax,%edx roll $16,%edx addl %edx,%ecx xorl %ecx,%ebx movl 52(%esp),%edi roll $12,%ebx movl 20(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,(%esp) roll $8,%edx movl 4(%esp),%eax addl %edx,%ecx movl %edx,48(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi movl %ecx,32(%esp) roll $16,%edi movl %ebx,16(%esp) addl %edi,%esi movl 40(%esp),%ecx xorl %esi,%ebp movl 56(%esp),%edx 
roll $12,%ebp movl 24(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,4(%esp) roll $8,%edi movl 8(%esp),%eax addl %edi,%esi movl %edi,52(%esp) xorl %esi,%ebp addl %ebx,%eax roll $7,%ebp xorl %eax,%edx movl %esi,36(%esp) roll $16,%edx movl %ebp,20(%esp) addl %edx,%ecx movl 44(%esp),%esi xorl %ecx,%ebx movl 60(%esp),%edi roll $12,%ebx movl 28(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,8(%esp) roll $8,%edx movl 12(%esp),%eax addl %edx,%ecx movl %edx,56(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi roll $16,%edi movl %ebx,24(%esp) addl %edi,%esi xorl %esi,%ebp roll $12,%ebp movl 20(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,12(%esp) roll $8,%edi movl (%esp),%eax addl %edi,%esi movl %edi,%edx xorl %esi,%ebp addl %ebx,%eax roll $7,%ebp xorl %eax,%edx roll $16,%edx movl %ebp,28(%esp) addl %edx,%ecx xorl %ecx,%ebx movl 48(%esp),%edi roll $12,%ebx movl 24(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,(%esp) roll $8,%edx movl 4(%esp),%eax addl %edx,%ecx movl %edx,60(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi movl %ecx,40(%esp) roll $16,%edi movl %ebx,20(%esp) addl %edi,%esi movl 32(%esp),%ecx xorl %esi,%ebp movl 52(%esp),%edx roll $12,%ebp movl 28(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,4(%esp) roll $8,%edi movl 8(%esp),%eax addl %edi,%esi movl %edi,48(%esp) xorl %esi,%ebp addl %ebx,%eax roll $7,%ebp xorl %eax,%edx movl %esi,44(%esp) roll $16,%edx movl %ebp,24(%esp) addl %edx,%ecx movl 36(%esp),%esi xorl %ecx,%ebx movl 56(%esp),%edi roll $12,%ebx movl 16(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,8(%esp) roll $8,%edx movl 12(%esp),%eax addl %edx,%ecx movl %edx,52(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi roll $16,%edi movl %ebx,28(%esp) addl %edi,%esi xorl %esi,%ebp movl 48(%esp),%edx roll $12,%ebp movl 128(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,12(%esp) roll $8,%edi movl (%esp),%eax addl %edi,%esi movl %edi,56(%esp) xorl %esi,%ebp roll $7,%ebp decl %ebx jnz 
L002loop movl 160(%esp),%ebx addl $1634760805,%eax addl 80(%esp),%ebp addl 96(%esp),%ecx addl 100(%esp),%esi cmpl $64,%ebx jb L003tail movl 156(%esp),%ebx addl 112(%esp),%edx addl 120(%esp),%edi xorl (%ebx),%eax xorl 16(%ebx),%ebp movl %eax,(%esp) movl 152(%esp),%eax xorl 32(%ebx),%ecx xorl 36(%ebx),%esi xorl 48(%ebx),%edx xorl 56(%ebx),%edi movl %ebp,16(%eax) movl %ecx,32(%eax) movl %esi,36(%eax) movl %edx,48(%eax) movl %edi,56(%eax) movl 4(%esp),%ebp movl 8(%esp),%ecx movl 12(%esp),%esi movl 20(%esp),%edx movl 24(%esp),%edi addl $857760878,%ebp addl $2036477234,%ecx addl $1797285236,%esi addl 84(%esp),%edx addl 88(%esp),%edi xorl 4(%ebx),%ebp xorl 8(%ebx),%ecx xorl 12(%ebx),%esi xorl 20(%ebx),%edx xorl 24(%ebx),%edi movl %ebp,4(%eax) movl %ecx,8(%eax) movl %esi,12(%eax) movl %edx,20(%eax) movl %edi,24(%eax) movl 28(%esp),%ebp movl 40(%esp),%ecx movl 44(%esp),%esi movl 52(%esp),%edx movl 60(%esp),%edi addl 92(%esp),%ebp addl 104(%esp),%ecx addl 108(%esp),%esi addl 116(%esp),%edx addl 124(%esp),%edi xorl 28(%ebx),%ebp xorl 40(%ebx),%ecx xorl 44(%ebx),%esi xorl 52(%ebx),%edx xorl 60(%ebx),%edi leal 64(%ebx),%ebx movl %ebp,28(%eax) movl (%esp),%ebp movl %ecx,40(%eax) movl 160(%esp),%ecx movl %esi,44(%eax) movl %edx,52(%eax) movl %edi,60(%eax) movl %ebp,(%eax) leal 64(%eax),%eax subl $64,%ecx jnz L001outer_loop jmp L004done L003tail: addl 112(%esp),%edx addl 120(%esp),%edi movl %eax,(%esp) movl %ebp,16(%esp) movl %ecx,32(%esp) movl %esi,36(%esp) movl %edx,48(%esp) movl %edi,56(%esp) movl 4(%esp),%ebp movl 8(%esp),%ecx movl 12(%esp),%esi movl 20(%esp),%edx movl 24(%esp),%edi addl $857760878,%ebp addl $2036477234,%ecx addl $1797285236,%esi addl 84(%esp),%edx addl 88(%esp),%edi movl %ebp,4(%esp) movl %ecx,8(%esp) movl %esi,12(%esp) movl %edx,20(%esp) movl %edi,24(%esp) movl 28(%esp),%ebp movl 40(%esp),%ecx movl 44(%esp),%esi movl 52(%esp),%edx movl 60(%esp),%edi addl 92(%esp),%ebp addl 104(%esp),%ecx addl 108(%esp),%esi addl 116(%esp),%edx addl 124(%esp),%edi movl 
%ebp,28(%esp) movl 156(%esp),%ebp movl %ecx,40(%esp) movl 152(%esp),%ecx movl %esi,44(%esp) xorl %esi,%esi movl %edx,52(%esp) movl %edi,60(%esp) xorl %eax,%eax xorl %edx,%edx L005tail_loop: movb (%esi,%ebp,1),%al movb (%esp,%esi,1),%dl leal 1(%esi),%esi xorb %dl,%al movb %al,-1(%ecx,%esi,1) decl %ebx jnz L005tail_loop L004done: addl $132,%esp popl %edi popl %esi popl %ebx popl %ebp ret .globl _ChaCha20_ctr32_ssse3 .private_extern _ChaCha20_ctr32_ssse3 .align 4 _ChaCha20_ctr32_ssse3: L_ChaCha20_ctr32_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi call Lpic_point Lpic_point: popl %eax movl 20(%esp),%edi movl 24(%esp),%esi movl 28(%esp),%ecx movl 32(%esp),%edx movl 36(%esp),%ebx movl %esp,%ebp subl $524,%esp andl $-64,%esp movl %ebp,512(%esp) leal Lssse3_data-Lpic_point(%eax),%eax movdqu (%ebx),%xmm3 cmpl $256,%ecx jb L0061x movl %edx,516(%esp) movl %ebx,520(%esp) subl $256,%ecx leal 384(%esp),%ebp movdqu (%edx),%xmm7 pshufd $0,%xmm3,%xmm0 pshufd $85,%xmm3,%xmm1 pshufd $170,%xmm3,%xmm2 pshufd $255,%xmm3,%xmm3 paddd 48(%eax),%xmm0 pshufd $0,%xmm7,%xmm4 pshufd $85,%xmm7,%xmm5 psubd 64(%eax),%xmm0 pshufd $170,%xmm7,%xmm6 pshufd $255,%xmm7,%xmm7 movdqa %xmm0,64(%ebp) movdqa %xmm1,80(%ebp) movdqa %xmm2,96(%ebp) movdqa %xmm3,112(%ebp) movdqu 16(%edx),%xmm3 movdqa %xmm4,-64(%ebp) movdqa %xmm5,-48(%ebp) movdqa %xmm6,-32(%ebp) movdqa %xmm7,-16(%ebp) movdqa 32(%eax),%xmm7 leal 128(%esp),%ebx pshufd $0,%xmm3,%xmm0 pshufd $85,%xmm3,%xmm1 pshufd $170,%xmm3,%xmm2 pshufd $255,%xmm3,%xmm3 pshufd $0,%xmm7,%xmm4 pshufd $85,%xmm7,%xmm5 pshufd $170,%xmm7,%xmm6 pshufd $255,%xmm7,%xmm7 movdqa %xmm0,(%ebp) movdqa %xmm1,16(%ebp) movdqa %xmm2,32(%ebp) movdqa %xmm3,48(%ebp) movdqa %xmm4,-128(%ebp) movdqa %xmm5,-112(%ebp) movdqa %xmm6,-96(%ebp) movdqa %xmm7,-80(%ebp) leal 128(%esi),%esi leal 128(%edi),%edi jmp L007outer_loop .align 4,0x90 L007outer_loop: movdqa -112(%ebp),%xmm1 movdqa -96(%ebp),%xmm2 movdqa -80(%ebp),%xmm3 movdqa -48(%ebp),%xmm5 movdqa -32(%ebp),%xmm6 movdqa 
-16(%ebp),%xmm7 movdqa %xmm1,-112(%ebx) movdqa %xmm2,-96(%ebx) movdqa %xmm3,-80(%ebx) movdqa %xmm5,-48(%ebx) movdqa %xmm6,-32(%ebx) movdqa %xmm7,-16(%ebx) movdqa 32(%ebp),%xmm2 movdqa 48(%ebp),%xmm3 movdqa 64(%ebp),%xmm4 movdqa 80(%ebp),%xmm5 movdqa 96(%ebp),%xmm6 movdqa 112(%ebp),%xmm7 paddd 64(%eax),%xmm4 movdqa %xmm2,32(%ebx) movdqa %xmm3,48(%ebx) movdqa %xmm4,64(%ebx) movdqa %xmm5,80(%ebx) movdqa %xmm6,96(%ebx) movdqa %xmm7,112(%ebx) movdqa %xmm4,64(%ebp) movdqa -128(%ebp),%xmm0 movdqa %xmm4,%xmm6 movdqa -64(%ebp),%xmm3 movdqa (%ebp),%xmm4 movdqa 16(%ebp),%xmm5 movl $10,%edx nop .align 4,0x90 L008loop: paddd %xmm3,%xmm0 movdqa %xmm3,%xmm2 pxor %xmm0,%xmm6 pshufb (%eax),%xmm6 paddd %xmm6,%xmm4 pxor %xmm4,%xmm2 movdqa -48(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -112(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 80(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-128(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,64(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 movdqa %xmm4,(%ebx) pshufb (%eax),%xmm7 movdqa %xmm2,-64(%ebx) paddd %xmm7,%xmm5 movdqa 32(%ebx),%xmm4 pxor %xmm5,%xmm3 movdqa -32(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -96(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 96(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-112(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,80(%ebx) pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 movdqa %xmm5,16(%ebx) pshufb (%eax),%xmm6 movdqa %xmm3,-48(%ebx) paddd %xmm6,%xmm4 movdqa 48(%ebx),%xmm5 pxor %xmm4,%xmm2 movdqa -16(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -80(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 112(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-96(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,96(%ebx) pxor %xmm4,%xmm2 paddd 
%xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 pshufb (%eax),%xmm7 movdqa %xmm2,-32(%ebx) paddd %xmm7,%xmm5 pxor %xmm5,%xmm3 movdqa -48(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -128(%ebx),%xmm0 paddd %xmm3,%xmm1 pxor %xmm1,%xmm7 movdqa %xmm1,-80(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,%xmm6 pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 pshufb (%eax),%xmm6 movdqa %xmm3,-16(%ebx) paddd %xmm6,%xmm4 pxor %xmm4,%xmm2 movdqa -32(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -112(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 64(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-128(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,112(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 movdqa %xmm4,32(%ebx) pshufb (%eax),%xmm7 movdqa %xmm2,-48(%ebx) paddd %xmm7,%xmm5 movdqa (%ebx),%xmm4 pxor %xmm5,%xmm3 movdqa -16(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -96(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 80(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-112(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,64(%ebx) pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 movdqa %xmm5,48(%ebx) pshufb (%eax),%xmm6 movdqa %xmm3,-32(%ebx) paddd %xmm6,%xmm4 movdqa 16(%ebx),%xmm5 pxor %xmm4,%xmm2 movdqa -64(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -80(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 96(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-96(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,80(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 pshufb (%eax),%xmm7 movdqa 
%xmm2,-16(%ebx) paddd %xmm7,%xmm5 pxor %xmm5,%xmm3 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -128(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 64(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-80(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,96(%ebx) pxor %xmm5,%xmm3 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 por %xmm1,%xmm3 decl %edx jnz L008loop movdqa %xmm3,-64(%ebx) movdqa %xmm4,(%ebx) movdqa %xmm5,16(%ebx) movdqa %xmm6,64(%ebx) movdqa %xmm7,96(%ebx) movdqa -112(%ebx),%xmm1 movdqa -96(%ebx),%xmm2 movdqa -80(%ebx),%xmm3 paddd -128(%ebp),%xmm0 paddd -112(%ebp),%xmm1 paddd -96(%ebp),%xmm2 paddd -80(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa -64(%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa -48(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa -32(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa -16(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd -64(%ebp),%xmm0 paddd -48(%ebp),%xmm1 paddd -32(%ebp),%xmm2 paddd -16(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa (%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa 16(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa 32(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa 48(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 
16(%edi),%edi paddd (%ebp),%xmm0 paddd 16(%ebp),%xmm1 paddd 32(%ebp),%xmm2 paddd 48(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa 64(%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa 80(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa 96(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa 112(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd 64(%ebp),%xmm0 paddd 80(%ebp),%xmm1 paddd 96(%ebp),%xmm2 paddd 112(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 208(%esi),%esi pxor %xmm0,%xmm4 pxor %xmm1,%xmm5 pxor %xmm2,%xmm6 pxor %xmm3,%xmm7 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 208(%edi),%edi subl $256,%ecx jnc L007outer_loop addl $256,%ecx jz L009done movl 520(%esp),%ebx leal -128(%esi),%esi movl 516(%esp),%edx leal -128(%edi),%edi movd 64(%ebp),%xmm2 movdqu (%ebx),%xmm3 paddd 96(%eax),%xmm2 pand 112(%eax),%xmm3 por %xmm2,%xmm3 L0061x: movdqa 32(%eax),%xmm0 movdqu (%edx),%xmm1 movdqu 16(%edx),%xmm2 movdqa (%eax),%xmm6 movdqa 16(%eax),%xmm7 movl %ebp,48(%esp) movdqa %xmm0,(%esp) movdqa %xmm1,16(%esp) movdqa %xmm2,32(%esp) movdqa %xmm3,48(%esp) movl $10,%edx jmp L010loop1x .align 4,0x90 L011outer1x: movdqa 80(%eax),%xmm3 movdqa (%esp),%xmm0 movdqa 16(%esp),%xmm1 movdqa 32(%esp),%xmm2 paddd 48(%esp),%xmm3 movl $10,%edx movdqa 
%xmm3,48(%esp) jmp L010loop1x .align 4,0x90 L010loop1x: paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $57,%xmm1,%xmm1 pshufd $147,%xmm3,%xmm3 nop paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $147,%xmm1,%xmm1 pshufd $57,%xmm3,%xmm3 decl %edx jnz L010loop1x paddd (%esp),%xmm0 paddd 16(%esp),%xmm1 paddd 32(%esp),%xmm2 paddd 48(%esp),%xmm3 cmpl $64,%ecx jb L012tail movdqu (%esi),%xmm4 movdqu 16(%esi),%xmm5 pxor %xmm4,%xmm0 movdqu 32(%esi),%xmm4 pxor %xmm5,%xmm1 movdqu 48(%esi),%xmm5 pxor %xmm4,%xmm2 pxor %xmm5,%xmm3 leal 64(%esi),%esi movdqu %xmm0,(%edi) movdqu %xmm1,16(%edi) movdqu %xmm2,32(%edi) movdqu %xmm3,48(%edi) leal 64(%edi),%edi subl $64,%ecx jnz L011outer1x jmp L009done L012tail: movdqa %xmm0,(%esp) movdqa %xmm1,16(%esp) movdqa %xmm2,32(%esp) movdqa %xmm3,48(%esp) xorl %eax,%eax xorl %edx,%edx xorl %ebp,%ebp L013tail_loop: movb (%esp,%ebp,1),%al movb (%esi,%ebp,1),%dl leal 1(%ebp),%ebp xorb %dl,%al movb %al,-1(%edi,%ebp,1) decl %ecx jnz L013tail_loop L009done: movl 512(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .align 6,0x90 Lssse3_data: .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 .byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 .long 1634760805,857760878,2036477234,1797285236 .long 0,1,2,3 .long 4,4,4,4 .long 1,0,0,0 .long 4,0,0,0 .long 0,-1,-1,-1 .align 6,0x90 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54 .byte 
44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32 .byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111 .byte 114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha-x86-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl ChaCha20_ctr32_nohw .hidden ChaCha20_ctr32_nohw .type ChaCha20_ctr32_nohw,@function .align 16 ChaCha20_ctr32_nohw: .L_ChaCha20_ctr32_nohw_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 32(%esp),%esi movl 36(%esp),%edi subl $132,%esp movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edx movl %eax,80(%esp) movl %ebx,84(%esp) movl %ecx,88(%esp) movl %edx,92(%esp) movl 16(%esi),%eax movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%edx movl %eax,96(%esp) movl %ebx,100(%esp) movl %ecx,104(%esp) movl %edx,108(%esp) movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx subl $1,%eax movl %eax,112(%esp) movl %ebx,116(%esp) movl %ecx,120(%esp) movl %edx,124(%esp) jmp .L000entry .align 16 .L001outer_loop: movl %ebx,156(%esp) movl %eax,152(%esp) movl %ecx,160(%esp) .L000entry: movl $1634760805,%eax movl $857760878,4(%esp) movl $2036477234,8(%esp) movl $1797285236,12(%esp) movl 84(%esp),%ebx movl 88(%esp),%ebp movl 104(%esp),%ecx movl 108(%esp),%esi movl 116(%esp),%edx movl 120(%esp),%edi movl %ebx,20(%esp) movl %ebp,24(%esp) movl %ecx,40(%esp) movl %esi,44(%esp) movl %edx,52(%esp) movl %edi,56(%esp) movl 92(%esp),%ebx movl 124(%esp),%edi movl 112(%esp),%edx movl 80(%esp),%ebp movl 96(%esp),%ecx movl 100(%esp),%esi addl $1,%edx movl 
%ebx,28(%esp) movl %edi,60(%esp) movl %edx,112(%esp) movl $10,%ebx jmp .L002loop .align 16 .L002loop: addl %ebp,%eax movl %ebx,128(%esp) movl %ebp,%ebx xorl %eax,%edx roll $16,%edx addl %edx,%ecx xorl %ecx,%ebx movl 52(%esp),%edi roll $12,%ebx movl 20(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,(%esp) roll $8,%edx movl 4(%esp),%eax addl %edx,%ecx movl %edx,48(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi movl %ecx,32(%esp) roll $16,%edi movl %ebx,16(%esp) addl %edi,%esi movl 40(%esp),%ecx xorl %esi,%ebp movl 56(%esp),%edx roll $12,%ebp movl 24(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,4(%esp) roll $8,%edi movl 8(%esp),%eax addl %edi,%esi movl %edi,52(%esp) xorl %esi,%ebp addl %ebx,%eax roll $7,%ebp xorl %eax,%edx movl %esi,36(%esp) roll $16,%edx movl %ebp,20(%esp) addl %edx,%ecx movl 44(%esp),%esi xorl %ecx,%ebx movl 60(%esp),%edi roll $12,%ebx movl 28(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,8(%esp) roll $8,%edx movl 12(%esp),%eax addl %edx,%ecx movl %edx,56(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi roll $16,%edi movl %ebx,24(%esp) addl %edi,%esi xorl %esi,%ebp roll $12,%ebp movl 20(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,12(%esp) roll $8,%edi movl (%esp),%eax addl %edi,%esi movl %edi,%edx xorl %esi,%ebp addl %ebx,%eax roll $7,%ebp xorl %eax,%edx roll $16,%edx movl %ebp,28(%esp) addl %edx,%ecx xorl %ecx,%ebx movl 48(%esp),%edi roll $12,%ebx movl 24(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,(%esp) roll $8,%edx movl 4(%esp),%eax addl %edx,%ecx movl %edx,60(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi movl %ecx,40(%esp) roll $16,%edi movl %ebx,20(%esp) addl %edi,%esi movl 32(%esp),%ecx xorl %esi,%ebp movl 52(%esp),%edx roll $12,%ebp movl 28(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,4(%esp) roll $8,%edi movl 8(%esp),%eax addl %edi,%esi movl %edi,48(%esp) xorl %esi,%ebp addl %ebx,%eax roll $7,%ebp xorl %eax,%edx movl %esi,44(%esp) roll $16,%edx movl 
%ebp,24(%esp) addl %edx,%ecx movl 36(%esp),%esi xorl %ecx,%ebx movl 56(%esp),%edi roll $12,%ebx movl 16(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,8(%esp) roll $8,%edx movl 12(%esp),%eax addl %edx,%ecx movl %edx,52(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi roll $16,%edi movl %ebx,28(%esp) addl %edi,%esi xorl %esi,%ebp movl 48(%esp),%edx roll $12,%ebp movl 128(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,12(%esp) roll $8,%edi movl (%esp),%eax addl %edi,%esi movl %edi,56(%esp) xorl %esi,%ebp roll $7,%ebp decl %ebx jnz .L002loop movl 160(%esp),%ebx addl $1634760805,%eax addl 80(%esp),%ebp addl 96(%esp),%ecx addl 100(%esp),%esi cmpl $64,%ebx jb .L003tail movl 156(%esp),%ebx addl 112(%esp),%edx addl 120(%esp),%edi xorl (%ebx),%eax xorl 16(%ebx),%ebp movl %eax,(%esp) movl 152(%esp),%eax xorl 32(%ebx),%ecx xorl 36(%ebx),%esi xorl 48(%ebx),%edx xorl 56(%ebx),%edi movl %ebp,16(%eax) movl %ecx,32(%eax) movl %esi,36(%eax) movl %edx,48(%eax) movl %edi,56(%eax) movl 4(%esp),%ebp movl 8(%esp),%ecx movl 12(%esp),%esi movl 20(%esp),%edx movl 24(%esp),%edi addl $857760878,%ebp addl $2036477234,%ecx addl $1797285236,%esi addl 84(%esp),%edx addl 88(%esp),%edi xorl 4(%ebx),%ebp xorl 8(%ebx),%ecx xorl 12(%ebx),%esi xorl 20(%ebx),%edx xorl 24(%ebx),%edi movl %ebp,4(%eax) movl %ecx,8(%eax) movl %esi,12(%eax) movl %edx,20(%eax) movl %edi,24(%eax) movl 28(%esp),%ebp movl 40(%esp),%ecx movl 44(%esp),%esi movl 52(%esp),%edx movl 60(%esp),%edi addl 92(%esp),%ebp addl 104(%esp),%ecx addl 108(%esp),%esi addl 116(%esp),%edx addl 124(%esp),%edi xorl 28(%ebx),%ebp xorl 40(%ebx),%ecx xorl 44(%ebx),%esi xorl 52(%ebx),%edx xorl 60(%ebx),%edi leal 64(%ebx),%ebx movl %ebp,28(%eax) movl (%esp),%ebp movl %ecx,40(%eax) movl 160(%esp),%ecx movl %esi,44(%eax) movl %edx,52(%eax) movl %edi,60(%eax) movl %ebp,(%eax) leal 64(%eax),%eax subl $64,%ecx jnz .L001outer_loop jmp .L004done .L003tail: addl 112(%esp),%edx addl 120(%esp),%edi movl %eax,(%esp) movl %ebp,16(%esp) movl 
%ecx,32(%esp) movl %esi,36(%esp) movl %edx,48(%esp) movl %edi,56(%esp) movl 4(%esp),%ebp movl 8(%esp),%ecx movl 12(%esp),%esi movl 20(%esp),%edx movl 24(%esp),%edi addl $857760878,%ebp addl $2036477234,%ecx addl $1797285236,%esi addl 84(%esp),%edx addl 88(%esp),%edi movl %ebp,4(%esp) movl %ecx,8(%esp) movl %esi,12(%esp) movl %edx,20(%esp) movl %edi,24(%esp) movl 28(%esp),%ebp movl 40(%esp),%ecx movl 44(%esp),%esi movl 52(%esp),%edx movl 60(%esp),%edi addl 92(%esp),%ebp addl 104(%esp),%ecx addl 108(%esp),%esi addl 116(%esp),%edx addl 124(%esp),%edi movl %ebp,28(%esp) movl 156(%esp),%ebp movl %ecx,40(%esp) movl 152(%esp),%ecx movl %esi,44(%esp) xorl %esi,%esi movl %edx,52(%esp) movl %edi,60(%esp) xorl %eax,%eax xorl %edx,%edx .L005tail_loop: movb (%esi,%ebp,1),%al movb (%esp,%esi,1),%dl leal 1(%esi),%esi xorb %dl,%al movb %al,-1(%ecx,%esi,1) decl %ebx jnz .L005tail_loop .L004done: addl $132,%esp popl %edi popl %esi popl %ebx popl %ebp ret .size ChaCha20_ctr32_nohw,.-.L_ChaCha20_ctr32_nohw_begin .globl ChaCha20_ctr32_ssse3 .hidden ChaCha20_ctr32_ssse3 .type ChaCha20_ctr32_ssse3,@function .align 16 ChaCha20_ctr32_ssse3: .L_ChaCha20_ctr32_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi call .Lpic_point .Lpic_point: popl %eax movl 20(%esp),%edi movl 24(%esp),%esi movl 28(%esp),%ecx movl 32(%esp),%edx movl 36(%esp),%ebx movl %esp,%ebp subl $524,%esp andl $-64,%esp movl %ebp,512(%esp) leal .Lssse3_data-.Lpic_point(%eax),%eax movdqu (%ebx),%xmm3 cmpl $256,%ecx jb .L0061x movl %edx,516(%esp) movl %ebx,520(%esp) subl $256,%ecx leal 384(%esp),%ebp movdqu (%edx),%xmm7 pshufd $0,%xmm3,%xmm0 pshufd $85,%xmm3,%xmm1 pshufd $170,%xmm3,%xmm2 pshufd $255,%xmm3,%xmm3 paddd 48(%eax),%xmm0 pshufd $0,%xmm7,%xmm4 pshufd $85,%xmm7,%xmm5 psubd 64(%eax),%xmm0 pshufd $170,%xmm7,%xmm6 pshufd $255,%xmm7,%xmm7 movdqa %xmm0,64(%ebp) movdqa %xmm1,80(%ebp) movdqa %xmm2,96(%ebp) movdqa %xmm3,112(%ebp) movdqu 16(%edx),%xmm3 movdqa %xmm4,-64(%ebp) movdqa %xmm5,-48(%ebp) movdqa %xmm6,-32(%ebp) 
movdqa %xmm7,-16(%ebp) movdqa 32(%eax),%xmm7 leal 128(%esp),%ebx pshufd $0,%xmm3,%xmm0 pshufd $85,%xmm3,%xmm1 pshufd $170,%xmm3,%xmm2 pshufd $255,%xmm3,%xmm3 pshufd $0,%xmm7,%xmm4 pshufd $85,%xmm7,%xmm5 pshufd $170,%xmm7,%xmm6 pshufd $255,%xmm7,%xmm7 movdqa %xmm0,(%ebp) movdqa %xmm1,16(%ebp) movdqa %xmm2,32(%ebp) movdqa %xmm3,48(%ebp) movdqa %xmm4,-128(%ebp) movdqa %xmm5,-112(%ebp) movdqa %xmm6,-96(%ebp) movdqa %xmm7,-80(%ebp) leal 128(%esi),%esi leal 128(%edi),%edi jmp .L007outer_loop .align 16 .L007outer_loop: movdqa -112(%ebp),%xmm1 movdqa -96(%ebp),%xmm2 movdqa -80(%ebp),%xmm3 movdqa -48(%ebp),%xmm5 movdqa -32(%ebp),%xmm6 movdqa -16(%ebp),%xmm7 movdqa %xmm1,-112(%ebx) movdqa %xmm2,-96(%ebx) movdqa %xmm3,-80(%ebx) movdqa %xmm5,-48(%ebx) movdqa %xmm6,-32(%ebx) movdqa %xmm7,-16(%ebx) movdqa 32(%ebp),%xmm2 movdqa 48(%ebp),%xmm3 movdqa 64(%ebp),%xmm4 movdqa 80(%ebp),%xmm5 movdqa 96(%ebp),%xmm6 movdqa 112(%ebp),%xmm7 paddd 64(%eax),%xmm4 movdqa %xmm2,32(%ebx) movdqa %xmm3,48(%ebx) movdqa %xmm4,64(%ebx) movdqa %xmm5,80(%ebx) movdqa %xmm6,96(%ebx) movdqa %xmm7,112(%ebx) movdqa %xmm4,64(%ebp) movdqa -128(%ebp),%xmm0 movdqa %xmm4,%xmm6 movdqa -64(%ebp),%xmm3 movdqa (%ebp),%xmm4 movdqa 16(%ebp),%xmm5 movl $10,%edx nop .align 16 .L008loop: paddd %xmm3,%xmm0 movdqa %xmm3,%xmm2 pxor %xmm0,%xmm6 pshufb (%eax),%xmm6 paddd %xmm6,%xmm4 pxor %xmm4,%xmm2 movdqa -48(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -112(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 80(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-128(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,64(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 movdqa %xmm4,(%ebx) pshufb (%eax),%xmm7 movdqa %xmm2,-64(%ebx) paddd %xmm7,%xmm5 movdqa 32(%ebx),%xmm4 pxor %xmm5,%xmm3 movdqa -32(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -96(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 
96(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-112(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,80(%ebx) pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 movdqa %xmm5,16(%ebx) pshufb (%eax),%xmm6 movdqa %xmm3,-48(%ebx) paddd %xmm6,%xmm4 movdqa 48(%ebx),%xmm5 pxor %xmm4,%xmm2 movdqa -16(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -80(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 112(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-96(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,96(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 pshufb (%eax),%xmm7 movdqa %xmm2,-32(%ebx) paddd %xmm7,%xmm5 pxor %xmm5,%xmm3 movdqa -48(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -128(%ebx),%xmm0 paddd %xmm3,%xmm1 pxor %xmm1,%xmm7 movdqa %xmm1,-80(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,%xmm6 pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 pshufb (%eax),%xmm6 movdqa %xmm3,-16(%ebx) paddd %xmm6,%xmm4 pxor %xmm4,%xmm2 movdqa -32(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -112(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 64(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-128(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,112(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 movdqa %xmm4,32(%ebx) pshufb (%eax),%xmm7 movdqa %xmm2,-48(%ebx) paddd %xmm7,%xmm5 movdqa (%ebx),%xmm4 pxor %xmm5,%xmm3 movdqa -16(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -96(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 80(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-112(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,64(%ebx) pxor 
%xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 movdqa %xmm5,48(%ebx) pshufb (%eax),%xmm6 movdqa %xmm3,-32(%ebx) paddd %xmm6,%xmm4 movdqa 16(%ebx),%xmm5 pxor %xmm4,%xmm2 movdqa -64(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -80(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 96(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-96(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,80(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 pshufb (%eax),%xmm7 movdqa %xmm2,-16(%ebx) paddd %xmm7,%xmm5 pxor %xmm5,%xmm3 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -128(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 64(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-80(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,96(%ebx) pxor %xmm5,%xmm3 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 por %xmm1,%xmm3 decl %edx jnz .L008loop movdqa %xmm3,-64(%ebx) movdqa %xmm4,(%ebx) movdqa %xmm5,16(%ebx) movdqa %xmm6,64(%ebx) movdqa %xmm7,96(%ebx) movdqa -112(%ebx),%xmm1 movdqa -96(%ebx),%xmm2 movdqa -80(%ebx),%xmm3 paddd -128(%ebp),%xmm0 paddd -112(%ebp),%xmm1 paddd -96(%ebp),%xmm2 paddd -80(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa -64(%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa -48(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa -32(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa -16(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd -64(%ebp),%xmm0 paddd -48(%ebp),%xmm1 paddd -32(%ebp),%xmm2 paddd 
-16(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa (%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa 16(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa 32(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa 48(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd (%ebp),%xmm0 paddd 16(%ebp),%xmm1 paddd 32(%ebp),%xmm2 paddd 48(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa 64(%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa 80(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa 96(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa 112(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd 64(%ebp),%xmm0 paddd 80(%ebp),%xmm1 paddd 96(%ebp),%xmm2 paddd 112(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 208(%esi),%esi pxor %xmm0,%xmm4 pxor %xmm1,%xmm5 pxor %xmm2,%xmm6 pxor %xmm3,%xmm7 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 208(%edi),%edi subl $256,%ecx jnc 
.L007outer_loop addl $256,%ecx jz .L009done movl 520(%esp),%ebx leal -128(%esi),%esi movl 516(%esp),%edx leal -128(%edi),%edi movd 64(%ebp),%xmm2 movdqu (%ebx),%xmm3 paddd 96(%eax),%xmm2 pand 112(%eax),%xmm3 por %xmm2,%xmm3 .L0061x: movdqa 32(%eax),%xmm0 movdqu (%edx),%xmm1 movdqu 16(%edx),%xmm2 movdqa (%eax),%xmm6 movdqa 16(%eax),%xmm7 movl %ebp,48(%esp) movdqa %xmm0,(%esp) movdqa %xmm1,16(%esp) movdqa %xmm2,32(%esp) movdqa %xmm3,48(%esp) movl $10,%edx jmp .L010loop1x .align 16 .L011outer1x: movdqa 80(%eax),%xmm3 movdqa (%esp),%xmm0 movdqa 16(%esp),%xmm1 movdqa 32(%esp),%xmm2 paddd 48(%esp),%xmm3 movl $10,%edx movdqa %xmm3,48(%esp) jmp .L010loop1x .align 16 .L010loop1x: paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $57,%xmm1,%xmm1 pshufd $147,%xmm3,%xmm3 nop paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $147,%xmm1,%xmm1 pshufd $57,%xmm3,%xmm3 decl %edx jnz .L010loop1x paddd (%esp),%xmm0 paddd 16(%esp),%xmm1 paddd 32(%esp),%xmm2 paddd 48(%esp),%xmm3 cmpl $64,%ecx jb .L012tail movdqu (%esi),%xmm4 movdqu 16(%esi),%xmm5 pxor %xmm4,%xmm0 movdqu 32(%esi),%xmm4 pxor %xmm5,%xmm1 movdqu 48(%esi),%xmm5 pxor %xmm4,%xmm2 pxor %xmm5,%xmm3 leal 64(%esi),%esi movdqu %xmm0,(%edi) movdqu %xmm1,16(%edi) movdqu %xmm2,32(%edi) movdqu %xmm3,48(%edi) leal 64(%edi),%edi subl $64,%ecx jnz .L011outer1x jmp .L009done .L012tail: movdqa %xmm0,(%esp) movdqa %xmm1,16(%esp) movdqa %xmm2,32(%esp) movdqa 
%xmm3,48(%esp) xorl %eax,%eax xorl %edx,%edx xorl %ebp,%ebp .L013tail_loop: movb (%esp,%ebp,1),%al movb (%esi,%ebp,1),%dl leal 1(%ebp),%ebp xorb %dl,%al movb %al,-1(%edi,%ebp,1) decl %ecx jnz .L013tail_loop .L009done: movl 512(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size ChaCha20_ctr32_ssse3,.-.L_ChaCha20_ctr32_ssse3_begin .align 64 .Lssse3_data: .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 .byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 .long 1634760805,857760878,2036477234,1797285236 .long 0,1,2,3 .long 4,4,4,4 .long 1,0,0,0 .long 4,0,0,0 .long 0,-1,-1,-1 .align 64 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54 .byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32 .byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111 .byte 114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha-x86_64-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .section __DATA,__const .p2align 6 L$zero: .long 0,0,0,0 L$one: .long 1,0,0,0 L$inc: .long 0,1,2,3 L$four: .long 4,4,4,4 L$incy: .long 0,2,4,6,1,3,5,7 L$eight: .long 8,8,8,8,8,8,8,8 L$rot16: .byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd L$rot24: .byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe L$sigma: .byte 101,120,112,97,110,100,32,51,50,45,98,121,116,101,32,107,0 .p2align 6 L$zeroz: .long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0 L$fourz: .long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0 L$incz: .long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 L$sixteen: .long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text .globl _ChaCha20_ctr32_nohw .private_extern _ChaCha20_ctr32_nohw .p2align 6 _ChaCha20_ctr32_nohw: _CET_ENDBR pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $64+24,%rsp L$ctr32_body: movdqu (%rcx),%xmm1 movdqu 16(%rcx),%xmm2 movdqu (%r8),%xmm3 movdqa L$one(%rip),%xmm4 movdqa %xmm1,16(%rsp) movdqa %xmm2,32(%rsp) movdqa %xmm3,48(%rsp) movq %rdx,%rbp jmp L$oop_outer .p2align 5 L$oop_outer: movl $0x61707865,%eax movl $0x3320646e,%ebx movl $0x79622d32,%ecx movl $0x6b206574,%edx movl 16(%rsp),%r8d movl 20(%rsp),%r9d movl 24(%rsp),%r10d movl 28(%rsp),%r11d movd %xmm3,%r12d movl 52(%rsp),%r13d movl 56(%rsp),%r14d movl 60(%rsp),%r15d movq %rbp,64+0(%rsp) movl $10,%ebp movq %rsi,64+8(%rsp) .byte 102,72,15,126,214 movq %rdi,64+16(%rsp) movq %rsi,%rdi shrq $32,%rdi jmp L$oop .p2align 5 L$oop: addl %r8d,%eax xorl %eax,%r12d roll $16,%r12d addl %r9d,%ebx xorl %ebx,%r13d roll $16,%r13d addl %r12d,%esi xorl %esi,%r8d roll $12,%r8d addl %r13d,%edi xorl %edi,%r9d roll $12,%r9d addl %r8d,%eax xorl %eax,%r12d roll $8,%r12d addl %r9d,%ebx xorl 
%ebx,%r13d roll $8,%r13d addl %r12d,%esi xorl %esi,%r8d roll $7,%r8d addl %r13d,%edi xorl %edi,%r9d roll $7,%r9d movl %esi,32(%rsp) movl %edi,36(%rsp) movl 40(%rsp),%esi movl 44(%rsp),%edi addl %r10d,%ecx xorl %ecx,%r14d roll $16,%r14d addl %r11d,%edx xorl %edx,%r15d roll $16,%r15d addl %r14d,%esi xorl %esi,%r10d roll $12,%r10d addl %r15d,%edi xorl %edi,%r11d roll $12,%r11d addl %r10d,%ecx xorl %ecx,%r14d roll $8,%r14d addl %r11d,%edx xorl %edx,%r15d roll $8,%r15d addl %r14d,%esi xorl %esi,%r10d roll $7,%r10d addl %r15d,%edi xorl %edi,%r11d roll $7,%r11d addl %r9d,%eax xorl %eax,%r15d roll $16,%r15d addl %r10d,%ebx xorl %ebx,%r12d roll $16,%r12d addl %r15d,%esi xorl %esi,%r9d roll $12,%r9d addl %r12d,%edi xorl %edi,%r10d roll $12,%r10d addl %r9d,%eax xorl %eax,%r15d roll $8,%r15d addl %r10d,%ebx xorl %ebx,%r12d roll $8,%r12d addl %r15d,%esi xorl %esi,%r9d roll $7,%r9d addl %r12d,%edi xorl %edi,%r10d roll $7,%r10d movl %esi,40(%rsp) movl %edi,44(%rsp) movl 32(%rsp),%esi movl 36(%rsp),%edi addl %r11d,%ecx xorl %ecx,%r13d roll $16,%r13d addl %r8d,%edx xorl %edx,%r14d roll $16,%r14d addl %r13d,%esi xorl %esi,%r11d roll $12,%r11d addl %r14d,%edi xorl %edi,%r8d roll $12,%r8d addl %r11d,%ecx xorl %ecx,%r13d roll $8,%r13d addl %r8d,%edx xorl %edx,%r14d roll $8,%r14d addl %r13d,%esi xorl %esi,%r11d roll $7,%r11d addl %r14d,%edi xorl %edi,%r8d roll $7,%r8d decl %ebp jnz L$oop movl %edi,36(%rsp) movl %esi,32(%rsp) movq 64(%rsp),%rbp movdqa %xmm2,%xmm1 movq 64+8(%rsp),%rsi paddd %xmm4,%xmm3 movq 64+16(%rsp),%rdi addl $0x61707865,%eax addl $0x3320646e,%ebx addl $0x79622d32,%ecx addl $0x6b206574,%edx addl 16(%rsp),%r8d addl 20(%rsp),%r9d addl 24(%rsp),%r10d addl 28(%rsp),%r11d addl 48(%rsp),%r12d addl 52(%rsp),%r13d addl 56(%rsp),%r14d addl 60(%rsp),%r15d paddd 32(%rsp),%xmm1 cmpq $64,%rbp jb L$tail xorl 0(%rsi),%eax xorl 4(%rsi),%ebx xorl 8(%rsi),%ecx xorl 12(%rsi),%edx xorl 16(%rsi),%r8d xorl 20(%rsi),%r9d xorl 24(%rsi),%r10d xorl 28(%rsi),%r11d movdqu 32(%rsi),%xmm0 xorl 
48(%rsi),%r12d xorl 52(%rsi),%r13d xorl 56(%rsi),%r14d xorl 60(%rsi),%r15d leaq 64(%rsi),%rsi pxor %xmm1,%xmm0 movdqa %xmm2,32(%rsp) movd %xmm3,48(%rsp) movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) movdqu %xmm0,32(%rdi) movl %r12d,48(%rdi) movl %r13d,52(%rdi) movl %r14d,56(%rdi) movl %r15d,60(%rdi) leaq 64(%rdi),%rdi subq $64,%rbp jnz L$oop_outer jmp L$done .p2align 4 L$tail: movl %eax,0(%rsp) movl %ebx,4(%rsp) xorq %rbx,%rbx movl %ecx,8(%rsp) movl %edx,12(%rsp) movl %r8d,16(%rsp) movl %r9d,20(%rsp) movl %r10d,24(%rsp) movl %r11d,28(%rsp) movdqa %xmm1,32(%rsp) movl %r12d,48(%rsp) movl %r13d,52(%rsp) movl %r14d,56(%rsp) movl %r15d,60(%rsp) L$oop_tail: movzbl (%rsi,%rbx,1),%eax movzbl (%rsp,%rbx,1),%edx leaq 1(%rbx),%rbx xorl %edx,%eax movb %al,-1(%rdi,%rbx,1) decq %rbp jnz L$oop_tail L$done: leaq 64+24+48(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$no_data: ret .globl _ChaCha20_ctr32_ssse3 .private_extern _ChaCha20_ctr32_ssse3 .p2align 5 _ChaCha20_ctr32_ssse3: _CET_ENDBR movq %rsp,%r9 subq $64+8,%rsp movdqa L$sigma(%rip),%xmm0 movdqu (%rcx),%xmm1 movdqu 16(%rcx),%xmm2 movdqu (%r8),%xmm3 movdqa L$rot16(%rip),%xmm6 movdqa L$rot24(%rip),%xmm7 movdqa %xmm0,0(%rsp) movdqa %xmm1,16(%rsp) movdqa %xmm2,32(%rsp) movdqa %xmm3,48(%rsp) movq $10,%r8 jmp L$oop_ssse3 .p2align 5 L$oop_outer_ssse3: movdqa L$one(%rip),%xmm3 movdqa 0(%rsp),%xmm0 movdqa 16(%rsp),%xmm1 movdqa 32(%rsp),%xmm2 paddd 48(%rsp),%xmm3 movq $10,%r8 movdqa %xmm3,48(%rsp) jmp L$oop_ssse3 .p2align 5 L$oop_ssse3: paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld 
$7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $57,%xmm1,%xmm1 pshufd $147,%xmm3,%xmm3 nop paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $147,%xmm1,%xmm1 pshufd $57,%xmm3,%xmm3 decq %r8 jnz L$oop_ssse3 paddd 0(%rsp),%xmm0 paddd 16(%rsp),%xmm1 paddd 32(%rsp),%xmm2 paddd 48(%rsp),%xmm3 cmpq $64,%rdx jb L$tail_ssse3 movdqu 0(%rsi),%xmm4 movdqu 16(%rsi),%xmm5 pxor %xmm4,%xmm0 movdqu 32(%rsi),%xmm4 pxor %xmm5,%xmm1 movdqu 48(%rsi),%xmm5 leaq 64(%rsi),%rsi pxor %xmm4,%xmm2 pxor %xmm5,%xmm3 movdqu %xmm0,0(%rdi) movdqu %xmm1,16(%rdi) movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) leaq 64(%rdi),%rdi subq $64,%rdx jnz L$oop_outer_ssse3 jmp L$done_ssse3 .p2align 4 L$tail_ssse3: movdqa %xmm0,0(%rsp) movdqa %xmm1,16(%rsp) movdqa %xmm2,32(%rsp) movdqa %xmm3,48(%rsp) xorq %r8,%r8 L$oop_tail_ssse3: movzbl (%rsi,%r8,1),%eax movzbl (%rsp,%r8,1),%ecx leaq 1(%r8),%r8 xorl %ecx,%eax movb %al,-1(%rdi,%r8,1) decq %rdx jnz L$oop_tail_ssse3 L$done_ssse3: leaq (%r9),%rsp L$ssse3_epilogue: ret .globl _ChaCha20_ctr32_ssse3_4x .private_extern _ChaCha20_ctr32_ssse3_4x .p2align 5 _ChaCha20_ctr32_ssse3_4x: _CET_ENDBR movq %rsp,%r9 subq $0x140+8,%rsp movdqa L$sigma(%rip),%xmm11 movdqu (%rcx),%xmm15 movdqu 16(%rcx),%xmm7 movdqu (%r8),%xmm3 leaq 256(%rsp),%rcx leaq L$rot16(%rip),%r10 leaq L$rot24(%rip),%r11 pshufd $0x00,%xmm11,%xmm8 pshufd $0x55,%xmm11,%xmm9 movdqa %xmm8,64(%rsp) pshufd $0xaa,%xmm11,%xmm10 movdqa %xmm9,80(%rsp) pshufd $0xff,%xmm11,%xmm11 movdqa %xmm10,96(%rsp) movdqa %xmm11,112(%rsp) pshufd $0x00,%xmm15,%xmm12 pshufd $0x55,%xmm15,%xmm13 movdqa %xmm12,128-256(%rcx) pshufd $0xaa,%xmm15,%xmm14 movdqa %xmm13,144-256(%rcx) pshufd $0xff,%xmm15,%xmm15 movdqa %xmm14,160-256(%rcx) movdqa 
%xmm15,176-256(%rcx) pshufd $0x00,%xmm7,%xmm4 pshufd $0x55,%xmm7,%xmm5 movdqa %xmm4,192-256(%rcx) pshufd $0xaa,%xmm7,%xmm6 movdqa %xmm5,208-256(%rcx) pshufd $0xff,%xmm7,%xmm7 movdqa %xmm6,224-256(%rcx) movdqa %xmm7,240-256(%rcx) pshufd $0x00,%xmm3,%xmm0 pshufd $0x55,%xmm3,%xmm1 paddd L$inc(%rip),%xmm0 pshufd $0xaa,%xmm3,%xmm2 movdqa %xmm1,272-256(%rcx) pshufd $0xff,%xmm3,%xmm3 movdqa %xmm2,288-256(%rcx) movdqa %xmm3,304-256(%rcx) jmp L$oop_enter4x .p2align 5 L$oop_outer4x: movdqa 64(%rsp),%xmm8 movdqa 80(%rsp),%xmm9 movdqa 96(%rsp),%xmm10 movdqa 112(%rsp),%xmm11 movdqa 128-256(%rcx),%xmm12 movdqa 144-256(%rcx),%xmm13 movdqa 160-256(%rcx),%xmm14 movdqa 176-256(%rcx),%xmm15 movdqa 192-256(%rcx),%xmm4 movdqa 208-256(%rcx),%xmm5 movdqa 224-256(%rcx),%xmm6 movdqa 240-256(%rcx),%xmm7 movdqa 256-256(%rcx),%xmm0 movdqa 272-256(%rcx),%xmm1 movdqa 288-256(%rcx),%xmm2 movdqa 304-256(%rcx),%xmm3 paddd L$four(%rip),%xmm0 L$oop_enter4x: movdqa %xmm6,32(%rsp) movdqa %xmm7,48(%rsp) movdqa (%r10),%xmm7 movl $10,%eax movdqa %xmm0,256-256(%rcx) jmp L$oop4x .p2align 5 L$oop4x: paddd %xmm12,%xmm8 paddd %xmm13,%xmm9 pxor %xmm8,%xmm0 pxor %xmm9,%xmm1 .byte 102,15,56,0,199 .byte 102,15,56,0,207 paddd %xmm0,%xmm4 paddd %xmm1,%xmm5 pxor %xmm4,%xmm12 pxor %xmm5,%xmm13 movdqa %xmm12,%xmm6 pslld $12,%xmm12 psrld $20,%xmm6 movdqa %xmm13,%xmm7 pslld $12,%xmm13 por %xmm6,%xmm12 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm13 paddd %xmm12,%xmm8 paddd %xmm13,%xmm9 pxor %xmm8,%xmm0 pxor %xmm9,%xmm1 .byte 102,15,56,0,198 .byte 102,15,56,0,206 paddd %xmm0,%xmm4 paddd %xmm1,%xmm5 pxor %xmm4,%xmm12 pxor %xmm5,%xmm13 movdqa %xmm12,%xmm7 pslld $7,%xmm12 psrld $25,%xmm7 movdqa %xmm13,%xmm6 pslld $7,%xmm13 por %xmm7,%xmm12 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm13 movdqa %xmm4,0(%rsp) movdqa %xmm5,16(%rsp) movdqa 32(%rsp),%xmm4 movdqa 48(%rsp),%xmm5 paddd %xmm14,%xmm10 paddd %xmm15,%xmm11 pxor %xmm10,%xmm2 pxor %xmm11,%xmm3 .byte 102,15,56,0,215 .byte 102,15,56,0,223 paddd %xmm2,%xmm4 paddd 
%xmm3,%xmm5 pxor %xmm4,%xmm14 pxor %xmm5,%xmm15 movdqa %xmm14,%xmm6 pslld $12,%xmm14 psrld $20,%xmm6 movdqa %xmm15,%xmm7 pslld $12,%xmm15 por %xmm6,%xmm14 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm15 paddd %xmm14,%xmm10 paddd %xmm15,%xmm11 pxor %xmm10,%xmm2 pxor %xmm11,%xmm3 .byte 102,15,56,0,214 .byte 102,15,56,0,222 paddd %xmm2,%xmm4 paddd %xmm3,%xmm5 pxor %xmm4,%xmm14 pxor %xmm5,%xmm15 movdqa %xmm14,%xmm7 pslld $7,%xmm14 psrld $25,%xmm7 movdqa %xmm15,%xmm6 pslld $7,%xmm15 por %xmm7,%xmm14 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm15 paddd %xmm13,%xmm8 paddd %xmm14,%xmm9 pxor %xmm8,%xmm3 pxor %xmm9,%xmm0 .byte 102,15,56,0,223 .byte 102,15,56,0,199 paddd %xmm3,%xmm4 paddd %xmm0,%xmm5 pxor %xmm4,%xmm13 pxor %xmm5,%xmm14 movdqa %xmm13,%xmm6 pslld $12,%xmm13 psrld $20,%xmm6 movdqa %xmm14,%xmm7 pslld $12,%xmm14 por %xmm6,%xmm13 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm14 paddd %xmm13,%xmm8 paddd %xmm14,%xmm9 pxor %xmm8,%xmm3 pxor %xmm9,%xmm0 .byte 102,15,56,0,222 .byte 102,15,56,0,198 paddd %xmm3,%xmm4 paddd %xmm0,%xmm5 pxor %xmm4,%xmm13 pxor %xmm5,%xmm14 movdqa %xmm13,%xmm7 pslld $7,%xmm13 psrld $25,%xmm7 movdqa %xmm14,%xmm6 pslld $7,%xmm14 por %xmm7,%xmm13 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm14 movdqa %xmm4,32(%rsp) movdqa %xmm5,48(%rsp) movdqa 0(%rsp),%xmm4 movdqa 16(%rsp),%xmm5 paddd %xmm15,%xmm10 paddd %xmm12,%xmm11 pxor %xmm10,%xmm1 pxor %xmm11,%xmm2 .byte 102,15,56,0,207 .byte 102,15,56,0,215 paddd %xmm1,%xmm4 paddd %xmm2,%xmm5 pxor %xmm4,%xmm15 pxor %xmm5,%xmm12 movdqa %xmm15,%xmm6 pslld $12,%xmm15 psrld $20,%xmm6 movdqa %xmm12,%xmm7 pslld $12,%xmm12 por %xmm6,%xmm15 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm12 paddd %xmm15,%xmm10 paddd %xmm12,%xmm11 pxor %xmm10,%xmm1 pxor %xmm11,%xmm2 .byte 102,15,56,0,206 .byte 102,15,56,0,214 paddd %xmm1,%xmm4 paddd %xmm2,%xmm5 pxor %xmm4,%xmm15 pxor %xmm5,%xmm12 movdqa %xmm15,%xmm7 pslld $7,%xmm15 psrld $25,%xmm7 movdqa %xmm12,%xmm6 pslld $7,%xmm12 por %xmm7,%xmm15 psrld 
$25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm12 decl %eax jnz L$oop4x paddd 64(%rsp),%xmm8 paddd 80(%rsp),%xmm9 paddd 96(%rsp),%xmm10 paddd 112(%rsp),%xmm11 movdqa %xmm8,%xmm6 punpckldq %xmm9,%xmm8 movdqa %xmm10,%xmm7 punpckldq %xmm11,%xmm10 punpckhdq %xmm9,%xmm6 punpckhdq %xmm11,%xmm7 movdqa %xmm8,%xmm9 punpcklqdq %xmm10,%xmm8 movdqa %xmm6,%xmm11 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm10,%xmm9 punpckhqdq %xmm7,%xmm11 paddd 128-256(%rcx),%xmm12 paddd 144-256(%rcx),%xmm13 paddd 160-256(%rcx),%xmm14 paddd 176-256(%rcx),%xmm15 movdqa %xmm8,0(%rsp) movdqa %xmm9,16(%rsp) movdqa 32(%rsp),%xmm8 movdqa 48(%rsp),%xmm9 movdqa %xmm12,%xmm10 punpckldq %xmm13,%xmm12 movdqa %xmm14,%xmm7 punpckldq %xmm15,%xmm14 punpckhdq %xmm13,%xmm10 punpckhdq %xmm15,%xmm7 movdqa %xmm12,%xmm13 punpcklqdq %xmm14,%xmm12 movdqa %xmm10,%xmm15 punpcklqdq %xmm7,%xmm10 punpckhqdq %xmm14,%xmm13 punpckhqdq %xmm7,%xmm15 paddd 192-256(%rcx),%xmm4 paddd 208-256(%rcx),%xmm5 paddd 224-256(%rcx),%xmm8 paddd 240-256(%rcx),%xmm9 movdqa %xmm6,32(%rsp) movdqa %xmm11,48(%rsp) movdqa %xmm4,%xmm14 punpckldq %xmm5,%xmm4 movdqa %xmm8,%xmm7 punpckldq %xmm9,%xmm8 punpckhdq %xmm5,%xmm14 punpckhdq %xmm9,%xmm7 movdqa %xmm4,%xmm5 punpcklqdq %xmm8,%xmm4 movdqa %xmm14,%xmm9 punpcklqdq %xmm7,%xmm14 punpckhqdq %xmm8,%xmm5 punpckhqdq %xmm7,%xmm9 paddd 256-256(%rcx),%xmm0 paddd 272-256(%rcx),%xmm1 paddd 288-256(%rcx),%xmm2 paddd 304-256(%rcx),%xmm3 movdqa %xmm0,%xmm8 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm8 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm8,%xmm3 punpcklqdq %xmm7,%xmm8 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 cmpq $256,%rdx jb L$tail4x movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu 
%xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 leaq 128(%rsi),%rsi pxor 16(%rsp),%xmm6 pxor %xmm13,%xmm11 pxor %xmm5,%xmm2 pxor %xmm1,%xmm7 movdqu %xmm6,64(%rdi) movdqu 0(%rsi),%xmm6 movdqu %xmm11,80(%rdi) movdqu 16(%rsi),%xmm11 movdqu %xmm2,96(%rdi) movdqu 32(%rsi),%xmm2 movdqu %xmm7,112(%rdi) leaq 128(%rdi),%rdi movdqu 48(%rsi),%xmm7 pxor 32(%rsp),%xmm6 pxor %xmm10,%xmm11 pxor %xmm14,%xmm2 pxor %xmm8,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 leaq 128(%rsi),%rsi pxor 48(%rsp),%xmm6 pxor %xmm15,%xmm11 pxor %xmm9,%xmm2 pxor %xmm3,%xmm7 movdqu %xmm6,64(%rdi) movdqu %xmm11,80(%rdi) movdqu %xmm2,96(%rdi) movdqu %xmm7,112(%rdi) leaq 128(%rdi),%rdi subq $256,%rdx jnz L$oop_outer4x jmp L$done4x L$tail4x: cmpq $192,%rdx jae L$192_or_more4x cmpq $128,%rdx jae L$128_or_more4x cmpq $64,%rdx jae L$64_or_more4x xorq %r10,%r10 movdqa %xmm12,16(%rsp) movdqa %xmm4,32(%rsp) movdqa %xmm0,48(%rsp) jmp L$oop_tail4x .p2align 5 L$64_or_more4x: movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu %xmm11,16(%rdi) movdqu %xmm2,32(%rdi) movdqu %xmm7,48(%rdi) je L$done4x movdqa 16(%rsp),%xmm6 leaq 64(%rsi),%rsi xorq %r10,%r10 movdqa %xmm6,0(%rsp) movdqa %xmm13,16(%rsp) leaq 64(%rdi),%rdi movdqa %xmm5,32(%rsp) subq $64,%rdx movdqa %xmm1,48(%rsp) jmp L$oop_tail4x .p2align 5 L$128_or_more4x: movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 pxor 16(%rsp),%xmm6 pxor %xmm13,%xmm11 pxor %xmm5,%xmm2 pxor %xmm1,%xmm7 movdqu %xmm6,64(%rdi) movdqu 
%xmm11,80(%rdi) movdqu %xmm2,96(%rdi) movdqu %xmm7,112(%rdi) je L$done4x movdqa 32(%rsp),%xmm6 leaq 128(%rsi),%rsi xorq %r10,%r10 movdqa %xmm6,0(%rsp) movdqa %xmm10,16(%rsp) leaq 128(%rdi),%rdi movdqa %xmm14,32(%rsp) subq $128,%rdx movdqa %xmm8,48(%rsp) jmp L$oop_tail4x .p2align 5 L$192_or_more4x: movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 leaq 128(%rsi),%rsi pxor 16(%rsp),%xmm6 pxor %xmm13,%xmm11 pxor %xmm5,%xmm2 pxor %xmm1,%xmm7 movdqu %xmm6,64(%rdi) movdqu 0(%rsi),%xmm6 movdqu %xmm11,80(%rdi) movdqu 16(%rsi),%xmm11 movdqu %xmm2,96(%rdi) movdqu 32(%rsi),%xmm2 movdqu %xmm7,112(%rdi) leaq 128(%rdi),%rdi movdqu 48(%rsi),%xmm7 pxor 32(%rsp),%xmm6 pxor %xmm10,%xmm11 pxor %xmm14,%xmm2 pxor %xmm8,%xmm7 movdqu %xmm6,0(%rdi) movdqu %xmm11,16(%rdi) movdqu %xmm2,32(%rdi) movdqu %xmm7,48(%rdi) je L$done4x movdqa 48(%rsp),%xmm6 leaq 64(%rsi),%rsi xorq %r10,%r10 movdqa %xmm6,0(%rsp) movdqa %xmm15,16(%rsp) leaq 64(%rdi),%rdi movdqa %xmm9,32(%rsp) subq $192,%rdx movdqa %xmm3,48(%rsp) L$oop_tail4x: movzbl (%rsi,%r10,1),%eax movzbl (%rsp,%r10,1),%ecx leaq 1(%r10),%r10 xorl %ecx,%eax movb %al,-1(%rdi,%r10,1) decq %rdx jnz L$oop_tail4x L$done4x: leaq (%r9),%rsp L$4x_epilogue: ret .globl _ChaCha20_ctr32_avx2 .private_extern _ChaCha20_ctr32_avx2 .p2align 5 _ChaCha20_ctr32_avx2: _CET_ENDBR movq %rsp,%r9 subq $0x280+8,%rsp andq $-32,%rsp vzeroupper vbroadcasti128 L$sigma(%rip),%ymm11 vbroadcasti128 (%rcx),%ymm3 vbroadcasti128 16(%rcx),%ymm15 vbroadcasti128 (%r8),%ymm7 leaq 256(%rsp),%rcx leaq 512(%rsp),%rax leaq L$rot16(%rip),%r10 leaq L$rot24(%rip),%r11 vpshufd $0x00,%ymm11,%ymm8 vpshufd $0x55,%ymm11,%ymm9 vmovdqa %ymm8,128-256(%rcx) vpshufd $0xaa,%ymm11,%ymm10 vmovdqa %ymm9,160-256(%rcx) vpshufd 
$0xff,%ymm11,%ymm11 vmovdqa %ymm10,192-256(%rcx) vmovdqa %ymm11,224-256(%rcx) vpshufd $0x00,%ymm3,%ymm0 vpshufd $0x55,%ymm3,%ymm1 vmovdqa %ymm0,256-256(%rcx) vpshufd $0xaa,%ymm3,%ymm2 vmovdqa %ymm1,288-256(%rcx) vpshufd $0xff,%ymm3,%ymm3 vmovdqa %ymm2,320-256(%rcx) vmovdqa %ymm3,352-256(%rcx) vpshufd $0x00,%ymm15,%ymm12 vpshufd $0x55,%ymm15,%ymm13 vmovdqa %ymm12,384-512(%rax) vpshufd $0xaa,%ymm15,%ymm14 vmovdqa %ymm13,416-512(%rax) vpshufd $0xff,%ymm15,%ymm15 vmovdqa %ymm14,448-512(%rax) vmovdqa %ymm15,480-512(%rax) vpshufd $0x00,%ymm7,%ymm4 vpshufd $0x55,%ymm7,%ymm5 vpaddd L$incy(%rip),%ymm4,%ymm4 vpshufd $0xaa,%ymm7,%ymm6 vmovdqa %ymm5,544-512(%rax) vpshufd $0xff,%ymm7,%ymm7 vmovdqa %ymm6,576-512(%rax) vmovdqa %ymm7,608-512(%rax) jmp L$oop_enter8x .p2align 5 L$oop_outer8x: vmovdqa 128-256(%rcx),%ymm8 vmovdqa 160-256(%rcx),%ymm9 vmovdqa 192-256(%rcx),%ymm10 vmovdqa 224-256(%rcx),%ymm11 vmovdqa 256-256(%rcx),%ymm0 vmovdqa 288-256(%rcx),%ymm1 vmovdqa 320-256(%rcx),%ymm2 vmovdqa 352-256(%rcx),%ymm3 vmovdqa 384-512(%rax),%ymm12 vmovdqa 416-512(%rax),%ymm13 vmovdqa 448-512(%rax),%ymm14 vmovdqa 480-512(%rax),%ymm15 vmovdqa 512-512(%rax),%ymm4 vmovdqa 544-512(%rax),%ymm5 vmovdqa 576-512(%rax),%ymm6 vmovdqa 608-512(%rax),%ymm7 vpaddd L$eight(%rip),%ymm4,%ymm4 L$oop_enter8x: vmovdqa %ymm14,64(%rsp) vmovdqa %ymm15,96(%rsp) vbroadcasti128 (%r10),%ymm15 vmovdqa %ymm4,512-512(%rax) movl $10,%eax jmp L$oop8x .p2align 5 L$oop8x: vpaddd %ymm0,%ymm8,%ymm8 vpxor %ymm4,%ymm8,%ymm4 vpshufb %ymm15,%ymm4,%ymm4 vpaddd %ymm1,%ymm9,%ymm9 vpxor %ymm5,%ymm9,%ymm5 vpshufb %ymm15,%ymm5,%ymm5 vpaddd %ymm4,%ymm12,%ymm12 vpxor %ymm0,%ymm12,%ymm0 vpslld $12,%ymm0,%ymm14 vpsrld $20,%ymm0,%ymm0 vpor %ymm0,%ymm14,%ymm0 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm5,%ymm13,%ymm13 vpxor %ymm1,%ymm13,%ymm1 vpslld $12,%ymm1,%ymm15 vpsrld $20,%ymm1,%ymm1 vpor %ymm1,%ymm15,%ymm1 vpaddd %ymm0,%ymm8,%ymm8 vpxor %ymm4,%ymm8,%ymm4 vpshufb %ymm14,%ymm4,%ymm4 vpaddd %ymm1,%ymm9,%ymm9 vpxor %ymm5,%ymm9,%ymm5 vpshufb 
%ymm14,%ymm5,%ymm5 vpaddd %ymm4,%ymm12,%ymm12 vpxor %ymm0,%ymm12,%ymm0 vpslld $7,%ymm0,%ymm15 vpsrld $25,%ymm0,%ymm0 vpor %ymm0,%ymm15,%ymm0 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm5,%ymm13,%ymm13 vpxor %ymm1,%ymm13,%ymm1 vpslld $7,%ymm1,%ymm14 vpsrld $25,%ymm1,%ymm1 vpor %ymm1,%ymm14,%ymm1 vmovdqa %ymm12,0(%rsp) vmovdqa %ymm13,32(%rsp) vmovdqa 64(%rsp),%ymm12 vmovdqa 96(%rsp),%ymm13 vpaddd %ymm2,%ymm10,%ymm10 vpxor %ymm6,%ymm10,%ymm6 vpshufb %ymm15,%ymm6,%ymm6 vpaddd %ymm3,%ymm11,%ymm11 vpxor %ymm7,%ymm11,%ymm7 vpshufb %ymm15,%ymm7,%ymm7 vpaddd %ymm6,%ymm12,%ymm12 vpxor %ymm2,%ymm12,%ymm2 vpslld $12,%ymm2,%ymm14 vpsrld $20,%ymm2,%ymm2 vpor %ymm2,%ymm14,%ymm2 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm7,%ymm13,%ymm13 vpxor %ymm3,%ymm13,%ymm3 vpslld $12,%ymm3,%ymm15 vpsrld $20,%ymm3,%ymm3 vpor %ymm3,%ymm15,%ymm3 vpaddd %ymm2,%ymm10,%ymm10 vpxor %ymm6,%ymm10,%ymm6 vpshufb %ymm14,%ymm6,%ymm6 vpaddd %ymm3,%ymm11,%ymm11 vpxor %ymm7,%ymm11,%ymm7 vpshufb %ymm14,%ymm7,%ymm7 vpaddd %ymm6,%ymm12,%ymm12 vpxor %ymm2,%ymm12,%ymm2 vpslld $7,%ymm2,%ymm15 vpsrld $25,%ymm2,%ymm2 vpor %ymm2,%ymm15,%ymm2 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm7,%ymm13,%ymm13 vpxor %ymm3,%ymm13,%ymm3 vpslld $7,%ymm3,%ymm14 vpsrld $25,%ymm3,%ymm3 vpor %ymm3,%ymm14,%ymm3 vpaddd %ymm1,%ymm8,%ymm8 vpxor %ymm7,%ymm8,%ymm7 vpshufb %ymm15,%ymm7,%ymm7 vpaddd %ymm2,%ymm9,%ymm9 vpxor %ymm4,%ymm9,%ymm4 vpshufb %ymm15,%ymm4,%ymm4 vpaddd %ymm7,%ymm12,%ymm12 vpxor %ymm1,%ymm12,%ymm1 vpslld $12,%ymm1,%ymm14 vpsrld $20,%ymm1,%ymm1 vpor %ymm1,%ymm14,%ymm1 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm4,%ymm13,%ymm13 vpxor %ymm2,%ymm13,%ymm2 vpslld $12,%ymm2,%ymm15 vpsrld $20,%ymm2,%ymm2 vpor %ymm2,%ymm15,%ymm2 vpaddd %ymm1,%ymm8,%ymm8 vpxor %ymm7,%ymm8,%ymm7 vpshufb %ymm14,%ymm7,%ymm7 vpaddd %ymm2,%ymm9,%ymm9 vpxor %ymm4,%ymm9,%ymm4 vpshufb %ymm14,%ymm4,%ymm4 vpaddd %ymm7,%ymm12,%ymm12 vpxor %ymm1,%ymm12,%ymm1 vpslld $7,%ymm1,%ymm15 vpsrld $25,%ymm1,%ymm1 vpor %ymm1,%ymm15,%ymm1 vbroadcasti128 (%r10),%ymm15 vpaddd 
%ymm4,%ymm13,%ymm13 vpxor %ymm2,%ymm13,%ymm2 vpslld $7,%ymm2,%ymm14 vpsrld $25,%ymm2,%ymm2 vpor %ymm2,%ymm14,%ymm2 vmovdqa %ymm12,64(%rsp) vmovdqa %ymm13,96(%rsp) vmovdqa 0(%rsp),%ymm12 vmovdqa 32(%rsp),%ymm13 vpaddd %ymm3,%ymm10,%ymm10 vpxor %ymm5,%ymm10,%ymm5 vpshufb %ymm15,%ymm5,%ymm5 vpaddd %ymm0,%ymm11,%ymm11 vpxor %ymm6,%ymm11,%ymm6 vpshufb %ymm15,%ymm6,%ymm6 vpaddd %ymm5,%ymm12,%ymm12 vpxor %ymm3,%ymm12,%ymm3 vpslld $12,%ymm3,%ymm14 vpsrld $20,%ymm3,%ymm3 vpor %ymm3,%ymm14,%ymm3 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm6,%ymm13,%ymm13 vpxor %ymm0,%ymm13,%ymm0 vpslld $12,%ymm0,%ymm15 vpsrld $20,%ymm0,%ymm0 vpor %ymm0,%ymm15,%ymm0 vpaddd %ymm3,%ymm10,%ymm10 vpxor %ymm5,%ymm10,%ymm5 vpshufb %ymm14,%ymm5,%ymm5 vpaddd %ymm0,%ymm11,%ymm11 vpxor %ymm6,%ymm11,%ymm6 vpshufb %ymm14,%ymm6,%ymm6 vpaddd %ymm5,%ymm12,%ymm12 vpxor %ymm3,%ymm12,%ymm3 vpslld $7,%ymm3,%ymm15 vpsrld $25,%ymm3,%ymm3 vpor %ymm3,%ymm15,%ymm3 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm6,%ymm13,%ymm13 vpxor %ymm0,%ymm13,%ymm0 vpslld $7,%ymm0,%ymm14 vpsrld $25,%ymm0,%ymm0 vpor %ymm0,%ymm14,%ymm0 decl %eax jnz L$oop8x leaq 512(%rsp),%rax vpaddd 128-256(%rcx),%ymm8,%ymm8 vpaddd 160-256(%rcx),%ymm9,%ymm9 vpaddd 192-256(%rcx),%ymm10,%ymm10 vpaddd 224-256(%rcx),%ymm11,%ymm11 vpunpckldq %ymm9,%ymm8,%ymm14 vpunpckldq %ymm11,%ymm10,%ymm15 vpunpckhdq %ymm9,%ymm8,%ymm8 vpunpckhdq %ymm11,%ymm10,%ymm10 vpunpcklqdq %ymm15,%ymm14,%ymm9 vpunpckhqdq %ymm15,%ymm14,%ymm14 vpunpcklqdq %ymm10,%ymm8,%ymm11 vpunpckhqdq %ymm10,%ymm8,%ymm8 vpaddd 256-256(%rcx),%ymm0,%ymm0 vpaddd 288-256(%rcx),%ymm1,%ymm1 vpaddd 320-256(%rcx),%ymm2,%ymm2 vpaddd 352-256(%rcx),%ymm3,%ymm3 vpunpckldq %ymm1,%ymm0,%ymm10 vpunpckldq %ymm3,%ymm2,%ymm15 vpunpckhdq %ymm1,%ymm0,%ymm0 vpunpckhdq %ymm3,%ymm2,%ymm2 vpunpcklqdq %ymm15,%ymm10,%ymm1 vpunpckhqdq %ymm15,%ymm10,%ymm10 vpunpcklqdq %ymm2,%ymm0,%ymm3 vpunpckhqdq %ymm2,%ymm0,%ymm0 vperm2i128 $0x20,%ymm1,%ymm9,%ymm15 vperm2i128 $0x31,%ymm1,%ymm9,%ymm1 vperm2i128 $0x20,%ymm10,%ymm14,%ymm9 vperm2i128 
$0x31,%ymm10,%ymm14,%ymm10 vperm2i128 $0x20,%ymm3,%ymm11,%ymm14 vperm2i128 $0x31,%ymm3,%ymm11,%ymm3 vperm2i128 $0x20,%ymm0,%ymm8,%ymm11 vperm2i128 $0x31,%ymm0,%ymm8,%ymm0 vmovdqa %ymm15,0(%rsp) vmovdqa %ymm9,32(%rsp) vmovdqa 64(%rsp),%ymm15 vmovdqa 96(%rsp),%ymm9 vpaddd 384-512(%rax),%ymm12,%ymm12 vpaddd 416-512(%rax),%ymm13,%ymm13 vpaddd 448-512(%rax),%ymm15,%ymm15 vpaddd 480-512(%rax),%ymm9,%ymm9 vpunpckldq %ymm13,%ymm12,%ymm2 vpunpckldq %ymm9,%ymm15,%ymm8 vpunpckhdq %ymm13,%ymm12,%ymm12 vpunpckhdq %ymm9,%ymm15,%ymm15 vpunpcklqdq %ymm8,%ymm2,%ymm13 vpunpckhqdq %ymm8,%ymm2,%ymm2 vpunpcklqdq %ymm15,%ymm12,%ymm9 vpunpckhqdq %ymm15,%ymm12,%ymm12 vpaddd 512-512(%rax),%ymm4,%ymm4 vpaddd 544-512(%rax),%ymm5,%ymm5 vpaddd 576-512(%rax),%ymm6,%ymm6 vpaddd 608-512(%rax),%ymm7,%ymm7 vpunpckldq %ymm5,%ymm4,%ymm15 vpunpckldq %ymm7,%ymm6,%ymm8 vpunpckhdq %ymm5,%ymm4,%ymm4 vpunpckhdq %ymm7,%ymm6,%ymm6 vpunpcklqdq %ymm8,%ymm15,%ymm5 vpunpckhqdq %ymm8,%ymm15,%ymm15 vpunpcklqdq %ymm6,%ymm4,%ymm7 vpunpckhqdq %ymm6,%ymm4,%ymm4 vperm2i128 $0x20,%ymm5,%ymm13,%ymm8 vperm2i128 $0x31,%ymm5,%ymm13,%ymm5 vperm2i128 $0x20,%ymm15,%ymm2,%ymm13 vperm2i128 $0x31,%ymm15,%ymm2,%ymm15 vperm2i128 $0x20,%ymm7,%ymm9,%ymm2 vperm2i128 $0x31,%ymm7,%ymm9,%ymm7 vperm2i128 $0x20,%ymm4,%ymm12,%ymm9 vperm2i128 $0x31,%ymm4,%ymm12,%ymm4 vmovdqa 0(%rsp),%ymm6 vmovdqa 32(%rsp),%ymm12 cmpq $512,%rdx jb L$tail8x vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 leaq 128(%rsi),%rsi vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) leaq 128(%rdi),%rdi vpxor 0(%rsi),%ymm12,%ymm12 vpxor 32(%rsi),%ymm13,%ymm13 vpxor 64(%rsi),%ymm10,%ymm10 vpxor 96(%rsi),%ymm15,%ymm15 leaq 128(%rsi),%rsi vmovdqu %ymm12,0(%rdi) vmovdqu %ymm13,32(%rdi) vmovdqu %ymm10,64(%rdi) vmovdqu %ymm15,96(%rdi) leaq 128(%rdi),%rdi vpxor 0(%rsi),%ymm14,%ymm14 vpxor 32(%rsi),%ymm2,%ymm2 vpxor 64(%rsi),%ymm3,%ymm3 vpxor 96(%rsi),%ymm7,%ymm7 leaq 
128(%rsi),%rsi vmovdqu %ymm14,0(%rdi) vmovdqu %ymm2,32(%rdi) vmovdqu %ymm3,64(%rdi) vmovdqu %ymm7,96(%rdi) leaq 128(%rdi),%rdi vpxor 0(%rsi),%ymm11,%ymm11 vpxor 32(%rsi),%ymm9,%ymm9 vpxor 64(%rsi),%ymm0,%ymm0 vpxor 96(%rsi),%ymm4,%ymm4 leaq 128(%rsi),%rsi vmovdqu %ymm11,0(%rdi) vmovdqu %ymm9,32(%rdi) vmovdqu %ymm0,64(%rdi) vmovdqu %ymm4,96(%rdi) leaq 128(%rdi),%rdi subq $512,%rdx jnz L$oop_outer8x jmp L$done8x L$tail8x: cmpq $448,%rdx jae L$448_or_more8x cmpq $384,%rdx jae L$384_or_more8x cmpq $320,%rdx jae L$320_or_more8x cmpq $256,%rdx jae L$256_or_more8x cmpq $192,%rdx jae L$192_or_more8x cmpq $128,%rdx jae L$128_or_more8x cmpq $64,%rdx jae L$64_or_more8x xorq %r10,%r10 vmovdqa %ymm6,0(%rsp) vmovdqa %ymm8,32(%rsp) jmp L$oop_tail8x .p2align 5 L$64_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) je L$done8x leaq 64(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm1,0(%rsp) leaq 64(%rdi),%rdi subq $64,%rdx vmovdqa %ymm5,32(%rsp) jmp L$oop_tail8x .p2align 5 L$128_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) je L$done8x leaq 128(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm12,0(%rsp) leaq 128(%rdi),%rdi subq $128,%rdx vmovdqa %ymm13,32(%rsp) jmp L$oop_tail8x .p2align 5 L$192_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) je L$done8x leaq 192(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm10,0(%rsp) leaq 192(%rdi),%rdi subq $192,%rdx vmovdqa %ymm15,32(%rsp) jmp L$oop_tail8x .p2align 5 L$256_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 
vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) je L$done8x leaq 256(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm14,0(%rsp) leaq 256(%rdi),%rdi subq $256,%rdx vmovdqa %ymm2,32(%rsp) jmp L$oop_tail8x .p2align 5 L$320_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vpxor 256(%rsi),%ymm14,%ymm14 vpxor 288(%rsi),%ymm2,%ymm2 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) vmovdqu %ymm14,256(%rdi) vmovdqu %ymm2,288(%rdi) je L$done8x leaq 320(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm3,0(%rsp) leaq 320(%rdi),%rdi subq $320,%rdx vmovdqa %ymm7,32(%rsp) jmp L$oop_tail8x .p2align 5 L$384_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vpxor 256(%rsi),%ymm14,%ymm14 vpxor 288(%rsi),%ymm2,%ymm2 vpxor 320(%rsi),%ymm3,%ymm3 vpxor 352(%rsi),%ymm7,%ymm7 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) vmovdqu %ymm14,256(%rdi) vmovdqu %ymm2,288(%rdi) vmovdqu %ymm3,320(%rdi) vmovdqu %ymm7,352(%rdi) je L$done8x leaq 384(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm11,0(%rsp) leaq 384(%rdi),%rdi subq $384,%rdx vmovdqa %ymm9,32(%rsp) jmp L$oop_tail8x .p2align 5 L$448_or_more8x: vpxor 
0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vpxor 256(%rsi),%ymm14,%ymm14 vpxor 288(%rsi),%ymm2,%ymm2 vpxor 320(%rsi),%ymm3,%ymm3 vpxor 352(%rsi),%ymm7,%ymm7 vpxor 384(%rsi),%ymm11,%ymm11 vpxor 416(%rsi),%ymm9,%ymm9 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) vmovdqu %ymm14,256(%rdi) vmovdqu %ymm2,288(%rdi) vmovdqu %ymm3,320(%rdi) vmovdqu %ymm7,352(%rdi) vmovdqu %ymm11,384(%rdi) vmovdqu %ymm9,416(%rdi) je L$done8x leaq 448(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm0,0(%rsp) leaq 448(%rdi),%rdi subq $448,%rdx vmovdqa %ymm4,32(%rsp) L$oop_tail8x: movzbl (%rsi,%r10,1),%eax movzbl (%rsp,%r10,1),%ecx leaq 1(%r10),%r10 xorl %ecx,%eax movb %al,-1(%rdi,%r10,1) decq %rdx jnz L$oop_tail8x L$done8x: vzeroall leaq (%r9),%rsp L$8x_epilogue: ret #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .section .rodata .align 64 .Lzero: .long 0,0,0,0 .Lone: .long 1,0,0,0 .Linc: .long 0,1,2,3 .Lfour: .long 4,4,4,4 .Lincy: .long 0,2,4,6,1,3,5,7 .Leight: .long 8,8,8,8,8,8,8,8 .Lrot16: .byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd .Lrot24: .byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe .Lsigma: .byte 101,120,112,97,110,100,32,51,50,45,98,121,116,101,32,107,0 .align 64 .Lzeroz: .long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0 .Lfourz: .long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0 .Lincz: .long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 .Lsixteen: .long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text .globl ChaCha20_ctr32_nohw .hidden ChaCha20_ctr32_nohw .type ChaCha20_ctr32_nohw,@function .align 64 ChaCha20_ctr32_nohw: .cfi_startproc _CET_ENDBR pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset rbx,-16 pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset rbp,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset r15,-56 subq $64+24,%rsp .cfi_adjust_cfa_offset 88 .Lctr32_body: movdqu (%rcx),%xmm1 movdqu 16(%rcx),%xmm2 movdqu (%r8),%xmm3 movdqa .Lone(%rip),%xmm4 movdqa %xmm1,16(%rsp) movdqa %xmm2,32(%rsp) movdqa %xmm3,48(%rsp) movq %rdx,%rbp jmp .Loop_outer .align 32 .Loop_outer: movl $0x61707865,%eax movl $0x3320646e,%ebx movl $0x79622d32,%ecx movl $0x6b206574,%edx movl 16(%rsp),%r8d movl 20(%rsp),%r9d movl 24(%rsp),%r10d movl 28(%rsp),%r11d movd %xmm3,%r12d movl 52(%rsp),%r13d movl 56(%rsp),%r14d movl 60(%rsp),%r15d movq %rbp,64+0(%rsp) movl $10,%ebp movq %rsi,64+8(%rsp) .byte 102,72,15,126,214 
movq %rdi,64+16(%rsp) movq %rsi,%rdi shrq $32,%rdi jmp .Loop .align 32 .Loop: addl %r8d,%eax xorl %eax,%r12d roll $16,%r12d addl %r9d,%ebx xorl %ebx,%r13d roll $16,%r13d addl %r12d,%esi xorl %esi,%r8d roll $12,%r8d addl %r13d,%edi xorl %edi,%r9d roll $12,%r9d addl %r8d,%eax xorl %eax,%r12d roll $8,%r12d addl %r9d,%ebx xorl %ebx,%r13d roll $8,%r13d addl %r12d,%esi xorl %esi,%r8d roll $7,%r8d addl %r13d,%edi xorl %edi,%r9d roll $7,%r9d movl %esi,32(%rsp) movl %edi,36(%rsp) movl 40(%rsp),%esi movl 44(%rsp),%edi addl %r10d,%ecx xorl %ecx,%r14d roll $16,%r14d addl %r11d,%edx xorl %edx,%r15d roll $16,%r15d addl %r14d,%esi xorl %esi,%r10d roll $12,%r10d addl %r15d,%edi xorl %edi,%r11d roll $12,%r11d addl %r10d,%ecx xorl %ecx,%r14d roll $8,%r14d addl %r11d,%edx xorl %edx,%r15d roll $8,%r15d addl %r14d,%esi xorl %esi,%r10d roll $7,%r10d addl %r15d,%edi xorl %edi,%r11d roll $7,%r11d addl %r9d,%eax xorl %eax,%r15d roll $16,%r15d addl %r10d,%ebx xorl %ebx,%r12d roll $16,%r12d addl %r15d,%esi xorl %esi,%r9d roll $12,%r9d addl %r12d,%edi xorl %edi,%r10d roll $12,%r10d addl %r9d,%eax xorl %eax,%r15d roll $8,%r15d addl %r10d,%ebx xorl %ebx,%r12d roll $8,%r12d addl %r15d,%esi xorl %esi,%r9d roll $7,%r9d addl %r12d,%edi xorl %edi,%r10d roll $7,%r10d movl %esi,40(%rsp) movl %edi,44(%rsp) movl 32(%rsp),%esi movl 36(%rsp),%edi addl %r11d,%ecx xorl %ecx,%r13d roll $16,%r13d addl %r8d,%edx xorl %edx,%r14d roll $16,%r14d addl %r13d,%esi xorl %esi,%r11d roll $12,%r11d addl %r14d,%edi xorl %edi,%r8d roll $12,%r8d addl %r11d,%ecx xorl %ecx,%r13d roll $8,%r13d addl %r8d,%edx xorl %edx,%r14d roll $8,%r14d addl %r13d,%esi xorl %esi,%r11d roll $7,%r11d addl %r14d,%edi xorl %edi,%r8d roll $7,%r8d decl %ebp jnz .Loop movl %edi,36(%rsp) movl %esi,32(%rsp) movq 64(%rsp),%rbp movdqa %xmm2,%xmm1 movq 64+8(%rsp),%rsi paddd %xmm4,%xmm3 movq 64+16(%rsp),%rdi addl $0x61707865,%eax addl $0x3320646e,%ebx addl $0x79622d32,%ecx addl $0x6b206574,%edx addl 16(%rsp),%r8d addl 20(%rsp),%r9d addl 24(%rsp),%r10d 
addl 28(%rsp),%r11d addl 48(%rsp),%r12d addl 52(%rsp),%r13d addl 56(%rsp),%r14d addl 60(%rsp),%r15d paddd 32(%rsp),%xmm1 cmpq $64,%rbp jb .Ltail xorl 0(%rsi),%eax xorl 4(%rsi),%ebx xorl 8(%rsi),%ecx xorl 12(%rsi),%edx xorl 16(%rsi),%r8d xorl 20(%rsi),%r9d xorl 24(%rsi),%r10d xorl 28(%rsi),%r11d movdqu 32(%rsi),%xmm0 xorl 48(%rsi),%r12d xorl 52(%rsi),%r13d xorl 56(%rsi),%r14d xorl 60(%rsi),%r15d leaq 64(%rsi),%rsi pxor %xmm1,%xmm0 movdqa %xmm2,32(%rsp) movd %xmm3,48(%rsp) movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) movdqu %xmm0,32(%rdi) movl %r12d,48(%rdi) movl %r13d,52(%rdi) movl %r14d,56(%rdi) movl %r15d,60(%rdi) leaq 64(%rdi),%rdi subq $64,%rbp jnz .Loop_outer jmp .Ldone .align 16 .Ltail: movl %eax,0(%rsp) movl %ebx,4(%rsp) xorq %rbx,%rbx movl %ecx,8(%rsp) movl %edx,12(%rsp) movl %r8d,16(%rsp) movl %r9d,20(%rsp) movl %r10d,24(%rsp) movl %r11d,28(%rsp) movdqa %xmm1,32(%rsp) movl %r12d,48(%rsp) movl %r13d,52(%rsp) movl %r14d,56(%rsp) movl %r15d,60(%rsp) .Loop_tail: movzbl (%rsi,%rbx,1),%eax movzbl (%rsp,%rbx,1),%edx leaq 1(%rbx),%rbx xorl %edx,%eax movb %al,-1(%rdi,%rbx,1) decq %rbp jnz .Loop_tail .Ldone: leaq 64+24+48(%rsp),%rsi movq -48(%rsi),%r15 .cfi_restore r15 movq -40(%rsi),%r14 .cfi_restore r14 movq -32(%rsi),%r13 .cfi_restore r13 movq -24(%rsi),%r12 .cfi_restore r12 movq -16(%rsi),%rbp .cfi_restore rbp movq -8(%rsi),%rbx .cfi_restore rbx leaq (%rsi),%rsp .cfi_adjust_cfa_offset -136 .Lno_data: ret .cfi_endproc .size ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw .globl ChaCha20_ctr32_ssse3 .hidden ChaCha20_ctr32_ssse3 .type ChaCha20_ctr32_ssse3,@function .align 32 ChaCha20_ctr32_ssse3: .cfi_startproc _CET_ENDBR movq %rsp,%r9 .cfi_def_cfa_register r9 subq $64+8,%rsp movdqa .Lsigma(%rip),%xmm0 movdqu (%rcx),%xmm1 movdqu 16(%rcx),%xmm2 movdqu (%r8),%xmm3 movdqa .Lrot16(%rip),%xmm6 movdqa .Lrot24(%rip),%xmm7 movdqa %xmm0,0(%rsp) movdqa %xmm1,16(%rsp) movdqa 
%xmm2,32(%rsp) movdqa %xmm3,48(%rsp) movq $10,%r8 jmp .Loop_ssse3 .align 32 .Loop_outer_ssse3: movdqa .Lone(%rip),%xmm3 movdqa 0(%rsp),%xmm0 movdqa 16(%rsp),%xmm1 movdqa 32(%rsp),%xmm2 paddd 48(%rsp),%xmm3 movq $10,%r8 movdqa %xmm3,48(%rsp) jmp .Loop_ssse3 .align 32 .Loop_ssse3: paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $57,%xmm1,%xmm1 pshufd $147,%xmm3,%xmm3 nop paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $147,%xmm1,%xmm1 pshufd $57,%xmm3,%xmm3 decq %r8 jnz .Loop_ssse3 paddd 0(%rsp),%xmm0 paddd 16(%rsp),%xmm1 paddd 32(%rsp),%xmm2 paddd 48(%rsp),%xmm3 cmpq $64,%rdx jb .Ltail_ssse3 movdqu 0(%rsi),%xmm4 movdqu 16(%rsi),%xmm5 pxor %xmm4,%xmm0 movdqu 32(%rsi),%xmm4 pxor %xmm5,%xmm1 movdqu 48(%rsi),%xmm5 leaq 64(%rsi),%rsi pxor %xmm4,%xmm2 pxor %xmm5,%xmm3 movdqu %xmm0,0(%rdi) movdqu %xmm1,16(%rdi) movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) leaq 64(%rdi),%rdi subq $64,%rdx jnz .Loop_outer_ssse3 jmp .Ldone_ssse3 .align 16 .Ltail_ssse3: movdqa %xmm0,0(%rsp) movdqa %xmm1,16(%rsp) movdqa %xmm2,32(%rsp) movdqa %xmm3,48(%rsp) xorq %r8,%r8 .Loop_tail_ssse3: movzbl (%rsi,%r8,1),%eax movzbl (%rsp,%r8,1),%ecx leaq 1(%r8),%r8 xorl %ecx,%eax movb %al,-1(%rdi,%r8,1) decq %rdx jnz .Loop_tail_ssse3 .Ldone_ssse3: leaq (%r9),%rsp .cfi_def_cfa_register rsp .Lssse3_epilogue: ret .cfi_endproc .size ChaCha20_ctr32_ssse3,.-ChaCha20_ctr32_ssse3 .globl ChaCha20_ctr32_ssse3_4x .hidden 
ChaCha20_ctr32_ssse3_4x .type ChaCha20_ctr32_ssse3_4x,@function .align 32 ChaCha20_ctr32_ssse3_4x: .cfi_startproc _CET_ENDBR movq %rsp,%r9 .cfi_def_cfa_register r9 subq $0x140+8,%rsp movdqa .Lsigma(%rip),%xmm11 movdqu (%rcx),%xmm15 movdqu 16(%rcx),%xmm7 movdqu (%r8),%xmm3 leaq 256(%rsp),%rcx leaq .Lrot16(%rip),%r10 leaq .Lrot24(%rip),%r11 pshufd $0x00,%xmm11,%xmm8 pshufd $0x55,%xmm11,%xmm9 movdqa %xmm8,64(%rsp) pshufd $0xaa,%xmm11,%xmm10 movdqa %xmm9,80(%rsp) pshufd $0xff,%xmm11,%xmm11 movdqa %xmm10,96(%rsp) movdqa %xmm11,112(%rsp) pshufd $0x00,%xmm15,%xmm12 pshufd $0x55,%xmm15,%xmm13 movdqa %xmm12,128-256(%rcx) pshufd $0xaa,%xmm15,%xmm14 movdqa %xmm13,144-256(%rcx) pshufd $0xff,%xmm15,%xmm15 movdqa %xmm14,160-256(%rcx) movdqa %xmm15,176-256(%rcx) pshufd $0x00,%xmm7,%xmm4 pshufd $0x55,%xmm7,%xmm5 movdqa %xmm4,192-256(%rcx) pshufd $0xaa,%xmm7,%xmm6 movdqa %xmm5,208-256(%rcx) pshufd $0xff,%xmm7,%xmm7 movdqa %xmm6,224-256(%rcx) movdqa %xmm7,240-256(%rcx) pshufd $0x00,%xmm3,%xmm0 pshufd $0x55,%xmm3,%xmm1 paddd .Linc(%rip),%xmm0 pshufd $0xaa,%xmm3,%xmm2 movdqa %xmm1,272-256(%rcx) pshufd $0xff,%xmm3,%xmm3 movdqa %xmm2,288-256(%rcx) movdqa %xmm3,304-256(%rcx) jmp .Loop_enter4x .align 32 .Loop_outer4x: movdqa 64(%rsp),%xmm8 movdqa 80(%rsp),%xmm9 movdqa 96(%rsp),%xmm10 movdqa 112(%rsp),%xmm11 movdqa 128-256(%rcx),%xmm12 movdqa 144-256(%rcx),%xmm13 movdqa 160-256(%rcx),%xmm14 movdqa 176-256(%rcx),%xmm15 movdqa 192-256(%rcx),%xmm4 movdqa 208-256(%rcx),%xmm5 movdqa 224-256(%rcx),%xmm6 movdqa 240-256(%rcx),%xmm7 movdqa 256-256(%rcx),%xmm0 movdqa 272-256(%rcx),%xmm1 movdqa 288-256(%rcx),%xmm2 movdqa 304-256(%rcx),%xmm3 paddd .Lfour(%rip),%xmm0 .Loop_enter4x: movdqa %xmm6,32(%rsp) movdqa %xmm7,48(%rsp) movdqa (%r10),%xmm7 movl $10,%eax movdqa %xmm0,256-256(%rcx) jmp .Loop4x .align 32 .Loop4x: paddd %xmm12,%xmm8 paddd %xmm13,%xmm9 pxor %xmm8,%xmm0 pxor %xmm9,%xmm1 .byte 102,15,56,0,199 .byte 102,15,56,0,207 paddd %xmm0,%xmm4 paddd %xmm1,%xmm5 pxor %xmm4,%xmm12 pxor %xmm5,%xmm13 
movdqa %xmm12,%xmm6 pslld $12,%xmm12 psrld $20,%xmm6 movdqa %xmm13,%xmm7 pslld $12,%xmm13 por %xmm6,%xmm12 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm13 paddd %xmm12,%xmm8 paddd %xmm13,%xmm9 pxor %xmm8,%xmm0 pxor %xmm9,%xmm1 .byte 102,15,56,0,198 .byte 102,15,56,0,206 paddd %xmm0,%xmm4 paddd %xmm1,%xmm5 pxor %xmm4,%xmm12 pxor %xmm5,%xmm13 movdqa %xmm12,%xmm7 pslld $7,%xmm12 psrld $25,%xmm7 movdqa %xmm13,%xmm6 pslld $7,%xmm13 por %xmm7,%xmm12 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm13 movdqa %xmm4,0(%rsp) movdqa %xmm5,16(%rsp) movdqa 32(%rsp),%xmm4 movdqa 48(%rsp),%xmm5 paddd %xmm14,%xmm10 paddd %xmm15,%xmm11 pxor %xmm10,%xmm2 pxor %xmm11,%xmm3 .byte 102,15,56,0,215 .byte 102,15,56,0,223 paddd %xmm2,%xmm4 paddd %xmm3,%xmm5 pxor %xmm4,%xmm14 pxor %xmm5,%xmm15 movdqa %xmm14,%xmm6 pslld $12,%xmm14 psrld $20,%xmm6 movdqa %xmm15,%xmm7 pslld $12,%xmm15 por %xmm6,%xmm14 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm15 paddd %xmm14,%xmm10 paddd %xmm15,%xmm11 pxor %xmm10,%xmm2 pxor %xmm11,%xmm3 .byte 102,15,56,0,214 .byte 102,15,56,0,222 paddd %xmm2,%xmm4 paddd %xmm3,%xmm5 pxor %xmm4,%xmm14 pxor %xmm5,%xmm15 movdqa %xmm14,%xmm7 pslld $7,%xmm14 psrld $25,%xmm7 movdqa %xmm15,%xmm6 pslld $7,%xmm15 por %xmm7,%xmm14 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm15 paddd %xmm13,%xmm8 paddd %xmm14,%xmm9 pxor %xmm8,%xmm3 pxor %xmm9,%xmm0 .byte 102,15,56,0,223 .byte 102,15,56,0,199 paddd %xmm3,%xmm4 paddd %xmm0,%xmm5 pxor %xmm4,%xmm13 pxor %xmm5,%xmm14 movdqa %xmm13,%xmm6 pslld $12,%xmm13 psrld $20,%xmm6 movdqa %xmm14,%xmm7 pslld $12,%xmm14 por %xmm6,%xmm13 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm14 paddd %xmm13,%xmm8 paddd %xmm14,%xmm9 pxor %xmm8,%xmm3 pxor %xmm9,%xmm0 .byte 102,15,56,0,222 .byte 102,15,56,0,198 paddd %xmm3,%xmm4 paddd %xmm0,%xmm5 pxor %xmm4,%xmm13 pxor %xmm5,%xmm14 movdqa %xmm13,%xmm7 pslld $7,%xmm13 psrld $25,%xmm7 movdqa %xmm14,%xmm6 pslld $7,%xmm14 por %xmm7,%xmm13 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm14 movdqa 
%xmm4,32(%rsp) movdqa %xmm5,48(%rsp) movdqa 0(%rsp),%xmm4 movdqa 16(%rsp),%xmm5 paddd %xmm15,%xmm10 paddd %xmm12,%xmm11 pxor %xmm10,%xmm1 pxor %xmm11,%xmm2 .byte 102,15,56,0,207 .byte 102,15,56,0,215 paddd %xmm1,%xmm4 paddd %xmm2,%xmm5 pxor %xmm4,%xmm15 pxor %xmm5,%xmm12 movdqa %xmm15,%xmm6 pslld $12,%xmm15 psrld $20,%xmm6 movdqa %xmm12,%xmm7 pslld $12,%xmm12 por %xmm6,%xmm15 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm12 paddd %xmm15,%xmm10 paddd %xmm12,%xmm11 pxor %xmm10,%xmm1 pxor %xmm11,%xmm2 .byte 102,15,56,0,206 .byte 102,15,56,0,214 paddd %xmm1,%xmm4 paddd %xmm2,%xmm5 pxor %xmm4,%xmm15 pxor %xmm5,%xmm12 movdqa %xmm15,%xmm7 pslld $7,%xmm15 psrld $25,%xmm7 movdqa %xmm12,%xmm6 pslld $7,%xmm12 por %xmm7,%xmm15 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm12 decl %eax jnz .Loop4x paddd 64(%rsp),%xmm8 paddd 80(%rsp),%xmm9 paddd 96(%rsp),%xmm10 paddd 112(%rsp),%xmm11 movdqa %xmm8,%xmm6 punpckldq %xmm9,%xmm8 movdqa %xmm10,%xmm7 punpckldq %xmm11,%xmm10 punpckhdq %xmm9,%xmm6 punpckhdq %xmm11,%xmm7 movdqa %xmm8,%xmm9 punpcklqdq %xmm10,%xmm8 movdqa %xmm6,%xmm11 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm10,%xmm9 punpckhqdq %xmm7,%xmm11 paddd 128-256(%rcx),%xmm12 paddd 144-256(%rcx),%xmm13 paddd 160-256(%rcx),%xmm14 paddd 176-256(%rcx),%xmm15 movdqa %xmm8,0(%rsp) movdqa %xmm9,16(%rsp) movdqa 32(%rsp),%xmm8 movdqa 48(%rsp),%xmm9 movdqa %xmm12,%xmm10 punpckldq %xmm13,%xmm12 movdqa %xmm14,%xmm7 punpckldq %xmm15,%xmm14 punpckhdq %xmm13,%xmm10 punpckhdq %xmm15,%xmm7 movdqa %xmm12,%xmm13 punpcklqdq %xmm14,%xmm12 movdqa %xmm10,%xmm15 punpcklqdq %xmm7,%xmm10 punpckhqdq %xmm14,%xmm13 punpckhqdq %xmm7,%xmm15 paddd 192-256(%rcx),%xmm4 paddd 208-256(%rcx),%xmm5 paddd 224-256(%rcx),%xmm8 paddd 240-256(%rcx),%xmm9 movdqa %xmm6,32(%rsp) movdqa %xmm11,48(%rsp) movdqa %xmm4,%xmm14 punpckldq %xmm5,%xmm4 movdqa %xmm8,%xmm7 punpckldq %xmm9,%xmm8 punpckhdq %xmm5,%xmm14 punpckhdq %xmm9,%xmm7 movdqa %xmm4,%xmm5 punpcklqdq %xmm8,%xmm4 movdqa %xmm14,%xmm9 punpcklqdq %xmm7,%xmm14 punpckhqdq 
%xmm8,%xmm5 punpckhqdq %xmm7,%xmm9 paddd 256-256(%rcx),%xmm0 paddd 272-256(%rcx),%xmm1 paddd 288-256(%rcx),%xmm2 paddd 304-256(%rcx),%xmm3 movdqa %xmm0,%xmm8 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm8 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm8,%xmm3 punpcklqdq %xmm7,%xmm8 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 cmpq $256,%rdx jb .Ltail4x movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 leaq 128(%rsi),%rsi pxor 16(%rsp),%xmm6 pxor %xmm13,%xmm11 pxor %xmm5,%xmm2 pxor %xmm1,%xmm7 movdqu %xmm6,64(%rdi) movdqu 0(%rsi),%xmm6 movdqu %xmm11,80(%rdi) movdqu 16(%rsi),%xmm11 movdqu %xmm2,96(%rdi) movdqu 32(%rsi),%xmm2 movdqu %xmm7,112(%rdi) leaq 128(%rdi),%rdi movdqu 48(%rsi),%xmm7 pxor 32(%rsp),%xmm6 pxor %xmm10,%xmm11 pxor %xmm14,%xmm2 pxor %xmm8,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 leaq 128(%rsi),%rsi pxor 48(%rsp),%xmm6 pxor %xmm15,%xmm11 pxor %xmm9,%xmm2 pxor %xmm3,%xmm7 movdqu %xmm6,64(%rdi) movdqu %xmm11,80(%rdi) movdqu %xmm2,96(%rdi) movdqu %xmm7,112(%rdi) leaq 128(%rdi),%rdi subq $256,%rdx jnz .Loop_outer4x jmp .Ldone4x .Ltail4x: cmpq $192,%rdx jae .L192_or_more4x cmpq $128,%rdx jae .L128_or_more4x cmpq $64,%rdx jae .L64_or_more4x xorq %r10,%r10 movdqa %xmm12,16(%rsp) movdqa %xmm4,32(%rsp) movdqa %xmm0,48(%rsp) jmp .Loop_tail4x .align 32 .L64_or_more4x: movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu %xmm11,16(%rdi) 
movdqu %xmm2,32(%rdi) movdqu %xmm7,48(%rdi) je .Ldone4x movdqa 16(%rsp),%xmm6 leaq 64(%rsi),%rsi xorq %r10,%r10 movdqa %xmm6,0(%rsp) movdqa %xmm13,16(%rsp) leaq 64(%rdi),%rdi movdqa %xmm5,32(%rsp) subq $64,%rdx movdqa %xmm1,48(%rsp) jmp .Loop_tail4x .align 32 .L128_or_more4x: movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 pxor 16(%rsp),%xmm6 pxor %xmm13,%xmm11 pxor %xmm5,%xmm2 pxor %xmm1,%xmm7 movdqu %xmm6,64(%rdi) movdqu %xmm11,80(%rdi) movdqu %xmm2,96(%rdi) movdqu %xmm7,112(%rdi) je .Ldone4x movdqa 32(%rsp),%xmm6 leaq 128(%rsi),%rsi xorq %r10,%r10 movdqa %xmm6,0(%rsp) movdqa %xmm10,16(%rsp) leaq 128(%rdi),%rdi movdqa %xmm14,32(%rsp) subq $128,%rdx movdqa %xmm8,48(%rsp) jmp .Loop_tail4x .align 32 .L192_or_more4x: movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 leaq 128(%rsi),%rsi pxor 16(%rsp),%xmm6 pxor %xmm13,%xmm11 pxor %xmm5,%xmm2 pxor %xmm1,%xmm7 movdqu %xmm6,64(%rdi) movdqu 0(%rsi),%xmm6 movdqu %xmm11,80(%rdi) movdqu 16(%rsi),%xmm11 movdqu %xmm2,96(%rdi) movdqu 32(%rsi),%xmm2 movdqu %xmm7,112(%rdi) leaq 128(%rdi),%rdi movdqu 48(%rsi),%xmm7 pxor 32(%rsp),%xmm6 pxor %xmm10,%xmm11 pxor %xmm14,%xmm2 pxor %xmm8,%xmm7 movdqu %xmm6,0(%rdi) movdqu %xmm11,16(%rdi) movdqu %xmm2,32(%rdi) movdqu %xmm7,48(%rdi) je .Ldone4x movdqa 48(%rsp),%xmm6 leaq 64(%rsi),%rsi xorq %r10,%r10 movdqa %xmm6,0(%rsp) movdqa %xmm15,16(%rsp) leaq 64(%rdi),%rdi movdqa %xmm9,32(%rsp) subq $192,%rdx movdqa %xmm3,48(%rsp) 
.Loop_tail4x: movzbl (%rsi,%r10,1),%eax movzbl (%rsp,%r10,1),%ecx leaq 1(%r10),%r10 xorl %ecx,%eax movb %al,-1(%rdi,%r10,1) decq %rdx jnz .Loop_tail4x .Ldone4x: leaq (%r9),%rsp .cfi_def_cfa_register rsp .L4x_epilogue: ret .cfi_endproc .size ChaCha20_ctr32_ssse3_4x,.-ChaCha20_ctr32_ssse3_4x .globl ChaCha20_ctr32_avx2 .hidden ChaCha20_ctr32_avx2 .type ChaCha20_ctr32_avx2,@function .align 32 ChaCha20_ctr32_avx2: .cfi_startproc _CET_ENDBR movq %rsp,%r9 .cfi_def_cfa_register r9 subq $0x280+8,%rsp andq $-32,%rsp vzeroupper vbroadcasti128 .Lsigma(%rip),%ymm11 vbroadcasti128 (%rcx),%ymm3 vbroadcasti128 16(%rcx),%ymm15 vbroadcasti128 (%r8),%ymm7 leaq 256(%rsp),%rcx leaq 512(%rsp),%rax leaq .Lrot16(%rip),%r10 leaq .Lrot24(%rip),%r11 vpshufd $0x00,%ymm11,%ymm8 vpshufd $0x55,%ymm11,%ymm9 vmovdqa %ymm8,128-256(%rcx) vpshufd $0xaa,%ymm11,%ymm10 vmovdqa %ymm9,160-256(%rcx) vpshufd $0xff,%ymm11,%ymm11 vmovdqa %ymm10,192-256(%rcx) vmovdqa %ymm11,224-256(%rcx) vpshufd $0x00,%ymm3,%ymm0 vpshufd $0x55,%ymm3,%ymm1 vmovdqa %ymm0,256-256(%rcx) vpshufd $0xaa,%ymm3,%ymm2 vmovdqa %ymm1,288-256(%rcx) vpshufd $0xff,%ymm3,%ymm3 vmovdqa %ymm2,320-256(%rcx) vmovdqa %ymm3,352-256(%rcx) vpshufd $0x00,%ymm15,%ymm12 vpshufd $0x55,%ymm15,%ymm13 vmovdqa %ymm12,384-512(%rax) vpshufd $0xaa,%ymm15,%ymm14 vmovdqa %ymm13,416-512(%rax) vpshufd $0xff,%ymm15,%ymm15 vmovdqa %ymm14,448-512(%rax) vmovdqa %ymm15,480-512(%rax) vpshufd $0x00,%ymm7,%ymm4 vpshufd $0x55,%ymm7,%ymm5 vpaddd .Lincy(%rip),%ymm4,%ymm4 vpshufd $0xaa,%ymm7,%ymm6 vmovdqa %ymm5,544-512(%rax) vpshufd $0xff,%ymm7,%ymm7 vmovdqa %ymm6,576-512(%rax) vmovdqa %ymm7,608-512(%rax) jmp .Loop_enter8x .align 32 .Loop_outer8x: vmovdqa 128-256(%rcx),%ymm8 vmovdqa 160-256(%rcx),%ymm9 vmovdqa 192-256(%rcx),%ymm10 vmovdqa 224-256(%rcx),%ymm11 vmovdqa 256-256(%rcx),%ymm0 vmovdqa 288-256(%rcx),%ymm1 vmovdqa 320-256(%rcx),%ymm2 vmovdqa 352-256(%rcx),%ymm3 vmovdqa 384-512(%rax),%ymm12 vmovdqa 416-512(%rax),%ymm13 vmovdqa 448-512(%rax),%ymm14 vmovdqa 
480-512(%rax),%ymm15 vmovdqa 512-512(%rax),%ymm4 vmovdqa 544-512(%rax),%ymm5 vmovdqa 576-512(%rax),%ymm6 vmovdqa 608-512(%rax),%ymm7 vpaddd .Leight(%rip),%ymm4,%ymm4 .Loop_enter8x: vmovdqa %ymm14,64(%rsp) vmovdqa %ymm15,96(%rsp) vbroadcasti128 (%r10),%ymm15 vmovdqa %ymm4,512-512(%rax) movl $10,%eax jmp .Loop8x .align 32 .Loop8x: vpaddd %ymm0,%ymm8,%ymm8 vpxor %ymm4,%ymm8,%ymm4 vpshufb %ymm15,%ymm4,%ymm4 vpaddd %ymm1,%ymm9,%ymm9 vpxor %ymm5,%ymm9,%ymm5 vpshufb %ymm15,%ymm5,%ymm5 vpaddd %ymm4,%ymm12,%ymm12 vpxor %ymm0,%ymm12,%ymm0 vpslld $12,%ymm0,%ymm14 vpsrld $20,%ymm0,%ymm0 vpor %ymm0,%ymm14,%ymm0 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm5,%ymm13,%ymm13 vpxor %ymm1,%ymm13,%ymm1 vpslld $12,%ymm1,%ymm15 vpsrld $20,%ymm1,%ymm1 vpor %ymm1,%ymm15,%ymm1 vpaddd %ymm0,%ymm8,%ymm8 vpxor %ymm4,%ymm8,%ymm4 vpshufb %ymm14,%ymm4,%ymm4 vpaddd %ymm1,%ymm9,%ymm9 vpxor %ymm5,%ymm9,%ymm5 vpshufb %ymm14,%ymm5,%ymm5 vpaddd %ymm4,%ymm12,%ymm12 vpxor %ymm0,%ymm12,%ymm0 vpslld $7,%ymm0,%ymm15 vpsrld $25,%ymm0,%ymm0 vpor %ymm0,%ymm15,%ymm0 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm5,%ymm13,%ymm13 vpxor %ymm1,%ymm13,%ymm1 vpslld $7,%ymm1,%ymm14 vpsrld $25,%ymm1,%ymm1 vpor %ymm1,%ymm14,%ymm1 vmovdqa %ymm12,0(%rsp) vmovdqa %ymm13,32(%rsp) vmovdqa 64(%rsp),%ymm12 vmovdqa 96(%rsp),%ymm13 vpaddd %ymm2,%ymm10,%ymm10 vpxor %ymm6,%ymm10,%ymm6 vpshufb %ymm15,%ymm6,%ymm6 vpaddd %ymm3,%ymm11,%ymm11 vpxor %ymm7,%ymm11,%ymm7 vpshufb %ymm15,%ymm7,%ymm7 vpaddd %ymm6,%ymm12,%ymm12 vpxor %ymm2,%ymm12,%ymm2 vpslld $12,%ymm2,%ymm14 vpsrld $20,%ymm2,%ymm2 vpor %ymm2,%ymm14,%ymm2 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm7,%ymm13,%ymm13 vpxor %ymm3,%ymm13,%ymm3 vpslld $12,%ymm3,%ymm15 vpsrld $20,%ymm3,%ymm3 vpor %ymm3,%ymm15,%ymm3 vpaddd %ymm2,%ymm10,%ymm10 vpxor %ymm6,%ymm10,%ymm6 vpshufb %ymm14,%ymm6,%ymm6 vpaddd %ymm3,%ymm11,%ymm11 vpxor %ymm7,%ymm11,%ymm7 vpshufb %ymm14,%ymm7,%ymm7 vpaddd %ymm6,%ymm12,%ymm12 vpxor %ymm2,%ymm12,%ymm2 vpslld $7,%ymm2,%ymm15 vpsrld $25,%ymm2,%ymm2 vpor %ymm2,%ymm15,%ymm2 
vbroadcasti128 (%r10),%ymm15 vpaddd %ymm7,%ymm13,%ymm13 vpxor %ymm3,%ymm13,%ymm3 vpslld $7,%ymm3,%ymm14 vpsrld $25,%ymm3,%ymm3 vpor %ymm3,%ymm14,%ymm3 vpaddd %ymm1,%ymm8,%ymm8 vpxor %ymm7,%ymm8,%ymm7 vpshufb %ymm15,%ymm7,%ymm7 vpaddd %ymm2,%ymm9,%ymm9 vpxor %ymm4,%ymm9,%ymm4 vpshufb %ymm15,%ymm4,%ymm4 vpaddd %ymm7,%ymm12,%ymm12 vpxor %ymm1,%ymm12,%ymm1 vpslld $12,%ymm1,%ymm14 vpsrld $20,%ymm1,%ymm1 vpor %ymm1,%ymm14,%ymm1 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm4,%ymm13,%ymm13 vpxor %ymm2,%ymm13,%ymm2 vpslld $12,%ymm2,%ymm15 vpsrld $20,%ymm2,%ymm2 vpor %ymm2,%ymm15,%ymm2 vpaddd %ymm1,%ymm8,%ymm8 vpxor %ymm7,%ymm8,%ymm7 vpshufb %ymm14,%ymm7,%ymm7 vpaddd %ymm2,%ymm9,%ymm9 vpxor %ymm4,%ymm9,%ymm4 vpshufb %ymm14,%ymm4,%ymm4 vpaddd %ymm7,%ymm12,%ymm12 vpxor %ymm1,%ymm12,%ymm1 vpslld $7,%ymm1,%ymm15 vpsrld $25,%ymm1,%ymm1 vpor %ymm1,%ymm15,%ymm1 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm4,%ymm13,%ymm13 vpxor %ymm2,%ymm13,%ymm2 vpslld $7,%ymm2,%ymm14 vpsrld $25,%ymm2,%ymm2 vpor %ymm2,%ymm14,%ymm2 vmovdqa %ymm12,64(%rsp) vmovdqa %ymm13,96(%rsp) vmovdqa 0(%rsp),%ymm12 vmovdqa 32(%rsp),%ymm13 vpaddd %ymm3,%ymm10,%ymm10 vpxor %ymm5,%ymm10,%ymm5 vpshufb %ymm15,%ymm5,%ymm5 vpaddd %ymm0,%ymm11,%ymm11 vpxor %ymm6,%ymm11,%ymm6 vpshufb %ymm15,%ymm6,%ymm6 vpaddd %ymm5,%ymm12,%ymm12 vpxor %ymm3,%ymm12,%ymm3 vpslld $12,%ymm3,%ymm14 vpsrld $20,%ymm3,%ymm3 vpor %ymm3,%ymm14,%ymm3 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm6,%ymm13,%ymm13 vpxor %ymm0,%ymm13,%ymm0 vpslld $12,%ymm0,%ymm15 vpsrld $20,%ymm0,%ymm0 vpor %ymm0,%ymm15,%ymm0 vpaddd %ymm3,%ymm10,%ymm10 vpxor %ymm5,%ymm10,%ymm5 vpshufb %ymm14,%ymm5,%ymm5 vpaddd %ymm0,%ymm11,%ymm11 vpxor %ymm6,%ymm11,%ymm6 vpshufb %ymm14,%ymm6,%ymm6 vpaddd %ymm5,%ymm12,%ymm12 vpxor %ymm3,%ymm12,%ymm3 vpslld $7,%ymm3,%ymm15 vpsrld $25,%ymm3,%ymm3 vpor %ymm3,%ymm15,%ymm3 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm6,%ymm13,%ymm13 vpxor %ymm0,%ymm13,%ymm0 vpslld $7,%ymm0,%ymm14 vpsrld $25,%ymm0,%ymm0 vpor %ymm0,%ymm14,%ymm0 decl %eax jnz .Loop8x leaq 
512(%rsp),%rax vpaddd 128-256(%rcx),%ymm8,%ymm8 vpaddd 160-256(%rcx),%ymm9,%ymm9 vpaddd 192-256(%rcx),%ymm10,%ymm10 vpaddd 224-256(%rcx),%ymm11,%ymm11 vpunpckldq %ymm9,%ymm8,%ymm14 vpunpckldq %ymm11,%ymm10,%ymm15 vpunpckhdq %ymm9,%ymm8,%ymm8 vpunpckhdq %ymm11,%ymm10,%ymm10 vpunpcklqdq %ymm15,%ymm14,%ymm9 vpunpckhqdq %ymm15,%ymm14,%ymm14 vpunpcklqdq %ymm10,%ymm8,%ymm11 vpunpckhqdq %ymm10,%ymm8,%ymm8 vpaddd 256-256(%rcx),%ymm0,%ymm0 vpaddd 288-256(%rcx),%ymm1,%ymm1 vpaddd 320-256(%rcx),%ymm2,%ymm2 vpaddd 352-256(%rcx),%ymm3,%ymm3 vpunpckldq %ymm1,%ymm0,%ymm10 vpunpckldq %ymm3,%ymm2,%ymm15 vpunpckhdq %ymm1,%ymm0,%ymm0 vpunpckhdq %ymm3,%ymm2,%ymm2 vpunpcklqdq %ymm15,%ymm10,%ymm1 vpunpckhqdq %ymm15,%ymm10,%ymm10 vpunpcklqdq %ymm2,%ymm0,%ymm3 vpunpckhqdq %ymm2,%ymm0,%ymm0 vperm2i128 $0x20,%ymm1,%ymm9,%ymm15 vperm2i128 $0x31,%ymm1,%ymm9,%ymm1 vperm2i128 $0x20,%ymm10,%ymm14,%ymm9 vperm2i128 $0x31,%ymm10,%ymm14,%ymm10 vperm2i128 $0x20,%ymm3,%ymm11,%ymm14 vperm2i128 $0x31,%ymm3,%ymm11,%ymm3 vperm2i128 $0x20,%ymm0,%ymm8,%ymm11 vperm2i128 $0x31,%ymm0,%ymm8,%ymm0 vmovdqa %ymm15,0(%rsp) vmovdqa %ymm9,32(%rsp) vmovdqa 64(%rsp),%ymm15 vmovdqa 96(%rsp),%ymm9 vpaddd 384-512(%rax),%ymm12,%ymm12 vpaddd 416-512(%rax),%ymm13,%ymm13 vpaddd 448-512(%rax),%ymm15,%ymm15 vpaddd 480-512(%rax),%ymm9,%ymm9 vpunpckldq %ymm13,%ymm12,%ymm2 vpunpckldq %ymm9,%ymm15,%ymm8 vpunpckhdq %ymm13,%ymm12,%ymm12 vpunpckhdq %ymm9,%ymm15,%ymm15 vpunpcklqdq %ymm8,%ymm2,%ymm13 vpunpckhqdq %ymm8,%ymm2,%ymm2 vpunpcklqdq %ymm15,%ymm12,%ymm9 vpunpckhqdq %ymm15,%ymm12,%ymm12 vpaddd 512-512(%rax),%ymm4,%ymm4 vpaddd 544-512(%rax),%ymm5,%ymm5 vpaddd 576-512(%rax),%ymm6,%ymm6 vpaddd 608-512(%rax),%ymm7,%ymm7 vpunpckldq %ymm5,%ymm4,%ymm15 vpunpckldq %ymm7,%ymm6,%ymm8 vpunpckhdq %ymm5,%ymm4,%ymm4 vpunpckhdq %ymm7,%ymm6,%ymm6 vpunpcklqdq %ymm8,%ymm15,%ymm5 vpunpckhqdq %ymm8,%ymm15,%ymm15 vpunpcklqdq %ymm6,%ymm4,%ymm7 vpunpckhqdq %ymm6,%ymm4,%ymm4 vperm2i128 $0x20,%ymm5,%ymm13,%ymm8 vperm2i128 $0x31,%ymm5,%ymm13,%ymm5 
vperm2i128 $0x20,%ymm15,%ymm2,%ymm13 vperm2i128 $0x31,%ymm15,%ymm2,%ymm15 vperm2i128 $0x20,%ymm7,%ymm9,%ymm2 vperm2i128 $0x31,%ymm7,%ymm9,%ymm7 vperm2i128 $0x20,%ymm4,%ymm12,%ymm9 vperm2i128 $0x31,%ymm4,%ymm12,%ymm4 vmovdqa 0(%rsp),%ymm6 vmovdqa 32(%rsp),%ymm12 cmpq $512,%rdx jb .Ltail8x vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 leaq 128(%rsi),%rsi vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) leaq 128(%rdi),%rdi vpxor 0(%rsi),%ymm12,%ymm12 vpxor 32(%rsi),%ymm13,%ymm13 vpxor 64(%rsi),%ymm10,%ymm10 vpxor 96(%rsi),%ymm15,%ymm15 leaq 128(%rsi),%rsi vmovdqu %ymm12,0(%rdi) vmovdqu %ymm13,32(%rdi) vmovdqu %ymm10,64(%rdi) vmovdqu %ymm15,96(%rdi) leaq 128(%rdi),%rdi vpxor 0(%rsi),%ymm14,%ymm14 vpxor 32(%rsi),%ymm2,%ymm2 vpxor 64(%rsi),%ymm3,%ymm3 vpxor 96(%rsi),%ymm7,%ymm7 leaq 128(%rsi),%rsi vmovdqu %ymm14,0(%rdi) vmovdqu %ymm2,32(%rdi) vmovdqu %ymm3,64(%rdi) vmovdqu %ymm7,96(%rdi) leaq 128(%rdi),%rdi vpxor 0(%rsi),%ymm11,%ymm11 vpxor 32(%rsi),%ymm9,%ymm9 vpxor 64(%rsi),%ymm0,%ymm0 vpxor 96(%rsi),%ymm4,%ymm4 leaq 128(%rsi),%rsi vmovdqu %ymm11,0(%rdi) vmovdqu %ymm9,32(%rdi) vmovdqu %ymm0,64(%rdi) vmovdqu %ymm4,96(%rdi) leaq 128(%rdi),%rdi subq $512,%rdx jnz .Loop_outer8x jmp .Ldone8x .Ltail8x: cmpq $448,%rdx jae .L448_or_more8x cmpq $384,%rdx jae .L384_or_more8x cmpq $320,%rdx jae .L320_or_more8x cmpq $256,%rdx jae .L256_or_more8x cmpq $192,%rdx jae .L192_or_more8x cmpq $128,%rdx jae .L128_or_more8x cmpq $64,%rdx jae .L64_or_more8x xorq %r10,%r10 vmovdqa %ymm6,0(%rsp) vmovdqa %ymm8,32(%rsp) jmp .Loop_tail8x .align 32 .L64_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) je .Ldone8x leaq 64(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm1,0(%rsp) leaq 64(%rdi),%rdi subq $64,%rdx vmovdqa %ymm5,32(%rsp) jmp .Loop_tail8x .align 32 .L128_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 
64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) je .Ldone8x leaq 128(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm12,0(%rsp) leaq 128(%rdi),%rdi subq $128,%rdx vmovdqa %ymm13,32(%rsp) jmp .Loop_tail8x .align 32 .L192_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) je .Ldone8x leaq 192(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm10,0(%rsp) leaq 192(%rdi),%rdi subq $192,%rdx vmovdqa %ymm15,32(%rsp) jmp .Loop_tail8x .align 32 .L256_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) je .Ldone8x leaq 256(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm14,0(%rsp) leaq 256(%rdi),%rdi subq $256,%rdx vmovdqa %ymm2,32(%rsp) jmp .Loop_tail8x .align 32 .L320_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vpxor 256(%rsi),%ymm14,%ymm14 vpxor 288(%rsi),%ymm2,%ymm2 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) vmovdqu %ymm14,256(%rdi) vmovdqu %ymm2,288(%rdi) je .Ldone8x leaq 320(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm3,0(%rsp) leaq 320(%rdi),%rdi 
subq $320,%rdx vmovdqa %ymm7,32(%rsp) jmp .Loop_tail8x .align 32 .L384_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vpxor 256(%rsi),%ymm14,%ymm14 vpxor 288(%rsi),%ymm2,%ymm2 vpxor 320(%rsi),%ymm3,%ymm3 vpxor 352(%rsi),%ymm7,%ymm7 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) vmovdqu %ymm14,256(%rdi) vmovdqu %ymm2,288(%rdi) vmovdqu %ymm3,320(%rdi) vmovdqu %ymm7,352(%rdi) je .Ldone8x leaq 384(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm11,0(%rsp) leaq 384(%rdi),%rdi subq $384,%rdx vmovdqa %ymm9,32(%rsp) jmp .Loop_tail8x .align 32 .L448_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vpxor 256(%rsi),%ymm14,%ymm14 vpxor 288(%rsi),%ymm2,%ymm2 vpxor 320(%rsi),%ymm3,%ymm3 vpxor 352(%rsi),%ymm7,%ymm7 vpxor 384(%rsi),%ymm11,%ymm11 vpxor 416(%rsi),%ymm9,%ymm9 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) vmovdqu %ymm14,256(%rdi) vmovdqu %ymm2,288(%rdi) vmovdqu %ymm3,320(%rdi) vmovdqu %ymm7,352(%rdi) vmovdqu %ymm11,384(%rdi) vmovdqu %ymm9,416(%rdi) je .Ldone8x leaq 448(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm0,0(%rsp) leaq 448(%rdi),%rdi subq $448,%rdx vmovdqa %ymm4,32(%rsp) .Loop_tail8x: movzbl (%rsi,%r10,1),%eax movzbl (%rsp,%r10,1),%ecx leaq 1(%r10),%r10 xorl %ecx,%eax movb %al,-1(%rdi,%r10,1) decq %rdx jnz .Loop_tail8x .Ldone8x: vzeroall leaq (%r9),%rsp .cfi_def_cfa_register rsp .L8x_epilogue: ret .cfi_endproc .size 
ChaCha20_ctr32_avx2,.-ChaCha20_ctr32_avx2 #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha20_poly1305_armv8-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include .section __TEXT,__const .align 7 Lchacha20_consts: .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' Linc: .long 1,2,3,4 Lrol8: .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 Lclamp: .quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC .text .align 6 Lpoly_hash_ad_internal: .cfi_startproc cbnz x4, Lpoly_hash_intro ret Lpoly_hash_intro: cmp x4, #16 b.lt Lpoly_hash_ad_tail ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #16 b Lpoly_hash_ad_internal Lpoly_hash_ad_tail: cbz x4, Lpoly_hash_ad_ret eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD sub x4, x4, #1 Lpoly_hash_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, x4] mov v20.b[0], w11 subs x4, x4, #1 
b.ge Lpoly_hash_tail_16_compose mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lpoly_hash_ad_ret: ret .cfi_endproc ///////////////////////////////// // // void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data); // .globl _chacha20_poly1305_seal .private_extern _chacha20_poly1305_seal .align 6 _chacha20_poly1305_seal: AARCH64_SIGN_LINK_REGISTER .cfi_startproc stp x29, x30, [sp, #-80]! .cfi_def_cfa_offset 80 .cfi_offset w30, -72 .cfi_offset w29, -80 mov x29, sp // We probably could do .cfi_def_cfa w29, 80 at this point, but since // we don't actually use the frame pointer like that, it's probably not // worth bothering. 
stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] .cfi_offset b15, -8 .cfi_offset b14, -16 .cfi_offset b13, -24 .cfi_offset b12, -32 .cfi_offset b11, -40 .cfi_offset b10, -48 .cfi_offset b9, -56 .cfi_offset b8, -64 adrp x11, Lchacha20_consts@PAGE add x11, x11, Lchacha20_consts@PAGEOFF ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values ld1 {v28.16b - v30.16b}, [x5] mov x15, #1 // Prepare the Poly1305 state mov x8, #0 mov x9, #0 mov x10, #0 ldr x12, [x5, #56] // The total cipher text length includes extra_in_len add x12, x12, x2 mov v31.d[0], x4 // Store the input and aad lengths mov v31.d[1], x12 cmp x2, #128 b.le Lseal_128 // Optimization for smaller buffers // Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext, // and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically, // the fifth block (A4-D4) horizontally. ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b sub x5, x5, #32 mov x6, #10 .align 5 Lseal_init_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, 
v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 
ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x6, x6, #1 b.hi Lseal_init_rounds add v15.4s, v15.4s, v25.4s mov x11, #4 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s 
zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s and v4.16b, v4.16b, v27.16b add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s mov x16, v4.d[0] // Move the R key to GPRs mov x17, v4.d[1] mov v27.16b, v9.16b // Store the S key bl Lpoly_hash_ad_internal mov x3, x0 cmp x2, #256 b.le Lseal_tail ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #256 mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256 Lseal_main_loop: adrp x11, Lchacha20_consts@PAGE add x11, x11, Lchacha20_consts@PAGEOFF ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, 
[x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s sub x5, x5, #32 .align 5 Lseal_main_loop_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, 
v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add 
v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x6, x6, #1 b.ge Lseal_main_loop_rounds ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most subs x7, x7, #1 b.gt Lseal_main_loop_rounds eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, 
v19.4s, v20.4s add v15.4s, v15.4s, v25.4s mov x11, #5 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s add v14.4s, v14.4s, v29.4s add v19.4s, v19.4s, v30.4s cmp x2, #320 b.le Lseal_tail ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, 
v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v9.16b eor v22.16b, v22.16b, v14.16b eor v23.16b, v23.16b, v19.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #320 mov x6, #0 mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration b Lseal_main_loop Lseal_tail: // This part of the function handles the storage and authentication of the last [0,320) bytes // We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data. cmp x2, #64 b.lt Lseal_tail_64 // Store and authenticate 64B blocks per iteration ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 
umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v22.d[0] mov x12, v22.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v23.d[0] mov x12, v23.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 
adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 // Shift the state left by 64 bytes for the next iteration of the loop mov v0.16b, v1.16b mov v5.16b, v6.16b mov v10.16b, v11.16b mov v15.16b, v16.16b mov v1.16b, v2.16b mov v6.16b, v7.16b mov v11.16b, v12.16b mov v16.16b, v17.16b mov v2.16b, v3.16b mov v7.16b, v8.16b mov v12.16b, v13.16b mov v17.16b, v18.16b mov v3.16b, v4.16b mov v8.16b, v9.16b mov v13.16b, v14.16b mov v18.16b, v19.16b b Lseal_tail Lseal_tail_64: ldp x3, x4, [x5, #48] // extra_in_len and extra_in_ptr // Here we handle the last [0,64) bytes of plaintext cmp x2, #16 b.lt Lseal_tail_16 // Each iteration encrypt and authenticate a 16B block ld1 {v20.16b}, [x1], #16 eor v20.16b, v20.16b, v0.16b mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most st1 {v20.16b}, [x0], #16 sub x2, x2, #16 // Shift the state left by 16 bytes for the next iteration of the loop mov v0.16b, v5.16b mov v5.16b, v10.16b mov v10.16b, v15.16b b Lseal_tail_64 Lseal_tail_16: // Here we handle the last [0,16) bytes of ciphertext that require a padded 
block cbz x2, Lseal_hash_extra eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes not v22.16b, v20.16b mov x6, x2 add x1, x1, x2 cbz x4, Lseal_tail_16_compose // No extra data to pad with, zero padding mov x7, #16 // We need to load some extra_in first for padding sub x7, x7, x2 cmp x4, x7 csel x7, x4, x7, lt // Load the minimum of extra_in_len and the amount needed to fill the register mov x12, x7 add x3, x3, x7 sub x4, x4, x7 Lseal_tail16_compose_extra_in: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, #-1]! mov v20.b[0], w11 subs x7, x7, #1 b.gt Lseal_tail16_compose_extra_in add x3, x3, x12 Lseal_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x1, #-1]! mov v20.b[0], w11 ext v21.16b, v22.16b, v21.16b, #15 subs x2, x2, #1 b.gt Lseal_tail_16_compose and v0.16b, v0.16b, v21.16b eor v20.16b, v20.16b, v0.16b mov v21.16b, v20.16b Lseal_tail_16_store: umov w11, v20.b[0] strb w11, [x0], #1 ext v20.16b, v20.16b, v20.16b, #1 subs x6, x6, #1 b.gt Lseal_tail_16_store // Hash in the final ct block concatenated with extra_in mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lseal_hash_extra: cbz x4, 
Lseal_finalize Lseal_hash_extra_loop: cmp x4, #16 b.lt Lseal_hash_extra_tail ld1 {v20.16b}, [x3], #16 mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #16 b Lseal_hash_extra_loop Lseal_hash_extra_tail: cbz x4, Lseal_finalize eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext add x3, x3, x4 Lseal_hash_extra_load: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, #-1]! 
mov v20.b[0], w11 subs x4, x4, #1 b.gt Lseal_hash_extra_load // Hash in the final padded extra_in blcok mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lseal_finalize: mov x11, v31.d[0] mov x12, v31.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most // Final reduction step sub x12, xzr, x15 orr x13, xzr, #3 subs x11, x8, #-5 sbcs x12, x9, x12 sbcs x13, x10, x13 csel x8, x11, x8, cs csel x9, x12, x9, cs csel x10, x13, x10, cs mov x11, v27.d[0] mov x12, v27.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 stp x8, x9, [x5] ldp d8, 
d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] .cfi_restore b15 .cfi_restore b14 .cfi_restore b13 .cfi_restore b12 .cfi_restore b11 .cfi_restore b10 .cfi_restore b9 .cfi_restore b8 ldp x29, x30, [sp], 80 .cfi_restore w29 .cfi_restore w30 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret Lseal_128: // On some architectures preparing 5 blocks for small buffers is wasteful eor v25.16b, v25.16b, v25.16b mov x11, #1 mov v25.s[0], w11 mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v17.16b, v30.16b add v15.4s, v17.4s, v25.4s add v16.4s, v15.4s, v25.4s mov x6, #10 Lseal_128_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, 
v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x6, x6, #1 b.hi Lseal_128_rounds add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s // Only the first 32 bytes of the third block (counter = 0) are needed, // so skip updating v12 and v17. 
add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v30.4s, v30.4s, v25.4s add v15.4s, v15.4s, v30.4s add v30.4s, v30.4s, v25.4s add v16.4s, v16.4s, v30.4s and v2.16b, v2.16b, v27.16b mov x16, v2.d[0] // Move the R key to GPRs mov x17, v2.d[1] mov v27.16b, v7.16b // Store the S key bl Lpoly_hash_ad_internal b Lseal_tail .cfi_endproc ///////////////////////////////// // // void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data); // .globl _chacha20_poly1305_open .private_extern _chacha20_poly1305_open .align 6 _chacha20_poly1305_open: AARCH64_SIGN_LINK_REGISTER .cfi_startproc stp x29, x30, [sp, #-80]! .cfi_def_cfa_offset 80 .cfi_offset w30, -72 .cfi_offset w29, -80 mov x29, sp // We probably could do .cfi_def_cfa w29, 80 at this point, but since // we don't actually use the frame pointer like that, it's probably not // worth bothering. stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] .cfi_offset b15, -8 .cfi_offset b14, -16 .cfi_offset b13, -24 .cfi_offset b12, -32 .cfi_offset b11, -40 .cfi_offset b10, -48 .cfi_offset b9, -56 .cfi_offset b8, -64 adrp x11, Lchacha20_consts@PAGE add x11, x11, Lchacha20_consts@PAGEOFF ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values ld1 {v28.16b - v30.16b}, [x5] mov x15, #1 // Prepare the Poly1305 state mov x8, #0 mov x9, #0 mov x10, #0 mov v31.d[0], x4 // Store the input and aad lengths mov v31.d[1], x2 cmp x2, #128 b.le Lopen_128 // Optimization for smaller buffers // Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys mov v0.16b, v24.16b mov v5.16b, v28.16b mov v10.16b, v29.16b mov v15.16b, v30.16b mov x6, #10 .align 5 Lopen_init_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b 
tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 subs x6, x6, #1 b.hi Lopen_init_rounds add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s and v0.16b, v0.16b, v27.16b mov x16, v0.d[0] // Move the R key to GPRs mov x17, v0.d[1] mov v27.16b, v5.16b // Store the S key bl Lpoly_hash_ad_internal Lopen_ad_done: mov x3, x1 // Each iteration of the loop hash 320 bytes, and prepare stream for 320 bytes Lopen_main_loop: cmp x2, #192 b.lt Lopen_tail adrp x11, Lchacha20_consts@PAGE add x11, x11, Lchacha20_consts@PAGEOFF ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] sub x5, x5, #32 add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s lsr x4, x2, #4 // How many whole blocks we have to hash, will always be at least 12 sub x4, x4, #10 mov x7, #10 subs x6, x7, x4 subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full cbz x7, Lopen_main_loop_rounds_short 
.align 5 Lopen_main_loop_rounds: ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lopen_main_loop_rounds_short: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, 
{v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, 
v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x7, x7, #1 b.gt Lopen_main_loop_rounds subs x6, x6, #1 b.ge Lopen_main_loop_rounds_short eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s add v15.4s, v15.4s, v25.4s mov x11, #5 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, 
v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s add v14.4s, v14.4s, v29.4s add v19.4s, v19.4s, v30.4s // We can always safely store 192 bytes ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #192 mov v0.16b, v3.16b mov v5.16b, v8.16b mov v10.16b, v13.16b mov v15.16b, v18.16b cmp x2, #64 b.lt Lopen_tail_64_store ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b 
eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 mov v0.16b, v4.16b mov v5.16b, v9.16b mov v10.16b, v14.16b mov v15.16b, v19.16b cmp x2, #64 b.lt Lopen_tail_64_store ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v9.16b eor v22.16b, v22.16b, v14.16b eor v23.16b, v23.16b, v19.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 b Lopen_main_loop Lopen_tail: cbz x2, Lopen_finalize lsr x4, x2, #4 // How many whole blocks we have to hash cmp x2, #64 b.le Lopen_tail_64 cmp x2, #128 b.le Lopen_tail_128 Lopen_tail_192: // We need three more blocks mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v15.16b, v30.16b mov v16.16b, v30.16b mov v17.16b, v30.16b eor v23.16b, v23.16b, v23.16b eor v21.16b, v21.16b, v21.16b ins v23.s[0], v25.s[0] ins v21.d[0], x15 add v22.4s, v23.4s, v21.4s add v21.4s, v22.4s, v21.4s add v15.4s, v15.4s, v21.4s add v16.4s, v16.4s, v23.4s add v17.4s, v17.4s, v22.4s mov x7, #10 subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing sub x4, x4, x7 cbz x7, Lopen_tail_192_rounds_no_hash Lopen_tail_192_rounds: ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, 
x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lopen_tail_192_rounds_no_hash: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, 
v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x7, x7, #1 b.gt Lopen_tail_192_rounds subs x6, x6, #1 b.ge Lopen_tail_192_rounds_no_hash // We hashed 160 bytes at most, may still have 32 bytes left Lopen_tail_192_hash: cbz x4, Lopen_tail_192_hash_done ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most 
sub x4, x4, #1 b Lopen_tail_192_hash Lopen_tail_192_hash_done: add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v12.4s, v12.4s, v29.4s add v15.4s, v15.4s, v30.4s add v16.4s, v16.4s, v30.4s add v17.4s, v17.4s, v30.4s add v15.4s, v15.4s, v21.4s add v16.4s, v16.4s, v23.4s add v17.4s, v17.4s, v22.4s ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #128 b Lopen_tail_64_store Lopen_tail_128: // We need two more blocks mov v0.16b, v24.16b mov v1.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v15.16b, v30.16b mov v16.16b, v30.16b eor v23.16b, v23.16b, v23.16b eor v22.16b, v22.16b, v22.16b ins v23.s[0], v25.s[0] ins v22.d[0], x15 add v22.4s, v22.4s, v23.4s add v15.4s, v15.4s, v22.4s add v16.4s, v16.4s, v23.4s mov x6, #10 sub x6, x6, x4 Lopen_tail_128_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v1.4s, v1.4s, v6.4s eor v16.16b, v16.16b, v1.16b rev32 v16.8h, v16.8h add v11.4s, v11.4s, v16.4s eor v6.16b, v6.16b, v11.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 add v1.4s, v1.4s, v20.4s eor v16.16b, v16.16b, 
v1.16b tbl v16.16b, {v16.16b}, v26.16b add v11.4s, v11.4s, v16.4s eor v20.16b, v20.16b, v11.16b ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v6.16b, v6.16b, v6.16b, #4 ext v11.16b, v11.16b, v11.16b, #8 ext v16.16b, v16.16b, v16.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 add v1.4s, v1.4s, v6.4s eor v16.16b, v16.16b, v1.16b rev32 v16.8h, v16.8h add v11.4s, v11.4s, v16.4s eor v6.16b, v6.16b, v11.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 add v1.4s, v1.4s, v20.4s eor v16.16b, v16.16b, v1.16b tbl v16.16b, {v16.16b}, v26.16b add v11.4s, v11.4s, v16.4s eor v20.16b, v20.16b, v11.16b ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v6.16b, v6.16b, v6.16b, #12 ext v11.16b, v11.16b, v11.16b, #8 ext v16.16b, v16.16b, v16.16b, #4 subs x6, x6, #1 b.gt Lopen_tail_128_rounds cbz x4, Lopen_tail_128_rounds_done subs x4, x4, #1 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr 
// At this point acc2 has the value of 4 at most b Lopen_tail_128_rounds Lopen_tail_128_rounds_done: add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v15.4s, v15.4s, v30.4s add v16.4s, v16.4s, v30.4s add v15.4s, v15.4s, v22.4s add v16.4s, v16.4s, v23.4s ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 b Lopen_tail_64_store Lopen_tail_64: // We just need a single block mov v0.16b, v24.16b mov v5.16b, v28.16b mov v10.16b, v29.16b mov v15.16b, v30.16b eor v23.16b, v23.16b, v23.16b ins v23.s[0], v25.s[0] add v15.4s, v15.4s, v23.4s mov x6, #10 sub x6, x6, x4 Lopen_tail_64_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 subs x6, x6, #1 b.gt Lopen_tail_64_rounds cbz x4, Lopen_tail_64_rounds_done subs x4, x4, #1 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 
umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most b Lopen_tail_64_rounds Lopen_tail_64_rounds_done: add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v15.4s, v15.4s, v23.4s Lopen_tail_64_store: cmp x2, #16 b.lt Lopen_tail_16 ld1 {v20.16b}, [x1], #16 eor v20.16b, v20.16b, v0.16b st1 {v20.16b}, [x0], #16 mov v0.16b, v5.16b mov v5.16b, v10.16b mov v10.16b, v15.16b sub x2, x2, #16 b Lopen_tail_64_store Lopen_tail_16: // Here we handle the last [0,16) bytes that require a padded block cbz x2, Lopen_finalize eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask not v22.16b, v20.16b add x7, x1, x2 mov x6, x2 Lopen_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x7, #-1]! 
mov v20.b[0], w11 ext v21.16b, v22.16b, v21.16b, #15 subs x2, x2, #1 b.gt Lopen_tail_16_compose and v20.16b, v20.16b, v21.16b // Hash in the final padded block mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most eor v20.16b, v20.16b, v0.16b Lopen_tail_16_store: umov w11, v20.b[0] strb w11, [x0], #1 ext v20.16b, v20.16b, v20.16b, #1 subs x6, x6, #1 b.gt Lopen_tail_16_store Lopen_finalize: mov x11, v31.d[0] mov x12, v31.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most // Final reduction step sub x12, xzr, x15 orr x13, xzr, #3 subs x11, x8, 
#-5 sbcs x12, x9, x12 sbcs x13, x10, x13 csel x8, x11, x8, cs csel x9, x12, x9, cs csel x10, x13, x10, cs mov x11, v27.d[0] mov x12, v27.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 stp x8, x9, [x5] ldp d8, d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] .cfi_restore b15 .cfi_restore b14 .cfi_restore b13 .cfi_restore b12 .cfi_restore b11 .cfi_restore b10 .cfi_restore b9 .cfi_restore b8 ldp x29, x30, [sp], 80 .cfi_restore w29 .cfi_restore w30 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret Lopen_128: // On some architectures preparing 5 blocks for small buffers is wasteful eor v25.16b, v25.16b, v25.16b mov x11, #1 mov v25.s[0], w11 mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v17.16b, v30.16b add v15.4s, v17.4s, v25.4s add v16.4s, v15.4s, v25.4s mov x6, #10 Lopen_128_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 
sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x6, x6, #1 b.hi Lopen_128_rounds add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add 
v30.4s, v30.4s, v25.4s add v15.4s, v15.4s, v30.4s add v30.4s, v30.4s, v25.4s add v16.4s, v16.4s, v30.4s and v2.16b, v2.16b, v27.16b mov x16, v2.d[0] // Move the R key to GPRs mov x17, v2.d[1] mov v27.16b, v7.16b // Store the S key bl Lpoly_hash_ad_internal Lopen_128_store: cmp x2, #64 b.lt Lopen_128_store_64 ld1 {v20.16b - v23.16b}, [x1], #64 mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v22.d[0] mov x12, v22.d[1] adds x8, x8, x11 adcs x9, x9, 
x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v23.d[0] mov x12, v23.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 mov v0.16b, v1.16b mov v5.16b, v6.16b mov v10.16b, v11.16b mov v15.16b, v16.16b Lopen_128_store_64: lsr x4, x2, #4 mov x3, x1 Lopen_128_hash_64: cbz x4, Lopen_tail_64_store ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh 
x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #1 b Lopen_128_hash_64 .cfi_endproc #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha20_poly1305_armv8-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include .section .rodata .align 7 .Lchacha20_consts: .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' .Linc: .long 1,2,3,4 .Lrol8: .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 .Lclamp: .quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC .text .type .Lpoly_hash_ad_internal,%function .align 6 .Lpoly_hash_ad_internal: .cfi_startproc cbnz x4, .Lpoly_hash_intro ret .Lpoly_hash_intro: cmp x4, #16 b.lt .Lpoly_hash_ad_tail ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #16 b .Lpoly_hash_ad_internal .Lpoly_hash_ad_tail: cbz x4, .Lpoly_hash_ad_ret eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD sub x4, x4, #1 .Lpoly_hash_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, x4] mov v20.b[0], w11 subs x4, x4, #1 b.ge .Lpoly_hash_tail_16_compose mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, 
x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most .Lpoly_hash_ad_ret: ret .cfi_endproc .size .Lpoly_hash_ad_internal, .-.Lpoly_hash_ad_internal ///////////////////////////////// // // void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data); // .globl chacha20_poly1305_seal .hidden chacha20_poly1305_seal .type chacha20_poly1305_seal,%function .align 6 chacha20_poly1305_seal: AARCH64_SIGN_LINK_REGISTER .cfi_startproc stp x29, x30, [sp, #-80]! .cfi_def_cfa_offset 80 .cfi_offset w30, -72 .cfi_offset w29, -80 mov x29, sp // We probably could do .cfi_def_cfa w29, 80 at this point, but since // we don't actually use the frame pointer like that, it's probably not // worth bothering. stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] .cfi_offset b15, -8 .cfi_offset b14, -16 .cfi_offset b13, -24 .cfi_offset b12, -32 .cfi_offset b11, -40 .cfi_offset b10, -48 .cfi_offset b9, -56 .cfi_offset b8, -64 adrp x11, .Lchacha20_consts add x11, x11, :lo12:.Lchacha20_consts ld1 {v24.16b - v27.16b}, [x11] // .Load the CONSTS, INC, ROL8 and CLAMP values ld1 {v28.16b - v30.16b}, [x5] mov x15, #1 // Prepare the Poly1305 state mov x8, #0 mov x9, #0 mov x10, #0 ldr x12, [x5, #56] // The total cipher text length includes extra_in_len add x12, x12, x2 mov v31.d[0], x4 // Store the input and aad lengths mov v31.d[1], x12 cmp x2, #128 b.le .Lseal_128 // Optimization for smaller buffers // Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext, // and one for the Poly1305 R and S keys. 
The first four blocks (A0-A3..D0-D3) are computed vertically, // the fifth block (A4-D4) horizontally. ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b sub x5, x5, #32 mov x6, #10 .align 5 .Lseal_init_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, 
v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, 
#7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x6, x6, #1 b.hi .Lseal_init_rounds add v15.4s, v15.4s, v25.4s mov x11, #4 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s and v4.16b, v4.16b, v27.16b add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s mov x16, v4.d[0] // Move the R key to GPRs mov x17, v4.d[1] mov v27.16b, v9.16b // Store the S key bl .Lpoly_hash_ad_internal mov x3, x0 cmp x2, #256 b.le .Lseal_tail ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, 
v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #256 mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256 .Lseal_main_loop: adrp x11, .Lchacha20_consts add x11, x11, :lo12:.Lchacha20_consts ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s sub x5, x5, #32 .align 5 .Lseal_main_loop_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, 
v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, 
x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext 
v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x6, x6, #1 b.ge .Lseal_main_loop_rounds ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most subs x7, x7, #1 b.gt .Lseal_main_loop_rounds eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s add v15.4s, v15.4s, v25.4s mov x11, #5 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, 
v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s add v14.4s, v14.4s, v29.4s add v19.4s, v19.4s, v30.4s cmp x2, #320 b.le .Lseal_tail ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v9.16b eor v22.16b, v22.16b, v14.16b eor v23.16b, v23.16b, v19.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #320 mov x6, #0 mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration b .Lseal_main_loop .Lseal_tail: // This part of the function handles the storage and authentication of the last [0,320) bytes // We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data. 
cmp x2, #64 b.lt .Lseal_tail_64 // Store and authenticate 64B blocks per iteration ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v22.d[0] mov x12, v22.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, 
x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v23.d[0] mov x12, v23.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 // Shift the state left by 64 bytes for the next iteration of the loop mov v0.16b, v1.16b mov v5.16b, v6.16b mov v10.16b, v11.16b mov v15.16b, v16.16b mov v1.16b, v2.16b mov v6.16b, v7.16b mov v11.16b, v12.16b mov v16.16b, v17.16b mov v2.16b, v3.16b mov v7.16b, v8.16b mov v12.16b, v13.16b mov v17.16b, v18.16b mov v3.16b, v4.16b mov v8.16b, v9.16b mov v13.16b, v14.16b mov v18.16b, v19.16b b .Lseal_tail .Lseal_tail_64: ldp x3, x4, [x5, #48] // extra_in_len and extra_in_ptr // Here we handle the last [0,64) bytes of plaintext cmp 
x2, #16 b.lt .Lseal_tail_16 // Each iteration encrypt and authenticate a 16B block ld1 {v20.16b}, [x1], #16 eor v20.16b, v20.16b, v0.16b mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most st1 {v20.16b}, [x0], #16 sub x2, x2, #16 // Shift the state left by 16 bytes for the next iteration of the loop mov v0.16b, v5.16b mov v5.16b, v10.16b mov v10.16b, v15.16b b .Lseal_tail_64 .Lseal_tail_16: // Here we handle the last [0,16) bytes of ciphertext that require a padded block cbz x2, .Lseal_hash_extra eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes not v22.16b, v20.16b mov x6, x2 add x1, x1, x2 cbz x4, .Lseal_tail_16_compose // No extra data to pad with, zero padding mov x7, #16 // We need to load some extra_in first for padding sub x7, x7, x2 cmp x4, x7 csel x7, x4, x7, lt // .Load the minimum of extra_in_len and the amount needed to fill the register mov x12, x7 add x3, x3, x7 sub x4, x4, x7 .Lseal_tail16_compose_extra_in: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, #-1]! 
mov v20.b[0], w11 subs x7, x7, #1 b.gt .Lseal_tail16_compose_extra_in add x3, x3, x12 .Lseal_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x1, #-1]! mov v20.b[0], w11 ext v21.16b, v22.16b, v21.16b, #15 subs x2, x2, #1 b.gt .Lseal_tail_16_compose and v0.16b, v0.16b, v21.16b eor v20.16b, v20.16b, v0.16b mov v21.16b, v20.16b .Lseal_tail_16_store: umov w11, v20.b[0] strb w11, [x0], #1 ext v20.16b, v20.16b, v20.16b, #1 subs x6, x6, #1 b.gt .Lseal_tail_16_store // Hash in the final ct block concatenated with extra_in mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most .Lseal_hash_extra: cbz x4, .Lseal_finalize .Lseal_hash_extra_loop: cmp x4, #16 b.lt .Lseal_hash_extra_tail ld1 {v20.16b}, [x3], #16 mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most 
(value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #16 b .Lseal_hash_extra_loop .Lseal_hash_extra_tail: cbz x4, .Lseal_finalize eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext add x3, x3, x4 .Lseal_hash_extra_load: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, #-1]! mov v20.b[0], w11 subs x4, x4, #1 b.gt .Lseal_hash_extra_load // Hash in the final padded extra_in blcok mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most .Lseal_finalize: mov x11, v31.d[0] mov x12, v31.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, 
#-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most // Final reduction step sub x12, xzr, x15 orr x13, xzr, #3 subs x11, x8, #-5 sbcs x12, x9, x12 sbcs x13, x10, x13 csel x8, x11, x8, cs csel x9, x12, x9, cs csel x10, x13, x10, cs mov x11, v27.d[0] mov x12, v27.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 stp x8, x9, [x5] ldp d8, d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] .cfi_restore b15 .cfi_restore b14 .cfi_restore b13 .cfi_restore b12 .cfi_restore b11 .cfi_restore b10 .cfi_restore b9 .cfi_restore b8 ldp x29, x30, [sp], 80 .cfi_restore w29 .cfi_restore w30 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret .Lseal_128: // On some architectures preparing 5 blocks for small buffers is wasteful eor v25.16b, v25.16b, v25.16b mov x11, #1 mov v25.s[0], w11 mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v17.16b, v30.16b add v15.4s, v17.4s, v25.4s add v16.4s, v15.4s, v25.4s mov x6, #10 .Lseal_128_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, 
{v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 
ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x6, x6, #1 b.hi .Lseal_128_rounds add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s // Only the first 32 bytes of the third block (counter = 0) are needed, // so skip updating v12 and v17. add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v30.4s, v30.4s, v25.4s add v15.4s, v15.4s, v30.4s add v30.4s, v30.4s, v25.4s add v16.4s, v16.4s, v30.4s and v2.16b, v2.16b, v27.16b mov x16, v2.d[0] // Move the R key to GPRs mov x17, v2.d[1] mov v27.16b, v7.16b // Store the S key bl .Lpoly_hash_ad_internal b .Lseal_tail .cfi_endproc .size chacha20_poly1305_seal,.-chacha20_poly1305_seal ///////////////////////////////// // // void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data); // .globl chacha20_poly1305_open .hidden chacha20_poly1305_open .type chacha20_poly1305_open,%function .align 6 chacha20_poly1305_open: AARCH64_SIGN_LINK_REGISTER .cfi_startproc stp x29, x30, [sp, #-80]! .cfi_def_cfa_offset 80 .cfi_offset w30, -72 .cfi_offset w29, -80 mov x29, sp // We probably could do .cfi_def_cfa w29, 80 at this point, but since // we don't actually use the frame pointer like that, it's probably not // worth bothering. 
stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] .cfi_offset b15, -8 .cfi_offset b14, -16 .cfi_offset b13, -24 .cfi_offset b12, -32 .cfi_offset b11, -40 .cfi_offset b10, -48 .cfi_offset b9, -56 .cfi_offset b8, -64 adrp x11, .Lchacha20_consts add x11, x11, :lo12:.Lchacha20_consts ld1 {v24.16b - v27.16b}, [x11] // .Load the CONSTS, INC, ROL8 and CLAMP values ld1 {v28.16b - v30.16b}, [x5] mov x15, #1 // Prepare the Poly1305 state mov x8, #0 mov x9, #0 mov x10, #0 mov v31.d[0], x4 // Store the input and aad lengths mov v31.d[1], x2 cmp x2, #128 b.le .Lopen_128 // Optimization for smaller buffers // Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys mov v0.16b, v24.16b mov v5.16b, v28.16b mov v10.16b, v29.16b mov v15.16b, v30.16b mov x6, #10 .align 5 .Lopen_init_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 subs x6, x6, #1 b.hi .Lopen_init_rounds add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s and v0.16b, v0.16b, v27.16b mov x16, v0.d[0] // Move the R key to GPRs mov x17, v0.d[1] mov v27.16b, v5.16b // Store the S key bl 
.Lpoly_hash_ad_internal .Lopen_ad_done: mov x3, x1 // Each iteration of the loop hash 320 bytes, and prepare stream for 320 bytes .Lopen_main_loop: cmp x2, #192 b.lt .Lopen_tail adrp x11, .Lchacha20_consts add x11, x11, :lo12:.Lchacha20_consts ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] sub x5, x5, #32 add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s lsr x4, x2, #4 // How many whole blocks we have to hash, will always be at least 12 sub x4, x4, #10 mov x7, #10 subs x6, x7, x4 subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full cbz x7, .Lopen_main_loop_rounds_short .align 5 .Lopen_main_loop_rounds: ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most .Lopen_main_loop_rounds_short: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s 
add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh 
x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, 
v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x7, x7, #1 b.gt .Lopen_main_loop_rounds subs x6, x6, #1 b.ge .Lopen_main_loop_rounds_short eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s add v15.4s, v15.4s, v25.4s mov x11, #5 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, 
v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s add v14.4s, v14.4s, v29.4s add v19.4s, v19.4s, v30.4s // We can always safely store 192 bytes ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #192 mov v0.16b, v3.16b mov v5.16b, v8.16b mov v10.16b, v13.16b mov v15.16b, v18.16b cmp x2, #64 b.lt .Lopen_tail_64_store ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 mov v0.16b, v4.16b mov v5.16b, v9.16b mov v10.16b, v14.16b mov v15.16b, v19.16b cmp x2, #64 b.lt .Lopen_tail_64_store ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v9.16b eor v22.16b, v22.16b, v14.16b eor v23.16b, v23.16b, v19.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 b .Lopen_main_loop .Lopen_tail: cbz x2, .Lopen_finalize lsr x4, x2, #4 // How many whole blocks we have to hash cmp x2, #64 b.le .Lopen_tail_64 cmp x2, #128 b.le .Lopen_tail_128 .Lopen_tail_192: // We need three more blocks mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v15.16b, v30.16b mov v16.16b, v30.16b mov 
v17.16b, v30.16b eor v23.16b, v23.16b, v23.16b eor v21.16b, v21.16b, v21.16b ins v23.s[0], v25.s[0] ins v21.d[0], x15 add v22.4s, v23.4s, v21.4s add v21.4s, v22.4s, v21.4s add v15.4s, v15.4s, v21.4s add v16.4s, v16.4s, v23.4s add v17.4s, v17.4s, v22.4s mov x7, #10 subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing sub x4, x4, x7 cbz x7, .Lopen_tail_192_rounds_no_hash .Lopen_tail_192_rounds: ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most .Lopen_tail_192_rounds_no_hash: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor 
v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, 
#8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x7, x7, #1 b.gt .Lopen_tail_192_rounds subs x6, x6, #1 b.ge .Lopen_tail_192_rounds_no_hash // We hashed 160 bytes at most, may still have 32 bytes left .Lopen_tail_192_hash: cbz x4, .Lopen_tail_192_hash_done ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #1 b .Lopen_tail_192_hash .Lopen_tail_192_hash_done: add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v12.4s, v12.4s, v29.4s add v15.4s, v15.4s, v30.4s add v16.4s, v16.4s, v30.4s add v17.4s, v17.4s, v30.4s add v15.4s, v15.4s, v21.4s add v16.4s, v16.4s, v23.4s add v17.4s, v17.4s, v22.4s ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #128 b .Lopen_tail_64_store .Lopen_tail_128: // We 
need two more blocks mov v0.16b, v24.16b mov v1.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v15.16b, v30.16b mov v16.16b, v30.16b eor v23.16b, v23.16b, v23.16b eor v22.16b, v22.16b, v22.16b ins v23.s[0], v25.s[0] ins v22.d[0], x15 add v22.4s, v22.4s, v23.4s add v15.4s, v15.4s, v22.4s add v16.4s, v16.4s, v23.4s mov x6, #10 sub x6, x6, x4 .Lopen_tail_128_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v1.4s, v1.4s, v6.4s eor v16.16b, v16.16b, v1.16b rev32 v16.8h, v16.8h add v11.4s, v11.4s, v16.4s eor v6.16b, v6.16b, v11.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 add v1.4s, v1.4s, v20.4s eor v16.16b, v16.16b, v1.16b tbl v16.16b, {v16.16b}, v26.16b add v11.4s, v11.4s, v16.4s eor v20.16b, v20.16b, v11.16b ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v6.16b, v6.16b, v6.16b, #4 ext v11.16b, v11.16b, v11.16b, #8 ext v16.16b, v16.16b, v16.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 add v1.4s, v1.4s, v6.4s eor v16.16b, v16.16b, v1.16b rev32 v16.8h, v16.8h add v11.4s, v11.4s, v16.4s eor v6.16b, v6.16b, v11.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 add v1.4s, 
v1.4s, v20.4s eor v16.16b, v16.16b, v1.16b tbl v16.16b, {v16.16b}, v26.16b add v11.4s, v11.4s, v16.4s eor v20.16b, v20.16b, v11.16b ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v6.16b, v6.16b, v6.16b, #12 ext v11.16b, v11.16b, v11.16b, #8 ext v16.16b, v16.16b, v16.16b, #4 subs x6, x6, #1 b.gt .Lopen_tail_128_rounds cbz x4, .Lopen_tail_128_rounds_done subs x4, x4, #1 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most b .Lopen_tail_128_rounds .Lopen_tail_128_rounds_done: add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v15.4s, v15.4s, v30.4s add v16.4s, v16.4s, v30.4s add v15.4s, v15.4s, v22.4s add v16.4s, v16.4s, v23.4s ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 b .Lopen_tail_64_store .Lopen_tail_64: // We just need a single block mov v0.16b, v24.16b mov v5.16b, v28.16b mov v10.16b, v29.16b mov v15.16b, v30.16b eor v23.16b, v23.16b, v23.16b ins v23.s[0], v25.s[0] add v15.4s, v15.4s, v23.4s mov x6, #10 sub x6, x6, x4 .Lopen_tail_64_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, 
v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 subs x6, x6, #1 b.gt .Lopen_tail_64_rounds cbz x4, .Lopen_tail_64_rounds_done subs x4, x4, #1 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most b .Lopen_tail_64_rounds .Lopen_tail_64_rounds_done: add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v15.4s, v15.4s, v23.4s .Lopen_tail_64_store: cmp x2, #16 b.lt 
.Lopen_tail_16 ld1 {v20.16b}, [x1], #16 eor v20.16b, v20.16b, v0.16b st1 {v20.16b}, [x0], #16 mov v0.16b, v5.16b mov v5.16b, v10.16b mov v10.16b, v15.16b sub x2, x2, #16 b .Lopen_tail_64_store .Lopen_tail_16: // Here we handle the last [0,16) bytes that require a padded block cbz x2, .Lopen_finalize eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask not v22.16b, v20.16b add x7, x1, x2 mov x6, x2 .Lopen_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x7, #-1]! mov v20.b[0], w11 ext v21.16b, v22.16b, v21.16b, #15 subs x2, x2, #1 b.gt .Lopen_tail_16_compose and v20.16b, v20.16b, v21.16b // Hash in the final padded block mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most eor v20.16b, v20.16b, v0.16b .Lopen_tail_16_store: umov w11, v20.b[0] strb w11, [x0], #1 ext v20.16b, v20.16b, v20.16b, #1 subs x6, x6, #1 b.gt .Lopen_tail_16_store .Lopen_finalize: mov x11, v31.d[0] mov x12, v31.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * 
[r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most // Final reduction step sub x12, xzr, x15 orr x13, xzr, #3 subs x11, x8, #-5 sbcs x12, x9, x12 sbcs x13, x10, x13 csel x8, x11, x8, cs csel x9, x12, x9, cs csel x10, x13, x10, cs mov x11, v27.d[0] mov x12, v27.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 stp x8, x9, [x5] ldp d8, d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] .cfi_restore b15 .cfi_restore b14 .cfi_restore b13 .cfi_restore b12 .cfi_restore b11 .cfi_restore b10 .cfi_restore b9 .cfi_restore b8 ldp x29, x30, [sp], 80 .cfi_restore w29 .cfi_restore w30 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret .Lopen_128: // On some architectures preparing 5 blocks for small buffers is wasteful eor v25.16b, v25.16b, v25.16b mov x11, #1 mov v25.s[0], w11 mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v17.16b, v30.16b add v15.4s, v17.4s, v25.4s add v16.4s, v15.4s, v25.4s mov x6, #10 .Lopen_128_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, 
#20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, 
v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x6, x6, #1 b.hi .Lopen_128_rounds add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v30.4s, v30.4s, v25.4s add v15.4s, v15.4s, v30.4s add v30.4s, v30.4s, v25.4s add v16.4s, v16.4s, v30.4s and v2.16b, v2.16b, v27.16b mov x16, v2.d[0] // Move the R key to GPRs mov x17, v2.d[1] mov v27.16b, v7.16b // Store the S key bl .Lpoly_hash_ad_internal .Lopen_128_store: cmp x2, #64 b.lt .Lopen_128_store_64 ld1 {v20.16b - v23.16b}, [x1], #64 mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = 
[acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v22.d[0] mov x12, v22.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v23.d[0] mov x12, v23.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds 
x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 mov v0.16b, v1.16b mov v5.16b, v6.16b mov v10.16b, v11.16b mov v15.16b, v16.16b .Lopen_128_store_64: lsr x4, x2, #4 mov x3, x1 .Lopen_128_hash_64: cbz x4, .Lopen_tail_64_store ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #1 b .Lopen_128_hash_64 .cfi_endproc .size chacha20_poly1305_open,.-chacha20_poly1305_open #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha20_poly1305_armv8-win.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include .section .rodata .align 7 Lchacha20_consts: .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' Linc: .long 1,2,3,4 Lrol8: .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 Lclamp: .quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC .text .def Lpoly_hash_ad_internal .type 32 .endef .align 6 Lpoly_hash_ad_internal: .cfi_startproc cbnz x4, Lpoly_hash_intro ret Lpoly_hash_intro: cmp x4, #16 b.lt Lpoly_hash_ad_tail ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #16 b Lpoly_hash_ad_internal Lpoly_hash_ad_tail: cbz x4, Lpoly_hash_ad_ret eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD sub x4, x4, #1 Lpoly_hash_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, x4] mov v20.b[0], w11 subs x4, x4, #1 b.ge Lpoly_hash_tail_16_compose mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs 
x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lpoly_hash_ad_ret: ret .cfi_endproc ///////////////////////////////// // // void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data); // .globl chacha20_poly1305_seal .def chacha20_poly1305_seal .type 32 .endef .align 6 chacha20_poly1305_seal: AARCH64_SIGN_LINK_REGISTER .cfi_startproc stp x29, x30, [sp, #-80]! .cfi_def_cfa_offset 80 .cfi_offset w30, -72 .cfi_offset w29, -80 mov x29, sp // We probably could do .cfi_def_cfa w29, 80 at this point, but since // we don't actually use the frame pointer like that, it's probably not // worth bothering. stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] .cfi_offset b15, -8 .cfi_offset b14, -16 .cfi_offset b13, -24 .cfi_offset b12, -32 .cfi_offset b11, -40 .cfi_offset b10, -48 .cfi_offset b9, -56 .cfi_offset b8, -64 adrp x11, Lchacha20_consts add x11, x11, :lo12:Lchacha20_consts ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values ld1 {v28.16b - v30.16b}, [x5] mov x15, #1 // Prepare the Poly1305 state mov x8, #0 mov x9, #0 mov x10, #0 ldr x12, [x5, #56] // The total cipher text length includes extra_in_len add x12, x12, x2 mov v31.d[0], x4 // Store the input and aad lengths mov v31.d[1], x12 cmp x2, #128 b.le Lseal_128 // Optimization for smaller buffers // Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext, // and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically, // the fifth block (A4-D4) horizontally. 
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b sub x5, x5, #32 mov x6, #10 .align 5 Lseal_init_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr 
v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, 
#25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x6, x6, #1 b.hi Lseal_init_rounds add v15.4s, v15.4s, v25.4s mov x11, #4 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s and v4.16b, v4.16b, v27.16b add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s mov x16, v4.d[0] // Move the R key to GPRs mov x17, v4.d[1] mov v27.16b, v9.16b // Store the S key bl Lpoly_hash_ad_internal mov x3, x0 cmp x2, #256 b.le Lseal_tail ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], 
#64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #256 mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256 Lseal_main_loop: adrp x11, Lchacha20_consts add x11, x11, :lo12:Lchacha20_consts ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s sub x5, x5, #32 .align 5 Lseal_main_loop_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, 
v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr 
// At this point acc2 has the value of 4 at most add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x6, x6, #1 b.ge Lseal_main_loop_rounds ldp x11, 
x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most subs x7, x7, #1 b.gt Lseal_main_loop_rounds eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s add v15.4s, v15.4s, v25.4s mov x11, #5 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v0.4s, v0.4s, v24.4s add 
v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s add v14.4s, v14.4s, v29.4s add v19.4s, v19.4s, v30.4s cmp x2, #320 b.le Lseal_tail ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v9.16b eor v22.16b, v22.16b, v14.16b eor v23.16b, v23.16b, v19.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #320 mov x6, #0 mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration b Lseal_main_loop Lseal_tail: // This part of the function handles the storage and authentication of the last [0,320) bytes // We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data. 
cmp x2, #64 b.lt Lseal_tail_64 // Store and authenticate 64B blocks per iteration ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v22.d[0] mov x12, v22.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, 
x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v23.d[0] mov x12, v23.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 // Shift the state left by 64 bytes for the next iteration of the loop mov v0.16b, v1.16b mov v5.16b, v6.16b mov v10.16b, v11.16b mov v15.16b, v16.16b mov v1.16b, v2.16b mov v6.16b, v7.16b mov v11.16b, v12.16b mov v16.16b, v17.16b mov v2.16b, v3.16b mov v7.16b, v8.16b mov v12.16b, v13.16b mov v17.16b, v18.16b mov v3.16b, v4.16b mov v8.16b, v9.16b mov v13.16b, v14.16b mov v18.16b, v19.16b b Lseal_tail Lseal_tail_64: ldp x3, x4, [x5, #48] // extra_in_len and extra_in_ptr // Here we handle the last [0,64) bytes of plaintext cmp x2, 
#16 b.lt Lseal_tail_16 // Each iteration encrypt and authenticate a 16B block ld1 {v20.16b}, [x1], #16 eor v20.16b, v20.16b, v0.16b mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most st1 {v20.16b}, [x0], #16 sub x2, x2, #16 // Shift the state left by 16 bytes for the next iteration of the loop mov v0.16b, v5.16b mov v5.16b, v10.16b mov v10.16b, v15.16b b Lseal_tail_64 Lseal_tail_16: // Here we handle the last [0,16) bytes of ciphertext that require a padded block cbz x2, Lseal_hash_extra eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes not v22.16b, v20.16b mov x6, x2 add x1, x1, x2 cbz x4, Lseal_tail_16_compose // No extra data to pad with, zero padding mov x7, #16 // We need to load some extra_in first for padding sub x7, x7, x2 cmp x4, x7 csel x7, x4, x7, lt // Load the minimum of extra_in_len and the amount needed to fill the register mov x12, x7 add x3, x3, x7 sub x4, x4, x7 Lseal_tail16_compose_extra_in: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, #-1]! 
mov v20.b[0], w11 subs x7, x7, #1 b.gt Lseal_tail16_compose_extra_in add x3, x3, x12 Lseal_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x1, #-1]! mov v20.b[0], w11 ext v21.16b, v22.16b, v21.16b, #15 subs x2, x2, #1 b.gt Lseal_tail_16_compose and v0.16b, v0.16b, v21.16b eor v20.16b, v20.16b, v0.16b mov v21.16b, v20.16b Lseal_tail_16_store: umov w11, v20.b[0] strb w11, [x0], #1 ext v20.16b, v20.16b, v20.16b, #1 subs x6, x6, #1 b.gt Lseal_tail_16_store // Hash in the final ct block concatenated with extra_in mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lseal_hash_extra: cbz x4, Lseal_finalize Lseal_hash_extra_loop: cmp x4, #16 b.lt Lseal_hash_extra_tail ld1 {v20.16b}, [x3], #16 mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) 
and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #16 b Lseal_hash_extra_loop Lseal_hash_extra_tail: cbz x4, Lseal_finalize eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext add x3, x3, x4 Lseal_hash_extra_load: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, #-1]! mov v20.b[0], w11 subs x4, x4, #1 b.gt Lseal_hash_extra_load // Hash in the final padded extra_in blcok mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lseal_finalize: mov x11, v31.d[0] mov x12, v31.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, 
x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most // Final reduction step sub x12, xzr, x15 orr x13, xzr, #3 subs x11, x8, #-5 sbcs x12, x9, x12 sbcs x13, x10, x13 csel x8, x11, x8, cs csel x9, x12, x9, cs csel x10, x13, x10, cs mov x11, v27.d[0] mov x12, v27.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 stp x8, x9, [x5] ldp d8, d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] .cfi_restore b15 .cfi_restore b14 .cfi_restore b13 .cfi_restore b12 .cfi_restore b11 .cfi_restore b10 .cfi_restore b9 .cfi_restore b8 ldp x29, x30, [sp], 80 .cfi_restore w29 .cfi_restore w30 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret Lseal_128: // On some architectures preparing 5 blocks for small buffers is wasteful eor v25.16b, v25.16b, v25.16b mov x11, #1 mov v25.s[0], w11 mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v17.16b, v30.16b add v15.4s, v17.4s, v25.4s add v16.4s, v15.4s, v25.4s mov x6, #10 Lseal_128_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl 
v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, 
v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x6, x6, #1 b.hi Lseal_128_rounds add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s // Only the first 32 bytes of the third block (counter = 0) are needed, // so skip updating v12 and v17. add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v30.4s, v30.4s, v25.4s add v15.4s, v15.4s, v30.4s add v30.4s, v30.4s, v25.4s add v16.4s, v16.4s, v30.4s and v2.16b, v2.16b, v27.16b mov x16, v2.d[0] // Move the R key to GPRs mov x17, v2.d[1] mov v27.16b, v7.16b // Store the S key bl Lpoly_hash_ad_internal b Lseal_tail .cfi_endproc ///////////////////////////////// // // void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data); // .globl chacha20_poly1305_open .def chacha20_poly1305_open .type 32 .endef .align 6 chacha20_poly1305_open: AARCH64_SIGN_LINK_REGISTER .cfi_startproc stp x29, x30, [sp, #-80]! .cfi_def_cfa_offset 80 .cfi_offset w30, -72 .cfi_offset w29, -80 mov x29, sp // We probably could do .cfi_def_cfa w29, 80 at this point, but since // we don't actually use the frame pointer like that, it's probably not // worth bothering. 
stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] .cfi_offset b15, -8 .cfi_offset b14, -16 .cfi_offset b13, -24 .cfi_offset b12, -32 .cfi_offset b11, -40 .cfi_offset b10, -48 .cfi_offset b9, -56 .cfi_offset b8, -64 adrp x11, Lchacha20_consts add x11, x11, :lo12:Lchacha20_consts ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values ld1 {v28.16b - v30.16b}, [x5] mov x15, #1 // Prepare the Poly1305 state mov x8, #0 mov x9, #0 mov x10, #0 mov v31.d[0], x4 // Store the input and aad lengths mov v31.d[1], x2 cmp x2, #128 b.le Lopen_128 // Optimization for smaller buffers // Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys mov v0.16b, v24.16b mov v5.16b, v28.16b mov v10.16b, v29.16b mov v15.16b, v30.16b mov x6, #10 .align 5 Lopen_init_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 subs x6, x6, #1 b.hi Lopen_init_rounds add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s and v0.16b, v0.16b, v27.16b mov x16, v0.d[0] // Move the R key to GPRs mov x17, v0.d[1] mov v27.16b, v5.16b // Store the S key bl 
Lpoly_hash_ad_internal Lopen_ad_done: mov x3, x1 // Each iteration of the loop hash 320 bytes, and prepare stream for 320 bytes Lopen_main_loop: cmp x2, #192 b.lt Lopen_tail adrp x11, Lchacha20_consts add x11, x11, :lo12:Lchacha20_consts ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] sub x5, x5, #32 add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s lsr x4, x2, #4 // How many whole blocks we have to hash, will always be at least 12 sub x4, x4, #10 mov x7, #10 subs x6, x7, x4 subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full cbz x7, Lopen_main_loop_rounds_short .align 5 Lopen_main_loop_rounds: ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lopen_main_loop_rounds_short: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, 
v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 
adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, 
v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x7, x7, #1 b.gt Lopen_main_loop_rounds subs x6, x6, #1 b.ge Lopen_main_loop_rounds_short eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s add v15.4s, v15.4s, v25.4s mov x11, #5 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, 
v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s add v14.4s, v14.4s, v29.4s add v19.4s, v19.4s, v30.4s // We can always safely store 192 bytes ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #192 mov v0.16b, v3.16b mov v5.16b, v8.16b mov v10.16b, v13.16b mov v15.16b, v18.16b cmp x2, #64 b.lt Lopen_tail_64_store ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 mov v0.16b, v4.16b mov v5.16b, v9.16b mov v10.16b, v14.16b mov v15.16b, v19.16b cmp x2, #64 b.lt Lopen_tail_64_store ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v9.16b eor v22.16b, v22.16b, v14.16b eor v23.16b, v23.16b, v19.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 b Lopen_main_loop Lopen_tail: cbz x2, Lopen_finalize lsr x4, x2, #4 // How many whole blocks we have to hash cmp x2, #64 b.le Lopen_tail_64 cmp x2, #128 b.le Lopen_tail_128 Lopen_tail_192: // We need three more blocks mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v15.16b, v30.16b mov v16.16b, v30.16b mov v17.16b, v30.16b eor 
v23.16b, v23.16b, v23.16b eor v21.16b, v21.16b, v21.16b ins v23.s[0], v25.s[0] ins v21.d[0], x15 add v22.4s, v23.4s, v21.4s add v21.4s, v22.4s, v21.4s add v15.4s, v15.4s, v21.4s add v16.4s, v16.4s, v23.4s add v17.4s, v17.4s, v22.4s mov x7, #10 subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing sub x4, x4, x7 cbz x7, Lopen_tail_192_rounds_no_hash Lopen_tail_192_rounds: ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lopen_tail_192_rounds_no_hash: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b 
tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, 
v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x7, x7, #1 b.gt Lopen_tail_192_rounds subs x6, x6, #1 b.ge Lopen_tail_192_rounds_no_hash // We hashed 160 bytes at most, may still have 32 bytes left Lopen_tail_192_hash: cbz x4, Lopen_tail_192_hash_done ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #1 b Lopen_tail_192_hash Lopen_tail_192_hash_done: add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v12.4s, v12.4s, v29.4s add v15.4s, v15.4s, v30.4s add v16.4s, v16.4s, v30.4s add v17.4s, v17.4s, v30.4s add v15.4s, v15.4s, v21.4s add v16.4s, v16.4s, v23.4s add v17.4s, v17.4s, v22.4s ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #128 b Lopen_tail_64_store Lopen_tail_128: // We need two more blocks mov v0.16b, 
v24.16b mov v1.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v15.16b, v30.16b mov v16.16b, v30.16b eor v23.16b, v23.16b, v23.16b eor v22.16b, v22.16b, v22.16b ins v23.s[0], v25.s[0] ins v22.d[0], x15 add v22.4s, v22.4s, v23.4s add v15.4s, v15.4s, v22.4s add v16.4s, v16.4s, v23.4s mov x6, #10 sub x6, x6, x4 Lopen_tail_128_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v1.4s, v1.4s, v6.4s eor v16.16b, v16.16b, v1.16b rev32 v16.8h, v16.8h add v11.4s, v11.4s, v16.4s eor v6.16b, v6.16b, v11.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 add v1.4s, v1.4s, v20.4s eor v16.16b, v16.16b, v1.16b tbl v16.16b, {v16.16b}, v26.16b add v11.4s, v11.4s, v16.4s eor v20.16b, v20.16b, v11.16b ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v6.16b, v6.16b, v6.16b, #4 ext v11.16b, v11.16b, v11.16b, #8 ext v16.16b, v16.16b, v16.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 add v1.4s, v1.4s, v6.4s eor v16.16b, v16.16b, v1.16b rev32 v16.8h, v16.8h add v11.4s, v11.4s, v16.4s eor v6.16b, v6.16b, v11.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 add v1.4s, v1.4s, v20.4s eor v16.16b, 
v16.16b, v1.16b tbl v16.16b, {v16.16b}, v26.16b add v11.4s, v11.4s, v16.4s eor v20.16b, v20.16b, v11.16b ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v6.16b, v6.16b, v6.16b, #12 ext v11.16b, v11.16b, v11.16b, #8 ext v16.16b, v16.16b, v16.16b, #4 subs x6, x6, #1 b.gt Lopen_tail_128_rounds cbz x4, Lopen_tail_128_rounds_done subs x4, x4, #1 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most b Lopen_tail_128_rounds Lopen_tail_128_rounds_done: add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v15.4s, v15.4s, v30.4s add v16.4s, v16.4s, v30.4s add v15.4s, v15.4s, v22.4s add v16.4s, v16.4s, v23.4s ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 b Lopen_tail_64_store Lopen_tail_64: // We just need a single block mov v0.16b, v24.16b mov v5.16b, v28.16b mov v10.16b, v29.16b mov v15.16b, v30.16b eor v23.16b, v23.16b, v23.16b ins v23.s[0], v25.s[0] add v15.4s, v15.4s, v23.4s mov x6, #10 sub x6, x6, x4 Lopen_tail_64_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h 
add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 subs x6, x6, #1 b.gt Lopen_tail_64_rounds cbz x4, Lopen_tail_64_rounds_done subs x4, x4, #1 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most b Lopen_tail_64_rounds Lopen_tail_64_rounds_done: add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v15.4s, v15.4s, v23.4s Lopen_tail_64_store: cmp x2, #16 b.lt Lopen_tail_16 ld1 {v20.16b}, [x1], #16 eor v20.16b, 
v20.16b, v0.16b st1 {v20.16b}, [x0], #16 mov v0.16b, v5.16b mov v5.16b, v10.16b mov v10.16b, v15.16b sub x2, x2, #16 b Lopen_tail_64_store Lopen_tail_16: // Here we handle the last [0,16) bytes that require a padded block cbz x2, Lopen_finalize eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask not v22.16b, v20.16b add x7, x1, x2 mov x6, x2 Lopen_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x7, #-1]! mov v20.b[0], w11 ext v21.16b, v22.16b, v21.16b, #15 subs x2, x2, #1 b.gt Lopen_tail_16_compose and v20.16b, v20.16b, v21.16b // Hash in the final padded block mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most eor v20.16b, v20.16b, v0.16b Lopen_tail_16_store: umov w11, v20.b[0] strb w11, [x0], #1 ext v20.16b, v20.16b, v20.16b, #1 subs x6, x6, #1 b.gt Lopen_tail_16_store Lopen_finalize: mov x11, v31.d[0] mov x12, v31.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, 
x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most // Final reduction step sub x12, xzr, x15 orr x13, xzr, #3 subs x11, x8, #-5 sbcs x12, x9, x12 sbcs x13, x10, x13 csel x8, x11, x8, cs csel x9, x12, x9, cs csel x10, x13, x10, cs mov x11, v27.d[0] mov x12, v27.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 stp x8, x9, [x5] ldp d8, d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] .cfi_restore b15 .cfi_restore b14 .cfi_restore b13 .cfi_restore b12 .cfi_restore b11 .cfi_restore b10 .cfi_restore b9 .cfi_restore b8 ldp x29, x30, [sp], 80 .cfi_restore w29 .cfi_restore w30 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret Lopen_128: // On some architectures preparing 5 blocks for small buffers is wasteful eor v25.16b, v25.16b, v25.16b mov x11, #1 mov v25.s[0], w11 mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v17.16b, v30.16b add v15.4s, v17.4s, v25.4s add v16.4s, v15.4s, v25.4s mov x6, #10 Lopen_128_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, 
v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, 
v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x6, x6, #1 b.hi Lopen_128_rounds add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v30.4s, v30.4s, v25.4s add v15.4s, v15.4s, v30.4s add v30.4s, v30.4s, v25.4s add v16.4s, v16.4s, v30.4s and v2.16b, v2.16b, v27.16b mov x16, v2.d[0] // Move the R key to GPRs mov x17, v2.d[1] mov v27.16b, v7.16b // Store the S key bl Lpoly_hash_ad_internal Lopen_128_store: cmp x2, #64 b.lt Lopen_128_store_64 ld1 {v20.16b - v23.16b}, [x1], #64 mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul 
x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v22.d[0] mov x12, v22.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v23.d[0] mov x12, v23.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 
has the value of 4 at most eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 mov v0.16b, v1.16b mov v5.16b, v6.16b mov v10.16b, v11.16b mov v15.16b, v16.16b Lopen_128_store_64: lsr x4, x2, #4 mov x3, x1 Lopen_128_hash_64: cbz x4, Lopen_tail_64_store ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #1 b Lopen_128_hash_64 .cfi_endproc #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha20_poly1305_x86_64-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .section __DATA,__const .p2align 6 chacha20_poly1305_constants: L$chacha20_consts: .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' L$rol8: .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 L$rol16: .byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13 .byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13 L$avx2_init: .long 0,0,0,0 L$sse_inc: .long 1,0,0,0 L$avx2_inc: .long 2,0,0,0,2,0,0,0 L$clamp: .quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC .quad 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF .p2align 4 L$and_masks: .byte 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00 .byte 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff .text .p2align 6 poly_hash_ad_internal: xorq %r10,%r10 xorq %r11,%r11 xorq %r12,%r12 cmpq $13,%r8 jne L$hash_ad_loop L$poly_fast_tls_ad: movq (%rcx),%r10 movq 5(%rcx),%r11 shrq $24,%r11 movq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 ret L$hash_ad_loop: cmpq $16,%r8 jb L$hash_ad_tail addq 0+0(%rcx),%r10 adcq 8+0(%rcx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rcx),%rcx subq $16,%r8 jmp L$hash_ad_loop L$hash_ad_tail: cmpq $0,%r8 je L$hash_ad_done xorq %r13,%r13 xorq %r14,%r14 xorq %r15,%r15 addq %r8,%rcx L$hash_ad_tail_loop: shldq $8,%r13,%r14 shlq $8,%r13 movzbq -1(%rcx),%r15 xorq %r15,%r13 decq %rcx decq %r8 jne L$hash_ad_tail_loop addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax 
mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$hash_ad_done: ret .globl _chacha20_poly1305_open_nohw .private_extern _chacha20_poly1305_open_nohw .p2align 6 _chacha20_poly1305_open_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushq %r9 subq $288 + 0 + 32,%rsp leaq 32(%rsp),%rbp andq $-32,%rbp movq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) cmpq $128,%rbx jbe L$open_sse_128 movdqa L$chacha20_consts(%rip),%xmm0 movdqu 0(%r9),%xmm4 movdqu 16(%r9),%xmm8 movdqu 32(%r9),%xmm12 movdqa %xmm12,%xmm7 movdqa %xmm4,0+48(%rbp) movdqa %xmm8,0+64(%rbp) movdqa %xmm12,0+96(%rbp) movq $10,%r10 L$open_sse_init_rounds: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 decq %r10 jne L$open_sse_init_rounds paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 pand 
L$clamp(%rip),%xmm0 movdqa %xmm0,0+0(%rbp) movdqa %xmm4,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal L$open_sse_main_loop: cmpq $256,%rbx jb L$open_sse_tail movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa 0+96(%rbp),%xmm15 paddd L$sse_inc(%rip),%xmm15 movdqa %xmm15,%xmm14 paddd L$sse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) movq $4,%rcx movq %rsi,%r8 L$open_sse_main_loop_rounds: movdqa %xmm8,0+80(%rbp) movdqa L$rol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 leaq 16(%r8),%r8 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movdqa L$rol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 
102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 movdqa %xmm8,0+80(%rbp) movdqa L$rol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 
movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa L$rol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 decq %rcx jge L$open_sse_main_loop_rounds addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 cmpq $-6,%rcx jg L$open_sse_main_loop_rounds paddd L$chacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd 
L$chacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqa %xmm12,0+80(%rbp) movdqu 0 + 0(%rsi),%xmm12 pxor %xmm3,%xmm12 movdqu %xmm12,0 + 0(%rdi) movdqu 16 + 0(%rsi),%xmm12 pxor %xmm7,%xmm12 movdqu %xmm12,16 + 0(%rdi) movdqu 32 + 0(%rsi),%xmm12 pxor %xmm11,%xmm12 movdqu %xmm12,32 + 0(%rdi) movdqu 48 + 0(%rsi),%xmm12 pxor %xmm15,%xmm12 movdqu %xmm12,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 64(%rdi) movdqu %xmm6,16 + 64(%rdi) movdqu %xmm10,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 128(%rdi) movdqu %xmm5,16 + 128(%rdi) movdqu %xmm9,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) movdqu 0 + 192(%rsi),%xmm3 movdqu 16 + 192(%rsi),%xmm7 movdqu 32 + 192(%rsi),%xmm11 movdqu 48 + 192(%rsi),%xmm15 pxor %xmm3,%xmm0 pxor %xmm7,%xmm4 pxor %xmm11,%xmm8 pxor 0+80(%rbp),%xmm15 movdqu %xmm0,0 + 192(%rdi) movdqu %xmm4,16 + 192(%rdi) movdqu %xmm8,32 + 192(%rdi) movdqu %xmm15,48 + 192(%rdi) leaq 256(%rsi),%rsi leaq 256(%rdi),%rdi subq $256,%rbx jmp L$open_sse_main_loop L$open_sse_tail: testq %rbx,%rbx jz L$open_sse_finalize cmpq $192,%rbx ja L$open_sse_tail_256 cmpq $128,%rbx ja L$open_sse_tail_192 cmpq $64,%rbx ja L$open_sse_tail_128 movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa 0+96(%rbp),%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) xorq %r8,%r8 movq %rbx,%rcx cmpq $16,%rcx jb L$open_sse_tail_64_rounds 
L$open_sse_tail_64_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 subq $16,%rcx L$open_sse_tail_64_rounds: addq $16,%r8 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 cmpq $16,%rcx jae L$open_sse_tail_64_rounds_and_x1hash cmpq $160,%r8 jne L$open_sse_tail_64_rounds paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 jmp L$open_sse_tail_64_dec_loop L$open_sse_tail_128: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa 0+96(%rbp),%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa 
%xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movq %rbx,%rcx andq $-16,%rcx xorq %r8,%r8 L$open_sse_tail_128_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$open_sse_tail_128_rounds: addq $16,%r8 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 
.byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 cmpq %rcx,%r8 jb L$open_sse_tail_128_rounds_and_x1hash cmpq $160,%r8 jne L$open_sse_tail_128_rounds paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 0(%rdi) movdqu %xmm5,16 + 0(%rdi) movdqu %xmm9,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) subq $64,%rbx leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi jmp L$open_sse_tail_64_dec_loop L$open_sse_tail_192: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa 0+96(%rbp),%xmm14 paddd L$sse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movq %rbx,%rcx movq $160,%r8 cmpq $160,%rcx cmovgq %r8,%rcx andq $-16,%rcx xorq %r8,%r8 L$open_sse_tail_192_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax 
movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$open_sse_tail_192_rounds: addq $16,%r8 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 
.byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 cmpq %rcx,%r8 jb L$open_sse_tail_192_rounds_and_x1hash cmpq $160,%r8 jne L$open_sse_tail_192_rounds cmpq $176,%rbx jb L$open_sse_tail_192_finish addq 0+160(%rsi),%r10 adcq 8+160(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 cmpq $192,%rbx jb L$open_sse_tail_192_finish addq 0+176(%rsi),%r10 adcq 8+176(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq 
$0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$open_sse_tail_192_finish: paddd L$chacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 0(%rdi) movdqu %xmm6,16 + 0(%rdi) movdqu %xmm10,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 64(%rdi) movdqu %xmm5,16 + 64(%rdi) movdqu %xmm9,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) subq $128,%rbx leaq 128(%rsi),%rsi leaq 128(%rdi),%rdi jmp L$open_sse_tail_64_dec_loop L$open_sse_tail_256: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa 0+96(%rbp),%xmm15 paddd L$sse_inc(%rip),%xmm15 movdqa %xmm15,%xmm14 paddd L$sse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) xorq %r8,%r8 L$open_sse_tail_256_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 
adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movdqa %xmm11,0+80(%rbp) paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $12,%xmm11 psrld $20,%xmm4 pxor %xmm11,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $7,%xmm11 psrld $25,%xmm4 pxor %xmm11,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $12,%xmm11 psrld $20,%xmm5 pxor %xmm11,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $7,%xmm11 psrld $25,%xmm5 pxor %xmm11,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $12,%xmm11 psrld $20,%xmm6 pxor %xmm11,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $7,%xmm11 psrld $25,%xmm6 pxor %xmm11,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 movdqa 0+80(%rbp),%xmm11 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movdqa %xmm9,0+80(%rbp) paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb L$rol16(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $12,%xmm9 psrld $20,%xmm7 pxor %xmm9,%xmm7 paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb L$rol8(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $7,%xmm9 psrld $25,%xmm7 pxor %xmm9,%xmm7 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 movdqa 0+80(%rbp),%xmm9 movq 
8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx movdqa %xmm11,0+80(%rbp) paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $12,%xmm11 psrld $20,%xmm4 pxor %xmm11,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $7,%xmm11 psrld $25,%xmm4 pxor %xmm11,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $12,%xmm11 psrld $20,%xmm5 pxor %xmm11,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $7,%xmm11 psrld $25,%xmm5 pxor %xmm11,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $12,%xmm11 psrld $20,%xmm6 pxor %xmm11,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $7,%xmm11 psrld $25,%xmm6 pxor %xmm11,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 movdqa 0+80(%rbp),%xmm11 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movdqa %xmm9,0+80(%rbp) paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb L$rol16(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $12,%xmm9 psrld $20,%xmm7 pxor %xmm9,%xmm7 paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb L$rol8(%rip),%xmm15 paddd %xmm15,%xmm11 pxor 
%xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $7,%xmm9 psrld $25,%xmm7 pxor %xmm9,%xmm7 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 movdqa 0+80(%rbp),%xmm9 addq $16,%r8 cmpq $160,%r8 jb L$open_sse_tail_256_rounds_and_x1hash movq %rbx,%rcx andq $-16,%rcx L$open_sse_tail_256_hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq $16,%r8 cmpq %rcx,%r8 jb L$open_sse_tail_256_hash paddd L$chacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd L$chacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqa %xmm12,0+80(%rbp) movdqu 0 + 0(%rsi),%xmm12 pxor %xmm3,%xmm12 movdqu %xmm12,0 + 0(%rdi) movdqu 16 + 0(%rsi),%xmm12 pxor %xmm7,%xmm12 movdqu %xmm12,16 + 0(%rdi) movdqu 32 + 0(%rsi),%xmm12 pxor %xmm11,%xmm12 movdqu %xmm12,32 + 0(%rdi) movdqu 48 + 0(%rsi),%xmm12 pxor %xmm15,%xmm12 movdqu %xmm12,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 64(%rdi) movdqu %xmm6,16 + 64(%rdi) movdqu %xmm10,32 + 64(%rdi) movdqu 
%xmm15,48 + 64(%rdi) movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 128(%rdi) movdqu %xmm5,16 + 128(%rdi) movdqu %xmm9,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) movdqa 0+80(%rbp),%xmm12 subq $192,%rbx leaq 192(%rsi),%rsi leaq 192(%rdi),%rdi L$open_sse_tail_64_dec_loop: cmpq $16,%rbx jb L$open_sse_tail_16_init subq $16,%rbx movdqu (%rsi),%xmm3 pxor %xmm3,%xmm0 movdqu %xmm0,(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi movdqa %xmm4,%xmm0 movdqa %xmm8,%xmm4 movdqa %xmm12,%xmm8 jmp L$open_sse_tail_64_dec_loop L$open_sse_tail_16_init: movdqa %xmm0,%xmm1 L$open_sse_tail_16: testq %rbx,%rbx jz L$open_sse_finalize pxor %xmm3,%xmm3 leaq -1(%rsi,%rbx,1),%rsi movq %rbx,%r8 L$open_sse_tail_16_compose: pslldq $1,%xmm3 pinsrb $0,(%rsi),%xmm3 subq $1,%rsi subq $1,%r8 jnz L$open_sse_tail_16_compose .byte 102,73,15,126,221 pextrq $1,%xmm3,%r14 pxor %xmm1,%xmm3 L$open_sse_tail_16_extract: pextrb $0,%xmm3,(%rdi) psrldq $1,%xmm3 addq $1,%rdi subq $1,%rbx jne L$open_sse_tail_16_extract addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$open_sse_finalize: addq 0+0+32(%rbp),%r10 adcq 8+0+32(%rbp),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 
mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movq %r10,%r13 movq %r11,%r14 movq %r12,%r15 subq $-5,%r10 sbbq $-1,%r11 sbbq $3,%r12 cmovcq %r13,%r10 cmovcq %r14,%r11 cmovcq %r15,%r12 addq 0+0+16(%rbp),%r10 adcq 8+0+16(%rbp),%r11 addq $288 + 0 + 32,%rsp popq %r9 movq %r10,(%r9) movq %r11,8(%r9) popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx popq %rbp ret L$open_sse_128: movdqu L$chacha20_consts(%rip),%xmm0 movdqa %xmm0,%xmm1 movdqa %xmm0,%xmm2 movdqu 0(%r9),%xmm4 movdqa %xmm4,%xmm5 movdqa %xmm4,%xmm6 movdqu 16(%r9),%xmm8 movdqa %xmm8,%xmm9 movdqa %xmm8,%xmm10 movdqu 32(%r9),%xmm12 movdqa %xmm12,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm14 paddd L$sse_inc(%rip),%xmm14 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa %xmm13,%xmm15 movq $10,%r10 L$open_sse_128_rounds: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 
paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 decq %r10 jnz L$open_sse_128_rounds paddd L$chacha20_consts(%rip),%xmm0 paddd L$chacha20_consts(%rip),%xmm1 paddd L$chacha20_consts(%rip),%xmm2 paddd %xmm7,%xmm4 paddd %xmm7,%xmm5 paddd %xmm7,%xmm6 paddd %xmm11,%xmm9 paddd %xmm11,%xmm10 paddd %xmm15,%xmm13 paddd L$sse_inc(%rip),%xmm15 paddd %xmm15,%xmm14 pand L$clamp(%rip),%xmm0 movdqa %xmm0,0+0(%rbp) movdqa %xmm4,0+16(%rbp) movq %r8,%r8 
call poly_hash_ad_internal L$open_sse_128_xor_hash: cmpq $16,%rbx jb L$open_sse_tail_16 subq $16,%rbx addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movdqu 0(%rsi),%xmm3 pxor %xmm3,%xmm1 movdqu %xmm1,0(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movdqa %xmm5,%xmm1 movdqa %xmm9,%xmm5 movdqa %xmm13,%xmm9 movdqa %xmm2,%xmm13 movdqa %xmm6,%xmm2 movdqa %xmm10,%xmm6 movdqa %xmm14,%xmm10 jmp L$open_sse_128_xor_hash .globl _chacha20_poly1305_seal_nohw .private_extern _chacha20_poly1305_seal_nohw .p2align 6 _chacha20_poly1305_seal_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushq %r9 subq $288 + 0 + 32,%rsp leaq 32(%rsp),%rbp andq $-32,%rbp movq 56(%r9),%rbx addq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) movq %rdx,%rbx cmpq $128,%rbx jbe L$seal_sse_128 movdqa L$chacha20_consts(%rip),%xmm0 movdqu 0(%r9),%xmm4 movdqu 16(%r9),%xmm8 movdqu 32(%r9),%xmm12 movdqa %xmm0,%xmm1 movdqa %xmm0,%xmm2 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm5 movdqa %xmm4,%xmm6 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm9 movdqa %xmm8,%xmm10 movdqa %xmm8,%xmm11 movdqa %xmm12,%xmm15 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,%xmm14 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,%xmm13 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm4,0+48(%rbp) movdqa %xmm8,0+64(%rbp) movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) movq $10,%r10 L$seal_sse_init_rounds: movdqa %xmm8,0+80(%rbp) 
movdqa L$rol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa L$rol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 movdqa %xmm8,0+80(%rbp) movdqa L$rol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor 
%xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa L$rol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 decq %r10 jnz L$seal_sse_init_rounds paddd L$chacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd L$chacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd 
L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 pand L$clamp(%rip),%xmm3 movdqa %xmm3,0+0(%rbp) movdqa %xmm7,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 0(%rdi) movdqu %xmm6,16 + 0(%rdi) movdqu %xmm10,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 64(%rdi) movdqu %xmm5,16 + 64(%rdi) movdqu %xmm9,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) cmpq $192,%rbx ja L$seal_sse_main_init movq $128,%rcx subq $128,%rbx leaq 128(%rsi),%rsi jmp L$seal_sse_128_tail_hash L$seal_sse_main_init: movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm0 pxor %xmm7,%xmm4 pxor %xmm11,%xmm8 pxor %xmm12,%xmm15 movdqu %xmm0,0 + 128(%rdi) movdqu %xmm4,16 + 128(%rdi) movdqu %xmm8,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) movq $192,%rcx subq $192,%rbx leaq 192(%rsi),%rsi movq $2,%rcx movq $8,%r8 cmpq $64,%rbx jbe L$seal_sse_tail_64 cmpq $128,%rbx jbe L$seal_sse_tail_128 cmpq $192,%rbx jbe L$seal_sse_tail_192 L$seal_sse_main_loop: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa 0+96(%rbp),%xmm15 paddd L$sse_inc(%rip),%xmm15 movdqa %xmm15,%xmm14 paddd L$sse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 
movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) .p2align 5 L$seal_sse_main_rounds: movdqa %xmm8,0+80(%rbp) movdqa L$rol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movdqa L$rol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld 
$32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 movdqa %xmm8,0+80(%rbp) movdqa L$rol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa L$rol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor 
%xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 leaq 16(%rdi),%rdi decq %r8 jge L$seal_sse_main_rounds addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi decq %rcx jg L$seal_sse_main_rounds paddd L$chacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd L$chacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqa %xmm14,0+80(%rbp) movdqa %xmm14,0+80(%rbp) movdqu 0 + 0(%rsi),%xmm14 pxor %xmm3,%xmm14 movdqu %xmm14,0 + 0(%rdi) movdqu 16 + 0(%rsi),%xmm14 pxor %xmm7,%xmm14 movdqu %xmm14,16 + 0(%rdi) movdqu 32 + 0(%rsi),%xmm14 pxor %xmm11,%xmm14 movdqu %xmm14,32 + 0(%rdi) movdqu 48 + 0(%rsi),%xmm14 pxor %xmm15,%xmm14 movdqu %xmm14,48 + 0(%rdi) movdqa 
0+80(%rbp),%xmm14 movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 64(%rdi) movdqu %xmm6,16 + 64(%rdi) movdqu %xmm10,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 128(%rdi) movdqu %xmm5,16 + 128(%rdi) movdqu %xmm9,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) cmpq $256,%rbx ja L$seal_sse_main_loop_xor movq $192,%rcx subq $192,%rbx leaq 192(%rsi),%rsi jmp L$seal_sse_128_tail_hash L$seal_sse_main_loop_xor: movdqu 0 + 192(%rsi),%xmm3 movdqu 16 + 192(%rsi),%xmm7 movdqu 32 + 192(%rsi),%xmm11 movdqu 48 + 192(%rsi),%xmm15 pxor %xmm3,%xmm0 pxor %xmm7,%xmm4 pxor %xmm11,%xmm8 pxor %xmm12,%xmm15 movdqu %xmm0,0 + 192(%rdi) movdqu %xmm4,16 + 192(%rdi) movdqu %xmm8,32 + 192(%rdi) movdqu %xmm15,48 + 192(%rdi) leaq 256(%rsi),%rsi subq $256,%rbx movq $6,%rcx movq $4,%r8 cmpq $192,%rbx jg L$seal_sse_main_loop movq %rbx,%rcx testq %rbx,%rbx je L$seal_sse_128_tail_hash movq $6,%rcx cmpq $128,%rbx ja L$seal_sse_tail_192 cmpq $64,%rbx ja L$seal_sse_tail_128 L$seal_sse_tail_64: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa 0+96(%rbp),%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) L$seal_sse_tail_64_rounds_and_x2hash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq 
$-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_sse_tail_64_rounds_and_x1hash: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi decq %rcx jg L$seal_sse_tail_64_rounds_and_x2hash decq %r8 jge L$seal_sse_tail_64_rounds_and_x1hash paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 jmp L$seal_sse_128_tail_xor L$seal_sse_tail_128: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 
movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa 0+96(%rbp),%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) L$seal_sse_tail_128_rounds_and_x2hash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_sse_tail_128_rounds_and_x1hash: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq 
$0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 leaq 16(%rdi),%rdi decq %rcx jg L$seal_sse_tail_128_rounds_and_x2hash decq %r8 jge L$seal_sse_tail_128_rounds_and_x1hash paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 0(%rdi) movdqu %xmm5,16 + 0(%rdi) movdqu %xmm9,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movq $64,%rcx subq $64,%rbx leaq 64(%rsi),%rsi jmp L$seal_sse_128_tail_hash L$seal_sse_tail_192: movdqa L$chacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 
movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa 0+96(%rbp),%xmm14 paddd L$sse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) L$seal_sse_tail_192_rounds_and_x2hash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_sse_tail_192_rounds_and_x1hash: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor 
%xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld 
$7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 leaq 16(%rdi),%rdi decq %rcx jg L$seal_sse_tail_192_rounds_and_x2hash decq %r8 jge L$seal_sse_tail_192_rounds_and_x1hash paddd L$chacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd L$chacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd L$chacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 0(%rdi) movdqu %xmm6,16 + 0(%rdi) movdqu %xmm10,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 64(%rdi) movdqu %xmm5,16 + 64(%rdi) movdqu %xmm9,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movq $128,%rcx subq $128,%rbx leaq 128(%rsi),%rsi L$seal_sse_128_tail_hash: cmpq $16,%rcx jb L$seal_sse_128_tail_xor addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 subq $16,%rcx leaq 16(%rdi),%rdi jmp L$seal_sse_128_tail_hash L$seal_sse_128_tail_xor: cmpq $16,%rbx jb L$seal_sse_tail_16 subq $16,%rbx movdqu 
0(%rsi),%xmm3 pxor %xmm3,%xmm0 movdqu %xmm0,0(%rdi) addq 0(%rdi),%r10 adcq 8(%rdi),%r11 adcq $1,%r12 leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movdqa %xmm4,%xmm0 movdqa %xmm8,%xmm4 movdqa %xmm12,%xmm8 movdqa %xmm1,%xmm12 movdqa %xmm5,%xmm1 movdqa %xmm9,%xmm5 movdqa %xmm13,%xmm9 jmp L$seal_sse_128_tail_xor L$seal_sse_tail_16: testq %rbx,%rbx jz L$process_blocks_of_extra_in movq %rbx,%r8 movq %rbx,%rcx leaq -1(%rsi,%rbx,1),%rsi pxor %xmm15,%xmm15 L$seal_sse_tail_16_compose: pslldq $1,%xmm15 pinsrb $0,(%rsi),%xmm15 leaq -1(%rsi),%rsi decq %rcx jne L$seal_sse_tail_16_compose pxor %xmm0,%xmm15 movq %rbx,%rcx movdqu %xmm15,%xmm0 L$seal_sse_tail_16_extract: pextrb $0,%xmm0,(%rdi) psrldq $1,%xmm0 addq $1,%rdi subq $1,%rcx jnz L$seal_sse_tail_16_extract movq 288 + 0 + 32(%rsp),%r9 movq 56(%r9),%r14 movq 48(%r9),%r13 testq %r14,%r14 jz L$process_partial_block movq $16,%r15 subq %rbx,%r15 cmpq %r15,%r14 jge L$load_extra_in movq %r14,%r15 L$load_extra_in: leaq -1(%r13,%r15,1),%rsi addq %r15,%r13 subq %r15,%r14 movq %r13,48(%r9) movq %r14,56(%r9) addq %r15,%r8 pxor %xmm11,%xmm11 L$load_extra_load_loop: pslldq $1,%xmm11 pinsrb $0,(%rsi),%xmm11 leaq -1(%rsi),%rsi subq $1,%r15 jnz L$load_extra_load_loop movq %rbx,%r15 L$load_extra_shift_loop: pslldq $1,%xmm11 subq $1,%r15 jnz L$load_extra_shift_loop leaq L$and_masks(%rip),%r15 shlq $4,%rbx pand -16(%r15,%rbx,1),%xmm15 por %xmm11,%xmm15 .byte 102,77,15,126,253 pextrq $1,%xmm15,%r14 addq %r13,%r10 
adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$process_blocks_of_extra_in: movq 288+32+0 (%rsp),%r9 movq 48(%r9),%rsi movq 56(%r9),%r8 movq %r8,%rcx shrq $4,%r8 L$process_extra_hash_loop: jz process_extra_in_trailer addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rsi),%rsi subq $1,%r8 jmp L$process_extra_hash_loop process_extra_in_trailer: andq $15,%rcx movq %rcx,%rbx jz L$do_length_block leaq -1(%rsi,%rcx,1),%rsi L$process_extra_in_trailer_load: pslldq $1,%xmm15 pinsrb $0,(%rsi),%xmm15 leaq -1(%rsi),%rsi subq $1,%rcx jnz L$process_extra_in_trailer_load L$process_partial_block: leaq L$and_masks(%rip),%r15 shlq $4,%rbx pand -16(%r15,%rbx,1),%xmm15 .byte 102,77,15,126,253 pextrq $1,%xmm15,%r14 addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 
movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$do_length_block: addq 0+0+32(%rbp),%r10 adcq 8+0+32(%rbp),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movq %r10,%r13 movq %r11,%r14 movq %r12,%r15 subq $-5,%r10 sbbq $-1,%r11 sbbq $3,%r12 cmovcq %r13,%r10 cmovcq %r14,%r11 cmovcq %r15,%r12 addq 0+0+16(%rbp),%r10 adcq 8+0+16(%rbp),%r11 addq $288 + 0 + 32,%rsp popq %r9 movq %r10,(%r9) movq %r11,8(%r9) popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx popq %rbp ret L$seal_sse_128: movdqu L$chacha20_consts(%rip),%xmm0 movdqa %xmm0,%xmm1 movdqa %xmm0,%xmm2 movdqu 0(%r9),%xmm4 movdqa %xmm4,%xmm5 movdqa %xmm4,%xmm6 movdqu 16(%r9),%xmm8 movdqa %xmm8,%xmm9 movdqa %xmm8,%xmm10 movdqu 32(%r9),%xmm14 movdqa %xmm14,%xmm12 paddd L$sse_inc(%rip),%xmm12 movdqa %xmm12,%xmm13 paddd L$sse_inc(%rip),%xmm13 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa %xmm12,%xmm15 movq $10,%r10 L$seal_sse_128_rounds: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 
paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb L$rol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb L$rol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld 
$20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb L$rol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 decq %r10 jnz L$seal_sse_128_rounds paddd L$chacha20_consts(%rip),%xmm0 paddd L$chacha20_consts(%rip),%xmm1 paddd L$chacha20_consts(%rip),%xmm2 paddd %xmm7,%xmm4 paddd %xmm7,%xmm5 paddd %xmm7,%xmm6 paddd %xmm11,%xmm8 paddd %xmm11,%xmm9 paddd %xmm15,%xmm12 paddd L$sse_inc(%rip),%xmm15 paddd %xmm15,%xmm13 pand L$clamp(%rip),%xmm2 movdqa %xmm2,0+0(%rbp) movdqa %xmm6,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal jmp L$seal_sse_128_tail_xor .globl _chacha20_poly1305_open_avx2 .private_extern _chacha20_poly1305_open_avx2 .p2align 6 _chacha20_poly1305_open_avx2: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushq %r9 subq $288 + 0 + 32,%rsp leaq 32(%rsp),%rbp andq $-32,%rbp movq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) vzeroupper vmovdqa L$chacha20_consts(%rip),%ymm0 vbroadcasti128 0(%r9),%ymm4 vbroadcasti128 16(%r9),%ymm8 vbroadcasti128 32(%r9),%ymm12 vpaddd L$avx2_init(%rip),%ymm12,%ymm12 cmpq $192,%rbx jbe L$open_avx2_192 cmpq $320,%rbx jbe L$open_avx2_320 vmovdqa %ymm4,0+64(%rbp) vmovdqa %ymm8,0+96(%rbp) vmovdqa %ymm12,0+160(%rbp) movq $10,%r10 L$open_avx2_init_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb 
L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 decq %r10 jne L$open_avx2_init_rounds vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand L$clamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 movq %r8,%r8 call poly_hash_ad_internal xorq %rcx,%rcx L$open_avx2_init_hash: addq 0+0(%rsi,%rcx,1),%r10 adcq 8+0(%rsi,%rcx,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq $16,%rcx cmpq $64,%rcx jne L$open_avx2_init_hash vpxor 0(%rsi),%ymm0,%ymm0 vpxor 32(%rsi),%ymm4,%ymm4 vmovdqu %ymm0,0(%rdi) vmovdqu %ymm4,32(%rdi) leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi subq $64,%rbx L$open_avx2_main_loop: cmpq $512,%rbx jb L$open_avx2_main_loop_done vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 
vmovdqa %ymm8,%ymm11 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) xorq %rcx,%rcx L$open_avx2_main_loop_rounds: addq 0+0(%rsi,%rcx,1),%r10 adcq 8+0(%rsi,%rcx,1),%r11 adcq $1,%r12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 addq %rax,%r15 adcq %rdx,%r9 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 
vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 addq 0+16(%rsi,%rcx,1),%r10 adcq 8+16(%rsi,%rcx,1),%r11 adcq $1,%r12 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 addq %rax,%r15 adcq %rdx,%r9 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 
andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq 0+32(%rsi,%rcx,1),%r10 adcq 8+32(%rsi,%rcx,1),%r11 adcq $1,%r12 leaq 48(%rcx),%rcx vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq %rax,%r15 adcq %rdx,%r9 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr 
$8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpalignr $4,%ymm12,%ymm12,%ymm12 cmpq $60*8,%rcx jne L$open_avx2_main_loop_rounds vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) addq 0+60*8(%rsi),%r10 adcq 8+60*8(%rsi),%r11 adcq $1,%r12 vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vperm2i128 
$0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) addq 0+60*8+16(%rsi),%r10 adcq 8+60*8+16(%rsi),%r11 adcq $1,%r12 vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vperm2i128 $0x13,%ymm0,%ymm4,%ymm4 vperm2i128 $0x02,%ymm8,%ymm12,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm8 vpxor 0+384(%rsi),%ymm3,%ymm3 vpxor 32+384(%rsi),%ymm0,%ymm0 vpxor 64+384(%rsi),%ymm4,%ymm4 vpxor 96+384(%rsi),%ymm8,%ymm8 vmovdqu %ymm3,0+384(%rdi) vmovdqu %ymm0,32+384(%rdi) vmovdqu %ymm4,64+384(%rdi) vmovdqu %ymm8,96+384(%rdi) leaq 512(%rsi),%rsi leaq 512(%rdi),%rdi subq $512,%rbx jmp L$open_avx2_main_loop L$open_avx2_main_loop_done: testq %rbx,%rbx vzeroupper je L$open_sse_finalize cmpq $384,%rbx ja L$open_avx2_tail_512 cmpq $256,%rbx ja L$open_avx2_tail_384 cmpq $128,%rbx ja 
L$open_avx2_tail_256 vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) xorq %r8,%r8 movq %rbx,%rcx andq $-16,%rcx testq %rcx,%rcx je L$open_avx2_tail_128_rounds L$open_avx2_tail_128_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$open_avx2_tail_128_rounds: addq $16,%r8 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 
cmpq %rcx,%r8 jb L$open_avx2_tail_128_rounds_and_x1hash cmpq $160,%r8 jne L$open_avx2_tail_128_rounds vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 jmp L$open_avx2_tail_128_xor L$open_avx2_tail_256: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) movq %rbx,0+128(%rbp) movq %rbx,%rcx subq $128,%rcx shrq $4,%rcx movq $10,%r8 cmpq $10,%rcx cmovgq %r8,%rcx movq %rsi,%rbx xorq %r8,%r8 L$open_avx2_tail_256_rounds_and_x1hash: addq 0+0(%rbx),%r10 adcq 8+0(%rbx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rbx),%rbx L$open_avx2_tail_256_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr 
$4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 incq %r8 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 
vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 cmpq %rcx,%r8 jb L$open_avx2_tail_256_rounds_and_x1hash cmpq $10,%r8 jne L$open_avx2_tail_256_rounds movq %rbx,%r8 subq %rsi,%rbx movq %rbx,%rcx movq 0+128(%rbp),%rbx L$open_avx2_tail_256_hash: addq $16,%rcx cmpq %rbx,%rcx jg L$open_avx2_tail_256_done addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 jmp L$open_avx2_tail_256_hash L$open_avx2_tail_256_done: vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm1,%ymm1 vpxor 64+0(%rsi),%ymm5,%ymm5 vpxor 96+0(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm1,32+0(%rdi) vmovdqu %ymm5,64+0(%rdi) vmovdqu %ymm9,96+0(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 leaq 128(%rsi),%rsi leaq 128(%rdi),%rdi subq $128,%rbx jmp L$open_avx2_tail_128_xor L$open_avx2_tail_384: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa 
%ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) movq %rbx,0+128(%rbp) movq %rbx,%rcx subq $256,%rcx shrq $4,%rcx addq $6,%rcx movq $10,%r8 cmpq $10,%rcx cmovgq %r8,%rcx movq %rsi,%rbx xorq %r8,%r8 L$open_avx2_tail_384_rounds_and_x2hash: addq 0+0(%rbx),%r10 adcq 8+0(%rbx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rbx),%rbx L$open_avx2_tail_384_rounds_and_x1hash: vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr 
$8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 addq 0+0(%rbx),%r10 adcq 8+0(%rbx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rbx),%rbx incq %r8 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 
vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 cmpq %rcx,%r8 jb L$open_avx2_tail_384_rounds_and_x2hash cmpq $10,%r8 jne L$open_avx2_tail_384_rounds_and_x1hash movq %rbx,%r8 subq %rsi,%rbx movq %rbx,%rcx movq 0+128(%rbp),%rbx L$open_avx2_384_tail_hash: addq $16,%rcx cmpq %rbx,%rcx jg L$open_avx2_384_tail_done addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 jmp L$open_avx2_384_tail_hash L$open_avx2_384_tail_done: vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm2,%ymm2 vpxor 64+0(%rsi),%ymm6,%ymm6 vpxor 96+0(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm2,32+0(%rdi) vmovdqu %ymm6,64+0(%rdi) vmovdqu %ymm10,96+0(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm1,%ymm1 vpxor 64+128(%rsi),%ymm5,%ymm5 vpxor 96+128(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm1,32+128(%rdi) vmovdqu %ymm5,64+128(%rdi) vmovdqu %ymm9,96+128(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 leaq 256(%rsi),%rsi leaq 256(%rdi),%rdi subq $256,%rbx jmp L$open_avx2_tail_128_xor L$open_avx2_tail_512: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) xorq %rcx,%rcx movq %rsi,%r8 L$open_avx2_tail_512_rounds_and_x2hash: addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 
movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 L$open_avx2_tail_512_rounds_and_x1hash: vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb 
%ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 addq 0+16(%r8),%r10 adcq 8+16(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%r8),%r8 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 
0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 incq %rcx cmpq $4,%rcx jl L$open_avx2_tail_512_rounds_and_x2hash cmpq $10,%rcx jne L$open_avx2_tail_512_rounds_and_x1hash movq %rbx,%rcx subq $384,%rcx andq $-16,%rcx L$open_avx2_tail_512_hash: testq %rcx,%rcx je L$open_avx2_tail_512_done addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 
adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 subq $16,%rcx jmp L$open_avx2_tail_512_hash L$open_avx2_tail_512_done: vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 
$0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 leaq 384(%rsi),%rsi leaq 384(%rdi),%rdi subq $384,%rbx L$open_avx2_tail_128_xor: cmpq $32,%rbx jb L$open_avx2_tail_32_xor subq $32,%rbx vpxor (%rsi),%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) leaq 32(%rsi),%rsi leaq 32(%rdi),%rdi vmovdqa %ymm4,%ymm0 vmovdqa %ymm8,%ymm4 vmovdqa %ymm12,%ymm8 jmp L$open_avx2_tail_128_xor L$open_avx2_tail_32_xor: cmpq $16,%rbx vmovdqa %xmm0,%xmm1 jb L$open_avx2_exit subq $16,%rbx vpxor (%rsi),%xmm0,%xmm1 vmovdqu %xmm1,(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi vperm2i128 $0x11,%ymm0,%ymm0,%ymm0 vmovdqa %xmm0,%xmm1 L$open_avx2_exit: vzeroupper jmp L$open_sse_tail_16 L$open_avx2_192: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd L$avx2_inc(%rip),%ymm12,%ymm13 vmovdqa %ymm12,%ymm11 vmovdqa %ymm13,%ymm15 movq $10,%r10 L$open_avx2_192_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 
vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 decq %r10 jne L$open_avx2_192_rounds vpaddd %ymm2,%ymm0,%ymm0 vpaddd %ymm2,%ymm1,%ymm1 vpaddd %ymm6,%ymm4,%ymm4 vpaddd %ymm6,%ymm5,%ymm5 vpaddd %ymm10,%ymm8,%ymm8 vpaddd %ymm10,%ymm9,%ymm9 vpaddd %ymm11,%ymm12,%ymm12 vpaddd %ymm15,%ymm13,%ymm13 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand L$clamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 L$open_avx2_short: movq %r8,%r8 call poly_hash_ad_internal L$open_avx2_short_hash_and_xor_loop: cmpq $32,%rbx jb 
L$open_avx2_short_tail_32 subq $32,%rbx addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rsi),%r10 adcq 8+16(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor (%rsi),%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) leaq 32(%rsi),%rsi leaq 32(%rdi),%rdi vmovdqa %ymm4,%ymm0 vmovdqa %ymm8,%ymm4 vmovdqa %ymm12,%ymm8 vmovdqa %ymm1,%ymm12 vmovdqa %ymm5,%ymm1 vmovdqa %ymm9,%ymm5 vmovdqa %ymm13,%ymm9 vmovdqa %ymm2,%ymm13 vmovdqa %ymm6,%ymm2 jmp L$open_avx2_short_hash_and_xor_loop L$open_avx2_short_tail_32: cmpq $16,%rbx vmovdqa %xmm0,%xmm1 jb L$open_avx2_short_tail_32_exit subq $16,%rbx addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq 
%rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor (%rsi),%xmm0,%xmm3 vmovdqu %xmm3,(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi vextracti128 $1,%ymm0,%xmm1 L$open_avx2_short_tail_32_exit: vzeroupper jmp L$open_sse_tail_16 L$open_avx2_320: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd L$avx2_inc(%rip),%ymm12,%ymm13 vpaddd L$avx2_inc(%rip),%ymm13,%ymm14 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) movq $10,%r10 L$open_avx2_320_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 
vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 decq %r10 jne L$open_avx2_320_rounds vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 
L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd %ymm7,%ymm4,%ymm4 vpaddd %ymm7,%ymm5,%ymm5 vpaddd %ymm7,%ymm6,%ymm6 vpaddd %ymm11,%ymm8,%ymm8 vpaddd %ymm11,%ymm9,%ymm9 vpaddd %ymm11,%ymm10,%ymm10 vpaddd 0+160(%rbp),%ymm12,%ymm12 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd 0+224(%rbp),%ymm14,%ymm14 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand L$clamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 vperm2i128 $0x02,%ymm2,%ymm6,%ymm9 vperm2i128 $0x02,%ymm10,%ymm14,%ymm13 vperm2i128 $0x13,%ymm2,%ymm6,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm6 jmp L$open_avx2_short .globl _chacha20_poly1305_seal_avx2 .private_extern _chacha20_poly1305_seal_avx2 .p2align 6 _chacha20_poly1305_seal_avx2: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushq %r9 subq $288 + 0 + 32,%rsp leaq 32(%rsp),%rbp andq $-32,%rbp movq 56(%r9),%rbx addq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) movq %rdx,%rbx vzeroupper vmovdqa L$chacha20_consts(%rip),%ymm0 vbroadcasti128 0(%r9),%ymm4 vbroadcasti128 16(%r9),%ymm8 vbroadcasti128 32(%r9),%ymm12 vpaddd L$avx2_init(%rip),%ymm12,%ymm12 cmpq $192,%rbx jbe L$seal_avx2_192 cmpq $320,%rbx jbe L$seal_avx2_320 vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm4,%ymm7 vmovdqa %ymm4,0+64(%rbp) vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vmovdqa %ymm8,%ymm11 vmovdqa %ymm8,0+96(%rbp) vmovdqa %ymm12,%ymm15 vpaddd L$avx2_inc(%rip),%ymm15,%ymm14 vpaddd L$avx2_inc(%rip),%ymm14,%ymm13 vpaddd L$avx2_inc(%rip),%ymm13,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm15,0+256(%rbp) movq $10,%r10 L$seal_avx2_init_rounds: vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd 
%ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 
vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr 
$4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 decq %r10 jnz L$seal_avx2_init_rounds vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vperm2i128 $0x02,%ymm3,%ymm7,%ymm15 vperm2i128 $0x13,%ymm3,%ymm7,%ymm3 vpand L$clamp(%rip),%ymm15,%ymm15 vmovdqa %ymm15,0+0(%rbp) movq %r8,%r8 call poly_hash_ad_internal vpxor 0(%rsi),%ymm3,%ymm3 vpxor 32(%rsi),%ymm11,%ymm11 vmovdqu %ymm3,0(%rdi) vmovdqu %ymm11,32(%rdi) vperm2i128 $0x02,%ymm2,%ymm6,%ymm15 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+64(%rsi),%ymm15,%ymm15 vpxor 32+64(%rsi),%ymm2,%ymm2 vpxor 64+64(%rsi),%ymm6,%ymm6 vpxor 96+64(%rsi),%ymm10,%ymm10 vmovdqu %ymm15,0+64(%rdi) vmovdqu %ymm2,32+64(%rdi) vmovdqu %ymm6,64+64(%rdi) vmovdqu %ymm10,96+64(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm15 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+192(%rsi),%ymm15,%ymm15 vpxor 32+192(%rsi),%ymm1,%ymm1 vpxor 64+192(%rsi),%ymm5,%ymm5 vpxor 96+192(%rsi),%ymm9,%ymm9 vmovdqu %ymm15,0+192(%rdi) vmovdqu %ymm1,32+192(%rdi) vmovdqu %ymm5,64+192(%rdi) vmovdqu %ymm9,96+192(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm15 
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm15,%ymm8 leaq 320(%rsi),%rsi subq $320,%rbx movq $320,%rcx cmpq $128,%rbx jbe L$seal_avx2_short_hash_remainder vpxor 0(%rsi),%ymm0,%ymm0 vpxor 32(%rsi),%ymm4,%ymm4 vpxor 64(%rsi),%ymm8,%ymm8 vpxor 96(%rsi),%ymm12,%ymm12 vmovdqu %ymm0,320(%rdi) vmovdqu %ymm4,352(%rdi) vmovdqu %ymm8,384(%rdi) vmovdqu %ymm12,416(%rdi) leaq 128(%rsi),%rsi subq $128,%rbx movq $8,%rcx movq $2,%r8 cmpq $128,%rbx jbe L$seal_avx2_tail_128 cmpq $256,%rbx jbe L$seal_avx2_tail_256 cmpq $384,%rbx jbe L$seal_avx2_tail_384 cmpq $512,%rbx jbe L$seal_avx2_tail_512 vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor 
%ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld 
$20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd 
%ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 subq $16,%rdi movq $9,%rcx jmp L$seal_avx2_main_loop_rounds_entry .p2align 5 L$seal_avx2_main_loop: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) movq $10,%rcx .p2align 5 L$seal_avx2_main_loop_rounds: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 movq 
8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 addq %rax,%r15 adcq %rdx,%r9 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 L$seal_avx2_main_loop_rounds_entry: vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr 
$8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 addq %rax,%r15 adcq %rdx,%r9 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq 0+32(%rdi),%r10 adcq 8+32(%rdi),%r11 adcq $1,%r12 leaq 48(%rdi),%rdi vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 
adcq %rdx,%r15 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq %rax,%r15 adcq %rdx,%r9 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpalignr $4,%ymm12,%ymm12,%ymm12 decq %rcx jne L$seal_avx2_main_loop_rounds vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq 
$1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 
32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vperm2i128 $0x13,%ymm0,%ymm4,%ymm4 vperm2i128 $0x02,%ymm8,%ymm12,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm8 vpxor 0+384(%rsi),%ymm3,%ymm3 vpxor 32+384(%rsi),%ymm0,%ymm0 vpxor 64+384(%rsi),%ymm4,%ymm4 vpxor 96+384(%rsi),%ymm8,%ymm8 vmovdqu %ymm3,0+384(%rdi) vmovdqu %ymm0,32+384(%rdi) vmovdqu %ymm4,64+384(%rdi) vmovdqu %ymm8,96+384(%rdi) leaq 512(%rsi),%rsi subq $512,%rbx cmpq $512,%rbx jg L$seal_avx2_main_loop addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi movq $10,%rcx xorq %r8,%r8 cmpq $384,%rbx ja L$seal_avx2_tail_512 cmpq $256,%rbx ja L$seal_avx2_tail_384 cmpq $128,%rbx ja L$seal_avx2_tail_256 L$seal_avx2_tail_128: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa 
L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) L$seal_avx2_tail_128_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_avx2_tail_128_rounds_and_2xhash: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor 
%ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi decq %rcx jg L$seal_avx2_tail_128_rounds_and_3xhash decq %r8 jge L$seal_avx2_tail_128_rounds_and_2xhash vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 jmp L$seal_avx2_short_loop L$seal_avx2_tail_256: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) L$seal_avx2_tail_256_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq 
%rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_avx2_tail_256_rounds_and_2xhash: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 
vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi decq %rcx jg L$seal_avx2_tail_256_rounds_and_3xhash decq %r8 jge L$seal_avx2_tail_256_rounds_and_2xhash vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 
$0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm1,%ymm1 vpxor 64+0(%rsi),%ymm5,%ymm5 vpxor 96+0(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm1,32+0(%rdi) vmovdqu %ymm5,64+0(%rdi) vmovdqu %ymm9,96+0(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 movq $128,%rcx leaq 128(%rsi),%rsi subq $128,%rbx jmp L$seal_avx2_short_hash_remainder L$seal_avx2_tail_384: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) L$seal_avx2_tail_384_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_avx2_tail_384_rounds_and_2xhash: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor 
%ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor 
%ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 leaq 32(%rdi),%rdi decq %rcx jg 
L$seal_avx2_tail_384_rounds_and_3xhash decq %r8 jge L$seal_avx2_tail_384_rounds_and_2xhash vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm2,%ymm2 vpxor 64+0(%rsi),%ymm6,%ymm6 vpxor 96+0(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm2,32+0(%rdi) vmovdqu %ymm6,64+0(%rdi) vmovdqu %ymm10,96+0(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm1,%ymm1 vpxor 64+128(%rsi),%ymm5,%ymm5 vpxor 96+128(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm1,32+128(%rdi) vmovdqu %ymm5,64+128(%rdi) vmovdqu %ymm9,96+128(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 movq $256,%rcx leaq 256(%rsi),%rsi subq $256,%rbx jmp L$seal_avx2_short_hash_remainder L$seal_avx2_tail_512: vmovdqa L$chacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa L$avx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa 
%ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) L$seal_avx2_tail_512_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi L$seal_avx2_tail_512_rounds_and_2xhash: vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb 
%ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 addq %rax,%r15 adcq %rdx,%r9 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa L$rol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 
%ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa L$rol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq 
$-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi decq %rcx jg L$seal_avx2_tail_512_rounds_and_3xhash decq %r8 jge L$seal_avx2_tail_512_rounds_and_2xhash vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 movq $384,%rcx leaq 384(%rsi),%rsi subq $384,%rbx jmp L$seal_avx2_short_hash_remainder L$seal_avx2_320: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd L$avx2_inc(%rip),%ymm12,%ymm13 vpaddd L$avx2_inc(%rip),%ymm13,%ymm14 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) movq $10,%r10 L$seal_avx2_320_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 
vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb L$rol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 decq %r10 jne L$seal_avx2_320_rounds vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0 vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1 vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2 vpaddd %ymm7,%ymm4,%ymm4 vpaddd %ymm7,%ymm5,%ymm5 vpaddd %ymm7,%ymm6,%ymm6 vpaddd %ymm11,%ymm8,%ymm8 vpaddd %ymm11,%ymm9,%ymm9 vpaddd %ymm11,%ymm10,%ymm10 vpaddd 
0+160(%rbp),%ymm12,%ymm12 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd 0+224(%rbp),%ymm14,%ymm14 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand L$clamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 vperm2i128 $0x02,%ymm2,%ymm6,%ymm9 vperm2i128 $0x02,%ymm10,%ymm14,%ymm13 vperm2i128 $0x13,%ymm2,%ymm6,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm6 jmp L$seal_avx2_short L$seal_avx2_192: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd L$avx2_inc(%rip),%ymm12,%ymm13 vmovdqa %ymm12,%ymm11 vmovdqa %ymm13,%ymm15 movq $10,%r10 L$seal_avx2_192_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 
vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb L$rol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb L$rol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 decq %r10 jne L$seal_avx2_192_rounds vpaddd %ymm2,%ymm0,%ymm0 vpaddd %ymm2,%ymm1,%ymm1 vpaddd %ymm6,%ymm4,%ymm4 vpaddd %ymm6,%ymm5,%ymm5 vpaddd %ymm10,%ymm8,%ymm8 vpaddd %ymm10,%ymm9,%ymm9 vpaddd %ymm11,%ymm12,%ymm12 vpaddd %ymm15,%ymm13,%ymm13 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand L$clamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 L$seal_avx2_short: movq %r8,%r8 call poly_hash_ad_internal xorq %rcx,%rcx L$seal_avx2_short_hash_remainder: cmpq $16,%rcx jb L$seal_avx2_short_loop addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 
andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 subq $16,%rcx addq $16,%rdi jmp L$seal_avx2_short_hash_remainder L$seal_avx2_short_loop: cmpq $32,%rbx jb L$seal_avx2_short_tail subq $32,%rbx vpxor (%rsi),%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) leaq 32(%rsi),%rsi addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi vmovdqa %ymm4,%ymm0 vmovdqa %ymm8,%ymm4 vmovdqa %ymm12,%ymm8 vmovdqa %ymm1,%ymm12 vmovdqa %ymm5,%ymm1 vmovdqa %ymm9,%ymm5 vmovdqa %ymm13,%ymm9 vmovdqa %ymm2,%ymm13 vmovdqa %ymm6,%ymm2 jmp L$seal_avx2_short_loop L$seal_avx2_short_tail: cmpq $16,%rbx jb L$seal_avx2_exit subq $16,%rbx vpxor (%rsi),%xmm0,%xmm3 vmovdqu %xmm3,(%rdi) leaq 16(%rsi),%rsi addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 
0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi vextracti128 $1,%ymm0,%xmm0 L$seal_avx2_exit: vzeroupper jmp L$seal_sse_tail_16 #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/chacha20_poly1305_x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .section .rodata .align 64 chacha20_poly1305_constants: .Lchacha20_consts: .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' .Lrol8: .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 .Lrol16: .byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13 .byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13 .Lavx2_init: .long 0,0,0,0 .Lsse_inc: .long 1,0,0,0 .Lavx2_inc: .long 2,0,0,0,2,0,0,0 .Lclamp: .quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC .quad 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF .align 16 .Land_masks: .byte 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00 .byte 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff .text .type poly_hash_ad_internal,@function .align 64 poly_hash_ad_internal: .cfi_startproc .cfi_def_cfa rsp, 8 xorq %r10,%r10 xorq %r11,%r11 xorq %r12,%r12 cmpq $13,%r8 jne .Lhash_ad_loop .Lpoly_fast_tls_ad: movq (%rcx),%r10 movq 5(%rcx),%r11 shrq $24,%r11 movq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 ret .Lhash_ad_loop: cmpq $16,%r8 jb .Lhash_ad_tail addq 0+0(%rcx),%r10 adcq 8+0(%rcx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rcx),%rcx subq $16,%r8 jmp .Lhash_ad_loop .Lhash_ad_tail: cmpq $0,%r8 je .Lhash_ad_done xorq %r13,%r13 xorq %r14,%r14 xorq %r15,%r15 addq %r8,%rcx .Lhash_ad_tail_loop: shldq $8,%r13,%r14 shlq $8,%r13 movzbq -1(%rcx),%r15 xorq %r15,%r13 decq %rcx decq %r8 jne .Lhash_ad_tail_loop addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq 
%rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lhash_ad_done: ret .cfi_endproc .size poly_hash_ad_internal, .-poly_hash_ad_internal .globl chacha20_poly1305_open_nohw .hidden chacha20_poly1305_open_nohw .type chacha20_poly1305_open_nohw,@function .align 64 chacha20_poly1305_open_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 pushq %r9 .cfi_adjust_cfa_offset 8 .cfi_offset %r9,-64 subq $288 + 0 + 32,%rsp .cfi_adjust_cfa_offset 288 + 32 leaq 32(%rsp),%rbp andq $-32,%rbp movq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) cmpq $128,%rbx jbe .Lopen_sse_128 movdqa .Lchacha20_consts(%rip),%xmm0 movdqu 0(%r9),%xmm4 movdqu 16(%r9),%xmm8 movdqu 32(%r9),%xmm12 movdqa %xmm12,%xmm7 movdqa %xmm4,0+48(%rbp) movdqa %xmm8,0+64(%rbp) movdqa %xmm12,0+96(%rbp) movq $10,%r10 .Lopen_sse_init_rounds: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 
102,69,15,58,15,228,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 decq %r10 jne .Lopen_sse_init_rounds paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 pand .Lclamp(%rip),%xmm0 movdqa %xmm0,0+0(%rbp) movdqa %xmm4,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal .Lopen_sse_main_loop: cmpq $256,%rbx jb .Lopen_sse_tail movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa 0+96(%rbp),%xmm15 paddd .Lsse_inc(%rip),%xmm15 movdqa %xmm15,%xmm14 paddd .Lsse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) movq $4,%rcx movq %rsi,%r8 .Lopen_sse_main_loop_rounds: movdqa %xmm8,0+80(%rbp) movdqa .Lrol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 leaq 16(%r8),%r8 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld 
$32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movdqa .Lrol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 movdqa %xmm8,0+80(%rbp) movdqa .Lrol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 pxor 
%xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa .Lrol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 decq %rcx jge .Lopen_sse_main_loop_rounds addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 
mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 cmpq $-6,%rcx jg .Lopen_sse_main_loop_rounds paddd .Lchacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd .Lchacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqa %xmm12,0+80(%rbp) movdqu 0 + 0(%rsi),%xmm12 pxor %xmm3,%xmm12 movdqu %xmm12,0 + 0(%rdi) movdqu 16 + 0(%rsi),%xmm12 pxor %xmm7,%xmm12 movdqu %xmm12,16 + 0(%rdi) movdqu 32 + 0(%rsi),%xmm12 pxor %xmm11,%xmm12 movdqu %xmm12,32 + 0(%rdi) movdqu 48 + 0(%rsi),%xmm12 pxor %xmm15,%xmm12 movdqu %xmm12,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 64(%rdi) movdqu %xmm6,16 + 64(%rdi) movdqu %xmm10,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 128(%rdi) movdqu %xmm5,16 + 128(%rdi) movdqu %xmm9,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) movdqu 0 + 192(%rsi),%xmm3 movdqu 16 + 192(%rsi),%xmm7 movdqu 32 + 192(%rsi),%xmm11 movdqu 48 + 192(%rsi),%xmm15 pxor %xmm3,%xmm0 pxor %xmm7,%xmm4 pxor %xmm11,%xmm8 pxor 0+80(%rbp),%xmm15 movdqu %xmm0,0 + 192(%rdi) movdqu %xmm4,16 + 192(%rdi) movdqu 
%xmm8,32 + 192(%rdi) movdqu %xmm15,48 + 192(%rdi) leaq 256(%rsi),%rsi leaq 256(%rdi),%rdi subq $256,%rbx jmp .Lopen_sse_main_loop .Lopen_sse_tail: testq %rbx,%rbx jz .Lopen_sse_finalize cmpq $192,%rbx ja .Lopen_sse_tail_256 cmpq $128,%rbx ja .Lopen_sse_tail_192 cmpq $64,%rbx ja .Lopen_sse_tail_128 movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa 0+96(%rbp),%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) xorq %r8,%r8 movq %rbx,%rcx cmpq $16,%rcx jb .Lopen_sse_tail_64_rounds .Lopen_sse_tail_64_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 subq $16,%rcx .Lopen_sse_tail_64_rounds: addq $16,%r8 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 
102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 cmpq $16,%rcx jae .Lopen_sse_tail_64_rounds_and_x1hash cmpq $160,%r8 jne .Lopen_sse_tail_64_rounds paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 jmp .Lopen_sse_tail_64_dec_loop .Lopen_sse_tail_128: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa 0+96(%rbp),%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movq %rbx,%rcx andq $-16,%rcx xorq %r8,%r8 .Lopen_sse_tail_128_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lopen_sse_tail_128_rounds: addq $16,%r8 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb 
.Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 cmpq %rcx,%r8 jb .Lopen_sse_tail_128_rounds_and_x1hash cmpq $160,%r8 jne .Lopen_sse_tail_128_rounds paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 0(%rdi) movdqu %xmm5,16 + 0(%rdi) movdqu %xmm9,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) subq $64,%rbx leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi jmp .Lopen_sse_tail_64_dec_loop .Lopen_sse_tail_192: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa 0+96(%rbp),%xmm14 paddd .Lsse_inc(%rip),%xmm14 movdqa 
%xmm14,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movq %rbx,%rcx movq $160,%r8 cmpq $160,%rcx cmovgq %r8,%rcx andq $-16,%rcx xorq %r8,%r8 .Lopen_sse_tail_192_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lopen_sse_tail_192_rounds: addq $16,%r8 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb 
.Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 cmpq %rcx,%r8 jb .Lopen_sse_tail_192_rounds_and_x1hash cmpq $160,%r8 jne .Lopen_sse_tail_192_rounds cmpq $176,%rbx jb .Lopen_sse_tail_192_finish addq 0+160(%rsi),%r10 adcq 8+160(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq 
%rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 cmpq $192,%rbx jb .Lopen_sse_tail_192_finish addq 0+176(%rsi),%r10 adcq 8+176(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lopen_sse_tail_192_finish: paddd .Lchacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 0(%rdi) movdqu %xmm6,16 + 0(%rdi) movdqu %xmm10,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 64(%rdi) movdqu %xmm5,16 + 64(%rdi) movdqu %xmm9,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) subq $128,%rbx leaq 128(%rsi),%rsi leaq 128(%rdi),%rdi jmp .Lopen_sse_tail_64_dec_loop .Lopen_sse_tail_256: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 
movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa 0+96(%rbp),%xmm15 paddd .Lsse_inc(%rip),%xmm15 movdqa %xmm15,%xmm14 paddd .Lsse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) xorq %r8,%r8 .Lopen_sse_tail_256_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movdqa %xmm11,0+80(%rbp) paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $12,%xmm11 psrld $20,%xmm4 pxor %xmm11,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $7,%xmm11 psrld $25,%xmm4 pxor %xmm11,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $12,%xmm11 psrld $20,%xmm5 pxor %xmm11,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $7,%xmm11 psrld $25,%xmm5 pxor %xmm11,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $12,%xmm11 psrld $20,%xmm6 pxor %xmm11,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $7,%xmm11 psrld $25,%xmm6 pxor %xmm11,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 movdqa 0+80(%rbp),%xmm11 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax 
mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movdqa %xmm9,0+80(%rbp) paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb .Lrol16(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $12,%xmm9 psrld $20,%xmm7 pxor %xmm9,%xmm7 paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb .Lrol8(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $7,%xmm9 psrld $25,%xmm7 pxor %xmm9,%xmm7 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 movdqa 0+80(%rbp),%xmm9 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx movdqa %xmm11,0+80(%rbp) paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $12,%xmm11 psrld $20,%xmm4 pxor %xmm11,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $7,%xmm11 psrld $25,%xmm4 pxor %xmm11,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $12,%xmm11 psrld $20,%xmm5 pxor %xmm11,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $7,%xmm11 psrld $25,%xmm5 pxor %xmm11,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $12,%xmm11 psrld $20,%xmm6 pxor %xmm11,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $7,%xmm11 psrld $25,%xmm6 pxor %xmm11,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 
102,69,15,58,15,246,4 movdqa 0+80(%rbp),%xmm11 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movdqa %xmm9,0+80(%rbp) paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb .Lrol16(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $12,%xmm9 psrld $20,%xmm7 pxor %xmm9,%xmm7 paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb .Lrol8(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $7,%xmm9 psrld $25,%xmm7 pxor %xmm9,%xmm7 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 movdqa 0+80(%rbp),%xmm9 addq $16,%r8 cmpq $160,%r8 jb .Lopen_sse_tail_256_rounds_and_x1hash movq %rbx,%rcx andq $-16,%rcx .Lopen_sse_tail_256_hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq $16,%r8 cmpq %rcx,%r8 jb .Lopen_sse_tail_256_hash paddd .Lchacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd .Lchacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqa %xmm12,0+80(%rbp) movdqu 0 + 0(%rsi),%xmm12 pxor 
%xmm3,%xmm12 movdqu %xmm12,0 + 0(%rdi) movdqu 16 + 0(%rsi),%xmm12 pxor %xmm7,%xmm12 movdqu %xmm12,16 + 0(%rdi) movdqu 32 + 0(%rsi),%xmm12 pxor %xmm11,%xmm12 movdqu %xmm12,32 + 0(%rdi) movdqu 48 + 0(%rsi),%xmm12 pxor %xmm15,%xmm12 movdqu %xmm12,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 64(%rdi) movdqu %xmm6,16 + 64(%rdi) movdqu %xmm10,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 128(%rdi) movdqu %xmm5,16 + 128(%rdi) movdqu %xmm9,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) movdqa 0+80(%rbp),%xmm12 subq $192,%rbx leaq 192(%rsi),%rsi leaq 192(%rdi),%rdi .Lopen_sse_tail_64_dec_loop: cmpq $16,%rbx jb .Lopen_sse_tail_16_init subq $16,%rbx movdqu (%rsi),%xmm3 pxor %xmm3,%xmm0 movdqu %xmm0,(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi movdqa %xmm4,%xmm0 movdqa %xmm8,%xmm4 movdqa %xmm12,%xmm8 jmp .Lopen_sse_tail_64_dec_loop .Lopen_sse_tail_16_init: movdqa %xmm0,%xmm1 .Lopen_sse_tail_16: testq %rbx,%rbx jz .Lopen_sse_finalize pxor %xmm3,%xmm3 leaq -1(%rsi,%rbx,1),%rsi movq %rbx,%r8 .Lopen_sse_tail_16_compose: pslldq $1,%xmm3 pinsrb $0,(%rsi),%xmm3 subq $1,%rsi subq $1,%r8 jnz .Lopen_sse_tail_16_compose .byte 102,73,15,126,221 pextrq $1,%xmm3,%r14 pxor %xmm1,%xmm3 .Lopen_sse_tail_16_extract: pextrb $0,%xmm3,(%rdi) psrldq $1,%xmm3 addq $1,%rdi subq $1,%rbx jne .Lopen_sse_tail_16_extract addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq 
$0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lopen_sse_finalize: addq 0+0+32(%rbp),%r10 adcq 8+0+32(%rbp),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movq %r10,%r13 movq %r11,%r14 movq %r12,%r15 subq $-5,%r10 sbbq $-1,%r11 sbbq $3,%r12 cmovcq %r13,%r10 cmovcq %r14,%r11 cmovcq %r15,%r12 addq 0+0+16(%rbp),%r10 adcq 8+0+16(%rbp),%r11 .cfi_remember_state addq $288 + 0 + 32,%rsp .cfi_adjust_cfa_offset -(288 + 32) popq %r9 .cfi_adjust_cfa_offset -8 .cfi_restore %r9 movq %r10,(%r9) movq %r11,8(%r9) popq %r15 .cfi_adjust_cfa_offset -8 .cfi_restore %r15 popq %r14 .cfi_adjust_cfa_offset -8 .cfi_restore %r14 popq %r13 .cfi_adjust_cfa_offset -8 .cfi_restore %r13 popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 popq %rbx .cfi_adjust_cfa_offset -8 .cfi_restore %rbx popq %rbp .cfi_adjust_cfa_offset -8 .cfi_restore %rbp ret .Lopen_sse_128: .cfi_restore_state movdqu .Lchacha20_consts(%rip),%xmm0 movdqa %xmm0,%xmm1 movdqa %xmm0,%xmm2 movdqu 0(%r9),%xmm4 movdqa %xmm4,%xmm5 movdqa %xmm4,%xmm6 movdqu 16(%r9),%xmm8 movdqa %xmm8,%xmm9 movdqa %xmm8,%xmm10 movdqu 32(%r9),%xmm12 movdqa %xmm12,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm14 paddd .Lsse_inc(%rip),%xmm14 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa %xmm13,%xmm15 movq $10,%r10 
.Lopen_sse_128_rounds: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld 
$25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 decq %r10 jnz .Lopen_sse_128_rounds paddd .Lchacha20_consts(%rip),%xmm0 paddd .Lchacha20_consts(%rip),%xmm1 paddd .Lchacha20_consts(%rip),%xmm2 paddd %xmm7,%xmm4 paddd %xmm7,%xmm5 paddd %xmm7,%xmm6 paddd %xmm11,%xmm9 paddd %xmm11,%xmm10 paddd %xmm15,%xmm13 paddd .Lsse_inc(%rip),%xmm15 paddd %xmm15,%xmm14 pand .Lclamp(%rip),%xmm0 movdqa %xmm0,0+0(%rbp) movdqa %xmm4,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal .Lopen_sse_128_xor_hash: cmpq $16,%rbx jb .Lopen_sse_tail_16 subq $16,%rbx addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movdqu 0(%rsi),%xmm3 pxor %xmm3,%xmm1 movdqu %xmm1,0(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movdqa %xmm5,%xmm1 movdqa %xmm9,%xmm5 movdqa %xmm13,%xmm9 movdqa %xmm2,%xmm13 movdqa %xmm6,%xmm2 movdqa %xmm10,%xmm6 movdqa %xmm14,%xmm10 jmp .Lopen_sse_128_xor_hash .size chacha20_poly1305_open_nohw, .-chacha20_poly1305_open_nohw .cfi_endproc .globl 
chacha20_poly1305_seal_nohw .hidden chacha20_poly1305_seal_nohw .type chacha20_poly1305_seal_nohw,@function .align 64 chacha20_poly1305_seal_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 pushq %r9 .cfi_adjust_cfa_offset 8 .cfi_offset %r9,-64 subq $288 + 0 + 32,%rsp .cfi_adjust_cfa_offset 288 + 32 leaq 32(%rsp),%rbp andq $-32,%rbp movq 56(%r9),%rbx addq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) movq %rdx,%rbx cmpq $128,%rbx jbe .Lseal_sse_128 movdqa .Lchacha20_consts(%rip),%xmm0 movdqu 0(%r9),%xmm4 movdqu 16(%r9),%xmm8 movdqu 32(%r9),%xmm12 movdqa %xmm0,%xmm1 movdqa %xmm0,%xmm2 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm5 movdqa %xmm4,%xmm6 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm9 movdqa %xmm8,%xmm10 movdqa %xmm8,%xmm11 movdqa %xmm12,%xmm15 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,%xmm14 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,%xmm13 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm4,0+48(%rbp) movdqa %xmm8,0+64(%rbp) movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) movq $10,%r10 .Lseal_sse_init_rounds: movdqa %xmm8,0+80(%rbp) movdqa .Lrol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 
pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa .Lrol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 movdqa %xmm8,0+80(%rbp) movdqa .Lrol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa 
%xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa .Lrol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 decq %r10 jnz .Lseal_sse_init_rounds paddd .Lchacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd .Lchacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 pand .Lclamp(%rip),%xmm3 movdqa %xmm3,0+0(%rbp) movdqa %xmm7,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 0(%rdi) movdqu %xmm6,16 + 0(%rdi) movdqu %xmm10,32 + 0(%rdi) 
movdqu %xmm15,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 64(%rdi) movdqu %xmm5,16 + 64(%rdi) movdqu %xmm9,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) cmpq $192,%rbx ja .Lseal_sse_main_init movq $128,%rcx subq $128,%rbx leaq 128(%rsi),%rsi jmp .Lseal_sse_128_tail_hash .Lseal_sse_main_init: movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm0 pxor %xmm7,%xmm4 pxor %xmm11,%xmm8 pxor %xmm12,%xmm15 movdqu %xmm0,0 + 128(%rdi) movdqu %xmm4,16 + 128(%rdi) movdqu %xmm8,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) movq $192,%rcx subq $192,%rbx leaq 192(%rsi),%rsi movq $2,%rcx movq $8,%r8 cmpq $64,%rbx jbe .Lseal_sse_tail_64 cmpq $128,%rbx jbe .Lseal_sse_tail_128 cmpq $192,%rbx jbe .Lseal_sse_tail_192 .Lseal_sse_main_loop: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa 0+96(%rbp),%xmm15 paddd .Lsse_inc(%rip),%xmm15 movdqa %xmm15,%xmm14 paddd .Lsse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) .align 32 .Lseal_sse_main_rounds: movdqa %xmm8,0+80(%rbp) movdqa .Lrol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 addq 0+0(%rdi),%r10 
adcq 8+0(%rdi),%r11 adcq $1,%r12 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movdqa .Lrol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 movdqa %xmm8,0+80(%rbp) movdqa .Lrol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor 
%xmm2,%xmm14 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa .Lrol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 leaq 16(%rdi),%rdi decq %r8 jge .Lseal_sse_main_rounds addq 
0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi decq %rcx jg .Lseal_sse_main_rounds paddd .Lchacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd .Lchacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqa %xmm14,0+80(%rbp) movdqa %xmm14,0+80(%rbp) movdqu 0 + 0(%rsi),%xmm14 pxor %xmm3,%xmm14 movdqu %xmm14,0 + 0(%rdi) movdqu 16 + 0(%rsi),%xmm14 pxor %xmm7,%xmm14 movdqu %xmm14,16 + 0(%rdi) movdqu 32 + 0(%rsi),%xmm14 pxor %xmm11,%xmm14 movdqu %xmm14,32 + 0(%rdi) movdqu 48 + 0(%rsi),%xmm14 pxor %xmm15,%xmm14 movdqu %xmm14,48 + 0(%rdi) movdqa 0+80(%rbp),%xmm14 movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 64(%rdi) movdqu %xmm6,16 + 64(%rdi) movdqu %xmm10,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 128(%rdi) movdqu %xmm5,16 + 128(%rdi) movdqu %xmm9,32 + 
128(%rdi) movdqu %xmm15,48 + 128(%rdi) cmpq $256,%rbx ja .Lseal_sse_main_loop_xor movq $192,%rcx subq $192,%rbx leaq 192(%rsi),%rsi jmp .Lseal_sse_128_tail_hash .Lseal_sse_main_loop_xor: movdqu 0 + 192(%rsi),%xmm3 movdqu 16 + 192(%rsi),%xmm7 movdqu 32 + 192(%rsi),%xmm11 movdqu 48 + 192(%rsi),%xmm15 pxor %xmm3,%xmm0 pxor %xmm7,%xmm4 pxor %xmm11,%xmm8 pxor %xmm12,%xmm15 movdqu %xmm0,0 + 192(%rdi) movdqu %xmm4,16 + 192(%rdi) movdqu %xmm8,32 + 192(%rdi) movdqu %xmm15,48 + 192(%rdi) leaq 256(%rsi),%rsi subq $256,%rbx movq $6,%rcx movq $4,%r8 cmpq $192,%rbx jg .Lseal_sse_main_loop movq %rbx,%rcx testq %rbx,%rbx je .Lseal_sse_128_tail_hash movq $6,%rcx cmpq $128,%rbx ja .Lseal_sse_tail_192 cmpq $64,%rbx ja .Lseal_sse_tail_128 .Lseal_sse_tail_64: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa 0+96(%rbp),%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) .Lseal_sse_tail_64_rounds_and_x2hash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_sse_tail_64_rounds_and_x1hash: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 
.byte 102,69,15,58,15,228,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi decq %rcx jg .Lseal_sse_tail_64_rounds_and_x2hash decq %r8 jge .Lseal_sse_tail_64_rounds_and_x1hash paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 jmp .Lseal_sse_128_tail_xor .Lseal_sse_tail_128: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa 0+96(%rbp),%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) .Lseal_sse_tail_128_rounds_and_x2hash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq 
%rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_sse_tail_128_rounds_and_x1hash: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd 
%xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 leaq 16(%rdi),%rdi decq %rcx jg .Lseal_sse_tail_128_rounds_and_x2hash decq %r8 jge .Lseal_sse_tail_128_rounds_and_x1hash paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 0(%rdi) movdqu %xmm5,16 + 0(%rdi) movdqu %xmm9,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movq $64,%rcx subq $64,%rbx leaq 64(%rsi),%rsi jmp .Lseal_sse_128_tail_hash .Lseal_sse_tail_192: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa 0+96(%rbp),%xmm14 paddd .Lsse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) .Lseal_sse_tail_192_rounds_and_x2hash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 
8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_sse_tail_192_rounds_and_x1hash: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 
movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 leaq 16(%rdi),%rdi decq %rcx jg .Lseal_sse_tail_192_rounds_and_x2hash decq %r8 jge .Lseal_sse_tail_192_rounds_and_x1hash paddd .Lchacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 
movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 0(%rdi) movdqu %xmm6,16 + 0(%rdi) movdqu %xmm10,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 64(%rdi) movdqu %xmm5,16 + 64(%rdi) movdqu %xmm9,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movq $128,%rcx subq $128,%rbx leaq 128(%rsi),%rsi .Lseal_sse_128_tail_hash: cmpq $16,%rcx jb .Lseal_sse_128_tail_xor addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 subq $16,%rcx leaq 16(%rdi),%rdi jmp .Lseal_sse_128_tail_hash .Lseal_sse_128_tail_xor: cmpq $16,%rbx jb .Lseal_sse_tail_16 subq $16,%rbx movdqu 0(%rsi),%xmm3 pxor %xmm3,%xmm0 movdqu %xmm0,0(%rdi) addq 0(%rdi),%r10 adcq 8(%rdi),%r11 adcq $1,%r12 leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq 
$-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movdqa %xmm4,%xmm0 movdqa %xmm8,%xmm4 movdqa %xmm12,%xmm8 movdqa %xmm1,%xmm12 movdqa %xmm5,%xmm1 movdqa %xmm9,%xmm5 movdqa %xmm13,%xmm9 jmp .Lseal_sse_128_tail_xor .Lseal_sse_tail_16: testq %rbx,%rbx jz .Lprocess_blocks_of_extra_in movq %rbx,%r8 movq %rbx,%rcx leaq -1(%rsi,%rbx,1),%rsi pxor %xmm15,%xmm15 .Lseal_sse_tail_16_compose: pslldq $1,%xmm15 pinsrb $0,(%rsi),%xmm15 leaq -1(%rsi),%rsi decq %rcx jne .Lseal_sse_tail_16_compose pxor %xmm0,%xmm15 movq %rbx,%rcx movdqu %xmm15,%xmm0 .Lseal_sse_tail_16_extract: pextrb $0,%xmm0,(%rdi) psrldq $1,%xmm0 addq $1,%rdi subq $1,%rcx jnz .Lseal_sse_tail_16_extract movq 288 + 0 + 32(%rsp),%r9 movq 56(%r9),%r14 movq 48(%r9),%r13 testq %r14,%r14 jz .Lprocess_partial_block movq $16,%r15 subq %rbx,%r15 cmpq %r15,%r14 jge .Lload_extra_in movq %r14,%r15 .Lload_extra_in: leaq -1(%r13,%r15,1),%rsi addq %r15,%r13 subq %r15,%r14 movq %r13,48(%r9) movq %r14,56(%r9) addq %r15,%r8 pxor %xmm11,%xmm11 .Lload_extra_load_loop: pslldq $1,%xmm11 pinsrb $0,(%rsi),%xmm11 leaq -1(%rsi),%rsi subq $1,%r15 jnz .Lload_extra_load_loop movq %rbx,%r15 .Lload_extra_shift_loop: pslldq $1,%xmm11 subq $1,%r15 jnz .Lload_extra_shift_loop leaq .Land_masks(%rip),%r15 shlq $4,%rbx pand -16(%r15,%rbx,1),%xmm15 por %xmm11,%xmm15 .byte 102,77,15,126,253 pextrq $1,%xmm15,%r14 addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 
adcq $0,%r12 .Lprocess_blocks_of_extra_in: movq 288+32+0 (%rsp),%r9 movq 48(%r9),%rsi movq 56(%r9),%r8 movq %r8,%rcx shrq $4,%r8 .Lprocess_extra_hash_loop: jz process_extra_in_trailer addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rsi),%rsi subq $1,%r8 jmp .Lprocess_extra_hash_loop process_extra_in_trailer: andq $15,%rcx movq %rcx,%rbx jz .Ldo_length_block leaq -1(%rsi,%rcx,1),%rsi .Lprocess_extra_in_trailer_load: pslldq $1,%xmm15 pinsrb $0,(%rsi),%xmm15 leaq -1(%rsi),%rsi subq $1,%rcx jnz .Lprocess_extra_in_trailer_load .Lprocess_partial_block: leaq .Land_masks(%rip),%r15 shlq $4,%rbx pand -16(%r15,%rbx,1),%xmm15 .byte 102,77,15,126,253 pextrq $1,%xmm15,%r14 addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Ldo_length_block: addq 0+0+32(%rbp),%r10 adcq 8+0+32(%rbp),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 
0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movq %r10,%r13 movq %r11,%r14 movq %r12,%r15 subq $-5,%r10 sbbq $-1,%r11 sbbq $3,%r12 cmovcq %r13,%r10 cmovcq %r14,%r11 cmovcq %r15,%r12 addq 0+0+16(%rbp),%r10 adcq 8+0+16(%rbp),%r11 .cfi_remember_state addq $288 + 0 + 32,%rsp .cfi_adjust_cfa_offset -(288 + 32) popq %r9 .cfi_adjust_cfa_offset -8 .cfi_restore %r9 movq %r10,(%r9) movq %r11,8(%r9) popq %r15 .cfi_adjust_cfa_offset -8 .cfi_restore %r15 popq %r14 .cfi_adjust_cfa_offset -8 .cfi_restore %r14 popq %r13 .cfi_adjust_cfa_offset -8 .cfi_restore %r13 popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 popq %rbx .cfi_adjust_cfa_offset -8 .cfi_restore %rbx popq %rbp .cfi_adjust_cfa_offset -8 .cfi_restore %rbp ret .Lseal_sse_128: .cfi_restore_state movdqu .Lchacha20_consts(%rip),%xmm0 movdqa %xmm0,%xmm1 movdqa %xmm0,%xmm2 movdqu 0(%r9),%xmm4 movdqa %xmm4,%xmm5 movdqa %xmm4,%xmm6 movdqu 16(%r9),%xmm8 movdqa %xmm8,%xmm9 movdqa %xmm8,%xmm10 movdqu 32(%r9),%xmm14 movdqa %xmm14,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa %xmm12,%xmm15 movq $10,%r10 .Lseal_sse_128_rounds: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 
102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 
psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 decq %r10 jnz .Lseal_sse_128_rounds paddd .Lchacha20_consts(%rip),%xmm0 paddd .Lchacha20_consts(%rip),%xmm1 paddd .Lchacha20_consts(%rip),%xmm2 paddd %xmm7,%xmm4 paddd %xmm7,%xmm5 paddd %xmm7,%xmm6 paddd %xmm11,%xmm8 paddd %xmm11,%xmm9 paddd %xmm15,%xmm12 paddd .Lsse_inc(%rip),%xmm15 paddd %xmm15,%xmm13 pand .Lclamp(%rip),%xmm2 movdqa %xmm2,0+0(%rbp) movdqa %xmm6,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal jmp .Lseal_sse_128_tail_xor .size chacha20_poly1305_seal_nohw, .-chacha20_poly1305_seal_nohw .cfi_endproc .globl chacha20_poly1305_open_avx2 .hidden chacha20_poly1305_open_avx2 .type chacha20_poly1305_open_avx2,@function .align 64 chacha20_poly1305_open_avx2: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 pushq %r9 .cfi_adjust_cfa_offset 8 .cfi_offset %r9,-64 subq $288 + 0 + 32,%rsp .cfi_adjust_cfa_offset 288 + 32 leaq 32(%rsp),%rbp andq $-32,%rbp movq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) vzeroupper vmovdqa .Lchacha20_consts(%rip),%ymm0 vbroadcasti128 0(%r9),%ymm4 vbroadcasti128 16(%r9),%ymm8 vbroadcasti128 32(%r9),%ymm12 vpaddd .Lavx2_init(%rip),%ymm12,%ymm12 cmpq $192,%rbx jbe .Lopen_avx2_192 cmpq $320,%rbx jbe .Lopen_avx2_320 vmovdqa %ymm4,0+64(%rbp) vmovdqa %ymm8,0+96(%rbp) vmovdqa %ymm12,0+160(%rbp) movq $10,%r10 .Lopen_avx2_init_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb 
.Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 decq %r10 jne .Lopen_avx2_init_rounds vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand .Lclamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 movq %r8,%r8 call poly_hash_ad_internal xorq %rcx,%rcx .Lopen_avx2_init_hash: addq 0+0(%rsi,%rcx,1),%r10 adcq 8+0(%rsi,%rcx,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq $16,%rcx cmpq $64,%rcx jne .Lopen_avx2_init_hash vpxor 0(%rsi),%ymm0,%ymm0 vpxor 32(%rsi),%ymm4,%ymm4 vmovdqu %ymm0,0(%rdi) vmovdqu %ymm4,32(%rdi) leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi subq $64,%rbx 
.Lopen_avx2_main_loop: cmpq $512,%rbx jb .Lopen_avx2_main_loop_done vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) xorq %rcx,%rcx .Lopen_avx2_main_loop_rounds: addq 0+0(%rsi,%rcx,1),%r10 adcq 8+0(%rsi,%rcx,1),%r11 adcq $1,%r12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 addq %rax,%r15 adcq %rdx,%r9 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd 
%ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 addq 0+16(%rsi,%rcx,1),%r10 adcq 8+16(%rsi,%rcx,1),%r11 adcq $1,%r12 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb 
%ymm8,%ymm14,%ymm14 addq %rax,%r15 adcq %rdx,%r9 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq 0+32(%rsi,%rcx,1),%r10 adcq 8+32(%rsi,%rcx,1),%r11 adcq $1,%r12 leaq 48(%rcx),%rcx vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq %rax,%r15 adcq %rdx,%r9 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 
vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpalignr $4,%ymm12,%ymm12,%ymm12 cmpq $60*8,%rcx jne .Lopen_avx2_main_loop_rounds vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) addq 0+60*8(%rsi),%r10 adcq 8+60*8(%rsi),%r11 adcq $1,%r12 vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 
movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) addq 0+60*8+16(%rsi),%r10 adcq 8+60*8+16(%rsi),%r11 adcq $1,%r12 vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vperm2i128 $0x13,%ymm0,%ymm4,%ymm4 vperm2i128 $0x02,%ymm8,%ymm12,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm8 vpxor 0+384(%rsi),%ymm3,%ymm3 vpxor 32+384(%rsi),%ymm0,%ymm0 vpxor 64+384(%rsi),%ymm4,%ymm4 vpxor 96+384(%rsi),%ymm8,%ymm8 vmovdqu %ymm3,0+384(%rdi) vmovdqu %ymm0,32+384(%rdi) vmovdqu 
%ymm4,64+384(%rdi) vmovdqu %ymm8,96+384(%rdi) leaq 512(%rsi),%rsi leaq 512(%rdi),%rdi subq $512,%rbx jmp .Lopen_avx2_main_loop .Lopen_avx2_main_loop_done: testq %rbx,%rbx vzeroupper je .Lopen_sse_finalize cmpq $384,%rbx ja .Lopen_avx2_tail_512 cmpq $256,%rbx ja .Lopen_avx2_tail_384 cmpq $128,%rbx ja .Lopen_avx2_tail_256 vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) xorq %r8,%r8 movq %rbx,%rcx andq $-16,%rcx testq %rcx,%rcx je .Lopen_avx2_tail_128_rounds .Lopen_avx2_tail_128_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lopen_avx2_tail_128_rounds: addq $16,%r8 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor 
%ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 cmpq %rcx,%r8 jb .Lopen_avx2_tail_128_rounds_and_x1hash cmpq $160,%r8 jne .Lopen_avx2_tail_128_rounds vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 jmp .Lopen_avx2_tail_128_xor .Lopen_avx2_tail_256: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) movq %rbx,0+128(%rbp) movq %rbx,%rcx subq $128,%rcx shrq $4,%rcx movq $10,%r8 cmpq $10,%rcx cmovgq %r8,%rcx movq %rsi,%rbx xorq %r8,%r8 .Lopen_avx2_tail_256_rounds_and_x1hash: addq 0+0(%rbx),%r10 adcq 8+0(%rbx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rbx),%rbx .Lopen_avx2_tail_256_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld 
$12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 incq %r8 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor 
%ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 cmpq %rcx,%r8 jb .Lopen_avx2_tail_256_rounds_and_x1hash cmpq $10,%r8 jne .Lopen_avx2_tail_256_rounds movq %rbx,%r8 subq %rsi,%rbx movq %rbx,%rcx movq 0+128(%rbp),%rbx .Lopen_avx2_tail_256_hash: addq $16,%rcx cmpq %rbx,%rcx jg .Lopen_avx2_tail_256_done addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 jmp .Lopen_avx2_tail_256_hash .Lopen_avx2_tail_256_done: vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm1,%ymm1 vpxor 64+0(%rsi),%ymm5,%ymm5 vpxor 96+0(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm1,32+0(%rdi) vmovdqu %ymm5,64+0(%rdi) vmovdqu %ymm9,96+0(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 
$0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 leaq 128(%rsi),%rsi leaq 128(%rdi),%rdi subq $128,%rbx jmp .Lopen_avx2_tail_128_xor .Lopen_avx2_tail_384: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) movq %rbx,0+128(%rbp) movq %rbx,%rcx subq $256,%rcx shrq $4,%rcx addq $6,%rcx movq $10,%r8 cmpq $10,%rcx cmovgq %r8,%rcx movq %rsi,%rbx xorq %r8,%r8 .Lopen_avx2_tail_384_rounds_and_x2hash: addq 0+0(%rbx),%r10 adcq 8+0(%rbx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rbx),%rbx .Lopen_avx2_tail_384_rounds_and_x1hash: vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld 
$20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 addq 0+0(%rbx),%r10 adcq 8+0(%rbx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rbx),%rbx incq %r8 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr 
$12,%ymm6,%ymm6,%ymm6 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 cmpq %rcx,%r8 jb .Lopen_avx2_tail_384_rounds_and_x2hash cmpq $10,%r8 jne .Lopen_avx2_tail_384_rounds_and_x1hash movq %rbx,%r8 subq %rsi,%rbx movq %rbx,%rcx movq 0+128(%rbp),%rbx .Lopen_avx2_384_tail_hash: addq $16,%rcx cmpq %rbx,%rcx jg .Lopen_avx2_384_tail_done addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 jmp .Lopen_avx2_384_tail_hash .Lopen_avx2_384_tail_done: vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 
0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm2,%ymm2 vpxor 64+0(%rsi),%ymm6,%ymm6 vpxor 96+0(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm2,32+0(%rdi) vmovdqu %ymm6,64+0(%rdi) vmovdqu %ymm10,96+0(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm1,%ymm1 vpxor 64+128(%rsi),%ymm5,%ymm5 vpxor 96+128(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm1,32+128(%rdi) vmovdqu %ymm5,64+128(%rdi) vmovdqu %ymm9,96+128(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 leaq 256(%rsi),%rsi leaq 256(%rdi),%rdi subq $256,%rbx jmp .Lopen_avx2_tail_128_xor .Lopen_avx2_tail_512: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) xorq %rcx,%rcx movq %rsi,%r8 .Lopen_avx2_tail_512_rounds_and_x2hash: addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 
0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 .Lopen_avx2_tail_512_rounds_and_x1hash: vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 
shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 addq 0+16(%r8),%r10 adcq 8+16(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%r8),%r8 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd 
%ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr 
$8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 incq %rcx cmpq $4,%rcx jl .Lopen_avx2_tail_512_rounds_and_x2hash cmpq $10,%rcx jne .Lopen_avx2_tail_512_rounds_and_x1hash movq %rbx,%rcx subq $384,%rcx andq $-16,%rcx .Lopen_avx2_tail_512_hash: testq %rcx,%rcx je .Lopen_avx2_tail_512_done addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 subq $16,%rcx jmp .Lopen_avx2_tail_512_hash .Lopen_avx2_tail_512_done: vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 
$0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 leaq 384(%rsi),%rsi leaq 384(%rdi),%rdi subq $384,%rbx .Lopen_avx2_tail_128_xor: cmpq $32,%rbx jb .Lopen_avx2_tail_32_xor subq $32,%rbx vpxor (%rsi),%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) leaq 32(%rsi),%rsi leaq 32(%rdi),%rdi vmovdqa %ymm4,%ymm0 vmovdqa %ymm8,%ymm4 vmovdqa %ymm12,%ymm8 jmp .Lopen_avx2_tail_128_xor .Lopen_avx2_tail_32_xor: cmpq $16,%rbx vmovdqa %xmm0,%xmm1 jb .Lopen_avx2_exit subq $16,%rbx vpxor (%rsi),%xmm0,%xmm1 vmovdqu %xmm1,(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi vperm2i128 $0x11,%ymm0,%ymm0,%ymm0 vmovdqa %xmm0,%xmm1 .Lopen_avx2_exit: vzeroupper jmp .Lopen_sse_tail_16 .Lopen_avx2_192: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13 vmovdqa %ymm12,%ymm11 vmovdqa %ymm13,%ymm15 movq $10,%r10 .Lopen_avx2_192_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld 
$7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 decq %r10 jne .Lopen_avx2_192_rounds vpaddd %ymm2,%ymm0,%ymm0 vpaddd %ymm2,%ymm1,%ymm1 vpaddd %ymm6,%ymm4,%ymm4 vpaddd %ymm6,%ymm5,%ymm5 vpaddd %ymm10,%ymm8,%ymm8 vpaddd %ymm10,%ymm9,%ymm9 vpaddd %ymm11,%ymm12,%ymm12 vpaddd %ymm15,%ymm13,%ymm13 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand .Lclamp(%rip),%ymm3,%ymm3 vmovdqa 
%ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 .Lopen_avx2_short: movq %r8,%r8 call poly_hash_ad_internal .Lopen_avx2_short_hash_and_xor_loop: cmpq $32,%rbx jb .Lopen_avx2_short_tail_32 subq $32,%rbx addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rsi),%r10 adcq 8+16(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor (%rsi),%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) leaq 32(%rsi),%rsi leaq 32(%rdi),%rdi vmovdqa %ymm4,%ymm0 vmovdqa %ymm8,%ymm4 vmovdqa %ymm12,%ymm8 vmovdqa %ymm1,%ymm12 vmovdqa %ymm5,%ymm1 vmovdqa %ymm9,%ymm5 vmovdqa %ymm13,%ymm9 vmovdqa %ymm2,%ymm13 vmovdqa %ymm6,%ymm2 jmp .Lopen_avx2_short_hash_and_xor_loop .Lopen_avx2_short_tail_32: cmpq $16,%rbx vmovdqa %xmm0,%xmm1 jb .Lopen_avx2_short_tail_32_exit subq 
$16,%rbx addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor (%rsi),%xmm0,%xmm3 vmovdqu %xmm3,(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi vextracti128 $1,%ymm0,%xmm1 .Lopen_avx2_short_tail_32_exit: vzeroupper jmp .Lopen_sse_tail_16 .Lopen_avx2_320: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13 vpaddd .Lavx2_inc(%rip),%ymm13,%ymm14 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) movq $10,%r10 .Lopen_avx2_320_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor 
%ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb 
.Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 decq %r10 jne .Lopen_avx2_320_rounds vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd %ymm7,%ymm4,%ymm4 vpaddd %ymm7,%ymm5,%ymm5 vpaddd %ymm7,%ymm6,%ymm6 vpaddd %ymm11,%ymm8,%ymm8 vpaddd %ymm11,%ymm9,%ymm9 vpaddd %ymm11,%ymm10,%ymm10 vpaddd 0+160(%rbp),%ymm12,%ymm12 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd 0+224(%rbp),%ymm14,%ymm14 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand .Lclamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 vperm2i128 $0x02,%ymm2,%ymm6,%ymm9 vperm2i128 $0x02,%ymm10,%ymm14,%ymm13 vperm2i128 $0x13,%ymm2,%ymm6,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm6 jmp .Lopen_avx2_short .size chacha20_poly1305_open_avx2, .-chacha20_poly1305_open_avx2 .cfi_endproc .globl chacha20_poly1305_seal_avx2 .hidden chacha20_poly1305_seal_avx2 .type chacha20_poly1305_seal_avx2,@function .align 64 chacha20_poly1305_seal_avx2: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 pushq %r9 .cfi_adjust_cfa_offset 8 .cfi_offset %r9,-64 subq $288 + 0 + 32,%rsp .cfi_adjust_cfa_offset 288 + 32 leaq 32(%rsp),%rbp andq $-32,%rbp movq 56(%r9),%rbx addq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) movq %rdx,%rbx vzeroupper vmovdqa 
.Lchacha20_consts(%rip),%ymm0 vbroadcasti128 0(%r9),%ymm4 vbroadcasti128 16(%r9),%ymm8 vbroadcasti128 32(%r9),%ymm12 vpaddd .Lavx2_init(%rip),%ymm12,%ymm12 cmpq $192,%rbx jbe .Lseal_avx2_192 cmpq $320,%rbx jbe .Lseal_avx2_320 vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm4,%ymm7 vmovdqa %ymm4,0+64(%rbp) vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vmovdqa %ymm8,%ymm11 vmovdqa %ymm8,0+96(%rbp) vmovdqa %ymm12,%ymm15 vpaddd .Lavx2_inc(%rip),%ymm15,%ymm14 vpaddd .Lavx2_inc(%rip),%ymm14,%ymm13 vpaddd .Lavx2_inc(%rip),%ymm13,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm15,0+256(%rbp) movq $10,%r10 .Lseal_avx2_init_rounds: vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd 
%ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor 
%ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 decq %r10 jnz .Lseal_avx2_init_rounds vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vperm2i128 $0x02,%ymm3,%ymm7,%ymm15 vperm2i128 $0x13,%ymm3,%ymm7,%ymm3 vpand .Lclamp(%rip),%ymm15,%ymm15 vmovdqa %ymm15,0+0(%rbp) movq %r8,%r8 call poly_hash_ad_internal vpxor 0(%rsi),%ymm3,%ymm3 vpxor 32(%rsi),%ymm11,%ymm11 vmovdqu %ymm3,0(%rdi) 
vmovdqu %ymm11,32(%rdi) vperm2i128 $0x02,%ymm2,%ymm6,%ymm15 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+64(%rsi),%ymm15,%ymm15 vpxor 32+64(%rsi),%ymm2,%ymm2 vpxor 64+64(%rsi),%ymm6,%ymm6 vpxor 96+64(%rsi),%ymm10,%ymm10 vmovdqu %ymm15,0+64(%rdi) vmovdqu %ymm2,32+64(%rdi) vmovdqu %ymm6,64+64(%rdi) vmovdqu %ymm10,96+64(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm15 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+192(%rsi),%ymm15,%ymm15 vpxor 32+192(%rsi),%ymm1,%ymm1 vpxor 64+192(%rsi),%ymm5,%ymm5 vpxor 96+192(%rsi),%ymm9,%ymm9 vmovdqu %ymm15,0+192(%rdi) vmovdqu %ymm1,32+192(%rdi) vmovdqu %ymm5,64+192(%rdi) vmovdqu %ymm9,96+192(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm15 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm15,%ymm8 leaq 320(%rsi),%rsi subq $320,%rbx movq $320,%rcx cmpq $128,%rbx jbe .Lseal_avx2_short_hash_remainder vpxor 0(%rsi),%ymm0,%ymm0 vpxor 32(%rsi),%ymm4,%ymm4 vpxor 64(%rsi),%ymm8,%ymm8 vpxor 96(%rsi),%ymm12,%ymm12 vmovdqu %ymm0,320(%rdi) vmovdqu %ymm4,352(%rdi) vmovdqu %ymm8,384(%rdi) vmovdqu %ymm12,416(%rdi) leaq 128(%rsi),%rsi subq $128,%rbx movq $8,%rcx movq $2,%r8 cmpq $128,%rbx jbe .Lseal_avx2_tail_128 cmpq $256,%rbx jbe .Lseal_avx2_tail_256 cmpq $384,%rbx jbe .Lseal_avx2_tail_384 cmpq $512,%rbx jbe .Lseal_avx2_tail_512 vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) 
vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 
vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr 
$12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 subq $16,%rdi movq $9,%rcx jmp .Lseal_avx2_main_loop_rounds_entry .align 32 .Lseal_avx2_main_loop: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) 
vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) movq $10,%rcx .align 32 .Lseal_avx2_main_loop_rounds: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 addq %rax,%r15 adcq %rdx,%r9 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lseal_avx2_main_loop_rounds_entry: vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 addq 0+16(%rdi),%r10 adcq 
8+16(%rdi),%r11 adcq $1,%r12 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 addq %rax,%r15 adcq %rdx,%r9 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) 
vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq 0+32(%rdi),%r10 adcq 8+32(%rdi),%r11 adcq $1,%r12 leaq 48(%rdi),%rdi vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq %rax,%r15 adcq %rdx,%r9 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq 
$2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpalignr $4,%ymm12,%ymm12,%ymm12 decq %rcx jne .Lseal_avx2_main_loop_rounds vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 
vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vperm2i128 $0x13,%ymm0,%ymm4,%ymm4 vperm2i128 $0x02,%ymm8,%ymm12,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm8 vpxor 0+384(%rsi),%ymm3,%ymm3 vpxor 32+384(%rsi),%ymm0,%ymm0 vpxor 64+384(%rsi),%ymm4,%ymm4 vpxor 96+384(%rsi),%ymm8,%ymm8 vmovdqu %ymm3,0+384(%rdi) vmovdqu %ymm0,32+384(%rdi) vmovdqu %ymm4,64+384(%rdi) vmovdqu %ymm8,96+384(%rdi) leaq 512(%rsi),%rsi subq $512,%rbx cmpq $512,%rbx jg .Lseal_avx2_main_loop addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 
movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi movq $10,%rcx xorq %r8,%r8 cmpq $384,%rbx ja .Lseal_avx2_tail_512 cmpq $256,%rbx ja .Lseal_avx2_tail_384 cmpq $128,%rbx ja .Lseal_avx2_tail_256 .Lseal_avx2_tail_128: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) .Lseal_avx2_tail_128_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_avx2_tail_128_rounds_and_2xhash: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 addq 0+0(%rdi),%r10 
adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi decq %rcx jg .Lseal_avx2_tail_128_rounds_and_3xhash decq %r8 jge .Lseal_avx2_tail_128_rounds_and_2xhash vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 jmp .Lseal_avx2_short_loop 
.Lseal_avx2_tail_256: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) .Lseal_avx2_tail_256_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_avx2_tail_256_rounds_and_2xhash: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr 
$8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 
addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi decq %rcx jg .Lseal_avx2_tail_256_rounds_and_3xhash decq %r8 jge .Lseal_avx2_tail_256_rounds_and_2xhash vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm1,%ymm1 vpxor 64+0(%rsi),%ymm5,%ymm5 vpxor 96+0(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm1,32+0(%rdi) vmovdqu %ymm5,64+0(%rdi) vmovdqu %ymm9,96+0(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 movq $128,%rcx leaq 128(%rsi),%rsi subq $128,%rbx jmp .Lseal_avx2_short_hash_remainder .Lseal_avx2_tail_384: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) .Lseal_avx2_tail_384_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq 
%r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_avx2_tail_384_rounds_and_2xhash: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq 
%r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor 
%ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 leaq 32(%rdi),%rdi decq %rcx jg .Lseal_avx2_tail_384_rounds_and_3xhash decq %r8 jge .Lseal_avx2_tail_384_rounds_and_2xhash vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm2,%ymm2 vpxor 64+0(%rsi),%ymm6,%ymm6 vpxor 96+0(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm2,32+0(%rdi) vmovdqu %ymm6,64+0(%rdi) vmovdqu %ymm10,96+0(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm1,%ymm1 vpxor 64+128(%rsi),%ymm5,%ymm5 vpxor 96+128(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm1,32+128(%rdi) vmovdqu %ymm5,64+128(%rdi) vmovdqu %ymm9,96+128(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 movq $256,%rcx leaq 256(%rsi),%rsi subq $256,%rbx jmp .Lseal_avx2_short_hash_remainder .Lseal_avx2_tail_512: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) .Lseal_avx2_tail_512_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_avx2_tail_512_rounds_and_2xhash: vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 vpxor 
%ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 addq %rax,%r15 adcq %rdx,%r9 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 
vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor 
%ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi decq %rcx jg .Lseal_avx2_tail_512_rounds_and_3xhash decq %r8 jge .Lseal_avx2_tail_512_rounds_and_2xhash vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 
$0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 movq $384,%rcx leaq 384(%rsi),%rsi subq $384,%rbx jmp .Lseal_avx2_short_hash_remainder .Lseal_avx2_320: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13 vpaddd .Lavx2_inc(%rip),%ymm13,%ymm14 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) movq $10,%r10 .Lseal_avx2_320_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 
vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor 
%ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 decq %r10 jne .Lseal_avx2_320_rounds vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd %ymm7,%ymm4,%ymm4 vpaddd %ymm7,%ymm5,%ymm5 vpaddd %ymm7,%ymm6,%ymm6 vpaddd %ymm11,%ymm8,%ymm8 vpaddd %ymm11,%ymm9,%ymm9 vpaddd %ymm11,%ymm10,%ymm10 vpaddd 0+160(%rbp),%ymm12,%ymm12 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd 0+224(%rbp),%ymm14,%ymm14 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand .Lclamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 vperm2i128 $0x02,%ymm2,%ymm6,%ymm9 vperm2i128 $0x02,%ymm10,%ymm14,%ymm13 vperm2i128 $0x13,%ymm2,%ymm6,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm6 jmp .Lseal_avx2_short .Lseal_avx2_192: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13 vmovdqa %ymm12,%ymm11 vmovdqa %ymm13,%ymm15 movq $10,%r10 .Lseal_avx2_192_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr 
$8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 decq %r10 jne .Lseal_avx2_192_rounds vpaddd %ymm2,%ymm0,%ymm0 vpaddd %ymm2,%ymm1,%ymm1 vpaddd %ymm6,%ymm4,%ymm4 vpaddd %ymm6,%ymm5,%ymm5 vpaddd %ymm10,%ymm8,%ymm8 vpaddd %ymm10,%ymm9,%ymm9 vpaddd %ymm11,%ymm12,%ymm12 vpaddd %ymm15,%ymm13,%ymm13 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand .Lclamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 
$0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 .Lseal_avx2_short: movq %r8,%r8 call poly_hash_ad_internal xorq %rcx,%rcx .Lseal_avx2_short_hash_remainder: cmpq $16,%rcx jb .Lseal_avx2_short_loop addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 subq $16,%rcx addq $16,%rdi jmp .Lseal_avx2_short_hash_remainder .Lseal_avx2_short_loop: cmpq $32,%rbx jb .Lseal_avx2_short_tail subq $32,%rbx vpxor (%rsi),%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) leaq 32(%rsi),%rsi addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 
8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi vmovdqa %ymm4,%ymm0 vmovdqa %ymm8,%ymm4 vmovdqa %ymm12,%ymm8 vmovdqa %ymm1,%ymm12 vmovdqa %ymm5,%ymm1 vmovdqa %ymm9,%ymm5 vmovdqa %ymm13,%ymm9 vmovdqa %ymm2,%ymm13 vmovdqa %ymm6,%ymm2 jmp .Lseal_avx2_short_loop .Lseal_avx2_short_tail: cmpq $16,%rbx jb .Lseal_avx2_exit subq $16,%rbx vpxor (%rsi),%xmm0,%xmm3 vmovdqu %xmm3,(%rdi) leaq 16(%rsi),%rsi addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi vextracti128 $1,%ymm0,%xmm0 .Lseal_avx2_exit: vzeroupper jmp .Lseal_sse_tail_16 .cfi_endproc .size chacha20_poly1305_seal_avx2, .-chacha20_poly1305_seal_avx2 #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/err_data.cc ================================================ /* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This file was generated by go run ./util/pregenerate. */ #include #include #include static_assert(ERR_LIB_NONE == 1, "library value changed"); static_assert(ERR_LIB_SYS == 2, "library value changed"); static_assert(ERR_LIB_BN == 3, "library value changed"); static_assert(ERR_LIB_RSA == 4, "library value changed"); static_assert(ERR_LIB_DH == 5, "library value changed"); static_assert(ERR_LIB_EVP == 6, "library value changed"); static_assert(ERR_LIB_BUF == 7, "library value changed"); static_assert(ERR_LIB_OBJ == 8, "library value changed"); static_assert(ERR_LIB_PEM == 9, "library value changed"); static_assert(ERR_LIB_DSA == 10, "library value changed"); static_assert(ERR_LIB_X509 == 11, "library value changed"); static_assert(ERR_LIB_ASN1 == 12, "library value changed"); static_assert(ERR_LIB_CONF == 13, "library value changed"); static_assert(ERR_LIB_CRYPTO == 14, "library value changed"); static_assert(ERR_LIB_EC == 15, "library value changed"); static_assert(ERR_LIB_SSL == 16, "library value changed"); static_assert(ERR_LIB_BIO == 17, "library value changed"); static_assert(ERR_LIB_PKCS7 == 18, "library value changed"); static_assert(ERR_LIB_PKCS8 == 19, "library value changed"); static_assert(ERR_LIB_X509V3 == 20, "library value changed"); static_assert(ERR_LIB_RAND == 21, "library value changed"); static_assert(ERR_LIB_ENGINE == 22, "library value changed"); static_assert(ERR_LIB_OCSP == 23, "library value changed"); static_assert(ERR_LIB_UI == 24, "library 
value changed"); static_assert(ERR_LIB_COMP == 25, "library value changed"); static_assert(ERR_LIB_ECDSA == 26, "library value changed"); static_assert(ERR_LIB_ECDH == 27, "library value changed"); static_assert(ERR_LIB_HMAC == 28, "library value changed"); static_assert(ERR_LIB_DIGEST == 29, "library value changed"); static_assert(ERR_LIB_CIPHER == 30, "library value changed"); static_assert(ERR_LIB_HKDF == 31, "library value changed"); static_assert(ERR_LIB_TRUST_TOKEN == 32, "library value changed"); static_assert(ERR_LIB_USER == 33, "library value changed"); static_assert(ERR_NUM_LIBS == 34, "number of libraries changed"); extern const uint32_t kOpenSSLReasonValues[]; const uint32_t kOpenSSLReasonValues[] = { 0xc320885, 0xc32889f, 0xc3308ae, 0xc3388be, 0xc3408cd, 0xc3488e6, 0xc3508f2, 0xc35890f, 0xc36092f, 0xc36893d, 0xc37094d, 0xc37895a, 0xc38096a, 0xc388975, 0xc39098b, 0xc39899a, 0xc3a09ae, 0xc3a8892, 0xc3b00f7, 0xc3b8921, 0x10320892, 0x10329672, 0x1033167e, 0x10339697, 0x103416aa, 0x10348f93, 0x10350cdf, 0x103596bd, 0x103616e7, 0x103696fa, 0x10371719, 0x10379732, 0x10381747, 0x10389765, 0x10391774, 0x10399790, 0x103a17ab, 0x103a97ba, 0x103b17d6, 0x103b97f1, 0x103c1817, 0x103c80f7, 0x103d1828, 0x103d983c, 0x103e185b, 0x103e986a, 0x103f1881, 0x103f9894, 0x10400ca3, 0x104098a7, 0x104118c5, 0x104198d8, 0x104218f2, 0x10429902, 0x10431916, 0x1043992c, 0x10441944, 0x10449959, 0x1045196d, 0x1045997f, 0x10460635, 0x1046899a, 0x10471994, 0x104799ab, 0x104819c0, 0x104899ce, 0x10490edf, 0x10499808, 0x104a16d2, 0x14320c73, 0x14328c94, 0x14330ca3, 0x14338cb5, 0x143400b9, 0x143480f7, 0x14350c81, 0x18320090, 0x18328fe9, 0x183300b9, 0x18338fff, 0x18341013, 0x183480f7, 0x18351032, 0x1835904a, 0x18361072, 0x18369086, 0x183710be, 0x183790d4, 0x183810e8, 0x183890f8, 0x18390ac0, 0x18399108, 0x183a112e, 0x183a9154, 0x183b0ceb, 0x183b91a3, 0x183c11b5, 0x183c91c0, 0x183d11d0, 0x183d91e1, 0x183e11f2, 0x183e9204, 0x183f122d, 0x183f9246, 0x1840125e, 0x1840870d, 0x18411177, 0x18419142, 
0x18421161, 0x18428c81, 0x1843111d, 0x18439189, 0x18441028, 0x184490aa, 0x1845105f, 0x20321298, 0x20329285, 0x243212a4, 0x243289e0, 0x243312b6, 0x243392c3, 0x243412d0, 0x243492e2, 0x243512f1, 0x2435930e, 0x2436131b, 0x24369329, 0x24371337, 0x24379345, 0x2438134e, 0x2438935b, 0x2439136e, 0x24399385, 0x28320cd3, 0x28328ceb, 0x28330ca3, 0x28338cfe, 0x28340cdf, 0x283480b9, 0x283500f7, 0x28358c81, 0x2836099a, 0x2c323305, 0x2c3293a3, 0x2c333313, 0x2c33b325, 0x2c343339, 0x2c34b34b, 0x2c353366, 0x2c35b378, 0x2c3633a8, 0x2c36833a, 0x2c3733b5, 0x2c37b3e1, 0x2c38341f, 0x2c38b436, 0x2c393454, 0x2c39b464, 0x2c3a3476, 0x2c3ab48a, 0x2c3b349b, 0x2c3bb4ba, 0x2c3c13b5, 0x2c3c93cb, 0x2c3d34ff, 0x2c3d93e4, 0x2c3e3529, 0x2c3eb537, 0x2c3f354f, 0x2c3fb567, 0x2c403591, 0x2c409298, 0x2c4135a2, 0x2c41b5b5, 0x2c42125e, 0x2c42b5c6, 0x2c43076d, 0x2c43b4ac, 0x2c4433f4, 0x2c44b574, 0x2c45338b, 0x2c45b3c7, 0x2c463444, 0x2c46b4ce, 0x2c4734e3, 0x2c47b51c, 0x2c483406, 0x30320000, 0x30328015, 0x3033001f, 0x30338038, 0x30340057, 0x30348071, 0x30350078, 0x30358090, 0x303600a1, 0x303680b9, 0x303700c6, 0x303780d5, 0x303800f7, 0x30388104, 0x30390117, 0x30398132, 0x303a0147, 0x303a815b, 0x303b016f, 0x303b8180, 0x303c0199, 0x303c81b6, 0x303d01c4, 0x303d81d8, 0x303e01e8, 0x303e8201, 0x303f0211, 0x303f8224, 0x30400233, 0x3040823f, 0x30410254, 0x30418264, 0x3042027b, 0x30428288, 0x3043029b, 0x304382aa, 0x304402bf, 0x304482e0, 0x304502f3, 0x30458306, 0x3046031f, 0x3046833a, 0x30470372, 0x30478384, 0x304803a2, 0x304883b3, 0x304903c2, 0x304983da, 0x304a03ec, 0x304a8400, 0x304b0418, 0x304b842b, 0x304c0436, 0x304c8447, 0x304d0453, 0x304d8469, 0x304e0477, 0x304e848d, 0x304f049f, 0x304f84b1, 0x305004d4, 0x305084e7, 0x305104f8, 0x30518508, 0x30520520, 0x30528535, 0x3053054d, 0x30538561, 0x30540579, 0x30548592, 0x305505ab, 0x305585c8, 0x305605d3, 0x305685eb, 0x305705fb, 0x3057860c, 0x3058061f, 0x30588635, 0x3059063e, 0x30598653, 0x305a0666, 0x305a8675, 0x305b0695, 0x305b86a4, 0x305c06c5, 0x305c86e1, 0x305d06ed, 
0x305d870d, 0x305e0729, 0x305e874d, 0x305f0763, 0x305f876d, 0x306004c4, 0x3060804a, 0x30610357, 0x3061873a, 0x30620392, 0x34320bb0, 0x34328bc4, 0x34330be1, 0x34338bf4, 0x34340c03, 0x34348c5d, 0x34350c41, 0x34358c20, 0x3c320090, 0x3c328d28, 0x3c330d41, 0x3c338d5c, 0x3c340d79, 0x3c348da3, 0x3c350dbe, 0x3c358de4, 0x3c360dfd, 0x3c368e15, 0x3c370e26, 0x3c378e34, 0x3c380e41, 0x3c388e55, 0x3c390ceb, 0x3c398e78, 0x3c3a0e8c, 0x3c3a895a, 0x3c3b0e9c, 0x3c3b8eb7, 0x3c3c0ec9, 0x3c3c8efc, 0x3c3d0f06, 0x3c3d8f1a, 0x3c3e0f28, 0x3c3e8f4d, 0x3c3f0d14, 0x3c3f8f36, 0x3c4000b9, 0x3c4080f7, 0x3c410d94, 0x3c418dd3, 0x3c420edf, 0x3c428e69, 0x40321a3a, 0x40329a50, 0x40331a7e, 0x40339a88, 0x40341a9f, 0x40349abd, 0x40351acd, 0x40359adf, 0x40361aec, 0x40369af8, 0x40371b0d, 0x40379b1f, 0x40381b2a, 0x40389b3c, 0x40390f93, 0x40399b4c, 0x403a1b5f, 0x403a9b80, 0x403b1b91, 0x403b9ba1, 0x403c0071, 0x403c8090, 0x403d1c02, 0x403d9c18, 0x403e1c27, 0x403e9c5f, 0x403f1c79, 0x403f9ca1, 0x40401cb6, 0x40409cca, 0x40411d05, 0x40419d20, 0x40421d39, 0x40429d4c, 0x40431d60, 0x40439d8e, 0x40441da5, 0x404480b9, 0x40451dba, 0x40459dcc, 0x40461df0, 0x40469e10, 0x40471e1e, 0x40479e45, 0x40481eb6, 0x40489f70, 0x40491f87, 0x40499fa1, 0x404a1fb8, 0x404a9fd6, 0x404b1fee, 0x404ba01b, 0x404c2031, 0x404ca043, 0x404d2064, 0x404da09d, 0x404e20b1, 0x404ea0be, 0x404f216f, 0x404fa1e5, 0x40502254, 0x4050a268, 0x4051229b, 0x405222ab, 0x4052a2cf, 0x405322e7, 0x4053a2fa, 0x4054230f, 0x4054a332, 0x4055235d, 0x4055a39a, 0x405623bf, 0x4056a3d8, 0x405723f0, 0x4057a403, 0x40582418, 0x4058a43f, 0x4059246e, 0x4059a4ae, 0x405aa4c2, 0x405b24da, 0x405ba4eb, 0x405c24fe, 0x405ca53d, 0x405d254a, 0x405da56f, 0x405e25ad, 0x405e8afe, 0x405f25ce, 0x405fa5db, 0x406025e9, 0x4060a60b, 0x4061266c, 0x4061a6a4, 0x406226bb, 0x4062a6cc, 0x40632719, 0x4063a72e, 0x40642745, 0x4064a771, 0x4065278c, 0x4065a7a3, 0x406627bb, 0x4066a7e5, 0x40672810, 0x4067a855, 0x4068289d, 0x4068a8be, 0x406928f0, 0x4069a91e, 0x406a293f, 0x406aa95f, 0x406b2ae7, 0x406bab0a, 
0x406c2b20, 0x406cae2a, 0x406d2e59, 0x406dae81, 0x406e2eaf, 0x406eaefc, 0x406f2f55, 0x406faf8d, 0x40702fa0, 0x4070afbd, 0x4071084d, 0x4071afcf, 0x40722fe2, 0x4072b018, 0x40733030, 0x407395cd, 0x40743044, 0x4074b05e, 0x4075306f, 0x4075b083, 0x40763091, 0x4076935b, 0x407730b6, 0x4077b0f6, 0x40783111, 0x4078b14a, 0x40793161, 0x4079b177, 0x407a31a3, 0x407ab1b6, 0x407b31cb, 0x407bb1dd, 0x407c320e, 0x407cb217, 0x407d28d9, 0x407da20d, 0x407e3126, 0x407ea44f, 0x407f1e32, 0x407fa005, 0x4080217f, 0x40809e5a, 0x408122bd, 0x4081a10c, 0x40822e9a, 0x40829bad, 0x4083242a, 0x4083a756, 0x40841e6e, 0x4084a487, 0x4085250f, 0x4085a633, 0x4086258f, 0x4086a227, 0x40872ee0, 0x4087a681, 0x40881beb, 0x4088a868, 0x40891c3a, 0x40899bc7, 0x408a2b58, 0x408a99e5, 0x408b31f2, 0x408baf6a, 0x408c251f, 0x408d1f56, 0x408d9ea0, 0x408e2086, 0x408ea37a, 0x408f287c, 0x408fa64f, 0x40902831, 0x4090a561, 0x40912b40, 0x40919a1d, 0x40921c87, 0x4092af1b, 0x40932ffb, 0x4093a238, 0x40941e82, 0x4094ab71, 0x409526dd, 0x4095b183, 0x40962ec7, 0x4096a198, 0x40972283, 0x4097a0d5, 0x40981ce7, 0x4098a6f1, 0x40992f37, 0x4099a3a7, 0x409a2340, 0x409a9a01, 0x409b1edc, 0x409b9f07, 0x409c30d8, 0x409c9f2f, 0x409d2154, 0x409da122, 0x409e1d78, 0x409ea1cd, 0x409f21b5, 0x409f9ecf, 0x40a021f5, 0x40a0a0ef, 0x40a1213d, 0x40a1a49b, 0x41f42a12, 0x41f92aa4, 0x41fe2997, 0x41feac4d, 0x41ff2d7b, 0x42032a2b, 0x42082a4d, 0x4208aa89, 0x4209297b, 0x4209aac3, 0x420a29d2, 0x420aa9b2, 0x420b29f2, 0x420baa6b, 0x420c2d97, 0x420cab81, 0x420d2c34, 0x420dac6b, 0x42122c9e, 0x42172d5e, 0x4217ace0, 0x421c2d02, 0x421f2cbd, 0x42212e0f, 0x42262d41, 0x422b2ded, 0x422bac0f, 0x422c2dcf, 0x422cabc2, 0x422d2b9b, 0x422dadae, 0x422e2bee, 0x42302d1d, 0x4230ac85, 0x44320778, 0x44328787, 0x44330793, 0x443387a1, 0x443407b4, 0x443487c5, 0x443507cc, 0x443587d6, 0x443607e9, 0x443687ff, 0x44370811, 0x4437881e, 0x4438082d, 0x44388835, 0x4439084d, 0x4439885b, 0x443a086e, 0x483213a3, 0x483293b5, 0x483313cb, 0x483393e4, 0x4c321421, 0x4c329431, 0x4c331444, 0x4c339464, 
0x4c3400b9, 0x4c3480f7, 0x4c351470, 0x4c35947e, 0x4c36149a, 0x4c3694c0, 0x4c3714cf, 0x4c3794dd, 0x4c3814f2, 0x4c3894fe, 0x4c39151e, 0x4c399548, 0x4c3a1561, 0x4c3a957a, 0x4c3b0635, 0x4c3b9593, 0x4c3c15a5, 0x4c3c95b4, 0x4c3d15cd, 0x4c3d8cc6, 0x4c3e163a, 0x4c3e95dc, 0x4c3f165c, 0x4c3f935b, 0x4c4015f2, 0x4c40940d, 0x4c41162a, 0x4c4194ad, 0x4c421616, 0x4c4293f5, 0x503235d8, 0x5032b5e7, 0x503335f2, 0x5033b602, 0x5034361b, 0x5034b635, 0x50353643, 0x5035b659, 0x5036366b, 0x5036b681, 0x5037369a, 0x5037b6ad, 0x503836c5, 0x5038b6d6, 0x503936eb, 0x5039b6ff, 0x503a371f, 0x503ab735, 0x503b374d, 0x503bb75f, 0x503c377b, 0x503cb792, 0x503d37ab, 0x503db7c1, 0x503e37ce, 0x503eb7e4, 0x503f37f6, 0x503f83b3, 0x50403809, 0x5040b819, 0x50413833, 0x5041b842, 0x5042385c, 0x5042b879, 0x50433889, 0x5043b899, 0x504438b6, 0x50448469, 0x504538ca, 0x5045b8e8, 0x504638fb, 0x5046b911, 0x50473923, 0x5047b938, 0x5048395e, 0x5048b96c, 0x5049397f, 0x5049b994, 0x504a39aa, 0x504ab9ba, 0x504b39da, 0x504bb9ed, 0x504c3a10, 0x504cba3e, 0x504d3a6b, 0x504dba88, 0x504e3aa3, 0x504ebabf, 0x504f3ad1, 0x504fbae8, 0x50503af7, 0x50508729, 0x50513b0a, 0x5051b8a8, 0x50523a50, 0x58320fd1, 0x68320f93, 0x68328ceb, 0x68330cfe, 0x68338fa1, 0x68340fb1, 0x683480f7, 0x6835099a, 0x6c320f59, 0x6c328cb5, 0x6c330f64, 0x6c338f7d, 0x74320a66, 0x743280b9, 0x74330cc6, 0x783209cb, 0x783289e0, 0x783309ec, 0x78338090, 0x783409fb, 0x78348a10, 0x78350a2f, 0x78358a51, 0x78360a66, 0x78368a7c, 0x78370a8c, 0x78378aad, 0x78380ac0, 0x78388ad2, 0x78390adf, 0x78398afe, 0x783a0b13, 0x783a8b21, 0x783b0b2b, 0x783b8b3f, 0x783c0b56, 0x783c8b6b, 0x783d0b82, 0x783d8b97, 0x783e0aed, 0x783e8a9f, 0x7c321274, 0x803214c0, 0x80328090, 0x803332d4, 0x803380b9, 0x803432e3, 0x8034b24b, 0x80353269, 0x8035b2f7, 0x803632ab, 0x8036b25a, 0x8037329d, 0x8037b238, 0x803832be, 0x8038b27a, 0x8039328f, }; extern const size_t kOpenSSLReasonValuesLen; const size_t kOpenSSLReasonValuesLen = sizeof(kOpenSSLReasonValues) / sizeof(kOpenSSLReasonValues[0]); extern const char 
kOpenSSLReasonStringData[]; const char kOpenSSLReasonStringData[] = "ASN1_LENGTH_MISMATCH\0" "AUX_ERROR\0" "BAD_GET_ASN1_OBJECT_CALL\0" "BAD_OBJECT_HEADER\0" "BAD_TEMPLATE\0" "BMPSTRING_IS_WRONG_LENGTH\0" "BN_LIB\0" "BOOLEAN_IS_WRONG_LENGTH\0" "BUFFER_TOO_SMALL\0" "CONTEXT_NOT_INITIALISED\0" "DECODE_ERROR\0" "DEPTH_EXCEEDED\0" "DIGEST_AND_KEY_TYPE_NOT_SUPPORTED\0" "ENCODE_ERROR\0" "ERROR_GETTING_TIME\0" "EXPECTING_AN_ASN1_SEQUENCE\0" "EXPECTING_AN_INTEGER\0" "EXPECTING_AN_OBJECT\0" "EXPECTING_A_BOOLEAN\0" "EXPECTING_A_TIME\0" "EXPLICIT_LENGTH_MISMATCH\0" "EXPLICIT_TAG_NOT_CONSTRUCTED\0" "FIELD_MISSING\0" "FIRST_NUM_TOO_LARGE\0" "HEADER_TOO_LONG\0" "ILLEGAL_BITSTRING_FORMAT\0" "ILLEGAL_BOOLEAN\0" "ILLEGAL_CHARACTERS\0" "ILLEGAL_FORMAT\0" "ILLEGAL_HEX\0" "ILLEGAL_IMPLICIT_TAG\0" "ILLEGAL_INTEGER\0" "ILLEGAL_NESTED_TAGGING\0" "ILLEGAL_NULL\0" "ILLEGAL_NULL_VALUE\0" "ILLEGAL_OBJECT\0" "ILLEGAL_OPTIONAL_ANY\0" "ILLEGAL_OPTIONS_ON_ITEM_TEMPLATE\0" "ILLEGAL_TAGGED_ANY\0" "ILLEGAL_TIME_VALUE\0" "INTEGER_NOT_ASCII_FORMAT\0" "INTEGER_TOO_LARGE_FOR_LONG\0" "INVALID_BIT_STRING_BITS_LEFT\0" "INVALID_BIT_STRING_PADDING\0" "INVALID_BMPSTRING\0" "INVALID_DIGIT\0" "INVALID_INTEGER\0" "INVALID_MODIFIER\0" "INVALID_NUMBER\0" "INVALID_OBJECT_ENCODING\0" "INVALID_SEPARATOR\0" "INVALID_TIME_FORMAT\0" "INVALID_UNIVERSALSTRING\0" "INVALID_UTF8STRING\0" "LIST_ERROR\0" "MISSING_ASN1_EOS\0" "MISSING_EOC\0" "MISSING_SECOND_NUMBER\0" "MISSING_VALUE\0" "MSTRING_NOT_UNIVERSAL\0" "MSTRING_WRONG_TAG\0" "NESTED_ASN1_ERROR\0" "NESTED_ASN1_STRING\0" "NESTED_TOO_DEEP\0" "NON_HEX_CHARACTERS\0" "NOT_ASCII_FORMAT\0" "NOT_ENOUGH_DATA\0" "NO_MATCHING_CHOICE_TYPE\0" "NULL_IS_WRONG_LENGTH\0" "OBJECT_NOT_ASCII_FORMAT\0" "ODD_NUMBER_OF_CHARS\0" "SECOND_NUMBER_TOO_LARGE\0" "SEQUENCE_LENGTH_MISMATCH\0" "SEQUENCE_NOT_CONSTRUCTED\0" "SEQUENCE_OR_SET_NEEDS_CONFIG\0" "SHORT_LINE\0" "STREAMING_NOT_SUPPORTED\0" "STRING_TOO_LONG\0" "STRING_TOO_SHORT\0" "TAG_VALUE_TOO_HIGH\0" "TIME_NOT_ASCII_FORMAT\0" "TOO_LONG\0" 
"TYPE_NOT_CONSTRUCTED\0" "TYPE_NOT_PRIMITIVE\0" "UNEXPECTED_EOC\0" "UNIVERSALSTRING_IS_WRONG_LENGTH\0" "UNKNOWN_FORMAT\0" "UNKNOWN_MESSAGE_DIGEST_ALGORITHM\0" "UNKNOWN_SIGNATURE_ALGORITHM\0" "UNKNOWN_TAG\0" "UNSUPPORTED_ANY_DEFINED_BY_TYPE\0" "UNSUPPORTED_PUBLIC_KEY_TYPE\0" "UNSUPPORTED_TYPE\0" "WRONG_INTEGER_TYPE\0" "WRONG_PUBLIC_KEY_TYPE\0" "WRONG_TAG\0" "WRONG_TYPE\0" "BAD_FOPEN_MODE\0" "BROKEN_PIPE\0" "CONNECT_ERROR\0" "ERROR_SETTING_NBIO\0" "INVALID_ARGUMENT\0" "IN_USE\0" "KEEPALIVE\0" "NBIO_CONNECT_ERROR\0" "NO_HOSTNAME_SPECIFIED\0" "NO_PORT_SPECIFIED\0" "NO_SUCH_FILE\0" "NULL_PARAMETER\0" "SYS_LIB\0" "UNABLE_TO_CREATE_SOCKET\0" "UNINITIALIZED\0" "UNSUPPORTED_METHOD\0" "WRITE_TO_READ_ONLY_BIO\0" "ARG2_LT_ARG3\0" "BAD_ENCODING\0" "BAD_RECIPROCAL\0" "BIGNUM_TOO_LONG\0" "BITS_TOO_SMALL\0" "CALLED_WITH_EVEN_MODULUS\0" "DIV_BY_ZERO\0" "EXPAND_ON_STATIC_BIGNUM_DATA\0" "INPUT_NOT_REDUCED\0" "INVALID_INPUT\0" "INVALID_RANGE\0" "NEGATIVE_NUMBER\0" "NOT_A_SQUARE\0" "NOT_INITIALIZED\0" "NO_INVERSE\0" "PRIVATE_KEY_TOO_LARGE\0" "P_IS_NOT_PRIME\0" "TOO_MANY_ITERATIONS\0" "TOO_MANY_TEMPORARY_VARIABLES\0" "AES_KEY_SETUP_FAILED\0" "BAD_DECRYPT\0" "BAD_KEY_LENGTH\0" "CTRL_NOT_IMPLEMENTED\0" "CTRL_OPERATION_NOT_IMPLEMENTED\0" "DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH\0" "INITIALIZATION_ERROR\0" "INPUT_NOT_INITIALIZED\0" "INVALID_AD_SIZE\0" "INVALID_KEY_LENGTH\0" "INVALID_NONCE\0" "INVALID_NONCE_SIZE\0" "INVALID_OPERATION\0" "IV_TOO_LARGE\0" "NO_CIPHER_SET\0" "NO_DIRECTION_SET\0" "OUTPUT_ALIASES_INPUT\0" "TAG_TOO_LARGE\0" "TOO_LARGE\0" "UNSUPPORTED_AD_SIZE\0" "UNSUPPORTED_INPUT_SIZE\0" "UNSUPPORTED_KEY_SIZE\0" "UNSUPPORTED_NONCE_SIZE\0" "UNSUPPORTED_TAG_SIZE\0" "WRONG_FINAL_BLOCK_LENGTH\0" "LIST_CANNOT_BE_NULL\0" "MISSING_CLOSE_SQUARE_BRACKET\0" "MISSING_EQUAL_SIGN\0" "NO_CLOSE_BRACE\0" "UNABLE_TO_CREATE_NEW_SECTION\0" "VARIABLE_EXPANSION_NOT_SUPPORTED\0" "VARIABLE_EXPANSION_TOO_LONG\0" "VARIABLE_HAS_NO_VALUE\0" "BAD_GENERATOR\0" "INVALID_PARAMETERS\0" "INVALID_PUBKEY\0" 
"MODULUS_TOO_LARGE\0" "NO_PRIVATE_VALUE\0" "UNKNOWN_HASH\0" "BAD_Q_VALUE\0" "BAD_VERSION\0" "MISSING_PARAMETERS\0" "NEED_NEW_SETUP_VALUES\0" "BIGNUM_OUT_OF_RANGE\0" "COORDINATES_OUT_OF_RANGE\0" "D2I_ECPKPARAMETERS_FAILURE\0" "EC_GROUP_NEW_BY_NAME_FAILURE\0" "GROUP2PKPARAMETERS_FAILURE\0" "GROUP_MISMATCH\0" "I2D_ECPKPARAMETERS_FAILURE\0" "INCOMPATIBLE_OBJECTS\0" "INVALID_COFACTOR\0" "INVALID_COMPRESSED_POINT\0" "INVALID_COMPRESSION_BIT\0" "INVALID_ENCODING\0" "INVALID_FIELD\0" "INVALID_FORM\0" "INVALID_GROUP_ORDER\0" "INVALID_PRIVATE_KEY\0" "INVALID_SCALAR\0" "MISSING_PRIVATE_KEY\0" "NON_NAMED_CURVE\0" "PKPARAMETERS2GROUP_FAILURE\0" "POINT_AT_INFINITY\0" "POINT_IS_NOT_ON_CURVE\0" "PUBLIC_KEY_VALIDATION_FAILED\0" "SLOT_FULL\0" "UNDEFINED_GENERATOR\0" "UNKNOWN_GROUP\0" "UNKNOWN_ORDER\0" "WRONG_CURVE_PARAMETERS\0" "WRONG_ORDER\0" "KDF_FAILED\0" "POINT_ARITHMETIC_FAILURE\0" "UNKNOWN_DIGEST_LENGTH\0" "BAD_SIGNATURE\0" "NOT_IMPLEMENTED\0" "RANDOM_NUMBER_GENERATION_FAILED\0" "OPERATION_NOT_SUPPORTED\0" "COMMAND_NOT_SUPPORTED\0" "DIFFERENT_KEY_TYPES\0" "DIFFERENT_PARAMETERS\0" "EMPTY_PSK\0" "EXPECTING_AN_EC_KEY_KEY\0" "EXPECTING_AN_RSA_KEY\0" "EXPECTING_A_DH_KEY\0" "EXPECTING_A_DSA_KEY\0" "ILLEGAL_OR_UNSUPPORTED_PADDING_MODE\0" "INVALID_BUFFER_SIZE\0" "INVALID_DIGEST_LENGTH\0" "INVALID_DIGEST_TYPE\0" "INVALID_KEYBITS\0" "INVALID_MGF1_MD\0" "INVALID_PADDING_MODE\0" "INVALID_PEER_KEY\0" "INVALID_PSS_SALTLEN\0" "INVALID_SIGNATURE\0" "KEYS_NOT_SET\0" "MEMORY_LIMIT_EXCEEDED\0" "NOT_A_PRIVATE_KEY\0" "NOT_XOF_OR_INVALID_LENGTH\0" "NO_DEFAULT_DIGEST\0" "NO_KEY_SET\0" "NO_MDC2_SUPPORT\0" "NO_NID_FOR_CURVE\0" "NO_OPERATION_SET\0" "NO_PARAMETERS_SET\0" "OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE\0" "OPERATON_NOT_INITIALIZED\0" "UNKNOWN_PUBLIC_KEY_TYPE\0" "UNSUPPORTED_ALGORITHM\0" "OUTPUT_TOO_LARGE\0" "INVALID_OID_STRING\0" "UNKNOWN_NID\0" "BAD_BASE64_DECODE\0" "BAD_END_LINE\0" "BAD_IV_CHARS\0" "BAD_PASSWORD_READ\0" "CIPHER_IS_NULL\0" "ERROR_CONVERTING_PRIVATE_KEY\0" "NOT_DEK_INFO\0" 
"NOT_ENCRYPTED\0" "NOT_PROC_TYPE\0" "NO_START_LINE\0" "READ_KEY\0" "SHORT_HEADER\0" "UNSUPPORTED_CIPHER\0" "UNSUPPORTED_ENCRYPTION\0" "UNSUPPORTED_PROC_TYPE_VERSION\0" "BAD_PKCS7_VERSION\0" "NOT_PKCS7_SIGNED_DATA\0" "NO_CERTIFICATES_INCLUDED\0" "NO_CRLS_INCLUDED\0" "AMBIGUOUS_FRIENDLY_NAME\0" "BAD_ITERATION_COUNT\0" "BAD_PKCS12_DATA\0" "BAD_PKCS12_VERSION\0" "CIPHER_HAS_NO_OBJECT_IDENTIFIER\0" "CRYPT_ERROR\0" "ENCRYPT_ERROR\0" "ERROR_SETTING_CIPHER_PARAMS\0" "INCORRECT_PASSWORD\0" "INVALID_CHARACTERS\0" "KEYGEN_FAILURE\0" "KEY_GEN_ERROR\0" "METHOD_NOT_SUPPORTED\0" "MISSING_MAC\0" "MULTIPLE_PRIVATE_KEYS_IN_PKCS12\0" "PKCS12_PUBLIC_KEY_INTEGRITY_NOT_SUPPORTED\0" "PKCS12_TOO_DEEPLY_NESTED\0" "PRIVATE_KEY_DECODE_ERROR\0" "PRIVATE_KEY_ENCODE_ERROR\0" "UNKNOWN_ALGORITHM\0" "UNKNOWN_CIPHER\0" "UNKNOWN_CIPHER_ALGORITHM\0" "UNKNOWN_DIGEST\0" "UNSUPPORTED_KEYLENGTH\0" "UNSUPPORTED_KEY_DERIVATION_FUNCTION\0" "UNSUPPORTED_OPTIONS\0" "UNSUPPORTED_PRF\0" "UNSUPPORTED_PRIVATE_KEY_ALGORITHM\0" "UNSUPPORTED_SALT_TYPE\0" "BAD_E_VALUE\0" "BAD_FIXED_HEADER_DECRYPT\0" "BAD_PAD_BYTE_COUNT\0" "BAD_RSA_PARAMETERS\0" "BLOCK_TYPE_IS_NOT_01\0" "BLOCK_TYPE_IS_NOT_02\0" "BN_NOT_INITIALIZED\0" "CANNOT_RECOVER_MULTI_PRIME_KEY\0" "CRT_PARAMS_ALREADY_GIVEN\0" "CRT_VALUES_INCORRECT\0" "DATA_LEN_NOT_EQUAL_TO_MOD_LEN\0" "DATA_TOO_LARGE\0" "DATA_TOO_LARGE_FOR_KEY_SIZE\0" "DATA_TOO_LARGE_FOR_MODULUS\0" "DATA_TOO_SMALL\0" "DATA_TOO_SMALL_FOR_KEY_SIZE\0" "DIGEST_TOO_BIG_FOR_RSA_KEY\0" "D_E_NOT_CONGRUENT_TO_1\0" "D_OUT_OF_RANGE\0" "EMPTY_PUBLIC_KEY\0" "FIRST_OCTET_INVALID\0" "INCONSISTENT_SET_OF_CRT_VALUES\0" "INTERNAL_ERROR\0" "INVALID_MESSAGE_LENGTH\0" "KEY_SIZE_TOO_SMALL\0" "LAST_OCTET_INVALID\0" "MUST_HAVE_AT_LEAST_TWO_PRIMES\0" "NO_PUBLIC_EXPONENT\0" "NULL_BEFORE_BLOCK_MISSING\0" "N_NOT_EQUAL_P_Q\0" "OAEP_DECODING_ERROR\0" "ONLY_ONE_OF_P_Q_GIVEN\0" "OUTPUT_BUFFER_TOO_SMALL\0" "PADDING_CHECK_FAILED\0" "PKCS_DECODING_ERROR\0" "SLEN_CHECK_FAILED\0" "SLEN_RECOVERY_FAILED\0" "UNKNOWN_ALGORITHM_TYPE\0" 
"UNKNOWN_PADDING_TYPE\0" "VALUE_MISSING\0" "WRONG_SIGNATURE_LENGTH\0" "ALPN_MISMATCH_ON_EARLY_DATA\0" "ALPS_MISMATCH_ON_EARLY_DATA\0" "APPLICATION_DATA_ON_SHUTDOWN\0" "APP_DATA_IN_HANDSHAKE\0" "ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT\0" "BAD_ALERT\0" "BAD_CHANGE_CIPHER_SPEC\0" "BAD_DATA_RETURNED_BY_CALLBACK\0" "BAD_DH_P_LENGTH\0" "BAD_DIGEST_LENGTH\0" "BAD_ECC_CERT\0" "BAD_ECPOINT\0" "BAD_HANDSHAKE_RECORD\0" "BAD_HELLO_REQUEST\0" "BAD_LENGTH\0" "BAD_PACKET_LENGTH\0" "BAD_RSA_ENCRYPT\0" "BAD_SRTP_MKI_VALUE\0" "BAD_SRTP_PROTECTION_PROFILE_LIST\0" "BAD_SSL_FILETYPE\0" "BAD_WRITE_RETRY\0" "BIO_NOT_SET\0" "BLOCK_CIPHER_PAD_IS_WRONG\0" "CANNOT_HAVE_BOTH_PRIVKEY_AND_METHOD\0" "CANNOT_PARSE_LEAF_CERT\0" "CA_DN_LENGTH_MISMATCH\0" "CA_DN_TOO_LONG\0" "CCS_RECEIVED_EARLY\0" "CERTIFICATE_AND_PRIVATE_KEY_MISMATCH\0" "CERTIFICATE_VERIFY_FAILED\0" "CERT_CB_ERROR\0" "CERT_DECOMPRESSION_FAILED\0" "CERT_LENGTH_MISMATCH\0" "CHANNEL_ID_NOT_P256\0" "CHANNEL_ID_SIGNATURE_INVALID\0" "CIPHER_MISMATCH_ON_EARLY_DATA\0" "CIPHER_OR_HASH_UNAVAILABLE\0" "CLIENTHELLO_PARSE_FAILED\0" "CLIENTHELLO_TLSEXT\0" "CONNECTION_REJECTED\0" "CONNECTION_TYPE_NOT_SET\0" "COULD_NOT_PARSE_HINTS\0" "CUSTOM_EXTENSION_ERROR\0" "DATA_LENGTH_TOO_LONG\0" "DECRYPTION_FAILED\0" "DECRYPTION_FAILED_OR_BAD_RECORD_MAC\0" "DH_PUBLIC_VALUE_LENGTH_IS_WRONG\0" "DH_P_TOO_LONG\0" "DIGEST_CHECK_FAILED\0" "DOWNGRADE_DETECTED\0" "DTLS_MESSAGE_TOO_BIG\0" "DUPLICATE_EXTENSION\0" "DUPLICATE_KEY_SHARE\0" "DUPLICATE_SIGNATURE_ALGORITHM\0" "EARLY_DATA_NOT_IN_USE\0" "ECC_CERT_NOT_FOR_SIGNING\0" "ECH_REJECTED\0" "ECH_SERVER_CONFIG_AND_PRIVATE_KEY_MISMATCH\0" "ECH_SERVER_CONFIG_UNSUPPORTED_EXTENSION\0" "ECH_SERVER_WOULD_HAVE_NO_RETRY_CONFIGS\0" "EMPTY_HELLO_RETRY_REQUEST\0" "EMS_STATE_INCONSISTENT\0" "ENCRYPTED_LENGTH_TOO_LONG\0" "ERROR_ADDING_EXTENSION\0" "ERROR_IN_RECEIVED_CIPHER_LIST\0" "ERROR_PARSING_EXTENSION\0" "EXCESSIVE_MESSAGE_SIZE\0" "EXCESS_HANDSHAKE_DATA\0" "EXTRA_DATA_IN_MESSAGE\0" "FRAGMENT_MISMATCH\0" 
"GOT_NEXT_PROTO_WITHOUT_EXTENSION\0" "HANDSHAKE_FAILURE_ON_CLIENT_HELLO\0" "HANDSHAKE_NOT_COMPLETE\0" "HTTPS_PROXY_REQUEST\0" "HTTP_REQUEST\0" "INAPPROPRIATE_FALLBACK\0" "INCONSISTENT_CLIENT_HELLO\0" "INCONSISTENT_ECH_NEGOTIATION\0" "INVALID_ALPN_PROTOCOL\0" "INVALID_ALPN_PROTOCOL_LIST\0" "INVALID_ALPS_CODEPOINT\0" "INVALID_CLIENT_HELLO_INNER\0" "INVALID_COMMAND\0" "INVALID_COMPRESSION_LIST\0" "INVALID_DELEGATED_CREDENTIAL\0" "INVALID_ECH_CONFIG_LIST\0" "INVALID_ECH_PUBLIC_NAME\0" "INVALID_MESSAGE\0" "INVALID_OUTER_EXTENSION\0" "INVALID_OUTER_RECORD_TYPE\0" "INVALID_SCT_LIST\0" "INVALID_SIGNATURE_ALGORITHM\0" "INVALID_SSL_SESSION\0" "INVALID_TICKET_KEYS_LENGTH\0" "KEY_USAGE_BIT_INCORRECT\0" "LENGTH_MISMATCH\0" "MISSING_EXTENSION\0" "MISSING_KEY_SHARE\0" "MISSING_RSA_CERTIFICATE\0" "MISSING_TMP_DH_KEY\0" "MISSING_TMP_ECDH_KEY\0" "MIXED_SPECIAL_OPERATOR_WITH_GROUPS\0" "MTU_TOO_SMALL\0" "NEGOTIATED_ALPS_WITHOUT_ALPN\0" "NEGOTIATED_BOTH_NPN_AND_ALPN\0" "NEGOTIATED_TB_WITHOUT_EMS_OR_RI\0" "NESTED_GROUP\0" "NO_APPLICATION_PROTOCOL\0" "NO_CERTIFICATES_RETURNED\0" "NO_CERTIFICATE_ASSIGNED\0" "NO_CERTIFICATE_SET\0" "NO_CIPHERS_AVAILABLE\0" "NO_CIPHERS_PASSED\0" "NO_CIPHERS_SPECIFIED\0" "NO_CIPHER_MATCH\0" "NO_COMMON_SIGNATURE_ALGORITHMS\0" "NO_COMPRESSION_SPECIFIED\0" "NO_GROUPS_SPECIFIED\0" "NO_MATCHING_ISSUER\0" "NO_METHOD_SPECIFIED\0" "NO_PRIVATE_KEY_ASSIGNED\0" "NO_RENEGOTIATION\0" "NO_REQUIRED_DIGEST\0" "NO_SHARED_CIPHER\0" "NO_SHARED_GROUP\0" "NO_SUPPORTED_VERSIONS_ENABLED\0" "NULL_SSL_CTX\0" "NULL_SSL_METHOD_PASSED\0" "OCSP_CB_ERROR\0" "OLD_SESSION_CIPHER_NOT_RETURNED\0" "OLD_SESSION_PRF_HASH_MISMATCH\0" "OLD_SESSION_VERSION_NOT_RETURNED\0" "PARSE_TLSEXT\0" "PATH_TOO_LONG\0" "PEER_DID_NOT_RETURN_A_CERTIFICATE\0" "PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE\0" "PRE_SHARED_KEY_MUST_BE_LAST\0" "PRIVATE_KEY_OPERATION_FAILED\0" "PROTOCOL_IS_SHUTDOWN\0" "PSK_IDENTITY_BINDER_COUNT_MISMATCH\0" "PSK_IDENTITY_NOT_FOUND\0" "PSK_NO_CLIENT_CB\0" "PSK_NO_SERVER_CB\0" 
"QUIC_INTERNAL_ERROR\0" "QUIC_TRANSPORT_PARAMETERS_MISCONFIGURED\0" "READ_TIMEOUT_EXPIRED\0" "RECORD_LENGTH_MISMATCH\0" "RECORD_TOO_LARGE\0" "RENEGOTIATION_EMS_MISMATCH\0" "RENEGOTIATION_ENCODING_ERR\0" "RENEGOTIATION_MISMATCH\0" "REQUIRED_CIPHER_MISSING\0" "RESUMED_EMS_SESSION_WITHOUT_EMS_EXTENSION\0" "RESUMED_NON_EMS_SESSION_WITH_EMS_EXTENSION\0" "SCSV_RECEIVED_WHEN_RENEGOTIATING\0" "SECOND_SERVERHELLO_VERSION_MISMATCH\0" "SERVERHELLO_TLSEXT\0" "SERVER_CERT_CHANGED\0" "SERVER_ECHOED_INVALID_SESSION_ID\0" "SESSION_ID_CONTEXT_UNINITIALIZED\0" "SESSION_MAY_NOT_BE_CREATED\0" "SHUTDOWN_WHILE_IN_INIT\0" "SIGNATURE_ALGORITHMS_EXTENSION_SENT_BY_SERVER\0" "SRTP_COULD_NOT_ALLOCATE_PROFILES\0" "SRTP_UNKNOWN_PROTECTION_PROFILE\0" "SSL3_EXT_INVALID_SERVERNAME\0" "SSLV3_ALERT_BAD_CERTIFICATE\0" "SSLV3_ALERT_BAD_RECORD_MAC\0" "SSLV3_ALERT_CERTIFICATE_EXPIRED\0" "SSLV3_ALERT_CERTIFICATE_REVOKED\0" "SSLV3_ALERT_CERTIFICATE_UNKNOWN\0" "SSLV3_ALERT_CLOSE_NOTIFY\0" "SSLV3_ALERT_DECOMPRESSION_FAILURE\0" "SSLV3_ALERT_HANDSHAKE_FAILURE\0" "SSLV3_ALERT_ILLEGAL_PARAMETER\0" "SSLV3_ALERT_NO_CERTIFICATE\0" "SSLV3_ALERT_UNEXPECTED_MESSAGE\0" "SSLV3_ALERT_UNSUPPORTED_CERTIFICATE\0" "SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION\0" "SSL_HANDSHAKE_FAILURE\0" "SSL_SESSION_ID_CONTEXT_TOO_LONG\0" "SSL_SESSION_ID_TOO_LONG\0" "TICKET_ENCRYPTION_FAILED\0" "TLS13_DOWNGRADE\0" "TLSV1_ALERT_ACCESS_DENIED\0" "TLSV1_ALERT_BAD_CERTIFICATE_HASH_VALUE\0" "TLSV1_ALERT_BAD_CERTIFICATE_STATUS_RESPONSE\0" "TLSV1_ALERT_CERTIFICATE_REQUIRED\0" "TLSV1_ALERT_CERTIFICATE_UNOBTAINABLE\0" "TLSV1_ALERT_DECODE_ERROR\0" "TLSV1_ALERT_DECRYPTION_FAILED\0" "TLSV1_ALERT_DECRYPT_ERROR\0" "TLSV1_ALERT_ECH_REQUIRED\0" "TLSV1_ALERT_EXPORT_RESTRICTION\0" "TLSV1_ALERT_INAPPROPRIATE_FALLBACK\0" "TLSV1_ALERT_INSUFFICIENT_SECURITY\0" "TLSV1_ALERT_INTERNAL_ERROR\0" "TLSV1_ALERT_NO_APPLICATION_PROTOCOL\0" "TLSV1_ALERT_NO_RENEGOTIATION\0" "TLSV1_ALERT_PROTOCOL_VERSION\0" "TLSV1_ALERT_RECORD_OVERFLOW\0" "TLSV1_ALERT_UNKNOWN_CA\0" 
"TLSV1_ALERT_UNKNOWN_PSK_IDENTITY\0" "TLSV1_ALERT_UNRECOGNIZED_NAME\0" "TLSV1_ALERT_UNSUPPORTED_EXTENSION\0" "TLSV1_ALERT_USER_CANCELLED\0" "TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST\0" "TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG\0" "TOO_MANY_EMPTY_FRAGMENTS\0" "TOO_MANY_KEY_UPDATES\0" "TOO_MANY_WARNING_ALERTS\0" "TOO_MUCH_READ_EARLY_DATA\0" "TOO_MUCH_SKIPPED_EARLY_DATA\0" "UNABLE_TO_FIND_ECDH_PARAMETERS\0" "UNCOMPRESSED_CERT_TOO_LARGE\0" "UNEXPECTED_COMPATIBILITY_MODE\0" "UNEXPECTED_EXTENSION\0" "UNEXPECTED_EXTENSION_ON_EARLY_DATA\0" "UNEXPECTED_MESSAGE\0" "UNEXPECTED_OPERATOR_IN_GROUP\0" "UNEXPECTED_RECORD\0" "UNKNOWN_ALERT_TYPE\0" "UNKNOWN_CERTIFICATE_TYPE\0" "UNKNOWN_CERT_COMPRESSION_ALG\0" "UNKNOWN_CIPHER_RETURNED\0" "UNKNOWN_CIPHER_TYPE\0" "UNKNOWN_KEY_EXCHANGE_TYPE\0" "UNKNOWN_PROTOCOL\0" "UNKNOWN_SSL_VERSION\0" "UNKNOWN_STATE\0" "UNSAFE_LEGACY_RENEGOTIATION_DISABLED\0" "UNSUPPORTED_COMPRESSION_ALGORITHM\0" "UNSUPPORTED_ECH_SERVER_CONFIG\0" "UNSUPPORTED_ELLIPTIC_CURVE\0" "UNSUPPORTED_PROTOCOL\0" "UNSUPPORTED_PROTOCOL_FOR_CUSTOM_KEY\0" "WRONG_CERTIFICATE_TYPE\0" "WRONG_CIPHER_RETURNED\0" "WRONG_CURVE\0" "WRONG_ENCRYPTION_LEVEL_RECEIVED\0" "WRONG_MESSAGE_TYPE\0" "WRONG_SIGNATURE_TYPE\0" "WRONG_SSL_VERSION\0" "WRONG_VERSION_NUMBER\0" "WRONG_VERSION_ON_EARLY_DATA\0" "X509_LIB\0" "X509_VERIFICATION_SETUP_PROBLEMS\0" "BAD_VALIDITY_CHECK\0" "DECODE_FAILURE\0" "INVALID_KEY_ID\0" "INVALID_METADATA\0" "INVALID_METADATA_KEY\0" "INVALID_PROOF\0" "INVALID_TOKEN\0" "NO_KEYS_CONFIGURED\0" "NO_SRR_KEY_CONFIGURED\0" "OVER_BATCHSIZE\0" "SRR_SIGNATURE_ERROR\0" "TOO_MANY_KEYS\0" "AKID_MISMATCH\0" "BAD_X509_FILETYPE\0" "BASE64_DECODE_ERROR\0" "CANT_CHECK_DH_KEY\0" "CERT_ALREADY_IN_HASH_TABLE\0" "CRL_ALREADY_DELTA\0" "CRL_VERIFY_FAILURE\0" "DELTA_CRL_WITHOUT_CRL_NUMBER\0" "IDP_MISMATCH\0" "INVALID_DIRECTORY\0" "INVALID_FIELD_FOR_VERSION\0" "INVALID_FIELD_NAME\0" "INVALID_PARAMETER\0" "INVALID_POLICY_EXTENSION\0" "INVALID_PSS_PARAMETERS\0" "INVALID_TRUST\0" "INVALID_VERSION\0" 
"ISSUER_MISMATCH\0" "KEY_TYPE_MISMATCH\0" "KEY_VALUES_MISMATCH\0" "LOADING_CERT_DIR\0" "LOADING_DEFAULTS\0" "NAME_TOO_LONG\0" "NEWER_CRL_NOT_NEWER\0" "NO_CERTIFICATE_FOUND\0" "NO_CERTIFICATE_OR_CRL_FOUND\0" "NO_CERT_SET_FOR_US_TO_VERIFY\0" "NO_CRL_FOUND\0" "NO_CRL_NUMBER\0" "PUBLIC_KEY_DECODE_ERROR\0" "PUBLIC_KEY_ENCODE_ERROR\0" "SHOULD_RETRY\0" "SIGNATURE_ALGORITHM_MISMATCH\0" "UNKNOWN_KEY_TYPE\0" "UNKNOWN_PURPOSE_ID\0" "UNKNOWN_TRUST_ID\0" "WRONG_LOOKUP_TYPE\0" "BAD_IP_ADDRESS\0" "BAD_OBJECT\0" "BN_DEC2BN_ERROR\0" "BN_TO_ASN1_INTEGER_ERROR\0" "CANNOT_FIND_FREE_FUNCTION\0" "DIRNAME_ERROR\0" "DISTPOINT_ALREADY_SET\0" "DUPLICATE_ZONE_ID\0" "ERROR_CONVERTING_ZONE\0" "ERROR_CREATING_EXTENSION\0" "ERROR_IN_EXTENSION\0" "EXPECTED_A_SECTION_NAME\0" "EXTENSION_EXISTS\0" "EXTENSION_NAME_ERROR\0" "EXTENSION_NOT_FOUND\0" "EXTENSION_SETTING_NOT_SUPPORTED\0" "EXTENSION_VALUE_ERROR\0" "ILLEGAL_EMPTY_EXTENSION\0" "ILLEGAL_HEX_DIGIT\0" "INCORRECT_POLICY_SYNTAX_TAG\0" "INVALID_BOOLEAN_STRING\0" "INVALID_EXTENSION_STRING\0" "INVALID_MULTIPLE_RDNS\0" "INVALID_NAME\0" "INVALID_NULL_ARGUMENT\0" "INVALID_NULL_NAME\0" "INVALID_NULL_VALUE\0" "INVALID_NUMBERS\0" "INVALID_OBJECT_IDENTIFIER\0" "INVALID_OPTION\0" "INVALID_POLICY_IDENTIFIER\0" "INVALID_PROXY_POLICY_SETTING\0" "INVALID_PURPOSE\0" "INVALID_SECTION\0" "INVALID_SYNTAX\0" "INVALID_VALUE\0" "ISSUER_DECODE_ERROR\0" "NEED_ORGANIZATION_AND_NUMBERS\0" "NO_CONFIG_DATABASE\0" "NO_ISSUER_CERTIFICATE\0" "NO_ISSUER_DETAILS\0" "NO_POLICY_IDENTIFIER\0" "NO_PROXY_CERT_POLICY_LANGUAGE_DEFINED\0" "NO_PUBLIC_KEY\0" "NO_SUBJECT_DETAILS\0" "ODD_NUMBER_OF_DIGITS\0" "OPERATION_NOT_DEFINED\0" "OTHERNAME_ERROR\0" "POLICY_LANGUAGE_ALREADY_DEFINED\0" "POLICY_PATH_LENGTH\0" "POLICY_PATH_LENGTH_ALREADY_DEFINED\0" "POLICY_WHEN_PROXY_LANGUAGE_REQUIRES_NO_POLICY\0" "SECTION_NOT_FOUND\0" "TRAILING_DATA_IN_EXTENSION\0" "UNABLE_TO_GET_ISSUER_DETAILS\0" "UNABLE_TO_GET_ISSUER_KEYID\0" "UNKNOWN_BIT_STRING_ARGUMENT\0" "UNKNOWN_EXTENSION\0" "UNKNOWN_EXTENSION_NAME\0" 
"UNKNOWN_OPTION\0" "UNSUPPORTED_OPTION\0" "USER_TOO_LONG\0" ""; ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/md5-586-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) .text .globl _md5_block_asm_data_order .private_extern _md5_block_asm_data_order .align 4 _md5_block_asm_data_order: L_md5_block_asm_data_order_begin: pushl %esi pushl %edi movl 12(%esp),%edi movl 16(%esp),%esi movl 20(%esp),%ecx pushl %ebp shll $6,%ecx pushl %ebx addl %esi,%ecx subl $64,%ecx movl (%edi),%eax pushl %ecx movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx L000start: # R0 section movl %ecx,%edi movl (%esi),%ebp # R0 0 xorl %edx,%edi andl %ebx,%edi leal 3614090360(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 4(%esi),%ebp addl %ebx,%eax # R0 1 xorl %ecx,%edi andl %eax,%edi leal 3905402710(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 8(%esi),%ebp addl %eax,%edx # R0 2 xorl %ebx,%edi andl %edx,%edi leal 606105819(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 12(%esi),%ebp addl %edx,%ecx # R0 3 xorl %eax,%edi andl %ecx,%edi leal 3250441966(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 16(%esi),%ebp addl %ecx,%ebx # R0 4 xorl %edx,%edi andl %ebx,%edi leal 4118548399(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 20(%esi),%ebp addl %ebx,%eax # R0 5 xorl %ecx,%edi andl %eax,%edi leal 1200080426(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 24(%esi),%ebp addl %eax,%edx # R0 6 xorl %ebx,%edi andl %edx,%edi leal 2821735955(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx 
movl 28(%esi),%ebp addl %edx,%ecx # R0 7 xorl %eax,%edi andl %ecx,%edi leal 4249261313(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 32(%esi),%ebp addl %ecx,%ebx # R0 8 xorl %edx,%edi andl %ebx,%edi leal 1770035416(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 36(%esi),%ebp addl %ebx,%eax # R0 9 xorl %ecx,%edi andl %eax,%edi leal 2336552879(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 40(%esi),%ebp addl %eax,%edx # R0 10 xorl %ebx,%edi andl %edx,%edi leal 4294925233(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 44(%esi),%ebp addl %edx,%ecx # R0 11 xorl %eax,%edi andl %ecx,%edi leal 2304563134(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 48(%esi),%ebp addl %ecx,%ebx # R0 12 xorl %edx,%edi andl %ebx,%edi leal 1804603682(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 52(%esi),%ebp addl %ebx,%eax # R0 13 xorl %ecx,%edi andl %eax,%edi leal 4254626195(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 56(%esi),%ebp addl %eax,%edx # R0 14 xorl %ebx,%edi andl %edx,%edi leal 2792965006(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 60(%esi),%ebp addl %edx,%ecx # R0 15 xorl %eax,%edi andl %ecx,%edi leal 1236535329(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 4(%esi),%ebp addl %ecx,%ebx # R1 section # R1 16 leal 4129170786(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 24(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax # R1 17 leal 3225465664(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 44(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx # R1 18 leal 643717713(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl (%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll 
$14,%ecx addl %edx,%ecx # R1 19 leal 3921069994(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 20(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx # R1 20 leal 3593408605(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 40(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax # R1 21 leal 38016083(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 60(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx # R1 22 leal 3634488961(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 16(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx # R1 23 leal 3889429448(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 36(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx # R1 24 leal 568446438(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 56(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax # R1 25 leal 3275163606(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 12(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx # R1 26 leal 4107603335(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 32(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx # R1 27 leal 1163531501(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 52(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx # R1 28 leal 2850285829(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 8(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax # R1 29 leal 4243563512(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 28(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx # R1 30 leal 1735328473(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 48(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx # R1 
31 leal 2368359562(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 20(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx # R2 section # R2 32 xorl %edx,%edi xorl %ebx,%edi leal 4294588738(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl 32(%esi),%ebp movl %ebx,%edi # R2 33 leal 2272392833(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 44(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx # R2 34 xorl %ebx,%edi xorl %edx,%edi leal 1839030562(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 56(%esi),%ebp movl %edx,%edi # R2 35 leal 4259657740(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl 4(%esi),%ebp addl %edi,%ebx movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx # R2 36 xorl %edx,%edi xorl %ebx,%edi leal 2763975236(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl 16(%esi),%ebp movl %ebx,%edi # R2 37 leal 1272893353(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 28(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx # R2 38 xorl %ebx,%edi xorl %edx,%edi leal 4139469664(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 40(%esi),%ebp movl %edx,%edi # R2 39 leal 3200236656(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl 52(%esi),%ebp addl %edi,%ebx movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx # R2 40 xorl %edx,%edi xorl %ebx,%edi leal 681279174(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl (%esi),%ebp movl %ebx,%edi # R2 41 leal 3936430074(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 12(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx # R2 42 xorl %ebx,%edi xorl %edx,%edi leal 3572445317(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 24(%esi),%ebp movl %edx,%edi # R2 43 leal 76029189(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl 36(%esi),%ebp addl %edi,%ebx movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx # R2 44 xorl %edx,%edi xorl 
%ebx,%edi leal 3654602809(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl 48(%esi),%ebp movl %ebx,%edi # R2 45 leal 3873151461(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 60(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx # R2 46 xorl %ebx,%edi xorl %edx,%edi leal 530742520(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 8(%esi),%ebp movl %edx,%edi # R2 47 leal 3299628645(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl (%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $23,%ebx addl %ecx,%ebx # R3 section # R3 48 xorl %edx,%edi orl %ebx,%edi leal 4096336452(%eax,%ebp,1),%eax xorl %ecx,%edi movl 28(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax # R3 49 orl %eax,%edi leal 1126891415(%edx,%ebp,1),%edx xorl %ebx,%edi movl 56(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx # R3 50 orl %edx,%edi leal 2878612391(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 20(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx # R3 51 orl %ecx,%edi leal 4237533241(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 48(%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx # R3 52 orl %ebx,%edi leal 1700485571(%eax,%ebp,1),%eax xorl %ecx,%edi movl 12(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax # R3 53 orl %eax,%edi leal 2399980690(%edx,%ebp,1),%edx xorl %ebx,%edi movl 40(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx # R3 54 orl %edx,%edi leal 4293915773(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 4(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx # R3 55 orl %ecx,%edi leal 2240044497(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 32(%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx # R3 56 orl %ebx,%edi leal 1873313359(%eax,%ebp,1),%eax xorl %ecx,%edi movl 60(%esi),%ebp addl %edi,%eax 
movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax # R3 57 orl %eax,%edi leal 4264355552(%edx,%ebp,1),%edx xorl %ebx,%edi movl 24(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx # R3 58 orl %edx,%edi leal 2734768916(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 52(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx # R3 59 orl %ecx,%edi leal 1309151649(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 16(%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx # R3 60 orl %ebx,%edi leal 4149444226(%eax,%ebp,1),%eax xorl %ecx,%edi movl 44(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax # R3 61 orl %eax,%edi leal 3174756917(%edx,%ebp,1),%edx xorl %ebx,%edi movl 8(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx # R3 62 orl %edx,%edi leal 718787259(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 36(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx # R3 63 orl %ecx,%edi leal 3951481745(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 24(%esp),%ebp addl %edi,%ebx addl $64,%esi roll $21,%ebx movl (%ebp),%edi addl %ecx,%ebx addl %edi,%eax movl 4(%ebp),%edi addl %edi,%ebx movl 8(%ebp),%edi addl %edi,%ecx movl 12(%ebp),%edi addl %edi,%edx movl %eax,(%ebp) movl %ebx,4(%ebp) movl (%esp),%edi movl %ecx,8(%ebp) movl %edx,12(%ebp) cmpl %esi,%edi jae L000start popl %eax popl %ebx popl %ebp popl %edi popl %esi ret #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/md5-586-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl md5_block_asm_data_order .hidden md5_block_asm_data_order .type md5_block_asm_data_order,@function .align 16 md5_block_asm_data_order: .L_md5_block_asm_data_order_begin: pushl %esi pushl %edi movl 12(%esp),%edi movl 16(%esp),%esi movl 20(%esp),%ecx pushl %ebp shll $6,%ecx pushl %ebx addl %esi,%ecx subl $64,%ecx movl (%edi),%eax pushl %ecx movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx .L000start: movl %ecx,%edi movl (%esi),%ebp xorl %edx,%edi andl %ebx,%edi leal 3614090360(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 4(%esi),%ebp addl %ebx,%eax xorl %ecx,%edi andl %eax,%edi leal 3905402710(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 8(%esi),%ebp addl %eax,%edx xorl %ebx,%edi andl %edx,%edi leal 606105819(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 12(%esi),%ebp addl %edx,%ecx xorl %eax,%edi andl %ecx,%edi leal 3250441966(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 16(%esi),%ebp addl %ecx,%ebx xorl %edx,%edi andl %ebx,%edi leal 4118548399(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 20(%esi),%ebp addl %ebx,%eax xorl %ecx,%edi andl %eax,%edi leal 1200080426(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 24(%esi),%ebp addl %eax,%edx xorl %ebx,%edi andl %edx,%edi leal 2821735955(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 28(%esi),%ebp addl %edx,%ecx xorl %eax,%edi andl %ecx,%edi leal 4249261313(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 32(%esi),%ebp addl %ecx,%ebx xorl %edx,%edi andl %ebx,%edi leal 1770035416(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 36(%esi),%ebp addl %ebx,%eax xorl %ecx,%edi andl %eax,%edi leal 
2336552879(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 40(%esi),%ebp addl %eax,%edx xorl %ebx,%edi andl %edx,%edi leal 4294925233(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 44(%esi),%ebp addl %edx,%ecx xorl %eax,%edi andl %ecx,%edi leal 2304563134(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 48(%esi),%ebp addl %ecx,%ebx xorl %edx,%edi andl %ebx,%edi leal 1804603682(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 52(%esi),%ebp addl %ebx,%eax xorl %ecx,%edi andl %eax,%edi leal 4254626195(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 56(%esi),%ebp addl %eax,%edx xorl %ebx,%edi andl %edx,%edi leal 2792965006(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 60(%esi),%ebp addl %edx,%ecx xorl %eax,%edi andl %ecx,%edi leal 1236535329(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 4(%esi),%ebp addl %ecx,%ebx leal 4129170786(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 24(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax leal 3225465664(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 44(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx leal 643717713(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl (%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx leal 3921069994(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 20(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx leal 3593408605(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 40(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax leal 38016083(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 60(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx leal 
3634488961(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 16(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx leal 3889429448(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 36(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx leal 568446438(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 56(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax leal 3275163606(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 12(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx leal 4107603335(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 32(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx leal 1163531501(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 52(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx leal 2850285829(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 8(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax leal 4243563512(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 28(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx leal 1735328473(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 48(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx leal 2368359562(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 20(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx xorl %edx,%edi xorl %ebx,%edi leal 4294588738(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl 32(%esi),%ebp movl %ebx,%edi leal 2272392833(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 44(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx xorl %ebx,%edi xorl %edx,%edi leal 1839030562(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 56(%esi),%ebp movl %edx,%edi leal 
4259657740(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl 4(%esi),%ebp addl %edi,%ebx movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx xorl %edx,%edi xorl %ebx,%edi leal 2763975236(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl 16(%esi),%ebp movl %ebx,%edi leal 1272893353(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 28(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx xorl %ebx,%edi xorl %edx,%edi leal 4139469664(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 40(%esi),%ebp movl %edx,%edi leal 3200236656(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl 52(%esi),%ebp addl %edi,%ebx movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx xorl %edx,%edi xorl %ebx,%edi leal 681279174(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl (%esi),%ebp movl %ebx,%edi leal 3936430074(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 12(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx xorl %ebx,%edi xorl %edx,%edi leal 3572445317(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 24(%esi),%ebp movl %edx,%edi leal 76029189(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl 36(%esi),%ebp addl %edi,%ebx movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx xorl %edx,%edi xorl %ebx,%edi leal 3654602809(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl 48(%esi),%ebp movl %ebx,%edi leal 3873151461(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 60(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx xorl %ebx,%edi xorl %edx,%edi leal 530742520(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 8(%esi),%ebp movl %edx,%edi leal 3299628645(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl (%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $23,%ebx addl %ecx,%ebx xorl %edx,%edi orl %ebx,%edi leal 4096336452(%eax,%ebp,1),%eax xorl %ecx,%edi movl 28(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi 
addl %ebx,%eax orl %eax,%edi leal 1126891415(%edx,%ebp,1),%edx xorl %ebx,%edi movl 56(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx orl %edx,%edi leal 2878612391(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 20(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx orl %ecx,%edi leal 4237533241(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 48(%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx orl %ebx,%edi leal 1700485571(%eax,%ebp,1),%eax xorl %ecx,%edi movl 12(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax orl %eax,%edi leal 2399980690(%edx,%ebp,1),%edx xorl %ebx,%edi movl 40(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx orl %edx,%edi leal 4293915773(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 4(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx orl %ecx,%edi leal 2240044497(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 32(%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx orl %ebx,%edi leal 1873313359(%eax,%ebp,1),%eax xorl %ecx,%edi movl 60(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax orl %eax,%edi leal 4264355552(%edx,%ebp,1),%edx xorl %ebx,%edi movl 24(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx orl %edx,%edi leal 2734768916(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 52(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx orl %ecx,%edi leal 1309151649(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 16(%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx orl %ebx,%edi leal 4149444226(%eax,%ebp,1),%eax xorl %ecx,%edi movl 44(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax orl %eax,%edi leal 3174756917(%edx,%ebp,1),%edx xorl %ebx,%edi movl 8(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl 
%ebx,%edi addl %eax,%edx orl %edx,%edi leal 718787259(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 36(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx orl %ecx,%edi leal 3951481745(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 24(%esp),%ebp addl %edi,%ebx addl $64,%esi roll $21,%ebx movl (%ebp),%edi addl %ecx,%ebx addl %edi,%eax movl 4(%ebp),%edi addl %edi,%ebx movl 8(%ebp),%edi addl %edi,%ecx movl 12(%ebp),%edi addl %edi,%edx movl %eax,(%ebp) movl %ebx,4(%ebp) movl (%esp),%edi movl %ecx,8(%ebp) movl %edx,12(%ebp) cmpl %esi,%edi jae .L000start popl %eax popl %ebx popl %ebp popl %edi popl %esi ret .size md5_block_asm_data_order,.-.L_md5_block_asm_data_order_begin #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/md5-x86_64-apple.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. 
#include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .p2align 4 .globl _md5_block_asm_data_order .private_extern _md5_block_asm_data_order _md5_block_asm_data_order: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r14 pushq %r15 L$prologue: movq %rdi,%rbp shlq $6,%rdx leaq (%rsi,%rdx,1),%rdi movl 0(%rbp),%eax movl 4(%rbp),%ebx movl 8(%rbp),%ecx movl 12(%rbp),%edx cmpq %rdi,%rsi je L$end L$loop: movl %eax,%r8d movl %ebx,%r9d movl %ecx,%r14d movl %edx,%r15d movl 0(%rsi),%r10d movl %edx,%r11d xorl %ecx,%r11d leal -680876936(%rax,%r10,1),%eax andl %ebx,%r11d xorl %edx,%r11d movl 4(%rsi),%r10d addl %r11d,%eax roll $7,%eax movl %ecx,%r11d addl %ebx,%eax xorl %ebx,%r11d leal -389564586(%rdx,%r10,1),%edx andl %eax,%r11d xorl %ecx,%r11d movl 8(%rsi),%r10d addl %r11d,%edx roll $12,%edx movl %ebx,%r11d addl %eax,%edx xorl %eax,%r11d leal 606105819(%rcx,%r10,1),%ecx andl %edx,%r11d xorl %ebx,%r11d movl 12(%rsi),%r10d addl %r11d,%ecx roll $17,%ecx movl %eax,%r11d addl %edx,%ecx xorl %edx,%r11d leal -1044525330(%rbx,%r10,1),%ebx andl %ecx,%r11d xorl %eax,%r11d movl 16(%rsi),%r10d addl %r11d,%ebx roll $22,%ebx movl %edx,%r11d addl %ecx,%ebx xorl %ecx,%r11d leal -176418897(%rax,%r10,1),%eax andl %ebx,%r11d xorl %edx,%r11d movl 20(%rsi),%r10d addl %r11d,%eax roll $7,%eax movl %ecx,%r11d addl %ebx,%eax xorl %ebx,%r11d leal 1200080426(%rdx,%r10,1),%edx andl %eax,%r11d xorl %ecx,%r11d movl 24(%rsi),%r10d addl %r11d,%edx roll $12,%edx movl %ebx,%r11d addl %eax,%edx xorl %eax,%r11d leal -1473231341(%rcx,%r10,1),%ecx andl %edx,%r11d xorl %ebx,%r11d movl 28(%rsi),%r10d addl %r11d,%ecx roll $17,%ecx movl %eax,%r11d addl %edx,%ecx xorl %edx,%r11d leal -45705983(%rbx,%r10,1),%ebx andl %ecx,%r11d xorl %eax,%r11d movl 32(%rsi),%r10d addl %r11d,%ebx roll $22,%ebx movl %edx,%r11d addl %ecx,%ebx xorl %ecx,%r11d leal 1770035416(%rax,%r10,1),%eax andl %ebx,%r11d xorl %edx,%r11d movl 36(%rsi),%r10d addl %r11d,%eax roll $7,%eax movl %ecx,%r11d addl %ebx,%eax 
xorl %ebx,%r11d leal -1958414417(%rdx,%r10,1),%edx andl %eax,%r11d xorl %ecx,%r11d movl 40(%rsi),%r10d addl %r11d,%edx roll $12,%edx movl %ebx,%r11d addl %eax,%edx xorl %eax,%r11d leal -42063(%rcx,%r10,1),%ecx andl %edx,%r11d xorl %ebx,%r11d movl 44(%rsi),%r10d addl %r11d,%ecx roll $17,%ecx movl %eax,%r11d addl %edx,%ecx xorl %edx,%r11d leal -1990404162(%rbx,%r10,1),%ebx andl %ecx,%r11d xorl %eax,%r11d movl 48(%rsi),%r10d addl %r11d,%ebx roll $22,%ebx movl %edx,%r11d addl %ecx,%ebx xorl %ecx,%r11d leal 1804603682(%rax,%r10,1),%eax andl %ebx,%r11d xorl %edx,%r11d movl 52(%rsi),%r10d addl %r11d,%eax roll $7,%eax movl %ecx,%r11d addl %ebx,%eax xorl %ebx,%r11d leal -40341101(%rdx,%r10,1),%edx andl %eax,%r11d xorl %ecx,%r11d movl 56(%rsi),%r10d addl %r11d,%edx roll $12,%edx movl %ebx,%r11d addl %eax,%edx xorl %eax,%r11d leal -1502002290(%rcx,%r10,1),%ecx andl %edx,%r11d xorl %ebx,%r11d movl 60(%rsi),%r10d addl %r11d,%ecx roll $17,%ecx movl %eax,%r11d addl %edx,%ecx xorl %edx,%r11d leal 1236535329(%rbx,%r10,1),%ebx andl %ecx,%r11d xorl %eax,%r11d movl 0(%rsi),%r10d addl %r11d,%ebx roll $22,%ebx movl %edx,%r11d addl %ecx,%ebx movl 4(%rsi),%r10d movl %edx,%r11d movl %edx,%r12d notl %r11d leal -165796510(%rax,%r10,1),%eax andl %ebx,%r12d andl %ecx,%r11d movl 24(%rsi),%r10d orl %r11d,%r12d movl %ecx,%r11d addl %r12d,%eax movl %ecx,%r12d roll $5,%eax addl %ebx,%eax notl %r11d leal -1069501632(%rdx,%r10,1),%edx andl %eax,%r12d andl %ebx,%r11d movl 44(%rsi),%r10d orl %r11d,%r12d movl %ebx,%r11d addl %r12d,%edx movl %ebx,%r12d roll $9,%edx addl %eax,%edx notl %r11d leal 643717713(%rcx,%r10,1),%ecx andl %edx,%r12d andl %eax,%r11d movl 0(%rsi),%r10d orl %r11d,%r12d movl %eax,%r11d addl %r12d,%ecx movl %eax,%r12d roll $14,%ecx addl %edx,%ecx notl %r11d leal -373897302(%rbx,%r10,1),%ebx andl %ecx,%r12d andl %edx,%r11d movl 20(%rsi),%r10d orl %r11d,%r12d movl %edx,%r11d addl %r12d,%ebx movl %edx,%r12d roll $20,%ebx addl %ecx,%ebx notl %r11d leal -701558691(%rax,%r10,1),%eax andl 
%ebx,%r12d andl %ecx,%r11d movl 40(%rsi),%r10d orl %r11d,%r12d movl %ecx,%r11d addl %r12d,%eax movl %ecx,%r12d roll $5,%eax addl %ebx,%eax notl %r11d leal 38016083(%rdx,%r10,1),%edx andl %eax,%r12d andl %ebx,%r11d movl 60(%rsi),%r10d orl %r11d,%r12d movl %ebx,%r11d addl %r12d,%edx movl %ebx,%r12d roll $9,%edx addl %eax,%edx notl %r11d leal -660478335(%rcx,%r10,1),%ecx andl %edx,%r12d andl %eax,%r11d movl 16(%rsi),%r10d orl %r11d,%r12d movl %eax,%r11d addl %r12d,%ecx movl %eax,%r12d roll $14,%ecx addl %edx,%ecx notl %r11d leal -405537848(%rbx,%r10,1),%ebx andl %ecx,%r12d andl %edx,%r11d movl 36(%rsi),%r10d orl %r11d,%r12d movl %edx,%r11d addl %r12d,%ebx movl %edx,%r12d roll $20,%ebx addl %ecx,%ebx notl %r11d leal 568446438(%rax,%r10,1),%eax andl %ebx,%r12d andl %ecx,%r11d movl 56(%rsi),%r10d orl %r11d,%r12d movl %ecx,%r11d addl %r12d,%eax movl %ecx,%r12d roll $5,%eax addl %ebx,%eax notl %r11d leal -1019803690(%rdx,%r10,1),%edx andl %eax,%r12d andl %ebx,%r11d movl 12(%rsi),%r10d orl %r11d,%r12d movl %ebx,%r11d addl %r12d,%edx movl %ebx,%r12d roll $9,%edx addl %eax,%edx notl %r11d leal -187363961(%rcx,%r10,1),%ecx andl %edx,%r12d andl %eax,%r11d movl 32(%rsi),%r10d orl %r11d,%r12d movl %eax,%r11d addl %r12d,%ecx movl %eax,%r12d roll $14,%ecx addl %edx,%ecx notl %r11d leal 1163531501(%rbx,%r10,1),%ebx andl %ecx,%r12d andl %edx,%r11d movl 52(%rsi),%r10d orl %r11d,%r12d movl %edx,%r11d addl %r12d,%ebx movl %edx,%r12d roll $20,%ebx addl %ecx,%ebx notl %r11d leal -1444681467(%rax,%r10,1),%eax andl %ebx,%r12d andl %ecx,%r11d movl 8(%rsi),%r10d orl %r11d,%r12d movl %ecx,%r11d addl %r12d,%eax movl %ecx,%r12d roll $5,%eax addl %ebx,%eax notl %r11d leal -51403784(%rdx,%r10,1),%edx andl %eax,%r12d andl %ebx,%r11d movl 28(%rsi),%r10d orl %r11d,%r12d movl %ebx,%r11d addl %r12d,%edx movl %ebx,%r12d roll $9,%edx addl %eax,%edx notl %r11d leal 1735328473(%rcx,%r10,1),%ecx andl %edx,%r12d andl %eax,%r11d movl 48(%rsi),%r10d orl %r11d,%r12d movl %eax,%r11d addl %r12d,%ecx movl 
%eax,%r12d roll $14,%ecx addl %edx,%ecx notl %r11d leal -1926607734(%rbx,%r10,1),%ebx andl %ecx,%r12d andl %edx,%r11d movl 0(%rsi),%r10d orl %r11d,%r12d movl %edx,%r11d addl %r12d,%ebx movl %edx,%r12d roll $20,%ebx addl %ecx,%ebx movl 20(%rsi),%r10d movl %ecx,%r11d leal -378558(%rax,%r10,1),%eax movl 32(%rsi),%r10d xorl %edx,%r11d xorl %ebx,%r11d addl %r11d,%eax roll $4,%eax movl %ebx,%r11d addl %ebx,%eax leal -2022574463(%rdx,%r10,1),%edx movl 44(%rsi),%r10d xorl %ecx,%r11d xorl %eax,%r11d addl %r11d,%edx roll $11,%edx movl %eax,%r11d addl %eax,%edx leal 1839030562(%rcx,%r10,1),%ecx movl 56(%rsi),%r10d xorl %ebx,%r11d xorl %edx,%r11d addl %r11d,%ecx roll $16,%ecx movl %edx,%r11d addl %edx,%ecx leal -35309556(%rbx,%r10,1),%ebx movl 4(%rsi),%r10d xorl %eax,%r11d xorl %ecx,%r11d addl %r11d,%ebx roll $23,%ebx movl %ecx,%r11d addl %ecx,%ebx leal -1530992060(%rax,%r10,1),%eax movl 16(%rsi),%r10d xorl %edx,%r11d xorl %ebx,%r11d addl %r11d,%eax roll $4,%eax movl %ebx,%r11d addl %ebx,%eax leal 1272893353(%rdx,%r10,1),%edx movl 28(%rsi),%r10d xorl %ecx,%r11d xorl %eax,%r11d addl %r11d,%edx roll $11,%edx movl %eax,%r11d addl %eax,%edx leal -155497632(%rcx,%r10,1),%ecx movl 40(%rsi),%r10d xorl %ebx,%r11d xorl %edx,%r11d addl %r11d,%ecx roll $16,%ecx movl %edx,%r11d addl %edx,%ecx leal -1094730640(%rbx,%r10,1),%ebx movl 52(%rsi),%r10d xorl %eax,%r11d xorl %ecx,%r11d addl %r11d,%ebx roll $23,%ebx movl %ecx,%r11d addl %ecx,%ebx leal 681279174(%rax,%r10,1),%eax movl 0(%rsi),%r10d xorl %edx,%r11d xorl %ebx,%r11d addl %r11d,%eax roll $4,%eax movl %ebx,%r11d addl %ebx,%eax leal -358537222(%rdx,%r10,1),%edx movl 12(%rsi),%r10d xorl %ecx,%r11d xorl %eax,%r11d addl %r11d,%edx roll $11,%edx movl %eax,%r11d addl %eax,%edx leal -722521979(%rcx,%r10,1),%ecx movl 24(%rsi),%r10d xorl %ebx,%r11d xorl %edx,%r11d addl %r11d,%ecx roll $16,%ecx movl %edx,%r11d addl %edx,%ecx leal 76029189(%rbx,%r10,1),%ebx movl 36(%rsi),%r10d xorl %eax,%r11d xorl %ecx,%r11d addl %r11d,%ebx roll $23,%ebx movl 
%ecx,%r11d addl %ecx,%ebx leal -640364487(%rax,%r10,1),%eax movl 48(%rsi),%r10d xorl %edx,%r11d xorl %ebx,%r11d addl %r11d,%eax roll $4,%eax movl %ebx,%r11d addl %ebx,%eax leal -421815835(%rdx,%r10,1),%edx movl 60(%rsi),%r10d xorl %ecx,%r11d xorl %eax,%r11d addl %r11d,%edx roll $11,%edx movl %eax,%r11d addl %eax,%edx leal 530742520(%rcx,%r10,1),%ecx movl 8(%rsi),%r10d xorl %ebx,%r11d xorl %edx,%r11d addl %r11d,%ecx roll $16,%ecx movl %edx,%r11d addl %edx,%ecx leal -995338651(%rbx,%r10,1),%ebx movl 0(%rsi),%r10d xorl %eax,%r11d xorl %ecx,%r11d addl %r11d,%ebx roll $23,%ebx movl %ecx,%r11d addl %ecx,%ebx movl 0(%rsi),%r10d movl $0xffffffff,%r11d xorl %edx,%r11d leal -198630844(%rax,%r10,1),%eax orl %ebx,%r11d xorl %ecx,%r11d addl %r11d,%eax movl 28(%rsi),%r10d movl $0xffffffff,%r11d roll $6,%eax xorl %ecx,%r11d addl %ebx,%eax leal 1126891415(%rdx,%r10,1),%edx orl %eax,%r11d xorl %ebx,%r11d addl %r11d,%edx movl 56(%rsi),%r10d movl $0xffffffff,%r11d roll $10,%edx xorl %ebx,%r11d addl %eax,%edx leal -1416354905(%rcx,%r10,1),%ecx orl %edx,%r11d xorl %eax,%r11d addl %r11d,%ecx movl 20(%rsi),%r10d movl $0xffffffff,%r11d roll $15,%ecx xorl %eax,%r11d addl %edx,%ecx leal -57434055(%rbx,%r10,1),%ebx orl %ecx,%r11d xorl %edx,%r11d addl %r11d,%ebx movl 48(%rsi),%r10d movl $0xffffffff,%r11d roll $21,%ebx xorl %edx,%r11d addl %ecx,%ebx leal 1700485571(%rax,%r10,1),%eax orl %ebx,%r11d xorl %ecx,%r11d addl %r11d,%eax movl 12(%rsi),%r10d movl $0xffffffff,%r11d roll $6,%eax xorl %ecx,%r11d addl %ebx,%eax leal -1894986606(%rdx,%r10,1),%edx orl %eax,%r11d xorl %ebx,%r11d addl %r11d,%edx movl 40(%rsi),%r10d movl $0xffffffff,%r11d roll $10,%edx xorl %ebx,%r11d addl %eax,%edx leal -1051523(%rcx,%r10,1),%ecx orl %edx,%r11d xorl %eax,%r11d addl %r11d,%ecx movl 4(%rsi),%r10d movl $0xffffffff,%r11d roll $15,%ecx xorl %eax,%r11d addl %edx,%ecx leal -2054922799(%rbx,%r10,1),%ebx orl %ecx,%r11d xorl %edx,%r11d addl %r11d,%ebx movl 32(%rsi),%r10d movl $0xffffffff,%r11d roll $21,%ebx xorl 
%edx,%r11d addl %ecx,%ebx leal 1873313359(%rax,%r10,1),%eax orl %ebx,%r11d xorl %ecx,%r11d addl %r11d,%eax movl 60(%rsi),%r10d movl $0xffffffff,%r11d roll $6,%eax xorl %ecx,%r11d addl %ebx,%eax leal -30611744(%rdx,%r10,1),%edx orl %eax,%r11d xorl %ebx,%r11d addl %r11d,%edx movl 24(%rsi),%r10d movl $0xffffffff,%r11d roll $10,%edx xorl %ebx,%r11d addl %eax,%edx leal -1560198380(%rcx,%r10,1),%ecx orl %edx,%r11d xorl %eax,%r11d addl %r11d,%ecx movl 52(%rsi),%r10d movl $0xffffffff,%r11d roll $15,%ecx xorl %eax,%r11d addl %edx,%ecx leal 1309151649(%rbx,%r10,1),%ebx orl %ecx,%r11d xorl %edx,%r11d addl %r11d,%ebx movl 16(%rsi),%r10d movl $0xffffffff,%r11d roll $21,%ebx xorl %edx,%r11d addl %ecx,%ebx leal -145523070(%rax,%r10,1),%eax orl %ebx,%r11d xorl %ecx,%r11d addl %r11d,%eax movl 44(%rsi),%r10d movl $0xffffffff,%r11d roll $6,%eax xorl %ecx,%r11d addl %ebx,%eax leal -1120210379(%rdx,%r10,1),%edx orl %eax,%r11d xorl %ebx,%r11d addl %r11d,%edx movl 8(%rsi),%r10d movl $0xffffffff,%r11d roll $10,%edx xorl %ebx,%r11d addl %eax,%edx leal 718787259(%rcx,%r10,1),%ecx orl %edx,%r11d xorl %eax,%r11d addl %r11d,%ecx movl 36(%rsi),%r10d movl $0xffffffff,%r11d roll $15,%ecx xorl %eax,%r11d addl %edx,%ecx leal -343485551(%rbx,%r10,1),%ebx orl %ecx,%r11d xorl %edx,%r11d addl %r11d,%ebx movl 0(%rsi),%r10d movl $0xffffffff,%r11d roll $21,%ebx xorl %edx,%r11d addl %ecx,%ebx addl %r8d,%eax addl %r9d,%ebx addl %r14d,%ecx addl %r15d,%edx addq $64,%rsi cmpq %rdi,%rsi jb L$loop L$end: movl %eax,0(%rbp) movl %ebx,4(%rbp) movl %ecx,8(%rbp) movl %edx,12(%rbp) movq (%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r12 movq 24(%rsp),%rbx movq 32(%rsp),%rbp addq $40,%rsp L$epilogue: ret #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/gen/crypto/md5-x86_64-linux.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL // This file is generated from a 
similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .align 16 .globl md5_block_asm_data_order .hidden md5_block_asm_data_order .type md5_block_asm_data_order,@function md5_block_asm_data_order: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset r12,-32 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset r14,-40 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset r15,-48 .Lprologue: movq %rdi,%rbp shlq $6,%rdx leaq (%rsi,%rdx,1),%rdi movl 0(%rbp),%eax movl 4(%rbp),%ebx movl 8(%rbp),%ecx movl 12(%rbp),%edx cmpq %rdi,%rsi je .Lend .Lloop: movl %eax,%r8d movl %ebx,%r9d movl %ecx,%r14d movl %edx,%r15d movl 0(%rsi),%r10d movl %edx,%r11d xorl %ecx,%r11d leal -680876936(%rax,%r10,1),%eax andl %ebx,%r11d xorl %edx,%r11d movl 4(%rsi),%r10d addl %r11d,%eax roll $7,%eax movl %ecx,%r11d addl %ebx,%eax xorl %ebx,%r11d leal -389564586(%rdx,%r10,1),%edx andl %eax,%r11d xorl %ecx,%r11d movl 8(%rsi),%r10d addl %r11d,%edx roll $12,%edx movl %ebx,%r11d addl %eax,%edx xorl %eax,%r11d leal 606105819(%rcx,%r10,1),%ecx andl %edx,%r11d xorl %ebx,%r11d movl 12(%rsi),%r10d addl %r11d,%ecx roll $17,%ecx movl %eax,%r11d addl %edx,%ecx xorl %edx,%r11d leal -1044525330(%rbx,%r10,1),%ebx andl %ecx,%r11d xorl %eax,%r11d movl 16(%rsi),%r10d addl %r11d,%ebx roll $22,%ebx movl %edx,%r11d addl %ecx,%ebx xorl %ecx,%r11d leal -176418897(%rax,%r10,1),%eax andl %ebx,%r11d xorl %edx,%r11d movl 20(%rsi),%r10d addl %r11d,%eax roll $7,%eax movl %ecx,%r11d addl %ebx,%eax xorl %ebx,%r11d leal 1200080426(%rdx,%r10,1),%edx andl %eax,%r11d xorl %ecx,%r11d movl 24(%rsi),%r10d addl %r11d,%edx roll $12,%edx movl %ebx,%r11d addl %eax,%edx xorl %eax,%r11d leal -1473231341(%rcx,%r10,1),%ecx andl %edx,%r11d xorl %ebx,%r11d movl 28(%rsi),%r10d addl %r11d,%ecx roll $17,%ecx movl 
%eax,%r11d addl %edx,%ecx xorl %edx,%r11d leal -45705983(%rbx,%r10,1),%ebx andl %ecx,%r11d xorl %eax,%r11d movl 32(%rsi),%r10d addl %r11d,%ebx roll $22,%ebx movl %edx,%r11d addl %ecx,%ebx xorl %ecx,%r11d leal 1770035416(%rax,%r10,1),%eax andl %ebx,%r11d xorl %edx,%r11d movl 36(%rsi),%r10d addl %r11d,%eax roll $7,%eax movl %ecx,%r11d addl %ebx,%eax xorl %ebx,%r11d leal -1958414417(%rdx,%r10,1),%edx andl %eax,%r11d xorl %ecx,%r11d movl 40(%rsi),%r10d addl %r11d,%edx roll $12,%edx movl %ebx,%r11d addl %eax,%edx xorl %eax,%r11d leal -42063(%rcx,%r10,1),%ecx andl %edx,%r11d xorl %ebx,%r11d movl 44(%rsi),%r10d addl %r11d,%ecx roll $17,%ecx movl %eax,%r11d addl %edx,%ecx xorl %edx,%r11d leal -1990404162(%rbx,%r10,1),%ebx andl %ecx,%r11d xorl %eax,%r11d movl 48(%rsi),%r10d addl %r11d,%ebx roll $22,%ebx movl %edx,%r11d addl %ecx,%ebx xorl %ecx,%r11d leal 1804603682(%rax,%r10,1),%eax andl %ebx,%r11d xorl %edx,%r11d movl 52(%rsi),%r10d addl %r11d,%eax roll $7,%eax movl %ecx,%r11d addl %ebx,%eax xorl %ebx,%r11d leal -40341101(%rdx,%r10,1),%edx andl %eax,%r11d xorl %ecx,%r11d movl 56(%rsi),%r10d addl %r11d,%edx roll $12,%edx movl %ebx,%r11d addl %eax,%edx xorl %eax,%r11d leal -1502002290(%rcx,%r10,1),%ecx andl %edx,%r11d xorl %ebx,%r11d movl 60(%rsi),%r10d addl %r11d,%ecx roll $17,%ecx movl %eax,%r11d addl %edx,%ecx xorl %edx,%r11d leal 1236535329(%rbx,%r10,1),%ebx andl %ecx,%r11d xorl %eax,%r11d movl 0(%rsi),%r10d addl %r11d,%ebx roll $22,%ebx movl %edx,%r11d addl %ecx,%ebx movl 4(%rsi),%r10d movl %edx,%r11d movl %edx,%r12d notl %r11d leal -165796510(%rax,%r10,1),%eax andl %ebx,%r12d andl %ecx,%r11d movl 24(%rsi),%r10d orl %r11d,%r12d movl %ecx,%r11d addl %r12d,%eax movl %ecx,%r12d roll $5,%eax addl %ebx,%eax notl %r11d leal -1069501632(%rdx,%r10,1),%edx andl %eax,%r12d andl %ebx,%r11d movl 44(%rsi),%r10d orl %r11d,%r12d movl %ebx,%r11d addl %r12d,%edx movl %ebx,%r12d roll $9,%edx addl %eax,%edx notl %r11d leal 643717713(%rcx,%r10,1),%ecx andl %edx,%r12d andl %eax,%r11d movl 
0(%rsi),%r10d orl %r11d,%r12d movl %eax,%r11d addl %r12d,%ecx movl %eax,%r12d roll $14,%ecx addl %edx,%ecx notl %r11d leal -373897302(%rbx,%r10,1),%ebx andl %ecx,%r12d andl %edx,%r11d movl 20(%rsi),%r10d orl %r11d,%r12d movl %edx,%r11d addl %r12d,%ebx movl %edx,%r12d roll $20,%ebx addl %ecx,%ebx notl %r11d leal -701558691(%rax,%r10,1),%eax andl %ebx,%r12d andl %ecx,%r11d movl 40(%rsi),%r10d orl %r11d,%r12d movl %ecx,%r11d addl %r12d,%eax movl %ecx,%r12d roll $5,%eax addl %ebx,%eax notl %r11d leal 38016083(%rdx,%r10,1),%edx andl %eax,%r12d andl %ebx,%r11d movl 60(%rsi),%r10d orl %r11d,%r12d movl %ebx,%r11d addl %r12d,%edx movl %ebx,%r12d roll $9,%edx addl %eax,%edx notl %r11d leal -660478335(%rcx,%r10,1),%ecx andl %edx,%r12d andl %eax,%r11d movl 16(%rsi),%r10d orl %r11d,%r12d movl %eax,%r11d addl %r12d,%ecx movl %eax,%r12d roll $14,%ecx addl %edx,%ecx notl %r11d leal -405537848(%rbx,%r10,1),%ebx andl %ecx,%r12d andl %edx,%r11d movl 36(%rsi),%r10d orl %r11d,%r12d movl %edx,%r11d addl %r12d,%ebx movl %edx,%r12d roll $20,%ebx addl %ecx,%ebx notl %r11d leal 568446438(%rax,%r10,1),%eax andl %ebx,%r12d andl %ecx,%r11d movl 56(%rsi),%r10d orl %r11d,%r12d movl %ecx,%r11d addl %r12d,%eax movl %ecx,%r12d roll $5,%eax addl %ebx,%eax notl %r11d leal -1019803690(%rdx,%r10,1),%edx andl %eax,%r12d andl %ebx,%r11d movl 12(%rsi),%r10d orl %r11d,%r12d movl %ebx,%r11d addl %r12d,%edx movl %ebx,%r12d roll $9,%edx addl %eax,%edx notl %r11d leal -187363961(%rcx,%r10,1),%ecx andl %edx,%r12d andl %eax,%r11d movl 32(%rsi),%r10d orl %r11d,%r12d movl %eax,%r11d addl %r12d,%ecx movl %eax,%r12d roll $14,%ecx addl %edx,%ecx notl %r11d leal 1163531501(%rbx,%r10,1),%ebx andl %ecx,%r12d andl %edx,%r11d movl 52(%rsi),%r10d orl %r11d,%r12d movl %edx,%r11d addl %r12d,%ebx movl %edx,%r12d roll $20,%ebx addl %ecx,%ebx notl %r11d leal -1444681467(%rax,%r10,1),%eax andl %ebx,%r12d andl %ecx,%r11d movl 8(%rsi),%r10d orl %r11d,%r12d movl %ecx,%r11d addl %r12d,%eax movl %ecx,%r12d roll $5,%eax addl %ebx,%eax 
notl %r11d leal -51403784(%rdx,%r10,1),%edx andl %eax,%r12d andl %ebx,%r11d movl 28(%rsi),%r10d orl %r11d,%r12d movl %ebx,%r11d addl %r12d,%edx movl %ebx,%r12d roll $9,%edx addl %eax,%edx notl %r11d leal 1735328473(%rcx,%r10,1),%ecx andl %edx,%r12d andl %eax,%r11d movl 48(%rsi),%r10d orl %r11d,%r12d movl %eax,%r11d addl %r12d,%ecx movl %eax,%r12d roll $14,%ecx addl %edx,%ecx notl %r11d leal -1926607734(%rbx,%r10,1),%ebx andl %ecx,%r12d andl %edx,%r11d movl 0(%rsi),%r10d orl %r11d,%r12d movl %edx,%r11d addl %r12d,%ebx movl %edx,%r12d roll $20,%ebx addl %ecx,%ebx movl 20(%rsi),%r10d movl %ecx,%r11d leal -378558(%rax,%r10,1),%eax movl 32(%rsi),%r10d xorl %edx,%r11d xorl %ebx,%r11d addl %r11d,%eax roll $4,%eax movl %ebx,%r11d addl %ebx,%eax leal -2022574463(%rdx,%r10,1),%edx movl 44(%rsi),%r10d xorl %ecx,%r11d xorl %eax,%r11d addl %r11d,%edx roll $11,%edx movl %eax,%r11d addl %eax,%edx leal 1839030562(%rcx,%r10,1),%ecx movl 56(%rsi),%r10d xorl %ebx,%r11d xorl %edx,%r11d addl %r11d,%ecx roll $16,%ecx movl %edx,%r11d addl %edx,%ecx leal -35309556(%rbx,%r10,1),%ebx movl 4(%rsi),%r10d xorl %eax,%r11d xorl %ecx,%r11d addl %r11d,%ebx roll $23,%ebx movl %ecx,%r11d addl %ecx,%ebx leal -1530992060(%rax,%r10,1),%eax movl 16(%rsi),%r10d xorl %edx,%r11d xorl %ebx,%r11d addl %r11d,%eax roll $4,%eax movl %ebx,%r11d addl %ebx,%eax leal 1272893353(%rdx,%r10,1),%edx movl 28(%rsi),%r10d xorl %ecx,%r11d xorl %eax,%r11d addl %r11d,%edx roll $11,%edx movl %eax,%r11d addl %eax,%edx leal -155497632(%rcx,%r10,1),%ecx movl 40(%rsi),%r10d xorl %ebx,%r11d xorl %edx,%r11d addl %r11d,%ecx roll $16,%ecx movl %edx,%r11d addl %edx,%ecx leal -1094730640(%rbx,%r10,1),%ebx movl 52(%rsi),%r10d xorl %eax,%r11d xorl %ecx,%r11d addl %r11d,%ebx roll $23,%ebx movl %ecx,%r11d addl %ecx,%ebx leal 681279174(%rax,%r10,1),%eax movl 0(%rsi),%r10d xorl %edx,%r11d xorl %ebx,%r11d addl %r11d,%eax roll $4,%eax movl %ebx,%r11d addl %ebx,%eax leal -358537222(%rdx,%r10,1),%edx movl 12(%rsi),%r10d xorl %ecx,%r11d xorl 
%eax,%r11d addl %r11d,%edx roll $11,%edx movl %eax,%r11d addl %eax,%edx leal -722521979(%rcx,%r10,1),%ecx movl 24(%rsi),%r10d xorl %ebx,%r11d xorl %edx,%r11d addl %r11d,%ecx roll $16,%ecx movl %edx,%r11d addl %edx,%ecx leal 76029189(%rbx,%r10,1),%ebx movl 36(%rsi),%r10d xorl %eax,%r11d xorl %ecx,%r11d addl %r11d,%ebx roll $23,%ebx movl %ecx,%r11d addl %ecx,%ebx leal -640364487(%rax,%r10,1),%eax movl 48(%rsi),%r10d xorl %edx,%r11d xorl %ebx,%r11d addl %r11d,%eax roll $4,%eax movl %ebx,%r11d addl %ebx,%eax leal -421815835(%rdx,%r10,1),%edx movl 60(%rsi),%r10d xorl %ecx,%r11d xorl %eax,%r11d addl %r11d,%edx roll $11,%edx movl %eax,%r11d addl %eax,%edx leal 530742520(%rcx,%r10,1),%ecx movl 8(%rsi),%r10d xorl %ebx,%r11d xorl %edx,%r11d addl %r11d,%ecx roll $16,%ecx movl %edx,%r11d addl %edx,%ecx leal -995338651(%rbx,%r10,1),%ebx movl 0(%rsi),%r10d xorl %eax,%r11d xorl %ecx,%r11d addl %r11d,%ebx roll $23,%ebx movl %ecx,%r11d addl %ecx,%ebx movl 0(%rsi),%r10d movl $0xffffffff,%r11d xorl %edx,%r11d leal -198630844(%rax,%r10,1),%eax orl %ebx,%r11d xorl %ecx,%r11d addl %r11d,%eax movl 28(%rsi),%r10d movl $0xffffffff,%r11d roll $6,%eax xorl %ecx,%r11d addl %ebx,%eax leal 1126891415(%rdx,%r10,1),%edx orl %eax,%r11d xorl %ebx,%r11d addl %r11d,%edx movl 56(%rsi),%r10d movl $0xffffffff,%r11d roll $10,%edx xorl %ebx,%r11d addl %eax,%edx leal -1416354905(%rcx,%r10,1),%ecx orl %edx,%r11d xorl %eax,%r11d addl %r11d,%ecx movl 20(%rsi),%r10d movl $0xffffffff,%r11d roll $15,%ecx xorl %eax,%r11d addl %edx,%ecx leal -57434055(%rbx,%r10,1),%ebx orl %ecx,%r11d xorl %edx,%r11d addl %r11d,%ebx movl 48(%rsi),%r10d movl $0xffffffff,%r11d roll $21,%ebx xorl %edx,%r11d addl %ecx,%ebx leal 1700485571(%rax,%r10,1),%eax orl %ebx,%r11d xorl %ecx,%r11d addl %r11d,%eax movl 12(%rsi),%r10d movl $0xffffffff,%r11d roll $6,%eax xorl %ecx,%r11d addl %ebx,%eax leal -1894986606(%rdx,%r10,1),%edx orl %eax,%r11d xorl %ebx,%r11d addl %r11d,%edx movl 40(%rsi),%r10d movl $0xffffffff,%r11d roll $10,%edx xorl 
%ebx,%r11d addl %eax,%edx leal -1051523(%rcx,%r10,1),%ecx orl %edx,%r11d xorl %eax,%r11d addl %r11d,%ecx movl 4(%rsi),%r10d movl $0xffffffff,%r11d roll $15,%ecx xorl %eax,%r11d addl %edx,%ecx leal -2054922799(%rbx,%r10,1),%ebx orl %ecx,%r11d xorl %edx,%r11d addl %r11d,%ebx movl 32(%rsi),%r10d movl $0xffffffff,%r11d roll $21,%ebx xorl %edx,%r11d addl %ecx,%ebx leal 1873313359(%rax,%r10,1),%eax orl %ebx,%r11d xorl %ecx,%r11d addl %r11d,%eax movl 60(%rsi),%r10d movl $0xffffffff,%r11d roll $6,%eax xorl %ecx,%r11d addl %ebx,%eax leal -30611744(%rdx,%r10,1),%edx orl %eax,%r11d xorl %ebx,%r11d addl %r11d,%edx movl 24(%rsi),%r10d movl $0xffffffff,%r11d roll $10,%edx xorl %ebx,%r11d addl %eax,%edx leal -1560198380(%rcx,%r10,1),%ecx orl %edx,%r11d xorl %eax,%r11d addl %r11d,%ecx movl 52(%rsi),%r10d movl $0xffffffff,%r11d roll $15,%ecx xorl %eax,%r11d addl %edx,%ecx leal 1309151649(%rbx,%r10,1),%ebx orl %ecx,%r11d xorl %edx,%r11d addl %r11d,%ebx movl 16(%rsi),%r10d movl $0xffffffff,%r11d roll $21,%ebx xorl %edx,%r11d addl %ecx,%ebx leal -145523070(%rax,%r10,1),%eax orl %ebx,%r11d xorl %ecx,%r11d addl %r11d,%eax movl 44(%rsi),%r10d movl $0xffffffff,%r11d roll $6,%eax xorl %ecx,%r11d addl %ebx,%eax leal -1120210379(%rdx,%r10,1),%edx orl %eax,%r11d xorl %ebx,%r11d addl %r11d,%edx movl 8(%rsi),%r10d movl $0xffffffff,%r11d roll $10,%edx xorl %ebx,%r11d addl %eax,%edx leal 718787259(%rcx,%r10,1),%ecx orl %edx,%r11d xorl %eax,%r11d addl %r11d,%ecx movl 36(%rsi),%r10d movl $0xffffffff,%r11d roll $15,%ecx xorl %eax,%r11d addl %edx,%ecx leal -343485551(%rbx,%r10,1),%ebx orl %ecx,%r11d xorl %edx,%r11d addl %r11d,%ebx movl 0(%rsi),%r10d movl $0xffffffff,%r11d roll $21,%ebx xorl %edx,%r11d addl %ecx,%ebx addl %r8d,%eax addl %r9d,%ebx addl %r14d,%ecx addl %r15d,%edx addq $64,%rsi cmpq %rdi,%rsi jb .Lloop .Lend: movl %eax,0(%rbp) movl %ebx,4(%rbp) movl %ecx,8(%rbp) movl %edx,12(%rbp) movq (%rsp),%r15 .cfi_restore r15 movq 8(%rsp),%r14 .cfi_restore r14 movq 16(%rsp),%r12 .cfi_restore r12 
movq 24(%rsp),%rbx .cfi_restore rbx movq 32(%rsp),%rbp .cfi_restore rbp addq $40,%rsp .cfi_adjust_cfa_offset -40 .Lepilogue: ret .cfi_endproc .size md5_block_asm_data_order,.-md5_block_asm_data_order #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/hash.txt ================================================ This directory is derived from BoringSSL cloned from https://boringssl.googlesource.com/boringssl at revision 817ab07ebb53da35afea409ab9328f578492832d ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL.h ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2019 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// #ifndef C_NIO_BORINGSSL_H #define C_NIO_BORINGSSL_H #include "CNIOBoringSSL_aead.h" #include "CNIOBoringSSL_aes.h" #include "CNIOBoringSSL_arm_arch.h" #include "CNIOBoringSSL_asm_base.h" #include "CNIOBoringSSL_asn1_mac.h" #include "CNIOBoringSSL_asn1t.h" #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_bio.h" #include "CNIOBoringSSL_blake2.h" #include "CNIOBoringSSL_blowfish.h" #include "CNIOBoringSSL_bn.h" #include "CNIOBoringSSL_boringssl_prefix_symbols.h" #include "CNIOBoringSSL_boringssl_prefix_symbols_asm.h" #include "CNIOBoringSSL_cast.h" #include "CNIOBoringSSL_chacha.h" #include "CNIOBoringSSL_ctrdrbg.h" #include "CNIOBoringSSL_cmac.h" #include "CNIOBoringSSL_conf.h" #include "CNIOBoringSSL_cpu.h" #include "CNIOBoringSSL_curve25519.h" #include 
"CNIOBoringSSL_des.h" #include "CNIOBoringSSL_dtls1.h" #include "CNIOBoringSSL_e_os2.h" #include "CNIOBoringSSL_ec.h" #include "CNIOBoringSSL_ec_key.h" #include "CNIOBoringSSL_ecdsa.h" #include "CNIOBoringSSL_err.h" #include "CNIOBoringSSL_evp.h" #include "CNIOBoringSSL_hkdf.h" #include "CNIOBoringSSL_hmac.h" #include "CNIOBoringSSL_hpke.h" #include "CNIOBoringSSL_hrss.h" #include "CNIOBoringSSL_kdf.h" #include "CNIOBoringSSL_md4.h" #include "CNIOBoringSSL_md5.h" #include "CNIOBoringSSL_mldsa.h" #include "CNIOBoringSSL_mlkem.h" #include "CNIOBoringSSL_obj_mac.h" #include "CNIOBoringSSL_objects.h" #include "CNIOBoringSSL_opensslv.h" #include "CNIOBoringSSL_ossl_typ.h" #include "CNIOBoringSSL_pkcs12.h" #include "CNIOBoringSSL_poly1305.h" #include "CNIOBoringSSL_rand.h" #include "CNIOBoringSSL_rc4.h" #include "CNIOBoringSSL_ripemd.h" #include "CNIOBoringSSL_rsa.h" #include "CNIOBoringSSL_safestack.h" #include "CNIOBoringSSL_service_indicator.h" #include "CNIOBoringSSL_sha.h" #include "CNIOBoringSSL_siphash.h" #include "CNIOBoringSSL_slhdsa.h" #include "CNIOBoringSSL_srtp.h" #include "CNIOBoringSSL_ssl.h" #include "CNIOBoringSSL_time.h" #include "CNIOBoringSSL_trust_token.h" #include "CNIOBoringSSL_type_check.h" #include "CNIOBoringSSL_x509_vfy.h" #include "CNIOBoringSSL_x509v3.h" #include "experimental/CNIOBoringSSL_kyber.h" #endif // C_NIO_BORINGSSL_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_aead.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_AEAD_H #define OPENSSL_HEADER_AEAD_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // Authenticated Encryption with Additional Data. // // AEAD couples confidentiality and integrity in a single primitive. AEAD // algorithms take a key and then can seal and open individual messages. Each // message has a unique, per-message nonce and, optionally, additional data // which is authenticated but not included in the ciphertext. // // The |EVP_AEAD_CTX_init| function initialises an |EVP_AEAD_CTX| structure and // performs any precomputation needed to use |aead| with |key|. The length of // the key, |key_len|, is given in bytes. // // The |tag_len| argument contains the length of the tags, in bytes, and allows // for the processing of truncated authenticators. A zero value indicates that // the default tag length should be used and this is defined as // |EVP_AEAD_DEFAULT_TAG_LENGTH| in order to make the code clear. Using // truncated tags increases an attacker's chance of creating a valid forgery. // Be aware that the attacker's chance may increase more than exponentially as // would naively be expected. // // When no longer needed, the initialised |EVP_AEAD_CTX| structure must be // passed to |EVP_AEAD_CTX_cleanup|, which will deallocate any memory used. // // With an |EVP_AEAD_CTX| in hand, one can seal and open messages. These // operations are intended to meet the standard notions of privacy and // authenticity for authenticated encryption. 
For formal definitions see // Bellare and Namprempre, "Authenticated encryption: relations among notions // and analysis of the generic composition paradigm," Lecture Notes in Computer // Science 1976 (2000), 531–545, // http://www-cse.ucsd.edu/~mihir/papers/oem.html. // // When sealing messages, a nonce must be given. The length of the nonce is // fixed by the AEAD in use and is returned by |EVP_AEAD_nonce_length|. *The // nonce must be unique for all messages with the same key*. This is critically // important - nonce reuse may completely undermine the security of the AEAD. // Nonces may be predictable and public, so long as they are unique. Uniqueness // may be achieved with a simple counter or, if large enough, may be generated // randomly. The nonce must be passed into the "open" operation by the receiver // so must either be implicit (e.g. a counter), or must be transmitted along // with the sealed message. // // The "seal" and "open" operations are atomic - an entire message must be // encrypted or decrypted in a single call. Large messages may have to be split // up in order to accommodate this. When doing so, be mindful of the need not to // repeat nonces and the possibility that an attacker could duplicate, reorder // or drop message chunks. For example, using a single key for a given (large) // message and sealing chunks with nonces counting from zero would be secure as // long as the number of chunks was securely transmitted. (Otherwise an // attacker could truncate the message by dropping chunks from the end.) // // The number of chunks could be transmitted by prefixing it to the plaintext, // for example. This also assumes that no other message would ever use the same // key otherwise the rule that nonces must be unique for a given key would be // violated. // // The "seal" and "open" operations also permit additional data to be // authenticated via the |ad| parameter.
This data is not included in the // ciphertext and must be identical for both the "seal" and "open" call. This // permits implicit context to be authenticated but may be empty if not needed. // // The "seal" and "open" operations may work in-place if the |out| and |in| // arguments are equal. Otherwise, if |out| and |in| alias, input data may be // overwritten before it is read. This situation will cause an error. // // The "seal" and "open" operations return one on success and zero on error. // AEAD algorithms. // EVP_aead_aes_128_gcm is AES-128 in Galois Counter Mode. // // Note: AES-GCM should only be used with 12-byte (96-bit) nonces. Although it // is specified to take a variable-length nonce, nonces with other lengths are // effectively randomized, which means one must consider collisions. Unless // implementing an existing protocol which has already specified incorrect // parameters, only use 12-byte nonces. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_gcm(void); // EVP_aead_aes_192_gcm is AES-192 in Galois Counter Mode. // // WARNING: AES-192 is superfluous and shouldn't exist. NIST should never have // defined it. Use only when interop with another system requires it, never // de novo. // // Note: AES-GCM should only be used with 12-byte (96-bit) nonces. Although it // is specified to take a variable-length nonce, nonces with other lengths are // effectively randomized, which means one must consider collisions. Unless // implementing an existing protocol which has already specified incorrect // parameters, only use 12-byte nonces. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_192_gcm(void); // EVP_aead_aes_256_gcm is AES-256 in Galois Counter Mode. // // Note: AES-GCM should only be used with 12-byte (96-bit) nonces. Although it // is specified to take a variable-length nonce, nonces with other lengths are // effectively randomized, which means one must consider collisions. 
Unless // implementing an existing protocol which has already specified incorrect // parameters, only use 12-byte nonces. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_gcm(void); // EVP_aead_chacha20_poly1305 is the AEAD built from ChaCha20 and // Poly1305 as described in RFC 8439. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_chacha20_poly1305(void); // EVP_aead_xchacha20_poly1305 is ChaCha20-Poly1305 with an extended nonce that // makes random generation of nonces safe. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_xchacha20_poly1305(void); // EVP_aead_aes_128_ctr_hmac_sha256 is AES-128 in CTR mode with HMAC-SHA256 for // authentication. The nonce is 12 bytes; the bottom 32-bits are used as the // block counter, thus the maximum plaintext size is 64GB. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_ctr_hmac_sha256(void); // EVP_aead_aes_256_ctr_hmac_sha256 is AES-256 in CTR mode with HMAC-SHA256 for // authentication. See |EVP_aead_aes_128_ctr_hmac_sha256| for details. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_ctr_hmac_sha256(void); // EVP_aead_aes_128_gcm_siv is AES-128 in GCM-SIV mode. See RFC 8452. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_gcm_siv(void); // EVP_aead_aes_256_gcm_siv is AES-256 in GCM-SIV mode. See RFC 8452. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void); // EVP_aead_aes_128_gcm_randnonce is AES-128 in Galois Counter Mode with // internal nonce generation. The 12-byte nonce is appended to the tag // and is generated internally. The "tag", for the purposes of the API, is thus // 12 bytes larger. The nonce parameter when using this AEAD must be // zero-length. Since the nonce is random, a single key should not be used for // more than 2^32 seal operations. // // Warning: this is for use for FIPS compliance only. It is probably not // suitable for other uses. Using standard AES-GCM AEADs allows one to achieve // the same effect, but gives more control over nonce storage.
OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_gcm_randnonce(void); // EVP_aead_aes_256_gcm_randnonce is AES-256 in Galois Counter Mode with // internal nonce generation. The 12-byte nonce is appended to the tag // and is generated internally. The "tag", for the purposes of the API, is thus // 12 bytes larger. The nonce parameter when using this AEAD must be // zero-length. Since the nonce is random, a single key should not be used for // more than 2^32 seal operations. // // Warning: this is for use for FIPS compliance only. It is probably not // suitable for other uses. Using standard AES-GCM AEADs allows one to achieve // the same effect, but gives more control over nonce storage. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_gcm_randnonce(void); // EVP_aead_aes_128_ccm_bluetooth is AES-128-CCM with M=4 and L=2 (4-byte tags // and 13-byte nonces), as described in the Bluetooth Core Specification v5.0, // Volume 6, Part E, Section 1. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_ccm_bluetooth(void); // EVP_aead_aes_128_ccm_bluetooth_8 is AES-128-CCM with M=8 and L=2 (8-byte tags // and 13-byte nonces), as used in the Bluetooth Mesh Networking Specification // v1.0. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_ccm_bluetooth_8(void); // EVP_aead_aes_128_ccm_matter is AES-128-CCM with M=16 and L=2 (16-byte tags // and 13-byte nonces), as used in the Matter specification. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_ccm_matter(void); // EVP_has_aes_hardware returns one if we enable hardware support for fast and // constant-time AES-GCM. OPENSSL_EXPORT int EVP_has_aes_hardware(void); // Utility functions. // EVP_AEAD_key_length returns the length, in bytes, of the keys used by // |aead|. OPENSSL_EXPORT size_t EVP_AEAD_key_length(const EVP_AEAD *aead); // EVP_AEAD_nonce_length returns the length, in bytes, of the per-message nonce // for |aead|.
OPENSSL_EXPORT size_t EVP_AEAD_nonce_length(const EVP_AEAD *aead); // EVP_AEAD_max_overhead returns the maximum number of additional bytes added // by the act of sealing data with |aead|. OPENSSL_EXPORT size_t EVP_AEAD_max_overhead(const EVP_AEAD *aead); // EVP_AEAD_max_tag_len returns the maximum tag length when using |aead|. This // is the largest value that can be passed as |tag_len| to // |EVP_AEAD_CTX_init|. OPENSSL_EXPORT size_t EVP_AEAD_max_tag_len(const EVP_AEAD *aead); // AEAD operations. union evp_aead_ctx_st_state { uint8_t opaque[564]; uint64_t alignment; }; // An evp_aead_ctx_st (typedefed as |EVP_AEAD_CTX| in base.h) represents an AEAD // algorithm configured with a specific key and message-independent IV. struct evp_aead_ctx_st { const EVP_AEAD *aead; union evp_aead_ctx_st_state state; // tag_len may contain the actual length of the authentication tag if it is // known at initialization time. uint8_t tag_len; }; // EVP_AEAD_MAX_KEY_LENGTH contains the maximum key length used by // any AEAD defined in this header. #define EVP_AEAD_MAX_KEY_LENGTH 80 // EVP_AEAD_MAX_NONCE_LENGTH contains the maximum nonce length used by // any AEAD defined in this header. #define EVP_AEAD_MAX_NONCE_LENGTH 24 // EVP_AEAD_MAX_OVERHEAD contains the maximum overhead used by any AEAD // defined in this header. #define EVP_AEAD_MAX_OVERHEAD 64 // EVP_AEAD_DEFAULT_TAG_LENGTH is a magic value that can be passed to // EVP_AEAD_CTX_init to indicate that the default tag length for an AEAD should // be used. #define EVP_AEAD_DEFAULT_TAG_LENGTH 0 // EVP_AEAD_CTX_zero sets an uninitialized |ctx| to the zero state. It must be // initialized with |EVP_AEAD_CTX_init| before use. It is safe, but not // necessary, to call |EVP_AEAD_CTX_cleanup| in this state. This may be used for // more uniform cleanup of |EVP_AEAD_CTX|. 
OPENSSL_EXPORT void EVP_AEAD_CTX_zero(EVP_AEAD_CTX *ctx); // EVP_AEAD_CTX_new allocates an |EVP_AEAD_CTX|, calls |EVP_AEAD_CTX_init| and // returns the |EVP_AEAD_CTX|, or NULL on error. OPENSSL_EXPORT EVP_AEAD_CTX *EVP_AEAD_CTX_new(const EVP_AEAD *aead, const uint8_t *key, size_t key_len, size_t tag_len); // EVP_AEAD_CTX_free calls |EVP_AEAD_CTX_cleanup| and |OPENSSL_free| on // |ctx|. OPENSSL_EXPORT void EVP_AEAD_CTX_free(EVP_AEAD_CTX *ctx); // EVP_AEAD_CTX_init initializes |ctx| for the given AEAD algorithm. The |impl| // argument is ignored and should be NULL. Authentication tags may be truncated // by passing a size as |tag_len|. A |tag_len| of zero indicates the default // tag length and this is defined as EVP_AEAD_DEFAULT_TAG_LENGTH for // readability. // // Returns 1 on success. Otherwise returns 0 and pushes to the error stack. In // the error case, you do not need to call |EVP_AEAD_CTX_cleanup|, but it's // harmless to do so. OPENSSL_EXPORT int EVP_AEAD_CTX_init(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead, const uint8_t *key, size_t key_len, size_t tag_len, ENGINE *impl); // EVP_AEAD_CTX_cleanup frees any data allocated by |ctx|. It is a no-op to // call |EVP_AEAD_CTX_cleanup| on a |EVP_AEAD_CTX| that has been |memset| to // all zeros. OPENSSL_EXPORT void EVP_AEAD_CTX_cleanup(EVP_AEAD_CTX *ctx); // EVP_AEAD_CTX_seal encrypts and authenticates |in_len| bytes from |in| and // authenticates |ad_len| bytes from |ad| and writes the result to |out|. It // returns one on success and zero otherwise. // // This function may be called concurrently with itself or any other seal/open // function on the same |EVP_AEAD_CTX|. // // At most |max_out_len| bytes are written to |out| and, in order to ensure // success, |max_out_len| should be |in_len| plus the result of // |EVP_AEAD_max_overhead|. On successful return, |*out_len| is set to the // actual number of bytes written. 
// // The length of |nonce|, |nonce_len|, must be equal to the result of // |EVP_AEAD_nonce_length| for this AEAD. // // |EVP_AEAD_CTX_seal| never results in a partial output. If |max_out_len| is // insufficient, zero will be returned. If any error occurs, |out| will be // filled with zero bytes and |*out_len| set to zero. // // If |in| and |out| alias then |out| must be == |in|. OPENSSL_EXPORT int EVP_AEAD_CTX_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *ad, size_t ad_len); // EVP_AEAD_CTX_open authenticates |in_len| bytes from |in| and |ad_len| bytes // from |ad| and decrypts at most |in_len| bytes into |out|. It returns one on // success and zero otherwise. // // This function may be called concurrently with itself or any other seal/open // function on the same |EVP_AEAD_CTX|. // // At most |in_len| bytes are written to |out|. In order to ensure success, // |max_out_len| should be at least |in_len|. On successful return, |*out_len| // is set to the actual number of bytes written. // // The length of |nonce|, |nonce_len|, must be equal to the result of // |EVP_AEAD_nonce_length| for this AEAD. // // |EVP_AEAD_CTX_open| never results in a partial output. If |max_out_len| is // insufficient, zero will be returned. If any error occurs, |out| will be // filled with zero bytes and |*out_len| set to zero. // // If |in| and |out| alias then |out| must be == |in|. OPENSSL_EXPORT int EVP_AEAD_CTX_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *ad, size_t ad_len); // EVP_AEAD_CTX_seal_scatter encrypts and authenticates |in_len| bytes from |in| // and authenticates |ad_len| bytes from |ad|. It writes |in_len| bytes of // ciphertext to |out| and the authentication tag to |out_tag|.
It returns one // on success and zero otherwise. // // This function may be called concurrently with itself or any other seal/open // function on the same |EVP_AEAD_CTX|. // // Exactly |in_len| bytes are written to |out|, and up to // |EVP_AEAD_max_overhead+extra_in_len| bytes to |out_tag|. On successful // return, |*out_tag_len| is set to the actual number of bytes written to // |out_tag|. // // |extra_in| may point to an additional plaintext input buffer if the cipher // supports it. If present, |extra_in_len| additional bytes of plaintext are // encrypted and authenticated, and the ciphertext is written (before the tag) // to |out_tag|. |max_out_tag_len| must be sized to allow for the additional // |extra_in_len| bytes. // // The length of |nonce|, |nonce_len|, must be equal to the result of // |EVP_AEAD_nonce_length| for this AEAD. // // |EVP_AEAD_CTX_seal_scatter| never results in a partial output. If // |max_out_tag_len| is insufficient, zero will be returned. If any error // occurs, |out| and |out_tag| will be filled with zero bytes and |*out_tag_len| // set to zero. // // If |in| and |out| alias then |out| must be == |in|. |out_tag| may not alias // any other argument. OPENSSL_EXPORT int EVP_AEAD_CTX_seal_scatter( const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, size_t extra_in_len, const uint8_t *ad, size_t ad_len); // EVP_AEAD_CTX_open_gather decrypts and authenticates |in_len| bytes from |in| // and authenticates |ad_len| bytes from |ad| using |in_tag_len| bytes of // authentication tag from |in_tag|. If successful, it writes |in_len| bytes of // plaintext to |out|. It returns one on success and zero otherwise. // // This function may be called concurrently with itself or any other seal/open // function on the same |EVP_AEAD_CTX|. 
// // The length of |nonce|, |nonce_len|, must be equal to the result of // |EVP_AEAD_nonce_length| for this AEAD. // // |EVP_AEAD_CTX_open_gather| never results in a partial output. If any error // occurs, |out| will be filled with zero bytes. // // If |in| and |out| alias then |out| must be == |in|. OPENSSL_EXPORT int EVP_AEAD_CTX_open_gather( const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag, size_t in_tag_len, const uint8_t *ad, size_t ad_len); // EVP_AEAD_CTX_aead returns the underlying AEAD for |ctx|, or NULL if one has // not been set. OPENSSL_EXPORT const EVP_AEAD *EVP_AEAD_CTX_aead(const EVP_AEAD_CTX *ctx); // TLS-specific AEAD algorithms. // // These AEAD primitives do not meet the definition of generic AEADs. They are // all specific to TLS and should not be used outside of that context. They must // be initialized with |EVP_AEAD_CTX_init_with_direction|, are stateful, and may // not be used concurrently. Any nonces are used as IVs, so they must be // unpredictable. They only accept an |ad| parameter of length 11 (the standard // TLS one with length omitted). OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void); // EVP_aead_aes_128_gcm_tls12 is AES-128 in Galois Counter Mode using the TLS // 1.2 nonce construction. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_gcm_tls12(void); // EVP_aead_aes_256_gcm_tls12 is AES-256 in Galois Counter Mode using the TLS // 1.2 nonce construction. 
OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_gcm_tls12(void); // EVP_aead_aes_128_gcm_tls13 is AES-128 in Galois Counter Mode using the TLS // 1.3 nonce construction. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_gcm_tls13(void); // EVP_aead_aes_256_gcm_tls13 is AES-256 in Galois Counter Mode using the TLS // 1.3 nonce construction. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_gcm_tls13(void); // Obscure functions. // evp_aead_direction_t denotes the direction of an AEAD operation. enum evp_aead_direction_t { evp_aead_open, evp_aead_seal, }; // EVP_AEAD_CTX_init_with_direction calls |EVP_AEAD_CTX_init| for normal // AEADs. For TLS-specific and SSL3-specific AEADs, it initializes |ctx| for a // given direction. OPENSSL_EXPORT int EVP_AEAD_CTX_init_with_direction( EVP_AEAD_CTX *ctx, const EVP_AEAD *aead, const uint8_t *key, size_t key_len, size_t tag_len, enum evp_aead_direction_t dir); // EVP_AEAD_CTX_get_iv sets |*out_len| to the length of the IV for |ctx| and // sets |*out_iv| to point to that many bytes of the current IV. This is only // meaningful for AEADs with implicit IVs (i.e. CBC mode in TLS 1.0). // // It returns one on success or zero on error. OPENSSL_EXPORT int EVP_AEAD_CTX_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv, size_t *out_len); // EVP_AEAD_CTX_tag_len computes the exact byte length of the tag written by // |EVP_AEAD_CTX_seal_scatter| and writes it to |*out_tag_len|. It returns one // on success or zero on error. |in_len| and |extra_in_len| must equal the // arguments of the same names passed to |EVP_AEAD_CTX_seal_scatter|. 
OPENSSL_EXPORT int EVP_AEAD_CTX_tag_len(const EVP_AEAD_CTX *ctx, size_t *out_tag_len, const size_t in_len, const size_t extra_in_len); #if defined(__cplusplus) } // extern C #if !defined(BORINGSSL_NO_CXX) extern "C++" { BSSL_NAMESPACE_BEGIN using ScopedEVP_AEAD_CTX = internal::StackAllocated; BORINGSSL_MAKE_DELETER(EVP_AEAD_CTX, EVP_AEAD_CTX_free) BSSL_NAMESPACE_END } // extern C++ #endif #endif #endif // OPENSSL_HEADER_AEAD_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_aes.h ================================================ /* * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_AES_H #define OPENSSL_HEADER_AES_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // Raw AES functions. #define AES_ENCRYPT 1 #define AES_DECRYPT 0 // AES_MAXNR is the maximum number of AES rounds. #define AES_MAXNR 14 #define AES_BLOCK_SIZE 16 // aes_key_st should be an opaque type, but EVP requires that the size be // known. struct aes_key_st { uint32_t rd_key[4 * (AES_MAXNR + 1)]; unsigned rounds; }; typedef struct aes_key_st AES_KEY; // AES_set_encrypt_key configures |aeskey| to encrypt with the |bits|-bit key, // |key|. |key| must point to |bits|/8 bytes. It returns zero on success and a // negative number if |bits| is an invalid AES key size. // // WARNING: this function breaks the usual return value convention. OPENSSL_EXPORT int AES_set_encrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey); // AES_set_decrypt_key configures |aeskey| to decrypt with the |bits|-bit key, // |key|. |key| must point to |bits|/8 bytes. It returns zero on success and a // negative number if |bits| is an invalid AES key size. 
// // WARNING: this function breaks the usual return value convention. OPENSSL_EXPORT int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey); // AES_encrypt encrypts a single block from |in| to |out| with |key|. The |in| // and |out| pointers may overlap. OPENSSL_EXPORT void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); // AES_decrypt decrypts a single block from |in| to |out| with |key|. The |in| // and |out| pointers may overlap. OPENSSL_EXPORT void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); // Block cipher modes. // AES_ctr128_encrypt encrypts (or decrypts, it's the same in CTR mode) |len| // bytes from |in| to |out|. The |num| parameter must be set to zero on the // first call and |ivec| will be incremented. This function may be called // in-place with |in| equal to |out|, but otherwise the buffers may not // partially overlap. A partial overlap may overwrite input data before it is // read. OPENSSL_EXPORT void AES_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[AES_BLOCK_SIZE], uint8_t ecount_buf[AES_BLOCK_SIZE], unsigned int *num); // AES_ecb_encrypt encrypts (or decrypts, if |enc| == |AES_DECRYPT|) a single, // 16 byte block from |in| to |out|. This function may be called in-place with // |in| equal to |out|, but otherwise the buffers may not partially overlap. A // partial overlap may overwrite input data before it is read. OPENSSL_EXPORT void AES_ecb_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key, const int enc); // AES_cbc_encrypt encrypts (or decrypts, if |enc| == |AES_DECRYPT|) |len| // bytes from |in| to |out|. The length must be a multiple of the block size. // This function may be called in-place with |in| equal to |out|, but otherwise // the buffers may not partially overlap. A partial overlap may overwrite input // data before it is read. 
OPENSSL_EXPORT void AES_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t *ivec, const int enc); // AES_ofb128_encrypt encrypts (or decrypts, it's the same in OFB mode) |len| // bytes from |in| to |out|. The |num| parameter must be set to zero on the // first call. This function may be called in-place with |in| equal to |out|, // but otherwise the buffers may not partially overlap. A partial overlap may // overwrite input data before it is read. OPENSSL_EXPORT void AES_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t *ivec, int *num); // AES_cfb128_encrypt encrypts (or decrypts, if |enc| == |AES_DECRYPT|) |len| // bytes from |in| to |out|. The |num| parameter must be set to zero on the // first call. This function may be called in-place with |in| equal to |out|, // but otherwise the buffers may not partially overlap. A partial overlap may // overwrite input data before it is read. OPENSSL_EXPORT void AES_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t *ivec, int *num, int enc); // AES key wrap. // // These functions implement AES Key Wrap mode, as defined in RFC 3394. They // should never be used except to interoperate with existing systems that use // this mode. // AES_wrap_key performs AES key wrap on |in| which must be a multiple of 8 // bytes. |iv| must point to an 8 byte value or be NULL to use the default IV. // |key| must have been configured for encryption. On success, it writes // |in_len| + 8 bytes to |out| and returns |in_len| + 8. Otherwise, it returns // -1. OPENSSL_EXPORT int AES_wrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out, const uint8_t *in, size_t in_len); // AES_unwrap_key performs AES key unwrap on |in| which must be a multiple of 8 // bytes. |iv| must point to an 8 byte value or be NULL to use the default IV. // |key| must have been configured for decryption. 
On success, it writes // |in_len| - 8 bytes to |out| and returns |in_len| - 8. Otherwise, it returns // -1. OPENSSL_EXPORT int AES_unwrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out, const uint8_t *in, size_t in_len); // AES key wrap with padding. // // These functions implement AES Key Wrap with Padding mode, as defined in RFC // 5649. They should never be used except to interoperate with existing systems // that use this mode. // AES_wrap_key_padded performs a padded AES key wrap on |in| which must be // between 1 and 2^32-1 bytes. |key| must have been configured for encryption. // On success it writes at most |max_out| bytes of ciphertext to |out|, sets // |*out_len| to the number of bytes written, and returns one. On failure it // returns zero. To ensure success, set |max_out| to at least |in_len| + 15. OPENSSL_EXPORT int AES_wrap_key_padded(const AES_KEY *key, uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *in, size_t in_len); // AES_unwrap_key_padded performs a padded AES key unwrap on |in| which must be // a multiple of 8 bytes. |key| must have been configured for decryption. On // success it writes at most |max_out| bytes to |out|, sets |*out_len| to the // number of bytes written, and returns one. On failure it returns zero. Setting // |max_out| to |in_len| is a sensible estimate. OPENSSL_EXPORT int AES_unwrap_key_padded(const AES_KEY *key, uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *in, size_t in_len); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_AES_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_arm_arch.h ================================================ /* * Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_ARM_ARCH_H #define OPENSSL_HEADER_ARM_ARCH_H #include "CNIOBoringSSL_target.h" // arm_arch.h contains symbols used by ARM assembly, and the C code that calls // it. It is included as a public header to simplify the build, but is not // intended for external use. #if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) // ARMV7_NEON is true when a NEON unit is present in the current CPU. #define ARMV7_NEON (1 << 0) // ARMV8_AES indicates support for hardware AES instructions. #define ARMV8_AES (1 << 2) // ARMV8_SHA1 indicates support for hardware SHA-1 instructions. #define ARMV8_SHA1 (1 << 3) // ARMV8_SHA256 indicates support for hardware SHA-256 instructions. #define ARMV8_SHA256 (1 << 4) // ARMV8_PMULL indicates support for carryless multiplication. #define ARMV8_PMULL (1 << 5) // ARMV8_SHA512 indicates support for hardware SHA-512 instructions. #define ARMV8_SHA512 (1 << 6) #endif // ARM || AARCH64 #endif // OPENSSL_HEADER_ARM_ARCH_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_asm_base.h ================================================ /* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_ASM_BASE_H #define OPENSSL_HEADER_ASM_BASE_H #include "CNIOBoringSSL_target.h" // This header contains symbols and common sections used by assembly files. It // is included as a public header to simplify the build, but is not intended for // external use. // // Every assembly file must include this header. Some linker features require // all object files to be tagged with some section metadata. This header file, // when included in assembly, adds that metadata. It also makes defines like // |OPENSSL_X86_64| available and includes the prefixing macros. // // Including this header in an assembly file implies: // // - The file does not require an executable stack. // // - The file, on aarch64, uses the macros defined below to be compatible with // BTI and PAC. // // - The file, on x86_64, requires the program to be compatible with Intel IBT // and SHSTK #if defined(__ASSEMBLER__) #if defined(BORINGSSL_PREFIX) #include "CNIOBoringSSL_boringssl_prefix_symbols_asm.h" #endif #if defined(__ELF__) // Every ELF object file, even empty ones, should disable executable stacks. See // https://www.airs.com/blog/archives/518. .pushsection .note.GNU-stack, "", %progbits .popsection #endif #if defined(__CET__) && defined(OPENSSL_X86_64) // Clang and GCC define __CET__ and provide <cet.h> when they support Intel's // Indirect Branch Tracking. // https://lpc.events/event/7/contributions/729/attachments/496/903/CET-LPC-2020.pdf // // cet.h defines _CET_ENDBR which is used to mark function entry points for IBT. // and adds the assembly marker. The value of _CET_ENDBR is made dependent on if // '-fcf-protection' is passed to the compiler.
_CET_ENDBR is only required when // the function is the target of an indirect jump, but BoringSSL chooses to mark // all assembly entry points because it is easier, and allows BoringSSL's ABI // tester to call the assembly entry points via an indirect jump. #include <cet.h> #else #define _CET_ENDBR #endif #if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) // We require the ARM assembler provide |__ARM_ARCH| from Arm C Language // Extensions (ACLE). This is supported in GCC 4.8+ and Clang 3.2+. MSVC does // not implement ACLE, but we require Clang's assembler on Windows. #if !defined(__ARM_ARCH) #error "ARM assembler must define __ARM_ARCH" #endif // Even when building for 32-bit ARM, support for aarch64 crypto instructions // will be included. // // TODO(davidben): Remove this and the corresponding ifdefs? This is only // defined because some OpenSSL assembly files would allow disabling the NEON // code entirely. I think we'd prefer to do that by lifting the dispatch to C // anyway. #define __ARM_MAX_ARCH__ 8 // Support macros for // - Armv8.3-A Pointer Authentication and // - Armv8.5-A Branch Target Identification // features which require emitting a .note.gnu.property section with the // appropriate architecture-dependent feature bits set. // // |AARCH64_SIGN_LINK_REGISTER| and |AARCH64_VALIDATE_LINK_REGISTER| expand to // PACIxSP and AUTIxSP, respectively. |AARCH64_SIGN_LINK_REGISTER| should be // used immediately before saving the LR register (x30) to the stack. // |AARCH64_VALIDATE_LINK_REGISTER| should be used immediately after restoring // it. Note |AARCH64_SIGN_LINK_REGISTER|'s modifications to LR must be undone // with |AARCH64_VALIDATE_LINK_REGISTER| before RET. The SP register must also // have the same value at the two points. For example: // // .global f // f: // AARCH64_SIGN_LINK_REGISTER // stp x29, x30, [sp, #-96]! // mov x29, sp // ...
// ldp x29, x30, [sp], #96 // AARCH64_VALIDATE_LINK_REGISTER // ret // // |AARCH64_VALID_CALL_TARGET| expands to BTI 'c'. Either it, or // |AARCH64_SIGN_LINK_REGISTER|, must be used at every point that may be an // indirect call target. In particular, all symbols exported from a file must // begin with one of these macros. For example, a leaf function that does not // save LR can instead use |AARCH64_VALID_CALL_TARGET|: // // .globl return_zero // return_zero: // AARCH64_VALID_CALL_TARGET // mov x0, #0 // ret // // A non-leaf function which does not immediately save LR may need both macros // because |AARCH64_SIGN_LINK_REGISTER| appears late. For example, the function // may jump to an alternate implementation before setting up the stack: // // .globl with_early_jump // with_early_jump: // AARCH64_VALID_CALL_TARGET // cmp x0, #128 // b.lt .Lwith_early_jump_128 // AARCH64_SIGN_LINK_REGISTER // stp x29, x30, [sp, #-96]! // mov x29, sp // ... // ldp x29, x30, [sp], #96 // AARCH64_VALIDATE_LINK_REGISTER // ret // // .Lwith_early_jump_128: // ... // ret // // These annotations are only required with indirect calls. Private symbols that // are only the target of direct calls do not require annotations. Also note // that |AARCH64_VALID_CALL_TARGET| is only valid for indirect calls (BLR), not // indirect jumps (BR). Indirect jumps in assembly are currently not supported // and would require a macro for BTI 'j'. // // Although not necessary, it is safe to use these macros in 32-bit ARM // assembly. This may be used to simplify dual 32-bit and 64-bit files. 
// // References: // - "ELF for the Arm® 64-bit Architecture" // https://github.com/ARM-software/abi-aa/blob/main/aaelf64/aaelf64.rst // - "Providing protection for complex software" // https://developer.arm.com/architectures/learn-the-architecture/providing-protection-for-complex-software #if defined(__ARM_FEATURE_BTI_DEFAULT) && __ARM_FEATURE_BTI_DEFAULT == 1 #define GNU_PROPERTY_AARCH64_BTI (1 << 0) // Has Branch Target Identification #define AARCH64_VALID_CALL_TARGET hint #34 // BTI 'c' #else #define GNU_PROPERTY_AARCH64_BTI 0 // No Branch Target Identification #define AARCH64_VALID_CALL_TARGET #endif #if defined(__ARM_FEATURE_PAC_DEFAULT) && \ (__ARM_FEATURE_PAC_DEFAULT & 1) == 1 // Signed with A-key #define GNU_PROPERTY_AARCH64_POINTER_AUTH \ (1 << 1) // Has Pointer Authentication #define AARCH64_SIGN_LINK_REGISTER hint #25 // PACIASP #define AARCH64_VALIDATE_LINK_REGISTER hint #29 // AUTIASP #elif defined(__ARM_FEATURE_PAC_DEFAULT) && \ (__ARM_FEATURE_PAC_DEFAULT & 2) == 2 // Signed with B-key #define GNU_PROPERTY_AARCH64_POINTER_AUTH \ (1 << 1) // Has Pointer Authentication #define AARCH64_SIGN_LINK_REGISTER hint #27 // PACIBSP #define AARCH64_VALIDATE_LINK_REGISTER hint #31 // AUTIBSP #else #define GNU_PROPERTY_AARCH64_POINTER_AUTH 0 // No Pointer Authentication #if GNU_PROPERTY_AARCH64_BTI != 0 #define AARCH64_SIGN_LINK_REGISTER AARCH64_VALID_CALL_TARGET #else #define AARCH64_SIGN_LINK_REGISTER #endif #define AARCH64_VALIDATE_LINK_REGISTER #endif #if GNU_PROPERTY_AARCH64_POINTER_AUTH != 0 || GNU_PROPERTY_AARCH64_BTI != 0 .pushsection .note.gnu.property, "a"; .balign 8; .long 4; .long 0x10; .long 0x5; .asciz "GNU"; .long 0xc0000000; /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */ .long 4; .long (GNU_PROPERTY_AARCH64_POINTER_AUTH | GNU_PROPERTY_AARCH64_BTI); .long 0; .popsection; #endif #endif // ARM || AARCH64 #endif // __ASSEMBLER__ #endif // OPENSSL_HEADER_ASM_BASE_H ================================================ FILE: 
Sources/CNIOBoringSSL/include/CNIOBoringSSL_asn1.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_ASN1_H #define OPENSSL_HEADER_ASN1_H #include "CNIOBoringSSL_base.h" #include <time.h> #include "CNIOBoringSSL_bio.h" #include "CNIOBoringSSL_bn.h" #include "CNIOBoringSSL_stack.h" #if defined(__cplusplus) extern "C" { #endif // Legacy ASN.1 library. // // This header is part of OpenSSL's ASN.1 implementation. It is retained for // compatibility but should not be used by new code. The functions are difficult // to use correctly, and have buggy or non-standard behaviors. They are thus // particularly prone to behavior changes and API removals, as BoringSSL // iterates on these issues. // // Use the new |CBS| and |CBB| library in <CNIOBoringSSL_bytestring.h> instead. // Tag constants. // // These constants are used in various APIs to specify ASN.1 types and tag // components. See the specific API's documentation for details on which values // are used and how. // The following constants are tag classes. #define V_ASN1_UNIVERSAL 0x00 #define V_ASN1_APPLICATION 0x40 #define V_ASN1_CONTEXT_SPECIFIC 0x80 #define V_ASN1_PRIVATE 0xc0 // V_ASN1_CONSTRUCTED indicates an element is constructed, rather than // primitive. #define V_ASN1_CONSTRUCTED 0x20 // V_ASN1_PRIMITIVE_TAG is the highest tag number which can be encoded in a // single byte. Note this is unrelated to whether an element is constructed or // primitive. // // TODO(davidben): Make this private. #define V_ASN1_PRIMITIVE_TAG 0x1f // V_ASN1_MAX_UNIVERSAL is the highest supported universal tag number. It is // necessary to avoid ambiguity with |V_ASN1_NEG| and |MBSTRING_FLAG|.
// // TODO(davidben): Make this private. #define V_ASN1_MAX_UNIVERSAL 0xff // V_ASN1_UNDEF is used in some APIs to indicate an ASN.1 element is omitted. #define V_ASN1_UNDEF (-1) // V_ASN1_OTHER is used in |ASN1_TYPE| to indicate a non-universal ASN.1 type. #define V_ASN1_OTHER (-3) // V_ASN1_ANY is used by the ASN.1 templates to indicate an ANY type. #define V_ASN1_ANY (-4) // The following constants are tag numbers for universal types. #define V_ASN1_EOC 0 #define V_ASN1_BOOLEAN 1 #define V_ASN1_INTEGER 2 #define V_ASN1_BIT_STRING 3 #define V_ASN1_OCTET_STRING 4 #define V_ASN1_NULL 5 #define V_ASN1_OBJECT 6 #define V_ASN1_OBJECT_DESCRIPTOR 7 #define V_ASN1_EXTERNAL 8 #define V_ASN1_REAL 9 #define V_ASN1_ENUMERATED 10 #define V_ASN1_UTF8STRING 12 #define V_ASN1_SEQUENCE 16 #define V_ASN1_SET 17 #define V_ASN1_NUMERICSTRING 18 #define V_ASN1_PRINTABLESTRING 19 #define V_ASN1_T61STRING 20 #define V_ASN1_TELETEXSTRING 20 #define V_ASN1_VIDEOTEXSTRING 21 #define V_ASN1_IA5STRING 22 #define V_ASN1_UTCTIME 23 #define V_ASN1_GENERALIZEDTIME 24 #define V_ASN1_GRAPHICSTRING 25 #define V_ASN1_ISO64STRING 26 #define V_ASN1_VISIBLESTRING 26 #define V_ASN1_GENERALSTRING 27 #define V_ASN1_UNIVERSALSTRING 28 #define V_ASN1_BMPSTRING 30 // The following constants are used for |ASN1_STRING| values that represent // negative INTEGER and ENUMERATED values. See |ASN1_STRING| for more details. #define V_ASN1_NEG 0x100 #define V_ASN1_NEG_INTEGER (V_ASN1_INTEGER | V_ASN1_NEG) #define V_ASN1_NEG_ENUMERATED (V_ASN1_ENUMERATED | V_ASN1_NEG) // The following constants are bitmask representations of ASN.1 types. 
#define B_ASN1_NUMERICSTRING 0x0001 #define B_ASN1_PRINTABLESTRING 0x0002 #define B_ASN1_T61STRING 0x0004 #define B_ASN1_TELETEXSTRING 0x0004 #define B_ASN1_VIDEOTEXSTRING 0x0008 #define B_ASN1_IA5STRING 0x0010 #define B_ASN1_GRAPHICSTRING 0x0020 #define B_ASN1_ISO64STRING 0x0040 #define B_ASN1_VISIBLESTRING 0x0040 #define B_ASN1_GENERALSTRING 0x0080 #define B_ASN1_UNIVERSALSTRING 0x0100 #define B_ASN1_OCTET_STRING 0x0200 #define B_ASN1_BIT_STRING 0x0400 #define B_ASN1_BMPSTRING 0x0800 #define B_ASN1_UNKNOWN 0x1000 #define B_ASN1_UTF8STRING 0x2000 #define B_ASN1_UTCTIME 0x4000 #define B_ASN1_GENERALIZEDTIME 0x8000 #define B_ASN1_SEQUENCE 0x10000 // ASN1_tag2bit converts |tag| from the tag number of a universal type to a // corresponding |B_ASN1_*| constant, |B_ASN1_UNKNOWN|, or zero. If the // |B_ASN1_*| constant above is defined, it will map the corresponding // |V_ASN1_*| constant to it. Otherwise, whether it returns |B_ASN1_UNKNOWN| or // zero is ill-defined and callers should not rely on it. // // TODO(https://crbug.com/boringssl/412): Figure out what |B_ASN1_UNKNOWN| vs // zero is meant to be. The main impact is what values go in |B_ASN1_PRINTABLE|. // To that end, we must return zero on types that can't go in |ASN1_STRING|. OPENSSL_EXPORT unsigned long ASN1_tag2bit(int tag); // ASN1_tag2str returns a string representation of |tag|, interpreted as a tag // number for a universal type, or |V_ASN1_NEG_*|. OPENSSL_EXPORT const char *ASN1_tag2str(int tag); // API conventions. // // The following sample functions document the calling conventions used by // legacy ASN.1 APIs. #if 0 // Sample functions // d2i_SAMPLE parses a structure from up to |len| bytes at |*inp|. On success, // it advances |*inp| by the number of bytes read and returns a newly-allocated // |SAMPLE| object containing the parsed structure. If |out| is non-NULL, it // additionally frees the previous value at |*out| and updates |*out| to the // result.
If parsing or allocating the result fails, it returns NULL. // // This function does not reject trailing data in the input. This allows the // caller to parse a sequence of concatenated structures. Callers parsing only // one structure should check for trailing data by comparing the updated |*inp| // with the end of the input. // // Note: If |out| and |*out| are both non-NULL, the object at |*out| is not // updated in-place. Instead, it is freed, and the pointer is updated to the // new object. This differs from OpenSSL. Callers are recommended to set |out| // to NULL and instead use the return value. SAMPLE *d2i_SAMPLE(SAMPLE **out, const uint8_t **inp, long len); // i2d_SAMPLE marshals |in|. On error, it returns a negative value. On success, // it returns the length of the result and outputs it via |outp| as follows: // // If |outp| is NULL, the function writes nothing. This mode can be used to size // buffers. // // If |outp| is non-NULL but |*outp| is NULL, the function sets |*outp| to a // newly-allocated buffer containing the result. The caller is responsible for // releasing |*outp| with |OPENSSL_free|. This mode is recommended for most // callers. // // If |outp| and |*outp| are non-NULL, the function writes the result to // |*outp|, which must have enough space available, and advances |*outp| just // past the output. // // WARNING: In the third mode, the function does not internally check output // bounds. Failing to correctly size the buffer will result in a potentially // exploitable memory error. int i2d_SAMPLE(const SAMPLE *in, uint8_t **outp); #endif // Sample functions // The following typedefs are sometimes used for pointers to functions like // |d2i_SAMPLE| and |i2d_SAMPLE|. Note, however, that these act on |void*|. // Calling a function with a different pointer type is undefined in C, so this // is only valid with a wrapper. 
typedef void *d2i_of_void(void **, const unsigned char **, long); typedef int i2d_of_void(const void *, unsigned char **); // ASN.1 types. // // An |ASN1_ITEM| represents an ASN.1 type and allows working with ASN.1 types // generically. // // |ASN1_ITEM|s use a different namespace from C types and are accessed via // |ASN1_ITEM_*| macros. So, for example, |ASN1_OCTET_STRING| is both a C type // and the name of an |ASN1_ITEM|, referenced as // |ASN1_ITEM_rptr(ASN1_OCTET_STRING)|. // // Each |ASN1_ITEM| has a corresponding C type, typically with the same name, // which represents values in the ASN.1 type. This type is either a pointer type // or |ASN1_BOOLEAN|. When it is a pointer, NULL pointers represent omitted // values. For example, an OCTET STRING value is declared with the C type // |ASN1_OCTET_STRING*| and uses the |ASN1_ITEM| named |ASN1_OCTET_STRING|. An // OPTIONAL OCTET STRING uses the same C type and represents an omitted value // with a NULL pointer. |ASN1_BOOLEAN| is described in a later section. // DECLARE_ASN1_ITEM declares an |ASN1_ITEM| with name |name|. The |ASN1_ITEM| // may be referenced with |ASN1_ITEM_rptr|. Uses of this macro should document // the corresponding ASN.1 and C types. #define DECLARE_ASN1_ITEM(name) extern OPENSSL_EXPORT const ASN1_ITEM name##_it; // ASN1_ITEM_rptr returns the |const ASN1_ITEM *| named |name|. #define ASN1_ITEM_rptr(name) (&(name##_it)) // ASN1_ITEM_EXP is an abstraction for referencing an |ASN1_ITEM| in a // constant-initialized structure, such as a method table. It exists because, on // some OpenSSL platforms, |ASN1_ITEM| references are indirected through // functions. Structures reference the |ASN1_ITEM| by declaring a field like // |ASN1_ITEM_EXP *item| and initializing it with |ASN1_ITEM_ref|. typedef const ASN1_ITEM ASN1_ITEM_EXP; // ASN1_ITEM_ref returns an |ASN1_ITEM_EXP*| for the |ASN1_ITEM| named |name|. 
#define ASN1_ITEM_ref(name) (&(name##_it)) // ASN1_ITEM_ptr converts |iptr|, which must be an |ASN1_ITEM_EXP*| to a // |const ASN1_ITEM*|. #define ASN1_ITEM_ptr(iptr) (iptr) // ASN1_VALUE_st (aka |ASN1_VALUE|) is an opaque type used as a placeholder for // the C type corresponding to an |ASN1_ITEM|. typedef struct ASN1_VALUE_st ASN1_VALUE; // ASN1_item_new allocates a new value of the C type corresponding to |it|, or // NULL on error. On success, the caller must release the value with // |ASN1_item_free|, or the corresponding C type's free function, when done. The // new value will initialize fields of the value to some default state, such as // an empty string. Note, however, that this default state sometimes omits // required values, such as with CHOICE types. // // This function may not be used with |ASN1_ITEM|s whose C type is // |ASN1_BOOLEAN|. // // WARNING: Casting the result of this function to the wrong type is a // potentially exploitable memory error. Callers must ensure the value is used // consistently with |it|. Prefer using type-specific functions such as // |ASN1_OCTET_STRING_new|. OPENSSL_EXPORT ASN1_VALUE *ASN1_item_new(const ASN1_ITEM *it); // ASN1_item_free releases memory associated with |val|, which must be an object // of the C type corresponding to |it|. // // This function may not be used with |ASN1_ITEM|s whose C type is // |ASN1_BOOLEAN|. // // WARNING: Passing a pointer of the wrong type into this function is a // potentially exploitable memory error. Callers must ensure |val| is consistent // with |it|. Prefer using type-specific functions such as // |ASN1_OCTET_STRING_free|. OPENSSL_EXPORT void ASN1_item_free(ASN1_VALUE *val, const ASN1_ITEM *it); // ASN1_item_d2i parses the ASN.1 type |it| from up to |len| bytes at |*inp|. // It behaves like |d2i_SAMPLE|, except that |out| and the return value are cast // to |ASN1_VALUE| pointers. 
// // TODO(https://crbug.com/boringssl/444): C strict aliasing forbids type-punning // |T*| and |ASN1_VALUE*| the way this function signature does. When that bug is // resolved, we will need to pick which type |*out| is (probably |T*|). Do not // use a non-NULL |out| to avoid ending up on the wrong side of this question. // // This function may not be used with |ASN1_ITEM|s whose C type is // |ASN1_BOOLEAN|. // // WARNING: Casting the result of this function to the wrong type, or passing a // pointer of the wrong type into this function, are potentially exploitable // memory errors. Callers must ensure |out| is consistent with |it|. Prefer // using type-specific functions such as |d2i_ASN1_OCTET_STRING|. OPENSSL_EXPORT ASN1_VALUE *ASN1_item_d2i(ASN1_VALUE **out, const unsigned char **inp, long len, const ASN1_ITEM *it); // ASN1_item_i2d marshals |val| as the ASN.1 type associated with |it|, as // described in |i2d_SAMPLE|. // // This function may not be used with |ASN1_ITEM|s whose C type is // |ASN1_BOOLEAN|. // // WARNING: Passing a pointer of the wrong type into this function is a // potentially exploitable memory error. Callers must ensure |val| is consistent // with |it|. Prefer using type-specific functions such as // |i2d_ASN1_OCTET_STRING|. OPENSSL_EXPORT int ASN1_item_i2d(ASN1_VALUE *val, unsigned char **outp, const ASN1_ITEM *it); // ASN1_item_dup returns a newly-allocated copy of |x|, or NULL on error. |x| // must be an object of |it|'s C type. // // This function may not be used with |ASN1_ITEM|s whose C type is // |ASN1_BOOLEAN|. // // WARNING: Casting the result of this function to the wrong type, or passing a // pointer of the wrong type into this function, are potentially exploitable // memory errors. Prefer using type-specific functions such as // |ASN1_STRING_dup|. OPENSSL_EXPORT void *ASN1_item_dup(const ASN1_ITEM *it, void *x); // The following functions behave like |ASN1_item_d2i| but read from |in| // instead. 
|out| is the same parameter as in |ASN1_item_d2i|, but written with // |void*| instead. The return values similarly match. // // These functions may not be used with |ASN1_ITEM|s whose C type is // |ASN1_BOOLEAN|. // // WARNING: These functions do not bound how much data is read from |in|. // Parsing an untrusted input could consume unbounded memory. OPENSSL_EXPORT void *ASN1_item_d2i_fp(const ASN1_ITEM *it, FILE *in, void *out); OPENSSL_EXPORT void *ASN1_item_d2i_bio(const ASN1_ITEM *it, BIO *in, void *out); // The following functions behave like |ASN1_item_i2d| but write to |out| // instead. |in| is the same parameter as in |ASN1_item_i2d|, but written with // |void*| instead. // // These functions may not be used with |ASN1_ITEM|s whose C type is // |ASN1_BOOLEAN|. OPENSSL_EXPORT int ASN1_item_i2d_fp(const ASN1_ITEM *it, FILE *out, void *in); OPENSSL_EXPORT int ASN1_item_i2d_bio(const ASN1_ITEM *it, BIO *out, void *in); // ASN1_item_unpack parses |oct|'s contents as |it|'s ASN.1 type. It returns a // newly-allocated instance of |it|'s C type on success, or NULL on error. // // This function may not be used with |ASN1_ITEM|s whose C type is // |ASN1_BOOLEAN|. // // WARNING: Casting the result of this function to the wrong type is a // potentially exploitable memory error. Callers must ensure the value is used // consistently with |it|. OPENSSL_EXPORT void *ASN1_item_unpack(const ASN1_STRING *oct, const ASN1_ITEM *it); // ASN1_item_pack marshals |obj| as |it|'s ASN.1 type. If |out| is NULL, it // returns a newly-allocated |ASN1_STRING| with the result, or NULL on error. // If |out| is non-NULL, but |*out| is NULL, it does the same but additionally // sets |*out| to the result. If both |out| and |*out| are non-NULL, it writes // the result to |*out| and returns |*out| on success or NULL on error. // // This function may not be used with |ASN1_ITEM|s whose C type is // |ASN1_BOOLEAN|. 
// // WARNING: Passing a pointer of the wrong type into this function is a // potentially exploitable memory error. Callers must ensure |val| is consistent // with |it|. OPENSSL_EXPORT ASN1_STRING *ASN1_item_pack(void *obj, const ASN1_ITEM *it, ASN1_STRING **out); // Booleans. // // This library represents ASN.1 BOOLEAN values with |ASN1_BOOLEAN|, which is an // integer type. FALSE is zero, TRUE is 0xff, and an omitted OPTIONAL BOOLEAN is // -1. // ASN1_BOOLEAN_FALSE is FALSE as an |ASN1_BOOLEAN|. #define ASN1_BOOLEAN_FALSE 0 // ASN1_BOOLEAN_TRUE is TRUE as an |ASN1_BOOLEAN|. Some code incorrectly uses // 1, so prefer |b != ASN1_BOOLEAN_FALSE| over |b == ASN1_BOOLEAN_TRUE|. #define ASN1_BOOLEAN_TRUE 0xff // ASN1_BOOLEAN_NONE, in contexts where the |ASN1_BOOLEAN| represents an // OPTIONAL BOOLEAN, is an omitted value. Using this value in other contexts is // undefined and may be misinterpreted as TRUE. #define ASN1_BOOLEAN_NONE (-1) // d2i_ASN1_BOOLEAN parses a DER-encoded ASN.1 BOOLEAN from up to |len| bytes at // |*inp|. On success, it advances |*inp| by the number of bytes read and // returns the result. If |out| is non-NULL, it additionally writes the result // to |*out|. On error, it returns |ASN1_BOOLEAN_NONE|. // // This function does not reject trailing data in the input. This allows the // caller to parse a sequence of concatenated structures. Callers parsing only // one structure should check for trailing data by comparing the updated |*inp| // with the end of the input. // // WARNING: This function's behavior is slightly different from other |d2i_*| functions // because |ASN1_BOOLEAN| is not a pointer type. OPENSSL_EXPORT ASN1_BOOLEAN d2i_ASN1_BOOLEAN(ASN1_BOOLEAN *out, const unsigned char **inp, long len); // i2d_ASN1_BOOLEAN marshals |a| as a DER-encoded ASN.1 BOOLEAN, as described in // |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_ASN1_BOOLEAN(ASN1_BOOLEAN a, unsigned char **outp); // The following |ASN1_ITEM|s have ASN.1 type BOOLEAN and C type |ASN1_BOOLEAN|. 
// |ASN1_TBOOLEAN| and |ASN1_FBOOLEAN| must be marked OPTIONAL. When omitted, // they are parsed as TRUE and FALSE, respectively, rather than // |ASN1_BOOLEAN_NONE|. DECLARE_ASN1_ITEM(ASN1_BOOLEAN) DECLARE_ASN1_ITEM(ASN1_TBOOLEAN) DECLARE_ASN1_ITEM(ASN1_FBOOLEAN) // Strings. // // ASN.1 contains a myriad of string types, as well as types that contain data // that may be encoded into a string. This library uses a single type, // |ASN1_STRING|, to represent most values. // An asn1_string_st (aka |ASN1_STRING|) represents a value of a string-like // ASN.1 type. It contains a |type| field, and a byte string |data| field with a // type-specific representation. This type-specific representation does not // always correspond to the DER encoding of the type. // // If |type| is one of |V_ASN1_OCTET_STRING|, |V_ASN1_UTF8STRING|, // |V_ASN1_NUMERICSTRING|, |V_ASN1_PRINTABLESTRING|, |V_ASN1_T61STRING|, // |V_ASN1_VIDEOTEXSTRING|, |V_ASN1_IA5STRING|, |V_ASN1_GRAPHICSTRING|, // |V_ASN1_ISO64STRING|, |V_ASN1_VISIBLESTRING|, |V_ASN1_GENERALSTRING|, // |V_ASN1_UNIVERSALSTRING|, or |V_ASN1_BMPSTRING|, the object represents an // ASN.1 string type. The data contains the byte representation of the // string. // // If |type| is |V_ASN1_BIT_STRING|, the object represents a BIT STRING value. // See bit string documentation below for the data and flags. // // If |type| is one of |V_ASN1_INTEGER|, |V_ASN1_NEG_INTEGER|, // |V_ASN1_ENUMERATED|, or |V_ASN1_NEG_ENUMERATED|, the object represents an // INTEGER or ENUMERATED value. See integer documentation below for details. // // If |type| is |V_ASN1_GENERALIZEDTIME| or |V_ASN1_UTCTIME|, the object // represents a GeneralizedTime or UTCTime value, respectively. The data // contains the DER encoding of the value. For example, the UNIX epoch would be // "19700101000000Z" for a GeneralizedTime and "700101000000Z" for a UTCTime. 
// // If |type| is |V_ASN1_SEQUENCE|, |V_ASN1_SET|, or |V_ASN1_OTHER|, the object // represents a SEQUENCE, SET, or arbitrary ASN.1 value, respectively. Unlike // the above cases, the data contains the DER encoding of the entire structure, // including the header. If the value is explicitly or implicitly tagged, this // too will be reflected in the data field. As this case handles unknown types, // the contents are not checked when parsing or serializing. // // Other values of |type| do not represent a valid ASN.1 value, though // default-constructed objects may set |type| to -1. Such objects cannot be // serialized. // // |ASN1_STRING| additionally has the following typedefs: |ASN1_BIT_STRING|, // |ASN1_BMPSTRING|, |ASN1_ENUMERATED|, |ASN1_GENERALIZEDTIME|, // |ASN1_GENERALSTRING|, |ASN1_IA5STRING|, |ASN1_INTEGER|, |ASN1_OCTET_STRING|, // |ASN1_PRINTABLESTRING|, |ASN1_T61STRING|, |ASN1_TIME|, // |ASN1_UNIVERSALSTRING|, |ASN1_UTCTIME|, |ASN1_UTF8STRING|, and // |ASN1_VISIBLESTRING|. Other than |ASN1_TIME|, these correspond to universal // ASN.1 types. |ASN1_TIME| represents a CHOICE of UTCTime and GeneralizedTime, // with a cutoff of 2049, as used in Section 4.1.2.5 of RFC 5280. // // For clarity, callers are encouraged to use the appropriate typedef when // available. They are the same type as |ASN1_STRING|, so a caller may freely // pass them into functions expecting |ASN1_STRING|, such as // |ASN1_STRING_length|. // // If a function returns an |ASN1_STRING| where the typedef or ASN.1 structure // implies constraints on |type|, callers may assume that |type| is correct. // However, if a function takes an |ASN1_STRING| as input, callers must ensure // |type| matches. These invariants are not captured by the C type system and // may not be checked at runtime. For example, callers may assume the output of // |X509_get0_serialNumber| has type |V_ASN1_INTEGER| or |V_ASN1_NEG_INTEGER|. 
// Callers must not pass a string of type |V_ASN1_OCTET_STRING| to // |X509_set_serialNumber|. Doing so may break invariants on the |X509| object // and break the |X509_get0_serialNumber| invariant. // // TODO(https://crbug.com/boringssl/445): This is very unfriendly. Getting the // type field wrong should not cause memory errors, but it may do strange // things. We should add runtime checks to anything that consumes |ASN1_STRING|s // from the caller. struct asn1_string_st { int length; int type; unsigned char *data; long flags; }; // ASN1_STRING_FLAG_BITS_LEFT indicates, in a BIT STRING |ASN1_STRING|, that // flags & 0x7 contains the number of padding bits added to the BIT STRING // value. When not set, all trailing zero bits in the last byte are implicitly // treated as padding. This behavior is deprecated and should not be used. #define ASN1_STRING_FLAG_BITS_LEFT 0x08 // ASN1_STRING_type_new returns a newly-allocated empty |ASN1_STRING| object of // type |type|, or NULL on error. OPENSSL_EXPORT ASN1_STRING *ASN1_STRING_type_new(int type); // ASN1_STRING_new returns a newly-allocated empty |ASN1_STRING| object with an // arbitrary type. Prefer one of the type-specific constructors, such as // |ASN1_OCTET_STRING_new|, or |ASN1_STRING_type_new|. OPENSSL_EXPORT ASN1_STRING *ASN1_STRING_new(void); // ASN1_STRING_free releases memory associated with |str|. OPENSSL_EXPORT void ASN1_STRING_free(ASN1_STRING *str); // ASN1_STRING_copy sets |dst| to a copy of |str|. It returns one on success and // zero on error. OPENSSL_EXPORT int ASN1_STRING_copy(ASN1_STRING *dst, const ASN1_STRING *str); // ASN1_STRING_dup returns a newly-allocated copy of |str|, or NULL on error. OPENSSL_EXPORT ASN1_STRING *ASN1_STRING_dup(const ASN1_STRING *str); // ASN1_STRING_type returns the type of |str|. This value will be one of the // |V_ASN1_*| constants. OPENSSL_EXPORT int ASN1_STRING_type(const ASN1_STRING *str); // ASN1_STRING_get0_data returns a pointer to |str|'s contents. 
Callers should // use |ASN1_STRING_length| to determine the length of the string. The string // may have embedded NUL bytes and may not be NUL-terminated. // // The contents of an |ASN1_STRING| encode the value in some type-specific // representation that does not always correspond to the DER encoding of the // type. See the documentation for |ASN1_STRING| for details. OPENSSL_EXPORT const unsigned char *ASN1_STRING_get0_data( const ASN1_STRING *str); // ASN1_STRING_data returns a mutable pointer to |str|'s contents. Callers // should use |ASN1_STRING_length| to determine the length of the string. The // string may have embedded NUL bytes and may not be NUL-terminated. // // The contents of an |ASN1_STRING| encode the value in some type-specific // representation that does not always correspond to the DER encoding of the // type. See the documentation for |ASN1_STRING| for details. // // Prefer |ASN1_STRING_get0_data|. OPENSSL_EXPORT unsigned char *ASN1_STRING_data(ASN1_STRING *str); // ASN1_STRING_length returns the length of |str|, in bytes. // // The contents of an |ASN1_STRING| encode the value in some type-specific // representation that does not always correspond to the DER encoding of the // type. See the documentation for |ASN1_STRING| for details. OPENSSL_EXPORT int ASN1_STRING_length(const ASN1_STRING *str); // ASN1_STRING_cmp compares |a| and |b|'s type and contents. It returns an // integer equal to, less than, or greater than zero if |a| is equal to, less // than, or greater than |b|, respectively. This function compares by length, // then data, then type. Note the data compared is the |ASN1_STRING| internal // representation and the type order is arbitrary. While this comparison is // suitable for sorting, callers should not rely on the exact order when |a| // and |b| are different types. // // Note that, if |a| and |b| are INTEGERs, this comparison does not order the // values numerically. For a numerical comparison, use |ASN1_INTEGER_cmp|. 
OPENSSL_EXPORT int ASN1_STRING_cmp(const ASN1_STRING *a, const ASN1_STRING *b); // ASN1_STRING_set sets the contents of |str| to a copy of |len| bytes from // |data|. It returns one on success and zero on error. If |data| is NULL, it // updates the length and allocates the buffer as needed, but does not // initialize the contents. OPENSSL_EXPORT int ASN1_STRING_set(ASN1_STRING *str, const void *data, ossl_ssize_t len); // ASN1_STRING_set0 sets the contents of |str| to |len| bytes from |data|. It // takes ownership of |data|, which must have been allocated with // |OPENSSL_malloc|. OPENSSL_EXPORT void ASN1_STRING_set0(ASN1_STRING *str, void *data, int len); // The following functions call |ASN1_STRING_type_new| with the corresponding // |V_ASN1_*| constant. OPENSSL_EXPORT ASN1_BMPSTRING *ASN1_BMPSTRING_new(void); OPENSSL_EXPORT ASN1_GENERALSTRING *ASN1_GENERALSTRING_new(void); OPENSSL_EXPORT ASN1_IA5STRING *ASN1_IA5STRING_new(void); OPENSSL_EXPORT ASN1_OCTET_STRING *ASN1_OCTET_STRING_new(void); OPENSSL_EXPORT ASN1_PRINTABLESTRING *ASN1_PRINTABLESTRING_new(void); OPENSSL_EXPORT ASN1_T61STRING *ASN1_T61STRING_new(void); OPENSSL_EXPORT ASN1_UNIVERSALSTRING *ASN1_UNIVERSALSTRING_new(void); OPENSSL_EXPORT ASN1_UTF8STRING *ASN1_UTF8STRING_new(void); OPENSSL_EXPORT ASN1_VISIBLESTRING *ASN1_VISIBLESTRING_new(void); // The following functions call |ASN1_STRING_free|. 
OPENSSL_EXPORT void ASN1_BMPSTRING_free(ASN1_BMPSTRING *str); OPENSSL_EXPORT void ASN1_GENERALSTRING_free(ASN1_GENERALSTRING *str); OPENSSL_EXPORT void ASN1_IA5STRING_free(ASN1_IA5STRING *str); OPENSSL_EXPORT void ASN1_OCTET_STRING_free(ASN1_OCTET_STRING *str); OPENSSL_EXPORT void ASN1_PRINTABLESTRING_free(ASN1_PRINTABLESTRING *str); OPENSSL_EXPORT void ASN1_T61STRING_free(ASN1_T61STRING *str); OPENSSL_EXPORT void ASN1_UNIVERSALSTRING_free(ASN1_UNIVERSALSTRING *str); OPENSSL_EXPORT void ASN1_UTF8STRING_free(ASN1_UTF8STRING *str); OPENSSL_EXPORT void ASN1_VISIBLESTRING_free(ASN1_VISIBLESTRING *str); // The following functions parse up to |len| bytes from |*inp| as a // DER-encoded ASN.1 value of the corresponding type, as described in // |d2i_SAMPLE|. OPENSSL_EXPORT ASN1_BMPSTRING *d2i_ASN1_BMPSTRING(ASN1_BMPSTRING **out, const uint8_t **inp, long len); OPENSSL_EXPORT ASN1_GENERALSTRING *d2i_ASN1_GENERALSTRING( ASN1_GENERALSTRING **out, const uint8_t **inp, long len); OPENSSL_EXPORT ASN1_IA5STRING *d2i_ASN1_IA5STRING(ASN1_IA5STRING **out, const uint8_t **inp, long len); OPENSSL_EXPORT ASN1_OCTET_STRING *d2i_ASN1_OCTET_STRING(ASN1_OCTET_STRING **out, const uint8_t **inp, long len); OPENSSL_EXPORT ASN1_PRINTABLESTRING *d2i_ASN1_PRINTABLESTRING( ASN1_PRINTABLESTRING **out, const uint8_t **inp, long len); OPENSSL_EXPORT ASN1_T61STRING *d2i_ASN1_T61STRING(ASN1_T61STRING **out, const uint8_t **inp, long len); OPENSSL_EXPORT ASN1_UNIVERSALSTRING *d2i_ASN1_UNIVERSALSTRING( ASN1_UNIVERSALSTRING **out, const uint8_t **inp, long len); OPENSSL_EXPORT ASN1_UTF8STRING *d2i_ASN1_UTF8STRING(ASN1_UTF8STRING **out, const uint8_t **inp, long len); OPENSSL_EXPORT ASN1_VISIBLESTRING *d2i_ASN1_VISIBLESTRING( ASN1_VISIBLESTRING **out, const uint8_t **inp, long len); // The following functions marshal |in| as a DER-encoded ASN.1 value of the // corresponding type, as described in |i2d_SAMPLE|. 
OPENSSL_EXPORT int i2d_ASN1_BMPSTRING(const ASN1_BMPSTRING *in, uint8_t **outp); OPENSSL_EXPORT int i2d_ASN1_GENERALSTRING(const ASN1_GENERALSTRING *in, uint8_t **outp); OPENSSL_EXPORT int i2d_ASN1_IA5STRING(const ASN1_IA5STRING *in, uint8_t **outp); OPENSSL_EXPORT int i2d_ASN1_OCTET_STRING(const ASN1_OCTET_STRING *in, uint8_t **outp); OPENSSL_EXPORT int i2d_ASN1_PRINTABLESTRING(const ASN1_PRINTABLESTRING *in, uint8_t **outp); OPENSSL_EXPORT int i2d_ASN1_T61STRING(const ASN1_T61STRING *in, uint8_t **outp); OPENSSL_EXPORT int i2d_ASN1_UNIVERSALSTRING(const ASN1_UNIVERSALSTRING *in, uint8_t **outp); OPENSSL_EXPORT int i2d_ASN1_UTF8STRING(const ASN1_UTF8STRING *in, uint8_t **outp); OPENSSL_EXPORT int i2d_ASN1_VISIBLESTRING(const ASN1_VISIBLESTRING *in, uint8_t **outp); // The following |ASN1_ITEM|s have the ASN.1 type referred to in their name and // C type |ASN1_STRING*|. The C type may also be written as the corresponding // typedef. DECLARE_ASN1_ITEM(ASN1_BMPSTRING) DECLARE_ASN1_ITEM(ASN1_GENERALSTRING) DECLARE_ASN1_ITEM(ASN1_IA5STRING) DECLARE_ASN1_ITEM(ASN1_OCTET_STRING) DECLARE_ASN1_ITEM(ASN1_PRINTABLESTRING) DECLARE_ASN1_ITEM(ASN1_T61STRING) DECLARE_ASN1_ITEM(ASN1_UNIVERSALSTRING) DECLARE_ASN1_ITEM(ASN1_UTF8STRING) DECLARE_ASN1_ITEM(ASN1_VISIBLESTRING) // ASN1_OCTET_STRING_dup calls |ASN1_STRING_dup|. OPENSSL_EXPORT ASN1_OCTET_STRING *ASN1_OCTET_STRING_dup( const ASN1_OCTET_STRING *a); // ASN1_OCTET_STRING_cmp calls |ASN1_STRING_cmp|. OPENSSL_EXPORT int ASN1_OCTET_STRING_cmp(const ASN1_OCTET_STRING *a, const ASN1_OCTET_STRING *b); // ASN1_OCTET_STRING_set calls |ASN1_STRING_set|. OPENSSL_EXPORT int ASN1_OCTET_STRING_set(ASN1_OCTET_STRING *str, const unsigned char *data, int len); // ASN1_STRING_to_UTF8 converts |in| to UTF-8. On success, sets |*out| to a // newly-allocated buffer containing the resulting string and returns the length // of the string. The caller must call |OPENSSL_free| to release |*out| when // done. On error, it returns a negative number. 
OPENSSL_EXPORT int ASN1_STRING_to_UTF8(unsigned char **out, const ASN1_STRING *in); // The following formats define encodings for use with functions like // |ASN1_mbstring_copy|. Note |MBSTRING_ASC| refers to Latin-1, not ASCII. #define MBSTRING_FLAG 0x1000 #define MBSTRING_UTF8 (MBSTRING_FLAG) #define MBSTRING_ASC (MBSTRING_FLAG | 1) #define MBSTRING_BMP (MBSTRING_FLAG | 2) #define MBSTRING_UNIV (MBSTRING_FLAG | 4) // DIRSTRING_TYPE contains the valid string types in an X.509 DirectoryString. #define DIRSTRING_TYPE \ (B_ASN1_PRINTABLESTRING | B_ASN1_T61STRING | B_ASN1_BMPSTRING | \ B_ASN1_UTF8STRING) // PKCS9STRING_TYPE contains the valid string types in a PKCS9String. #define PKCS9STRING_TYPE (DIRSTRING_TYPE | B_ASN1_IA5STRING) // ASN1_mbstring_copy converts |len| bytes from |in| to an ASN.1 string. If // |len| is -1, |in| must be NUL-terminated and the length is determined by // |strlen|. |in| is decoded according to |inform|, which must be one of // |MBSTRING_*|. |mask| determines the set of valid output types and is a // bitmask containing a subset of |B_ASN1_PRINTABLESTRING|, |B_ASN1_IA5STRING|, // |B_ASN1_T61STRING|, |B_ASN1_BMPSTRING|, |B_ASN1_UNIVERSALSTRING|, and // |B_ASN1_UTF8STRING|, in that preference order. This function chooses the // first output type in |mask| which can represent |in|. It interprets T61String // as Latin-1, rather than T.61. // // If |mask| is zero, |DIRSTRING_TYPE| is used by default. // // On success, this function returns the |V_ASN1_*| constant corresponding to // the selected output type and, if |out| and |*out| are both non-NULL, updates // the object at |*out| with the result. If |out| is non-NULL and |*out| is // NULL, it instead sets |*out| to a newly-allocated |ASN1_STRING| containing // the result. If |out| is NULL, it returns the selected output type without // constructing an |ASN1_STRING|. On error, this function returns -1. 
OPENSSL_EXPORT int ASN1_mbstring_copy(ASN1_STRING **out, const uint8_t *in, ossl_ssize_t len, int inform, unsigned long mask); // ASN1_mbstring_ncopy behaves like |ASN1_mbstring_copy| but returns an error if // the input is less than |minsize| or greater than |maxsize| codepoints long. A // |maxsize| value of zero is ignored. Note the sizes are measured in // codepoints, not output bytes. OPENSSL_EXPORT int ASN1_mbstring_ncopy(ASN1_STRING **out, const uint8_t *in, ossl_ssize_t len, int inform, unsigned long mask, ossl_ssize_t minsize, ossl_ssize_t maxsize); // ASN1_STRING_set_by_NID behaves like |ASN1_mbstring_ncopy|, but determines // |mask|, |minsize|, and |maxsize| based on |nid|. When |nid| is a recognized // X.509 attribute type, it will pick a suitable ASN.1 string type and bounds. // For most attribute types, it preferentially chooses UTF8String. If |nid| is // unrecognized, it uses UTF8String by default. // // Slightly unlike |ASN1_mbstring_ncopy|, this function interprets |out| and // returns its result as follows: If |out| is NULL, it returns a newly-allocated // |ASN1_STRING| containing the result. If |out| is non-NULL and // |*out| is NULL, it additionally sets |*out| to the result. If both |out| and // |*out| are non-NULL, it instead updates the object at |*out| and returns // |*out|. In all cases, it returns NULL on error. // // This function supports the following NIDs: |NID_countryName|, // |NID_dnQualifier|, |NID_domainComponent|, |NID_friendlyName|, // |NID_givenName|, |NID_initials|, |NID_localityName|, |NID_ms_csp_name|, // |NID_name|, |NID_organizationalUnitName|, |NID_organizationName|, // |NID_pkcs9_challengePassword|, |NID_pkcs9_emailAddress|, // |NID_pkcs9_unstructuredAddress|, |NID_pkcs9_unstructuredName|, // |NID_serialNumber|, |NID_stateOrProvinceName|, and |NID_surname|. Additional // NIDs may be registered with |ASN1_STRING_set_by_NID|, but it is recommended // to call |ASN1_mbstring_ncopy| directly instead. 
OPENSSL_EXPORT ASN1_STRING *ASN1_STRING_set_by_NID(ASN1_STRING **out, const unsigned char *in, ossl_ssize_t len, int inform, int nid); // STABLE_NO_MASK causes |ASN1_STRING_TABLE_add| to allow types other than // UTF8String. #define STABLE_NO_MASK 0x02 // ASN1_STRING_TABLE_add registers the corresponding parameters with |nid|, for // use with |ASN1_STRING_set_by_NID|. It returns one on success and zero on // error. It is an error to call this function if |nid| is a built-in NID, or // was already registered by a previous call. // // WARNING: This function affects global state in the library. If two libraries // in the same address space register information for the same OID, one call // will fail. Prefer directly passing the desired parameters to // |ASN1_mbstring_copy| or |ASN1_mbstring_ncopy| instead. OPENSSL_EXPORT int ASN1_STRING_TABLE_add(int nid, long minsize, long maxsize, unsigned long mask, unsigned long flags); // Multi-strings. // // A multi-string, or "MSTRING", is an |ASN1_STRING| that represents a CHOICE of // several string or string-like types, such as X.509's DirectoryString. The // |ASN1_STRING|'s type field determines which type is used. // // Multi-string types are associated with a bitmask, using the |B_ASN1_*| // constants, which defines which types are valid. // B_ASN1_DIRECTORYSTRING is a bitmask of types allowed in an X.509 // DirectoryString (RFC 5280). #define B_ASN1_DIRECTORYSTRING \ (B_ASN1_PRINTABLESTRING | B_ASN1_TELETEXSTRING | B_ASN1_BMPSTRING | \ B_ASN1_UNIVERSALSTRING | B_ASN1_UTF8STRING) // DIRECTORYSTRING_new returns a newly-allocated |ASN1_STRING| with type -1, or // NULL on error. The resulting |ASN1_STRING| is not a valid X.509 // DirectoryString until initialized with a value. OPENSSL_EXPORT ASN1_STRING *DIRECTORYSTRING_new(void); // DIRECTORYSTRING_free calls |ASN1_STRING_free|. 
OPENSSL_EXPORT void DIRECTORYSTRING_free(ASN1_STRING *str); // d2i_DIRECTORYSTRING parses up to |len| bytes from |*inp| as a DER-encoded // X.509 DirectoryString (RFC 5280), as described in |d2i_SAMPLE|. // // TODO(https://crbug.com/boringssl/354): This function currently also accepts // BER, but this will be removed in the future. // // TODO(https://crbug.com/boringssl/449): DirectoryString's non-empty string // requirement is not currently enforced. OPENSSL_EXPORT ASN1_STRING *d2i_DIRECTORYSTRING(ASN1_STRING **out, const uint8_t **inp, long len); // i2d_DIRECTORYSTRING marshals |in| as a DER-encoded X.509 DirectoryString (RFC // 5280), as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_DIRECTORYSTRING(const ASN1_STRING *in, uint8_t **outp); // DIRECTORYSTRING is an |ASN1_ITEM| whose ASN.1 type is X.509 DirectoryString // (RFC 5280) and C type is |ASN1_STRING*|. DECLARE_ASN1_ITEM(DIRECTORYSTRING) // B_ASN1_DISPLAYTEXT is a bitmask of types allowed in an X.509 DisplayText (RFC // 5280). #define B_ASN1_DISPLAYTEXT \ (B_ASN1_IA5STRING | B_ASN1_VISIBLESTRING | B_ASN1_BMPSTRING | \ B_ASN1_UTF8STRING) // DISPLAYTEXT_new returns a newly-allocated |ASN1_STRING| with type -1, or NULL // on error. The resulting |ASN1_STRING| is not a valid X.509 DisplayText until // initialized with a value. OPENSSL_EXPORT ASN1_STRING *DISPLAYTEXT_new(void); // DISPLAYTEXT_free calls |ASN1_STRING_free|. OPENSSL_EXPORT void DISPLAYTEXT_free(ASN1_STRING *str); // d2i_DISPLAYTEXT parses up to |len| bytes from |*inp| as a DER-encoded X.509 // DisplayText (RFC 5280), as described in |d2i_SAMPLE|. // // TODO(https://crbug.com/boringssl/354): This function currently also accepts // BER, but this will be removed in the future. // // TODO(https://crbug.com/boringssl/449): DisplayText's size limits are not // currently enforced. 
OPENSSL_EXPORT ASN1_STRING *d2i_DISPLAYTEXT(ASN1_STRING **out, const uint8_t **inp, long len); // i2d_DISPLAYTEXT marshals |in| as a DER-encoded X.509 DisplayText (RFC 5280), // as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_DISPLAYTEXT(const ASN1_STRING *in, uint8_t **outp); // DISPLAYTEXT is an |ASN1_ITEM| whose ASN.1 type is X.509 DisplayText (RFC // 5280) and C type is |ASN1_STRING*|. DECLARE_ASN1_ITEM(DISPLAYTEXT) // Bit strings. // // An ASN.1 BIT STRING type represents a string of bits. The string may not // necessarily be a whole number of bytes. BIT STRINGs occur in ASN.1 structures // in several forms: // // Some BIT STRINGs represent a bitmask of named bits, such as the X.509 key // usage extension in RFC 5280, section 4.2.1.3. For such bit strings, DER // imposes an additional restriction that trailing zero bits are removed. Some // functions like |ASN1_BIT_STRING_set_bit| help in maintaining this. // // Other BIT STRINGs are arbitrary strings of bits used as identifiers and do // not have this constraint, such as the X.509 issuerUniqueID field. // // Finally, some structures use BIT STRINGs as a container for byte strings. For // example, the signatureValue field in X.509 and the subjectPublicKey field in // SubjectPublicKeyInfo are defined as BIT STRINGs with a value specific to the // AlgorithmIdentifier. While some unknown algorithm could choose to store // arbitrary bit strings, all supported algorithms use a byte string, with bit // order matching the DER encoding. Callers interpreting a BIT STRING as a byte // string should use |ASN1_BIT_STRING_num_bytes| instead of |ASN1_STRING_length| // and reject bit strings that are not a whole number of bytes. // // This library represents BIT STRINGs as |ASN1_STRING|s with type // |V_ASN1_BIT_STRING|. The data contains the encoded form of the BIT STRING, // including any padding bits added to round to a whole number of bytes, but // excluding the leading byte containing the number of padding bits. 
If // |ASN1_STRING_FLAG_BITS_LEFT| is set, the bottom three bits contain the // number of padding bits. For example, DER encodes the BIT STRING {1, 0} as // {0x06, 0x80 = 0b10_000000}. The |ASN1_STRING| representation has data of // {0x80} and flags of ASN1_STRING_FLAG_BITS_LEFT | 6. If // |ASN1_STRING_FLAG_BITS_LEFT| is unset, trailing zero bits are implicitly // removed. Callers should not rely on this representation when constructing bit // strings. The padding bits in the |ASN1_STRING| data must be zero. // ASN1_BIT_STRING_new calls |ASN1_STRING_type_new| with |V_ASN1_BIT_STRING|. OPENSSL_EXPORT ASN1_BIT_STRING *ASN1_BIT_STRING_new(void); // ASN1_BIT_STRING_free calls |ASN1_STRING_free|. OPENSSL_EXPORT void ASN1_BIT_STRING_free(ASN1_BIT_STRING *str); // d2i_ASN1_BIT_STRING parses up to |len| bytes from |*inp| as a DER-encoded // ASN.1 BIT STRING, as described in |d2i_SAMPLE|. OPENSSL_EXPORT ASN1_BIT_STRING *d2i_ASN1_BIT_STRING(ASN1_BIT_STRING **out, const uint8_t **inp, long len); // i2d_ASN1_BIT_STRING marshals |in| as a DER-encoded ASN.1 BIT STRING, as // described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_ASN1_BIT_STRING(const ASN1_BIT_STRING *in, uint8_t **outp); // c2i_ASN1_BIT_STRING decodes |len| bytes from |*inp| as the contents of a // DER-encoded BIT STRING, excluding the tag and length. It behaves like // |d2i_SAMPLE| except, on success, it always consumes all |len| bytes. OPENSSL_EXPORT ASN1_BIT_STRING *c2i_ASN1_BIT_STRING(ASN1_BIT_STRING **out, const uint8_t **inp, long len); // i2c_ASN1_BIT_STRING encodes |in| as the contents of a DER-encoded BIT STRING, // excluding the tag and length. If |outp| is non-NULL, it writes the result to // |*outp|, advances |*outp| just past the output, and returns the number of // bytes written. |*outp| must have space available for the result. If |outp| is // NULL, it returns the number of bytes without writing anything. On error, it // returns a value <= 0. // // Note this function differs slightly from |i2d_SAMPLE|. 
If |outp| is non-NULL // and |*outp| is NULL, it does not allocate a new buffer. // // TODO(davidben): This function currently returns zero on error instead of -1, // but it is also mostly infallible. I've currently documented <= 0 to suggest // callers work with both. OPENSSL_EXPORT int i2c_ASN1_BIT_STRING(const ASN1_BIT_STRING *in, uint8_t **outp); // ASN1_BIT_STRING is an |ASN1_ITEM| with ASN.1 type BIT STRING and C type // |ASN1_BIT_STRING*|. DECLARE_ASN1_ITEM(ASN1_BIT_STRING) // ASN1_BIT_STRING_num_bytes computes the length of |str| in bytes. If |str|'s // bit length is a multiple of 8, it sets |*out| to the byte length and returns // one. Otherwise, it returns zero. // // This function may be used with |ASN1_STRING_get0_data| to interpret |str| as // a byte string. OPENSSL_EXPORT int ASN1_BIT_STRING_num_bytes(const ASN1_BIT_STRING *str, size_t *out); // ASN1_BIT_STRING_set calls |ASN1_STRING_set|. It leaves flags unchanged, so // the caller must set the number of unused bits. // // TODO(davidben): Maybe it should? Wrapping a byte string in a bit string is a // common use case. OPENSSL_EXPORT int ASN1_BIT_STRING_set(ASN1_BIT_STRING *str, const unsigned char *d, ossl_ssize_t length); // ASN1_BIT_STRING_set_bit sets bit |n| of |str| to one if |value| is non-zero // and zero if |value| is zero, resizing |str| as needed. It then truncates // trailing zeros in |str| to align with the DER representation for a bit string // with named bits. It returns one on success and zero on error. |n| is indexed // beginning from zero. OPENSSL_EXPORT int ASN1_BIT_STRING_set_bit(ASN1_BIT_STRING *str, int n, int value); // ASN1_BIT_STRING_get_bit returns one if bit |n| of |a| is in bounds and set, // and zero otherwise. |n| is indexed beginning from zero. OPENSSL_EXPORT int ASN1_BIT_STRING_check returns one if |str| only contains bits that are set in // the |flags_len| bytes pointed to by |flags|. Otherwise it returns zero. 
Bits in // |flags| are arranged according to the DER representation, so bit 0 // corresponds to the MSB of |flags[0]|. OPENSSL_EXPORT int ASN1_BIT_STRING_check(const ASN1_BIT_STRING *str, const unsigned char *flags, int flags_len); // Integers and enumerated values. // // INTEGER and ENUMERATED values are represented as |ASN1_STRING|s where the // data contains the big-endian encoding of the absolute value of the integer. // The sign bit is encoded in the type: non-negative values have a type of // |V_ASN1_INTEGER| or |V_ASN1_ENUMERATED|, while negative values have a type of // |V_ASN1_NEG_INTEGER| or |V_ASN1_NEG_ENUMERATED|. Note this differs from DER's // two's complement representation. // // The data in the |ASN1_STRING| may not have leading zeros. Note this means // zero is represented as the empty string. Parsing functions will never return // invalid representations. If an invalid input is constructed, the marshaling // functions will skip leading zeros, however other functions, such as // |ASN1_INTEGER_cmp| or |ASN1_INTEGER_get|, may not return the correct result. DEFINE_STACK_OF(ASN1_INTEGER) // ASN1_INTEGER_new calls |ASN1_STRING_type_new| with |V_ASN1_INTEGER|. The // resulting object has value zero. OPENSSL_EXPORT ASN1_INTEGER *ASN1_INTEGER_new(void); // ASN1_INTEGER_free calls |ASN1_STRING_free|. OPENSSL_EXPORT void ASN1_INTEGER_free(ASN1_INTEGER *str); // ASN1_INTEGER_dup calls |ASN1_STRING_dup|. OPENSSL_EXPORT ASN1_INTEGER *ASN1_INTEGER_dup(const ASN1_INTEGER *x); // d2i_ASN1_INTEGER parses up to |len| bytes from |*inp| as a DER-encoded // ASN.1 INTEGER, as described in |d2i_SAMPLE|. OPENSSL_EXPORT ASN1_INTEGER *d2i_ASN1_INTEGER(ASN1_INTEGER **out, const uint8_t **inp, long len); // i2d_ASN1_INTEGER marshals |in| as a DER-encoded ASN.1 INTEGER, as // described in |i2d_SAMPLE|. 
OPENSSL_EXPORT int i2d_ASN1_INTEGER(const ASN1_INTEGER *in, uint8_t **outp); // c2i_ASN1_INTEGER decodes |len| bytes from |*inp| as the contents of a // DER-encoded INTEGER, excluding the tag and length. It behaves like // |d2i_SAMPLE| except, on success, it always consumes all |len| bytes. OPENSSL_EXPORT ASN1_INTEGER *c2i_ASN1_INTEGER(ASN1_INTEGER **in, const uint8_t **outp, long len); // i2c_ASN1_INTEGER encodes |in| as the contents of a DER-encoded INTEGER, // excluding the tag and length. If |outp| is non-NULL, it writes the result to // |*outp|, advances |*outp| just past the output, and returns the number of // bytes written. |*outp| must have space available for the result. If |outp| is // NULL, it returns the number of bytes without writing anything. On error, it // returns a value <= 0. // // Note this function differs slightly from |i2d_SAMPLE|. If |outp| is non-NULL // and |*outp| is NULL, it does not allocate a new buffer. // // TODO(davidben): This function currently returns zero on error instead of -1, // but it is also mostly infallible. I've currently documented <= 0 to suggest // callers work with both. OPENSSL_EXPORT int i2c_ASN1_INTEGER(const ASN1_INTEGER *in, uint8_t **outp); // ASN1_INTEGER is an |ASN1_ITEM| with ASN.1 type INTEGER and C type // |ASN1_INTEGER*|. DECLARE_ASN1_ITEM(ASN1_INTEGER) // ASN1_INTEGER_set_uint64 sets |a| to an INTEGER with value |v|. It returns one // on success and zero on error. OPENSSL_EXPORT int ASN1_INTEGER_set_uint64(ASN1_INTEGER *out, uint64_t v); // ASN1_INTEGER_set_int64 sets |a| to an INTEGER with value |v|. It returns one // on success and zero on error. OPENSSL_EXPORT int ASN1_INTEGER_set_int64(ASN1_INTEGER *out, int64_t v); // ASN1_INTEGER_get_uint64 converts |a| to a |uint64_t|. On success, it returns // one and sets |*out| to the result. If |a| did not fit or has the wrong type, // it returns zero. 
OPENSSL_EXPORT int ASN1_INTEGER_get_uint64(uint64_t *out,
                                           const ASN1_INTEGER *a);

// ASN1_INTEGER_get_int64 converts |a| to an |int64_t|. On success, it returns
// one and sets |*out| to the result. If |a| did not fit or has the wrong type,
// it returns zero.
OPENSSL_EXPORT int ASN1_INTEGER_get_int64(int64_t *out, const ASN1_INTEGER *a);

// BN_to_ASN1_INTEGER sets |ai| to an INTEGER with value |bn| and returns |ai|
// on success or NULL on error. If |ai| is NULL, it returns a newly-allocated
// |ASN1_INTEGER| on success instead, which the caller must release with
// |ASN1_INTEGER_free|.
OPENSSL_EXPORT ASN1_INTEGER *BN_to_ASN1_INTEGER(const BIGNUM *bn,
                                                ASN1_INTEGER *ai);

// ASN1_INTEGER_to_BN sets |bn| to the value of |ai| and returns |bn| on
// success or NULL on error. If |bn| is NULL, it returns a newly-allocated
// |BIGNUM| on success instead, which the caller must release with |BN_free|.
OPENSSL_EXPORT BIGNUM *ASN1_INTEGER_to_BN(const ASN1_INTEGER *ai, BIGNUM *bn);

// ASN1_INTEGER_cmp compares the values of |x| and |y|. It returns an integer
// equal to, less than, or greater than zero if |x| is equal to, less than, or
// greater than |y|, respectively.
OPENSSL_EXPORT int ASN1_INTEGER_cmp(const ASN1_INTEGER *x,
                                    const ASN1_INTEGER *y);

// ASN1_ENUMERATED_new calls |ASN1_STRING_type_new| with |V_ASN1_ENUMERATED|.
// The resulting object has value zero.
OPENSSL_EXPORT ASN1_ENUMERATED *ASN1_ENUMERATED_new(void);

// ASN1_ENUMERATED_free calls |ASN1_STRING_free|.
OPENSSL_EXPORT void ASN1_ENUMERATED_free(ASN1_ENUMERATED *str);

// d2i_ASN1_ENUMERATED parses up to |len| bytes from |*inp| as a DER-encoded
// ASN.1 ENUMERATED, as described in |d2i_SAMPLE|.
OPENSSL_EXPORT ASN1_ENUMERATED *d2i_ASN1_ENUMERATED(ASN1_ENUMERATED **out,
                                                    const uint8_t **inp,
                                                    long len);

// i2d_ASN1_ENUMERATED marshals |in| as a DER-encoded ASN.1 ENUMERATED, as
// described in |i2d_SAMPLE|.
OPENSSL_EXPORT int i2d_ASN1_ENUMERATED(const ASN1_ENUMERATED *in, uint8_t **outp); // ASN1_ENUMERATED is an |ASN1_ITEM| with ASN.1 type ENUMERATED and C type // |ASN1_ENUMERATED*|. DECLARE_ASN1_ITEM(ASN1_ENUMERATED) // ASN1_ENUMERATED_set_uint64 sets |a| to an ENUMERATED with value |v|. It // returns one on success and zero on error. OPENSSL_EXPORT int ASN1_ENUMERATED_set_uint64(ASN1_ENUMERATED *out, uint64_t v); // ASN1_ENUMERATED_set_int64 sets |a| to an ENUMERATED with value |v|. It // returns one on success and zero on error. OPENSSL_EXPORT int ASN1_ENUMERATED_set_int64(ASN1_ENUMERATED *out, int64_t v); // ASN1_ENUMERATED_get_uint64 converts |a| to a |uint64_t|. On success, it // returns one and sets |*out| to the result. If |a| did not fit or has the // wrong type, it returns zero. OPENSSL_EXPORT int ASN1_ENUMERATED_get_uint64(uint64_t *out, const ASN1_ENUMERATED *a); // ASN1_ENUMERATED_get_int64 converts |a| to a |int64_t|. On success, it // returns one and sets |*out| to the result. If |a| did not fit or has the // wrong type, it returns zero. OPENSSL_EXPORT int ASN1_ENUMERATED_get_int64(int64_t *out, const ASN1_ENUMERATED *a); // BN_to_ASN1_ENUMERATED sets |ai| to an ENUMERATED with value |bn| and returns // |ai| on success or NULL or error. If |ai| is NULL, it returns a // newly-allocated |ASN1_ENUMERATED| on success instead, which the caller must // release with |ASN1_ENUMERATED_free|. OPENSSL_EXPORT ASN1_ENUMERATED *BN_to_ASN1_ENUMERATED(const BIGNUM *bn, ASN1_ENUMERATED *ai); // ASN1_ENUMERATED_to_BN sets |bn| to the value of |ai| and returns |bn| on // success or NULL or error. If |bn| is NULL, it returns a newly-allocated // |BIGNUM| on success instead, which the caller must release with |BN_free|. OPENSSL_EXPORT BIGNUM *ASN1_ENUMERATED_to_BN(const ASN1_ENUMERATED *ai, BIGNUM *bn); // Time. // // GeneralizedTime and UTCTime values are represented as |ASN1_STRING|s. The // type field is |V_ASN1_GENERALIZEDTIME| or |V_ASN1_UTCTIME|, respectively. 
The // data field contains the DER encoding of the value. For example, the UNIX // epoch would be "19700101000000Z" for a GeneralizedTime and "700101000000Z" // for a UTCTime. // // ASN.1 does not define how to interpret UTCTime's two-digit year. RFC 5280 // defines it as a range from 1950 to 2049 for X.509. The library uses the // RFC 5280 interpretation. It does not currently enforce the restrictions from // BER, and the additional restrictions from RFC 5280, but future versions may. // Callers should not rely on fractional seconds and non-UTC time zones. // // The |ASN1_TIME| typedef is a multi-string representing the X.509 Time type, // which is a CHOICE of GeneralizedTime and UTCTime, using UTCTime when the // value is in range. // ASN1_UTCTIME_new calls |ASN1_STRING_type_new| with |V_ASN1_UTCTIME|. The // resulting object contains empty contents and must be initialized to be a // valid UTCTime. OPENSSL_EXPORT ASN1_UTCTIME *ASN1_UTCTIME_new(void); // ASN1_UTCTIME_free calls |ASN1_STRING_free|. OPENSSL_EXPORT void ASN1_UTCTIME_free(ASN1_UTCTIME *str); // d2i_ASN1_UTCTIME parses up to |len| bytes from |*inp| as a DER-encoded // ASN.1 UTCTime, as described in |d2i_SAMPLE|. // // TODO(https://crbug.com/boringssl/354): This function currently also accepts // BER, but this will be removed in the future. OPENSSL_EXPORT ASN1_UTCTIME *d2i_ASN1_UTCTIME(ASN1_UTCTIME **out, const uint8_t **inp, long len); // i2d_ASN1_UTCTIME marshals |in| as a DER-encoded ASN.1 UTCTime, as // described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_ASN1_UTCTIME(const ASN1_UTCTIME *in, uint8_t **outp); // ASN1_UTCTIME is an |ASN1_ITEM| with ASN.1 type UTCTime and C type // |ASN1_UTCTIME*|. DECLARE_ASN1_ITEM(ASN1_UTCTIME) // ASN1_UTCTIME_check returns one if |a| is a valid UTCTime and zero otherwise. OPENSSL_EXPORT int ASN1_UTCTIME_check(const ASN1_UTCTIME *a); // ASN1_UTCTIME_set represents |posix_time| as a UTCTime and writes the result // to |s|. It returns |s| on success and NULL on error. 
If |s| is NULL, it // returns a newly-allocated |ASN1_UTCTIME| instead. // // Note this function may fail if the time is out of range for UTCTime. OPENSSL_EXPORT ASN1_UTCTIME *ASN1_UTCTIME_set(ASN1_UTCTIME *s, int64_t posix_time); // ASN1_UTCTIME_adj adds |offset_day| days and |offset_sec| seconds to // |posix_time| and writes the result to |s| as a UTCTime. It returns |s| on // success and NULL on error. If |s| is NULL, it returns a newly-allocated // |ASN1_UTCTIME| instead. // // Note this function may fail if the time overflows or is out of range for // UTCTime. OPENSSL_EXPORT ASN1_UTCTIME *ASN1_UTCTIME_adj(ASN1_UTCTIME *s, int64_t posix_time, int offset_day, long offset_sec); // ASN1_UTCTIME_set_string sets |s| to a UTCTime whose contents are a copy of // |str|. It returns one on success and zero on error or if |str| is not a valid // UTCTime. // // If |s| is NULL, this function validates |str| without copying it. OPENSSL_EXPORT int ASN1_UTCTIME_set_string(ASN1_UTCTIME *s, const char *str); // ASN1_GENERALIZEDTIME_new calls |ASN1_STRING_type_new| with // |V_ASN1_GENERALIZEDTIME|. The resulting object contains empty contents and // must be initialized to be a valid GeneralizedTime. OPENSSL_EXPORT ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_new(void); // ASN1_GENERALIZEDTIME_free calls |ASN1_STRING_free|. OPENSSL_EXPORT void ASN1_GENERALIZEDTIME_free(ASN1_GENERALIZEDTIME *str); // d2i_ASN1_GENERALIZEDTIME parses up to |len| bytes from |*inp| as a // DER-encoded ASN.1 GeneralizedTime, as described in |d2i_SAMPLE|. OPENSSL_EXPORT ASN1_GENERALIZEDTIME *d2i_ASN1_GENERALIZEDTIME( ASN1_GENERALIZEDTIME **out, const uint8_t **inp, long len); // i2d_ASN1_GENERALIZEDTIME marshals |in| as a DER-encoded ASN.1 // GeneralizedTime, as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_ASN1_GENERALIZEDTIME(const ASN1_GENERALIZEDTIME *in, uint8_t **outp); // ASN1_GENERALIZEDTIME is an |ASN1_ITEM| with ASN.1 type GeneralizedTime and C // type |ASN1_GENERALIZEDTIME*|. 
DECLARE_ASN1_ITEM(ASN1_GENERALIZEDTIME) // ASN1_GENERALIZEDTIME_check returns one if |a| is a valid GeneralizedTime and // zero otherwise. OPENSSL_EXPORT int ASN1_GENERALIZEDTIME_check(const ASN1_GENERALIZEDTIME *a); // ASN1_GENERALIZEDTIME_set represents |posix_time| as a GeneralizedTime and // writes the result to |s|. It returns |s| on success and NULL on error. If |s| // is NULL, it returns a newly-allocated |ASN1_GENERALIZEDTIME| instead. // // Note this function may fail if the time is out of range for GeneralizedTime. OPENSSL_EXPORT ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_set( ASN1_GENERALIZEDTIME *s, int64_t posix_time); // ASN1_GENERALIZEDTIME_adj adds |offset_day| days and |offset_sec| seconds to // |posix_time| and writes the result to |s| as a GeneralizedTime. It returns // |s| on success and NULL on error. If |s| is NULL, it returns a // newly-allocated |ASN1_GENERALIZEDTIME| instead. // // Note this function may fail if the time overflows or is out of range for // GeneralizedTime. OPENSSL_EXPORT ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_adj( ASN1_GENERALIZEDTIME *s, int64_t posix_time, int offset_day, long offset_sec); // ASN1_GENERALIZEDTIME_set_string sets |s| to a GeneralizedTime whose contents // are a copy of |str|. It returns one on success and zero on error or if |str| // is not a valid GeneralizedTime. // // If |s| is NULL, this function validates |str| without copying it. OPENSSL_EXPORT int ASN1_GENERALIZEDTIME_set_string(ASN1_GENERALIZEDTIME *s, const char *str); // B_ASN1_TIME is a bitmask of types allowed in an X.509 Time. #define B_ASN1_TIME (B_ASN1_UTCTIME | B_ASN1_GENERALIZEDTIME) // ASN1_TIME_new returns a newly-allocated |ASN1_TIME| with type -1, or NULL on // error. The resulting |ASN1_TIME| is not a valid X.509 Time until initialized // with a value. OPENSSL_EXPORT ASN1_TIME *ASN1_TIME_new(void); // ASN1_TIME_free releases memory associated with |str|. 
OPENSSL_EXPORT void ASN1_TIME_free(ASN1_TIME *str); // d2i_ASN1_TIME parses up to |len| bytes from |*inp| as a DER-encoded X.509 // Time (RFC 5280), as described in |d2i_SAMPLE|. // // TODO(https://crbug.com/boringssl/354): This function currently also accepts // BER, but this will be removed in the future. OPENSSL_EXPORT ASN1_TIME *d2i_ASN1_TIME(ASN1_TIME **out, const uint8_t **inp, long len); // i2d_ASN1_TIME marshals |in| as a DER-encoded X.509 Time (RFC 5280), as // described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_ASN1_TIME(const ASN1_TIME *in, uint8_t **outp); // ASN1_TIME is an |ASN1_ITEM| whose ASN.1 type is X.509 Time (RFC 5280) and C // type is |ASN1_TIME*|. DECLARE_ASN1_ITEM(ASN1_TIME) // ASN1_TIME_diff computes |to| - |from|. On success, it sets |*out_days| to the // difference in days, rounded towards zero, sets |*out_seconds| to the // remainder, and returns one. On error, it returns zero. // // If |from| is before |to|, both outputs will be <= 0, with at least one // negative. If |from| is after |to|, both will be >= 0, with at least one // positive. If they are equal, ignoring fractional seconds, both will be zero. // // Note this function may fail on overflow, or if |from| or |to| cannot be // decoded. OPENSSL_EXPORT int ASN1_TIME_diff(int *out_days, int *out_seconds, const ASN1_TIME *from, const ASN1_TIME *to); // ASN1_TIME_set_posix represents |posix_time| as a GeneralizedTime or UTCTime // and writes the result to |s|. As in RFC 5280, section 4.1.2.5, it uses // UTCTime when the time fits and GeneralizedTime otherwise. It returns |s| on // success and NULL on error. If |s| is NULL, it returns a newly-allocated // |ASN1_TIME| instead. // // Note this function may fail if the time is out of range for GeneralizedTime. OPENSSL_EXPORT ASN1_TIME *ASN1_TIME_set_posix(ASN1_TIME *s, int64_t posix_time); // ASN1_TIME_set is exactly the same as |ASN1_TIME_set_posix| but with a // time_t as input for compatibility. 
OPENSSL_EXPORT ASN1_TIME *ASN1_TIME_set(ASN1_TIME *s, time_t time);

// ASN1_TIME_adj adds |offset_day| days and |offset_sec| seconds to
// |posix_time| and writes the result to |s|. As in RFC 5280, section 4.1.2.5,
// it uses UTCTime when the time fits and GeneralizedTime otherwise. It returns
// |s| on success and NULL on error. If |s| is NULL, it returns a
// newly-allocated |ASN1_TIME| instead.
//
// Note this function may fail if the time overflows or is out of range for
// GeneralizedTime.
OPENSSL_EXPORT ASN1_TIME *ASN1_TIME_adj(ASN1_TIME *s, int64_t posix_time,
                                        int offset_day, long offset_sec);

// ASN1_TIME_check returns one if |t| is a valid UTCTime or GeneralizedTime, and
// zero otherwise. |t|'s type determines which check is performed. This
// function does not enforce that UTCTime was used when possible.
OPENSSL_EXPORT int ASN1_TIME_check(const ASN1_TIME *t);

// ASN1_TIME_to_generalizedtime converts |t| to a GeneralizedTime. If |out| is
// NULL, it returns a newly-allocated |ASN1_GENERALIZEDTIME| on success, or NULL
// on error. If |out| is non-NULL and |*out| is NULL, it additionally sets
// |*out| to the result. If |out| and |*out| are non-NULL, it instead updates
// the object pointed by |*out| and returns |*out| on success or NULL on error.
OPENSSL_EXPORT ASN1_GENERALIZEDTIME *ASN1_TIME_to_generalizedtime(
    const ASN1_TIME *t, ASN1_GENERALIZEDTIME **out);

// ASN1_TIME_set_string behaves like |ASN1_UTCTIME_set_string| if |str| is a
// valid UTCTime, and |ASN1_GENERALIZEDTIME_set_string| if |str| is a valid
// GeneralizedTime. If |str| is neither, it returns zero.
OPENSSL_EXPORT int ASN1_TIME_set_string(ASN1_TIME *s, const char *str);

// ASN1_TIME_set_string_X509 behaves like |ASN1_TIME_set_string| except it
// additionally converts GeneralizedTime to UTCTime if it is in the range where
// UTCTime is used. See RFC 5280, section 4.1.2.5.
OPENSSL_EXPORT int ASN1_TIME_set_string_X509(ASN1_TIME *s, const char *str); // ASN1_TIME_to_time_t converts |t| to a time_t value in |out|. On // success, one is returned. On failure, zero is returned. This function // will fail if the time can not be represented in a time_t. OPENSSL_EXPORT int ASN1_TIME_to_time_t(const ASN1_TIME *t, time_t *out); // ASN1_TIME_to_posix converts |t| to a POSIX time value in |out|. On // success, one is returned. On failure, zero is returned. OPENSSL_EXPORT int ASN1_TIME_to_posix(const ASN1_TIME *t, int64_t *out); // ASN1_TIME_to_posix_nonstandard converts |t| to a POSIX time value in // |out|. It is exactly the same as |ASN1_TIME_to_posix| but allows for // non-standard four-digit timezone offsets on UTC times. On success, one is // returned. On failure, zero is returned. |ASN1_TIME_to_posix| should normally // be used instead of this function. OPENSSL_EXPORT int ASN1_TIME_to_posix_nonstandard( const ASN1_TIME *t, int64_t *out); // TODO(davidben): Expand and document function prototypes generated in macros. // NULL values. // // This library represents the ASN.1 NULL value by a non-NULL pointer to the // opaque type |ASN1_NULL|. An omitted OPTIONAL ASN.1 NULL value is a NULL // pointer. Unlike other pointer types, it is not necessary to free |ASN1_NULL| // pointers, but it is safe to do so. // ASN1_NULL_new returns an opaque, non-NULL pointer. It is safe to call // |ASN1_NULL_free| on the result, but not necessary. OPENSSL_EXPORT ASN1_NULL *ASN1_NULL_new(void); // ASN1_NULL_free does nothing. OPENSSL_EXPORT void ASN1_NULL_free(ASN1_NULL *null); // d2i_ASN1_NULL parses a DER-encoded ASN.1 NULL value from up to |len| bytes // at |*inp|, as described in |d2i_SAMPLE|. OPENSSL_EXPORT ASN1_NULL *d2i_ASN1_NULL(ASN1_NULL **out, const uint8_t **inp, long len); // i2d_ASN1_NULL marshals |in| as a DER-encoded ASN.1 NULL value, as described // in |i2d_SAMPLE|. 
OPENSSL_EXPORT int i2d_ASN1_NULL(const ASN1_NULL *in, uint8_t **outp); // ASN1_NULL is an |ASN1_ITEM| with ASN.1 type NULL and C type |ASN1_NULL*|. DECLARE_ASN1_ITEM(ASN1_NULL) // Object identifiers. // // An |ASN1_OBJECT| represents a ASN.1 OBJECT IDENTIFIER. See also obj.h for // additional functions relating to |ASN1_OBJECT|. // // TODO(davidben): What's the relationship between asn1.h and obj.h? Most of // obj.h deals with the large NID table, but then functions like |OBJ_get0_data| // or |OBJ_dup| are general |ASN1_OBJECT| functions. DEFINE_STACK_OF(ASN1_OBJECT) // ASN1_OBJECT_create returns a newly-allocated |ASN1_OBJECT| with |len| bytes // from |data| as the encoded OID, or NULL on error. |data| should contain the // DER-encoded identifier, excluding the tag and length. // // |nid| should be |NID_undef|. Passing a NID value that does not match |data| // will cause some functions to misbehave. |sn| and |ln| should be NULL. If // non-NULL, they are stored as short and long names, respectively, but these // values have no effect for |ASN1_OBJECT|s created through this function. // // TODO(davidben): Should we just ignore all those parameters? NIDs and names // are only relevant for |ASN1_OBJECT|s in the obj.h table. OPENSSL_EXPORT ASN1_OBJECT *ASN1_OBJECT_create(int nid, const uint8_t *data, size_t len, const char *sn, const char *ln); // ASN1_OBJECT_free releases memory associated with |a|. If |a| is a static // |ASN1_OBJECT|, returned from |OBJ_nid2obj|, this function does nothing. OPENSSL_EXPORT void ASN1_OBJECT_free(ASN1_OBJECT *a); // d2i_ASN1_OBJECT parses a DER-encoded ASN.1 OBJECT IDENTIFIER from up to |len| // bytes at |*inp|, as described in |d2i_SAMPLE|. OPENSSL_EXPORT ASN1_OBJECT *d2i_ASN1_OBJECT(ASN1_OBJECT **out, const uint8_t **inp, long len); // i2d_ASN1_OBJECT marshals |in| as a DER-encoded ASN.1 OBJECT IDENTIFIER, as // described in |i2d_SAMPLE|. 
OPENSSL_EXPORT int i2d_ASN1_OBJECT(const ASN1_OBJECT *in, uint8_t **outp);

// c2i_ASN1_OBJECT decodes |len| bytes from |*inp| as the contents of a
// DER-encoded OBJECT IDENTIFIER, excluding the tag and length. It behaves like
// |d2i_SAMPLE| except, on success, it always consumes all |len| bytes.
OPENSSL_EXPORT ASN1_OBJECT *c2i_ASN1_OBJECT(ASN1_OBJECT **out,
                                            const uint8_t **inp, long len);

// ASN1_OBJECT is an |ASN1_ITEM| with ASN.1 type OBJECT IDENTIFIER and C type
// |ASN1_OBJECT*|.
DECLARE_ASN1_ITEM(ASN1_OBJECT)


// Arbitrary elements.

// An asn1_type_st (aka |ASN1_TYPE|) represents an arbitrary ASN.1 element,
// typically used for ANY types. It contains a |type| field and a |value| union
// dependent on |type|.
//
// WARNING: This struct has a complex representation. Callers must not
// construct |ASN1_TYPE| values manually. Use |ASN1_TYPE_set| and
// |ASN1_TYPE_set1| instead. Additionally, callers performing non-trivial
// operations on this type are encouraged to use |CBS| and |CBB| from
// <openssl/bytestring.h>, and convert to or from |ASN1_TYPE| with
// |d2i_ASN1_TYPE| or |i2d_ASN1_TYPE|.
//
// The |type| field corresponds to the tag of the ASN.1 element being
// represented:
//
// If |type| is a |V_ASN1_*| constant for an ASN.1 string-like type, as defined
// by |ASN1_STRING|, the tag matches the constant. |value| contains an
// |ASN1_STRING| pointer (equivalently, one of the more specific typedefs). See
// |ASN1_STRING| for details on the representation. Unlike |ASN1_STRING|,
// |ASN1_TYPE| does not use the |V_ASN1_NEG| flag for negative INTEGER and
// ENUMERATED values. For a negative value, the |ASN1_TYPE|'s |type| will be
// |V_ASN1_INTEGER| or |V_ASN1_ENUMERATED|, but |value| will be an
// |ASN1_STRING| whose |type| is |V_ASN1_NEG_INTEGER| or
// |V_ASN1_NEG_ENUMERATED|.
//
// If |type| is |V_ASN1_OBJECT|, the tag is OBJECT IDENTIFIER and |value|
// contains an |ASN1_OBJECT| pointer.
//
// If |type| is |V_ASN1_NULL|, the tag is NULL. |value| contains a NULL pointer.
// // If |type| is |V_ASN1_BOOLEAN|, the tag is BOOLEAN. |value| contains an // |ASN1_BOOLEAN|. // // If |type| is |V_ASN1_SEQUENCE|, |V_ASN1_SET|, or |V_ASN1_OTHER|, the tag is // SEQUENCE, SET, or some arbitrary tag, respectively. |value| uses the // corresponding |ASN1_STRING| representation. Although any type may be // represented in |V_ASN1_OTHER|, the parser will always return the more // specific encoding when available. // // Other values of |type| do not represent a valid ASN.1 value, though // default-constructed objects may set |type| to -1. Such objects cannot be // serialized. struct asn1_type_st { int type; union { char *ptr; ASN1_BOOLEAN boolean; ASN1_STRING *asn1_string; ASN1_OBJECT *object; ASN1_INTEGER *integer; ASN1_ENUMERATED *enumerated; ASN1_BIT_STRING *bit_string; ASN1_OCTET_STRING *octet_string; ASN1_PRINTABLESTRING *printablestring; ASN1_T61STRING *t61string; ASN1_IA5STRING *ia5string; ASN1_GENERALSTRING *generalstring; ASN1_BMPSTRING *bmpstring; ASN1_UNIVERSALSTRING *universalstring; ASN1_UTCTIME *utctime; ASN1_GENERALIZEDTIME *generalizedtime; ASN1_VISIBLESTRING *visiblestring; ASN1_UTF8STRING *utf8string; // set and sequence are left complete and still contain the entire element. ASN1_STRING *set; ASN1_STRING *sequence; ASN1_VALUE *asn1_value; } value; }; DEFINE_STACK_OF(ASN1_TYPE) // ASN1_TYPE_new returns a newly-allocated |ASN1_TYPE|, or NULL on allocation // failure. The resulting object has type -1 and must be initialized to be // a valid ANY value. OPENSSL_EXPORT ASN1_TYPE *ASN1_TYPE_new(void); // ASN1_TYPE_free releases memory associated with |a|. OPENSSL_EXPORT void ASN1_TYPE_free(ASN1_TYPE *a); // d2i_ASN1_TYPE parses up to |len| bytes from |*inp| as an ASN.1 value of any // type, as described in |d2i_SAMPLE|. Note this function only validates // primitive, universal types supported by this library. 
Values of type // |V_ASN1_SEQUENCE|, |V_ASN1_SET|, |V_ASN1_OTHER|, or an unsupported primitive // type must be validated by the caller when interpreting. // // TODO(https://crbug.com/boringssl/354): This function currently also accepts // BER, but this will be removed in the future. OPENSSL_EXPORT ASN1_TYPE *d2i_ASN1_TYPE(ASN1_TYPE **out, const uint8_t **inp, long len); // i2d_ASN1_TYPE marshals |in| as DER, as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_ASN1_TYPE(const ASN1_TYPE *in, uint8_t **outp); // ASN1_ANY is an |ASN1_ITEM| with ASN.1 type ANY and C type |ASN1_TYPE*|. Note // the |ASN1_ITEM| name and C type do not match. DECLARE_ASN1_ITEM(ASN1_ANY) // ASN1_TYPE_get returns the type of |a|, which will be one of the |V_ASN1_*| // constants, or zero if |a| is not fully initialized. OPENSSL_EXPORT int ASN1_TYPE_get(const ASN1_TYPE *a); // ASN1_TYPE_set sets |a| to an |ASN1_TYPE| of type |type| and value |value|, // releasing the previous contents of |a|. // // If |type| is |V_ASN1_BOOLEAN|, |a| is set to FALSE if |value| is NULL and // TRUE otherwise. If setting |a| to TRUE, |value| may be an invalid pointer, // such as (void*)1. // // If |type| is |V_ASN1_NULL|, |value| must be NULL. // // For other values of |type|, this function takes ownership of |value|, which // must point to an object of the corresponding type. See |ASN1_TYPE| for // details. OPENSSL_EXPORT void ASN1_TYPE_set(ASN1_TYPE *a, int type, void *value); // ASN1_TYPE_set1 behaves like |ASN1_TYPE_set| except it does not take ownership // of |value|. It returns one on success and zero on error. OPENSSL_EXPORT int ASN1_TYPE_set1(ASN1_TYPE *a, int type, const void *value); // ASN1_TYPE_cmp returns zero if |a| and |b| are equal and some non-zero value // otherwise. Note this function can only be used for equality checks, not an // ordering. 
OPENSSL_EXPORT int ASN1_TYPE_cmp(const ASN1_TYPE *a, const ASN1_TYPE *b); typedef STACK_OF(ASN1_TYPE) ASN1_SEQUENCE_ANY; // d2i_ASN1_SEQUENCE_ANY parses up to |len| bytes from |*inp| as a DER-encoded // ASN.1 SEQUENCE OF ANY structure, as described in |d2i_SAMPLE|. The resulting // |ASN1_SEQUENCE_ANY| owns its contents and thus must be released with // |sk_ASN1_TYPE_pop_free| and |ASN1_TYPE_free|, not |sk_ASN1_TYPE_free|. // // TODO(https://crbug.com/boringssl/354): This function currently also accepts // BER, but this will be removed in the future. OPENSSL_EXPORT ASN1_SEQUENCE_ANY *d2i_ASN1_SEQUENCE_ANY(ASN1_SEQUENCE_ANY **out, const uint8_t **inp, long len); // i2d_ASN1_SEQUENCE_ANY marshals |in| as a DER-encoded SEQUENCE OF ANY // structure, as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_ASN1_SEQUENCE_ANY(const ASN1_SEQUENCE_ANY *in, uint8_t **outp); // d2i_ASN1_SET_ANY parses up to |len| bytes from |*inp| as a DER-encoded ASN.1 // SET OF ANY structure, as described in |d2i_SAMPLE|. The resulting // |ASN1_SEQUENCE_ANY| owns its contents and thus must be released with // |sk_ASN1_TYPE_pop_free| and |ASN1_TYPE_free|, not |sk_ASN1_TYPE_free|. // // TODO(https://crbug.com/boringssl/354): This function currently also accepts // BER, but this will be removed in the future. OPENSSL_EXPORT ASN1_SEQUENCE_ANY *d2i_ASN1_SET_ANY(ASN1_SEQUENCE_ANY **out, const uint8_t **inp, long len); // i2d_ASN1_SET_ANY marshals |in| as a DER-encoded SET OF ANY structure, as // described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_ASN1_SET_ANY(const ASN1_SEQUENCE_ANY *in, uint8_t **outp); // Human-readable output. // // The following functions output types in some human-readable format. These // functions may be used for debugging and logging. However, the output should // not be consumed programmatically. They may be ambiguous or lose information. // ASN1_UTCTIME_print writes a human-readable representation of |a| to |out|. It // returns one on success and zero on error. 
OPENSSL_EXPORT int ASN1_UTCTIME_print(BIO *out, const ASN1_UTCTIME *a); // ASN1_GENERALIZEDTIME_print writes a human-readable representation of |a| to // |out|. It returns one on success and zero on error. OPENSSL_EXPORT int ASN1_GENERALIZEDTIME_print(BIO *out, const ASN1_GENERALIZEDTIME *a); // ASN1_TIME_print writes a human-readable representation of |a| to |out|. It // returns one on success and zero on error. OPENSSL_EXPORT int ASN1_TIME_print(BIO *out, const ASN1_TIME *a); // ASN1_STRING_print writes a human-readable representation of |str| to |out|. // It returns one on success and zero on error. Unprintable characters are // replaced with '.'. OPENSSL_EXPORT int ASN1_STRING_print(BIO *out, const ASN1_STRING *str); // The following flags must not collide with |XN_FLAG_*|. // ASN1_STRFLGS_ESC_2253 causes characters to be escaped as in RFC 2253, section // 2.4. #define ASN1_STRFLGS_ESC_2253 1ul // ASN1_STRFLGS_ESC_CTRL causes all control characters to be escaped. #define ASN1_STRFLGS_ESC_CTRL 2ul // ASN1_STRFLGS_ESC_MSB causes all characters above 127 to be escaped. #define ASN1_STRFLGS_ESC_MSB 4ul // ASN1_STRFLGS_ESC_QUOTE causes the string to be surrounded by quotes, rather // than using backslashes, when characters are escaped. Fewer characters will // require escapes in this case. #define ASN1_STRFLGS_ESC_QUOTE 8ul // ASN1_STRFLGS_UTF8_CONVERT causes the string to be encoded as UTF-8, with each // byte in the UTF-8 encoding treated as an individual character for purposes of // escape sequences. If not set, each Unicode codepoint in the string is treated // as a character, with wide characters escaped as "\Uxxxx" or "\Wxxxxxxxx". // Note this can be ambiguous if |ASN1_STRFLGS_ESC_*| are all unset. In that // case, backslashes are not escaped, but wide characters are. #define ASN1_STRFLGS_UTF8_CONVERT 0x10ul // ASN1_STRFLGS_IGNORE_TYPE causes the string type to be ignored. The // |ASN1_STRING| in-memory representation will be printed directly. 
#define ASN1_STRFLGS_IGNORE_TYPE 0x20ul // ASN1_STRFLGS_SHOW_TYPE causes the string type to be included in the output. #define ASN1_STRFLGS_SHOW_TYPE 0x40ul // ASN1_STRFLGS_DUMP_ALL causes all strings to be printed as a hexdump, using // RFC 2253 hexstring notation, such as "#0123456789ABCDEF". #define ASN1_STRFLGS_DUMP_ALL 0x80ul // ASN1_STRFLGS_DUMP_UNKNOWN behaves like |ASN1_STRFLGS_DUMP_ALL| but only // applies to values of unknown type. If unset, unknown values will print // their contents as single-byte characters with escape sequences. #define ASN1_STRFLGS_DUMP_UNKNOWN 0x100ul // ASN1_STRFLGS_DUMP_DER causes hexdumped strings (as determined by // |ASN1_STRFLGS_DUMP_ALL| or |ASN1_STRFLGS_DUMP_UNKNOWN|) to print the entire // DER element as in RFC 2253, rather than only the contents of the // |ASN1_STRING|. #define ASN1_STRFLGS_DUMP_DER 0x200ul // ASN1_STRFLGS_RFC2253 causes the string to be escaped as in RFC 2253, // additionally escaping control characters. #define ASN1_STRFLGS_RFC2253 \ (ASN1_STRFLGS_ESC_2253 | ASN1_STRFLGS_ESC_CTRL | ASN1_STRFLGS_ESC_MSB | \ ASN1_STRFLGS_UTF8_CONVERT | ASN1_STRFLGS_DUMP_UNKNOWN | \ ASN1_STRFLGS_DUMP_DER) // ASN1_STRING_print_ex writes a human-readable representation of |str| to // |out|. It returns the number of bytes written on success and -1 on error. If // |out| is NULL, it returns the number of bytes it would have written, without // writing anything. // // The |flags| should be a combination of combination of |ASN1_STRFLGS_*| // constants. See the documentation for each flag for how it controls the // output. If unsure, use |ASN1_STRFLGS_RFC2253|. OPENSSL_EXPORT int ASN1_STRING_print_ex(BIO *out, const ASN1_STRING *str, unsigned long flags); // ASN1_STRING_print_ex_fp behaves like |ASN1_STRING_print_ex| but writes to a // |FILE| rather than a |BIO|. 
OPENSSL_EXPORT int ASN1_STRING_print_ex_fp(FILE *fp, const ASN1_STRING *str, unsigned long flags); // i2a_ASN1_INTEGER writes a human-readable representation of |a| to |bp|. It // returns the number of bytes written on success, or a negative number on // error. On error, this function may have written a partial output to |bp|. OPENSSL_EXPORT int i2a_ASN1_INTEGER(BIO *bp, const ASN1_INTEGER *a); // i2a_ASN1_ENUMERATED writes a human-readable representation of |a| to |bp|. It // returns the number of bytes written on success, or a negative number on // error. On error, this function may have written a partial output to |bp|. OPENSSL_EXPORT int i2a_ASN1_ENUMERATED(BIO *bp, const ASN1_ENUMERATED *a); // i2a_ASN1_OBJECT writes a human-readable representation of |a| to |bp|. It // returns the number of bytes written on success, or a negative number on // error. On error, this function may have written a partial output to |bp|. OPENSSL_EXPORT int i2a_ASN1_OBJECT(BIO *bp, const ASN1_OBJECT *a); // i2a_ASN1_STRING writes a text representation of |a|'s contents to |bp|. It // returns the number of bytes written on success, or a negative number on // error. On error, this function may have written a partial output to |bp|. // |type| is ignored. // // This function does not decode |a| into a Unicode string. It only hex-encodes // the internal representation of |a|. This is suitable for printing an OCTET // STRING, but may not be human-readable for any other string type. OPENSSL_EXPORT int i2a_ASN1_STRING(BIO *bp, const ASN1_STRING *a, int type); // i2t_ASN1_OBJECT calls |OBJ_obj2txt| with |always_return_oid| set to zero. OPENSSL_EXPORT int i2t_ASN1_OBJECT(char *buf, int buf_len, const ASN1_OBJECT *a); // Low-level encoding functions. // ASN1_get_object parses a BER element from up to |max_len| bytes at |*inp|. It // returns |V_ASN1_CONSTRUCTED| if it successfully parsed a constructed element, // zero if it successfully parsed a primitive element, and 0x80 on error. 
On // success, it additionally advances |*inp| to the element body, sets // |*out_length|, |*out_tag|, and |*out_class| to the element's length, tag // number, and tag class, respectively, // // Unlike OpenSSL, this function only supports DER. Indefinite and non-minimal // lengths are rejected. // // This function is difficult to use correctly. Use |CBS_get_asn1| and related // functions from bytestring.h. OPENSSL_EXPORT int ASN1_get_object(const unsigned char **inp, long *out_length, int *out_tag, int *out_class, long max_len); // ASN1_put_object writes the header for a DER or BER element to |*outp| and // advances |*outp| by the number of bytes written. The caller is responsible // for ensuring |*outp| has enough space for the output. The header describes an // element with length |length|, tag number |tag|, and class |xclass|. |xclass| // should be one of the |V_ASN1_*| tag class constants. The element is primitive // if |constructed| is zero and constructed if it is one or two. If // |constructed| is two, |length| is ignored and the element uses // indefinite-length encoding. // // Use |CBB_add_asn1| instead. OPENSSL_EXPORT void ASN1_put_object(unsigned char **outp, int constructed, int length, int tag, int xclass); // ASN1_put_eoc writes two zero bytes to |*outp|, advances |*outp| to point past // those bytes, and returns two. // // Use definite-length encoding instead. OPENSSL_EXPORT int ASN1_put_eoc(unsigned char **outp); // ASN1_object_size returns the number of bytes needed to encode a DER or BER // value with length |length| and tag number |tag|, or -1 on error. |tag| should // not include the constructed bit or tag class. If |constructed| is zero or // one, the result uses a definite-length encoding with minimally-encoded // length, as in DER. If |constructed| is two, the result uses BER // indefinite-length encoding. // // Use |CBB_add_asn1| instead. 
OPENSSL_EXPORT int ASN1_object_size(int constructed, int length, int tag); // Function declaration macros. // // The following macros declare functions for ASN.1 types. Prefer writing the // prototypes directly. Particularly when |type|, |itname|, or |name| differ, // the macros can be difficult to understand. #define DECLARE_ASN1_FUNCTIONS(type) DECLARE_ASN1_FUNCTIONS_name(type, type) #define DECLARE_ASN1_ALLOC_FUNCTIONS(type) \ DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, type) #define DECLARE_ASN1_FUNCTIONS_name(type, name) \ DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \ DECLARE_ASN1_ENCODE_FUNCTIONS(type, name, name) #define DECLARE_ASN1_FUNCTIONS_fname(type, itname, name) \ DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \ DECLARE_ASN1_ENCODE_FUNCTIONS(type, itname, name) #define DECLARE_ASN1_ENCODE_FUNCTIONS(type, itname, name) \ OPENSSL_EXPORT type *d2i_##name(type **a, const unsigned char **in, \ long len); \ OPENSSL_EXPORT int i2d_##name(type *a, unsigned char **out); \ DECLARE_ASN1_ITEM(itname) #define DECLARE_ASN1_ENCODE_FUNCTIONS_const(type, name) \ OPENSSL_EXPORT type *d2i_##name(type **a, const unsigned char **in, \ long len); \ OPENSSL_EXPORT int i2d_##name(const type *a, unsigned char **out); \ DECLARE_ASN1_ITEM(name) #define DECLARE_ASN1_FUNCTIONS_const(name) \ DECLARE_ASN1_ALLOC_FUNCTIONS(name) \ DECLARE_ASN1_ENCODE_FUNCTIONS_const(name, name) #define DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \ OPENSSL_EXPORT type *name##_new(void); \ OPENSSL_EXPORT void name##_free(type *a); // Deprecated functions. // ASN1_STRING_set_default_mask does nothing. OPENSSL_EXPORT void ASN1_STRING_set_default_mask(unsigned long mask); // ASN1_STRING_set_default_mask_asc returns one. OPENSSL_EXPORT int ASN1_STRING_set_default_mask_asc(const char *p); // ASN1_STRING_get_default_mask returns |B_ASN1_UTF8STRING|. OPENSSL_EXPORT unsigned long ASN1_STRING_get_default_mask(void); // ASN1_STRING_TABLE_cleanup does nothing. 
OPENSSL_EXPORT void ASN1_STRING_TABLE_cleanup(void); // M_ASN1_* are legacy aliases for various |ASN1_STRING| functions. Use the // functions themselves. #define M_ASN1_STRING_length(x) ASN1_STRING_length(x) #define M_ASN1_STRING_type(x) ASN1_STRING_type(x) #define M_ASN1_STRING_data(x) ASN1_STRING_data(x) #define M_ASN1_BIT_STRING_new() ASN1_BIT_STRING_new() #define M_ASN1_BIT_STRING_free(a) ASN1_BIT_STRING_free(a) #define M_ASN1_BIT_STRING_dup(a) ASN1_STRING_dup(a) #define M_ASN1_BIT_STRING_cmp(a, b) ASN1_STRING_cmp(a, b) #define M_ASN1_BIT_STRING_set(a, b, c) ASN1_BIT_STRING_set(a, b, c) #define M_ASN1_INTEGER_new() ASN1_INTEGER_new() #define M_ASN1_INTEGER_free(a) ASN1_INTEGER_free(a) #define M_ASN1_INTEGER_dup(a) ASN1_INTEGER_dup(a) #define M_ASN1_INTEGER_cmp(a, b) ASN1_INTEGER_cmp(a, b) #define M_ASN1_ENUMERATED_new() ASN1_ENUMERATED_new() #define M_ASN1_ENUMERATED_free(a) ASN1_ENUMERATED_free(a) #define M_ASN1_ENUMERATED_dup(a) ASN1_STRING_dup(a) #define M_ASN1_ENUMERATED_cmp(a, b) ASN1_STRING_cmp(a, b) #define M_ASN1_OCTET_STRING_new() ASN1_OCTET_STRING_new() #define M_ASN1_OCTET_STRING_free(a) ASN1_OCTET_STRING_free() #define M_ASN1_OCTET_STRING_dup(a) ASN1_OCTET_STRING_dup(a) #define M_ASN1_OCTET_STRING_cmp(a, b) ASN1_OCTET_STRING_cmp(a, b) #define M_ASN1_OCTET_STRING_set(a, b, c) ASN1_OCTET_STRING_set(a, b, c) #define M_ASN1_OCTET_STRING_print(a, b) ASN1_STRING_print(a, b) #define M_ASN1_PRINTABLESTRING_new() ASN1_PRINTABLESTRING_new() #define M_ASN1_PRINTABLESTRING_free(a) ASN1_PRINTABLESTRING_free(a) #define M_ASN1_IA5STRING_new() ASN1_IA5STRING_new() #define M_ASN1_IA5STRING_free(a) ASN1_IA5STRING_free(a) #define M_ASN1_IA5STRING_dup(a) ASN1_STRING_dup(a) #define M_ASN1_UTCTIME_new() ASN1_UTCTIME_new() #define M_ASN1_UTCTIME_free(a) ASN1_UTCTIME_free(a) #define M_ASN1_UTCTIME_dup(a) ASN1_STRING_dup(a) #define M_ASN1_T61STRING_new() ASN1_T61STRING_new() #define M_ASN1_T61STRING_free(a) ASN1_T61STRING_free(a) #define M_ASN1_GENERALIZEDTIME_new() 
ASN1_GENERALIZEDTIME_new() #define M_ASN1_GENERALIZEDTIME_free(a) ASN1_GENERALIZEDTIME_free(a) #define M_ASN1_GENERALIZEDTIME_dup(a) ASN1_STRING_dup(a) #define M_ASN1_GENERALSTRING_new() ASN1_GENERALSTRING_new() #define M_ASN1_GENERALSTRING_free(a) ASN1_GENERALSTRING_free(a) #define M_ASN1_UNIVERSALSTRING_new() ASN1_UNIVERSALSTRING_new() #define M_ASN1_UNIVERSALSTRING_free(a) ASN1_UNIVERSALSTRING_free(a) #define M_ASN1_BMPSTRING_new() ASN1_BMPSTRING_new() #define M_ASN1_BMPSTRING_free(a) ASN1_BMPSTRING_free(a) #define M_ASN1_VISIBLESTRING_new() ASN1_VISIBLESTRING_new() #define M_ASN1_VISIBLESTRING_free(a) ASN1_VISIBLESTRING_free(a) #define M_ASN1_UTF8STRING_new() ASN1_UTF8STRING_new() #define M_ASN1_UTF8STRING_free(a) ASN1_UTF8STRING_free(a) // B_ASN1_PRINTABLE is a bitmask for an ad-hoc subset of string-like types. Note // the presence of |B_ASN1_UNKNOWN| means it includes types which |ASN1_tag2bit| // maps to |B_ASN1_UNKNOWN|. // // Do not use this. Despite the name, it has no connection to PrintableString or // printable characters. See https://crbug.com/boringssl/412. #define B_ASN1_PRINTABLE \ (B_ASN1_NUMERICSTRING | B_ASN1_PRINTABLESTRING | B_ASN1_T61STRING | \ B_ASN1_IA5STRING | B_ASN1_BIT_STRING | B_ASN1_UNIVERSALSTRING | \ B_ASN1_BMPSTRING | B_ASN1_UTF8STRING | B_ASN1_SEQUENCE | B_ASN1_UNKNOWN) // ASN1_PRINTABLE_new returns a newly-allocated |ASN1_STRING| with type -1, or // NULL on error. The resulting |ASN1_STRING| is not a valid ASN.1 value until // initialized with a value. OPENSSL_EXPORT ASN1_STRING *ASN1_PRINTABLE_new(void); // ASN1_PRINTABLE_free calls |ASN1_STRING_free|. OPENSSL_EXPORT void ASN1_PRINTABLE_free(ASN1_STRING *str); // d2i_ASN1_PRINTABLE parses up to |len| bytes from |*inp| as a DER-encoded // CHOICE of an ad-hoc subset of string-like types, as described in // |d2i_SAMPLE|. // // Do not use this. Despite, the name it has no connection to PrintableString or // printable characters. See https://crbug.com/boringssl/412. 
// // TODO(https://crbug.com/boringssl/354): This function currently also accepts // BER, but this will be removed in the future. OPENSSL_EXPORT ASN1_STRING *d2i_ASN1_PRINTABLE(ASN1_STRING **out, const uint8_t **inp, long len); // i2d_ASN1_PRINTABLE marshals |in| as DER, as described in |i2d_SAMPLE|. // // Do not use this. Despite the name, it has no connection to PrintableString or // printable characters. See https://crbug.com/boringssl/412. OPENSSL_EXPORT int i2d_ASN1_PRINTABLE(const ASN1_STRING *in, uint8_t **outp); // ASN1_PRINTABLE is an |ASN1_ITEM| whose ASN.1 type is a CHOICE of an ad-hoc // subset of string-like types, and whose C type is |ASN1_STRING*|. // // Do not use this. Despite the name, it has no connection to PrintableString or // printable characters. See https://crbug.com/boringssl/412. DECLARE_ASN1_ITEM(ASN1_PRINTABLE) // ASN1_INTEGER_set sets |a| to an INTEGER with value |v|. It returns one on // success and zero on error. // // Use |ASN1_INTEGER_set_uint64| and |ASN1_INTEGER_set_int64| instead. OPENSSL_EXPORT int ASN1_INTEGER_set(ASN1_INTEGER *a, long v); // ASN1_ENUMERATED_set sets |a| to an ENUMERATED with value |v|. It returns one // on success and zero on error. // // Use |ASN1_ENUMERATED_set_uint64| and |ASN1_ENUMERATED_set_int64| instead. OPENSSL_EXPORT int ASN1_ENUMERATED_set(ASN1_ENUMERATED *a, long v); // ASN1_INTEGER_get returns the value of |a| as a |long|, or -1 if |a| is out of // range or the wrong type. // // WARNING: This function's return value cannot distinguish errors from -1. // Use |ASN1_INTEGER_get_uint64| and |ASN1_INTEGER_get_int64| instead. OPENSSL_EXPORT long ASN1_INTEGER_get(const ASN1_INTEGER *a); // ASN1_ENUMERATED_get returns the value of |a| as a |long|, or -1 if |a| is out // of range or the wrong type. // // WARNING: This function's return value cannot distinguish errors from -1. // Use |ASN1_ENUMERATED_get_uint64| and |ASN1_ENUMERATED_get_int64| instead. 
OPENSSL_EXPORT long ASN1_ENUMERATED_get(const ASN1_ENUMERATED *a); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(ASN1_OBJECT, ASN1_OBJECT_free) BORINGSSL_MAKE_DELETER(ASN1_STRING, ASN1_STRING_free) BORINGSSL_MAKE_DELETER(ASN1_TYPE, ASN1_TYPE_free) BSSL_NAMESPACE_END } // extern C++ #endif #define ASN1_R_ASN1_LENGTH_MISMATCH 100 #define ASN1_R_AUX_ERROR 101 #define ASN1_R_BAD_GET_ASN1_OBJECT_CALL 102 #define ASN1_R_BAD_OBJECT_HEADER 103 #define ASN1_R_BMPSTRING_IS_WRONG_LENGTH 104 #define ASN1_R_BN_LIB 105 #define ASN1_R_BOOLEAN_IS_WRONG_LENGTH 106 #define ASN1_R_BUFFER_TOO_SMALL 107 #define ASN1_R_CONTEXT_NOT_INITIALISED 108 #define ASN1_R_DECODE_ERROR 109 #define ASN1_R_DEPTH_EXCEEDED 110 #define ASN1_R_DIGEST_AND_KEY_TYPE_NOT_SUPPORTED 111 #define ASN1_R_ENCODE_ERROR 112 #define ASN1_R_ERROR_GETTING_TIME 113 #define ASN1_R_EXPECTING_AN_ASN1_SEQUENCE 114 #define ASN1_R_EXPECTING_AN_INTEGER 115 #define ASN1_R_EXPECTING_AN_OBJECT 116 #define ASN1_R_EXPECTING_A_BOOLEAN 117 #define ASN1_R_EXPECTING_A_TIME 118 #define ASN1_R_EXPLICIT_LENGTH_MISMATCH 119 #define ASN1_R_EXPLICIT_TAG_NOT_CONSTRUCTED 120 #define ASN1_R_FIELD_MISSING 121 #define ASN1_R_FIRST_NUM_TOO_LARGE 122 #define ASN1_R_HEADER_TOO_LONG 123 #define ASN1_R_ILLEGAL_BITSTRING_FORMAT 124 #define ASN1_R_ILLEGAL_BOOLEAN 125 #define ASN1_R_ILLEGAL_CHARACTERS 126 #define ASN1_R_ILLEGAL_FORMAT 127 #define ASN1_R_ILLEGAL_HEX 128 #define ASN1_R_ILLEGAL_IMPLICIT_TAG 129 #define ASN1_R_ILLEGAL_INTEGER 130 #define ASN1_R_ILLEGAL_NESTED_TAGGING 131 #define ASN1_R_ILLEGAL_NULL 132 #define ASN1_R_ILLEGAL_NULL_VALUE 133 #define ASN1_R_ILLEGAL_OBJECT 134 #define ASN1_R_ILLEGAL_OPTIONAL_ANY 135 #define ASN1_R_ILLEGAL_OPTIONS_ON_ITEM_TEMPLATE 136 #define ASN1_R_ILLEGAL_TAGGED_ANY 137 #define ASN1_R_ILLEGAL_TIME_VALUE 138 #define ASN1_R_INTEGER_NOT_ASCII_FORMAT 139 #define ASN1_R_INTEGER_TOO_LARGE_FOR_LONG 140 #define ASN1_R_INVALID_BIT_STRING_BITS_LEFT 141 #define 
ASN1_R_INVALID_BMPSTRING 142 #define ASN1_R_INVALID_DIGIT 143 #define ASN1_R_INVALID_MODIFIER 144 #define ASN1_R_INVALID_NUMBER 145 #define ASN1_R_INVALID_OBJECT_ENCODING 146 #define ASN1_R_INVALID_SEPARATOR 147 #define ASN1_R_INVALID_TIME_FORMAT 148 #define ASN1_R_INVALID_UNIVERSALSTRING 149 #define ASN1_R_INVALID_UTF8STRING 150 #define ASN1_R_LIST_ERROR 151 #define ASN1_R_MISSING_ASN1_EOS 152 #define ASN1_R_MISSING_EOC 153 #define ASN1_R_MISSING_SECOND_NUMBER 154 #define ASN1_R_MISSING_VALUE 155 #define ASN1_R_MSTRING_NOT_UNIVERSAL 156 #define ASN1_R_MSTRING_WRONG_TAG 157 #define ASN1_R_NESTED_ASN1_ERROR 158 #define ASN1_R_NESTED_ASN1_STRING 159 #define ASN1_R_NON_HEX_CHARACTERS 160 #define ASN1_R_NOT_ASCII_FORMAT 161 #define ASN1_R_NOT_ENOUGH_DATA 162 #define ASN1_R_NO_MATCHING_CHOICE_TYPE 163 #define ASN1_R_NULL_IS_WRONG_LENGTH 164 #define ASN1_R_OBJECT_NOT_ASCII_FORMAT 165 #define ASN1_R_ODD_NUMBER_OF_CHARS 166 #define ASN1_R_SECOND_NUMBER_TOO_LARGE 167 #define ASN1_R_SEQUENCE_LENGTH_MISMATCH 168 #define ASN1_R_SEQUENCE_NOT_CONSTRUCTED 169 #define ASN1_R_SEQUENCE_OR_SET_NEEDS_CONFIG 170 #define ASN1_R_SHORT_LINE 171 #define ASN1_R_STREAMING_NOT_SUPPORTED 172 #define ASN1_R_STRING_TOO_LONG 173 #define ASN1_R_STRING_TOO_SHORT 174 #define ASN1_R_TAG_VALUE_TOO_HIGH 175 #define ASN1_R_TIME_NOT_ASCII_FORMAT 176 #define ASN1_R_TOO_LONG 177 #define ASN1_R_TYPE_NOT_CONSTRUCTED 178 #define ASN1_R_TYPE_NOT_PRIMITIVE 179 #define ASN1_R_UNEXPECTED_EOC 180 #define ASN1_R_UNIVERSALSTRING_IS_WRONG_LENGTH 181 #define ASN1_R_UNKNOWN_FORMAT 182 #define ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM 183 #define ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM 184 #define ASN1_R_UNKNOWN_TAG 185 #define ASN1_R_UNSUPPORTED_ANY_DEFINED_BY_TYPE 186 #define ASN1_R_UNSUPPORTED_PUBLIC_KEY_TYPE 187 #define ASN1_R_UNSUPPORTED_TYPE 188 #define ASN1_R_WRONG_PUBLIC_KEY_TYPE 189 #define ASN1_R_WRONG_TAG 190 #define ASN1_R_WRONG_TYPE 191 #define ASN1_R_NESTED_TOO_DEEP 192 #define ASN1_R_BAD_TEMPLATE 193 #define 
ASN1_R_INVALID_BIT_STRING_PADDING 194 #define ASN1_R_WRONG_INTEGER_TYPE 195 #define ASN1_R_INVALID_INTEGER 196 #endif // OPENSSL_HEADER_ASN1_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_asn1_mac.h ================================================ /* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. */ #include "CNIOBoringSSL_asn1.h" ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_asn1t.h ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_ASN1T_H #define OPENSSL_HEADER_ASN1T_H #include "CNIOBoringSSL_asn1.h" #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif /* Legacy ASN.1 library template definitions. * * This header is used to define new types in OpenSSL's ASN.1 implementation. 
It * is deprecated and will be unexported from the library. Use the new |CBS| and * |CBB| library in instead. */ typedef struct ASN1_TEMPLATE_st ASN1_TEMPLATE; typedef struct ASN1_TLC_st ASN1_TLC; /* Macro to obtain ASN1_ADB pointer from a type (only used internally) */ #define ASN1_ADB_ptr(iptr) ((const ASN1_ADB *)(iptr)) /* Macros for start and end of ASN1_ITEM definition */ #define ASN1_ITEM_start(itname) const ASN1_ITEM itname##_it = { #define ASN1_ITEM_end(itname) \ } \ ; /* Macros to aid ASN1 template writing */ #define ASN1_ITEM_TEMPLATE(tname) static const ASN1_TEMPLATE tname##_item_tt #define ASN1_ITEM_TEMPLATE_END(tname) \ ; \ ASN1_ITEM_start(tname) ASN1_ITYPE_PRIMITIVE, -1, &tname##_item_tt, 0, NULL, \ 0, #tname ASN1_ITEM_end(tname) /* This is a ASN1 type which just embeds a template */ /* This pair helps declare a SEQUENCE. We can do: * * ASN1_SEQUENCE(stname) = { * ... SEQUENCE components ... * } ASN1_SEQUENCE_END(stname) * * This will produce an ASN1_ITEM called stname_it * for a structure called stname. * * If you want the same structure but a different * name then use: * * ASN1_SEQUENCE(itname) = { * ... SEQUENCE components ... * } ASN1_SEQUENCE_END_name(stname, itname) * * This will create an item called itname_it using * a structure called stname. 
*/ #define ASN1_SEQUENCE(tname) static const ASN1_TEMPLATE tname##_seq_tt[] #define ASN1_SEQUENCE_END(stname) ASN1_SEQUENCE_END_name(stname, stname) #define ASN1_SEQUENCE_END_name(stname, tname) \ ; \ ASN1_ITEM_start(tname) ASN1_ITYPE_SEQUENCE, V_ASN1_SEQUENCE, tname##_seq_tt, \ sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE), NULL, sizeof(stname), \ #stname ASN1_ITEM_end(tname) #define ASN1_SEQUENCE_cb(tname, cb) \ static const ASN1_AUX tname##_aux = {NULL, 0, 0, cb, 0}; \ ASN1_SEQUENCE(tname) #define ASN1_SEQUENCE_ref(tname, cb) \ static const ASN1_AUX tname##_aux = {NULL, ASN1_AFLG_REFCOUNT, \ offsetof(tname, references), cb, 0}; \ ASN1_SEQUENCE(tname) #define ASN1_SEQUENCE_enc(tname, enc, cb) \ static const ASN1_AUX tname##_aux = {NULL, ASN1_AFLG_ENCODING, 0, cb, \ offsetof(tname, enc)}; \ ASN1_SEQUENCE(tname) #define ASN1_SEQUENCE_END_enc(stname, tname) \ ASN1_SEQUENCE_END_ref(stname, tname) #define ASN1_SEQUENCE_END_cb(stname, tname) ASN1_SEQUENCE_END_ref(stname, tname) #define ASN1_SEQUENCE_END_ref(stname, tname) \ ; \ ASN1_ITEM_start(tname) ASN1_ITYPE_SEQUENCE, V_ASN1_SEQUENCE, tname##_seq_tt, \ sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE), &tname##_aux, \ sizeof(stname), #stname ASN1_ITEM_end(tname) /* This pair helps declare a CHOICE type. We can do: * * ASN1_CHOICE(chname) = { * ... CHOICE options ... * ASN1_CHOICE_END(chname) * * This will produce an ASN1_ITEM called chname_it * for a structure called chname. The structure * definition must look like this: * typedef struct { * int type; * union { * ASN1_SOMETHING *opt1; * ASN1_SOMEOTHER *opt2; * } value; * } chname; * * the name of the selector must be 'type'. * to use an alternative selector name use the * ASN1_CHOICE_END_selector() version. 
*/ #define ASN1_CHOICE(tname) static const ASN1_TEMPLATE tname##_ch_tt[] #define ASN1_CHOICE_cb(tname, cb) \ static const ASN1_AUX tname##_aux = {NULL, 0, 0, cb, 0}; \ ASN1_CHOICE(tname) #define ASN1_CHOICE_END(stname) ASN1_CHOICE_END_name(stname, stname) #define ASN1_CHOICE_END_name(stname, tname) \ ASN1_CHOICE_END_selector(stname, tname, type) #define ASN1_CHOICE_END_selector(stname, tname, selname) \ ; \ ASN1_ITEM_start(tname) ASN1_ITYPE_CHOICE, offsetof(stname, selname), \ tname##_ch_tt, sizeof(tname##_ch_tt) / sizeof(ASN1_TEMPLATE), NULL, \ sizeof(stname), #stname ASN1_ITEM_end(tname) #define ASN1_CHOICE_END_cb(stname, tname, selname) \ ; \ ASN1_ITEM_start(tname) ASN1_ITYPE_CHOICE, offsetof(stname, selname), \ tname##_ch_tt, sizeof(tname##_ch_tt) / sizeof(ASN1_TEMPLATE), \ &tname##_aux, sizeof(stname), #stname ASN1_ITEM_end(tname) /* This helps with the template wrapper form of ASN1_ITEM */ #define ASN1_EX_TEMPLATE_TYPE(flags, tag, name, type) \ { (flags), (tag), 0, #name, ASN1_ITEM_ref(type) } /* These help with SEQUENCE or CHOICE components */ /* used to declare other types */ #define ASN1_EX_TYPE(flags, tag, stname, field, type) \ { (flags), (tag), offsetof(stname, field), #field, ASN1_ITEM_ref(type) } /* implicit and explicit helper macros */ #define ASN1_IMP_EX(stname, field, type, tag, ex) \ ASN1_EX_TYPE(ASN1_TFLG_IMPLICIT | ex, tag, stname, field, type) #define ASN1_EXP_EX(stname, field, type, tag, ex) \ ASN1_EX_TYPE(ASN1_TFLG_EXPLICIT | ex, tag, stname, field, type) /* Any defined by macros: the field used is in the table itself */ #define ASN1_ADB_OBJECT(tblname) \ { ASN1_TFLG_ADB_OID, -1, 0, #tblname, (const ASN1_ITEM *)&(tblname##_adb) } /* Plain simple type */ #define ASN1_SIMPLE(stname, field, type) ASN1_EX_TYPE(0, 0, stname, field, type) /* OPTIONAL simple type */ #define ASN1_OPT(stname, field, type) \ ASN1_EX_TYPE(ASN1_TFLG_OPTIONAL, 0, stname, field, type) /* IMPLICIT tagged simple type */ #define ASN1_IMP(stname, field, type, tag) \ 
ASN1_IMP_EX(stname, field, type, tag, 0) /* IMPLICIT tagged OPTIONAL simple type */ #define ASN1_IMP_OPT(stname, field, type, tag) \ ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_OPTIONAL) /* Same as above but EXPLICIT */ #define ASN1_EXP(stname, field, type, tag) \ ASN1_EXP_EX(stname, field, type, tag, 0) #define ASN1_EXP_OPT(stname, field, type, tag) \ ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_OPTIONAL) /* SEQUENCE OF type */ #define ASN1_SEQUENCE_OF(stname, field, type) \ ASN1_EX_TYPE(ASN1_TFLG_SEQUENCE_OF, 0, stname, field, type) /* OPTIONAL SEQUENCE OF */ #define ASN1_SEQUENCE_OF_OPT(stname, field, type) \ ASN1_EX_TYPE(ASN1_TFLG_SEQUENCE_OF | ASN1_TFLG_OPTIONAL, 0, stname, field, \ type) /* Same as above but for SET OF */ #define ASN1_SET_OF(stname, field, type) \ ASN1_EX_TYPE(ASN1_TFLG_SET_OF, 0, stname, field, type) #define ASN1_SET_OF_OPT(stname, field, type) \ ASN1_EX_TYPE(ASN1_TFLG_SET_OF | ASN1_TFLG_OPTIONAL, 0, stname, field, type) /* Finally compound types of SEQUENCE, SET, IMPLICIT, EXPLICIT and OPTIONAL */ #define ASN1_IMP_SET_OF(stname, field, type, tag) \ ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_SET_OF) #define ASN1_EXP_SET_OF(stname, field, type, tag) \ ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_SET_OF) #define ASN1_IMP_SET_OF_OPT(stname, field, type, tag) \ ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_SET_OF | ASN1_TFLG_OPTIONAL) #define ASN1_EXP_SET_OF_OPT(stname, field, type, tag) \ ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_SET_OF | ASN1_TFLG_OPTIONAL) #define ASN1_IMP_SEQUENCE_OF(stname, field, type, tag) \ ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_SEQUENCE_OF) #define ASN1_IMP_SEQUENCE_OF_OPT(stname, field, type, tag) \ ASN1_IMP_EX(stname, field, type, tag, \ ASN1_TFLG_SEQUENCE_OF | ASN1_TFLG_OPTIONAL) #define ASN1_EXP_SEQUENCE_OF(stname, field, type, tag) \ ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_SEQUENCE_OF) #define ASN1_EXP_SEQUENCE_OF_OPT(stname, field, type, tag) \ ASN1_EXP_EX(stname, field, 
type, tag, \ ASN1_TFLG_SEQUENCE_OF | ASN1_TFLG_OPTIONAL) /* Macros for the ASN1_ADB structure */ #define ASN1_ADB(name) static const ASN1_ADB_TABLE name##_adbtbl[] #define ASN1_ADB_END(name, flags, field, app_table, def, none) \ ; \ static const ASN1_ADB name##_adb = { \ flags, \ offsetof(name, field), \ app_table, \ name##_adbtbl, \ sizeof(name##_adbtbl) / sizeof(ASN1_ADB_TABLE), \ def, \ none} #define ADB_ENTRY(val, template) \ { val, template } #define ASN1_ADB_TEMPLATE(name) static const ASN1_TEMPLATE name##_tt /* This is the ASN1 template structure that defines * a wrapper round the actual type. It determines the * actual position of the field in the value structure, * various flags such as OPTIONAL and the field name. */ struct ASN1_TEMPLATE_st { uint32_t flags; /* Various flags */ int tag; /* tag, not used if no tagging */ unsigned long offset; /* Offset of this field in structure */ const char *field_name; /* Field name */ ASN1_ITEM_EXP *item; /* Relevant ASN1_ITEM or ASN1_ADB */ }; /* Macro to extract ASN1_ITEM and ASN1_ADB pointer from ASN1_TEMPLATE */ #define ASN1_TEMPLATE_item(t) (t->item_ptr) #define ASN1_TEMPLATE_adb(t) (t->item_ptr) typedef struct ASN1_ADB_TABLE_st ASN1_ADB_TABLE; typedef struct ASN1_ADB_st ASN1_ADB; typedef struct asn1_must_be_null_st ASN1_MUST_BE_NULL; struct ASN1_ADB_st { uint32_t flags; /* Various flags */ unsigned long offset; /* Offset of selector field */ ASN1_MUST_BE_NULL *unused; const ASN1_ADB_TABLE *tbl; /* Table of possible types */ long tblcount; /* Number of entries in tbl */ const ASN1_TEMPLATE *default_tt; /* Type to use if no match */ const ASN1_TEMPLATE *null_tt; /* Type to use if selector is NULL */ }; struct ASN1_ADB_TABLE_st { int value; /* NID for an object */ const ASN1_TEMPLATE tt; /* item for this value */ }; /* template flags */ /* Field is optional */ #define ASN1_TFLG_OPTIONAL (0x1) /* Field is a SET OF */ #define ASN1_TFLG_SET_OF (0x1 << 1) /* Field is a SEQUENCE OF */ #define ASN1_TFLG_SEQUENCE_OF (0x2 
<< 1) /* Mask for SET OF or SEQUENCE OF */ #define ASN1_TFLG_SK_MASK (0x3 << 1) /* These flags mean the tag should be taken from the * tag field. If EXPLICIT then the underlying type * is used for the inner tag. */ /* IMPLICIT tagging */ #define ASN1_TFLG_IMPTAG (0x1 << 3) /* EXPLICIT tagging, inner tag from underlying type */ #define ASN1_TFLG_EXPTAG (0x2 << 3) #define ASN1_TFLG_TAG_MASK (0x3 << 3) /* context specific IMPLICIT */ #define ASN1_TFLG_IMPLICIT ASN1_TFLG_IMPTAG | ASN1_TFLG_CONTEXT /* context specific EXPLICIT */ #define ASN1_TFLG_EXPLICIT ASN1_TFLG_EXPTAG | ASN1_TFLG_CONTEXT /* If tagging is in force these determine the * type of tag to use. Otherwise the tag is * determined by the underlying type. These * values reflect the actual octet format. */ /* Universal tag */ #define ASN1_TFLG_UNIVERSAL (0x0 << 6) /* Application tag */ #define ASN1_TFLG_APPLICATION (0x1 << 6) /* Context specific tag */ #define ASN1_TFLG_CONTEXT (0x2 << 6) /* Private tag */ #define ASN1_TFLG_PRIVATE (0x3 << 6) #define ASN1_TFLG_TAG_CLASS (0x3 << 6) /* These are for ANY DEFINED BY type. In this case * the 'item' field points to an ASN1_ADB structure * which contains a table of values to decode the * relevant type */ #define ASN1_TFLG_ADB_MASK (0x3 << 8) #define ASN1_TFLG_ADB_OID (0x1 << 8) /* This is the actual ASN1 item itself */ struct ASN1_ITEM_st { char itype; /* The item type, primitive, SEQUENCE, CHOICE or extern */ int utype; /* underlying type */ const ASN1_TEMPLATE *templates; /* If SEQUENCE or CHOICE this contains the contents */ long tcount; /* Number of templates if SEQUENCE or CHOICE */ const void *funcs; /* functions that handle this type */ long size; /* Structure size (usually)*/ const char *sname; /* Structure name */ }; /* These are values for the itype field and * determine how the type is interpreted. * * For PRIMITIVE types the underlying type * determines the behaviour if items is NULL. 
* * Otherwise templates must contain a single * template and the type is treated in the * same way as the type specified in the template. * * For SEQUENCE types the templates field points * to the members, the size field is the * structure size. * * For CHOICE types the templates field points * to each possible member (typically a union) * and the 'size' field is the offset of the * selector. * * The 'funcs' field is used for application * specific functions. * * The EXTERN type uses a new style d2i/i2d. * The new style should be used where possible * because it avoids things like the d2i IMPLICIT * hack. * * MSTRING is a multiple string type, it is used * for a CHOICE of character strings where the * actual strings all occupy an ASN1_STRING * structure. In this case the 'utype' field * has a special meaning, it is used as a mask * of acceptable types using the B_ASN1 constants. * */ #define ASN1_ITYPE_PRIMITIVE 0x0 #define ASN1_ITYPE_SEQUENCE 0x1 #define ASN1_ITYPE_CHOICE 0x2 #define ASN1_ITYPE_EXTERN 0x4 #define ASN1_ITYPE_MSTRING 0x5 /* Deprecated tag and length cache */ struct ASN1_TLC_st; /* This is the ASN1_AUX structure: it handles various * miscellaneous requirements. For example the use of * reference counts and an informational callback. * * The "informational callback" is called at various * points during the ASN1 encoding and decoding. It can * be used to provide minor customisation of the structures * used. This is most useful where the supplied routines * *almost* do the right thing but need some extra help * at a few points. If the callback returns zero then * it is assumed a fatal error has occurred and the * main operation should be abandoned. * * If major changes in the default behaviour are required * then an external type is more appropriate. 
*/ typedef int ASN1_aux_cb(int operation, ASN1_VALUE **in, const ASN1_ITEM *it, void *exarg); typedef struct ASN1_AUX_st { void *app_data; uint32_t flags; int ref_offset; /* Offset of reference value */ ASN1_aux_cb *asn1_cb; int enc_offset; /* Offset of ASN1_ENCODING structure */ } ASN1_AUX; /* Flags in ASN1_AUX */ /* Use a reference count */ #define ASN1_AFLG_REFCOUNT 1 /* Save the encoding of structure (useful for signatures) */ #define ASN1_AFLG_ENCODING 2 /* operation values for asn1_cb */ #define ASN1_OP_NEW_PRE 0 #define ASN1_OP_NEW_POST 1 #define ASN1_OP_FREE_PRE 2 #define ASN1_OP_FREE_POST 3 #define ASN1_OP_D2I_PRE 4 #define ASN1_OP_D2I_POST 5 /* ASN1_OP_I2D_PRE and ASN1_OP_I2D_POST are not supported. We leave the * constants undefined so code relying on them does not accidentally compile. */ #define ASN1_OP_PRINT_PRE 8 #define ASN1_OP_PRINT_POST 9 #define ASN1_OP_STREAM_PRE 10 #define ASN1_OP_STREAM_POST 11 #define ASN1_OP_DETACHED_PRE 12 #define ASN1_OP_DETACHED_POST 13 /* Macro to implement a primitive type */ #define IMPLEMENT_ASN1_TYPE(stname) IMPLEMENT_ASN1_TYPE_ex(stname, stname, 0) #define IMPLEMENT_ASN1_TYPE_ex(itname, vname, ex) \ ASN1_ITEM_start(itname) ASN1_ITYPE_PRIMITIVE, V_##vname, NULL, 0, NULL, ex, \ #itname ASN1_ITEM_end(itname) /* Macro to implement a multi string type */ #define IMPLEMENT_ASN1_MSTRING(itname, mask) \ ASN1_ITEM_start(itname) ASN1_ITYPE_MSTRING, mask, NULL, 0, NULL, \ sizeof(ASN1_STRING), #itname ASN1_ITEM_end(itname) #define IMPLEMENT_EXTERN_ASN1(sname, tag, fptrs) \ ASN1_ITEM_start(sname) ASN1_ITYPE_EXTERN, tag, NULL, 0, &fptrs, 0, \ #sname ASN1_ITEM_end(sname) /* Macro to implement standard functions in terms of ASN1_ITEM structures */ #define IMPLEMENT_ASN1_FUNCTIONS(stname) \ IMPLEMENT_ASN1_FUNCTIONS_fname(stname, stname, stname) #define IMPLEMENT_ASN1_FUNCTIONS_name(stname, itname) \ IMPLEMENT_ASN1_FUNCTIONS_fname(stname, itname, itname) #define IMPLEMENT_ASN1_FUNCTIONS_ENCODE_name(stname, itname) \ 
IMPLEMENT_ASN1_FUNCTIONS_ENCODE_fname(stname, itname, itname) #define IMPLEMENT_STATIC_ASN1_ALLOC_FUNCTIONS(stname) \ IMPLEMENT_ASN1_ALLOC_FUNCTIONS_pfname(static, stname, stname, stname) #define IMPLEMENT_ASN1_ALLOC_FUNCTIONS(stname) \ IMPLEMENT_ASN1_ALLOC_FUNCTIONS_fname(stname, stname, stname) #define IMPLEMENT_ASN1_ALLOC_FUNCTIONS_pfname(pre, stname, itname, fname) \ pre stname *fname##_new(void) { \ return (stname *)ASN1_item_new(ASN1_ITEM_rptr(itname)); \ } \ pre void fname##_free(stname *a) { \ ASN1_item_free((ASN1_VALUE *)a, ASN1_ITEM_rptr(itname)); \ } #define IMPLEMENT_ASN1_ALLOC_FUNCTIONS_fname(stname, itname, fname) \ stname *fname##_new(void) { \ return (stname *)ASN1_item_new(ASN1_ITEM_rptr(itname)); \ } \ void fname##_free(stname *a) { \ ASN1_item_free((ASN1_VALUE *)a, ASN1_ITEM_rptr(itname)); \ } #define IMPLEMENT_ASN1_FUNCTIONS_fname(stname, itname, fname) \ IMPLEMENT_ASN1_ENCODE_FUNCTIONS_fname(stname, itname, fname) \ IMPLEMENT_ASN1_ALLOC_FUNCTIONS_fname(stname, itname, fname) #define IMPLEMENT_ASN1_ENCODE_FUNCTIONS_fname(stname, itname, fname) \ stname *d2i_##fname(stname **a, const unsigned char **in, long len) { \ return (stname *)ASN1_item_d2i((ASN1_VALUE **)a, in, len, \ ASN1_ITEM_rptr(itname)); \ } \ int i2d_##fname(stname *a, unsigned char **out) { \ return ASN1_item_i2d((ASN1_VALUE *)a, out, ASN1_ITEM_rptr(itname)); \ } /* This includes evil casts to remove const: they will go away when full * ASN1 constification is done. 
*/ #define IMPLEMENT_ASN1_ENCODE_FUNCTIONS_const_fname(stname, itname, fname) \ stname *d2i_##fname(stname **a, const unsigned char **in, long len) { \ return (stname *)ASN1_item_d2i((ASN1_VALUE **)a, in, len, \ ASN1_ITEM_rptr(itname)); \ } \ int i2d_##fname(const stname *a, unsigned char **out) { \ return ASN1_item_i2d((ASN1_VALUE *)a, out, ASN1_ITEM_rptr(itname)); \ } #define IMPLEMENT_ASN1_DUP_FUNCTION(stname) \ stname *stname##_dup(stname *x) { \ return (stname *)ASN1_item_dup(ASN1_ITEM_rptr(stname), x); \ } #define IMPLEMENT_ASN1_DUP_FUNCTION_const(stname) \ stname *stname##_dup(const stname *x) { \ return (stname *)ASN1_item_dup(ASN1_ITEM_rptr(stname), (void *)x); \ } #define IMPLEMENT_ASN1_FUNCTIONS_const(name) \ IMPLEMENT_ASN1_FUNCTIONS_const_fname(name, name, name) #define IMPLEMENT_ASN1_FUNCTIONS_const_fname(stname, itname, fname) \ IMPLEMENT_ASN1_ENCODE_FUNCTIONS_const_fname(stname, itname, fname) \ IMPLEMENT_ASN1_ALLOC_FUNCTIONS_fname(stname, itname, fname) /* external definitions for primitive types */ DECLARE_ASN1_ITEM(ASN1_SEQUENCE) DEFINE_STACK_OF(ASN1_VALUE) #if defined(__cplusplus) } // extern "C" #endif #endif // OPENSSL_HEADER_ASN1T_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_base.h ================================================ /* * Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_BASE_H #define OPENSSL_HEADER_BASE_H #define BORINGSSL_PREFIX CNIOBoringSSL // This file should be the first included by all BoringSSL headers. #include #include #include #include #if defined(__MINGW32__) // stdio.h is needed on MinGW for __MINGW_PRINTF_FORMAT. 
#include #endif #if defined(__APPLE__) #include #endif // Include a BoringSSL-only header so consumers including this header without // setting up include paths do not accidentally pick up the system // opensslconf.h. #include "CNIOBoringSSL_is_boringssl.h" #include "CNIOBoringSSL_opensslconf.h" #include "CNIOBoringSSL_target.h" // IWYU pragma: export #if defined(BORINGSSL_PREFIX) #include "CNIOBoringSSL_boringssl_prefix_symbols.h" #endif #if defined(__cplusplus) extern "C" { #endif #if defined(__APPLE__) // Note |TARGET_OS_MAC| is set for all Apple OS variants. |TARGET_OS_OSX| // targets macOS specifically. #if defined(TARGET_OS_OSX) && TARGET_OS_OSX #define OPENSSL_MACOS #endif #if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE #define OPENSSL_IOS #endif #endif #define OPENSSL_IS_BORINGSSL #define OPENSSL_VERSION_NUMBER 0x1010107f #define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER // BORINGSSL_API_VERSION is a positive integer that increments as BoringSSL // changes over time. The value itself is not meaningful. It will be incremented // whenever is convenient to coordinate an API change with consumers. This will // not denote any special point in development. // // A consumer may use this symbol in the preprocessor to temporarily build // against multiple revisions of BoringSSL at the same time. It is not // recommended to do so for longer than is necessary. 
#define BORINGSSL_API_VERSION 34 #if defined(BORINGSSL_SHARED_LIBRARY) #if defined(OPENSSL_WINDOWS) #if defined(BORINGSSL_IMPLEMENTATION) #define OPENSSL_EXPORT __declspec(dllexport) #else #define OPENSSL_EXPORT __declspec(dllimport) #endif #else // defined(OPENSSL_WINDOWS) #if defined(BORINGSSL_IMPLEMENTATION) #define OPENSSL_EXPORT __attribute__((visibility("default"))) #else #define OPENSSL_EXPORT #endif #endif // defined(OPENSSL_WINDOWS) #else // defined(BORINGSSL_SHARED_LIBRARY) #define OPENSSL_EXPORT #endif // defined(BORINGSSL_SHARED_LIBRARY) #if defined(_MSC_VER) // OPENSSL_DEPRECATED is used to mark a function as deprecated. Use // of any functions so marked in caller code will produce a warning. // OPENSSL_BEGIN_ALLOW_DEPRECATED and OPENSSL_END_ALLOW_DEPRECATED // can be used to suppress the warning in regions of caller code. #define OPENSSL_DEPRECATED __declspec(deprecated) #define OPENSSL_BEGIN_ALLOW_DEPRECATED \ __pragma(warning(push)) __pragma(warning(disable : 4996)) #define OPENSSL_END_ALLOW_DEPRECATED __pragma(warning(pop)) #elif defined(__GNUC__) || defined(__clang__) #define OPENSSL_DEPRECATED __attribute__((__deprecated__)) #define OPENSSL_BEGIN_ALLOW_DEPRECATED \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") #define OPENSSL_END_ALLOW_DEPRECATED _Pragma("GCC diagnostic pop") #else #define OPENSSL_DEPRECATED #define OPENSSL_BEGIN_ALLOW_DEPRECATED #define OPENSSL_END_ALLOW_DEPRECATED #endif #if defined(__GNUC__) || defined(__clang__) // MinGW has two different printf implementations. Ensure the format macro // matches the selected implementation. See // https://sourceforge.net/p/mingw-w64/wiki2/gnu%20printf/. 
#if defined(__MINGW_PRINTF_FORMAT) #define OPENSSL_PRINTF_FORMAT_FUNC(string_index, first_to_check) \ __attribute__(( \ __format__(__MINGW_PRINTF_FORMAT, string_index, first_to_check))) #else #define OPENSSL_PRINTF_FORMAT_FUNC(string_index, first_to_check) \ __attribute__((__format__(__printf__, string_index, first_to_check))) #endif #else #define OPENSSL_PRINTF_FORMAT_FUNC(string_index, first_to_check) #endif // OPENSSL_CLANG_PRAGMA emits a pragma on clang and nothing on other compilers. #if defined(__clang__) #define OPENSSL_CLANG_PRAGMA(arg) _Pragma(arg) #else #define OPENSSL_CLANG_PRAGMA(arg) #endif // OPENSSL_MSVC_PRAGMA emits a pragma on MSVC and nothing on other compilers. #if defined(_MSC_VER) #define OPENSSL_MSVC_PRAGMA(arg) __pragma(arg) #else #define OPENSSL_MSVC_PRAGMA(arg) #endif #if defined(__GNUC__) || defined(__clang__) #define OPENSSL_UNUSED __attribute__((unused)) #elif defined(_MSC_VER) // __pragma wants to be on a separate line. The following is what it takes to // stop clang-format from messing with that. // clang-format off #define OPENSSL_UNUSED __pragma(warning(suppress : 4505)) \ /* */ // clang-format on #else #define OPENSSL_UNUSED #endif // C and C++ handle inline functions differently. In C++, an inline function is // defined in just the header file, potentially emitted in multiple compilation // units (in cases the compiler did not inline), but each copy must be identical // to satsify ODR. In C, a non-static inline must be manually emitted in exactly // one compilation unit with a separate extern inline declaration. // // In both languages, exported inline functions referencing file-local symbols // are problematic. C forbids this altogether (though GCC and Clang seem not to // enforce it). It works in C++, but ODR requires the definitions be identical, // including all names in the definitions resolving to the "same entity". 
// In practice, this is unlikely to be a problem, but an inline function that
// returns a pointer to a file-local symbol could compile oddly.
//
// Historically, we used static inline in headers. However, to satisfy ODR, use
// plain inline in C++, to allow inline consumer functions to call our header
// functions. Plain inline would also work better with C99 inline, but that is
// not used much in practice, extern inline is tedious, and there are conflicts
// with the old gnu89 model:
// https://stackoverflow.com/questions/216510/extern-inline
#if defined(__cplusplus)
#define OPENSSL_INLINE inline
#else
// Add OPENSSL_UNUSED so that, should an inline function be emitted via macro
// (e.g. a |STACK_OF(T)| implementation) in a source file, it does not trip
// clang's -Wunused-function.
#define OPENSSL_INLINE static inline OPENSSL_UNUSED
#endif

#if defined(__cplusplus)
// enums can be predeclared, but only in C++ and only if given an explicit type.
// C doesn't support setting an explicit type for enums thus a #define is used
// to do this only for C++. However, the ABI type between C and C++ need to have
// equal sizes, which is confirmed in a unittest.
#define BORINGSSL_ENUM_INT : int
enum ssl_early_data_reason_t BORINGSSL_ENUM_INT;
enum ssl_encryption_level_t BORINGSSL_ENUM_INT;
enum ssl_private_key_result_t BORINGSSL_ENUM_INT;
enum ssl_renegotiate_mode_t BORINGSSL_ENUM_INT;
enum ssl_select_cert_result_t BORINGSSL_ENUM_INT;
enum ssl_ticket_aead_result_t BORINGSSL_ENUM_INT;
enum ssl_verify_result_t BORINGSSL_ENUM_INT;
#else
#define BORINGSSL_ENUM_INT
#endif

// ossl_ssize_t is a signed type which is large enough to fit the size of any
// valid memory allocation. We prefer using |size_t|, but sometimes we need a
// signed type for OpenSSL API compatibility. This type can be used in such
// cases to avoid overflow.
// // Not all |size_t| values fit in |ossl_ssize_t|, but all |size_t| values that // are sizes of or indices into C objects, can be converted without overflow. typedef ptrdiff_t ossl_ssize_t; // CBS_ASN1_TAG is the type used by |CBS| and |CBB| for ASN.1 tags. See that // header for details. This type is defined in base.h as a forward declaration. typedef uint32_t CBS_ASN1_TAG; // CRYPTO_THREADID is a dummy value. typedef int CRYPTO_THREADID; // An |ASN1_NULL| is an opaque type. asn1.h represents the ASN.1 NULL value as // an opaque, non-NULL |ASN1_NULL*| pointer. typedef struct asn1_null_st ASN1_NULL; typedef int ASN1_BOOLEAN; typedef struct ASN1_ITEM_st ASN1_ITEM; typedef struct asn1_object_st ASN1_OBJECT; typedef struct asn1_pctx_st ASN1_PCTX; typedef struct asn1_string_st ASN1_BIT_STRING; typedef struct asn1_string_st ASN1_BMPSTRING; typedef struct asn1_string_st ASN1_ENUMERATED; typedef struct asn1_string_st ASN1_GENERALIZEDTIME; typedef struct asn1_string_st ASN1_GENERALSTRING; typedef struct asn1_string_st ASN1_IA5STRING; typedef struct asn1_string_st ASN1_INTEGER; typedef struct asn1_string_st ASN1_OCTET_STRING; typedef struct asn1_string_st ASN1_PRINTABLESTRING; typedef struct asn1_string_st ASN1_STRING; typedef struct asn1_string_st ASN1_T61STRING; typedef struct asn1_string_st ASN1_TIME; typedef struct asn1_string_st ASN1_UNIVERSALSTRING; typedef struct asn1_string_st ASN1_UTCTIME; typedef struct asn1_string_st ASN1_UTF8STRING; typedef struct asn1_string_st ASN1_VISIBLESTRING; typedef struct asn1_type_st ASN1_TYPE; typedef struct AUTHORITY_KEYID_st AUTHORITY_KEYID; typedef struct BASIC_CONSTRAINTS_st BASIC_CONSTRAINTS; typedef struct DIST_POINT_st DIST_POINT; typedef struct DSA_SIG_st DSA_SIG; typedef struct GENERAL_NAME_st GENERAL_NAME; typedef struct ISSUING_DIST_POINT_st ISSUING_DIST_POINT; typedef struct NAME_CONSTRAINTS_st NAME_CONSTRAINTS; typedef struct Netscape_spkac_st NETSCAPE_SPKAC; typedef struct Netscape_spki_st NETSCAPE_SPKI; typedef struct 
RIPEMD160state_st RIPEMD160_CTX; typedef struct X509_VERIFY_PARAM_st X509_VERIFY_PARAM; typedef struct X509_algor_st X509_ALGOR; typedef struct X509_crl_st X509_CRL; typedef struct X509_extension_st X509_EXTENSION; typedef struct X509_info_st X509_INFO; typedef struct X509_name_entry_st X509_NAME_ENTRY; typedef struct X509_name_st X509_NAME; typedef struct X509_pubkey_st X509_PUBKEY; typedef struct X509_req_st X509_REQ; typedef struct X509_sig_st X509_SIG; typedef struct bignum_ctx BN_CTX; typedef struct bignum_st BIGNUM; typedef struct bio_method_st BIO_METHOD; typedef struct bio_st BIO; typedef struct blake2b_state_st BLAKE2B_CTX; typedef struct bn_gencb_st BN_GENCB; typedef struct bn_mont_ctx_st BN_MONT_CTX; typedef struct buf_mem_st BUF_MEM; typedef struct cbb_st CBB; typedef struct cbs_st CBS; typedef struct cmac_ctx_st CMAC_CTX; typedef struct conf_st CONF; typedef struct conf_value_st CONF_VALUE; typedef struct crypto_buffer_pool_st CRYPTO_BUFFER_POOL; typedef struct crypto_buffer_st CRYPTO_BUFFER; typedef struct ctr_drbg_state_st CTR_DRBG_STATE; typedef struct dh_st DH; typedef struct dsa_st DSA; typedef struct ec_group_st EC_GROUP; typedef struct ec_key_st EC_KEY; typedef struct ec_point_st EC_POINT; typedef struct ecdsa_method_st ECDSA_METHOD; typedef struct ecdsa_sig_st ECDSA_SIG; typedef struct engine_st ENGINE; typedef struct env_md_ctx_st EVP_MD_CTX; typedef struct env_md_st EVP_MD; typedef struct evp_aead_st EVP_AEAD; typedef struct evp_aead_ctx_st EVP_AEAD_CTX; typedef struct evp_cipher_ctx_st EVP_CIPHER_CTX; typedef struct evp_cipher_st EVP_CIPHER; typedef struct evp_encode_ctx_st EVP_ENCODE_CTX; typedef struct evp_hpke_aead_st EVP_HPKE_AEAD; typedef struct evp_hpke_ctx_st EVP_HPKE_CTX; typedef struct evp_hpke_kdf_st EVP_HPKE_KDF; typedef struct evp_hpke_kem_st EVP_HPKE_KEM; typedef struct evp_hpke_key_st EVP_HPKE_KEY; typedef struct evp_pkey_ctx_st EVP_PKEY_CTX; typedef struct evp_pkey_st EVP_PKEY; typedef struct hmac_ctx_st HMAC_CTX; typedef 
struct md4_state_st MD4_CTX; typedef struct md5_state_st MD5_CTX; typedef struct ossl_init_settings_st OPENSSL_INIT_SETTINGS; typedef struct pkcs12_st PKCS12; typedef struct pkcs8_priv_key_info_st PKCS8_PRIV_KEY_INFO; typedef struct private_key_st X509_PKEY; typedef struct rand_meth_st RAND_METHOD; typedef struct rc4_key_st RC4_KEY; typedef struct rsa_meth_st RSA_METHOD; typedef struct rsa_pss_params_st RSA_PSS_PARAMS; typedef struct rsa_st RSA; typedef struct sha256_state_st SHA256_CTX; typedef struct sha512_state_st SHA512_CTX; typedef struct sha_state_st SHA_CTX; typedef struct spake2_ctx_st SPAKE2_CTX; typedef struct srtp_protection_profile_st SRTP_PROTECTION_PROFILE; typedef struct ssl_cipher_st SSL_CIPHER; typedef struct ssl_credential_st SSL_CREDENTIAL; typedef struct ssl_ctx_st SSL_CTX; typedef struct ssl_early_callback_ctx SSL_CLIENT_HELLO; typedef struct ssl_ech_keys_st SSL_ECH_KEYS; typedef struct ssl_method_st SSL_METHOD; typedef struct ssl_private_key_method_st SSL_PRIVATE_KEY_METHOD; typedef struct ssl_quic_method_st SSL_QUIC_METHOD; typedef struct ssl_session_st SSL_SESSION; typedef struct ssl_st SSL; typedef struct ssl_ticket_aead_method_st SSL_TICKET_AEAD_METHOD; typedef struct st_ERR_FNS ERR_FNS; typedef struct trust_token_st TRUST_TOKEN; typedef struct trust_token_client_st TRUST_TOKEN_CLIENT; typedef struct trust_token_issuer_st TRUST_TOKEN_ISSUER; typedef struct trust_token_method_st TRUST_TOKEN_METHOD; typedef struct v3_ext_ctx X509V3_CTX; typedef struct v3_ext_method X509V3_EXT_METHOD; typedef struct x509_attributes_st X509_ATTRIBUTE; typedef struct x509_lookup_st X509_LOOKUP; typedef struct x509_lookup_method_st X509_LOOKUP_METHOD; typedef struct x509_object_st X509_OBJECT; typedef struct x509_purpose_st X509_PURPOSE; typedef struct x509_revoked_st X509_REVOKED; typedef struct x509_st X509; typedef struct x509_store_ctx_st X509_STORE_CTX; typedef struct x509_store_st X509_STORE; typedef void *OPENSSL_BLOCK; // BSSL_CHECK aborts if 
|condition| is not true. #define BSSL_CHECK(condition) \ do { \ if (!(condition)) { \ abort(); \ } \ } while (0); #if defined(__cplusplus) } // extern C #elif !defined(BORINGSSL_NO_CXX) #define BORINGSSL_NO_CXX #endif #if defined(BORINGSSL_PREFIX) #define BSSL_NAMESPACE_BEGIN \ namespace bssl { \ inline namespace BORINGSSL_PREFIX { #define BSSL_NAMESPACE_END \ } \ } #else #define BSSL_NAMESPACE_BEGIN namespace bssl { #define BSSL_NAMESPACE_END } #endif // MSVC doesn't set __cplusplus to 201103 to indicate C++11 support (see // https://connect.microsoft.com/VisualStudio/feedback/details/763051/a-value-of-predefined-macro-cplusplus-is-still-199711l) // so MSVC is just assumed to support C++11. #if !defined(BORINGSSL_NO_CXX) && __cplusplus < 201103L && !defined(_MSC_VER) #define BORINGSSL_NO_CXX #endif #if !defined(BORINGSSL_NO_CXX) extern "C++" { #include // STLPort, used by some Android consumers, not have std::unique_ptr. #if defined(_STLPORT_VERSION) #define BORINGSSL_NO_CXX #endif } // extern C++ #endif // !BORINGSSL_NO_CXX #if defined(BORINGSSL_NO_CXX) #define BORINGSSL_MAKE_DELETER(type, deleter) #define BORINGSSL_MAKE_UP_REF(type, up_ref_func) #else extern "C++" { BSSL_NAMESPACE_BEGIN namespace internal { // The Enable parameter is ignored and only exists so specializations can use // SFINAE. template struct DeleterImpl {}; struct Deleter { template void operator()(T *ptr) { // Rather than specialize Deleter for each type, we specialize // DeleterImpl. This allows bssl::UniquePtr to be used while only // including base.h as long as the destructor is not emitted. This matches // std::unique_ptr's behavior on forward-declared types. // // DeleterImpl itself is specialized in the corresponding module's header // and must be included to release an object. If not included, the compiler // will error that DeleterImpl does not have a method Free. 
DeleterImpl::Free(ptr); } }; template class StackAllocated { public: StackAllocated() { init(&ctx_); } ~StackAllocated() { cleanup(&ctx_); } StackAllocated(const StackAllocated &) = delete; StackAllocated &operator=(const StackAllocated &) = delete; T *get() { return &ctx_; } const T *get() const { return &ctx_; } T *operator->() { return &ctx_; } const T *operator->() const { return &ctx_; } void Reset() { cleanup(&ctx_); init(&ctx_); } private: T ctx_; }; template class StackAllocatedMovable { public: StackAllocatedMovable() { init(&ctx_); } ~StackAllocatedMovable() { cleanup(&ctx_); } StackAllocatedMovable(StackAllocatedMovable &&other) { init(&ctx_); move(&ctx_, &other.ctx_); } StackAllocatedMovable &operator=(StackAllocatedMovable &&other) { move(&ctx_, &other.ctx_); return *this; } T *get() { return &ctx_; } const T *get() const { return &ctx_; } T *operator->() { return &ctx_; } const T *operator->() const { return &ctx_; } void Reset() { cleanup(&ctx_); init(&ctx_); } private: T ctx_; }; } // namespace internal #define BORINGSSL_MAKE_DELETER(type, deleter) \ namespace internal { \ template <> \ struct DeleterImpl { \ static void Free(type *ptr) { deleter(ptr); } \ }; \ } // Holds ownership of heap-allocated BoringSSL structures. Sample usage: // bssl::UniquePtr rsa(RSA_new()); // bssl::UniquePtr bio(BIO_new(BIO_s_mem())); template using UniquePtr = std::unique_ptr; #define BORINGSSL_MAKE_UP_REF(type, up_ref_func) \ inline UniquePtr UpRef(type *v) { \ if (v != nullptr) { \ up_ref_func(v); \ } \ return UniquePtr(v); \ } \ \ inline UniquePtr UpRef(const UniquePtr &ptr) { \ return UpRef(ptr.get()); \ } BSSL_NAMESPACE_END } // extern C++ #endif // !BORINGSSL_NO_CXX #endif // OPENSSL_HEADER_BASE_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_base64.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. 
* * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_BASE64_H #define OPENSSL_HEADER_BASE64_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // base64 functions. // // For historical reasons, these functions have the EVP_ prefix but just do // base64 encoding and decoding. Note that BoringSSL is a cryptography library, // so these functions are implemented with side channel protections, at a // performance cost. For other base64 uses, use a general-purpose base64 // implementation. // Encoding // EVP_EncodeBlock encodes |src_len| bytes from |src| and writes the // result to |dst| with a trailing NUL. It returns the number of bytes // written, not including this trailing NUL. OPENSSL_EXPORT size_t EVP_EncodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len); // EVP_EncodedLength sets |*out_len| to the number of bytes that will be needed // to call |EVP_EncodeBlock| on an input of length |len|. This includes the // final NUL that |EVP_EncodeBlock| writes. It returns one on success or zero // on error. OPENSSL_EXPORT int EVP_EncodedLength(size_t *out_len, size_t len); // Decoding // EVP_DecodedLength sets |*out_len| to the maximum number of bytes that will // be needed to call |EVP_DecodeBase64| on an input of length |len|. It returns // one on success or zero if |len| is not a valid length for a base64-encoded // string. OPENSSL_EXPORT int EVP_DecodedLength(size_t *out_len, size_t len); // EVP_DecodeBase64 decodes |in_len| bytes from base64 and writes // |*out_len| bytes to |out|. |max_out| is the size of the output // buffer. If it is not enough for the maximum output size, the // operation fails. It returns one on success or zero on error. 
OPENSSL_EXPORT int EVP_DecodeBase64(uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *in, size_t in_len); // Deprecated functions. // // OpenSSL provides a streaming base64 implementation, however its behavior is // very specific to PEM. It is also very lenient of invalid input. Use of any of // these functions is thus deprecated. // EVP_ENCODE_CTX_new returns a newly-allocated |EVP_ENCODE_CTX| or NULL on // error. The caller must release the result with |EVP_ENCODE_CTX_free| when // done. OPENSSL_EXPORT EVP_ENCODE_CTX *EVP_ENCODE_CTX_new(void); // EVP_ENCODE_CTX_free releases memory associated with |ctx|. OPENSSL_EXPORT void EVP_ENCODE_CTX_free(EVP_ENCODE_CTX *ctx); // EVP_EncodeInit initialises |*ctx|, which is typically stack // allocated, for an encoding operation. // // NOTE: The encoding operation breaks its output with newlines every // 64 characters of output (48 characters of input). Use // EVP_EncodeBlock to encode raw base64. OPENSSL_EXPORT void EVP_EncodeInit(EVP_ENCODE_CTX *ctx); // EVP_EncodeUpdate encodes |in_len| bytes from |in| and writes an encoded // version of them to |out| and sets |*out_len| to the number of bytes written. // Some state may be contained in |ctx| so |EVP_EncodeFinal| must be used to // flush it before using the encoded data. OPENSSL_EXPORT void EVP_EncodeUpdate(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, size_t in_len); // EVP_EncodeFinal flushes any remaining output bytes from |ctx| to |out| and // sets |*out_len| to the number of bytes written. OPENSSL_EXPORT void EVP_EncodeFinal(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len); // EVP_DecodeInit initialises |*ctx|, which is typically stack allocated, for // a decoding operation. // // TODO(davidben): This isn't a straight-up base64 decode either. Document // and/or fix exactly what's going on here; maximum line length and such. 
OPENSSL_EXPORT void EVP_DecodeInit(EVP_ENCODE_CTX *ctx); // EVP_DecodeUpdate decodes |in_len| bytes from |in| and writes the decoded // data to |out| and sets |*out_len| to the number of bytes written. Some state // may be contained in |ctx| so |EVP_DecodeFinal| must be used to flush it // before using the encoded data. // // It returns -1 on error, one if a full line of input was processed and zero // if the line was short (i.e. it was the last line). OPENSSL_EXPORT int EVP_DecodeUpdate(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, size_t in_len); // EVP_DecodeFinal flushes any remaining output bytes from |ctx| to |out| and // sets |*out_len| to the number of bytes written. It returns one on success // and minus one on error. OPENSSL_EXPORT int EVP_DecodeFinal(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len); // EVP_DecodeBlock encodes |src_len| bytes from |src| and writes the result to // |dst|. It returns the number of bytes written or -1 on error. // // WARNING: EVP_DecodeBlock's return value does not take padding into // account. It also strips leading whitespace and trailing // whitespace and minuses. OPENSSL_EXPORT int EVP_DecodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len); struct evp_encode_ctx_st { // data_used indicates the number of bytes of |data| that are valid. When // encoding, |data| will be filled and encoded as a lump. When decoding, only // the first four bytes of |data| will be used. unsigned data_used; uint8_t data[48]; // eof_seen indicates that the end of the base64 data has been seen when // decoding. Only whitespace can follow. char eof_seen; // error_encountered indicates that invalid base64 data was found. This will // cause all future calls to fail. 
char error_encountered; }; #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_BASE64_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_bcm_public.h ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_BCM_PUBLIC_H_ #define OPENSSL_HEADER_BCM_PUBLIC_H_ #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // Public types referenced by BoringCrypto // // This header contains public types referenced by BCM. Such types are difficult // to hide from the libcrypto interface, so we treat them as part of BCM. // BCM_SHA_CBLOCK is the block size of SHA-1. #define BCM_SHA_CBLOCK 64 // SHA_CTX struct sha_state_st { #if defined(__cplusplus) || defined(OPENSSL_WINDOWS) uint32_t h[5]; #else // wpa_supplicant accesses |h0|..|h4| so we must support those names for // compatibility with it until it can be updated. Anonymous unions are only // standard in C11, so disable this workaround in C++. union { uint32_t h[5]; struct { uint32_t h0; uint32_t h1; uint32_t h2; uint32_t h3; uint32_t h4; }; }; #endif uint32_t Nl, Nh; uint8_t data[BCM_SHA_CBLOCK]; unsigned num; }; // SHA256_CBLOCK is the block size of SHA-256. 
#define BCM_SHA256_CBLOCK 64 // SHA256_CTX struct sha256_state_st { uint32_t h[8]; uint32_t Nl, Nh; uint8_t data[BCM_SHA256_CBLOCK]; unsigned num, md_len; }; // BCM_SHA512_CBLOCK is the block size of SHA-512. #define BCM_SHA512_CBLOCK 128 struct sha512_state_st { uint64_t h[8]; uint64_t Nl, Nh; uint8_t p[BCM_SHA512_CBLOCK]; unsigned num, md_len; }; #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_BCM_PUBLIC_H_ ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_bio.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_BIO_H #define OPENSSL_HEADER_BIO_H #include "CNIOBoringSSL_base.h" #include <stdio.h> // For FILE #include "CNIOBoringSSL_buffer.h" #include "CNIOBoringSSL_err.h" // for ERR_print_errors_fp #include "CNIOBoringSSL_ex_data.h" #include "CNIOBoringSSL_stack.h" #include "CNIOBoringSSL_thread.h" #if defined(__cplusplus) extern "C" { #endif // BIO abstracts over a file-descriptor like interface. // Allocation and freeing. DEFINE_STACK_OF(BIO) // BIO_new creates a new BIO with the given method and a reference count of one. // It returns the fresh |BIO|, or NULL on error. OPENSSL_EXPORT BIO *BIO_new(const BIO_METHOD *method); // BIO_free decrements the reference count of |bio|. If the reference count // drops to zero, it calls the destroy callback, if present, on the method and // frees |bio| itself. It then repeats that for the next BIO in the chain, if // any. // // It returns one on success or zero otherwise. OPENSSL_EXPORT int BIO_free(BIO *bio); // BIO_vfree performs the same actions as |BIO_free|, but has a void return // value. 
This is provided for API-compat. // // TODO(fork): remove. OPENSSL_EXPORT void BIO_vfree(BIO *bio); // BIO_up_ref increments the reference count of |bio| and returns one. OPENSSL_EXPORT int BIO_up_ref(BIO *bio); // Basic I/O. // BIO_read attempts to read |len| bytes into |data|. It returns the number of // bytes read, zero on EOF, or a negative number on error. OPENSSL_EXPORT int BIO_read(BIO *bio, void *data, int len); // BIO_gets reads a line from |bio| and writes at most |size| bytes into |buf|. // It returns the number of bytes read or a negative number on error. This // function's output always includes a trailing NUL byte, so it will read at // most |size - 1| bytes. // // If the function read a complete line, the output will include the newline // character, '\n'. If no newline was found before |size - 1| bytes or EOF, it // outputs the bytes which were available. OPENSSL_EXPORT int BIO_gets(BIO *bio, char *buf, int size); // BIO_write writes |len| bytes from |data| to |bio|. It returns the number of // bytes written or a negative number on error. OPENSSL_EXPORT int BIO_write(BIO *bio, const void *data, int len); // BIO_write_all writes |len| bytes from |data| to |bio|, looping as necessary. // It returns one if all bytes were successfully written and zero on error. OPENSSL_EXPORT int BIO_write_all(BIO *bio, const void *data, size_t len); // BIO_puts writes a NUL terminated string from |buf| to |bio|. It returns the // number of bytes written or a negative number on error. OPENSSL_EXPORT int BIO_puts(BIO *bio, const char *buf); // BIO_flush flushes any buffered output. It returns one on success and zero // otherwise. OPENSSL_EXPORT int BIO_flush(BIO *bio); // Low-level control functions. // // These are generic functions for sending control requests to a BIO. In // general one should use the wrapper functions like |BIO_get_close|. // BIO_ctrl sends the control request |cmd| to |bio|. The |cmd| argument should // be one of the |BIO_C_*| values. 
OPENSSL_EXPORT long BIO_ctrl(BIO *bio, int cmd, long larg, void *parg); // BIO_ptr_ctrl acts like |BIO_ctrl| but passes the address of a |void*| // pointer as |parg| and returns the value that is written to it, or NULL if // the control request returns <= 0. OPENSSL_EXPORT char *BIO_ptr_ctrl(BIO *bp, int cmd, long larg); // BIO_int_ctrl acts like |BIO_ctrl| but passes the address of a copy of |iarg| // as |parg|. OPENSSL_EXPORT long BIO_int_ctrl(BIO *bp, int cmd, long larg, int iarg); // BIO_reset resets |bio| to its initial state, the precise meaning of which // depends on the concrete type of |bio|. It returns one on success and zero // otherwise. OPENSSL_EXPORT int BIO_reset(BIO *bio); // BIO_eof returns non-zero when |bio| has reached end-of-file. The precise // meaning of which depends on the concrete type of |bio|. Note that in the // case of BIO_pair this always returns non-zero. OPENSSL_EXPORT int BIO_eof(BIO *bio); // BIO_set_flags ORs |flags| with |bio->flags|. OPENSSL_EXPORT void BIO_set_flags(BIO *bio, int flags); // BIO_test_flags returns |bio->flags| AND |flags|. OPENSSL_EXPORT int BIO_test_flags(const BIO *bio, int flags); // BIO_should_read returns non-zero if |bio| encountered a temporary error // while reading (i.e. EAGAIN), indicating that the caller should retry the // read. OPENSSL_EXPORT int BIO_should_read(const BIO *bio); // BIO_should_write returns non-zero if |bio| encountered a temporary error // while writing (i.e. EAGAIN), indicating that the caller should retry the // write. OPENSSL_EXPORT int BIO_should_write(const BIO *bio); // BIO_should_retry returns non-zero if the reason that caused a failed I/O // operation is temporary and thus the operation should be retried. Otherwise, // it was a permanent error and it returns zero. 
OPENSSL_EXPORT int BIO_should_retry(const BIO *bio); // BIO_should_io_special returns non-zero if |bio| encountered a temporary // error while performing a special I/O operation, indicating that the caller // should retry. The operation that caused the error is returned by // |BIO_get_retry_reason|. OPENSSL_EXPORT int BIO_should_io_special(const BIO *bio); // BIO_RR_CONNECT indicates that a connect would have blocked #define BIO_RR_CONNECT 0x02 // BIO_RR_ACCEPT indicates that an accept would have blocked #define BIO_RR_ACCEPT 0x03 // BIO_get_retry_reason returns the special I/O operation that needs to be // retried. The return value is one of the |BIO_RR_*| values. OPENSSL_EXPORT int BIO_get_retry_reason(const BIO *bio); // BIO_set_retry_reason sets the special I/O operation that needs to be retried // to |reason|, which should be one of the |BIO_RR_*| values. OPENSSL_EXPORT void BIO_set_retry_reason(BIO *bio, int reason); // BIO_clear_flags ANDs |bio->flags| with the bitwise-complement of |flags|. OPENSSL_EXPORT void BIO_clear_flags(BIO *bio, int flags); // BIO_set_retry_read sets the |BIO_FLAGS_READ| and |BIO_FLAGS_SHOULD_RETRY| // flags on |bio|. OPENSSL_EXPORT void BIO_set_retry_read(BIO *bio); // BIO_set_retry_write sets the |BIO_FLAGS_WRITE| and |BIO_FLAGS_SHOULD_RETRY| // flags on |bio|. OPENSSL_EXPORT void BIO_set_retry_write(BIO *bio); // BIO_get_retry_flags gets the |BIO_FLAGS_READ|, |BIO_FLAGS_WRITE|, // |BIO_FLAGS_IO_SPECIAL| and |BIO_FLAGS_SHOULD_RETRY| flags from |bio|. OPENSSL_EXPORT int BIO_get_retry_flags(BIO *bio); // BIO_clear_retry_flags clears the |BIO_FLAGS_READ|, |BIO_FLAGS_WRITE|, // |BIO_FLAGS_IO_SPECIAL| and |BIO_FLAGS_SHOULD_RETRY| flags from |bio|. OPENSSL_EXPORT void BIO_clear_retry_flags(BIO *bio); // BIO_method_type returns the type of |bio|, which is one of the |BIO_TYPE_*| // values. 
OPENSSL_EXPORT int BIO_method_type(const BIO *bio); // These are passed to the BIO callback #define BIO_CB_FREE 0x01 #define BIO_CB_READ 0x02 #define BIO_CB_WRITE 0x03 #define BIO_CB_PUTS 0x04 #define BIO_CB_GETS 0x05 #define BIO_CB_CTRL 0x06 // The callback is called before and after the underlying operation, // The BIO_CB_RETURN flag indicates if it is after the call #define BIO_CB_RETURN 0x80 // bio_info_cb is the type of a callback function that can be called for most // BIO operations. The |event| argument is one of |BIO_CB_*| and can be ORed // with |BIO_CB_RETURN| if the callback is being made after the operation in // question. In that case, |return_value| will contain the return value from // the operation. typedef long (*bio_info_cb)(BIO *bio, int event, const char *parg, int cmd, long larg, long return_value); // BIO_callback_ctrl allows the callback function to be manipulated. The |cmd| // arg will generally be |BIO_CTRL_SET_CALLBACK| but arbitrary command values // can be interpreted by the |BIO|. OPENSSL_EXPORT long BIO_callback_ctrl(BIO *bio, int cmd, bio_info_cb fp); // BIO_pending returns the number of bytes pending to be read. OPENSSL_EXPORT size_t BIO_pending(const BIO *bio); // BIO_ctrl_pending calls |BIO_pending| and exists only for compatibility with // OpenSSL. OPENSSL_EXPORT size_t BIO_ctrl_pending(const BIO *bio); // BIO_wpending returns the number of bytes pending to be written. OPENSSL_EXPORT size_t BIO_wpending(const BIO *bio); // BIO_set_close sets the close flag for |bio|. The meaning of which depends on // the type of |bio| but, for example, a memory BIO interprets the close flag // as meaning that it owns its buffer. It returns one on success and zero // otherwise. OPENSSL_EXPORT int BIO_set_close(BIO *bio, int close_flag); // BIO_number_read returns the number of bytes that have been read from // |bio|. 
OPENSSL_EXPORT uint64_t BIO_number_read(const BIO *bio); // BIO_number_written returns the number of bytes that have been written to // |bio|. OPENSSL_EXPORT uint64_t BIO_number_written(const BIO *bio); // Managing chains of BIOs. // // BIOs can be put into chains where the output of one is used as the input of // the next etc. The most common case is a buffering BIO, which accepts and // buffers writes until flushed into the next BIO in the chain. // BIO_push adds |appended_bio| to the end of the chain with |bio| at the head. // It returns |bio|. Note that |appended_bio| may be the head of a chain itself // and thus this function can be used to join two chains. // // BIO_push takes ownership of the caller's reference to |appended_bio|. OPENSSL_EXPORT BIO *BIO_push(BIO *bio, BIO *appended_bio); // BIO_pop removes |bio| from the head of a chain and returns the next BIO in // the chain, or NULL if there is no next BIO. // // The caller takes ownership of the chain's reference to |bio|. OPENSSL_EXPORT BIO *BIO_pop(BIO *bio); // BIO_next returns the next BIO in the chain after |bio|, or NULL if there is // no such BIO. OPENSSL_EXPORT BIO *BIO_next(BIO *bio); // BIO_free_all calls |BIO_free|. // // TODO(fork): update callers and remove. OPENSSL_EXPORT void BIO_free_all(BIO *bio); // BIO_find_type walks a chain of BIOs and returns the first that matches // |type|, which is one of the |BIO_TYPE_*| values. OPENSSL_EXPORT BIO *BIO_find_type(BIO *bio, int type); // BIO_copy_next_retry sets the retry flags and |retry_reason| of |bio| from // the next BIO in the chain. OPENSSL_EXPORT void BIO_copy_next_retry(BIO *bio); // Printf functions. // BIO_printf behaves like |printf| but outputs to |bio| rather than a |FILE|. // It returns the number of bytes written or a negative number on error. OPENSSL_EXPORT int BIO_printf(BIO *bio, const char *format, ...) OPENSSL_PRINTF_FORMAT_FUNC(2, 3); // Utility functions. // BIO_indent prints min(|indent|, |max_indent|) spaces. 
It returns one on // success and zero otherwise. OPENSSL_EXPORT int BIO_indent(BIO *bio, unsigned indent, unsigned max_indent); // BIO_hexdump writes a hex dump of |data| to |bio|. Each line will be indented // by |indent| spaces. It returns one on success and zero otherwise. OPENSSL_EXPORT int BIO_hexdump(BIO *bio, const uint8_t *data, size_t len, unsigned indent); // ERR_print_errors prints the current contents of the error stack to |bio| // using human readable strings where possible. OPENSSL_EXPORT void ERR_print_errors(BIO *bio); // BIO_read_asn1 reads a single ASN.1 object from |bio|. If successful it sets // |*out| to be an allocated buffer (that should be freed with |OPENSSL_free|), // |*out_len| to the length, in bytes, of that buffer and returns one. // Otherwise it returns zero. // // If the length of the object is greater than |max_len| or 2^32 then the // function will fail. Long-form tags are not supported. If the length of the // object is indefinite the full contents of |bio| are read, unless it would be // greater than |max_len|, in which case the function fails. // // If the function fails then some unknown amount of data may have been read // from |bio|. OPENSSL_EXPORT int BIO_read_asn1(BIO *bio, uint8_t **out, size_t *out_len, size_t max_len); // Memory BIOs. // // Memory BIOs can be used as a read-only source (with |BIO_new_mem_buf|) or a // writable sink (with |BIO_new|, |BIO_s_mem| and |BIO_mem_contents|). Data // written to a writable, memory BIO can be recalled by reading from it. // // Calling |BIO_reset| on a read-only BIO resets it to the original contents. // On a writable BIO, it clears any data. // // If the close flag is set to |BIO_NOCLOSE| (not the default) then the // underlying |BUF_MEM| will not be freed when the |BIO| is freed. // // Memory BIOs support |BIO_gets| and |BIO_puts|. // // |BIO_ctrl_pending| returns the number of bytes currently stored. 
// BIO_NOCLOSE and |BIO_CLOSE| can be used as symbolic arguments when a "close // flag" is passed to a BIO function. #define BIO_NOCLOSE 0 #define BIO_CLOSE 1 // BIO_s_mem returns a |BIO_METHOD| that uses a in-memory buffer. OPENSSL_EXPORT const BIO_METHOD *BIO_s_mem(void); // BIO_new_mem_buf creates read-only BIO that reads from |len| bytes at |buf|. // It returns the BIO or NULL on error. This function does not copy or take // ownership of |buf|. The caller must ensure the memory pointed to by |buf| // outlives the |BIO|. // // If |len| is negative, then |buf| is treated as a NUL-terminated string, but // don't depend on this in new code. OPENSSL_EXPORT BIO *BIO_new_mem_buf(const void *buf, ossl_ssize_t len); // BIO_mem_contents sets |*out_contents| to point to the current contents of // |bio| and |*out_len| to contain the length of that data. It returns one on // success and zero otherwise. OPENSSL_EXPORT int BIO_mem_contents(const BIO *bio, const uint8_t **out_contents, size_t *out_len); // BIO_get_mem_data sets |*contents| to point to the current contents of |bio| // and returns the length of the data. // // WARNING: don't use this, use |BIO_mem_contents|. A return value of zero from // this function can mean either that it failed or that the memory buffer is // empty. OPENSSL_EXPORT long BIO_get_mem_data(BIO *bio, char **contents); // BIO_get_mem_ptr sets |*out| to a BUF_MEM containing the current contents of // |bio|. It returns one on success or zero on error. OPENSSL_EXPORT int BIO_get_mem_ptr(BIO *bio, BUF_MEM **out); // BIO_set_mem_buf sets |b| as the contents of |bio|. If |take_ownership| is // non-zero, then |b| will be freed when |bio| is closed. Returns one on // success or zero otherwise. OPENSSL_EXPORT int BIO_set_mem_buf(BIO *bio, BUF_MEM *b, int take_ownership); // BIO_set_mem_eof_return sets the value that will be returned from reading // |bio| when empty. 
If |eof_value| is zero then an empty memory BIO will // return EOF (that is it will return zero and |BIO_should_retry| will be // false). If |eof_value| is non zero then it will return |eof_value| when it // is empty and it will set the read retry flag (that is |BIO_read_retry| is // true). To avoid ambiguity with a normal positive return value, |eof_value| // should be set to a negative value, typically -1. // // For a read-only BIO, the default is zero (EOF). For a writable BIO, the // default is -1 so that additional data can be written once exhausted. OPENSSL_EXPORT int BIO_set_mem_eof_return(BIO *bio, int eof_value); // File descriptor BIOs. // // File descriptor BIOs are wrappers around the system's |read| and |write| // functions. If the close flag is set then |close| is called on the // underlying file descriptor when the BIO is freed. // // |BIO_reset| attempts to seek the file pointer to the start of file using // |lseek|. #if !defined(OPENSSL_NO_POSIX_IO) // BIO_s_fd returns a |BIO_METHOD| for file descriptor fds. OPENSSL_EXPORT const BIO_METHOD *BIO_s_fd(void); // BIO_new_fd creates a new file descriptor BIO wrapping |fd|. If |close_flag| // is non-zero, then |fd| will be closed when the BIO is. OPENSSL_EXPORT BIO *BIO_new_fd(int fd, int close_flag); #endif // BIO_set_fd sets the file descriptor of |bio| to |fd|. If |close_flag| is // non-zero then |fd| will be closed when |bio| is. It returns one on success // or zero on error. // // This function may also be used with socket BIOs (see |BIO_s_socket| and // |BIO_new_socket|). OPENSSL_EXPORT int BIO_set_fd(BIO *bio, int fd, int close_flag); // BIO_get_fd returns the file descriptor currently in use by |bio| or -1 if // |bio| does not wrap a file descriptor. If there is a file descriptor and // |out_fd| is not NULL, it also sets |*out_fd| to the file descriptor. // // This function may also be used with socket BIOs (see |BIO_s_socket| and // |BIO_new_socket|). 
OPENSSL_EXPORT int BIO_get_fd(BIO *bio, int *out_fd); // File BIOs. // // File BIOs are wrappers around a C |FILE| object. // // |BIO_flush| on a file BIO calls |fflush| on the wrapped stream. // // |BIO_reset| attempts to seek the file pointer to the start of file using // |fseek|. // // Setting the close flag causes |fclose| to be called on the stream when the // BIO is freed. // BIO_s_file returns a BIO_METHOD that wraps a |FILE|. OPENSSL_EXPORT const BIO_METHOD *BIO_s_file(void); // BIO_new_file creates a file BIO by opening |filename| with the given mode. // See the |fopen| manual page for details of the mode argument. On Windows, // files may be opened in either binary or text mode so, as in |fopen|, callers // must specify the desired option in |mode|. OPENSSL_EXPORT BIO *BIO_new_file(const char *filename, const char *mode); // BIO_FP_TEXT indicates the |FILE| should be switched to text mode on Windows. // It has no effect on non-Windows platforms. #define BIO_FP_TEXT 0x10 // BIO_new_fp creates a new file BIO that wraps |file|. If |flags| contains // |BIO_CLOSE|, then |fclose| will be called on |file| when the BIO is closed. // // On Windows, if |flags| contains |BIO_FP_TEXT|, this function will // additionally switch |file| to text mode. This is not recommended, but may be // required for OpenSSL compatibility. If |file| was not already in text mode, // mode changes can cause unflushed data in |file| to be written in unexpected // ways. See |_setmode| in Windows documentation for details. // // Unlike OpenSSL, if |flags| does not contain |BIO_FP_TEXT|, the translation // mode of |file| is left as-is. In OpenSSL, |file| will be set to binary, with // the same pitfalls as above. BoringSSL does not do this so that wrapping a // |FILE| in a |BIO| will not inadvertently change its state. // // To avoid these pitfalls, callers should set the desired translation mode when // opening the file. If targeting just BoringSSL, this is sufficient. 
If // targeting both OpenSSL and BoringSSL, callers should set |BIO_FP_TEXT| to // match the desired state of the file. OPENSSL_EXPORT BIO *BIO_new_fp(FILE *file, int flags); // BIO_get_fp sets |*out_file| to the current |FILE| for |bio|. It returns one // on success and zero otherwise. OPENSSL_EXPORT int BIO_get_fp(BIO *bio, FILE **out_file); // BIO_set_fp sets the |FILE| for |bio|. If |flags| contains |BIO_CLOSE| then // |fclose| will be called on |file| when |bio| is closed. It returns one on // success and zero otherwise. // // On Windows, if |flags| contains |BIO_FP_TEXT|, this function will // additionally switch |file| to text mode. This is not recommended, but may be // required for OpenSSL compatibility. If |file| was not already in text mode, // mode changes can cause unflushed data in |file| to be written in unexpected // ways. See |_setmode| in Windows documentation for details. // // Unlike OpenSSL, if |flags| does not contain |BIO_FP_TEXT|, the translation // mode of |file| is left as-is. In OpenSSL, |file| will be set to binary, with // the same pitfalls as above. BoringSSL does not do this so that wrapping a // |FILE| in a |BIO| will not inadvertently change its state. // // To avoid these pitfalls, callers should set the desired translation mode when // opening the file. If targeting just BoringSSL, this is sufficient. If // targeting both OpenSSL and BoringSSL, callers should set |BIO_FP_TEXT| to // match the desired state of the file. OPENSSL_EXPORT int BIO_set_fp(BIO *bio, FILE *file, int flags); // BIO_read_filename opens |filename| for reading and sets the result as the // |FILE| for |bio|. It returns one on success and zero otherwise. The |FILE| // will be closed when |bio| is freed. On Windows, the file is opened in binary // mode. OPENSSL_EXPORT int BIO_read_filename(BIO *bio, const char *filename); // BIO_write_filename opens |filename| for writing and sets the result as the // |FILE| for |bio|. 
It returns one on success and zero otherwise. The |FILE| // will be closed when |bio| is freed. On Windows, the file is opened in binary // mode. OPENSSL_EXPORT int BIO_write_filename(BIO *bio, const char *filename); // BIO_append_filename opens |filename| for appending and sets the result as // the |FILE| for |bio|. It returns one on success and zero otherwise. The // |FILE| will be closed when |bio| is freed. On Windows, the file is opened in // binary mode. OPENSSL_EXPORT int BIO_append_filename(BIO *bio, const char *filename); // BIO_rw_filename opens |filename| for reading and writing and sets the result // as the |FILE| for |bio|. It returns one on success and zero otherwise. The // |FILE| will be closed when |bio| is freed. On Windows, the file is opened in // binary mode. OPENSSL_EXPORT int BIO_rw_filename(BIO *bio, const char *filename); // BIO_tell returns the file offset of |bio|, or a negative number on error or // if |bio| does not support the operation. // // TODO(https://crbug.com/boringssl/465): On platforms where |long| is 32-bit, // this function cannot report 64-bit offsets. OPENSSL_EXPORT long BIO_tell(BIO *bio); // BIO_seek sets the file offset of |bio| to |offset|. It returns a non-negative // number on success and a negative number on error. If |bio| is a file // descriptor |BIO|, it returns the resulting file offset on success. If |bio| // is a file |BIO|, it returns zero on success. // // WARNING: This function's return value conventions differs from most functions // in this library. // // TODO(https://crbug.com/boringssl/465): On platforms where |long| is 32-bit, // this function cannot handle 64-bit offsets. OPENSSL_EXPORT long BIO_seek(BIO *bio, long offset); // Socket BIOs. // // Socket BIOs behave like file descriptor BIOs but, on Windows systems, wrap // the system's |recv| and |send| functions instead of |read| and |write|. On // Windows, file descriptors are provided by C runtime and are not // interchangeable with sockets. 
// // Socket BIOs may be used with |BIO_set_fd| and |BIO_get_fd|. // // TODO(davidben): Add separate APIs and fix the internals to use |SOCKET|s // around rather than rely on int casts. #if !defined(OPENSSL_NO_SOCK) OPENSSL_EXPORT const BIO_METHOD *BIO_s_socket(void); // BIO_new_socket allocates and initialises a fresh BIO which will read and // write to the socket |fd|. If |close_flag| is |BIO_CLOSE| then closing the // BIO will close |fd|. It returns the fresh |BIO| or NULL on error. OPENSSL_EXPORT BIO *BIO_new_socket(int fd, int close_flag); #endif // !OPENSSL_NO_SOCK // Connect BIOs. // // A connection BIO creates a network connection and transfers data over the // resulting socket. #if !defined(OPENSSL_NO_SOCK) OPENSSL_EXPORT const BIO_METHOD *BIO_s_connect(void); // BIO_new_connect returns a BIO that connects to the given hostname and port. // The |host_and_optional_port| argument should be of the form // "www.example.com" or "www.example.com:443". If the port is omitted, it must // be provided with |BIO_set_conn_port|. // // It returns the new BIO on success, or NULL on error. OPENSSL_EXPORT BIO *BIO_new_connect(const char *host_and_optional_port); // BIO_set_conn_hostname sets |host_and_optional_port| as the hostname and // optional port that |bio| will connect to. If the port is omitted, it must be // provided with |BIO_set_conn_port|. // // It returns one on success and zero otherwise. OPENSSL_EXPORT int BIO_set_conn_hostname(BIO *bio, const char *host_and_optional_port); // BIO_set_conn_port sets |port_str| as the port or service name that |bio| // will connect to. It returns one on success and zero otherwise. OPENSSL_EXPORT int BIO_set_conn_port(BIO *bio, const char *port_str); // BIO_set_conn_int_port sets |*port| as the port that |bio| will connect to. // It returns one on success and zero otherwise. OPENSSL_EXPORT int BIO_set_conn_int_port(BIO *bio, const int *port); // BIO_set_nbio sets whether |bio| will use non-blocking I/O operations. 
It // returns one on success and zero otherwise. This only works for connect BIOs // and must be called before |bio| is connected to take effect. // // For socket and fd BIOs, callers must configure blocking vs. non-blocking I/O // using the underlying platform APIs. OPENSSL_EXPORT int BIO_set_nbio(BIO *bio, int on); // BIO_do_connect connects |bio| if it has not been connected yet. It returns // one on success and <= 0 otherwise. OPENSSL_EXPORT int BIO_do_connect(BIO *bio); #endif // !OPENSSL_NO_SOCK // Datagram BIOs. // // TODO(fork): not implemented. #define BIO_CTRL_DGRAM_QUERY_MTU 40 // as kernel for current MTU #define BIO_CTRL_DGRAM_SET_MTU 42 /* set cached value for MTU. want to use this if asking the kernel fails */ #define BIO_CTRL_DGRAM_MTU_EXCEEDED 43 /* check whether the MTU was exceed in the previous write operation. */ // BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT is unsupported as it is unused by consumers // and depends on |timeval|, which is not 2038-clean on all platforms. #define BIO_CTRL_DGRAM_GET_PEER 46 #define BIO_CTRL_DGRAM_GET_FALLBACK_MTU 47 // BIO Pairs. // // BIO pairs provide a "loopback" like system: a pair of BIOs where data // written to one can be read from the other and vice versa. // BIO_new_bio_pair sets |*out1| and |*out2| to two freshly created BIOs where // data written to one can be read from the other and vice versa. The // |writebuf1| argument gives the size of the buffer used in |*out1| and // |writebuf2| for |*out2|. It returns one on success and zero on error. OPENSSL_EXPORT int BIO_new_bio_pair(BIO **out1, size_t writebuf1, BIO **out2, size_t writebuf2); // BIO_ctrl_get_read_request returns the number of bytes that the other side of // |bio| tried (unsuccessfully) to read. OPENSSL_EXPORT size_t BIO_ctrl_get_read_request(BIO *bio); // BIO_ctrl_get_write_guarantee returns the number of bytes that |bio| (which // must have been returned by |BIO_new_bio_pair|) will accept on the next // |BIO_write| call. 
OPENSSL_EXPORT size_t BIO_ctrl_get_write_guarantee(BIO *bio); // BIO_shutdown_wr marks |bio| as closed, from the point of view of the other // side of the pair. Future |BIO_write| calls on |bio| will fail. It returns // one on success and zero otherwise. OPENSSL_EXPORT int BIO_shutdown_wr(BIO *bio); // Custom BIOs. // // Consumers can create custom |BIO|s by filling in a |BIO_METHOD| and using // low-level control functions to set state. // BIO_get_new_index returns a new "type" value for a custom |BIO|. OPENSSL_EXPORT int BIO_get_new_index(void); // BIO_meth_new returns a newly-allocated |BIO_METHOD| or NULL on allocation // error. The |type| specifies the type that will be returned by // |BIO_method_type|. If this is unnecessary, this value may be zero. The |name| // parameter is vestigial and may be NULL. // // Use the |BIO_meth_set_*| functions below to initialize the |BIO_METHOD|. The // function implementations may use |BIO_set_data| and |BIO_get_data| to add // method-specific state to associated |BIO|s. Additionally, |BIO_set_init| must // be called after an associated |BIO| is fully initialized. State set via // |BIO_set_data| may be released by configuring a destructor with // |BIO_meth_set_destroy|. OPENSSL_EXPORT BIO_METHOD *BIO_meth_new(int type, const char *name); // BIO_meth_free releases memory associated with |method|. OPENSSL_EXPORT void BIO_meth_free(BIO_METHOD *method); // BIO_meth_set_create sets a function to be called on |BIO_new| for |method| // and returns one. The function should return one on success and zero on // error. OPENSSL_EXPORT int BIO_meth_set_create(BIO_METHOD *method, int (*create_func)(BIO *)); // BIO_meth_set_destroy sets a function to release data associated with a |BIO| // and returns one. The function's return value is ignored. OPENSSL_EXPORT int BIO_meth_set_destroy(BIO_METHOD *method, int (*destroy_func)(BIO *)); // BIO_meth_set_write sets the implementation of |BIO_write| for |method| and // returns one. 
|BIO_METHOD|s which implement |BIO_write| should also implement // |BIO_CTRL_FLUSH|. (See |BIO_meth_set_ctrl|.) OPENSSL_EXPORT int BIO_meth_set_write(BIO_METHOD *method, int (*write_func)(BIO *, const char *, int)); // BIO_meth_set_read sets the implementation of |BIO_read| for |method| and // returns one. OPENSSL_EXPORT int BIO_meth_set_read(BIO_METHOD *method, int (*read_func)(BIO *, char *, int)); // BIO_meth_set_gets sets the implementation of |BIO_gets| for |method| and // returns one. OPENSSL_EXPORT int BIO_meth_set_gets(BIO_METHOD *method, int (*gets_func)(BIO *, char *, int)); // BIO_meth_set_ctrl sets the implementation of |BIO_ctrl| for |method| and // returns one. OPENSSL_EXPORT int BIO_meth_set_ctrl(BIO_METHOD *method, long (*ctrl_func)(BIO *, int, long, void *)); // BIO_set_data sets custom data on |bio|. It may be retrieved with // |BIO_get_data|. // // This function should only be called by the implementation of a custom |BIO|. // In particular, the data pointer of a built-in |BIO| is private to the // library. For other uses, see |BIO_set_ex_data| and |BIO_set_app_data|. OPENSSL_EXPORT void BIO_set_data(BIO *bio, void *ptr); // BIO_get_data returns custom data on |bio| set by |BIO_set_data|. // // This function should only be called by the implementation of a custom |BIO|. // In particular, the data pointer of a built-in |BIO| is private to the // library. For other uses, see |BIO_get_ex_data| and |BIO_get_app_data|. OPENSSL_EXPORT void *BIO_get_data(BIO *bio); // BIO_set_init sets whether |bio| has been fully initialized. Until fully // initialized, |BIO_read| and |BIO_write| will fail. OPENSSL_EXPORT void BIO_set_init(BIO *bio, int init); // BIO_get_init returns whether |bio| has been fully initialized. OPENSSL_EXPORT int BIO_get_init(BIO *bio); // These are values of the |cmd| argument to |BIO_ctrl|. // BIO_CTRL_RESET implements |BIO_reset|. The arguments are unused. #define BIO_CTRL_RESET 1 // BIO_CTRL_EOF implements |BIO_eof|. 
The arguments are unused. #define BIO_CTRL_EOF 2 // BIO_CTRL_INFO is a legacy command that returns information specific to the // type of |BIO|. It is not safe to call generically and should not be // implemented in new |BIO| types. #define BIO_CTRL_INFO 3 // BIO_CTRL_GET_CLOSE returns the close flag set by |BIO_CTRL_SET_CLOSE|. The // arguments are unused. #define BIO_CTRL_GET_CLOSE 8 // BIO_CTRL_SET_CLOSE implements |BIO_set_close|. The |larg| argument is the // close flag. #define BIO_CTRL_SET_CLOSE 9 // BIO_CTRL_PENDING implements |BIO_pending|. The arguments are unused. #define BIO_CTRL_PENDING 10 // BIO_CTRL_FLUSH implements |BIO_flush|. The arguments are unused. #define BIO_CTRL_FLUSH 11 // BIO_CTRL_WPENDING implements |BIO_wpending|. The arguments are unused. #define BIO_CTRL_WPENDING 13 // BIO_CTRL_SET_CALLBACK sets an informational callback of type // int cb(BIO *bio, int state, int ret) #define BIO_CTRL_SET_CALLBACK 14 // BIO_CTRL_GET_CALLBACK returns the callback set by |BIO_CTRL_SET_CALLBACK|. #define BIO_CTRL_GET_CALLBACK 15 // The following are never used, but are defined to aid porting existing code. #define BIO_CTRL_SET 4 #define BIO_CTRL_GET 5 #define BIO_CTRL_PUSH 6 #define BIO_CTRL_POP 7 #define BIO_CTRL_DUP 12 #define BIO_CTRL_SET_FILENAME 30 // ex_data functions. // // See |ex_data.h| for details. OPENSSL_EXPORT int BIO_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int BIO_set_ex_data(BIO *bio, int idx, void *arg); OPENSSL_EXPORT void *BIO_get_ex_data(const BIO *bio, int idx); #define BIO_set_app_data(bio, arg) (BIO_set_ex_data(bio, 0, (char *)(arg))) #define BIO_get_app_data(bio) (BIO_get_ex_data(bio, 0)) // Deprecated functions. // BIO_f_base64 returns a filter |BIO| that base64-encodes data written into // it, and decodes data read from it. |BIO_gets| is not supported. 
Call // |BIO_flush| when done writing, to signal that no more data are to be // encoded. The flag |BIO_FLAGS_BASE64_NO_NL| may be set to encode all the data // on one line. // // Use |EVP_EncodeBlock| and |EVP_DecodeBase64| instead. OPENSSL_EXPORT const BIO_METHOD *BIO_f_base64(void); OPENSSL_EXPORT void BIO_set_retry_special(BIO *bio); // BIO_set_write_buffer_size returns zero. OPENSSL_EXPORT int BIO_set_write_buffer_size(BIO *bio, int buffer_size); // BIO_set_shutdown sets a method-specific "shutdown" bit on |bio|. OPENSSL_EXPORT void BIO_set_shutdown(BIO *bio, int shutdown); // BIO_get_shutdown returns the method-specific "shutdown" bit. OPENSSL_EXPORT int BIO_get_shutdown(BIO *bio); // BIO_meth_set_puts returns one. |BIO_puts| is implemented with |BIO_write| in // BoringSSL. OPENSSL_EXPORT int BIO_meth_set_puts(BIO_METHOD *method, int (*puts)(BIO *, const char *)); // Private functions #define BIO_FLAGS_READ 0x01 #define BIO_FLAGS_WRITE 0x02 #define BIO_FLAGS_IO_SPECIAL 0x04 #define BIO_FLAGS_RWS (BIO_FLAGS_READ | BIO_FLAGS_WRITE | BIO_FLAGS_IO_SPECIAL) #define BIO_FLAGS_SHOULD_RETRY 0x08 #define BIO_FLAGS_BASE64_NO_NL 0x100 // BIO_FLAGS_MEM_RDONLY is used with memory BIOs. It means we shouldn't free up // or change the data in any way. #define BIO_FLAGS_MEM_RDONLY 0x200 // BIO_TYPE_DESCRIPTOR denotes that the |BIO| responds to the |BIO_C_SET_FD| // (|BIO_set_fd|) and |BIO_C_GET_FD| (|BIO_get_fd|) control hooks. 
#define BIO_TYPE_DESCRIPTOR 0x0100  // socket, fd, connect or accept
#define BIO_TYPE_FILTER 0x0200
#define BIO_TYPE_SOURCE_SINK 0x0400

// These are the 'types' of BIOs
#define BIO_TYPE_NONE 0
#define BIO_TYPE_MEM (1 | BIO_TYPE_SOURCE_SINK)
#define BIO_TYPE_FILE (2 | BIO_TYPE_SOURCE_SINK)
#define BIO_TYPE_FD (4 | BIO_TYPE_SOURCE_SINK | BIO_TYPE_DESCRIPTOR)
#define BIO_TYPE_SOCKET (5 | BIO_TYPE_SOURCE_SINK | BIO_TYPE_DESCRIPTOR)
#define BIO_TYPE_NULL (6 | BIO_TYPE_SOURCE_SINK)
#define BIO_TYPE_SSL (7 | BIO_TYPE_FILTER)
#define BIO_TYPE_MD (8 | BIO_TYPE_FILTER)
#define BIO_TYPE_BUFFER (9 | BIO_TYPE_FILTER)
#define BIO_TYPE_CIPHER (10 | BIO_TYPE_FILTER)
#define BIO_TYPE_BASE64 (11 | BIO_TYPE_FILTER)
#define BIO_TYPE_CONNECT (12 | BIO_TYPE_SOURCE_SINK | BIO_TYPE_DESCRIPTOR)
#define BIO_TYPE_ACCEPT (13 | BIO_TYPE_SOURCE_SINK | BIO_TYPE_DESCRIPTOR)
#define BIO_TYPE_PROXY_CLIENT (14 | BIO_TYPE_FILTER)
#define BIO_TYPE_PROXY_SERVER (15 | BIO_TYPE_FILTER)
#define BIO_TYPE_NBIO_TEST (16 | BIO_TYPE_FILTER)
#define BIO_TYPE_NULL_FILTER (17 | BIO_TYPE_FILTER)
#define BIO_TYPE_BER (18 | BIO_TYPE_FILTER)        // BER -> bin filter
#define BIO_TYPE_BIO (19 | BIO_TYPE_SOURCE_SINK)   // (half a) BIO pair
#define BIO_TYPE_LINEBUFFER (20 | BIO_TYPE_FILTER)
#define BIO_TYPE_DGRAM (21 | BIO_TYPE_SOURCE_SINK | BIO_TYPE_DESCRIPTOR)
#define BIO_TYPE_ASN1 (22 | BIO_TYPE_FILTER)
#define BIO_TYPE_COMP (23 | BIO_TYPE_FILTER)

// BIO_TYPE_START is the first user-allocated |BIO| type. No pre-defined type,
// flag bits aside, may exceed this value.
#define BIO_TYPE_START 128

// bio_method_st is the method table ("vtable") for a |BIO| type: a numeric
// |type|, a human-readable |name|, and the function pointers implementing each
// operation. NOTE(review): the per-member descriptions below are inferred
// from the signatures and the |BIO_*| wrappers declared earlier in this
// header; confirm against the individual method implementations.
struct bio_method_st {
  int type;
  const char *name;
  // bwrite writes up to the given number of bytes from the buffer, returning
  // the number written or a negative value on error (see |BIO_write|).
  int (*bwrite)(BIO *, const char *, int);
  // bread reads up to the given number of bytes into the buffer (see
  // |BIO_read|).
  int (*bread)(BIO *, char *, int);
  // TODO(fork): remove bputs.
  int (*bputs)(BIO *, const char *);
  // bgets reads a line of at most the given length (see |BIO_gets|).
  int (*bgets)(BIO *, char *, int);
  // ctrl handles the BIO_CTRL_* and BIO_C_* control commands.
  long (*ctrl)(BIO *, int, long, void *);
  // create initialises a newly-allocated |BIO| of this type.
  int (*create)(BIO *);
  // destroy releases method-specific resources when the |BIO| is freed.
  int (*destroy)(BIO *);
  long (*callback_ctrl)(BIO *, int, bio_info_cb);
};

// bio_st is the state shared by every |BIO|, regardless of its method.
struct bio_st {
  const BIO_METHOD *method;
  CRYPTO_EX_DATA ex_data;

  // init is non-zero if this |BIO| has been initialised.
  int init;
  // shutdown is often used by specific |BIO_METHOD|s to determine whether
  // they own some underlying resource. This flag can often be controlled by
  // |BIO_set_close|. For example, whether an fd BIO closes the underlying fd
  // when it, itself, is closed.
  int shutdown;
  // flags is a combination of the BIO_FLAGS_* values defined earlier in this
  // header.
  int flags;
  // retry_reason records method-specific detail about why an operation
  // should be retried — TODO(review): confirm exact semantics per method.
  int retry_reason;
  // num is a BIO-specific value. For example, in fd BIOs it's used to store a
  // file descriptor.
  int num;
  CRYPTO_refcount_t references;
  // ptr holds method-specific state.
  void *ptr;
  // next_bio points to the next |BIO| in a chain. This |BIO| owns a reference
  // to |next_bio|.
  BIO *next_bio;  // used by filter BIOs
  uint64_t num_read, num_write;
};

#define BIO_C_SET_CONNECT 100
#define BIO_C_DO_STATE_MACHINE 101
#define BIO_C_SET_NBIO 102
#define BIO_C_SET_PROXY_PARAM 103
#define BIO_C_SET_FD 104
#define BIO_C_GET_FD 105
#define BIO_C_SET_FILE_PTR 106
#define BIO_C_GET_FILE_PTR 107
#define BIO_C_SET_FILENAME 108
#define BIO_C_SET_SSL 109
#define BIO_C_SET_MD 111
#define BIO_C_GET_MD 112
#define BIO_C_GET_CIPHER_STATUS 113
#define BIO_C_SET_BUF_MEM 114
#define BIO_C_GET_BUF_MEM_PTR 115
#define BIO_C_GET_BUFF_NUM_LINES 116
#define BIO_C_SET_BUFF_SIZE 117
#define BIO_C_SET_ACCEPT 118
#define BIO_C_SSL_MODE 119
#define BIO_C_GET_MD_CTX 120
#define BIO_C_GET_PROXY_PARAM 121
#define BIO_C_SET_BUFF_READ_DATA 122  // data to read first
#define BIO_C_GET_ACCEPT 124
#define BIO_C_FILE_SEEK 128
#define BIO_C_GET_CIPHER_CTX 129
#define BIO_C_SET_BUF_MEM_EOF_RETURN 130  // return end of input value
#define BIO_C_SET_BIND_MODE 131
#define BIO_C_GET_BIND_MODE 132
#define BIO_C_FILE_TELL 133
#define BIO_C_GET_SOCKS 134
#define BIO_C_SET_SOCKS 135
#define BIO_C_SET_WRITE_BUF_SIZE 136  // for
BIO_s_bio #define BIO_C_GET_WRITE_BUF_SIZE 137 #define BIO_C_GET_WRITE_GUARANTEE 140 #define BIO_C_GET_READ_REQUEST 141 #define BIO_C_SHUTDOWN_WR 142 #define BIO_C_NREAD0 143 #define BIO_C_NREAD 144 #define BIO_C_NWRITE0 145 #define BIO_C_NWRITE 146 #define BIO_C_RESET_READ_REQUEST 147 #define BIO_C_SET_MD_CTX 148 #define BIO_C_SET_PREFIX 149 #define BIO_C_GET_PREFIX 150 #define BIO_C_SET_SUFFIX 151 #define BIO_C_GET_SUFFIX 152 #define BIO_C_SET_EX_ARG 153 #define BIO_C_GET_EX_ARG 154 #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(BIO, BIO_free) BORINGSSL_MAKE_UP_REF(BIO, BIO_up_ref) BORINGSSL_MAKE_DELETER(BIO_METHOD, BIO_meth_free) BSSL_NAMESPACE_END } // extern C++ #endif #define BIO_R_BAD_FOPEN_MODE 100 #define BIO_R_BROKEN_PIPE 101 #define BIO_R_CONNECT_ERROR 102 #define BIO_R_ERROR_SETTING_NBIO 103 #define BIO_R_INVALID_ARGUMENT 104 #define BIO_R_IN_USE 105 #define BIO_R_KEEPALIVE 106 #define BIO_R_NBIO_CONNECT_ERROR 107 #define BIO_R_NO_HOSTNAME_SPECIFIED 108 #define BIO_R_NO_PORT_SPECIFIED 109 #define BIO_R_NO_SUCH_FILE 110 #define BIO_R_NULL_PARAMETER 111 #define BIO_R_SYS_LIB 112 #define BIO_R_UNABLE_TO_CREATE_SOCKET 113 #define BIO_R_UNINITIALIZED 114 #define BIO_R_UNSUPPORTED_METHOD 115 #define BIO_R_WRITE_TO_READ_ONLY_BIO 116 #endif // OPENSSL_HEADER_BIO_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_blake2.h ================================================ /* Copyright 2021 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#ifndef OPENSSL_HEADER_BLAKE2_H
#define OPENSSL_HEADER_BLAKE2_H

#include "CNIOBoringSSL_base.h"

#if defined(__cplusplus)
extern "C" {
#endif

// BLAKE2B256_DIGEST_LENGTH is the digest size of BLAKE2b-256, in bytes.
#define BLAKE2B256_DIGEST_LENGTH (256 / 8)
// BLAKE2B_CBLOCK is the BLAKE2b internal block size, in bytes.
#define BLAKE2B_CBLOCK 128

// blake2b_state_st holds the running state of an incremental BLAKE2b-256
// computation.
struct blake2b_state_st {
  // h is the chained hash state.
  uint64_t h[8];
  // t_low, t_high — presumably the low/high halves of the 128-bit counter of
  // bytes hashed so far; TODO(review): confirm against the implementation.
  uint64_t t_low, t_high;
  // block buffers partial input until |BLAKE2B_CBLOCK| bytes are available.
  uint8_t block[BLAKE2B_CBLOCK];
  // block_used is the number of bytes of |block| currently occupied.
  size_t block_used;
};

// BLAKE2B256_Init initialises |b2b| to perform a BLAKE2b-256 hash. There are no
// pointers inside |b2b| thus release of |b2b| is purely managed by the caller.
OPENSSL_EXPORT void BLAKE2B256_Init(BLAKE2B_CTX *b2b);

// BLAKE2B256_Update appends |len| bytes from |data| to the digest being
// calculated by |b2b|.
OPENSSL_EXPORT void BLAKE2B256_Update(BLAKE2B_CTX *b2b, const void *data,
                                      size_t len);

// BLAKE2B256_Final completes the digest calculated by |b2b| and writes
// |BLAKE2B256_DIGEST_LENGTH| bytes to |out|.
OPENSSL_EXPORT void BLAKE2B256_Final(uint8_t out[BLAKE2B256_DIGEST_LENGTH],
                                     BLAKE2B_CTX *b2b);

// BLAKE2B256 writes the BLAKE2b-256 digest of |len| bytes from |data| to
// |out|.
OPENSSL_EXPORT void BLAKE2B256(const uint8_t *data, size_t len,
                               uint8_t out[BLAKE2B256_DIGEST_LENGTH]);

#if defined(__cplusplus)
}  // extern C
#endif

#endif  // OPENSSL_HEADER_BLAKE2_H



================================================
FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_blowfish.h
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_BLOWFISH_H #define OPENSSL_HEADER_BLOWFISH_H #include "CNIOBoringSSL_base.h" #ifdef __cplusplus extern "C" { #endif #define BF_ENCRYPT 1 #define BF_DECRYPT 0 #define BF_ROUNDS 16 #define BF_BLOCK 8 typedef struct bf_key_st { uint32_t P[BF_ROUNDS + 2]; uint32_t S[4 * 256]; } BF_KEY; OPENSSL_EXPORT void BF_set_key(BF_KEY *key, size_t len, const uint8_t *data); OPENSSL_EXPORT void BF_encrypt(uint32_t *data, const BF_KEY *key); OPENSSL_EXPORT void BF_decrypt(uint32_t *data, const BF_KEY *key); OPENSSL_EXPORT void BF_ecb_encrypt(const uint8_t *in, uint8_t *out, const BF_KEY *key, int enc); OPENSSL_EXPORT void BF_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, const BF_KEY *schedule, uint8_t *ivec, int enc); #ifdef __cplusplus } #endif #endif // OPENSSL_HEADER_BLOWFISH_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_bn.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_BN_H #define OPENSSL_HEADER_BN_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_thread.h" #include #include // for FILE* #if defined(__cplusplus) extern "C" { #endif // BN provides support for working with arbitrary sized integers. For example, // although the largest integer supported by the compiler might be 64 bits, BN // will allow you to work with much larger numbers. 
// // This library is developed for use inside BoringSSL, and uses implementation // strategies that may not be ideal for other applications. Non-cryptographic // uses should use a more general-purpose integer library, especially if // performance-sensitive. // // Many functions in BN scale quadratically or higher in the bit length of their // input. Callers at this layer are assumed to have capped input sizes within // their performance tolerances. // BN_ULONG is the native word size when working with big integers. // // Note: on some platforms, inttypes.h does not define print format macros in // C++ unless |__STDC_FORMAT_MACROS| defined. This is due to text in C99 which // was never adopted in any C++ standard and explicitly overruled in C++11. As // this is a public header, bn.h does not define |__STDC_FORMAT_MACROS| itself. // Projects which use |BN_*_FMT*| with outdated C headers may need to define it // externally. #if defined(OPENSSL_64_BIT) typedef uint64_t BN_ULONG; #define BN_BITS2 64 #define BN_DEC_FMT1 "%" PRIu64 #define BN_HEX_FMT1 "%" PRIx64 #define BN_HEX_FMT2 "%016" PRIx64 #elif defined(OPENSSL_32_BIT) typedef uint32_t BN_ULONG; #define BN_BITS2 32 #define BN_DEC_FMT1 "%" PRIu32 #define BN_HEX_FMT1 "%" PRIx32 #define BN_HEX_FMT2 "%08" PRIx32 #else #error "Must define either OPENSSL_32_BIT or OPENSSL_64_BIT" #endif // Allocation and freeing. // BN_new creates a new, allocated BIGNUM and initialises it. OPENSSL_EXPORT BIGNUM *BN_new(void); // BN_init initialises a stack allocated |BIGNUM|. OPENSSL_EXPORT void BN_init(BIGNUM *bn); // BN_free frees the data referenced by |bn| and, if |bn| was originally // allocated on the heap, frees |bn| also. OPENSSL_EXPORT void BN_free(BIGNUM *bn); // BN_clear_free erases and frees the data referenced by |bn| and, if |bn| was // originally allocated on the heap, frees |bn| also. OPENSSL_EXPORT void BN_clear_free(BIGNUM *bn); // BN_dup allocates a new BIGNUM and sets it equal to |src|. 
It returns the // allocated BIGNUM on success or NULL otherwise. OPENSSL_EXPORT BIGNUM *BN_dup(const BIGNUM *src); // BN_copy sets |dest| equal to |src| and returns |dest| or NULL on allocation // failure. OPENSSL_EXPORT BIGNUM *BN_copy(BIGNUM *dest, const BIGNUM *src); // BN_clear sets |bn| to zero and erases the old data. OPENSSL_EXPORT void BN_clear(BIGNUM *bn); // BN_value_one returns a static BIGNUM with value 1. OPENSSL_EXPORT const BIGNUM *BN_value_one(void); // Basic functions. // BN_num_bits returns the minimum number of bits needed to represent the // absolute value of |bn|. OPENSSL_EXPORT unsigned BN_num_bits(const BIGNUM *bn); // BN_num_bytes returns the minimum number of bytes needed to represent the // absolute value of |bn|. // // While |size_t| is the preferred type for byte counts, callers can assume that // |BIGNUM|s are bounded such that this value, and its corresponding bit count, // will always fit in |int|. OPENSSL_EXPORT unsigned BN_num_bytes(const BIGNUM *bn); // BN_zero sets |bn| to zero. OPENSSL_EXPORT void BN_zero(BIGNUM *bn); // BN_one sets |bn| to one. It returns one on success or zero on allocation // failure. OPENSSL_EXPORT int BN_one(BIGNUM *bn); // BN_set_word sets |bn| to |value|. It returns one on success or zero on // allocation failure. OPENSSL_EXPORT int BN_set_word(BIGNUM *bn, BN_ULONG value); // BN_set_u64 sets |bn| to |value|. It returns one on success or zero on // allocation failure. OPENSSL_EXPORT int BN_set_u64(BIGNUM *bn, uint64_t value); // BN_set_negative sets the sign of |bn|. OPENSSL_EXPORT void BN_set_negative(BIGNUM *bn, int sign); // BN_is_negative returns one if |bn| is negative and zero otherwise. OPENSSL_EXPORT int BN_is_negative(const BIGNUM *bn); // Conversion functions. // BN_bin2bn sets |*ret| to the value of |len| bytes from |in|, interpreted as // a big-endian number, and returns |ret|. If |ret| is NULL then a fresh // |BIGNUM| is allocated and returned. It returns NULL on allocation // failure. 
OPENSSL_EXPORT BIGNUM *BN_bin2bn(const uint8_t *in, size_t len, BIGNUM *ret); // BN_bn2bin serialises the absolute value of |in| to |out| as a big-endian // integer, which must have |BN_num_bytes| of space available. It returns the // number of bytes written. Note this function leaks the magnitude of |in|. If // |in| is secret, use |BN_bn2bin_padded| instead. OPENSSL_EXPORT size_t BN_bn2bin(const BIGNUM *in, uint8_t *out); // BN_lebin2bn sets |*ret| to the value of |len| bytes from |in|, interpreted as // a little-endian number, and returns |ret|. If |ret| is NULL then a fresh // |BIGNUM| is allocated and returned. It returns NULL on allocation // failure. OPENSSL_EXPORT BIGNUM *BN_lebin2bn(const uint8_t *in, size_t len, BIGNUM *ret); // BN_bn2le_padded serialises the absolute value of |in| to |out| as a // little-endian integer, which must have |len| of space available, padding // out the remainder of out with zeros. If |len| is smaller than |BN_num_bytes|, // the function fails and returns 0. Otherwise, it returns 1. OPENSSL_EXPORT int BN_bn2le_padded(uint8_t *out, size_t len, const BIGNUM *in); // BN_bn2bin_padded serialises the absolute value of |in| to |out| as a // big-endian integer. The integer is padded with leading zeros up to size // |len|. If |len| is smaller than |BN_num_bytes|, the function fails and // returns 0. Otherwise, it returns 1. OPENSSL_EXPORT int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in); // BN_bn2cbb_padded behaves like |BN_bn2bin_padded| but writes to a |CBB|. OPENSSL_EXPORT int BN_bn2cbb_padded(CBB *out, size_t len, const BIGNUM *in); // BN_bn2hex returns an allocated string that contains a NUL-terminated, hex // representation of |bn|. If |bn| is negative, the first char in the resulting // string will be '-'. Returns NULL on allocation failure. 
OPENSSL_EXPORT char *BN_bn2hex(const BIGNUM *bn); // BN_hex2bn parses the leading hex number from |in|, which may be proceeded by // a '-' to indicate a negative number and may contain trailing, non-hex data. // If |outp| is not NULL, it constructs a BIGNUM equal to the hex number and // stores it in |*outp|. If |*outp| is NULL then it allocates a new BIGNUM and // updates |*outp|. It returns the number of bytes of |in| processed or zero on // error. OPENSSL_EXPORT int BN_hex2bn(BIGNUM **outp, const char *in); // BN_bn2dec returns an allocated string that contains a NUL-terminated, // decimal representation of |bn|. If |bn| is negative, the first char in the // resulting string will be '-'. Returns NULL on allocation failure. // // Converting an arbitrarily large integer to decimal is quadratic in the bit // length of |a|. This function assumes the caller has capped the input within // performance tolerances. OPENSSL_EXPORT char *BN_bn2dec(const BIGNUM *a); // BN_dec2bn parses the leading decimal number from |in|, which may be // proceeded by a '-' to indicate a negative number and may contain trailing, // non-decimal data. If |outp| is not NULL, it constructs a BIGNUM equal to the // decimal number and stores it in |*outp|. If |*outp| is NULL then it // allocates a new BIGNUM and updates |*outp|. It returns the number of bytes // of |in| processed or zero on error. // // Converting an arbitrarily large integer to decimal is quadratic in the bit // length of |a|. This function assumes the caller has capped the input within // performance tolerances. OPENSSL_EXPORT int BN_dec2bn(BIGNUM **outp, const char *in); // BN_asc2bn acts like |BN_dec2bn| or |BN_hex2bn| depending on whether |in| // begins with "0X" or "0x" (indicating hex) or not (indicating decimal). A // leading '-' is still permitted and comes before the optional 0X/0x. It // returns one on success or zero on error. 
OPENSSL_EXPORT int BN_asc2bn(BIGNUM **outp, const char *in); // BN_print writes a hex encoding of |a| to |bio|. It returns one on success // and zero on error. OPENSSL_EXPORT int BN_print(BIO *bio, const BIGNUM *a); // BN_print_fp acts like |BIO_print|, but wraps |fp| in a |BIO| first. OPENSSL_EXPORT int BN_print_fp(FILE *fp, const BIGNUM *a); // BN_get_word returns the absolute value of |bn| as a single word. If |bn| is // too large to be represented as a single word, the maximum possible value // will be returned. OPENSSL_EXPORT BN_ULONG BN_get_word(const BIGNUM *bn); // BN_get_u64 sets |*out| to the absolute value of |bn| as a |uint64_t| and // returns one. If |bn| is too large to be represented as a |uint64_t|, it // returns zero. OPENSSL_EXPORT int BN_get_u64(const BIGNUM *bn, uint64_t *out); // ASN.1 functions. // BN_parse_asn1_unsigned parses a non-negative DER INTEGER from |cbs| writes // the result to |ret|. It returns one on success and zero on failure. OPENSSL_EXPORT int BN_parse_asn1_unsigned(CBS *cbs, BIGNUM *ret); // BN_marshal_asn1 marshals |bn| as a non-negative DER INTEGER and appends the // result to |cbb|. It returns one on success and zero on failure. OPENSSL_EXPORT int BN_marshal_asn1(CBB *cbb, const BIGNUM *bn); // BIGNUM pools. // // Certain BIGNUM operations need to use many temporary variables and // allocating and freeing them can be quite slow. Thus such operations typically // take a |BN_CTX| parameter, which contains a pool of |BIGNUMs|. The |ctx| // argument to a public function may be NULL, in which case a local |BN_CTX| // will be created just for the lifetime of that call. // // A function must call |BN_CTX_start| first. Then, |BN_CTX_get| may be called // repeatedly to obtain temporary |BIGNUM|s. All |BN_CTX_get| calls must be made // before calling any other functions that use the |ctx| as an argument. // // Finally, |BN_CTX_end| must be called before returning from the function. 
// When |BN_CTX_end| is called, the |BIGNUM| pointers obtained from // |BN_CTX_get| become invalid. // BN_CTX_new returns a new, empty BN_CTX or NULL on allocation failure. OPENSSL_EXPORT BN_CTX *BN_CTX_new(void); // BN_CTX_free frees all BIGNUMs contained in |ctx| and then frees |ctx| // itself. OPENSSL_EXPORT void BN_CTX_free(BN_CTX *ctx); // BN_CTX_start "pushes" a new entry onto the |ctx| stack and allows future // calls to |BN_CTX_get|. OPENSSL_EXPORT void BN_CTX_start(BN_CTX *ctx); // BN_CTX_get returns a new |BIGNUM|, or NULL on allocation failure. Once // |BN_CTX_get| has returned NULL, all future calls will also return NULL until // |BN_CTX_end| is called. OPENSSL_EXPORT BIGNUM *BN_CTX_get(BN_CTX *ctx); // BN_CTX_end invalidates all |BIGNUM|s returned from |BN_CTX_get| since the // matching |BN_CTX_start| call. OPENSSL_EXPORT void BN_CTX_end(BN_CTX *ctx); // Simple arithmetic // BN_add sets |r| = |a| + |b|, where |r| may be the same pointer as either |a| // or |b|. It returns one on success and zero on allocation failure. OPENSSL_EXPORT int BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); // BN_uadd sets |r| = |a| + |b|, considering only the absolute values of |a| and // |b|. |r| may be the same pointer as either |a| or |b|. It returns one on // success and zero on allocation failure. OPENSSL_EXPORT int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); // BN_add_word adds |w| to |a|. It returns one on success and zero otherwise. OPENSSL_EXPORT int BN_add_word(BIGNUM *a, BN_ULONG w); // BN_sub sets |r| = |a| - |b|, where |r| may be the same pointer as either |a| // or |b|. It returns one on success and zero on allocation failure. OPENSSL_EXPORT int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); // BN_usub sets |r| = |a| - |b|, considering only the absolute values of |a| and // |b|. The result must be non-negative, i.e. |b| <= |a|. |r| may be the same // pointer as either |a| or |b|. It returns one on success and zero on error. 
OPENSSL_EXPORT int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); // BN_sub_word subtracts |w| from |a|. It returns one on success and zero on // allocation failure. OPENSSL_EXPORT int BN_sub_word(BIGNUM *a, BN_ULONG w); // BN_mul sets |r| = |a| * |b|, where |r| may be the same pointer as |a| or // |b|. Returns one on success and zero otherwise. OPENSSL_EXPORT int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); // BN_mul_word sets |bn| = |bn| * |w|. It returns one on success or zero on // allocation failure. OPENSSL_EXPORT int BN_mul_word(BIGNUM *bn, BN_ULONG w); // BN_sqr sets |r| = |a|^2 (i.e. squares), where |r| may be the same pointer as // |a|. Returns one on success and zero otherwise. This is more efficient than // BN_mul(r, a, a, ctx). OPENSSL_EXPORT int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx); // BN_div divides |numerator| by |divisor| and places the result in |quotient| // and the remainder in |rem|. Either of |quotient| or |rem| may be NULL, in // which case the respective value is not returned. It returns one on success or // zero on error. It is an error condition if |divisor| is zero. // // The outputs will be such that |quotient| * |divisor| + |rem| = |numerator|, // with the quotient rounded towards zero. Thus, if |numerator| is negative, // |rem| will be zero or negative. If |divisor| is negative, the sign of // |quotient| will be flipped to compensate but otherwise rounding will be as if // |divisor| were its absolute value. OPENSSL_EXPORT int BN_div(BIGNUM *quotient, BIGNUM *rem, const BIGNUM *numerator, const BIGNUM *divisor, BN_CTX *ctx); // BN_div_word sets |numerator| = |numerator|/|divisor| and returns the // remainder or (BN_ULONG)-1 on error. OPENSSL_EXPORT BN_ULONG BN_div_word(BIGNUM *numerator, BN_ULONG divisor); // BN_sqrt sets |*out_sqrt| (which may be the same |BIGNUM| as |in|) to the // square root of |in|, using |ctx|. It returns one on success or zero on // error. 
Negative numbers and non-square numbers will result in an error with // appropriate errors on the error queue. OPENSSL_EXPORT int BN_sqrt(BIGNUM *out_sqrt, const BIGNUM *in, BN_CTX *ctx); // Comparison functions // BN_cmp returns a value less than, equal to or greater than zero if |a| is // less than, equal to or greater than |b|, respectively. OPENSSL_EXPORT int BN_cmp(const BIGNUM *a, const BIGNUM *b); // BN_cmp_word is like |BN_cmp| except it takes its second argument as a // |BN_ULONG| instead of a |BIGNUM|. OPENSSL_EXPORT int BN_cmp_word(const BIGNUM *a, BN_ULONG b); // BN_ucmp returns a value less than, equal to or greater than zero if the // absolute value of |a| is less than, equal to or greater than the absolute // value of |b|, respectively. OPENSSL_EXPORT int BN_ucmp(const BIGNUM *a, const BIGNUM *b); // BN_equal_consttime returns one if |a| is equal to |b|, and zero otherwise. // It takes an amount of time dependent on the sizes of |a| and |b|, but // independent of the contents (including the signs) of |a| and |b|. OPENSSL_EXPORT int BN_equal_consttime(const BIGNUM *a, const BIGNUM *b); // BN_abs_is_word returns one if the absolute value of |bn| equals |w| and zero // otherwise. OPENSSL_EXPORT int BN_abs_is_word(const BIGNUM *bn, BN_ULONG w); // BN_is_zero returns one if |bn| is zero and zero otherwise. OPENSSL_EXPORT int BN_is_zero(const BIGNUM *bn); // BN_is_one returns one if |bn| equals one and zero otherwise. OPENSSL_EXPORT int BN_is_one(const BIGNUM *bn); // BN_is_word returns one if |bn| is exactly |w| and zero otherwise. OPENSSL_EXPORT int BN_is_word(const BIGNUM *bn, BN_ULONG w); // BN_is_odd returns one if |bn| is odd and zero otherwise. OPENSSL_EXPORT int BN_is_odd(const BIGNUM *bn); // BN_is_pow2 returns 1 if |a| is a power of two, and 0 otherwise. OPENSSL_EXPORT int BN_is_pow2(const BIGNUM *a); // Bitwise operations. // BN_lshift sets |r| equal to |a| << n. The |a| and |r| arguments may be the // same |BIGNUM|. 
It returns one on success and zero on allocation failure. OPENSSL_EXPORT int BN_lshift(BIGNUM *r, const BIGNUM *a, int n); // BN_lshift1 sets |r| equal to |a| << 1, where |r| and |a| may be the same // pointer. It returns one on success and zero on allocation failure. OPENSSL_EXPORT int BN_lshift1(BIGNUM *r, const BIGNUM *a); // BN_rshift sets |r| equal to |a| >> n, where |r| and |a| may be the same // pointer. It returns one on success and zero on allocation failure. OPENSSL_EXPORT int BN_rshift(BIGNUM *r, const BIGNUM *a, int n); // BN_rshift1 sets |r| equal to |a| >> 1, where |r| and |a| may be the same // pointer. It returns one on success and zero on allocation failure. OPENSSL_EXPORT int BN_rshift1(BIGNUM *r, const BIGNUM *a); // BN_set_bit sets the |n|th, least-significant bit in |a|. For example, if |a| // is 2 then setting bit zero will make it 3. It returns one on success or zero // on allocation failure. OPENSSL_EXPORT int BN_set_bit(BIGNUM *a, int n); // BN_clear_bit clears the |n|th, least-significant bit in |a|. For example, if // |a| is 3, clearing bit zero will make it two. It returns one on success or // zero on allocation failure. OPENSSL_EXPORT int BN_clear_bit(BIGNUM *a, int n); // BN_is_bit_set returns one if the |n|th least-significant bit in |a| exists // and is set. Otherwise, it returns zero. OPENSSL_EXPORT int BN_is_bit_set(const BIGNUM *a, int n); // BN_mask_bits truncates |a| so that it is only |n| bits long. It returns one // on success or zero if |n| is negative. // // This differs from OpenSSL which additionally returns zero if |a|'s word // length is less than or equal to |n|, rounded down to a number of words. Note // word size is platform-dependent, so this behavior is also difficult to rely // on in OpenSSL and not very useful. OPENSSL_EXPORT int BN_mask_bits(BIGNUM *a, int n); // BN_count_low_zero_bits returns the number of low-order zero bits in |bn|, or // the number of factors of two which divide it. 
It returns zero if |bn| is // zero. OPENSSL_EXPORT int BN_count_low_zero_bits(const BIGNUM *bn); // Modulo arithmetic. // BN_mod_word returns |a| mod |w| or (BN_ULONG)-1 on error. OPENSSL_EXPORT BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w); // BN_mod_pow2 sets |r| = |a| mod 2^|e|. It returns 1 on success and // 0 on error. OPENSSL_EXPORT int BN_mod_pow2(BIGNUM *r, const BIGNUM *a, size_t e); // BN_nnmod_pow2 sets |r| = |a| mod 2^|e| where |r| is always positive. // It returns 1 on success and 0 on error. OPENSSL_EXPORT int BN_nnmod_pow2(BIGNUM *r, const BIGNUM *a, size_t e); // BN_mod is a helper macro that calls |BN_div| and discards the quotient. #define BN_mod(rem, numerator, divisor, ctx) \ BN_div(NULL, (rem), (numerator), (divisor), (ctx)) // BN_nnmod is a non-negative modulo function. It acts like |BN_mod|, but 0 <= // |rem| < |divisor| is always true. It returns one on success and zero on // error. OPENSSL_EXPORT int BN_nnmod(BIGNUM *rem, const BIGNUM *numerator, const BIGNUM *divisor, BN_CTX *ctx); // BN_mod_add sets |r| = |a| + |b| mod |m|. It returns one on success and zero // on error. OPENSSL_EXPORT int BN_mod_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx); // BN_mod_add_quick acts like |BN_mod_add| but requires that |a| and |b| be // non-negative and less than |m|. OPENSSL_EXPORT int BN_mod_add_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m); // BN_mod_sub sets |r| = |a| - |b| mod |m|. It returns one on success and zero // on error. OPENSSL_EXPORT int BN_mod_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx); // BN_mod_sub_quick acts like |BN_mod_sub| but requires that |a| and |b| be // non-negative and less than |m|. OPENSSL_EXPORT int BN_mod_sub_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m); // BN_mod_mul sets |r| = |a|*|b| mod |m|. It returns one on success and zero // on error. 
OPENSSL_EXPORT int BN_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx); // BN_mod_sqr sets |r| = |a|^2 mod |m|. It returns one on success and zero // on error. OPENSSL_EXPORT int BN_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx); // BN_mod_lshift sets |r| = (|a| << n) mod |m|, where |r| and |a| may be the // same pointer. It returns one on success and zero on error. OPENSSL_EXPORT int BN_mod_lshift(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m, BN_CTX *ctx); // BN_mod_lshift_quick acts like |BN_mod_lshift| but requires that |a| be // non-negative and less than |m|. OPENSSL_EXPORT int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m); // BN_mod_lshift1 sets |r| = (|a| << 1) mod |m|, where |r| and |a| may be the // same pointer. It returns one on success and zero on error. OPENSSL_EXPORT int BN_mod_lshift1(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx); // BN_mod_lshift1_quick acts like |BN_mod_lshift1| but requires that |a| be // non-negative and less than |m|. OPENSSL_EXPORT int BN_mod_lshift1_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *m); // BN_mod_sqrt returns a newly-allocated |BIGNUM|, r, such that // r^2 == a (mod p). It returns NULL on error or if |a| is not a square mod |p|. // In the latter case, it will add |BN_R_NOT_A_SQUARE| to the error queue. // If |a| is a square and |p| > 2, there are two possible square roots. This // function may return either and may even select one non-deterministically. // // This function only works if |p| is a prime. If |p| is composite, it may fail // or return an arbitrary value. Callers should not pass attacker-controlled // values of |p|. OPENSSL_EXPORT BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); // Random and prime number generation. // The following are values for the |top| parameter of |BN_rand|. 
#define BN_RAND_TOP_ANY (-1) #define BN_RAND_TOP_ONE 0 #define BN_RAND_TOP_TWO 1 // The following are values for the |bottom| parameter of |BN_rand|. #define BN_RAND_BOTTOM_ANY 0 #define BN_RAND_BOTTOM_ODD 1 // BN_rand sets |rnd| to a random number of length |bits|. It returns one on // success and zero otherwise. // // |top| must be one of the |BN_RAND_TOP_*| values. If |BN_RAND_TOP_ONE|, the // most-significant bit, if any, will be set. If |BN_RAND_TOP_TWO|, the two // most significant bits, if any, will be set. If |BN_RAND_TOP_ANY|, no extra // action will be taken and |BN_num_bits(rnd)| may not equal |bits| if the most // significant bits randomly ended up as zeros. // // |bottom| must be one of the |BN_RAND_BOTTOM_*| values. If // |BN_RAND_BOTTOM_ODD|, the least-significant bit, if any, will be set. If // |BN_RAND_BOTTOM_ANY|, no extra action will be taken. OPENSSL_EXPORT int BN_rand(BIGNUM *rnd, int bits, int top, int bottom); // BN_pseudo_rand is an alias for |BN_rand|. OPENSSL_EXPORT int BN_pseudo_rand(BIGNUM *rnd, int bits, int top, int bottom); // BN_rand_range is equivalent to |BN_rand_range_ex| with |min_inclusive| set // to zero and |max_exclusive| set to |range|. OPENSSL_EXPORT int BN_rand_range(BIGNUM *rnd, const BIGNUM *range); // BN_rand_range_ex sets |rnd| to a random value in // [min_inclusive..max_exclusive). It returns one on success and zero // otherwise. OPENSSL_EXPORT int BN_rand_range_ex(BIGNUM *r, BN_ULONG min_inclusive, const BIGNUM *max_exclusive); // BN_pseudo_rand_range is an alias for BN_rand_range. OPENSSL_EXPORT int BN_pseudo_rand_range(BIGNUM *rnd, const BIGNUM *range); #define BN_GENCB_GENERATED 0 #define BN_GENCB_PRIME_TEST 1 // bn_gencb_st, or |BN_GENCB|, holds a callback function that is used by // generation functions that can take a very long time to complete. Use // |BN_GENCB_set| to initialise a |BN_GENCB| structure. 
// // The callback receives the address of that |BN_GENCB| structure as its last // argument and the user is free to put an arbitrary pointer in |arg|. The other // arguments are set as follows: // - event=BN_GENCB_GENERATED, n=i: after generating the i'th possible prime // number. // - event=BN_GENCB_PRIME_TEST, n=-1: when finished trial division primality // checks. // - event=BN_GENCB_PRIME_TEST, n=i: when the i'th primality test has finished. // // The callback can return zero to abort the generation progress or one to // allow it to continue. // // When other code needs to call a BN generation function it will often take a // BN_GENCB argument and may call the function with other argument values. struct bn_gencb_st { void *arg; // callback-specific data int (*callback)(int event, int n, struct bn_gencb_st *); }; // BN_GENCB_new returns a newly-allocated |BN_GENCB| object, or NULL on // allocation failure. The result must be released with |BN_GENCB_free| when // done. OPENSSL_EXPORT BN_GENCB *BN_GENCB_new(void); // BN_GENCB_free releases memory associated with |callback|. OPENSSL_EXPORT void BN_GENCB_free(BN_GENCB *callback); // BN_GENCB_set configures |callback| to call |f| and sets |callout->arg| to // |arg|. OPENSSL_EXPORT void BN_GENCB_set(BN_GENCB *callback, int (*f)(int event, int n, BN_GENCB *), void *arg); // BN_GENCB_call calls |callback|, if not NULL, and returns the return value of // the callback, or 1 if |callback| is NULL. OPENSSL_EXPORT int BN_GENCB_call(BN_GENCB *callback, int event, int n); // BN_GENCB_get_arg returns |callback->arg|. OPENSSL_EXPORT void *BN_GENCB_get_arg(const BN_GENCB *callback); // BN_generate_prime_ex sets |ret| to a prime number of |bits| length. If safe // is non-zero then the prime will be such that (ret-1)/2 is also a prime. // (This is needed for Diffie-Hellman groups to ensure that the only subgroups // are of size 2 and (p-1)/2.). 
// // If |add| is not NULL, the prime will fulfill the condition |ret| % |add| == // |rem| in order to suit a given generator. (If |rem| is NULL then |ret| % // |add| == 1.) // // If |cb| is not NULL, it will be called during processing to give an // indication of progress. See the comments for |BN_GENCB|. It returns one on // success and zero otherwise. OPENSSL_EXPORT int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add, const BIGNUM *rem, BN_GENCB *cb); // BN_prime_checks_for_validation can be used as the |checks| argument to the // primarily testing functions when validating an externally-supplied candidate // prime. It gives a false positive rate of at most 2^{-128}. (The worst case // false positive rate for a single iteration is 1/4 per // https://eprint.iacr.org/2018/749. (1/4)^64 = 2^{-128}.) #define BN_prime_checks_for_validation 64 // BN_prime_checks_for_generation can be used as the |checks| argument to the // primality testing functions when generating random primes. It gives a false // positive rate at most the security level of the corresponding RSA key size. // // Note this value only performs enough checks if the candidate prime was // selected randomly. If validating an externally-supplied candidate, especially // one that may be selected adversarially, use |BN_prime_checks_for_validation| // instead. #define BN_prime_checks_for_generation 0 // bn_primality_result_t enumerates the outcomes of primality-testing. enum bn_primality_result_t { bn_probably_prime, bn_composite, bn_non_prime_power_composite, }; // BN_enhanced_miller_rabin_primality_test tests whether |w| is probably a prime // number using the Enhanced Miller-Rabin Test (FIPS 186-4 C.3.2) with // |checks| iterations and returns the result in |out_result|. 
Enhanced // Miller-Rabin tests primality for odd integers greater than 3, returning // |bn_probably_prime| if the number is probably prime, // |bn_non_prime_power_composite| if the number is a composite that is not the // power of a single prime, and |bn_composite| otherwise. It returns one on // success and zero on failure. If |cb| is not NULL, then it is called during // each iteration of the primality test. // // See |BN_prime_checks_for_validation| and |BN_prime_checks_for_generation| for // recommended values of |checks|. OPENSSL_EXPORT int BN_enhanced_miller_rabin_primality_test( enum bn_primality_result_t *out_result, const BIGNUM *w, int checks, BN_CTX *ctx, BN_GENCB *cb); // BN_primality_test sets |*is_probably_prime| to one if |candidate| is // probably a prime number by the Miller-Rabin test or zero if it's certainly // not. // // If |do_trial_division| is non-zero then |candidate| will be tested against a // list of small primes before Miller-Rabin tests. The probability of this // function returning a false positive is at most 2^{2*checks}. See // |BN_prime_checks_for_validation| and |BN_prime_checks_for_generation| for // recommended values of |checks|. // // If |cb| is not NULL then it is called during the checking process. See the // comment above |BN_GENCB|. // // The function returns one on success and zero on error. OPENSSL_EXPORT int BN_primality_test(int *is_probably_prime, const BIGNUM *candidate, int checks, BN_CTX *ctx, int do_trial_division, BN_GENCB *cb); // BN_is_prime_fasttest_ex returns one if |candidate| is probably a prime // number by the Miller-Rabin test, zero if it's certainly not and -1 on error. // // If |do_trial_division| is non-zero then |candidate| will be tested against a // list of small primes before Miller-Rabin tests. The probability of this // function returning one when |candidate| is composite is at most 2^{2*checks}. 
// See |BN_prime_checks_for_validation| and |BN_prime_checks_for_generation| for // recommended values of |checks|. // // If |cb| is not NULL then it is called during the checking process. See the // comment above |BN_GENCB|. // // WARNING: deprecated. Use |BN_primality_test|. OPENSSL_EXPORT int BN_is_prime_fasttest_ex(const BIGNUM *candidate, int checks, BN_CTX *ctx, int do_trial_division, BN_GENCB *cb); // BN_is_prime_ex acts the same as |BN_is_prime_fasttest_ex| with // |do_trial_division| set to zero. // // WARNING: deprecated: Use |BN_primality_test|. OPENSSL_EXPORT int BN_is_prime_ex(const BIGNUM *candidate, int checks, BN_CTX *ctx, BN_GENCB *cb); // Number theory functions // BN_gcd sets |r| = gcd(|a|, |b|). It returns one on success and zero // otherwise. OPENSSL_EXPORT int BN_gcd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); // BN_mod_inverse sets |out| equal to |a|^-1, mod |n|. If |out| is NULL, a // fresh BIGNUM is allocated. It returns the result or NULL on error. // // If |n| is even then the operation is performed using an algorithm that avoids // some branches but which isn't constant-time. This function shouldn't be used // for secret values; use |BN_mod_inverse_blinded| instead. Or, if |n| is // guaranteed to be prime, use // |BN_mod_exp_mont_consttime(out, a, m_minus_2, m, ctx, m_mont)|, taking // advantage of Fermat's Little Theorem. OPENSSL_EXPORT BIGNUM *BN_mod_inverse(BIGNUM *out, const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx); // BN_mod_inverse_blinded sets |out| equal to |a|^-1, mod |n|, where |n| is the // Montgomery modulus for |mont|. |a| must be non-negative and must be less // than |n|. |n| must be greater than 1. |a| is blinded (masked by a random // value) to protect it against side-channel attacks. On failure, if the failure // was caused by |a| having no inverse mod |n| then |*out_no_inverse| will be // set to one; otherwise it will be set to zero. 
// // Note this function may incorrectly report |a| has no inverse if the random // blinding value has no inverse. It should only be used when |n| has few // non-invertible elements, such as an RSA modulus. OPENSSL_EXPORT int BN_mod_inverse_blinded(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, const BN_MONT_CTX *mont, BN_CTX *ctx); // BN_mod_inverse_odd sets |out| equal to |a|^-1, mod |n|. |a| must be // non-negative and must be less than |n|. |n| must be odd. This function // shouldn't be used for secret values; use |BN_mod_inverse_blinded| instead. // Or, if |n| is guaranteed to be prime, use // |BN_mod_exp_mont_consttime(out, a, m_minus_2, m, ctx, m_mont)|, taking // advantage of Fermat's Little Theorem. It returns one on success or zero on // failure. On failure, if the failure was caused by |a| having no inverse mod // |n| then |*out_no_inverse| will be set to one; otherwise it will be set to // zero. int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx); // Montgomery arithmetic. // BN_MONT_CTX contains the precomputed values needed to work in a specific // Montgomery domain. // BN_MONT_CTX_new_for_modulus returns a fresh |BN_MONT_CTX| given the modulus, // |mod| or NULL on error. Note this function assumes |mod| is public. OPENSSL_EXPORT BN_MONT_CTX *BN_MONT_CTX_new_for_modulus(const BIGNUM *mod, BN_CTX *ctx); // BN_MONT_CTX_new_consttime behaves like |BN_MONT_CTX_new_for_modulus| but // treats |mod| as secret. OPENSSL_EXPORT BN_MONT_CTX *BN_MONT_CTX_new_consttime(const BIGNUM *mod, BN_CTX *ctx); // BN_MONT_CTX_free frees memory associated with |mont|. OPENSSL_EXPORT void BN_MONT_CTX_free(BN_MONT_CTX *mont); // BN_MONT_CTX_copy sets |to| equal to |from|. It returns |to| on success or // NULL on error. OPENSSL_EXPORT BN_MONT_CTX *BN_MONT_CTX_copy(BN_MONT_CTX *to, const BN_MONT_CTX *from); // BN_to_montgomery sets |ret| equal to |a| in the Montgomery domain. 
|a| is // assumed to be in the range [0, n), where |n| is the Montgomery modulus. It // returns one on success or zero on error. OPENSSL_EXPORT int BN_to_montgomery(BIGNUM *ret, const BIGNUM *a, const BN_MONT_CTX *mont, BN_CTX *ctx); // BN_from_montgomery sets |ret| equal to |a| * R^-1, i.e. translates values out // of the Montgomery domain. |a| is assumed to be in the range [0, n*R), where // |n| is the Montgomery modulus. Note n < R, so inputs in the range [0, n*n) // are valid. This function returns one on success or zero on error. OPENSSL_EXPORT int BN_from_montgomery(BIGNUM *ret, const BIGNUM *a, const BN_MONT_CTX *mont, BN_CTX *ctx); // BN_mod_mul_montgomery set |r| equal to |a| * |b|, in the Montgomery domain. // Both |a| and |b| must already be in the Montgomery domain (by // |BN_to_montgomery|). In particular, |a| and |b| are assumed to be in the // range [0, n), where |n| is the Montgomery modulus. It returns one on success // or zero on error. OPENSSL_EXPORT int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BN_MONT_CTX *mont, BN_CTX *ctx); // Exponentiation. // BN_exp sets |r| equal to |a|^{|p|}. It does so with a square-and-multiply // algorithm that leaks side-channel information. It returns one on success or // zero otherwise. OPENSSL_EXPORT int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); // BN_mod_exp sets |r| equal to |a|^{|p|} mod |m|. It does so with the best // algorithm for the values provided. It returns one on success or zero // otherwise. The |BN_mod_exp_mont_consttime| variant must be used if the // exponent is secret. OPENSSL_EXPORT int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx); // BN_mod_exp_mont behaves like |BN_mod_exp| but treats |a| as secret and // requires 0 <= |a| < |m|. 
OPENSSL_EXPORT int BN_mod_exp_mont(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, const BN_MONT_CTX *mont); // BN_mod_exp_mont_consttime behaves like |BN_mod_exp| but treats |a|, |p|, and // |m| as secret and requires 0 <= |a| < |m|. OPENSSL_EXPORT int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, const BN_MONT_CTX *mont); // Deprecated functions // BN_bn2mpi serialises the value of |in| to |out|, using a format that consists // of the number's length in bytes represented as a 4-byte big-endian number, // and the number itself in big-endian format, where the most significant bit // signals a negative number. (The representation of numbers with the MSB set is // prefixed with null byte). |out| must have sufficient space available; to // find the needed amount of space, call the function with |out| set to NULL. OPENSSL_EXPORT size_t BN_bn2mpi(const BIGNUM *in, uint8_t *out); // BN_mpi2bn parses |len| bytes from |in| and returns the resulting value. The // bytes at |in| are expected to be in the format emitted by |BN_bn2mpi|. // // If |out| is NULL then a fresh |BIGNUM| is allocated and returned, otherwise // |out| is reused and returned. On error, NULL is returned and the error queue // is updated. OPENSSL_EXPORT BIGNUM *BN_mpi2bn(const uint8_t *in, size_t len, BIGNUM *out); // BN_mod_exp_mont_word is like |BN_mod_exp_mont| except that the base |a| is // given as a |BN_ULONG| instead of a |BIGNUM *|. It returns one on success // or zero otherwise. OPENSSL_EXPORT int BN_mod_exp_mont_word(BIGNUM *r, BN_ULONG a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, const BN_MONT_CTX *mont); // BN_mod_exp2_mont calculates (a1^p1) * (a2^p2) mod m. It returns 1 on success // or zero otherwise. 
OPENSSL_EXPORT int BN_mod_exp2_mont(BIGNUM *r, const BIGNUM *a1, const BIGNUM *p1, const BIGNUM *a2, const BIGNUM *p2, const BIGNUM *m, BN_CTX *ctx, const BN_MONT_CTX *mont); // BN_MONT_CTX_new returns a fresh |BN_MONT_CTX| or NULL on allocation failure. // Use |BN_MONT_CTX_new_for_modulus| instead. OPENSSL_EXPORT BN_MONT_CTX *BN_MONT_CTX_new(void); // BN_MONT_CTX_set sets up a Montgomery context given the modulus, |mod|. It // returns one on success and zero on error. Use |BN_MONT_CTX_new_for_modulus| // instead. OPENSSL_EXPORT int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx); // BN_bn2binpad behaves like |BN_bn2bin_padded|, but it returns |len| on success // and -1 on error. // // Use |BN_bn2bin_padded| instead. It is |size_t|-clean. OPENSSL_EXPORT int BN_bn2binpad(const BIGNUM *in, uint8_t *out, int len); // BN_bn2lebinpad behaves like |BN_bn2le_padded|, but it returns |len| on // success and -1 on error. // // Use |BN_bn2le_padded| instead. It is |size_t|-clean. OPENSSL_EXPORT int BN_bn2lebinpad(const BIGNUM *in, uint8_t *out, int len); // BN_prime_checks is a deprecated alias for |BN_prime_checks_for_validation|. // Use |BN_prime_checks_for_generation| or |BN_prime_checks_for_validation| // instead. (This defaults to the |_for_validation| value in order to be // conservative.) #define BN_prime_checks BN_prime_checks_for_validation // BN_secure_new calls |BN_new|. OPENSSL_EXPORT BIGNUM *BN_secure_new(void); // BN_le2bn calls |BN_lebin2bn|. OPENSSL_EXPORT BIGNUM *BN_le2bn(const uint8_t *in, size_t len, BIGNUM *ret); // Private functions struct bignum_st { // d is a pointer to an array of |width| |BN_BITS2|-bit chunks in // little-endian order. This stores the absolute value of the number. BN_ULONG *d; // width is the number of elements of |d| which are valid. This value is not // necessarily minimal; the most-significant words of |d| may be zero. // |width| determines a potentially loose upper-bound on the absolute value // of the |BIGNUM|. 
// // Functions taking |BIGNUM| inputs must compute the same answer for all // possible widths. |bn_minimal_width|, |bn_set_minimal_width|, and other // helpers may be used to recover the minimal width, provided it is not // secret. If it is secret, use a different algorithm. Functions may output // minimal or non-minimal |BIGNUM|s depending on secrecy requirements, but // those which cause widths to unboundedly grow beyond the minimal value // should be documented such. // // Note this is different from historical |BIGNUM| semantics. int width; // dmax is number of elements of |d| which are allocated. int dmax; // neg is one if the number if negative and zero otherwise. int neg; // flags is a bitmask of |BN_FLG_*| values int flags; }; struct bn_mont_ctx_st { // RR is R^2, reduced modulo |N|. It is used to convert to Montgomery form. It // is guaranteed to have the same width as |N|. BIGNUM RR; // N is the modulus. It is always stored in minimal form, so |N.width| // determines R. BIGNUM N; BN_ULONG n0[2]; // least significant words of (R*Ri-1)/N }; OPENSSL_EXPORT unsigned BN_num_bits_word(BN_ULONG l); #define BN_FLG_MALLOCED 0x01 #define BN_FLG_STATIC_DATA 0x02 // |BN_FLG_CONSTTIME| has been removed and intentionally omitted so code relying // on it will not compile. Consumers outside BoringSSL should use the // higher-level cryptographic algorithms exposed by other modules. Consumers // within the library should call the appropriate timing-sensitive algorithm // directly. 
#if defined(__cplusplus) } // extern C #if !defined(BORINGSSL_NO_CXX) extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(BIGNUM, BN_free) BORINGSSL_MAKE_DELETER(BN_CTX, BN_CTX_free) BORINGSSL_MAKE_DELETER(BN_MONT_CTX, BN_MONT_CTX_free) class BN_CTXScope { public: BN_CTXScope(BN_CTX *ctx) : ctx_(ctx) { BN_CTX_start(ctx_); } ~BN_CTXScope() { BN_CTX_end(ctx_); } private: BN_CTX *ctx_; BN_CTXScope(BN_CTXScope &) = delete; BN_CTXScope &operator=(BN_CTXScope &) = delete; }; BSSL_NAMESPACE_END } // extern C++ #endif #endif #define BN_R_ARG2_LT_ARG3 100 #define BN_R_BAD_RECIPROCAL 101 #define BN_R_BIGNUM_TOO_LONG 102 #define BN_R_BITS_TOO_SMALL 103 #define BN_R_CALLED_WITH_EVEN_MODULUS 104 #define BN_R_DIV_BY_ZERO 105 #define BN_R_EXPAND_ON_STATIC_BIGNUM_DATA 106 #define BN_R_INPUT_NOT_REDUCED 107 #define BN_R_INVALID_RANGE 108 #define BN_R_NEGATIVE_NUMBER 109 #define BN_R_NOT_A_SQUARE 110 #define BN_R_NOT_INITIALIZED 111 #define BN_R_NO_INVERSE 112 #define BN_R_PRIVATE_KEY_TOO_LARGE 113 #define BN_R_P_IS_NOT_PRIME 114 #define BN_R_TOO_MANY_ITERATIONS 115 #define BN_R_TOO_MANY_TEMPORARY_VARIABLES 116 #define BN_R_BAD_ENCODING 117 #define BN_R_ENCODE_ERROR 118 #define BN_R_INVALID_INPUT 119 #endif // OPENSSL_HEADER_BN_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_boringssl_prefix_symbols.h ================================================ // Copyright 2018 The BoringSSL Authors // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY // SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION // OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN // CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // BORINGSSL_ADD_PREFIX pastes two identifiers into one. It performs one // iteration of macro expansion on its arguments before pasting. #define BORINGSSL_ADD_PREFIX(a, b) BORINGSSL_ADD_PREFIX_INNER(a, b) #define BORINGSSL_ADD_PREFIX_INNER(a, b) a ## _ ## b #define ACCESS_DESCRIPTION_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ACCESS_DESCRIPTION_free) #define ACCESS_DESCRIPTION_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ACCESS_DESCRIPTION_new) #define AES_CMAC BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_CMAC) #define AES_cbc_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_cbc_encrypt) #define AES_cfb128_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_cfb128_encrypt) #define AES_ctr128_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_ctr128_encrypt) #define AES_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_decrypt) #define AES_ecb_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_ecb_encrypt) #define AES_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_encrypt) #define AES_ofb128_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_ofb128_encrypt) #define AES_set_decrypt_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_set_decrypt_key) #define AES_set_encrypt_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_set_encrypt_key) #define AES_unwrap_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_unwrap_key) #define AES_unwrap_key_padded BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_unwrap_key_padded) #define AES_wrap_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_wrap_key) #define AES_wrap_key_padded BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AES_wrap_key_padded) #define ASN1_ANY_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_ANY_it) #define 
ASN1_BIT_STRING_check BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_BIT_STRING_check) #define ASN1_BIT_STRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_BIT_STRING_free) #define ASN1_BIT_STRING_get_bit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_BIT_STRING_get_bit) #define ASN1_BIT_STRING_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_BIT_STRING_it) #define ASN1_BIT_STRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_BIT_STRING_new) #define ASN1_BIT_STRING_num_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_BIT_STRING_num_bytes) #define ASN1_BIT_STRING_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_BIT_STRING_set) #define ASN1_BIT_STRING_set_bit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_BIT_STRING_set_bit) #define ASN1_BMPSTRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_BMPSTRING_free) #define ASN1_BMPSTRING_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_BMPSTRING_it) #define ASN1_BMPSTRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_BMPSTRING_new) #define ASN1_BOOLEAN_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_BOOLEAN_it) #define ASN1_ENUMERATED_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_ENUMERATED_free) #define ASN1_ENUMERATED_get BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_ENUMERATED_get) #define ASN1_ENUMERATED_get_int64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_ENUMERATED_get_int64) #define ASN1_ENUMERATED_get_uint64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_ENUMERATED_get_uint64) #define ASN1_ENUMERATED_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_ENUMERATED_it) #define ASN1_ENUMERATED_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_ENUMERATED_new) #define ASN1_ENUMERATED_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_ENUMERATED_set) #define ASN1_ENUMERATED_set_int64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_ENUMERATED_set_int64) #define ASN1_ENUMERATED_set_uint64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_ENUMERATED_set_uint64) #define ASN1_ENUMERATED_to_BN BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
ASN1_ENUMERATED_to_BN) #define ASN1_FBOOLEAN_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_FBOOLEAN_it) #define ASN1_GENERALIZEDTIME_adj BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_adj) #define ASN1_GENERALIZEDTIME_check BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_check) #define ASN1_GENERALIZEDTIME_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_free) #define ASN1_GENERALIZEDTIME_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_it) #define ASN1_GENERALIZEDTIME_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_new) #define ASN1_GENERALIZEDTIME_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_print) #define ASN1_GENERALIZEDTIME_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_set) #define ASN1_GENERALIZEDTIME_set_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_set_string) #define ASN1_GENERALSTRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_GENERALSTRING_free) #define ASN1_GENERALSTRING_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_GENERALSTRING_it) #define ASN1_GENERALSTRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_GENERALSTRING_new) #define ASN1_IA5STRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_IA5STRING_free) #define ASN1_IA5STRING_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_IA5STRING_it) #define ASN1_IA5STRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_IA5STRING_new) #define ASN1_INTEGER_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_INTEGER_cmp) #define ASN1_INTEGER_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_INTEGER_dup) #define ASN1_INTEGER_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_INTEGER_free) #define ASN1_INTEGER_get BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_INTEGER_get) #define ASN1_INTEGER_get_int64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_INTEGER_get_int64) #define ASN1_INTEGER_get_uint64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_INTEGER_get_uint64) #define ASN1_INTEGER_it 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_INTEGER_it) #define ASN1_INTEGER_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_INTEGER_new) #define ASN1_INTEGER_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_INTEGER_set) #define ASN1_INTEGER_set_int64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_INTEGER_set_int64) #define ASN1_INTEGER_set_uint64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_INTEGER_set_uint64) #define ASN1_INTEGER_to_BN BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_INTEGER_to_BN) #define ASN1_NULL_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_NULL_free) #define ASN1_NULL_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_NULL_it) #define ASN1_NULL_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_NULL_new) #define ASN1_OBJECT_create BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_OBJECT_create) #define ASN1_OBJECT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_OBJECT_free) #define ASN1_OBJECT_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_OBJECT_it) #define ASN1_OBJECT_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_OBJECT_new) #define ASN1_OCTET_STRING_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_OCTET_STRING_cmp) #define ASN1_OCTET_STRING_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_OCTET_STRING_dup) #define ASN1_OCTET_STRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_OCTET_STRING_free) #define ASN1_OCTET_STRING_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_OCTET_STRING_it) #define ASN1_OCTET_STRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_OCTET_STRING_new) #define ASN1_OCTET_STRING_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_OCTET_STRING_set) #define ASN1_PRINTABLESTRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_PRINTABLESTRING_free) #define ASN1_PRINTABLESTRING_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_PRINTABLESTRING_it) #define ASN1_PRINTABLESTRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_PRINTABLESTRING_new) #define ASN1_PRINTABLE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_PRINTABLE_free) #define 
ASN1_PRINTABLE_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_PRINTABLE_it) #define ASN1_PRINTABLE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_PRINTABLE_new) #define ASN1_SEQUENCE_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_SEQUENCE_it) #define ASN1_STRING_TABLE_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_TABLE_add) #define ASN1_STRING_TABLE_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_TABLE_cleanup) #define ASN1_STRING_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_cmp) #define ASN1_STRING_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_copy) #define ASN1_STRING_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_data) #define ASN1_STRING_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_dup) #define ASN1_STRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_free) #define ASN1_STRING_get0_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_get0_data) #define ASN1_STRING_get_default_mask BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_get_default_mask) #define ASN1_STRING_length BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_length) #define ASN1_STRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_new) #define ASN1_STRING_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_print) #define ASN1_STRING_print_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_print_ex) #define ASN1_STRING_print_ex_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_print_ex_fp) #define ASN1_STRING_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_set) #define ASN1_STRING_set0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_set0) #define ASN1_STRING_set_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_set_by_NID) #define ASN1_STRING_set_default_mask BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_set_default_mask) #define ASN1_STRING_set_default_mask_asc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_set_default_mask_asc) #define ASN1_STRING_to_UTF8 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_to_UTF8) #define ASN1_STRING_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_type) #define ASN1_STRING_type_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_STRING_type_new) #define ASN1_T61STRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_T61STRING_free) #define ASN1_T61STRING_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_T61STRING_it) #define ASN1_T61STRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_T61STRING_new) #define ASN1_TBOOLEAN_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TBOOLEAN_it) #define ASN1_TIME_adj BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_adj) #define ASN1_TIME_check BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_check) #define ASN1_TIME_diff BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_diff) #define ASN1_TIME_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_free) #define ASN1_TIME_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_it) #define ASN1_TIME_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_new) #define ASN1_TIME_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_print) #define ASN1_TIME_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_set) #define ASN1_TIME_set_posix BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_set_posix) #define ASN1_TIME_set_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_set_string) #define ASN1_TIME_set_string_X509 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_set_string_X509) #define ASN1_TIME_to_generalizedtime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_to_generalizedtime) #define ASN1_TIME_to_posix BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_to_posix) #define ASN1_TIME_to_posix_nonstandard BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_to_posix_nonstandard) #define ASN1_TIME_to_time_t BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TIME_to_time_t) #define ASN1_TYPE_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TYPE_cmp) #define ASN1_TYPE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
ASN1_TYPE_free) #define ASN1_TYPE_get BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TYPE_get) #define ASN1_TYPE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TYPE_new) #define ASN1_TYPE_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TYPE_set) #define ASN1_TYPE_set1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_TYPE_set1) #define ASN1_UNIVERSALSTRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UNIVERSALSTRING_free) #define ASN1_UNIVERSALSTRING_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UNIVERSALSTRING_it) #define ASN1_UNIVERSALSTRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UNIVERSALSTRING_new) #define ASN1_UTCTIME_adj BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UTCTIME_adj) #define ASN1_UTCTIME_check BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UTCTIME_check) #define ASN1_UTCTIME_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UTCTIME_free) #define ASN1_UTCTIME_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UTCTIME_it) #define ASN1_UTCTIME_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UTCTIME_new) #define ASN1_UTCTIME_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UTCTIME_print) #define ASN1_UTCTIME_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UTCTIME_set) #define ASN1_UTCTIME_set_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UTCTIME_set_string) #define ASN1_UTF8STRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UTF8STRING_free) #define ASN1_UTF8STRING_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UTF8STRING_it) #define ASN1_UTF8STRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_UTF8STRING_new) #define ASN1_VISIBLESTRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_VISIBLESTRING_free) #define ASN1_VISIBLESTRING_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_VISIBLESTRING_it) #define ASN1_VISIBLESTRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_VISIBLESTRING_new) #define ASN1_digest BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_digest) #define ASN1_generate_v3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_generate_v3) 
#define ASN1_get_object BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_get_object) #define ASN1_item_d2i BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_d2i) #define ASN1_item_d2i_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_d2i_bio) #define ASN1_item_d2i_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_d2i_fp) #define ASN1_item_digest BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_digest) #define ASN1_item_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_dup) #define ASN1_item_ex_d2i BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_ex_d2i) #define ASN1_item_ex_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_ex_free) #define ASN1_item_ex_i2d BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_ex_i2d) #define ASN1_item_ex_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_ex_new) #define ASN1_item_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_free) #define ASN1_item_i2d BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_i2d) #define ASN1_item_i2d_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_i2d_bio) #define ASN1_item_i2d_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_i2d_fp) #define ASN1_item_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_new) #define ASN1_item_pack BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_pack) #define ASN1_item_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_sign) #define ASN1_item_sign_ctx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_sign_ctx) #define ASN1_item_unpack BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_unpack) #define ASN1_item_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_item_verify) #define ASN1_mbstring_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_mbstring_copy) #define ASN1_mbstring_ncopy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_mbstring_ncopy) #define ASN1_object_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_object_size) #define ASN1_primitive_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_primitive_free) #define ASN1_put_eoc 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_put_eoc) #define ASN1_put_object BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_put_object) #define ASN1_tag2bit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_tag2bit) #define ASN1_tag2str BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_tag2str) #define ASN1_template_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ASN1_template_free) #define AUTHORITY_INFO_ACCESS_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AUTHORITY_INFO_ACCESS_free) #define AUTHORITY_INFO_ACCESS_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AUTHORITY_INFO_ACCESS_it) #define AUTHORITY_INFO_ACCESS_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AUTHORITY_INFO_ACCESS_new) #define AUTHORITY_KEYID_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AUTHORITY_KEYID_free) #define AUTHORITY_KEYID_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AUTHORITY_KEYID_it) #define AUTHORITY_KEYID_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, AUTHORITY_KEYID_new) #define BASIC_CONSTRAINTS_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BASIC_CONSTRAINTS_free) #define BASIC_CONSTRAINTS_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BASIC_CONSTRAINTS_it) #define BASIC_CONSTRAINTS_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BASIC_CONSTRAINTS_new) #define BCM_fips_186_2_prf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_fips_186_2_prf) #define BCM_mldsa65_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_generate_key) #define BCM_mldsa65_generate_key_external_entropy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_generate_key_external_entropy) #define BCM_mldsa65_marshal_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_marshal_private_key) #define BCM_mldsa65_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_marshal_public_key) #define BCM_mldsa65_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_parse_private_key) #define BCM_mldsa65_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_parse_public_key) #define 
BCM_mldsa65_private_key_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_private_key_from_seed) #define BCM_mldsa65_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_public_from_private) #define BCM_mldsa65_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_sign) #define BCM_mldsa65_sign_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_sign_internal) #define BCM_mldsa65_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_verify) #define BCM_mldsa65_verify_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa65_verify_internal) #define BCM_mldsa87_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_generate_key) #define BCM_mldsa87_generate_key_external_entropy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_generate_key_external_entropy) #define BCM_mldsa87_marshal_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_marshal_private_key) #define BCM_mldsa87_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_marshal_public_key) #define BCM_mldsa87_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_parse_private_key) #define BCM_mldsa87_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_parse_public_key) #define BCM_mldsa87_private_key_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_private_key_from_seed) #define BCM_mldsa87_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_public_from_private) #define BCM_mldsa87_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_sign) #define BCM_mldsa87_sign_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_sign_internal) #define BCM_mldsa87_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_verify) #define BCM_mldsa87_verify_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mldsa87_verify_internal) #define BCM_mlkem1024_decap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem1024_decap) #define BCM_mlkem1024_encap 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem1024_encap) #define BCM_mlkem1024_encap_external_entropy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem1024_encap_external_entropy) #define BCM_mlkem1024_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem1024_generate_key) #define BCM_mlkem1024_generate_key_external_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem1024_generate_key_external_seed) #define BCM_mlkem1024_marshal_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem1024_marshal_private_key) #define BCM_mlkem1024_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem1024_marshal_public_key) #define BCM_mlkem1024_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem1024_parse_private_key) #define BCM_mlkem1024_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem1024_parse_public_key) #define BCM_mlkem1024_private_key_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem1024_private_key_from_seed) #define BCM_mlkem1024_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem1024_public_from_private) #define BCM_mlkem768_decap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem768_decap) #define BCM_mlkem768_encap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem768_encap) #define BCM_mlkem768_encap_external_entropy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem768_encap_external_entropy) #define BCM_mlkem768_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem768_generate_key) #define BCM_mlkem768_generate_key_external_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem768_generate_key_external_seed) #define BCM_mlkem768_marshal_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem768_marshal_private_key) #define BCM_mlkem768_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem768_marshal_public_key) #define BCM_mlkem768_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem768_parse_private_key) #define BCM_mlkem768_parse_public_key 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem768_parse_public_key) #define BCM_mlkem768_private_key_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem768_private_key_from_seed) #define BCM_mlkem768_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_mlkem768_public_from_private) #define BCM_rand_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_rand_bytes) #define BCM_rand_bytes_hwrng BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_rand_bytes_hwrng) #define BCM_rand_bytes_with_additional_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_rand_bytes_with_additional_data) #define BCM_sha1_final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha1_final) #define BCM_sha1_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha1_init) #define BCM_sha1_transform BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha1_transform) #define BCM_sha1_update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha1_update) #define BCM_sha224_final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha224_final) #define BCM_sha224_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha224_init) #define BCM_sha224_update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha224_update) #define BCM_sha256_final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha256_final) #define BCM_sha256_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha256_init) #define BCM_sha256_transform BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha256_transform) #define BCM_sha256_transform_blocks BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha256_transform_blocks) #define BCM_sha256_update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha256_update) #define BCM_sha384_final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha384_final) #define BCM_sha384_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha384_init) #define BCM_sha384_update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha384_update) #define BCM_sha512_256_final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha512_256_final) #define BCM_sha512_256_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
BCM_sha512_256_init) #define BCM_sha512_256_update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha512_256_update) #define BCM_sha512_final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha512_final) #define BCM_sha512_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha512_init) #define BCM_sha512_transform BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha512_transform) #define BCM_sha512_update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_sha512_update) #define BCM_slhdsa_sha2_128s_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_generate_key) #define BCM_slhdsa_sha2_128s_generate_key_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_generate_key_from_seed) #define BCM_slhdsa_sha2_128s_prehash_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_prehash_sign) #define BCM_slhdsa_sha2_128s_prehash_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_prehash_verify) #define BCM_slhdsa_sha2_128s_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_public_from_private) #define BCM_slhdsa_sha2_128s_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_sign) #define BCM_slhdsa_sha2_128s_sign_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_sign_internal) #define BCM_slhdsa_sha2_128s_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_verify) #define BCM_slhdsa_sha2_128s_verify_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_verify_internal) #define BIO_append_filename BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_append_filename) #define BIO_callback_ctrl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_callback_ctrl) #define BIO_clear_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_clear_flags) #define BIO_clear_retry_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_clear_retry_flags) #define BIO_copy_next_retry BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_copy_next_retry) #define BIO_ctrl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
BIO_ctrl) #define BIO_ctrl_get_read_request BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_ctrl_get_read_request) #define BIO_ctrl_get_write_guarantee BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_ctrl_get_write_guarantee) #define BIO_ctrl_pending BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_ctrl_pending) #define BIO_do_connect BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_do_connect) #define BIO_eof BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_eof) #define BIO_f_ssl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_f_ssl) #define BIO_find_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_find_type) #define BIO_flush BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_flush) #define BIO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_free) #define BIO_free_all BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_free_all) #define BIO_get_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_get_data) #define BIO_get_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_get_ex_data) #define BIO_get_ex_new_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_get_ex_new_index) #define BIO_get_fd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_get_fd) #define BIO_get_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_get_fp) #define BIO_get_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_get_init) #define BIO_get_mem_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_get_mem_data) #define BIO_get_mem_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_get_mem_ptr) #define BIO_get_new_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_get_new_index) #define BIO_get_retry_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_get_retry_flags) #define BIO_get_retry_reason BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_get_retry_reason) #define BIO_get_shutdown BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_get_shutdown) #define BIO_gets BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_gets) #define BIO_hexdump BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_hexdump) #define BIO_indent BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_indent) #define BIO_int_ctrl 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_int_ctrl) #define BIO_mem_contents BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_mem_contents) #define BIO_meth_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_meth_free) #define BIO_meth_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_meth_new) #define BIO_meth_set_create BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_meth_set_create) #define BIO_meth_set_ctrl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_meth_set_ctrl) #define BIO_meth_set_destroy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_meth_set_destroy) #define BIO_meth_set_gets BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_meth_set_gets) #define BIO_meth_set_puts BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_meth_set_puts) #define BIO_meth_set_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_meth_set_read) #define BIO_meth_set_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_meth_set_write) #define BIO_method_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_method_type) #define BIO_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new) #define BIO_new_bio_pair BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_bio_pair) #define BIO_new_connect BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_connect) #define BIO_new_fd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_fd) #define BIO_new_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_file) #define BIO_new_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_fp) #define BIO_new_mem_buf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_mem_buf) #define BIO_new_socket BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_new_socket) #define BIO_next BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_next) #define BIO_number_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_number_read) #define BIO_number_written BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_number_written) #define BIO_pending BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_pending) #define BIO_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_pop) #define BIO_printf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
BIO_printf) #define BIO_ptr_ctrl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_ptr_ctrl) #define BIO_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_push) #define BIO_puts BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_puts) #define BIO_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_read) #define BIO_read_asn1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_read_asn1) #define BIO_read_filename BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_read_filename) #define BIO_reset BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_reset) #define BIO_rw_filename BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_rw_filename) #define BIO_s_connect BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_s_connect) #define BIO_s_fd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_s_fd) #define BIO_s_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_s_file) #define BIO_s_mem BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_s_mem) #define BIO_s_socket BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_s_socket) #define BIO_seek BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_seek) #define BIO_set_close BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_close) #define BIO_set_conn_hostname BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_conn_hostname) #define BIO_set_conn_int_port BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_conn_int_port) #define BIO_set_conn_port BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_conn_port) #define BIO_set_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_data) #define BIO_set_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_ex_data) #define BIO_set_fd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_fd) #define BIO_set_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_flags) #define BIO_set_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_fp) #define BIO_set_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_init) #define BIO_set_mem_buf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_mem_buf) #define BIO_set_mem_eof_return BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_mem_eof_return) #define BIO_set_nbio 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_nbio) #define BIO_set_retry_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_retry_read) #define BIO_set_retry_reason BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_retry_reason) #define BIO_set_retry_special BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_retry_special) #define BIO_set_retry_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_retry_write) #define BIO_set_shutdown BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_shutdown) #define BIO_set_ssl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_ssl) #define BIO_set_write_buffer_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_write_buffer_size) #define BIO_should_io_special BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_should_io_special) #define BIO_should_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_should_read) #define BIO_should_retry BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_should_retry) #define BIO_should_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_should_write) #define BIO_shutdown_wr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_shutdown_wr) #define BIO_snprintf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_snprintf) #define BIO_tell BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_tell) #define BIO_test_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_test_flags) #define BIO_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_up_ref) #define BIO_vfree BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_vfree) #define BIO_vsnprintf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_vsnprintf) #define BIO_wpending BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_wpending) #define BIO_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_write) #define BIO_write_all BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_write_all) #define BIO_write_filename BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_write_filename) #define BLAKE2B256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BLAKE2B256) #define BLAKE2B256_Final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BLAKE2B256_Final) #define BLAKE2B256_Init 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BLAKE2B256_Init) #define BLAKE2B256_Update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BLAKE2B256_Update) #define BN_BLINDING_convert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_BLINDING_convert) #define BN_BLINDING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_BLINDING_free) #define BN_BLINDING_invalidate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_BLINDING_invalidate) #define BN_BLINDING_invert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_BLINDING_invert) #define BN_BLINDING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_BLINDING_new) #define BN_CTX_end BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_CTX_end) #define BN_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_CTX_free) #define BN_CTX_get BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_CTX_get) #define BN_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_CTX_new) #define BN_CTX_start BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_CTX_start) #define BN_GENCB_call BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_GENCB_call) #define BN_GENCB_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_GENCB_free) #define BN_GENCB_get_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_GENCB_get_arg) #define BN_GENCB_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_GENCB_new) #define BN_GENCB_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_GENCB_set) #define BN_MONT_CTX_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_MONT_CTX_copy) #define BN_MONT_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_MONT_CTX_free) #define BN_MONT_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_MONT_CTX_new) #define BN_MONT_CTX_new_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_MONT_CTX_new_consttime) #define BN_MONT_CTX_new_for_modulus BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_MONT_CTX_new_for_modulus) #define BN_MONT_CTX_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_MONT_CTX_set) #define BN_MONT_CTX_set_locked BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_MONT_CTX_set_locked) #define BN_abs_is_word BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
BN_abs_is_word) #define BN_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_add) #define BN_add_word BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_add_word) #define BN_asc2bn BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_asc2bn) #define BN_bin2bn BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_bin2bn) #define BN_bn2bin BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_bn2bin) #define BN_bn2bin_padded BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_bn2bin_padded) #define BN_bn2binpad BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_bn2binpad) #define BN_bn2cbb_padded BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_bn2cbb_padded) #define BN_bn2dec BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_bn2dec) #define BN_bn2hex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_bn2hex) #define BN_bn2le_padded BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_bn2le_padded) #define BN_bn2lebinpad BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_bn2lebinpad) #define BN_bn2mpi BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_bn2mpi) #define BN_clear BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_clear) #define BN_clear_bit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_clear_bit) #define BN_clear_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_clear_free) #define BN_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_cmp) #define BN_cmp_word BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_cmp_word) #define BN_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_copy) #define BN_count_low_zero_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_count_low_zero_bits) #define BN_dec2bn BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_dec2bn) #define BN_div BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_div) #define BN_div_word BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_div_word) #define BN_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_dup) #define BN_enhanced_miller_rabin_primality_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_enhanced_miller_rabin_primality_test) #define BN_equal_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_equal_consttime) #define BN_exp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
BN_exp) #define BN_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_free) #define BN_from_montgomery BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_from_montgomery) #define BN_gcd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_gcd) #define BN_generate_prime_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_generate_prime_ex) #define BN_get_rfc3526_prime_1536 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_get_rfc3526_prime_1536) #define BN_get_rfc3526_prime_2048 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_get_rfc3526_prime_2048) #define BN_get_rfc3526_prime_3072 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_get_rfc3526_prime_3072) #define BN_get_rfc3526_prime_4096 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_get_rfc3526_prime_4096) #define BN_get_rfc3526_prime_6144 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_get_rfc3526_prime_6144) #define BN_get_rfc3526_prime_8192 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_get_rfc3526_prime_8192) #define BN_get_u64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_get_u64) #define BN_get_word BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_get_word) #define BN_hex2bn BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_hex2bn) #define BN_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_init) #define BN_is_bit_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_is_bit_set) #define BN_is_negative BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_is_negative) #define BN_is_odd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_is_odd) #define BN_is_one BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_is_one) #define BN_is_pow2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_is_pow2) #define BN_is_prime_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_is_prime_ex) #define BN_is_prime_fasttest_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_is_prime_fasttest_ex) #define BN_is_word BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_is_word) #define BN_is_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_is_zero) #define BN_le2bn BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_le2bn) #define BN_lebin2bn BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_lebin2bn) 
#define BN_lshift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_lshift) #define BN_lshift1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_lshift1) #define BN_marshal_asn1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_marshal_asn1) #define BN_mask_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mask_bits) #define BN_mod_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_add) #define BN_mod_add_quick BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_add_quick) #define BN_mod_exp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_exp) #define BN_mod_exp2_mont BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_exp2_mont) #define BN_mod_exp_mont BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_exp_mont) #define BN_mod_exp_mont_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_exp_mont_consttime) #define BN_mod_exp_mont_word BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_exp_mont_word) #define BN_mod_inverse BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_inverse) #define BN_mod_inverse_blinded BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_inverse_blinded) #define BN_mod_inverse_odd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_inverse_odd) #define BN_mod_lshift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_lshift) #define BN_mod_lshift1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_lshift1) #define BN_mod_lshift1_quick BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_lshift1_quick) #define BN_mod_lshift_quick BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_lshift_quick) #define BN_mod_mul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_mul) #define BN_mod_mul_montgomery BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_mul_montgomery) #define BN_mod_pow2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_pow2) #define BN_mod_sqr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_sqr) #define BN_mod_sqrt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_sqrt) #define BN_mod_sub BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_sub) #define BN_mod_sub_quick BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_sub_quick) #define 
BN_mod_word BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mod_word) #define BN_mpi2bn BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mpi2bn) #define BN_mul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mul) #define BN_mul_word BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_mul_word) #define BN_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_new) #define BN_nnmod BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_nnmod) #define BN_nnmod_pow2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_nnmod_pow2) #define BN_num_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_num_bits) #define BN_num_bits_word BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_num_bits_word) #define BN_num_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_num_bytes) #define BN_one BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_one) #define BN_parse_asn1_unsigned BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_parse_asn1_unsigned) #define BN_primality_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_primality_test) #define BN_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_print) #define BN_print_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_print_fp) #define BN_pseudo_rand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_pseudo_rand) #define BN_pseudo_rand_range BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_pseudo_rand_range) #define BN_rand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_rand) #define BN_rand_range BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_rand_range) #define BN_rand_range_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_rand_range_ex) #define BN_rshift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_rshift) #define BN_rshift1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_rshift1) #define BN_secure_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_secure_new) #define BN_set_bit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_set_bit) #define BN_set_negative BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_set_negative) #define BN_set_u64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_set_u64) #define BN_set_word BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_set_word) #define BN_sqr 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_sqr) #define BN_sqrt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_sqrt) #define BN_sub BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_sub) #define BN_sub_word BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_sub_word) #define BN_to_ASN1_ENUMERATED BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_to_ASN1_ENUMERATED) #define BN_to_ASN1_INTEGER BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_to_ASN1_INTEGER) #define BN_to_montgomery BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_to_montgomery) #define BN_uadd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_uadd) #define BN_ucmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_ucmp) #define BN_usub BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_usub) #define BN_value_one BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_value_one) #define BN_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_zero) #define BORINGSSL_keccak BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BORINGSSL_keccak) #define BORINGSSL_keccak_absorb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BORINGSSL_keccak_absorb) #define BORINGSSL_keccak_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BORINGSSL_keccak_init) #define BORINGSSL_keccak_squeeze BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BORINGSSL_keccak_squeeze) #define BORINGSSL_self_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BORINGSSL_self_test) #define BUF_MEM_append BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BUF_MEM_append) #define BUF_MEM_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BUF_MEM_free) #define BUF_MEM_grow BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BUF_MEM_grow) #define BUF_MEM_grow_clean BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BUF_MEM_grow_clean) #define BUF_MEM_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BUF_MEM_new) #define BUF_MEM_reserve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BUF_MEM_reserve) #define BUF_memdup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BUF_memdup) #define BUF_strdup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BUF_strdup) #define BUF_strlcat BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BUF_strlcat) #define BUF_strlcpy 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BUF_strlcpy) #define BUF_strndup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BUF_strndup) #define BUF_strnlen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BUF_strnlen) #define CBB_add_asn1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_asn1) #define CBB_add_asn1_bool BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_asn1_bool) #define CBB_add_asn1_int64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_asn1_int64) #define CBB_add_asn1_int64_with_tag BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_asn1_int64_with_tag) #define CBB_add_asn1_octet_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_asn1_octet_string) #define CBB_add_asn1_oid_from_text BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_asn1_oid_from_text) #define CBB_add_asn1_uint64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_asn1_uint64) #define CBB_add_asn1_uint64_with_tag BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_asn1_uint64_with_tag) #define CBB_add_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_bytes) #define CBB_add_latin1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_latin1) #define CBB_add_space BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_space) #define CBB_add_u16 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u16) #define CBB_add_u16_length_prefixed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u16_length_prefixed) #define CBB_add_u16le BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u16le) #define CBB_add_u24 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u24) #define CBB_add_u24_length_prefixed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u24_length_prefixed) #define CBB_add_u32 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u32) #define CBB_add_u32le BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u32le) #define CBB_add_u64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u64) #define CBB_add_u64le BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u64le) #define CBB_add_u8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u8) #define CBB_add_u8_length_prefixed 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u8_length_prefixed) #define CBB_add_ucs2_be BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_ucs2_be) #define CBB_add_utf32_be BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_utf32_be) #define CBB_add_utf8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_utf8) #define CBB_add_zeros BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_zeros) #define CBB_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_cleanup) #define CBB_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_data) #define CBB_did_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_did_write) #define CBB_discard_child BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_discard_child) #define CBB_finish BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_finish) #define CBB_finish_i2d BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_finish_i2d) #define CBB_flush BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_flush) #define CBB_flush_asn1_set_of BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_flush_asn1_set_of) #define CBB_get_utf8_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_get_utf8_len) #define CBB_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_init) #define CBB_init_fixed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_init_fixed) #define CBB_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_len) #define CBB_reserve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_reserve) #define CBB_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_zero) #define CBS_asn1_ber_to_der BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_asn1_ber_to_der) #define CBS_asn1_bitstring_has_bit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_asn1_bitstring_has_bit) #define CBS_asn1_oid_to_text BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_asn1_oid_to_text) #define CBS_contains_zero_byte BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_contains_zero_byte) #define CBS_copy_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_copy_bytes) #define CBS_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_data) #define CBS_get_any_asn1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
CBS_get_any_asn1) #define CBS_get_any_asn1_element BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_any_asn1_element) #define CBS_get_any_ber_asn1_element BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_any_ber_asn1_element) #define CBS_get_asn1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_asn1) #define CBS_get_asn1_bool BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_asn1_bool) #define CBS_get_asn1_element BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_asn1_element) #define CBS_get_asn1_implicit_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_asn1_implicit_string) #define CBS_get_asn1_int64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_asn1_int64) #define CBS_get_asn1_uint64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_asn1_uint64) #define CBS_get_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_bytes) #define CBS_get_last_u8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_last_u8) #define CBS_get_latin1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_latin1) #define CBS_get_optional_asn1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_optional_asn1) #define CBS_get_optional_asn1_bool BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_optional_asn1_bool) #define CBS_get_optional_asn1_octet_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_optional_asn1_octet_string) #define CBS_get_optional_asn1_uint64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_optional_asn1_uint64) #define CBS_get_u16 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u16) #define CBS_get_u16_length_prefixed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u16_length_prefixed) #define CBS_get_u16le BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u16le) #define CBS_get_u24 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u24) #define CBS_get_u24_length_prefixed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u24_length_prefixed) #define CBS_get_u32 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u32) #define CBS_get_u32le BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u32le) #define CBS_get_u64 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u64) #define CBS_get_u64_decimal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u64_decimal) #define CBS_get_u64le BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u64le) #define CBS_get_u8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u8) #define CBS_get_u8_length_prefixed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u8_length_prefixed) #define CBS_get_ucs2_be BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_ucs2_be) #define CBS_get_until_first BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_until_first) #define CBS_get_utf32_be BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_utf32_be) #define CBS_get_utf8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_utf8) #define CBS_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_init) #define CBS_is_unsigned_asn1_integer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_is_unsigned_asn1_integer) #define CBS_is_valid_asn1_bitstring BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_is_valid_asn1_bitstring) #define CBS_is_valid_asn1_integer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_is_valid_asn1_integer) #define CBS_is_valid_asn1_oid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_is_valid_asn1_oid) #define CBS_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_len) #define CBS_mem_equal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_mem_equal) #define CBS_parse_generalized_time BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_parse_generalized_time) #define CBS_parse_utc_time BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_parse_utc_time) #define CBS_peek_asn1_tag BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_peek_asn1_tag) #define CBS_skip BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_skip) #define CBS_stow BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_stow) #define CBS_strdup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_strdup) #define CERTIFICATEPOLICIES_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CERTIFICATEPOLICIES_free) #define CERTIFICATEPOLICIES_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CERTIFICATEPOLICIES_it) #define 
CERTIFICATEPOLICIES_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CERTIFICATEPOLICIES_new) #define CMAC_CTX_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CMAC_CTX_copy) #define CMAC_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CMAC_CTX_free) #define CMAC_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CMAC_CTX_new) #define CMAC_Final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CMAC_Final) #define CMAC_Init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CMAC_Init) #define CMAC_Reset BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CMAC_Reset) #define CMAC_Update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CMAC_Update) #define CONF_VALUE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CONF_VALUE_new) #define CONF_modules_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CONF_modules_free) #define CONF_modules_load_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CONF_modules_load_file) #define CONF_parse_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CONF_parse_list) #define CRL_DIST_POINTS_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRL_DIST_POINTS_free) #define CRL_DIST_POINTS_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRL_DIST_POINTS_it) #define CRL_DIST_POINTS_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRL_DIST_POINTS_new) #define CRYPTO_BUFFER_POOL_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_BUFFER_POOL_free) #define CRYPTO_BUFFER_POOL_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_BUFFER_POOL_new) #define CRYPTO_BUFFER_alloc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_BUFFER_alloc) #define CRYPTO_BUFFER_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_BUFFER_data) #define CRYPTO_BUFFER_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_BUFFER_free) #define CRYPTO_BUFFER_init_CBS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_BUFFER_init_CBS) #define CRYPTO_BUFFER_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_BUFFER_len) #define CRYPTO_BUFFER_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_BUFFER_new) #define CRYPTO_BUFFER_new_from_CBS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
CRYPTO_BUFFER_new_from_CBS) #define CRYPTO_BUFFER_new_from_static_data_unsafe BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_BUFFER_new_from_static_data_unsafe) #define CRYPTO_BUFFER_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_BUFFER_up_ref) #define CRYPTO_MUTEX_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_MUTEX_cleanup) #define CRYPTO_MUTEX_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_MUTEX_init) #define CRYPTO_MUTEX_lock_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_MUTEX_lock_read) #define CRYPTO_MUTEX_lock_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_MUTEX_lock_write) #define CRYPTO_MUTEX_unlock_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_MUTEX_unlock_read) #define CRYPTO_MUTEX_unlock_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_MUTEX_unlock_write) #define CRYPTO_POLYVAL_finish BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_POLYVAL_finish) #define CRYPTO_POLYVAL_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_POLYVAL_init) #define CRYPTO_POLYVAL_update_blocks BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_POLYVAL_update_blocks) #define CRYPTO_THREADID_current BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_THREADID_current) #define CRYPTO_THREADID_set_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_THREADID_set_callback) #define CRYPTO_THREADID_set_numeric BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_THREADID_set_numeric) #define CRYPTO_THREADID_set_pointer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_THREADID_set_pointer) #define CRYPTO_atomic_compare_exchange_weak_u32 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_atomic_compare_exchange_weak_u32) #define CRYPTO_atomic_load_u32 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_atomic_load_u32) #define CRYPTO_atomic_store_u32 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_atomic_store_u32) #define CRYPTO_cbc128_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_cbc128_decrypt) #define CRYPTO_cbc128_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
CRYPTO_cbc128_encrypt) #define CRYPTO_cfb128_1_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_cfb128_1_encrypt) #define CRYPTO_cfb128_8_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_cfb128_8_encrypt) #define CRYPTO_cfb128_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_cfb128_encrypt) #define CRYPTO_chacha_20 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_chacha_20) #define CRYPTO_cleanup_all_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_cleanup_all_ex_data) #define CRYPTO_cpu_avoid_zmm_registers BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_cpu_avoid_zmm_registers) #define CRYPTO_cpu_perf_is_like_silvermont BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_cpu_perf_is_like_silvermont) #define CRYPTO_ctr128_encrypt_ctr32 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_ctr128_encrypt_ctr32) #define CRYPTO_fips_186_2_prf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_fips_186_2_prf) #define CRYPTO_fork_detect_force_madv_wipeonfork_for_testing BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_fork_detect_force_madv_wipeonfork_for_testing) #define CRYPTO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_free) #define CRYPTO_free_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_free_ex_data) #define CRYPTO_gcm128_aad BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_gcm128_aad) #define CRYPTO_gcm128_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_gcm128_decrypt) #define CRYPTO_gcm128_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_gcm128_encrypt) #define CRYPTO_gcm128_finish BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_gcm128_finish) #define CRYPTO_gcm128_init_aes_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_gcm128_init_aes_key) #define CRYPTO_gcm128_init_ctx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_gcm128_init_ctx) #define CRYPTO_gcm128_tag BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_gcm128_tag) #define CRYPTO_get_dynlock_create_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_get_dynlock_create_callback) #define 
CRYPTO_get_dynlock_destroy_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_get_dynlock_destroy_callback) #define CRYPTO_get_dynlock_lock_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_get_dynlock_lock_callback) #define CRYPTO_get_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_get_ex_data) #define CRYPTO_get_ex_new_index_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_get_ex_new_index_ex) #define CRYPTO_get_fork_generation BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_get_fork_generation) #define CRYPTO_get_lock_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_get_lock_name) #define CRYPTO_get_locking_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_get_locking_callback) #define CRYPTO_get_stderr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_get_stderr) #define CRYPTO_get_thread_local BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_get_thread_local) #define CRYPTO_ghash_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_ghash_init) #define CRYPTO_has_asm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_has_asm) #define CRYPTO_hchacha20 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_hchacha20) #define CRYPTO_init_sysrand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_init_sysrand) #define CRYPTO_is_ADX_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_ADX_capable) #define CRYPTO_is_AESNI_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_AESNI_capable) #define CRYPTO_is_ARMv8_AES_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_AES_capable) #define CRYPTO_is_ARMv8_PMULL_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_PMULL_capable) #define CRYPTO_is_ARMv8_SHA1_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_SHA1_capable) #define CRYPTO_is_ARMv8_SHA256_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_SHA256_capable) #define CRYPTO_is_ARMv8_SHA512_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_SHA512_capable) #define CRYPTO_is_AVX2_capable 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_AVX2_capable) #define CRYPTO_is_AVX512BW_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_AVX512BW_capable) #define CRYPTO_is_AVX512VL_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_AVX512VL_capable) #define CRYPTO_is_AVX_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_AVX_capable) #define CRYPTO_is_BMI1_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_BMI1_capable) #define CRYPTO_is_BMI2_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_BMI2_capable) #define CRYPTO_is_FXSR_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_FXSR_capable) #define CRYPTO_is_MOVBE_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_MOVBE_capable) #define CRYPTO_is_NEON_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_NEON_capable) #define CRYPTO_is_PCLMUL_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_PCLMUL_capable) #define CRYPTO_is_RDRAND_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_RDRAND_capable) #define CRYPTO_is_SSE4_1_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_SSE4_1_capable) #define CRYPTO_is_SSSE3_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_SSSE3_capable) #define CRYPTO_is_VAES_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_VAES_capable) #define CRYPTO_is_VPCLMULQDQ_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_VPCLMULQDQ_capable) #define CRYPTO_is_confidential_build BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_confidential_build) #define CRYPTO_is_intel_cpu BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_intel_cpu) #define CRYPTO_is_x86_SHA_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_x86_SHA_capable) #define CRYPTO_library_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_library_init) #define CRYPTO_malloc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_malloc) #define CRYPTO_malloc_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_malloc_init) #define CRYPTO_memcmp 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_memcmp) #define CRYPTO_new_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_new_ex_data) #define CRYPTO_num_locks BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_num_locks) #define CRYPTO_ofb128_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_ofb128_encrypt) #define CRYPTO_once BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_once) #define CRYPTO_poly1305_finish BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_poly1305_finish) #define CRYPTO_poly1305_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_poly1305_init) #define CRYPTO_poly1305_update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_poly1305_update) #define CRYPTO_pre_sandbox_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_pre_sandbox_init) #define CRYPTO_rdrand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_rdrand) #define CRYPTO_rdrand_multiple8_buf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_rdrand_multiple8_buf) #define CRYPTO_realloc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_realloc) #define CRYPTO_refcount_dec_and_test_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_refcount_dec_and_test_zero) #define CRYPTO_refcount_inc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_refcount_inc) #define CRYPTO_secure_malloc_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_secure_malloc_init) #define CRYPTO_secure_malloc_initialized BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_secure_malloc_initialized) #define CRYPTO_secure_used BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_secure_used) #define CRYPTO_set_add_lock_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_set_add_lock_callback) #define CRYPTO_set_dynlock_create_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_set_dynlock_create_callback) #define CRYPTO_set_dynlock_destroy_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_set_dynlock_destroy_callback) #define CRYPTO_set_dynlock_lock_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_set_dynlock_lock_callback) #define 
CRYPTO_set_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_set_ex_data) #define CRYPTO_set_id_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_set_id_callback) #define CRYPTO_set_locking_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_set_locking_callback) #define CRYPTO_set_thread_local BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_set_thread_local) #define CRYPTO_sysrand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_sysrand) #define CRYPTO_sysrand_for_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_sysrand_for_seed) #define CRYPTO_sysrand_if_available BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_sysrand_if_available) #define CRYPTO_tls13_hkdf_expand_label BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_tls13_hkdf_expand_label) #define CRYPTO_tls1_prf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_tls1_prf) #define CRYPTO_xor16 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_xor16) #define CTR_DRBG_clear BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CTR_DRBG_clear) #define CTR_DRBG_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CTR_DRBG_free) #define CTR_DRBG_generate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CTR_DRBG_generate) #define CTR_DRBG_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CTR_DRBG_init) #define CTR_DRBG_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CTR_DRBG_new) #define CTR_DRBG_reseed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CTR_DRBG_reseed) #define ChaCha20_ctr32_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ChaCha20_ctr32_avx2) #define ChaCha20_ctr32_avx2_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ChaCha20_ctr32_avx2_capable) #define ChaCha20_ctr32_neon BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ChaCha20_ctr32_neon) #define ChaCha20_ctr32_neon_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ChaCha20_ctr32_neon_capable) #define ChaCha20_ctr32_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ChaCha20_ctr32_nohw) #define ChaCha20_ctr32_ssse3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ChaCha20_ctr32_ssse3) #define ChaCha20_ctr32_ssse3_4x 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ChaCha20_ctr32_ssse3_4x) #define ChaCha20_ctr32_ssse3_4x_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ChaCha20_ctr32_ssse3_4x_capable) #define ChaCha20_ctr32_ssse3_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ChaCha20_ctr32_ssse3_capable) #define DES_decrypt3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_decrypt3) #define DES_ecb3_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_ecb3_encrypt) #define DES_ecb3_encrypt_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_ecb3_encrypt_ex) #define DES_ecb_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_ecb_encrypt) #define DES_ecb_encrypt_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_ecb_encrypt_ex) #define DES_ede2_cbc_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_ede2_cbc_encrypt) #define DES_ede3_cbc_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_ede3_cbc_encrypt) #define DES_ede3_cbc_encrypt_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_ede3_cbc_encrypt_ex) #define DES_encrypt3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_encrypt3) #define DES_ncbc_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_ncbc_encrypt) #define DES_ncbc_encrypt_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_ncbc_encrypt_ex) #define DES_set_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_set_key) #define DES_set_key_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_set_key_ex) #define DES_set_key_unchecked BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_set_key_unchecked) #define DES_set_odd_parity BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DES_set_odd_parity) #define DH_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_bits) #define DH_check BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_check) #define DH_check_pub_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_check_pub_key) #define DH_compute_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_compute_key) #define DH_compute_key_hashed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_compute_key_hashed) #define DH_compute_key_padded BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
DH_compute_key_padded) #define DH_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_free) #define DH_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_generate_key) #define DH_generate_parameters_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_generate_parameters_ex) #define DH_get0_g BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_get0_g) #define DH_get0_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_get0_key) #define DH_get0_p BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_get0_p) #define DH_get0_pqg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_get0_pqg) #define DH_get0_priv_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_get0_priv_key) #define DH_get0_pub_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_get0_pub_key) #define DH_get0_q BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_get0_q) #define DH_get_rfc7919_2048 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_get_rfc7919_2048) #define DH_marshal_parameters BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_marshal_parameters) #define DH_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_new) #define DH_num_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_num_bits) #define DH_parse_parameters BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_parse_parameters) #define DH_set0_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_set0_key) #define DH_set0_pqg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_set0_pqg) #define DH_set_length BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_set_length) #define DH_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_size) #define DH_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DH_up_ref) #define DHparams_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DHparams_dup) #define DIRECTORYSTRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DIRECTORYSTRING_free) #define DIRECTORYSTRING_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DIRECTORYSTRING_it) #define DIRECTORYSTRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DIRECTORYSTRING_new) #define DISPLAYTEXT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DISPLAYTEXT_free) #define DISPLAYTEXT_it 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DISPLAYTEXT_it) #define DISPLAYTEXT_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DISPLAYTEXT_new) #define DIST_POINT_NAME_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DIST_POINT_NAME_free) #define DIST_POINT_NAME_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DIST_POINT_NAME_new) #define DIST_POINT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DIST_POINT_free) #define DIST_POINT_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DIST_POINT_new) #define DIST_POINT_set_dpname BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DIST_POINT_set_dpname) #define DSA_SIG_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_SIG_free) #define DSA_SIG_get0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_SIG_get0) #define DSA_SIG_marshal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_SIG_marshal) #define DSA_SIG_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_SIG_new) #define DSA_SIG_parse BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_SIG_parse) #define DSA_SIG_set0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_SIG_set0) #define DSA_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_bits) #define DSA_check_signature BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_check_signature) #define DSA_do_check_signature BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_do_check_signature) #define DSA_do_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_do_sign) #define DSA_do_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_do_verify) #define DSA_dup_DH BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_dup_DH) #define DSA_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_free) #define DSA_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_generate_key) #define DSA_generate_parameters_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_generate_parameters_ex) #define DSA_get0_g BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_get0_g) #define DSA_get0_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_get0_key) #define DSA_get0_p BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_get0_p) #define DSA_get0_pqg 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_get0_pqg) #define DSA_get0_priv_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_get0_priv_key) #define DSA_get0_pub_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_get0_pub_key) #define DSA_get0_q BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_get0_q) #define DSA_get_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_get_ex_data) #define DSA_get_ex_new_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_get_ex_new_index) #define DSA_marshal_parameters BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_marshal_parameters) #define DSA_marshal_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_marshal_private_key) #define DSA_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_marshal_public_key) #define DSA_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_new) #define DSA_parse_parameters BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_parse_parameters) #define DSA_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_parse_private_key) #define DSA_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_parse_public_key) #define DSA_set0_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_set0_key) #define DSA_set0_pqg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_set0_pqg) #define DSA_set_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_set_ex_data) #define DSA_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_sign) #define DSA_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_size) #define DSA_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_up_ref) #define DSA_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSA_verify) #define DSAparams_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DSAparams_dup) #define DTLS_client_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLS_client_method) #define DTLS_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLS_method) #define DTLS_server_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLS_server_method) #define DTLS_with_buffers_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLS_with_buffers_method) 
#define DTLSv1_2_client_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLSv1_2_client_method) #define DTLSv1_2_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLSv1_2_method) #define DTLSv1_2_server_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLSv1_2_server_method) #define DTLSv1_client_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLSv1_client_method) #define DTLSv1_get_timeout BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLSv1_get_timeout) #define DTLSv1_handle_timeout BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLSv1_handle_timeout) #define DTLSv1_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLSv1_method) #define DTLSv1_server_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLSv1_server_method) #define DTLSv1_set_initial_timeout_duration BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, DTLSv1_set_initial_timeout_duration) #define ECDH_compute_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDH_compute_key) #define ECDH_compute_key_fips BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDH_compute_key_fips) #define ECDSA_SIG_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_SIG_free) #define ECDSA_SIG_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_SIG_from_bytes) #define ECDSA_SIG_get0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_SIG_get0) #define ECDSA_SIG_get0_r BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_SIG_get0_r) #define ECDSA_SIG_get0_s BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_SIG_get0_s) #define ECDSA_SIG_marshal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_SIG_marshal) #define ECDSA_SIG_max_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_SIG_max_len) #define ECDSA_SIG_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_SIG_new) #define ECDSA_SIG_parse BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_SIG_parse) #define ECDSA_SIG_set0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_SIG_set0) #define ECDSA_SIG_to_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_SIG_to_bytes) #define ECDSA_do_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_do_sign) #define ECDSA_do_verify 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_do_verify) #define ECDSA_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_sign) #define ECDSA_sign_with_nonce_and_leak_private_key_for_testing BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_sign_with_nonce_and_leak_private_key_for_testing) #define ECDSA_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_size) #define ECDSA_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ECDSA_verify) #define EC_GFp_mont_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GFp_mont_method) #define EC_GFp_nistp224_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GFp_nistp224_method) #define EC_GFp_nistp256_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GFp_nistp256_method) #define EC_GFp_nistz256_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GFp_nistz256_method) #define EC_GROUP_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_cmp) #define EC_GROUP_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_dup) #define EC_GROUP_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_free) #define EC_GROUP_get0_generator BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_get0_generator) #define EC_GROUP_get0_order BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_get0_order) #define EC_GROUP_get_asn1_flag BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_get_asn1_flag) #define EC_GROUP_get_cofactor BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_get_cofactor) #define EC_GROUP_get_curve_GFp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_get_curve_GFp) #define EC_GROUP_get_curve_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_get_curve_name) #define EC_GROUP_get_degree BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_get_degree) #define EC_GROUP_get_order BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_get_order) #define EC_GROUP_method_of BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_method_of) #define EC_GROUP_new_by_curve_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_new_by_curve_name) #define EC_GROUP_new_curve_GFp 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_new_curve_GFp) #define EC_GROUP_order_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_order_bits) #define EC_GROUP_set_asn1_flag BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_set_asn1_flag) #define EC_GROUP_set_generator BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_set_generator) #define EC_GROUP_set_point_conversion_form BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_GROUP_set_point_conversion_form) #define EC_KEY_check_fips BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_check_fips) #define EC_KEY_check_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_check_key) #define EC_KEY_derive_from_secret BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_derive_from_secret) #define EC_KEY_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_dup) #define EC_KEY_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_free) #define EC_KEY_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_generate_key) #define EC_KEY_generate_key_fips BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_generate_key_fips) #define EC_KEY_get0_group BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_get0_group) #define EC_KEY_get0_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_get0_private_key) #define EC_KEY_get0_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_get0_public_key) #define EC_KEY_get_conv_form BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_get_conv_form) #define EC_KEY_get_enc_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_get_enc_flags) #define EC_KEY_get_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_get_ex_data) #define EC_KEY_get_ex_new_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_get_ex_new_index) #define EC_KEY_is_opaque BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_is_opaque) #define EC_KEY_key2buf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_key2buf) #define EC_KEY_marshal_curve_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_marshal_curve_name) #define EC_KEY_marshal_private_key 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_marshal_private_key) #define EC_KEY_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_new) #define EC_KEY_new_by_curve_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_new_by_curve_name) #define EC_KEY_new_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_new_method) #define EC_KEY_oct2key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_oct2key) #define EC_KEY_oct2priv BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_oct2priv) #define EC_KEY_parse_curve_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_parse_curve_name) #define EC_KEY_parse_parameters BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_parse_parameters) #define EC_KEY_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_parse_private_key) #define EC_KEY_priv2buf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_priv2buf) #define EC_KEY_priv2oct BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_priv2oct) #define EC_KEY_set_asn1_flag BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_set_asn1_flag) #define EC_KEY_set_conv_form BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_set_conv_form) #define EC_KEY_set_enc_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_set_enc_flags) #define EC_KEY_set_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_set_ex_data) #define EC_KEY_set_group BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_set_group) #define EC_KEY_set_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_set_private_key) #define EC_KEY_set_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_set_public_key) #define EC_KEY_set_public_key_affine_coordinates BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_set_public_key_affine_coordinates) #define EC_KEY_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_KEY_up_ref) #define EC_METHOD_get_field_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_METHOD_get_field_type) #define EC_POINT_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_add) #define EC_POINT_clear_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
EC_POINT_clear_free) #define EC_POINT_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_cmp) #define EC_POINT_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_copy) #define EC_POINT_dbl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_dbl) #define EC_POINT_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_dup) #define EC_POINT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_free) #define EC_POINT_get_affine_coordinates BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_get_affine_coordinates) #define EC_POINT_get_affine_coordinates_GFp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_get_affine_coordinates_GFp) #define EC_POINT_invert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_invert) #define EC_POINT_is_at_infinity BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_is_at_infinity) #define EC_POINT_is_on_curve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_is_on_curve) #define EC_POINT_mul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_mul) #define EC_POINT_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_new) #define EC_POINT_oct2point BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_oct2point) #define EC_POINT_point2buf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_point2buf) #define EC_POINT_point2cbb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_point2cbb) #define EC_POINT_point2oct BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_point2oct) #define EC_POINT_set_affine_coordinates BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_set_affine_coordinates) #define EC_POINT_set_affine_coordinates_GFp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_set_affine_coordinates_GFp) #define EC_POINT_set_compressed_coordinates_GFp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_set_compressed_coordinates_GFp) #define EC_POINT_set_to_infinity BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_POINT_set_to_infinity) #define EC_curve_nid2nist BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_curve_nid2nist) #define EC_curve_nist2nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
EC_curve_nist2nid) #define EC_get_builtin_curves BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_get_builtin_curves) #define EC_group_p224 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_group_p224) #define EC_group_p256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_group_p256) #define EC_group_p384 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_group_p384) #define EC_group_p521 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_group_p521) #define EC_hash_to_curve_p256_xmd_sha256_sswu BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_hash_to_curve_p256_xmd_sha256_sswu) #define EC_hash_to_curve_p384_xmd_sha384_sswu BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EC_hash_to_curve_p384_xmd_sha384_sswu) #define ED25519_keypair BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ED25519_keypair) #define ED25519_keypair_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ED25519_keypair_from_seed) #define ED25519_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ED25519_sign) #define ED25519_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ED25519_verify) #define EDIPARTYNAME_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EDIPARTYNAME_free) #define EDIPARTYNAME_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EDIPARTYNAME_new) #define ENGINE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ENGINE_free) #define ENGINE_get_ECDSA_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ENGINE_get_ECDSA_method) #define ENGINE_get_RSA_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ENGINE_get_RSA_method) #define ENGINE_load_builtin_engines BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ENGINE_load_builtin_engines) #define ENGINE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ENGINE_new) #define ENGINE_register_all_complete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ENGINE_register_all_complete) #define ENGINE_set_ECDSA_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ENGINE_set_ECDSA_method) #define ENGINE_set_RSA_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ENGINE_set_RSA_method) #define ERR_GET_LIB BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_GET_LIB) #define ERR_GET_REASON 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_GET_REASON) #define ERR_SAVE_STATE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_SAVE_STATE_free) #define ERR_add_error_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_add_error_data) #define ERR_add_error_dataf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_add_error_dataf) #define ERR_clear_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_clear_error) #define ERR_clear_system_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_clear_system_error) #define ERR_error_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_error_string) #define ERR_error_string_n BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_error_string_n) #define ERR_free_strings BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_free_strings) #define ERR_func_error_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_func_error_string) #define ERR_get_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_get_error) #define ERR_get_error_line BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_get_error_line) #define ERR_get_error_line_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_get_error_line_data) #define ERR_get_next_error_library BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_get_next_error_library) #define ERR_lib_error_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_lib_error_string) #define ERR_lib_symbol_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_lib_symbol_name) #define ERR_load_BIO_strings BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_load_BIO_strings) #define ERR_load_ERR_strings BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_load_ERR_strings) #define ERR_load_RAND_strings BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_load_RAND_strings) #define ERR_load_SSL_strings BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_load_SSL_strings) #define ERR_load_crypto_strings BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_load_crypto_strings) #define ERR_peek_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_peek_error) #define ERR_peek_error_line BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_peek_error_line) 
#define ERR_peek_error_line_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_peek_error_line_data) #define ERR_peek_last_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_peek_last_error) #define ERR_peek_last_error_line BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_peek_last_error_line) #define ERR_peek_last_error_line_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_peek_last_error_line_data) #define ERR_pop_to_mark BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_pop_to_mark) #define ERR_print_errors BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_print_errors) #define ERR_print_errors_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_print_errors_cb) #define ERR_print_errors_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_print_errors_fp) #define ERR_put_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_put_error) #define ERR_reason_error_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_reason_error_string) #define ERR_reason_symbol_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_reason_symbol_name) #define ERR_remove_state BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_remove_state) #define ERR_remove_thread_state BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_remove_thread_state) #define ERR_restore_state BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_restore_state) #define ERR_save_state BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_save_state) #define ERR_set_error_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_set_error_data) #define ERR_set_mark BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_set_mark) #define EVP_AEAD_CTX_aead BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_aead) #define EVP_AEAD_CTX_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_cleanup) #define EVP_AEAD_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_free) #define EVP_AEAD_CTX_get_iv BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_get_iv) #define EVP_AEAD_CTX_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_init) #define EVP_AEAD_CTX_init_with_direction 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_init_with_direction) #define EVP_AEAD_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_new) #define EVP_AEAD_CTX_open BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_open) #define EVP_AEAD_CTX_open_gather BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_open_gather) #define EVP_AEAD_CTX_seal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_seal) #define EVP_AEAD_CTX_seal_scatter BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_seal_scatter) #define EVP_AEAD_CTX_tag_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_tag_len) #define EVP_AEAD_CTX_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_CTX_zero) #define EVP_AEAD_key_length BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_key_length) #define EVP_AEAD_max_overhead BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_max_overhead) #define EVP_AEAD_max_tag_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_max_tag_len) #define EVP_AEAD_nonce_length BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_AEAD_nonce_length) #define EVP_BytesToKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_BytesToKey) #define EVP_CIPHER_CTX_block_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_block_size) #define EVP_CIPHER_CTX_cipher BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_cipher) #define EVP_CIPHER_CTX_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_cleanup) #define EVP_CIPHER_CTX_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_copy) #define EVP_CIPHER_CTX_ctrl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_ctrl) #define EVP_CIPHER_CTX_encrypting BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_encrypting) #define EVP_CIPHER_CTX_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_flags) #define EVP_CIPHER_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_free) #define EVP_CIPHER_CTX_get_app_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_get_app_data) #define 
EVP_CIPHER_CTX_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_init) #define EVP_CIPHER_CTX_iv_length BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_iv_length) #define EVP_CIPHER_CTX_key_length BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_key_length) #define EVP_CIPHER_CTX_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_mode) #define EVP_CIPHER_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_new) #define EVP_CIPHER_CTX_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_nid) #define EVP_CIPHER_CTX_reset BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_reset) #define EVP_CIPHER_CTX_set_app_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_set_app_data) #define EVP_CIPHER_CTX_set_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_set_flags) #define EVP_CIPHER_CTX_set_key_length BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_set_key_length) #define EVP_CIPHER_CTX_set_padding BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_set_padding) #define EVP_CIPHER_block_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_block_size) #define EVP_CIPHER_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_flags) #define EVP_CIPHER_iv_length BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_iv_length) #define EVP_CIPHER_key_length BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_key_length) #define EVP_CIPHER_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_mode) #define EVP_CIPHER_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_nid) #define EVP_Cipher BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_Cipher) #define EVP_CipherFinal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CipherFinal) #define EVP_CipherFinal_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CipherFinal_ex) #define EVP_CipherInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CipherInit) #define EVP_CipherInit_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CipherInit_ex) #define EVP_CipherUpdate 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CipherUpdate) #define EVP_DecodeBase64 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecodeBase64) #define EVP_DecodeBlock BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecodeBlock) #define EVP_DecodeFinal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecodeFinal) #define EVP_DecodeInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecodeInit) #define EVP_DecodeUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecodeUpdate) #define EVP_DecodedLength BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecodedLength) #define EVP_DecryptFinal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecryptFinal) #define EVP_DecryptFinal_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecryptFinal_ex) #define EVP_DecryptInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecryptInit) #define EVP_DecryptInit_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecryptInit_ex) #define EVP_DecryptUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecryptUpdate) #define EVP_Digest BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_Digest) #define EVP_DigestFinal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestFinal) #define EVP_DigestFinalXOF BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestFinalXOF) #define EVP_DigestFinal_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestFinal_ex) #define EVP_DigestInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestInit) #define EVP_DigestInit_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestInit_ex) #define EVP_DigestSign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestSign) #define EVP_DigestSignFinal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestSignFinal) #define EVP_DigestSignInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestSignInit) #define EVP_DigestSignUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestSignUpdate) #define EVP_DigestUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestUpdate) #define EVP_DigestVerify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestVerify) #define EVP_DigestVerifyFinal 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestVerifyFinal) #define EVP_DigestVerifyInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestVerifyInit) #define EVP_DigestVerifyUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DigestVerifyUpdate) #define EVP_ENCODE_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_ENCODE_CTX_free) #define EVP_ENCODE_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_ENCODE_CTX_new) #define EVP_EncodeBlock BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncodeBlock) #define EVP_EncodeFinal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncodeFinal) #define EVP_EncodeInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncodeInit) #define EVP_EncodeUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncodeUpdate) #define EVP_EncodedLength BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncodedLength) #define EVP_EncryptFinal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncryptFinal) #define EVP_EncryptFinal_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncryptFinal_ex) #define EVP_EncryptInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncryptInit) #define EVP_EncryptInit_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncryptInit_ex) #define EVP_EncryptUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncryptUpdate) #define EVP_HPKE_AEAD_aead BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_AEAD_aead) #define EVP_HPKE_AEAD_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_AEAD_id) #define EVP_HPKE_CTX_aead BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_aead) #define EVP_HPKE_CTX_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_cleanup) #define EVP_HPKE_CTX_export BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_export) #define EVP_HPKE_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_free) #define EVP_HPKE_CTX_kdf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_kdf) #define EVP_HPKE_CTX_kem BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_kem) #define EVP_HPKE_CTX_max_overhead BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
EVP_HPKE_CTX_max_overhead) #define EVP_HPKE_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_new) #define EVP_HPKE_CTX_open BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_open) #define EVP_HPKE_CTX_seal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_seal) #define EVP_HPKE_CTX_setup_auth_recipient BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_setup_auth_recipient) #define EVP_HPKE_CTX_setup_auth_sender BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_setup_auth_sender) #define EVP_HPKE_CTX_setup_auth_sender_with_seed_for_testing BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_setup_auth_sender_with_seed_for_testing) #define EVP_HPKE_CTX_setup_recipient BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_setup_recipient) #define EVP_HPKE_CTX_setup_sender BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_setup_sender) #define EVP_HPKE_CTX_setup_sender_with_seed_for_testing BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_setup_sender_with_seed_for_testing) #define EVP_HPKE_CTX_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_CTX_zero) #define EVP_HPKE_KDF_hkdf_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KDF_hkdf_md) #define EVP_HPKE_KDF_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KDF_id) #define EVP_HPKE_KEM_enc_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEM_enc_len) #define EVP_HPKE_KEM_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEM_id) #define EVP_HPKE_KEM_private_key_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEM_private_key_len) #define EVP_HPKE_KEM_public_key_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEM_public_key_len) #define EVP_HPKE_KEY_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEY_cleanup) #define EVP_HPKE_KEY_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEY_copy) #define EVP_HPKE_KEY_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEY_free) #define EVP_HPKE_KEY_generate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEY_generate) #define 
EVP_HPKE_KEY_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEY_init) #define EVP_HPKE_KEY_kem BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEY_kem) #define EVP_HPKE_KEY_move BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEY_move) #define EVP_HPKE_KEY_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEY_new) #define EVP_HPKE_KEY_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEY_private_key) #define EVP_HPKE_KEY_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEY_public_key) #define EVP_HPKE_KEY_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_HPKE_KEY_zero) #define EVP_MD_CTX_block_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_block_size) #define EVP_MD_CTX_cleanse BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_cleanse) #define EVP_MD_CTX_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_cleanup) #define EVP_MD_CTX_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_copy) #define EVP_MD_CTX_copy_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_copy_ex) #define EVP_MD_CTX_create BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_create) #define EVP_MD_CTX_destroy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_destroy) #define EVP_MD_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_free) #define EVP_MD_CTX_get0_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_get0_md) #define EVP_MD_CTX_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_init) #define EVP_MD_CTX_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_md) #define EVP_MD_CTX_move BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_move) #define EVP_MD_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_new) #define EVP_MD_CTX_reset BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_reset) #define EVP_MD_CTX_set_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_set_flags) #define EVP_MD_CTX_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_CTX_size) #define EVP_MD_CTX_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
EVP_MD_CTX_type) #define EVP_MD_block_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_block_size) #define EVP_MD_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_flags) #define EVP_MD_meth_get_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_meth_get_flags) #define EVP_MD_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_nid) #define EVP_MD_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_size) #define EVP_MD_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_MD_type) #define EVP_PBE_scrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PBE_scrypt) #define EVP_PKCS82PKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKCS82PKEY) #define EVP_PKEY2PKCS8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY2PKCS8) #define EVP_PKEY_CTX_add1_hkdf_info BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_add1_hkdf_info) #define EVP_PKEY_CTX_ctrl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_ctrl) #define EVP_PKEY_CTX_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_dup) #define EVP_PKEY_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_free) #define EVP_PKEY_CTX_get0_pkey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_get0_pkey) #define EVP_PKEY_CTX_get0_rsa_oaep_label BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_get0_rsa_oaep_label) #define EVP_PKEY_CTX_get_rsa_mgf1_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_get_rsa_mgf1_md) #define EVP_PKEY_CTX_get_rsa_oaep_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_get_rsa_oaep_md) #define EVP_PKEY_CTX_get_rsa_padding BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_get_rsa_padding) #define EVP_PKEY_CTX_get_rsa_pss_saltlen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_get_rsa_pss_saltlen) #define EVP_PKEY_CTX_get_signature_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_get_signature_md) #define EVP_PKEY_CTX_hkdf_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_hkdf_mode) #define EVP_PKEY_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_new) 
#define EVP_PKEY_CTX_new_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_new_id) #define EVP_PKEY_CTX_set0_rsa_oaep_label BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set0_rsa_oaep_label) #define EVP_PKEY_CTX_set1_hkdf_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set1_hkdf_key) #define EVP_PKEY_CTX_set1_hkdf_salt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set1_hkdf_salt) #define EVP_PKEY_CTX_set_dh_pad BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_dh_pad) #define EVP_PKEY_CTX_set_dsa_paramgen_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_dsa_paramgen_bits) #define EVP_PKEY_CTX_set_dsa_paramgen_q_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_dsa_paramgen_q_bits) #define EVP_PKEY_CTX_set_ec_param_enc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_ec_param_enc) #define EVP_PKEY_CTX_set_ec_paramgen_curve_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_ec_paramgen_curve_nid) #define EVP_PKEY_CTX_set_hkdf_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_hkdf_md) #define EVP_PKEY_CTX_set_rsa_keygen_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_keygen_bits) #define EVP_PKEY_CTX_set_rsa_keygen_pubexp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_keygen_pubexp) #define EVP_PKEY_CTX_set_rsa_mgf1_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_mgf1_md) #define EVP_PKEY_CTX_set_rsa_oaep_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_oaep_md) #define EVP_PKEY_CTX_set_rsa_padding BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_padding) #define EVP_PKEY_CTX_set_rsa_pss_keygen_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_pss_keygen_md) #define EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md) #define EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen) 
#define EVP_PKEY_CTX_set_rsa_pss_saltlen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_pss_saltlen) #define EVP_PKEY_CTX_set_signature_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_signature_md) #define EVP_PKEY_assign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_assign) #define EVP_PKEY_assign_DH BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_assign_DH) #define EVP_PKEY_assign_DSA BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_assign_DSA) #define EVP_PKEY_assign_EC_KEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_assign_EC_KEY) #define EVP_PKEY_assign_RSA BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_assign_RSA) #define EVP_PKEY_base_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_base_id) #define EVP_PKEY_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_bits) #define EVP_PKEY_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_cmp) #define EVP_PKEY_cmp_parameters BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_cmp_parameters) #define EVP_PKEY_copy_parameters BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_copy_parameters) #define EVP_PKEY_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_decrypt) #define EVP_PKEY_decrypt_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_decrypt_init) #define EVP_PKEY_derive BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_derive) #define EVP_PKEY_derive_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_derive_init) #define EVP_PKEY_derive_set_peer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_derive_set_peer) #define EVP_PKEY_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_encrypt) #define EVP_PKEY_encrypt_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_encrypt_init) #define EVP_PKEY_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_free) #define EVP_PKEY_get0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_get0) #define EVP_PKEY_get0_DH BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_get0_DH) #define EVP_PKEY_get0_DSA BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
EVP_PKEY_get0_DSA) #define EVP_PKEY_get0_EC_KEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_get0_EC_KEY) #define EVP_PKEY_get0_RSA BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_get0_RSA) #define EVP_PKEY_get1_DH BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_get1_DH) #define EVP_PKEY_get1_DSA BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_get1_DSA) #define EVP_PKEY_get1_EC_KEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_get1_EC_KEY) #define EVP_PKEY_get1_RSA BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_get1_RSA) #define EVP_PKEY_get1_tls_encodedpoint BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_get1_tls_encodedpoint) #define EVP_PKEY_get_raw_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_get_raw_private_key) #define EVP_PKEY_get_raw_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_get_raw_public_key) #define EVP_PKEY_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_id) #define EVP_PKEY_is_opaque BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_is_opaque) #define EVP_PKEY_keygen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_keygen) #define EVP_PKEY_keygen_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_keygen_init) #define EVP_PKEY_missing_parameters BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_missing_parameters) #define EVP_PKEY_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_new) #define EVP_PKEY_new_raw_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_new_raw_private_key) #define EVP_PKEY_new_raw_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_new_raw_public_key) #define EVP_PKEY_paramgen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_paramgen) #define EVP_PKEY_paramgen_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_paramgen_init) #define EVP_PKEY_print_params BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_print_params) #define EVP_PKEY_print_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_print_private) #define EVP_PKEY_print_public 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_print_public) #define EVP_PKEY_set1_DH BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_set1_DH) #define EVP_PKEY_set1_DSA BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_set1_DSA) #define EVP_PKEY_set1_EC_KEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_set1_EC_KEY) #define EVP_PKEY_set1_RSA BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_set1_RSA) #define EVP_PKEY_set1_tls_encodedpoint BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_set1_tls_encodedpoint) #define EVP_PKEY_set_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_set_type) #define EVP_PKEY_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_sign) #define EVP_PKEY_sign_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_sign_init) #define EVP_PKEY_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_size) #define EVP_PKEY_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_type) #define EVP_PKEY_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_up_ref) #define EVP_PKEY_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_verify) #define EVP_PKEY_verify_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_verify_init) #define EVP_PKEY_verify_recover BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_verify_recover) #define EVP_PKEY_verify_recover_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_PKEY_verify_recover_init) #define EVP_SignFinal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_SignFinal) #define EVP_SignInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_SignInit) #define EVP_SignInit_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_SignInit_ex) #define EVP_SignUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_SignUpdate) #define EVP_VerifyFinal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_VerifyFinal) #define EVP_VerifyInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_VerifyInit) #define EVP_VerifyInit_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_VerifyInit_ex) #define EVP_VerifyUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_VerifyUpdate) 
#define EVP_add_cipher_alias BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_add_cipher_alias) #define EVP_add_digest BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_add_digest) #define EVP_aead_aes_128_cbc_sha1_tls BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_cbc_sha1_tls) #define EVP_aead_aes_128_cbc_sha1_tls_implicit_iv BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_cbc_sha1_tls_implicit_iv) #define EVP_aead_aes_128_cbc_sha256_tls BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_cbc_sha256_tls) #define EVP_aead_aes_128_ccm_bluetooth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_ccm_bluetooth) #define EVP_aead_aes_128_ccm_bluetooth_8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_ccm_bluetooth_8) #define EVP_aead_aes_128_ccm_matter BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_ccm_matter) #define EVP_aead_aes_128_ctr_hmac_sha256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_ctr_hmac_sha256) #define EVP_aead_aes_128_gcm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm) #define EVP_aead_aes_128_gcm_randnonce BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_randnonce) #define EVP_aead_aes_128_gcm_siv BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_siv) #define EVP_aead_aes_128_gcm_tls12 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_tls12) #define EVP_aead_aes_128_gcm_tls13 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_tls13) #define EVP_aead_aes_192_gcm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_192_gcm) #define EVP_aead_aes_256_cbc_sha1_tls BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_256_cbc_sha1_tls) #define EVP_aead_aes_256_cbc_sha1_tls_implicit_iv BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_256_cbc_sha1_tls_implicit_iv) #define EVP_aead_aes_256_ctr_hmac_sha256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_256_ctr_hmac_sha256) #define EVP_aead_aes_256_gcm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm) 
#define EVP_aead_aes_256_gcm_randnonce BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_randnonce) #define EVP_aead_aes_256_gcm_siv BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_siv) #define EVP_aead_aes_256_gcm_tls12 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_tls12) #define EVP_aead_aes_256_gcm_tls13 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_tls13) #define EVP_aead_chacha20_poly1305 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_chacha20_poly1305) #define EVP_aead_des_ede3_cbc_sha1_tls BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_des_ede3_cbc_sha1_tls) #define EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv) #define EVP_aead_xchacha20_poly1305 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_xchacha20_poly1305) #define EVP_aes_128_cbc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_128_cbc) #define EVP_aes_128_ctr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_128_ctr) #define EVP_aes_128_ecb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_128_ecb) #define EVP_aes_128_gcm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_128_gcm) #define EVP_aes_128_ofb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_128_ofb) #define EVP_aes_192_cbc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_192_cbc) #define EVP_aes_192_ctr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_192_ctr) #define EVP_aes_192_ecb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_192_ecb) #define EVP_aes_192_gcm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_192_gcm) #define EVP_aes_192_ofb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_192_ofb) #define EVP_aes_256_cbc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_256_cbc) #define EVP_aes_256_ctr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_256_ctr) #define EVP_aes_256_ecb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_256_ecb) #define EVP_aes_256_gcm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_256_gcm) #define 
EVP_aes_256_ofb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_256_ofb) #define EVP_blake2b256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_blake2b256) #define EVP_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_cleanup) #define EVP_des_cbc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_des_cbc) #define EVP_des_ecb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_des_ecb) #define EVP_des_ede BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_des_ede) #define EVP_des_ede3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_des_ede3) #define EVP_des_ede3_cbc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_des_ede3_cbc) #define EVP_des_ede3_ecb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_des_ede3_ecb) #define EVP_des_ede_cbc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_des_ede_cbc) #define EVP_enc_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_enc_null) #define EVP_get_cipherbyname BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_get_cipherbyname) #define EVP_get_cipherbynid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_get_cipherbynid) #define EVP_get_digestbyname BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_get_digestbyname) #define EVP_get_digestbynid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_get_digestbynid) #define EVP_get_digestbyobj BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_get_digestbyobj) #define EVP_has_aes_hardware BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_has_aes_hardware) #define EVP_hpke_aes_128_gcm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_hpke_aes_128_gcm) #define EVP_hpke_aes_256_gcm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_hpke_aes_256_gcm) #define EVP_hpke_chacha20_poly1305 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_hpke_chacha20_poly1305) #define EVP_hpke_hkdf_sha256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_hpke_hkdf_sha256) #define EVP_hpke_p256_hkdf_sha256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_hpke_p256_hkdf_sha256) #define EVP_hpke_x25519_hkdf_sha256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_hpke_x25519_hkdf_sha256) #define EVP_marshal_digest_algorithm 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_marshal_digest_algorithm) #define EVP_marshal_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_marshal_private_key) #define EVP_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_marshal_public_key) #define EVP_md4 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_md4) #define EVP_md5 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_md5) #define EVP_md5_sha1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_md5_sha1) #define EVP_parse_digest_algorithm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_parse_digest_algorithm) #define EVP_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_parse_private_key) #define EVP_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_parse_public_key) #define EVP_rc2_40_cbc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_rc2_40_cbc) #define EVP_rc2_cbc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_rc2_cbc) #define EVP_rc4 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_rc4) #define EVP_sha1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_sha1) #define EVP_sha1_final_with_secret_suffix BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_sha1_final_with_secret_suffix) #define EVP_sha224 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_sha224) #define EVP_sha256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_sha256) #define EVP_sha256_final_with_secret_suffix BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_sha256_final_with_secret_suffix) #define EVP_sha384 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_sha384) #define EVP_sha512 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_sha512) #define EVP_sha512_256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_sha512_256) #define EVP_tls_cbc_copy_mac BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_tls_cbc_copy_mac) #define EVP_tls_cbc_digest_record BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_tls_cbc_digest_record) #define EVP_tls_cbc_record_digest_supported BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_tls_cbc_record_digest_supported) #define EVP_tls_cbc_remove_padding 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_tls_cbc_remove_padding) #define EXTENDED_KEY_USAGE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EXTENDED_KEY_USAGE_free) #define EXTENDED_KEY_USAGE_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EXTENDED_KEY_USAGE_it) #define EXTENDED_KEY_USAGE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EXTENDED_KEY_USAGE_new) #define FIPS_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, FIPS_mode) #define FIPS_mode_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, FIPS_mode_set) #define FIPS_module_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, FIPS_module_name) #define FIPS_query_algorithm_status BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, FIPS_query_algorithm_status) #define FIPS_read_counter BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, FIPS_read_counter) #define FIPS_service_indicator_after_call BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, FIPS_service_indicator_after_call) #define FIPS_service_indicator_before_call BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, FIPS_service_indicator_before_call) #define FIPS_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, FIPS_version) #define GENERAL_NAMES_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_NAMES_free) #define GENERAL_NAMES_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_NAMES_it) #define GENERAL_NAMES_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_NAMES_new) #define GENERAL_NAME_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_NAME_cmp) #define GENERAL_NAME_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_NAME_dup) #define GENERAL_NAME_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_NAME_free) #define GENERAL_NAME_get0_otherName BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_NAME_get0_otherName) #define GENERAL_NAME_get0_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_NAME_get0_value) #define GENERAL_NAME_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_NAME_it) #define GENERAL_NAME_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_NAME_new) #define GENERAL_NAME_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
GENERAL_NAME_print) #define GENERAL_NAME_set0_othername BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_NAME_set0_othername) #define GENERAL_NAME_set0_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_NAME_set0_value) #define GENERAL_SUBTREE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_SUBTREE_free) #define GENERAL_SUBTREE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, GENERAL_SUBTREE_new) #define HKDF BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HKDF) #define HKDF_expand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HKDF_expand) #define HKDF_extract BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HKDF_extract) #define HMAC BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC) #define HMAC_CTX_cleanse BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_CTX_cleanse) #define HMAC_CTX_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_CTX_cleanup) #define HMAC_CTX_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_CTX_copy) #define HMAC_CTX_copy_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_CTX_copy_ex) #define HMAC_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_CTX_free) #define HMAC_CTX_get_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_CTX_get_md) #define HMAC_CTX_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_CTX_init) #define HMAC_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_CTX_new) #define HMAC_CTX_reset BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_CTX_reset) #define HMAC_Final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_Final) #define HMAC_Init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_Init) #define HMAC_Init_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_Init_ex) #define HMAC_Update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_Update) #define HMAC_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HMAC_size) #define HRSS_decap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HRSS_decap) #define HRSS_encap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HRSS_encap) #define HRSS_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HRSS_generate_key) #define HRSS_marshal_public_key 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HRSS_marshal_public_key) #define HRSS_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HRSS_parse_public_key) #define HRSS_poly3_invert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HRSS_poly3_invert) #define HRSS_poly3_mul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, HRSS_poly3_mul) #define ISSUING_DIST_POINT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ISSUING_DIST_POINT_free) #define ISSUING_DIST_POINT_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ISSUING_DIST_POINT_it) #define ISSUING_DIST_POINT_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ISSUING_DIST_POINT_new) #define KYBER_decap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, KYBER_decap) #define KYBER_encap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, KYBER_encap) #define KYBER_encap_external_entropy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, KYBER_encap_external_entropy) #define KYBER_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, KYBER_generate_key) #define KYBER_generate_key_external_entropy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, KYBER_generate_key_external_entropy) #define KYBER_marshal_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, KYBER_marshal_private_key) #define KYBER_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, KYBER_marshal_public_key) #define KYBER_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, KYBER_parse_private_key) #define KYBER_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, KYBER_parse_public_key) #define KYBER_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, KYBER_public_from_private) #define MD4 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MD4) #define MD4_Final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MD4_Final) #define MD4_Init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MD4_Init) #define MD4_Transform BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MD4_Transform) #define MD4_Update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MD4_Update) #define MD5 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MD5) #define MD5_Final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
MD5_Final) #define MD5_Init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MD5_Init) #define MD5_Transform BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MD5_Transform) #define MD5_Update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MD5_Update) #define METHOD_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, METHOD_ref) #define METHOD_unref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, METHOD_unref) #define MLDSA65_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_generate_key) #define MLDSA65_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_marshal_public_key) #define MLDSA65_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_parse_public_key) #define MLDSA65_private_key_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_private_key_from_seed) #define MLDSA65_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_public_from_private) #define MLDSA65_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_sign) #define MLDSA65_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLDSA65_verify) #define MLKEM1024_decap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM1024_decap) #define MLKEM1024_encap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM1024_encap) #define MLKEM1024_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM1024_generate_key) #define MLKEM1024_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM1024_marshal_public_key) #define MLKEM1024_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM1024_parse_public_key) #define MLKEM1024_private_key_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM1024_private_key_from_seed) #define MLKEM1024_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM1024_public_from_private) #define MLKEM768_decap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM768_decap) #define MLKEM768_encap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM768_encap) #define MLKEM768_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM768_generate_key) #define MLKEM768_marshal_public_key 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM768_marshal_public_key) #define MLKEM768_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM768_parse_public_key) #define MLKEM768_private_key_from_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM768_private_key_from_seed) #define MLKEM768_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, MLKEM768_public_from_private) #define NAME_CONSTRAINTS_check BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NAME_CONSTRAINTS_check) #define NAME_CONSTRAINTS_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NAME_CONSTRAINTS_free) #define NAME_CONSTRAINTS_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NAME_CONSTRAINTS_it) #define NAME_CONSTRAINTS_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NAME_CONSTRAINTS_new) #define NCONF_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NCONF_free) #define NCONF_get_section BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NCONF_get_section) #define NCONF_get_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NCONF_get_string) #define NCONF_load BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NCONF_load) #define NCONF_load_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NCONF_load_bio) #define NCONF_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NCONF_new) #define NETSCAPE_SPKAC_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NETSCAPE_SPKAC_free) #define NETSCAPE_SPKAC_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NETSCAPE_SPKAC_it) #define NETSCAPE_SPKAC_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NETSCAPE_SPKAC_new) #define NETSCAPE_SPKI_b64_decode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NETSCAPE_SPKI_b64_decode) #define NETSCAPE_SPKI_b64_encode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NETSCAPE_SPKI_b64_encode) #define NETSCAPE_SPKI_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NETSCAPE_SPKI_free) #define NETSCAPE_SPKI_get_pubkey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NETSCAPE_SPKI_get_pubkey) #define NETSCAPE_SPKI_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NETSCAPE_SPKI_it) #define NETSCAPE_SPKI_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
NETSCAPE_SPKI_new) #define NETSCAPE_SPKI_set_pubkey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NETSCAPE_SPKI_set_pubkey) #define NETSCAPE_SPKI_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NETSCAPE_SPKI_sign) #define NETSCAPE_SPKI_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NETSCAPE_SPKI_verify) #define NOTICEREF_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NOTICEREF_free) #define NOTICEREF_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NOTICEREF_it) #define NOTICEREF_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, NOTICEREF_new) #define OBJ_cbs2nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_cbs2nid) #define OBJ_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_cleanup) #define OBJ_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_cmp) #define OBJ_create BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_create) #define OBJ_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_dup) #define OBJ_find_sigid_algs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_find_sigid_algs) #define OBJ_find_sigid_by_algs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_find_sigid_by_algs) #define OBJ_get0_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_get0_data) #define OBJ_get_undef BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_get_undef) #define OBJ_length BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_length) #define OBJ_ln2nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_ln2nid) #define OBJ_nid2cbb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_nid2cbb) #define OBJ_nid2ln BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_nid2ln) #define OBJ_nid2obj BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_nid2obj) #define OBJ_nid2sn BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_nid2sn) #define OBJ_obj2nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_obj2nid) #define OBJ_obj2txt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_obj2txt) #define OBJ_sn2nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_sn2nid) #define OBJ_txt2nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_txt2nid) #define OBJ_txt2obj BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OBJ_txt2obj) #define 
OPENSSL_add_all_algorithms_conf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_add_all_algorithms_conf) #define OPENSSL_armcap_P BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_armcap_P) #define OPENSSL_asprintf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_asprintf) #define OPENSSL_calloc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_calloc) #define OPENSSL_cleanse BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_cleanse) #define OPENSSL_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_cleanup) #define OPENSSL_clear_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_clear_free) #define OPENSSL_config BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_config) #define OPENSSL_cpuid_setup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_cpuid_setup) #define OPENSSL_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_free) #define OPENSSL_fromxdigit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_fromxdigit) #define OPENSSL_get_armcap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_get_armcap) #define OPENSSL_get_armcap_pointer_for_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_get_armcap_pointer_for_test) #define OPENSSL_get_ia32cap BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_get_ia32cap) #define OPENSSL_gmtime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_gmtime) #define OPENSSL_gmtime_adj BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_gmtime_adj) #define OPENSSL_gmtime_diff BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_gmtime_diff) #define OPENSSL_hash32 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_hash32) #define OPENSSL_ia32cap_P BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_ia32cap_P) #define OPENSSL_init_cpuid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_init_cpuid) #define OPENSSL_init_crypto BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_init_crypto) #define OPENSSL_init_ssl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_init_ssl) #define OPENSSL_isalnum BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_isalnum) #define OPENSSL_isalpha 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_isalpha) #define OPENSSL_isdigit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_isdigit) #define OPENSSL_isspace BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_isspace) #define OPENSSL_isxdigit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_isxdigit) #define OPENSSL_lh_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_lh_delete) #define OPENSSL_lh_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_lh_doall_arg) #define OPENSSL_lh_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_lh_free) #define OPENSSL_lh_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_lh_insert) #define OPENSSL_lh_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_lh_new) #define OPENSSL_lh_num_items BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_lh_num_items) #define OPENSSL_lh_retrieve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_lh_retrieve) #define OPENSSL_lh_retrieve_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_lh_retrieve_key) #define OPENSSL_load_builtin_modules BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_load_builtin_modules) #define OPENSSL_malloc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_malloc) #define OPENSSL_malloc_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_malloc_init) #define OPENSSL_memdup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_memdup) #define OPENSSL_no_config BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_no_config) #define OPENSSL_posix_to_tm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_posix_to_tm) #define OPENSSL_realloc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_realloc) #define OPENSSL_secure_clear_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_secure_clear_free) #define OPENSSL_secure_malloc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_secure_malloc) #define OPENSSL_sk_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_deep_copy) #define OPENSSL_sk_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_delete) #define OPENSSL_sk_delete_if 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_delete_if) #define OPENSSL_sk_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_delete_ptr) #define OPENSSL_sk_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_dup) #define OPENSSL_sk_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_find) #define OPENSSL_sk_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_free) #define OPENSSL_sk_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_insert) #define OPENSSL_sk_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_is_sorted) #define OPENSSL_sk_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_new) #define OPENSSL_sk_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_new_null) #define OPENSSL_sk_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_num) #define OPENSSL_sk_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_pop) #define OPENSSL_sk_pop_free_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_pop_free_ex) #define OPENSSL_sk_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_push) #define OPENSSL_sk_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_set) #define OPENSSL_sk_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_set_cmp_func) #define OPENSSL_sk_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_shift) #define OPENSSL_sk_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_sort) #define OPENSSL_sk_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_value) #define OPENSSL_sk_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_sk_zero) #define OPENSSL_strcasecmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strcasecmp) #define OPENSSL_strdup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strdup) #define OPENSSL_strhash BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strhash) #define OPENSSL_strlcat BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strlcat) #define OPENSSL_strlcpy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strlcpy) #define OPENSSL_strncasecmp 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strncasecmp) #define OPENSSL_strndup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strndup) #define OPENSSL_strnlen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strnlen) #define OPENSSL_timegm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_timegm) #define OPENSSL_tm_to_posix BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_tm_to_posix) #define OPENSSL_tolower BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_tolower) #define OPENSSL_vasprintf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_vasprintf) #define OPENSSL_vasprintf_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_vasprintf_internal) #define OPENSSL_zalloc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_zalloc) #define OTHERNAME_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OTHERNAME_free) #define OTHERNAME_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OTHERNAME_new) #define OpenSSL_add_all_algorithms BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OpenSSL_add_all_algorithms) #define OpenSSL_add_all_ciphers BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OpenSSL_add_all_ciphers) #define OpenSSL_add_all_digests BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OpenSSL_add_all_digests) #define OpenSSL_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OpenSSL_version) #define OpenSSL_version_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OpenSSL_version_num) #define PEM_ASN1_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_ASN1_read) #define PEM_ASN1_read_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_ASN1_read_bio) #define PEM_ASN1_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_ASN1_write) #define PEM_ASN1_write_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_ASN1_write_bio) #define PEM_X509_INFO_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_X509_INFO_read) #define PEM_X509_INFO_read_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_X509_INFO_read_bio) #define PEM_bytes_read_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_bytes_read_bio) #define PEM_def_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
PEM_def_callback) #define PEM_do_header BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_do_header) #define PEM_get_EVP_CIPHER_INFO BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_get_EVP_CIPHER_INFO) #define PEM_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read) #define PEM_read_DHparams BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_DHparams) #define PEM_read_DSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_DSAPrivateKey) #define PEM_read_DSA_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_DSA_PUBKEY) #define PEM_read_DSAparams BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_DSAparams) #define PEM_read_ECPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_ECPrivateKey) #define PEM_read_EC_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_EC_PUBKEY) #define PEM_read_PKCS7 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_PKCS7) #define PEM_read_PKCS8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_PKCS8) #define PEM_read_PKCS8_PRIV_KEY_INFO BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_PKCS8_PRIV_KEY_INFO) #define PEM_read_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_PUBKEY) #define PEM_read_PrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_PrivateKey) #define PEM_read_RSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_RSAPrivateKey) #define PEM_read_RSAPublicKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_RSAPublicKey) #define PEM_read_RSA_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_RSA_PUBKEY) #define PEM_read_SSL_SESSION BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_SSL_SESSION) #define PEM_read_X509 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_X509) #define PEM_read_X509_AUX BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_X509_AUX) #define PEM_read_X509_CRL BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_X509_CRL) #define PEM_read_X509_REQ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_X509_REQ) #define PEM_read_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio) #define 
PEM_read_bio_DHparams BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_DHparams) #define PEM_read_bio_DSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_DSAPrivateKey) #define PEM_read_bio_DSA_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_DSA_PUBKEY) #define PEM_read_bio_DSAparams BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_DSAparams) #define PEM_read_bio_ECPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_ECPrivateKey) #define PEM_read_bio_EC_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_EC_PUBKEY) #define PEM_read_bio_PKCS7 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_PKCS7) #define PEM_read_bio_PKCS8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_PKCS8) #define PEM_read_bio_PKCS8_PRIV_KEY_INFO BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_PKCS8_PRIV_KEY_INFO) #define PEM_read_bio_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_PUBKEY) #define PEM_read_bio_PrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_PrivateKey) #define PEM_read_bio_RSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_RSAPrivateKey) #define PEM_read_bio_RSAPublicKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_RSAPublicKey) #define PEM_read_bio_RSA_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_RSA_PUBKEY) #define PEM_read_bio_SSL_SESSION BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_SSL_SESSION) #define PEM_read_bio_X509 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_X509) #define PEM_read_bio_X509_AUX BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_X509_AUX) #define PEM_read_bio_X509_CRL BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_X509_CRL) #define PEM_read_bio_X509_REQ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_read_bio_X509_REQ) #define PEM_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write) #define PEM_write_DHparams BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_DHparams) #define PEM_write_DSAPrivateKey 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_DSAPrivateKey) #define PEM_write_DSA_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_DSA_PUBKEY) #define PEM_write_DSAparams BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_DSAparams) #define PEM_write_ECPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_ECPrivateKey) #define PEM_write_EC_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_EC_PUBKEY) #define PEM_write_PKCS7 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_PKCS7) #define PEM_write_PKCS8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_PKCS8) #define PEM_write_PKCS8PrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_PKCS8PrivateKey) #define PEM_write_PKCS8PrivateKey_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_PKCS8PrivateKey_nid) #define PEM_write_PKCS8_PRIV_KEY_INFO BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_PKCS8_PRIV_KEY_INFO) #define PEM_write_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_PUBKEY) #define PEM_write_PrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_PrivateKey) #define PEM_write_RSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_RSAPrivateKey) #define PEM_write_RSAPublicKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_RSAPublicKey) #define PEM_write_RSA_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_RSA_PUBKEY) #define PEM_write_SSL_SESSION BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_SSL_SESSION) #define PEM_write_X509 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_X509) #define PEM_write_X509_AUX BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_X509_AUX) #define PEM_write_X509_CRL BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_X509_CRL) #define PEM_write_X509_REQ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_X509_REQ) #define PEM_write_X509_REQ_NEW BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_X509_REQ_NEW) #define PEM_write_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio) #define PEM_write_bio_DHparams 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_DHparams) #define PEM_write_bio_DSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_DSAPrivateKey) #define PEM_write_bio_DSA_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_DSA_PUBKEY) #define PEM_write_bio_DSAparams BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_DSAparams) #define PEM_write_bio_ECPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_ECPrivateKey) #define PEM_write_bio_EC_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_EC_PUBKEY) #define PEM_write_bio_PKCS7 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_PKCS7) #define PEM_write_bio_PKCS8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_PKCS8) #define PEM_write_bio_PKCS8PrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_PKCS8PrivateKey) #define PEM_write_bio_PKCS8PrivateKey_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_PKCS8PrivateKey_nid) #define PEM_write_bio_PKCS8_PRIV_KEY_INFO BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_PKCS8_PRIV_KEY_INFO) #define PEM_write_bio_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_PUBKEY) #define PEM_write_bio_PrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_PrivateKey) #define PEM_write_bio_RSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_RSAPrivateKey) #define PEM_write_bio_RSAPublicKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_RSAPublicKey) #define PEM_write_bio_RSA_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_RSA_PUBKEY) #define PEM_write_bio_SSL_SESSION BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_SSL_SESSION) #define PEM_write_bio_X509 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_X509) #define PEM_write_bio_X509_AUX BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_X509_AUX) #define PEM_write_bio_X509_CRL BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_X509_CRL) #define PEM_write_bio_X509_REQ 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_X509_REQ) #define PEM_write_bio_X509_REQ_NEW BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PEM_write_bio_X509_REQ_NEW) #define PKCS12_PBE_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS12_PBE_add) #define PKCS12_create BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS12_create) #define PKCS12_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS12_free) #define PKCS12_get_key_and_certs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS12_get_key_and_certs) #define PKCS12_parse BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS12_parse) #define PKCS12_verify_mac BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS12_verify_mac) #define PKCS1_MGF1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS1_MGF1) #define PKCS5_PBKDF2_HMAC BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS5_PBKDF2_HMAC) #define PKCS5_PBKDF2_HMAC_SHA1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS5_PBKDF2_HMAC_SHA1) #define PKCS5_pbe2_decrypt_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS5_pbe2_decrypt_init) #define PKCS5_pbe2_encrypt_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS5_pbe2_encrypt_init) #define PKCS7_bundle_CRLs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_bundle_CRLs) #define PKCS7_bundle_certificates BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_bundle_certificates) #define PKCS7_bundle_raw_certificates BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_bundle_raw_certificates) #define PKCS7_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_free) #define PKCS7_get_CRLs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_get_CRLs) #define PKCS7_get_PEM_CRLs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_get_PEM_CRLs) #define PKCS7_get_PEM_certificates BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_get_PEM_certificates) #define PKCS7_get_certificates BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_get_certificates) #define PKCS7_get_raw_certificates BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_get_raw_certificates) #define PKCS7_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_sign) #define 
PKCS7_type_is_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_type_is_data) #define PKCS7_type_is_digest BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_type_is_digest) #define PKCS7_type_is_encrypted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_type_is_encrypted) #define PKCS7_type_is_enveloped BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_type_is_enveloped) #define PKCS7_type_is_signed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_type_is_signed) #define PKCS7_type_is_signedAndEnveloped BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS7_type_is_signedAndEnveloped) #define PKCS8_PRIV_KEY_INFO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS8_PRIV_KEY_INFO_free) #define PKCS8_PRIV_KEY_INFO_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS8_PRIV_KEY_INFO_new) #define PKCS8_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS8_decrypt) #define PKCS8_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS8_encrypt) #define PKCS8_marshal_encrypted_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS8_marshal_encrypted_private_key) #define PKCS8_parse_encrypted_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, PKCS8_parse_encrypted_private_key) #define POLICYINFO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, POLICYINFO_free) #define POLICYINFO_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, POLICYINFO_it) #define POLICYINFO_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, POLICYINFO_new) #define POLICYQUALINFO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, POLICYQUALINFO_free) #define POLICYQUALINFO_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, POLICYQUALINFO_it) #define POLICYQUALINFO_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, POLICYQUALINFO_new) #define POLICY_CONSTRAINTS_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, POLICY_CONSTRAINTS_free) #define POLICY_CONSTRAINTS_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, POLICY_CONSTRAINTS_it) #define POLICY_CONSTRAINTS_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, POLICY_CONSTRAINTS_new) #define POLICY_MAPPINGS_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
POLICY_MAPPINGS_it) #define POLICY_MAPPING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, POLICY_MAPPING_free) #define POLICY_MAPPING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, POLICY_MAPPING_new) #define RAND_OpenSSL BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_OpenSSL) #define RAND_SSLeay BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_SSLeay) #define RAND_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_add) #define RAND_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_bytes) #define RAND_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_cleanup) #define RAND_disable_fork_unsafe_buffering BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_disable_fork_unsafe_buffering) #define RAND_egd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_egd) #define RAND_enable_fork_unsafe_buffering BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_enable_fork_unsafe_buffering) #define RAND_file_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_file_name) #define RAND_get_rand_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_get_rand_method) #define RAND_get_system_entropy_for_custom_prng BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_get_system_entropy_for_custom_prng) #define RAND_load_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_load_file) #define RAND_poll BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_poll) #define RAND_pseudo_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_pseudo_bytes) #define RAND_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_seed) #define RAND_set_rand_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_set_rand_method) #define RAND_status BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_status) #define RC4 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RC4) #define RC4_set_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RC4_set_key) #define RSAPrivateKey_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSAPrivateKey_dup) #define RSAPublicKey_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSAPublicKey_dup) #define RSAZ_1024_mod_exp_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSAZ_1024_mod_exp_avx2) 
#define RSA_PSS_PARAMS_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_PSS_PARAMS_free) #define RSA_PSS_PARAMS_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_PSS_PARAMS_it) #define RSA_PSS_PARAMS_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_PSS_PARAMS_new) #define RSA_add_pkcs1_prefix BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_add_pkcs1_prefix) #define RSA_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_bits) #define RSA_blinding_off BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_blinding_off) #define RSA_blinding_on BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_blinding_on) #define RSA_check_fips BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_check_fips) #define RSA_check_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_check_key) #define RSA_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_decrypt) #define RSA_default_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_default_method) #define RSA_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_encrypt) #define RSA_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_flags) #define RSA_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_free) #define RSA_generate_key_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_generate_key_ex) #define RSA_generate_key_fips BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_generate_key_fips) #define RSA_get0_crt_params BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get0_crt_params) #define RSA_get0_d BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get0_d) #define RSA_get0_dmp1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get0_dmp1) #define RSA_get0_dmq1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get0_dmq1) #define RSA_get0_e BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get0_e) #define RSA_get0_factors BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get0_factors) #define RSA_get0_iqmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get0_iqmp) #define RSA_get0_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get0_key) #define RSA_get0_n BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get0_n) #define RSA_get0_p 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get0_p) #define RSA_get0_pss_params BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get0_pss_params) #define RSA_get0_q BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get0_q) #define RSA_get_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get_ex_data) #define RSA_get_ex_new_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_get_ex_new_index) #define RSA_is_opaque BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_is_opaque) #define RSA_marshal_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_marshal_private_key) #define RSA_marshal_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_marshal_public_key) #define RSA_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_new) #define RSA_new_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_new_method) #define RSA_new_method_no_e BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_new_method_no_e) #define RSA_new_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_new_private_key) #define RSA_new_private_key_large_e BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_new_private_key_large_e) #define RSA_new_private_key_no_crt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_new_private_key_no_crt) #define RSA_new_private_key_no_e BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_new_private_key_no_e) #define RSA_new_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_new_public_key) #define RSA_new_public_key_large_e BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_new_public_key_large_e) #define RSA_padding_add_PKCS1_OAEP_mgf1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_padding_add_PKCS1_OAEP_mgf1) #define RSA_padding_add_PKCS1_PSS_mgf1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_padding_add_PKCS1_PSS_mgf1) #define RSA_padding_add_PKCS1_type_1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_padding_add_PKCS1_type_1) #define RSA_padding_add_none BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_padding_add_none) #define RSA_padding_check_PKCS1_OAEP_mgf1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_padding_check_PKCS1_OAEP_mgf1) #define 
RSA_padding_check_PKCS1_type_1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_padding_check_PKCS1_type_1) #define RSA_parse_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_parse_private_key) #define RSA_parse_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_parse_public_key) #define RSA_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_print) #define RSA_private_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_private_decrypt) #define RSA_private_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_private_encrypt) #define RSA_private_key_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_private_key_from_bytes) #define RSA_private_key_to_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_private_key_to_bytes) #define RSA_public_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_public_decrypt) #define RSA_public_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_public_encrypt) #define RSA_public_key_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_public_key_from_bytes) #define RSA_public_key_to_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_public_key_to_bytes) #define RSA_set0_crt_params BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_set0_crt_params) #define RSA_set0_factors BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_set0_factors) #define RSA_set0_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_set0_key) #define RSA_set_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_set_ex_data) #define RSA_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_sign) #define RSA_sign_pss_mgf1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_sign_pss_mgf1) #define RSA_sign_raw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_sign_raw) #define RSA_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_size) #define RSA_test_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_test_flags) #define RSA_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_up_ref) #define RSA_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_verify) #define RSA_verify_PKCS1_PSS_mgf1 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_verify_PKCS1_PSS_mgf1) #define RSA_verify_pss_mgf1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_verify_pss_mgf1) #define RSA_verify_raw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSA_verify_raw) #define SHA1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA1) #define SHA1_Final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA1_Final) #define SHA1_Init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA1_Init) #define SHA1_Transform BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA1_Transform) #define SHA1_Update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA1_Update) #define SHA224 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA224) #define SHA224_Final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA224_Final) #define SHA224_Init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA224_Init) #define SHA224_Update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA224_Update) #define SHA256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA256) #define SHA256_Final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA256_Final) #define SHA256_Init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA256_Init) #define SHA256_Transform BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA256_Transform) #define SHA256_TransformBlocks BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA256_TransformBlocks) #define SHA256_Update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA256_Update) #define SHA384 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA384) #define SHA384_Final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA384_Final) #define SHA384_Init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA384_Init) #define SHA384_Update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA384_Update) #define SHA512 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA512) #define SHA512_256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA512_256) #define SHA512_256_Final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA512_256_Final) #define SHA512_256_Init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA512_256_Init) #define SHA512_256_Update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA512_256_Update) #define 
SHA512_Final BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA512_Final) #define SHA512_Init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA512_Init) #define SHA512_Transform BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA512_Transform) #define SHA512_Update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SHA512_Update) #define SIPHASH_24 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SIPHASH_24) #define SLHDSA_SHA2_128S_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_generate_key) #define SLHDSA_SHA2_128S_prehash_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_prehash_sign) #define SLHDSA_SHA2_128S_prehash_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_prehash_verify) #define SLHDSA_SHA2_128S_prehash_warning_nonstandard_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_prehash_warning_nonstandard_sign) #define SLHDSA_SHA2_128S_prehash_warning_nonstandard_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_prehash_warning_nonstandard_verify) #define SLHDSA_SHA2_128S_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_public_from_private) #define SLHDSA_SHA2_128S_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_sign) #define SLHDSA_SHA2_128S_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_verify) #define SPAKE2_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SPAKE2_CTX_free) #define SPAKE2_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SPAKE2_CTX_new) #define SPAKE2_generate_msg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SPAKE2_generate_msg) #define SPAKE2_process_msg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SPAKE2_process_msg) #define SSL_CIPHER_description BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_description) #define SSL_CIPHER_get_auth_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_auth_nid) #define SSL_CIPHER_get_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_bits) #define SSL_CIPHER_get_cipher_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
SSL_CIPHER_get_cipher_nid) #define SSL_CIPHER_get_digest_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_digest_nid) #define SSL_CIPHER_get_handshake_digest BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_handshake_digest) #define SSL_CIPHER_get_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_id) #define SSL_CIPHER_get_kx_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_kx_name) #define SSL_CIPHER_get_kx_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_kx_nid) #define SSL_CIPHER_get_max_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_max_version) #define SSL_CIPHER_get_min_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_min_version) #define SSL_CIPHER_get_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_name) #define SSL_CIPHER_get_prf_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_prf_nid) #define SSL_CIPHER_get_protocol_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_protocol_id) #define SSL_CIPHER_get_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_get_version) #define SSL_CIPHER_is_aead BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_is_aead) #define SSL_CIPHER_is_block_cipher BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_is_block_cipher) #define SSL_CIPHER_standard_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CIPHER_standard_name) #define SSL_COMP_add_compression_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_COMP_add_compression_method) #define SSL_COMP_free_compression_methods BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_COMP_free_compression_methods) #define SSL_COMP_get0_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_COMP_get0_name) #define SSL_COMP_get_compression_methods BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_COMP_get_compression_methods) #define SSL_COMP_get_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_COMP_get_id) #define SSL_COMP_get_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_COMP_get_name) #define 
SSL_CREDENTIAL_clear_must_match_issuer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_clear_must_match_issuer) #define SSL_CREDENTIAL_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_free) #define SSL_CREDENTIAL_get_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_get_ex_data) #define SSL_CREDENTIAL_get_ex_new_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_get_ex_new_index) #define SSL_CREDENTIAL_must_match_issuer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_must_match_issuer) #define SSL_CREDENTIAL_new_delegated BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_new_delegated) #define SSL_CREDENTIAL_new_x509 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_new_x509) #define SSL_CREDENTIAL_set1_cert_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_set1_cert_chain) #define SSL_CREDENTIAL_set1_delegated_credential BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_set1_delegated_credential) #define SSL_CREDENTIAL_set1_ocsp_response BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_set1_ocsp_response) #define SSL_CREDENTIAL_set1_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_set1_private_key) #define SSL_CREDENTIAL_set1_signed_cert_timestamp_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_set1_signed_cert_timestamp_list) #define SSL_CREDENTIAL_set1_signing_algorithm_prefs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_set1_signing_algorithm_prefs) #define SSL_CREDENTIAL_set_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_set_ex_data) #define SSL_CREDENTIAL_set_must_match_issuer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_set_must_match_issuer) #define SSL_CREDENTIAL_set_private_key_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_set_private_key_method) #define SSL_CREDENTIAL_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CREDENTIAL_up_ref) #define SSL_CTX_add0_chain_cert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
SSL_CTX_add0_chain_cert) #define SSL_CTX_add1_chain_cert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_add1_chain_cert) #define SSL_CTX_add1_credential BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_add1_credential) #define SSL_CTX_add_cert_compression_alg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_add_cert_compression_alg) #define SSL_CTX_add_client_CA BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_add_client_CA) #define SSL_CTX_add_extra_chain_cert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_add_extra_chain_cert) #define SSL_CTX_add_session BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_add_session) #define SSL_CTX_check_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_check_private_key) #define SSL_CTX_cipher_in_group BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_cipher_in_group) #define SSL_CTX_clear_chain_certs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_clear_chain_certs) #define SSL_CTX_clear_extra_chain_certs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_clear_extra_chain_certs) #define SSL_CTX_clear_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_clear_mode) #define SSL_CTX_clear_options BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_clear_options) #define SSL_CTX_enable_ocsp_stapling BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_enable_ocsp_stapling) #define SSL_CTX_enable_signed_cert_timestamps BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_enable_signed_cert_timestamps) #define SSL_CTX_enable_tls_channel_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_enable_tls_channel_id) #define SSL_CTX_flush_sessions BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_flush_sessions) #define SSL_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_free) #define SSL_CTX_get0_certificate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get0_certificate) #define SSL_CTX_get0_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get0_chain) #define SSL_CTX_get0_chain_certs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get0_chain_certs) #define 
SSL_CTX_get0_param BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get0_param) #define SSL_CTX_get0_privatekey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get0_privatekey) #define SSL_CTX_get_cert_store BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_cert_store) #define SSL_CTX_get_ciphers BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_ciphers) #define SSL_CTX_get_client_CA_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_client_CA_list) #define SSL_CTX_get_compliance_policy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_compliance_policy) #define SSL_CTX_get_default_passwd_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_default_passwd_cb) #define SSL_CTX_get_default_passwd_cb_userdata BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_default_passwd_cb_userdata) #define SSL_CTX_get_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_ex_data) #define SSL_CTX_get_ex_new_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_ex_new_index) #define SSL_CTX_get_extra_chain_certs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_extra_chain_certs) #define SSL_CTX_get_info_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_info_callback) #define SSL_CTX_get_keylog_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_keylog_callback) #define SSL_CTX_get_max_cert_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_max_cert_list) #define SSL_CTX_get_max_proto_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_max_proto_version) #define SSL_CTX_get_min_proto_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_min_proto_version) #define SSL_CTX_get_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_mode) #define SSL_CTX_get_num_tickets BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_num_tickets) #define SSL_CTX_get_options BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_options) #define SSL_CTX_get_quiet_shutdown BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_quiet_shutdown) #define 
SSL_CTX_get_read_ahead BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_read_ahead) #define SSL_CTX_get_session_cache_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_session_cache_mode) #define SSL_CTX_get_timeout BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_timeout) #define SSL_CTX_get_tlsext_ticket_keys BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_tlsext_ticket_keys) #define SSL_CTX_get_verify_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_verify_callback) #define SSL_CTX_get_verify_depth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_verify_depth) #define SSL_CTX_get_verify_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_get_verify_mode) #define SSL_CTX_load_verify_locations BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_load_verify_locations) #define SSL_CTX_need_tmp_RSA BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_need_tmp_RSA) #define SSL_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_new) #define SSL_CTX_remove_session BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_remove_session) #define SSL_CTX_sess_accept BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_accept) #define SSL_CTX_sess_accept_good BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_accept_good) #define SSL_CTX_sess_accept_renegotiate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_accept_renegotiate) #define SSL_CTX_sess_cache_full BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_cache_full) #define SSL_CTX_sess_cb_hits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_cb_hits) #define SSL_CTX_sess_connect BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_connect) #define SSL_CTX_sess_connect_good BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_connect_good) #define SSL_CTX_sess_connect_renegotiate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_connect_renegotiate) #define SSL_CTX_sess_get_cache_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_get_cache_size) #define SSL_CTX_sess_get_get_cb 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_get_get_cb) #define SSL_CTX_sess_get_new_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_get_new_cb) #define SSL_CTX_sess_get_remove_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_get_remove_cb) #define SSL_CTX_sess_hits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_hits) #define SSL_CTX_sess_misses BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_misses) #define SSL_CTX_sess_number BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_number) #define SSL_CTX_sess_set_cache_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_set_cache_size) #define SSL_CTX_sess_set_get_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_set_get_cb) #define SSL_CTX_sess_set_new_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_set_new_cb) #define SSL_CTX_sess_set_remove_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_set_remove_cb) #define SSL_CTX_sess_timeouts BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_sess_timeouts) #define SSL_CTX_set0_buffer_pool BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set0_buffer_pool) #define SSL_CTX_set0_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set0_chain) #define SSL_CTX_set0_client_CAs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set0_client_CAs) #define SSL_CTX_set0_verify_cert_store BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set0_verify_cert_store) #define SSL_CTX_set1_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set1_chain) #define SSL_CTX_set1_curves BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set1_curves) #define SSL_CTX_set1_curves_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set1_curves_list) #define SSL_CTX_set1_ech_keys BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set1_ech_keys) #define SSL_CTX_set1_group_ids BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set1_group_ids) #define SSL_CTX_set1_groups BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set1_groups) #define SSL_CTX_set1_groups_list 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set1_groups_list) #define SSL_CTX_set1_param BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set1_param) #define SSL_CTX_set1_sigalgs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set1_sigalgs) #define SSL_CTX_set1_sigalgs_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set1_sigalgs_list) #define SSL_CTX_set1_tls_channel_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set1_tls_channel_id) #define SSL_CTX_set1_verify_cert_store BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set1_verify_cert_store) #define SSL_CTX_set_allow_unknown_alpn_protos BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_allow_unknown_alpn_protos) #define SSL_CTX_set_alpn_protos BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_alpn_protos) #define SSL_CTX_set_alpn_select_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_alpn_select_cb) #define SSL_CTX_set_cert_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_cert_cb) #define SSL_CTX_set_cert_store BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_cert_store) #define SSL_CTX_set_cert_verify_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_cert_verify_callback) #define SSL_CTX_set_chain_and_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_chain_and_key) #define SSL_CTX_set_cipher_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_cipher_list) #define SSL_CTX_set_client_CA_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_client_CA_list) #define SSL_CTX_set_client_cert_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_client_cert_cb) #define SSL_CTX_set_compliance_policy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_compliance_policy) #define SSL_CTX_set_current_time_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_current_time_cb) #define SSL_CTX_set_custom_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_custom_verify) #define SSL_CTX_set_default_passwd_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_default_passwd_cb) 
#define SSL_CTX_set_default_passwd_cb_userdata BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_default_passwd_cb_userdata) #define SSL_CTX_set_default_verify_paths BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_default_verify_paths) #define SSL_CTX_set_dos_protection_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_dos_protection_cb) #define SSL_CTX_set_early_data_enabled BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_early_data_enabled) #define SSL_CTX_set_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_ex_data) #define SSL_CTX_set_false_start_allowed_without_alpn BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_false_start_allowed_without_alpn) #define SSL_CTX_set_grease_enabled BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_grease_enabled) #define SSL_CTX_set_info_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_info_callback) #define SSL_CTX_set_keylog_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_keylog_callback) #define SSL_CTX_set_max_cert_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_max_cert_list) #define SSL_CTX_set_max_proto_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_max_proto_version) #define SSL_CTX_set_max_send_fragment BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_max_send_fragment) #define SSL_CTX_set_min_proto_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_min_proto_version) #define SSL_CTX_set_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_mode) #define SSL_CTX_set_msg_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_msg_callback) #define SSL_CTX_set_msg_callback_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_msg_callback_arg) #define SSL_CTX_set_next_proto_select_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_next_proto_select_cb) #define SSL_CTX_set_next_protos_advertised_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_next_protos_advertised_cb) #define SSL_CTX_set_num_tickets 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_num_tickets) #define SSL_CTX_set_ocsp_response BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_ocsp_response) #define SSL_CTX_set_options BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_options) #define SSL_CTX_set_permute_extensions BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_permute_extensions) #define SSL_CTX_set_private_key_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_private_key_method) #define SSL_CTX_set_psk_client_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_psk_client_callback) #define SSL_CTX_set_psk_server_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_psk_server_callback) #define SSL_CTX_set_purpose BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_purpose) #define SSL_CTX_set_quic_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_quic_method) #define SSL_CTX_set_quiet_shutdown BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_quiet_shutdown) #define SSL_CTX_set_read_ahead BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_read_ahead) #define SSL_CTX_set_record_protocol_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_record_protocol_version) #define SSL_CTX_set_retain_only_sha256_of_client_certs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_retain_only_sha256_of_client_certs) #define SSL_CTX_set_reverify_on_resume BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_reverify_on_resume) #define SSL_CTX_set_select_certificate_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_select_certificate_cb) #define SSL_CTX_set_session_cache_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_session_cache_mode) #define SSL_CTX_set_session_id_context BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_session_id_context) #define SSL_CTX_set_session_psk_dhe_timeout BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_session_psk_dhe_timeout) #define SSL_CTX_set_signed_cert_timestamp_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
SSL_CTX_set_signed_cert_timestamp_list) #define SSL_CTX_set_signing_algorithm_prefs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_signing_algorithm_prefs) #define SSL_CTX_set_srtp_profiles BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_srtp_profiles) #define SSL_CTX_set_strict_cipher_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_strict_cipher_list) #define SSL_CTX_set_ticket_aead_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_ticket_aead_method) #define SSL_CTX_set_timeout BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_timeout) #define SSL_CTX_set_tls_channel_id_enabled BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tls_channel_id_enabled) #define SSL_CTX_set_tlsext_servername_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_servername_arg) #define SSL_CTX_set_tlsext_servername_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_servername_callback) #define SSL_CTX_set_tlsext_status_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_status_arg) #define SSL_CTX_set_tlsext_status_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_status_cb) #define SSL_CTX_set_tlsext_ticket_key_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_ticket_key_cb) #define SSL_CTX_set_tlsext_ticket_keys BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_ticket_keys) #define SSL_CTX_set_tlsext_use_srtp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_use_srtp) #define SSL_CTX_set_tmp_dh BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tmp_dh) #define SSL_CTX_set_tmp_dh_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tmp_dh_callback) #define SSL_CTX_set_tmp_ecdh BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tmp_ecdh) #define SSL_CTX_set_tmp_rsa BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tmp_rsa) #define SSL_CTX_set_tmp_rsa_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_tmp_rsa_callback) #define SSL_CTX_set_trust 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_trust) #define SSL_CTX_set_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_verify) #define SSL_CTX_set_verify_algorithm_prefs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_verify_algorithm_prefs) #define SSL_CTX_set_verify_depth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_set_verify_depth) #define SSL_CTX_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_up_ref) #define SSL_CTX_use_PrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_use_PrivateKey) #define SSL_CTX_use_PrivateKey_ASN1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_use_PrivateKey_ASN1) #define SSL_CTX_use_PrivateKey_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_use_PrivateKey_file) #define SSL_CTX_use_RSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_use_RSAPrivateKey) #define SSL_CTX_use_RSAPrivateKey_ASN1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_use_RSAPrivateKey_ASN1) #define SSL_CTX_use_RSAPrivateKey_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_use_RSAPrivateKey_file) #define SSL_CTX_use_certificate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_use_certificate) #define SSL_CTX_use_certificate_ASN1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_use_certificate_ASN1) #define SSL_CTX_use_certificate_chain_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_use_certificate_chain_file) #define SSL_CTX_use_certificate_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_use_certificate_file) #define SSL_CTX_use_psk_identity_hint BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_CTX_use_psk_identity_hint) #define SSL_ECH_KEYS_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_ECH_KEYS_add) #define SSL_ECH_KEYS_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_ECH_KEYS_free) #define SSL_ECH_KEYS_has_duplicate_config_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_ECH_KEYS_has_duplicate_config_id) #define SSL_ECH_KEYS_marshal_retry_configs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_ECH_KEYS_marshal_retry_configs) 
#define SSL_ECH_KEYS_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_ECH_KEYS_new) #define SSL_ECH_KEYS_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_ECH_KEYS_up_ref) #define SSL_SESSION_copy_without_early_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_copy_without_early_data) #define SSL_SESSION_early_data_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_early_data_capable) #define SSL_SESSION_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_free) #define SSL_SESSION_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_from_bytes) #define SSL_SESSION_get0_cipher BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get0_cipher) #define SSL_SESSION_get0_id_context BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get0_id_context) #define SSL_SESSION_get0_ocsp_response BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get0_ocsp_response) #define SSL_SESSION_get0_peer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get0_peer) #define SSL_SESSION_get0_peer_certificates BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get0_peer_certificates) #define SSL_SESSION_get0_peer_sha256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get0_peer_sha256) #define SSL_SESSION_get0_signed_cert_timestamp_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get0_signed_cert_timestamp_list) #define SSL_SESSION_get0_ticket BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get0_ticket) #define SSL_SESSION_get_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get_ex_data) #define SSL_SESSION_get_ex_new_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get_ex_new_index) #define SSL_SESSION_get_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get_id) #define SSL_SESSION_get_master_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get_master_key) #define SSL_SESSION_get_protocol_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get_protocol_version) #define SSL_SESSION_get_ticket_lifetime_hint 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get_ticket_lifetime_hint) #define SSL_SESSION_get_time BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get_time) #define SSL_SESSION_get_timeout BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get_timeout) #define SSL_SESSION_get_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_get_version) #define SSL_SESSION_has_peer_sha256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_has_peer_sha256) #define SSL_SESSION_has_ticket BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_has_ticket) #define SSL_SESSION_is_resumable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_is_resumable) #define SSL_SESSION_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_new) #define SSL_SESSION_set1_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_set1_id) #define SSL_SESSION_set1_id_context BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_set1_id_context) #define SSL_SESSION_set_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_set_ex_data) #define SSL_SESSION_set_protocol_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_set_protocol_version) #define SSL_SESSION_set_ticket BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_set_ticket) #define SSL_SESSION_set_time BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_set_time) #define SSL_SESSION_set_timeout BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_set_timeout) #define SSL_SESSION_should_be_single_use BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_should_be_single_use) #define SSL_SESSION_to_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_to_bytes) #define SSL_SESSION_to_bytes_for_ticket BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_to_bytes_for_ticket) #define SSL_SESSION_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_SESSION_up_ref) #define SSL_accept BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_accept) #define SSL_add0_chain_cert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_add0_chain_cert) #define 
SSL_add1_chain_cert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_add1_chain_cert) #define SSL_add1_credential BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_add1_credential) #define SSL_add_application_settings BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_add_application_settings) #define SSL_add_bio_cert_subjects_to_stack BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_add_bio_cert_subjects_to_stack) #define SSL_add_client_CA BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_add_client_CA) #define SSL_add_file_cert_subjects_to_stack BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_add_file_cert_subjects_to_stack) #define SSL_alert_desc_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_alert_desc_string) #define SSL_alert_desc_string_long BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_alert_desc_string_long) #define SSL_alert_from_verify_result BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_alert_from_verify_result) #define SSL_alert_type_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_alert_type_string) #define SSL_alert_type_string_long BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_alert_type_string_long) #define SSL_cache_hit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_cache_hit) #define SSL_can_release_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_can_release_private_key) #define SSL_certs_clear BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_certs_clear) #define SSL_check_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_check_private_key) #define SSL_clear BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_clear) #define SSL_clear_chain_certs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_clear_chain_certs) #define SSL_clear_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_clear_mode) #define SSL_clear_options BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_clear_options) #define SSL_connect BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_connect) #define SSL_cutthrough_complete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_cutthrough_complete) #define SSL_do_handshake BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
SSL_do_handshake) #define SSL_dup_CA_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_dup_CA_list) #define SSL_early_callback_ctx_extension_get BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_early_callback_ctx_extension_get) #define SSL_early_data_accepted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_early_data_accepted) #define SSL_early_data_reason_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_early_data_reason_string) #define SSL_ech_accepted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_ech_accepted) #define SSL_enable_ocsp_stapling BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_enable_ocsp_stapling) #define SSL_enable_signed_cert_timestamps BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_enable_signed_cert_timestamps) #define SSL_enable_tls_channel_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_enable_tls_channel_id) #define SSL_error_description BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_error_description) #define SSL_export_keying_material BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_export_keying_material) #define SSL_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_free) #define SSL_generate_key_block BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_generate_key_block) #define SSL_get0_alpn_selected BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_alpn_selected) #define SSL_get0_certificate_types BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_certificate_types) #define SSL_get0_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_chain) #define SSL_get0_chain_certs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_chain_certs) #define SSL_get0_ech_name_override BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_ech_name_override) #define SSL_get0_ech_retry_configs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_ech_retry_configs) #define SSL_get0_next_proto_negotiated BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_next_proto_negotiated) #define SSL_get0_ocsp_response BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_ocsp_response) #define SSL_get0_param 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_param) #define SSL_get0_peer_application_settings BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_peer_application_settings) #define SSL_get0_peer_certificates BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_peer_certificates) #define SSL_get0_peer_delegation_algorithms BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_peer_delegation_algorithms) #define SSL_get0_peer_verify_algorithms BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_peer_verify_algorithms) #define SSL_get0_selected_credential BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_selected_credential) #define SSL_get0_server_requested_CAs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_server_requested_CAs) #define SSL_get0_session_id_context BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_session_id_context) #define SSL_get0_signed_cert_timestamp_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get0_signed_cert_timestamp_list) #define SSL_get1_session BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get1_session) #define SSL_get_SSL_CTX BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_SSL_CTX) #define SSL_get_all_cipher_names BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_all_cipher_names) #define SSL_get_all_curve_names BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_all_curve_names) #define SSL_get_all_group_names BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_all_group_names) #define SSL_get_all_signature_algorithm_names BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_all_signature_algorithm_names) #define SSL_get_all_standard_cipher_names BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_all_standard_cipher_names) #define SSL_get_all_version_names BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_all_version_names) #define SSL_get_certificate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_certificate) #define SSL_get_cipher_by_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_cipher_by_value) #define SSL_get_cipher_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
SSL_get_cipher_list) #define SSL_get_ciphers BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_ciphers) #define SSL_get_client_CA_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_client_CA_list) #define SSL_get_client_random BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_client_random) #define SSL_get_compliance_policy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_compliance_policy) #define SSL_get_current_cipher BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_current_cipher) #define SSL_get_current_compression BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_current_compression) #define SSL_get_current_expansion BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_current_expansion) #define SSL_get_curve_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_curve_id) #define SSL_get_curve_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_curve_name) #define SSL_get_default_timeout BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_default_timeout) #define SSL_get_early_data_reason BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_early_data_reason) #define SSL_get_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_error) #define SSL_get_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_ex_data) #define SSL_get_ex_data_X509_STORE_CTX_idx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_ex_data_X509_STORE_CTX_idx) #define SSL_get_ex_new_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_ex_new_index) #define SSL_get_extms_support BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_extms_support) #define SSL_get_fd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_fd) #define SSL_get_finished BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_finished) #define SSL_get_group_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_group_id) #define SSL_get_group_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_group_name) #define SSL_get_info_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_info_callback) #define SSL_get_ivs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_ivs) #define 
SSL_get_key_block_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_key_block_len) #define SSL_get_max_cert_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_max_cert_list) #define SSL_get_max_proto_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_max_proto_version) #define SSL_get_min_proto_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_min_proto_version) #define SSL_get_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_mode) #define SSL_get_negotiated_group BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_negotiated_group) #define SSL_get_options BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_options) #define SSL_get_peer_cert_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_peer_cert_chain) #define SSL_get_peer_certificate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_peer_certificate) #define SSL_get_peer_finished BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_peer_finished) #define SSL_get_peer_full_cert_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_peer_full_cert_chain) #define SSL_get_peer_quic_transport_params BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_peer_quic_transport_params) #define SSL_get_peer_signature_algorithm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_peer_signature_algorithm) #define SSL_get_pending_cipher BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_pending_cipher) #define SSL_get_privatekey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_privatekey) #define SSL_get_psk_identity BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_psk_identity) #define SSL_get_psk_identity_hint BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_psk_identity_hint) #define SSL_get_quiet_shutdown BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_quiet_shutdown) #define SSL_get_rbio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_rbio) #define SSL_get_read_ahead BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_read_ahead) #define SSL_get_read_sequence BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_read_sequence) #define SSL_get_rfd 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_rfd) #define SSL_get_secure_renegotiation_support BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_secure_renegotiation_support) #define SSL_get_selected_srtp_profile BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_selected_srtp_profile) #define SSL_get_server_random BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_server_random) #define SSL_get_server_tmp_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_server_tmp_key) #define SSL_get_servername BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_servername) #define SSL_get_servername_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_servername_type) #define SSL_get_session BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_session) #define SSL_get_shared_ciphers BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_shared_ciphers) #define SSL_get_shared_sigalgs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_shared_sigalgs) #define SSL_get_shutdown BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_shutdown) #define SSL_get_signature_algorithm_digest BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_signature_algorithm_digest) #define SSL_get_signature_algorithm_key_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_signature_algorithm_key_type) #define SSL_get_signature_algorithm_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_signature_algorithm_name) #define SSL_get_srtp_profiles BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_srtp_profiles) #define SSL_get_ticket_age_skew BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_ticket_age_skew) #define SSL_get_tls_channel_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_tls_channel_id) #define SSL_get_tls_unique BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_tls_unique) #define SSL_get_tlsext_status_ocsp_resp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_tlsext_status_ocsp_resp) #define SSL_get_tlsext_status_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_tlsext_status_type) #define SSL_get_verify_callback 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_verify_callback) #define SSL_get_verify_depth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_verify_depth) #define SSL_get_verify_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_verify_mode) #define SSL_get_verify_result BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_verify_result) #define SSL_get_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_version) #define SSL_get_wbio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_wbio) #define SSL_get_wfd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_wfd) #define SSL_get_write_sequence BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_get_write_sequence) #define SSL_has_application_settings BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_has_application_settings) #define SSL_has_pending BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_has_pending) #define SSL_in_early_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_in_early_data) #define SSL_in_false_start BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_in_false_start) #define SSL_in_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_in_init) #define SSL_is_dtls BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_is_dtls) #define SSL_is_init_finished BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_is_init_finished) #define SSL_is_quic BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_is_quic) #define SSL_is_server BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_is_server) #define SSL_is_signature_algorithm_rsa_pss BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_is_signature_algorithm_rsa_pss) #define SSL_key_update BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_key_update) #define SSL_library_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_library_init) #define SSL_load_client_CA_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_load_client_CA_file) #define SSL_load_error_strings BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_load_error_strings) #define SSL_magic_pending_session_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_magic_pending_session_ptr) #define SSL_marshal_ech_config 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_marshal_ech_config) #define SSL_max_seal_overhead BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_max_seal_overhead) #define SSL_need_tmp_RSA BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_need_tmp_RSA) #define SSL_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_new) #define SSL_num_renegotiations BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_num_renegotiations) #define SSL_peek BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_peek) #define SSL_pending BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_pending) #define SSL_process_quic_post_handshake BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_process_quic_post_handshake) #define SSL_process_tls13_new_session_ticket BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_process_tls13_new_session_ticket) #define SSL_provide_quic_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_provide_quic_data) #define SSL_quic_max_handshake_flight_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_quic_max_handshake_flight_len) #define SSL_quic_read_level BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_quic_read_level) #define SSL_quic_write_level BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_quic_write_level) #define SSL_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_read) #define SSL_renegotiate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_renegotiate) #define SSL_renegotiate_pending BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_renegotiate_pending) #define SSL_request_handshake_hints BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_request_handshake_hints) #define SSL_reset_early_data_reject BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_reset_early_data_reject) #define SSL_select_next_proto BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_select_next_proto) #define SSL_send_fatal_alert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_send_fatal_alert) #define SSL_serialize_capabilities BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_serialize_capabilities) #define SSL_serialize_handshake_hints BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_serialize_handshake_hints) 
#define SSL_session_reused BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_session_reused) #define SSL_set0_CA_names BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set0_CA_names) #define SSL_set0_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set0_chain) #define SSL_set0_client_CAs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set0_client_CAs) #define SSL_set0_rbio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set0_rbio) #define SSL_set0_verify_cert_store BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set0_verify_cert_store) #define SSL_set0_wbio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set0_wbio) #define SSL_set1_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_chain) #define SSL_set1_curves BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_curves) #define SSL_set1_curves_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_curves_list) #define SSL_set1_ech_config_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_ech_config_list) #define SSL_set1_group_ids BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_group_ids) #define SSL_set1_groups BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_groups) #define SSL_set1_groups_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_groups_list) #define SSL_set1_host BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_host) #define SSL_set1_param BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_param) #define SSL_set1_sigalgs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_sigalgs) #define SSL_set1_sigalgs_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_sigalgs_list) #define SSL_set1_tls_channel_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_tls_channel_id) #define SSL_set1_verify_cert_store BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set1_verify_cert_store) #define SSL_set_SSL_CTX BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_SSL_CTX) #define SSL_set_accept_state BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_accept_state) #define SSL_set_alpn_protos BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_alpn_protos) #define 
SSL_set_alps_use_new_codepoint BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_alps_use_new_codepoint) #define SSL_set_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_bio) #define SSL_set_cert_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_cert_cb) #define SSL_set_chain_and_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_chain_and_key) #define SSL_set_check_client_certificate_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_check_client_certificate_type) #define SSL_set_check_ecdsa_curve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_check_ecdsa_curve) #define SSL_set_cipher_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_cipher_list) #define SSL_set_client_CA_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_client_CA_list) #define SSL_set_compliance_policy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_compliance_policy) #define SSL_set_connect_state BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_connect_state) #define SSL_set_custom_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_custom_verify) #define SSL_set_early_data_enabled BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_early_data_enabled) #define SSL_set_enable_ech_grease BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_enable_ech_grease) #define SSL_set_enforce_rsa_key_usage BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_enforce_rsa_key_usage) #define SSL_set_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_ex_data) #define SSL_set_fd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_fd) #define SSL_set_handshake_hints BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_handshake_hints) #define SSL_set_hostflags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_hostflags) #define SSL_set_info_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_info_callback) #define SSL_set_jdk11_workaround BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_jdk11_workaround) #define SSL_set_max_cert_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_max_cert_list) #define SSL_set_max_proto_version 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_max_proto_version) #define SSL_set_max_send_fragment BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_max_send_fragment) #define SSL_set_min_proto_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_min_proto_version) #define SSL_set_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_mode) #define SSL_set_msg_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_msg_callback) #define SSL_set_msg_callback_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_msg_callback_arg) #define SSL_set_mtu BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_mtu) #define SSL_set_ocsp_response BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_ocsp_response) #define SSL_set_options BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_options) #define SSL_set_permute_extensions BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_permute_extensions) #define SSL_set_private_key_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_private_key_method) #define SSL_set_psk_client_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_psk_client_callback) #define SSL_set_psk_server_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_psk_server_callback) #define SSL_set_purpose BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_purpose) #define SSL_set_quic_early_data_context BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_quic_early_data_context) #define SSL_set_quic_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_quic_method) #define SSL_set_quic_transport_params BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_quic_transport_params) #define SSL_set_quic_use_legacy_codepoint BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_quic_use_legacy_codepoint) #define SSL_set_quiet_shutdown BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_quiet_shutdown) #define SSL_set_read_ahead BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_read_ahead) #define SSL_set_renegotiate_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_renegotiate_mode) #define 
SSL_set_retain_only_sha256_of_client_certs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_retain_only_sha256_of_client_certs) #define SSL_set_rfd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_rfd) #define SSL_set_session BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_session) #define SSL_set_session_id_context BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_session_id_context) #define SSL_set_shed_handshake_config BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_shed_handshake_config) #define SSL_set_shutdown BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_shutdown) #define SSL_set_signed_cert_timestamp_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_signed_cert_timestamp_list) #define SSL_set_signing_algorithm_prefs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_signing_algorithm_prefs) #define SSL_set_srtp_profiles BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_srtp_profiles) #define SSL_set_state BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_state) #define SSL_set_strict_cipher_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_strict_cipher_list) #define SSL_set_tls_channel_id_enabled BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_tls_channel_id_enabled) #define SSL_set_tlsext_host_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_tlsext_host_name) #define SSL_set_tlsext_status_ocsp_resp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_tlsext_status_ocsp_resp) #define SSL_set_tlsext_status_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_tlsext_status_type) #define SSL_set_tlsext_use_srtp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_tlsext_use_srtp) #define SSL_set_tmp_dh BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_tmp_dh) #define SSL_set_tmp_dh_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_tmp_dh_callback) #define SSL_set_tmp_ecdh BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_tmp_ecdh) #define SSL_set_tmp_rsa BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_tmp_rsa) #define SSL_set_tmp_rsa_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
SSL_set_tmp_rsa_callback) #define SSL_set_trust BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_trust) #define SSL_set_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_verify) #define SSL_set_verify_algorithm_prefs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_verify_algorithm_prefs) #define SSL_set_verify_depth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_verify_depth) #define SSL_set_wfd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_set_wfd) #define SSL_shutdown BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_shutdown) #define SSL_state BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_state) #define SSL_state_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_state_string) #define SSL_state_string_long BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_state_string_long) #define SSL_total_renegotiations BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_total_renegotiations) #define SSL_use_PrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_use_PrivateKey) #define SSL_use_PrivateKey_ASN1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_use_PrivateKey_ASN1) #define SSL_use_PrivateKey_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_use_PrivateKey_file) #define SSL_use_RSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_use_RSAPrivateKey) #define SSL_use_RSAPrivateKey_ASN1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_use_RSAPrivateKey_ASN1) #define SSL_use_RSAPrivateKey_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_use_RSAPrivateKey_file) #define SSL_use_certificate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_use_certificate) #define SSL_use_certificate_ASN1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_use_certificate_ASN1) #define SSL_use_certificate_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_use_certificate_file) #define SSL_use_psk_identity_hint BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_use_psk_identity_hint) #define SSL_used_hello_retry_request BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_used_hello_retry_request) #define SSL_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
SSL_version) #define SSL_want BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_want) #define SSL_was_key_usage_invalid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_was_key_usage_invalid) #define SSL_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSL_write) #define SSLeay BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSLeay) #define SSLeay_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSLeay_version) #define SSLv23_client_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSLv23_client_method) #define SSLv23_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSLv23_method) #define SSLv23_server_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSLv23_server_method) #define TLS_client_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLS_client_method) #define TLS_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLS_method) #define TLS_server_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLS_server_method) #define TLS_with_buffers_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLS_with_buffers_method) #define TLSv1_1_client_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLSv1_1_client_method) #define TLSv1_1_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLSv1_1_method) #define TLSv1_1_server_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLSv1_1_server_method) #define TLSv1_2_client_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLSv1_2_client_method) #define TLSv1_2_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLSv1_2_method) #define TLSv1_2_server_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLSv1_2_server_method) #define TLSv1_client_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLSv1_client_method) #define TLSv1_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLSv1_method) #define TLSv1_server_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TLSv1_server_method) #define TRUST_TOKEN_CLIENT_add_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_add_key) #define TRUST_TOKEN_CLIENT_begin_issuance BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_begin_issuance) #define 
TRUST_TOKEN_CLIENT_begin_issuance_over_message BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_begin_issuance_over_message) #define TRUST_TOKEN_CLIENT_begin_redemption BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_begin_redemption) #define TRUST_TOKEN_CLIENT_finish_issuance BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_finish_issuance) #define TRUST_TOKEN_CLIENT_finish_redemption BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_finish_redemption) #define TRUST_TOKEN_CLIENT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_free) #define TRUST_TOKEN_CLIENT_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_new) #define TRUST_TOKEN_CLIENT_set_srr_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_set_srr_key) #define TRUST_TOKEN_ISSUER_add_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_add_key) #define TRUST_TOKEN_ISSUER_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_free) #define TRUST_TOKEN_ISSUER_issue BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_issue) #define TRUST_TOKEN_ISSUER_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_new) #define TRUST_TOKEN_ISSUER_redeem BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_redeem) #define TRUST_TOKEN_ISSUER_redeem_over_message BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_redeem_over_message) #define TRUST_TOKEN_ISSUER_set_metadata_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_set_metadata_key) #define TRUST_TOKEN_ISSUER_set_srr_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_set_srr_key) #define TRUST_TOKEN_PRETOKEN_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_PRETOKEN_free) #define TRUST_TOKEN_decode_private_metadata BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_decode_private_metadata) #define TRUST_TOKEN_derive_key_from_secret BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_derive_key_from_secret) #define 
TRUST_TOKEN_experiment_v1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_experiment_v1) #define TRUST_TOKEN_experiment_v2_pmb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_experiment_v2_pmb) #define TRUST_TOKEN_experiment_v2_voprf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_experiment_v2_voprf) #define TRUST_TOKEN_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_free) #define TRUST_TOKEN_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_generate_key) #define TRUST_TOKEN_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_new) #define TRUST_TOKEN_pst_v1_pmb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_pst_v1_pmb) #define TRUST_TOKEN_pst_v1_voprf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, TRUST_TOKEN_pst_v1_voprf) #define USERNOTICE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, USERNOTICE_free) #define USERNOTICE_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, USERNOTICE_it) #define USERNOTICE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, USERNOTICE_new) #define X25519 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X25519) #define X25519_keypair BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X25519_keypair) #define X25519_public_from_private BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X25519_public_from_private) #define X509V3_EXT_CRL_add_nconf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_CRL_add_nconf) #define X509V3_EXT_REQ_add_nconf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_REQ_add_nconf) #define X509V3_EXT_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_add) #define X509V3_EXT_add_alias BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_add_alias) #define X509V3_EXT_add_nconf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_add_nconf) #define X509V3_EXT_add_nconf_sk BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_add_nconf_sk) #define X509V3_EXT_d2i BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_d2i) #define X509V3_EXT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_free) #define X509V3_EXT_get 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_get) #define X509V3_EXT_get_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_get_nid) #define X509V3_EXT_i2d BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_i2d) #define X509V3_EXT_nconf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_nconf) #define X509V3_EXT_nconf_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_nconf_nid) #define X509V3_EXT_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_print) #define X509V3_EXT_print_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_EXT_print_fp) #define X509V3_NAME_from_section BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_NAME_from_section) #define X509V3_add1_i2d BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_add1_i2d) #define X509V3_add_standard_extensions BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_add_standard_extensions) #define X509V3_add_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_add_value) #define X509V3_add_value_bool BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_add_value_bool) #define X509V3_add_value_int BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_add_value_int) #define X509V3_bool_from_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_bool_from_string) #define X509V3_conf_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_conf_free) #define X509V3_extensions_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_extensions_print) #define X509V3_get_d2i BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_get_d2i) #define X509V3_get_section BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_get_section) #define X509V3_get_value_bool BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_get_value_bool) #define X509V3_get_value_int BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_get_value_int) #define X509V3_parse_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_parse_list) #define X509V3_set_ctx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_set_ctx) #define X509V3_set_nconf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509V3_set_nconf) #define X509_ALGOR_cmp 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ALGOR_cmp) #define X509_ALGOR_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ALGOR_dup) #define X509_ALGOR_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ALGOR_free) #define X509_ALGOR_get0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ALGOR_get0) #define X509_ALGOR_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ALGOR_it) #define X509_ALGOR_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ALGOR_new) #define X509_ALGOR_set0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ALGOR_set0) #define X509_ALGOR_set_md BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ALGOR_set_md) #define X509_ATTRIBUTE_count BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_count) #define X509_ATTRIBUTE_create BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_create) #define X509_ATTRIBUTE_create_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_create_by_NID) #define X509_ATTRIBUTE_create_by_OBJ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_create_by_OBJ) #define X509_ATTRIBUTE_create_by_txt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_create_by_txt) #define X509_ATTRIBUTE_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_dup) #define X509_ATTRIBUTE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_free) #define X509_ATTRIBUTE_get0_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_get0_data) #define X509_ATTRIBUTE_get0_object BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_get0_object) #define X509_ATTRIBUTE_get0_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_get0_type) #define X509_ATTRIBUTE_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_it) #define X509_ATTRIBUTE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_new) #define X509_ATTRIBUTE_set1_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_set1_data) #define X509_ATTRIBUTE_set1_object BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_ATTRIBUTE_set1_object) #define X509_CERT_AUX_free 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CERT_AUX_free) #define X509_CERT_AUX_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CERT_AUX_it) #define X509_CERT_AUX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CERT_AUX_new) #define X509_CERT_AUX_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CERT_AUX_print) #define X509_CINF_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CINF_free) #define X509_CINF_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CINF_it) #define X509_CINF_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CINF_new) #define X509_CRL_INFO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_INFO_free) #define X509_CRL_INFO_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_INFO_it) #define X509_CRL_INFO_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_INFO_new) #define X509_CRL_add0_revoked BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_add0_revoked) #define X509_CRL_add1_ext_i2d BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_add1_ext_i2d) #define X509_CRL_add_ext BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_add_ext) #define X509_CRL_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_cmp) #define X509_CRL_delete_ext BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_delete_ext) #define X509_CRL_digest BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_digest) #define X509_CRL_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_dup) #define X509_CRL_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_free) #define X509_CRL_get0_by_cert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get0_by_cert) #define X509_CRL_get0_by_serial BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get0_by_serial) #define X509_CRL_get0_extensions BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get0_extensions) #define X509_CRL_get0_lastUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get0_lastUpdate) #define X509_CRL_get0_nextUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get0_nextUpdate) #define X509_CRL_get0_signature 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get0_signature) #define X509_CRL_get_REVOKED BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get_REVOKED) #define X509_CRL_get_ext BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get_ext) #define X509_CRL_get_ext_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get_ext_by_NID) #define X509_CRL_get_ext_by_OBJ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get_ext_by_OBJ) #define X509_CRL_get_ext_by_critical BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get_ext_by_critical) #define X509_CRL_get_ext_count BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get_ext_count) #define X509_CRL_get_ext_d2i BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get_ext_d2i) #define X509_CRL_get_issuer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get_issuer) #define X509_CRL_get_lastUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get_lastUpdate) #define X509_CRL_get_nextUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get_nextUpdate) #define X509_CRL_get_signature_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get_signature_nid) #define X509_CRL_get_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_get_version) #define X509_CRL_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_it) #define X509_CRL_match BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_match) #define X509_CRL_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_new) #define X509_CRL_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_print) #define X509_CRL_print_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_print_fp) #define X509_CRL_set1_lastUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_set1_lastUpdate) #define X509_CRL_set1_nextUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_set1_nextUpdate) #define X509_CRL_set1_signature_algo BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_set1_signature_algo) #define X509_CRL_set1_signature_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_set1_signature_value) #define 
X509_CRL_set_issuer_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_set_issuer_name) #define X509_CRL_set_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_set_version) #define X509_CRL_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_sign) #define X509_CRL_sign_ctx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_sign_ctx) #define X509_CRL_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_sort) #define X509_CRL_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_up_ref) #define X509_CRL_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_CRL_verify) #define X509_EXTENSIONS_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSIONS_it) #define X509_EXTENSION_create_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSION_create_by_NID) #define X509_EXTENSION_create_by_OBJ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSION_create_by_OBJ) #define X509_EXTENSION_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSION_dup) #define X509_EXTENSION_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSION_free) #define X509_EXTENSION_get_critical BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSION_get_critical) #define X509_EXTENSION_get_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSION_get_data) #define X509_EXTENSION_get_object BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSION_get_object) #define X509_EXTENSION_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSION_it) #define X509_EXTENSION_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSION_new) #define X509_EXTENSION_set_critical BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSION_set_critical) #define X509_EXTENSION_set_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSION_set_data) #define X509_EXTENSION_set_object BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_EXTENSION_set_object) #define X509_INFO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_INFO_free) #define X509_LOOKUP_add_dir BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_LOOKUP_add_dir) #define 
X509_LOOKUP_ctrl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_LOOKUP_ctrl) #define X509_LOOKUP_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_LOOKUP_file) #define X509_LOOKUP_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_LOOKUP_free) #define X509_LOOKUP_hash_dir BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_LOOKUP_hash_dir) #define X509_LOOKUP_load_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_LOOKUP_load_file) #define X509_NAME_ENTRY_create_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_ENTRY_create_by_NID) #define X509_NAME_ENTRY_create_by_OBJ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_ENTRY_create_by_OBJ) #define X509_NAME_ENTRY_create_by_txt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_ENTRY_create_by_txt) #define X509_NAME_ENTRY_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_ENTRY_dup) #define X509_NAME_ENTRY_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_ENTRY_free) #define X509_NAME_ENTRY_get_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_ENTRY_get_data) #define X509_NAME_ENTRY_get_object BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_ENTRY_get_object) #define X509_NAME_ENTRY_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_ENTRY_it) #define X509_NAME_ENTRY_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_ENTRY_new) #define X509_NAME_ENTRY_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_ENTRY_set) #define X509_NAME_ENTRY_set_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_ENTRY_set_data) #define X509_NAME_ENTRY_set_object BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_ENTRY_set_object) #define X509_NAME_add_entry BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_add_entry) #define X509_NAME_add_entry_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_add_entry_by_NID) #define X509_NAME_add_entry_by_OBJ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_add_entry_by_OBJ) #define X509_NAME_add_entry_by_txt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_add_entry_by_txt) #define X509_NAME_cmp 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_cmp) #define X509_NAME_delete_entry BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_delete_entry) #define X509_NAME_digest BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_digest) #define X509_NAME_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_dup) #define X509_NAME_entry_count BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_entry_count) #define X509_NAME_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_free) #define X509_NAME_get0_der BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_get0_der) #define X509_NAME_get_entry BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_get_entry) #define X509_NAME_get_index_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_get_index_by_NID) #define X509_NAME_get_index_by_OBJ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_get_index_by_OBJ) #define X509_NAME_get_text_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_get_text_by_NID) #define X509_NAME_get_text_by_OBJ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_get_text_by_OBJ) #define X509_NAME_hash BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_hash) #define X509_NAME_hash_old BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_hash_old) #define X509_NAME_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_it) #define X509_NAME_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_new) #define X509_NAME_oneline BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_oneline) #define X509_NAME_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_print) #define X509_NAME_print_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_print_ex) #define X509_NAME_print_ex_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_print_ex_fp) #define X509_NAME_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_NAME_set) #define X509_OBJECT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_OBJECT_free) #define X509_OBJECT_free_contents BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_OBJECT_free_contents) #define X509_OBJECT_get0_X509 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_OBJECT_get0_X509) #define X509_OBJECT_get_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_OBJECT_get_type) #define X509_OBJECT_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_OBJECT_new) #define X509_PUBKEY_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PUBKEY_free) #define X509_PUBKEY_get BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PUBKEY_get) #define X509_PUBKEY_get0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PUBKEY_get0) #define X509_PUBKEY_get0_param BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PUBKEY_get0_param) #define X509_PUBKEY_get0_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PUBKEY_get0_public_key) #define X509_PUBKEY_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PUBKEY_it) #define X509_PUBKEY_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PUBKEY_new) #define X509_PUBKEY_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PUBKEY_set) #define X509_PUBKEY_set0_param BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PUBKEY_set0_param) #define X509_PURPOSE_get0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PURPOSE_get0) #define X509_PURPOSE_get_by_sname BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PURPOSE_get_by_sname) #define X509_PURPOSE_get_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PURPOSE_get_id) #define X509_PURPOSE_get_trust BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_PURPOSE_get_trust) #define X509_REQ_INFO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_INFO_free) #define X509_REQ_INFO_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_INFO_it) #define X509_REQ_INFO_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_INFO_new) #define X509_REQ_add1_attr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_add1_attr) #define X509_REQ_add1_attr_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_add1_attr_by_NID) #define X509_REQ_add1_attr_by_OBJ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_add1_attr_by_OBJ) #define X509_REQ_add1_attr_by_txt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
X509_REQ_add1_attr_by_txt) #define X509_REQ_add_extensions BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_add_extensions) #define X509_REQ_add_extensions_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_add_extensions_nid) #define X509_REQ_check_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_check_private_key) #define X509_REQ_delete_attr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_delete_attr) #define X509_REQ_digest BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_digest) #define X509_REQ_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_dup) #define X509_REQ_extension_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_extension_nid) #define X509_REQ_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_free) #define X509_REQ_get0_pubkey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_get0_pubkey) #define X509_REQ_get0_signature BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_get0_signature) #define X509_REQ_get1_email BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_get1_email) #define X509_REQ_get_attr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_get_attr) #define X509_REQ_get_attr_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_get_attr_by_NID) #define X509_REQ_get_attr_by_OBJ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_get_attr_by_OBJ) #define X509_REQ_get_attr_count BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_get_attr_count) #define X509_REQ_get_extensions BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_get_extensions) #define X509_REQ_get_pubkey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_get_pubkey) #define X509_REQ_get_signature_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_get_signature_nid) #define X509_REQ_get_subject_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_get_subject_name) #define X509_REQ_get_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_get_version) #define X509_REQ_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_it) #define X509_REQ_new 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_new) #define X509_REQ_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_print) #define X509_REQ_print_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_print_ex) #define X509_REQ_print_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_print_fp) #define X509_REQ_set1_signature_algo BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_set1_signature_algo) #define X509_REQ_set1_signature_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_set1_signature_value) #define X509_REQ_set_pubkey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_set_pubkey) #define X509_REQ_set_subject_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_set_subject_name) #define X509_REQ_set_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_set_version) #define X509_REQ_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_sign) #define X509_REQ_sign_ctx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_sign_ctx) #define X509_REQ_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REQ_verify) #define X509_REVOKED_add1_ext_i2d BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_add1_ext_i2d) #define X509_REVOKED_add_ext BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_add_ext) #define X509_REVOKED_delete_ext BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_delete_ext) #define X509_REVOKED_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_dup) #define X509_REVOKED_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_free) #define X509_REVOKED_get0_extensions BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_get0_extensions) #define X509_REVOKED_get0_revocationDate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_get0_revocationDate) #define X509_REVOKED_get0_serialNumber BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_get0_serialNumber) #define X509_REVOKED_get_ext BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_get_ext) #define X509_REVOKED_get_ext_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
X509_REVOKED_get_ext_by_NID) #define X509_REVOKED_get_ext_by_OBJ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_get_ext_by_OBJ) #define X509_REVOKED_get_ext_by_critical BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_get_ext_by_critical) #define X509_REVOKED_get_ext_count BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_get_ext_count) #define X509_REVOKED_get_ext_d2i BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_get_ext_d2i) #define X509_REVOKED_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_it) #define X509_REVOKED_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_new) #define X509_REVOKED_set_revocationDate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_set_revocationDate) #define X509_REVOKED_set_serialNumber BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_REVOKED_set_serialNumber) #define X509_SIG_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_SIG_free) #define X509_SIG_get0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_SIG_get0) #define X509_SIG_getm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_SIG_getm) #define X509_SIG_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_SIG_new) #define X509_STORE_CTX_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_cleanup) #define X509_STORE_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_free) #define X509_STORE_CTX_get0_cert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get0_cert) #define X509_STORE_CTX_get0_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get0_chain) #define X509_STORE_CTX_get0_current_crl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get0_current_crl) #define X509_STORE_CTX_get0_param BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get0_param) #define X509_STORE_CTX_get0_parent_ctx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get0_parent_ctx) #define X509_STORE_CTX_get0_store BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get0_store) #define X509_STORE_CTX_get0_untrusted 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get0_untrusted) #define X509_STORE_CTX_get1_certs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get1_certs) #define X509_STORE_CTX_get1_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get1_chain) #define X509_STORE_CTX_get1_crls BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get1_crls) #define X509_STORE_CTX_get1_issuer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get1_issuer) #define X509_STORE_CTX_get_by_subject BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get_by_subject) #define X509_STORE_CTX_get_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get_chain) #define X509_STORE_CTX_get_current_cert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get_current_cert) #define X509_STORE_CTX_get_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get_error) #define X509_STORE_CTX_get_error_depth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get_error_depth) #define X509_STORE_CTX_get_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get_ex_data) #define X509_STORE_CTX_get_ex_new_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_get_ex_new_index) #define X509_STORE_CTX_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_init) #define X509_STORE_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_new) #define X509_STORE_CTX_set0_crls BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set0_crls) #define X509_STORE_CTX_set0_param BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set0_param) #define X509_STORE_CTX_set0_trusted_stack BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set0_trusted_stack) #define X509_STORE_CTX_set_chain BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set_chain) #define X509_STORE_CTX_set_default BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set_default) #define X509_STORE_CTX_set_depth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set_depth) 
// Symbol-prefix shims: each macro rewrites the unprefixed BoringSSL symbol
// name to BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, name), namespacing the
// library's exported symbols behind the vendor prefix.
// NOTE(review): BORINGSSL_ADD_PREFIX is presumably defined earlier in this
// header — confirm before relying on it. This list appears machine-generated;
// prefer regenerating it over hand-editing.
#define X509_STORE_CTX_set_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set_error)
#define X509_STORE_CTX_set_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set_ex_data)
#define X509_STORE_CTX_set_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set_flags)
#define X509_STORE_CTX_set_purpose BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set_purpose)
#define X509_STORE_CTX_set_time BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set_time)
#define X509_STORE_CTX_set_time_posix BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set_time_posix)
#define X509_STORE_CTX_set_trust BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set_trust)
#define X509_STORE_CTX_set_verify_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_set_verify_cb)
#define X509_STORE_CTX_trusted_stack BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_CTX_trusted_stack)
#define X509_STORE_add_cert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_add_cert)
#define X509_STORE_add_crl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_add_crl)
#define X509_STORE_add_lookup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_add_lookup)
#define X509_STORE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_free)
#define X509_STORE_get0_objects BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_get0_objects)
#define X509_STORE_get0_param BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_get0_param)
#define X509_STORE_get1_objects BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_get1_objects)
#define X509_STORE_load_locations BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_load_locations)
#define X509_STORE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_new)
#define X509_STORE_set1_param BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_set1_param)
#define X509_STORE_set_default_paths BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_set_default_paths)
#define X509_STORE_set_depth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_set_depth)
#define X509_STORE_set_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_set_flags)
#define X509_STORE_set_purpose BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_set_purpose)
#define X509_STORE_set_trust BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_set_trust)
#define X509_STORE_set_verify_cb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_set_verify_cb)
#define X509_STORE_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_STORE_up_ref)
#define X509_VAL_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VAL_free)
#define X509_VAL_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VAL_it)
#define X509_VAL_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VAL_new)
#define X509_VERIFY_PARAM_add0_policy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_add0_policy)
#define X509_VERIFY_PARAM_add1_host BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_add1_host)
#define X509_VERIFY_PARAM_clear_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_clear_flags)
#define X509_VERIFY_PARAM_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_free)
#define X509_VERIFY_PARAM_get_depth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_get_depth)
#define X509_VERIFY_PARAM_get_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_get_flags)
#define X509_VERIFY_PARAM_inherit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_inherit)
#define X509_VERIFY_PARAM_lookup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_lookup)
#define X509_VERIFY_PARAM_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_new)
#define X509_VERIFY_PARAM_set1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set1)
#define X509_VERIFY_PARAM_set1_email BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set1_email)
#define X509_VERIFY_PARAM_set1_host BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set1_host)
#define X509_VERIFY_PARAM_set1_ip BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set1_ip)
#define X509_VERIFY_PARAM_set1_ip_asc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set1_ip_asc)
#define X509_VERIFY_PARAM_set1_policies BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set1_policies)
#define X509_VERIFY_PARAM_set_depth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_depth)
#define X509_VERIFY_PARAM_set_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_flags)
#define X509_VERIFY_PARAM_set_hostflags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_hostflags)
#define X509_VERIFY_PARAM_set_purpose BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_purpose)
#define X509_VERIFY_PARAM_set_time BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_time)
#define X509_VERIFY_PARAM_set_time_posix BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_time_posix)
#define X509_VERIFY_PARAM_set_trust BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_trust)
#define X509_add1_ext_i2d BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_add1_ext_i2d)
#define X509_add1_reject_object BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_add1_reject_object)
#define X509_add1_trust_object BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_add1_trust_object)
#define X509_add_ext BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_add_ext)
#define X509_alias_get0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_alias_get0)
#define X509_alias_set1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_alias_set1)
#define X509_chain_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_chain_up_ref)
#define X509_check_akid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_check_akid)
#define X509_check_ca BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_check_ca)
#define X509_check_email BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_check_email)
#define X509_check_host BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_check_host)
#define X509_check_ip BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_check_ip)
#define X509_check_ip_asc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_check_ip_asc)
#define X509_check_issued BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_check_issued)
#define X509_check_private_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_check_private_key)
#define X509_check_purpose BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_check_purpose)
#define X509_check_trust BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_check_trust)
#define X509_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_cmp)
#define X509_cmp_current_time BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_cmp_current_time)
#define X509_cmp_time BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_cmp_time)
#define X509_cmp_time_posix BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_cmp_time_posix)
#define X509_delete_ext BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_delete_ext)
#define X509_digest BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_digest)
#define X509_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_dup)
#define X509_email_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_email_free)
#define X509_find_by_issuer_and_serial BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_find_by_issuer_and_serial)
#define X509_find_by_subject BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_find_by_subject)
#define X509_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_free)
#define X509_get0_authority_issuer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_authority_issuer)
#define X509_get0_authority_key_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_authority_key_id)
#define X509_get0_authority_serial BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_authority_serial)
#define X509_get0_extensions BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_extensions)
#define X509_get0_notAfter BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_notAfter)
#define X509_get0_notBefore BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_notBefore)
#define X509_get0_pubkey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_pubkey)
#define X509_get0_pubkey_bitstr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_pubkey_bitstr)
#define X509_get0_serialNumber BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_serialNumber)
#define X509_get0_signature BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_signature)
#define X509_get0_subject_key_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_subject_key_id)
#define X509_get0_tbs_sigalg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_tbs_sigalg)
#define X509_get0_uids BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get0_uids)
#define X509_get1_email BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get1_email)
#define X509_get1_ocsp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get1_ocsp)
#define X509_get_X509_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_X509_PUBKEY)
#define X509_get_default_cert_area BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_default_cert_area)
#define X509_get_default_cert_dir BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_default_cert_dir)
#define X509_get_default_cert_dir_env BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_default_cert_dir_env)
#define X509_get_default_cert_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_default_cert_file)
#define X509_get_default_cert_file_env BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_default_cert_file_env)
#define X509_get_default_private_dir BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_default_private_dir)
#define X509_get_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_ex_data)
#define X509_get_ex_new_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_ex_new_index)
#define X509_get_ext BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_ext)
#define X509_get_ext_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_ext_by_NID)
#define X509_get_ext_by_OBJ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_ext_by_OBJ)
#define X509_get_ext_by_critical BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_ext_by_critical)
#define X509_get_ext_count BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_ext_count)
#define X509_get_ext_d2i BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_ext_d2i) #define X509_get_extended_key_usage BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_extended_key_usage) #define X509_get_extension_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_extension_flags) #define X509_get_issuer_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_issuer_name) #define X509_get_key_usage BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_key_usage) #define X509_get_notAfter BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_notAfter) #define X509_get_notBefore BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_notBefore) #define X509_get_pathlen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_pathlen) #define X509_get_pubkey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_pubkey) #define X509_get_serialNumber BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_serialNumber) #define X509_get_signature_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_signature_nid) #define X509_get_subject_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_subject_name) #define X509_get_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_get_version) #define X509_getm_notAfter BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_getm_notAfter) #define X509_getm_notBefore BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_getm_notBefore) #define X509_gmtime_adj BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_gmtime_adj) #define X509_is_valid_trust_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_is_valid_trust_id) #define X509_issuer_name_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_issuer_name_cmp) #define X509_issuer_name_hash BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_issuer_name_hash) #define X509_issuer_name_hash_old BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_issuer_name_hash_old) #define X509_it BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_it) #define X509_keyid_get0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_keyid_get0) #define X509_keyid_set1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
X509_keyid_set1) #define X509_load_cert_crl_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_load_cert_crl_file) #define X509_load_cert_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_load_cert_file) #define X509_load_crl_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_load_crl_file) #define X509_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_new) #define X509_parse_from_buffer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_parse_from_buffer) #define X509_policy_check BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_policy_check) #define X509_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_print) #define X509_print_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_print_ex) #define X509_print_ex_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_print_ex_fp) #define X509_print_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_print_fp) #define X509_pubkey_digest BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_pubkey_digest) #define X509_reject_clear BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_reject_clear) #define X509_set1_notAfter BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_set1_notAfter) #define X509_set1_notBefore BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_set1_notBefore) #define X509_set1_signature_algo BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_set1_signature_algo) #define X509_set1_signature_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_set1_signature_value) #define X509_set_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_set_ex_data) #define X509_set_issuer_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_set_issuer_name) #define X509_set_notAfter BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_set_notAfter) #define X509_set_notBefore BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_set_notBefore) #define X509_set_pubkey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_set_pubkey) #define X509_set_serialNumber BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_set_serialNumber) #define X509_set_subject_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_set_subject_name) #define 
X509_set_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_set_version) #define X509_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_sign) #define X509_sign_ctx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_sign_ctx) #define X509_signature_dump BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_signature_dump) #define X509_signature_print BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_signature_print) #define X509_subject_name_cmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_subject_name_cmp) #define X509_subject_name_hash BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_subject_name_hash) #define X509_subject_name_hash_old BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_subject_name_hash_old) #define X509_supported_extension BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_supported_extension) #define X509_time_adj BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_time_adj) #define X509_time_adj_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_time_adj_ex) #define X509_trust_clear BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_trust_clear) #define X509_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_up_ref) #define X509_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_verify) #define X509_verify_cert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_verify_cert) #define X509_verify_cert_error_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509_verify_cert_error_string) #define X509v3_add_ext BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509v3_add_ext) #define X509v3_delete_ext BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509v3_delete_ext) #define X509v3_get_ext BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509v3_get_ext) #define X509v3_get_ext_by_NID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509v3_get_ext_by_NID) #define X509v3_get_ext_by_OBJ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509v3_get_ext_by_OBJ) #define X509v3_get_ext_by_critical BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509v3_get_ext_by_critical) #define X509v3_get_ext_count BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, X509v3_get_ext_count) #define 
__clang_call_terminate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, __clang_call_terminate) #define a2i_IPADDRESS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, a2i_IPADDRESS) #define a2i_IPADDRESS_NC BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, a2i_IPADDRESS_NC) #define aes128gcmsiv_aes_ks BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes128gcmsiv_aes_ks) #define aes128gcmsiv_aes_ks_enc_x1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes128gcmsiv_aes_ks_enc_x1) #define aes128gcmsiv_dec BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes128gcmsiv_dec) #define aes128gcmsiv_ecb_enc_block BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes128gcmsiv_ecb_enc_block) #define aes128gcmsiv_enc_msg_x4 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes128gcmsiv_enc_msg_x4) #define aes128gcmsiv_enc_msg_x8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes128gcmsiv_enc_msg_x8) #define aes128gcmsiv_kdf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes128gcmsiv_kdf) #define aes256gcmsiv_aes_ks BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes256gcmsiv_aes_ks) #define aes256gcmsiv_aes_ks_enc_x1 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes256gcmsiv_aes_ks_enc_x1) #define aes256gcmsiv_dec BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes256gcmsiv_dec) #define aes256gcmsiv_ecb_enc_block BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes256gcmsiv_ecb_enc_block) #define aes256gcmsiv_enc_msg_x4 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes256gcmsiv_enc_msg_x4) #define aes256gcmsiv_enc_msg_x8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes256gcmsiv_enc_msg_x8) #define aes256gcmsiv_kdf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes256gcmsiv_kdf) #define aes_ctr_set_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_ctr_set_key) #define aes_gcm_dec_kernel BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_gcm_dec_kernel) #define aes_gcm_dec_update_vaes_avx10_512 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_gcm_dec_update_vaes_avx10_512) #define aes_gcm_dec_update_vaes_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_gcm_dec_update_vaes_avx2) #define aes_gcm_enc_kernel BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
aes_gcm_enc_kernel) #define aes_gcm_enc_update_vaes_avx10_512 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_gcm_enc_update_vaes_avx10_512) #define aes_gcm_enc_update_vaes_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_gcm_enc_update_vaes_avx2) #define aes_hw_cbc_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_cbc_encrypt) #define aes_hw_ctr32_encrypt_blocks BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_ctr32_encrypt_blocks) #define aes_hw_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_decrypt) #define aes_hw_ecb_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_ecb_encrypt) #define aes_hw_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_encrypt) #define aes_hw_encrypt_key_to_decrypt_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_encrypt_key_to_decrypt_key) #define aes_hw_set_decrypt_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_set_decrypt_key) #define aes_hw_set_encrypt_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_set_encrypt_key) #define aes_hw_set_encrypt_key_alt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_set_encrypt_key_alt) #define aes_hw_set_encrypt_key_alt_preferred BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_set_encrypt_key_alt_preferred) #define aes_hw_set_encrypt_key_base BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_hw_set_encrypt_key_base) #define aes_nohw_cbc_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_nohw_cbc_encrypt) #define aes_nohw_ctr32_encrypt_blocks BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_nohw_ctr32_encrypt_blocks) #define aes_nohw_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_nohw_decrypt) #define aes_nohw_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_nohw_encrypt) #define aes_nohw_set_decrypt_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_nohw_set_decrypt_key) #define aes_nohw_set_encrypt_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aes_nohw_set_encrypt_key) #define aesgcmsiv_htable6_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aesgcmsiv_htable6_init) #define aesgcmsiv_htable_init 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aesgcmsiv_htable_init) #define aesgcmsiv_htable_polyval BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aesgcmsiv_htable_polyval) #define aesgcmsiv_polyval_horner BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aesgcmsiv_polyval_horner) #define aesni_gcm_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aesni_gcm_decrypt) #define aesni_gcm_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, aesni_gcm_encrypt) #define asn1_bit_string_length BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_bit_string_length) #define asn1_do_adb BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_do_adb) #define asn1_enc_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_enc_free) #define asn1_enc_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_enc_init) #define asn1_enc_restore BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_enc_restore) #define asn1_enc_save BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_enc_save) #define asn1_encoding_clear BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_encoding_clear) #define asn1_generalizedtime_to_tm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_generalizedtime_to_tm) #define asn1_get_choice_selector BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_get_choice_selector) #define asn1_get_field_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_get_field_ptr) #define asn1_get_string_table_for_testing BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_get_string_table_for_testing) #define asn1_is_printable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_is_printable) #define asn1_refcount_dec_and_test_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_refcount_dec_and_test_zero) #define asn1_refcount_set_one BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_refcount_set_one) #define asn1_set_choice_selector BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_set_choice_selector) #define asn1_type_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_type_cleanup) #define asn1_type_set0_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_type_set0_string) #define asn1_type_value_as_pointer 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_type_value_as_pointer) #define asn1_utctime_to_tm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, asn1_utctime_to_tm) #define bcm_as_approved_status BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bcm_as_approved_status) #define bcm_success BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bcm_success) #define beeu_mod_inverse_vartime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, beeu_mod_inverse_vartime) #define bio_clear_socket_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bio_clear_socket_error) #define bio_errno_should_retry BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bio_errno_should_retry) #define bio_ip_and_port_to_socket_and_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bio_ip_and_port_to_socket_and_addr) #define bio_sock_error BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bio_sock_error) #define bio_socket_nbio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bio_socket_nbio) #define bio_socket_should_retry BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bio_socket_should_retry) #define bn_abs_sub_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_abs_sub_consttime) #define bn_add_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_add_words) #define bn_assert_fits_in_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_assert_fits_in_bytes) #define bn_big_endian_to_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_big_endian_to_words) #define bn_copy_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_copy_words) #define bn_declassify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_declassify) #define bn_div_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_div_consttime) #define bn_expand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_expand) #define bn_fits_in_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_fits_in_words) #define bn_from_montgomery_small BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_from_montgomery_small) #define bn_gather5 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_gather5) #define bn_in_range_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_in_range_words) #define bn_is_bit_set_words 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_is_bit_set_words) #define bn_is_relatively_prime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_is_relatively_prime) #define bn_jacobi BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_jacobi) #define bn_lcm_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_lcm_consttime) #define bn_less_than_montgomery_R BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_less_than_montgomery_R) #define bn_less_than_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_less_than_words) #define bn_miller_rabin_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_miller_rabin_init) #define bn_miller_rabin_iteration BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_miller_rabin_iteration) #define bn_minimal_width BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_minimal_width) #define bn_mod_add_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_add_consttime) #define bn_mod_add_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_add_words) #define bn_mod_exp_mont_small BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_exp_mont_small) #define bn_mod_inverse0_prime_mont_small BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_inverse0_prime_mont_small) #define bn_mod_inverse_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_inverse_consttime) #define bn_mod_inverse_prime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_inverse_prime) #define bn_mod_inverse_secret_prime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_inverse_secret_prime) #define bn_mod_lshift1_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_lshift1_consttime) #define bn_mod_lshift_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_lshift_consttime) #define bn_mod_mul_montgomery_small BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_mul_montgomery_small) #define bn_mod_sub_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_sub_consttime) #define bn_mod_sub_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_sub_words) #define bn_mod_u16_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mod_u16_consttime) #define 
bn_mont_ctx_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mont_ctx_cleanup) #define bn_mont_ctx_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mont_ctx_init) #define bn_mont_ctx_set_RR_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mont_ctx_set_RR_consttime) #define bn_mont_n0 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mont_n0) #define bn_mul4x_mont BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul4x_mont) #define bn_mul4x_mont_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul4x_mont_capable) #define bn_mul4x_mont_gather5 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul4x_mont_gather5) #define bn_mul4x_mont_gather5_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul4x_mont_gather5_capable) #define bn_mul_add_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul_add_words) #define bn_mul_comba4 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul_comba4) #define bn_mul_comba8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul_comba8) #define bn_mul_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul_consttime) #define bn_mul_mont BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul_mont) #define bn_mul_mont_gather5_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul_mont_gather5_nohw) #define bn_mul_mont_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul_mont_nohw) #define bn_mul_small BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul_small) #define bn_mul_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mul_words) #define bn_mulx4x_mont BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mulx4x_mont) #define bn_mulx4x_mont_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mulx4x_mont_capable) #define bn_mulx4x_mont_gather5 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mulx4x_mont_gather5) #define bn_mulx4x_mont_gather5_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mulx4x_mont_gather5_capable) #define bn_mulx_adx_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_mulx_adx_capable) #define bn_odd_number_is_obviously_composite BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
bn_odd_number_is_obviously_composite) #define bn_one_to_montgomery BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_one_to_montgomery) #define bn_power5_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_power5_capable) #define bn_power5_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_power5_nohw) #define bn_powerx5 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_powerx5) #define bn_powerx5_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_powerx5_capable) #define bn_rand_range_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_rand_range_words) #define bn_rand_secret_range BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_rand_secret_range) #define bn_reduce_once BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_reduce_once) #define bn_reduce_once_in_place BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_reduce_once_in_place) #define bn_resize_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_resize_words) #define bn_rshift1_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_rshift1_words) #define bn_rshift_secret_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_rshift_secret_shift) #define bn_rshift_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_rshift_words) #define bn_scatter5 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_scatter5) #define bn_secret BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_secret) #define bn_select_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_select_words) #define bn_set_minimal_width BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_set_minimal_width) #define bn_set_static_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_set_static_words) #define bn_set_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_set_words) #define bn_sqr8x_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_sqr8x_internal) #define bn_sqr8x_mont BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_sqr8x_mont) #define bn_sqr8x_mont_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_sqr8x_mont_capable) #define bn_sqr_comba4 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_sqr_comba4) #define bn_sqr_comba8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
bn_sqr_comba8) #define bn_sqr_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_sqr_consttime) #define bn_sqr_small BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_sqr_small) #define bn_sqr_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_sqr_words) #define bn_sqrx8x_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_sqrx8x_internal) #define bn_sub_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_sub_words) #define bn_to_montgomery_small BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_to_montgomery_small) #define bn_uadd_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_uadd_consttime) #define bn_usub_consttime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_usub_consttime) #define bn_wexpand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_wexpand) #define bn_words_to_big_endian BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_words_to_big_endian) #define boringssl_ensure_ecc_self_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, boringssl_ensure_ecc_self_test) #define boringssl_ensure_ffdh_self_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, boringssl_ensure_ffdh_self_test) #define boringssl_ensure_rsa_self_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, boringssl_ensure_rsa_self_test) #define boringssl_fips_break_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, boringssl_fips_break_test) #define boringssl_fips_inc_counter BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, boringssl_fips_inc_counter) #define boringssl_self_test_hmac_sha256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, boringssl_self_test_hmac_sha256) #define boringssl_self_test_sha256 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, boringssl_self_test_sha256) #define boringssl_self_test_sha512 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, boringssl_self_test_sha512) #define bsaes_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bsaes_capable) #define bsaes_cbc_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bsaes_cbc_encrypt) #define c2i_ASN1_BIT_STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, c2i_ASN1_BIT_STRING) #define c2i_ASN1_INTEGER BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
c2i_ASN1_INTEGER) #define c2i_ASN1_OBJECT BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, c2i_ASN1_OBJECT) #define chacha20_poly1305_asm_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, chacha20_poly1305_asm_capable) #define chacha20_poly1305_open BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, chacha20_poly1305_open) #define chacha20_poly1305_open_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, chacha20_poly1305_open_avx2) #define chacha20_poly1305_open_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, chacha20_poly1305_open_nohw) #define chacha20_poly1305_seal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, chacha20_poly1305_seal) #define chacha20_poly1305_seal_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, chacha20_poly1305_seal_avx2) #define chacha20_poly1305_seal_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, chacha20_poly1305_seal_nohw) #define crypto_gcm_clmul_enabled BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, crypto_gcm_clmul_enabled) #define d2i_ASN1_BIT_STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_BIT_STRING) #define d2i_ASN1_BMPSTRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_BMPSTRING) #define d2i_ASN1_BOOLEAN BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_BOOLEAN) #define d2i_ASN1_ENUMERATED BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_ENUMERATED) #define d2i_ASN1_GENERALIZEDTIME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_GENERALIZEDTIME) #define d2i_ASN1_GENERALSTRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_GENERALSTRING) #define d2i_ASN1_IA5STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_IA5STRING) #define d2i_ASN1_INTEGER BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_INTEGER) #define d2i_ASN1_NULL BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_NULL) #define d2i_ASN1_OBJECT BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_OBJECT) #define d2i_ASN1_OCTET_STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_OCTET_STRING) #define d2i_ASN1_PRINTABLE BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_PRINTABLE) #define d2i_ASN1_PRINTABLESTRING 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_PRINTABLESTRING) #define d2i_ASN1_SEQUENCE_ANY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_SEQUENCE_ANY) #define d2i_ASN1_SET_ANY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_SET_ANY) #define d2i_ASN1_T61STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_T61STRING) #define d2i_ASN1_TIME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_TIME) #define d2i_ASN1_TYPE BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_TYPE) #define d2i_ASN1_UNIVERSALSTRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_UNIVERSALSTRING) #define d2i_ASN1_UTCTIME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_UTCTIME) #define d2i_ASN1_UTF8STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_UTF8STRING) #define d2i_ASN1_VISIBLESTRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ASN1_VISIBLESTRING) #define d2i_AUTHORITY_INFO_ACCESS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_AUTHORITY_INFO_ACCESS) #define d2i_AUTHORITY_KEYID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_AUTHORITY_KEYID) #define d2i_AutoPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_AutoPrivateKey) #define d2i_BASIC_CONSTRAINTS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_BASIC_CONSTRAINTS) #define d2i_CERTIFICATEPOLICIES BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_CERTIFICATEPOLICIES) #define d2i_CRL_DIST_POINTS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_CRL_DIST_POINTS) #define d2i_DHparams BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DHparams) #define d2i_DHparams_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DHparams_bio) #define d2i_DIRECTORYSTRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DIRECTORYSTRING) #define d2i_DISPLAYTEXT BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DISPLAYTEXT) #define d2i_DSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DSAPrivateKey) #define d2i_DSAPrivateKey_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DSAPrivateKey_bio) #define d2i_DSAPrivateKey_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DSAPrivateKey_fp) #define 
d2i_DSAPublicKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DSAPublicKey) #define d2i_DSA_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DSA_PUBKEY) #define d2i_DSA_PUBKEY_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DSA_PUBKEY_bio) #define d2i_DSA_PUBKEY_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DSA_PUBKEY_fp) #define d2i_DSA_SIG BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DSA_SIG) #define d2i_DSAparams BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_DSAparams) #define d2i_ECDSA_SIG BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ECDSA_SIG) #define d2i_ECPKParameters BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ECPKParameters) #define d2i_ECParameters BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ECParameters) #define d2i_ECPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ECPrivateKey) #define d2i_ECPrivateKey_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ECPrivateKey_bio) #define d2i_ECPrivateKey_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ECPrivateKey_fp) #define d2i_EC_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_EC_PUBKEY) #define d2i_EC_PUBKEY_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_EC_PUBKEY_bio) #define d2i_EC_PUBKEY_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_EC_PUBKEY_fp) #define d2i_EXTENDED_KEY_USAGE BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_EXTENDED_KEY_USAGE) #define d2i_GENERAL_NAME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_GENERAL_NAME) #define d2i_GENERAL_NAMES BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_GENERAL_NAMES) #define d2i_ISSUING_DIST_POINT BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_ISSUING_DIST_POINT) #define d2i_NETSCAPE_SPKAC BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_NETSCAPE_SPKAC) #define d2i_NETSCAPE_SPKI BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_NETSCAPE_SPKI) #define d2i_PKCS12 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PKCS12) #define d2i_PKCS12_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PKCS12_bio) #define d2i_PKCS12_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PKCS12_fp) #define d2i_PKCS7 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PKCS7) #define d2i_PKCS7_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PKCS7_bio) #define d2i_PKCS8PrivateKey_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PKCS8PrivateKey_bio) #define d2i_PKCS8PrivateKey_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PKCS8PrivateKey_fp) #define d2i_PKCS8_PRIV_KEY_INFO BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PKCS8_PRIV_KEY_INFO) #define d2i_PKCS8_PRIV_KEY_INFO_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PKCS8_PRIV_KEY_INFO_bio) #define d2i_PKCS8_PRIV_KEY_INFO_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PKCS8_PRIV_KEY_INFO_fp) #define d2i_PKCS8_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PKCS8_bio) #define d2i_PKCS8_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PKCS8_fp) #define d2i_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PUBKEY) #define d2i_PUBKEY_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PUBKEY_bio) #define d2i_PUBKEY_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PUBKEY_fp) #define d2i_PrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PrivateKey) #define d2i_PrivateKey_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PrivateKey_bio) #define d2i_PrivateKey_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PrivateKey_fp) #define d2i_PublicKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_PublicKey) #define d2i_RSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_RSAPrivateKey) #define d2i_RSAPrivateKey_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_RSAPrivateKey_bio) #define d2i_RSAPrivateKey_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_RSAPrivateKey_fp) #define d2i_RSAPublicKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_RSAPublicKey) #define d2i_RSAPublicKey_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_RSAPublicKey_bio) #define d2i_RSAPublicKey_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_RSAPublicKey_fp) #define d2i_RSA_PSS_PARAMS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_RSA_PSS_PARAMS) #define d2i_RSA_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
d2i_RSA_PUBKEY) #define d2i_RSA_PUBKEY_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_RSA_PUBKEY_bio) #define d2i_RSA_PUBKEY_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_RSA_PUBKEY_fp) #define d2i_SSL_SESSION BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_SSL_SESSION) #define d2i_SSL_SESSION_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_SSL_SESSION_bio) #define d2i_X509 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509) #define d2i_X509_ALGOR BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_ALGOR) #define d2i_X509_ATTRIBUTE BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_ATTRIBUTE) #define d2i_X509_AUX BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_AUX) #define d2i_X509_CERT_AUX BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_CERT_AUX) #define d2i_X509_CINF BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_CINF) #define d2i_X509_CRL BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_CRL) #define d2i_X509_CRL_INFO BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_CRL_INFO) #define d2i_X509_CRL_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_CRL_bio) #define d2i_X509_CRL_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_CRL_fp) #define d2i_X509_EXTENSION BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_EXTENSION) #define d2i_X509_EXTENSIONS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_EXTENSIONS) #define d2i_X509_NAME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_NAME) #define d2i_X509_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_PUBKEY) #define d2i_X509_REQ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_REQ) #define d2i_X509_REQ_INFO BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_REQ_INFO) #define d2i_X509_REQ_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_REQ_bio) #define d2i_X509_REQ_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_REQ_fp) #define d2i_X509_REVOKED BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_REVOKED) #define d2i_X509_SIG BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_SIG) #define d2i_X509_VAL 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_VAL) #define d2i_X509_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_bio) #define d2i_X509_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, d2i_X509_fp) #define dh_asn1_meth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, dh_asn1_meth) #define dh_check_params_fast BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, dh_check_params_fast) #define dh_compute_key_padded_no_self_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, dh_compute_key_padded_no_self_test) #define dh_pkey_meth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, dh_pkey_meth) #define dsa_asn1_meth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, dsa_asn1_meth) #define dsa_check_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, dsa_check_key) #define ec_GFp_mont_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_add) #define ec_GFp_mont_dbl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_dbl) #define ec_GFp_mont_felem_exp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_felem_exp) #define ec_GFp_mont_felem_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_felem_from_bytes) #define ec_GFp_mont_felem_mul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_felem_mul) #define ec_GFp_mont_felem_reduce BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_felem_reduce) #define ec_GFp_mont_felem_sqr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_felem_sqr) #define ec_GFp_mont_felem_to_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_felem_to_bytes) #define ec_GFp_mont_init_precomp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_init_precomp) #define ec_GFp_mont_mul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_mul) #define ec_GFp_mont_mul_base BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_mul_base) #define ec_GFp_mont_mul_batch BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_mul_batch) #define ec_GFp_mont_mul_precomp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_mont_mul_precomp) #define ec_GFp_mont_mul_public_batch BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
ec_GFp_mont_mul_public_batch) #define ec_GFp_nistp_recode_scalar_bits BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_nistp_recode_scalar_bits) #define ec_GFp_simple_cmp_x_coordinate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_simple_cmp_x_coordinate) #define ec_GFp_simple_felem_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_simple_felem_from_bytes) #define ec_GFp_simple_felem_to_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_simple_felem_to_bytes) #define ec_GFp_simple_group_get_curve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_simple_group_get_curve) #define ec_GFp_simple_group_set_curve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_simple_group_set_curve) #define ec_GFp_simple_invert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_simple_invert) #define ec_GFp_simple_is_at_infinity BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_simple_is_at_infinity) #define ec_GFp_simple_is_on_curve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_simple_is_on_curve) #define ec_GFp_simple_point_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_simple_point_copy) #define ec_GFp_simple_point_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_simple_point_init) #define ec_GFp_simple_point_set_to_infinity BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_simple_point_set_to_infinity) #define ec_GFp_simple_points_equal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_GFp_simple_points_equal) #define ec_affine_jacobian_equal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_affine_jacobian_equal) #define ec_affine_select BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_affine_select) #define ec_affine_to_jacobian BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_affine_to_jacobian) #define ec_asn1_meth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_asn1_meth) #define ec_bignum_to_felem BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_bignum_to_felem) #define ec_bignum_to_scalar BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_bignum_to_scalar) #define ec_cmp_x_coordinate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
ec_cmp_x_coordinate) #define ec_compute_wNAF BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_compute_wNAF) #define ec_felem_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_felem_add) #define ec_felem_equal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_felem_equal) #define ec_felem_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_felem_from_bytes) #define ec_felem_neg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_felem_neg) #define ec_felem_non_zero_mask BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_felem_non_zero_mask) #define ec_felem_one BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_felem_one) #define ec_felem_select BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_felem_select) #define ec_felem_sub BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_felem_sub) #define ec_felem_to_bignum BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_felem_to_bignum) #define ec_felem_to_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_felem_to_bytes) #define ec_get_x_coordinate_as_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_get_x_coordinate_as_bytes) #define ec_get_x_coordinate_as_scalar BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_get_x_coordinate_as_scalar) #define ec_hash_to_curve_p256_xmd_sha256_sswu BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_hash_to_curve_p256_xmd_sha256_sswu) #define ec_hash_to_curve_p384_xmd_sha384_sswu BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_hash_to_curve_p384_xmd_sha384_sswu) #define ec_hash_to_curve_p384_xmd_sha512_sswu_draft07 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_hash_to_curve_p384_xmd_sha512_sswu_draft07) #define ec_hash_to_scalar_p384_xmd_sha384 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_hash_to_scalar_p384_xmd_sha384) #define ec_hash_to_scalar_p384_xmd_sha512_draft07 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_hash_to_scalar_p384_xmd_sha512_draft07) #define ec_init_precomp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_init_precomp) #define ec_jacobian_to_affine BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_jacobian_to_affine) #define ec_jacobian_to_affine_batch 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_jacobian_to_affine_batch) #define ec_pkey_meth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_pkey_meth) #define ec_point_byte_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_point_byte_len) #define ec_point_from_uncompressed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_point_from_uncompressed) #define ec_point_mul_no_self_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_point_mul_no_self_test) #define ec_point_mul_scalar BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_point_mul_scalar) #define ec_point_mul_scalar_base BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_point_mul_scalar_base) #define ec_point_mul_scalar_batch BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_point_mul_scalar_batch) #define ec_point_mul_scalar_precomp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_point_mul_scalar_precomp) #define ec_point_mul_scalar_public BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_point_mul_scalar_public) #define ec_point_mul_scalar_public_batch BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_point_mul_scalar_public_batch) #define ec_point_select BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_point_select) #define ec_point_set_affine_coordinates BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_point_set_affine_coordinates) #define ec_point_to_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_point_to_bytes) #define ec_precomp_select BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_precomp_select) #define ec_random_nonzero_scalar BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_random_nonzero_scalar) #define ec_random_scalar BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_random_scalar) #define ec_scalar_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_add) #define ec_scalar_equal_vartime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_equal_vartime) #define ec_scalar_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_from_bytes) #define ec_scalar_from_montgomery BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_from_montgomery) #define ec_scalar_inv0_montgomery 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_inv0_montgomery) #define ec_scalar_is_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_is_zero) #define ec_scalar_mul_montgomery BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_mul_montgomery) #define ec_scalar_neg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_neg) #define ec_scalar_reduce BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_reduce) #define ec_scalar_select BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_select) #define ec_scalar_sub BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_sub) #define ec_scalar_to_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_to_bytes) #define ec_scalar_to_montgomery BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_to_montgomery) #define ec_scalar_to_montgomery_inv_vartime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_scalar_to_montgomery_inv_vartime) #define ec_set_to_safe_point BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_set_to_safe_point) #define ec_simple_scalar_inv0_montgomery BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_simple_scalar_inv0_montgomery) #define ec_simple_scalar_to_montgomery_inv_vartime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ec_simple_scalar_to_montgomery_inv_vartime) #define ecdsa_sign_fixed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecdsa_sign_fixed) #define ecdsa_sign_fixed_with_nonce_for_known_answer_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecdsa_sign_fixed_with_nonce_for_known_answer_test) #define ecdsa_verify_fixed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecdsa_verify_fixed) #define ecdsa_verify_fixed_no_self_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecdsa_verify_fixed_no_self_test) #define ecp_nistz256_div_by_2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_div_by_2) #define ecp_nistz256_mul_by_2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_mul_by_2) #define ecp_nistz256_mul_by_3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_mul_by_3) #define ecp_nistz256_mul_mont BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_mul_mont) 
#define ecp_nistz256_mul_mont_adx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_mul_mont_adx) #define ecp_nistz256_mul_mont_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_mul_mont_nohw) #define ecp_nistz256_neg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_neg) #define ecp_nistz256_ord_mul_mont BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_ord_mul_mont) #define ecp_nistz256_ord_mul_mont_adx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_ord_mul_mont_adx) #define ecp_nistz256_ord_mul_mont_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_ord_mul_mont_nohw) #define ecp_nistz256_ord_sqr_mont BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_ord_sqr_mont) #define ecp_nistz256_ord_sqr_mont_adx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_ord_sqr_mont_adx) #define ecp_nistz256_ord_sqr_mont_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_ord_sqr_mont_nohw) #define ecp_nistz256_point_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_point_add) #define ecp_nistz256_point_add_adx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_point_add_adx) #define ecp_nistz256_point_add_affine BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_point_add_affine) #define ecp_nistz256_point_add_affine_adx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_point_add_affine_adx) #define ecp_nistz256_point_add_affine_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_point_add_affine_nohw) #define ecp_nistz256_point_add_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_point_add_nohw) #define ecp_nistz256_point_double BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_point_double) #define ecp_nistz256_point_double_adx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_point_double_adx) #define ecp_nistz256_point_double_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_point_double_nohw) #define ecp_nistz256_select_w5 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_select_w5) #define 
ecp_nistz256_select_w5_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_select_w5_avx2) #define ecp_nistz256_select_w5_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_select_w5_nohw) #define ecp_nistz256_select_w7 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_select_w7) #define ecp_nistz256_select_w7_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_select_w7_avx2) #define ecp_nistz256_select_w7_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_select_w7_nohw) #define ecp_nistz256_sqr_mont BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_sqr_mont) #define ecp_nistz256_sqr_mont_adx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_sqr_mont_adx) #define ecp_nistz256_sqr_mont_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_sqr_mont_nohw) #define ecp_nistz256_sub BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ecp_nistz256_sub) #define ed25519_asn1_meth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ed25519_asn1_meth) #define ed25519_pkey_meth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ed25519_pkey_meth) #define evp_pkey_set_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, evp_pkey_set_method) #define fiat_curve25519_adx_mul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, fiat_curve25519_adx_mul) #define fiat_curve25519_adx_square BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, fiat_curve25519_adx_square) #define fiat_p256_adx_mul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, fiat_p256_adx_mul) #define fiat_p256_adx_sqr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, fiat_p256_adx_sqr) #define gcm_ghash_avx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_avx) #define gcm_ghash_clmul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_clmul) #define gcm_ghash_neon BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_neon) #define gcm_ghash_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_nohw) #define gcm_ghash_ssse3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_ssse3) #define gcm_ghash_v8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_v8) #define 
gcm_ghash_vpclmulqdq_avx10_512 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_vpclmulqdq_avx10_512) #define gcm_ghash_vpclmulqdq_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_ghash_vpclmulqdq_avx2) #define gcm_gmult_avx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_avx) #define gcm_gmult_clmul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_clmul) #define gcm_gmult_neon BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_neon) #define gcm_gmult_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_nohw) #define gcm_gmult_ssse3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_ssse3) #define gcm_gmult_v8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_v8) #define gcm_gmult_vpclmulqdq_avx10 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_vpclmulqdq_avx10) #define gcm_gmult_vpclmulqdq_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_gmult_vpclmulqdq_avx2) #define gcm_init_avx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_avx) #define gcm_init_clmul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_clmul) #define gcm_init_neon BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_neon) #define gcm_init_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_nohw) #define gcm_init_ssse3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_ssse3) #define gcm_init_v8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_v8) #define gcm_init_vpclmulqdq_avx10_512 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_vpclmulqdq_avx10_512) #define gcm_init_vpclmulqdq_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_init_vpclmulqdq_avx2) #define gcm_neon_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_neon_capable) #define gcm_pmull_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, gcm_pmull_capable) #define have_fast_rdrand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, have_fast_rdrand) #define have_rdrand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, have_rdrand) #define hkdf_pkey_meth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, hkdf_pkey_meth) #define hwaes_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
hwaes_capable) /* Auto-generated symbol-prefix table (continued from above): each #define
   rewrites an unprefixed BoringSSL symbol to BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, name)
   so NIO's vendored BoringSSL cannot collide with another OpenSSL/BoringSSL copy linked
   into the same process. Machine-generated — do not edit entries by hand. */ #define i2a_ASN1_ENUMERATED BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2a_ASN1_ENUMERATED) #define i2a_ASN1_INTEGER BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2a_ASN1_INTEGER) #define i2a_ASN1_OBJECT BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2a_ASN1_OBJECT) #define i2a_ASN1_STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2a_ASN1_STRING) #define i2c_ASN1_BIT_STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2c_ASN1_BIT_STRING) #define i2c_ASN1_INTEGER BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2c_ASN1_INTEGER) #define i2d_ASN1_BIT_STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_BIT_STRING) #define i2d_ASN1_BMPSTRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_BMPSTRING) #define i2d_ASN1_BOOLEAN BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_BOOLEAN) #define i2d_ASN1_ENUMERATED BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_ENUMERATED) #define i2d_ASN1_GENERALIZEDTIME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_GENERALIZEDTIME) #define i2d_ASN1_GENERALSTRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_GENERALSTRING) #define i2d_ASN1_IA5STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_IA5STRING) #define i2d_ASN1_INTEGER BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_INTEGER) #define i2d_ASN1_NULL BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_NULL) #define i2d_ASN1_OBJECT BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_OBJECT) #define i2d_ASN1_OCTET_STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_OCTET_STRING) #define i2d_ASN1_PRINTABLE BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_PRINTABLE) #define i2d_ASN1_PRINTABLESTRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_PRINTABLESTRING) #define i2d_ASN1_SEQUENCE_ANY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_SEQUENCE_ANY) #define i2d_ASN1_SET_ANY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_SET_ANY) #define i2d_ASN1_T61STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_T61STRING) #define i2d_ASN1_TIME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_TIME) #define 
i2d_ASN1_TYPE BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_TYPE) #define i2d_ASN1_UNIVERSALSTRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_UNIVERSALSTRING) #define i2d_ASN1_UTCTIME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_UTCTIME) #define i2d_ASN1_UTF8STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_UTF8STRING) #define i2d_ASN1_VISIBLESTRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ASN1_VISIBLESTRING) #define i2d_AUTHORITY_INFO_ACCESS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_AUTHORITY_INFO_ACCESS) #define i2d_AUTHORITY_KEYID BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_AUTHORITY_KEYID) #define i2d_BASIC_CONSTRAINTS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_BASIC_CONSTRAINTS) #define i2d_CERTIFICATEPOLICIES BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_CERTIFICATEPOLICIES) #define i2d_CRL_DIST_POINTS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_CRL_DIST_POINTS) #define i2d_DHparams BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DHparams) #define i2d_DHparams_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DHparams_bio) #define i2d_DIRECTORYSTRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DIRECTORYSTRING) #define i2d_DISPLAYTEXT BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DISPLAYTEXT) #define i2d_DSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DSAPrivateKey) #define i2d_DSAPrivateKey_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DSAPrivateKey_bio) #define i2d_DSAPrivateKey_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DSAPrivateKey_fp) #define i2d_DSAPublicKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DSAPublicKey) #define i2d_DSA_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DSA_PUBKEY) #define i2d_DSA_PUBKEY_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DSA_PUBKEY_bio) #define i2d_DSA_PUBKEY_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DSA_PUBKEY_fp) #define i2d_DSA_SIG BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DSA_SIG) #define i2d_DSAparams BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_DSAparams) #define i2d_ECDSA_SIG 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ECDSA_SIG) #define i2d_ECPKParameters BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ECPKParameters) #define i2d_ECParameters BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ECParameters) #define i2d_ECPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ECPrivateKey) #define i2d_ECPrivateKey_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ECPrivateKey_bio) #define i2d_ECPrivateKey_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ECPrivateKey_fp) #define i2d_EC_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_EC_PUBKEY) #define i2d_EC_PUBKEY_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_EC_PUBKEY_bio) #define i2d_EC_PUBKEY_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_EC_PUBKEY_fp) #define i2d_EXTENDED_KEY_USAGE BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_EXTENDED_KEY_USAGE) #define i2d_GENERAL_NAME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_GENERAL_NAME) #define i2d_GENERAL_NAMES BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_GENERAL_NAMES) #define i2d_ISSUING_DIST_POINT BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_ISSUING_DIST_POINT) #define i2d_NETSCAPE_SPKAC BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_NETSCAPE_SPKAC) #define i2d_NETSCAPE_SPKI BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_NETSCAPE_SPKI) #define i2d_PKCS12 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS12) #define i2d_PKCS12_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS12_bio) #define i2d_PKCS12_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS12_fp) #define i2d_PKCS7 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS7) #define i2d_PKCS7_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS7_bio) #define i2d_PKCS8PrivateKeyInfo_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS8PrivateKeyInfo_bio) #define i2d_PKCS8PrivateKeyInfo_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS8PrivateKeyInfo_fp) #define i2d_PKCS8PrivateKey_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS8PrivateKey_bio) #define i2d_PKCS8PrivateKey_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
i2d_PKCS8PrivateKey_fp) #define i2d_PKCS8PrivateKey_nid_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS8PrivateKey_nid_bio) #define i2d_PKCS8PrivateKey_nid_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS8PrivateKey_nid_fp) #define i2d_PKCS8_PRIV_KEY_INFO BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS8_PRIV_KEY_INFO) #define i2d_PKCS8_PRIV_KEY_INFO_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS8_PRIV_KEY_INFO_bio) #define i2d_PKCS8_PRIV_KEY_INFO_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS8_PRIV_KEY_INFO_fp) #define i2d_PKCS8_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS8_bio) #define i2d_PKCS8_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PKCS8_fp) #define i2d_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PUBKEY) #define i2d_PUBKEY_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PUBKEY_bio) #define i2d_PUBKEY_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PUBKEY_fp) #define i2d_PrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PrivateKey) #define i2d_PrivateKey_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PrivateKey_bio) #define i2d_PrivateKey_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PrivateKey_fp) #define i2d_PublicKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_PublicKey) #define i2d_RSAPrivateKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_RSAPrivateKey) #define i2d_RSAPrivateKey_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_RSAPrivateKey_bio) #define i2d_RSAPrivateKey_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_RSAPrivateKey_fp) #define i2d_RSAPublicKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_RSAPublicKey) #define i2d_RSAPublicKey_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_RSAPublicKey_bio) #define i2d_RSAPublicKey_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_RSAPublicKey_fp) #define i2d_RSA_PSS_PARAMS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_RSA_PSS_PARAMS) #define i2d_RSA_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_RSA_PUBKEY) #define i2d_RSA_PUBKEY_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
i2d_RSA_PUBKEY_bio) #define i2d_RSA_PUBKEY_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_RSA_PUBKEY_fp) #define i2d_SSL_SESSION BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_SSL_SESSION) #define i2d_SSL_SESSION_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_SSL_SESSION_bio) #define i2d_X509 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509) #define i2d_X509_ALGOR BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_ALGOR) #define i2d_X509_ATTRIBUTE BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_ATTRIBUTE) #define i2d_X509_AUX BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_AUX) #define i2d_X509_CERT_AUX BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_CERT_AUX) #define i2d_X509_CINF BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_CINF) #define i2d_X509_CRL BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_CRL) #define i2d_X509_CRL_INFO BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_CRL_INFO) #define i2d_X509_CRL_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_CRL_bio) #define i2d_X509_CRL_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_CRL_fp) #define i2d_X509_CRL_tbs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_CRL_tbs) #define i2d_X509_EXTENSION BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_EXTENSION) #define i2d_X509_EXTENSIONS BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_EXTENSIONS) #define i2d_X509_NAME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_NAME) #define i2d_X509_PUBKEY BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_PUBKEY) #define i2d_X509_REQ BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_REQ) #define i2d_X509_REQ_INFO BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_REQ_INFO) #define i2d_X509_REQ_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_REQ_bio) #define i2d_X509_REQ_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_REQ_fp) #define i2d_X509_REVOKED BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_REVOKED) #define i2d_X509_SIG BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_SIG) #define i2d_X509_VAL 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_VAL) #define i2d_X509_bio BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_bio) #define i2d_X509_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_fp) #define i2d_X509_tbs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_X509_tbs) #define i2d_re_X509_CRL_tbs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_re_X509_CRL_tbs) #define i2d_re_X509_REQ_tbs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_re_X509_REQ_tbs) #define i2d_re_X509_tbs BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2d_re_X509_tbs) #define i2o_ECPublicKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2o_ECPublicKey) #define i2s_ASN1_ENUMERATED BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2s_ASN1_ENUMERATED) #define i2s_ASN1_INTEGER BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2s_ASN1_INTEGER) #define i2s_ASN1_OCTET_STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2s_ASN1_OCTET_STRING) #define i2t_ASN1_OBJECT BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2t_ASN1_OBJECT) #define i2v_GENERAL_NAME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2v_GENERAL_NAME) #define i2v_GENERAL_NAMES BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, i2v_GENERAL_NAMES) #define k25519Precomp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, k25519Precomp) #define kBoringSSLRSASqrtTwo BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, kBoringSSLRSASqrtTwo) #define kBoringSSLRSASqrtTwoLen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, kBoringSSLRSASqrtTwoLen) #define kOpenSSLReasonStringData BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, kOpenSSLReasonStringData) #define kOpenSSLReasonValues BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, kOpenSSLReasonValues) #define kOpenSSLReasonValuesLen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, kOpenSSLReasonValuesLen) /* lh_ (LHASH) per-type generated wrappers */ #define lh_CONF_SECTION_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_call_cmp_func) #define lh_CONF_SECTION_call_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_call_doall_arg) #define lh_CONF_SECTION_call_hash_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_call_hash_func) #define 
lh_CONF_SECTION_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_doall_arg) #define lh_CONF_SECTION_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_free) #define lh_CONF_SECTION_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_insert) #define lh_CONF_SECTION_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_new) #define lh_CONF_SECTION_retrieve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_retrieve) #define lh_CONF_VALUE_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_call_cmp_func) #define lh_CONF_VALUE_call_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_call_doall_arg) #define lh_CONF_VALUE_call_hash_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_call_hash_func) #define lh_CONF_VALUE_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_doall_arg) #define lh_CONF_VALUE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_free) #define lh_CONF_VALUE_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_insert) #define lh_CONF_VALUE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_new) #define lh_CONF_VALUE_retrieve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_retrieve) #define lh_CRYPTO_BUFFER_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_call_cmp_func) #define lh_CRYPTO_BUFFER_call_hash_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_call_hash_func) #define lh_CRYPTO_BUFFER_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_delete) #define lh_CRYPTO_BUFFER_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_free) #define lh_CRYPTO_BUFFER_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_insert) #define lh_CRYPTO_BUFFER_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_new) #define lh_CRYPTO_BUFFER_num_items BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_num_items) #define lh_CRYPTO_BUFFER_retrieve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
lh_CRYPTO_BUFFER_retrieve) #define md5_block_asm_data_order BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, md5_block_asm_data_order) #define o2i_ECPublicKey BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, o2i_ECPublicKey) #define pkcs12_iterations_acceptable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pkcs12_iterations_acceptable) #define pkcs12_key_gen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pkcs12_key_gen) #define pkcs12_pbe_encrypt_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pkcs12_pbe_encrypt_init) #define pkcs7_add_signed_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pkcs7_add_signed_data) #define pkcs7_parse_header BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pkcs7_parse_header) #define pkcs8_pbe_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pkcs8_pbe_decrypt) /* Privacy Pass / trust-token (pmbtoken) symbols */ #define pmbtoken_exp1_blind BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp1_blind) #define pmbtoken_exp1_client_key_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp1_client_key_from_bytes) #define pmbtoken_exp1_derive_key_from_secret BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp1_derive_key_from_secret) #define pmbtoken_exp1_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp1_generate_key) #define pmbtoken_exp1_get_h_for_testing BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp1_get_h_for_testing) #define pmbtoken_exp1_issuer_key_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp1_issuer_key_from_bytes) #define pmbtoken_exp1_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp1_read) #define pmbtoken_exp1_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp1_sign) #define pmbtoken_exp1_unblind BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp1_unblind) #define pmbtoken_exp2_blind BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp2_blind) #define pmbtoken_exp2_client_key_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp2_client_key_from_bytes) #define pmbtoken_exp2_derive_key_from_secret BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
pmbtoken_exp2_derive_key_from_secret) #define pmbtoken_exp2_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp2_generate_key) #define pmbtoken_exp2_get_h_for_testing BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp2_get_h_for_testing) #define pmbtoken_exp2_issuer_key_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp2_issuer_key_from_bytes) #define pmbtoken_exp2_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp2_read) #define pmbtoken_exp2_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp2_sign) #define pmbtoken_exp2_unblind BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_exp2_unblind) #define pmbtoken_pst1_blind BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_pst1_blind) #define pmbtoken_pst1_client_key_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_pst1_client_key_from_bytes) #define pmbtoken_pst1_derive_key_from_secret BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_pst1_derive_key_from_secret) #define pmbtoken_pst1_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_pst1_generate_key) #define pmbtoken_pst1_get_h_for_testing BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_pst1_get_h_for_testing) #define pmbtoken_pst1_issuer_key_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_pst1_issuer_key_from_bytes) #define pmbtoken_pst1_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_pst1_read) #define pmbtoken_pst1_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_pst1_sign) #define pmbtoken_pst1_unblind BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, pmbtoken_pst1_unblind) #define poly_Rq_mul BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, poly_Rq_mul) #define rand_fork_unsafe_buffering_enabled BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rand_fork_unsafe_buffering_enabled) #define rsa_asn1_meth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsa_asn1_meth) #define rsa_check_public_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsa_check_public_key) #define rsa_default_private_transform BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
rsa_default_private_transform) #define rsa_default_sign_raw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsa_default_sign_raw) #define rsa_invalidate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsa_invalidate_key) #define rsa_pkey_meth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsa_pkey_meth) #define rsa_private_transform BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsa_private_transform) #define rsa_private_transform_no_self_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsa_private_transform_no_self_test) #define rsa_sign_no_self_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsa_sign_no_self_test) #define rsa_verify_no_self_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsa_verify_no_self_test) #define rsa_verify_raw_no_self_test BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsa_verify_raw_no_self_test) #define rsaz_1024_gather5_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsaz_1024_gather5_avx2) #define rsaz_1024_mul_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsaz_1024_mul_avx2) #define rsaz_1024_norm2red_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsaz_1024_norm2red_avx2) #define rsaz_1024_red2norm_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsaz_1024_red2norm_avx2) #define rsaz_1024_scatter5_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsaz_1024_scatter5_avx2) #define rsaz_1024_sqr_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsaz_1024_sqr_avx2) #define rsaz_avx2_preferred BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, rsaz_avx2_preferred) #define s2i_ASN1_INTEGER BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, s2i_ASN1_INTEGER) #define s2i_ASN1_OCTET_STRING BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, s2i_ASN1_OCTET_STRING) /* per-ISA SHA dispatch symbols */ #define sha1_avx2_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha1_avx2_capable) #define sha1_avx_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha1_avx_capable) #define sha1_block_data_order_avx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha1_block_data_order_avx) #define sha1_block_data_order_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha1_block_data_order_avx2) #define 
sha1_block_data_order_hw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha1_block_data_order_hw) #define sha1_block_data_order_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha1_block_data_order_nohw) #define sha1_block_data_order_ssse3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha1_block_data_order_ssse3) #define sha1_hw_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha1_hw_capable) #define sha1_ssse3_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha1_ssse3_capable) #define sha256_avx_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha256_avx_capable) #define sha256_block_data_order_avx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha256_block_data_order_avx) #define sha256_block_data_order_hw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha256_block_data_order_hw) #define sha256_block_data_order_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha256_block_data_order_nohw) #define sha256_block_data_order_ssse3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha256_block_data_order_ssse3) #define sha256_hw_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha256_hw_capable) #define sha256_ssse3_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha256_ssse3_capable) #define sha512_avx_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha512_avx_capable) #define sha512_block_data_order_avx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha512_block_data_order_avx) #define sha512_block_data_order_hw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha512_block_data_order_hw) #define sha512_block_data_order_nohw BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha512_block_data_order_nohw) #define sha512_hw_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sha512_hw_capable) /* sk_ (STACK_OF) per-type accessor symbols */ #define sk_ACCESS_DESCRIPTION_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_call_free_func) #define sk_ACCESS_DESCRIPTION_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_new_null) #define sk_ACCESS_DESCRIPTION_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_num) #define sk_ACCESS_DESCRIPTION_pop_free 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_pop_free) #define sk_ACCESS_DESCRIPTION_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_push) #define sk_ACCESS_DESCRIPTION_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_value) #define sk_ASN1_INTEGER_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_num) #define sk_ASN1_INTEGER_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_push) #define sk_ASN1_INTEGER_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_value) #define sk_ASN1_OBJECT_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_call_cmp_func) #define sk_ASN1_OBJECT_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_call_copy_func) #define sk_ASN1_OBJECT_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_call_free_func) #define sk_ASN1_OBJECT_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_deep_copy) #define sk_ASN1_OBJECT_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_dup) #define sk_ASN1_OBJECT_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_find) #define sk_ASN1_OBJECT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_free) #define sk_ASN1_OBJECT_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_is_sorted) #define sk_ASN1_OBJECT_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_new_null) #define sk_ASN1_OBJECT_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_num) #define sk_ASN1_OBJECT_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_pop_free) #define sk_ASN1_OBJECT_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_push) #define sk_ASN1_OBJECT_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_set_cmp_func) #define sk_ASN1_OBJECT_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_sort) #define sk_ASN1_OBJECT_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_value) #define sk_ASN1_TYPE_num 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_num) #define sk_ASN1_TYPE_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_push) #define sk_ASN1_TYPE_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_value) #define sk_ASN1_VALUE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_free) #define sk_ASN1_VALUE_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_new_null) #define sk_ASN1_VALUE_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_num) #define sk_ASN1_VALUE_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_pop) #define sk_ASN1_VALUE_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_push) #define sk_ASN1_VALUE_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_value) #define sk_CONF_VALUE_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_call_free_func) #define sk_CONF_VALUE_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_delete_ptr) #define sk_CONF_VALUE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_free) #define sk_CONF_VALUE_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_new_null) #define sk_CONF_VALUE_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_num) #define sk_CONF_VALUE_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_pop) #define sk_CONF_VALUE_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_pop_free) #define sk_CONF_VALUE_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_push) #define sk_CONF_VALUE_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_value) #define sk_CRYPTO_BUFFER_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_call_copy_func) #define sk_CRYPTO_BUFFER_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_call_free_func) #define sk_CRYPTO_BUFFER_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_deep_copy) #define sk_CRYPTO_BUFFER_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_new_null) 
#define sk_CRYPTO_BUFFER_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_num) #define sk_CRYPTO_BUFFER_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_pop) #define sk_CRYPTO_BUFFER_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_pop_free) #define sk_CRYPTO_BUFFER_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_push) #define sk_CRYPTO_BUFFER_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_set) #define sk_CRYPTO_BUFFER_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_value) #define sk_DIST_POINT_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_call_free_func) #define sk_DIST_POINT_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_new_null) #define sk_DIST_POINT_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_num) #define sk_DIST_POINT_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_pop_free) #define sk_DIST_POINT_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_push) #define sk_DIST_POINT_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_value) #define sk_GENERAL_NAME_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_call_free_func) #define sk_GENERAL_NAME_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_new_null) #define sk_GENERAL_NAME_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_num) #define sk_GENERAL_NAME_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_pop_free) #define sk_GENERAL_NAME_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_push) #define sk_GENERAL_NAME_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_set) #define sk_GENERAL_NAME_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_value) #define sk_GENERAL_SUBTREE_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_new_null) #define sk_GENERAL_SUBTREE_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_num) #define 
sk_GENERAL_SUBTREE_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_push) #define sk_GENERAL_SUBTREE_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_value) #define sk_OPENSSL_STRING_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_call_cmp_func) #define sk_OPENSSL_STRING_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_call_copy_func) #define sk_OPENSSL_STRING_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_call_free_func) #define sk_OPENSSL_STRING_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_deep_copy) #define sk_OPENSSL_STRING_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_find) #define sk_OPENSSL_STRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_free) #define sk_OPENSSL_STRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_new) #define sk_OPENSSL_STRING_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_new_null) #define sk_OPENSSL_STRING_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_num) #define sk_OPENSSL_STRING_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_pop_free) #define sk_OPENSSL_STRING_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_push) #define sk_OPENSSL_STRING_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_sort) #define sk_OPENSSL_STRING_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_value) #define sk_POLICYINFO_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_call_cmp_func) #define sk_POLICYINFO_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_call_free_func) #define sk_POLICYINFO_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_find) #define sk_POLICYINFO_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_is_sorted) #define sk_POLICYINFO_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_new_null) #define 
sk_POLICYINFO_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_num) #define sk_POLICYINFO_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_pop_free) #define sk_POLICYINFO_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_push) #define sk_POLICYINFO_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_set_cmp_func) #define sk_POLICYINFO_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_sort) #define sk_POLICYINFO_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_value) #define sk_POLICYQUALINFO_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_new_null) #define sk_POLICYQUALINFO_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_num) #define sk_POLICYQUALINFO_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_push) #define sk_POLICYQUALINFO_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_value) #define sk_POLICY_MAPPING_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_call_cmp_func) #define sk_POLICY_MAPPING_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_call_free_func) #define sk_POLICY_MAPPING_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_find) #define sk_POLICY_MAPPING_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_is_sorted) #define sk_POLICY_MAPPING_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_new_null) #define sk_POLICY_MAPPING_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_num) #define sk_POLICY_MAPPING_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_pop_free) #define sk_POLICY_MAPPING_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_push) #define sk_POLICY_MAPPING_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_set_cmp_func) #define sk_POLICY_MAPPING_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_sort) #define sk_POLICY_MAPPING_value 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_value) #define sk_SRTP_PROTECTION_PROFILE_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SRTP_PROTECTION_PROFILE_new_null) #define sk_SRTP_PROTECTION_PROFILE_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SRTP_PROTECTION_PROFILE_num) #define sk_SRTP_PROTECTION_PROFILE_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SRTP_PROTECTION_PROFILE_push) #define sk_SSL_CIPHER_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_CIPHER_call_cmp_func) #define sk_SSL_CIPHER_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_CIPHER_delete) #define sk_SSL_CIPHER_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_CIPHER_dup) #define sk_SSL_CIPHER_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_CIPHER_find) #define sk_SSL_CIPHER_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_CIPHER_new_null) #define sk_SSL_CIPHER_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_CIPHER_num) #define sk_SSL_CIPHER_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_CIPHER_push) #define sk_SSL_CIPHER_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_CIPHER_value) #define sk_TRUST_TOKEN_PRETOKEN_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_call_free_func) #define sk_TRUST_TOKEN_PRETOKEN_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_new_null) #define sk_TRUST_TOKEN_PRETOKEN_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_num) #define sk_TRUST_TOKEN_PRETOKEN_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_pop_free) #define sk_TRUST_TOKEN_PRETOKEN_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_push) #define sk_TRUST_TOKEN_PRETOKEN_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_value) #define sk_TRUST_TOKEN_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_call_free_func) #define sk_TRUST_TOKEN_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_new_null) 
#define sk_TRUST_TOKEN_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_pop_free) #define sk_TRUST_TOKEN_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_push) #define sk_X509_ATTRIBUTE_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_delete) #define sk_X509_ATTRIBUTE_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_new_null) #define sk_X509_ATTRIBUTE_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_num) #define sk_X509_ATTRIBUTE_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_push) #define sk_X509_ATTRIBUTE_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_value) #define sk_X509_CRL_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_call_free_func) #define sk_X509_CRL_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_free) #define sk_X509_CRL_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_new_null) #define sk_X509_CRL_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_num) #define sk_X509_CRL_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_pop) #define sk_X509_CRL_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_pop_free) #define sk_X509_CRL_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_push) #define sk_X509_CRL_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_value) #define sk_X509_EXTENSION_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_call_free_func) #define sk_X509_EXTENSION_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_delete) #define sk_X509_EXTENSION_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_free) #define sk_X509_EXTENSION_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_insert) #define sk_X509_EXTENSION_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_new_null) #define sk_X509_EXTENSION_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_num) #define sk_X509_EXTENSION_pop_free 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_pop_free) #define sk_X509_EXTENSION_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_push) #define sk_X509_EXTENSION_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_set) #define sk_X509_EXTENSION_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_value) #define sk_X509_INFO_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_call_free_func) #define sk_X509_INFO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_free) #define sk_X509_INFO_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_new_null) #define sk_X509_INFO_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_num) #define sk_X509_INFO_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_pop) #define sk_X509_INFO_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_pop_free) #define sk_X509_INFO_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_push) #define sk_X509_INFO_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_value) #define sk_X509_LOOKUP_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_call_free_func) #define sk_X509_LOOKUP_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_new_null) #define sk_X509_LOOKUP_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_num) #define sk_X509_LOOKUP_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_pop_free) #define sk_X509_LOOKUP_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_push) #define sk_X509_LOOKUP_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_value) #define sk_X509_NAME_ENTRY_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_call_free_func) #define sk_X509_NAME_ENTRY_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_delete) #define sk_X509_NAME_ENTRY_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_free) #define sk_X509_NAME_ENTRY_insert
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_insert) #define sk_X509_NAME_ENTRY_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_new_null) #define sk_X509_NAME_ENTRY_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_num) #define sk_X509_NAME_ENTRY_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_pop_free) #define sk_X509_NAME_ENTRY_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_push) #define sk_X509_NAME_ENTRY_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_set) #define sk_X509_NAME_ENTRY_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_value) #define sk_X509_NAME_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_call_cmp_func) #define sk_X509_NAME_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_call_copy_func) #define sk_X509_NAME_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_call_free_func) #define sk_X509_NAME_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_deep_copy) #define sk_X509_NAME_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_find) #define sk_X509_NAME_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_new) #define sk_X509_NAME_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_new_null) #define sk_X509_NAME_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_num) #define sk_X509_NAME_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_pop_free) #define sk_X509_NAME_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_set) #define sk_X509_NAME_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_set_cmp_func) #define sk_X509_NAME_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_sort) #define sk_X509_NAME_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_value) #define sk_X509_OBJECT_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_call_cmp_func) #define sk_X509_OBJECT_call_copy_func 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_call_copy_func) #define sk_X509_OBJECT_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_call_free_func) #define sk_X509_OBJECT_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_deep_copy) #define sk_X509_OBJECT_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_find) #define sk_X509_OBJECT_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_new) #define sk_X509_OBJECT_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_num) #define sk_X509_OBJECT_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_pop_free) #define sk_X509_OBJECT_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_push) #define sk_X509_OBJECT_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_sort) #define sk_X509_OBJECT_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_value) #define sk_X509_REVOKED_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_call_cmp_func) #define sk_X509_REVOKED_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_find) #define sk_X509_REVOKED_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_is_sorted) #define sk_X509_REVOKED_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_new) #define sk_X509_REVOKED_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_num) #define sk_X509_REVOKED_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_push) #define sk_X509_REVOKED_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_set_cmp_func) #define sk_X509_REVOKED_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_sort) #define sk_X509_REVOKED_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_value) #define sk_X509_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_call_free_func) #define sk_X509_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_delete) #define sk_X509_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
sk_X509_delete_ptr) #define sk_X509_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_dup) #define sk_X509_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_free) #define sk_X509_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_new_null) #define sk_X509_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_num) #define sk_X509_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_pop) #define sk_X509_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_pop_free) #define sk_X509_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_push) #define sk_X509_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_set) #define sk_X509_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_shift) #define sk_X509_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_value) #define sk_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_free) #define sk_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_new_null) #define sk_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_num) #define sk_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_pop) #define sk_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_pop_free) #define sk_pop_free_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_pop_free_ex) #define sk_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_push) #define sk_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_value) #define sk_void_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_free) #define sk_void_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_new_null) #define sk_void_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_num) #define sk_void_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_push) #define sk_void_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_set) #define sk_void_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_value) #define slhdsa_copy_keypair_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_copy_keypair_addr) #define slhdsa_fors_pk_from_sig BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_fors_pk_from_sig) #define slhdsa_fors_sign 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_fors_sign) #define slhdsa_fors_sk_gen BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_fors_sk_gen) #define slhdsa_fors_treehash BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_fors_treehash) #define slhdsa_get_tree_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_get_tree_index) #define slhdsa_ht_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_ht_sign) #define slhdsa_ht_verify BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_ht_verify) #define slhdsa_set_chain_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_set_chain_addr) #define slhdsa_set_hash_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_set_hash_addr) #define slhdsa_set_keypair_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_set_keypair_addr) #define slhdsa_set_layer_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_set_layer_addr) #define slhdsa_set_tree_addr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_set_tree_addr) #define slhdsa_set_tree_height BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_set_tree_height) #define slhdsa_set_tree_index BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_set_tree_index) #define slhdsa_set_type BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_set_type) #define slhdsa_thash_f BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_thash_f) #define slhdsa_thash_h BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_thash_h) #define slhdsa_thash_hmsg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_thash_hmsg) #define slhdsa_thash_prf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_thash_prf) #define slhdsa_thash_prfmsg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_thash_prfmsg) #define slhdsa_thash_tk BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_thash_tk) #define slhdsa_thash_tl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_thash_tl) #define slhdsa_treehash BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_treehash) #define slhdsa_wots_pk_from_sig BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_wots_pk_from_sig) #define slhdsa_wots_pk_gen 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_wots_pk_gen) #define slhdsa_wots_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_wots_sign) #define slhdsa_xmss_pk_from_sig BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_xmss_pk_from_sig) #define slhdsa_xmss_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, slhdsa_xmss_sign) #define v2i_GENERAL_NAME BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v2i_GENERAL_NAME) #define v2i_GENERAL_NAMES BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v2i_GENERAL_NAMES) #define v2i_GENERAL_NAME_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v2i_GENERAL_NAME_ex) #define v3_akey_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_akey_id) #define v3_alt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_alt) #define v3_bcons BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_bcons) #define v3_cpols BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_cpols) #define v3_crl_invdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_crl_invdate) #define v3_crl_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_crl_num) #define v3_crl_reason BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_crl_reason) #define v3_crld BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_crld) #define v3_delta_crl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_delta_crl) #define v3_ext_ku BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_ext_ku) #define v3_freshest_crl BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_freshest_crl) #define v3_idp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_idp) #define v3_info BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_info) #define v3_inhibit_anyp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_inhibit_anyp) #define v3_key_usage BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_key_usage) #define v3_name_constraints BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_name_constraints) #define v3_ns_ia5_list BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_ns_ia5_list) #define v3_nscert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_nscert) #define v3_ocsp_accresp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_ocsp_accresp) #define v3_ocsp_nocheck 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_ocsp_nocheck) #define v3_policy_constraints BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_policy_constraints) #define v3_policy_mappings BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_policy_mappings) #define v3_sinfo BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_sinfo) #define v3_skey_id BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, v3_skey_id) #define voprf_exp2_blind BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_exp2_blind) #define voprf_exp2_client_key_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_exp2_client_key_from_bytes) #define voprf_exp2_derive_key_from_secret BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_exp2_derive_key_from_secret) #define voprf_exp2_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_exp2_generate_key) #define voprf_exp2_issuer_key_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_exp2_issuer_key_from_bytes) #define voprf_exp2_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_exp2_read) #define voprf_exp2_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_exp2_sign) #define voprf_exp2_unblind BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_exp2_unblind) #define voprf_pst1_blind BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_pst1_blind) #define voprf_pst1_client_key_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_pst1_client_key_from_bytes) #define voprf_pst1_derive_key_from_secret BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_pst1_derive_key_from_secret) #define voprf_pst1_generate_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_pst1_generate_key) #define voprf_pst1_issuer_key_from_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_pst1_issuer_key_from_bytes) #define voprf_pst1_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_pst1_read) #define voprf_pst1_sign BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_pst1_sign) #define voprf_pst1_sign_with_proof_scalar_for_testing BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_pst1_sign_with_proof_scalar_for_testing) #define voprf_pst1_unblind 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, voprf_pst1_unblind) #define vpaes_capable BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, vpaes_capable) #define vpaes_cbc_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, vpaes_cbc_encrypt) #define vpaes_ctr32_encrypt_blocks BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, vpaes_ctr32_encrypt_blocks) #define vpaes_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, vpaes_decrypt) #define vpaes_decrypt_key_to_bsaes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, vpaes_decrypt_key_to_bsaes) #define vpaes_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, vpaes_encrypt) #define vpaes_set_decrypt_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, vpaes_set_decrypt_key) #define vpaes_set_encrypt_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, vpaes_set_encrypt_key) #define x25519_asn1_meth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_asn1_meth) #define x25519_ge_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_ge_add) #define x25519_ge_frombytes_vartime BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_ge_frombytes_vartime) #define x25519_ge_p1p1_to_p2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_ge_p1p1_to_p2) #define x25519_ge_p1p1_to_p3 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_ge_p1p1_to_p3) #define x25519_ge_p3_to_cached BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_ge_p3_to_cached) #define x25519_ge_scalarmult BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_ge_scalarmult) #define x25519_ge_scalarmult_base BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_ge_scalarmult_base) #define x25519_ge_scalarmult_base_adx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_ge_scalarmult_base_adx) #define x25519_ge_scalarmult_small_precomp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_ge_scalarmult_small_precomp) #define x25519_ge_sub BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_ge_sub) #define x25519_ge_tobytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_ge_tobytes) #define x25519_pkey_meth BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_pkey_meth) #define x25519_sc_reduce 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_sc_reduce) #define x25519_scalar_mult_adx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x25519_scalar_mult_adx) #define x509V3_add_value_asn1_string BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509V3_add_value_asn1_string) #define x509_check_issued_with_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509_check_issued_with_callback) #define x509_digest_sign_algorithm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509_digest_sign_algorithm) #define x509_digest_verify_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509_digest_verify_init) #define x509_print_rsa_pss_params BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509_print_rsa_pss_params) #define x509_rsa_ctx_to_pss BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509_rsa_ctx_to_pss) #define x509_rsa_pss_to_ctx BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509_rsa_pss_to_ctx) #define x509v3_a2i_ipadd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509v3_a2i_ipadd) #define x509v3_bytes_to_hex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509v3_bytes_to_hex) #define x509v3_cache_extensions BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509v3_cache_extensions) #define x509v3_conf_name_matches BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509v3_conf_name_matches) #define x509v3_hex_to_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509v3_hex_to_bytes) #define x509v3_looks_like_dns_name BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, x509v3_looks_like_dns_name) #define sk_TRUST_TOKEN_PRETOKEN_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_call_free_func) #define sk_TRUST_TOKEN_PRETOKEN_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_call_copy_func) #define sk_TRUST_TOKEN_PRETOKEN_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_call_cmp_func) #define sk_TRUST_TOKEN_PRETOKEN_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_new) #define sk_TRUST_TOKEN_PRETOKEN_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_new_null) #define 
sk_TRUST_TOKEN_PRETOKEN_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_num) #define sk_TRUST_TOKEN_PRETOKEN_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_zero) #define sk_TRUST_TOKEN_PRETOKEN_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_value) #define sk_TRUST_TOKEN_PRETOKEN_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_set) #define sk_TRUST_TOKEN_PRETOKEN_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_free) #define sk_TRUST_TOKEN_PRETOKEN_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_pop_free) #define sk_TRUST_TOKEN_PRETOKEN_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_insert) #define sk_TRUST_TOKEN_PRETOKEN_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_delete) #define sk_TRUST_TOKEN_PRETOKEN_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_delete_ptr) #define sk_TRUST_TOKEN_PRETOKEN_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_find) #define sk_TRUST_TOKEN_PRETOKEN_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_shift) #define sk_TRUST_TOKEN_PRETOKEN_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_push) #define sk_TRUST_TOKEN_PRETOKEN_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_pop) #define sk_TRUST_TOKEN_PRETOKEN_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_dup) #define sk_TRUST_TOKEN_PRETOKEN_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_sort) #define sk_TRUST_TOKEN_PRETOKEN_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_is_sorted) #define sk_TRUST_TOKEN_PRETOKEN_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_set_cmp_func) #define sk_TRUST_TOKEN_PRETOKEN_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_deep_copy) #define sk_BIGNUM_call_free_func 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_call_free_func) #define sk_BIGNUM_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_call_copy_func) #define sk_BIGNUM_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_call_cmp_func) #define sk_BIGNUM_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_new) #define sk_BIGNUM_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_new_null) #define sk_BIGNUM_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_num) #define sk_BIGNUM_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_zero) #define sk_BIGNUM_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_value) #define sk_BIGNUM_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_set) #define sk_BIGNUM_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_free) #define sk_BIGNUM_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_pop_free) #define sk_BIGNUM_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_insert) #define sk_BIGNUM_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_delete) #define sk_BIGNUM_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_delete_ptr) #define sk_BIGNUM_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_find) #define sk_BIGNUM_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_shift) #define sk_BIGNUM_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_push) #define sk_BIGNUM_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_pop) #define sk_BIGNUM_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_dup) #define sk_BIGNUM_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_sort) #define sk_BIGNUM_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_is_sorted) #define sk_BIGNUM_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_set_cmp_func) #define sk_BIGNUM_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIGNUM_deep_copy) #define sk_X509_LOOKUP_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_call_free_func) 
#define sk_X509_LOOKUP_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_call_copy_func) #define sk_X509_LOOKUP_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_call_cmp_func) #define sk_X509_LOOKUP_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_new) #define sk_X509_LOOKUP_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_new_null) #define sk_X509_LOOKUP_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_num) #define sk_X509_LOOKUP_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_zero) #define sk_X509_LOOKUP_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_value) #define sk_X509_LOOKUP_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_set) #define sk_X509_LOOKUP_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_free) #define sk_X509_LOOKUP_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_pop_free) #define sk_X509_LOOKUP_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_insert) #define sk_X509_LOOKUP_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_delete) #define sk_X509_LOOKUP_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_delete_ptr) #define sk_X509_LOOKUP_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_find) #define sk_X509_LOOKUP_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_shift) #define sk_X509_LOOKUP_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_push) #define sk_X509_LOOKUP_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_pop) #define sk_X509_LOOKUP_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_dup) #define sk_X509_LOOKUP_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_sort) #define sk_X509_LOOKUP_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_is_sorted) #define sk_X509_LOOKUP_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_set_cmp_func) #define sk_X509_LOOKUP_deep_copy 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_LOOKUP_deep_copy) #define sk_BY_DIR_HASH_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_call_free_func) #define sk_BY_DIR_HASH_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_call_copy_func) #define sk_BY_DIR_HASH_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_call_cmp_func) #define sk_BY_DIR_HASH_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_new) #define sk_BY_DIR_HASH_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_new_null) #define sk_BY_DIR_HASH_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_num) #define sk_BY_DIR_HASH_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_zero) #define sk_BY_DIR_HASH_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_value) #define sk_BY_DIR_HASH_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_set) #define sk_BY_DIR_HASH_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_free) #define sk_BY_DIR_HASH_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_pop_free) #define sk_BY_DIR_HASH_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_insert) #define sk_BY_DIR_HASH_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_delete) #define sk_BY_DIR_HASH_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_delete_ptr) #define sk_BY_DIR_HASH_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_find) #define sk_BY_DIR_HASH_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_shift) #define sk_BY_DIR_HASH_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_push) #define sk_BY_DIR_HASH_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_pop) #define sk_BY_DIR_HASH_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_dup) #define sk_BY_DIR_HASH_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_sort) #define sk_BY_DIR_HASH_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
sk_BY_DIR_HASH_is_sorted) #define sk_BY_DIR_HASH_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_set_cmp_func) #define sk_BY_DIR_HASH_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_HASH_deep_copy) #define sk_BY_DIR_ENTRY_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_call_free_func) #define sk_BY_DIR_ENTRY_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_call_copy_func) #define sk_BY_DIR_ENTRY_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_call_cmp_func) #define sk_BY_DIR_ENTRY_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_new) #define sk_BY_DIR_ENTRY_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_new_null) #define sk_BY_DIR_ENTRY_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_num) #define sk_BY_DIR_ENTRY_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_zero) #define sk_BY_DIR_ENTRY_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_value) #define sk_BY_DIR_ENTRY_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_set) #define sk_BY_DIR_ENTRY_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_free) #define sk_BY_DIR_ENTRY_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_pop_free) #define sk_BY_DIR_ENTRY_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_insert) #define sk_BY_DIR_ENTRY_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_delete) #define sk_BY_DIR_ENTRY_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_delete_ptr) #define sk_BY_DIR_ENTRY_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_find) #define sk_BY_DIR_ENTRY_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_shift) #define sk_BY_DIR_ENTRY_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_push) #define sk_BY_DIR_ENTRY_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_pop) #define sk_BY_DIR_ENTRY_dup 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_dup) #define sk_BY_DIR_ENTRY_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_sort) #define sk_BY_DIR_ENTRY_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_is_sorted) #define sk_BY_DIR_ENTRY_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_set_cmp_func) #define sk_BY_DIR_ENTRY_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BY_DIR_ENTRY_deep_copy) #define sk_X509V3_EXT_METHOD_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_call_free_func) #define sk_X509V3_EXT_METHOD_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_call_copy_func) #define sk_X509V3_EXT_METHOD_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_call_cmp_func) #define sk_X509V3_EXT_METHOD_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_new) #define sk_X509V3_EXT_METHOD_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_new_null) #define sk_X509V3_EXT_METHOD_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_num) #define sk_X509V3_EXT_METHOD_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_zero) #define sk_X509V3_EXT_METHOD_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_value) #define sk_X509V3_EXT_METHOD_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_set) #define sk_X509V3_EXT_METHOD_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_free) #define sk_X509V3_EXT_METHOD_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_pop_free) #define sk_X509V3_EXT_METHOD_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_insert) #define sk_X509V3_EXT_METHOD_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_delete) #define sk_X509V3_EXT_METHOD_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_delete_ptr) #define sk_X509V3_EXT_METHOD_find 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_find) #define sk_X509V3_EXT_METHOD_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_shift) #define sk_X509V3_EXT_METHOD_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_push) #define sk_X509V3_EXT_METHOD_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_pop) #define sk_X509V3_EXT_METHOD_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_dup) #define sk_X509V3_EXT_METHOD_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_sort) #define sk_X509V3_EXT_METHOD_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_is_sorted) #define sk_X509V3_EXT_METHOD_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_set_cmp_func) #define sk_X509V3_EXT_METHOD_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509V3_EXT_METHOD_deep_copy) #define sk_X509_POLICY_NODE_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_call_free_func) #define sk_X509_POLICY_NODE_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_call_copy_func) #define sk_X509_POLICY_NODE_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_call_cmp_func) #define sk_X509_POLICY_NODE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_new) #define sk_X509_POLICY_NODE_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_new_null) #define sk_X509_POLICY_NODE_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_num) #define sk_X509_POLICY_NODE_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_zero) #define sk_X509_POLICY_NODE_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_value) #define sk_X509_POLICY_NODE_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_set) #define sk_X509_POLICY_NODE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_free) #define sk_X509_POLICY_NODE_pop_free 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_pop_free) #define sk_X509_POLICY_NODE_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_insert) #define sk_X509_POLICY_NODE_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_delete) #define sk_X509_POLICY_NODE_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_delete_ptr) #define sk_X509_POLICY_NODE_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_find) #define sk_X509_POLICY_NODE_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_shift) #define sk_X509_POLICY_NODE_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_push) #define sk_X509_POLICY_NODE_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_pop) #define sk_X509_POLICY_NODE_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_dup) #define sk_X509_POLICY_NODE_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_sort) #define sk_X509_POLICY_NODE_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_is_sorted) #define sk_X509_POLICY_NODE_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_set_cmp_func) #define sk_X509_POLICY_NODE_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_NODE_deep_copy) #define sk_X509_POLICY_LEVEL_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_call_free_func) #define sk_X509_POLICY_LEVEL_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_call_copy_func) #define sk_X509_POLICY_LEVEL_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_call_cmp_func) #define sk_X509_POLICY_LEVEL_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_new) #define sk_X509_POLICY_LEVEL_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_new_null) #define sk_X509_POLICY_LEVEL_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_num) #define sk_X509_POLICY_LEVEL_zero 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_zero) #define sk_X509_POLICY_LEVEL_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_value) #define sk_X509_POLICY_LEVEL_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_set) #define sk_X509_POLICY_LEVEL_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_free) #define sk_X509_POLICY_LEVEL_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_pop_free) #define sk_X509_POLICY_LEVEL_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_insert) #define sk_X509_POLICY_LEVEL_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_delete) #define sk_X509_POLICY_LEVEL_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_delete_ptr) #define sk_X509_POLICY_LEVEL_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_find) #define sk_X509_POLICY_LEVEL_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_shift) #define sk_X509_POLICY_LEVEL_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_push) #define sk_X509_POLICY_LEVEL_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_pop) #define sk_X509_POLICY_LEVEL_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_dup) #define sk_X509_POLICY_LEVEL_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_sort) #define sk_X509_POLICY_LEVEL_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_is_sorted) #define sk_X509_POLICY_LEVEL_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_set_cmp_func) #define sk_X509_POLICY_LEVEL_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_POLICY_LEVEL_deep_copy) #define sk_STACK_OF_X509_NAME_ENTRY_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_call_free_func) #define sk_STACK_OF_X509_NAME_ENTRY_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_call_copy_func) 
#define sk_STACK_OF_X509_NAME_ENTRY_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_call_cmp_func) #define sk_STACK_OF_X509_NAME_ENTRY_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_new) #define sk_STACK_OF_X509_NAME_ENTRY_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_new_null) #define sk_STACK_OF_X509_NAME_ENTRY_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_num) #define sk_STACK_OF_X509_NAME_ENTRY_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_zero) #define sk_STACK_OF_X509_NAME_ENTRY_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_value) #define sk_STACK_OF_X509_NAME_ENTRY_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_set) #define sk_STACK_OF_X509_NAME_ENTRY_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_free) #define sk_STACK_OF_X509_NAME_ENTRY_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_pop_free) #define sk_STACK_OF_X509_NAME_ENTRY_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_insert) #define sk_STACK_OF_X509_NAME_ENTRY_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_delete) #define sk_STACK_OF_X509_NAME_ENTRY_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_delete_ptr) #define sk_STACK_OF_X509_NAME_ENTRY_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_find) #define sk_STACK_OF_X509_NAME_ENTRY_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_shift) #define sk_STACK_OF_X509_NAME_ENTRY_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_push) #define sk_STACK_OF_X509_NAME_ENTRY_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_pop) #define sk_STACK_OF_X509_NAME_ENTRY_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_dup) #define 
sk_STACK_OF_X509_NAME_ENTRY_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_sort) #define sk_STACK_OF_X509_NAME_ENTRY_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_is_sorted) #define sk_STACK_OF_X509_NAME_ENTRY_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_set_cmp_func) #define sk_STACK_OF_X509_NAME_ENTRY_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_STACK_OF_X509_NAME_ENTRY_deep_copy) #define sk_CRYPTO_EX_DATA_FUNCS_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_call_free_func) #define sk_CRYPTO_EX_DATA_FUNCS_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_call_copy_func) #define sk_CRYPTO_EX_DATA_FUNCS_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_call_cmp_func) #define sk_CRYPTO_EX_DATA_FUNCS_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_new) #define sk_CRYPTO_EX_DATA_FUNCS_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_new_null) #define sk_CRYPTO_EX_DATA_FUNCS_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_num) #define sk_CRYPTO_EX_DATA_FUNCS_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_zero) #define sk_CRYPTO_EX_DATA_FUNCS_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_value) #define sk_CRYPTO_EX_DATA_FUNCS_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_set) #define sk_CRYPTO_EX_DATA_FUNCS_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_free) #define sk_CRYPTO_EX_DATA_FUNCS_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_pop_free) #define sk_CRYPTO_EX_DATA_FUNCS_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_insert) #define sk_CRYPTO_EX_DATA_FUNCS_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_delete) #define sk_CRYPTO_EX_DATA_FUNCS_delete_ptr 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_delete_ptr) #define sk_CRYPTO_EX_DATA_FUNCS_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_find) #define sk_CRYPTO_EX_DATA_FUNCS_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_shift) #define sk_CRYPTO_EX_DATA_FUNCS_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_push) #define sk_CRYPTO_EX_DATA_FUNCS_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_pop) #define sk_CRYPTO_EX_DATA_FUNCS_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_dup) #define sk_CRYPTO_EX_DATA_FUNCS_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_sort) #define sk_CRYPTO_EX_DATA_FUNCS_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_is_sorted) #define sk_CRYPTO_EX_DATA_FUNCS_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_set_cmp_func) #define sk_CRYPTO_EX_DATA_FUNCS_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_EX_DATA_FUNCS_deep_copy) #define sk_X509_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_call_free_func) #define sk_X509_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_call_copy_func) #define sk_X509_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_call_cmp_func) #define sk_X509_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_new) #define sk_X509_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_new_null) #define sk_X509_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_num) #define sk_X509_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_zero) #define sk_X509_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_value) #define sk_X509_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_set) #define sk_X509_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_free) #define sk_X509_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_pop_free) #define sk_X509_insert 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_insert) #define sk_X509_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_delete) #define sk_X509_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_delete_ptr) #define sk_X509_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_find) #define sk_X509_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_shift) #define sk_X509_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_push) #define sk_X509_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_pop) #define sk_X509_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_dup) #define sk_X509_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_sort) #define sk_X509_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_is_sorted) #define sk_X509_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_set_cmp_func) #define sk_X509_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_deep_copy) #define sk_GENERAL_NAME_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_call_free_func) #define sk_GENERAL_NAME_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_call_copy_func) #define sk_GENERAL_NAME_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_call_cmp_func) #define sk_GENERAL_NAME_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_new) #define sk_GENERAL_NAME_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_new_null) #define sk_GENERAL_NAME_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_num) #define sk_GENERAL_NAME_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_zero) #define sk_GENERAL_NAME_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_value) #define sk_GENERAL_NAME_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_set) #define sk_GENERAL_NAME_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_free) #define sk_GENERAL_NAME_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_pop_free) #define 
sk_GENERAL_NAME_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_insert) #define sk_GENERAL_NAME_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_delete) #define sk_GENERAL_NAME_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_delete_ptr) #define sk_GENERAL_NAME_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_find) #define sk_GENERAL_NAME_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_shift) #define sk_GENERAL_NAME_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_push) #define sk_GENERAL_NAME_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_pop) #define sk_GENERAL_NAME_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_dup) #define sk_GENERAL_NAME_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_sort) #define sk_GENERAL_NAME_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_is_sorted) #define sk_GENERAL_NAME_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_set_cmp_func) #define sk_GENERAL_NAME_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_NAME_deep_copy) #define sk_X509_CRL_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_call_free_func) #define sk_X509_CRL_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_call_copy_func) #define sk_X509_CRL_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_call_cmp_func) #define sk_X509_CRL_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_new) #define sk_X509_CRL_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_new_null) #define sk_X509_CRL_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_num) #define sk_X509_CRL_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_zero) #define sk_X509_CRL_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_value) #define sk_X509_CRL_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_set) #define sk_X509_CRL_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
sk_X509_CRL_free) #define sk_X509_CRL_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_pop_free) #define sk_X509_CRL_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_insert) #define sk_X509_CRL_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_delete) #define sk_X509_CRL_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_delete_ptr) #define sk_X509_CRL_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_find) #define sk_X509_CRL_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_shift) #define sk_X509_CRL_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_push) #define sk_X509_CRL_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_pop) #define sk_X509_CRL_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_dup) #define sk_X509_CRL_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_sort) #define sk_X509_CRL_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_is_sorted) #define sk_X509_CRL_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_set_cmp_func) #define sk_X509_CRL_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_CRL_deep_copy) #define sk_X509_REVOKED_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_call_free_func) #define sk_X509_REVOKED_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_call_copy_func) #define sk_X509_REVOKED_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_call_cmp_func) #define sk_X509_REVOKED_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_new) #define sk_X509_REVOKED_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_new_null) #define sk_X509_REVOKED_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_num) #define sk_X509_REVOKED_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_zero) #define sk_X509_REVOKED_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_value) #define sk_X509_REVOKED_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
sk_X509_REVOKED_set) #define sk_X509_REVOKED_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_free) #define sk_X509_REVOKED_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_pop_free) #define sk_X509_REVOKED_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_insert) #define sk_X509_REVOKED_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_delete) #define sk_X509_REVOKED_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_delete_ptr) #define sk_X509_REVOKED_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_find) #define sk_X509_REVOKED_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_shift) #define sk_X509_REVOKED_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_push) #define sk_X509_REVOKED_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_pop) #define sk_X509_REVOKED_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_dup) #define sk_X509_REVOKED_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_sort) #define sk_X509_REVOKED_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_is_sorted) #define sk_X509_REVOKED_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_set_cmp_func) #define sk_X509_REVOKED_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_REVOKED_deep_copy) #define sk_X509_NAME_ENTRY_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_call_free_func) #define sk_X509_NAME_ENTRY_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_call_copy_func) #define sk_X509_NAME_ENTRY_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_call_cmp_func) #define sk_X509_NAME_ENTRY_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_new) #define sk_X509_NAME_ENTRY_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_new_null) #define sk_X509_NAME_ENTRY_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_num) #define 
sk_X509_NAME_ENTRY_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_zero) #define sk_X509_NAME_ENTRY_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_value) #define sk_X509_NAME_ENTRY_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_set) #define sk_X509_NAME_ENTRY_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_free) #define sk_X509_NAME_ENTRY_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_pop_free) #define sk_X509_NAME_ENTRY_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_insert) #define sk_X509_NAME_ENTRY_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_delete) #define sk_X509_NAME_ENTRY_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_delete_ptr) #define sk_X509_NAME_ENTRY_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_find) #define sk_X509_NAME_ENTRY_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_shift) #define sk_X509_NAME_ENTRY_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_push) #define sk_X509_NAME_ENTRY_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_pop) #define sk_X509_NAME_ENTRY_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_dup) #define sk_X509_NAME_ENTRY_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_sort) #define sk_X509_NAME_ENTRY_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_is_sorted) #define sk_X509_NAME_ENTRY_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_set_cmp_func) #define sk_X509_NAME_ENTRY_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_deep_copy) #define sk_X509_NAME_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_call_free_func) #define sk_X509_NAME_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_call_copy_func) #define sk_X509_NAME_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_call_cmp_func) 
#define sk_X509_NAME_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_new) #define sk_X509_NAME_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_new_null) #define sk_X509_NAME_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_num) #define sk_X509_NAME_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_zero) #define sk_X509_NAME_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_value) #define sk_X509_NAME_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_set) #define sk_X509_NAME_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_free) #define sk_X509_NAME_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_pop_free) #define sk_X509_NAME_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_insert) #define sk_X509_NAME_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_delete) #define sk_X509_NAME_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_delete_ptr) #define sk_X509_NAME_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_find) #define sk_X509_NAME_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_shift) #define sk_X509_NAME_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_push) #define sk_X509_NAME_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_pop) #define sk_X509_NAME_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_dup) #define sk_X509_NAME_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_sort) #define sk_X509_NAME_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_is_sorted) #define sk_X509_NAME_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_set_cmp_func) #define sk_X509_NAME_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_NAME_deep_copy) #define sk_X509_EXTENSION_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_call_free_func) #define sk_X509_EXTENSION_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_call_copy_func) #define 
sk_X509_EXTENSION_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_call_cmp_func) #define sk_X509_EXTENSION_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_new) #define sk_X509_EXTENSION_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_new_null) #define sk_X509_EXTENSION_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_num) #define sk_X509_EXTENSION_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_zero) #define sk_X509_EXTENSION_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_value) #define sk_X509_EXTENSION_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_set) #define sk_X509_EXTENSION_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_free) #define sk_X509_EXTENSION_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_pop_free) #define sk_X509_EXTENSION_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_insert) #define sk_X509_EXTENSION_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_delete) #define sk_X509_EXTENSION_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_delete_ptr) #define sk_X509_EXTENSION_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_find) #define sk_X509_EXTENSION_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_shift) #define sk_X509_EXTENSION_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_push) #define sk_X509_EXTENSION_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_pop) #define sk_X509_EXTENSION_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_dup) #define sk_X509_EXTENSION_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_sort) #define sk_X509_EXTENSION_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_is_sorted) #define sk_X509_EXTENSION_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_set_cmp_func) #define sk_X509_EXTENSION_deep_copy 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_EXTENSION_deep_copy) #define sk_GENERAL_SUBTREE_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_call_free_func) #define sk_GENERAL_SUBTREE_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_call_copy_func) #define sk_GENERAL_SUBTREE_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_call_cmp_func) #define sk_GENERAL_SUBTREE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_new) #define sk_GENERAL_SUBTREE_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_new_null) #define sk_GENERAL_SUBTREE_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_num) #define sk_GENERAL_SUBTREE_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_zero) #define sk_GENERAL_SUBTREE_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_value) #define sk_GENERAL_SUBTREE_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_set) #define sk_GENERAL_SUBTREE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_free) #define sk_GENERAL_SUBTREE_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_pop_free) #define sk_GENERAL_SUBTREE_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_insert) #define sk_GENERAL_SUBTREE_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_delete) #define sk_GENERAL_SUBTREE_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_delete_ptr) #define sk_GENERAL_SUBTREE_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_find) #define sk_GENERAL_SUBTREE_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_shift) #define sk_GENERAL_SUBTREE_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_push) #define sk_GENERAL_SUBTREE_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_pop) #define sk_GENERAL_SUBTREE_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_dup) #define 
sk_GENERAL_SUBTREE_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_sort) #define sk_GENERAL_SUBTREE_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_is_sorted) #define sk_GENERAL_SUBTREE_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_set_cmp_func) #define sk_GENERAL_SUBTREE_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_deep_copy) #define sk_ACCESS_DESCRIPTION_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_call_free_func) #define sk_ACCESS_DESCRIPTION_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_call_copy_func) #define sk_ACCESS_DESCRIPTION_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_call_cmp_func) #define sk_ACCESS_DESCRIPTION_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_new) #define sk_ACCESS_DESCRIPTION_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_new_null) #define sk_ACCESS_DESCRIPTION_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_num) #define sk_ACCESS_DESCRIPTION_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_zero) #define sk_ACCESS_DESCRIPTION_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_value) #define sk_ACCESS_DESCRIPTION_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_set) #define sk_ACCESS_DESCRIPTION_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_free) #define sk_ACCESS_DESCRIPTION_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_pop_free) #define sk_ACCESS_DESCRIPTION_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_insert) #define sk_ACCESS_DESCRIPTION_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_delete) #define sk_ACCESS_DESCRIPTION_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_delete_ptr) #define sk_ACCESS_DESCRIPTION_find 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_find) #define sk_ACCESS_DESCRIPTION_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_shift) #define sk_ACCESS_DESCRIPTION_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_push) #define sk_ACCESS_DESCRIPTION_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_pop) #define sk_ACCESS_DESCRIPTION_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_dup) #define sk_ACCESS_DESCRIPTION_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_sort) #define sk_ACCESS_DESCRIPTION_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_is_sorted) #define sk_ACCESS_DESCRIPTION_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_set_cmp_func) #define sk_ACCESS_DESCRIPTION_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_deep_copy) #define sk_DIST_POINT_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_call_free_func) #define sk_DIST_POINT_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_call_copy_func) #define sk_DIST_POINT_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_call_cmp_func) #define sk_DIST_POINT_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_new) #define sk_DIST_POINT_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_new_null) #define sk_DIST_POINT_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_num) #define sk_DIST_POINT_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_zero) #define sk_DIST_POINT_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_value) #define sk_DIST_POINT_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_set) #define sk_DIST_POINT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_free) #define sk_DIST_POINT_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_pop_free) #define sk_DIST_POINT_insert 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_insert) #define sk_DIST_POINT_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_delete) #define sk_DIST_POINT_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_delete_ptr) #define sk_DIST_POINT_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_find) #define sk_DIST_POINT_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_shift) #define sk_DIST_POINT_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_push) #define sk_DIST_POINT_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_pop) #define sk_DIST_POINT_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_dup) #define sk_DIST_POINT_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_sort) #define sk_DIST_POINT_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_is_sorted) #define sk_DIST_POINT_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_set_cmp_func) #define sk_DIST_POINT_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_DIST_POINT_deep_copy) #define sk_POLICYQUALINFO_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_call_free_func) #define sk_POLICYQUALINFO_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_call_copy_func) #define sk_POLICYQUALINFO_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_call_cmp_func) #define sk_POLICYQUALINFO_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_new) #define sk_POLICYQUALINFO_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_new_null) #define sk_POLICYQUALINFO_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_num) #define sk_POLICYQUALINFO_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_zero) #define sk_POLICYQUALINFO_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_value) #define sk_POLICYQUALINFO_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_set) #define sk_POLICYQUALINFO_free 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_free) #define sk_POLICYQUALINFO_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_pop_free) #define sk_POLICYQUALINFO_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_insert) #define sk_POLICYQUALINFO_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_delete) #define sk_POLICYQUALINFO_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_delete_ptr) #define sk_POLICYQUALINFO_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_find) #define sk_POLICYQUALINFO_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_shift) #define sk_POLICYQUALINFO_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_push) #define sk_POLICYQUALINFO_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_pop) #define sk_POLICYQUALINFO_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_dup) #define sk_POLICYQUALINFO_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_sort) #define sk_POLICYQUALINFO_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_is_sorted) #define sk_POLICYQUALINFO_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_set_cmp_func) #define sk_POLICYQUALINFO_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYQUALINFO_deep_copy) #define sk_POLICYINFO_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_call_free_func) #define sk_POLICYINFO_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_call_copy_func) #define sk_POLICYINFO_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_call_cmp_func) #define sk_POLICYINFO_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_new) #define sk_POLICYINFO_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_new_null) #define sk_POLICYINFO_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_num) #define sk_POLICYINFO_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
sk_POLICYINFO_zero) #define sk_POLICYINFO_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_value) #define sk_POLICYINFO_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_set) #define sk_POLICYINFO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_free) #define sk_POLICYINFO_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_pop_free) #define sk_POLICYINFO_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_insert) #define sk_POLICYINFO_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_delete) #define sk_POLICYINFO_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_delete_ptr) #define sk_POLICYINFO_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_find) #define sk_POLICYINFO_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_shift) #define sk_POLICYINFO_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_push) #define sk_POLICYINFO_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_pop) #define sk_POLICYINFO_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_dup) #define sk_POLICYINFO_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_sort) #define sk_POLICYINFO_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_is_sorted) #define sk_POLICYINFO_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_set_cmp_func) #define sk_POLICYINFO_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICYINFO_deep_copy) #define sk_POLICY_MAPPING_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_call_free_func) #define sk_POLICY_MAPPING_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_call_copy_func) #define sk_POLICY_MAPPING_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_call_cmp_func) #define sk_POLICY_MAPPING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_new) #define sk_POLICY_MAPPING_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_new_null) #define 
sk_POLICY_MAPPING_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_num) #define sk_POLICY_MAPPING_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_zero) #define sk_POLICY_MAPPING_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_value) #define sk_POLICY_MAPPING_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_set) #define sk_POLICY_MAPPING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_free) #define sk_POLICY_MAPPING_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_pop_free) #define sk_POLICY_MAPPING_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_insert) #define sk_POLICY_MAPPING_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_delete) #define sk_POLICY_MAPPING_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_delete_ptr) #define sk_POLICY_MAPPING_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_find) #define sk_POLICY_MAPPING_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_shift) #define sk_POLICY_MAPPING_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_push) #define sk_POLICY_MAPPING_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_pop) #define sk_POLICY_MAPPING_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_dup) #define sk_POLICY_MAPPING_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_sort) #define sk_POLICY_MAPPING_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_is_sorted) #define sk_POLICY_MAPPING_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_set_cmp_func) #define sk_POLICY_MAPPING_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_POLICY_MAPPING_deep_copy) #define sk_X509_ALGOR_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_call_free_func) #define sk_X509_ALGOR_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_call_copy_func) #define sk_X509_ALGOR_call_cmp_func 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_call_cmp_func) #define sk_X509_ALGOR_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_new) #define sk_X509_ALGOR_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_new_null) #define sk_X509_ALGOR_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_num) #define sk_X509_ALGOR_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_zero) #define sk_X509_ALGOR_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_value) #define sk_X509_ALGOR_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_set) #define sk_X509_ALGOR_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_free) #define sk_X509_ALGOR_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_pop_free) #define sk_X509_ALGOR_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_insert) #define sk_X509_ALGOR_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_delete) #define sk_X509_ALGOR_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_delete_ptr) #define sk_X509_ALGOR_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_find) #define sk_X509_ALGOR_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_shift) #define sk_X509_ALGOR_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_push) #define sk_X509_ALGOR_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_pop) #define sk_X509_ALGOR_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_dup) #define sk_X509_ALGOR_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_sort) #define sk_X509_ALGOR_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_is_sorted) #define sk_X509_ALGOR_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_set_cmp_func) #define sk_X509_ALGOR_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ALGOR_deep_copy) #define sk_X509_ATTRIBUTE_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_call_free_func) #define 
sk_X509_ATTRIBUTE_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_call_copy_func) #define sk_X509_ATTRIBUTE_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_call_cmp_func) #define sk_X509_ATTRIBUTE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_new) #define sk_X509_ATTRIBUTE_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_new_null) #define sk_X509_ATTRIBUTE_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_num) #define sk_X509_ATTRIBUTE_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_zero) #define sk_X509_ATTRIBUTE_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_value) #define sk_X509_ATTRIBUTE_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_set) #define sk_X509_ATTRIBUTE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_free) #define sk_X509_ATTRIBUTE_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_pop_free) #define sk_X509_ATTRIBUTE_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_insert) #define sk_X509_ATTRIBUTE_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_delete) #define sk_X509_ATTRIBUTE_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_delete_ptr) #define sk_X509_ATTRIBUTE_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_find) #define sk_X509_ATTRIBUTE_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_shift) #define sk_X509_ATTRIBUTE_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_push) #define sk_X509_ATTRIBUTE_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_pop) #define sk_X509_ATTRIBUTE_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_dup) #define sk_X509_ATTRIBUTE_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_sort) #define sk_X509_ATTRIBUTE_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_is_sorted) #define sk_X509_ATTRIBUTE_set_cmp_func 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_set_cmp_func) #define sk_X509_ATTRIBUTE_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_deep_copy) #define sk_X509_OBJECT_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_call_free_func) #define sk_X509_OBJECT_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_call_copy_func) #define sk_X509_OBJECT_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_call_cmp_func) #define sk_X509_OBJECT_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_new) #define sk_X509_OBJECT_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_new_null) #define sk_X509_OBJECT_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_num) #define sk_X509_OBJECT_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_zero) #define sk_X509_OBJECT_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_value) #define sk_X509_OBJECT_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_set) #define sk_X509_OBJECT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_free) #define sk_X509_OBJECT_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_pop_free) #define sk_X509_OBJECT_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_insert) #define sk_X509_OBJECT_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_delete) #define sk_X509_OBJECT_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_delete_ptr) #define sk_X509_OBJECT_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_find) #define sk_X509_OBJECT_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_shift) #define sk_X509_OBJECT_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_push) #define sk_X509_OBJECT_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_pop) #define sk_X509_OBJECT_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_dup) #define sk_X509_OBJECT_sort 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_sort) #define sk_X509_OBJECT_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_is_sorted) #define sk_X509_OBJECT_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_set_cmp_func) #define sk_X509_OBJECT_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_OBJECT_deep_copy) #define sk_X509_INFO_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_call_free_func) #define sk_X509_INFO_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_call_copy_func) #define sk_X509_INFO_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_call_cmp_func) #define sk_X509_INFO_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_new) #define sk_X509_INFO_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_new_null) #define sk_X509_INFO_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_num) #define sk_X509_INFO_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_zero) #define sk_X509_INFO_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_value) #define sk_X509_INFO_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_set) #define sk_X509_INFO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_free) #define sk_X509_INFO_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_pop_free) #define sk_X509_INFO_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_insert) #define sk_X509_INFO_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_delete) #define sk_X509_INFO_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_delete_ptr) #define sk_X509_INFO_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_find) #define sk_X509_INFO_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_shift) #define sk_X509_INFO_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_push) #define sk_X509_INFO_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_pop) #define sk_X509_INFO_dup 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_dup) #define sk_X509_INFO_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_sort) #define sk_X509_INFO_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_is_sorted) #define sk_X509_INFO_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_set_cmp_func) #define sk_X509_INFO_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_X509_INFO_deep_copy) #define sk_CRYPTO_BUFFER_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_call_free_func) #define sk_CRYPTO_BUFFER_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_call_copy_func) #define sk_CRYPTO_BUFFER_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_call_cmp_func) #define sk_CRYPTO_BUFFER_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_new) #define sk_CRYPTO_BUFFER_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_new_null) #define sk_CRYPTO_BUFFER_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_num) #define sk_CRYPTO_BUFFER_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_zero) #define sk_CRYPTO_BUFFER_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_value) #define sk_CRYPTO_BUFFER_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_set) #define sk_CRYPTO_BUFFER_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_free) #define sk_CRYPTO_BUFFER_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_pop_free) #define sk_CRYPTO_BUFFER_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_insert) #define sk_CRYPTO_BUFFER_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_delete) #define sk_CRYPTO_BUFFER_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_delete_ptr) #define sk_CRYPTO_BUFFER_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_find) #define sk_CRYPTO_BUFFER_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_shift) 
#define sk_CRYPTO_BUFFER_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_push) #define sk_CRYPTO_BUFFER_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_pop) #define sk_CRYPTO_BUFFER_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_dup) #define sk_CRYPTO_BUFFER_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_sort) #define sk_CRYPTO_BUFFER_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_is_sorted) #define sk_CRYPTO_BUFFER_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_set_cmp_func) #define sk_CRYPTO_BUFFER_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_deep_copy) #define sk_ASN1_INTEGER_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_call_free_func) #define sk_ASN1_INTEGER_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_call_copy_func) #define sk_ASN1_INTEGER_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_call_cmp_func) #define sk_ASN1_INTEGER_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_new) #define sk_ASN1_INTEGER_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_new_null) #define sk_ASN1_INTEGER_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_num) #define sk_ASN1_INTEGER_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_zero) #define sk_ASN1_INTEGER_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_value) #define sk_ASN1_INTEGER_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_set) #define sk_ASN1_INTEGER_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_free) #define sk_ASN1_INTEGER_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_pop_free) #define sk_ASN1_INTEGER_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_insert) #define sk_ASN1_INTEGER_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_delete) #define sk_ASN1_INTEGER_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
sk_ASN1_INTEGER_delete_ptr) #define sk_ASN1_INTEGER_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_find) #define sk_ASN1_INTEGER_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_shift) #define sk_ASN1_INTEGER_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_push) #define sk_ASN1_INTEGER_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_pop) #define sk_ASN1_INTEGER_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_dup) #define sk_ASN1_INTEGER_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_sort) #define sk_ASN1_INTEGER_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_is_sorted) #define sk_ASN1_INTEGER_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_set_cmp_func) #define sk_ASN1_INTEGER_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_INTEGER_deep_copy) #define sk_ASN1_OBJECT_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_call_free_func) #define sk_ASN1_OBJECT_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_call_copy_func) #define sk_ASN1_OBJECT_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_call_cmp_func) #define sk_ASN1_OBJECT_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_new) #define sk_ASN1_OBJECT_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_new_null) #define sk_ASN1_OBJECT_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_num) #define sk_ASN1_OBJECT_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_zero) #define sk_ASN1_OBJECT_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_value) #define sk_ASN1_OBJECT_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_set) #define sk_ASN1_OBJECT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_free) #define sk_ASN1_OBJECT_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_pop_free) #define sk_ASN1_OBJECT_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
sk_ASN1_OBJECT_insert) #define sk_ASN1_OBJECT_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_delete) #define sk_ASN1_OBJECT_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_delete_ptr) #define sk_ASN1_OBJECT_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_find) #define sk_ASN1_OBJECT_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_shift) #define sk_ASN1_OBJECT_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_push) #define sk_ASN1_OBJECT_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_pop) #define sk_ASN1_OBJECT_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_dup) #define sk_ASN1_OBJECT_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_sort) #define sk_ASN1_OBJECT_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_is_sorted) #define sk_ASN1_OBJECT_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_set_cmp_func) #define sk_ASN1_OBJECT_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_OBJECT_deep_copy) #define sk_ASN1_TYPE_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_call_free_func) #define sk_ASN1_TYPE_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_call_copy_func) #define sk_ASN1_TYPE_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_call_cmp_func) #define sk_ASN1_TYPE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_new) #define sk_ASN1_TYPE_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_new_null) #define sk_ASN1_TYPE_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_num) #define sk_ASN1_TYPE_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_zero) #define sk_ASN1_TYPE_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_value) #define sk_ASN1_TYPE_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_set) #define sk_ASN1_TYPE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_free) #define sk_ASN1_TYPE_pop_free 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_pop_free) #define sk_ASN1_TYPE_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_insert) #define sk_ASN1_TYPE_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_delete) #define sk_ASN1_TYPE_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_delete_ptr) #define sk_ASN1_TYPE_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_find) #define sk_ASN1_TYPE_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_shift) #define sk_ASN1_TYPE_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_push) #define sk_ASN1_TYPE_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_pop) #define sk_ASN1_TYPE_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_dup) #define sk_ASN1_TYPE_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_sort) #define sk_ASN1_TYPE_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_is_sorted) #define sk_ASN1_TYPE_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_set_cmp_func) #define sk_ASN1_TYPE_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_TYPE_deep_copy) #define sk_BIO_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_call_free_func) #define sk_BIO_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_call_copy_func) #define sk_BIO_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_call_cmp_func) #define sk_BIO_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_new) #define sk_BIO_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_new_null) #define sk_BIO_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_num) #define sk_BIO_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_zero) #define sk_BIO_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_value) #define sk_BIO_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_set) #define sk_BIO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_free) #define sk_BIO_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_pop_free) #define 
sk_BIO_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_insert) #define sk_BIO_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_delete) #define sk_BIO_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_delete_ptr) #define sk_BIO_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_find) #define sk_BIO_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_shift) #define sk_BIO_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_push) #define sk_BIO_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_pop) #define sk_BIO_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_dup) #define sk_BIO_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_sort) #define sk_BIO_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_is_sorted) #define sk_BIO_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_set_cmp_func) #define sk_BIO_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_BIO_deep_copy) #define sk_CONF_VALUE_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_call_free_func) #define sk_CONF_VALUE_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_call_copy_func) #define sk_CONF_VALUE_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_call_cmp_func) #define sk_CONF_VALUE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_new) #define sk_CONF_VALUE_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_new_null) #define sk_CONF_VALUE_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_num) #define sk_CONF_VALUE_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_zero) #define sk_CONF_VALUE_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_value) #define sk_CONF_VALUE_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_set) #define sk_CONF_VALUE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_free) #define sk_CONF_VALUE_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_pop_free) #define sk_CONF_VALUE_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
sk_CONF_VALUE_insert) #define sk_CONF_VALUE_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_delete) #define sk_CONF_VALUE_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_delete_ptr) #define sk_CONF_VALUE_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_find) #define sk_CONF_VALUE_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_shift) #define sk_CONF_VALUE_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_push) #define sk_CONF_VALUE_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_pop) #define sk_CONF_VALUE_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_dup) #define sk_CONF_VALUE_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_sort) #define sk_CONF_VALUE_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_is_sorted) #define sk_CONF_VALUE_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_set_cmp_func) #define sk_CONF_VALUE_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_CONF_VALUE_deep_copy) #define sk_SSL_COMP_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_call_free_func) #define sk_SSL_COMP_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_call_copy_func) #define sk_SSL_COMP_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_call_cmp_func) #define sk_SSL_COMP_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_new) #define sk_SSL_COMP_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_new_null) #define sk_SSL_COMP_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_num) #define sk_SSL_COMP_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_zero) #define sk_SSL_COMP_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_value) #define sk_SSL_COMP_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_set) #define sk_SSL_COMP_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_free) #define sk_SSL_COMP_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_pop_free) #define 
sk_SSL_COMP_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_insert) #define sk_SSL_COMP_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_delete) #define sk_SSL_COMP_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_delete_ptr) #define sk_SSL_COMP_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_find) #define sk_SSL_COMP_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_shift) #define sk_SSL_COMP_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_push) #define sk_SSL_COMP_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_pop) #define sk_SSL_COMP_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_dup) #define sk_SSL_COMP_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_sort) #define sk_SSL_COMP_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_is_sorted) #define sk_SSL_COMP_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_set_cmp_func) #define sk_SSL_COMP_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_SSL_COMP_deep_copy) #define sk_void_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_call_free_func) #define sk_void_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_call_copy_func) #define sk_void_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_call_cmp_func) #define sk_void_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_new) #define sk_void_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_new_null) #define sk_void_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_num) #define sk_void_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_zero) #define sk_void_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_value) #define sk_void_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_set) #define sk_void_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_free) #define sk_void_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_pop_free) #define sk_void_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_insert) #define 
sk_void_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_delete) #define sk_void_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_delete_ptr) #define sk_void_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_find) #define sk_void_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_shift) #define sk_void_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_push) #define sk_void_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_pop) #define sk_void_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_dup) #define sk_void_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_sort) #define sk_void_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_is_sorted) #define sk_void_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_set_cmp_func) #define sk_void_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_void_deep_copy) #define sk_OPENSSL_STRING_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_call_free_func) #define sk_OPENSSL_STRING_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_call_copy_func) #define sk_OPENSSL_STRING_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_call_cmp_func) #define sk_OPENSSL_STRING_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_new) #define sk_OPENSSL_STRING_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_new_null) #define sk_OPENSSL_STRING_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_num) #define sk_OPENSSL_STRING_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_zero) #define sk_OPENSSL_STRING_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_value) #define sk_OPENSSL_STRING_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_set) #define sk_OPENSSL_STRING_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_free) #define sk_OPENSSL_STRING_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_pop_free) #define sk_OPENSSL_STRING_insert 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_insert) #define sk_OPENSSL_STRING_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_delete) #define sk_OPENSSL_STRING_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_delete_ptr) #define sk_OPENSSL_STRING_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_find) #define sk_OPENSSL_STRING_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_shift) #define sk_OPENSSL_STRING_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_push) #define sk_OPENSSL_STRING_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_pop) #define sk_OPENSSL_STRING_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_dup) #define sk_OPENSSL_STRING_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_sort) #define sk_OPENSSL_STRING_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_is_sorted) #define sk_OPENSSL_STRING_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_set_cmp_func) #define sk_OPENSSL_STRING_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_OPENSSL_STRING_deep_copy) #define sk_TRUST_TOKEN_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_call_free_func) #define sk_TRUST_TOKEN_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_call_copy_func) #define sk_TRUST_TOKEN_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_call_cmp_func) #define sk_TRUST_TOKEN_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_new) #define sk_TRUST_TOKEN_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_new_null) #define sk_TRUST_TOKEN_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_num) #define sk_TRUST_TOKEN_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_zero) #define sk_TRUST_TOKEN_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_value) #define sk_TRUST_TOKEN_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
sk_TRUST_TOKEN_set) #define sk_TRUST_TOKEN_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_free) #define sk_TRUST_TOKEN_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_pop_free) #define sk_TRUST_TOKEN_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_insert) #define sk_TRUST_TOKEN_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_delete) #define sk_TRUST_TOKEN_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_delete_ptr) #define sk_TRUST_TOKEN_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_find) #define sk_TRUST_TOKEN_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_shift) #define sk_TRUST_TOKEN_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_push) #define sk_TRUST_TOKEN_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_pop) #define sk_TRUST_TOKEN_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_dup) #define sk_TRUST_TOKEN_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_sort) #define sk_TRUST_TOKEN_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_is_sorted) #define sk_TRUST_TOKEN_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_set_cmp_func) #define sk_TRUST_TOKEN_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_TRUST_TOKEN_deep_copy) #define sk_ASN1_VALUE_call_free_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_call_free_func) #define sk_ASN1_VALUE_call_copy_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_call_copy_func) #define sk_ASN1_VALUE_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_call_cmp_func) #define sk_ASN1_VALUE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_new) #define sk_ASN1_VALUE_new_null BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_new_null) #define sk_ASN1_VALUE_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_num) #define sk_ASN1_VALUE_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_zero) #define 
sk_ASN1_VALUE_value BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_value) #define sk_ASN1_VALUE_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_set) #define sk_ASN1_VALUE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_free) #define sk_ASN1_VALUE_pop_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_pop_free) #define sk_ASN1_VALUE_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_insert) #define sk_ASN1_VALUE_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_delete) #define sk_ASN1_VALUE_delete_ptr BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_delete_ptr) #define sk_ASN1_VALUE_find BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_find) #define sk_ASN1_VALUE_shift BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_shift) #define sk_ASN1_VALUE_push BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_push) #define sk_ASN1_VALUE_pop BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_pop) #define sk_ASN1_VALUE_dup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_dup) #define sk_ASN1_VALUE_sort BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_sort) #define sk_ASN1_VALUE_is_sorted BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_is_sorted) #define sk_ASN1_VALUE_set_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_set_cmp_func) #define sk_ASN1_VALUE_deep_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_ASN1_VALUE_deep_copy) #define lh_ASN1_STRING_TABLE_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_call_cmp_func) #define lh_ASN1_STRING_TABLE_call_hash_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_call_hash_func) #define lh_ASN1_STRING_TABLE_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_new) #define lh_ASN1_STRING_TABLE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_free) #define lh_ASN1_STRING_TABLE_num_items BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_num_items) #define 
lh_ASN1_STRING_TABLE_retrieve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_retrieve) #define lh_ASN1_STRING_TABLE_call_cmp_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_call_cmp_key) #define lh_ASN1_STRING_TABLE_retrieve_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_retrieve_key) #define lh_ASN1_STRING_TABLE_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_insert) #define lh_ASN1_STRING_TABLE_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_delete) #define lh_ASN1_STRING_TABLE_call_doall BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_call_doall) #define lh_ASN1_STRING_TABLE_call_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_call_doall_arg) #define lh_ASN1_STRING_TABLE_doall BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_doall) #define lh_ASN1_STRING_TABLE_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_STRING_TABLE_doall_arg) #define lh_ASN1_OBJECT_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_call_cmp_func) #define lh_ASN1_OBJECT_call_hash_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_call_hash_func) #define lh_ASN1_OBJECT_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_new) #define lh_ASN1_OBJECT_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_free) #define lh_ASN1_OBJECT_num_items BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_num_items) #define lh_ASN1_OBJECT_retrieve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_retrieve) #define lh_ASN1_OBJECT_call_cmp_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_call_cmp_key) #define lh_ASN1_OBJECT_retrieve_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_retrieve_key) #define lh_ASN1_OBJECT_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_insert) #define lh_ASN1_OBJECT_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_delete) #define lh_ASN1_OBJECT_call_doall 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_call_doall) #define lh_ASN1_OBJECT_call_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_call_doall_arg) #define lh_ASN1_OBJECT_doall BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_doall) #define lh_ASN1_OBJECT_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_ASN1_OBJECT_doall_arg) #define lh_CONF_SECTION_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_call_cmp_func) #define lh_CONF_SECTION_call_hash_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_call_hash_func) #define lh_CONF_SECTION_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_new) #define lh_CONF_SECTION_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_free) #define lh_CONF_SECTION_num_items BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_num_items) #define lh_CONF_SECTION_retrieve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_retrieve) #define lh_CONF_SECTION_call_cmp_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_call_cmp_key) #define lh_CONF_SECTION_retrieve_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_retrieve_key) #define lh_CONF_SECTION_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_insert) #define lh_CONF_SECTION_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_delete) #define lh_CONF_SECTION_call_doall BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_call_doall) #define lh_CONF_SECTION_call_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_call_doall_arg) #define lh_CONF_SECTION_doall BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_doall) #define lh_CONF_SECTION_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_SECTION_doall_arg) #define lh_CONF_VALUE_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_call_cmp_func) #define lh_CONF_VALUE_call_hash_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_call_hash_func) #define lh_CONF_VALUE_new 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_new) #define lh_CONF_VALUE_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_free) #define lh_CONF_VALUE_num_items BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_num_items) #define lh_CONF_VALUE_retrieve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_retrieve) #define lh_CONF_VALUE_call_cmp_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_call_cmp_key) #define lh_CONF_VALUE_retrieve_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_retrieve_key) #define lh_CONF_VALUE_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_insert) #define lh_CONF_VALUE_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_delete) #define lh_CONF_VALUE_call_doall BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_call_doall) #define lh_CONF_VALUE_call_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_call_doall_arg) #define lh_CONF_VALUE_doall BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_doall) #define lh_CONF_VALUE_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CONF_VALUE_doall_arg) #define lh_CRYPTO_BUFFER_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_call_cmp_func) #define lh_CRYPTO_BUFFER_call_hash_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_call_hash_func) #define lh_CRYPTO_BUFFER_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_new) #define lh_CRYPTO_BUFFER_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_free) #define lh_CRYPTO_BUFFER_num_items BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_num_items) #define lh_CRYPTO_BUFFER_retrieve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_retrieve) #define lh_CRYPTO_BUFFER_call_cmp_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_call_cmp_key) #define lh_CRYPTO_BUFFER_retrieve_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_retrieve_key) #define lh_CRYPTO_BUFFER_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
lh_CRYPTO_BUFFER_insert) #define lh_CRYPTO_BUFFER_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_delete) #define lh_CRYPTO_BUFFER_call_doall BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_call_doall) #define lh_CRYPTO_BUFFER_call_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_call_doall_arg) #define lh_CRYPTO_BUFFER_doall BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_doall) #define lh_CRYPTO_BUFFER_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_doall_arg) #define lh_SSL_SESSION_call_cmp_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_call_cmp_func) #define lh_SSL_SESSION_call_hash_func BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_call_hash_func) #define lh_SSL_SESSION_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_new) #define lh_SSL_SESSION_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_free) #define lh_SSL_SESSION_num_items BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_num_items) #define lh_SSL_SESSION_retrieve BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_retrieve) #define lh_SSL_SESSION_call_cmp_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_call_cmp_key) #define lh_SSL_SESSION_retrieve_key BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_retrieve_key) #define lh_SSL_SESSION_insert BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_insert) #define lh_SSL_SESSION_delete BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_delete) #define lh_SSL_SESSION_call_doall BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_call_doall) #define lh_SSL_SESSION_call_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_call_doall_arg) #define lh_SSL_SESSION_doall BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_doall) #define lh_SSL_SESSION_doall_arg BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_SSL_SESSION_doall_arg) #define ssl_credential_st BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ssl_credential_st) #define 
ssl_ctx_st BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ssl_ctx_st) #define ssl_ech_keys_st BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ssl_ech_keys_st) #define ssl_session_st BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ssl_session_st) #define ssl_st BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ssl_st) ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_boringssl_prefix_symbols_asm.h ================================================ // Copyright 2018 The BoringSSL Authors // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY // SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION // OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN // CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. #if !defined(__APPLE__) #include "CNIOBoringSSL_boringssl_prefix_symbols.h" #else // On iOS and macOS, we need to treat assembly symbols differently from other // symbols. The linker expects symbols to be prefixed with an underscore. // Perlasm thus generates symbol with this underscore applied. Our macros must, // in turn, incorporate it. 
#define BORINGSSL_ADD_PREFIX_MAC_ASM(a, b) BORINGSSL_ADD_PREFIX_INNER_MAC_ASM(a, b) #define BORINGSSL_ADD_PREFIX_INNER_MAC_ASM(a, b) _ ## a ## _ ## b #define _ACCESS_DESCRIPTION_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ACCESS_DESCRIPTION_free) #define _ACCESS_DESCRIPTION_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ACCESS_DESCRIPTION_new) #define _AES_CMAC BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_CMAC) #define _AES_cbc_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_cbc_encrypt) #define _AES_cfb128_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_cfb128_encrypt) #define _AES_ctr128_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_ctr128_encrypt) #define _AES_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_decrypt) #define _AES_ecb_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_ecb_encrypt) #define _AES_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_encrypt) #define _AES_ofb128_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_ofb128_encrypt) #define _AES_set_decrypt_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_set_decrypt_key) #define _AES_set_encrypt_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_set_encrypt_key) #define _AES_unwrap_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_unwrap_key) #define _AES_unwrap_key_padded BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_unwrap_key_padded) #define _AES_wrap_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_wrap_key) #define _AES_wrap_key_padded BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AES_wrap_key_padded) #define _ASN1_ANY_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_ANY_it) #define _ASN1_BIT_STRING_check BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_BIT_STRING_check) #define _ASN1_BIT_STRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_BIT_STRING_free) #define _ASN1_BIT_STRING_get_bit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
ASN1_BIT_STRING_get_bit) #define _ASN1_BIT_STRING_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_BIT_STRING_it) #define _ASN1_BIT_STRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_BIT_STRING_new) #define _ASN1_BIT_STRING_num_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_BIT_STRING_num_bytes) #define _ASN1_BIT_STRING_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_BIT_STRING_set) #define _ASN1_BIT_STRING_set_bit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_BIT_STRING_set_bit) #define _ASN1_BMPSTRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_BMPSTRING_free) #define _ASN1_BMPSTRING_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_BMPSTRING_it) #define _ASN1_BMPSTRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_BMPSTRING_new) #define _ASN1_BOOLEAN_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_BOOLEAN_it) #define _ASN1_ENUMERATED_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_ENUMERATED_free) #define _ASN1_ENUMERATED_get BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_ENUMERATED_get) #define _ASN1_ENUMERATED_get_int64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_ENUMERATED_get_int64) #define _ASN1_ENUMERATED_get_uint64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_ENUMERATED_get_uint64) #define _ASN1_ENUMERATED_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_ENUMERATED_it) #define _ASN1_ENUMERATED_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_ENUMERATED_new) #define _ASN1_ENUMERATED_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_ENUMERATED_set) #define _ASN1_ENUMERATED_set_int64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_ENUMERATED_set_int64) #define _ASN1_ENUMERATED_set_uint64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_ENUMERATED_set_uint64) #define _ASN1_ENUMERATED_to_BN BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_ENUMERATED_to_BN) #define _ASN1_FBOOLEAN_it 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_FBOOLEAN_it) #define _ASN1_GENERALIZEDTIME_adj BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_adj) #define _ASN1_GENERALIZEDTIME_check BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_check) #define _ASN1_GENERALIZEDTIME_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_free) #define _ASN1_GENERALIZEDTIME_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_it) #define _ASN1_GENERALIZEDTIME_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_new) #define _ASN1_GENERALIZEDTIME_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_print) #define _ASN1_GENERALIZEDTIME_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_set) #define _ASN1_GENERALIZEDTIME_set_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_GENERALIZEDTIME_set_string) #define _ASN1_GENERALSTRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_GENERALSTRING_free) #define _ASN1_GENERALSTRING_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_GENERALSTRING_it) #define _ASN1_GENERALSTRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_GENERALSTRING_new) #define _ASN1_IA5STRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_IA5STRING_free) #define _ASN1_IA5STRING_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_IA5STRING_it) #define _ASN1_IA5STRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_IA5STRING_new) #define _ASN1_INTEGER_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_INTEGER_cmp) #define _ASN1_INTEGER_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_INTEGER_dup) #define _ASN1_INTEGER_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_INTEGER_free) #define _ASN1_INTEGER_get BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_INTEGER_get) #define _ASN1_INTEGER_get_int64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
ASN1_INTEGER_get_int64) #define _ASN1_INTEGER_get_uint64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_INTEGER_get_uint64) #define _ASN1_INTEGER_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_INTEGER_it) #define _ASN1_INTEGER_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_INTEGER_new) #define _ASN1_INTEGER_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_INTEGER_set) #define _ASN1_INTEGER_set_int64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_INTEGER_set_int64) #define _ASN1_INTEGER_set_uint64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_INTEGER_set_uint64) #define _ASN1_INTEGER_to_BN BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_INTEGER_to_BN) #define _ASN1_NULL_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_NULL_free) #define _ASN1_NULL_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_NULL_it) #define _ASN1_NULL_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_NULL_new) #define _ASN1_OBJECT_create BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_OBJECT_create) #define _ASN1_OBJECT_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_OBJECT_free) #define _ASN1_OBJECT_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_OBJECT_it) #define _ASN1_OBJECT_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_OBJECT_new) #define _ASN1_OCTET_STRING_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_OCTET_STRING_cmp) #define _ASN1_OCTET_STRING_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_OCTET_STRING_dup) #define _ASN1_OCTET_STRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_OCTET_STRING_free) #define _ASN1_OCTET_STRING_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_OCTET_STRING_it) #define _ASN1_OCTET_STRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_OCTET_STRING_new) #define _ASN1_OCTET_STRING_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_OCTET_STRING_set) #define _ASN1_PRINTABLESTRING_free 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_PRINTABLESTRING_free) #define _ASN1_PRINTABLESTRING_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_PRINTABLESTRING_it) #define _ASN1_PRINTABLESTRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_PRINTABLESTRING_new) #define _ASN1_PRINTABLE_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_PRINTABLE_free) #define _ASN1_PRINTABLE_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_PRINTABLE_it) #define _ASN1_PRINTABLE_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_PRINTABLE_new) #define _ASN1_SEQUENCE_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_SEQUENCE_it) #define _ASN1_STRING_TABLE_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_TABLE_add) #define _ASN1_STRING_TABLE_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_TABLE_cleanup) #define _ASN1_STRING_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_cmp) #define _ASN1_STRING_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_copy) #define _ASN1_STRING_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_data) #define _ASN1_STRING_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_dup) #define _ASN1_STRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_free) #define _ASN1_STRING_get0_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_get0_data) #define _ASN1_STRING_get_default_mask BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_get_default_mask) #define _ASN1_STRING_length BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_length) #define _ASN1_STRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_new) #define _ASN1_STRING_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_print) #define _ASN1_STRING_print_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_print_ex) #define _ASN1_STRING_print_ex_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
ASN1_STRING_print_ex_fp) #define _ASN1_STRING_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_set) #define _ASN1_STRING_set0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_set0) #define _ASN1_STRING_set_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_set_by_NID) #define _ASN1_STRING_set_default_mask BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_set_default_mask) #define _ASN1_STRING_set_default_mask_asc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_set_default_mask_asc) #define _ASN1_STRING_to_UTF8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_to_UTF8) #define _ASN1_STRING_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_type) #define _ASN1_STRING_type_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_STRING_type_new) #define _ASN1_T61STRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_T61STRING_free) #define _ASN1_T61STRING_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_T61STRING_it) #define _ASN1_T61STRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_T61STRING_new) #define _ASN1_TBOOLEAN_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TBOOLEAN_it) #define _ASN1_TIME_adj BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_adj) #define _ASN1_TIME_check BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_check) #define _ASN1_TIME_diff BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_diff) #define _ASN1_TIME_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_free) #define _ASN1_TIME_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_it) #define _ASN1_TIME_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_new) #define _ASN1_TIME_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_print) #define _ASN1_TIME_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_set) #define _ASN1_TIME_set_posix BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_set_posix) 
#define _ASN1_TIME_set_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_set_string) #define _ASN1_TIME_set_string_X509 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_set_string_X509) #define _ASN1_TIME_to_generalizedtime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_to_generalizedtime) #define _ASN1_TIME_to_posix BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_to_posix) #define _ASN1_TIME_to_posix_nonstandard BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_to_posix_nonstandard) #define _ASN1_TIME_to_time_t BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TIME_to_time_t) #define _ASN1_TYPE_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TYPE_cmp) #define _ASN1_TYPE_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TYPE_free) #define _ASN1_TYPE_get BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TYPE_get) #define _ASN1_TYPE_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TYPE_new) #define _ASN1_TYPE_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TYPE_set) #define _ASN1_TYPE_set1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_TYPE_set1) #define _ASN1_UNIVERSALSTRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UNIVERSALSTRING_free) #define _ASN1_UNIVERSALSTRING_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UNIVERSALSTRING_it) #define _ASN1_UNIVERSALSTRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UNIVERSALSTRING_new) #define _ASN1_UTCTIME_adj BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UTCTIME_adj) #define _ASN1_UTCTIME_check BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UTCTIME_check) #define _ASN1_UTCTIME_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UTCTIME_free) #define _ASN1_UTCTIME_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UTCTIME_it) #define _ASN1_UTCTIME_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UTCTIME_new) #define _ASN1_UTCTIME_print 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UTCTIME_print) #define _ASN1_UTCTIME_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UTCTIME_set) #define _ASN1_UTCTIME_set_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UTCTIME_set_string) #define _ASN1_UTF8STRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UTF8STRING_free) #define _ASN1_UTF8STRING_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UTF8STRING_it) #define _ASN1_UTF8STRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_UTF8STRING_new) #define _ASN1_VISIBLESTRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_VISIBLESTRING_free) #define _ASN1_VISIBLESTRING_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_VISIBLESTRING_it) #define _ASN1_VISIBLESTRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_VISIBLESTRING_new) #define _ASN1_digest BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_digest) #define _ASN1_generate_v3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_generate_v3) #define _ASN1_get_object BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_get_object) #define _ASN1_item_d2i BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_d2i) #define _ASN1_item_d2i_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_d2i_bio) #define _ASN1_item_d2i_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_d2i_fp) #define _ASN1_item_digest BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_digest) #define _ASN1_item_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_dup) #define _ASN1_item_ex_d2i BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_ex_d2i) #define _ASN1_item_ex_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_ex_free) #define _ASN1_item_ex_i2d BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_ex_i2d) #define _ASN1_item_ex_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_ex_new) #define _ASN1_item_free 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_free) #define _ASN1_item_i2d BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_i2d) #define _ASN1_item_i2d_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_i2d_bio) #define _ASN1_item_i2d_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_i2d_fp) #define _ASN1_item_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_new) #define _ASN1_item_pack BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_pack) #define _ASN1_item_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_sign) #define _ASN1_item_sign_ctx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_sign_ctx) #define _ASN1_item_unpack BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_unpack) #define _ASN1_item_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_item_verify) #define _ASN1_mbstring_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_mbstring_copy) #define _ASN1_mbstring_ncopy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_mbstring_ncopy) #define _ASN1_object_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_object_size) #define _ASN1_primitive_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_primitive_free) #define _ASN1_put_eoc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_put_eoc) #define _ASN1_put_object BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_put_object) #define _ASN1_tag2bit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_tag2bit) #define _ASN1_tag2str BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_tag2str) #define _ASN1_template_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ASN1_template_free) #define _AUTHORITY_INFO_ACCESS_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AUTHORITY_INFO_ACCESS_free) #define _AUTHORITY_INFO_ACCESS_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AUTHORITY_INFO_ACCESS_it) #define _AUTHORITY_INFO_ACCESS_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
AUTHORITY_INFO_ACCESS_new) #define _AUTHORITY_KEYID_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AUTHORITY_KEYID_free) #define _AUTHORITY_KEYID_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AUTHORITY_KEYID_it) #define _AUTHORITY_KEYID_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, AUTHORITY_KEYID_new) #define _BASIC_CONSTRAINTS_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BASIC_CONSTRAINTS_free) #define _BASIC_CONSTRAINTS_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BASIC_CONSTRAINTS_it) #define _BASIC_CONSTRAINTS_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BASIC_CONSTRAINTS_new) #define _BCM_fips_186_2_prf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_fips_186_2_prf) #define _BCM_mldsa65_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_generate_key) #define _BCM_mldsa65_generate_key_external_entropy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_generate_key_external_entropy) #define _BCM_mldsa65_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_marshal_private_key) #define _BCM_mldsa65_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_marshal_public_key) #define _BCM_mldsa65_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_parse_private_key) #define _BCM_mldsa65_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_parse_public_key) #define _BCM_mldsa65_private_key_from_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_private_key_from_seed) #define _BCM_mldsa65_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_public_from_private) #define _BCM_mldsa65_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_sign) #define _BCM_mldsa65_sign_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_sign_internal) #define _BCM_mldsa65_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_verify) #define 
_BCM_mldsa65_verify_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa65_verify_internal) #define _BCM_mldsa87_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_generate_key) #define _BCM_mldsa87_generate_key_external_entropy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_generate_key_external_entropy) #define _BCM_mldsa87_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_marshal_private_key) #define _BCM_mldsa87_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_marshal_public_key) #define _BCM_mldsa87_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_parse_private_key) #define _BCM_mldsa87_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_parse_public_key) #define _BCM_mldsa87_private_key_from_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_private_key_from_seed) #define _BCM_mldsa87_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_public_from_private) #define _BCM_mldsa87_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_sign) #define _BCM_mldsa87_sign_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_sign_internal) #define _BCM_mldsa87_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_verify) #define _BCM_mldsa87_verify_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mldsa87_verify_internal) #define _BCM_mlkem1024_decap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem1024_decap) #define _BCM_mlkem1024_encap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem1024_encap) #define _BCM_mlkem1024_encap_external_entropy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem1024_encap_external_entropy) #define _BCM_mlkem1024_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem1024_generate_key) #define _BCM_mlkem1024_generate_key_external_seed 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem1024_generate_key_external_seed) #define _BCM_mlkem1024_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem1024_marshal_private_key) #define _BCM_mlkem1024_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem1024_marshal_public_key) #define _BCM_mlkem1024_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem1024_parse_private_key) #define _BCM_mlkem1024_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem1024_parse_public_key) #define _BCM_mlkem1024_private_key_from_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem1024_private_key_from_seed) #define _BCM_mlkem1024_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem1024_public_from_private) #define _BCM_mlkem768_decap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem768_decap) #define _BCM_mlkem768_encap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem768_encap) #define _BCM_mlkem768_encap_external_entropy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem768_encap_external_entropy) #define _BCM_mlkem768_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem768_generate_key) #define _BCM_mlkem768_generate_key_external_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem768_generate_key_external_seed) #define _BCM_mlkem768_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem768_marshal_private_key) #define _BCM_mlkem768_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem768_marshal_public_key) #define _BCM_mlkem768_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem768_parse_private_key) #define _BCM_mlkem768_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem768_parse_public_key) #define _BCM_mlkem768_private_key_from_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
BCM_mlkem768_private_key_from_seed) #define _BCM_mlkem768_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_mlkem768_public_from_private) #define _BCM_rand_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_rand_bytes) #define _BCM_rand_bytes_hwrng BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_rand_bytes_hwrng) #define _BCM_rand_bytes_with_additional_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_rand_bytes_with_additional_data) #define _BCM_sha1_final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha1_final) #define _BCM_sha1_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha1_init) #define _BCM_sha1_transform BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha1_transform) #define _BCM_sha1_update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha1_update) #define _BCM_sha224_final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha224_final) #define _BCM_sha224_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha224_init) #define _BCM_sha224_update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha224_update) #define _BCM_sha256_final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha256_final) #define _BCM_sha256_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha256_init) #define _BCM_sha256_transform BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha256_transform) #define _BCM_sha256_transform_blocks BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha256_transform_blocks) #define _BCM_sha256_update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha256_update) #define _BCM_sha384_final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha384_final) #define _BCM_sha384_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha384_init) #define _BCM_sha384_update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha384_update) #define _BCM_sha512_256_final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha512_256_final) #define _BCM_sha512_256_init 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha512_256_init) #define _BCM_sha512_256_update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha512_256_update) #define _BCM_sha512_final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha512_final) #define _BCM_sha512_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha512_init) #define _BCM_sha512_transform BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha512_transform) #define _BCM_sha512_update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_sha512_update) #define _BCM_slhdsa_sha2_128s_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_generate_key) #define _BCM_slhdsa_sha2_128s_generate_key_from_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_generate_key_from_seed) #define _BCM_slhdsa_sha2_128s_prehash_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_prehash_sign) #define _BCM_slhdsa_sha2_128s_prehash_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_prehash_verify) #define _BCM_slhdsa_sha2_128s_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_public_from_private) #define _BCM_slhdsa_sha2_128s_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_sign) #define _BCM_slhdsa_sha2_128s_sign_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_sign_internal) #define _BCM_slhdsa_sha2_128s_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_verify) #define _BCM_slhdsa_sha2_128s_verify_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BCM_slhdsa_sha2_128s_verify_internal) #define _BIO_append_filename BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_append_filename) #define _BIO_callback_ctrl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_callback_ctrl) #define _BIO_clear_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_clear_flags) #define _BIO_clear_retry_flags 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_clear_retry_flags) #define _BIO_copy_next_retry BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_copy_next_retry) #define _BIO_ctrl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_ctrl) #define _BIO_ctrl_get_read_request BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_ctrl_get_read_request) #define _BIO_ctrl_get_write_guarantee BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_ctrl_get_write_guarantee) #define _BIO_ctrl_pending BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_ctrl_pending) #define _BIO_do_connect BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_do_connect) #define _BIO_eof BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_eof) #define _BIO_f_ssl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_f_ssl) #define _BIO_find_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_find_type) #define _BIO_flush BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_flush) #define _BIO_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_free) #define _BIO_free_all BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_free_all) #define _BIO_get_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_get_data) #define _BIO_get_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_get_ex_data) #define _BIO_get_ex_new_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_get_ex_new_index) #define _BIO_get_fd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_get_fd) #define _BIO_get_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_get_fp) #define _BIO_get_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_get_init) #define _BIO_get_mem_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_get_mem_data) #define _BIO_get_mem_ptr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_get_mem_ptr) #define _BIO_get_new_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_get_new_index) #define _BIO_get_retry_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_get_retry_flags) 
#define _BIO_get_retry_reason BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_get_retry_reason) #define _BIO_get_shutdown BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_get_shutdown) #define _BIO_gets BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_gets) #define _BIO_hexdump BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_hexdump) #define _BIO_indent BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_indent) #define _BIO_int_ctrl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_int_ctrl) #define _BIO_mem_contents BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_mem_contents) #define _BIO_meth_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_meth_free) #define _BIO_meth_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_meth_new) #define _BIO_meth_set_create BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_meth_set_create) #define _BIO_meth_set_ctrl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_meth_set_ctrl) #define _BIO_meth_set_destroy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_meth_set_destroy) #define _BIO_meth_set_gets BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_meth_set_gets) #define _BIO_meth_set_puts BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_meth_set_puts) #define _BIO_meth_set_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_meth_set_read) #define _BIO_meth_set_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_meth_set_write) #define _BIO_method_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_method_type) #define _BIO_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new) #define _BIO_new_bio_pair BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_bio_pair) #define _BIO_new_connect BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_connect) #define _BIO_new_fd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_fd) #define _BIO_new_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_file) #define _BIO_new_fp 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_fp) #define _BIO_new_mem_buf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_mem_buf) #define _BIO_new_socket BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_new_socket) #define _BIO_next BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_next) #define _BIO_number_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_number_read) #define _BIO_number_written BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_number_written) #define _BIO_pending BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_pending) #define _BIO_pop BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_pop) #define _BIO_printf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_printf) #define _BIO_ptr_ctrl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_ptr_ctrl) #define _BIO_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_push) #define _BIO_puts BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_puts) #define _BIO_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_read) #define _BIO_read_asn1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_read_asn1) #define _BIO_read_filename BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_read_filename) #define _BIO_reset BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_reset) #define _BIO_rw_filename BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_rw_filename) #define _BIO_s_connect BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_s_connect) #define _BIO_s_fd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_s_fd) #define _BIO_s_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_s_file) #define _BIO_s_mem BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_s_mem) #define _BIO_s_socket BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_s_socket) #define _BIO_seek BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_seek) #define _BIO_set_close BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_close) #define _BIO_set_conn_hostname 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_conn_hostname) #define _BIO_set_conn_int_port BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_conn_int_port) #define _BIO_set_conn_port BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_conn_port) #define _BIO_set_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_data) #define _BIO_set_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_ex_data) #define _BIO_set_fd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_fd) #define _BIO_set_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_flags) #define _BIO_set_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_fp) #define _BIO_set_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_init) #define _BIO_set_mem_buf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_mem_buf) #define _BIO_set_mem_eof_return BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_mem_eof_return) #define _BIO_set_nbio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_nbio) #define _BIO_set_retry_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_retry_read) #define _BIO_set_retry_reason BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_retry_reason) #define _BIO_set_retry_special BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_retry_special) #define _BIO_set_retry_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_retry_write) #define _BIO_set_shutdown BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_shutdown) #define _BIO_set_ssl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_ssl) #define _BIO_set_write_buffer_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_write_buffer_size) #define _BIO_should_io_special BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_should_io_special) #define _BIO_should_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_should_read) #define _BIO_should_retry BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_should_retry) #define 
_BIO_should_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_should_write) #define _BIO_shutdown_wr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_shutdown_wr) #define _BIO_snprintf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_snprintf) #define _BIO_tell BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_tell) #define _BIO_test_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_test_flags) #define _BIO_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_up_ref) #define _BIO_vfree BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_vfree) #define _BIO_vsnprintf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_vsnprintf) #define _BIO_wpending BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_wpending) #define _BIO_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_write) #define _BIO_write_all BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_write_all) #define _BIO_write_filename BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_write_filename) #define _BLAKE2B256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BLAKE2B256) #define _BLAKE2B256_Final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BLAKE2B256_Final) #define _BLAKE2B256_Init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BLAKE2B256_Init) #define _BLAKE2B256_Update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BLAKE2B256_Update) #define _BN_BLINDING_convert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_BLINDING_convert) #define _BN_BLINDING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_BLINDING_free) #define _BN_BLINDING_invalidate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_BLINDING_invalidate) #define _BN_BLINDING_invert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_BLINDING_invert) #define _BN_BLINDING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_BLINDING_new) #define _BN_CTX_end BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_CTX_end) #define _BN_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_CTX_free) #define 
_BN_CTX_get BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_CTX_get) #define _BN_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_CTX_new) #define _BN_CTX_start BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_CTX_start) #define _BN_GENCB_call BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_GENCB_call) #define _BN_GENCB_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_GENCB_free) #define _BN_GENCB_get_arg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_GENCB_get_arg) #define _BN_GENCB_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_GENCB_new) #define _BN_GENCB_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_GENCB_set) #define _BN_MONT_CTX_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_MONT_CTX_copy) #define _BN_MONT_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_MONT_CTX_free) #define _BN_MONT_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_MONT_CTX_new) #define _BN_MONT_CTX_new_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_MONT_CTX_new_consttime) #define _BN_MONT_CTX_new_for_modulus BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_MONT_CTX_new_for_modulus) #define _BN_MONT_CTX_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_MONT_CTX_set) #define _BN_MONT_CTX_set_locked BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_MONT_CTX_set_locked) #define _BN_abs_is_word BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_abs_is_word) #define _BN_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_add) #define _BN_add_word BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_add_word) #define _BN_asc2bn BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_asc2bn) #define _BN_bin2bn BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_bin2bn) #define _BN_bn2bin BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_bn2bin) #define _BN_bn2bin_padded BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_bn2bin_padded) #define _BN_bn2binpad BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_bn2binpad) #define 
_BN_bn2cbb_padded BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_bn2cbb_padded) #define _BN_bn2dec BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_bn2dec) #define _BN_bn2hex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_bn2hex) #define _BN_bn2le_padded BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_bn2le_padded) #define _BN_bn2lebinpad BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_bn2lebinpad) #define _BN_bn2mpi BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_bn2mpi) #define _BN_clear BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_clear) #define _BN_clear_bit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_clear_bit) #define _BN_clear_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_clear_free) #define _BN_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_cmp) #define _BN_cmp_word BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_cmp_word) #define _BN_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_copy) #define _BN_count_low_zero_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_count_low_zero_bits) #define _BN_dec2bn BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_dec2bn) #define _BN_div BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_div) #define _BN_div_word BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_div_word) #define _BN_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_dup) #define _BN_enhanced_miller_rabin_primality_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_enhanced_miller_rabin_primality_test) #define _BN_equal_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_equal_consttime) #define _BN_exp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_exp) #define _BN_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_free) #define _BN_from_montgomery BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_from_montgomery) #define _BN_gcd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_gcd) #define _BN_generate_prime_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
BN_generate_prime_ex) #define _BN_get_rfc3526_prime_1536 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_get_rfc3526_prime_1536) #define _BN_get_rfc3526_prime_2048 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_get_rfc3526_prime_2048) #define _BN_get_rfc3526_prime_3072 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_get_rfc3526_prime_3072) #define _BN_get_rfc3526_prime_4096 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_get_rfc3526_prime_4096) #define _BN_get_rfc3526_prime_6144 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_get_rfc3526_prime_6144) #define _BN_get_rfc3526_prime_8192 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_get_rfc3526_prime_8192) #define _BN_get_u64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_get_u64) #define _BN_get_word BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_get_word) #define _BN_hex2bn BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_hex2bn) #define _BN_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_init) #define _BN_is_bit_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_is_bit_set) #define _BN_is_negative BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_is_negative) #define _BN_is_odd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_is_odd) #define _BN_is_one BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_is_one) #define _BN_is_pow2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_is_pow2) #define _BN_is_prime_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_is_prime_ex) #define _BN_is_prime_fasttest_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_is_prime_fasttest_ex) #define _BN_is_word BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_is_word) #define _BN_is_zero BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_is_zero) #define _BN_le2bn BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_le2bn) #define _BN_lebin2bn BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_lebin2bn) #define _BN_lshift BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_lshift) #define _BN_lshift1 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_lshift1) #define _BN_marshal_asn1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_marshal_asn1) #define _BN_mask_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mask_bits) #define _BN_mod_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_add) #define _BN_mod_add_quick BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_add_quick) #define _BN_mod_exp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_exp) #define _BN_mod_exp2_mont BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_exp2_mont) #define _BN_mod_exp_mont BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_exp_mont) #define _BN_mod_exp_mont_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_exp_mont_consttime) #define _BN_mod_exp_mont_word BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_exp_mont_word) #define _BN_mod_inverse BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_inverse) #define _BN_mod_inverse_blinded BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_inverse_blinded) #define _BN_mod_inverse_odd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_inverse_odd) #define _BN_mod_lshift BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_lshift) #define _BN_mod_lshift1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_lshift1) #define _BN_mod_lshift1_quick BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_lshift1_quick) #define _BN_mod_lshift_quick BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_lshift_quick) #define _BN_mod_mul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_mul) #define _BN_mod_mul_montgomery BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_mul_montgomery) #define _BN_mod_pow2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_pow2) #define _BN_mod_sqr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_sqr) #define _BN_mod_sqrt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_sqrt) #define _BN_mod_sub 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_sub) #define _BN_mod_sub_quick BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_sub_quick) #define _BN_mod_word BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mod_word) #define _BN_mpi2bn BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mpi2bn) #define _BN_mul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mul) #define _BN_mul_word BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_mul_word) #define _BN_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_new) #define _BN_nnmod BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_nnmod) #define _BN_nnmod_pow2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_nnmod_pow2) #define _BN_num_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_num_bits) #define _BN_num_bits_word BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_num_bits_word) #define _BN_num_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_num_bytes) #define _BN_one BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_one) #define _BN_parse_asn1_unsigned BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_parse_asn1_unsigned) #define _BN_primality_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_primality_test) #define _BN_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_print) #define _BN_print_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_print_fp) #define _BN_pseudo_rand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_pseudo_rand) #define _BN_pseudo_rand_range BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_pseudo_rand_range) #define _BN_rand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_rand) #define _BN_rand_range BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_rand_range) #define _BN_rand_range_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_rand_range_ex) #define _BN_rshift BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_rshift) #define _BN_rshift1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_rshift1) #define _BN_secure_new 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_secure_new) #define _BN_set_bit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_set_bit) #define _BN_set_negative BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_set_negative) #define _BN_set_u64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_set_u64) #define _BN_set_word BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_set_word) #define _BN_sqr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_sqr) #define _BN_sqrt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_sqrt) #define _BN_sub BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_sub) #define _BN_sub_word BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_sub_word) #define _BN_to_ASN1_ENUMERATED BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_to_ASN1_ENUMERATED) #define _BN_to_ASN1_INTEGER BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_to_ASN1_INTEGER) #define _BN_to_montgomery BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_to_montgomery) #define _BN_uadd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_uadd) #define _BN_ucmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_ucmp) #define _BN_usub BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_usub) #define _BN_value_one BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_value_one) #define _BN_zero BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_zero) #define _BORINGSSL_keccak BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BORINGSSL_keccak) #define _BORINGSSL_keccak_absorb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BORINGSSL_keccak_absorb) #define _BORINGSSL_keccak_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BORINGSSL_keccak_init) #define _BORINGSSL_keccak_squeeze BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BORINGSSL_keccak_squeeze) #define _BORINGSSL_self_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BORINGSSL_self_test) #define _BUF_MEM_append BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BUF_MEM_append) #define _BUF_MEM_free 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BUF_MEM_free) #define _BUF_MEM_grow BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BUF_MEM_grow) #define _BUF_MEM_grow_clean BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BUF_MEM_grow_clean) #define _BUF_MEM_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BUF_MEM_new) #define _BUF_MEM_reserve BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BUF_MEM_reserve) #define _BUF_memdup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BUF_memdup) #define _BUF_strdup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BUF_strdup) #define _BUF_strlcat BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BUF_strlcat) #define _BUF_strlcpy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BUF_strlcpy) #define _BUF_strndup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BUF_strndup) #define _BUF_strnlen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BUF_strnlen) #define _CBB_add_asn1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_asn1) #define _CBB_add_asn1_bool BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_asn1_bool) #define _CBB_add_asn1_int64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_asn1_int64) #define _CBB_add_asn1_int64_with_tag BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_asn1_int64_with_tag) #define _CBB_add_asn1_octet_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_asn1_octet_string) #define _CBB_add_asn1_oid_from_text BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_asn1_oid_from_text) #define _CBB_add_asn1_uint64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_asn1_uint64) #define _CBB_add_asn1_uint64_with_tag BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_asn1_uint64_with_tag) #define _CBB_add_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_bytes) #define _CBB_add_latin1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_latin1) #define _CBB_add_space BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_space) #define _CBB_add_u16 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u16) #define _CBB_add_u16_length_prefixed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u16_length_prefixed) #define _CBB_add_u16le BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u16le) #define _CBB_add_u24 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u24) #define _CBB_add_u24_length_prefixed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u24_length_prefixed) #define _CBB_add_u32 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u32) #define _CBB_add_u32le BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u32le) #define _CBB_add_u64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u64) #define _CBB_add_u64le BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u64le) #define _CBB_add_u8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u8) #define _CBB_add_u8_length_prefixed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u8_length_prefixed) #define _CBB_add_ucs2_be BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_ucs2_be) #define _CBB_add_utf32_be BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_utf32_be) #define _CBB_add_utf8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_utf8) #define _CBB_add_zeros BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_zeros) #define _CBB_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_cleanup) #define _CBB_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_data) #define _CBB_did_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_did_write) #define _CBB_discard_child BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_discard_child) #define _CBB_finish BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_finish) #define _CBB_finish_i2d BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_finish_i2d) #define _CBB_flush BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_flush) #define _CBB_flush_asn1_set_of BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_flush_asn1_set_of) 
#define _CBB_get_utf8_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_get_utf8_len) #define _CBB_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_init) #define _CBB_init_fixed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_init_fixed) #define _CBB_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_len) #define _CBB_reserve BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_reserve) #define _CBB_zero BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_zero) #define _CBS_asn1_ber_to_der BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_asn1_ber_to_der) #define _CBS_asn1_bitstring_has_bit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_asn1_bitstring_has_bit) #define _CBS_asn1_oid_to_text BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_asn1_oid_to_text) #define _CBS_contains_zero_byte BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_contains_zero_byte) #define _CBS_copy_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_copy_bytes) #define _CBS_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_data) #define _CBS_get_any_asn1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_any_asn1) #define _CBS_get_any_asn1_element BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_any_asn1_element) #define _CBS_get_any_ber_asn1_element BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_any_ber_asn1_element) #define _CBS_get_asn1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_asn1) #define _CBS_get_asn1_bool BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_asn1_bool) #define _CBS_get_asn1_element BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_asn1_element) #define _CBS_get_asn1_implicit_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_asn1_implicit_string) #define _CBS_get_asn1_int64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_asn1_int64) #define _CBS_get_asn1_uint64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_asn1_uint64) #define _CBS_get_bytes 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_bytes) #define _CBS_get_last_u8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_last_u8) #define _CBS_get_latin1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_latin1) #define _CBS_get_optional_asn1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_optional_asn1) #define _CBS_get_optional_asn1_bool BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_optional_asn1_bool) #define _CBS_get_optional_asn1_octet_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_optional_asn1_octet_string) #define _CBS_get_optional_asn1_uint64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_optional_asn1_uint64) #define _CBS_get_u16 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u16) #define _CBS_get_u16_length_prefixed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u16_length_prefixed) #define _CBS_get_u16le BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u16le) #define _CBS_get_u24 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u24) #define _CBS_get_u24_length_prefixed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u24_length_prefixed) #define _CBS_get_u32 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u32) #define _CBS_get_u32le BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u32le) #define _CBS_get_u64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u64) #define _CBS_get_u64_decimal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u64_decimal) #define _CBS_get_u64le BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u64le) #define _CBS_get_u8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u8) #define _CBS_get_u8_length_prefixed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u8_length_prefixed) #define _CBS_get_ucs2_be BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_ucs2_be) #define _CBS_get_until_first BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_until_first) #define _CBS_get_utf32_be 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_utf32_be) #define _CBS_get_utf8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_utf8) #define _CBS_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_init) #define _CBS_is_unsigned_asn1_integer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_is_unsigned_asn1_integer) #define _CBS_is_valid_asn1_bitstring BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_is_valid_asn1_bitstring) #define _CBS_is_valid_asn1_integer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_is_valid_asn1_integer) #define _CBS_is_valid_asn1_oid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_is_valid_asn1_oid) #define _CBS_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_len) #define _CBS_mem_equal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_mem_equal) #define _CBS_parse_generalized_time BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_parse_generalized_time) #define _CBS_parse_utc_time BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_parse_utc_time) #define _CBS_peek_asn1_tag BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_peek_asn1_tag) #define _CBS_skip BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_skip) #define _CBS_stow BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_stow) #define _CBS_strdup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_strdup) #define _CERTIFICATEPOLICIES_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CERTIFICATEPOLICIES_free) #define _CERTIFICATEPOLICIES_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CERTIFICATEPOLICIES_it) #define _CERTIFICATEPOLICIES_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CERTIFICATEPOLICIES_new) #define _CMAC_CTX_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CMAC_CTX_copy) #define _CMAC_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CMAC_CTX_free) #define _CMAC_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CMAC_CTX_new) #define _CMAC_Final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CMAC_Final) 
#define _CMAC_Init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CMAC_Init) #define _CMAC_Reset BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CMAC_Reset) #define _CMAC_Update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CMAC_Update) #define _CONF_VALUE_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CONF_VALUE_new) #define _CONF_modules_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CONF_modules_free) #define _CONF_modules_load_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CONF_modules_load_file) #define _CONF_parse_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CONF_parse_list) #define _CRL_DIST_POINTS_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRL_DIST_POINTS_free) #define _CRL_DIST_POINTS_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRL_DIST_POINTS_it) #define _CRL_DIST_POINTS_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRL_DIST_POINTS_new) #define _CRYPTO_BUFFER_POOL_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_BUFFER_POOL_free) #define _CRYPTO_BUFFER_POOL_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_BUFFER_POOL_new) #define _CRYPTO_BUFFER_alloc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_BUFFER_alloc) #define _CRYPTO_BUFFER_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_BUFFER_data) #define _CRYPTO_BUFFER_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_BUFFER_free) #define _CRYPTO_BUFFER_init_CBS BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_BUFFER_init_CBS) #define _CRYPTO_BUFFER_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_BUFFER_len) #define _CRYPTO_BUFFER_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_BUFFER_new) #define _CRYPTO_BUFFER_new_from_CBS BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_BUFFER_new_from_CBS) #define _CRYPTO_BUFFER_new_from_static_data_unsafe BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_BUFFER_new_from_static_data_unsafe) #define _CRYPTO_BUFFER_up_ref 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_BUFFER_up_ref) #define _CRYPTO_MUTEX_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_MUTEX_cleanup) #define _CRYPTO_MUTEX_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_MUTEX_init) #define _CRYPTO_MUTEX_lock_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_MUTEX_lock_read) #define _CRYPTO_MUTEX_lock_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_MUTEX_lock_write) #define _CRYPTO_MUTEX_unlock_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_MUTEX_unlock_read) #define _CRYPTO_MUTEX_unlock_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_MUTEX_unlock_write) #define _CRYPTO_POLYVAL_finish BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_POLYVAL_finish) #define _CRYPTO_POLYVAL_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_POLYVAL_init) #define _CRYPTO_POLYVAL_update_blocks BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_POLYVAL_update_blocks) #define _CRYPTO_THREADID_current BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_THREADID_current) #define _CRYPTO_THREADID_set_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_THREADID_set_callback) #define _CRYPTO_THREADID_set_numeric BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_THREADID_set_numeric) #define _CRYPTO_THREADID_set_pointer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_THREADID_set_pointer) #define _CRYPTO_atomic_compare_exchange_weak_u32 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_atomic_compare_exchange_weak_u32) #define _CRYPTO_atomic_load_u32 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_atomic_load_u32) #define _CRYPTO_atomic_store_u32 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_atomic_store_u32) #define _CRYPTO_cbc128_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_cbc128_decrypt) #define _CRYPTO_cbc128_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_cbc128_encrypt) #define 
_CRYPTO_cfb128_1_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_cfb128_1_encrypt) #define _CRYPTO_cfb128_8_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_cfb128_8_encrypt) #define _CRYPTO_cfb128_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_cfb128_encrypt) #define _CRYPTO_chacha_20 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_chacha_20) #define _CRYPTO_cleanup_all_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_cleanup_all_ex_data) #define _CRYPTO_cpu_avoid_zmm_registers BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_cpu_avoid_zmm_registers) #define _CRYPTO_cpu_perf_is_like_silvermont BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_cpu_perf_is_like_silvermont) #define _CRYPTO_ctr128_encrypt_ctr32 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_ctr128_encrypt_ctr32) #define _CRYPTO_fips_186_2_prf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_fips_186_2_prf) #define _CRYPTO_fork_detect_force_madv_wipeonfork_for_testing BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_fork_detect_force_madv_wipeonfork_for_testing) #define _CRYPTO_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_free) #define _CRYPTO_free_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_free_ex_data) #define _CRYPTO_gcm128_aad BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_gcm128_aad) #define _CRYPTO_gcm128_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_gcm128_decrypt) #define _CRYPTO_gcm128_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_gcm128_encrypt) #define _CRYPTO_gcm128_finish BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_gcm128_finish) #define _CRYPTO_gcm128_init_aes_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_gcm128_init_aes_key) #define _CRYPTO_gcm128_init_ctx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_gcm128_init_ctx) #define _CRYPTO_gcm128_tag BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
CRYPTO_gcm128_tag) #define _CRYPTO_get_dynlock_create_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_get_dynlock_create_callback) #define _CRYPTO_get_dynlock_destroy_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_get_dynlock_destroy_callback) #define _CRYPTO_get_dynlock_lock_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_get_dynlock_lock_callback) #define _CRYPTO_get_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_get_ex_data) #define _CRYPTO_get_ex_new_index_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_get_ex_new_index_ex) #define _CRYPTO_get_fork_generation BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_get_fork_generation) #define _CRYPTO_get_lock_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_get_lock_name) #define _CRYPTO_get_locking_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_get_locking_callback) #define _CRYPTO_get_stderr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_get_stderr) #define _CRYPTO_get_thread_local BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_get_thread_local) #define _CRYPTO_ghash_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_ghash_init) #define _CRYPTO_has_asm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_has_asm) #define _CRYPTO_hchacha20 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_hchacha20) #define _CRYPTO_init_sysrand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_init_sysrand) #define _CRYPTO_is_ADX_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_ADX_capable) #define _CRYPTO_is_AESNI_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_AESNI_capable) #define _CRYPTO_is_ARMv8_AES_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_AES_capable) #define _CRYPTO_is_ARMv8_PMULL_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_PMULL_capable) #define _CRYPTO_is_ARMv8_SHA1_capable 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_SHA1_capable) #define _CRYPTO_is_ARMv8_SHA256_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_SHA256_capable) #define _CRYPTO_is_ARMv8_SHA512_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_ARMv8_SHA512_capable) #define _CRYPTO_is_AVX2_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_AVX2_capable) #define _CRYPTO_is_AVX512BW_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_AVX512BW_capable) #define _CRYPTO_is_AVX512VL_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_AVX512VL_capable) #define _CRYPTO_is_AVX_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_AVX_capable) #define _CRYPTO_is_BMI1_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_BMI1_capable) #define _CRYPTO_is_BMI2_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_BMI2_capable) #define _CRYPTO_is_FXSR_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_FXSR_capable) #define _CRYPTO_is_MOVBE_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_MOVBE_capable) #define _CRYPTO_is_NEON_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_NEON_capable) #define _CRYPTO_is_PCLMUL_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_PCLMUL_capable) #define _CRYPTO_is_RDRAND_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_RDRAND_capable) #define _CRYPTO_is_SSE4_1_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_SSE4_1_capable) #define _CRYPTO_is_SSSE3_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_SSSE3_capable) #define _CRYPTO_is_VAES_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_VAES_capable) #define _CRYPTO_is_VPCLMULQDQ_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_VPCLMULQDQ_capable) #define _CRYPTO_is_confidential_build BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
CRYPTO_is_confidential_build) #define _CRYPTO_is_intel_cpu BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_intel_cpu) #define _CRYPTO_is_x86_SHA_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_x86_SHA_capable) #define _CRYPTO_library_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_library_init) #define _CRYPTO_malloc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_malloc) #define _CRYPTO_malloc_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_malloc_init) #define _CRYPTO_memcmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_memcmp) #define _CRYPTO_new_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_new_ex_data) #define _CRYPTO_num_locks BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_num_locks) #define _CRYPTO_ofb128_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_ofb128_encrypt) #define _CRYPTO_once BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_once) #define _CRYPTO_poly1305_finish BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_poly1305_finish) #define _CRYPTO_poly1305_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_poly1305_init) #define _CRYPTO_poly1305_update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_poly1305_update) #define _CRYPTO_pre_sandbox_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_pre_sandbox_init) #define _CRYPTO_rdrand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_rdrand) #define _CRYPTO_rdrand_multiple8_buf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_rdrand_multiple8_buf) #define _CRYPTO_realloc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_realloc) #define _CRYPTO_refcount_dec_and_test_zero BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_refcount_dec_and_test_zero) #define _CRYPTO_refcount_inc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_refcount_inc) #define _CRYPTO_secure_malloc_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_secure_malloc_init) #define 
_CRYPTO_secure_malloc_initialized BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_secure_malloc_initialized) #define _CRYPTO_secure_used BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_secure_used) #define _CRYPTO_set_add_lock_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_set_add_lock_callback) #define _CRYPTO_set_dynlock_create_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_set_dynlock_create_callback) #define _CRYPTO_set_dynlock_destroy_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_set_dynlock_destroy_callback) #define _CRYPTO_set_dynlock_lock_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_set_dynlock_lock_callback) #define _CRYPTO_set_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_set_ex_data) #define _CRYPTO_set_id_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_set_id_callback) #define _CRYPTO_set_locking_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_set_locking_callback) #define _CRYPTO_set_thread_local BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_set_thread_local) #define _CRYPTO_sysrand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_sysrand) #define _CRYPTO_sysrand_for_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_sysrand_for_seed) #define _CRYPTO_sysrand_if_available BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_sysrand_if_available) #define _CRYPTO_tls13_hkdf_expand_label BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_tls13_hkdf_expand_label) #define _CRYPTO_tls1_prf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_tls1_prf) #define _CRYPTO_xor16 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_xor16) #define _CTR_DRBG_clear BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CTR_DRBG_clear) #define _CTR_DRBG_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CTR_DRBG_free) #define _CTR_DRBG_generate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CTR_DRBG_generate) #define 
_CTR_DRBG_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CTR_DRBG_init) #define _CTR_DRBG_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CTR_DRBG_new) #define _CTR_DRBG_reseed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CTR_DRBG_reseed) #define _ChaCha20_ctr32_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ChaCha20_ctr32_avx2) #define _ChaCha20_ctr32_avx2_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ChaCha20_ctr32_avx2_capable) #define _ChaCha20_ctr32_neon BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ChaCha20_ctr32_neon) #define _ChaCha20_ctr32_neon_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ChaCha20_ctr32_neon_capable) #define _ChaCha20_ctr32_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ChaCha20_ctr32_nohw) #define _ChaCha20_ctr32_ssse3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ChaCha20_ctr32_ssse3) #define _ChaCha20_ctr32_ssse3_4x BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ChaCha20_ctr32_ssse3_4x) #define _ChaCha20_ctr32_ssse3_4x_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ChaCha20_ctr32_ssse3_4x_capable) #define _ChaCha20_ctr32_ssse3_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ChaCha20_ctr32_ssse3_capable) #define _DES_decrypt3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_decrypt3) #define _DES_ecb3_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_ecb3_encrypt) #define _DES_ecb3_encrypt_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_ecb3_encrypt_ex) #define _DES_ecb_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_ecb_encrypt) #define _DES_ecb_encrypt_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_ecb_encrypt_ex) #define _DES_ede2_cbc_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_ede2_cbc_encrypt) #define _DES_ede3_cbc_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_ede3_cbc_encrypt) #define _DES_ede3_cbc_encrypt_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_ede3_cbc_encrypt_ex) #define _DES_encrypt3 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_encrypt3) #define _DES_ncbc_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_ncbc_encrypt) #define _DES_ncbc_encrypt_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_ncbc_encrypt_ex) #define _DES_set_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_set_key) #define _DES_set_key_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_set_key_ex) #define _DES_set_key_unchecked BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_set_key_unchecked) #define _DES_set_odd_parity BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DES_set_odd_parity) #define _DH_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_bits) #define _DH_check BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_check) #define _DH_check_pub_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_check_pub_key) #define _DH_compute_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_compute_key) #define _DH_compute_key_hashed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_compute_key_hashed) #define _DH_compute_key_padded BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_compute_key_padded) #define _DH_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_free) #define _DH_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_generate_key) #define _DH_generate_parameters_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_generate_parameters_ex) #define _DH_get0_g BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_get0_g) #define _DH_get0_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_get0_key) #define _DH_get0_p BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_get0_p) #define _DH_get0_pqg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_get0_pqg) #define _DH_get0_priv_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_get0_priv_key) #define _DH_get0_pub_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_get0_pub_key) #define _DH_get0_q BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_get0_q) #define 
_DH_get_rfc7919_2048 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_get_rfc7919_2048) #define _DH_marshal_parameters BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_marshal_parameters) #define _DH_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_new) #define _DH_num_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_num_bits) #define _DH_parse_parameters BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_parse_parameters) #define _DH_set0_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_set0_key) #define _DH_set0_pqg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_set0_pqg) #define _DH_set_length BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_set_length) #define _DH_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_size) #define _DH_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DH_up_ref) #define _DHparams_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DHparams_dup) #define _DIRECTORYSTRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DIRECTORYSTRING_free) #define _DIRECTORYSTRING_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DIRECTORYSTRING_it) #define _DIRECTORYSTRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DIRECTORYSTRING_new) #define _DISPLAYTEXT_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DISPLAYTEXT_free) #define _DISPLAYTEXT_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DISPLAYTEXT_it) #define _DISPLAYTEXT_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DISPLAYTEXT_new) #define _DIST_POINT_NAME_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DIST_POINT_NAME_free) #define _DIST_POINT_NAME_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DIST_POINT_NAME_new) #define _DIST_POINT_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DIST_POINT_free) #define _DIST_POINT_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DIST_POINT_new) #define _DIST_POINT_set_dpname BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DIST_POINT_set_dpname) #define _DSA_SIG_free 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_SIG_free) #define _DSA_SIG_get0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_SIG_get0) #define _DSA_SIG_marshal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_SIG_marshal) #define _DSA_SIG_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_SIG_new) #define _DSA_SIG_parse BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_SIG_parse) #define _DSA_SIG_set0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_SIG_set0) #define _DSA_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_bits) #define _DSA_check_signature BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_check_signature) #define _DSA_do_check_signature BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_do_check_signature) #define _DSA_do_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_do_sign) #define _DSA_do_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_do_verify) #define _DSA_dup_DH BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_dup_DH) #define _DSA_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_free) #define _DSA_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_generate_key) #define _DSA_generate_parameters_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_generate_parameters_ex) #define _DSA_get0_g BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_get0_g) #define _DSA_get0_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_get0_key) #define _DSA_get0_p BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_get0_p) #define _DSA_get0_pqg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_get0_pqg) #define _DSA_get0_priv_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_get0_priv_key) #define _DSA_get0_pub_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_get0_pub_key) #define _DSA_get0_q BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_get0_q) #define _DSA_get_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_get_ex_data) #define _DSA_get_ex_new_index 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_get_ex_new_index) #define _DSA_marshal_parameters BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_marshal_parameters) #define _DSA_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_marshal_private_key) #define _DSA_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_marshal_public_key) #define _DSA_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_new) #define _DSA_parse_parameters BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_parse_parameters) #define _DSA_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_parse_private_key) #define _DSA_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_parse_public_key) #define _DSA_set0_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_set0_key) #define _DSA_set0_pqg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_set0_pqg) #define _DSA_set_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_set_ex_data) #define _DSA_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_sign) #define _DSA_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_size) #define _DSA_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_up_ref) #define _DSA_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSA_verify) #define _DSAparams_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DSAparams_dup) #define _DTLS_client_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLS_client_method) #define _DTLS_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLS_method) #define _DTLS_server_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLS_server_method) #define _DTLS_with_buffers_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLS_with_buffers_method) #define _DTLSv1_2_client_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLSv1_2_client_method) #define _DTLSv1_2_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLSv1_2_method) #define 
_DTLSv1_2_server_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLSv1_2_server_method) #define _DTLSv1_client_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLSv1_client_method) #define _DTLSv1_get_timeout BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLSv1_get_timeout) #define _DTLSv1_handle_timeout BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLSv1_handle_timeout) #define _DTLSv1_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLSv1_method) #define _DTLSv1_server_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLSv1_server_method) #define _DTLSv1_set_initial_timeout_duration BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, DTLSv1_set_initial_timeout_duration) #define _ECDH_compute_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDH_compute_key) #define _ECDH_compute_key_fips BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDH_compute_key_fips) #define _ECDSA_SIG_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_SIG_free) #define _ECDSA_SIG_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_SIG_from_bytes) #define _ECDSA_SIG_get0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_SIG_get0) #define _ECDSA_SIG_get0_r BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_SIG_get0_r) #define _ECDSA_SIG_get0_s BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_SIG_get0_s) #define _ECDSA_SIG_marshal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_SIG_marshal) #define _ECDSA_SIG_max_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_SIG_max_len) #define _ECDSA_SIG_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_SIG_new) #define _ECDSA_SIG_parse BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_SIG_parse) #define _ECDSA_SIG_set0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_SIG_set0) #define _ECDSA_SIG_to_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_SIG_to_bytes) #define _ECDSA_do_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_do_sign) #define 
_ECDSA_do_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_do_verify) #define _ECDSA_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_sign) #define _ECDSA_sign_with_nonce_and_leak_private_key_for_testing BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_sign_with_nonce_and_leak_private_key_for_testing) #define _ECDSA_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_size) #define _ECDSA_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ECDSA_verify) #define _EC_GFp_mont_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GFp_mont_method) #define _EC_GFp_nistp224_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GFp_nistp224_method) #define _EC_GFp_nistp256_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GFp_nistp256_method) #define _EC_GFp_nistz256_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GFp_nistz256_method) #define _EC_GROUP_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_cmp) #define _EC_GROUP_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_dup) #define _EC_GROUP_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_free) #define _EC_GROUP_get0_generator BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_get0_generator) #define _EC_GROUP_get0_order BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_get0_order) #define _EC_GROUP_get_asn1_flag BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_get_asn1_flag) #define _EC_GROUP_get_cofactor BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_get_cofactor) #define _EC_GROUP_get_curve_GFp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_get_curve_GFp) #define _EC_GROUP_get_curve_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_get_curve_name) #define _EC_GROUP_get_degree BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_get_degree) #define _EC_GROUP_get_order BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_get_order) #define _EC_GROUP_method_of 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_method_of) #define _EC_GROUP_new_by_curve_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_new_by_curve_name) #define _EC_GROUP_new_curve_GFp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_new_curve_GFp) #define _EC_GROUP_order_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_order_bits) #define _EC_GROUP_set_asn1_flag BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_set_asn1_flag) #define _EC_GROUP_set_generator BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_set_generator) #define _EC_GROUP_set_point_conversion_form BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_GROUP_set_point_conversion_form) #define _EC_KEY_check_fips BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_check_fips) #define _EC_KEY_check_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_check_key) #define _EC_KEY_derive_from_secret BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_derive_from_secret) #define _EC_KEY_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_dup) #define _EC_KEY_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_free) #define _EC_KEY_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_generate_key) #define _EC_KEY_generate_key_fips BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_generate_key_fips) #define _EC_KEY_get0_group BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_get0_group) #define _EC_KEY_get0_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_get0_private_key) #define _EC_KEY_get0_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_get0_public_key) #define _EC_KEY_get_conv_form BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_get_conv_form) #define _EC_KEY_get_enc_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_get_enc_flags) #define _EC_KEY_get_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_get_ex_data) #define _EC_KEY_get_ex_new_index 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_get_ex_new_index) #define _EC_KEY_is_opaque BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_is_opaque) #define _EC_KEY_key2buf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_key2buf) #define _EC_KEY_marshal_curve_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_marshal_curve_name) #define _EC_KEY_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_marshal_private_key) #define _EC_KEY_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_new) #define _EC_KEY_new_by_curve_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_new_by_curve_name) #define _EC_KEY_new_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_new_method) #define _EC_KEY_oct2key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_oct2key) #define _EC_KEY_oct2priv BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_oct2priv) #define _EC_KEY_parse_curve_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_parse_curve_name) #define _EC_KEY_parse_parameters BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_parse_parameters) #define _EC_KEY_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_parse_private_key) #define _EC_KEY_priv2buf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_priv2buf) #define _EC_KEY_priv2oct BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_priv2oct) #define _EC_KEY_set_asn1_flag BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_set_asn1_flag) #define _EC_KEY_set_conv_form BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_set_conv_form) #define _EC_KEY_set_enc_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_set_enc_flags) #define _EC_KEY_set_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_set_ex_data) #define _EC_KEY_set_group BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_set_group) #define _EC_KEY_set_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
EC_KEY_set_private_key) #define _EC_KEY_set_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_set_public_key) #define _EC_KEY_set_public_key_affine_coordinates BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_set_public_key_affine_coordinates) #define _EC_KEY_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_KEY_up_ref) #define _EC_METHOD_get_field_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_METHOD_get_field_type) #define _EC_POINT_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_add) #define _EC_POINT_clear_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_clear_free) #define _EC_POINT_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_cmp) #define _EC_POINT_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_copy) #define _EC_POINT_dbl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_dbl) #define _EC_POINT_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_dup) #define _EC_POINT_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_free) #define _EC_POINT_get_affine_coordinates BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_get_affine_coordinates) #define _EC_POINT_get_affine_coordinates_GFp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_get_affine_coordinates_GFp) #define _EC_POINT_invert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_invert) #define _EC_POINT_is_at_infinity BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_is_at_infinity) #define _EC_POINT_is_on_curve BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_is_on_curve) #define _EC_POINT_mul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_mul) #define _EC_POINT_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_new) #define _EC_POINT_oct2point BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_oct2point) #define _EC_POINT_point2buf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_point2buf) #define _EC_POINT_point2cbb 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_point2cbb) #define _EC_POINT_point2oct BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_point2oct) #define _EC_POINT_set_affine_coordinates BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_set_affine_coordinates) #define _EC_POINT_set_affine_coordinates_GFp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_set_affine_coordinates_GFp) #define _EC_POINT_set_compressed_coordinates_GFp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_set_compressed_coordinates_GFp) #define _EC_POINT_set_to_infinity BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_POINT_set_to_infinity) #define _EC_curve_nid2nist BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_curve_nid2nist) #define _EC_curve_nist2nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_curve_nist2nid) #define _EC_get_builtin_curves BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_get_builtin_curves) #define _EC_group_p224 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_group_p224) #define _EC_group_p256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_group_p256) #define _EC_group_p384 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_group_p384) #define _EC_group_p521 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_group_p521) #define _EC_hash_to_curve_p256_xmd_sha256_sswu BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_hash_to_curve_p256_xmd_sha256_sswu) #define _EC_hash_to_curve_p384_xmd_sha384_sswu BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EC_hash_to_curve_p384_xmd_sha384_sswu) #define _ED25519_keypair BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ED25519_keypair) #define _ED25519_keypair_from_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ED25519_keypair_from_seed) #define _ED25519_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ED25519_sign) #define _ED25519_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ED25519_verify) #define _EDIPARTYNAME_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
EDIPARTYNAME_free) #define _EDIPARTYNAME_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EDIPARTYNAME_new) #define _ENGINE_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ENGINE_free) #define _ENGINE_get_ECDSA_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ENGINE_get_ECDSA_method) #define _ENGINE_get_RSA_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ENGINE_get_RSA_method) #define _ENGINE_load_builtin_engines BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ENGINE_load_builtin_engines) #define _ENGINE_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ENGINE_new) #define _ENGINE_register_all_complete BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ENGINE_register_all_complete) #define _ENGINE_set_ECDSA_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ENGINE_set_ECDSA_method) #define _ENGINE_set_RSA_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ENGINE_set_RSA_method) #define _ERR_GET_LIB BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_GET_LIB) #define _ERR_GET_REASON BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_GET_REASON) #define _ERR_SAVE_STATE_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_SAVE_STATE_free) #define _ERR_add_error_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_add_error_data) #define _ERR_add_error_dataf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_add_error_dataf) #define _ERR_clear_error BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_clear_error) #define _ERR_clear_system_error BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_clear_system_error) #define _ERR_error_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_error_string) #define _ERR_error_string_n BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_error_string_n) #define _ERR_free_strings BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_free_strings) #define _ERR_func_error_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_func_error_string) #define _ERR_get_error 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_get_error) #define _ERR_get_error_line BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_get_error_line) #define _ERR_get_error_line_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_get_error_line_data) #define _ERR_get_next_error_library BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_get_next_error_library) #define _ERR_lib_error_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_lib_error_string) #define _ERR_lib_symbol_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_lib_symbol_name) #define _ERR_load_BIO_strings BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_load_BIO_strings) #define _ERR_load_ERR_strings BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_load_ERR_strings) #define _ERR_load_RAND_strings BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_load_RAND_strings) #define _ERR_load_SSL_strings BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_load_SSL_strings) #define _ERR_load_crypto_strings BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_load_crypto_strings) #define _ERR_peek_error BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_peek_error) #define _ERR_peek_error_line BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_peek_error_line) #define _ERR_peek_error_line_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_peek_error_line_data) #define _ERR_peek_last_error BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_peek_last_error) #define _ERR_peek_last_error_line BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_peek_last_error_line) #define _ERR_peek_last_error_line_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_peek_last_error_line_data) #define _ERR_pop_to_mark BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_pop_to_mark) #define _ERR_print_errors BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_print_errors) #define _ERR_print_errors_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_print_errors_cb) #define _ERR_print_errors_fp 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_print_errors_fp) #define _ERR_put_error BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_put_error) #define _ERR_reason_error_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_reason_error_string) #define _ERR_reason_symbol_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_reason_symbol_name) #define _ERR_remove_state BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_remove_state) #define _ERR_remove_thread_state BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_remove_thread_state) #define _ERR_restore_state BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_restore_state) #define _ERR_save_state BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_save_state) #define _ERR_set_error_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_set_error_data) #define _ERR_set_mark BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_set_mark) #define _EVP_AEAD_CTX_aead BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_CTX_aead) #define _EVP_AEAD_CTX_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_CTX_cleanup) #define _EVP_AEAD_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_CTX_free) #define _EVP_AEAD_CTX_get_iv BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_CTX_get_iv) #define _EVP_AEAD_CTX_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_CTX_init) #define _EVP_AEAD_CTX_init_with_direction BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_CTX_init_with_direction) #define _EVP_AEAD_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_CTX_new) #define _EVP_AEAD_CTX_open BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_CTX_open) #define _EVP_AEAD_CTX_open_gather BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_CTX_open_gather) #define _EVP_AEAD_CTX_seal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_CTX_seal) #define _EVP_AEAD_CTX_seal_scatter BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
EVP_AEAD_CTX_seal_scatter) #define _EVP_AEAD_CTX_tag_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_CTX_tag_len) #define _EVP_AEAD_CTX_zero BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_CTX_zero) #define _EVP_AEAD_key_length BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_key_length) #define _EVP_AEAD_max_overhead BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_max_overhead) #define _EVP_AEAD_max_tag_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_max_tag_len) #define _EVP_AEAD_nonce_length BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_AEAD_nonce_length) #define _EVP_BytesToKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_BytesToKey) #define _EVP_CIPHER_CTX_block_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_block_size) #define _EVP_CIPHER_CTX_cipher BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_cipher) #define _EVP_CIPHER_CTX_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_cleanup) #define _EVP_CIPHER_CTX_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_copy) #define _EVP_CIPHER_CTX_ctrl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_ctrl) #define _EVP_CIPHER_CTX_encrypting BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_encrypting) #define _EVP_CIPHER_CTX_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_flags) #define _EVP_CIPHER_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_free) #define _EVP_CIPHER_CTX_get_app_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_get_app_data) #define _EVP_CIPHER_CTX_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_init) #define _EVP_CIPHER_CTX_iv_length BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_iv_length) #define _EVP_CIPHER_CTX_key_length BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_key_length) #define _EVP_CIPHER_CTX_mode 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_mode) #define _EVP_CIPHER_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_new) #define _EVP_CIPHER_CTX_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_nid) #define _EVP_CIPHER_CTX_reset BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_reset) #define _EVP_CIPHER_CTX_set_app_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_set_app_data) #define _EVP_CIPHER_CTX_set_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_set_flags) #define _EVP_CIPHER_CTX_set_key_length BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_set_key_length) #define _EVP_CIPHER_CTX_set_padding BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_set_padding) #define _EVP_CIPHER_block_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_block_size) #define _EVP_CIPHER_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_flags) #define _EVP_CIPHER_iv_length BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_iv_length) #define _EVP_CIPHER_key_length BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_key_length) #define _EVP_CIPHER_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_mode) #define _EVP_CIPHER_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_nid) #define _EVP_Cipher BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_Cipher) #define _EVP_CipherFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CipherFinal) #define _EVP_CipherFinal_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CipherFinal_ex) #define _EVP_CipherInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CipherInit) #define _EVP_CipherInit_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CipherInit_ex) #define _EVP_CipherUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CipherUpdate) #define _EVP_DecodeBase64 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecodeBase64) 
#define _EVP_DecodeBlock BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecodeBlock) #define _EVP_DecodeFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecodeFinal) #define _EVP_DecodeInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecodeInit) #define _EVP_DecodeUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecodeUpdate) #define _EVP_DecodedLength BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecodedLength) #define _EVP_DecryptFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecryptFinal) #define _EVP_DecryptFinal_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecryptFinal_ex) #define _EVP_DecryptInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecryptInit) #define _EVP_DecryptInit_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecryptInit_ex) #define _EVP_DecryptUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecryptUpdate) #define _EVP_Digest BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_Digest) #define _EVP_DigestFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestFinal) #define _EVP_DigestFinalXOF BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestFinalXOF) #define _EVP_DigestFinal_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestFinal_ex) #define _EVP_DigestInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestInit) #define _EVP_DigestInit_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestInit_ex) #define _EVP_DigestSign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestSign) #define _EVP_DigestSignFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestSignFinal) #define _EVP_DigestSignInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestSignInit) #define _EVP_DigestSignUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestSignUpdate) #define _EVP_DigestUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestUpdate) #define _EVP_DigestVerify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
EVP_DigestVerify) #define _EVP_DigestVerifyFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestVerifyFinal) #define _EVP_DigestVerifyInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestVerifyInit) #define _EVP_DigestVerifyUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DigestVerifyUpdate) #define _EVP_ENCODE_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_ENCODE_CTX_free) #define _EVP_ENCODE_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_ENCODE_CTX_new) #define _EVP_EncodeBlock BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncodeBlock) #define _EVP_EncodeFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncodeFinal) #define _EVP_EncodeInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncodeInit) #define _EVP_EncodeUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncodeUpdate) #define _EVP_EncodedLength BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncodedLength) #define _EVP_EncryptFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncryptFinal) #define _EVP_EncryptFinal_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncryptFinal_ex) #define _EVP_EncryptInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncryptInit) #define _EVP_EncryptInit_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncryptInit_ex) #define _EVP_EncryptUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncryptUpdate) #define _EVP_HPKE_AEAD_aead BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_AEAD_aead) #define _EVP_HPKE_AEAD_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_AEAD_id) #define _EVP_HPKE_CTX_aead BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_aead) #define _EVP_HPKE_CTX_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_cleanup) #define _EVP_HPKE_CTX_export BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_export) #define _EVP_HPKE_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_free) 
#define _EVP_HPKE_CTX_kdf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_kdf) #define _EVP_HPKE_CTX_kem BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_kem) #define _EVP_HPKE_CTX_max_overhead BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_max_overhead) #define _EVP_HPKE_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_new) #define _EVP_HPKE_CTX_open BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_open) #define _EVP_HPKE_CTX_seal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_seal) #define _EVP_HPKE_CTX_setup_auth_recipient BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_setup_auth_recipient) #define _EVP_HPKE_CTX_setup_auth_sender BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_setup_auth_sender) #define _EVP_HPKE_CTX_setup_auth_sender_with_seed_for_testing BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_setup_auth_sender_with_seed_for_testing) #define _EVP_HPKE_CTX_setup_recipient BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_setup_recipient) #define _EVP_HPKE_CTX_setup_sender BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_setup_sender) #define _EVP_HPKE_CTX_setup_sender_with_seed_for_testing BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_setup_sender_with_seed_for_testing) #define _EVP_HPKE_CTX_zero BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_CTX_zero) #define _EVP_HPKE_KDF_hkdf_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KDF_hkdf_md) #define _EVP_HPKE_KDF_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KDF_id) #define _EVP_HPKE_KEM_enc_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEM_enc_len) #define _EVP_HPKE_KEM_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEM_id) #define _EVP_HPKE_KEM_private_key_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEM_private_key_len) #define _EVP_HPKE_KEM_public_key_len 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEM_public_key_len) #define _EVP_HPKE_KEY_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEY_cleanup) #define _EVP_HPKE_KEY_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEY_copy) #define _EVP_HPKE_KEY_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEY_free) #define _EVP_HPKE_KEY_generate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEY_generate) #define _EVP_HPKE_KEY_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEY_init) #define _EVP_HPKE_KEY_kem BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEY_kem) #define _EVP_HPKE_KEY_move BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEY_move) #define _EVP_HPKE_KEY_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEY_new) #define _EVP_HPKE_KEY_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEY_private_key) #define _EVP_HPKE_KEY_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEY_public_key) #define _EVP_HPKE_KEY_zero BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_HPKE_KEY_zero) #define _EVP_MD_CTX_block_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_block_size) #define _EVP_MD_CTX_cleanse BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_cleanse) #define _EVP_MD_CTX_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_cleanup) #define _EVP_MD_CTX_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_copy) #define _EVP_MD_CTX_copy_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_copy_ex) #define _EVP_MD_CTX_create BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_create) #define _EVP_MD_CTX_destroy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_destroy) #define _EVP_MD_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_free) #define _EVP_MD_CTX_get0_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_get0_md) #define 
_EVP_MD_CTX_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_init) #define _EVP_MD_CTX_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_md) #define _EVP_MD_CTX_move BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_move) #define _EVP_MD_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_new) #define _EVP_MD_CTX_reset BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_reset) #define _EVP_MD_CTX_set_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_set_flags) #define _EVP_MD_CTX_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_size) #define _EVP_MD_CTX_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_CTX_type) #define _EVP_MD_block_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_block_size) #define _EVP_MD_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_flags) #define _EVP_MD_meth_get_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_meth_get_flags) #define _EVP_MD_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_nid) #define _EVP_MD_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_size) #define _EVP_MD_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_MD_type) #define _EVP_PBE_scrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PBE_scrypt) #define _EVP_PKCS82PKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKCS82PKEY) #define _EVP_PKEY2PKCS8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY2PKCS8) #define _EVP_PKEY_CTX_add1_hkdf_info BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_add1_hkdf_info) #define _EVP_PKEY_CTX_ctrl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_ctrl) #define _EVP_PKEY_CTX_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_dup) #define _EVP_PKEY_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_free) #define _EVP_PKEY_CTX_get0_pkey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_get0_pkey) #define 
_EVP_PKEY_CTX_get0_rsa_oaep_label BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_get0_rsa_oaep_label) #define _EVP_PKEY_CTX_get_rsa_mgf1_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_get_rsa_mgf1_md) #define _EVP_PKEY_CTX_get_rsa_oaep_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_get_rsa_oaep_md) #define _EVP_PKEY_CTX_get_rsa_padding BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_get_rsa_padding) #define _EVP_PKEY_CTX_get_rsa_pss_saltlen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_get_rsa_pss_saltlen) #define _EVP_PKEY_CTX_get_signature_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_get_signature_md) #define _EVP_PKEY_CTX_hkdf_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_hkdf_mode) #define _EVP_PKEY_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_new) #define _EVP_PKEY_CTX_new_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_new_id) #define _EVP_PKEY_CTX_set0_rsa_oaep_label BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set0_rsa_oaep_label) #define _EVP_PKEY_CTX_set1_hkdf_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set1_hkdf_key) #define _EVP_PKEY_CTX_set1_hkdf_salt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set1_hkdf_salt) #define _EVP_PKEY_CTX_set_dh_pad BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_dh_pad) #define _EVP_PKEY_CTX_set_dsa_paramgen_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_dsa_paramgen_bits) #define _EVP_PKEY_CTX_set_dsa_paramgen_q_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_dsa_paramgen_q_bits) #define _EVP_PKEY_CTX_set_ec_param_enc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_ec_param_enc) #define _EVP_PKEY_CTX_set_ec_paramgen_curve_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_ec_paramgen_curve_nid) #define _EVP_PKEY_CTX_set_hkdf_md 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_hkdf_md) #define _EVP_PKEY_CTX_set_rsa_keygen_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_keygen_bits) #define _EVP_PKEY_CTX_set_rsa_keygen_pubexp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_keygen_pubexp) #define _EVP_PKEY_CTX_set_rsa_mgf1_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_mgf1_md) #define _EVP_PKEY_CTX_set_rsa_oaep_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_oaep_md) #define _EVP_PKEY_CTX_set_rsa_padding BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_padding) #define _EVP_PKEY_CTX_set_rsa_pss_keygen_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_pss_keygen_md) #define _EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md) #define _EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen) #define _EVP_PKEY_CTX_set_rsa_pss_saltlen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_rsa_pss_saltlen) #define _EVP_PKEY_CTX_set_signature_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_CTX_set_signature_md) #define _EVP_PKEY_assign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_assign) #define _EVP_PKEY_assign_DH BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_assign_DH) #define _EVP_PKEY_assign_DSA BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_assign_DSA) #define _EVP_PKEY_assign_EC_KEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_assign_EC_KEY) #define _EVP_PKEY_assign_RSA BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_assign_RSA) #define _EVP_PKEY_base_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_base_id) #define _EVP_PKEY_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_bits) #define _EVP_PKEY_cmp 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_cmp) #define _EVP_PKEY_cmp_parameters BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_cmp_parameters) #define _EVP_PKEY_copy_parameters BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_copy_parameters) #define _EVP_PKEY_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_decrypt) #define _EVP_PKEY_decrypt_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_decrypt_init) #define _EVP_PKEY_derive BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_derive) #define _EVP_PKEY_derive_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_derive_init) #define _EVP_PKEY_derive_set_peer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_derive_set_peer) #define _EVP_PKEY_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_encrypt) #define _EVP_PKEY_encrypt_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_encrypt_init) #define _EVP_PKEY_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_free) #define _EVP_PKEY_get0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_get0) #define _EVP_PKEY_get0_DH BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_get0_DH) #define _EVP_PKEY_get0_DSA BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_get0_DSA) #define _EVP_PKEY_get0_EC_KEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_get0_EC_KEY) #define _EVP_PKEY_get0_RSA BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_get0_RSA) #define _EVP_PKEY_get1_DH BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_get1_DH) #define _EVP_PKEY_get1_DSA BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_get1_DSA) #define _EVP_PKEY_get1_EC_KEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_get1_EC_KEY) #define _EVP_PKEY_get1_RSA BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_get1_RSA) #define _EVP_PKEY_get1_tls_encodedpoint BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_get1_tls_encodedpoint) 
#define _EVP_PKEY_get_raw_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_get_raw_private_key) #define _EVP_PKEY_get_raw_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_get_raw_public_key) #define _EVP_PKEY_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_id) #define _EVP_PKEY_is_opaque BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_is_opaque) #define _EVP_PKEY_keygen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_keygen) #define _EVP_PKEY_keygen_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_keygen_init) #define _EVP_PKEY_missing_parameters BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_missing_parameters) #define _EVP_PKEY_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_new) #define _EVP_PKEY_new_raw_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_new_raw_private_key) #define _EVP_PKEY_new_raw_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_new_raw_public_key) #define _EVP_PKEY_paramgen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_paramgen) #define _EVP_PKEY_paramgen_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_paramgen_init) #define _EVP_PKEY_print_params BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_print_params) #define _EVP_PKEY_print_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_print_private) #define _EVP_PKEY_print_public BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_print_public) #define _EVP_PKEY_set1_DH BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_set1_DH) #define _EVP_PKEY_set1_DSA BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_set1_DSA) #define _EVP_PKEY_set1_EC_KEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_set1_EC_KEY) #define _EVP_PKEY_set1_RSA BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_set1_RSA) #define _EVP_PKEY_set1_tls_encodedpoint BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
EVP_PKEY_set1_tls_encodedpoint) #define _EVP_PKEY_set_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_set_type) #define _EVP_PKEY_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_sign) #define _EVP_PKEY_sign_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_sign_init) #define _EVP_PKEY_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_size) #define _EVP_PKEY_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_type) #define _EVP_PKEY_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_up_ref) #define _EVP_PKEY_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_verify) #define _EVP_PKEY_verify_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_verify_init) #define _EVP_PKEY_verify_recover BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_verify_recover) #define _EVP_PKEY_verify_recover_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_PKEY_verify_recover_init) #define _EVP_SignFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_SignFinal) #define _EVP_SignInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_SignInit) #define _EVP_SignInit_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_SignInit_ex) #define _EVP_SignUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_SignUpdate) #define _EVP_VerifyFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_VerifyFinal) #define _EVP_VerifyInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_VerifyInit) #define _EVP_VerifyInit_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_VerifyInit_ex) #define _EVP_VerifyUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_VerifyUpdate) #define _EVP_add_cipher_alias BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_add_cipher_alias) #define _EVP_add_digest BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_add_digest) #define _EVP_aead_aes_128_cbc_sha1_tls BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_cbc_sha1_tls) #define 
_EVP_aead_aes_128_cbc_sha1_tls_implicit_iv BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_cbc_sha1_tls_implicit_iv) #define _EVP_aead_aes_128_cbc_sha256_tls BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_cbc_sha256_tls) #define _EVP_aead_aes_128_ccm_bluetooth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_ccm_bluetooth) #define _EVP_aead_aes_128_ccm_bluetooth_8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_ccm_bluetooth_8) #define _EVP_aead_aes_128_ccm_matter BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_ccm_matter) #define _EVP_aead_aes_128_ctr_hmac_sha256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_ctr_hmac_sha256) #define _EVP_aead_aes_128_gcm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm) #define _EVP_aead_aes_128_gcm_randnonce BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_randnonce) #define _EVP_aead_aes_128_gcm_siv BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_siv) #define _EVP_aead_aes_128_gcm_tls12 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_tls12) #define _EVP_aead_aes_128_gcm_tls13 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_tls13) #define _EVP_aead_aes_192_gcm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_192_gcm) #define _EVP_aead_aes_256_cbc_sha1_tls BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_256_cbc_sha1_tls) #define _EVP_aead_aes_256_cbc_sha1_tls_implicit_iv BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_256_cbc_sha1_tls_implicit_iv) #define _EVP_aead_aes_256_ctr_hmac_sha256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_256_ctr_hmac_sha256) #define _EVP_aead_aes_256_gcm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm) #define _EVP_aead_aes_256_gcm_randnonce BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_randnonce) #define 
_EVP_aead_aes_256_gcm_siv BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_siv) #define _EVP_aead_aes_256_gcm_tls12 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_tls12) #define _EVP_aead_aes_256_gcm_tls13 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_tls13) #define _EVP_aead_chacha20_poly1305 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_chacha20_poly1305) #define _EVP_aead_des_ede3_cbc_sha1_tls BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_des_ede3_cbc_sha1_tls) #define _EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv) #define _EVP_aead_xchacha20_poly1305 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_xchacha20_poly1305) #define _EVP_aes_128_cbc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_128_cbc) #define _EVP_aes_128_ctr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_128_ctr) #define _EVP_aes_128_ecb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_128_ecb) #define _EVP_aes_128_gcm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_128_gcm) #define _EVP_aes_128_ofb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_128_ofb) #define _EVP_aes_192_cbc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_192_cbc) #define _EVP_aes_192_ctr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_192_ctr) #define _EVP_aes_192_ecb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_192_ecb) #define _EVP_aes_192_gcm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_192_gcm) #define _EVP_aes_192_ofb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_192_ofb) #define _EVP_aes_256_cbc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_256_cbc) #define _EVP_aes_256_ctr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_256_ctr) #define _EVP_aes_256_ecb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_256_ecb) #define _EVP_aes_256_gcm 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_256_gcm) #define _EVP_aes_256_ofb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_256_ofb) #define _EVP_blake2b256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_blake2b256) #define _EVP_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_cleanup) #define _EVP_des_cbc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_des_cbc) #define _EVP_des_ecb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_des_ecb) #define _EVP_des_ede BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_des_ede) #define _EVP_des_ede3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_des_ede3) #define _EVP_des_ede3_cbc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_des_ede3_cbc) #define _EVP_des_ede3_ecb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_des_ede3_ecb) #define _EVP_des_ede_cbc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_des_ede_cbc) #define _EVP_enc_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_enc_null) #define _EVP_get_cipherbyname BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_get_cipherbyname) #define _EVP_get_cipherbynid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_get_cipherbynid) #define _EVP_get_digestbyname BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_get_digestbyname) #define _EVP_get_digestbynid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_get_digestbynid) #define _EVP_get_digestbyobj BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_get_digestbyobj) #define _EVP_has_aes_hardware BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_has_aes_hardware) #define _EVP_hpke_aes_128_gcm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_hpke_aes_128_gcm) #define _EVP_hpke_aes_256_gcm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_hpke_aes_256_gcm) #define _EVP_hpke_chacha20_poly1305 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_hpke_chacha20_poly1305) #define _EVP_hpke_hkdf_sha256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_hpke_hkdf_sha256) 
#define _EVP_hpke_p256_hkdf_sha256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_hpke_p256_hkdf_sha256) #define _EVP_hpke_x25519_hkdf_sha256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_hpke_x25519_hkdf_sha256) #define _EVP_marshal_digest_algorithm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_marshal_digest_algorithm) #define _EVP_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_marshal_private_key) #define _EVP_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_marshal_public_key) #define _EVP_md4 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_md4) #define _EVP_md5 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_md5) #define _EVP_md5_sha1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_md5_sha1) #define _EVP_parse_digest_algorithm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_parse_digest_algorithm) #define _EVP_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_parse_private_key) #define _EVP_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_parse_public_key) #define _EVP_rc2_40_cbc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_rc2_40_cbc) #define _EVP_rc2_cbc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_rc2_cbc) #define _EVP_rc4 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_rc4) #define _EVP_sha1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_sha1) #define _EVP_sha1_final_with_secret_suffix BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_sha1_final_with_secret_suffix) #define _EVP_sha224 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_sha224) #define _EVP_sha256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_sha256) #define _EVP_sha256_final_with_secret_suffix BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_sha256_final_with_secret_suffix) #define _EVP_sha384 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_sha384) #define _EVP_sha512 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_sha512) #define _EVP_sha512_256 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_sha512_256) #define _EVP_tls_cbc_copy_mac BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_tls_cbc_copy_mac) #define _EVP_tls_cbc_digest_record BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_tls_cbc_digest_record) #define _EVP_tls_cbc_record_digest_supported BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_tls_cbc_record_digest_supported) #define _EVP_tls_cbc_remove_padding BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_tls_cbc_remove_padding) #define _EXTENDED_KEY_USAGE_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EXTENDED_KEY_USAGE_free) #define _EXTENDED_KEY_USAGE_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EXTENDED_KEY_USAGE_it) #define _EXTENDED_KEY_USAGE_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EXTENDED_KEY_USAGE_new) #define _FIPS_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, FIPS_mode) #define _FIPS_mode_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, FIPS_mode_set) #define _FIPS_module_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, FIPS_module_name) #define _FIPS_query_algorithm_status BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, FIPS_query_algorithm_status) #define _FIPS_read_counter BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, FIPS_read_counter) #define _FIPS_service_indicator_after_call BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, FIPS_service_indicator_after_call) #define _FIPS_service_indicator_before_call BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, FIPS_service_indicator_before_call) #define _FIPS_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, FIPS_version) #define _GENERAL_NAMES_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAMES_free) #define _GENERAL_NAMES_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAMES_it) #define _GENERAL_NAMES_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAMES_new) #define _GENERAL_NAME_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAME_cmp) #define 
_GENERAL_NAME_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAME_dup) #define _GENERAL_NAME_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAME_free) #define _GENERAL_NAME_get0_otherName BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAME_get0_otherName) #define _GENERAL_NAME_get0_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAME_get0_value) #define _GENERAL_NAME_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAME_it) #define _GENERAL_NAME_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAME_new) #define _GENERAL_NAME_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAME_print) #define _GENERAL_NAME_set0_othername BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAME_set0_othername) #define _GENERAL_NAME_set0_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_NAME_set0_value) #define _GENERAL_SUBTREE_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_SUBTREE_free) #define _GENERAL_SUBTREE_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, GENERAL_SUBTREE_new) #define _HKDF BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HKDF) #define _HKDF_expand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HKDF_expand) #define _HKDF_extract BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HKDF_extract) #define _HMAC BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC) #define _HMAC_CTX_cleanse BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_CTX_cleanse) #define _HMAC_CTX_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_CTX_cleanup) #define _HMAC_CTX_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_CTX_copy) #define _HMAC_CTX_copy_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_CTX_copy_ex) #define _HMAC_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_CTX_free) #define _HMAC_CTX_get_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_CTX_get_md) #define _HMAC_CTX_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_CTX_init) 
#define _HMAC_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_CTX_new) #define _HMAC_CTX_reset BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_CTX_reset) #define _HMAC_Final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_Final) #define _HMAC_Init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_Init) #define _HMAC_Init_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_Init_ex) #define _HMAC_Update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_Update) #define _HMAC_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HMAC_size) #define _HRSS_decap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HRSS_decap) #define _HRSS_encap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HRSS_encap) #define _HRSS_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HRSS_generate_key) #define _HRSS_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HRSS_marshal_public_key) #define _HRSS_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HRSS_parse_public_key) #define _HRSS_poly3_invert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HRSS_poly3_invert) #define _HRSS_poly3_mul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, HRSS_poly3_mul) #define _ISSUING_DIST_POINT_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ISSUING_DIST_POINT_free) #define _ISSUING_DIST_POINT_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ISSUING_DIST_POINT_it) #define _ISSUING_DIST_POINT_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ISSUING_DIST_POINT_new) #define _KYBER_decap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, KYBER_decap) #define _KYBER_encap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, KYBER_encap) #define _KYBER_encap_external_entropy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, KYBER_encap_external_entropy) #define _KYBER_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, KYBER_generate_key) #define _KYBER_generate_key_external_entropy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
KYBER_generate_key_external_entropy) #define _KYBER_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, KYBER_marshal_private_key) #define _KYBER_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, KYBER_marshal_public_key) #define _KYBER_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, KYBER_parse_private_key) #define _KYBER_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, KYBER_parse_public_key) #define _KYBER_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, KYBER_public_from_private) #define _MD4 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MD4) #define _MD4_Final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MD4_Final) #define _MD4_Init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MD4_Init) #define _MD4_Transform BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MD4_Transform) #define _MD4_Update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MD4_Update) #define _MD5 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MD5) #define _MD5_Final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MD5_Final) #define _MD5_Init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MD5_Init) #define _MD5_Transform BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MD5_Transform) #define _MD5_Update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MD5_Update) #define _METHOD_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, METHOD_ref) #define _METHOD_unref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, METHOD_unref) #define _MLDSA65_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_generate_key) #define _MLDSA65_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_marshal_public_key) #define _MLDSA65_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_parse_public_key) #define _MLDSA65_private_key_from_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_private_key_from_seed) #define _MLDSA65_public_from_private 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_public_from_private) #define _MLDSA65_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_sign) #define _MLDSA65_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLDSA65_verify) #define _MLKEM1024_decap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM1024_decap) #define _MLKEM1024_encap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM1024_encap) #define _MLKEM1024_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM1024_generate_key) #define _MLKEM1024_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM1024_marshal_public_key) #define _MLKEM1024_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM1024_parse_public_key) #define _MLKEM1024_private_key_from_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM1024_private_key_from_seed) #define _MLKEM1024_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM1024_public_from_private) #define _MLKEM768_decap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM768_decap) #define _MLKEM768_encap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM768_encap) #define _MLKEM768_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM768_generate_key) #define _MLKEM768_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM768_marshal_public_key) #define _MLKEM768_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM768_parse_public_key) #define _MLKEM768_private_key_from_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM768_private_key_from_seed) #define _MLKEM768_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, MLKEM768_public_from_private) #define _NAME_CONSTRAINTS_check BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NAME_CONSTRAINTS_check) #define _NAME_CONSTRAINTS_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NAME_CONSTRAINTS_free) #define _NAME_CONSTRAINTS_it 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NAME_CONSTRAINTS_it) #define _NAME_CONSTRAINTS_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NAME_CONSTRAINTS_new) #define _NCONF_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NCONF_free) #define _NCONF_get_section BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NCONF_get_section) #define _NCONF_get_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NCONF_get_string) #define _NCONF_load BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NCONF_load) #define _NCONF_load_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NCONF_load_bio) #define _NCONF_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NCONF_new) #define _NETSCAPE_SPKAC_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NETSCAPE_SPKAC_free) #define _NETSCAPE_SPKAC_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NETSCAPE_SPKAC_it) #define _NETSCAPE_SPKAC_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NETSCAPE_SPKAC_new) #define _NETSCAPE_SPKI_b64_decode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NETSCAPE_SPKI_b64_decode) #define _NETSCAPE_SPKI_b64_encode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NETSCAPE_SPKI_b64_encode) #define _NETSCAPE_SPKI_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NETSCAPE_SPKI_free) #define _NETSCAPE_SPKI_get_pubkey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NETSCAPE_SPKI_get_pubkey) #define _NETSCAPE_SPKI_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NETSCAPE_SPKI_it) #define _NETSCAPE_SPKI_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NETSCAPE_SPKI_new) #define _NETSCAPE_SPKI_set_pubkey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NETSCAPE_SPKI_set_pubkey) #define _NETSCAPE_SPKI_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NETSCAPE_SPKI_sign) #define _NETSCAPE_SPKI_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NETSCAPE_SPKI_verify) #define _NOTICEREF_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NOTICEREF_free) #define _NOTICEREF_it 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NOTICEREF_it) #define _NOTICEREF_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NOTICEREF_new) #define _OBJ_cbs2nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_cbs2nid) #define _OBJ_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_cleanup) #define _OBJ_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_cmp) #define _OBJ_create BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_create) #define _OBJ_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_dup) #define _OBJ_find_sigid_algs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_find_sigid_algs) #define _OBJ_find_sigid_by_algs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_find_sigid_by_algs) #define _OBJ_get0_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_get0_data) #define _OBJ_get_undef BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_get_undef) #define _OBJ_length BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_length) #define _OBJ_ln2nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_ln2nid) #define _OBJ_nid2cbb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_nid2cbb) #define _OBJ_nid2ln BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_nid2ln) #define _OBJ_nid2obj BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_nid2obj) #define _OBJ_nid2sn BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_nid2sn) #define _OBJ_obj2nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_obj2nid) #define _OBJ_obj2txt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_obj2txt) #define _OBJ_sn2nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_sn2nid) #define _OBJ_txt2nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_txt2nid) #define _OBJ_txt2obj BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OBJ_txt2obj) #define _OPENSSL_add_all_algorithms_conf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_add_all_algorithms_conf) #define _OPENSSL_armcap_P BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_armcap_P) 
#define _OPENSSL_asprintf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_asprintf) #define _OPENSSL_calloc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_calloc) #define _OPENSSL_cleanse BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_cleanse) #define _OPENSSL_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_cleanup) #define _OPENSSL_clear_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_clear_free) #define _OPENSSL_config BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_config) #define _OPENSSL_cpuid_setup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_cpuid_setup) #define _OPENSSL_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_free) #define _OPENSSL_fromxdigit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_fromxdigit) #define _OPENSSL_get_armcap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_get_armcap) #define _OPENSSL_get_armcap_pointer_for_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_get_armcap_pointer_for_test) #define _OPENSSL_get_ia32cap BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_get_ia32cap) #define _OPENSSL_gmtime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_gmtime) #define _OPENSSL_gmtime_adj BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_gmtime_adj) #define _OPENSSL_gmtime_diff BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_gmtime_diff) #define _OPENSSL_hash32 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_hash32) #define _OPENSSL_ia32cap_P BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_ia32cap_P) #define _OPENSSL_init_cpuid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_init_cpuid) #define _OPENSSL_init_crypto BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_init_crypto) #define _OPENSSL_init_ssl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_init_ssl) #define _OPENSSL_isalnum BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_isalnum) #define _OPENSSL_isalpha 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_isalpha) #define _OPENSSL_isdigit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_isdigit) #define _OPENSSL_isspace BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_isspace) #define _OPENSSL_isxdigit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_isxdigit) #define _OPENSSL_lh_delete BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_lh_delete) #define _OPENSSL_lh_doall_arg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_lh_doall_arg) #define _OPENSSL_lh_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_lh_free) #define _OPENSSL_lh_insert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_lh_insert) #define _OPENSSL_lh_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_lh_new) #define _OPENSSL_lh_num_items BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_lh_num_items) #define _OPENSSL_lh_retrieve BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_lh_retrieve) #define _OPENSSL_lh_retrieve_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_lh_retrieve_key) #define _OPENSSL_load_builtin_modules BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_load_builtin_modules) #define _OPENSSL_malloc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_malloc) #define _OPENSSL_malloc_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_malloc_init) #define _OPENSSL_memdup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_memdup) #define _OPENSSL_no_config BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_no_config) #define _OPENSSL_posix_to_tm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_posix_to_tm) #define _OPENSSL_realloc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_realloc) #define _OPENSSL_secure_clear_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_secure_clear_free) #define _OPENSSL_secure_malloc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_secure_malloc) #define _OPENSSL_sk_deep_copy 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_deep_copy) #define _OPENSSL_sk_delete BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_delete) #define _OPENSSL_sk_delete_if BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_delete_if) #define _OPENSSL_sk_delete_ptr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_delete_ptr) #define _OPENSSL_sk_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_dup) #define _OPENSSL_sk_find BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_find) #define _OPENSSL_sk_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_free) #define _OPENSSL_sk_insert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_insert) #define _OPENSSL_sk_is_sorted BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_is_sorted) #define _OPENSSL_sk_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_new) #define _OPENSSL_sk_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_new_null) #define _OPENSSL_sk_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_num) #define _OPENSSL_sk_pop BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_pop) #define _OPENSSL_sk_pop_free_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_pop_free_ex) #define _OPENSSL_sk_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_push) #define _OPENSSL_sk_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_set) #define _OPENSSL_sk_set_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_set_cmp_func) #define _OPENSSL_sk_shift BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_shift) #define _OPENSSL_sk_sort BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_sort) #define _OPENSSL_sk_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_value) #define _OPENSSL_sk_zero BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_sk_zero) #define _OPENSSL_strcasecmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
OPENSSL_strcasecmp) #define _OPENSSL_strdup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strdup) #define _OPENSSL_strhash BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strhash) #define _OPENSSL_strlcat BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strlcat) #define _OPENSSL_strlcpy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strlcpy) #define _OPENSSL_strncasecmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strncasecmp) #define _OPENSSL_strndup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strndup) #define _OPENSSL_strnlen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strnlen) #define _OPENSSL_timegm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_timegm) #define _OPENSSL_tm_to_posix BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_tm_to_posix) #define _OPENSSL_tolower BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_tolower) #define _OPENSSL_vasprintf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_vasprintf) #define _OPENSSL_vasprintf_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_vasprintf_internal) #define _OPENSSL_zalloc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_zalloc) #define _OTHERNAME_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OTHERNAME_free) #define _OTHERNAME_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OTHERNAME_new) #define _OpenSSL_add_all_algorithms BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OpenSSL_add_all_algorithms) #define _OpenSSL_add_all_ciphers BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OpenSSL_add_all_ciphers) #define _OpenSSL_add_all_digests BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OpenSSL_add_all_digests) #define _OpenSSL_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OpenSSL_version) #define _OpenSSL_version_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OpenSSL_version_num) #define _PEM_ASN1_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_ASN1_read) #define 
_PEM_ASN1_read_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_ASN1_read_bio) #define _PEM_ASN1_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_ASN1_write) #define _PEM_ASN1_write_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_ASN1_write_bio) #define _PEM_X509_INFO_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_X509_INFO_read) #define _PEM_X509_INFO_read_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_X509_INFO_read_bio) #define _PEM_bytes_read_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_bytes_read_bio) #define _PEM_def_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_def_callback) #define _PEM_do_header BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_do_header) #define _PEM_get_EVP_CIPHER_INFO BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_get_EVP_CIPHER_INFO) #define _PEM_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read) #define _PEM_read_DHparams BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_DHparams) #define _PEM_read_DSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_DSAPrivateKey) #define _PEM_read_DSA_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_DSA_PUBKEY) #define _PEM_read_DSAparams BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_DSAparams) #define _PEM_read_ECPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_ECPrivateKey) #define _PEM_read_EC_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_EC_PUBKEY) #define _PEM_read_PKCS7 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_PKCS7) #define _PEM_read_PKCS8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_PKCS8) #define _PEM_read_PKCS8_PRIV_KEY_INFO BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_PKCS8_PRIV_KEY_INFO) #define _PEM_read_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_PUBKEY) #define _PEM_read_PrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_PrivateKey) #define 
_PEM_read_RSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_RSAPrivateKey) #define _PEM_read_RSAPublicKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_RSAPublicKey) #define _PEM_read_RSA_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_RSA_PUBKEY) #define _PEM_read_SSL_SESSION BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_SSL_SESSION) #define _PEM_read_X509 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_X509) #define _PEM_read_X509_AUX BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_X509_AUX) #define _PEM_read_X509_CRL BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_X509_CRL) #define _PEM_read_X509_REQ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_X509_REQ) #define _PEM_read_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio) #define _PEM_read_bio_DHparams BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_DHparams) #define _PEM_read_bio_DSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_DSAPrivateKey) #define _PEM_read_bio_DSA_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_DSA_PUBKEY) #define _PEM_read_bio_DSAparams BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_DSAparams) #define _PEM_read_bio_ECPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_ECPrivateKey) #define _PEM_read_bio_EC_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_EC_PUBKEY) #define _PEM_read_bio_PKCS7 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_PKCS7) #define _PEM_read_bio_PKCS8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_PKCS8) #define _PEM_read_bio_PKCS8_PRIV_KEY_INFO BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_PKCS8_PRIV_KEY_INFO) #define _PEM_read_bio_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_PUBKEY) #define _PEM_read_bio_PrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_PrivateKey) #define 
_PEM_read_bio_RSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_RSAPrivateKey) #define _PEM_read_bio_RSAPublicKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_RSAPublicKey) #define _PEM_read_bio_RSA_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_RSA_PUBKEY) #define _PEM_read_bio_SSL_SESSION BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_SSL_SESSION) #define _PEM_read_bio_X509 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_X509) #define _PEM_read_bio_X509_AUX BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_X509_AUX) #define _PEM_read_bio_X509_CRL BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_X509_CRL) #define _PEM_read_bio_X509_REQ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_read_bio_X509_REQ) #define _PEM_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write) #define _PEM_write_DHparams BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_DHparams) #define _PEM_write_DSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_DSAPrivateKey) #define _PEM_write_DSA_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_DSA_PUBKEY) #define _PEM_write_DSAparams BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_DSAparams) #define _PEM_write_ECPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_ECPrivateKey) #define _PEM_write_EC_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_EC_PUBKEY) #define _PEM_write_PKCS7 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_PKCS7) #define _PEM_write_PKCS8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_PKCS8) #define _PEM_write_PKCS8PrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_PKCS8PrivateKey) #define _PEM_write_PKCS8PrivateKey_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_PKCS8PrivateKey_nid) #define _PEM_write_PKCS8_PRIV_KEY_INFO BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
PEM_write_PKCS8_PRIV_KEY_INFO) #define _PEM_write_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_PUBKEY) #define _PEM_write_PrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_PrivateKey) #define _PEM_write_RSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_RSAPrivateKey) #define _PEM_write_RSAPublicKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_RSAPublicKey) #define _PEM_write_RSA_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_RSA_PUBKEY) #define _PEM_write_SSL_SESSION BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_SSL_SESSION) #define _PEM_write_X509 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_X509) #define _PEM_write_X509_AUX BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_X509_AUX) #define _PEM_write_X509_CRL BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_X509_CRL) #define _PEM_write_X509_REQ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_X509_REQ) #define _PEM_write_X509_REQ_NEW BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_X509_REQ_NEW) #define _PEM_write_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio) #define _PEM_write_bio_DHparams BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_DHparams) #define _PEM_write_bio_DSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_DSAPrivateKey) #define _PEM_write_bio_DSA_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_DSA_PUBKEY) #define _PEM_write_bio_DSAparams BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_DSAparams) #define _PEM_write_bio_ECPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_ECPrivateKey) #define _PEM_write_bio_EC_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_EC_PUBKEY) #define _PEM_write_bio_PKCS7 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_PKCS7) #define _PEM_write_bio_PKCS8 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_PKCS8) #define _PEM_write_bio_PKCS8PrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_PKCS8PrivateKey) #define _PEM_write_bio_PKCS8PrivateKey_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_PKCS8PrivateKey_nid) #define _PEM_write_bio_PKCS8_PRIV_KEY_INFO BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_PKCS8_PRIV_KEY_INFO) #define _PEM_write_bio_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_PUBKEY) #define _PEM_write_bio_PrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_PrivateKey) #define _PEM_write_bio_RSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_RSAPrivateKey) #define _PEM_write_bio_RSAPublicKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_RSAPublicKey) #define _PEM_write_bio_RSA_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_RSA_PUBKEY) #define _PEM_write_bio_SSL_SESSION BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_SSL_SESSION) #define _PEM_write_bio_X509 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_X509) #define _PEM_write_bio_X509_AUX BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_X509_AUX) #define _PEM_write_bio_X509_CRL BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_X509_CRL) #define _PEM_write_bio_X509_REQ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_X509_REQ) #define _PEM_write_bio_X509_REQ_NEW BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PEM_write_bio_X509_REQ_NEW) #define _PKCS12_PBE_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS12_PBE_add) #define _PKCS12_create BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS12_create) #define _PKCS12_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS12_free) #define _PKCS12_get_key_and_certs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS12_get_key_and_certs) #define _PKCS12_parse 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS12_parse) #define _PKCS12_verify_mac BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS12_verify_mac) #define _PKCS1_MGF1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS1_MGF1) #define _PKCS5_PBKDF2_HMAC BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS5_PBKDF2_HMAC) #define _PKCS5_PBKDF2_HMAC_SHA1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS5_PBKDF2_HMAC_SHA1) #define _PKCS5_pbe2_decrypt_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS5_pbe2_decrypt_init) #define _PKCS5_pbe2_encrypt_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS5_pbe2_encrypt_init) #define _PKCS7_bundle_CRLs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_bundle_CRLs) #define _PKCS7_bundle_certificates BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_bundle_certificates) #define _PKCS7_bundle_raw_certificates BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_bundle_raw_certificates) #define _PKCS7_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_free) #define _PKCS7_get_CRLs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_get_CRLs) #define _PKCS7_get_PEM_CRLs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_get_PEM_CRLs) #define _PKCS7_get_PEM_certificates BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_get_PEM_certificates) #define _PKCS7_get_certificates BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_get_certificates) #define _PKCS7_get_raw_certificates BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_get_raw_certificates) #define _PKCS7_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_sign) #define _PKCS7_type_is_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_type_is_data) #define _PKCS7_type_is_digest BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_type_is_digest) #define _PKCS7_type_is_encrypted BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_type_is_encrypted) #define _PKCS7_type_is_enveloped 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_type_is_enveloped) #define _PKCS7_type_is_signed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_type_is_signed) #define _PKCS7_type_is_signedAndEnveloped BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS7_type_is_signedAndEnveloped) #define _PKCS8_PRIV_KEY_INFO_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS8_PRIV_KEY_INFO_free) #define _PKCS8_PRIV_KEY_INFO_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS8_PRIV_KEY_INFO_new) #define _PKCS8_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS8_decrypt) #define _PKCS8_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS8_encrypt) #define _PKCS8_marshal_encrypted_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS8_marshal_encrypted_private_key) #define _PKCS8_parse_encrypted_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, PKCS8_parse_encrypted_private_key) #define _POLICYINFO_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, POLICYINFO_free) #define _POLICYINFO_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, POLICYINFO_it) #define _POLICYINFO_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, POLICYINFO_new) #define _POLICYQUALINFO_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, POLICYQUALINFO_free) #define _POLICYQUALINFO_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, POLICYQUALINFO_it) #define _POLICYQUALINFO_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, POLICYQUALINFO_new) #define _POLICY_CONSTRAINTS_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, POLICY_CONSTRAINTS_free) #define _POLICY_CONSTRAINTS_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, POLICY_CONSTRAINTS_it) #define _POLICY_CONSTRAINTS_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, POLICY_CONSTRAINTS_new) #define _POLICY_MAPPINGS_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, POLICY_MAPPINGS_it) #define _POLICY_MAPPING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, POLICY_MAPPING_free) #define 
_POLICY_MAPPING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, POLICY_MAPPING_new) #define _RAND_OpenSSL BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_OpenSSL) #define _RAND_SSLeay BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_SSLeay) #define _RAND_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_add) #define _RAND_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_bytes) #define _RAND_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_cleanup) #define _RAND_disable_fork_unsafe_buffering BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_disable_fork_unsafe_buffering) #define _RAND_egd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_egd) #define _RAND_enable_fork_unsafe_buffering BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_enable_fork_unsafe_buffering) #define _RAND_file_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_file_name) #define _RAND_get_rand_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_get_rand_method) #define _RAND_get_system_entropy_for_custom_prng BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_get_system_entropy_for_custom_prng) #define _RAND_load_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_load_file) #define _RAND_poll BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_poll) #define _RAND_pseudo_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_pseudo_bytes) #define _RAND_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_seed) #define _RAND_set_rand_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_set_rand_method) #define _RAND_status BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_status) #define _RC4 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RC4) #define _RC4_set_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RC4_set_key) #define _RSAPrivateKey_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSAPrivateKey_dup) #define _RSAPublicKey_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSAPublicKey_dup) #define 
_RSAZ_1024_mod_exp_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSAZ_1024_mod_exp_avx2) #define _RSA_PSS_PARAMS_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_PSS_PARAMS_free) #define _RSA_PSS_PARAMS_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_PSS_PARAMS_it) #define _RSA_PSS_PARAMS_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_PSS_PARAMS_new) #define _RSA_add_pkcs1_prefix BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_add_pkcs1_prefix) #define _RSA_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_bits) #define _RSA_blinding_off BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_blinding_off) #define _RSA_blinding_on BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_blinding_on) #define _RSA_check_fips BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_check_fips) #define _RSA_check_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_check_key) #define _RSA_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_decrypt) #define _RSA_default_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_default_method) #define _RSA_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_encrypt) #define _RSA_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_flags) #define _RSA_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_free) #define _RSA_generate_key_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_generate_key_ex) #define _RSA_generate_key_fips BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_generate_key_fips) #define _RSA_get0_crt_params BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get0_crt_params) #define _RSA_get0_d BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get0_d) #define _RSA_get0_dmp1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get0_dmp1) #define _RSA_get0_dmq1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get0_dmq1) #define _RSA_get0_e BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get0_e) #define _RSA_get0_factors 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get0_factors) #define _RSA_get0_iqmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get0_iqmp) #define _RSA_get0_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get0_key) #define _RSA_get0_n BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get0_n) #define _RSA_get0_p BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get0_p) #define _RSA_get0_pss_params BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get0_pss_params) #define _RSA_get0_q BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get0_q) #define _RSA_get_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get_ex_data) #define _RSA_get_ex_new_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_get_ex_new_index) #define _RSA_is_opaque BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_is_opaque) #define _RSA_marshal_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_marshal_private_key) #define _RSA_marshal_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_marshal_public_key) #define _RSA_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_new) #define _RSA_new_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_new_method) #define _RSA_new_method_no_e BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_new_method_no_e) #define _RSA_new_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_new_private_key) #define _RSA_new_private_key_large_e BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_new_private_key_large_e) #define _RSA_new_private_key_no_crt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_new_private_key_no_crt) #define _RSA_new_private_key_no_e BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_new_private_key_no_e) #define _RSA_new_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_new_public_key) #define _RSA_new_public_key_large_e BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_new_public_key_large_e) #define _RSA_padding_add_PKCS1_OAEP_mgf1 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_padding_add_PKCS1_OAEP_mgf1) #define _RSA_padding_add_PKCS1_PSS_mgf1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_padding_add_PKCS1_PSS_mgf1) #define _RSA_padding_add_PKCS1_type_1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_padding_add_PKCS1_type_1) #define _RSA_padding_add_none BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_padding_add_none) #define _RSA_padding_check_PKCS1_OAEP_mgf1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_padding_check_PKCS1_OAEP_mgf1) #define _RSA_padding_check_PKCS1_type_1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_padding_check_PKCS1_type_1) #define _RSA_parse_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_parse_private_key) #define _RSA_parse_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_parse_public_key) #define _RSA_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_print) #define _RSA_private_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_private_decrypt) #define _RSA_private_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_private_encrypt) #define _RSA_private_key_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_private_key_from_bytes) #define _RSA_private_key_to_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_private_key_to_bytes) #define _RSA_public_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_public_decrypt) #define _RSA_public_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_public_encrypt) #define _RSA_public_key_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_public_key_from_bytes) #define _RSA_public_key_to_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_public_key_to_bytes) #define _RSA_set0_crt_params BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_set0_crt_params) #define _RSA_set0_factors BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_set0_factors) #define _RSA_set0_key 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_set0_key) #define _RSA_set_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_set_ex_data) #define _RSA_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_sign) #define _RSA_sign_pss_mgf1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_sign_pss_mgf1) #define _RSA_sign_raw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_sign_raw) #define _RSA_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_size) #define _RSA_test_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_test_flags) #define _RSA_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_up_ref) #define _RSA_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_verify) #define _RSA_verify_PKCS1_PSS_mgf1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_verify_PKCS1_PSS_mgf1) #define _RSA_verify_pss_mgf1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_verify_pss_mgf1) #define _RSA_verify_raw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSA_verify_raw) #define _SHA1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA1) #define _SHA1_Final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA1_Final) #define _SHA1_Init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA1_Init) #define _SHA1_Transform BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA1_Transform) #define _SHA1_Update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA1_Update) #define _SHA224 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA224) #define _SHA224_Final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA224_Final) #define _SHA224_Init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA224_Init) #define _SHA224_Update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA224_Update) #define _SHA256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA256) #define _SHA256_Final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA256_Final) #define _SHA256_Init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA256_Init) #define _SHA256_Transform 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA256_Transform) #define _SHA256_TransformBlocks BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA256_TransformBlocks) #define _SHA256_Update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA256_Update) #define _SHA384 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA384) #define _SHA384_Final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA384_Final) #define _SHA384_Init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA384_Init) #define _SHA384_Update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA384_Update) #define _SHA512 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA512) #define _SHA512_256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA512_256) #define _SHA512_256_Final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA512_256_Final) #define _SHA512_256_Init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA512_256_Init) #define _SHA512_256_Update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA512_256_Update) #define _SHA512_Final BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA512_Final) #define _SHA512_Init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA512_Init) #define _SHA512_Transform BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA512_Transform) #define _SHA512_Update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SHA512_Update) #define _SIPHASH_24 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SIPHASH_24) #define _SLHDSA_SHA2_128S_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_generate_key) #define _SLHDSA_SHA2_128S_prehash_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_prehash_sign) #define _SLHDSA_SHA2_128S_prehash_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_prehash_verify) #define _SLHDSA_SHA2_128S_prehash_warning_nonstandard_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_prehash_warning_nonstandard_sign) #define _SLHDSA_SHA2_128S_prehash_warning_nonstandard_verify 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_prehash_warning_nonstandard_verify) #define _SLHDSA_SHA2_128S_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_public_from_private) #define _SLHDSA_SHA2_128S_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_sign) #define _SLHDSA_SHA2_128S_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SLHDSA_SHA2_128S_verify) #define _SPAKE2_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SPAKE2_CTX_free) #define _SPAKE2_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SPAKE2_CTX_new) #define _SPAKE2_generate_msg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SPAKE2_generate_msg) #define _SPAKE2_process_msg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SPAKE2_process_msg) #define _SSL_CIPHER_description BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_description) #define _SSL_CIPHER_get_auth_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_auth_nid) #define _SSL_CIPHER_get_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_bits) #define _SSL_CIPHER_get_cipher_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_cipher_nid) #define _SSL_CIPHER_get_digest_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_digest_nid) #define _SSL_CIPHER_get_handshake_digest BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_handshake_digest) #define _SSL_CIPHER_get_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_id) #define _SSL_CIPHER_get_kx_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_kx_name) #define _SSL_CIPHER_get_kx_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_kx_nid) #define _SSL_CIPHER_get_max_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_max_version) #define _SSL_CIPHER_get_min_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_min_version) #define _SSL_CIPHER_get_name 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_name) #define _SSL_CIPHER_get_prf_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_prf_nid) #define _SSL_CIPHER_get_protocol_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_protocol_id) #define _SSL_CIPHER_get_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_get_version) #define _SSL_CIPHER_is_aead BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_is_aead) #define _SSL_CIPHER_is_block_cipher BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_is_block_cipher) #define _SSL_CIPHER_standard_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CIPHER_standard_name) #define _SSL_COMP_add_compression_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_COMP_add_compression_method) #define _SSL_COMP_free_compression_methods BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_COMP_free_compression_methods) #define _SSL_COMP_get0_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_COMP_get0_name) #define _SSL_COMP_get_compression_methods BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_COMP_get_compression_methods) #define _SSL_COMP_get_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_COMP_get_id) #define _SSL_COMP_get_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_COMP_get_name) #define _SSL_CREDENTIAL_clear_must_match_issuer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_clear_must_match_issuer) #define _SSL_CREDENTIAL_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_free) #define _SSL_CREDENTIAL_get_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_get_ex_data) #define _SSL_CREDENTIAL_get_ex_new_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_get_ex_new_index) #define _SSL_CREDENTIAL_must_match_issuer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_must_match_issuer) #define _SSL_CREDENTIAL_new_delegated 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_new_delegated) #define _SSL_CREDENTIAL_new_x509 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_new_x509) #define _SSL_CREDENTIAL_set1_cert_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_set1_cert_chain) #define _SSL_CREDENTIAL_set1_delegated_credential BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_set1_delegated_credential) #define _SSL_CREDENTIAL_set1_ocsp_response BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_set1_ocsp_response) #define _SSL_CREDENTIAL_set1_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_set1_private_key) #define _SSL_CREDENTIAL_set1_signed_cert_timestamp_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_set1_signed_cert_timestamp_list) #define _SSL_CREDENTIAL_set1_signing_algorithm_prefs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_set1_signing_algorithm_prefs) #define _SSL_CREDENTIAL_set_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_set_ex_data) #define _SSL_CREDENTIAL_set_must_match_issuer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_set_must_match_issuer) #define _SSL_CREDENTIAL_set_private_key_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_set_private_key_method) #define _SSL_CREDENTIAL_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CREDENTIAL_up_ref) #define _SSL_CTX_add0_chain_cert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_add0_chain_cert) #define _SSL_CTX_add1_chain_cert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_add1_chain_cert) #define _SSL_CTX_add1_credential BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_add1_credential) #define _SSL_CTX_add_cert_compression_alg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_add_cert_compression_alg) #define _SSL_CTX_add_client_CA BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_add_client_CA) 
#define _SSL_CTX_add_extra_chain_cert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_add_extra_chain_cert) #define _SSL_CTX_add_session BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_add_session) #define _SSL_CTX_check_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_check_private_key) #define _SSL_CTX_cipher_in_group BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_cipher_in_group) #define _SSL_CTX_clear_chain_certs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_clear_chain_certs) #define _SSL_CTX_clear_extra_chain_certs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_clear_extra_chain_certs) #define _SSL_CTX_clear_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_clear_mode) #define _SSL_CTX_clear_options BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_clear_options) #define _SSL_CTX_enable_ocsp_stapling BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_enable_ocsp_stapling) #define _SSL_CTX_enable_signed_cert_timestamps BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_enable_signed_cert_timestamps) #define _SSL_CTX_enable_tls_channel_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_enable_tls_channel_id) #define _SSL_CTX_flush_sessions BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_flush_sessions) #define _SSL_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_free) #define _SSL_CTX_get0_certificate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get0_certificate) #define _SSL_CTX_get0_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get0_chain) #define _SSL_CTX_get0_chain_certs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get0_chain_certs) #define _SSL_CTX_get0_param BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get0_param) #define _SSL_CTX_get0_privatekey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get0_privatekey) #define _SSL_CTX_get_cert_store BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
SSL_CTX_get_cert_store) #define _SSL_CTX_get_ciphers BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_ciphers) #define _SSL_CTX_get_client_CA_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_client_CA_list) #define _SSL_CTX_get_compliance_policy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_compliance_policy) #define _SSL_CTX_get_default_passwd_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_default_passwd_cb) #define _SSL_CTX_get_default_passwd_cb_userdata BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_default_passwd_cb_userdata) #define _SSL_CTX_get_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_ex_data) #define _SSL_CTX_get_ex_new_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_ex_new_index) #define _SSL_CTX_get_extra_chain_certs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_extra_chain_certs) #define _SSL_CTX_get_info_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_info_callback) #define _SSL_CTX_get_keylog_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_keylog_callback) #define _SSL_CTX_get_max_cert_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_max_cert_list) #define _SSL_CTX_get_max_proto_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_max_proto_version) #define _SSL_CTX_get_min_proto_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_min_proto_version) #define _SSL_CTX_get_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_mode) #define _SSL_CTX_get_num_tickets BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_num_tickets) #define _SSL_CTX_get_options BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_options) #define _SSL_CTX_get_quiet_shutdown BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_quiet_shutdown) #define _SSL_CTX_get_read_ahead BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_read_ahead) 
#define _SSL_CTX_get_session_cache_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_session_cache_mode) #define _SSL_CTX_get_timeout BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_timeout) #define _SSL_CTX_get_tlsext_ticket_keys BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_tlsext_ticket_keys) #define _SSL_CTX_get_verify_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_verify_callback) #define _SSL_CTX_get_verify_depth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_verify_depth) #define _SSL_CTX_get_verify_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_get_verify_mode) #define _SSL_CTX_load_verify_locations BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_load_verify_locations) #define _SSL_CTX_need_tmp_RSA BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_need_tmp_RSA) #define _SSL_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_new) #define _SSL_CTX_remove_session BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_remove_session) #define _SSL_CTX_sess_accept BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_accept) #define _SSL_CTX_sess_accept_good BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_accept_good) #define _SSL_CTX_sess_accept_renegotiate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_accept_renegotiate) #define _SSL_CTX_sess_cache_full BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_cache_full) #define _SSL_CTX_sess_cb_hits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_cb_hits) #define _SSL_CTX_sess_connect BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_connect) #define _SSL_CTX_sess_connect_good BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_connect_good) #define _SSL_CTX_sess_connect_renegotiate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_connect_renegotiate) #define _SSL_CTX_sess_get_cache_size 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_get_cache_size) #define _SSL_CTX_sess_get_get_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_get_get_cb) #define _SSL_CTX_sess_get_new_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_get_new_cb) #define _SSL_CTX_sess_get_remove_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_get_remove_cb) #define _SSL_CTX_sess_hits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_hits) #define _SSL_CTX_sess_misses BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_misses) #define _SSL_CTX_sess_number BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_number) #define _SSL_CTX_sess_set_cache_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_set_cache_size) #define _SSL_CTX_sess_set_get_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_set_get_cb) #define _SSL_CTX_sess_set_new_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_set_new_cb) #define _SSL_CTX_sess_set_remove_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_set_remove_cb) #define _SSL_CTX_sess_timeouts BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_sess_timeouts) #define _SSL_CTX_set0_buffer_pool BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set0_buffer_pool) #define _SSL_CTX_set0_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set0_chain) #define _SSL_CTX_set0_client_CAs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set0_client_CAs) #define _SSL_CTX_set0_verify_cert_store BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set0_verify_cert_store) #define _SSL_CTX_set1_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set1_chain) #define _SSL_CTX_set1_curves BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set1_curves) #define _SSL_CTX_set1_curves_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set1_curves_list) #define _SSL_CTX_set1_ech_keys 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set1_ech_keys) #define _SSL_CTX_set1_group_ids BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set1_group_ids) #define _SSL_CTX_set1_groups BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set1_groups) #define _SSL_CTX_set1_groups_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set1_groups_list) #define _SSL_CTX_set1_param BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set1_param) #define _SSL_CTX_set1_sigalgs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set1_sigalgs) #define _SSL_CTX_set1_sigalgs_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set1_sigalgs_list) #define _SSL_CTX_set1_tls_channel_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set1_tls_channel_id) #define _SSL_CTX_set1_verify_cert_store BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set1_verify_cert_store) #define _SSL_CTX_set_allow_unknown_alpn_protos BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_allow_unknown_alpn_protos) #define _SSL_CTX_set_alpn_protos BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_alpn_protos) #define _SSL_CTX_set_alpn_select_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_alpn_select_cb) #define _SSL_CTX_set_cert_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_cert_cb) #define _SSL_CTX_set_cert_store BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_cert_store) #define _SSL_CTX_set_cert_verify_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_cert_verify_callback) #define _SSL_CTX_set_chain_and_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_chain_and_key) #define _SSL_CTX_set_cipher_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_cipher_list) #define _SSL_CTX_set_client_CA_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_client_CA_list) #define _SSL_CTX_set_client_cert_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
SSL_CTX_set_client_cert_cb) #define _SSL_CTX_set_compliance_policy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_compliance_policy) #define _SSL_CTX_set_current_time_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_current_time_cb) #define _SSL_CTX_set_custom_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_custom_verify) #define _SSL_CTX_set_default_passwd_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_default_passwd_cb) #define _SSL_CTX_set_default_passwd_cb_userdata BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_default_passwd_cb_userdata) #define _SSL_CTX_set_default_verify_paths BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_default_verify_paths) #define _SSL_CTX_set_dos_protection_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_dos_protection_cb) #define _SSL_CTX_set_early_data_enabled BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_early_data_enabled) #define _SSL_CTX_set_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_ex_data) #define _SSL_CTX_set_false_start_allowed_without_alpn BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_false_start_allowed_without_alpn) #define _SSL_CTX_set_grease_enabled BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_grease_enabled) #define _SSL_CTX_set_info_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_info_callback) #define _SSL_CTX_set_keylog_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_keylog_callback) #define _SSL_CTX_set_max_cert_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_max_cert_list) #define _SSL_CTX_set_max_proto_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_max_proto_version) #define _SSL_CTX_set_max_send_fragment BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_max_send_fragment) #define _SSL_CTX_set_min_proto_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
SSL_CTX_set_min_proto_version) #define _SSL_CTX_set_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_mode) #define _SSL_CTX_set_msg_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_msg_callback) #define _SSL_CTX_set_msg_callback_arg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_msg_callback_arg) #define _SSL_CTX_set_next_proto_select_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_next_proto_select_cb) #define _SSL_CTX_set_next_protos_advertised_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_next_protos_advertised_cb) #define _SSL_CTX_set_num_tickets BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_num_tickets) #define _SSL_CTX_set_ocsp_response BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_ocsp_response) #define _SSL_CTX_set_options BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_options) #define _SSL_CTX_set_permute_extensions BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_permute_extensions) #define _SSL_CTX_set_private_key_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_private_key_method) #define _SSL_CTX_set_psk_client_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_psk_client_callback) #define _SSL_CTX_set_psk_server_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_psk_server_callback) #define _SSL_CTX_set_purpose BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_purpose) #define _SSL_CTX_set_quic_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_quic_method) #define _SSL_CTX_set_quiet_shutdown BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_quiet_shutdown) #define _SSL_CTX_set_read_ahead BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_read_ahead) #define _SSL_CTX_set_record_protocol_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_record_protocol_version) #define _SSL_CTX_set_retain_only_sha256_of_client_certs 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_retain_only_sha256_of_client_certs) #define _SSL_CTX_set_reverify_on_resume BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_reverify_on_resume) #define _SSL_CTX_set_select_certificate_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_select_certificate_cb) #define _SSL_CTX_set_session_cache_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_session_cache_mode) #define _SSL_CTX_set_session_id_context BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_session_id_context) #define _SSL_CTX_set_session_psk_dhe_timeout BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_session_psk_dhe_timeout) #define _SSL_CTX_set_signed_cert_timestamp_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_signed_cert_timestamp_list) #define _SSL_CTX_set_signing_algorithm_prefs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_signing_algorithm_prefs) #define _SSL_CTX_set_srtp_profiles BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_srtp_profiles) #define _SSL_CTX_set_strict_cipher_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_strict_cipher_list) #define _SSL_CTX_set_ticket_aead_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_ticket_aead_method) #define _SSL_CTX_set_timeout BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_timeout) #define _SSL_CTX_set_tls_channel_id_enabled BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tls_channel_id_enabled) #define _SSL_CTX_set_tlsext_servername_arg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_servername_arg) #define _SSL_CTX_set_tlsext_servername_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_servername_callback) #define _SSL_CTX_set_tlsext_status_arg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_status_arg) #define _SSL_CTX_set_tlsext_status_cb 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_status_cb) #define _SSL_CTX_set_tlsext_ticket_key_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_ticket_key_cb) #define _SSL_CTX_set_tlsext_ticket_keys BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_ticket_keys) #define _SSL_CTX_set_tlsext_use_srtp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tlsext_use_srtp) #define _SSL_CTX_set_tmp_dh BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tmp_dh) #define _SSL_CTX_set_tmp_dh_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tmp_dh_callback) #define _SSL_CTX_set_tmp_ecdh BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tmp_ecdh) #define _SSL_CTX_set_tmp_rsa BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tmp_rsa) #define _SSL_CTX_set_tmp_rsa_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_tmp_rsa_callback) #define _SSL_CTX_set_trust BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_trust) #define _SSL_CTX_set_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_verify) #define _SSL_CTX_set_verify_algorithm_prefs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_verify_algorithm_prefs) #define _SSL_CTX_set_verify_depth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_set_verify_depth) #define _SSL_CTX_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_up_ref) #define _SSL_CTX_use_PrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_use_PrivateKey) #define _SSL_CTX_use_PrivateKey_ASN1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_use_PrivateKey_ASN1) #define _SSL_CTX_use_PrivateKey_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_use_PrivateKey_file) #define _SSL_CTX_use_RSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_use_RSAPrivateKey) #define _SSL_CTX_use_RSAPrivateKey_ASN1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
SSL_CTX_use_RSAPrivateKey_ASN1) #define _SSL_CTX_use_RSAPrivateKey_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_use_RSAPrivateKey_file) #define _SSL_CTX_use_certificate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_use_certificate) #define _SSL_CTX_use_certificate_ASN1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_use_certificate_ASN1) #define _SSL_CTX_use_certificate_chain_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_use_certificate_chain_file) #define _SSL_CTX_use_certificate_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_use_certificate_file) #define _SSL_CTX_use_psk_identity_hint BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_CTX_use_psk_identity_hint) #define _SSL_ECH_KEYS_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_ECH_KEYS_add) #define _SSL_ECH_KEYS_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_ECH_KEYS_free) #define _SSL_ECH_KEYS_has_duplicate_config_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_ECH_KEYS_has_duplicate_config_id) #define _SSL_ECH_KEYS_marshal_retry_configs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_ECH_KEYS_marshal_retry_configs) #define _SSL_ECH_KEYS_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_ECH_KEYS_new) #define _SSL_ECH_KEYS_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_ECH_KEYS_up_ref) #define _SSL_SESSION_copy_without_early_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_copy_without_early_data) #define _SSL_SESSION_early_data_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_early_data_capable) #define _SSL_SESSION_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_free) #define _SSL_SESSION_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_from_bytes) #define _SSL_SESSION_get0_cipher BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get0_cipher) #define _SSL_SESSION_get0_id_context BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
SSL_SESSION_get0_id_context) #define _SSL_SESSION_get0_ocsp_response BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get0_ocsp_response) #define _SSL_SESSION_get0_peer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get0_peer) #define _SSL_SESSION_get0_peer_certificates BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get0_peer_certificates) #define _SSL_SESSION_get0_peer_sha256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get0_peer_sha256) #define _SSL_SESSION_get0_signed_cert_timestamp_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get0_signed_cert_timestamp_list) #define _SSL_SESSION_get0_ticket BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get0_ticket) #define _SSL_SESSION_get_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get_ex_data) #define _SSL_SESSION_get_ex_new_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get_ex_new_index) #define _SSL_SESSION_get_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get_id) #define _SSL_SESSION_get_master_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get_master_key) #define _SSL_SESSION_get_protocol_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get_protocol_version) #define _SSL_SESSION_get_ticket_lifetime_hint BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get_ticket_lifetime_hint) #define _SSL_SESSION_get_time BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get_time) #define _SSL_SESSION_get_timeout BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get_timeout) #define _SSL_SESSION_get_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_get_version) #define _SSL_SESSION_has_peer_sha256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_has_peer_sha256) #define _SSL_SESSION_has_ticket BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_has_ticket) #define _SSL_SESSION_is_resumable 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_is_resumable) #define _SSL_SESSION_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_new) #define _SSL_SESSION_set1_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_set1_id) #define _SSL_SESSION_set1_id_context BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_set1_id_context) #define _SSL_SESSION_set_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_set_ex_data) #define _SSL_SESSION_set_protocol_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_set_protocol_version) #define _SSL_SESSION_set_ticket BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_set_ticket) #define _SSL_SESSION_set_time BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_set_time) #define _SSL_SESSION_set_timeout BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_set_timeout) #define _SSL_SESSION_should_be_single_use BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_should_be_single_use) #define _SSL_SESSION_to_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_to_bytes) #define _SSL_SESSION_to_bytes_for_ticket BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_to_bytes_for_ticket) #define _SSL_SESSION_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_SESSION_up_ref) #define _SSL_accept BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_accept) #define _SSL_add0_chain_cert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_add0_chain_cert) #define _SSL_add1_chain_cert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_add1_chain_cert) #define _SSL_add1_credential BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_add1_credential) #define _SSL_add_application_settings BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_add_application_settings) #define _SSL_add_bio_cert_subjects_to_stack BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_add_bio_cert_subjects_to_stack) #define _SSL_add_client_CA 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_add_client_CA) #define _SSL_add_file_cert_subjects_to_stack BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_add_file_cert_subjects_to_stack) #define _SSL_alert_desc_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_alert_desc_string) #define _SSL_alert_desc_string_long BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_alert_desc_string_long) #define _SSL_alert_from_verify_result BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_alert_from_verify_result) #define _SSL_alert_type_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_alert_type_string) #define _SSL_alert_type_string_long BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_alert_type_string_long) #define _SSL_cache_hit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_cache_hit) #define _SSL_can_release_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_can_release_private_key) #define _SSL_certs_clear BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_certs_clear) #define _SSL_check_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_check_private_key) #define _SSL_clear BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_clear) #define _SSL_clear_chain_certs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_clear_chain_certs) #define _SSL_clear_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_clear_mode) #define _SSL_clear_options BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_clear_options) #define _SSL_connect BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_connect) #define _SSL_cutthrough_complete BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_cutthrough_complete) #define _SSL_do_handshake BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_do_handshake) #define _SSL_dup_CA_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_dup_CA_list) #define _SSL_early_callback_ctx_extension_get BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_early_callback_ctx_extension_get) #define 
_SSL_early_data_accepted BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_early_data_accepted) #define _SSL_early_data_reason_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_early_data_reason_string) #define _SSL_ech_accepted BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_ech_accepted) #define _SSL_enable_ocsp_stapling BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_enable_ocsp_stapling) #define _SSL_enable_signed_cert_timestamps BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_enable_signed_cert_timestamps) #define _SSL_enable_tls_channel_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_enable_tls_channel_id) #define _SSL_error_description BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_error_description) #define _SSL_export_keying_material BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_export_keying_material) #define _SSL_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_free) #define _SSL_generate_key_block BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_generate_key_block) #define _SSL_get0_alpn_selected BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_alpn_selected) #define _SSL_get0_certificate_types BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_certificate_types) #define _SSL_get0_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_chain) #define _SSL_get0_chain_certs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_chain_certs) #define _SSL_get0_ech_name_override BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_ech_name_override) #define _SSL_get0_ech_retry_configs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_ech_retry_configs) #define _SSL_get0_next_proto_negotiated BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_next_proto_negotiated) #define _SSL_get0_ocsp_response BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_ocsp_response) #define _SSL_get0_param BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_param) #define 
_SSL_get0_peer_application_settings BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_peer_application_settings) #define _SSL_get0_peer_certificates BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_peer_certificates) #define _SSL_get0_peer_delegation_algorithms BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_peer_delegation_algorithms) #define _SSL_get0_peer_verify_algorithms BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_peer_verify_algorithms) #define _SSL_get0_selected_credential BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_selected_credential) #define _SSL_get0_server_requested_CAs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_server_requested_CAs) #define _SSL_get0_session_id_context BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_session_id_context) #define _SSL_get0_signed_cert_timestamp_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get0_signed_cert_timestamp_list) #define _SSL_get1_session BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get1_session) #define _SSL_get_SSL_CTX BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_SSL_CTX) #define _SSL_get_all_cipher_names BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_all_cipher_names) #define _SSL_get_all_curve_names BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_all_curve_names) #define _SSL_get_all_group_names BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_all_group_names) #define _SSL_get_all_signature_algorithm_names BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_all_signature_algorithm_names) #define _SSL_get_all_standard_cipher_names BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_all_standard_cipher_names) #define _SSL_get_all_version_names BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_all_version_names) #define _SSL_get_certificate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_certificate) #define _SSL_get_cipher_by_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
SSL_get_cipher_by_value) #define _SSL_get_cipher_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_cipher_list) #define _SSL_get_ciphers BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_ciphers) #define _SSL_get_client_CA_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_client_CA_list) #define _SSL_get_client_random BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_client_random) #define _SSL_get_compliance_policy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_compliance_policy) #define _SSL_get_current_cipher BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_current_cipher) #define _SSL_get_current_compression BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_current_compression) #define _SSL_get_current_expansion BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_current_expansion) #define _SSL_get_curve_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_curve_id) #define _SSL_get_curve_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_curve_name) #define _SSL_get_default_timeout BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_default_timeout) #define _SSL_get_early_data_reason BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_early_data_reason) #define _SSL_get_error BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_error) #define _SSL_get_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_ex_data) #define _SSL_get_ex_data_X509_STORE_CTX_idx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_ex_data_X509_STORE_CTX_idx) #define _SSL_get_ex_new_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_ex_new_index) #define _SSL_get_extms_support BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_extms_support) #define _SSL_get_fd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_fd) #define _SSL_get_finished BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_finished) #define _SSL_get_group_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
SSL_get_group_id) #define _SSL_get_group_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_group_name) #define _SSL_get_info_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_info_callback) #define _SSL_get_ivs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_ivs) #define _SSL_get_key_block_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_key_block_len) #define _SSL_get_max_cert_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_max_cert_list) #define _SSL_get_max_proto_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_max_proto_version) #define _SSL_get_min_proto_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_min_proto_version) #define _SSL_get_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_mode) #define _SSL_get_negotiated_group BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_negotiated_group) #define _SSL_get_options BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_options) #define _SSL_get_peer_cert_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_peer_cert_chain) #define _SSL_get_peer_certificate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_peer_certificate) #define _SSL_get_peer_finished BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_peer_finished) #define _SSL_get_peer_full_cert_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_peer_full_cert_chain) #define _SSL_get_peer_quic_transport_params BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_peer_quic_transport_params) #define _SSL_get_peer_signature_algorithm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_peer_signature_algorithm) #define _SSL_get_pending_cipher BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_pending_cipher) #define _SSL_get_privatekey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_privatekey) #define _SSL_get_psk_identity BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_psk_identity) #define 
_SSL_get_psk_identity_hint BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_psk_identity_hint) #define _SSL_get_quiet_shutdown BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_quiet_shutdown) #define _SSL_get_rbio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_rbio) #define _SSL_get_read_ahead BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_read_ahead) #define _SSL_get_read_sequence BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_read_sequence) #define _SSL_get_rfd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_rfd) #define _SSL_get_secure_renegotiation_support BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_secure_renegotiation_support) #define _SSL_get_selected_srtp_profile BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_selected_srtp_profile) #define _SSL_get_server_random BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_server_random) #define _SSL_get_server_tmp_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_server_tmp_key) #define _SSL_get_servername BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_servername) #define _SSL_get_servername_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_servername_type) #define _SSL_get_session BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_session) #define _SSL_get_shared_ciphers BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_shared_ciphers) #define _SSL_get_shared_sigalgs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_shared_sigalgs) #define _SSL_get_shutdown BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_shutdown) #define _SSL_get_signature_algorithm_digest BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_signature_algorithm_digest) #define _SSL_get_signature_algorithm_key_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_signature_algorithm_key_type) #define _SSL_get_signature_algorithm_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_signature_algorithm_name) #define 
_SSL_get_srtp_profiles BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_srtp_profiles) #define _SSL_get_ticket_age_skew BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_ticket_age_skew) #define _SSL_get_tls_channel_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_tls_channel_id) #define _SSL_get_tls_unique BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_tls_unique) #define _SSL_get_tlsext_status_ocsp_resp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_tlsext_status_ocsp_resp) #define _SSL_get_tlsext_status_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_tlsext_status_type) #define _SSL_get_verify_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_verify_callback) #define _SSL_get_verify_depth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_verify_depth) #define _SSL_get_verify_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_verify_mode) #define _SSL_get_verify_result BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_verify_result) #define _SSL_get_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_version) #define _SSL_get_wbio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_wbio) #define _SSL_get_wfd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_wfd) #define _SSL_get_write_sequence BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_get_write_sequence) #define _SSL_has_application_settings BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_has_application_settings) #define _SSL_has_pending BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_has_pending) #define _SSL_in_early_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_in_early_data) #define _SSL_in_false_start BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_in_false_start) #define _SSL_in_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_in_init) #define _SSL_is_dtls BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_is_dtls) #define _SSL_is_init_finished 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_is_init_finished) #define _SSL_is_quic BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_is_quic) #define _SSL_is_server BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_is_server) #define _SSL_is_signature_algorithm_rsa_pss BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_is_signature_algorithm_rsa_pss) #define _SSL_key_update BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_key_update) #define _SSL_library_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_library_init) #define _SSL_load_client_CA_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_load_client_CA_file) #define _SSL_load_error_strings BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_load_error_strings) #define _SSL_magic_pending_session_ptr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_magic_pending_session_ptr) #define _SSL_marshal_ech_config BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_marshal_ech_config) #define _SSL_max_seal_overhead BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_max_seal_overhead) #define _SSL_need_tmp_RSA BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_need_tmp_RSA) #define _SSL_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_new) #define _SSL_num_renegotiations BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_num_renegotiations) #define _SSL_peek BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_peek) #define _SSL_pending BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_pending) #define _SSL_process_quic_post_handshake BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_process_quic_post_handshake) #define _SSL_process_tls13_new_session_ticket BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_process_tls13_new_session_ticket) #define _SSL_provide_quic_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_provide_quic_data) #define _SSL_quic_max_handshake_flight_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_quic_max_handshake_flight_len) #define _SSL_quic_read_level 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_quic_read_level) #define _SSL_quic_write_level BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_quic_write_level) #define _SSL_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_read) #define _SSL_renegotiate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_renegotiate) #define _SSL_renegotiate_pending BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_renegotiate_pending) #define _SSL_request_handshake_hints BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_request_handshake_hints) #define _SSL_reset_early_data_reject BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_reset_early_data_reject) #define _SSL_select_next_proto BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_select_next_proto) #define _SSL_send_fatal_alert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_send_fatal_alert) #define _SSL_serialize_capabilities BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_serialize_capabilities) #define _SSL_serialize_handshake_hints BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_serialize_handshake_hints) #define _SSL_session_reused BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_session_reused) #define _SSL_set0_CA_names BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set0_CA_names) #define _SSL_set0_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set0_chain) #define _SSL_set0_client_CAs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set0_client_CAs) #define _SSL_set0_rbio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set0_rbio) #define _SSL_set0_verify_cert_store BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set0_verify_cert_store) #define _SSL_set0_wbio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set0_wbio) #define _SSL_set1_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set1_chain) #define _SSL_set1_curves BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set1_curves) #define _SSL_set1_curves_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
SSL_set1_curves_list) #define _SSL_set1_ech_config_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set1_ech_config_list) #define _SSL_set1_group_ids BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set1_group_ids) #define _SSL_set1_groups BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set1_groups) #define _SSL_set1_groups_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set1_groups_list) #define _SSL_set1_host BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set1_host) #define _SSL_set1_param BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set1_param) #define _SSL_set1_sigalgs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set1_sigalgs) #define _SSL_set1_sigalgs_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set1_sigalgs_list) #define _SSL_set1_tls_channel_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set1_tls_channel_id) #define _SSL_set1_verify_cert_store BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set1_verify_cert_store) #define _SSL_set_SSL_CTX BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_SSL_CTX) #define _SSL_set_accept_state BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_accept_state) #define _SSL_set_alpn_protos BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_alpn_protos) #define _SSL_set_alps_use_new_codepoint BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_alps_use_new_codepoint) #define _SSL_set_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_bio) #define _SSL_set_cert_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_cert_cb) #define _SSL_set_chain_and_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_chain_and_key) #define _SSL_set_check_client_certificate_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_check_client_certificate_type) #define _SSL_set_check_ecdsa_curve BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_check_ecdsa_curve) #define _SSL_set_cipher_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
SSL_set_cipher_list) #define _SSL_set_client_CA_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_client_CA_list) #define _SSL_set_compliance_policy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_compliance_policy) #define _SSL_set_connect_state BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_connect_state) #define _SSL_set_custom_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_custom_verify) #define _SSL_set_early_data_enabled BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_early_data_enabled) #define _SSL_set_enable_ech_grease BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_enable_ech_grease) #define _SSL_set_enforce_rsa_key_usage BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_enforce_rsa_key_usage) #define _SSL_set_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_ex_data) #define _SSL_set_fd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_fd) #define _SSL_set_handshake_hints BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_handshake_hints) #define _SSL_set_hostflags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_hostflags) #define _SSL_set_info_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_info_callback) #define _SSL_set_jdk11_workaround BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_jdk11_workaround) #define _SSL_set_max_cert_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_max_cert_list) #define _SSL_set_max_proto_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_max_proto_version) #define _SSL_set_max_send_fragment BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_max_send_fragment) #define _SSL_set_min_proto_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_min_proto_version) #define _SSL_set_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_mode) #define _SSL_set_msg_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_msg_callback) #define _SSL_set_msg_callback_arg 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_msg_callback_arg) #define _SSL_set_mtu BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_mtu) #define _SSL_set_ocsp_response BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_ocsp_response) #define _SSL_set_options BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_options) #define _SSL_set_permute_extensions BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_permute_extensions) #define _SSL_set_private_key_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_private_key_method) #define _SSL_set_psk_client_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_psk_client_callback) #define _SSL_set_psk_server_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_psk_server_callback) #define _SSL_set_purpose BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_purpose) #define _SSL_set_quic_early_data_context BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_quic_early_data_context) #define _SSL_set_quic_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_quic_method) #define _SSL_set_quic_transport_params BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_quic_transport_params) #define _SSL_set_quic_use_legacy_codepoint BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_quic_use_legacy_codepoint) #define _SSL_set_quiet_shutdown BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_quiet_shutdown) #define _SSL_set_read_ahead BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_read_ahead) #define _SSL_set_renegotiate_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_renegotiate_mode) #define _SSL_set_retain_only_sha256_of_client_certs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_retain_only_sha256_of_client_certs) #define _SSL_set_rfd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_rfd) #define _SSL_set_session BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_session) #define _SSL_set_session_id_context 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_session_id_context) #define _SSL_set_shed_handshake_config BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_shed_handshake_config) #define _SSL_set_shutdown BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_shutdown) #define _SSL_set_signed_cert_timestamp_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_signed_cert_timestamp_list) #define _SSL_set_signing_algorithm_prefs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_signing_algorithm_prefs) #define _SSL_set_srtp_profiles BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_srtp_profiles) #define _SSL_set_state BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_state) #define _SSL_set_strict_cipher_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_strict_cipher_list) #define _SSL_set_tls_channel_id_enabled BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_tls_channel_id_enabled) #define _SSL_set_tlsext_host_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_tlsext_host_name) #define _SSL_set_tlsext_status_ocsp_resp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_tlsext_status_ocsp_resp) #define _SSL_set_tlsext_status_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_tlsext_status_type) #define _SSL_set_tlsext_use_srtp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_tlsext_use_srtp) #define _SSL_set_tmp_dh BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_tmp_dh) #define _SSL_set_tmp_dh_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_tmp_dh_callback) #define _SSL_set_tmp_ecdh BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_tmp_ecdh) #define _SSL_set_tmp_rsa BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_tmp_rsa) #define _SSL_set_tmp_rsa_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_tmp_rsa_callback) #define _SSL_set_trust BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_trust) #define _SSL_set_verify 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_verify) #define _SSL_set_verify_algorithm_prefs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_verify_algorithm_prefs) #define _SSL_set_verify_depth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_verify_depth) #define _SSL_set_wfd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_set_wfd) #define _SSL_shutdown BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_shutdown) #define _SSL_state BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_state) #define _SSL_state_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_state_string) #define _SSL_state_string_long BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_state_string_long) #define _SSL_total_renegotiations BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_total_renegotiations) #define _SSL_use_PrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_use_PrivateKey) #define _SSL_use_PrivateKey_ASN1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_use_PrivateKey_ASN1) #define _SSL_use_PrivateKey_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_use_PrivateKey_file) #define _SSL_use_RSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_use_RSAPrivateKey) #define _SSL_use_RSAPrivateKey_ASN1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_use_RSAPrivateKey_ASN1) #define _SSL_use_RSAPrivateKey_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_use_RSAPrivateKey_file) #define _SSL_use_certificate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_use_certificate) #define _SSL_use_certificate_ASN1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_use_certificate_ASN1) #define _SSL_use_certificate_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_use_certificate_file) #define _SSL_use_psk_identity_hint BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_use_psk_identity_hint) #define _SSL_used_hello_retry_request BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_used_hello_retry_request) #define 
_SSL_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_version) #define _SSL_want BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_want) #define _SSL_was_key_usage_invalid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_was_key_usage_invalid) #define _SSL_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSL_write) #define _SSLeay BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSLeay) #define _SSLeay_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSLeay_version) #define _SSLv23_client_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSLv23_client_method) #define _SSLv23_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSLv23_method) #define _SSLv23_server_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSLv23_server_method) #define _TLS_client_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLS_client_method) #define _TLS_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLS_method) #define _TLS_server_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLS_server_method) #define _TLS_with_buffers_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLS_with_buffers_method) #define _TLSv1_1_client_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLSv1_1_client_method) #define _TLSv1_1_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLSv1_1_method) #define _TLSv1_1_server_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLSv1_1_server_method) #define _TLSv1_2_client_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLSv1_2_client_method) #define _TLSv1_2_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLSv1_2_method) #define _TLSv1_2_server_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLSv1_2_server_method) #define _TLSv1_client_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLSv1_client_method) #define _TLSv1_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLSv1_method) #define _TLSv1_server_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TLSv1_server_method) 
#define _TRUST_TOKEN_CLIENT_add_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_add_key) #define _TRUST_TOKEN_CLIENT_begin_issuance BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_begin_issuance) #define _TRUST_TOKEN_CLIENT_begin_issuance_over_message BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_begin_issuance_over_message) #define _TRUST_TOKEN_CLIENT_begin_redemption BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_begin_redemption) #define _TRUST_TOKEN_CLIENT_finish_issuance BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_finish_issuance) #define _TRUST_TOKEN_CLIENT_finish_redemption BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_finish_redemption) #define _TRUST_TOKEN_CLIENT_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_free) #define _TRUST_TOKEN_CLIENT_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_new) #define _TRUST_TOKEN_CLIENT_set_srr_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_CLIENT_set_srr_key) #define _TRUST_TOKEN_ISSUER_add_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_add_key) #define _TRUST_TOKEN_ISSUER_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_free) #define _TRUST_TOKEN_ISSUER_issue BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_issue) #define _TRUST_TOKEN_ISSUER_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_new) #define _TRUST_TOKEN_ISSUER_redeem BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_redeem) #define _TRUST_TOKEN_ISSUER_redeem_over_message BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_redeem_over_message) #define _TRUST_TOKEN_ISSUER_set_metadata_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_ISSUER_set_metadata_key) #define _TRUST_TOKEN_ISSUER_set_srr_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
TRUST_TOKEN_ISSUER_set_srr_key) #define _TRUST_TOKEN_PRETOKEN_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_PRETOKEN_free) #define _TRUST_TOKEN_decode_private_metadata BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_decode_private_metadata) #define _TRUST_TOKEN_derive_key_from_secret BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_derive_key_from_secret) #define _TRUST_TOKEN_experiment_v1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_experiment_v1) #define _TRUST_TOKEN_experiment_v2_pmb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_experiment_v2_pmb) #define _TRUST_TOKEN_experiment_v2_voprf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_experiment_v2_voprf) #define _TRUST_TOKEN_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_free) #define _TRUST_TOKEN_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_generate_key) #define _TRUST_TOKEN_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_new) #define _TRUST_TOKEN_pst_v1_pmb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_pst_v1_pmb) #define _TRUST_TOKEN_pst_v1_voprf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, TRUST_TOKEN_pst_v1_voprf) #define _USERNOTICE_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, USERNOTICE_free) #define _USERNOTICE_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, USERNOTICE_it) #define _USERNOTICE_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, USERNOTICE_new) #define _X25519 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X25519) #define _X25519_keypair BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X25519_keypair) #define _X25519_public_from_private BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X25519_public_from_private) #define _X509V3_EXT_CRL_add_nconf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_CRL_add_nconf) #define _X509V3_EXT_REQ_add_nconf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_REQ_add_nconf) #define 
_X509V3_EXT_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_add) #define _X509V3_EXT_add_alias BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_add_alias) #define _X509V3_EXT_add_nconf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_add_nconf) #define _X509V3_EXT_add_nconf_sk BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_add_nconf_sk) #define _X509V3_EXT_d2i BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_d2i) #define _X509V3_EXT_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_free) #define _X509V3_EXT_get BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_get) #define _X509V3_EXT_get_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_get_nid) #define _X509V3_EXT_i2d BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_i2d) #define _X509V3_EXT_nconf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_nconf) #define _X509V3_EXT_nconf_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_nconf_nid) #define _X509V3_EXT_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_print) #define _X509V3_EXT_print_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_EXT_print_fp) #define _X509V3_NAME_from_section BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_NAME_from_section) #define _X509V3_add1_i2d BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_add1_i2d) #define _X509V3_add_standard_extensions BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_add_standard_extensions) #define _X509V3_add_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_add_value) #define _X509V3_add_value_bool BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_add_value_bool) #define _X509V3_add_value_int BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_add_value_int) #define _X509V3_bool_from_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_bool_from_string) #define _X509V3_conf_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
X509V3_conf_free) #define _X509V3_extensions_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_extensions_print) #define _X509V3_get_d2i BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_get_d2i) #define _X509V3_get_section BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_get_section) #define _X509V3_get_value_bool BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_get_value_bool) #define _X509V3_get_value_int BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_get_value_int) #define _X509V3_parse_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_parse_list) #define _X509V3_set_ctx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_set_ctx) #define _X509V3_set_nconf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509V3_set_nconf) #define _X509_ALGOR_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ALGOR_cmp) #define _X509_ALGOR_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ALGOR_dup) #define _X509_ALGOR_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ALGOR_free) #define _X509_ALGOR_get0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ALGOR_get0) #define _X509_ALGOR_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ALGOR_it) #define _X509_ALGOR_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ALGOR_new) #define _X509_ALGOR_set0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ALGOR_set0) #define _X509_ALGOR_set_md BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ALGOR_set_md) #define _X509_ATTRIBUTE_count BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_count) #define _X509_ATTRIBUTE_create BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_create) #define _X509_ATTRIBUTE_create_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_create_by_NID) #define _X509_ATTRIBUTE_create_by_OBJ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_create_by_OBJ) #define _X509_ATTRIBUTE_create_by_txt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
X509_ATTRIBUTE_create_by_txt) #define _X509_ATTRIBUTE_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_dup) #define _X509_ATTRIBUTE_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_free) #define _X509_ATTRIBUTE_get0_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_get0_data) #define _X509_ATTRIBUTE_get0_object BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_get0_object) #define _X509_ATTRIBUTE_get0_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_get0_type) #define _X509_ATTRIBUTE_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_it) #define _X509_ATTRIBUTE_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_new) #define _X509_ATTRIBUTE_set1_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_set1_data) #define _X509_ATTRIBUTE_set1_object BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_ATTRIBUTE_set1_object) #define _X509_CERT_AUX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CERT_AUX_free) #define _X509_CERT_AUX_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CERT_AUX_it) #define _X509_CERT_AUX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CERT_AUX_new) #define _X509_CERT_AUX_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CERT_AUX_print) #define _X509_CINF_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CINF_free) #define _X509_CINF_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CINF_it) #define _X509_CINF_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CINF_new) #define _X509_CRL_INFO_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_INFO_free) #define _X509_CRL_INFO_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_INFO_it) #define _X509_CRL_INFO_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_INFO_new) #define _X509_CRL_add0_revoked BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_add0_revoked) #define _X509_CRL_add1_ext_i2d 
/* NOTE(review): Auto-generated symbol-prefixing table (presumably emitted by
 * BoringSSL's prefix-header generator — do not edit by hand; regenerate
 * instead).  Each macro remaps an underscore-prefixed assembly-level symbol
 * `_NAME` to `BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, NAME)`, i.e. the
 * same symbol with the build-time BORINGSSL_PREFIX applied.  The leading
 * underscore in each left-hand name matches the Mach-O C-symbol mangling
 * convention — TODO confirm against the generator script.  This chunk covers
 * the X509_CRL_* / X509_* / X509v3_* certificate APIs and the
 * aes*gcmsiv_* / aes_gcm_* / aes_hw_* assembly entry points; the macros at
 * the very start and end of this span continue on adjacent lines outside
 * this excerpt. */
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_add1_ext_i2d) #define _X509_CRL_add_ext BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_add_ext) #define _X509_CRL_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_cmp) #define _X509_CRL_delete_ext BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_delete_ext) #define _X509_CRL_digest BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_digest) #define _X509_CRL_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_dup) #define _X509_CRL_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_free) #define _X509_CRL_get0_by_cert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get0_by_cert) #define _X509_CRL_get0_by_serial BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get0_by_serial) #define _X509_CRL_get0_extensions BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get0_extensions) #define _X509_CRL_get0_lastUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get0_lastUpdate) #define _X509_CRL_get0_nextUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get0_nextUpdate) #define _X509_CRL_get0_signature BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get0_signature) #define _X509_CRL_get_REVOKED BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get_REVOKED) #define _X509_CRL_get_ext BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get_ext) #define _X509_CRL_get_ext_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get_ext_by_NID) #define _X509_CRL_get_ext_by_OBJ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get_ext_by_OBJ) #define _X509_CRL_get_ext_by_critical BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get_ext_by_critical) #define _X509_CRL_get_ext_count BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get_ext_count) #define _X509_CRL_get_ext_d2i BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get_ext_d2i) #define _X509_CRL_get_issuer 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get_issuer) #define _X509_CRL_get_lastUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get_lastUpdate) #define _X509_CRL_get_nextUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get_nextUpdate) #define _X509_CRL_get_signature_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get_signature_nid) #define _X509_CRL_get_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_get_version) #define _X509_CRL_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_it) #define _X509_CRL_match BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_match) #define _X509_CRL_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_new) #define _X509_CRL_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_print) #define _X509_CRL_print_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_print_fp) #define _X509_CRL_set1_lastUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_set1_lastUpdate) #define _X509_CRL_set1_nextUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_set1_nextUpdate) #define _X509_CRL_set1_signature_algo BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_set1_signature_algo) #define _X509_CRL_set1_signature_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_set1_signature_value) #define _X509_CRL_set_issuer_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_set_issuer_name) #define _X509_CRL_set_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_set_version) #define _X509_CRL_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_sign) #define _X509_CRL_sign_ctx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_sign_ctx) #define _X509_CRL_sort BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_sort) #define _X509_CRL_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_CRL_up_ref) #define _X509_CRL_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
X509_CRL_verify) #define _X509_EXTENSIONS_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSIONS_it) #define _X509_EXTENSION_create_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSION_create_by_NID) #define _X509_EXTENSION_create_by_OBJ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSION_create_by_OBJ) #define _X509_EXTENSION_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSION_dup) #define _X509_EXTENSION_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSION_free) #define _X509_EXTENSION_get_critical BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSION_get_critical) #define _X509_EXTENSION_get_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSION_get_data) #define _X509_EXTENSION_get_object BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSION_get_object) #define _X509_EXTENSION_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSION_it) #define _X509_EXTENSION_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSION_new) #define _X509_EXTENSION_set_critical BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSION_set_critical) #define _X509_EXTENSION_set_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSION_set_data) #define _X509_EXTENSION_set_object BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_EXTENSION_set_object) #define _X509_INFO_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_INFO_free) #define _X509_LOOKUP_add_dir BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_LOOKUP_add_dir) #define _X509_LOOKUP_ctrl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_LOOKUP_ctrl) #define _X509_LOOKUP_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_LOOKUP_file) #define _X509_LOOKUP_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_LOOKUP_free) #define _X509_LOOKUP_hash_dir BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_LOOKUP_hash_dir) #define _X509_LOOKUP_load_file 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_LOOKUP_load_file) #define _X509_NAME_ENTRY_create_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_ENTRY_create_by_NID) #define _X509_NAME_ENTRY_create_by_OBJ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_ENTRY_create_by_OBJ) #define _X509_NAME_ENTRY_create_by_txt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_ENTRY_create_by_txt) #define _X509_NAME_ENTRY_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_ENTRY_dup) #define _X509_NAME_ENTRY_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_ENTRY_free) #define _X509_NAME_ENTRY_get_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_ENTRY_get_data) #define _X509_NAME_ENTRY_get_object BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_ENTRY_get_object) #define _X509_NAME_ENTRY_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_ENTRY_it) #define _X509_NAME_ENTRY_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_ENTRY_new) #define _X509_NAME_ENTRY_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_ENTRY_set) #define _X509_NAME_ENTRY_set_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_ENTRY_set_data) #define _X509_NAME_ENTRY_set_object BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_ENTRY_set_object) #define _X509_NAME_add_entry BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_add_entry) #define _X509_NAME_add_entry_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_add_entry_by_NID) #define _X509_NAME_add_entry_by_OBJ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_add_entry_by_OBJ) #define _X509_NAME_add_entry_by_txt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_add_entry_by_txt) #define _X509_NAME_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_cmp) #define _X509_NAME_delete_entry BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_delete_entry) #define _X509_NAME_digest 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_digest) #define _X509_NAME_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_dup) #define _X509_NAME_entry_count BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_entry_count) #define _X509_NAME_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_free) #define _X509_NAME_get0_der BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_get0_der) #define _X509_NAME_get_entry BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_get_entry) #define _X509_NAME_get_index_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_get_index_by_NID) #define _X509_NAME_get_index_by_OBJ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_get_index_by_OBJ) #define _X509_NAME_get_text_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_get_text_by_NID) #define _X509_NAME_get_text_by_OBJ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_get_text_by_OBJ) #define _X509_NAME_hash BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_hash) #define _X509_NAME_hash_old BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_hash_old) #define _X509_NAME_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_it) #define _X509_NAME_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_new) #define _X509_NAME_oneline BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_oneline) #define _X509_NAME_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_print) #define _X509_NAME_print_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_print_ex) #define _X509_NAME_print_ex_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_print_ex_fp) #define _X509_NAME_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_NAME_set) #define _X509_OBJECT_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_OBJECT_free) #define _X509_OBJECT_free_contents BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_OBJECT_free_contents) #define 
_X509_OBJECT_get0_X509 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_OBJECT_get0_X509) #define _X509_OBJECT_get_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_OBJECT_get_type) #define _X509_OBJECT_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_OBJECT_new) #define _X509_PUBKEY_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PUBKEY_free) #define _X509_PUBKEY_get BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PUBKEY_get) #define _X509_PUBKEY_get0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PUBKEY_get0) #define _X509_PUBKEY_get0_param BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PUBKEY_get0_param) #define _X509_PUBKEY_get0_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PUBKEY_get0_public_key) #define _X509_PUBKEY_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PUBKEY_it) #define _X509_PUBKEY_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PUBKEY_new) #define _X509_PUBKEY_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PUBKEY_set) #define _X509_PUBKEY_set0_param BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PUBKEY_set0_param) #define _X509_PURPOSE_get0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PURPOSE_get0) #define _X509_PURPOSE_get_by_sname BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PURPOSE_get_by_sname) #define _X509_PURPOSE_get_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PURPOSE_get_id) #define _X509_PURPOSE_get_trust BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_PURPOSE_get_trust) #define _X509_REQ_INFO_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_INFO_free) #define _X509_REQ_INFO_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_INFO_it) #define _X509_REQ_INFO_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_INFO_new) #define _X509_REQ_add1_attr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_add1_attr) #define _X509_REQ_add1_attr_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
X509_REQ_add1_attr_by_NID) #define _X509_REQ_add1_attr_by_OBJ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_add1_attr_by_OBJ) #define _X509_REQ_add1_attr_by_txt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_add1_attr_by_txt) #define _X509_REQ_add_extensions BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_add_extensions) #define _X509_REQ_add_extensions_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_add_extensions_nid) #define _X509_REQ_check_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_check_private_key) #define _X509_REQ_delete_attr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_delete_attr) #define _X509_REQ_digest BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_digest) #define _X509_REQ_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_dup) #define _X509_REQ_extension_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_extension_nid) #define _X509_REQ_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_free) #define _X509_REQ_get0_pubkey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_get0_pubkey) #define _X509_REQ_get0_signature BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_get0_signature) #define _X509_REQ_get1_email BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_get1_email) #define _X509_REQ_get_attr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_get_attr) #define _X509_REQ_get_attr_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_get_attr_by_NID) #define _X509_REQ_get_attr_by_OBJ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_get_attr_by_OBJ) #define _X509_REQ_get_attr_count BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_get_attr_count) #define _X509_REQ_get_extensions BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_get_extensions) #define _X509_REQ_get_pubkey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_get_pubkey) #define _X509_REQ_get_signature_nid 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_get_signature_nid) #define _X509_REQ_get_subject_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_get_subject_name) #define _X509_REQ_get_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_get_version) #define _X509_REQ_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_it) #define _X509_REQ_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_new) #define _X509_REQ_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_print) #define _X509_REQ_print_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_print_ex) #define _X509_REQ_print_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_print_fp) #define _X509_REQ_set1_signature_algo BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_set1_signature_algo) #define _X509_REQ_set1_signature_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_set1_signature_value) #define _X509_REQ_set_pubkey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_set_pubkey) #define _X509_REQ_set_subject_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_set_subject_name) #define _X509_REQ_set_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_set_version) #define _X509_REQ_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_sign) #define _X509_REQ_sign_ctx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_sign_ctx) #define _X509_REQ_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REQ_verify) #define _X509_REVOKED_add1_ext_i2d BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_add1_ext_i2d) #define _X509_REVOKED_add_ext BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_add_ext) #define _X509_REVOKED_delete_ext BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_delete_ext) #define _X509_REVOKED_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_dup) #define _X509_REVOKED_free 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_free) #define _X509_REVOKED_get0_extensions BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_get0_extensions) #define _X509_REVOKED_get0_revocationDate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_get0_revocationDate) #define _X509_REVOKED_get0_serialNumber BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_get0_serialNumber) #define _X509_REVOKED_get_ext BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_get_ext) #define _X509_REVOKED_get_ext_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_get_ext_by_NID) #define _X509_REVOKED_get_ext_by_OBJ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_get_ext_by_OBJ) #define _X509_REVOKED_get_ext_by_critical BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_get_ext_by_critical) #define _X509_REVOKED_get_ext_count BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_get_ext_count) #define _X509_REVOKED_get_ext_d2i BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_get_ext_d2i) #define _X509_REVOKED_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_it) #define _X509_REVOKED_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_new) #define _X509_REVOKED_set_revocationDate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_set_revocationDate) #define _X509_REVOKED_set_serialNumber BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_REVOKED_set_serialNumber) #define _X509_SIG_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_SIG_free) #define _X509_SIG_get0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_SIG_get0) #define _X509_SIG_getm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_SIG_getm) #define _X509_SIG_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_SIG_new) #define _X509_STORE_CTX_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_cleanup) #define _X509_STORE_CTX_free 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_free) #define _X509_STORE_CTX_get0_cert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get0_cert) #define _X509_STORE_CTX_get0_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get0_chain) #define _X509_STORE_CTX_get0_current_crl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get0_current_crl) #define _X509_STORE_CTX_get0_param BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get0_param) #define _X509_STORE_CTX_get0_parent_ctx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get0_parent_ctx) #define _X509_STORE_CTX_get0_store BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get0_store) #define _X509_STORE_CTX_get0_untrusted BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get0_untrusted) #define _X509_STORE_CTX_get1_certs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get1_certs) #define _X509_STORE_CTX_get1_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get1_chain) #define _X509_STORE_CTX_get1_crls BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get1_crls) #define _X509_STORE_CTX_get1_issuer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get1_issuer) #define _X509_STORE_CTX_get_by_subject BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get_by_subject) #define _X509_STORE_CTX_get_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get_chain) #define _X509_STORE_CTX_get_current_cert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get_current_cert) #define _X509_STORE_CTX_get_error BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get_error) #define _X509_STORE_CTX_get_error_depth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get_error_depth) #define _X509_STORE_CTX_get_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get_ex_data) #define 
_X509_STORE_CTX_get_ex_new_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_get_ex_new_index) #define _X509_STORE_CTX_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_init) #define _X509_STORE_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_new) #define _X509_STORE_CTX_set0_crls BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set0_crls) #define _X509_STORE_CTX_set0_param BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set0_param) #define _X509_STORE_CTX_set0_trusted_stack BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set0_trusted_stack) #define _X509_STORE_CTX_set_chain BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set_chain) #define _X509_STORE_CTX_set_default BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set_default) #define _X509_STORE_CTX_set_depth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set_depth) #define _X509_STORE_CTX_set_error BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set_error) #define _X509_STORE_CTX_set_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set_ex_data) #define _X509_STORE_CTX_set_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set_flags) #define _X509_STORE_CTX_set_purpose BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set_purpose) #define _X509_STORE_CTX_set_time BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set_time) #define _X509_STORE_CTX_set_time_posix BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set_time_posix) #define _X509_STORE_CTX_set_trust BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set_trust) #define _X509_STORE_CTX_set_verify_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_set_verify_cb) #define _X509_STORE_CTX_trusted_stack BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_CTX_trusted_stack) #define _X509_STORE_add_cert 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_add_cert) #define _X509_STORE_add_crl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_add_crl) #define _X509_STORE_add_lookup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_add_lookup) #define _X509_STORE_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_free) #define _X509_STORE_get0_objects BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_get0_objects) #define _X509_STORE_get0_param BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_get0_param) #define _X509_STORE_get1_objects BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_get1_objects) #define _X509_STORE_load_locations BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_load_locations) #define _X509_STORE_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_new) #define _X509_STORE_set1_param BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_set1_param) #define _X509_STORE_set_default_paths BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_set_default_paths) #define _X509_STORE_set_depth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_set_depth) #define _X509_STORE_set_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_set_flags) #define _X509_STORE_set_purpose BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_set_purpose) #define _X509_STORE_set_trust BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_set_trust) #define _X509_STORE_set_verify_cb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_set_verify_cb) #define _X509_STORE_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_STORE_up_ref) #define _X509_VAL_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VAL_free) #define _X509_VAL_it BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VAL_it) #define _X509_VAL_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VAL_new) #define _X509_VERIFY_PARAM_add0_policy 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_add0_policy) #define _X509_VERIFY_PARAM_add1_host BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_add1_host) #define _X509_VERIFY_PARAM_clear_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_clear_flags) #define _X509_VERIFY_PARAM_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_free) #define _X509_VERIFY_PARAM_get_depth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_get_depth) #define _X509_VERIFY_PARAM_get_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_get_flags) #define _X509_VERIFY_PARAM_inherit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_inherit) #define _X509_VERIFY_PARAM_lookup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_lookup) #define _X509_VERIFY_PARAM_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_new) #define _X509_VERIFY_PARAM_set1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set1) #define _X509_VERIFY_PARAM_set1_email BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set1_email) #define _X509_VERIFY_PARAM_set1_host BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set1_host) #define _X509_VERIFY_PARAM_set1_ip BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set1_ip) #define _X509_VERIFY_PARAM_set1_ip_asc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set1_ip_asc) #define _X509_VERIFY_PARAM_set1_policies BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set1_policies) #define _X509_VERIFY_PARAM_set_depth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_depth) #define _X509_VERIFY_PARAM_set_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_flags) #define _X509_VERIFY_PARAM_set_hostflags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_hostflags) #define 
_X509_VERIFY_PARAM_set_purpose BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_purpose) #define _X509_VERIFY_PARAM_set_time BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_time) #define _X509_VERIFY_PARAM_set_time_posix BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_time_posix) #define _X509_VERIFY_PARAM_set_trust BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_VERIFY_PARAM_set_trust) #define _X509_add1_ext_i2d BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_add1_ext_i2d) #define _X509_add1_reject_object BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_add1_reject_object) #define _X509_add1_trust_object BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_add1_trust_object) #define _X509_add_ext BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_add_ext) #define _X509_alias_get0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_alias_get0) #define _X509_alias_set1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_alias_set1) #define _X509_chain_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_chain_up_ref) #define _X509_check_akid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_check_akid) #define _X509_check_ca BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_check_ca) #define _X509_check_email BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_check_email) #define _X509_check_host BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_check_host) #define _X509_check_ip BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_check_ip) #define _X509_check_ip_asc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_check_ip_asc) #define _X509_check_issued BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_check_issued) #define _X509_check_private_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_check_private_key) #define _X509_check_purpose BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_check_purpose) #define _X509_check_trust 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_check_trust) #define _X509_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_cmp) #define _X509_cmp_current_time BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_cmp_current_time) #define _X509_cmp_time BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_cmp_time) #define _X509_cmp_time_posix BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_cmp_time_posix) #define _X509_delete_ext BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_delete_ext) #define _X509_digest BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_digest) #define _X509_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_dup) #define _X509_email_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_email_free) #define _X509_find_by_issuer_and_serial BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_find_by_issuer_and_serial) #define _X509_find_by_subject BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_find_by_subject) #define _X509_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_free) #define _X509_get0_authority_issuer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_authority_issuer) #define _X509_get0_authority_key_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_authority_key_id) #define _X509_get0_authority_serial BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_authority_serial) #define _X509_get0_extensions BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_extensions) #define _X509_get0_notAfter BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_notAfter) #define _X509_get0_notBefore BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_notBefore) #define _X509_get0_pubkey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_pubkey) #define _X509_get0_pubkey_bitstr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_pubkey_bitstr) #define _X509_get0_serialNumber BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_serialNumber) #define 
_X509_get0_signature BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_signature) #define _X509_get0_subject_key_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_subject_key_id) #define _X509_get0_tbs_sigalg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_tbs_sigalg) #define _X509_get0_uids BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get0_uids) #define _X509_get1_email BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get1_email) #define _X509_get1_ocsp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get1_ocsp) #define _X509_get_X509_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_X509_PUBKEY) #define _X509_get_default_cert_area BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_default_cert_area) #define _X509_get_default_cert_dir BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_default_cert_dir) #define _X509_get_default_cert_dir_env BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_default_cert_dir_env) #define _X509_get_default_cert_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_default_cert_file) #define _X509_get_default_cert_file_env BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_default_cert_file_env) #define _X509_get_default_private_dir BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_default_private_dir) #define _X509_get_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_ex_data) #define _X509_get_ex_new_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_ex_new_index) #define _X509_get_ext BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_ext) #define _X509_get_ext_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_ext_by_NID) #define _X509_get_ext_by_OBJ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_ext_by_OBJ) #define _X509_get_ext_by_critical BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_ext_by_critical) #define _X509_get_ext_count BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
X509_get_ext_count) #define _X509_get_ext_d2i BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_ext_d2i) #define _X509_get_extended_key_usage BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_extended_key_usage) #define _X509_get_extension_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_extension_flags) #define _X509_get_issuer_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_issuer_name) #define _X509_get_key_usage BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_key_usage) #define _X509_get_notAfter BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_notAfter) #define _X509_get_notBefore BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_notBefore) #define _X509_get_pathlen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_pathlen) #define _X509_get_pubkey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_pubkey) #define _X509_get_serialNumber BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_serialNumber) #define _X509_get_signature_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_signature_nid) #define _X509_get_subject_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_subject_name) #define _X509_get_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_get_version) #define _X509_getm_notAfter BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_getm_notAfter) #define _X509_getm_notBefore BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_getm_notBefore) #define _X509_gmtime_adj BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_gmtime_adj) #define _X509_is_valid_trust_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_is_valid_trust_id) #define _X509_issuer_name_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_issuer_name_cmp) #define _X509_issuer_name_hash BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_issuer_name_hash) #define _X509_issuer_name_hash_old BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_issuer_name_hash_old) #define _X509_it 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_it) #define _X509_keyid_get0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_keyid_get0) #define _X509_keyid_set1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_keyid_set1) #define _X509_load_cert_crl_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_load_cert_crl_file) #define _X509_load_cert_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_load_cert_file) #define _X509_load_crl_file BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_load_crl_file) #define _X509_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_new) #define _X509_parse_from_buffer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_parse_from_buffer) #define _X509_policy_check BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_policy_check) #define _X509_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_print) #define _X509_print_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_print_ex) #define _X509_print_ex_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_print_ex_fp) #define _X509_print_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_print_fp) #define _X509_pubkey_digest BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_pubkey_digest) #define _X509_reject_clear BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_reject_clear) #define _X509_set1_notAfter BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_set1_notAfter) #define _X509_set1_notBefore BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_set1_notBefore) #define _X509_set1_signature_algo BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_set1_signature_algo) #define _X509_set1_signature_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_set1_signature_value) #define _X509_set_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_set_ex_data) #define _X509_set_issuer_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_set_issuer_name) #define _X509_set_notAfter 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_set_notAfter) #define _X509_set_notBefore BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_set_notBefore) #define _X509_set_pubkey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_set_pubkey) #define _X509_set_serialNumber BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_set_serialNumber) #define _X509_set_subject_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_set_subject_name) #define _X509_set_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_set_version) #define _X509_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_sign) #define _X509_sign_ctx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_sign_ctx) #define _X509_signature_dump BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_signature_dump) #define _X509_signature_print BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_signature_print) #define _X509_subject_name_cmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_subject_name_cmp) #define _X509_subject_name_hash BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_subject_name_hash) #define _X509_subject_name_hash_old BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_subject_name_hash_old) #define _X509_supported_extension BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_supported_extension) #define _X509_time_adj BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_time_adj) #define _X509_time_adj_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_time_adj_ex) #define _X509_trust_clear BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_trust_clear) #define _X509_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_up_ref) #define _X509_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_verify) #define _X509_verify_cert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_verify_cert) #define _X509_verify_cert_error_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509_verify_cert_error_string) #define _X509v3_add_ext 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509v3_add_ext) #define _X509v3_delete_ext BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509v3_delete_ext) #define _X509v3_get_ext BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509v3_get_ext) #define _X509v3_get_ext_by_NID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509v3_get_ext_by_NID) #define _X509v3_get_ext_by_OBJ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509v3_get_ext_by_OBJ) #define _X509v3_get_ext_by_critical BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509v3_get_ext_by_critical) #define _X509v3_get_ext_count BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, X509v3_get_ext_count) #define ___clang_call_terminate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, __clang_call_terminate) #define _a2i_IPADDRESS BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, a2i_IPADDRESS) #define _a2i_IPADDRESS_NC BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, a2i_IPADDRESS_NC) #define _aes128gcmsiv_aes_ks BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes128gcmsiv_aes_ks) #define _aes128gcmsiv_aes_ks_enc_x1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes128gcmsiv_aes_ks_enc_x1) #define _aes128gcmsiv_dec BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes128gcmsiv_dec) #define _aes128gcmsiv_ecb_enc_block BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes128gcmsiv_ecb_enc_block) #define _aes128gcmsiv_enc_msg_x4 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes128gcmsiv_enc_msg_x4) #define _aes128gcmsiv_enc_msg_x8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes128gcmsiv_enc_msg_x8) #define _aes128gcmsiv_kdf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes128gcmsiv_kdf) #define _aes256gcmsiv_aes_ks BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes256gcmsiv_aes_ks) #define _aes256gcmsiv_aes_ks_enc_x1 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes256gcmsiv_aes_ks_enc_x1) #define _aes256gcmsiv_dec BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes256gcmsiv_dec) #define _aes256gcmsiv_ecb_enc_block 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes256gcmsiv_ecb_enc_block) #define _aes256gcmsiv_enc_msg_x4 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes256gcmsiv_enc_msg_x4) #define _aes256gcmsiv_enc_msg_x8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes256gcmsiv_enc_msg_x8) #define _aes256gcmsiv_kdf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes256gcmsiv_kdf) #define _aes_ctr_set_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_ctr_set_key) #define _aes_gcm_dec_kernel BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_gcm_dec_kernel) #define _aes_gcm_dec_update_vaes_avx10_512 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_gcm_dec_update_vaes_avx10_512) #define _aes_gcm_dec_update_vaes_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_gcm_dec_update_vaes_avx2) #define _aes_gcm_enc_kernel BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_gcm_enc_kernel) #define _aes_gcm_enc_update_vaes_avx10_512 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_gcm_enc_update_vaes_avx10_512) #define _aes_gcm_enc_update_vaes_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_gcm_enc_update_vaes_avx2) #define _aes_hw_cbc_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_cbc_encrypt) #define _aes_hw_ctr32_encrypt_blocks BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_ctr32_encrypt_blocks) #define _aes_hw_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_decrypt) #define _aes_hw_ecb_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_ecb_encrypt) #define _aes_hw_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_encrypt) #define _aes_hw_encrypt_key_to_decrypt_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_encrypt_key_to_decrypt_key) #define _aes_hw_set_decrypt_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_set_decrypt_key) #define _aes_hw_set_encrypt_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_set_encrypt_key) #define _aes_hw_set_encrypt_key_alt 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_set_encrypt_key_alt) #define _aes_hw_set_encrypt_key_alt_preferred BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_set_encrypt_key_alt_preferred) #define _aes_hw_set_encrypt_key_base BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_hw_set_encrypt_key_base) #define _aes_nohw_cbc_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_nohw_cbc_encrypt) #define _aes_nohw_ctr32_encrypt_blocks BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_nohw_ctr32_encrypt_blocks) #define _aes_nohw_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_nohw_decrypt) #define _aes_nohw_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_nohw_encrypt) #define _aes_nohw_set_decrypt_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_nohw_set_decrypt_key) #define _aes_nohw_set_encrypt_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aes_nohw_set_encrypt_key) #define _aesgcmsiv_htable6_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aesgcmsiv_htable6_init) #define _aesgcmsiv_htable_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aesgcmsiv_htable_init) #define _aesgcmsiv_htable_polyval BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aesgcmsiv_htable_polyval) #define _aesgcmsiv_polyval_horner BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aesgcmsiv_polyval_horner) #define _aesni_gcm_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aesni_gcm_decrypt) #define _aesni_gcm_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, aesni_gcm_encrypt) #define _asn1_bit_string_length BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_bit_string_length) #define _asn1_do_adb BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_do_adb) #define _asn1_enc_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_enc_free) #define _asn1_enc_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_enc_init) #define _asn1_enc_restore BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_enc_restore) #define 
_asn1_enc_save BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_enc_save) #define _asn1_encoding_clear BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_encoding_clear) #define _asn1_generalizedtime_to_tm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_generalizedtime_to_tm) #define _asn1_get_choice_selector BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_get_choice_selector) #define _asn1_get_field_ptr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_get_field_ptr) #define _asn1_get_string_table_for_testing BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_get_string_table_for_testing) #define _asn1_is_printable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_is_printable) #define _asn1_refcount_dec_and_test_zero BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_refcount_dec_and_test_zero) #define _asn1_refcount_set_one BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_refcount_set_one) #define _asn1_set_choice_selector BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_set_choice_selector) #define _asn1_type_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_type_cleanup) #define _asn1_type_set0_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_type_set0_string) #define _asn1_type_value_as_pointer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_type_value_as_pointer) #define _asn1_utctime_to_tm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, asn1_utctime_to_tm) #define _bcm_as_approved_status BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bcm_as_approved_status) #define _bcm_success BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bcm_success) #define _beeu_mod_inverse_vartime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, beeu_mod_inverse_vartime) #define _bio_clear_socket_error BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bio_clear_socket_error) #define _bio_errno_should_retry BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bio_errno_should_retry) #define _bio_ip_and_port_to_socket_and_addr 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bio_ip_and_port_to_socket_and_addr) #define _bio_sock_error BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bio_sock_error) #define _bio_socket_nbio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bio_socket_nbio) #define _bio_socket_should_retry BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bio_socket_should_retry) #define _bn_abs_sub_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_abs_sub_consttime) #define _bn_add_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_add_words) #define _bn_assert_fits_in_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_assert_fits_in_bytes) #define _bn_big_endian_to_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_big_endian_to_words) #define _bn_copy_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_copy_words) #define _bn_declassify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_declassify) #define _bn_div_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_div_consttime) #define _bn_expand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_expand) #define _bn_fits_in_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_fits_in_words) #define _bn_from_montgomery_small BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_from_montgomery_small) #define _bn_gather5 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_gather5) #define _bn_in_range_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_in_range_words) #define _bn_is_bit_set_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_is_bit_set_words) #define _bn_is_relatively_prime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_is_relatively_prime) #define _bn_jacobi BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_jacobi) #define _bn_lcm_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_lcm_consttime) #define _bn_less_than_montgomery_R BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_less_than_montgomery_R) #define _bn_less_than_words 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_less_than_words) #define _bn_miller_rabin_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_miller_rabin_init) #define _bn_miller_rabin_iteration BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_miller_rabin_iteration) #define _bn_minimal_width BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_minimal_width) #define _bn_mod_add_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_add_consttime) #define _bn_mod_add_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_add_words) #define _bn_mod_exp_mont_small BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_exp_mont_small) #define _bn_mod_inverse0_prime_mont_small BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_inverse0_prime_mont_small) #define _bn_mod_inverse_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_inverse_consttime) #define _bn_mod_inverse_prime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_inverse_prime) #define _bn_mod_inverse_secret_prime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_inverse_secret_prime) #define _bn_mod_lshift1_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_lshift1_consttime) #define _bn_mod_lshift_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_lshift_consttime) #define _bn_mod_mul_montgomery_small BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_mul_montgomery_small) #define _bn_mod_sub_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_sub_consttime) #define _bn_mod_sub_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_sub_words) #define _bn_mod_u16_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mod_u16_consttime) #define _bn_mont_ctx_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mont_ctx_cleanup) #define _bn_mont_ctx_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mont_ctx_init) #define _bn_mont_ctx_set_RR_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
bn_mont_ctx_set_RR_consttime) #define _bn_mont_n0 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mont_n0) #define _bn_mul4x_mont BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul4x_mont) #define _bn_mul4x_mont_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul4x_mont_capable) #define _bn_mul4x_mont_gather5 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul4x_mont_gather5) #define _bn_mul4x_mont_gather5_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul4x_mont_gather5_capable) #define _bn_mul_add_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul_add_words) #define _bn_mul_comba4 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul_comba4) #define _bn_mul_comba8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul_comba8) #define _bn_mul_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul_consttime) #define _bn_mul_mont BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul_mont) #define _bn_mul_mont_gather5_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul_mont_gather5_nohw) #define _bn_mul_mont_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul_mont_nohw) #define _bn_mul_small BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul_small) #define _bn_mul_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mul_words) #define _bn_mulx4x_mont BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mulx4x_mont) #define _bn_mulx4x_mont_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mulx4x_mont_capable) #define _bn_mulx4x_mont_gather5 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mulx4x_mont_gather5) #define _bn_mulx4x_mont_gather5_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mulx4x_mont_gather5_capable) #define _bn_mulx_adx_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_mulx_adx_capable) #define _bn_odd_number_is_obviously_composite BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_odd_number_is_obviously_composite) #define _bn_one_to_montgomery 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_one_to_montgomery) #define _bn_power5_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_power5_capable) #define _bn_power5_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_power5_nohw) #define _bn_powerx5 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_powerx5) #define _bn_powerx5_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_powerx5_capable) #define _bn_rand_range_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_rand_range_words) #define _bn_rand_secret_range BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_rand_secret_range) #define _bn_reduce_once BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_reduce_once) #define _bn_reduce_once_in_place BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_reduce_once_in_place) #define _bn_resize_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_resize_words) #define _bn_rshift1_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_rshift1_words) #define _bn_rshift_secret_shift BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_rshift_secret_shift) #define _bn_rshift_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_rshift_words) #define _bn_scatter5 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_scatter5) #define _bn_secret BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_secret) #define _bn_select_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_select_words) #define _bn_set_minimal_width BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_set_minimal_width) #define _bn_set_static_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_set_static_words) #define _bn_set_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_set_words) #define _bn_sqr8x_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_sqr8x_internal) #define _bn_sqr8x_mont BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_sqr8x_mont) #define _bn_sqr8x_mont_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_sqr8x_mont_capable) #define 
_bn_sqr_comba4 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_sqr_comba4) #define _bn_sqr_comba8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_sqr_comba8) #define _bn_sqr_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_sqr_consttime) #define _bn_sqr_small BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_sqr_small) #define _bn_sqr_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_sqr_words) #define _bn_sqrx8x_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_sqrx8x_internal) #define _bn_sub_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_sub_words) #define _bn_to_montgomery_small BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_to_montgomery_small) #define _bn_uadd_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_uadd_consttime) #define _bn_usub_consttime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_usub_consttime) #define _bn_wexpand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_wexpand) #define _bn_words_to_big_endian BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_words_to_big_endian) #define _boringssl_ensure_ecc_self_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, boringssl_ensure_ecc_self_test) #define _boringssl_ensure_ffdh_self_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, boringssl_ensure_ffdh_self_test) #define _boringssl_ensure_rsa_self_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, boringssl_ensure_rsa_self_test) #define _boringssl_fips_break_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, boringssl_fips_break_test) #define _boringssl_fips_inc_counter BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, boringssl_fips_inc_counter) #define _boringssl_self_test_hmac_sha256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, boringssl_self_test_hmac_sha256) #define _boringssl_self_test_sha256 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, boringssl_self_test_sha256) #define _boringssl_self_test_sha512 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, boringssl_self_test_sha512) #define 
_bsaes_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bsaes_capable) #define _bsaes_cbc_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bsaes_cbc_encrypt) #define _c2i_ASN1_BIT_STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, c2i_ASN1_BIT_STRING) #define _c2i_ASN1_INTEGER BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, c2i_ASN1_INTEGER) #define _c2i_ASN1_OBJECT BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, c2i_ASN1_OBJECT) #define _chacha20_poly1305_asm_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, chacha20_poly1305_asm_capable) #define _chacha20_poly1305_open BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, chacha20_poly1305_open) #define _chacha20_poly1305_open_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, chacha20_poly1305_open_avx2) #define _chacha20_poly1305_open_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, chacha20_poly1305_open_nohw) #define _chacha20_poly1305_seal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, chacha20_poly1305_seal) #define _chacha20_poly1305_seal_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, chacha20_poly1305_seal_avx2) #define _chacha20_poly1305_seal_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, chacha20_poly1305_seal_nohw) #define _crypto_gcm_clmul_enabled BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, crypto_gcm_clmul_enabled) #define _d2i_ASN1_BIT_STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_BIT_STRING) #define _d2i_ASN1_BMPSTRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_BMPSTRING) #define _d2i_ASN1_BOOLEAN BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_BOOLEAN) #define _d2i_ASN1_ENUMERATED BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_ENUMERATED) #define _d2i_ASN1_GENERALIZEDTIME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_GENERALIZEDTIME) #define _d2i_ASN1_GENERALSTRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_GENERALSTRING) #define _d2i_ASN1_IA5STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
d2i_ASN1_IA5STRING) #define _d2i_ASN1_INTEGER BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_INTEGER) #define _d2i_ASN1_NULL BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_NULL) #define _d2i_ASN1_OBJECT BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_OBJECT) #define _d2i_ASN1_OCTET_STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_OCTET_STRING) #define _d2i_ASN1_PRINTABLE BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_PRINTABLE) #define _d2i_ASN1_PRINTABLESTRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_PRINTABLESTRING) #define _d2i_ASN1_SEQUENCE_ANY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_SEQUENCE_ANY) #define _d2i_ASN1_SET_ANY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_SET_ANY) #define _d2i_ASN1_T61STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_T61STRING) #define _d2i_ASN1_TIME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_TIME) #define _d2i_ASN1_TYPE BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_TYPE) #define _d2i_ASN1_UNIVERSALSTRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_UNIVERSALSTRING) #define _d2i_ASN1_UTCTIME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_UTCTIME) #define _d2i_ASN1_UTF8STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_UTF8STRING) #define _d2i_ASN1_VISIBLESTRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ASN1_VISIBLESTRING) #define _d2i_AUTHORITY_INFO_ACCESS BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_AUTHORITY_INFO_ACCESS) #define _d2i_AUTHORITY_KEYID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_AUTHORITY_KEYID) #define _d2i_AutoPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_AutoPrivateKey) #define _d2i_BASIC_CONSTRAINTS BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_BASIC_CONSTRAINTS) #define _d2i_CERTIFICATEPOLICIES BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_CERTIFICATEPOLICIES) #define _d2i_CRL_DIST_POINTS 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_CRL_DIST_POINTS) #define _d2i_DHparams BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DHparams) #define _d2i_DHparams_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DHparams_bio) #define _d2i_DIRECTORYSTRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DIRECTORYSTRING) #define _d2i_DISPLAYTEXT BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DISPLAYTEXT) #define _d2i_DSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DSAPrivateKey) #define _d2i_DSAPrivateKey_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DSAPrivateKey_bio) #define _d2i_DSAPrivateKey_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DSAPrivateKey_fp) #define _d2i_DSAPublicKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DSAPublicKey) #define _d2i_DSA_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DSA_PUBKEY) #define _d2i_DSA_PUBKEY_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DSA_PUBKEY_bio) #define _d2i_DSA_PUBKEY_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DSA_PUBKEY_fp) #define _d2i_DSA_SIG BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DSA_SIG) #define _d2i_DSAparams BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_DSAparams) #define _d2i_ECDSA_SIG BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ECDSA_SIG) #define _d2i_ECPKParameters BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ECPKParameters) #define _d2i_ECParameters BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ECParameters) #define _d2i_ECPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ECPrivateKey) #define _d2i_ECPrivateKey_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ECPrivateKey_bio) #define _d2i_ECPrivateKey_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ECPrivateKey_fp) #define _d2i_EC_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_EC_PUBKEY) #define _d2i_EC_PUBKEY_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_EC_PUBKEY_bio) #define 
_d2i_EC_PUBKEY_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_EC_PUBKEY_fp) #define _d2i_EXTENDED_KEY_USAGE BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_EXTENDED_KEY_USAGE) #define _d2i_GENERAL_NAME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_GENERAL_NAME) #define _d2i_GENERAL_NAMES BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_GENERAL_NAMES) #define _d2i_ISSUING_DIST_POINT BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_ISSUING_DIST_POINT) #define _d2i_NETSCAPE_SPKAC BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_NETSCAPE_SPKAC) #define _d2i_NETSCAPE_SPKI BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_NETSCAPE_SPKI) #define _d2i_PKCS12 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PKCS12) #define _d2i_PKCS12_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PKCS12_bio) #define _d2i_PKCS12_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PKCS12_fp) #define _d2i_PKCS7 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PKCS7) #define _d2i_PKCS7_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PKCS7_bio) #define _d2i_PKCS8PrivateKey_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PKCS8PrivateKey_bio) #define _d2i_PKCS8PrivateKey_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PKCS8PrivateKey_fp) #define _d2i_PKCS8_PRIV_KEY_INFO BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PKCS8_PRIV_KEY_INFO) #define _d2i_PKCS8_PRIV_KEY_INFO_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PKCS8_PRIV_KEY_INFO_bio) #define _d2i_PKCS8_PRIV_KEY_INFO_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PKCS8_PRIV_KEY_INFO_fp) #define _d2i_PKCS8_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PKCS8_bio) #define _d2i_PKCS8_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PKCS8_fp) #define _d2i_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PUBKEY) #define _d2i_PUBKEY_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PUBKEY_bio) #define _d2i_PUBKEY_fp 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PUBKEY_fp) #define _d2i_PrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PrivateKey) #define _d2i_PrivateKey_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PrivateKey_bio) #define _d2i_PrivateKey_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PrivateKey_fp) #define _d2i_PublicKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_PublicKey) #define _d2i_RSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_RSAPrivateKey) #define _d2i_RSAPrivateKey_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_RSAPrivateKey_bio) #define _d2i_RSAPrivateKey_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_RSAPrivateKey_fp) #define _d2i_RSAPublicKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_RSAPublicKey) #define _d2i_RSAPublicKey_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_RSAPublicKey_bio) #define _d2i_RSAPublicKey_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_RSAPublicKey_fp) #define _d2i_RSA_PSS_PARAMS BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_RSA_PSS_PARAMS) #define _d2i_RSA_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_RSA_PUBKEY) #define _d2i_RSA_PUBKEY_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_RSA_PUBKEY_bio) #define _d2i_RSA_PUBKEY_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_RSA_PUBKEY_fp) #define _d2i_SSL_SESSION BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_SSL_SESSION) #define _d2i_SSL_SESSION_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_SSL_SESSION_bio) #define _d2i_X509 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509) #define _d2i_X509_ALGOR BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_ALGOR) #define _d2i_X509_ATTRIBUTE BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_ATTRIBUTE) #define _d2i_X509_AUX BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_AUX) #define _d2i_X509_CERT_AUX BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_CERT_AUX) #define 
_d2i_X509_CINF BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_CINF) #define _d2i_X509_CRL BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_CRL) #define _d2i_X509_CRL_INFO BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_CRL_INFO) #define _d2i_X509_CRL_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_CRL_bio) #define _d2i_X509_CRL_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_CRL_fp) #define _d2i_X509_EXTENSION BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_EXTENSION) #define _d2i_X509_EXTENSIONS BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_EXTENSIONS) #define _d2i_X509_NAME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_NAME) #define _d2i_X509_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_PUBKEY) #define _d2i_X509_REQ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_REQ) #define _d2i_X509_REQ_INFO BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_REQ_INFO) #define _d2i_X509_REQ_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_REQ_bio) #define _d2i_X509_REQ_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_REQ_fp) #define _d2i_X509_REVOKED BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_REVOKED) #define _d2i_X509_SIG BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_SIG) #define _d2i_X509_VAL BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_VAL) #define _d2i_X509_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_bio) #define _d2i_X509_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, d2i_X509_fp) #define _dh_asn1_meth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, dh_asn1_meth) #define _dh_check_params_fast BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, dh_check_params_fast) #define _dh_compute_key_padded_no_self_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, dh_compute_key_padded_no_self_test) #define _dh_pkey_meth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, dh_pkey_meth) #define _dsa_asn1_meth 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, dsa_asn1_meth) #define _dsa_check_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, dsa_check_key) #define _ec_GFp_mont_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_add) #define _ec_GFp_mont_dbl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_dbl) #define _ec_GFp_mont_felem_exp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_felem_exp) #define _ec_GFp_mont_felem_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_felem_from_bytes) #define _ec_GFp_mont_felem_mul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_felem_mul) #define _ec_GFp_mont_felem_reduce BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_felem_reduce) #define _ec_GFp_mont_felem_sqr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_felem_sqr) #define _ec_GFp_mont_felem_to_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_felem_to_bytes) #define _ec_GFp_mont_init_precomp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_init_precomp) #define _ec_GFp_mont_mul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_mul) #define _ec_GFp_mont_mul_base BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_mul_base) #define _ec_GFp_mont_mul_batch BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_mul_batch) #define _ec_GFp_mont_mul_precomp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_mul_precomp) #define _ec_GFp_mont_mul_public_batch BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_mont_mul_public_batch) #define _ec_GFp_nistp_recode_scalar_bits BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_nistp_recode_scalar_bits) #define _ec_GFp_simple_cmp_x_coordinate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_simple_cmp_x_coordinate) #define _ec_GFp_simple_felem_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_simple_felem_from_bytes) #define _ec_GFp_simple_felem_to_bytes 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_simple_felem_to_bytes) #define _ec_GFp_simple_group_get_curve BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_simple_group_get_curve) #define _ec_GFp_simple_group_set_curve BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_simple_group_set_curve) #define _ec_GFp_simple_invert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_simple_invert) #define _ec_GFp_simple_is_at_infinity BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_simple_is_at_infinity) #define _ec_GFp_simple_is_on_curve BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_simple_is_on_curve) #define _ec_GFp_simple_point_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_simple_point_copy) #define _ec_GFp_simple_point_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_simple_point_init) #define _ec_GFp_simple_point_set_to_infinity BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_simple_point_set_to_infinity) #define _ec_GFp_simple_points_equal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_GFp_simple_points_equal) #define _ec_affine_jacobian_equal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_affine_jacobian_equal) #define _ec_affine_select BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_affine_select) #define _ec_affine_to_jacobian BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_affine_to_jacobian) #define _ec_asn1_meth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_asn1_meth) #define _ec_bignum_to_felem BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_bignum_to_felem) #define _ec_bignum_to_scalar BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_bignum_to_scalar) #define _ec_cmp_x_coordinate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_cmp_x_coordinate) #define _ec_compute_wNAF BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_compute_wNAF) #define _ec_felem_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_felem_add) #define _ec_felem_equal 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_felem_equal) #define _ec_felem_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_felem_from_bytes) #define _ec_felem_neg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_felem_neg) #define _ec_felem_non_zero_mask BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_felem_non_zero_mask) #define _ec_felem_one BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_felem_one) #define _ec_felem_select BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_felem_select) #define _ec_felem_sub BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_felem_sub) #define _ec_felem_to_bignum BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_felem_to_bignum) #define _ec_felem_to_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_felem_to_bytes) #define _ec_get_x_coordinate_as_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_get_x_coordinate_as_bytes) #define _ec_get_x_coordinate_as_scalar BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_get_x_coordinate_as_scalar) #define _ec_hash_to_curve_p256_xmd_sha256_sswu BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_hash_to_curve_p256_xmd_sha256_sswu) #define _ec_hash_to_curve_p384_xmd_sha384_sswu BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_hash_to_curve_p384_xmd_sha384_sswu) #define _ec_hash_to_curve_p384_xmd_sha512_sswu_draft07 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_hash_to_curve_p384_xmd_sha512_sswu_draft07) #define _ec_hash_to_scalar_p384_xmd_sha384 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_hash_to_scalar_p384_xmd_sha384) #define _ec_hash_to_scalar_p384_xmd_sha512_draft07 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_hash_to_scalar_p384_xmd_sha512_draft07) #define _ec_init_precomp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_init_precomp) #define _ec_jacobian_to_affine BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_jacobian_to_affine) #define _ec_jacobian_to_affine_batch BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
ec_jacobian_to_affine_batch) #define _ec_pkey_meth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_pkey_meth) #define _ec_point_byte_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_point_byte_len) #define _ec_point_from_uncompressed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_point_from_uncompressed) #define _ec_point_mul_no_self_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_point_mul_no_self_test) #define _ec_point_mul_scalar BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_point_mul_scalar) #define _ec_point_mul_scalar_base BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_point_mul_scalar_base) #define _ec_point_mul_scalar_batch BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_point_mul_scalar_batch) #define _ec_point_mul_scalar_precomp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_point_mul_scalar_precomp) #define _ec_point_mul_scalar_public BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_point_mul_scalar_public) #define _ec_point_mul_scalar_public_batch BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_point_mul_scalar_public_batch) #define _ec_point_select BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_point_select) #define _ec_point_set_affine_coordinates BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_point_set_affine_coordinates) #define _ec_point_to_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_point_to_bytes) #define _ec_precomp_select BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_precomp_select) #define _ec_random_nonzero_scalar BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_random_nonzero_scalar) #define _ec_random_scalar BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_random_scalar) #define _ec_scalar_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_add) #define _ec_scalar_equal_vartime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_equal_vartime) #define _ec_scalar_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_from_bytes) #define 
_ec_scalar_from_montgomery BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_from_montgomery) #define _ec_scalar_inv0_montgomery BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_inv0_montgomery) #define _ec_scalar_is_zero BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_is_zero) #define _ec_scalar_mul_montgomery BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_mul_montgomery) #define _ec_scalar_neg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_neg) #define _ec_scalar_reduce BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_reduce) #define _ec_scalar_select BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_select) #define _ec_scalar_sub BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_sub) #define _ec_scalar_to_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_to_bytes) #define _ec_scalar_to_montgomery BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_to_montgomery) #define _ec_scalar_to_montgomery_inv_vartime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_scalar_to_montgomery_inv_vartime) #define _ec_set_to_safe_point BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_set_to_safe_point) #define _ec_simple_scalar_inv0_montgomery BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_simple_scalar_inv0_montgomery) #define _ec_simple_scalar_to_montgomery_inv_vartime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ec_simple_scalar_to_montgomery_inv_vartime) #define _ecdsa_sign_fixed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecdsa_sign_fixed) #define _ecdsa_sign_fixed_with_nonce_for_known_answer_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecdsa_sign_fixed_with_nonce_for_known_answer_test) #define _ecdsa_verify_fixed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecdsa_verify_fixed) #define _ecdsa_verify_fixed_no_self_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecdsa_verify_fixed_no_self_test) #define _ecp_nistz256_div_by_2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
ecp_nistz256_div_by_2) #define _ecp_nistz256_mul_by_2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_mul_by_2) #define _ecp_nistz256_mul_by_3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_mul_by_3) #define _ecp_nistz256_mul_mont BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_mul_mont) #define _ecp_nistz256_mul_mont_adx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_mul_mont_adx) #define _ecp_nistz256_mul_mont_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_mul_mont_nohw) #define _ecp_nistz256_neg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_neg) #define _ecp_nistz256_ord_mul_mont BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_ord_mul_mont) #define _ecp_nistz256_ord_mul_mont_adx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_ord_mul_mont_adx) #define _ecp_nistz256_ord_mul_mont_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_ord_mul_mont_nohw) #define _ecp_nistz256_ord_sqr_mont BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_ord_sqr_mont) #define _ecp_nistz256_ord_sqr_mont_adx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_ord_sqr_mont_adx) #define _ecp_nistz256_ord_sqr_mont_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_ord_sqr_mont_nohw) #define _ecp_nistz256_point_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_point_add) #define _ecp_nistz256_point_add_adx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_point_add_adx) #define _ecp_nistz256_point_add_affine BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_point_add_affine) #define _ecp_nistz256_point_add_affine_adx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_point_add_affine_adx) #define _ecp_nistz256_point_add_affine_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_point_add_affine_nohw) #define _ecp_nistz256_point_add_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
ecp_nistz256_point_add_nohw) #define _ecp_nistz256_point_double BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_point_double) #define _ecp_nistz256_point_double_adx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_point_double_adx) #define _ecp_nistz256_point_double_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_point_double_nohw) #define _ecp_nistz256_select_w5 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_select_w5) #define _ecp_nistz256_select_w5_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_select_w5_avx2) #define _ecp_nistz256_select_w5_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_select_w5_nohw) #define _ecp_nistz256_select_w7 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_select_w7) #define _ecp_nistz256_select_w7_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_select_w7_avx2) #define _ecp_nistz256_select_w7_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_select_w7_nohw) #define _ecp_nistz256_sqr_mont BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_sqr_mont) #define _ecp_nistz256_sqr_mont_adx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_sqr_mont_adx) #define _ecp_nistz256_sqr_mont_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_sqr_mont_nohw) #define _ecp_nistz256_sub BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ecp_nistz256_sub) #define _ed25519_asn1_meth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ed25519_asn1_meth) #define _ed25519_pkey_meth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ed25519_pkey_meth) #define _evp_pkey_set_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, evp_pkey_set_method) #define _fiat_curve25519_adx_mul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, fiat_curve25519_adx_mul) #define _fiat_curve25519_adx_square BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, fiat_curve25519_adx_square) #define _fiat_p256_adx_mul 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, fiat_p256_adx_mul) #define _fiat_p256_adx_sqr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, fiat_p256_adx_sqr) #define _gcm_ghash_avx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_avx) #define _gcm_ghash_clmul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_clmul) #define _gcm_ghash_neon BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_neon) #define _gcm_ghash_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_nohw) #define _gcm_ghash_ssse3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_ssse3) #define _gcm_ghash_v8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_v8) #define _gcm_ghash_vpclmulqdq_avx10_512 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_vpclmulqdq_avx10_512) #define _gcm_ghash_vpclmulqdq_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_ghash_vpclmulqdq_avx2) #define _gcm_gmult_avx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_avx) #define _gcm_gmult_clmul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_clmul) #define _gcm_gmult_neon BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_neon) #define _gcm_gmult_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_nohw) #define _gcm_gmult_ssse3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_ssse3) #define _gcm_gmult_v8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_v8) #define _gcm_gmult_vpclmulqdq_avx10 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_vpclmulqdq_avx10) #define _gcm_gmult_vpclmulqdq_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_gmult_vpclmulqdq_avx2) #define _gcm_init_avx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_avx) #define _gcm_init_clmul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_clmul) #define _gcm_init_neon BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_neon) #define _gcm_init_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_nohw) #define 
_gcm_init_ssse3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_ssse3) #define _gcm_init_v8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_v8) #define _gcm_init_vpclmulqdq_avx10_512 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_vpclmulqdq_avx10_512) #define _gcm_init_vpclmulqdq_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_init_vpclmulqdq_avx2) #define _gcm_neon_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_neon_capable) #define _gcm_pmull_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, gcm_pmull_capable) #define _have_fast_rdrand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, have_fast_rdrand) #define _have_rdrand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, have_rdrand) #define _hkdf_pkey_meth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, hkdf_pkey_meth) #define _hwaes_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, hwaes_capable) #define _i2a_ASN1_ENUMERATED BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2a_ASN1_ENUMERATED) #define _i2a_ASN1_INTEGER BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2a_ASN1_INTEGER) #define _i2a_ASN1_OBJECT BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2a_ASN1_OBJECT) #define _i2a_ASN1_STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2a_ASN1_STRING) #define _i2c_ASN1_BIT_STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2c_ASN1_BIT_STRING) #define _i2c_ASN1_INTEGER BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2c_ASN1_INTEGER) #define _i2d_ASN1_BIT_STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_BIT_STRING) #define _i2d_ASN1_BMPSTRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_BMPSTRING) #define _i2d_ASN1_BOOLEAN BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_BOOLEAN) #define _i2d_ASN1_ENUMERATED BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_ENUMERATED) #define _i2d_ASN1_GENERALIZEDTIME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_GENERALIZEDTIME) #define _i2d_ASN1_GENERALSTRING 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_GENERALSTRING) #define _i2d_ASN1_IA5STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_IA5STRING) #define _i2d_ASN1_INTEGER BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_INTEGER) #define _i2d_ASN1_NULL BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_NULL) #define _i2d_ASN1_OBJECT BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_OBJECT) #define _i2d_ASN1_OCTET_STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_OCTET_STRING) #define _i2d_ASN1_PRINTABLE BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_PRINTABLE) #define _i2d_ASN1_PRINTABLESTRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_PRINTABLESTRING) #define _i2d_ASN1_SEQUENCE_ANY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_SEQUENCE_ANY) #define _i2d_ASN1_SET_ANY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_SET_ANY) #define _i2d_ASN1_T61STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_T61STRING) #define _i2d_ASN1_TIME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_TIME) #define _i2d_ASN1_TYPE BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_TYPE) #define _i2d_ASN1_UNIVERSALSTRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_UNIVERSALSTRING) #define _i2d_ASN1_UTCTIME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_UTCTIME) #define _i2d_ASN1_UTF8STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_UTF8STRING) #define _i2d_ASN1_VISIBLESTRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ASN1_VISIBLESTRING) #define _i2d_AUTHORITY_INFO_ACCESS BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_AUTHORITY_INFO_ACCESS) #define _i2d_AUTHORITY_KEYID BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_AUTHORITY_KEYID) #define _i2d_BASIC_CONSTRAINTS BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_BASIC_CONSTRAINTS) #define _i2d_CERTIFICATEPOLICIES BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
i2d_CERTIFICATEPOLICIES) #define _i2d_CRL_DIST_POINTS BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_CRL_DIST_POINTS) #define _i2d_DHparams BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DHparams) #define _i2d_DHparams_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DHparams_bio) #define _i2d_DIRECTORYSTRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DIRECTORYSTRING) #define _i2d_DISPLAYTEXT BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DISPLAYTEXT) #define _i2d_DSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DSAPrivateKey) #define _i2d_DSAPrivateKey_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DSAPrivateKey_bio) #define _i2d_DSAPrivateKey_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DSAPrivateKey_fp) #define _i2d_DSAPublicKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DSAPublicKey) #define _i2d_DSA_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DSA_PUBKEY) #define _i2d_DSA_PUBKEY_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DSA_PUBKEY_bio) #define _i2d_DSA_PUBKEY_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DSA_PUBKEY_fp) #define _i2d_DSA_SIG BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DSA_SIG) #define _i2d_DSAparams BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_DSAparams) #define _i2d_ECDSA_SIG BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ECDSA_SIG) #define _i2d_ECPKParameters BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ECPKParameters) #define _i2d_ECParameters BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ECParameters) #define _i2d_ECPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ECPrivateKey) #define _i2d_ECPrivateKey_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ECPrivateKey_bio) #define _i2d_ECPrivateKey_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ECPrivateKey_fp) #define _i2d_EC_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_EC_PUBKEY) #define _i2d_EC_PUBKEY_bio 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_EC_PUBKEY_bio) #define _i2d_EC_PUBKEY_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_EC_PUBKEY_fp) #define _i2d_EXTENDED_KEY_USAGE BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_EXTENDED_KEY_USAGE) #define _i2d_GENERAL_NAME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_GENERAL_NAME) #define _i2d_GENERAL_NAMES BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_GENERAL_NAMES) #define _i2d_ISSUING_DIST_POINT BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_ISSUING_DIST_POINT) #define _i2d_NETSCAPE_SPKAC BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_NETSCAPE_SPKAC) #define _i2d_NETSCAPE_SPKI BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_NETSCAPE_SPKI) #define _i2d_PKCS12 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS12) #define _i2d_PKCS12_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS12_bio) #define _i2d_PKCS12_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS12_fp) #define _i2d_PKCS7 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS7) #define _i2d_PKCS7_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS7_bio) #define _i2d_PKCS8PrivateKeyInfo_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS8PrivateKeyInfo_bio) #define _i2d_PKCS8PrivateKeyInfo_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS8PrivateKeyInfo_fp) #define _i2d_PKCS8PrivateKey_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS8PrivateKey_bio) #define _i2d_PKCS8PrivateKey_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS8PrivateKey_fp) #define _i2d_PKCS8PrivateKey_nid_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS8PrivateKey_nid_bio) #define _i2d_PKCS8PrivateKey_nid_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS8PrivateKey_nid_fp) #define _i2d_PKCS8_PRIV_KEY_INFO BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS8_PRIV_KEY_INFO) #define _i2d_PKCS8_PRIV_KEY_INFO_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
i2d_PKCS8_PRIV_KEY_INFO_bio) #define _i2d_PKCS8_PRIV_KEY_INFO_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS8_PRIV_KEY_INFO_fp) #define _i2d_PKCS8_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS8_bio) #define _i2d_PKCS8_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PKCS8_fp) #define _i2d_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PUBKEY) #define _i2d_PUBKEY_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PUBKEY_bio) #define _i2d_PUBKEY_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PUBKEY_fp) #define _i2d_PrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PrivateKey) #define _i2d_PrivateKey_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PrivateKey_bio) #define _i2d_PrivateKey_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PrivateKey_fp) #define _i2d_PublicKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_PublicKey) #define _i2d_RSAPrivateKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_RSAPrivateKey) #define _i2d_RSAPrivateKey_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_RSAPrivateKey_bio) #define _i2d_RSAPrivateKey_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_RSAPrivateKey_fp) #define _i2d_RSAPublicKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_RSAPublicKey) #define _i2d_RSAPublicKey_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_RSAPublicKey_bio) #define _i2d_RSAPublicKey_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_RSAPublicKey_fp) #define _i2d_RSA_PSS_PARAMS BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_RSA_PSS_PARAMS) #define _i2d_RSA_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_RSA_PUBKEY) #define _i2d_RSA_PUBKEY_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_RSA_PUBKEY_bio) #define _i2d_RSA_PUBKEY_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_RSA_PUBKEY_fp) #define _i2d_SSL_SESSION BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_SSL_SESSION) #define _i2d_SSL_SESSION_bio 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_SSL_SESSION_bio) #define _i2d_X509 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509) #define _i2d_X509_ALGOR BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_ALGOR) #define _i2d_X509_ATTRIBUTE BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_ATTRIBUTE) #define _i2d_X509_AUX BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_AUX) #define _i2d_X509_CERT_AUX BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_CERT_AUX) #define _i2d_X509_CINF BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_CINF) #define _i2d_X509_CRL BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_CRL) #define _i2d_X509_CRL_INFO BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_CRL_INFO) #define _i2d_X509_CRL_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_CRL_bio) #define _i2d_X509_CRL_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_CRL_fp) #define _i2d_X509_CRL_tbs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_CRL_tbs) #define _i2d_X509_EXTENSION BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_EXTENSION) #define _i2d_X509_EXTENSIONS BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_EXTENSIONS) #define _i2d_X509_NAME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_NAME) #define _i2d_X509_PUBKEY BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_PUBKEY) #define _i2d_X509_REQ BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_REQ) #define _i2d_X509_REQ_INFO BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_REQ_INFO) #define _i2d_X509_REQ_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_REQ_bio) #define _i2d_X509_REQ_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_REQ_fp) #define _i2d_X509_REVOKED BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_REVOKED) #define _i2d_X509_SIG BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_SIG) #define _i2d_X509_VAL BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
i2d_X509_VAL) #define _i2d_X509_bio BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_bio) #define _i2d_X509_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_fp) #define _i2d_X509_tbs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_X509_tbs) #define _i2d_re_X509_CRL_tbs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_re_X509_CRL_tbs) #define _i2d_re_X509_REQ_tbs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_re_X509_REQ_tbs) #define _i2d_re_X509_tbs BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2d_re_X509_tbs) #define _i2o_ECPublicKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2o_ECPublicKey) #define _i2s_ASN1_ENUMERATED BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2s_ASN1_ENUMERATED) #define _i2s_ASN1_INTEGER BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2s_ASN1_INTEGER) #define _i2s_ASN1_OCTET_STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2s_ASN1_OCTET_STRING) #define _i2t_ASN1_OBJECT BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2t_ASN1_OBJECT) #define _i2v_GENERAL_NAME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2v_GENERAL_NAME) #define _i2v_GENERAL_NAMES BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, i2v_GENERAL_NAMES) #define _k25519Precomp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, k25519Precomp) #define _kBoringSSLRSASqrtTwo BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, kBoringSSLRSASqrtTwo) #define _kBoringSSLRSASqrtTwoLen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, kBoringSSLRSASqrtTwoLen) #define _kOpenSSLReasonStringData BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, kOpenSSLReasonStringData) #define _kOpenSSLReasonValues BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, kOpenSSLReasonValues) #define _kOpenSSLReasonValuesLen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, kOpenSSLReasonValuesLen) #define _lh_CONF_SECTION_call_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_SECTION_call_cmp_func) #define _lh_CONF_SECTION_call_doall_arg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
lh_CONF_SECTION_call_doall_arg) #define _lh_CONF_SECTION_call_hash_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_SECTION_call_hash_func) #define _lh_CONF_SECTION_doall_arg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_SECTION_doall_arg) #define _lh_CONF_SECTION_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_SECTION_free) #define _lh_CONF_SECTION_insert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_SECTION_insert) #define _lh_CONF_SECTION_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_SECTION_new) #define _lh_CONF_SECTION_retrieve BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_SECTION_retrieve) #define _lh_CONF_VALUE_call_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_VALUE_call_cmp_func) #define _lh_CONF_VALUE_call_doall_arg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_VALUE_call_doall_arg) #define _lh_CONF_VALUE_call_hash_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_VALUE_call_hash_func) #define _lh_CONF_VALUE_doall_arg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_VALUE_doall_arg) #define _lh_CONF_VALUE_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_VALUE_free) #define _lh_CONF_VALUE_insert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_VALUE_insert) #define _lh_CONF_VALUE_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_VALUE_new) #define _lh_CONF_VALUE_retrieve BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CONF_VALUE_retrieve) #define _lh_CRYPTO_BUFFER_call_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_call_cmp_func) #define _lh_CRYPTO_BUFFER_call_hash_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_call_hash_func) #define _lh_CRYPTO_BUFFER_delete BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_delete) #define _lh_CRYPTO_BUFFER_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_free) #define _lh_CRYPTO_BUFFER_insert 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_insert) #define _lh_CRYPTO_BUFFER_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_new) #define _lh_CRYPTO_BUFFER_num_items BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_num_items) #define _lh_CRYPTO_BUFFER_retrieve BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, lh_CRYPTO_BUFFER_retrieve) #define _md5_block_asm_data_order BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, md5_block_asm_data_order) #define _o2i_ECPublicKey BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, o2i_ECPublicKey) #define _pkcs12_iterations_acceptable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pkcs12_iterations_acceptable) #define _pkcs12_key_gen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pkcs12_key_gen) #define _pkcs12_pbe_encrypt_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pkcs12_pbe_encrypt_init) #define _pkcs7_add_signed_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pkcs7_add_signed_data) #define _pkcs7_parse_header BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pkcs7_parse_header) #define _pkcs8_pbe_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pkcs8_pbe_decrypt) #define _pmbtoken_exp1_blind BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp1_blind) #define _pmbtoken_exp1_client_key_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp1_client_key_from_bytes) #define _pmbtoken_exp1_derive_key_from_secret BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp1_derive_key_from_secret) #define _pmbtoken_exp1_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp1_generate_key) #define _pmbtoken_exp1_get_h_for_testing BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp1_get_h_for_testing) #define _pmbtoken_exp1_issuer_key_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp1_issuer_key_from_bytes) #define _pmbtoken_exp1_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp1_read) 
#define _pmbtoken_exp1_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp1_sign) #define _pmbtoken_exp1_unblind BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp1_unblind) #define _pmbtoken_exp2_blind BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp2_blind) #define _pmbtoken_exp2_client_key_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp2_client_key_from_bytes) #define _pmbtoken_exp2_derive_key_from_secret BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp2_derive_key_from_secret) #define _pmbtoken_exp2_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp2_generate_key) #define _pmbtoken_exp2_get_h_for_testing BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp2_get_h_for_testing) #define _pmbtoken_exp2_issuer_key_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp2_issuer_key_from_bytes) #define _pmbtoken_exp2_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp2_read) #define _pmbtoken_exp2_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp2_sign) #define _pmbtoken_exp2_unblind BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_exp2_unblind) #define _pmbtoken_pst1_blind BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_pst1_blind) #define _pmbtoken_pst1_client_key_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_pst1_client_key_from_bytes) #define _pmbtoken_pst1_derive_key_from_secret BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_pst1_derive_key_from_secret) #define _pmbtoken_pst1_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_pst1_generate_key) #define _pmbtoken_pst1_get_h_for_testing BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_pst1_get_h_for_testing) #define _pmbtoken_pst1_issuer_key_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_pst1_issuer_key_from_bytes) #define _pmbtoken_pst1_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
pmbtoken_pst1_read) #define _pmbtoken_pst1_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_pst1_sign) #define _pmbtoken_pst1_unblind BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, pmbtoken_pst1_unblind) #define _poly_Rq_mul BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, poly_Rq_mul) #define _rand_fork_unsafe_buffering_enabled BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rand_fork_unsafe_buffering_enabled) #define _rsa_asn1_meth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsa_asn1_meth) #define _rsa_check_public_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsa_check_public_key) #define _rsa_default_private_transform BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsa_default_private_transform) #define _rsa_default_sign_raw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsa_default_sign_raw) #define _rsa_invalidate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsa_invalidate_key) #define _rsa_pkey_meth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsa_pkey_meth) #define _rsa_private_transform BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsa_private_transform) #define _rsa_private_transform_no_self_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsa_private_transform_no_self_test) #define _rsa_sign_no_self_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsa_sign_no_self_test) #define _rsa_verify_no_self_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsa_verify_no_self_test) #define _rsa_verify_raw_no_self_test BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsa_verify_raw_no_self_test) #define _rsaz_1024_gather5_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsaz_1024_gather5_avx2) #define _rsaz_1024_mul_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsaz_1024_mul_avx2) #define _rsaz_1024_norm2red_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsaz_1024_norm2red_avx2) #define _rsaz_1024_red2norm_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsaz_1024_red2norm_avx2) #define _rsaz_1024_scatter5_avx2 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsaz_1024_scatter5_avx2) #define _rsaz_1024_sqr_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsaz_1024_sqr_avx2) #define _rsaz_avx2_preferred BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, rsaz_avx2_preferred) #define _s2i_ASN1_INTEGER BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, s2i_ASN1_INTEGER) #define _s2i_ASN1_OCTET_STRING BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, s2i_ASN1_OCTET_STRING) #define _sha1_avx2_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha1_avx2_capable) #define _sha1_avx_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha1_avx_capable) #define _sha1_block_data_order_avx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha1_block_data_order_avx) #define _sha1_block_data_order_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha1_block_data_order_avx2) #define _sha1_block_data_order_hw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha1_block_data_order_hw) #define _sha1_block_data_order_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha1_block_data_order_nohw) #define _sha1_block_data_order_ssse3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha1_block_data_order_ssse3) #define _sha1_hw_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha1_hw_capable) #define _sha1_ssse3_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha1_ssse3_capable) #define _sha256_avx_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha256_avx_capable) #define _sha256_block_data_order_avx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha256_block_data_order_avx) #define _sha256_block_data_order_hw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha256_block_data_order_hw) #define _sha256_block_data_order_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha256_block_data_order_nohw) #define _sha256_block_data_order_ssse3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha256_block_data_order_ssse3) #define _sha256_hw_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
sha256_hw_capable) #define _sha256_ssse3_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha256_ssse3_capable) #define _sha512_avx_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha512_avx_capable) #define _sha512_block_data_order_avx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha512_block_data_order_avx) #define _sha512_block_data_order_hw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha512_block_data_order_hw) #define _sha512_block_data_order_nohw BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha512_block_data_order_nohw) #define _sha512_hw_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sha512_hw_capable) #define _sk_ACCESS_DESCRIPTION_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_call_free_func) #define _sk_ACCESS_DESCRIPTION_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_new_null) #define _sk_ACCESS_DESCRIPTION_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_num) #define _sk_ACCESS_DESCRIPTION_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_pop_free) #define _sk_ACCESS_DESCRIPTION_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_push) #define _sk_ACCESS_DESCRIPTION_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ACCESS_DESCRIPTION_value) #define _sk_ASN1_INTEGER_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_INTEGER_num) #define _sk_ASN1_INTEGER_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_INTEGER_push) #define _sk_ASN1_INTEGER_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_INTEGER_value) #define _sk_ASN1_OBJECT_call_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_call_cmp_func) #define _sk_ASN1_OBJECT_call_copy_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_call_copy_func) #define _sk_ASN1_OBJECT_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_call_free_func) 
#define _sk_ASN1_OBJECT_deep_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_deep_copy) #define _sk_ASN1_OBJECT_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_dup) #define _sk_ASN1_OBJECT_find BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_find) #define _sk_ASN1_OBJECT_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_free) #define _sk_ASN1_OBJECT_is_sorted BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_is_sorted) #define _sk_ASN1_OBJECT_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_new_null) #define _sk_ASN1_OBJECT_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_num) #define _sk_ASN1_OBJECT_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_pop_free) #define _sk_ASN1_OBJECT_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_push) #define _sk_ASN1_OBJECT_set_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_set_cmp_func) #define _sk_ASN1_OBJECT_sort BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_sort) #define _sk_ASN1_OBJECT_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_OBJECT_value) #define _sk_ASN1_TYPE_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_TYPE_num) #define _sk_ASN1_TYPE_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_TYPE_push) #define _sk_ASN1_TYPE_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_TYPE_value) #define _sk_ASN1_VALUE_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_VALUE_free) #define _sk_ASN1_VALUE_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_VALUE_new_null) #define _sk_ASN1_VALUE_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_VALUE_num) #define _sk_ASN1_VALUE_pop BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_VALUE_pop) #define _sk_ASN1_VALUE_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_VALUE_push) #define 
_sk_ASN1_VALUE_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_ASN1_VALUE_value) #define _sk_CONF_VALUE_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CONF_VALUE_call_free_func) #define _sk_CONF_VALUE_delete_ptr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CONF_VALUE_delete_ptr) #define _sk_CONF_VALUE_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CONF_VALUE_free) #define _sk_CONF_VALUE_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CONF_VALUE_new_null) #define _sk_CONF_VALUE_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CONF_VALUE_num) #define _sk_CONF_VALUE_pop BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CONF_VALUE_pop) #define _sk_CONF_VALUE_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CONF_VALUE_pop_free) #define _sk_CONF_VALUE_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CONF_VALUE_push) #define _sk_CONF_VALUE_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CONF_VALUE_value) #define _sk_CRYPTO_BUFFER_call_copy_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_call_copy_func) #define _sk_CRYPTO_BUFFER_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_call_free_func) #define _sk_CRYPTO_BUFFER_deep_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_deep_copy) #define _sk_CRYPTO_BUFFER_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_new_null) #define _sk_CRYPTO_BUFFER_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_num) #define _sk_CRYPTO_BUFFER_pop BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_pop) #define _sk_CRYPTO_BUFFER_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_pop_free) #define _sk_CRYPTO_BUFFER_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_push) #define _sk_CRYPTO_BUFFER_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_set) #define _sk_CRYPTO_BUFFER_value 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_CRYPTO_BUFFER_value) #define _sk_DIST_POINT_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_DIST_POINT_call_free_func) #define _sk_DIST_POINT_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_DIST_POINT_new_null) #define _sk_DIST_POINT_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_DIST_POINT_num) #define _sk_DIST_POINT_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_DIST_POINT_pop_free) #define _sk_DIST_POINT_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_DIST_POINT_push) #define _sk_DIST_POINT_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_DIST_POINT_value) #define _sk_GENERAL_NAME_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_GENERAL_NAME_call_free_func) #define _sk_GENERAL_NAME_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_GENERAL_NAME_new_null) #define _sk_GENERAL_NAME_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_GENERAL_NAME_num) #define _sk_GENERAL_NAME_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_GENERAL_NAME_pop_free) #define _sk_GENERAL_NAME_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_GENERAL_NAME_push) #define _sk_GENERAL_NAME_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_GENERAL_NAME_set) #define _sk_GENERAL_NAME_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_GENERAL_NAME_value) #define _sk_GENERAL_SUBTREE_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_new_null) #define _sk_GENERAL_SUBTREE_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_num) #define _sk_GENERAL_SUBTREE_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_push) #define _sk_GENERAL_SUBTREE_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_GENERAL_SUBTREE_value) #define _sk_OPENSSL_STRING_call_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_call_cmp_func) #define _sk_OPENSSL_STRING_call_copy_func 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_call_copy_func) #define _sk_OPENSSL_STRING_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_call_free_func) #define _sk_OPENSSL_STRING_deep_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_deep_copy) #define _sk_OPENSSL_STRING_find BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_find) #define _sk_OPENSSL_STRING_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_free) #define _sk_OPENSSL_STRING_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_new) #define _sk_OPENSSL_STRING_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_new_null) #define _sk_OPENSSL_STRING_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_num) #define _sk_OPENSSL_STRING_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_pop_free) #define _sk_OPENSSL_STRING_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_push) #define _sk_OPENSSL_STRING_sort BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_sort) #define _sk_OPENSSL_STRING_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_OPENSSL_STRING_value) #define _sk_POLICYINFO_call_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYINFO_call_cmp_func) #define _sk_POLICYINFO_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYINFO_call_free_func) #define _sk_POLICYINFO_find BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYINFO_find) #define _sk_POLICYINFO_is_sorted BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYINFO_is_sorted) #define _sk_POLICYINFO_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYINFO_new_null) #define _sk_POLICYINFO_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYINFO_num) #define _sk_POLICYINFO_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYINFO_pop_free) #define 
_sk_POLICYINFO_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYINFO_push) #define _sk_POLICYINFO_set_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYINFO_set_cmp_func) #define _sk_POLICYINFO_sort BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYINFO_sort) #define _sk_POLICYINFO_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYINFO_value) #define _sk_POLICYQUALINFO_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYQUALINFO_new_null) #define _sk_POLICYQUALINFO_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYQUALINFO_num) #define _sk_POLICYQUALINFO_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYQUALINFO_push) #define _sk_POLICYQUALINFO_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICYQUALINFO_value) #define _sk_POLICY_MAPPING_call_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICY_MAPPING_call_cmp_func) #define _sk_POLICY_MAPPING_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICY_MAPPING_call_free_func) #define _sk_POLICY_MAPPING_find BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICY_MAPPING_find) #define _sk_POLICY_MAPPING_is_sorted BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICY_MAPPING_is_sorted) #define _sk_POLICY_MAPPING_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICY_MAPPING_new_null) #define _sk_POLICY_MAPPING_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICY_MAPPING_num) #define _sk_POLICY_MAPPING_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICY_MAPPING_pop_free) #define _sk_POLICY_MAPPING_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICY_MAPPING_push) #define _sk_POLICY_MAPPING_set_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICY_MAPPING_set_cmp_func) #define _sk_POLICY_MAPPING_sort BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_POLICY_MAPPING_sort) #define _sk_POLICY_MAPPING_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
sk_POLICY_MAPPING_value) #define _sk_SRTP_PROTECTION_PROFILE_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_SRTP_PROTECTION_PROFILE_new_null) #define _sk_SRTP_PROTECTION_PROFILE_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_SRTP_PROTECTION_PROFILE_num) #define _sk_SRTP_PROTECTION_PROFILE_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_SRTP_PROTECTION_PROFILE_push) #define _sk_SSL_CIPHER_call_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_SSL_CIPHER_call_cmp_func) #define _sk_SSL_CIPHER_delete BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_SSL_CIPHER_delete) #define _sk_SSL_CIPHER_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_SSL_CIPHER_dup) #define _sk_SSL_CIPHER_find BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_SSL_CIPHER_find) #define _sk_SSL_CIPHER_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_SSL_CIPHER_new_null) #define _sk_SSL_CIPHER_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_SSL_CIPHER_num) #define _sk_SSL_CIPHER_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_SSL_CIPHER_push) #define _sk_SSL_CIPHER_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_SSL_CIPHER_value) #define _sk_TRUST_TOKEN_PRETOKEN_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_call_free_func) #define _sk_TRUST_TOKEN_PRETOKEN_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_new_null) #define _sk_TRUST_TOKEN_PRETOKEN_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_num) #define _sk_TRUST_TOKEN_PRETOKEN_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_pop_free) #define _sk_TRUST_TOKEN_PRETOKEN_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_push) #define _sk_TRUST_TOKEN_PRETOKEN_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_TRUST_TOKEN_PRETOKEN_value) #define _sk_TRUST_TOKEN_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
sk_TRUST_TOKEN_call_free_func) #define _sk_TRUST_TOKEN_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_TRUST_TOKEN_new_null) #define _sk_TRUST_TOKEN_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_TRUST_TOKEN_pop_free) #define _sk_TRUST_TOKEN_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_TRUST_TOKEN_push) #define _sk_X509_ATTRIBUTE_delete BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_delete) #define _sk_X509_ATTRIBUTE_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_new_null) #define _sk_X509_ATTRIBUTE_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_num) #define _sk_X509_ATTRIBUTE_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_push) #define _sk_X509_ATTRIBUTE_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_ATTRIBUTE_value) #define _sk_X509_CRL_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_CRL_call_free_func) #define _sk_X509_CRL_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_CRL_free) #define _sk_X509_CRL_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_CRL_new_null) #define _sk_X509_CRL_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_CRL_num) #define _sk_X509_CRL_pop BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_CRL_pop) #define _sk_X509_CRL_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_CRL_pop_free) #define _sk_X509_CRL_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_CRL_push) #define _sk_X509_CRL_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_CRL_value) #define _sk_X509_EXTENSION_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_EXTENSION_call_free_func) #define _sk_X509_EXTENSION_delete BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_EXTENSION_delete) #define _sk_X509_EXTENSION_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_EXTENSION_free) #define _sk_X509_EXTENSION_insert 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_EXTENSION_insert) #define _sk_X509_EXTENSION_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_EXTENSION_new_null) #define _sk_X509_EXTENSION_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_EXTENSION_num) #define _sk_X509_EXTENSION_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_EXTENSION_pop_free) #define _sk_X509_EXTENSION_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_EXTENSION_push) #define _sk_X509_EXTENSION_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_EXTENSION_set) #define _sk_X509_EXTENSION_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_EXTENSION_value) #define _sk_X509_INFO_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_INFO_call_free_func) #define _sk_X509_INFO_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_INFO_free) #define _sk_X509_INFO_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_INFO_new_null) #define _sk_X509_INFO_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_INFO_num) #define _sk_X509_INFO_pop BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_INFO_pop) #define _sk_X509_INFO_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_INFO_pop_free) #define _sk_X509_INFO_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_INFO_push) #define _sk_X509_INFO_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_INFO_value) #define _sk_X509_LOOKUP_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_LOOKUP_call_free_func) #define _sk_X509_LOOKUP_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_LOOKUP_new_null) #define _sk_X509_LOOKUP_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_LOOKUP_num) #define _sk_X509_LOOKUP_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_LOOKUP_pop_free) #define _sk_X509_LOOKUP_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
sk_X509_LOOKUP_push) #define _sk_X509_LOOKUP_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_LOOKUP_value) #define _sk_X509_NAME_ENTRY_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_call_free_func) #define _sk_X509_NAME_ENTRY_delete BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_delete) #define _sk_X509_NAME_ENTRY_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_free) #define _sk_X509_NAME_ENTRY_insert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_insert) #define _sk_X509_NAME_ENTRY_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_new_null) #define _sk_X509_NAME_ENTRY_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_num) #define _sk_X509_NAME_ENTRY_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_pop_free) #define _sk_X509_NAME_ENTRY_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_push) #define _sk_X509_NAME_ENTRY_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_set) #define _sk_X509_NAME_ENTRY_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_ENTRY_value) #define _sk_X509_NAME_call_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_call_cmp_func) #define _sk_X509_NAME_call_copy_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_call_copy_func) #define _sk_X509_NAME_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_call_free_func) #define _sk_X509_NAME_deep_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_deep_copy) #define _sk_X509_NAME_find BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_find) #define _sk_X509_NAME_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_new) #define _sk_X509_NAME_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_new_null) #define _sk_X509_NAME_num 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_num) #define _sk_X509_NAME_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_pop_free) #define _sk_X509_NAME_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_set) #define _sk_X509_NAME_set_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_set_cmp_func) #define _sk_X509_NAME_sort BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_sort) #define _sk_X509_NAME_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_NAME_value) #define _sk_X509_OBJECT_call_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_OBJECT_call_cmp_func) #define _sk_X509_OBJECT_call_copy_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_OBJECT_call_copy_func) #define _sk_X509_OBJECT_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_OBJECT_call_free_func) #define _sk_X509_OBJECT_deep_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_OBJECT_deep_copy) #define _sk_X509_OBJECT_find BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_OBJECT_find) #define _sk_X509_OBJECT_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_OBJECT_new) #define _sk_X509_OBJECT_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_OBJECT_num) #define _sk_X509_OBJECT_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_OBJECT_pop_free) #define _sk_X509_OBJECT_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_OBJECT_push) #define _sk_X509_OBJECT_sort BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_OBJECT_sort) #define _sk_X509_OBJECT_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_OBJECT_value) #define _sk_X509_REVOKED_call_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_REVOKED_call_cmp_func) #define _sk_X509_REVOKED_find BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_REVOKED_find) #define _sk_X509_REVOKED_is_sorted BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
sk_X509_REVOKED_is_sorted) #define _sk_X509_REVOKED_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_REVOKED_new) #define _sk_X509_REVOKED_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_REVOKED_num) #define _sk_X509_REVOKED_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_REVOKED_push) #define _sk_X509_REVOKED_set_cmp_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_REVOKED_set_cmp_func) #define _sk_X509_REVOKED_sort BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_REVOKED_sort) #define _sk_X509_REVOKED_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_REVOKED_value) #define _sk_X509_call_free_func BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_call_free_func) #define _sk_X509_delete BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_delete) #define _sk_X509_delete_ptr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_delete_ptr) #define _sk_X509_dup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_dup) #define _sk_X509_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_free) #define _sk_X509_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_new_null) #define _sk_X509_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_num) #define _sk_X509_pop BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_pop) #define _sk_X509_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_pop_free) #define _sk_X509_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_push) #define _sk_X509_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_set) #define _sk_X509_shift BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_shift) #define _sk_X509_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_X509_value) #define _sk_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_free) #define _sk_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_new_null) #define _sk_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_num) #define 
_sk_pop BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_pop) #define _sk_pop_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_pop_free) #define _sk_pop_free_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_pop_free_ex) #define _sk_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_push) #define _sk_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_value) #define _sk_void_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_void_free) #define _sk_void_new_null BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_void_new_null) #define _sk_void_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_void_num) #define _sk_void_push BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_void_push) #define _sk_void_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_void_set) #define _sk_void_value BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, sk_void_value) #define _slhdsa_copy_keypair_addr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_copy_keypair_addr) #define _slhdsa_fors_pk_from_sig BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_fors_pk_from_sig) #define _slhdsa_fors_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_fors_sign) #define _slhdsa_fors_sk_gen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_fors_sk_gen) #define _slhdsa_fors_treehash BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_fors_treehash) #define _slhdsa_get_tree_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_get_tree_index) #define _slhdsa_ht_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_ht_sign) #define _slhdsa_ht_verify BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_ht_verify) #define _slhdsa_set_chain_addr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_set_chain_addr) #define _slhdsa_set_hash_addr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_set_hash_addr) #define _slhdsa_set_keypair_addr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_set_keypair_addr) #define _slhdsa_set_layer_addr 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_set_layer_addr) #define _slhdsa_set_tree_addr BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_set_tree_addr) #define _slhdsa_set_tree_height BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_set_tree_height) #define _slhdsa_set_tree_index BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_set_tree_index) #define _slhdsa_set_type BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_set_type) #define _slhdsa_thash_f BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_thash_f) #define _slhdsa_thash_h BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_thash_h) #define _slhdsa_thash_hmsg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_thash_hmsg) #define _slhdsa_thash_prf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_thash_prf) #define _slhdsa_thash_prfmsg BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_thash_prfmsg) #define _slhdsa_thash_tk BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_thash_tk) #define _slhdsa_thash_tl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_thash_tl) #define _slhdsa_treehash BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_treehash) #define _slhdsa_wots_pk_from_sig BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_wots_pk_from_sig) #define _slhdsa_wots_pk_gen BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_wots_pk_gen) #define _slhdsa_wots_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_wots_sign) #define _slhdsa_xmss_pk_from_sig BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_xmss_pk_from_sig) #define _slhdsa_xmss_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, slhdsa_xmss_sign) #define _v2i_GENERAL_NAME BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v2i_GENERAL_NAME) #define _v2i_GENERAL_NAMES BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v2i_GENERAL_NAMES) #define _v2i_GENERAL_NAME_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v2i_GENERAL_NAME_ex) #define _v3_akey_id 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_akey_id) #define _v3_alt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_alt) #define _v3_bcons BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_bcons) #define _v3_cpols BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_cpols) #define _v3_crl_invdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_crl_invdate) #define _v3_crl_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_crl_num) #define _v3_crl_reason BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_crl_reason) #define _v3_crld BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_crld) #define _v3_delta_crl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_delta_crl) #define _v3_ext_ku BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_ext_ku) #define _v3_freshest_crl BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_freshest_crl) #define _v3_idp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_idp) #define _v3_info BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_info) #define _v3_inhibit_anyp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_inhibit_anyp) #define _v3_key_usage BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_key_usage) #define _v3_name_constraints BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_name_constraints) #define _v3_ns_ia5_list BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_ns_ia5_list) #define _v3_nscert BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_nscert) #define _v3_ocsp_accresp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_ocsp_accresp) #define _v3_ocsp_nocheck BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_ocsp_nocheck) #define _v3_policy_constraints BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_policy_constraints) #define _v3_policy_mappings BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_policy_mappings) #define _v3_sinfo BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_sinfo) #define _v3_skey_id BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, v3_skey_id) #define _voprf_exp2_blind 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_exp2_blind) #define _voprf_exp2_client_key_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_exp2_client_key_from_bytes) #define _voprf_exp2_derive_key_from_secret BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_exp2_derive_key_from_secret) #define _voprf_exp2_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_exp2_generate_key) #define _voprf_exp2_issuer_key_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_exp2_issuer_key_from_bytes) #define _voprf_exp2_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_exp2_read) #define _voprf_exp2_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_exp2_sign) #define _voprf_exp2_unblind BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_exp2_unblind) #define _voprf_pst1_blind BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_pst1_blind) #define _voprf_pst1_client_key_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_pst1_client_key_from_bytes) #define _voprf_pst1_derive_key_from_secret BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_pst1_derive_key_from_secret) #define _voprf_pst1_generate_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_pst1_generate_key) #define _voprf_pst1_issuer_key_from_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_pst1_issuer_key_from_bytes) #define _voprf_pst1_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_pst1_read) #define _voprf_pst1_sign BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_pst1_sign) #define _voprf_pst1_sign_with_proof_scalar_for_testing BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_pst1_sign_with_proof_scalar_for_testing) #define _voprf_pst1_unblind BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, voprf_pst1_unblind) #define _vpaes_capable BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, vpaes_capable) #define _vpaes_cbc_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, vpaes_cbc_encrypt) #define 
_vpaes_ctr32_encrypt_blocks BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, vpaes_ctr32_encrypt_blocks) #define _vpaes_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, vpaes_decrypt) #define _vpaes_decrypt_key_to_bsaes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, vpaes_decrypt_key_to_bsaes) #define _vpaes_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, vpaes_encrypt) #define _vpaes_set_decrypt_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, vpaes_set_decrypt_key) #define _vpaes_set_encrypt_key BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, vpaes_set_encrypt_key) #define _x25519_asn1_meth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_asn1_meth) #define _x25519_ge_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_ge_add) #define _x25519_ge_frombytes_vartime BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_ge_frombytes_vartime) #define _x25519_ge_p1p1_to_p2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_ge_p1p1_to_p2) #define _x25519_ge_p1p1_to_p3 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_ge_p1p1_to_p3) #define _x25519_ge_p3_to_cached BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_ge_p3_to_cached) #define _x25519_ge_scalarmult BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_ge_scalarmult) #define _x25519_ge_scalarmult_base BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_ge_scalarmult_base) #define _x25519_ge_scalarmult_base_adx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_ge_scalarmult_base_adx) #define _x25519_ge_scalarmult_small_precomp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_ge_scalarmult_small_precomp) #define _x25519_ge_sub BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_ge_sub) #define _x25519_ge_tobytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_ge_tobytes) #define _x25519_pkey_meth BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_pkey_meth) #define _x25519_sc_reduce BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_sc_reduce) #define 
_x25519_scalar_mult_adx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x25519_scalar_mult_adx) #define _x509V3_add_value_asn1_string BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509V3_add_value_asn1_string) #define _x509_check_issued_with_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509_check_issued_with_callback) #define _x509_digest_sign_algorithm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509_digest_sign_algorithm) #define _x509_digest_verify_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509_digest_verify_init) #define _x509_print_rsa_pss_params BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509_print_rsa_pss_params) #define _x509_rsa_ctx_to_pss BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509_rsa_ctx_to_pss) #define _x509_rsa_pss_to_ctx BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509_rsa_pss_to_ctx) #define _x509v3_a2i_ipadd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509v3_a2i_ipadd) #define _x509v3_bytes_to_hex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509v3_bytes_to_hex) #define _x509v3_cache_extensions BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509v3_cache_extensions) #define _x509v3_conf_name_matches BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509v3_conf_name_matches) #define _x509v3_hex_to_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509v3_hex_to_bytes) #define _x509v3_looks_like_dns_name BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, x509v3_looks_like_dns_name) #endif ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_buf.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OPENSSL_HEADER_BUFFER_H
#define OPENSSL_HEADER_BUFFER_H

#include "CNIOBoringSSL_base.h"

#if defined(__cplusplus)
extern "C" {
#endif


// Memory and string functions, see also mem.h.


// buf_mem_st (aka |BUF_MEM|) is a generic buffer object used by OpenSSL.
// Invariant implied by the accessors below: |length| <= |max|, and |data|
// points to at least |max| usable bytes when non-NULL.
struct buf_mem_st {
  size_t length;  // current number of bytes
  char *data;
  size_t max;  // size of buffer
};

// BUF_MEM_new creates a new BUF_MEM which has no allocated data buffer.
OPENSSL_EXPORT BUF_MEM *BUF_MEM_new(void);

// BUF_MEM_free frees |buf->data| if needed and then frees |buf| itself.
OPENSSL_EXPORT void BUF_MEM_free(BUF_MEM *buf);

// BUF_MEM_reserve ensures |buf| has capacity |cap| and allocates memory if
// needed. It returns one on success and zero on error.
OPENSSL_EXPORT int BUF_MEM_reserve(BUF_MEM *buf, size_t cap);

// BUF_MEM_grow ensures that |buf| has length |len| and allocates memory if
// needed. If the length of |buf| increased, the new bytes are filled with
// zeros. It returns the length of |buf|, or zero if there's an error.
OPENSSL_EXPORT size_t BUF_MEM_grow(BUF_MEM *buf, size_t len);

// BUF_MEM_grow_clean calls |BUF_MEM_grow|. BoringSSL always zeros memory
// allocated memory on free.
OPENSSL_EXPORT size_t BUF_MEM_grow_clean(BUF_MEM *buf, size_t len);

// BUF_MEM_append appends |in| to |buf|. It returns one on success and zero on
// error.
OPENSSL_EXPORT int BUF_MEM_append(BUF_MEM *buf, const void *in, size_t len);


// Deprecated functions.

// BUF_strdup calls |OPENSSL_strdup|.
OPENSSL_EXPORT char *BUF_strdup(const char *str);

// BUF_strnlen calls |OPENSSL_strnlen|.
OPENSSL_EXPORT size_t BUF_strnlen(const char *str, size_t max_len);

// BUF_strndup calls |OPENSSL_strndup|.
OPENSSL_EXPORT char *BUF_strndup(const char *str, size_t size);

// BUF_memdup calls |OPENSSL_memdup|.
OPENSSL_EXPORT void *BUF_memdup(const void *data, size_t size); // BUF_strlcpy calls |OPENSSL_strlcpy|. OPENSSL_EXPORT size_t BUF_strlcpy(char *dst, const char *src, size_t dst_size); // BUF_strlcat calls |OPENSSL_strlcat|. OPENSSL_EXPORT size_t BUF_strlcat(char *dst, const char *src, size_t dst_size); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(BUF_MEM, BUF_MEM_free) BSSL_NAMESPACE_END } // extern C++ #endif #endif // OPENSSL_HEADER_BUFFER_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_buffer.h ================================================ /* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. */ #include "CNIOBoringSSL_buf.h" ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_bytestring.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_BYTESTRING_H #define OPENSSL_HEADER_BYTESTRING_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_span.h" #include #if defined(__cplusplus) extern "C" { #endif // Bytestrings are used for parsing and building TLS and ASN.1 messages. // // A "CBS" (CRYPTO ByteString) represents a string of bytes in memory and // provides utility functions for safely parsing length-prefixed structures // like TLS and ASN.1 from it. // // A "CBB" (CRYPTO ByteBuilder) is a memory buffer that grows as needed and // provides utility functions for building length-prefixed messages. // CRYPTO ByteString struct cbs_st { const uint8_t *data; size_t len; #if !defined(BORINGSSL_NO_CXX) // Allow implicit conversions to and from bssl::Span. cbs_st(bssl::Span span) : data(span.data()), len(span.size()) {} operator bssl::Span() const { return bssl::Span(data, len); } // Defining any constructors requires we explicitly default the others. cbs_st() = default; cbs_st(const cbs_st &) = default; cbs_st &operator=(const cbs_st &) = default; #endif }; // CBS_init sets |cbs| to point to |data|. It does not take ownership of // |data|. OPENSSL_INLINE void CBS_init(CBS *cbs, const uint8_t *data, size_t len) { cbs->data = data; cbs->len = len; } // CBS_skip advances |cbs| by |len| bytes. It returns one on success and zero // otherwise. OPENSSL_EXPORT int CBS_skip(CBS *cbs, size_t len); // CBS_data returns a pointer to the contents of |cbs|. 
OPENSSL_INLINE const uint8_t *CBS_data(const CBS *cbs) { return cbs->data; }

// CBS_len returns the number of bytes remaining in |cbs|.
OPENSSL_INLINE size_t CBS_len(const CBS *cbs) { return cbs->len; }

// CBS_stow copies the current contents of |cbs| into |*out_ptr| and
// |*out_len|. If |*out_ptr| is not NULL, the contents are freed with
// OPENSSL_free. It returns one on success and zero on allocation failure. On
// success, |*out_ptr| should be freed with OPENSSL_free. If |cbs| is empty,
// |*out_ptr| will be NULL.
OPENSSL_EXPORT int CBS_stow(const CBS *cbs, uint8_t **out_ptr,
                            size_t *out_len);

// CBS_strdup copies the current contents of |cbs| into |*out_ptr| as a
// NUL-terminated C string. If |*out_ptr| is not NULL, the contents are freed
// with OPENSSL_free. It returns one on success and zero on allocation
// failure. On success, |*out_ptr| should be freed with OPENSSL_free.
//
// NOTE: If |cbs| contains NUL bytes, the string will be truncated. Call
// |CBS_contains_zero_byte(cbs)| to check for NUL bytes.
OPENSSL_EXPORT int CBS_strdup(const CBS *cbs, char **out_ptr);

// CBS_contains_zero_byte returns one if the current contents of |cbs| contains
// a NUL byte and zero otherwise.
OPENSSL_EXPORT int CBS_contains_zero_byte(const CBS *cbs);

// CBS_mem_equal compares the current contents of |cbs| with the |len| bytes
// starting at |data|. If they're equal, it returns one, otherwise zero. If the
// lengths match, it uses a constant-time comparison.
OPENSSL_EXPORT int CBS_mem_equal(const CBS *cbs, const uint8_t *data,
                                 size_t len);

// CBS_get_u8 sets |*out| to the next uint8_t from |cbs| and advances |cbs|. It
// returns one on success and zero on error.
OPENSSL_EXPORT int CBS_get_u8(CBS *cbs, uint8_t *out);

// CBS_get_u16 sets |*out| to the next, big-endian uint16_t from |cbs| and
// advances |cbs|. It returns one on success and zero on error.
OPENSSL_EXPORT int CBS_get_u16(CBS *cbs, uint16_t *out); // CBS_get_u16le sets |*out| to the next, little-endian uint16_t from |cbs| and // advances |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u16le(CBS *cbs, uint16_t *out); // CBS_get_u24 sets |*out| to the next, big-endian 24-bit value from |cbs| and // advances |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u24(CBS *cbs, uint32_t *out); // CBS_get_u32 sets |*out| to the next, big-endian uint32_t value from |cbs| // and advances |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u32(CBS *cbs, uint32_t *out); // CBS_get_u32le sets |*out| to the next, little-endian uint32_t value from // |cbs| and advances |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u32le(CBS *cbs, uint32_t *out); // CBS_get_u64 sets |*out| to the next, big-endian uint64_t value from |cbs| // and advances |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u64(CBS *cbs, uint64_t *out); // CBS_get_u64le sets |*out| to the next, little-endian uint64_t value from // |cbs| and advances |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u64le(CBS *cbs, uint64_t *out); // CBS_get_last_u8 sets |*out| to the last uint8_t from |cbs| and shortens // |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_last_u8(CBS *cbs, uint8_t *out); // CBS_get_bytes sets |*out| to the next |len| bytes from |cbs| and advances // |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_bytes(CBS *cbs, CBS *out, size_t len); // CBS_copy_bytes copies the next |len| bytes from |cbs| to |out| and advances // |cbs|. It returns one on success and zero on error. 
OPENSSL_EXPORT int CBS_copy_bytes(CBS *cbs, uint8_t *out, size_t len); // CBS_get_u8_length_prefixed sets |*out| to the contents of an 8-bit, // length-prefixed value from |cbs| and advances |cbs| over it. It returns one // on success and zero on error. OPENSSL_EXPORT int CBS_get_u8_length_prefixed(CBS *cbs, CBS *out); // CBS_get_u16_length_prefixed sets |*out| to the contents of a 16-bit, // big-endian, length-prefixed value from |cbs| and advances |cbs| over it. It // returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u16_length_prefixed(CBS *cbs, CBS *out); // CBS_get_u24_length_prefixed sets |*out| to the contents of a 24-bit, // big-endian, length-prefixed value from |cbs| and advances |cbs| over it. It // returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u24_length_prefixed(CBS *cbs, CBS *out); // CBS_get_until_first finds the first instance of |c| in |cbs|. If found, it // sets |*out| to the text before the match, advances |cbs| over it, and returns // one. Otherwise, it returns zero and leaves |cbs| unmodified. OPENSSL_EXPORT int CBS_get_until_first(CBS *cbs, CBS *out, uint8_t c); // CBS_get_u64_decimal reads a decimal integer from |cbs| and writes it to // |*out|. It stops reading at the end of the string, or the first non-digit // character. It returns one on success and zero on error. This function behaves // analogously to |strtoul| except it does not accept empty inputs, leading // zeros, or negative values. OPENSSL_EXPORT int CBS_get_u64_decimal(CBS *cbs, uint64_t *out); // Parsing ASN.1 // // |CBS| may be used to parse DER structures. Rather than using a schema // compiler, the following functions act on tag-length-value elements in the // serialization itself. Thus the caller is responsible for looping over a // SEQUENCE, branching on CHOICEs or OPTIONAL fields, checking for trailing // data, and handling explict vs. implicit tagging. // // Tags are represented as |CBS_ASN1_TAG| values in memory. 
The upper few bits // store the class and constructed bit, and the remaining bits store the tag // number. Note this differs from the DER serialization, to support tag numbers // beyond 31. Consumers must use the constants defined below to decompose or // assemble tags. // // This library treats an element's constructed bit as part of its tag. In DER, // the constructed bit is computable from the type. The constants for universal // types have the bit set. Callers must set it correctly for tagged types. // Explicitly-tagged types are always constructed, and implicitly-tagged types // inherit the underlying type's bit. // CBS_ASN1_TAG_SHIFT is how much the in-memory representation shifts the class // and constructed bits from the DER serialization. #define CBS_ASN1_TAG_SHIFT 24 // CBS_ASN1_CONSTRUCTED may be ORed into a tag to set the constructed bit. #define CBS_ASN1_CONSTRUCTED (0x20u << CBS_ASN1_TAG_SHIFT) // The following values specify the tag class and may be ORed into a tag number // to produce the final tag. If none is used, the tag will be UNIVERSAL. #define CBS_ASN1_UNIVERSAL (0u << CBS_ASN1_TAG_SHIFT) #define CBS_ASN1_APPLICATION (0x40u << CBS_ASN1_TAG_SHIFT) #define CBS_ASN1_CONTEXT_SPECIFIC (0x80u << CBS_ASN1_TAG_SHIFT) #define CBS_ASN1_PRIVATE (0xc0u << CBS_ASN1_TAG_SHIFT) // CBS_ASN1_CLASS_MASK may be ANDed with a tag to query its class. This will // give one of the four values above. #define CBS_ASN1_CLASS_MASK (0xc0u << CBS_ASN1_TAG_SHIFT) // CBS_ASN1_TAG_NUMBER_MASK may be ANDed with a tag to query its number. #define CBS_ASN1_TAG_NUMBER_MASK ((1u << (5 + CBS_ASN1_TAG_SHIFT)) - 1) // The following values are constants for UNIVERSAL tags. Note these constants // include the constructed bit. 
#define CBS_ASN1_BOOLEAN 0x1u #define CBS_ASN1_INTEGER 0x2u #define CBS_ASN1_BITSTRING 0x3u #define CBS_ASN1_OCTETSTRING 0x4u #define CBS_ASN1_NULL 0x5u #define CBS_ASN1_OBJECT 0x6u #define CBS_ASN1_ENUMERATED 0xau #define CBS_ASN1_UTF8STRING 0xcu #define CBS_ASN1_SEQUENCE (0x10u | CBS_ASN1_CONSTRUCTED) #define CBS_ASN1_SET (0x11u | CBS_ASN1_CONSTRUCTED) #define CBS_ASN1_NUMERICSTRING 0x12u #define CBS_ASN1_PRINTABLESTRING 0x13u #define CBS_ASN1_T61STRING 0x14u #define CBS_ASN1_VIDEOTEXSTRING 0x15u #define CBS_ASN1_IA5STRING 0x16u #define CBS_ASN1_UTCTIME 0x17u #define CBS_ASN1_GENERALIZEDTIME 0x18u #define CBS_ASN1_GRAPHICSTRING 0x19u #define CBS_ASN1_VISIBLESTRING 0x1au #define CBS_ASN1_GENERALSTRING 0x1bu #define CBS_ASN1_UNIVERSALSTRING 0x1cu #define CBS_ASN1_BMPSTRING 0x1eu // CBS_get_asn1 sets |*out| to the contents of DER-encoded, ASN.1 element (not // including tag and length bytes) and advances |cbs| over it. The ASN.1 // element must match |tag_value|. It returns one on success and zero // on error. OPENSSL_EXPORT int CBS_get_asn1(CBS *cbs, CBS *out, CBS_ASN1_TAG tag_value); // CBS_get_asn1_element acts like |CBS_get_asn1| but |out| will include the // ASN.1 header bytes too. OPENSSL_EXPORT int CBS_get_asn1_element(CBS *cbs, CBS *out, CBS_ASN1_TAG tag_value); // CBS_peek_asn1_tag looks ahead at the next ASN.1 tag and returns one // if the next ASN.1 element on |cbs| would have tag |tag_value|. If // |cbs| is empty or the tag does not match, it returns zero. Note: if // it returns one, CBS_get_asn1 may still fail if the rest of the // element is malformed. OPENSSL_EXPORT int CBS_peek_asn1_tag(const CBS *cbs, CBS_ASN1_TAG tag_value); // CBS_get_any_asn1 sets |*out| to contain the next ASN.1 element from |*cbs| // (not including tag and length bytes), sets |*out_tag| to the tag number, and // advances |*cbs|. It returns one on success and zero on error. Either of |out| // and |out_tag| may be NULL to ignore the value. 
OPENSSL_EXPORT int CBS_get_any_asn1(CBS *cbs, CBS *out, CBS_ASN1_TAG *out_tag); // CBS_get_any_asn1_element sets |*out| to contain the next ASN.1 element from // |*cbs| (including header bytes) and advances |*cbs|. It sets |*out_tag| to // the tag number and |*out_header_len| to the length of the ASN.1 header. Each // of |out|, |out_tag|, and |out_header_len| may be NULL to ignore the value. OPENSSL_EXPORT int CBS_get_any_asn1_element(CBS *cbs, CBS *out, CBS_ASN1_TAG *out_tag, size_t *out_header_len); // CBS_get_any_ber_asn1_element acts the same as |CBS_get_any_asn1_element| but // also allows indefinite-length elements to be returned and does not enforce // that lengths are minimal. It sets |*out_indefinite| to one if the length was // indefinite and zero otherwise. If indefinite, |*out_header_len| and // |CBS_len(out)| will be equal as only the header is returned (although this is // also true for empty elements so |*out_indefinite| should be checked). If // |out_ber_found| is not NULL then it is set to one if any case of invalid DER // but valid BER is found, and to zero otherwise. // // This function will not successfully parse an end-of-contents (EOC) as an // element. Callers parsing indefinite-length encoding must check for EOC // separately. OPENSSL_EXPORT int CBS_get_any_ber_asn1_element(CBS *cbs, CBS *out, CBS_ASN1_TAG *out_tag, size_t *out_header_len, int *out_ber_found, int *out_indefinite); // CBS_get_asn1_uint64 gets an ASN.1 INTEGER from |cbs| using |CBS_get_asn1| // and sets |*out| to its value. It returns one on success and zero on error, // where error includes the integer being negative, or too large to represent // in 64 bits. OPENSSL_EXPORT int CBS_get_asn1_uint64(CBS *cbs, uint64_t *out); // CBS_get_asn1_int64 gets an ASN.1 INTEGER from |cbs| using |CBS_get_asn1| // and sets |*out| to its value. It returns one on success and zero on error, // where error includes the integer being too large to represent in 64 bits. 
OPENSSL_EXPORT int CBS_get_asn1_int64(CBS *cbs, int64_t *out); // CBS_get_asn1_bool gets an ASN.1 BOOLEAN from |cbs| and sets |*out| to zero // or one based on its value. It returns one on success or zero on error. OPENSSL_EXPORT int CBS_get_asn1_bool(CBS *cbs, int *out); // CBS_get_optional_asn1 gets an optional explicitly-tagged element from |cbs| // tagged with |tag| and sets |*out| to its contents, or ignores it if |out| is // NULL. If present and if |out_present| is not NULL, it sets |*out_present| to // one, otherwise zero. It returns one on success, whether or not the element // was present, and zero on decode failure. OPENSSL_EXPORT int CBS_get_optional_asn1(CBS *cbs, CBS *out, int *out_present, CBS_ASN1_TAG tag); // CBS_get_optional_asn1_octet_string gets an optional // explicitly-tagged OCTET STRING from |cbs|. If present, it sets // |*out| to the string and |*out_present| to one. Otherwise, it sets // |*out| to empty and |*out_present| to zero. |out_present| may be // NULL. It returns one on success, whether or not the element was // present, and zero on decode failure. OPENSSL_EXPORT int CBS_get_optional_asn1_octet_string(CBS *cbs, CBS *out, int *out_present, CBS_ASN1_TAG tag); // CBS_get_optional_asn1_uint64 gets an optional explicitly-tagged // INTEGER from |cbs|. If present, it sets |*out| to the // value. Otherwise, it sets |*out| to |default_value|. It returns one // on success, whether or not the element was present, and zero on // decode failure. OPENSSL_EXPORT int CBS_get_optional_asn1_uint64(CBS *cbs, uint64_t *out, CBS_ASN1_TAG tag, uint64_t default_value); // CBS_get_optional_asn1_bool gets an optional, explicitly-tagged BOOLEAN from // |cbs|. If present, it sets |*out| to either zero or one, based on the // boolean. Otherwise, it sets |*out| to |default_value|. It returns one on // success, whether or not the element was present, and zero on decode // failure. 
OPENSSL_EXPORT int CBS_get_optional_asn1_bool(CBS *cbs, int *out, CBS_ASN1_TAG tag, int default_value); // CBS_is_valid_asn1_bitstring returns one if |cbs| is a valid ASN.1 BIT STRING // body and zero otherwise. OPENSSL_EXPORT int CBS_is_valid_asn1_bitstring(const CBS *cbs); // CBS_asn1_bitstring_has_bit returns one if |cbs| is a valid ASN.1 BIT STRING // body and the specified bit is present and set. Otherwise, it returns zero. // |bit| is indexed starting from zero. OPENSSL_EXPORT int CBS_asn1_bitstring_has_bit(const CBS *cbs, unsigned bit); // CBS_is_valid_asn1_integer returns one if |cbs| is a valid ASN.1 INTEGER, // body and zero otherwise. On success, if |out_is_negative| is non-NULL, // |*out_is_negative| will be set to one if |cbs| is negative and zero // otherwise. OPENSSL_EXPORT int CBS_is_valid_asn1_integer(const CBS *cbs, int *out_is_negative); // CBS_is_unsigned_asn1_integer returns one if |cbs| is a valid non-negative // ASN.1 INTEGER body and zero otherwise. OPENSSL_EXPORT int CBS_is_unsigned_asn1_integer(const CBS *cbs); // CBS_is_valid_asn1_oid returns one if |cbs| is a valid DER-encoded ASN.1 // OBJECT IDENTIFIER contents (not including the element framing) and zero // otherwise. This function tolerates arbitrarily large OID components. OPENSSL_EXPORT int CBS_is_valid_asn1_oid(const CBS *cbs); // CBS_asn1_oid_to_text interprets |cbs| as DER-encoded ASN.1 OBJECT IDENTIFIER // contents (not including the element framing) and returns the ASCII // representation (e.g., "1.2.840.113554.4.1.72585") in a newly-allocated // string, or NULL on failure. The caller must release the result with // |OPENSSL_free|. // // This function may fail if |cbs| is an invalid OBJECT IDENTIFIER, or if any // OID components are too large. OPENSSL_EXPORT char *CBS_asn1_oid_to_text(const CBS *cbs); // CBS_parse_generalized_time returns one if |cbs| is a valid DER-encoded, ASN.1 // GeneralizedTime body within the limitations imposed by RFC 5280, or zero // otherwise. 
// If |allow_timezone_offset| is non-zero, four-digit timezone
// offsets, which would not be allowed by DER, are permitted. On success, if
// |out_tm| is non-NULL, |*out_tm| will be zeroed, and then set to the
// corresponding time in UTC. This function does not compute |out_tm->tm_wday|
// or |out_tm->tm_yday|.
OPENSSL_EXPORT int CBS_parse_generalized_time(const CBS *cbs,
                                              struct tm *out_tm,
                                              int allow_timezone_offset);

// CBS_parse_utc_time returns one if |cbs| is a valid DER-encoded, ASN.1
// UTCTime body within the limitations imposed by RFC 5280, or zero otherwise.
// If |allow_timezone_offset| is non-zero, four-digit timezone offsets, which
// would not be allowed by DER, are permitted. On success, if |out_tm| is
// non-NULL, |*out_tm| will be zeroed, and then set to the corresponding time
// in UTC. This function does not compute |out_tm->tm_wday| or
// |out_tm->tm_yday|.
OPENSSL_EXPORT int CBS_parse_utc_time(const CBS *cbs, struct tm *out_tm,
                                      int allow_timezone_offset);


// CRYPTO ByteBuilder.
//
// |CBB| objects allow one to build length-prefixed serialisations. A |CBB|
// object is associated with a buffer and new buffers are created with
// |CBB_init|. Several |CBB| objects can point at the same buffer when a
// length-prefix is pending, however only a single |CBB| can be 'current' at
// any one time. For example, if one calls |CBB_add_u8_length_prefixed| then
// the new |CBB| points at the same buffer as the original. But if the original
// |CBB| is used then the length prefix is written out and the new |CBB| must
// not be used again.
//
// If one needs to force a length prefix to be written out because a |CBB| is
// going out of scope, use |CBB_flush|. If an operation on a |CBB| fails, it is
// in an undefined state and must not be used except to call |CBB_cleanup|.

// cbb_buffer_st is the shared backing store for a top-level |CBB| and all of
// its children.
struct cbb_buffer_st {
  uint8_t *buf;
  // len is the number of valid bytes in |buf|.
  size_t len;
  // cap is the size of |buf|.
  size_t cap;
  // can_resize is one iff |buf| is owned by this object. If not then |buf|
  // cannot be resized.
  unsigned can_resize : 1;
  // error is one if there was an error writing to this CBB. All future
  // operations will fail.
  unsigned error : 1;
};

// cbb_child_st is the state for a child |CBB| with a pending length prefix.
struct cbb_child_st {
  // base is a pointer to the buffer this |CBB| writes to.
  struct cbb_buffer_st *base;
  // offset is the number of bytes from the start of |base->buf| to this |CBB|'s
  // pending length prefix.
  size_t offset;
  // pending_len_len contains the number of bytes in this |CBB|'s pending
  // length-prefix, or zero if no length-prefix is pending.
  uint8_t pending_len_len;
  unsigned pending_is_asn1 : 1;
};

struct cbb_st {
  // child points to a child CBB if a length-prefix is pending.
  CBB *child;
  // is_child is one if this is a child |CBB| and zero if it is a top-level
  // |CBB|. This determines which arm of the union is valid.
  char is_child;
  union {
    struct cbb_buffer_st base;
    struct cbb_child_st child;
  } u;
};

// CBB_zero sets an uninitialised |cbb| to the zero state. It must be
// initialised with |CBB_init| or |CBB_init_fixed| before use, but it is safe to
// call |CBB_cleanup| without a successful |CBB_init|. This may be used for more
// uniform cleanup of a |CBB|.
OPENSSL_EXPORT void CBB_zero(CBB *cbb);

// CBB_init initialises |cbb| with |initial_capacity|. Since a |CBB| grows as
// needed, the |initial_capacity| is just a hint. It returns one on success or
// zero on allocation failure.
OPENSSL_EXPORT int CBB_init(CBB *cbb, size_t initial_capacity);

// CBB_init_fixed initialises |cbb| to write to |len| bytes at |buf|. Since
// |buf| cannot grow, trying to write more than |len| bytes will cause CBB
// functions to fail. This function is infallible and always returns one. It is
// safe, but not necessary, to call |CBB_cleanup| on |cbb|.
OPENSSL_EXPORT int CBB_init_fixed(CBB *cbb, uint8_t *buf, size_t len);

// CBB_cleanup frees all resources owned by |cbb| and other |CBB| objects
// writing to the same buffer.
This should be used in an error case where a // serialisation is abandoned. // // This function can only be called on a "top level" |CBB|, i.e. one initialised // with |CBB_init| or |CBB_init_fixed|, or a |CBB| set to the zero state with // |CBB_zero|. OPENSSL_EXPORT void CBB_cleanup(CBB *cbb); // CBB_finish completes any pending length prefix and sets |*out_data| to a // malloced buffer and |*out_len| to the length of that buffer. The caller // takes ownership of the buffer and, unless the buffer was fixed with // |CBB_init_fixed|, must call |OPENSSL_free| when done. // // It can only be called on a "top level" |CBB|, i.e. one initialised with // |CBB_init| or |CBB_init_fixed|. It returns one on success and zero on // error. OPENSSL_EXPORT int CBB_finish(CBB *cbb, uint8_t **out_data, size_t *out_len); // CBB_flush causes any pending length prefixes to be written out and any child // |CBB| objects of |cbb| to be invalidated. This allows |cbb| to continue to be // used after the children go out of scope, e.g. when local |CBB| objects are // added as children to a |CBB| that persists after a function returns. This // function returns one on success or zero on error. OPENSSL_EXPORT int CBB_flush(CBB *cbb); // CBB_data returns a pointer to the bytes written to |cbb|. It does not flush // |cbb|. The pointer is valid until the next operation to |cbb|. // // To avoid unfinalized length prefixes, it is a fatal error to call this on a // CBB with any active children. OPENSSL_EXPORT const uint8_t *CBB_data(const CBB *cbb); // CBB_len returns the number of bytes written to |cbb|. It does not flush // |cbb|. // // To avoid unfinalized length prefixes, it is a fatal error to call this on a // CBB with any active children. OPENSSL_EXPORT size_t CBB_len(const CBB *cbb); // CBB_add_u8_length_prefixed sets |*out_contents| to a new child of |cbb|. The // data written to |*out_contents| will be prefixed in |cbb| with an 8-bit // length. It returns one on success or zero on error. 
OPENSSL_EXPORT int CBB_add_u8_length_prefixed(CBB *cbb, CBB *out_contents); // CBB_add_u16_length_prefixed sets |*out_contents| to a new child of |cbb|. // The data written to |*out_contents| will be prefixed in |cbb| with a 16-bit, // big-endian length. It returns one on success or zero on error. OPENSSL_EXPORT int CBB_add_u16_length_prefixed(CBB *cbb, CBB *out_contents); // CBB_add_u24_length_prefixed sets |*out_contents| to a new child of |cbb|. // The data written to |*out_contents| will be prefixed in |cbb| with a 24-bit, // big-endian length. It returns one on success or zero on error. OPENSSL_EXPORT int CBB_add_u24_length_prefixed(CBB *cbb, CBB *out_contents); // CBB_add_asn1 sets |*out_contents| to a |CBB| into which the contents of an // ASN.1 object can be written. The |tag| argument will be used as the tag for // the object. It returns one on success or zero on error. OPENSSL_EXPORT int CBB_add_asn1(CBB *cbb, CBB *out_contents, CBS_ASN1_TAG tag); // CBB_add_bytes appends |len| bytes from |data| to |cbb|. It returns one on // success and zero otherwise. OPENSSL_EXPORT int CBB_add_bytes(CBB *cbb, const uint8_t *data, size_t len); // CBB_add_zeros appends |len| bytes with value zero to |cbb|. It returns one on // success and zero otherwise. OPENSSL_EXPORT int CBB_add_zeros(CBB *cbb, size_t len); // CBB_add_space appends |len| bytes to |cbb| and sets |*out_data| to point to // the beginning of that space. The caller must then write |len| bytes of // actual contents to |*out_data|. It returns one on success and zero // otherwise. OPENSSL_EXPORT int CBB_add_space(CBB *cbb, uint8_t **out_data, size_t len); // CBB_reserve ensures |cbb| has room for |len| additional bytes and sets // |*out_data| to point to the beginning of that space. It returns one on // success and zero otherwise. The caller may write up to |len| bytes to // |*out_data| and call |CBB_did_write| to complete the write.
|*out_data| is // valid until the next operation on |cbb| or an ancestor |CBB|. OPENSSL_EXPORT int CBB_reserve(CBB *cbb, uint8_t **out_data, size_t len); // CBB_did_write advances |cbb| by |len| bytes, assuming the space has been // written to by the caller. It returns one on success and zero on error. OPENSSL_EXPORT int CBB_did_write(CBB *cbb, size_t len); // CBB_add_u8 appends an 8-bit number from |value| to |cbb|. It returns one on // success and zero otherwise. OPENSSL_EXPORT int CBB_add_u8(CBB *cbb, uint8_t value); // CBB_add_u16 appends a 16-bit, big-endian number from |value| to |cbb|. It // returns one on success and zero otherwise. OPENSSL_EXPORT int CBB_add_u16(CBB *cbb, uint16_t value); // CBB_add_u16le appends a 16-bit, little-endian number from |value| to |cbb|. // It returns one on success and zero otherwise. OPENSSL_EXPORT int CBB_add_u16le(CBB *cbb, uint16_t value); // CBB_add_u24 appends a 24-bit, big-endian number from |value| to |cbb|. It // returns one on success and zero otherwise. OPENSSL_EXPORT int CBB_add_u24(CBB *cbb, uint32_t value); // CBB_add_u32 appends a 32-bit, big-endian number from |value| to |cbb|. It // returns one on success and zero otherwise. OPENSSL_EXPORT int CBB_add_u32(CBB *cbb, uint32_t value); // CBB_add_u32le appends a 32-bit, little-endian number from |value| to |cbb|. // It returns one on success and zero otherwise. OPENSSL_EXPORT int CBB_add_u32le(CBB *cbb, uint32_t value); // CBB_add_u64 appends a 64-bit, big-endian number from |value| to |cbb|. It // returns one on success and zero otherwise. OPENSSL_EXPORT int CBB_add_u64(CBB *cbb, uint64_t value); // CBB_add_u64le appends a 64-bit, little-endian number from |value| to |cbb|. // It returns one on success and zero otherwise. OPENSSL_EXPORT int CBB_add_u64le(CBB *cbb, uint64_t value); // CBB_discard_child discards the current unflushed child of |cbb|. Neither the // child's contents nor the length prefix will be included in the output. 
OPENSSL_EXPORT void CBB_discard_child(CBB *cbb); // CBB_add_asn1_uint64 writes an ASN.1 INTEGER into |cbb| using |CBB_add_asn1| // and writes |value| in its contents. It returns one on success and zero on // error. OPENSSL_EXPORT int CBB_add_asn1_uint64(CBB *cbb, uint64_t value); // CBB_add_asn1_uint64_with_tag behaves like |CBB_add_asn1_uint64| but uses // |tag| as the tag instead of INTEGER. This is useful if the INTEGER type uses // implicit tagging. OPENSSL_EXPORT int CBB_add_asn1_uint64_with_tag(CBB *cbb, uint64_t value, CBS_ASN1_TAG tag); // CBB_add_asn1_int64 writes an ASN.1 INTEGER into |cbb| using |CBB_add_asn1| // and writes |value| in its contents. It returns one on success and zero on // error. OPENSSL_EXPORT int CBB_add_asn1_int64(CBB *cbb, int64_t value); // CBB_add_asn1_int64_with_tag behaves like |CBB_add_asn1_int64| but uses |tag| // as the tag instead of INTEGER. This is useful if the INTEGER type uses // implicit tagging. OPENSSL_EXPORT int CBB_add_asn1_int64_with_tag(CBB *cbb, int64_t value, CBS_ASN1_TAG tag); // CBB_add_asn1_octet_string writes an ASN.1 OCTET STRING into |cbb| with the // given contents. It returns one on success and zero on error. OPENSSL_EXPORT int CBB_add_asn1_octet_string(CBB *cbb, const uint8_t *data, size_t data_len); // CBB_add_asn1_bool writes an ASN.1 BOOLEAN into |cbb| which is true iff // |value| is non-zero. It returns one on success and zero on error. OPENSSL_EXPORT int CBB_add_asn1_bool(CBB *cbb, int value); // CBB_add_asn1_oid_from_text decodes |len| bytes from |text| as an ASCII OID // representation, e.g. "1.2.840.113554.4.1.72585", and writes the DER-encoded // contents to |cbb|. It returns one on success and zero on malloc failure or if // |text| was invalid. It does not include the OBJECT IDENTIFIER framing, only // the element's contents. // // This function considers OID strings with components which do not fit in a // |uint64_t| to be invalid.
OPENSSL_EXPORT int CBB_add_asn1_oid_from_text(CBB *cbb, const char *text, size_t len); // CBB_flush_asn1_set_of calls |CBB_flush| on |cbb| and then reorders the // contents for a DER-encoded ASN.1 SET OF type. It returns one on success and // zero on failure. DER canonicalizes SET OF contents by sorting // lexicographically by encoding. Call this function when encoding a SET OF // type in an order that is not already known to be canonical. // // Note a SET type has a slightly different ordering than a SET OF. OPENSSL_EXPORT int CBB_flush_asn1_set_of(CBB *cbb); // Unicode utilities. // // These functions consider noncharacters (see section 23.7 from Unicode 15.0.0) // to be invalid code points and will treat them as an error condition. // The following functions read one Unicode code point from |cbs| with the // corresponding encoding and store it in |*out|. They return one on success and // zero on error. OPENSSL_EXPORT int CBS_get_utf8(CBS *cbs, uint32_t *out); OPENSSL_EXPORT int CBS_get_latin1(CBS *cbs, uint32_t *out); OPENSSL_EXPORT int CBS_get_ucs2_be(CBS *cbs, uint32_t *out); OPENSSL_EXPORT int CBS_get_utf32_be(CBS *cbs, uint32_t *out); // CBB_get_utf8_len returns the number of bytes needed to represent |u| in // UTF-8. OPENSSL_EXPORT size_t CBB_get_utf8_len(uint32_t u); // The following functions encode |u| to |cbb| with the corresponding // encoding. They return one on success and zero on error. Error conditions // include |u| being an invalid code point, or |u| being unencodable in the // specified encoding. 
OPENSSL_EXPORT int CBB_add_utf8(CBB *cbb, uint32_t u);
OPENSSL_EXPORT int CBB_add_latin1(CBB *cbb, uint32_t u);
OPENSSL_EXPORT int CBB_add_ucs2_be(CBB *cbb, uint32_t u);
OPENSSL_EXPORT int CBB_add_utf32_be(CBB *cbb, uint32_t u);


#if defined(__cplusplus)
}  // extern C

#if !defined(BORINGSSL_NO_CXX)
extern "C++" {

BSSL_NAMESPACE_BEGIN

// NOTE(review): the extraction appears to have stripped a template argument
// list here (upstream BoringSSL declares this as
// internal::StackAllocated<CBB, void, CBB_zero, CBB_cleanup>) — confirm
// against the upstream header. Tokens kept as-is.
using ScopedCBB = internal::StackAllocated;

BSSL_NAMESPACE_END

}  // extern C++
#endif

#endif

#endif  // OPENSSL_HEADER_BYTESTRING_H


================================================
FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_cast.h
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OPENSSL_HEADER_CAST_H
#define OPENSSL_HEADER_CAST_H

#include "CNIOBoringSSL_base.h"

#ifdef __cplusplus
extern "C" {
#endif


// CAST block cipher.
//
// NOTE(review): "CAST" here is the legacy CAST cipher inherited from OpenSSL
// (CAST5/CAST-128 per OpenSSL naming — confirm); the constants below fix an
// 8-byte block and a 16-byte key.

// Direction flags for the |enc| parameter of the encryption functions below.
#define CAST_ENCRYPT 1
#define CAST_DECRYPT 0

// CAST_BLOCK is the cipher's block size in bytes. CAST_KEY_LENGTH is the key
// length in bytes.
#define CAST_BLOCK 8
#define CAST_KEY_LENGTH 16

typedef struct cast_key_st {
  uint32_t data[32];  // expanded key schedule
  int short_key;      // Use reduced rounds for short key
} CAST_KEY;

// CAST_set_key expands |len| bytes from |data| into the schedule in |key|.
OPENSSL_EXPORT void CAST_set_key(CAST_KEY *key, size_t len, const uint8_t *data);

// CAST_ecb_encrypt encrypts (or decrypts, if |enc| is |CAST_DECRYPT|) one
// block from |in| to |out| using |key|.
OPENSSL_EXPORT void CAST_ecb_encrypt(const uint8_t *in, uint8_t *out, const CAST_KEY *key, int enc);

// CAST_encrypt and CAST_decrypt transform a single block, supplied as 32-bit
// words in |data|, in place.
OPENSSL_EXPORT void CAST_encrypt(uint32_t *data, const CAST_KEY *key);
OPENSSL_EXPORT void CAST_decrypt(uint32_t *data, const CAST_KEY *key);

// CAST_cbc_encrypt encrypts or decrypts (per |enc|) |length| bytes from |in|
// to |out| in CBC mode with |ks|, using and updating |iv|.
OPENSSL_EXPORT void CAST_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, const CAST_KEY *ks, uint8_t *iv, int enc);

// CAST_cfb64_encrypt processes |length| bytes in 64-bit CFB mode; |num|
// tracks the position within the current feedback block across calls.
OPENSSL_EXPORT void CAST_cfb64_encrypt(const uint8_t *in, uint8_t *out, size_t length, const CAST_KEY *schedule, uint8_t *ivec, int *num, int enc);


#ifdef __cplusplus
}
#endif

#endif  // OPENSSL_HEADER_CAST_H


================================================
FILE:
Sources/CNIOBoringSSL/include/CNIOBoringSSL_chacha.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CHACHA_H #define OPENSSL_HEADER_CHACHA_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // ChaCha20. // // ChaCha20 is a stream cipher. See https://tools.ietf.org/html/rfc8439. // CRYPTO_chacha_20 encrypts |in_len| bytes from |in| with the given key and // nonce and writes the result to |out|. If |in| and |out| alias, they must be // equal. The initial block counter is specified by |counter|. // // This function implements a 32-bit block counter as in RFC 8439. On overflow, // the counter wraps. Reusing a key, nonce, and block counter combination is not // secure, so wrapping is usually a bug in the caller. While it is possible to // wrap without reuse with a large initial block counter, this is not // recommended and may not be portable to other ChaCha20 implementations. 
OPENSSL_EXPORT void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, size_t in_len, const uint8_t key[32], const uint8_t nonce[12], uint32_t counter); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_CHACHA_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_cipher.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_CIPHER_H #define OPENSSL_HEADER_CIPHER_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // Ciphers. // Cipher primitives. // // The following functions return |EVP_CIPHER| objects that implement the named // cipher algorithm. OPENSSL_EXPORT const EVP_CIPHER *EVP_rc4(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_des_cbc(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_des_ecb(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_des_ede(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_des_ede3(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_des_ede_cbc(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_des_ede3_cbc(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_128_ecb(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_128_cbc(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_128_ctr(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_128_ofb(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_ecb(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_cbc(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_ctr(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_ofb(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_xts(void); // EVP_enc_null returns a 'cipher' that passes plaintext through as // ciphertext. 
OPENSSL_EXPORT const EVP_CIPHER *EVP_enc_null(void); // EVP_rc2_cbc returns a cipher that implements 128-bit RC2 in CBC mode. OPENSSL_EXPORT const EVP_CIPHER *EVP_rc2_cbc(void); // EVP_rc2_40_cbc returns a cipher that implements 40-bit RC2 in CBC mode. This // is obviously very, very weak and is included only in order to read PKCS#12 // files, which often encrypt the certificate chain using this cipher. It is // deliberately not exported. const EVP_CIPHER *EVP_rc2_40_cbc(void); // EVP_get_cipherbynid returns the cipher corresponding to the given NID, or // NULL if no such cipher is known. Note using this function links almost every // cipher implemented by BoringSSL into the binary, whether the caller uses them // or not. Size-conscious callers, such as client software, should not use this // function. OPENSSL_EXPORT const EVP_CIPHER *EVP_get_cipherbynid(int nid); // Cipher context allocation. // // An |EVP_CIPHER_CTX| represents the state of an encryption or decryption in // progress. // EVP_CIPHER_CTX_init initialises an, already allocated, |EVP_CIPHER_CTX|. OPENSSL_EXPORT void EVP_CIPHER_CTX_init(EVP_CIPHER_CTX *ctx); // EVP_CIPHER_CTX_new allocates a fresh |EVP_CIPHER_CTX|, calls // |EVP_CIPHER_CTX_init| and returns it, or NULL on allocation failure. OPENSSL_EXPORT EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void); // EVP_CIPHER_CTX_cleanup frees any memory referenced by |ctx|. It returns // one. OPENSSL_EXPORT int EVP_CIPHER_CTX_cleanup(EVP_CIPHER_CTX *ctx); // EVP_CIPHER_CTX_free calls |EVP_CIPHER_CTX_cleanup| on |ctx| and then frees // |ctx| itself. OPENSSL_EXPORT void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx); // EVP_CIPHER_CTX_copy sets |out| to be a duplicate of the current state of // |in|. The |out| argument must have been previously initialised. OPENSSL_EXPORT int EVP_CIPHER_CTX_copy(EVP_CIPHER_CTX *out, const EVP_CIPHER_CTX *in); // EVP_CIPHER_CTX_reset calls |EVP_CIPHER_CTX_cleanup| followed by // |EVP_CIPHER_CTX_init| and returns one. 
OPENSSL_EXPORT int EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *ctx); // Cipher context configuration. // EVP_CipherInit_ex configures |ctx| for a fresh encryption (or decryption, if // |enc| is zero) operation using |cipher|. If |ctx| has been previously // configured with a cipher then |cipher|, |key| and |iv| may be |NULL| and // |enc| may be -1 to reuse the previous values. The operation will use |key| // as the key and |iv| as the IV (if any). These should have the correct // lengths given by |EVP_CIPHER_key_length| and |EVP_CIPHER_iv_length|. It // returns one on success and zero on error. OPENSSL_EXPORT int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *engine, const uint8_t *key, const uint8_t *iv, int enc); // EVP_EncryptInit_ex calls |EVP_CipherInit_ex| with |enc| equal to one. OPENSSL_EXPORT int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *impl, const uint8_t *key, const uint8_t *iv); // EVP_DecryptInit_ex calls |EVP_CipherInit_ex| with |enc| equal to zero. OPENSSL_EXPORT int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *impl, const uint8_t *key, const uint8_t *iv); // Cipher operations. // EVP_EncryptUpdate encrypts |in_len| bytes from |in| to |out|. The number // of output bytes may be up to |in_len| plus the block length minus one and // |out| must have sufficient space. The number of bytes actually output is // written to |*out_len|. It returns one on success and zero otherwise. // // If |ctx| is an AEAD cipher, e.g. |EVP_aes_128_gcm|, and |out| is NULL, this // function instead adds |in_len| bytes from |in| to the AAD and sets |*out_len| // to |in_len|. The AAD must be fully specified in this way before this function // is used to encrypt plaintext. 
OPENSSL_EXPORT int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, int in_len); // EVP_EncryptFinal_ex writes at most a block of ciphertext to |out| and sets // |*out_len| to the number of bytes written. If padding is enabled (the // default) then standard padding is applied to create the final block. If // padding is disabled (with |EVP_CIPHER_CTX_set_padding|) then any partial // block remaining will cause an error. The function returns one on success and // zero otherwise. OPENSSL_EXPORT int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len); // EVP_DecryptUpdate decrypts |in_len| bytes from |in| to |out|. The number of // output bytes may be up to |in_len| plus the block length minus one and |out| // must have sufficient space. The number of bytes actually output is written // to |*out_len|. It returns one on success and zero otherwise. // // If |ctx| is an AEAD cipher, e.g. |EVP_aes_128_gcm|, and |out| is NULL, this // function instead adds |in_len| bytes from |in| to the AAD and sets |*out_len| // to |in_len|. The AAD must be fully specified in this way before this function // is used to decrypt ciphertext. OPENSSL_EXPORT int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, int in_len); // EVP_DecryptFinal_ex writes at most a block of ciphertext to |out| and sets // |*out_len| to the number of bytes written. If padding is enabled (the // default) then padding is removed from the final block. // // WARNING: it is unsafe to call this function with unauthenticated // ciphertext if padding is enabled. OPENSSL_EXPORT int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len); // EVP_CipherUpdate calls either |EVP_EncryptUpdate| or |EVP_DecryptUpdate| // depending on how |ctx| has been setup. 
OPENSSL_EXPORT int EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, int in_len); // EVP_CipherFinal_ex calls either |EVP_EncryptFinal_ex| or // |EVP_DecryptFinal_ex| depending on how |ctx| has been setup. OPENSSL_EXPORT int EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len); // Cipher context accessors. // EVP_CIPHER_CTX_cipher returns the |EVP_CIPHER| underlying |ctx|, or NULL if // none has been set. OPENSSL_EXPORT const EVP_CIPHER *EVP_CIPHER_CTX_cipher( const EVP_CIPHER_CTX *ctx); // EVP_CIPHER_CTX_nid returns a NID identifying the |EVP_CIPHER| underlying // |ctx| (e.g. |NID_aes_128_gcm|). It will crash if no cipher has been // configured. OPENSSL_EXPORT int EVP_CIPHER_CTX_nid(const EVP_CIPHER_CTX *ctx); // EVP_CIPHER_CTX_encrypting returns one if |ctx| is configured for encryption // and zero otherwise. OPENSSL_EXPORT int EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx); // EVP_CIPHER_CTX_block_size returns the block size, in bytes, of the cipher // underlying |ctx|, or one if the cipher is a stream cipher. It will crash if // no cipher has been configured. OPENSSL_EXPORT unsigned EVP_CIPHER_CTX_block_size(const EVP_CIPHER_CTX *ctx); // EVP_CIPHER_CTX_key_length returns the key size, in bytes, of the cipher // underlying |ctx| or zero if no cipher has been configured. OPENSSL_EXPORT unsigned EVP_CIPHER_CTX_key_length(const EVP_CIPHER_CTX *ctx); // EVP_CIPHER_CTX_iv_length returns the IV size, in bytes, of the cipher // underlying |ctx|. It will crash if no cipher has been configured. OPENSSL_EXPORT unsigned EVP_CIPHER_CTX_iv_length(const EVP_CIPHER_CTX *ctx); // EVP_CIPHER_CTX_get_app_data returns the opaque, application data pointer for // |ctx|, or NULL if none has been set. OPENSSL_EXPORT void *EVP_CIPHER_CTX_get_app_data(const EVP_CIPHER_CTX *ctx); // EVP_CIPHER_CTX_set_app_data sets the opaque, application data pointer for // |ctx| to |data|. 
OPENSSL_EXPORT void EVP_CIPHER_CTX_set_app_data(EVP_CIPHER_CTX *ctx, void *data); // EVP_CIPHER_CTX_flags returns a value which is the OR of zero or more // |EVP_CIPH_*| flags. It will crash if no cipher has been configured. OPENSSL_EXPORT uint32_t EVP_CIPHER_CTX_flags(const EVP_CIPHER_CTX *ctx); // EVP_CIPHER_CTX_mode returns one of the |EVP_CIPH_*| cipher mode values // enumerated below. It will crash if no cipher has been configured. OPENSSL_EXPORT uint32_t EVP_CIPHER_CTX_mode(const EVP_CIPHER_CTX *ctx); // EVP_CIPHER_CTX_ctrl is an |ioctl| like function. The |command| argument // should be one of the |EVP_CTRL_*| values. The |arg| and |ptr| arguments are // specific to the command in question. OPENSSL_EXPORT int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int command, int arg, void *ptr); // EVP_CIPHER_CTX_set_padding sets whether padding is enabled for |ctx| and // returns one. Pass a non-zero |pad| to enable padding (the default) or zero // to disable. OPENSSL_EXPORT int EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int pad); // EVP_CIPHER_CTX_set_key_length sets the key length for |ctx|. This is only // valid for ciphers that can take a variable length key. It returns one on // success and zero on error. OPENSSL_EXPORT int EVP_CIPHER_CTX_set_key_length(EVP_CIPHER_CTX *ctx, unsigned key_len); // Cipher accessors. // EVP_CIPHER_nid returns a NID identifying |cipher|. (For example, // |NID_aes_128_gcm|.) OPENSSL_EXPORT int EVP_CIPHER_nid(const EVP_CIPHER *cipher); // EVP_CIPHER_block_size returns the block size, in bytes, for |cipher|, or one // if |cipher| is a stream cipher. OPENSSL_EXPORT unsigned EVP_CIPHER_block_size(const EVP_CIPHER *cipher); // EVP_CIPHER_key_length returns the key size, in bytes, for |cipher|. If // |cipher| can take a variable key length then this function returns the // default key length and |EVP_CIPHER_flags| will return a value with // |EVP_CIPH_VARIABLE_LENGTH| set. 
OPENSSL_EXPORT unsigned EVP_CIPHER_key_length(const EVP_CIPHER *cipher); // EVP_CIPHER_iv_length returns the IV size, in bytes, of |cipher|, or zero if // |cipher| doesn't take an IV. OPENSSL_EXPORT unsigned EVP_CIPHER_iv_length(const EVP_CIPHER *cipher); // EVP_CIPHER_flags returns a value which is the OR of zero or more // |EVP_CIPH_*| flags. OPENSSL_EXPORT uint32_t EVP_CIPHER_flags(const EVP_CIPHER *cipher); // EVP_CIPHER_mode returns one of the cipher mode values enumerated below. OPENSSL_EXPORT uint32_t EVP_CIPHER_mode(const EVP_CIPHER *cipher); // Key derivation. // EVP_BytesToKey generates a key and IV for the cipher |type| by iterating // |md| |count| times using |data| and an optional |salt|, writing the result to // |key| and |iv|. If not NULL, the |key| and |iv| buffers must have enough // space to hold a key and IV for |type|, as returned by |EVP_CIPHER_key_length| // and |EVP_CIPHER_iv_length|. This function returns the length of the key // (without the IV) on success or zero on error. // // If |salt| is NULL, the empty string is used as the salt. Salt lengths other // than 0 and 8 are not supported by this function. Either of |key| or |iv| may // be NULL to skip that output. // // When the total data derived is less than the size of |md|, this function // implements PBKDF1 from RFC 8018. Otherwise, it generalizes PBKDF1 by // computing prepending the previous output to |data| and re-running PBKDF1 for // further output. // // This function is provided for compatibility with legacy uses of PBKDF1. New // applications should use a more modern algorithm, such as |EVP_PBE_scrypt|. OPENSSL_EXPORT int EVP_BytesToKey(const EVP_CIPHER *type, const EVP_MD *md, const uint8_t salt[8], const uint8_t *data, size_t data_len, unsigned count, uint8_t *key, uint8_t *iv); // Cipher modes (for |EVP_CIPHER_mode|). 
#define EVP_CIPH_STREAM_CIPHER 0x0 #define EVP_CIPH_ECB_MODE 0x1 #define EVP_CIPH_CBC_MODE 0x2 #define EVP_CIPH_CFB_MODE 0x3 #define EVP_CIPH_OFB_MODE 0x4 #define EVP_CIPH_CTR_MODE 0x5 #define EVP_CIPH_GCM_MODE 0x6 #define EVP_CIPH_XTS_MODE 0x7 // The following values are never returned from |EVP_CIPHER_mode| and are // included only to make it easier to compile code with BoringSSL. #define EVP_CIPH_CCM_MODE 0x8 #define EVP_CIPH_OCB_MODE 0x9 #define EVP_CIPH_WRAP_MODE 0xa // Cipher flags (for |EVP_CIPHER_flags|). // EVP_CIPH_VARIABLE_LENGTH indicates that the cipher takes a variable length // key. #define EVP_CIPH_VARIABLE_LENGTH 0x40 // EVP_CIPH_ALWAYS_CALL_INIT indicates that the |init| function for the cipher // should always be called when initialising a new operation, even if the key // is NULL to indicate that the same key is being used. #define EVP_CIPH_ALWAYS_CALL_INIT 0x80 // EVP_CIPH_CUSTOM_IV indicates that the cipher manages the IV itself rather // than keeping it in the |iv| member of |EVP_CIPHER_CTX|. #define EVP_CIPH_CUSTOM_IV 0x100 // EVP_CIPH_CTRL_INIT indicates that EVP_CTRL_INIT should be used when // initialising an |EVP_CIPHER_CTX|. #define EVP_CIPH_CTRL_INIT 0x200 // EVP_CIPH_FLAG_CUSTOM_CIPHER indicates that the cipher manages blocking // itself. This causes EVP_(En|De)crypt_ex to be simple wrapper functions. #define EVP_CIPH_FLAG_CUSTOM_CIPHER 0x400 // EVP_CIPH_FLAG_AEAD_CIPHER specifies that the cipher is an AEAD. This is an // older version of the proper AEAD interface. See aead.h for the current // one. #define EVP_CIPH_FLAG_AEAD_CIPHER 0x800 // EVP_CIPH_CUSTOM_COPY indicates that the |ctrl| callback should be called // with |EVP_CTRL_COPY| at the end of normal |EVP_CIPHER_CTX_copy| // processing. #define EVP_CIPH_CUSTOM_COPY 0x1000 // EVP_CIPH_FLAG_NON_FIPS_ALLOW is meaningless. In OpenSSL it permits non-FIPS // algorithms in FIPS mode. 
But BoringSSL FIPS mode doesn't prohibit algorithms // (it's up to the caller to use the FIPS module in a fashion compliant with // their needs). Thus this exists only to allow code to compile. #define EVP_CIPH_FLAG_NON_FIPS_ALLOW 0 // Deprecated functions // EVP_CipherInit acts like EVP_CipherInit_ex except that |EVP_CIPHER_CTX_init| // is called on |cipher| first, if |cipher| is not NULL. OPENSSL_EXPORT int EVP_CipherInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, const uint8_t *key, const uint8_t *iv, int enc); // EVP_EncryptInit calls |EVP_CipherInit| with |enc| equal to one. OPENSSL_EXPORT int EVP_EncryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, const uint8_t *key, const uint8_t *iv); // EVP_DecryptInit calls |EVP_CipherInit| with |enc| equal to zero. OPENSSL_EXPORT int EVP_DecryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, const uint8_t *key, const uint8_t *iv); // EVP_CipherFinal calls |EVP_CipherFinal_ex|. OPENSSL_EXPORT int EVP_CipherFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len); // EVP_EncryptFinal calls |EVP_EncryptFinal_ex|. OPENSSL_EXPORT int EVP_EncryptFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len); // EVP_DecryptFinal calls |EVP_DecryptFinal_ex|. OPENSSL_EXPORT int EVP_DecryptFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len); // EVP_Cipher historically exposed an internal implementation detail of |ctx| // and should not be used. Use |EVP_CipherUpdate| and |EVP_CipherFinal_ex| // instead. // // If |ctx|'s cipher does not have the |EVP_CIPH_FLAG_CUSTOM_CIPHER| flag, it // encrypts or decrypts |in_len| bytes from |in| and writes the resulting // |in_len| bytes to |out|. It returns one on success and zero on error. // |in_len| must be a multiple of the cipher's block size, or the behavior is // undefined. // // TODO(davidben): Rather than being undefined (it'll often round the length up // and likely read past the buffer), just fail the operation.
// // If |ctx|'s cipher has the |EVP_CIPH_FLAG_CUSTOM_CIPHER| flag, it runs in one // of two modes: If |in| is non-NULL, it behaves like |EVP_CipherUpdate|. If // |in| is NULL, it behaves like |EVP_CipherFinal_ex|. In both cases, it returns // |*out_len| on success and -1 on error. // // WARNING: The two possible calling conventions of this function signal errors // incompatibly. In the first, zero indicates an error. In the second, zero // indicates success with zero bytes of output. OPENSSL_EXPORT int EVP_Cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t in_len); // EVP_add_cipher_alias does nothing and returns one. OPENSSL_EXPORT int EVP_add_cipher_alias(const char *a, const char *b); // EVP_get_cipherbyname returns an |EVP_CIPHER| given a human readable name in // |name|, or NULL if the name is unknown. Note using this function links almost // every cipher implemented by BoringSSL into the binary, not just the ones the // caller requests. Size-conscious callers, such as client software, should not // use this function. OPENSSL_EXPORT const EVP_CIPHER *EVP_get_cipherbyname(const char *name); // These AEADs are deprecated AES-GCM implementations that set // |EVP_CIPH_FLAG_CUSTOM_CIPHER|. Use |EVP_aead_aes_128_gcm| and // |EVP_aead_aes_256_gcm| instead. // // WARNING: Although these APIs allow streaming an individual AES-GCM operation, // this is not secure. Until calling |EVP_DecryptFinal_ex|, the tag has not yet // been checked and output released by |EVP_DecryptUpdate| is unauthenticated // and easily manipulated by attackers. Callers must buffer the output and may // not act on it until the entire operation is complete. OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_128_gcm(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_gcm(void); // These are deprecated, 192-bit version of AES. 
OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_ecb(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_cbc(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_ctr(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_gcm(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_ofb(void); // EVP_des_ede3_ecb is an alias for |EVP_des_ede3|. Use the former instead. OPENSSL_EXPORT const EVP_CIPHER *EVP_des_ede3_ecb(void); // EVP_aes_128_cfb128 is only available in decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_128_cfb128(void); // EVP_aes_128_cfb is an alias for |EVP_aes_128_cfb128| and is only available in // decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_128_cfb(void); // EVP_aes_192_cfb128 is only available in decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_cfb128(void); // EVP_aes_192_cfb is an alias for |EVP_aes_192_cfb128| and is only available in // decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_cfb(void); // EVP_aes_256_cfb128 is only available in decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_cfb128(void); // EVP_aes_256_cfb is an alias for |EVP_aes_256_cfb128| and is only available in // decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_cfb(void); // EVP_bf_ecb is Blowfish in ECB mode and is only available in decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_bf_ecb(void); // EVP_bf_cbc is Blowfish in CBC mode and is only available in decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_bf_cbc(void); // EVP_bf_cfb is Blowfish in 64-bit CFB mode and is only available in decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_bf_cfb(void); // EVP_cast5_ecb is CAST5 in ECB mode and is only available in decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_cast5_ecb(void); // EVP_cast5_cbc is CAST5 in CBC mode and is only available in decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_cast5_cbc(void); // The following flags do nothing and are included only to make it easier to // compile code with BoringSSL. 
#define EVP_CIPHER_CTX_FLAG_WRAP_ALLOW 0 // EVP_CIPHER_CTX_set_flags does nothing. OPENSSL_EXPORT void EVP_CIPHER_CTX_set_flags(const EVP_CIPHER_CTX *ctx, uint32_t flags); // Private functions. // EVP_CIPH_NO_PADDING disables padding in block ciphers. #define EVP_CIPH_NO_PADDING 0x800 // The following are |EVP_CIPHER_CTX_ctrl| commands. #define EVP_CTRL_INIT 0x0 #define EVP_CTRL_SET_KEY_LENGTH 0x1 #define EVP_CTRL_GET_RC2_KEY_BITS 0x2 #define EVP_CTRL_SET_RC2_KEY_BITS 0x3 #define EVP_CTRL_GET_RC5_ROUNDS 0x4 #define EVP_CTRL_SET_RC5_ROUNDS 0x5 #define EVP_CTRL_RAND_KEY 0x6 #define EVP_CTRL_PBE_PRF_NID 0x7 #define EVP_CTRL_COPY 0x8 #define EVP_CTRL_AEAD_SET_IVLEN 0x9 #define EVP_CTRL_AEAD_GET_TAG 0x10 #define EVP_CTRL_AEAD_SET_TAG 0x11 #define EVP_CTRL_AEAD_SET_IV_FIXED 0x12 #define EVP_CTRL_GCM_IV_GEN 0x13 #define EVP_CTRL_AEAD_SET_MAC_KEY 0x17 // EVP_CTRL_GCM_SET_IV_INV sets the GCM invocation field, decrypt only #define EVP_CTRL_GCM_SET_IV_INV 0x18 #define EVP_CTRL_GET_IVLEN 0x19 // The following constants are unused. #define EVP_GCM_TLS_FIXED_IV_LEN 4 #define EVP_GCM_TLS_EXPLICIT_IV_LEN 8 #define EVP_GCM_TLS_TAG_LEN 16 // The following are legacy aliases for AEAD |EVP_CIPHER_CTX_ctrl| values. #define EVP_CTRL_GCM_SET_IVLEN EVP_CTRL_AEAD_SET_IVLEN #define EVP_CTRL_GCM_GET_TAG EVP_CTRL_AEAD_GET_TAG #define EVP_CTRL_GCM_SET_TAG EVP_CTRL_AEAD_SET_TAG #define EVP_CTRL_GCM_SET_IV_FIXED EVP_CTRL_AEAD_SET_IV_FIXED #define EVP_MAX_KEY_LENGTH 64 #define EVP_MAX_IV_LENGTH 16 #define EVP_MAX_BLOCK_LENGTH 32 struct evp_cipher_ctx_st { // cipher contains the underlying cipher for this context. const EVP_CIPHER *cipher; // app_data is a pointer to opaque, user data. void *app_data; // application stuff // cipher_data points to the |cipher| specific state. void *cipher_data; // key_len contains the length of the key, which may differ from // |cipher->key_len| if the cipher can take a variable key length. unsigned key_len; // encrypt is one if encrypting and zero if decrypting. 
int encrypt; // flags contains the OR of zero or more |EVP_CIPH_*| flags, above. uint32_t flags; // oiv contains the original IV value. uint8_t oiv[EVP_MAX_IV_LENGTH]; // iv contains the current IV value, which may have been updated. uint8_t iv[EVP_MAX_IV_LENGTH]; // buf contains a partial block which is used by, for example, CTR mode to // store unused keystream bytes. uint8_t buf[EVP_MAX_BLOCK_LENGTH]; // buf_len contains the number of bytes of a partial block contained in // |buf|. int buf_len; // num contains the number of bytes of |iv| which are valid for modes that // manage partial blocks themselves. unsigned num; // final_used is non-zero if the |final| buffer contains plaintext. int final_used; uint8_t final[EVP_MAX_BLOCK_LENGTH]; // possible final block // Has this structure been rendered unusable by a failure. int poisoned; } /* EVP_CIPHER_CTX */; typedef struct evp_cipher_info_st { const EVP_CIPHER *cipher; unsigned char iv[EVP_MAX_IV_LENGTH]; } EVP_CIPHER_INFO; #if defined(__cplusplus) } // extern C #if !defined(BORINGSSL_NO_CXX) extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(EVP_CIPHER_CTX, EVP_CIPHER_CTX_free) using ScopedEVP_CIPHER_CTX = internal::StackAllocated; BSSL_NAMESPACE_END } // extern C++ #endif #endif #define CIPHER_R_AES_KEY_SETUP_FAILED 100 #define CIPHER_R_BAD_DECRYPT 101 #define CIPHER_R_BAD_KEY_LENGTH 102 #define CIPHER_R_BUFFER_TOO_SMALL 103 #define CIPHER_R_CTRL_NOT_IMPLEMENTED 104 #define CIPHER_R_CTRL_OPERATION_NOT_IMPLEMENTED 105 #define CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH 106 #define CIPHER_R_INITIALIZATION_ERROR 107 #define CIPHER_R_INPUT_NOT_INITIALIZED 108 #define CIPHER_R_INVALID_AD_SIZE 109 #define CIPHER_R_INVALID_KEY_LENGTH 110 #define CIPHER_R_INVALID_NONCE_SIZE 111 #define CIPHER_R_INVALID_OPERATION 112 #define CIPHER_R_IV_TOO_LARGE 113 #define CIPHER_R_NO_CIPHER_SET 114 #define CIPHER_R_OUTPUT_ALIASES_INPUT 115 #define CIPHER_R_TAG_TOO_LARGE 116 #define CIPHER_R_TOO_LARGE 117 #define 
CIPHER_R_UNSUPPORTED_AD_SIZE 118 #define CIPHER_R_UNSUPPORTED_INPUT_SIZE 119 #define CIPHER_R_UNSUPPORTED_KEY_SIZE 120 #define CIPHER_R_UNSUPPORTED_NONCE_SIZE 121 #define CIPHER_R_UNSUPPORTED_TAG_SIZE 122 #define CIPHER_R_WRONG_FINAL_BLOCK_LENGTH 123 #define CIPHER_R_NO_DIRECTION_SET 124 #define CIPHER_R_INVALID_NONCE 125 #endif // OPENSSL_HEADER_CIPHER_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_cmac.h ================================================ /* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CMAC_H #define OPENSSL_HEADER_CMAC_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // CMAC. // // CMAC is a MAC based on AES-CBC and defined in // https://tools.ietf.org/html/rfc4493#section-2.3. // One-shot functions. // AES_CMAC calculates the 16-byte, CMAC authenticator of |in_len| bytes of // |in| and writes it to |out|. The |key_len| may be 16 or 32 bytes to select // between AES-128 and AES-256. It returns one on success or zero on error. OPENSSL_EXPORT int AES_CMAC(uint8_t out[16], const uint8_t *key, size_t key_len, const uint8_t *in, size_t in_len); // Incremental interface. 
// CMAC_CTX_new allocates a fresh |CMAC_CTX| and returns it, or NULL on // error. OPENSSL_EXPORT CMAC_CTX *CMAC_CTX_new(void); // CMAC_CTX_free frees a |CMAC_CTX|. OPENSSL_EXPORT void CMAC_CTX_free(CMAC_CTX *ctx); // CMAC_CTX_copy sets |out| to be a duplicate of the current state |in|. It // returns one on success and zero on error. OPENSSL_EXPORT int CMAC_CTX_copy(CMAC_CTX *out, const CMAC_CTX *in); // CMAC_Init configures |ctx| to use the given |key| and |cipher|. The CMAC RFC // only specifies the use of AES-128 thus |key_len| should be 16 and |cipher| // should be |EVP_aes_128_cbc()|. However, this implementation also supports // AES-256 by setting |key_len| to 32 and |cipher| to |EVP_aes_256_cbc()|. The // |engine| argument is ignored. // // It returns one on success or zero on error. OPENSSL_EXPORT int CMAC_Init(CMAC_CTX *ctx, const void *key, size_t key_len, const EVP_CIPHER *cipher, ENGINE *engine); // CMAC_Reset resets |ctx| so that a fresh message can be authenticated. OPENSSL_EXPORT int CMAC_Reset(CMAC_CTX *ctx); // CMAC_Update processes |in_len| bytes of message from |in|. It returns one on // success or zero on error. OPENSSL_EXPORT int CMAC_Update(CMAC_CTX *ctx, const uint8_t *in, size_t in_len); // CMAC_Final sets |*out_len| to 16 and, if |out| is not NULL, writes 16 bytes // of authenticator to it. It returns one on success or zero on error. OPENSSL_EXPORT int CMAC_Final(CMAC_CTX *ctx, uint8_t *out, size_t *out_len); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(CMAC_CTX, CMAC_CTX_free) BSSL_NAMESPACE_END } // extern C++ #endif #endif // OPENSSL_HEADER_CMAC_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_conf.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). 
You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_CONF_H #define OPENSSL_HEADER_CONF_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_stack.h" #include "CNIOBoringSSL_lhash.h" #if defined(__cplusplus) extern "C" { #endif // Config files. // // This library handles OpenSSL's config files, which look like: // // # Comment // // # This key is in the default section. // key=value // // [section_name] // key2=value2 // // Config files are represented by a |CONF|. Use of this module is strongly // discouraged. It is a remnant of the OpenSSL command-line tool. Parsing an // untrusted input as a config file risks string injection and denial of service // vulnerabilities. struct conf_value_st { char *section; char *name; char *value; }; DEFINE_STACK_OF(CONF_VALUE) DECLARE_LHASH_OF(CONF_VALUE) // NCONF_new returns a fresh, empty |CONF|, or NULL on error. The |method| // argument must be NULL. OPENSSL_EXPORT CONF *NCONF_new(void *method); // NCONF_free frees all the data owned by |conf| and then |conf| itself. OPENSSL_EXPORT void NCONF_free(CONF *conf); // NCONF_load parses the file named |filename| and adds the values found to // |conf|. It returns one on success and zero on error. In the event of an // error, if |out_error_line| is not NULL, |*out_error_line| is set to the // number of the line that contained the error. OPENSSL_EXPORT int NCONF_load(CONF *conf, const char *filename, long *out_error_line); // NCONF_load_bio acts like |NCONF_load| but reads from |bio| rather than from // a named file. OPENSSL_EXPORT int NCONF_load_bio(CONF *conf, BIO *bio, long *out_error_line); // NCONF_get_section returns a stack of values for a given section in |conf|. // If |section| is NULL, the default section is returned. It returns NULL on // error. 
OPENSSL_EXPORT const STACK_OF(CONF_VALUE) *NCONF_get_section( const CONF *conf, const char *section); // NCONF_get_string returns the value of the key |name|, in section |section|. // The |section| argument may be NULL to indicate the default section. It // returns the value or NULL on error. OPENSSL_EXPORT const char *NCONF_get_string(const CONF *conf, const char *section, const char *name); // Deprecated functions // These defines do nothing but are provided to make old code easier to // compile. #define CONF_MFLAGS_DEFAULT_SECTION 0 #define CONF_MFLAGS_IGNORE_MISSING_FILE 0 // CONF_modules_load_file returns one. BoringSSL is defined to have no config // file options, thus loading from |filename| always succeeds by doing nothing. OPENSSL_EXPORT int CONF_modules_load_file(const char *filename, const char *appname, unsigned long flags); // CONF_modules_free does nothing. OPENSSL_EXPORT void CONF_modules_free(void); // OPENSSL_config does nothing. OPENSSL_EXPORT void OPENSSL_config(const char *config_name); // OPENSSL_no_config does nothing. 
OPENSSL_EXPORT void OPENSSL_no_config(void); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(CONF, NCONF_free) BSSL_NAMESPACE_END } // extern C++ #endif #define CONF_R_LIST_CANNOT_BE_NULL 100 #define CONF_R_MISSING_CLOSE_SQUARE_BRACKET 101 #define CONF_R_MISSING_EQUAL_SIGN 102 #define CONF_R_NO_CLOSE_BRACE 103 #define CONF_R_UNABLE_TO_CREATE_NEW_SECTION 104 #define CONF_R_VARIABLE_HAS_NO_VALUE 105 #define CONF_R_VARIABLE_EXPANSION_TOO_LONG 106 #define CONF_R_VARIABLE_EXPANSION_NOT_SUPPORTED 107 #endif // OPENSSL_HEADER_CONF_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_cpu.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ // This header is provided for compatibility with older revisions of BoringSSL. // TODO(davidben): Remove this header. 
#include "CNIOBoringSSL_crypto.h" ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_crypto.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CRYPTO_H #define OPENSSL_HEADER_CRYPTO_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_sha.h" // Upstream OpenSSL defines |OPENSSL_malloc|, etc., in crypto.h rather than // mem.h. #include "CNIOBoringSSL_mem.h" // Upstream OpenSSL defines |CRYPTO_LOCK|, etc., in crypto.h rather than // thread.h. #include "CNIOBoringSSL_thread.h" #if defined(__cplusplus) extern "C" { #endif // crypto.h contains functions for library-wide initialization and properties. // CRYPTO_is_confidential_build returns one if the linked version of BoringSSL // has been built with the BORINGSSL_CONFIDENTIAL define and zero otherwise. // // This is used by some consumers to identify whether they are using an // internal version of BoringSSL. OPENSSL_EXPORT int CRYPTO_is_confidential_build(void); // CRYPTO_has_asm returns one unless BoringSSL was built with OPENSSL_NO_ASM, // in which case it returns zero. OPENSSL_EXPORT int CRYPTO_has_asm(void); // BORINGSSL_self_test triggers the FIPS KAT-based self tests. 
It returns one on // success and zero on error. OPENSSL_EXPORT int BORINGSSL_self_test(void); // BORINGSSL_integrity_test triggers the module's integrity test where the code // and data of the module is matched against a hash injected at build time. It // returns one on success or zero if there's a mismatch. This function only // exists if the module was built in FIPS mode without ASAN. OPENSSL_EXPORT int BORINGSSL_integrity_test(void); // CRYPTO_pre_sandbox_init initializes the crypto library, pre-acquiring some // unusual resources to aid running in sandboxed environments. It is safe to // call this function multiple times and concurrently from multiple threads. // // For more details on using BoringSSL in a sandboxed environment, see // SANDBOXING.md in the source tree. OPENSSL_EXPORT void CRYPTO_pre_sandbox_init(void); #if defined(OPENSSL_ARM) && defined(OPENSSL_LINUX) && \ !defined(OPENSSL_STATIC_ARMCAP) // CRYPTO_needs_hwcap2_workaround returns one if the ARMv8 AArch32 AT_HWCAP2 // workaround was needed. See https://crbug.com/boringssl/46. OPENSSL_EXPORT int CRYPTO_needs_hwcap2_workaround(void); #endif // OPENSSL_ARM && OPENSSL_LINUX && !OPENSSL_STATIC_ARMCAP // FIPS monitoring // FIPS_mode returns zero unless BoringSSL is built with BORINGSSL_FIPS, in // which case it returns one. OPENSSL_EXPORT int FIPS_mode(void); // fips_counter_t denotes specific APIs/algorithms. A counter is maintained for // each in FIPS mode so that tests can be written to assert that the expected, // FIPS functions are being called by a certain piece of code. enum fips_counter_t { fips_counter_evp_aes_128_gcm = 0, fips_counter_evp_aes_256_gcm = 1, fips_counter_evp_aes_128_ctr = 2, fips_counter_evp_aes_256_ctr = 3, fips_counter_max = 3, }; // FIPS_read_counter returns a counter of the number of times the specific // function denoted by |counter| has been used. This always returns zero unless // BoringSSL was built with BORINGSSL_FIPS_COUNTERS defined. 
OPENSSL_EXPORT size_t FIPS_read_counter(enum fips_counter_t counter); // Deprecated functions. // OPENSSL_VERSION_TEXT contains a string that identifies the version of // “OpenSSL”. node.js requires a version number in this text. #define OPENSSL_VERSION_TEXT "OpenSSL 1.1.1 (compatible; BoringSSL)" #define OPENSSL_VERSION 0 #define OPENSSL_CFLAGS 1 #define OPENSSL_BUILT_ON 2 #define OPENSSL_PLATFORM 3 #define OPENSSL_DIR 4 // OpenSSL_version is a compatibility function that returns the string // "BoringSSL" if |which| is |OPENSSL_VERSION| and placeholder strings // otherwise. OPENSSL_EXPORT const char *OpenSSL_version(int which); #define SSLEAY_VERSION OPENSSL_VERSION #define SSLEAY_CFLAGS OPENSSL_CFLAGS #define SSLEAY_BUILT_ON OPENSSL_BUILT_ON #define SSLEAY_PLATFORM OPENSSL_PLATFORM #define SSLEAY_DIR OPENSSL_DIR // SSLeay_version calls |OpenSSL_version|. OPENSSL_EXPORT const char *SSLeay_version(int which); // SSLeay is a compatibility function that returns OPENSSL_VERSION_NUMBER from // base.h. OPENSSL_EXPORT unsigned long SSLeay(void); // OpenSSL_version_num is a compatibility function that returns // OPENSSL_VERSION_NUMBER from base.h. OPENSSL_EXPORT unsigned long OpenSSL_version_num(void); // CRYPTO_malloc_init returns one. OPENSSL_EXPORT int CRYPTO_malloc_init(void); // OPENSSL_malloc_init returns one. OPENSSL_EXPORT int OPENSSL_malloc_init(void); // ENGINE_load_builtin_engines does nothing. OPENSSL_EXPORT void ENGINE_load_builtin_engines(void); // ENGINE_register_all_complete returns one. OPENSSL_EXPORT int ENGINE_register_all_complete(void); // OPENSSL_load_builtin_modules does nothing. OPENSSL_EXPORT void OPENSSL_load_builtin_modules(void); // OPENSSL_INIT_* are options in OpenSSL to configure the library. In BoringSSL, // they do nothing. 
#define OPENSSL_INIT_NO_LOAD_CRYPTO_STRINGS 0 #define OPENSSL_INIT_LOAD_CRYPTO_STRINGS 0 #define OPENSSL_INIT_ADD_ALL_CIPHERS 0 #define OPENSSL_INIT_ADD_ALL_DIGESTS 0 #define OPENSSL_INIT_NO_ADD_ALL_CIPHERS 0 #define OPENSSL_INIT_NO_ADD_ALL_DIGESTS 0 #define OPENSSL_INIT_LOAD_CONFIG 0 #define OPENSSL_INIT_NO_LOAD_CONFIG 0 #define OPENSSL_INIT_NO_ATEXIT 0 #define OPENSSL_INIT_ATFORK 0 #define OPENSSL_INIT_ENGINE_RDRAND 0 #define OPENSSL_INIT_ENGINE_DYNAMIC 0 #define OPENSSL_INIT_ENGINE_OPENSSL 0 #define OPENSSL_INIT_ENGINE_CRYPTODEV 0 #define OPENSSL_INIT_ENGINE_CAPI 0 #define OPENSSL_INIT_ENGINE_PADLOCK 0 #define OPENSSL_INIT_ENGINE_AFALG 0 #define OPENSSL_INIT_ENGINE_ALL_BUILTIN 0 // OPENSSL_init_crypto returns one. OPENSSL_EXPORT int OPENSSL_init_crypto(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings); // OPENSSL_cleanup does nothing. OPENSSL_EXPORT void OPENSSL_cleanup(void); // FIPS_mode_set returns one if |on| matches whether BoringSSL was built with // |BORINGSSL_FIPS| and zero otherwise. OPENSSL_EXPORT int FIPS_mode_set(int on); // FIPS_module_name returns the name of the FIPS module. OPENSSL_EXPORT const char *FIPS_module_name(void); // FIPS_module_hash returns the 32-byte hash of the FIPS module. OPENSSL_EXPORT const uint8_t *FIPS_module_hash(void); // FIPS_version returns the version of the FIPS module, or zero if the build // isn't exactly at a verified version. The version, expressed in base 10, will // be a date in the form yyyymmddXX where XX is often "00", but can be // incremented if multiple versions are defined on a single day. // // (This format exceeds a |uint32_t| in the year 4294.) OPENSSL_EXPORT uint32_t FIPS_version(void); // FIPS_query_algorithm_status returns one if |algorithm| is FIPS validated in // the current BoringSSL and zero otherwise. 
OPENSSL_EXPORT int FIPS_query_algorithm_status(const char *algorithm); #if defined(OPENSSL_ARM) && defined(OPENSSL_LINUX) && \ !defined(OPENSSL_STATIC_ARMCAP) // CRYPTO_has_broken_NEON returns zero. OPENSSL_EXPORT int CRYPTO_has_broken_NEON(void); #endif // CRYPTO_library_init does nothing. Historically, it was needed in some build // configurations to initialize the library. This is no longer necessary. OPENSSL_EXPORT void CRYPTO_library_init(void); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_CRYPTO_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_ctrdrbg.h ================================================ /* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CTRDRBG_H #define OPENSSL_HEADER_CTRDRBG_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // FIPS pseudo-random number generator. // CTR-DRBG state objects. // // CTR_DRBG_STATE contains the state of a FIPS AES-CTR-based pseudo-random // number generator. If BoringSSL was built in FIPS mode then this is a FIPS // Approved algorithm. // CTR_DRBG_ENTROPY_LEN is the number of bytes of input entropy. See SP // 800-90Ar1, table 3. 
#define CTR_DRBG_ENTROPY_LEN 48 // CTR_DRBG_MAX_GENERATE_LENGTH is the maximum number of bytes that can be // generated in a single call to |CTR_DRBG_generate|. #define CTR_DRBG_MAX_GENERATE_LENGTH 65536 // CTR_DRBG_new returns an initialized |CTR_DRBG_STATE|, or NULL if either // allocation failed or if |personalization_len| is invalid. OPENSSL_EXPORT CTR_DRBG_STATE *CTR_DRBG_new( const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], const uint8_t *personalization, size_t personalization_len); // CTR_DRBG_free frees |state| if non-NULL, or else does nothing. OPENSSL_EXPORT void CTR_DRBG_free(CTR_DRBG_STATE* state); // CTR_DRBG_reseed reseeds |drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of entropy // in |entropy| and, optionally, up to |CTR_DRBG_ENTROPY_LEN| bytes of // additional data. It returns one on success or zero on error. OPENSSL_EXPORT int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], const uint8_t *additional_data, size_t additional_data_len); // CTR_DRBG_generate processes up to |CTR_DRBG_ENTROPY_LEN| bytes of additional // data (if any) and then writes |out_len| random bytes to |out|, where // |out_len| <= |CTR_DRBG_MAX_GENERATE_LENGTH|. It returns one on success or // zero on error. OPENSSL_EXPORT int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len, const uint8_t *additional_data, size_t additional_data_len); // CTR_DRBG_clear zeroises the state of |drbg|. 
OPENSSL_EXPORT void CTR_DRBG_clear(CTR_DRBG_STATE *drbg); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(CTR_DRBG_STATE, CTR_DRBG_free) BSSL_NAMESPACE_END } // extern C++ #endif #endif // OPENSSL_HEADER_CTRDRBG_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_curve25519.h ================================================ /* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CURVE25519_H #define OPENSSL_HEADER_CURVE25519_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // Curve25519. // // Curve25519 is an elliptic curve. See https://tools.ietf.org/html/rfc7748. // X25519. // // X25519 is the Diffie-Hellman primitive built from curve25519. It is // sometimes referred to as “curve25519”, but “X25519” is a more precise name. // See http://cr.yp.to/ecdh.html and https://tools.ietf.org/html/rfc7748. #define X25519_PRIVATE_KEY_LEN 32 #define X25519_PUBLIC_VALUE_LEN 32 #define X25519_SHARED_KEY_LEN 32 // X25519_keypair sets |out_public_value| and |out_private_key| to a freshly // generated, public–private key pair. 
OPENSSL_EXPORT void X25519_keypair(uint8_t out_public_value[32], uint8_t out_private_key[32]); // X25519 writes a shared key to |out_shared_key| that is calculated from the // given private key and the peer's public value. It returns one on success and // zero on error. // // Don't use the shared key directly, rather use a KDF and also include the two // public values as inputs. OPENSSL_EXPORT int X25519(uint8_t out_shared_key[32], const uint8_t private_key[32], const uint8_t peer_public_value[32]); // X25519_public_from_private calculates a Diffie-Hellman public value from the // given private key and writes it to |out_public_value|. OPENSSL_EXPORT void X25519_public_from_private(uint8_t out_public_value[32], const uint8_t private_key[32]); // Ed25519. // // Ed25519 is a signature scheme using a twisted-Edwards curve that is // birationally equivalent to curve25519. // // Note that, unlike RFC 8032's formulation, our private key representation // includes a public key suffix to make multiple key signing operations with the // same key more efficient. The RFC 8032 private key is referred to in this // implementation as the "seed" and is the first 32 bytes of our private key. #define ED25519_PRIVATE_KEY_LEN 64 #define ED25519_PUBLIC_KEY_LEN 32 #define ED25519_SIGNATURE_LEN 64 // ED25519_keypair sets |out_public_key| and |out_private_key| to a freshly // generated, public–private key pair. OPENSSL_EXPORT void ED25519_keypair(uint8_t out_public_key[32], uint8_t out_private_key[64]); // ED25519_sign sets |out_sig| to be a signature of |message_len| bytes from // |message| using |private_key|. It returns one on success or zero on // allocation failure. OPENSSL_EXPORT int ED25519_sign(uint8_t out_sig[64], const uint8_t *message, size_t message_len, const uint8_t private_key[64]); // ED25519_verify returns one iff |signature| is a valid signature, by // |public_key| of |message_len| bytes from |message|. It returns zero // otherwise. 
OPENSSL_EXPORT int ED25519_verify(const uint8_t *message, size_t message_len, const uint8_t signature[64], const uint8_t public_key[32]); // ED25519_keypair_from_seed calculates a public and private key from an // Ed25519 “seed”. Seed values are not exposed by this API (although they // happen to be the first 32 bytes of a private key) so this function is for // interoperating with systems that may store just a seed instead of a full // private key. OPENSSL_EXPORT void ED25519_keypair_from_seed(uint8_t out_public_key[32], uint8_t out_private_key[64], const uint8_t seed[32]); // SPAKE2. // // SPAKE2 is a password-authenticated key-exchange. It allows two parties, // who share a low-entropy secret (i.e. password), to agree on a shared key. // An attacker can only make one guess of the password per execution of the // protocol. // // See https://tools.ietf.org/html/draft-irtf-cfrg-spake2-02. // spake2_role_t enumerates the different “roles” in SPAKE2. The protocol // requires that the symmetry of the two parties be broken so one participant // must be “Alice” and the other be “Bob”. enum spake2_role_t { spake2_role_alice, spake2_role_bob, }; // SPAKE2_CTX_new creates a new |SPAKE2_CTX| (which can only be used for a // single execution of the protocol). SPAKE2 requires the symmetry of the two // parties to be broken which is indicated via |my_role| – each party must pass // a different value for this argument. // // The |my_name| and |their_name| arguments allow optional, opaque names to be // bound into the protocol. For example MAC addresses, hostnames, usernames // etc. These values are not exposed and can avoid context-confusion attacks // when a password is shared between several devices. OPENSSL_EXPORT SPAKE2_CTX *SPAKE2_CTX_new( enum spake2_role_t my_role, const uint8_t *my_name, size_t my_name_len, const uint8_t *their_name, size_t their_name_len); // SPAKE2_CTX_free frees |ctx| and all the resources that it has allocated. 
OPENSSL_EXPORT void SPAKE2_CTX_free(SPAKE2_CTX *ctx); // SPAKE2_MAX_MSG_SIZE is the maximum size of a SPAKE2 message. #define SPAKE2_MAX_MSG_SIZE 32 // SPAKE2_generate_msg generates a SPAKE2 message given |password|, writes // it to |out| and sets |*out_len| to the number of bytes written. // // At most |max_out_len| bytes are written to |out| and, in order to ensure // success, |max_out_len| should be at least |SPAKE2_MAX_MSG_SIZE| bytes. // // This function can only be called once for a given |SPAKE2_CTX|. // // It returns one on success and zero on error. OPENSSL_EXPORT int SPAKE2_generate_msg(SPAKE2_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *password, size_t password_len); // SPAKE2_MAX_KEY_SIZE is the maximum amount of key material that SPAKE2 will // produce. #define SPAKE2_MAX_KEY_SIZE 64 // SPAKE2_process_msg completes the SPAKE2 exchange given the peer's message in // |their_msg|, writes at most |max_out_key_len| bytes to |out_key| and sets // |*out_key_len| to the number of bytes written. // // The resulting keying material is suitable for: // - Using directly in a key-confirmation step: i.e. each side could // transmit a hash of their role, a channel-binding value and the key // material to prove to the other side that they know the shared key. // - Using as input keying material to HKDF to generate a variety of subkeys // for encryption etc. // // If |max_out_key_len| is smaller than the amount of key material generated // then the key is silently truncated. If you want to ensure that no truncation // occurs then |max_out_key_len| should be at least |SPAKE2_MAX_KEY_SIZE|. // // You must call |SPAKE2_generate_msg| on a given |SPAKE2_CTX| before calling // this function. On successful return, |ctx| is complete and calling // |SPAKE2_CTX_free| is the only acceptable operation on it. // // Returns one on success or zero on error. 
OPENSSL_EXPORT int SPAKE2_process_msg(SPAKE2_CTX *ctx, uint8_t *out_key, size_t *out_key_len, size_t max_out_key_len, const uint8_t *their_msg, size_t their_msg_len); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(SPAKE2_CTX, SPAKE2_CTX_free) BSSL_NAMESPACE_END } // extern C++ #endif #endif // OPENSSL_HEADER_CURVE25519_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_des.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_DES_H #define OPENSSL_HEADER_DES_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // DES. // // This module is deprecated and retained for legacy reasons only. It is slow // and may leak key material with timing or cache side channels. Moreover, // single-keyed DES is broken and can be brute-forced in under a day. // // Use a modern cipher, such as AES-GCM or ChaCha20-Poly1305, instead. typedef struct DES_cblock_st { uint8_t bytes[8]; } DES_cblock; typedef struct DES_ks { uint32_t subkeys[16][2]; } DES_key_schedule; #define DES_KEY_SZ (sizeof(DES_cblock)) #define DES_SCHEDULE_SZ (sizeof(DES_key_schedule)) #define DES_ENCRYPT 1 #define DES_DECRYPT 0 #define DES_CBC_MODE 0 #define DES_PCBC_MODE 1 // DES_set_key performs a key schedule and initialises |schedule| with |key|. OPENSSL_EXPORT void DES_set_key(const DES_cblock *key, DES_key_schedule *schedule); // DES_set_odd_parity sets the parity bits (the least-significant bits in each // byte) of |key| given the other bits in each byte. 
OPENSSL_EXPORT void DES_set_odd_parity(DES_cblock *key); // DES_ecb_encrypt encrypts (or decrypts, if |is_encrypt| is |DES_DECRYPT|) a // single DES block (8 bytes) from in to out, using the key configured in // |schedule|. OPENSSL_EXPORT void DES_ecb_encrypt(const DES_cblock *in, DES_cblock *out, const DES_key_schedule *schedule, int is_encrypt); // DES_ncbc_encrypt encrypts (or decrypts, if |enc| is |DES_DECRYPT|) |len| // bytes from |in| to |out| with DES in CBC mode. OPENSSL_EXPORT void DES_ncbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, const DES_key_schedule *schedule, DES_cblock *ivec, int enc); // DES_ecb3_encrypt encrypts (or decrypts, if |enc| is |DES_DECRYPT|) a single // block (8 bytes) of data from |input| to |output| using 3DES. OPENSSL_EXPORT void DES_ecb3_encrypt(const DES_cblock *input, DES_cblock *output, const DES_key_schedule *ks1, const DES_key_schedule *ks2, const DES_key_schedule *ks3, int enc); // DES_ede3_cbc_encrypt encrypts (or decrypts, if |enc| is |DES_DECRYPT|) |len| // bytes from |in| to |out| with 3DES in CBC mode. 3DES uses three keys, thus // the function takes three different |DES_key_schedule|s. OPENSSL_EXPORT void DES_ede3_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, const DES_key_schedule *ks1, const DES_key_schedule *ks2, const DES_key_schedule *ks3, DES_cblock *ivec, int enc); // DES_ede2_cbc_encrypt encrypts (or decrypts, if |enc| is |DES_DECRYPT|) |len| // bytes from |in| to |out| with 3DES in CBC mode. With this keying option, the // first and third 3DES keys are identical. Thus, this function takes only two // different |DES_key_schedule|s. OPENSSL_EXPORT void DES_ede2_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, const DES_key_schedule *ks1, const DES_key_schedule *ks2, DES_cblock *ivec, int enc); // Deprecated functions. // DES_set_key_unchecked calls |DES_set_key|. 
OPENSSL_EXPORT void DES_set_key_unchecked(const DES_cblock *key, DES_key_schedule *schedule); OPENSSL_EXPORT void DES_ede3_cfb64_encrypt(const uint8_t *in, uint8_t *out, long length, DES_key_schedule *ks1, DES_key_schedule *ks2, DES_key_schedule *ks3, DES_cblock *ivec, int *num, int enc); OPENSSL_EXPORT void DES_ede3_cfb_encrypt(const uint8_t *in, uint8_t *out, int numbits, long length, DES_key_schedule *ks1, DES_key_schedule *ks2, DES_key_schedule *ks3, DES_cblock *ivec, int enc); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_DES_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_dh.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_DH_H #define OPENSSL_HEADER_DH_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_thread.h" #if defined(__cplusplus) extern "C" { #endif // DH contains functions for performing Diffie-Hellman key agreement in // multiplicative groups. // // This module is deprecated and retained for legacy reasons only. It is not // considered a priority for performance or hardening work. Do not use it in // new code. Use X25519 or ECDH with P-256 instead. // Allocation and destruction. // // A |DH| object represents a Diffie-Hellman key or group parameters. A given // object may be used concurrently on multiple threads by non-mutating // functions, provided no other thread is concurrently calling a mutating // function. Unless otherwise documented, functions which take a |const| pointer // are non-mutating and functions which take a non-|const| pointer are mutating. // DH_new returns a new, empty DH object or NULL on error. 
OPENSSL_EXPORT DH *DH_new(void); // DH_free decrements the reference count of |dh| and frees it if the reference // count drops to zero. OPENSSL_EXPORT void DH_free(DH *dh); // DH_up_ref increments the reference count of |dh| and returns one. It does not // mutate |dh| for thread-safety purposes and may be used concurrently. OPENSSL_EXPORT int DH_up_ref(DH *dh); // Properties. // OPENSSL_DH_MAX_MODULUS_BITS is the maximum supported Diffie-Hellman group // modulus, in bits. #define OPENSSL_DH_MAX_MODULUS_BITS 10000 // DH_bits returns the size of |dh|'s group modulus, in bits. OPENSSL_EXPORT unsigned DH_bits(const DH *dh); // DH_get0_pub_key returns |dh|'s public key. OPENSSL_EXPORT const BIGNUM *DH_get0_pub_key(const DH *dh); // DH_get0_priv_key returns |dh|'s private key, or NULL if |dh| is a public key. OPENSSL_EXPORT const BIGNUM *DH_get0_priv_key(const DH *dh); // DH_get0_p returns |dh|'s group modulus. OPENSSL_EXPORT const BIGNUM *DH_get0_p(const DH *dh); // DH_get0_q returns the size of |dh|'s subgroup, or NULL if it is unset. OPENSSL_EXPORT const BIGNUM *DH_get0_q(const DH *dh); // DH_get0_g returns |dh|'s group generator. OPENSSL_EXPORT const BIGNUM *DH_get0_g(const DH *dh); // DH_get0_key sets |*out_pub_key| and |*out_priv_key|, if non-NULL, to |dh|'s // public and private key, respectively. If |dh| is a public key, the private // key will be set to NULL. OPENSSL_EXPORT void DH_get0_key(const DH *dh, const BIGNUM **out_pub_key, const BIGNUM **out_priv_key); // DH_set0_key sets |dh|'s public and private key to the specified values. If // NULL, the field is left unchanged. On success, it takes ownership of each // argument and returns one. Otherwise, it returns zero. OPENSSL_EXPORT int DH_set0_key(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key); // DH_get0_pqg sets |*out_p|, |*out_q|, and |*out_g|, if non-NULL, to |dh|'s p, // q, and g parameters, respectively. 
OPENSSL_EXPORT void DH_get0_pqg(const DH *dh, const BIGNUM **out_p, const BIGNUM **out_q, const BIGNUM **out_g); // DH_set0_pqg sets |dh|'s p, q, and g parameters to the specified values. If // NULL, the field is left unchanged. On success, it takes ownership of each // argument and returns one. Otherwise, it returns zero. |q| may be NULL, but // |p| and |g| must either be specified or already configured on |dh|. OPENSSL_EXPORT int DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g); // DH_set_length sets the number of bits to use for the secret exponent when // calling |DH_generate_key| on |dh| and returns one. If unset, // |DH_generate_key| will use the bit length of p. OPENSSL_EXPORT int DH_set_length(DH *dh, unsigned priv_length); // Standard parameters. // DH_get_rfc7919_2048 returns the group `ffdhe2048` from // https://tools.ietf.org/html/rfc7919#appendix-A.1. It returns NULL if out // of memory. OPENSSL_EXPORT DH *DH_get_rfc7919_2048(void); // BN_get_rfc3526_prime_1536 sets |*ret| to the 1536-bit MODP group from RFC // 3526 and returns |ret|. If |ret| is NULL then a fresh |BIGNUM| is allocated // and returned. It returns NULL on allocation failure. OPENSSL_EXPORT BIGNUM *BN_get_rfc3526_prime_1536(BIGNUM *ret); // BN_get_rfc3526_prime_2048 sets |*ret| to the 2048-bit MODP group from RFC // 3526 and returns |ret|. If |ret| is NULL then a fresh |BIGNUM| is allocated // and returned. It returns NULL on allocation failure. OPENSSL_EXPORT BIGNUM *BN_get_rfc3526_prime_2048(BIGNUM *ret); // BN_get_rfc3526_prime_3072 sets |*ret| to the 3072-bit MODP group from RFC // 3526 and returns |ret|. If |ret| is NULL then a fresh |BIGNUM| is allocated // and returned. It returns NULL on allocation failure. OPENSSL_EXPORT BIGNUM *BN_get_rfc3526_prime_3072(BIGNUM *ret); // BN_get_rfc3526_prime_4096 sets |*ret| to the 4096-bit MODP group from RFC // 3526 and returns |ret|. If |ret| is NULL then a fresh |BIGNUM| is allocated // and returned. 
It returns NULL on allocation failure. OPENSSL_EXPORT BIGNUM *BN_get_rfc3526_prime_4096(BIGNUM *ret); // BN_get_rfc3526_prime_6144 sets |*ret| to the 6144-bit MODP group from RFC // 3526 and returns |ret|. If |ret| is NULL then a fresh |BIGNUM| is allocated // and returned. It returns NULL on allocation failure. OPENSSL_EXPORT BIGNUM *BN_get_rfc3526_prime_6144(BIGNUM *ret); // BN_get_rfc3526_prime_8192 sets |*ret| to the 8192-bit MODP group from RFC // 3526 and returns |ret|. If |ret| is NULL then a fresh |BIGNUM| is allocated // and returned. It returns NULL on allocation failure. OPENSSL_EXPORT BIGNUM *BN_get_rfc3526_prime_8192(BIGNUM *ret); // Parameter generation. #define DH_GENERATOR_2 2 #define DH_GENERATOR_5 5 // DH_generate_parameters_ex generates a suitable Diffie-Hellman group with a // prime that is |prime_bits| long and stores it in |dh|. The generator of the // group will be |generator|, which should be |DH_GENERATOR_2| unless there's a // good reason to use a different value. The |cb| argument contains a callback // function that will be called during the generation. See the documentation in // |bn.h| about this. In addition to the callback invocations from |BN|, |cb| // will also be called with |event| equal to three when the generation is // complete. OPENSSL_EXPORT int DH_generate_parameters_ex(DH *dh, int prime_bits, int generator, BN_GENCB *cb); // Diffie-Hellman operations. // DH_generate_key generates a new, random, private key and stores it in // |dh|, if |dh| does not already have a private key. Otherwise, it updates // |dh|'s public key to match the private key. It returns one on success and // zero on error. OPENSSL_EXPORT int DH_generate_key(DH *dh); // DH_compute_key_padded calculates the shared key between |dh| and |peers_key| // and writes it as a big-endian integer into |out|, padded up to |DH_size| // bytes. It returns the number of bytes written, which is always |DH_size|, or // a negative number on error. 
|out| must have |DH_size| bytes of space. // // WARNING: this differs from the usual BoringSSL return-value convention. // // Note this function differs from |DH_compute_key| in that it preserves leading // zeros in the secret. This function is the preferred variant. It matches PKCS // #3 and avoids some side channel attacks. However, the two functions are not // drop-in replacements for each other. Using a different variant than the // application expects will result in sporadic key mismatches. // // Callers that expect a fixed-width secret should use this function over // |DH_compute_key|. Callers that use either function should migrate to a modern // primitive such as X25519 or ECDH with P-256 instead. // // This function does not mutate |dh| for thread-safety purposes and may be used // concurrently. OPENSSL_EXPORT int DH_compute_key_padded(uint8_t *out, const BIGNUM *peers_key, DH *dh); // DH_compute_key_hashed calculates the shared key between |dh| and |peers_key| // and hashes it with the given |digest|. If the hash output is less than // |max_out_len| bytes then it writes the hash output to |out| and sets // |*out_len| to the number of bytes written. Otherwise it signals an error. It // returns one on success or zero on error. // // NOTE: this follows the usual BoringSSL return-value convention, but that's // different from |DH_compute_key| and |DH_compute_key_padded|. // // This function does not mutate |dh| for thread-safety purposes and may be used // concurrently. OPENSSL_EXPORT int DH_compute_key_hashed(DH *dh, uint8_t *out, size_t *out_len, size_t max_out_len, const BIGNUM *peers_key, const EVP_MD *digest); // Utility functions. // DH_size returns the number of bytes in the DH group's prime. OPENSSL_EXPORT int DH_size(const DH *dh); // DH_num_bits returns the minimum number of bits needed to represent the // absolute value of the DH group's prime. 
OPENSSL_EXPORT unsigned DH_num_bits(const DH *dh); #define DH_CHECK_P_NOT_PRIME 0x01 #define DH_CHECK_P_NOT_SAFE_PRIME 0x02 #define DH_CHECK_UNABLE_TO_CHECK_GENERATOR 0x04 #define DH_CHECK_NOT_SUITABLE_GENERATOR 0x08 #define DH_CHECK_Q_NOT_PRIME 0x10 #define DH_CHECK_INVALID_Q_VALUE 0x20 // These are compatibility defines. #define DH_NOT_SUITABLE_GENERATOR DH_CHECK_NOT_SUITABLE_GENERATOR #define DH_UNABLE_TO_CHECK_GENERATOR DH_CHECK_UNABLE_TO_CHECK_GENERATOR // DH_check checks the suitability of |dh| as a Diffie-Hellman group and sets // |DH_CHECK_*| flags in |*out_flags| if it finds any errors. It returns one if // |*out_flags| was successfully set and zero on error. // // Note: these checks may be quite computationally expensive. OPENSSL_EXPORT int DH_check(const DH *dh, int *out_flags); #define DH_CHECK_PUBKEY_TOO_SMALL 0x1 #define DH_CHECK_PUBKEY_TOO_LARGE 0x2 #define DH_CHECK_PUBKEY_INVALID 0x4 // DH_check_pub_key checks the suitability of |pub_key| as a public key for the // DH group in |dh| and sets |DH_CHECK_PUBKEY_*| flags in |*out_flags| if it // finds any errors. It returns one if |*out_flags| was successfully set and // zero on error. OPENSSL_EXPORT int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *out_flags); // DHparams_dup allocates a fresh |DH| and copies the parameters from |dh| into // it. It returns the new |DH| or NULL on error. OPENSSL_EXPORT DH *DHparams_dup(const DH *dh); // ASN.1 functions. // DH_parse_parameters decodes a DER-encoded DHParameter structure (PKCS #3) // from |cbs| and advances |cbs|. It returns a newly-allocated |DH| or NULL on // error. OPENSSL_EXPORT DH *DH_parse_parameters(CBS *cbs); // DH_marshal_parameters marshals |dh| as a DER-encoded DHParameter structure // (PKCS #3) and appends the result to |cbb|. It returns one on success and zero // on error. OPENSSL_EXPORT int DH_marshal_parameters(CBB *cbb, const DH *dh); // Deprecated functions. 
// DH_generate_parameters behaves like |DH_generate_parameters_ex|, which is // what you should use instead. It returns NULL on error, or a newly-allocated // |DH| on success. This function is provided for compatibility only. OPENSSL_EXPORT DH *DH_generate_parameters(int prime_len, int generator, void (*callback)(int, int, void *), void *cb_arg); // d2i_DHparams parses a DER-encoded DHParameter structure (PKCS #3) from |len| // bytes at |*inp|, as in |d2i_SAMPLE|. // // Use |DH_parse_parameters| instead. OPENSSL_EXPORT DH *d2i_DHparams(DH **ret, const unsigned char **inp, long len); // i2d_DHparams marshals |in| to a DER-encoded DHParameter structure (PKCS #3), // as described in |i2d_SAMPLE|. // // Use |DH_marshal_parameters| instead. OPENSSL_EXPORT int i2d_DHparams(const DH *in, unsigned char **outp); // DH_compute_key behaves like |DH_compute_key_padded| but, contrary to PKCS #3, // returns a variable-length shared key with leading zeros. It returns the // number of bytes written, or a negative number on error. |out| must have // |DH_size| bytes of space. // // WARNING: this differs from the usual BoringSSL return-value convention. // // Note this function's running time and memory access pattern leaks information // about the shared secret. Particularly if |dh| is reused, this may result in // side channel attacks such as https://raccoon-attack.com/. // // |DH_compute_key_padded| is the preferred variant and avoids the above // attacks. However, the two functions are not drop-in replacements for each // other. Using a different variant than the application expects will result in // sporadic key mismatches. // // Callers that expect a fixed-width secret should use |DH_compute_key_padded| // instead. Callers that use either function should migrate to a modern // primitive such as X25519 or ECDH with P-256 instead. // // This function does not mutate |dh| for thread-safety purposes and may be used // concurrently. 
OPENSSL_EXPORT int DH_compute_key(uint8_t *out, const BIGNUM *peers_key, DH *dh); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(DH, DH_free) BORINGSSL_MAKE_UP_REF(DH, DH_up_ref) BSSL_NAMESPACE_END } // extern C++ #endif #define DH_R_BAD_GENERATOR 100 #define DH_R_INVALID_PUBKEY 101 #define DH_R_MODULUS_TOO_LARGE 102 #define DH_R_NO_PRIVATE_VALUE 103 #define DH_R_DECODE_ERROR 104 #define DH_R_ENCODE_ERROR 105 #define DH_R_INVALID_PARAMETERS 106 #endif // OPENSSL_HEADER_DH_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_digest.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_DIGEST_H #define OPENSSL_HEADER_DIGEST_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // Digest functions. // // An EVP_MD abstracts the details of a specific hash function allowing code to // deal with the concept of a "hash function" without needing to know exactly // which hash function it is. // Hash algorithms. // // The following functions return |EVP_MD| objects that implement the named hash // function. 
OPENSSL_EXPORT const EVP_MD *EVP_md4(void); OPENSSL_EXPORT const EVP_MD *EVP_md5(void); OPENSSL_EXPORT const EVP_MD *EVP_sha1(void); OPENSSL_EXPORT const EVP_MD *EVP_sha224(void); OPENSSL_EXPORT const EVP_MD *EVP_sha256(void); OPENSSL_EXPORT const EVP_MD *EVP_sha384(void); OPENSSL_EXPORT const EVP_MD *EVP_sha512(void); OPENSSL_EXPORT const EVP_MD *EVP_sha512_256(void); OPENSSL_EXPORT const EVP_MD *EVP_blake2b256(void); // EVP_md5_sha1 is a TLS-specific |EVP_MD| which computes the concatenation of // MD5 and SHA-1, as used in TLS 1.1 and below. OPENSSL_EXPORT const EVP_MD *EVP_md5_sha1(void); // EVP_get_digestbynid returns an |EVP_MD| for the given NID, or NULL if no // such digest is known. OPENSSL_EXPORT const EVP_MD *EVP_get_digestbynid(int nid); // EVP_get_digestbyobj returns an |EVP_MD| for the given |ASN1_OBJECT|, or NULL // if no such digest is known. OPENSSL_EXPORT const EVP_MD *EVP_get_digestbyobj(const ASN1_OBJECT *obj); // Digest contexts. // // An EVP_MD_CTX represents the state of a specific digest operation in // progress. // EVP_MD_CTX_init initialises an, already allocated, |EVP_MD_CTX|. This is the // same as setting the structure to zero. OPENSSL_EXPORT void EVP_MD_CTX_init(EVP_MD_CTX *ctx); // EVP_MD_CTX_new allocates and initialises a fresh |EVP_MD_CTX| and returns // it, or NULL on allocation failure. The caller must use |EVP_MD_CTX_free| to // release the resulting object. OPENSSL_EXPORT EVP_MD_CTX *EVP_MD_CTX_new(void); // EVP_MD_CTX_cleanup frees any resources owned by |ctx| and resets it to a // freshly initialised state. It does not free |ctx| itself. It returns one. OPENSSL_EXPORT int EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx); // EVP_MD_CTX_cleanse zeros the digest state in |ctx| and then performs the // actions of |EVP_MD_CTX_cleanup|. Note that some |EVP_MD_CTX| objects contain // more than just a digest (e.g. those resulting from |EVP_DigestSignInit|) but // this function does not zero out more than just the digest state even in that // case. 
OPENSSL_EXPORT void EVP_MD_CTX_cleanse(EVP_MD_CTX *ctx); // EVP_MD_CTX_free calls |EVP_MD_CTX_cleanup| and then frees |ctx| itself. OPENSSL_EXPORT void EVP_MD_CTX_free(EVP_MD_CTX *ctx); // EVP_MD_CTX_copy_ex sets |out|, which must already be initialised, to be a // copy of |in|. It returns one on success and zero on allocation failure. OPENSSL_EXPORT int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in); // EVP_MD_CTX_move sets |out|, which must already be initialised, to the hash // state in |in|. |in| is mutated and left in an empty state. OPENSSL_EXPORT void EVP_MD_CTX_move(EVP_MD_CTX *out, EVP_MD_CTX *in); // EVP_MD_CTX_reset calls |EVP_MD_CTX_cleanup| followed by |EVP_MD_CTX_init|. It // returns one. OPENSSL_EXPORT int EVP_MD_CTX_reset(EVP_MD_CTX *ctx); // Digest operations. // EVP_DigestInit_ex configures |ctx|, which must already have been // initialised, for a fresh hashing operation using |type|. It returns one on // success and zero on allocation failure. OPENSSL_EXPORT int EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *engine); // EVP_DigestInit acts like |EVP_DigestInit_ex| except that |ctx| is // initialised before use. OPENSSL_EXPORT int EVP_DigestInit(EVP_MD_CTX *ctx, const EVP_MD *type); // EVP_DigestUpdate hashes |len| bytes from |data| into the hashing operation // in |ctx|. It returns one. OPENSSL_EXPORT int EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *data, size_t len); // EVP_MAX_MD_SIZE is the largest digest size supported, in bytes. // Functions that output a digest generally require the buffer have // at least this much space. #define EVP_MAX_MD_SIZE 64 // SHA-512 is the longest so far. // EVP_MAX_MD_BLOCK_SIZE is the largest digest block size supported, in // bytes. #define EVP_MAX_MD_BLOCK_SIZE 128 // SHA-512 is the longest so far. // EVP_DigestFinal_ex finishes the digest in |ctx| and writes the output to // |md_out|. |EVP_MD_CTX_size| bytes are written, which is at most // |EVP_MAX_MD_SIZE|. 
If |out_size| is not NULL then |*out_size| is set to the // number of bytes written. It returns one. After this call, the hash cannot be // updated or finished again until |EVP_DigestInit_ex| is called to start // another hashing operation. OPENSSL_EXPORT int EVP_DigestFinal_ex(EVP_MD_CTX *ctx, uint8_t *md_out, unsigned int *out_size); // EVP_DigestFinal acts like |EVP_DigestFinal_ex| except that // |EVP_MD_CTX_cleanup| is called on |ctx| before returning. OPENSSL_EXPORT int EVP_DigestFinal(EVP_MD_CTX *ctx, uint8_t *md_out, unsigned int *out_size); // EVP_Digest performs a complete hashing operation in one call. It hashes |len| // bytes from |data| and writes the digest to |md_out|. |EVP_MD_CTX_size| bytes // are written, which is at most |EVP_MAX_MD_SIZE|. If |out_size| is not NULL // then |*out_size| is set to the number of bytes written. It returns one on // success and zero otherwise. OPENSSL_EXPORT int EVP_Digest(const void *data, size_t len, uint8_t *md_out, unsigned int *md_out_size, const EVP_MD *type, ENGINE *impl); // Digest function accessors. // // These functions allow code to learn details about an abstract hash // function. // EVP_MD_type returns a NID identifying |md|. (For example, |NID_sha256|.) OPENSSL_EXPORT int EVP_MD_type(const EVP_MD *md); // EVP_MD_flags returns the flags for |md|, which is a set of |EVP_MD_FLAG_*| // values, ORed together. OPENSSL_EXPORT uint32_t EVP_MD_flags(const EVP_MD *md); // EVP_MD_size returns the digest size of |md|, in bytes. OPENSSL_EXPORT size_t EVP_MD_size(const EVP_MD *md); // EVP_MD_block_size returns the native block-size of |md|, in bytes. OPENSSL_EXPORT size_t EVP_MD_block_size(const EVP_MD *md); // EVP_MD_FLAG_PKEY_DIGEST indicates that the digest function is used with a // specific public key in order to verify signatures. (For example, // EVP_dss1.) 
#define EVP_MD_FLAG_PKEY_DIGEST 1 // EVP_MD_FLAG_DIGALGID_ABSENT indicates that the parameter type in an X.509 // DigestAlgorithmIdentifier representing this digest function should be // undefined rather than NULL. #define EVP_MD_FLAG_DIGALGID_ABSENT 2 // EVP_MD_FLAG_XOF indicates that the digest is an extensible-output function // (XOF). This flag is defined for compatibility and will never be set in any // |EVP_MD| in BoringSSL. #define EVP_MD_FLAG_XOF 4 // Digest operation accessors. // EVP_MD_CTX_get0_md returns the underlying digest function, or NULL if one has // not been set. OPENSSL_EXPORT const EVP_MD *EVP_MD_CTX_get0_md(const EVP_MD_CTX *ctx); // EVP_MD_CTX_md returns the underlying digest function, or NULL if one has not // been set. (This is the same as |EVP_MD_CTX_get0_md| but OpenSSL has // deprecated this spelling.) OPENSSL_EXPORT const EVP_MD *EVP_MD_CTX_md(const EVP_MD_CTX *ctx); // EVP_MD_CTX_size returns the digest size of |ctx|, in bytes. It // will crash if a digest hasn't been set on |ctx|. OPENSSL_EXPORT size_t EVP_MD_CTX_size(const EVP_MD_CTX *ctx); // EVP_MD_CTX_block_size returns the block size of the digest function used by // |ctx|, in bytes. It will crash if a digest hasn't been set on |ctx|. OPENSSL_EXPORT size_t EVP_MD_CTX_block_size(const EVP_MD_CTX *ctx); // EVP_MD_CTX_type returns a NID describing the digest function used by |ctx|. // (For example, |NID_sha256|.) It will crash if a digest hasn't been set on // |ctx|. OPENSSL_EXPORT int EVP_MD_CTX_type(const EVP_MD_CTX *ctx); // ASN.1 functions. // // These functions allow code to parse and serialize AlgorithmIdentifiers for // hash functions. // EVP_parse_digest_algorithm parses an AlgorithmIdentifier structure containing // a hash function OID (for example, 2.16.840.1.101.3.4.2.1 is SHA-256) and // advances |cbs|. The parameters field may either be omitted or a NULL. It // returns the digest function or NULL on error. 
OPENSSL_EXPORT const EVP_MD *EVP_parse_digest_algorithm(CBS *cbs); // EVP_marshal_digest_algorithm marshals |md| as an AlgorithmIdentifier // structure and appends the result to |cbb|. It returns one on success and zero // on error. OPENSSL_EXPORT int EVP_marshal_digest_algorithm(CBB *cbb, const EVP_MD *md); // Deprecated functions. // EVP_MD_CTX_copy sets |out|, which must /not/ be initialised, to be a copy of // |in|. It returns one on success and zero on error. OPENSSL_EXPORT int EVP_MD_CTX_copy(EVP_MD_CTX *out, const EVP_MD_CTX *in); // EVP_add_digest does nothing and returns one. It exists only for // compatibility with OpenSSL. OPENSSL_EXPORT int EVP_add_digest(const EVP_MD *digest); // EVP_get_digestbyname returns an |EVP_MD| given a human readable name in // |name|, or NULL if the name is unknown. OPENSSL_EXPORT const EVP_MD *EVP_get_digestbyname(const char *); // EVP_dss1 returns the value of EVP_sha1(). This was provided by OpenSSL to // specify the original DSA signatures, which were fixed to use SHA-1. Note, // however, that attempting to sign or verify DSA signatures with the EVP // interface will always fail. OPENSSL_EXPORT const EVP_MD *EVP_dss1(void); // EVP_MD_CTX_create calls |EVP_MD_CTX_new|. OPENSSL_EXPORT EVP_MD_CTX *EVP_MD_CTX_create(void); // EVP_MD_CTX_destroy calls |EVP_MD_CTX_free|. OPENSSL_EXPORT void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx); // EVP_DigestFinalXOF returns zero and adds an error to the error queue. // BoringSSL does not support any XOF digests. OPENSSL_EXPORT int EVP_DigestFinalXOF(EVP_MD_CTX *ctx, uint8_t *out, size_t len); // EVP_MD_meth_get_flags calls |EVP_MD_flags|. OPENSSL_EXPORT uint32_t EVP_MD_meth_get_flags(const EVP_MD *md); // EVP_MD_CTX_set_flags does nothing. OPENSSL_EXPORT void EVP_MD_CTX_set_flags(EVP_MD_CTX *ctx, int flags); // EVP_MD_CTX_FLAG_NON_FIPS_ALLOW is meaningless. In OpenSSL it permits non-FIPS // algorithms in FIPS mode. 
But BoringSSL FIPS mode doesn't prohibit algorithms // (it's up to the caller to use the FIPS module in a fashion compliant with // their needs). Thus this exists only to allow code to compile. #define EVP_MD_CTX_FLAG_NON_FIPS_ALLOW 0 // EVP_MD_nid calls |EVP_MD_type|. OPENSSL_EXPORT int EVP_MD_nid(const EVP_MD *md); struct evp_md_pctx_ops; struct env_md_ctx_st { // digest is the underlying digest function, or NULL if not set. const EVP_MD *digest; // md_data points to a block of memory that contains the hash-specific // context. void *md_data; // pctx is an opaque (at this layer) pointer to additional context that // EVP_PKEY functions may store in this object. EVP_PKEY_CTX *pctx; // pctx_ops, if not NULL, points to a vtable that contains functions to // manipulate |pctx|. const struct evp_md_pctx_ops *pctx_ops; } /* EVP_MD_CTX */; #if defined(__cplusplus) } // extern C #if !defined(BORINGSSL_NO_CXX) extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(EVP_MD_CTX, EVP_MD_CTX_free) using ScopedEVP_MD_CTX = internal::StackAllocatedMovable; BSSL_NAMESPACE_END } // extern C++ #endif #endif #define DIGEST_R_INPUT_NOT_INITIALIZED 100 #define DIGEST_R_DECODE_ERROR 101 #define DIGEST_R_UNKNOWN_HASH 102 #endif // OPENSSL_HEADER_DIGEST_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_dsa.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_DSA_H #define OPENSSL_HEADER_DSA_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_ex_data.h" #if defined(__cplusplus) extern "C" { #endif // DSA contains functions for signing and verifying with the Digital Signature // Algorithm. 
// // This module is deprecated and retained for legacy reasons only. It is not // considered a priority for performance or hardening work. Do not use it in // new code. Use Ed25519, ECDSA with P-256, or RSA instead. // Allocation and destruction. // // A |DSA| object represents a DSA key or group parameters. A given object may // be used concurrently on multiple threads by non-mutating functions, provided // no other thread is concurrently calling a mutating function. Unless otherwise // documented, functions which take a |const| pointer are non-mutating and // functions which take a non-|const| pointer are mutating. // DSA_new returns a new, empty DSA object or NULL on error. OPENSSL_EXPORT DSA *DSA_new(void); // DSA_free decrements the reference count of |dsa| and frees it if the // reference count drops to zero. OPENSSL_EXPORT void DSA_free(DSA *dsa); // DSA_up_ref increments the reference count of |dsa| and returns one. It does // not mutate |dsa| for thread-safety purposes and may be used concurrently. OPENSSL_EXPORT int DSA_up_ref(DSA *dsa); // Properties. // OPENSSL_DSA_MAX_MODULUS_BITS is the maximum supported DSA group modulus, in // bits. #define OPENSSL_DSA_MAX_MODULUS_BITS 10000 // DSA_bits returns the size of |dsa|'s group modulus, in bits. OPENSSL_EXPORT unsigned DSA_bits(const DSA *dsa); // DSA_get0_pub_key returns |dsa|'s public key. OPENSSL_EXPORT const BIGNUM *DSA_get0_pub_key(const DSA *dsa); // DSA_get0_priv_key returns |dsa|'s private key, or NULL if |dsa| is a public // key. OPENSSL_EXPORT const BIGNUM *DSA_get0_priv_key(const DSA *dsa); // DSA_get0_p returns |dsa|'s group modulus. OPENSSL_EXPORT const BIGNUM *DSA_get0_p(const DSA *dsa); // DSA_get0_q returns the size of |dsa|'s subgroup. OPENSSL_EXPORT const BIGNUM *DSA_get0_q(const DSA *dsa); // DSA_get0_g returns |dsa|'s group generator. 
OPENSSL_EXPORT const BIGNUM *DSA_get0_g(const DSA *dsa); // DSA_get0_key sets |*out_pub_key| and |*out_priv_key|, if non-NULL, to |dsa|'s // public and private key, respectively. If |dsa| is a public key, the private // key will be set to NULL. OPENSSL_EXPORT void DSA_get0_key(const DSA *dsa, const BIGNUM **out_pub_key, const BIGNUM **out_priv_key); // DSA_get0_pqg sets |*out_p|, |*out_q|, and |*out_g|, if non-NULL, to |dsa|'s // p, q, and g parameters, respectively. OPENSSL_EXPORT void DSA_get0_pqg(const DSA *dsa, const BIGNUM **out_p, const BIGNUM **out_q, const BIGNUM **out_g); // DSA_set0_key sets |dsa|'s public and private key to |pub_key| and |priv_key|, // respectively, if non-NULL. On success, it takes ownership of each argument // and returns one. Otherwise, it returns zero. // // |priv_key| may be NULL, but |pub_key| must either be non-NULL or already // configured on |dsa|. OPENSSL_EXPORT int DSA_set0_key(DSA *dsa, BIGNUM *pub_key, BIGNUM *priv_key); // DSA_set0_pqg sets |dsa|'s parameters to |p|, |q|, and |g|, if non-NULL, and // takes ownership of them. On success, it takes ownership of each argument and // returns one. Otherwise, it returns zero. // // Each argument must either be non-NULL or already configured on |dsa|. OPENSSL_EXPORT int DSA_set0_pqg(DSA *dsa, BIGNUM *p, BIGNUM *q, BIGNUM *g); // Parameter generation. // DSA_generate_parameters_ex generates a set of DSA parameters by following // the procedure given in FIPS 186-4, appendix A. // (http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf) // // The larger prime will have a length of |bits| (e.g. 2048). The |seed| value // allows others to generate and verify the same parameters and should be // random input which is kept for reference. If |out_counter| or |out_h| are // not NULL then the counter and h value used in the generation are written to // them. 
// // The |cb| argument is passed to |BN_generate_prime_ex| and is thus called // during the generation process in order to indicate progress. See the // comments for that function for details. In addition to the calls made by // |BN_generate_prime_ex|, |DSA_generate_parameters_ex| will call it with // |event| equal to 2 and 3 at different stages of the process. // // It returns one on success and zero otherwise. OPENSSL_EXPORT int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed, size_t seed_len, int *out_counter, unsigned long *out_h, BN_GENCB *cb); // DSAparams_dup returns a freshly allocated |DSA| that contains a copy of the // parameters from |dsa|. It returns NULL on error. OPENSSL_EXPORT DSA *DSAparams_dup(const DSA *dsa); // Key generation. // DSA_generate_key generates a public/private key pair in |dsa|, which must // already have parameters set up. It returns one on success and zero on // error. OPENSSL_EXPORT int DSA_generate_key(DSA *dsa); // Signatures. // DSA_SIG_st (aka |DSA_SIG|) contains a DSA signature as a pair of integers. struct DSA_SIG_st { BIGNUM *r, *s; }; // DSA_SIG_new returns a freshly allocated |DSA_SIG| structure or NULL on error. // Both |r| and |s| in the signature will be NULL. OPENSSL_EXPORT DSA_SIG *DSA_SIG_new(void); // DSA_SIG_free frees the contents of |sig| and then frees |sig| itself. OPENSSL_EXPORT void DSA_SIG_free(DSA_SIG *sig); // DSA_SIG_get0 sets |*out_r| and |*out_s|, if non-NULL, to the two components // of |sig|. OPENSSL_EXPORT void DSA_SIG_get0(const DSA_SIG *sig, const BIGNUM **out_r, const BIGNUM **out_s); // DSA_SIG_set0 sets |sig|'s components to |r| and |s|, neither of which may be // NULL. On success, it takes ownership of each argument and returns one. // Otherwise, it returns zero. 
OPENSSL_EXPORT int DSA_SIG_set0(DSA_SIG *sig, BIGNUM *r, BIGNUM *s); // DSA_do_sign returns a signature of the hash in |digest| by the key in |dsa| // and returns an allocated, DSA_SIG structure, or NULL on error. OPENSSL_EXPORT DSA_SIG *DSA_do_sign(const uint8_t *digest, size_t digest_len, const DSA *dsa); // DSA_do_verify verifies that |sig| is a valid signature, by the public key in // |dsa|, of the hash in |digest|. It returns one if so, zero if invalid and -1 // on error. // // WARNING: do not use. This function returns -1 for error, 0 for invalid and 1 // for valid. However, this is dangerously different to the usual OpenSSL // convention and could be a disaster if a user did |if (DSA_do_verify(...))|. // Because of this, |DSA_check_signature| is a safer version of this. // // TODO(fork): deprecate. OPENSSL_EXPORT int DSA_do_verify(const uint8_t *digest, size_t digest_len, const DSA_SIG *sig, const DSA *dsa); // DSA_do_check_signature sets |*out_valid| to zero. Then it verifies that |sig| // is a valid signature, by the public key in |dsa| of the hash in |digest| // and, if so, it sets |*out_valid| to one. // // It returns one if it was able to verify the signature as valid or invalid, // and zero on error. OPENSSL_EXPORT int DSA_do_check_signature(int *out_valid, const uint8_t *digest, size_t digest_len, const DSA_SIG *sig, const DSA *dsa); // ASN.1 signatures. // // These functions also perform DSA signature operations, but deal with ASN.1 // encoded signatures as opposed to raw |BIGNUM|s. If you don't know what // encoding a DSA signature is in, it's probably ASN.1. // DSA_sign signs |digest| with the key in |dsa| and writes the resulting // signature, in ASN.1 form, to |out_sig| and the length of the signature to // |*out_siglen|. There must be, at least, |DSA_size(dsa)| bytes of space in // |out_sig|. It returns one on success and zero otherwise. // // (The |type| argument is ignored.) 
OPENSSL_EXPORT int DSA_sign(int type, const uint8_t *digest, size_t digest_len, uint8_t *out_sig, unsigned int *out_siglen, const DSA *dsa); // DSA_verify verifies that |sig| is a valid, ASN.1 signature, by the public // key in |dsa|, of the hash in |digest|. It returns one if so, zero if invalid // and -1 on error. // // (The |type| argument is ignored.) // // WARNING: do not use. This function returns -1 for error, 0 for invalid and 1 // for valid. However, this is dangerously different to the usual OpenSSL // convention and could be a disaster if a user did |if (DSA_verify(...))|. // Because of this, |DSA_check_signature| is a safer version of this. // // TODO(fork): deprecate. OPENSSL_EXPORT int DSA_verify(int type, const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, const DSA *dsa); // DSA_check_signature sets |*out_valid| to zero. Then it verifies that |sig| // is a valid, ASN.1 signature, by the public key in |dsa|, of the hash in // |digest|. If so, it sets |*out_valid| to one. // // It returns one if it was able to verify the signature as valid or invalid, // and zero on error. OPENSSL_EXPORT int DSA_check_signature(int *out_valid, const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, const DSA *dsa); // DSA_size returns the size, in bytes, of an ASN.1 encoded, DSA signature // generated by |dsa|. Parameters must already have been set up in |dsa|. OPENSSL_EXPORT int DSA_size(const DSA *dsa); // ASN.1 encoding. // DSA_SIG_parse parses a DER-encoded DSA-Sig-Value structure from |cbs| and // advances |cbs|. It returns a newly-allocated |DSA_SIG| or NULL on error. OPENSSL_EXPORT DSA_SIG *DSA_SIG_parse(CBS *cbs); // DSA_SIG_marshal marshals |sig| as a DER-encoded DSA-Sig-Value and appends the // result to |cbb|. It returns one on success and zero on error. 
OPENSSL_EXPORT int DSA_SIG_marshal(CBB *cbb, const DSA_SIG *sig); // DSA_parse_public_key parses a DER-encoded DSA public key from |cbs| and // advances |cbs|. It returns a newly-allocated |DSA| or NULL on error. OPENSSL_EXPORT DSA *DSA_parse_public_key(CBS *cbs); // DSA_marshal_public_key marshals |dsa| as a DER-encoded DSA public key and // appends the result to |cbb|. It returns one on success and zero on // failure. OPENSSL_EXPORT int DSA_marshal_public_key(CBB *cbb, const DSA *dsa); // DSA_parse_private_key parses a DER-encoded DSA private key from |cbs| and // advances |cbs|. It returns a newly-allocated |DSA| or NULL on error. OPENSSL_EXPORT DSA *DSA_parse_private_key(CBS *cbs); // DSA_marshal_private_key marshals |dsa| as a DER-encoded DSA private key and // appends the result to |cbb|. It returns one on success and zero on // failure. OPENSSL_EXPORT int DSA_marshal_private_key(CBB *cbb, const DSA *dsa); // DSA_parse_parameters parses a DER-encoded Dss-Parms structure (RFC 3279) // from |cbs| and advances |cbs|. It returns a newly-allocated |DSA| or NULL on // error. OPENSSL_EXPORT DSA *DSA_parse_parameters(CBS *cbs); // DSA_marshal_parameters marshals |dsa| as a DER-encoded Dss-Parms structure // (RFC 3279) and appends the result to |cbb|. It returns one on success and // zero on failure. OPENSSL_EXPORT int DSA_marshal_parameters(CBB *cbb, const DSA *dsa); // Conversion. // DSA_dup_DH returns a |DH| constructed from the parameters of |dsa|. This is // sometimes needed when Diffie-Hellman parameters are stored in the form of // DSA parameters. It returns an allocated |DH| on success or NULL on error. OPENSSL_EXPORT DH *DSA_dup_DH(const DSA *dsa); // ex_data functions. // // See |ex_data.h| for details. 
OPENSSL_EXPORT int DSA_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int DSA_set_ex_data(DSA *dsa, int idx, void *arg); OPENSSL_EXPORT void *DSA_get_ex_data(const DSA *dsa, int idx); // Deprecated functions. // d2i_DSA_SIG parses a DER-encoded DSA-Sig-Value structure from |len| bytes at // |*inp|, as described in |d2i_SAMPLE|. // // Use |DSA_SIG_parse| instead. OPENSSL_EXPORT DSA_SIG *d2i_DSA_SIG(DSA_SIG **out_sig, const uint8_t **inp, long len); // i2d_DSA_SIG marshals |in| to a DER-encoded DSA-Sig-Value structure, as // described in |i2d_SAMPLE|. // // Use |DSA_SIG_marshal| instead. OPENSSL_EXPORT int i2d_DSA_SIG(const DSA_SIG *in, uint8_t **outp); // d2i_DSAPublicKey parses a DER-encoded DSA public key from |len| bytes at // |*inp|, as described in |d2i_SAMPLE|. // // Use |DSA_parse_public_key| instead. OPENSSL_EXPORT DSA *d2i_DSAPublicKey(DSA **out, const uint8_t **inp, long len); // i2d_DSAPublicKey marshals |in| as a DER-encoded DSA public key, as described // in |i2d_SAMPLE|. // // Use |DSA_marshal_public_key| instead. OPENSSL_EXPORT int i2d_DSAPublicKey(const DSA *in, uint8_t **outp); // d2i_DSAPrivateKey parses a DER-encoded DSA private key from |len| bytes at // |*inp|, as described in |d2i_SAMPLE|. // // Use |DSA_parse_private_key| instead. OPENSSL_EXPORT DSA *d2i_DSAPrivateKey(DSA **out, const uint8_t **inp, long len); // i2d_DSAPrivateKey marshals |in| as a DER-encoded DSA private key, as // described in |i2d_SAMPLE|. // // Use |DSA_marshal_private_key| instead. OPENSSL_EXPORT int i2d_DSAPrivateKey(const DSA *in, uint8_t **outp); // d2i_DSAparams parses a DER-encoded Dss-Parms structure (RFC 3279) from |len| // bytes at |*inp|, as described in |d2i_SAMPLE|. // // Use |DSA_parse_parameters| instead. 
OPENSSL_EXPORT DSA *d2i_DSAparams(DSA **out, const uint8_t **inp, long len); // i2d_DSAparams marshals |in|'s parameters as a DER-encoded Dss-Parms structure // (RFC 3279), as described in |i2d_SAMPLE|. // // Use |DSA_marshal_parameters| instead. OPENSSL_EXPORT int i2d_DSAparams(const DSA *in, uint8_t **outp); // DSA_generate_parameters is a deprecated version of // |DSA_generate_parameters_ex| that creates and returns a |DSA*|. Don't use // it. OPENSSL_EXPORT DSA *DSA_generate_parameters(int bits, unsigned char *seed, int seed_len, int *counter_ret, unsigned long *h_ret, void (*callback)(int, int, void *), void *cb_arg); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(DSA, DSA_free) BORINGSSL_MAKE_UP_REF(DSA, DSA_up_ref) BORINGSSL_MAKE_DELETER(DSA_SIG, DSA_SIG_free) BSSL_NAMESPACE_END } // extern C++ #endif #define DSA_R_BAD_Q_VALUE 100 #define DSA_R_MISSING_PARAMETERS 101 #define DSA_R_MODULUS_TOO_LARGE 102 #define DSA_R_NEED_NEW_SETUP_VALUES 103 #define DSA_R_BAD_VERSION 104 #define DSA_R_DECODE_ERROR 105 #define DSA_R_ENCODE_ERROR 106 #define DSA_R_INVALID_PARAMETERS 107 #define DSA_R_TOO_MANY_ITERATIONS 108 #endif // OPENSSL_HEADER_DSA_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_dtls1.h ================================================ /* Copyright 2015 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. */ ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_e_os2.h ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. */ #include "CNIOBoringSSL_base.h" ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_ec.h ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_EC_H #define OPENSSL_HEADER_EC_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // Low-level operations on elliptic curves. // point_conversion_form_t enumerates forms, as defined in X9.62 (ECDSA), for // the encoding of an elliptic curve point (x,y) typedef enum { // POINT_CONVERSION_COMPRESSED indicates that the point is encoded as z||x, // where the octet z specifies which solution of the quadratic equation y // is. POINT_CONVERSION_COMPRESSED = 2, // POINT_CONVERSION_UNCOMPRESSED indicates that the point is encoded as // z||x||y, where z is the octet 0x04. POINT_CONVERSION_UNCOMPRESSED = 4, // POINT_CONVERSION_HYBRID indicates that the point is encoded as z||x||y, // where z specifies which solution of the quadratic equation y is. This is // not supported by the code and has never been observed in use. // // TODO(agl): remove once node.js no longer references this. POINT_CONVERSION_HYBRID = 6, } point_conversion_form_t; // Elliptic curve groups. // // Elliptic curve groups are represented by |EC_GROUP| objects. Unlike OpenSSL, // if limited to the APIs in this section, callers may treat |EC_GROUP|s as // static, immutable objects which do not need to be copied or released. In // BoringSSL, only custom |EC_GROUP|s created by |EC_GROUP_new_curve_GFp| // (deprecated) are dynamic. // // Callers may cast away |const| and use |EC_GROUP_dup| and |EC_GROUP_free| with // static groups, for compatibility with OpenSSL or dynamic groups, but it is // otherwise unnecessary. // EC_group_p224 returns an |EC_GROUP| for P-224, also known as secp224r1. OPENSSL_EXPORT const EC_GROUP *EC_group_p224(void); // EC_group_p256 returns an |EC_GROUP| for P-256, also known as secp256r1 or // prime256v1. 
OPENSSL_EXPORT const EC_GROUP *EC_group_p256(void); // EC_group_p384 returns an |EC_GROUP| for P-384, also known as secp384r1. OPENSSL_EXPORT const EC_GROUP *EC_group_p384(void); // EC_group_p521 returns an |EC_GROUP| for P-521, also known as secp521r1. OPENSSL_EXPORT const EC_GROUP *EC_group_p521(void); // EC_GROUP_new_by_curve_name returns the |EC_GROUP| object for the elliptic // curve specified by |nid|, or NULL on unsupported NID. For OpenSSL // compatibility, this function returns a non-const pointer which may be passed // to |EC_GROUP_free|. However, the resulting object is actually static and // calling |EC_GROUP_free| is optional. // // The supported NIDs are: // - |NID_secp224r1| (P-224) // - |NID_X9_62_prime256v1| (P-256) // - |NID_secp384r1| (P-384) // - |NID_secp521r1| (P-521) // // Calling this function causes all four curves to be linked into the binary. // Prefer calling |EC_group_*| to allow the static linker to drop unused curves. // // If in doubt, use |NID_X9_62_prime256v1|, or see the curve25519.h header for // more modern primitives. OPENSSL_EXPORT EC_GROUP *EC_GROUP_new_by_curve_name(int nid); // EC_GROUP_cmp returns zero if |a| and |b| are the same group and non-zero // otherwise. OPENSSL_EXPORT int EC_GROUP_cmp(const EC_GROUP *a, const EC_GROUP *b, BN_CTX *ignored); // EC_GROUP_get0_generator returns a pointer to the internal |EC_POINT| object // in |group| that specifies the generator for the group. OPENSSL_EXPORT const EC_POINT *EC_GROUP_get0_generator(const EC_GROUP *group); // EC_GROUP_get0_order returns a pointer to the internal |BIGNUM| object in // |group| that specifies the order of the group. OPENSSL_EXPORT const BIGNUM *EC_GROUP_get0_order(const EC_GROUP *group); // EC_GROUP_order_bits returns the number of bits of the order of |group|. OPENSSL_EXPORT int EC_GROUP_order_bits(const EC_GROUP *group); // EC_GROUP_get_cofactor sets |*cofactor| to the cofactor of |group| using // |ctx|, if it's not NULL. 
It returns one on success and zero otherwise. OPENSSL_EXPORT int EC_GROUP_get_cofactor(const EC_GROUP *group, BIGNUM *cofactor, BN_CTX *ctx); // EC_GROUP_get_curve_GFp gets various parameters about a group. It sets // |*out_p| to the order of the coordinate field and |*out_a| and |*out_b| to // the parameters of the curve when expressed as y² = x³ + ax + b. Any of the // output parameters can be NULL. It returns one on success and zero on // error. OPENSSL_EXPORT int EC_GROUP_get_curve_GFp(const EC_GROUP *group, BIGNUM *out_p, BIGNUM *out_a, BIGNUM *out_b, BN_CTX *ctx); // EC_GROUP_get_curve_name returns a NID that identifies |group|. OPENSSL_EXPORT int EC_GROUP_get_curve_name(const EC_GROUP *group); // EC_GROUP_get_degree returns the number of bits needed to represent an // element of the field underlying |group|. OPENSSL_EXPORT unsigned EC_GROUP_get_degree(const EC_GROUP *group); // EC_curve_nid2nist returns the NIST name of the elliptic curve specified by // |nid|, or NULL if |nid| is not a NIST curve. For example, it returns "P-256" // for |NID_X9_62_prime256v1|. OPENSSL_EXPORT const char *EC_curve_nid2nist(int nid); // EC_curve_nist2nid returns the NID of the elliptic curve specified by the NIST // name |name|, or |NID_undef| if |name| is not a recognized name. For example, // it returns |NID_X9_62_prime256v1| for "P-256". OPENSSL_EXPORT int EC_curve_nist2nid(const char *name); // Points on elliptic curves. // EC_POINT_new returns a fresh |EC_POINT| object in the given group, or NULL // on error. OPENSSL_EXPORT EC_POINT *EC_POINT_new(const EC_GROUP *group); // EC_POINT_free frees |point| and the data that it points to. OPENSSL_EXPORT void EC_POINT_free(EC_POINT *point); // EC_POINT_copy sets |*dest| equal to |*src|. It returns one on success and // zero otherwise. OPENSSL_EXPORT int EC_POINT_copy(EC_POINT *dest, const EC_POINT *src); // EC_POINT_dup returns a fresh |EC_POINT| that contains the same values as // |src|, or NULL on error. 
OPENSSL_EXPORT EC_POINT *EC_POINT_dup(const EC_POINT *src, const EC_GROUP *group); // EC_POINT_set_to_infinity sets |point| to be the "point at infinity" for the // given group. OPENSSL_EXPORT int EC_POINT_set_to_infinity(const EC_GROUP *group, EC_POINT *point); // EC_POINT_is_at_infinity returns one iff |point| is the point at infinity and // zero otherwise. OPENSSL_EXPORT int EC_POINT_is_at_infinity(const EC_GROUP *group, const EC_POINT *point); // EC_POINT_is_on_curve returns one if |point| is an element of |group| and // zero otherwise or when an error occurs. This is different from OpenSSL, // which returns -1 on error. If |ctx| is non-NULL, it may be used. OPENSSL_EXPORT int EC_POINT_is_on_curve(const EC_GROUP *group, const EC_POINT *point, BN_CTX *ctx); // EC_POINT_cmp returns zero if |a| is equal to |b|, greater than zero if // not equal and -1 on error. If |ctx| is not NULL, it may be used. OPENSSL_EXPORT int EC_POINT_cmp(const EC_GROUP *group, const EC_POINT *a, const EC_POINT *b, BN_CTX *ctx); // Point conversion. // EC_POINT_get_affine_coordinates_GFp sets |x| and |y| to the affine value of // |point| using |ctx|, if it's not NULL. It returns one on success and zero // otherwise. // // Either |x| or |y| may be NULL to skip computing that coordinate. This is // slightly faster in the common case where only the x-coordinate is needed. OPENSSL_EXPORT int EC_POINT_get_affine_coordinates_GFp(const EC_GROUP *group, const EC_POINT *point, BIGNUM *x, BIGNUM *y, BN_CTX *ctx); // EC_POINT_get_affine_coordinates is an alias of // |EC_POINT_get_affine_coordinates_GFp|. OPENSSL_EXPORT int EC_POINT_get_affine_coordinates(const EC_GROUP *group, const EC_POINT *point, BIGNUM *x, BIGNUM *y, BN_CTX *ctx); // EC_POINT_set_affine_coordinates_GFp sets the value of |point| to be // (|x|, |y|). The |ctx| argument may be used if not NULL. It returns one // on success or zero on error. It's considered an error if the point is not on // the curve. 
// // Note that the corresponding function in OpenSSL versions prior to 1.0.2s does // not check if the point is on the curve. This is a security-critical check, so // code additionally supporting OpenSSL should repeat the check with // |EC_POINT_is_on_curve| or check for older OpenSSL versions with // |OPENSSL_VERSION_NUMBER|. OPENSSL_EXPORT int EC_POINT_set_affine_coordinates_GFp(const EC_GROUP *group, EC_POINT *point, const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx); // EC_POINT_set_affine_coordinates is an alias of // |EC_POINT_set_affine_coordinates_GFp|. OPENSSL_EXPORT int EC_POINT_set_affine_coordinates(const EC_GROUP *group, EC_POINT *point, const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx); // EC_POINT_point2oct serialises |point| into the X9.62 form given by |form| // into, at most, |max_out| bytes at |buf|. It returns the number of bytes // written or zero on error if |buf| is non-NULL, else the number of bytes // needed. The |ctx| argument may be used if not NULL. OPENSSL_EXPORT size_t EC_POINT_point2oct(const EC_GROUP *group, const EC_POINT *point, point_conversion_form_t form, uint8_t *buf, size_t max_out, BN_CTX *ctx); // EC_POINT_point2buf serialises |point| into the X9.62 form given by |form| to // a newly-allocated buffer and sets |*out_buf| to point to it. It returns the // length of the result on success or zero on error. The caller must release // |*out_buf| with |OPENSSL_free| when done. OPENSSL_EXPORT size_t EC_POINT_point2buf(const EC_GROUP *group, const EC_POINT *point, point_conversion_form_t form, uint8_t **out_buf, BN_CTX *ctx); // EC_POINT_point2cbb behaves like |EC_POINT_point2oct| but appends the // serialised point to |cbb|. It returns one on success and zero on error. OPENSSL_EXPORT int EC_POINT_point2cbb(CBB *out, const EC_GROUP *group, const EC_POINT *point, point_conversion_form_t form, BN_CTX *ctx); // EC_POINT_oct2point sets |point| from |len| bytes of X9.62 format // serialisation in |buf|. 
It returns one on success and zero on error. The // |ctx| argument may be used if not NULL. It's considered an error if |buf| // does not represent a point on the curve. OPENSSL_EXPORT int EC_POINT_oct2point(const EC_GROUP *group, EC_POINT *point, const uint8_t *buf, size_t len, BN_CTX *ctx); // EC_POINT_set_compressed_coordinates_GFp sets |point| to equal the point with // the given |x| coordinate and the y coordinate specified by |y_bit| (see // X9.62). It returns one on success and zero otherwise. OPENSSL_EXPORT int EC_POINT_set_compressed_coordinates_GFp( const EC_GROUP *group, EC_POINT *point, const BIGNUM *x, int y_bit, BN_CTX *ctx); // Group operations. // EC_POINT_add sets |r| equal to |a| plus |b|. It returns one on success and // zero otherwise. If |ctx| is not NULL, it may be used. OPENSSL_EXPORT int EC_POINT_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, const EC_POINT *b, BN_CTX *ctx); // EC_POINT_dbl sets |r| equal to |a| plus |a|. It returns one on success and // zero otherwise. If |ctx| is not NULL, it may be used. OPENSSL_EXPORT int EC_POINT_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, BN_CTX *ctx); // EC_POINT_invert sets |a| equal to minus |a|. It returns one on success and // zero otherwise. If |ctx| is not NULL, it may be used. OPENSSL_EXPORT int EC_POINT_invert(const EC_GROUP *group, EC_POINT *a, BN_CTX *ctx); // EC_POINT_mul sets r = generator*n + q*m. It returns one on success and zero // otherwise. If |ctx| is not NULL, it may be used. OPENSSL_EXPORT int EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *n, const EC_POINT *q, const BIGNUM *m, BN_CTX *ctx); // Hash-to-curve. // // The following functions implement primitives from RFC 9380. The |dst| // parameter in each function is the domain separation tag and must be unique // for each protocol and between the |hash_to_curve| and |hash_to_scalar| // variants. See section 3.1 of the spec for additional guidance on this // parameter. 
// EC_hash_to_curve_p256_xmd_sha256_sswu hashes |msg| to a point on |group| and // writes the result to |out|, implementing the P256_XMD:SHA-256_SSWU_RO_ suite // from RFC 9380. It returns one on success and zero on error. OPENSSL_EXPORT int EC_hash_to_curve_p256_xmd_sha256_sswu( const EC_GROUP *group, EC_POINT *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len); // EC_hash_to_curve_p384_xmd_sha384_sswu hashes |msg| to a point on |group| and // writes the result to |out|, implementing the P384_XMD:SHA-384_SSWU_RO_ suite // from RFC 9380. It returns one on success and zero on error. OPENSSL_EXPORT int EC_hash_to_curve_p384_xmd_sha384_sswu( const EC_GROUP *group, EC_POINT *out, const uint8_t *dst, size_t dst_len, const uint8_t *msg, size_t msg_len); // Deprecated functions. // EC_GROUP_free releases a reference to |group|, if |group| was created by // |EC_GROUP_new_curve_GFp|. If |group| is static, it does nothing. // // This function exists for OpenSSL compatibility, and to manage dynamic // |EC_GROUP|s constructed by |EC_GROUP_new_curve_GFp|. Callers that do not need // either may ignore this function. OPENSSL_EXPORT void EC_GROUP_free(EC_GROUP *group); // EC_GROUP_dup increments |group|'s reference count and returns it, if |group| // was created by |EC_GROUP_new_curve_GFp|. If |group| is static, it simply // returns |group|. // // This function exists for OpenSSL compatibility, and to manage dynamic // |EC_GROUP|s constructed by |EC_GROUP_new_curve_GFp|. Callers that do not need // either may ignore this function. OPENSSL_EXPORT EC_GROUP *EC_GROUP_dup(const EC_GROUP *group); // EC_GROUP_new_curve_GFp creates a new, arbitrary elliptic curve group based // on the equation y² = x³ + a·x + b. It returns the new group or NULL on // error. The lifetime of the resulting object must be managed with // |EC_GROUP_dup| and |EC_GROUP_free|. // // This new group has no generator. 
It is an error to use a generator-less group // with any functions except for |EC_GROUP_free|, |EC_POINT_new|, // |EC_POINT_set_affine_coordinates_GFp|, and |EC_GROUP_set_generator|. // // |EC_GROUP|s returned by this function will always compare as unequal via // |EC_GROUP_cmp| (even to themselves). |EC_GROUP_get_curve_name| will always // return |NID_undef|. // // This function is provided for compatibility with some legacy applications // only. Avoid using arbitrary curves and use |EC_GROUP_new_by_curve_name| // instead. This ensures the result meets preconditions necessary for // elliptic curve algorithms to function correctly and securely. // // Given invalid parameters, this function may fail or it may return an // |EC_GROUP| which breaks these preconditions. Subsequent operations may then // return arbitrary, incorrect values. Callers should not pass // attacker-controlled values to this function. OPENSSL_EXPORT EC_GROUP *EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); // EC_GROUP_set_generator sets the generator for |group| to |generator|, which // must have the given order and cofactor. It may only be used with |EC_GROUP| // objects returned by |EC_GROUP_new_curve_GFp| and may only be used once on // each group. |generator| must have been created using |group|. OPENSSL_EXPORT int EC_GROUP_set_generator(EC_GROUP *group, const EC_POINT *generator, const BIGNUM *order, const BIGNUM *cofactor); // EC_GROUP_get_order sets |*order| to the order of |group|, if it's not // NULL. It returns one on success and zero otherwise. |ctx| is ignored. Use // |EC_GROUP_get0_order| instead. OPENSSL_EXPORT int EC_GROUP_get_order(const EC_GROUP *group, BIGNUM *order, BN_CTX *ctx); #define OPENSSL_EC_EXPLICIT_CURVE 0 #define OPENSSL_EC_NAMED_CURVE 1 // EC_GROUP_set_asn1_flag does nothing. OPENSSL_EXPORT void EC_GROUP_set_asn1_flag(EC_GROUP *group, int flag); // EC_GROUP_get_asn1_flag returns |OPENSSL_EC_NAMED_CURVE|. 
OPENSSL_EXPORT int EC_GROUP_get_asn1_flag(const EC_GROUP *group); typedef struct ec_method_st EC_METHOD; // EC_GROUP_method_of returns a dummy non-NULL pointer. OPENSSL_EXPORT const EC_METHOD *EC_GROUP_method_of(const EC_GROUP *group); // EC_METHOD_get_field_type returns NID_X9_62_prime_field. OPENSSL_EXPORT int EC_METHOD_get_field_type(const EC_METHOD *meth); // EC_GROUP_set_point_conversion_form aborts the process if |form| is not // |POINT_CONVERSION_UNCOMPRESSED| and otherwise does nothing. OPENSSL_EXPORT void EC_GROUP_set_point_conversion_form( EC_GROUP *group, point_conversion_form_t form); // EC_builtin_curve describes a supported elliptic curve. typedef struct { int nid; const char *comment; } EC_builtin_curve; // EC_get_builtin_curves writes at most |max_num_curves| elements to // |out_curves| and returns the total number that it would have written, had // |max_num_curves| been large enough. // // The |EC_builtin_curve| items describe the supported elliptic curves. OPENSSL_EXPORT size_t EC_get_builtin_curves(EC_builtin_curve *out_curves, size_t max_num_curves); // EC_POINT_clear_free calls |EC_POINT_free|. OPENSSL_EXPORT void EC_POINT_clear_free(EC_POINT *point); #if defined(__cplusplus) } // extern C #endif // Old code expects to get EC_KEY from ec.h. 
#include "CNIOBoringSSL_ec_key.h" #if defined(__cplusplus) extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(EC_POINT, EC_POINT_free) BORINGSSL_MAKE_DELETER(EC_GROUP, EC_GROUP_free) BSSL_NAMESPACE_END } // extern C++ #endif #define EC_R_BUFFER_TOO_SMALL 100 #define EC_R_COORDINATES_OUT_OF_RANGE 101 #define EC_R_D2I_ECPKPARAMETERS_FAILURE 102 #define EC_R_EC_GROUP_NEW_BY_NAME_FAILURE 103 #define EC_R_GROUP2PKPARAMETERS_FAILURE 104 #define EC_R_I2D_ECPKPARAMETERS_FAILURE 105 #define EC_R_INCOMPATIBLE_OBJECTS 106 #define EC_R_INVALID_COMPRESSED_POINT 107 #define EC_R_INVALID_COMPRESSION_BIT 108 #define EC_R_INVALID_ENCODING 109 #define EC_R_INVALID_FIELD 110 #define EC_R_INVALID_FORM 111 #define EC_R_INVALID_GROUP_ORDER 112 #define EC_R_INVALID_PRIVATE_KEY 113 #define EC_R_MISSING_PARAMETERS 114 #define EC_R_MISSING_PRIVATE_KEY 115 #define EC_R_NON_NAMED_CURVE 116 #define EC_R_NOT_INITIALIZED 117 #define EC_R_PKPARAMETERS2GROUP_FAILURE 118 #define EC_R_POINT_AT_INFINITY 119 #define EC_R_POINT_IS_NOT_ON_CURVE 120 #define EC_R_SLOT_FULL 121 #define EC_R_UNDEFINED_GENERATOR 122 #define EC_R_UNKNOWN_GROUP 123 #define EC_R_UNKNOWN_ORDER 124 #define EC_R_WRONG_ORDER 125 #define EC_R_BIGNUM_OUT_OF_RANGE 126 #define EC_R_WRONG_CURVE_PARAMETERS 127 #define EC_R_DECODE_ERROR 128 #define EC_R_ENCODE_ERROR 129 #define EC_R_GROUP_MISMATCH 130 #define EC_R_INVALID_COFACTOR 131 #define EC_R_PUBLIC_KEY_VALIDATION_FAILED 132 #define EC_R_INVALID_SCALAR 133 #endif // OPENSSL_HEADER_EC_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_ec_key.h ================================================ /* * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_EC_KEY_H #define OPENSSL_HEADER_EC_KEY_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_ec.h" #include "CNIOBoringSSL_engine.h" #include "CNIOBoringSSL_ex_data.h" #if defined(__cplusplus) extern "C" { #endif // ec_key.h contains functions that handle elliptic-curve points that are // public/private keys. // EC key objects. // // An |EC_KEY| object represents a public or private EC key. A given object may // be used concurrently on multiple threads by non-mutating functions, provided // no other thread is concurrently calling a mutating function. Unless otherwise // documented, functions which take a |const| pointer are non-mutating and // functions which take a non-|const| pointer are mutating. // EC_KEY_new returns a fresh |EC_KEY| object or NULL on error. OPENSSL_EXPORT EC_KEY *EC_KEY_new(void); // EC_KEY_new_method acts the same as |EC_KEY_new|, but takes an explicit // |ENGINE|. OPENSSL_EXPORT EC_KEY *EC_KEY_new_method(const ENGINE *engine); // EC_KEY_new_by_curve_name returns a fresh EC_KEY for group specified by |nid| // or NULL on error. OPENSSL_EXPORT EC_KEY *EC_KEY_new_by_curve_name(int nid); // EC_KEY_free frees all the data owned by |key| and |key| itself. OPENSSL_EXPORT void EC_KEY_free(EC_KEY *key); // EC_KEY_dup returns a fresh copy of |src| or NULL on error. OPENSSL_EXPORT EC_KEY *EC_KEY_dup(const EC_KEY *src); // EC_KEY_up_ref increases the reference count of |key| and returns one. It does // not mutate |key| for thread-safety purposes and may be used concurrently. OPENSSL_EXPORT int EC_KEY_up_ref(EC_KEY *key); // EC_KEY_is_opaque returns one if |key| is opaque and doesn't expose its key // material. Otherwise it returns zero. OPENSSL_EXPORT int EC_KEY_is_opaque(const EC_KEY *key); // EC_KEY_get0_group returns a pointer to the |EC_GROUP| object inside |key|.
OPENSSL_EXPORT const EC_GROUP *EC_KEY_get0_group(const EC_KEY *key); // EC_KEY_set_group sets the |EC_GROUP| object that |key| will use to |group|. // It returns one on success and zero if |key| is already configured with a // different group. OPENSSL_EXPORT int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group); // EC_KEY_get0_private_key returns a pointer to the private key inside |key|. OPENSSL_EXPORT const BIGNUM *EC_KEY_get0_private_key(const EC_KEY *key); // EC_KEY_set_private_key sets the private key of |key| to |priv|. It returns // one on success and zero otherwise. |key| must already have had a group // configured (see |EC_KEY_set_group| and |EC_KEY_new_by_curve_name|). OPENSSL_EXPORT int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *priv); // EC_KEY_get0_public_key returns a pointer to the public key point inside // |key|. OPENSSL_EXPORT const EC_POINT *EC_KEY_get0_public_key(const EC_KEY *key); // EC_KEY_set_public_key sets the public key of |key| to |pub|, by copying it. // It returns one on success and zero otherwise. |key| must already have had a // group configured (see |EC_KEY_set_group| and |EC_KEY_new_by_curve_name|), and // |pub| must also belong to that group. OPENSSL_EXPORT int EC_KEY_set_public_key(EC_KEY *key, const EC_POINT *pub); #define EC_PKEY_NO_PARAMETERS 0x001 #define EC_PKEY_NO_PUBKEY 0x002 // EC_KEY_get_enc_flags returns the encoding flags for |key|, which is a // bitwise-OR of |EC_PKEY_*| values. OPENSSL_EXPORT unsigned EC_KEY_get_enc_flags(const EC_KEY *key); // EC_KEY_set_enc_flags sets the encoding flags for |key|, which is a // bitwise-OR of |EC_PKEY_*| values. OPENSSL_EXPORT void EC_KEY_set_enc_flags(EC_KEY *key, unsigned flags); // EC_KEY_get_conv_form returns the conversion form that will be used by // |key|. OPENSSL_EXPORT point_conversion_form_t EC_KEY_get_conv_form(const EC_KEY *key); // EC_KEY_set_conv_form sets the conversion form to be used by |key|.
OPENSSL_EXPORT void EC_KEY_set_conv_form(EC_KEY *key, point_conversion_form_t cform); // EC_KEY_check_key performs several checks on |key| (possibly including an // expensive check that the public key is in the primary subgroup). It returns // one if all checks pass and zero otherwise. If it returns zero then detail // about the problem can be found on the error stack. OPENSSL_EXPORT int EC_KEY_check_key(const EC_KEY *key); // EC_KEY_check_fips performs both a signing pairwise consistency test // (FIPS 140-2 4.9.2) and the consistency test from SP 800-56Ar3 section // 5.6.2.1.4. It returns one if it passes and zero otherwise. OPENSSL_EXPORT int EC_KEY_check_fips(const EC_KEY *key); // EC_KEY_set_public_key_affine_coordinates sets the public key in |key| to // (|x|, |y|). It returns one on success and zero on error. It's considered an // error if |x| and |y| do not represent a point on |key|'s curve. OPENSSL_EXPORT int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, const BIGNUM *x, const BIGNUM *y); // EC_KEY_oct2key decodes |len| bytes from |in| as an EC public key in X9.62 // form. |key| must already have a group configured. On success, it sets the // public key in |key| to the result and returns one. Otherwise, it returns // zero. OPENSSL_EXPORT int EC_KEY_oct2key(EC_KEY *key, const uint8_t *in, size_t len, BN_CTX *ctx); // EC_KEY_key2buf behaves like |EC_POINT_point2buf|, except it encodes the // public key in |key|. OPENSSL_EXPORT size_t EC_KEY_key2buf(const EC_KEY *key, point_conversion_form_t form, uint8_t **out_buf, BN_CTX *ctx); // EC_KEY_oct2priv decodes a big-endian, zero-padded integer from |len| bytes // from |in| and sets |key|'s private key to the result. It returns one on // success and zero on error. The input must be padded to the size of |key|'s // group order. 
OPENSSL_EXPORT int EC_KEY_oct2priv(EC_KEY *key, const uint8_t *in, size_t len); // EC_KEY_priv2oct serializes |key|'s private key as a big-endian integer, // zero-padded to the size of |key|'s group order and writes the result to at // most |max_out| bytes of |out|. It returns the number of bytes written on // success and zero on error. If |out| is NULL, it returns the number of bytes // needed without writing anything. OPENSSL_EXPORT size_t EC_KEY_priv2oct(const EC_KEY *key, uint8_t *out, size_t max_out); // EC_KEY_priv2buf behaves like |EC_KEY_priv2oct| but sets |*out_buf| to a // newly-allocated buffer containing the result. It returns the size of the // result on success and zero on error. The caller must release |*out_buf| with // |OPENSSL_free| when done. OPENSSL_EXPORT size_t EC_KEY_priv2buf(const EC_KEY *key, uint8_t **out_buf); // Key generation. // EC_KEY_generate_key generates a random, private key, calculates the // corresponding public key and stores both in |key|. It returns one on success // or zero otherwise. OPENSSL_EXPORT int EC_KEY_generate_key(EC_KEY *key); // EC_KEY_generate_key_fips behaves like |EC_KEY_generate_key| but performs // additional checks for FIPS compliance. This function is applicable when // generating keys for either signing/verification or key agreement because // both types of consistency check (PCT) are performed. OPENSSL_EXPORT int EC_KEY_generate_key_fips(EC_KEY *key); // EC_KEY_derive_from_secret deterministically derives a private key for |group| // from an input secret using HKDF-SHA256. It returns a newly-allocated |EC_KEY| // on success or NULL on error. |secret| must not be used in any other // algorithm. If using a base secret for multiple operations, derive separate // values with a KDF such as HKDF first. // // Note this function implements an arbitrary derivation scheme, rather than any // particular standard one. 
New protocols are recommended to use X25519 and // Ed25519, which have standard byte import functions. See // |X25519_public_from_private| and |ED25519_keypair_from_seed|. OPENSSL_EXPORT EC_KEY *EC_KEY_derive_from_secret(const EC_GROUP *group, const uint8_t *secret, size_t secret_len); // Serialisation. // EC_KEY_parse_private_key parses a DER-encoded ECPrivateKey structure (RFC // 5915) from |cbs| and advances |cbs|. It returns a newly-allocated |EC_KEY| or // NULL on error. If |group| is non-null, the parameters field of the // ECPrivateKey may be omitted (but must match |group| if present). Otherwise, // the parameters field is required. OPENSSL_EXPORT EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group); // EC_KEY_marshal_private_key marshals |key| as a DER-encoded ECPrivateKey // structure (RFC 5915) and appends the result to |cbb|. It returns one on // success and zero on failure. |enc_flags| is a combination of |EC_PKEY_*| // values and controls whether corresponding fields are omitted. OPENSSL_EXPORT int EC_KEY_marshal_private_key(CBB *cbb, const EC_KEY *key, unsigned enc_flags); // EC_KEY_parse_curve_name parses a DER-encoded OBJECT IDENTIFIER as a curve // name from |cbs| and advances |cbs|. It returns the decoded |EC_GROUP| or NULL // on error. // // This function returns a non-const pointer which may be passed to // |EC_GROUP_free|. However, the resulting object is actually static and calling // |EC_GROUP_free| is optional. // // TODO(davidben): Make this return a const pointer, if it does not break too // many callers. OPENSSL_EXPORT EC_GROUP *EC_KEY_parse_curve_name(CBS *cbs); // EC_KEY_marshal_curve_name marshals |group| as a DER-encoded OBJECT IDENTIFIER // and appends the result to |cbb|. It returns one on success and zero on // failure. OPENSSL_EXPORT int EC_KEY_marshal_curve_name(CBB *cbb, const EC_GROUP *group); // EC_KEY_parse_parameters parses a DER-encoded ECParameters structure (RFC // 5480) from |cbs| and advances |cbs|. 
It returns the resulting |EC_GROUP| or // NULL on error. It supports the namedCurve and specifiedCurve options, but use // of specifiedCurve is deprecated. Use |EC_KEY_parse_curve_name| instead. // // This function returns a non-const pointer which may be passed to // |EC_GROUP_free|. However, the resulting object is actually static and calling // |EC_GROUP_free| is optional. // // TODO(davidben): Make this return a const pointer, if it does not break too // many callers. OPENSSL_EXPORT EC_GROUP *EC_KEY_parse_parameters(CBS *cbs); // ex_data functions. // // These functions are wrappers. See |ex_data.h| for details. OPENSSL_EXPORT int EC_KEY_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int EC_KEY_set_ex_data(EC_KEY *r, int idx, void *arg); OPENSSL_EXPORT void *EC_KEY_get_ex_data(const EC_KEY *r, int idx); // ECDSA method. // ECDSA_FLAG_OPAQUE specifies that this ECDSA_METHOD does not expose its key // material. This may be set if, for instance, it is wrapping some other crypto // API, like a platform key store. #define ECDSA_FLAG_OPAQUE 1 // ecdsa_method_st is a structure of function pointers for implementing ECDSA. // See engine.h. struct ecdsa_method_st { struct openssl_method_common_st common; void *app_data; int (*init)(EC_KEY *key); int (*finish)(EC_KEY *key); // sign matches the arguments and behaviour of |ECDSA_sign|. int (*sign)(const uint8_t *digest, size_t digest_len, uint8_t *sig, unsigned int *sig_len, EC_KEY *eckey); int flags; }; // Deprecated functions. // EC_KEY_set_asn1_flag does nothing. OPENSSL_EXPORT void EC_KEY_set_asn1_flag(EC_KEY *key, int flag); // d2i_ECPrivateKey parses a DER-encoded ECPrivateKey structure (RFC 5915) from // |len| bytes at |*inp|, as described in |d2i_SAMPLE|. On input, if |*out_key| // is non-NULL and has a group configured, the parameters field may be omitted // but must match that group if present. 
// // Use |EC_KEY_parse_private_key| instead. OPENSSL_EXPORT EC_KEY *d2i_ECPrivateKey(EC_KEY **out_key, const uint8_t **inp, long len); // i2d_ECPrivateKey marshals |key| as a DER-encoded ECPrivateKey structure (RFC // 5915), as described in |i2d_SAMPLE|. // // Use |EC_KEY_marshal_private_key| instead. OPENSSL_EXPORT int i2d_ECPrivateKey(const EC_KEY *key, uint8_t **outp); // d2i_ECPKParameters parses a DER-encoded ECParameters structure (RFC 5480) // from |len| bytes at |*inp|, as described in |d2i_SAMPLE|. For legacy reasons, // it recognizes the specifiedCurve form, but only for curves that are already // supported as named curves. // // Use |EC_KEY_parse_parameters| or |EC_KEY_parse_curve_name| instead. OPENSSL_EXPORT EC_GROUP *d2i_ECPKParameters(EC_GROUP **out, const uint8_t **inp, long len); // i2d_ECPKParameters marshals |group| as a DER-encoded ECParameters structure // (RFC 5480), as described in |i2d_SAMPLE|. // // Use |EC_KEY_marshal_curve_name| instead. OPENSSL_EXPORT int i2d_ECPKParameters(const EC_GROUP *group, uint8_t **outp); // d2i_ECParameters parses a DER-encoded ECParameters structure (RFC 5480) from // |len| bytes at |*inp|, as described in |d2i_SAMPLE|. It returns the result as // an |EC_KEY| with parameters, but no key, configured. // // Use |EC_KEY_parse_parameters| or |EC_KEY_parse_curve_name| instead. OPENSSL_EXPORT EC_KEY *d2i_ECParameters(EC_KEY **out_key, const uint8_t **inp, long len); // i2d_ECParameters marshals |key|'s parameters as a DER-encoded OBJECT // IDENTIFIER, as described in |i2d_SAMPLE|. // // Use |EC_KEY_marshal_curve_name| instead. OPENSSL_EXPORT int i2d_ECParameters(const EC_KEY *key, uint8_t **outp); // o2i_ECPublicKey parses an EC point from |len| bytes at |*inp| into // |*out_key|. Note that this differs from the d2i format in that |*out_key| // must be non-NULL with a group set. On successful exit, |*inp| is advanced by // |len| bytes. It returns |*out_key| or NULL on error. // // Use |EC_POINT_oct2point| instead. 
OPENSSL_EXPORT EC_KEY *o2i_ECPublicKey(EC_KEY **out_key, const uint8_t **inp, long len); // i2o_ECPublicKey marshals an EC point from |key|, as described in // |i2d_SAMPLE|, except it returns zero on error instead of a negative value. // // Use |EC_POINT_point2cbb| instead. OPENSSL_EXPORT int i2o_ECPublicKey(const EC_KEY *key, unsigned char **outp); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(EC_KEY, EC_KEY_free) BORINGSSL_MAKE_UP_REF(EC_KEY, EC_KEY_up_ref) BSSL_NAMESPACE_END } // extern C++ #endif #endif // OPENSSL_HEADER_EC_KEY_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_ecdh.h ================================================ /* * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_ECDH_H #define OPENSSL_HEADER_ECDH_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_ec_key.h" #if defined(__cplusplus) extern "C" { #endif // Elliptic curve Diffie-Hellman. // ECDH_compute_key calculates the shared key between |pub_key| and |priv_key|. // If |kdf| is not NULL, then it is called with the bytes of the shared key and // the parameter |out|. When |kdf| returns, the value of |*outlen| becomes the // return value. Otherwise, as many bytes of the shared key as will fit are // copied directly to, at most, |outlen| bytes at |out|. It returns the number // of bytes written to |out|, or -1 on error. 
OPENSSL_EXPORT int ECDH_compute_key( void *out, size_t outlen, const EC_POINT *pub_key, const EC_KEY *priv_key, void *(*kdf)(const void *in, size_t inlen, void *out, size_t *outlen)); // ECDH_compute_key_fips calculates the shared key between |pub_key| and // |priv_key| and hashes it with the appropriate SHA function for |out_len|. The // only valid values for |out_len| are thus 24 (SHA-224), 32 (SHA-256), 48 // (SHA-384), and 64 (SHA-512). It returns one on success and zero on error. // // Note that the return value is different to |ECDH_compute_key|: it returns an // error flag (as is common for BoringSSL) rather than the number of bytes // written. // // This function allows the FIPS module to compute an ECDH and KDF within the // module boundary without taking an arbitrary function pointer for the KDF, // which isn't very FIPSy. OPENSSL_EXPORT int ECDH_compute_key_fips(uint8_t *out, size_t out_len, const EC_POINT *pub_key, const EC_KEY *priv_key); #if defined(__cplusplus) } // extern C #endif #define ECDH_R_KDF_FAILED 100 #define ECDH_R_NO_PRIVATE_VALUE 101 #define ECDH_R_POINT_ARITHMETIC_FAILURE 102 #define ECDH_R_UNKNOWN_DIGEST_LENGTH 103 #endif // OPENSSL_HEADER_ECDH_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_ecdsa.h ================================================ /* * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_ECDSA_H #define OPENSSL_HEADER_ECDSA_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_ec_key.h" #if defined(__cplusplus) extern "C" { #endif // ECDSA contains functions for signing and verifying with the Digital Signature // Algorithm over elliptic curves. // Signing and verifying.
// ECDSA_sign signs |digest_len| bytes from |digest| with |key| and writes the // resulting signature to |sig|, which must have |ECDSA_size(key)| bytes of // space. On successful exit, |*sig_len| is set to the actual number of bytes // written. The |type| argument should be zero. It returns one on success and // zero otherwise. // // WARNING: |digest| must be the output of some hash function on the data to be // signed. Passing unhashed inputs will not result in a secure signature scheme. OPENSSL_EXPORT int ECDSA_sign(int type, const uint8_t *digest, size_t digest_len, uint8_t *sig, unsigned int *sig_len, const EC_KEY *key); // ECDSA_verify verifies that |sig_len| bytes from |sig| constitute a valid // signature by |key| of |digest|. (The |type| argument should be zero.) It // returns one on success or zero if the signature is invalid or an error // occurred. // // WARNING: |digest| must be the output of some hash function on the data to be // verified. Passing unhashed inputs will not result in a secure signature // scheme. OPENSSL_EXPORT int ECDSA_verify(int type, const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, const EC_KEY *key); // ECDSA_size returns the maximum size of an ECDSA signature using |key|. It // returns zero if |key| is NULL or if it doesn't have a group set. OPENSSL_EXPORT size_t ECDSA_size(const EC_KEY *key); // Low-level signing and verification. // // Low-level functions handle signatures as |ECDSA_SIG| structures which allow // the two values in an ECDSA signature to be handled separately. struct ecdsa_sig_st { BIGNUM *r; BIGNUM *s; }; // ECDSA_SIG_new returns a fresh |ECDSA_SIG| structure or NULL on error. OPENSSL_EXPORT ECDSA_SIG *ECDSA_SIG_new(void); // ECDSA_SIG_free frees |sig| its member |BIGNUM|s. OPENSSL_EXPORT void ECDSA_SIG_free(ECDSA_SIG *sig); // ECDSA_SIG_get0_r returns the r component of |sig|. 
OPENSSL_EXPORT const BIGNUM *ECDSA_SIG_get0_r(const ECDSA_SIG *sig); // ECDSA_SIG_get0_s returns the s component of |sig|. OPENSSL_EXPORT const BIGNUM *ECDSA_SIG_get0_s(const ECDSA_SIG *sig); // ECDSA_SIG_get0 sets |*out_r| and |*out_s|, if non-NULL, to the two // components of |sig|. OPENSSL_EXPORT void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **out_r, const BIGNUM **out_s); // ECDSA_SIG_set0 sets |sig|'s components to |r| and |s|, neither of which may // be NULL. On success, it takes ownership of each argument and returns one. // Otherwise, it returns zero. OPENSSL_EXPORT int ECDSA_SIG_set0(ECDSA_SIG *sig, BIGNUM *r, BIGNUM *s); // ECDSA_do_sign signs |digest_len| bytes from |digest| with |key| and returns // the resulting signature structure, or NULL on error. // // WARNING: |digest| must be the output of some hash function on the data to be // signed. Passing unhashed inputs will not result in a secure signature scheme. OPENSSL_EXPORT ECDSA_SIG *ECDSA_do_sign(const uint8_t *digest, size_t digest_len, const EC_KEY *key); // ECDSA_do_verify verifies that |sig| constitutes a valid signature by |key| // of |digest|. It returns one on success or zero if the signature is invalid // or on error. // // WARNING: |digest| must be the output of some hash function on the data to be // verified. Passing unhashed inputs will not result in a secure signature // scheme. OPENSSL_EXPORT int ECDSA_do_verify(const uint8_t *digest, size_t digest_len, const ECDSA_SIG *sig, const EC_KEY *key); // ASN.1 functions. // ECDSA_SIG_parse parses a DER-encoded ECDSA-Sig-Value structure from |cbs| and // advances |cbs|. It returns a newly-allocated |ECDSA_SIG| or NULL on error. OPENSSL_EXPORT ECDSA_SIG *ECDSA_SIG_parse(CBS *cbs); // ECDSA_SIG_from_bytes parses |in| as a DER-encoded ECDSA-Sig-Value structure. // It returns a newly-allocated |ECDSA_SIG| structure or NULL on error. 
OPENSSL_EXPORT ECDSA_SIG *ECDSA_SIG_from_bytes(const uint8_t *in, size_t in_len); // ECDSA_SIG_marshal marshals |sig| as a DER-encoded ECDSA-Sig-Value and appends // the result to |cbb|. It returns one on success and zero on error. OPENSSL_EXPORT int ECDSA_SIG_marshal(CBB *cbb, const ECDSA_SIG *sig); // ECDSA_SIG_to_bytes marshals |sig| as a DER-encoded ECDSA-Sig-Value and, on // success, sets |*out_bytes| to a newly allocated buffer containing the result // and returns one. Otherwise, it returns zero. The result should be freed with // |OPENSSL_free|. OPENSSL_EXPORT int ECDSA_SIG_to_bytes(uint8_t **out_bytes, size_t *out_len, const ECDSA_SIG *sig); // ECDSA_SIG_max_len returns the maximum length of a DER-encoded ECDSA-Sig-Value // structure for a group whose order is represented in |order_len| bytes, or // zero on overflow. OPENSSL_EXPORT size_t ECDSA_SIG_max_len(size_t order_len); // Testing-only functions. // ECDSA_sign_with_nonce_and_leak_private_key_for_testing behaves like // |ECDSA_do_sign| but uses |nonce| for the ECDSA nonce 'k', instead of a random // value. |nonce| is interpreted as a big-endian integer. It must be reduced // modulo the group order and padded with zeros up to |BN_num_bytes(order)| // bytes. // // WARNING: This function is only exported for testing purposes, when using test // vectors or fuzzing strategies. It must not be used outside tests and may leak // any private keys it is used with. OPENSSL_EXPORT ECDSA_SIG * ECDSA_sign_with_nonce_and_leak_private_key_for_testing(const uint8_t *digest, size_t digest_len, const EC_KEY *eckey, const uint8_t *nonce, size_t nonce_len); // Deprecated functions. // d2i_ECDSA_SIG parses a DER-encoded ECDSA-Sig-Value structure from |len| // bytes at |*inp|, as described in |d2i_SAMPLE|. // // Use |ECDSA_SIG_parse| instead.
OPENSSL_EXPORT ECDSA_SIG *d2i_ECDSA_SIG(ECDSA_SIG **out, const uint8_t **inp, long len); // i2d_ECDSA_SIG marshals |sig| as a DER-encoded ECDSA-Sig-Value, as described // in |i2d_SAMPLE|. // // Use |ECDSA_SIG_marshal| instead. OPENSSL_EXPORT int i2d_ECDSA_SIG(const ECDSA_SIG *sig, uint8_t **outp); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(ECDSA_SIG, ECDSA_SIG_free) BSSL_NAMESPACE_END } // extern C++ #endif #define ECDSA_R_BAD_SIGNATURE 100 #define ECDSA_R_MISSING_PARAMETERS 101 #define ECDSA_R_NEED_NEW_SETUP_VALUES 102 #define ECDSA_R_NOT_IMPLEMENTED 103 #define ECDSA_R_RANDOM_NUMBER_GENERATION_FAILED 104 #define ECDSA_R_ENCODE_ERROR 105 #define ECDSA_R_TOO_MANY_ITERATIONS 106 #endif // OPENSSL_HEADER_ECDSA_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_engine.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_ENGINE_H #define OPENSSL_HEADER_ENGINE_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // Engines are collections of methods. 
Methods are tables of function pointers, // defined for certain algorithms, that allow operations on those algorithms to // be overridden via a callback. This can be used, for example, to implement an // RSA* that forwards operations to a hardware module. // // Methods are reference counted but |ENGINE|s are not. When creating a method, // you should zero the whole structure and fill in the function pointers that // you wish before setting it on an |ENGINE|. Any functions pointers that // are NULL indicate that the default behaviour should be used. // Allocation and destruction. // ENGINE_new returns an empty ENGINE that uses the default method for all // algorithms. OPENSSL_EXPORT ENGINE *ENGINE_new(void); // ENGINE_free decrements the reference counts for all methods linked from // |engine| and frees |engine| itself. It returns one. OPENSSL_EXPORT int ENGINE_free(ENGINE *engine); // Method accessors. // // Method accessors take a method pointer and the size of the structure. The // size allows for ABI compatibility in the case that the method structure is // extended with extra elements at the end. Methods are always copied by the // set functions. // // Set functions return one on success and zero on allocation failure. OPENSSL_EXPORT int ENGINE_set_RSA_method(ENGINE *engine, const RSA_METHOD *method, size_t method_size); OPENSSL_EXPORT RSA_METHOD *ENGINE_get_RSA_method(const ENGINE *engine); OPENSSL_EXPORT int ENGINE_set_ECDSA_method(ENGINE *engine, const ECDSA_METHOD *method, size_t method_size); OPENSSL_EXPORT ECDSA_METHOD *ENGINE_get_ECDSA_method(const ENGINE *engine); // Generic method functions. // // These functions take a void* type but actually operate on all method // structures. // METHOD_ref increments the reference count of |method|. This is a no-op for // now because all methods are currently static. void METHOD_ref(void *method); // METHOD_unref decrements the reference count of |method| and frees it if the // reference count drops to zero. 
This is a no-op for now because all methods // are currently static. void METHOD_unref(void *method); // Private functions. // openssl_method_common_st contains the common part of all method structures. // This must be the first member of all method structures. struct openssl_method_common_st { int references; // dummy – not used. char is_static; }; #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(ENGINE, ENGINE_free) BSSL_NAMESPACE_END } // extern C++ #endif #define ENGINE_R_OPERATION_NOT_SUPPORTED 100 #endif // OPENSSL_HEADER_ENGINE_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_err.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_ERR_H #define OPENSSL_HEADER_ERR_H #include <stdio.h> #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // Error queue handling functions. // // Errors in OpenSSL are generally signaled by the return value of a function. // When a function fails it may add an entry to a per-thread error queue, // which is managed by the functions in this header. // // Each error contains: // 1) The library (i.e. ec, pem, rsa) which created it. // 2) The file and line number of the call that added the error. // 3) A pointer to some error specific data, which may be NULL. // // The library identifier and reason code are packed in a uint32_t and there // exist various functions for unpacking it. // // The typical behaviour is that an error will occur deep in a call queue and // that code will push an error onto the error queue.
As the error queue // unwinds, other functions will push their own errors. Thus, the "least // recent" error is the most specific and the other errors will provide a // backtrace of sorts. // Startup and shutdown. // ERR_load_BIO_strings does nothing. // // TODO(fork): remove. libjingle calls this. OPENSSL_EXPORT void ERR_load_BIO_strings(void); // ERR_load_ERR_strings does nothing. OPENSSL_EXPORT void ERR_load_ERR_strings(void); // ERR_load_crypto_strings does nothing. OPENSSL_EXPORT void ERR_load_crypto_strings(void); // ERR_load_RAND_strings does nothing. OPENSSL_EXPORT void ERR_load_RAND_strings(void); // ERR_free_strings does nothing. OPENSSL_EXPORT void ERR_free_strings(void); // Reading and formatting errors. // ERR_GET_LIB returns the library code for the error. This is one of // the |ERR_LIB_*| values. OPENSSL_INLINE int ERR_GET_LIB(uint32_t packed_error) { return (int)((packed_error >> 24) & 0xff); } // ERR_GET_REASON returns the reason code for the error. This is one of // library-specific |LIB_R_*| values where |LIB| is the library (see // |ERR_GET_LIB|). Note that reason codes are specific to the library. OPENSSL_INLINE int ERR_GET_REASON(uint32_t packed_error) { return (int)(packed_error & 0xfff); } // ERR_get_error gets the packed error code for the least recent error and // removes that error from the queue. If there are no errors in the queue then // it returns zero. OPENSSL_EXPORT uint32_t ERR_get_error(void); // ERR_get_error_line acts like |ERR_get_error|, except that the file and line // number of the call that added the error are also returned. OPENSSL_EXPORT uint32_t ERR_get_error_line(const char **file, int *line); // ERR_FLAG_STRING means that the |data| member is a NUL-terminated string that // can be printed. This is always set if |data| is non-NULL. #define ERR_FLAG_STRING 1 // ERR_FLAG_MALLOCED is passed into |ERR_set_error_data| to indicate that |data| // was allocated with |OPENSSL_malloc|. 
// // It is, separately, returned in |*flags| from |ERR_get_error_line_data| to // indicate that |*data| has a non-static lifetime, but this lifetime is still // managed by the library. The caller must not call |OPENSSL_free| or |free| on // |data|. #define ERR_FLAG_MALLOCED 2 // ERR_get_error_line_data acts like |ERR_get_error_line|, but also returns the // error-specific data pointer and flags. The flags are a bitwise-OR of // |ERR_FLAG_*| values. The error-specific data is owned by the error queue // and the pointer becomes invalid after the next call that affects the same // thread's error queue. If |*flags| contains |ERR_FLAG_STRING| then |*data| is // human-readable. OPENSSL_EXPORT uint32_t ERR_get_error_line_data(const char **file, int *line, const char **data, int *flags); // The "peek" functions act like the |ERR_get_error| functions, above, but they // do not remove the error from the queue. OPENSSL_EXPORT uint32_t ERR_peek_error(void); OPENSSL_EXPORT uint32_t ERR_peek_error_line(const char **file, int *line); OPENSSL_EXPORT uint32_t ERR_peek_error_line_data(const char **file, int *line, const char **data, int *flags); // The "peek last" functions act like the "peek" functions, above, except that // they return the most recent error. OPENSSL_EXPORT uint32_t ERR_peek_last_error(void); OPENSSL_EXPORT uint32_t ERR_peek_last_error_line(const char **file, int *line); OPENSSL_EXPORT uint32_t ERR_peek_last_error_line_data(const char **file, int *line, const char **data, int *flags); // ERR_error_string_n generates a human-readable string representing // |packed_error|, places it at |buf|, and returns |buf|. It writes at most // |len| bytes (including the terminating NUL) and truncates the string if // necessary. If |len| is greater than zero then |buf| is always NUL terminated. 
// // The string will have the following format: // // error:[error code]:[library name]:OPENSSL_internal:[reason string] // // error code is an 8 digit hexadecimal number; library name and reason string // are ASCII text. OPENSSL_EXPORT char *ERR_error_string_n(uint32_t packed_error, char *buf, size_t len); // ERR_lib_error_string returns a string representation of the library that // generated |packed_error|, or a placeholder string if the library is // unrecognized. OPENSSL_EXPORT const char *ERR_lib_error_string(uint32_t packed_error); // ERR_reason_error_string returns a string representation of the reason for // |packed_error|, or a placeholder string if the reason is unrecognized. OPENSSL_EXPORT const char *ERR_reason_error_string(uint32_t packed_error); // ERR_lib_symbol_name returns the symbol name of the library that generated // |packed_error|, or NULL if unrecognized. For example, an error from // |ERR_LIB_EVP| would return "EVP". OPENSSL_EXPORT const char *ERR_lib_symbol_name(uint32_t packed_error); // ERR_reason_symbol_name returns the symbol name of the reason for // |packed_error|, or NULL if unrecognized. For example, |ERR_R_INTERNAL_ERROR| // would return "INTERNAL_ERROR". // // Errors from the |ERR_LIB_SYS| library are typically |errno| values and will // return NULL. User-defined errors will also return NULL. OPENSSL_EXPORT const char *ERR_reason_symbol_name(uint32_t packed_error); // ERR_print_errors_callback_t is the type of a function used by // |ERR_print_errors_cb|. It takes a pointer to a human readable string (and // its length) that describes an entry in the error queue. The |ctx| argument // is an opaque pointer given to |ERR_print_errors_cb|. // // It should return one on success or zero on error, which will stop the // iteration over the error queue.
typedef int (*ERR_print_errors_callback_t)(const char *str, size_t len, void *ctx); // ERR_print_errors_cb clears the current thread's error queue, calling // |callback| with a string representation of each error, from the least recent // to the most recent error. // // The string will have the following format (which differs from // |ERR_error_string|): // // [thread id]:error:[error code]:[library name]:OPENSSL_internal:[reason string]:[file]:[line number]:[optional string data] // // The callback can return one to continue the iteration or zero to stop it. // The |ctx| argument is an opaque value that is passed through to the // callback. OPENSSL_EXPORT void ERR_print_errors_cb(ERR_print_errors_callback_t callback, void *ctx); // ERR_print_errors_fp clears the current thread's error queue, printing each // error to |file|. See |ERR_print_errors_cb| for the format. OPENSSL_EXPORT void ERR_print_errors_fp(FILE *file); // Clearing errors. // ERR_clear_error clears the error queue for the current thread. OPENSSL_EXPORT void ERR_clear_error(void); // ERR_set_mark "marks" the most recent error for use with |ERR_pop_to_mark|. // It returns one if an error was marked and zero if there are no errors. OPENSSL_EXPORT int ERR_set_mark(void); // ERR_pop_to_mark removes errors from the most recent to the least recent // until (and not including) a "marked" error. It returns zero if no marked // error was found (and thus all errors were removed) and one otherwise. Errors // are marked using |ERR_set_mark|. OPENSSL_EXPORT int ERR_pop_to_mark(void); // Custom errors. // ERR_get_next_error_library returns a value suitable for passing as the // |library| argument to |ERR_put_error|. This is intended for code that wishes // to push its own, non-standard errors to the error queue. OPENSSL_EXPORT int ERR_get_next_error_library(void); // Built-in library and reason codes. // The following values are built-in library codes. 
enum { ERR_LIB_NONE = 1, ERR_LIB_SYS, ERR_LIB_BN, ERR_LIB_RSA, ERR_LIB_DH, ERR_LIB_EVP, ERR_LIB_BUF, ERR_LIB_OBJ, ERR_LIB_PEM, ERR_LIB_DSA, ERR_LIB_X509, ERR_LIB_ASN1, ERR_LIB_CONF, ERR_LIB_CRYPTO, ERR_LIB_EC, ERR_LIB_SSL, ERR_LIB_BIO, ERR_LIB_PKCS7, ERR_LIB_PKCS8, ERR_LIB_X509V3, ERR_LIB_RAND, ERR_LIB_ENGINE, ERR_LIB_OCSP, ERR_LIB_UI, ERR_LIB_COMP, ERR_LIB_ECDSA, ERR_LIB_ECDH, ERR_LIB_HMAC, ERR_LIB_DIGEST, ERR_LIB_CIPHER, ERR_LIB_HKDF, ERR_LIB_TRUST_TOKEN, ERR_LIB_USER, ERR_NUM_LIBS }; // The following reason codes used to denote an error occurring in another // library. They are sometimes used for a stack trace. #define ERR_R_SYS_LIB ERR_LIB_SYS #define ERR_R_BN_LIB ERR_LIB_BN #define ERR_R_RSA_LIB ERR_LIB_RSA #define ERR_R_DH_LIB ERR_LIB_DH #define ERR_R_EVP_LIB ERR_LIB_EVP #define ERR_R_BUF_LIB ERR_LIB_BUF #define ERR_R_OBJ_LIB ERR_LIB_OBJ #define ERR_R_PEM_LIB ERR_LIB_PEM #define ERR_R_DSA_LIB ERR_LIB_DSA #define ERR_R_X509_LIB ERR_LIB_X509 #define ERR_R_ASN1_LIB ERR_LIB_ASN1 #define ERR_R_CONF_LIB ERR_LIB_CONF #define ERR_R_CRYPTO_LIB ERR_LIB_CRYPTO #define ERR_R_EC_LIB ERR_LIB_EC #define ERR_R_SSL_LIB ERR_LIB_SSL #define ERR_R_BIO_LIB ERR_LIB_BIO #define ERR_R_PKCS7_LIB ERR_LIB_PKCS7 #define ERR_R_PKCS8_LIB ERR_LIB_PKCS8 #define ERR_R_X509V3_LIB ERR_LIB_X509V3 #define ERR_R_RAND_LIB ERR_LIB_RAND #define ERR_R_DSO_LIB ERR_LIB_DSO #define ERR_R_ENGINE_LIB ERR_LIB_ENGINE #define ERR_R_OCSP_LIB ERR_LIB_OCSP #define ERR_R_UI_LIB ERR_LIB_UI #define ERR_R_COMP_LIB ERR_LIB_COMP #define ERR_R_ECDSA_LIB ERR_LIB_ECDSA #define ERR_R_ECDH_LIB ERR_LIB_ECDH #define ERR_R_STORE_LIB ERR_LIB_STORE #define ERR_R_FIPS_LIB ERR_LIB_FIPS #define ERR_R_CMS_LIB ERR_LIB_CMS #define ERR_R_TS_LIB ERR_LIB_TS #define ERR_R_HMAC_LIB ERR_LIB_HMAC #define ERR_R_JPAKE_LIB ERR_LIB_JPAKE #define ERR_R_USER_LIB ERR_LIB_USER #define ERR_R_DIGEST_LIB ERR_LIB_DIGEST #define ERR_R_CIPHER_LIB ERR_LIB_CIPHER #define ERR_R_HKDF_LIB ERR_LIB_HKDF #define ERR_R_TRUST_TOKEN_LIB ERR_LIB_TRUST_TOKEN // The
following values are global reason codes. They may occur in any library. #define ERR_R_FATAL 64 #define ERR_R_MALLOC_FAILURE (1 | ERR_R_FATAL) #define ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED (2 | ERR_R_FATAL) #define ERR_R_PASSED_NULL_PARAMETER (3 | ERR_R_FATAL) #define ERR_R_INTERNAL_ERROR (4 | ERR_R_FATAL) #define ERR_R_OVERFLOW (5 | ERR_R_FATAL) // Deprecated functions. // ERR_remove_state calls |ERR_clear_error|. OPENSSL_EXPORT void ERR_remove_state(unsigned long pid); // ERR_remove_thread_state clears the error queue for the current thread if // |tid| is NULL. Otherwise it calls |assert(0)|, because it's no longer // possible to delete the error queue for other threads. // // Use |ERR_clear_error| instead. Note error queues are deleted automatically on // thread exit. You do not need to call this function to release memory. OPENSSL_EXPORT void ERR_remove_thread_state(const CRYPTO_THREADID *tid); // ERR_func_error_string returns the string "OPENSSL_internal". OPENSSL_EXPORT const char *ERR_func_error_string(uint32_t packed_error); // ERR_error_string behaves like |ERR_error_string_n| but |len| is implicitly // |ERR_ERROR_STRING_BUF_LEN|. // // Additionally, if |buf| is NULL, the error string is placed in a static buffer // which is returned. This is not thread-safe and only exists for backwards // compatibility with legacy callers. The static buffer will be overridden by // calls in other threads. // // Use |ERR_error_string_n| instead. // // TODO(fork): remove this function. OPENSSL_EXPORT char *ERR_error_string(uint32_t packed_error, char *buf); #define ERR_ERROR_STRING_BUF_LEN 120 // ERR_GET_FUNC returns zero. BoringSSL errors do not report a function code. OPENSSL_INLINE int ERR_GET_FUNC(uint32_t packed_error) { (void)packed_error; return 0; } // ERR_TXT_* are provided for compatibility with code that assumes that it's // using OpenSSL. #define ERR_TXT_STRING ERR_FLAG_STRING #define ERR_TXT_MALLOCED ERR_FLAG_MALLOCED // Private functions. 
// ERR_clear_system_error clears the system's error value (i.e. errno). OPENSSL_EXPORT void ERR_clear_system_error(void); // OPENSSL_PUT_ERROR is used by OpenSSL code to add an error to the error // queue. #define OPENSSL_PUT_ERROR(library, reason) \ ERR_put_error(ERR_LIB_##library, 0, reason, __FILE__, __LINE__) // OPENSSL_PUT_SYSTEM_ERROR is used by OpenSSL code to add an error from the // operating system to the error queue. // TODO(fork): include errno. #define OPENSSL_PUT_SYSTEM_ERROR() \ ERR_put_error(ERR_LIB_SYS, 0, 0, __FILE__, __LINE__); // ERR_put_error adds an error to the error queue, dropping the least recent // error if necessary for space reasons. OPENSSL_EXPORT void ERR_put_error(int library, int unused, int reason, const char *file, unsigned line); // ERR_add_error_data takes a variable number (|count|) of const char* // pointers, concatenates them and sets the result as the data on the most // recent error. OPENSSL_EXPORT void ERR_add_error_data(unsigned count, ...); // ERR_add_error_dataf takes a printf-style format and arguments, and sets the // result as the data on the most recent error. OPENSSL_EXPORT void ERR_add_error_dataf(const char *format, ...) OPENSSL_PRINTF_FORMAT_FUNC(1, 2); // ERR_set_error_data sets the data on the most recent error to |data|, which // must be a NUL-terminated string. |flags| must contain |ERR_FLAG_STRING|. If // |flags| contains |ERR_FLAG_MALLOCED|, this function takes ownership of // |data|, which must have been allocated with |OPENSSL_malloc|. Otherwise, it // saves a copy of |data|. // // Note this differs from OpenSSL which, when |ERR_FLAG_MALLOCED| is unset, // saves the pointer as-is and requires it remain valid for the lifetime of the // address space. OPENSSL_EXPORT void ERR_set_error_data(char *data, int flags); // ERR_NUM_ERRORS is one more than the limit of the number of errors in the // queue. 
#define ERR_NUM_ERRORS 16 #define ERR_PACK(lib, reason) \ (((((uint32_t)(lib)) & 0xff) << 24) | ((((uint32_t)(reason)) & 0xfff))) // OPENSSL_DECLARE_ERROR_REASON is used by util/make_errors.h (which generates // the error defines) to recognise that an additional reason value is needed. // This is needed when the reason value is used outside of an // |OPENSSL_PUT_ERROR| macro. The resulting define will be // ${lib}_R_${reason}. #define OPENSSL_DECLARE_ERROR_REASON(lib, reason) #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_ERR_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_evp.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_EVP_H #define OPENSSL_HEADER_EVP_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_evp_errors.h" // IWYU pragma: export #include "CNIOBoringSSL_thread.h" // OpenSSL included digest and cipher functions in this header so we include // them for users that still expect that. // // TODO(fork): clean up callers so that they include what they use. #include "CNIOBoringSSL_aead.h" #include "CNIOBoringSSL_base64.h" #include "CNIOBoringSSL_cipher.h" #include "CNIOBoringSSL_digest.h" #include "CNIOBoringSSL_nid.h" #if defined(__cplusplus) extern "C" { #endif // EVP abstracts over public/private key algorithms. // Public key objects. // // An |EVP_PKEY| object represents a public or private key. A given object may // be used concurrently on multiple threads by non-mutating functions, provided // no other thread is concurrently calling a mutating function. 
Unless otherwise // documented, functions which take a |const| pointer are non-mutating and // functions which take a non-|const| pointer are mutating. // EVP_PKEY_new creates a new, empty public-key object and returns it or NULL // on allocation failure. OPENSSL_EXPORT EVP_PKEY *EVP_PKEY_new(void); // EVP_PKEY_free frees all data referenced by |pkey| and then frees |pkey| // itself. OPENSSL_EXPORT void EVP_PKEY_free(EVP_PKEY *pkey); // EVP_PKEY_up_ref increments the reference count of |pkey| and returns one. It // does not mutate |pkey| for thread-safety purposes and may be used // concurrently. OPENSSL_EXPORT int EVP_PKEY_up_ref(EVP_PKEY *pkey); // EVP_PKEY_is_opaque returns one if |pkey| is opaque. Opaque keys are backed by // custom implementations which do not expose key material and parameters. It is // an error to attempt to duplicate, export, or compare an opaque key. OPENSSL_EXPORT int EVP_PKEY_is_opaque(const EVP_PKEY *pkey); // EVP_PKEY_cmp compares |a| and |b| and returns one if they are equal, zero if // not and a negative number on error. // // WARNING: this differs from the traditional return value of a "cmp" // function. OPENSSL_EXPORT int EVP_PKEY_cmp(const EVP_PKEY *a, const EVP_PKEY *b); // EVP_PKEY_copy_parameters sets the parameters of |to| to equal the parameters // of |from|. It returns one on success and zero on error. OPENSSL_EXPORT int EVP_PKEY_copy_parameters(EVP_PKEY *to, const EVP_PKEY *from); // EVP_PKEY_missing_parameters returns one if |pkey| is missing needed // parameters or zero if not, or if the algorithm doesn't take parameters. OPENSSL_EXPORT int EVP_PKEY_missing_parameters(const EVP_PKEY *pkey); // EVP_PKEY_size returns the maximum size, in bytes, of a signature signed by // |pkey|. For an RSA key, this returns the number of bytes needed to represent // the modulus. For an EC key, this returns the maximum size of a DER-encoded // ECDSA signature. 
OPENSSL_EXPORT int EVP_PKEY_size(const EVP_PKEY *pkey); // EVP_PKEY_bits returns the "size", in bits, of |pkey|. For an RSA key, this // returns the bit length of the modulus. For an EC key, this returns the bit // length of the group order. OPENSSL_EXPORT int EVP_PKEY_bits(const EVP_PKEY *pkey); // EVP_PKEY_id returns the type of |pkey|, which is one of the |EVP_PKEY_*| // values. OPENSSL_EXPORT int EVP_PKEY_id(const EVP_PKEY *pkey); // Getting and setting concrete public key types. // // The following functions get and set the underlying public key in an // |EVP_PKEY| object. The |set1| functions take an additional reference to the // underlying key and return one on success or zero if |key| is NULL. The // |assign| functions adopt the caller's reference and return one on success or // zero if |key| is NULL. The |get1| functions return a fresh reference to the // underlying object or NULL if |pkey| is not of the correct type. The |get0| // functions behave the same but return a non-owning pointer. // // The |get0| and |get1| functions take |const| pointers and are thus // non-mutating for thread-safety purposes, but mutating functions on the // returned lower-level objects are considered to also mutate the |EVP_PKEY| and // may not be called concurrently with other operations on the |EVP_PKEY|. 
OPENSSL_EXPORT int EVP_PKEY_set1_RSA(EVP_PKEY *pkey, RSA *key); OPENSSL_EXPORT int EVP_PKEY_assign_RSA(EVP_PKEY *pkey, RSA *key); OPENSSL_EXPORT RSA *EVP_PKEY_get0_RSA(const EVP_PKEY *pkey); OPENSSL_EXPORT RSA *EVP_PKEY_get1_RSA(const EVP_PKEY *pkey); OPENSSL_EXPORT int EVP_PKEY_set1_DSA(EVP_PKEY *pkey, DSA *key); OPENSSL_EXPORT int EVP_PKEY_assign_DSA(EVP_PKEY *pkey, DSA *key); OPENSSL_EXPORT DSA *EVP_PKEY_get0_DSA(const EVP_PKEY *pkey); OPENSSL_EXPORT DSA *EVP_PKEY_get1_DSA(const EVP_PKEY *pkey); OPENSSL_EXPORT int EVP_PKEY_set1_EC_KEY(EVP_PKEY *pkey, EC_KEY *key); OPENSSL_EXPORT int EVP_PKEY_assign_EC_KEY(EVP_PKEY *pkey, EC_KEY *key); OPENSSL_EXPORT EC_KEY *EVP_PKEY_get0_EC_KEY(const EVP_PKEY *pkey); OPENSSL_EXPORT EC_KEY *EVP_PKEY_get1_EC_KEY(const EVP_PKEY *pkey); OPENSSL_EXPORT int EVP_PKEY_set1_DH(EVP_PKEY *pkey, DH *key); OPENSSL_EXPORT int EVP_PKEY_assign_DH(EVP_PKEY *pkey, DH *key); OPENSSL_EXPORT DH *EVP_PKEY_get0_DH(const EVP_PKEY *pkey); OPENSSL_EXPORT DH *EVP_PKEY_get1_DH(const EVP_PKEY *pkey); #define EVP_PKEY_NONE NID_undef #define EVP_PKEY_RSA NID_rsaEncryption #define EVP_PKEY_RSA_PSS NID_rsassaPss #define EVP_PKEY_DSA NID_dsa #define EVP_PKEY_EC NID_X9_62_id_ecPublicKey #define EVP_PKEY_ED25519 NID_ED25519 #define EVP_PKEY_X25519 NID_X25519 #define EVP_PKEY_HKDF NID_hkdf #define EVP_PKEY_DH NID_dhKeyAgreement // EVP_PKEY_set_type sets the type of |pkey| to |type|. It returns one if // successful or zero if the |type| argument is not one of the |EVP_PKEY_*| // values. If |pkey| is NULL, it simply reports whether the type is known. OPENSSL_EXPORT int EVP_PKEY_set_type(EVP_PKEY *pkey, int type); // EVP_PKEY_cmp_parameters compares the parameters of |a| and |b|. It returns // one if they match, zero if not, or a negative number on error. // // WARNING: the return value differs from the usual return value convention.
OPENSSL_EXPORT int EVP_PKEY_cmp_parameters(const EVP_PKEY *a, const EVP_PKEY *b); // ASN.1 functions // EVP_parse_public_key decodes a DER-encoded SubjectPublicKeyInfo structure // (RFC 5280) from |cbs| and advances |cbs|. It returns a newly-allocated // |EVP_PKEY| or NULL on error. If the key is an EC key, the curve is guaranteed // to be set. // // The caller must check the type of the parsed public key to ensure it is // suitable and validate other desired key properties such as RSA modulus size // or EC curve. OPENSSL_EXPORT EVP_PKEY *EVP_parse_public_key(CBS *cbs); // EVP_marshal_public_key marshals |key| as a DER-encoded SubjectPublicKeyInfo // structure (RFC 5280) and appends the result to |cbb|. It returns one on // success and zero on error. OPENSSL_EXPORT int EVP_marshal_public_key(CBB *cbb, const EVP_PKEY *key); // EVP_parse_private_key decodes a DER-encoded PrivateKeyInfo structure (RFC // 5208) from |cbs| and advances |cbs|. It returns a newly-allocated |EVP_PKEY| // or NULL on error. // // The caller must check the type of the parsed private key to ensure it is // suitable and validate other desired key properties such as RSA modulus size // or EC curve. In particular, RSA private key operations scale cubically, so // applications accepting RSA private keys from external sources may need to // bound key sizes (use |EVP_PKEY_bits| or |RSA_bits|) to avoid a DoS vector. // // A PrivateKeyInfo ends with an optional set of attributes. These are not // processed and so this function will silently ignore any trailing data in the // structure. OPENSSL_EXPORT EVP_PKEY *EVP_parse_private_key(CBS *cbs); // EVP_marshal_private_key marshals |key| as a DER-encoded PrivateKeyInfo // structure (RFC 5208) and appends the result to |cbb|. It returns one on // success and zero on error. OPENSSL_EXPORT int EVP_marshal_private_key(CBB *cbb, const EVP_PKEY *key); // Raw keys // // Some key types support a "raw" serialization.
Currently the only supported // raw formats are X25519 and Ed25519, where the formats are those specified in // RFC 7748 and RFC 8032, respectively. Note the RFC 8032 private key format is // the 32-byte prefix of |ED25519_sign|'s 64-byte private key. // EVP_PKEY_new_raw_private_key returns a newly allocated |EVP_PKEY| wrapping a // private key of the specified type. It returns the key on success and NULL on // error. OPENSSL_EXPORT EVP_PKEY *EVP_PKEY_new_raw_private_key(int type, ENGINE *unused, const uint8_t *in, size_t len); // EVP_PKEY_new_raw_public_key returns a newly allocated |EVP_PKEY| wrapping a // public key of the specified type. It returns the key on success and NULL on // error. OPENSSL_EXPORT EVP_PKEY *EVP_PKEY_new_raw_public_key(int type, ENGINE *unused, const uint8_t *in, size_t len); // EVP_PKEY_get_raw_private_key outputs the private key for |pkey| in raw form. // If |out| is NULL, it sets |*out_len| to the size of the raw private key. // Otherwise, it writes at most |*out_len| bytes to |out| and sets |*out_len| to // the number of bytes written. // // It returns one on success and zero if |pkey| has no private key, the key // type does not support a raw format, or the buffer is too small. OPENSSL_EXPORT int EVP_PKEY_get_raw_private_key(const EVP_PKEY *pkey, uint8_t *out, size_t *out_len); // EVP_PKEY_get_raw_public_key outputs the public key for |pkey| in raw form. // If |out| is NULL, it sets |*out_len| to the size of the raw public key. // Otherwise, it writes at most |*out_len| bytes to |out| and sets |*out_len| to // the number of bytes written. // // It returns one on success and zero if |pkey| has no public key, the key // type does not support a raw format, or the buffer is too small. OPENSSL_EXPORT int EVP_PKEY_get_raw_public_key(const EVP_PKEY *pkey, uint8_t *out, size_t *out_len); // Signing // EVP_DigestSignInit sets up |ctx| for a signing operation with |type| and // |pkey|.
The |ctx| argument must have been initialised with // |EVP_MD_CTX_init|. If |pctx| is not NULL, the |EVP_PKEY_CTX| of the signing // operation will be written to |*pctx|; this can be used to set alternative // signing options. // // For single-shot signing algorithms which do not use a pre-hash, such as // Ed25519, |type| should be NULL. The |EVP_MD_CTX| itself is unused but is // present so the API is uniform. See |EVP_DigestSign|. // // This function does not mutate |pkey| for thread-safety purposes and may be // used concurrently with other non-mutating functions on |pkey|. // // It returns one on success, or zero on error. OPENSSL_EXPORT int EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey); // EVP_DigestSignUpdate appends |len| bytes from |data| to the data which will // be signed in |EVP_DigestSignFinal|. It returns one. // // This function performs a streaming signing operation and will fail for // signature algorithms which do not support this. Use |EVP_DigestSign| for a // single-shot operation. OPENSSL_EXPORT int EVP_DigestSignUpdate(EVP_MD_CTX *ctx, const void *data, size_t len); // EVP_DigestSignFinal signs the data that has been included by one or more // calls to |EVP_DigestSignUpdate|. If |out_sig| is NULL then |*out_sig_len| is // set to the maximum number of output bytes. Otherwise, on entry, // |*out_sig_len| must contain the length of the |out_sig| buffer. If the call // is successful, the signature is written to |out_sig| and |*out_sig_len| is // set to its length. // // This function performs a streaming signing operation and will fail for // signature algorithms which do not support this. Use |EVP_DigestSign| for a // single-shot operation. // // It returns one on success, or zero on error. OPENSSL_EXPORT int EVP_DigestSignFinal(EVP_MD_CTX *ctx, uint8_t *out_sig, size_t *out_sig_len); // EVP_DigestSign signs |data_len| bytes from |data| using |ctx|. 
If |out_sig| // is NULL then |*out_sig_len| is set to the maximum number of output // bytes. Otherwise, on entry, |*out_sig_len| must contain the length of the // |out_sig| buffer. If the call is successful, the signature is written to // |out_sig| and |*out_sig_len| is set to its length. // // It returns one on success and zero on error. OPENSSL_EXPORT int EVP_DigestSign(EVP_MD_CTX *ctx, uint8_t *out_sig, size_t *out_sig_len, const uint8_t *data, size_t data_len); // Verifying // EVP_DigestVerifyInit sets up |ctx| for a signature verification operation // with |type| and |pkey|. The |ctx| argument must have been initialised with // |EVP_MD_CTX_init|. If |pctx| is not NULL, the |EVP_PKEY_CTX| of the signing // operation will be written to |*pctx|; this can be used to set alternative // signing options. // // For single-shot signing algorithms which do not use a pre-hash, such as // Ed25519, |type| should be NULL. The |EVP_MD_CTX| itself is unused but is // present so the API is uniform. See |EVP_DigestVerify|. // // This function does not mutate |pkey| for thread-safety purposes and may be // used concurrently with other non-mutating functions on |pkey|. // // It returns one on success, or zero on error. OPENSSL_EXPORT int EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey); // EVP_DigestVerifyUpdate appends |len| bytes from |data| to the data which // will be verified by |EVP_DigestVerifyFinal|. It returns one. // // This function performs streaming signature verification and will fail for // signature algorithms which do not support this. Use |EVP_PKEY_verify_message| // for a single-shot verification. OPENSSL_EXPORT int EVP_DigestVerifyUpdate(EVP_MD_CTX *ctx, const void *data, size_t len); // EVP_DigestVerifyFinal verifies that |sig_len| bytes of |sig| are a valid // signature for the data that has been included by one or more calls to // |EVP_DigestVerifyUpdate|. It returns one on success and zero otherwise. 
// // This function performs streaming signature verification and will fail for // signature algorithms which do not support this. Use |EVP_PKEY_verify_message| // for a single-shot verification. OPENSSL_EXPORT int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const uint8_t *sig, size_t sig_len); // EVP_DigestVerify verifies that |sig_len| bytes from |sig| are a valid // signature for |data|. It returns one on success or zero on error. OPENSSL_EXPORT int EVP_DigestVerify(EVP_MD_CTX *ctx, const uint8_t *sig, size_t sig_len, const uint8_t *data, size_t len); // Signing (old functions) // EVP_SignInit_ex configures |ctx|, which must already have been initialised, // for a fresh signing operation using the hash function |type|. It returns one // on success and zero otherwise. // // (In order to initialise |ctx|, either obtain it initialised with // |EVP_MD_CTX_create|, or use |EVP_MD_CTX_init|.) OPENSSL_EXPORT int EVP_SignInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl); // EVP_SignInit is a deprecated version of |EVP_SignInit_ex|. // // TODO(fork): remove. OPENSSL_EXPORT int EVP_SignInit(EVP_MD_CTX *ctx, const EVP_MD *type); // EVP_SignUpdate appends |len| bytes from |data| to the data which will be // signed in |EVP_SignFinal|. OPENSSL_EXPORT int EVP_SignUpdate(EVP_MD_CTX *ctx, const void *data, size_t len); // EVP_SignFinal signs the data that has been included by one or more calls to // |EVP_SignUpdate|, using the key |pkey|, and writes it to |sig|. On entry, // |sig| must point to at least |EVP_PKEY_size(pkey)| bytes of space. The // actual size of the signature is written to |*out_sig_len|. // // It returns one on success and zero otherwise. // // It does not modify |ctx|, thus it's possible to continue to use |ctx| in // order to sign a longer message. It also does not mutate |pkey| for // thread-safety purposes and may be used concurrently with other non-mutating // functions on |pkey|. 
OPENSSL_EXPORT int EVP_SignFinal(const EVP_MD_CTX *ctx, uint8_t *sig, unsigned int *out_sig_len, EVP_PKEY *pkey); // Verifying (old functions) // EVP_VerifyInit_ex configures |ctx|, which must already have been // initialised, for a fresh signature verification operation using the hash // function |type|. It returns one on success and zero otherwise. // // (In order to initialise |ctx|, either obtain it initialised with // |EVP_MD_CTX_create|, or use |EVP_MD_CTX_init|.) OPENSSL_EXPORT int EVP_VerifyInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl); // EVP_VerifyInit is a deprecated version of |EVP_VerifyInit_ex|. // // TODO(fork): remove. OPENSSL_EXPORT int EVP_VerifyInit(EVP_MD_CTX *ctx, const EVP_MD *type); // EVP_VerifyUpdate appends |len| bytes from |data| to the data which will be // signed in |EVP_VerifyFinal|. OPENSSL_EXPORT int EVP_VerifyUpdate(EVP_MD_CTX *ctx, const void *data, size_t len); // EVP_VerifyFinal verifies that |sig_len| bytes of |sig| are a valid // signature, by |pkey|, for the data that has been included by one or more // calls to |EVP_VerifyUpdate|. // // It returns one on success and zero otherwise. // // It does not modify |ctx|, thus it's possible to continue to use |ctx| in // order to verify a longer message. It also does not mutate |pkey| for // thread-safety purposes and may be used concurrently with other non-mutating // functions on |pkey|. OPENSSL_EXPORT int EVP_VerifyFinal(EVP_MD_CTX *ctx, const uint8_t *sig, size_t sig_len, EVP_PKEY *pkey); // Printing // EVP_PKEY_print_public prints a textual representation of the public key in // |pkey| to |out|. Returns one on success or zero otherwise. OPENSSL_EXPORT int EVP_PKEY_print_public(BIO *out, const EVP_PKEY *pkey, int indent, ASN1_PCTX *pctx); // EVP_PKEY_print_private prints a textual representation of the private key in // |pkey| to |out|. Returns one on success or zero otherwise. 
OPENSSL_EXPORT int EVP_PKEY_print_private(BIO *out, const EVP_PKEY *pkey, int indent, ASN1_PCTX *pctx); // EVP_PKEY_print_params prints a textual representation of the parameters in // |pkey| to |out|. Returns one on success or zero otherwise. OPENSSL_EXPORT int EVP_PKEY_print_params(BIO *out, const EVP_PKEY *pkey, int indent, ASN1_PCTX *pctx); // Password stretching. // // Password stretching functions take a low-entropy password and apply a slow // function that results in a key suitable for use in symmetric // cryptography. // PKCS5_PBKDF2_HMAC computes |iterations| iterations of PBKDF2 of |password| // and |salt|, using |digest|, and outputs |key_len| bytes to |out_key|. It // returns one on success and zero on allocation failure or if iterations is 0. OPENSSL_EXPORT int PKCS5_PBKDF2_HMAC(const char *password, size_t password_len, const uint8_t *salt, size_t salt_len, uint32_t iterations, const EVP_MD *digest, size_t key_len, uint8_t *out_key); // PKCS5_PBKDF2_HMAC_SHA1 is the same as PKCS5_PBKDF2_HMAC, but with |digest| // fixed to |EVP_sha1|. OPENSSL_EXPORT int PKCS5_PBKDF2_HMAC_SHA1(const char *password, size_t password_len, const uint8_t *salt, size_t salt_len, uint32_t iterations, size_t key_len, uint8_t *out_key); // EVP_PBE_scrypt expands |password| into a secret key of length |key_len| using // scrypt, as described in RFC 7914, and writes the result to |out_key|. It // returns one on success and zero on allocation failure, if the memory required // for the operation exceeds |max_mem|, or if any of the parameters are invalid // as described below. // // |N|, |r|, and |p| are as described in RFC 7914 section 6. They determine the // cost of the operation. If |max_mem| is zero, a default limit of 32MiB will be // used. 
// // The parameters are considered invalid under any of the following conditions: // - |r| or |p| are zero // - |p| > (2^30 - 1) / |r| // - |N| is not a power of two // - |N| > 2^32 // - |N| > 2^(128 * |r| / 8) OPENSSL_EXPORT int EVP_PBE_scrypt(const char *password, size_t password_len, const uint8_t *salt, size_t salt_len, uint64_t N, uint64_t r, uint64_t p, size_t max_mem, uint8_t *out_key, size_t key_len); // Public key contexts. // // |EVP_PKEY_CTX| objects hold the context of an operation (e.g. signing or // encrypting) that uses a public key. // EVP_PKEY_CTX_new allocates a fresh |EVP_PKEY_CTX| for use with |pkey|. It // returns the context or NULL on error. OPENSSL_EXPORT EVP_PKEY_CTX *EVP_PKEY_CTX_new(EVP_PKEY *pkey, ENGINE *e); // EVP_PKEY_CTX_new_id allocates a fresh |EVP_PKEY_CTX| for a key of type |id| // (e.g. |EVP_PKEY_HMAC|). This can be used for key generation where // |EVP_PKEY_CTX_new| can't be used because there isn't an |EVP_PKEY| to pass // it. It returns the context or NULL on error. OPENSSL_EXPORT EVP_PKEY_CTX *EVP_PKEY_CTX_new_id(int id, ENGINE *e); // EVP_PKEY_CTX_free frees |ctx| and the data it owns. OPENSSL_EXPORT void EVP_PKEY_CTX_free(EVP_PKEY_CTX *ctx); // EVP_PKEY_CTX_dup allocates a fresh |EVP_PKEY_CTX| and sets it equal to the // state of |ctx|. It returns the fresh |EVP_PKEY_CTX| or NULL on error. OPENSSL_EXPORT EVP_PKEY_CTX *EVP_PKEY_CTX_dup(EVP_PKEY_CTX *ctx); // EVP_PKEY_CTX_get0_pkey returns the |EVP_PKEY| associated with |ctx|. OPENSSL_EXPORT EVP_PKEY *EVP_PKEY_CTX_get0_pkey(EVP_PKEY_CTX *ctx); // EVP_PKEY_sign_init initialises an |EVP_PKEY_CTX| for a signing operation. It // should be called before |EVP_PKEY_sign|. // // It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_sign_init(EVP_PKEY_CTX *ctx); // EVP_PKEY_sign signs |digest_len| bytes from |digest| using |ctx|. If |sig| is // NULL, the maximum size of the signature is written to |out_sig_len|. 
// Otherwise, |*sig_len| must contain the number of bytes of space available at // |sig|. If sufficient, the signature will be written to |sig| and |*sig_len| // updated with the true length. This function will fail for signature // algorithms like Ed25519 that do not support signing pre-hashed inputs. // // WARNING: |digest| must be the output of some hash function on the data to be // signed. Passing unhashed inputs will not result in a secure signature scheme. // Use |EVP_DigestSignInit| to sign an unhashed input. // // WARNING: Setting |sig| to NULL only gives the maximum size of the // signature. The actual signature may be smaller. // // It returns one on success or zero on error. (Note: this differs from // OpenSSL, which can also return negative values to indicate an error. ) OPENSSL_EXPORT int EVP_PKEY_sign(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *sig_len, const uint8_t *digest, size_t digest_len); // EVP_PKEY_verify_init initialises an |EVP_PKEY_CTX| for a signature // verification operation. It should be called before |EVP_PKEY_verify|. // // It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_verify_init(EVP_PKEY_CTX *ctx); // EVP_PKEY_verify verifies that |sig_len| bytes from |sig| are a valid // signature for |digest|. This function will fail for signature // algorithms like Ed25519 that do not support signing pre-hashed inputs. // // WARNING: |digest| must be the output of some hash function on the data to be // verified. Passing unhashed inputs will not result in a secure signature // scheme. Use |EVP_DigestVerifyInit| to verify a signature given the unhashed // input. // // It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_verify(EVP_PKEY_CTX *ctx, const uint8_t *sig, size_t sig_len, const uint8_t *digest, size_t digest_len); // EVP_PKEY_encrypt_init initialises an |EVP_PKEY_CTX| for an encryption // operation. It should be called before |EVP_PKEY_encrypt|. // // It returns one on success or zero on error. 
OPENSSL_EXPORT int EVP_PKEY_encrypt_init(EVP_PKEY_CTX *ctx); // EVP_PKEY_encrypt encrypts |in_len| bytes from |in|. If |out| is NULL, the // maximum size of the ciphertext is written to |out_len|. Otherwise, |*out_len| // must contain the number of bytes of space available at |out|. If sufficient, // the ciphertext will be written to |out| and |*out_len| updated with the true // length. // // WARNING: Setting |out| to NULL only gives the maximum size of the // ciphertext. The actual ciphertext may be smaller. // // It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_encrypt(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *out_len, const uint8_t *in, size_t in_len); // EVP_PKEY_decrypt_init initialises an |EVP_PKEY_CTX| for a decryption // operation. It should be called before |EVP_PKEY_decrypt|. // // It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_decrypt_init(EVP_PKEY_CTX *ctx); // EVP_PKEY_decrypt decrypts |in_len| bytes from |in|. If |out| is NULL, the // maximum size of the plaintext is written to |out_len|. Otherwise, |*out_len| // must contain the number of bytes of space available at |out|. If sufficient, // the ciphertext will be written to |out| and |*out_len| updated with the true // length. // // WARNING: Setting |out| to NULL only gives the maximum size of the // plaintext. The actual plaintext may be smaller. // // It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_decrypt(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *out_len, const uint8_t *in, size_t in_len); // EVP_PKEY_verify_recover_init initialises an |EVP_PKEY_CTX| for a public-key // decryption operation. It should be called before |EVP_PKEY_verify_recover|. // // Public-key decryption is a very obscure operation that is only implemented // by RSA keys. It is effectively a signature verification operation that // returns the signed message directly. It is almost certainly not what you // want. // // It returns one on success or zero on error. 
OPENSSL_EXPORT int EVP_PKEY_verify_recover_init(EVP_PKEY_CTX *ctx); // EVP_PKEY_verify_recover decrypts |sig_len| bytes from |sig|. If |out| is // NULL, the maximum size of the plaintext is written to |out_len|. Otherwise, // |*out_len| must contain the number of bytes of space available at |out|. If // sufficient, the ciphertext will be written to |out| and |*out_len| updated // with the true length. // // WARNING: Setting |out| to NULL only gives the maximum size of the // plaintext. The actual plaintext may be smaller. // // See the warning about this operation in |EVP_PKEY_verify_recover_init|. It // is probably not what you want. // // It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_verify_recover(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *out_len, const uint8_t *sig, size_t siglen); // EVP_PKEY_derive_init initialises an |EVP_PKEY_CTX| for a key derivation // operation. It should be called before |EVP_PKEY_derive_set_peer| and // |EVP_PKEY_derive|. // // It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_derive_init(EVP_PKEY_CTX *ctx); // EVP_PKEY_derive_set_peer sets the peer's key to be used for key derivation // by |ctx| to |peer|. It should be called after |EVP_PKEY_derive_init|. (For // example, this is used to set the peer's key in (EC)DH.) It returns one on // success and zero on error. OPENSSL_EXPORT int EVP_PKEY_derive_set_peer(EVP_PKEY_CTX *ctx, EVP_PKEY *peer); // EVP_PKEY_derive derives a shared key from |ctx|. If |key| is non-NULL then, // on entry, |out_key_len| must contain the amount of space at |key|. If // sufficient then the shared key will be written to |key| and |*out_key_len| // will be set to the length. If |key| is NULL then |out_key_len| will be set to // the maximum length. // // WARNING: Setting |out| to NULL only gives the maximum size of the key. The // actual key may be smaller. // // It returns one on success and zero on error. 
OPENSSL_EXPORT int EVP_PKEY_derive(EVP_PKEY_CTX *ctx, uint8_t *key, size_t *out_key_len); // EVP_PKEY_keygen_init initialises an |EVP_PKEY_CTX| for a key generation // operation. It should be called before |EVP_PKEY_keygen|. // // It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_keygen_init(EVP_PKEY_CTX *ctx); // EVP_PKEY_keygen performs a key generation operation using the values from // |ctx|. If |*out_pkey| is non-NULL, it overwrites |*out_pkey| with the // resulting key. Otherwise, it sets |*out_pkey| to a newly-allocated |EVP_PKEY| // containing the result. It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_keygen(EVP_PKEY_CTX *ctx, EVP_PKEY **out_pkey); // EVP_PKEY_paramgen_init initialises an |EVP_PKEY_CTX| for a parameter // generation operation. It should be called before |EVP_PKEY_paramgen|. // // It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_paramgen_init(EVP_PKEY_CTX *ctx); // EVP_PKEY_paramgen performs a parameter generation using the values from // |ctx|. If |*out_pkey| is non-NULL, it overwrites |*out_pkey| with the // resulting parameters, but no key. Otherwise, it sets |*out_pkey| to a // newly-allocated |EVP_PKEY| containing the result. It returns one on success // or zero on error. OPENSSL_EXPORT int EVP_PKEY_paramgen(EVP_PKEY_CTX *ctx, EVP_PKEY **out_pkey); // Generic control functions. // EVP_PKEY_CTX_set_signature_md sets |md| as the digest to be used in a // signature operation. It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_set_signature_md(EVP_PKEY_CTX *ctx, const EVP_MD *md); // EVP_PKEY_CTX_get_signature_md sets |*out_md| to the digest to be used in a // signature operation. It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_get_signature_md(EVP_PKEY_CTX *ctx, const EVP_MD **out_md); // RSA specific control functions. // EVP_PKEY_CTX_set_rsa_padding sets the padding type to use. 
It should be one // of the |RSA_*_PADDING| values. Returns one on success or zero on error. By // default, the padding is |RSA_PKCS1_PADDING|. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_padding(EVP_PKEY_CTX *ctx, int padding); // EVP_PKEY_CTX_get_rsa_padding sets |*out_padding| to the current padding // value, which is one of the |RSA_*_PADDING| values. Returns one on success or // zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_get_rsa_padding(EVP_PKEY_CTX *ctx, int *out_padding); // EVP_PKEY_CTX_set_rsa_pss_saltlen sets the length of the salt in a PSS-padded // signature. A value of -1 causes the salt to be the same length as the digest // in the signature. A value of -2 causes the salt to be the maximum length // that will fit when signing and recovered from the signature when verifying. // Otherwise the value gives the size of the salt in bytes. // // If unsure, use -1. // // Returns one on success or zero on error. // // TODO(davidben): The default is currently -2. Switch it to -1. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_pss_saltlen(EVP_PKEY_CTX *ctx, int salt_len); // EVP_PKEY_CTX_get_rsa_pss_saltlen sets |*out_salt_len| to the salt length of // a PSS-padded signature. See the documentation for // |EVP_PKEY_CTX_set_rsa_pss_saltlen| for details of the special values that it // can take. // // Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_get_rsa_pss_saltlen(EVP_PKEY_CTX *ctx, int *out_salt_len); // EVP_PKEY_CTX_set_rsa_keygen_bits sets the size of the desired RSA modulus, // in bits, for key generation. Returns one on success or zero on // error. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_keygen_bits(EVP_PKEY_CTX *ctx, int bits); // EVP_PKEY_CTX_set_rsa_keygen_pubexp sets |e| as the public exponent for key // generation. Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_keygen_pubexp(EVP_PKEY_CTX *ctx, BIGNUM *e); // EVP_PKEY_CTX_set_rsa_oaep_md sets |md| as the digest used in OAEP padding. 
// Returns one on success or zero on error. If unset, the default is SHA-1. // Callers are recommended to overwrite this default. // // TODO(davidben): Remove the default and require callers specify this. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_oaep_md(EVP_PKEY_CTX *ctx, const EVP_MD *md); // EVP_PKEY_CTX_get_rsa_oaep_md sets |*out_md| to the digest function used in // OAEP padding. Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_get_rsa_oaep_md(EVP_PKEY_CTX *ctx, const EVP_MD **out_md); // EVP_PKEY_CTX_set_rsa_mgf1_md sets |md| as the digest used in MGF1. Returns // one on success or zero on error. // // If unset, the default is the signing hash for |RSA_PKCS1_PSS_PADDING| and the // OAEP hash for |RSA_PKCS1_OAEP_PADDING|. Callers are recommended to use this // default and not call this function. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_mgf1_md(EVP_PKEY_CTX *ctx, const EVP_MD *md); // EVP_PKEY_CTX_get_rsa_mgf1_md sets |*out_md| to the digest function used in // MGF1. Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_get_rsa_mgf1_md(EVP_PKEY_CTX *ctx, const EVP_MD **out_md); // EVP_PKEY_CTX_set0_rsa_oaep_label sets |label_len| bytes from |label| as the // label used in OAEP. DANGER: On success, this call takes ownership of |label| // and will call |OPENSSL_free| on it when |ctx| is destroyed. // // Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_set0_rsa_oaep_label(EVP_PKEY_CTX *ctx, uint8_t *label, size_t label_len); // EVP_PKEY_CTX_get0_rsa_oaep_label sets |*out_label| to point to the internal // buffer containing the OAEP label (which may be NULL) and returns the length // of the label or a negative value on error. // // WARNING: the return value differs from the usual return value convention. OPENSSL_EXPORT int EVP_PKEY_CTX_get0_rsa_oaep_label(EVP_PKEY_CTX *ctx, const uint8_t **out_label); // EC specific control functions. 
// EVP_PKEY_CTX_set_ec_paramgen_curve_nid sets the curve used for // |EVP_PKEY_keygen| or |EVP_PKEY_paramgen| operations to |nid|. It returns one // on success and zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_set_ec_paramgen_curve_nid(EVP_PKEY_CTX *ctx, int nid); // Diffie-Hellman-specific control functions. // EVP_PKEY_CTX_set_dh_pad configures how |ctx|, which must be an // |EVP_PKEY_derive| operation, handles leading zeros in the // Diffie-Hellman shared secret. If |pad| is zero, leading zeros are removed // from the secret. If |pad| is non-zero, the fixed-width shared secret is used // unmodified, as in PKCS #3. If this function is not called, the default is to // remove leading zeros. // // WARNING: The behavior when |pad| is zero leaks information about the shared // secret. This may result in side channel attacks such as // https://raccoon-attack.com/, particularly when the same private key is used // for multiple operations. OPENSSL_EXPORT int EVP_PKEY_CTX_set_dh_pad(EVP_PKEY_CTX *ctx, int pad); // Deprecated functions. // EVP_PKEY_RSA2 was historically an alternate form for RSA public keys (OID // 2.5.8.1.1), but is no longer accepted. #define EVP_PKEY_RSA2 NID_rsa // EVP_PKEY_X448 is defined for OpenSSL compatibility, but we do not support // X448 and attempts to create keys will fail. #define EVP_PKEY_X448 NID_X448 // EVP_PKEY_ED448 is defined for OpenSSL compatibility, but we do not support // Ed448 and attempts to create keys will fail. #define EVP_PKEY_ED448 NID_ED448 // EVP_PKEY_get0 returns NULL. This function is provided for compatibility with // OpenSSL but does not return anything. Use the typed |EVP_PKEY_get0_*| // functions instead. OPENSSL_EXPORT void *EVP_PKEY_get0(const EVP_PKEY *pkey); // OpenSSL_add_all_algorithms does nothing. OPENSSL_EXPORT void OpenSSL_add_all_algorithms(void); // OPENSSL_add_all_algorithms_conf does nothing. 
OPENSSL_EXPORT void OPENSSL_add_all_algorithms_conf(void); // OpenSSL_add_all_ciphers does nothing. OPENSSL_EXPORT void OpenSSL_add_all_ciphers(void); // OpenSSL_add_all_digests does nothing. OPENSSL_EXPORT void OpenSSL_add_all_digests(void); // EVP_cleanup does nothing. OPENSSL_EXPORT void EVP_cleanup(void); OPENSSL_EXPORT void EVP_CIPHER_do_all_sorted( void (*callback)(const EVP_CIPHER *cipher, const char *name, const char *unused, void *arg), void *arg); OPENSSL_EXPORT void EVP_MD_do_all_sorted(void (*callback)(const EVP_MD *cipher, const char *name, const char *unused, void *arg), void *arg); OPENSSL_EXPORT void EVP_MD_do_all(void (*callback)(const EVP_MD *cipher, const char *name, const char *unused, void *arg), void *arg); // i2d_PrivateKey marshals a private key from |key| to type-specific format, as // described in |i2d_SAMPLE|. // // RSA keys are serialized as a DER-encoded RSAPublicKey (RFC 8017) structure. // EC keys are serialized as a DER-encoded ECPrivateKey (RFC 5915) structure. // // Use |RSA_marshal_private_key| or |EC_KEY_marshal_private_key| instead. OPENSSL_EXPORT int i2d_PrivateKey(const EVP_PKEY *key, uint8_t **outp); // i2d_PublicKey marshals a public key from |key| to a type-specific format, as // described in |i2d_SAMPLE|. // // RSA keys are serialized as a DER-encoded RSAPublicKey (RFC 8017) structure. // EC keys are serialized as an EC point per SEC 1. // // Use |RSA_marshal_public_key| or |EC_POINT_point2cbb| instead. OPENSSL_EXPORT int i2d_PublicKey(const EVP_PKEY *key, uint8_t **outp); // d2i_PrivateKey parses a DER-encoded private key from |len| bytes at |*inp|, // as described in |d2i_SAMPLE|. The private key must have type |type|, // otherwise it will be rejected. // // This function tries to detect one of several formats. Instead, use // |EVP_parse_private_key| for a PrivateKeyInfo, |RSA_parse_private_key| for an // RSAPrivateKey, and |EC_parse_private_key| for an ECPrivateKey. 
OPENSSL_EXPORT EVP_PKEY *d2i_PrivateKey(int type, EVP_PKEY **out, const uint8_t **inp, long len); // d2i_AutoPrivateKey acts the same as |d2i_PrivateKey|, but detects the type // of the private key. // // This function tries to detect one of several formats. Instead, use // |EVP_parse_private_key| for a PrivateKeyInfo, |RSA_parse_private_key| for an // RSAPrivateKey, and |EC_parse_private_key| for an ECPrivateKey. OPENSSL_EXPORT EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **out, const uint8_t **inp, long len); // d2i_PublicKey parses a public key from |len| bytes at |*inp| in a type- // specific format specified by |type|, as described in |d2i_SAMPLE|. // // The only supported value for |type| is |EVP_PKEY_RSA|, which parses a // DER-encoded RSAPublicKey (RFC 8017) structure. Parsing EC keys is not // supported by this function. // // Use |RSA_parse_public_key| instead. OPENSSL_EXPORT EVP_PKEY *d2i_PublicKey(int type, EVP_PKEY **out, const uint8_t **inp, long len); // EVP_PKEY_CTX_set_ec_param_enc returns one if |encoding| is // |OPENSSL_EC_NAMED_CURVE| or zero with an error otherwise. OPENSSL_EXPORT int EVP_PKEY_CTX_set_ec_param_enc(EVP_PKEY_CTX *ctx, int encoding); // EVP_PKEY_set1_tls_encodedpoint replaces |pkey| with a public key encoded by // |in|. It returns one on success and zero on error. // // If |pkey| is an EC key, the format is an X9.62 point and |pkey| must already // have an EC group configured. If it is an X25519 key, it is the 32-byte X25519 // public key representation. This function is not supported for other key types // and will fail. OPENSSL_EXPORT int EVP_PKEY_set1_tls_encodedpoint(EVP_PKEY *pkey, const uint8_t *in, size_t len); // EVP_PKEY_get1_tls_encodedpoint sets |*out_ptr| to a newly-allocated buffer // containing the raw encoded public key for |pkey|. The caller must call // |OPENSSL_free| to release this buffer. The function returns the length of the // buffer on success and zero on error. 
// // If |pkey| is an EC key, the format is an X9.62 point with uncompressed // coordinates. If it is an X25519 key, it is the 32-byte X25519 public key // representation. This function is not supported for other key types and will // fail. OPENSSL_EXPORT size_t EVP_PKEY_get1_tls_encodedpoint(const EVP_PKEY *pkey, uint8_t **out_ptr); // EVP_PKEY_base_id calls |EVP_PKEY_id|. OPENSSL_EXPORT int EVP_PKEY_base_id(const EVP_PKEY *pkey); // EVP_PKEY_CTX_set_rsa_pss_keygen_md returns 0. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_pss_keygen_md(EVP_PKEY_CTX *ctx, const EVP_MD *md); // EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen returns 0. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen(EVP_PKEY_CTX *ctx, int salt_len); // EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md returns 0. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md(EVP_PKEY_CTX *ctx, const EVP_MD *md); // i2d_PUBKEY marshals |pkey| as a DER-encoded SubjectPublicKeyInfo, as // described in |i2d_SAMPLE|. // // Use |EVP_marshal_public_key| instead. OPENSSL_EXPORT int i2d_PUBKEY(const EVP_PKEY *pkey, uint8_t **outp); // d2i_PUBKEY parses a DER-encoded SubjectPublicKeyInfo from |len| bytes at // |*inp|, as described in |d2i_SAMPLE|. // // Use |EVP_parse_public_key| instead. OPENSSL_EXPORT EVP_PKEY *d2i_PUBKEY(EVP_PKEY **out, const uint8_t **inp, long len); // i2d_RSA_PUBKEY marshals |rsa| as a DER-encoded SubjectPublicKeyInfo // structure, as described in |i2d_SAMPLE|. // // Use |EVP_marshal_public_key| instead. OPENSSL_EXPORT int i2d_RSA_PUBKEY(const RSA *rsa, uint8_t **outp); // d2i_RSA_PUBKEY parses an RSA public key as a DER-encoded SubjectPublicKeyInfo // from |len| bytes at |*inp|, as described in |d2i_SAMPLE|. // SubjectPublicKeyInfo structures containing other key types are rejected. // // Use |EVP_parse_public_key| instead. 
OPENSSL_EXPORT RSA *d2i_RSA_PUBKEY(RSA **out, const uint8_t **inp, long len); // i2d_DSA_PUBKEY marshals |dsa| as a DER-encoded SubjectPublicKeyInfo, as // described in |i2d_SAMPLE|. // // Use |EVP_marshal_public_key| instead. OPENSSL_EXPORT int i2d_DSA_PUBKEY(const DSA *dsa, uint8_t **outp); // d2i_DSA_PUBKEY parses a DSA public key as a DER-encoded SubjectPublicKeyInfo // from |len| bytes at |*inp|, as described in |d2i_SAMPLE|. // SubjectPublicKeyInfo structures containing other key types are rejected. // // Use |EVP_parse_public_key| instead. OPENSSL_EXPORT DSA *d2i_DSA_PUBKEY(DSA **out, const uint8_t **inp, long len); // i2d_EC_PUBKEY marshals |ec_key| as a DER-encoded SubjectPublicKeyInfo, as // described in |i2d_SAMPLE|. // // Use |EVP_marshal_public_key| instead. OPENSSL_EXPORT int i2d_EC_PUBKEY(const EC_KEY *ec_key, uint8_t **outp); // d2i_EC_PUBKEY parses an EC public key as a DER-encoded SubjectPublicKeyInfo // from |len| bytes at |*inp|, as described in |d2i_SAMPLE|. // SubjectPublicKeyInfo structures containing other key types are rejected. // // Use |EVP_parse_public_key| instead. OPENSSL_EXPORT EC_KEY *d2i_EC_PUBKEY(EC_KEY **out, const uint8_t **inp, long len); // EVP_PKEY_CTX_set_dsa_paramgen_bits returns zero. OPENSSL_EXPORT int EVP_PKEY_CTX_set_dsa_paramgen_bits(EVP_PKEY_CTX *ctx, int nbits); // EVP_PKEY_CTX_set_dsa_paramgen_q_bits returns zero. OPENSSL_EXPORT int EVP_PKEY_CTX_set_dsa_paramgen_q_bits(EVP_PKEY_CTX *ctx, int qbits); // EVP_PKEY_assign sets the underlying key of |pkey| to |key|, which must be of // the given type. If successful, it returns one. If the |type| argument // is not one of |EVP_PKEY_RSA|, |EVP_PKEY_DSA|, or |EVP_PKEY_EC| values or if // |key| is NULL, it returns zero. This function may not be used with other // |EVP_PKEY_*| types. // // Use the |EVP_PKEY_assign_*| functions instead. OPENSSL_EXPORT int EVP_PKEY_assign(EVP_PKEY *pkey, int type, void *key); // EVP_PKEY_type returns |nid|. 
OPENSSL_EXPORT int EVP_PKEY_type(int nid); // Preprocessor compatibility section (hidden). // // Historically, a number of APIs were implemented in OpenSSL as macros and // constants to 'ctrl' functions. To avoid breaking #ifdefs in consumers, this // section defines a number of legacy macros. // |BORINGSSL_PREFIX| already makes each of these symbols into macros, so there // is no need to define conflicting macros. #if !defined(BORINGSSL_PREFIX) #define EVP_PKEY_CTX_set_rsa_oaep_md EVP_PKEY_CTX_set_rsa_oaep_md #define EVP_PKEY_CTX_set0_rsa_oaep_label EVP_PKEY_CTX_set0_rsa_oaep_label #endif // Nodejs compatibility section (hidden). // // These defines exist for node.js, with the hope that we can eliminate the // need for them over time. #define EVPerr(function, reason) \ ERR_put_error(ERR_LIB_EVP, 0, reason, __FILE__, __LINE__) #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(EVP_PKEY, EVP_PKEY_free) BORINGSSL_MAKE_UP_REF(EVP_PKEY, EVP_PKEY_up_ref) BORINGSSL_MAKE_DELETER(EVP_PKEY_CTX, EVP_PKEY_CTX_free) BSSL_NAMESPACE_END } // extern C++ #endif #endif // OPENSSL_HEADER_EVP_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_evp_errors.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_EVP_ERRORS_H #define OPENSSL_HEADER_EVP_ERRORS_H #define EVP_R_BUFFER_TOO_SMALL 100 #define EVP_R_COMMAND_NOT_SUPPORTED 101 #define EVP_R_DECODE_ERROR 102 #define EVP_R_DIFFERENT_KEY_TYPES 103 #define EVP_R_DIFFERENT_PARAMETERS 104 #define EVP_R_ENCODE_ERROR 105 #define EVP_R_EXPECTING_AN_EC_KEY_KEY 106 #define EVP_R_EXPECTING_AN_RSA_KEY 107 #define EVP_R_EXPECTING_A_DSA_KEY 108 #define EVP_R_ILLEGAL_OR_UNSUPPORTED_PADDING_MODE 109 #define EVP_R_INVALID_DIGEST_LENGTH 110 #define EVP_R_INVALID_DIGEST_TYPE 111 #define EVP_R_INVALID_KEYBITS 112 #define EVP_R_INVALID_MGF1_MD 113 #define EVP_R_INVALID_OPERATION 114 #define EVP_R_INVALID_PADDING_MODE 115 #define EVP_R_INVALID_PSS_SALTLEN 116 #define EVP_R_KEYS_NOT_SET 117 #define EVP_R_MISSING_PARAMETERS 118 #define EVP_R_NO_DEFAULT_DIGEST 119 #define EVP_R_NO_KEY_SET 120 #define EVP_R_NO_MDC2_SUPPORT 121 #define EVP_R_NO_NID_FOR_CURVE 122 #define EVP_R_NO_OPERATION_SET 123 #define EVP_R_NO_PARAMETERS_SET 124 #define EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE 125 #define EVP_R_OPERATON_NOT_INITIALIZED 126 #define EVP_R_UNKNOWN_PUBLIC_KEY_TYPE 127 #define EVP_R_UNSUPPORTED_ALGORITHM 128 #define EVP_R_UNSUPPORTED_PUBLIC_KEY_TYPE 129 #define EVP_R_NOT_A_PRIVATE_KEY 130 #define EVP_R_INVALID_SIGNATURE 131 #define EVP_R_MEMORY_LIMIT_EXCEEDED 132 #define EVP_R_INVALID_PARAMETERS 133 #define EVP_R_INVALID_PEER_KEY 134 #define EVP_R_NOT_XOF_OR_INVALID_LENGTH 135 #define EVP_R_EMPTY_PSK 136 #define EVP_R_INVALID_BUFFER_SIZE 137 #define EVP_R_EXPECTING_A_DH_KEY 138 #endif // OPENSSL_HEADER_EVP_ERRORS_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_ex_data.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. 
* * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_EX_DATA_H #define OPENSSL_HEADER_EX_DATA_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_stack.h" #if defined(__cplusplus) extern "C" { #endif // ex_data is a mechanism for associating arbitrary extra data with objects. // For each type of object that supports ex_data, different users can be // assigned indexes in which to store their data. Each index has callback // functions that are called when an object of that type is freed or // duplicated. typedef struct crypto_ex_data_st CRYPTO_EX_DATA; // Type-specific functions. #if 0 // Sample // Each type that supports ex_data provides three functions: // TYPE_get_ex_new_index allocates a new index for |TYPE|. An optional // |free_func| argument may be provided which is called when the owning object // is destroyed. See |CRYPTO_EX_free| for details. The |argl| and |argp| // arguments are opaque values that are passed to the callback. It returns the // new index or a negative number on error. OPENSSL_EXPORT int TYPE_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); // TYPE_set_ex_data sets an extra data pointer on |t|. The |index| argument // must have been returned from a previous call to |TYPE_get_ex_new_index|. OPENSSL_EXPORT int TYPE_set_ex_data(TYPE *t, int index, void *arg); // TYPE_get_ex_data returns an extra data pointer for |t|, or NULL if no such // pointer exists. The |index| argument should have been returned from a // previous call to |TYPE_get_ex_new_index|. OPENSSL_EXPORT void *TYPE_get_ex_data(const TYPE *t, int index); // Some types additionally preallocate index zero, with all callbacks set to // NULL. 
Applications that do not need the general ex_data machinery may use // this instead. // TYPE_set_app_data sets |t|'s application data pointer to |arg|. It returns // one on success and zero on error. OPENSSL_EXPORT int TYPE_set_app_data(TYPE *t, void *arg); // TYPE_get_app_data returns the application data pointer for |t|, or NULL if no // such pointer exists. OPENSSL_EXPORT void *TYPE_get_app_data(const TYPE *t); #endif // Sample // Callback types. // CRYPTO_EX_free is a callback function that is called when an object of the // class with extra data pointers is being destroyed. For example, if this // callback has been passed to |SSL_get_ex_new_index| then it may be called each // time an |SSL*| is destroyed. // // The callback is passed the to-be-destroyed object (i.e. the |SSL*|) in // |parent|. As |parent| will shortly be destroyed, callers must not perform // operations that would increment its reference count, pass ownership, or // assume the object outlives the function call. The arguments |argl| and |argp| // contain opaque values that were given to |CRYPTO_get_ex_new_index_ex|. // // This callback may be called with a NULL value for |ptr| if |parent| has no // value set for this index. However, the callbacks may also be skipped entirely // if no extra data pointers are set on |parent| at all. typedef void CRYPTO_EX_free(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int index, long argl, void *argp); // Deprecated functions. // CRYPTO_cleanup_all_ex_data does nothing. OPENSSL_EXPORT void CRYPTO_cleanup_all_ex_data(void); // CRYPTO_EX_dup is a legacy callback function type which is ignored. typedef int CRYPTO_EX_dup(CRYPTO_EX_DATA *to, const CRYPTO_EX_DATA *from, void **from_d, int index, long argl, void *argp); // Private structures. // CRYPTO_EX_unused is a placeholder for an unused callback. It is aliased to // int to ensure non-NULL callers fail to compile rather than fail silently. 
typedef int CRYPTO_EX_unused; struct crypto_ex_data_st { STACK_OF(void) *sk; }; #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_EX_DATA_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_hkdf.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_HKDF_H #define OPENSSL_HEADER_HKDF_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // HKDF. // HKDF computes HKDF (as specified by RFC 5869) of initial keying material // |secret| with |salt| and |info| using |digest|, and outputs |out_len| bytes // to |out_key|. It returns one on success and zero on error. // // HKDF is an Extract-and-Expand algorithm. It does not do any key stretching, // and as such, is not suited to be used alone to generate a key from a // password. OPENSSL_EXPORT int HKDF(uint8_t *out_key, size_t out_len, const EVP_MD *digest, const uint8_t *secret, size_t secret_len, const uint8_t *salt, size_t salt_len, const uint8_t *info, size_t info_len); // HKDF_extract computes a HKDF PRK (as specified by RFC 5869) from initial // keying material |secret| and salt |salt| using |digest|, and outputs // |out_len| bytes to |out_key|. 
The maximum output size is |EVP_MAX_MD_SIZE|. // It returns one on success and zero on error. // // WARNING: This function orders the inputs differently from RFC 5869 // specification. Double-check which parameter is the secret/IKM and which is // the salt when using. OPENSSL_EXPORT int HKDF_extract(uint8_t *out_key, size_t *out_len, const EVP_MD *digest, const uint8_t *secret, size_t secret_len, const uint8_t *salt, size_t salt_len); // HKDF_expand computes a HKDF OKM (as specified by RFC 5869) of length // |out_len| from the PRK |prk| and info |info| using |digest|, and outputs // the result to |out_key|. It returns one on success and zero on error. OPENSSL_EXPORT int HKDF_expand(uint8_t *out_key, size_t out_len, const EVP_MD *digest, const uint8_t *prk, size_t prk_len, const uint8_t *info, size_t info_len); #if defined(__cplusplus) } // extern C #endif #define HKDF_R_OUTPUT_TOO_LARGE 100 #endif // OPENSSL_HEADER_HKDF_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_hmac.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_HMAC_H #define OPENSSL_HEADER_HMAC_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_digest.h" #if defined(__cplusplus) extern "C" { #endif // HMAC contains functions for constructing PRFs from Merkle–Damgård hash // functions using HMAC. // One-shot operation. // HMAC calculates the HMAC of |data_len| bytes of |data|, using the given key // and hash function, and writes the result to |out|. On entry, |out| must // contain at least |EVP_MD_size| bytes of space. The actual length of the // result is written to |*out_len|. 
An output size of |EVP_MAX_MD_SIZE| will // always be large enough. It returns |out| or NULL on error. OPENSSL_EXPORT uint8_t *HMAC(const EVP_MD *evp_md, const void *key, size_t key_len, const uint8_t *data, size_t data_len, uint8_t *out, unsigned int *out_len); // Incremental operation. // HMAC_CTX_init initialises |ctx| for use in an HMAC operation. It's assumed // that HMAC_CTX objects will be allocated on the stack thus no allocation // function is provided. OPENSSL_EXPORT void HMAC_CTX_init(HMAC_CTX *ctx); // HMAC_CTX_new allocates and initialises a new |HMAC_CTX| and returns it, or // NULL on allocation failure. The caller must use |HMAC_CTX_free| to release // the resulting object. OPENSSL_EXPORT HMAC_CTX *HMAC_CTX_new(void); // HMAC_CTX_cleanup frees data owned by |ctx|. It does not free |ctx| itself. OPENSSL_EXPORT void HMAC_CTX_cleanup(HMAC_CTX *ctx); // HMAC_CTX_cleanse zeros the digest state from |ctx| and then performs the // actions of |HMAC_CTX_cleanup|. OPENSSL_EXPORT void HMAC_CTX_cleanse(HMAC_CTX *ctx); // HMAC_CTX_free calls |HMAC_CTX_cleanup| and then frees |ctx| itself. OPENSSL_EXPORT void HMAC_CTX_free(HMAC_CTX *ctx); // HMAC_Init_ex sets up an initialised |HMAC_CTX| to use |md| as the hash // function and |key| as the key. For a non-initial call, |md| may be NULL, in // which case the previous hash function will be used. If the hash function has // not changed and |key| is NULL, |ctx| reuses the previous key. It returns one // on success or zero on allocation failure. // // WARNING: NULL and empty keys are ambiguous on non-initial calls. Passing NULL // |key| but repeating the previous |md| reuses the previous key rather than the // empty key. OPENSSL_EXPORT int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len, const EVP_MD *md, ENGINE *impl); // HMAC_Update hashes |data_len| bytes from |data| into the current HMAC // operation in |ctx|. It returns one. 
OPENSSL_EXPORT int HMAC_Update(HMAC_CTX *ctx, const uint8_t *data, size_t data_len); // HMAC_Final completes the HMAC operation in |ctx| and writes the result to // |out| and the sets |*out_len| to the length of the result. On entry, |out| // must contain at least |HMAC_size| bytes of space. An output size of // |EVP_MAX_MD_SIZE| will always be large enough. It returns one on success or // zero on allocation failure. OPENSSL_EXPORT int HMAC_Final(HMAC_CTX *ctx, uint8_t *out, unsigned int *out_len); // Utility functions. // HMAC_size returns the size, in bytes, of the HMAC that will be produced by // |ctx|. On entry, |ctx| must have been setup with |HMAC_Init_ex|. OPENSSL_EXPORT size_t HMAC_size(const HMAC_CTX *ctx); // HMAC_CTX_get_md returns |ctx|'s hash function. OPENSSL_EXPORT const EVP_MD *HMAC_CTX_get_md(const HMAC_CTX *ctx); // HMAC_CTX_copy_ex sets |dest| equal to |src|. On entry, |dest| must have been // initialised by calling |HMAC_CTX_init|. It returns one on success and zero // on error. OPENSSL_EXPORT int HMAC_CTX_copy_ex(HMAC_CTX *dest, const HMAC_CTX *src); // HMAC_CTX_reset calls |HMAC_CTX_cleanup| followed by |HMAC_CTX_init|. OPENSSL_EXPORT void HMAC_CTX_reset(HMAC_CTX *ctx); // Deprecated functions. OPENSSL_EXPORT int HMAC_Init(HMAC_CTX *ctx, const void *key, int key_len, const EVP_MD *md); // HMAC_CTX_copy calls |HMAC_CTX_init| on |dest| and then sets it equal to // |src|. On entry, |dest| must /not/ be initialised for an operation with // |HMAC_Init_ex|. It returns one on success and zero on error. 
OPENSSL_EXPORT int HMAC_CTX_copy(HMAC_CTX *dest, const HMAC_CTX *src); // Private functions struct hmac_ctx_st { const EVP_MD *md; EVP_MD_CTX md_ctx; EVP_MD_CTX i_ctx; EVP_MD_CTX o_ctx; } /* HMAC_CTX */; #if defined(__cplusplus) } // extern C #if !defined(BORINGSSL_NO_CXX) extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(HMAC_CTX, HMAC_CTX_free) using ScopedHMAC_CTX = internal::StackAllocated; BSSL_NAMESPACE_END } // extern C++ #endif #endif #endif // OPENSSL_HEADER_HMAC_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_hpke.h ================================================ /* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_CRYPTO_HPKE_INTERNAL_H #define OPENSSL_HEADER_CRYPTO_HPKE_INTERNAL_H #include "CNIOBoringSSL_aead.h" #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_curve25519.h" #include "CNIOBoringSSL_digest.h" #if defined(__cplusplus) extern "C" { #endif // Hybrid Public Key Encryption. // // Hybrid Public Key Encryption (HPKE) enables a sender to encrypt messages to a // receiver with a public key. // // See RFC 9180. // Parameters. 
// // An HPKE context is parameterized by KEM, KDF, and AEAD algorithms, // represented by |EVP_HPKE_KEM|, |EVP_HPKE_KDF|, and |EVP_HPKE_AEAD| types, // respectively. // The following constants are KEM identifiers. #define EVP_HPKE_DHKEM_P256_HKDF_SHA256 0x0010 #define EVP_HPKE_DHKEM_X25519_HKDF_SHA256 0x0020 // The following functions are KEM algorithms which may be used with HPKE. Note // that, while some HPKE KEMs use KDFs internally, this is separate from the // |EVP_HPKE_KDF| selection. OPENSSL_EXPORT const EVP_HPKE_KEM *EVP_hpke_x25519_hkdf_sha256(void); OPENSSL_EXPORT const EVP_HPKE_KEM *EVP_hpke_p256_hkdf_sha256(void); // EVP_HPKE_KEM_id returns the HPKE KEM identifier for |kem|, which // will be one of the |EVP_HPKE_KEM_*| constants. OPENSSL_EXPORT uint16_t EVP_HPKE_KEM_id(const EVP_HPKE_KEM *kem); // EVP_HPKE_MAX_PUBLIC_KEY_LENGTH is the maximum length of an encoded public key // for all KEMs currently supported by this library. #define EVP_HPKE_MAX_PUBLIC_KEY_LENGTH 65 // EVP_HPKE_KEM_public_key_len returns the length of a public key for |kem|. // This value will be at most |EVP_HPKE_MAX_PUBLIC_KEY_LENGTH|. OPENSSL_EXPORT size_t EVP_HPKE_KEM_public_key_len(const EVP_HPKE_KEM *kem); // EVP_HPKE_MAX_PRIVATE_KEY_LENGTH is the maximum length of an encoded private // key for all KEMs currently supported by this library. #define EVP_HPKE_MAX_PRIVATE_KEY_LENGTH 32 // EVP_HPKE_KEM_private_key_len returns the length of a private key for |kem|. // This value will be at most |EVP_HPKE_MAX_PRIVATE_KEY_LENGTH|. OPENSSL_EXPORT size_t EVP_HPKE_KEM_private_key_len(const EVP_HPKE_KEM *kem); // EVP_HPKE_MAX_ENC_LENGTH is the maximum length of "enc", the encapsulated // shared secret, for all KEMs currently supported by this library. #define EVP_HPKE_MAX_ENC_LENGTH 65 // EVP_HPKE_KEM_enc_len returns the length of the "enc", the encapsulated shared // secret, for |kem|. This value will be at most |EVP_HPKE_MAX_ENC_LENGTH|. 
OPENSSL_EXPORT size_t EVP_HPKE_KEM_enc_len(const EVP_HPKE_KEM *kem); // The following constants are KDF identifiers. #define EVP_HPKE_HKDF_SHA256 0x0001 // The following functions are KDF algorithms which may be used with HPKE. OPENSSL_EXPORT const EVP_HPKE_KDF *EVP_hpke_hkdf_sha256(void); // EVP_HPKE_KDF_id returns the HPKE KDF identifier for |kdf|. OPENSSL_EXPORT uint16_t EVP_HPKE_KDF_id(const EVP_HPKE_KDF *kdf); // EVP_HPKE_KDF_hkdf_md returns the HKDF hash function corresponding to |kdf|, // or NULL if |kdf| is not an HKDF-based KDF. All currently supported KDFs are // HKDF-based. OPENSSL_EXPORT const EVP_MD *EVP_HPKE_KDF_hkdf_md(const EVP_HPKE_KDF *kdf); // The following constants are AEAD identifiers. #define EVP_HPKE_AES_128_GCM 0x0001 #define EVP_HPKE_AES_256_GCM 0x0002 #define EVP_HPKE_CHACHA20_POLY1305 0x0003 // The following functions are AEAD algorithms which may be used with HPKE. OPENSSL_EXPORT const EVP_HPKE_AEAD *EVP_hpke_aes_128_gcm(void); OPENSSL_EXPORT const EVP_HPKE_AEAD *EVP_hpke_aes_256_gcm(void); OPENSSL_EXPORT const EVP_HPKE_AEAD *EVP_hpke_chacha20_poly1305(void); // EVP_HPKE_AEAD_id returns the HPKE AEAD identifier for |aead|. OPENSSL_EXPORT uint16_t EVP_HPKE_AEAD_id(const EVP_HPKE_AEAD *aead); // EVP_HPKE_AEAD_aead returns the |EVP_AEAD| corresponding to |aead|. OPENSSL_EXPORT const EVP_AEAD *EVP_HPKE_AEAD_aead(const EVP_HPKE_AEAD *aead); // Recipient keys. // // An HPKE recipient maintains a long-term KEM key. This library represents keys // with the |EVP_HPKE_KEY| type. // EVP_HPKE_KEY_zero sets an uninitialized |EVP_HPKE_KEY| to the zero state. The // caller should then use |EVP_HPKE_KEY_init|, |EVP_HPKE_KEY_copy|, or // |EVP_HPKE_KEY_generate| to finish initializing |key|. // // It is safe, but not necessary to call |EVP_HPKE_KEY_cleanup| in this state. // This may be used for more uniform cleanup of |EVP_HPKE_KEY|. OPENSSL_EXPORT void EVP_HPKE_KEY_zero(EVP_HPKE_KEY *key); // EVP_HPKE_KEY_cleanup releases memory referenced by |key|. 
OPENSSL_EXPORT void EVP_HPKE_KEY_cleanup(EVP_HPKE_KEY *key); // EVP_HPKE_KEY_new returns a newly-allocated |EVP_HPKE_KEY|, or NULL on error. // The caller must call |EVP_HPKE_KEY_free| on the result to release it. // // This is a convenience function for callers that need a heap-allocated // |EVP_HPKE_KEY|. OPENSSL_EXPORT EVP_HPKE_KEY *EVP_HPKE_KEY_new(void); // EVP_HPKE_KEY_free releases memory associated with |key|, which must have been // created with |EVP_HPKE_KEY_new|. OPENSSL_EXPORT void EVP_HPKE_KEY_free(EVP_HPKE_KEY *key); // EVP_HPKE_KEY_copy sets |dst| to a copy of |src|. It returns one on success // and zero on error. On success, the caller must call |EVP_HPKE_KEY_cleanup| to // release |dst|. On failure, calling |EVP_HPKE_KEY_cleanup| is safe, but not // necessary. OPENSSL_EXPORT int EVP_HPKE_KEY_copy(EVP_HPKE_KEY *dst, const EVP_HPKE_KEY *src); // EVP_HPKE_KEY_move sets |out|, which must be initialized or in the zero state, // to the key in |in|. |in| is mutated and left in the zero state. OPENSSL_EXPORT void EVP_HPKE_KEY_move(EVP_HPKE_KEY *out, EVP_HPKE_KEY *in); // EVP_HPKE_KEY_init decodes |priv_key| as a private key for |kem| and // initializes |key| with the result. It returns one on success and zero if // |priv_key| was invalid. On success, the caller must call // |EVP_HPKE_KEY_cleanup| to release the key. On failure, calling // |EVP_HPKE_KEY_cleanup| is safe, but not necessary. OPENSSL_EXPORT int EVP_HPKE_KEY_init(EVP_HPKE_KEY *key, const EVP_HPKE_KEM *kem, const uint8_t *priv_key, size_t priv_key_len); // EVP_HPKE_KEY_generate sets |key| to a newly-generated key using |kem|. OPENSSL_EXPORT int EVP_HPKE_KEY_generate(EVP_HPKE_KEY *key, const EVP_HPKE_KEM *kem); // EVP_HPKE_KEY_kem returns the HPKE KEM used by |key|. OPENSSL_EXPORT const EVP_HPKE_KEM *EVP_HPKE_KEY_kem(const EVP_HPKE_KEY *key); // EVP_HPKE_KEY_public_key writes |key|'s public key to |out| and sets // |*out_len| to the number of bytes written. 
On success, it returns one and // writes at most |max_out| bytes. If |max_out| is too small, it returns zero. // Setting |max_out| to |EVP_HPKE_MAX_PUBLIC_KEY_LENGTH| will ensure the public // key fits. An exact size can also be determined by // |EVP_HPKE_KEM_public_key_len|. OPENSSL_EXPORT int EVP_HPKE_KEY_public_key(const EVP_HPKE_KEY *key, uint8_t *out, size_t *out_len, size_t max_out); // EVP_HPKE_KEY_private_key writes |key|'s private key to |out| and sets // |*out_len| to the number of bytes written. On success, it returns one and // writes at most |max_out| bytes. If |max_out| is too small, it returns zero. // Setting |max_out| to |EVP_HPKE_MAX_PRIVATE_KEY_LENGTH| will ensure the // private key fits. An exact size can also be determined by // |EVP_HPKE_KEM_private_key_len|. OPENSSL_EXPORT int EVP_HPKE_KEY_private_key(const EVP_HPKE_KEY *key, uint8_t *out, size_t *out_len, size_t max_out); // Encryption contexts. // // An HPKE encryption context is represented by the |EVP_HPKE_CTX| type. // EVP_HPKE_CTX_zero sets an uninitialized |EVP_HPKE_CTX| to the zero state. The // caller should then use one of the |EVP_HPKE_CTX_setup_*| functions to finish // setting up |ctx|. // // It is safe, but not necessary to call |EVP_HPKE_CTX_cleanup| in this state. // This may be used for more uniform cleanup of |EVP_HPKE_CTX|. OPENSSL_EXPORT void EVP_HPKE_CTX_zero(EVP_HPKE_CTX *ctx); // EVP_HPKE_CTX_cleanup releases memory referenced by |ctx|. |ctx| must have // been initialized with |EVP_HPKE_CTX_zero| or one of the // |EVP_HPKE_CTX_setup_*| functions. OPENSSL_EXPORT void EVP_HPKE_CTX_cleanup(EVP_HPKE_CTX *ctx); // EVP_HPKE_CTX_new returns a newly-allocated |EVP_HPKE_CTX|, or NULL on error. // The caller must call |EVP_HPKE_CTX_free| on the result to release it. // // This is a convenience function for callers that need a heap-allocated // |EVP_HPKE_CTX|. 
OPENSSL_EXPORT EVP_HPKE_CTX *EVP_HPKE_CTX_new(void); // EVP_HPKE_CTX_free releases memory associated with |ctx|, which must have been // created with |EVP_HPKE_CTX_new|. OPENSSL_EXPORT void EVP_HPKE_CTX_free(EVP_HPKE_CTX *ctx); // EVP_HPKE_CTX_setup_sender implements the SetupBaseS HPKE operation. It // encapsulates a shared secret for |peer_public_key| and sets up |ctx| as a // sender context. It writes the encapsulated shared secret to |out_enc| and // sets |*out_enc_len| to the number of bytes written. It writes at most // |max_enc| bytes and fails if the buffer is too small. Setting |max_enc| to at // least |EVP_HPKE_MAX_ENC_LENGTH| will ensure the buffer is large enough. An // exact size may also be determined by |EVP_PKEY_KEM_enc_len|. // // This function returns one on success and zero on error. Note that // |peer_public_key| may be invalid, in which case this function will return an // error. // // On success, callers may call |EVP_HPKE_CTX_seal| to encrypt messages for the // recipient. Callers must then call |EVP_HPKE_CTX_cleanup| when done. On // failure, calling |EVP_HPKE_CTX_cleanup| is safe, but not required. OPENSSL_EXPORT int EVP_HPKE_CTX_setup_sender( EVP_HPKE_CTX *ctx, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc, const EVP_HPKE_KEM *kem, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead, const uint8_t *peer_public_key, size_t peer_public_key_len, const uint8_t *info, size_t info_len); // EVP_HPKE_CTX_setup_sender_with_seed_for_testing behaves like // |EVP_HPKE_CTX_setup_sender|, but takes a seed to behave deterministically. // The seed's format depends on |kem|. For X25519, it is the sender's // ephemeral private key. For P256, it's an HKDF input. 
OPENSSL_EXPORT int EVP_HPKE_CTX_setup_sender_with_seed_for_testing( EVP_HPKE_CTX *ctx, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc, const EVP_HPKE_KEM *kem, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead, const uint8_t *peer_public_key, size_t peer_public_key_len, const uint8_t *info, size_t info_len, const uint8_t *seed, size_t seed_len); // EVP_HPKE_CTX_setup_recipient implements the SetupBaseR HPKE operation. It // decapsulates the shared secret in |enc| with |key| and sets up |ctx| as a // recipient context. It returns one on success and zero on failure. Note that // |enc| may be invalid, in which case this function will return an error. // // On success, callers may call |EVP_HPKE_CTX_open| to decrypt messages from the // sender. Callers must then call |EVP_HPKE_CTX_cleanup| when done. On failure, // calling |EVP_HPKE_CTX_cleanup| is safe, but not required. OPENSSL_EXPORT int EVP_HPKE_CTX_setup_recipient( EVP_HPKE_CTX *ctx, const EVP_HPKE_KEY *key, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead, const uint8_t *enc, size_t enc_len, const uint8_t *info, size_t info_len); // EVP_HPKE_CTX_setup_auth_sender implements the SetupAuthS HPKE operation. It // behaves like |EVP_HPKE_CTX_setup_sender| but authenticates the resulting // context with |key|. OPENSSL_EXPORT int EVP_HPKE_CTX_setup_auth_sender( EVP_HPKE_CTX *ctx, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc, const EVP_HPKE_KEY *key, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead, const uint8_t *peer_public_key, size_t peer_public_key_len, const uint8_t *info, size_t info_len); // EVP_HPKE_CTX_setup_auth_sender_with_seed_for_testing behaves like // |EVP_HPKE_CTX_setup_auth_sender|, but takes a seed to behave // deterministically. The seed's format depends on |kem|. For X25519, it is the // sender's ephemeral private key. For P256, it's an HKDF input. 
OPENSSL_EXPORT int EVP_HPKE_CTX_setup_auth_sender_with_seed_for_testing( EVP_HPKE_CTX *ctx, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc, const EVP_HPKE_KEY *key, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead, const uint8_t *peer_public_key, size_t peer_public_key_len, const uint8_t *info, size_t info_len, const uint8_t *seed, size_t seed_len); // EVP_HPKE_CTX_setup_auth_recipient implements the SetupAuthR HPKE operation. // It behaves like |EVP_HPKE_CTX_setup_recipient| but checks the resulting // context was authenticated with |peer_public_key|. OPENSSL_EXPORT int EVP_HPKE_CTX_setup_auth_recipient( EVP_HPKE_CTX *ctx, const EVP_HPKE_KEY *key, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead, const uint8_t *enc, size_t enc_len, const uint8_t *info, size_t info_len, const uint8_t *peer_public_key, size_t peer_public_key_len); // Using an HPKE context. // // Once set up, callers may encrypt or decrypt with an |EVP_HPKE_CTX| using the // following functions. // EVP_HPKE_CTX_open uses the HPKE context |ctx| to authenticate |in_len| bytes // from |in| and |ad_len| bytes from |ad| and to decrypt at most |in_len| bytes // into |out|. It returns one on success, and zero otherwise. // // This operation will fail if the |ctx| context is not set up as a receiver. // // Note that HPKE encryption is stateful and ordered. The sender's first call to // |EVP_HPKE_CTX_seal| must correspond to the recipient's first call to // |EVP_HPKE_CTX_open|, etc. // // At most |in_len| bytes are written to |out|. In order to ensure success, // |max_out_len| should be at least |in_len|. On successful return, |*out_len| // is set to the actual number of bytes written. 
OPENSSL_EXPORT int EVP_HPKE_CTX_open(EVP_HPKE_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *in, size_t in_len, const uint8_t *ad, size_t ad_len); // EVP_HPKE_CTX_seal uses the HPKE context |ctx| to encrypt and authenticate // |in_len| bytes of ciphertext |in| and authenticate |ad_len| bytes from |ad|, // writing the result to |out|. It returns one on success and zero otherwise. // // This operation will fail if the |ctx| context is not set up as a sender. // // Note that HPKE encryption is stateful and ordered. The sender's first call to // |EVP_HPKE_CTX_seal| must correspond to the recipient's first call to // |EVP_HPKE_CTX_open|, etc. // // At most, |max_out_len| encrypted bytes are written to |out|. On successful // return, |*out_len| is set to the actual number of bytes written. // // To ensure success, |max_out_len| should be |in_len| plus the result of // |EVP_HPKE_CTX_max_overhead| or |EVP_HPKE_MAX_OVERHEAD|. OPENSSL_EXPORT int EVP_HPKE_CTX_seal(EVP_HPKE_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *in, size_t in_len, const uint8_t *ad, size_t ad_len); // EVP_HPKE_CTX_export uses the HPKE context |ctx| to export a secret of // |secret_len| bytes into |out|. This function uses |context_len| bytes from // |context| as a context string for the secret. This is necessary to separate // different uses of exported secrets and bind relevant caller-specific context // into the output. It returns one on success and zero otherwise. OPENSSL_EXPORT int EVP_HPKE_CTX_export(const EVP_HPKE_CTX *ctx, uint8_t *out, size_t secret_len, const uint8_t *context, size_t context_len); // EVP_HPKE_MAX_OVERHEAD contains the largest value that // |EVP_HPKE_CTX_max_overhead| would ever return for any context. #define EVP_HPKE_MAX_OVERHEAD EVP_AEAD_MAX_OVERHEAD // EVP_HPKE_CTX_max_overhead returns the maximum number of additional bytes // added by sealing data with |EVP_HPKE_CTX_seal|. 
The |ctx| context must be set // up as a sender. OPENSSL_EXPORT size_t EVP_HPKE_CTX_max_overhead(const EVP_HPKE_CTX *ctx); // EVP_HPKE_CTX_kem returns |ctx|'s configured KEM, or NULL if the context has // not been set up. OPENSSL_EXPORT const EVP_HPKE_KEM *EVP_HPKE_CTX_kem(const EVP_HPKE_CTX *ctx); // EVP_HPKE_CTX_aead returns |ctx|'s configured AEAD, or NULL if the context has // not been set up. OPENSSL_EXPORT const EVP_HPKE_AEAD *EVP_HPKE_CTX_aead(const EVP_HPKE_CTX *ctx); // EVP_HPKE_CTX_kdf returns |ctx|'s configured KDF, or NULL if the context has // not been set up. OPENSSL_EXPORT const EVP_HPKE_KDF *EVP_HPKE_CTX_kdf(const EVP_HPKE_CTX *ctx); // Private structures. // // The following structures are exported so their types are stack-allocatable, // but accessing or modifying their fields is forbidden. struct evp_hpke_ctx_st { const EVP_HPKE_KEM *kem; const EVP_HPKE_AEAD *aead; const EVP_HPKE_KDF *kdf; EVP_AEAD_CTX aead_ctx; uint8_t base_nonce[EVP_AEAD_MAX_NONCE_LENGTH]; uint8_t exporter_secret[EVP_MAX_MD_SIZE]; uint64_t seq; int is_sender; }; struct evp_hpke_key_st { const EVP_HPKE_KEM *kem; uint8_t private_key[EVP_HPKE_MAX_PRIVATE_KEY_LENGTH]; uint8_t public_key[EVP_HPKE_MAX_PUBLIC_KEY_LENGTH]; }; #if defined(__cplusplus) } // extern C #endif #if !defined(BORINGSSL_NO_CXX) extern "C++" { BSSL_NAMESPACE_BEGIN using ScopedEVP_HPKE_CTX = internal::StackAllocated; using ScopedEVP_HPKE_KEY = internal::StackAllocatedMovable; BORINGSSL_MAKE_DELETER(EVP_HPKE_CTX, EVP_HPKE_CTX_free) BORINGSSL_MAKE_DELETER(EVP_HPKE_KEY, EVP_HPKE_KEY_free) BSSL_NAMESPACE_END } // extern C++ #endif #endif // OPENSSL_HEADER_CRYPTO_HPKE_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_hrss.h ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * 
copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_HRSS_H #define OPENSSL_HEADER_HRSS_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // HRSS // // HRSS is a structured-lattice-based post-quantum key encapsulation mechanism. // The best exposition is https://eprint.iacr.org/2017/667.pdf although this // implementation uses a different KEM construction based on // https://eprint.iacr.org/2017/1005.pdf. struct HRSS_private_key { uint8_t opaque[1808]; }; struct HRSS_public_key { uint8_t opaque[1424]; }; // HRSS_SAMPLE_BYTES is the number of bytes of entropy needed to generate a // short vector. There are 701 coefficients, but the final one is always set to // zero when sampling. Otherwise, we need one byte of input per coefficient. #define HRSS_SAMPLE_BYTES (701 - 1) // HRSS_GENERATE_KEY_BYTES is the number of bytes of entropy needed to generate // an HRSS key pair. #define HRSS_GENERATE_KEY_BYTES (HRSS_SAMPLE_BYTES + HRSS_SAMPLE_BYTES + 32) // HRSS_ENCAP_BYTES is the number of bytes of entropy needed to encapsulate a // session key. #define HRSS_ENCAP_BYTES (HRSS_SAMPLE_BYTES + HRSS_SAMPLE_BYTES) // HRSS_PUBLIC_KEY_BYTES is the number of bytes in a public key. #define HRSS_PUBLIC_KEY_BYTES 1138 // HRSS_CIPHERTEXT_BYTES is the number of bytes in a ciphertext. #define HRSS_CIPHERTEXT_BYTES 1138 // HRSS_KEY_BYTES is the number of bytes in a shared key. 
#define HRSS_KEY_BYTES 32 // HRSS_POLY3_BYTES is the number of bytes needed to serialise a mod 3 // polynomial. #define HRSS_POLY3_BYTES 140 #define HRSS_PRIVATE_KEY_BYTES \ (HRSS_POLY3_BYTES * 2 + HRSS_PUBLIC_KEY_BYTES + 2 + 32) // HRSS_generate_key is a deterministic function that outputs a public and // private key based on the given entropy. It returns one on success or zero // on malloc failure. OPENSSL_EXPORT int HRSS_generate_key( struct HRSS_public_key *out_pub, struct HRSS_private_key *out_priv, const uint8_t input[HRSS_GENERATE_KEY_BYTES]); // HRSS_encap is a deterministic function the generates and encrypts a random // session key from the given entropy, writing those values to |out_shared_key| // and |out_ciphertext|, respectively. It returns one on success or zero on // malloc failure. OPENSSL_EXPORT int HRSS_encap(uint8_t out_ciphertext[HRSS_CIPHERTEXT_BYTES], uint8_t out_shared_key[HRSS_KEY_BYTES], const struct HRSS_public_key *in_pub, const uint8_t in[HRSS_ENCAP_BYTES]); // HRSS_decap decrypts a session key from |ciphertext_len| bytes of // |ciphertext|. If the ciphertext is valid, the decrypted key is written to // |out_shared_key|. Otherwise the HMAC of |ciphertext| under a secret key (kept // in |in_priv|) is written. If the ciphertext is the wrong length then it will // leak which was done via side-channels. Otherwise it should perform either // action in constant-time. It returns one on success (whether the ciphertext // was valid or not) and zero on malloc failure. OPENSSL_EXPORT int HRSS_decap(uint8_t out_shared_key[HRSS_KEY_BYTES], const struct HRSS_private_key *in_priv, const uint8_t *ciphertext, size_t ciphertext_len); // HRSS_marshal_public_key serialises |in_pub| to |out|. OPENSSL_EXPORT void HRSS_marshal_public_key( uint8_t out[HRSS_PUBLIC_KEY_BYTES], const struct HRSS_public_key *in_pub); // HRSS_parse_public_key sets |*out| to the public-key encoded in |in|. It // returns true on success and zero on error. 
OPENSSL_EXPORT int HRSS_parse_public_key( struct HRSS_public_key *out, const uint8_t in[HRSS_PUBLIC_KEY_BYTES]); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_HRSS_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_is_boringssl.h ================================================ /* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ // This header is provided in order to catch include path errors in consuming // BoringSSL. ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_kdf.h ================================================ /* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_KDF_H #define OPENSSL_HEADER_KDF_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // KDF support for EVP. // HKDF-specific functions. // // The following functions are provided for OpenSSL compatibility. Prefer the // HKDF functions in . In each, |ctx| must be created with // |EVP_PKEY_CTX_new_id| with |EVP_PKEY_HKDF| and then initialized with // |EVP_PKEY_derive_init|. // EVP_PKEY_HKDEF_MODE_* define "modes" for use with |EVP_PKEY_CTX_hkdf_mode|. // The mispelling of "HKDF" as "HKDEF" is intentional for OpenSSL compatibility. #define EVP_PKEY_HKDEF_MODE_EXTRACT_AND_EXPAND 0 #define EVP_PKEY_HKDEF_MODE_EXTRACT_ONLY 1 #define EVP_PKEY_HKDEF_MODE_EXPAND_ONLY 2 // EVP_PKEY_CTX_hkdf_mode configures which HKDF operation to run. It returns one // on success and zero on error. |mode| must be one of |EVP_PKEY_HKDEF_MODE_*|. // By default, the mode is |EVP_PKEY_HKDEF_MODE_EXTRACT_AND_EXPAND|. // // If |mode| is |EVP_PKEY_HKDEF_MODE_EXTRACT_AND_EXPAND| or // |EVP_PKEY_HKDEF_MODE_EXPAND_ONLY|, the output is variable-length. // |EVP_PKEY_derive| uses the size of the output buffer as the output length for // HKDF-Expand. // // WARNING: Although this API calls it a "mode", HKDF-Extract and HKDF-Expand // are distinct operations with distinct inputs and distinct kinds of keys. // Callers should not pass input secrets for one operation into the other. OPENSSL_EXPORT int EVP_PKEY_CTX_hkdf_mode(EVP_PKEY_CTX *ctx, int mode); // EVP_PKEY_CTX_set_hkdf_md sets |md| as the digest to use with HKDF. It returns // one on success and zero on error. 
OPENSSL_EXPORT int EVP_PKEY_CTX_set_hkdf_md(EVP_PKEY_CTX *ctx, const EVP_MD *md); // EVP_PKEY_CTX_set1_hkdf_key configures HKDF to use |key_len| bytes from |key| // as the "key", described below. It returns one on success and zero on error. // // Which input is the key depends on the "mode" (see |EVP_PKEY_CTX_hkdf_mode|). // If |EVP_PKEY_HKDEF_MODE_EXTRACT_AND_EXPAND| or // |EVP_PKEY_HKDEF_MODE_EXTRACT_ONLY|, this function specifies the input keying // material (IKM) for HKDF-Extract. If |EVP_PKEY_HKDEF_MODE_EXPAND_ONLY|, it // instead specifies the pseudorandom key (PRK) for HKDF-Expand. OPENSSL_EXPORT int EVP_PKEY_CTX_set1_hkdf_key(EVP_PKEY_CTX *ctx, const uint8_t *key, size_t key_len); // EVP_PKEY_CTX_set1_hkdf_salt configures HKDF to use |salt_len| bytes from // |salt| as the salt parameter to HKDF-Extract. It returns one on success and // zero on error. If performing HKDF-Expand only, this parameter is ignored. OPENSSL_EXPORT int EVP_PKEY_CTX_set1_hkdf_salt(EVP_PKEY_CTX *ctx, const uint8_t *salt, size_t salt_len); // EVP_PKEY_CTX_add1_hkdf_info appends |info_len| bytes from |info| to the info // parameter used with HKDF-Expand. It returns one on success and zero on error. // If performing HKDF-Extract only, this parameter is ignored. OPENSSL_EXPORT int EVP_PKEY_CTX_add1_hkdf_info(EVP_PKEY_CTX *ctx, const uint8_t *info, size_t info_len); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_KDF_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_lhash.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_LHASH_H #define OPENSSL_HEADER_LHASH_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // lhash is an internal library and not exported for use outside BoringSSL. This // header is provided for compatibility with code that expects OpenSSL. // These two macros are exported for compatibility with existing callers of // |X509V3_EXT_conf_nid|. Do not use these symbols outside BoringSSL. #define LHASH_OF(type) struct lhash_st_##type #define DECLARE_LHASH_OF(type) LHASH_OF(type); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_LHASH_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_md4.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_MD4_H #define OPENSSL_HEADER_MD4_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // MD4. // MD4_CBLOCK is the block size of MD4. #define MD4_CBLOCK 64 // MD4_DIGEST_LENGTH is the length of an MD4 digest. #define MD4_DIGEST_LENGTH 16 // MD4_Init initialises |md4| and returns one. OPENSSL_EXPORT int MD4_Init(MD4_CTX *md4); // MD4_Update adds |len| bytes from |data| to |md4| and returns one. OPENSSL_EXPORT int MD4_Update(MD4_CTX *md4, const void *data, size_t len); // MD4_Final adds the final padding to |md4| and writes the resulting digest to // |out|, which must have at least |MD4_DIGEST_LENGTH| bytes of space. It // returns one. 
OPENSSL_EXPORT int MD4_Final(uint8_t out[MD4_DIGEST_LENGTH], MD4_CTX *md4); // MD4 writes the digest of |len| bytes from |data| to |out| and returns |out|. // There must be at least |MD4_DIGEST_LENGTH| bytes of space in |out|. OPENSSL_EXPORT uint8_t *MD4(const uint8_t *data, size_t len, uint8_t out[MD4_DIGEST_LENGTH]); // MD4_Transform is a low-level function that performs a single, MD4 block // transformation using the state from |md4| and 64 bytes from |block|. OPENSSL_EXPORT void MD4_Transform(MD4_CTX *md4, const uint8_t block[MD4_CBLOCK]); struct md4_state_st { uint32_t h[4]; uint32_t Nl, Nh; uint8_t data[MD4_CBLOCK]; unsigned num; }; #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_MD4_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_md5.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_MD5_H #define OPENSSL_HEADER_MD5_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // MD5. // MD5_CBLOCK is the block size of MD5. #define MD5_CBLOCK 64 // MD5_DIGEST_LENGTH is the length of an MD5 digest. #define MD5_DIGEST_LENGTH 16 // MD5_Init initialises |md5| and returns one. OPENSSL_EXPORT int MD5_Init(MD5_CTX *md5); // MD5_Update adds |len| bytes from |data| to |md5| and returns one. OPENSSL_EXPORT int MD5_Update(MD5_CTX *md5, const void *data, size_t len); // MD5_Final adds the final padding to |md5| and writes the resulting digest to // |out|, which must have at least |MD5_DIGEST_LENGTH| bytes of space. It // returns one. 
OPENSSL_EXPORT int MD5_Final(uint8_t out[MD5_DIGEST_LENGTH], MD5_CTX *md5); // MD5 writes the digest of |len| bytes from |data| to |out| and returns |out|. // There must be at least |MD5_DIGEST_LENGTH| bytes of space in |out|. OPENSSL_EXPORT uint8_t *MD5(const uint8_t *data, size_t len, uint8_t out[MD5_DIGEST_LENGTH]); // MD5_Transform is a low-level function that performs a single, MD5 block // transformation using the state from |md5| and 64 bytes from |block|. OPENSSL_EXPORT void MD5_Transform(MD5_CTX *md5, const uint8_t block[MD5_CBLOCK]); struct md5_state_st { uint32_t h[4]; uint32_t Nl, Nh; uint8_t data[MD5_CBLOCK]; unsigned num; }; #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_MD5_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_mem.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_MEM_H #define OPENSSL_HEADER_MEM_H #include "CNIOBoringSSL_base.h" #include #include #if defined(__cplusplus) extern "C" { #endif // Memory and string functions, see also buf.h. // // BoringSSL has its own set of allocation functions, which keep track of // allocation lengths and zero them out before freeing. All memory returned by // BoringSSL API calls must therefore generally be freed using |OPENSSL_free| // unless stated otherwise. #ifndef _BORINGSSL_PROHIBIT_OPENSSL_MALLOC // OPENSSL_malloc is similar to a regular |malloc|, but allocates additional // private data. The resulting pointer must be freed with |OPENSSL_free|. 
In // the case of a malloc failure, prior to returning NULL |OPENSSL_malloc| will // push |ERR_R_MALLOC_FAILURE| onto the openssl error stack. OPENSSL_EXPORT void *OPENSSL_malloc(size_t size); // OPENSSL_zalloc behaves like |OPENSSL_malloc| except it also initializes the // resulting memory to zero. OPENSSL_EXPORT void *OPENSSL_zalloc(size_t size); // OPENSSL_calloc is similar to a regular |calloc|, but allocates data with // |OPENSSL_malloc|. On overflow, it will push |ERR_R_OVERFLOW| onto the error // queue. OPENSSL_EXPORT void *OPENSSL_calloc(size_t num, size_t size); // OPENSSL_realloc returns a pointer to a buffer of |new_size| bytes that // contains the contents of |ptr|. Unlike |realloc|, a new buffer is always // allocated and the data at |ptr| is always wiped and freed. Memory is // allocated with |OPENSSL_malloc| and must be freed with |OPENSSL_free|. OPENSSL_EXPORT void *OPENSSL_realloc(void *ptr, size_t new_size); #endif // !_BORINGSSL_PROHIBIT_OPENSSL_MALLOC // OPENSSL_free does nothing if |ptr| is NULL. Otherwise it zeros out the // memory allocated at |ptr| and frees it along with the private data. // It must only be used on on |ptr| values obtained from |OPENSSL_malloc| OPENSSL_EXPORT void OPENSSL_free(void *ptr); // OPENSSL_cleanse zeros out |len| bytes of memory at |ptr|. This is similar to // |memset_s| from C11. OPENSSL_EXPORT void OPENSSL_cleanse(void *ptr, size_t len); // CRYPTO_memcmp returns zero iff the |len| bytes at |a| and |b| are equal. It // takes an amount of time dependent on |len|, but independent of the contents // of |a| and |b|. Unlike memcmp, it cannot be used to put elements into a // defined order as the return value when a != b is undefined, other than to be // non-zero. OPENSSL_EXPORT int CRYPTO_memcmp(const void *a, const void *b, size_t len); // OPENSSL_hash32 implements the 32 bit, FNV-1a hash. 
OPENSSL_EXPORT uint32_t OPENSSL_hash32(const void *ptr, size_t len); // OPENSSL_strhash calls |OPENSSL_hash32| on the NUL-terminated string |s|. OPENSSL_EXPORT uint32_t OPENSSL_strhash(const char *s); // OPENSSL_strdup has the same behaviour as strdup(3). OPENSSL_EXPORT char *OPENSSL_strdup(const char *s); // OPENSSL_strnlen has the same behaviour as strnlen(3). OPENSSL_EXPORT size_t OPENSSL_strnlen(const char *s, size_t len); // OPENSSL_isalpha is a locale-independent, ASCII-only version of isalpha(3), It // only recognizes 'a' through 'z' and 'A' through 'Z' as alphabetic. OPENSSL_EXPORT int OPENSSL_isalpha(int c); // OPENSSL_isdigit is a locale-independent, ASCII-only version of isdigit(3), It // only recognizes '0' through '9' as digits. OPENSSL_EXPORT int OPENSSL_isdigit(int c); // OPENSSL_isxdigit is a locale-independent, ASCII-only version of isxdigit(3), // It only recognizes '0' through '9', 'a' through 'f', and 'A through 'F' as // digits. OPENSSL_EXPORT int OPENSSL_isxdigit(int c); // OPENSSL_fromxdigit returns one if |c| is a hexadecimal digit as recognized // by OPENSSL_isxdigit, and sets |out| to the corresponding value. Otherwise // zero is returned. OPENSSL_EXPORT int OPENSSL_fromxdigit(uint8_t *out, int c); // OPENSSL_isalnum is a locale-independent, ASCII-only version of isalnum(3), It // only recognizes what |OPENSSL_isalpha| and |OPENSSL_isdigit| recognize. OPENSSL_EXPORT int OPENSSL_isalnum(int c); // OPENSSL_tolower is a locale-independent, ASCII-only version of tolower(3). It // only lowercases ASCII values. Other values are returned as-is. OPENSSL_EXPORT int OPENSSL_tolower(int c); // OPENSSL_isspace is a locale-independent, ASCII-only version of isspace(3). It // only recognizes '\t', '\n', '\v', '\f', '\r', and ' '. OPENSSL_EXPORT int OPENSSL_isspace(int c); // OPENSSL_strcasecmp is a locale-independent, ASCII-only version of // strcasecmp(3). 
OPENSSL_EXPORT int OPENSSL_strcasecmp(const char *a, const char *b); // OPENSSL_strncasecmp is a locale-independent, ASCII-only version of // strncasecmp(3). OPENSSL_EXPORT int OPENSSL_strncasecmp(const char *a, const char *b, size_t n); // DECIMAL_SIZE returns an upper bound for the length of the decimal // representation of the given type. #define DECIMAL_SIZE(type) ((sizeof(type)*8+2)/3+1) // BIO_snprintf has the same behavior as snprintf(3). OPENSSL_EXPORT int BIO_snprintf(char *buf, size_t n, const char *format, ...) OPENSSL_PRINTF_FORMAT_FUNC(3, 4); // BIO_vsnprintf has the same behavior as vsnprintf(3). OPENSSL_EXPORT int BIO_vsnprintf(char *buf, size_t n, const char *format, va_list args) OPENSSL_PRINTF_FORMAT_FUNC(3, 0); // OPENSSL_vasprintf has the same behavior as vasprintf(3), except that // memory allocated in a returned string must be freed with |OPENSSL_free|. OPENSSL_EXPORT int OPENSSL_vasprintf(char **str, const char *format, va_list args) OPENSSL_PRINTF_FORMAT_FUNC(2, 0); // OPENSSL_asprintf has the same behavior as asprintf(3), except that // memory allocated in a returned string must be freed with |OPENSSL_free|. OPENSSL_EXPORT int OPENSSL_asprintf(char **str, const char *format, ...) OPENSSL_PRINTF_FORMAT_FUNC(2, 3); // OPENSSL_strndup returns an allocated, duplicate of |str|, which is, at most, // |size| bytes. The result is always NUL terminated. The memory allocated // must be freed with |OPENSSL_free|. OPENSSL_EXPORT char *OPENSSL_strndup(const char *str, size_t size); // OPENSSL_memdup returns an allocated, duplicate of |size| bytes from |data| or // NULL on allocation failure. The memory allocated must be freed with // |OPENSSL_free|. OPENSSL_EXPORT void *OPENSSL_memdup(const void *data, size_t size); // OPENSSL_strlcpy acts like strlcpy(3). OPENSSL_EXPORT size_t OPENSSL_strlcpy(char *dst, const char *src, size_t dst_size); // OPENSSL_strlcat acts like strlcat(3). 
OPENSSL_EXPORT size_t OPENSSL_strlcat(char *dst, const char *src, size_t dst_size); // Deprecated functions. // CRYPTO_malloc calls |OPENSSL_malloc|. |file| and |line| are ignored. OPENSSL_EXPORT void *CRYPTO_malloc(size_t size, const char *file, int line); // CRYPTO_realloc calls |OPENSSL_realloc|. |file| and |line| are ignored. OPENSSL_EXPORT void *CRYPTO_realloc(void *ptr, size_t new_size, const char *file, int line); // CRYPTO_free calls |OPENSSL_free|. |file| and |line| are ignored. OPENSSL_EXPORT void CRYPTO_free(void *ptr, const char *file, int line); // OPENSSL_clear_free calls |OPENSSL_free|. BoringSSL automatically clears all // allocations on free, but we define |OPENSSL_clear_free| for compatibility. OPENSSL_EXPORT void OPENSSL_clear_free(void *ptr, size_t len); // CRYPTO_secure_malloc_init returns zero. OPENSSL_EXPORT int CRYPTO_secure_malloc_init(size_t size, size_t min_size); // CRYPTO_secure_malloc_initialized returns zero. OPENSSL_EXPORT int CRYPTO_secure_malloc_initialized(void); // CRYPTO_secure_used returns zero. OPENSSL_EXPORT size_t CRYPTO_secure_used(void); // OPENSSL_secure_malloc calls |OPENSSL_malloc|. OPENSSL_EXPORT void *OPENSSL_secure_malloc(size_t size); // OPENSSL_secure_clear_free calls |OPENSSL_clear_free|. OPENSSL_EXPORT void OPENSSL_secure_clear_free(void *ptr, size_t len); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(char, OPENSSL_free) BORINGSSL_MAKE_DELETER(uint8_t, OPENSSL_free) BSSL_NAMESPACE_END } // extern C++ #endif #endif // OPENSSL_HEADER_MEM_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_mldsa.h ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_MLDSA_H_ #define OPENSSL_HEADER_MLDSA_H_ #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // ML-DSA. // // This implements the Module-Lattice-Based Digital Signature Standard from // https://csrc.nist.gov/pubs/fips/204/final // MLDSA_SEED_BYTES is the number of bytes in an ML-DSA seed value. #define MLDSA_SEED_BYTES 32 // ML-DSA-65. // MLDSA65_private_key contains an ML-DSA-65 private key. The contents of this // object should never leave the address space since the format is unstable. struct MLDSA65_private_key { union { uint8_t bytes[32 + 32 + 64 + 256 * 4 * (5 + 6 + 6)]; uint32_t alignment; } opaque; }; // MLDSA65_public_key contains an ML-DSA-65 public key. The contents of this // object should never leave the address space since the format is unstable. struct MLDSA65_public_key { union { uint8_t bytes[32 + 64 + 256 * 4 * 6]; uint32_t alignment; } opaque; }; // MLDSA65_PRIVATE_KEY_BYTES is the number of bytes in an encoded ML-DSA-65 // private key. #define MLDSA65_PRIVATE_KEY_BYTES 4032 // MLDSA65_PUBLIC_KEY_BYTES is the number of bytes in an encoded ML-DSA-65 // public key. #define MLDSA65_PUBLIC_KEY_BYTES 1952 // MLDSA65_SIGNATURE_BYTES is the number of bytes in an encoded ML-DSA-65 // signature. 
#define MLDSA65_SIGNATURE_BYTES 3309 // MLDSA65_generate_key generates a random public/private key pair, writes the // encoded public key to |out_encoded_public_key|, writes the seed to // |out_seed|, and sets |out_private_key| to the private key. Returns 1 on // success and 0 on allocation failure. OPENSSL_EXPORT int MLDSA65_generate_key( uint8_t out_encoded_public_key[MLDSA65_PUBLIC_KEY_BYTES], uint8_t out_seed[MLDSA_SEED_BYTES], struct MLDSA65_private_key *out_private_key); // MLDSA65_private_key_from_seed regenerates a private key from a seed value // that was generated by |MLDSA65_generate_key|. Returns 1 on success and 0 on // allocation failure or if |seed_len| is incorrect. OPENSSL_EXPORT int MLDSA65_private_key_from_seed( struct MLDSA65_private_key *out_private_key, const uint8_t *seed, size_t seed_len); // MLDSA65_public_from_private sets |*out_public_key| to the public key that // corresponds to |private_key|. Returns 1 on success and 0 on failure. OPENSSL_EXPORT int MLDSA65_public_from_private( struct MLDSA65_public_key *out_public_key, const struct MLDSA65_private_key *private_key); // MLDSA65_sign generates a signature for the message |msg| of length // |msg_len| using |private_key| (following the randomized algorithm), and // writes the encoded signature to |out_encoded_signature|. The |context| // argument is also signed over and can be used to include implicit contextual // information that isn't included in |msg|. The same value of |context| must be // presented to |MLDSA65_verify| in order for the generated signature to be // considered valid. |context| and |context_len| may be |NULL| and 0 to use an // empty context (this is common). Returns 1 on success and 0 on failure. 
OPENSSL_EXPORT int MLDSA65_sign( uint8_t out_encoded_signature[MLDSA65_SIGNATURE_BYTES], const struct MLDSA65_private_key *private_key, const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t context_len); // MLDSA65_verify verifies that |signature| constitutes a valid // signature for the message |msg| of length |msg_len| using |public_key|. The // value of |context| must equal the value that was passed to |MLDSA65_sign| // when the signature was generated. Returns 1 on success or 0 on error. OPENSSL_EXPORT int MLDSA65_verify(const struct MLDSA65_public_key *public_key, const uint8_t *signature, size_t signature_len, const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t context_len); // MLDSA65_marshal_public_key serializes |public_key| to |out| in the standard // format for ML-DSA-65 public keys. It returns 1 on success or 0 on // allocation error. OPENSSL_EXPORT int MLDSA65_marshal_public_key( CBB *out, const struct MLDSA65_public_key *public_key); // MLDSA65_parse_public_key parses a public key, in the format generated by // |MLDSA65_marshal_public_key|, from |in| and writes the result to // |out_public_key|. It returns 1 on success or 0 on parse error or if // there are trailing bytes in |in|. OPENSSL_EXPORT int MLDSA65_parse_public_key( struct MLDSA65_public_key *public_key, CBS *in); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_MLDSA_H_ ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_mlkem.h ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_MLKEM_H #define OPENSSL_HEADER_MLKEM_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // ML-KEM-768. // // This implements the Module-Lattice-Based Key-Encapsulation Mechanism from // https://csrc.nist.gov/pubs/fips/204/final // MLKEM768_public_key contains an ML-KEM-768 public key. The contents of this // object should never leave the address space since the format is unstable. struct MLKEM768_public_key { union { uint8_t bytes[512 * (3 + 9) + 32 + 32]; uint16_t alignment; } opaque; }; // MLKEM768_private_key contains an ML-KEM-768 private key. The contents of this // object should never leave the address space since the format is unstable. struct MLKEM768_private_key { union { uint8_t bytes[512 * (3 + 3 + 9) + 32 + 32 + 32]; uint16_t alignment; } opaque; }; // MLKEM768_PUBLIC_KEY_BYTES is the number of bytes in an encoded ML-KEM-768 // public key. #define MLKEM768_PUBLIC_KEY_BYTES 1184 // MLKEM_SEED_BYTES is the number of bytes in an ML-KEM seed. #define MLKEM_SEED_BYTES 64 // MLKEM768_generate_key generates a random public/private key pair, writes the // encoded public key to |out_encoded_public_key| and sets |out_private_key| to // the private key. If |optional_out_seed| is not NULL then the seed used to // generate the private key is written to it. 
OPENSSL_EXPORT void MLKEM768_generate_key( uint8_t out_encoded_public_key[MLKEM768_PUBLIC_KEY_BYTES], uint8_t optional_out_seed[MLKEM_SEED_BYTES], struct MLKEM768_private_key *out_private_key); // MLKEM768_private_key_from_seed derives a private key from a seed that was // generated by |MLKEM768_generate_key|. It fails and returns 0 if |seed_len| is // incorrect, otherwise it writes |*out_private_key| and returns 1. OPENSSL_EXPORT int MLKEM768_private_key_from_seed( struct MLKEM768_private_key *out_private_key, const uint8_t *seed, size_t seed_len); // MLKEM768_public_from_private sets |*out_public_key| to the public key that // corresponds to |private_key|. (This is faster than parsing the output of // |MLKEM768_generate_key| if, for some reason, you need to encapsulate to a key // that was just generated.) OPENSSL_EXPORT void MLKEM768_public_from_private( struct MLKEM768_public_key *out_public_key, const struct MLKEM768_private_key *private_key); // MLKEM768_CIPHERTEXT_BYTES is number of bytes in the ML-KEM-768 ciphertext. #define MLKEM768_CIPHERTEXT_BYTES 1088 // MLKEM_SHARED_SECRET_BYTES is the number of bytes in an ML-KEM shared secret. #define MLKEM_SHARED_SECRET_BYTES 32 // MLKEM768_encap encrypts a random shared secret for |public_key|, writes the // ciphertext to |out_ciphertext|, and writes the random shared secret to // |out_shared_secret|. OPENSSL_EXPORT void MLKEM768_encap( uint8_t out_ciphertext[MLKEM768_CIPHERTEXT_BYTES], uint8_t out_shared_secret[MLKEM_SHARED_SECRET_BYTES], const struct MLKEM768_public_key *public_key); // MLKEM768_decap decrypts a shared secret from |ciphertext| using |private_key| // and writes it to |out_shared_secret|. If |ciphertext_len| is incorrect it // returns 0, otherwise it returns 1. 
If |ciphertext| is invalid (but of the // correct length), |out_shared_secret| is filled with a key that will always be // the same for the same |ciphertext| and |private_key|, but which appears to be // random unless one has access to |private_key|. These alternatives occur in // constant time. Any subsequent symmetric encryption using |out_shared_secret| // must use an authenticated encryption scheme in order to discover the // decapsulation failure. OPENSSL_EXPORT int MLKEM768_decap( uint8_t out_shared_secret[MLKEM_SHARED_SECRET_BYTES], const uint8_t *ciphertext, size_t ciphertext_len, const struct MLKEM768_private_key *private_key); // Serialisation of keys. // MLKEM768_marshal_public_key serializes |public_key| to |out| in the standard // format for ML-KEM-768 public keys. It returns one on success or zero on // allocation error. OPENSSL_EXPORT int MLKEM768_marshal_public_key( CBB *out, const struct MLKEM768_public_key *public_key); // MLKEM768_parse_public_key parses a public key, in the format generated by // |MLKEM768_marshal_public_key|, from |in| and writes the result to // |out_public_key|. It returns one on success or zero on parse error or if // there are trailing bytes in |in|. OPENSSL_EXPORT int MLKEM768_parse_public_key( struct MLKEM768_public_key *out_public_key, CBS *in); // ML-KEM-1024 // // ML-KEM-1024 also exists. You should prefer ML-KEM-768 where possible. // MLKEM1024_public_key contains an ML-KEM-1024 public key. The contents of this // object should never leave the address space since the format is unstable. struct MLKEM1024_public_key { union { uint8_t bytes[512 * (4 + 16) + 32 + 32]; uint16_t alignment; } opaque; }; // MLKEM1024_private_key contains a ML-KEM-1024 private key. The contents of // this object should never leave the address space since the format is // unstable. 
struct MLKEM1024_private_key { union { uint8_t bytes[512 * (4 + 4 + 16) + 32 + 32 + 32]; uint16_t alignment; } opaque; }; // MLKEM1024_PUBLIC_KEY_BYTES is the number of bytes in an encoded ML-KEM-1024 // public key. #define MLKEM1024_PUBLIC_KEY_BYTES 1568 // MLKEM1024_generate_key generates a random public/private key pair, writes the // encoded public key to |out_encoded_public_key| and sets |out_private_key| to // the private key. If |optional_out_seed| is not NULL then the seed used to // generate the private key is written to it. OPENSSL_EXPORT void MLKEM1024_generate_key( uint8_t out_encoded_public_key[MLKEM1024_PUBLIC_KEY_BYTES], uint8_t optional_out_seed[MLKEM_SEED_BYTES], struct MLKEM1024_private_key *out_private_key); // MLKEM1024_private_key_from_seed derives a private key from a seed that was // generated by |MLKEM1024_generate_key|. It fails and returns 0 if |seed_len| // is incorrect, otherwise it writes |*out_private_key| and returns 1. OPENSSL_EXPORT int MLKEM1024_private_key_from_seed( struct MLKEM1024_private_key *out_private_key, const uint8_t *seed, size_t seed_len); // MLKEM1024_public_from_private sets |*out_public_key| to the public key that // corresponds to |private_key|. (This is faster than parsing the output of // |MLKEM1024_generate_key| if, for some reason, you need to encapsulate to a // key that was just generated.) OPENSSL_EXPORT void MLKEM1024_public_from_private( struct MLKEM1024_public_key *out_public_key, const struct MLKEM1024_private_key *private_key); // MLKEM1024_CIPHERTEXT_BYTES is number of bytes in the ML-KEM-1024 ciphertext. #define MLKEM1024_CIPHERTEXT_BYTES 1568 // MLKEM1024_encap encrypts a random shared secret for |public_key|, writes the // ciphertext to |out_ciphertext|, and writes the random shared secret to // |out_shared_secret|. 
OPENSSL_EXPORT void MLKEM1024_encap( uint8_t out_ciphertext[MLKEM1024_CIPHERTEXT_BYTES], uint8_t out_shared_secret[MLKEM_SHARED_SECRET_BYTES], const struct MLKEM1024_public_key *public_key); // MLKEM1024_decap decrypts a shared secret from |ciphertext| using // |private_key| and writes it to |out_shared_secret|. If |ciphertext_len| is // incorrect it returns 0, otherwise it returns 1. If |ciphertext| is invalid // (but of the correct length), |out_shared_secret| is filled with a key that // will always be the same for the same |ciphertext| and |private_key|, but // which appears to be random unless one has access to |private_key|. These // alternatives occur in constant time. Any subsequent symmetric encryption // using |out_shared_secret| must use an authenticated encryption scheme in // order to discover the decapsulation failure. OPENSSL_EXPORT int MLKEM1024_decap( uint8_t out_shared_secret[MLKEM_SHARED_SECRET_BYTES], const uint8_t *ciphertext, size_t ciphertext_len, const struct MLKEM1024_private_key *private_key); // Serialisation of ML-KEM-1024 keys. // MLKEM1024_marshal_public_key serializes |public_key| to |out| in the standard // format for ML-KEM-1024 public keys. It returns one on success or zero on // allocation error. OPENSSL_EXPORT int MLKEM1024_marshal_public_key( CBB *out, const struct MLKEM1024_public_key *public_key); // MLKEM1024_parse_public_key parses a public key, in the format generated by // |MLKEM1024_marshal_public_key|, from |in| and writes the result to // |out_public_key|. It returns one on success or zero on parse error or if // there are trailing bytes in |in|. 
OPENSSL_EXPORT int MLKEM1024_parse_public_key( struct MLKEM1024_public_key *out_public_key, CBS *in); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_MLKEM_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_nid.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ /* This file is generated by crypto/obj/objects.go. */ #ifndef OPENSSL_HEADER_NID_H #define OPENSSL_HEADER_NID_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif /* The nid library provides numbered values for ASN.1 object identifiers and * other symbols. These values are used by other libraries to identify * cryptographic primitives. * * A separate objects library, obj.h, provides functions for converting between * nids and object identifiers. However it depends on large internal tables with * the encodings of every nid defined. Consumers concerned with binary size * should instead embed the encodings of the few consumed OIDs and compare * against those. * * These values should not be used outside of a single process; they are not * stable identifiers. */ #define SN_undef "UNDEF" #define LN_undef "undefined" #define NID_undef 0 #define OBJ_undef 0L #define SN_rsadsi "rsadsi" #define LN_rsadsi "RSA Data Security, Inc." #define NID_rsadsi 1 #define OBJ_rsadsi 1L, 2L, 840L, 113549L #define SN_pkcs "pkcs" #define LN_pkcs "RSA Data Security, Inc. 
PKCS" #define NID_pkcs 2 #define OBJ_pkcs 1L, 2L, 840L, 113549L, 1L #define SN_md2 "MD2" #define LN_md2 "md2" #define NID_md2 3 #define OBJ_md2 1L, 2L, 840L, 113549L, 2L, 2L #define SN_md5 "MD5" #define LN_md5 "md5" #define NID_md5 4 #define OBJ_md5 1L, 2L, 840L, 113549L, 2L, 5L #define SN_rc4 "RC4" #define LN_rc4 "rc4" #define NID_rc4 5 #define OBJ_rc4 1L, 2L, 840L, 113549L, 3L, 4L #define LN_rsaEncryption "rsaEncryption" #define NID_rsaEncryption 6 #define OBJ_rsaEncryption 1L, 2L, 840L, 113549L, 1L, 1L, 1L #define SN_md2WithRSAEncryption "RSA-MD2" #define LN_md2WithRSAEncryption "md2WithRSAEncryption" #define NID_md2WithRSAEncryption 7 #define OBJ_md2WithRSAEncryption 1L, 2L, 840L, 113549L, 1L, 1L, 2L #define SN_md5WithRSAEncryption "RSA-MD5" #define LN_md5WithRSAEncryption "md5WithRSAEncryption" #define NID_md5WithRSAEncryption 8 #define OBJ_md5WithRSAEncryption 1L, 2L, 840L, 113549L, 1L, 1L, 4L #define SN_pbeWithMD2AndDES_CBC "PBE-MD2-DES" #define LN_pbeWithMD2AndDES_CBC "pbeWithMD2AndDES-CBC" #define NID_pbeWithMD2AndDES_CBC 9 #define OBJ_pbeWithMD2AndDES_CBC 1L, 2L, 840L, 113549L, 1L, 5L, 1L #define SN_pbeWithMD5AndDES_CBC "PBE-MD5-DES" #define LN_pbeWithMD5AndDES_CBC "pbeWithMD5AndDES-CBC" #define NID_pbeWithMD5AndDES_CBC 10 #define OBJ_pbeWithMD5AndDES_CBC 1L, 2L, 840L, 113549L, 1L, 5L, 3L #define SN_X500 "X500" #define LN_X500 "directory services (X.500)" #define NID_X500 11 #define OBJ_X500 2L, 5L #define SN_X509 "X509" #define NID_X509 12 #define OBJ_X509 2L, 5L, 4L #define SN_commonName "CN" #define LN_commonName "commonName" #define NID_commonName 13 #define OBJ_commonName 2L, 5L, 4L, 3L #define SN_countryName "C" #define LN_countryName "countryName" #define NID_countryName 14 #define OBJ_countryName 2L, 5L, 4L, 6L #define SN_localityName "L" #define LN_localityName "localityName" #define NID_localityName 15 #define OBJ_localityName 2L, 5L, 4L, 7L #define SN_stateOrProvinceName "ST" #define LN_stateOrProvinceName "stateOrProvinceName" #define 
NID_stateOrProvinceName 16 #define OBJ_stateOrProvinceName 2L, 5L, 4L, 8L #define SN_organizationName "O" #define LN_organizationName "organizationName" #define NID_organizationName 17 #define OBJ_organizationName 2L, 5L, 4L, 10L #define SN_organizationalUnitName "OU" #define LN_organizationalUnitName "organizationalUnitName" #define NID_organizationalUnitName 18 #define OBJ_organizationalUnitName 2L, 5L, 4L, 11L #define SN_rsa "RSA" #define LN_rsa "rsa" #define NID_rsa 19 #define OBJ_rsa 2L, 5L, 8L, 1L, 1L #define SN_pkcs7 "pkcs7" #define NID_pkcs7 20 #define OBJ_pkcs7 1L, 2L, 840L, 113549L, 1L, 7L #define LN_pkcs7_data "pkcs7-data" #define NID_pkcs7_data 21 #define OBJ_pkcs7_data 1L, 2L, 840L, 113549L, 1L, 7L, 1L #define LN_pkcs7_signed "pkcs7-signedData" #define NID_pkcs7_signed 22 #define OBJ_pkcs7_signed 1L, 2L, 840L, 113549L, 1L, 7L, 2L #define LN_pkcs7_enveloped "pkcs7-envelopedData" #define NID_pkcs7_enveloped 23 #define OBJ_pkcs7_enveloped 1L, 2L, 840L, 113549L, 1L, 7L, 3L #define LN_pkcs7_signedAndEnveloped "pkcs7-signedAndEnvelopedData" #define NID_pkcs7_signedAndEnveloped 24 #define OBJ_pkcs7_signedAndEnveloped 1L, 2L, 840L, 113549L, 1L, 7L, 4L #define LN_pkcs7_digest "pkcs7-digestData" #define NID_pkcs7_digest 25 #define OBJ_pkcs7_digest 1L, 2L, 840L, 113549L, 1L, 7L, 5L #define LN_pkcs7_encrypted "pkcs7-encryptedData" #define NID_pkcs7_encrypted 26 #define OBJ_pkcs7_encrypted 1L, 2L, 840L, 113549L, 1L, 7L, 6L #define SN_pkcs3 "pkcs3" #define NID_pkcs3 27 #define OBJ_pkcs3 1L, 2L, 840L, 113549L, 1L, 3L #define LN_dhKeyAgreement "dhKeyAgreement" #define NID_dhKeyAgreement 28 #define OBJ_dhKeyAgreement 1L, 2L, 840L, 113549L, 1L, 3L, 1L #define SN_des_ecb "DES-ECB" #define LN_des_ecb "des-ecb" #define NID_des_ecb 29 #define OBJ_des_ecb 1L, 3L, 14L, 3L, 2L, 6L #define SN_des_cfb64 "DES-CFB" #define LN_des_cfb64 "des-cfb" #define NID_des_cfb64 30 #define OBJ_des_cfb64 1L, 3L, 14L, 3L, 2L, 9L #define SN_des_cbc "DES-CBC" #define LN_des_cbc "des-cbc" #define 
NID_des_cbc 31 #define OBJ_des_cbc 1L, 3L, 14L, 3L, 2L, 7L #define SN_des_ede_ecb "DES-EDE" #define LN_des_ede_ecb "des-ede" #define NID_des_ede_ecb 32 #define OBJ_des_ede_ecb 1L, 3L, 14L, 3L, 2L, 17L #define SN_des_ede3_ecb "DES-EDE3" #define LN_des_ede3_ecb "des-ede3" #define NID_des_ede3_ecb 33 #define SN_idea_cbc "IDEA-CBC" #define LN_idea_cbc "idea-cbc" #define NID_idea_cbc 34 #define OBJ_idea_cbc 1L, 3L, 6L, 1L, 4L, 1L, 188L, 7L, 1L, 1L, 2L #define SN_idea_cfb64 "IDEA-CFB" #define LN_idea_cfb64 "idea-cfb" #define NID_idea_cfb64 35 #define SN_idea_ecb "IDEA-ECB" #define LN_idea_ecb "idea-ecb" #define NID_idea_ecb 36 #define SN_rc2_cbc "RC2-CBC" #define LN_rc2_cbc "rc2-cbc" #define NID_rc2_cbc 37 #define OBJ_rc2_cbc 1L, 2L, 840L, 113549L, 3L, 2L #define SN_rc2_ecb "RC2-ECB" #define LN_rc2_ecb "rc2-ecb" #define NID_rc2_ecb 38 #define SN_rc2_cfb64 "RC2-CFB" #define LN_rc2_cfb64 "rc2-cfb" #define NID_rc2_cfb64 39 #define SN_rc2_ofb64 "RC2-OFB" #define LN_rc2_ofb64 "rc2-ofb" #define NID_rc2_ofb64 40 #define SN_sha "SHA" #define LN_sha "sha" #define NID_sha 41 #define OBJ_sha 1L, 3L, 14L, 3L, 2L, 18L #define SN_shaWithRSAEncryption "RSA-SHA" #define LN_shaWithRSAEncryption "shaWithRSAEncryption" #define NID_shaWithRSAEncryption 42 #define OBJ_shaWithRSAEncryption 1L, 3L, 14L, 3L, 2L, 15L #define SN_des_ede_cbc "DES-EDE-CBC" #define LN_des_ede_cbc "des-ede-cbc" #define NID_des_ede_cbc 43 #define SN_des_ede3_cbc "DES-EDE3-CBC" #define LN_des_ede3_cbc "des-ede3-cbc" #define NID_des_ede3_cbc 44 #define OBJ_des_ede3_cbc 1L, 2L, 840L, 113549L, 3L, 7L #define SN_des_ofb64 "DES-OFB" #define LN_des_ofb64 "des-ofb" #define NID_des_ofb64 45 #define OBJ_des_ofb64 1L, 3L, 14L, 3L, 2L, 8L #define SN_idea_ofb64 "IDEA-OFB" #define LN_idea_ofb64 "idea-ofb" #define NID_idea_ofb64 46 #define SN_pkcs9 "pkcs9" #define NID_pkcs9 47 #define OBJ_pkcs9 1L, 2L, 840L, 113549L, 1L, 9L #define LN_pkcs9_emailAddress "emailAddress" #define NID_pkcs9_emailAddress 48 #define OBJ_pkcs9_emailAddress 
1L, 2L, 840L, 113549L, 1L, 9L, 1L #define LN_pkcs9_unstructuredName "unstructuredName" #define NID_pkcs9_unstructuredName 49 #define OBJ_pkcs9_unstructuredName 1L, 2L, 840L, 113549L, 1L, 9L, 2L #define LN_pkcs9_contentType "contentType" #define NID_pkcs9_contentType 50 #define OBJ_pkcs9_contentType 1L, 2L, 840L, 113549L, 1L, 9L, 3L #define LN_pkcs9_messageDigest "messageDigest" #define NID_pkcs9_messageDigest 51 #define OBJ_pkcs9_messageDigest 1L, 2L, 840L, 113549L, 1L, 9L, 4L #define LN_pkcs9_signingTime "signingTime" #define NID_pkcs9_signingTime 52 #define OBJ_pkcs9_signingTime 1L, 2L, 840L, 113549L, 1L, 9L, 5L #define LN_pkcs9_countersignature "countersignature" #define NID_pkcs9_countersignature 53 #define OBJ_pkcs9_countersignature 1L, 2L, 840L, 113549L, 1L, 9L, 6L #define LN_pkcs9_challengePassword "challengePassword" #define NID_pkcs9_challengePassword 54 #define OBJ_pkcs9_challengePassword 1L, 2L, 840L, 113549L, 1L, 9L, 7L #define LN_pkcs9_unstructuredAddress "unstructuredAddress" #define NID_pkcs9_unstructuredAddress 55 #define OBJ_pkcs9_unstructuredAddress 1L, 2L, 840L, 113549L, 1L, 9L, 8L #define LN_pkcs9_extCertAttributes "extendedCertificateAttributes" #define NID_pkcs9_extCertAttributes 56 #define OBJ_pkcs9_extCertAttributes 1L, 2L, 840L, 113549L, 1L, 9L, 9L #define SN_netscape "Netscape" #define LN_netscape "Netscape Communications Corp." 
#define NID_netscape 57 #define OBJ_netscape 2L, 16L, 840L, 1L, 113730L #define SN_netscape_cert_extension "nsCertExt" #define LN_netscape_cert_extension "Netscape Certificate Extension" #define NID_netscape_cert_extension 58 #define OBJ_netscape_cert_extension 2L, 16L, 840L, 1L, 113730L, 1L #define SN_netscape_data_type "nsDataType" #define LN_netscape_data_type "Netscape Data Type" #define NID_netscape_data_type 59 #define OBJ_netscape_data_type 2L, 16L, 840L, 1L, 113730L, 2L #define SN_des_ede_cfb64 "DES-EDE-CFB" #define LN_des_ede_cfb64 "des-ede-cfb" #define NID_des_ede_cfb64 60 #define SN_des_ede3_cfb64 "DES-EDE3-CFB" #define LN_des_ede3_cfb64 "des-ede3-cfb" #define NID_des_ede3_cfb64 61 #define SN_des_ede_ofb64 "DES-EDE-OFB" #define LN_des_ede_ofb64 "des-ede-ofb" #define NID_des_ede_ofb64 62 #define SN_des_ede3_ofb64 "DES-EDE3-OFB" #define LN_des_ede3_ofb64 "des-ede3-ofb" #define NID_des_ede3_ofb64 63 #define SN_sha1 "SHA1" #define LN_sha1 "sha1" #define NID_sha1 64 #define OBJ_sha1 1L, 3L, 14L, 3L, 2L, 26L #define SN_sha1WithRSAEncryption "RSA-SHA1" #define LN_sha1WithRSAEncryption "sha1WithRSAEncryption" #define NID_sha1WithRSAEncryption 65 #define OBJ_sha1WithRSAEncryption 1L, 2L, 840L, 113549L, 1L, 1L, 5L #define SN_dsaWithSHA "DSA-SHA" #define LN_dsaWithSHA "dsaWithSHA" #define NID_dsaWithSHA 66 #define OBJ_dsaWithSHA 1L, 3L, 14L, 3L, 2L, 13L #define SN_dsa_2 "DSA-old" #define LN_dsa_2 "dsaEncryption-old" #define NID_dsa_2 67 #define OBJ_dsa_2 1L, 3L, 14L, 3L, 2L, 12L #define SN_pbeWithSHA1AndRC2_CBC "PBE-SHA1-RC2-64" #define LN_pbeWithSHA1AndRC2_CBC "pbeWithSHA1AndRC2-CBC" #define NID_pbeWithSHA1AndRC2_CBC 68 #define OBJ_pbeWithSHA1AndRC2_CBC 1L, 2L, 840L, 113549L, 1L, 5L, 11L #define LN_id_pbkdf2 "PBKDF2" #define NID_id_pbkdf2 69 #define OBJ_id_pbkdf2 1L, 2L, 840L, 113549L, 1L, 5L, 12L #define SN_dsaWithSHA1_2 "DSA-SHA1-old" #define LN_dsaWithSHA1_2 "dsaWithSHA1-old" #define NID_dsaWithSHA1_2 70 #define OBJ_dsaWithSHA1_2 1L, 3L, 14L, 3L, 2L, 27L 
#define SN_netscape_cert_type "nsCertType" #define LN_netscape_cert_type "Netscape Cert Type" #define NID_netscape_cert_type 71 #define OBJ_netscape_cert_type 2L, 16L, 840L, 1L, 113730L, 1L, 1L #define SN_netscape_base_url "nsBaseUrl" #define LN_netscape_base_url "Netscape Base Url" #define NID_netscape_base_url 72 #define OBJ_netscape_base_url 2L, 16L, 840L, 1L, 113730L, 1L, 2L #define SN_netscape_revocation_url "nsRevocationUrl" #define LN_netscape_revocation_url "Netscape Revocation Url" #define NID_netscape_revocation_url 73 #define OBJ_netscape_revocation_url 2L, 16L, 840L, 1L, 113730L, 1L, 3L #define SN_netscape_ca_revocation_url "nsCaRevocationUrl" #define LN_netscape_ca_revocation_url "Netscape CA Revocation Url" #define NID_netscape_ca_revocation_url 74 #define OBJ_netscape_ca_revocation_url 2L, 16L, 840L, 1L, 113730L, 1L, 4L #define SN_netscape_renewal_url "nsRenewalUrl" #define LN_netscape_renewal_url "Netscape Renewal Url" #define NID_netscape_renewal_url 75 #define OBJ_netscape_renewal_url 2L, 16L, 840L, 1L, 113730L, 1L, 7L #define SN_netscape_ca_policy_url "nsCaPolicyUrl" #define LN_netscape_ca_policy_url "Netscape CA Policy Url" #define NID_netscape_ca_policy_url 76 #define OBJ_netscape_ca_policy_url 2L, 16L, 840L, 1L, 113730L, 1L, 8L #define SN_netscape_ssl_server_name "nsSslServerName" #define LN_netscape_ssl_server_name "Netscape SSL Server Name" #define NID_netscape_ssl_server_name 77 #define OBJ_netscape_ssl_server_name 2L, 16L, 840L, 1L, 113730L, 1L, 12L #define SN_netscape_comment "nsComment" #define LN_netscape_comment "Netscape Comment" #define NID_netscape_comment 78 #define OBJ_netscape_comment 2L, 16L, 840L, 1L, 113730L, 1L, 13L #define SN_netscape_cert_sequence "nsCertSequence" #define LN_netscape_cert_sequence "Netscape Certificate Sequence" #define NID_netscape_cert_sequence 79 #define OBJ_netscape_cert_sequence 2L, 16L, 840L, 1L, 113730L, 2L, 5L #define SN_desx_cbc "DESX-CBC" #define LN_desx_cbc "desx-cbc" #define NID_desx_cbc 80 
#define SN_id_ce "id-ce" #define NID_id_ce 81 #define OBJ_id_ce 2L, 5L, 29L #define SN_subject_key_identifier "subjectKeyIdentifier" #define LN_subject_key_identifier "X509v3 Subject Key Identifier" #define NID_subject_key_identifier 82 #define OBJ_subject_key_identifier 2L, 5L, 29L, 14L #define SN_key_usage "keyUsage" #define LN_key_usage "X509v3 Key Usage" #define NID_key_usage 83 #define OBJ_key_usage 2L, 5L, 29L, 15L #define SN_private_key_usage_period "privateKeyUsagePeriod" #define LN_private_key_usage_period "X509v3 Private Key Usage Period" #define NID_private_key_usage_period 84 #define OBJ_private_key_usage_period 2L, 5L, 29L, 16L #define SN_subject_alt_name "subjectAltName" #define LN_subject_alt_name "X509v3 Subject Alternative Name" #define NID_subject_alt_name 85 #define OBJ_subject_alt_name 2L, 5L, 29L, 17L #define SN_issuer_alt_name "issuerAltName" #define LN_issuer_alt_name "X509v3 Issuer Alternative Name" #define NID_issuer_alt_name 86 #define OBJ_issuer_alt_name 2L, 5L, 29L, 18L #define SN_basic_constraints "basicConstraints" #define LN_basic_constraints "X509v3 Basic Constraints" #define NID_basic_constraints 87 #define OBJ_basic_constraints 2L, 5L, 29L, 19L #define SN_crl_number "crlNumber" #define LN_crl_number "X509v3 CRL Number" #define NID_crl_number 88 #define OBJ_crl_number 2L, 5L, 29L, 20L #define SN_certificate_policies "certificatePolicies" #define LN_certificate_policies "X509v3 Certificate Policies" #define NID_certificate_policies 89 #define OBJ_certificate_policies 2L, 5L, 29L, 32L #define SN_authority_key_identifier "authorityKeyIdentifier" #define LN_authority_key_identifier "X509v3 Authority Key Identifier" #define NID_authority_key_identifier 90 #define OBJ_authority_key_identifier 2L, 5L, 29L, 35L #define SN_bf_cbc "BF-CBC" #define LN_bf_cbc "bf-cbc" #define NID_bf_cbc 91 #define OBJ_bf_cbc 1L, 3L, 6L, 1L, 4L, 1L, 3029L, 1L, 2L #define SN_bf_ecb "BF-ECB" #define LN_bf_ecb "bf-ecb" #define NID_bf_ecb 92 #define SN_bf_cfb64 
"BF-CFB" #define LN_bf_cfb64 "bf-cfb" #define NID_bf_cfb64 93 #define SN_bf_ofb64 "BF-OFB" #define LN_bf_ofb64 "bf-ofb" #define NID_bf_ofb64 94 #define SN_mdc2 "MDC2" #define LN_mdc2 "mdc2" #define NID_mdc2 95 #define OBJ_mdc2 2L, 5L, 8L, 3L, 101L #define SN_mdc2WithRSA "RSA-MDC2" #define LN_mdc2WithRSA "mdc2WithRSA" #define NID_mdc2WithRSA 96 #define OBJ_mdc2WithRSA 2L, 5L, 8L, 3L, 100L #define SN_rc4_40 "RC4-40" #define LN_rc4_40 "rc4-40" #define NID_rc4_40 97 #define SN_rc2_40_cbc "RC2-40-CBC" #define LN_rc2_40_cbc "rc2-40-cbc" #define NID_rc2_40_cbc 98 #define SN_givenName "GN" #define LN_givenName "givenName" #define NID_givenName 99 #define OBJ_givenName 2L, 5L, 4L, 42L #define SN_surname "SN" #define LN_surname "surname" #define NID_surname 100 #define OBJ_surname 2L, 5L, 4L, 4L #define SN_initials "initials" #define LN_initials "initials" #define NID_initials 101 #define OBJ_initials 2L, 5L, 4L, 43L #define SN_crl_distribution_points "crlDistributionPoints" #define LN_crl_distribution_points "X509v3 CRL Distribution Points" #define NID_crl_distribution_points 103 #define OBJ_crl_distribution_points 2L, 5L, 29L, 31L #define SN_md5WithRSA "RSA-NP-MD5" #define LN_md5WithRSA "md5WithRSA" #define NID_md5WithRSA 104 #define OBJ_md5WithRSA 1L, 3L, 14L, 3L, 2L, 3L #define LN_serialNumber "serialNumber" #define NID_serialNumber 105 #define OBJ_serialNumber 2L, 5L, 4L, 5L #define SN_title "title" #define LN_title "title" #define NID_title 106 #define OBJ_title 2L, 5L, 4L, 12L #define LN_description "description" #define NID_description 107 #define OBJ_description 2L, 5L, 4L, 13L #define SN_cast5_cbc "CAST5-CBC" #define LN_cast5_cbc "cast5-cbc" #define NID_cast5_cbc 108 #define OBJ_cast5_cbc 1L, 2L, 840L, 113533L, 7L, 66L, 10L #define SN_cast5_ecb "CAST5-ECB" #define LN_cast5_ecb "cast5-ecb" #define NID_cast5_ecb 109 #define SN_cast5_cfb64 "CAST5-CFB" #define LN_cast5_cfb64 "cast5-cfb" #define NID_cast5_cfb64 110 #define SN_cast5_ofb64 "CAST5-OFB" #define 
LN_cast5_ofb64 "cast5-ofb" #define NID_cast5_ofb64 111 #define LN_pbeWithMD5AndCast5_CBC "pbeWithMD5AndCast5CBC" #define NID_pbeWithMD5AndCast5_CBC 112 #define OBJ_pbeWithMD5AndCast5_CBC 1L, 2L, 840L, 113533L, 7L, 66L, 12L #define SN_dsaWithSHA1 "DSA-SHA1" #define LN_dsaWithSHA1 "dsaWithSHA1" #define NID_dsaWithSHA1 113 #define OBJ_dsaWithSHA1 1L, 2L, 840L, 10040L, 4L, 3L #define SN_md5_sha1 "MD5-SHA1" #define LN_md5_sha1 "md5-sha1" #define NID_md5_sha1 114 #define SN_sha1WithRSA "RSA-SHA1-2" #define LN_sha1WithRSA "sha1WithRSA" #define NID_sha1WithRSA 115 #define OBJ_sha1WithRSA 1L, 3L, 14L, 3L, 2L, 29L #define SN_dsa "DSA" #define LN_dsa "dsaEncryption" #define NID_dsa 116 #define OBJ_dsa 1L, 2L, 840L, 10040L, 4L, 1L #define SN_ripemd160 "RIPEMD160" #define LN_ripemd160 "ripemd160" #define NID_ripemd160 117 #define OBJ_ripemd160 1L, 3L, 36L, 3L, 2L, 1L #define SN_ripemd160WithRSA "RSA-RIPEMD160" #define LN_ripemd160WithRSA "ripemd160WithRSA" #define NID_ripemd160WithRSA 119 #define OBJ_ripemd160WithRSA 1L, 3L, 36L, 3L, 3L, 1L, 2L #define SN_rc5_cbc "RC5-CBC" #define LN_rc5_cbc "rc5-cbc" #define NID_rc5_cbc 120 #define OBJ_rc5_cbc 1L, 2L, 840L, 113549L, 3L, 8L #define SN_rc5_ecb "RC5-ECB" #define LN_rc5_ecb "rc5-ecb" #define NID_rc5_ecb 121 #define SN_rc5_cfb64 "RC5-CFB" #define LN_rc5_cfb64 "rc5-cfb" #define NID_rc5_cfb64 122 #define SN_rc5_ofb64 "RC5-OFB" #define LN_rc5_ofb64 "rc5-ofb" #define NID_rc5_ofb64 123 #define SN_zlib_compression "ZLIB" #define LN_zlib_compression "zlib compression" #define NID_zlib_compression 125 #define OBJ_zlib_compression 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 3L, 8L #define SN_ext_key_usage "extendedKeyUsage" #define LN_ext_key_usage "X509v3 Extended Key Usage" #define NID_ext_key_usage 126 #define OBJ_ext_key_usage 2L, 5L, 29L, 37L #define SN_id_pkix "PKIX" #define NID_id_pkix 127 #define OBJ_id_pkix 1L, 3L, 6L, 1L, 5L, 5L, 7L #define SN_id_kp "id-kp" #define NID_id_kp 128 #define OBJ_id_kp 1L, 3L, 6L, 1L, 5L, 5L, 7L, 3L #define 
SN_server_auth "serverAuth" #define LN_server_auth "TLS Web Server Authentication" #define NID_server_auth 129 #define OBJ_server_auth 1L, 3L, 6L, 1L, 5L, 5L, 7L, 3L, 1L #define SN_client_auth "clientAuth" #define LN_client_auth "TLS Web Client Authentication" #define NID_client_auth 130 #define OBJ_client_auth 1L, 3L, 6L, 1L, 5L, 5L, 7L, 3L, 2L #define SN_code_sign "codeSigning" #define LN_code_sign "Code Signing" #define NID_code_sign 131 #define OBJ_code_sign 1L, 3L, 6L, 1L, 5L, 5L, 7L, 3L, 3L #define SN_email_protect "emailProtection" #define LN_email_protect "E-mail Protection" #define NID_email_protect 132 #define OBJ_email_protect 1L, 3L, 6L, 1L, 5L, 5L, 7L, 3L, 4L #define SN_time_stamp "timeStamping" #define LN_time_stamp "Time Stamping" #define NID_time_stamp 133 #define OBJ_time_stamp 1L, 3L, 6L, 1L, 5L, 5L, 7L, 3L, 8L #define SN_ms_code_ind "msCodeInd" #define LN_ms_code_ind "Microsoft Individual Code Signing" #define NID_ms_code_ind 134 #define OBJ_ms_code_ind 1L, 3L, 6L, 1L, 4L, 1L, 311L, 2L, 1L, 21L #define SN_ms_code_com "msCodeCom" #define LN_ms_code_com "Microsoft Commercial Code Signing" #define NID_ms_code_com 135 #define OBJ_ms_code_com 1L, 3L, 6L, 1L, 4L, 1L, 311L, 2L, 1L, 22L #define SN_ms_ctl_sign "msCTLSign" #define LN_ms_ctl_sign "Microsoft Trust List Signing" #define NID_ms_ctl_sign 136 #define OBJ_ms_ctl_sign 1L, 3L, 6L, 1L, 4L, 1L, 311L, 10L, 3L, 1L #define SN_ms_sgc "msSGC" #define LN_ms_sgc "Microsoft Server Gated Crypto" #define NID_ms_sgc 137 #define OBJ_ms_sgc 1L, 3L, 6L, 1L, 4L, 1L, 311L, 10L, 3L, 3L #define SN_ms_efs "msEFS" #define LN_ms_efs "Microsoft Encrypted File System" #define NID_ms_efs 138 #define OBJ_ms_efs 1L, 3L, 6L, 1L, 4L, 1L, 311L, 10L, 3L, 4L #define SN_ns_sgc "nsSGC" #define LN_ns_sgc "Netscape Server Gated Crypto" #define NID_ns_sgc 139 #define OBJ_ns_sgc 2L, 16L, 840L, 1L, 113730L, 4L, 1L #define SN_delta_crl "deltaCRL" #define LN_delta_crl "X509v3 Delta CRL Indicator" #define NID_delta_crl 140 #define 
OBJ_delta_crl 2L, 5L, 29L, 27L #define SN_crl_reason "CRLReason" #define LN_crl_reason "X509v3 CRL Reason Code" #define NID_crl_reason 141 #define OBJ_crl_reason 2L, 5L, 29L, 21L #define SN_invalidity_date "invalidityDate" #define LN_invalidity_date "Invalidity Date" #define NID_invalidity_date 142 #define OBJ_invalidity_date 2L, 5L, 29L, 24L #define SN_sxnet "SXNetID" #define LN_sxnet "Strong Extranet ID" #define NID_sxnet 143 #define OBJ_sxnet 1L, 3L, 101L, 1L, 4L, 1L #define SN_pbe_WithSHA1And128BitRC4 "PBE-SHA1-RC4-128" #define LN_pbe_WithSHA1And128BitRC4 "pbeWithSHA1And128BitRC4" #define NID_pbe_WithSHA1And128BitRC4 144 #define OBJ_pbe_WithSHA1And128BitRC4 1L, 2L, 840L, 113549L, 1L, 12L, 1L, 1L #define SN_pbe_WithSHA1And40BitRC4 "PBE-SHA1-RC4-40" #define LN_pbe_WithSHA1And40BitRC4 "pbeWithSHA1And40BitRC4" #define NID_pbe_WithSHA1And40BitRC4 145 #define OBJ_pbe_WithSHA1And40BitRC4 1L, 2L, 840L, 113549L, 1L, 12L, 1L, 2L #define SN_pbe_WithSHA1And3_Key_TripleDES_CBC "PBE-SHA1-3DES" #define LN_pbe_WithSHA1And3_Key_TripleDES_CBC "pbeWithSHA1And3-KeyTripleDES-CBC" #define NID_pbe_WithSHA1And3_Key_TripleDES_CBC 146 #define OBJ_pbe_WithSHA1And3_Key_TripleDES_CBC \ 1L, 2L, 840L, 113549L, 1L, 12L, 1L, 3L #define SN_pbe_WithSHA1And2_Key_TripleDES_CBC "PBE-SHA1-2DES" #define LN_pbe_WithSHA1And2_Key_TripleDES_CBC "pbeWithSHA1And2-KeyTripleDES-CBC" #define NID_pbe_WithSHA1And2_Key_TripleDES_CBC 147 #define OBJ_pbe_WithSHA1And2_Key_TripleDES_CBC \ 1L, 2L, 840L, 113549L, 1L, 12L, 1L, 4L #define SN_pbe_WithSHA1And128BitRC2_CBC "PBE-SHA1-RC2-128" #define LN_pbe_WithSHA1And128BitRC2_CBC "pbeWithSHA1And128BitRC2-CBC" #define NID_pbe_WithSHA1And128BitRC2_CBC 148 #define OBJ_pbe_WithSHA1And128BitRC2_CBC 1L, 2L, 840L, 113549L, 1L, 12L, 1L, 5L #define SN_pbe_WithSHA1And40BitRC2_CBC "PBE-SHA1-RC2-40" #define LN_pbe_WithSHA1And40BitRC2_CBC "pbeWithSHA1And40BitRC2-CBC" #define NID_pbe_WithSHA1And40BitRC2_CBC 149 #define OBJ_pbe_WithSHA1And40BitRC2_CBC 1L, 2L, 840L, 113549L, 1L, 12L, 1L, 
6L #define LN_keyBag "keyBag" #define NID_keyBag 150 #define OBJ_keyBag 1L, 2L, 840L, 113549L, 1L, 12L, 10L, 1L, 1L #define LN_pkcs8ShroudedKeyBag "pkcs8ShroudedKeyBag" #define NID_pkcs8ShroudedKeyBag 151 #define OBJ_pkcs8ShroudedKeyBag 1L, 2L, 840L, 113549L, 1L, 12L, 10L, 1L, 2L #define LN_certBag "certBag" #define NID_certBag 152 #define OBJ_certBag 1L, 2L, 840L, 113549L, 1L, 12L, 10L, 1L, 3L #define LN_crlBag "crlBag" #define NID_crlBag 153 #define OBJ_crlBag 1L, 2L, 840L, 113549L, 1L, 12L, 10L, 1L, 4L #define LN_secretBag "secretBag" #define NID_secretBag 154 #define OBJ_secretBag 1L, 2L, 840L, 113549L, 1L, 12L, 10L, 1L, 5L #define LN_safeContentsBag "safeContentsBag" #define NID_safeContentsBag 155 #define OBJ_safeContentsBag 1L, 2L, 840L, 113549L, 1L, 12L, 10L, 1L, 6L #define LN_friendlyName "friendlyName" #define NID_friendlyName 156 #define OBJ_friendlyName 1L, 2L, 840L, 113549L, 1L, 9L, 20L #define LN_localKeyID "localKeyID" #define NID_localKeyID 157 #define OBJ_localKeyID 1L, 2L, 840L, 113549L, 1L, 9L, 21L #define LN_x509Certificate "x509Certificate" #define NID_x509Certificate 158 #define OBJ_x509Certificate 1L, 2L, 840L, 113549L, 1L, 9L, 22L, 1L #define LN_sdsiCertificate "sdsiCertificate" #define NID_sdsiCertificate 159 #define OBJ_sdsiCertificate 1L, 2L, 840L, 113549L, 1L, 9L, 22L, 2L #define LN_x509Crl "x509Crl" #define NID_x509Crl 160 #define OBJ_x509Crl 1L, 2L, 840L, 113549L, 1L, 9L, 23L, 1L #define LN_pbes2 "PBES2" #define NID_pbes2 161 #define OBJ_pbes2 1L, 2L, 840L, 113549L, 1L, 5L, 13L #define LN_pbmac1 "PBMAC1" #define NID_pbmac1 162 #define OBJ_pbmac1 1L, 2L, 840L, 113549L, 1L, 5L, 14L #define LN_hmacWithSHA1 "hmacWithSHA1" #define NID_hmacWithSHA1 163 #define OBJ_hmacWithSHA1 1L, 2L, 840L, 113549L, 2L, 7L #define SN_id_qt_cps "id-qt-cps" #define LN_id_qt_cps "Policy Qualifier CPS" #define NID_id_qt_cps 164 #define OBJ_id_qt_cps 1L, 3L, 6L, 1L, 5L, 5L, 7L, 2L, 1L #define SN_id_qt_unotice "id-qt-unotice" #define LN_id_qt_unotice "Policy 
Qualifier User Notice" #define NID_id_qt_unotice 165 #define OBJ_id_qt_unotice 1L, 3L, 6L, 1L, 5L, 5L, 7L, 2L, 2L #define SN_rc2_64_cbc "RC2-64-CBC" #define LN_rc2_64_cbc "rc2-64-cbc" #define NID_rc2_64_cbc 166 #define SN_SMIMECapabilities "SMIME-CAPS" #define LN_SMIMECapabilities "S/MIME Capabilities" #define NID_SMIMECapabilities 167 #define OBJ_SMIMECapabilities 1L, 2L, 840L, 113549L, 1L, 9L, 15L #define SN_pbeWithMD2AndRC2_CBC "PBE-MD2-RC2-64" #define LN_pbeWithMD2AndRC2_CBC "pbeWithMD2AndRC2-CBC" #define NID_pbeWithMD2AndRC2_CBC 168 #define OBJ_pbeWithMD2AndRC2_CBC 1L, 2L, 840L, 113549L, 1L, 5L, 4L #define SN_pbeWithMD5AndRC2_CBC "PBE-MD5-RC2-64" #define LN_pbeWithMD5AndRC2_CBC "pbeWithMD5AndRC2-CBC" #define NID_pbeWithMD5AndRC2_CBC 169 #define OBJ_pbeWithMD5AndRC2_CBC 1L, 2L, 840L, 113549L, 1L, 5L, 6L #define SN_pbeWithSHA1AndDES_CBC "PBE-SHA1-DES" #define LN_pbeWithSHA1AndDES_CBC "pbeWithSHA1AndDES-CBC" #define NID_pbeWithSHA1AndDES_CBC 170 #define OBJ_pbeWithSHA1AndDES_CBC 1L, 2L, 840L, 113549L, 1L, 5L, 10L #define SN_ms_ext_req "msExtReq" #define LN_ms_ext_req "Microsoft Extension Request" #define NID_ms_ext_req 171 #define OBJ_ms_ext_req 1L, 3L, 6L, 1L, 4L, 1L, 311L, 2L, 1L, 14L #define SN_ext_req "extReq" #define LN_ext_req "Extension Request" #define NID_ext_req 172 #define OBJ_ext_req 1L, 2L, 840L, 113549L, 1L, 9L, 14L #define SN_name "name" #define LN_name "name" #define NID_name 173 #define OBJ_name 2L, 5L, 4L, 41L #define SN_dnQualifier "dnQualifier" #define LN_dnQualifier "dnQualifier" #define NID_dnQualifier 174 #define OBJ_dnQualifier 2L, 5L, 4L, 46L #define SN_id_pe "id-pe" #define NID_id_pe 175 #define OBJ_id_pe 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L #define SN_id_ad "id-ad" #define NID_id_ad 176 #define OBJ_id_ad 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L #define SN_info_access "authorityInfoAccess" #define LN_info_access "Authority Information Access" #define NID_info_access 177 #define OBJ_info_access 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L, 1L #define SN_ad_OCSP 
"OCSP" #define LN_ad_OCSP "OCSP" #define NID_ad_OCSP 178 #define OBJ_ad_OCSP 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 1L #define SN_ad_ca_issuers "caIssuers" #define LN_ad_ca_issuers "CA Issuers" #define NID_ad_ca_issuers 179 #define OBJ_ad_ca_issuers 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 2L #define SN_OCSP_sign "OCSPSigning" #define LN_OCSP_sign "OCSP Signing" #define NID_OCSP_sign 180 #define OBJ_OCSP_sign 1L, 3L, 6L, 1L, 5L, 5L, 7L, 3L, 9L #define SN_iso "ISO" #define LN_iso "iso" #define NID_iso 181 #define OBJ_iso 1L #define SN_member_body "member-body" #define LN_member_body "ISO Member Body" #define NID_member_body 182 #define OBJ_member_body 1L, 2L #define SN_ISO_US "ISO-US" #define LN_ISO_US "ISO US Member Body" #define NID_ISO_US 183 #define OBJ_ISO_US 1L, 2L, 840L #define SN_X9_57 "X9-57" #define LN_X9_57 "X9.57" #define NID_X9_57 184 #define OBJ_X9_57 1L, 2L, 840L, 10040L #define SN_X9cm "X9cm" #define LN_X9cm "X9.57 CM ?" #define NID_X9cm 185 #define OBJ_X9cm 1L, 2L, 840L, 10040L, 4L #define SN_pkcs1 "pkcs1" #define NID_pkcs1 186 #define OBJ_pkcs1 1L, 2L, 840L, 113549L, 1L, 1L #define SN_pkcs5 "pkcs5" #define NID_pkcs5 187 #define OBJ_pkcs5 1L, 2L, 840L, 113549L, 1L, 5L #define SN_SMIME "SMIME" #define LN_SMIME "S/MIME" #define NID_SMIME 188 #define OBJ_SMIME 1L, 2L, 840L, 113549L, 1L, 9L, 16L #define SN_id_smime_mod "id-smime-mod" #define NID_id_smime_mod 189 #define OBJ_id_smime_mod 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 0L #define SN_id_smime_ct "id-smime-ct" #define NID_id_smime_ct 190 #define OBJ_id_smime_ct 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 1L #define SN_id_smime_aa "id-smime-aa" #define NID_id_smime_aa 191 #define OBJ_id_smime_aa 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L #define SN_id_smime_alg "id-smime-alg" #define NID_id_smime_alg 192 #define OBJ_id_smime_alg 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 3L #define SN_id_smime_cd "id-smime-cd" #define NID_id_smime_cd 193 #define OBJ_id_smime_cd 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 4L #define SN_id_smime_spq 
"id-smime-spq" #define NID_id_smime_spq 194 #define OBJ_id_smime_spq 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 5L #define SN_id_smime_cti "id-smime-cti" #define NID_id_smime_cti 195 #define OBJ_id_smime_cti 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 6L #define SN_id_smime_mod_cms "id-smime-mod-cms" #define NID_id_smime_mod_cms 196 #define OBJ_id_smime_mod_cms 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 0L, 1L #define SN_id_smime_mod_ess "id-smime-mod-ess" #define NID_id_smime_mod_ess 197 #define OBJ_id_smime_mod_ess 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 0L, 2L #define SN_id_smime_mod_oid "id-smime-mod-oid" #define NID_id_smime_mod_oid 198 #define OBJ_id_smime_mod_oid 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 0L, 3L #define SN_id_smime_mod_msg_v3 "id-smime-mod-msg-v3" #define NID_id_smime_mod_msg_v3 199 #define OBJ_id_smime_mod_msg_v3 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 0L, 4L #define SN_id_smime_mod_ets_eSignature_88 "id-smime-mod-ets-eSignature-88" #define NID_id_smime_mod_ets_eSignature_88 200 #define OBJ_id_smime_mod_ets_eSignature_88 \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 0L, 5L #define SN_id_smime_mod_ets_eSignature_97 "id-smime-mod-ets-eSignature-97" #define NID_id_smime_mod_ets_eSignature_97 201 #define OBJ_id_smime_mod_ets_eSignature_97 \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 0L, 6L #define SN_id_smime_mod_ets_eSigPolicy_88 "id-smime-mod-ets-eSigPolicy-88" #define NID_id_smime_mod_ets_eSigPolicy_88 202 #define OBJ_id_smime_mod_ets_eSigPolicy_88 \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 0L, 7L #define SN_id_smime_mod_ets_eSigPolicy_97 "id-smime-mod-ets-eSigPolicy-97" #define NID_id_smime_mod_ets_eSigPolicy_97 203 #define OBJ_id_smime_mod_ets_eSigPolicy_97 \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 0L, 8L #define SN_id_smime_ct_receipt "id-smime-ct-receipt" #define NID_id_smime_ct_receipt 204 #define OBJ_id_smime_ct_receipt 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 1L, 1L #define SN_id_smime_ct_authData "id-smime-ct-authData" #define NID_id_smime_ct_authData 205 #define OBJ_id_smime_ct_authData 1L, 2L, 
840L, 113549L, 1L, 9L, 16L, 1L, 2L #define SN_id_smime_ct_publishCert "id-smime-ct-publishCert" #define NID_id_smime_ct_publishCert 206 #define OBJ_id_smime_ct_publishCert 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 1L, 3L #define SN_id_smime_ct_TSTInfo "id-smime-ct-TSTInfo" #define NID_id_smime_ct_TSTInfo 207 #define OBJ_id_smime_ct_TSTInfo 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 1L, 4L #define SN_id_smime_ct_TDTInfo "id-smime-ct-TDTInfo" #define NID_id_smime_ct_TDTInfo 208 #define OBJ_id_smime_ct_TDTInfo 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 1L, 5L #define SN_id_smime_ct_contentInfo "id-smime-ct-contentInfo" #define NID_id_smime_ct_contentInfo 209 #define OBJ_id_smime_ct_contentInfo 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 1L, 6L #define SN_id_smime_ct_DVCSRequestData "id-smime-ct-DVCSRequestData" #define NID_id_smime_ct_DVCSRequestData 210 #define OBJ_id_smime_ct_DVCSRequestData \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 1L, 7L #define SN_id_smime_ct_DVCSResponseData "id-smime-ct-DVCSResponseData" #define NID_id_smime_ct_DVCSResponseData 211 #define OBJ_id_smime_ct_DVCSResponseData \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 1L, 8L #define SN_id_smime_aa_receiptRequest "id-smime-aa-receiptRequest" #define NID_id_smime_aa_receiptRequest 212 #define OBJ_id_smime_aa_receiptRequest \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 1L #define SN_id_smime_aa_securityLabel "id-smime-aa-securityLabel" #define NID_id_smime_aa_securityLabel 213 #define OBJ_id_smime_aa_securityLabel 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 2L #define SN_id_smime_aa_mlExpandHistory "id-smime-aa-mlExpandHistory" #define NID_id_smime_aa_mlExpandHistory 214 #define OBJ_id_smime_aa_mlExpandHistory \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 3L #define SN_id_smime_aa_contentHint "id-smime-aa-contentHint" #define NID_id_smime_aa_contentHint 215 #define OBJ_id_smime_aa_contentHint 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 4L #define SN_id_smime_aa_msgSigDigest "id-smime-aa-msgSigDigest" #define NID_id_smime_aa_msgSigDigest 216 #define 
OBJ_id_smime_aa_msgSigDigest 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 5L #define SN_id_smime_aa_encapContentType "id-smime-aa-encapContentType" #define NID_id_smime_aa_encapContentType 217 #define OBJ_id_smime_aa_encapContentType \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 6L #define SN_id_smime_aa_contentIdentifier "id-smime-aa-contentIdentifier" #define NID_id_smime_aa_contentIdentifier 218 #define OBJ_id_smime_aa_contentIdentifier \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 7L #define SN_id_smime_aa_macValue "id-smime-aa-macValue" #define NID_id_smime_aa_macValue 219 #define OBJ_id_smime_aa_macValue 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 8L #define SN_id_smime_aa_equivalentLabels "id-smime-aa-equivalentLabels" #define NID_id_smime_aa_equivalentLabels 220 #define OBJ_id_smime_aa_equivalentLabels \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 9L #define SN_id_smime_aa_contentReference "id-smime-aa-contentReference" #define NID_id_smime_aa_contentReference 221 #define OBJ_id_smime_aa_contentReference \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 10L #define SN_id_smime_aa_encrypKeyPref "id-smime-aa-encrypKeyPref" #define NID_id_smime_aa_encrypKeyPref 222 #define OBJ_id_smime_aa_encrypKeyPref \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 11L #define SN_id_smime_aa_signingCertificate "id-smime-aa-signingCertificate" #define NID_id_smime_aa_signingCertificate 223 #define OBJ_id_smime_aa_signingCertificate \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 12L #define SN_id_smime_aa_smimeEncryptCerts "id-smime-aa-smimeEncryptCerts" #define NID_id_smime_aa_smimeEncryptCerts 224 #define OBJ_id_smime_aa_smimeEncryptCerts \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 13L #define SN_id_smime_aa_timeStampToken "id-smime-aa-timeStampToken" #define NID_id_smime_aa_timeStampToken 225 #define OBJ_id_smime_aa_timeStampToken \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 14L #define SN_id_smime_aa_ets_sigPolicyId "id-smime-aa-ets-sigPolicyId" #define NID_id_smime_aa_ets_sigPolicyId 226 #define 
OBJ_id_smime_aa_ets_sigPolicyId \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 15L #define SN_id_smime_aa_ets_commitmentType "id-smime-aa-ets-commitmentType" #define NID_id_smime_aa_ets_commitmentType 227 #define OBJ_id_smime_aa_ets_commitmentType \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 16L #define SN_id_smime_aa_ets_signerLocation "id-smime-aa-ets-signerLocation" #define NID_id_smime_aa_ets_signerLocation 228 #define OBJ_id_smime_aa_ets_signerLocation \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 17L #define SN_id_smime_aa_ets_signerAttr "id-smime-aa-ets-signerAttr" #define NID_id_smime_aa_ets_signerAttr 229 #define OBJ_id_smime_aa_ets_signerAttr \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 18L #define SN_id_smime_aa_ets_otherSigCert "id-smime-aa-ets-otherSigCert" #define NID_id_smime_aa_ets_otherSigCert 230 #define OBJ_id_smime_aa_ets_otherSigCert \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 19L #define SN_id_smime_aa_ets_contentTimestamp "id-smime-aa-ets-contentTimestamp" #define NID_id_smime_aa_ets_contentTimestamp 231 #define OBJ_id_smime_aa_ets_contentTimestamp \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 20L #define SN_id_smime_aa_ets_CertificateRefs "id-smime-aa-ets-CertificateRefs" #define NID_id_smime_aa_ets_CertificateRefs 232 #define OBJ_id_smime_aa_ets_CertificateRefs \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 21L #define SN_id_smime_aa_ets_RevocationRefs "id-smime-aa-ets-RevocationRefs" #define NID_id_smime_aa_ets_RevocationRefs 233 #define OBJ_id_smime_aa_ets_RevocationRefs \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 22L #define SN_id_smime_aa_ets_certValues "id-smime-aa-ets-certValues" #define NID_id_smime_aa_ets_certValues 234 #define OBJ_id_smime_aa_ets_certValues \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 23L #define SN_id_smime_aa_ets_revocationValues "id-smime-aa-ets-revocationValues" #define NID_id_smime_aa_ets_revocationValues 235 #define OBJ_id_smime_aa_ets_revocationValues \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 24L #define SN_id_smime_aa_ets_escTimeStamp 
"id-smime-aa-ets-escTimeStamp" #define NID_id_smime_aa_ets_escTimeStamp 236 #define OBJ_id_smime_aa_ets_escTimeStamp \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 25L #define SN_id_smime_aa_ets_certCRLTimestamp "id-smime-aa-ets-certCRLTimestamp" #define NID_id_smime_aa_ets_certCRLTimestamp 237 #define OBJ_id_smime_aa_ets_certCRLTimestamp \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 26L #define SN_id_smime_aa_ets_archiveTimeStamp "id-smime-aa-ets-archiveTimeStamp" #define NID_id_smime_aa_ets_archiveTimeStamp 238 #define OBJ_id_smime_aa_ets_archiveTimeStamp \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 27L #define SN_id_smime_aa_signatureType "id-smime-aa-signatureType" #define NID_id_smime_aa_signatureType 239 #define OBJ_id_smime_aa_signatureType \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 28L #define SN_id_smime_aa_dvcs_dvc "id-smime-aa-dvcs-dvc" #define NID_id_smime_aa_dvcs_dvc 240 #define OBJ_id_smime_aa_dvcs_dvc 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 2L, 29L #define SN_id_smime_alg_ESDHwith3DES "id-smime-alg-ESDHwith3DES" #define NID_id_smime_alg_ESDHwith3DES 241 #define OBJ_id_smime_alg_ESDHwith3DES 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 3L, 1L #define SN_id_smime_alg_ESDHwithRC2 "id-smime-alg-ESDHwithRC2" #define NID_id_smime_alg_ESDHwithRC2 242 #define OBJ_id_smime_alg_ESDHwithRC2 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 3L, 2L #define SN_id_smime_alg_3DESwrap "id-smime-alg-3DESwrap" #define NID_id_smime_alg_3DESwrap 243 #define OBJ_id_smime_alg_3DESwrap 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 3L, 3L #define SN_id_smime_alg_RC2wrap "id-smime-alg-RC2wrap" #define NID_id_smime_alg_RC2wrap 244 #define OBJ_id_smime_alg_RC2wrap 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 3L, 4L #define SN_id_smime_alg_ESDH "id-smime-alg-ESDH" #define NID_id_smime_alg_ESDH 245 #define OBJ_id_smime_alg_ESDH 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 3L, 5L #define SN_id_smime_alg_CMS3DESwrap "id-smime-alg-CMS3DESwrap" #define NID_id_smime_alg_CMS3DESwrap 246 #define OBJ_id_smime_alg_CMS3DESwrap 1L, 2L, 840L, 113549L, 1L, 
9L, 16L, 3L, 6L #define SN_id_smime_alg_CMSRC2wrap "id-smime-alg-CMSRC2wrap" #define NID_id_smime_alg_CMSRC2wrap 247 #define OBJ_id_smime_alg_CMSRC2wrap 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 3L, 7L #define SN_id_smime_cd_ldap "id-smime-cd-ldap" #define NID_id_smime_cd_ldap 248 #define OBJ_id_smime_cd_ldap 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 4L, 1L #define SN_id_smime_spq_ets_sqt_uri "id-smime-spq-ets-sqt-uri" #define NID_id_smime_spq_ets_sqt_uri 249 #define OBJ_id_smime_spq_ets_sqt_uri 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 5L, 1L #define SN_id_smime_spq_ets_sqt_unotice "id-smime-spq-ets-sqt-unotice" #define NID_id_smime_spq_ets_sqt_unotice 250 #define OBJ_id_smime_spq_ets_sqt_unotice \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 5L, 2L #define SN_id_smime_cti_ets_proofOfOrigin "id-smime-cti-ets-proofOfOrigin" #define NID_id_smime_cti_ets_proofOfOrigin 251 #define OBJ_id_smime_cti_ets_proofOfOrigin \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 6L, 1L #define SN_id_smime_cti_ets_proofOfReceipt "id-smime-cti-ets-proofOfReceipt" #define NID_id_smime_cti_ets_proofOfReceipt 252 #define OBJ_id_smime_cti_ets_proofOfReceipt \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 6L, 2L #define SN_id_smime_cti_ets_proofOfDelivery "id-smime-cti-ets-proofOfDelivery" #define NID_id_smime_cti_ets_proofOfDelivery 253 #define OBJ_id_smime_cti_ets_proofOfDelivery \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 6L, 3L #define SN_id_smime_cti_ets_proofOfSender "id-smime-cti-ets-proofOfSender" #define NID_id_smime_cti_ets_proofOfSender 254 #define OBJ_id_smime_cti_ets_proofOfSender \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 6L, 4L #define SN_id_smime_cti_ets_proofOfApproval "id-smime-cti-ets-proofOfApproval" #define NID_id_smime_cti_ets_proofOfApproval 255 #define OBJ_id_smime_cti_ets_proofOfApproval \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 6L, 5L #define SN_id_smime_cti_ets_proofOfCreation "id-smime-cti-ets-proofOfCreation" #define NID_id_smime_cti_ets_proofOfCreation 256 #define OBJ_id_smime_cti_ets_proofOfCreation \ 1L, 2L, 840L, 
113549L, 1L, 9L, 16L, 6L, 6L #define SN_md4 "MD4" #define LN_md4 "md4" #define NID_md4 257 #define OBJ_md4 1L, 2L, 840L, 113549L, 2L, 4L #define SN_id_pkix_mod "id-pkix-mod" #define NID_id_pkix_mod 258 #define OBJ_id_pkix_mod 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L #define SN_id_qt "id-qt" #define NID_id_qt 259 #define OBJ_id_qt 1L, 3L, 6L, 1L, 5L, 5L, 7L, 2L #define SN_id_it "id-it" #define NID_id_it 260 #define OBJ_id_it 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L #define SN_id_pkip "id-pkip" #define NID_id_pkip 261 #define OBJ_id_pkip 1L, 3L, 6L, 1L, 5L, 5L, 7L, 5L #define SN_id_alg "id-alg" #define NID_id_alg 262 #define OBJ_id_alg 1L, 3L, 6L, 1L, 5L, 5L, 7L, 6L #define SN_id_cmc "id-cmc" #define NID_id_cmc 263 #define OBJ_id_cmc 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L #define SN_id_on "id-on" #define NID_id_on 264 #define OBJ_id_on 1L, 3L, 6L, 1L, 5L, 5L, 7L, 8L #define SN_id_pda "id-pda" #define NID_id_pda 265 #define OBJ_id_pda 1L, 3L, 6L, 1L, 5L, 5L, 7L, 9L #define SN_id_aca "id-aca" #define NID_id_aca 266 #define OBJ_id_aca 1L, 3L, 6L, 1L, 5L, 5L, 7L, 10L #define SN_id_qcs "id-qcs" #define NID_id_qcs 267 #define OBJ_id_qcs 1L, 3L, 6L, 1L, 5L, 5L, 7L, 11L #define SN_id_cct "id-cct" #define NID_id_cct 268 #define OBJ_id_cct 1L, 3L, 6L, 1L, 5L, 5L, 7L, 12L #define SN_id_pkix1_explicit_88 "id-pkix1-explicit-88" #define NID_id_pkix1_explicit_88 269 #define OBJ_id_pkix1_explicit_88 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 1L #define SN_id_pkix1_implicit_88 "id-pkix1-implicit-88" #define NID_id_pkix1_implicit_88 270 #define OBJ_id_pkix1_implicit_88 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 2L #define SN_id_pkix1_explicit_93 "id-pkix1-explicit-93" #define NID_id_pkix1_explicit_93 271 #define OBJ_id_pkix1_explicit_93 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 3L #define SN_id_pkix1_implicit_93 "id-pkix1-implicit-93" #define NID_id_pkix1_implicit_93 272 #define OBJ_id_pkix1_implicit_93 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 4L #define SN_id_mod_crmf "id-mod-crmf" #define NID_id_mod_crmf 273 #define OBJ_id_mod_crmf 1L, 3L, 6L, 1L, 
5L, 5L, 7L, 0L, 5L #define SN_id_mod_cmc "id-mod-cmc" #define NID_id_mod_cmc 274 #define OBJ_id_mod_cmc 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 6L #define SN_id_mod_kea_profile_88 "id-mod-kea-profile-88" #define NID_id_mod_kea_profile_88 275 #define OBJ_id_mod_kea_profile_88 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 7L #define SN_id_mod_kea_profile_93 "id-mod-kea-profile-93" #define NID_id_mod_kea_profile_93 276 #define OBJ_id_mod_kea_profile_93 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 8L #define SN_id_mod_cmp "id-mod-cmp" #define NID_id_mod_cmp 277 #define OBJ_id_mod_cmp 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 9L #define SN_id_mod_qualified_cert_88 "id-mod-qualified-cert-88" #define NID_id_mod_qualified_cert_88 278 #define OBJ_id_mod_qualified_cert_88 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 10L #define SN_id_mod_qualified_cert_93 "id-mod-qualified-cert-93" #define NID_id_mod_qualified_cert_93 279 #define OBJ_id_mod_qualified_cert_93 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 11L #define SN_id_mod_attribute_cert "id-mod-attribute-cert" #define NID_id_mod_attribute_cert 280 #define OBJ_id_mod_attribute_cert 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 12L #define SN_id_mod_timestamp_protocol "id-mod-timestamp-protocol" #define NID_id_mod_timestamp_protocol 281 #define OBJ_id_mod_timestamp_protocol 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 13L #define SN_id_mod_ocsp "id-mod-ocsp" #define NID_id_mod_ocsp 282 #define OBJ_id_mod_ocsp 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 14L #define SN_id_mod_dvcs "id-mod-dvcs" #define NID_id_mod_dvcs 283 #define OBJ_id_mod_dvcs 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 15L #define SN_id_mod_cmp2000 "id-mod-cmp2000" #define NID_id_mod_cmp2000 284 #define OBJ_id_mod_cmp2000 1L, 3L, 6L, 1L, 5L, 5L, 7L, 0L, 16L #define SN_biometricInfo "biometricInfo" #define LN_biometricInfo "Biometric Info" #define NID_biometricInfo 285 #define OBJ_biometricInfo 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L, 2L #define SN_qcStatements "qcStatements" #define NID_qcStatements 286 #define OBJ_qcStatements 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L, 3L #define 
SN_ac_auditEntity "ac-auditEntity" #define NID_ac_auditEntity 287 #define OBJ_ac_auditEntity 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L, 4L #define SN_ac_targeting "ac-targeting" #define NID_ac_targeting 288 #define OBJ_ac_targeting 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L, 5L #define SN_aaControls "aaControls" #define NID_aaControls 289 #define OBJ_aaControls 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L, 6L #define SN_sbgp_ipAddrBlock "sbgp-ipAddrBlock" #define NID_sbgp_ipAddrBlock 290 #define OBJ_sbgp_ipAddrBlock 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L, 7L #define SN_sbgp_autonomousSysNum "sbgp-autonomousSysNum" #define NID_sbgp_autonomousSysNum 291 #define OBJ_sbgp_autonomousSysNum 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L, 8L #define SN_sbgp_routerIdentifier "sbgp-routerIdentifier" #define NID_sbgp_routerIdentifier 292 #define OBJ_sbgp_routerIdentifier 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L, 9L #define SN_textNotice "textNotice" #define NID_textNotice 293 #define OBJ_textNotice 1L, 3L, 6L, 1L, 5L, 5L, 7L, 2L, 3L #define SN_ipsecEndSystem "ipsecEndSystem" #define LN_ipsecEndSystem "IPSec End System" #define NID_ipsecEndSystem 294 #define OBJ_ipsecEndSystem 1L, 3L, 6L, 1L, 5L, 5L, 7L, 3L, 5L #define SN_ipsecTunnel "ipsecTunnel" #define LN_ipsecTunnel "IPSec Tunnel" #define NID_ipsecTunnel 295 #define OBJ_ipsecTunnel 1L, 3L, 6L, 1L, 5L, 5L, 7L, 3L, 6L #define SN_ipsecUser "ipsecUser" #define LN_ipsecUser "IPSec User" #define NID_ipsecUser 296 #define OBJ_ipsecUser 1L, 3L, 6L, 1L, 5L, 5L, 7L, 3L, 7L #define SN_dvcs "DVCS" #define LN_dvcs "dvcs" #define NID_dvcs 297 #define OBJ_dvcs 1L, 3L, 6L, 1L, 5L, 5L, 7L, 3L, 10L #define SN_id_it_caProtEncCert "id-it-caProtEncCert" #define NID_id_it_caProtEncCert 298 #define OBJ_id_it_caProtEncCert 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 1L #define SN_id_it_signKeyPairTypes "id-it-signKeyPairTypes" #define NID_id_it_signKeyPairTypes 299 #define OBJ_id_it_signKeyPairTypes 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 2L #define SN_id_it_encKeyPairTypes "id-it-encKeyPairTypes" #define NID_id_it_encKeyPairTypes 300 
#define OBJ_id_it_encKeyPairTypes 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 3L #define SN_id_it_preferredSymmAlg "id-it-preferredSymmAlg" #define NID_id_it_preferredSymmAlg 301 #define OBJ_id_it_preferredSymmAlg 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 4L #define SN_id_it_caKeyUpdateInfo "id-it-caKeyUpdateInfo" #define NID_id_it_caKeyUpdateInfo 302 #define OBJ_id_it_caKeyUpdateInfo 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 5L #define SN_id_it_currentCRL "id-it-currentCRL" #define NID_id_it_currentCRL 303 #define OBJ_id_it_currentCRL 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 6L #define SN_id_it_unsupportedOIDs "id-it-unsupportedOIDs" #define NID_id_it_unsupportedOIDs 304 #define OBJ_id_it_unsupportedOIDs 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 7L #define SN_id_it_subscriptionRequest "id-it-subscriptionRequest" #define NID_id_it_subscriptionRequest 305 #define OBJ_id_it_subscriptionRequest 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 8L #define SN_id_it_subscriptionResponse "id-it-subscriptionResponse" #define NID_id_it_subscriptionResponse 306 #define OBJ_id_it_subscriptionResponse 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 9L #define SN_id_it_keyPairParamReq "id-it-keyPairParamReq" #define NID_id_it_keyPairParamReq 307 #define OBJ_id_it_keyPairParamReq 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 10L #define SN_id_it_keyPairParamRep "id-it-keyPairParamRep" #define NID_id_it_keyPairParamRep 308 #define OBJ_id_it_keyPairParamRep 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 11L #define SN_id_it_revPassphrase "id-it-revPassphrase" #define NID_id_it_revPassphrase 309 #define OBJ_id_it_revPassphrase 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 12L #define SN_id_it_implicitConfirm "id-it-implicitConfirm" #define NID_id_it_implicitConfirm 310 #define OBJ_id_it_implicitConfirm 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 13L #define SN_id_it_confirmWaitTime "id-it-confirmWaitTime" #define NID_id_it_confirmWaitTime 311 #define OBJ_id_it_confirmWaitTime 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 14L #define SN_id_it_origPKIMessage "id-it-origPKIMessage" #define NID_id_it_origPKIMessage 312 #define 
OBJ_id_it_origPKIMessage 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 15L #define SN_id_regCtrl "id-regCtrl" #define NID_id_regCtrl 313 #define OBJ_id_regCtrl 1L, 3L, 6L, 1L, 5L, 5L, 7L, 5L, 1L #define SN_id_regInfo "id-regInfo" #define NID_id_regInfo 314 #define OBJ_id_regInfo 1L, 3L, 6L, 1L, 5L, 5L, 7L, 5L, 2L #define SN_id_regCtrl_regToken "id-regCtrl-regToken" #define NID_id_regCtrl_regToken 315 #define OBJ_id_regCtrl_regToken 1L, 3L, 6L, 1L, 5L, 5L, 7L, 5L, 1L, 1L #define SN_id_regCtrl_authenticator "id-regCtrl-authenticator" #define NID_id_regCtrl_authenticator 316 #define OBJ_id_regCtrl_authenticator 1L, 3L, 6L, 1L, 5L, 5L, 7L, 5L, 1L, 2L #define SN_id_regCtrl_pkiPublicationInfo "id-regCtrl-pkiPublicationInfo" #define NID_id_regCtrl_pkiPublicationInfo 317 #define OBJ_id_regCtrl_pkiPublicationInfo 1L, 3L, 6L, 1L, 5L, 5L, 7L, 5L, 1L, 3L #define SN_id_regCtrl_pkiArchiveOptions "id-regCtrl-pkiArchiveOptions" #define NID_id_regCtrl_pkiArchiveOptions 318 #define OBJ_id_regCtrl_pkiArchiveOptions 1L, 3L, 6L, 1L, 5L, 5L, 7L, 5L, 1L, 4L #define SN_id_regCtrl_oldCertID "id-regCtrl-oldCertID" #define NID_id_regCtrl_oldCertID 319 #define OBJ_id_regCtrl_oldCertID 1L, 3L, 6L, 1L, 5L, 5L, 7L, 5L, 1L, 5L #define SN_id_regCtrl_protocolEncrKey "id-regCtrl-protocolEncrKey" #define NID_id_regCtrl_protocolEncrKey 320 #define OBJ_id_regCtrl_protocolEncrKey 1L, 3L, 6L, 1L, 5L, 5L, 7L, 5L, 1L, 6L #define SN_id_regInfo_utf8Pairs "id-regInfo-utf8Pairs" #define NID_id_regInfo_utf8Pairs 321 #define OBJ_id_regInfo_utf8Pairs 1L, 3L, 6L, 1L, 5L, 5L, 7L, 5L, 2L, 1L #define SN_id_regInfo_certReq "id-regInfo-certReq" #define NID_id_regInfo_certReq 322 #define OBJ_id_regInfo_certReq 1L, 3L, 6L, 1L, 5L, 5L, 7L, 5L, 2L, 2L #define SN_id_alg_des40 "id-alg-des40" #define NID_id_alg_des40 323 #define OBJ_id_alg_des40 1L, 3L, 6L, 1L, 5L, 5L, 7L, 6L, 1L #define SN_id_alg_noSignature "id-alg-noSignature" #define NID_id_alg_noSignature 324 #define OBJ_id_alg_noSignature 1L, 3L, 6L, 1L, 5L, 5L, 7L, 6L, 2L #define 
SN_id_alg_dh_sig_hmac_sha1 "id-alg-dh-sig-hmac-sha1" #define NID_id_alg_dh_sig_hmac_sha1 325 #define OBJ_id_alg_dh_sig_hmac_sha1 1L, 3L, 6L, 1L, 5L, 5L, 7L, 6L, 3L #define SN_id_alg_dh_pop "id-alg-dh-pop" #define NID_id_alg_dh_pop 326 #define OBJ_id_alg_dh_pop 1L, 3L, 6L, 1L, 5L, 5L, 7L, 6L, 4L #define SN_id_cmc_statusInfo "id-cmc-statusInfo" #define NID_id_cmc_statusInfo 327 #define OBJ_id_cmc_statusInfo 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 1L #define SN_id_cmc_identification "id-cmc-identification" #define NID_id_cmc_identification 328 #define OBJ_id_cmc_identification 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 2L #define SN_id_cmc_identityProof "id-cmc-identityProof" #define NID_id_cmc_identityProof 329 #define OBJ_id_cmc_identityProof 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 3L #define SN_id_cmc_dataReturn "id-cmc-dataReturn" #define NID_id_cmc_dataReturn 330 #define OBJ_id_cmc_dataReturn 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 4L #define SN_id_cmc_transactionId "id-cmc-transactionId" #define NID_id_cmc_transactionId 331 #define OBJ_id_cmc_transactionId 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 5L #define SN_id_cmc_senderNonce "id-cmc-senderNonce" #define NID_id_cmc_senderNonce 332 #define OBJ_id_cmc_senderNonce 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 6L #define SN_id_cmc_recipientNonce "id-cmc-recipientNonce" #define NID_id_cmc_recipientNonce 333 #define OBJ_id_cmc_recipientNonce 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 7L #define SN_id_cmc_addExtensions "id-cmc-addExtensions" #define NID_id_cmc_addExtensions 334 #define OBJ_id_cmc_addExtensions 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 8L #define SN_id_cmc_encryptedPOP "id-cmc-encryptedPOP" #define NID_id_cmc_encryptedPOP 335 #define OBJ_id_cmc_encryptedPOP 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 9L #define SN_id_cmc_decryptedPOP "id-cmc-decryptedPOP" #define NID_id_cmc_decryptedPOP 336 #define OBJ_id_cmc_decryptedPOP 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 10L #define SN_id_cmc_lraPOPWitness "id-cmc-lraPOPWitness" #define NID_id_cmc_lraPOPWitness 337 #define OBJ_id_cmc_lraPOPWitness 1L, 3L, 
6L, 1L, 5L, 5L, 7L, 7L, 11L #define SN_id_cmc_getCert "id-cmc-getCert" #define NID_id_cmc_getCert 338 #define OBJ_id_cmc_getCert 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 15L #define SN_id_cmc_getCRL "id-cmc-getCRL" #define NID_id_cmc_getCRL 339 #define OBJ_id_cmc_getCRL 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 16L #define SN_id_cmc_revokeRequest "id-cmc-revokeRequest" #define NID_id_cmc_revokeRequest 340 #define OBJ_id_cmc_revokeRequest 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 17L #define SN_id_cmc_regInfo "id-cmc-regInfo" #define NID_id_cmc_regInfo 341 #define OBJ_id_cmc_regInfo 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 18L #define SN_id_cmc_responseInfo "id-cmc-responseInfo" #define NID_id_cmc_responseInfo 342 #define OBJ_id_cmc_responseInfo 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 19L #define SN_id_cmc_queryPending "id-cmc-queryPending" #define NID_id_cmc_queryPending 343 #define OBJ_id_cmc_queryPending 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 21L #define SN_id_cmc_popLinkRandom "id-cmc-popLinkRandom" #define NID_id_cmc_popLinkRandom 344 #define OBJ_id_cmc_popLinkRandom 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 22L #define SN_id_cmc_popLinkWitness "id-cmc-popLinkWitness" #define NID_id_cmc_popLinkWitness 345 #define OBJ_id_cmc_popLinkWitness 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 23L #define SN_id_cmc_confirmCertAcceptance "id-cmc-confirmCertAcceptance" #define NID_id_cmc_confirmCertAcceptance 346 #define OBJ_id_cmc_confirmCertAcceptance 1L, 3L, 6L, 1L, 5L, 5L, 7L, 7L, 24L #define SN_id_on_personalData "id-on-personalData" #define NID_id_on_personalData 347 #define OBJ_id_on_personalData 1L, 3L, 6L, 1L, 5L, 5L, 7L, 8L, 1L #define SN_id_pda_dateOfBirth "id-pda-dateOfBirth" #define NID_id_pda_dateOfBirth 348 #define OBJ_id_pda_dateOfBirth 1L, 3L, 6L, 1L, 5L, 5L, 7L, 9L, 1L #define SN_id_pda_placeOfBirth "id-pda-placeOfBirth" #define NID_id_pda_placeOfBirth 349 #define OBJ_id_pda_placeOfBirth 1L, 3L, 6L, 1L, 5L, 5L, 7L, 9L, 2L #define SN_id_pda_gender "id-pda-gender" #define NID_id_pda_gender 351 #define OBJ_id_pda_gender 1L, 3L, 6L, 
1L, 5L, 5L, 7L, 9L, 3L #define SN_id_pda_countryOfCitizenship "id-pda-countryOfCitizenship" #define NID_id_pda_countryOfCitizenship 352 #define OBJ_id_pda_countryOfCitizenship 1L, 3L, 6L, 1L, 5L, 5L, 7L, 9L, 4L #define SN_id_pda_countryOfResidence "id-pda-countryOfResidence" #define NID_id_pda_countryOfResidence 353 #define OBJ_id_pda_countryOfResidence 1L, 3L, 6L, 1L, 5L, 5L, 7L, 9L, 5L #define SN_id_aca_authenticationInfo "id-aca-authenticationInfo" #define NID_id_aca_authenticationInfo 354 #define OBJ_id_aca_authenticationInfo 1L, 3L, 6L, 1L, 5L, 5L, 7L, 10L, 1L #define SN_id_aca_accessIdentity "id-aca-accessIdentity" #define NID_id_aca_accessIdentity 355 #define OBJ_id_aca_accessIdentity 1L, 3L, 6L, 1L, 5L, 5L, 7L, 10L, 2L #define SN_id_aca_chargingIdentity "id-aca-chargingIdentity" #define NID_id_aca_chargingIdentity 356 #define OBJ_id_aca_chargingIdentity 1L, 3L, 6L, 1L, 5L, 5L, 7L, 10L, 3L #define SN_id_aca_group "id-aca-group" #define NID_id_aca_group 357 #define OBJ_id_aca_group 1L, 3L, 6L, 1L, 5L, 5L, 7L, 10L, 4L #define SN_id_aca_role "id-aca-role" #define NID_id_aca_role 358 #define OBJ_id_aca_role 1L, 3L, 6L, 1L, 5L, 5L, 7L, 10L, 5L #define SN_id_qcs_pkixQCSyntax_v1 "id-qcs-pkixQCSyntax-v1" #define NID_id_qcs_pkixQCSyntax_v1 359 #define OBJ_id_qcs_pkixQCSyntax_v1 1L, 3L, 6L, 1L, 5L, 5L, 7L, 11L, 1L #define SN_id_cct_crs "id-cct-crs" #define NID_id_cct_crs 360 #define OBJ_id_cct_crs 1L, 3L, 6L, 1L, 5L, 5L, 7L, 12L, 1L #define SN_id_cct_PKIData "id-cct-PKIData" #define NID_id_cct_PKIData 361 #define OBJ_id_cct_PKIData 1L, 3L, 6L, 1L, 5L, 5L, 7L, 12L, 2L #define SN_id_cct_PKIResponse "id-cct-PKIResponse" #define NID_id_cct_PKIResponse 362 #define OBJ_id_cct_PKIResponse 1L, 3L, 6L, 1L, 5L, 5L, 7L, 12L, 3L #define SN_ad_timeStamping "ad_timestamping" #define LN_ad_timeStamping "AD Time Stamping" #define NID_ad_timeStamping 363 #define OBJ_ad_timeStamping 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 3L #define SN_ad_dvcs "AD_DVCS" #define LN_ad_dvcs "ad dvcs" #define 
NID_ad_dvcs 364 #define OBJ_ad_dvcs 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 4L #define SN_id_pkix_OCSP_basic "basicOCSPResponse" #define LN_id_pkix_OCSP_basic "Basic OCSP Response" #define NID_id_pkix_OCSP_basic 365 #define OBJ_id_pkix_OCSP_basic 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 1L, 1L #define SN_id_pkix_OCSP_Nonce "Nonce" #define LN_id_pkix_OCSP_Nonce "OCSP Nonce" #define NID_id_pkix_OCSP_Nonce 366 #define OBJ_id_pkix_OCSP_Nonce 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 1L, 2L #define SN_id_pkix_OCSP_CrlID "CrlID" #define LN_id_pkix_OCSP_CrlID "OCSP CRL ID" #define NID_id_pkix_OCSP_CrlID 367 #define OBJ_id_pkix_OCSP_CrlID 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 1L, 3L #define SN_id_pkix_OCSP_acceptableResponses "acceptableResponses" #define LN_id_pkix_OCSP_acceptableResponses "Acceptable OCSP Responses" #define NID_id_pkix_OCSP_acceptableResponses 368 #define OBJ_id_pkix_OCSP_acceptableResponses \ 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 1L, 4L #define SN_id_pkix_OCSP_noCheck "noCheck" #define LN_id_pkix_OCSP_noCheck "OCSP No Check" #define NID_id_pkix_OCSP_noCheck 369 #define OBJ_id_pkix_OCSP_noCheck 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 1L, 5L #define SN_id_pkix_OCSP_archiveCutoff "archiveCutoff" #define LN_id_pkix_OCSP_archiveCutoff "OCSP Archive Cutoff" #define NID_id_pkix_OCSP_archiveCutoff 370 #define OBJ_id_pkix_OCSP_archiveCutoff 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 1L, 6L #define SN_id_pkix_OCSP_serviceLocator "serviceLocator" #define LN_id_pkix_OCSP_serviceLocator "OCSP Service Locator" #define NID_id_pkix_OCSP_serviceLocator 371 #define OBJ_id_pkix_OCSP_serviceLocator 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 1L, 7L #define SN_id_pkix_OCSP_extendedStatus "extendedStatus" #define LN_id_pkix_OCSP_extendedStatus "Extended OCSP Status" #define NID_id_pkix_OCSP_extendedStatus 372 #define OBJ_id_pkix_OCSP_extendedStatus 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 1L, 8L #define SN_id_pkix_OCSP_valid "valid" #define NID_id_pkix_OCSP_valid 373 #define OBJ_id_pkix_OCSP_valid 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 1L, 9L 
#define SN_id_pkix_OCSP_path "path" #define NID_id_pkix_OCSP_path 374 #define OBJ_id_pkix_OCSP_path 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 1L, 10L #define SN_id_pkix_OCSP_trustRoot "trustRoot" #define LN_id_pkix_OCSP_trustRoot "Trust Root" #define NID_id_pkix_OCSP_trustRoot 375 #define OBJ_id_pkix_OCSP_trustRoot 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 1L, 11L #define SN_algorithm "algorithm" #define LN_algorithm "algorithm" #define NID_algorithm 376 #define OBJ_algorithm 1L, 3L, 14L, 3L, 2L #define SN_rsaSignature "rsaSignature" #define NID_rsaSignature 377 #define OBJ_rsaSignature 1L, 3L, 14L, 3L, 2L, 11L #define SN_X500algorithms "X500algorithms" #define LN_X500algorithms "directory services - algorithms" #define NID_X500algorithms 378 #define OBJ_X500algorithms 2L, 5L, 8L #define SN_org "ORG" #define LN_org "org" #define NID_org 379 #define OBJ_org 1L, 3L #define SN_dod "DOD" #define LN_dod "dod" #define NID_dod 380 #define OBJ_dod 1L, 3L, 6L #define SN_iana "IANA" #define LN_iana "iana" #define NID_iana 381 #define OBJ_iana 1L, 3L, 6L, 1L #define SN_Directory "directory" #define LN_Directory "Directory" #define NID_Directory 382 #define OBJ_Directory 1L, 3L, 6L, 1L, 1L #define SN_Management "mgmt" #define LN_Management "Management" #define NID_Management 383 #define OBJ_Management 1L, 3L, 6L, 1L, 2L #define SN_Experimental "experimental" #define LN_Experimental "Experimental" #define NID_Experimental 384 #define OBJ_Experimental 1L, 3L, 6L, 1L, 3L #define SN_Private "private" #define LN_Private "Private" #define NID_Private 385 #define OBJ_Private 1L, 3L, 6L, 1L, 4L #define SN_Security "security" #define LN_Security "Security" #define NID_Security 386 #define OBJ_Security 1L, 3L, 6L, 1L, 5L #define SN_SNMPv2 "snmpv2" #define LN_SNMPv2 "SNMPv2" #define NID_SNMPv2 387 #define OBJ_SNMPv2 1L, 3L, 6L, 1L, 6L #define LN_Mail "Mail" #define NID_Mail 388 #define OBJ_Mail 1L, 3L, 6L, 1L, 7L #define SN_Enterprises "enterprises" #define LN_Enterprises "Enterprises" #define 
NID_Enterprises 389 #define OBJ_Enterprises 1L, 3L, 6L, 1L, 4L, 1L #define SN_dcObject "dcobject" #define LN_dcObject "dcObject" #define NID_dcObject 390 #define OBJ_dcObject 1L, 3L, 6L, 1L, 4L, 1L, 1466L, 344L #define SN_domainComponent "DC" #define LN_domainComponent "domainComponent" #define NID_domainComponent 391 #define OBJ_domainComponent 0L, 9L, 2342L, 19200300L, 100L, 1L, 25L #define SN_Domain "domain" #define LN_Domain "Domain" #define NID_Domain 392 #define OBJ_Domain 0L, 9L, 2342L, 19200300L, 100L, 4L, 13L #define SN_selected_attribute_types "selected-attribute-types" #define LN_selected_attribute_types "Selected Attribute Types" #define NID_selected_attribute_types 394 #define OBJ_selected_attribute_types 2L, 5L, 1L, 5L #define SN_clearance "clearance" #define NID_clearance 395 #define OBJ_clearance 2L, 5L, 1L, 5L, 55L #define SN_md4WithRSAEncryption "RSA-MD4" #define LN_md4WithRSAEncryption "md4WithRSAEncryption" #define NID_md4WithRSAEncryption 396 #define OBJ_md4WithRSAEncryption 1L, 2L, 840L, 113549L, 1L, 1L, 3L #define SN_ac_proxying "ac-proxying" #define NID_ac_proxying 397 #define OBJ_ac_proxying 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L, 10L #define SN_sinfo_access "subjectInfoAccess" #define LN_sinfo_access "Subject Information Access" #define NID_sinfo_access 398 #define OBJ_sinfo_access 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L, 11L #define SN_id_aca_encAttrs "id-aca-encAttrs" #define NID_id_aca_encAttrs 399 #define OBJ_id_aca_encAttrs 1L, 3L, 6L, 1L, 5L, 5L, 7L, 10L, 6L #define SN_role "role" #define LN_role "role" #define NID_role 400 #define OBJ_role 2L, 5L, 4L, 72L #define SN_policy_constraints "policyConstraints" #define LN_policy_constraints "X509v3 Policy Constraints" #define NID_policy_constraints 401 #define OBJ_policy_constraints 2L, 5L, 29L, 36L #define SN_target_information "targetInformation" #define LN_target_information "X509v3 AC Targeting" #define NID_target_information 402 #define OBJ_target_information 2L, 5L, 29L, 55L #define SN_no_rev_avail 
"noRevAvail" #define LN_no_rev_avail "X509v3 No Revocation Available" #define NID_no_rev_avail 403 #define OBJ_no_rev_avail 2L, 5L, 29L, 56L #define SN_ansi_X9_62 "ansi-X9-62" #define LN_ansi_X9_62 "ANSI X9.62" #define NID_ansi_X9_62 405 #define OBJ_ansi_X9_62 1L, 2L, 840L, 10045L #define SN_X9_62_prime_field "prime-field" #define NID_X9_62_prime_field 406 #define OBJ_X9_62_prime_field 1L, 2L, 840L, 10045L, 1L, 1L #define SN_X9_62_characteristic_two_field "characteristic-two-field" #define NID_X9_62_characteristic_two_field 407 #define OBJ_X9_62_characteristic_two_field 1L, 2L, 840L, 10045L, 1L, 2L #define SN_X9_62_id_ecPublicKey "id-ecPublicKey" #define NID_X9_62_id_ecPublicKey 408 #define OBJ_X9_62_id_ecPublicKey 1L, 2L, 840L, 10045L, 2L, 1L #define SN_X9_62_prime192v1 "prime192v1" #define NID_X9_62_prime192v1 409 #define OBJ_X9_62_prime192v1 1L, 2L, 840L, 10045L, 3L, 1L, 1L #define SN_X9_62_prime192v2 "prime192v2" #define NID_X9_62_prime192v2 410 #define OBJ_X9_62_prime192v2 1L, 2L, 840L, 10045L, 3L, 1L, 2L #define SN_X9_62_prime192v3 "prime192v3" #define NID_X9_62_prime192v3 411 #define OBJ_X9_62_prime192v3 1L, 2L, 840L, 10045L, 3L, 1L, 3L #define SN_X9_62_prime239v1 "prime239v1" #define NID_X9_62_prime239v1 412 #define OBJ_X9_62_prime239v1 1L, 2L, 840L, 10045L, 3L, 1L, 4L #define SN_X9_62_prime239v2 "prime239v2" #define NID_X9_62_prime239v2 413 #define OBJ_X9_62_prime239v2 1L, 2L, 840L, 10045L, 3L, 1L, 5L #define SN_X9_62_prime239v3 "prime239v3" #define NID_X9_62_prime239v3 414 #define OBJ_X9_62_prime239v3 1L, 2L, 840L, 10045L, 3L, 1L, 6L #define SN_X9_62_prime256v1 "prime256v1" #define NID_X9_62_prime256v1 415 #define OBJ_X9_62_prime256v1 1L, 2L, 840L, 10045L, 3L, 1L, 7L #define SN_ecdsa_with_SHA1 "ecdsa-with-SHA1" #define NID_ecdsa_with_SHA1 416 #define OBJ_ecdsa_with_SHA1 1L, 2L, 840L, 10045L, 4L, 1L #define SN_ms_csp_name "CSPName" #define LN_ms_csp_name "Microsoft CSP Name" #define NID_ms_csp_name 417 #define OBJ_ms_csp_name 1L, 3L, 6L, 1L, 4L, 1L, 311L, 
17L, 1L #define SN_aes_128_ecb "AES-128-ECB" #define LN_aes_128_ecb "aes-128-ecb" #define NID_aes_128_ecb 418 #define OBJ_aes_128_ecb 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 1L #define SN_aes_128_cbc "AES-128-CBC" #define LN_aes_128_cbc "aes-128-cbc" #define NID_aes_128_cbc 419 #define OBJ_aes_128_cbc 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 2L #define SN_aes_128_ofb128 "AES-128-OFB" #define LN_aes_128_ofb128 "aes-128-ofb" #define NID_aes_128_ofb128 420 #define OBJ_aes_128_ofb128 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 3L #define SN_aes_128_cfb128 "AES-128-CFB" #define LN_aes_128_cfb128 "aes-128-cfb" #define NID_aes_128_cfb128 421 #define OBJ_aes_128_cfb128 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 4L #define SN_aes_192_ecb "AES-192-ECB" #define LN_aes_192_ecb "aes-192-ecb" #define NID_aes_192_ecb 422 #define OBJ_aes_192_ecb 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 21L #define SN_aes_192_cbc "AES-192-CBC" #define LN_aes_192_cbc "aes-192-cbc" #define NID_aes_192_cbc 423 #define OBJ_aes_192_cbc 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 22L #define SN_aes_192_ofb128 "AES-192-OFB" #define LN_aes_192_ofb128 "aes-192-ofb" #define NID_aes_192_ofb128 424 #define OBJ_aes_192_ofb128 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 23L #define SN_aes_192_cfb128 "AES-192-CFB" #define LN_aes_192_cfb128 "aes-192-cfb" #define NID_aes_192_cfb128 425 #define OBJ_aes_192_cfb128 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 24L #define SN_aes_256_ecb "AES-256-ECB" #define LN_aes_256_ecb "aes-256-ecb" #define NID_aes_256_ecb 426 #define OBJ_aes_256_ecb 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 41L #define SN_aes_256_cbc "AES-256-CBC" #define LN_aes_256_cbc "aes-256-cbc" #define NID_aes_256_cbc 427 #define OBJ_aes_256_cbc 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 42L #define SN_aes_256_ofb128 "AES-256-OFB" #define LN_aes_256_ofb128 "aes-256-ofb" #define NID_aes_256_ofb128 428 #define OBJ_aes_256_ofb128 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 43L #define SN_aes_256_cfb128 "AES-256-CFB" #define LN_aes_256_cfb128 "aes-256-cfb" #define 
NID_aes_256_cfb128 429 #define OBJ_aes_256_cfb128 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 44L #define SN_hold_instruction_code "holdInstructionCode" #define LN_hold_instruction_code "Hold Instruction Code" #define NID_hold_instruction_code 430 #define OBJ_hold_instruction_code 2L, 5L, 29L, 23L #define SN_hold_instruction_none "holdInstructionNone" #define LN_hold_instruction_none "Hold Instruction None" #define NID_hold_instruction_none 431 #define OBJ_hold_instruction_none 1L, 2L, 840L, 10040L, 2L, 1L #define SN_hold_instruction_call_issuer "holdInstructionCallIssuer" #define LN_hold_instruction_call_issuer "Hold Instruction Call Issuer" #define NID_hold_instruction_call_issuer 432 #define OBJ_hold_instruction_call_issuer 1L, 2L, 840L, 10040L, 2L, 2L #define SN_hold_instruction_reject "holdInstructionReject" #define LN_hold_instruction_reject "Hold Instruction Reject" #define NID_hold_instruction_reject 433 #define OBJ_hold_instruction_reject 1L, 2L, 840L, 10040L, 2L, 3L #define SN_data "data" #define NID_data 434 #define OBJ_data 0L, 9L #define SN_pss "pss" #define NID_pss 435 #define OBJ_pss 0L, 9L, 2342L #define SN_ucl "ucl" #define NID_ucl 436 #define OBJ_ucl 0L, 9L, 2342L, 19200300L #define SN_pilot "pilot" #define NID_pilot 437 #define OBJ_pilot 0L, 9L, 2342L, 19200300L, 100L #define LN_pilotAttributeType "pilotAttributeType" #define NID_pilotAttributeType 438 #define OBJ_pilotAttributeType 0L, 9L, 2342L, 19200300L, 100L, 1L #define LN_pilotAttributeSyntax "pilotAttributeSyntax" #define NID_pilotAttributeSyntax 439 #define OBJ_pilotAttributeSyntax 0L, 9L, 2342L, 19200300L, 100L, 3L #define LN_pilotObjectClass "pilotObjectClass" #define NID_pilotObjectClass 440 #define OBJ_pilotObjectClass 0L, 9L, 2342L, 19200300L, 100L, 4L #define LN_pilotGroups "pilotGroups" #define NID_pilotGroups 441 #define OBJ_pilotGroups 0L, 9L, 2342L, 19200300L, 100L, 10L #define LN_iA5StringSyntax "iA5StringSyntax" #define NID_iA5StringSyntax 442 #define OBJ_iA5StringSyntax 0L, 9L, 
2342L, 19200300L, 100L, 3L, 4L #define LN_caseIgnoreIA5StringSyntax "caseIgnoreIA5StringSyntax" #define NID_caseIgnoreIA5StringSyntax 443 #define OBJ_caseIgnoreIA5StringSyntax 0L, 9L, 2342L, 19200300L, 100L, 3L, 5L #define LN_pilotObject "pilotObject" #define NID_pilotObject 444 #define OBJ_pilotObject 0L, 9L, 2342L, 19200300L, 100L, 4L, 3L #define LN_pilotPerson "pilotPerson" #define NID_pilotPerson 445 #define OBJ_pilotPerson 0L, 9L, 2342L, 19200300L, 100L, 4L, 4L #define SN_account "account" #define NID_account 446 #define OBJ_account 0L, 9L, 2342L, 19200300L, 100L, 4L, 5L #define SN_document "document" #define NID_document 447 #define OBJ_document 0L, 9L, 2342L, 19200300L, 100L, 4L, 6L #define SN_room "room" #define NID_room 448 #define OBJ_room 0L, 9L, 2342L, 19200300L, 100L, 4L, 7L #define LN_documentSeries "documentSeries" #define NID_documentSeries 449 #define OBJ_documentSeries 0L, 9L, 2342L, 19200300L, 100L, 4L, 9L #define LN_rFC822localPart "rFC822localPart" #define NID_rFC822localPart 450 #define OBJ_rFC822localPart 0L, 9L, 2342L, 19200300L, 100L, 4L, 14L #define LN_dNSDomain "dNSDomain" #define NID_dNSDomain 451 #define OBJ_dNSDomain 0L, 9L, 2342L, 19200300L, 100L, 4L, 15L #define LN_domainRelatedObject "domainRelatedObject" #define NID_domainRelatedObject 452 #define OBJ_domainRelatedObject 0L, 9L, 2342L, 19200300L, 100L, 4L, 17L #define LN_friendlyCountry "friendlyCountry" #define NID_friendlyCountry 453 #define OBJ_friendlyCountry 0L, 9L, 2342L, 19200300L, 100L, 4L, 18L #define LN_simpleSecurityObject "simpleSecurityObject" #define NID_simpleSecurityObject 454 #define OBJ_simpleSecurityObject 0L, 9L, 2342L, 19200300L, 100L, 4L, 19L #define LN_pilotOrganization "pilotOrganization" #define NID_pilotOrganization 455 #define OBJ_pilotOrganization 0L, 9L, 2342L, 19200300L, 100L, 4L, 20L #define LN_pilotDSA "pilotDSA" #define NID_pilotDSA 456 #define OBJ_pilotDSA 0L, 9L, 2342L, 19200300L, 100L, 4L, 21L #define LN_qualityLabelledData "qualityLabelledData" 
#define NID_qualityLabelledData 457 #define OBJ_qualityLabelledData 0L, 9L, 2342L, 19200300L, 100L, 4L, 22L #define SN_userId "UID" #define LN_userId "userId" #define NID_userId 458 #define OBJ_userId 0L, 9L, 2342L, 19200300L, 100L, 1L, 1L #define LN_textEncodedORAddress "textEncodedORAddress" #define NID_textEncodedORAddress 459 #define OBJ_textEncodedORAddress 0L, 9L, 2342L, 19200300L, 100L, 1L, 2L #define SN_rfc822Mailbox "mail" #define LN_rfc822Mailbox "rfc822Mailbox" #define NID_rfc822Mailbox 460 #define OBJ_rfc822Mailbox 0L, 9L, 2342L, 19200300L, 100L, 1L, 3L #define SN_info "info" #define NID_info 461 #define OBJ_info 0L, 9L, 2342L, 19200300L, 100L, 1L, 4L #define LN_favouriteDrink "favouriteDrink" #define NID_favouriteDrink 462 #define OBJ_favouriteDrink 0L, 9L, 2342L, 19200300L, 100L, 1L, 5L #define LN_roomNumber "roomNumber" #define NID_roomNumber 463 #define OBJ_roomNumber 0L, 9L, 2342L, 19200300L, 100L, 1L, 6L #define SN_photo "photo" #define NID_photo 464 #define OBJ_photo 0L, 9L, 2342L, 19200300L, 100L, 1L, 7L #define LN_userClass "userClass" #define NID_userClass 465 #define OBJ_userClass 0L, 9L, 2342L, 19200300L, 100L, 1L, 8L #define SN_host "host" #define NID_host 466 #define OBJ_host 0L, 9L, 2342L, 19200300L, 100L, 1L, 9L #define SN_manager "manager" #define NID_manager 467 #define OBJ_manager 0L, 9L, 2342L, 19200300L, 100L, 1L, 10L #define LN_documentIdentifier "documentIdentifier" #define NID_documentIdentifier 468 #define OBJ_documentIdentifier 0L, 9L, 2342L, 19200300L, 100L, 1L, 11L #define LN_documentTitle "documentTitle" #define NID_documentTitle 469 #define OBJ_documentTitle 0L, 9L, 2342L, 19200300L, 100L, 1L, 12L #define LN_documentVersion "documentVersion" #define NID_documentVersion 470 #define OBJ_documentVersion 0L, 9L, 2342L, 19200300L, 100L, 1L, 13L #define LN_documentAuthor "documentAuthor" #define NID_documentAuthor 471 #define OBJ_documentAuthor 0L, 9L, 2342L, 19200300L, 100L, 1L, 14L #define LN_documentLocation "documentLocation" 
#define NID_documentLocation 472 #define OBJ_documentLocation 0L, 9L, 2342L, 19200300L, 100L, 1L, 15L #define LN_homeTelephoneNumber "homeTelephoneNumber" #define NID_homeTelephoneNumber 473 #define OBJ_homeTelephoneNumber 0L, 9L, 2342L, 19200300L, 100L, 1L, 20L #define SN_secretary "secretary" #define NID_secretary 474 #define OBJ_secretary 0L, 9L, 2342L, 19200300L, 100L, 1L, 21L #define LN_otherMailbox "otherMailbox" #define NID_otherMailbox 475 #define OBJ_otherMailbox 0L, 9L, 2342L, 19200300L, 100L, 1L, 22L #define LN_lastModifiedTime "lastModifiedTime" #define NID_lastModifiedTime 476 #define OBJ_lastModifiedTime 0L, 9L, 2342L, 19200300L, 100L, 1L, 23L #define LN_lastModifiedBy "lastModifiedBy" #define NID_lastModifiedBy 477 #define OBJ_lastModifiedBy 0L, 9L, 2342L, 19200300L, 100L, 1L, 24L #define LN_aRecord "aRecord" #define NID_aRecord 478 #define OBJ_aRecord 0L, 9L, 2342L, 19200300L, 100L, 1L, 26L #define LN_pilotAttributeType27 "pilotAttributeType27" #define NID_pilotAttributeType27 479 #define OBJ_pilotAttributeType27 0L, 9L, 2342L, 19200300L, 100L, 1L, 27L #define LN_mXRecord "mXRecord" #define NID_mXRecord 480 #define OBJ_mXRecord 0L, 9L, 2342L, 19200300L, 100L, 1L, 28L #define LN_nSRecord "nSRecord" #define NID_nSRecord 481 #define OBJ_nSRecord 0L, 9L, 2342L, 19200300L, 100L, 1L, 29L #define LN_sOARecord "sOARecord" #define NID_sOARecord 482 #define OBJ_sOARecord 0L, 9L, 2342L, 19200300L, 100L, 1L, 30L #define LN_cNAMERecord "cNAMERecord" #define NID_cNAMERecord 483 #define OBJ_cNAMERecord 0L, 9L, 2342L, 19200300L, 100L, 1L, 31L #define LN_associatedDomain "associatedDomain" #define NID_associatedDomain 484 #define OBJ_associatedDomain 0L, 9L, 2342L, 19200300L, 100L, 1L, 37L #define LN_associatedName "associatedName" #define NID_associatedName 485 #define OBJ_associatedName 0L, 9L, 2342L, 19200300L, 100L, 1L, 38L #define LN_homePostalAddress "homePostalAddress" #define NID_homePostalAddress 486 #define OBJ_homePostalAddress 0L, 9L, 2342L, 19200300L, 
100L, 1L, 39L #define LN_personalTitle "personalTitle" #define NID_personalTitle 487 #define OBJ_personalTitle 0L, 9L, 2342L, 19200300L, 100L, 1L, 40L #define LN_mobileTelephoneNumber "mobileTelephoneNumber" #define NID_mobileTelephoneNumber 488 #define OBJ_mobileTelephoneNumber 0L, 9L, 2342L, 19200300L, 100L, 1L, 41L #define LN_pagerTelephoneNumber "pagerTelephoneNumber" #define NID_pagerTelephoneNumber 489 #define OBJ_pagerTelephoneNumber 0L, 9L, 2342L, 19200300L, 100L, 1L, 42L #define LN_friendlyCountryName "friendlyCountryName" #define NID_friendlyCountryName 490 #define OBJ_friendlyCountryName 0L, 9L, 2342L, 19200300L, 100L, 1L, 43L #define LN_organizationalStatus "organizationalStatus" #define NID_organizationalStatus 491 #define OBJ_organizationalStatus 0L, 9L, 2342L, 19200300L, 100L, 1L, 45L #define LN_janetMailbox "janetMailbox" #define NID_janetMailbox 492 #define OBJ_janetMailbox 0L, 9L, 2342L, 19200300L, 100L, 1L, 46L #define LN_mailPreferenceOption "mailPreferenceOption" #define NID_mailPreferenceOption 493 #define OBJ_mailPreferenceOption 0L, 9L, 2342L, 19200300L, 100L, 1L, 47L #define LN_buildingName "buildingName" #define NID_buildingName 494 #define OBJ_buildingName 0L, 9L, 2342L, 19200300L, 100L, 1L, 48L #define LN_dSAQuality "dSAQuality" #define NID_dSAQuality 495 #define OBJ_dSAQuality 0L, 9L, 2342L, 19200300L, 100L, 1L, 49L #define LN_singleLevelQuality "singleLevelQuality" #define NID_singleLevelQuality 496 #define OBJ_singleLevelQuality 0L, 9L, 2342L, 19200300L, 100L, 1L, 50L #define LN_subtreeMinimumQuality "subtreeMinimumQuality" #define NID_subtreeMinimumQuality 497 #define OBJ_subtreeMinimumQuality 0L, 9L, 2342L, 19200300L, 100L, 1L, 51L #define LN_subtreeMaximumQuality "subtreeMaximumQuality" #define NID_subtreeMaximumQuality 498 #define OBJ_subtreeMaximumQuality 0L, 9L, 2342L, 19200300L, 100L, 1L, 52L #define LN_personalSignature "personalSignature" #define NID_personalSignature 499 #define OBJ_personalSignature 0L, 9L, 2342L, 
19200300L, 100L, 1L, 53L #define LN_dITRedirect "dITRedirect" #define NID_dITRedirect 500 #define OBJ_dITRedirect 0L, 9L, 2342L, 19200300L, 100L, 1L, 54L #define SN_audio "audio" #define NID_audio 501 #define OBJ_audio 0L, 9L, 2342L, 19200300L, 100L, 1L, 55L #define LN_documentPublisher "documentPublisher" #define NID_documentPublisher 502 #define OBJ_documentPublisher 0L, 9L, 2342L, 19200300L, 100L, 1L, 56L #define LN_x500UniqueIdentifier "x500UniqueIdentifier" #define NID_x500UniqueIdentifier 503 #define OBJ_x500UniqueIdentifier 2L, 5L, 4L, 45L #define SN_mime_mhs "mime-mhs" #define LN_mime_mhs "MIME MHS" #define NID_mime_mhs 504 #define OBJ_mime_mhs 1L, 3L, 6L, 1L, 7L, 1L #define SN_mime_mhs_headings "mime-mhs-headings" #define LN_mime_mhs_headings "mime-mhs-headings" #define NID_mime_mhs_headings 505 #define OBJ_mime_mhs_headings 1L, 3L, 6L, 1L, 7L, 1L, 1L #define SN_mime_mhs_bodies "mime-mhs-bodies" #define LN_mime_mhs_bodies "mime-mhs-bodies" #define NID_mime_mhs_bodies 506 #define OBJ_mime_mhs_bodies 1L, 3L, 6L, 1L, 7L, 1L, 2L #define SN_id_hex_partial_message "id-hex-partial-message" #define LN_id_hex_partial_message "id-hex-partial-message" #define NID_id_hex_partial_message 507 #define OBJ_id_hex_partial_message 1L, 3L, 6L, 1L, 7L, 1L, 1L, 1L #define SN_id_hex_multipart_message "id-hex-multipart-message" #define LN_id_hex_multipart_message "id-hex-multipart-message" #define NID_id_hex_multipart_message 508 #define OBJ_id_hex_multipart_message 1L, 3L, 6L, 1L, 7L, 1L, 1L, 2L #define LN_generationQualifier "generationQualifier" #define NID_generationQualifier 509 #define OBJ_generationQualifier 2L, 5L, 4L, 44L #define LN_pseudonym "pseudonym" #define NID_pseudonym 510 #define OBJ_pseudonym 2L, 5L, 4L, 65L #define SN_id_set "id-set" #define LN_id_set "Secure Electronic Transactions" #define NID_id_set 512 #define OBJ_id_set 2L, 23L, 42L #define SN_set_ctype "set-ctype" #define LN_set_ctype "content types" #define NID_set_ctype 513 #define OBJ_set_ctype 2L, 
23L, 42L, 0L #define SN_set_msgExt "set-msgExt" #define LN_set_msgExt "message extensions" #define NID_set_msgExt 514 #define OBJ_set_msgExt 2L, 23L, 42L, 1L #define SN_set_attr "set-attr" #define NID_set_attr 515 #define OBJ_set_attr 2L, 23L, 42L, 3L #define SN_set_policy "set-policy" #define NID_set_policy 516 #define OBJ_set_policy 2L, 23L, 42L, 5L #define SN_set_certExt "set-certExt" #define LN_set_certExt "certificate extensions" #define NID_set_certExt 517 #define OBJ_set_certExt 2L, 23L, 42L, 7L #define SN_set_brand "set-brand" #define NID_set_brand 518 #define OBJ_set_brand 2L, 23L, 42L, 8L #define SN_setct_PANData "setct-PANData" #define NID_setct_PANData 519 #define OBJ_setct_PANData 2L, 23L, 42L, 0L, 0L #define SN_setct_PANToken "setct-PANToken" #define NID_setct_PANToken 520 #define OBJ_setct_PANToken 2L, 23L, 42L, 0L, 1L #define SN_setct_PANOnly "setct-PANOnly" #define NID_setct_PANOnly 521 #define OBJ_setct_PANOnly 2L, 23L, 42L, 0L, 2L #define SN_setct_OIData "setct-OIData" #define NID_setct_OIData 522 #define OBJ_setct_OIData 2L, 23L, 42L, 0L, 3L #define SN_setct_PI "setct-PI" #define NID_setct_PI 523 #define OBJ_setct_PI 2L, 23L, 42L, 0L, 4L #define SN_setct_PIData "setct-PIData" #define NID_setct_PIData 524 #define OBJ_setct_PIData 2L, 23L, 42L, 0L, 5L #define SN_setct_PIDataUnsigned "setct-PIDataUnsigned" #define NID_setct_PIDataUnsigned 525 #define OBJ_setct_PIDataUnsigned 2L, 23L, 42L, 0L, 6L #define SN_setct_HODInput "setct-HODInput" #define NID_setct_HODInput 526 #define OBJ_setct_HODInput 2L, 23L, 42L, 0L, 7L #define SN_setct_AuthResBaggage "setct-AuthResBaggage" #define NID_setct_AuthResBaggage 527 #define OBJ_setct_AuthResBaggage 2L, 23L, 42L, 0L, 8L #define SN_setct_AuthRevReqBaggage "setct-AuthRevReqBaggage" #define NID_setct_AuthRevReqBaggage 528 #define OBJ_setct_AuthRevReqBaggage 2L, 23L, 42L, 0L, 9L #define SN_setct_AuthRevResBaggage "setct-AuthRevResBaggage" #define NID_setct_AuthRevResBaggage 529 #define OBJ_setct_AuthRevResBaggage 
2L, 23L, 42L, 0L, 10L #define SN_setct_CapTokenSeq "setct-CapTokenSeq" #define NID_setct_CapTokenSeq 530 #define OBJ_setct_CapTokenSeq 2L, 23L, 42L, 0L, 11L #define SN_setct_PInitResData "setct-PInitResData" #define NID_setct_PInitResData 531 #define OBJ_setct_PInitResData 2L, 23L, 42L, 0L, 12L #define SN_setct_PI_TBS "setct-PI-TBS" #define NID_setct_PI_TBS 532 #define OBJ_setct_PI_TBS 2L, 23L, 42L, 0L, 13L #define SN_setct_PResData "setct-PResData" #define NID_setct_PResData 533 #define OBJ_setct_PResData 2L, 23L, 42L, 0L, 14L #define SN_setct_AuthReqTBS "setct-AuthReqTBS" #define NID_setct_AuthReqTBS 534 #define OBJ_setct_AuthReqTBS 2L, 23L, 42L, 0L, 16L #define SN_setct_AuthResTBS "setct-AuthResTBS" #define NID_setct_AuthResTBS 535 #define OBJ_setct_AuthResTBS 2L, 23L, 42L, 0L, 17L #define SN_setct_AuthResTBSX "setct-AuthResTBSX" #define NID_setct_AuthResTBSX 536 #define OBJ_setct_AuthResTBSX 2L, 23L, 42L, 0L, 18L #define SN_setct_AuthTokenTBS "setct-AuthTokenTBS" #define NID_setct_AuthTokenTBS 537 #define OBJ_setct_AuthTokenTBS 2L, 23L, 42L, 0L, 19L #define SN_setct_CapTokenData "setct-CapTokenData" #define NID_setct_CapTokenData 538 #define OBJ_setct_CapTokenData 2L, 23L, 42L, 0L, 20L #define SN_setct_CapTokenTBS "setct-CapTokenTBS" #define NID_setct_CapTokenTBS 539 #define OBJ_setct_CapTokenTBS 2L, 23L, 42L, 0L, 21L #define SN_setct_AcqCardCodeMsg "setct-AcqCardCodeMsg" #define NID_setct_AcqCardCodeMsg 540 #define OBJ_setct_AcqCardCodeMsg 2L, 23L, 42L, 0L, 22L #define SN_setct_AuthRevReqTBS "setct-AuthRevReqTBS" #define NID_setct_AuthRevReqTBS 541 #define OBJ_setct_AuthRevReqTBS 2L, 23L, 42L, 0L, 23L #define SN_setct_AuthRevResData "setct-AuthRevResData" #define NID_setct_AuthRevResData 542 #define OBJ_setct_AuthRevResData 2L, 23L, 42L, 0L, 24L #define SN_setct_AuthRevResTBS "setct-AuthRevResTBS" #define NID_setct_AuthRevResTBS 543 #define OBJ_setct_AuthRevResTBS 2L, 23L, 42L, 0L, 25L #define SN_setct_CapReqTBS "setct-CapReqTBS" #define NID_setct_CapReqTBS 
544 #define OBJ_setct_CapReqTBS 2L, 23L, 42L, 0L, 26L #define SN_setct_CapReqTBSX "setct-CapReqTBSX" #define NID_setct_CapReqTBSX 545 #define OBJ_setct_CapReqTBSX 2L, 23L, 42L, 0L, 27L #define SN_setct_CapResData "setct-CapResData" #define NID_setct_CapResData 546 #define OBJ_setct_CapResData 2L, 23L, 42L, 0L, 28L #define SN_setct_CapRevReqTBS "setct-CapRevReqTBS" #define NID_setct_CapRevReqTBS 547 #define OBJ_setct_CapRevReqTBS 2L, 23L, 42L, 0L, 29L #define SN_setct_CapRevReqTBSX "setct-CapRevReqTBSX" #define NID_setct_CapRevReqTBSX 548 #define OBJ_setct_CapRevReqTBSX 2L, 23L, 42L, 0L, 30L #define SN_setct_CapRevResData "setct-CapRevResData" #define NID_setct_CapRevResData 549 #define OBJ_setct_CapRevResData 2L, 23L, 42L, 0L, 31L #define SN_setct_CredReqTBS "setct-CredReqTBS" #define NID_setct_CredReqTBS 550 #define OBJ_setct_CredReqTBS 2L, 23L, 42L, 0L, 32L #define SN_setct_CredReqTBSX "setct-CredReqTBSX" #define NID_setct_CredReqTBSX 551 #define OBJ_setct_CredReqTBSX 2L, 23L, 42L, 0L, 33L #define SN_setct_CredResData "setct-CredResData" #define NID_setct_CredResData 552 #define OBJ_setct_CredResData 2L, 23L, 42L, 0L, 34L #define SN_setct_CredRevReqTBS "setct-CredRevReqTBS" #define NID_setct_CredRevReqTBS 553 #define OBJ_setct_CredRevReqTBS 2L, 23L, 42L, 0L, 35L #define SN_setct_CredRevReqTBSX "setct-CredRevReqTBSX" #define NID_setct_CredRevReqTBSX 554 #define OBJ_setct_CredRevReqTBSX 2L, 23L, 42L, 0L, 36L #define SN_setct_CredRevResData "setct-CredRevResData" #define NID_setct_CredRevResData 555 #define OBJ_setct_CredRevResData 2L, 23L, 42L, 0L, 37L #define SN_setct_PCertReqData "setct-PCertReqData" #define NID_setct_PCertReqData 556 #define OBJ_setct_PCertReqData 2L, 23L, 42L, 0L, 38L #define SN_setct_PCertResTBS "setct-PCertResTBS" #define NID_setct_PCertResTBS 557 #define OBJ_setct_PCertResTBS 2L, 23L, 42L, 0L, 39L #define SN_setct_BatchAdminReqData "setct-BatchAdminReqData" #define NID_setct_BatchAdminReqData 558 #define OBJ_setct_BatchAdminReqData 2L, 23L, 
42L, 0L, 40L #define SN_setct_BatchAdminResData "setct-BatchAdminResData" #define NID_setct_BatchAdminResData 559 #define OBJ_setct_BatchAdminResData 2L, 23L, 42L, 0L, 41L #define SN_setct_CardCInitResTBS "setct-CardCInitResTBS" #define NID_setct_CardCInitResTBS 560 #define OBJ_setct_CardCInitResTBS 2L, 23L, 42L, 0L, 42L #define SN_setct_MeAqCInitResTBS "setct-MeAqCInitResTBS" #define NID_setct_MeAqCInitResTBS 561 #define OBJ_setct_MeAqCInitResTBS 2L, 23L, 42L, 0L, 43L #define SN_setct_RegFormResTBS "setct-RegFormResTBS" #define NID_setct_RegFormResTBS 562 #define OBJ_setct_RegFormResTBS 2L, 23L, 42L, 0L, 44L #define SN_setct_CertReqData "setct-CertReqData" #define NID_setct_CertReqData 563 #define OBJ_setct_CertReqData 2L, 23L, 42L, 0L, 45L #define SN_setct_CertReqTBS "setct-CertReqTBS" #define NID_setct_CertReqTBS 564 #define OBJ_setct_CertReqTBS 2L, 23L, 42L, 0L, 46L #define SN_setct_CertResData "setct-CertResData" #define NID_setct_CertResData 565 #define OBJ_setct_CertResData 2L, 23L, 42L, 0L, 47L #define SN_setct_CertInqReqTBS "setct-CertInqReqTBS" #define NID_setct_CertInqReqTBS 566 #define OBJ_setct_CertInqReqTBS 2L, 23L, 42L, 0L, 48L #define SN_setct_ErrorTBS "setct-ErrorTBS" #define NID_setct_ErrorTBS 567 #define OBJ_setct_ErrorTBS 2L, 23L, 42L, 0L, 49L #define SN_setct_PIDualSignedTBE "setct-PIDualSignedTBE" #define NID_setct_PIDualSignedTBE 568 #define OBJ_setct_PIDualSignedTBE 2L, 23L, 42L, 0L, 50L #define SN_setct_PIUnsignedTBE "setct-PIUnsignedTBE" #define NID_setct_PIUnsignedTBE 569 #define OBJ_setct_PIUnsignedTBE 2L, 23L, 42L, 0L, 51L #define SN_setct_AuthReqTBE "setct-AuthReqTBE" #define NID_setct_AuthReqTBE 570 #define OBJ_setct_AuthReqTBE 2L, 23L, 42L, 0L, 52L #define SN_setct_AuthResTBE "setct-AuthResTBE" #define NID_setct_AuthResTBE 571 #define OBJ_setct_AuthResTBE 2L, 23L, 42L, 0L, 53L #define SN_setct_AuthResTBEX "setct-AuthResTBEX" #define NID_setct_AuthResTBEX 572 #define OBJ_setct_AuthResTBEX 2L, 23L, 42L, 0L, 54L #define 
SN_setct_AuthTokenTBE "setct-AuthTokenTBE" #define NID_setct_AuthTokenTBE 573 #define OBJ_setct_AuthTokenTBE 2L, 23L, 42L, 0L, 55L #define SN_setct_CapTokenTBE "setct-CapTokenTBE" #define NID_setct_CapTokenTBE 574 #define OBJ_setct_CapTokenTBE 2L, 23L, 42L, 0L, 56L #define SN_setct_CapTokenTBEX "setct-CapTokenTBEX" #define NID_setct_CapTokenTBEX 575 #define OBJ_setct_CapTokenTBEX 2L, 23L, 42L, 0L, 57L #define SN_setct_AcqCardCodeMsgTBE "setct-AcqCardCodeMsgTBE" #define NID_setct_AcqCardCodeMsgTBE 576 #define OBJ_setct_AcqCardCodeMsgTBE 2L, 23L, 42L, 0L, 58L #define SN_setct_AuthRevReqTBE "setct-AuthRevReqTBE" #define NID_setct_AuthRevReqTBE 577 #define OBJ_setct_AuthRevReqTBE 2L, 23L, 42L, 0L, 59L #define SN_setct_AuthRevResTBE "setct-AuthRevResTBE" #define NID_setct_AuthRevResTBE 578 #define OBJ_setct_AuthRevResTBE 2L, 23L, 42L, 0L, 60L #define SN_setct_AuthRevResTBEB "setct-AuthRevResTBEB" #define NID_setct_AuthRevResTBEB 579 #define OBJ_setct_AuthRevResTBEB 2L, 23L, 42L, 0L, 61L #define SN_setct_CapReqTBE "setct-CapReqTBE" #define NID_setct_CapReqTBE 580 #define OBJ_setct_CapReqTBE 2L, 23L, 42L, 0L, 62L #define SN_setct_CapReqTBEX "setct-CapReqTBEX" #define NID_setct_CapReqTBEX 581 #define OBJ_setct_CapReqTBEX 2L, 23L, 42L, 0L, 63L #define SN_setct_CapResTBE "setct-CapResTBE" #define NID_setct_CapResTBE 582 #define OBJ_setct_CapResTBE 2L, 23L, 42L, 0L, 64L #define SN_setct_CapRevReqTBE "setct-CapRevReqTBE" #define NID_setct_CapRevReqTBE 583 #define OBJ_setct_CapRevReqTBE 2L, 23L, 42L, 0L, 65L #define SN_setct_CapRevReqTBEX "setct-CapRevReqTBEX" #define NID_setct_CapRevReqTBEX 584 #define OBJ_setct_CapRevReqTBEX 2L, 23L, 42L, 0L, 66L #define SN_setct_CapRevResTBE "setct-CapRevResTBE" #define NID_setct_CapRevResTBE 585 #define OBJ_setct_CapRevResTBE 2L, 23L, 42L, 0L, 67L #define SN_setct_CredReqTBE "setct-CredReqTBE" #define NID_setct_CredReqTBE 586 #define OBJ_setct_CredReqTBE 2L, 23L, 42L, 0L, 68L #define SN_setct_CredReqTBEX "setct-CredReqTBEX" #define 
NID_setct_CredReqTBEX 587 #define OBJ_setct_CredReqTBEX 2L, 23L, 42L, 0L, 69L #define SN_setct_CredResTBE "setct-CredResTBE" #define NID_setct_CredResTBE 588 #define OBJ_setct_CredResTBE 2L, 23L, 42L, 0L, 70L #define SN_setct_CredRevReqTBE "setct-CredRevReqTBE" #define NID_setct_CredRevReqTBE 589 #define OBJ_setct_CredRevReqTBE 2L, 23L, 42L, 0L, 71L #define SN_setct_CredRevReqTBEX "setct-CredRevReqTBEX" #define NID_setct_CredRevReqTBEX 590 #define OBJ_setct_CredRevReqTBEX 2L, 23L, 42L, 0L, 72L #define SN_setct_CredRevResTBE "setct-CredRevResTBE" #define NID_setct_CredRevResTBE 591 #define OBJ_setct_CredRevResTBE 2L, 23L, 42L, 0L, 73L #define SN_setct_BatchAdminReqTBE "setct-BatchAdminReqTBE" #define NID_setct_BatchAdminReqTBE 592 #define OBJ_setct_BatchAdminReqTBE 2L, 23L, 42L, 0L, 74L #define SN_setct_BatchAdminResTBE "setct-BatchAdminResTBE" #define NID_setct_BatchAdminResTBE 593 #define OBJ_setct_BatchAdminResTBE 2L, 23L, 42L, 0L, 75L #define SN_setct_RegFormReqTBE "setct-RegFormReqTBE" #define NID_setct_RegFormReqTBE 594 #define OBJ_setct_RegFormReqTBE 2L, 23L, 42L, 0L, 76L #define SN_setct_CertReqTBE "setct-CertReqTBE" #define NID_setct_CertReqTBE 595 #define OBJ_setct_CertReqTBE 2L, 23L, 42L, 0L, 77L #define SN_setct_CertReqTBEX "setct-CertReqTBEX" #define NID_setct_CertReqTBEX 596 #define OBJ_setct_CertReqTBEX 2L, 23L, 42L, 0L, 78L #define SN_setct_CertResTBE "setct-CertResTBE" #define NID_setct_CertResTBE 597 #define OBJ_setct_CertResTBE 2L, 23L, 42L, 0L, 79L #define SN_setct_CRLNotificationTBS "setct-CRLNotificationTBS" #define NID_setct_CRLNotificationTBS 598 #define OBJ_setct_CRLNotificationTBS 2L, 23L, 42L, 0L, 80L #define SN_setct_CRLNotificationResTBS "setct-CRLNotificationResTBS" #define NID_setct_CRLNotificationResTBS 599 #define OBJ_setct_CRLNotificationResTBS 2L, 23L, 42L, 0L, 81L #define SN_setct_BCIDistributionTBS "setct-BCIDistributionTBS" #define NID_setct_BCIDistributionTBS 600 #define OBJ_setct_BCIDistributionTBS 2L, 23L, 42L, 0L, 82L 
#define SN_setext_genCrypt "setext-genCrypt" #define LN_setext_genCrypt "generic cryptogram" #define NID_setext_genCrypt 601 #define OBJ_setext_genCrypt 2L, 23L, 42L, 1L, 1L #define SN_setext_miAuth "setext-miAuth" #define LN_setext_miAuth "merchant initiated auth" #define NID_setext_miAuth 602 #define OBJ_setext_miAuth 2L, 23L, 42L, 1L, 3L #define SN_setext_pinSecure "setext-pinSecure" #define NID_setext_pinSecure 603 #define OBJ_setext_pinSecure 2L, 23L, 42L, 1L, 4L #define SN_setext_pinAny "setext-pinAny" #define NID_setext_pinAny 604 #define OBJ_setext_pinAny 2L, 23L, 42L, 1L, 5L #define SN_setext_track2 "setext-track2" #define NID_setext_track2 605 #define OBJ_setext_track2 2L, 23L, 42L, 1L, 7L #define SN_setext_cv "setext-cv" #define LN_setext_cv "additional verification" #define NID_setext_cv 606 #define OBJ_setext_cv 2L, 23L, 42L, 1L, 8L #define SN_set_policy_root "set-policy-root" #define NID_set_policy_root 607 #define OBJ_set_policy_root 2L, 23L, 42L, 5L, 0L #define SN_setCext_hashedRoot "setCext-hashedRoot" #define NID_setCext_hashedRoot 608 #define OBJ_setCext_hashedRoot 2L, 23L, 42L, 7L, 0L #define SN_setCext_certType "setCext-certType" #define NID_setCext_certType 609 #define OBJ_setCext_certType 2L, 23L, 42L, 7L, 1L #define SN_setCext_merchData "setCext-merchData" #define NID_setCext_merchData 610 #define OBJ_setCext_merchData 2L, 23L, 42L, 7L, 2L #define SN_setCext_cCertRequired "setCext-cCertRequired" #define NID_setCext_cCertRequired 611 #define OBJ_setCext_cCertRequired 2L, 23L, 42L, 7L, 3L #define SN_setCext_tunneling "setCext-tunneling" #define NID_setCext_tunneling 612 #define OBJ_setCext_tunneling 2L, 23L, 42L, 7L, 4L #define SN_setCext_setExt "setCext-setExt" #define NID_setCext_setExt 613 #define OBJ_setCext_setExt 2L, 23L, 42L, 7L, 5L #define SN_setCext_setQualf "setCext-setQualf" #define NID_setCext_setQualf 614 #define OBJ_setCext_setQualf 2L, 23L, 42L, 7L, 6L #define SN_setCext_PGWYcapabilities "setCext-PGWYcapabilities" #define 
NID_setCext_PGWYcapabilities 615 #define OBJ_setCext_PGWYcapabilities 2L, 23L, 42L, 7L, 7L #define SN_setCext_TokenIdentifier "setCext-TokenIdentifier" #define NID_setCext_TokenIdentifier 616 #define OBJ_setCext_TokenIdentifier 2L, 23L, 42L, 7L, 8L #define SN_setCext_Track2Data "setCext-Track2Data" #define NID_setCext_Track2Data 617 #define OBJ_setCext_Track2Data 2L, 23L, 42L, 7L, 9L #define SN_setCext_TokenType "setCext-TokenType" #define NID_setCext_TokenType 618 #define OBJ_setCext_TokenType 2L, 23L, 42L, 7L, 10L #define SN_setCext_IssuerCapabilities "setCext-IssuerCapabilities" #define NID_setCext_IssuerCapabilities 619 #define OBJ_setCext_IssuerCapabilities 2L, 23L, 42L, 7L, 11L #define SN_setAttr_Cert "setAttr-Cert" #define NID_setAttr_Cert 620 #define OBJ_setAttr_Cert 2L, 23L, 42L, 3L, 0L #define SN_setAttr_PGWYcap "setAttr-PGWYcap" #define LN_setAttr_PGWYcap "payment gateway capabilities" #define NID_setAttr_PGWYcap 621 #define OBJ_setAttr_PGWYcap 2L, 23L, 42L, 3L, 1L #define SN_setAttr_TokenType "setAttr-TokenType" #define NID_setAttr_TokenType 622 #define OBJ_setAttr_TokenType 2L, 23L, 42L, 3L, 2L #define SN_setAttr_IssCap "setAttr-IssCap" #define LN_setAttr_IssCap "issuer capabilities" #define NID_setAttr_IssCap 623 #define OBJ_setAttr_IssCap 2L, 23L, 42L, 3L, 3L #define SN_set_rootKeyThumb "set-rootKeyThumb" #define NID_set_rootKeyThumb 624 #define OBJ_set_rootKeyThumb 2L, 23L, 42L, 3L, 0L, 0L #define SN_set_addPolicy "set-addPolicy" #define NID_set_addPolicy 625 #define OBJ_set_addPolicy 2L, 23L, 42L, 3L, 0L, 1L #define SN_setAttr_Token_EMV "setAttr-Token-EMV" #define NID_setAttr_Token_EMV 626 #define OBJ_setAttr_Token_EMV 2L, 23L, 42L, 3L, 2L, 1L #define SN_setAttr_Token_B0Prime "setAttr-Token-B0Prime" #define NID_setAttr_Token_B0Prime 627 #define OBJ_setAttr_Token_B0Prime 2L, 23L, 42L, 3L, 2L, 2L #define SN_setAttr_IssCap_CVM "setAttr-IssCap-CVM" #define NID_setAttr_IssCap_CVM 628 #define OBJ_setAttr_IssCap_CVM 2L, 23L, 42L, 3L, 3L, 3L #define 
SN_setAttr_IssCap_T2 "setAttr-IssCap-T2" #define NID_setAttr_IssCap_T2 629 #define OBJ_setAttr_IssCap_T2 2L, 23L, 42L, 3L, 3L, 4L #define SN_setAttr_IssCap_Sig "setAttr-IssCap-Sig" #define NID_setAttr_IssCap_Sig 630 #define OBJ_setAttr_IssCap_Sig 2L, 23L, 42L, 3L, 3L, 5L #define SN_setAttr_GenCryptgrm "setAttr-GenCryptgrm" #define LN_setAttr_GenCryptgrm "generate cryptogram" #define NID_setAttr_GenCryptgrm 631 #define OBJ_setAttr_GenCryptgrm 2L, 23L, 42L, 3L, 3L, 3L, 1L #define SN_setAttr_T2Enc "setAttr-T2Enc" #define LN_setAttr_T2Enc "encrypted track 2" #define NID_setAttr_T2Enc 632 #define OBJ_setAttr_T2Enc 2L, 23L, 42L, 3L, 3L, 4L, 1L #define SN_setAttr_T2cleartxt "setAttr-T2cleartxt" #define LN_setAttr_T2cleartxt "cleartext track 2" #define NID_setAttr_T2cleartxt 633 #define OBJ_setAttr_T2cleartxt 2L, 23L, 42L, 3L, 3L, 4L, 2L #define SN_setAttr_TokICCsig "setAttr-TokICCsig" #define LN_setAttr_TokICCsig "ICC or token signature" #define NID_setAttr_TokICCsig 634 #define OBJ_setAttr_TokICCsig 2L, 23L, 42L, 3L, 3L, 5L, 1L #define SN_setAttr_SecDevSig "setAttr-SecDevSig" #define LN_setAttr_SecDevSig "secure device signature" #define NID_setAttr_SecDevSig 635 #define OBJ_setAttr_SecDevSig 2L, 23L, 42L, 3L, 3L, 5L, 2L #define SN_set_brand_IATA_ATA "set-brand-IATA-ATA" #define NID_set_brand_IATA_ATA 636 #define OBJ_set_brand_IATA_ATA 2L, 23L, 42L, 8L, 1L #define SN_set_brand_Diners "set-brand-Diners" #define NID_set_brand_Diners 637 #define OBJ_set_brand_Diners 2L, 23L, 42L, 8L, 30L #define SN_set_brand_AmericanExpress "set-brand-AmericanExpress" #define NID_set_brand_AmericanExpress 638 #define OBJ_set_brand_AmericanExpress 2L, 23L, 42L, 8L, 34L #define SN_set_brand_JCB "set-brand-JCB" #define NID_set_brand_JCB 639 #define OBJ_set_brand_JCB 2L, 23L, 42L, 8L, 35L #define SN_set_brand_Visa "set-brand-Visa" #define NID_set_brand_Visa 640 #define OBJ_set_brand_Visa 2L, 23L, 42L, 8L, 4L #define SN_set_brand_MasterCard "set-brand-MasterCard" #define NID_set_brand_MasterCard 
641 #define OBJ_set_brand_MasterCard 2L, 23L, 42L, 8L, 5L #define SN_set_brand_Novus "set-brand-Novus" #define NID_set_brand_Novus 642 #define OBJ_set_brand_Novus 2L, 23L, 42L, 8L, 6011L #define SN_des_cdmf "DES-CDMF" #define LN_des_cdmf "des-cdmf" #define NID_des_cdmf 643 #define OBJ_des_cdmf 1L, 2L, 840L, 113549L, 3L, 10L #define SN_rsaOAEPEncryptionSET "rsaOAEPEncryptionSET" #define NID_rsaOAEPEncryptionSET 644 #define OBJ_rsaOAEPEncryptionSET 1L, 2L, 840L, 113549L, 1L, 1L, 6L #define SN_itu_t "ITU-T" #define LN_itu_t "itu-t" #define NID_itu_t 645 #define OBJ_itu_t 0L #define SN_joint_iso_itu_t "JOINT-ISO-ITU-T" #define LN_joint_iso_itu_t "joint-iso-itu-t" #define NID_joint_iso_itu_t 646 #define OBJ_joint_iso_itu_t 2L #define SN_international_organizations "international-organizations" #define LN_international_organizations "International Organizations" #define NID_international_organizations 647 #define OBJ_international_organizations 2L, 23L #define SN_ms_smartcard_login "msSmartcardLogin" #define LN_ms_smartcard_login "Microsoft Smartcardlogin" #define NID_ms_smartcard_login 648 #define OBJ_ms_smartcard_login 1L, 3L, 6L, 1L, 4L, 1L, 311L, 20L, 2L, 2L #define SN_ms_upn "msUPN" #define LN_ms_upn "Microsoft Universal Principal Name" #define NID_ms_upn 649 #define OBJ_ms_upn 1L, 3L, 6L, 1L, 4L, 1L, 311L, 20L, 2L, 3L #define SN_aes_128_cfb1 "AES-128-CFB1" #define LN_aes_128_cfb1 "aes-128-cfb1" #define NID_aes_128_cfb1 650 #define SN_aes_192_cfb1 "AES-192-CFB1" #define LN_aes_192_cfb1 "aes-192-cfb1" #define NID_aes_192_cfb1 651 #define SN_aes_256_cfb1 "AES-256-CFB1" #define LN_aes_256_cfb1 "aes-256-cfb1" #define NID_aes_256_cfb1 652 #define SN_aes_128_cfb8 "AES-128-CFB8" #define LN_aes_128_cfb8 "aes-128-cfb8" #define NID_aes_128_cfb8 653 #define SN_aes_192_cfb8 "AES-192-CFB8" #define LN_aes_192_cfb8 "aes-192-cfb8" #define NID_aes_192_cfb8 654 #define SN_aes_256_cfb8 "AES-256-CFB8" #define LN_aes_256_cfb8 "aes-256-cfb8" #define NID_aes_256_cfb8 655 #define 
SN_des_cfb1 "DES-CFB1" #define LN_des_cfb1 "des-cfb1" #define NID_des_cfb1 656 #define SN_des_cfb8 "DES-CFB8" #define LN_des_cfb8 "des-cfb8" #define NID_des_cfb8 657 #define SN_des_ede3_cfb1 "DES-EDE3-CFB1" #define LN_des_ede3_cfb1 "des-ede3-cfb1" #define NID_des_ede3_cfb1 658 #define SN_des_ede3_cfb8 "DES-EDE3-CFB8" #define LN_des_ede3_cfb8 "des-ede3-cfb8" #define NID_des_ede3_cfb8 659 #define SN_streetAddress "street" #define LN_streetAddress "streetAddress" #define NID_streetAddress 660 #define OBJ_streetAddress 2L, 5L, 4L, 9L #define LN_postalCode "postalCode" #define NID_postalCode 661 #define OBJ_postalCode 2L, 5L, 4L, 17L #define SN_id_ppl "id-ppl" #define NID_id_ppl 662 #define OBJ_id_ppl 1L, 3L, 6L, 1L, 5L, 5L, 7L, 21L #define SN_proxyCertInfo "proxyCertInfo" #define LN_proxyCertInfo "Proxy Certificate Information" #define NID_proxyCertInfo 663 #define OBJ_proxyCertInfo 1L, 3L, 6L, 1L, 5L, 5L, 7L, 1L, 14L #define SN_id_ppl_anyLanguage "id-ppl-anyLanguage" #define LN_id_ppl_anyLanguage "Any language" #define NID_id_ppl_anyLanguage 664 #define OBJ_id_ppl_anyLanguage 1L, 3L, 6L, 1L, 5L, 5L, 7L, 21L, 0L #define SN_id_ppl_inheritAll "id-ppl-inheritAll" #define LN_id_ppl_inheritAll "Inherit all" #define NID_id_ppl_inheritAll 665 #define OBJ_id_ppl_inheritAll 1L, 3L, 6L, 1L, 5L, 5L, 7L, 21L, 1L #define SN_name_constraints "nameConstraints" #define LN_name_constraints "X509v3 Name Constraints" #define NID_name_constraints 666 #define OBJ_name_constraints 2L, 5L, 29L, 30L #define SN_Independent "id-ppl-independent" #define LN_Independent "Independent" #define NID_Independent 667 #define OBJ_Independent 1L, 3L, 6L, 1L, 5L, 5L, 7L, 21L, 2L #define SN_sha256WithRSAEncryption "RSA-SHA256" #define LN_sha256WithRSAEncryption "sha256WithRSAEncryption" #define NID_sha256WithRSAEncryption 668 #define OBJ_sha256WithRSAEncryption 1L, 2L, 840L, 113549L, 1L, 1L, 11L #define SN_sha384WithRSAEncryption "RSA-SHA384" #define LN_sha384WithRSAEncryption "sha384WithRSAEncryption" 
#define NID_sha384WithRSAEncryption 669 #define OBJ_sha384WithRSAEncryption 1L, 2L, 840L, 113549L, 1L, 1L, 12L #define SN_sha512WithRSAEncryption "RSA-SHA512" #define LN_sha512WithRSAEncryption "sha512WithRSAEncryption" #define NID_sha512WithRSAEncryption 670 #define OBJ_sha512WithRSAEncryption 1L, 2L, 840L, 113549L, 1L, 1L, 13L #define SN_sha224WithRSAEncryption "RSA-SHA224" #define LN_sha224WithRSAEncryption "sha224WithRSAEncryption" #define NID_sha224WithRSAEncryption 671 #define OBJ_sha224WithRSAEncryption 1L, 2L, 840L, 113549L, 1L, 1L, 14L #define SN_sha256 "SHA256" #define LN_sha256 "sha256" #define NID_sha256 672 #define OBJ_sha256 2L, 16L, 840L, 1L, 101L, 3L, 4L, 2L, 1L #define SN_sha384 "SHA384" #define LN_sha384 "sha384" #define NID_sha384 673 #define OBJ_sha384 2L, 16L, 840L, 1L, 101L, 3L, 4L, 2L, 2L #define SN_sha512 "SHA512" #define LN_sha512 "sha512" #define NID_sha512 674 #define OBJ_sha512 2L, 16L, 840L, 1L, 101L, 3L, 4L, 2L, 3L #define SN_sha224 "SHA224" #define LN_sha224 "sha224" #define NID_sha224 675 #define OBJ_sha224 2L, 16L, 840L, 1L, 101L, 3L, 4L, 2L, 4L #define SN_identified_organization "identified-organization" #define NID_identified_organization 676 #define OBJ_identified_organization 1L, 3L #define SN_certicom_arc "certicom-arc" #define NID_certicom_arc 677 #define OBJ_certicom_arc 1L, 3L, 132L #define SN_wap "wap" #define NID_wap 678 #define OBJ_wap 2L, 23L, 43L #define SN_wap_wsg "wap-wsg" #define NID_wap_wsg 679 #define OBJ_wap_wsg 2L, 23L, 43L, 1L #define SN_X9_62_id_characteristic_two_basis "id-characteristic-two-basis" #define NID_X9_62_id_characteristic_two_basis 680 #define OBJ_X9_62_id_characteristic_two_basis 1L, 2L, 840L, 10045L, 1L, 2L, 3L #define SN_X9_62_onBasis "onBasis" #define NID_X9_62_onBasis 681 #define OBJ_X9_62_onBasis 1L, 2L, 840L, 10045L, 1L, 2L, 3L, 1L #define SN_X9_62_tpBasis "tpBasis" #define NID_X9_62_tpBasis 682 #define OBJ_X9_62_tpBasis 1L, 2L, 840L, 10045L, 1L, 2L, 3L, 2L #define SN_X9_62_ppBasis "ppBasis" 
#define NID_X9_62_ppBasis 683 #define OBJ_X9_62_ppBasis 1L, 2L, 840L, 10045L, 1L, 2L, 3L, 3L #define SN_X9_62_c2pnb163v1 "c2pnb163v1" #define NID_X9_62_c2pnb163v1 684 #define OBJ_X9_62_c2pnb163v1 1L, 2L, 840L, 10045L, 3L, 0L, 1L #define SN_X9_62_c2pnb163v2 "c2pnb163v2" #define NID_X9_62_c2pnb163v2 685 #define OBJ_X9_62_c2pnb163v2 1L, 2L, 840L, 10045L, 3L, 0L, 2L #define SN_X9_62_c2pnb163v3 "c2pnb163v3" #define NID_X9_62_c2pnb163v3 686 #define OBJ_X9_62_c2pnb163v3 1L, 2L, 840L, 10045L, 3L, 0L, 3L #define SN_X9_62_c2pnb176v1 "c2pnb176v1" #define NID_X9_62_c2pnb176v1 687 #define OBJ_X9_62_c2pnb176v1 1L, 2L, 840L, 10045L, 3L, 0L, 4L #define SN_X9_62_c2tnb191v1 "c2tnb191v1" #define NID_X9_62_c2tnb191v1 688 #define OBJ_X9_62_c2tnb191v1 1L, 2L, 840L, 10045L, 3L, 0L, 5L #define SN_X9_62_c2tnb191v2 "c2tnb191v2" #define NID_X9_62_c2tnb191v2 689 #define OBJ_X9_62_c2tnb191v2 1L, 2L, 840L, 10045L, 3L, 0L, 6L #define SN_X9_62_c2tnb191v3 "c2tnb191v3" #define NID_X9_62_c2tnb191v3 690 #define OBJ_X9_62_c2tnb191v3 1L, 2L, 840L, 10045L, 3L, 0L, 7L #define SN_X9_62_c2onb191v4 "c2onb191v4" #define NID_X9_62_c2onb191v4 691 #define OBJ_X9_62_c2onb191v4 1L, 2L, 840L, 10045L, 3L, 0L, 8L #define SN_X9_62_c2onb191v5 "c2onb191v5" #define NID_X9_62_c2onb191v5 692 #define OBJ_X9_62_c2onb191v5 1L, 2L, 840L, 10045L, 3L, 0L, 9L #define SN_X9_62_c2pnb208w1 "c2pnb208w1" #define NID_X9_62_c2pnb208w1 693 #define OBJ_X9_62_c2pnb208w1 1L, 2L, 840L, 10045L, 3L, 0L, 10L #define SN_X9_62_c2tnb239v1 "c2tnb239v1" #define NID_X9_62_c2tnb239v1 694 #define OBJ_X9_62_c2tnb239v1 1L, 2L, 840L, 10045L, 3L, 0L, 11L #define SN_X9_62_c2tnb239v2 "c2tnb239v2" #define NID_X9_62_c2tnb239v2 695 #define OBJ_X9_62_c2tnb239v2 1L, 2L, 840L, 10045L, 3L, 0L, 12L #define SN_X9_62_c2tnb239v3 "c2tnb239v3" #define NID_X9_62_c2tnb239v3 696 #define OBJ_X9_62_c2tnb239v3 1L, 2L, 840L, 10045L, 3L, 0L, 13L #define SN_X9_62_c2onb239v4 "c2onb239v4" #define NID_X9_62_c2onb239v4 697 #define OBJ_X9_62_c2onb239v4 1L, 2L, 840L, 10045L, 3L, 0L, 
14L #define SN_X9_62_c2onb239v5 "c2onb239v5" #define NID_X9_62_c2onb239v5 698 #define OBJ_X9_62_c2onb239v5 1L, 2L, 840L, 10045L, 3L, 0L, 15L #define SN_X9_62_c2pnb272w1 "c2pnb272w1" #define NID_X9_62_c2pnb272w1 699 #define OBJ_X9_62_c2pnb272w1 1L, 2L, 840L, 10045L, 3L, 0L, 16L #define SN_X9_62_c2pnb304w1 "c2pnb304w1" #define NID_X9_62_c2pnb304w1 700 #define OBJ_X9_62_c2pnb304w1 1L, 2L, 840L, 10045L, 3L, 0L, 17L #define SN_X9_62_c2tnb359v1 "c2tnb359v1" #define NID_X9_62_c2tnb359v1 701 #define OBJ_X9_62_c2tnb359v1 1L, 2L, 840L, 10045L, 3L, 0L, 18L #define SN_X9_62_c2pnb368w1 "c2pnb368w1" #define NID_X9_62_c2pnb368w1 702 #define OBJ_X9_62_c2pnb368w1 1L, 2L, 840L, 10045L, 3L, 0L, 19L #define SN_X9_62_c2tnb431r1 "c2tnb431r1" #define NID_X9_62_c2tnb431r1 703 #define OBJ_X9_62_c2tnb431r1 1L, 2L, 840L, 10045L, 3L, 0L, 20L #define SN_secp112r1 "secp112r1" #define NID_secp112r1 704 #define OBJ_secp112r1 1L, 3L, 132L, 0L, 6L #define SN_secp112r2 "secp112r2" #define NID_secp112r2 705 #define OBJ_secp112r2 1L, 3L, 132L, 0L, 7L #define SN_secp128r1 "secp128r1" #define NID_secp128r1 706 #define OBJ_secp128r1 1L, 3L, 132L, 0L, 28L #define SN_secp128r2 "secp128r2" #define NID_secp128r2 707 #define OBJ_secp128r2 1L, 3L, 132L, 0L, 29L #define SN_secp160k1 "secp160k1" #define NID_secp160k1 708 #define OBJ_secp160k1 1L, 3L, 132L, 0L, 9L #define SN_secp160r1 "secp160r1" #define NID_secp160r1 709 #define OBJ_secp160r1 1L, 3L, 132L, 0L, 8L #define SN_secp160r2 "secp160r2" #define NID_secp160r2 710 #define OBJ_secp160r2 1L, 3L, 132L, 0L, 30L #define SN_secp192k1 "secp192k1" #define NID_secp192k1 711 #define OBJ_secp192k1 1L, 3L, 132L, 0L, 31L #define SN_secp224k1 "secp224k1" #define NID_secp224k1 712 #define OBJ_secp224k1 1L, 3L, 132L, 0L, 32L #define SN_secp224r1 "secp224r1" #define NID_secp224r1 713 #define OBJ_secp224r1 1L, 3L, 132L, 0L, 33L #define SN_secp256k1 "secp256k1" #define NID_secp256k1 714 #define OBJ_secp256k1 1L, 3L, 132L, 0L, 10L #define SN_secp384r1 "secp384r1" #define 
NID_secp384r1 715 #define OBJ_secp384r1 1L, 3L, 132L, 0L, 34L #define SN_secp521r1 "secp521r1" #define NID_secp521r1 716 #define OBJ_secp521r1 1L, 3L, 132L, 0L, 35L #define SN_sect113r1 "sect113r1" #define NID_sect113r1 717 #define OBJ_sect113r1 1L, 3L, 132L, 0L, 4L #define SN_sect113r2 "sect113r2" #define NID_sect113r2 718 #define OBJ_sect113r2 1L, 3L, 132L, 0L, 5L #define SN_sect131r1 "sect131r1" #define NID_sect131r1 719 #define OBJ_sect131r1 1L, 3L, 132L, 0L, 22L #define SN_sect131r2 "sect131r2" #define NID_sect131r2 720 #define OBJ_sect131r2 1L, 3L, 132L, 0L, 23L #define SN_sect163k1 "sect163k1" #define NID_sect163k1 721 #define OBJ_sect163k1 1L, 3L, 132L, 0L, 1L #define SN_sect163r1 "sect163r1" #define NID_sect163r1 722 #define OBJ_sect163r1 1L, 3L, 132L, 0L, 2L #define SN_sect163r2 "sect163r2" #define NID_sect163r2 723 #define OBJ_sect163r2 1L, 3L, 132L, 0L, 15L #define SN_sect193r1 "sect193r1" #define NID_sect193r1 724 #define OBJ_sect193r1 1L, 3L, 132L, 0L, 24L #define SN_sect193r2 "sect193r2" #define NID_sect193r2 725 #define OBJ_sect193r2 1L, 3L, 132L, 0L, 25L #define SN_sect233k1 "sect233k1" #define NID_sect233k1 726 #define OBJ_sect233k1 1L, 3L, 132L, 0L, 26L #define SN_sect233r1 "sect233r1" #define NID_sect233r1 727 #define OBJ_sect233r1 1L, 3L, 132L, 0L, 27L #define SN_sect239k1 "sect239k1" #define NID_sect239k1 728 #define OBJ_sect239k1 1L, 3L, 132L, 0L, 3L #define SN_sect283k1 "sect283k1" #define NID_sect283k1 729 #define OBJ_sect283k1 1L, 3L, 132L, 0L, 16L #define SN_sect283r1 "sect283r1" #define NID_sect283r1 730 #define OBJ_sect283r1 1L, 3L, 132L, 0L, 17L #define SN_sect409k1 "sect409k1" #define NID_sect409k1 731 #define OBJ_sect409k1 1L, 3L, 132L, 0L, 36L #define SN_sect409r1 "sect409r1" #define NID_sect409r1 732 #define OBJ_sect409r1 1L, 3L, 132L, 0L, 37L #define SN_sect571k1 "sect571k1" #define NID_sect571k1 733 #define OBJ_sect571k1 1L, 3L, 132L, 0L, 38L #define SN_sect571r1 "sect571r1" #define NID_sect571r1 734 #define OBJ_sect571r1 1L, 3L, 
132L, 0L, 39L #define SN_wap_wsg_idm_ecid_wtls1 "wap-wsg-idm-ecid-wtls1" #define NID_wap_wsg_idm_ecid_wtls1 735 #define OBJ_wap_wsg_idm_ecid_wtls1 2L, 23L, 43L, 1L, 4L, 1L #define SN_wap_wsg_idm_ecid_wtls3 "wap-wsg-idm-ecid-wtls3" #define NID_wap_wsg_idm_ecid_wtls3 736 #define OBJ_wap_wsg_idm_ecid_wtls3 2L, 23L, 43L, 1L, 4L, 3L #define SN_wap_wsg_idm_ecid_wtls4 "wap-wsg-idm-ecid-wtls4" #define NID_wap_wsg_idm_ecid_wtls4 737 #define OBJ_wap_wsg_idm_ecid_wtls4 2L, 23L, 43L, 1L, 4L, 4L #define SN_wap_wsg_idm_ecid_wtls5 "wap-wsg-idm-ecid-wtls5" #define NID_wap_wsg_idm_ecid_wtls5 738 #define OBJ_wap_wsg_idm_ecid_wtls5 2L, 23L, 43L, 1L, 4L, 5L #define SN_wap_wsg_idm_ecid_wtls6 "wap-wsg-idm-ecid-wtls6" #define NID_wap_wsg_idm_ecid_wtls6 739 #define OBJ_wap_wsg_idm_ecid_wtls6 2L, 23L, 43L, 1L, 4L, 6L #define SN_wap_wsg_idm_ecid_wtls7 "wap-wsg-idm-ecid-wtls7" #define NID_wap_wsg_idm_ecid_wtls7 740 #define OBJ_wap_wsg_idm_ecid_wtls7 2L, 23L, 43L, 1L, 4L, 7L #define SN_wap_wsg_idm_ecid_wtls8 "wap-wsg-idm-ecid-wtls8" #define NID_wap_wsg_idm_ecid_wtls8 741 #define OBJ_wap_wsg_idm_ecid_wtls8 2L, 23L, 43L, 1L, 4L, 8L #define SN_wap_wsg_idm_ecid_wtls9 "wap-wsg-idm-ecid-wtls9" #define NID_wap_wsg_idm_ecid_wtls9 742 #define OBJ_wap_wsg_idm_ecid_wtls9 2L, 23L, 43L, 1L, 4L, 9L #define SN_wap_wsg_idm_ecid_wtls10 "wap-wsg-idm-ecid-wtls10" #define NID_wap_wsg_idm_ecid_wtls10 743 #define OBJ_wap_wsg_idm_ecid_wtls10 2L, 23L, 43L, 1L, 4L, 10L #define SN_wap_wsg_idm_ecid_wtls11 "wap-wsg-idm-ecid-wtls11" #define NID_wap_wsg_idm_ecid_wtls11 744 #define OBJ_wap_wsg_idm_ecid_wtls11 2L, 23L, 43L, 1L, 4L, 11L #define SN_wap_wsg_idm_ecid_wtls12 "wap-wsg-idm-ecid-wtls12" #define NID_wap_wsg_idm_ecid_wtls12 745 #define OBJ_wap_wsg_idm_ecid_wtls12 2L, 23L, 43L, 1L, 4L, 12L #define SN_any_policy "anyPolicy" #define LN_any_policy "X509v3 Any Policy" #define NID_any_policy 746 #define OBJ_any_policy 2L, 5L, 29L, 32L, 0L #define SN_policy_mappings "policyMappings" #define LN_policy_mappings "X509v3 Policy 
Mappings" #define NID_policy_mappings 747 #define OBJ_policy_mappings 2L, 5L, 29L, 33L #define SN_inhibit_any_policy "inhibitAnyPolicy" #define LN_inhibit_any_policy "X509v3 Inhibit Any Policy" #define NID_inhibit_any_policy 748 #define OBJ_inhibit_any_policy 2L, 5L, 29L, 54L #define SN_ipsec3 "Oakley-EC2N-3" #define LN_ipsec3 "ipsec3" #define NID_ipsec3 749 #define SN_ipsec4 "Oakley-EC2N-4" #define LN_ipsec4 "ipsec4" #define NID_ipsec4 750 #define SN_camellia_128_cbc "CAMELLIA-128-CBC" #define LN_camellia_128_cbc "camellia-128-cbc" #define NID_camellia_128_cbc 751 #define OBJ_camellia_128_cbc 1L, 2L, 392L, 200011L, 61L, 1L, 1L, 1L, 2L #define SN_camellia_192_cbc "CAMELLIA-192-CBC" #define LN_camellia_192_cbc "camellia-192-cbc" #define NID_camellia_192_cbc 752 #define OBJ_camellia_192_cbc 1L, 2L, 392L, 200011L, 61L, 1L, 1L, 1L, 3L #define SN_camellia_256_cbc "CAMELLIA-256-CBC" #define LN_camellia_256_cbc "camellia-256-cbc" #define NID_camellia_256_cbc 753 #define OBJ_camellia_256_cbc 1L, 2L, 392L, 200011L, 61L, 1L, 1L, 1L, 4L #define SN_camellia_128_ecb "CAMELLIA-128-ECB" #define LN_camellia_128_ecb "camellia-128-ecb" #define NID_camellia_128_ecb 754 #define OBJ_camellia_128_ecb 0L, 3L, 4401L, 5L, 3L, 1L, 9L, 1L #define SN_camellia_192_ecb "CAMELLIA-192-ECB" #define LN_camellia_192_ecb "camellia-192-ecb" #define NID_camellia_192_ecb 755 #define OBJ_camellia_192_ecb 0L, 3L, 4401L, 5L, 3L, 1L, 9L, 21L #define SN_camellia_256_ecb "CAMELLIA-256-ECB" #define LN_camellia_256_ecb "camellia-256-ecb" #define NID_camellia_256_ecb 756 #define OBJ_camellia_256_ecb 0L, 3L, 4401L, 5L, 3L, 1L, 9L, 41L #define SN_camellia_128_cfb128 "CAMELLIA-128-CFB" #define LN_camellia_128_cfb128 "camellia-128-cfb" #define NID_camellia_128_cfb128 757 #define OBJ_camellia_128_cfb128 0L, 3L, 4401L, 5L, 3L, 1L, 9L, 4L #define SN_camellia_192_cfb128 "CAMELLIA-192-CFB" #define LN_camellia_192_cfb128 "camellia-192-cfb" #define NID_camellia_192_cfb128 758 #define OBJ_camellia_192_cfb128 0L, 3L, 4401L, 
5L, 3L, 1L, 9L, 24L #define SN_camellia_256_cfb128 "CAMELLIA-256-CFB" #define LN_camellia_256_cfb128 "camellia-256-cfb" #define NID_camellia_256_cfb128 759 #define OBJ_camellia_256_cfb128 0L, 3L, 4401L, 5L, 3L, 1L, 9L, 44L #define SN_camellia_128_cfb1 "CAMELLIA-128-CFB1" #define LN_camellia_128_cfb1 "camellia-128-cfb1" #define NID_camellia_128_cfb1 760 #define SN_camellia_192_cfb1 "CAMELLIA-192-CFB1" #define LN_camellia_192_cfb1 "camellia-192-cfb1" #define NID_camellia_192_cfb1 761 #define SN_camellia_256_cfb1 "CAMELLIA-256-CFB1" #define LN_camellia_256_cfb1 "camellia-256-cfb1" #define NID_camellia_256_cfb1 762 #define SN_camellia_128_cfb8 "CAMELLIA-128-CFB8" #define LN_camellia_128_cfb8 "camellia-128-cfb8" #define NID_camellia_128_cfb8 763 #define SN_camellia_192_cfb8 "CAMELLIA-192-CFB8" #define LN_camellia_192_cfb8 "camellia-192-cfb8" #define NID_camellia_192_cfb8 764 #define SN_camellia_256_cfb8 "CAMELLIA-256-CFB8" #define LN_camellia_256_cfb8 "camellia-256-cfb8" #define NID_camellia_256_cfb8 765 #define SN_camellia_128_ofb128 "CAMELLIA-128-OFB" #define LN_camellia_128_ofb128 "camellia-128-ofb" #define NID_camellia_128_ofb128 766 #define OBJ_camellia_128_ofb128 0L, 3L, 4401L, 5L, 3L, 1L, 9L, 3L #define SN_camellia_192_ofb128 "CAMELLIA-192-OFB" #define LN_camellia_192_ofb128 "camellia-192-ofb" #define NID_camellia_192_ofb128 767 #define OBJ_camellia_192_ofb128 0L, 3L, 4401L, 5L, 3L, 1L, 9L, 23L #define SN_camellia_256_ofb128 "CAMELLIA-256-OFB" #define LN_camellia_256_ofb128 "camellia-256-ofb" #define NID_camellia_256_ofb128 768 #define OBJ_camellia_256_ofb128 0L, 3L, 4401L, 5L, 3L, 1L, 9L, 43L #define SN_subject_directory_attributes "subjectDirectoryAttributes" #define LN_subject_directory_attributes "X509v3 Subject Directory Attributes" #define NID_subject_directory_attributes 769 #define OBJ_subject_directory_attributes 2L, 5L, 29L, 9L #define SN_issuing_distribution_point "issuingDistributionPoint" #define LN_issuing_distribution_point "X509v3 Issuing 
Distribution Point" #define NID_issuing_distribution_point 770 #define OBJ_issuing_distribution_point 2L, 5L, 29L, 28L #define SN_certificate_issuer "certificateIssuer" #define LN_certificate_issuer "X509v3 Certificate Issuer" #define NID_certificate_issuer 771 #define OBJ_certificate_issuer 2L, 5L, 29L, 29L #define SN_kisa "KISA" #define LN_kisa "kisa" #define NID_kisa 773 #define OBJ_kisa 1L, 2L, 410L, 200004L #define SN_seed_ecb "SEED-ECB" #define LN_seed_ecb "seed-ecb" #define NID_seed_ecb 776 #define OBJ_seed_ecb 1L, 2L, 410L, 200004L, 1L, 3L #define SN_seed_cbc "SEED-CBC" #define LN_seed_cbc "seed-cbc" #define NID_seed_cbc 777 #define OBJ_seed_cbc 1L, 2L, 410L, 200004L, 1L, 4L #define SN_seed_ofb128 "SEED-OFB" #define LN_seed_ofb128 "seed-ofb" #define NID_seed_ofb128 778 #define OBJ_seed_ofb128 1L, 2L, 410L, 200004L, 1L, 6L #define SN_seed_cfb128 "SEED-CFB" #define LN_seed_cfb128 "seed-cfb" #define NID_seed_cfb128 779 #define OBJ_seed_cfb128 1L, 2L, 410L, 200004L, 1L, 5L #define SN_hmac_md5 "HMAC-MD5" #define LN_hmac_md5 "hmac-md5" #define NID_hmac_md5 780 #define OBJ_hmac_md5 1L, 3L, 6L, 1L, 5L, 5L, 8L, 1L, 1L #define SN_hmac_sha1 "HMAC-SHA1" #define LN_hmac_sha1 "hmac-sha1" #define NID_hmac_sha1 781 #define OBJ_hmac_sha1 1L, 3L, 6L, 1L, 5L, 5L, 8L, 1L, 2L #define SN_id_PasswordBasedMAC "id-PasswordBasedMAC" #define LN_id_PasswordBasedMAC "password based MAC" #define NID_id_PasswordBasedMAC 782 #define OBJ_id_PasswordBasedMAC 1L, 2L, 840L, 113533L, 7L, 66L, 13L #define SN_id_DHBasedMac "id-DHBasedMac" #define LN_id_DHBasedMac "Diffie-Hellman based MAC" #define NID_id_DHBasedMac 783 #define OBJ_id_DHBasedMac 1L, 2L, 840L, 113533L, 7L, 66L, 30L #define SN_id_it_suppLangTags "id-it-suppLangTags" #define NID_id_it_suppLangTags 784 #define OBJ_id_it_suppLangTags 1L, 3L, 6L, 1L, 5L, 5L, 7L, 4L, 16L #define SN_caRepository "caRepository" #define LN_caRepository "CA Repository" #define NID_caRepository 785 #define OBJ_caRepository 1L, 3L, 6L, 1L, 5L, 5L, 7L, 48L, 5L 
#define SN_id_smime_ct_compressedData "id-smime-ct-compressedData" #define NID_id_smime_ct_compressedData 786 #define OBJ_id_smime_ct_compressedData \ 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 1L, 9L #define SN_id_ct_asciiTextWithCRLF "id-ct-asciiTextWithCRLF" #define NID_id_ct_asciiTextWithCRLF 787 #define OBJ_id_ct_asciiTextWithCRLF 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 1L, 27L #define SN_id_aes128_wrap "id-aes128-wrap" #define NID_id_aes128_wrap 788 #define OBJ_id_aes128_wrap 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 5L #define SN_id_aes192_wrap "id-aes192-wrap" #define NID_id_aes192_wrap 789 #define OBJ_id_aes192_wrap 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 25L #define SN_id_aes256_wrap "id-aes256-wrap" #define NID_id_aes256_wrap 790 #define OBJ_id_aes256_wrap 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 45L #define SN_ecdsa_with_Recommended "ecdsa-with-Recommended" #define NID_ecdsa_with_Recommended 791 #define OBJ_ecdsa_with_Recommended 1L, 2L, 840L, 10045L, 4L, 2L #define SN_ecdsa_with_Specified "ecdsa-with-Specified" #define NID_ecdsa_with_Specified 792 #define OBJ_ecdsa_with_Specified 1L, 2L, 840L, 10045L, 4L, 3L #define SN_ecdsa_with_SHA224 "ecdsa-with-SHA224" #define NID_ecdsa_with_SHA224 793 #define OBJ_ecdsa_with_SHA224 1L, 2L, 840L, 10045L, 4L, 3L, 1L #define SN_ecdsa_with_SHA256 "ecdsa-with-SHA256" #define NID_ecdsa_with_SHA256 794 #define OBJ_ecdsa_with_SHA256 1L, 2L, 840L, 10045L, 4L, 3L, 2L #define SN_ecdsa_with_SHA384 "ecdsa-with-SHA384" #define NID_ecdsa_with_SHA384 795 #define OBJ_ecdsa_with_SHA384 1L, 2L, 840L, 10045L, 4L, 3L, 3L #define SN_ecdsa_with_SHA512 "ecdsa-with-SHA512" #define NID_ecdsa_with_SHA512 796 #define OBJ_ecdsa_with_SHA512 1L, 2L, 840L, 10045L, 4L, 3L, 4L #define LN_hmacWithMD5 "hmacWithMD5" #define NID_hmacWithMD5 797 #define OBJ_hmacWithMD5 1L, 2L, 840L, 113549L, 2L, 6L #define LN_hmacWithSHA224 "hmacWithSHA224" #define NID_hmacWithSHA224 798 #define OBJ_hmacWithSHA224 1L, 2L, 840L, 113549L, 2L, 8L #define LN_hmacWithSHA256 "hmacWithSHA256" 
#define NID_hmacWithSHA256 799 #define OBJ_hmacWithSHA256 1L, 2L, 840L, 113549L, 2L, 9L #define LN_hmacWithSHA384 "hmacWithSHA384" #define NID_hmacWithSHA384 800 #define OBJ_hmacWithSHA384 1L, 2L, 840L, 113549L, 2L, 10L #define LN_hmacWithSHA512 "hmacWithSHA512" #define NID_hmacWithSHA512 801 #define OBJ_hmacWithSHA512 1L, 2L, 840L, 113549L, 2L, 11L #define SN_dsa_with_SHA224 "dsa_with_SHA224" #define NID_dsa_with_SHA224 802 #define OBJ_dsa_with_SHA224 2L, 16L, 840L, 1L, 101L, 3L, 4L, 3L, 1L #define SN_dsa_with_SHA256 "dsa_with_SHA256" #define NID_dsa_with_SHA256 803 #define OBJ_dsa_with_SHA256 2L, 16L, 840L, 1L, 101L, 3L, 4L, 3L, 2L #define SN_whirlpool "whirlpool" #define NID_whirlpool 804 #define OBJ_whirlpool 1L, 0L, 10118L, 3L, 0L, 55L #define SN_cryptopro "cryptopro" #define NID_cryptopro 805 #define OBJ_cryptopro 1L, 2L, 643L, 2L, 2L #define SN_cryptocom "cryptocom" #define NID_cryptocom 806 #define OBJ_cryptocom 1L, 2L, 643L, 2L, 9L #define SN_id_GostR3411_94_with_GostR3410_2001 \ "id-GostR3411-94-with-GostR3410-2001" #define LN_id_GostR3411_94_with_GostR3410_2001 \ "GOST R 34.11-94 with GOST R 34.10-2001" #define NID_id_GostR3411_94_with_GostR3410_2001 807 #define OBJ_id_GostR3411_94_with_GostR3410_2001 1L, 2L, 643L, 2L, 2L, 3L #define SN_id_GostR3411_94_with_GostR3410_94 "id-GostR3411-94-with-GostR3410-94" #define LN_id_GostR3411_94_with_GostR3410_94 \ "GOST R 34.11-94 with GOST R 34.10-94" #define NID_id_GostR3411_94_with_GostR3410_94 808 #define OBJ_id_GostR3411_94_with_GostR3410_94 1L, 2L, 643L, 2L, 2L, 4L #define SN_id_GostR3411_94 "md_gost94" #define LN_id_GostR3411_94 "GOST R 34.11-94" #define NID_id_GostR3411_94 809 #define OBJ_id_GostR3411_94 1L, 2L, 643L, 2L, 2L, 9L #define SN_id_HMACGostR3411_94 "id-HMACGostR3411-94" #define LN_id_HMACGostR3411_94 "HMAC GOST 34.11-94" #define NID_id_HMACGostR3411_94 810 #define OBJ_id_HMACGostR3411_94 1L, 2L, 643L, 2L, 2L, 10L #define SN_id_GostR3410_2001 "gost2001" #define LN_id_GostR3410_2001 "GOST R 
34.10-2001" #define NID_id_GostR3410_2001 811 #define OBJ_id_GostR3410_2001 1L, 2L, 643L, 2L, 2L, 19L #define SN_id_GostR3410_94 "gost94" #define LN_id_GostR3410_94 "GOST R 34.10-94" #define NID_id_GostR3410_94 812 #define OBJ_id_GostR3410_94 1L, 2L, 643L, 2L, 2L, 20L #define SN_id_Gost28147_89 "gost89" #define LN_id_Gost28147_89 "GOST 28147-89" #define NID_id_Gost28147_89 813 #define OBJ_id_Gost28147_89 1L, 2L, 643L, 2L, 2L, 21L #define SN_gost89_cnt "gost89-cnt" #define NID_gost89_cnt 814 #define SN_id_Gost28147_89_MAC "gost-mac" #define LN_id_Gost28147_89_MAC "GOST 28147-89 MAC" #define NID_id_Gost28147_89_MAC 815 #define OBJ_id_Gost28147_89_MAC 1L, 2L, 643L, 2L, 2L, 22L #define SN_id_GostR3411_94_prf "prf-gostr3411-94" #define LN_id_GostR3411_94_prf "GOST R 34.11-94 PRF" #define NID_id_GostR3411_94_prf 816 #define OBJ_id_GostR3411_94_prf 1L, 2L, 643L, 2L, 2L, 23L #define SN_id_GostR3410_2001DH "id-GostR3410-2001DH" #define LN_id_GostR3410_2001DH "GOST R 34.10-2001 DH" #define NID_id_GostR3410_2001DH 817 #define OBJ_id_GostR3410_2001DH 1L, 2L, 643L, 2L, 2L, 98L #define SN_id_GostR3410_94DH "id-GostR3410-94DH" #define LN_id_GostR3410_94DH "GOST R 34.10-94 DH" #define NID_id_GostR3410_94DH 818 #define OBJ_id_GostR3410_94DH 1L, 2L, 643L, 2L, 2L, 99L #define SN_id_Gost28147_89_CryptoPro_KeyMeshing \ "id-Gost28147-89-CryptoPro-KeyMeshing" #define NID_id_Gost28147_89_CryptoPro_KeyMeshing 819 #define OBJ_id_Gost28147_89_CryptoPro_KeyMeshing 1L, 2L, 643L, 2L, 2L, 14L, 1L #define SN_id_Gost28147_89_None_KeyMeshing "id-Gost28147-89-None-KeyMeshing" #define NID_id_Gost28147_89_None_KeyMeshing 820 #define OBJ_id_Gost28147_89_None_KeyMeshing 1L, 2L, 643L, 2L, 2L, 14L, 0L #define SN_id_GostR3411_94_TestParamSet "id-GostR3411-94-TestParamSet" #define NID_id_GostR3411_94_TestParamSet 821 #define OBJ_id_GostR3411_94_TestParamSet 1L, 2L, 643L, 2L, 2L, 30L, 0L #define SN_id_GostR3411_94_CryptoProParamSet "id-GostR3411-94-CryptoProParamSet" #define 
NID_id_GostR3411_94_CryptoProParamSet 822 #define OBJ_id_GostR3411_94_CryptoProParamSet 1L, 2L, 643L, 2L, 2L, 30L, 1L #define SN_id_Gost28147_89_TestParamSet "id-Gost28147-89-TestParamSet" #define NID_id_Gost28147_89_TestParamSet 823 #define OBJ_id_Gost28147_89_TestParamSet 1L, 2L, 643L, 2L, 2L, 31L, 0L #define SN_id_Gost28147_89_CryptoPro_A_ParamSet \ "id-Gost28147-89-CryptoPro-A-ParamSet" #define NID_id_Gost28147_89_CryptoPro_A_ParamSet 824 #define OBJ_id_Gost28147_89_CryptoPro_A_ParamSet 1L, 2L, 643L, 2L, 2L, 31L, 1L #define SN_id_Gost28147_89_CryptoPro_B_ParamSet \ "id-Gost28147-89-CryptoPro-B-ParamSet" #define NID_id_Gost28147_89_CryptoPro_B_ParamSet 825 #define OBJ_id_Gost28147_89_CryptoPro_B_ParamSet 1L, 2L, 643L, 2L, 2L, 31L, 2L #define SN_id_Gost28147_89_CryptoPro_C_ParamSet \ "id-Gost28147-89-CryptoPro-C-ParamSet" #define NID_id_Gost28147_89_CryptoPro_C_ParamSet 826 #define OBJ_id_Gost28147_89_CryptoPro_C_ParamSet 1L, 2L, 643L, 2L, 2L, 31L, 3L #define SN_id_Gost28147_89_CryptoPro_D_ParamSet \ "id-Gost28147-89-CryptoPro-D-ParamSet" #define NID_id_Gost28147_89_CryptoPro_D_ParamSet 827 #define OBJ_id_Gost28147_89_CryptoPro_D_ParamSet 1L, 2L, 643L, 2L, 2L, 31L, 4L #define SN_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet \ "id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet" #define NID_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet 828 #define OBJ_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet \ 1L, 2L, 643L, 2L, 2L, 31L, 5L #define SN_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet \ "id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet" #define NID_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet 829 #define OBJ_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet \ 1L, 2L, 643L, 2L, 2L, 31L, 6L #define SN_id_Gost28147_89_CryptoPro_RIC_1_ParamSet \ "id-Gost28147-89-CryptoPro-RIC-1-ParamSet" #define NID_id_Gost28147_89_CryptoPro_RIC_1_ParamSet 830 #define OBJ_id_Gost28147_89_CryptoPro_RIC_1_ParamSet \ 1L, 2L, 643L, 2L, 2L, 31L, 7L #define SN_id_GostR3410_94_TestParamSet 
"id-GostR3410-94-TestParamSet" #define NID_id_GostR3410_94_TestParamSet 831 #define OBJ_id_GostR3410_94_TestParamSet 1L, 2L, 643L, 2L, 2L, 32L, 0L #define SN_id_GostR3410_94_CryptoPro_A_ParamSet \ "id-GostR3410-94-CryptoPro-A-ParamSet" #define NID_id_GostR3410_94_CryptoPro_A_ParamSet 832 #define OBJ_id_GostR3410_94_CryptoPro_A_ParamSet 1L, 2L, 643L, 2L, 2L, 32L, 2L #define SN_id_GostR3410_94_CryptoPro_B_ParamSet \ "id-GostR3410-94-CryptoPro-B-ParamSet" #define NID_id_GostR3410_94_CryptoPro_B_ParamSet 833 #define OBJ_id_GostR3410_94_CryptoPro_B_ParamSet 1L, 2L, 643L, 2L, 2L, 32L, 3L #define SN_id_GostR3410_94_CryptoPro_C_ParamSet \ "id-GostR3410-94-CryptoPro-C-ParamSet" #define NID_id_GostR3410_94_CryptoPro_C_ParamSet 834 #define OBJ_id_GostR3410_94_CryptoPro_C_ParamSet 1L, 2L, 643L, 2L, 2L, 32L, 4L #define SN_id_GostR3410_94_CryptoPro_D_ParamSet \ "id-GostR3410-94-CryptoPro-D-ParamSet" #define NID_id_GostR3410_94_CryptoPro_D_ParamSet 835 #define OBJ_id_GostR3410_94_CryptoPro_D_ParamSet 1L, 2L, 643L, 2L, 2L, 32L, 5L #define SN_id_GostR3410_94_CryptoPro_XchA_ParamSet \ "id-GostR3410-94-CryptoPro-XchA-ParamSet" #define NID_id_GostR3410_94_CryptoPro_XchA_ParamSet 836 #define OBJ_id_GostR3410_94_CryptoPro_XchA_ParamSet \ 1L, 2L, 643L, 2L, 2L, 33L, 1L #define SN_id_GostR3410_94_CryptoPro_XchB_ParamSet \ "id-GostR3410-94-CryptoPro-XchB-ParamSet" #define NID_id_GostR3410_94_CryptoPro_XchB_ParamSet 837 #define OBJ_id_GostR3410_94_CryptoPro_XchB_ParamSet \ 1L, 2L, 643L, 2L, 2L, 33L, 2L #define SN_id_GostR3410_94_CryptoPro_XchC_ParamSet \ "id-GostR3410-94-CryptoPro-XchC-ParamSet" #define NID_id_GostR3410_94_CryptoPro_XchC_ParamSet 838 #define OBJ_id_GostR3410_94_CryptoPro_XchC_ParamSet \ 1L, 2L, 643L, 2L, 2L, 33L, 3L #define SN_id_GostR3410_2001_TestParamSet "id-GostR3410-2001-TestParamSet" #define NID_id_GostR3410_2001_TestParamSet 839 #define OBJ_id_GostR3410_2001_TestParamSet 1L, 2L, 643L, 2L, 2L, 35L, 0L #define SN_id_GostR3410_2001_CryptoPro_A_ParamSet \ 
"id-GostR3410-2001-CryptoPro-A-ParamSet" #define NID_id_GostR3410_2001_CryptoPro_A_ParamSet 840 #define OBJ_id_GostR3410_2001_CryptoPro_A_ParamSet 1L, 2L, 643L, 2L, 2L, 35L, 1L #define SN_id_GostR3410_2001_CryptoPro_B_ParamSet \ "id-GostR3410-2001-CryptoPro-B-ParamSet" #define NID_id_GostR3410_2001_CryptoPro_B_ParamSet 841 #define OBJ_id_GostR3410_2001_CryptoPro_B_ParamSet 1L, 2L, 643L, 2L, 2L, 35L, 2L #define SN_id_GostR3410_2001_CryptoPro_C_ParamSet \ "id-GostR3410-2001-CryptoPro-C-ParamSet" #define NID_id_GostR3410_2001_CryptoPro_C_ParamSet 842 #define OBJ_id_GostR3410_2001_CryptoPro_C_ParamSet 1L, 2L, 643L, 2L, 2L, 35L, 3L #define SN_id_GostR3410_2001_CryptoPro_XchA_ParamSet \ "id-GostR3410-2001-CryptoPro-XchA-ParamSet" #define NID_id_GostR3410_2001_CryptoPro_XchA_ParamSet 843 #define OBJ_id_GostR3410_2001_CryptoPro_XchA_ParamSet \ 1L, 2L, 643L, 2L, 2L, 36L, 0L #define SN_id_GostR3410_2001_CryptoPro_XchB_ParamSet \ "id-GostR3410-2001-CryptoPro-XchB-ParamSet" #define NID_id_GostR3410_2001_CryptoPro_XchB_ParamSet 844 #define OBJ_id_GostR3410_2001_CryptoPro_XchB_ParamSet \ 1L, 2L, 643L, 2L, 2L, 36L, 1L #define SN_id_GostR3410_94_a "id-GostR3410-94-a" #define NID_id_GostR3410_94_a 845 #define OBJ_id_GostR3410_94_a 1L, 2L, 643L, 2L, 2L, 20L, 1L #define SN_id_GostR3410_94_aBis "id-GostR3410-94-aBis" #define NID_id_GostR3410_94_aBis 846 #define OBJ_id_GostR3410_94_aBis 1L, 2L, 643L, 2L, 2L, 20L, 2L #define SN_id_GostR3410_94_b "id-GostR3410-94-b" #define NID_id_GostR3410_94_b 847 #define OBJ_id_GostR3410_94_b 1L, 2L, 643L, 2L, 2L, 20L, 3L #define SN_id_GostR3410_94_bBis "id-GostR3410-94-bBis" #define NID_id_GostR3410_94_bBis 848 #define OBJ_id_GostR3410_94_bBis 1L, 2L, 643L, 2L, 2L, 20L, 4L #define SN_id_Gost28147_89_cc "id-Gost28147-89-cc" #define LN_id_Gost28147_89_cc "GOST 28147-89 Cryptocom ParamSet" #define NID_id_Gost28147_89_cc 849 #define OBJ_id_Gost28147_89_cc 1L, 2L, 643L, 2L, 9L, 1L, 6L, 1L #define SN_id_GostR3410_94_cc "gost94cc" #define 
LN_id_GostR3410_94_cc "GOST 34.10-94 Cryptocom" #define NID_id_GostR3410_94_cc 850 #define OBJ_id_GostR3410_94_cc 1L, 2L, 643L, 2L, 9L, 1L, 5L, 3L #define SN_id_GostR3410_2001_cc "gost2001cc" #define LN_id_GostR3410_2001_cc "GOST 34.10-2001 Cryptocom" #define NID_id_GostR3410_2001_cc 851 #define OBJ_id_GostR3410_2001_cc 1L, 2L, 643L, 2L, 9L, 1L, 5L, 4L #define SN_id_GostR3411_94_with_GostR3410_94_cc \ "id-GostR3411-94-with-GostR3410-94-cc" #define LN_id_GostR3411_94_with_GostR3410_94_cc \ "GOST R 34.11-94 with GOST R 34.10-94 Cryptocom" #define NID_id_GostR3411_94_with_GostR3410_94_cc 852 #define OBJ_id_GostR3411_94_with_GostR3410_94_cc \ 1L, 2L, 643L, 2L, 9L, 1L, 3L, 3L #define SN_id_GostR3411_94_with_GostR3410_2001_cc \ "id-GostR3411-94-with-GostR3410-2001-cc" #define LN_id_GostR3411_94_with_GostR3410_2001_cc \ "GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom" #define NID_id_GostR3411_94_with_GostR3410_2001_cc 853 #define OBJ_id_GostR3411_94_with_GostR3410_2001_cc \ 1L, 2L, 643L, 2L, 9L, 1L, 3L, 4L #define SN_id_GostR3410_2001_ParamSet_cc "id-GostR3410-2001-ParamSet-cc" #define LN_id_GostR3410_2001_ParamSet_cc \ "GOST R 3410-2001 Parameter Set Cryptocom" #define NID_id_GostR3410_2001_ParamSet_cc 854 #define OBJ_id_GostR3410_2001_ParamSet_cc 1L, 2L, 643L, 2L, 9L, 1L, 8L, 1L #define SN_hmac "HMAC" #define LN_hmac "hmac" #define NID_hmac 855 #define SN_LocalKeySet "LocalKeySet" #define LN_LocalKeySet "Microsoft Local Key set" #define NID_LocalKeySet 856 #define OBJ_LocalKeySet 1L, 3L, 6L, 1L, 4L, 1L, 311L, 17L, 2L #define SN_freshest_crl "freshestCRL" #define LN_freshest_crl "X509v3 Freshest CRL" #define NID_freshest_crl 857 #define OBJ_freshest_crl 2L, 5L, 29L, 46L #define SN_id_on_permanentIdentifier "id-on-permanentIdentifier" #define LN_id_on_permanentIdentifier "Permanent Identifier" #define NID_id_on_permanentIdentifier 858 #define OBJ_id_on_permanentIdentifier 1L, 3L, 6L, 1L, 5L, 5L, 7L, 8L, 3L #define LN_searchGuide "searchGuide" #define NID_searchGuide 859 
#define OBJ_searchGuide 2L, 5L, 4L, 14L #define LN_businessCategory "businessCategory" #define NID_businessCategory 860 #define OBJ_businessCategory 2L, 5L, 4L, 15L #define LN_postalAddress "postalAddress" #define NID_postalAddress 861 #define OBJ_postalAddress 2L, 5L, 4L, 16L #define LN_postOfficeBox "postOfficeBox" #define NID_postOfficeBox 862 #define OBJ_postOfficeBox 2L, 5L, 4L, 18L #define LN_physicalDeliveryOfficeName "physicalDeliveryOfficeName" #define NID_physicalDeliveryOfficeName 863 #define OBJ_physicalDeliveryOfficeName 2L, 5L, 4L, 19L #define LN_telephoneNumber "telephoneNumber" #define NID_telephoneNumber 864 #define OBJ_telephoneNumber 2L, 5L, 4L, 20L #define LN_telexNumber "telexNumber" #define NID_telexNumber 865 #define OBJ_telexNumber 2L, 5L, 4L, 21L #define LN_teletexTerminalIdentifier "teletexTerminalIdentifier" #define NID_teletexTerminalIdentifier 866 #define OBJ_teletexTerminalIdentifier 2L, 5L, 4L, 22L #define LN_facsimileTelephoneNumber "facsimileTelephoneNumber" #define NID_facsimileTelephoneNumber 867 #define OBJ_facsimileTelephoneNumber 2L, 5L, 4L, 23L #define LN_x121Address "x121Address" #define NID_x121Address 868 #define OBJ_x121Address 2L, 5L, 4L, 24L #define LN_internationaliSDNNumber "internationaliSDNNumber" #define NID_internationaliSDNNumber 869 #define OBJ_internationaliSDNNumber 2L, 5L, 4L, 25L #define LN_registeredAddress "registeredAddress" #define NID_registeredAddress 870 #define OBJ_registeredAddress 2L, 5L, 4L, 26L #define LN_destinationIndicator "destinationIndicator" #define NID_destinationIndicator 871 #define OBJ_destinationIndicator 2L, 5L, 4L, 27L #define LN_preferredDeliveryMethod "preferredDeliveryMethod" #define NID_preferredDeliveryMethod 872 #define OBJ_preferredDeliveryMethod 2L, 5L, 4L, 28L #define LN_presentationAddress "presentationAddress" #define NID_presentationAddress 873 #define OBJ_presentationAddress 2L, 5L, 4L, 29L #define LN_supportedApplicationContext "supportedApplicationContext" #define 
NID_supportedApplicationContext 874 #define OBJ_supportedApplicationContext 2L, 5L, 4L, 30L #define SN_member "member" #define NID_member 875 #define OBJ_member 2L, 5L, 4L, 31L #define SN_owner "owner" #define NID_owner 876 #define OBJ_owner 2L, 5L, 4L, 32L #define LN_roleOccupant "roleOccupant" #define NID_roleOccupant 877 #define OBJ_roleOccupant 2L, 5L, 4L, 33L #define SN_seeAlso "seeAlso" #define NID_seeAlso 878 #define OBJ_seeAlso 2L, 5L, 4L, 34L #define LN_userPassword "userPassword" #define NID_userPassword 879 #define OBJ_userPassword 2L, 5L, 4L, 35L #define LN_userCertificate "userCertificate" #define NID_userCertificate 880 #define OBJ_userCertificate 2L, 5L, 4L, 36L #define LN_cACertificate "cACertificate" #define NID_cACertificate 881 #define OBJ_cACertificate 2L, 5L, 4L, 37L #define LN_authorityRevocationList "authorityRevocationList" #define NID_authorityRevocationList 882 #define OBJ_authorityRevocationList 2L, 5L, 4L, 38L #define LN_certificateRevocationList "certificateRevocationList" #define NID_certificateRevocationList 883 #define OBJ_certificateRevocationList 2L, 5L, 4L, 39L #define LN_crossCertificatePair "crossCertificatePair" #define NID_crossCertificatePair 884 #define OBJ_crossCertificatePair 2L, 5L, 4L, 40L #define LN_enhancedSearchGuide "enhancedSearchGuide" #define NID_enhancedSearchGuide 885 #define OBJ_enhancedSearchGuide 2L, 5L, 4L, 47L #define LN_protocolInformation "protocolInformation" #define NID_protocolInformation 886 #define OBJ_protocolInformation 2L, 5L, 4L, 48L #define LN_distinguishedName "distinguishedName" #define NID_distinguishedName 887 #define OBJ_distinguishedName 2L, 5L, 4L, 49L #define LN_uniqueMember "uniqueMember" #define NID_uniqueMember 888 #define OBJ_uniqueMember 2L, 5L, 4L, 50L #define LN_houseIdentifier "houseIdentifier" #define NID_houseIdentifier 889 #define OBJ_houseIdentifier 2L, 5L, 4L, 51L #define LN_supportedAlgorithms "supportedAlgorithms" #define NID_supportedAlgorithms 890 #define 
OBJ_supportedAlgorithms 2L, 5L, 4L, 52L #define LN_deltaRevocationList "deltaRevocationList" #define NID_deltaRevocationList 891 #define OBJ_deltaRevocationList 2L, 5L, 4L, 53L #define SN_dmdName "dmdName" #define NID_dmdName 892 #define OBJ_dmdName 2L, 5L, 4L, 54L #define SN_id_alg_PWRI_KEK "id-alg-PWRI-KEK" #define NID_id_alg_PWRI_KEK 893 #define OBJ_id_alg_PWRI_KEK 1L, 2L, 840L, 113549L, 1L, 9L, 16L, 3L, 9L #define SN_cmac "CMAC" #define LN_cmac "cmac" #define NID_cmac 894 #define SN_aes_128_gcm "id-aes128-GCM" #define LN_aes_128_gcm "aes-128-gcm" #define NID_aes_128_gcm 895 #define OBJ_aes_128_gcm 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 6L #define SN_aes_128_ccm "id-aes128-CCM" #define LN_aes_128_ccm "aes-128-ccm" #define NID_aes_128_ccm 896 #define OBJ_aes_128_ccm 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 7L #define SN_id_aes128_wrap_pad "id-aes128-wrap-pad" #define NID_id_aes128_wrap_pad 897 #define OBJ_id_aes128_wrap_pad 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 8L #define SN_aes_192_gcm "id-aes192-GCM" #define LN_aes_192_gcm "aes-192-gcm" #define NID_aes_192_gcm 898 #define OBJ_aes_192_gcm 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 26L #define SN_aes_192_ccm "id-aes192-CCM" #define LN_aes_192_ccm "aes-192-ccm" #define NID_aes_192_ccm 899 #define OBJ_aes_192_ccm 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 27L #define SN_id_aes192_wrap_pad "id-aes192-wrap-pad" #define NID_id_aes192_wrap_pad 900 #define OBJ_id_aes192_wrap_pad 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 28L #define SN_aes_256_gcm "id-aes256-GCM" #define LN_aes_256_gcm "aes-256-gcm" #define NID_aes_256_gcm 901 #define OBJ_aes_256_gcm 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 46L #define SN_aes_256_ccm "id-aes256-CCM" #define LN_aes_256_ccm "aes-256-ccm" #define NID_aes_256_ccm 902 #define OBJ_aes_256_ccm 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 47L #define SN_id_aes256_wrap_pad "id-aes256-wrap-pad" #define NID_id_aes256_wrap_pad 903 #define OBJ_id_aes256_wrap_pad 2L, 16L, 840L, 1L, 101L, 3L, 4L, 1L, 48L #define SN_aes_128_ctr 
"AES-128-CTR" #define LN_aes_128_ctr "aes-128-ctr" #define NID_aes_128_ctr 904 #define SN_aes_192_ctr "AES-192-CTR" #define LN_aes_192_ctr "aes-192-ctr" #define NID_aes_192_ctr 905 #define SN_aes_256_ctr "AES-256-CTR" #define LN_aes_256_ctr "aes-256-ctr" #define NID_aes_256_ctr 906 #define SN_id_camellia128_wrap "id-camellia128-wrap" #define NID_id_camellia128_wrap 907 #define OBJ_id_camellia128_wrap 1L, 2L, 392L, 200011L, 61L, 1L, 1L, 3L, 2L #define SN_id_camellia192_wrap "id-camellia192-wrap" #define NID_id_camellia192_wrap 908 #define OBJ_id_camellia192_wrap 1L, 2L, 392L, 200011L, 61L, 1L, 1L, 3L, 3L #define SN_id_camellia256_wrap "id-camellia256-wrap" #define NID_id_camellia256_wrap 909 #define OBJ_id_camellia256_wrap 1L, 2L, 392L, 200011L, 61L, 1L, 1L, 3L, 4L #define SN_anyExtendedKeyUsage "anyExtendedKeyUsage" #define LN_anyExtendedKeyUsage "Any Extended Key Usage" #define NID_anyExtendedKeyUsage 910 #define OBJ_anyExtendedKeyUsage 2L, 5L, 29L, 37L, 0L #define SN_mgf1 "MGF1" #define LN_mgf1 "mgf1" #define NID_mgf1 911 #define OBJ_mgf1 1L, 2L, 840L, 113549L, 1L, 1L, 8L #define SN_rsassaPss "RSASSA-PSS" #define LN_rsassaPss "rsassaPss" #define NID_rsassaPss 912 #define OBJ_rsassaPss 1L, 2L, 840L, 113549L, 1L, 1L, 10L #define SN_aes_128_xts "AES-128-XTS" #define LN_aes_128_xts "aes-128-xts" #define NID_aes_128_xts 913 #define SN_aes_256_xts "AES-256-XTS" #define LN_aes_256_xts "aes-256-xts" #define NID_aes_256_xts 914 #define SN_rc4_hmac_md5 "RC4-HMAC-MD5" #define LN_rc4_hmac_md5 "rc4-hmac-md5" #define NID_rc4_hmac_md5 915 #define SN_aes_128_cbc_hmac_sha1 "AES-128-CBC-HMAC-SHA1" #define LN_aes_128_cbc_hmac_sha1 "aes-128-cbc-hmac-sha1" #define NID_aes_128_cbc_hmac_sha1 916 #define SN_aes_192_cbc_hmac_sha1 "AES-192-CBC-HMAC-SHA1" #define LN_aes_192_cbc_hmac_sha1 "aes-192-cbc-hmac-sha1" #define NID_aes_192_cbc_hmac_sha1 917 #define SN_aes_256_cbc_hmac_sha1 "AES-256-CBC-HMAC-SHA1" #define LN_aes_256_cbc_hmac_sha1 "aes-256-cbc-hmac-sha1" #define 
NID_aes_256_cbc_hmac_sha1 918 #define SN_rsaesOaep "RSAES-OAEP" #define LN_rsaesOaep "rsaesOaep" #define NID_rsaesOaep 919 #define OBJ_rsaesOaep 1L, 2L, 840L, 113549L, 1L, 1L, 7L #define SN_dhpublicnumber "dhpublicnumber" #define LN_dhpublicnumber "X9.42 DH" #define NID_dhpublicnumber 920 #define OBJ_dhpublicnumber 1L, 2L, 840L, 10046L, 2L, 1L #define SN_brainpoolP160r1 "brainpoolP160r1" #define NID_brainpoolP160r1 921 #define OBJ_brainpoolP160r1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 1L #define SN_brainpoolP160t1 "brainpoolP160t1" #define NID_brainpoolP160t1 922 #define OBJ_brainpoolP160t1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 2L #define SN_brainpoolP192r1 "brainpoolP192r1" #define NID_brainpoolP192r1 923 #define OBJ_brainpoolP192r1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 3L #define SN_brainpoolP192t1 "brainpoolP192t1" #define NID_brainpoolP192t1 924 #define OBJ_brainpoolP192t1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 4L #define SN_brainpoolP224r1 "brainpoolP224r1" #define NID_brainpoolP224r1 925 #define OBJ_brainpoolP224r1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 5L #define SN_brainpoolP224t1 "brainpoolP224t1" #define NID_brainpoolP224t1 926 #define OBJ_brainpoolP224t1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 6L #define SN_brainpoolP256r1 "brainpoolP256r1" #define NID_brainpoolP256r1 927 #define OBJ_brainpoolP256r1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 7L #define SN_brainpoolP256t1 "brainpoolP256t1" #define NID_brainpoolP256t1 928 #define OBJ_brainpoolP256t1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 8L #define SN_brainpoolP320r1 "brainpoolP320r1" #define NID_brainpoolP320r1 929 #define OBJ_brainpoolP320r1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 9L #define SN_brainpoolP320t1 "brainpoolP320t1" #define NID_brainpoolP320t1 930 #define OBJ_brainpoolP320t1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 10L #define SN_brainpoolP384r1 "brainpoolP384r1" #define NID_brainpoolP384r1 931 #define OBJ_brainpoolP384r1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 11L #define SN_brainpoolP384t1 "brainpoolP384t1" #define 
NID_brainpoolP384t1 932 #define OBJ_brainpoolP384t1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 12L #define SN_brainpoolP512r1 "brainpoolP512r1" #define NID_brainpoolP512r1 933 #define OBJ_brainpoolP512r1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 13L #define SN_brainpoolP512t1 "brainpoolP512t1" #define NID_brainpoolP512t1 934 #define OBJ_brainpoolP512t1 1L, 3L, 36L, 3L, 3L, 2L, 8L, 1L, 1L, 14L #define SN_pSpecified "PSPECIFIED" #define LN_pSpecified "pSpecified" #define NID_pSpecified 935 #define OBJ_pSpecified 1L, 2L, 840L, 113549L, 1L, 1L, 9L #define SN_dhSinglePass_stdDH_sha1kdf_scheme "dhSinglePass-stdDH-sha1kdf-scheme" #define NID_dhSinglePass_stdDH_sha1kdf_scheme 936 #define OBJ_dhSinglePass_stdDH_sha1kdf_scheme \ 1L, 3L, 133L, 16L, 840L, 63L, 0L, 2L #define SN_dhSinglePass_stdDH_sha224kdf_scheme \ "dhSinglePass-stdDH-sha224kdf-scheme" #define NID_dhSinglePass_stdDH_sha224kdf_scheme 937 #define OBJ_dhSinglePass_stdDH_sha224kdf_scheme 1L, 3L, 132L, 1L, 11L, 0L #define SN_dhSinglePass_stdDH_sha256kdf_scheme \ "dhSinglePass-stdDH-sha256kdf-scheme" #define NID_dhSinglePass_stdDH_sha256kdf_scheme 938 #define OBJ_dhSinglePass_stdDH_sha256kdf_scheme 1L, 3L, 132L, 1L, 11L, 1L #define SN_dhSinglePass_stdDH_sha384kdf_scheme \ "dhSinglePass-stdDH-sha384kdf-scheme" #define NID_dhSinglePass_stdDH_sha384kdf_scheme 939 #define OBJ_dhSinglePass_stdDH_sha384kdf_scheme 1L, 3L, 132L, 1L, 11L, 2L #define SN_dhSinglePass_stdDH_sha512kdf_scheme \ "dhSinglePass-stdDH-sha512kdf-scheme" #define NID_dhSinglePass_stdDH_sha512kdf_scheme 940 #define OBJ_dhSinglePass_stdDH_sha512kdf_scheme 1L, 3L, 132L, 1L, 11L, 3L #define SN_dhSinglePass_cofactorDH_sha1kdf_scheme \ "dhSinglePass-cofactorDH-sha1kdf-scheme" #define NID_dhSinglePass_cofactorDH_sha1kdf_scheme 941 #define OBJ_dhSinglePass_cofactorDH_sha1kdf_scheme \ 1L, 3L, 133L, 16L, 840L, 63L, 0L, 3L #define SN_dhSinglePass_cofactorDH_sha224kdf_scheme \ "dhSinglePass-cofactorDH-sha224kdf-scheme" #define NID_dhSinglePass_cofactorDH_sha224kdf_scheme 942 
#define OBJ_dhSinglePass_cofactorDH_sha224kdf_scheme 1L, 3L, 132L, 1L, 14L, 0L #define SN_dhSinglePass_cofactorDH_sha256kdf_scheme \ "dhSinglePass-cofactorDH-sha256kdf-scheme" #define NID_dhSinglePass_cofactorDH_sha256kdf_scheme 943 #define OBJ_dhSinglePass_cofactorDH_sha256kdf_scheme 1L, 3L, 132L, 1L, 14L, 1L #define SN_dhSinglePass_cofactorDH_sha384kdf_scheme \ "dhSinglePass-cofactorDH-sha384kdf-scheme" #define NID_dhSinglePass_cofactorDH_sha384kdf_scheme 944 #define OBJ_dhSinglePass_cofactorDH_sha384kdf_scheme 1L, 3L, 132L, 1L, 14L, 2L #define SN_dhSinglePass_cofactorDH_sha512kdf_scheme \ "dhSinglePass-cofactorDH-sha512kdf-scheme" #define NID_dhSinglePass_cofactorDH_sha512kdf_scheme 945 #define OBJ_dhSinglePass_cofactorDH_sha512kdf_scheme 1L, 3L, 132L, 1L, 14L, 3L #define SN_dh_std_kdf "dh-std-kdf" #define NID_dh_std_kdf 946 #define SN_dh_cofactor_kdf "dh-cofactor-kdf" #define NID_dh_cofactor_kdf 947 #define SN_X25519 "X25519" #define NID_X25519 948 #define OBJ_X25519 1L, 3L, 101L, 110L #define SN_ED25519 "ED25519" #define NID_ED25519 949 #define OBJ_ED25519 1L, 3L, 101L, 112L #define SN_chacha20_poly1305 "ChaCha20-Poly1305" #define LN_chacha20_poly1305 "chacha20-poly1305" #define NID_chacha20_poly1305 950 #define SN_kx_rsa "KxRSA" #define LN_kx_rsa "kx-rsa" #define NID_kx_rsa 951 #define SN_kx_ecdhe "KxECDHE" #define LN_kx_ecdhe "kx-ecdhe" #define NID_kx_ecdhe 952 #define SN_kx_psk "KxPSK" #define LN_kx_psk "kx-psk" #define NID_kx_psk 953 #define SN_auth_rsa "AuthRSA" #define LN_auth_rsa "auth-rsa" #define NID_auth_rsa 954 #define SN_auth_ecdsa "AuthECDSA" #define LN_auth_ecdsa "auth-ecdsa" #define NID_auth_ecdsa 955 #define SN_auth_psk "AuthPSK" #define LN_auth_psk "auth-psk" #define NID_auth_psk 956 #define SN_kx_any "KxANY" #define LN_kx_any "kx-any" #define NID_kx_any 957 #define SN_auth_any "AuthANY" #define LN_auth_any "auth-any" #define NID_auth_any 958 #define SN_ED448 "ED448" #define NID_ED448 960 #define OBJ_ED448 1L, 3L, 101L, 113L #define SN_X448 
"X448" #define NID_X448 961 #define OBJ_X448 1L, 3L, 101L, 111L #define SN_sha512_256 "SHA512-256" #define LN_sha512_256 "sha512-256" #define NID_sha512_256 962 #define OBJ_sha512_256 2L, 16L, 840L, 1L, 101L, 3L, 4L, 2L, 6L #define SN_hkdf "HKDF" #define LN_hkdf "hkdf" #define NID_hkdf 963 #define SN_X25519Kyber768Draft00 "X25519Kyber768Draft00" #define NID_X25519Kyber768Draft00 964 #define SN_X25519MLKEM768 "X25519MLKEM768" #define NID_X25519MLKEM768 965 #if defined(__cplusplus) } /* extern C */ #endif #endif /* OPENSSL_HEADER_NID_H */ ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_obj.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_OBJ_H #define OPENSSL_HEADER_OBJ_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_bytestring.h" #include "CNIOBoringSSL_nid.h" // IWYU pragma: export #if defined(__cplusplus) extern "C" { #endif // The objects library deals with the registration and indexing of ASN.1 object // identifiers. These values are often written as a dotted sequence of numbers, // e.g. 1.2.840.113549.1.9.16.3.9. // // Internally, OpenSSL likes to deal with these values by numbering them with // numbers called "nids". OpenSSL has a large, built-in database of common // object identifiers and also has both short and long names for them. // // This library provides functions for translating between object identifiers, // nids, short names and long names. // // The nid values should not be used outside of a single process: they are not // stable identifiers. // Basic operations. // OBJ_dup returns a duplicate copy of |obj| or NULL on allocation failure. 
The // caller must call |ASN1_OBJECT_free| on the result to release it. OPENSSL_EXPORT ASN1_OBJECT *OBJ_dup(const ASN1_OBJECT *obj); // OBJ_cmp returns a value less than, equal to or greater than zero if |a| is // less than, equal to or greater than |b|, respectively. OPENSSL_EXPORT int OBJ_cmp(const ASN1_OBJECT *a, const ASN1_OBJECT *b); // OBJ_get0_data returns a pointer to the DER representation of |obj|. This is // the contents of the DER-encoded identifier, not including the tag and length. // If |obj| does not have an associated object identifier (i.e. it is a nid-only // value), this value is the empty string. OPENSSL_EXPORT const uint8_t *OBJ_get0_data(const ASN1_OBJECT *obj); // OBJ_length returns the length of the DER representation of |obj|. This is the // contents of the DER-encoded identifier, not including the tag and length. If // |obj| does not have an associated object identifier (i.e. it is a nid-only // value), this value is zero. OPENSSL_EXPORT size_t OBJ_length(const ASN1_OBJECT *obj); // Looking up nids. // OBJ_obj2nid returns the nid corresponding to |obj|, or |NID_undef| if no // such object is known. OPENSSL_EXPORT int OBJ_obj2nid(const ASN1_OBJECT *obj); // OBJ_cbs2nid returns the nid corresponding to the DER data in |cbs|, or // |NID_undef| if no such object is known. OPENSSL_EXPORT int OBJ_cbs2nid(const CBS *cbs); // OBJ_sn2nid returns the nid corresponding to |short_name|, or |NID_undef| if // no such short name is known. OPENSSL_EXPORT int OBJ_sn2nid(const char *short_name); // OBJ_ln2nid returns the nid corresponding to |long_name|, or |NID_undef| if // no such long name is known. OPENSSL_EXPORT int OBJ_ln2nid(const char *long_name); // OBJ_txt2nid returns the nid corresponding to |s|, which may be a short name, // long name, or an ASCII string containing a dotted sequence of numbers. It // returns the nid or NID_undef if unknown. OPENSSL_EXPORT int OBJ_txt2nid(const char *s); // Getting information about nids. 
// OBJ_nid2obj returns the |ASN1_OBJECT| corresponding to |nid|, or NULL if // |nid| is unknown. // // Although the output is not const, this function returns a static, immutable // |ASN1_OBJECT|. It is not necessary to release the object with // |ASN1_OBJECT_free|. // // However, functions like |X509_ALGOR_set0| expect to take ownership of a // possibly dynamically-allocated |ASN1_OBJECT|. |ASN1_OBJECT_free| is a no-op // for static |ASN1_OBJECT|s, so |OBJ_nid2obj| is compatible with such // functions. // // Callers are encouraged to store the result of this function in a const // pointer. However, if using functions like |X509_ALGOR_set0|, callers may use // a non-const pointer and manage ownership. OPENSSL_EXPORT ASN1_OBJECT *OBJ_nid2obj(int nid); // OBJ_get_undef returns the object for |NID_undef|. Prefer this function over // |OBJ_nid2obj| to avoid pulling in the full OID table. OPENSSL_EXPORT const ASN1_OBJECT *OBJ_get_undef(void); // OBJ_nid2sn returns the short name for |nid|, or NULL if |nid| is unknown. OPENSSL_EXPORT const char *OBJ_nid2sn(int nid); // OBJ_nid2ln returns the long name for |nid|, or NULL if |nid| is unknown. OPENSSL_EXPORT const char *OBJ_nid2ln(int nid); // OBJ_nid2cbb writes |nid| as an ASN.1 OBJECT IDENTIFIER to |out|. It returns // one on success or zero otherwise. OPENSSL_EXPORT int OBJ_nid2cbb(CBB *out, int nid); // Dealing with textual representations of object identifiers. // OBJ_txt2obj returns an ASN1_OBJECT for the textual representation in |s|. // If |dont_search_names| is zero, then |s| will be matched against the long // and short names of known objects to find a match. Otherwise |s| must // contain an ASCII string with a dotted sequence of numbers. The resulting // object need not be previously known. It returns a freshly allocated // |ASN1_OBJECT| or NULL on error. OPENSSL_EXPORT ASN1_OBJECT *OBJ_txt2obj(const char *s, int dont_search_names); // OBJ_obj2txt converts |obj| to a textual representation. 
If // |always_return_oid| is zero then |obj| will be matched against known objects // and the long (preferably) or short name will be used if found. Otherwise // |obj| will be converted into a dotted sequence of integers. If |out| is not // NULL, then at most |out_len| bytes of the textual form will be written // there. If |out_len| is at least one, then string written to |out| will // always be NUL terminated. It returns the number of characters that could // have been written, not including the final NUL, or -1 on error. OPENSSL_EXPORT int OBJ_obj2txt(char *out, int out_len, const ASN1_OBJECT *obj, int always_return_oid); // Adding objects at runtime. // OBJ_create adds a known object and returns the NID of the new object, or // NID_undef on error. // // WARNING: This function modifies global state. The table cannot contain // duplicate OIDs, short names, or long names. If two callers in the same // address space add conflicting values, only one registration will take effect. // Avoid this function if possible. Instead, callers can process OIDs unknown to // BoringSSL by acting on the byte representation directly. See // |ASN1_OBJECT_create|, |OBJ_get0_data|, and |OBJ_length|. OPENSSL_EXPORT int OBJ_create(const char *oid, const char *short_name, const char *long_name); // Handling signature algorithm identifiers. // // Some NIDs (e.g. sha256WithRSAEncryption) specify both a digest algorithm and // a public key algorithm. The following functions map between pairs of digest // and public-key algorithms and the NIDs that specify their combination. // // Sometimes the combination NID leaves the digest unspecified (e.g. // rsassaPss). In these cases, the digest NID is |NID_undef|. // OBJ_find_sigid_algs finds the digest and public-key NIDs that correspond to // the signing algorithm |sign_nid|. If successful, it sets |*out_digest_nid| // and |*out_pkey_nid| and returns one. Otherwise it returns zero. 
Any of // |out_digest_nid| or |out_pkey_nid| can be NULL if the caller doesn't need // that output value. OPENSSL_EXPORT int OBJ_find_sigid_algs(int sign_nid, int *out_digest_nid, int *out_pkey_nid); // OBJ_find_sigid_by_algs finds the signature NID that corresponds to the // combination of |digest_nid| and |pkey_nid|. If successful, it sets // |*out_sign_nid| and returns one. Otherwise it returns zero. The // |out_sign_nid| argument can be NULL if the caller only wishes to learn // whether the combination is valid. OPENSSL_EXPORT int OBJ_find_sigid_by_algs(int *out_sign_nid, int digest_nid, int pkey_nid); // Deprecated functions. typedef struct obj_name_st { int type; int alias; const char *name; const char *data; } OBJ_NAME; #define OBJ_NAME_TYPE_MD_METH 1 #define OBJ_NAME_TYPE_CIPHER_METH 2 // OBJ_NAME_do_all_sorted calls |callback| zero or more times, each time with // the name of a different primitive. If |type| is |OBJ_NAME_TYPE_MD_METH| then // the primitives will be hash functions, alternatively if |type| is // |OBJ_NAME_TYPE_CIPHER_METH| then the primitives will be ciphers or cipher // modes. // // This function is ill-specified and should never be used. OPENSSL_EXPORT void OBJ_NAME_do_all_sorted( int type, void (*callback)(const OBJ_NAME *, void *arg), void *arg); // OBJ_NAME_do_all calls |OBJ_NAME_do_all_sorted|. OPENSSL_EXPORT void OBJ_NAME_do_all(int type, void (*callback)(const OBJ_NAME *, void *arg), void *arg); // OBJ_cleanup does nothing. 
OPENSSL_EXPORT void OBJ_cleanup(void); #if defined(__cplusplus) } // extern C #endif #define OBJ_R_UNKNOWN_NID 100 #define OBJ_R_INVALID_OID_STRING 101 #endif // OPENSSL_HEADER_OBJ_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_obj_mac.h ================================================ /* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. */ #include "CNIOBoringSSL_nid.h" ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_objects.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. */ #include "CNIOBoringSSL_obj.h" ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_opensslconf.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. 
*/ #ifndef OPENSSL_HEADER_OPENSSLCONF_H #define OPENSSL_HEADER_OPENSSLCONF_H /* Keep in sync with the list in rust/bssl-sys/build.rs */ #define OPENSSL_NO_ASYNC #define OPENSSL_NO_BF #define OPENSSL_NO_BLAKE2 #define OPENSSL_NO_BUF_FREELISTS #define OPENSSL_NO_CAMELLIA #define OPENSSL_NO_CAPIENG #define OPENSSL_NO_CAST #define OPENSSL_NO_CMS #define OPENSSL_NO_COMP #define OPENSSL_NO_CT #define OPENSSL_NO_DANE #define OPENSSL_NO_DEPRECATED #define OPENSSL_NO_DGRAM #define OPENSSL_NO_DYNAMIC_ENGINE #define OPENSSL_NO_EC_NISTP_64_GCC_128 #define OPENSSL_NO_EC2M #define OPENSSL_NO_EGD #define OPENSSL_NO_ENGINE #define OPENSSL_NO_GMP #define OPENSSL_NO_GOST #define OPENSSL_NO_HEARTBEATS #define OPENSSL_NO_HW #define OPENSSL_NO_IDEA #define OPENSSL_NO_JPAKE #define OPENSSL_NO_KRB5 #define OPENSSL_NO_MD2 #define OPENSSL_NO_MDC2 #define OPENSSL_NO_OCB #define OPENSSL_NO_OCSP #define OPENSSL_NO_RC2 #define OPENSSL_NO_RC5 #define OPENSSL_NO_RFC3779 #define OPENSSL_NO_RIPEMD #define OPENSSL_NO_RMD160 #define OPENSSL_NO_SCTP #define OPENSSL_NO_SEED #define OPENSSL_NO_SM2 #define OPENSSL_NO_SM3 #define OPENSSL_NO_SM4 #define OPENSSL_NO_SRP #define OPENSSL_NO_SSL_TRACE #define OPENSSL_NO_SSL2 #define OPENSSL_NO_SSL3 #define OPENSSL_NO_SSL3_METHOD #define OPENSSL_NO_STATIC_ENGINE #define OPENSSL_NO_STORE #define OPENSSL_NO_WHIRLPOOL #endif // OPENSSL_HEADER_OPENSSLCONF_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_opensslv.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. */ #include "CNIOBoringSSL_crypto.h" ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_ossl_typ.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. */ #include "CNIOBoringSSL_base.h" ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_pem.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_PEM_H #define OPENSSL_HEADER_PEM_H #include "CNIOBoringSSL_base64.h" #include "CNIOBoringSSL_bio.h" #include "CNIOBoringSSL_cipher.h" #include "CNIOBoringSSL_digest.h" #include "CNIOBoringSSL_evp.h" #include "CNIOBoringSSL_pkcs7.h" #include "CNIOBoringSSL_stack.h" #include "CNIOBoringSSL_x509.h" // For compatibility with open-iscsi, which assumes that it can get // |OPENSSL_malloc| from pem.h or err.h #include "CNIOBoringSSL_crypto.h" #ifdef __cplusplus extern "C" { #endif #define PEM_BUFSIZE 1024 #define PEM_STRING_X509_OLD "X509 CERTIFICATE" #define PEM_STRING_X509 "CERTIFICATE" #define PEM_STRING_X509_PAIR "CERTIFICATE PAIR" #define PEM_STRING_X509_TRUSTED "TRUSTED CERTIFICATE" #define PEM_STRING_X509_REQ_OLD "NEW CERTIFICATE REQUEST" #define PEM_STRING_X509_REQ "CERTIFICATE REQUEST" #define PEM_STRING_X509_CRL "X509 CRL" #define PEM_STRING_EVP_PKEY "ANY PRIVATE KEY" #define PEM_STRING_PUBLIC "PUBLIC KEY" #define PEM_STRING_RSA "RSA PRIVATE KEY" #define PEM_STRING_RSA_PUBLIC "RSA PUBLIC KEY" #define PEM_STRING_DSA "DSA PRIVATE KEY" #define PEM_STRING_DSA_PUBLIC "DSA PUBLIC KEY" #define PEM_STRING_EC "EC PRIVATE KEY" #define PEM_STRING_PKCS7 "PKCS7" #define PEM_STRING_PKCS7_SIGNED "PKCS #7 SIGNED DATA" #define PEM_STRING_PKCS8 "ENCRYPTED PRIVATE KEY" #define PEM_STRING_PKCS8INF "PRIVATE KEY" #define PEM_STRING_DHPARAMS "DH PARAMETERS" #define PEM_STRING_SSL_SESSION "SSL SESSION PARAMETERS" #define PEM_STRING_DSAPARAMS "DSA PARAMETERS" #define PEM_STRING_ECDSA_PUBLIC "ECDSA PUBLIC KEY" #define PEM_STRING_ECPRIVATEKEY "EC PRIVATE KEY" #define PEM_STRING_CMS "CMS" // enc_type is one off #define PEM_TYPE_ENCRYPTED 10 #define PEM_TYPE_MIC_ONLY 20 #define PEM_TYPE_MIC_CLEAR 30 #define PEM_TYPE_CLEAR 40 // These macros make the PEM_read/PEM_write functions easier to maintain and // write. 
Now they are all implemented with either: // IMPLEMENT_PEM_rw(...) or IMPLEMENT_PEM_rw_cb(...) #define IMPLEMENT_PEM_read_fp(name, type, str, asn1) \ static void *pem_read_##name##_d2i(void **x, const unsigned char **inp, \ long len) { \ return d2i_##asn1((type **)x, inp, len); \ } \ OPENSSL_EXPORT type *PEM_read_##name(FILE *fp, type **x, \ pem_password_cb *cb, void *u) { \ return (type *)PEM_ASN1_read(pem_read_##name##_d2i, str, fp, (void **)x, \ cb, u); \ } #define IMPLEMENT_PEM_write_fp(name, type, str, asn1) \ static int pem_write_##name##_i2d(const void *x, unsigned char **outp) { \ return i2d_##asn1((type *)x, outp); \ } \ OPENSSL_EXPORT int PEM_write_##name(FILE *fp, type *x) { \ return PEM_ASN1_write(pem_write_##name##_i2d, str, fp, x, NULL, NULL, 0, \ NULL, NULL); \ } #define IMPLEMENT_PEM_write_fp_const(name, type, str, asn1) \ static int pem_write_##name##_i2d(const void *x, unsigned char **outp) { \ return i2d_##asn1((const type *)x, outp); \ } \ OPENSSL_EXPORT int PEM_write_##name(FILE *fp, const type *x) { \ return PEM_ASN1_write(pem_write_##name##_i2d, str, fp, (void *)x, NULL, \ NULL, 0, NULL, NULL); \ } #define IMPLEMENT_PEM_write_cb_fp(name, type, str, asn1) \ static int pem_write_##name##_i2d(const void *x, unsigned char **outp) { \ return i2d_##asn1((type *)x, outp); \ } \ OPENSSL_EXPORT int PEM_write_##name( \ FILE *fp, type *x, const EVP_CIPHER *enc, const unsigned char *pass, \ int pass_len, pem_password_cb *cb, void *u) { \ return PEM_ASN1_write(pem_write_##name##_i2d, str, fp, x, enc, pass, \ pass_len, cb, u); \ } #define IMPLEMENT_PEM_write_cb_fp_const(name, type, str, asn1) \ static int pem_write_##name##_i2d(const void *x, unsigned char **outp) { \ return i2d_##asn1((const type *)x, outp); \ } \ OPENSSL_EXPORT int PEM_write_##name( \ FILE *fp, type *x, const EVP_CIPHER *enc, const unsigned char *pass, \ int pass_len, pem_password_cb *cb, void *u) { \ return PEM_ASN1_write(pem_write_##name##_i2d, str, fp, x, enc, pass, \ pass_len, cb, 
u); \ } #define IMPLEMENT_PEM_read_bio(name, type, str, asn1) \ static void *pem_read_bio_##name##_d2i(void **x, const unsigned char **inp, \ long len) { \ return d2i_##asn1((type **)x, inp, len); \ } \ OPENSSL_EXPORT type *PEM_read_bio_##name(BIO *bp, type **x, \ pem_password_cb *cb, void *u) { \ return (type *)PEM_ASN1_read_bio(pem_read_bio_##name##_d2i, str, bp, \ (void **)x, cb, u); \ } #define IMPLEMENT_PEM_write_bio(name, type, str, asn1) \ static int pem_write_bio_##name##_i2d(const void *x, unsigned char **outp) { \ return i2d_##asn1((type *)x, outp); \ } \ OPENSSL_EXPORT int PEM_write_bio_##name(BIO *bp, type *x) { \ return PEM_ASN1_write_bio(pem_write_bio_##name##_i2d, str, bp, x, NULL, \ NULL, 0, NULL, NULL); \ } #define IMPLEMENT_PEM_write_bio_const(name, type, str, asn1) \ static int pem_write_bio_##name##_i2d(const void *x, unsigned char **outp) { \ return i2d_##asn1((const type *)x, outp); \ } \ OPENSSL_EXPORT int PEM_write_bio_##name(BIO *bp, const type *x) { \ return PEM_ASN1_write_bio(pem_write_bio_##name##_i2d, str, bp, (void *)x, \ NULL, NULL, 0, NULL, NULL); \ } #define IMPLEMENT_PEM_write_cb_bio(name, type, str, asn1) \ static int pem_write_bio_##name##_i2d(const void *x, unsigned char **outp) { \ return i2d_##asn1((type *)x, outp); \ } \ OPENSSL_EXPORT int PEM_write_bio_##name( \ BIO *bp, type *x, const EVP_CIPHER *enc, const unsigned char *pass, \ int pass_len, pem_password_cb *cb, void *u) { \ return PEM_ASN1_write_bio(pem_write_bio_##name##_i2d, str, bp, x, enc, \ pass, pass_len, cb, u); \ } #define IMPLEMENT_PEM_write_cb_bio_const(name, type, str, asn1) \ static int pem_write_bio_##name##_i2d(const void *x, unsigned char **outp) { \ return i2d_##asn1((const type *)x, outp); \ } \ OPENSSL_EXPORT int PEM_write_bio_##name( \ BIO *bp, type *x, const EVP_CIPHER *enc, const unsigned char *pass, \ int pass_len, pem_password_cb *cb, void *u) { \ return PEM_ASN1_write_bio(pem_write_bio_##name##_i2d, str, bp, (void *)x, \ enc, pass, pass_len, cb, 
u); \ } #define IMPLEMENT_PEM_write(name, type, str, asn1) \ IMPLEMENT_PEM_write_bio(name, type, str, asn1) \ IMPLEMENT_PEM_write_fp(name, type, str, asn1) #define IMPLEMENT_PEM_write_const(name, type, str, asn1) \ IMPLEMENT_PEM_write_bio_const(name, type, str, asn1) \ IMPLEMENT_PEM_write_fp_const(name, type, str, asn1) #define IMPLEMENT_PEM_write_cb(name, type, str, asn1) \ IMPLEMENT_PEM_write_cb_bio(name, type, str, asn1) \ IMPLEMENT_PEM_write_cb_fp(name, type, str, asn1) #define IMPLEMENT_PEM_write_cb_const(name, type, str, asn1) \ IMPLEMENT_PEM_write_cb_bio_const(name, type, str, asn1) \ IMPLEMENT_PEM_write_cb_fp_const(name, type, str, asn1) #define IMPLEMENT_PEM_read(name, type, str, asn1) \ IMPLEMENT_PEM_read_bio(name, type, str, asn1) \ IMPLEMENT_PEM_read_fp(name, type, str, asn1) #define IMPLEMENT_PEM_rw(name, type, str, asn1) \ IMPLEMENT_PEM_read(name, type, str, asn1) \ IMPLEMENT_PEM_write(name, type, str, asn1) #define IMPLEMENT_PEM_rw_const(name, type, str, asn1) \ IMPLEMENT_PEM_read(name, type, str, asn1) \ IMPLEMENT_PEM_write_const(name, type, str, asn1) #define IMPLEMENT_PEM_rw_cb(name, type, str, asn1) \ IMPLEMENT_PEM_read(name, type, str, asn1) \ IMPLEMENT_PEM_write_cb(name, type, str, asn1) // These are the same except they are for the declarations #define DECLARE_PEM_read_fp(name, type) \ OPENSSL_EXPORT type *PEM_read_##name(FILE *fp, type **x, \ pem_password_cb *cb, void *u); #define DECLARE_PEM_write_fp(name, type) \ OPENSSL_EXPORT int PEM_write_##name(FILE *fp, type *x); #define DECLARE_PEM_write_fp_const(name, type) \ OPENSSL_EXPORT int PEM_write_##name(FILE *fp, const type *x); #define DECLARE_PEM_write_cb_fp(name, type) \ OPENSSL_EXPORT int PEM_write_##name( \ FILE *fp, type *x, const EVP_CIPHER *enc, const unsigned char *pass, \ int pass_len, pem_password_cb *cb, void *u); #define DECLARE_PEM_read_bio(name, type) \ OPENSSL_EXPORT type *PEM_read_bio_##name(BIO *bp, type **x, \ pem_password_cb *cb, void *u); #define 
DECLARE_PEM_write_bio(name, type) \ OPENSSL_EXPORT int PEM_write_bio_##name(BIO *bp, type *x); #define DECLARE_PEM_write_bio_const(name, type) \ OPENSSL_EXPORT int PEM_write_bio_##name(BIO *bp, const type *x); #define DECLARE_PEM_write_cb_bio(name, type) \ OPENSSL_EXPORT int PEM_write_bio_##name( \ BIO *bp, type *x, const EVP_CIPHER *enc, const unsigned char *pass, \ int pass_len, pem_password_cb *cb, void *u); #define DECLARE_PEM_write(name, type) \ DECLARE_PEM_write_bio(name, type) \ DECLARE_PEM_write_fp(name, type) #define DECLARE_PEM_write_const(name, type) \ DECLARE_PEM_write_bio_const(name, type) \ DECLARE_PEM_write_fp_const(name, type) #define DECLARE_PEM_write_cb(name, type) \ DECLARE_PEM_write_cb_bio(name, type) \ DECLARE_PEM_write_cb_fp(name, type) #define DECLARE_PEM_read(name, type) \ DECLARE_PEM_read_bio(name, type) \ DECLARE_PEM_read_fp(name, type) #define DECLARE_PEM_rw(name, type) \ DECLARE_PEM_read(name, type) \ DECLARE_PEM_write(name, type) #define DECLARE_PEM_rw_const(name, type) \ DECLARE_PEM_read(name, type) \ DECLARE_PEM_write_const(name, type) #define DECLARE_PEM_rw_cb(name, type) \ DECLARE_PEM_read(name, type) \ DECLARE_PEM_write_cb(name, type) // "userdata": new with OpenSSL 0.9.4 typedef int pem_password_cb(char *buf, int size, int rwflag, void *userdata); // PEM_read_bio reads from |bp|, until the next PEM block. If one is found, it // returns one and sets |*name|, |*header|, and |*data| to newly-allocated // buffers containing the PEM type, the header block, and the decoded data, // respectively. |*name| and |*header| are NUL-terminated C strings, while // |*data| has |*len| bytes. The caller must release each of |*name|, |*header|, // and |*data| with |OPENSSL_free| when done. If no PEM block is found, this // function returns zero and pushes |PEM_R_NO_START_LINE| to the error queue. If // one is found, but there is an error decoding it, it returns zero and pushes // some other error to the error queue. 
OPENSSL_EXPORT int PEM_read_bio(BIO *bp, char **name, char **header, unsigned char **data, long *len); // PEM_write_bio writes a PEM block to |bp|, containing |len| bytes from |data| // as data. |name| and |hdr| are NUL-terminated C strings containing the PEM // type and header block, respectively. This function returns zero on error and // the number of bytes written on success. OPENSSL_EXPORT int PEM_write_bio(BIO *bp, const char *name, const char *hdr, const unsigned char *data, long len); OPENSSL_EXPORT int PEM_bytes_read_bio(unsigned char **pdata, long *plen, char **pnm, const char *name, BIO *bp, pem_password_cb *cb, void *u); OPENSSL_EXPORT void *PEM_ASN1_read_bio(d2i_of_void *d2i, const char *name, BIO *bp, void **x, pem_password_cb *cb, void *u); OPENSSL_EXPORT int PEM_ASN1_write_bio(i2d_of_void *i2d, const char *name, BIO *bp, void *x, const EVP_CIPHER *enc, const unsigned char *pass, int pass_len, pem_password_cb *cb, void *u); // PEM_X509_INFO_read_bio reads PEM blocks from |bp| and decodes any // certificates, CRLs, and private keys found. It returns a // |STACK_OF(X509_INFO)| structure containing the results, or NULL on error. // // If |sk| is NULL, the result on success will be a newly-allocated // |STACK_OF(X509_INFO)| structure which should be released with // |sk_X509_INFO_pop_free| and |X509_INFO_free| when done. // // If |sk| is non-NULL, it appends the results to |sk| instead and returns |sk| // on success. In this case, the caller retains ownership of |sk| in both // success and failure. // // This function will decrypt any encrypted certificates in |bp|, using |cb|, // but it will not decrypt encrypted private keys. Encrypted private keys are // instead represented as placeholder |X509_INFO| objects with an empty |x_pkey| // field. This allows this function to be used with inputs with unencrypted // certificates, but encrypted passwords, without knowing the password. 
However, // it also means that this function cannot be used to decrypt the private key // when the password is known. // // WARNING: If the input contains "TRUSTED CERTIFICATE" PEM blocks, this // function parses auxiliary properties as in |d2i_X509_AUX|. Passing untrusted // input to this function allows an attacker to influence those properties. See // |d2i_X509_AUX| for details. OPENSSL_EXPORT STACK_OF(X509_INFO) *PEM_X509_INFO_read_bio( BIO *bp, STACK_OF(X509_INFO) *sk, pem_password_cb *cb, void *u); // PEM_X509_INFO_read behaves like |PEM_X509_INFO_read_bio| but reads from a // |FILE|. OPENSSL_EXPORT STACK_OF(X509_INFO) *PEM_X509_INFO_read(FILE *fp, STACK_OF(X509_INFO) *sk, pem_password_cb *cb, void *u); OPENSSL_EXPORT int PEM_read(FILE *fp, char **name, char **header, unsigned char **data, long *len); OPENSSL_EXPORT int PEM_write(FILE *fp, const char *name, const char *hdr, const unsigned char *data, long len); OPENSSL_EXPORT void *PEM_ASN1_read(d2i_of_void *d2i, const char *name, FILE *fp, void **x, pem_password_cb *cb, void *u); OPENSSL_EXPORT int PEM_ASN1_write(i2d_of_void *i2d, const char *name, FILE *fp, void *x, const EVP_CIPHER *enc, const unsigned char *pass, int pass_len, pem_password_cb *callback, void *u); // PEM_def_callback treats |userdata| as a string and copies it into |buf|, // assuming its |size| is sufficient. Returns the length of the string, or -1 on // error. Error cases the buffer being too small, or |buf| and |userdata| being // NULL. Note that this is different from OpenSSL, which prompts for a password. OPENSSL_EXPORT int PEM_def_callback(char *buf, int size, int rwflag, void *userdata); DECLARE_PEM_rw(X509, X509) // TODO(crbug.com/boringssl/426): When documenting these, copy the warning // about auxiliary properties from |PEM_X509_INFO_read_bio|. 
DECLARE_PEM_rw(X509_AUX, X509) DECLARE_PEM_rw(X509_REQ, X509_REQ) DECLARE_PEM_write(X509_REQ_NEW, X509_REQ) DECLARE_PEM_rw(X509_CRL, X509_CRL) DECLARE_PEM_rw(PKCS7, PKCS7) DECLARE_PEM_rw(PKCS8, X509_SIG) DECLARE_PEM_rw(PKCS8_PRIV_KEY_INFO, PKCS8_PRIV_KEY_INFO) DECLARE_PEM_rw_cb(RSAPrivateKey, RSA) DECLARE_PEM_rw_const(RSAPublicKey, RSA) DECLARE_PEM_rw(RSA_PUBKEY, RSA) #ifndef OPENSSL_NO_DSA DECLARE_PEM_rw_cb(DSAPrivateKey, DSA) DECLARE_PEM_rw(DSA_PUBKEY, DSA) DECLARE_PEM_rw_const(DSAparams, DSA) #endif DECLARE_PEM_rw_cb(ECPrivateKey, EC_KEY) DECLARE_PEM_rw(EC_PUBKEY, EC_KEY) DECLARE_PEM_rw_const(DHparams, DH) DECLARE_PEM_rw_cb(PrivateKey, EVP_PKEY) DECLARE_PEM_rw(PUBKEY, EVP_PKEY) OPENSSL_EXPORT int PEM_write_bio_PKCS8PrivateKey_nid(BIO *bp, const EVP_PKEY *x, int nid, const char *pass, int pass_len, pem_password_cb *cb, void *u); OPENSSL_EXPORT int PEM_write_bio_PKCS8PrivateKey(BIO *bp, const EVP_PKEY *x, const EVP_CIPHER *enc, const char *pass, int pass_len, pem_password_cb *cb, void *u); OPENSSL_EXPORT int i2d_PKCS8PrivateKey_bio(BIO *bp, const EVP_PKEY *x, const EVP_CIPHER *enc, const char *pass, int pass_len, pem_password_cb *cb, void *u); OPENSSL_EXPORT int i2d_PKCS8PrivateKey_nid_bio(BIO *bp, const EVP_PKEY *x, int nid, const char *pass, int pass_len, pem_password_cb *cb, void *u); OPENSSL_EXPORT EVP_PKEY *d2i_PKCS8PrivateKey_bio(BIO *bp, EVP_PKEY **x, pem_password_cb *cb, void *u); OPENSSL_EXPORT int i2d_PKCS8PrivateKey_fp(FILE *fp, const EVP_PKEY *x, const EVP_CIPHER *enc, const char *pass, int pass_len, pem_password_cb *cb, void *u); OPENSSL_EXPORT int i2d_PKCS8PrivateKey_nid_fp(FILE *fp, const EVP_PKEY *x, int nid, const char *pass, int pass_len, pem_password_cb *cb, void *u); OPENSSL_EXPORT int PEM_write_PKCS8PrivateKey_nid(FILE *fp, const EVP_PKEY *x, int nid, const char *pass, int pass_len, pem_password_cb *cb, void *u); OPENSSL_EXPORT EVP_PKEY *d2i_PKCS8PrivateKey_fp(FILE *fp, EVP_PKEY **x, pem_password_cb *cb, void *u); OPENSSL_EXPORT int 
PEM_write_PKCS8PrivateKey(FILE *fp, const EVP_PKEY *x, const EVP_CIPHER *enc, const char *pass, int pass_len, pem_password_cb *cd, void *u); #ifdef __cplusplus } // extern "C" #endif #define PEM_R_BAD_BASE64_DECODE 100 #define PEM_R_BAD_DECRYPT 101 #define PEM_R_BAD_END_LINE 102 #define PEM_R_BAD_IV_CHARS 103 #define PEM_R_BAD_PASSWORD_READ 104 #define PEM_R_CIPHER_IS_NULL 105 #define PEM_R_ERROR_CONVERTING_PRIVATE_KEY 106 #define PEM_R_NOT_DEK_INFO 107 #define PEM_R_NOT_ENCRYPTED 108 #define PEM_R_NOT_PROC_TYPE 109 #define PEM_R_NO_START_LINE 110 #define PEM_R_READ_KEY 111 #define PEM_R_SHORT_HEADER 112 #define PEM_R_UNSUPPORTED_CIPHER 113 #define PEM_R_UNSUPPORTED_ENCRYPTION 114 #define PEM_R_UNSUPPORTED_PROC_TYPE_VERSION 115 #endif // OPENSSL_HEADER_PEM_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_pkcs12.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. 
*/ #include "CNIOBoringSSL_pkcs8.h" ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_pkcs7.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_PKCS7_H #define OPENSSL_HEADER_PKCS7_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_stack.h" #if defined(__cplusplus) extern "C" { #endif // PKCS#7. // // This library contains functions for extracting information from PKCS#7 // structures (RFC 2315). DECLARE_STACK_OF(CRYPTO_BUFFER) DECLARE_STACK_OF(X509) DECLARE_STACK_OF(X509_CRL) // PKCS7_get_raw_certificates parses a PKCS#7, SignedData structure from |cbs| // and appends the included certificates to |out_certs|. It returns one on // success and zero on error. |cbs| is advanced passed the structure. // // Note that a SignedData structure may contain no certificates, in which case // this function succeeds but does not append any certificates. Additionally, // certificates in SignedData structures are unordered. Callers should not // assume a particular order in |*out_certs| and may need to search for matches // or run path-building algorithms. 
OPENSSL_EXPORT int PKCS7_get_raw_certificates( STACK_OF(CRYPTO_BUFFER) *out_certs, CBS *cbs, CRYPTO_BUFFER_POOL *pool); // PKCS7_get_certificates behaves like |PKCS7_get_raw_certificates| but parses // them into |X509| objects. OPENSSL_EXPORT int PKCS7_get_certificates(STACK_OF(X509) *out_certs, CBS *cbs); // PKCS7_bundle_raw_certificates appends a PKCS#7, SignedData structure // containing |certs| to |out|. It returns one on success and zero on error. // Note that certificates in SignedData structures are unordered. The order in // |certs| will not be preserved. OPENSSL_EXPORT int PKCS7_bundle_raw_certificates( CBB *out, const STACK_OF(CRYPTO_BUFFER) *certs); // PKCS7_bundle_certificates behaves like |PKCS7_bundle_raw_certificates| but // takes |X509| objects as input. OPENSSL_EXPORT int PKCS7_bundle_certificates( CBB *out, const STACK_OF(X509) *certs); // PKCS7_get_CRLs parses a PKCS#7, SignedData structure from |cbs| and appends // the included CRLs to |out_crls|. It returns one on success and zero on error. // |cbs| is advanced passed the structure. // // Note that a SignedData structure may contain no CRLs, in which case this // function succeeds but does not append any CRLs. Additionally, CRLs in // SignedData structures are unordered. Callers should not assume an order in // |*out_crls| and may need to search for matches. OPENSSL_EXPORT int PKCS7_get_CRLs(STACK_OF(X509_CRL) *out_crls, CBS *cbs); // PKCS7_bundle_CRLs appends a PKCS#7, SignedData structure containing // |crls| to |out|. It returns one on success and zero on error. Note that CRLs // in SignedData structures are unordered. The order in |crls| will not be // preserved. OPENSSL_EXPORT int PKCS7_bundle_CRLs(CBB *out, const STACK_OF(X509_CRL) *crls); // PKCS7_get_PEM_certificates reads a PEM-encoded, PKCS#7, SignedData structure // from |pem_bio| and appends the included certificates to |out_certs|. It // returns one on success and zero on error. 
// // Note that a SignedData structure may contain no certificates, in which case // this function succeeds but does not append any certificates. Additionally, // certificates in SignedData structures are unordered. Callers should not // assume a particular order in |*out_certs| and may need to search for matches // or run path-building algorithms. OPENSSL_EXPORT int PKCS7_get_PEM_certificates(STACK_OF(X509) *out_certs, BIO *pem_bio); // PKCS7_get_PEM_CRLs reads a PEM-encoded, PKCS#7, SignedData structure from // |pem_bio| and appends the included CRLs to |out_crls|. It returns one on // success and zero on error. // // Note that a SignedData structure may contain no CRLs, in which case this // function succeeds but does not append any CRLs. Additionally, CRLs in // SignedData structures are unordered. Callers should not assume an order in // |*out_crls| and may need to search for matches. OPENSSL_EXPORT int PKCS7_get_PEM_CRLs(STACK_OF(X509_CRL) *out_crls, BIO *pem_bio); // Deprecated functions. // // These functions are a compatibility layer over a subset of OpenSSL's PKCS#7 // API. It intentionally does not implement the whole thing, only the minimum // needed to build cryptography.io. typedef struct { STACK_OF(X509) *cert; STACK_OF(X509_CRL) *crl; } PKCS7_SIGNED; typedef struct { STACK_OF(X509) *cert; STACK_OF(X509_CRL) *crl; } PKCS7_SIGN_ENVELOPE; typedef void PKCS7_ENVELOPE; typedef void PKCS7_DIGEST; typedef void PKCS7_ENCRYPT; typedef void PKCS7_SIGNER_INFO; typedef struct { uint8_t *ber_bytes; size_t ber_len; // Unlike OpenSSL, the following fields are immutable. They filled in when the // object is parsed and ignored in serialization. 
ASN1_OBJECT *type; union { char *ptr; ASN1_OCTET_STRING *data; PKCS7_SIGNED *sign; PKCS7_ENVELOPE *enveloped; PKCS7_SIGN_ENVELOPE *signed_and_enveloped; PKCS7_DIGEST *digest; PKCS7_ENCRYPT *encrypted; ASN1_TYPE *other; } d; } PKCS7; // d2i_PKCS7 parses a BER-encoded, PKCS#7 signed data ContentInfo structure from // |len| bytes at |*inp|, as described in |d2i_SAMPLE|. OPENSSL_EXPORT PKCS7 *d2i_PKCS7(PKCS7 **out, const uint8_t **inp, size_t len); // d2i_PKCS7_bio behaves like |d2i_PKCS7| but reads the input from |bio|. If // the length of the object is indefinite the full contents of |bio| are read. // // If the function fails then some unknown amount of data may have been read // from |bio|. OPENSSL_EXPORT PKCS7 *d2i_PKCS7_bio(BIO *bio, PKCS7 **out); // i2d_PKCS7 marshals |p7| as a DER-encoded PKCS#7 ContentInfo structure, as // described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_PKCS7(const PKCS7 *p7, uint8_t **out); // i2d_PKCS7_bio writes |p7| to |bio|. It returns one on success and zero on // error. OPENSSL_EXPORT int i2d_PKCS7_bio(BIO *bio, const PKCS7 *p7); // PKCS7_free releases memory associated with |p7|. OPENSSL_EXPORT void PKCS7_free(PKCS7 *p7); // PKCS7_type_is_data returns zero. OPENSSL_EXPORT int PKCS7_type_is_data(const PKCS7 *p7); // PKCS7_type_is_digest returns zero. OPENSSL_EXPORT int PKCS7_type_is_digest(const PKCS7 *p7); // PKCS7_type_is_encrypted returns zero. OPENSSL_EXPORT int PKCS7_type_is_encrypted(const PKCS7 *p7); // PKCS7_type_is_enveloped returns zero. OPENSSL_EXPORT int PKCS7_type_is_enveloped(const PKCS7 *p7); // PKCS7_type_is_signed returns one. (We only supporte signed data // ContentInfos.) OPENSSL_EXPORT int PKCS7_type_is_signed(const PKCS7 *p7); // PKCS7_type_is_signedAndEnveloped returns zero. OPENSSL_EXPORT int PKCS7_type_is_signedAndEnveloped(const PKCS7 *p7); // PKCS7_DETACHED indicates that the PKCS#7 file specifies its data externally. #define PKCS7_DETACHED 0x40 // The following flags cause |PKCS7_sign| to fail. 
#define PKCS7_TEXT 0x1 #define PKCS7_NOCERTS 0x2 #define PKCS7_NOSIGS 0x4 #define PKCS7_NOCHAIN 0x8 #define PKCS7_NOINTERN 0x10 #define PKCS7_NOVERIFY 0x20 #define PKCS7_BINARY 0x80 #define PKCS7_NOATTR 0x100 #define PKCS7_NOSMIMECAP 0x200 #define PKCS7_STREAM 0x1000 #define PKCS7_PARTIAL 0x4000 // PKCS7_sign can operate in two modes to provide some backwards compatibility: // // The first mode assembles |certs| into a PKCS#7 signed data ContentInfo with // external data and no signatures. It returns a newly-allocated |PKCS7| on // success or NULL on error. |sign_cert| and |pkey| must be NULL. |data| is // ignored. |flags| must be equal to |PKCS7_DETACHED|. Additionally, // certificates in SignedData structures are unordered. The order of |certs| // will not be preserved. // // The second mode generates a detached RSA SHA-256 signature of |data| using // |pkey| and produces a PKCS#7 SignedData structure containing it. |certs| // must be NULL and |flags| must be exactly |PKCS7_NOATTR | PKCS7_BINARY | // PKCS7_NOCERTS | PKCS7_DETACHED|. // // Note this function only implements a subset of the corresponding OpenSSL // function. It is provided for backwards compatibility only. OPENSSL_EXPORT PKCS7 *PKCS7_sign(X509 *sign_cert, EVP_PKEY *pkey, STACK_OF(X509) *certs, BIO *data, int flags); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(PKCS7, PKCS7_free) BSSL_NAMESPACE_END } // extern C++ #endif #define PKCS7_R_BAD_PKCS7_VERSION 100 #define PKCS7_R_NOT_PKCS7_SIGNED_DATA 101 #define PKCS7_R_NO_CERTIFICATES_INCLUDED 102 #define PKCS7_R_NO_CRLS_INCLUDED 103 #endif // OPENSSL_HEADER_PKCS7_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_pkcs8.h ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). 
You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_PKCS8_H #define OPENSSL_HEADER_PKCS8_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_x509.h" #if defined(__cplusplus) extern "C" { #endif // PKCS8_encrypt serializes and encrypts a PKCS8_PRIV_KEY_INFO with PBES1 or // PBES2 as defined in PKCS #5. Only pbeWithSHAAnd128BitRC4, // pbeWithSHAAnd3-KeyTripleDES-CBC and pbeWithSHA1And40BitRC2, defined in PKCS // #12, and PBES2, are supported. PBES2 is selected by setting |cipher| and // passing -1 for |pbe_nid|. Otherwise, PBES1 is used and |cipher| is ignored. // // |pass| is used as the password. If a PBES1 scheme from PKCS #12 is used, this // will be converted to a raw byte string as specified in B.1 of PKCS #12. If // |pass| is NULL, it will be encoded as the empty byte string rather than two // zero bytes, the PKCS #12 encoding of the empty string. // // If |salt| is NULL, a random salt of |salt_len| bytes is generated. If // |salt_len| is zero, a default salt length is used instead. // // The resulting structure is stored in an |X509_SIG| which must be freed by the // caller. OPENSSL_EXPORT X509_SIG *PKCS8_encrypt(int pbe_nid, const EVP_CIPHER *cipher, const char *pass, int pass_len, const uint8_t *salt, size_t salt_len, int iterations, PKCS8_PRIV_KEY_INFO *p8inf); // PKCS8_marshal_encrypted_private_key behaves like |PKCS8_encrypt| but encrypts // an |EVP_PKEY| and writes the serialized EncryptedPrivateKeyInfo to |out|. It // returns one on success and zero on error. OPENSSL_EXPORT int PKCS8_marshal_encrypted_private_key( CBB *out, int pbe_nid, const EVP_CIPHER *cipher, const char *pass, size_t pass_len, const uint8_t *salt, size_t salt_len, int iterations, const EVP_PKEY *pkey); // PKCS8_decrypt decrypts and decodes a PKCS8_PRIV_KEY_INFO with PBES1 or PBES2 // as defined in PKCS #5. 
Only pbeWithSHAAnd128BitRC4, // pbeWithSHAAnd3-KeyTripleDES-CBC and pbeWithSHA1And40BitRC2, and PBES2, // defined in PKCS #12, are supported. // // |pass| is used as the password. If a PBES1 scheme from PKCS #12 is used, this // will be converted to a raw byte string as specified in B.1 of PKCS #12. If // |pass| is NULL, it will be encoded as the empty byte string rather than two // zero bytes, the PKCS #12 encoding of the empty string. // // The resulting structure must be freed by the caller. OPENSSL_EXPORT PKCS8_PRIV_KEY_INFO *PKCS8_decrypt(X509_SIG *pkcs8, const char *pass, int pass_len); // PKCS8_parse_encrypted_private_key behaves like |PKCS8_decrypt| but it parses // the EncryptedPrivateKeyInfo structure from |cbs| and advances |cbs|. It // returns a newly-allocated |EVP_PKEY| on success and zero on error. OPENSSL_EXPORT EVP_PKEY *PKCS8_parse_encrypted_private_key(CBS *cbs, const char *pass, size_t pass_len); // PKCS12_get_key_and_certs parses a PKCS#12 structure from |in|, authenticates // and decrypts it using |password|, sets |*out_key| to the included private // key and appends the included certificates to |out_certs|. It returns one on // success and zero on error. The caller takes ownership of the outputs. // Any friendlyName attributes (RFC 2985) in the PKCS#12 structure will be // returned on the |X509| objects as aliases. See also |X509_alias_get0|. OPENSSL_EXPORT int PKCS12_get_key_and_certs(EVP_PKEY **out_key, STACK_OF(X509) *out_certs, CBS *in, const char *password); // Deprecated functions. // PKCS12_PBE_add does nothing. It exists for compatibility with OpenSSL. OPENSSL_EXPORT void PKCS12_PBE_add(void); // d2i_PKCS12 is a dummy function that copies |*ber_bytes| into a // |PKCS12| structure. The |out_p12| argument should be NULL(✝). On exit, // |*ber_bytes| will be advanced by |ber_len|. It returns a fresh |PKCS12| // structure or NULL on error. // // Note: unlike other d2i functions, |d2i_PKCS12| will always consume |ber_len| // bytes. 
// // (✝) If |out_p12| is not NULL and the function is successful, |*out_p12| will // be freed if not NULL itself and the result will be written to |*out_p12|. // New code should not depend on this. OPENSSL_EXPORT PKCS12 *d2i_PKCS12(PKCS12 **out_p12, const uint8_t **ber_bytes, size_t ber_len); // d2i_PKCS12_bio acts like |d2i_PKCS12| but reads from a |BIO|. OPENSSL_EXPORT PKCS12* d2i_PKCS12_bio(BIO *bio, PKCS12 **out_p12); // d2i_PKCS12_fp acts like |d2i_PKCS12| but reads from a |FILE|. OPENSSL_EXPORT PKCS12* d2i_PKCS12_fp(FILE *fp, PKCS12 **out_p12); // i2d_PKCS12 is a dummy function which copies the contents of |p12|. If |out| // is not NULL then the result is written to |*out| and |*out| is advanced just // past the output. It returns the number of bytes in the result, whether // written or not, or a negative value on error. OPENSSL_EXPORT int i2d_PKCS12(const PKCS12 *p12, uint8_t **out); // i2d_PKCS12_bio writes the contents of |p12| to |bio|. It returns one on // success and zero on error. OPENSSL_EXPORT int i2d_PKCS12_bio(BIO *bio, const PKCS12 *p12); // i2d_PKCS12_fp writes the contents of |p12| to |fp|. It returns one on // success and zero on error. OPENSSL_EXPORT int i2d_PKCS12_fp(FILE *fp, const PKCS12 *p12); // PKCS12_parse calls |PKCS12_get_key_and_certs| on the ASN.1 data stored in // |p12|. The |out_pkey| and |out_cert| arguments must not be NULL and, on // successful exit, the private key and matching certificate will be stored in // them. The |out_ca_certs| argument may be NULL but, if not, then any extra // certificates will be appended to |*out_ca_certs|. If |*out_ca_certs| is NULL // then it will be set to a freshly allocated stack containing the extra certs. // // Note if |p12| does not contain a private key, both |*out_pkey| and // |*out_cert| will be set to NULL and all certificates will be returned via // |*out_ca_certs|. 
Also note this function differs from OpenSSL in that extra // certificates are returned in the order they appear in the file. OpenSSL 1.1.1 // returns them in reverse order, but this will be fixed in OpenSSL 3.0. // // It returns one on success and zero on error. // // Use |PKCS12_get_key_and_certs| instead. OPENSSL_EXPORT int PKCS12_parse(const PKCS12 *p12, const char *password, EVP_PKEY **out_pkey, X509 **out_cert, STACK_OF(X509) **out_ca_certs); // PKCS12_verify_mac returns one if |password| is a valid password for |p12| // and zero otherwise. Since |PKCS12_parse| doesn't take a length parameter, // it's not actually possible to use a non-NUL-terminated password to actually // get anything from a |PKCS12|. Thus |password| and |password_len| may be // |NULL| and zero, respectively, or else |password_len| may be -1, or else // |password[password_len]| must be zero and no other NUL bytes may appear in // |password|. If the |password_len| checks fail, zero is returned // immediately. OPENSSL_EXPORT int PKCS12_verify_mac(const PKCS12 *p12, const char *password, int password_len); // PKCS12_DEFAULT_ITER is the default number of KDF iterations used when // creating a |PKCS12| object. #define PKCS12_DEFAULT_ITER 2048 // PKCS12_create returns a newly-allocated |PKCS12| object containing |pkey|, // |cert|, and |chain|, encrypted with the specified password. |name|, if not // NULL, specifies a user-friendly name to encode with the key and // certificate. The key and certificates are encrypted with |key_nid| and // |cert_nid|, respectively, using |iterations| iterations in the // KDF. |mac_iterations| is the number of iterations when deriving the MAC // key. |key_type| must be zero. |pkey| and |cert| may be NULL to omit them. // // Each of |key_nid|, |cert_nid|, |iterations|, and |mac_iterations| may be zero // to use defaults, which are |NID_pbe_WithSHA1And3_Key_TripleDES_CBC|, // |NID_pbe_WithSHA1And40BitRC2_CBC|, |PKCS12_DEFAULT_ITER|, and one, // respectively. 
// // |key_nid| or |cert_nid| may also be -1 to disable encryption of the key or // certificate, respectively. This option is not recommended and is only // implemented for compatibility with external packages. Note the output still // requires a password for the MAC. Unencrypted keys in PKCS#12 are also not // widely supported and may not open in other implementations. // // If |cert| or |chain| have associated aliases (see |X509_alias_set1|), they // will be included in the output as friendlyName attributes (RFC 2985). It is // an error to specify both an alias on |cert| and a non-NULL |name| // parameter. OPENSSL_EXPORT PKCS12 *PKCS12_create(const char *password, const char *name, const EVP_PKEY *pkey, X509 *cert, const STACK_OF(X509) *chain, int key_nid, int cert_nid, int iterations, int mac_iterations, int key_type); // PKCS12_free frees |p12| and its contents. OPENSSL_EXPORT void PKCS12_free(PKCS12 *p12); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(PKCS12, PKCS12_free) BORINGSSL_MAKE_DELETER(PKCS8_PRIV_KEY_INFO, PKCS8_PRIV_KEY_INFO_free) BSSL_NAMESPACE_END } // extern C++ #endif #define PKCS8_R_BAD_PKCS12_DATA 100 #define PKCS8_R_BAD_PKCS12_VERSION 101 #define PKCS8_R_CIPHER_HAS_NO_OBJECT_IDENTIFIER 102 #define PKCS8_R_CRYPT_ERROR 103 #define PKCS8_R_DECODE_ERROR 104 #define PKCS8_R_ENCODE_ERROR 105 #define PKCS8_R_ENCRYPT_ERROR 106 #define PKCS8_R_ERROR_SETTING_CIPHER_PARAMS 107 #define PKCS8_R_INCORRECT_PASSWORD 108 #define PKCS8_R_KEYGEN_FAILURE 109 #define PKCS8_R_KEY_GEN_ERROR 110 #define PKCS8_R_METHOD_NOT_SUPPORTED 111 #define PKCS8_R_MISSING_MAC 112 #define PKCS8_R_MULTIPLE_PRIVATE_KEYS_IN_PKCS12 113 #define PKCS8_R_PKCS12_PUBLIC_KEY_INTEGRITY_NOT_SUPPORTED 114 #define PKCS8_R_PKCS12_TOO_DEEPLY_NESTED 115 #define PKCS8_R_PRIVATE_KEY_DECODE_ERROR 116 #define PKCS8_R_PRIVATE_KEY_ENCODE_ERROR 117 #define PKCS8_R_TOO_LONG 118 #define PKCS8_R_UNKNOWN_ALGORITHM 119 #define PKCS8_R_UNKNOWN_CIPHER 120 
#define PKCS8_R_UNKNOWN_CIPHER_ALGORITHM 121 #define PKCS8_R_UNKNOWN_DIGEST 122 #define PKCS8_R_UNKNOWN_HASH 123 #define PKCS8_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM 124 #define PKCS8_R_UNSUPPORTED_KEYLENGTH 125 #define PKCS8_R_UNSUPPORTED_SALT_TYPE 126 #define PKCS8_R_UNSUPPORTED_CIPHER 127 #define PKCS8_R_UNSUPPORTED_KEY_DERIVATION_FUNCTION 128 #define PKCS8_R_BAD_ITERATION_COUNT 129 #define PKCS8_R_UNSUPPORTED_PRF 130 #define PKCS8_R_INVALID_CHARACTERS 131 #define PKCS8_R_UNSUPPORTED_OPTIONS 132 #define PKCS8_R_AMBIGUOUS_FRIENDLY_NAME 133 #endif // OPENSSL_HEADER_PKCS8_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_poly1305.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_POLY1305_H #define OPENSSL_HEADER_POLY1305_H #include "CNIOBoringSSL_base.h" #ifdef __cplusplus extern "C" { #endif typedef uint8_t poly1305_state[512]; // CRYPTO_poly1305_init sets up |state| so that it can be used to calculate an // authentication tag with the one-time key |key|. Note that |key| is a // one-time key and therefore there is no `reset' method because that would // enable several messages to be authenticated with the same key. 
OPENSSL_EXPORT void CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]); // CRYPTO_poly1305_update processes |in_len| bytes from |in|. It can be called // zero or more times after poly1305_init. OPENSSL_EXPORT void CRYPTO_poly1305_update(poly1305_state *state, const uint8_t *in, size_t in_len); // CRYPTO_poly1305_finish completes the poly1305 calculation and writes a 16 // byte authentication tag to |mac|. OPENSSL_EXPORT void CRYPTO_poly1305_finish(poly1305_state *state, uint8_t mac[16]); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_POLY1305_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_pool.h ================================================ /* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_POOL_H #define OPENSSL_HEADER_POOL_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_stack.h" #if defined(__cplusplus) extern "C" { #endif // Buffers and buffer pools. // // |CRYPTO_BUFFER|s are simply reference-counted blobs. A |CRYPTO_BUFFER_POOL| // is an intern table for |CRYPTO_BUFFER|s. This allows for a single copy of a // given blob to be kept in memory and referenced from multiple places. 
DEFINE_STACK_OF(CRYPTO_BUFFER) // CRYPTO_BUFFER_POOL_new returns a freshly allocated |CRYPTO_BUFFER_POOL| or // NULL on error. OPENSSL_EXPORT CRYPTO_BUFFER_POOL* CRYPTO_BUFFER_POOL_new(void); // CRYPTO_BUFFER_POOL_free frees |pool|, which must be empty. OPENSSL_EXPORT void CRYPTO_BUFFER_POOL_free(CRYPTO_BUFFER_POOL *pool); // CRYPTO_BUFFER_new returns a |CRYPTO_BUFFER| containing a copy of |data|, or // else NULL on error. If |pool| is not NULL then the returned value may be a // reference to a previously existing |CRYPTO_BUFFER| that contained the same // data. Otherwise, the returned, fresh |CRYPTO_BUFFER| will be added to the // pool. OPENSSL_EXPORT CRYPTO_BUFFER *CRYPTO_BUFFER_new(const uint8_t *data, size_t len, CRYPTO_BUFFER_POOL *pool); // CRYPTO_BUFFER_alloc creates an unpooled |CRYPTO_BUFFER| of the given size and // writes the underlying data pointer to |*out_data|. It returns NULL on error. // // After calling this function, |len| bytes of contents must be written to // |out_data| before passing the returned pointer to any other BoringSSL // functions. Once initialized, the |CRYPTO_BUFFER| should be treated as // immutable. OPENSSL_EXPORT CRYPTO_BUFFER *CRYPTO_BUFFER_alloc(uint8_t **out_data, size_t len); // CRYPTO_BUFFER_new_from_CBS acts the same as |CRYPTO_BUFFER_new|. OPENSSL_EXPORT CRYPTO_BUFFER *CRYPTO_BUFFER_new_from_CBS( const CBS *cbs, CRYPTO_BUFFER_POOL *pool); // CRYPTO_BUFFER_new_from_static_data_unsafe behaves like |CRYPTO_BUFFER_new| // but does not copy |data|. |data| must be immutable and last for the lifetime // of the address space. OPENSSL_EXPORT CRYPTO_BUFFER *CRYPTO_BUFFER_new_from_static_data_unsafe( const uint8_t *data, size_t len, CRYPTO_BUFFER_POOL *pool); // CRYPTO_BUFFER_free decrements the reference count of |buf|. If there are no // other references, or if the only remaining reference is from a pool, then // |buf| will be freed. 
OPENSSL_EXPORT void CRYPTO_BUFFER_free(CRYPTO_BUFFER *buf); // CRYPTO_BUFFER_up_ref increments the reference count of |buf| and returns // one. OPENSSL_EXPORT int CRYPTO_BUFFER_up_ref(CRYPTO_BUFFER *buf); // CRYPTO_BUFFER_data returns a pointer to the data contained in |buf|. OPENSSL_EXPORT const uint8_t *CRYPTO_BUFFER_data(const CRYPTO_BUFFER *buf); // CRYPTO_BUFFER_len returns the length, in bytes, of the data contained in // |buf|. OPENSSL_EXPORT size_t CRYPTO_BUFFER_len(const CRYPTO_BUFFER *buf); // CRYPTO_BUFFER_init_CBS initialises |out| to point at the data from |buf|. OPENSSL_EXPORT void CRYPTO_BUFFER_init_CBS(const CRYPTO_BUFFER *buf, CBS *out); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(CRYPTO_BUFFER_POOL, CRYPTO_BUFFER_POOL_free) BORINGSSL_MAKE_DELETER(CRYPTO_BUFFER, CRYPTO_BUFFER_free) BORINGSSL_MAKE_UP_REF(CRYPTO_BUFFER, CRYPTO_BUFFER_up_ref) BSSL_NAMESPACE_END } // extern C++ #endif #endif // OPENSSL_HEADER_POOL_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_posix_time.h ================================================ /* Copyright 2022 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef OPENSSL_HEADER_POSIX_TIME_H #define OPENSSL_HEADER_POSIX_TIME_H #include "CNIOBoringSSL_base.h" #include <time.h> #if defined(__cplusplus) extern "C" { #endif // Time functions. // OPENSSL_posix_to_tm converts a int64_t POSIX time value in |time|, which must // be in the range of year 0000 to 9999, to a broken out time value in |tm|. It // returns one on success and zero on error. OPENSSL_EXPORT int OPENSSL_posix_to_tm(int64_t time, struct tm *out_tm); // OPENSSL_tm_to_posix converts a time value between the years 0 and 9999 in // |tm| to a POSIX time value in |out|. One is returned on success, zero is // returned on failure. It is a failure if |tm| contains out of range values. OPENSSL_EXPORT int OPENSSL_tm_to_posix(const struct tm *tm, int64_t *out); // OPENSSL_timegm converts a time value between the years 0 and 9999 in |tm| to // a time_t value in |out|. One is returned on success, zero is returned on // failure. It is a failure if the converted time can not be represented in a // time_t, or if the tm contains out of range values. OPENSSL_EXPORT int OPENSSL_timegm(const struct tm *tm, time_t *out); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_POSIX_TIME_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_rand.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_RAND_H #define OPENSSL_HEADER_RAND_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // Random number generation. // RAND_bytes writes |len| bytes of random data to |buf| and returns one. In the // event that sufficient random data can not be obtained, |abort| is called. OPENSSL_EXPORT int RAND_bytes(uint8_t *buf, size_t len); // Obscure functions. #if !defined(OPENSSL_WINDOWS) // RAND_enable_fork_unsafe_buffering indicates that clones of the address space, // e.g. via |fork|, will never call into BoringSSL. It may be used to disable // BoringSSL's more expensive fork-safety measures. However, calling this // function and then using BoringSSL across |fork| calls will leak secret keys. // |fd| must be -1. // // WARNING: This function affects BoringSSL for the entire address space. Thus // this function should never be called by library code, only by code with // global knowledge of the application's use of BoringSSL. // // Do not use this function unless a performance issue was measured with the // default behavior. BoringSSL can efficiently detect forks on most platforms, // in which case this function is a no-op and is unnecessary. In particular, // Linux kernel versions 4.14 or later provide |MADV_WIPEONFORK|. Future // versions of BoringSSL will remove this functionality when older kernels are // sufficiently rare. // // This function has an unusual name because it historically controlled internal // buffers, but no longer does. OPENSSL_EXPORT void RAND_enable_fork_unsafe_buffering(int fd); // RAND_disable_fork_unsafe_buffering restores BoringSSL's default fork-safety // protections. 
See also |RAND_enable_fork_unsafe_buffering|. OPENSSL_EXPORT void RAND_disable_fork_unsafe_buffering(void); #endif #if defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) // RAND_reset_for_fuzzing resets the fuzzer-only deterministic RNG. This // function is only defined in the fuzzer-only build configuration. OPENSSL_EXPORT void RAND_reset_for_fuzzing(void); #endif // RAND_get_system_entropy_for_custom_prng writes |len| bytes of random data // from a system entropy source to |buf|. The maximum length of entropy which // may be requested is 256 bytes. If more than 256 bytes of data is requested, // or if sufficient random data can not be obtained, |abort| is called. // |RAND_bytes| should normally be used instead of this function. This function // should only be used for seed values or where |malloc| should not be called // from BoringSSL. This function is not FIPS compliant. OPENSSL_EXPORT void RAND_get_system_entropy_for_custom_prng(uint8_t *buf, size_t len); // Deprecated functions // RAND_pseudo_bytes is a wrapper around |RAND_bytes|. OPENSSL_EXPORT int RAND_pseudo_bytes(uint8_t *buf, size_t len); // RAND_seed reads a single byte of random data to ensure that any file // descriptors etc are opened. OPENSSL_EXPORT void RAND_seed(const void *buf, int num); // RAND_load_file returns a nonnegative number. OPENSSL_EXPORT int RAND_load_file(const char *path, long num); // RAND_file_name returns NULL. OPENSSL_EXPORT const char *RAND_file_name(char *buf, size_t num); // RAND_add does nothing. OPENSSL_EXPORT void RAND_add(const void *buf, int num, double entropy); // RAND_egd returns 255. OPENSSL_EXPORT int RAND_egd(const char *); // RAND_poll returns one. OPENSSL_EXPORT int RAND_poll(void); // RAND_status returns one. OPENSSL_EXPORT int RAND_status(void); // RAND_cleanup does nothing. OPENSSL_EXPORT void RAND_cleanup(void); // rand_meth_st is typedefed to |RAND_METHOD| in base.h. It isn't used; it // exists only to be the return type of |RAND_SSLeay|. 
It's // external so that variables of this type can be initialized. struct rand_meth_st { void (*seed) (const void *buf, int num); int (*bytes) (uint8_t *buf, size_t num); void (*cleanup) (void); void (*add) (const void *buf, int num, double entropy); int (*pseudorand) (uint8_t *buf, size_t num); int (*status) (void); }; // RAND_SSLeay returns a pointer to a dummy |RAND_METHOD|. OPENSSL_EXPORT RAND_METHOD *RAND_SSLeay(void); // RAND_OpenSSL returns a pointer to a dummy |RAND_METHOD|. OPENSSL_EXPORT RAND_METHOD *RAND_OpenSSL(void); // RAND_get_rand_method returns |RAND_SSLeay()|. OPENSSL_EXPORT const RAND_METHOD *RAND_get_rand_method(void); // RAND_set_rand_method returns one. OPENSSL_EXPORT int RAND_set_rand_method(const RAND_METHOD *); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_RAND_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_rc4.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_RC4_H #define OPENSSL_HEADER_RC4_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // RC4. struct rc4_key_st { uint32_t x, y; uint32_t data[256]; } /* RC4_KEY */; // RC4_set_key performs an RC4 key schedule and initialises |rc4key| with |len| // bytes of key material from |key|. OPENSSL_EXPORT void RC4_set_key(RC4_KEY *rc4key, unsigned len, const uint8_t *key); // RC4 encrypts (or decrypts, it's the same with RC4) |len| bytes from |in| to // |out|. OPENSSL_EXPORT void RC4(RC4_KEY *key, size_t len, const uint8_t *in, uint8_t *out); // Deprecated functions. // RC4_options returns the string "rc4(ptr,int)". 
OPENSSL_EXPORT const char *RC4_options(void); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_RC4_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_ripemd.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_RIPEMD_H #define OPENSSL_HEADER_RIPEMD_H #include "CNIOBoringSSL_base.h" #ifdef __cplusplus extern "C" { #endif # define RIPEMD160_CBLOCK 64 # define RIPEMD160_LBLOCK (RIPEMD160_CBLOCK/4) # define RIPEMD160_DIGEST_LENGTH 20 struct RIPEMD160state_st { uint32_t h[5]; uint32_t Nl, Nh; uint8_t data[RIPEMD160_CBLOCK]; unsigned num; }; // RIPEMD160_Init initialises |ctx| and returns one. OPENSSL_EXPORT int RIPEMD160_Init(RIPEMD160_CTX *ctx); // RIPEMD160_Update adds |len| bytes from |data| to |ctx| and returns one. OPENSSL_EXPORT int RIPEMD160_Update(RIPEMD160_CTX *ctx, const void *data, size_t len); // RIPEMD160_Final adds the final padding to |ctx| and writes the resulting // digest to |out|, which must have at least |RIPEMD160_DIGEST_LENGTH| bytes of // space. It returns one. OPENSSL_EXPORT int RIPEMD160_Final(uint8_t out[RIPEMD160_DIGEST_LENGTH], RIPEMD160_CTX *ctx); // RIPEMD160 writes the digest of |len| bytes from |data| to |out| and returns // |out|. There must be at least |RIPEMD160_DIGEST_LENGTH| bytes of space in // |out|. OPENSSL_EXPORT uint8_t *RIPEMD160(const uint8_t *data, size_t len, uint8_t out[RIPEMD160_DIGEST_LENGTH]); // RIPEMD160_Transform is a low-level function that performs a single, // RIPEMD160 block transformation using the state from |ctx| and 64 bytes from // |block|. 
OPENSSL_EXPORT void RIPEMD160_Transform(RIPEMD160_CTX *ctx, const uint8_t block[RIPEMD160_CBLOCK]); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_RIPEMD_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_rsa.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_RSA_H #define OPENSSL_HEADER_RSA_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_engine.h" #include "CNIOBoringSSL_ex_data.h" #include "CNIOBoringSSL_thread.h" #if defined(__cplusplus) extern "C" { #endif // rsa.h contains functions for handling encryption and signature using RSA. // Allocation and destruction. // // An |RSA| object represents a public or private RSA key. A given object may be // used concurrently on multiple threads by non-mutating functions, provided no // other thread is concurrently calling a mutating function. Unless otherwise // documented, functions which take a |const| pointer are non-mutating and // functions which take a non-|const| pointer are mutating. // RSA_new_public_key returns a new |RSA| object containing a public key with // the specified parameters, or NULL on error or invalid input. OPENSSL_EXPORT RSA *RSA_new_public_key(const BIGNUM *n, const BIGNUM *e); // RSA_new_private_key returns a new |RSA| object containing a private key with // the specified parameters, or NULL on error or invalid input. All parameters // are mandatory and may not be NULL. // // This function creates standard RSA private keys with CRT parameters. 
OPENSSL_EXPORT RSA *RSA_new_private_key(const BIGNUM *n, const BIGNUM *e, const BIGNUM *d, const BIGNUM *p, const BIGNUM *q, const BIGNUM *dmp1, const BIGNUM *dmq1, const BIGNUM *iqmp); // RSA_new returns a new, empty |RSA| object or NULL on error. Prefer using // |RSA_new_public_key| or |RSA_new_private_key| to import an RSA key. OPENSSL_EXPORT RSA *RSA_new(void); // RSA_new_method acts the same as |RSA_new| but takes an explicit |ENGINE|. OPENSSL_EXPORT RSA *RSA_new_method(const ENGINE *engine); // RSA_free decrements the reference count of |rsa| and frees it if the // reference count drops to zero. OPENSSL_EXPORT void RSA_free(RSA *rsa); // RSA_up_ref increments the reference count of |rsa| and returns one. It does // not mutate |rsa| for thread-safety purposes and may be used concurrently. OPENSSL_EXPORT int RSA_up_ref(RSA *rsa); // Properties. // OPENSSL_RSA_MAX_MODULUS_BITS is the maximum supported RSA modulus, in bits. // // TODO(davidben): Reduce this to 8192. #define OPENSSL_RSA_MAX_MODULUS_BITS 16384 // RSA_bits returns the size of |rsa|, in bits. OPENSSL_EXPORT unsigned RSA_bits(const RSA *rsa); // RSA_get0_n returns |rsa|'s public modulus. OPENSSL_EXPORT const BIGNUM *RSA_get0_n(const RSA *rsa); // RSA_get0_e returns |rsa|'s public exponent. OPENSSL_EXPORT const BIGNUM *RSA_get0_e(const RSA *rsa); // RSA_get0_d returns |rsa|'s private exponent. If |rsa| is a public key, this // value will be NULL. OPENSSL_EXPORT const BIGNUM *RSA_get0_d(const RSA *rsa); // RSA_get0_p returns |rsa|'s first private prime factor. If |rsa| is a public // key or lacks its prime factors, this value will be NULL. OPENSSL_EXPORT const BIGNUM *RSA_get0_p(const RSA *rsa); // RSA_get0_q returns |rsa|'s second private prime factor. If |rsa| is a public // key or lacks its prime factors, this value will be NULL. OPENSSL_EXPORT const BIGNUM *RSA_get0_q(const RSA *rsa); // RSA_get0_dmp1 returns d (mod p-1) for |rsa|. 
If |rsa| is a public key or // lacks CRT parameters, this value will be NULL. OPENSSL_EXPORT const BIGNUM *RSA_get0_dmp1(const RSA *rsa); // RSA_get0_dmq1 returns d (mod q-1) for |rsa|. If |rsa| is a public key or // lacks CRT parameters, this value will be NULL. OPENSSL_EXPORT const BIGNUM *RSA_get0_dmq1(const RSA *rsa); // RSA_get0_iqmp returns q^-1 (mod p). If |rsa| is a public key or lacks CRT // parameters, this value will be NULL. OPENSSL_EXPORT const BIGNUM *RSA_get0_iqmp(const RSA *rsa); // RSA_get0_key sets |*out_n|, |*out_e|, and |*out_d|, if non-NULL, to |rsa|'s // modulus, public exponent, and private exponent, respectively. If |rsa| is a // public key, the private exponent will be set to NULL. OPENSSL_EXPORT void RSA_get0_key(const RSA *rsa, const BIGNUM **out_n, const BIGNUM **out_e, const BIGNUM **out_d); // RSA_get0_factors sets |*out_p| and |*out_q|, if non-NULL, to |rsa|'s prime // factors. If |rsa| is a public key, they will be set to NULL. OPENSSL_EXPORT void RSA_get0_factors(const RSA *rsa, const BIGNUM **out_p, const BIGNUM **out_q); // RSA_get0_crt_params sets |*out_dmp1|, |*out_dmq1|, and |*out_iqmp|, if // non-NULL, to |rsa|'s CRT parameters. These are d (mod p-1), d (mod q-1) and // q^-1 (mod p), respectively. If |rsa| is a public key, each parameter will be // set to NULL. OPENSSL_EXPORT void RSA_get0_crt_params(const RSA *rsa, const BIGNUM **out_dmp1, const BIGNUM **out_dmq1, const BIGNUM **out_iqmp); // Setting individual properties. // // These functions allow setting individual properties of an |RSA| object. This // is typically used with |RSA_new| to construct an RSA key field by field. // Prefer instead to use |RSA_new_public_key| and |RSA_new_private_key|. These // functions defer some initialization to the first use of an |RSA| object. This // means invalid inputs may be caught late. // // TODO(crbug.com/boringssl/316): This deferred initialization also causes // performance problems in multi-threaded applications. 
The preferred APIs // currently have the same issues, but they will initialize eagerly in the // future. // RSA_set0_key sets |rsa|'s modulus, public exponent, and private exponent to // |n|, |e|, and |d| respectively, if non-NULL. On success, it takes ownership // of each argument and returns one. Otherwise, it returns zero. // // |d| may be NULL, but |n| and |e| must either be non-NULL or already // configured on |rsa|. // // It is an error to call this function after |rsa| has been used for a // cryptographic operation. Construct a new |RSA| object instead. OPENSSL_EXPORT int RSA_set0_key(RSA *rsa, BIGNUM *n, BIGNUM *e, BIGNUM *d); // RSA_set0_factors sets |rsa|'s prime factors to |p| and |q|, if non-NULL, and // takes ownership of them. On success, it takes ownership of each argument and // returns one. Otherwise, it returns zero. // // Each argument must either be non-NULL or already configured on |rsa|. // // It is an error to call this function after |rsa| has been used for a // cryptographic operation. Construct a new |RSA| object instead. OPENSSL_EXPORT int RSA_set0_factors(RSA *rsa, BIGNUM *p, BIGNUM *q); // RSA_set0_crt_params sets |rsa|'s CRT parameters to |dmp1|, |dmq1|, and // |iqmp|, if non-NULL, and takes ownership of them. On success, it takes // ownership of its parameters and returns one. Otherwise, it returns zero. // // Each argument must either be non-NULL or already configured on |rsa|. // // It is an error to call this function after |rsa| has been used for a // cryptographic operation. Construct a new |RSA| object instead. OPENSSL_EXPORT int RSA_set0_crt_params(RSA *rsa, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp); // Key generation. // RSA_generate_key_ex generates a new RSA key where the modulus has size // |bits| and the public exponent is |e|. If unsure, |RSA_F4| is a good value // for |e|. If |cb| is not NULL then it is called during the key generation // process. 
In addition to the calls documented for |BN_generate_prime_ex|, it // is called with event=2 when the n'th prime is rejected as unsuitable and // with event=3 when a suitable value for |p| is found. // // It returns one on success or zero on error. OPENSSL_EXPORT int RSA_generate_key_ex(RSA *rsa, int bits, const BIGNUM *e, BN_GENCB *cb); // RSA_generate_key_fips behaves like |RSA_generate_key_ex| but performs // additional checks for FIPS compliance. The public exponent is always 65537 // and |bits| must be either 2048 or 3072. OPENSSL_EXPORT int RSA_generate_key_fips(RSA *rsa, int bits, BN_GENCB *cb); // Encryption / Decryption // // These functions are considered non-mutating for thread-safety purposes and // may be used concurrently. // RSA_PKCS1_PADDING denotes PKCS#1 v1.5 padding. When used with encryption, // this is RSAES-PKCS1-v1_5. When used with signing, this is RSASSA-PKCS1-v1_5. // // WARNING: The RSAES-PKCS1-v1_5 encryption scheme is vulnerable to a // chosen-ciphertext attack. Decrypting attacker-supplied ciphertext with // RSAES-PKCS1-v1_5 may give the attacker control over your private key. This // does not impact the RSASSA-PKCS1-v1_5 signature scheme. See "Chosen // Ciphertext Attacks Against Protocols Based on the RSA Encryption Standard // PKCS #1", Daniel Bleichenbacher, Advances in Cryptology (Crypto '98). #define RSA_PKCS1_PADDING 1 // RSA_NO_PADDING denotes a raw RSA operation. #define RSA_NO_PADDING 3 // RSA_PKCS1_OAEP_PADDING denotes the RSAES-OAEP encryption scheme. #define RSA_PKCS1_OAEP_PADDING 4 // RSA_PKCS1_PSS_PADDING denotes the RSASSA-PSS signature scheme. This value may // not be passed into |RSA_sign_raw|, only |EVP_PKEY_CTX_set_rsa_padding|. See // also |RSA_sign_pss_mgf1| and |RSA_verify_pss_mgf1|. #define RSA_PKCS1_PSS_PADDING 6 // RSA_encrypt encrypts |in_len| bytes from |in| to the public key from |rsa| // and writes, at most, |max_out| bytes of encrypted data to |out|. 
The // |max_out| argument must be, at least, |RSA_size| in order to ensure success. // // It returns 1 on success or zero on error. // // The |padding| argument must be one of the |RSA_*_PADDING| values. If in // doubt, use |RSA_PKCS1_OAEP_PADDING| for new protocols. OPENSSL_EXPORT int RSA_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); // RSA_decrypt decrypts |in_len| bytes from |in| with the private key from // |rsa| and writes, at most, |max_out| bytes of plaintext to |out|. The // |max_out| argument must be, at least, |RSA_size| in order to ensure success. // // It returns 1 on success or zero on error. // // The |padding| argument must be one of the |RSA_*_PADDING| values. If in // doubt, use |RSA_PKCS1_OAEP_PADDING| for new protocols. // // WARNING: Passing |RSA_PKCS1_PADDING| into this function is deprecated and // insecure. RSAES-PKCS1-v1_5 is vulnerable to a chosen-ciphertext attack. // Decrypting attacker-supplied ciphertext with RSAES-PKCS1-v1_5 may give the // attacker control over your private key. See "Chosen Ciphertext Attacks // Against Protocols Based on the RSA Encryption Standard PKCS #1", Daniel // Bleichenbacher, Advances in Cryptology (Crypto '98). // // In some limited cases, such as TLS RSA key exchange, it is possible to // mitigate this flaw with custom, protocol-specific padding logic. This // should be implemented with |RSA_NO_PADDING|, not |RSA_PKCS1_PADDING|. OPENSSL_EXPORT int RSA_decrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); // RSA_public_encrypt encrypts |flen| bytes from |from| to the public key in // |rsa| and writes the encrypted data to |to|. The |to| buffer must have at // least |RSA_size| bytes of space. It returns the number of bytes written, or // -1 on error. The |padding| argument must be one of the |RSA_*_PADDING| // values. If in doubt, use |RSA_PKCS1_OAEP_PADDING| for new protocols. 
// // WARNING: this function is dangerous because it breaks the usual return value // convention. Use |RSA_encrypt| instead. OPENSSL_EXPORT int RSA_public_encrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa, int padding); // RSA_private_decrypt decrypts |flen| bytes from |from| with the private key in // |rsa| and writes the plaintext to |to|. The |to| buffer must have at least // |RSA_size| bytes of space. It returns the number of bytes written, or -1 on // error. The |padding| argument must be one of the |RSA_*_PADDING| values. If // in doubt, use |RSA_PKCS1_OAEP_PADDING| for new protocols. Passing // |RSA_PKCS1_PADDING| into this function is deprecated and insecure. See // |RSA_decrypt|. // // WARNING: this function is dangerous because it breaks the usual return value // convention. Use |RSA_decrypt| instead. OPENSSL_EXPORT int RSA_private_decrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa, int padding); // Signing / Verification // // These functions are considered non-mutating for thread-safety purposes and // may be used concurrently. // RSA_sign signs |digest_len| bytes of digest from |digest| with |rsa| using // RSASSA-PKCS1-v1_5. It writes, at most, |RSA_size(rsa)| bytes to |out|. On // successful return, the actual number of bytes written is written to // |*out_len|. // // The |hash_nid| argument identifies the hash function used to calculate // |digest| and is embedded in the resulting signature. For example, it might be // |NID_sha256|. // // It returns 1 on success and zero on error. // // WARNING: |digest| must be the result of hashing the data to be signed with // |hash_nid|. Passing unhashed inputs will not result in a secure signature // scheme. OPENSSL_EXPORT int RSA_sign(int hash_nid, const uint8_t *digest, size_t digest_len, uint8_t *out, unsigned *out_len, RSA *rsa); // RSA_sign_pss_mgf1 signs |digest_len| bytes from |digest| with the private key // from |rsa| using RSASSA-PSS with MGF1 as the mask generation function. 
It // writes, at most, |max_out| bytes of signature data to |out|. The |max_out| // argument must be, at least, |RSA_size| in order to ensure success. It returns // 1 on success or zero on error. // // The |md| and |mgf1_md| arguments identify the hash used to calculate |digest| // and the MGF1 hash, respectively. If |mgf1_md| is NULL, |md| is // used. // // |salt_len| specifies the expected salt length in bytes. If |salt_len| is -1, // then the salt length is the same as the hash length. If -2, then the salt // length is maximal given the size of |rsa|. If unsure, use -1. // // WARNING: |digest| must be the result of hashing the data to be signed with // |md|. Passing unhashed inputs will not result in a secure signature scheme. OPENSSL_EXPORT int RSA_sign_pss_mgf1(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *digest, size_t digest_len, const EVP_MD *md, const EVP_MD *mgf1_md, int salt_len); // RSA_sign_raw performs the private key portion of computing a signature with // |rsa|. It writes, at most, |max_out| bytes of signature data to |out|. The // |max_out| argument must be, at least, |RSA_size| in order to ensure the // output fits. It returns 1 on success or zero on error. // // If |padding| is |RSA_PKCS1_PADDING|, this function wraps |in| with the // padding portion of RSASSA-PKCS1-v1_5 and then performs the raw private key // operation. The caller is responsible for hashing the input and wrapping it in // a DigestInfo structure. // // If |padding| is |RSA_NO_PADDING|, this function only performs the raw private // key operation, interpreting |in| as a integer modulo n. The caller is // responsible for hashing the input and encoding it for the signature scheme // being implemented. // // WARNING: This function is a building block for a signature scheme, not a // complete one. |in| must be the result of hashing and encoding the data as // needed for the scheme being implemented. 
Passing in arbitrary inputs will not // result in a secure signature scheme. OPENSSL_EXPORT int RSA_sign_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); // RSA_verify verifies that |sig_len| bytes from |sig| are a valid, // RSASSA-PKCS1-v1_5 signature of |digest_len| bytes at |digest| by |rsa|. // // The |hash_nid| argument identifies the hash function used to calculate // |digest| and is embedded in the resulting signature in order to prevent hash // confusion attacks. For example, it might be |NID_sha256|. // // It returns one if the signature is valid and zero otherwise. // // WARNING: this differs from the original, OpenSSL function which additionally // returned -1 on error. // // WARNING: |digest| must be the result of hashing the data to be verified with // |hash_nid|. Passing unhashed input will not result in a secure signature // scheme. OPENSSL_EXPORT int RSA_verify(int hash_nid, const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, RSA *rsa); // RSA_verify_pss_mgf1 verifies that |sig_len| bytes from |sig| are a valid, // RSASSA-PSS signature of |digest_len| bytes at |digest| by |rsa|. It returns // one if the signature is valid and zero otherwise. MGF1 is used as the mask // generation function. // // The |md| and |mgf1_md| arguments identify the hash used to calculate |digest| // and the MGF1 hash, respectively. If |mgf1_md| is NULL, |md| is // used. |salt_len| specifies the expected salt length in bytes. // // If |salt_len| is -1, then the salt length is the same as the hash length. If // -2, then the salt length is recovered and all values accepted. If unsure, use // -1. // // WARNING: |digest| must be the result of hashing the data to be verified with // |md|. Passing unhashed input will not result in a secure signature scheme. 
OPENSSL_EXPORT int RSA_verify_pss_mgf1(RSA *rsa, const uint8_t *digest, size_t digest_len, const EVP_MD *md, const EVP_MD *mgf1_md, int salt_len, const uint8_t *sig, size_t sig_len); // RSA_verify_raw performs the public key portion of verifying |in_len| bytes of // signature from |in| using the public key from |rsa|. On success, it returns // one and writes, at most, |max_out| bytes of output to |out|. The |max_out| // argument must be, at least, |RSA_size| in order to ensure the output fits. On // failure or invalid input, it returns zero. // // If |padding| is |RSA_PKCS1_PADDING|, this function checks the padding portion // of RSASSA-PKCS1-v1_5 and outputs the remainder of the encoded digest. The // caller is responsible for checking the output is a DigestInfo-wrapped digest // of the message. // // If |padding| is |RSA_NO_PADDING|, this function only performs the raw public // key operation. The caller is responsible for checking the output is a valid // result for the signature scheme being implemented. // // WARNING: This function is a building block for a signature scheme, not a // complete one. Checking for arbitrary strings in |out| will not result in a // secure signature scheme. OPENSSL_EXPORT int RSA_verify_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); // RSA_private_encrypt performs the private key portion of computing a signature // with |rsa|. It takes |flen| bytes from |from| as input and writes the result // to |to|. The |to| buffer must have at least |RSA_size| bytes of space. It // returns the number of bytes written, or -1 on error. // // For the interpretation of |padding| and the input, see |RSA_sign_raw|. // // WARNING: This function is a building block for a signature scheme, not a // complete one. See |RSA_sign_raw| for details. // // WARNING: This function is dangerous because it breaks the usual return value // convention. Use |RSA_sign_raw| instead. 
OPENSSL_EXPORT int RSA_private_encrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa, int padding); // RSA_public_decrypt performs the public key portion of verifying |flen| bytes // of signature from |from| using the public key from |rsa|. It writes the // result to |to|, which must have at least |RSA_size| bytes of space. It // returns the number of bytes written, or -1 on error. // // For the interpretation of |padding| and the result, see |RSA_verify_raw|. // // WARNING: This function is a building block for a signature scheme, not a // complete one. See |RSA_verify_raw| for details. // // WARNING: This function is dangerous because it breaks the usual return value // convention. Use |RSA_verify_raw| instead. OPENSSL_EXPORT int RSA_public_decrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa, int padding); // Utility functions. // RSA_size returns the number of bytes in the modulus, which is also the size // of a signature or encrypted value using |rsa|. OPENSSL_EXPORT unsigned RSA_size(const RSA *rsa); // RSA_is_opaque returns one if |rsa| is opaque and doesn't expose its key // material. Otherwise it returns zero. OPENSSL_EXPORT int RSA_is_opaque(const RSA *rsa); // RSAPublicKey_dup allocates a fresh |RSA| and copies the public key from // |rsa| into it. It returns the fresh |RSA| object, or NULL on error. OPENSSL_EXPORT RSA *RSAPublicKey_dup(const RSA *rsa); // RSAPrivateKey_dup allocates a fresh |RSA| and copies the private key from // |rsa| into it. It returns the fresh |RSA| object, or NULL on error. OPENSSL_EXPORT RSA *RSAPrivateKey_dup(const RSA *rsa); // RSA_check_key performs basic validity tests on |rsa|. It returns one if // they pass and zero otherwise. Opaque keys and public keys always pass. If it // returns zero then a more detailed error is available on the error queue. OPENSSL_EXPORT int RSA_check_key(const RSA *rsa); // RSA_check_fips performs public key validity tests on |key|. 
It returns one if // they pass and zero otherwise. Opaque keys always fail. This function does not // mutate |rsa| for thread-safety purposes and may be used concurrently. OPENSSL_EXPORT int RSA_check_fips(RSA *key); // RSA_verify_PKCS1_PSS_mgf1 verifies that |EM| is a correct PSS padding of // |mHash|, where |mHash| is a digest produced by |Hash|. |EM| must point to // exactly |RSA_size(rsa)| bytes of data. The |mgf1Hash| argument specifies the // hash function for generating the mask. If NULL, |Hash| is used. The |sLen| // argument specifies the expected salt length in bytes. If |sLen| is -1 then // the salt length is the same as the hash length. If -2, then the salt length // is recovered and all values accepted. // // If unsure, use -1. // // It returns one on success or zero on error. // // This function implements only the low-level padding logic. Use // |RSA_verify_pss_mgf1| instead. OPENSSL_EXPORT int RSA_verify_PKCS1_PSS_mgf1(const RSA *rsa, const uint8_t *mHash, const EVP_MD *Hash, const EVP_MD *mgf1Hash, const uint8_t *EM, int sLen); // RSA_padding_add_PKCS1_PSS_mgf1 writes a PSS padding of |mHash| to |EM|, // where |mHash| is a digest produced by |Hash|. |RSA_size(rsa)| bytes of // output will be written to |EM|. The |mgf1Hash| argument specifies the hash // function for generating the mask. If NULL, |Hash| is used. The |sLen| // argument specifies the expected salt length in bytes. If |sLen| is -1 then // the salt length is the same as the hash length. If -2, then the salt length // is maximal given the space in |EM|. // // It returns one on success or zero on error. // // This function implements only the low-level padding logic. Use // |RSA_sign_pss_mgf1| instead. OPENSSL_EXPORT int RSA_padding_add_PKCS1_PSS_mgf1(const RSA *rsa, uint8_t *EM, const uint8_t *mHash, const EVP_MD *Hash, const EVP_MD *mgf1Hash, int sLen); // RSA_padding_add_PKCS1_OAEP_mgf1 writes an OAEP padding of |from| to |to| // with the given parameters and hash functions. 
If |md| is NULL then SHA-1 is // used. If |mgf1md| is NULL then the value of |md| is used (which means SHA-1 // if that, in turn, is NULL). // // It returns one on success or zero on error. OPENSSL_EXPORT int RSA_padding_add_PKCS1_OAEP_mgf1( uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len, const uint8_t *param, size_t param_len, const EVP_MD *md, const EVP_MD *mgf1md); // RSA_add_pkcs1_prefix builds a version of |digest| prefixed with the // DigestInfo header for the given hash function and sets |out_msg| to point to // it. On successful return, if |*is_alloced| is one, the caller must release // |*out_msg| with |OPENSSL_free|. OPENSSL_EXPORT int RSA_add_pkcs1_prefix(uint8_t **out_msg, size_t *out_msg_len, int *is_alloced, int hash_nid, const uint8_t *digest, size_t digest_len); // ASN.1 functions. // RSA_parse_public_key parses a DER-encoded RSAPublicKey structure (RFC 8017) // from |cbs| and advances |cbs|. It returns a newly-allocated |RSA| or NULL on // error. OPENSSL_EXPORT RSA *RSA_parse_public_key(CBS *cbs); // RSA_public_key_from_bytes parses |in| as a DER-encoded RSAPublicKey structure // (RFC 8017). It returns a newly-allocated |RSA| or NULL on error. OPENSSL_EXPORT RSA *RSA_public_key_from_bytes(const uint8_t *in, size_t in_len); // RSA_marshal_public_key marshals |rsa| as a DER-encoded RSAPublicKey structure // (RFC 8017) and appends the result to |cbb|. It returns one on success and // zero on failure. OPENSSL_EXPORT int RSA_marshal_public_key(CBB *cbb, const RSA *rsa); // RSA_public_key_to_bytes marshals |rsa| as a DER-encoded RSAPublicKey // structure (RFC 8017) and, on success, sets |*out_bytes| to a newly allocated // buffer containing the result and returns one. Otherwise, it returns zero. The // result should be freed with |OPENSSL_free|. 
OPENSSL_EXPORT int RSA_public_key_to_bytes(uint8_t **out_bytes, size_t *out_len, const RSA *rsa); // RSA_parse_private_key parses a DER-encoded RSAPrivateKey structure (RFC 8017) // from |cbs| and advances |cbs|. It returns a newly-allocated |RSA| or NULL on // error. OPENSSL_EXPORT RSA *RSA_parse_private_key(CBS *cbs); // RSA_private_key_from_bytes parses |in| as a DER-encoded RSAPrivateKey // structure (RFC 8017). It returns a newly-allocated |RSA| or NULL on error. OPENSSL_EXPORT RSA *RSA_private_key_from_bytes(const uint8_t *in, size_t in_len); // RSA_marshal_private_key marshals |rsa| as a DER-encoded RSAPrivateKey // structure (RFC 8017) and appends the result to |cbb|. It returns one on // success and zero on failure. OPENSSL_EXPORT int RSA_marshal_private_key(CBB *cbb, const RSA *rsa); // RSA_private_key_to_bytes marshals |rsa| as a DER-encoded RSAPrivateKey // structure (RFC 8017) and, on success, sets |*out_bytes| to a newly allocated // buffer containing the result and returns one. Otherwise, it returns zero. The // result should be freed with |OPENSSL_free|. OPENSSL_EXPORT int RSA_private_key_to_bytes(uint8_t **out_bytes, size_t *out_len, const RSA *rsa); // Obscure RSA variants. // // These functions allow creating RSA keys with obscure combinations of // parameters. // RSA_new_private_key_no_crt behaves like |RSA_new_private_key| but constructs // an RSA key without CRT coefficients. // // Keys created by this function will be less performant and cannot be // serialized. OPENSSL_EXPORT RSA *RSA_new_private_key_no_crt(const BIGNUM *n, const BIGNUM *e, const BIGNUM *d); // RSA_new_private_key_no_e behaves like |RSA_new_private_key| but constructs an // RSA key without CRT parameters or public exponent. // // Keys created by this function will be less performant, cannot be serialized, // and lack hardening measures that protect against side channels and fault // attacks. 
OPENSSL_EXPORT RSA *RSA_new_private_key_no_e(const BIGNUM *n, const BIGNUM *d); // RSA_new_public_key_large_e behaves like |RSA_new_public_key| but allows any // |e| up to |n|. // // BoringSSL typically bounds public exponents as a denial-of-service // mitigation. Keys created by this function may perform worse than those // created by |RSA_new_public_key|. OPENSSL_EXPORT RSA *RSA_new_public_key_large_e(const BIGNUM *n, const BIGNUM *e); // RSA_new_private_key_large_e behaves like |RSA_new_private_key| but allows any // |e| up to |n|. // // BoringSSL typically bounds public exponents as a denial-of-service // mitigation. Keys created by this function may perform worse than those // created by |RSA_new_private_key|. OPENSSL_EXPORT RSA *RSA_new_private_key_large_e( const BIGNUM *n, const BIGNUM *e, const BIGNUM *d, const BIGNUM *p, const BIGNUM *q, const BIGNUM *dmp1, const BIGNUM *dmq1, const BIGNUM *iqmp); // ex_data functions. // // See |ex_data.h| for details. OPENSSL_EXPORT int RSA_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int RSA_set_ex_data(RSA *rsa, int idx, void *arg); OPENSSL_EXPORT void *RSA_get_ex_data(const RSA *rsa, int idx); // Flags. // RSA_FLAG_OPAQUE specifies that this RSA_METHOD does not expose its key // material. This may be set if, for instance, it is wrapping some other crypto // API, like a platform key store. #define RSA_FLAG_OPAQUE 1 // RSA_FLAG_NO_BLINDING disables blinding of private operations, which is a // dangerous thing to do. This flag is set internally as part of self-tests but // is otherwise impossible to set externally. #define RSA_FLAG_NO_BLINDING 8 // RSA_FLAG_EXT_PKEY is deprecated and ignored. #define RSA_FLAG_EXT_PKEY 0x20 // RSA_FLAG_NO_PUBLIC_EXPONENT indicates that private keys without a public // exponent are allowed. This is an internal constant. Use // |RSA_new_private_key_no_e| to construct such keys. 
#define RSA_FLAG_NO_PUBLIC_EXPONENT 0x40 // RSA_FLAG_LARGE_PUBLIC_EXPONENT indicates that keys with a large public // exponent are allowed. This is an internal constant. Use // |RSA_new_public_key_large_e| and |RSA_new_private_key_large_e| to construct // such keys. #define RSA_FLAG_LARGE_PUBLIC_EXPONENT 0x80 // RSA public exponent values. #define RSA_3 0x3 #define RSA_F4 0x10001 // Deprecated functions. #define RSA_METHOD_FLAG_NO_CHECK RSA_FLAG_OPAQUE // RSA_flags returns the flags for |rsa|. These are a bitwise OR of |RSA_FLAG_*| // constants. OPENSSL_EXPORT int RSA_flags(const RSA *rsa); // RSA_test_flags returns the subset of flags in |flags| which are set in |rsa|. OPENSSL_EXPORT int RSA_test_flags(const RSA *rsa, int flags); // RSA_blinding_on returns one. OPENSSL_EXPORT int RSA_blinding_on(RSA *rsa, BN_CTX *ctx); // RSA_blinding_off does nothing. OPENSSL_EXPORT void RSA_blinding_off(RSA *rsa); // RSA_generate_key behaves like |RSA_generate_key_ex|, which is what you // should use instead. It returns NULL on error, or a newly-allocated |RSA| on // success. This function is provided for compatibility only. The |callback| // and |cb_arg| parameters must be NULL. OPENSSL_EXPORT RSA *RSA_generate_key(int bits, uint64_t e, void *callback, void *cb_arg); // d2i_RSAPublicKey parses a DER-encoded RSAPublicKey structure (RFC 8017) from // |len| bytes at |*inp|, as described in |d2i_SAMPLE|. // // Use |RSA_parse_public_key| instead. OPENSSL_EXPORT RSA *d2i_RSAPublicKey(RSA **out, const uint8_t **inp, long len); // i2d_RSAPublicKey marshals |in| to a DER-encoded RSAPublicKey structure (RFC // 8017), as described in |i2d_SAMPLE|. // // Use |RSA_marshal_public_key| instead. OPENSSL_EXPORT int i2d_RSAPublicKey(const RSA *in, uint8_t **outp); // d2i_RSAPrivateKey parses a DER-encoded RSAPrivateKey structure (RFC 8017) // from |len| bytes at |*inp|, as described in |d2i_SAMPLE|. // // Use |RSA_parse_private_key| instead. 
OPENSSL_EXPORT RSA *d2i_RSAPrivateKey(RSA **out, const uint8_t **inp, long len); // i2d_RSAPrivateKey marshals |in| to a DER-encoded RSAPrivateKey structure (RFC // 8017), as described in |i2d_SAMPLE|. // // Use |RSA_marshal_private_key| instead. OPENSSL_EXPORT int i2d_RSAPrivateKey(const RSA *in, uint8_t **outp); // RSA_padding_add_PKCS1_PSS acts like |RSA_padding_add_PKCS1_PSS_mgf1| but the // |mgf1Hash| parameter of the latter is implicitly set to |Hash|. // // This function implements only the low-level padding logic. Use // |RSA_sign_pss_mgf1| instead. OPENSSL_EXPORT int RSA_padding_add_PKCS1_PSS(const RSA *rsa, uint8_t *EM, const uint8_t *mHash, const EVP_MD *Hash, int sLen); // RSA_verify_PKCS1_PSS acts like |RSA_verify_PKCS1_PSS_mgf1| but the // |mgf1Hash| parameter of the latter is implicitly set to |Hash|. // // This function implements only the low-level padding logic. Use // |RSA_verify_pss_mgf1| instead. OPENSSL_EXPORT int RSA_verify_PKCS1_PSS(const RSA *rsa, const uint8_t *mHash, const EVP_MD *Hash, const uint8_t *EM, int sLen); // RSA_padding_add_PKCS1_OAEP acts like |RSA_padding_add_PKCS1_OAEP_mgf1| but // the |md| and |mgf1md| parameters of the latter are implicitly set to NULL, // which means SHA-1. OPENSSL_EXPORT int RSA_padding_add_PKCS1_OAEP(uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len, const uint8_t *param, size_t param_len); // RSA_print prints a textual representation of |rsa| to |bio|. It returns one // on success or zero otherwise. OPENSSL_EXPORT int RSA_print(BIO *bio, const RSA *rsa, int indent); // RSA_get0_pss_params returns NULL. In OpenSSL, this function retrieves RSA-PSS // parameters associated with |RSA| objects, but BoringSSL does not support // the id-RSASSA-PSS key encoding. OPENSSL_EXPORT const RSA_PSS_PARAMS *RSA_get0_pss_params(const RSA *rsa); // RSA_new_method_no_e returns a newly-allocated |RSA| object backed by // |engine|, with a public modulus of |n| and no known public exponent. 
// // Do not use this function. It exists only to support Conscrypt, whose use // should be replaced with a more sound mechanism. See // https://crbug.com/boringssl/602. OPENSSL_EXPORT RSA *RSA_new_method_no_e(const ENGINE *engine, const BIGNUM *n); struct rsa_meth_st { struct openssl_method_common_st common; void *app_data; int (*init)(RSA *rsa); int (*finish)(RSA *rsa); int (*sign)(int type, const uint8_t *m, unsigned int m_length, uint8_t *sigret, unsigned int *siglen, const RSA *rsa); // These functions mirror the |RSA_*| functions of the same name. int (*sign_raw)(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); int (*decrypt)(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); // private_transform takes a big-endian integer from |in|, calculates the // d'th power of it, modulo the RSA modulus and writes the result as a // big-endian integer to |out|. Both |in| and |out| are |len| bytes long and // |len| is always equal to |RSA_size(rsa)|. If the result of the transform // can be represented in fewer than |len| bytes, then |out| must be zero // padded on the left. // // It returns one on success and zero otherwise. // // RSA decrypt and sign operations will call this, thus an ENGINE might wish // to override it in order to avoid having to implement the padding // functionality demanded by those, higher level, operations. 
int (*private_transform)(RSA *rsa, uint8_t *out, const uint8_t *in, size_t len); int flags; }; #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(RSA, RSA_free) BORINGSSL_MAKE_UP_REF(RSA, RSA_up_ref) BSSL_NAMESPACE_END } // extern C++ #endif #define RSA_R_BAD_ENCODING 100 #define RSA_R_BAD_E_VALUE 101 #define RSA_R_BAD_FIXED_HEADER_DECRYPT 102 #define RSA_R_BAD_PAD_BYTE_COUNT 103 #define RSA_R_BAD_RSA_PARAMETERS 104 #define RSA_R_BAD_SIGNATURE 105 #define RSA_R_BAD_VERSION 106 #define RSA_R_BLOCK_TYPE_IS_NOT_01 107 #define RSA_R_BN_NOT_INITIALIZED 108 #define RSA_R_CANNOT_RECOVER_MULTI_PRIME_KEY 109 #define RSA_R_CRT_PARAMS_ALREADY_GIVEN 110 #define RSA_R_CRT_VALUES_INCORRECT 111 #define RSA_R_DATA_LEN_NOT_EQUAL_TO_MOD_LEN 112 #define RSA_R_DATA_TOO_LARGE 113 #define RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE 114 #define RSA_R_DATA_TOO_LARGE_FOR_MODULUS 115 #define RSA_R_DATA_TOO_SMALL 116 #define RSA_R_DATA_TOO_SMALL_FOR_KEY_SIZE 117 #define RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY 118 #define RSA_R_D_E_NOT_CONGRUENT_TO_1 119 #define RSA_R_EMPTY_PUBLIC_KEY 120 #define RSA_R_ENCODE_ERROR 121 #define RSA_R_FIRST_OCTET_INVALID 122 #define RSA_R_INCONSISTENT_SET_OF_CRT_VALUES 123 #define RSA_R_INTERNAL_ERROR 124 #define RSA_R_INVALID_MESSAGE_LENGTH 125 #define RSA_R_KEY_SIZE_TOO_SMALL 126 #define RSA_R_LAST_OCTET_INVALID 127 #define RSA_R_MODULUS_TOO_LARGE 128 #define RSA_R_MUST_HAVE_AT_LEAST_TWO_PRIMES 129 #define RSA_R_NO_PUBLIC_EXPONENT 130 #define RSA_R_NULL_BEFORE_BLOCK_MISSING 131 #define RSA_R_N_NOT_EQUAL_P_Q 132 #define RSA_R_OAEP_DECODING_ERROR 133 #define RSA_R_ONLY_ONE_OF_P_Q_GIVEN 134 #define RSA_R_OUTPUT_BUFFER_TOO_SMALL 135 #define RSA_R_PADDING_CHECK_FAILED 136 #define RSA_R_PKCS_DECODING_ERROR 137 #define RSA_R_SLEN_CHECK_FAILED 138 #define RSA_R_SLEN_RECOVERY_FAILED 139 #define RSA_R_TOO_LONG 140 #define RSA_R_TOO_MANY_ITERATIONS 141 #define RSA_R_UNKNOWN_ALGORITHM_TYPE 142 #define RSA_R_UNKNOWN_PADDING_TYPE 143 #define 
RSA_R_VALUE_MISSING 144 #define RSA_R_WRONG_SIGNATURE_LENGTH 145 #define RSA_R_PUBLIC_KEY_VALIDATION_FAILED 146 #define RSA_R_D_OUT_OF_RANGE 147 #define RSA_R_BLOCK_TYPE_IS_NOT_02 148 #endif // OPENSSL_HEADER_RSA_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_safestack.h ================================================ /* Copyright 2014 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. */ ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_service_indicator.h ================================================ /* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_SERVICE_INDICATOR_H #define OPENSSL_HEADER_SERVICE_INDICATOR_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // FIPS_service_indicator_before_call and |FIPS_service_indicator_after_call| // both currently return the same local thread counter which is slowly // incremented whenever approved services are called. The // |CALL_SERVICE_AND_CHECK_APPROVED| macro is strongly recommended over calling // these functions directly. // // |FIPS_service_indicator_before_call| is intended to be called immediately // before an approved service, while |FIPS_service_indicator_after_call| should // be called immediately after. If the values returned from these two functions // are not equal, this means that the service called inbetween is deemed to be // approved. If the values are still the same, this means the counter has not // been incremented, and the service called is not approved for FIPS. // // In non-FIPS builds, |FIPS_service_indicator_before_call| always returns zero // and |FIPS_service_indicator_after_call| always returns one. Thus calls always // appear to be approved. This is intended to simplify testing. OPENSSL_EXPORT uint64_t FIPS_service_indicator_before_call(void); OPENSSL_EXPORT uint64_t FIPS_service_indicator_after_call(void); #if defined(__cplusplus) } #if !defined(BORINGSSL_NO_CXX) extern "C++" { // CALL_SERVICE_AND_CHECK_APPROVED runs |func| and sets |approved| to one of the // |FIPSStatus*| values, above, depending on whether |func| invoked an // approved service. The result of |func| becomes the result of this macro. 
#define CALL_SERVICE_AND_CHECK_APPROVED(approved, func) \ [&] { \ bssl::FIPSIndicatorHelper fips_indicator_helper(&approved); \ return func; \ }() BSSL_NAMESPACE_BEGIN enum class FIPSStatus { NOT_APPROVED = 0, APPROVED = 1, }; // FIPSIndicatorHelper records whether the service indicator counter advanced // during its lifetime. class FIPSIndicatorHelper { public: FIPSIndicatorHelper(FIPSStatus *result) : result_(result), before_(FIPS_service_indicator_before_call()) { *result_ = FIPSStatus::NOT_APPROVED; } ~FIPSIndicatorHelper() { uint64_t after = FIPS_service_indicator_after_call(); if (after != before_) { *result_ = FIPSStatus::APPROVED; } } FIPSIndicatorHelper(const FIPSIndicatorHelper&) = delete; FIPSIndicatorHelper &operator=(const FIPSIndicatorHelper &) = delete; private: FIPSStatus *const result_; const uint64_t before_; }; BSSL_NAMESPACE_END } // extern "C++" #endif // !BORINGSSL_NO_CXX #endif // __cplusplus #endif // OPENSSL_HEADER_SERVICE_INDICATOR_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_sha.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_SHA_H #define OPENSSL_HEADER_SHA_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_bcm_public.h" // IWYU pragma: export #if defined(__cplusplus) extern "C" { #endif // The SHA family of hash functions (SHA-1 and SHA-2). // SHA_CBLOCK is the block size of SHA-1. #define SHA_CBLOCK 64 // SHA_DIGEST_LENGTH is the length of a SHA-1 digest. #define SHA_DIGEST_LENGTH 20 // SHA1_Init initialises |sha| and returns one. 
OPENSSL_EXPORT int SHA1_Init(SHA_CTX *sha); // SHA1_Update adds |len| bytes from |data| to |sha| and returns one. OPENSSL_EXPORT int SHA1_Update(SHA_CTX *sha, const void *data, size_t len); // SHA1_Final adds the final padding to |sha| and writes the resulting digest to // |out|, which must have at least |SHA_DIGEST_LENGTH| bytes of space. It // returns one. OPENSSL_EXPORT int SHA1_Final(uint8_t out[SHA_DIGEST_LENGTH], SHA_CTX *sha); // SHA1 writes the digest of |len| bytes from |data| to |out| and returns // |out|. There must be at least |SHA_DIGEST_LENGTH| bytes of space in // |out|. OPENSSL_EXPORT uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t out[SHA_DIGEST_LENGTH]); // SHA1_Transform is a low-level function that performs a single, SHA-1 block // transformation using the state from |sha| and |SHA_CBLOCK| bytes from // |block|. OPENSSL_EXPORT void SHA1_Transform(SHA_CTX *sha, const uint8_t block[SHA_CBLOCK]); // CRYPTO_fips_186_2_prf derives |out_len| bytes from |xkey| using the PRF // defined in FIPS 186-2, Appendix 3.1, with change notice 1 applied. The b // parameter is 160 and seed, XKEY, is also 160 bits. The optional XSEED user // input is all zeros. // // The PRF generates a sequence of 320-bit numbers. Each number is encoded as a // 40-byte string in big-endian and then concatenated to form |out|. If // |out_len| is not a multiple of 40, the result is truncated. This matches the // construction used in Section 7 of RFC 4186 and Section 7 of RFC 4187. // // This PRF is based on SHA-1, a weak hash function, and should not be used // in new protocols. It is provided for compatibility with some legacy EAP // methods. OPENSSL_EXPORT void CRYPTO_fips_186_2_prf( uint8_t *out, size_t out_len, const uint8_t xkey[SHA_DIGEST_LENGTH]); // SHA-224. // SHA224_CBLOCK is the block size of SHA-224. #define SHA224_CBLOCK 64 // SHA224_DIGEST_LENGTH is the length of a SHA-224 digest. #define SHA224_DIGEST_LENGTH 28 // SHA224_Init initialises |sha| and returns 1. 
OPENSSL_EXPORT int SHA224_Init(SHA256_CTX *sha); // SHA224_Update adds |len| bytes from |data| to |sha| and returns 1. OPENSSL_EXPORT int SHA224_Update(SHA256_CTX *sha, const void *data, size_t len); // SHA224_Final adds the final padding to |sha| and writes the resulting digest // to |out|, which must have at least |SHA224_DIGEST_LENGTH| bytes of space. It // returns 1. OPENSSL_EXPORT int SHA224_Final(uint8_t out[SHA224_DIGEST_LENGTH], SHA256_CTX *sha); // SHA224 writes the digest of |len| bytes from |data| to |out| and returns // |out|. There must be at least |SHA224_DIGEST_LENGTH| bytes of space in // |out|. OPENSSL_EXPORT uint8_t *SHA224(const uint8_t *data, size_t len, uint8_t out[SHA224_DIGEST_LENGTH]); // SHA-256. // SHA256_CBLOCK is the block size of SHA-256. #define SHA256_CBLOCK 64 // SHA256_DIGEST_LENGTH is the length of a SHA-256 digest. #define SHA256_DIGEST_LENGTH 32 // SHA256_Init initialises |sha| and returns 1. OPENSSL_EXPORT int SHA256_Init(SHA256_CTX *sha); // SHA256_Update adds |len| bytes from |data| to |sha| and returns 1. OPENSSL_EXPORT int SHA256_Update(SHA256_CTX *sha, const void *data, size_t len); // SHA256_Final adds the final padding to |sha| and writes the resulting digest // to |out|, which must have at least |SHA256_DIGEST_LENGTH| bytes of space. It // returns one on success and zero on programmer error. OPENSSL_EXPORT int SHA256_Final(uint8_t out[SHA256_DIGEST_LENGTH], SHA256_CTX *sha); // SHA256 writes the digest of |len| bytes from |data| to |out| and returns // |out|. There must be at least |SHA256_DIGEST_LENGTH| bytes of space in // |out|. OPENSSL_EXPORT uint8_t *SHA256(const uint8_t *data, size_t len, uint8_t out[SHA256_DIGEST_LENGTH]); // SHA256_Transform is a low-level function that performs a single, SHA-256 // block transformation using the state from |sha| and |SHA256_CBLOCK| bytes // from |block|. 
OPENSSL_EXPORT void SHA256_Transform(SHA256_CTX *sha, const uint8_t block[SHA256_CBLOCK]); // SHA256_TransformBlocks is a low-level function that takes |num_blocks| * // |SHA256_CBLOCK| bytes of data and performs SHA-256 transforms on it to update // |state|. You should not use this function unless you are implementing a // derivative of SHA-256. OPENSSL_EXPORT void SHA256_TransformBlocks(uint32_t state[8], const uint8_t *data, size_t num_blocks); // SHA-384. // SHA384_CBLOCK is the block size of SHA-384. #define SHA384_CBLOCK 128 // SHA384_DIGEST_LENGTH is the length of a SHA-384 digest. #define SHA384_DIGEST_LENGTH 48 // SHA384_Init initialises |sha| and returns 1. OPENSSL_EXPORT int SHA384_Init(SHA512_CTX *sha); // SHA384_Update adds |len| bytes from |data| to |sha| and returns 1. OPENSSL_EXPORT int SHA384_Update(SHA512_CTX *sha, const void *data, size_t len); // SHA384_Final adds the final padding to |sha| and writes the resulting digest // to |out|, which must have at least |SHA384_DIGEST_LENGTH| bytes of space. It // returns one on success and zero on programmer error. OPENSSL_EXPORT int SHA384_Final(uint8_t out[SHA384_DIGEST_LENGTH], SHA512_CTX *sha); // SHA384 writes the digest of |len| bytes from |data| to |out| and returns // |out|. There must be at least |SHA384_DIGEST_LENGTH| bytes of space in // |out|. OPENSSL_EXPORT uint8_t *SHA384(const uint8_t *data, size_t len, uint8_t out[SHA384_DIGEST_LENGTH]); // SHA-512. // SHA512_CBLOCK is the block size of SHA-512. #define SHA512_CBLOCK 128 // SHA512_DIGEST_LENGTH is the length of a SHA-512 digest. #define SHA512_DIGEST_LENGTH 64 // SHA512_Init initialises |sha| and returns 1. OPENSSL_EXPORT int SHA512_Init(SHA512_CTX *sha); // SHA512_Update adds |len| bytes from |data| to |sha| and returns 1. 
OPENSSL_EXPORT int SHA512_Update(SHA512_CTX *sha, const void *data, size_t len); // SHA512_Final adds the final padding to |sha| and writes the resulting digest // to |out|, which must have at least |SHA512_DIGEST_LENGTH| bytes of space. It // returns one on success and zero on programmer error. OPENSSL_EXPORT int SHA512_Final(uint8_t out[SHA512_DIGEST_LENGTH], SHA512_CTX *sha); // SHA512 writes the digest of |len| bytes from |data| to |out| and returns // |out|. There must be at least |SHA512_DIGEST_LENGTH| bytes of space in // |out|. OPENSSL_EXPORT uint8_t *SHA512(const uint8_t *data, size_t len, uint8_t out[SHA512_DIGEST_LENGTH]); // SHA512_Transform is a low-level function that performs a single, SHA-512 // block transformation using the state from |sha| and |SHA512_CBLOCK| bytes // from |block|. OPENSSL_EXPORT void SHA512_Transform(SHA512_CTX *sha, const uint8_t block[SHA512_CBLOCK]); // SHA-512-256 // // See https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf section 5.3.6 #define SHA512_256_DIGEST_LENGTH 32 // SHA512_256_Init initialises |sha| and returns 1. OPENSSL_EXPORT int SHA512_256_Init(SHA512_CTX *sha); // SHA512_256_Update adds |len| bytes from |data| to |sha| and returns 1. OPENSSL_EXPORT int SHA512_256_Update(SHA512_CTX *sha, const void *data, size_t len); // SHA512_256_Final adds the final padding to |sha| and writes the resulting // digest to |out|, which must have at least |SHA512_256_DIGEST_LENGTH| bytes of // space. It returns one on success and zero on programmer error. OPENSSL_EXPORT int SHA512_256_Final(uint8_t out[SHA512_256_DIGEST_LENGTH], SHA512_CTX *sha); // SHA512_256 writes the digest of |len| bytes from |data| to |out| and returns // |out|. There must be at least |SHA512_256_DIGEST_LENGTH| bytes of space in // |out|. 
OPENSSL_EXPORT uint8_t *SHA512_256(const uint8_t *data, size_t len, uint8_t out[SHA512_256_DIGEST_LENGTH]); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_SHA_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_siphash.h ================================================ /* Copyright 2019 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_SIPHASH_H #define OPENSSL_HEADER_SIPHASH_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // SipHash is a fast, secure PRF that is often used for hash tables. // SIPHASH_24 implements SipHash-2-4. See https://131002.net/siphash/siphash.pdf OPENSSL_EXPORT uint64_t SIPHASH_24(const uint64_t key[2], const uint8_t *input, size_t input_len); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_SIPHASH_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_slhdsa.h ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_SLHDSA_H #define OPENSSL_HEADER_SLHDSA_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES is the number of bytes in an // SLH-DSA-SHA2-128s public key. #define SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES 32 // SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES is the number of bytes in an // SLH-DSA-SHA2-128s private key. #define SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES 64 // SLHDSA_SHA2_128S_SIGNATURE_BYTES is the number of bytes in an // SLH-DSA-SHA2-128s signature. #define SLHDSA_SHA2_128S_SIGNATURE_BYTES 7856 // SLHDSA_SHA2_128S_generate_key generates a SLH-DSA-SHA2-128s key pair and // writes the result to |out_public_key| and |out_private_key|. OPENSSL_EXPORT void SLHDSA_SHA2_128S_generate_key( uint8_t out_public_key[SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], uint8_t out_private_key[SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES]); // SLHDSA_SHA2_128S_public_from_private writes the public key corresponding to // |private_key| to |out_public_key|. OPENSSL_EXPORT void SLHDSA_SHA2_128S_public_from_private( uint8_t out_public_key[SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], const uint8_t private_key[SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES]); // SLHDSA_SHA2_128S_sign slowly generates a SLH-DSA-SHA2-128s signature of |msg| // using |private_key| and writes it to |out_signature|. The |context| argument // is also signed over and can be used to include implicit contextual // information that isn't included in |msg|. 
The same value of |context| must be // presented to |SLHDSA_SHA2_128S_verify| in order for the generated signature // to be considered valid. |context| and |context_len| may be |NULL| and 0 to // use an empty context (this is common). It returns 1 on success and 0 if // |context_len| is larger than 255. OPENSSL_EXPORT int SLHDSA_SHA2_128S_sign( uint8_t out_signature[SLHDSA_SHA2_128S_SIGNATURE_BYTES], const uint8_t private_key[SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES], const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t context_len); // SLHDSA_SHA2_128S_verify verifies that |signature| is a valid // SLH-DSA-SHA2-128s signature of |msg| by |public_key|. The value of |context| // must equal the value that was passed to |SLHDSA_SHA2_128S_sign| when the // signature was generated. It returns 1 if the signature is valid and 0 // otherwise. OPENSSL_EXPORT int SLHDSA_SHA2_128S_verify( const uint8_t *signature, size_t signature_len, const uint8_t public_key[SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], const uint8_t *msg, size_t msg_len, const uint8_t *context, size_t context_len); // Prehashed SLH-DSA-SHA2-128s. // // These functions sign the hash of a message. They should generally not be // used. The general functions are perfectly capable of signing a hash if you // wish. These functions should only be used when: // // a) Compatibility with an external system that uses prehashed messages is // required. (The general signature of a hash is not compatible with a // "prehash" signature of the same hash.) // b) A single private key is used to sign both prehashed and raw messages, // and there's no other way to prevent ambiguity. // SLHDSA_SHA2_128S_prehash_sign slowly generates a SLH-DSA-SHA2-128s signature // of the prehashed |hashed_msg| using |private_key| and writes it to // |out_signature|. The |context| argument is also signed over and can be used // to include implicit contextual information that isn't included in // |hashed_msg|. 
The same value of |context| must be presented to // |SLHDSA_SHA2_128S_prehash_verify| in order for the generated signature to be // considered valid. |context| and |context_len| may be |NULL| and 0 to use an // empty context (this is common). // // The |hash_nid| argument must specify the hash function that was used to // generate |hashed_msg|. This function only accepts hash functions listed in // FIPS 205. // // This function returns 1 on success and 0 if |context_len| is larger than 255, // if the hash function is not supported, or if |hashed_msg| is the wrong // length. OPENSSL_EXPORT int SLHDSA_SHA2_128S_prehash_sign( uint8_t out_signature[SLHDSA_SHA2_128S_SIGNATURE_BYTES], const uint8_t private_key[SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES], const uint8_t *hashed_msg, size_t hashed_msg_len, int hash_nid, const uint8_t *context, size_t context_len); // SLHDSA_SHA2_128S_prehash_verify verifies that |signature| is a valid // SLH-DSA-SHA2-128s signature of the prehashed |hashed_msg| by |public_key|, // using the hash algorithm identified by |hash_nid|. The value of |context| // must equal the value that was passed to |SLHDSA_SHA2_128S_prehash_sign| when // the signature was generated. // // The |hash_nid| argument must specify the hash function that was used to // generate |hashed_msg|. This function only accepts hash functions that are // listed in FIPS 205. // // This function returns 1 if the signature is valid and 0 if the signature is // invalid, the hash function is not supported, or if |hashed_msg| is the wrong // length. 
OPENSSL_EXPORT int SLHDSA_SHA2_128S_prehash_verify( const uint8_t *signature, size_t signature_len, const uint8_t public_key[SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], const uint8_t *hashed_msg, size_t hashed_msg_len, int hash_nid, const uint8_t *context, size_t context_len); // SLHDSA_SHA2_128S_prehash_warning_nonstandard_sign slowly generates a // SLH-DSA-SHA2-128s signature of the prehashed |hashed_msg| using |private_key| // and writes it to |out_signature|. The |context| argument is also signed over // and can be used to include implicit contextual information that isn't // included in |hashed_msg|. The same value of |context| must be presented to // |SLHDSA_SHA2_128S_prehash_warning_nonstandard_verify| in order for the // generated signature to be considered valid. |context| and |context_len| may // be |NULL| and 0 to use an empty context (this is common). // // The |hash_nid| argument must specify the hash function that was used to // generate |hashed_msg|. This function only accepts non-standard hash functions // that are not compliant with FIPS 205. // // This function returns 1 on success and 0 if |context_len| is larger than 255, // if the hash function is not supported, or if |hashed_msg| is the wrong // length. OPENSSL_EXPORT int SLHDSA_SHA2_128S_prehash_warning_nonstandard_sign( uint8_t out_signature[SLHDSA_SHA2_128S_SIGNATURE_BYTES], const uint8_t private_key[SLHDSA_SHA2_128S_PRIVATE_KEY_BYTES], const uint8_t *hashed_msg, size_t hashed_msg_len, int hash_nid, const uint8_t *context, size_t context_len); // SLHDSA_SHA2_128S_prehash_warning_nonstandard_verify verifies that |signature| // is a valid SLH-DSA-SHA2-128s signature of the prehashed |hashed_msg| by // |public_key|, using the hash algorithm identified by |hash_nid|. The value of // |context| must equal the value that was passed to // |SLHDSA_SHA2_128S_prehash_sign| when the signature was generated. // // The |hash_nid| argument must specify the hash function that was used to // generate |hashed_msg|. 
This function only accepts non-standard hash functions // that are not compliant with FIPS 205. // // This function returns 1 if the signature is valid and 0 if the signature is // invalid, the hash function is not supported, or if |hashed_msg| is the wrong // length. OPENSSL_EXPORT int SLHDSA_SHA2_128S_prehash_warning_nonstandard_verify( const uint8_t *signature, size_t signature_len, const uint8_t public_key[SLHDSA_SHA2_128S_PUBLIC_KEY_BYTES], const uint8_t *hashed_msg, size_t hashed_msg_len, int hash_nid, const uint8_t *context, size_t context_len); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_SLHDSA_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_span.h ================================================ /* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_SSL_SPAN_H #define OPENSSL_HEADER_SSL_SPAN_H #include "CNIOBoringSSL_base.h" #if !defined(BORINGSSL_NO_CXX) extern "C++" { #include #include #include #include #if __has_include() #include #endif #if defined(__cpp_lib_ranges) && __cpp_lib_ranges >= 201911L #include BSSL_NAMESPACE_BEGIN template class Span; BSSL_NAMESPACE_END // Mark `Span` as satisfying the `view` and `borrowed_range` concepts. 
This // should be done before the definition of `Span`, so that any inlined calls to // range functionality use the correct specializations. template inline constexpr bool std::ranges::enable_view> = true; template inline constexpr bool std::ranges::enable_borrowed_range> = true; #endif BSSL_NAMESPACE_BEGIN template class Span; namespace internal { template class SpanBase { // Put comparison operator implementations into a base class with const T, so // they can be used with any type that implicitly converts into a Span. static_assert(std::is_const::value, "Span must be derived from SpanBase"); friend bool operator==(Span lhs, Span rhs) { return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); } friend bool operator!=(Span lhs, Span rhs) { return !(lhs == rhs); } }; // Heuristically test whether C is a container type that can be converted into // a Span by checking for data() and size() member functions. template using EnableIfContainer = std::enable_if_t< std::is_convertible_v().data()), T *> && std::is_integral_v().size())>>; } // namespace internal // A Span is a non-owning reference to a contiguous array of objects of type // |T|. Conceptually, a Span is a simple a pointer to |T| and a count of // elements accessible via that pointer. The elements referenced by the Span can // be mutated if |T| is mutable. // // A Span can be constructed from container types implementing |data()| and // |size()| methods. If |T| is constant, construction from a container type is // implicit. This allows writing methods that accept data from some unspecified // container type: // // // Foo views data referenced by v. // void Foo(bssl::Span v) { ... } // // std::vector vec; // Foo(vec); // // For mutable Spans, conversion is explicit: // // // FooMutate mutates data referenced by v. // void FooMutate(bssl::Span v) { ... 
} // // FooMutate(bssl::Span(vec)); // // You can also use C++17 class template argument deduction to construct Spans // in order to deduce the type of the Span automatically. // // FooMutate(bssl::Span(vec)); // // Note that Spans have value type sematics. They are cheap to construct and // copy, and should be passed by value whenever a method would otherwise accept // a reference or pointer to a container or array. template class Span : private internal::SpanBase { public: static const size_t npos = static_cast(-1); using element_type = T; using value_type = std::remove_cv_t; using size_type = size_t; using difference_type = ptrdiff_t; using pointer = T *; using const_pointer = const T *; using reference = T &; using const_reference = const T &; using iterator = T *; using const_iterator = const T *; constexpr Span() : Span(nullptr, 0) {} constexpr Span(T *ptr, size_t len) : data_(ptr), size_(len) {} template constexpr Span(T (&array)[N]) : Span(array, N) {} template , typename = std::enable_if_t::value, C>> constexpr Span(const C &container) : data_(container.data()), size_(container.size()) {} template , typename = std::enable_if_t::value, C>> constexpr explicit Span(C &container) : data_(container.data()), size_(container.size()) {} constexpr T *data() const { return data_; } constexpr size_t size() const { return size_; } constexpr bool empty() const { return size_ == 0; } constexpr iterator begin() const { return data_; } constexpr const_iterator cbegin() const { return data_; } constexpr iterator end() const { return data_ + size_; } constexpr const_iterator cend() const { return end(); } constexpr T &front() const { if (size_ == 0) { abort(); } return data_[0]; } constexpr T &back() const { if (size_ == 0) { abort(); } return data_[size_ - 1]; } constexpr T &operator[](size_t i) const { if (i >= size_) { abort(); } return data_[i]; } T &at(size_t i) const { return (*this)[i]; } constexpr Span subspan(size_t pos = 0, size_t len = npos) const { if (pos > 
size_) { // absl::Span throws an exception here. Note std::span and Chromium // base::span additionally forbid pos + len being out of range, with a // special case at npos/dynamic_extent, while absl::Span::subspan clips // the span. For now, we align with absl::Span in case we switch to it in // the future. abort(); } return Span(data_ + pos, std::min(size_ - pos, len)); } constexpr Span first(size_t len) const { if (len > size_) { abort(); } return Span(data_, len); } constexpr Span last(size_t len) const { if (len > size_) { abort(); } return Span(data_ + size_ - len, len); } private: T *data_; size_t size_; }; template const size_t Span::npos; template Span(T *, size_t) -> Span; template Span(T (&array)[size]) -> Span; template < typename C, typename T = std::remove_pointer_t().data())>, typename = internal::EnableIfContainer> Span(C &) -> Span; template constexpr Span MakeSpan(T *ptr, size_t size) { return Span(ptr, size); } template constexpr auto MakeSpan(C &c) -> decltype(MakeSpan(c.data(), c.size())) { return MakeSpan(c.data(), c.size()); } template constexpr Span MakeSpan(T (&array)[N]) { return Span(array, N); } template constexpr Span MakeConstSpan(T *ptr, size_t size) { return Span(ptr, size); } template constexpr auto MakeConstSpan(const C &c) -> decltype(MakeConstSpan(c.data(), c.size())) { return MakeConstSpan(c.data(), c.size()); } template constexpr Span MakeConstSpan(T (&array)[size]) { return array; } inline Span StringAsBytes(std::string_view s) { return MakeConstSpan(reinterpret_cast(s.data()), s.size()); } inline std::string_view BytesAsStringView(bssl::Span b) { return std::string_view(reinterpret_cast(b.data()), b.size()); } BSSL_NAMESPACE_END } // extern C++ #endif // !defined(BORINGSSL_NO_CXX) #endif // OPENSSL_HEADER_SSL_SPAN_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_srtp.h ================================================ /* Copyright 2015 The BoringSSL Authors * * Permission to 
use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. */ #include "CNIOBoringSSL_ssl.h" ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_ssl.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * Copyright 2005 Nokia. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_SSL_H #define OPENSSL_HEADER_SSL_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_bio.h" #include "CNIOBoringSSL_buf.h" #include "CNIOBoringSSL_pem.h" #include "CNIOBoringSSL_span.h" #include "CNIOBoringSSL_ssl3.h" #include "CNIOBoringSSL_thread.h" #include "CNIOBoringSSL_tls1.h" #include "CNIOBoringSSL_x509.h" #if !defined(OPENSSL_WINDOWS) #include #endif // Forward-declare struct timeval. On Windows, it is defined in winsock2.h and // Windows headers define too many macros to be included in public headers. 
// However, only a forward declaration is needed. struct timeval; #if defined(__cplusplus) extern "C" { #endif // SSL implementation. // SSL contexts. // // |SSL_CTX| objects manage shared state and configuration between multiple TLS // or DTLS connections. Whether the connections are TLS or DTLS is selected by // an |SSL_METHOD| on creation. // // |SSL_CTX| are reference-counted and may be shared by connections across // multiple threads. Once shared, functions which change the |SSL_CTX|'s // configuration may not be used. // TLS_method is the |SSL_METHOD| used for TLS connections. OPENSSL_EXPORT const SSL_METHOD *TLS_method(void); // DTLS_method is the |SSL_METHOD| used for DTLS connections. OPENSSL_EXPORT const SSL_METHOD *DTLS_method(void); // TLS_with_buffers_method is like |TLS_method|, but avoids all use of // crypto/x509. All client connections created with |TLS_with_buffers_method| // will fail unless a certificate verifier is installed with // |SSL_set_custom_verify| or |SSL_CTX_set_custom_verify|. OPENSSL_EXPORT const SSL_METHOD *TLS_with_buffers_method(void); // DTLS_with_buffers_method is like |DTLS_method|, but avoids all use of // crypto/x509. OPENSSL_EXPORT const SSL_METHOD *DTLS_with_buffers_method(void); // SSL_CTX_new returns a newly-allocated |SSL_CTX| with default settings or NULL // on error. OPENSSL_EXPORT SSL_CTX *SSL_CTX_new(const SSL_METHOD *method); // SSL_CTX_up_ref increments the reference count of |ctx|. It returns one. OPENSSL_EXPORT int SSL_CTX_up_ref(SSL_CTX *ctx); // SSL_CTX_free releases memory associated with |ctx|. OPENSSL_EXPORT void SSL_CTX_free(SSL_CTX *ctx); // SSL connections. // // An |SSL| object represents a single TLS or DTLS connection. Although the // shared |SSL_CTX| is thread-safe, an |SSL| is not thread-safe and may only be // used on one thread at a time. // SSL_new returns a newly-allocated |SSL| using |ctx| or NULL on error. The new // connection inherits settings from |ctx| at the time of creation. 
Settings may // also be individually configured on the connection. // // On creation, an |SSL| is not configured to be either a client or server. Call // |SSL_set_connect_state| or |SSL_set_accept_state| to set this. OPENSSL_EXPORT SSL *SSL_new(SSL_CTX *ctx); // SSL_free releases memory associated with |ssl|. OPENSSL_EXPORT void SSL_free(SSL *ssl); // SSL_get_SSL_CTX returns the |SSL_CTX| associated with |ssl|. If // |SSL_set_SSL_CTX| is called, it returns the new |SSL_CTX|, not the initial // one. OPENSSL_EXPORT SSL_CTX *SSL_get_SSL_CTX(const SSL *ssl); // SSL_set_connect_state configures |ssl| to be a client. OPENSSL_EXPORT void SSL_set_connect_state(SSL *ssl); // SSL_set_accept_state configures |ssl| to be a server. OPENSSL_EXPORT void SSL_set_accept_state(SSL *ssl); // SSL_is_server returns one if |ssl| is configured as a server and zero // otherwise. OPENSSL_EXPORT int SSL_is_server(const SSL *ssl); // SSL_is_dtls returns one if |ssl| is a DTLS connection and zero otherwise. OPENSSL_EXPORT int SSL_is_dtls(const SSL *ssl); // SSL_is_quic returns one if |ssl| is a QUIC connection and zero otherwise. OPENSSL_EXPORT int SSL_is_quic(const SSL *ssl); // SSL_set_bio configures |ssl| to read from |rbio| and write to |wbio|. |ssl| // takes ownership of the two |BIO|s. If |rbio| and |wbio| are the same, |ssl| // only takes ownership of one reference. See |SSL_set0_rbio| and // |SSL_set0_wbio| for requirements on |rbio| and |wbio|, respectively. // // If |rbio| is the same as the currently configured |BIO| for reading, that // side is left untouched and is not freed. // // If |wbio| is the same as the currently configured |BIO| for writing AND |ssl| // is not currently configured to read from and write to the same |BIO|, that // side is left untouched and is not freed. This asymmetry is present for // historical reasons. // // Due to the very complex historical behavior of this function, calling this // function if |ssl| already has |BIO|s configured is deprecated. 
Prefer // |SSL_set0_rbio| and |SSL_set0_wbio| instead. OPENSSL_EXPORT void SSL_set_bio(SSL *ssl, BIO *rbio, BIO *wbio); // SSL_set0_rbio configures |ssl| to read from |rbio|. It takes ownership of // |rbio|. |rbio| may be a custom |BIO|, in which case it must implement // |BIO_read| with |BIO_meth_set_read|. In DTLS, |rbio| must be non-blocking to // properly handle timeouts and retransmits. // // Note that, although this function and |SSL_set0_wbio| may be called on the // same |BIO|, each call takes a reference. Use |BIO_up_ref| to balance this. OPENSSL_EXPORT void SSL_set0_rbio(SSL *ssl, BIO *rbio); // SSL_set0_wbio configures |ssl| to write to |wbio|. It takes ownership of // |wbio|. |wbio| may be a custom |BIO|, in which case it must implement // |BIO_write| with |BIO_meth_set_write|. It must additionally implement // |BIO_flush| with |BIO_meth_set_ctrl| and |BIO_CTRL_FLUSH|. If flushing is // unnecessary with |wbio|, |BIO_flush| should return one and do nothing. // // Note that, although this function and |SSL_set0_rbio| may be called on the // same |BIO|, each call takes a reference. Use |BIO_up_ref| to balance this. OPENSSL_EXPORT void SSL_set0_wbio(SSL *ssl, BIO *wbio); // SSL_get_rbio returns the |BIO| that |ssl| reads from. OPENSSL_EXPORT BIO *SSL_get_rbio(const SSL *ssl); // SSL_get_wbio returns the |BIO| that |ssl| writes to. OPENSSL_EXPORT BIO *SSL_get_wbio(const SSL *ssl); // SSL_get_fd calls |SSL_get_rfd|. OPENSSL_EXPORT int SSL_get_fd(const SSL *ssl); // SSL_get_rfd returns the file descriptor that |ssl| is configured to read // from. If |ssl|'s read |BIO| is not configured or doesn't wrap a file // descriptor then it returns -1. // // Note: On Windows, this may return either a file descriptor or a socket (cast // to int), depending on whether |ssl| was configured with a file descriptor or // socket |BIO|. OPENSSL_EXPORT int SSL_get_rfd(const SSL *ssl); // SSL_get_wfd returns the file descriptor that |ssl| is configured to write // to. 
If |ssl|'s write |BIO| is not configured or doesn't wrap a file // descriptor then it returns -1. // // Note: On Windows, this may return either a file descriptor or a socket (cast // to int), depending on whether |ssl| was configured with a file descriptor or // socket |BIO|. OPENSSL_EXPORT int SSL_get_wfd(const SSL *ssl); #if !defined(OPENSSL_NO_SOCK) // SSL_set_fd configures |ssl| to read from and write to |fd|. It returns one // on success and zero on allocation error. The caller retains ownership of // |fd|. // // On Windows, |fd| is cast to a |SOCKET| and used with Winsock APIs. OPENSSL_EXPORT int SSL_set_fd(SSL *ssl, int fd); // SSL_set_rfd configures |ssl| to read from |fd|. It returns one on success and // zero on allocation error. The caller retains ownership of |fd|. // // On Windows, |fd| is cast to a |SOCKET| and used with Winsock APIs. OPENSSL_EXPORT int SSL_set_rfd(SSL *ssl, int fd); // SSL_set_wfd configures |ssl| to write to |fd|. It returns one on success and // zero on allocation error. The caller retains ownership of |fd|. // // On Windows, |fd| is cast to a |SOCKET| and used with Winsock APIs. OPENSSL_EXPORT int SSL_set_wfd(SSL *ssl, int fd); #endif // !OPENSSL_NO_SOCK // SSL_do_handshake continues the current handshake. If there is none or the // handshake has completed or False Started, it returns one. Otherwise, it // returns <= 0. The caller should pass the value into |SSL_get_error| to // determine how to proceed. // // In DTLS, the caller must drive retransmissions and timeouts. After calling // this function, the caller must use |DTLSv1_get_timeout| to determine the // current timeout, if any. If it expires before the application next calls into // |ssl|, call |DTLSv1_handle_timeout|. Note that DTLS handshake retransmissions // use fresh sequence numbers, so it is not sufficient to replay packets at the // transport. // // After the DTLS handshake, some retransmissions may remain. 
If |ssl| wrote // last in the handshake, it may need to retransmit the final flight in case of // packet loss. Additionally, in DTLS 1.3, it may need to retransmit // post-handshake messages. To handle these, the caller must always be prepared // to receive packets and process them with |SSL_read|, even when the // application protocol would otherwise not read from the connection. // // TODO(davidben): Ensure 0 is only returned on transport EOF. // https://crbug.com/466303. OPENSSL_EXPORT int SSL_do_handshake(SSL *ssl); // SSL_connect configures |ssl| as a client, if unconfigured, and calls // |SSL_do_handshake|. OPENSSL_EXPORT int SSL_connect(SSL *ssl); // SSL_accept configures |ssl| as a server, if unconfigured, and calls // |SSL_do_handshake|. OPENSSL_EXPORT int SSL_accept(SSL *ssl); // SSL_read reads up to |num| bytes from |ssl| into |buf|. It implicitly runs // any pending handshakes, including renegotiations when enabled. On success, it // returns the number of bytes read. Otherwise, it returns <= 0. The caller // should pass the value into |SSL_get_error| to determine how to proceed. // // In DTLS 1.3, the caller must also drive timeouts from retransmitting the // final flight of the handshake, as well as post-handshake messages. After // calling this function, the caller must use |DTLSv1_get_timeout| to determine // the current timeout, if any. If it expires before the application next calls // into |ssl|, call |DTLSv1_handle_timeout|. // // TODO(davidben): Ensure 0 is only returned on transport EOF. // https://crbug.com/466303. OPENSSL_EXPORT int SSL_read(SSL *ssl, void *buf, int num); // SSL_peek behaves like |SSL_read| but does not consume any bytes returned. OPENSSL_EXPORT int SSL_peek(SSL *ssl, void *buf, int num); // SSL_pending returns the number of buffered, decrypted bytes available for // read in |ssl|. It does not read from the transport. 
// // In DTLS, it is possible for this function to return zero while there is // buffered, undecrypted data from the transport in |ssl|. For example, // |SSL_read| may read a datagram with two records, decrypt the first, and leave // the second buffered for a subsequent call to |SSL_read|. Callers that wish to // detect this case can use |SSL_has_pending|. OPENSSL_EXPORT int SSL_pending(const SSL *ssl); // SSL_has_pending returns one if |ssl| has buffered, decrypted bytes available // for read, or if |ssl| has buffered data from the transport that has not yet // been decrypted. If |ssl| has neither, this function returns zero. // // In TLS, BoringSSL does not implement read-ahead, so this function returns one // if and only if |SSL_pending| would return a non-zero value. In DTLS, it is // possible for this function to return one while |SSL_pending| returns zero. // For example, |SSL_read| may read a datagram with two records, decrypt the // first, and leave the second buffered for a subsequent call to |SSL_read|. // // As a result, if this function returns one, the next call to |SSL_read| may // still fail, read from the transport, or both. The buffered, undecrypted data // may be invalid or incomplete. OPENSSL_EXPORT int SSL_has_pending(const SSL *ssl); // SSL_write writes up to |num| bytes from |buf| into |ssl|. It implicitly runs // any pending handshakes, including renegotiations when enabled. On success, it // returns the number of bytes written. Otherwise, it returns <= 0. The caller // should pass the value into |SSL_get_error| to determine how to proceed. // // In TLS, a non-blocking |SSL_write| differs from non-blocking |write| in that // a failed |SSL_write| still commits to the data passed in. When retrying, the // caller must supply the original write buffer (or a larger one containing the // original as a prefix). By default, retries will fail if they also do not // reuse the same |buf| pointer. 
This may be relaxed with // |SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER|, but the buffer contents still must be // unchanged. // // By default, in TLS, |SSL_write| will not return success until all |num| bytes // are written. This may be relaxed with |SSL_MODE_ENABLE_PARTIAL_WRITE|. It // allows |SSL_write| to complete with a partial result when only part of the // input was written in a single record. // // In DTLS, neither |SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER| and // |SSL_MODE_ENABLE_PARTIAL_WRITE| do anything. The caller may retry with a // different buffer freely. A single call to |SSL_write| only ever writes a // single record in a single packet, so |num| must be at most // |SSL3_RT_MAX_PLAIN_LENGTH|. // // TODO(davidben): Ensure 0 is only returned on transport EOF. // https://crbug.com/466303. OPENSSL_EXPORT int SSL_write(SSL *ssl, const void *buf, int num); // SSL_KEY_UPDATE_REQUESTED indicates that the peer should reply to a KeyUpdate // message with its own, thus updating traffic secrets for both directions on // the connection. #define SSL_KEY_UPDATE_REQUESTED 1 // SSL_KEY_UPDATE_NOT_REQUESTED indicates that the peer should not reply with // it's own KeyUpdate message. #define SSL_KEY_UPDATE_NOT_REQUESTED 0 // SSL_key_update queues a TLS 1.3 KeyUpdate message to be sent on |ssl| // if one is not already queued. The |request_type| argument must one of the // |SSL_KEY_UPDATE_*| values. This function requires that |ssl| have completed a // TLS >= 1.3 handshake. It returns one on success or zero on error. // // Note that this function does not _send_ the message itself. The next call to // |SSL_write| will cause the message to be sent. |SSL_write| may be called with // a zero length to flush a KeyUpdate message when no application data is // pending. OPENSSL_EXPORT int SSL_key_update(SSL *ssl, int request_type); // SSL_shutdown shuts down |ssl|. It runs in two stages. First, it sends // close_notify and returns zero or one on success or -1 on failure. 
Zero // indicates that close_notify was sent, but not received, and one additionally // indicates that the peer's close_notify had already been received. // // To then wait for the peer's close_notify, run |SSL_shutdown| to completion a // second time. This returns 1 on success and -1 on failure. Application data // is considered a fatal error at this point. To process or discard it, read // until close_notify with |SSL_read| instead. // // In both cases, on failure, pass the return value into |SSL_get_error| to // determine how to proceed. // // Most callers should stop at the first stage. Reading for close_notify is // primarily used for uncommon protocols where the underlying transport is // reused after TLS completes. Additionally, DTLS uses an unordered transport // and is unordered, so the second stage is a no-op in DTLS. OPENSSL_EXPORT int SSL_shutdown(SSL *ssl); // SSL_CTX_set_quiet_shutdown sets quiet shutdown on |ctx| to |mode|. If // enabled, |SSL_shutdown| will not send a close_notify alert or wait for one // from the peer. It will instead synchronously return one. OPENSSL_EXPORT void SSL_CTX_set_quiet_shutdown(SSL_CTX *ctx, int mode); // SSL_CTX_get_quiet_shutdown returns whether quiet shutdown is enabled for // |ctx|. OPENSSL_EXPORT int SSL_CTX_get_quiet_shutdown(const SSL_CTX *ctx); // SSL_set_quiet_shutdown sets quiet shutdown on |ssl| to |mode|. If enabled, // |SSL_shutdown| will not send a close_notify alert or wait for one from the // peer. It will instead synchronously return one. OPENSSL_EXPORT void SSL_set_quiet_shutdown(SSL *ssl, int mode); // SSL_get_quiet_shutdown returns whether quiet shutdown is enabled for // |ssl|. OPENSSL_EXPORT int SSL_get_quiet_shutdown(const SSL *ssl); // SSL_get_error returns a |SSL_ERROR_*| value for the most recent operation on // |ssl|. It should be called after an operation failed to determine whether the // error was fatal and, if not, when to retry. 
OPENSSL_EXPORT int SSL_get_error(const SSL *ssl, int ret_code); // SSL_ERROR_NONE indicates the operation succeeded. #define SSL_ERROR_NONE 0 // SSL_ERROR_SSL indicates the operation failed within the library. The caller // may inspect the error queue (see |ERR_get_error|) for more information. #define SSL_ERROR_SSL 1 // SSL_ERROR_WANT_READ indicates the operation failed attempting to read from // the transport. The caller may retry the operation when the transport is ready // for reading. #define SSL_ERROR_WANT_READ 2 // SSL_ERROR_WANT_WRITE indicates the operation failed attempting to write to // the transport. The caller may retry the operation when the transport is ready // for writing. #define SSL_ERROR_WANT_WRITE 3 // SSL_ERROR_WANT_X509_LOOKUP indicates the operation failed in calling the // |cert_cb| or |client_cert_cb|. The caller may retry the operation when the // callback is ready to return a certificate or one has been configured // externally. // // See also |SSL_CTX_set_cert_cb| and |SSL_CTX_set_client_cert_cb|. #define SSL_ERROR_WANT_X509_LOOKUP 4 // SSL_ERROR_SYSCALL indicates the operation failed externally to the library. // The caller should consult the system-specific error mechanism. This is // typically |errno| but may be something custom if using a custom |BIO|. It // may also be signaled if the transport returned EOF, in which case the // operation's return value will be zero. #define SSL_ERROR_SYSCALL 5 // SSL_ERROR_ZERO_RETURN indicates the operation failed because the connection // was cleanly shut down with a close_notify alert. #define SSL_ERROR_ZERO_RETURN 6 // SSL_ERROR_WANT_CONNECT indicates the operation failed attempting to connect // the transport (the |BIO| signaled |BIO_RR_CONNECT|). The caller may retry the // operation when the transport is ready. 
#define SSL_ERROR_WANT_CONNECT 7 // SSL_ERROR_WANT_ACCEPT indicates the operation failed attempting to accept a // connection from the transport (the |BIO| signaled |BIO_RR_ACCEPT|). The // caller may retry the operation when the transport is ready. // // TODO(davidben): Remove this. It's used by accept BIOs which are bizarre. #define SSL_ERROR_WANT_ACCEPT 8 // SSL_ERROR_WANT_CHANNEL_ID_LOOKUP is never used. // // TODO(davidben): Remove this. Some callers reference it when stringifying // errors. They should use |SSL_error_description| instead. #define SSL_ERROR_WANT_CHANNEL_ID_LOOKUP 9 // SSL_ERROR_PENDING_SESSION indicates the operation failed because the session // lookup callback indicated the session was unavailable. The caller may retry // the operation when lookup has completed. // // See also |SSL_CTX_sess_set_get_cb| and |SSL_magic_pending_session_ptr|. #define SSL_ERROR_PENDING_SESSION 11 // SSL_ERROR_PENDING_CERTIFICATE indicates the operation failed because the // early callback indicated certificate lookup was incomplete. The caller may // retry the operation when lookup has completed. // // See also |SSL_CTX_set_select_certificate_cb|. #define SSL_ERROR_PENDING_CERTIFICATE 12 // SSL_ERROR_WANT_PRIVATE_KEY_OPERATION indicates the operation failed because // a private key operation was unfinished. The caller may retry the operation // when the private key operation is complete. // // See also |SSL_set_private_key_method|, |SSL_CTX_set_private_key_method|, and // |SSL_CREDENTIAL_set_private_key_method|. #define SSL_ERROR_WANT_PRIVATE_KEY_OPERATION 13 // SSL_ERROR_PENDING_TICKET indicates that a ticket decryption is pending. The // caller may retry the operation when the decryption is ready. // // See also |SSL_CTX_set_ticket_aead_method|. #define SSL_ERROR_PENDING_TICKET 14 // SSL_ERROR_EARLY_DATA_REJECTED indicates that early data was rejected. 
The // caller should treat this as a connection failure and retry any operations // associated with the rejected early data. |SSL_reset_early_data_reject| may be // used to reuse the underlying connection for the retry. #define SSL_ERROR_EARLY_DATA_REJECTED 15 // SSL_ERROR_WANT_CERTIFICATE_VERIFY indicates the operation failed because // certificate verification was incomplete. The caller may retry the operation // when certificate verification is complete. // // See also |SSL_CTX_set_custom_verify|. #define SSL_ERROR_WANT_CERTIFICATE_VERIFY 16 #define SSL_ERROR_HANDOFF 17 #define SSL_ERROR_HANDBACK 18 // SSL_ERROR_WANT_RENEGOTIATE indicates the operation is pending a response to // a renegotiation request from the server. The caller may call // |SSL_renegotiate| to schedule a renegotiation and retry the operation. // // See also |ssl_renegotiate_explicit|. #define SSL_ERROR_WANT_RENEGOTIATE 19 // SSL_ERROR_HANDSHAKE_HINTS_READY indicates the handshake has progressed enough // for |SSL_serialize_handshake_hints| to be called. See also // |SSL_request_handshake_hints|. #define SSL_ERROR_HANDSHAKE_HINTS_READY 20 // SSL_error_description returns a string representation of |err|, where |err| // is one of the |SSL_ERROR_*| constants returned by |SSL_get_error|, or NULL // if the value is unrecognized. OPENSSL_EXPORT const char *SSL_error_description(int err); // SSL_set_mtu sets the |ssl|'s MTU in DTLS to |mtu|. It returns one on success // and zero on failure. OPENSSL_EXPORT int SSL_set_mtu(SSL *ssl, unsigned mtu); // DTLSv1_set_initial_timeout_duration sets the initial duration for a DTLS // handshake timeout. // // This duration overrides the default of 400 milliseconds, which is // recommendation of RFC 9147 for real-time protocols. OPENSSL_EXPORT void DTLSv1_set_initial_timeout_duration(SSL *ssl, uint32_t duration_ms); // DTLSv1_get_timeout queries the running DTLS timers. 
If there are any in // progress, it sets |*out| to the time remaining until the first timer expires // and returns one. Otherwise, it returns zero. Timers may be scheduled both // during and after the handshake. // // When the timeout expires, call |DTLSv1_handle_timeout| to handle the // retransmit behavior. // // NOTE: This function must be queried again whenever the state machine changes, // including when |DTLSv1_handle_timeout| is called. OPENSSL_EXPORT int DTLSv1_get_timeout(const SSL *ssl, struct timeval *out); // DTLSv1_handle_timeout is called when a DTLS timeout expires. If no timeout // had expired, it returns 0. Otherwise, it handles the timeout and returns 1 on // success or -1 on error. // // This function may write to the transport (e.g. to retransmit messages) or // update |ssl|'s internal state and schedule an updated timer. // // The caller's external timer should be compatible with the one |ssl| queries // within some fudge factor. Otherwise, the call will be a no-op, but // |DTLSv1_get_timeout| will return an updated timeout. // // If the function returns -1, checking if |SSL_get_error| returns // |SSL_ERROR_WANT_WRITE| may be used to determine if the retransmit failed due // to a non-fatal error at the write |BIO|. In this case, when the |BIO| is // writable, the operation may be retried by calling the original function, // |SSL_do_handshake| or |SSL_read|. // // WARNING: This function breaks the usual return value convention. // // TODO(davidben): We can make this function entirely optional by just checking // the timers in |SSL_do_handshake| or |SSL_read|. Then timers behave like any // other retry condition: rerun the operation and the library will make what // progress it can. OPENSSL_EXPORT int DTLSv1_handle_timeout(SSL *ssl); // Protocol versions. 
#define DTLS1_VERSION_MAJOR 0xfe #define SSL3_VERSION_MAJOR 0x03 #define SSL3_VERSION 0x0300 #define TLS1_VERSION 0x0301 #define TLS1_1_VERSION 0x0302 #define TLS1_2_VERSION 0x0303 #define TLS1_3_VERSION 0x0304 #define DTLS1_VERSION 0xfeff #define DTLS1_2_VERSION 0xfefd #define DTLS1_3_VERSION 0xfefc // SSL_CTX_set_min_proto_version sets the minimum protocol version for |ctx| to // |version|. If |version| is zero, the default minimum version is used. It // returns one on success and zero if |version| is invalid. OPENSSL_EXPORT int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, uint16_t version); // SSL_CTX_set_max_proto_version sets the maximum protocol version for |ctx| to // |version|. If |version| is zero, the default maximum version is used. It // returns one on success and zero if |version| is invalid. OPENSSL_EXPORT int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, uint16_t version); // SSL_CTX_get_min_proto_version returns the minimum protocol version for |ctx| OPENSSL_EXPORT uint16_t SSL_CTX_get_min_proto_version(const SSL_CTX *ctx); // SSL_CTX_get_max_proto_version returns the maximum protocol version for |ctx| OPENSSL_EXPORT uint16_t SSL_CTX_get_max_proto_version(const SSL_CTX *ctx); // SSL_set_min_proto_version sets the minimum protocol version for |ssl| to // |version|. If |version| is zero, the default minimum version is used. It // returns one on success and zero if |version| is invalid. OPENSSL_EXPORT int SSL_set_min_proto_version(SSL *ssl, uint16_t version); // SSL_set_max_proto_version sets the maximum protocol version for |ssl| to // |version|. If |version| is zero, the default maximum version is used. It // returns one on success and zero if |version| is invalid. OPENSSL_EXPORT int SSL_set_max_proto_version(SSL *ssl, uint16_t version); // SSL_get_min_proto_version returns the minimum protocol version for |ssl|. If // the connection's configuration has been shed, 0 is returned. 
OPENSSL_EXPORT uint16_t SSL_get_min_proto_version(const SSL *ssl); // SSL_get_max_proto_version returns the maximum protocol version for |ssl|. If // the connection's configuration has been shed, 0 is returned. OPENSSL_EXPORT uint16_t SSL_get_max_proto_version(const SSL *ssl); // SSL_version returns the TLS or DTLS protocol version used by |ssl|, which is // one of the |*_VERSION| values. (E.g. |TLS1_2_VERSION|.) Before the version // is negotiated, the result is undefined. OPENSSL_EXPORT int SSL_version(const SSL *ssl); // Options. // // Options configure protocol behavior. // SSL_OP_NO_QUERY_MTU, in DTLS, disables querying the MTU from the underlying // |BIO|. Instead, the MTU is configured with |SSL_set_mtu|. #define SSL_OP_NO_QUERY_MTU 0x00001000L // SSL_OP_NO_TICKET disables session ticket support (RFC 5077). #define SSL_OP_NO_TICKET 0x00004000L // SSL_OP_CIPHER_SERVER_PREFERENCE configures servers to select ciphers and // ECDHE curves according to the server's preferences instead of the // client's. #define SSL_OP_CIPHER_SERVER_PREFERENCE 0x00400000L // The following flags toggle individual protocol versions. This is deprecated. // Use |SSL_CTX_set_min_proto_version| and |SSL_CTX_set_max_proto_version| // instead. #define SSL_OP_NO_TLSv1 0x04000000L #define SSL_OP_NO_TLSv1_2 0x08000000L #define SSL_OP_NO_TLSv1_1 0x10000000L #define SSL_OP_NO_TLSv1_3 0x20000000L #define SSL_OP_NO_DTLSv1 SSL_OP_NO_TLSv1 #define SSL_OP_NO_DTLSv1_2 SSL_OP_NO_TLSv1_2 // SSL_CTX_set_options enables all options set in |options| (which should be one // or more of the |SSL_OP_*| values, ORed together) in |ctx|. It returns a // bitmask representing the resulting enabled options. OPENSSL_EXPORT uint32_t SSL_CTX_set_options(SSL_CTX *ctx, uint32_t options); // SSL_CTX_clear_options disables all options set in |options| (which should be // one or more of the |SSL_OP_*| values, ORed together) in |ctx|. It returns a // bitmask representing the resulting enabled options. 
OPENSSL_EXPORT uint32_t SSL_CTX_clear_options(SSL_CTX *ctx, uint32_t options); // SSL_CTX_get_options returns a bitmask of |SSL_OP_*| values that represent all // the options enabled for |ctx|. OPENSSL_EXPORT uint32_t SSL_CTX_get_options(const SSL_CTX *ctx); // SSL_set_options enables all options set in |options| (which should be one or // more of the |SSL_OP_*| values, ORed together) in |ssl|. It returns a bitmask // representing the resulting enabled options. OPENSSL_EXPORT uint32_t SSL_set_options(SSL *ssl, uint32_t options); // SSL_clear_options disables all options set in |options| (which should be one // or more of the |SSL_OP_*| values, ORed together) in |ssl|. It returns a // bitmask representing the resulting enabled options. OPENSSL_EXPORT uint32_t SSL_clear_options(SSL *ssl, uint32_t options); // SSL_get_options returns a bitmask of |SSL_OP_*| values that represent all the // options enabled for |ssl|. OPENSSL_EXPORT uint32_t SSL_get_options(const SSL *ssl); // Modes. // // Modes configure API behavior. // SSL_MODE_ENABLE_PARTIAL_WRITE, in TLS, allows |SSL_write| to complete with a // partial result when the only part of the input was written in a single // record. In DTLS, it does nothing. #define SSL_MODE_ENABLE_PARTIAL_WRITE 0x00000001L // SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, in TLS, allows retrying an incomplete // |SSL_write| with a different buffer. However, |SSL_write| still assumes the // buffer contents are unchanged. This is not the default to avoid the // misconception that non-blocking |SSL_write| behaves like non-blocking // |write|. In DTLS, it does nothing. #define SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER 0x00000002L // SSL_MODE_NO_AUTO_CHAIN disables automatically building a certificate chain // before sending certificates to the peer. This flag is set (and the feature // disabled) by default. // TODO(davidben): Remove this behavior. https://crbug.com/boringssl/42. 
#define SSL_MODE_NO_AUTO_CHAIN 0x00000008L // SSL_MODE_ENABLE_FALSE_START allows clients to send application data before // receipt of ChangeCipherSpec and Finished. This mode enables full handshakes // to 'complete' in one RTT. See RFC 7918. // // When False Start is enabled, |SSL_do_handshake| may succeed before the // handshake has completely finished. |SSL_write| will function at this point, // and |SSL_read| will transparently wait for the final handshake leg before // returning application data. To determine if False Start occurred or when the // handshake is completely finished, see |SSL_in_false_start|, |SSL_in_init|, // and |SSL_CB_HANDSHAKE_DONE| from |SSL_CTX_set_info_callback|. #define SSL_MODE_ENABLE_FALSE_START 0x00000080L // SSL_MODE_CBC_RECORD_SPLITTING causes multi-byte CBC records in TLS 1.0 to be // split in two: the first record will contain a single byte and the second will // contain the remainder. This effectively randomises the IV and prevents BEAST // attacks. #define SSL_MODE_CBC_RECORD_SPLITTING 0x00000100L // SSL_MODE_NO_SESSION_CREATION will cause any attempts to create a session to // fail with SSL_R_SESSION_MAY_NOT_BE_CREATED. This can be used to enforce that // session resumption is used for a given SSL*. #define SSL_MODE_NO_SESSION_CREATION 0x00000200L // SSL_MODE_SEND_FALLBACK_SCSV sends TLS_FALLBACK_SCSV in the ClientHello. // To be set only by applications that reconnect with a downgraded protocol // version; see RFC 7507 for details. // // DO NOT ENABLE THIS if your application attempts a normal handshake. Only use // this in explicit fallback retries, following the guidance in RFC 7507. #define SSL_MODE_SEND_FALLBACK_SCSV 0x00000400L // SSL_CTX_set_mode enables all modes set in |mode| (which should be one or more // of the |SSL_MODE_*| values, ORed together) in |ctx|. It returns a bitmask // representing the resulting enabled modes. 
OPENSSL_EXPORT uint32_t SSL_CTX_set_mode(SSL_CTX *ctx, uint32_t mode); // SSL_CTX_clear_mode disables all modes set in |mode| (which should be one or // more of the |SSL_MODE_*| values, ORed together) in |ctx|. It returns a // bitmask representing the resulting enabled modes. OPENSSL_EXPORT uint32_t SSL_CTX_clear_mode(SSL_CTX *ctx, uint32_t mode); // SSL_CTX_get_mode returns a bitmask of |SSL_MODE_*| values that represent all // the modes enabled for |ssl|. OPENSSL_EXPORT uint32_t SSL_CTX_get_mode(const SSL_CTX *ctx); // SSL_set_mode enables all modes set in |mode| (which should be one or more of // the |SSL_MODE_*| values, ORed together) in |ssl|. It returns a bitmask // representing the resulting enabled modes. OPENSSL_EXPORT uint32_t SSL_set_mode(SSL *ssl, uint32_t mode); // SSL_clear_mode disables all modes set in |mode| (which should be one or more // of the |SSL_MODE_*| values, ORed together) in |ssl|. It returns a bitmask // representing the resulting enabled modes. OPENSSL_EXPORT uint32_t SSL_clear_mode(SSL *ssl, uint32_t mode); // SSL_get_mode returns a bitmask of |SSL_MODE_*| values that represent all the // modes enabled for |ssl|. OPENSSL_EXPORT uint32_t SSL_get_mode(const SSL *ssl); // SSL_CTX_set0_buffer_pool sets a |CRYPTO_BUFFER_POOL| that will be used to // store certificates. This can allow multiple connections to share // certificates and thus save memory. // // The SSL_CTX does not take ownership of |pool| and the caller must ensure // that |pool| outlives |ctx| and all objects linked to it, including |SSL|, // |X509| and |SSL_SESSION| objects. Basically, don't ever free |pool|. OPENSSL_EXPORT void SSL_CTX_set0_buffer_pool(SSL_CTX *ctx, CRYPTO_BUFFER_POOL *pool); // Credentials. // // TLS endpoints may present authentication during the handshake, usually using // X.509 certificates. This is typically required for servers and optional for // clients. 
BoringSSL uses the |SSL_CREDENTIAL| object to abstract between // different kinds of credentials, as well as configure automatic selection // between multiple credentials. This may be used to select between ECDSA and // RSA certificates. // // |SSL_CTX| and |SSL| objects maintain lists of credentials in preference // order. During the handshake, BoringSSL will select the first usable // credential from the list. Non-credential APIs, such as // |SSL_CTX_use_certificate|, configure a "legacy credential", which is // appended to this list if configured. Using the legacy credential is the same // as configuring an equivalent credential with the |SSL_CREDENTIAL| API. // // When selecting credentials, BoringSSL considers the credential's type, its // cryptographic capabilities, and capabilities advertised by the peer. This // varies between TLS versions but includes: // // - Whether the peer supports the leaf certificate key // - Whether there is a common signature algorithm that is compatible with the // credential // - Whether there is a common cipher suite that is compatible with the // credential // // WARNING: In TLS 1.2 and below, there is no mechanism for servers to advertise // supported ECDSA curves to the client. BoringSSL clients will assume the // server accepts all ECDSA curves in client certificates. // // By default, BoringSSL does not check the following, though we may add APIs // in the future to enable them on a per-credential basis. // // - Whether the peer supports the signature algorithms in the certificate chain // - Whether a server certificate is compatible with the server_name // extension (SNI) // - Whether the peer supports the certificate authority that issued the // certificate // // Credentials may be configured before the handshake or dynamically in the // early callback (see |SSL_CTX_set_select_certificate_cb|) and certificate // callback (see |SSL_CTX_set_cert_cb|). 
These callbacks allow applications to // use BoringSSL's built-in selection logic in tandem with custom logic. For // example, a callback could evaluate application-specific SNI rules to filter // down to an ECDSA and RSA credential, then configure both for BoringSSL to // select between the two. // SSL_CREDENTIAL_new_x509 returns a new, empty X.509 credential, or NULL on // error. Callers should release the result with |SSL_CREDENTIAL_free| when // done. // // Callers should configure a certificate chain and private key on the // credential, along with other properties, then add it with // |SSL_CTX_add1_credential|. OPENSSL_EXPORT SSL_CREDENTIAL *SSL_CREDENTIAL_new_x509(void); // SSL_CREDENTIAL_up_ref increments the reference count of |cred|. OPENSSL_EXPORT void SSL_CREDENTIAL_up_ref(SSL_CREDENTIAL *cred); // SSL_CREDENTIAL_free decrements the reference count of |cred|. If it reaches // zero, all data referenced by |cred| and |cred| itself are released. OPENSSL_EXPORT void SSL_CREDENTIAL_free(SSL_CREDENTIAL *cred); // SSL_CREDENTIAL_set1_private_key sets |cred|'s private key to |key|. It // returns one on success and zero on failure. OPENSSL_EXPORT int SSL_CREDENTIAL_set1_private_key(SSL_CREDENTIAL *cred, EVP_PKEY *key); // SSL_CREDENTIAL_set1_signing_algorithm_prefs configures |cred| to use |prefs| // as the preference list when signing with |cred|'s private key. It returns one // on success and zero on error. |prefs| should not include the internal-only // value |SSL_SIGN_RSA_PKCS1_MD5_SHA1|. // // It is an error to call this function with delegated credentials (see // |SSL_CREDENTIAL_new_delegated|) because delegated credentials already // constrain the key to a single algorithm. OPENSSL_EXPORT int SSL_CREDENTIAL_set1_signing_algorithm_prefs( SSL_CREDENTIAL *cred, const uint16_t *prefs, size_t num_prefs); // SSL_CREDENTIAL_set1_cert_chain sets |cred|'s certificate chain, starting from // the leaf, to |num_certs| certificates from |certs|. 
It returns one on success // and zero on error. OPENSSL_EXPORT int SSL_CREDENTIAL_set1_cert_chain(SSL_CREDENTIAL *cred, CRYPTO_BUFFER *const *certs, size_t num_certs); // SSL_CREDENTIAL_set1_ocsp_response sets |cred|'s stapled OCSP response to // |ocsp|. It returns one on success and zero on error. OPENSSL_EXPORT int SSL_CREDENTIAL_set1_ocsp_response(SSL_CREDENTIAL *cred, CRYPTO_BUFFER *ocsp); // SSL_CREDENTIAL_set1_signed_cert_timestamp_list sets |cred|'s list of signed // certificate timestamps |sct_list|. |sct_list| must contain one or more SCT // structures serialised as a SignedCertificateTimestampList (see // https://tools.ietf.org/html/rfc6962#section-3.3) – i.e. each SCT is prefixed // by a big-endian, uint16 length and the concatenation of one or more such // prefixed SCTs are themselves also prefixed by a uint16 length. It returns one // on success and zero on error. OPENSSL_EXPORT int SSL_CREDENTIAL_set1_signed_cert_timestamp_list( SSL_CREDENTIAL *cred, CRYPTO_BUFFER *sct_list); // SSL_CTX_add1_credential appends |cred| to |ctx|'s credential list. It returns // one on success and zero on error. The credential list is maintained in order // of decreasing preference, so earlier calls are preferred over later calls. // // After calling this function, it is an error to modify |cred|. Doing so may // result in inconsistent handshake behavior or race conditions. OPENSSL_EXPORT int SSL_CTX_add1_credential(SSL_CTX *ctx, SSL_CREDENTIAL *cred); // SSL_add1_credential appends |cred| to |ssl|'s credential list. It returns one // on success and zero on error. The credential list is maintained in order of // decreasing preference, so earlier calls are preferred over later calls. // // After calling this function, it is an error to modify |cred|. Doing so may // result in inconsistent handshake behavior or race conditions. OPENSSL_EXPORT int SSL_add1_credential(SSL *ssl, SSL_CREDENTIAL *cred); // SSL_certs_clear removes all credentials configured on |ssl|. 
It also removes // the certificate chain and private key on the legacy credential. OPENSSL_EXPORT void SSL_certs_clear(SSL *ssl); // SSL_get0_selected_credential returns the credential in use in the current // handshake on |ssl|. If there is no current handshake on |ssl| or if the // handshake has not progressed to this point, it returns NULL. // // This function is intended for use with |SSL_CREDENTIAL_get_ex_data|. It may // be called from handshake callbacks, such as those in // |SSL_PRIVATE_KEY_METHOD|, to trigger credential-specific behavior. // // In applications that use the older APIs, such as |SSL_use_certificate|, this // function may return an internal |SSL_CREDENTIAL| object. This internal object // will have no ex_data installed. To avoid this, it is recommended that callers // moving to |SSL_CREDENTIAL| use the new APIs consistently. OPENSSL_EXPORT const SSL_CREDENTIAL *SSL_get0_selected_credential( const SSL *ssl); // Configuring certificates and private keys. // // These functions configure the connection's leaf certificate, private key, and // certificate chain. The certificate chain is ordered leaf to root (as sent on // the wire) but does not include the leaf. Both client and server certificates // use these functions. // // Prefer to configure the certificate before the private key. If configured in // the other order, inconsistent private keys will be silently dropped, rather // than return an error. Additionally, overwriting a previously-configured // certificate and key pair only works if the certificate is configured first. // // Each of these functions configures the single "legacy credential" on the // |SSL_CTX| or |SSL|. To select between multiple certificates, use // |SSL_CREDENTIAL_new_x509| and other APIs to configure a list of credentials. // SSL_CTX_use_certificate sets |ctx|'s leaf certificate to |x509|. It returns // one on success and zero on failure. 
If |ctx| has a private key which is // inconsistent with |x509|, the private key is silently dropped. OPENSSL_EXPORT int SSL_CTX_use_certificate(SSL_CTX *ctx, X509 *x509); // SSL_use_certificate sets |ssl|'s leaf certificate to |x509|. It returns one // on success and zero on failure. If |ssl| has a private key which is // inconsistent with |x509|, the private key is silently dropped. OPENSSL_EXPORT int SSL_use_certificate(SSL *ssl, X509 *x509); // SSL_CTX_use_PrivateKey sets |ctx|'s private key to |pkey|. It returns one on // success and zero on failure. If |ctx| had a private key or // |SSL_PRIVATE_KEY_METHOD| previously configured, it is replaced. OPENSSL_EXPORT int SSL_CTX_use_PrivateKey(SSL_CTX *ctx, EVP_PKEY *pkey); // SSL_use_PrivateKey sets |ssl|'s private key to |pkey|. It returns one on // success and zero on failure. If |ssl| had a private key or // |SSL_PRIVATE_KEY_METHOD| previously configured, it is replaced. OPENSSL_EXPORT int SSL_use_PrivateKey(SSL *ssl, EVP_PKEY *pkey); // SSL_CTX_set0_chain sets |ctx|'s certificate chain, excluding the leaf, to // |chain|. On success, it returns one and takes ownership of |chain|. // Otherwise, it returns zero. OPENSSL_EXPORT int SSL_CTX_set0_chain(SSL_CTX *ctx, STACK_OF(X509) *chain); // SSL_CTX_set1_chain sets |ctx|'s certificate chain, excluding the leaf, to // |chain|. It returns one on success and zero on failure. The caller retains // ownership of |chain| and may release it freely. OPENSSL_EXPORT int SSL_CTX_set1_chain(SSL_CTX *ctx, STACK_OF(X509) *chain); // SSL_set0_chain sets |ssl|'s certificate chain, excluding the leaf, to // |chain|. On success, it returns one and takes ownership of |chain|. // Otherwise, it returns zero. OPENSSL_EXPORT int SSL_set0_chain(SSL *ssl, STACK_OF(X509) *chain); // SSL_set1_chain sets |ssl|'s certificate chain, excluding the leaf, to // |chain|. It returns one on success and zero on failure. The caller retains // ownership of |chain| and may release it freely. 
OPENSSL_EXPORT int SSL_set1_chain(SSL *ssl, STACK_OF(X509) *chain); // SSL_CTX_add0_chain_cert appends |x509| to |ctx|'s certificate chain. On // success, it returns one and takes ownership of |x509|. Otherwise, it returns // zero. OPENSSL_EXPORT int SSL_CTX_add0_chain_cert(SSL_CTX *ctx, X509 *x509); // SSL_CTX_add1_chain_cert appends |x509| to |ctx|'s certificate chain. It // returns one on success and zero on failure. The caller retains ownership of // |x509| and may release it freely. OPENSSL_EXPORT int SSL_CTX_add1_chain_cert(SSL_CTX *ctx, X509 *x509); // SSL_add0_chain_cert appends |x509| to |ctx|'s certificate chain. On success, // it returns one and takes ownership of |x509|. Otherwise, it returns zero. OPENSSL_EXPORT int SSL_add0_chain_cert(SSL *ssl, X509 *x509); // SSL_CTX_add_extra_chain_cert calls |SSL_CTX_add0_chain_cert|. OPENSSL_EXPORT int SSL_CTX_add_extra_chain_cert(SSL_CTX *ctx, X509 *x509); // SSL_add1_chain_cert appends |x509| to |ctx|'s certificate chain. It returns // one on success and zero on failure. The caller retains ownership of |x509| // and may release it freely. OPENSSL_EXPORT int SSL_add1_chain_cert(SSL *ssl, X509 *x509); // SSL_CTX_clear_chain_certs clears |ctx|'s certificate chain and returns // one. OPENSSL_EXPORT int SSL_CTX_clear_chain_certs(SSL_CTX *ctx); // SSL_CTX_clear_extra_chain_certs calls |SSL_CTX_clear_chain_certs|. OPENSSL_EXPORT int SSL_CTX_clear_extra_chain_certs(SSL_CTX *ctx); // SSL_clear_chain_certs clears |ssl|'s certificate chain and returns one. OPENSSL_EXPORT int SSL_clear_chain_certs(SSL *ssl); // SSL_CTX_set_cert_cb sets a callback that is called to select a certificate. // The callback returns one on success, zero on internal error, and a negative // number on failure or to pause the handshake. If the handshake is paused, // |SSL_get_error| will return |SSL_ERROR_WANT_X509_LOOKUP|. 
// // On the client, the callback may call |SSL_get0_certificate_types| and // |SSL_get_client_CA_list| for information on the server's certificate // request. // // On the server, the callback will be called after extensions have been // processed, but before the resumption decision has been made. This differs // from OpenSSL which handles resumption before selecting the certificate. OPENSSL_EXPORT void SSL_CTX_set_cert_cb(SSL_CTX *ctx, int (*cb)(SSL *ssl, void *arg), void *arg); // SSL_set_cert_cb sets a callback that is called to select a certificate. The // callback returns one on success, zero on internal error, and a negative // number on failure or to pause the handshake. If the handshake is paused, // |SSL_get_error| will return |SSL_ERROR_WANT_X509_LOOKUP|. // // On the client, the callback may call |SSL_get0_certificate_types| and // |SSL_get_client_CA_list| for information on the server's certificate // request. // // On the server, the callback will be called after extensions have been // processed, but before the resumption decision has been made. This differs // from OpenSSL which handles resumption before selecting the certificate. OPENSSL_EXPORT void SSL_set_cert_cb(SSL *ssl, int (*cb)(SSL *ssl, void *arg), void *arg); // SSL_get0_certificate_types, for a client, sets |*out_types| to an array // containing the client certificate types requested by a server. It returns the // length of the array. Note this list is always empty in TLS 1.3. The server // will instead send signature algorithms. See // |SSL_get0_peer_verify_algorithms|. // // The behavior of this function is undefined except during the callbacks set by // by |SSL_CTX_set_cert_cb| and |SSL_CTX_set_client_cert_cb| or when the // handshake is paused because of them. OPENSSL_EXPORT size_t SSL_get0_certificate_types(const SSL *ssl, const uint8_t **out_types); // SSL_get0_peer_verify_algorithms sets |*out_sigalgs| to an array containing // the signature algorithms the peer is able to verify. 
It returns the length of // the array. Note these values are only sent starting TLS 1.2 and only // mandatory starting TLS 1.3. If not sent, the empty array is returned. For the // historical client certificate types list, see |SSL_get0_certificate_types|. // // The behavior of this function is undefined except during the callbacks set by // by |SSL_CTX_set_cert_cb| and |SSL_CTX_set_client_cert_cb| or when the // handshake is paused because of them. OPENSSL_EXPORT size_t SSL_get0_peer_verify_algorithms(const SSL *ssl, const uint16_t **out_sigalgs); // SSL_get0_peer_delegation_algorithms sets |*out_sigalgs| to an array // containing the signature algorithms the peer is willing to use with delegated // credentials. It returns the length of the array. If not sent, the empty // array is returned. // // The behavior of this function is undefined except during the callbacks set by // by |SSL_CTX_set_cert_cb| and |SSL_CTX_set_client_cert_cb| or when the // handshake is paused because of them. OPENSSL_EXPORT size_t SSL_get0_peer_delegation_algorithms( const SSL *ssl, const uint16_t **out_sigalgs); // SSL_CTX_get0_certificate returns |ctx|'s leaf certificate. OPENSSL_EXPORT X509 *SSL_CTX_get0_certificate(const SSL_CTX *ctx); // SSL_get_certificate returns |ssl|'s leaf certificate. OPENSSL_EXPORT X509 *SSL_get_certificate(const SSL *ssl); // SSL_CTX_get0_privatekey returns |ctx|'s private key. OPENSSL_EXPORT EVP_PKEY *SSL_CTX_get0_privatekey(const SSL_CTX *ctx); // SSL_get_privatekey returns |ssl|'s private key. OPENSSL_EXPORT EVP_PKEY *SSL_get_privatekey(const SSL *ssl); // SSL_CTX_get0_chain_certs sets |*out_chain| to |ctx|'s certificate chain and // returns one. OPENSSL_EXPORT int SSL_CTX_get0_chain_certs(const SSL_CTX *ctx, STACK_OF(X509) **out_chain); // SSL_CTX_get_extra_chain_certs calls |SSL_CTX_get0_chain_certs|. 
OPENSSL_EXPORT int SSL_CTX_get_extra_chain_certs(const SSL_CTX *ctx, STACK_OF(X509) **out_chain); // SSL_get0_chain_certs sets |*out_chain| to |ssl|'s certificate chain and // returns one. OPENSSL_EXPORT int SSL_get0_chain_certs(const SSL *ssl, STACK_OF(X509) **out_chain); // SSL_CTX_set_signed_cert_timestamp_list sets the list of signed certificate // timestamps that is sent to clients that request it. The |list| argument must // contain one or more SCT structures serialised as a SignedCertificateTimestamp // List (see https://tools.ietf.org/html/rfc6962#section-3.3) – i.e. each SCT // is prefixed by a big-endian, uint16 length and the concatenation of one or // more such prefixed SCTs are themselves also prefixed by a uint16 length. It // returns one on success and zero on error. The caller retains ownership of // |list|. OPENSSL_EXPORT int SSL_CTX_set_signed_cert_timestamp_list(SSL_CTX *ctx, const uint8_t *list, size_t list_len); // SSL_set_signed_cert_timestamp_list sets the list of signed certificate // timestamps that is sent to clients that request it. The same format as the // one used for |SSL_CTX_set_signed_cert_timestamp_list| applies. The caller // retains ownership of |list|. OPENSSL_EXPORT int SSL_set_signed_cert_timestamp_list(SSL *ctx, const uint8_t *list, size_t list_len); // SSL_CTX_set_ocsp_response sets the OCSP response that is sent to clients // which request it. It returns one on success and zero on error. The caller // retains ownership of |response|. OPENSSL_EXPORT int SSL_CTX_set_ocsp_response(SSL_CTX *ctx, const uint8_t *response, size_t response_len); // SSL_set_ocsp_response sets the OCSP response that is sent to clients which // request it. It returns one on success and zero on error. The caller retains // ownership of |response|. OPENSSL_EXPORT int SSL_set_ocsp_response(SSL *ssl, const uint8_t *response, size_t response_len); // SSL_SIGN_* are signature algorithm values as defined in TLS 1.3. 
#define SSL_SIGN_RSA_PKCS1_SHA1 0x0201 #define SSL_SIGN_RSA_PKCS1_SHA256 0x0401 #define SSL_SIGN_RSA_PKCS1_SHA384 0x0501 #define SSL_SIGN_RSA_PKCS1_SHA512 0x0601 #define SSL_SIGN_ECDSA_SHA1 0x0203 #define SSL_SIGN_ECDSA_SECP256R1_SHA256 0x0403 #define SSL_SIGN_ECDSA_SECP384R1_SHA384 0x0503 #define SSL_SIGN_ECDSA_SECP521R1_SHA512 0x0603 #define SSL_SIGN_RSA_PSS_RSAE_SHA256 0x0804 #define SSL_SIGN_RSA_PSS_RSAE_SHA384 0x0805 #define SSL_SIGN_RSA_PSS_RSAE_SHA512 0x0806 #define SSL_SIGN_ED25519 0x0807 // SSL_SIGN_RSA_PKCS1_SHA256_LEGACY is a backport of RSASSA-PKCS1-v1_5 with // SHA-256 to TLS 1.3. It is disabled by default and only defined for client // certificates. #define SSL_SIGN_RSA_PKCS1_SHA256_LEGACY 0x0420 // SSL_SIGN_RSA_PKCS1_MD5_SHA1 is an internal signature algorithm used to // specify raw RSASSA-PKCS1-v1_5 with an MD5/SHA-1 concatenation, as used in TLS // before TLS 1.2. #define SSL_SIGN_RSA_PKCS1_MD5_SHA1 0xff01 // SSL_get_signature_algorithm_name returns a human-readable name for |sigalg|, // or NULL if unknown. If |include_curve| is one, the curve for ECDSA algorithms // is included as in TLS 1.3. Otherwise, it is excluded as in TLS 1.2. OPENSSL_EXPORT const char *SSL_get_signature_algorithm_name(uint16_t sigalg, int include_curve); // SSL_get_all_signature_algorithm_names outputs a list of possible strings // |SSL_get_signature_algorithm_name| may return in this version of BoringSSL. // It writes at most |max_out| entries to |out| and returns the total number it // would have written, if |max_out| had been large enough. |max_out| may be // initially set to zero to size the output. // // This function is only intended to help initialize tables in callers that want // possible strings pre-declared. This list would not be suitable to set a list // of supported features. It is in no particular order, and may contain // placeholder, experimental, or deprecated values that do not apply to every // caller. 
Future versions of BoringSSL may also return strings not in this // list, so this does not apply if, say, sending strings across services. OPENSSL_EXPORT size_t SSL_get_all_signature_algorithm_names(const char **out, size_t max_out); // SSL_get_signature_algorithm_key_type returns the key type associated with // |sigalg| as an |EVP_PKEY_*| constant or |EVP_PKEY_NONE| if unknown. OPENSSL_EXPORT int SSL_get_signature_algorithm_key_type(uint16_t sigalg); // SSL_get_signature_algorithm_digest returns the digest function associated // with |sigalg| or |NULL| if |sigalg| has no prehash (Ed25519) or is unknown. OPENSSL_EXPORT const EVP_MD *SSL_get_signature_algorithm_digest( uint16_t sigalg); // SSL_is_signature_algorithm_rsa_pss returns one if |sigalg| is an RSA-PSS // signature algorithm and zero otherwise. OPENSSL_EXPORT int SSL_is_signature_algorithm_rsa_pss(uint16_t sigalg); // SSL_CTX_set_signing_algorithm_prefs configures |ctx| to use |prefs| as the // preference list when signing with |ctx|'s private key. It returns one on // success and zero on error. |prefs| should not include the internal-only value // |SSL_SIGN_RSA_PKCS1_MD5_SHA1|. OPENSSL_EXPORT int SSL_CTX_set_signing_algorithm_prefs(SSL_CTX *ctx, const uint16_t *prefs, size_t num_prefs); // SSL_set_signing_algorithm_prefs configures |ssl| to use |prefs| as the // preference list when signing with |ssl|'s private key. It returns one on // success and zero on error. |prefs| should not include the internal-only value // |SSL_SIGN_RSA_PKCS1_MD5_SHA1|. OPENSSL_EXPORT int SSL_set_signing_algorithm_prefs(SSL *ssl, const uint16_t *prefs, size_t num_prefs); // Certificate and private key convenience functions. // SSL_CTX_set_chain_and_key sets the certificate chain and private key for a // TLS client or server. References to the given |CRYPTO_BUFFER| and |EVP_PKEY| // objects are added as needed. Exactly one of |privkey| or |privkey_method| // may be non-NULL. Returns one on success and zero on error. 
OPENSSL_EXPORT int SSL_CTX_set_chain_and_key( SSL_CTX *ctx, CRYPTO_BUFFER *const *certs, size_t num_certs, EVP_PKEY *privkey, const SSL_PRIVATE_KEY_METHOD *privkey_method); // SSL_set_chain_and_key sets the certificate chain and private key for a TLS // client or server. References to the given |CRYPTO_BUFFER| and |EVP_PKEY| // objects are added as needed. Exactly one of |privkey| or |privkey_method| // may be non-NULL. Returns one on success and zero on error. OPENSSL_EXPORT int SSL_set_chain_and_key( SSL *ssl, CRYPTO_BUFFER *const *certs, size_t num_certs, EVP_PKEY *privkey, const SSL_PRIVATE_KEY_METHOD *privkey_method); // SSL_CTX_get0_chain returns the list of |CRYPTO_BUFFER|s that were set by // |SSL_CTX_set_chain_and_key|. Reference counts are not incremented by this // call. The return value may be |NULL| if no chain has been set. // // (Note: if a chain was configured by non-|CRYPTO_BUFFER|-based functions then // the return value is undefined and, even if not NULL, the stack itself may // contain nullptrs. Thus you shouldn't mix this function with // non-|CRYPTO_BUFFER| functions for manipulating the chain.) OPENSSL_EXPORT const STACK_OF(CRYPTO_BUFFER) *SSL_CTX_get0_chain( const SSL_CTX *ctx); // SSL_get0_chain returns the list of |CRYPTO_BUFFER|s that were set by // |SSL_set_chain_and_key|, unless they have been discarded. Reference counts // are not incremented by this call. The return value may be |NULL| if no chain // has been set. // // (Note: if a chain was configured by non-|CRYPTO_BUFFER|-based functions then // the return value is undefined and, even if not NULL, the stack itself may // contain nullptrs. Thus you shouldn't mix this function with // non-|CRYPTO_BUFFER| functions for manipulating the chain.) // // This function may return nullptr if a handshake has completed even if // |SSL_set_chain_and_key| was previously called, since the configuration // containing the certificates is typically cleared after handshake completion. 
OPENSSL_EXPORT const STACK_OF(CRYPTO_BUFFER) *SSL_get0_chain(const SSL *ssl); // SSL_CTX_use_RSAPrivateKey sets |ctx|'s private key to |rsa|. It returns one // on success and zero on failure. OPENSSL_EXPORT int SSL_CTX_use_RSAPrivateKey(SSL_CTX *ctx, RSA *rsa); // SSL_use_RSAPrivateKey sets |ctx|'s private key to |rsa|. It returns one on // success and zero on failure. OPENSSL_EXPORT int SSL_use_RSAPrivateKey(SSL *ssl, RSA *rsa); // The following functions configure certificates or private keys but take as // input DER-encoded structures. They return one on success and zero on // failure. OPENSSL_EXPORT int SSL_CTX_use_certificate_ASN1(SSL_CTX *ctx, size_t der_len, const uint8_t *der); OPENSSL_EXPORT int SSL_use_certificate_ASN1(SSL *ssl, const uint8_t *der, size_t der_len); OPENSSL_EXPORT int SSL_CTX_use_PrivateKey_ASN1(int pk, SSL_CTX *ctx, const uint8_t *der, size_t der_len); OPENSSL_EXPORT int SSL_use_PrivateKey_ASN1(int type, SSL *ssl, const uint8_t *der, size_t der_len); OPENSSL_EXPORT int SSL_CTX_use_RSAPrivateKey_ASN1(SSL_CTX *ctx, const uint8_t *der, size_t der_len); OPENSSL_EXPORT int SSL_use_RSAPrivateKey_ASN1(SSL *ssl, const uint8_t *der, size_t der_len); // The following functions configure certificates or private keys but take as // input files to read from. They return one on success and zero on failure. The // |type| parameter is one of the |SSL_FILETYPE_*| values and determines whether // the file's contents are read as PEM or DER. 
#define SSL_FILETYPE_PEM 1 #define SSL_FILETYPE_ASN1 2 OPENSSL_EXPORT int SSL_CTX_use_RSAPrivateKey_file(SSL_CTX *ctx, const char *file, int type); OPENSSL_EXPORT int SSL_use_RSAPrivateKey_file(SSL *ssl, const char *file, int type); OPENSSL_EXPORT int SSL_CTX_use_certificate_file(SSL_CTX *ctx, const char *file, int type); OPENSSL_EXPORT int SSL_use_certificate_file(SSL *ssl, const char *file, int type); OPENSSL_EXPORT int SSL_CTX_use_PrivateKey_file(SSL_CTX *ctx, const char *file, int type); OPENSSL_EXPORT int SSL_use_PrivateKey_file(SSL *ssl, const char *file, int type); // SSL_CTX_use_certificate_chain_file configures certificates for |ctx|. It // reads the contents of |file| as a PEM-encoded leaf certificate followed // optionally by the certificate chain to send to the peer. It returns one on // success and zero on failure. // // WARNING: If the input contains "TRUSTED CERTIFICATE" PEM blocks, this // function parses auxiliary properties as in |d2i_X509_AUX|. Passing untrusted // input to this function allows an attacker to influence those properties. See // |d2i_X509_AUX| for details. OPENSSL_EXPORT int SSL_CTX_use_certificate_chain_file(SSL_CTX *ctx, const char *file); // SSL_CTX_set_default_passwd_cb sets the password callback for PEM-based // convenience functions called on |ctx|. OPENSSL_EXPORT void SSL_CTX_set_default_passwd_cb(SSL_CTX *ctx, pem_password_cb *cb); // SSL_CTX_get_default_passwd_cb returns the callback set by // |SSL_CTX_set_default_passwd_cb|. OPENSSL_EXPORT pem_password_cb *SSL_CTX_get_default_passwd_cb( const SSL_CTX *ctx); // SSL_CTX_set_default_passwd_cb_userdata sets the userdata parameter for // |ctx|'s password callback. OPENSSL_EXPORT void SSL_CTX_set_default_passwd_cb_userdata(SSL_CTX *ctx, void *data); // SSL_CTX_get_default_passwd_cb_userdata returns the userdata parameter set by // |SSL_CTX_set_default_passwd_cb_userdata|. OPENSSL_EXPORT void *SSL_CTX_get_default_passwd_cb_userdata(const SSL_CTX *ctx); // Custom private keys. 
enum ssl_private_key_result_t BORINGSSL_ENUM_INT { ssl_private_key_success, ssl_private_key_retry, ssl_private_key_failure, }; // ssl_private_key_method_st (aka |SSL_PRIVATE_KEY_METHOD|) describes private // key hooks. This is used to off-load signing operations to a custom, // potentially asynchronous, backend. Metadata about the key such as the type // and size are parsed out of the certificate. struct ssl_private_key_method_st { // sign signs the message |in| in using the specified signature algorithm. On // success, it returns |ssl_private_key_success| and writes at most |max_out| // bytes of signature data to |out| and sets |*out_len| to the number of bytes // written. On failure, it returns |ssl_private_key_failure|. If the operation // has not completed, it returns |ssl_private_key_retry|. |sign| should // arrange for the high-level operation on |ssl| to be retried when the // operation is completed. This will result in a call to |complete|. // // |signature_algorithm| is one of the |SSL_SIGN_*| values, as defined in TLS // 1.3. Note that, in TLS 1.2, ECDSA algorithms do not require that curve // sizes match hash sizes, so the curve portion of |SSL_SIGN_ECDSA_*| values // must be ignored. BoringSSL will internally handle the curve matching logic // where appropriate. // // It is an error to call |sign| while another private key operation is in // progress on |ssl|. enum ssl_private_key_result_t (*sign)(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, uint16_t signature_algorithm, const uint8_t *in, size_t in_len); // decrypt decrypts |in_len| bytes of encrypted data from |in|. On success it // returns |ssl_private_key_success|, writes at most |max_out| bytes of // decrypted data to |out| and sets |*out_len| to the actual number of bytes // written. On failure it returns |ssl_private_key_failure|. If the operation // has not completed, it returns |ssl_private_key_retry|. 
The caller should // arrange for the high-level operation on |ssl| to be retried when the // operation is completed, which will result in a call to |complete|. This // function only works with RSA keys and should perform a raw RSA decryption // operation with no padding. // // It is an error to call |decrypt| while another private key operation is in // progress on |ssl|. enum ssl_private_key_result_t (*decrypt)(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *in, size_t in_len); // complete completes a pending operation. If the operation has completed, it // returns |ssl_private_key_success| and writes the result to |out| as in // |sign|. Otherwise, it returns |ssl_private_key_failure| on failure and // |ssl_private_key_retry| if the operation is still in progress. // // |complete| may be called arbitrarily many times before completion, but it // is an error to call |complete| if there is no pending operation in progress // on |ssl|. enum ssl_private_key_result_t (*complete)(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out); }; // SSL_set_private_key_method configures a custom private key on |ssl|. // |key_method| must remain valid for the lifetime of |ssl|. // // If using an RSA or ECDSA key, callers should configure signing capabilities // with |SSL_set_signing_algorithm_prefs|. Otherwise, BoringSSL may select a // signature algorithm that |key_method| does not support. OPENSSL_EXPORT void SSL_set_private_key_method( SSL *ssl, const SSL_PRIVATE_KEY_METHOD *key_method); // SSL_CTX_set_private_key_method configures a custom private key on |ctx|. // |key_method| must remain valid for the lifetime of |ctx|. // // If using an RSA or ECDSA key, callers should configure signing capabilities // with |SSL_CTX_set_signing_algorithm_prefs|. Otherwise, BoringSSL may select a // signature algorithm that |key_method| does not support. 
OPENSSL_EXPORT void SSL_CTX_set_private_key_method( SSL_CTX *ctx, const SSL_PRIVATE_KEY_METHOD *key_method); // SSL_CREDENTIAL_set_private_key_method configures a custom private key on // |cred|. |key_method| must remain valid for the lifetime of |cred|. It returns // one on success and zero if |cred| does not use private keys. // // If using an RSA or ECDSA key, callers should configure signing capabilities // with |SSL_CREDENTIAL_set1_signing_algorithm_prefs|. Otherwise, BoringSSL may // select a signature algorithm that |key_method| does not support. This is not // necessary for delegated credentials (see |SSL_CREDENTIAL_new_delegated|) // because delegated credentials only support a single signature algorithm. // // Functions in |key_method| will be passed an |SSL| object, but not |cred| // directly. Use |SSL_get0_selected_credential| to determine the selected // credential. From there, |SSL_CREDENTIAL_get_ex_data| can be used to look up // credential-specific state, such as a handle to the private key. OPENSSL_EXPORT int SSL_CREDENTIAL_set_private_key_method( SSL_CREDENTIAL *cred, const SSL_PRIVATE_KEY_METHOD *key_method); // SSL_CREDENTIAL_set_must_match_issuer sets the flag that this credential // should be considered only when it matches a peer request for a particular // issuer via a negotiation mechanism (such as the certificate_authorities // extension). OPENSSL_EXPORT void SSL_CREDENTIAL_set_must_match_issuer(SSL_CREDENTIAL *cred); // SSL_CREDENTIAL_clear_must_match_issuer clears the flag requiring issuer // matching, indicating this credential should be considered regardless of peer // issuer matching requests. (This is the default). 
OPENSSL_EXPORT void SSL_CREDENTIAL_clear_must_match_issuer( SSL_CREDENTIAL *cred); // SSL_CREDENTIAL_must_match_issuer returns the value of the flag indicating // that this credential should be considered only when it matches a peer request // for a particular issuer via a negotiation mechanism (such as the // certificate_authorities extension). OPENSSL_EXPORT int SSL_CREDENTIAL_must_match_issuer(const SSL_CREDENTIAL *cred); // SSL_can_release_private_key returns one if |ssl| will no longer call into the // private key and zero otherwise. If the function returns one, the caller can // release state associated with the private key. // // NOTE: This function assumes the caller does not use |SSL_clear| to reuse // |ssl| for a second connection. If |SSL_clear| is used, BoringSSL may still // use the private key on the second connection. OPENSSL_EXPORT int SSL_can_release_private_key(const SSL *ssl); // Cipher suites. // // |SSL_CIPHER| objects represent cipher suites. DEFINE_CONST_STACK_OF(SSL_CIPHER) // SSL_get_cipher_by_value returns the structure representing a TLS cipher // suite based on its assigned number, or NULL if unknown. See // https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-4. OPENSSL_EXPORT const SSL_CIPHER *SSL_get_cipher_by_value(uint16_t value); // SSL_CIPHER_get_id returns |cipher|'s non-IANA id. This is not its // IANA-assigned number, which is called the "value" here, although it may be // cast to a |uint16_t| to get it. OPENSSL_EXPORT uint32_t SSL_CIPHER_get_id(const SSL_CIPHER *cipher); // SSL_CIPHER_get_protocol_id returns |cipher|'s IANA-assigned number. OPENSSL_EXPORT uint16_t SSL_CIPHER_get_protocol_id(const SSL_CIPHER *cipher); // SSL_CIPHER_is_aead returns one if |cipher| uses an AEAD cipher. OPENSSL_EXPORT int SSL_CIPHER_is_aead(const SSL_CIPHER *cipher); // SSL_CIPHER_is_block_cipher returns one if |cipher| is a block cipher. 
OPENSSL_EXPORT int SSL_CIPHER_is_block_cipher(const SSL_CIPHER *cipher); // SSL_CIPHER_get_cipher_nid returns the NID for |cipher|'s bulk // cipher. Possible values are |NID_aes_128_gcm|, |NID_aes_256_gcm|, // |NID_chacha20_poly1305|, |NID_aes_128_cbc|, |NID_aes_256_cbc|, and // |NID_des_ede3_cbc|. OPENSSL_EXPORT int SSL_CIPHER_get_cipher_nid(const SSL_CIPHER *cipher); // SSL_CIPHER_get_digest_nid returns the NID for |cipher|'s HMAC if it is a // legacy cipher suite. For modern AEAD-based ciphers (see // |SSL_CIPHER_is_aead|), it returns |NID_undef|. // // Note this function only returns the legacy HMAC digest, not the PRF hash. OPENSSL_EXPORT int SSL_CIPHER_get_digest_nid(const SSL_CIPHER *cipher); // SSL_CIPHER_get_kx_nid returns the NID for |cipher|'s key exchange. This may // be |NID_kx_rsa|, |NID_kx_ecdhe|, or |NID_kx_psk| for TLS 1.2. In TLS 1.3, // cipher suites do not specify the key exchange, so this function returns // |NID_kx_any|. OPENSSL_EXPORT int SSL_CIPHER_get_kx_nid(const SSL_CIPHER *cipher); // SSL_CIPHER_get_auth_nid returns the NID for |cipher|'s authentication // type. This may be |NID_auth_rsa|, |NID_auth_ecdsa|, or |NID_auth_psk| for TLS // 1.2. In TLS 1.3, cipher suites do not specify authentication, so this // function returns |NID_auth_any|. OPENSSL_EXPORT int SSL_CIPHER_get_auth_nid(const SSL_CIPHER *cipher); // SSL_CIPHER_get_handshake_digest returns |cipher|'s PRF hash. If |cipher| // is a pre-TLS-1.2 cipher, it returns |EVP_md5_sha1| but note these ciphers use // SHA-256 in TLS 1.2. Other return values may be treated uniformly in all // applicable versions. OPENSSL_EXPORT const EVP_MD *SSL_CIPHER_get_handshake_digest( const SSL_CIPHER *cipher); // SSL_CIPHER_get_prf_nid behaves like |SSL_CIPHER_get_handshake_digest| but // returns the NID constant. Use |SSL_CIPHER_get_handshake_digest| instead. 
OPENSSL_EXPORT int SSL_CIPHER_get_prf_nid(const SSL_CIPHER *cipher); // SSL_CIPHER_get_min_version returns the minimum protocol version required // for |cipher|. OPENSSL_EXPORT uint16_t SSL_CIPHER_get_min_version(const SSL_CIPHER *cipher); // SSL_CIPHER_get_max_version returns the maximum protocol version that // supports |cipher|. OPENSSL_EXPORT uint16_t SSL_CIPHER_get_max_version(const SSL_CIPHER *cipher); // SSL_CIPHER_standard_name returns the standard IETF name for |cipher|. For // example, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256". OPENSSL_EXPORT const char *SSL_CIPHER_standard_name(const SSL_CIPHER *cipher); // SSL_CIPHER_get_name returns the OpenSSL name of |cipher|. For example, // "ECDHE-RSA-AES128-GCM-SHA256". Callers are recommended to use // |SSL_CIPHER_standard_name| instead. OPENSSL_EXPORT const char *SSL_CIPHER_get_name(const SSL_CIPHER *cipher); // SSL_CIPHER_get_kx_name returns a string that describes the key-exchange // method used by |cipher|. For example, "ECDHE_ECDSA". TLS 1.3 AEAD-only // ciphers return the string "GENERIC". OPENSSL_EXPORT const char *SSL_CIPHER_get_kx_name(const SSL_CIPHER *cipher); // SSL_CIPHER_get_bits returns the strength, in bits, of |cipher|. If // |out_alg_bits| is not NULL, it writes the number of bits consumed by the // symmetric algorithm to |*out_alg_bits|. OPENSSL_EXPORT int SSL_CIPHER_get_bits(const SSL_CIPHER *cipher, int *out_alg_bits); // SSL_get_all_cipher_names outputs a list of possible strings // |SSL_CIPHER_get_name| may return in this version of BoringSSL. It writes at // most |max_out| entries to |out| and returns the total number it would have // written, if |max_out| had been large enough. |max_out| may be initially set // to zero to size the output. // // This function is only intended to help initialize tables in callers that want // possible strings pre-declared. This list would not be suitable to set a list // of supported features. 
It is in no particular order, and may contain // placeholder, experimental, or deprecated values that do not apply to every // caller. Future versions of BoringSSL may also return strings not in this // list, so this does not apply if, say, sending strings across services. OPENSSL_EXPORT size_t SSL_get_all_cipher_names(const char **out, size_t max_out); // SSL_get_all_standard_cipher_names outputs a list of possible strings // |SSL_CIPHER_standard_name| may return in this version of BoringSSL. It writes // at most |max_out| entries to |out| and returns the total number it would have // written, if |max_out| had been large enough. |max_out| may be initially set // to zero to size the output. // // This function is only intended to help initialize tables in callers that want // possible strings pre-declared. This list would not be suitable to set a list // of supported features. It is in no particular order, and may contain // placeholder, experimental, or deprecated values that do not apply to every // caller. Future versions of BoringSSL may also return strings not in this // list, so this does not apply if, say, sending strings across services. OPENSSL_EXPORT size_t SSL_get_all_standard_cipher_names(const char **out, size_t max_out); // Cipher suite configuration. // // OpenSSL uses a mini-language to configure cipher suites. The language // maintains an ordered list of enabled ciphers, along with an ordered list of // disabled but available ciphers. Initially, all ciphers are disabled with a // default ordering. The cipher string is then interpreted as a sequence of // directives, separated by colons, each of which modifies this state. // // Most directives consist of a one character or empty opcode followed by a // selector which matches a subset of available ciphers. // // Available opcodes are: // // - The empty opcode enables and appends all matching disabled ciphers to the // end of the enabled list. 
The newly appended ciphers are ordered relative to
//   each other matching their order in the disabled list.
//
// - |-| disables all matching enabled ciphers and prepends them to the disabled
//   list, with relative order from the enabled list preserved. This means the
//   most recently disabled ciphers get highest preference relative to other
//   disabled ciphers if re-enabled.
//
// - |+| moves all matching enabled ciphers to the end of the enabled list, with
//   relative order preserved.
//
// - |!| deletes all matching ciphers, enabled or not, from either list. Deleted
//   ciphers will not be matched by future operations.
//
// A selector may be a specific cipher (using either the standard or OpenSSL
// name for the cipher) or one or more rules separated by |+|. The final
// selector matches the intersection of each rule. For instance, |AESGCM+aECDSA|
// matches ECDSA-authenticated AES-GCM ciphers.
//
// Available cipher rules are:
//
// - |ALL| matches all ciphers, except for deprecated ciphers which must be
//   named explicitly.
//
// - |kRSA|, |kDHE|, |kECDHE|, and |kPSK| match ciphers using plain RSA, DHE,
//   ECDHE, and plain PSK key exchanges, respectively. Note that ECDHE_PSK is
//   matched by |kECDHE| and not |kPSK|.
//
// - |aRSA|, |aECDSA|, and |aPSK| match ciphers authenticated by RSA, ECDSA, and
//   a pre-shared key, respectively.
//
// - |RSA|, |DHE|, |ECDHE|, |PSK|, and |ECDSA| are aliases for the
//   corresponding |k*| or |a*| cipher rule. |RSA| is an alias for |kRSA|, not
//   |aRSA|.
//
// - |3DES|, |AES128|, |AES256|, |AES|, |AESGCM|, |CHACHA20| match ciphers
//   whose bulk cipher use the corresponding encryption scheme. Note that
//   |AES|, |AES128|, and |AES256| match both CBC and GCM ciphers.
//
// - |SHA1|, and its alias |SHA|, match legacy cipher suites using HMAC-SHA1.
//
// Deprecated cipher rules:
//
// - |kEDH|, |EDH|, |kEECDH|, and |EECDH| are legacy aliases for |kDHE|, |DHE|,
//   |kECDHE|, and |ECDHE|, respectively.
// // - |HIGH| is an alias for |ALL|. // // - |FIPS| is an alias for |HIGH|. // // - |SSLv3| and |TLSv1| match ciphers available in TLS 1.1 or earlier. // |TLSv1_2| matches ciphers new in TLS 1.2. This is confusing and should not // be used. // // Unknown rules are silently ignored by legacy APIs, and rejected by APIs with // "strict" in the name, which should be preferred. Cipher lists can be long // and it's easy to commit typos. Strict functions will also reject the use of // spaces, semi-colons and commas as alternative separators. // // The special |@STRENGTH| directive will sort all enabled ciphers by strength. // // The |DEFAULT| directive, when appearing at the front of the string, expands // to the default ordering of available ciphers. // // If configuring a server, one may also configure equal-preference groups to // partially respect the client's preferences when // |SSL_OP_CIPHER_SERVER_PREFERENCE| is enabled. Ciphers in an equal-preference // group have equal priority and use the client order. This may be used to // enforce that AEADs are preferred but select AES-GCM vs. ChaCha20-Poly1305 // based on client preferences. An equal-preference is specified with square // brackets, combining multiple selectors separated by |. For example: // // [TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256|TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256] // // Once an equal-preference group is used, future directives must be // opcode-less. Inside an equal-preference group, spaces are not allowed. // // TLS 1.3 ciphers do not participate in this mechanism and instead have a // built-in preference order. Functions to set cipher lists do not affect TLS // 1.3, and functions to query the cipher list do not include TLS 1.3 ciphers. // SSL_DEFAULT_CIPHER_LIST is the default cipher suite configuration. It is // substituted when a cipher string starts with 'DEFAULT'. 
#define SSL_DEFAULT_CIPHER_LIST "ALL" // SSL_CTX_set_strict_cipher_list configures the cipher list for |ctx|, // evaluating |str| as a cipher string and returning error if |str| contains // anything meaningless. It returns one on success and zero on failure. OPENSSL_EXPORT int SSL_CTX_set_strict_cipher_list(SSL_CTX *ctx, const char *str); // SSL_CTX_set_cipher_list configures the cipher list for |ctx|, evaluating // |str| as a cipher string. It returns one on success and zero on failure. // // Prefer to use |SSL_CTX_set_strict_cipher_list|. This function tolerates // garbage inputs, unless an empty cipher list results. OPENSSL_EXPORT int SSL_CTX_set_cipher_list(SSL_CTX *ctx, const char *str); // SSL_set_strict_cipher_list configures the cipher list for |ssl|, evaluating // |str| as a cipher string and returning error if |str| contains anything // meaningless. It returns one on success and zero on failure. OPENSSL_EXPORT int SSL_set_strict_cipher_list(SSL *ssl, const char *str); // SSL_set_cipher_list configures the cipher list for |ssl|, evaluating |str| as // a cipher string. It returns one on success and zero on failure. // // Prefer to use |SSL_set_strict_cipher_list|. This function tolerates garbage // inputs, unless an empty cipher list results. OPENSSL_EXPORT int SSL_set_cipher_list(SSL *ssl, const char *str); // SSL_CTX_get_ciphers returns the cipher list for |ctx|, in order of // preference. OPENSSL_EXPORT STACK_OF(SSL_CIPHER) *SSL_CTX_get_ciphers(const SSL_CTX *ctx); // SSL_CTX_cipher_in_group returns one if the |i|th cipher (see // |SSL_CTX_get_ciphers|) is in the same equipreference group as the one // following it and zero otherwise. OPENSSL_EXPORT int SSL_CTX_cipher_in_group(const SSL_CTX *ctx, size_t i); // SSL_get_ciphers returns the cipher list for |ssl|, in order of preference. OPENSSL_EXPORT STACK_OF(SSL_CIPHER) *SSL_get_ciphers(const SSL *ssl); // Connection information. 
// SSL_is_init_finished returns one if |ssl| has completed its initial handshake // and has no pending handshake. It returns zero otherwise. OPENSSL_EXPORT int SSL_is_init_finished(const SSL *ssl); // SSL_in_init returns one if |ssl| has a pending handshake and zero // otherwise. OPENSSL_EXPORT int SSL_in_init(const SSL *ssl); // SSL_in_false_start returns one if |ssl| has a pending handshake that is in // False Start. |SSL_write| may be called at this point without waiting for the // peer, but |SSL_read| will complete the handshake before accepting application // data. // // See also |SSL_MODE_ENABLE_FALSE_START|. OPENSSL_EXPORT int SSL_in_false_start(const SSL *ssl); // SSL_get_peer_certificate returns the peer's leaf certificate or NULL if the // peer did not use certificates. The caller must call |X509_free| on the // result to release it. OPENSSL_EXPORT X509 *SSL_get_peer_certificate(const SSL *ssl); // SSL_get_peer_cert_chain returns the peer's certificate chain or NULL if // unavailable or the peer did not use certificates. This is the unverified list // of certificates as sent by the peer, not the final chain built during // verification. The caller does not take ownership of the result. // // WARNING: This function behaves differently between client and server. If // |ssl| is a server, the returned chain does not include the leaf certificate. // If a client, it does. OPENSSL_EXPORT STACK_OF(X509) *SSL_get_peer_cert_chain(const SSL *ssl); // SSL_get_peer_full_cert_chain returns the peer's certificate chain, or NULL if // unavailable or the peer did not use certificates. This is the unverified list // of certificates as sent by the peer, not the final chain built during // verification. The caller does not take ownership of the result. // // This is the same as |SSL_get_peer_cert_chain| except that this function // always returns the full chain, i.e. the first element of the return value // (if any) will be the leaf certificate. 
In contrast,
// |SSL_get_peer_cert_chain| returns only the intermediate certificates if the
// |ssl| is a server.
OPENSSL_EXPORT STACK_OF(X509) *SSL_get_peer_full_cert_chain(const SSL *ssl);

// SSL_get0_peer_certificates returns the peer's certificate chain, or NULL if
// unavailable or the peer did not use certificates. This is the unverified list
// of certificates as sent by the peer, not the final chain built during
// verification. The caller does not take ownership of the result.
//
// This is the |CRYPTO_BUFFER| variant of |SSL_get_peer_full_cert_chain|.
OPENSSL_EXPORT const STACK_OF(CRYPTO_BUFFER) *SSL_get0_peer_certificates(
    const SSL *ssl);

// SSL_get0_signed_cert_timestamp_list sets |*out| and |*out_len| to point to
// |*out_len| bytes of SCT information from the server. This is only valid if
// |ssl| is a client. The SCT information is a SignedCertificateTimestampList
// (including the two leading length bytes).
// See https://tools.ietf.org/html/rfc6962#section-3.3
// If no SCT was received then |*out_len| will be zero on return.
//
// WARNING: the returned data is not guaranteed to be well formed.
OPENSSL_EXPORT void SSL_get0_signed_cert_timestamp_list(const SSL *ssl,
                                                        const uint8_t **out,
                                                        size_t *out_len);

// SSL_get0_ocsp_response sets |*out| and |*out_len| to point to |*out_len|
// bytes of an OCSP response from the server. This is the DER encoding of an
// OCSPResponse type as defined in RFC 2560.
//
// WARNING: the returned data is not guaranteed to be well formed.
OPENSSL_EXPORT void SSL_get0_ocsp_response(const SSL *ssl, const uint8_t **out,
                                           size_t *out_len);

// SSL_get_tls_unique writes at most |max_out| bytes of the tls-unique value
// for |ssl| to |out| and sets |*out_len| to the number of bytes written. It
// returns one on success or zero on error. In general |max_out| should be at
// least 12.
//
// This function will always fail if the initial handshake has not completed.
// The tls-unique value will change after a renegotiation but, since // renegotiations can be initiated by the server at any point, the higher-level // protocol must either leave them disabled or define states in which the // tls-unique value can be read. // // The tls-unique value is defined by // https://tools.ietf.org/html/rfc5929#section-3.1. Due to a weakness in the // TLS protocol, tls-unique is broken for resumed connections unless the // Extended Master Secret extension is negotiated. Thus this function will // return zero if |ssl| performed session resumption unless EMS was used when // negotiating the original session. OPENSSL_EXPORT int SSL_get_tls_unique(const SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out); // SSL_get_extms_support returns one if the Extended Master Secret extension or // TLS 1.3 was negotiated. Otherwise, it returns zero. OPENSSL_EXPORT int SSL_get_extms_support(const SSL *ssl); // SSL_get_current_cipher returns cipher suite used by |ssl|, or NULL if it has // not been negotiated yet. OPENSSL_EXPORT const SSL_CIPHER *SSL_get_current_cipher(const SSL *ssl); // SSL_session_reused returns one if |ssl| performed an abbreviated handshake // and zero otherwise. // // TODO(davidben): Hammer down the semantics of this API while a handshake, // initial or renego, is in progress. OPENSSL_EXPORT int SSL_session_reused(const SSL *ssl); // SSL_get_secure_renegotiation_support returns one if the peer supports secure // renegotiation (RFC 5746) or TLS 1.3. Otherwise, it returns zero. OPENSSL_EXPORT int SSL_get_secure_renegotiation_support(const SSL *ssl); // SSL_export_keying_material exports a connection-specific secret from |ssl|, // as specified in RFC 5705. It writes |out_len| bytes to |out| given a label // and optional context. If |use_context| is zero, the |context| parameter is // ignored. Prior to TLS 1.3, using a zero-length context and using no context // would give different output. 
// // It returns one on success and zero otherwise. OPENSSL_EXPORT int SSL_export_keying_material( SSL *ssl, uint8_t *out, size_t out_len, const char *label, size_t label_len, const uint8_t *context, size_t context_len, int use_context); // Sessions. // // An |SSL_SESSION| represents an SSL session that may be resumed in an // abbreviated handshake. It is reference-counted and immutable. Once // established, an |SSL_SESSION| may be shared by multiple |SSL| objects on // different threads and must not be modified. // // Note the TLS notion of "session" is not suitable for application-level // session state. It is an optional caching mechanism for the handshake. Not all // connections within an application-level session will reuse TLS sessions. TLS // sessions may be dropped by the client or ignored by the server at any time. DECLARE_PEM_rw(SSL_SESSION, SSL_SESSION) // SSL_SESSION_new returns a newly-allocated blank |SSL_SESSION| or NULL on // error. This may be useful when writing tests but should otherwise not be // used. OPENSSL_EXPORT SSL_SESSION *SSL_SESSION_new(const SSL_CTX *ctx); // SSL_SESSION_up_ref increments the reference count of |session| and returns // one. OPENSSL_EXPORT int SSL_SESSION_up_ref(SSL_SESSION *session); // SSL_SESSION_free decrements the reference count of |session|. If it reaches // zero, all data referenced by |session| and |session| itself are released. OPENSSL_EXPORT void SSL_SESSION_free(SSL_SESSION *session); // SSL_SESSION_to_bytes serializes |in| into a newly allocated buffer and sets // |*out_data| to that buffer and |*out_len| to its length. The caller takes // ownership of the buffer and must call |OPENSSL_free| when done. It returns // one on success and zero on error. OPENSSL_EXPORT int SSL_SESSION_to_bytes(const SSL_SESSION *in, uint8_t **out_data, size_t *out_len); // SSL_SESSION_to_bytes_for_ticket serializes |in|, but excludes the session // identification information, namely the session ID and ticket. 
OPENSSL_EXPORT int SSL_SESSION_to_bytes_for_ticket(const SSL_SESSION *in, uint8_t **out_data, size_t *out_len); // SSL_SESSION_from_bytes parses |in_len| bytes from |in| as an SSL_SESSION. It // returns a newly-allocated |SSL_SESSION| on success or NULL on error. OPENSSL_EXPORT SSL_SESSION *SSL_SESSION_from_bytes(const uint8_t *in, size_t in_len, const SSL_CTX *ctx); // SSL_SESSION_get_version returns a string describing the TLS or DTLS version // |session| was established at. For example, "TLSv1.2" or "DTLSv1". OPENSSL_EXPORT const char *SSL_SESSION_get_version(const SSL_SESSION *session); // SSL_SESSION_get_protocol_version returns the TLS or DTLS version |session| // was established at. OPENSSL_EXPORT uint16_t SSL_SESSION_get_protocol_version(const SSL_SESSION *session); // SSL_SESSION_set_protocol_version sets |session|'s TLS or DTLS version to // |version|. This may be useful when writing tests but should otherwise not be // used. It returns one on success and zero on error. OPENSSL_EXPORT int SSL_SESSION_set_protocol_version(SSL_SESSION *session, uint16_t version); // SSL_MAX_SSL_SESSION_ID_LENGTH is the maximum length of an SSL session ID. #define SSL_MAX_SSL_SESSION_ID_LENGTH 32 // SSL_SESSION_get_id returns a pointer to a buffer containing |session|'s // session ID and sets |*out_len| to its length. // // This function should only be used for implementing a TLS session cache. TLS // sessions are not suitable for application-level session state, and a session // ID is an implementation detail of the TLS resumption handshake mechanism. Not // all resumption flows use session IDs, and not all connections within an // application-level session will reuse TLS sessions. // // To determine if resumption occurred, use |SSL_session_reused| instead. // Comparing session IDs will not give the right result in all cases. // // As a workaround for some broken applications, BoringSSL sometimes synthesizes // arbitrary session IDs for non-ID-based sessions. 
This behavior may be
// removed in the future.
OPENSSL_EXPORT const uint8_t *SSL_SESSION_get_id(const SSL_SESSION *session,
                                                 unsigned *out_len);

// SSL_SESSION_set1_id sets |session|'s session ID to |sid|. It returns one on
// success and zero on error. This function may be useful in writing tests but
// otherwise should not be used.
OPENSSL_EXPORT int SSL_SESSION_set1_id(SSL_SESSION *session, const uint8_t *sid,
                                       size_t sid_len);

// SSL_SESSION_get_time returns the time at which |session| was established in
// seconds since the UNIX epoch.
OPENSSL_EXPORT uint64_t SSL_SESSION_get_time(const SSL_SESSION *session);

// SSL_SESSION_get_timeout returns the lifetime of |session| in seconds.
OPENSSL_EXPORT uint32_t SSL_SESSION_get_timeout(const SSL_SESSION *session);

// SSL_SESSION_get0_peer returns the peer leaf certificate stored in
// |session|.
//
// TODO(davidben): This should return a const X509 *.
OPENSSL_EXPORT X509 *SSL_SESSION_get0_peer(const SSL_SESSION *session);

// SSL_SESSION_get0_peer_certificates returns the peer certificate chain stored
// in |session|, or NULL if the peer did not use certificates. This is the
// unverified list of certificates as sent by the peer, not the final chain
// built during verification. The caller does not take ownership of the result.
OPENSSL_EXPORT const STACK_OF(CRYPTO_BUFFER) *
SSL_SESSION_get0_peer_certificates(const SSL_SESSION *session);

// SSL_SESSION_get0_signed_cert_timestamp_list sets |*out| and |*out_len| to
// point to |*out_len| bytes of SCT information stored in |session|. This is
// only valid for client sessions. The SCT information is a
// SignedCertificateTimestampList (including the two leading length bytes). See
// https://tools.ietf.org/html/rfc6962#section-3.3 If no SCT was received then
// |*out_len| will be zero on return.
//
// WARNING: the returned data is not guaranteed to be well formed.
OPENSSL_EXPORT void SSL_SESSION_get0_signed_cert_timestamp_list( const SSL_SESSION *session, const uint8_t **out, size_t *out_len); // SSL_SESSION_get0_ocsp_response sets |*out| and |*out_len| to point to // |*out_len| bytes of an OCSP response from the server. This is the DER // encoding of an OCSPResponse type as defined in RFC 2560. // // WARNING: the returned data is not guaranteed to be well formed. OPENSSL_EXPORT void SSL_SESSION_get0_ocsp_response(const SSL_SESSION *session, const uint8_t **out, size_t *out_len); // SSL_MAX_MASTER_KEY_LENGTH is the maximum length of a master secret. #define SSL_MAX_MASTER_KEY_LENGTH 48 // SSL_SESSION_get_master_key writes up to |max_out| bytes of |session|'s secret // to |out| and returns the number of bytes written. If |max_out| is zero, it // returns the size of the secret. OPENSSL_EXPORT size_t SSL_SESSION_get_master_key(const SSL_SESSION *session, uint8_t *out, size_t max_out); // SSL_SESSION_set_time sets |session|'s creation time to |time| and returns // |time|. This function may be useful in writing tests but otherwise should not // be used. OPENSSL_EXPORT uint64_t SSL_SESSION_set_time(SSL_SESSION *session, uint64_t time); // SSL_SESSION_set_timeout sets |session|'s timeout to |timeout| and returns // one. This function may be useful in writing tests but otherwise should not // be used. OPENSSL_EXPORT uint32_t SSL_SESSION_set_timeout(SSL_SESSION *session, uint32_t timeout); // SSL_SESSION_get0_id_context returns a pointer to a buffer containing // |session|'s session ID context (see |SSL_CTX_set_session_id_context|) and // sets |*out_len| to its length. OPENSSL_EXPORT const uint8_t *SSL_SESSION_get0_id_context( const SSL_SESSION *session, unsigned *out_len); // SSL_SESSION_set1_id_context sets |session|'s session ID context (see // |SSL_CTX_set_session_id_context|) to |sid_ctx|. It returns one on success and // zero on error. This function may be useful in writing tests but otherwise // should not be used. 
OPENSSL_EXPORT int SSL_SESSION_set1_id_context(SSL_SESSION *session, const uint8_t *sid_ctx, size_t sid_ctx_len); // SSL_SESSION_should_be_single_use returns one if |session| should be // single-use (TLS 1.3 and later) and zero otherwise. // // If this function returns one, clients retain multiple sessions and use each // only once. This prevents passive observers from correlating connections with // tickets. See RFC 8446, appendix C.4. If it returns zero, |session| cannot be // used without leaking a correlator. OPENSSL_EXPORT int SSL_SESSION_should_be_single_use(const SSL_SESSION *session); // SSL_SESSION_is_resumable returns one if |session| is complete and contains a // session ID or ticket. It returns zero otherwise. Note this function does not // ensure |session| will be resumed. It may be expired, dropped by the server, // or associated with incompatible parameters. OPENSSL_EXPORT int SSL_SESSION_is_resumable(const SSL_SESSION *session); // SSL_SESSION_has_ticket returns one if |session| has a ticket and zero // otherwise. OPENSSL_EXPORT int SSL_SESSION_has_ticket(const SSL_SESSION *session); // SSL_SESSION_get0_ticket sets |*out_ticket| and |*out_len| to |session|'s // ticket, or NULL and zero if it does not have one. |out_ticket| may be NULL // if only the ticket length is needed. OPENSSL_EXPORT void SSL_SESSION_get0_ticket(const SSL_SESSION *session, const uint8_t **out_ticket, size_t *out_len); // SSL_SESSION_set_ticket sets |session|'s ticket to |ticket|. It returns one on // success and zero on error. This function may be useful in writing tests but // otherwise should not be used. OPENSSL_EXPORT int SSL_SESSION_set_ticket(SSL_SESSION *session, const uint8_t *ticket, size_t ticket_len); // SSL_SESSION_get_ticket_lifetime_hint returns ticket lifetime hint of // |session| in seconds or zero if none was set. 
OPENSSL_EXPORT uint32_t SSL_SESSION_get_ticket_lifetime_hint(const SSL_SESSION *session); // SSL_SESSION_get0_cipher returns the cipher negotiated by the connection which // established |session|. // // Note that, in TLS 1.3, there is no guarantee that resumptions with |session| // will use that cipher. Prefer calling |SSL_get_current_cipher| on the |SSL| // instead. OPENSSL_EXPORT const SSL_CIPHER *SSL_SESSION_get0_cipher( const SSL_SESSION *session); // SSL_SESSION_has_peer_sha256 returns one if |session| has a SHA-256 hash of // the peer's certificate retained and zero if the peer did not present a // certificate or if this was not enabled when |session| was created. See also // |SSL_CTX_set_retain_only_sha256_of_client_certs|. OPENSSL_EXPORT int SSL_SESSION_has_peer_sha256(const SSL_SESSION *session); // SSL_SESSION_get0_peer_sha256 sets |*out_ptr| and |*out_len| to the SHA-256 // hash of the peer certificate retained in |session|, or NULL and zero if it // does not have one. See also |SSL_CTX_set_retain_only_sha256_of_client_certs|. OPENSSL_EXPORT void SSL_SESSION_get0_peer_sha256(const SSL_SESSION *session, const uint8_t **out_ptr, size_t *out_len); // Session caching. // // Session caching allows connections to be established more efficiently based // on saved parameters from a previous connection, called a session (see // |SSL_SESSION|). The client offers a saved session, using an opaque identifier // from a previous connection. The server may accept the session, if it has the // parameters available. Otherwise, it will decline and continue with a full // handshake. // // This requires both the client and the server to retain session state. A // client does so with a stateful session cache. A server may do the same or, if // supported by both sides, statelessly using session tickets. For more // information on the latter, see the next section. // // For a server, the library implements a built-in internal session cache as an // in-memory hash table. 
Servers may also use |SSL_CTX_sess_set_get_cb| and // |SSL_CTX_sess_set_new_cb| to implement a custom external session cache. In // particular, this may be used to share a session cache between multiple // servers in a large deployment. An external cache may be used in addition to // or instead of the internal one. Use |SSL_CTX_set_session_cache_mode| to // toggle the internal cache. // // For a client, the only option is an external session cache. Clients may use // |SSL_CTX_sess_set_new_cb| to register a callback for when new sessions are // available. These may be cached and, in subsequent compatible connections, // configured with |SSL_set_session|. // // Note that offering or accepting a session short-circuits certificate // verification and most parameter negotiation. Resuming sessions across // different contexts may result in security failures and surprising // behavior. For a typical client, this means sessions for different hosts must // be cached under different keys. A client that connects to the same host with, // e.g., different cipher suite settings or client certificates should also use // separate session caches between those contexts. Servers should also partition // session caches between SNI hosts with |SSL_CTX_set_session_id_context|. // // Note also, in TLS 1.2 and earlier, offering sessions allows passive observers // to correlate different client connections. TLS 1.3 and later fix this, // provided clients use sessions at most once. Session caches are managed by the // caller in BoringSSL, so this must be implemented externally. See // |SSL_SESSION_should_be_single_use| for details. // SSL_SESS_CACHE_OFF disables all session caching. #define SSL_SESS_CACHE_OFF 0x0000 // SSL_SESS_CACHE_CLIENT enables session caching for a client. The internal // cache is never used on a client, so this only enables the callbacks. #define SSL_SESS_CACHE_CLIENT 0x0001 // SSL_SESS_CACHE_SERVER enables session caching for a server. 
#define SSL_SESS_CACHE_SERVER 0x0002 // SSL_SESS_CACHE_BOTH enables session caching for both client and server. #define SSL_SESS_CACHE_BOTH (SSL_SESS_CACHE_CLIENT | SSL_SESS_CACHE_SERVER) // SSL_SESS_CACHE_NO_AUTO_CLEAR disables automatically calling // |SSL_CTX_flush_sessions| every 255 connections. #define SSL_SESS_CACHE_NO_AUTO_CLEAR 0x0080 // SSL_SESS_CACHE_NO_INTERNAL_LOOKUP, on a server, disables looking up a session // from the internal session cache. #define SSL_SESS_CACHE_NO_INTERNAL_LOOKUP 0x0100 // SSL_SESS_CACHE_NO_INTERNAL_STORE, on a server, disables storing sessions in // the internal session cache. #define SSL_SESS_CACHE_NO_INTERNAL_STORE 0x0200 // SSL_SESS_CACHE_NO_INTERNAL, on a server, disables the internal session // cache. #define SSL_SESS_CACHE_NO_INTERNAL \ (SSL_SESS_CACHE_NO_INTERNAL_LOOKUP | SSL_SESS_CACHE_NO_INTERNAL_STORE) // SSL_CTX_set_session_cache_mode sets the session cache mode bits for |ctx| to // |mode|. It returns the previous value. OPENSSL_EXPORT int SSL_CTX_set_session_cache_mode(SSL_CTX *ctx, int mode); // SSL_CTX_get_session_cache_mode returns the session cache mode bits for // |ctx| OPENSSL_EXPORT int SSL_CTX_get_session_cache_mode(const SSL_CTX *ctx); // SSL_set_session, for a client, configures |ssl| to offer to resume |session| // in the initial handshake and returns one. The caller retains ownership of // |session|. Note that configuring a session assumes the authentication in the // session is valid. For callers that wish to revalidate the session before // offering, see |SSL_SESSION_get0_peer_certificates|, // |SSL_SESSION_get0_signed_cert_timestamp_list|, and // |SSL_SESSION_get0_ocsp_response|. // // It is an error to call this function after the handshake has begun. OPENSSL_EXPORT int SSL_set_session(SSL *ssl, SSL_SESSION *session); // SSL_DEFAULT_SESSION_TIMEOUT is the default lifetime, in seconds, of a // session in TLS 1.2 or earlier. 
This is how long we are willing to use the // secret to encrypt traffic without fresh key material. #define SSL_DEFAULT_SESSION_TIMEOUT (2 * 60 * 60) // SSL_DEFAULT_SESSION_PSK_DHE_TIMEOUT is the default lifetime, in seconds, of a // session for TLS 1.3 psk_dhe_ke. This is how long we are willing to use the // secret as an authenticator. #define SSL_DEFAULT_SESSION_PSK_DHE_TIMEOUT (2 * 24 * 60 * 60) // SSL_DEFAULT_SESSION_AUTH_TIMEOUT is the default non-renewable lifetime, in // seconds, of a TLS 1.3 session. This is how long we are willing to trust the // signature in the initial handshake. #define SSL_DEFAULT_SESSION_AUTH_TIMEOUT (7 * 24 * 60 * 60) // SSL_CTX_set_timeout sets the lifetime, in seconds, of TLS 1.2 (or earlier) // sessions created in |ctx| to |timeout|. OPENSSL_EXPORT uint32_t SSL_CTX_set_timeout(SSL_CTX *ctx, uint32_t timeout); // SSL_CTX_set_session_psk_dhe_timeout sets the lifetime, in seconds, of TLS 1.3 // sessions created in |ctx| to |timeout|. OPENSSL_EXPORT void SSL_CTX_set_session_psk_dhe_timeout(SSL_CTX *ctx, uint32_t timeout); // SSL_CTX_get_timeout returns the lifetime, in seconds, of TLS 1.2 (or earlier) // sessions created in |ctx|. OPENSSL_EXPORT uint32_t SSL_CTX_get_timeout(const SSL_CTX *ctx); // SSL_MAX_SID_CTX_LENGTH is the maximum length of a session ID context. #define SSL_MAX_SID_CTX_LENGTH 32 // SSL_CTX_set_session_id_context sets |ctx|'s session ID context to |sid_ctx|. // It returns one on success and zero on error. The session ID context is an // application-defined opaque byte string. A session will not be used in a // connection without a matching session ID context. // // For a server, if |SSL_VERIFY_PEER| is enabled, it is an error to not set a // session ID context. OPENSSL_EXPORT int SSL_CTX_set_session_id_context(SSL_CTX *ctx, const uint8_t *sid_ctx, size_t sid_ctx_len); // SSL_set_session_id_context sets |ssl|'s session ID context to |sid_ctx|. It // returns one on success and zero on error. 
See also // |SSL_CTX_set_session_id_context|. OPENSSL_EXPORT int SSL_set_session_id_context(SSL *ssl, const uint8_t *sid_ctx, size_t sid_ctx_len); // SSL_get0_session_id_context returns a pointer to |ssl|'s session ID context // and sets |*out_len| to its length. It returns NULL on error. OPENSSL_EXPORT const uint8_t *SSL_get0_session_id_context(const SSL *ssl, size_t *out_len); // SSL_SESSION_CACHE_MAX_SIZE_DEFAULT is the default maximum size of a session // cache. #define SSL_SESSION_CACHE_MAX_SIZE_DEFAULT (1024 * 20) // SSL_CTX_sess_set_cache_size sets the maximum size of |ctx|'s internal session // cache to |size|. It returns the previous value. OPENSSL_EXPORT unsigned long SSL_CTX_sess_set_cache_size(SSL_CTX *ctx, unsigned long size); // SSL_CTX_sess_get_cache_size returns the maximum size of |ctx|'s internal // session cache. OPENSSL_EXPORT unsigned long SSL_CTX_sess_get_cache_size(const SSL_CTX *ctx); // SSL_CTX_sess_number returns the number of sessions in |ctx|'s internal // session cache. OPENSSL_EXPORT size_t SSL_CTX_sess_number(const SSL_CTX *ctx); // SSL_CTX_add_session inserts |session| into |ctx|'s internal session cache. It // returns one on success and zero on error or if |session| is already in the // cache. The caller retains its reference to |session|. OPENSSL_EXPORT int SSL_CTX_add_session(SSL_CTX *ctx, SSL_SESSION *session); // SSL_CTX_remove_session removes |session| from |ctx|'s internal session cache. // It returns one on success and zero if |session| was not in the cache. OPENSSL_EXPORT int SSL_CTX_remove_session(SSL_CTX *ctx, SSL_SESSION *session); // SSL_CTX_flush_sessions removes all sessions from |ctx| which have expired as // of time |time|. If |time| is zero, all sessions are removed. OPENSSL_EXPORT void SSL_CTX_flush_sessions(SSL_CTX *ctx, uint64_t time); // SSL_CTX_sess_set_new_cb sets the callback to be called when a new session is // established and ready to be cached. 
If the session cache is disabled (the // appropriate one of |SSL_SESS_CACHE_CLIENT| or |SSL_SESS_CACHE_SERVER| is // unset), the callback is not called. // // The callback is passed a reference to |session|. It returns one if it takes // ownership (and then calls |SSL_SESSION_free| when done) and zero otherwise. A // consumer which places |session| into an in-memory cache will likely return // one, with the cache calling |SSL_SESSION_free|. A consumer which serializes // |session| with |SSL_SESSION_to_bytes| may not need to retain |session| and // will likely return zero. Returning one is equivalent to calling // |SSL_SESSION_up_ref| and then returning zero. // // Note: For a client, the callback may be called on abbreviated handshakes if a // ticket is renewed. Further, it may not be called until some time after // |SSL_do_handshake| or |SSL_connect| completes if False Start is enabled. Thus // it's recommended to use this callback over calling |SSL_get_session| on // handshake completion. OPENSSL_EXPORT void SSL_CTX_sess_set_new_cb( SSL_CTX *ctx, int (*new_session_cb)(SSL *ssl, SSL_SESSION *session)); // SSL_CTX_sess_get_new_cb returns the callback set by // |SSL_CTX_sess_set_new_cb|. OPENSSL_EXPORT int (*SSL_CTX_sess_get_new_cb(SSL_CTX *ctx))( SSL *ssl, SSL_SESSION *session); // SSL_CTX_sess_set_remove_cb sets a callback which is called when a session is // removed from the internal session cache. // // TODO(davidben): What is the point of this callback? It seems useless since it // only fires on sessions in the internal cache. OPENSSL_EXPORT void SSL_CTX_sess_set_remove_cb( SSL_CTX *ctx, void (*remove_session_cb)(SSL_CTX *ctx, SSL_SESSION *session)); // SSL_CTX_sess_get_remove_cb returns the callback set by // |SSL_CTX_sess_set_remove_cb|. OPENSSL_EXPORT void (*SSL_CTX_sess_get_remove_cb(SSL_CTX *ctx))( SSL_CTX *ctx, SSL_SESSION *session); // SSL_CTX_sess_set_get_cb sets a callback to look up a session by ID for a // server. 
The callback is passed the session ID and should return a matching // |SSL_SESSION| or NULL if not found. It should set |*out_copy| to zero and // return a new reference to the session. This callback is not used for a // client. // // For historical reasons, if |*out_copy| is set to one (default), the SSL // library will take a new reference to the returned |SSL_SESSION|, expecting // the callback to return a non-owning pointer. This is not recommended. If // |ctx| and thus the callback is used on multiple threads, the session may be // removed and invalidated before the SSL library calls |SSL_SESSION_up_ref|, // whereas the callback may synchronize internally. // // To look up a session asynchronously, the callback may return // |SSL_magic_pending_session_ptr|. See the documentation for that function and // |SSL_ERROR_PENDING_SESSION|. // // If the internal session cache is enabled, the callback is only consulted if // the internal cache does not return a match. OPENSSL_EXPORT void SSL_CTX_sess_set_get_cb( SSL_CTX *ctx, SSL_SESSION *(*get_session_cb)(SSL *ssl, const uint8_t *id, int id_len, int *out_copy)); // SSL_CTX_sess_get_get_cb returns the callback set by // |SSL_CTX_sess_set_get_cb|. OPENSSL_EXPORT SSL_SESSION *(*SSL_CTX_sess_get_get_cb(SSL_CTX *ctx))( SSL *ssl, const uint8_t *id, int id_len, int *out_copy); // SSL_magic_pending_session_ptr returns a magic |SSL_SESSION|* which indicates // that the session isn't currently unavailable. |SSL_get_error| will then // return |SSL_ERROR_PENDING_SESSION| and the handshake can be retried later // when the lookup has completed. OPENSSL_EXPORT SSL_SESSION *SSL_magic_pending_session_ptr(void); // Session tickets. // // Session tickets, from RFC 5077, allow session resumption without server-side // state. The server maintains a secret ticket key and sends the client opaque // encrypted session parameters, called a ticket. 
When offering the session, the // client sends the ticket which the server decrypts to recover session state. // Session tickets are enabled by default but may be disabled with // |SSL_OP_NO_TICKET|. // // On the client, ticket-based sessions use the same APIs as ID-based tickets. // Callers do not need to handle them differently. // // On the server, tickets are encrypted and authenticated with a secret key. // By default, an |SSL_CTX| will manage session ticket encryption keys by // generating them internally and rotating every 48 hours. Tickets are minted // and processed transparently. The following functions may be used to configure // a persistent key or implement more custom behavior, including key rotation // and sharing keys between multiple servers in a large deployment. There are // three levels of customisation possible: // // 1) One can simply set the keys with |SSL_CTX_set_tlsext_ticket_keys|. // 2) One can configure an |EVP_CIPHER_CTX| and |HMAC_CTX| directly for // encryption and authentication. // 3) One can configure an |SSL_TICKET_AEAD_METHOD| to have more control // and the option of asynchronous decryption. // // An attacker that compromises a server's session ticket key can impersonate // the server and, prior to TLS 1.3, retroactively decrypt all application // traffic from sessions using that ticket key. Thus ticket keys must be // regularly rotated for forward secrecy. Note the default key is rotated // automatically once every 48 hours but manually configured keys are not. // SSL_DEFAULT_TICKET_KEY_ROTATION_INTERVAL is the interval with which the // default session ticket encryption key is rotated, if in use. If any // non-default ticket encryption mechanism is configured, automatic rotation is // disabled. #define SSL_DEFAULT_TICKET_KEY_ROTATION_INTERVAL (2 * 24 * 60 * 60) // SSL_CTX_get_tlsext_ticket_keys writes |ctx|'s session ticket key material to // |len| bytes of |out|. It returns one on success and zero if |len| is not // 48. 
If |out| is NULL, it returns 48 instead. OPENSSL_EXPORT int SSL_CTX_get_tlsext_ticket_keys(SSL_CTX *ctx, void *out, size_t len); // SSL_CTX_set_tlsext_ticket_keys sets |ctx|'s session ticket key material to // |len| bytes of |in|. It returns one on success and zero if |len| is not // 48. If |in| is NULL, it returns 48 instead. OPENSSL_EXPORT int SSL_CTX_set_tlsext_ticket_keys(SSL_CTX *ctx, const void *in, size_t len); // SSL_TICKET_KEY_NAME_LEN is the length of the key name prefix of a session // ticket. #define SSL_TICKET_KEY_NAME_LEN 16 // SSL_CTX_set_tlsext_ticket_key_cb sets the ticket callback to |callback| and // returns one. |callback| will be called when encrypting a new ticket and when // decrypting a ticket from the client. // // In both modes, |ctx| and |hmac_ctx| will already have been initialized with // |EVP_CIPHER_CTX_init| and |HMAC_CTX_init|, respectively. |callback| // configures |hmac_ctx| with an HMAC digest and key, and configures |ctx| // for encryption or decryption, based on the mode. // // When encrypting a new ticket, |encrypt| will be one. It writes a public // 16-byte key name to |key_name| and a fresh IV to |iv|. The output IV length // must match |EVP_CIPHER_CTX_iv_length| of the cipher selected. In this mode, // |callback| returns 1 on success, 0 to decline sending a ticket, and -1 on // error. // // When decrypting a ticket, |encrypt| will be zero. |key_name| will point to a // 16-byte key name and |iv| points to an IV. The length of the IV consumed must // match |EVP_CIPHER_CTX_iv_length| of the cipher selected. In this mode, // |callback| returns -1 to abort the handshake, 0 if the ticket key was // unrecognized, and 1 or 2 on success. If it returns 2, the ticket will be // renewed. This may be used to re-key the ticket. // // WARNING: |callback| wildly breaks the usual return value convention and is // called in two different modes. 
OPENSSL_EXPORT int SSL_CTX_set_tlsext_ticket_key_cb( SSL_CTX *ctx, int (*callback)(SSL *ssl, uint8_t *key_name, uint8_t *iv, EVP_CIPHER_CTX *ctx, HMAC_CTX *hmac_ctx, int encrypt)); // ssl_ticket_aead_result_t enumerates the possible results from decrypting a // ticket with an |SSL_TICKET_AEAD_METHOD|. enum ssl_ticket_aead_result_t BORINGSSL_ENUM_INT { // ssl_ticket_aead_success indicates that the ticket was successfully // decrypted. ssl_ticket_aead_success, // ssl_ticket_aead_retry indicates that the operation could not be // immediately completed and must be reattempted, via |open|, at a later // point. ssl_ticket_aead_retry, // ssl_ticket_aead_ignore_ticket indicates that the ticket should be ignored // (i.e. is corrupt or otherwise undecryptable). ssl_ticket_aead_ignore_ticket, // ssl_ticket_aead_error indicates that a fatal error occured and the // handshake should be terminated. ssl_ticket_aead_error, }; // ssl_ticket_aead_method_st (aka |SSL_TICKET_AEAD_METHOD|) contains methods // for encrypting and decrypting session tickets. struct ssl_ticket_aead_method_st { // max_overhead returns the maximum number of bytes of overhead that |seal| // may add. size_t (*max_overhead)(SSL *ssl); // seal encrypts and authenticates |in_len| bytes from |in|, writes, at most, // |max_out_len| bytes to |out|, and puts the number of bytes written in // |*out_len|. The |in| and |out| buffers may be equal but will not otherwise // alias. It returns one on success or zero on error. If the function returns // but |*out_len| is zero, BoringSSL will skip sending a ticket. int (*seal)(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *in, size_t in_len); // open authenticates and decrypts |in_len| bytes from |in|, writes, at most, // |max_out_len| bytes of plaintext to |out|, and puts the number of bytes // written in |*out_len|. The |in| and |out| buffers may be equal but will // not otherwise alias. 
See |ssl_ticket_aead_result_t| for details of the // return values. In the case that a retry is indicated, the caller should // arrange for the high-level operation on |ssl| to be retried when the // operation is completed, which will result in another call to |open|. enum ssl_ticket_aead_result_t (*open)(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *in, size_t in_len); }; // SSL_CTX_set_ticket_aead_method configures a custom ticket AEAD method table // on |ctx|. |aead_method| must remain valid for the lifetime of |ctx|. OPENSSL_EXPORT void SSL_CTX_set_ticket_aead_method( SSL_CTX *ctx, const SSL_TICKET_AEAD_METHOD *aead_method); // SSL_process_tls13_new_session_ticket processes an unencrypted TLS 1.3 // NewSessionTicket message from |buf| and returns a resumable |SSL_SESSION|, // or NULL on error. The caller takes ownership of the returned session and // must call |SSL_SESSION_free| to free it. // // |buf| contains |buf_len| bytes that represents a complete NewSessionTicket // message including its header, i.e., one byte for the type (0x04) and three // bytes for the length. |buf| must contain only one such message. // // This function may be used to process NewSessionTicket messages in TLS 1.3 // clients that are handling the record layer externally. OPENSSL_EXPORT SSL_SESSION *SSL_process_tls13_new_session_ticket( SSL *ssl, const uint8_t *buf, size_t buf_len); // SSL_CTX_set_num_tickets configures |ctx| to send |num_tickets| immediately // after a successful TLS 1.3 handshake as a server. It returns one. Large // values of |num_tickets| will be capped within the library. // // By default, BoringSSL sends two tickets. OPENSSL_EXPORT int SSL_CTX_set_num_tickets(SSL_CTX *ctx, size_t num_tickets); // SSL_CTX_get_num_tickets returns the number of tickets |ctx| will send // immediately after a successful TLS 1.3 handshake as a server. 
OPENSSL_EXPORT size_t SSL_CTX_get_num_tickets(const SSL_CTX *ctx); // Diffie-Hellman groups and ephemeral key exchanges. // // Most TLS handshakes (ECDHE cipher suites in TLS 1.2, and all supported TLS // 1.3 modes) incorporate an ephemeral key exchange, most commonly using // Elliptic Curve Diffie-Hellman (ECDH), as described in RFC 8422. The key // exchange algorithm is negotiated separately from the cipher suite, using // NamedGroup values, which define Diffie-Hellman groups. // // Historically, these values were known as "curves", in reference to ECDH, and // some APIs refer to the original name. RFC 7919 renamed them to "groups" in // reference to Diffie-Hellman in general. These values are also used to select // experimental post-quantum KEMs. Though not Diffie-Hellman groups, KEMs can // fill a similar role in TLS, so they use the same codepoints. // // In TLS 1.2, the ECDH values also negotiate elliptic curves used in ECDSA. In // TLS 1.3 and later, ECDSA curves are part of the signature algorithm. See // |SSL_SIGN_*|. // SSL_GROUP_* define TLS group IDs. #define SSL_GROUP_SECP224R1 21 #define SSL_GROUP_SECP256R1 23 #define SSL_GROUP_SECP384R1 24 #define SSL_GROUP_SECP521R1 25 #define SSL_GROUP_X25519 29 #define SSL_GROUP_X25519_MLKEM768 0x11ec #define SSL_GROUP_X25519_KYBER768_DRAFT00 0x6399 // SSL_CTX_set1_group_ids sets the preferred groups for |ctx| to |group_ids|. // Each element of |group_ids| should be one of the |SSL_GROUP_*| constants. It // returns one on success and zero on failure. OPENSSL_EXPORT int SSL_CTX_set1_group_ids(SSL_CTX *ctx, const uint16_t *group_ids, size_t num_group_ids); // SSL_set1_group_ids sets the preferred groups for |ssl| to |group_ids|. Each // element of |group_ids| should be one of the |SSL_GROUP_*| constants. It // returns one on success and zero on failure. 
OPENSSL_EXPORT int SSL_set1_group_ids(SSL *ssl, const uint16_t *group_ids, size_t num_group_ids); // SSL_get_group_id returns the ID of the group used by |ssl|'s most recently // completed handshake, or 0 if not applicable. OPENSSL_EXPORT uint16_t SSL_get_group_id(const SSL *ssl); // SSL_get_group_name returns a human-readable name for the group specified by // the given TLS group ID, or NULL if the group is unknown. OPENSSL_EXPORT const char *SSL_get_group_name(uint16_t group_id); // SSL_get_all_group_names outputs a list of possible strings // |SSL_get_group_name| may return in this version of BoringSSL. It writes at // most |max_out| entries to |out| and returns the total number it would have // written, if |max_out| had been large enough. |max_out| may be initially set // to zero to size the output. // // This function is only intended to help initialize tables in callers that want // possible strings pre-declared. This list would not be suitable to set a list // of supported features. It is in no particular order, and may contain // placeholder, experimental, or deprecated values that do not apply to every // caller. Future versions of BoringSSL may also return strings not in this // list, so this does not apply if, say, sending strings across services. OPENSSL_EXPORT size_t SSL_get_all_group_names(const char **out, size_t max_out); // The following APIs also configure Diffie-Hellman groups, but use |NID_*| // constants instead of |SSL_GROUP_*| constants. These are provided for OpenSSL // compatibility. Where NIDs are unstable constants specific to OpenSSL and // BoringSSL, group IDs are defined by the TLS protocol. Prefer the group ID // representation if storing persistently, or exporting to another process or // library. // SSL_CTX_set1_groups sets the preferred groups for |ctx| to be |groups|. Each // element of |groups| should be a |NID_*| constant from nid.h. It returns one // on success and zero on failure. 
OPENSSL_EXPORT int SSL_CTX_set1_groups(SSL_CTX *ctx, const int *groups, size_t num_groups); // SSL_set1_groups sets the preferred groups for |ssl| to be |groups|. Each // element of |groups| should be a |NID_*| constant from nid.h. It returns one // on success and zero on failure. OPENSSL_EXPORT int SSL_set1_groups(SSL *ssl, const int *groups, size_t num_groups); // SSL_CTX_set1_groups_list decodes |groups| as a colon-separated list of group // names (e.g. "X25519" or "P-256") and sets |ctx|'s preferred groups to the // result. It returns one on success and zero on failure. OPENSSL_EXPORT int SSL_CTX_set1_groups_list(SSL_CTX *ctx, const char *groups); // SSL_set1_groups_list decodes |groups| as a colon-separated list of group // names (e.g. "X25519" or "P-256") and sets |ssl|'s preferred groups to the // result. It returns one on success and zero on failure. OPENSSL_EXPORT int SSL_set1_groups_list(SSL *ssl, const char *groups); // SSL_get_negotiated_group returns the NID of the group used by |ssl|'s most // recently completed handshake, or |NID_undef| if not applicable. OPENSSL_EXPORT int SSL_get_negotiated_group(const SSL *ssl); // Certificate verification. // // SSL may authenticate either endpoint with an X.509 certificate. Typically // this is used to authenticate the server to the client. These functions // configure certificate verification. // // WARNING: By default, certificate verification errors on a client are not // fatal. See |SSL_VERIFY_NONE| This may be configured with // |SSL_CTX_set_verify|. // // By default clients are anonymous but a server may request a certificate from // the client by setting |SSL_VERIFY_PEER|. // // Many of these functions use OpenSSL's legacy X.509 stack which is // underdocumented and deprecated, but the replacement isn't ready yet. For // now, consumers may use the existing stack or bypass it by performing // certificate verification externally. 
This may be done with // |SSL_CTX_set_cert_verify_callback| or by extracting the chain with // |SSL_get_peer_cert_chain| after the handshake. In the future, functions will // be added to use the SSL stack without dependency on any part of the legacy // X.509 and ASN.1 stack. // // To augment certificate verification, a client may also enable OCSP stapling // (RFC 6066) and Certificate Transparency (RFC 6962) extensions. // SSL_VERIFY_NONE, on a client, verifies the server certificate but does not // make errors fatal. The result may be checked with |SSL_get_verify_result|. On // a server it does not request a client certificate. This is the default. #define SSL_VERIFY_NONE 0x00 // SSL_VERIFY_PEER, on a client, makes server certificate errors fatal. On a // server it requests a client certificate and makes errors fatal. However, // anonymous clients are still allowed. See // |SSL_VERIFY_FAIL_IF_NO_PEER_CERT|. #define SSL_VERIFY_PEER 0x01 // SSL_VERIFY_FAIL_IF_NO_PEER_CERT configures a server to reject connections if // the client declines to send a certificate. This flag must be used together // with |SSL_VERIFY_PEER|, otherwise it won't work. #define SSL_VERIFY_FAIL_IF_NO_PEER_CERT 0x02 // SSL_VERIFY_PEER_IF_NO_OBC configures a server to request a client certificate // if and only if Channel ID is not negotiated. #define SSL_VERIFY_PEER_IF_NO_OBC 0x04 // SSL_CTX_set_verify configures certificate verification behavior. |mode| is // one of the |SSL_VERIFY_*| values defined above. |callback| should be NULL. // // If |callback| is non-NULL, it is called as in |X509_STORE_CTX_set_verify_cb|, // which is a deprecated and fragile mechanism to run the default certificate // verification process, but suppress individual errors in it. See // |X509_STORE_CTX_set_verify_cb| for details, If set, the callback may use // |SSL_get_ex_data_X509_STORE_CTX_idx| with |X509_STORE_CTX_get_ex_data| to // look up the |SSL| from |store_ctx|. 
// // WARNING: |callback| is not suitable for implementing custom certificate // check, accepting all certificates, or extracting the certificate after // verification. It does not replace the default process and is called multiple // times throughout that process. It is also very difficult to implement this // callback safely, without inadvertently relying on implementation details or // making incorrect assumptions about when the callback is called. // // Instead, use |SSL_CTX_set_custom_verify| or // |SSL_CTX_set_cert_verify_callback| to customize certificate verification. // Those callbacks can inspect the peer-sent chain, call |X509_verify_cert| and // inspect the result, or perform other operations more straightforwardly. OPENSSL_EXPORT void SSL_CTX_set_verify( SSL_CTX *ctx, int mode, int (*callback)(int ok, X509_STORE_CTX *store_ctx)); // SSL_set_verify configures certificate verification behavior. |mode| is one of // the |SSL_VERIFY_*| values defined above. |callback| should be NULL. // // If |callback| is non-NULL, it is called as in |X509_STORE_CTX_set_verify_cb|, // which is a deprecated and fragile mechanism to run the default certificate // verification process, but suppress individual errors in it. See // |X509_STORE_CTX_set_verify_cb| for details, If set, the callback may use // |SSL_get_ex_data_X509_STORE_CTX_idx| with |X509_STORE_CTX_get_ex_data| to // look up the |SSL| from |store_ctx|. // // WARNING: |callback| is not suitable for implementing custom certificate // check, accepting all certificates, or extracting the certificate after // verification. It does not replace the default process and is called multiple // times throughout that process. It is also very difficult to implement this // callback safely, without inadvertently relying on implementation details or // making incorrect assumptions about when the callback is called. 
// // Instead, use |SSL_set_custom_verify| or |SSL_set_cert_verify_callback| to // customize certificate verification. Those callbacks can inspect the peer-sent // chain, call |X509_verify_cert| and inspect the result, or perform other // operations more straightforwardly. OPENSSL_EXPORT void SSL_set_verify(SSL *ssl, int mode, int (*callback)(int ok, X509_STORE_CTX *store_ctx)); enum ssl_verify_result_t BORINGSSL_ENUM_INT { ssl_verify_ok, ssl_verify_invalid, ssl_verify_retry, }; // SSL_CTX_set_custom_verify configures certificate verification. |mode| is one // of the |SSL_VERIFY_*| values defined above. |callback| performs the // certificate verification. // // The callback may call |SSL_get0_peer_certificates| for the certificate chain // to validate. The callback should return |ssl_verify_ok| if the certificate is // valid. If the certificate is invalid, the callback should return // |ssl_verify_invalid| and optionally set |*out_alert| to an alert to send to // the peer. Some useful alerts include |SSL_AD_CERTIFICATE_EXPIRED|, // |SSL_AD_CERTIFICATE_REVOKED|, |SSL_AD_UNKNOWN_CA|, |SSL_AD_BAD_CERTIFICATE|, // |SSL_AD_CERTIFICATE_UNKNOWN|, and |SSL_AD_INTERNAL_ERROR|. See RFC 5246 // section 7.2.2 for their precise meanings. If unspecified, // |SSL_AD_CERTIFICATE_UNKNOWN| will be sent by default. // // To verify a certificate asynchronously, the callback may return // |ssl_verify_retry|. The handshake will then pause with |SSL_get_error| // returning |SSL_ERROR_WANT_CERTIFICATE_VERIFY|. OPENSSL_EXPORT void SSL_CTX_set_custom_verify( SSL_CTX *ctx, int mode, enum ssl_verify_result_t (*callback)(SSL *ssl, uint8_t *out_alert)); // SSL_set_custom_verify behaves like |SSL_CTX_set_custom_verify| but configures // an individual |SSL|. OPENSSL_EXPORT void SSL_set_custom_verify( SSL *ssl, int mode, enum ssl_verify_result_t (*callback)(SSL *ssl, uint8_t *out_alert)); // SSL_CTX_get_verify_mode returns |ctx|'s verify mode, set by // |SSL_CTX_set_verify|. 
OPENSSL_EXPORT int SSL_CTX_get_verify_mode(const SSL_CTX *ctx); // SSL_get_verify_mode returns |ssl|'s verify mode, set by |SSL_CTX_set_verify| // or |SSL_set_verify|. It returns -1 on error. OPENSSL_EXPORT int SSL_get_verify_mode(const SSL *ssl); // SSL_CTX_get_verify_callback returns the callback set by // |SSL_CTX_set_verify|. OPENSSL_EXPORT int (*SSL_CTX_get_verify_callback(const SSL_CTX *ctx))( int ok, X509_STORE_CTX *store_ctx); // SSL_get_verify_callback returns the callback set by |SSL_CTX_set_verify| or // |SSL_set_verify|. OPENSSL_EXPORT int (*SSL_get_verify_callback(const SSL *ssl))( int ok, X509_STORE_CTX *store_ctx); // SSL_set1_host sets a DNS name that will be required to be present in the // verified leaf certificate. It returns one on success and zero on error. // // Note: unless _some_ name checking is performed, certificate validation is // ineffective. Simply checking that a host has some certificate from a CA is // rarely meaningful—you have to check that the CA believed that the host was // who you expect to be talking to. // // By default, both subject alternative names and the subject's common name // attribute are checked. The latter has long been deprecated, so callers should // call |SSL_set_hostflags| with |X509_CHECK_FLAG_NEVER_CHECK_SUBJECT| to use // the standard behavior. https://crbug.com/boringssl/464 tracks fixing the // default. OPENSSL_EXPORT int SSL_set1_host(SSL *ssl, const char *hostname); // SSL_set_hostflags calls |X509_VERIFY_PARAM_set_hostflags| on the // |X509_VERIFY_PARAM| associated with this |SSL*|. |flags| should be some // combination of the |X509_CHECK_*| constants. OPENSSL_EXPORT void SSL_set_hostflags(SSL *ssl, unsigned flags); // SSL_CTX_set_verify_depth sets the maximum depth of a certificate chain // accepted in verification. This count excludes both the target certificate and // the trust anchor (root certificate). 
OPENSSL_EXPORT void SSL_CTX_set_verify_depth(SSL_CTX *ctx, int depth); // SSL_set_verify_depth sets the maximum depth of a certificate chain accepted // in verification. This count excludes both the target certificate and the // trust anchor (root certificate). OPENSSL_EXPORT void SSL_set_verify_depth(SSL *ssl, int depth); // SSL_CTX_get_verify_depth returns the maximum depth of a certificate accepted // in verification. OPENSSL_EXPORT int SSL_CTX_get_verify_depth(const SSL_CTX *ctx); // SSL_get_verify_depth returns the maximum depth of a certificate accepted in // verification. OPENSSL_EXPORT int SSL_get_verify_depth(const SSL *ssl); // SSL_CTX_set1_param sets verification parameters from |param|. It returns one // on success and zero on failure. The caller retains ownership of |param|. OPENSSL_EXPORT int SSL_CTX_set1_param(SSL_CTX *ctx, const X509_VERIFY_PARAM *param); // SSL_set1_param sets verification parameters from |param|. It returns one on // success and zero on failure. The caller retains ownership of |param|. OPENSSL_EXPORT int SSL_set1_param(SSL *ssl, const X509_VERIFY_PARAM *param); // SSL_CTX_get0_param returns |ctx|'s |X509_VERIFY_PARAM| for certificate // verification. The caller must not release the returned pointer but may call // functions on it to configure it. OPENSSL_EXPORT X509_VERIFY_PARAM *SSL_CTX_get0_param(SSL_CTX *ctx); // SSL_get0_param returns |ssl|'s |X509_VERIFY_PARAM| for certificate // verification. The caller must not release the returned pointer but may call // functions on it to configure it. OPENSSL_EXPORT X509_VERIFY_PARAM *SSL_get0_param(SSL *ssl); // SSL_CTX_set_purpose sets |ctx|'s |X509_VERIFY_PARAM|'s 'purpose' parameter to // |purpose|. It returns one on success and zero on error. OPENSSL_EXPORT int SSL_CTX_set_purpose(SSL_CTX *ctx, int purpose); // SSL_set_purpose sets |ssl|'s |X509_VERIFY_PARAM|'s 'purpose' parameter to // |purpose|. It returns one on success and zero on error. 
OPENSSL_EXPORT int SSL_set_purpose(SSL *ssl, int purpose); // SSL_CTX_set_trust sets |ctx|'s |X509_VERIFY_PARAM|'s 'trust' parameter to // |trust|. It returns one on success and zero on error. OPENSSL_EXPORT int SSL_CTX_set_trust(SSL_CTX *ctx, int trust); // SSL_set_trust sets |ssl|'s |X509_VERIFY_PARAM|'s 'trust' parameter to // |trust|. It returns one on success and zero on error. OPENSSL_EXPORT int SSL_set_trust(SSL *ssl, int trust); // SSL_CTX_set_cert_store sets |ctx|'s certificate store to |store|. It takes // ownership of |store|. The store is used for certificate verification. // // The store is also used for the auto-chaining feature, but this is deprecated. // See also |SSL_MODE_NO_AUTO_CHAIN|. OPENSSL_EXPORT void SSL_CTX_set_cert_store(SSL_CTX *ctx, X509_STORE *store); // SSL_CTX_get_cert_store returns |ctx|'s certificate store. OPENSSL_EXPORT X509_STORE *SSL_CTX_get_cert_store(const SSL_CTX *ctx); // SSL_CTX_set_default_verify_paths calls |X509_STORE_set_default_paths| on // |ctx|'s store. See that function for details. // // Using this function is not recommended. In OpenSSL, these defaults are // determined by OpenSSL's install prefix. There is no corresponding concept for // BoringSSL. Future versions of BoringSSL may change or remove this // functionality. OPENSSL_EXPORT int SSL_CTX_set_default_verify_paths(SSL_CTX *ctx); // SSL_CTX_load_verify_locations calls |X509_STORE_load_locations| on |ctx|'s // store. See that function for details. OPENSSL_EXPORT int SSL_CTX_load_verify_locations(SSL_CTX *ctx, const char *ca_file, const char *ca_dir); // SSL_get_verify_result returns the result of certificate verification. It is // either |X509_V_OK| or a |X509_V_ERR_*| value. OPENSSL_EXPORT long SSL_get_verify_result(const SSL *ssl); // SSL_alert_from_verify_result returns the SSL alert code, such as // |SSL_AD_CERTIFICATE_EXPIRED|, that corresponds to an |X509_V_ERR_*| value. // The return value is always an alert, even when |result| is |X509_V_OK|. 
OPENSSL_EXPORT int SSL_alert_from_verify_result(long result); // SSL_get_ex_data_X509_STORE_CTX_idx returns the ex_data index used to look up // the |SSL| associated with an |X509_STORE_CTX| in the verify callback. OPENSSL_EXPORT int SSL_get_ex_data_X509_STORE_CTX_idx(void); // SSL_CTX_set_cert_verify_callback sets a custom callback to be called on // certificate verification rather than |X509_verify_cert|. |store_ctx| contains // the verification parameters. The callback should return one on success and // zero on fatal error. It may use |X509_STORE_CTX_set_error| to set a // verification result. // // The callback may use |SSL_get_ex_data_X509_STORE_CTX_idx| to recover the // |SSL| object from |store_ctx|. OPENSSL_EXPORT void SSL_CTX_set_cert_verify_callback( SSL_CTX *ctx, int (*callback)(X509_STORE_CTX *store_ctx, void *arg), void *arg); // SSL_enable_signed_cert_timestamps causes |ssl| (which must be the client end // of a connection) to request SCTs from the server. See // https://tools.ietf.org/html/rfc6962. // // Call |SSL_get0_signed_cert_timestamp_list| to recover the SCT after the // handshake. OPENSSL_EXPORT void SSL_enable_signed_cert_timestamps(SSL *ssl); // SSL_CTX_enable_signed_cert_timestamps enables SCT requests on all client SSL // objects created from |ctx|. // // Call |SSL_get0_signed_cert_timestamp_list| to recover the SCT after the // handshake. OPENSSL_EXPORT void SSL_CTX_enable_signed_cert_timestamps(SSL_CTX *ctx); // SSL_enable_ocsp_stapling causes |ssl| (which must be the client end of a // connection) to request a stapled OCSP response from the server. // // Call |SSL_get0_ocsp_response| to recover the OCSP response after the // handshake. OPENSSL_EXPORT void SSL_enable_ocsp_stapling(SSL *ssl); // SSL_CTX_enable_ocsp_stapling enables OCSP stapling on all client SSL objects // created from |ctx|. // // Call |SSL_get0_ocsp_response| to recover the OCSP response after the // handshake. 
OPENSSL_EXPORT void SSL_CTX_enable_ocsp_stapling(SSL_CTX *ctx); // SSL_CTX_set0_verify_cert_store sets an |X509_STORE| that will be used // exclusively for certificate verification and returns one. Ownership of // |store| is transferred to the |SSL_CTX|. OPENSSL_EXPORT int SSL_CTX_set0_verify_cert_store(SSL_CTX *ctx, X509_STORE *store); // SSL_CTX_set1_verify_cert_store sets an |X509_STORE| that will be used // exclusively for certificate verification and returns one. An additional // reference to |store| will be taken. OPENSSL_EXPORT int SSL_CTX_set1_verify_cert_store(SSL_CTX *ctx, X509_STORE *store); // SSL_set0_verify_cert_store sets an |X509_STORE| that will be used // exclusively for certificate verification and returns one. Ownership of // |store| is transferred to the |SSL|. OPENSSL_EXPORT int SSL_set0_verify_cert_store(SSL *ssl, X509_STORE *store); // SSL_set1_verify_cert_store sets an |X509_STORE| that will be used // exclusively for certificate verification and returns one. An additional // reference to |store| will be taken. OPENSSL_EXPORT int SSL_set1_verify_cert_store(SSL *ssl, X509_STORE *store); // SSL_CTX_set_verify_algorithm_prefs configures |ctx| to use |prefs| as the // preference list when verifying signatures from the peer's long-term key. It // returns one on success and zero on error. |prefs| should not include the // internal-only value |SSL_SIGN_RSA_PKCS1_MD5_SHA1|. OPENSSL_EXPORT int SSL_CTX_set_verify_algorithm_prefs(SSL_CTX *ctx, const uint16_t *prefs, size_t num_prefs); // SSL_set_verify_algorithm_prefs configures |ssl| to use |prefs| as the // preference list when verifying signatures from the peer's long-term key. It // returns one on success and zero on error. |prefs| should not include the // internal-only value |SSL_SIGN_RSA_PKCS1_MD5_SHA1|. OPENSSL_EXPORT int SSL_set_verify_algorithm_prefs(SSL *ssl, const uint16_t *prefs, size_t num_prefs); // Client certificate CA list. 
// // When requesting a client certificate, a server may advertise a list of // certificate authorities which are accepted. These functions may be used to // configure this list. // SSL_set_client_CA_list sets |ssl|'s client certificate CA list to // |name_list|. It takes ownership of |name_list|. OPENSSL_EXPORT void SSL_set_client_CA_list(SSL *ssl, STACK_OF(X509_NAME) *name_list); // SSL_CTX_set_client_CA_list sets |ctx|'s client certificate CA list to // |name_list|. It takes ownership of |name_list|. OPENSSL_EXPORT void SSL_CTX_set_client_CA_list(SSL_CTX *ctx, STACK_OF(X509_NAME) *name_list); // SSL_set0_client_CAs sets |ssl|'s client certificate CA list to |name_list|, // which should contain DER-encoded distinguished names (RFC 5280). It takes // ownership of |name_list|. OPENSSL_EXPORT void SSL_set0_client_CAs(SSL *ssl, STACK_OF(CRYPTO_BUFFER) *name_list); // SSL_set0_CA_names sets |ssl|'s CA name list for the certificate authorities // extension to |name_list|, which should contain DER-encoded distinguished // names (RFC 5280). It takes ownership of |name_list|. OPENSSL_EXPORT void SSL_set0_CA_names(SSL *ssl, STACK_OF(CRYPTO_BUFFER) *name_list); // SSL_CTX_set0_client_CAs sets |ctx|'s client certificate CA list to // |name_list|, which should contain DER-encoded distinguished names (RFC 5280). // It takes ownership of |name_list|. OPENSSL_EXPORT void SSL_CTX_set0_client_CAs(SSL_CTX *ctx, STACK_OF(CRYPTO_BUFFER) *name_list); // SSL_get_client_CA_list returns |ssl|'s client certificate CA list. If |ssl| // has not been configured as a client, this is the list configured by // |SSL_CTX_set_client_CA_list|. // // If configured as a client, it returns the client certificate CA list sent by // the server. In this mode, the behavior is undefined except during the // callbacks set by |SSL_CTX_set_cert_cb| and |SSL_CTX_set_client_cert_cb| or // when the handshake is paused because of them. 
OPENSSL_EXPORT STACK_OF(X509_NAME) *SSL_get_client_CA_list(const SSL *ssl); // SSL_get0_server_requested_CAs returns the CAs sent by a server to guide a // client in certificate selection. They are a series of DER-encoded X.509 // names. This function may only be called during a callback set by // |SSL_CTX_set_cert_cb| or when the handshake is paused because of it. // // The returned stack is owned by |ssl|, as are its contents. It should not be // used past the point where the handshake is restarted after the callback. OPENSSL_EXPORT const STACK_OF(CRYPTO_BUFFER) *SSL_get0_server_requested_CAs( const SSL *ssl); // SSL_CTX_get_client_CA_list returns |ctx|'s client certificate CA list. OPENSSL_EXPORT STACK_OF(X509_NAME) *SSL_CTX_get_client_CA_list( const SSL_CTX *ctx); // SSL_add_client_CA appends |x509|'s subject to the client certificate CA list. // It returns one on success or zero on error. The caller retains ownership of // |x509|. OPENSSL_EXPORT int SSL_add_client_CA(SSL *ssl, X509 *x509); // SSL_CTX_add_client_CA appends |x509|'s subject to the client certificate CA // list. It returns one on success or zero on error. The caller retains // ownership of |x509|. OPENSSL_EXPORT int SSL_CTX_add_client_CA(SSL_CTX *ctx, X509 *x509); // SSL_load_client_CA_file opens |file| and reads PEM-encoded certificates from // it. It returns a newly-allocated stack of the certificate subjects or NULL // on error. Duplicates in |file| are ignored. OPENSSL_EXPORT STACK_OF(X509_NAME) *SSL_load_client_CA_file(const char *file); // SSL_dup_CA_list makes a deep copy of |list|. It returns the new list on // success or NULL on allocation error. OPENSSL_EXPORT STACK_OF(X509_NAME) *SSL_dup_CA_list(STACK_OF(X509_NAME) *list); // SSL_add_file_cert_subjects_to_stack behaves like |SSL_load_client_CA_file| // but appends the result to |out|. It returns one on success or zero on // error. 
OPENSSL_EXPORT int SSL_add_file_cert_subjects_to_stack(STACK_OF(X509_NAME) *out, const char *file); // SSL_add_bio_cert_subjects_to_stack behaves like // |SSL_add_file_cert_subjects_to_stack| but reads from |bio|. OPENSSL_EXPORT int SSL_add_bio_cert_subjects_to_stack(STACK_OF(X509_NAME) *out, BIO *bio); // Server name indication. // // The server_name extension (RFC 3546) allows the client to advertise the name // of the server it is connecting to. This is used in virtual hosting // deployments to select one of several certificates on a single IP. Only the // host_name name type is supported. #define TLSEXT_NAMETYPE_host_name 0 // SSL_set_tlsext_host_name, for a client, configures |ssl| to advertise |name| // in the server_name extension. It returns one on success and zero on error. OPENSSL_EXPORT int SSL_set_tlsext_host_name(SSL *ssl, const char *name); // SSL_get_servername, for a server, returns the hostname supplied by the // client or NULL if there was none. The |type| argument must be // |TLSEXT_NAMETYPE_host_name|. OPENSSL_EXPORT const char *SSL_get_servername(const SSL *ssl, const int type); // SSL_get_servername_type, for a server, returns |TLSEXT_NAMETYPE_host_name| // if the client sent a hostname and -1 otherwise. OPENSSL_EXPORT int SSL_get_servername_type(const SSL *ssl); // SSL_CTX_set_tlsext_servername_callback configures |callback| to be called on // the server after ClientHello extensions have been parsed and returns one. // The callback may use |SSL_get_servername| to examine the server_name // extension and returns a |SSL_TLSEXT_ERR_*| value. The value of |arg| may be // set by calling |SSL_CTX_set_tlsext_servername_arg|. // // If the callback returns |SSL_TLSEXT_ERR_NOACK|, the server_name extension is // not acknowledged in the ServerHello. If the return value is // |SSL_TLSEXT_ERR_ALERT_FATAL|, then |*out_alert| is the alert to send, // defaulting to |SSL_AD_UNRECOGNIZED_NAME|. 
|SSL_TLSEXT_ERR_ALERT_WARNING| is // ignored and treated as |SSL_TLSEXT_ERR_OK|. OPENSSL_EXPORT int SSL_CTX_set_tlsext_servername_callback( SSL_CTX *ctx, int (*callback)(SSL *ssl, int *out_alert, void *arg)); // SSL_CTX_set_tlsext_servername_arg sets the argument to the servername // callback and returns one. See |SSL_CTX_set_tlsext_servername_callback|. OPENSSL_EXPORT int SSL_CTX_set_tlsext_servername_arg(SSL_CTX *ctx, void *arg); // SSL_TLSEXT_ERR_* are values returned by some extension-related callbacks. #define SSL_TLSEXT_ERR_OK 0 #define SSL_TLSEXT_ERR_ALERT_WARNING 1 #define SSL_TLSEXT_ERR_ALERT_FATAL 2 #define SSL_TLSEXT_ERR_NOACK 3 // SSL_set_SSL_CTX changes |ssl|'s |SSL_CTX|. |ssl| will use the // certificate-related settings from |ctx|, and |SSL_get_SSL_CTX| will report // |ctx|. This function may be used during the callbacks registered by // |SSL_CTX_set_select_certificate_cb|, // |SSL_CTX_set_tlsext_servername_callback|, and |SSL_CTX_set_cert_cb| or when // the handshake is paused from them. It is typically used to switch // certificates based on SNI. // // Note the session cache and related settings will continue to use the initial // |SSL_CTX|. Callers should use |SSL_CTX_set_session_id_context| to partition // the session cache between different domains. // // TODO(davidben): Should other settings change after this call? OPENSSL_EXPORT SSL_CTX *SSL_set_SSL_CTX(SSL *ssl, SSL_CTX *ctx); // Application-layer protocol negotiation. // // The ALPN extension (RFC 7301) allows negotiating different application-layer // protocols over a single port. This is used, for example, to negotiate // HTTP/2. // SSL_CTX_set_alpn_protos sets the client ALPN protocol list on |ctx| to // |protos|. |protos| must be in wire-format (i.e. a series of non-empty, 8-bit // length-prefixed strings), or the empty string to disable ALPN. It returns // zero on success and one on failure. Configuring a non-empty string enables // ALPN on a client. 
// // WARNING: this function is dangerous because it breaks the usual return value // convention. OPENSSL_EXPORT int SSL_CTX_set_alpn_protos(SSL_CTX *ctx, const uint8_t *protos, size_t protos_len); // SSL_set_alpn_protos sets the client ALPN protocol list on |ssl| to |protos|. // |protos| must be in wire-format (i.e. a series of non-empty, 8-bit // length-prefixed strings), or the empty string to disable ALPN. It returns // zero on success and one on failure. Configuring a non-empty string enables // ALPN on a client. // // WARNING: this function is dangerous because it breaks the usual return value // convention. OPENSSL_EXPORT int SSL_set_alpn_protos(SSL *ssl, const uint8_t *protos, size_t protos_len); // SSL_CTX_set_alpn_select_cb sets a callback function on |ctx| that is called // during ClientHello processing in order to select an ALPN protocol from the // client's list of offered protocols. |SSL_select_next_proto| is an optional // utility function which may be useful in implementing this callback. // // The callback is passed a wire-format (i.e. a series of non-empty, 8-bit // length-prefixed strings) ALPN protocol list in |in|. To select a protocol, // the callback should set |*out| and |*out_len| to the selected protocol and // return |SSL_TLSEXT_ERR_OK| on success. It does not pass ownership of the // buffer, so |*out| should point to a static string, a buffer that outlives the // callback call, or the corresponding entry in |in|. // // If the server supports ALPN, but there are no protocols in common, the // callback should return |SSL_TLSEXT_ERR_ALERT_FATAL| to abort the connection // with a no_application_protocol alert. // // If the server does not support ALPN, it can return |SSL_TLSEXT_ERR_NOACK| to // continue the handshake without negotiating a protocol. This may be useful if // multiple server configurations share an |SSL_CTX|, only some of which have // ALPN protocols configured. 
// // |SSL_TLSEXT_ERR_ALERT_WARNING| is ignored and will be treated as // |SSL_TLSEXT_ERR_NOACK|. // // The callback will only be called if the client supports ALPN. Callers that // wish to require ALPN for all clients must check |SSL_get0_alpn_selected| // after the handshake. In QUIC connections, this is done automatically. // // The cipher suite is selected before negotiating ALPN. The callback may use // |SSL_get_pending_cipher| to query the cipher suite. This may be used to // implement HTTP/2's cipher suite constraints. OPENSSL_EXPORT void SSL_CTX_set_alpn_select_cb( SSL_CTX *ctx, int (*cb)(SSL *ssl, const uint8_t **out, uint8_t *out_len, const uint8_t *in, unsigned in_len, void *arg), void *arg); // SSL_get0_alpn_selected gets the selected ALPN protocol (if any) from |ssl|. // On return it sets |*out_data| to point to |*out_len| bytes of protocol name // (not including the leading length-prefix byte). If the server didn't respond // with a negotiated protocol then |*out_len| will be zero. OPENSSL_EXPORT void SSL_get0_alpn_selected(const SSL *ssl, const uint8_t **out_data, unsigned *out_len); // SSL_CTX_set_allow_unknown_alpn_protos configures client connections on |ctx| // to allow unknown ALPN protocols from the server. Otherwise, by default, the // client will require that the protocol be advertised in // |SSL_CTX_set_alpn_protos|. OPENSSL_EXPORT void SSL_CTX_set_allow_unknown_alpn_protos(SSL_CTX *ctx, int enabled); // Application-layer protocol settings // // The ALPS extension (draft-vvv-tls-alps) allows exchanging application-layer // settings in the TLS handshake for applications negotiated with ALPN. Note // that, when ALPS is negotiated, the client and server each advertise their own // settings, so there are functions to both configure setting to send and query // received settings. // SSL_add_application_settings configures |ssl| to enable ALPS with ALPN // protocol |proto|, sending an ALPS value of |settings|. 
It returns one on // success and zero on error. If |proto| is negotiated via ALPN and the peer // supports ALPS, |settings| will be sent to the peer. The peer's ALPS value can // be retrieved with |SSL_get0_peer_application_settings|. // // On the client, this function should be called before the handshake, once for // each supported ALPN protocol which uses ALPS. |proto| must be included in the // client's ALPN configuration (see |SSL_CTX_set_alpn_protos| and // |SSL_set_alpn_protos|). On the server, ALPS can be preconfigured for each // protocol as in the client, or configuration can be deferred to the ALPN // callback (see |SSL_CTX_set_alpn_select_cb|), in which case only the selected // protocol needs to be configured. // // ALPS can be independently configured from 0-RTT, however changes in protocol // settings will fallback to 1-RTT to negotiate the new value, so it is // recommended for |settings| to be relatively stable. OPENSSL_EXPORT int SSL_add_application_settings(SSL *ssl, const uint8_t *proto, size_t proto_len, const uint8_t *settings, size_t settings_len); // SSL_get0_peer_application_settings sets |*out_data| and |*out_len| to a // buffer containing the peer's ALPS value, or the empty string if ALPS was not // negotiated. Note an empty string could also indicate the peer sent an empty // settings value. Use |SSL_has_application_settings| to check if ALPS was // negotiated. The output buffer is owned by |ssl| and is valid until the next // time |ssl| is modified. OPENSSL_EXPORT void SSL_get0_peer_application_settings(const SSL *ssl, const uint8_t **out_data, size_t *out_len); // SSL_has_application_settings returns one if ALPS was negotiated on this // connection and zero otherwise. OPENSSL_EXPORT int SSL_has_application_settings(const SSL *ssl); // SSL_set_alps_use_new_codepoint configures whether to use the new ALPS // codepoint. By default, the old codepoint is used. 
OPENSSL_EXPORT void SSL_set_alps_use_new_codepoint(SSL *ssl, int use_new); // Certificate compression. // // Certificates in TLS 1.3 can be compressed (RFC 8879). BoringSSL supports this // as both a client and a server, but does not link against any specific // compression libraries in order to keep dependencies to a minimum. Instead, // hooks for compression and decompression can be installed in an |SSL_CTX| to // enable support. // ssl_cert_compression_func_t is a pointer to a function that performs // compression. It must write the compressed representation of |in| to |out|, // returning one on success and zero on error. The results of compressing // certificates are not cached internally. Implementations may wish to implement // their own cache if they expect it to be useful given the certificates that // they serve. typedef int (*ssl_cert_compression_func_t)(SSL *ssl, CBB *out, const uint8_t *in, size_t in_len); // ssl_cert_decompression_func_t is a pointer to a function that performs // decompression. The compressed data from the peer is passed as |in| and the // decompressed result must be exactly |uncompressed_len| bytes long. It returns // one on success, in which case |*out| must be set to the result of // decompressing |in|, or zero on error. Setting |*out| transfers ownership, // i.e. |CRYPTO_BUFFER_free| will be called on |*out| at some point in the // future. The results of decompressions are not cached internally. // Implementations may wish to implement their own cache if they expect it to be // useful. typedef int (*ssl_cert_decompression_func_t)(SSL *ssl, CRYPTO_BUFFER **out, size_t uncompressed_len, const uint8_t *in, size_t in_len); // SSL_CTX_add_cert_compression_alg registers a certificate compression // algorithm on |ctx| with ID |alg_id|. (The value of |alg_id| should be an IANA // assigned value and each can only be registered once.) 
// // One of the function pointers may be NULL to avoid having to implement both // sides of a compression algorithm if you're only going to use it in one // direction. In this case, the unimplemented direction acts like it was never // configured. // // For a server, algorithms are registered in preference order with the most // preferable first. It returns one on success or zero on error. OPENSSL_EXPORT int SSL_CTX_add_cert_compression_alg( SSL_CTX *ctx, uint16_t alg_id, ssl_cert_compression_func_t compress, ssl_cert_decompression_func_t decompress); // Next protocol negotiation. // // The NPN extension (draft-agl-tls-nextprotoneg-03) is the predecessor to ALPN // and deprecated in favor of it. // SSL_CTX_set_next_protos_advertised_cb sets a callback that is called when a // TLS server needs a list of supported protocols for Next Protocol Negotiation. // // If the callback wishes to advertise NPN to the client, it should return // |SSL_TLSEXT_ERR_OK| and then set |*out| and |*out_len| to describe to a // buffer containing a (possibly empty) list of supported protocols in wire // format. That is, each protocol is prefixed with a 1-byte length, then // concatenated. From there, the client will select a protocol, possibly one not // on the server's list. The caller can use |SSL_get0_next_proto_negotiated| // after the handshake completes to query the final protocol. // // The returned buffer must remain valid and unmodified for at least the // duration of the |SSL| operation (e.g. |SSL_do_handshake|) that triggered the // callback. // // If the caller wishes not to advertise NPN, it should return // |SSL_TLSEXT_ERR_NOACK|. No NPN extension will be included in the ServerHello, // and the TLS server will behave as if it does not implement NPN. 
OPENSSL_EXPORT void SSL_CTX_set_next_protos_advertised_cb( SSL_CTX *ctx, int (*cb)(SSL *ssl, const uint8_t **out, unsigned *out_len, void *arg), void *arg); // SSL_CTX_set_next_proto_select_cb sets a callback that is called when a client // needs to select a protocol from the server's provided list, passed in wire // format in |in_len| bytes from |in|. The callback can assume that |in| is // syntactically valid. |SSL_select_next_proto| is an optional utility function // which may be useful in implementing this callback. // // On success, the callback should return |SSL_TLSEXT_ERR_OK| and set |*out| and // |*out_len| to describe a buffer containing the selected protocol, or an // empty buffer to select no protocol. The returned buffer may point within // |in|, or it may point to some other buffer that remains valid and unmodified // for at least the duration of the |SSL| operation (e.g. |SSL_do_handshake|) // that triggered the callback. // // Returning any other value indicates a fatal error and will terminate the TLS // connection. To proceed without selecting a protocol, the callback must return // |SSL_TLSEXT_ERR_OK| and set |*out| and |*out_len| to an empty buffer. (E.g. // NULL and zero, respectively.) // // Configuring this callback enables NPN on a client. Although the callback can // then decline to negotiate a protocol, merely configuring the callback causes // the client to offer NPN in the ClientHello. Callers thus should not configure // this callback in TLS client contexts that are not intended to use NPN. OPENSSL_EXPORT void SSL_CTX_set_next_proto_select_cb( SSL_CTX *ctx, int (*cb)(SSL *ssl, uint8_t **out, uint8_t *out_len, const uint8_t *in, unsigned in_len, void *arg), void *arg); // SSL_get0_next_proto_negotiated sets |*out_data| and |*out_len| to point to // the client's requested protocol for this connection. If the client didn't // request any protocol, then |*out_len| is set to zero. 
// // Note that the client can request any protocol it chooses. The value returned // from this function need not be a member of the list of supported protocols // provided by the server. OPENSSL_EXPORT void SSL_get0_next_proto_negotiated(const SSL *ssl, const uint8_t **out_data, unsigned *out_len); // SSL_select_next_proto implements the standard protocol selection for either // ALPN servers or NPN clients. It is expected that this function is called from // the callback set by |SSL_CTX_set_alpn_select_cb| or // |SSL_CTX_set_next_proto_select_cb|. // // |peer| and |supported| contain the peer and locally-configured protocols, // respectively. This function finds the first protocol in |peer| which is also // in |supported|. If one was found, it sets |*out| and |*out_len| to point to // it and returns |OPENSSL_NPN_NEGOTIATED|. Otherwise, it returns // |OPENSSL_NPN_NO_OVERLAP| and sets |*out| and |*out_len| to the first // supported protocol. // // In ALPN, the server should only select protocols among those that the client // offered. Thus, if this function returns |OPENSSL_NPN_NO_OVERLAP|, the caller // should ignore |*out| and return |SSL_TLSEXT_ERR_ALERT_FATAL| from // |SSL_CTX_set_alpn_select_cb|'s callback to indicate there was no match. // // In NPN, the client may either select one of the server's protocols, or an // "opportunistic" protocol as described in Section 6 of // draft-agl-tls-nextprotoneg-03. When this function returns // |OPENSSL_NPN_NO_OVERLAP|, |*out| implicitly selects the first supported // protocol for use as the opportunistic protocol. The caller may use it, // ignore it and select a different opportunistic protocol, or ignore it and // select no protocol (empty string). // // |peer| and |supported| must be vectors of 8-bit, length-prefixed byte // strings. The length byte itself is not included in the length. A byte string // of length 0 is invalid. No byte string may be truncated. 
|supported| must be // non-empty; a caller that supports no ALPN/NPN protocols should skip // negotiating the extension, rather than calling this function. If any of these // preconditions do not hold, this function will return |OPENSSL_NPN_NO_OVERLAP| // and set |*out| and |*out_len| to an empty buffer for robustness, but callers // are not recommended to rely on this. An empty buffer is not a valid output // for |SSL_CTX_set_alpn_select_cb|'s callback. // // WARNING: |*out| and |*out_len| may alias either |peer| or |supported| and may // not be used after one of those buffers is modified or released. Additionally, // this function is not const-correct for compatibility reasons. Although |*out| // is a non-const pointer, callers may not modify the buffer though |*out|. OPENSSL_EXPORT int SSL_select_next_proto(uint8_t **out, uint8_t *out_len, const uint8_t *peer, unsigned peer_len, const uint8_t *supported, unsigned supported_len); #define OPENSSL_NPN_UNSUPPORTED 0 #define OPENSSL_NPN_NEGOTIATED 1 #define OPENSSL_NPN_NO_OVERLAP 2 // Channel ID. // // See draft-balfanz-tls-channelid-01. This is an old, experimental mechanism // and should not be used in new code. // SSL_CTX_set_tls_channel_id_enabled configures whether connections associated // with |ctx| should enable Channel ID as a server. OPENSSL_EXPORT void SSL_CTX_set_tls_channel_id_enabled(SSL_CTX *ctx, int enabled); // SSL_set_tls_channel_id_enabled configures whether |ssl| should enable Channel // ID as a server. OPENSSL_EXPORT void SSL_set_tls_channel_id_enabled(SSL *ssl, int enabled); // SSL_CTX_set1_tls_channel_id configures a TLS client to send a TLS Channel ID // to compatible servers. |private_key| must be a P-256 EC key. It returns one // on success and zero on error. OPENSSL_EXPORT int SSL_CTX_set1_tls_channel_id(SSL_CTX *ctx, EVP_PKEY *private_key); // SSL_set1_tls_channel_id configures a TLS client to send a TLS Channel ID to // compatible servers. |private_key| must be a P-256 EC key. 
It returns one on // success and zero on error. OPENSSL_EXPORT int SSL_set1_tls_channel_id(SSL *ssl, EVP_PKEY *private_key); // SSL_get_tls_channel_id gets the client's TLS Channel ID from a server |SSL| // and copies up to the first |max_out| bytes into |out|. The Channel ID // consists of the client's P-256 public key as an (x,y) pair where each is a // 32-byte, big-endian field element. It returns 0 if the client didn't offer a // Channel ID and the length of the complete Channel ID otherwise. This function // always returns zero if |ssl| is a client. OPENSSL_EXPORT size_t SSL_get_tls_channel_id(SSL *ssl, uint8_t *out, size_t max_out); // DTLS-SRTP. // // See RFC 5764. // srtp_protection_profile_st (aka |SRTP_PROTECTION_PROFILE|) is an SRTP // profile for use with the use_srtp extension. struct srtp_protection_profile_st { const char *name; unsigned long id; } /* SRTP_PROTECTION_PROFILE */; DEFINE_CONST_STACK_OF(SRTP_PROTECTION_PROFILE) // SRTP_* define constants for SRTP profiles. #define SRTP_AES128_CM_SHA1_80 0x0001 #define SRTP_AES128_CM_SHA1_32 0x0002 #define SRTP_AES128_F8_SHA1_80 0x0003 #define SRTP_AES128_F8_SHA1_32 0x0004 #define SRTP_NULL_SHA1_80 0x0005 #define SRTP_NULL_SHA1_32 0x0006 #define SRTP_AEAD_AES_128_GCM 0x0007 #define SRTP_AEAD_AES_256_GCM 0x0008 // SSL_CTX_set_srtp_profiles enables SRTP for all SSL objects created from // |ctx|. |profile| contains a colon-separated list of profile names. It returns // one on success and zero on failure. OPENSSL_EXPORT int SSL_CTX_set_srtp_profiles(SSL_CTX *ctx, const char *profiles); // SSL_set_srtp_profiles enables SRTP for |ssl|. |profile| contains a // colon-separated list of profile names. It returns one on success and zero on // failure. OPENSSL_EXPORT int SSL_set_srtp_profiles(SSL *ssl, const char *profiles); // SSL_get_srtp_profiles returns the SRTP profiles supported by |ssl|. 
OPENSSL_EXPORT const STACK_OF(SRTP_PROTECTION_PROFILE) *SSL_get_srtp_profiles( const SSL *ssl); // SSL_get_selected_srtp_profile returns the selected SRTP profile, or NULL if // SRTP was not negotiated. OPENSSL_EXPORT const SRTP_PROTECTION_PROFILE *SSL_get_selected_srtp_profile( SSL *ssl); // Pre-shared keys. // // Connections may be configured with PSK (Pre-Shared Key) cipher suites. These // authenticate using out-of-band pre-shared keys rather than certificates. See // RFC 4279. // // This implementation uses NUL-terminated C strings for identities and identity // hints, so values with a NUL character are not supported. (RFC 4279 does not // specify the format of an identity.) // PSK_MAX_IDENTITY_LEN is the maximum supported length of a PSK identity, // excluding the NUL terminator. #define PSK_MAX_IDENTITY_LEN 128 // PSK_MAX_PSK_LEN is the maximum supported length of a pre-shared key. #define PSK_MAX_PSK_LEN 256 // SSL_CTX_set_psk_client_callback sets the callback to be called when PSK is // negotiated on the client. This callback must be set to enable PSK cipher // suites on the client. // // The callback is passed the identity hint in |hint| or NULL if none was // provided. It should select a PSK identity and write the identity and the // corresponding PSK to |identity| and |psk|, respectively. The identity is // written as a NUL-terminated C string of length (excluding the NUL terminator) // at most |max_identity_len|. The PSK's length must be at most |max_psk_len|. // The callback returns the length of the PSK or 0 if no suitable identity was // found. OPENSSL_EXPORT void SSL_CTX_set_psk_client_callback( SSL_CTX *ctx, unsigned (*cb)(SSL *ssl, const char *hint, char *identity, unsigned max_identity_len, uint8_t *psk, unsigned max_psk_len)); // SSL_set_psk_client_callback sets the callback to be called when PSK is // negotiated on the client. This callback must be set to enable PSK cipher // suites on the client. See also |SSL_CTX_set_psk_client_callback|. 
OPENSSL_EXPORT void SSL_set_psk_client_callback( SSL *ssl, unsigned (*cb)(SSL *ssl, const char *hint, char *identity, unsigned max_identity_len, uint8_t *psk, unsigned max_psk_len)); // SSL_CTX_set_psk_server_callback sets the callback to be called when PSK is // negotiated on the server. This callback must be set to enable PSK cipher // suites on the server. // // The callback is passed the identity in |identity|. It should write a PSK of // length at most |max_psk_len| to |psk| and return the number of bytes written // or zero if the PSK identity is unknown. OPENSSL_EXPORT void SSL_CTX_set_psk_server_callback( SSL_CTX *ctx, unsigned (*cb)(SSL *ssl, const char *identity, uint8_t *psk, unsigned max_psk_len)); // SSL_set_psk_server_callback sets the callback to be called when PSK is // negotiated on the server. This callback must be set to enable PSK cipher // suites on the server. See also |SSL_CTX_set_psk_server_callback|. OPENSSL_EXPORT void SSL_set_psk_server_callback( SSL *ssl, unsigned (*cb)(SSL *ssl, const char *identity, uint8_t *psk, unsigned max_psk_len)); // SSL_CTX_use_psk_identity_hint configures server connections to advertise an // identity hint of |identity_hint|. It returns one on success and zero on // error. OPENSSL_EXPORT int SSL_CTX_use_psk_identity_hint(SSL_CTX *ctx, const char *identity_hint); // SSL_use_psk_identity_hint configures server connections to advertise an // identity hint of |identity_hint|. It returns one on success and zero on // error. OPENSSL_EXPORT int SSL_use_psk_identity_hint(SSL *ssl, const char *identity_hint); // SSL_get_psk_identity_hint returns the PSK identity hint advertised for |ssl| // or NULL if there is none. OPENSSL_EXPORT const char *SSL_get_psk_identity_hint(const SSL *ssl); // SSL_get_psk_identity, after the handshake completes, returns the PSK identity // that was negotiated by |ssl| or NULL if PSK was not used. OPENSSL_EXPORT const char *SSL_get_psk_identity(const SSL *ssl); // Delegated credentials. 
// // Delegated credentials (RFC 9345) allow a TLS 1.3 endpoint to use its // certificate to issue new credentials for authentication. Once issued, // credentials can't be revoked. In order to mitigate the damage in case the // credential secret key is compromised, the credential is only valid for a // short time (days, hours, or even minutes). // // Currently only the authenticating side, as a server, is implemented. To // authenticate with delegated credentials, construct an |SSL_CREDENTIAL| with // |SSL_CREDENTIAL_new_delegated| and add it to the credential list. See also // |SSL_CTX_add1_credential|. Callers may configure a mix of delegated // credentials and X.509 credentials on the same |SSL| or |SSL_CTX| to support a // range of clients. // SSL_CREDENTIAL_new_delegated returns a new, empty delegated credential, or // NULL on error. Callers should release the result with |SSL_CREDENTIAL_free| // when done. // // Callers should configure a delegated credential, certificate chain and // private key on the credential, along with other properties, then add it with // |SSL_CTX_add1_credential|. OPENSSL_EXPORT SSL_CREDENTIAL *SSL_CREDENTIAL_new_delegated(void); // SSL_CREDENTIAL_set1_delegated_credential sets |cred|'s delegated credentials // structure to |dc|. It returns one on success and zero on error, including if // |dc| is malformed. This should be a DelegatedCredential structure, signed by // the end-entity certificate, as described in RFC 9345. OPENSSL_EXPORT int SSL_CREDENTIAL_set1_delegated_credential( SSL_CREDENTIAL *cred, CRYPTO_BUFFER *dc); // QUIC integration. // // QUIC acts as an underlying transport for the TLS 1.3 handshake. The following // functions allow a QUIC implementation to serve as the underlying transport as // described in RFC 9001. // // When configured for QUIC, |SSL_do_handshake| will drive the handshake as // before, but it will not use the configured |BIO|. 
It will call functions on // |SSL_QUIC_METHOD| to configure secrets and send data. If data is needed from // the peer, it will return |SSL_ERROR_WANT_READ|. As the caller receives data // it can decrypt, it calls |SSL_provide_quic_data|. Subsequent // |SSL_do_handshake| calls will then consume that data and progress the // handshake. After the handshake is complete, the caller should continue to // call |SSL_provide_quic_data| for any post-handshake data, followed by // |SSL_process_quic_post_handshake| to process it. It is an error to call // |SSL_read| and |SSL_write| in QUIC. // // 0-RTT behaves similarly to |TLS_method|'s usual behavior. |SSL_do_handshake| // returns early as soon as the client (respectively, server) is allowed to send // 0-RTT (respectively, half-RTT) data. The caller should then call // |SSL_do_handshake| again to consume the remaining handshake messages and // confirm the handshake. As a client, |SSL_ERROR_EARLY_DATA_REJECTED| and // |SSL_reset_early_data_reject| behave as usual. // // See https://www.rfc-editor.org/rfc/rfc9001.html#section-4.1 for more details. // // To avoid DoS attacks, the QUIC implementation must limit the amount of data // being queued up. The implementation can call // |SSL_quic_max_handshake_flight_len| to get the maximum buffer length at each // encryption level. // // QUIC implementations must additionally configure transport parameters with // |SSL_set_quic_transport_params|. |SSL_get_peer_quic_transport_params| may be // used to query the value received from the peer. BoringSSL handles this // extension as an opaque byte string. The caller is responsible for serializing // and parsing them. See https://www.rfc-editor.org/rfc/rfc9000#section-7.4 for // details. // // QUIC additionally imposes restrictions on 0-RTT. 
In particular, the QUIC // transport layer requires that if a server accepts 0-RTT data, then the // transport parameters sent on the resumed connection must not lower any limits // compared to the transport parameters that the server sent on the connection // where the ticket for 0-RTT was issued. In effect, the server must remember // the transport parameters with the ticket. Application protocols running on // QUIC may impose similar restrictions, for example HTTP/3's restrictions on // SETTINGS frames. // // BoringSSL implements this check by doing a byte-for-byte comparison of an // opaque context passed in by the server. This context must be the same on the // connection where the ticket was issued and the connection where that ticket // is used for 0-RTT. If there is a mismatch, or the context was not set, // BoringSSL will reject early data (but not reject the resumption attempt). // This context is set via |SSL_set_quic_early_data_context| and should cover // both transport parameters and any application state. // |SSL_set_quic_early_data_context| must be called on the server with a // non-empty context if the server is to support 0-RTT in QUIC. // // BoringSSL does not perform any client-side checks on the transport // parameters received from a server that also accepted early data. It is up to // the caller to verify that the received transport parameters do not lower any // limits, and to close the QUIC connection if that is not the case. The same // holds for any application protocol state remembered for 0-RTT, e.g. HTTP/3 // SETTINGS. // ssl_encryption_level_t represents an encryption level in TLS 1.3. Values in // this enum match the first 4 epochs used in DTLS 1.3 (section 6.1). enum ssl_encryption_level_t BORINGSSL_ENUM_INT { ssl_encryption_initial = 0, ssl_encryption_early_data = 1, ssl_encryption_handshake = 2, ssl_encryption_application = 3, }; // ssl_quic_method_st (aka |SSL_QUIC_METHOD|) describes custom QUIC hooks. 
struct ssl_quic_method_st { // set_read_secret configures the read secret and cipher suite for the given // encryption level. It returns one on success and zero to terminate the // handshake with an error. It will be called at most once per encryption // level. // // BoringSSL will not release read keys before QUIC may use them. Once a level // has been initialized, QUIC may begin processing data from it. Handshake // data should be passed to |SSL_provide_quic_data| and application data (if // |level| is |ssl_encryption_early_data| or |ssl_encryption_application|) may // be processed according to the rules of the QUIC protocol. // // QUIC ACKs packets at the same encryption level they were received at, // except that client |ssl_encryption_early_data| (0-RTT) packets trigger // server |ssl_encryption_application| (1-RTT) ACKs. BoringSSL will always // install ACK-writing keys with |set_write_secret| before the packet-reading // keys with |set_read_secret|. This ensures the caller can always ACK any // packet it decrypts. Note this means the server installs 1-RTT write keys // before 0-RTT read keys. // // The converse is not true. An encryption level may be configured with write // secrets a roundtrip before the corresponding secrets for reading ACKs is // available. int (*set_read_secret)(SSL *ssl, enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len); // set_write_secret behaves like |set_read_secret| but configures the write // secret and cipher suite for the given encryption level. It will be called // at most once per encryption level. // // BoringSSL will not release write keys before QUIC may use them. If |level| // is |ssl_encryption_early_data| or |ssl_encryption_application|, QUIC may // begin sending application data at |level|. However, note that BoringSSL // configures server |ssl_encryption_application| write keys before the client // Finished. 
This allows QUIC to send half-RTT data, but the handshake is not // confirmed at this point and, if requesting client certificates, the client // is not yet authenticated. // // See |set_read_secret| for additional invariants between packets and their // ACKs. // // Note that, on 0-RTT reject, the |ssl_encryption_early_data| write secret // may use a different cipher suite from the other keys. int (*set_write_secret)(SSL *ssl, enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len); // add_handshake_data adds handshake data to the current flight at the given // encryption level. It returns one on success and zero on error. // // BoringSSL will pack data from a single encryption level together, but a // single handshake flight may include multiple encryption levels. Callers // should defer writing data to the network until |flush_flight| to better // pack QUIC packets into transport datagrams. // // If |level| is not |ssl_encryption_initial|, this function will not be // called before |level| is initialized with |set_write_secret|. int (*add_handshake_data)(SSL *ssl, enum ssl_encryption_level_t level, const uint8_t *data, size_t len); // flush_flight is called when the current flight is complete and should be // written to the transport. Note a flight may contain data at several // encryption levels. It returns one on success and zero on error. int (*flush_flight)(SSL *ssl); // send_alert sends a fatal alert at the specified encryption level. It // returns one on success and zero on error. // // If |level| is not |ssl_encryption_initial|, this function will not be // called before |level| is initialized with |set_write_secret|. int (*send_alert)(SSL *ssl, enum ssl_encryption_level_t level, uint8_t alert); }; // SSL_quic_max_handshake_flight_len returns returns the maximum number of bytes // that may be received at the given encryption level. This function should be // used to limit buffering in the QUIC implementation. 
// // See https://www.rfc-editor.org/rfc/rfc9000#section-7.5 OPENSSL_EXPORT size_t SSL_quic_max_handshake_flight_len( const SSL *ssl, enum ssl_encryption_level_t level); // SSL_quic_read_level returns the current read encryption level. // // TODO(davidben): Is it still necessary to expose this function to callers? // QUICHE does not use it. OPENSSL_EXPORT enum ssl_encryption_level_t SSL_quic_read_level(const SSL *ssl); // SSL_quic_write_level returns the current write encryption level. // // TODO(davidben): Is it still necessary to expose this function to callers? // QUICHE does not use it. OPENSSL_EXPORT enum ssl_encryption_level_t SSL_quic_write_level(const SSL *ssl); // SSL_provide_quic_data provides data from QUIC at a particular encryption // level |level|. It returns one on success and zero on error. Note this // function will return zero if the handshake is not expecting data from |level| // at this time. The QUIC implementation should then close the connection with // an error. OPENSSL_EXPORT int SSL_provide_quic_data(SSL *ssl, enum ssl_encryption_level_t level, const uint8_t *data, size_t len); // SSL_process_quic_post_handshake processes any data that QUIC has provided // after the handshake has completed. This includes NewSessionTicket messages // sent by the server. It returns one on success and zero on error. OPENSSL_EXPORT int SSL_process_quic_post_handshake(SSL *ssl); // SSL_CTX_set_quic_method configures the QUIC hooks. This should only be // configured with a minimum version of TLS 1.3. |quic_method| must remain valid // for the lifetime of |ctx|. It returns one on success and zero on error. OPENSSL_EXPORT int SSL_CTX_set_quic_method(SSL_CTX *ctx, const SSL_QUIC_METHOD *quic_method); // SSL_set_quic_method configures the QUIC hooks. This should only be // configured with a minimum version of TLS 1.3. |quic_method| must remain valid // for the lifetime of |ssl|. It returns one on success and zero on error. 
OPENSSL_EXPORT int SSL_set_quic_method(SSL *ssl, const SSL_QUIC_METHOD *quic_method); // SSL_set_quic_transport_params configures |ssl| to send |params| (of length // |params_len|) in the quic_transport_parameters extension in either the // ClientHello or EncryptedExtensions handshake message. It is an error to set // transport parameters if |ssl| is not configured for QUIC. The buffer pointed // to by |params| only need be valid for the duration of the call to this // function. This function returns 1 on success and 0 on failure. OPENSSL_EXPORT int SSL_set_quic_transport_params(SSL *ssl, const uint8_t *params, size_t params_len); // SSL_get_peer_quic_transport_params provides the caller with the value of the // quic_transport_parameters extension sent by the peer. A pointer to the buffer // containing the TransportParameters will be put in |*out_params|, and its // length in |*params_len|. This buffer will be valid for the lifetime of the // |SSL|. If no params were received from the peer, |*out_params_len| will be 0. OPENSSL_EXPORT void SSL_get_peer_quic_transport_params( const SSL *ssl, const uint8_t **out_params, size_t *out_params_len); // SSL_set_quic_use_legacy_codepoint configures whether to use the legacy QUIC // extension codepoint 0xffa5 as opposed to the official value 57. Call with // |use_legacy| set to 1 to use 0xffa5 and call with 0 to use 57. By default, // the standard code point is used. OPENSSL_EXPORT void SSL_set_quic_use_legacy_codepoint(SSL *ssl, int use_legacy); // SSL_set_quic_early_data_context configures a context string in QUIC servers // for accepting early data. If a resumption connection offers early data, the // server will check if the value matches that of the connection which minted // the ticket. If not, resumption still succeeds but early data is rejected. // This should include all QUIC Transport Parameters except ones specified that // the client MUST NOT remember. 
This should also include any application // protocol-specific state. For HTTP/3, this should be the serialized server // SETTINGS frame and the QUIC Transport Parameters (except the stateless reset // token). // // This function may be called before |SSL_do_handshake| or during server // certificate selection. It returns 1 on success and 0 on failure. OPENSSL_EXPORT int SSL_set_quic_early_data_context(SSL *ssl, const uint8_t *context, size_t context_len); // Early data. // // WARNING: 0-RTT support in BoringSSL is currently experimental and not fully // implemented. It may cause interoperability or security failures when used. // // Early data, or 0-RTT, is a feature in TLS 1.3 which allows clients to send // data on the first flight during a resumption handshake. This can save a // round-trip in some application protocols. // // WARNING: A 0-RTT handshake has different security properties from normal // handshake, so it is off by default unless opted in. In particular, early data // is replayable by a network attacker. Callers must account for this when // sending or processing data before the handshake is confirmed. See RFC 8446 // for more information. // // As a server, if early data is accepted, |SSL_do_handshake| will complete as // soon as the ClientHello is processed and server flight sent. |SSL_write| may // be used to send half-RTT data. |SSL_read| will consume early data and // transition to 1-RTT data as appropriate. Prior to the transition, // |SSL_in_init| will report the handshake is still in progress. Callers may use // it or |SSL_in_early_data| to defer or reject requests as needed. // // Early data as a client is more complex. If the offered session (see // |SSL_set_session|) is 0-RTT-capable, the handshake will return after sending // the ClientHello. The predicted peer certificates and ALPN protocol will be // available via the usual APIs. |SSL_write| will write early data, up to the // session's limit. 
Writes past this limit and |SSL_read| will complete the // handshake before continuing. Callers may also call |SSL_do_handshake| again // to complete the handshake sooner. // // If the server accepts early data, the handshake will succeed. |SSL_read| and // |SSL_write| will then act as in a 1-RTT handshake. The peer certificates and // ALPN protocol will be as predicted and need not be re-queried. // // If the server rejects early data, |SSL_do_handshake| (and thus |SSL_read| and // |SSL_write|) will then fail with |SSL_get_error| returning // |SSL_ERROR_EARLY_DATA_REJECTED|. The caller should treat this as a connection // error and most likely perform a high-level retry. Note the server may still // have processed the early data due to attacker replays. // // To then continue the handshake on the original connection, use // |SSL_reset_early_data_reject|. The connection will then behave as one which // had not yet completed the handshake. This allows a faster retry than making a // fresh connection. |SSL_do_handshake| will complete the full handshake, // possibly resulting in different peer certificates, ALPN protocol, and other // properties. The caller must disregard any values from before the reset and // query again. // // Finally, to implement the fallback described in RFC 8446 appendix D.3, retry // on a fresh connection without 0-RTT if the handshake fails with // |SSL_R_WRONG_VERSION_ON_EARLY_DATA|. // SSL_CTX_set_early_data_enabled sets whether early data is allowed to be used // with resumptions using |ctx|. OPENSSL_EXPORT void SSL_CTX_set_early_data_enabled(SSL_CTX *ctx, int enabled); // SSL_set_early_data_enabled sets whether early data is allowed to be used // with resumptions using |ssl|. See |SSL_CTX_set_early_data_enabled| for more // information. OPENSSL_EXPORT void SSL_set_early_data_enabled(SSL *ssl, int enabled); // SSL_in_early_data returns one if |ssl| has a pending handshake that has // progressed enough to send or receive early data. 
Clients may call |SSL_write| // to send early data, but |SSL_read| will complete the handshake before // accepting application data. Servers may call |SSL_read| to read early data // and |SSL_write| to send half-RTT data. OPENSSL_EXPORT int SSL_in_early_data(const SSL *ssl); // SSL_SESSION_early_data_capable returns whether early data would have been // attempted with |session| if enabled. OPENSSL_EXPORT int SSL_SESSION_early_data_capable(const SSL_SESSION *session); // SSL_SESSION_copy_without_early_data returns a copy of |session| with early // data disabled. If |session| already does not support early data, it returns // |session| with the reference count increased. The caller takes ownership of // the result and must release it with |SSL_SESSION_free|. // // This function may be used on the client to clear early data support from // existing sessions when the server rejects early data. In particular, // |SSL_R_WRONG_VERSION_ON_EARLY_DATA| requires a fresh connection to retry, and // the client would not want 0-RTT enabled for the next connection attempt. OPENSSL_EXPORT SSL_SESSION *SSL_SESSION_copy_without_early_data( SSL_SESSION *session); // SSL_early_data_accepted returns whether early data was accepted on the // handshake performed by |ssl|. OPENSSL_EXPORT int SSL_early_data_accepted(const SSL *ssl); // SSL_reset_early_data_reject resets |ssl| after an early data reject. All // 0-RTT state is discarded, including any pending |SSL_write| calls. The caller // should treat |ssl| as a logically fresh connection, usually by driving the // handshake to completion using |SSL_do_handshake|. // // It is an error to call this function on an |SSL| object that is not signaling // |SSL_ERROR_EARLY_DATA_REJECTED|. OPENSSL_EXPORT void SSL_reset_early_data_reject(SSL *ssl); // SSL_get_ticket_age_skew returns the difference, in seconds, between the // client-sent ticket age and the server-computed value in TLS 1.3 server // connections which resumed a session. 
OPENSSL_EXPORT int32_t SSL_get_ticket_age_skew(const SSL *ssl); // An ssl_early_data_reason_t describes why 0-RTT was accepted or rejected. // These values are persisted to logs. Entries should not be renumbered and // numeric values should never be reused. enum ssl_early_data_reason_t BORINGSSL_ENUM_INT { // The handshake has not progressed far enough for the 0-RTT status to be // known. ssl_early_data_unknown = 0, // 0-RTT is disabled for this connection. ssl_early_data_disabled = 1, // 0-RTT was accepted. ssl_early_data_accepted = 2, // The negotiated protocol version does not support 0-RTT. ssl_early_data_protocol_version = 3, // The peer declined to offer or accept 0-RTT for an unknown reason. ssl_early_data_peer_declined = 4, // The client did not offer a session. ssl_early_data_no_session_offered = 5, // The server declined to resume the session. ssl_early_data_session_not_resumed = 6, // The session does not support 0-RTT. ssl_early_data_unsupported_for_session = 7, // The server sent a HelloRetryRequest. ssl_early_data_hello_retry_request = 8, // The negotiated ALPN protocol did not match the session. ssl_early_data_alpn_mismatch = 9, // The connection negotiated Channel ID, which is incompatible with 0-RTT. ssl_early_data_channel_id = 10, // Value 11 is reserved. (It has historically |ssl_early_data_token_binding|.) // The client and server ticket age were too far apart. ssl_early_data_ticket_age_skew = 12, // QUIC parameters differ between this connection and the original. ssl_early_data_quic_parameter_mismatch = 13, // The application settings did not match the session. ssl_early_data_alps_mismatch = 14, // The value of the largest entry. ssl_early_data_reason_max_value = ssl_early_data_alps_mismatch, }; // SSL_get_early_data_reason returns details why 0-RTT was accepted or rejected // on |ssl|. This is primarily useful on the server. 
OPENSSL_EXPORT enum ssl_early_data_reason_t SSL_get_early_data_reason( const SSL *ssl); // SSL_early_data_reason_string returns a string representation for |reason|, or // NULL if |reason| is unknown. This function may be used for logging. OPENSSL_EXPORT const char *SSL_early_data_reason_string( enum ssl_early_data_reason_t reason); // Encrypted ClientHello. // // ECH is a mechanism for encrypting the entire ClientHello message in TLS 1.3. // This can prevent observers from seeing cleartext information about the // connection, such as the server_name extension. // // By default, BoringSSL will treat the server name, session ticket, and client // certificate as secret, but most other parameters, such as the ALPN protocol // list will be treated as public and sent in the cleartext ClientHello. Other // APIs may be added for applications with different secrecy requirements. // // ECH support in BoringSSL is still experimental and under development. // // See https://tools.ietf.org/html/draft-ietf-tls-esni-13. // SSL_set_enable_ech_grease configures whether the client will send a GREASE // ECH extension when no supported ECHConfig is available. OPENSSL_EXPORT void SSL_set_enable_ech_grease(SSL *ssl, int enable); // SSL_set1_ech_config_list configures |ssl| to, as a client, offer ECH with the // specified configuration. |ech_config_list| should contain a serialized // ECHConfigList structure. It returns one on success and zero on error. // // This function returns an error if the input is malformed. If the input is // valid but none of the ECHConfigs implement supported parameters, it will // return success and proceed without ECH. // // If a supported ECHConfig is found, |ssl| will encrypt the true ClientHello // parameters. If the server cannot decrypt it, e.g. due to a key mismatch, ECH // has a recovery flow. |ssl| will handshake using the cleartext parameters, // including a public name in the ECHConfig. 
If using // |SSL_CTX_set_custom_verify|, callers should use |SSL_get0_ech_name_override| // to verify the certificate with the public name. If using the built-in // verifier, the |X509_STORE_CTX| will be configured automatically. // // If no other errors are found in this handshake, it will fail with // |SSL_R_ECH_REJECTED|. Since it didn't use the true parameters, the connection // cannot be used for application data. Instead, callers should handle this // error by calling |SSL_get0_ech_retry_configs| and retrying the connection // with updated ECH parameters. If the retry also fails with // |SSL_R_ECH_REJECTED|, the caller should report a connection failure. OPENSSL_EXPORT int SSL_set1_ech_config_list(SSL *ssl, const uint8_t *ech_config_list, size_t ech_config_list_len); // SSL_get0_ech_name_override, if |ssl| is a client and the server rejected ECH, // sets |*out_name| and |*out_name_len| to point to a buffer containing the ECH // public name. Otherwise, the buffer will be empty. // // When offering ECH as a client, this function should be called during the // certificate verification callback (see |SSL_CTX_set_custom_verify|). If // |*out_name_len| is non-zero, the caller should verify the certificate against // the result, interpreted as a DNS name, rather than the true server name. In // this case, the handshake will never succeed and is only used to authenticate // retry configs. See also |SSL_get0_ech_retry_configs|. OPENSSL_EXPORT void SSL_get0_ech_name_override(const SSL *ssl, const char **out_name, size_t *out_name_len); // SSL_get0_ech_retry_configs sets |*out_retry_configs| and // |*out_retry_configs_len| to a buffer containing a serialized ECHConfigList. // If the server did not provide an ECHConfigList, |*out_retry_configs_len| will // be zero. // // When handling an |SSL_R_ECH_REJECTED| error code as a client, callers should // use this function to recover from potential key mismatches. 
If the result is // non-empty, the caller should retry the connection, passing this buffer to // |SSL_set1_ech_config_list|. If the result is empty, the server has rolled // back ECH support, and the caller should retry without ECH. // // This function must only be called in response to an |SSL_R_ECH_REJECTED| // error code. Calling this function on |ssl|s that have not authenticated the // rejection handshake will assert in debug builds and otherwise return an // unparsable list. OPENSSL_EXPORT void SSL_get0_ech_retry_configs( const SSL *ssl, const uint8_t **out_retry_configs, size_t *out_retry_configs_len); // SSL_marshal_ech_config constructs a new serialized ECHConfig. On success, it // sets |*out| to a newly-allocated buffer containing the result and |*out_len| // to the size of the buffer. The caller must call |OPENSSL_free| on |*out| to // release the memory. On failure, it returns zero. // // The |config_id| field is a single byte identifier for the ECHConfig. Reusing // config IDs is allowed, but if multiple ECHConfigs with the same config ID are // active at a time, server load may increase. See // |SSL_ECH_KEYS_has_duplicate_config_id|. // // The public key and KEM algorithm are taken from |key|. |public_name| is the // DNS name used to authenticate the recovery flow. |max_name_len| should be the // length of the longest name in the ECHConfig's anonymity set and influences // client padding decisions. OPENSSL_EXPORT int SSL_marshal_ech_config(uint8_t **out, size_t *out_len, uint8_t config_id, const EVP_HPKE_KEY *key, const char *public_name, size_t max_name_len); // SSL_ECH_KEYS_new returns a newly-allocated |SSL_ECH_KEYS| or NULL on error. OPENSSL_EXPORT SSL_ECH_KEYS *SSL_ECH_KEYS_new(void); // SSL_ECH_KEYS_up_ref increments the reference count of |keys|. OPENSSL_EXPORT void SSL_ECH_KEYS_up_ref(SSL_ECH_KEYS *keys); // SSL_ECH_KEYS_free releases memory associated with |keys|. 
OPENSSL_EXPORT void SSL_ECH_KEYS_free(SSL_ECH_KEYS *keys); // SSL_ECH_KEYS_add decodes |ech_config| as an ECHConfig and appends it with // |key| to |keys|. If |is_retry_config| is non-zero, this config will be // returned to the client on configuration mismatch. It returns one on success // and zero on error. // // This function should be called successively to register each ECHConfig in // decreasing order of preference. This configuration must be completed before // setting |keys| on an |SSL_CTX| with |SSL_CTX_set1_ech_keys|. After that // point, |keys| is immutable; no more ECHConfig values may be added. // // See also |SSL_CTX_set1_ech_keys|. OPENSSL_EXPORT int SSL_ECH_KEYS_add(SSL_ECH_KEYS *keys, int is_retry_config, const uint8_t *ech_config, size_t ech_config_len, const EVP_HPKE_KEY *key); // SSL_ECH_KEYS_has_duplicate_config_id returns one if |keys| has duplicate // config IDs or zero otherwise. Duplicate config IDs still work, but may // increase server load due to trial decryption. OPENSSL_EXPORT int SSL_ECH_KEYS_has_duplicate_config_id( const SSL_ECH_KEYS *keys); // SSL_ECH_KEYS_marshal_retry_configs serializes the retry configs in |keys| as // an ECHConfigList. On success, it sets |*out| to a newly-allocated buffer // containing the result and |*out_len| to the size of the buffer. The caller // must call |OPENSSL_free| on |*out| to release the memory. On failure, it // returns zero. // // This output may be advertised to clients in DNS. OPENSSL_EXPORT int SSL_ECH_KEYS_marshal_retry_configs(const SSL_ECH_KEYS *keys, uint8_t **out, size_t *out_len); // SSL_CTX_set1_ech_keys configures |ctx| to use |keys| to decrypt encrypted // ClientHellos. It returns one on success, and zero on failure. If |keys| does // not contain any retry configs, this function will fail. Retry configs are // marked as such when they are added to |keys| with |SSL_ECH_KEYS_add|. // // Once |keys| has been passed to this function, it is immutable. 
Unlike most // |SSL_CTX| configuration functions, this function may be called even if |ctx| // already has associated connections on multiple threads. This may be used to // rotate keys in a long-lived server process. // // The configured ECHConfig values should also be advertised out-of-band via DNS // (see draft-ietf-dnsop-svcb-https). Before advertising an ECHConfig in DNS, // deployments should ensure all instances of the service are configured with // the ECHConfig and corresponding private key. // // Only the most recent fully-deployed ECHConfigs should be advertised in DNS. // |keys| may contain a newer set if those ECHConfigs are mid-deployment. It // should also contain older sets, until the DNS change has rolled out and the // old records have expired from caches. // // If there is a mismatch, |SSL| objects associated with |ctx| will complete the // handshake using the cleartext ClientHello and send updated ECHConfig values // to the client. The client will then retry to recover, but with a latency // penalty. This recovery flow depends on the public name in the ECHConfig. // Before advertising an ECHConfig in DNS, deployments must ensure all instances // of the service can present a valid certificate for the public name. // // BoringSSL negotiates ECH before certificate selection callbacks are called, // including |SSL_CTX_set_select_certificate_cb|. If ECH is negotiated, the // reported |SSL_CLIENT_HELLO| structure and |SSL_get_servername| function will // transparently reflect the inner ClientHello. Callers should select parameters // based on these values to correctly handle ECH as well as the recovery flow. OPENSSL_EXPORT int SSL_CTX_set1_ech_keys(SSL_CTX *ctx, SSL_ECH_KEYS *keys); // SSL_ech_accepted returns one if |ssl| negotiated ECH and zero otherwise. OPENSSL_EXPORT int SSL_ech_accepted(const SSL *ssl); // Alerts. // // TLS uses alerts to signal error conditions. Alerts have a type (warning or // fatal) and description. 
OpenSSL internally handles fatal alerts with // dedicated error codes (see |SSL_AD_REASON_OFFSET|). Except for close_notify, // warning alerts are silently ignored and may only be surfaced with // |SSL_CTX_set_info_callback|. // SSL_AD_REASON_OFFSET is the offset between error reasons and |SSL_AD_*| // values. Any error code under |ERR_LIB_SSL| with an error reason above this // value corresponds to an alert description. Consumers may add or subtract // |SSL_AD_REASON_OFFSET| to convert between them. // // make_errors.go reserves error codes above 1000 for manually-assigned errors. // This value must be kept in sync with reservedReasonCode in make_errors.h #define SSL_AD_REASON_OFFSET 1000 // SSL_AD_* are alert descriptions. #define SSL_AD_CLOSE_NOTIFY SSL3_AD_CLOSE_NOTIFY #define SSL_AD_UNEXPECTED_MESSAGE SSL3_AD_UNEXPECTED_MESSAGE #define SSL_AD_BAD_RECORD_MAC SSL3_AD_BAD_RECORD_MAC #define SSL_AD_DECRYPTION_FAILED TLS1_AD_DECRYPTION_FAILED #define SSL_AD_RECORD_OVERFLOW TLS1_AD_RECORD_OVERFLOW #define SSL_AD_DECOMPRESSION_FAILURE SSL3_AD_DECOMPRESSION_FAILURE #define SSL_AD_HANDSHAKE_FAILURE SSL3_AD_HANDSHAKE_FAILURE #define SSL_AD_NO_CERTIFICATE SSL3_AD_NO_CERTIFICATE // Legacy SSL 3.0 value #define SSL_AD_BAD_CERTIFICATE SSL3_AD_BAD_CERTIFICATE #define SSL_AD_UNSUPPORTED_CERTIFICATE SSL3_AD_UNSUPPORTED_CERTIFICATE #define SSL_AD_CERTIFICATE_REVOKED SSL3_AD_CERTIFICATE_REVOKED #define SSL_AD_CERTIFICATE_EXPIRED SSL3_AD_CERTIFICATE_EXPIRED #define SSL_AD_CERTIFICATE_UNKNOWN SSL3_AD_CERTIFICATE_UNKNOWN #define SSL_AD_ILLEGAL_PARAMETER SSL3_AD_ILLEGAL_PARAMETER #define SSL_AD_UNKNOWN_CA TLS1_AD_UNKNOWN_CA #define SSL_AD_ACCESS_DENIED TLS1_AD_ACCESS_DENIED #define SSL_AD_DECODE_ERROR TLS1_AD_DECODE_ERROR #define SSL_AD_DECRYPT_ERROR TLS1_AD_DECRYPT_ERROR #define SSL_AD_EXPORT_RESTRICTION TLS1_AD_EXPORT_RESTRICTION #define SSL_AD_PROTOCOL_VERSION TLS1_AD_PROTOCOL_VERSION #define SSL_AD_INSUFFICIENT_SECURITY TLS1_AD_INSUFFICIENT_SECURITY #define SSL_AD_INTERNAL_ERROR 
TLS1_AD_INTERNAL_ERROR #define SSL_AD_INAPPROPRIATE_FALLBACK SSL3_AD_INAPPROPRIATE_FALLBACK #define SSL_AD_USER_CANCELLED TLS1_AD_USER_CANCELLED #define SSL_AD_NO_RENEGOTIATION TLS1_AD_NO_RENEGOTIATION #define SSL_AD_MISSING_EXTENSION TLS1_AD_MISSING_EXTENSION #define SSL_AD_UNSUPPORTED_EXTENSION TLS1_AD_UNSUPPORTED_EXTENSION #define SSL_AD_CERTIFICATE_UNOBTAINABLE TLS1_AD_CERTIFICATE_UNOBTAINABLE #define SSL_AD_UNRECOGNIZED_NAME TLS1_AD_UNRECOGNIZED_NAME #define SSL_AD_BAD_CERTIFICATE_STATUS_RESPONSE \ TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE #define SSL_AD_BAD_CERTIFICATE_HASH_VALUE TLS1_AD_BAD_CERTIFICATE_HASH_VALUE #define SSL_AD_UNKNOWN_PSK_IDENTITY TLS1_AD_UNKNOWN_PSK_IDENTITY #define SSL_AD_CERTIFICATE_REQUIRED TLS1_AD_CERTIFICATE_REQUIRED #define SSL_AD_NO_APPLICATION_PROTOCOL TLS1_AD_NO_APPLICATION_PROTOCOL #define SSL_AD_ECH_REQUIRED TLS1_AD_ECH_REQUIRED // SSL_alert_type_string_long returns a string description of |value| as an // alert type (warning or fatal). OPENSSL_EXPORT const char *SSL_alert_type_string_long(int value); // SSL_alert_desc_string_long returns a string description of |value| as an // alert description or "unknown" if unknown. OPENSSL_EXPORT const char *SSL_alert_desc_string_long(int value); // SSL_send_fatal_alert sends a fatal alert over |ssl| of the specified type, // which should be one of the |SSL_AD_*| constants. It returns one on success // and <= 0 on error. The caller should pass the return value into // |SSL_get_error| to determine how to proceed. Once this function has been // called, future calls to |SSL_write| will fail. // // If retrying a failed operation due to |SSL_ERROR_WANT_WRITE|, subsequent // calls must use the same |alert| parameter. OPENSSL_EXPORT int SSL_send_fatal_alert(SSL *ssl, uint8_t alert); // ex_data functions. // // See |ex_data.h| for details. 
OPENSSL_EXPORT int SSL_set_ex_data(SSL *ssl, int idx, void *data); OPENSSL_EXPORT void *SSL_get_ex_data(const SSL *ssl, int idx); OPENSSL_EXPORT int SSL_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int SSL_SESSION_set_ex_data(SSL_SESSION *session, int idx, void *data); OPENSSL_EXPORT void *SSL_SESSION_get_ex_data(const SSL_SESSION *session, int idx); OPENSSL_EXPORT int SSL_SESSION_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int SSL_CTX_set_ex_data(SSL_CTX *ctx, int idx, void *data); OPENSSL_EXPORT void *SSL_CTX_get_ex_data(const SSL_CTX *ctx, int idx); OPENSSL_EXPORT int SSL_CTX_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int SSL_CREDENTIAL_set_ex_data(SSL_CREDENTIAL *cred, int idx, void *data); OPENSSL_EXPORT void *SSL_CREDENTIAL_get_ex_data(const SSL_CREDENTIAL *cred, int idx); OPENSSL_EXPORT int SSL_CREDENTIAL_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); // Low-level record-layer state. // SSL_get_ivs sets |*out_iv_len| to the length of the IVs for the ciphers // underlying |ssl| and sets |*out_read_iv| and |*out_write_iv| to point to the // current IVs for the read and write directions. This is only meaningful for // connections with implicit IVs (i.e. CBC mode with TLS 1.0). // // It returns one on success or zero on error. OPENSSL_EXPORT int SSL_get_ivs(const SSL *ssl, const uint8_t **out_read_iv, const uint8_t **out_write_iv, size_t *out_iv_len); // SSL_get_key_block_len returns the length of |ssl|'s key block, for TLS 1.2 // and below. It is an error to call this function during a handshake, or if // |ssl| negotiated TLS 1.3. 
OPENSSL_EXPORT size_t SSL_get_key_block_len(const SSL *ssl); // SSL_generate_key_block generates |out_len| bytes of key material for |ssl|'s // current connection state, for TLS 1.2 and below. It is an error to call this // function during a handshake, or if |ssl| negotiated TLS 1.3. OPENSSL_EXPORT int SSL_generate_key_block(const SSL *ssl, uint8_t *out, size_t out_len); // SSL_get_read_sequence returns, in TLS, the expected sequence number of the // next incoming record in the current epoch. In DTLS, it returns the maximum // sequence number received in the current epoch and includes the epoch number // in the two most significant bytes. OPENSSL_EXPORT uint64_t SSL_get_read_sequence(const SSL *ssl); // SSL_get_write_sequence returns the sequence number of the next outgoing // record in the current epoch. In DTLS, it includes the epoch number in the // two most significant bytes. OPENSSL_EXPORT uint64_t SSL_get_write_sequence(const SSL *ssl); // SSL_CTX_set_record_protocol_version returns whether |version| is zero. OPENSSL_EXPORT int SSL_CTX_set_record_protocol_version(SSL_CTX *ctx, int version); // Handshake hints. // // WARNING: Contact the BoringSSL team before using this API. While this // mechanism was designed to gracefully recover from version skew and // configuration mismatch, splitting a single TLS server into multiple services // is complex. // // Some server deployments make asynchronous RPC calls in both ClientHello // dispatch and private key operations. In TLS handshakes where the private key // operation occurs in the first round-trip, this results in two consecutive RPC // round-trips. Handshake hints allow the RPC service to predict a signature. // If correctly predicted, this can skip the second RPC call. // // First, the server installs a certificate selection callback (see // |SSL_CTX_set_select_certificate_cb|). 
When that is called, it performs the // RPC as before, but includes the ClientHello and a capabilities string from // |SSL_serialize_capabilities|. // // Next, the RPC service creates its own |SSL| object, applies the results of // certificate selection, calls |SSL_request_handshake_hints|, and runs the // handshake. If this successfully computes handshake hints (see // |SSL_serialize_handshake_hints|), the RPC server should send the hints // alongside any certificate selection results. // // Finally, the server calls |SSL_set_handshake_hints| and applies any // configuration from the RPC server. It then completes the handshake as before. // If the hints apply, BoringSSL will use the predicted signature and skip the // private key callbacks. Otherwise, BoringSSL will call private key callbacks // to generate a signature as before. // // Callers should synchronize configuration across the two services. // Configuration mismatches and some cases of version skew are not fatal, but // may result in the hints not applying. Additionally, some handshake flows use // the private key in later round-trips, such as TLS 1.3 HelloRetryRequest. In // those cases, BoringSSL will not predict a signature as there is no benefit. // Callers must allow for handshakes to complete without a predicted signature. // SSL_serialize_capabilities writes an opaque byte string to |out| describing // some of |ssl|'s capabilities. It returns one on success and zero on error. // // This string is used by BoringSSL internally to reduce the impact of version // skew. OPENSSL_EXPORT int SSL_serialize_capabilities(const SSL *ssl, CBB *out); // SSL_request_handshake_hints configures |ssl| to generate a handshake hint for // |client_hello|. It returns one on success and zero on error. |client_hello| // should contain a serialized ClientHello structure, from the |client_hello| // and |client_hello_len| fields of the |SSL_CLIENT_HELLO| structure. 
// |capabilities| should contain the output of |SSL_serialize_capabilities|.
//
// When configured, |ssl| will perform no I/O (so there is no need to configure
// |BIO|s). For QUIC, the caller should still configure an |SSL_QUIC_METHOD|,
// but the callbacks themselves will never be called and may be left NULL or
// report failure. |SSL_provide_quic_data| also should not be called.
//
// If hint generation is successful, |SSL_do_handshake| will stop the handshake
// early with |SSL_get_error| returning |SSL_ERROR_HANDSHAKE_HINTS_READY|. At
// this point, the caller should run |SSL_serialize_handshake_hints| to extract
// the resulting hints.
//
// Hint generation may fail if, e.g., |ssl| was unable to process the
// ClientHello. Callers should then complete the certificate selection RPC and
// continue the original handshake with no hint. It will likely fail, but this
// reports the correct alert to the client and is more robust in case of
// mismatch.
OPENSSL_EXPORT int SSL_request_handshake_hints(SSL *ssl,
                                               const uint8_t *client_hello,
                                               size_t client_hello_len,
                                               const uint8_t *capabilities,
                                               size_t capabilities_len);

// SSL_serialize_handshake_hints writes an opaque byte string to |out|
// containing the handshake hints computed by |ssl|. It returns one on success
// and zero on error. This function should only be called if
// |SSL_request_handshake_hints| was configured and the handshake terminated
// with |SSL_ERROR_HANDSHAKE_HINTS_READY|.
//
// This string may be passed to |SSL_set_handshake_hints| on another |SSL| to
// avoid an extra signature call.
OPENSSL_EXPORT int SSL_serialize_handshake_hints(const SSL *ssl, CBB *out);

// SSL_set_handshake_hints configures |ssl| to use |hints| as handshake hints.
// It returns one on success and zero on error. The handshake will then continue
// as before, but apply predicted values from |hints| where applicable.
// // Hints may contain connection and session secrets, so they must not leak and // must come from a source trusted to terminate the connection. However, they // will not change |ssl|'s configuration. The caller is responsible for // serializing and applying options from the RPC server as needed. This ensures // |ssl|'s behavior is self-consistent and consistent with the caller's local // decisions. OPENSSL_EXPORT int SSL_set_handshake_hints(SSL *ssl, const uint8_t *hints, size_t hints_len); // Obscure functions. // SSL_CTX_set_msg_callback installs |cb| as the message callback for |ctx|. // This callback will be called when sending or receiving low-level record // headers, complete handshake messages, ChangeCipherSpec, alerts, and DTLS // ACKs. |write_p| is one for outgoing messages and zero for incoming messages. // // For each record header, |cb| is called with |version| = 0 and |content_type| // = |SSL3_RT_HEADER|. The |len| bytes from |buf| contain the header. Note that // this does not include the record body. If the record is sealed, the length // in the header is the length of the ciphertext. // // For each handshake message, ChangeCipherSpec, alert, and DTLS ACK, |version| // is the protocol version and |content_type| is the corresponding record type. // The |len| bytes from |buf| contain the handshake message, one-byte // ChangeCipherSpec body, two-byte alert, and ACK respectively. // // In connections that enable ECH, |cb| is additionally called with // |content_type| = |SSL3_RT_CLIENT_HELLO_INNER| for each ClientHelloInner that // is encrypted or decrypted. The |len| bytes from |buf| contain the // ClientHelloInner, including the reconstructed outer extensions and handshake // header. // // For a V2ClientHello, |version| is |SSL2_VERSION|, |content_type| is zero, and // the |len| bytes from |buf| contain the V2ClientHello structure. 
OPENSSL_EXPORT void SSL_CTX_set_msg_callback( SSL_CTX *ctx, void (*cb)(int is_write, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg)); // SSL_CTX_set_msg_callback_arg sets the |arg| parameter of the message // callback. OPENSSL_EXPORT void SSL_CTX_set_msg_callback_arg(SSL_CTX *ctx, void *arg); // SSL_set_msg_callback installs |cb| as the message callback of |ssl|. See // |SSL_CTX_set_msg_callback| for when this callback is called. OPENSSL_EXPORT void SSL_set_msg_callback( SSL *ssl, void (*cb)(int write_p, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg)); // SSL_set_msg_callback_arg sets the |arg| parameter of the message callback. OPENSSL_EXPORT void SSL_set_msg_callback_arg(SSL *ssl, void *arg); // SSL_CTX_set_keylog_callback configures a callback to log key material. This // is intended for debugging use with tools like Wireshark. The |cb| function // should log |line| followed by a newline, synchronizing with any concurrent // access to the log. // // The format is described in // https://www.ietf.org/archive/id/draft-ietf-tls-keylogfile-01.html // // WARNING: The data in |line| allows an attacker to break security properties // of the TLS protocol, including confidentiality, integrity, and forward // secrecy. This impacts both the current connection, and, in TLS 1.2, future // connections that resume a session from it. Both direct access to the data and // side channel leaks from application code are possible attack vectors. This // callback is intended for debugging and should not be used in production // connections. OPENSSL_EXPORT void SSL_CTX_set_keylog_callback(SSL_CTX *ctx, void (*cb)(const SSL *ssl, const char *line)); // SSL_CTX_get_keylog_callback returns the callback configured by // |SSL_CTX_set_keylog_callback|. 
OPENSSL_EXPORT void (*SSL_CTX_get_keylog_callback(const SSL_CTX *ctx))( const SSL *ssl, const char *line); // SSL_CTX_set_current_time_cb configures a callback to retrieve the current // time, which should be set in |*out_clock|. This can be used for testing // purposes; for example, a callback can be configured that returns a time // set explicitly by the test. The |ssl| pointer passed to |cb| is always null. OPENSSL_EXPORT void SSL_CTX_set_current_time_cb( SSL_CTX *ctx, void (*cb)(const SSL *ssl, struct timeval *out_clock)); // SSL_set_shed_handshake_config allows some of the configuration of |ssl| to be // freed after its handshake completes. Once configuration has been shed, APIs // that query it may fail. "Configuration" in this context means anything that // was set by the caller, as distinct from information derived from the // handshake. For example, |SSL_get_ciphers| queries how the |SSL| was // configured by the caller, and fails after configuration has been shed, // whereas |SSL_get_cipher| queries the result of the handshake, and is // unaffected by configuration shedding. // // If configuration shedding is enabled, it is an error to call |SSL_clear|. // // Note that configuration shedding as a client additionally depends on // renegotiation being disabled (see |SSL_set_renegotiate_mode|). If // renegotiation is possible, the configuration will be retained. If // configuration shedding is enabled and renegotiation later disabled after the // handshake, |SSL_set_renegotiate_mode| will shed configuration then. This may // be useful for clients which support renegotiation with some ALPN protocols, // such as HTTP/1.1, and not others, such as HTTP/2. 
OPENSSL_EXPORT void SSL_set_shed_handshake_config(SSL *ssl, int enable);

// An ssl_renegotiate_mode_t describes how a client reacts to peer-initiated
// renegotiation attempts. See |SSL_set_renegotiate_mode| below for details.
enum ssl_renegotiate_mode_t BORINGSSL_ENUM_INT {
  // Reject all peer-initiated renegotiation attempts (the default).
  ssl_renegotiate_never = 0,
  // Allow a single renegotiation.
  ssl_renegotiate_once,
  // Allow all renegotiations.
  ssl_renegotiate_freely,
  // Ignore HelloRequest messages.
  ssl_renegotiate_ignore,
  // Pause |SSL_read| and |SSL_peek| with |SSL_ERROR_WANT_RENEGOTIATE| on a
  // HelloRequest; the caller may later start the renegotiation explicitly
  // with |SSL_renegotiate|.
  ssl_renegotiate_explicit,
};

// SSL_set_renegotiate_mode configures how |ssl|, a client, reacts to
// renegotiation attempts by a server. If |ssl| is a server, peer-initiated
// renegotiations are *always* rejected and this function does nothing.
//
// WARNING: Renegotiation is error-prone, complicates TLS's security properties,
// and increases its attack surface. When enabled, many common assumptions about
// BoringSSL's behavior no longer hold, and the calling application must handle
// more cases. Renegotiation is also incompatible with many application
// protocols, e.g. section 9.2.1 of RFC 7540. Many functions behave in ambiguous
// or undefined ways during a renegotiation.
//
// The renegotiation mode defaults to |ssl_renegotiate_never|, but may be set
// at any point in a connection's lifetime. Set it to |ssl_renegotiate_once| to
// allow one renegotiation, |ssl_renegotiate_freely| to allow all
// renegotiations or |ssl_renegotiate_ignore| to ignore HelloRequest messages.
// Note that ignoring HelloRequest messages may cause the connection to stall
// if the server waits for the renegotiation to complete.
//
// If set to |ssl_renegotiate_explicit|, |SSL_read| and |SSL_peek| calls which
// encounter a HelloRequest will pause with |SSL_ERROR_WANT_RENEGOTIATE|.
// |SSL_write| will continue to work while paused. The caller may call
// |SSL_renegotiate| to begin the renegotiation at a later point. This mode may
// be used if callers wish to eagerly call |SSL_peek| without triggering a
// renegotiation.
//
// If configuration shedding is enabled (see |SSL_set_shed_handshake_config|),
// configuration is released if, at any point after the handshake, renegotiation
// is disabled.
// It is not possible to switch from disabling renegotiation to
// enabling it on a given connection. Callers that condition renegotiation on,
// e.g., ALPN must enable renegotiation before the handshake and conditionally
// disable it afterwards.
//
// When enabled, renegotiation can cause properties of |ssl|, such as the cipher
// suite, to change during the lifetime of the connection. Moreover, during a
// renegotiation, not all properties of the new handshake are available or fully
// established. In BoringSSL, most functions, such as |SSL_get_current_cipher|,
// report information from the most recently completed handshake, not the
// pending one. However, renegotiation may rerun handshake callbacks, such as
// |SSL_CTX_set_cert_cb|. Such callbacks must ensure they are acting on the
// desired versions of each property.
//
// BoringSSL does not reverify peer certificates on renegotiation and instead
// requires they match between handshakes, so certificate verification callbacks
// (see |SSL_CTX_set_custom_verify|) may assume |ssl| is in the initial
// handshake and use |SSL_get0_peer_certificates|, etc.
//
// There is no support in BoringSSL for initiating renegotiations as a client
// or server.
OPENSSL_EXPORT void SSL_set_renegotiate_mode(SSL *ssl,
                                             enum ssl_renegotiate_mode_t mode);

// SSL_renegotiate starts a deferred renegotiation on |ssl| if it was configured
// with |ssl_renegotiate_explicit| and has a pending HelloRequest. It returns
// one on success and zero on error.
//
// This function does not perform any I/O. On success, a subsequent
// |SSL_do_handshake| call will run the handshake. |SSL_write| and
// |SSL_read| will also complete the handshake before sending or receiving
// application data.
OPENSSL_EXPORT int SSL_renegotiate(SSL *ssl);

// SSL_renegotiate_pending returns one if |ssl| is in the middle of a
// renegotiation.
OPENSSL_EXPORT int SSL_renegotiate_pending(SSL *ssl); // SSL_total_renegotiations returns the total number of renegotiation handshakes // performed by |ssl|. This includes the pending renegotiation, if any. OPENSSL_EXPORT int SSL_total_renegotiations(const SSL *ssl); // SSL_MAX_CERT_LIST_DEFAULT is the default maximum length, in bytes, of a peer // certificate chain. #define SSL_MAX_CERT_LIST_DEFAULT (1024 * 100) // SSL_CTX_get_max_cert_list returns the maximum length, in bytes, of a peer // certificate chain accepted by |ctx|. OPENSSL_EXPORT size_t SSL_CTX_get_max_cert_list(const SSL_CTX *ctx); // SSL_CTX_set_max_cert_list sets the maximum length, in bytes, of a peer // certificate chain to |max_cert_list|. This affects how much memory may be // consumed during the handshake. OPENSSL_EXPORT void SSL_CTX_set_max_cert_list(SSL_CTX *ctx, size_t max_cert_list); // SSL_get_max_cert_list returns the maximum length, in bytes, of a peer // certificate chain accepted by |ssl|. OPENSSL_EXPORT size_t SSL_get_max_cert_list(const SSL *ssl); // SSL_set_max_cert_list sets the maximum length, in bytes, of a peer // certificate chain to |max_cert_list|. This affects how much memory may be // consumed during the handshake. OPENSSL_EXPORT void SSL_set_max_cert_list(SSL *ssl, size_t max_cert_list); // SSL_CTX_set_max_send_fragment sets the maximum length, in bytes, of records // sent by |ctx|. Beyond this length, handshake messages and application data // will be split into multiple records. It returns one on success or zero on // error. OPENSSL_EXPORT int SSL_CTX_set_max_send_fragment(SSL_CTX *ctx, size_t max_send_fragment); // SSL_set_max_send_fragment sets the maximum length, in bytes, of records sent // by |ssl|. Beyond this length, handshake messages and application data will // be split into multiple records. It returns one on success or zero on // error. 
OPENSSL_EXPORT int SSL_set_max_send_fragment(SSL *ssl,
                                             size_t max_send_fragment);

// ssl_early_callback_ctx (aka |SSL_CLIENT_HELLO|) is passed to certain
// callbacks that are called very early on during the server handshake. At this
// point, much of the SSL* hasn't been filled out and only the ClientHello can
// be depended on.
struct ssl_early_callback_ctx {
  SSL *ssl;
  const uint8_t *client_hello;
  size_t client_hello_len;
  uint16_t version;
  const uint8_t *random;
  size_t random_len;
  const uint8_t *session_id;
  size_t session_id_len;
  const uint8_t *dtls_cookie;
  size_t dtls_cookie_len;
  const uint8_t *cipher_suites;
  size_t cipher_suites_len;
  const uint8_t *compression_methods;
  size_t compression_methods_len;
  const uint8_t *extensions;
  size_t extensions_len;
} /* SSL_CLIENT_HELLO */;

// ssl_select_cert_result_t enumerates the possible results from selecting a
// certificate with |select_certificate_cb|.
enum ssl_select_cert_result_t BORINGSSL_ENUM_INT {
  // ssl_select_cert_success indicates that the certificate selection was
  // successful.
  ssl_select_cert_success = 1,
  // ssl_select_cert_retry indicates that the operation could not be
  // immediately completed and must be reattempted at a later point.
  ssl_select_cert_retry = 0,
  // ssl_select_cert_error indicates that a fatal error occurred and the
  // handshake should be terminated.
  ssl_select_cert_error = -1,
  // ssl_select_cert_disable_ech indicates that, although an encrypted
  // ClientHelloInner was decrypted, it should be discarded. The certificate
  // selection callback will then be called again, passing in the
  // ClientHelloOuter instead. From there, the handshake will proceed
  // without retry_configs, to signal to the client to disable ECH.
  //
  // This value may only be returned when |SSL_ech_accepted| returns one. It
  // may be useful if the ClientHelloInner indicated a service which does not
  // support ECH, e.g. if it is a TLS-1.2 only service.
  ssl_select_cert_disable_ech = -2,
};

// SSL_early_callback_ctx_extension_get searches the extensions in
// |client_hello| for an extension of the given type. If not found, it returns
// zero. Otherwise it sets |out_data| to point to the extension contents (not
// including the type and length bytes), sets |out_len| to the length of the
// extension contents and returns one.
OPENSSL_EXPORT int SSL_early_callback_ctx_extension_get(
    const SSL_CLIENT_HELLO *client_hello, uint16_t extension_type,
    const uint8_t **out_data, size_t *out_len);

// SSL_CTX_set_select_certificate_cb sets a callback that is called before most
// ClientHello processing and before the decision whether to resume a session
// is made. The callback may inspect the ClientHello and configure the
// connection. See |ssl_select_cert_result_t| for details of the return values.
//
// In the case that a retry is indicated, |SSL_get_error| will return
// |SSL_ERROR_PENDING_CERTIFICATE| and the caller should arrange for the
// high-level operation on |ssl| to be retried at a later time, which will
// result in another call to |cb|.
//
// |SSL_get_servername| may be used during this callback.
//
// Note: The |SSL_CLIENT_HELLO| is only valid for the duration of the callback
// and is not valid while the handshake is paused.
OPENSSL_EXPORT void SSL_CTX_set_select_certificate_cb(
    SSL_CTX *ctx,
    enum ssl_select_cert_result_t (*cb)(const SSL_CLIENT_HELLO *));

// SSL_CTX_set_dos_protection_cb sets a callback that is called once the
// resumption decision for a ClientHello has been made. It can return one to
// allow the handshake to continue or zero to cause the handshake to abort.
OPENSSL_EXPORT void SSL_CTX_set_dos_protection_cb(
    SSL_CTX *ctx, int (*cb)(const SSL_CLIENT_HELLO *));

// SSL_CTX_set_reverify_on_resume configures whether the certificate
// verification callback will be used to reverify stored certificates
// when resuming a session. This only works with |SSL_CTX_set_custom_verify|.
// For now, this is incompatible with |SSL_VERIFY_NONE| mode, and is only // respected on clients. OPENSSL_EXPORT void SSL_CTX_set_reverify_on_resume(SSL_CTX *ctx, int enabled); // SSL_set_enforce_rsa_key_usage configures whether, when |ssl| is a client // negotiating TLS 1.2 or below, the keyUsage extension of RSA leaf server // certificates will be checked for consistency with the TLS usage. In all other // cases, this check is always enabled. // // This parameter may be set late; it will not be read until after the // certificate verification callback. OPENSSL_EXPORT void SSL_set_enforce_rsa_key_usage(SSL *ssl, int enabled); // SSL_was_key_usage_invalid returns one if |ssl|'s handshake succeeded despite // using TLS parameters which were incompatible with the leaf certificate's // keyUsage extension. Otherwise, it returns zero. // // If |SSL_set_enforce_rsa_key_usage| is enabled or not applicable, this // function will always return zero because key usages will be consistently // checked. OPENSSL_EXPORT int SSL_was_key_usage_invalid(const SSL *ssl); // SSL_ST_* are possible values for |SSL_state|, the bitmasks that make them up, // and some historical values for compatibility. Only |SSL_ST_INIT| and // |SSL_ST_OK| are ever returned. #define SSL_ST_CONNECT 0x1000 #define SSL_ST_ACCEPT 0x2000 #define SSL_ST_MASK 0x0FFF #define SSL_ST_INIT (SSL_ST_CONNECT | SSL_ST_ACCEPT) #define SSL_ST_OK 0x03 #define SSL_ST_RENEGOTIATE (0x04 | SSL_ST_INIT) #define SSL_ST_BEFORE (0x05 | SSL_ST_INIT) // TLS_ST_* are aliases for |SSL_ST_*| for OpenSSL 1.1.0 compatibility. #define TLS_ST_OK SSL_ST_OK #define TLS_ST_BEFORE SSL_ST_BEFORE // SSL_CB_* are possible values for the |type| parameter in the info // callback and the bitmasks that make them up. 
#define SSL_CB_LOOP 0x01 #define SSL_CB_EXIT 0x02 #define SSL_CB_READ 0x04 #define SSL_CB_WRITE 0x08 #define SSL_CB_ALERT 0x4000 #define SSL_CB_READ_ALERT (SSL_CB_ALERT | SSL_CB_READ) #define SSL_CB_WRITE_ALERT (SSL_CB_ALERT | SSL_CB_WRITE) #define SSL_CB_ACCEPT_LOOP (SSL_ST_ACCEPT | SSL_CB_LOOP) #define SSL_CB_ACCEPT_EXIT (SSL_ST_ACCEPT | SSL_CB_EXIT) #define SSL_CB_CONNECT_LOOP (SSL_ST_CONNECT | SSL_CB_LOOP) #define SSL_CB_CONNECT_EXIT (SSL_ST_CONNECT | SSL_CB_EXIT) #define SSL_CB_HANDSHAKE_START 0x10 #define SSL_CB_HANDSHAKE_DONE 0x20 // SSL_CTX_set_info_callback configures a callback to be run when various // events occur during a connection's lifetime. The |type| argument determines // the type of event and the meaning of the |value| argument. Callbacks must // ignore unexpected |type| values. // // |SSL_CB_READ_ALERT| is signaled for each alert received, warning or fatal. // The |value| argument is a 16-bit value where the alert level (either // |SSL3_AL_WARNING| or |SSL3_AL_FATAL|) is in the most-significant eight bits // and the alert type (one of |SSL_AD_*|) is in the least-significant eight. // // |SSL_CB_WRITE_ALERT| is signaled for each alert sent. The |value| argument // is constructed as with |SSL_CB_READ_ALERT|. // // |SSL_CB_HANDSHAKE_START| is signaled when a handshake begins. The |value| // argument is always one. // // |SSL_CB_HANDSHAKE_DONE| is signaled when a handshake completes successfully. // The |value| argument is always one. If a handshake False Starts, this event // may be used to determine when the Finished message is received. // // The following event types expose implementation details of the handshake // state machine. Consuming them is deprecated. // // |SSL_CB_ACCEPT_LOOP| (respectively, |SSL_CB_CONNECT_LOOP|) is signaled when // a server (respectively, client) handshake progresses. The |value| argument // is always one. 
// // |SSL_CB_ACCEPT_EXIT| (respectively, |SSL_CB_CONNECT_EXIT|) is signaled when // a server (respectively, client) handshake completes, fails, or is paused. // The |value| argument is one if the handshake succeeded and <= 0 // otherwise. OPENSSL_EXPORT void SSL_CTX_set_info_callback(SSL_CTX *ctx, void (*cb)(const SSL *ssl, int type, int value)); // SSL_CTX_get_info_callback returns the callback set by // |SSL_CTX_set_info_callback|. OPENSSL_EXPORT void (*SSL_CTX_get_info_callback(SSL_CTX *ctx))(const SSL *ssl, int type, int value); // SSL_set_info_callback configures a callback to be run at various events // during a connection's lifetime. See |SSL_CTX_set_info_callback|. OPENSSL_EXPORT void SSL_set_info_callback(SSL *ssl, void (*cb)(const SSL *ssl, int type, int value)); // SSL_get_info_callback returns the callback set by |SSL_set_info_callback|. OPENSSL_EXPORT void (*SSL_get_info_callback(const SSL *ssl))(const SSL *ssl, int type, int value); // SSL_state_string_long returns the current state of the handshake state // machine as a string. This may be useful for debugging and logging. OPENSSL_EXPORT const char *SSL_state_string_long(const SSL *ssl); #define SSL_SENT_SHUTDOWN 1 #define SSL_RECEIVED_SHUTDOWN 2 // SSL_get_shutdown returns a bitmask with a subset of |SSL_SENT_SHUTDOWN| and // |SSL_RECEIVED_SHUTDOWN| to query whether close_notify was sent or received, // respectively. OPENSSL_EXPORT int SSL_get_shutdown(const SSL *ssl); // SSL_get_peer_signature_algorithm returns the signature algorithm used by the // peer. If not applicable, it returns zero. OPENSSL_EXPORT uint16_t SSL_get_peer_signature_algorithm(const SSL *ssl); // SSL_get_client_random writes up to |max_out| bytes of the most recent // handshake's client_random to |out| and returns the number of bytes written. // If |max_out| is zero, it returns the size of the client_random. 
OPENSSL_EXPORT size_t SSL_get_client_random(const SSL *ssl, uint8_t *out, size_t max_out); // SSL_get_server_random writes up to |max_out| bytes of the most recent // handshake's server_random to |out| and returns the number of bytes written. // If |max_out| is zero, it returns the size of the server_random. OPENSSL_EXPORT size_t SSL_get_server_random(const SSL *ssl, uint8_t *out, size_t max_out); // SSL_get_pending_cipher returns the cipher suite for the current handshake or // NULL if one has not been negotiated yet or there is no pending handshake. OPENSSL_EXPORT const SSL_CIPHER *SSL_get_pending_cipher(const SSL *ssl); // SSL_set_retain_only_sha256_of_client_certs, on a server, sets whether only // the SHA-256 hash of peer's certificate should be saved in memory and in the // session. This can save memory, ticket size and session cache space. If // enabled, |SSL_get_peer_certificate| will return NULL after the handshake // completes. See |SSL_SESSION_has_peer_sha256| and // |SSL_SESSION_get0_peer_sha256| to query the hash. OPENSSL_EXPORT void SSL_set_retain_only_sha256_of_client_certs(SSL *ssl, int enable); // SSL_CTX_set_retain_only_sha256_of_client_certs, on a server, sets whether // only the SHA-256 hash of peer's certificate should be saved in memory and in // the session. This can save memory, ticket size and session cache space. If // enabled, |SSL_get_peer_certificate| will return NULL after the handshake // completes. See |SSL_SESSION_has_peer_sha256| and // |SSL_SESSION_get0_peer_sha256| to query the hash. OPENSSL_EXPORT void SSL_CTX_set_retain_only_sha256_of_client_certs(SSL_CTX *ctx, int enable); // SSL_CTX_set_grease_enabled configures whether sockets on |ctx| should enable // GREASE. See RFC 8701. OPENSSL_EXPORT void SSL_CTX_set_grease_enabled(SSL_CTX *ctx, int enabled); // SSL_CTX_set_permute_extensions configures whether sockets on |ctx| should // permute extensions. For now, this is only implemented for the ClientHello. 
OPENSSL_EXPORT void SSL_CTX_set_permute_extensions(SSL_CTX *ctx, int enabled); // SSL_set_permute_extensions configures whether sockets on |ssl| should // permute extensions. For now, this is only implemented for the ClientHello. OPENSSL_EXPORT void SSL_set_permute_extensions(SSL *ssl, int enabled); // SSL_max_seal_overhead returns the maximum overhead, in bytes, of sealing a // record with |ssl|. OPENSSL_EXPORT size_t SSL_max_seal_overhead(const SSL *ssl); // SSL_CTX_set_false_start_allowed_without_alpn configures whether connections // on |ctx| may use False Start (if |SSL_MODE_ENABLE_FALSE_START| is enabled) // without negotiating ALPN. OPENSSL_EXPORT void SSL_CTX_set_false_start_allowed_without_alpn(SSL_CTX *ctx, int allowed); // SSL_used_hello_retry_request returns one if the TLS 1.3 HelloRetryRequest // message has been either sent by the server or received by the client. It // returns zero otherwise. OPENSSL_EXPORT int SSL_used_hello_retry_request(const SSL *ssl); // SSL_set_jdk11_workaround configures whether to workaround various bugs in // JDK 11's TLS 1.3 implementation by disabling TLS 1.3 for such clients. // // https://bugs.openjdk.java.net/browse/JDK-8211806 // https://bugs.openjdk.java.net/browse/JDK-8212885 // https://bugs.openjdk.java.net/browse/JDK-8213202 OPENSSL_EXPORT void SSL_set_jdk11_workaround(SSL *ssl, int enable); // SSL_set_check_client_certificate_type configures whether the client, in // TLS 1.2 and below, will check its certificate against the server's requested // certificate types. // // By default, this option is enabled. If disabled, certificate selection within // the library may not function correctly. This flag is provided temporarily in // case of compatibility issues. It will be removed sometime after June 2024. 
OPENSSL_EXPORT void SSL_set_check_client_certificate_type(SSL *ssl, int enable); // SSL_set_check_ecdsa_curve configures whether the server, in TLS 1.2 and // below, will check its certificate against the client's supported ECDSA // curves. // // By default, this option is enabled. If disabled, certificate selection within // the library may not function correctly. This flag is provided temporarily in // case of compatibility issues. It will be removed sometime after June 2024. OPENSSL_EXPORT void SSL_set_check_ecdsa_curve(SSL *ssl, int enable); // Deprecated functions. // SSL_library_init returns one. OPENSSL_EXPORT int SSL_library_init(void); // SSL_CIPHER_description writes a description of |cipher| into |buf| and // returns |buf|. If |buf| is NULL, it returns a newly allocated string, to be // freed with |OPENSSL_free|, or NULL on error. // // The description includes a trailing newline and has the form: // AES128-SHA Kx=RSA Au=RSA Enc=AES(128) Mac=SHA1 // // Consider |SSL_CIPHER_standard_name| or |SSL_CIPHER_get_name| instead. OPENSSL_EXPORT const char *SSL_CIPHER_description(const SSL_CIPHER *cipher, char *buf, int len); // SSL_CIPHER_get_version returns the string "TLSv1/SSLv3". OPENSSL_EXPORT const char *SSL_CIPHER_get_version(const SSL_CIPHER *cipher); typedef void COMP_METHOD; typedef struct ssl_comp_st SSL_COMP; // SSL_COMP_get_compression_methods returns NULL. OPENSSL_EXPORT STACK_OF(SSL_COMP) *SSL_COMP_get_compression_methods(void); // SSL_COMP_add_compression_method returns one. OPENSSL_EXPORT int SSL_COMP_add_compression_method(int id, COMP_METHOD *cm); // SSL_COMP_get_name returns NULL. OPENSSL_EXPORT const char *SSL_COMP_get_name(const COMP_METHOD *comp); // SSL_COMP_get0_name returns the |name| member of |comp|. OPENSSL_EXPORT const char *SSL_COMP_get0_name(const SSL_COMP *comp); // SSL_COMP_get_id returns the |id| member of |comp|. OPENSSL_EXPORT int SSL_COMP_get_id(const SSL_COMP *comp); // SSL_COMP_free_compression_methods does nothing. 
OPENSSL_EXPORT void SSL_COMP_free_compression_methods(void); // SSLv23_method calls |TLS_method|. OPENSSL_EXPORT const SSL_METHOD *SSLv23_method(void); // These version-specific methods behave exactly like |TLS_method| and // |DTLS_method| except they also call |SSL_CTX_set_min_proto_version| and // |SSL_CTX_set_max_proto_version| to lock connections to that protocol // version. OPENSSL_EXPORT const SSL_METHOD *TLSv1_method(void); OPENSSL_EXPORT const SSL_METHOD *TLSv1_1_method(void); OPENSSL_EXPORT const SSL_METHOD *TLSv1_2_method(void); OPENSSL_EXPORT const SSL_METHOD *DTLSv1_method(void); OPENSSL_EXPORT const SSL_METHOD *DTLSv1_2_method(void); // These client- and server-specific methods call their corresponding generic // methods. OPENSSL_EXPORT const SSL_METHOD *TLS_server_method(void); OPENSSL_EXPORT const SSL_METHOD *TLS_client_method(void); OPENSSL_EXPORT const SSL_METHOD *SSLv23_server_method(void); OPENSSL_EXPORT const SSL_METHOD *SSLv23_client_method(void); OPENSSL_EXPORT const SSL_METHOD *TLSv1_server_method(void); OPENSSL_EXPORT const SSL_METHOD *TLSv1_client_method(void); OPENSSL_EXPORT const SSL_METHOD *TLSv1_1_server_method(void); OPENSSL_EXPORT const SSL_METHOD *TLSv1_1_client_method(void); OPENSSL_EXPORT const SSL_METHOD *TLSv1_2_server_method(void); OPENSSL_EXPORT const SSL_METHOD *TLSv1_2_client_method(void); OPENSSL_EXPORT const SSL_METHOD *DTLS_server_method(void); OPENSSL_EXPORT const SSL_METHOD *DTLS_client_method(void); OPENSSL_EXPORT const SSL_METHOD *DTLSv1_server_method(void); OPENSSL_EXPORT const SSL_METHOD *DTLSv1_client_method(void); OPENSSL_EXPORT const SSL_METHOD *DTLSv1_2_server_method(void); OPENSSL_EXPORT const SSL_METHOD *DTLSv1_2_client_method(void); // SSL_clear resets |ssl| to allow another connection and returns one on success // or zero on failure. It returns most configuration state but releases memory // associated with the current connection. // // Free |ssl| and create a new one instead. 
OPENSSL_EXPORT int SSL_clear(SSL *ssl); // SSL_CTX_set_tmp_rsa_callback does nothing. OPENSSL_EXPORT void SSL_CTX_set_tmp_rsa_callback( SSL_CTX *ctx, RSA *(*cb)(SSL *ssl, int is_export, int keylength)); // SSL_set_tmp_rsa_callback does nothing. OPENSSL_EXPORT void SSL_set_tmp_rsa_callback(SSL *ssl, RSA *(*cb)(SSL *ssl, int is_export, int keylength)); // SSL_CTX_sess_connect returns zero. OPENSSL_EXPORT int SSL_CTX_sess_connect(const SSL_CTX *ctx); // SSL_CTX_sess_connect_good returns zero. OPENSSL_EXPORT int SSL_CTX_sess_connect_good(const SSL_CTX *ctx); // SSL_CTX_sess_connect_renegotiate returns zero. OPENSSL_EXPORT int SSL_CTX_sess_connect_renegotiate(const SSL_CTX *ctx); // SSL_CTX_sess_accept returns zero. OPENSSL_EXPORT int SSL_CTX_sess_accept(const SSL_CTX *ctx); // SSL_CTX_sess_accept_renegotiate returns zero. OPENSSL_EXPORT int SSL_CTX_sess_accept_renegotiate(const SSL_CTX *ctx); // SSL_CTX_sess_accept_good returns zero. OPENSSL_EXPORT int SSL_CTX_sess_accept_good(const SSL_CTX *ctx); // SSL_CTX_sess_hits returns zero. OPENSSL_EXPORT int SSL_CTX_sess_hits(const SSL_CTX *ctx); // SSL_CTX_sess_cb_hits returns zero. OPENSSL_EXPORT int SSL_CTX_sess_cb_hits(const SSL_CTX *ctx); // SSL_CTX_sess_misses returns zero. OPENSSL_EXPORT int SSL_CTX_sess_misses(const SSL_CTX *ctx); // SSL_CTX_sess_timeouts returns zero. OPENSSL_EXPORT int SSL_CTX_sess_timeouts(const SSL_CTX *ctx); // SSL_CTX_sess_cache_full returns zero. OPENSSL_EXPORT int SSL_CTX_sess_cache_full(const SSL_CTX *ctx); // SSL_cutthrough_complete calls |SSL_in_false_start|. OPENSSL_EXPORT int SSL_cutthrough_complete(const SSL *ssl); // SSL_num_renegotiations calls |SSL_total_renegotiations|. OPENSSL_EXPORT int SSL_num_renegotiations(const SSL *ssl); // SSL_CTX_need_tmp_RSA returns zero. OPENSSL_EXPORT int SSL_CTX_need_tmp_RSA(const SSL_CTX *ctx); // SSL_need_tmp_RSA returns zero. OPENSSL_EXPORT int SSL_need_tmp_RSA(const SSL *ssl); // SSL_CTX_set_tmp_rsa returns one. 
OPENSSL_EXPORT int SSL_CTX_set_tmp_rsa(SSL_CTX *ctx, const RSA *rsa); // SSL_set_tmp_rsa returns one. OPENSSL_EXPORT int SSL_set_tmp_rsa(SSL *ssl, const RSA *rsa); // SSL_CTX_get_read_ahead returns zero. OPENSSL_EXPORT int SSL_CTX_get_read_ahead(const SSL_CTX *ctx); // SSL_CTX_set_read_ahead returns one. OPENSSL_EXPORT int SSL_CTX_set_read_ahead(SSL_CTX *ctx, int yes); // SSL_get_read_ahead returns zero. OPENSSL_EXPORT int SSL_get_read_ahead(const SSL *ssl); // SSL_set_read_ahead returns one. OPENSSL_EXPORT int SSL_set_read_ahead(SSL *ssl, int yes); // SSL_set_state does nothing. OPENSSL_EXPORT void SSL_set_state(SSL *ssl, int state); // SSL_get_shared_ciphers writes an empty string to |buf| and returns a // pointer to |buf|, or NULL if |len| is less than or equal to zero. OPENSSL_EXPORT char *SSL_get_shared_ciphers(const SSL *ssl, char *buf, int len); // SSL_get_shared_sigalgs returns zero. OPENSSL_EXPORT int SSL_get_shared_sigalgs(SSL *ssl, int idx, int *psign, int *phash, int *psignandhash, uint8_t *rsig, uint8_t *rhash); // SSL_MODE_HANDSHAKE_CUTTHROUGH is the same as SSL_MODE_ENABLE_FALSE_START. #define SSL_MODE_HANDSHAKE_CUTTHROUGH SSL_MODE_ENABLE_FALSE_START // i2d_SSL_SESSION serializes |in|, as described in |i2d_SAMPLE|. // // Use |SSL_SESSION_to_bytes| instead. OPENSSL_EXPORT int i2d_SSL_SESSION(SSL_SESSION *in, uint8_t **pp); // d2i_SSL_SESSION parses a serialized session from the |length| bytes pointed // to by |*pp|, as described in |d2i_SAMPLE|. // // Use |SSL_SESSION_from_bytes| instead. OPENSSL_EXPORT SSL_SESSION *d2i_SSL_SESSION(SSL_SESSION **a, const uint8_t **pp, long length); // i2d_SSL_SESSION_bio serializes |session| and writes the result to |bio|. It // returns the number of bytes written on success and <= 0 on error. OPENSSL_EXPORT int i2d_SSL_SESSION_bio(BIO *bio, const SSL_SESSION *session); // d2i_SSL_SESSION_bio reads a serialized |SSL_SESSION| from |bio| and returns a // newly-allocated |SSL_SESSION| or NULL on error. 
If |out| is not NULL, it also // frees |*out| and sets |*out| to the new |SSL_SESSION|. OPENSSL_EXPORT SSL_SESSION *d2i_SSL_SESSION_bio(BIO *bio, SSL_SESSION **out); // ERR_load_SSL_strings does nothing. OPENSSL_EXPORT void ERR_load_SSL_strings(void); // SSL_load_error_strings does nothing. OPENSSL_EXPORT void SSL_load_error_strings(void); // SSL_CTX_set_tlsext_use_srtp calls |SSL_CTX_set_srtp_profiles|. It returns // zero on success and one on failure. // // WARNING: this function is dangerous because it breaks the usual return value // convention. Use |SSL_CTX_set_srtp_profiles| instead. OPENSSL_EXPORT int SSL_CTX_set_tlsext_use_srtp(SSL_CTX *ctx, const char *profiles); // SSL_set_tlsext_use_srtp calls |SSL_set_srtp_profiles|. It returns zero on // success and one on failure. // // WARNING: this function is dangerous because it breaks the usual return value // convention. Use |SSL_set_srtp_profiles| instead. OPENSSL_EXPORT int SSL_set_tlsext_use_srtp(SSL *ssl, const char *profiles); // SSL_get_current_compression returns NULL. OPENSSL_EXPORT const COMP_METHOD *SSL_get_current_compression(SSL *ssl); // SSL_get_current_expansion returns NULL. OPENSSL_EXPORT const COMP_METHOD *SSL_get_current_expansion(SSL *ssl); // SSL_get_server_tmp_key returns zero. OPENSSL_EXPORT int SSL_get_server_tmp_key(SSL *ssl, EVP_PKEY **out_key); // SSL_CTX_set_tmp_dh returns 1. OPENSSL_EXPORT int SSL_CTX_set_tmp_dh(SSL_CTX *ctx, const DH *dh); // SSL_set_tmp_dh returns 1. OPENSSL_EXPORT int SSL_set_tmp_dh(SSL *ssl, const DH *dh); // SSL_CTX_set_tmp_dh_callback does nothing. OPENSSL_EXPORT void SSL_CTX_set_tmp_dh_callback( SSL_CTX *ctx, DH *(*cb)(SSL *ssl, int is_export, int keylength)); // SSL_set_tmp_dh_callback does nothing. 
OPENSSL_EXPORT void SSL_set_tmp_dh_callback(SSL *ssl, DH *(*cb)(SSL *ssl, int is_export, int keylength)); // SSL_CTX_set1_sigalgs takes |num_values| ints and interprets them as pairs // where the first is the nid of a hash function and the second is an // |EVP_PKEY_*| value. It configures the signature algorithm preferences for // |ctx| based on them and returns one on success or zero on error. // // This API is compatible with OpenSSL. However, BoringSSL-specific code should // prefer |SSL_CTX_set_signing_algorithm_prefs| because it's clearer and it's // more convenient to codesearch for specific algorithm values. OPENSSL_EXPORT int SSL_CTX_set1_sigalgs(SSL_CTX *ctx, const int *values, size_t num_values); // SSL_set1_sigalgs takes |num_values| ints and interprets them as pairs where // the first is the nid of a hash function and the second is an |EVP_PKEY_*| // value. It configures the signature algorithm preferences for |ssl| based on // them and returns one on success or zero on error. // // This API is compatible with OpenSSL. However, BoringSSL-specific code should // prefer |SSL_CTX_set_signing_algorithm_prefs| because it's clearer and it's // more convenient to codesearch for specific algorithm values. OPENSSL_EXPORT int SSL_set1_sigalgs(SSL *ssl, const int *values, size_t num_values); // SSL_CTX_set1_sigalgs_list takes a textual specification of a set of signature // algorithms and configures them on |ctx|. It returns one on success and zero // on error. See // https://www.openssl.org/docs/man1.1.0/man3/SSL_CTX_set1_sigalgs_list.html for // a description of the text format. Also note that TLS 1.3 names (e.g. // "rsa_pkcs1_md5_sha1") can also be used (as in OpenSSL, although OpenSSL // doesn't document that). // // This API is compatible with OpenSSL. However, BoringSSL-specific code should // prefer |SSL_CTX_set_signing_algorithm_prefs| because it's clearer and it's // more convenient to codesearch for specific algorithm values. 
OPENSSL_EXPORT int SSL_CTX_set1_sigalgs_list(SSL_CTX *ctx, const char *str);

// SSL_set1_sigalgs_list takes a textual specification of a set of signature
// algorithms and configures them on |ssl|. It returns one on success and zero
// on error. See
// https://www.openssl.org/docs/man1.1.0/man3/SSL_CTX_set1_sigalgs_list.html for
// a description of the text format. Also note that TLS 1.3 names (e.g.
// "rsa_pkcs1_md5_sha1") can also be used (as in OpenSSL, although OpenSSL
// doesn't document that).
//
// This API is compatible with OpenSSL. However, BoringSSL-specific code should
// prefer |SSL_CTX_set_signing_algorithm_prefs| because it's clearer and it's
// more convenient to codesearch for specific algorithm values.
OPENSSL_EXPORT int SSL_set1_sigalgs_list(SSL *ssl, const char *str);

#define SSL_set_app_data(s, arg) (SSL_set_ex_data(s, 0, (char *)(arg)))
#define SSL_get_app_data(s) (SSL_get_ex_data(s, 0))
#define SSL_SESSION_set_app_data(s, a) \
  (SSL_SESSION_set_ex_data(s, 0, (char *)(a)))
#define SSL_SESSION_get_app_data(s) (SSL_SESSION_get_ex_data(s, 0))
#define SSL_CTX_get_app_data(ctx) (SSL_CTX_get_ex_data(ctx, 0))
#define SSL_CTX_set_app_data(ctx, arg) \
  (SSL_CTX_set_ex_data(ctx, 0, (char *)(arg)))

#define OpenSSL_add_ssl_algorithms() SSL_library_init()
#define SSLeay_add_ssl_algorithms() SSL_library_init()

#define SSL_get_cipher(ssl) SSL_CIPHER_get_name(SSL_get_current_cipher(ssl))
#define SSL_get_cipher_bits(ssl, out_alg_bits) \
  SSL_CIPHER_get_bits(SSL_get_current_cipher(ssl), out_alg_bits)
#define SSL_get_cipher_version(ssl) \
  SSL_CIPHER_get_version(SSL_get_current_cipher(ssl))
#define SSL_get_cipher_name(ssl) \
  SSL_CIPHER_get_name(SSL_get_current_cipher(ssl))
#define SSL_get_time(session) SSL_SESSION_get_time(session)
#define SSL_set_time(session, time) SSL_SESSION_set_time((session), (time))
#define SSL_get_timeout(session) SSL_SESSION_get_timeout(session)
#define SSL_set_timeout(session, timeout) \
  SSL_SESSION_set_timeout((session), (timeout))

// ssl_comp_st (aka |SSL_COMP|) is a legacy compression-method record retained
// only for source compatibility; the |SSL_COMP_*| accessors above are stubs
// (e.g. |SSL_COMP_get_compression_methods| and |SSL_COMP_get_name| return
// NULL), as BoringSSL does not support TLS compression.
struct
ssl_comp_st {
  int id;
  const char *name;
  char *method;
};

DEFINE_STACK_OF(SSL_COMP)

// The following flags do nothing and are included only to make it easier to
// compile code with BoringSSL.
#define SSL_MODE_AUTO_RETRY 0
#define SSL_MODE_RELEASE_BUFFERS 0
#define SSL_MODE_SEND_CLIENTHELLO_TIME 0
#define SSL_MODE_SEND_SERVERHELLO_TIME 0
#define SSL_OP_ALL 0
#define SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION 0
#define SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS 0
#define SSL_OP_EPHEMERAL_RSA 0
#define SSL_OP_LEGACY_SERVER_CONNECT 0
#define SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER 0
#define SSL_OP_MICROSOFT_SESS_ID_BUG 0
#define SSL_OP_MSIE_SSLV2_RSA_PADDING 0
#define SSL_OP_NETSCAPE_CA_DN_BUG 0
#define SSL_OP_NETSCAPE_CHALLENGE_BUG 0
#define SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG 0
#define SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG 0
#define SSL_OP_NO_COMPRESSION 0
#define SSL_OP_NO_RENEGOTIATION 0  // ssl_renegotiate_never is the default
#define SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION 0
#define SSL_OP_NO_SSLv2 0
#define SSL_OP_NO_SSLv3 0
#define SSL_OP_PKCS1_CHECK_1 0
#define SSL_OP_PKCS1_CHECK_2 0
#define SSL_OP_SINGLE_DH_USE 0
#define SSL_OP_SINGLE_ECDH_USE 0
#define SSL_OP_SSLEAY_080_CLIENT_DH_BUG 0
#define SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG 0
#define SSL_OP_TLS_BLOCK_PADDING_BUG 0
#define SSL_OP_TLS_D5_BUG 0
#define SSL_OP_TLS_ROLLBACK_BUG 0
#define SSL_VERIFY_CLIENT_ONCE 0

// SSL_cache_hit calls |SSL_session_reused|.
OPENSSL_EXPORT int SSL_cache_hit(SSL *ssl);

// SSL_get_default_timeout returns |SSL_DEFAULT_SESSION_TIMEOUT|.
OPENSSL_EXPORT long SSL_get_default_timeout(const SSL *ssl);

// SSL_get_version returns a string describing the TLS version used by |ssl|.
// For example, "TLSv1.2" or "DTLSv1".
OPENSSL_EXPORT const char *SSL_get_version(const SSL *ssl);

// SSL_get_all_version_names outputs a list of possible strings
// |SSL_get_version| may return in this version of BoringSSL.
// It writes at most
// |max_out| entries to |out| and returns the total number it would have
// written, if |max_out| had been large enough. |max_out| may be initially set
// to zero to size the output.
//
// This function is only intended to help initialize tables in callers that want
// possible strings pre-declared. This list would not be suitable to set a list
// of supported features. It is in no particular order, and may contain
// placeholder, experimental, or deprecated values that do not apply to every
// caller. Future versions of BoringSSL may also return strings not in this
// list, so this does not apply if, say, sending strings across services.
OPENSSL_EXPORT size_t SSL_get_all_version_names(const char **out,
                                                size_t max_out);

// SSL_get_cipher_list returns the name of the |n|th cipher in the output of
// |SSL_get_ciphers| or NULL if out of range. Use |SSL_get_ciphers| instead.
OPENSSL_EXPORT const char *SSL_get_cipher_list(const SSL *ssl, int n);

// SSL_CTX_set_client_cert_cb sets a callback which is called on the client if
// the server requests a client certificate and none is configured. On success,
// the callback should return one and set |*out_x509| and |*out_pkey| to a leaf
// certificate and private key, respectively, passing ownership. It should
// return zero to send no certificate and -1 to fail or pause the handshake. If
// the handshake is paused, |SSL_get_error| will return
// |SSL_ERROR_WANT_X509_LOOKUP|.
//
// The callback may call |SSL_get0_certificate_types| and
// |SSL_get_client_CA_list| for information on the server's certificate request.
//
// Use |SSL_CTX_set_cert_cb| instead. Configuring intermediate certificates with
// this function is confusing. This callback may not be registered concurrently
// with |SSL_CTX_set_cert_cb| or |SSL_set_cert_cb|.
OPENSSL_EXPORT void SSL_CTX_set_client_cert_cb( SSL_CTX *ctx, int (*cb)(SSL *ssl, X509 **out_x509, EVP_PKEY **out_pkey)); #define SSL_NOTHING SSL_ERROR_NONE #define SSL_WRITING SSL_ERROR_WANT_WRITE #define SSL_READING SSL_ERROR_WANT_READ // SSL_want returns one of the above values to determine what the most recent // operation on |ssl| was blocked on. Use |SSL_get_error| instead. OPENSSL_EXPORT int SSL_want(const SSL *ssl); #define SSL_want_read(ssl) (SSL_want(ssl) == SSL_READING) #define SSL_want_write(ssl) (SSL_want(ssl) == SSL_WRITING) // SSL_get_finished writes up to |count| bytes of the Finished message sent by // |ssl| to |buf|. It returns the total untruncated length or zero if none has // been sent yet. At TLS 1.3 and later, it returns zero. // // Use |SSL_get_tls_unique| instead. OPENSSL_EXPORT size_t SSL_get_finished(const SSL *ssl, void *buf, size_t count); // SSL_get_peer_finished writes up to |count| bytes of the Finished message // received from |ssl|'s peer to |buf|. It returns the total untruncated length // or zero if none has been received yet. At TLS 1.3 and later, it returns // zero. // // Use |SSL_get_tls_unique| instead. OPENSSL_EXPORT size_t SSL_get_peer_finished(const SSL *ssl, void *buf, size_t count); // SSL_alert_type_string returns "!". Use |SSL_alert_type_string_long| // instead. OPENSSL_EXPORT const char *SSL_alert_type_string(int value); // SSL_alert_desc_string returns "!!". Use |SSL_alert_desc_string_long| // instead. OPENSSL_EXPORT const char *SSL_alert_desc_string(int value); // SSL_state_string returns "!!!!!!". Use |SSL_state_string_long| for a more // intelligible string. OPENSSL_EXPORT const char *SSL_state_string(const SSL *ssl); // SSL_TXT_* expand to strings. 
#define SSL_TXT_MEDIUM "MEDIUM" #define SSL_TXT_HIGH "HIGH" #define SSL_TXT_FIPS "FIPS" #define SSL_TXT_kRSA "kRSA" #define SSL_TXT_kDHE "kDHE" #define SSL_TXT_kEDH "kEDH" #define SSL_TXT_kECDHE "kECDHE" #define SSL_TXT_kEECDH "kEECDH" #define SSL_TXT_kPSK "kPSK" #define SSL_TXT_aRSA "aRSA" #define SSL_TXT_aECDSA "aECDSA" #define SSL_TXT_aPSK "aPSK" #define SSL_TXT_DH "DH" #define SSL_TXT_DHE "DHE" #define SSL_TXT_EDH "EDH" #define SSL_TXT_RSA "RSA" #define SSL_TXT_ECDH "ECDH" #define SSL_TXT_ECDHE "ECDHE" #define SSL_TXT_EECDH "EECDH" #define SSL_TXT_ECDSA "ECDSA" #define SSL_TXT_PSK "PSK" #define SSL_TXT_3DES "3DES" #define SSL_TXT_RC4 "RC4" #define SSL_TXT_AES128 "AES128" #define SSL_TXT_AES256 "AES256" #define SSL_TXT_AES "AES" #define SSL_TXT_AES_GCM "AESGCM" #define SSL_TXT_CHACHA20 "CHACHA20" #define SSL_TXT_MD5 "MD5" #define SSL_TXT_SHA1 "SHA1" #define SSL_TXT_SHA "SHA" #define SSL_TXT_SHA256 "SHA256" #define SSL_TXT_SHA384 "SHA384" #define SSL_TXT_SSLV3 "SSLv3" #define SSL_TXT_TLSV1 "TLSv1" #define SSL_TXT_TLSV1_1 "TLSv1.1" #define SSL_TXT_TLSV1_2 "TLSv1.2" #define SSL_TXT_TLSV1_3 "TLSv1.3" #define SSL_TXT_ALL "ALL" #define SSL_TXT_CMPDEF "COMPLEMENTOFDEFAULT" typedef struct ssl_conf_ctx_st SSL_CONF_CTX; // SSL_state returns |SSL_ST_INIT| if a handshake is in progress and |SSL_ST_OK| // otherwise. // // Use |SSL_is_init| instead. OPENSSL_EXPORT int SSL_state(const SSL *ssl); #define SSL_get_state(ssl) SSL_state(ssl) // SSL_set_shutdown causes |ssl| to behave as if the shutdown bitmask (see // |SSL_get_shutdown|) were |mode|. This may be used to skip sending or // receiving close_notify in |SSL_shutdown| by causing the implementation to // believe the events already happened. // // It is an error to use |SSL_set_shutdown| to unset a bit that has already been // set. Doing so will trigger an |assert| in debug builds and otherwise be // ignored. // // Use |SSL_CTX_set_quiet_shutdown| instead. 
OPENSSL_EXPORT void SSL_set_shutdown(SSL *ssl, int mode); // SSL_CTX_set_tmp_ecdh calls |SSL_CTX_set1_groups| with a one-element list // containing |ec_key|'s curve. The remainder of |ec_key| is ignored. OPENSSL_EXPORT int SSL_CTX_set_tmp_ecdh(SSL_CTX *ctx, const EC_KEY *ec_key); // SSL_set_tmp_ecdh calls |SSL_set1_groups| with a one-element list containing // |ec_key|'s curve. The remainder of |ec_key| is ignored. OPENSSL_EXPORT int SSL_set_tmp_ecdh(SSL *ssl, const EC_KEY *ec_key); #if !defined(OPENSSL_NO_FILESYSTEM) // SSL_add_dir_cert_subjects_to_stack lists files in directory |dir|. It calls // |SSL_add_file_cert_subjects_to_stack| on each file and returns one on success // or zero on error. This function is only available from the libdecrepit // library. OPENSSL_EXPORT int SSL_add_dir_cert_subjects_to_stack(STACK_OF(X509_NAME) *out, const char *dir); #endif // SSL_CTX_enable_tls_channel_id calls |SSL_CTX_set_tls_channel_id_enabled|. OPENSSL_EXPORT int SSL_CTX_enable_tls_channel_id(SSL_CTX *ctx); // SSL_enable_tls_channel_id calls |SSL_set_tls_channel_id_enabled|. OPENSSL_EXPORT int SSL_enable_tls_channel_id(SSL *ssl); // BIO_f_ssl returns a |BIO_METHOD| that can wrap an |SSL*| in a |BIO*|. Note // that this has quite different behaviour from the version in OpenSSL (notably // that it doesn't try to auto renegotiate). // // IMPORTANT: if you are not curl, don't use this. OPENSSL_EXPORT const BIO_METHOD *BIO_f_ssl(void); // BIO_set_ssl sets |ssl| as the underlying connection for |bio|, which must // have been created using |BIO_f_ssl|. If |take_owership| is true, |bio| will // call |SSL_free| on |ssl| when closed. It returns one on success or something // other than one on error. OPENSSL_EXPORT long BIO_set_ssl(BIO *bio, SSL *ssl, int take_owership); // SSL_CTX_set_ecdh_auto returns one. #define SSL_CTX_set_ecdh_auto(ctx, onoff) 1 // SSL_set_ecdh_auto returns one. 
#define SSL_set_ecdh_auto(ssl, onoff) 1 // SSL_get_session returns a non-owning pointer to |ssl|'s session. For // historical reasons, which session it returns depends on |ssl|'s state. // // Prior to the start of the initial handshake, it returns the session the // caller set with |SSL_set_session|. After the initial handshake has finished // and if no additional handshakes are in progress, it returns the currently // active session. Its behavior is undefined while a handshake is in progress. // // If trying to add new sessions to an external session cache, use // |SSL_CTX_sess_set_new_cb| instead. In particular, using the callback is // required as of TLS 1.3. For compatibility, this function will return an // unresumable session which may be cached, but will never be resumed. // // If querying properties of the connection, use APIs on the |SSL| object. OPENSSL_EXPORT SSL_SESSION *SSL_get_session(const SSL *ssl); // SSL_get0_session is an alias for |SSL_get_session|. #define SSL_get0_session SSL_get_session // SSL_get1_session acts like |SSL_get_session| but returns a new reference to // the session. OPENSSL_EXPORT SSL_SESSION *SSL_get1_session(SSL *ssl); #define OPENSSL_INIT_NO_LOAD_SSL_STRINGS 0 #define OPENSSL_INIT_LOAD_SSL_STRINGS 0 #define OPENSSL_INIT_SSL_DEFAULT 0 // OPENSSL_init_ssl returns one. OPENSSL_EXPORT int OPENSSL_init_ssl(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings); // The following constants are legacy aliases for RSA-PSS with rsaEncryption // keys. Use the new names instead. #define SSL_SIGN_RSA_PSS_SHA256 SSL_SIGN_RSA_PSS_RSAE_SHA256 #define SSL_SIGN_RSA_PSS_SHA384 SSL_SIGN_RSA_PSS_RSAE_SHA384 #define SSL_SIGN_RSA_PSS_SHA512 SSL_SIGN_RSA_PSS_RSAE_SHA512 // SSL_set_tlsext_status_type configures a client to request OCSP stapling if // |type| is |TLSEXT_STATUSTYPE_ocsp| and disables it otherwise. It returns one // on success and zero if handshake configuration has already been shed. // // Use |SSL_enable_ocsp_stapling| instead. 
OPENSSL_EXPORT int SSL_set_tlsext_status_type(SSL *ssl, int type); // SSL_get_tlsext_status_type returns |TLSEXT_STATUSTYPE_ocsp| if the client // requested OCSP stapling and |TLSEXT_STATUSTYPE_nothing| otherwise. On the // client, this reflects whether OCSP stapling was enabled via, e.g., // |SSL_set_tlsext_status_type|. On the server, this is determined during the // handshake. It may be queried in callbacks set by |SSL_CTX_set_cert_cb|. The // result is undefined after the handshake completes. OPENSSL_EXPORT int SSL_get_tlsext_status_type(const SSL *ssl); // SSL_set_tlsext_status_ocsp_resp sets the OCSP response. It returns one on // success and zero on error. On success, |ssl| takes ownership of |resp|, which // must have been allocated by |OPENSSL_malloc|. // // Use |SSL_set_ocsp_response| instead. OPENSSL_EXPORT int SSL_set_tlsext_status_ocsp_resp(SSL *ssl, uint8_t *resp, size_t resp_len); // SSL_get_tlsext_status_ocsp_resp sets |*out| to point to the OCSP response // from the server. It returns the length of the response. If there was no // response, it sets |*out| to NULL and returns zero. // // Use |SSL_get0_ocsp_response| instead. // // WARNING: the returned data is not guaranteed to be well formed. OPENSSL_EXPORT size_t SSL_get_tlsext_status_ocsp_resp(const SSL *ssl, const uint8_t **out); // SSL_CTX_set_tlsext_status_cb configures the legacy OpenSSL OCSP callback and // returns one. Though the type signature is the same, this callback has // different behavior for client and server connections: // // For clients, the callback is called after certificate verification. It should // return one for success, zero for a bad OCSP response, and a negative number // for internal error. Instead, handle this as part of certificate verification. // (Historically, OpenSSL verified certificates just before parsing stapled OCSP // responses, but BoringSSL fixes this ordering. All server credentials are // available during verification.) 
// // Do not use this callback as a server. It is provided for compatibility // purposes only. For servers, it is called to configure server credentials. It // should return |SSL_TLSEXT_ERR_OK| on success, |SSL_TLSEXT_ERR_NOACK| to // ignore OCSP requests, or |SSL_TLSEXT_ERR_ALERT_FATAL| on error. It is usually // used to fetch OCSP responses on demand, which is not ideal. Instead, treat // OCSP responses like other server credentials, such as certificates or SCT // lists. Configure, store, and refresh them eagerly. This avoids downtime if // the CA's OCSP responder is briefly offline. OPENSSL_EXPORT int SSL_CTX_set_tlsext_status_cb(SSL_CTX *ctx, int (*callback)(SSL *ssl, void *arg)); // SSL_CTX_set_tlsext_status_arg sets additional data for // |SSL_CTX_set_tlsext_status_cb|'s callback and returns one. OPENSSL_EXPORT int SSL_CTX_set_tlsext_status_arg(SSL_CTX *ctx, void *arg); // The following symbols are compatibility aliases for reason codes used when // receiving an alert from the peer. Use the other names instead, which fit the // naming convention. // // TODO(davidben): Fix references to |SSL_R_TLSV1_CERTIFICATE_REQUIRED| and // remove the compatibility value. The others come from OpenSSL. #define SSL_R_TLSV1_UNSUPPORTED_EXTENSION \ SSL_R_TLSV1_ALERT_UNSUPPORTED_EXTENSION #define SSL_R_TLSV1_CERTIFICATE_UNOBTAINABLE \ SSL_R_TLSV1_ALERT_CERTIFICATE_UNOBTAINABLE #define SSL_R_TLSV1_UNRECOGNIZED_NAME SSL_R_TLSV1_ALERT_UNRECOGNIZED_NAME #define SSL_R_TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE \ SSL_R_TLSV1_ALERT_BAD_CERTIFICATE_STATUS_RESPONSE #define SSL_R_TLSV1_BAD_CERTIFICATE_HASH_VALUE \ SSL_R_TLSV1_ALERT_BAD_CERTIFICATE_HASH_VALUE #define SSL_R_TLSV1_CERTIFICATE_REQUIRED SSL_R_TLSV1_ALERT_CERTIFICATE_REQUIRED // The following symbols are compatibility aliases for |SSL_GROUP_*|. 
#define SSL_CURVE_SECP224R1 SSL_GROUP_SECP224R1 #define SSL_CURVE_SECP256R1 SSL_GROUP_SECP256R1 #define SSL_CURVE_SECP384R1 SSL_GROUP_SECP384R1 #define SSL_CURVE_SECP521R1 SSL_GROUP_SECP521R1 #define SSL_CURVE_X25519 SSL_GROUP_X25519 #define SSL_CURVE_X25519_KYBER768_DRAFT00 SSL_GROUP_X25519_KYBER768_DRAFT00 // SSL_get_curve_id calls |SSL_get_group_id|. OPENSSL_EXPORT uint16_t SSL_get_curve_id(const SSL *ssl); // SSL_get_curve_name calls |SSL_get_group_name|. OPENSSL_EXPORT const char *SSL_get_curve_name(uint16_t curve_id); // SSL_get_all_curve_names calls |SSL_get_all_group_names|. OPENSSL_EXPORT size_t SSL_get_all_curve_names(const char **out, size_t max_out); // SSL_CTX_set1_curves calls |SSL_CTX_set1_groups|. OPENSSL_EXPORT int SSL_CTX_set1_curves(SSL_CTX *ctx, const int *curves, size_t num_curves); // SSL_set1_curves calls |SSL_set1_groups|. OPENSSL_EXPORT int SSL_set1_curves(SSL *ssl, const int *curves, size_t num_curves); // SSL_CTX_set1_curves_list calls |SSL_CTX_set1_groups_list|. OPENSSL_EXPORT int SSL_CTX_set1_curves_list(SSL_CTX *ctx, const char *curves); // SSL_set1_curves_list calls |SSL_set1_groups_list|. OPENSSL_EXPORT int SSL_set1_curves_list(SSL *ssl, const char *curves); // TLSEXT_nid_unknown is a constant used in OpenSSL for // |SSL_get_negotiated_group| to return an unrecognized group. BoringSSL never // returns this value, but we define this constant for compatibility. #define TLSEXT_nid_unknown 0x1000000 // SSL_CTX_check_private_key returns one if |ctx| has both a certificate and // private key, and zero otherwise. // // This function does not check consistency because the library checks when the // certificate and key are individually configured. However, if the private key // is configured before the certificate, inconsistent private keys are silently // dropped. Some callers are inadvertently relying on this function to detect // when this happens. 
// // Instead, callers should configure the certificate first, then the private // key, checking for errors in each. This function is then unnecessary. OPENSSL_EXPORT int SSL_CTX_check_private_key(const SSL_CTX *ctx); // SSL_check_private_key returns one if |ssl| has both a certificate and private // key, and zero otherwise. // // See discussion in |SSL_CTX_check_private_key|. OPENSSL_EXPORT int SSL_check_private_key(const SSL *ssl); // Compliance policy configurations // // A TLS connection has a large number of different parameters. Some are well // known, like cipher suites, but many are obscure and configuration functions // for them may not exist. These policy controls allow broad configuration // goals to be specified so that they can flow down to all the different // parameters of a TLS connection. enum ssl_compliance_policy_t BORINGSSL_ENUM_INT { // ssl_compliance_policy_none does nothing. However, since setting this // doesn't undo other policies it's an error to try and set it. ssl_compliance_policy_none, // ssl_compliance_policy_fips_202205 configures a TLS connection to use: // * TLS 1.2 or 1.3 // * For TLS 1.2, only ECDHE_[RSA|ECDSA]_WITH_AES_*_GCM_SHA*. // * For TLS 1.3, only AES-GCM // * P-256 or P-384 for key agreement. // * For server signatures, only PKCS#1/PSS with SHA256/384/512, or ECDSA // with P-256 or P-384. // // Note: this policy can be configured even if BoringSSL has not been built in // FIPS mode. Call |FIPS_mode| to check that. // // Note: this setting aids with compliance with NIST requirements but does not // guarantee it. Careful reading of SP 800-52r2 is recommended. ssl_compliance_policy_fips_202205, // ssl_compliance_policy_wpa3_192_202304 configures a TLS connection to use: // * TLS 1.2 or 1.3. // * For TLS 1.2, only TLS_ECDHE_[ECDSA|RSA]_WITH_AES_256_GCM_SHA384. // * For TLS 1.3, only AES-256-GCM. // * P-384 for key agreement. // * For handshake signatures, only ECDSA with P-384 and SHA-384, or RSA // with SHA-384 or SHA-512. 
// // No limitations on the certificate chain nor leaf public key are imposed, // other than by the supported signature algorithms. But WPA3's "192-bit" // mode requires at least P-384 or 3072-bit along the chain. The caller must // enforce this themselves on the verified chain using functions such as // `X509_STORE_CTX_get0_chain`. // // Note that this setting is less secure than the default. The // implementation risks of using a more obscure primitive like P-384 // dominate other considerations. ssl_compliance_policy_wpa3_192_202304, // ssl_compliance_policy_cnsa_202407 confingures a TLS connection to use: // * For TLS 1.3, AES-256-GCM over AES-128-GCM over ChaCha20-Poly1305. // // I.e. it ensures that AES-GCM will be used whenever the client supports it. // The cipher suite configuration mini-language can be used to similarly // configure prior TLS versions if they are enabled. ssl_compliance_policy_cnsa_202407, }; // SSL_CTX_set_compliance_policy configures various aspects of |ctx| based on // the given policy requirements. Subsequently calling other functions that // configure |ctx| may override |policy|, or may not. This should be the final // configuration function called in order to have defined behaviour. It's a // fatal error if |policy| is |ssl_compliance_policy_none|. OPENSSL_EXPORT int SSL_CTX_set_compliance_policy( SSL_CTX *ctx, enum ssl_compliance_policy_t policy); // SSL_CTX_get_compliance_policy returns the compliance policy configured on // |ctx|. OPENSSL_EXPORT enum ssl_compliance_policy_t SSL_CTX_get_compliance_policy( const SSL_CTX *ctx); // SSL_set_compliance_policy acts the same as |SSL_CTX_set_compliance_policy|, // but only configures a single |SSL*|. OPENSSL_EXPORT int SSL_set_compliance_policy( SSL *ssl, enum ssl_compliance_policy_t policy); // SSL_get_compliance_policy returns the compliance policy configured on // |ssl|. 
OPENSSL_EXPORT enum ssl_compliance_policy_t SSL_get_compliance_policy( const SSL *ssl); // Nodejs compatibility section (hidden). // // These defines exist for node.js, with the hope that we can eliminate the // need for them over time. #define SSLerr(function, reason) \ ERR_put_error(ERR_LIB_SSL, 0, reason, __FILE__, __LINE__) // Preprocessor compatibility section (hidden). // // Historically, a number of APIs were implemented in OpenSSL as macros and // constants to 'ctrl' functions. To avoid breaking #ifdefs in consumers, this // section defines a number of legacy macros. // // Although using either the CTRL values or their wrapper macros in #ifdefs is // still supported, the CTRL values may not be passed to |SSL_ctrl| and // |SSL_CTX_ctrl|. Call the functions (previously wrapper macros) instead. // // See PORTING.md in the BoringSSL source tree for a table of corresponding // functions. // https://boringssl.googlesource.com/boringssl/+/main/PORTING.md#Replacements-for-values #define DTLS_CTRL_GET_TIMEOUT doesnt_exist #define DTLS_CTRL_HANDLE_TIMEOUT doesnt_exist #define SSL_CTRL_CHAIN doesnt_exist #define SSL_CTRL_CHAIN_CERT doesnt_exist #define SSL_CTRL_CHANNEL_ID doesnt_exist #define SSL_CTRL_CLEAR_EXTRA_CHAIN_CERTS doesnt_exist #define SSL_CTRL_CLEAR_MODE doesnt_exist #define SSL_CTRL_CLEAR_OPTIONS doesnt_exist #define SSL_CTRL_EXTRA_CHAIN_CERT doesnt_exist #define SSL_CTRL_GET_CHAIN_CERTS doesnt_exist #define SSL_CTRL_GET_CHANNEL_ID doesnt_exist #define SSL_CTRL_GET_CLIENT_CERT_TYPES doesnt_exist #define SSL_CTRL_GET_EXTRA_CHAIN_CERTS doesnt_exist #define SSL_CTRL_GET_MAX_CERT_LIST doesnt_exist #define SSL_CTRL_GET_NEGOTIATED_GROUP doesnt_exist #define SSL_CTRL_GET_NUM_RENEGOTIATIONS doesnt_exist #define SSL_CTRL_GET_READ_AHEAD doesnt_exist #define SSL_CTRL_GET_RI_SUPPORT doesnt_exist #define SSL_CTRL_GET_SERVER_TMP_KEY doesnt_exist #define SSL_CTRL_GET_SESSION_REUSED doesnt_exist #define SSL_CTRL_GET_SESS_CACHE_MODE doesnt_exist #define 
SSL_CTRL_GET_SESS_CACHE_SIZE doesnt_exist #define SSL_CTRL_GET_TLSEXT_TICKET_KEYS doesnt_exist #define SSL_CTRL_GET_TOTAL_RENEGOTIATIONS doesnt_exist #define SSL_CTRL_MODE doesnt_exist #define SSL_CTRL_NEED_TMP_RSA doesnt_exist #define SSL_CTRL_OPTIONS doesnt_exist #define SSL_CTRL_SESS_NUMBER doesnt_exist #define SSL_CTRL_SET_CURVES doesnt_exist #define SSL_CTRL_SET_CURVES_LIST doesnt_exist #define SSL_CTRL_SET_GROUPS doesnt_exist #define SSL_CTRL_SET_GROUPS_LIST doesnt_exist #define SSL_CTRL_SET_ECDH_AUTO doesnt_exist #define SSL_CTRL_SET_MAX_CERT_LIST doesnt_exist #define SSL_CTRL_SET_MAX_SEND_FRAGMENT doesnt_exist #define SSL_CTRL_SET_MSG_CALLBACK doesnt_exist #define SSL_CTRL_SET_MSG_CALLBACK_ARG doesnt_exist #define SSL_CTRL_SET_MTU doesnt_exist #define SSL_CTRL_SET_READ_AHEAD doesnt_exist #define SSL_CTRL_SET_SESS_CACHE_MODE doesnt_exist #define SSL_CTRL_SET_SESS_CACHE_SIZE doesnt_exist #define SSL_CTRL_SET_TLSEXT_HOSTNAME doesnt_exist #define SSL_CTRL_SET_TLSEXT_SERVERNAME_ARG doesnt_exist #define SSL_CTRL_SET_TLSEXT_SERVERNAME_CB doesnt_exist #define SSL_CTRL_SET_TLSEXT_TICKET_KEYS doesnt_exist #define SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB doesnt_exist #define SSL_CTRL_SET_TMP_DH doesnt_exist #define SSL_CTRL_SET_TMP_DH_CB doesnt_exist #define SSL_CTRL_SET_TMP_ECDH doesnt_exist #define SSL_CTRL_SET_TMP_ECDH_CB doesnt_exist #define SSL_CTRL_SET_TMP_RSA doesnt_exist #define SSL_CTRL_SET_TMP_RSA_CB doesnt_exist // |BORINGSSL_PREFIX| already makes each of these symbols into macros, so there // is no need to define conflicting macros. 
#if !defined(BORINGSSL_PREFIX) #define DTLSv1_get_timeout DTLSv1_get_timeout #define DTLSv1_handle_timeout DTLSv1_handle_timeout #define SSL_CTX_add0_chain_cert SSL_CTX_add0_chain_cert #define SSL_CTX_add1_chain_cert SSL_CTX_add1_chain_cert #define SSL_CTX_add_extra_chain_cert SSL_CTX_add_extra_chain_cert #define SSL_CTX_clear_extra_chain_certs SSL_CTX_clear_extra_chain_certs #define SSL_CTX_clear_chain_certs SSL_CTX_clear_chain_certs #define SSL_CTX_clear_mode SSL_CTX_clear_mode #define SSL_CTX_clear_options SSL_CTX_clear_options #define SSL_CTX_get0_chain_certs SSL_CTX_get0_chain_certs #define SSL_CTX_get_extra_chain_certs SSL_CTX_get_extra_chain_certs #define SSL_CTX_get_max_cert_list SSL_CTX_get_max_cert_list #define SSL_CTX_get_mode SSL_CTX_get_mode #define SSL_CTX_get_options SSL_CTX_get_options #define SSL_CTX_get_read_ahead SSL_CTX_get_read_ahead #define SSL_CTX_get_session_cache_mode SSL_CTX_get_session_cache_mode #define SSL_CTX_get_tlsext_ticket_keys SSL_CTX_get_tlsext_ticket_keys #define SSL_CTX_need_tmp_RSA SSL_CTX_need_tmp_RSA #define SSL_CTX_sess_get_cache_size SSL_CTX_sess_get_cache_size #define SSL_CTX_sess_number SSL_CTX_sess_number #define SSL_CTX_sess_set_cache_size SSL_CTX_sess_set_cache_size #define SSL_CTX_set0_chain SSL_CTX_set0_chain #define SSL_CTX_set1_chain SSL_CTX_set1_chain #define SSL_CTX_set1_curves SSL_CTX_set1_curves #define SSL_CTX_set1_groups SSL_CTX_set1_groups #define SSL_CTX_set_max_cert_list SSL_CTX_set_max_cert_list #define SSL_CTX_set_max_send_fragment SSL_CTX_set_max_send_fragment #define SSL_CTX_set_mode SSL_CTX_set_mode #define SSL_CTX_set_msg_callback_arg SSL_CTX_set_msg_callback_arg #define SSL_CTX_set_options SSL_CTX_set_options #define SSL_CTX_set_read_ahead SSL_CTX_set_read_ahead #define SSL_CTX_set_session_cache_mode SSL_CTX_set_session_cache_mode #define SSL_CTX_set_tlsext_servername_arg SSL_CTX_set_tlsext_servername_arg #define SSL_CTX_set_tlsext_servername_callback \ SSL_CTX_set_tlsext_servername_callback 
#define SSL_CTX_set_tlsext_ticket_key_cb SSL_CTX_set_tlsext_ticket_key_cb #define SSL_CTX_set_tlsext_ticket_keys SSL_CTX_set_tlsext_ticket_keys #define SSL_CTX_set_tmp_dh SSL_CTX_set_tmp_dh #define SSL_CTX_set_tmp_ecdh SSL_CTX_set_tmp_ecdh #define SSL_CTX_set_tmp_rsa SSL_CTX_set_tmp_rsa #define SSL_add0_chain_cert SSL_add0_chain_cert #define SSL_add1_chain_cert SSL_add1_chain_cert #define SSL_clear_chain_certs SSL_clear_chain_certs #define SSL_clear_mode SSL_clear_mode #define SSL_clear_options SSL_clear_options #define SSL_get0_certificate_types SSL_get0_certificate_types #define SSL_get0_chain_certs SSL_get0_chain_certs #define SSL_get_max_cert_list SSL_get_max_cert_list #define SSL_get_mode SSL_get_mode #define SSL_get_negotiated_group SSL_get_negotiated_group #define SSL_get_options SSL_get_options #define SSL_get_secure_renegotiation_support \ SSL_get_secure_renegotiation_support #define SSL_need_tmp_RSA SSL_need_tmp_RSA #define SSL_num_renegotiations SSL_num_renegotiations #define SSL_session_reused SSL_session_reused #define SSL_set0_chain SSL_set0_chain #define SSL_set1_chain SSL_set1_chain #define SSL_set1_curves SSL_set1_curves #define SSL_set1_groups SSL_set1_groups #define SSL_set_max_cert_list SSL_set_max_cert_list #define SSL_set_max_send_fragment SSL_set_max_send_fragment #define SSL_set_mode SSL_set_mode #define SSL_set_msg_callback_arg SSL_set_msg_callback_arg #define SSL_set_mtu SSL_set_mtu #define SSL_set_options SSL_set_options #define SSL_set_tlsext_host_name SSL_set_tlsext_host_name #define SSL_set_tmp_dh SSL_set_tmp_dh #define SSL_set_tmp_ecdh SSL_set_tmp_ecdh #define SSL_set_tmp_rsa SSL_set_tmp_rsa #define SSL_total_renegotiations SSL_total_renegotiations #endif // !defined(BORINGSSL_PREFIX) #if defined(__cplusplus) } // extern C #if !defined(BORINGSSL_NO_CXX) extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(SSL, SSL_free) BORINGSSL_MAKE_DELETER(SSL_CREDENTIAL, SSL_CREDENTIAL_free) BORINGSSL_MAKE_UP_REF(SSL_CREDENTIAL, 
SSL_CREDENTIAL_up_ref) BORINGSSL_MAKE_DELETER(SSL_CTX, SSL_CTX_free) BORINGSSL_MAKE_UP_REF(SSL_CTX, SSL_CTX_up_ref) BORINGSSL_MAKE_DELETER(SSL_ECH_KEYS, SSL_ECH_KEYS_free) BORINGSSL_MAKE_UP_REF(SSL_ECH_KEYS, SSL_ECH_KEYS_up_ref) BORINGSSL_MAKE_DELETER(SSL_SESSION, SSL_SESSION_free) BORINGSSL_MAKE_UP_REF(SSL_SESSION, SSL_SESSION_up_ref) // *** DEPRECATED EXPERIMENT — DO NOT USE *** // // Split handshakes. // // WARNING: This mechanism is deprecated and should not be used. It is very // fragile and difficult to use correctly. The relationship between // configuration options across the two halves is ill-defined and not // self-consistent. Additionally, version skew across the two halves risks // unusual behavior and connection failure. New development should use the // handshake hints API. Existing deployments should migrate to handshake hints // to reduce the risk of service outages. // // Split handshakes allows the handshake part of a TLS connection to be // performed in a different process (or on a different machine) than the data // exchange. This only applies to servers. // // In the first part of a split handshake, an |SSL| (where the |SSL_CTX| has // been configured with |SSL_CTX_set_handoff_mode|) is used normally. Once the // ClientHello message has been received, the handshake will stop and // |SSL_get_error| will indicate |SSL_ERROR_HANDOFF|. At this point (and only // at this point), |SSL_serialize_handoff| can be called to write the “handoff” // state of the connection. // // Elsewhere, a fresh |SSL| can be used with |SSL_apply_handoff| to continue // the connection. The connection from the client is fed into this |SSL|, and // the handshake resumed. When the handshake stops again and |SSL_get_error| // indicates |SSL_ERROR_HANDBACK|, |SSL_serialize_handback| should be called to // serialize the state of the handshake again. // // Back at the first location, a fresh |SSL| can be used with // |SSL_apply_handback|. 
Then the client's connection can be processed mostly // as normal. // // Lastly, when a connection is in the handoff state, whether or not // |SSL_serialize_handoff| is called, |SSL_decline_handoff| will move it back // into a normal state where the connection can proceed without impact. // // WARNING: Currently only works with TLS 1.0–1.2. // WARNING: The serialisation formats are not yet stable: version skew may be // fatal. // WARNING: The handback data contains sensitive key material and must be // protected. // WARNING: Some calls on the final |SSL| will not work. Just as an example, // calls like |SSL_get0_session_id_context| and |SSL_get_privatekey| won't // work because the certificate used for handshaking isn't available. // WARNING: |SSL_apply_handoff| may trigger “msg” callback calls. OPENSSL_EXPORT void SSL_CTX_set_handoff_mode(SSL_CTX *ctx, bool on); OPENSSL_EXPORT void SSL_set_handoff_mode(SSL *SSL, bool on); OPENSSL_EXPORT bool SSL_serialize_handoff(const SSL *ssl, CBB *out, SSL_CLIENT_HELLO *out_hello); OPENSSL_EXPORT bool SSL_decline_handoff(SSL *ssl); OPENSSL_EXPORT bool SSL_apply_handoff(SSL *ssl, Span handoff); OPENSSL_EXPORT bool SSL_serialize_handback(const SSL *ssl, CBB *out); OPENSSL_EXPORT bool SSL_apply_handback(SSL *ssl, Span handback); // SSL_get_traffic_secrets sets |*out_read_traffic_secret| and // |*out_write_traffic_secret| to reference the current TLS 1.3 traffic secrets // for |ssl|. It returns true on success and false on error. // // This function is only valid on TLS 1.3 connections that have completed the // handshake. It is not valid for QUIC or DTLS, where multiple traffic secrets // may be active at a time. OPENSSL_EXPORT bool SSL_get_traffic_secrets( const SSL *ssl, Span *out_read_traffic_secret, Span *out_write_traffic_secret); // SSL_CTX_set_aes_hw_override_for_testing sets |override_value| to // override checking for aes hardware support for testing. 
If |override_value| // is set to true, the library will behave as if aes hardware support is // present. If it is set to false, the library will behave as if aes hardware // support is not present. OPENSSL_EXPORT void SSL_CTX_set_aes_hw_override_for_testing( SSL_CTX *ctx, bool override_value); // SSL_set_aes_hw_override_for_testing acts the same as // |SSL_CTX_set_aes_override_for_testing| but only configures a single |SSL*|. OPENSSL_EXPORT void SSL_set_aes_hw_override_for_testing(SSL *ssl, bool override_value); BSSL_NAMESPACE_END } // extern C++ #endif // !defined(BORINGSSL_NO_CXX) #endif #define SSL_R_APP_DATA_IN_HANDSHAKE 100 #define SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT 101 #define SSL_R_BAD_ALERT 102 #define SSL_R_BAD_CHANGE_CIPHER_SPEC 103 #define SSL_R_BAD_DATA_RETURNED_BY_CALLBACK 104 #define SSL_R_BAD_DH_P_LENGTH 105 #define SSL_R_BAD_DIGEST_LENGTH 106 #define SSL_R_BAD_ECC_CERT 107 #define SSL_R_BAD_ECPOINT 108 #define SSL_R_BAD_HANDSHAKE_RECORD 109 #define SSL_R_BAD_HELLO_REQUEST 110 #define SSL_R_BAD_LENGTH 111 #define SSL_R_BAD_PACKET_LENGTH 112 #define SSL_R_BAD_RSA_ENCRYPT 113 #define SSL_R_BAD_SIGNATURE 114 #define SSL_R_BAD_SRTP_MKI_VALUE 115 #define SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST 116 #define SSL_R_BAD_SSL_FILETYPE 117 #define SSL_R_BAD_WRITE_RETRY 118 #define SSL_R_BIO_NOT_SET 119 #define SSL_R_BN_LIB 120 #define SSL_R_BUFFER_TOO_SMALL 121 #define SSL_R_CA_DN_LENGTH_MISMATCH 122 #define SSL_R_CA_DN_TOO_LONG 123 #define SSL_R_CCS_RECEIVED_EARLY 124 #define SSL_R_CERTIFICATE_VERIFY_FAILED 125 #define SSL_R_CERT_CB_ERROR 126 #define SSL_R_CERT_LENGTH_MISMATCH 127 #define SSL_R_CHANNEL_ID_NOT_P256 128 #define SSL_R_CHANNEL_ID_SIGNATURE_INVALID 129 #define SSL_R_CIPHER_OR_HASH_UNAVAILABLE 130 #define SSL_R_CLIENTHELLO_PARSE_FAILED 131 #define SSL_R_CLIENTHELLO_TLSEXT 132 #define SSL_R_CONNECTION_REJECTED 133 #define SSL_R_CONNECTION_TYPE_NOT_SET 134 #define SSL_R_CUSTOM_EXTENSION_ERROR 135 #define SSL_R_DATA_LENGTH_TOO_LONG 136 
#define SSL_R_DECODE_ERROR 137 #define SSL_R_DECRYPTION_FAILED 138 #define SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC 139 #define SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG 140 #define SSL_R_DH_P_TOO_LONG 141 #define SSL_R_DIGEST_CHECK_FAILED 142 #define SSL_R_DTLS_MESSAGE_TOO_BIG 143 #define SSL_R_ECC_CERT_NOT_FOR_SIGNING 144 #define SSL_R_EMS_STATE_INCONSISTENT 145 #define SSL_R_ENCRYPTED_LENGTH_TOO_LONG 146 #define SSL_R_ERROR_ADDING_EXTENSION 147 #define SSL_R_ERROR_IN_RECEIVED_CIPHER_LIST 148 #define SSL_R_ERROR_PARSING_EXTENSION 149 #define SSL_R_EXCESSIVE_MESSAGE_SIZE 150 #define SSL_R_EXTRA_DATA_IN_MESSAGE 151 #define SSL_R_FRAGMENT_MISMATCH 152 #define SSL_R_GOT_NEXT_PROTO_WITHOUT_EXTENSION 153 #define SSL_R_HANDSHAKE_FAILURE_ON_CLIENT_HELLO 154 #define SSL_R_HTTPS_PROXY_REQUEST 155 #define SSL_R_HTTP_REQUEST 156 #define SSL_R_INAPPROPRIATE_FALLBACK 157 #define SSL_R_INVALID_COMMAND 158 #define SSL_R_INVALID_MESSAGE 159 #define SSL_R_INVALID_SSL_SESSION 160 #define SSL_R_INVALID_TICKET_KEYS_LENGTH 161 #define SSL_R_LENGTH_MISMATCH 162 #define SSL_R_MISSING_EXTENSION 164 #define SSL_R_MISSING_RSA_CERTIFICATE 165 #define SSL_R_MISSING_TMP_DH_KEY 166 #define SSL_R_MISSING_TMP_ECDH_KEY 167 #define SSL_R_MIXED_SPECIAL_OPERATOR_WITH_GROUPS 168 #define SSL_R_MTU_TOO_SMALL 169 #define SSL_R_NEGOTIATED_BOTH_NPN_AND_ALPN 170 #define SSL_R_NESTED_GROUP 171 #define SSL_R_NO_CERTIFICATES_RETURNED 172 #define SSL_R_NO_CERTIFICATE_ASSIGNED 173 #define SSL_R_NO_CERTIFICATE_SET 174 #define SSL_R_NO_CIPHERS_AVAILABLE 175 #define SSL_R_NO_CIPHERS_PASSED 176 #define SSL_R_NO_CIPHER_MATCH 177 #define SSL_R_NO_COMPRESSION_SPECIFIED 178 #define SSL_R_NO_METHOD_SPECIFIED 179 #define SSL_R_NO_PRIVATE_KEY_ASSIGNED 181 #define SSL_R_NO_RENEGOTIATION 182 #define SSL_R_NO_REQUIRED_DIGEST 183 #define SSL_R_NO_SHARED_CIPHER 184 #define SSL_R_NULL_SSL_CTX 185 #define SSL_R_NULL_SSL_METHOD_PASSED 186 #define SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED 187 #define 
SSL_R_OLD_SESSION_VERSION_NOT_RETURNED 188 #define SSL_R_OUTPUT_ALIASES_INPUT 189 #define SSL_R_PARSE_TLSEXT 190 #define SSL_R_PATH_TOO_LONG 191 #define SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE 192 #define SSL_R_PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE 193 #define SSL_R_PROTOCOL_IS_SHUTDOWN 194 #define SSL_R_PSK_IDENTITY_NOT_FOUND 195 #define SSL_R_PSK_NO_CLIENT_CB 196 #define SSL_R_PSK_NO_SERVER_CB 197 #define SSL_R_READ_TIMEOUT_EXPIRED 198 #define SSL_R_RECORD_LENGTH_MISMATCH 199 #define SSL_R_RECORD_TOO_LARGE 200 #define SSL_R_RENEGOTIATION_ENCODING_ERR 201 #define SSL_R_RENEGOTIATION_MISMATCH 202 #define SSL_R_REQUIRED_CIPHER_MISSING 203 #define SSL_R_RESUMED_EMS_SESSION_WITHOUT_EMS_EXTENSION 204 #define SSL_R_RESUMED_NON_EMS_SESSION_WITH_EMS_EXTENSION 205 #define SSL_R_SCSV_RECEIVED_WHEN_RENEGOTIATING 206 #define SSL_R_SERVERHELLO_TLSEXT 207 #define SSL_R_SESSION_ID_CONTEXT_UNINITIALIZED 208 #define SSL_R_SESSION_MAY_NOT_BE_CREATED 209 #define SSL_R_SIGNATURE_ALGORITHMS_EXTENSION_SENT_BY_SERVER 210 #define SSL_R_SRTP_COULD_NOT_ALLOCATE_PROFILES 211 #define SSL_R_SRTP_UNKNOWN_PROTECTION_PROFILE 212 #define SSL_R_SSL3_EXT_INVALID_SERVERNAME 213 #define SSL_R_SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION 214 #define SSL_R_SSL_HANDSHAKE_FAILURE 215 #define SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG 216 #define SSL_R_TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST 217 #define SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG 218 #define SSL_R_TOO_MANY_EMPTY_FRAGMENTS 219 #define SSL_R_TOO_MANY_WARNING_ALERTS 220 #define SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS 221 #define SSL_R_UNEXPECTED_EXTENSION 222 #define SSL_R_UNEXPECTED_MESSAGE 223 #define SSL_R_UNEXPECTED_OPERATOR_IN_GROUP 224 #define SSL_R_UNEXPECTED_RECORD 225 #define SSL_R_UNINITIALIZED 226 #define SSL_R_UNKNOWN_ALERT_TYPE 227 #define SSL_R_UNKNOWN_CERTIFICATE_TYPE 228 #define SSL_R_UNKNOWN_CIPHER_RETURNED 229 #define SSL_R_UNKNOWN_CIPHER_TYPE 230 #define SSL_R_UNKNOWN_DIGEST 231 #define SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE 232 #define 
SSL_R_UNKNOWN_PROTOCOL 233 #define SSL_R_UNKNOWN_SSL_VERSION 234 #define SSL_R_UNKNOWN_STATE 235 #define SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED 236 #define SSL_R_UNSUPPORTED_CIPHER 237 #define SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM 238 #define SSL_R_UNSUPPORTED_ELLIPTIC_CURVE 239 #define SSL_R_UNSUPPORTED_PROTOCOL 240 #define SSL_R_WRONG_CERTIFICATE_TYPE 241 #define SSL_R_WRONG_CIPHER_RETURNED 242 #define SSL_R_WRONG_CURVE 243 #define SSL_R_WRONG_MESSAGE_TYPE 244 #define SSL_R_WRONG_SIGNATURE_TYPE 245 #define SSL_R_WRONG_SSL_VERSION 246 #define SSL_R_WRONG_VERSION_NUMBER 247 #define SSL_R_X509_LIB 248 #define SSL_R_X509_VERIFICATION_SETUP_PROBLEMS 249 #define SSL_R_SHUTDOWN_WHILE_IN_INIT 250 #define SSL_R_INVALID_OUTER_RECORD_TYPE 251 #define SSL_R_UNSUPPORTED_PROTOCOL_FOR_CUSTOM_KEY 252 #define SSL_R_NO_COMMON_SIGNATURE_ALGORITHMS 253 #define SSL_R_DOWNGRADE_DETECTED 254 #define SSL_R_EXCESS_HANDSHAKE_DATA 255 #define SSL_R_INVALID_COMPRESSION_LIST 256 #define SSL_R_DUPLICATE_EXTENSION 257 #define SSL_R_MISSING_KEY_SHARE 258 #define SSL_R_INVALID_ALPN_PROTOCOL 259 #define SSL_R_TOO_MANY_KEY_UPDATES 260 #define SSL_R_BLOCK_CIPHER_PAD_IS_WRONG 261 #define SSL_R_NO_CIPHERS_SPECIFIED 262 #define SSL_R_RENEGOTIATION_EMS_MISMATCH 263 #define SSL_R_DUPLICATE_KEY_SHARE 264 #define SSL_R_NO_GROUPS_SPECIFIED 265 #define SSL_R_NO_SHARED_GROUP 266 #define SSL_R_PRE_SHARED_KEY_MUST_BE_LAST 267 #define SSL_R_OLD_SESSION_PRF_HASH_MISMATCH 268 #define SSL_R_INVALID_SCT_LIST 269 #define SSL_R_TOO_MUCH_SKIPPED_EARLY_DATA 270 #define SSL_R_PSK_IDENTITY_BINDER_COUNT_MISMATCH 271 #define SSL_R_CANNOT_PARSE_LEAF_CERT 272 #define SSL_R_SERVER_CERT_CHANGED 273 #define SSL_R_CERTIFICATE_AND_PRIVATE_KEY_MISMATCH 274 #define SSL_R_CANNOT_HAVE_BOTH_PRIVKEY_AND_METHOD 275 #define SSL_R_TICKET_ENCRYPTION_FAILED 276 #define SSL_R_ALPN_MISMATCH_ON_EARLY_DATA 277 #define SSL_R_WRONG_VERSION_ON_EARLY_DATA 278 #define SSL_R_UNEXPECTED_EXTENSION_ON_EARLY_DATA 279 #define 
SSL_R_NO_SUPPORTED_VERSIONS_ENABLED 280 #define SSL_R_EMPTY_HELLO_RETRY_REQUEST 282 #define SSL_R_EARLY_DATA_NOT_IN_USE 283 #define SSL_R_HANDSHAKE_NOT_COMPLETE 284 #define SSL_R_NEGOTIATED_TB_WITHOUT_EMS_OR_RI 285 #define SSL_R_SERVER_ECHOED_INVALID_SESSION_ID 286 #define SSL_R_PRIVATE_KEY_OPERATION_FAILED 287 #define SSL_R_SECOND_SERVERHELLO_VERSION_MISMATCH 288 #define SSL_R_OCSP_CB_ERROR 289 #define SSL_R_SSL_SESSION_ID_TOO_LONG 290 #define SSL_R_APPLICATION_DATA_ON_SHUTDOWN 291 #define SSL_R_CERT_DECOMPRESSION_FAILED 292 #define SSL_R_UNCOMPRESSED_CERT_TOO_LARGE 293 #define SSL_R_UNKNOWN_CERT_COMPRESSION_ALG 294 #define SSL_R_INVALID_SIGNATURE_ALGORITHM 295 #define SSL_R_DUPLICATE_SIGNATURE_ALGORITHM 296 #define SSL_R_TLS13_DOWNGRADE 297 #define SSL_R_QUIC_INTERNAL_ERROR 298 #define SSL_R_WRONG_ENCRYPTION_LEVEL_RECEIVED 299 #define SSL_R_TOO_MUCH_READ_EARLY_DATA 300 #define SSL_R_INVALID_DELEGATED_CREDENTIAL 301 #define SSL_R_KEY_USAGE_BIT_INCORRECT 302 #define SSL_R_INCONSISTENT_CLIENT_HELLO 303 #define SSL_R_CIPHER_MISMATCH_ON_EARLY_DATA 304 #define SSL_R_QUIC_TRANSPORT_PARAMETERS_MISCONFIGURED 305 #define SSL_R_UNEXPECTED_COMPATIBILITY_MODE 306 #define SSL_R_NO_APPLICATION_PROTOCOL 307 #define SSL_R_NEGOTIATED_ALPS_WITHOUT_ALPN 308 #define SSL_R_ALPS_MISMATCH_ON_EARLY_DATA 309 #define SSL_R_ECH_SERVER_CONFIG_AND_PRIVATE_KEY_MISMATCH 310 #define SSL_R_ECH_SERVER_CONFIG_UNSUPPORTED_EXTENSION 311 #define SSL_R_UNSUPPORTED_ECH_SERVER_CONFIG 312 #define SSL_R_ECH_SERVER_WOULD_HAVE_NO_RETRY_CONFIGS 313 #define SSL_R_INVALID_CLIENT_HELLO_INNER 314 #define SSL_R_INVALID_ALPN_PROTOCOL_LIST 315 #define SSL_R_COULD_NOT_PARSE_HINTS 316 #define SSL_R_INVALID_ECH_PUBLIC_NAME 317 #define SSL_R_INVALID_ECH_CONFIG_LIST 318 #define SSL_R_ECH_REJECTED 319 #define SSL_R_INVALID_OUTER_EXTENSION 320 #define SSL_R_INCONSISTENT_ECH_NEGOTIATION 321 #define SSL_R_INVALID_ALPS_CODEPOINT 322 #define SSL_R_NO_MATCHING_ISSUER 323 #define SSL_R_SSLV3_ALERT_CLOSE_NOTIFY 1000 #define 
SSL_R_SSLV3_ALERT_UNEXPECTED_MESSAGE 1010 #define SSL_R_SSLV3_ALERT_BAD_RECORD_MAC 1020 #define SSL_R_TLSV1_ALERT_DECRYPTION_FAILED 1021 #define SSL_R_TLSV1_ALERT_RECORD_OVERFLOW 1022 #define SSL_R_SSLV3_ALERT_DECOMPRESSION_FAILURE 1030 #define SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE 1040 #define SSL_R_SSLV3_ALERT_NO_CERTIFICATE 1041 #define SSL_R_SSLV3_ALERT_BAD_CERTIFICATE 1042 #define SSL_R_SSLV3_ALERT_UNSUPPORTED_CERTIFICATE 1043 #define SSL_R_SSLV3_ALERT_CERTIFICATE_REVOKED 1044 #define SSL_R_SSLV3_ALERT_CERTIFICATE_EXPIRED 1045 #define SSL_R_SSLV3_ALERT_CERTIFICATE_UNKNOWN 1046 #define SSL_R_SSLV3_ALERT_ILLEGAL_PARAMETER 1047 #define SSL_R_TLSV1_ALERT_UNKNOWN_CA 1048 #define SSL_R_TLSV1_ALERT_ACCESS_DENIED 1049 #define SSL_R_TLSV1_ALERT_DECODE_ERROR 1050 #define SSL_R_TLSV1_ALERT_DECRYPT_ERROR 1051 #define SSL_R_TLSV1_ALERT_EXPORT_RESTRICTION 1060 #define SSL_R_TLSV1_ALERT_PROTOCOL_VERSION 1070 #define SSL_R_TLSV1_ALERT_INSUFFICIENT_SECURITY 1071 #define SSL_R_TLSV1_ALERT_INTERNAL_ERROR 1080 #define SSL_R_TLSV1_ALERT_INAPPROPRIATE_FALLBACK 1086 #define SSL_R_TLSV1_ALERT_USER_CANCELLED 1090 #define SSL_R_TLSV1_ALERT_NO_RENEGOTIATION 1100 #define SSL_R_TLSV1_ALERT_UNSUPPORTED_EXTENSION 1110 #define SSL_R_TLSV1_ALERT_CERTIFICATE_UNOBTAINABLE 1111 #define SSL_R_TLSV1_ALERT_UNRECOGNIZED_NAME 1112 #define SSL_R_TLSV1_ALERT_BAD_CERTIFICATE_STATUS_RESPONSE 1113 #define SSL_R_TLSV1_ALERT_BAD_CERTIFICATE_HASH_VALUE 1114 #define SSL_R_TLSV1_ALERT_UNKNOWN_PSK_IDENTITY 1115 #define SSL_R_TLSV1_ALERT_CERTIFICATE_REQUIRED 1116 #define SSL_R_TLSV1_ALERT_NO_APPLICATION_PROTOCOL 1120 #define SSL_R_TLSV1_ALERT_ECH_REQUIRED 1121 #endif // OPENSSL_HEADER_SSL_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_ssl3.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. 
* * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_SSL3_H #define OPENSSL_HEADER_SSL3_H #include "CNIOBoringSSL_aead.h" #ifdef __cplusplus extern "C" { #endif // These are kept to support clients that negotiates higher protocol versions // using SSLv2 client hello records. #define SSL2_MT_CLIENT_HELLO 1 #define SSL2_VERSION 0x0002 // Signalling cipher suite value from RFC 5746. #define SSL3_CK_SCSV 0x030000FF // Fallback signalling cipher suite value from RFC 7507. #define SSL3_CK_FALLBACK_SCSV 0x03005600 #define SSL3_CK_RSA_NULL_MD5 0x03000001 #define SSL3_CK_RSA_NULL_SHA 0x03000002 #define SSL3_CK_RSA_RC4_40_MD5 0x03000003 #define SSL3_CK_RSA_RC4_128_MD5 0x03000004 #define SSL3_CK_RSA_RC4_128_SHA 0x03000005 #define SSL3_CK_RSA_RC2_40_MD5 0x03000006 #define SSL3_CK_RSA_IDEA_128_SHA 0x03000007 #define SSL3_CK_RSA_DES_40_CBC_SHA 0x03000008 #define SSL3_CK_RSA_DES_64_CBC_SHA 0x03000009 #define SSL3_CK_RSA_DES_192_CBC3_SHA 0x0300000A #define SSL3_CK_DH_DSS_DES_40_CBC_SHA 0x0300000B #define SSL3_CK_DH_DSS_DES_64_CBC_SHA 0x0300000C #define SSL3_CK_DH_DSS_DES_192_CBC3_SHA 0x0300000D #define SSL3_CK_DH_RSA_DES_40_CBC_SHA 0x0300000E #define SSL3_CK_DH_RSA_DES_64_CBC_SHA 0x0300000F #define SSL3_CK_DH_RSA_DES_192_CBC3_SHA 0x03000010 #define SSL3_CK_EDH_DSS_DES_40_CBC_SHA 0x03000011 #define SSL3_CK_EDH_DSS_DES_64_CBC_SHA 0x03000012 #define SSL3_CK_EDH_DSS_DES_192_CBC3_SHA 0x03000013 #define SSL3_CK_EDH_RSA_DES_40_CBC_SHA 0x03000014 #define SSL3_CK_EDH_RSA_DES_64_CBC_SHA 0x03000015 #define SSL3_CK_EDH_RSA_DES_192_CBC3_SHA 0x03000016 #define SSL3_CK_ADH_RC4_40_MD5 0x03000017 #define SSL3_CK_ADH_RC4_128_MD5 0x03000018 #define SSL3_CK_ADH_DES_40_CBC_SHA 0x03000019 #define SSL3_CK_ADH_DES_64_CBC_SHA 0x0300001A #define SSL3_CK_ADH_DES_192_CBC_SHA 0x0300001B #define 
SSL3_TXT_RSA_NULL_MD5 "NULL-MD5" #define SSL3_TXT_RSA_NULL_SHA "NULL-SHA" #define SSL3_TXT_RSA_RC4_40_MD5 "EXP-RC4-MD5" #define SSL3_TXT_RSA_RC4_128_MD5 "RC4-MD5" #define SSL3_TXT_RSA_RC4_128_SHA "RC4-SHA" #define SSL3_TXT_RSA_RC2_40_MD5 "EXP-RC2-CBC-MD5" #define SSL3_TXT_RSA_IDEA_128_SHA "IDEA-CBC-SHA" #define SSL3_TXT_RSA_DES_40_CBC_SHA "EXP-DES-CBC-SHA" #define SSL3_TXT_RSA_DES_64_CBC_SHA "DES-CBC-SHA" #define SSL3_TXT_RSA_DES_192_CBC3_SHA "DES-CBC3-SHA" #define SSL3_TXT_DH_DSS_DES_40_CBC_SHA "EXP-DH-DSS-DES-CBC-SHA" #define SSL3_TXT_DH_DSS_DES_64_CBC_SHA "DH-DSS-DES-CBC-SHA" #define SSL3_TXT_DH_DSS_DES_192_CBC3_SHA "DH-DSS-DES-CBC3-SHA" #define SSL3_TXT_DH_RSA_DES_40_CBC_SHA "EXP-DH-RSA-DES-CBC-SHA" #define SSL3_TXT_DH_RSA_DES_64_CBC_SHA "DH-RSA-DES-CBC-SHA" #define SSL3_TXT_DH_RSA_DES_192_CBC3_SHA "DH-RSA-DES-CBC3-SHA" #define SSL3_TXT_EDH_DSS_DES_40_CBC_SHA "EXP-EDH-DSS-DES-CBC-SHA" #define SSL3_TXT_EDH_DSS_DES_64_CBC_SHA "EDH-DSS-DES-CBC-SHA" #define SSL3_TXT_EDH_DSS_DES_192_CBC3_SHA "EDH-DSS-DES-CBC3-SHA" #define SSL3_TXT_EDH_RSA_DES_40_CBC_SHA "EXP-EDH-RSA-DES-CBC-SHA" #define SSL3_TXT_EDH_RSA_DES_64_CBC_SHA "EDH-RSA-DES-CBC-SHA" #define SSL3_TXT_EDH_RSA_DES_192_CBC3_SHA "EDH-RSA-DES-CBC3-SHA" #define SSL3_TXT_ADH_RC4_40_MD5 "EXP-ADH-RC4-MD5" #define SSL3_TXT_ADH_RC4_128_MD5 "ADH-RC4-MD5" #define SSL3_TXT_ADH_DES_40_CBC_SHA "EXP-ADH-DES-CBC-SHA" #define SSL3_TXT_ADH_DES_64_CBC_SHA "ADH-DES-CBC-SHA" #define SSL3_TXT_ADH_DES_192_CBC_SHA "ADH-DES-CBC3-SHA" #define SSL3_SSL_SESSION_ID_LENGTH 32 #define SSL3_MAX_SSL_SESSION_ID_LENGTH 32 #define SSL3_MASTER_SECRET_SIZE 48 #define SSL3_RANDOM_SIZE 32 #define SSL3_SESSION_ID_SIZE 32 #define SSL3_RT_HEADER_LENGTH 5 #define SSL3_HM_HEADER_LENGTH 4 #ifndef SSL3_ALIGN_PAYLOAD // Some will argue that this increases memory footprint, but it's not actually // true. Point is that malloc has to return at least 64-bit aligned pointers, // meaning that allocating 5 bytes wastes 3 bytes in either case. 
Suggested // pre-gaping simply moves these wasted bytes from the end of allocated region // to its front, but makes data payload aligned, which improves performance. #define SSL3_ALIGN_PAYLOAD 8 #else #if (SSL3_ALIGN_PAYLOAD & (SSL3_ALIGN_PAYLOAD - 1)) != 0 #error "insane SSL3_ALIGN_PAYLOAD" #undef SSL3_ALIGN_PAYLOAD #endif #endif // This is the maximum MAC (digest) size used by the SSL library. Currently // maximum of 20 is used by SHA1, but we reserve for future extension for // 512-bit hashes. #define SSL3_RT_MAX_MD_SIZE 64 // Maximum block size used in all ciphersuites. Currently 16 for AES. #define SSL_RT_MAX_CIPHER_BLOCK_SIZE 16 // Maximum plaintext length: defined by SSL/TLS standards #define SSL3_RT_MAX_PLAIN_LENGTH 16384 // Maximum compression overhead: defined by SSL/TLS standards #define SSL3_RT_MAX_COMPRESSED_OVERHEAD 1024 // The standards give a maximum encryption overhead of 1024 bytes. In practice // the value is lower than this. The overhead is the maximum number of padding // bytes (256) plus the mac size. // // TODO(davidben): This derivation doesn't take AEADs into account, or TLS 1.1 // explicit nonces. It happens to work because |SSL3_RT_MAX_MD_SIZE| is larger // than necessary and no true AEAD has variable overhead in TLS 1.2. #define SSL3_RT_MAX_ENCRYPTED_OVERHEAD (256 + SSL3_RT_MAX_MD_SIZE) // SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD is the maximum overhead in encrypting a // record. This does not include the record header. Some ciphers use explicit // nonces, so it includes both the AEAD overhead as well as the nonce. #define SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \ (EVP_AEAD_MAX_OVERHEAD + EVP_AEAD_MAX_NONCE_LENGTH) // SSL3_RT_MAX_COMPRESSED_LENGTH is an alias for // |SSL3_RT_MAX_PLAIN_LENGTH|. Compression is gone, so don't include the // compression overhead. 
#define SSL3_RT_MAX_COMPRESSED_LENGTH SSL3_RT_MAX_PLAIN_LENGTH #define SSL3_RT_MAX_ENCRYPTED_LENGTH \ (SSL3_RT_MAX_ENCRYPTED_OVERHEAD + SSL3_RT_MAX_COMPRESSED_LENGTH) #define SSL3_RT_MAX_PACKET_SIZE \ (SSL3_RT_MAX_ENCRYPTED_LENGTH + SSL3_RT_HEADER_LENGTH) #define SSL3_MD_CLIENT_FINISHED_CONST "\x43\x4C\x4E\x54" #define SSL3_MD_SERVER_FINISHED_CONST "\x53\x52\x56\x52" #define SSL3_RT_CHANGE_CIPHER_SPEC 20 #define SSL3_RT_ALERT 21 #define SSL3_RT_HANDSHAKE 22 #define SSL3_RT_APPLICATION_DATA 23 #define SSL3_RT_ACK 26 // Pseudo content type for SSL/TLS header info #define SSL3_RT_HEADER 0x100 #define SSL3_RT_CLIENT_HELLO_INNER 0x101 #define SSL3_AL_WARNING 1 #define SSL3_AL_FATAL 2 #define SSL3_AD_CLOSE_NOTIFY 0 #define SSL3_AD_UNEXPECTED_MESSAGE 10 // fatal #define SSL3_AD_BAD_RECORD_MAC 20 // fatal #define SSL3_AD_DECOMPRESSION_FAILURE 30 // fatal #define SSL3_AD_HANDSHAKE_FAILURE 40 // fatal #define SSL3_AD_NO_CERTIFICATE 41 #define SSL3_AD_BAD_CERTIFICATE 42 #define SSL3_AD_UNSUPPORTED_CERTIFICATE 43 #define SSL3_AD_CERTIFICATE_REVOKED 44 #define SSL3_AD_CERTIFICATE_EXPIRED 45 #define SSL3_AD_CERTIFICATE_UNKNOWN 46 #define SSL3_AD_ILLEGAL_PARAMETER 47 // fatal #define SSL3_AD_INAPPROPRIATE_FALLBACK 86 // fatal #define SSL3_CT_RSA_SIGN 1 #define SSL3_MT_HELLO_REQUEST 0 #define SSL3_MT_CLIENT_HELLO 1 #define SSL3_MT_SERVER_HELLO 2 #define SSL3_MT_NEW_SESSION_TICKET 4 #define SSL3_MT_END_OF_EARLY_DATA 5 #define SSL3_MT_ENCRYPTED_EXTENSIONS 8 #define SSL3_MT_CERTIFICATE 11 #define SSL3_MT_SERVER_KEY_EXCHANGE 12 #define SSL3_MT_CERTIFICATE_REQUEST 13 #define SSL3_MT_SERVER_HELLO_DONE 14 #define SSL3_MT_CERTIFICATE_VERIFY 15 #define SSL3_MT_CLIENT_KEY_EXCHANGE 16 #define SSL3_MT_FINISHED 20 #define SSL3_MT_CERTIFICATE_STATUS 22 #define SSL3_MT_SUPPLEMENTAL_DATA 23 #define SSL3_MT_KEY_UPDATE 24 #define SSL3_MT_COMPRESSED_CERTIFICATE 25 #define SSL3_MT_NEXT_PROTO 67 #define SSL3_MT_CHANNEL_ID 203 #define SSL3_MT_MESSAGE_HASH 254 #define DTLS1_MT_HELLO_VERIFY_REQUEST 3 // 
The following are legacy aliases for consumers which use // |SSL_CTX_set_msg_callback|. #define SSL3_MT_SERVER_DONE SSL3_MT_SERVER_HELLO_DONE #define SSL3_MT_NEWSESSION_TICKET SSL3_MT_NEW_SESSION_TICKET #define SSL3_MT_CCS 1 #ifdef __cplusplus } // extern C #endif #endif // OPENSSL_HEADER_SSL3_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_stack.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_STACK_H #define OPENSSL_HEADER_STACK_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // A stack, in OpenSSL, is an array of pointers. They are the most commonly // used collection object. // // This file defines macros for type-safe use of the stack functions. A stack // type is named like |STACK_OF(FOO)| and is accessed with functions named // like |sk_FOO_*|. Note the stack will typically contain /pointers/ to |FOO|. // // The |DECLARE_STACK_OF| macro makes |STACK_OF(FOO)| available, and // |DEFINE_STACK_OF| makes the corresponding functions available. // Defining stacks. // STACK_OF expands to the stack type for |type|. #define STACK_OF(type) struct stack_st_##type // DECLARE_STACK_OF declares the |STACK_OF(type)| type. It does not make the // corresponding |sk_type_*| functions available. This macro should be used in // files which only need the type. #define DECLARE_STACK_OF(type) STACK_OF(type); // DEFINE_NAMED_STACK_OF defines |STACK_OF(name)| to be a stack whose elements // are |type| *. This macro makes the |sk_name_*| functions available. // // It is not necessary to use |DECLARE_STACK_OF| in files which use this macro. 
#define DEFINE_NAMED_STACK_OF(name, type) \ BORINGSSL_DEFINE_STACK_OF_IMPL(name, type *, const type *) \ BORINGSSL_DEFINE_STACK_TRAITS(name, type, false) // DEFINE_STACK_OF defines |STACK_OF(type)| to be a stack whose elements are // |type| *. This macro makes the |sk_type_*| functions available. // // It is not necessary to use |DECLARE_STACK_OF| in files which use this macro. #define DEFINE_STACK_OF(type) DEFINE_NAMED_STACK_OF(type, type) // DEFINE_CONST_STACK_OF defines |STACK_OF(type)| to be a stack whose elements // are const |type| *. This macro makes the |sk_type_*| functions available. // // It is not necessary to use |DECLARE_STACK_OF| in files which use this macro. #define DEFINE_CONST_STACK_OF(type) \ BORINGSSL_DEFINE_STACK_OF_IMPL(type, const type *, const type *) \ BORINGSSL_DEFINE_STACK_TRAITS(type, const type, true) // Using stacks. // // After the |DEFINE_STACK_OF| macro is used, the following functions are // available. #if 0 // Sample // sk_SAMPLE_free_func is a callback to free an element in a stack. typedef void (*sk_SAMPLE_free_func)(SAMPLE *); // sk_SAMPLE_copy_func is a callback to copy an element in a stack. It should // return the copy or NULL on error. typedef SAMPLE *(*sk_SAMPLE_copy_func)(const SAMPLE *); // sk_SAMPLE_cmp_func is a callback to compare |*a| to |*b|. It should return a // value < 0, 0, or > 0 if |*a| is less than, equal to, or greater than |*b|, // respectively. Note the extra indirection - the function is given a pointer // to a pointer to the element. This is the |qsort|/|bsearch| comparison // function applied to an array of |SAMPLE*|. typedef int (*sk_SAMPLE_cmp_func)(const SAMPLE *const *a, const SAMPLE *const *b); // sk_SAMPLE_new creates a new, empty stack with the given comparison function, // which may be NULL. It returns the new stack or NULL on allocation failure. STACK_OF(SAMPLE) *sk_SAMPLE_new(sk_SAMPLE_cmp_func comp); // sk_SAMPLE_new_null creates a new, empty stack. 
It returns the new stack or // NULL on allocation failure. STACK_OF(SAMPLE) *sk_SAMPLE_new_null(void); // sk_SAMPLE_num returns the number of elements in |sk|. It is safe to cast this // value to |int|. |sk| is guaranteed to have at most |INT_MAX| elements. If // |sk| is NULL, it is treated as the empty list and this function returns zero. size_t sk_SAMPLE_num(const STACK_OF(SAMPLE) *sk); // sk_SAMPLE_zero resets |sk| to the empty state but does nothing to free the // individual elements themselves. void sk_SAMPLE_zero(STACK_OF(SAMPLE) *sk); // sk_SAMPLE_value returns the |i|th pointer in |sk|, or NULL if |i| is out of // range. If |sk| is NULL, it is treated as an empty list and the function // returns NULL. SAMPLE *sk_SAMPLE_value(const STACK_OF(SAMPLE) *sk, size_t i); // sk_SAMPLE_set sets the |i|th pointer in |sk| to |p| and returns |p|. If |i| // is out of range, it returns NULL. SAMPLE *sk_SAMPLE_set(STACK_OF(SAMPLE) *sk, size_t i, SAMPLE *p); // sk_SAMPLE_free frees |sk|, but does nothing to free the individual elements. // Use |sk_SAMPLE_pop_free| to also free the elements. void sk_SAMPLE_free(STACK_OF(SAMPLE) *sk); // sk_SAMPLE_pop_free calls |free_func| on each element in |sk| and then // frees the stack itself. void sk_SAMPLE_pop_free(STACK_OF(SAMPLE) *sk, sk_SAMPLE_free_func free_func); // sk_SAMPLE_insert inserts |p| into the stack at index |where|, moving existing // elements if needed. It returns the length of the new stack, or zero on // error. size_t sk_SAMPLE_insert(STACK_OF(SAMPLE) *sk, SAMPLE *p, size_t where); // sk_SAMPLE_delete removes the pointer at index |where|, moving other elements // down if needed. It returns the removed pointer, or NULL if |where| is out of // range. SAMPLE *sk_SAMPLE_delete(STACK_OF(SAMPLE) *sk, size_t where); // sk_SAMPLE_delete_ptr removes, at most, one instance of |p| from |sk| based on // pointer equality. If an instance of |p| is found then |p| is returned, // otherwise it returns NULL. 
SAMPLE *sk_SAMPLE_delete_ptr(STACK_OF(SAMPLE) *sk, const SAMPLE *p); // sk_SAMPLE_delete_if_func is the callback function for |sk_SAMPLE_delete_if|. // It should return one to remove |p| and zero to keep it. typedef int (*sk_SAMPLE_delete_if_func)(SAMPLE *p, void *data); // sk_SAMPLE_delete_if calls |func| with each element of |sk| and removes the // entries where |func| returned one. This function does not free or return // removed pointers so, if |sk| owns its contents, |func| should release the // pointers prior to returning one. void sk_SAMPLE_delete_if(STACK_OF(SAMPLE) *sk, sk_SAMPLE_delete_if_func func, void *data); // sk_SAMPLE_find find the first value in |sk| equal to |p|. |sk|'s comparison // function determines equality, or pointer equality if |sk| has no comparison // function. // // If the stack is sorted (see |sk_SAMPLE_sort|), this function uses a binary // search. Otherwise it performs a linear search. If it finds a matching // element, it writes the index to |*out_index| (if |out_index| is not NULL) and // returns one. Otherwise, it returns zero. If |sk| is NULL, it is treated as // the empty list and the function returns zero. // // Note this differs from OpenSSL. The type signature is slightly different, and // OpenSSL's version will implicitly sort |sk| if it has a comparison function // defined. int sk_SAMPLE_find(const STACK_OF(SAMPLE) *sk, size_t *out_index, const SAMPLE *p); // sk_SAMPLE_shift removes and returns the first element in |sk|, or NULL if // |sk| is empty. SAMPLE *sk_SAMPLE_shift(STACK_OF(SAMPLE) *sk); // sk_SAMPLE_push appends |p| to |sk| and returns the length of the new stack, // or 0 on allocation failure. size_t sk_SAMPLE_push(STACK_OF(SAMPLE) *sk, SAMPLE *p); // sk_SAMPLE_pop removes and returns the last element of |sk|, or NULL if |sk| // is empty. SAMPLE *sk_SAMPLE_pop(STACK_OF(SAMPLE) *sk); // sk_SAMPLE_dup performs a shallow copy of a stack and returns the new stack, // or NULL on error. 
Use |sk_SAMPLE_deep_copy| to also copy the elements. STACK_OF(SAMPLE) *sk_SAMPLE_dup(const STACK_OF(SAMPLE) *sk); // sk_SAMPLE_sort sorts the elements of |sk| into ascending order based on the // comparison function. The stack maintains a "sorted" flag and sorting an // already sorted stack is a no-op. void sk_SAMPLE_sort(STACK_OF(SAMPLE) *sk); // sk_SAMPLE_is_sorted returns one if |sk| is known to be sorted and zero // otherwise. int sk_SAMPLE_is_sorted(const STACK_OF(SAMPLE) *sk); // sk_SAMPLE_set_cmp_func sets the comparison function to be used by |sk| and // returns the previous one. sk_SAMPLE_cmp_func sk_SAMPLE_set_cmp_func(STACK_OF(SAMPLE) *sk, sk_SAMPLE_cmp_func comp); // sk_SAMPLE_deep_copy performs a copy of |sk| and of each of the non-NULL // elements in |sk| by using |copy_func|. If an error occurs, it calls // |free_func| to free any copies already made and returns NULL. STACK_OF(SAMPLE) *sk_SAMPLE_deep_copy(const STACK_OF(SAMPLE) *sk, sk_SAMPLE_copy_func copy_func, sk_SAMPLE_free_func free_func); #endif // Sample // Private functions. // // The |sk_*| functions generated above are implemented internally using the // type-erased functions below. Callers should use the typed wrappers instead. // When using the type-erased functions, callers are responsible for ensuring // the underlying types are correct. Casting pointers to the wrong types will // result in memory errors. // OPENSSL_sk_free_func is a function that frees an element in a stack. Note its // actual type is void (*)(T *) for some T. Low-level |sk_*| functions will be // passed a type-specific wrapper to call it correctly. typedef void (*OPENSSL_sk_free_func)(void *ptr); // OPENSSL_sk_copy_func is a function that copies an element in a stack. Note // its actual type is T *(*)(const T *) for some T. Low-level |sk_*| functions // will be passed a type-specific wrapper to call it correctly. 
typedef void *(*OPENSSL_sk_copy_func)(const void *ptr); // OPENSSL_sk_cmp_func is a comparison function that returns a value < 0, 0 or > // 0 if |*a| is less than, equal to or greater than |*b|, respectively. Note // the extra indirection - the function is given a pointer to a pointer to the // element. This differs from the usual qsort/bsearch comparison function. // // Note its actual type is |int (*)(const T *const *a, const T *const *b)|. // Low-level |sk_*| functions will be passed a type-specific wrapper to call it // correctly. typedef int (*OPENSSL_sk_cmp_func)(const void *const *a, const void *const *b); // OPENSSL_sk_delete_if_func is the generic version of // |sk_SAMPLE_delete_if_func|. typedef int (*OPENSSL_sk_delete_if_func)(void *obj, void *data); // The following function types call the above type-erased signatures with the // true types. typedef void (*OPENSSL_sk_call_free_func)(OPENSSL_sk_free_func, void *); typedef void *(*OPENSSL_sk_call_copy_func)(OPENSSL_sk_copy_func, const void *); typedef int (*OPENSSL_sk_call_cmp_func)(OPENSSL_sk_cmp_func, const void *, const void *); typedef int (*OPENSSL_sk_call_delete_if_func)(OPENSSL_sk_delete_if_func, void *, void *); // An OPENSSL_STACK contains an array of pointers. It is not designed to be used // directly, rather the wrapper macros should be used. typedef struct stack_st OPENSSL_STACK; // The following are raw stack functions. They implement the corresponding typed // |sk_SAMPLE_*| functions generated by |DEFINE_STACK_OF|. Callers shouldn't be // using them. Rather, callers should use the typed functions. 
OPENSSL_EXPORT OPENSSL_STACK *OPENSSL_sk_new(OPENSSL_sk_cmp_func comp); OPENSSL_EXPORT OPENSSL_STACK *OPENSSL_sk_new_null(void); OPENSSL_EXPORT size_t OPENSSL_sk_num(const OPENSSL_STACK *sk); OPENSSL_EXPORT void OPENSSL_sk_zero(OPENSSL_STACK *sk); OPENSSL_EXPORT void *OPENSSL_sk_value(const OPENSSL_STACK *sk, size_t i); OPENSSL_EXPORT void *OPENSSL_sk_set(OPENSSL_STACK *sk, size_t i, void *p); OPENSSL_EXPORT void OPENSSL_sk_free(OPENSSL_STACK *sk); OPENSSL_EXPORT void OPENSSL_sk_pop_free_ex( OPENSSL_STACK *sk, OPENSSL_sk_call_free_func call_free_func, OPENSSL_sk_free_func free_func); OPENSSL_EXPORT size_t OPENSSL_sk_insert(OPENSSL_STACK *sk, void *p, size_t where); OPENSSL_EXPORT void *OPENSSL_sk_delete(OPENSSL_STACK *sk, size_t where); OPENSSL_EXPORT void *OPENSSL_sk_delete_ptr(OPENSSL_STACK *sk, const void *p); OPENSSL_EXPORT void OPENSSL_sk_delete_if( OPENSSL_STACK *sk, OPENSSL_sk_call_delete_if_func call_func, OPENSSL_sk_delete_if_func func, void *data); OPENSSL_EXPORT int OPENSSL_sk_find(const OPENSSL_STACK *sk, size_t *out_index, const void *p, OPENSSL_sk_call_cmp_func call_cmp_func); OPENSSL_EXPORT void *OPENSSL_sk_shift(OPENSSL_STACK *sk); OPENSSL_EXPORT size_t OPENSSL_sk_push(OPENSSL_STACK *sk, void *p); OPENSSL_EXPORT void *OPENSSL_sk_pop(OPENSSL_STACK *sk); OPENSSL_EXPORT OPENSSL_STACK *OPENSSL_sk_dup(const OPENSSL_STACK *sk); OPENSSL_EXPORT void OPENSSL_sk_sort(OPENSSL_STACK *sk, OPENSSL_sk_call_cmp_func call_cmp_func); OPENSSL_EXPORT int OPENSSL_sk_is_sorted(const OPENSSL_STACK *sk); OPENSSL_EXPORT OPENSSL_sk_cmp_func OPENSSL_sk_set_cmp_func(OPENSSL_STACK *sk, OPENSSL_sk_cmp_func comp); OPENSSL_EXPORT OPENSSL_STACK *OPENSSL_sk_deep_copy( const OPENSSL_STACK *sk, OPENSSL_sk_call_copy_func call_copy_func, OPENSSL_sk_copy_func copy_func, OPENSSL_sk_call_free_func call_free_func, OPENSSL_sk_free_func free_func); // Deprecated private functions (hidden). 
// // TODO(crbug.com/boringssl/499): Migrate callers to the typed wrappers, or at // least the new names and remove the old ones. // // TODO(b/290792019, b/290785937): Ideally these would at least be inline // functions, so we do not squat the symbols. typedef OPENSSL_STACK _STACK; // The following functions call the corresponding |OPENSSL_sk_*| function. OPENSSL_EXPORT OPENSSL_DEPRECATED OPENSSL_STACK *sk_new_null(void); OPENSSL_EXPORT OPENSSL_DEPRECATED size_t sk_num(const OPENSSL_STACK *sk); OPENSSL_EXPORT OPENSSL_DEPRECATED void *sk_value(const OPENSSL_STACK *sk, size_t i); OPENSSL_EXPORT OPENSSL_DEPRECATED void sk_free(OPENSSL_STACK *sk); OPENSSL_EXPORT OPENSSL_DEPRECATED size_t sk_push(OPENSSL_STACK *sk, void *p); OPENSSL_EXPORT OPENSSL_DEPRECATED void *sk_pop(OPENSSL_STACK *sk); // sk_pop_free_ex calls |OPENSSL_sk_pop_free_ex|. // // TODO(b/291994116): Remove this. OPENSSL_EXPORT OPENSSL_DEPRECATED void sk_pop_free_ex( OPENSSL_STACK *sk, OPENSSL_sk_call_free_func call_free_func, OPENSSL_sk_free_func free_func); // sk_pop_free behaves like |OPENSSL_sk_pop_free_ex| but performs an invalid // function pointer cast. It exists because some existing callers called // |sk_pop_free| directly. // // TODO(davidben): Migrate callers to bssl::UniquePtr and remove this. 
OPENSSL_EXPORT OPENSSL_DEPRECATED void sk_pop_free( OPENSSL_STACK *sk, OPENSSL_sk_free_func free_func); #if !defined(BORINGSSL_NO_CXX) extern "C++" { BSSL_NAMESPACE_BEGIN namespace internal { template struct StackTraits {}; } BSSL_NAMESPACE_END } #define BORINGSSL_DEFINE_STACK_TRAITS(name, type, is_const) \ extern "C++" { \ BSSL_NAMESPACE_BEGIN \ namespace internal { \ template <> \ struct StackTraits { \ static constexpr bool kIsStack = true; \ using Type = type; \ static constexpr bool kIsConst = is_const; \ }; \ } \ BSSL_NAMESPACE_END \ } #else #define BORINGSSL_DEFINE_STACK_TRAITS(name, type, is_const) #endif #define BORINGSSL_DEFINE_STACK_OF_IMPL(name, ptrtype, constptrtype) \ /* We disable MSVC C4191 in this macro, which warns when pointers are cast \ * to the wrong type. While the cast itself is valid, it is often a bug \ * because calling it through the cast is UB. However, we never actually \ * call functions as |OPENSSL_sk_cmp_func|. The type is just a type-erased \ * function pointer. (C does not guarantee function pointers fit in \ * |void*|, and GCC will warn on this.) Thus we just disable the false \ * positive warning. 
*/ \ OPENSSL_MSVC_PRAGMA(warning(push)) \ OPENSSL_MSVC_PRAGMA(warning(disable : 4191)) \ OPENSSL_CLANG_PRAGMA("clang diagnostic push") \ OPENSSL_CLANG_PRAGMA("clang diagnostic ignored \"-Wunknown-warning-option\"") \ OPENSSL_CLANG_PRAGMA("clang diagnostic ignored \"-Wcast-function-type-strict\"") \ \ DECLARE_STACK_OF(name) \ \ typedef void (*sk_##name##_free_func)(ptrtype); \ typedef ptrtype (*sk_##name##_copy_func)(constptrtype); \ typedef int (*sk_##name##_cmp_func)(constptrtype const *, \ constptrtype const *); \ typedef int (*sk_##name##_delete_if_func)(ptrtype, void *); \ \ OPENSSL_INLINE void sk_##name##_call_free_func( \ OPENSSL_sk_free_func free_func, void *ptr) { \ ((sk_##name##_free_func)free_func)((ptrtype)ptr); \ } \ \ OPENSSL_INLINE void *sk_##name##_call_copy_func( \ OPENSSL_sk_copy_func copy_func, const void *ptr) { \ return (void *)((sk_##name##_copy_func)copy_func)((constptrtype)ptr); \ } \ \ OPENSSL_INLINE int sk_##name##_call_cmp_func(OPENSSL_sk_cmp_func cmp_func, \ const void *a, const void *b) { \ constptrtype a_ptr = (constptrtype)a; \ constptrtype b_ptr = (constptrtype)b; \ /* |cmp_func| expects an extra layer of pointers to match qsort. 
*/ \ return ((sk_##name##_cmp_func)cmp_func)(&a_ptr, &b_ptr); \ } \ \ OPENSSL_INLINE int sk_##name##_call_delete_if_func( \ OPENSSL_sk_delete_if_func func, void *obj, void *data) { \ return ((sk_##name##_delete_if_func)func)((ptrtype)obj, data); \ } \ \ OPENSSL_INLINE STACK_OF(name) *sk_##name##_new(sk_##name##_cmp_func comp) { \ return (STACK_OF(name) *)OPENSSL_sk_new((OPENSSL_sk_cmp_func)comp); \ } \ \ OPENSSL_INLINE STACK_OF(name) *sk_##name##_new_null(void) { \ return (STACK_OF(name) *)OPENSSL_sk_new_null(); \ } \ \ OPENSSL_INLINE size_t sk_##name##_num(const STACK_OF(name) *sk) { \ return OPENSSL_sk_num((const OPENSSL_STACK *)sk); \ } \ \ OPENSSL_INLINE void sk_##name##_zero(STACK_OF(name) *sk) { \ OPENSSL_sk_zero((OPENSSL_STACK *)sk); \ } \ \ OPENSSL_INLINE ptrtype sk_##name##_value(const STACK_OF(name) *sk, \ size_t i) { \ return (ptrtype)OPENSSL_sk_value((const OPENSSL_STACK *)sk, i); \ } \ \ OPENSSL_INLINE ptrtype sk_##name##_set(STACK_OF(name) *sk, size_t i, \ ptrtype p) { \ return (ptrtype)OPENSSL_sk_set((OPENSSL_STACK *)sk, i, (void *)p); \ } \ \ OPENSSL_INLINE void sk_##name##_free(STACK_OF(name) *sk) { \ OPENSSL_sk_free((OPENSSL_STACK *)sk); \ } \ \ OPENSSL_INLINE void sk_##name##_pop_free(STACK_OF(name) *sk, \ sk_##name##_free_func free_func) { \ OPENSSL_sk_pop_free_ex((OPENSSL_STACK *)sk, sk_##name##_call_free_func, \ (OPENSSL_sk_free_func)free_func); \ } \ \ OPENSSL_INLINE size_t sk_##name##_insert(STACK_OF(name) *sk, ptrtype p, \ size_t where) { \ return OPENSSL_sk_insert((OPENSSL_STACK *)sk, (void *)p, where); \ } \ \ OPENSSL_INLINE ptrtype sk_##name##_delete(STACK_OF(name) *sk, \ size_t where) { \ return (ptrtype)OPENSSL_sk_delete((OPENSSL_STACK *)sk, where); \ } \ \ OPENSSL_INLINE ptrtype sk_##name##_delete_ptr(STACK_OF(name) *sk, \ constptrtype p) { \ return (ptrtype)OPENSSL_sk_delete_ptr((OPENSSL_STACK *)sk, \ (const void *)p); \ } \ \ OPENSSL_INLINE void sk_##name##_delete_if( \ STACK_OF(name) *sk, sk_##name##_delete_if_func func, void 
*data) { \ OPENSSL_sk_delete_if((OPENSSL_STACK *)sk, sk_##name##_call_delete_if_func, \ (OPENSSL_sk_delete_if_func)func, data); \ } \ \ OPENSSL_INLINE int sk_##name##_find(const STACK_OF(name) *sk, \ size_t *out_index, constptrtype p) { \ return OPENSSL_sk_find((const OPENSSL_STACK *)sk, out_index, \ (const void *)p, sk_##name##_call_cmp_func); \ } \ \ OPENSSL_INLINE ptrtype sk_##name##_shift(STACK_OF(name) *sk) { \ return (ptrtype)OPENSSL_sk_shift((OPENSSL_STACK *)sk); \ } \ \ OPENSSL_INLINE size_t sk_##name##_push(STACK_OF(name) *sk, ptrtype p) { \ return OPENSSL_sk_push((OPENSSL_STACK *)sk, (void *)p); \ } \ \ OPENSSL_INLINE ptrtype sk_##name##_pop(STACK_OF(name) *sk) { \ return (ptrtype)OPENSSL_sk_pop((OPENSSL_STACK *)sk); \ } \ \ OPENSSL_INLINE STACK_OF(name) *sk_##name##_dup(const STACK_OF(name) *sk) { \ return (STACK_OF(name) *)OPENSSL_sk_dup((const OPENSSL_STACK *)sk); \ } \ \ OPENSSL_INLINE void sk_##name##_sort(STACK_OF(name) *sk) { \ OPENSSL_sk_sort((OPENSSL_STACK *)sk, sk_##name##_call_cmp_func); \ } \ \ OPENSSL_INLINE int sk_##name##_is_sorted(const STACK_OF(name) *sk) { \ return OPENSSL_sk_is_sorted((const OPENSSL_STACK *)sk); \ } \ \ OPENSSL_INLINE sk_##name##_cmp_func sk_##name##_set_cmp_func( \ STACK_OF(name) *sk, sk_##name##_cmp_func comp) { \ return (sk_##name##_cmp_func)OPENSSL_sk_set_cmp_func( \ (OPENSSL_STACK *)sk, (OPENSSL_sk_cmp_func)comp); \ } \ \ OPENSSL_INLINE STACK_OF(name) *sk_##name##_deep_copy( \ const STACK_OF(name) *sk, sk_##name##_copy_func copy_func, \ sk_##name##_free_func free_func) { \ return (STACK_OF(name) *)OPENSSL_sk_deep_copy( \ (const OPENSSL_STACK *)sk, sk_##name##_call_copy_func, \ (OPENSSL_sk_copy_func)copy_func, sk_##name##_call_free_func, \ (OPENSSL_sk_free_func)free_func); \ } \ \ OPENSSL_CLANG_PRAGMA("clang diagnostic pop") \ OPENSSL_MSVC_PRAGMA(warning(pop)) // Built-in stacks. 
typedef char *OPENSSL_STRING; DEFINE_STACK_OF(void) DEFINE_NAMED_STACK_OF(OPENSSL_STRING, char) #if defined(__cplusplus) } // extern C #endif #if !defined(BORINGSSL_NO_CXX) extern "C++" { #include BSSL_NAMESPACE_BEGIN namespace internal { // Stacks defined with |DEFINE_CONST_STACK_OF| are freed with |sk_free|. template struct DeleterImpl::kIsConst>> { static void Free(Stack *sk) { OPENSSL_sk_free(reinterpret_cast(sk)); } }; // Stacks defined with |DEFINE_STACK_OF| are freed with |sk_pop_free| and the // corresponding type's deleter. template struct DeleterImpl::kIsConst>> { static void Free(Stack *sk) { // sk_FOO_pop_free is defined by macros and bound by name, so we cannot // access it from C++ here. using Type = typename StackTraits::Type; OPENSSL_sk_pop_free_ex( reinterpret_cast(sk), [](OPENSSL_sk_free_func /* unused */, void *ptr) { DeleterImpl::Free(reinterpret_cast(ptr)); }, nullptr); } }; template class StackIteratorImpl { public: using Type = typename StackTraits::Type; // Iterators must be default-constructable. StackIteratorImpl() : sk_(nullptr), idx_(0) {} StackIteratorImpl(const Stack *sk, size_t idx) : sk_(sk), idx_(idx) {} bool operator==(StackIteratorImpl other) const { return sk_ == other.sk_ && idx_ == other.idx_; } bool operator!=(StackIteratorImpl other) const { return !(*this == other); } Type *operator*() const { return reinterpret_cast( OPENSSL_sk_value(reinterpret_cast(sk_), idx_)); } StackIteratorImpl &operator++(/* prefix */) { idx_++; return *this; } StackIteratorImpl operator++(int /* postfix */) { StackIteratorImpl copy(*this); ++(*this); return copy; } private: const Stack *sk_; size_t idx_; }; template using StackIterator = std::enable_if_t::kIsStack, StackIteratorImpl>; } // namespace internal // PushToStack pushes |elem| to |sk|. It returns true on success and false on // allocation failure. 
template inline std::enable_if_t::kIsConst, bool> PushToStack(Stack *sk, UniquePtr::Type> elem) { if (!OPENSSL_sk_push(reinterpret_cast(sk), elem.get())) { return false; } // OPENSSL_sk_push takes ownership on success. elem.release(); return true; } BSSL_NAMESPACE_END // Define begin() and end() for stack types so C++ range for loops work. template inline bssl::internal::StackIterator begin(const Stack *sk) { return bssl::internal::StackIterator(sk, 0); } template inline bssl::internal::StackIterator end(const Stack *sk) { return bssl::internal::StackIterator( sk, OPENSSL_sk_num(reinterpret_cast(sk))); } } // extern C++ #endif #endif // OPENSSL_HEADER_STACK_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_target.h ================================================ /* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_TARGET_H #define OPENSSL_HEADER_TARGET_H // Preprocessor symbols that define the target platform. // // This file may be included in C, C++, and assembler and must be compatible // with each environment. It is separated out only to share code between // and . Prefer to include those headers // instead. 
#if defined(__x86_64) || defined(_M_AMD64) || defined(_M_X64) #define OPENSSL_64_BIT #define OPENSSL_X86_64 #elif defined(__x86) || defined(__i386) || defined(__i386__) || defined(_M_IX86) #define OPENSSL_32_BIT #define OPENSSL_X86 #elif defined(__AARCH64EL__) || defined(_M_ARM64) #define OPENSSL_64_BIT #define OPENSSL_AARCH64 #elif defined(__ARMEL__) || defined(_M_ARM) #define OPENSSL_32_BIT #define OPENSSL_ARM #elif defined(__MIPSEL__) && !defined(__LP64__) #define OPENSSL_32_BIT #define OPENSSL_MIPS #elif defined(__MIPSEL__) && defined(__LP64__) #define OPENSSL_64_BIT #define OPENSSL_MIPS64 #elif defined(__riscv) && __SIZEOF_POINTER__ == 8 #define OPENSSL_64_BIT #define OPENSSL_RISCV64 #elif defined(__riscv) && __SIZEOF_POINTER__ == 4 #define OPENSSL_32_BIT #elif defined(__pnacl__) #define OPENSSL_32_BIT #define OPENSSL_PNACL #elif defined(__wasm__) #define OPENSSL_32_BIT #elif defined(__asmjs__) #define OPENSSL_32_BIT #elif defined(__myriad2__) #define OPENSSL_32_BIT #else // The list above enumerates the platforms that BoringSSL supports. For these // platforms we keep a reasonable bar of not breaking them: automated test // coverage, for one, but also we need access to these types for machines for // fixing them. // // However, we know that anything that seems to work will soon be expected // to work and, quickly, the implicit expectation is that every machine will // always work. So this list serves to mark the boundary of what we guarantee. // Of course, you can run the code any many more machines, but then you're // taking on the burden of fixing it and, if you're doing that, then you must // be able to carry local patches. In which case patching this list is trivial. // // BoringSSL will only possibly work on standard 32-bit and 64-bit // two's-complement, little-endian architectures. Functions will not produce // the correct answer on other systems. Run the crypto_test binary, notably // crypto/compiler_test.cc, before trying a new architecture. 
#error "Unknown target CPU" #endif #if defined(__APPLE__) #define OPENSSL_APPLE #endif #if defined(_WIN32) #define OPENSSL_WINDOWS #endif // Trusty and Android baremetal aren't Linux but currently define __linux__. // As a workaround, we exclude them here. // We also exclude nanolibc/CrOS EC. nanolibc/CrOS EC sometimes build for a // non-Linux target (which should not define __linux__), but also sometimes // build for Linux. Although technically running in Linux userspace, this lacks // all the libc APIs we'd normally expect on Linux, so we treat it as a // non-Linux target. // // TODO(b/169780122): Remove this workaround once Trusty no longer defines it. // TODO(b/291101350): Remove this workaround once Android baremetal no longer // defines it. #if defined(__linux__) && !defined(__TRUSTY__) && \ !defined(ANDROID_BAREMETAL) && !defined(OPENSSL_NANOLIBC) && \ !defined(CROS_EC) #define OPENSSL_LINUX #endif #if defined(__Fuchsia__) #define OPENSSL_FUCHSIA #endif // Trusty is Android's TEE target. See // https://source.android.com/docs/security/features/trusty // // Defining this on any other platform is not supported. Other embedded // platforms must introduce their own defines. #if defined(__TRUSTY__) #define OPENSSL_TRUSTY #define OPENSSL_NO_FILESYSTEM #define OPENSSL_NO_POSIX_IO #define OPENSSL_NO_SOCK #define OPENSSL_NO_THREADS_CORRUPT_MEMORY_AND_LEAK_SECRETS_IF_THREADED #endif // nanolibc is a particular minimal libc implementation. Defining this on any // other platform is not supported. Other embedded platforms must introduce // their own defines. #if defined(OPENSSL_NANOLIBC) #define OPENSSL_NO_FILESYSTEM #define OPENSSL_NO_POSIX_IO #define OPENSSL_NO_SOCK #define OPENSSL_NO_THREADS_CORRUPT_MEMORY_AND_LEAK_SECRETS_IF_THREADED #endif // Android baremetal is an embedded target that uses a subset of bionic. // Defining this on any other platform is not supported. Other embedded // platforms must introduce their own defines. 
#if defined(ANDROID_BAREMETAL) #define OPENSSL_NO_FILESYSTEM #define OPENSSL_NO_POSIX_IO #define OPENSSL_NO_SOCK #define OPENSSL_NO_THREADS_CORRUPT_MEMORY_AND_LEAK_SECRETS_IF_THREADED #endif // CROS_EC is an embedded target for ChromeOS Embedded Controller. Defining // this on any other platform is not supported. Other embedded platforms must // introduce their own defines. // // https://chromium.googlesource.com/chromiumos/platform/ec/+/HEAD/README.md #if defined(CROS_EC) #define OPENSSL_NO_FILESYSTEM #define OPENSSL_NO_POSIX_IO #define OPENSSL_NO_SOCK #define OPENSSL_NO_THREADS_CORRUPT_MEMORY_AND_LEAK_SECRETS_IF_THREADED #endif // Zephyr is an open source RTOS, optimized for embedded devices. // Defining this on any other platform is not supported. Other embedded // platforms must introduce their own defines. // // Zephyr supports multithreading with cooperative and preemptive scheduling. // It also implements POSIX Threads (pthread) API, so it's not necessary to // implement BoringSSL internal threading API using some custom API. // // https://www.zephyrproject.org/ #if defined(__ZEPHYR__) #define OPENSSL_NO_FILESYSTEM #define OPENSSL_NO_POSIX_IO #define OPENSSL_NO_SOCK #endif #if defined(__ANDROID_API__) #define OPENSSL_ANDROID #endif #if defined(__FreeBSD__) #define OPENSSL_FREEBSD #endif #if defined(__OpenBSD__) #define OPENSSL_OPENBSD #endif // BoringSSL requires platform's locking APIs to make internal global state // thread-safe, including the PRNG. On some single-threaded embedded platforms, // locking APIs may not exist, so this dependency may be disabled with the // following build flag. // // IMPORTANT: Doing so means the consumer promises the library will never be // used in any multi-threaded context. It causes BoringSSL to be globally // thread-unsafe. Setting it inappropriately will subtly and unpredictably // corrupt memory and leak secret keys. // // Do not set this flag on any platform where threads are possible. 
BoringSSL // maintainers will not provide support for any consumers that do so. Changes // which break such unsupported configurations will not be reverted. #if !defined(OPENSSL_NO_THREADS_CORRUPT_MEMORY_AND_LEAK_SECRETS_IF_THREADED) #define OPENSSL_THREADS #endif #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) && \ !defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) #define BORINGSSL_UNSAFE_DETERMINISTIC_MODE #endif #if defined(__has_feature) #if __has_feature(address_sanitizer) #define OPENSSL_ASAN #endif #if __has_feature(thread_sanitizer) #define OPENSSL_TSAN #endif #if __has_feature(memory_sanitizer) #define OPENSSL_MSAN #define OPENSSL_ASM_INCOMPATIBLE #endif #if __has_feature(hwaddress_sanitizer) #define OPENSSL_HWASAN #endif #endif // Disable 32-bit Arm assembly on Apple platforms. The last iOS version that // supported 32-bit Arm was iOS 10. #if defined(OPENSSL_APPLE) && defined(OPENSSL_ARM) #define OPENSSL_ASM_INCOMPATIBLE #endif #if defined(OPENSSL_ASM_INCOMPATIBLE) #undef OPENSSL_ASM_INCOMPATIBLE #if !defined(OPENSSL_NO_ASM) #define OPENSSL_NO_ASM #endif #endif // OPENSSL_ASM_INCOMPATIBLE // We do not detect any features at runtime on several 32-bit Arm platforms. // Apple platforms and OpenBSD require NEON and moved to 64-bit to pick up Armv8 // extensions. Android baremetal does not aim to support 32-bit Arm at all, but // it simplifies things to make it build. #if defined(OPENSSL_ARM) && !defined(OPENSSL_STATIC_ARMCAP) && \ (defined(OPENSSL_APPLE) || defined(OPENSSL_OPENBSD) || \ defined(ANDROID_BAREMETAL)) #define OPENSSL_STATIC_ARMCAP #endif #endif // OPENSSL_HEADER_TARGET_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_thread.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_THREAD_H #define OPENSSL_HEADER_THREAD_H #include #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // CRYPTO_refcount_t is the type of a reference count. // // Since some platforms use C11 atomics to access this, it should have the // _Atomic qualifier. However, this header is included by C++ programs as well // as C code that might not set -std=c11. So, in practice, it's not possible to // do that. Instead we statically assert that the size and native alignment of // a plain uint32_t and an _Atomic uint32_t are equal in refcount.c. typedef uint32_t CRYPTO_refcount_t; // Deprecated functions. // // Historically, OpenSSL required callers to provide locking callbacks. // BoringSSL does not use external callbacks for locking, but some old code // calls these functions and so no-op implementations are provided. // These defines do nothing but are provided to make old code easier to // compile. #define CRYPTO_LOCK 1 #define CRYPTO_UNLOCK 2 #define CRYPTO_READ 4 #define CRYPTO_WRITE 8 // CRYPTO_num_locks returns one. (This is non-zero that callers who allocate // sizeof(lock) times this value don't get zero and then fail because malloc(0) // returned NULL.) OPENSSL_EXPORT int CRYPTO_num_locks(void); // CRYPTO_set_locking_callback does nothing. OPENSSL_EXPORT void CRYPTO_set_locking_callback( void (*func)(int mode, int lock_num, const char *file, int line)); // CRYPTO_set_add_lock_callback does nothing. OPENSSL_EXPORT void CRYPTO_set_add_lock_callback(int (*func)( int *num, int amount, int lock_num, const char *file, int line)); // CRYPTO_get_locking_callback returns NULL. OPENSSL_EXPORT void (*CRYPTO_get_locking_callback(void))(int mode, int lock_num, const char *file, int line); // CRYPTO_get_lock_name returns a fixed, dummy string. 
OPENSSL_EXPORT const char *CRYPTO_get_lock_name(int lock_num); // CRYPTO_THREADID_set_callback returns one. OPENSSL_EXPORT int CRYPTO_THREADID_set_callback( void (*threadid_func)(CRYPTO_THREADID *threadid)); // CRYPTO_THREADID_set_numeric does nothing. OPENSSL_EXPORT void CRYPTO_THREADID_set_numeric(CRYPTO_THREADID *id, unsigned long val); // CRYPTO_THREADID_set_pointer does nothing. OPENSSL_EXPORT void CRYPTO_THREADID_set_pointer(CRYPTO_THREADID *id, void *ptr); // CRYPTO_THREADID_current does nothing. OPENSSL_EXPORT void CRYPTO_THREADID_current(CRYPTO_THREADID *id); // CRYPTO_set_id_callback does nothing. OPENSSL_EXPORT void CRYPTO_set_id_callback(unsigned long (*func)(void)); typedef struct { int references; struct CRYPTO_dynlock_value *data; } CRYPTO_dynlock; // CRYPTO_set_dynlock_create_callback does nothing. OPENSSL_EXPORT void CRYPTO_set_dynlock_create_callback( struct CRYPTO_dynlock_value *(*dyn_create_function)(const char *file, int line)); // CRYPTO_set_dynlock_lock_callback does nothing. OPENSSL_EXPORT void CRYPTO_set_dynlock_lock_callback(void (*dyn_lock_function)( int mode, struct CRYPTO_dynlock_value *l, const char *file, int line)); // CRYPTO_set_dynlock_destroy_callback does nothing. OPENSSL_EXPORT void CRYPTO_set_dynlock_destroy_callback( void (*dyn_destroy_function)(struct CRYPTO_dynlock_value *l, const char *file, int line)); // CRYPTO_get_dynlock_create_callback returns NULL. OPENSSL_EXPORT struct CRYPTO_dynlock_value *( *CRYPTO_get_dynlock_create_callback(void))(const char *file, int line); // CRYPTO_get_dynlock_lock_callback returns NULL. OPENSSL_EXPORT void (*CRYPTO_get_dynlock_lock_callback(void))( int mode, struct CRYPTO_dynlock_value *l, const char *file, int line); // CRYPTO_get_dynlock_destroy_callback returns NULL. 
OPENSSL_EXPORT void (*CRYPTO_get_dynlock_destroy_callback(void))( struct CRYPTO_dynlock_value *l, const char *file, int line); #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_THREAD_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_time.h ================================================ /* Copyright 2024 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_TIME_H #define OPENSSL_HEADER_TIME_H // Compatibility header, to be deprecated. use instead. #include "CNIOBoringSSL_posix_time.h" #endif // OPENSSL_HEADER_TIME_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_tls1.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * Copyright 2005 Nokia. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_TLS1_H #define OPENSSL_HEADER_TLS1_H #include "CNIOBoringSSL_base.h" #ifdef __cplusplus extern "C" { #endif #define TLS1_AD_END_OF_EARLY_DATA 1 #define TLS1_AD_DECRYPTION_FAILED 21 #define TLS1_AD_RECORD_OVERFLOW 22 #define TLS1_AD_UNKNOWN_CA 48 #define TLS1_AD_ACCESS_DENIED 49 #define TLS1_AD_DECODE_ERROR 50 #define TLS1_AD_DECRYPT_ERROR 51 #define TLS1_AD_EXPORT_RESTRICTION 60 #define TLS1_AD_PROTOCOL_VERSION 70 #define TLS1_AD_INSUFFICIENT_SECURITY 71 #define TLS1_AD_INTERNAL_ERROR 80 #define TLS1_AD_USER_CANCELLED 90 #define TLS1_AD_NO_RENEGOTIATION 100 #define TLS1_AD_MISSING_EXTENSION 109 #define TLS1_AD_UNSUPPORTED_EXTENSION 110 #define TLS1_AD_CERTIFICATE_UNOBTAINABLE 111 #define TLS1_AD_UNRECOGNIZED_NAME 112 #define TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE 113 #define TLS1_AD_BAD_CERTIFICATE_HASH_VALUE 114 #define TLS1_AD_UNKNOWN_PSK_IDENTITY 115 #define TLS1_AD_CERTIFICATE_REQUIRED 116 #define TLS1_AD_NO_APPLICATION_PROTOCOL 120 #define TLS1_AD_ECH_REQUIRED 121 // draft-ietf-tls-esni-13 // ExtensionType values from RFC 6066 #define TLSEXT_TYPE_server_name 0 #define TLSEXT_TYPE_status_request 5 // ExtensionType values from RFC 4492 #define TLSEXT_TYPE_ec_point_formats 11 // ExtensionType values from RFC 5246 #define TLSEXT_TYPE_signature_algorithms 13 // ExtensionType value from RFC 5764 #define TLSEXT_TYPE_srtp 14 // ExtensionType value from RFC 7301 #define TLSEXT_TYPE_application_layer_protocol_negotiation 16 // ExtensionType value from RFC 7685 #define TLSEXT_TYPE_padding 21 // ExtensionType value from RFC 7627 #define TLSEXT_TYPE_extended_master_secret 23 // ExtensionType value from draft-ietf-quic-tls. 
Drafts 00 through 32 use // 0xffa5 which is part of the Private Use section of the registry, and it // collides with TLS-LTS and, based on scans, something else too (though this // hasn't been a problem in practice since it's QUIC-only). Drafts 33 onward // use the value 57 which was officially registered with IANA. #define TLSEXT_TYPE_quic_transport_parameters_legacy 0xffa5 // ExtensionType value from RFC 9000 #define TLSEXT_TYPE_quic_transport_parameters 57 // TLSEXT_TYPE_quic_transport_parameters_standard is an alias for // |TLSEXT_TYPE_quic_transport_parameters|. Use // |TLSEXT_TYPE_quic_transport_parameters| instead. #define TLSEXT_TYPE_quic_transport_parameters_standard \ TLSEXT_TYPE_quic_transport_parameters // ExtensionType value from RFC 8879 #define TLSEXT_TYPE_cert_compression 27 // ExtensionType value from RFC 4507 #define TLSEXT_TYPE_session_ticket 35 // ExtensionType values from RFC 8446 #define TLSEXT_TYPE_supported_groups 10 #define TLSEXT_TYPE_pre_shared_key 41 #define TLSEXT_TYPE_early_data 42 #define TLSEXT_TYPE_supported_versions 43 #define TLSEXT_TYPE_cookie 44 #define TLSEXT_TYPE_psk_key_exchange_modes 45 #define TLSEXT_TYPE_certificate_authorities 47 #define TLSEXT_TYPE_signature_algorithms_cert 50 #define TLSEXT_TYPE_key_share 51 // ExtensionType value from RFC 5746 #define TLSEXT_TYPE_renegotiate 0xff01 // ExtensionType value from RFC 9345 #define TLSEXT_TYPE_delegated_credential 34 // ExtensionType value from draft-vvv-tls-alps. This is not an IANA defined // extension number. #define TLSEXT_TYPE_application_settings_old 17513 #define TLSEXT_TYPE_application_settings 17613 // ExtensionType values from draft-ietf-tls-esni-13. This is not an IANA defined // extension number. 
#define TLSEXT_TYPE_encrypted_client_hello 0xfe0d #define TLSEXT_TYPE_ech_outer_extensions 0xfd00 // ExtensionType value from RFC 6962 #define TLSEXT_TYPE_certificate_timestamp 18 // This is not an IANA defined extension number #define TLSEXT_TYPE_next_proto_neg 13172 // This is not an IANA defined extension number #define TLSEXT_TYPE_channel_id 30032 // status request value from RFC 3546 #define TLSEXT_STATUSTYPE_nothing (-1) #define TLSEXT_STATUSTYPE_ocsp 1 // ECPointFormat values from RFC 4492 #define TLSEXT_ECPOINTFORMAT_uncompressed 0 #define TLSEXT_ECPOINTFORMAT_ansiX962_compressed_prime 1 // Signature and hash algorithms from RFC 5246 #define TLSEXT_signature_anonymous 0 #define TLSEXT_signature_rsa 1 #define TLSEXT_signature_dsa 2 #define TLSEXT_signature_ecdsa 3 #define TLSEXT_hash_none 0 #define TLSEXT_hash_md5 1 #define TLSEXT_hash_sha1 2 #define TLSEXT_hash_sha224 3 #define TLSEXT_hash_sha256 4 #define TLSEXT_hash_sha384 5 #define TLSEXT_hash_sha512 6 // From https://www.rfc-editor.org/rfc/rfc8879.html#section-3 #define TLSEXT_cert_compression_zlib 1 #define TLSEXT_cert_compression_brotli 2 #define TLSEXT_MAXLEN_host_name 255 // PSK ciphersuites from 4279 #define TLS1_CK_PSK_WITH_RC4_128_SHA 0x0300008A #define TLS1_CK_PSK_WITH_3DES_EDE_CBC_SHA 0x0300008B #define TLS1_CK_PSK_WITH_AES_128_CBC_SHA 0x0300008C #define TLS1_CK_PSK_WITH_AES_256_CBC_SHA 0x0300008D // PSK ciphersuites from RFC 5489 #define TLS1_CK_ECDHE_PSK_WITH_AES_128_CBC_SHA 0x0300C035 #define TLS1_CK_ECDHE_PSK_WITH_AES_256_CBC_SHA 0x0300C036 // Additional TLS ciphersuites from expired Internet Draft // draft-ietf-tls-56-bit-ciphersuites-01.txt // (available if TLS1_ALLOW_EXPERIMENTAL_CIPHERSUITES is defined, see // s3_lib.c). We actually treat them like SSL 3.0 ciphers, which we probably // shouldn't. Note that the first two are actually not in the IDs. 
#define TLS1_CK_RSA_EXPORT1024_WITH_RC4_56_MD5 0x03000060 // not in ID #define TLS1_CK_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5 0x03000061 // not in ID #define TLS1_CK_RSA_EXPORT1024_WITH_DES_CBC_SHA 0x03000062 #define TLS1_CK_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA 0x03000063 #define TLS1_CK_RSA_EXPORT1024_WITH_RC4_56_SHA 0x03000064 #define TLS1_CK_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA 0x03000065 #define TLS1_CK_DHE_DSS_WITH_RC4_128_SHA 0x03000066 // AES ciphersuites from RFC 3268 #define TLS1_CK_RSA_WITH_AES_128_SHA 0x0300002F #define TLS1_CK_DH_DSS_WITH_AES_128_SHA 0x03000030 #define TLS1_CK_DH_RSA_WITH_AES_128_SHA 0x03000031 #define TLS1_CK_DHE_DSS_WITH_AES_128_SHA 0x03000032 #define TLS1_CK_DHE_RSA_WITH_AES_128_SHA 0x03000033 #define TLS1_CK_ADH_WITH_AES_128_SHA 0x03000034 #define TLS1_CK_RSA_WITH_AES_256_SHA 0x03000035 #define TLS1_CK_DH_DSS_WITH_AES_256_SHA 0x03000036 #define TLS1_CK_DH_RSA_WITH_AES_256_SHA 0x03000037 #define TLS1_CK_DHE_DSS_WITH_AES_256_SHA 0x03000038 #define TLS1_CK_DHE_RSA_WITH_AES_256_SHA 0x03000039 #define TLS1_CK_ADH_WITH_AES_256_SHA 0x0300003A // TLS v1.2 ciphersuites #define TLS1_CK_RSA_WITH_NULL_SHA256 0x0300003B #define TLS1_CK_RSA_WITH_AES_128_SHA256 0x0300003C #define TLS1_CK_RSA_WITH_AES_256_SHA256 0x0300003D #define TLS1_CK_DH_DSS_WITH_AES_128_SHA256 0x0300003E #define TLS1_CK_DH_RSA_WITH_AES_128_SHA256 0x0300003F #define TLS1_CK_DHE_DSS_WITH_AES_128_SHA256 0x03000040 // Camellia ciphersuites from RFC 4132 #define TLS1_CK_RSA_WITH_CAMELLIA_128_CBC_SHA 0x03000041 #define TLS1_CK_DH_DSS_WITH_CAMELLIA_128_CBC_SHA 0x03000042 #define TLS1_CK_DH_RSA_WITH_CAMELLIA_128_CBC_SHA 0x03000043 #define TLS1_CK_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA 0x03000044 #define TLS1_CK_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA 0x03000045 #define TLS1_CK_ADH_WITH_CAMELLIA_128_CBC_SHA 0x03000046 // TLS v1.2 ciphersuites #define TLS1_CK_DHE_RSA_WITH_AES_128_SHA256 0x03000067 #define TLS1_CK_DH_DSS_WITH_AES_256_SHA256 0x03000068 #define TLS1_CK_DH_RSA_WITH_AES_256_SHA256 0x03000069 
#define TLS1_CK_DHE_DSS_WITH_AES_256_SHA256 0x0300006A #define TLS1_CK_DHE_RSA_WITH_AES_256_SHA256 0x0300006B #define TLS1_CK_ADH_WITH_AES_128_SHA256 0x0300006C #define TLS1_CK_ADH_WITH_AES_256_SHA256 0x0300006D // Camellia ciphersuites from RFC 4132 #define TLS1_CK_RSA_WITH_CAMELLIA_256_CBC_SHA 0x03000084 #define TLS1_CK_DH_DSS_WITH_CAMELLIA_256_CBC_SHA 0x03000085 #define TLS1_CK_DH_RSA_WITH_CAMELLIA_256_CBC_SHA 0x03000086 #define TLS1_CK_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA 0x03000087 #define TLS1_CK_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA 0x03000088 #define TLS1_CK_ADH_WITH_CAMELLIA_256_CBC_SHA 0x03000089 // SEED ciphersuites from RFC 4162 #define TLS1_CK_RSA_WITH_SEED_SHA 0x03000096 #define TLS1_CK_DH_DSS_WITH_SEED_SHA 0x03000097 #define TLS1_CK_DH_RSA_WITH_SEED_SHA 0x03000098 #define TLS1_CK_DHE_DSS_WITH_SEED_SHA 0x03000099 #define TLS1_CK_DHE_RSA_WITH_SEED_SHA 0x0300009A #define TLS1_CK_ADH_WITH_SEED_SHA 0x0300009B // TLS v1.2 GCM ciphersuites from RFC 5288 #define TLS1_CK_RSA_WITH_AES_128_GCM_SHA256 0x0300009C #define TLS1_CK_RSA_WITH_AES_256_GCM_SHA384 0x0300009D #define TLS1_CK_DHE_RSA_WITH_AES_128_GCM_SHA256 0x0300009E #define TLS1_CK_DHE_RSA_WITH_AES_256_GCM_SHA384 0x0300009F #define TLS1_CK_DH_RSA_WITH_AES_128_GCM_SHA256 0x030000A0 #define TLS1_CK_DH_RSA_WITH_AES_256_GCM_SHA384 0x030000A1 #define TLS1_CK_DHE_DSS_WITH_AES_128_GCM_SHA256 0x030000A2 #define TLS1_CK_DHE_DSS_WITH_AES_256_GCM_SHA384 0x030000A3 #define TLS1_CK_DH_DSS_WITH_AES_128_GCM_SHA256 0x030000A4 #define TLS1_CK_DH_DSS_WITH_AES_256_GCM_SHA384 0x030000A5 #define TLS1_CK_ADH_WITH_AES_128_GCM_SHA256 0x030000A6 #define TLS1_CK_ADH_WITH_AES_256_GCM_SHA384 0x030000A7 // ECC ciphersuites from RFC 4492 #define TLS1_CK_ECDH_ECDSA_WITH_NULL_SHA 0x0300C001 #define TLS1_CK_ECDH_ECDSA_WITH_RC4_128_SHA 0x0300C002 #define TLS1_CK_ECDH_ECDSA_WITH_DES_192_CBC3_SHA 0x0300C003 #define TLS1_CK_ECDH_ECDSA_WITH_AES_128_CBC_SHA 0x0300C004 #define TLS1_CK_ECDH_ECDSA_WITH_AES_256_CBC_SHA 0x0300C005 #define 
TLS1_CK_ECDHE_ECDSA_WITH_NULL_SHA 0x0300C006 #define TLS1_CK_ECDHE_ECDSA_WITH_RC4_128_SHA 0x0300C007 #define TLS1_CK_ECDHE_ECDSA_WITH_DES_192_CBC3_SHA 0x0300C008 #define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_CBC_SHA 0x0300C009 #define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_CBC_SHA 0x0300C00A #define TLS1_CK_ECDH_RSA_WITH_NULL_SHA 0x0300C00B #define TLS1_CK_ECDH_RSA_WITH_RC4_128_SHA 0x0300C00C #define TLS1_CK_ECDH_RSA_WITH_DES_192_CBC3_SHA 0x0300C00D #define TLS1_CK_ECDH_RSA_WITH_AES_128_CBC_SHA 0x0300C00E #define TLS1_CK_ECDH_RSA_WITH_AES_256_CBC_SHA 0x0300C00F #define TLS1_CK_ECDHE_RSA_WITH_NULL_SHA 0x0300C010 #define TLS1_CK_ECDHE_RSA_WITH_RC4_128_SHA 0x0300C011 #define TLS1_CK_ECDHE_RSA_WITH_DES_192_CBC3_SHA 0x0300C012 #define TLS1_CK_ECDHE_RSA_WITH_AES_128_CBC_SHA 0x0300C013 #define TLS1_CK_ECDHE_RSA_WITH_AES_256_CBC_SHA 0x0300C014 #define TLS1_CK_ECDHE_RSA_WITH_AES_128_CBC_SHA256 0x0300C027 #define TLS1_CK_ECDH_anon_WITH_NULL_SHA 0x0300C015 #define TLS1_CK_ECDH_anon_WITH_RC4_128_SHA 0x0300C016 #define TLS1_CK_ECDH_anon_WITH_DES_192_CBC3_SHA 0x0300C017 #define TLS1_CK_ECDH_anon_WITH_AES_128_CBC_SHA 0x0300C018 #define TLS1_CK_ECDH_anon_WITH_AES_256_CBC_SHA 0x0300C019 // SRP ciphersuites from RFC 5054 #define TLS1_CK_SRP_SHA_WITH_3DES_EDE_CBC_SHA 0x0300C01A #define TLS1_CK_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA 0x0300C01B #define TLS1_CK_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA 0x0300C01C #define TLS1_CK_SRP_SHA_WITH_AES_128_CBC_SHA 0x0300C01D #define TLS1_CK_SRP_SHA_RSA_WITH_AES_128_CBC_SHA 0x0300C01E #define TLS1_CK_SRP_SHA_DSS_WITH_AES_128_CBC_SHA 0x0300C01F #define TLS1_CK_SRP_SHA_WITH_AES_256_CBC_SHA 0x0300C020 #define TLS1_CK_SRP_SHA_RSA_WITH_AES_256_CBC_SHA 0x0300C021 #define TLS1_CK_SRP_SHA_DSS_WITH_AES_256_CBC_SHA 0x0300C022 // ECDH HMAC based ciphersuites from RFC 5289 #define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_SHA256 0x0300C023 #define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_SHA384 0x0300C024 #define TLS1_CK_ECDH_ECDSA_WITH_AES_128_SHA256 0x0300C025 #define 
TLS1_CK_ECDH_ECDSA_WITH_AES_256_SHA384 0x0300C026 #define TLS1_CK_ECDHE_RSA_WITH_AES_128_SHA256 0x0300C027 #define TLS1_CK_ECDHE_RSA_WITH_AES_256_SHA384 0x0300C028 #define TLS1_CK_ECDH_RSA_WITH_AES_128_SHA256 0x0300C029 #define TLS1_CK_ECDH_RSA_WITH_AES_256_SHA384 0x0300C02A // ECDH GCM based ciphersuites from RFC 5289 #define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 0x0300C02B #define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 0x0300C02C #define TLS1_CK_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 0x0300C02D #define TLS1_CK_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 0x0300C02E #define TLS1_CK_ECDHE_RSA_WITH_AES_128_GCM_SHA256 0x0300C02F #define TLS1_CK_ECDHE_RSA_WITH_AES_256_GCM_SHA384 0x0300C030 #define TLS1_CK_ECDH_RSA_WITH_AES_128_GCM_SHA256 0x0300C031 #define TLS1_CK_ECDH_RSA_WITH_AES_256_GCM_SHA384 0x0300C032 // ChaCha20-Poly1305 cipher suites from RFC 7905. #define TLS1_CK_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 0x0300CCA8 #define TLS1_CK_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 0x0300CCA9 #define TLS1_CK_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 0x0300CCAC // TLS 1.3 ciphersuites from RFC 8446. #define TLS1_3_CK_AES_128_GCM_SHA256 0x03001301 #define TLS1_3_CK_AES_256_GCM_SHA384 0x03001302 #define TLS1_3_CK_CHACHA20_POLY1305_SHA256 0x03001303 // The following constants are legacy aliases of |TLS1_3_CK_*|. // TODO(davidben): Migrate callers to the new name and remove these. #define TLS1_CK_AES_128_GCM_SHA256 TLS1_3_CK_AES_128_GCM_SHA256 #define TLS1_CK_AES_256_GCM_SHA384 TLS1_3_CK_AES_256_GCM_SHA384 #define TLS1_CK_CHACHA20_POLY1305_SHA256 TLS1_3_CK_CHACHA20_POLY1305_SHA256 // XXX // Inconsistency alert: // The OpenSSL names of ciphers with ephemeral DH here include the string // "DHE", while elsewhere it has always been "EDH". // (The alias for the list of all such ciphers also is "EDH".) // The specifications speak of "EDH"; maybe we should allow both forms // for everything. 
#define TLS1_TXT_RSA_EXPORT1024_WITH_RC4_56_MD5 "EXP1024-RC4-MD5" #define TLS1_TXT_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5 "EXP1024-RC2-CBC-MD5" #define TLS1_TXT_RSA_EXPORT1024_WITH_DES_CBC_SHA "EXP1024-DES-CBC-SHA" #define TLS1_TXT_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA \ "EXP1024-DHE-DSS-DES-CBC-SHA" #define TLS1_TXT_RSA_EXPORT1024_WITH_RC4_56_SHA "EXP1024-RC4-SHA" #define TLS1_TXT_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA "EXP1024-DHE-DSS-RC4-SHA" #define TLS1_TXT_DHE_DSS_WITH_RC4_128_SHA "DHE-DSS-RC4-SHA" // AES ciphersuites from RFC 3268 #define TLS1_TXT_RSA_WITH_AES_128_SHA "AES128-SHA" #define TLS1_TXT_DH_DSS_WITH_AES_128_SHA "DH-DSS-AES128-SHA" #define TLS1_TXT_DH_RSA_WITH_AES_128_SHA "DH-RSA-AES128-SHA" #define TLS1_TXT_DHE_DSS_WITH_AES_128_SHA "DHE-DSS-AES128-SHA" #define TLS1_TXT_DHE_RSA_WITH_AES_128_SHA "DHE-RSA-AES128-SHA" #define TLS1_TXT_ADH_WITH_AES_128_SHA "ADH-AES128-SHA" #define TLS1_TXT_RSA_WITH_AES_256_SHA "AES256-SHA" #define TLS1_TXT_DH_DSS_WITH_AES_256_SHA "DH-DSS-AES256-SHA" #define TLS1_TXT_DH_RSA_WITH_AES_256_SHA "DH-RSA-AES256-SHA" #define TLS1_TXT_DHE_DSS_WITH_AES_256_SHA "DHE-DSS-AES256-SHA" #define TLS1_TXT_DHE_RSA_WITH_AES_256_SHA "DHE-RSA-AES256-SHA" #define TLS1_TXT_ADH_WITH_AES_256_SHA "ADH-AES256-SHA" // ECC ciphersuites from RFC 4492 #define TLS1_TXT_ECDH_ECDSA_WITH_NULL_SHA "ECDH-ECDSA-NULL-SHA" #define TLS1_TXT_ECDH_ECDSA_WITH_RC4_128_SHA "ECDH-ECDSA-RC4-SHA" #define TLS1_TXT_ECDH_ECDSA_WITH_DES_192_CBC3_SHA "ECDH-ECDSA-DES-CBC3-SHA" #define TLS1_TXT_ECDH_ECDSA_WITH_AES_128_CBC_SHA "ECDH-ECDSA-AES128-SHA" #define TLS1_TXT_ECDH_ECDSA_WITH_AES_256_CBC_SHA "ECDH-ECDSA-AES256-SHA" #define TLS1_TXT_ECDHE_ECDSA_WITH_NULL_SHA "ECDHE-ECDSA-NULL-SHA" #define TLS1_TXT_ECDHE_ECDSA_WITH_RC4_128_SHA "ECDHE-ECDSA-RC4-SHA" #define TLS1_TXT_ECDHE_ECDSA_WITH_DES_192_CBC3_SHA "ECDHE-ECDSA-DES-CBC3-SHA" #define TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_CBC_SHA "ECDHE-ECDSA-AES128-SHA" #define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_CBC_SHA "ECDHE-ECDSA-AES256-SHA" #define 
TLS1_TXT_ECDH_RSA_WITH_NULL_SHA "ECDH-RSA-NULL-SHA" #define TLS1_TXT_ECDH_RSA_WITH_RC4_128_SHA "ECDH-RSA-RC4-SHA" #define TLS1_TXT_ECDH_RSA_WITH_DES_192_CBC3_SHA "ECDH-RSA-DES-CBC3-SHA" #define TLS1_TXT_ECDH_RSA_WITH_AES_128_CBC_SHA "ECDH-RSA-AES128-SHA" #define TLS1_TXT_ECDH_RSA_WITH_AES_256_CBC_SHA "ECDH-RSA-AES256-SHA" #define TLS1_TXT_ECDHE_RSA_WITH_NULL_SHA "ECDHE-RSA-NULL-SHA" #define TLS1_TXT_ECDHE_RSA_WITH_RC4_128_SHA "ECDHE-RSA-RC4-SHA" #define TLS1_TXT_ECDHE_RSA_WITH_DES_192_CBC3_SHA "ECDHE-RSA-DES-CBC3-SHA" #define TLS1_TXT_ECDHE_RSA_WITH_AES_128_CBC_SHA "ECDHE-RSA-AES128-SHA" #define TLS1_TXT_ECDHE_RSA_WITH_AES_256_CBC_SHA "ECDHE-RSA-AES256-SHA" #define TLS1_TXT_ECDHE_RSA_WITH_AES_128_CBC_SHA256 "ECDHE-RSA-AES128-SHA256" #define TLS1_TXT_ECDH_anon_WITH_NULL_SHA "AECDH-NULL-SHA" #define TLS1_TXT_ECDH_anon_WITH_RC4_128_SHA "AECDH-RC4-SHA" #define TLS1_TXT_ECDH_anon_WITH_DES_192_CBC3_SHA "AECDH-DES-CBC3-SHA" #define TLS1_TXT_ECDH_anon_WITH_AES_128_CBC_SHA "AECDH-AES128-SHA" #define TLS1_TXT_ECDH_anon_WITH_AES_256_CBC_SHA "AECDH-AES256-SHA" // PSK ciphersuites from RFC 4279 #define TLS1_TXT_PSK_WITH_RC4_128_SHA "PSK-RC4-SHA" #define TLS1_TXT_PSK_WITH_3DES_EDE_CBC_SHA "PSK-3DES-EDE-CBC-SHA" #define TLS1_TXT_PSK_WITH_AES_128_CBC_SHA "PSK-AES128-CBC-SHA" #define TLS1_TXT_PSK_WITH_AES_256_CBC_SHA "PSK-AES256-CBC-SHA" // PSK ciphersuites from RFC 5489 #define TLS1_TXT_ECDHE_PSK_WITH_AES_128_CBC_SHA "ECDHE-PSK-AES128-CBC-SHA" #define TLS1_TXT_ECDHE_PSK_WITH_AES_256_CBC_SHA "ECDHE-PSK-AES256-CBC-SHA" // SRP ciphersuite from RFC 5054 #define TLS1_TXT_SRP_SHA_WITH_3DES_EDE_CBC_SHA "SRP-3DES-EDE-CBC-SHA" #define TLS1_TXT_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA "SRP-RSA-3DES-EDE-CBC-SHA" #define TLS1_TXT_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA "SRP-DSS-3DES-EDE-CBC-SHA" #define TLS1_TXT_SRP_SHA_WITH_AES_128_CBC_SHA "SRP-AES-128-CBC-SHA" #define TLS1_TXT_SRP_SHA_RSA_WITH_AES_128_CBC_SHA "SRP-RSA-AES-128-CBC-SHA" #define TLS1_TXT_SRP_SHA_DSS_WITH_AES_128_CBC_SHA 
"SRP-DSS-AES-128-CBC-SHA" #define TLS1_TXT_SRP_SHA_WITH_AES_256_CBC_SHA "SRP-AES-256-CBC-SHA" #define TLS1_TXT_SRP_SHA_RSA_WITH_AES_256_CBC_SHA "SRP-RSA-AES-256-CBC-SHA" #define TLS1_TXT_SRP_SHA_DSS_WITH_AES_256_CBC_SHA "SRP-DSS-AES-256-CBC-SHA" // Camellia ciphersuites from RFC 4132 #define TLS1_TXT_RSA_WITH_CAMELLIA_128_CBC_SHA "CAMELLIA128-SHA" #define TLS1_TXT_DH_DSS_WITH_CAMELLIA_128_CBC_SHA "DH-DSS-CAMELLIA128-SHA" #define TLS1_TXT_DH_RSA_WITH_CAMELLIA_128_CBC_SHA "DH-RSA-CAMELLIA128-SHA" #define TLS1_TXT_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA "DHE-DSS-CAMELLIA128-SHA" #define TLS1_TXT_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA "DHE-RSA-CAMELLIA128-SHA" #define TLS1_TXT_ADH_WITH_CAMELLIA_128_CBC_SHA "ADH-CAMELLIA128-SHA" #define TLS1_TXT_RSA_WITH_CAMELLIA_256_CBC_SHA "CAMELLIA256-SHA" #define TLS1_TXT_DH_DSS_WITH_CAMELLIA_256_CBC_SHA "DH-DSS-CAMELLIA256-SHA" #define TLS1_TXT_DH_RSA_WITH_CAMELLIA_256_CBC_SHA "DH-RSA-CAMELLIA256-SHA" #define TLS1_TXT_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA "DHE-DSS-CAMELLIA256-SHA" #define TLS1_TXT_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA "DHE-RSA-CAMELLIA256-SHA" #define TLS1_TXT_ADH_WITH_CAMELLIA_256_CBC_SHA "ADH-CAMELLIA256-SHA" // SEED ciphersuites from RFC 4162 #define TLS1_TXT_RSA_WITH_SEED_SHA "SEED-SHA" #define TLS1_TXT_DH_DSS_WITH_SEED_SHA "DH-DSS-SEED-SHA" #define TLS1_TXT_DH_RSA_WITH_SEED_SHA "DH-RSA-SEED-SHA" #define TLS1_TXT_DHE_DSS_WITH_SEED_SHA "DHE-DSS-SEED-SHA" #define TLS1_TXT_DHE_RSA_WITH_SEED_SHA "DHE-RSA-SEED-SHA" #define TLS1_TXT_ADH_WITH_SEED_SHA "ADH-SEED-SHA" // TLS v1.2 ciphersuites #define TLS1_TXT_RSA_WITH_NULL_SHA256 "NULL-SHA256" #define TLS1_TXT_RSA_WITH_AES_128_SHA256 "AES128-SHA256" #define TLS1_TXT_RSA_WITH_AES_256_SHA256 "AES256-SHA256" #define TLS1_TXT_DH_DSS_WITH_AES_128_SHA256 "DH-DSS-AES128-SHA256" #define TLS1_TXT_DH_RSA_WITH_AES_128_SHA256 "DH-RSA-AES128-SHA256" #define TLS1_TXT_DHE_DSS_WITH_AES_128_SHA256 "DHE-DSS-AES128-SHA256" #define TLS1_TXT_DHE_RSA_WITH_AES_128_SHA256 "DHE-RSA-AES128-SHA256" #define 
TLS1_TXT_DH_DSS_WITH_AES_256_SHA256 "DH-DSS-AES256-SHA256" #define TLS1_TXT_DH_RSA_WITH_AES_256_SHA256 "DH-RSA-AES256-SHA256" #define TLS1_TXT_DHE_DSS_WITH_AES_256_SHA256 "DHE-DSS-AES256-SHA256" #define TLS1_TXT_DHE_RSA_WITH_AES_256_SHA256 "DHE-RSA-AES256-SHA256" #define TLS1_TXT_ADH_WITH_AES_128_SHA256 "ADH-AES128-SHA256" #define TLS1_TXT_ADH_WITH_AES_256_SHA256 "ADH-AES256-SHA256" // TLS v1.2 GCM ciphersuites from RFC 5288 #define TLS1_TXT_RSA_WITH_AES_128_GCM_SHA256 "AES128-GCM-SHA256" #define TLS1_TXT_RSA_WITH_AES_256_GCM_SHA384 "AES256-GCM-SHA384" #define TLS1_TXT_DHE_RSA_WITH_AES_128_GCM_SHA256 "DHE-RSA-AES128-GCM-SHA256" #define TLS1_TXT_DHE_RSA_WITH_AES_256_GCM_SHA384 "DHE-RSA-AES256-GCM-SHA384" #define TLS1_TXT_DH_RSA_WITH_AES_128_GCM_SHA256 "DH-RSA-AES128-GCM-SHA256" #define TLS1_TXT_DH_RSA_WITH_AES_256_GCM_SHA384 "DH-RSA-AES256-GCM-SHA384" #define TLS1_TXT_DHE_DSS_WITH_AES_128_GCM_SHA256 "DHE-DSS-AES128-GCM-SHA256" #define TLS1_TXT_DHE_DSS_WITH_AES_256_GCM_SHA384 "DHE-DSS-AES256-GCM-SHA384" #define TLS1_TXT_DH_DSS_WITH_AES_128_GCM_SHA256 "DH-DSS-AES128-GCM-SHA256" #define TLS1_TXT_DH_DSS_WITH_AES_256_GCM_SHA384 "DH-DSS-AES256-GCM-SHA384" #define TLS1_TXT_ADH_WITH_AES_128_GCM_SHA256 "ADH-AES128-GCM-SHA256" #define TLS1_TXT_ADH_WITH_AES_256_GCM_SHA384 "ADH-AES256-GCM-SHA384" // ECDH HMAC based ciphersuites from RFC 5289 #define TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_SHA256 "ECDHE-ECDSA-AES128-SHA256" #define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_SHA384 "ECDHE-ECDSA-AES256-SHA384" #define TLS1_TXT_ECDH_ECDSA_WITH_AES_128_SHA256 "ECDH-ECDSA-AES128-SHA256" #define TLS1_TXT_ECDH_ECDSA_WITH_AES_256_SHA384 "ECDH-ECDSA-AES256-SHA384" #define TLS1_TXT_ECDHE_RSA_WITH_AES_128_SHA256 "ECDHE-RSA-AES128-SHA256" #define TLS1_TXT_ECDHE_RSA_WITH_AES_256_SHA384 "ECDHE-RSA-AES256-SHA384" #define TLS1_TXT_ECDH_RSA_WITH_AES_128_SHA256 "ECDH-RSA-AES128-SHA256" #define TLS1_TXT_ECDH_RSA_WITH_AES_256_SHA384 "ECDH-RSA-AES256-SHA384" // ECDH GCM based ciphersuites from RFC 5289 #define 
TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 \ "ECDHE-ECDSA-AES128-GCM-SHA256" #define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 \ "ECDHE-ECDSA-AES256-GCM-SHA384" #define TLS1_TXT_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 \ "ECDH-ECDSA-AES128-GCM-SHA256" #define TLS1_TXT_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 \ "ECDH-ECDSA-AES256-GCM-SHA384" #define TLS1_TXT_ECDHE_RSA_WITH_AES_128_GCM_SHA256 "ECDHE-RSA-AES128-GCM-SHA256" #define TLS1_TXT_ECDHE_RSA_WITH_AES_256_GCM_SHA384 "ECDHE-RSA-AES256-GCM-SHA384" #define TLS1_TXT_ECDH_RSA_WITH_AES_128_GCM_SHA256 "ECDH-RSA-AES128-GCM-SHA256" #define TLS1_TXT_ECDH_RSA_WITH_AES_256_GCM_SHA384 "ECDH-RSA-AES256-GCM-SHA384" #define TLS1_TXT_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 \ "ECDHE-RSA-CHACHA20-POLY1305" #define TLS1_TXT_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 \ "ECDHE-ECDSA-CHACHA20-POLY1305" #define TLS1_TXT_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 \ "ECDHE-PSK-CHACHA20-POLY1305" // TLS 1.3 ciphersuites from RFC 8446. #define TLS1_3_RFC_AES_128_GCM_SHA256 "TLS_AES_128_GCM_SHA256" #define TLS1_3_RFC_AES_256_GCM_SHA384 "TLS_AES_256_GCM_SHA384" #define TLS1_3_RFC_CHACHA20_POLY1305_SHA256 "TLS_CHACHA20_POLY1305_SHA256" // The following constants are legacy aliases of |TLS1_3_CK_*|. // TODO(bbe): Migrate callers to the new name and remove these. 
#define TLS1_TXT_AES_128_GCM_SHA256 TLS1_3_RFC_AES_128_GCM_SHA256 #define TLS1_TXT_AES_256_GCM_SHA384 TLS1_3_RFC_AES_256_GCM_SHA384 #define TLS1_TXT_CHACHA20_POLY1305_SHA256 TLS1_3_RFC_CHACHA20_POLY1305_SHA256 #define TLS_CT_RSA_SIGN 1 #define TLS_CT_DSS_SIGN 2 #define TLS_CT_RSA_FIXED_DH 3 #define TLS_CT_DSS_FIXED_DH 4 #define TLS_CT_ECDSA_SIGN 64 #define TLS_CT_RSA_FIXED_ECDH 65 #define TLS_CT_ECDSA_FIXED_ECDH 66 #define TLS_MD_MAX_CONST_SIZE 20 #ifdef __cplusplus } // extern C #endif #endif // OPENSSL_HEADER_TLS1_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_trust_token.h ================================================ /* Copyright 2020 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_TRUST_TOKEN_H #define OPENSSL_HEADER_TRUST_TOKEN_H #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_stack.h" #if defined(__cplusplus) extern "C" { #endif // Trust Token implementation. // // Trust Token is an implementation of an experimental mechanism similar to // Privacy Pass which allows issuance and redemption of anonymized tokens with // limited private metadata. 
// // References: // https://eprint.iacr.org/2020/072.pdf // https://github.com/ietf-wg-privacypass/base-drafts // https://github.com/WICG/trust-token-api/blob/main/README.md // // WARNING: This API is unstable and subject to change. // TRUST_TOKEN_experiment_v1 is an experimental Trust Tokens protocol using // PMBTokens and P-384. OPENSSL_EXPORT const TRUST_TOKEN_METHOD *TRUST_TOKEN_experiment_v1(void); // TRUST_TOKEN_experiment_v2_voprf is an experimental Trust Tokens protocol // using VOPRFs and P-384 with up to 6 keys, without RR verification. OPENSSL_EXPORT const TRUST_TOKEN_METHOD *TRUST_TOKEN_experiment_v2_voprf(void); // TRUST_TOKEN_experiment_v2_pmb is an experimental Trust Tokens protocol using // PMBTokens and P-384 with up to 3 keys, without RR verification. OPENSSL_EXPORT const TRUST_TOKEN_METHOD *TRUST_TOKEN_experiment_v2_pmb(void); // TRUST_TOKEN_pst_v1_voprf is an experimental Trust Tokens protocol // using VOPRFs and P-384 with up to 6 keys, without RR verification. OPENSSL_EXPORT const TRUST_TOKEN_METHOD *TRUST_TOKEN_pst_v1_voprf(void); // TRUST_TOKEN_pst_v1_pmb is an experimental Trust Tokens protocol using // PMBTokens and P-384 with up to 3 keys, without RR verification. OPENSSL_EXPORT const TRUST_TOKEN_METHOD *TRUST_TOKEN_pst_v1_pmb(void); // trust_token_st represents a single-use token for the Trust Token protocol. // For the client, this is the token and its corresponding signature. For the // issuer, this is the token itself. struct trust_token_st { uint8_t *data; size_t len; }; DEFINE_STACK_OF(TRUST_TOKEN) // TRUST_TOKEN_new creates a newly-allocated |TRUST_TOKEN| with value |data| or // NULL on allocation failure. OPENSSL_EXPORT TRUST_TOKEN *TRUST_TOKEN_new(const uint8_t *data, size_t len); // TRUST_TOKEN_free releases memory associated with |token|. 
OPENSSL_EXPORT void TRUST_TOKEN_free(TRUST_TOKEN *token); #define TRUST_TOKEN_MAX_PRIVATE_KEY_SIZE 512 #define TRUST_TOKEN_MAX_PUBLIC_KEY_SIZE 512 // TRUST_TOKEN_generate_key creates a new Trust Token keypair labeled with |id| // and serializes the private and public keys, writing the private key to // |out_priv_key| and setting |*out_priv_key_len| to the number of bytes // written, and writing the public key to |out_pub_key| and setting // |*out_pub_key_len| to the number of bytes written. // // At most |max_priv_key_len| and |max_pub_key_len| bytes are written. In order // to ensure success, these should be at least // |TRUST_TOKEN_MAX_PRIVATE_KEY_SIZE| and |TRUST_TOKEN_MAX_PUBLIC_KEY_SIZE|. // // This function returns one on success or zero on error. OPENSSL_EXPORT int TRUST_TOKEN_generate_key( const TRUST_TOKEN_METHOD *method, uint8_t *out_priv_key, size_t *out_priv_key_len, size_t max_priv_key_len, uint8_t *out_pub_key, size_t *out_pub_key_len, size_t max_pub_key_len, uint32_t id); // TRUST_TOKEN_derive_key_from_secret deterministically derives a new Trust // Token keypair labeled with |id| from an input |secret| and serializes the // private and public keys, writing the private key to |out_priv_key| and // setting |*out_priv_key_len| to the number of bytes written, and writing the // public key to |out_pub_key| and setting |*out_pub_key_len| to the number of // bytes written. // // At most |max_priv_key_len| and |max_pub_key_len| bytes are written. In order // to ensure success, these should be at least // |TRUST_TOKEN_MAX_PRIVATE_KEY_SIZE| and |TRUST_TOKEN_MAX_PUBLIC_KEY_SIZE|. // // This function returns one on success or zero on error. 
OPENSSL_EXPORT int TRUST_TOKEN_derive_key_from_secret( const TRUST_TOKEN_METHOD *method, uint8_t *out_priv_key, size_t *out_priv_key_len, size_t max_priv_key_len, uint8_t *out_pub_key, size_t *out_pub_key_len, size_t max_pub_key_len, uint32_t id, const uint8_t *secret, size_t secret_len); // Trust Token client implementation. // // These functions implements the client half of the Trust Token protocol. A // single |TRUST_TOKEN_CLIENT| can perform a single protocol operation. // TRUST_TOKEN_CLIENT_new returns a newly-allocated |TRUST_TOKEN_CLIENT| // configured to use a max batchsize of |max_batchsize| or NULL on error. // Issuance requests must be made in batches smaller than |max_batchsize|. This // function will return an error if |max_batchsize| is too large for Trust // Tokens. OPENSSL_EXPORT TRUST_TOKEN_CLIENT *TRUST_TOKEN_CLIENT_new( const TRUST_TOKEN_METHOD *method, size_t max_batchsize); // TRUST_TOKEN_CLIENT_free releases memory associated with |ctx|. OPENSSL_EXPORT void TRUST_TOKEN_CLIENT_free(TRUST_TOKEN_CLIENT *ctx); // TRUST_TOKEN_CLIENT_add_key configures the |ctx| to support the public key // |key|. It sets |*out_key_index| to the index this key has been configured to. // It returns one on success or zero on error if the |key| can't be parsed or // too many keys have been configured. OPENSSL_EXPORT int TRUST_TOKEN_CLIENT_add_key(TRUST_TOKEN_CLIENT *ctx, size_t *out_key_index, const uint8_t *key, size_t key_len); // TRUST_TOKEN_CLIENT_set_srr_key sets the public key used to verify the SRR. It // returns one on success and zero on error. OPENSSL_EXPORT int TRUST_TOKEN_CLIENT_set_srr_key(TRUST_TOKEN_CLIENT *ctx, EVP_PKEY *key); // TRUST_TOKEN_CLIENT_begin_issuance produces a request for |count| trust tokens // and serializes the request into a newly-allocated buffer, setting |*out| to // that buffer and |*out_len| to its length. The caller takes ownership of the // buffer and must call |OPENSSL_free| when done. 
It returns one on success and // zero on error. OPENSSL_EXPORT int TRUST_TOKEN_CLIENT_begin_issuance(TRUST_TOKEN_CLIENT *ctx, uint8_t **out, size_t *out_len, size_t count); // TRUST_TOKEN_CLIENT_begin_issuance_over_message produces a request for a trust // token derived from |msg| and serializes the request into a newly-allocated // buffer, setting |*out| to that buffer and |*out_len| to its length. The // caller takes ownership of the buffer and must call |OPENSSL_free| when done. // It returns one on success and zero on error. OPENSSL_EXPORT int TRUST_TOKEN_CLIENT_begin_issuance_over_message( TRUST_TOKEN_CLIENT *ctx, uint8_t **out, size_t *out_len, size_t count, const uint8_t *msg, size_t msg_len); // TRUST_TOKEN_CLIENT_finish_issuance consumes |response| from the issuer and // extracts the tokens, returning a list of tokens and the index of the key used // to sign the tokens in |*out_key_index|. The caller can use this to determine // what key was used in an issuance and to drop tokens if a new key commitment // arrives without the specified key present. The caller takes ownership of the // list and must call |sk_TRUST_TOKEN_pop_free| when done. The list is empty if // issuance fails. OPENSSL_EXPORT STACK_OF(TRUST_TOKEN) * TRUST_TOKEN_CLIENT_finish_issuance(TRUST_TOKEN_CLIENT *ctx, size_t *out_key_index, const uint8_t *response, size_t response_len); // TRUST_TOKEN_CLIENT_begin_redemption produces a request to redeem a token // |token| and receive a signature over |data| and serializes the request into // a newly-allocated buffer, setting |*out| to that buffer and |*out_len| to // its length. |time| is the number of seconds since the UNIX epoch and used to // verify the validity of the issuer's response in TrustTokenV1 and ignored in // other versions. The caller takes ownership of the buffer and must call // |OPENSSL_free| when done. It returns one on success or zero on error. 
OPENSSL_EXPORT int TRUST_TOKEN_CLIENT_begin_redemption( TRUST_TOKEN_CLIENT *ctx, uint8_t **out, size_t *out_len, const TRUST_TOKEN *token, const uint8_t *data, size_t data_len, uint64_t time); // TRUST_TOKEN_CLIENT_finish_redemption consumes |response| from the issuer. In // |TRUST_TOKEN_experiment_v1|, it then verifies the SRR and if valid sets // |*out_rr| and |*out_rr_len| (respectively, |*out_sig| and |*out_sig_len|) // to a newly-allocated buffer containing the SRR (respectively, the SRR // signature). In other versions, it sets |*out_rr| and |*out_rr_len| // to a newly-allocated buffer containing |response| and leaves all validation // to the caller. It returns one on success or zero on failure. OPENSSL_EXPORT int TRUST_TOKEN_CLIENT_finish_redemption( TRUST_TOKEN_CLIENT *ctx, uint8_t **out_rr, size_t *out_rr_len, uint8_t **out_sig, size_t *out_sig_len, const uint8_t *response, size_t response_len); // Trust Token issuer implementation. // // These functions implement the issuer half of the Trust Token protocol. A // |TRUST_TOKEN_ISSUER| can be reused across multiple protocol operations. It // may be used concurrently on multiple threads by non-mutating functions, // provided no other thread is concurrently calling a mutating function. // Functions which take a |const| pointer are non-mutating and functions which // take a non-|const| pointer are mutating. // TRUST_TOKEN_ISSUER_new returns a newly-allocated |TRUST_TOKEN_ISSUER| // configured to use a max batchsize of |max_batchsize| or NULL on error. // Issuance requests must be made in batches smaller than |max_batchsize|. This // function will return an error if |max_batchsize| is too large for Trust // Tokens. OPENSSL_EXPORT TRUST_TOKEN_ISSUER *TRUST_TOKEN_ISSUER_new( const TRUST_TOKEN_METHOD *method, size_t max_batchsize); // TRUST_TOKEN_ISSUER_free releases memory associated with |ctx|. 
OPENSSL_EXPORT void TRUST_TOKEN_ISSUER_free(TRUST_TOKEN_ISSUER *ctx); // TRUST_TOKEN_ISSUER_add_key configures the |ctx| to support the private key // |key|. It must be a private key returned by |TRUST_TOKEN_generate_key|. It // returns one on success or zero on error. This function may fail if the |key| // can't be parsed or too many keys have been configured. OPENSSL_EXPORT int TRUST_TOKEN_ISSUER_add_key(TRUST_TOKEN_ISSUER *ctx, const uint8_t *key, size_t key_len); // TRUST_TOKEN_ISSUER_set_srr_key sets the private key used to sign the SRR. It // returns one on success and zero on error. OPENSSL_EXPORT int TRUST_TOKEN_ISSUER_set_srr_key(TRUST_TOKEN_ISSUER *ctx, EVP_PKEY *key); // TRUST_TOKEN_ISSUER_set_metadata_key sets the key used to encrypt the private // metadata. The key is a randomly generated bytestring of at least 32 bytes // used to encode the private metadata bit in the SRR. It returns one on success // and zero on error. OPENSSL_EXPORT int TRUST_TOKEN_ISSUER_set_metadata_key(TRUST_TOKEN_ISSUER *ctx, const uint8_t *key, size_t len); // TRUST_TOKEN_ISSUER_issue ingests |request| for token issuance // and generates up to |max_issuance| valid tokens, producing a list of blinded // tokens and storing the response into a newly-allocated buffer and setting // |*out| to that buffer, |*out_len| to its length, and |*out_tokens_issued| to // the number of tokens issued. The tokens are issued with public metadata of // |public_metadata| and a private metadata value of |private_metadata|. // |public_metadata| must be one of the previously configured key IDs. // |private_metadata| must be 0 or 1. The caller takes ownership of the buffer // and must call |OPENSSL_free| when done. It returns one on success or zero on // error. 
OPENSSL_EXPORT int TRUST_TOKEN_ISSUER_issue( const TRUST_TOKEN_ISSUER *ctx, uint8_t **out, size_t *out_len, size_t *out_tokens_issued, const uint8_t *request, size_t request_len, uint32_t public_metadata, uint8_t private_metadata, size_t max_issuance); // TRUST_TOKEN_ISSUER_redeem ingests a |request| for token redemption and // verifies the token. The public metadata is stored in |*out_public|. The // private metadata (if any) is stored in |*out_private|. The extracted // |TRUST_TOKEN| is stored into a newly-allocated buffer and stored in // |*out_token|. The extracted client data is stored into a newly-allocated // buffer and stored in |*out_client_data|. The caller takes ownership of each // output buffer and must call |OPENSSL_free| when done. It returns one on // success or zero on error. // // The caller must keep track of all values of |*out_token| seen globally before // returning a response to the client. If the value has been reused, the caller // must report an error to the client. Returning a response with replayed values // allows an attacker to double-spend tokens. OPENSSL_EXPORT int TRUST_TOKEN_ISSUER_redeem( const TRUST_TOKEN_ISSUER *ctx, uint32_t *out_public, uint8_t *out_private, TRUST_TOKEN **out_token, uint8_t **out_client_data, size_t *out_client_data_len, const uint8_t *request, size_t request_len); // TRUST_TOKEN_ISSUER_redeem_raw is a legacy alias for // |TRUST_TOKEN_ISSUER_redeem|. #define TRUST_TOKEN_ISSUER_redeem_raw TRUST_TOKEN_ISSUER_redeem // TRUST_TOKEN_ISSUER_redeem_over_message ingests a |request| for token // redemption and a message and verifies the token and that it is derived from // the provided |msg|. The public metadata is stored in // |*out_public|. The private metadata (if any) is stored in |*out_private|. The // extracted |TRUST_TOKEN| is stored into a newly-allocated buffer and stored in // |*out_token|. The extracted client data is stored into a newly-allocated // buffer and stored in |*out_client_data|. 
The caller takes ownership of each // output buffer and must call |OPENSSL_free| when done. It returns one on // success or zero on error. // // The caller must keep track of all values of |*out_token| seen globally before // returning a response to the client. If the value has been reused, the caller // must report an error to the client. Returning a response with replayed values // allows an attacker to double-spend tokens. OPENSSL_EXPORT int TRUST_TOKEN_ISSUER_redeem_over_message( const TRUST_TOKEN_ISSUER *ctx, uint32_t *out_public, uint8_t *out_private, TRUST_TOKEN **out_token, uint8_t **out_client_data, size_t *out_client_data_len, const uint8_t *request, size_t request_len, const uint8_t *msg, size_t msg_len); // TRUST_TOKEN_decode_private_metadata decodes |encrypted_bit| using the // private metadata key specified by a |key| buffer of length |key_len| and the // nonce by a |nonce| buffer of length |nonce_len|. The nonce in // |TRUST_TOKEN_experiment_v1| is the token-hash field of the SRR. |*out_value| // is set to the decrypted value, either zero or one. It returns one on success // and zero on error. 
OPENSSL_EXPORT int TRUST_TOKEN_decode_private_metadata( const TRUST_TOKEN_METHOD *method, uint8_t *out_value, const uint8_t *key, size_t key_len, const uint8_t *nonce, size_t nonce_len, uint8_t encrypted_bit); #if defined(__cplusplus) } // extern C extern "C++" { BSSL_NAMESPACE_BEGIN BORINGSSL_MAKE_DELETER(TRUST_TOKEN, TRUST_TOKEN_free) BORINGSSL_MAKE_DELETER(TRUST_TOKEN_CLIENT, TRUST_TOKEN_CLIENT_free) BORINGSSL_MAKE_DELETER(TRUST_TOKEN_ISSUER, TRUST_TOKEN_ISSUER_free) BSSL_NAMESPACE_END } // extern C++ #endif #define TRUST_TOKEN_R_KEYGEN_FAILURE 100 #define TRUST_TOKEN_R_BUFFER_TOO_SMALL 101 #define TRUST_TOKEN_R_OVER_BATCHSIZE 102 #define TRUST_TOKEN_R_DECODE_ERROR 103 #define TRUST_TOKEN_R_SRR_SIGNATURE_ERROR 104 #define TRUST_TOKEN_R_DECODE_FAILURE 105 #define TRUST_TOKEN_R_INVALID_METADATA 106 #define TRUST_TOKEN_R_TOO_MANY_KEYS 107 #define TRUST_TOKEN_R_NO_KEYS_CONFIGURED 108 #define TRUST_TOKEN_R_INVALID_KEY_ID 109 #define TRUST_TOKEN_R_INVALID_TOKEN 110 #define TRUST_TOKEN_R_BAD_VALIDITY_CHECK 111 #define TRUST_TOKEN_R_NO_SRR_KEY_CONFIGURED 112 #define TRUST_TOKEN_R_INVALID_METADATA_KEY 113 #define TRUST_TOKEN_R_INVALID_PROOF 114 #endif // OPENSSL_HEADER_TRUST_TOKEN_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_type_check.h ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_TYPE_CHECK_H #define OPENSSL_HEADER_TYPE_CHECK_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif // CHECKED_CAST casts |p| from type |from| to type |to|. 
// // TODO(davidben): Although this macro is not public API and is unused in // BoringSSL, wpa_supplicant uses it to define its own stacks. Remove this once // wpa_supplicant has been fixed. #define CHECKED_CAST(to, from, p) ((to) (1 ? (p) : (from)0)) #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_TYPE_CHECK_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_x509.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_X509_H #define OPENSSL_HEADER_X509_H #include "CNIOBoringSSL_base.h" #include <time.h> #include "CNIOBoringSSL_asn1.h" #include "CNIOBoringSSL_bio.h" #include "CNIOBoringSSL_cipher.h" #include "CNIOBoringSSL_conf.h" #include "CNIOBoringSSL_dh.h" #include "CNIOBoringSSL_dsa.h" #include "CNIOBoringSSL_ec.h" #include "CNIOBoringSSL_ecdh.h" #include "CNIOBoringSSL_ecdsa.h" #include "CNIOBoringSSL_evp.h" #include "CNIOBoringSSL_lhash.h" #include "CNIOBoringSSL_obj.h" #include "CNIOBoringSSL_pkcs7.h" #include "CNIOBoringSSL_pool.h" #include "CNIOBoringSSL_rsa.h" #include "CNIOBoringSSL_sha.h" #include "CNIOBoringSSL_stack.h" #include "CNIOBoringSSL_thread.h" #include "CNIOBoringSSL_x509v3_errors.h" // IWYU pragma: export #if defined(__cplusplus) extern "C" { #endif // Legacy X.509 library. // // This header is part of OpenSSL's X.509 implementation. It is retained for // compatibility but should not be used by new code. The functions are difficult // to use correctly, and have buggy or non-standard behaviors.
They are thus // particularly prone to behavior changes and API removals, as BoringSSL // iterates on these issues. // // In the future, a replacement library will be available. Meanwhile, minimize // dependencies on this header where possible. // Certificates. // // An |X509| object represents an X.509 certificate, defined in RFC 5280. // // Although an |X509| is a mutable object, mutating an |X509| can give incorrect // results. Callers typically obtain |X509|s by parsing some input with // |d2i_X509|, etc. Such objects carry information such as the serialized // TBSCertificate and decoded extensions, which will become inconsistent when // mutated. // // Instead, mutation functions should only be used when issuing new // certificates, as described in a later section. DEFINE_STACK_OF(X509) // X509 is an |ASN1_ITEM| whose ASN.1 type is X.509 Certificate (RFC 5280) and C // type is |X509*|. DECLARE_ASN1_ITEM(X509) // X509_up_ref adds one to the reference count of |x509| and returns one. OPENSSL_EXPORT int X509_up_ref(X509 *x509); // X509_chain_up_ref returns a newly-allocated |STACK_OF(X509)| containing a // shallow copy of |chain|, or NULL on error. That is, the return value has the // same contents as |chain|, and each |X509|'s reference count is incremented by // one. OPENSSL_EXPORT STACK_OF(X509) *X509_chain_up_ref(STACK_OF(X509) *chain); // X509_dup returns a newly-allocated copy of |x509|, or NULL on error. This // function works by serializing the structure, so auxiliary properties (see // |i2d_X509_AUX|) are not preserved. Additionally, if |x509| is incomplete, // this function may fail. // // TODO(https://crbug.com/boringssl/407): This function should be const and // thread-safe but is currently neither in some cases, notably if |x509| was // mutated. OPENSSL_EXPORT X509 *X509_dup(X509 *x509); // X509_free decrements |x509|'s reference count and, if zero, releases memory // associated with |x509|.
OPENSSL_EXPORT void X509_free(X509 *x509); // d2i_X509 parses up to |len| bytes from |*inp| as a DER-encoded X.509 // Certificate (RFC 5280), as described in |d2i_SAMPLE|. OPENSSL_EXPORT X509 *d2i_X509(X509 **out, const uint8_t **inp, long len); // X509_parse_from_buffer parses an X.509 structure from |buf| and returns a // fresh X509 or NULL on error. There must not be any trailing data in |buf|. // The returned structure (if any) holds a reference to |buf| rather than // copying parts of it as a normal |d2i_X509| call would do. OPENSSL_EXPORT X509 *X509_parse_from_buffer(CRYPTO_BUFFER *buf); // i2d_X509 marshals |x509| as a DER-encoded X.509 Certificate (RFC 5280), as // described in |i2d_SAMPLE|. // // TODO(https://crbug.com/boringssl/407): This function should be const and // thread-safe but is currently neither in some cases, notably if |x509| was // mutated. OPENSSL_EXPORT int i2d_X509(X509 *x509, uint8_t **outp); // X509_VERSION_* are X.509 version numbers. Note the numerical values of all // defined X.509 versions are one less than the named version. #define X509_VERSION_1 0 #define X509_VERSION_2 1 #define X509_VERSION_3 2 // X509_get_version returns the numerical value of |x509|'s version, which will // be one of the |X509_VERSION_*| constants. OPENSSL_EXPORT long X509_get_version(const X509 *x509); // X509_get0_serialNumber returns |x509|'s serial number. OPENSSL_EXPORT const ASN1_INTEGER *X509_get0_serialNumber(const X509 *x509); // X509_get0_notBefore returns |x509|'s notBefore time. OPENSSL_EXPORT const ASN1_TIME *X509_get0_notBefore(const X509 *x509); // X509_get0_notAfter returns |x509|'s notAfter time. OPENSSL_EXPORT const ASN1_TIME *X509_get0_notAfter(const X509 *x509); // X509_get_issuer_name returns |x509|'s issuer. OPENSSL_EXPORT X509_NAME *X509_get_issuer_name(const X509 *x509); // X509_get_subject_name returns |x509|'s subject. 
OPENSSL_EXPORT X509_NAME *X509_get_subject_name(const X509 *x509); // X509_get_X509_PUBKEY returns the public key of |x509|. Note this function is // not const-correct for legacy reasons. Callers should not modify the returned // object. OPENSSL_EXPORT X509_PUBKEY *X509_get_X509_PUBKEY(const X509 *x509); // X509_get0_pubkey returns |x509|'s public key as an |EVP_PKEY|, or NULL if the // public key was unsupported or could not be decoded. The |EVP_PKEY| is cached // in |x509|, so callers must not mutate the result. OPENSSL_EXPORT EVP_PKEY *X509_get0_pubkey(const X509 *x509); // X509_get_pubkey behaves like |X509_get0_pubkey| but increments the reference // count on the |EVP_PKEY|. The caller must release the result with // |EVP_PKEY_free| when done. The |EVP_PKEY| is cached in |x509|, so callers // must not mutate the result. OPENSSL_EXPORT EVP_PKEY *X509_get_pubkey(const X509 *x509); // X509_get0_pubkey_bitstr returns the BIT STRING portion of |x509|'s public // key. Note this does not contain the AlgorithmIdentifier portion. // // WARNING: This function returns a non-const pointer for OpenSSL compatibility, // but the caller must not modify the resulting object. Doing so will break // internal invariants in |x509|. OPENSSL_EXPORT ASN1_BIT_STRING *X509_get0_pubkey_bitstr(const X509 *x509); // X509_check_private_key returns one if |x509|'s public key matches |pkey| and // zero otherwise. OPENSSL_EXPORT int X509_check_private_key(const X509 *x509, const EVP_PKEY *pkey); // X509_get0_uids sets |*out_issuer_uid| to a non-owning pointer to the // issuerUID field of |x509|, or NULL if |x509| has no issuerUID. It similarly // outputs |x509|'s subjectUID field to |*out_subject_uid|. // // Callers may pass NULL to either |out_issuer_uid| or |out_subject_uid| to // ignore the corresponding field. 
OPENSSL_EXPORT void X509_get0_uids(const X509 *x509, const ASN1_BIT_STRING **out_issuer_uid, const ASN1_BIT_STRING **out_subject_uid); // The following bits are returned from |X509_get_extension_flags|. // EXFLAG_BCONS indicates the certificate has a basic constraints extension. #define EXFLAG_BCONS 0x1 // EXFLAG_KUSAGE indicates the certificate has a key usage extension. #define EXFLAG_KUSAGE 0x2 // EXFLAG_XKUSAGE indicates the certificate has an extended key usage extension. #define EXFLAG_XKUSAGE 0x4 // EXFLAG_CA indicates the certificate has a basic constraints extension with // the CA bit set. #define EXFLAG_CA 0x10 // EXFLAG_SI indicates the certificate is self-issued, i.e. its subject and // issuer names match. #define EXFLAG_SI 0x20 // EXFLAG_V1 indicates an X.509v1 certificate. #define EXFLAG_V1 0x40 // EXFLAG_INVALID indicates an error processing some extension. The certificate // should not be accepted. Note the lack of this bit does not imply all // extensions are valid, only those used to compute extension flags. #define EXFLAG_INVALID 0x80 // EXFLAG_SET is an internal bit that indicates extension flags were computed. #define EXFLAG_SET 0x100 // EXFLAG_CRITICAL indicates an unsupported critical extension. The certificate // should not be accepted. #define EXFLAG_CRITICAL 0x200 // EXFLAG_SS indicates the certificate is likely self-signed. That is, if it is // self-issued, its authority key identifier (if any) matches itself, and its // key usage extension (if any) allows certificate signatures. The signature // itself is not checked in computing this bit. #define EXFLAG_SS 0x2000 // X509_get_extension_flags decodes a set of extensions from |x509| and returns // a collection of |EXFLAG_*| bits which reflect |x509|. If there was an error // in computing this bitmask, the result will include the |EXFLAG_INVALID| bit.
OPENSSL_EXPORT uint32_t X509_get_extension_flags(X509 *x509); // X509_get_pathlen returns path length constraint from the basic constraints // extension in |x509|. (See RFC 5280, section 4.2.1.9.) It returns -1 if the // constraint is not present, or if some extension in |x509| was invalid. // // TODO(crbug.com/boringssl/381): Decoding an |X509| object will not check for // invalid extensions. To detect the error case, call // |X509_get_extension_flags| and check the |EXFLAG_INVALID| bit. OPENSSL_EXPORT long X509_get_pathlen(X509 *x509); // X509v3_KU_* are key usage bits returned from |X509_get_key_usage|. #define X509v3_KU_DIGITAL_SIGNATURE 0x0080 #define X509v3_KU_NON_REPUDIATION 0x0040 #define X509v3_KU_KEY_ENCIPHERMENT 0x0020 #define X509v3_KU_DATA_ENCIPHERMENT 0x0010 #define X509v3_KU_KEY_AGREEMENT 0x0008 #define X509v3_KU_KEY_CERT_SIGN 0x0004 #define X509v3_KU_CRL_SIGN 0x0002 #define X509v3_KU_ENCIPHER_ONLY 0x0001 #define X509v3_KU_DECIPHER_ONLY 0x8000 // X509_get_key_usage returns a bitmask of key usages (see Section 4.2.1.3 of // RFC 5280) which |x509| is valid for. This function only reports the first 16 // bits, in a little-endian byte order, but big-endian bit order. That is, bits // 0 though 7 are reported at 1<<7 through 1<<0, and bits 8 through 15 are // reported at 1<<15 through 1<<8. // // Instead of depending on this bit order, callers should compare against the // |X509v3_KU_*| constants. // // If |x509| has no key usage extension, all key usages are valid and this // function returns |UINT32_MAX|. If there was an error processing |x509|'s // extensions, or if the first 16 bits in the key usage extension were all zero, // this function returns zero. OPENSSL_EXPORT uint32_t X509_get_key_usage(X509 *x509); // XKU_* are extended key usage bits returned from // |X509_get_extended_key_usage|. 
#define XKU_SSL_SERVER 0x1 #define XKU_SSL_CLIENT 0x2 #define XKU_SMIME 0x4 #define XKU_CODE_SIGN 0x8 #define XKU_SGC 0x10 #define XKU_OCSP_SIGN 0x20 #define XKU_TIMESTAMP 0x40 #define XKU_DVCS 0x80 #define XKU_ANYEKU 0x100 // X509_get_extended_key_usage returns a bitmask of extended key usages (see // Section 4.2.1.12 of RFC 5280) which |x509| is valid for. The result will be // a combination of |XKU_*| constants. If checking an extended key usage not // defined above, callers should extract the extended key usage extension // separately, e.g. via |X509_get_ext_d2i|. // // If |x509| has no extended key usage extension, all extended key usages are // valid and this function returns |UINT32_MAX|. If there was an error // processing |x509|'s extensions, or if |x509|'s extended key usage extension // contained no recognized usages, this function returns zero. OPENSSL_EXPORT uint32_t X509_get_extended_key_usage(X509 *x509); // X509_get0_subject_key_id returns |x509|'s subject key identifier, if present. // (See RFC 5280, section 4.2.1.2.) It returns NULL if the extension is not // present or if some extension in |x509| was invalid. // // TODO(crbug.com/boringssl/381): Decoding an |X509| object will not check for // invalid extensions. To detect the error case, call // |X509_get_extension_flags| and check the |EXFLAG_INVALID| bit. OPENSSL_EXPORT const ASN1_OCTET_STRING *X509_get0_subject_key_id(X509 *x509); // X509_get0_authority_key_id returns keyIdentifier of |x509|'s authority key // identifier, if the extension and field are present. (See RFC 5280, // section 4.2.1.1.) It returns NULL if the extension is not present, if it is // present but lacks a keyIdentifier field, or if some extension in |x509| was // invalid. // // TODO(crbug.com/boringssl/381): Decoding an |X509| object will not check for // invalid extensions. To detect the error case, call // |X509_get_extension_flags| and check the |EXFLAG_INVALID| bit. 
OPENSSL_EXPORT const ASN1_OCTET_STRING *X509_get0_authority_key_id(X509 *x509); DEFINE_STACK_OF(GENERAL_NAME) typedef STACK_OF(GENERAL_NAME) GENERAL_NAMES; // X509_get0_authority_issuer returns the authorityCertIssuer of |x509|'s // authority key identifier, if the extension and field are present. (See // RFC 5280, section 4.2.1.1.) It returns NULL if the extension is not present, // if it is present but lacks a authorityCertIssuer field, or if some extension // in |x509| was invalid. // // TODO(crbug.com/boringssl/381): Decoding an |X509| object will not check for // invalid extensions. To detect the error case, call // |X509_get_extension_flags| and check the |EXFLAG_INVALID| bit. OPENSSL_EXPORT const GENERAL_NAMES *X509_get0_authority_issuer(X509 *x509); // X509_get0_authority_serial returns the authorityCertSerialNumber of |x509|'s // authority key identifier, if the extension and field are present. (See // RFC 5280, section 4.2.1.1.) It returns NULL if the extension is not present, // if it is present but lacks a authorityCertSerialNumber field, or if some // extension in |x509| was invalid. // // TODO(crbug.com/boringssl/381): Decoding an |X509| object will not check for // invalid extensions. To detect the error case, call // |X509_get_extension_flags| and check the |EXFLAG_INVALID| bit. OPENSSL_EXPORT const ASN1_INTEGER *X509_get0_authority_serial(X509 *x509); // X509_get0_extensions returns |x509|'s extension list, or NULL if |x509| omits // it. OPENSSL_EXPORT const STACK_OF(X509_EXTENSION) *X509_get0_extensions( const X509 *x509); // X509_get_ext_count returns the number of extensions in |x|. OPENSSL_EXPORT int X509_get_ext_count(const X509 *x); // X509_get_ext_by_NID behaves like |X509v3_get_ext_by_NID| but searches for // extensions in |x|. OPENSSL_EXPORT int X509_get_ext_by_NID(const X509 *x, int nid, int lastpos); // X509_get_ext_by_OBJ behaves like |X509v3_get_ext_by_OBJ| but searches for // extensions in |x|. 
OPENSSL_EXPORT int X509_get_ext_by_OBJ(const X509 *x, const ASN1_OBJECT *obj, int lastpos); // X509_get_ext_by_critical behaves like |X509v3_get_ext_by_critical| but // searches for extensions in |x|. OPENSSL_EXPORT int X509_get_ext_by_critical(const X509 *x, int crit, int lastpos); // X509_get_ext returns the extension in |x| at index |loc|, or NULL if |loc| is // out of bounds. This function returns a non-const pointer for OpenSSL // compatibility, but callers should not mutate the result. OPENSSL_EXPORT X509_EXTENSION *X509_get_ext(const X509 *x, int loc); // X509_get_ext_d2i behaves like |X509V3_get_d2i| but looks for the extension in // |x509|'s extension list. // // WARNING: This function is difficult to use correctly. See the documentation // for |X509V3_get_d2i| for details. OPENSSL_EXPORT void *X509_get_ext_d2i(const X509 *x509, int nid, int *out_critical, int *out_idx); // X509_get0_tbs_sigalg returns the signature algorithm in |x509|'s // TBSCertificate. For the outer signature algorithm, see |X509_get0_signature|. // // Certificates with mismatched signature algorithms will successfully parse, // but they will be rejected when verifying. OPENSSL_EXPORT const X509_ALGOR *X509_get0_tbs_sigalg(const X509 *x509); // X509_get0_signature sets |*out_sig| and |*out_alg| to the signature and // signature algorithm of |x509|, respectively. Either output pointer may be // NULL to ignore the value. // // This function outputs the outer signature algorithm. For the one in the // TBSCertificate, see |X509_get0_tbs_sigalg|. Certificates with mismatched // signature algorithms will successfully parse, but they will be rejected when // verifying. OPENSSL_EXPORT void X509_get0_signature(const ASN1_BIT_STRING **out_sig, const X509_ALGOR **out_alg, const X509 *x509); // X509_get_signature_nid returns the NID corresponding to |x509|'s signature // algorithm, or |NID_undef| if the signature algorithm does not correspond to // a known NID. 
OPENSSL_EXPORT int X509_get_signature_nid(const X509 *x509); // i2d_X509_tbs serializes the TBSCertificate portion of |x509|, as described in // |i2d_SAMPLE|. // // This function preserves the original encoding of the TBSCertificate and may // not reflect modifications made to |x509|. It may be used to manually verify // the signature of an existing certificate. To generate certificates, use // |i2d_re_X509_tbs| instead. OPENSSL_EXPORT int i2d_X509_tbs(X509 *x509, unsigned char **outp); // X509_verify checks that |x509| has a valid signature by |pkey|. It returns // one if the signature is valid and zero otherwise. Note this function only // checks the signature itself and does not perform a full certificate // validation. OPENSSL_EXPORT int X509_verify(X509 *x509, EVP_PKEY *pkey); // X509_get1_email returns a newly-allocated list of NUL-terminated strings // containing all email addresses in |x509|'s subject and all rfc822name names // in |x509|'s subject alternative names. Email addresses which contain embedded // NUL bytes are skipped. // // On error, or if there are no such email addresses, it returns NULL. When // done, the caller must release the result with |X509_email_free|. OPENSSL_EXPORT STACK_OF(OPENSSL_STRING) *X509_get1_email(const X509 *x509); // X509_get1_ocsp returns a newly-allocated list of NUL-terminated strings // containing all OCSP URIs in |x509|. That is, it collects all URI // AccessDescriptions with an accessMethod of id-ad-ocsp in |x509|'s authority // information access extension. URIs which contain embedded NUL bytes are // skipped. // // On error, or if there are no such URIs, it returns NULL. When done, the // caller must release the result with |X509_email_free|. OPENSSL_EXPORT STACK_OF(OPENSSL_STRING) *X509_get1_ocsp(const X509 *x509); // X509_email_free releases memory associated with |sk|, including |sk| itself. // Each |OPENSSL_STRING| in |sk| must be a NUL-terminated string allocated with // |OPENSSL_malloc|. 
If |sk| is NULL, no action is taken. OPENSSL_EXPORT void X509_email_free(STACK_OF(OPENSSL_STRING) *sk); // X509_cmp compares |a| and |b| and returns zero if they are equal, a negative // number if |b| sorts after |a| and a positive number if |a| sorts after |b|. // The sort order implemented by this function is arbitrary and does not // reflect properties of the certificate such as expiry. Applications should not // rely on the order itself. // // TODO(https://crbug.com/boringssl/355): This function works by comparing a // cached hash of the encoded certificate. If |a| or |b| could not be // serialized, the current behavior is to compare all unencodable certificates // as equal. This function should only be used with |X509| objects that were // parsed from bytes and never mutated. // // TODO(https://crbug.com/boringssl/407): This function is const, but it is not // always thread-safe, notably if |a| and |b| were mutated. OPENSSL_EXPORT int X509_cmp(const X509 *a, const X509 *b); // Issuing certificates. // // An |X509| object may also represent an incomplete certificate. Callers may // construct empty |X509| objects, fill in fields individually, and finally sign // the result. The following functions may be used for this purpose. // X509_new returns a newly-allocated, empty |X509| object, or NULL on error. // This produces an incomplete certificate which may be filled in to issue a new // certificate. OPENSSL_EXPORT X509 *X509_new(void); // X509_set_version sets |x509|'s version to |version|, which should be one of // the |X509_VERSION_*| constants. It returns one on success and zero on error. // // If unsure, use |X509_VERSION_3|. OPENSSL_EXPORT int X509_set_version(X509 *x509, long version); // X509_set_serialNumber sets |x509|'s serial number to |serial|. It returns one // on success and zero on error. OPENSSL_EXPORT int X509_set_serialNumber(X509 *x509, const ASN1_INTEGER *serial); // X509_set1_notBefore sets |x509|'s notBefore time to |tm|.
It returns one on // success and zero on error. OPENSSL_EXPORT int X509_set1_notBefore(X509 *x509, const ASN1_TIME *tm); // X509_set1_notAfter sets |x509|'s notAfter time to |tm|. It returns one on // success and zero on error. OPENSSL_EXPORT int X509_set1_notAfter(X509 *x509, const ASN1_TIME *tm); // X509_getm_notBefore returns a mutable pointer to |x509|'s notBefore time. OPENSSL_EXPORT ASN1_TIME *X509_getm_notBefore(X509 *x509); // X509_getm_notAfter returns a mutable pointer to |x509|'s notAfter time. OPENSSL_EXPORT ASN1_TIME *X509_getm_notAfter(X509 *x); // X509_set_issuer_name sets |x509|'s issuer to a copy of |name|. It returns one // on success and zero on error. OPENSSL_EXPORT int X509_set_issuer_name(X509 *x509, X509_NAME *name); // X509_set_subject_name sets |x509|'s subject to a copy of |name|. It returns // one on success and zero on error. OPENSSL_EXPORT int X509_set_subject_name(X509 *x509, X509_NAME *name); // X509_set_pubkey sets |x509|'s public key to |pkey|. It returns one on success // and zero on error. This function does not take ownership of |pkey| and // internally copies and updates reference counts as needed. OPENSSL_EXPORT int X509_set_pubkey(X509 *x509, EVP_PKEY *pkey); // X509_delete_ext removes the extension in |x| at index |loc| and returns the // removed extension, or NULL if |loc| was out of bounds. If non-NULL, the // caller must release the result with |X509_EXTENSION_free|. OPENSSL_EXPORT X509_EXTENSION *X509_delete_ext(X509 *x, int loc); // X509_add_ext adds a copy of |ex| to |x|. It returns one on success and zero // on failure. The caller retains ownership of |ex| and can release it // independently of |x|. // // The new extension is inserted at index |loc|, shifting extensions to the // right. If |loc| is -1 or out of bounds, the new extension is appended to the // list.
OPENSSL_EXPORT int X509_add_ext(X509 *x, const X509_EXTENSION *ex, int loc); // X509_add1_ext_i2d behaves like |X509V3_add1_i2d| but adds the extension to // |x|'s extension list. // // WARNING: This function may return zero or -1 on error. The caller must also // ensure |value|'s type matches |nid|. See the documentation for // |X509V3_add1_i2d| for details. OPENSSL_EXPORT int X509_add1_ext_i2d(X509 *x, int nid, void *value, int crit, unsigned long flags); // X509_sign signs |x509| with |pkey| and replaces the signature algorithm and // signature fields. It returns the length of the signature on success and zero // on error. This function uses digest algorithm |md|, or |pkey|'s default if // NULL. Other signing parameters use |pkey|'s defaults. To customize them, use // |X509_sign_ctx|. OPENSSL_EXPORT int X509_sign(X509 *x509, EVP_PKEY *pkey, const EVP_MD *md); // X509_sign_ctx signs |x509| with |ctx| and replaces the signature algorithm // and signature fields. It returns the length of the signature on success and // zero on error. The signature algorithm and parameters come from |ctx|, which // must have been initialized with |EVP_DigestSignInit|. The caller should // configure the corresponding |EVP_PKEY_CTX| before calling this function. // // On success or failure, this function mutates |ctx| and resets it to the empty // state. Caller should not rely on its contents after the function returns. OPENSSL_EXPORT int X509_sign_ctx(X509 *x509, EVP_MD_CTX *ctx); // i2d_re_X509_tbs serializes the TBSCertificate portion of |x509|, as described // in |i2d_SAMPLE|. // // This function re-encodes the TBSCertificate and may not reflect |x509|'s // original encoding. It may be used to manually generate a signature for a new // certificate. To verify certificates, use |i2d_X509_tbs| instead. 
OPENSSL_EXPORT int i2d_re_X509_tbs(X509 *x509, unsigned char **outp); // X509_set1_signature_algo sets |x509|'s signature algorithm to |algo| and // returns one on success or zero on error. It updates both the signature field // of the TBSCertificate structure, and the signatureAlgorithm field of the // Certificate. OPENSSL_EXPORT int X509_set1_signature_algo(X509 *x509, const X509_ALGOR *algo); // X509_set1_signature_value sets |x509|'s signature to a copy of the |sig_len| // bytes pointed by |sig|. It returns one on success and zero on error. // // Due to a specification error, X.509 certificates store signatures in ASN.1 // BIT STRINGs, but signature algorithms return byte strings rather than bit // strings. This function creates a BIT STRING containing a whole number of // bytes, with the bit order matching the DER encoding. This matches the // encoding used by all X.509 signature algorithms. OPENSSL_EXPORT int X509_set1_signature_value(X509 *x509, const uint8_t *sig, size_t sig_len); // Auxiliary certificate properties. // // |X509| objects optionally maintain auxiliary properties. These are not part // of the certificates themselves, and thus are not covered by signatures or // preserved by the standard serialization. They are used as inputs or outputs // to other functions in this library. // i2d_X509_AUX marshals |x509| as a DER-encoded X.509 Certificate (RFC 5280), // followed optionally by a separate, OpenSSL-specific structure with auxiliary // properties. It behaves as described in |i2d_SAMPLE|. // // Unlike similarly-named functions, this function does not output a single // ASN.1 element. Directly embedding the output in a larger ASN.1 structure will // not behave correctly. // // TODO(crbug.com/boringssl/407): |x509| should be const. 
OPENSSL_EXPORT int i2d_X509_AUX(X509 *x509, uint8_t **outp); // d2i_X509_AUX parses up to |length| bytes from |*inp| as a DER-encoded X.509 // Certificate (RFC 5280), followed optionally by a separate, OpenSSL-specific // structure with auxiliary properties. It behaves as described in |d2i_SAMPLE|. // // WARNING: Passing untrusted input to this function allows an attacker to // control auxiliary properties. This can allow unexpected influence over the // application if the certificate is used in a context that reads auxiliary // properties. This includes PKCS#12 serialization, trusted certificates in // |X509_STORE|, and callers of |X509_alias_get0| or |X509_keyid_get0|. // // Unlike similarly-named functions, this function does not parse a single // ASN.1 element. Trying to parse data directly embedded in a larger ASN.1 // structure will not behave correctly. OPENSSL_EXPORT X509 *d2i_X509_AUX(X509 **x509, const uint8_t **inp, long length); // X509_alias_set1 sets |x509|'s alias to |len| bytes from |name|. If |name| is // NULL, the alias is cleared instead. Aliases are not part of the certificate // itself and will not be serialized by |i2d_X509|. If |x509| is serialized in // a PKCS#12 structure, the friendlyName attribute (RFC 2985) will contain this // alias. OPENSSL_EXPORT int X509_alias_set1(X509 *x509, const uint8_t *name, ossl_ssize_t len); // X509_keyid_set1 sets |x509|'s key ID to |len| bytes from |id|. If |id| is // NULL, the key ID is cleared instead. Key IDs are not part of the certificate // itself and will not be serialized by |i2d_X509|. OPENSSL_EXPORT int X509_keyid_set1(X509 *x509, const uint8_t *id, ossl_ssize_t len); // X509_alias_get0 looks up |x509|'s alias. If found, it sets |*out_len| to the // alias's length and returns a pointer to a buffer containing the contents. If // not found, it outputs the empty string by returning NULL and setting // |*out_len| to zero. 
// // If |x509| was parsed from a PKCS#12 structure (see // |PKCS12_get_key_and_certs|), the alias will reflect the friendlyName // attribute (RFC 2985). // // WARNING: In OpenSSL, this function did not set |*out_len| when the alias was // missing. Callers that target both OpenSSL and BoringSSL should set the value // to zero before calling this function. OPENSSL_EXPORT const uint8_t *X509_alias_get0(const X509 *x509, int *out_len); // X509_keyid_get0 looks up |x509|'s key ID. If found, it sets |*out_len| to the // key ID's length and returns a pointer to a buffer containing the contents. If // not found, it outputs the empty string by returning NULL and setting // |*out_len| to zero. // // WARNING: In OpenSSL, this function did not set |*out_len| when the key ID was // missing. Callers that target both OpenSSL and BoringSSL should set the value // to zero before calling this function. OPENSSL_EXPORT const uint8_t *X509_keyid_get0(const X509 *x509, int *out_len); // X509_add1_trust_object configures |x509| as a valid trust anchor for |obj|. // It returns one on success and zero on error. |obj| should be a certificate // usage OID associated with an |X509_TRUST_*| constant. // // See |X509_VERIFY_PARAM_set_trust| for details on how this value is evaluated. // Note this only takes effect if |x509| was configured as a trusted certificate // via |X509_STORE|. OPENSSL_EXPORT int X509_add1_trust_object(X509 *x509, const ASN1_OBJECT *obj); // X509_add1_reject_object configures |x509| as distrusted for |obj|. It returns // one on success and zero on error. |obj| should be a certificate usage OID // associated with an |X509_TRUST_*| constant. // // See |X509_VERIFY_PARAM_set_trust| for details on how this value is evaluated. // Note this only takes effect if |x509| was configured as a trusted certificate // via |X509_STORE|. OPENSSL_EXPORT int X509_add1_reject_object(X509 *x509, const ASN1_OBJECT *obj); // X509_trust_clear clears the list of OIDs for which |x509| is trusted. 
See // also |X509_add1_trust_object|. OPENSSL_EXPORT void X509_trust_clear(X509 *x509); // X509_reject_clear clears the list of OIDs for which |x509| is distrusted. See // also |X509_add1_reject_object|. OPENSSL_EXPORT void X509_reject_clear(X509 *x509); // Certificate revocation lists. // // An |X509_CRL| object represents an X.509 certificate revocation list (CRL), // defined in RFC 5280. A CRL is a signed list of certificates, the // revokedCertificates field, which are no longer considered valid. Each entry // of this list is represented with an |X509_REVOKED| object, documented in the // "CRL entries" section below. // // Although an |X509_CRL| is a mutable object, mutating an |X509_CRL| or its // |X509_REVOKED|s can give incorrect results. Callers typically obtain // |X509_CRL|s by parsing some input with |d2i_X509_CRL|, etc. Such objects // carry information such as the serialized TBSCertList and decoded extensions, // which will become inconsistent when mutated. // // Instead, mutation functions should only be used when issuing new CRLs, as // described in a later section. DEFINE_STACK_OF(X509_CRL) DEFINE_STACK_OF(X509_REVOKED) // X509_CRL_up_ref adds one to the reference count of |crl| and returns one. OPENSSL_EXPORT int X509_CRL_up_ref(X509_CRL *crl); // X509_CRL_dup returns a newly-allocated copy of |crl|, or NULL on error. This // function works by serializing the structure, so if |crl| is incomplete, it // may fail. // // TODO(https://crbug.com/boringssl/407): This function should be const and // thread-safe but is currently neither in some cases, notably if |crl| was // mutated. OPENSSL_EXPORT X509_CRL *X509_CRL_dup(X509_CRL *crl); // X509_CRL_free decrements |crl|'s reference count and, if zero, releases // memory associated with |crl|. OPENSSL_EXPORT void X509_CRL_free(X509_CRL *crl); // d2i_X509_CRL parses up to |len| bytes from |*inp| as a DER-encoded X.509 // CertificateList (RFC 5280), as described in |d2i_SAMPLE|. 
OPENSSL_EXPORT X509_CRL *d2i_X509_CRL(X509_CRL **out, const uint8_t **inp, long len); // i2d_X509_CRL marshals |crl| as a X.509 CertificateList (RFC 5280), as // described in |i2d_SAMPLE|. // // TODO(https://crbug.com/boringssl/407): This function should be const and // thread-safe but is currently neither in some cases, notably if |crl| was // mutated. OPENSSL_EXPORT int i2d_X509_CRL(X509_CRL *crl, uint8_t **outp); // X509_CRL_match compares |a| and |b| and returns zero if they are equal, a // negative number if |b| sorts after |a| and a positive number if |a| sorts // after |b|. The sort order implemented by this function is arbitrary and does // not reflect properties of the CRL such as expiry. Applications should not // rely on the order itself. // // TODO(https://crbug.com/boringssl/355): This function works by comparing a // cached hash of the encoded CRL. This cached hash is computed when the CRL is // parsed, but not when mutating or issuing CRLs. This function should only be // used with |X509_CRL| objects that were parsed from bytes and never mutated. OPENSSL_EXPORT int X509_CRL_match(const X509_CRL *a, const X509_CRL *b); #define X509_CRL_VERSION_1 0 #define X509_CRL_VERSION_2 1 // X509_CRL_get_version returns the numerical value of |crl|'s version, which // will be one of the |X509_CRL_VERSION_*| constants. OPENSSL_EXPORT long X509_CRL_get_version(const X509_CRL *crl); // X509_CRL_get0_lastUpdate returns |crl|'s thisUpdate time. The OpenSSL API // refers to this field as lastUpdate. OPENSSL_EXPORT const ASN1_TIME *X509_CRL_get0_lastUpdate(const X509_CRL *crl); // X509_CRL_get0_nextUpdate returns |crl|'s nextUpdate time, or NULL if |crl| // has none. OPENSSL_EXPORT const ASN1_TIME *X509_CRL_get0_nextUpdate(const X509_CRL *crl); // X509_CRL_get_issuer returns |crl|'s issuer name. Note this function is not // const-correct for legacy reasons. 
OPENSSL_EXPORT X509_NAME *X509_CRL_get_issuer(const X509_CRL *crl); // X509_CRL_get0_by_serial finds the entry in |crl| whose serial number is // |serial|. If found, it sets |*out| to the entry and returns one. If not // found, it returns zero. // // On success, |*out| continues to be owned by |crl|. It is an error to free or // otherwise modify |*out|. // // TODO(crbug.com/boringssl/600): Ideally |crl| would be const. It is broadly // thread-safe, but changes the order of entries in |crl|. It cannot be called // concurrently with |i2d_X509_CRL|. OPENSSL_EXPORT int X509_CRL_get0_by_serial(X509_CRL *crl, X509_REVOKED **out, const ASN1_INTEGER *serial); // X509_CRL_get0_by_cert behaves like |X509_CRL_get0_by_serial|, except it looks // for the entry that matches |x509|. OPENSSL_EXPORT int X509_CRL_get0_by_cert(X509_CRL *crl, X509_REVOKED **out, X509 *x509); // X509_CRL_get_REVOKED returns the list of revoked certificates in |crl|, or // NULL if |crl| omits it. // // TODO(davidben): This function was originally a macro, without clear const // semantics. It should take a const input and give const output, but the latter // would break existing callers. For now, we match upstream. OPENSSL_EXPORT STACK_OF(X509_REVOKED) *X509_CRL_get_REVOKED(X509_CRL *crl); // X509_CRL_get0_extensions returns |crl|'s extension list, or NULL if |crl| // omits it. A CRL can have extensions on individual entries, which is // |X509_REVOKED_get0_extensions|, or on the overall CRL, which is this // function. OPENSSL_EXPORT const STACK_OF(X509_EXTENSION) *X509_CRL_get0_extensions( const X509_CRL *crl); // X509_CRL_get_ext_count returns the number of extensions in |x|. OPENSSL_EXPORT int X509_CRL_get_ext_count(const X509_CRL *x); // X509_CRL_get_ext_by_NID behaves like |X509v3_get_ext_by_NID| but searches for // extensions in |x|. 
OPENSSL_EXPORT int X509_CRL_get_ext_by_NID(const X509_CRL *x, int nid, int lastpos); // X509_CRL_get_ext_by_OBJ behaves like |X509v3_get_ext_by_OBJ| but searches for // extensions in |x|. OPENSSL_EXPORT int X509_CRL_get_ext_by_OBJ(const X509_CRL *x, const ASN1_OBJECT *obj, int lastpos); // X509_CRL_get_ext_by_critical behaves like |X509v3_get_ext_by_critical| but // searches for extensions in |x|. OPENSSL_EXPORT int X509_CRL_get_ext_by_critical(const X509_CRL *x, int crit, int lastpos); // X509_CRL_get_ext returns the extension in |x| at index |loc|, or NULL if // |loc| is out of bounds. This function returns a non-const pointer for OpenSSL // compatibility, but callers should not mutate the result. OPENSSL_EXPORT X509_EXTENSION *X509_CRL_get_ext(const X509_CRL *x, int loc); // X509_CRL_get_ext_d2i behaves like |X509V3_get_d2i| but looks for the // extension in |crl|'s extension list. // // WARNING: This function is difficult to use correctly. See the documentation // for |X509V3_get_d2i| for details. OPENSSL_EXPORT void *X509_CRL_get_ext_d2i(const X509_CRL *crl, int nid, int *out_critical, int *out_idx); // X509_CRL_get0_signature sets |*out_sig| and |*out_alg| to the signature and // signature algorithm of |crl|, respectively. Either output pointer may be NULL // to ignore the value. // // This function outputs the outer signature algorithm, not the one in the // TBSCertList. CRLs with mismatched signature algorithms will successfully // parse, but they will be rejected when verifying. OPENSSL_EXPORT void X509_CRL_get0_signature(const X509_CRL *crl, const ASN1_BIT_STRING **out_sig, const X509_ALGOR **out_alg); // X509_CRL_get_signature_nid returns the NID corresponding to |crl|'s signature // algorithm, or |NID_undef| if the signature algorithm does not correspond to // a known NID. OPENSSL_EXPORT int X509_CRL_get_signature_nid(const X509_CRL *crl); // i2d_X509_CRL_tbs serializes the TBSCertList portion of |crl|, as described in // |i2d_SAMPLE|. 
// // This function preserves the original encoding of the TBSCertList and may not // reflect modifications made to |crl|. It may be used to manually verify the // signature of an existing CRL. To generate CRLs, use |i2d_re_X509_CRL_tbs| // instead. OPENSSL_EXPORT int i2d_X509_CRL_tbs(X509_CRL *crl, unsigned char **outp); // X509_CRL_verify checks that |crl| has a valid signature by |pkey|. It returns // one if the signature is valid and zero otherwise. OPENSSL_EXPORT int X509_CRL_verify(X509_CRL *crl, EVP_PKEY *pkey); // Issuing certificate revocation lists. // // An |X509_CRL| object may also represent an incomplete CRL. Callers may // construct empty |X509_CRL| objects, fill in fields individually, and finally // sign the result. The following functions may be used for this purpose. // X509_CRL_new returns a newly-allocated, empty |X509_CRL| object, or NULL on // error. This object may be filled in and then signed to construct a CRL. OPENSSL_EXPORT X509_CRL *X509_CRL_new(void); // X509_CRL_set_version sets |crl|'s version to |version|, which should be one // of the |X509_CRL_VERSION_*| constants. It returns one on success and zero on // error. // // If unsure, use |X509_CRL_VERSION_2|. Note that, unlike certificates, CRL // versions are only defined up to v2. Callers should not use |X509_VERSION_3|. OPENSSL_EXPORT int X509_CRL_set_version(X509_CRL *crl, long version); // X509_CRL_set_issuer_name sets |crl|'s issuer to a copy of |name|. It returns // one on success and zero on error. OPENSSL_EXPORT int X509_CRL_set_issuer_name(X509_CRL *crl, X509_NAME *name); // X509_CRL_set1_lastUpdate sets |crl|'s thisUpdate time to |tm|. It returns one // on success and zero on error. The OpenSSL API refers to this field as // lastUpdate. OPENSSL_EXPORT int X509_CRL_set1_lastUpdate(X509_CRL *crl, const ASN1_TIME *tm); // X509_CRL_set1_nextUpdate sets |crl|'s nextUpdate time to |tm|. It returns one // on success and zero on error. 
OPENSSL_EXPORT int X509_CRL_set1_nextUpdate(X509_CRL *crl, const ASN1_TIME *tm); // X509_CRL_add0_revoked adds |rev| to |crl|. On success, it takes ownership of // |rev| and returns one. On error, it returns zero. If this function fails, the // caller retains ownership of |rev| and must release it when done. OPENSSL_EXPORT int X509_CRL_add0_revoked(X509_CRL *crl, X509_REVOKED *rev); // X509_CRL_sort sorts the entries in |crl| by serial number. It returns one on // success and zero on error. OPENSSL_EXPORT int X509_CRL_sort(X509_CRL *crl); // X509_CRL_delete_ext removes the extension in |x| at index |loc| and returns // the removed extension, or NULL if |loc| was out of bounds. If non-NULL, the // caller must release the result with |X509_EXTENSION_free|. OPENSSL_EXPORT X509_EXTENSION *X509_CRL_delete_ext(X509_CRL *x, int loc); // X509_CRL_add_ext adds a copy of |ex| to |x|. It returns one on success and // zero on failure. The caller retains ownership of |ex| and can release it // independently of |x|. // // The new extension is inserted at index |loc|, shifting extensions to the // right. If |loc| is -1 or out of bounds, the new extension is appended to the // list. OPENSSL_EXPORT int X509_CRL_add_ext(X509_CRL *x, const X509_EXTENSION *ex, int loc); // X509_CRL_add1_ext_i2d behaves like |X509V3_add1_i2d| but adds the extension // to |x|'s extension list. // // WARNING: This function may return zero or -1 on error. The caller must also // ensure |value|'s type matches |nid|. See the documentation for // |X509V3_add1_i2d| for details. OPENSSL_EXPORT int X509_CRL_add1_ext_i2d(X509_CRL *x, int nid, void *value, int crit, unsigned long flags); // X509_CRL_sign signs |crl| with |pkey| and replaces the signature algorithm // and signature fields. It returns the length of the signature on success and // zero on error. This function uses digest algorithm |md|, or |pkey|'s default // if NULL. Other signing parameters use |pkey|'s defaults. 
To customize them, // use |X509_CRL_sign_ctx|. OPENSSL_EXPORT int X509_CRL_sign(X509_CRL *crl, EVP_PKEY *pkey, const EVP_MD *md); // X509_CRL_sign_ctx signs |crl| with |ctx| and replaces the signature algorithm // and signature fields. It returns the length of the signature on success and // zero on error. The signature algorithm and parameters come from |ctx|, which // must have been initialized with |EVP_DigestSignInit|. The caller should // configure the corresponding |EVP_PKEY_CTX| before calling this function. // // On success or failure, this function mutates |ctx| and resets it to the empty // state. Caller should not rely on its contents after the function returns. OPENSSL_EXPORT int X509_CRL_sign_ctx(X509_CRL *crl, EVP_MD_CTX *ctx); // i2d_re_X509_CRL_tbs serializes the TBSCertList portion of |crl|, as described // in |i2d_SAMPLE|. // // This function re-encodes the TBSCertList and may not reflect |crl|'s original // encoding. It may be used to manually generate a signature for a new CRL. To // verify CRLs, use |i2d_X509_CRL_tbs| instead. OPENSSL_EXPORT int i2d_re_X509_CRL_tbs(X509_CRL *crl, unsigned char **outp); // X509_CRL_set1_signature_algo sets |crl|'s signature algorithm to |algo| and // returns one on success or zero on error. It updates both the signature field // of the TBSCertList structure, and the signatureAlgorithm field of the CRL. OPENSSL_EXPORT int X509_CRL_set1_signature_algo(X509_CRL *crl, const X509_ALGOR *algo); // X509_CRL_set1_signature_value sets |crl|'s signature to a copy of the // |sig_len| bytes pointed by |sig|. It returns one on success and zero on // error. // // Due to a specification error, X.509 CRLs store signatures in ASN.1 BIT // STRINGs, but signature algorithms return byte strings rather than bit // strings. This function creates a BIT STRING containing a whole number of // bytes, with the bit order matching the DER encoding. This matches the // encoding used by all X.509 signature algorithms. 
OPENSSL_EXPORT int X509_CRL_set1_signature_value(X509_CRL *crl, const uint8_t *sig, size_t sig_len); // CRL entries. // // Each entry of a CRL is represented as an |X509_REVOKED| object, which // describes a revoked certificate by serial number. // // When an |X509_REVOKED| is obtained from an |X509_CRL| object, it is an error // to mutate the object. Doing so may break |X509_CRL|'s and cause the library // to behave incorrectly. // X509_REVOKED_new returns a newly-allocated, empty |X509_REVOKED| object, or // NULL on allocation error. OPENSSL_EXPORT X509_REVOKED *X509_REVOKED_new(void); // X509_REVOKED_free releases memory associated with |rev|. OPENSSL_EXPORT void X509_REVOKED_free(X509_REVOKED *rev); // d2i_X509_REVOKED parses up to |len| bytes from |*inp| as a DER-encoded X.509 // CRL entry, as described in |d2i_SAMPLE|. OPENSSL_EXPORT X509_REVOKED *d2i_X509_REVOKED(X509_REVOKED **out, const uint8_t **inp, long len); // i2d_X509_REVOKED marshals |alg| as a DER-encoded X.509 CRL entry, as // described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_X509_REVOKED(const X509_REVOKED *alg, uint8_t **outp); // X509_REVOKED_dup returns a newly-allocated copy of |rev|, or NULL on error. // This function works by serializing the structure, so if |rev| is incomplete, // it may fail. OPENSSL_EXPORT X509_REVOKED *X509_REVOKED_dup(const X509_REVOKED *rev); // X509_REVOKED_get0_serialNumber returns the serial number of the certificate // revoked by |revoked|. OPENSSL_EXPORT const ASN1_INTEGER *X509_REVOKED_get0_serialNumber( const X509_REVOKED *revoked); // X509_REVOKED_set_serialNumber sets |revoked|'s serial number to |serial|. It // returns one on success or zero on error. OPENSSL_EXPORT int X509_REVOKED_set_serialNumber(X509_REVOKED *revoked, const ASN1_INTEGER *serial); // X509_REVOKED_get0_revocationDate returns the revocation time of the // certificate revoked by |revoked|. 
OPENSSL_EXPORT const ASN1_TIME *X509_REVOKED_get0_revocationDate( const X509_REVOKED *revoked); // X509_REVOKED_set_revocationDate sets |revoked|'s revocation time to |tm|. It // returns one on success or zero on error. OPENSSL_EXPORT int X509_REVOKED_set_revocationDate(X509_REVOKED *revoked, const ASN1_TIME *tm); // X509_REVOKED_get0_extensions returns |r|'s extensions list, or NULL if |r| // omits it. A CRL can have extensions on individual entries, which is this // function, or on the overall CRL, which is |X509_CRL_get0_extensions|. OPENSSL_EXPORT const STACK_OF(X509_EXTENSION) *X509_REVOKED_get0_extensions( const X509_REVOKED *r); // X509_REVOKED_get_ext_count returns the number of extensions in |x|. OPENSSL_EXPORT int X509_REVOKED_get_ext_count(const X509_REVOKED *x); // X509_REVOKED_get_ext_by_NID behaves like |X509v3_get_ext_by_NID| but searches // for extensions in |x|. OPENSSL_EXPORT int X509_REVOKED_get_ext_by_NID(const X509_REVOKED *x, int nid, int lastpos); // X509_REVOKED_get_ext_by_OBJ behaves like |X509v3_get_ext_by_OBJ| but searches // for extensions in |x|. OPENSSL_EXPORT int X509_REVOKED_get_ext_by_OBJ(const X509_REVOKED *x, const ASN1_OBJECT *obj, int lastpos); // X509_REVOKED_get_ext_by_critical behaves like |X509v3_get_ext_by_critical| // but searches for extensions in |x|. OPENSSL_EXPORT int X509_REVOKED_get_ext_by_critical(const X509_REVOKED *x, int crit, int lastpos); // X509_REVOKED_get_ext returns the extension in |x| at index |loc|, or NULL if // |loc| is out of bounds. This function returns a non-const pointer for OpenSSL // compatibility, but callers should not mutate the result. OPENSSL_EXPORT X509_EXTENSION *X509_REVOKED_get_ext(const X509_REVOKED *x, int loc); // X509_REVOKED_delete_ext removes the extension in |x| at index |loc| and // returns the removed extension, or NULL if |loc| was out of bounds. If // non-NULL, the caller must release the result with |X509_EXTENSION_free|. 
OPENSSL_EXPORT X509_EXTENSION *X509_REVOKED_delete_ext(X509_REVOKED *x, int loc); // X509_REVOKED_add_ext adds a copy of |ex| to |x|. It returns one on success // and zero on failure. The caller retains ownership of |ex| and can release it // independently of |x|. // // The new extension is inserted at index |loc|, shifting extensions to the // right. If |loc| is -1 or out of bounds, the new extension is appended to the // list. OPENSSL_EXPORT int X509_REVOKED_add_ext(X509_REVOKED *x, const X509_EXTENSION *ex, int loc); // X509_REVOKED_get_ext_d2i behaves like |X509V3_get_d2i| but looks for the // extension in |revoked|'s extension list. // // WARNING: This function is difficult to use correctly. See the documentation // for |X509V3_get_d2i| for details. OPENSSL_EXPORT void *X509_REVOKED_get_ext_d2i(const X509_REVOKED *revoked, int nid, int *out_critical, int *out_idx); // X509_REVOKED_add1_ext_i2d behaves like |X509V3_add1_i2d| but adds the // extension to |x|'s extension list. // // WARNING: This function may return zero or -1 on error. The caller must also // ensure |value|'s type matches |nid|. See the documentation for // |X509V3_add1_i2d| for details. OPENSSL_EXPORT int X509_REVOKED_add1_ext_i2d(X509_REVOKED *x, int nid, void *value, int crit, unsigned long flags); // Certificate requests. // // An |X509_REQ| represents a PKCS #10 certificate request (RFC 2986). These are // also referred to as certificate signing requests or CSRs. CSRs are a common // format used to request a certificate from a CA. // // Although an |X509_REQ| is a mutable object, mutating an |X509_REQ| can give // incorrect results. Callers typically obtain |X509_REQ|s by parsing some input // with |d2i_X509_REQ|, etc. Such objects carry information such as the // serialized CertificationRequestInfo, which will become inconsistent when // mutated. // // Instead, mutation functions should only be used when issuing new CSRs, as // described in a later section. 
// X509_REQ_dup returns a newly-allocated copy of |req|, or NULL on error. This // function works by serializing the structure, so if |req| is incomplete, it // may fail. // // TODO(https://crbug.com/boringssl/407): This function should be const and // thread-safe but is currently neither in some cases, notably if |req| was // mutated. OPENSSL_EXPORT X509_REQ *X509_REQ_dup(X509_REQ *req); // X509_REQ_free releases memory associated with |req|. OPENSSL_EXPORT void X509_REQ_free(X509_REQ *req); // d2i_X509_REQ parses up to |len| bytes from |*inp| as a DER-encoded // CertificateRequest (RFC 2986), as described in |d2i_SAMPLE|. OPENSSL_EXPORT X509_REQ *d2i_X509_REQ(X509_REQ **out, const uint8_t **inp, long len); // i2d_X509_REQ marshals |req| as a CertificateRequest (RFC 2986), as described // in |i2d_SAMPLE|. // // TODO(https://crbug.com/boringssl/407): This function should be const and // thread-safe but is currently neither in some cases, notably if |req| was // mutated. OPENSSL_EXPORT int i2d_X509_REQ(X509_REQ *req, uint8_t **outp); // X509_REQ_VERSION_1 is the version constant for |X509_REQ| objects. No other // versions are defined. #define X509_REQ_VERSION_1 0 // X509_REQ_get_version returns the numerical value of |req|'s version. This // will always be |X509_REQ_VERSION_1| for valid CSRs. For compatibility, // |d2i_X509_REQ| also accepts some invalid version numbers, in which case this // function may return other values. OPENSSL_EXPORT long X509_REQ_get_version(const X509_REQ *req); // X509_REQ_get_subject_name returns |req|'s subject name. Note this function is // not const-correct for legacy reasons. OPENSSL_EXPORT X509_NAME *X509_REQ_get_subject_name(const X509_REQ *req); // X509_REQ_get0_pubkey returns |req|'s public key as an |EVP_PKEY|, or NULL if // the public key was unsupported or could not be decoded. The |EVP_PKEY| is // cached in |req|, so callers must not mutate the result. 
OPENSSL_EXPORT EVP_PKEY *X509_REQ_get0_pubkey(const X509_REQ *req); // X509_REQ_get_pubkey behaves like |X509_REQ_get0_pubkey| but increments the // reference count on the |EVP_PKEY|. The caller must release the result with // |EVP_PKEY_free| when done. The |EVP_PKEY| is cached in |req|, so callers must // not mutate the result. OPENSSL_EXPORT EVP_PKEY *X509_REQ_get_pubkey(const X509_REQ *req); // X509_REQ_check_private_key returns one if |req|'s public key matches |pkey| // and zero otherwise. OPENSSL_EXPORT int X509_REQ_check_private_key(const X509_REQ *req, const EVP_PKEY *pkey); // X509_REQ_get_attr_count returns the number of attributes in |req|. OPENSSL_EXPORT int X509_REQ_get_attr_count(const X509_REQ *req); // X509_REQ_get_attr returns the attribute at index |loc| in |req|, or NULL if // out of bounds. OPENSSL_EXPORT X509_ATTRIBUTE *X509_REQ_get_attr(const X509_REQ *req, int loc); // X509_REQ_get_attr_by_NID returns the index of the attribute in |req| of type // |nid|, or a negative number if not found. If found, callers can use // |X509_REQ_get_attr| to look up the attribute by index. // // If |lastpos| is non-negative, it begins searching at |lastpos| + 1. Callers // can thus loop over all matching attributes by first passing -1 and then // passing the previously-returned value until no match is returned. OPENSSL_EXPORT int X509_REQ_get_attr_by_NID(const X509_REQ *req, int nid, int lastpos); // X509_REQ_get_attr_by_OBJ behaves like |X509_REQ_get_attr_by_NID| but looks // for attributes of type |obj|. OPENSSL_EXPORT int X509_REQ_get_attr_by_OBJ(const X509_REQ *req, const ASN1_OBJECT *obj, int lastpos); // X509_REQ_extension_nid returns one if |nid| is a supported CSR attribute type // for carrying extensions and zero otherwise. The supported types are // |NID_ext_req| (pkcs-9-at-extensionRequest from RFC 2985) and |NID_ms_ext_req| // (a Microsoft szOID_CERT_EXTENSIONS variant). 
OPENSSL_EXPORT int X509_REQ_extension_nid(int nid); // X509_REQ_get_extensions decodes the most preferred list of requested // extensions in |req| and returns a newly-allocated |STACK_OF(X509_EXTENSION)| // containing the result. It returns NULL on error, or if |req| did not request // extensions. // // CSRs do not store extensions directly. Instead there are attribute types // which are defined to hold extensions. See |X509_REQ_extension_nid|. This // function supports both pkcs-9-at-extensionRequest from RFC 2985 and the // Microsoft szOID_CERT_EXTENSIONS variant. If both are present, // pkcs-9-at-extensionRequest is preferred. OPENSSL_EXPORT STACK_OF(X509_EXTENSION) *X509_REQ_get_extensions( const X509_REQ *req); // X509_REQ_get0_signature sets |*out_sig| and |*out_alg| to the signature and // signature algorithm of |req|, respectively. Either output pointer may be NULL // to ignore the value. OPENSSL_EXPORT void X509_REQ_get0_signature(const X509_REQ *req, const ASN1_BIT_STRING **out_sig, const X509_ALGOR **out_alg); // X509_REQ_get_signature_nid returns the NID corresponding to |req|'s signature // algorithm, or |NID_undef| if the signature algorithm does not correspond to // a known NID. OPENSSL_EXPORT int X509_REQ_get_signature_nid(const X509_REQ *req); // X509_REQ_verify checks that |req| has a valid signature by |pkey|. It returns // one if the signature is valid and zero otherwise. OPENSSL_EXPORT int X509_REQ_verify(X509_REQ *req, EVP_PKEY *pkey); // X509_REQ_get1_email returns a newly-allocated list of NUL-terminated strings // containing all email addresses in |req|'s subject and all rfc822name names // in |req|'s subject alternative names. The subject alternative names extension // is extracted from the result of |X509_REQ_get_extensions|. Email addresses // which contain embedded NUL bytes are skipped. // // On error, or if there are no such email addresses, it returns NULL. When // done, the caller must release the result with |X509_email_free|. 
OPENSSL_EXPORT STACK_OF(OPENSSL_STRING) *X509_REQ_get1_email( const X509_REQ *req); // Issuing certificate requests. // // An |X509_REQ| object may also represent an incomplete CSR. Callers may // construct empty |X509_REQ| objects, fill in fields individually, and finally // sign the result. The following functions may be used for this purpose. // X509_REQ_new returns a newly-allocated, empty |X509_REQ| object, or NULL on // error. This object may be filled in and then signed to construct a CSR. OPENSSL_EXPORT X509_REQ *X509_REQ_new(void); // X509_REQ_set_version sets |req|'s version to |version|, which should be // |X509_REQ_VERSION_1|. It returns one on success and zero on error. // // The only defined CSR version is |X509_REQ_VERSION_1|, so there is no need to // call this function. OPENSSL_EXPORT int X509_REQ_set_version(X509_REQ *req, long version); // X509_REQ_set_subject_name sets |req|'s subject to a copy of |name|. It // returns one on success and zero on error. OPENSSL_EXPORT int X509_REQ_set_subject_name(X509_REQ *req, X509_NAME *name); // X509_REQ_set_pubkey sets |req|'s public key to |pkey|. It returns one on // success and zero on error. This function does not take ownership of |pkey| // and internally copies and updates reference counts as needed. OPENSSL_EXPORT int X509_REQ_set_pubkey(X509_REQ *req, EVP_PKEY *pkey); // X509_REQ_delete_attr removes the attribute at index |loc| in |req|. It // returns the removed attribute to the caller, or NULL if |loc| was out of // bounds. If non-NULL, the caller must release the result with // |X509_ATTRIBUTE_free| when done. It is also safe, but not necessary, to call // |X509_ATTRIBUTE_free| if the result is NULL. OPENSSL_EXPORT X509_ATTRIBUTE *X509_REQ_delete_attr(X509_REQ *req, int loc); // X509_REQ_add1_attr appends a copy of |attr| to |req|'s list of attributes. It // returns one on success and zero on error. 
OPENSSL_EXPORT int X509_REQ_add1_attr(X509_REQ *req, const X509_ATTRIBUTE *attr); // X509_REQ_add1_attr_by_OBJ appends a new attribute to |req| with type |obj|. // It returns one on success and zero on error. The value is determined by // |X509_ATTRIBUTE_set1_data|. // // WARNING: The interpretation of |attrtype|, |data|, and |len| is complex and // error-prone. See |X509_ATTRIBUTE_set1_data| for details. OPENSSL_EXPORT int X509_REQ_add1_attr_by_OBJ(X509_REQ *req, const ASN1_OBJECT *obj, int attrtype, const unsigned char *data, int len); // X509_REQ_add1_attr_by_NID behaves like |X509_REQ_add1_attr_by_OBJ| except the // attribute type is determined by |nid|. OPENSSL_EXPORT int X509_REQ_add1_attr_by_NID(X509_REQ *req, int nid, int attrtype, const unsigned char *data, int len); // X509_REQ_add1_attr_by_txt behaves like |X509_REQ_add1_attr_by_OBJ| except the // attribute type is determined by calling |OBJ_txt2obj| with |attrname|. OPENSSL_EXPORT int X509_REQ_add1_attr_by_txt(X509_REQ *req, const char *attrname, int attrtype, const unsigned char *data, int len); // X509_REQ_add_extensions_nid adds an attribute to |req| of type |nid|, to // request the certificate extensions in |exts|. It returns one on success and // zero on error. |nid| should be |NID_ext_req| or |NID_ms_ext_req|. OPENSSL_EXPORT int X509_REQ_add_extensions_nid( X509_REQ *req, const STACK_OF(X509_EXTENSION) *exts, int nid); // X509_REQ_add_extensions behaves like |X509_REQ_add_extensions_nid|, using the // standard |NID_ext_req| for the attribute type. OPENSSL_EXPORT int X509_REQ_add_extensions( X509_REQ *req, const STACK_OF(X509_EXTENSION) *exts); // X509_REQ_sign signs |req| with |pkey| and replaces the signature algorithm // and signature fields. It returns the length of the signature on success and // zero on error. This function uses digest algorithm |md|, or |pkey|'s default // if NULL. Other signing parameters use |pkey|'s defaults. To customize them, // use |X509_REQ_sign_ctx|. 
OPENSSL_EXPORT int X509_REQ_sign(X509_REQ *req, EVP_PKEY *pkey, const EVP_MD *md); // X509_REQ_sign_ctx signs |req| with |ctx| and replaces the signature algorithm // and signature fields. It returns the length of the signature on success and // zero on error. The signature algorithm and parameters come from |ctx|, which // must have been initialized with |EVP_DigestSignInit|. The caller should // configure the corresponding |EVP_PKEY_CTX| before calling this function. // // On success or failure, this function mutates |ctx| and resets it to the empty // state. Caller should not rely on its contents after the function returns. OPENSSL_EXPORT int X509_REQ_sign_ctx(X509_REQ *req, EVP_MD_CTX *ctx); // i2d_re_X509_REQ_tbs serializes the CertificationRequestInfo (see RFC 2986) // portion of |req|, as described in |i2d_SAMPLE|. // // This function re-encodes the CertificationRequestInfo and may not reflect // |req|'s original encoding. It may be used to manually generate a signature // for a new certificate request. OPENSSL_EXPORT int i2d_re_X509_REQ_tbs(X509_REQ *req, uint8_t **outp); // X509_REQ_set1_signature_algo sets |req|'s signature algorithm to |algo| and // returns one on success or zero on error. OPENSSL_EXPORT int X509_REQ_set1_signature_algo(X509_REQ *req, const X509_ALGOR *algo); // X509_REQ_set1_signature_value sets |req|'s signature to a copy of the // |sig_len| bytes pointed by |sig|. It returns one on success and zero on // error. // // Due to a specification error, PKCS#10 certificate requests store signatures // in ASN.1 BIT STRINGs, but signature algorithms return byte strings rather // than bit strings. This function creates a BIT STRING containing a whole // number of bytes, with the bit order matching the DER encoding. This matches // the encoding used by all X.509 signature algorithms. OPENSSL_EXPORT int X509_REQ_set1_signature_value(X509_REQ *req, const uint8_t *sig, size_t sig_len); // Names. 
//
// An |X509_NAME| represents an X.509 Name structure (RFC 5280). X.509 names are
// a complex, hierarchical structure over a collection of attributes. Each name
// is a sequence of relative distinguished names (RDNs), decreasing in
// specificity. For example, the first RDN may specify the country, while the
// next RDN may specify a locality. Each RDN is, itself, a set of attributes.
// Having more than one attribute in an RDN is uncommon, but possible. Within an
// RDN, attributes have the same level in specificity. Attribute types are
// OBJECT IDENTIFIERs. This determines the ASN.1 type of the value, which is
// commonly a string but may be other types.
//
// The |X509_NAME| representation flattens this two-level structure into a
// single list of attributes. Each attribute is stored in an |X509_NAME_ENTRY|,
// which also maintains the index of the RDN it is part of, accessible via
// |X509_NAME_ENTRY_set|. This can be used to recover the two-level structure.
//
// X.509 names are largely vestigial. Historically, DNS names were parsed out of
// the subject's common name attribute, but this is deprecated and has since
// moved to the subject alternative name extension. In modern usage, X.509 names
// are primarily opaque identifiers to link a certificate with its issuer.

DEFINE_STACK_OF(X509_NAME_ENTRY)
DEFINE_STACK_OF(X509_NAME)

// X509_NAME is an |ASN1_ITEM| whose ASN.1 type is X.509 Name (RFC 5280) and C
// type is |X509_NAME*|.
DECLARE_ASN1_ITEM(X509_NAME)

// X509_NAME_new returns a new, empty |X509_NAME|, or NULL on error.
OPENSSL_EXPORT X509_NAME *X509_NAME_new(void);

// X509_NAME_free releases memory associated with |name|.
OPENSSL_EXPORT void X509_NAME_free(X509_NAME *name);

// d2i_X509_NAME parses up to |len| bytes from |*inp| as a DER-encoded X.509
// Name (RFC 5280), as described in |d2i_SAMPLE|.
OPENSSL_EXPORT X509_NAME *d2i_X509_NAME(X509_NAME **out, const uint8_t **inp, long len); // i2d_X509_NAME marshals |in| as a DER-encoded X.509 Name (RFC 5280), as // described in |i2d_SAMPLE|. // // TODO(https://crbug.com/boringssl/407): This function should be const and // thread-safe but is currently neither in some cases, notably if |in| was // mutated. OPENSSL_EXPORT int i2d_X509_NAME(X509_NAME *in, uint8_t **outp); // X509_NAME_dup returns a newly-allocated copy of |name|, or NULL on error. // // TODO(https://crbug.com/boringssl/407): This function should be const and // thread-safe but is currently neither in some cases, notably if |name| was // mutated. OPENSSL_EXPORT X509_NAME *X509_NAME_dup(X509_NAME *name); // X509_NAME_cmp compares |a| and |b|'s canonicalized forms. It returns zero if // they are equal, one if |a| sorts after |b|, -1 if |b| sorts after |a|, and -2 // on error. // // TODO(https://crbug.com/boringssl/407): This function is const, but it is not // always thread-safe, notably if |name| was mutated. // // TODO(https://crbug.com/boringssl/355): The -2 return is very inconvenient to // pass to a sorting function. Can we make this infallible? In the meantime, // prefer to use this function only for equality checks rather than comparisons. // Although even the library itself passes this to a sorting function. OPENSSL_EXPORT int X509_NAME_cmp(const X509_NAME *a, const X509_NAME *b); // X509_NAME_get0_der marshals |name| as a DER-encoded X.509 Name (RFC 5280). On // success, it returns one and sets |*out_der| and |*out_der_len| to a buffer // containing the result. Otherwise, it returns zero. |*out_der| is owned by // |name| and must not be freed by the caller. It is invalidated after |name| is // mutated or freed. // // Avoid this function and prefer |i2d_X509_NAME|. It is one of the reasons // |X509_NAME| functions, including this one, are not consistently thread-safe // or const-correct. 
Depending on the resolution of // https://crbug.com/boringssl/407, this function may be removed or cause poor // performance. OPENSSL_EXPORT int X509_NAME_get0_der(X509_NAME *name, const uint8_t **out_der, size_t *out_der_len); // X509_NAME_set makes a copy of |name|. On success, it frees |*xn|, sets |*xn| // to the copy, and returns one. Otherwise, it returns zero. // // TODO(https://crbug.com/boringssl/407): This function should be const and // thread-safe but is currently neither in some cases, notably if |name| was // mutated. OPENSSL_EXPORT int X509_NAME_set(X509_NAME **xn, X509_NAME *name); // X509_NAME_entry_count returns the number of entries in |name|. OPENSSL_EXPORT int X509_NAME_entry_count(const X509_NAME *name); // X509_NAME_get_index_by_NID returns the zero-based index of the first // attribute in |name| with type |nid|, or -1 if there is none. |nid| should be // one of the |NID_*| constants. If |lastpos| is non-negative, it begins // searching at |lastpos+1|. To search all attributes, pass in -1, not zero. // // Indices from this function refer to |X509_NAME|'s flattened representation. OPENSSL_EXPORT int X509_NAME_get_index_by_NID(const X509_NAME *name, int nid, int lastpos); // X509_NAME_get_index_by_OBJ behaves like |X509_NAME_get_index_by_NID| but // looks for attributes with type |obj|. OPENSSL_EXPORT int X509_NAME_get_index_by_OBJ(const X509_NAME *name, const ASN1_OBJECT *obj, int lastpos); // X509_NAME_get_entry returns the attribute in |name| at index |loc|, or NULL // if |loc| is out of range. |loc| is interpreted using |X509_NAME|'s flattened // representation. This function returns a non-const pointer for OpenSSL // compatibility, but callers should not mutate the result. Doing so will break // internal invariants in the library. OPENSSL_EXPORT X509_NAME_ENTRY *X509_NAME_get_entry(const X509_NAME *name, int loc); // X509_NAME_delete_entry removes and returns the attribute in |name| at index // |loc|, or NULL if |loc| is out of range. 
|loc| is interpreted using // |X509_NAME|'s flattened representation. If the attribute is found, the caller // is responsible for releasing the result with |X509_NAME_ENTRY_free|. // // This function will internally update RDN indices (see |X509_NAME_ENTRY_set|) // so they continue to be consecutive. OPENSSL_EXPORT X509_NAME_ENTRY *X509_NAME_delete_entry(X509_NAME *name, int loc); // X509_NAME_add_entry adds a copy of |entry| to |name| and returns one on // success or zero on error. If |loc| is -1, the entry is appended to |name|. // Otherwise, it is inserted at index |loc|. If |set| is -1, the entry is added // to the previous entry's RDN. If it is 0, the entry becomes a singleton RDN. // If 1, it is added to next entry's RDN. // // This function will internally update RDN indices (see |X509_NAME_ENTRY_set|) // so they continue to be consecutive. OPENSSL_EXPORT int X509_NAME_add_entry(X509_NAME *name, const X509_NAME_ENTRY *entry, int loc, int set); // X509_NAME_add_entry_by_OBJ adds a new entry to |name| and returns one on // success or zero on error. The entry's attribute type is |obj|. The entry's // attribute value is determined by |type|, |bytes|, and |len|, as in // |X509_NAME_ENTRY_set_data|. The entry's position is determined by |loc| and // |set| as in |X509_NAME_add_entry|. OPENSSL_EXPORT int X509_NAME_add_entry_by_OBJ(X509_NAME *name, const ASN1_OBJECT *obj, int type, const uint8_t *bytes, ossl_ssize_t len, int loc, int set); // X509_NAME_add_entry_by_NID behaves like |X509_NAME_add_entry_by_OBJ| but sets // the entry's attribute type to |nid|, which should be one of the |NID_*| // constants. OPENSSL_EXPORT int X509_NAME_add_entry_by_NID(X509_NAME *name, int nid, int type, const uint8_t *bytes, ossl_ssize_t len, int loc, int set); // X509_NAME_add_entry_by_txt behaves like |X509_NAME_add_entry_by_OBJ| but sets // the entry's attribute type to |field|, which is passed to |OBJ_txt2obj|. 
OPENSSL_EXPORT int X509_NAME_add_entry_by_txt(X509_NAME *name, const char *field, int type, const uint8_t *bytes, ossl_ssize_t len, int loc, int set); // X509_NAME_ENTRY_new returns a new, empty |X509_NAME_ENTRY|, or NULL on error. OPENSSL_EXPORT X509_NAME_ENTRY *X509_NAME_ENTRY_new(void); // X509_NAME_ENTRY_free releases memory associated with |entry|. OPENSSL_EXPORT void X509_NAME_ENTRY_free(X509_NAME_ENTRY *entry); // X509_NAME_ENTRY_dup returns a newly-allocated copy of |entry|, or NULL on // error. OPENSSL_EXPORT X509_NAME_ENTRY *X509_NAME_ENTRY_dup( const X509_NAME_ENTRY *entry); // X509_NAME_ENTRY_get_object returns |entry|'s attribute type. This function // returns a non-const pointer for OpenSSL compatibility, but callers should not // mutate the result. Doing so will break internal invariants in the library. OPENSSL_EXPORT ASN1_OBJECT *X509_NAME_ENTRY_get_object( const X509_NAME_ENTRY *entry); // X509_NAME_ENTRY_set_object sets |entry|'s attribute type to |obj|. It returns // one on success and zero on error. OPENSSL_EXPORT int X509_NAME_ENTRY_set_object(X509_NAME_ENTRY *entry, const ASN1_OBJECT *obj); // X509_NAME_ENTRY_get_data returns |entry|'s attribute value, represented as an // |ASN1_STRING|. This value may have any ASN.1 type, so callers must check the // type before interpreting the contents. This function returns a non-const // pointer for OpenSSL compatibility, but callers should not mutate the result. // Doing so will break internal invariants in the library. // // TODO(https://crbug.com/boringssl/412): Although the spec says any ASN.1 type // is allowed, we currently only allow an ad-hoc set of types. Additionally, it // is unclear if some types can even be represented by this function. OPENSSL_EXPORT ASN1_STRING *X509_NAME_ENTRY_get_data( const X509_NAME_ENTRY *entry); // X509_NAME_ENTRY_set_data sets |entry|'s value to |len| bytes from |bytes|. It // returns one on success and zero on error. 
If |len| is -1, |bytes| must be a // NUL-terminated C string and the length is determined by |strlen|. |bytes| is // converted to an ASN.1 type as follows: // // If |type| is a |MBSTRING_*| constant, the value is an ASN.1 string. The // string is determined by decoding |bytes| in the encoding specified by |type|, // and then re-encoding it in a form appropriate for |entry|'s attribute type. // See |ASN1_STRING_set_by_NID| for details. // // Otherwise, the value is an |ASN1_STRING| with type |type| and value |bytes|. // See |ASN1_STRING| for how to format ASN.1 types as an |ASN1_STRING|. If // |type| is |V_ASN1_UNDEF| the previous |ASN1_STRING| type is reused. OPENSSL_EXPORT int X509_NAME_ENTRY_set_data(X509_NAME_ENTRY *entry, int type, const uint8_t *bytes, ossl_ssize_t len); // X509_NAME_ENTRY_set returns the zero-based index of the RDN which contains // |entry|. Consecutive entries with the same index are part of the same RDN. OPENSSL_EXPORT int X509_NAME_ENTRY_set(const X509_NAME_ENTRY *entry); // X509_NAME_ENTRY_create_by_OBJ creates a new |X509_NAME_ENTRY| with attribute // type |obj|. The attribute value is determined from |type|, |bytes|, and |len| // as in |X509_NAME_ENTRY_set_data|. It returns the |X509_NAME_ENTRY| on success // and NULL on error. // // If |out| is non-NULL and |*out| is NULL, it additionally sets |*out| to the // result on success. If both |out| and |*out| are non-NULL, it updates the // object at |*out| instead of allocating a new one. OPENSSL_EXPORT X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_OBJ( X509_NAME_ENTRY **out, const ASN1_OBJECT *obj, int type, const uint8_t *bytes, ossl_ssize_t len); // X509_NAME_ENTRY_create_by_NID behaves like |X509_NAME_ENTRY_create_by_OBJ| // except the attribute type is |nid|, which should be one of the |NID_*| // constants. 
OPENSSL_EXPORT X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_NID( X509_NAME_ENTRY **out, int nid, int type, const uint8_t *bytes, ossl_ssize_t len); // X509_NAME_ENTRY_create_by_txt behaves like |X509_NAME_ENTRY_create_by_OBJ| // except the attribute type is |field|, which is passed to |OBJ_txt2obj|. OPENSSL_EXPORT X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_txt( X509_NAME_ENTRY **out, const char *field, int type, const uint8_t *bytes, ossl_ssize_t len); // Public keys. // // X.509 encodes public keys as SubjectPublicKeyInfo (RFC 5280), sometimes // referred to as SPKI. These are represented in this library by |X509_PUBKEY|. // X509_PUBKEY_new returns a newly-allocated, empty |X509_PUBKEY| object, or // NULL on error. OPENSSL_EXPORT X509_PUBKEY *X509_PUBKEY_new(void); // X509_PUBKEY_free releases memory associated with |key|. OPENSSL_EXPORT void X509_PUBKEY_free(X509_PUBKEY *key); // d2i_X509_PUBKEY parses up to |len| bytes from |*inp| as a DER-encoded // SubjectPublicKeyInfo, as described in |d2i_SAMPLE|. OPENSSL_EXPORT X509_PUBKEY *d2i_X509_PUBKEY(X509_PUBKEY **out, const uint8_t **inp, long len); // i2d_X509_PUBKEY marshals |key| as a DER-encoded SubjectPublicKeyInfo, as // described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_X509_PUBKEY(const X509_PUBKEY *key, uint8_t **outp); // X509_PUBKEY_set serializes |pkey| into a newly-allocated |X509_PUBKEY| // structure. On success, it frees |*x| if non-NULL, then sets |*x| to the new // object, and returns one. Otherwise, it returns zero. OPENSSL_EXPORT int X509_PUBKEY_set(X509_PUBKEY **x, EVP_PKEY *pkey); // X509_PUBKEY_get0 returns |key| as an |EVP_PKEY|, or NULL if |key| either // could not be parsed or is an unrecognized algorithm. The |EVP_PKEY| is cached // in |key|, so callers must not mutate the result. OPENSSL_EXPORT EVP_PKEY *X509_PUBKEY_get0(const X509_PUBKEY *key); // X509_PUBKEY_get behaves like |X509_PUBKEY_get0| but increments the reference // count on the |EVP_PKEY|. 
The caller must release the result with // |EVP_PKEY_free| when done. The |EVP_PKEY| is cached in |key|, so callers must // not mutate the result. OPENSSL_EXPORT EVP_PKEY *X509_PUBKEY_get(const X509_PUBKEY *key); // X509_PUBKEY_set0_param sets |pub| to a key with AlgorithmIdentifier // determined by |obj|, |param_type|, and |param_value|, and an encoded // public key of |key|. On success, it gives |pub| ownership of all the other // parameters and returns one. Otherwise, it returns zero. |key| must have been // allocated by |OPENSSL_malloc|. |obj| and, if applicable, |param_value| must // not be freed after a successful call, and must have been allocated in a // manner compatible with |ASN1_OBJECT_free| or |ASN1_STRING_free|. // // |obj|, |param_type|, and |param_value| are interpreted as in // |X509_ALGOR_set0|. See |X509_ALGOR_set0| for details. OPENSSL_EXPORT int X509_PUBKEY_set0_param(X509_PUBKEY *pub, ASN1_OBJECT *obj, int param_type, void *param_value, uint8_t *key, int key_len); // X509_PUBKEY_get0_param outputs fields of |pub| and returns one. If |out_obj| // is not NULL, it sets |*out_obj| to AlgorithmIdentifier's OID. If |out_key| // is not NULL, it sets |*out_key| and |*out_key_len| to the encoded public key. // If |out_alg| is not NULL, it sets |*out_alg| to the AlgorithmIdentifier. // // All pointers outputted by this function are internal to |pub| and must not be // freed by the caller. Additionally, although some outputs are non-const, // callers must not mutate the resulting objects. // // Note: X.509 SubjectPublicKeyInfo structures store the encoded public key as a // BIT STRING. |*out_key| and |*out_key_len| will silently pad the key with zero // bits if |pub| did not contain a whole number of bytes. Use // |X509_PUBKEY_get0_public_key| to preserve this information. 
OPENSSL_EXPORT int X509_PUBKEY_get0_param(ASN1_OBJECT **out_obj,
                                          const uint8_t **out_key,
                                          int *out_key_len,
                                          X509_ALGOR **out_alg,
                                          X509_PUBKEY *pub);

// X509_PUBKEY_get0_public_key returns |pub|'s encoded public key.
OPENSSL_EXPORT const ASN1_BIT_STRING *X509_PUBKEY_get0_public_key(
    const X509_PUBKEY *pub);


// Extensions.
//
// X.509 certificates and CRLs may contain a list of extensions (RFC 5280).
// Extensions have a type, specified by an object identifier (|ASN1_OBJECT|) and
// a byte string value, which should be a DER-encoded structure whose type is
// determined by the extension type. This library represents extensions with the
// |X509_EXTENSION| type.

// X509_EXTENSION is an |ASN1_ITEM| whose ASN.1 type is X.509 Extension (RFC
// 5280) and C type is |X509_EXTENSION*|.
DECLARE_ASN1_ITEM(X509_EXTENSION)

// X509_EXTENSION_new returns a newly-allocated, empty |X509_EXTENSION| object
// or NULL on error.
OPENSSL_EXPORT X509_EXTENSION *X509_EXTENSION_new(void);

// X509_EXTENSION_free releases memory associated with |ex|.
OPENSSL_EXPORT void X509_EXTENSION_free(X509_EXTENSION *ex);

// d2i_X509_EXTENSION parses up to |len| bytes from |*inp| as a DER-encoded
// X.509 Extension (RFC 5280), as described in |d2i_SAMPLE|.
OPENSSL_EXPORT X509_EXTENSION *d2i_X509_EXTENSION(X509_EXTENSION **out,
                                                  const uint8_t **inp,
                                                  long len);

// i2d_X509_EXTENSION marshals |ex| as a DER-encoded X.509 Extension (RFC
// 5280), as described in |i2d_SAMPLE|.
OPENSSL_EXPORT int i2d_X509_EXTENSION(const X509_EXTENSION *ex, uint8_t **outp);

// X509_EXTENSION_dup returns a newly-allocated copy of |ex|, or NULL on error.
// This function works by serializing the structure, so if |ex| is incomplete,
// it may fail.
OPENSSL_EXPORT X509_EXTENSION *X509_EXTENSION_dup(const X509_EXTENSION *ex);

// X509_EXTENSION_create_by_NID creates a new |X509_EXTENSION| with type |nid|,
// value |data|, and critical bit |crit|. It returns an |X509_EXTENSION| on
// success, and NULL on error.
|nid| should be a |NID_*| constant. // // If |ex| and |*ex| are both non-NULL, |*ex| is used to hold the result, // otherwise a new object is allocated. If |ex| is non-NULL and |*ex| is NULL, // the function sets |*ex| to point to the newly allocated result, in addition // to returning the result. OPENSSL_EXPORT X509_EXTENSION *X509_EXTENSION_create_by_NID( X509_EXTENSION **ex, int nid, int crit, const ASN1_OCTET_STRING *data); // X509_EXTENSION_create_by_OBJ behaves like |X509_EXTENSION_create_by_NID|, but // the extension type is determined by an |ASN1_OBJECT|. OPENSSL_EXPORT X509_EXTENSION *X509_EXTENSION_create_by_OBJ( X509_EXTENSION **ex, const ASN1_OBJECT *obj, int crit, const ASN1_OCTET_STRING *data); // X509_EXTENSION_get_object returns |ex|'s extension type. This function // returns a non-const pointer for OpenSSL compatibility, but callers should not // mutate the result. OPENSSL_EXPORT ASN1_OBJECT *X509_EXTENSION_get_object(const X509_EXTENSION *ex); // X509_EXTENSION_get_data returns |ne|'s extension value. This function returns // a non-const pointer for OpenSSL compatibility, but callers should not mutate // the result. OPENSSL_EXPORT ASN1_OCTET_STRING *X509_EXTENSION_get_data( const X509_EXTENSION *ne); // X509_EXTENSION_get_critical returns one if |ex| is critical and zero // otherwise. OPENSSL_EXPORT int X509_EXTENSION_get_critical(const X509_EXTENSION *ex); // X509_EXTENSION_set_object sets |ex|'s extension type to |obj|. It returns one // on success and zero on error. OPENSSL_EXPORT int X509_EXTENSION_set_object(X509_EXTENSION *ex, const ASN1_OBJECT *obj); // X509_EXTENSION_set_critical sets |ex| to critical if |crit| is non-zero and // to non-critical if |crit| is zero. OPENSSL_EXPORT int X509_EXTENSION_set_critical(X509_EXTENSION *ex, int crit); // X509_EXTENSION_set_data set's |ex|'s extension value to a copy of |data|. It // returns one on success and zero on error. 
OPENSSL_EXPORT int X509_EXTENSION_set_data(X509_EXTENSION *ex, const ASN1_OCTET_STRING *data); // Extension lists. // // The following functions manipulate lists of extensions. Most of them have // corresponding functions on the containing |X509|, |X509_CRL|, or // |X509_REVOKED|. DEFINE_STACK_OF(X509_EXTENSION) typedef STACK_OF(X509_EXTENSION) X509_EXTENSIONS; // d2i_X509_EXTENSIONS parses up to |len| bytes from |*inp| as a DER-encoded // SEQUENCE OF Extension (RFC 5280), as described in |d2i_SAMPLE|. OPENSSL_EXPORT X509_EXTENSIONS *d2i_X509_EXTENSIONS(X509_EXTENSIONS **out, const uint8_t **inp, long len); // i2d_X509_EXTENSIONS marshals |alg| as a DER-encoded SEQUENCE OF Extension // (RFC 5280), as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_X509_EXTENSIONS(const X509_EXTENSIONS *alg, uint8_t **outp); // X509v3_get_ext_count returns the number of extensions in |x|. OPENSSL_EXPORT int X509v3_get_ext_count(const STACK_OF(X509_EXTENSION) *x); // X509v3_get_ext_by_NID returns the index of the first extension in |x| with // type |nid|, or a negative number if not found. If found, callers can use // |X509v3_get_ext| to look up the extension by index. // // If |lastpos| is non-negative, it begins searching at |lastpos| + 1. Callers // can thus loop over all matching extensions by first passing -1 and then // passing the previously-returned value until no match is returned. OPENSSL_EXPORT int X509v3_get_ext_by_NID(const STACK_OF(X509_EXTENSION) *x, int nid, int lastpos); // X509v3_get_ext_by_OBJ behaves like |X509v3_get_ext_by_NID| but looks for // extensions matching |obj|. OPENSSL_EXPORT int X509v3_get_ext_by_OBJ(const STACK_OF(X509_EXTENSION) *x, const ASN1_OBJECT *obj, int lastpos); // X509v3_get_ext_by_critical returns the index of the first extension in |x| // whose critical bit matches |crit|, or a negative number if no such extension // was found. // // If |lastpos| is non-negative, it begins searching at |lastpos| + 1. 
// Callers can thus loop over all matching extensions by first passing -1 and
// then passing the previously-returned value until no match is returned.
OPENSSL_EXPORT int X509v3_get_ext_by_critical(const STACK_OF(X509_EXTENSION) *x,
                                              int crit, int lastpos);

// X509v3_get_ext returns the extension in |x| at index |loc|, or NULL if |loc|
// is out of bounds. This function returns a non-const pointer for OpenSSL
// compatibility, but callers should not mutate the result.
OPENSSL_EXPORT X509_EXTENSION *X509v3_get_ext(const STACK_OF(X509_EXTENSION) *x,
                                              int loc);

// X509v3_delete_ext removes the extension in |x| at index |loc| and returns the
// removed extension, or NULL if |loc| was out of bounds. If an extension was
// returned, the caller must release it with |X509_EXTENSION_free|.
OPENSSL_EXPORT X509_EXTENSION *X509v3_delete_ext(STACK_OF(X509_EXTENSION) *x,
                                                 int loc);

// X509v3_add_ext adds a copy of |ex| to the extension list in |*x|. If |*x| is
// NULL, it allocates a new |STACK_OF(X509_EXTENSION)| to hold the copy and sets
// |*x| to the new list. It returns |*x| on success and NULL on error. The
// caller retains ownership of |ex| and can release it independently of |*x|.
//
// The new extension is inserted at index |loc|, shifting extensions to the
// right. If |loc| is -1 or out of bounds, the new extension is appended to the
// list.
OPENSSL_EXPORT STACK_OF(X509_EXTENSION) *X509v3_add_ext(
    STACK_OF(X509_EXTENSION) **x, const X509_EXTENSION *ex, int loc);


// Built-in extensions.
//
// Several functions in the library encode and decode extension values into a
// C structure specific to that extension.
// The following extensions are supported:
//
// - |NID_authority_key_identifier| with type |AUTHORITY_KEYID|
// - |NID_basic_constraints| with type |BASIC_CONSTRAINTS|
// - |NID_certificate_issuer| with type |GENERAL_NAMES|
// - |NID_certificate_policies| with type |CERTIFICATEPOLICIES|
// - |NID_crl_distribution_points| with type |CRL_DIST_POINTS|
// - |NID_crl_number| with type |ASN1_INTEGER|
// - |NID_crl_reason| with type |ASN1_ENUMERATED|
// - |NID_delta_crl| with type |ASN1_INTEGER|
// - |NID_ext_key_usage| with type |EXTENDED_KEY_USAGE|
// - |NID_freshest_crl| with type |CRL_DIST_POINTS|
// - |NID_id_pkix_OCSP_noCheck| with type |ASN1_NULL|
// - |NID_info_access| with type |AUTHORITY_INFO_ACCESS|
// - |NID_inhibit_any_policy| with type |ASN1_INTEGER|
// - |NID_invalidity_date| with type |ASN1_GENERALIZEDTIME|
// - |NID_issuer_alt_name| with type |GENERAL_NAMES|
// - |NID_issuing_distribution_point| with type |ISSUING_DIST_POINT|
// - |NID_key_usage| with type |ASN1_BIT_STRING|
// - |NID_name_constraints| with type |NAME_CONSTRAINTS|
// - |NID_netscape_base_url| with type |ASN1_IA5STRING|
// - |NID_netscape_ca_policy_url| with type |ASN1_IA5STRING|
// - |NID_netscape_ca_revocation_url| with type |ASN1_IA5STRING|
// - |NID_netscape_cert_type| with type |ASN1_BIT_STRING|
// - |NID_netscape_comment| with type |ASN1_IA5STRING|
// - |NID_netscape_renewal_url| with type |ASN1_IA5STRING|
// - |NID_netscape_revocation_url| with type |ASN1_IA5STRING|
// - |NID_netscape_ssl_server_name| with type |ASN1_IA5STRING|
// - |NID_policy_constraints| with type |POLICY_CONSTRAINTS|
// - |NID_policy_mappings| with type |POLICY_MAPPINGS|
// - |NID_sinfo_access| with type |AUTHORITY_INFO_ACCESS|
// - |NID_subject_alt_name| with type |GENERAL_NAMES|
// - |NID_subject_key_identifier| with type |ASN1_OCTET_STRING|
//
// If an extension does not appear in this list, e.g.
for a custom extension, // callers can instead use functions such as |X509_get_ext_by_OBJ|, // |X509_EXTENSION_get_data|, and |X509_EXTENSION_create_by_OBJ| to inspect or // create extensions directly. Although the |X509V3_EXT_METHOD| mechanism allows // registering custom extensions, doing so is deprecated and may result in // threading or memory errors. // X509V3_EXT_d2i decodes |ext| and returns a pointer to a newly-allocated // structure, with type dependent on the type of the extension. It returns NULL // if |ext| is an unsupported extension or if there was a syntax error in the // extension. The caller should cast the return value to the expected type and // free the structure when done. // // WARNING: Casting the return value to the wrong type is a potentially // exploitable memory error, so callers must not use this function before // checking |ext| is of a known type. See the list at the top of this section // for the correct types. OPENSSL_EXPORT void *X509V3_EXT_d2i(const X509_EXTENSION *ext); // X509V3_get_d2i finds and decodes the extension in |extensions| of type |nid|. // If found, it decodes it and returns a newly-allocated structure, with type // dependent on |nid|. If the extension is not found or on error, it returns // NULL. The caller may distinguish these cases using the |out_critical| value. // // If |out_critical| is not NULL, this function sets |*out_critical| to one if // the extension is found and critical, zero if it is found and not critical, -1 // if it is not found, and -2 if there is an invalid duplicate extension. Note // this function may set |*out_critical| to one or zero and still return NULL if // the extension is found but has a syntax error. // // If |out_idx| is not NULL, this function looks for the first occurrence of the // extension after |*out_idx|. It then sets |*out_idx| to the index of the // extension, or -1 if not found. If |out_idx| is non-NULL, duplicate extensions // are not treated as an error. 
// Callers, however, should not rely on this
// behavior as it may be removed in the future. Duplicate extensions are
// forbidden in RFC 5280.
//
// WARNING: This function is difficult to use correctly. Callers should pass a
// non-NULL |out_critical| and check both the return value and |*out_critical|
// to handle errors. If the return value is NULL and |*out_critical| is not -1,
// there was an error. Otherwise, the function succeeded but may return NULL
// for a missing extension. Callers should pass NULL to |out_idx| so that
// duplicate extensions are handled correctly.
//
// Additionally, casting the return value to the wrong type is a potentially
// exploitable memory error, so callers must ensure the cast and |nid| match.
// See the list at the top of this section for the correct types.
OPENSSL_EXPORT void *X509V3_get_d2i(const STACK_OF(X509_EXTENSION) *extensions,
                                    int nid, int *out_critical, int *out_idx);

// X509V3_EXT_free casts |ext_data| into the type that corresponds to |nid| and
// releases memory associated with it. It returns one on success and zero if
// |nid| is not a known extension.
//
// WARNING: Casting |ext_data| to the wrong type is a potentially exploitable
// memory error, so callers must ensure |ext_data|'s type matches |nid|. See the
// list at the top of this section for the correct types.
//
// TODO(davidben): OpenSSL upstream no longer exposes this function. Remove it?
OPENSSL_EXPORT int X509V3_EXT_free(int nid, void *ext_data);

// X509V3_EXT_i2d casts |ext_struc| into the type that corresponds to
// |ext_nid|, serializes it, and returns a newly-allocated |X509_EXTENSION|
// object containing the serialization, or NULL on error. The |X509_EXTENSION|
// has OID |ext_nid| and is critical if |crit| is one.
//
// WARNING: Casting |ext_struc| to the wrong type is a potentially exploitable
// memory error, so callers must ensure |ext_struc|'s type matches |ext_nid|.
// See the list at the top of this section for the correct types.
OPENSSL_EXPORT X509_EXTENSION *X509V3_EXT_i2d(int ext_nid, int crit,
                                              void *ext_struc);

// The following constants control the behavior of |X509V3_add1_i2d| and related
// functions.

// X509V3_ADD_OP_MASK can be ANDed with the flags to determine how duplicate
// extensions are processed.
#define X509V3_ADD_OP_MASK 0xfL

// X509V3_ADD_DEFAULT causes the function to fail if the extension was already
// present.
#define X509V3_ADD_DEFAULT 0L

// X509V3_ADD_APPEND causes the function to unconditionally append the new
// extension to the extensions list, even if there is a duplicate.
#define X509V3_ADD_APPEND 1L

// X509V3_ADD_REPLACE causes the function to replace the existing extension, or
// append if it is not present.
#define X509V3_ADD_REPLACE 2L

// X509V3_ADD_REPLACE_EXISTING causes the function to replace the existing
// extension and fail if it is not present.
#define X509V3_ADD_REPLACE_EXISTING 3L

// X509V3_ADD_KEEP_EXISTING causes the function to succeed without replacing the
// extension if already present.
#define X509V3_ADD_KEEP_EXISTING 4L

// X509V3_ADD_DELETE causes the function to remove the matching extension. No
// new extension is added. If there is no matching extension, the function
// fails. The |value| parameter is ignored in this mode.
#define X509V3_ADD_DELETE 5L

// X509V3_ADD_SILENT may be ORed into one of the values above to indicate the
// function should not add to the error queue on duplicate or missing extension.
// The function will continue to return zero in those cases, and it will
// continue to return -1 and add to the error queue on other errors.
#define X509V3_ADD_SILENT 0x10

// X509V3_add1_i2d casts |value| to the type that corresponds to |nid|,
// serializes it, and appends it to the extension list in |*x|. If |*x| is NULL,
// it will set |*x| to a newly-allocated |STACK_OF(X509_EXTENSION)| as needed.
// The |crit| parameter determines whether the new extension is critical.
// |flags| may be some combination of the |X509V3_ADD_*| constants to control // the function's behavior on duplicate extension. // // This function returns one on success, zero if the operation failed due to a // missing or duplicate extension, and -1 on other errors. // // WARNING: Casting |value| to the wrong type is a potentially exploitable // memory error, so callers must ensure |value|'s type matches |nid|. See the // list at the top of this section for the correct types. OPENSSL_EXPORT int X509V3_add1_i2d(STACK_OF(X509_EXTENSION) **x, int nid, void *value, int crit, unsigned long flags); // Basic constraints. // // The basic constraints extension (RFC 5280, section 4.2.1.9) determines // whether a certificate is a CA certificate and, if so, optionally constrains // the maximum depth of the certificate chain. // A BASIC_CONSTRAINTS_st, aka |BASIC_CONSTRAINTS| represents an // BasicConstraints structure (RFC 5280). struct BASIC_CONSTRAINTS_st { ASN1_BOOLEAN ca; ASN1_INTEGER *pathlen; } /* BASIC_CONSTRAINTS */; // BASIC_CONSTRAINTS is an |ASN1_ITEM| whose ASN.1 type is BasicConstraints (RFC // 5280) and C type is |BASIC_CONSTRAINTS*|. DECLARE_ASN1_ITEM(BASIC_CONSTRAINTS) // BASIC_CONSTRAINTS_new returns a newly-allocated, empty |BASIC_CONSTRAINTS| // object, or NULL on error. OPENSSL_EXPORT BASIC_CONSTRAINTS *BASIC_CONSTRAINTS_new(void); // BASIC_CONSTRAINTS_free releases memory associated with |bcons|. OPENSSL_EXPORT void BASIC_CONSTRAINTS_free(BASIC_CONSTRAINTS *bcons); // d2i_BASIC_CONSTRAINTS parses up to |len| bytes from |*inp| as a DER-encoded // BasicConstraints (RFC 5280), as described in |d2i_SAMPLE|. OPENSSL_EXPORT BASIC_CONSTRAINTS *d2i_BASIC_CONSTRAINTS(BASIC_CONSTRAINTS **out, const uint8_t **inp, long len); // i2d_BASIC_CONSTRAINTS marshals |bcons| as a DER-encoded BasicConstraints (RFC // 5280), as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_BASIC_CONSTRAINTS(const BASIC_CONSTRAINTS *bcons, uint8_t **outp); // Extended key usage. 
// // The extended key usage extension (RFC 5280, section 4.2.1.12) indicates the // purposes of the certificate's public key. Such constraints are important to // avoid cross-protocol attacks. typedef STACK_OF(ASN1_OBJECT) EXTENDED_KEY_USAGE; // EXTENDED_KEY_USAGE is an |ASN1_ITEM| whose ASN.1 type is ExtKeyUsageSyntax // (RFC 5280) and C type is |STACK_OF(ASN1_OBJECT)*|, or |EXTENDED_KEY_USAGE*|. DECLARE_ASN1_ITEM(EXTENDED_KEY_USAGE) // EXTENDED_KEY_USAGE_new returns a newly-allocated, empty |EXTENDED_KEY_USAGE| // object, or NULL on error. OPENSSL_EXPORT EXTENDED_KEY_USAGE *EXTENDED_KEY_USAGE_new(void); // EXTENDED_KEY_USAGE_free releases memory associated with |eku|. OPENSSL_EXPORT void EXTENDED_KEY_USAGE_free(EXTENDED_KEY_USAGE *eku); // d2i_EXTENDED_KEY_USAGE parses up to |len| bytes from |*inp| as a DER-encoded // ExtKeyUsageSyntax (RFC 5280), as described in |d2i_SAMPLE|. OPENSSL_EXPORT EXTENDED_KEY_USAGE *d2i_EXTENDED_KEY_USAGE( EXTENDED_KEY_USAGE **out, const uint8_t **inp, long len); // i2d_EXTENDED_KEY_USAGE marshals |eku| as a DER-encoded ExtKeyUsageSyntax (RFC // 5280), as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_EXTENDED_KEY_USAGE(const EXTENDED_KEY_USAGE *eku, uint8_t **outp); // General names. // // A |GENERAL_NAME| represents an X.509 GeneralName structure, defined in RFC // 5280, Section 4.2.1.6. General names are distinct from names (|X509_NAME|). A // general name is a CHOICE type which may contain one of several name types, // most commonly a DNS name or an IP address. General names most commonly appear // in the subject alternative name (SAN) extension, though they are also used in // other extensions. // // Many extensions contain a SEQUENCE OF GeneralName, or GeneralNames, so // |STACK_OF(GENERAL_NAME)| is defined and aliased to |GENERAL_NAMES|. 
typedef struct otherName_st { ASN1_OBJECT *type_id; ASN1_TYPE *value; } OTHERNAME; typedef struct EDIPartyName_st { ASN1_STRING *nameAssigner; ASN1_STRING *partyName; } EDIPARTYNAME; // GEN_* are constants for the |type| field of |GENERAL_NAME|, defined below. #define GEN_OTHERNAME 0 #define GEN_EMAIL 1 #define GEN_DNS 2 #define GEN_X400 3 #define GEN_DIRNAME 4 #define GEN_EDIPARTY 5 #define GEN_URI 6 #define GEN_IPADD 7 #define GEN_RID 8 // A GENERAL_NAME_st, aka |GENERAL_NAME|, represents an X.509 GeneralName. The // |type| field determines which member of |d| is active. A |GENERAL_NAME| may // also be empty, in which case |type| is -1 and |d| is NULL. Empty // |GENERAL_NAME|s are invalid and will never be returned from the parser, but // may be created temporarily, e.g. by |GENERAL_NAME_new|. // // WARNING: |type| and |d| must be kept consistent. An inconsistency will result // in a potentially exploitable memory error. struct GENERAL_NAME_st { int type; union { char *ptr; OTHERNAME *otherName; ASN1_IA5STRING *rfc822Name; ASN1_IA5STRING *dNSName; ASN1_STRING *x400Address; X509_NAME *directoryName; EDIPARTYNAME *ediPartyName; ASN1_IA5STRING *uniformResourceIdentifier; ASN1_OCTET_STRING *iPAddress; ASN1_OBJECT *registeredID; // Old names ASN1_OCTET_STRING *ip; // iPAddress X509_NAME *dirn; // dirn ASN1_IA5STRING *ia5; // rfc822Name, dNSName, uniformResourceIdentifier ASN1_OBJECT *rid; // registeredID } d; } /* GENERAL_NAME */; // GENERAL_NAME_new returns a new, empty |GENERAL_NAME|, or NULL on error. OPENSSL_EXPORT GENERAL_NAME *GENERAL_NAME_new(void); // GENERAL_NAME_free releases memory associated with |gen|. OPENSSL_EXPORT void GENERAL_NAME_free(GENERAL_NAME *gen); // d2i_GENERAL_NAME parses up to |len| bytes from |*inp| as a DER-encoded X.509 // GeneralName (RFC 5280), as described in |d2i_SAMPLE|. 
OPENSSL_EXPORT GENERAL_NAME *d2i_GENERAL_NAME(GENERAL_NAME **out, const uint8_t **inp, long len); // i2d_GENERAL_NAME marshals |in| as a DER-encoded X.509 GeneralName (RFC 5280), // as described in |i2d_SAMPLE|. // // TODO(https://crbug.com/boringssl/407): This function should be const and // thread-safe but is currently neither in some cases, notably if |in| is an // directoryName and the |X509_NAME| has been modified. OPENSSL_EXPORT int i2d_GENERAL_NAME(GENERAL_NAME *in, uint8_t **outp); // GENERAL_NAME_dup returns a newly-allocated copy of |gen|, or NULL on error. // This function works by serializing the structure, so it will fail if |gen| is // empty. // // TODO(https://crbug.com/boringssl/407): This function should be const and // thread-safe but is currently neither in some cases, notably if |gen| is an // directoryName and the |X509_NAME| has been modified. OPENSSL_EXPORT GENERAL_NAME *GENERAL_NAME_dup(GENERAL_NAME *gen); // GENERAL_NAMES_new returns a new, empty |GENERAL_NAMES|, or NULL on error. OPENSSL_EXPORT GENERAL_NAMES *GENERAL_NAMES_new(void); // GENERAL_NAMES_free releases memory associated with |gens|. OPENSSL_EXPORT void GENERAL_NAMES_free(GENERAL_NAMES *gens); // d2i_GENERAL_NAMES parses up to |len| bytes from |*inp| as a DER-encoded // SEQUENCE OF GeneralName, as described in |d2i_SAMPLE|. OPENSSL_EXPORT GENERAL_NAMES *d2i_GENERAL_NAMES(GENERAL_NAMES **out, const uint8_t **inp, long len); // i2d_GENERAL_NAMES marshals |in| as a DER-encoded SEQUENCE OF GeneralName, as // described in |i2d_SAMPLE|. // // TODO(https://crbug.com/boringssl/407): This function should be const and // thread-safe but is currently neither in some cases, notably if some element // of |in| is an directoryName and the |X509_NAME| has been modified. OPENSSL_EXPORT int i2d_GENERAL_NAMES(GENERAL_NAMES *in, uint8_t **outp); // OTHERNAME_new returns a new, empty |OTHERNAME|, or NULL on error. 
OPENSSL_EXPORT OTHERNAME *OTHERNAME_new(void); // OTHERNAME_free releases memory associated with |name|. OPENSSL_EXPORT void OTHERNAME_free(OTHERNAME *name); // EDIPARTYNAME_new returns a new, empty |EDIPARTYNAME|, or NULL on error. // EDIPartyName is rarely used in practice, so callers are unlikely to need this // function. OPENSSL_EXPORT EDIPARTYNAME *EDIPARTYNAME_new(void); // EDIPARTYNAME_free releases memory associated with |name|. EDIPartyName is // rarely used in practice, so callers are unlikely to need this function. OPENSSL_EXPORT void EDIPARTYNAME_free(EDIPARTYNAME *name); // GENERAL_NAME_set0_value sets |gen|'s type and value to |type| and |value|. // |type| must be a |GEN_*| constant and |value| must be an object of the // corresponding type. |gen| takes ownership of |value|, so |value| must have // been an allocated object. // // WARNING: |gen| must be empty (typically as returned from |GENERAL_NAME_new|) // before calling this function. If |gen| already contained a value, the // previous contents will be leaked. OPENSSL_EXPORT void GENERAL_NAME_set0_value(GENERAL_NAME *gen, int type, void *value); // GENERAL_NAME_get0_value returns the in-memory representation of |gen|'s // contents and, if |out_type| is not NULL, sets |*out_type| to the type of |gen|, // which will be a |GEN_*| constant. If |gen| is incomplete, the return value // will be NULL and the type will be -1. // // WARNING: Casting the result of this function to the wrong type is a // potentially exploitable memory error. Callers must check |gen|'s type, either // via |*out_type| or checking |gen->type| directly, before inspecting the // result. // // WARNING: This function is not const-correct. The return value should be // const. Callers should not mutate the returned object. OPENSSL_EXPORT void *GENERAL_NAME_get0_value(const GENERAL_NAME *gen, int *out_type); // GENERAL_NAME_set0_othername sets |gen| to be an OtherName with type |oid| and // value |value|. 
On success, it returns one and takes ownership of |oid| and // |value|, which must be created in a way compatible with |ASN1_OBJECT_free| // and |ASN1_TYPE_free|, respectively. On allocation failure, it returns zero. // In the failure case, the caller retains ownership of |oid| and |value| and // must release them when done. // // WARNING: |gen| must be empty (typically as returned from |GENERAL_NAME_new|) // before calling this function. If |gen| already contained a value, the // previous contents will be leaked. OPENSSL_EXPORT int GENERAL_NAME_set0_othername(GENERAL_NAME *gen, ASN1_OBJECT *oid, ASN1_TYPE *value); // GENERAL_NAME_get0_otherName, if |gen| is an OtherName, sets |*out_oid| and // |*out_value| to the OtherName's type-id and value, respectively, and returns // one. If |gen| is not an OtherName, it returns zero and leaves |*out_oid| and // |*out_value| unmodified. Either of |out_oid| or |out_value| may be NULL to // ignore the value. // // WARNING: This function is not const-correct. |out_oid| and |out_value| are // not const, but callers should not mutate the resulting objects. OPENSSL_EXPORT int GENERAL_NAME_get0_otherName(const GENERAL_NAME *gen, ASN1_OBJECT **out_oid, ASN1_TYPE **out_value); // Authority key identifier. // // The authority key identifier extension (RFC 5280, section 4.2.1.1) allows a // certificate to more precisely identify its issuer. This is helpful when // multiple certificates share a name. Only the keyIdentifier (|keyid| in // |AUTHORITY_KEYID|) field is used in practice. // An AUTHORITY_KEYID_st, aka |AUTHORITY_KEYID|, represents an // AuthorityKeyIdentifier structure (RFC 5280). 
DECLARE_ASN1_ITEM(AUTHORITY_KEYID) // AUTHORITY_KEYID_new returns a newly-allocated, empty |AUTHORITY_KEYID| // object, or NULL on error. OPENSSL_EXPORT AUTHORITY_KEYID *AUTHORITY_KEYID_new(void); // AUTHORITY_KEYID_free releases memory associated with |akid|. OPENSSL_EXPORT void AUTHORITY_KEYID_free(AUTHORITY_KEYID *akid); // d2i_AUTHORITY_KEYID parses up to |len| bytes from |*inp| as a DER-encoded // AuthorityKeyIdentifier (RFC 5280), as described in |d2i_SAMPLE|. OPENSSL_EXPORT AUTHORITY_KEYID *d2i_AUTHORITY_KEYID(AUTHORITY_KEYID **out, const uint8_t **inp, long len); // i2d_AUTHORITY_KEYID marshals |akid| as a DER-encoded AuthorityKeyIdentifier // (RFC 5280), as described in |i2d_SAMPLE|. // // TODO(https://crbug.com/boringssl/407): |akid| is not const because it // contains an |X509_NAME|. OPENSSL_EXPORT int i2d_AUTHORITY_KEYID(AUTHORITY_KEYID *akid, uint8_t **outp); // Name constraints. // // The name constraints extension (RFC 5280, section 4.2.1.10) constrains which // names may be asserted by certificates issued by some CA. For example, a // general CA may issue an intermediate certificate to the owner of example.com, // but constrained to ".example.com". // A GENERAL_SUBTREE represents a GeneralSubtree structure (RFC 5280). typedef struct GENERAL_SUBTREE_st { GENERAL_NAME *base; ASN1_INTEGER *minimum; ASN1_INTEGER *maximum; } GENERAL_SUBTREE; DEFINE_STACK_OF(GENERAL_SUBTREE) // GENERAL_SUBTREE_new returns a newly-allocated, empty |GENERAL_SUBTREE| // object, or NULL on error. OPENSSL_EXPORT GENERAL_SUBTREE *GENERAL_SUBTREE_new(void); // GENERAL_SUBTREE_free releases memory associated with |subtree|. OPENSSL_EXPORT void GENERAL_SUBTREE_free(GENERAL_SUBTREE *subtree); // A NAME_CONSTRAINTS_st, aka |NAME_CONSTRAINTS|, represents a NameConstraints // structure (RFC 5280). 
struct NAME_CONSTRAINTS_st { STACK_OF(GENERAL_SUBTREE) *permittedSubtrees; STACK_OF(GENERAL_SUBTREE) *excludedSubtrees; } /* NAME_CONSTRAINTS */; // NAME_CONSTRAINTS is an |ASN1_ITEM| whose ASN.1 type is NameConstraints (RFC // 5280) and C type is |NAME_CONSTRAINTS*|. DECLARE_ASN1_ITEM(NAME_CONSTRAINTS) // NAME_CONSTRAINTS_new returns a newly-allocated, empty |NAME_CONSTRAINTS| // object, or NULL on error. OPENSSL_EXPORT NAME_CONSTRAINTS *NAME_CONSTRAINTS_new(void); // NAME_CONSTRAINTS_free releases memory associated with |ncons|. OPENSSL_EXPORT void NAME_CONSTRAINTS_free(NAME_CONSTRAINTS *ncons); // Authority information access. // // The authority information access extension (RFC 5280, 4.2.2.1) describes // where to obtain information about the issuer of a certificate. It is most // commonly used with accessMethod values of id-ad-caIssuers and id-ad-ocsp, to // indicate where to fetch the issuer certificate (if not provided in-band) and // the issuer's OCSP responder, respectively. // An ACCESS_DESCRIPTION represents an AccessDescription structure (RFC 5280). typedef struct ACCESS_DESCRIPTION_st { ASN1_OBJECT *method; GENERAL_NAME *location; } ACCESS_DESCRIPTION; DEFINE_STACK_OF(ACCESS_DESCRIPTION) // ACCESS_DESCRIPTION_new returns a newly-allocated, empty |ACCESS_DESCRIPTION| // object, or NULL on error. OPENSSL_EXPORT ACCESS_DESCRIPTION *ACCESS_DESCRIPTION_new(void); // ACCESS_DESCRIPTION_free releases memory associated with |desc|. OPENSSL_EXPORT void ACCESS_DESCRIPTION_free(ACCESS_DESCRIPTION *desc); typedef STACK_OF(ACCESS_DESCRIPTION) AUTHORITY_INFO_ACCESS; // AUTHORITY_INFO_ACCESS is an |ASN1_ITEM| whose ASN.1 type is // AuthorityInfoAccessSyntax (RFC 5280) and C type is // |STACK_OF(ACCESS_DESCRIPTION)*|, or |AUTHORITY_INFO_ACCESS*|. DECLARE_ASN1_ITEM(AUTHORITY_INFO_ACCESS) // AUTHORITY_INFO_ACCESS_new returns a newly-allocated, empty // |AUTHORITY_INFO_ACCESS| object, or NULL on error. 
OPENSSL_EXPORT AUTHORITY_INFO_ACCESS *AUTHORITY_INFO_ACCESS_new(void); // AUTHORITY_INFO_ACCESS_free releases memory associated with |aia|. OPENSSL_EXPORT void AUTHORITY_INFO_ACCESS_free(AUTHORITY_INFO_ACCESS *aia); // d2i_AUTHORITY_INFO_ACCESS parses up to |len| bytes from |*inp| as a // DER-encoded AuthorityInfoAccessSyntax (RFC 5280), as described in // |d2i_SAMPLE|. OPENSSL_EXPORT AUTHORITY_INFO_ACCESS *d2i_AUTHORITY_INFO_ACCESS( AUTHORITY_INFO_ACCESS **out, const uint8_t **inp, long len); // i2d_AUTHORITY_INFO_ACCESS marshals |aia| as a DER-encoded // AuthorityInfoAccessSyntax (RFC 5280), as described in |i2d_SAMPLE|. // // TODO(https://crbug.com/boringssl/407): |aia| is not const because it // contains an |X509_NAME|. OPENSSL_EXPORT int i2d_AUTHORITY_INFO_ACCESS(AUTHORITY_INFO_ACCESS *aia, uint8_t **outp); // CRL distribution points. // // The CRL distribution points extension (RFC 5280, 4.2.1.13) indicates where to // fetch a certificate issuer's CRL. The corresponding issuing distribution // point CRL extension (RFC 5280, section 5.2.5) matches against this extension. // A DIST_POINT_NAME represents a DistributionPointName structure (RFC 5280). // The |name| field contains the CHOICE value and is determined by |type|. If // |type| is zero, |name| must be a |fullname|. If |type| is one, |name| must be // a |relativename|. // // WARNING: |type| and |name| must be kept consistent. An inconsistency will // result in a potentially exploitable memory error. typedef struct DIST_POINT_NAME_st { int type; union { GENERAL_NAMES *fullname; STACK_OF(X509_NAME_ENTRY) *relativename; } name; // If relativename then this contains the full distribution point name X509_NAME *dpname; } DIST_POINT_NAME; // DIST_POINT_NAME_new returns a newly-allocated, empty |DIST_POINT_NAME| // object, or NULL on error. OPENSSL_EXPORT DIST_POINT_NAME *DIST_POINT_NAME_new(void); // DIST_POINT_NAME_free releases memory associated with |name|. 
OPENSSL_EXPORT void DIST_POINT_NAME_free(DIST_POINT_NAME *name); // A DIST_POINT_st, aka |DIST_POINT|, represents a DistributionPoint structure // (RFC 5280). struct DIST_POINT_st { DIST_POINT_NAME *distpoint; ASN1_BIT_STRING *reasons; GENERAL_NAMES *CRLissuer; } /* DIST_POINT */; DEFINE_STACK_OF(DIST_POINT) // DIST_POINT_new returns a newly-allocated, empty |DIST_POINT| object, or NULL // on error. OPENSSL_EXPORT DIST_POINT *DIST_POINT_new(void); // DIST_POINT_free releases memory associated with |dp|. OPENSSL_EXPORT void DIST_POINT_free(DIST_POINT *dp); typedef STACK_OF(DIST_POINT) CRL_DIST_POINTS; // CRL_DIST_POINTS is an |ASN1_ITEM| whose ASN.1 type is CRLDistributionPoints // (RFC 5280) and C type is |CRL_DIST_POINTS*|. DECLARE_ASN1_ITEM(CRL_DIST_POINTS) // CRL_DIST_POINTS_new returns a newly-allocated, empty |CRL_DIST_POINTS| // object, or NULL on error. OPENSSL_EXPORT CRL_DIST_POINTS *CRL_DIST_POINTS_new(void); // CRL_DIST_POINTS_free releases memory associated with |crldp|. OPENSSL_EXPORT void CRL_DIST_POINTS_free(CRL_DIST_POINTS *crldp); // d2i_CRL_DIST_POINTS parses up to |len| bytes from |*inp| as a DER-encoded // CRLDistributionPoints (RFC 5280), as described in |d2i_SAMPLE|. OPENSSL_EXPORT CRL_DIST_POINTS *d2i_CRL_DIST_POINTS(CRL_DIST_POINTS **out, const uint8_t **inp, long len); // i2d_CRL_DIST_POINTS marshals |crldp| as a DER-encoded CRLDistributionPoints // (RFC 5280), as described in |i2d_SAMPLE|. // // TODO(https://crbug.com/boringssl/407): |crldp| is not const because it // contains an |X509_NAME|. OPENSSL_EXPORT int i2d_CRL_DIST_POINTS(CRL_DIST_POINTS *crldp, uint8_t **outp); // A ISSUING_DIST_POINT_st, aka |ISSUING_DIST_POINT|, represents a // IssuingDistributionPoint structure (RFC 5280). 
struct ISSUING_DIST_POINT_st { DIST_POINT_NAME *distpoint; ASN1_BOOLEAN onlyuser; ASN1_BOOLEAN onlyCA; ASN1_BIT_STRING *onlysomereasons; ASN1_BOOLEAN indirectCRL; ASN1_BOOLEAN onlyattr; } /* ISSUING_DIST_POINT */; // ISSUING_DIST_POINT is an |ASN1_ITEM| whose ASN.1 type is // IssuingDistributionPoint (RFC 5280) and C type is |ISSUING_DIST_POINT*|. DECLARE_ASN1_ITEM(ISSUING_DIST_POINT) // ISSUING_DIST_POINT_new returns a newly-allocated, empty |ISSUING_DIST_POINT| // object, or NULL on error. OPENSSL_EXPORT ISSUING_DIST_POINT *ISSUING_DIST_POINT_new(void); // ISSUING_DIST_POINT_free releases memory associated with |idp|. OPENSSL_EXPORT void ISSUING_DIST_POINT_free(ISSUING_DIST_POINT *idp); // d2i_ISSUING_DIST_POINT parses up to |len| bytes from |*inp| as a DER-encoded // IssuingDistributionPoint (RFC 5280), as described in |d2i_SAMPLE|. OPENSSL_EXPORT ISSUING_DIST_POINT *d2i_ISSUING_DIST_POINT( ISSUING_DIST_POINT **out, const uint8_t **inp, long len); // i2d_ISSUING_DIST_POINT marshals |idp| as a DER-encoded // IssuingDistributionPoint (RFC 5280), as described in |i2d_SAMPLE|. // // TODO(https://crbug.com/boringssl/407): |idp| is not const because it // contains an |X509_NAME|. OPENSSL_EXPORT int i2d_ISSUING_DIST_POINT(ISSUING_DIST_POINT *idp, uint8_t **outp); // Certificate policies. // // The certificate policies extension (RFC 5280, section 4.2.1.4), along with a // suite of related extensions determines the "policies" that apply to a // certificate path. Evaluating these policies is extremely complex and has led // to denial-of-service vulnerabilities in several X.509 implementations. See // draft-ietf-lamps-x509-policy-graph. // // Do not use this mechanism. // A NOTICEREF represents a NoticeReference structure (RFC 5280). typedef struct NOTICEREF_st { ASN1_STRING *organization; STACK_OF(ASN1_INTEGER) *noticenos; } NOTICEREF; // NOTICEREF_new returns a newly-allocated, empty |NOTICEREF| object, or NULL // on error. 
OPENSSL_EXPORT NOTICEREF *NOTICEREF_new(void); // NOTICEREF_free releases memory associated with |ref|. OPENSSL_EXPORT void NOTICEREF_free(NOTICEREF *ref); // A USERNOTICE represents a UserNotice structure (RFC 5280). typedef struct USERNOTICE_st { NOTICEREF *noticeref; ASN1_STRING *exptext; } USERNOTICE; // USERNOTICE_new returns a newly-allocated, empty |USERNOTICE| object, or NULL // on error. OPENSSL_EXPORT USERNOTICE *USERNOTICE_new(void); // USERNOTICE_free releases memory associated with |notice|. OPENSSL_EXPORT void USERNOTICE_free(USERNOTICE *notice); // A POLICYQUALINFO represents a PolicyQualifierInfo structure (RFC 5280). |d| // contains the qualifier field of the PolicyQualifierInfo. Its type is // determined by |pqualid|. If |pqualid| is |NID_id_qt_cps|, |d| must be // |cpsuri|. If |pqualid| is |NID_id_qt_unotice|, |d| must be |usernotice|. // Otherwise, |d| must be |other|. // // WARNING: |pqualid| and |d| must be kept consistent. An inconsistency will // result in a potentially exploitable memory error. typedef struct POLICYQUALINFO_st { ASN1_OBJECT *pqualid; union { ASN1_IA5STRING *cpsuri; USERNOTICE *usernotice; ASN1_TYPE *other; } d; } POLICYQUALINFO; DEFINE_STACK_OF(POLICYQUALINFO) // POLICYQUALINFO_new returns a newly-allocated, empty |POLICYQUALINFO| object, // or NULL on error. OPENSSL_EXPORT POLICYQUALINFO *POLICYQUALINFO_new(void); // POLICYQUALINFO_free releases memory associated with |info|. OPENSSL_EXPORT void POLICYQUALINFO_free(POLICYQUALINFO *info); // A POLICYINFO represents a PolicyInformation structure (RFC 5280). typedef struct POLICYINFO_st { ASN1_OBJECT *policyid; STACK_OF(POLICYQUALINFO) *qualifiers; } POLICYINFO; DEFINE_STACK_OF(POLICYINFO) // POLICYINFO_new returns a newly-allocated, empty |POLICYINFO| object, or NULL // on error. OPENSSL_EXPORT POLICYINFO *POLICYINFO_new(void); // POLICYINFO_free releases memory associated with |info|. 
OPENSSL_EXPORT void POLICYINFO_free(POLICYINFO *info); typedef STACK_OF(POLICYINFO) CERTIFICATEPOLICIES; // CERTIFICATEPOLICIES is an |ASN1_ITEM| whose ASN.1 type is CertificatePolicies // (RFC 5280) and C type is |STACK_OF(POLICYINFO)*|, or |CERTIFICATEPOLICIES*|. DECLARE_ASN1_ITEM(CERTIFICATEPOLICIES) // CERTIFICATEPOLICIES_new returns a newly-allocated, empty // |CERTIFICATEPOLICIES| object, or NULL on error. OPENSSL_EXPORT CERTIFICATEPOLICIES *CERTIFICATEPOLICIES_new(void); // CERTIFICATEPOLICIES_free releases memory associated with |policies|. OPENSSL_EXPORT void CERTIFICATEPOLICIES_free(CERTIFICATEPOLICIES *policies); // d2i_CERTIFICATEPOLICIES parses up to |len| bytes from |*inp| as a DER-encoded // CertificatePolicies (RFC 5280), as described in |d2i_SAMPLE|. OPENSSL_EXPORT CERTIFICATEPOLICIES *d2i_CERTIFICATEPOLICIES( CERTIFICATEPOLICIES **out, const uint8_t **inp, long len); // i2d_CERTIFICATEPOLICIES marshals |policies| as a DER-encoded // CertificatePolicies (RFC 5280), as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_CERTIFICATEPOLICIES(const CERTIFICATEPOLICIES *policies, uint8_t **outp); // A POLICY_MAPPING represents an individual element of a PolicyMappings // structure (RFC 5280). typedef struct POLICY_MAPPING_st { ASN1_OBJECT *issuerDomainPolicy; ASN1_OBJECT *subjectDomainPolicy; } POLICY_MAPPING; DEFINE_STACK_OF(POLICY_MAPPING) // POLICY_MAPPING_new returns a newly-allocated, empty |POLICY_MAPPING| object, // or NULL on error. OPENSSL_EXPORT POLICY_MAPPING *POLICY_MAPPING_new(void); // POLICY_MAPPING_free releases memory associated with |mapping|. OPENSSL_EXPORT void POLICY_MAPPING_free(POLICY_MAPPING *mapping); typedef STACK_OF(POLICY_MAPPING) POLICY_MAPPINGS; // POLICY_MAPPINGS is an |ASN1_ITEM| whose ASN.1 type is PolicyMappings (RFC // 5280) and C type is |STACK_OF(POLICY_MAPPING)*|, or |POLICY_MAPPINGS*|. DECLARE_ASN1_ITEM(POLICY_MAPPINGS) // A POLICY_CONSTRAINTS represents a PolicyConstraints structure (RFC 5280). 
typedef struct POLICY_CONSTRAINTS_st { ASN1_INTEGER *requireExplicitPolicy; ASN1_INTEGER *inhibitPolicyMapping; } POLICY_CONSTRAINTS; // POLICY_CONSTRAINTS is an |ASN1_ITEM| whose ASN.1 type is PolicyConstraints // (RFC 5280) and C type is |POLICY_CONSTRAINTS*|. DECLARE_ASN1_ITEM(POLICY_CONSTRAINTS) // POLICY_CONSTRAINTS_new returns a newly-allocated, empty |POLICY_CONSTRAINTS| // object, or NULL on error. OPENSSL_EXPORT POLICY_CONSTRAINTS *POLICY_CONSTRAINTS_new(void); // POLICY_CONSTRAINTS_free releases memory associated with |pcons|. OPENSSL_EXPORT void POLICY_CONSTRAINTS_free(POLICY_CONSTRAINTS *pcons); // Algorithm identifiers. // // An |X509_ALGOR| represents an AlgorithmIdentifier structure, used in X.509 // to represent signature algorithms and public key algorithms. DEFINE_STACK_OF(X509_ALGOR) // X509_ALGOR is an |ASN1_ITEM| whose ASN.1 type is AlgorithmIdentifier and C // type is |X509_ALGOR*|. DECLARE_ASN1_ITEM(X509_ALGOR) // X509_ALGOR_new returns a newly-allocated, empty |X509_ALGOR| object, or NULL // on error. OPENSSL_EXPORT X509_ALGOR *X509_ALGOR_new(void); // X509_ALGOR_dup returns a newly-allocated copy of |alg|, or NULL on error. // This function works by serializing the structure, so if |alg| is incomplete, // it may fail. OPENSSL_EXPORT X509_ALGOR *X509_ALGOR_dup(const X509_ALGOR *alg); // X509_ALGOR_free releases memory associated with |alg|. OPENSSL_EXPORT void X509_ALGOR_free(X509_ALGOR *alg); // d2i_X509_ALGOR parses up to |len| bytes from |*inp| as a DER-encoded // AlgorithmIdentifier, as described in |d2i_SAMPLE|. OPENSSL_EXPORT X509_ALGOR *d2i_X509_ALGOR(X509_ALGOR **out, const uint8_t **inp, long len); // i2d_X509_ALGOR marshals |alg| as a DER-encoded AlgorithmIdentifier, as // described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_X509_ALGOR(const X509_ALGOR *alg, uint8_t **outp); // X509_ALGOR_set0 sets |alg| to an AlgorithmIdentifier with algorithm |obj| and // parameter determined by |param_type| and |param_value|. 
It returns one on // success and zero on error. This function takes ownership of |obj| and // |param_value| on success. // // If |param_type| is |V_ASN1_UNDEF|, the parameter is omitted. If |param_type| // is zero, the parameter is left unchanged. Otherwise, |param_type| and // |param_value| are interpreted as in |ASN1_TYPE_set|. // // Note omitting the parameter (|V_ASN1_UNDEF|) and encoding an explicit NULL // value (|V_ASN1_NULL|) are different. Some algorithms require one and some the // other. Consult the relevant specification before calling this function. The // correct parameter for an RSASSA-PKCS1-v1_5 signature is |V_ASN1_NULL|. The // correct one for an ECDSA or Ed25519 signature is |V_ASN1_UNDEF|. OPENSSL_EXPORT int X509_ALGOR_set0(X509_ALGOR *alg, ASN1_OBJECT *obj, int param_type, void *param_value); // X509_ALGOR_get0 sets |*out_obj| to the |alg|'s algorithm. If |alg|'s // parameter is omitted, it sets |*out_param_type| and |*out_param_value| to // |V_ASN1_UNDEF| and NULL. Otherwise, it sets |*out_param_type| and // |*out_param_value| to the parameter, using the same representation as // |ASN1_TYPE_set0|. See |ASN1_TYPE_set0| and |ASN1_TYPE| for details. // // Callers that require the parameter in serialized form should, after checking // for |V_ASN1_UNDEF|, use |ASN1_TYPE_set1| and |d2i_ASN1_TYPE|, rather than // inspecting |*out_param_value|. // // Each of |out_obj|, |out_param_type|, and |out_param_value| may be NULL to // ignore the output. If |out_param_type| is NULL, |out_param_value| is ignored. // // WARNING: If |*out_param_type| is set to |V_ASN1_UNDEF|, OpenSSL and older // revisions of BoringSSL leave |*out_param_value| unset rather than setting it // to NULL. Callers that support both OpenSSL and BoringSSL should not assume // |*out_param_value| is uniformly initialized. 
OPENSSL_EXPORT void X509_ALGOR_get0(const ASN1_OBJECT **out_obj, int *out_param_type, const void **out_param_value, const X509_ALGOR *alg); // X509_ALGOR_set_md sets |alg| to the hash function |md|. Note this // AlgorithmIdentifier represents the hash function itself, not a signature // algorithm that uses |md|. It returns one on success and zero on error. // // Due to historical specification mistakes (see Section 2.1 of RFC 4055), the // parameters field is sometimes omitted and sometimes a NULL value. When used // in RSASSA-PSS and RSAES-OAEP, it should be a NULL value. In other contexts, // the parameters should be omitted. This function assumes the caller is // constructing a RSASSA-PSS or RSAES-OAEP AlgorithmIdentifier and includes a // NULL parameter. This differs from OpenSSL's behavior. // // TODO(davidben): Rename this function, or perhaps just add a bespoke API for // constructing PSS and move on. OPENSSL_EXPORT int X509_ALGOR_set_md(X509_ALGOR *alg, const EVP_MD *md); // X509_ALGOR_cmp returns zero if |a| and |b| are equal, and some non-zero value // otherwise. Note this function can only be used for equality checks, not an // ordering. OPENSSL_EXPORT int X509_ALGOR_cmp(const X509_ALGOR *a, const X509_ALGOR *b); // Attributes. // // Unlike certificates and CRLs, CSRs use a separate Attribute structure (RFC // 2985, RFC 2986) for extensibility. This is represented by the library as // |X509_ATTRIBUTE|. DEFINE_STACK_OF(X509_ATTRIBUTE) // X509_ATTRIBUTE_new returns a newly-allocated, empty |X509_ATTRIBUTE| object, // or NULL on error. |X509_ATTRIBUTE_set1_*| may be used to finish initializing // it. OPENSSL_EXPORT X509_ATTRIBUTE *X509_ATTRIBUTE_new(void); // X509_ATTRIBUTE_dup returns a newly-allocated copy of |attr|, or NULL on // error. This function works by serializing the structure, so if |attr| is // incomplete, it may fail. 
OPENSSL_EXPORT X509_ATTRIBUTE *X509_ATTRIBUTE_dup(const X509_ATTRIBUTE *attr); // X509_ATTRIBUTE_free releases memory associated with |attr|. OPENSSL_EXPORT void X509_ATTRIBUTE_free(X509_ATTRIBUTE *attr); // d2i_X509_ATTRIBUTE parses up to |len| bytes from |*inp| as a DER-encoded // Attribute (RFC 2986), as described in |d2i_SAMPLE|. OPENSSL_EXPORT X509_ATTRIBUTE *d2i_X509_ATTRIBUTE(X509_ATTRIBUTE **out, const uint8_t **inp, long len); // i2d_X509_ATTRIBUTE marshals |alg| as a DER-encoded Attribute (RFC 2986), as // described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_X509_ATTRIBUTE(const X509_ATTRIBUTE *alg, uint8_t **outp); // X509_ATTRIBUTE_create returns a newly-allocated |X509_ATTRIBUTE|, or NULL on // error. The attribute has type |nid| and contains a single value determined by // |attrtype| and |value|, which are interpreted as in |ASN1_TYPE_set|. Note // this function takes ownership of |value|. OPENSSL_EXPORT X509_ATTRIBUTE *X509_ATTRIBUTE_create(int nid, int attrtype, void *value); // X509_ATTRIBUTE_create_by_NID returns a newly-allocated |X509_ATTRIBUTE| of // type |nid|, or NULL on error. The value is determined as in // |X509_ATTRIBUTE_set1_data|. // // If |attr| is non-NULL, the resulting |X509_ATTRIBUTE| is also written to // |*attr|. If |*attr| was non-NULL when the function was called, |*attr| is // reused instead of creating a new object. // // WARNING: The interpretation of |attrtype|, |data|, and |len| is complex and // error-prone. See |X509_ATTRIBUTE_set1_data| for details. // // WARNING: The object reuse form is deprecated and may be removed in the // future. It also currently incorrectly appends to the reused object's value // set rather than overwriting it. OPENSSL_EXPORT X509_ATTRIBUTE *X509_ATTRIBUTE_create_by_NID( X509_ATTRIBUTE **attr, int nid, int attrtype, const void *data, int len); // X509_ATTRIBUTE_create_by_OBJ behaves like |X509_ATTRIBUTE_create_by_NID| // except the attribute's type is determined by |obj|. 
OPENSSL_EXPORT X509_ATTRIBUTE *X509_ATTRIBUTE_create_by_OBJ( X509_ATTRIBUTE **attr, const ASN1_OBJECT *obj, int attrtype, const void *data, int len); // X509_ATTRIBUTE_create_by_txt behaves like |X509_ATTRIBUTE_create_by_NID| // except the attribute's type is determined by calling |OBJ_txt2obj| with // |attrname|. OPENSSL_EXPORT X509_ATTRIBUTE *X509_ATTRIBUTE_create_by_txt( X509_ATTRIBUTE **attr, const char *attrname, int type, const unsigned char *bytes, int len); // X509_ATTRIBUTE_set1_object sets |attr|'s type to |obj|. It returns one on // success and zero on error. OPENSSL_EXPORT int X509_ATTRIBUTE_set1_object(X509_ATTRIBUTE *attr, const ASN1_OBJECT *obj); // X509_ATTRIBUTE_set1_data appends a value to |attr|'s value set and returns // one on success or zero on error. The value is determined as follows: // // If |attrtype| is zero, this function returns one and does nothing. This form // may be used when calling |X509_ATTRIBUTE_create_by_*| to create an attribute // with an empty value set. Such attributes are invalid, but OpenSSL supports // creating them. // // Otherwise, if |attrtype| is a |MBSTRING_*| constant, the value is an ASN.1 // string. The string is determined by decoding |len| bytes from |data| in the // encoding specified by |attrtype|, and then re-encoding it in a form // appropriate for |attr|'s type. If |len| is -1, |strlen(data)| is used // instead. See |ASN1_STRING_set_by_NID| for details. // // Otherwise, if |len| is not -1, the value is an ASN.1 string. |attrtype| is an // |ASN1_STRING| type value and the |len| bytes from |data| are copied as the // type-specific representation of |ASN1_STRING|. See |ASN1_STRING| for details. // // Otherwise, if |len| is -1, the value is constructed by passing |attrtype| and // |data| to |ASN1_TYPE_set1|. That is, |attrtype| is an |ASN1_TYPE| type value, // and |data| is cast to the corresponding pointer type. 
// // WARNING: Despite the name, this function appends to |attr|'s value set, // rather than overwriting it. To overwrite the value set, create a new // |X509_ATTRIBUTE| with |X509_ATTRIBUTE_new|. // // WARNING: If using the |MBSTRING_*| form, pass a length rather than relying on // |strlen|. In particular, |strlen| will not behave correctly if the input is // |MBSTRING_BMP| or |MBSTRING_UNIV|. // // WARNING: This function currently misinterprets |V_ASN1_OTHER| as an // |MBSTRING_*| constant. This matches OpenSSL but means it is impossible to // construct a value with a non-universal tag. OPENSSL_EXPORT int X509_ATTRIBUTE_set1_data(X509_ATTRIBUTE *attr, int attrtype, const void *data, int len); // X509_ATTRIBUTE_get0_data returns the |idx|th value of |attr| in a // type-specific representation to |attrtype|, or NULL if out of bounds or the // type does not match. |attrtype| is one of the type values in |ASN1_TYPE|. On // match, the return value uses the same representation as |ASN1_TYPE_set0|. See // |ASN1_TYPE| for details. OPENSSL_EXPORT void *X509_ATTRIBUTE_get0_data(X509_ATTRIBUTE *attr, int idx, int attrtype, void *unused); // X509_ATTRIBUTE_count returns the number of values in |attr|. OPENSSL_EXPORT int X509_ATTRIBUTE_count(const X509_ATTRIBUTE *attr); // X509_ATTRIBUTE_get0_object returns the type of |attr|. OPENSSL_EXPORT ASN1_OBJECT *X509_ATTRIBUTE_get0_object(X509_ATTRIBUTE *attr); // X509_ATTRIBUTE_get0_type returns the |idx|th value in |attr|, or NULL if out // of bounds. Note this function returns one of |attr|'s values, not the type. OPENSSL_EXPORT ASN1_TYPE *X509_ATTRIBUTE_get0_type(X509_ATTRIBUTE *attr, int idx); // Certificate stores. // // An |X509_STORE| contains trusted certificates, CRLs, and verification // parameters that are shared between multiple certificate verifications. 
// // Certificates in an |X509_STORE| are referred to as "trusted certificates", // but an individual certificate verification may not necessarily treat every // trusted certificate as a trust anchor. See |X509_VERIFY_PARAM_set_trust| for // details. // // WARNING: Although a trusted certificate which fails the // |X509_VERIFY_PARAM_set_trust| check is functionally an untrusted // intermediate certificate, callers should not rely on this to configure // untrusted intermediates in an |X509_STORE|. The trust check is complex, so // this risks inadvertently treating it as a trust anchor. Instead, configure // untrusted intermediates with the |chain| parameter of |X509_STORE_CTX_init|. // // Certificates in |X509_STORE| may be specified in several ways: // - Added by |X509_STORE_add_cert|. // - Returned by an |X509_LOOKUP| added by |X509_STORE_add_lookup|. // // |X509_STORE|s are reference-counted and may be shared by certificate // verifications running concurrently on multiple threads. However, an // |X509_STORE|'s verification parameters may not be modified concurrently with // certificate verification or other operations. Unless otherwise documented, // functions which take const pointer may be used concurrently, while // functions which take a non-const pointer may not. Callers that wish to modify // verification parameters in a shared |X509_STORE| should instead modify // |X509_STORE_CTX|s individually. // // Objects in an |X509_STORE| are represented as an |X509_OBJECT|. Some // functions in this library return values with this type. // X509_STORE_new returns a newly-allocated |X509_STORE|, or NULL on error. OPENSSL_EXPORT X509_STORE *X509_STORE_new(void); // X509_STORE_up_ref adds one to the reference count of |store| and returns one. // Although |store| is not const, this function's use of |store| is thread-safe. OPENSSL_EXPORT int X509_STORE_up_ref(X509_STORE *store); // X509_STORE_free releases memory associated with |store|. 
OPENSSL_EXPORT void X509_STORE_free(X509_STORE *store); // X509_STORE_add_cert adds |x509| to |store| as a trusted certificate. It // returns one on success and zero on error. This function internally increments // |x509|'s reference count, so the caller retains ownership of |x509|. // // Certificates configured by this function are still subject to the checks // described in |X509_VERIFY_PARAM_set_trust|. // // Although |store| is not const, this function's use of |store| is thread-safe. // However, if this function is called concurrently with |X509_verify_cert|, it // is a race condition whether |x509| is available for issuer lookups. // Moreover, the result may differ for each issuer lookup performed by a single // |X509_verify_cert| call. OPENSSL_EXPORT int X509_STORE_add_cert(X509_STORE *store, X509 *x509); // X509_STORE_add_crl adds |crl| to |store|. It returns one on success and zero // on error. This function internally increments |crl|'s reference count, so the // caller retains ownership of |crl|. CRLs added in this way are candidates for // CRL lookup when |X509_V_FLAG_CRL_CHECK| is set. // // Although |store| is not const, this function's use of |store| is thread-safe. // However, if this function is called concurrently with |X509_verify_cert|, it // is a race condition whether |crl| is available for CRL checks. Moreover, the // result may differ for each CRL check performed by a single // |X509_verify_cert| call. // // Note there are no supported APIs to remove CRLs from |store| once inserted. // To vary the set of CRLs over time, callers should either create a new // |X509_STORE| or configure CRLs on a per-verification basis with // |X509_STORE_CTX_set0_crls|. OPENSSL_EXPORT int X509_STORE_add_crl(X509_STORE *store, X509_CRL *crl); // X509_STORE_get0_param returns |store|'s verification parameters. This object // is mutable and may be modified by the caller. 
For an individual certificate // verification operation, |X509_STORE_CTX_init| initializes the // |X509_STORE_CTX|'s parameters with these parameters. // // WARNING: |X509_STORE_CTX_init| applies some default parameters (as in // |X509_VERIFY_PARAM_inherit|) after copying |store|'s parameters. This means // it is impossible to leave some parameters unset at |store|. They must be // explicitly unset after creating the |X509_STORE_CTX|. // // As of writing these late defaults are a depth limit (see // |X509_VERIFY_PARAM_set_depth|) and the |X509_V_FLAG_TRUSTED_FIRST| flag. This // warning does not apply if the parameters were set in |store|. // // TODO(crbug.com/boringssl/441): This behavior is very surprising. Can we // remove this notion of late defaults? The unsettable value at |X509_STORE| is // -1, which rejects everything but explicitly-trusted self-signed certificates. // |X509_V_FLAG_TRUSTED_FIRST| is mostly a workaround for poor path-building. OPENSSL_EXPORT X509_VERIFY_PARAM *X509_STORE_get0_param(X509_STORE *store); // X509_STORE_set1_param copies verification parameters from |param| as in // |X509_VERIFY_PARAM_set1|. It returns one on success and zero on error. OPENSSL_EXPORT int X509_STORE_set1_param(X509_STORE *store, const X509_VERIFY_PARAM *param); // X509_STORE_set_flags enables all values in |flags| in |store|'s verification // flags. |flags| should be a combination of |X509_V_FLAG_*| constants. // // WARNING: These flags will be combined with default flags when copied to an // |X509_STORE_CTX|. This means it is impossible to unset those defaults from // the |X509_STORE|. See discussion in |X509_STORE_get0_param|. OPENSSL_EXPORT int X509_STORE_set_flags(X509_STORE *store, unsigned long flags); // X509_STORE_set_depth configures |store| to, by default, limit certificate // chains to |depth| intermediate certificates. This count excludes both the // target certificate and the trust anchor (root certificate). 
OPENSSL_EXPORT int X509_STORE_set_depth(X509_STORE *store, int depth); // X509_STORE_set_purpose configures the purpose check for |store|. See // |X509_VERIFY_PARAM_set_purpose| for details. OPENSSL_EXPORT int X509_STORE_set_purpose(X509_STORE *store, int purpose); // X509_STORE_set_trust configures the trust check for |store|. See // |X509_VERIFY_PARAM_set_trust| for details. OPENSSL_EXPORT int X509_STORE_set_trust(X509_STORE *store, int trust); // The following constants indicate the type of an |X509_OBJECT|. #define X509_LU_NONE 0 #define X509_LU_X509 1 #define X509_LU_CRL 2 #define X509_LU_PKEY 3 DEFINE_STACK_OF(X509_OBJECT) // X509_OBJECT_new returns a newly-allocated, empty |X509_OBJECT| or NULL on // error. OPENSSL_EXPORT X509_OBJECT *X509_OBJECT_new(void); // X509_OBJECT_free releases memory associated with |obj|. OPENSSL_EXPORT void X509_OBJECT_free(X509_OBJECT *obj); // X509_OBJECT_get_type returns the type of |obj|, which will be one of the // |X509_LU_*| constants. OPENSSL_EXPORT int X509_OBJECT_get_type(const X509_OBJECT *obj); // X509_OBJECT_get0_X509 returns |obj| as a certificate, or NULL if |obj| is not // a certificate. OPENSSL_EXPORT X509 *X509_OBJECT_get0_X509(const X509_OBJECT *obj); // X509_STORE_get1_objects returns a newly-allocated stack containing the // contents of |store|, or NULL on error. The caller must release the result // with |sk_X509_OBJECT_pop_free| and |X509_OBJECT_free| when done. // // The result will include all certificates and CRLs added via // |X509_STORE_add_cert| and |X509_STORE_add_crl|, as well as any cached objects // added by |X509_LOOKUP_add_dir|. The last of these may change over time, as // different objects are loaded from the filesystem. Callers should not depend // on this caching behavior. The objects are returned in no particular order. OPENSSL_EXPORT STACK_OF(X509_OBJECT) *X509_STORE_get1_objects( X509_STORE *store); // Certificate verification. 
// // An |X509_STORE_CTX| object represents a single certificate verification // operation. To verify a certificate chain, callers construct an // |X509_STORE_CTX|, initialize it with |X509_STORE_CTX_init|, configure extra // parameters with |X509_STORE_CTX_get0_param|, and call |X509_verify_cert|. // X509_STORE_CTX_new returns a newly-allocated, empty |X509_STORE_CTX|, or NULL // on error. OPENSSL_EXPORT X509_STORE_CTX *X509_STORE_CTX_new(void); // X509_STORE_CTX_free releases memory associated with |ctx|. OPENSSL_EXPORT void X509_STORE_CTX_free(X509_STORE_CTX *ctx); // X509_STORE_CTX_init initializes |ctx| to verify |x509|, using trusted // certificates and parameters in |store|. It returns one on success and zero on // error. |chain| is a list of untrusted intermediate certificates to use in // verification. // // |ctx| stores pointers to |store|, |x509|, and |chain|. Each of these objects // must outlive |ctx| and may not be mutated for the duration of the certificate // verification. OPENSSL_EXPORT int X509_STORE_CTX_init(X509_STORE_CTX *ctx, X509_STORE *store, X509 *x509, STACK_OF(X509) *chain); // X509_verify_cert performs certificate verification with |ctx|, which must have // been initialized with |X509_STORE_CTX_init|. It returns one on success and // zero on error. On success, |X509_STORE_CTX_get0_chain| or // |X509_STORE_CTX_get1_chain| may be used to return the verified certificate // chain. On error, |X509_STORE_CTX_get_error| may be used to return additional // error information. // // WARNING: Most failure conditions from this function do not use the error // queue. Use |X509_STORE_CTX_get_error| to determine the cause of the error. OPENSSL_EXPORT int X509_verify_cert(X509_STORE_CTX *ctx); // X509_STORE_CTX_get0_chain, after a successful |X509_verify_cert| call, // returns the verified certificate chain. The chain begins with the leaf and // ends with trust anchor. 
// // At other points, such as after a failed verification or during the deprecated // verification callback, it returns the partial chain built so far. Callers // should avoid relying on this as this exposes unstable library implementation // details. OPENSSL_EXPORT STACK_OF(X509) *X509_STORE_CTX_get0_chain( const X509_STORE_CTX *ctx); // X509_STORE_CTX_get1_chain behaves like |X509_STORE_CTX_get0_chain| but // returns a newly-allocated |STACK_OF(X509)| containing the completed chain, // with each certificate's reference count incremented. Callers must free the // result with |sk_X509_pop_free| and |X509_free| when done. OPENSSL_EXPORT STACK_OF(X509) *X509_STORE_CTX_get1_chain( const X509_STORE_CTX *ctx); // The following values are possible outputs of |X509_STORE_CTX_get_error|. #define X509_V_OK 0 #define X509_V_ERR_UNSPECIFIED 1 #define X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT 2 #define X509_V_ERR_UNABLE_TO_GET_CRL 3 #define X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE 4 #define X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE 5 #define X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY 6 #define X509_V_ERR_CERT_SIGNATURE_FAILURE 7 #define X509_V_ERR_CRL_SIGNATURE_FAILURE 8 #define X509_V_ERR_CERT_NOT_YET_VALID 9 #define X509_V_ERR_CERT_HAS_EXPIRED 10 #define X509_V_ERR_CRL_NOT_YET_VALID 11 #define X509_V_ERR_CRL_HAS_EXPIRED 12 #define X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD 13 #define X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD 14 #define X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD 15 #define X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD 16 #define X509_V_ERR_OUT_OF_MEM 17 #define X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT 18 #define X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN 19 #define X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY 20 #define X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE 21 #define X509_V_ERR_CERT_CHAIN_TOO_LONG 22 #define X509_V_ERR_CERT_REVOKED 23 #define X509_V_ERR_INVALID_CA 24 #define X509_V_ERR_PATH_LENGTH_EXCEEDED 25 #define X509_V_ERR_INVALID_PURPOSE 26 #define 
X509_V_ERR_CERT_UNTRUSTED 27 #define X509_V_ERR_CERT_REJECTED 28 #define X509_V_ERR_SUBJECT_ISSUER_MISMATCH 29 #define X509_V_ERR_AKID_SKID_MISMATCH 30 #define X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH 31 #define X509_V_ERR_KEYUSAGE_NO_CERTSIGN 32 #define X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER 33 #define X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION 34 #define X509_V_ERR_KEYUSAGE_NO_CRL_SIGN 35 #define X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION 36 #define X509_V_ERR_INVALID_NON_CA 37 #define X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED 38 #define X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE 39 #define X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED 40 #define X509_V_ERR_INVALID_EXTENSION 41 #define X509_V_ERR_INVALID_POLICY_EXTENSION 42 #define X509_V_ERR_NO_EXPLICIT_POLICY 43 #define X509_V_ERR_DIFFERENT_CRL_SCOPE 44 #define X509_V_ERR_UNSUPPORTED_EXTENSION_FEATURE 45 #define X509_V_ERR_UNNESTED_RESOURCE 46 #define X509_V_ERR_PERMITTED_VIOLATION 47 #define X509_V_ERR_EXCLUDED_VIOLATION 48 #define X509_V_ERR_SUBTREE_MINMAX 49 #define X509_V_ERR_APPLICATION_VERIFICATION 50 #define X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE 51 #define X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX 52 #define X509_V_ERR_UNSUPPORTED_NAME_SYNTAX 53 #define X509_V_ERR_CRL_PATH_VALIDATION_ERROR 54 #define X509_V_ERR_HOSTNAME_MISMATCH 62 #define X509_V_ERR_EMAIL_MISMATCH 63 #define X509_V_ERR_IP_ADDRESS_MISMATCH 64 #define X509_V_ERR_INVALID_CALL 65 #define X509_V_ERR_STORE_LOOKUP 66 #define X509_V_ERR_NAME_CONSTRAINTS_WITHOUT_SANS 67 // X509_STORE_CTX_get_error, after |X509_verify_cert| returns, returns // |X509_V_OK| if verification succeeded or an |X509_V_ERR_*| describing why // verification failed. This will be consistent with |X509_verify_cert|'s return // value, unless the caller used the deprecated verification callback (see // |X509_STORE_CTX_set_verify_cb|) in a way that breaks |ctx|'s invariants. 
// // If called during the deprecated verification callback when |ok| is zero, it // returns the current error under consideration. OPENSSL_EXPORT int X509_STORE_CTX_get_error(const X509_STORE_CTX *ctx); // X509_STORE_CTX_set_error sets |ctx|'s error to |err|, which should be // |X509_V_OK| or an |X509_V_ERR_*| constant. It is not expected to be called in // typical |X509_STORE_CTX| usage, but may be used in callback APIs where // applications synthesize |X509_STORE_CTX| error conditions. See also // |X509_STORE_CTX_set_verify_cb| and |SSL_CTX_set_cert_verify_callback|. OPENSSL_EXPORT void X509_STORE_CTX_set_error(X509_STORE_CTX *ctx, int err); // X509_verify_cert_error_string returns |err| as a human-readable string, where // |err| should be one of the |X509_V_*| values. If |err| is unknown, it returns // a default description. OPENSSL_EXPORT const char *X509_verify_cert_error_string(long err); // X509_STORE_CTX_get_error_depth returns the depth at which the error returned // by |X509_STORE_CTX_get_error| occurred. This is a zero-indexed integer into the // certificate chain. Zero indicates the target certificate, one its issuer, and // so on. OPENSSL_EXPORT int X509_STORE_CTX_get_error_depth(const X509_STORE_CTX *ctx); // X509_STORE_CTX_get_current_cert returns the certificate which caused the // error returned by |X509_STORE_CTX_get_error|. OPENSSL_EXPORT X509 *X509_STORE_CTX_get_current_cert(const X509_STORE_CTX *ctx); // X509_STORE_CTX_get0_current_crl returns the CRL which caused the error // returned by |X509_STORE_CTX_get_error|. OPENSSL_EXPORT X509_CRL *X509_STORE_CTX_get0_current_crl( const X509_STORE_CTX *ctx); // X509_STORE_CTX_get0_store returns the |X509_STORE| that |ctx| uses. OPENSSL_EXPORT X509_STORE *X509_STORE_CTX_get0_store(const X509_STORE_CTX *ctx); // X509_STORE_CTX_get0_cert returns the leaf certificate that |ctx| is // verifying. 
OPENSSL_EXPORT X509 *X509_STORE_CTX_get0_cert(const X509_STORE_CTX *ctx); // X509_STORE_CTX_get0_untrusted returns the stack of untrusted intermediates // used by |ctx| for certificate verification. OPENSSL_EXPORT STACK_OF(X509) *X509_STORE_CTX_get0_untrusted( const X509_STORE_CTX *ctx); // X509_STORE_CTX_set0_trusted_stack configures |ctx| to trust the certificates // in |sk|. |sk| must remain valid for the duration of |ctx|. Calling this // function causes |ctx| to ignore any certificates configured in the // |X509_STORE|. Certificates in |sk| are still subject to the check described // in |X509_VERIFY_PARAM_set_trust|. // // WARNING: This function differs from most |set0| functions in that it does not // take ownership of its input. The caller is required to ensure the lifetimes // are consistent. OPENSSL_EXPORT void X509_STORE_CTX_set0_trusted_stack(X509_STORE_CTX *ctx, STACK_OF(X509) *sk); // X509_STORE_CTX_set0_crls configures |ctx| to consider the CRLs in |sk| as // candidates for CRL lookup. |sk| must remain valid for the duration of |ctx|. // These CRLs are considered in addition to CRLs found in |X509_STORE|. // // WARNING: This function differs from most |set0| functions in that it does not // take ownership of its input. The caller is required to ensure the lifetimes // are consistent. OPENSSL_EXPORT void X509_STORE_CTX_set0_crls(X509_STORE_CTX *ctx, STACK_OF(X509_CRL) *sk); // X509_STORE_CTX_set_default looks up the set of parameters named |name| and // applies those default verification parameters for |ctx|. As in // |X509_VERIFY_PARAM_inherit|, only unset parameters are changed. This function // returns one on success and zero on error. // // The supported values of |name| are: // - "default" is an internal value which configures some late defaults. See the // discussion in |X509_STORE_get0_param|. // - "pkcs7" configures default trust and purpose checks for PKCS#7 signatures. 
// - "smime_sign" configures trust and purpose checks for S/MIME signatures. // - "ssl_client" configures trust and purpose checks for TLS clients. // - "ssl_server" configures trust and purpose checks for TLS servers. // // TODO(crbug.com/boringssl/441): Make "default" a no-op. OPENSSL_EXPORT int X509_STORE_CTX_set_default(X509_STORE_CTX *ctx, const char *name); // X509_STORE_CTX_get0_param returns |ctx|'s verification parameters. This // object is mutable and may be modified by the caller. OPENSSL_EXPORT X509_VERIFY_PARAM *X509_STORE_CTX_get0_param( X509_STORE_CTX *ctx); // X509_STORE_CTX_set0_param returns |ctx|'s verification parameters to |param| // and takes ownership of |param|. After this function returns, the caller // should not free |param|. // // WARNING: This function discards any values which were previously applied in // |ctx|, including the "default" parameters applied late in // |X509_STORE_CTX_init|. These late defaults are not applied to parameters // created standalone by |X509_VERIFY_PARAM_new|. // // TODO(crbug.com/boringssl/441): This behavior is very surprising. Should we // re-apply the late defaults in |param|, or somehow avoid this notion of late // defaults altogether? OPENSSL_EXPORT void X509_STORE_CTX_set0_param(X509_STORE_CTX *ctx, X509_VERIFY_PARAM *param); // X509_STORE_CTX_set_flags enables all values in |flags| in |ctx|'s // verification flags. |flags| should be a combination of |X509_V_FLAG_*| // constants. OPENSSL_EXPORT void X509_STORE_CTX_set_flags(X509_STORE_CTX *ctx, unsigned long flags); // X509_STORE_CTX_set_time configures certificate verification to use |t| // instead of the current time. |flags| is ignored and should be zero. OPENSSL_EXPORT void X509_STORE_CTX_set_time(X509_STORE_CTX *ctx, unsigned long flags, time_t t); // X509_STORE_CTX_set_time_posix configures certificate verification to use |t| // instead of the current time. |t| is interpreted as a POSIX timestamp in // seconds. 
|flags| is ignored and should be zero. OPENSSL_EXPORT void X509_STORE_CTX_set_time_posix(X509_STORE_CTX *ctx, unsigned long flags, int64_t t); // X509_STORE_CTX_set_depth configures |ctx| to, by default, limit certificate // chains to |depth| intermediate certificates. This count excludes both the // target certificate and the trust anchor (root certificate). OPENSSL_EXPORT void X509_STORE_CTX_set_depth(X509_STORE_CTX *ctx, int depth); // X509_STORE_CTX_set_purpose simultaneously configures |ctx|'s purpose and // trust checks, if unset. It returns one on success and zero if |purpose| is // not a valid purpose value. |purpose| should be an |X509_PURPOSE_*| constant. // If so, it configures |ctx| with a purpose check of |purpose| and a trust // check of |purpose|'s corresponding trust value. If either the purpose or // trust check had already been specified for |ctx|, that corresponding // modification is silently dropped. // // See |X509_VERIFY_PARAM_set_purpose| and |X509_VERIFY_PARAM_set_trust| for // details on the purpose and trust checks, respectively. // // If |purpose| is |X509_PURPOSE_ANY|, this function returns an error because it // has no corresponding |X509_TRUST_*| value. It is not possible to set // |X509_PURPOSE_ANY| with this function, only |X509_VERIFY_PARAM_set_purpose|. // // WARNING: Unlike similarly named functions in this header, this function // silently does not behave the same as |X509_VERIFY_PARAM_set_purpose|. Callers // may use |X509_VERIFY_PARAM_set_purpose| with |X509_STORE_CTX_get0_param| to // avoid this difference. OPENSSL_EXPORT int X509_STORE_CTX_set_purpose(X509_STORE_CTX *ctx, int purpose); // X509_STORE_CTX_set_trust configures |ctx|'s trust check, if unset. It returns // one on success and zero if |trust| is not a valid trust value. |trust| should // be an |X509_TRUST_*| constant. If so, it configures |ctx| with a trust check // of |trust|. If the trust check had already been specified for |ctx|, it // silently does nothing. 
// // See |X509_VERIFY_PARAM_set_trust| for details on the purpose and trust check. // // WARNING: Unlike similarly named functions in this header, this function // does not behave the same as |X509_VERIFY_PARAM_set_trust|. Callers may use // |X509_VERIFY_PARAM_set_trust| with |X509_STORE_CTX_get0_param| to avoid this // difference. OPENSSL_EXPORT int X509_STORE_CTX_set_trust(X509_STORE_CTX *ctx, int trust); // Verification parameters. // // An |X509_VERIFY_PARAM| contains a set of parameters for certificate // verification. // X509_VERIFY_PARAM_new returns a newly-allocated |X509_VERIFY_PARAM|, or NULL // on error. OPENSSL_EXPORT X509_VERIFY_PARAM *X509_VERIFY_PARAM_new(void); // X509_VERIFY_PARAM_free releases memory associated with |param|. OPENSSL_EXPORT void X509_VERIFY_PARAM_free(X509_VERIFY_PARAM *param); // X509_VERIFY_PARAM_inherit applies |from| as the default values for |to|. That // is, for each parameter that is unset in |to|, it copies the value in |from|. // This function returns one on success and zero on error. OPENSSL_EXPORT int X509_VERIFY_PARAM_inherit(X509_VERIFY_PARAM *to, const X509_VERIFY_PARAM *from); // X509_VERIFY_PARAM_set1 copies parameters from |from| to |to|. If a parameter // is unset in |from|, the existing value in |to| is preserved. This function // returns one on success and zero on error. OPENSSL_EXPORT int X509_VERIFY_PARAM_set1(X509_VERIFY_PARAM *to, const X509_VERIFY_PARAM *from); // X509_V_FLAG_* are flags for |X509_VERIFY_PARAM_set_flags| and // |X509_VERIFY_PARAM_clear_flags|. // X509_V_FLAG_CB_ISSUER_CHECK causes the deprecated verify callback (see // |X509_STORE_CTX_set_verify_cb|) to be called for errors while matching // subject and issuer certificates. #define X509_V_FLAG_CB_ISSUER_CHECK 0x1 // X509_V_FLAG_USE_CHECK_TIME is an internal flag used to track whether // |X509_STORE_CTX_set_time| has been used. If cleared, the system time is // restored. 
#define X509_V_FLAG_USE_CHECK_TIME 0x2 // X509_V_FLAG_CRL_CHECK enables CRL lookup and checking for the leaf. #define X509_V_FLAG_CRL_CHECK 0x4 // X509_V_FLAG_CRL_CHECK_ALL enables CRL lookup and checking for the entire // certificate chain. |X509_V_FLAG_CRL_CHECK| must be set for this flag to take // effect. #define X509_V_FLAG_CRL_CHECK_ALL 0x8 // X509_V_FLAG_IGNORE_CRITICAL ignores unhandled critical extensions. Do not use // this option. Critical extensions ensure the verifier does not bypass // unrecognized security restrictions in certificates. #define X509_V_FLAG_IGNORE_CRITICAL 0x10 // X509_V_FLAG_X509_STRICT does nothing. Its functionality has been enabled by // default. #define X509_V_FLAG_X509_STRICT 0x00 // X509_V_FLAG_ALLOW_PROXY_CERTS does nothing. Proxy certificate support has // been removed. #define X509_V_FLAG_ALLOW_PROXY_CERTS 0x40 // X509_V_FLAG_POLICY_CHECK does nothing. Policy checking is always enabled. #define X509_V_FLAG_POLICY_CHECK 0x80 // X509_V_FLAG_EXPLICIT_POLICY requires some policy OID to be asserted by the // final certificate chain. See initial-explicit-policy from RFC 5280, // section 6.1.1. #define X509_V_FLAG_EXPLICIT_POLICY 0x100 // X509_V_FLAG_INHIBIT_ANY inhibits the anyPolicy OID. See // initial-any-policy-inhibit from RFC 5280, section 6.1.1. #define X509_V_FLAG_INHIBIT_ANY 0x200 // X509_V_FLAG_INHIBIT_MAP inhibits policy mapping. See // initial-policy-mapping-inhibit from RFC 5280, section 6.1.1. #define X509_V_FLAG_INHIBIT_MAP 0x400 // X509_V_FLAG_NOTIFY_POLICY does nothing. Its functionality has been removed. #define X509_V_FLAG_NOTIFY_POLICY 0x800 // X509_V_FLAG_EXTENDED_CRL_SUPPORT causes all verifications to fail. Extended // CRL features have been removed. #define X509_V_FLAG_EXTENDED_CRL_SUPPORT 0x1000 // X509_V_FLAG_USE_DELTAS causes all verifications to fail. Delta CRL support // has been removed. 
#define X509_V_FLAG_USE_DELTAS 0x2000 // X509_V_FLAG_CHECK_SS_SIGNATURE checks the redundant signature on self-signed // trust anchors. This check provides no security benefit and only wastes CPU. #define X509_V_FLAG_CHECK_SS_SIGNATURE 0x4000 // X509_V_FLAG_TRUSTED_FIRST, during path-building, checks for a match in the // trust store before considering an untrusted intermediate. This flag is // enabled by default. #define X509_V_FLAG_TRUSTED_FIRST 0x8000 // X509_V_FLAG_PARTIAL_CHAIN treats all trusted certificates as trust anchors, // independent of the |X509_VERIFY_PARAM_set_trust| setting. #define X509_V_FLAG_PARTIAL_CHAIN 0x80000 // X509_V_FLAG_NO_ALT_CHAINS disables building alternative chains if the initial // one was rejected. #define X509_V_FLAG_NO_ALT_CHAINS 0x100000 // X509_V_FLAG_NO_CHECK_TIME disables all time checks in certificate // verification. #define X509_V_FLAG_NO_CHECK_TIME 0x200000 // X509_VERIFY_PARAM_set_flags enables all values in |flags| in |param|'s // verification flags and returns one. |flags| should be a combination of // |X509_V_FLAG_*| constants. OPENSSL_EXPORT int X509_VERIFY_PARAM_set_flags(X509_VERIFY_PARAM *param, unsigned long flags); // X509_VERIFY_PARAM_clear_flags disables all values in |flags| in |param|'s // verification flags and returns one. |flags| should be a combination of // |X509_V_FLAG_*| constants. OPENSSL_EXPORT int X509_VERIFY_PARAM_clear_flags(X509_VERIFY_PARAM *param, unsigned long flags); // X509_VERIFY_PARAM_get_flags returns |param|'s verification flags. OPENSSL_EXPORT unsigned long X509_VERIFY_PARAM_get_flags( const X509_VERIFY_PARAM *param); // X509_VERIFY_PARAM_set_depth configures |param| to limit certificate chains to // |depth| intermediate certificates. This count excludes both the target // certificate and the trust anchor (root certificate). 
OPENSSL_EXPORT void X509_VERIFY_PARAM_set_depth(X509_VERIFY_PARAM *param, int depth); // X509_VERIFY_PARAM_get_depth returns the maximum depth configured in |param|. // See |X509_VERIFY_PARAM_set_depth|. OPENSSL_EXPORT int X509_VERIFY_PARAM_get_depth(const X509_VERIFY_PARAM *param); // X509_VERIFY_PARAM_set_time configures certificate verification to use |t| // instead of the current time. OPENSSL_EXPORT void X509_VERIFY_PARAM_set_time(X509_VERIFY_PARAM *param, time_t t); // X509_VERIFY_PARAM_set_time_posix configures certificate verification to use // |t| instead of the current time. |t| is interpreted as a POSIX timestamp in // seconds. OPENSSL_EXPORT void X509_VERIFY_PARAM_set_time_posix(X509_VERIFY_PARAM *param, int64_t t); // X509_VERIFY_PARAM_add0_policy adds |policy| to the user-initial-policy-set // (see Section 6.1.1 of RFC 5280). On success, it takes ownership of // |policy| and returns one. Otherwise, it returns zero and the caller retains // owneship of |policy|. OPENSSL_EXPORT int X509_VERIFY_PARAM_add0_policy(X509_VERIFY_PARAM *param, ASN1_OBJECT *policy); // X509_VERIFY_PARAM_set1_policies sets the user-initial-policy-set (see // Section 6.1.1 of RFC 5280) to a copy of |policies|. It returns one on success // and zero on error. OPENSSL_EXPORT int X509_VERIFY_PARAM_set1_policies( X509_VERIFY_PARAM *param, const STACK_OF(ASN1_OBJECT) *policies); // X509_VERIFY_PARAM_set1_host configures |param| to check for the DNS name // specified by |name|. It returns one on success and zero on error. // // By default, both subject alternative names and the subject's common name // attribute are checked. The latter has long been deprecated, so callers should // call |X509_VERIFY_PARAM_set_hostflags| with // |X509_CHECK_FLAG_NEVER_CHECK_SUBJECT| to use the standard behavior. // https://crbug.com/boringssl/464 tracks fixing the default. 
OPENSSL_EXPORT int X509_VERIFY_PARAM_set1_host(X509_VERIFY_PARAM *param, const char *name, size_t name_len); // X509_VERIFY_PARAM_add1_host adds |name| to the list of names checked by // |param|. If any configured DNS name matches the certificate, verification // succeeds. It returns one on success and zero on error. // // By default, both subject alternative names and the subject's common name // attribute are checked. The latter has long been deprecated, so callers should // call |X509_VERIFY_PARAM_set_hostflags| with // |X509_CHECK_FLAG_NEVER_CHECK_SUBJECT| to use the standard behavior. // https://crbug.com/boringssl/464 tracks fixing the default. OPENSSL_EXPORT int X509_VERIFY_PARAM_add1_host(X509_VERIFY_PARAM *param, const char *name, size_t name_len); // X509_CHECK_FLAG_NO_WILDCARDS disables wildcard matching for DNS names. #define X509_CHECK_FLAG_NO_WILDCARDS 0x2 // X509_CHECK_FLAG_NEVER_CHECK_SUBJECT disables the subject fallback, normally // enabled when subjectAltNames is missing. #define X509_CHECK_FLAG_NEVER_CHECK_SUBJECT 0x20 // X509_VERIFY_PARAM_set_hostflags sets the name-checking flags on |param| to // |flags|. |flags| should be a combination of |X509_CHECK_FLAG_*| constants. OPENSSL_EXPORT void X509_VERIFY_PARAM_set_hostflags(X509_VERIFY_PARAM *param, unsigned int flags); // X509_VERIFY_PARAM_set1_email configures |param| to check for the email // address specified by |email|. It returns one on success and zero on error. // // By default, both subject alternative names and the subject's email address // attribute are checked. The |X509_CHECK_FLAG_NEVER_CHECK_SUBJECT| flag may be // used to change this behavior. OPENSSL_EXPORT int X509_VERIFY_PARAM_set1_email(X509_VERIFY_PARAM *param, const char *email, size_t email_len); // X509_VERIFY_PARAM_set1_ip configures |param| to check for the IP address // specified by |ip|. It returns one on success and zero on error. The IP // address is specified in its binary representation. 
|ip_len| must be 4 for an // IPv4 address and 16 for an IPv6 address. OPENSSL_EXPORT int X509_VERIFY_PARAM_set1_ip(X509_VERIFY_PARAM *param, const uint8_t *ip, size_t ip_len); // X509_VERIFY_PARAM_set1_ip_asc decodes |ipasc| as the ASCII representation of // an IPv4 or IPv6 address, and configures |param| to check for it. It returns // one on success and zero on error. OPENSSL_EXPORT int X509_VERIFY_PARAM_set1_ip_asc(X509_VERIFY_PARAM *param, const char *ipasc); // X509_PURPOSE_SSL_CLIENT validates TLS client certificates. It checks for the // id-kp-clientAuth EKU and one of digitalSignature or keyAgreement key usages. // The TLS library is expected to check for the key usage specific to the // negotiated TLS parameters. #define X509_PURPOSE_SSL_CLIENT 1 // X509_PURPOSE_SSL_SERVER validates TLS server certificates. It checks for the // id-kp-clientAuth EKU and one of digitalSignature, keyAgreement, or // keyEncipherment key usages. The TLS library is expected to check for the key // usage specific to the negotiated TLS parameters. #define X509_PURPOSE_SSL_SERVER 2 // X509_PURPOSE_NS_SSL_SERVER is a legacy mode. It behaves like // |X509_PURPOSE_SSL_SERVER|, but only accepts the keyEncipherment key usage, // used by SSL 2.0 and RSA key exchange. Do not use this. #define X509_PURPOSE_NS_SSL_SERVER 3 // X509_PURPOSE_SMIME_SIGN validates S/MIME signing certificates. It checks for // the id-kp-emailProtection EKU and one of digitalSignature or nonRepudiation // key usages. #define X509_PURPOSE_SMIME_SIGN 4 // X509_PURPOSE_SMIME_ENCRYPT validates S/MIME encryption certificates. It // checks for the id-kp-emailProtection EKU and keyEncipherment key usage. #define X509_PURPOSE_SMIME_ENCRYPT 5 // X509_PURPOSE_CRL_SIGN validates indirect CRL signers. It checks for the // cRLSign key usage. BoringSSL does not support indirect CRLs and does not use // this mode. #define X509_PURPOSE_CRL_SIGN 6 // X509_PURPOSE_ANY performs no EKU or key usage checks. 
Such checks are the // responsibility of the caller. #define X509_PURPOSE_ANY 7 // X509_PURPOSE_OCSP_HELPER performs no EKU or key usage checks. It was // historically used in OpenSSL's OCSP implementation, which left those checks // to the OCSP implementation itself. #define X509_PURPOSE_OCSP_HELPER 8 // X509_PURPOSE_TIMESTAMP_SIGN validates Time Stamping Authority (RFC 3161) // certificates. It checks for the id-kp-timeStamping EKU and one of // digitalSignature or nonRepudiation key usages. It additionally checks that // the EKU extension is critical and that no other EKUs or key usages are // asserted. #define X509_PURPOSE_TIMESTAMP_SIGN 9 // X509_VERIFY_PARAM_set_purpose configures |param| to validate certificates for // a specified purpose. It returns one on success and zero if |purpose| is not a // valid purpose type. |purpose| should be one of the |X509_PURPOSE_*| values. // // This option controls checking the extended key usage (EKU) and key usage // extensions. These extensions specify how a certificate's public key may be // used and are important to avoid cross-protocol attacks, particularly in PKIs // that may issue certificates for multiple protocols, or for protocols that use // keys in multiple ways. If not configured, these security checks are the // caller's responsibility. // // This library applies the EKU checks to all untrusted intermediates. Although // not defined in RFC 5280, this matches widely-deployed practice. It also does // not accept anyExtendedKeyUsage. // // Many purpose values have a corresponding trust value, which is not configured // by this function. See |X509_VERIFY_PARAM_set_trust| for details. Callers // that wish to configure both should either call both functions, or use // |X509_STORE_CTX_set_purpose|. // // It is currently not possible to configure custom EKU OIDs or key usage bits. // Contact the BoringSSL maintainers if your application needs to do so. 
OpenSSL // had an |X509_PURPOSE_add| API, but it was not thread-safe and relied on // global mutable state, so we removed it. // // TODO(davidben): This function additionally configures checking the legacy // Netscape certificate type extension. Remove this. OPENSSL_EXPORT int X509_VERIFY_PARAM_set_purpose(X509_VERIFY_PARAM *param, int purpose); // X509_TRUST_COMPAT evaluates trust using only the self-signed fallback. Trust // and distrust OIDs are ignored. #define X509_TRUST_COMPAT 1 // X509_TRUST_SSL_CLIENT evaluates trust with the |NID_client_auth| OID, for // validating TLS client certificates. #define X509_TRUST_SSL_CLIENT 2 // X509_TRUST_SSL_SERVER evaluates trust with the |NID_server_auth| OID, for // validating TLS server certificates. #define X509_TRUST_SSL_SERVER 3 // X509_TRUST_EMAIL evaluates trust with the |NID_email_protect| OID, for // validating S/MIME email certificates. #define X509_TRUST_EMAIL 4 // X509_TRUST_OBJECT_SIGN evaluates trust with the |NID_code_sign| OID, for // validating code signing certificates. #define X509_TRUST_OBJECT_SIGN 5 // X509_TRUST_TSA evaluates trust with the |NID_time_stamp| OID, for validating // Time Stamping Authority (RFC 3161) certificates. #define X509_TRUST_TSA 8 // X509_VERIFY_PARAM_set_trust configures which certificates from |X509_STORE| // are trust anchors. It returns one on success and zero if |trust| is not a // valid trust value. |trust| should be one of the |X509_TRUST_*| constants. // This function allows applications to vary trust anchors when the same set of // trusted certificates is used in multiple contexts. // // Two properties determine whether a certificate is a trust anchor: // // - Whether it is trusted or distrusted for some OID, via auxiliary information // configured by |X509_add1_trust_object| or |X509_add1_reject_object|. // // - Whether it is "self-signed". That is, whether |X509_get_extension_flags| // includes |EXFLAG_SS|. The signature itself is not checked. 
// // When this function is called, |trust| determines the OID to check in the // first case. If the certificate is not explicitly trusted or distrusted for // any OID, it is trusted if self-signed instead. // // If unset, the default behavior is to check for the |NID_anyExtendedKeyUsage| // OID. If the certificate is not explicitly trusted or distrusted for this OID, // it is trusted if self-signed instead. Note this slightly differs from the // above. // // If the |X509_V_FLAG_PARTIAL_CHAIN| is set, every certificate from // |X509_STORE| is a trust anchor, unless it was explicitly distrusted for the // OID. // // It is currently not possible to configure custom trust OIDs. Contact the // BoringSSL maintainers if your application needs to do so. OpenSSL had an // |X509_TRUST_add| API, but it was not thread-safe and relied on global mutable // state, so we removed it. OPENSSL_EXPORT int X509_VERIFY_PARAM_set_trust(X509_VERIFY_PARAM *param, int trust); // Filesystem-based certificate stores. // // An |X509_STORE| may be configured to get its contents from the filesystem. // This is done by adding |X509_LOOKUP| structures to the |X509_STORE| with // |X509_STORE_add_lookup| and then configuring the |X509_LOOKUP| with paths. // // Most cases can use |X509_STORE_load_locations|, which configures the same // thing but is simpler to use. // X509_STORE_load_locations configures |store| to load data from filepaths // |file| and |dir|. It returns one on success and zero on error. Either of // |file| or |dir| may be NULL, but at least one must be non-NULL. // // If |file| is non-NULL, it loads CRLs and trusted certificates in PEM format // from the file at |file|, and them to |store|, as in |X509_load_cert_crl_file| // with |X509_FILETYPE_PEM|. // // If |dir| is non-NULL, it configures |store| to load CRLs and trusted // certificates from the directory at |dir| in PEM format, as in // |X509_LOOKUP_add_dir| with |X509_FILETYPE_PEM|. 
OPENSSL_EXPORT int X509_STORE_load_locations(X509_STORE *store, const char *file, const char *dir); // X509_STORE_add_lookup returns an |X509_LOOKUP| associated with |store| with // type |method|, or NULL on error. The result is owned by |store|, so callers // are not expected to free it. This may be used with |X509_LOOKUP_add_dir| or // |X509_LOOKUP_load_file|, depending on |method|, to configure |store|. // // A single |X509_LOOKUP| may be configured with multiple paths, and an // |X509_STORE| only contains one |X509_LOOKUP| of each type, so there is no // need to call this function multiple times for a single type. Calling it // multiple times will return the previous |X509_LOOKUP| of that type. OPENSSL_EXPORT X509_LOOKUP *X509_STORE_add_lookup( X509_STORE *store, const X509_LOOKUP_METHOD *method); // X509_LOOKUP_hash_dir creates |X509_LOOKUP|s that may be used with // |X509_LOOKUP_add_dir|. OPENSSL_EXPORT const X509_LOOKUP_METHOD *X509_LOOKUP_hash_dir(void); // X509_LOOKUP_file creates |X509_LOOKUP|s that may be used with // |X509_LOOKUP_load_file|. // // Although this is modeled as an |X509_LOOKUP|, this function is redundant. It // has the same effect as loading a certificate or CRL from the filesystem, in // the caller's desired format, and then adding it with |X509_STORE_add_cert| // and |X509_STORE_add_crl|. OPENSSL_EXPORT const X509_LOOKUP_METHOD *X509_LOOKUP_file(void); // The following constants are used to specify the format of files in an // |X509_LOOKUP|. #define X509_FILETYPE_PEM 1 #define X509_FILETYPE_ASN1 2 #define X509_FILETYPE_DEFAULT 3 // X509_LOOKUP_load_file calls |X509_load_cert_crl_file|. |lookup| must have // been constructed with |X509_LOOKUP_file|. // // If |type| is |X509_FILETYPE_DEFAULT|, it ignores |file| and instead uses some // default system path with |X509_FILETYPE_PEM|. See also // |X509_STORE_set_default_paths|. 
OPENSSL_EXPORT int X509_LOOKUP_load_file(X509_LOOKUP *lookup, const char *file, int type); // X509_LOOKUP_add_dir configures |lookup| to load CRLs and trusted certificates // from the directories in |path|. It returns one on success and zero on error. // |lookup| must have been constructed with |X509_LOOKUP_hash_dir|. // // WARNING: |path| is interpreted as a colon-separated (semicolon-separated on // Windows) list of paths. It is not possible to configure a path containing the // separator character. https://crbug.com/boringssl/691 tracks removing this // behavior. // // |type| should be one of the |X509_FILETYPE_*| constants and determines the // format of the files. If |type| is |X509_FILETYPE_DEFAULT|, |path| is ignored // and some default system path is used with |X509_FILETYPE_PEM|. See also // |X509_STORE_set_default_paths|. // // Trusted certificates should be named HASH.N and CRLs should be // named HASH.rN. HASH is |X509_NAME_hash| of the certificate subject and CRL // issuer, respectively, in hexadecimal. N is in decimal and counts hash // collisions consecutively, starting from zero. For example, "002c0b4f.0" and // "002c0b4f.r0". // // WARNING: Objects from |path| are loaded on demand, but cached in memory on // the |X509_STORE|. If a CA is removed from the directory, existing // |X509_STORE|s will continue to trust it. Cache entries are not evicted for // the lifetime of the |X509_STORE|. // // WARNING: This mechanism is also not well-suited for CRL updates. // |X509_STORE|s rely on this cache and never load the same CRL file twice. CRL // updates must use a new file, with an incremented suffix, to be reflected in // existing |X509_STORE|s. However, this means each CRL update will use // additional storage and memory. Instead, configure inputs that vary per // verification, such as CRLs, on each |X509_STORE_CTX| separately, using // functions like |X509_STORE_CTX_set0_crl|. 
OPENSSL_EXPORT int X509_LOOKUP_add_dir(X509_LOOKUP *lookup, const char *path, int type); // X509_L_* are commands for |X509_LOOKUP_ctrl|. #define X509_L_FILE_LOAD 1 #define X509_L_ADD_DIR 2 // X509_LOOKUP_ctrl implements commands on |lookup|. |cmd| specifies the // command. The other arguments specify the operation in a command-specific way. // Use |X509_LOOKUP_load_file| or |X509_LOOKUP_add_dir| instead. OPENSSL_EXPORT int X509_LOOKUP_ctrl(X509_LOOKUP *lookup, int cmd, const char *argc, long argl, char **ret); // X509_load_cert_file loads trusted certificates from |file| and adds them to // |lookup|'s |X509_STORE|. It returns one on success and zero on error. // // If |type| is |X509_FILETYPE_ASN1|, it loads a single DER-encoded certificate. // If |type| is |X509_FILETYPE_PEM|, it loads a sequence of PEM-encoded // certificates. |type| may not be |X509_FILETYPE_DEFAULT|. OPENSSL_EXPORT int X509_load_cert_file(X509_LOOKUP *lookup, const char *file, int type); // X509_load_crl_file loads CRLs from |file| and add them it to |lookup|'s // |X509_STORE|. It returns one on success and zero on error. // // If |type| is |X509_FILETYPE_ASN1|, it loads a single DER-encoded CRL. If // |type| is |X509_FILETYPE_PEM|, it loads a sequence of PEM-encoded CRLs. // |type| may not be |X509_FILETYPE_DEFAULT|. OPENSSL_EXPORT int X509_load_crl_file(X509_LOOKUP *lookup, const char *file, int type); // X509_load_cert_crl_file loads CRLs and trusted certificates from |file| and // adds them to |lookup|'s |X509_STORE|. It returns one on success and zero on // error. // // If |type| is |X509_FILETYPE_ASN1|, it loads a single DER-encoded certificate. // This function cannot be used to load a DER-encoded CRL. If |type| is // |X509_FILETYPE_PEM|, it loads a sequence of PEM-encoded certificates and // CRLs. |type| may not be |X509_FILETYPE_DEFAULT|. 
OPENSSL_EXPORT int X509_load_cert_crl_file(X509_LOOKUP *lookup, const char *file, int type); // X509_NAME_hash returns a hash of |name|, or zero on error. This is the new // hash used by |X509_LOOKUP_add_dir|. // // This hash is specific to the |X509_LOOKUP_add_dir| filesystem format and is // not suitable for general-purpose X.509 name processing. It is very short, so // there will be hash collisions. It also depends on an OpenSSL-specific // canonicalization process. // // TODO(https://crbug.com/boringssl/407): This should be const and thread-safe // but currently is neither, notably if |name| was modified from its parsed // value. OPENSSL_EXPORT uint32_t X509_NAME_hash(X509_NAME *name); // X509_NAME_hash_old returns a hash of |name|, or zero on error. This is the // legacy hash used by |X509_LOOKUP_add_dir|, which is still supported for // compatibility. // // This hash is specific to the |X509_LOOKUP_add_dir| filesystem format and is // not suitable for general-purpose X.509 name processing. It is very short, so // there will be hash collisions. // // TODO(https://crbug.com/boringssl/407): This should be const and thread-safe // but currently is neither, notably if |name| was modified from its parsed // value. OPENSSL_EXPORT uint32_t X509_NAME_hash_old(X509_NAME *name); // X509_STORE_set_default_paths configures |store| to read from some "default" // filesystem paths. It returns one on success and zero on error. The filesystem // paths are determined by a combination of hardcoded paths and the SSL_CERT_DIR // and SSL_CERT_FILE environment variables. // // Using this function is not recommended. In OpenSSL, these defaults are // determined by OpenSSL's install prefix. There is no corresponding concept for // BoringSSL. Future versions of BoringSSL may change or remove this // functionality. 
OPENSSL_EXPORT int X509_STORE_set_default_paths(X509_STORE *store); // The following functions return filesystem paths used to determine the above // "default" paths, when the corresponding environment variables are not set. // // Using these functions is not recommended. In OpenSSL, these defaults are // determined by OpenSSL's install prefix. There is no corresponding concept for // BoringSSL. Future versions of BoringSSL may change or remove this // functionality. OPENSSL_EXPORT const char *X509_get_default_cert_area(void); OPENSSL_EXPORT const char *X509_get_default_cert_dir(void); OPENSSL_EXPORT const char *X509_get_default_cert_file(void); OPENSSL_EXPORT const char *X509_get_default_private_dir(void); // X509_get_default_cert_dir_env returns "SSL_CERT_DIR", an environment variable // used to determine the above "default" paths. OPENSSL_EXPORT const char *X509_get_default_cert_dir_env(void); // X509_get_default_cert_file_env returns "SSL_CERT_FILE", an environment // variable used to determine the above "default" paths. OPENSSL_EXPORT const char *X509_get_default_cert_file_env(void); // SignedPublicKeyAndChallenge structures. // // The SignedPublicKeyAndChallenge (SPKAC) is a legacy structure to request // certificates, primarily in the legacy HTML tag. An SPKAC structure // is represented by a |NETSCAPE_SPKI| structure. // // The structure is described in // https://developer.mozilla.org/en-US/docs/Web/HTML/Element/keygen // A Netscape_spki_st, or |NETSCAPE_SPKI|, represents a // SignedPublicKeyAndChallenge structure. Although this structure contains a // |spkac| field of type |NETSCAPE_SPKAC|, these are misnamed. The SPKAC is the // entire structure, not the signed portion. struct Netscape_spki_st { NETSCAPE_SPKAC *spkac; X509_ALGOR *sig_algor; ASN1_BIT_STRING *signature; } /* NETSCAPE_SPKI */; // NETSCAPE_SPKI_new returns a newly-allocated, empty |NETSCAPE_SPKI| object, or // NULL on error. 
OPENSSL_EXPORT NETSCAPE_SPKI *NETSCAPE_SPKI_new(void); // NETSCAPE_SPKI_free releases memory associated with |spki|. OPENSSL_EXPORT void NETSCAPE_SPKI_free(NETSCAPE_SPKI *spki); // d2i_NETSCAPE_SPKI parses up to |len| bytes from |*inp| as a DER-encoded // SignedPublicKeyAndChallenge structure, as described in |d2i_SAMPLE|. OPENSSL_EXPORT NETSCAPE_SPKI *d2i_NETSCAPE_SPKI(NETSCAPE_SPKI **out, const uint8_t **inp, long len); // i2d_NETSCAPE_SPKI marshals |spki| as a DER-encoded // SignedPublicKeyAndChallenge structure, as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_NETSCAPE_SPKI(const NETSCAPE_SPKI *spki, uint8_t **outp); // NETSCAPE_SPKI_verify checks that |spki| has a valid signature by |pkey|. It // returns one if the signature is valid and zero otherwise. OPENSSL_EXPORT int NETSCAPE_SPKI_verify(NETSCAPE_SPKI *spki, EVP_PKEY *pkey); // NETSCAPE_SPKI_b64_decode decodes |len| bytes from |str| as a base64-encoded // SignedPublicKeyAndChallenge structure. It returns a newly-allocated // |NETSCAPE_SPKI| structure with the result, or NULL on error. If |len| is 0 or // negative, the length is calculated with |strlen| and |str| must be a // NUL-terminated C string. OPENSSL_EXPORT NETSCAPE_SPKI *NETSCAPE_SPKI_b64_decode(const char *str, ossl_ssize_t len); // NETSCAPE_SPKI_b64_encode encodes |spki| as a base64-encoded // SignedPublicKeyAndChallenge structure. It returns a newly-allocated // NUL-terminated C string with the result, or NULL on error. The caller must // release the memory with |OPENSSL_free| when done. OPENSSL_EXPORT char *NETSCAPE_SPKI_b64_encode(NETSCAPE_SPKI *spki); // NETSCAPE_SPKI_get_pubkey decodes and returns the public key in |spki| as an // |EVP_PKEY|, or NULL on error. The caller takes ownership of the resulting // pointer and must call |EVP_PKEY_free| when done. OPENSSL_EXPORT EVP_PKEY *NETSCAPE_SPKI_get_pubkey(const NETSCAPE_SPKI *spki); // NETSCAPE_SPKI_set_pubkey sets |spki|'s public key to |pkey|. 
It returns one // on success or zero on error. This function does not take ownership of |pkey|, // so the caller may continue to manage its lifetime independently of |spki|. OPENSSL_EXPORT int NETSCAPE_SPKI_set_pubkey(NETSCAPE_SPKI *spki, EVP_PKEY *pkey); // NETSCAPE_SPKI_sign signs |spki| with |pkey| and replaces the signature // algorithm and signature fields. It returns the length of the signature on // success and zero on error. This function uses digest algorithm |md|, or // |pkey|'s default if NULL. Other signing parameters use |pkey|'s defaults. OPENSSL_EXPORT int NETSCAPE_SPKI_sign(NETSCAPE_SPKI *spki, EVP_PKEY *pkey, const EVP_MD *md); // A Netscape_spkac_st, or |NETSCAPE_SPKAC|, represents a PublicKeyAndChallenge // structure. This type is misnamed. The full SPKAC includes the signature, // which is represented with the |NETSCAPE_SPKI| type. struct Netscape_spkac_st { X509_PUBKEY *pubkey; ASN1_IA5STRING *challenge; } /* NETSCAPE_SPKAC */; // NETSCAPE_SPKAC_new returns a newly-allocated, empty |NETSCAPE_SPKAC| object, // or NULL on error. OPENSSL_EXPORT NETSCAPE_SPKAC *NETSCAPE_SPKAC_new(void); // NETSCAPE_SPKAC_free releases memory associated with |spkac|. OPENSSL_EXPORT void NETSCAPE_SPKAC_free(NETSCAPE_SPKAC *spkac); // d2i_NETSCAPE_SPKAC parses up to |len| bytes from |*inp| as a DER-encoded // PublicKeyAndChallenge structure, as described in |d2i_SAMPLE|. OPENSSL_EXPORT NETSCAPE_SPKAC *d2i_NETSCAPE_SPKAC(NETSCAPE_SPKAC **out, const uint8_t **inp, long len); // i2d_NETSCAPE_SPKAC marshals |spkac| as a DER-encoded PublicKeyAndChallenge // structure, as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_NETSCAPE_SPKAC(const NETSCAPE_SPKAC *spkac, uint8_t **outp); // RSASSA-PSS Parameters. // // In X.509, RSASSA-PSS signatures and keys use a complex parameter structure, // defined in RFC 4055. The following functions are provided for compatibility // with some OpenSSL APIs relating to this. Use of RSASSA-PSS in X.509 is // discouraged. 
The parameters structure is very complex, and it takes more // bytes to merely encode parameters than an entire P-256 ECDSA signature. // An rsa_pss_params_st, aka |RSA_PSS_PARAMS|, represents a parsed // RSASSA-PSS-params structure, as defined in (RFC 4055). struct rsa_pss_params_st { X509_ALGOR *hashAlgorithm; X509_ALGOR *maskGenAlgorithm; ASN1_INTEGER *saltLength; ASN1_INTEGER *trailerField; // OpenSSL caches the MGF hash on |RSA_PSS_PARAMS| in some cases. None of the // cases apply to BoringSSL, so this is always NULL, but Node expects the // field to be present. X509_ALGOR *maskHash; } /* RSA_PSS_PARAMS */; // RSA_PSS_PARAMS is an |ASN1_ITEM| whose ASN.1 type is RSASSA-PSS-params (RFC // 4055) and C type is |RSA_PSS_PARAMS*|. DECLARE_ASN1_ITEM(RSA_PSS_PARAMS) // RSA_PSS_PARAMS_new returns a new, empty |RSA_PSS_PARAMS|, or NULL on error. OPENSSL_EXPORT RSA_PSS_PARAMS *RSA_PSS_PARAMS_new(void); // RSA_PSS_PARAMS_free releases memory associated with |params|. OPENSSL_EXPORT void RSA_PSS_PARAMS_free(RSA_PSS_PARAMS *params); // d2i_RSA_PSS_PARAMS parses up to |len| bytes from |*inp| as a DER-encoded // RSASSA-PSS-params (RFC 4055), as described in |d2i_SAMPLE|. OPENSSL_EXPORT RSA_PSS_PARAMS *d2i_RSA_PSS_PARAMS(RSA_PSS_PARAMS **out, const uint8_t **inp, long len); // i2d_RSA_PSS_PARAMS marshals |in| as a DER-encoded RSASSA-PSS-params (RFC // 4055), as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_RSA_PSS_PARAMS(const RSA_PSS_PARAMS *in, uint8_t **outp); // PKCS#8 private keys. // // The |PKCS8_PRIV_KEY_INFO| type represents a PKCS#8 PrivateKeyInfo (RFC 5208) // structure. This is analogous to SubjectPublicKeyInfo and uses the same // AlgorithmIdentifiers, but carries private keys and is not part of X.509 // itself. // // TODO(davidben): Do these functions really belong in this header? // PKCS8_PRIV_KEY_INFO_new returns a newly-allocated, empty // |PKCS8_PRIV_KEY_INFO| object, or NULL on error. 
OPENSSL_EXPORT PKCS8_PRIV_KEY_INFO *PKCS8_PRIV_KEY_INFO_new(void); // PKCS8_PRIV_KEY_INFO_free releases memory associated with |key|. OPENSSL_EXPORT void PKCS8_PRIV_KEY_INFO_free(PKCS8_PRIV_KEY_INFO *key); // d2i_PKCS8_PRIV_KEY_INFO parses up to |len| bytes from |*inp| as a DER-encoded // PrivateKeyInfo, as described in |d2i_SAMPLE|. OPENSSL_EXPORT PKCS8_PRIV_KEY_INFO *d2i_PKCS8_PRIV_KEY_INFO( PKCS8_PRIV_KEY_INFO **out, const uint8_t **inp, long len); // i2d_PKCS8_PRIV_KEY_INFO marshals |key| as a DER-encoded PrivateKeyInfo, as // described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_PKCS8_PRIV_KEY_INFO(const PKCS8_PRIV_KEY_INFO *key, uint8_t **outp); // EVP_PKCS82PKEY returns |p8| as a newly-allocated |EVP_PKEY|, or NULL if the // key was unsupported or could not be decoded. The caller must release the // result with |EVP_PKEY_free| when done. // // Use |EVP_parse_private_key| instead. OPENSSL_EXPORT EVP_PKEY *EVP_PKCS82PKEY(const PKCS8_PRIV_KEY_INFO *p8); // EVP_PKEY2PKCS8 encodes |pkey| as a PKCS#8 PrivateKeyInfo (RFC 5208), // represented as a newly-allocated |PKCS8_PRIV_KEY_INFO|, or NULL on error. The // caller must release the result with |PKCS8_PRIV_KEY_INFO_free| when done. // // Use |EVP_marshal_private_key| instead. OPENSSL_EXPORT PKCS8_PRIV_KEY_INFO *EVP_PKEY2PKCS8(const EVP_PKEY *pkey); // Algorithm and octet string pairs. // // The |X509_SIG| type represents an ASN.1 SEQUENCE type of an // AlgorithmIdentifier and an OCTET STRING. Although named |X509_SIG|, there is // no type in X.509 which matches this format. The two common types which do are // DigestInfo (RFC 2315 and RFC 8017), and EncryptedPrivateKeyInfo (RFC 5208). // X509_SIG_new returns a newly-allocated, empty |X509_SIG| object, or NULL on // error. OPENSSL_EXPORT X509_SIG *X509_SIG_new(void); // X509_SIG_free releases memory associated with |key|. 
OPENSSL_EXPORT void X509_SIG_free(X509_SIG *key); // d2i_X509_SIG parses up to |len| bytes from |*inp| as a DER-encoded algorithm // and octet string pair, as described in |d2i_SAMPLE|. OPENSSL_EXPORT X509_SIG *d2i_X509_SIG(X509_SIG **out, const uint8_t **inp, long len); // i2d_X509_SIG marshals |sig| as a DER-encoded algorithm // and octet string pair, as described in |i2d_SAMPLE|. OPENSSL_EXPORT int i2d_X509_SIG(const X509_SIG *sig, uint8_t **outp); // X509_SIG_get0 sets |*out_alg| and |*out_digest| to non-owning pointers to // |sig|'s algorithm and digest fields, respectively. Either |out_alg| and // |out_digest| may be NULL to skip those fields. OPENSSL_EXPORT void X509_SIG_get0(const X509_SIG *sig, const X509_ALGOR **out_alg, const ASN1_OCTET_STRING **out_digest); // X509_SIG_getm behaves like |X509_SIG_get0| but returns mutable pointers. OPENSSL_EXPORT void X509_SIG_getm(X509_SIG *sig, X509_ALGOR **out_alg, ASN1_OCTET_STRING **out_digest); // Printing functions. // // The following functions output human-readable representations of // X.509-related structures. They should only be used for debugging or logging // and not parsed programmatically. In many cases, the outputs are ambiguous, so // attempting to parse them can lead to string injection vulnerabilities. // The following flags control |X509_print_ex| and |X509_REQ_print_ex|. These // flags co-exist with |X509V3_EXT_*|, so avoid collisions when adding new ones. // X509_FLAG_COMPAT disables all flags. It additionally causes names to be // printed with a 16-byte indent. #define X509_FLAG_COMPAT 0 // X509_FLAG_NO_HEADER skips a header identifying the type of object printed. #define X509_FLAG_NO_HEADER 1L // X509_FLAG_NO_VERSION skips printing the X.509 version number. #define X509_FLAG_NO_VERSION (1L << 1) // X509_FLAG_NO_SERIAL skips printing the serial number. It is ignored in // |X509_REQ_print_fp|. 
#define X509_FLAG_NO_SERIAL (1L << 2) // X509_FLAG_NO_SIGNAME skips printing the signature algorithm in the // TBSCertificate. It is ignored in |X509_REQ_print_fp|. #define X509_FLAG_NO_SIGNAME (1L << 3) // X509_FLAG_NO_ISSUER skips printing the issuer. #define X509_FLAG_NO_ISSUER (1L << 4) // X509_FLAG_NO_VALIDITY skips printing the notBefore and notAfter times. It is // ignored in |X509_REQ_print_fp|. #define X509_FLAG_NO_VALIDITY (1L << 5) // X509_FLAG_NO_SUBJECT skips printing the subject. #define X509_FLAG_NO_SUBJECT (1L << 6) // X509_FLAG_NO_PUBKEY skips printing the public key. #define X509_FLAG_NO_PUBKEY (1L << 7) // X509_FLAG_NO_EXTENSIONS skips printing the extension list. It is ignored in // |X509_REQ_print_fp|. CSRs instead have attributes, which is controlled by // |X509_FLAG_NO_ATTRIBUTES|. #define X509_FLAG_NO_EXTENSIONS (1L << 8) // X509_FLAG_NO_SIGDUMP skips printing the signature and outer signature // algorithm. #define X509_FLAG_NO_SIGDUMP (1L << 9) // X509_FLAG_NO_AUX skips printing auxiliary properties. (See |d2i_X509_AUX| and // related functions.) #define X509_FLAG_NO_AUX (1L << 10) // X509_FLAG_NO_ATTRIBUTES skips printing CSR attributes. It does nothing for // certificates and CRLs. #define X509_FLAG_NO_ATTRIBUTES (1L << 11) // X509_FLAG_NO_IDS skips printing the issuerUniqueID and subjectUniqueID in a // certificate. It is ignored in |X509_REQ_print_fp|. #define X509_FLAG_NO_IDS (1L << 12) // The following flags control |X509_print_ex|, |X509_REQ_print_ex|, // |X509V3_EXT_print|, and |X509V3_extensions_print|. These flags coexist with // |X509_FLAG_*|, so avoid collisions when adding new ones. // X509V3_EXT_UNKNOWN_MASK is a mask that determines how unknown extensions are // processed. #define X509V3_EXT_UNKNOWN_MASK (0xfL << 16) // X509V3_EXT_DEFAULT causes unknown extensions or syntax errors to return // failure. 
#define X509V3_EXT_DEFAULT 0

// X509V3_EXT_ERROR_UNKNOWN causes unknown extensions or syntax errors to print
// as "<Not Supported>" or "<Parse Error>", respectively.
#define X509V3_EXT_ERROR_UNKNOWN (1L << 16)

// X509V3_EXT_PARSE_UNKNOWN is deprecated and behaves like
// |X509V3_EXT_DUMP_UNKNOWN|.
#define X509V3_EXT_PARSE_UNKNOWN (2L << 16)

// X509V3_EXT_DUMP_UNKNOWN causes unknown extensions to be displayed as a
// hexdump.
#define X509V3_EXT_DUMP_UNKNOWN (3L << 16)

// X509_print_ex writes a human-readable representation of |x| to |bp|. It
// returns one on success and zero on error. |nmflags| is the flags parameter
// for |X509_NAME_print_ex| when printing the subject and issuer. |cflag| should
// be some combination of the |X509_FLAG_*| and |X509V3_EXT_*| constants.
OPENSSL_EXPORT int X509_print_ex(BIO *bp, X509 *x, unsigned long nmflag,
                                 unsigned long cflag);

// X509_print_ex_fp behaves like |X509_print_ex| but writes to |fp|.
OPENSSL_EXPORT int X509_print_ex_fp(FILE *fp, X509 *x, unsigned long nmflag,
                                    unsigned long cflag);

// X509_print calls |X509_print_ex| with |XN_FLAG_COMPAT| and |X509_FLAG_COMPAT|
// flags.
OPENSSL_EXPORT int X509_print(BIO *bp, X509 *x);

// X509_print_fp behaves like |X509_print| but writes to |fp|.
OPENSSL_EXPORT int X509_print_fp(FILE *fp, X509 *x);

// X509_CRL_print writes a human-readable representation of |x| to |bp|. It
// returns one on success and zero on error.
OPENSSL_EXPORT int X509_CRL_print(BIO *bp, X509_CRL *x);

// X509_CRL_print_fp behaves like |X509_CRL_print| but writes to |fp|.
OPENSSL_EXPORT int X509_CRL_print_fp(FILE *fp, X509_CRL *x);

// X509_REQ_print_ex writes a human-readable representation of |x| to |bp|. It
// returns one on success and zero on error. |nmflags| is the flags parameter
// for |X509_NAME_print_ex|, when printing the subject. |cflag| should be some
// combination of the |X509_FLAG_*| and |X509V3_EXT_*| constants.
OPENSSL_EXPORT int X509_REQ_print_ex(BIO *bp, X509_REQ *x, unsigned long nmflag,
                                     unsigned long cflag);

// X509_REQ_print calls |X509_REQ_print_ex| with |XN_FLAG_COMPAT| and
// |X509_FLAG_COMPAT| flags.
OPENSSL_EXPORT int X509_REQ_print(BIO *bp, X509_REQ *req);

// X509_REQ_print_fp behaves like |X509_REQ_print| but writes to |fp|.
OPENSSL_EXPORT int X509_REQ_print_fp(FILE *fp, X509_REQ *req);

// The following flags control |X509_NAME_print_ex|. They must not collide
// with |ASN1_STRFLGS_*|.
//
// TODO(davidben): This is far, far too many options and most of them are
// useless. Trim this down.

// XN_FLAG_COMPAT prints with |X509_NAME_print|'s format and return value
// convention.
#define XN_FLAG_COMPAT 0ul

// XN_FLAG_SEP_MASK determines the separators to use between attributes.
#define XN_FLAG_SEP_MASK (0xful << 16)

// XN_FLAG_SEP_COMMA_PLUS separates RDNs with "," and attributes within an RDN
// with "+", as in RFC 2253.
#define XN_FLAG_SEP_COMMA_PLUS (1ul << 16)

// XN_FLAG_SEP_CPLUS_SPC behaves like |XN_FLAG_SEP_COMMA_PLUS| but adds spaces
// between the separators.
#define XN_FLAG_SEP_CPLUS_SPC (2ul << 16)

// XN_FLAG_SEP_SPLUS_SPC separates RDNs with "; " and attributes within an RDN
// with " + ".
#define XN_FLAG_SEP_SPLUS_SPC (3ul << 16)

// XN_FLAG_SEP_MULTILINE prints each attribute on one line.
#define XN_FLAG_SEP_MULTILINE (4ul << 16)

// XN_FLAG_DN_REV prints RDNs in reverse, from least significant to most
// significant, as RFC 2253.
#define XN_FLAG_DN_REV (1ul << 20)

// XN_FLAG_FN_MASK determines how attribute types are displayed.
#define XN_FLAG_FN_MASK (0x3ul << 21)

// XN_FLAG_FN_SN uses the attribute type's short name, when available.
#define XN_FLAG_FN_SN 0ul

// XN_FLAG_SPC_EQ wraps the "=" operator with spaces when printing attributes.
#define XN_FLAG_SPC_EQ (1ul << 23)

// XN_FLAG_DUMP_UNKNOWN_FIELDS causes unknown attribute types to be printed in
// hex, as in RFC 2253.
#define XN_FLAG_DUMP_UNKNOWN_FIELDS (1ul << 24)

// XN_FLAG_RFC2253 prints like RFC 2253.
#define XN_FLAG_RFC2253                                             \
  (ASN1_STRFLGS_RFC2253 | XN_FLAG_SEP_COMMA_PLUS | XN_FLAG_DN_REV | \
   XN_FLAG_FN_SN | XN_FLAG_DUMP_UNKNOWN_FIELDS)

// XN_FLAG_ONELINE prints a one-line representation of the name.
#define XN_FLAG_ONELINE                                                    \
  (ASN1_STRFLGS_RFC2253 | ASN1_STRFLGS_ESC_QUOTE | XN_FLAG_SEP_CPLUS_SPC | \
   XN_FLAG_SPC_EQ | XN_FLAG_FN_SN)

// X509_NAME_print_ex writes a human-readable representation of |nm| to |out|.
// Each line of output is indented by |indent| spaces. It returns the number of
// bytes written on success, and -1 on error. If |out| is NULL, it returns the
// number of bytes it would have written but does not write anything. |flags|
// should be some combination of |XN_FLAG_*| and |ASN1_STRFLGS_*| values and
// determines the output. If unsure, use |XN_FLAG_RFC2253|.
//
// If |flags| is |XN_FLAG_COMPAT|, or zero, this function calls
// |X509_NAME_print| instead. In that case, it returns one on success, rather
// than the output length.
OPENSSL_EXPORT int X509_NAME_print_ex(BIO *out, const X509_NAME *nm, int indent,
                                      unsigned long flags);

// X509_NAME_print prints a human-readable representation of |name| to |bp|. It
// returns one on success and zero on error. |obase| is ignored.
//
// This function outputs a legacy format that does not correctly handle string
// encodings and other cases. Prefer |X509_NAME_print_ex| if printing a name for
// debugging purposes.
OPENSSL_EXPORT int X509_NAME_print(BIO *bp, const X509_NAME *name, int obase);

// X509_NAME_oneline writes a human-readable representation of |name| to a
// buffer as a NUL-terminated C string.
//
// If |buf| is NULL, returns a newly-allocated buffer containing the result on
// success, or NULL on error. The buffer must be released with |OPENSSL_free|
// when done.
//
// If |buf| is non-NULL, at most |size| bytes of output are written to |buf|
// instead. |size| includes the trailing NUL.
// The function then returns |buf| on
// success or NULL on error. If the output does not fit in |size| bytes, the
// output is silently truncated at an attribute boundary.
//
// This function outputs a legacy format that does not correctly handle string
// encodings and other cases. Prefer |X509_NAME_print_ex| if printing a name for
// debugging purposes.
OPENSSL_EXPORT char *X509_NAME_oneline(const X509_NAME *name, char *buf,
                                       int size);

// X509_NAME_print_ex_fp behaves like |X509_NAME_print_ex| but writes to |fp|.
OPENSSL_EXPORT int X509_NAME_print_ex_fp(FILE *fp, const X509_NAME *nm,
                                         int indent, unsigned long flags);

// X509_signature_dump writes a human-readable representation of |sig| to |bio|,
// indented with |indent| spaces. It returns one on success and zero on error.
OPENSSL_EXPORT int X509_signature_dump(BIO *bio, const ASN1_STRING *sig,
                                       int indent);

// X509_signature_print writes a human-readable representation of |alg| and
// |sig| to |bio|. It returns one on success and zero on error.
OPENSSL_EXPORT int X509_signature_print(BIO *bio, const X509_ALGOR *alg,
                                        const ASN1_STRING *sig);

// X509V3_EXT_print prints a human-readable representation of |ext| to out. It
// returns one on success and zero on error. The output is indented by |indent|
// spaces. |flag| is one of the |X509V3_EXT_*| constants and controls printing
// of unknown extensions and syntax errors.
//
// WARNING: Although some applications programmatically parse the output of this
// function to process X.509 extensions, this is not safe. In many cases, the
// outputs are ambiguous, so attempting to parse them can lead to string
// injection vulnerabilities. These functions should only be used for debugging
// or logging.
OPENSSL_EXPORT int X509V3_EXT_print(BIO *out, const X509_EXTENSION *ext,
                                    unsigned long flag, int indent);

// X509V3_EXT_print_fp behaves like |X509V3_EXT_print| but writes to a |FILE|
// instead of a |BIO|.
OPENSSL_EXPORT int X509V3_EXT_print_fp(FILE *out, const X509_EXTENSION *ext, int flag, int indent); // X509V3_extensions_print prints |title|, followed by a human-readable // representation of |exts| to |out|. It returns one on success and zero on // error. The output is indented by |indent| spaces. |flag| is one of the // |X509V3_EXT_*| constants and controls printing of unknown extensions and // syntax errors. OPENSSL_EXPORT int X509V3_extensions_print(BIO *out, const char *title, const STACK_OF(X509_EXTENSION) *exts, unsigned long flag, int indent); // GENERAL_NAME_print prints a human-readable representation of |gen| to |out|. // It returns one on success and zero on error. // // TODO(davidben): Actually, it just returns one and doesn't check for I/O or // allocation errors. But it should return zero on error. OPENSSL_EXPORT int GENERAL_NAME_print(BIO *out, const GENERAL_NAME *gen); // Convenience functions. // X509_pubkey_digest hashes the contents of the BIT STRING in |x509|'s // subjectPublicKeyInfo field with |md| and writes the result to |out|. // |EVP_MD_CTX_size| bytes are written, which is at most |EVP_MAX_MD_SIZE|. If // |out_len| is not NULL, |*out_len| is set to the number of bytes written. This // function returns one on success and zero on error. // // This hash omits the BIT STRING tag, length, and number of unused bits. It // also omits the AlgorithmIdentifier which describes the key type. It // corresponds to the OCSP KeyHash definition and is not suitable for other // purposes. OPENSSL_EXPORT int X509_pubkey_digest(const X509 *x509, const EVP_MD *md, uint8_t *out, unsigned *out_len); // X509_digest hashes |x509|'s DER encoding with |md| and writes the result to // |out|. |EVP_MD_CTX_size| bytes are written, which is at most // |EVP_MAX_MD_SIZE|. If |out_len| is not NULL, |*out_len| is set to the number // of bytes written. This function returns one on success and zero on error. 
// Note this digest covers the entire certificate, not just the signed portion. OPENSSL_EXPORT int X509_digest(const X509 *x509, const EVP_MD *md, uint8_t *out, unsigned *out_len); // X509_CRL_digest hashes |crl|'s DER encoding with |md| and writes the result // to |out|. |EVP_MD_CTX_size| bytes are written, which is at most // |EVP_MAX_MD_SIZE|. If |out_len| is not NULL, |*out_len| is set to the number // of bytes written. This function returns one on success and zero on error. // Note this digest covers the entire CRL, not just the signed portion. OPENSSL_EXPORT int X509_CRL_digest(const X509_CRL *crl, const EVP_MD *md, uint8_t *out, unsigned *out_len); // X509_REQ_digest hashes |req|'s DER encoding with |md| and writes the result // to |out|. |EVP_MD_CTX_size| bytes are written, which is at most // |EVP_MAX_MD_SIZE|. If |out_len| is not NULL, |*out_len| is set to the number // of bytes written. This function returns one on success and zero on error. // Note this digest covers the entire certificate request, not just the signed // portion. OPENSSL_EXPORT int X509_REQ_digest(const X509_REQ *req, const EVP_MD *md, uint8_t *out, unsigned *out_len); // X509_NAME_digest hashes |name|'s DER encoding with |md| and writes the result // to |out|. |EVP_MD_CTX_size| bytes are written, which is at most // |EVP_MAX_MD_SIZE|. If |out_len| is not NULL, |*out_len| is set to the number // of bytes written. This function returns one on success and zero on error. OPENSSL_EXPORT int X509_NAME_digest(const X509_NAME *name, const EVP_MD *md, uint8_t *out, unsigned *out_len); // The following functions behave like the corresponding unsuffixed |d2i_*| // functions, but read the result from |bp| instead. Callers using these // functions with memory |BIO|s to parse structures already in memory should use // |d2i_*| instead. 
OPENSSL_EXPORT X509 *d2i_X509_bio(BIO *bp, X509 **x509); OPENSSL_EXPORT X509_CRL *d2i_X509_CRL_bio(BIO *bp, X509_CRL **crl); OPENSSL_EXPORT X509_REQ *d2i_X509_REQ_bio(BIO *bp, X509_REQ **req); OPENSSL_EXPORT RSA *d2i_RSAPrivateKey_bio(BIO *bp, RSA **rsa); OPENSSL_EXPORT RSA *d2i_RSAPublicKey_bio(BIO *bp, RSA **rsa); OPENSSL_EXPORT RSA *d2i_RSA_PUBKEY_bio(BIO *bp, RSA **rsa); OPENSSL_EXPORT DSA *d2i_DSA_PUBKEY_bio(BIO *bp, DSA **dsa); OPENSSL_EXPORT DSA *d2i_DSAPrivateKey_bio(BIO *bp, DSA **dsa); OPENSSL_EXPORT EC_KEY *d2i_EC_PUBKEY_bio(BIO *bp, EC_KEY **eckey); OPENSSL_EXPORT EC_KEY *d2i_ECPrivateKey_bio(BIO *bp, EC_KEY **eckey); OPENSSL_EXPORT X509_SIG *d2i_PKCS8_bio(BIO *bp, X509_SIG **p8); OPENSSL_EXPORT PKCS8_PRIV_KEY_INFO *d2i_PKCS8_PRIV_KEY_INFO_bio( BIO *bp, PKCS8_PRIV_KEY_INFO **p8inf); OPENSSL_EXPORT EVP_PKEY *d2i_PUBKEY_bio(BIO *bp, EVP_PKEY **a); OPENSSL_EXPORT DH *d2i_DHparams_bio(BIO *bp, DH **dh); // d2i_PrivateKey_bio behaves like |d2i_AutoPrivateKey|, but reads from |bp| // instead. OPENSSL_EXPORT EVP_PKEY *d2i_PrivateKey_bio(BIO *bp, EVP_PKEY **a); // The following functions behave like the corresponding unsuffixed |i2d_*| // functions, but write the result to |bp|. They return one on success and zero // on error. Callers using them with memory |BIO|s to encode structures to // memory should use |i2d_*| directly instead. 
OPENSSL_EXPORT int i2d_X509_bio(BIO *bp, X509 *x509); OPENSSL_EXPORT int i2d_X509_CRL_bio(BIO *bp, X509_CRL *crl); OPENSSL_EXPORT int i2d_X509_REQ_bio(BIO *bp, X509_REQ *req); OPENSSL_EXPORT int i2d_RSAPrivateKey_bio(BIO *bp, RSA *rsa); OPENSSL_EXPORT int i2d_RSAPublicKey_bio(BIO *bp, RSA *rsa); OPENSSL_EXPORT int i2d_RSA_PUBKEY_bio(BIO *bp, RSA *rsa); OPENSSL_EXPORT int i2d_DSA_PUBKEY_bio(BIO *bp, DSA *dsa); OPENSSL_EXPORT int i2d_DSAPrivateKey_bio(BIO *bp, DSA *dsa); OPENSSL_EXPORT int i2d_EC_PUBKEY_bio(BIO *bp, EC_KEY *eckey); OPENSSL_EXPORT int i2d_ECPrivateKey_bio(BIO *bp, EC_KEY *eckey); OPENSSL_EXPORT int i2d_PKCS8_bio(BIO *bp, X509_SIG *p8); OPENSSL_EXPORT int i2d_PKCS8_PRIV_KEY_INFO_bio(BIO *bp, PKCS8_PRIV_KEY_INFO *p8inf); OPENSSL_EXPORT int i2d_PrivateKey_bio(BIO *bp, EVP_PKEY *pkey); OPENSSL_EXPORT int i2d_PUBKEY_bio(BIO *bp, EVP_PKEY *pkey); OPENSSL_EXPORT int i2d_DHparams_bio(BIO *bp, const DH *dh); // i2d_PKCS8PrivateKeyInfo_bio encodes |key| as a PKCS#8 PrivateKeyInfo // structure (see |EVP_marshal_private_key|) and writes the result to |bp|. It // returns one on success and zero on error. OPENSSL_EXPORT int i2d_PKCS8PrivateKeyInfo_bio(BIO *bp, EVP_PKEY *key); // The following functions behave like the corresponding |d2i_*_bio| functions, // but read from |fp| instead. 
OPENSSL_EXPORT X509 *d2i_X509_fp(FILE *fp, X509 **x509); OPENSSL_EXPORT X509_CRL *d2i_X509_CRL_fp(FILE *fp, X509_CRL **crl); OPENSSL_EXPORT X509_REQ *d2i_X509_REQ_fp(FILE *fp, X509_REQ **req); OPENSSL_EXPORT RSA *d2i_RSAPrivateKey_fp(FILE *fp, RSA **rsa); OPENSSL_EXPORT RSA *d2i_RSAPublicKey_fp(FILE *fp, RSA **rsa); OPENSSL_EXPORT RSA *d2i_RSA_PUBKEY_fp(FILE *fp, RSA **rsa); OPENSSL_EXPORT DSA *d2i_DSA_PUBKEY_fp(FILE *fp, DSA **dsa); OPENSSL_EXPORT DSA *d2i_DSAPrivateKey_fp(FILE *fp, DSA **dsa); OPENSSL_EXPORT EC_KEY *d2i_EC_PUBKEY_fp(FILE *fp, EC_KEY **eckey); OPENSSL_EXPORT EC_KEY *d2i_ECPrivateKey_fp(FILE *fp, EC_KEY **eckey); OPENSSL_EXPORT X509_SIG *d2i_PKCS8_fp(FILE *fp, X509_SIG **p8); OPENSSL_EXPORT PKCS8_PRIV_KEY_INFO *d2i_PKCS8_PRIV_KEY_INFO_fp( FILE *fp, PKCS8_PRIV_KEY_INFO **p8inf); OPENSSL_EXPORT EVP_PKEY *d2i_PrivateKey_fp(FILE *fp, EVP_PKEY **a); OPENSSL_EXPORT EVP_PKEY *d2i_PUBKEY_fp(FILE *fp, EVP_PKEY **a); // The following functions behave like the corresponding |i2d_*_bio| functions, // but write to |fp| instead. 
OPENSSL_EXPORT int i2d_X509_fp(FILE *fp, X509 *x509); OPENSSL_EXPORT int i2d_X509_CRL_fp(FILE *fp, X509_CRL *crl); OPENSSL_EXPORT int i2d_X509_REQ_fp(FILE *fp, X509_REQ *req); OPENSSL_EXPORT int i2d_RSAPrivateKey_fp(FILE *fp, RSA *rsa); OPENSSL_EXPORT int i2d_RSAPublicKey_fp(FILE *fp, RSA *rsa); OPENSSL_EXPORT int i2d_RSA_PUBKEY_fp(FILE *fp, RSA *rsa); OPENSSL_EXPORT int i2d_DSA_PUBKEY_fp(FILE *fp, DSA *dsa); OPENSSL_EXPORT int i2d_DSAPrivateKey_fp(FILE *fp, DSA *dsa); OPENSSL_EXPORT int i2d_EC_PUBKEY_fp(FILE *fp, EC_KEY *eckey); OPENSSL_EXPORT int i2d_ECPrivateKey_fp(FILE *fp, EC_KEY *eckey); OPENSSL_EXPORT int i2d_PKCS8_fp(FILE *fp, X509_SIG *p8); OPENSSL_EXPORT int i2d_PKCS8_PRIV_KEY_INFO_fp(FILE *fp, PKCS8_PRIV_KEY_INFO *p8inf); OPENSSL_EXPORT int i2d_PKCS8PrivateKeyInfo_fp(FILE *fp, EVP_PKEY *key); OPENSSL_EXPORT int i2d_PrivateKey_fp(FILE *fp, EVP_PKEY *pkey); OPENSSL_EXPORT int i2d_PUBKEY_fp(FILE *fp, EVP_PKEY *pkey); // X509_find_by_issuer_and_serial returns the first |X509| in |sk| whose issuer // and serial are |name| and |serial|, respectively. If no match is found, it // returns NULL. OPENSSL_EXPORT X509 *X509_find_by_issuer_and_serial(const STACK_OF(X509) *sk, X509_NAME *name, const ASN1_INTEGER *serial); // X509_find_by_subject returns the first |X509| in |sk| whose subject is // |name|. If no match is found, it returns NULL. OPENSSL_EXPORT X509 *X509_find_by_subject(const STACK_OF(X509) *sk, X509_NAME *name); // X509_cmp_time compares |s| against |*t|. On success, it returns a negative // number if |s| <= |*t| and a positive number if |s| > |*t|. On error, it // returns zero. If |t| is NULL, it uses the current time instead of |*t|. // // WARNING: Unlike most comparison functions, this function returns zero on // error, not equality. OPENSSL_EXPORT int X509_cmp_time(const ASN1_TIME *s, const time_t *t); // X509_cmp_time_posix compares |s| against |t|. On success, it returns a // negative number if |s| <= |t| and a positive number if |s| > |t|. 
On error, // it returns zero. // // WARNING: Unlike most comparison functions, this function returns zero on // error, not equality. OPENSSL_EXPORT int X509_cmp_time_posix(const ASN1_TIME *s, int64_t t); // X509_cmp_current_time behaves like |X509_cmp_time| but compares |s| against // the current time. OPENSSL_EXPORT int X509_cmp_current_time(const ASN1_TIME *s); // X509_time_adj calls |X509_time_adj_ex| with |offset_day| equal to zero. OPENSSL_EXPORT ASN1_TIME *X509_time_adj(ASN1_TIME *s, long offset_sec, const time_t *t); // X509_time_adj_ex behaves like |ASN1_TIME_adj|, but adds an offset to |*t|. If // |t| is NULL, it uses the current time instead of |*t|. OPENSSL_EXPORT ASN1_TIME *X509_time_adj_ex(ASN1_TIME *s, int offset_day, long offset_sec, const time_t *t); // X509_gmtime_adj behaves like |X509_time_adj_ex| but adds |offset_sec| to the // current time. OPENSSL_EXPORT ASN1_TIME *X509_gmtime_adj(ASN1_TIME *s, long offset_sec); // X509_issuer_name_cmp behaves like |X509_NAME_cmp|, but compares |a| and |b|'s // issuer names. OPENSSL_EXPORT int X509_issuer_name_cmp(const X509 *a, const X509 *b); // X509_subject_name_cmp behaves like |X509_NAME_cmp|, but compares |a| and // |b|'s subject names. OPENSSL_EXPORT int X509_subject_name_cmp(const X509 *a, const X509 *b); // X509_CRL_cmp behaves like |X509_NAME_cmp|, but compares |a| and |b|'s // issuer names. // // WARNING: This function is misnamed. It does not compare other parts of the // CRL, only the issuer fields using |X509_NAME_cmp|. OPENSSL_EXPORT int X509_CRL_cmp(const X509_CRL *a, const X509_CRL *b); // X509_issuer_name_hash returns the hash of |x509|'s issuer name with // |X509_NAME_hash|. // // This hash is specific to the |X509_LOOKUP_add_dir| filesystem format and is // not suitable for general-purpose X.509 name processing. It is very short, so // there will be hash collisions. It also depends on an OpenSSL-specific // canonicalization process. 
OPENSSL_EXPORT uint32_t X509_issuer_name_hash(X509 *x509);

// X509_subject_name_hash returns the hash of |x509|'s subject name with
// |X509_NAME_hash|.
//
// This hash is specific to the |X509_LOOKUP_add_dir| filesystem format and is
// not suitable for general-purpose X.509 name processing. It is very short, so
// there will be hash collisions. It also depends on an OpenSSL-specific
// canonicalization process.
OPENSSL_EXPORT uint32_t X509_subject_name_hash(X509 *x509);

// X509_issuer_name_hash_old returns the hash of |x509|'s issuer name with
// |X509_NAME_hash_old|.
//
// This hash is specific to the |X509_LOOKUP_add_dir| filesystem format and is
// not suitable for general-purpose X.509 name processing. It is very short, so
// there will be hash collisions.
OPENSSL_EXPORT uint32_t X509_issuer_name_hash_old(X509 *x509);

// X509_subject_name_hash_old returns the hash of |x509|'s subject name with
// |X509_NAME_hash_old|.
//
// This hash is specific to the |X509_LOOKUP_add_dir| filesystem format and is
// not suitable for general-purpose X.509 name processing. It is very short, so
// there will be hash collisions.
OPENSSL_EXPORT uint32_t X509_subject_name_hash_old(X509 *x509);


// ex_data functions.
//
// See |ex_data.h| for details.
OPENSSL_EXPORT int X509_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int X509_set_ex_data(X509 *r, int idx, void *arg); OPENSSL_EXPORT void *X509_get_ex_data(X509 *r, int idx); OPENSSL_EXPORT int X509_STORE_CTX_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int X509_STORE_CTX_set_ex_data(X509_STORE_CTX *ctx, int idx, void *data); OPENSSL_EXPORT void *X509_STORE_CTX_get_ex_data(X509_STORE_CTX *ctx, int idx); #define X509_STORE_CTX_set_app_data(ctx, data) \ X509_STORE_CTX_set_ex_data(ctx, 0, data) #define X509_STORE_CTX_get_app_data(ctx) X509_STORE_CTX_get_ex_data(ctx, 0) // Hashing and signing ASN.1 structures. // ASN1_digest serializes |data| with |i2d| and then hashes the result with // |type|. On success, it returns one, writes the digest to |md|, and sets // |*len| to the digest length if non-NULL. On error, it returns zero. // // |EVP_MD_CTX_size| bytes are written, which is at most |EVP_MAX_MD_SIZE|. The // buffer must have sufficient space for this output. OPENSSL_EXPORT int ASN1_digest(i2d_of_void *i2d, const EVP_MD *type, char *data, unsigned char *md, unsigned int *len); // ASN1_item_digest serializes |data| with |it| and then hashes the result with // |type|. On success, it returns one, writes the digest to |md|, and sets // |*len| to the digest length if non-NULL. On error, it returns zero. // // |EVP_MD_CTX_size| bytes are written, which is at most |EVP_MAX_MD_SIZE|. The // buffer must have sufficient space for this output. // // WARNING: |data| must be a pointer with the same type as |it|'s corresponding // C type. Using the wrong type is a potentially exploitable memory error. 
OPENSSL_EXPORT int ASN1_item_digest(const ASN1_ITEM *it, const EVP_MD *type,
                                    void *data, unsigned char *md,
                                    unsigned int *len);

// ASN1_item_verify serializes |data| with |it| and then verifies |signature| is
// a valid signature for the result with |algor1| and |pkey|. It returns one on
// success and zero on error. The signature and algorithm are interpreted as in
// X.509.
//
// WARNING: |data| must be a pointer with the same type as |it|'s corresponding
// C type. Using the wrong type is a potentially exploitable memory error.
OPENSSL_EXPORT int ASN1_item_verify(const ASN1_ITEM *it,
                                    const X509_ALGOR *algor1,
                                    const ASN1_BIT_STRING *signature,
                                    void *data, EVP_PKEY *pkey);

// ASN1_item_sign serializes |data| with |it| and then signs the result with
// the private key |pkey|. It returns the length of the signature on success and
// zero on error. On success, it writes the signature to |signature| and the
// signature algorithm to each of |algor1| and |algor2|. Either of |algor1| or
// |algor2| may be NULL to ignore them. This function uses digest algorithm
// |md|, or |pkey|'s default if NULL. Other signing parameters use |pkey|'s
// defaults. To customize them, use |ASN1_item_sign_ctx|.
//
// WARNING: |data| must be a pointer with the same type as |it|'s corresponding
// C type. Using the wrong type is a potentially exploitable memory error.
OPENSSL_EXPORT int ASN1_item_sign(const ASN1_ITEM *it, X509_ALGOR *algor1,
                                  X509_ALGOR *algor2,
                                  ASN1_BIT_STRING *signature, void *data,
                                  EVP_PKEY *pkey, const EVP_MD *type);

// ASN1_item_sign_ctx behaves like |ASN1_item_sign| except the signature is
// signed with |ctx|, which must have been initialized with
// |EVP_DigestSignInit|. The caller should configure the corresponding
// |EVP_PKEY_CTX| with any additional parameters before calling this function.
//
// On success or failure, this function mutates |ctx| and resets it to the empty
// state. Caller should not rely on its contents after the function returns.
// // WARNING: |data| must be a pointer with the same type as |it|'s corresponding // C type. Using the wrong type is a potentially exploitable memory error. OPENSSL_EXPORT int ASN1_item_sign_ctx(const ASN1_ITEM *it, X509_ALGOR *algor1, X509_ALGOR *algor2, ASN1_BIT_STRING *signature, void *asn, EVP_MD_CTX *ctx); // Verification internals. // // The following functions expose portions of certificate validation. They are // exported for compatibility with existing callers, or to support some obscure // use cases. Most callers, however, will not need these functions and should // instead use |X509_STORE_CTX| APIs. // X509_supported_extension returns one if |ex| is a critical X.509 certificate // extension, supported by |X509_verify_cert|, and zero otherwise. // // Note this function only reports certificate extensions (as opposed to CRL or // CRL extensions), and only extensions that are expected to be marked critical. // Additionally, |X509_verify_cert| checks for unsupported critical extensions // internally, so most callers will not need to call this function separately. OPENSSL_EXPORT int X509_supported_extension(const X509_EXTENSION *ex); // X509_check_ca returns one if |x509| may be considered a CA certificate, // according to basic constraints and key usage extensions. Otherwise, it // returns zero. If |x509| is an X509v1 certificate, and thus has no extensions, // it is considered eligible. // // This function returning one does not indicate that |x509| is trusted, only // that it is eligible to be a CA. // // TODO(crbug.com/boringssl/407): |x509| should be const. OPENSSL_EXPORT int X509_check_ca(X509 *x509); // X509_check_issued checks if |issuer| and |subject|'s name, authority key // identifier, and key usage fields allow |issuer| to have issued |subject|. It // returns |X509_V_OK| on success and an |X509_V_ERR_*| value otherwise. // // This function does not check the signature on |subject|. 
Rather, it is // intended to prune the set of possible issuer certificates during // path-building. // // TODO(crbug.com/boringssl/407): Both parameters should be const. OPENSSL_EXPORT int X509_check_issued(X509 *issuer, X509 *subject); // NAME_CONSTRAINTS_check checks if |x509| satisfies name constraints in |nc|. // It returns |X509_V_OK| on success and some |X509_V_ERR_*| constant on error. // // TODO(crbug.com/boringssl/407): Both parameters should be const. OPENSSL_EXPORT int NAME_CONSTRAINTS_check(X509 *x509, NAME_CONSTRAINTS *nc); // X509_check_host checks if |x509| matches the DNS name |chk|. It returns one // on match, zero on mismatch, or a negative number on error. |flags| should be // some combination of |X509_CHECK_FLAG_*| and modifies the behavior. On match, // if |out_peername| is non-NULL, it additionally sets |*out_peername| to a // newly-allocated, NUL-terminated string containing the DNS name or wildcard in // the certificate which matched. The caller must then free |*out_peername| with // |OPENSSL_free| when done. // // By default, both subject alternative names and the subject's common name // attribute are checked. The latter has long been deprecated, so callers should // include |X509_CHECK_FLAG_NEVER_CHECK_SUBJECT| in |flags| to use the standard // behavior. https://crbug.com/boringssl/464 tracks fixing the default. // // This function does not check if |x509| is a trusted certificate, only if, // were it trusted, it would match |chk|. // // WARNING: This function differs from the usual calling convention and may // return either 0 or a negative number on error. // // TODO(davidben): Make the error case also return zero. OPENSSL_EXPORT int X509_check_host(const X509 *x509, const char *chk, size_t chklen, unsigned int flags, char **out_peername); // X509_check_email checks if |x509| matches the email address |chk|. It returns // one on match, zero on mismatch, or a negative number on error. 
|flags| should // be some combination of |X509_CHECK_FLAG_*| and modifies the behavior. // // By default, both subject alternative names and the subject's email address // attribute are checked. The |X509_CHECK_FLAG_NEVER_CHECK_SUBJECT| flag may be // used to change this behavior. // // This function does not check if |x509| is a trusted certificate, only if, // were it trusted, it would match |chk|. // // WARNING: This function differs from the usual calling convention and may // return either 0 or a negative number on error. // // TODO(davidben): Make the error case also return zero. OPENSSL_EXPORT int X509_check_email(const X509 *x509, const char *chk, size_t chklen, unsigned int flags); // X509_check_ip checks if |x509| matches the IP address |chk|. The IP address // is represented in byte form and should be 4 bytes for an IPv4 address and 16 // bytes for an IPv6 address. It returns one on match, zero on mismatch, or a // negative number on error. |flags| should be some combination of // |X509_CHECK_FLAG_*| and modifies the behavior. // // This function does not check if |x509| is a trusted certificate, only if, // were it trusted, it would match |chk|. // // WARNING: This function differs from the usual calling convention and may // return either 0 or a negative number on error. // // TODO(davidben): Make the error case also return zero. OPENSSL_EXPORT int X509_check_ip(const X509 *x509, const uint8_t *chk, size_t chklen, unsigned int flags); // X509_check_ip_asc behaves like |X509_check_ip| except the IP address is // specified in textual form in |ipasc|. // // WARNING: This function differs from the usual calling convention and may // return either 0 or a negative number on error. // // TODO(davidben): Make the error case also return zero. 
OPENSSL_EXPORT int X509_check_ip_asc(const X509 *x509, const char *ipasc, unsigned int flags); // X509_STORE_CTX_get1_issuer looks up a candidate trusted issuer for |x509| out // of |ctx|'s |X509_STORE|, based on the criteria in |X509_check_issued|. If one // was found, it returns one and sets |*out_issuer| to the issuer. The caller // must release |*out_issuer| with |X509_free| when done. If none was found, it // returns zero and leaves |*out_issuer| unchanged. // // This function only searches for trusted issuers. It does not consider // untrusted intermediates passed in to |X509_STORE_CTX_init|. // // TODO(crbug.com/boringssl/407): |x509| should be const. OPENSSL_EXPORT int X509_STORE_CTX_get1_issuer(X509 **out_issuer, X509_STORE_CTX *ctx, X509 *x509); // X509_check_purpose performs checks if |x509|'s basic constraints, key usage, // and extended key usage extensions for the specified purpose. |purpose| should // be one of |X509_PURPOSE_*| constants. See |X509_VERIFY_PARAM_set_purpose| for // details. It returns one if |x509|'s extensions are consistent with |purpose| // and zero otherwise. If |ca| is non-zero, |x509| is checked as a CA // certificate. Otherwise, it is checked as an end-entity certificate. // // If |purpose| is -1, this function performs no purpose checks, but it parses // some extensions in |x509| and may return zero on syntax error. Historically, // callers primarily used this function to trigger this parsing, but this is no // longer necessary. Functions acting on |X509| will internally parse as needed. OPENSSL_EXPORT int X509_check_purpose(X509 *x509, int purpose, int ca); #define X509_TRUST_TRUSTED 1 #define X509_TRUST_REJECTED 2 #define X509_TRUST_UNTRUSTED 3 // X509_check_trust checks if |x509| is a valid trust anchor for trust type // |id|. See |X509_VERIFY_PARAM_set_trust| for details. It returns // |X509_TRUST_TRUSTED| if |x509| is a trust anchor, |X509_TRUST_REJECTED| if it // was distrusted, and |X509_TRUST_UNTRUSTED| otherwise. 
// |id| should be one of
// the |X509_TRUST_*| constants, or zero to indicate the default behavior.
// |flags| should be zero and is ignored.
OPENSSL_EXPORT int X509_check_trust(X509 *x509, int id, int flags);

// X509_STORE_CTX_get1_certs returns a newly-allocated stack containing all
// trusted certificates in |ctx|'s |X509_STORE| whose subject matches |name|, or
// NULL on error. The caller must release the result with |sk_X509_pop_free| and
// |X509_free| when done.
//
// TODO(crbug.com/boringssl/407): |name| should be const.
OPENSSL_EXPORT STACK_OF(X509) *X509_STORE_CTX_get1_certs(X509_STORE_CTX *ctx,
                                                         X509_NAME *name);

// X509_STORE_CTX_get1_crls returns a newly-allocated stack containing all
// CRLs in |ctx|'s |X509_STORE| whose subject matches |name|, or NULL on error.
// The caller must release the result with |sk_X509_CRL_pop_free| and
// |X509_CRL_free| when done.
//
// TODO(crbug.com/boringssl/407): |name| should be const.
OPENSSL_EXPORT STACK_OF(X509_CRL) *X509_STORE_CTX_get1_crls(X509_STORE_CTX *ctx,
                                                            X509_NAME *name);

// X509_STORE_CTX_get_by_subject looks up an object of type |type| in |ctx|'s
// |X509_STORE| that matches |name|. |type| should be one of the |X509_LU_*|
// constants to indicate the type of object. If a match was found, it stores the
// result in |ret| and returns one. Otherwise, it returns zero. If multiple
// objects match, this function outputs an arbitrary one.
//
// WARNING: |ret| must be in the empty state, as returned by |X509_OBJECT_new|.
// Otherwise, the object currently in |ret| will be leaked when overwritten.
// https://crbug.com/boringssl/685 tracks fixing this.
//
// WARNING: Multiple trusted certificates or CRLs may share a name. In this
// case, this function returns an arbitrary match. Use
// |X509_STORE_CTX_get1_certs| or |X509_STORE_CTX_get1_crls| instead.
//
// TODO(crbug.com/boringssl/407): |name| should be const.
OPENSSL_EXPORT int X509_STORE_CTX_get_by_subject(X509_STORE_CTX *ctx, int type,
                                                 X509_NAME *name,
                                                 X509_OBJECT *ret);


// X.509 information.
//
// |X509_INFO| is the return type for |PEM_X509_INFO_read_bio|, defined in
// <pem.h>. It is used to store a certificate, CRL, or private key. This
// type is defined in this header for OpenSSL compatibility.

struct private_key_st {
  // dec_pkey is the decoded private key, if this entry holds one.
  EVP_PKEY *dec_pkey;
} /* X509_PKEY */;

struct X509_info_st {
  // x509, crl, and x_pkey hold the certificate, CRL, or private key carried by
  // this entry. Per the section comment above, an |X509_INFO| stores one of
  // these; presumably the unused fields are NULL — confirm against
  // |PEM_X509_INFO_read_bio|.
  X509 *x509;
  X509_CRL *crl;
  X509_PKEY *x_pkey;
  // NOTE(review): the enc_* fields appear to describe encrypted private key
  // material (cipher, length, and data); verify against the PEM reader before
  // relying on this.
  EVP_CIPHER_INFO enc_cipher;
  int enc_len;
  char *enc_data;
} /* X509_INFO */;

DEFINE_STACK_OF(X509_INFO)

// X509_INFO_free releases memory associated with |info|.
OPENSSL_EXPORT void X509_INFO_free(X509_INFO *info);


// Deprecated custom extension registration.
//
// The following functions allow callers to register custom extensions for use
// with |X509V3_EXT_d2i| and related functions. This mechanism is deprecated and
// will be removed in the future. As discussed in |X509V3_EXT_add|, it is not
// possible to safely register a custom extension without risking race
// conditions and memory errors when linked with other users of BoringSSL.
//
// Moreover, it is not necessary to register a custom extension to process
// extensions unknown to BoringSSL. Registration does not impact certificate
// verification. Caller should instead use functions such as
// |ASN1_OBJECT_create|, |X509_get_ext_by_OBJ|, |X509_EXTENSION_get_data|, and
// |X509_EXTENSION_create_by_OBJ| to inspect or create extensions directly.

// The following function pointer types are used in |X509V3_EXT_METHOD|.
typedef void *(*X509V3_EXT_NEW)(void);
typedef void (*X509V3_EXT_FREE)(void *ext);
typedef void *(*X509V3_EXT_D2I)(void *ext, const uint8_t **inp, long len);
typedef int (*X509V3_EXT_I2D)(void *ext, uint8_t **outp);
typedef STACK_OF(CONF_VALUE) *(*X509V3_EXT_I2V)(const X509V3_EXT_METHOD *method,
                                                void *ext,
                                                STACK_OF(CONF_VALUE) *extlist);
typedef void *(*X509V3_EXT_V2I)(const X509V3_EXT_METHOD *method,
                                const X509V3_CTX *ctx,
                                const STACK_OF(CONF_VALUE) *values);
typedef char *(*X509V3_EXT_I2S)(const X509V3_EXT_METHOD *method, void *ext);
typedef void *(*X509V3_EXT_S2I)(const X509V3_EXT_METHOD *method,
                                const X509V3_CTX *ctx, const char *str);
typedef int (*X509V3_EXT_I2R)(const X509V3_EXT_METHOD *method, void *ext,
                              BIO *out, int indent);
typedef void *(*X509V3_EXT_R2I)(const X509V3_EXT_METHOD *method,
                                const X509V3_CTX *ctx, const char *str);

// A v3_ext_method, aka |X509V3_EXT_METHOD|, is a deprecated type which defines
// a custom extension.
struct v3_ext_method {
  // ext_nid is the NID of the extension.
  int ext_nid;

  // ext_flags is a combination of |X509V3_EXT_*| constants.
  int ext_flags;

  // it determines how values of this extension are allocated, released, parsed,
  // and marshalled. This must be non-NULL.
  ASN1_ITEM_EXP *it;

  // The following functions are ignored in favor of |it|. They are retained in
  // the struct only for source compatibility with existing struct definitions.
  X509V3_EXT_NEW ext_new;
  X509V3_EXT_FREE ext_free;
  X509V3_EXT_D2I d2i;
  X509V3_EXT_I2D i2d;

  // The following functions are used for string extensions.
  X509V3_EXT_I2S i2s;
  X509V3_EXT_S2I s2i;

  // The following functions are used for multi-valued extensions.
  X509V3_EXT_I2V i2v;
  X509V3_EXT_V2I v2i;

  // The following functions are used for "raw" extensions, which implement
  // custom printing behavior.
  X509V3_EXT_I2R i2r;
  X509V3_EXT_R2I r2i;

  // usr_data is any extension-specific data.
  void *usr_data;
} /* X509V3_EXT_METHOD */;

// X509V3_EXT_MULTILINE causes the result of an |X509V3_EXT_METHOD|'s |i2v|
// function to be printed on separate lines, rather than separated by commas.
#define X509V3_EXT_MULTILINE 0x4

// X509V3_EXT_get returns the |X509V3_EXT_METHOD| corresponding to |ext|'s
// extension type, or NULL if none was registered.
OPENSSL_EXPORT const X509V3_EXT_METHOD *X509V3_EXT_get(
    const X509_EXTENSION *ext);

// X509V3_EXT_get_nid returns the |X509V3_EXT_METHOD| corresponding to |nid|, or
// NULL if none was registered.
OPENSSL_EXPORT const X509V3_EXT_METHOD *X509V3_EXT_get_nid(int nid);

// X509V3_EXT_add registers |ext| as a custom extension for the extension type
// |ext->ext_nid|. |ext| must be valid for the remainder of the address space's
// lifetime. It returns one on success and zero on error.
//
// WARNING: This function modifies global state. If other code in the same
// address space also registers an extension with type |ext->ext_nid|, the two
// registrations will conflict. Which registration takes effect is undefined. If
// the two registrations use incompatible in-memory representations, code
// expecting the other registration will then cast a type to the wrong type,
// resulting in a potentially exploitable memory error. This conflict can also
// occur if BoringSSL later adds support for |ext->ext_nid|, with a different
// in-memory representation than the one expected by |ext|.
//
// This function, additionally, is not thread-safe and cannot be called
// concurrently with any other BoringSSL function.
//
// As a result, it is impossible to safely use this function. Registering a
// custom extension has no impact on certificate verification so, instead,
// callers should simply handle the custom extension with the byte-based
// |X509_EXTENSION| APIs directly. Registering |ext| with the library has little
// practical value.
OPENSSL_EXPORT OPENSSL_DEPRECATED int X509V3_EXT_add(X509V3_EXT_METHOD *ext);

// X509V3_EXT_add_alias registers a custom extension with NID |nid_to|. The
// corresponding ASN.1 type is copied from |nid_from|. It returns one on success
// and zero on error.
//
// WARNING: Do not use this function. See |X509V3_EXT_add|.
OPENSSL_EXPORT OPENSSL_DEPRECATED int X509V3_EXT_add_alias(int nid_to,
                                                           int nid_from);


// Deprecated config-based extension creation.
//
// The following functions allow specifying X.509 extensions using OpenSSL's
// config file syntax, from the OpenSSL command-line tool. They are retained,
// for now, for compatibility with legacy software but may be removed in the
// future. Construct the extensions using the typed C APIs instead.
//
// Callers should especially avoid these functions if passing in non-constant
// values. They use ad-hoc, string-based formats which are prone to injection
// vulnerabilities. For a CA, this means using them risks misissuance.
//
// These functions are not safe to use with untrusted inputs. The string formats
// may implicitly reference context information and, in OpenSSL (though not
// BoringSSL), one even allows reading arbitrary files. Many formats can also
// produce far larger outputs than their inputs, so untrusted inputs may lead to
// denial-of-service attacks. Finally, the parsers see much less testing and
// review than most of the library and may have bugs including memory leaks or
// crashes.

// v3_ext_ctx, aka |X509V3_CTX|, contains additional context information for
// constructing extensions. Some string formats reference additional values in
// these objects. It must be initialized with |X509V3_set_ctx| or
// |X509V3_set_ctx_test| before use.
struct v3_ext_ctx {
  // The fields below correspond to the objects supplied via |X509V3_set_ctx|
  // (issuer, subject, req, crl, and flags) and |X509V3_set_nconf| (db).
  int flags;
  const X509 *issuer_cert;
  const X509 *subject_cert;
  const X509_REQ *subject_req;
  const X509_CRL *crl;
  const CONF *db;
};

#define X509V3_CTX_TEST 0x1

// X509V3_set_ctx initializes |ctx| with the specified objects.
Some string // formats will reference fields in these objects. Each object may be NULL to // omit it, in which case those formats cannot be used. |flags| should be zero, // unless called via |X509V3_set_ctx_test|. // // |issuer|, |subject|, |req|, and |crl|, if non-NULL, must outlive |ctx|. OPENSSL_EXPORT void X509V3_set_ctx(X509V3_CTX *ctx, const X509 *issuer, const X509 *subject, const X509_REQ *req, const X509_CRL *crl, int flags); // X509V3_set_ctx_test calls |X509V3_set_ctx| without any reference objects and // mocks out some features that use them. The resulting extensions may be // incomplete and should be discarded. This can be used to partially validate // syntax. // // TODO(davidben): Can we remove this? #define X509V3_set_ctx_test(ctx) \ X509V3_set_ctx(ctx, NULL, NULL, NULL, NULL, X509V3_CTX_TEST) // X509V3_set_nconf sets |ctx| to use |conf| as the config database. |ctx| must // have previously been initialized by |X509V3_set_ctx| or // |X509V3_set_ctx_test|. Some string formats will reference sections in |conf|. // |conf| may be NULL, in which case these formats cannot be used. If non-NULL, // |conf| must outlive |ctx|. OPENSSL_EXPORT void X509V3_set_nconf(X509V3_CTX *ctx, const CONF *conf); // X509V3_set_ctx_nodb calls |X509V3_set_nconf| with no config database. #define X509V3_set_ctx_nodb(ctx) X509V3_set_nconf(ctx, NULL) // X509V3_EXT_nconf constructs an extension of type specified by |name|, and // value specified by |value|. It returns a newly-allocated |X509_EXTENSION| // object on success, or NULL on error. |conf| and |ctx| specify additional // information referenced by some formats. Either |conf| or |ctx| may be NULL, // in which case features which use it will be disabled. // // If non-NULL, |ctx| must be initialized with |X509V3_set_ctx| or // |X509V3_set_ctx_test|. // // Both |conf| and |ctx| provide a |CONF| object. When |ctx| is non-NULL, most // features use the |ctx| copy, configured with |X509V3_set_ctx|, but some use // |conf|. 
Callers should ensure the two match to avoid surprisingly behavior. OPENSSL_EXPORT X509_EXTENSION *X509V3_EXT_nconf(const CONF *conf, const X509V3_CTX *ctx, const char *name, const char *value); // X509V3_EXT_nconf_nid behaves like |X509V3_EXT_nconf|, except the extension // type is specified as a NID. OPENSSL_EXPORT X509_EXTENSION *X509V3_EXT_nconf_nid(const CONF *conf, const X509V3_CTX *ctx, int ext_nid, const char *value); // X509V3_EXT_conf_nid calls |X509V3_EXT_nconf_nid|. |conf| must be NULL. // // TODO(davidben): This is the only exposed instance of an LHASH in our public // headers. cryptography.io wraps this function so we cannot, yet, replace the // type with a dummy struct. OPENSSL_EXPORT X509_EXTENSION *X509V3_EXT_conf_nid(LHASH_OF(CONF_VALUE) *conf, const X509V3_CTX *ctx, int ext_nid, const char *value); // X509V3_EXT_add_nconf_sk looks up the section named |section| in |conf|. For // each |CONF_VALUE| in the section, it constructs an extension as in // |X509V3_EXT_nconf|, taking |name| and |value| from the |CONF_VALUE|. Each new // extension is appended to |*sk|. If |*sk| is non-NULL, and at least one // extension is added, it sets |*sk| to a newly-allocated // |STACK_OF(X509_EXTENSION)|. It returns one on success and zero on error. OPENSSL_EXPORT int X509V3_EXT_add_nconf_sk(const CONF *conf, const X509V3_CTX *ctx, const char *section, STACK_OF(X509_EXTENSION) **sk); // X509V3_EXT_add_nconf adds extensions to |cert| as in // |X509V3_EXT_add_nconf_sk|. It returns one on success and zero on error. OPENSSL_EXPORT int X509V3_EXT_add_nconf(const CONF *conf, const X509V3_CTX *ctx, const char *section, X509 *cert); // X509V3_EXT_REQ_add_nconf adds extensions to |req| as in // |X509V3_EXT_add_nconf_sk|. It returns one on success and zero on error. OPENSSL_EXPORT int X509V3_EXT_REQ_add_nconf(const CONF *conf, const X509V3_CTX *ctx, const char *section, X509_REQ *req); // X509V3_EXT_CRL_add_nconf adds extensions to |crl| as in // |X509V3_EXT_add_nconf_sk|. 
// It returns one on success and zero on error.
OPENSSL_EXPORT int X509V3_EXT_CRL_add_nconf(const CONF *conf,
                                            const X509V3_CTX *ctx,
                                            const char *section, X509_CRL *crl);

// i2s_ASN1_OCTET_STRING returns a human-readable representation of |oct| as a
// newly-allocated, NUL-terminated string, or NULL on error. |method| is
// ignored. The caller must release the result with |OPENSSL_free| when done.
OPENSSL_EXPORT char *i2s_ASN1_OCTET_STRING(const X509V3_EXT_METHOD *method,
                                           const ASN1_OCTET_STRING *oct);

// s2i_ASN1_OCTET_STRING decodes |str| as a hexadecimal byte string, with
// optional colon separators between bytes. It returns a newly-allocated
// |ASN1_OCTET_STRING| with the result on success, or NULL on error. |method|
// and |ctx| are ignored.
OPENSSL_EXPORT ASN1_OCTET_STRING *s2i_ASN1_OCTET_STRING(
    const X509V3_EXT_METHOD *method, const X509V3_CTX *ctx, const char *str);

// i2s_ASN1_INTEGER returns a human-readable representation of |aint| as a
// newly-allocated, NUL-terminated string, or NULL on error. |method| is
// ignored. The caller must release the result with |OPENSSL_free| when done.
OPENSSL_EXPORT char *i2s_ASN1_INTEGER(const X509V3_EXT_METHOD *method,
                                      const ASN1_INTEGER *aint);

// s2i_ASN1_INTEGER decodes |value| as the ASCII representation of an integer,
// and returns a newly-allocated |ASN1_INTEGER| containing the result, or NULL
// on error. |method| is ignored. If |value| begins with "0x" or "0X", the input
// is decoded in hexadecimal, otherwise decimal.
OPENSSL_EXPORT ASN1_INTEGER *s2i_ASN1_INTEGER(const X509V3_EXT_METHOD *method,
                                              const char *value);

// i2s_ASN1_ENUMERATED returns a human-readable representation of |aint| as a
// newly-allocated, NUL-terminated string, or NULL on error. |method| is
// ignored. The caller must release the result with |OPENSSL_free| when done.
OPENSSL_EXPORT char *i2s_ASN1_ENUMERATED(const X509V3_EXT_METHOD *method,
                                         const ASN1_ENUMERATED *aint);

// X509V3_conf_free releases memory associated with |CONF_VALUE|.
OPENSSL_EXPORT void X509V3_conf_free(CONF_VALUE *val); // i2v_GENERAL_NAME serializes |gen| as a |CONF_VALUE|. If |ret| is non-NULL, it // appends the value to |ret| and returns |ret| on success or NULL on error. If // it returns NULL, the caller is still responsible for freeing |ret|. If |ret| // is NULL, it returns a newly-allocated |STACK_OF(CONF_VALUE)| containing the // result. |method| is ignored. When done, the caller should release the result // with |sk_CONF_VALUE_pop_free| and |X509V3_conf_free|. // // Do not use this function. This is an internal implementation detail of the // human-readable print functions. If extracting a SAN list from a certificate, // look at |gen| directly. OPENSSL_EXPORT STACK_OF(CONF_VALUE) *i2v_GENERAL_NAME( const X509V3_EXT_METHOD *method, const GENERAL_NAME *gen, STACK_OF(CONF_VALUE) *ret); // i2v_GENERAL_NAMES serializes |gen| as a list of |CONF_VALUE|s. If |ret| is // non-NULL, it appends the values to |ret| and returns |ret| on success or NULL // on error. If it returns NULL, the caller is still responsible for freeing // |ret|. If |ret| is NULL, it returns a newly-allocated |STACK_OF(CONF_VALUE)| // containing the results. |method| is ignored. // // Do not use this function. This is an internal implementation detail of the // human-readable print functions. If extracting a SAN list from a certificate, // look at |gen| directly. OPENSSL_EXPORT STACK_OF(CONF_VALUE) *i2v_GENERAL_NAMES( const X509V3_EXT_METHOD *method, const GENERAL_NAMES *gen, STACK_OF(CONF_VALUE) *extlist); // a2i_IPADDRESS decodes |ipasc| as the textual representation of an IPv4 or // IPv6 address. On success, it returns a newly-allocated |ASN1_OCTET_STRING| // containing the decoded IP address. IPv4 addresses are represented as 4-byte // strings and IPv6 addresses as 16-byte strings. On failure, it returns NULL. 
OPENSSL_EXPORT ASN1_OCTET_STRING *a2i_IPADDRESS(const char *ipasc);

// a2i_IPADDRESS_NC decodes |ipasc| as the textual representation of an IPv4 or
// IPv6 address range. On success, it returns a newly-allocated
// |ASN1_OCTET_STRING| containing the decoded IP address, followed by the
// decoded mask. IPv4 ranges are represented as 8-byte strings and IPv6 ranges
// as 32-byte strings. On failure, it returns NULL.
//
// The text format decoded by this function is not the standard CIDR notation.
// Instead, the mask after the "/" is represented as another IP address. For
// example, "192.168.0.0/16" would be written "192.168.0.0/255.255.0.0".
OPENSSL_EXPORT ASN1_OCTET_STRING *a2i_IPADDRESS_NC(const char *ipasc);


// Deprecated functions.

// X509_get_notBefore returns |x509|'s notBefore time. Note this function is not
// const-correct for legacy reasons. Use |X509_get0_notBefore| or
// |X509_getm_notBefore| instead.
OPENSSL_EXPORT ASN1_TIME *X509_get_notBefore(const X509 *x509);

// X509_get_notAfter returns |x509|'s notAfter time. Note this function is not
// const-correct for legacy reasons. Use |X509_get0_notAfter| or
// |X509_getm_notAfter| instead.
OPENSSL_EXPORT ASN1_TIME *X509_get_notAfter(const X509 *x509);

// X509_set_notBefore calls |X509_set1_notBefore|. Use |X509_set1_notBefore|
// instead.
OPENSSL_EXPORT int X509_set_notBefore(X509 *x509, const ASN1_TIME *tm);

// X509_set_notAfter calls |X509_set1_notAfter|. Use |X509_set1_notAfter|
// instead.
OPENSSL_EXPORT int X509_set_notAfter(X509 *x509, const ASN1_TIME *tm);

// X509_CRL_get_lastUpdate returns a mutable pointer to |crl|'s thisUpdate time.
// The OpenSSL API refers to this field as lastUpdate.
//
// Use |X509_CRL_get0_lastUpdate| or |X509_CRL_set1_lastUpdate| instead.
OPENSSL_EXPORT ASN1_TIME *X509_CRL_get_lastUpdate(X509_CRL *crl);

// X509_CRL_get_nextUpdate returns a mutable pointer to |crl|'s nextUpdate time,
// or NULL if |crl| has none.
Use |X509_CRL_get0_nextUpdate| or // |X509_CRL_set1_nextUpdate| instead. OPENSSL_EXPORT ASN1_TIME *X509_CRL_get_nextUpdate(X509_CRL *crl); // X509_extract_key is a legacy alias to |X509_get_pubkey|. Use // |X509_get_pubkey| instead. #define X509_extract_key(x) X509_get_pubkey(x) // X509_REQ_extract_key is a legacy alias for |X509_REQ_get_pubkey|. #define X509_REQ_extract_key(a) X509_REQ_get_pubkey(a) // X509_name_cmp is a legacy alias for |X509_NAME_cmp|. #define X509_name_cmp(a, b) X509_NAME_cmp((a), (b)) // The following symbols are deprecated aliases to |X509_CRL_set1_*|. #define X509_CRL_set_lastUpdate X509_CRL_set1_lastUpdate #define X509_CRL_set_nextUpdate X509_CRL_set1_nextUpdate // X509_get_serialNumber returns a mutable pointer to |x509|'s serial number. // Prefer |X509_get0_serialNumber|. OPENSSL_EXPORT ASN1_INTEGER *X509_get_serialNumber(X509 *x509); // X509_NAME_get_text_by_OBJ finds the first attribute with type |obj| in // |name|. If found, it writes the value's UTF-8 representation to |buf|. // followed by a NUL byte, and returns the number of bytes in the output, // excluding the NUL byte. This is unlike OpenSSL which returns the raw // ASN1_STRING data. The UTF-8 encoding of the |ASN1_STRING| may not contain a 0 // codepoint. // // This function writes at most |len| bytes, including the NUL byte. If |buf| // is NULL, it writes nothing and returns the number of bytes in the // output, excluding the NUL byte that would be required for the full UTF-8 // output. // // This function may return -1 if an error occurs for any reason, including the // value not being a recognized string type, |len| being of insufficient size to // hold the full UTF-8 encoding and NUL byte, memory allocation failures, an // object with type |obj| not existing in |name|, or if the UTF-8 encoding of // the string contains a zero byte. 
OPENSSL_EXPORT int X509_NAME_get_text_by_OBJ(const X509_NAME *name, const ASN1_OBJECT *obj, char *buf, int len); // X509_NAME_get_text_by_NID behaves like |X509_NAME_get_text_by_OBJ| except it // finds an attribute of type |nid|, which should be one of the |NID_*| // constants. OPENSSL_EXPORT int X509_NAME_get_text_by_NID(const X509_NAME *name, int nid, char *buf, int len); // X509_STORE_CTX_get0_parent_ctx returns NULL. OPENSSL_EXPORT X509_STORE_CTX *X509_STORE_CTX_get0_parent_ctx( const X509_STORE_CTX *ctx); // X509_OBJECT_free_contents sets |obj| to the empty object, freeing any values // that were previously there. // // TODO(davidben): Unexport this function after rust-openssl is fixed to no // longer call it. OPENSSL_EXPORT void X509_OBJECT_free_contents(X509_OBJECT *obj); // X509_LOOKUP_free releases memory associated with |ctx|. This function should // never be used outside the library. No function in the public API hands // ownership of an |X509_LOOKUP| to the caller. // // TODO(davidben): Unexport this function after rust-openssl is fixed to no // longer call it. OPENSSL_EXPORT void X509_LOOKUP_free(X509_LOOKUP *ctx); // X509_STORE_CTX_cleanup resets |ctx| to the empty state. // // This function is a remnant of when |X509_STORE_CTX| was stack-allocated and // should not be used. If releasing |ctx|, call |X509_STORE_CTX_free|. If // reusing |ctx| for a new verification, release the old one and create a new // one. OPENSSL_EXPORT void X509_STORE_CTX_cleanup(X509_STORE_CTX *ctx); // X509V3_add_standard_extensions returns one. OPENSSL_EXPORT int X509V3_add_standard_extensions(void); // The following symbols are legacy aliases for |X509_STORE_CTX| functions. #define X509_STORE_get_by_subject X509_STORE_CTX_get_by_subject #define X509_STORE_get1_certs X509_STORE_CTX_get1_certs #define X509_STORE_get1_crls X509_STORE_CTX_get1_crls // X509_STORE_CTX_get_chain is a legacy alias for |X509_STORE_CTX_get0_chain|. 
OPENSSL_EXPORT STACK_OF(X509) *X509_STORE_CTX_get_chain( const X509_STORE_CTX *ctx); // X509_STORE_CTX_trusted_stack is a deprecated alias for // |X509_STORE_CTX_set0_trusted_stack|. OPENSSL_EXPORT void X509_STORE_CTX_trusted_stack(X509_STORE_CTX *ctx, STACK_OF(X509) *sk); typedef int (*X509_STORE_CTX_verify_cb)(int, X509_STORE_CTX *); // X509_STORE_CTX_set_verify_cb configures a callback function for |ctx| that is // called multiple times during |X509_verify_cert|. The callback returns zero to // fail verification and one to proceed. Typically, it will return |ok|, which // preserves the default behavior. Returning one when |ok| is zero will proceed // past some error. The callback may inspect |ctx| and the error queue to // attempt to determine the current stage of certificate verification, but this // is often unreliable. When synthesizing an error, callbacks should use // |X509_STORE_CTX_set_error| to set a corresponding error. // // WARNING: Do not use this function. It is extremely fragile and unpredictable. // This callback exposes implementation details of certificate verification, // which change as the library evolves. Attempting to use it for security checks // can introduce vulnerabilities if making incorrect assumptions about when the // callback is called. Some errors, when suppressed, may implicitly suppress // other errors due to internal implementation details. Additionally, overriding // |ok| may leave |ctx| in an inconsistent state and break invariants. // // Instead, customize certificate verification by configuring options on the // |X509_STORE_CTX| before verification, or applying additional checks after // |X509_verify_cert| completes successfully. 
OPENSSL_EXPORT void X509_STORE_CTX_set_verify_cb( X509_STORE_CTX *ctx, int (*verify_cb)(int ok, X509_STORE_CTX *ctx)); // X509_STORE_set_verify_cb acts like |X509_STORE_CTX_set_verify_cb| but sets // the verify callback for any |X509_STORE_CTX| created from this |X509_STORE| // // Do not use this function. See |X509_STORE_CTX_set_verify_cb| for details. OPENSSL_EXPORT void X509_STORE_set_verify_cb( X509_STORE *store, X509_STORE_CTX_verify_cb verify_cb); // X509_STORE_set_verify_cb_func is a deprecated alias for // |X509_STORE_set_verify_cb|. #define X509_STORE_set_verify_cb_func(store, func) \ X509_STORE_set_verify_cb((store), (func)) // X509_STORE_CTX_set_chain configures |ctx| to use |sk| for untrusted // intermediate certificates to use in verification. This function is redundant // with the |chain| parameter of |X509_STORE_CTX_init|. Use the parameter // instead. // // WARNING: Despite the similar name, this function is unrelated to // |X509_STORE_CTX_get0_chain|. // // WARNING: This function saves a pointer to |sk| without copying or // incrementing reference counts. |sk| must outlive |ctx| and may not be mutated // for the duration of the certificate verification. OPENSSL_EXPORT void X509_STORE_CTX_set_chain(X509_STORE_CTX *ctx, STACK_OF(X509) *sk); // The following flags do nothing. The corresponding non-standard options have // been removed. #define X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT 0 #define X509_CHECK_FLAG_MULTI_LABEL_WILDCARDS 0 #define X509_CHECK_FLAG_SINGLE_LABEL_SUBDOMAINS 0 // X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS does nothing, but is necessary in // OpenSSL to enable standard wildcard matching. In BoringSSL, this behavior is // always enabled. #define X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS 0 // X509_STORE_get0_objects returns a non-owning pointer of |store|'s internal // object list. Although this function is not const, callers must not modify // the result of this function. // // WARNING: This function is not thread-safe. 
If |store| is shared across // multiple threads, callers cannot safely inspect the result of this function, // because another thread may have concurrently added to it. In particular, // |X509_LOOKUP_add_dir| treats this list as a cache and may add to it in the // course of certificate verification. This API additionally prevents fixing // some quadratic worst-case behavior in |X509_STORE| and may be removed in the // future. Use |X509_STORE_get1_objects| instead. OPENSSL_EXPORT STACK_OF(X509_OBJECT) *X509_STORE_get0_objects( X509_STORE *store); // X509_PURPOSE_get_by_sname returns the |X509_PURPOSE_*| constant corresponding // a short name |sname|, or -1 if |sname| was not recognized. // // Use |X509_PURPOSE_*| constants directly instead. The short names used by this // function look like "sslserver" or "smimeencrypt", so they do not make // especially good APIs. // // This function differs from OpenSSL, which returns an "index" to be passed to // |X509_PURPOSE_get0|, followed by |X509_PURPOSE_get_id|, to finally obtain an // |X509_PURPOSE_*| value suitable for use with |X509_VERIFY_PARAM_set_purpose|. OPENSSL_EXPORT int X509_PURPOSE_get_by_sname(const char *sname); // X509_PURPOSE_get0 returns the |X509_PURPOSE| object corresponding to |id|, // which should be one of the |X509_PURPOSE_*| constants, or NULL if none // exists. // // This function differs from OpenSSL, which takes an "index", returned from // |X509_PURPOSE_get_by_sname|. In BoringSSL, indices and |X509_PURPOSE_*| IDs // are the same. OPENSSL_EXPORT const X509_PURPOSE *X509_PURPOSE_get0(int id); // X509_PURPOSE_get_id returns |purpose|'s ID. This will be one of the // |X509_PURPOSE_*| constants. OPENSSL_EXPORT int X509_PURPOSE_get_id(const X509_PURPOSE *purpose); // The following constants are values for the legacy Netscape certificate type // X.509 extension, a precursor to extended key usage. These values correspond // to the DER encoding of the first byte of the BIT STRING. 
// That is, 0x80 is
// bit zero and 0x01 is bit seven.
//
// TODO(davidben): These constants are only used by OpenVPN, which deprecated
// the feature in 2017. The documentation says it was removed, but they did not
// actually remove it. See if OpenVPN will accept a patch to finish this.
#define NS_SSL_CLIENT 0x80
#define NS_SSL_SERVER 0x40
#define NS_SMIME 0x20
#define NS_OBJSIGN 0x10
#define NS_SSL_CA 0x04
#define NS_SMIME_CA 0x02
#define NS_OBJSIGN_CA 0x01
#define NS_ANY_CA (NS_SSL_CA | NS_SMIME_CA | NS_OBJSIGN_CA)


// Private structures.

// X509_algor_st, aka |X509_ALGOR|, represents an ASN.1 AlgorithmIdentifier: an
// algorithm OID together with an optional parameter value.
struct X509_algor_st {
  ASN1_OBJECT *algorithm;
  ASN1_TYPE *parameter;
} /* X509_ALGOR */;


#if defined(__cplusplus)
}  // extern C
#endif

#if !defined(BORINGSSL_NO_CXX)
extern "C++" {

BSSL_NAMESPACE_BEGIN

BORINGSSL_MAKE_DELETER(ACCESS_DESCRIPTION, ACCESS_DESCRIPTION_free)
BORINGSSL_MAKE_DELETER(AUTHORITY_KEYID, AUTHORITY_KEYID_free)
BORINGSSL_MAKE_DELETER(BASIC_CONSTRAINTS, BASIC_CONSTRAINTS_free)
// TODO(davidben): Move this to conf.h and rename to CONF_VALUE_free.
BORINGSSL_MAKE_DELETER(CONF_VALUE, X509V3_conf_free) BORINGSSL_MAKE_DELETER(DIST_POINT, DIST_POINT_free) BORINGSSL_MAKE_DELETER(GENERAL_NAME, GENERAL_NAME_free) BORINGSSL_MAKE_DELETER(GENERAL_SUBTREE, GENERAL_SUBTREE_free) BORINGSSL_MAKE_DELETER(NAME_CONSTRAINTS, NAME_CONSTRAINTS_free) BORINGSSL_MAKE_DELETER(NETSCAPE_SPKI, NETSCAPE_SPKI_free) BORINGSSL_MAKE_DELETER(POLICY_MAPPING, POLICY_MAPPING_free) BORINGSSL_MAKE_DELETER(POLICYINFO, POLICYINFO_free) BORINGSSL_MAKE_DELETER(RSA_PSS_PARAMS, RSA_PSS_PARAMS_free) BORINGSSL_MAKE_DELETER(X509, X509_free) BORINGSSL_MAKE_UP_REF(X509, X509_up_ref) BORINGSSL_MAKE_DELETER(X509_ALGOR, X509_ALGOR_free) BORINGSSL_MAKE_DELETER(X509_ATTRIBUTE, X509_ATTRIBUTE_free) BORINGSSL_MAKE_DELETER(X509_CRL, X509_CRL_free) BORINGSSL_MAKE_UP_REF(X509_CRL, X509_CRL_up_ref) BORINGSSL_MAKE_DELETER(X509_EXTENSION, X509_EXTENSION_free) BORINGSSL_MAKE_DELETER(X509_INFO, X509_INFO_free) BORINGSSL_MAKE_DELETER(X509_LOOKUP, X509_LOOKUP_free) BORINGSSL_MAKE_DELETER(X509_NAME, X509_NAME_free) BORINGSSL_MAKE_DELETER(X509_NAME_ENTRY, X509_NAME_ENTRY_free) BORINGSSL_MAKE_DELETER(X509_OBJECT, X509_OBJECT_free) BORINGSSL_MAKE_DELETER(X509_PUBKEY, X509_PUBKEY_free) BORINGSSL_MAKE_DELETER(X509_REQ, X509_REQ_free) BORINGSSL_MAKE_DELETER(X509_REVOKED, X509_REVOKED_free) BORINGSSL_MAKE_DELETER(X509_SIG, X509_SIG_free) BORINGSSL_MAKE_DELETER(X509_STORE, X509_STORE_free) BORINGSSL_MAKE_UP_REF(X509_STORE, X509_STORE_up_ref) BORINGSSL_MAKE_DELETER(X509_STORE_CTX, X509_STORE_CTX_free) BORINGSSL_MAKE_DELETER(X509_VERIFY_PARAM, X509_VERIFY_PARAM_free) BSSL_NAMESPACE_END } // extern C++ #endif // !BORINGSSL_NO_CXX #define X509_R_AKID_MISMATCH 100 #define X509_R_BAD_PKCS7_VERSION 101 #define X509_R_BAD_X509_FILETYPE 102 #define X509_R_BASE64_DECODE_ERROR 103 #define X509_R_CANT_CHECK_DH_KEY 104 #define X509_R_CERT_ALREADY_IN_HASH_TABLE 105 #define X509_R_CRL_ALREADY_DELTA 106 #define X509_R_CRL_VERIFY_FAILURE 107 #define X509_R_IDP_MISMATCH 108 #define 
X509_R_INVALID_BIT_STRING_BITS_LEFT 109 #define X509_R_INVALID_DIRECTORY 110 #define X509_R_INVALID_FIELD_NAME 111 #define X509_R_INVALID_PSS_PARAMETERS 112 #define X509_R_INVALID_TRUST 113 #define X509_R_ISSUER_MISMATCH 114 #define X509_R_KEY_TYPE_MISMATCH 115 #define X509_R_KEY_VALUES_MISMATCH 116 #define X509_R_LOADING_CERT_DIR 117 #define X509_R_LOADING_DEFAULTS 118 #define X509_R_NEWER_CRL_NOT_NEWER 119 #define X509_R_NOT_PKCS7_SIGNED_DATA 120 #define X509_R_NO_CERTIFICATES_INCLUDED 121 #define X509_R_NO_CERT_SET_FOR_US_TO_VERIFY 122 #define X509_R_NO_CRLS_INCLUDED 123 #define X509_R_NO_CRL_NUMBER 124 #define X509_R_PUBLIC_KEY_DECODE_ERROR 125 #define X509_R_PUBLIC_KEY_ENCODE_ERROR 126 #define X509_R_SHOULD_RETRY 127 #define X509_R_UNKNOWN_KEY_TYPE 128 #define X509_R_UNKNOWN_NID 129 #define X509_R_UNKNOWN_PURPOSE_ID 130 #define X509_R_UNKNOWN_TRUST_ID 131 #define X509_R_UNSUPPORTED_ALGORITHM 132 #define X509_R_WRONG_LOOKUP_TYPE 133 #define X509_R_WRONG_TYPE 134 #define X509_R_NAME_TOO_LONG 135 #define X509_R_INVALID_PARAMETER 136 #define X509_R_SIGNATURE_ALGORITHM_MISMATCH 137 #define X509_R_DELTA_CRL_WITHOUT_CRL_NUMBER 138 #define X509_R_INVALID_FIELD_FOR_VERSION 139 #define X509_R_INVALID_VERSION 140 #define X509_R_NO_CERTIFICATE_FOUND 141 #define X509_R_NO_CERTIFICATE_OR_CRL_FOUND 142 #define X509_R_NO_CRL_FOUND 143 #define X509_R_INVALID_POLICY_EXTENSION 144 #endif // OPENSSL_HEADER_X509_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_x509_vfy.h ================================================ /* Copyright 2021 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This header is provided in order to make compiling against code that expects OpenSSL easier. */ #include "CNIOBoringSSL_x509.h" ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_x509v3.h ================================================ /* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_X509V3_H #define OPENSSL_HEADER_X509V3_H // This header primarily exists in order to make compiling against code that // expects OpenSSL easier. We have merged this header into . // However, due to conflicts, some deprecated symbols are defined here. #include "CNIOBoringSSL_x509.h" // CRL reason constants. 
// TODO(davidben): These constants live here because strongswan defines // conflicting symbols and has been relying on them only being defined in // . Defining the constants in would break // strongswan, but we would also like for new code to only need // . Introduce properly namespaced versions of these constants // and, separately, see if we can fix strongswan to similarly avoid the // conflict. Between OpenSSL, strongswan, and wincrypt.h all defining these // constants, it seems best for everyone to just avoid them going forward. #define CRL_REASON_NONE (-1) #define CRL_REASON_UNSPECIFIED 0 #define CRL_REASON_KEY_COMPROMISE 1 #define CRL_REASON_CA_COMPROMISE 2 #define CRL_REASON_AFFILIATION_CHANGED 3 #define CRL_REASON_SUPERSEDED 4 #define CRL_REASON_CESSATION_OF_OPERATION 5 #define CRL_REASON_CERTIFICATE_HOLD 6 #define CRL_REASON_REMOVE_FROM_CRL 8 #define CRL_REASON_PRIVILEGE_WITHDRAWN 9 #define CRL_REASON_AA_COMPROMISE 10 // Deprecated constants. // The following constants are legacy aliases for |X509v3_KU_*|. They are // defined here instead of in because NSS's public headers use // the same symbols. Some callers have inadvertently relied on the conflicts // only being defined in this header. #define KU_DIGITAL_SIGNATURE X509v3_KU_DIGITAL_SIGNATURE #define KU_NON_REPUDIATION X509v3_KU_NON_REPUDIATION #define KU_KEY_ENCIPHERMENT X509v3_KU_KEY_ENCIPHERMENT #define KU_DATA_ENCIPHERMENT X509v3_KU_DATA_ENCIPHERMENT #define KU_KEY_AGREEMENT X509v3_KU_KEY_AGREEMENT #define KU_KEY_CERT_SIGN X509v3_KU_KEY_CERT_SIGN #define KU_CRL_SIGN X509v3_KU_CRL_SIGN #define KU_ENCIPHER_ONLY X509v3_KU_ENCIPHER_ONLY #define KU_DECIPHER_ONLY X509v3_KU_DECIPHER_ONLY #endif // OPENSSL_HEADER_X509V3_H ================================================ FILE: Sources/CNIOBoringSSL/include/CNIOBoringSSL_x509v3_errors.h ================================================ /* * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. 
* * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_X509V3_ERRORS_H #define OPENSSL_HEADER_X509V3_ERRORS_H #define X509V3_R_BAD_IP_ADDRESS 100 #define X509V3_R_BAD_OBJECT 101 #define X509V3_R_BN_DEC2BN_ERROR 102 #define X509V3_R_BN_TO_ASN1_INTEGER_ERROR 103 #define X509V3_R_CANNOT_FIND_FREE_FUNCTION 104 #define X509V3_R_DIRNAME_ERROR 105 #define X509V3_R_DISTPOINT_ALREADY_SET 106 #define X509V3_R_DUPLICATE_ZONE_ID 107 #define X509V3_R_ERROR_CONVERTING_ZONE 108 #define X509V3_R_ERROR_CREATING_EXTENSION 109 #define X509V3_R_ERROR_IN_EXTENSION 110 #define X509V3_R_EXPECTED_A_SECTION_NAME 111 #define X509V3_R_EXTENSION_EXISTS 112 #define X509V3_R_EXTENSION_NAME_ERROR 113 #define X509V3_R_EXTENSION_NOT_FOUND 114 #define X509V3_R_EXTENSION_SETTING_NOT_SUPPORTED 115 #define X509V3_R_EXTENSION_VALUE_ERROR 116 #define X509V3_R_ILLEGAL_EMPTY_EXTENSION 117 #define X509V3_R_ILLEGAL_HEX_DIGIT 118 #define X509V3_R_INCORRECT_POLICY_SYNTAX_TAG 119 #define X509V3_R_INVALID_BOOLEAN_STRING 120 #define X509V3_R_INVALID_EXTENSION_STRING 121 #define X509V3_R_INVALID_MULTIPLE_RDNS 122 #define X509V3_R_INVALID_NAME 123 #define X509V3_R_INVALID_NULL_ARGUMENT 124 #define X509V3_R_INVALID_NULL_NAME 125 #define X509V3_R_INVALID_NULL_VALUE 126 #define X509V3_R_INVALID_NUMBER 127 #define X509V3_R_INVALID_NUMBERS 128 #define X509V3_R_INVALID_OBJECT_IDENTIFIER 129 #define X509V3_R_INVALID_OPTION 130 #define X509V3_R_INVALID_POLICY_IDENTIFIER 131 #define X509V3_R_INVALID_PROXY_POLICY_SETTING 132 #define X509V3_R_INVALID_PURPOSE 133 #define X509V3_R_INVALID_SECTION 134 #define X509V3_R_INVALID_SYNTAX 135 #define X509V3_R_ISSUER_DECODE_ERROR 136 #define X509V3_R_MISSING_VALUE 137 #define X509V3_R_NEED_ORGANIZATION_AND_NUMBERS 138 #define X509V3_R_NO_CONFIG_DATABASE 139 #define 
X509V3_R_NO_ISSUER_CERTIFICATE 140 #define X509V3_R_NO_ISSUER_DETAILS 141 #define X509V3_R_NO_POLICY_IDENTIFIER 142 #define X509V3_R_NO_PROXY_CERT_POLICY_LANGUAGE_DEFINED 143 #define X509V3_R_NO_PUBLIC_KEY 144 #define X509V3_R_NO_SUBJECT_DETAILS 145 #define X509V3_R_ODD_NUMBER_OF_DIGITS 146 #define X509V3_R_OPERATION_NOT_DEFINED 147 #define X509V3_R_OTHERNAME_ERROR 148 #define X509V3_R_POLICY_LANGUAGE_ALREADY_DEFINED 149 #define X509V3_R_POLICY_PATH_LENGTH 150 #define X509V3_R_POLICY_PATH_LENGTH_ALREADY_DEFINED 151 #define X509V3_R_POLICY_WHEN_PROXY_LANGUAGE_REQUIRES_NO_POLICY 152 #define X509V3_R_SECTION_NOT_FOUND 153 #define X509V3_R_UNABLE_TO_GET_ISSUER_DETAILS 154 #define X509V3_R_UNABLE_TO_GET_ISSUER_KEYID 155 #define X509V3_R_UNKNOWN_BIT_STRING_ARGUMENT 156 #define X509V3_R_UNKNOWN_EXTENSION 157 #define X509V3_R_UNKNOWN_EXTENSION_NAME 158 #define X509V3_R_UNKNOWN_OPTION 159 #define X509V3_R_UNSUPPORTED_OPTION 160 #define X509V3_R_UNSUPPORTED_TYPE 161 #define X509V3_R_USER_TOO_LONG 162 #define X509V3_R_INVALID_VALUE 163 #define X509V3_R_TRAILING_DATA_IN_EXTENSION 164 #endif // OPENSSL_HEADER_X509V3_ERRORS_H ================================================ FILE: Sources/CNIOBoringSSL/include/boringssl_prefix_symbols_nasm.inc ================================================ ; Copyright 2018 The BoringSSL Authors ; ; Permission to use, copy, modify, and/or distribute this software for any ; purpose with or without fee is hereby granted, provided that the above ; copyright notice and this permission notice appear in all copies. ; ; THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ; WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ; MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ; SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ; WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ; OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ; CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ; 32-bit Windows adds underscores to C functions, while 64-bit Windows does not. %ifidn __OUTPUT_FORMAT__, win32 %xdefine _ACCESS_DESCRIPTION_free _ %+ BORINGSSL_PREFIX %+ _ACCESS_DESCRIPTION_free %xdefine _ACCESS_DESCRIPTION_new _ %+ BORINGSSL_PREFIX %+ _ACCESS_DESCRIPTION_new %xdefine _AES_CMAC _ %+ BORINGSSL_PREFIX %+ _AES_CMAC %xdefine _AES_cbc_encrypt _ %+ BORINGSSL_PREFIX %+ _AES_cbc_encrypt %xdefine _AES_cfb128_encrypt _ %+ BORINGSSL_PREFIX %+ _AES_cfb128_encrypt %xdefine _AES_ctr128_encrypt _ %+ BORINGSSL_PREFIX %+ _AES_ctr128_encrypt %xdefine _AES_decrypt _ %+ BORINGSSL_PREFIX %+ _AES_decrypt %xdefine _AES_ecb_encrypt _ %+ BORINGSSL_PREFIX %+ _AES_ecb_encrypt %xdefine _AES_encrypt _ %+ BORINGSSL_PREFIX %+ _AES_encrypt %xdefine _AES_ofb128_encrypt _ %+ BORINGSSL_PREFIX %+ _AES_ofb128_encrypt %xdefine _AES_set_decrypt_key _ %+ BORINGSSL_PREFIX %+ _AES_set_decrypt_key %xdefine _AES_set_encrypt_key _ %+ BORINGSSL_PREFIX %+ _AES_set_encrypt_key %xdefine _AES_unwrap_key _ %+ BORINGSSL_PREFIX %+ _AES_unwrap_key %xdefine _AES_unwrap_key_padded _ %+ BORINGSSL_PREFIX %+ _AES_unwrap_key_padded %xdefine _AES_wrap_key _ %+ BORINGSSL_PREFIX %+ _AES_wrap_key %xdefine _AES_wrap_key_padded _ %+ BORINGSSL_PREFIX %+ _AES_wrap_key_padded %xdefine _ASN1_ANY_it _ %+ BORINGSSL_PREFIX %+ _ASN1_ANY_it %xdefine _ASN1_BIT_STRING_check _ %+ BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_check %xdefine _ASN1_BIT_STRING_free _ %+ BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_free %xdefine _ASN1_BIT_STRING_get_bit _ %+ BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_get_bit %xdefine _ASN1_BIT_STRING_it _ %+ BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_it %xdefine _ASN1_BIT_STRING_new _ %+ 
BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_new %xdefine _ASN1_BIT_STRING_num_bytes _ %+ BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_num_bytes %xdefine _ASN1_BIT_STRING_set _ %+ BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_set %xdefine _ASN1_BIT_STRING_set_bit _ %+ BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_set_bit %xdefine _ASN1_BMPSTRING_free _ %+ BORINGSSL_PREFIX %+ _ASN1_BMPSTRING_free %xdefine _ASN1_BMPSTRING_it _ %+ BORINGSSL_PREFIX %+ _ASN1_BMPSTRING_it %xdefine _ASN1_BMPSTRING_new _ %+ BORINGSSL_PREFIX %+ _ASN1_BMPSTRING_new %xdefine _ASN1_BOOLEAN_it _ %+ BORINGSSL_PREFIX %+ _ASN1_BOOLEAN_it %xdefine _ASN1_ENUMERATED_free _ %+ BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_free %xdefine _ASN1_ENUMERATED_get _ %+ BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_get %xdefine _ASN1_ENUMERATED_get_int64 _ %+ BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_get_int64 %xdefine _ASN1_ENUMERATED_get_uint64 _ %+ BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_get_uint64 %xdefine _ASN1_ENUMERATED_it _ %+ BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_it %xdefine _ASN1_ENUMERATED_new _ %+ BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_new %xdefine _ASN1_ENUMERATED_set _ %+ BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_set %xdefine _ASN1_ENUMERATED_set_int64 _ %+ BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_set_int64 %xdefine _ASN1_ENUMERATED_set_uint64 _ %+ BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_set_uint64 %xdefine _ASN1_ENUMERATED_to_BN _ %+ BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_to_BN %xdefine _ASN1_FBOOLEAN_it _ %+ BORINGSSL_PREFIX %+ _ASN1_FBOOLEAN_it %xdefine _ASN1_GENERALIZEDTIME_adj _ %+ BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_adj %xdefine _ASN1_GENERALIZEDTIME_check _ %+ BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_check %xdefine _ASN1_GENERALIZEDTIME_free _ %+ BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_free %xdefine _ASN1_GENERALIZEDTIME_it _ %+ BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_it %xdefine _ASN1_GENERALIZEDTIME_new _ %+ BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_new %xdefine _ASN1_GENERALIZEDTIME_print _ %+ BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_print %xdefine 
_ASN1_GENERALIZEDTIME_set _ %+ BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_set %xdefine _ASN1_GENERALIZEDTIME_set_string _ %+ BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_set_string %xdefine _ASN1_GENERALSTRING_free _ %+ BORINGSSL_PREFIX %+ _ASN1_GENERALSTRING_free %xdefine _ASN1_GENERALSTRING_it _ %+ BORINGSSL_PREFIX %+ _ASN1_GENERALSTRING_it %xdefine _ASN1_GENERALSTRING_new _ %+ BORINGSSL_PREFIX %+ _ASN1_GENERALSTRING_new %xdefine _ASN1_IA5STRING_free _ %+ BORINGSSL_PREFIX %+ _ASN1_IA5STRING_free %xdefine _ASN1_IA5STRING_it _ %+ BORINGSSL_PREFIX %+ _ASN1_IA5STRING_it %xdefine _ASN1_IA5STRING_new _ %+ BORINGSSL_PREFIX %+ _ASN1_IA5STRING_new %xdefine _ASN1_INTEGER_cmp _ %+ BORINGSSL_PREFIX %+ _ASN1_INTEGER_cmp %xdefine _ASN1_INTEGER_dup _ %+ BORINGSSL_PREFIX %+ _ASN1_INTEGER_dup %xdefine _ASN1_INTEGER_free _ %+ BORINGSSL_PREFIX %+ _ASN1_INTEGER_free %xdefine _ASN1_INTEGER_get _ %+ BORINGSSL_PREFIX %+ _ASN1_INTEGER_get %xdefine _ASN1_INTEGER_get_int64 _ %+ BORINGSSL_PREFIX %+ _ASN1_INTEGER_get_int64 %xdefine _ASN1_INTEGER_get_uint64 _ %+ BORINGSSL_PREFIX %+ _ASN1_INTEGER_get_uint64 %xdefine _ASN1_INTEGER_it _ %+ BORINGSSL_PREFIX %+ _ASN1_INTEGER_it %xdefine _ASN1_INTEGER_new _ %+ BORINGSSL_PREFIX %+ _ASN1_INTEGER_new %xdefine _ASN1_INTEGER_set _ %+ BORINGSSL_PREFIX %+ _ASN1_INTEGER_set %xdefine _ASN1_INTEGER_set_int64 _ %+ BORINGSSL_PREFIX %+ _ASN1_INTEGER_set_int64 %xdefine _ASN1_INTEGER_set_uint64 _ %+ BORINGSSL_PREFIX %+ _ASN1_INTEGER_set_uint64 %xdefine _ASN1_INTEGER_to_BN _ %+ BORINGSSL_PREFIX %+ _ASN1_INTEGER_to_BN %xdefine _ASN1_NULL_free _ %+ BORINGSSL_PREFIX %+ _ASN1_NULL_free %xdefine _ASN1_NULL_it _ %+ BORINGSSL_PREFIX %+ _ASN1_NULL_it %xdefine _ASN1_NULL_new _ %+ BORINGSSL_PREFIX %+ _ASN1_NULL_new %xdefine _ASN1_OBJECT_create _ %+ BORINGSSL_PREFIX %+ _ASN1_OBJECT_create %xdefine _ASN1_OBJECT_free _ %+ BORINGSSL_PREFIX %+ _ASN1_OBJECT_free %xdefine _ASN1_OBJECT_it _ %+ BORINGSSL_PREFIX %+ _ASN1_OBJECT_it %xdefine _ASN1_OBJECT_new _ %+ BORINGSSL_PREFIX %+ 
_ASN1_OBJECT_new %xdefine _ASN1_OCTET_STRING_cmp _ %+ BORINGSSL_PREFIX %+ _ASN1_OCTET_STRING_cmp %xdefine _ASN1_OCTET_STRING_dup _ %+ BORINGSSL_PREFIX %+ _ASN1_OCTET_STRING_dup %xdefine _ASN1_OCTET_STRING_free _ %+ BORINGSSL_PREFIX %+ _ASN1_OCTET_STRING_free %xdefine _ASN1_OCTET_STRING_it _ %+ BORINGSSL_PREFIX %+ _ASN1_OCTET_STRING_it %xdefine _ASN1_OCTET_STRING_new _ %+ BORINGSSL_PREFIX %+ _ASN1_OCTET_STRING_new %xdefine _ASN1_OCTET_STRING_set _ %+ BORINGSSL_PREFIX %+ _ASN1_OCTET_STRING_set %xdefine _ASN1_PRINTABLESTRING_free _ %+ BORINGSSL_PREFIX %+ _ASN1_PRINTABLESTRING_free %xdefine _ASN1_PRINTABLESTRING_it _ %+ BORINGSSL_PREFIX %+ _ASN1_PRINTABLESTRING_it %xdefine _ASN1_PRINTABLESTRING_new _ %+ BORINGSSL_PREFIX %+ _ASN1_PRINTABLESTRING_new %xdefine _ASN1_PRINTABLE_free _ %+ BORINGSSL_PREFIX %+ _ASN1_PRINTABLE_free %xdefine _ASN1_PRINTABLE_it _ %+ BORINGSSL_PREFIX %+ _ASN1_PRINTABLE_it %xdefine _ASN1_PRINTABLE_new _ %+ BORINGSSL_PREFIX %+ _ASN1_PRINTABLE_new %xdefine _ASN1_SEQUENCE_it _ %+ BORINGSSL_PREFIX %+ _ASN1_SEQUENCE_it %xdefine _ASN1_STRING_TABLE_add _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_TABLE_add %xdefine _ASN1_STRING_TABLE_cleanup _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_TABLE_cleanup %xdefine _ASN1_STRING_cmp _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_cmp %xdefine _ASN1_STRING_copy _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_copy %xdefine _ASN1_STRING_data _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_data %xdefine _ASN1_STRING_dup _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_dup %xdefine _ASN1_STRING_free _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_free %xdefine _ASN1_STRING_get0_data _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_get0_data %xdefine _ASN1_STRING_get_default_mask _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_get_default_mask %xdefine _ASN1_STRING_length _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_length %xdefine _ASN1_STRING_new _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_new %xdefine _ASN1_STRING_print _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_print %xdefine _ASN1_STRING_print_ex _ %+ 
BORINGSSL_PREFIX %+ _ASN1_STRING_print_ex %xdefine _ASN1_STRING_print_ex_fp _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_print_ex_fp %xdefine _ASN1_STRING_set _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_set %xdefine _ASN1_STRING_set0 _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_set0 %xdefine _ASN1_STRING_set_by_NID _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_set_by_NID %xdefine _ASN1_STRING_set_default_mask _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_set_default_mask %xdefine _ASN1_STRING_set_default_mask_asc _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_set_default_mask_asc %xdefine _ASN1_STRING_to_UTF8 _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_to_UTF8 %xdefine _ASN1_STRING_type _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_type %xdefine _ASN1_STRING_type_new _ %+ BORINGSSL_PREFIX %+ _ASN1_STRING_type_new %xdefine _ASN1_T61STRING_free _ %+ BORINGSSL_PREFIX %+ _ASN1_T61STRING_free %xdefine _ASN1_T61STRING_it _ %+ BORINGSSL_PREFIX %+ _ASN1_T61STRING_it %xdefine _ASN1_T61STRING_new _ %+ BORINGSSL_PREFIX %+ _ASN1_T61STRING_new %xdefine _ASN1_TBOOLEAN_it _ %+ BORINGSSL_PREFIX %+ _ASN1_TBOOLEAN_it %xdefine _ASN1_TIME_adj _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_adj %xdefine _ASN1_TIME_check _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_check %xdefine _ASN1_TIME_diff _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_diff %xdefine _ASN1_TIME_free _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_free %xdefine _ASN1_TIME_it _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_it %xdefine _ASN1_TIME_new _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_new %xdefine _ASN1_TIME_print _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_print %xdefine _ASN1_TIME_set _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_set %xdefine _ASN1_TIME_set_posix _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_set_posix %xdefine _ASN1_TIME_set_string _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_set_string %xdefine _ASN1_TIME_set_string_X509 _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_set_string_X509 %xdefine _ASN1_TIME_to_generalizedtime _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_to_generalizedtime %xdefine _ASN1_TIME_to_posix _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_to_posix %xdefine 
_ASN1_TIME_to_posix_nonstandard _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_to_posix_nonstandard %xdefine _ASN1_TIME_to_time_t _ %+ BORINGSSL_PREFIX %+ _ASN1_TIME_to_time_t %xdefine _ASN1_TYPE_cmp _ %+ BORINGSSL_PREFIX %+ _ASN1_TYPE_cmp %xdefine _ASN1_TYPE_free _ %+ BORINGSSL_PREFIX %+ _ASN1_TYPE_free %xdefine _ASN1_TYPE_get _ %+ BORINGSSL_PREFIX %+ _ASN1_TYPE_get %xdefine _ASN1_TYPE_new _ %+ BORINGSSL_PREFIX %+ _ASN1_TYPE_new %xdefine _ASN1_TYPE_set _ %+ BORINGSSL_PREFIX %+ _ASN1_TYPE_set %xdefine _ASN1_TYPE_set1 _ %+ BORINGSSL_PREFIX %+ _ASN1_TYPE_set1 %xdefine _ASN1_UNIVERSALSTRING_free _ %+ BORINGSSL_PREFIX %+ _ASN1_UNIVERSALSTRING_free %xdefine _ASN1_UNIVERSALSTRING_it _ %+ BORINGSSL_PREFIX %+ _ASN1_UNIVERSALSTRING_it %xdefine _ASN1_UNIVERSALSTRING_new _ %+ BORINGSSL_PREFIX %+ _ASN1_UNIVERSALSTRING_new %xdefine _ASN1_UTCTIME_adj _ %+ BORINGSSL_PREFIX %+ _ASN1_UTCTIME_adj %xdefine _ASN1_UTCTIME_check _ %+ BORINGSSL_PREFIX %+ _ASN1_UTCTIME_check %xdefine _ASN1_UTCTIME_free _ %+ BORINGSSL_PREFIX %+ _ASN1_UTCTIME_free %xdefine _ASN1_UTCTIME_it _ %+ BORINGSSL_PREFIX %+ _ASN1_UTCTIME_it %xdefine _ASN1_UTCTIME_new _ %+ BORINGSSL_PREFIX %+ _ASN1_UTCTIME_new %xdefine _ASN1_UTCTIME_print _ %+ BORINGSSL_PREFIX %+ _ASN1_UTCTIME_print %xdefine _ASN1_UTCTIME_set _ %+ BORINGSSL_PREFIX %+ _ASN1_UTCTIME_set %xdefine _ASN1_UTCTIME_set_string _ %+ BORINGSSL_PREFIX %+ _ASN1_UTCTIME_set_string %xdefine _ASN1_UTF8STRING_free _ %+ BORINGSSL_PREFIX %+ _ASN1_UTF8STRING_free %xdefine _ASN1_UTF8STRING_it _ %+ BORINGSSL_PREFIX %+ _ASN1_UTF8STRING_it %xdefine _ASN1_UTF8STRING_new _ %+ BORINGSSL_PREFIX %+ _ASN1_UTF8STRING_new %xdefine _ASN1_VISIBLESTRING_free _ %+ BORINGSSL_PREFIX %+ _ASN1_VISIBLESTRING_free %xdefine _ASN1_VISIBLESTRING_it _ %+ BORINGSSL_PREFIX %+ _ASN1_VISIBLESTRING_it %xdefine _ASN1_VISIBLESTRING_new _ %+ BORINGSSL_PREFIX %+ _ASN1_VISIBLESTRING_new %xdefine _ASN1_digest _ %+ BORINGSSL_PREFIX %+ _ASN1_digest %xdefine _ASN1_generate_v3 _ %+ BORINGSSL_PREFIX %+ _ASN1_generate_v3 
%xdefine _ASN1_get_object _ %+ BORINGSSL_PREFIX %+ _ASN1_get_object %xdefine _ASN1_item_d2i _ %+ BORINGSSL_PREFIX %+ _ASN1_item_d2i %xdefine _ASN1_item_d2i_bio _ %+ BORINGSSL_PREFIX %+ _ASN1_item_d2i_bio %xdefine _ASN1_item_d2i_fp _ %+ BORINGSSL_PREFIX %+ _ASN1_item_d2i_fp %xdefine _ASN1_item_digest _ %+ BORINGSSL_PREFIX %+ _ASN1_item_digest %xdefine _ASN1_item_dup _ %+ BORINGSSL_PREFIX %+ _ASN1_item_dup %xdefine _ASN1_item_ex_d2i _ %+ BORINGSSL_PREFIX %+ _ASN1_item_ex_d2i %xdefine _ASN1_item_ex_free _ %+ BORINGSSL_PREFIX %+ _ASN1_item_ex_free %xdefine _ASN1_item_ex_i2d _ %+ BORINGSSL_PREFIX %+ _ASN1_item_ex_i2d %xdefine _ASN1_item_ex_new _ %+ BORINGSSL_PREFIX %+ _ASN1_item_ex_new %xdefine _ASN1_item_free _ %+ BORINGSSL_PREFIX %+ _ASN1_item_free %xdefine _ASN1_item_i2d _ %+ BORINGSSL_PREFIX %+ _ASN1_item_i2d %xdefine _ASN1_item_i2d_bio _ %+ BORINGSSL_PREFIX %+ _ASN1_item_i2d_bio %xdefine _ASN1_item_i2d_fp _ %+ BORINGSSL_PREFIX %+ _ASN1_item_i2d_fp %xdefine _ASN1_item_new _ %+ BORINGSSL_PREFIX %+ _ASN1_item_new %xdefine _ASN1_item_pack _ %+ BORINGSSL_PREFIX %+ _ASN1_item_pack %xdefine _ASN1_item_sign _ %+ BORINGSSL_PREFIX %+ _ASN1_item_sign %xdefine _ASN1_item_sign_ctx _ %+ BORINGSSL_PREFIX %+ _ASN1_item_sign_ctx %xdefine _ASN1_item_unpack _ %+ BORINGSSL_PREFIX %+ _ASN1_item_unpack %xdefine _ASN1_item_verify _ %+ BORINGSSL_PREFIX %+ _ASN1_item_verify %xdefine _ASN1_mbstring_copy _ %+ BORINGSSL_PREFIX %+ _ASN1_mbstring_copy %xdefine _ASN1_mbstring_ncopy _ %+ BORINGSSL_PREFIX %+ _ASN1_mbstring_ncopy %xdefine _ASN1_object_size _ %+ BORINGSSL_PREFIX %+ _ASN1_object_size %xdefine _ASN1_primitive_free _ %+ BORINGSSL_PREFIX %+ _ASN1_primitive_free %xdefine _ASN1_put_eoc _ %+ BORINGSSL_PREFIX %+ _ASN1_put_eoc %xdefine _ASN1_put_object _ %+ BORINGSSL_PREFIX %+ _ASN1_put_object %xdefine _ASN1_tag2bit _ %+ BORINGSSL_PREFIX %+ _ASN1_tag2bit %xdefine _ASN1_tag2str _ %+ BORINGSSL_PREFIX %+ _ASN1_tag2str %xdefine _ASN1_template_free _ %+ BORINGSSL_PREFIX %+ _ASN1_template_free 
%xdefine _AUTHORITY_INFO_ACCESS_free _ %+ BORINGSSL_PREFIX %+ _AUTHORITY_INFO_ACCESS_free %xdefine _AUTHORITY_INFO_ACCESS_it _ %+ BORINGSSL_PREFIX %+ _AUTHORITY_INFO_ACCESS_it %xdefine _AUTHORITY_INFO_ACCESS_new _ %+ BORINGSSL_PREFIX %+ _AUTHORITY_INFO_ACCESS_new %xdefine _AUTHORITY_KEYID_free _ %+ BORINGSSL_PREFIX %+ _AUTHORITY_KEYID_free %xdefine _AUTHORITY_KEYID_it _ %+ BORINGSSL_PREFIX %+ _AUTHORITY_KEYID_it %xdefine _AUTHORITY_KEYID_new _ %+ BORINGSSL_PREFIX %+ _AUTHORITY_KEYID_new %xdefine _BASIC_CONSTRAINTS_free _ %+ BORINGSSL_PREFIX %+ _BASIC_CONSTRAINTS_free %xdefine _BASIC_CONSTRAINTS_it _ %+ BORINGSSL_PREFIX %+ _BASIC_CONSTRAINTS_it %xdefine _BASIC_CONSTRAINTS_new _ %+ BORINGSSL_PREFIX %+ _BASIC_CONSTRAINTS_new %xdefine _BCM_fips_186_2_prf _ %+ BORINGSSL_PREFIX %+ _BCM_fips_186_2_prf %xdefine _BCM_mldsa65_generate_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_generate_key %xdefine _BCM_mldsa65_generate_key_external_entropy _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_generate_key_external_entropy %xdefine _BCM_mldsa65_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_marshal_private_key %xdefine _BCM_mldsa65_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_marshal_public_key %xdefine _BCM_mldsa65_parse_private_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_parse_private_key %xdefine _BCM_mldsa65_parse_public_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_parse_public_key %xdefine _BCM_mldsa65_private_key_from_seed _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_private_key_from_seed %xdefine _BCM_mldsa65_public_from_private _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_public_from_private %xdefine _BCM_mldsa65_sign _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_sign %xdefine _BCM_mldsa65_sign_internal _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_sign_internal %xdefine _BCM_mldsa65_verify _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_verify %xdefine _BCM_mldsa65_verify_internal _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa65_verify_internal %xdefine _BCM_mldsa87_generate_key _ %+ BORINGSSL_PREFIX %+ 
_BCM_mldsa87_generate_key %xdefine _BCM_mldsa87_generate_key_external_entropy _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_generate_key_external_entropy %xdefine _BCM_mldsa87_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_marshal_private_key %xdefine _BCM_mldsa87_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_marshal_public_key %xdefine _BCM_mldsa87_parse_private_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_parse_private_key %xdefine _BCM_mldsa87_parse_public_key _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_parse_public_key %xdefine _BCM_mldsa87_private_key_from_seed _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_private_key_from_seed %xdefine _BCM_mldsa87_public_from_private _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_public_from_private %xdefine _BCM_mldsa87_sign _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_sign %xdefine _BCM_mldsa87_sign_internal _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_sign_internal %xdefine _BCM_mldsa87_verify _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_verify %xdefine _BCM_mldsa87_verify_internal _ %+ BORINGSSL_PREFIX %+ _BCM_mldsa87_verify_internal %xdefine _BCM_mlkem1024_decap _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem1024_decap %xdefine _BCM_mlkem1024_encap _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem1024_encap %xdefine _BCM_mlkem1024_encap_external_entropy _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem1024_encap_external_entropy %xdefine _BCM_mlkem1024_generate_key _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem1024_generate_key %xdefine _BCM_mlkem1024_generate_key_external_seed _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem1024_generate_key_external_seed %xdefine _BCM_mlkem1024_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem1024_marshal_private_key %xdefine _BCM_mlkem1024_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem1024_marshal_public_key %xdefine _BCM_mlkem1024_parse_private_key _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem1024_parse_private_key %xdefine _BCM_mlkem1024_parse_public_key _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem1024_parse_public_key %xdefine _BCM_mlkem1024_private_key_from_seed _ %+ 
BORINGSSL_PREFIX %+ _BCM_mlkem1024_private_key_from_seed %xdefine _BCM_mlkem1024_public_from_private _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem1024_public_from_private %xdefine _BCM_mlkem768_decap _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem768_decap %xdefine _BCM_mlkem768_encap _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem768_encap %xdefine _BCM_mlkem768_encap_external_entropy _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem768_encap_external_entropy %xdefine _BCM_mlkem768_generate_key _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem768_generate_key %xdefine _BCM_mlkem768_generate_key_external_seed _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem768_generate_key_external_seed %xdefine _BCM_mlkem768_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem768_marshal_private_key %xdefine _BCM_mlkem768_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem768_marshal_public_key %xdefine _BCM_mlkem768_parse_private_key _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem768_parse_private_key %xdefine _BCM_mlkem768_parse_public_key _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem768_parse_public_key %xdefine _BCM_mlkem768_private_key_from_seed _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem768_private_key_from_seed %xdefine _BCM_mlkem768_public_from_private _ %+ BORINGSSL_PREFIX %+ _BCM_mlkem768_public_from_private %xdefine _BCM_rand_bytes _ %+ BORINGSSL_PREFIX %+ _BCM_rand_bytes %xdefine _BCM_rand_bytes_hwrng _ %+ BORINGSSL_PREFIX %+ _BCM_rand_bytes_hwrng %xdefine _BCM_rand_bytes_with_additional_data _ %+ BORINGSSL_PREFIX %+ _BCM_rand_bytes_with_additional_data %xdefine _BCM_sha1_final _ %+ BORINGSSL_PREFIX %+ _BCM_sha1_final %xdefine _BCM_sha1_init _ %+ BORINGSSL_PREFIX %+ _BCM_sha1_init %xdefine _BCM_sha1_transform _ %+ BORINGSSL_PREFIX %+ _BCM_sha1_transform %xdefine _BCM_sha1_update _ %+ BORINGSSL_PREFIX %+ _BCM_sha1_update %xdefine _BCM_sha224_final _ %+ BORINGSSL_PREFIX %+ _BCM_sha224_final %xdefine _BCM_sha224_init _ %+ BORINGSSL_PREFIX %+ _BCM_sha224_init %xdefine _BCM_sha224_update _ %+ BORINGSSL_PREFIX %+ _BCM_sha224_update %xdefine _BCM_sha256_final _ %+ 
BORINGSSL_PREFIX %+ _BCM_sha256_final %xdefine _BCM_sha256_init _ %+ BORINGSSL_PREFIX %+ _BCM_sha256_init %xdefine _BCM_sha256_transform _ %+ BORINGSSL_PREFIX %+ _BCM_sha256_transform %xdefine _BCM_sha256_transform_blocks _ %+ BORINGSSL_PREFIX %+ _BCM_sha256_transform_blocks %xdefine _BCM_sha256_update _ %+ BORINGSSL_PREFIX %+ _BCM_sha256_update %xdefine _BCM_sha384_final _ %+ BORINGSSL_PREFIX %+ _BCM_sha384_final %xdefine _BCM_sha384_init _ %+ BORINGSSL_PREFIX %+ _BCM_sha384_init %xdefine _BCM_sha384_update _ %+ BORINGSSL_PREFIX %+ _BCM_sha384_update %xdefine _BCM_sha512_256_final _ %+ BORINGSSL_PREFIX %+ _BCM_sha512_256_final %xdefine _BCM_sha512_256_init _ %+ BORINGSSL_PREFIX %+ _BCM_sha512_256_init %xdefine _BCM_sha512_256_update _ %+ BORINGSSL_PREFIX %+ _BCM_sha512_256_update %xdefine _BCM_sha512_final _ %+ BORINGSSL_PREFIX %+ _BCM_sha512_final %xdefine _BCM_sha512_init _ %+ BORINGSSL_PREFIX %+ _BCM_sha512_init %xdefine _BCM_sha512_transform _ %+ BORINGSSL_PREFIX %+ _BCM_sha512_transform %xdefine _BCM_sha512_update _ %+ BORINGSSL_PREFIX %+ _BCM_sha512_update %xdefine _BCM_slhdsa_sha2_128s_generate_key _ %+ BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_generate_key %xdefine _BCM_slhdsa_sha2_128s_generate_key_from_seed _ %+ BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_generate_key_from_seed %xdefine _BCM_slhdsa_sha2_128s_prehash_sign _ %+ BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_prehash_sign %xdefine _BCM_slhdsa_sha2_128s_prehash_verify _ %+ BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_prehash_verify %xdefine _BCM_slhdsa_sha2_128s_public_from_private _ %+ BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_public_from_private %xdefine _BCM_slhdsa_sha2_128s_sign _ %+ BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_sign %xdefine _BCM_slhdsa_sha2_128s_sign_internal _ %+ BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_sign_internal %xdefine _BCM_slhdsa_sha2_128s_verify _ %+ BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_verify %xdefine _BCM_slhdsa_sha2_128s_verify_internal _ %+ BORINGSSL_PREFIX %+ 
_BCM_slhdsa_sha2_128s_verify_internal %xdefine _BIO_append_filename _ %+ BORINGSSL_PREFIX %+ _BIO_append_filename %xdefine _BIO_callback_ctrl _ %+ BORINGSSL_PREFIX %+ _BIO_callback_ctrl %xdefine _BIO_clear_flags _ %+ BORINGSSL_PREFIX %+ _BIO_clear_flags %xdefine _BIO_clear_retry_flags _ %+ BORINGSSL_PREFIX %+ _BIO_clear_retry_flags %xdefine _BIO_copy_next_retry _ %+ BORINGSSL_PREFIX %+ _BIO_copy_next_retry %xdefine _BIO_ctrl _ %+ BORINGSSL_PREFIX %+ _BIO_ctrl %xdefine _BIO_ctrl_get_read_request _ %+ BORINGSSL_PREFIX %+ _BIO_ctrl_get_read_request %xdefine _BIO_ctrl_get_write_guarantee _ %+ BORINGSSL_PREFIX %+ _BIO_ctrl_get_write_guarantee %xdefine _BIO_ctrl_pending _ %+ BORINGSSL_PREFIX %+ _BIO_ctrl_pending %xdefine _BIO_do_connect _ %+ BORINGSSL_PREFIX %+ _BIO_do_connect %xdefine _BIO_eof _ %+ BORINGSSL_PREFIX %+ _BIO_eof %xdefine _BIO_f_ssl _ %+ BORINGSSL_PREFIX %+ _BIO_f_ssl %xdefine _BIO_find_type _ %+ BORINGSSL_PREFIX %+ _BIO_find_type %xdefine _BIO_flush _ %+ BORINGSSL_PREFIX %+ _BIO_flush %xdefine _BIO_free _ %+ BORINGSSL_PREFIX %+ _BIO_free %xdefine _BIO_free_all _ %+ BORINGSSL_PREFIX %+ _BIO_free_all %xdefine _BIO_get_data _ %+ BORINGSSL_PREFIX %+ _BIO_get_data %xdefine _BIO_get_ex_data _ %+ BORINGSSL_PREFIX %+ _BIO_get_ex_data %xdefine _BIO_get_ex_new_index _ %+ BORINGSSL_PREFIX %+ _BIO_get_ex_new_index %xdefine _BIO_get_fd _ %+ BORINGSSL_PREFIX %+ _BIO_get_fd %xdefine _BIO_get_fp _ %+ BORINGSSL_PREFIX %+ _BIO_get_fp %xdefine _BIO_get_init _ %+ BORINGSSL_PREFIX %+ _BIO_get_init %xdefine _BIO_get_mem_data _ %+ BORINGSSL_PREFIX %+ _BIO_get_mem_data %xdefine _BIO_get_mem_ptr _ %+ BORINGSSL_PREFIX %+ _BIO_get_mem_ptr %xdefine _BIO_get_new_index _ %+ BORINGSSL_PREFIX %+ _BIO_get_new_index %xdefine _BIO_get_retry_flags _ %+ BORINGSSL_PREFIX %+ _BIO_get_retry_flags %xdefine _BIO_get_retry_reason _ %+ BORINGSSL_PREFIX %+ _BIO_get_retry_reason %xdefine _BIO_get_shutdown _ %+ BORINGSSL_PREFIX %+ _BIO_get_shutdown %xdefine _BIO_gets _ %+ BORINGSSL_PREFIX %+ _BIO_gets 
%xdefine _BIO_hexdump _ %+ BORINGSSL_PREFIX %+ _BIO_hexdump %xdefine _BIO_indent _ %+ BORINGSSL_PREFIX %+ _BIO_indent %xdefine _BIO_int_ctrl _ %+ BORINGSSL_PREFIX %+ _BIO_int_ctrl %xdefine _BIO_mem_contents _ %+ BORINGSSL_PREFIX %+ _BIO_mem_contents %xdefine _BIO_meth_free _ %+ BORINGSSL_PREFIX %+ _BIO_meth_free %xdefine _BIO_meth_new _ %+ BORINGSSL_PREFIX %+ _BIO_meth_new %xdefine _BIO_meth_set_create _ %+ BORINGSSL_PREFIX %+ _BIO_meth_set_create %xdefine _BIO_meth_set_ctrl _ %+ BORINGSSL_PREFIX %+ _BIO_meth_set_ctrl %xdefine _BIO_meth_set_destroy _ %+ BORINGSSL_PREFIX %+ _BIO_meth_set_destroy %xdefine _BIO_meth_set_gets _ %+ BORINGSSL_PREFIX %+ _BIO_meth_set_gets %xdefine _BIO_meth_set_puts _ %+ BORINGSSL_PREFIX %+ _BIO_meth_set_puts %xdefine _BIO_meth_set_read _ %+ BORINGSSL_PREFIX %+ _BIO_meth_set_read %xdefine _BIO_meth_set_write _ %+ BORINGSSL_PREFIX %+ _BIO_meth_set_write %xdefine _BIO_method_type _ %+ BORINGSSL_PREFIX %+ _BIO_method_type %xdefine _BIO_new _ %+ BORINGSSL_PREFIX %+ _BIO_new %xdefine _BIO_new_bio_pair _ %+ BORINGSSL_PREFIX %+ _BIO_new_bio_pair %xdefine _BIO_new_connect _ %+ BORINGSSL_PREFIX %+ _BIO_new_connect %xdefine _BIO_new_fd _ %+ BORINGSSL_PREFIX %+ _BIO_new_fd %xdefine _BIO_new_file _ %+ BORINGSSL_PREFIX %+ _BIO_new_file %xdefine _BIO_new_fp _ %+ BORINGSSL_PREFIX %+ _BIO_new_fp %xdefine _BIO_new_mem_buf _ %+ BORINGSSL_PREFIX %+ _BIO_new_mem_buf %xdefine _BIO_new_socket _ %+ BORINGSSL_PREFIX %+ _BIO_new_socket %xdefine _BIO_next _ %+ BORINGSSL_PREFIX %+ _BIO_next %xdefine _BIO_number_read _ %+ BORINGSSL_PREFIX %+ _BIO_number_read %xdefine _BIO_number_written _ %+ BORINGSSL_PREFIX %+ _BIO_number_written %xdefine _BIO_pending _ %+ BORINGSSL_PREFIX %+ _BIO_pending %xdefine _BIO_pop _ %+ BORINGSSL_PREFIX %+ _BIO_pop %xdefine _BIO_printf _ %+ BORINGSSL_PREFIX %+ _BIO_printf %xdefine _BIO_ptr_ctrl _ %+ BORINGSSL_PREFIX %+ _BIO_ptr_ctrl %xdefine _BIO_push _ %+ BORINGSSL_PREFIX %+ _BIO_push %xdefine _BIO_puts _ %+ BORINGSSL_PREFIX %+ _BIO_puts 
%xdefine _BIO_read _ %+ BORINGSSL_PREFIX %+ _BIO_read %xdefine _BIO_read_asn1 _ %+ BORINGSSL_PREFIX %+ _BIO_read_asn1 %xdefine _BIO_read_filename _ %+ BORINGSSL_PREFIX %+ _BIO_read_filename %xdefine _BIO_reset _ %+ BORINGSSL_PREFIX %+ _BIO_reset %xdefine _BIO_rw_filename _ %+ BORINGSSL_PREFIX %+ _BIO_rw_filename %xdefine _BIO_s_connect _ %+ BORINGSSL_PREFIX %+ _BIO_s_connect %xdefine _BIO_s_fd _ %+ BORINGSSL_PREFIX %+ _BIO_s_fd %xdefine _BIO_s_file _ %+ BORINGSSL_PREFIX %+ _BIO_s_file %xdefine _BIO_s_mem _ %+ BORINGSSL_PREFIX %+ _BIO_s_mem %xdefine _BIO_s_socket _ %+ BORINGSSL_PREFIX %+ _BIO_s_socket %xdefine _BIO_seek _ %+ BORINGSSL_PREFIX %+ _BIO_seek %xdefine _BIO_set_close _ %+ BORINGSSL_PREFIX %+ _BIO_set_close %xdefine _BIO_set_conn_hostname _ %+ BORINGSSL_PREFIX %+ _BIO_set_conn_hostname %xdefine _BIO_set_conn_int_port _ %+ BORINGSSL_PREFIX %+ _BIO_set_conn_int_port %xdefine _BIO_set_conn_port _ %+ BORINGSSL_PREFIX %+ _BIO_set_conn_port %xdefine _BIO_set_data _ %+ BORINGSSL_PREFIX %+ _BIO_set_data %xdefine _BIO_set_ex_data _ %+ BORINGSSL_PREFIX %+ _BIO_set_ex_data %xdefine _BIO_set_fd _ %+ BORINGSSL_PREFIX %+ _BIO_set_fd %xdefine _BIO_set_flags _ %+ BORINGSSL_PREFIX %+ _BIO_set_flags %xdefine _BIO_set_fp _ %+ BORINGSSL_PREFIX %+ _BIO_set_fp %xdefine _BIO_set_init _ %+ BORINGSSL_PREFIX %+ _BIO_set_init %xdefine _BIO_set_mem_buf _ %+ BORINGSSL_PREFIX %+ _BIO_set_mem_buf %xdefine _BIO_set_mem_eof_return _ %+ BORINGSSL_PREFIX %+ _BIO_set_mem_eof_return %xdefine _BIO_set_nbio _ %+ BORINGSSL_PREFIX %+ _BIO_set_nbio %xdefine _BIO_set_retry_read _ %+ BORINGSSL_PREFIX %+ _BIO_set_retry_read %xdefine _BIO_set_retry_reason _ %+ BORINGSSL_PREFIX %+ _BIO_set_retry_reason %xdefine _BIO_set_retry_special _ %+ BORINGSSL_PREFIX %+ _BIO_set_retry_special %xdefine _BIO_set_retry_write _ %+ BORINGSSL_PREFIX %+ _BIO_set_retry_write %xdefine _BIO_set_shutdown _ %+ BORINGSSL_PREFIX %+ _BIO_set_shutdown %xdefine _BIO_set_ssl _ %+ BORINGSSL_PREFIX %+ _BIO_set_ssl %xdefine 
_BIO_set_write_buffer_size _ %+ BORINGSSL_PREFIX %+ _BIO_set_write_buffer_size %xdefine _BIO_should_io_special _ %+ BORINGSSL_PREFIX %+ _BIO_should_io_special %xdefine _BIO_should_read _ %+ BORINGSSL_PREFIX %+ _BIO_should_read %xdefine _BIO_should_retry _ %+ BORINGSSL_PREFIX %+ _BIO_should_retry %xdefine _BIO_should_write _ %+ BORINGSSL_PREFIX %+ _BIO_should_write %xdefine _BIO_shutdown_wr _ %+ BORINGSSL_PREFIX %+ _BIO_shutdown_wr %xdefine _BIO_snprintf _ %+ BORINGSSL_PREFIX %+ _BIO_snprintf %xdefine _BIO_tell _ %+ BORINGSSL_PREFIX %+ _BIO_tell %xdefine _BIO_test_flags _ %+ BORINGSSL_PREFIX %+ _BIO_test_flags %xdefine _BIO_up_ref _ %+ BORINGSSL_PREFIX %+ _BIO_up_ref %xdefine _BIO_vfree _ %+ BORINGSSL_PREFIX %+ _BIO_vfree %xdefine _BIO_vsnprintf _ %+ BORINGSSL_PREFIX %+ _BIO_vsnprintf %xdefine _BIO_wpending _ %+ BORINGSSL_PREFIX %+ _BIO_wpending %xdefine _BIO_write _ %+ BORINGSSL_PREFIX %+ _BIO_write %xdefine _BIO_write_all _ %+ BORINGSSL_PREFIX %+ _BIO_write_all %xdefine _BIO_write_filename _ %+ BORINGSSL_PREFIX %+ _BIO_write_filename %xdefine _BLAKE2B256 _ %+ BORINGSSL_PREFIX %+ _BLAKE2B256 %xdefine _BLAKE2B256_Final _ %+ BORINGSSL_PREFIX %+ _BLAKE2B256_Final %xdefine _BLAKE2B256_Init _ %+ BORINGSSL_PREFIX %+ _BLAKE2B256_Init %xdefine _BLAKE2B256_Update _ %+ BORINGSSL_PREFIX %+ _BLAKE2B256_Update %xdefine _BN_BLINDING_convert _ %+ BORINGSSL_PREFIX %+ _BN_BLINDING_convert %xdefine _BN_BLINDING_free _ %+ BORINGSSL_PREFIX %+ _BN_BLINDING_free %xdefine _BN_BLINDING_invalidate _ %+ BORINGSSL_PREFIX %+ _BN_BLINDING_invalidate %xdefine _BN_BLINDING_invert _ %+ BORINGSSL_PREFIX %+ _BN_BLINDING_invert %xdefine _BN_BLINDING_new _ %+ BORINGSSL_PREFIX %+ _BN_BLINDING_new %xdefine _BN_CTX_end _ %+ BORINGSSL_PREFIX %+ _BN_CTX_end %xdefine _BN_CTX_free _ %+ BORINGSSL_PREFIX %+ _BN_CTX_free %xdefine _BN_CTX_get _ %+ BORINGSSL_PREFIX %+ _BN_CTX_get %xdefine _BN_CTX_new _ %+ BORINGSSL_PREFIX %+ _BN_CTX_new %xdefine _BN_CTX_start _ %+ BORINGSSL_PREFIX %+ _BN_CTX_start %xdefine 
_BN_GENCB_call _ %+ BORINGSSL_PREFIX %+ _BN_GENCB_call %xdefine _BN_GENCB_free _ %+ BORINGSSL_PREFIX %+ _BN_GENCB_free %xdefine _BN_GENCB_get_arg _ %+ BORINGSSL_PREFIX %+ _BN_GENCB_get_arg %xdefine _BN_GENCB_new _ %+ BORINGSSL_PREFIX %+ _BN_GENCB_new %xdefine _BN_GENCB_set _ %+ BORINGSSL_PREFIX %+ _BN_GENCB_set %xdefine _BN_MONT_CTX_copy _ %+ BORINGSSL_PREFIX %+ _BN_MONT_CTX_copy %xdefine _BN_MONT_CTX_free _ %+ BORINGSSL_PREFIX %+ _BN_MONT_CTX_free %xdefine _BN_MONT_CTX_new _ %+ BORINGSSL_PREFIX %+ _BN_MONT_CTX_new %xdefine _BN_MONT_CTX_new_consttime _ %+ BORINGSSL_PREFIX %+ _BN_MONT_CTX_new_consttime %xdefine _BN_MONT_CTX_new_for_modulus _ %+ BORINGSSL_PREFIX %+ _BN_MONT_CTX_new_for_modulus %xdefine _BN_MONT_CTX_set _ %+ BORINGSSL_PREFIX %+ _BN_MONT_CTX_set %xdefine _BN_MONT_CTX_set_locked _ %+ BORINGSSL_PREFIX %+ _BN_MONT_CTX_set_locked %xdefine _BN_abs_is_word _ %+ BORINGSSL_PREFIX %+ _BN_abs_is_word %xdefine _BN_add _ %+ BORINGSSL_PREFIX %+ _BN_add %xdefine _BN_add_word _ %+ BORINGSSL_PREFIX %+ _BN_add_word %xdefine _BN_asc2bn _ %+ BORINGSSL_PREFIX %+ _BN_asc2bn %xdefine _BN_bin2bn _ %+ BORINGSSL_PREFIX %+ _BN_bin2bn %xdefine _BN_bn2bin _ %+ BORINGSSL_PREFIX %+ _BN_bn2bin %xdefine _BN_bn2bin_padded _ %+ BORINGSSL_PREFIX %+ _BN_bn2bin_padded %xdefine _BN_bn2binpad _ %+ BORINGSSL_PREFIX %+ _BN_bn2binpad %xdefine _BN_bn2cbb_padded _ %+ BORINGSSL_PREFIX %+ _BN_bn2cbb_padded %xdefine _BN_bn2dec _ %+ BORINGSSL_PREFIX %+ _BN_bn2dec %xdefine _BN_bn2hex _ %+ BORINGSSL_PREFIX %+ _BN_bn2hex %xdefine _BN_bn2le_padded _ %+ BORINGSSL_PREFIX %+ _BN_bn2le_padded %xdefine _BN_bn2lebinpad _ %+ BORINGSSL_PREFIX %+ _BN_bn2lebinpad %xdefine _BN_bn2mpi _ %+ BORINGSSL_PREFIX %+ _BN_bn2mpi %xdefine _BN_clear _ %+ BORINGSSL_PREFIX %+ _BN_clear %xdefine _BN_clear_bit _ %+ BORINGSSL_PREFIX %+ _BN_clear_bit %xdefine _BN_clear_free _ %+ BORINGSSL_PREFIX %+ _BN_clear_free %xdefine _BN_cmp _ %+ BORINGSSL_PREFIX %+ _BN_cmp %xdefine _BN_cmp_word _ %+ BORINGSSL_PREFIX %+ _BN_cmp_word %xdefine 
_BN_copy _ %+ BORINGSSL_PREFIX %+ _BN_copy %xdefine _BN_count_low_zero_bits _ %+ BORINGSSL_PREFIX %+ _BN_count_low_zero_bits %xdefine _BN_dec2bn _ %+ BORINGSSL_PREFIX %+ _BN_dec2bn %xdefine _BN_div _ %+ BORINGSSL_PREFIX %+ _BN_div %xdefine _BN_div_word _ %+ BORINGSSL_PREFIX %+ _BN_div_word %xdefine _BN_dup _ %+ BORINGSSL_PREFIX %+ _BN_dup %xdefine _BN_enhanced_miller_rabin_primality_test _ %+ BORINGSSL_PREFIX %+ _BN_enhanced_miller_rabin_primality_test %xdefine _BN_equal_consttime _ %+ BORINGSSL_PREFIX %+ _BN_equal_consttime %xdefine _BN_exp _ %+ BORINGSSL_PREFIX %+ _BN_exp %xdefine _BN_free _ %+ BORINGSSL_PREFIX %+ _BN_free %xdefine _BN_from_montgomery _ %+ BORINGSSL_PREFIX %+ _BN_from_montgomery %xdefine _BN_gcd _ %+ BORINGSSL_PREFIX %+ _BN_gcd %xdefine _BN_generate_prime_ex _ %+ BORINGSSL_PREFIX %+ _BN_generate_prime_ex %xdefine _BN_get_rfc3526_prime_1536 _ %+ BORINGSSL_PREFIX %+ _BN_get_rfc3526_prime_1536 %xdefine _BN_get_rfc3526_prime_2048 _ %+ BORINGSSL_PREFIX %+ _BN_get_rfc3526_prime_2048 %xdefine _BN_get_rfc3526_prime_3072 _ %+ BORINGSSL_PREFIX %+ _BN_get_rfc3526_prime_3072 %xdefine _BN_get_rfc3526_prime_4096 _ %+ BORINGSSL_PREFIX %+ _BN_get_rfc3526_prime_4096 %xdefine _BN_get_rfc3526_prime_6144 _ %+ BORINGSSL_PREFIX %+ _BN_get_rfc3526_prime_6144 %xdefine _BN_get_rfc3526_prime_8192 _ %+ BORINGSSL_PREFIX %+ _BN_get_rfc3526_prime_8192 %xdefine _BN_get_u64 _ %+ BORINGSSL_PREFIX %+ _BN_get_u64 %xdefine _BN_get_word _ %+ BORINGSSL_PREFIX %+ _BN_get_word %xdefine _BN_hex2bn _ %+ BORINGSSL_PREFIX %+ _BN_hex2bn %xdefine _BN_init _ %+ BORINGSSL_PREFIX %+ _BN_init %xdefine _BN_is_bit_set _ %+ BORINGSSL_PREFIX %+ _BN_is_bit_set %xdefine _BN_is_negative _ %+ BORINGSSL_PREFIX %+ _BN_is_negative %xdefine _BN_is_odd _ %+ BORINGSSL_PREFIX %+ _BN_is_odd %xdefine _BN_is_one _ %+ BORINGSSL_PREFIX %+ _BN_is_one %xdefine _BN_is_pow2 _ %+ BORINGSSL_PREFIX %+ _BN_is_pow2 %xdefine _BN_is_prime_ex _ %+ BORINGSSL_PREFIX %+ _BN_is_prime_ex %xdefine _BN_is_prime_fasttest_ex _ %+ 
BORINGSSL_PREFIX %+ _BN_is_prime_fasttest_ex %xdefine _BN_is_word _ %+ BORINGSSL_PREFIX %+ _BN_is_word %xdefine _BN_is_zero _ %+ BORINGSSL_PREFIX %+ _BN_is_zero %xdefine _BN_le2bn _ %+ BORINGSSL_PREFIX %+ _BN_le2bn %xdefine _BN_lebin2bn _ %+ BORINGSSL_PREFIX %+ _BN_lebin2bn %xdefine _BN_lshift _ %+ BORINGSSL_PREFIX %+ _BN_lshift %xdefine _BN_lshift1 _ %+ BORINGSSL_PREFIX %+ _BN_lshift1 %xdefine _BN_marshal_asn1 _ %+ BORINGSSL_PREFIX %+ _BN_marshal_asn1 %xdefine _BN_mask_bits _ %+ BORINGSSL_PREFIX %+ _BN_mask_bits %xdefine _BN_mod_add _ %+ BORINGSSL_PREFIX %+ _BN_mod_add %xdefine _BN_mod_add_quick _ %+ BORINGSSL_PREFIX %+ _BN_mod_add_quick %xdefine _BN_mod_exp _ %+ BORINGSSL_PREFIX %+ _BN_mod_exp %xdefine _BN_mod_exp2_mont _ %+ BORINGSSL_PREFIX %+ _BN_mod_exp2_mont %xdefine _BN_mod_exp_mont _ %+ BORINGSSL_PREFIX %+ _BN_mod_exp_mont %xdefine _BN_mod_exp_mont_consttime _ %+ BORINGSSL_PREFIX %+ _BN_mod_exp_mont_consttime %xdefine _BN_mod_exp_mont_word _ %+ BORINGSSL_PREFIX %+ _BN_mod_exp_mont_word %xdefine _BN_mod_inverse _ %+ BORINGSSL_PREFIX %+ _BN_mod_inverse %xdefine _BN_mod_inverse_blinded _ %+ BORINGSSL_PREFIX %+ _BN_mod_inverse_blinded %xdefine _BN_mod_inverse_odd _ %+ BORINGSSL_PREFIX %+ _BN_mod_inverse_odd %xdefine _BN_mod_lshift _ %+ BORINGSSL_PREFIX %+ _BN_mod_lshift %xdefine _BN_mod_lshift1 _ %+ BORINGSSL_PREFIX %+ _BN_mod_lshift1 %xdefine _BN_mod_lshift1_quick _ %+ BORINGSSL_PREFIX %+ _BN_mod_lshift1_quick %xdefine _BN_mod_lshift_quick _ %+ BORINGSSL_PREFIX %+ _BN_mod_lshift_quick %xdefine _BN_mod_mul _ %+ BORINGSSL_PREFIX %+ _BN_mod_mul %xdefine _BN_mod_mul_montgomery _ %+ BORINGSSL_PREFIX %+ _BN_mod_mul_montgomery %xdefine _BN_mod_pow2 _ %+ BORINGSSL_PREFIX %+ _BN_mod_pow2 %xdefine _BN_mod_sqr _ %+ BORINGSSL_PREFIX %+ _BN_mod_sqr %xdefine _BN_mod_sqrt _ %+ BORINGSSL_PREFIX %+ _BN_mod_sqrt %xdefine _BN_mod_sub _ %+ BORINGSSL_PREFIX %+ _BN_mod_sub %xdefine _BN_mod_sub_quick _ %+ BORINGSSL_PREFIX %+ _BN_mod_sub_quick %xdefine _BN_mod_word _ %+ 
BORINGSSL_PREFIX %+ _BN_mod_word %xdefine _BN_mpi2bn _ %+ BORINGSSL_PREFIX %+ _BN_mpi2bn %xdefine _BN_mul _ %+ BORINGSSL_PREFIX %+ _BN_mul %xdefine _BN_mul_word _ %+ BORINGSSL_PREFIX %+ _BN_mul_word %xdefine _BN_new _ %+ BORINGSSL_PREFIX %+ _BN_new %xdefine _BN_nnmod _ %+ BORINGSSL_PREFIX %+ _BN_nnmod %xdefine _BN_nnmod_pow2 _ %+ BORINGSSL_PREFIX %+ _BN_nnmod_pow2 %xdefine _BN_num_bits _ %+ BORINGSSL_PREFIX %+ _BN_num_bits %xdefine _BN_num_bits_word _ %+ BORINGSSL_PREFIX %+ _BN_num_bits_word %xdefine _BN_num_bytes _ %+ BORINGSSL_PREFIX %+ _BN_num_bytes %xdefine _BN_one _ %+ BORINGSSL_PREFIX %+ _BN_one %xdefine _BN_parse_asn1_unsigned _ %+ BORINGSSL_PREFIX %+ _BN_parse_asn1_unsigned %xdefine _BN_primality_test _ %+ BORINGSSL_PREFIX %+ _BN_primality_test %xdefine _BN_print _ %+ BORINGSSL_PREFIX %+ _BN_print %xdefine _BN_print_fp _ %+ BORINGSSL_PREFIX %+ _BN_print_fp %xdefine _BN_pseudo_rand _ %+ BORINGSSL_PREFIX %+ _BN_pseudo_rand %xdefine _BN_pseudo_rand_range _ %+ BORINGSSL_PREFIX %+ _BN_pseudo_rand_range %xdefine _BN_rand _ %+ BORINGSSL_PREFIX %+ _BN_rand %xdefine _BN_rand_range _ %+ BORINGSSL_PREFIX %+ _BN_rand_range %xdefine _BN_rand_range_ex _ %+ BORINGSSL_PREFIX %+ _BN_rand_range_ex %xdefine _BN_rshift _ %+ BORINGSSL_PREFIX %+ _BN_rshift %xdefine _BN_rshift1 _ %+ BORINGSSL_PREFIX %+ _BN_rshift1 %xdefine _BN_secure_new _ %+ BORINGSSL_PREFIX %+ _BN_secure_new %xdefine _BN_set_bit _ %+ BORINGSSL_PREFIX %+ _BN_set_bit %xdefine _BN_set_negative _ %+ BORINGSSL_PREFIX %+ _BN_set_negative %xdefine _BN_set_u64 _ %+ BORINGSSL_PREFIX %+ _BN_set_u64 %xdefine _BN_set_word _ %+ BORINGSSL_PREFIX %+ _BN_set_word %xdefine _BN_sqr _ %+ BORINGSSL_PREFIX %+ _BN_sqr %xdefine _BN_sqrt _ %+ BORINGSSL_PREFIX %+ _BN_sqrt %xdefine _BN_sub _ %+ BORINGSSL_PREFIX %+ _BN_sub %xdefine _BN_sub_word _ %+ BORINGSSL_PREFIX %+ _BN_sub_word %xdefine _BN_to_ASN1_ENUMERATED _ %+ BORINGSSL_PREFIX %+ _BN_to_ASN1_ENUMERATED %xdefine _BN_to_ASN1_INTEGER _ %+ BORINGSSL_PREFIX %+ _BN_to_ASN1_INTEGER 
%xdefine _BN_to_montgomery _ %+ BORINGSSL_PREFIX %+ _BN_to_montgomery %xdefine _BN_uadd _ %+ BORINGSSL_PREFIX %+ _BN_uadd %xdefine _BN_ucmp _ %+ BORINGSSL_PREFIX %+ _BN_ucmp %xdefine _BN_usub _ %+ BORINGSSL_PREFIX %+ _BN_usub %xdefine _BN_value_one _ %+ BORINGSSL_PREFIX %+ _BN_value_one %xdefine _BN_zero _ %+ BORINGSSL_PREFIX %+ _BN_zero %xdefine _BORINGSSL_keccak _ %+ BORINGSSL_PREFIX %+ _BORINGSSL_keccak %xdefine _BORINGSSL_keccak_absorb _ %+ BORINGSSL_PREFIX %+ _BORINGSSL_keccak_absorb %xdefine _BORINGSSL_keccak_init _ %+ BORINGSSL_PREFIX %+ _BORINGSSL_keccak_init %xdefine _BORINGSSL_keccak_squeeze _ %+ BORINGSSL_PREFIX %+ _BORINGSSL_keccak_squeeze %xdefine _BORINGSSL_self_test _ %+ BORINGSSL_PREFIX %+ _BORINGSSL_self_test %xdefine _BUF_MEM_append _ %+ BORINGSSL_PREFIX %+ _BUF_MEM_append %xdefine _BUF_MEM_free _ %+ BORINGSSL_PREFIX %+ _BUF_MEM_free %xdefine _BUF_MEM_grow _ %+ BORINGSSL_PREFIX %+ _BUF_MEM_grow %xdefine _BUF_MEM_grow_clean _ %+ BORINGSSL_PREFIX %+ _BUF_MEM_grow_clean %xdefine _BUF_MEM_new _ %+ BORINGSSL_PREFIX %+ _BUF_MEM_new %xdefine _BUF_MEM_reserve _ %+ BORINGSSL_PREFIX %+ _BUF_MEM_reserve %xdefine _BUF_memdup _ %+ BORINGSSL_PREFIX %+ _BUF_memdup %xdefine _BUF_strdup _ %+ BORINGSSL_PREFIX %+ _BUF_strdup %xdefine _BUF_strlcat _ %+ BORINGSSL_PREFIX %+ _BUF_strlcat %xdefine _BUF_strlcpy _ %+ BORINGSSL_PREFIX %+ _BUF_strlcpy %xdefine _BUF_strndup _ %+ BORINGSSL_PREFIX %+ _BUF_strndup %xdefine _BUF_strnlen _ %+ BORINGSSL_PREFIX %+ _BUF_strnlen %xdefine _CBB_add_asn1 _ %+ BORINGSSL_PREFIX %+ _CBB_add_asn1 %xdefine _CBB_add_asn1_bool _ %+ BORINGSSL_PREFIX %+ _CBB_add_asn1_bool %xdefine _CBB_add_asn1_int64 _ %+ BORINGSSL_PREFIX %+ _CBB_add_asn1_int64 %xdefine _CBB_add_asn1_int64_with_tag _ %+ BORINGSSL_PREFIX %+ _CBB_add_asn1_int64_with_tag %xdefine _CBB_add_asn1_octet_string _ %+ BORINGSSL_PREFIX %+ _CBB_add_asn1_octet_string %xdefine _CBB_add_asn1_oid_from_text _ %+ BORINGSSL_PREFIX %+ _CBB_add_asn1_oid_from_text %xdefine _CBB_add_asn1_uint64 _ %+ 
BORINGSSL_PREFIX %+ _CBB_add_asn1_uint64 %xdefine _CBB_add_asn1_uint64_with_tag _ %+ BORINGSSL_PREFIX %+ _CBB_add_asn1_uint64_with_tag %xdefine _CBB_add_bytes _ %+ BORINGSSL_PREFIX %+ _CBB_add_bytes %xdefine _CBB_add_latin1 _ %+ BORINGSSL_PREFIX %+ _CBB_add_latin1 %xdefine _CBB_add_space _ %+ BORINGSSL_PREFIX %+ _CBB_add_space %xdefine _CBB_add_u16 _ %+ BORINGSSL_PREFIX %+ _CBB_add_u16 %xdefine _CBB_add_u16_length_prefixed _ %+ BORINGSSL_PREFIX %+ _CBB_add_u16_length_prefixed %xdefine _CBB_add_u16le _ %+ BORINGSSL_PREFIX %+ _CBB_add_u16le %xdefine _CBB_add_u24 _ %+ BORINGSSL_PREFIX %+ _CBB_add_u24 %xdefine _CBB_add_u24_length_prefixed _ %+ BORINGSSL_PREFIX %+ _CBB_add_u24_length_prefixed %xdefine _CBB_add_u32 _ %+ BORINGSSL_PREFIX %+ _CBB_add_u32 %xdefine _CBB_add_u32le _ %+ BORINGSSL_PREFIX %+ _CBB_add_u32le %xdefine _CBB_add_u64 _ %+ BORINGSSL_PREFIX %+ _CBB_add_u64 %xdefine _CBB_add_u64le _ %+ BORINGSSL_PREFIX %+ _CBB_add_u64le %xdefine _CBB_add_u8 _ %+ BORINGSSL_PREFIX %+ _CBB_add_u8 %xdefine _CBB_add_u8_length_prefixed _ %+ BORINGSSL_PREFIX %+ _CBB_add_u8_length_prefixed %xdefine _CBB_add_ucs2_be _ %+ BORINGSSL_PREFIX %+ _CBB_add_ucs2_be %xdefine _CBB_add_utf32_be _ %+ BORINGSSL_PREFIX %+ _CBB_add_utf32_be %xdefine _CBB_add_utf8 _ %+ BORINGSSL_PREFIX %+ _CBB_add_utf8 %xdefine _CBB_add_zeros _ %+ BORINGSSL_PREFIX %+ _CBB_add_zeros %xdefine _CBB_cleanup _ %+ BORINGSSL_PREFIX %+ _CBB_cleanup %xdefine _CBB_data _ %+ BORINGSSL_PREFIX %+ _CBB_data %xdefine _CBB_did_write _ %+ BORINGSSL_PREFIX %+ _CBB_did_write %xdefine _CBB_discard_child _ %+ BORINGSSL_PREFIX %+ _CBB_discard_child %xdefine _CBB_finish _ %+ BORINGSSL_PREFIX %+ _CBB_finish %xdefine _CBB_finish_i2d _ %+ BORINGSSL_PREFIX %+ _CBB_finish_i2d %xdefine _CBB_flush _ %+ BORINGSSL_PREFIX %+ _CBB_flush %xdefine _CBB_flush_asn1_set_of _ %+ BORINGSSL_PREFIX %+ _CBB_flush_asn1_set_of %xdefine _CBB_get_utf8_len _ %+ BORINGSSL_PREFIX %+ _CBB_get_utf8_len %xdefine _CBB_init _ %+ BORINGSSL_PREFIX %+ _CBB_init %xdefine 
_CBB_init_fixed _ %+ BORINGSSL_PREFIX %+ _CBB_init_fixed %xdefine _CBB_len _ %+ BORINGSSL_PREFIX %+ _CBB_len %xdefine _CBB_reserve _ %+ BORINGSSL_PREFIX %+ _CBB_reserve %xdefine _CBB_zero _ %+ BORINGSSL_PREFIX %+ _CBB_zero %xdefine _CBS_asn1_ber_to_der _ %+ BORINGSSL_PREFIX %+ _CBS_asn1_ber_to_der %xdefine _CBS_asn1_bitstring_has_bit _ %+ BORINGSSL_PREFIX %+ _CBS_asn1_bitstring_has_bit %xdefine _CBS_asn1_oid_to_text _ %+ BORINGSSL_PREFIX %+ _CBS_asn1_oid_to_text %xdefine _CBS_contains_zero_byte _ %+ BORINGSSL_PREFIX %+ _CBS_contains_zero_byte %xdefine _CBS_copy_bytes _ %+ BORINGSSL_PREFIX %+ _CBS_copy_bytes %xdefine _CBS_data _ %+ BORINGSSL_PREFIX %+ _CBS_data %xdefine _CBS_get_any_asn1 _ %+ BORINGSSL_PREFIX %+ _CBS_get_any_asn1 %xdefine _CBS_get_any_asn1_element _ %+ BORINGSSL_PREFIX %+ _CBS_get_any_asn1_element %xdefine _CBS_get_any_ber_asn1_element _ %+ BORINGSSL_PREFIX %+ _CBS_get_any_ber_asn1_element %xdefine _CBS_get_asn1 _ %+ BORINGSSL_PREFIX %+ _CBS_get_asn1 %xdefine _CBS_get_asn1_bool _ %+ BORINGSSL_PREFIX %+ _CBS_get_asn1_bool %xdefine _CBS_get_asn1_element _ %+ BORINGSSL_PREFIX %+ _CBS_get_asn1_element %xdefine _CBS_get_asn1_implicit_string _ %+ BORINGSSL_PREFIX %+ _CBS_get_asn1_implicit_string %xdefine _CBS_get_asn1_int64 _ %+ BORINGSSL_PREFIX %+ _CBS_get_asn1_int64 %xdefine _CBS_get_asn1_uint64 _ %+ BORINGSSL_PREFIX %+ _CBS_get_asn1_uint64 %xdefine _CBS_get_bytes _ %+ BORINGSSL_PREFIX %+ _CBS_get_bytes %xdefine _CBS_get_last_u8 _ %+ BORINGSSL_PREFIX %+ _CBS_get_last_u8 %xdefine _CBS_get_latin1 _ %+ BORINGSSL_PREFIX %+ _CBS_get_latin1 %xdefine _CBS_get_optional_asn1 _ %+ BORINGSSL_PREFIX %+ _CBS_get_optional_asn1 %xdefine _CBS_get_optional_asn1_bool _ %+ BORINGSSL_PREFIX %+ _CBS_get_optional_asn1_bool %xdefine _CBS_get_optional_asn1_octet_string _ %+ BORINGSSL_PREFIX %+ _CBS_get_optional_asn1_octet_string %xdefine _CBS_get_optional_asn1_uint64 _ %+ BORINGSSL_PREFIX %+ _CBS_get_optional_asn1_uint64 %xdefine _CBS_get_u16 _ %+ BORINGSSL_PREFIX %+ 
_CBS_get_u16 %xdefine _CBS_get_u16_length_prefixed _ %+ BORINGSSL_PREFIX %+ _CBS_get_u16_length_prefixed %xdefine _CBS_get_u16le _ %+ BORINGSSL_PREFIX %+ _CBS_get_u16le %xdefine _CBS_get_u24 _ %+ BORINGSSL_PREFIX %+ _CBS_get_u24 %xdefine _CBS_get_u24_length_prefixed _ %+ BORINGSSL_PREFIX %+ _CBS_get_u24_length_prefixed %xdefine _CBS_get_u32 _ %+ BORINGSSL_PREFIX %+ _CBS_get_u32 %xdefine _CBS_get_u32le _ %+ BORINGSSL_PREFIX %+ _CBS_get_u32le %xdefine _CBS_get_u64 _ %+ BORINGSSL_PREFIX %+ _CBS_get_u64 %xdefine _CBS_get_u64_decimal _ %+ BORINGSSL_PREFIX %+ _CBS_get_u64_decimal %xdefine _CBS_get_u64le _ %+ BORINGSSL_PREFIX %+ _CBS_get_u64le %xdefine _CBS_get_u8 _ %+ BORINGSSL_PREFIX %+ _CBS_get_u8 %xdefine _CBS_get_u8_length_prefixed _ %+ BORINGSSL_PREFIX %+ _CBS_get_u8_length_prefixed %xdefine _CBS_get_ucs2_be _ %+ BORINGSSL_PREFIX %+ _CBS_get_ucs2_be %xdefine _CBS_get_until_first _ %+ BORINGSSL_PREFIX %+ _CBS_get_until_first %xdefine _CBS_get_utf32_be _ %+ BORINGSSL_PREFIX %+ _CBS_get_utf32_be %xdefine _CBS_get_utf8 _ %+ BORINGSSL_PREFIX %+ _CBS_get_utf8 %xdefine _CBS_init _ %+ BORINGSSL_PREFIX %+ _CBS_init %xdefine _CBS_is_unsigned_asn1_integer _ %+ BORINGSSL_PREFIX %+ _CBS_is_unsigned_asn1_integer %xdefine _CBS_is_valid_asn1_bitstring _ %+ BORINGSSL_PREFIX %+ _CBS_is_valid_asn1_bitstring %xdefine _CBS_is_valid_asn1_integer _ %+ BORINGSSL_PREFIX %+ _CBS_is_valid_asn1_integer %xdefine _CBS_is_valid_asn1_oid _ %+ BORINGSSL_PREFIX %+ _CBS_is_valid_asn1_oid %xdefine _CBS_len _ %+ BORINGSSL_PREFIX %+ _CBS_len %xdefine _CBS_mem_equal _ %+ BORINGSSL_PREFIX %+ _CBS_mem_equal %xdefine _CBS_parse_generalized_time _ %+ BORINGSSL_PREFIX %+ _CBS_parse_generalized_time %xdefine _CBS_parse_utc_time _ %+ BORINGSSL_PREFIX %+ _CBS_parse_utc_time %xdefine _CBS_peek_asn1_tag _ %+ BORINGSSL_PREFIX %+ _CBS_peek_asn1_tag %xdefine _CBS_skip _ %+ BORINGSSL_PREFIX %+ _CBS_skip %xdefine _CBS_stow _ %+ BORINGSSL_PREFIX %+ _CBS_stow %xdefine _CBS_strdup _ %+ BORINGSSL_PREFIX %+ _CBS_strdup 
%xdefine _CERTIFICATEPOLICIES_free _ %+ BORINGSSL_PREFIX %+ _CERTIFICATEPOLICIES_free %xdefine _CERTIFICATEPOLICIES_it _ %+ BORINGSSL_PREFIX %+ _CERTIFICATEPOLICIES_it %xdefine _CERTIFICATEPOLICIES_new _ %+ BORINGSSL_PREFIX %+ _CERTIFICATEPOLICIES_new %xdefine _CMAC_CTX_copy _ %+ BORINGSSL_PREFIX %+ _CMAC_CTX_copy %xdefine _CMAC_CTX_free _ %+ BORINGSSL_PREFIX %+ _CMAC_CTX_free %xdefine _CMAC_CTX_new _ %+ BORINGSSL_PREFIX %+ _CMAC_CTX_new %xdefine _CMAC_Final _ %+ BORINGSSL_PREFIX %+ _CMAC_Final %xdefine _CMAC_Init _ %+ BORINGSSL_PREFIX %+ _CMAC_Init %xdefine _CMAC_Reset _ %+ BORINGSSL_PREFIX %+ _CMAC_Reset %xdefine _CMAC_Update _ %+ BORINGSSL_PREFIX %+ _CMAC_Update %xdefine _CONF_VALUE_new _ %+ BORINGSSL_PREFIX %+ _CONF_VALUE_new %xdefine _CONF_modules_free _ %+ BORINGSSL_PREFIX %+ _CONF_modules_free %xdefine _CONF_modules_load_file _ %+ BORINGSSL_PREFIX %+ _CONF_modules_load_file %xdefine _CONF_parse_list _ %+ BORINGSSL_PREFIX %+ _CONF_parse_list %xdefine _CRL_DIST_POINTS_free _ %+ BORINGSSL_PREFIX %+ _CRL_DIST_POINTS_free %xdefine _CRL_DIST_POINTS_it _ %+ BORINGSSL_PREFIX %+ _CRL_DIST_POINTS_it %xdefine _CRL_DIST_POINTS_new _ %+ BORINGSSL_PREFIX %+ _CRL_DIST_POINTS_new %xdefine _CRYPTO_BUFFER_POOL_free _ %+ BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_POOL_free %xdefine _CRYPTO_BUFFER_POOL_new _ %+ BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_POOL_new %xdefine _CRYPTO_BUFFER_alloc _ %+ BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_alloc %xdefine _CRYPTO_BUFFER_data _ %+ BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_data %xdefine _CRYPTO_BUFFER_free _ %+ BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_free %xdefine _CRYPTO_BUFFER_init_CBS _ %+ BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_init_CBS %xdefine _CRYPTO_BUFFER_len _ %+ BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_len %xdefine _CRYPTO_BUFFER_new _ %+ BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_new %xdefine _CRYPTO_BUFFER_new_from_CBS _ %+ BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_new_from_CBS %xdefine _CRYPTO_BUFFER_new_from_static_data_unsafe _ %+ BORINGSSL_PREFIX %+ 
_CRYPTO_BUFFER_new_from_static_data_unsafe %xdefine _CRYPTO_BUFFER_up_ref _ %+ BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_up_ref %xdefine _CRYPTO_MUTEX_cleanup _ %+ BORINGSSL_PREFIX %+ _CRYPTO_MUTEX_cleanup %xdefine _CRYPTO_MUTEX_init _ %+ BORINGSSL_PREFIX %+ _CRYPTO_MUTEX_init %xdefine _CRYPTO_MUTEX_lock_read _ %+ BORINGSSL_PREFIX %+ _CRYPTO_MUTEX_lock_read %xdefine _CRYPTO_MUTEX_lock_write _ %+ BORINGSSL_PREFIX %+ _CRYPTO_MUTEX_lock_write %xdefine _CRYPTO_MUTEX_unlock_read _ %+ BORINGSSL_PREFIX %+ _CRYPTO_MUTEX_unlock_read %xdefine _CRYPTO_MUTEX_unlock_write _ %+ BORINGSSL_PREFIX %+ _CRYPTO_MUTEX_unlock_write %xdefine _CRYPTO_POLYVAL_finish _ %+ BORINGSSL_PREFIX %+ _CRYPTO_POLYVAL_finish %xdefine _CRYPTO_POLYVAL_init _ %+ BORINGSSL_PREFIX %+ _CRYPTO_POLYVAL_init %xdefine _CRYPTO_POLYVAL_update_blocks _ %+ BORINGSSL_PREFIX %+ _CRYPTO_POLYVAL_update_blocks %xdefine _CRYPTO_THREADID_current _ %+ BORINGSSL_PREFIX %+ _CRYPTO_THREADID_current %xdefine _CRYPTO_THREADID_set_callback _ %+ BORINGSSL_PREFIX %+ _CRYPTO_THREADID_set_callback %xdefine _CRYPTO_THREADID_set_numeric _ %+ BORINGSSL_PREFIX %+ _CRYPTO_THREADID_set_numeric %xdefine _CRYPTO_THREADID_set_pointer _ %+ BORINGSSL_PREFIX %+ _CRYPTO_THREADID_set_pointer %xdefine _CRYPTO_atomic_compare_exchange_weak_u32 _ %+ BORINGSSL_PREFIX %+ _CRYPTO_atomic_compare_exchange_weak_u32 %xdefine _CRYPTO_atomic_load_u32 _ %+ BORINGSSL_PREFIX %+ _CRYPTO_atomic_load_u32 %xdefine _CRYPTO_atomic_store_u32 _ %+ BORINGSSL_PREFIX %+ _CRYPTO_atomic_store_u32 %xdefine _CRYPTO_cbc128_decrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_cbc128_decrypt %xdefine _CRYPTO_cbc128_encrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_cbc128_encrypt %xdefine _CRYPTO_cfb128_1_encrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_cfb128_1_encrypt %xdefine _CRYPTO_cfb128_8_encrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_cfb128_8_encrypt %xdefine _CRYPTO_cfb128_encrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_cfb128_encrypt %xdefine _CRYPTO_chacha_20 _ %+ BORINGSSL_PREFIX %+ _CRYPTO_chacha_20 %xdefine 
_CRYPTO_cleanup_all_ex_data _ %+ BORINGSSL_PREFIX %+ _CRYPTO_cleanup_all_ex_data %xdefine _CRYPTO_cpu_avoid_zmm_registers _ %+ BORINGSSL_PREFIX %+ _CRYPTO_cpu_avoid_zmm_registers %xdefine _CRYPTO_cpu_perf_is_like_silvermont _ %+ BORINGSSL_PREFIX %+ _CRYPTO_cpu_perf_is_like_silvermont %xdefine _CRYPTO_ctr128_encrypt_ctr32 _ %+ BORINGSSL_PREFIX %+ _CRYPTO_ctr128_encrypt_ctr32 %xdefine _CRYPTO_fips_186_2_prf _ %+ BORINGSSL_PREFIX %+ _CRYPTO_fips_186_2_prf %xdefine _CRYPTO_fork_detect_force_madv_wipeonfork_for_testing _ %+ BORINGSSL_PREFIX %+ _CRYPTO_fork_detect_force_madv_wipeonfork_for_testing %xdefine _CRYPTO_free _ %+ BORINGSSL_PREFIX %+ _CRYPTO_free %xdefine _CRYPTO_free_ex_data _ %+ BORINGSSL_PREFIX %+ _CRYPTO_free_ex_data %xdefine _CRYPTO_gcm128_aad _ %+ BORINGSSL_PREFIX %+ _CRYPTO_gcm128_aad %xdefine _CRYPTO_gcm128_decrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_gcm128_decrypt %xdefine _CRYPTO_gcm128_encrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_gcm128_encrypt %xdefine _CRYPTO_gcm128_finish _ %+ BORINGSSL_PREFIX %+ _CRYPTO_gcm128_finish %xdefine _CRYPTO_gcm128_init_aes_key _ %+ BORINGSSL_PREFIX %+ _CRYPTO_gcm128_init_aes_key %xdefine _CRYPTO_gcm128_init_ctx _ %+ BORINGSSL_PREFIX %+ _CRYPTO_gcm128_init_ctx %xdefine _CRYPTO_gcm128_tag _ %+ BORINGSSL_PREFIX %+ _CRYPTO_gcm128_tag %xdefine _CRYPTO_get_dynlock_create_callback _ %+ BORINGSSL_PREFIX %+ _CRYPTO_get_dynlock_create_callback %xdefine _CRYPTO_get_dynlock_destroy_callback _ %+ BORINGSSL_PREFIX %+ _CRYPTO_get_dynlock_destroy_callback %xdefine _CRYPTO_get_dynlock_lock_callback _ %+ BORINGSSL_PREFIX %+ _CRYPTO_get_dynlock_lock_callback %xdefine _CRYPTO_get_ex_data _ %+ BORINGSSL_PREFIX %+ _CRYPTO_get_ex_data %xdefine _CRYPTO_get_ex_new_index_ex _ %+ BORINGSSL_PREFIX %+ _CRYPTO_get_ex_new_index_ex %xdefine _CRYPTO_get_fork_generation _ %+ BORINGSSL_PREFIX %+ _CRYPTO_get_fork_generation %xdefine _CRYPTO_get_lock_name _ %+ BORINGSSL_PREFIX %+ _CRYPTO_get_lock_name %xdefine _CRYPTO_get_locking_callback _ %+ BORINGSSL_PREFIX %+ 
_CRYPTO_get_locking_callback %xdefine _CRYPTO_get_stderr _ %+ BORINGSSL_PREFIX %+ _CRYPTO_get_stderr %xdefine _CRYPTO_get_thread_local _ %+ BORINGSSL_PREFIX %+ _CRYPTO_get_thread_local %xdefine _CRYPTO_ghash_init _ %+ BORINGSSL_PREFIX %+ _CRYPTO_ghash_init %xdefine _CRYPTO_has_asm _ %+ BORINGSSL_PREFIX %+ _CRYPTO_has_asm %xdefine _CRYPTO_hchacha20 _ %+ BORINGSSL_PREFIX %+ _CRYPTO_hchacha20 %xdefine _CRYPTO_init_sysrand _ %+ BORINGSSL_PREFIX %+ _CRYPTO_init_sysrand %xdefine _CRYPTO_is_ADX_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_ADX_capable %xdefine _CRYPTO_is_AESNI_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_AESNI_capable %xdefine _CRYPTO_is_ARMv8_AES_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_AES_capable %xdefine _CRYPTO_is_ARMv8_PMULL_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_PMULL_capable %xdefine _CRYPTO_is_ARMv8_SHA1_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_SHA1_capable %xdefine _CRYPTO_is_ARMv8_SHA256_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_SHA256_capable %xdefine _CRYPTO_is_ARMv8_SHA512_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_SHA512_capable %xdefine _CRYPTO_is_AVX2_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_AVX2_capable %xdefine _CRYPTO_is_AVX512BW_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_AVX512BW_capable %xdefine _CRYPTO_is_AVX512VL_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_AVX512VL_capable %xdefine _CRYPTO_is_AVX_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_AVX_capable %xdefine _CRYPTO_is_BMI1_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_BMI1_capable %xdefine _CRYPTO_is_BMI2_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_BMI2_capable %xdefine _CRYPTO_is_FXSR_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_FXSR_capable %xdefine _CRYPTO_is_MOVBE_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_MOVBE_capable %xdefine _CRYPTO_is_NEON_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_NEON_capable %xdefine _CRYPTO_is_PCLMUL_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_PCLMUL_capable %xdefine _CRYPTO_is_RDRAND_capable _ %+ 
BORINGSSL_PREFIX %+ _CRYPTO_is_RDRAND_capable %xdefine _CRYPTO_is_SSE4_1_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_SSE4_1_capable %xdefine _CRYPTO_is_SSSE3_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_SSSE3_capable %xdefine _CRYPTO_is_VAES_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_VAES_capable %xdefine _CRYPTO_is_VPCLMULQDQ_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_VPCLMULQDQ_capable %xdefine _CRYPTO_is_confidential_build _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_confidential_build %xdefine _CRYPTO_is_intel_cpu _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_intel_cpu %xdefine _CRYPTO_is_x86_SHA_capable _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_x86_SHA_capable %xdefine _CRYPTO_library_init _ %+ BORINGSSL_PREFIX %+ _CRYPTO_library_init %xdefine _CRYPTO_malloc _ %+ BORINGSSL_PREFIX %+ _CRYPTO_malloc %xdefine _CRYPTO_malloc_init _ %+ BORINGSSL_PREFIX %+ _CRYPTO_malloc_init %xdefine _CRYPTO_memcmp _ %+ BORINGSSL_PREFIX %+ _CRYPTO_memcmp %xdefine _CRYPTO_new_ex_data _ %+ BORINGSSL_PREFIX %+ _CRYPTO_new_ex_data %xdefine _CRYPTO_num_locks _ %+ BORINGSSL_PREFIX %+ _CRYPTO_num_locks %xdefine _CRYPTO_ofb128_encrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_ofb128_encrypt %xdefine _CRYPTO_once _ %+ BORINGSSL_PREFIX %+ _CRYPTO_once %xdefine _CRYPTO_poly1305_finish _ %+ BORINGSSL_PREFIX %+ _CRYPTO_poly1305_finish %xdefine _CRYPTO_poly1305_init _ %+ BORINGSSL_PREFIX %+ _CRYPTO_poly1305_init %xdefine _CRYPTO_poly1305_update _ %+ BORINGSSL_PREFIX %+ _CRYPTO_poly1305_update %xdefine _CRYPTO_pre_sandbox_init _ %+ BORINGSSL_PREFIX %+ _CRYPTO_pre_sandbox_init %xdefine _CRYPTO_rdrand _ %+ BORINGSSL_PREFIX %+ _CRYPTO_rdrand %xdefine _CRYPTO_rdrand_multiple8_buf _ %+ BORINGSSL_PREFIX %+ _CRYPTO_rdrand_multiple8_buf %xdefine _CRYPTO_realloc _ %+ BORINGSSL_PREFIX %+ _CRYPTO_realloc %xdefine _CRYPTO_refcount_dec_and_test_zero _ %+ BORINGSSL_PREFIX %+ _CRYPTO_refcount_dec_and_test_zero %xdefine _CRYPTO_refcount_inc _ %+ BORINGSSL_PREFIX %+ _CRYPTO_refcount_inc %xdefine _CRYPTO_secure_malloc_init _ %+ BORINGSSL_PREFIX 
%+ _CRYPTO_secure_malloc_init %xdefine _CRYPTO_secure_malloc_initialized _ %+ BORINGSSL_PREFIX %+ _CRYPTO_secure_malloc_initialized %xdefine _CRYPTO_secure_used _ %+ BORINGSSL_PREFIX %+ _CRYPTO_secure_used %xdefine _CRYPTO_set_add_lock_callback _ %+ BORINGSSL_PREFIX %+ _CRYPTO_set_add_lock_callback %xdefine _CRYPTO_set_dynlock_create_callback _ %+ BORINGSSL_PREFIX %+ _CRYPTO_set_dynlock_create_callback %xdefine _CRYPTO_set_dynlock_destroy_callback _ %+ BORINGSSL_PREFIX %+ _CRYPTO_set_dynlock_destroy_callback %xdefine _CRYPTO_set_dynlock_lock_callback _ %+ BORINGSSL_PREFIX %+ _CRYPTO_set_dynlock_lock_callback %xdefine _CRYPTO_set_ex_data _ %+ BORINGSSL_PREFIX %+ _CRYPTO_set_ex_data %xdefine _CRYPTO_set_id_callback _ %+ BORINGSSL_PREFIX %+ _CRYPTO_set_id_callback %xdefine _CRYPTO_set_locking_callback _ %+ BORINGSSL_PREFIX %+ _CRYPTO_set_locking_callback %xdefine _CRYPTO_set_thread_local _ %+ BORINGSSL_PREFIX %+ _CRYPTO_set_thread_local %xdefine _CRYPTO_sysrand _ %+ BORINGSSL_PREFIX %+ _CRYPTO_sysrand %xdefine _CRYPTO_sysrand_for_seed _ %+ BORINGSSL_PREFIX %+ _CRYPTO_sysrand_for_seed %xdefine _CRYPTO_sysrand_if_available _ %+ BORINGSSL_PREFIX %+ _CRYPTO_sysrand_if_available %xdefine _CRYPTO_tls13_hkdf_expand_label _ %+ BORINGSSL_PREFIX %+ _CRYPTO_tls13_hkdf_expand_label %xdefine _CRYPTO_tls1_prf _ %+ BORINGSSL_PREFIX %+ _CRYPTO_tls1_prf %xdefine _CRYPTO_xor16 _ %+ BORINGSSL_PREFIX %+ _CRYPTO_xor16 %xdefine _CTR_DRBG_clear _ %+ BORINGSSL_PREFIX %+ _CTR_DRBG_clear %xdefine _CTR_DRBG_free _ %+ BORINGSSL_PREFIX %+ _CTR_DRBG_free %xdefine _CTR_DRBG_generate _ %+ BORINGSSL_PREFIX %+ _CTR_DRBG_generate %xdefine _CTR_DRBG_init _ %+ BORINGSSL_PREFIX %+ _CTR_DRBG_init %xdefine _CTR_DRBG_new _ %+ BORINGSSL_PREFIX %+ _CTR_DRBG_new %xdefine _CTR_DRBG_reseed _ %+ BORINGSSL_PREFIX %+ _CTR_DRBG_reseed %xdefine _ChaCha20_ctr32_avx2 _ %+ BORINGSSL_PREFIX %+ _ChaCha20_ctr32_avx2 %xdefine _ChaCha20_ctr32_avx2_capable _ %+ BORINGSSL_PREFIX %+ _ChaCha20_ctr32_avx2_capable %xdefine 
_ChaCha20_ctr32_neon _ %+ BORINGSSL_PREFIX %+ _ChaCha20_ctr32_neon %xdefine _ChaCha20_ctr32_neon_capable _ %+ BORINGSSL_PREFIX %+ _ChaCha20_ctr32_neon_capable %xdefine _ChaCha20_ctr32_nohw _ %+ BORINGSSL_PREFIX %+ _ChaCha20_ctr32_nohw %xdefine _ChaCha20_ctr32_ssse3 _ %+ BORINGSSL_PREFIX %+ _ChaCha20_ctr32_ssse3 %xdefine _ChaCha20_ctr32_ssse3_4x _ %+ BORINGSSL_PREFIX %+ _ChaCha20_ctr32_ssse3_4x %xdefine _ChaCha20_ctr32_ssse3_4x_capable _ %+ BORINGSSL_PREFIX %+ _ChaCha20_ctr32_ssse3_4x_capable %xdefine _ChaCha20_ctr32_ssse3_capable _ %+ BORINGSSL_PREFIX %+ _ChaCha20_ctr32_ssse3_capable %xdefine _DES_decrypt3 _ %+ BORINGSSL_PREFIX %+ _DES_decrypt3 %xdefine _DES_ecb3_encrypt _ %+ BORINGSSL_PREFIX %+ _DES_ecb3_encrypt %xdefine _DES_ecb3_encrypt_ex _ %+ BORINGSSL_PREFIX %+ _DES_ecb3_encrypt_ex %xdefine _DES_ecb_encrypt _ %+ BORINGSSL_PREFIX %+ _DES_ecb_encrypt %xdefine _DES_ecb_encrypt_ex _ %+ BORINGSSL_PREFIX %+ _DES_ecb_encrypt_ex %xdefine _DES_ede2_cbc_encrypt _ %+ BORINGSSL_PREFIX %+ _DES_ede2_cbc_encrypt %xdefine _DES_ede3_cbc_encrypt _ %+ BORINGSSL_PREFIX %+ _DES_ede3_cbc_encrypt %xdefine _DES_ede3_cbc_encrypt_ex _ %+ BORINGSSL_PREFIX %+ _DES_ede3_cbc_encrypt_ex %xdefine _DES_encrypt3 _ %+ BORINGSSL_PREFIX %+ _DES_encrypt3 %xdefine _DES_ncbc_encrypt _ %+ BORINGSSL_PREFIX %+ _DES_ncbc_encrypt %xdefine _DES_ncbc_encrypt_ex _ %+ BORINGSSL_PREFIX %+ _DES_ncbc_encrypt_ex %xdefine _DES_set_key _ %+ BORINGSSL_PREFIX %+ _DES_set_key %xdefine _DES_set_key_ex _ %+ BORINGSSL_PREFIX %+ _DES_set_key_ex %xdefine _DES_set_key_unchecked _ %+ BORINGSSL_PREFIX %+ _DES_set_key_unchecked %xdefine _DES_set_odd_parity _ %+ BORINGSSL_PREFIX %+ _DES_set_odd_parity %xdefine _DH_bits _ %+ BORINGSSL_PREFIX %+ _DH_bits %xdefine _DH_check _ %+ BORINGSSL_PREFIX %+ _DH_check %xdefine _DH_check_pub_key _ %+ BORINGSSL_PREFIX %+ _DH_check_pub_key %xdefine _DH_compute_key _ %+ BORINGSSL_PREFIX %+ _DH_compute_key %xdefine _DH_compute_key_hashed _ %+ BORINGSSL_PREFIX %+ _DH_compute_key_hashed %xdefine 
_DH_compute_key_padded _ %+ BORINGSSL_PREFIX %+ _DH_compute_key_padded %xdefine _DH_free _ %+ BORINGSSL_PREFIX %+ _DH_free %xdefine _DH_generate_key _ %+ BORINGSSL_PREFIX %+ _DH_generate_key %xdefine _DH_generate_parameters_ex _ %+ BORINGSSL_PREFIX %+ _DH_generate_parameters_ex %xdefine _DH_get0_g _ %+ BORINGSSL_PREFIX %+ _DH_get0_g %xdefine _DH_get0_key _ %+ BORINGSSL_PREFIX %+ _DH_get0_key %xdefine _DH_get0_p _ %+ BORINGSSL_PREFIX %+ _DH_get0_p %xdefine _DH_get0_pqg _ %+ BORINGSSL_PREFIX %+ _DH_get0_pqg %xdefine _DH_get0_priv_key _ %+ BORINGSSL_PREFIX %+ _DH_get0_priv_key %xdefine _DH_get0_pub_key _ %+ BORINGSSL_PREFIX %+ _DH_get0_pub_key %xdefine _DH_get0_q _ %+ BORINGSSL_PREFIX %+ _DH_get0_q %xdefine _DH_get_rfc7919_2048 _ %+ BORINGSSL_PREFIX %+ _DH_get_rfc7919_2048 %xdefine _DH_marshal_parameters _ %+ BORINGSSL_PREFIX %+ _DH_marshal_parameters %xdefine _DH_new _ %+ BORINGSSL_PREFIX %+ _DH_new %xdefine _DH_num_bits _ %+ BORINGSSL_PREFIX %+ _DH_num_bits %xdefine _DH_parse_parameters _ %+ BORINGSSL_PREFIX %+ _DH_parse_parameters %xdefine _DH_set0_key _ %+ BORINGSSL_PREFIX %+ _DH_set0_key %xdefine _DH_set0_pqg _ %+ BORINGSSL_PREFIX %+ _DH_set0_pqg %xdefine _DH_set_length _ %+ BORINGSSL_PREFIX %+ _DH_set_length %xdefine _DH_size _ %+ BORINGSSL_PREFIX %+ _DH_size %xdefine _DH_up_ref _ %+ BORINGSSL_PREFIX %+ _DH_up_ref %xdefine _DHparams_dup _ %+ BORINGSSL_PREFIX %+ _DHparams_dup %xdefine _DIRECTORYSTRING_free _ %+ BORINGSSL_PREFIX %+ _DIRECTORYSTRING_free %xdefine _DIRECTORYSTRING_it _ %+ BORINGSSL_PREFIX %+ _DIRECTORYSTRING_it %xdefine _DIRECTORYSTRING_new _ %+ BORINGSSL_PREFIX %+ _DIRECTORYSTRING_new %xdefine _DISPLAYTEXT_free _ %+ BORINGSSL_PREFIX %+ _DISPLAYTEXT_free %xdefine _DISPLAYTEXT_it _ %+ BORINGSSL_PREFIX %+ _DISPLAYTEXT_it %xdefine _DISPLAYTEXT_new _ %+ BORINGSSL_PREFIX %+ _DISPLAYTEXT_new %xdefine _DIST_POINT_NAME_free _ %+ BORINGSSL_PREFIX %+ _DIST_POINT_NAME_free %xdefine _DIST_POINT_NAME_new _ %+ BORINGSSL_PREFIX %+ _DIST_POINT_NAME_new %xdefine 
_DIST_POINT_free _ %+ BORINGSSL_PREFIX %+ _DIST_POINT_free %xdefine _DIST_POINT_new _ %+ BORINGSSL_PREFIX %+ _DIST_POINT_new %xdefine _DIST_POINT_set_dpname _ %+ BORINGSSL_PREFIX %+ _DIST_POINT_set_dpname %xdefine _DSA_SIG_free _ %+ BORINGSSL_PREFIX %+ _DSA_SIG_free %xdefine _DSA_SIG_get0 _ %+ BORINGSSL_PREFIX %+ _DSA_SIG_get0 %xdefine _DSA_SIG_marshal _ %+ BORINGSSL_PREFIX %+ _DSA_SIG_marshal %xdefine _DSA_SIG_new _ %+ BORINGSSL_PREFIX %+ _DSA_SIG_new %xdefine _DSA_SIG_parse _ %+ BORINGSSL_PREFIX %+ _DSA_SIG_parse %xdefine _DSA_SIG_set0 _ %+ BORINGSSL_PREFIX %+ _DSA_SIG_set0 %xdefine _DSA_bits _ %+ BORINGSSL_PREFIX %+ _DSA_bits %xdefine _DSA_check_signature _ %+ BORINGSSL_PREFIX %+ _DSA_check_signature %xdefine _DSA_do_check_signature _ %+ BORINGSSL_PREFIX %+ _DSA_do_check_signature %xdefine _DSA_do_sign _ %+ BORINGSSL_PREFIX %+ _DSA_do_sign %xdefine _DSA_do_verify _ %+ BORINGSSL_PREFIX %+ _DSA_do_verify %xdefine _DSA_dup_DH _ %+ BORINGSSL_PREFIX %+ _DSA_dup_DH %xdefine _DSA_free _ %+ BORINGSSL_PREFIX %+ _DSA_free %xdefine _DSA_generate_key _ %+ BORINGSSL_PREFIX %+ _DSA_generate_key %xdefine _DSA_generate_parameters_ex _ %+ BORINGSSL_PREFIX %+ _DSA_generate_parameters_ex %xdefine _DSA_get0_g _ %+ BORINGSSL_PREFIX %+ _DSA_get0_g %xdefine _DSA_get0_key _ %+ BORINGSSL_PREFIX %+ _DSA_get0_key %xdefine _DSA_get0_p _ %+ BORINGSSL_PREFIX %+ _DSA_get0_p %xdefine _DSA_get0_pqg _ %+ BORINGSSL_PREFIX %+ _DSA_get0_pqg %xdefine _DSA_get0_priv_key _ %+ BORINGSSL_PREFIX %+ _DSA_get0_priv_key %xdefine _DSA_get0_pub_key _ %+ BORINGSSL_PREFIX %+ _DSA_get0_pub_key %xdefine _DSA_get0_q _ %+ BORINGSSL_PREFIX %+ _DSA_get0_q %xdefine _DSA_get_ex_data _ %+ BORINGSSL_PREFIX %+ _DSA_get_ex_data %xdefine _DSA_get_ex_new_index _ %+ BORINGSSL_PREFIX %+ _DSA_get_ex_new_index %xdefine _DSA_marshal_parameters _ %+ BORINGSSL_PREFIX %+ _DSA_marshal_parameters %xdefine _DSA_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _DSA_marshal_private_key %xdefine _DSA_marshal_public_key _ %+ BORINGSSL_PREFIX 
%+ _DSA_marshal_public_key %xdefine _DSA_new _ %+ BORINGSSL_PREFIX %+ _DSA_new %xdefine _DSA_parse_parameters _ %+ BORINGSSL_PREFIX %+ _DSA_parse_parameters %xdefine _DSA_parse_private_key _ %+ BORINGSSL_PREFIX %+ _DSA_parse_private_key %xdefine _DSA_parse_public_key _ %+ BORINGSSL_PREFIX %+ _DSA_parse_public_key %xdefine _DSA_set0_key _ %+ BORINGSSL_PREFIX %+ _DSA_set0_key %xdefine _DSA_set0_pqg _ %+ BORINGSSL_PREFIX %+ _DSA_set0_pqg %xdefine _DSA_set_ex_data _ %+ BORINGSSL_PREFIX %+ _DSA_set_ex_data %xdefine _DSA_sign _ %+ BORINGSSL_PREFIX %+ _DSA_sign %xdefine _DSA_size _ %+ BORINGSSL_PREFIX %+ _DSA_size %xdefine _DSA_up_ref _ %+ BORINGSSL_PREFIX %+ _DSA_up_ref %xdefine _DSA_verify _ %+ BORINGSSL_PREFIX %+ _DSA_verify %xdefine _DSAparams_dup _ %+ BORINGSSL_PREFIX %+ _DSAparams_dup %xdefine _DTLS_client_method _ %+ BORINGSSL_PREFIX %+ _DTLS_client_method %xdefine _DTLS_method _ %+ BORINGSSL_PREFIX %+ _DTLS_method %xdefine _DTLS_server_method _ %+ BORINGSSL_PREFIX %+ _DTLS_server_method %xdefine _DTLS_with_buffers_method _ %+ BORINGSSL_PREFIX %+ _DTLS_with_buffers_method %xdefine _DTLSv1_2_client_method _ %+ BORINGSSL_PREFIX %+ _DTLSv1_2_client_method %xdefine _DTLSv1_2_method _ %+ BORINGSSL_PREFIX %+ _DTLSv1_2_method %xdefine _DTLSv1_2_server_method _ %+ BORINGSSL_PREFIX %+ _DTLSv1_2_server_method %xdefine _DTLSv1_client_method _ %+ BORINGSSL_PREFIX %+ _DTLSv1_client_method %xdefine _DTLSv1_get_timeout _ %+ BORINGSSL_PREFIX %+ _DTLSv1_get_timeout %xdefine _DTLSv1_handle_timeout _ %+ BORINGSSL_PREFIX %+ _DTLSv1_handle_timeout %xdefine _DTLSv1_method _ %+ BORINGSSL_PREFIX %+ _DTLSv1_method %xdefine _DTLSv1_server_method _ %+ BORINGSSL_PREFIX %+ _DTLSv1_server_method %xdefine _DTLSv1_set_initial_timeout_duration _ %+ BORINGSSL_PREFIX %+ _DTLSv1_set_initial_timeout_duration %xdefine _ECDH_compute_key _ %+ BORINGSSL_PREFIX %+ _ECDH_compute_key %xdefine _ECDH_compute_key_fips _ %+ BORINGSSL_PREFIX %+ _ECDH_compute_key_fips %xdefine _ECDSA_SIG_free _ %+ BORINGSSL_PREFIX 
%+ _ECDSA_SIG_free %xdefine _ECDSA_SIG_from_bytes _ %+ BORINGSSL_PREFIX %+ _ECDSA_SIG_from_bytes %xdefine _ECDSA_SIG_get0 _ %+ BORINGSSL_PREFIX %+ _ECDSA_SIG_get0 %xdefine _ECDSA_SIG_get0_r _ %+ BORINGSSL_PREFIX %+ _ECDSA_SIG_get0_r %xdefine _ECDSA_SIG_get0_s _ %+ BORINGSSL_PREFIX %+ _ECDSA_SIG_get0_s %xdefine _ECDSA_SIG_marshal _ %+ BORINGSSL_PREFIX %+ _ECDSA_SIG_marshal %xdefine _ECDSA_SIG_max_len _ %+ BORINGSSL_PREFIX %+ _ECDSA_SIG_max_len %xdefine _ECDSA_SIG_new _ %+ BORINGSSL_PREFIX %+ _ECDSA_SIG_new %xdefine _ECDSA_SIG_parse _ %+ BORINGSSL_PREFIX %+ _ECDSA_SIG_parse %xdefine _ECDSA_SIG_set0 _ %+ BORINGSSL_PREFIX %+ _ECDSA_SIG_set0 %xdefine _ECDSA_SIG_to_bytes _ %+ BORINGSSL_PREFIX %+ _ECDSA_SIG_to_bytes %xdefine _ECDSA_do_sign _ %+ BORINGSSL_PREFIX %+ _ECDSA_do_sign %xdefine _ECDSA_do_verify _ %+ BORINGSSL_PREFIX %+ _ECDSA_do_verify %xdefine _ECDSA_sign _ %+ BORINGSSL_PREFIX %+ _ECDSA_sign %xdefine _ECDSA_sign_with_nonce_and_leak_private_key_for_testing _ %+ BORINGSSL_PREFIX %+ _ECDSA_sign_with_nonce_and_leak_private_key_for_testing %xdefine _ECDSA_size _ %+ BORINGSSL_PREFIX %+ _ECDSA_size %xdefine _ECDSA_verify _ %+ BORINGSSL_PREFIX %+ _ECDSA_verify %xdefine _EC_GFp_mont_method _ %+ BORINGSSL_PREFIX %+ _EC_GFp_mont_method %xdefine _EC_GFp_nistp224_method _ %+ BORINGSSL_PREFIX %+ _EC_GFp_nistp224_method %xdefine _EC_GFp_nistp256_method _ %+ BORINGSSL_PREFIX %+ _EC_GFp_nistp256_method %xdefine _EC_GFp_nistz256_method _ %+ BORINGSSL_PREFIX %+ _EC_GFp_nistz256_method %xdefine _EC_GROUP_cmp _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_cmp %xdefine _EC_GROUP_dup _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_dup %xdefine _EC_GROUP_free _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_free %xdefine _EC_GROUP_get0_generator _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_get0_generator %xdefine _EC_GROUP_get0_order _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_get0_order %xdefine _EC_GROUP_get_asn1_flag _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_get_asn1_flag %xdefine _EC_GROUP_get_cofactor _ %+ BORINGSSL_PREFIX %+ 
_EC_GROUP_get_cofactor %xdefine _EC_GROUP_get_curve_GFp _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_get_curve_GFp %xdefine _EC_GROUP_get_curve_name _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_get_curve_name %xdefine _EC_GROUP_get_degree _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_get_degree %xdefine _EC_GROUP_get_order _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_get_order %xdefine _EC_GROUP_method_of _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_method_of %xdefine _EC_GROUP_new_by_curve_name _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_new_by_curve_name %xdefine _EC_GROUP_new_curve_GFp _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_new_curve_GFp %xdefine _EC_GROUP_order_bits _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_order_bits %xdefine _EC_GROUP_set_asn1_flag _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_set_asn1_flag %xdefine _EC_GROUP_set_generator _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_set_generator %xdefine _EC_GROUP_set_point_conversion_form _ %+ BORINGSSL_PREFIX %+ _EC_GROUP_set_point_conversion_form %xdefine _EC_KEY_check_fips _ %+ BORINGSSL_PREFIX %+ _EC_KEY_check_fips %xdefine _EC_KEY_check_key _ %+ BORINGSSL_PREFIX %+ _EC_KEY_check_key %xdefine _EC_KEY_derive_from_secret _ %+ BORINGSSL_PREFIX %+ _EC_KEY_derive_from_secret %xdefine _EC_KEY_dup _ %+ BORINGSSL_PREFIX %+ _EC_KEY_dup %xdefine _EC_KEY_free _ %+ BORINGSSL_PREFIX %+ _EC_KEY_free %xdefine _EC_KEY_generate_key _ %+ BORINGSSL_PREFIX %+ _EC_KEY_generate_key %xdefine _EC_KEY_generate_key_fips _ %+ BORINGSSL_PREFIX %+ _EC_KEY_generate_key_fips %xdefine _EC_KEY_get0_group _ %+ BORINGSSL_PREFIX %+ _EC_KEY_get0_group %xdefine _EC_KEY_get0_private_key _ %+ BORINGSSL_PREFIX %+ _EC_KEY_get0_private_key %xdefine _EC_KEY_get0_public_key _ %+ BORINGSSL_PREFIX %+ _EC_KEY_get0_public_key %xdefine _EC_KEY_get_conv_form _ %+ BORINGSSL_PREFIX %+ _EC_KEY_get_conv_form %xdefine _EC_KEY_get_enc_flags _ %+ BORINGSSL_PREFIX %+ _EC_KEY_get_enc_flags %xdefine _EC_KEY_get_ex_data _ %+ BORINGSSL_PREFIX %+ _EC_KEY_get_ex_data %xdefine _EC_KEY_get_ex_new_index _ %+ BORINGSSL_PREFIX %+ _EC_KEY_get_ex_new_index 
%xdefine _EC_KEY_is_opaque _ %+ BORINGSSL_PREFIX %+ _EC_KEY_is_opaque %xdefine _EC_KEY_key2buf _ %+ BORINGSSL_PREFIX %+ _EC_KEY_key2buf %xdefine _EC_KEY_marshal_curve_name _ %+ BORINGSSL_PREFIX %+ _EC_KEY_marshal_curve_name %xdefine _EC_KEY_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _EC_KEY_marshal_private_key %xdefine _EC_KEY_new _ %+ BORINGSSL_PREFIX %+ _EC_KEY_new %xdefine _EC_KEY_new_by_curve_name _ %+ BORINGSSL_PREFIX %+ _EC_KEY_new_by_curve_name %xdefine _EC_KEY_new_method _ %+ BORINGSSL_PREFIX %+ _EC_KEY_new_method %xdefine _EC_KEY_oct2key _ %+ BORINGSSL_PREFIX %+ _EC_KEY_oct2key %xdefine _EC_KEY_oct2priv _ %+ BORINGSSL_PREFIX %+ _EC_KEY_oct2priv %xdefine _EC_KEY_parse_curve_name _ %+ BORINGSSL_PREFIX %+ _EC_KEY_parse_curve_name %xdefine _EC_KEY_parse_parameters _ %+ BORINGSSL_PREFIX %+ _EC_KEY_parse_parameters %xdefine _EC_KEY_parse_private_key _ %+ BORINGSSL_PREFIX %+ _EC_KEY_parse_private_key %xdefine _EC_KEY_priv2buf _ %+ BORINGSSL_PREFIX %+ _EC_KEY_priv2buf %xdefine _EC_KEY_priv2oct _ %+ BORINGSSL_PREFIX %+ _EC_KEY_priv2oct %xdefine _EC_KEY_set_asn1_flag _ %+ BORINGSSL_PREFIX %+ _EC_KEY_set_asn1_flag %xdefine _EC_KEY_set_conv_form _ %+ BORINGSSL_PREFIX %+ _EC_KEY_set_conv_form %xdefine _EC_KEY_set_enc_flags _ %+ BORINGSSL_PREFIX %+ _EC_KEY_set_enc_flags %xdefine _EC_KEY_set_ex_data _ %+ BORINGSSL_PREFIX %+ _EC_KEY_set_ex_data %xdefine _EC_KEY_set_group _ %+ BORINGSSL_PREFIX %+ _EC_KEY_set_group %xdefine _EC_KEY_set_private_key _ %+ BORINGSSL_PREFIX %+ _EC_KEY_set_private_key %xdefine _EC_KEY_set_public_key _ %+ BORINGSSL_PREFIX %+ _EC_KEY_set_public_key %xdefine _EC_KEY_set_public_key_affine_coordinates _ %+ BORINGSSL_PREFIX %+ _EC_KEY_set_public_key_affine_coordinates %xdefine _EC_KEY_up_ref _ %+ BORINGSSL_PREFIX %+ _EC_KEY_up_ref %xdefine _EC_METHOD_get_field_type _ %+ BORINGSSL_PREFIX %+ _EC_METHOD_get_field_type %xdefine _EC_POINT_add _ %+ BORINGSSL_PREFIX %+ _EC_POINT_add %xdefine _EC_POINT_clear_free _ %+ BORINGSSL_PREFIX %+ _EC_POINT_clear_free 
%xdefine _EC_POINT_cmp _ %+ BORINGSSL_PREFIX %+ _EC_POINT_cmp %xdefine _EC_POINT_copy _ %+ BORINGSSL_PREFIX %+ _EC_POINT_copy %xdefine _EC_POINT_dbl _ %+ BORINGSSL_PREFIX %+ _EC_POINT_dbl %xdefine _EC_POINT_dup _ %+ BORINGSSL_PREFIX %+ _EC_POINT_dup %xdefine _EC_POINT_free _ %+ BORINGSSL_PREFIX %+ _EC_POINT_free %xdefine _EC_POINT_get_affine_coordinates _ %+ BORINGSSL_PREFIX %+ _EC_POINT_get_affine_coordinates %xdefine _EC_POINT_get_affine_coordinates_GFp _ %+ BORINGSSL_PREFIX %+ _EC_POINT_get_affine_coordinates_GFp %xdefine _EC_POINT_invert _ %+ BORINGSSL_PREFIX %+ _EC_POINT_invert %xdefine _EC_POINT_is_at_infinity _ %+ BORINGSSL_PREFIX %+ _EC_POINT_is_at_infinity %xdefine _EC_POINT_is_on_curve _ %+ BORINGSSL_PREFIX %+ _EC_POINT_is_on_curve %xdefine _EC_POINT_mul _ %+ BORINGSSL_PREFIX %+ _EC_POINT_mul %xdefine _EC_POINT_new _ %+ BORINGSSL_PREFIX %+ _EC_POINT_new %xdefine _EC_POINT_oct2point _ %+ BORINGSSL_PREFIX %+ _EC_POINT_oct2point %xdefine _EC_POINT_point2buf _ %+ BORINGSSL_PREFIX %+ _EC_POINT_point2buf %xdefine _EC_POINT_point2cbb _ %+ BORINGSSL_PREFIX %+ _EC_POINT_point2cbb %xdefine _EC_POINT_point2oct _ %+ BORINGSSL_PREFIX %+ _EC_POINT_point2oct %xdefine _EC_POINT_set_affine_coordinates _ %+ BORINGSSL_PREFIX %+ _EC_POINT_set_affine_coordinates %xdefine _EC_POINT_set_affine_coordinates_GFp _ %+ BORINGSSL_PREFIX %+ _EC_POINT_set_affine_coordinates_GFp %xdefine _EC_POINT_set_compressed_coordinates_GFp _ %+ BORINGSSL_PREFIX %+ _EC_POINT_set_compressed_coordinates_GFp %xdefine _EC_POINT_set_to_infinity _ %+ BORINGSSL_PREFIX %+ _EC_POINT_set_to_infinity %xdefine _EC_curve_nid2nist _ %+ BORINGSSL_PREFIX %+ _EC_curve_nid2nist %xdefine _EC_curve_nist2nid _ %+ BORINGSSL_PREFIX %+ _EC_curve_nist2nid %xdefine _EC_get_builtin_curves _ %+ BORINGSSL_PREFIX %+ _EC_get_builtin_curves %xdefine _EC_group_p224 _ %+ BORINGSSL_PREFIX %+ _EC_group_p224 %xdefine _EC_group_p256 _ %+ BORINGSSL_PREFIX %+ _EC_group_p256 %xdefine _EC_group_p384 _ %+ BORINGSSL_PREFIX %+ _EC_group_p384 
%xdefine _EC_group_p521 _ %+ BORINGSSL_PREFIX %+ _EC_group_p521 %xdefine _EC_hash_to_curve_p256_xmd_sha256_sswu _ %+ BORINGSSL_PREFIX %+ _EC_hash_to_curve_p256_xmd_sha256_sswu %xdefine _EC_hash_to_curve_p384_xmd_sha384_sswu _ %+ BORINGSSL_PREFIX %+ _EC_hash_to_curve_p384_xmd_sha384_sswu %xdefine _ED25519_keypair _ %+ BORINGSSL_PREFIX %+ _ED25519_keypair %xdefine _ED25519_keypair_from_seed _ %+ BORINGSSL_PREFIX %+ _ED25519_keypair_from_seed %xdefine _ED25519_sign _ %+ BORINGSSL_PREFIX %+ _ED25519_sign %xdefine _ED25519_verify _ %+ BORINGSSL_PREFIX %+ _ED25519_verify %xdefine _EDIPARTYNAME_free _ %+ BORINGSSL_PREFIX %+ _EDIPARTYNAME_free %xdefine _EDIPARTYNAME_new _ %+ BORINGSSL_PREFIX %+ _EDIPARTYNAME_new %xdefine _ENGINE_free _ %+ BORINGSSL_PREFIX %+ _ENGINE_free %xdefine _ENGINE_get_ECDSA_method _ %+ BORINGSSL_PREFIX %+ _ENGINE_get_ECDSA_method %xdefine _ENGINE_get_RSA_method _ %+ BORINGSSL_PREFIX %+ _ENGINE_get_RSA_method %xdefine _ENGINE_load_builtin_engines _ %+ BORINGSSL_PREFIX %+ _ENGINE_load_builtin_engines %xdefine _ENGINE_new _ %+ BORINGSSL_PREFIX %+ _ENGINE_new %xdefine _ENGINE_register_all_complete _ %+ BORINGSSL_PREFIX %+ _ENGINE_register_all_complete %xdefine _ENGINE_set_ECDSA_method _ %+ BORINGSSL_PREFIX %+ _ENGINE_set_ECDSA_method %xdefine _ENGINE_set_RSA_method _ %+ BORINGSSL_PREFIX %+ _ENGINE_set_RSA_method %xdefine _ERR_GET_LIB _ %+ BORINGSSL_PREFIX %+ _ERR_GET_LIB %xdefine _ERR_GET_REASON _ %+ BORINGSSL_PREFIX %+ _ERR_GET_REASON %xdefine _ERR_SAVE_STATE_free _ %+ BORINGSSL_PREFIX %+ _ERR_SAVE_STATE_free %xdefine _ERR_add_error_data _ %+ BORINGSSL_PREFIX %+ _ERR_add_error_data %xdefine _ERR_add_error_dataf _ %+ BORINGSSL_PREFIX %+ _ERR_add_error_dataf %xdefine _ERR_clear_error _ %+ BORINGSSL_PREFIX %+ _ERR_clear_error %xdefine _ERR_clear_system_error _ %+ BORINGSSL_PREFIX %+ _ERR_clear_system_error %xdefine _ERR_error_string _ %+ BORINGSSL_PREFIX %+ _ERR_error_string %xdefine _ERR_error_string_n _ %+ BORINGSSL_PREFIX %+ _ERR_error_string_n 
%xdefine _ERR_free_strings _ %+ BORINGSSL_PREFIX %+ _ERR_free_strings %xdefine _ERR_func_error_string _ %+ BORINGSSL_PREFIX %+ _ERR_func_error_string %xdefine _ERR_get_error _ %+ BORINGSSL_PREFIX %+ _ERR_get_error %xdefine _ERR_get_error_line _ %+ BORINGSSL_PREFIX %+ _ERR_get_error_line %xdefine _ERR_get_error_line_data _ %+ BORINGSSL_PREFIX %+ _ERR_get_error_line_data %xdefine _ERR_get_next_error_library _ %+ BORINGSSL_PREFIX %+ _ERR_get_next_error_library %xdefine _ERR_lib_error_string _ %+ BORINGSSL_PREFIX %+ _ERR_lib_error_string %xdefine _ERR_lib_symbol_name _ %+ BORINGSSL_PREFIX %+ _ERR_lib_symbol_name %xdefine _ERR_load_BIO_strings _ %+ BORINGSSL_PREFIX %+ _ERR_load_BIO_strings %xdefine _ERR_load_ERR_strings _ %+ BORINGSSL_PREFIX %+ _ERR_load_ERR_strings %xdefine _ERR_load_RAND_strings _ %+ BORINGSSL_PREFIX %+ _ERR_load_RAND_strings %xdefine _ERR_load_SSL_strings _ %+ BORINGSSL_PREFIX %+ _ERR_load_SSL_strings %xdefine _ERR_load_crypto_strings _ %+ BORINGSSL_PREFIX %+ _ERR_load_crypto_strings %xdefine _ERR_peek_error _ %+ BORINGSSL_PREFIX %+ _ERR_peek_error %xdefine _ERR_peek_error_line _ %+ BORINGSSL_PREFIX %+ _ERR_peek_error_line %xdefine _ERR_peek_error_line_data _ %+ BORINGSSL_PREFIX %+ _ERR_peek_error_line_data %xdefine _ERR_peek_last_error _ %+ BORINGSSL_PREFIX %+ _ERR_peek_last_error %xdefine _ERR_peek_last_error_line _ %+ BORINGSSL_PREFIX %+ _ERR_peek_last_error_line %xdefine _ERR_peek_last_error_line_data _ %+ BORINGSSL_PREFIX %+ _ERR_peek_last_error_line_data %xdefine _ERR_pop_to_mark _ %+ BORINGSSL_PREFIX %+ _ERR_pop_to_mark %xdefine _ERR_print_errors _ %+ BORINGSSL_PREFIX %+ _ERR_print_errors %xdefine _ERR_print_errors_cb _ %+ BORINGSSL_PREFIX %+ _ERR_print_errors_cb %xdefine _ERR_print_errors_fp _ %+ BORINGSSL_PREFIX %+ _ERR_print_errors_fp %xdefine _ERR_put_error _ %+ BORINGSSL_PREFIX %+ _ERR_put_error %xdefine _ERR_reason_error_string _ %+ BORINGSSL_PREFIX %+ _ERR_reason_error_string %xdefine _ERR_reason_symbol_name _ %+ BORINGSSL_PREFIX %+ 
_ERR_reason_symbol_name %xdefine _ERR_remove_state _ %+ BORINGSSL_PREFIX %+ _ERR_remove_state %xdefine _ERR_remove_thread_state _ %+ BORINGSSL_PREFIX %+ _ERR_remove_thread_state %xdefine _ERR_restore_state _ %+ BORINGSSL_PREFIX %+ _ERR_restore_state %xdefine _ERR_save_state _ %+ BORINGSSL_PREFIX %+ _ERR_save_state %xdefine _ERR_set_error_data _ %+ BORINGSSL_PREFIX %+ _ERR_set_error_data %xdefine _ERR_set_mark _ %+ BORINGSSL_PREFIX %+ _ERR_set_mark %xdefine _EVP_AEAD_CTX_aead _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_aead %xdefine _EVP_AEAD_CTX_cleanup _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_cleanup %xdefine _EVP_AEAD_CTX_free _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_free %xdefine _EVP_AEAD_CTX_get_iv _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_get_iv %xdefine _EVP_AEAD_CTX_init _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_init %xdefine _EVP_AEAD_CTX_init_with_direction _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_init_with_direction %xdefine _EVP_AEAD_CTX_new _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_new %xdefine _EVP_AEAD_CTX_open _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_open %xdefine _EVP_AEAD_CTX_open_gather _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_open_gather %xdefine _EVP_AEAD_CTX_seal _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_seal %xdefine _EVP_AEAD_CTX_seal_scatter _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_seal_scatter %xdefine _EVP_AEAD_CTX_tag_len _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_tag_len %xdefine _EVP_AEAD_CTX_zero _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_zero %xdefine _EVP_AEAD_key_length _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_key_length %xdefine _EVP_AEAD_max_overhead _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_max_overhead %xdefine _EVP_AEAD_max_tag_len _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_max_tag_len %xdefine _EVP_AEAD_nonce_length _ %+ BORINGSSL_PREFIX %+ _EVP_AEAD_nonce_length %xdefine _EVP_BytesToKey _ %+ BORINGSSL_PREFIX %+ _EVP_BytesToKey %xdefine _EVP_CIPHER_CTX_block_size _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_block_size %xdefine _EVP_CIPHER_CTX_cipher _ %+ BORINGSSL_PREFIX %+ 
_EVP_CIPHER_CTX_cipher %xdefine _EVP_CIPHER_CTX_cleanup _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_cleanup %xdefine _EVP_CIPHER_CTX_copy _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_copy %xdefine _EVP_CIPHER_CTX_ctrl _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_ctrl %xdefine _EVP_CIPHER_CTX_encrypting _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_encrypting %xdefine _EVP_CIPHER_CTX_flags _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_flags %xdefine _EVP_CIPHER_CTX_free _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_free %xdefine _EVP_CIPHER_CTX_get_app_data _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_get_app_data %xdefine _EVP_CIPHER_CTX_init _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_init %xdefine _EVP_CIPHER_CTX_iv_length _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_iv_length %xdefine _EVP_CIPHER_CTX_key_length _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_key_length %xdefine _EVP_CIPHER_CTX_mode _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_mode %xdefine _EVP_CIPHER_CTX_new _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_new %xdefine _EVP_CIPHER_CTX_nid _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_nid %xdefine _EVP_CIPHER_CTX_reset _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_reset %xdefine _EVP_CIPHER_CTX_set_app_data _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_set_app_data %xdefine _EVP_CIPHER_CTX_set_flags _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_set_flags %xdefine _EVP_CIPHER_CTX_set_key_length _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_set_key_length %xdefine _EVP_CIPHER_CTX_set_padding _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_set_padding %xdefine _EVP_CIPHER_block_size _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_block_size %xdefine _EVP_CIPHER_flags _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_flags %xdefine _EVP_CIPHER_iv_length _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_iv_length %xdefine _EVP_CIPHER_key_length _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_key_length %xdefine _EVP_CIPHER_mode _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_mode %xdefine _EVP_CIPHER_nid _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_nid %xdefine _EVP_Cipher _ %+ BORINGSSL_PREFIX %+ 
_EVP_Cipher %xdefine _EVP_CipherFinal _ %+ BORINGSSL_PREFIX %+ _EVP_CipherFinal %xdefine _EVP_CipherFinal_ex _ %+ BORINGSSL_PREFIX %+ _EVP_CipherFinal_ex %xdefine _EVP_CipherInit _ %+ BORINGSSL_PREFIX %+ _EVP_CipherInit %xdefine _EVP_CipherInit_ex _ %+ BORINGSSL_PREFIX %+ _EVP_CipherInit_ex %xdefine _EVP_CipherUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_CipherUpdate %xdefine _EVP_DecodeBase64 _ %+ BORINGSSL_PREFIX %+ _EVP_DecodeBase64 %xdefine _EVP_DecodeBlock _ %+ BORINGSSL_PREFIX %+ _EVP_DecodeBlock %xdefine _EVP_DecodeFinal _ %+ BORINGSSL_PREFIX %+ _EVP_DecodeFinal %xdefine _EVP_DecodeInit _ %+ BORINGSSL_PREFIX %+ _EVP_DecodeInit %xdefine _EVP_DecodeUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_DecodeUpdate %xdefine _EVP_DecodedLength _ %+ BORINGSSL_PREFIX %+ _EVP_DecodedLength %xdefine _EVP_DecryptFinal _ %+ BORINGSSL_PREFIX %+ _EVP_DecryptFinal %xdefine _EVP_DecryptFinal_ex _ %+ BORINGSSL_PREFIX %+ _EVP_DecryptFinal_ex %xdefine _EVP_DecryptInit _ %+ BORINGSSL_PREFIX %+ _EVP_DecryptInit %xdefine _EVP_DecryptInit_ex _ %+ BORINGSSL_PREFIX %+ _EVP_DecryptInit_ex %xdefine _EVP_DecryptUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_DecryptUpdate %xdefine _EVP_Digest _ %+ BORINGSSL_PREFIX %+ _EVP_Digest %xdefine _EVP_DigestFinal _ %+ BORINGSSL_PREFIX %+ _EVP_DigestFinal %xdefine _EVP_DigestFinalXOF _ %+ BORINGSSL_PREFIX %+ _EVP_DigestFinalXOF %xdefine _EVP_DigestFinal_ex _ %+ BORINGSSL_PREFIX %+ _EVP_DigestFinal_ex %xdefine _EVP_DigestInit _ %+ BORINGSSL_PREFIX %+ _EVP_DigestInit %xdefine _EVP_DigestInit_ex _ %+ BORINGSSL_PREFIX %+ _EVP_DigestInit_ex %xdefine _EVP_DigestSign _ %+ BORINGSSL_PREFIX %+ _EVP_DigestSign %xdefine _EVP_DigestSignFinal _ %+ BORINGSSL_PREFIX %+ _EVP_DigestSignFinal %xdefine _EVP_DigestSignInit _ %+ BORINGSSL_PREFIX %+ _EVP_DigestSignInit %xdefine _EVP_DigestSignUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_DigestSignUpdate %xdefine _EVP_DigestUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_DigestUpdate %xdefine _EVP_DigestVerify _ %+ BORINGSSL_PREFIX %+ _EVP_DigestVerify %xdefine 
_EVP_DigestVerifyFinal _ %+ BORINGSSL_PREFIX %+ _EVP_DigestVerifyFinal %xdefine _EVP_DigestVerifyInit _ %+ BORINGSSL_PREFIX %+ _EVP_DigestVerifyInit %xdefine _EVP_DigestVerifyUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_DigestVerifyUpdate %xdefine _EVP_ENCODE_CTX_free _ %+ BORINGSSL_PREFIX %+ _EVP_ENCODE_CTX_free %xdefine _EVP_ENCODE_CTX_new _ %+ BORINGSSL_PREFIX %+ _EVP_ENCODE_CTX_new %xdefine _EVP_EncodeBlock _ %+ BORINGSSL_PREFIX %+ _EVP_EncodeBlock %xdefine _EVP_EncodeFinal _ %+ BORINGSSL_PREFIX %+ _EVP_EncodeFinal %xdefine _EVP_EncodeInit _ %+ BORINGSSL_PREFIX %+ _EVP_EncodeInit %xdefine _EVP_EncodeUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_EncodeUpdate %xdefine _EVP_EncodedLength _ %+ BORINGSSL_PREFIX %+ _EVP_EncodedLength %xdefine _EVP_EncryptFinal _ %+ BORINGSSL_PREFIX %+ _EVP_EncryptFinal %xdefine _EVP_EncryptFinal_ex _ %+ BORINGSSL_PREFIX %+ _EVP_EncryptFinal_ex %xdefine _EVP_EncryptInit _ %+ BORINGSSL_PREFIX %+ _EVP_EncryptInit %xdefine _EVP_EncryptInit_ex _ %+ BORINGSSL_PREFIX %+ _EVP_EncryptInit_ex %xdefine _EVP_EncryptUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_EncryptUpdate %xdefine _EVP_HPKE_AEAD_aead _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_AEAD_aead %xdefine _EVP_HPKE_AEAD_id _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_AEAD_id %xdefine _EVP_HPKE_CTX_aead _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_aead %xdefine _EVP_HPKE_CTX_cleanup _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_cleanup %xdefine _EVP_HPKE_CTX_export _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_export %xdefine _EVP_HPKE_CTX_free _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_free %xdefine _EVP_HPKE_CTX_kdf _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_kdf %xdefine _EVP_HPKE_CTX_kem _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_kem %xdefine _EVP_HPKE_CTX_max_overhead _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_max_overhead %xdefine _EVP_HPKE_CTX_new _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_new %xdefine _EVP_HPKE_CTX_open _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_open %xdefine _EVP_HPKE_CTX_seal _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_seal %xdefine 
_EVP_HPKE_CTX_setup_auth_recipient _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_setup_auth_recipient %xdefine _EVP_HPKE_CTX_setup_auth_sender _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_setup_auth_sender %xdefine _EVP_HPKE_CTX_setup_auth_sender_with_seed_for_testing _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_setup_auth_sender_with_seed_for_testing %xdefine _EVP_HPKE_CTX_setup_recipient _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_setup_recipient %xdefine _EVP_HPKE_CTX_setup_sender _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_setup_sender %xdefine _EVP_HPKE_CTX_setup_sender_with_seed_for_testing _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_setup_sender_with_seed_for_testing %xdefine _EVP_HPKE_CTX_zero _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_zero %xdefine _EVP_HPKE_KDF_hkdf_md _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KDF_hkdf_md %xdefine _EVP_HPKE_KDF_id _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KDF_id %xdefine _EVP_HPKE_KEM_enc_len _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEM_enc_len %xdefine _EVP_HPKE_KEM_id _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEM_id %xdefine _EVP_HPKE_KEM_private_key_len _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEM_private_key_len %xdefine _EVP_HPKE_KEM_public_key_len _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEM_public_key_len %xdefine _EVP_HPKE_KEY_cleanup _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_cleanup %xdefine _EVP_HPKE_KEY_copy _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_copy %xdefine _EVP_HPKE_KEY_free _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_free %xdefine _EVP_HPKE_KEY_generate _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_generate %xdefine _EVP_HPKE_KEY_init _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_init %xdefine _EVP_HPKE_KEY_kem _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_kem %xdefine _EVP_HPKE_KEY_move _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_move %xdefine _EVP_HPKE_KEY_new _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_new %xdefine _EVP_HPKE_KEY_private_key _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_private_key %xdefine _EVP_HPKE_KEY_public_key _ %+ BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_public_key %xdefine _EVP_HPKE_KEY_zero _ %+ 
BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_zero %xdefine _EVP_MD_CTX_block_size _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_block_size %xdefine _EVP_MD_CTX_cleanse _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_cleanse %xdefine _EVP_MD_CTX_cleanup _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_cleanup %xdefine _EVP_MD_CTX_copy _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_copy %xdefine _EVP_MD_CTX_copy_ex _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_copy_ex %xdefine _EVP_MD_CTX_create _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_create %xdefine _EVP_MD_CTX_destroy _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_destroy %xdefine _EVP_MD_CTX_free _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_free %xdefine _EVP_MD_CTX_get0_md _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_get0_md %xdefine _EVP_MD_CTX_init _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_init %xdefine _EVP_MD_CTX_md _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_md %xdefine _EVP_MD_CTX_move _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_move %xdefine _EVP_MD_CTX_new _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_new %xdefine _EVP_MD_CTX_reset _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_reset %xdefine _EVP_MD_CTX_set_flags _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_set_flags %xdefine _EVP_MD_CTX_size _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_size %xdefine _EVP_MD_CTX_type _ %+ BORINGSSL_PREFIX %+ _EVP_MD_CTX_type %xdefine _EVP_MD_block_size _ %+ BORINGSSL_PREFIX %+ _EVP_MD_block_size %xdefine _EVP_MD_flags _ %+ BORINGSSL_PREFIX %+ _EVP_MD_flags %xdefine _EVP_MD_meth_get_flags _ %+ BORINGSSL_PREFIX %+ _EVP_MD_meth_get_flags %xdefine _EVP_MD_nid _ %+ BORINGSSL_PREFIX %+ _EVP_MD_nid %xdefine _EVP_MD_size _ %+ BORINGSSL_PREFIX %+ _EVP_MD_size %xdefine _EVP_MD_type _ %+ BORINGSSL_PREFIX %+ _EVP_MD_type %xdefine _EVP_PBE_scrypt _ %+ BORINGSSL_PREFIX %+ _EVP_PBE_scrypt %xdefine _EVP_PKCS82PKEY _ %+ BORINGSSL_PREFIX %+ _EVP_PKCS82PKEY %xdefine _EVP_PKEY2PKCS8 _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY2PKCS8 %xdefine _EVP_PKEY_CTX_add1_hkdf_info _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_add1_hkdf_info %xdefine _EVP_PKEY_CTX_ctrl _ %+ BORINGSSL_PREFIX %+ 
_EVP_PKEY_CTX_ctrl %xdefine _EVP_PKEY_CTX_dup _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_dup %xdefine _EVP_PKEY_CTX_free _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_free %xdefine _EVP_PKEY_CTX_get0_pkey _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get0_pkey %xdefine _EVP_PKEY_CTX_get0_rsa_oaep_label _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get0_rsa_oaep_label %xdefine _EVP_PKEY_CTX_get_rsa_mgf1_md _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get_rsa_mgf1_md %xdefine _EVP_PKEY_CTX_get_rsa_oaep_md _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get_rsa_oaep_md %xdefine _EVP_PKEY_CTX_get_rsa_padding _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get_rsa_padding %xdefine _EVP_PKEY_CTX_get_rsa_pss_saltlen _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get_rsa_pss_saltlen %xdefine _EVP_PKEY_CTX_get_signature_md _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get_signature_md %xdefine _EVP_PKEY_CTX_hkdf_mode _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_hkdf_mode %xdefine _EVP_PKEY_CTX_new _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_new %xdefine _EVP_PKEY_CTX_new_id _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_new_id %xdefine _EVP_PKEY_CTX_set0_rsa_oaep_label _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set0_rsa_oaep_label %xdefine _EVP_PKEY_CTX_set1_hkdf_key _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set1_hkdf_key %xdefine _EVP_PKEY_CTX_set1_hkdf_salt _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set1_hkdf_salt %xdefine _EVP_PKEY_CTX_set_dh_pad _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_dh_pad %xdefine _EVP_PKEY_CTX_set_dsa_paramgen_bits _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_dsa_paramgen_bits %xdefine _EVP_PKEY_CTX_set_dsa_paramgen_q_bits _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_dsa_paramgen_q_bits %xdefine _EVP_PKEY_CTX_set_ec_param_enc _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_ec_param_enc %xdefine _EVP_PKEY_CTX_set_ec_paramgen_curve_nid _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_ec_paramgen_curve_nid %xdefine _EVP_PKEY_CTX_set_hkdf_md _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_hkdf_md %xdefine _EVP_PKEY_CTX_set_rsa_keygen_bits _ %+ BORINGSSL_PREFIX 
%+ _EVP_PKEY_CTX_set_rsa_keygen_bits %xdefine _EVP_PKEY_CTX_set_rsa_keygen_pubexp _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_keygen_pubexp %xdefine _EVP_PKEY_CTX_set_rsa_mgf1_md _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_mgf1_md %xdefine _EVP_PKEY_CTX_set_rsa_oaep_md _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_oaep_md %xdefine _EVP_PKEY_CTX_set_rsa_padding _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_padding %xdefine _EVP_PKEY_CTX_set_rsa_pss_keygen_md _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_pss_keygen_md %xdefine _EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md %xdefine _EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen %xdefine _EVP_PKEY_CTX_set_rsa_pss_saltlen _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_pss_saltlen %xdefine _EVP_PKEY_CTX_set_signature_md _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_signature_md %xdefine _EVP_PKEY_assign _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_assign %xdefine _EVP_PKEY_assign_DH _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_assign_DH %xdefine _EVP_PKEY_assign_DSA _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_assign_DSA %xdefine _EVP_PKEY_assign_EC_KEY _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_assign_EC_KEY %xdefine _EVP_PKEY_assign_RSA _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_assign_RSA %xdefine _EVP_PKEY_base_id _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_base_id %xdefine _EVP_PKEY_bits _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_bits %xdefine _EVP_PKEY_cmp _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_cmp %xdefine _EVP_PKEY_cmp_parameters _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_cmp_parameters %xdefine _EVP_PKEY_copy_parameters _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_copy_parameters %xdefine _EVP_PKEY_decrypt _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_decrypt %xdefine _EVP_PKEY_decrypt_init _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_decrypt_init %xdefine _EVP_PKEY_derive _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_derive %xdefine _EVP_PKEY_derive_init _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_derive_init 
%xdefine _EVP_PKEY_derive_set_peer _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_derive_set_peer %xdefine _EVP_PKEY_encrypt _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_encrypt %xdefine _EVP_PKEY_encrypt_init _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_encrypt_init %xdefine _EVP_PKEY_free _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_free %xdefine _EVP_PKEY_get0 _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_get0 %xdefine _EVP_PKEY_get0_DH _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_get0_DH %xdefine _EVP_PKEY_get0_DSA _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_get0_DSA %xdefine _EVP_PKEY_get0_EC_KEY _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_get0_EC_KEY %xdefine _EVP_PKEY_get0_RSA _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_get0_RSA %xdefine _EVP_PKEY_get1_DH _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_get1_DH %xdefine _EVP_PKEY_get1_DSA _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_get1_DSA %xdefine _EVP_PKEY_get1_EC_KEY _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_get1_EC_KEY %xdefine _EVP_PKEY_get1_RSA _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_get1_RSA %xdefine _EVP_PKEY_get1_tls_encodedpoint _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_get1_tls_encodedpoint %xdefine _EVP_PKEY_get_raw_private_key _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_get_raw_private_key %xdefine _EVP_PKEY_get_raw_public_key _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_get_raw_public_key %xdefine _EVP_PKEY_id _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_id %xdefine _EVP_PKEY_is_opaque _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_is_opaque %xdefine _EVP_PKEY_keygen _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_keygen %xdefine _EVP_PKEY_keygen_init _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_keygen_init %xdefine _EVP_PKEY_missing_parameters _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_missing_parameters %xdefine _EVP_PKEY_new _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_new %xdefine _EVP_PKEY_new_raw_private_key _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_new_raw_private_key %xdefine _EVP_PKEY_new_raw_public_key _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_new_raw_public_key %xdefine _EVP_PKEY_paramgen _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_paramgen %xdefine _EVP_PKEY_paramgen_init _ %+ BORINGSSL_PREFIX %+ 
_EVP_PKEY_paramgen_init %xdefine _EVP_PKEY_print_params _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_print_params %xdefine _EVP_PKEY_print_private _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_print_private %xdefine _EVP_PKEY_print_public _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_print_public %xdefine _EVP_PKEY_set1_DH _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_set1_DH %xdefine _EVP_PKEY_set1_DSA _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_set1_DSA %xdefine _EVP_PKEY_set1_EC_KEY _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_set1_EC_KEY %xdefine _EVP_PKEY_set1_RSA _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_set1_RSA %xdefine _EVP_PKEY_set1_tls_encodedpoint _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_set1_tls_encodedpoint %xdefine _EVP_PKEY_set_type _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_set_type %xdefine _EVP_PKEY_sign _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_sign %xdefine _EVP_PKEY_sign_init _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_sign_init %xdefine _EVP_PKEY_size _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_size %xdefine _EVP_PKEY_type _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_type %xdefine _EVP_PKEY_up_ref _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_up_ref %xdefine _EVP_PKEY_verify _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_verify %xdefine _EVP_PKEY_verify_init _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_verify_init %xdefine _EVP_PKEY_verify_recover _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_verify_recover %xdefine _EVP_PKEY_verify_recover_init _ %+ BORINGSSL_PREFIX %+ _EVP_PKEY_verify_recover_init %xdefine _EVP_SignFinal _ %+ BORINGSSL_PREFIX %+ _EVP_SignFinal %xdefine _EVP_SignInit _ %+ BORINGSSL_PREFIX %+ _EVP_SignInit %xdefine _EVP_SignInit_ex _ %+ BORINGSSL_PREFIX %+ _EVP_SignInit_ex %xdefine _EVP_SignUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_SignUpdate %xdefine _EVP_VerifyFinal _ %+ BORINGSSL_PREFIX %+ _EVP_VerifyFinal %xdefine _EVP_VerifyInit _ %+ BORINGSSL_PREFIX %+ _EVP_VerifyInit %xdefine _EVP_VerifyInit_ex _ %+ BORINGSSL_PREFIX %+ _EVP_VerifyInit_ex %xdefine _EVP_VerifyUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_VerifyUpdate %xdefine _EVP_add_cipher_alias _ %+ BORINGSSL_PREFIX %+ _EVP_add_cipher_alias 
%xdefine _EVP_add_digest _ %+ BORINGSSL_PREFIX %+ _EVP_add_digest %xdefine _EVP_aead_aes_128_cbc_sha1_tls _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_cbc_sha1_tls %xdefine _EVP_aead_aes_128_cbc_sha1_tls_implicit_iv _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_cbc_sha1_tls_implicit_iv %xdefine _EVP_aead_aes_128_cbc_sha256_tls _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_cbc_sha256_tls %xdefine _EVP_aead_aes_128_ccm_bluetooth _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_ccm_bluetooth %xdefine _EVP_aead_aes_128_ccm_bluetooth_8 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_ccm_bluetooth_8 %xdefine _EVP_aead_aes_128_ccm_matter _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_ccm_matter %xdefine _EVP_aead_aes_128_ctr_hmac_sha256 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_ctr_hmac_sha256 %xdefine _EVP_aead_aes_128_gcm _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm %xdefine _EVP_aead_aes_128_gcm_randnonce _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_randnonce %xdefine _EVP_aead_aes_128_gcm_siv _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_siv %xdefine _EVP_aead_aes_128_gcm_tls12 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_tls12 %xdefine _EVP_aead_aes_128_gcm_tls13 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_tls13 %xdefine _EVP_aead_aes_192_gcm _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_192_gcm %xdefine _EVP_aead_aes_256_cbc_sha1_tls _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_256_cbc_sha1_tls %xdefine _EVP_aead_aes_256_cbc_sha1_tls_implicit_iv _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_256_cbc_sha1_tls_implicit_iv %xdefine _EVP_aead_aes_256_ctr_hmac_sha256 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_256_ctr_hmac_sha256 %xdefine _EVP_aead_aes_256_gcm _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm %xdefine _EVP_aead_aes_256_gcm_randnonce _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_randnonce %xdefine _EVP_aead_aes_256_gcm_siv _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_siv %xdefine _EVP_aead_aes_256_gcm_tls12 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_tls12 %xdefine 
_EVP_aead_aes_256_gcm_tls13 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_tls13 %xdefine _EVP_aead_chacha20_poly1305 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_chacha20_poly1305 %xdefine _EVP_aead_des_ede3_cbc_sha1_tls _ %+ BORINGSSL_PREFIX %+ _EVP_aead_des_ede3_cbc_sha1_tls %xdefine _EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv _ %+ BORINGSSL_PREFIX %+ _EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv %xdefine _EVP_aead_xchacha20_poly1305 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_xchacha20_poly1305 %xdefine _EVP_aes_128_cbc _ %+ BORINGSSL_PREFIX %+ _EVP_aes_128_cbc %xdefine _EVP_aes_128_ctr _ %+ BORINGSSL_PREFIX %+ _EVP_aes_128_ctr %xdefine _EVP_aes_128_ecb _ %+ BORINGSSL_PREFIX %+ _EVP_aes_128_ecb %xdefine _EVP_aes_128_gcm _ %+ BORINGSSL_PREFIX %+ _EVP_aes_128_gcm %xdefine _EVP_aes_128_ofb _ %+ BORINGSSL_PREFIX %+ _EVP_aes_128_ofb %xdefine _EVP_aes_192_cbc _ %+ BORINGSSL_PREFIX %+ _EVP_aes_192_cbc %xdefine _EVP_aes_192_ctr _ %+ BORINGSSL_PREFIX %+ _EVP_aes_192_ctr %xdefine _EVP_aes_192_ecb _ %+ BORINGSSL_PREFIX %+ _EVP_aes_192_ecb %xdefine _EVP_aes_192_gcm _ %+ BORINGSSL_PREFIX %+ _EVP_aes_192_gcm %xdefine _EVP_aes_192_ofb _ %+ BORINGSSL_PREFIX %+ _EVP_aes_192_ofb %xdefine _EVP_aes_256_cbc _ %+ BORINGSSL_PREFIX %+ _EVP_aes_256_cbc %xdefine _EVP_aes_256_ctr _ %+ BORINGSSL_PREFIX %+ _EVP_aes_256_ctr %xdefine _EVP_aes_256_ecb _ %+ BORINGSSL_PREFIX %+ _EVP_aes_256_ecb %xdefine _EVP_aes_256_gcm _ %+ BORINGSSL_PREFIX %+ _EVP_aes_256_gcm %xdefine _EVP_aes_256_ofb _ %+ BORINGSSL_PREFIX %+ _EVP_aes_256_ofb %xdefine _EVP_blake2b256 _ %+ BORINGSSL_PREFIX %+ _EVP_blake2b256 %xdefine _EVP_cleanup _ %+ BORINGSSL_PREFIX %+ _EVP_cleanup %xdefine _EVP_des_cbc _ %+ BORINGSSL_PREFIX %+ _EVP_des_cbc %xdefine _EVP_des_ecb _ %+ BORINGSSL_PREFIX %+ _EVP_des_ecb %xdefine _EVP_des_ede _ %+ BORINGSSL_PREFIX %+ _EVP_des_ede %xdefine _EVP_des_ede3 _ %+ BORINGSSL_PREFIX %+ _EVP_des_ede3 %xdefine _EVP_des_ede3_cbc _ %+ BORINGSSL_PREFIX %+ _EVP_des_ede3_cbc %xdefine _EVP_des_ede3_ecb _ %+ BORINGSSL_PREFIX %+ 
_EVP_des_ede3_ecb %xdefine _EVP_des_ede_cbc _ %+ BORINGSSL_PREFIX %+ _EVP_des_ede_cbc %xdefine _EVP_enc_null _ %+ BORINGSSL_PREFIX %+ _EVP_enc_null %xdefine _EVP_get_cipherbyname _ %+ BORINGSSL_PREFIX %+ _EVP_get_cipherbyname %xdefine _EVP_get_cipherbynid _ %+ BORINGSSL_PREFIX %+ _EVP_get_cipherbynid %xdefine _EVP_get_digestbyname _ %+ BORINGSSL_PREFIX %+ _EVP_get_digestbyname %xdefine _EVP_get_digestbynid _ %+ BORINGSSL_PREFIX %+ _EVP_get_digestbynid %xdefine _EVP_get_digestbyobj _ %+ BORINGSSL_PREFIX %+ _EVP_get_digestbyobj %xdefine _EVP_has_aes_hardware _ %+ BORINGSSL_PREFIX %+ _EVP_has_aes_hardware %xdefine _EVP_hpke_aes_128_gcm _ %+ BORINGSSL_PREFIX %+ _EVP_hpke_aes_128_gcm %xdefine _EVP_hpke_aes_256_gcm _ %+ BORINGSSL_PREFIX %+ _EVP_hpke_aes_256_gcm %xdefine _EVP_hpke_chacha20_poly1305 _ %+ BORINGSSL_PREFIX %+ _EVP_hpke_chacha20_poly1305 %xdefine _EVP_hpke_hkdf_sha256 _ %+ BORINGSSL_PREFIX %+ _EVP_hpke_hkdf_sha256 %xdefine _EVP_hpke_p256_hkdf_sha256 _ %+ BORINGSSL_PREFIX %+ _EVP_hpke_p256_hkdf_sha256 %xdefine _EVP_hpke_x25519_hkdf_sha256 _ %+ BORINGSSL_PREFIX %+ _EVP_hpke_x25519_hkdf_sha256 %xdefine _EVP_marshal_digest_algorithm _ %+ BORINGSSL_PREFIX %+ _EVP_marshal_digest_algorithm %xdefine _EVP_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _EVP_marshal_private_key %xdefine _EVP_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _EVP_marshal_public_key %xdefine _EVP_md4 _ %+ BORINGSSL_PREFIX %+ _EVP_md4 %xdefine _EVP_md5 _ %+ BORINGSSL_PREFIX %+ _EVP_md5 %xdefine _EVP_md5_sha1 _ %+ BORINGSSL_PREFIX %+ _EVP_md5_sha1 %xdefine _EVP_parse_digest_algorithm _ %+ BORINGSSL_PREFIX %+ _EVP_parse_digest_algorithm %xdefine _EVP_parse_private_key _ %+ BORINGSSL_PREFIX %+ _EVP_parse_private_key %xdefine _EVP_parse_public_key _ %+ BORINGSSL_PREFIX %+ _EVP_parse_public_key %xdefine _EVP_rc2_40_cbc _ %+ BORINGSSL_PREFIX %+ _EVP_rc2_40_cbc %xdefine _EVP_rc2_cbc _ %+ BORINGSSL_PREFIX %+ _EVP_rc2_cbc %xdefine _EVP_rc4 _ %+ BORINGSSL_PREFIX %+ _EVP_rc4 %xdefine _EVP_sha1 _ %+ 
BORINGSSL_PREFIX %+ _EVP_sha1 %xdefine _EVP_sha1_final_with_secret_suffix _ %+ BORINGSSL_PREFIX %+ _EVP_sha1_final_with_secret_suffix %xdefine _EVP_sha224 _ %+ BORINGSSL_PREFIX %+ _EVP_sha224 %xdefine _EVP_sha256 _ %+ BORINGSSL_PREFIX %+ _EVP_sha256 %xdefine _EVP_sha256_final_with_secret_suffix _ %+ BORINGSSL_PREFIX %+ _EVP_sha256_final_with_secret_suffix %xdefine _EVP_sha384 _ %+ BORINGSSL_PREFIX %+ _EVP_sha384 %xdefine _EVP_sha512 _ %+ BORINGSSL_PREFIX %+ _EVP_sha512 %xdefine _EVP_sha512_256 _ %+ BORINGSSL_PREFIX %+ _EVP_sha512_256 %xdefine _EVP_tls_cbc_copy_mac _ %+ BORINGSSL_PREFIX %+ _EVP_tls_cbc_copy_mac %xdefine _EVP_tls_cbc_digest_record _ %+ BORINGSSL_PREFIX %+ _EVP_tls_cbc_digest_record %xdefine _EVP_tls_cbc_record_digest_supported _ %+ BORINGSSL_PREFIX %+ _EVP_tls_cbc_record_digest_supported %xdefine _EVP_tls_cbc_remove_padding _ %+ BORINGSSL_PREFIX %+ _EVP_tls_cbc_remove_padding %xdefine _EXTENDED_KEY_USAGE_free _ %+ BORINGSSL_PREFIX %+ _EXTENDED_KEY_USAGE_free %xdefine _EXTENDED_KEY_USAGE_it _ %+ BORINGSSL_PREFIX %+ _EXTENDED_KEY_USAGE_it %xdefine _EXTENDED_KEY_USAGE_new _ %+ BORINGSSL_PREFIX %+ _EXTENDED_KEY_USAGE_new %xdefine _FIPS_mode _ %+ BORINGSSL_PREFIX %+ _FIPS_mode %xdefine _FIPS_mode_set _ %+ BORINGSSL_PREFIX %+ _FIPS_mode_set %xdefine _FIPS_module_name _ %+ BORINGSSL_PREFIX %+ _FIPS_module_name %xdefine _FIPS_query_algorithm_status _ %+ BORINGSSL_PREFIX %+ _FIPS_query_algorithm_status %xdefine _FIPS_read_counter _ %+ BORINGSSL_PREFIX %+ _FIPS_read_counter %xdefine _FIPS_service_indicator_after_call _ %+ BORINGSSL_PREFIX %+ _FIPS_service_indicator_after_call %xdefine _FIPS_service_indicator_before_call _ %+ BORINGSSL_PREFIX %+ _FIPS_service_indicator_before_call %xdefine _FIPS_version _ %+ BORINGSSL_PREFIX %+ _FIPS_version %xdefine _GENERAL_NAMES_free _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAMES_free %xdefine _GENERAL_NAMES_it _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAMES_it %xdefine _GENERAL_NAMES_new _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAMES_new 
%xdefine _GENERAL_NAME_cmp _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAME_cmp %xdefine _GENERAL_NAME_dup _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAME_dup %xdefine _GENERAL_NAME_free _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAME_free %xdefine _GENERAL_NAME_get0_otherName _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAME_get0_otherName %xdefine _GENERAL_NAME_get0_value _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAME_get0_value %xdefine _GENERAL_NAME_it _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAME_it %xdefine _GENERAL_NAME_new _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAME_new %xdefine _GENERAL_NAME_print _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAME_print %xdefine _GENERAL_NAME_set0_othername _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAME_set0_othername %xdefine _GENERAL_NAME_set0_value _ %+ BORINGSSL_PREFIX %+ _GENERAL_NAME_set0_value %xdefine _GENERAL_SUBTREE_free _ %+ BORINGSSL_PREFIX %+ _GENERAL_SUBTREE_free %xdefine _GENERAL_SUBTREE_new _ %+ BORINGSSL_PREFIX %+ _GENERAL_SUBTREE_new %xdefine _HKDF _ %+ BORINGSSL_PREFIX %+ _HKDF %xdefine _HKDF_expand _ %+ BORINGSSL_PREFIX %+ _HKDF_expand %xdefine _HKDF_extract _ %+ BORINGSSL_PREFIX %+ _HKDF_extract %xdefine _HMAC _ %+ BORINGSSL_PREFIX %+ _HMAC %xdefine _HMAC_CTX_cleanse _ %+ BORINGSSL_PREFIX %+ _HMAC_CTX_cleanse %xdefine _HMAC_CTX_cleanup _ %+ BORINGSSL_PREFIX %+ _HMAC_CTX_cleanup %xdefine _HMAC_CTX_copy _ %+ BORINGSSL_PREFIX %+ _HMAC_CTX_copy %xdefine _HMAC_CTX_copy_ex _ %+ BORINGSSL_PREFIX %+ _HMAC_CTX_copy_ex %xdefine _HMAC_CTX_free _ %+ BORINGSSL_PREFIX %+ _HMAC_CTX_free %xdefine _HMAC_CTX_get_md _ %+ BORINGSSL_PREFIX %+ _HMAC_CTX_get_md %xdefine _HMAC_CTX_init _ %+ BORINGSSL_PREFIX %+ _HMAC_CTX_init %xdefine _HMAC_CTX_new _ %+ BORINGSSL_PREFIX %+ _HMAC_CTX_new %xdefine _HMAC_CTX_reset _ %+ BORINGSSL_PREFIX %+ _HMAC_CTX_reset %xdefine _HMAC_Final _ %+ BORINGSSL_PREFIX %+ _HMAC_Final %xdefine _HMAC_Init _ %+ BORINGSSL_PREFIX %+ _HMAC_Init %xdefine _HMAC_Init_ex _ %+ BORINGSSL_PREFIX %+ _HMAC_Init_ex %xdefine _HMAC_Update _ %+ BORINGSSL_PREFIX %+ _HMAC_Update %xdefine _HMAC_size _ 
%+ BORINGSSL_PREFIX %+ _HMAC_size %xdefine _HRSS_decap _ %+ BORINGSSL_PREFIX %+ _HRSS_decap %xdefine _HRSS_encap _ %+ BORINGSSL_PREFIX %+ _HRSS_encap %xdefine _HRSS_generate_key _ %+ BORINGSSL_PREFIX %+ _HRSS_generate_key %xdefine _HRSS_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _HRSS_marshal_public_key %xdefine _HRSS_parse_public_key _ %+ BORINGSSL_PREFIX %+ _HRSS_parse_public_key %xdefine _HRSS_poly3_invert _ %+ BORINGSSL_PREFIX %+ _HRSS_poly3_invert %xdefine _HRSS_poly3_mul _ %+ BORINGSSL_PREFIX %+ _HRSS_poly3_mul %xdefine _ISSUING_DIST_POINT_free _ %+ BORINGSSL_PREFIX %+ _ISSUING_DIST_POINT_free %xdefine _ISSUING_DIST_POINT_it _ %+ BORINGSSL_PREFIX %+ _ISSUING_DIST_POINT_it %xdefine _ISSUING_DIST_POINT_new _ %+ BORINGSSL_PREFIX %+ _ISSUING_DIST_POINT_new %xdefine _KYBER_decap _ %+ BORINGSSL_PREFIX %+ _KYBER_decap %xdefine _KYBER_encap _ %+ BORINGSSL_PREFIX %+ _KYBER_encap %xdefine _KYBER_encap_external_entropy _ %+ BORINGSSL_PREFIX %+ _KYBER_encap_external_entropy %xdefine _KYBER_generate_key _ %+ BORINGSSL_PREFIX %+ _KYBER_generate_key %xdefine _KYBER_generate_key_external_entropy _ %+ BORINGSSL_PREFIX %+ _KYBER_generate_key_external_entropy %xdefine _KYBER_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _KYBER_marshal_private_key %xdefine _KYBER_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _KYBER_marshal_public_key %xdefine _KYBER_parse_private_key _ %+ BORINGSSL_PREFIX %+ _KYBER_parse_private_key %xdefine _KYBER_parse_public_key _ %+ BORINGSSL_PREFIX %+ _KYBER_parse_public_key %xdefine _KYBER_public_from_private _ %+ BORINGSSL_PREFIX %+ _KYBER_public_from_private %xdefine _MD4 _ %+ BORINGSSL_PREFIX %+ _MD4 %xdefine _MD4_Final _ %+ BORINGSSL_PREFIX %+ _MD4_Final %xdefine _MD4_Init _ %+ BORINGSSL_PREFIX %+ _MD4_Init %xdefine _MD4_Transform _ %+ BORINGSSL_PREFIX %+ _MD4_Transform %xdefine _MD4_Update _ %+ BORINGSSL_PREFIX %+ _MD4_Update %xdefine _MD5 _ %+ BORINGSSL_PREFIX %+ _MD5 %xdefine _MD5_Final _ %+ BORINGSSL_PREFIX %+ _MD5_Final %xdefine _MD5_Init _ %+ 
BORINGSSL_PREFIX %+ _MD5_Init %xdefine _MD5_Transform _ %+ BORINGSSL_PREFIX %+ _MD5_Transform %xdefine _MD5_Update _ %+ BORINGSSL_PREFIX %+ _MD5_Update %xdefine _METHOD_ref _ %+ BORINGSSL_PREFIX %+ _METHOD_ref %xdefine _METHOD_unref _ %+ BORINGSSL_PREFIX %+ _METHOD_unref %xdefine _MLDSA65_generate_key _ %+ BORINGSSL_PREFIX %+ _MLDSA65_generate_key %xdefine _MLDSA65_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _MLDSA65_marshal_public_key %xdefine _MLDSA65_parse_public_key _ %+ BORINGSSL_PREFIX %+ _MLDSA65_parse_public_key %xdefine _MLDSA65_private_key_from_seed _ %+ BORINGSSL_PREFIX %+ _MLDSA65_private_key_from_seed %xdefine _MLDSA65_public_from_private _ %+ BORINGSSL_PREFIX %+ _MLDSA65_public_from_private %xdefine _MLDSA65_sign _ %+ BORINGSSL_PREFIX %+ _MLDSA65_sign %xdefine _MLDSA65_verify _ %+ BORINGSSL_PREFIX %+ _MLDSA65_verify %xdefine _MLKEM1024_decap _ %+ BORINGSSL_PREFIX %+ _MLKEM1024_decap %xdefine _MLKEM1024_encap _ %+ BORINGSSL_PREFIX %+ _MLKEM1024_encap %xdefine _MLKEM1024_generate_key _ %+ BORINGSSL_PREFIX %+ _MLKEM1024_generate_key %xdefine _MLKEM1024_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _MLKEM1024_marshal_public_key %xdefine _MLKEM1024_parse_public_key _ %+ BORINGSSL_PREFIX %+ _MLKEM1024_parse_public_key %xdefine _MLKEM1024_private_key_from_seed _ %+ BORINGSSL_PREFIX %+ _MLKEM1024_private_key_from_seed %xdefine _MLKEM1024_public_from_private _ %+ BORINGSSL_PREFIX %+ _MLKEM1024_public_from_private %xdefine _MLKEM768_decap _ %+ BORINGSSL_PREFIX %+ _MLKEM768_decap %xdefine _MLKEM768_encap _ %+ BORINGSSL_PREFIX %+ _MLKEM768_encap %xdefine _MLKEM768_generate_key _ %+ BORINGSSL_PREFIX %+ _MLKEM768_generate_key %xdefine _MLKEM768_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _MLKEM768_marshal_public_key %xdefine _MLKEM768_parse_public_key _ %+ BORINGSSL_PREFIX %+ _MLKEM768_parse_public_key %xdefine _MLKEM768_private_key_from_seed _ %+ BORINGSSL_PREFIX %+ _MLKEM768_private_key_from_seed %xdefine _MLKEM768_public_from_private _ %+ BORINGSSL_PREFIX %+ 
_MLKEM768_public_from_private %xdefine _NAME_CONSTRAINTS_check _ %+ BORINGSSL_PREFIX %+ _NAME_CONSTRAINTS_check %xdefine _NAME_CONSTRAINTS_free _ %+ BORINGSSL_PREFIX %+ _NAME_CONSTRAINTS_free %xdefine _NAME_CONSTRAINTS_it _ %+ BORINGSSL_PREFIX %+ _NAME_CONSTRAINTS_it %xdefine _NAME_CONSTRAINTS_new _ %+ BORINGSSL_PREFIX %+ _NAME_CONSTRAINTS_new %xdefine _NCONF_free _ %+ BORINGSSL_PREFIX %+ _NCONF_free %xdefine _NCONF_get_section _ %+ BORINGSSL_PREFIX %+ _NCONF_get_section %xdefine _NCONF_get_string _ %+ BORINGSSL_PREFIX %+ _NCONF_get_string %xdefine _NCONF_load _ %+ BORINGSSL_PREFIX %+ _NCONF_load %xdefine _NCONF_load_bio _ %+ BORINGSSL_PREFIX %+ _NCONF_load_bio %xdefine _NCONF_new _ %+ BORINGSSL_PREFIX %+ _NCONF_new %xdefine _NETSCAPE_SPKAC_free _ %+ BORINGSSL_PREFIX %+ _NETSCAPE_SPKAC_free %xdefine _NETSCAPE_SPKAC_it _ %+ BORINGSSL_PREFIX %+ _NETSCAPE_SPKAC_it %xdefine _NETSCAPE_SPKAC_new _ %+ BORINGSSL_PREFIX %+ _NETSCAPE_SPKAC_new %xdefine _NETSCAPE_SPKI_b64_decode _ %+ BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_b64_decode %xdefine _NETSCAPE_SPKI_b64_encode _ %+ BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_b64_encode %xdefine _NETSCAPE_SPKI_free _ %+ BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_free %xdefine _NETSCAPE_SPKI_get_pubkey _ %+ BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_get_pubkey %xdefine _NETSCAPE_SPKI_it _ %+ BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_it %xdefine _NETSCAPE_SPKI_new _ %+ BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_new %xdefine _NETSCAPE_SPKI_set_pubkey _ %+ BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_set_pubkey %xdefine _NETSCAPE_SPKI_sign _ %+ BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_sign %xdefine _NETSCAPE_SPKI_verify _ %+ BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_verify %xdefine _NOTICEREF_free _ %+ BORINGSSL_PREFIX %+ _NOTICEREF_free %xdefine _NOTICEREF_it _ %+ BORINGSSL_PREFIX %+ _NOTICEREF_it %xdefine _NOTICEREF_new _ %+ BORINGSSL_PREFIX %+ _NOTICEREF_new %xdefine _OBJ_cbs2nid _ %+ BORINGSSL_PREFIX %+ _OBJ_cbs2nid %xdefine _OBJ_cleanup _ %+ BORINGSSL_PREFIX %+ _OBJ_cleanup %xdefine _OBJ_cmp _ %+ 
BORINGSSL_PREFIX %+ _OBJ_cmp %xdefine _OBJ_create _ %+ BORINGSSL_PREFIX %+ _OBJ_create %xdefine _OBJ_dup _ %+ BORINGSSL_PREFIX %+ _OBJ_dup %xdefine _OBJ_find_sigid_algs _ %+ BORINGSSL_PREFIX %+ _OBJ_find_sigid_algs %xdefine _OBJ_find_sigid_by_algs _ %+ BORINGSSL_PREFIX %+ _OBJ_find_sigid_by_algs %xdefine _OBJ_get0_data _ %+ BORINGSSL_PREFIX %+ _OBJ_get0_data %xdefine _OBJ_get_undef _ %+ BORINGSSL_PREFIX %+ _OBJ_get_undef %xdefine _OBJ_length _ %+ BORINGSSL_PREFIX %+ _OBJ_length %xdefine _OBJ_ln2nid _ %+ BORINGSSL_PREFIX %+ _OBJ_ln2nid %xdefine _OBJ_nid2cbb _ %+ BORINGSSL_PREFIX %+ _OBJ_nid2cbb %xdefine _OBJ_nid2ln _ %+ BORINGSSL_PREFIX %+ _OBJ_nid2ln %xdefine _OBJ_nid2obj _ %+ BORINGSSL_PREFIX %+ _OBJ_nid2obj %xdefine _OBJ_nid2sn _ %+ BORINGSSL_PREFIX %+ _OBJ_nid2sn %xdefine _OBJ_obj2nid _ %+ BORINGSSL_PREFIX %+ _OBJ_obj2nid %xdefine _OBJ_obj2txt _ %+ BORINGSSL_PREFIX %+ _OBJ_obj2txt %xdefine _OBJ_sn2nid _ %+ BORINGSSL_PREFIX %+ _OBJ_sn2nid %xdefine _OBJ_txt2nid _ %+ BORINGSSL_PREFIX %+ _OBJ_txt2nid %xdefine _OBJ_txt2obj _ %+ BORINGSSL_PREFIX %+ _OBJ_txt2obj %xdefine _OPENSSL_add_all_algorithms_conf _ %+ BORINGSSL_PREFIX %+ _OPENSSL_add_all_algorithms_conf %xdefine _OPENSSL_armcap_P _ %+ BORINGSSL_PREFIX %+ _OPENSSL_armcap_P %xdefine _OPENSSL_asprintf _ %+ BORINGSSL_PREFIX %+ _OPENSSL_asprintf %xdefine _OPENSSL_calloc _ %+ BORINGSSL_PREFIX %+ _OPENSSL_calloc %xdefine _OPENSSL_cleanse _ %+ BORINGSSL_PREFIX %+ _OPENSSL_cleanse %xdefine _OPENSSL_cleanup _ %+ BORINGSSL_PREFIX %+ _OPENSSL_cleanup %xdefine _OPENSSL_clear_free _ %+ BORINGSSL_PREFIX %+ _OPENSSL_clear_free %xdefine _OPENSSL_config _ %+ BORINGSSL_PREFIX %+ _OPENSSL_config %xdefine _OPENSSL_cpuid_setup _ %+ BORINGSSL_PREFIX %+ _OPENSSL_cpuid_setup %xdefine _OPENSSL_free _ %+ BORINGSSL_PREFIX %+ _OPENSSL_free %xdefine _OPENSSL_fromxdigit _ %+ BORINGSSL_PREFIX %+ _OPENSSL_fromxdigit %xdefine _OPENSSL_get_armcap _ %+ BORINGSSL_PREFIX %+ _OPENSSL_get_armcap %xdefine _OPENSSL_get_armcap_pointer_for_test _ %+ 
BORINGSSL_PREFIX %+ _OPENSSL_get_armcap_pointer_for_test %xdefine _OPENSSL_get_ia32cap _ %+ BORINGSSL_PREFIX %+ _OPENSSL_get_ia32cap %xdefine _OPENSSL_gmtime _ %+ BORINGSSL_PREFIX %+ _OPENSSL_gmtime %xdefine _OPENSSL_gmtime_adj _ %+ BORINGSSL_PREFIX %+ _OPENSSL_gmtime_adj %xdefine _OPENSSL_gmtime_diff _ %+ BORINGSSL_PREFIX %+ _OPENSSL_gmtime_diff %xdefine _OPENSSL_hash32 _ %+ BORINGSSL_PREFIX %+ _OPENSSL_hash32 %xdefine _OPENSSL_ia32cap_P _ %+ BORINGSSL_PREFIX %+ _OPENSSL_ia32cap_P %xdefine _OPENSSL_init_cpuid _ %+ BORINGSSL_PREFIX %+ _OPENSSL_init_cpuid %xdefine _OPENSSL_init_crypto _ %+ BORINGSSL_PREFIX %+ _OPENSSL_init_crypto %xdefine _OPENSSL_init_ssl _ %+ BORINGSSL_PREFIX %+ _OPENSSL_init_ssl %xdefine _OPENSSL_isalnum _ %+ BORINGSSL_PREFIX %+ _OPENSSL_isalnum %xdefine _OPENSSL_isalpha _ %+ BORINGSSL_PREFIX %+ _OPENSSL_isalpha %xdefine _OPENSSL_isdigit _ %+ BORINGSSL_PREFIX %+ _OPENSSL_isdigit %xdefine _OPENSSL_isspace _ %+ BORINGSSL_PREFIX %+ _OPENSSL_isspace %xdefine _OPENSSL_isxdigit _ %+ BORINGSSL_PREFIX %+ _OPENSSL_isxdigit %xdefine _OPENSSL_lh_delete _ %+ BORINGSSL_PREFIX %+ _OPENSSL_lh_delete %xdefine _OPENSSL_lh_doall_arg _ %+ BORINGSSL_PREFIX %+ _OPENSSL_lh_doall_arg %xdefine _OPENSSL_lh_free _ %+ BORINGSSL_PREFIX %+ _OPENSSL_lh_free %xdefine _OPENSSL_lh_insert _ %+ BORINGSSL_PREFIX %+ _OPENSSL_lh_insert %xdefine _OPENSSL_lh_new _ %+ BORINGSSL_PREFIX %+ _OPENSSL_lh_new %xdefine _OPENSSL_lh_num_items _ %+ BORINGSSL_PREFIX %+ _OPENSSL_lh_num_items %xdefine _OPENSSL_lh_retrieve _ %+ BORINGSSL_PREFIX %+ _OPENSSL_lh_retrieve %xdefine _OPENSSL_lh_retrieve_key _ %+ BORINGSSL_PREFIX %+ _OPENSSL_lh_retrieve_key %xdefine _OPENSSL_load_builtin_modules _ %+ BORINGSSL_PREFIX %+ _OPENSSL_load_builtin_modules %xdefine _OPENSSL_malloc _ %+ BORINGSSL_PREFIX %+ _OPENSSL_malloc %xdefine _OPENSSL_malloc_init _ %+ BORINGSSL_PREFIX %+ _OPENSSL_malloc_init %xdefine _OPENSSL_memdup _ %+ BORINGSSL_PREFIX %+ _OPENSSL_memdup %xdefine _OPENSSL_no_config _ %+ BORINGSSL_PREFIX %+ 
_OPENSSL_no_config %xdefine _OPENSSL_posix_to_tm _ %+ BORINGSSL_PREFIX %+ _OPENSSL_posix_to_tm %xdefine _OPENSSL_realloc _ %+ BORINGSSL_PREFIX %+ _OPENSSL_realloc %xdefine _OPENSSL_secure_clear_free _ %+ BORINGSSL_PREFIX %+ _OPENSSL_secure_clear_free %xdefine _OPENSSL_secure_malloc _ %+ BORINGSSL_PREFIX %+ _OPENSSL_secure_malloc %xdefine _OPENSSL_sk_deep_copy _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_deep_copy %xdefine _OPENSSL_sk_delete _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_delete %xdefine _OPENSSL_sk_delete_if _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_delete_if %xdefine _OPENSSL_sk_delete_ptr _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_delete_ptr %xdefine _OPENSSL_sk_dup _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_dup %xdefine _OPENSSL_sk_find _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_find %xdefine _OPENSSL_sk_free _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_free %xdefine _OPENSSL_sk_insert _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_insert %xdefine _OPENSSL_sk_is_sorted _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_is_sorted %xdefine _OPENSSL_sk_new _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_new %xdefine _OPENSSL_sk_new_null _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_new_null %xdefine _OPENSSL_sk_num _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_num %xdefine _OPENSSL_sk_pop _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_pop %xdefine _OPENSSL_sk_pop_free_ex _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_pop_free_ex %xdefine _OPENSSL_sk_push _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_push %xdefine _OPENSSL_sk_set _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_set %xdefine _OPENSSL_sk_set_cmp_func _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_set_cmp_func %xdefine _OPENSSL_sk_shift _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_shift %xdefine _OPENSSL_sk_sort _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_sort %xdefine _OPENSSL_sk_value _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_value %xdefine _OPENSSL_sk_zero _ %+ BORINGSSL_PREFIX %+ _OPENSSL_sk_zero %xdefine _OPENSSL_strcasecmp _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strcasecmp %xdefine _OPENSSL_strdup _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strdup %xdefine 
_OPENSSL_strhash _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strhash %xdefine _OPENSSL_strlcat _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strlcat %xdefine _OPENSSL_strlcpy _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strlcpy %xdefine _OPENSSL_strncasecmp _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strncasecmp %xdefine _OPENSSL_strndup _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strndup %xdefine _OPENSSL_strnlen _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strnlen %xdefine _OPENSSL_timegm _ %+ BORINGSSL_PREFIX %+ _OPENSSL_timegm %xdefine _OPENSSL_tm_to_posix _ %+ BORINGSSL_PREFIX %+ _OPENSSL_tm_to_posix %xdefine _OPENSSL_tolower _ %+ BORINGSSL_PREFIX %+ _OPENSSL_tolower %xdefine _OPENSSL_vasprintf _ %+ BORINGSSL_PREFIX %+ _OPENSSL_vasprintf %xdefine _OPENSSL_vasprintf_internal _ %+ BORINGSSL_PREFIX %+ _OPENSSL_vasprintf_internal %xdefine _OPENSSL_zalloc _ %+ BORINGSSL_PREFIX %+ _OPENSSL_zalloc %xdefine _OTHERNAME_free _ %+ BORINGSSL_PREFIX %+ _OTHERNAME_free %xdefine _OTHERNAME_new _ %+ BORINGSSL_PREFIX %+ _OTHERNAME_new %xdefine _OpenSSL_add_all_algorithms _ %+ BORINGSSL_PREFIX %+ _OpenSSL_add_all_algorithms %xdefine _OpenSSL_add_all_ciphers _ %+ BORINGSSL_PREFIX %+ _OpenSSL_add_all_ciphers %xdefine _OpenSSL_add_all_digests _ %+ BORINGSSL_PREFIX %+ _OpenSSL_add_all_digests %xdefine _OpenSSL_version _ %+ BORINGSSL_PREFIX %+ _OpenSSL_version %xdefine _OpenSSL_version_num _ %+ BORINGSSL_PREFIX %+ _OpenSSL_version_num %xdefine _PEM_ASN1_read _ %+ BORINGSSL_PREFIX %+ _PEM_ASN1_read %xdefine _PEM_ASN1_read_bio _ %+ BORINGSSL_PREFIX %+ _PEM_ASN1_read_bio %xdefine _PEM_ASN1_write _ %+ BORINGSSL_PREFIX %+ _PEM_ASN1_write %xdefine _PEM_ASN1_write_bio _ %+ BORINGSSL_PREFIX %+ _PEM_ASN1_write_bio %xdefine _PEM_X509_INFO_read _ %+ BORINGSSL_PREFIX %+ _PEM_X509_INFO_read %xdefine _PEM_X509_INFO_read_bio _ %+ BORINGSSL_PREFIX %+ _PEM_X509_INFO_read_bio %xdefine _PEM_bytes_read_bio _ %+ BORINGSSL_PREFIX %+ _PEM_bytes_read_bio %xdefine _PEM_def_callback _ %+ BORINGSSL_PREFIX %+ _PEM_def_callback %xdefine _PEM_do_header _ %+ BORINGSSL_PREFIX 
%+ _PEM_do_header %xdefine _PEM_get_EVP_CIPHER_INFO _ %+ BORINGSSL_PREFIX %+ _PEM_get_EVP_CIPHER_INFO %xdefine _PEM_read _ %+ BORINGSSL_PREFIX %+ _PEM_read %xdefine _PEM_read_DHparams _ %+ BORINGSSL_PREFIX %+ _PEM_read_DHparams %xdefine _PEM_read_DSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_read_DSAPrivateKey %xdefine _PEM_read_DSA_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_read_DSA_PUBKEY %xdefine _PEM_read_DSAparams _ %+ BORINGSSL_PREFIX %+ _PEM_read_DSAparams %xdefine _PEM_read_ECPrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_read_ECPrivateKey %xdefine _PEM_read_EC_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_read_EC_PUBKEY %xdefine _PEM_read_PKCS7 _ %+ BORINGSSL_PREFIX %+ _PEM_read_PKCS7 %xdefine _PEM_read_PKCS8 _ %+ BORINGSSL_PREFIX %+ _PEM_read_PKCS8 %xdefine _PEM_read_PKCS8_PRIV_KEY_INFO _ %+ BORINGSSL_PREFIX %+ _PEM_read_PKCS8_PRIV_KEY_INFO %xdefine _PEM_read_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_read_PUBKEY %xdefine _PEM_read_PrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_read_PrivateKey %xdefine _PEM_read_RSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_read_RSAPrivateKey %xdefine _PEM_read_RSAPublicKey _ %+ BORINGSSL_PREFIX %+ _PEM_read_RSAPublicKey %xdefine _PEM_read_RSA_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_read_RSA_PUBKEY %xdefine _PEM_read_SSL_SESSION _ %+ BORINGSSL_PREFIX %+ _PEM_read_SSL_SESSION %xdefine _PEM_read_X509 _ %+ BORINGSSL_PREFIX %+ _PEM_read_X509 %xdefine _PEM_read_X509_AUX _ %+ BORINGSSL_PREFIX %+ _PEM_read_X509_AUX %xdefine _PEM_read_X509_CRL _ %+ BORINGSSL_PREFIX %+ _PEM_read_X509_CRL %xdefine _PEM_read_X509_REQ _ %+ BORINGSSL_PREFIX %+ _PEM_read_X509_REQ %xdefine _PEM_read_bio _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio %xdefine _PEM_read_bio_DHparams _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_DHparams %xdefine _PEM_read_bio_DSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_DSAPrivateKey %xdefine _PEM_read_bio_DSA_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_DSA_PUBKEY %xdefine _PEM_read_bio_DSAparams _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_DSAparams %xdefine 
_PEM_read_bio_ECPrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_ECPrivateKey %xdefine _PEM_read_bio_EC_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_EC_PUBKEY %xdefine _PEM_read_bio_PKCS7 _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_PKCS7 %xdefine _PEM_read_bio_PKCS8 _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_PKCS8 %xdefine _PEM_read_bio_PKCS8_PRIV_KEY_INFO _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_PKCS8_PRIV_KEY_INFO %xdefine _PEM_read_bio_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_PUBKEY %xdefine _PEM_read_bio_PrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_PrivateKey %xdefine _PEM_read_bio_RSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_RSAPrivateKey %xdefine _PEM_read_bio_RSAPublicKey _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_RSAPublicKey %xdefine _PEM_read_bio_RSA_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_RSA_PUBKEY %xdefine _PEM_read_bio_SSL_SESSION _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_SSL_SESSION %xdefine _PEM_read_bio_X509 _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_X509 %xdefine _PEM_read_bio_X509_AUX _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_X509_AUX %xdefine _PEM_read_bio_X509_CRL _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_X509_CRL %xdefine _PEM_read_bio_X509_REQ _ %+ BORINGSSL_PREFIX %+ _PEM_read_bio_X509_REQ %xdefine _PEM_write _ %+ BORINGSSL_PREFIX %+ _PEM_write %xdefine _PEM_write_DHparams _ %+ BORINGSSL_PREFIX %+ _PEM_write_DHparams %xdefine _PEM_write_DSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_write_DSAPrivateKey %xdefine _PEM_write_DSA_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_write_DSA_PUBKEY %xdefine _PEM_write_DSAparams _ %+ BORINGSSL_PREFIX %+ _PEM_write_DSAparams %xdefine _PEM_write_ECPrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_write_ECPrivateKey %xdefine _PEM_write_EC_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_write_EC_PUBKEY %xdefine _PEM_write_PKCS7 _ %+ BORINGSSL_PREFIX %+ _PEM_write_PKCS7 %xdefine _PEM_write_PKCS8 _ %+ BORINGSSL_PREFIX %+ _PEM_write_PKCS8 %xdefine _PEM_write_PKCS8PrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_write_PKCS8PrivateKey %xdefine 
_PEM_write_PKCS8PrivateKey_nid _ %+ BORINGSSL_PREFIX %+ _PEM_write_PKCS8PrivateKey_nid %xdefine _PEM_write_PKCS8_PRIV_KEY_INFO _ %+ BORINGSSL_PREFIX %+ _PEM_write_PKCS8_PRIV_KEY_INFO %xdefine _PEM_write_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_write_PUBKEY %xdefine _PEM_write_PrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_write_PrivateKey %xdefine _PEM_write_RSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_write_RSAPrivateKey %xdefine _PEM_write_RSAPublicKey _ %+ BORINGSSL_PREFIX %+ _PEM_write_RSAPublicKey %xdefine _PEM_write_RSA_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_write_RSA_PUBKEY %xdefine _PEM_write_SSL_SESSION _ %+ BORINGSSL_PREFIX %+ _PEM_write_SSL_SESSION %xdefine _PEM_write_X509 _ %+ BORINGSSL_PREFIX %+ _PEM_write_X509 %xdefine _PEM_write_X509_AUX _ %+ BORINGSSL_PREFIX %+ _PEM_write_X509_AUX %xdefine _PEM_write_X509_CRL _ %+ BORINGSSL_PREFIX %+ _PEM_write_X509_CRL %xdefine _PEM_write_X509_REQ _ %+ BORINGSSL_PREFIX %+ _PEM_write_X509_REQ %xdefine _PEM_write_X509_REQ_NEW _ %+ BORINGSSL_PREFIX %+ _PEM_write_X509_REQ_NEW %xdefine _PEM_write_bio _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio %xdefine _PEM_write_bio_DHparams _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_DHparams %xdefine _PEM_write_bio_DSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_DSAPrivateKey %xdefine _PEM_write_bio_DSA_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_DSA_PUBKEY %xdefine _PEM_write_bio_DSAparams _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_DSAparams %xdefine _PEM_write_bio_ECPrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_ECPrivateKey %xdefine _PEM_write_bio_EC_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_EC_PUBKEY %xdefine _PEM_write_bio_PKCS7 _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_PKCS7 %xdefine _PEM_write_bio_PKCS8 _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_PKCS8 %xdefine _PEM_write_bio_PKCS8PrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_PKCS8PrivateKey %xdefine _PEM_write_bio_PKCS8PrivateKey_nid _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_PKCS8PrivateKey_nid %xdefine 
_PEM_write_bio_PKCS8_PRIV_KEY_INFO _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_PKCS8_PRIV_KEY_INFO %xdefine _PEM_write_bio_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_PUBKEY %xdefine _PEM_write_bio_PrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_PrivateKey %xdefine _PEM_write_bio_RSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_RSAPrivateKey %xdefine _PEM_write_bio_RSAPublicKey _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_RSAPublicKey %xdefine _PEM_write_bio_RSA_PUBKEY _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_RSA_PUBKEY %xdefine _PEM_write_bio_SSL_SESSION _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_SSL_SESSION %xdefine _PEM_write_bio_X509 _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_X509 %xdefine _PEM_write_bio_X509_AUX _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_X509_AUX %xdefine _PEM_write_bio_X509_CRL _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_X509_CRL %xdefine _PEM_write_bio_X509_REQ _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_X509_REQ %xdefine _PEM_write_bio_X509_REQ_NEW _ %+ BORINGSSL_PREFIX %+ _PEM_write_bio_X509_REQ_NEW %xdefine _PKCS12_PBE_add _ %+ BORINGSSL_PREFIX %+ _PKCS12_PBE_add %xdefine _PKCS12_create _ %+ BORINGSSL_PREFIX %+ _PKCS12_create %xdefine _PKCS12_free _ %+ BORINGSSL_PREFIX %+ _PKCS12_free %xdefine _PKCS12_get_key_and_certs _ %+ BORINGSSL_PREFIX %+ _PKCS12_get_key_and_certs %xdefine _PKCS12_parse _ %+ BORINGSSL_PREFIX %+ _PKCS12_parse %xdefine _PKCS12_verify_mac _ %+ BORINGSSL_PREFIX %+ _PKCS12_verify_mac %xdefine _PKCS1_MGF1 _ %+ BORINGSSL_PREFIX %+ _PKCS1_MGF1 %xdefine _PKCS5_PBKDF2_HMAC _ %+ BORINGSSL_PREFIX %+ _PKCS5_PBKDF2_HMAC %xdefine _PKCS5_PBKDF2_HMAC_SHA1 _ %+ BORINGSSL_PREFIX %+ _PKCS5_PBKDF2_HMAC_SHA1 %xdefine _PKCS5_pbe2_decrypt_init _ %+ BORINGSSL_PREFIX %+ _PKCS5_pbe2_decrypt_init %xdefine _PKCS5_pbe2_encrypt_init _ %+ BORINGSSL_PREFIX %+ _PKCS5_pbe2_encrypt_init %xdefine _PKCS7_bundle_CRLs _ %+ BORINGSSL_PREFIX %+ _PKCS7_bundle_CRLs %xdefine _PKCS7_bundle_certificates _ %+ BORINGSSL_PREFIX %+ _PKCS7_bundle_certificates %xdefine 
_PKCS7_bundle_raw_certificates _ %+ BORINGSSL_PREFIX %+ _PKCS7_bundle_raw_certificates %xdefine _PKCS7_free _ %+ BORINGSSL_PREFIX %+ _PKCS7_free %xdefine _PKCS7_get_CRLs _ %+ BORINGSSL_PREFIX %+ _PKCS7_get_CRLs %xdefine _PKCS7_get_PEM_CRLs _ %+ BORINGSSL_PREFIX %+ _PKCS7_get_PEM_CRLs %xdefine _PKCS7_get_PEM_certificates _ %+ BORINGSSL_PREFIX %+ _PKCS7_get_PEM_certificates %xdefine _PKCS7_get_certificates _ %+ BORINGSSL_PREFIX %+ _PKCS7_get_certificates %xdefine _PKCS7_get_raw_certificates _ %+ BORINGSSL_PREFIX %+ _PKCS7_get_raw_certificates %xdefine _PKCS7_sign _ %+ BORINGSSL_PREFIX %+ _PKCS7_sign %xdefine _PKCS7_type_is_data _ %+ BORINGSSL_PREFIX %+ _PKCS7_type_is_data %xdefine _PKCS7_type_is_digest _ %+ BORINGSSL_PREFIX %+ _PKCS7_type_is_digest %xdefine _PKCS7_type_is_encrypted _ %+ BORINGSSL_PREFIX %+ _PKCS7_type_is_encrypted %xdefine _PKCS7_type_is_enveloped _ %+ BORINGSSL_PREFIX %+ _PKCS7_type_is_enveloped %xdefine _PKCS7_type_is_signed _ %+ BORINGSSL_PREFIX %+ _PKCS7_type_is_signed %xdefine _PKCS7_type_is_signedAndEnveloped _ %+ BORINGSSL_PREFIX %+ _PKCS7_type_is_signedAndEnveloped %xdefine _PKCS8_PRIV_KEY_INFO_free _ %+ BORINGSSL_PREFIX %+ _PKCS8_PRIV_KEY_INFO_free %xdefine _PKCS8_PRIV_KEY_INFO_new _ %+ BORINGSSL_PREFIX %+ _PKCS8_PRIV_KEY_INFO_new %xdefine _PKCS8_decrypt _ %+ BORINGSSL_PREFIX %+ _PKCS8_decrypt %xdefine _PKCS8_encrypt _ %+ BORINGSSL_PREFIX %+ _PKCS8_encrypt %xdefine _PKCS8_marshal_encrypted_private_key _ %+ BORINGSSL_PREFIX %+ _PKCS8_marshal_encrypted_private_key %xdefine _PKCS8_parse_encrypted_private_key _ %+ BORINGSSL_PREFIX %+ _PKCS8_parse_encrypted_private_key %xdefine _POLICYINFO_free _ %+ BORINGSSL_PREFIX %+ _POLICYINFO_free %xdefine _POLICYINFO_it _ %+ BORINGSSL_PREFIX %+ _POLICYINFO_it %xdefine _POLICYINFO_new _ %+ BORINGSSL_PREFIX %+ _POLICYINFO_new %xdefine _POLICYQUALINFO_free _ %+ BORINGSSL_PREFIX %+ _POLICYQUALINFO_free %xdefine _POLICYQUALINFO_it _ %+ BORINGSSL_PREFIX %+ _POLICYQUALINFO_it %xdefine _POLICYQUALINFO_new _ %+ 
BORINGSSL_PREFIX %+ _POLICYQUALINFO_new %xdefine _POLICY_CONSTRAINTS_free _ %+ BORINGSSL_PREFIX %+ _POLICY_CONSTRAINTS_free %xdefine _POLICY_CONSTRAINTS_it _ %+ BORINGSSL_PREFIX %+ _POLICY_CONSTRAINTS_it %xdefine _POLICY_CONSTRAINTS_new _ %+ BORINGSSL_PREFIX %+ _POLICY_CONSTRAINTS_new %xdefine _POLICY_MAPPINGS_it _ %+ BORINGSSL_PREFIX %+ _POLICY_MAPPINGS_it %xdefine _POLICY_MAPPING_free _ %+ BORINGSSL_PREFIX %+ _POLICY_MAPPING_free %xdefine _POLICY_MAPPING_new _ %+ BORINGSSL_PREFIX %+ _POLICY_MAPPING_new %xdefine _RAND_OpenSSL _ %+ BORINGSSL_PREFIX %+ _RAND_OpenSSL %xdefine _RAND_SSLeay _ %+ BORINGSSL_PREFIX %+ _RAND_SSLeay %xdefine _RAND_add _ %+ BORINGSSL_PREFIX %+ _RAND_add %xdefine _RAND_bytes _ %+ BORINGSSL_PREFIX %+ _RAND_bytes %xdefine _RAND_cleanup _ %+ BORINGSSL_PREFIX %+ _RAND_cleanup %xdefine _RAND_disable_fork_unsafe_buffering _ %+ BORINGSSL_PREFIX %+ _RAND_disable_fork_unsafe_buffering %xdefine _RAND_egd _ %+ BORINGSSL_PREFIX %+ _RAND_egd %xdefine _RAND_enable_fork_unsafe_buffering _ %+ BORINGSSL_PREFIX %+ _RAND_enable_fork_unsafe_buffering %xdefine _RAND_file_name _ %+ BORINGSSL_PREFIX %+ _RAND_file_name %xdefine _RAND_get_rand_method _ %+ BORINGSSL_PREFIX %+ _RAND_get_rand_method %xdefine _RAND_get_system_entropy_for_custom_prng _ %+ BORINGSSL_PREFIX %+ _RAND_get_system_entropy_for_custom_prng %xdefine _RAND_load_file _ %+ BORINGSSL_PREFIX %+ _RAND_load_file %xdefine _RAND_poll _ %+ BORINGSSL_PREFIX %+ _RAND_poll %xdefine _RAND_pseudo_bytes _ %+ BORINGSSL_PREFIX %+ _RAND_pseudo_bytes %xdefine _RAND_seed _ %+ BORINGSSL_PREFIX %+ _RAND_seed %xdefine _RAND_set_rand_method _ %+ BORINGSSL_PREFIX %+ _RAND_set_rand_method %xdefine _RAND_status _ %+ BORINGSSL_PREFIX %+ _RAND_status %xdefine _RC4 _ %+ BORINGSSL_PREFIX %+ _RC4 %xdefine _RC4_set_key _ %+ BORINGSSL_PREFIX %+ _RC4_set_key %xdefine _RSAPrivateKey_dup _ %+ BORINGSSL_PREFIX %+ _RSAPrivateKey_dup %xdefine _RSAPublicKey_dup _ %+ BORINGSSL_PREFIX %+ _RSAPublicKey_dup %xdefine _RSAZ_1024_mod_exp_avx2 _ 
%+ BORINGSSL_PREFIX %+ _RSAZ_1024_mod_exp_avx2 %xdefine _RSA_PSS_PARAMS_free _ %+ BORINGSSL_PREFIX %+ _RSA_PSS_PARAMS_free %xdefine _RSA_PSS_PARAMS_it _ %+ BORINGSSL_PREFIX %+ _RSA_PSS_PARAMS_it %xdefine _RSA_PSS_PARAMS_new _ %+ BORINGSSL_PREFIX %+ _RSA_PSS_PARAMS_new %xdefine _RSA_add_pkcs1_prefix _ %+ BORINGSSL_PREFIX %+ _RSA_add_pkcs1_prefix %xdefine _RSA_bits _ %+ BORINGSSL_PREFIX %+ _RSA_bits %xdefine _RSA_blinding_off _ %+ BORINGSSL_PREFIX %+ _RSA_blinding_off %xdefine _RSA_blinding_on _ %+ BORINGSSL_PREFIX %+ _RSA_blinding_on %xdefine _RSA_check_fips _ %+ BORINGSSL_PREFIX %+ _RSA_check_fips %xdefine _RSA_check_key _ %+ BORINGSSL_PREFIX %+ _RSA_check_key %xdefine _RSA_decrypt _ %+ BORINGSSL_PREFIX %+ _RSA_decrypt %xdefine _RSA_default_method _ %+ BORINGSSL_PREFIX %+ _RSA_default_method %xdefine _RSA_encrypt _ %+ BORINGSSL_PREFIX %+ _RSA_encrypt %xdefine _RSA_flags _ %+ BORINGSSL_PREFIX %+ _RSA_flags %xdefine _RSA_free _ %+ BORINGSSL_PREFIX %+ _RSA_free %xdefine _RSA_generate_key_ex _ %+ BORINGSSL_PREFIX %+ _RSA_generate_key_ex %xdefine _RSA_generate_key_fips _ %+ BORINGSSL_PREFIX %+ _RSA_generate_key_fips %xdefine _RSA_get0_crt_params _ %+ BORINGSSL_PREFIX %+ _RSA_get0_crt_params %xdefine _RSA_get0_d _ %+ BORINGSSL_PREFIX %+ _RSA_get0_d %xdefine _RSA_get0_dmp1 _ %+ BORINGSSL_PREFIX %+ _RSA_get0_dmp1 %xdefine _RSA_get0_dmq1 _ %+ BORINGSSL_PREFIX %+ _RSA_get0_dmq1 %xdefine _RSA_get0_e _ %+ BORINGSSL_PREFIX %+ _RSA_get0_e %xdefine _RSA_get0_factors _ %+ BORINGSSL_PREFIX %+ _RSA_get0_factors %xdefine _RSA_get0_iqmp _ %+ BORINGSSL_PREFIX %+ _RSA_get0_iqmp %xdefine _RSA_get0_key _ %+ BORINGSSL_PREFIX %+ _RSA_get0_key %xdefine _RSA_get0_n _ %+ BORINGSSL_PREFIX %+ _RSA_get0_n %xdefine _RSA_get0_p _ %+ BORINGSSL_PREFIX %+ _RSA_get0_p %xdefine _RSA_get0_pss_params _ %+ BORINGSSL_PREFIX %+ _RSA_get0_pss_params %xdefine _RSA_get0_q _ %+ BORINGSSL_PREFIX %+ _RSA_get0_q %xdefine _RSA_get_ex_data _ %+ BORINGSSL_PREFIX %+ _RSA_get_ex_data %xdefine _RSA_get_ex_new_index _ %+ 
BORINGSSL_PREFIX %+ _RSA_get_ex_new_index %xdefine _RSA_is_opaque _ %+ BORINGSSL_PREFIX %+ _RSA_is_opaque %xdefine _RSA_marshal_private_key _ %+ BORINGSSL_PREFIX %+ _RSA_marshal_private_key %xdefine _RSA_marshal_public_key _ %+ BORINGSSL_PREFIX %+ _RSA_marshal_public_key %xdefine _RSA_new _ %+ BORINGSSL_PREFIX %+ _RSA_new %xdefine _RSA_new_method _ %+ BORINGSSL_PREFIX %+ _RSA_new_method %xdefine _RSA_new_method_no_e _ %+ BORINGSSL_PREFIX %+ _RSA_new_method_no_e %xdefine _RSA_new_private_key _ %+ BORINGSSL_PREFIX %+ _RSA_new_private_key %xdefine _RSA_new_private_key_large_e _ %+ BORINGSSL_PREFIX %+ _RSA_new_private_key_large_e %xdefine _RSA_new_private_key_no_crt _ %+ BORINGSSL_PREFIX %+ _RSA_new_private_key_no_crt %xdefine _RSA_new_private_key_no_e _ %+ BORINGSSL_PREFIX %+ _RSA_new_private_key_no_e %xdefine _RSA_new_public_key _ %+ BORINGSSL_PREFIX %+ _RSA_new_public_key %xdefine _RSA_new_public_key_large_e _ %+ BORINGSSL_PREFIX %+ _RSA_new_public_key_large_e %xdefine _RSA_padding_add_PKCS1_OAEP_mgf1 _ %+ BORINGSSL_PREFIX %+ _RSA_padding_add_PKCS1_OAEP_mgf1 %xdefine _RSA_padding_add_PKCS1_PSS_mgf1 _ %+ BORINGSSL_PREFIX %+ _RSA_padding_add_PKCS1_PSS_mgf1 %xdefine _RSA_padding_add_PKCS1_type_1 _ %+ BORINGSSL_PREFIX %+ _RSA_padding_add_PKCS1_type_1 %xdefine _RSA_padding_add_none _ %+ BORINGSSL_PREFIX %+ _RSA_padding_add_none %xdefine _RSA_padding_check_PKCS1_OAEP_mgf1 _ %+ BORINGSSL_PREFIX %+ _RSA_padding_check_PKCS1_OAEP_mgf1 %xdefine _RSA_padding_check_PKCS1_type_1 _ %+ BORINGSSL_PREFIX %+ _RSA_padding_check_PKCS1_type_1 %xdefine _RSA_parse_private_key _ %+ BORINGSSL_PREFIX %+ _RSA_parse_private_key %xdefine _RSA_parse_public_key _ %+ BORINGSSL_PREFIX %+ _RSA_parse_public_key %xdefine _RSA_print _ %+ BORINGSSL_PREFIX %+ _RSA_print %xdefine _RSA_private_decrypt _ %+ BORINGSSL_PREFIX %+ _RSA_private_decrypt %xdefine _RSA_private_encrypt _ %+ BORINGSSL_PREFIX %+ _RSA_private_encrypt %xdefine _RSA_private_key_from_bytes _ %+ BORINGSSL_PREFIX %+ 
_RSA_private_key_from_bytes %xdefine _RSA_private_key_to_bytes _ %+ BORINGSSL_PREFIX %+ _RSA_private_key_to_bytes %xdefine _RSA_public_decrypt _ %+ BORINGSSL_PREFIX %+ _RSA_public_decrypt %xdefine _RSA_public_encrypt _ %+ BORINGSSL_PREFIX %+ _RSA_public_encrypt %xdefine _RSA_public_key_from_bytes _ %+ BORINGSSL_PREFIX %+ _RSA_public_key_from_bytes %xdefine _RSA_public_key_to_bytes _ %+ BORINGSSL_PREFIX %+ _RSA_public_key_to_bytes %xdefine _RSA_set0_crt_params _ %+ BORINGSSL_PREFIX %+ _RSA_set0_crt_params %xdefine _RSA_set0_factors _ %+ BORINGSSL_PREFIX %+ _RSA_set0_factors %xdefine _RSA_set0_key _ %+ BORINGSSL_PREFIX %+ _RSA_set0_key %xdefine _RSA_set_ex_data _ %+ BORINGSSL_PREFIX %+ _RSA_set_ex_data %xdefine _RSA_sign _ %+ BORINGSSL_PREFIX %+ _RSA_sign %xdefine _RSA_sign_pss_mgf1 _ %+ BORINGSSL_PREFIX %+ _RSA_sign_pss_mgf1 %xdefine _RSA_sign_raw _ %+ BORINGSSL_PREFIX %+ _RSA_sign_raw %xdefine _RSA_size _ %+ BORINGSSL_PREFIX %+ _RSA_size %xdefine _RSA_test_flags _ %+ BORINGSSL_PREFIX %+ _RSA_test_flags %xdefine _RSA_up_ref _ %+ BORINGSSL_PREFIX %+ _RSA_up_ref %xdefine _RSA_verify _ %+ BORINGSSL_PREFIX %+ _RSA_verify %xdefine _RSA_verify_PKCS1_PSS_mgf1 _ %+ BORINGSSL_PREFIX %+ _RSA_verify_PKCS1_PSS_mgf1 %xdefine _RSA_verify_pss_mgf1 _ %+ BORINGSSL_PREFIX %+ _RSA_verify_pss_mgf1 %xdefine _RSA_verify_raw _ %+ BORINGSSL_PREFIX %+ _RSA_verify_raw %xdefine _SHA1 _ %+ BORINGSSL_PREFIX %+ _SHA1 %xdefine _SHA1_Final _ %+ BORINGSSL_PREFIX %+ _SHA1_Final %xdefine _SHA1_Init _ %+ BORINGSSL_PREFIX %+ _SHA1_Init %xdefine _SHA1_Transform _ %+ BORINGSSL_PREFIX %+ _SHA1_Transform %xdefine _SHA1_Update _ %+ BORINGSSL_PREFIX %+ _SHA1_Update %xdefine _SHA224 _ %+ BORINGSSL_PREFIX %+ _SHA224 %xdefine _SHA224_Final _ %+ BORINGSSL_PREFIX %+ _SHA224_Final %xdefine _SHA224_Init _ %+ BORINGSSL_PREFIX %+ _SHA224_Init %xdefine _SHA224_Update _ %+ BORINGSSL_PREFIX %+ _SHA224_Update %xdefine _SHA256 _ %+ BORINGSSL_PREFIX %+ _SHA256 %xdefine _SHA256_Final _ %+ BORINGSSL_PREFIX %+ _SHA256_Final 
%xdefine _SHA256_Init _ %+ BORINGSSL_PREFIX %+ _SHA256_Init %xdefine _SHA256_Transform _ %+ BORINGSSL_PREFIX %+ _SHA256_Transform %xdefine _SHA256_TransformBlocks _ %+ BORINGSSL_PREFIX %+ _SHA256_TransformBlocks %xdefine _SHA256_Update _ %+ BORINGSSL_PREFIX %+ _SHA256_Update %xdefine _SHA384 _ %+ BORINGSSL_PREFIX %+ _SHA384 %xdefine _SHA384_Final _ %+ BORINGSSL_PREFIX %+ _SHA384_Final %xdefine _SHA384_Init _ %+ BORINGSSL_PREFIX %+ _SHA384_Init %xdefine _SHA384_Update _ %+ BORINGSSL_PREFIX %+ _SHA384_Update %xdefine _SHA512 _ %+ BORINGSSL_PREFIX %+ _SHA512 %xdefine _SHA512_256 _ %+ BORINGSSL_PREFIX %+ _SHA512_256 %xdefine _SHA512_256_Final _ %+ BORINGSSL_PREFIX %+ _SHA512_256_Final %xdefine _SHA512_256_Init _ %+ BORINGSSL_PREFIX %+ _SHA512_256_Init %xdefine _SHA512_256_Update _ %+ BORINGSSL_PREFIX %+ _SHA512_256_Update %xdefine _SHA512_Final _ %+ BORINGSSL_PREFIX %+ _SHA512_Final %xdefine _SHA512_Init _ %+ BORINGSSL_PREFIX %+ _SHA512_Init %xdefine _SHA512_Transform _ %+ BORINGSSL_PREFIX %+ _SHA512_Transform %xdefine _SHA512_Update _ %+ BORINGSSL_PREFIX %+ _SHA512_Update %xdefine _SIPHASH_24 _ %+ BORINGSSL_PREFIX %+ _SIPHASH_24 %xdefine _SLHDSA_SHA2_128S_generate_key _ %+ BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_generate_key %xdefine _SLHDSA_SHA2_128S_prehash_sign _ %+ BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_prehash_sign %xdefine _SLHDSA_SHA2_128S_prehash_verify _ %+ BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_prehash_verify %xdefine _SLHDSA_SHA2_128S_prehash_warning_nonstandard_sign _ %+ BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_prehash_warning_nonstandard_sign %xdefine _SLHDSA_SHA2_128S_prehash_warning_nonstandard_verify _ %+ BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_prehash_warning_nonstandard_verify %xdefine _SLHDSA_SHA2_128S_public_from_private _ %+ BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_public_from_private %xdefine _SLHDSA_SHA2_128S_sign _ %+ BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_sign %xdefine _SLHDSA_SHA2_128S_verify _ %+ BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_verify %xdefine 
_SPAKE2_CTX_free _ %+ BORINGSSL_PREFIX %+ _SPAKE2_CTX_free %xdefine _SPAKE2_CTX_new _ %+ BORINGSSL_PREFIX %+ _SPAKE2_CTX_new %xdefine _SPAKE2_generate_msg _ %+ BORINGSSL_PREFIX %+ _SPAKE2_generate_msg %xdefine _SPAKE2_process_msg _ %+ BORINGSSL_PREFIX %+ _SPAKE2_process_msg %xdefine _SSL_CIPHER_description _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_description %xdefine _SSL_CIPHER_get_auth_nid _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_auth_nid %xdefine _SSL_CIPHER_get_bits _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_bits %xdefine _SSL_CIPHER_get_cipher_nid _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_cipher_nid %xdefine _SSL_CIPHER_get_digest_nid _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_digest_nid %xdefine _SSL_CIPHER_get_handshake_digest _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_handshake_digest %xdefine _SSL_CIPHER_get_id _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_id %xdefine _SSL_CIPHER_get_kx_name _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_kx_name %xdefine _SSL_CIPHER_get_kx_nid _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_kx_nid %xdefine _SSL_CIPHER_get_max_version _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_max_version %xdefine _SSL_CIPHER_get_min_version _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_min_version %xdefine _SSL_CIPHER_get_name _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_name %xdefine _SSL_CIPHER_get_prf_nid _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_prf_nid %xdefine _SSL_CIPHER_get_protocol_id _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_protocol_id %xdefine _SSL_CIPHER_get_version _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_get_version %xdefine _SSL_CIPHER_is_aead _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_is_aead %xdefine _SSL_CIPHER_is_block_cipher _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_is_block_cipher %xdefine _SSL_CIPHER_standard_name _ %+ BORINGSSL_PREFIX %+ _SSL_CIPHER_standard_name %xdefine _SSL_COMP_add_compression_method _ %+ BORINGSSL_PREFIX %+ _SSL_COMP_add_compression_method %xdefine _SSL_COMP_free_compression_methods _ %+ BORINGSSL_PREFIX %+ _SSL_COMP_free_compression_methods %xdefine 
_SSL_COMP_get0_name _ %+ BORINGSSL_PREFIX %+ _SSL_COMP_get0_name %xdefine _SSL_COMP_get_compression_methods _ %+ BORINGSSL_PREFIX %+ _SSL_COMP_get_compression_methods %xdefine _SSL_COMP_get_id _ %+ BORINGSSL_PREFIX %+ _SSL_COMP_get_id %xdefine _SSL_COMP_get_name _ %+ BORINGSSL_PREFIX %+ _SSL_COMP_get_name %xdefine _SSL_CREDENTIAL_clear_must_match_issuer _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_clear_must_match_issuer %xdefine _SSL_CREDENTIAL_free _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_free %xdefine _SSL_CREDENTIAL_get_ex_data _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_get_ex_data %xdefine _SSL_CREDENTIAL_get_ex_new_index _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_get_ex_new_index %xdefine _SSL_CREDENTIAL_must_match_issuer _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_must_match_issuer %xdefine _SSL_CREDENTIAL_new_delegated _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_new_delegated %xdefine _SSL_CREDENTIAL_new_x509 _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_new_x509 %xdefine _SSL_CREDENTIAL_set1_cert_chain _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set1_cert_chain %xdefine _SSL_CREDENTIAL_set1_delegated_credential _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set1_delegated_credential %xdefine _SSL_CREDENTIAL_set1_ocsp_response _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set1_ocsp_response %xdefine _SSL_CREDENTIAL_set1_private_key _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set1_private_key %xdefine _SSL_CREDENTIAL_set1_signed_cert_timestamp_list _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set1_signed_cert_timestamp_list %xdefine _SSL_CREDENTIAL_set1_signing_algorithm_prefs _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set1_signing_algorithm_prefs %xdefine _SSL_CREDENTIAL_set_ex_data _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set_ex_data %xdefine _SSL_CREDENTIAL_set_must_match_issuer _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set_must_match_issuer %xdefine _SSL_CREDENTIAL_set_private_key_method _ %+ BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set_private_key_method %xdefine _SSL_CREDENTIAL_up_ref _ %+ 
BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_up_ref %xdefine _SSL_CTX_add0_chain_cert _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_add0_chain_cert %xdefine _SSL_CTX_add1_chain_cert _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_add1_chain_cert %xdefine _SSL_CTX_add1_credential _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_add1_credential %xdefine _SSL_CTX_add_cert_compression_alg _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_add_cert_compression_alg %xdefine _SSL_CTX_add_client_CA _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_add_client_CA %xdefine _SSL_CTX_add_extra_chain_cert _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_add_extra_chain_cert %xdefine _SSL_CTX_add_session _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_add_session %xdefine _SSL_CTX_check_private_key _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_check_private_key %xdefine _SSL_CTX_cipher_in_group _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_cipher_in_group %xdefine _SSL_CTX_clear_chain_certs _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_clear_chain_certs %xdefine _SSL_CTX_clear_extra_chain_certs _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_clear_extra_chain_certs %xdefine _SSL_CTX_clear_mode _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_clear_mode %xdefine _SSL_CTX_clear_options _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_clear_options %xdefine _SSL_CTX_enable_ocsp_stapling _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_enable_ocsp_stapling %xdefine _SSL_CTX_enable_signed_cert_timestamps _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_enable_signed_cert_timestamps %xdefine _SSL_CTX_enable_tls_channel_id _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_enable_tls_channel_id %xdefine _SSL_CTX_flush_sessions _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_flush_sessions %xdefine _SSL_CTX_free _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_free %xdefine _SSL_CTX_get0_certificate _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get0_certificate %xdefine _SSL_CTX_get0_chain _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get0_chain %xdefine _SSL_CTX_get0_chain_certs _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get0_chain_certs %xdefine _SSL_CTX_get0_param _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get0_param %xdefine _SSL_CTX_get0_privatekey _ %+ BORINGSSL_PREFIX %+ 
_SSL_CTX_get0_privatekey %xdefine _SSL_CTX_get_cert_store _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_cert_store %xdefine _SSL_CTX_get_ciphers _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_ciphers %xdefine _SSL_CTX_get_client_CA_list _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_client_CA_list %xdefine _SSL_CTX_get_compliance_policy _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_compliance_policy %xdefine _SSL_CTX_get_default_passwd_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_default_passwd_cb %xdefine _SSL_CTX_get_default_passwd_cb_userdata _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_default_passwd_cb_userdata %xdefine _SSL_CTX_get_ex_data _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_ex_data %xdefine _SSL_CTX_get_ex_new_index _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_ex_new_index %xdefine _SSL_CTX_get_extra_chain_certs _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_extra_chain_certs %xdefine _SSL_CTX_get_info_callback _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_info_callback %xdefine _SSL_CTX_get_keylog_callback _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_keylog_callback %xdefine _SSL_CTX_get_max_cert_list _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_max_cert_list %xdefine _SSL_CTX_get_max_proto_version _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_max_proto_version %xdefine _SSL_CTX_get_min_proto_version _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_min_proto_version %xdefine _SSL_CTX_get_mode _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_mode %xdefine _SSL_CTX_get_num_tickets _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_num_tickets %xdefine _SSL_CTX_get_options _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_options %xdefine _SSL_CTX_get_quiet_shutdown _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_quiet_shutdown %xdefine _SSL_CTX_get_read_ahead _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_read_ahead %xdefine _SSL_CTX_get_session_cache_mode _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_session_cache_mode %xdefine _SSL_CTX_get_timeout _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_timeout %xdefine _SSL_CTX_get_tlsext_ticket_keys _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_tlsext_ticket_keys %xdefine 
_SSL_CTX_get_verify_callback _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_verify_callback %xdefine _SSL_CTX_get_verify_depth _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_verify_depth %xdefine _SSL_CTX_get_verify_mode _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_get_verify_mode %xdefine _SSL_CTX_load_verify_locations _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_load_verify_locations %xdefine _SSL_CTX_need_tmp_RSA _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_need_tmp_RSA %xdefine _SSL_CTX_new _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_new %xdefine _SSL_CTX_remove_session _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_remove_session %xdefine _SSL_CTX_sess_accept _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_accept %xdefine _SSL_CTX_sess_accept_good _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_accept_good %xdefine _SSL_CTX_sess_accept_renegotiate _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_accept_renegotiate %xdefine _SSL_CTX_sess_cache_full _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_cache_full %xdefine _SSL_CTX_sess_cb_hits _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_cb_hits %xdefine _SSL_CTX_sess_connect _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_connect %xdefine _SSL_CTX_sess_connect_good _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_connect_good %xdefine _SSL_CTX_sess_connect_renegotiate _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_connect_renegotiate %xdefine _SSL_CTX_sess_get_cache_size _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_get_cache_size %xdefine _SSL_CTX_sess_get_get_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_get_get_cb %xdefine _SSL_CTX_sess_get_new_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_get_new_cb %xdefine _SSL_CTX_sess_get_remove_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_get_remove_cb %xdefine _SSL_CTX_sess_hits _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_hits %xdefine _SSL_CTX_sess_misses _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_misses %xdefine _SSL_CTX_sess_number _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_number %xdefine _SSL_CTX_sess_set_cache_size _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_set_cache_size %xdefine _SSL_CTX_sess_set_get_cb _ %+ BORINGSSL_PREFIX %+ 
_SSL_CTX_sess_set_get_cb %xdefine _SSL_CTX_sess_set_new_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_set_new_cb %xdefine _SSL_CTX_sess_set_remove_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_set_remove_cb %xdefine _SSL_CTX_sess_timeouts _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_sess_timeouts %xdefine _SSL_CTX_set0_buffer_pool _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set0_buffer_pool %xdefine _SSL_CTX_set0_chain _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set0_chain %xdefine _SSL_CTX_set0_client_CAs _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set0_client_CAs %xdefine _SSL_CTX_set0_verify_cert_store _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set0_verify_cert_store %xdefine _SSL_CTX_set1_chain _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set1_chain %xdefine _SSL_CTX_set1_curves _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set1_curves %xdefine _SSL_CTX_set1_curves_list _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set1_curves_list %xdefine _SSL_CTX_set1_ech_keys _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set1_ech_keys %xdefine _SSL_CTX_set1_group_ids _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set1_group_ids %xdefine _SSL_CTX_set1_groups _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set1_groups %xdefine _SSL_CTX_set1_groups_list _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set1_groups_list %xdefine _SSL_CTX_set1_param _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set1_param %xdefine _SSL_CTX_set1_sigalgs _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set1_sigalgs %xdefine _SSL_CTX_set1_sigalgs_list _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set1_sigalgs_list %xdefine _SSL_CTX_set1_tls_channel_id _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set1_tls_channel_id %xdefine _SSL_CTX_set1_verify_cert_store _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set1_verify_cert_store %xdefine _SSL_CTX_set_allow_unknown_alpn_protos _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_allow_unknown_alpn_protos %xdefine _SSL_CTX_set_alpn_protos _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_alpn_protos %xdefine _SSL_CTX_set_alpn_select_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_alpn_select_cb %xdefine _SSL_CTX_set_cert_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_cert_cb %xdefine 
_SSL_CTX_set_cert_store _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_cert_store %xdefine _SSL_CTX_set_cert_verify_callback _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_cert_verify_callback %xdefine _SSL_CTX_set_chain_and_key _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_chain_and_key %xdefine _SSL_CTX_set_cipher_list _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_cipher_list %xdefine _SSL_CTX_set_client_CA_list _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_client_CA_list %xdefine _SSL_CTX_set_client_cert_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_client_cert_cb %xdefine _SSL_CTX_set_compliance_policy _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_compliance_policy %xdefine _SSL_CTX_set_current_time_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_current_time_cb %xdefine _SSL_CTX_set_custom_verify _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_custom_verify %xdefine _SSL_CTX_set_default_passwd_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_default_passwd_cb %xdefine _SSL_CTX_set_default_passwd_cb_userdata _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_default_passwd_cb_userdata %xdefine _SSL_CTX_set_default_verify_paths _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_default_verify_paths %xdefine _SSL_CTX_set_dos_protection_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_dos_protection_cb %xdefine _SSL_CTX_set_early_data_enabled _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_early_data_enabled %xdefine _SSL_CTX_set_ex_data _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_ex_data %xdefine _SSL_CTX_set_false_start_allowed_without_alpn _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_false_start_allowed_without_alpn %xdefine _SSL_CTX_set_grease_enabled _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_grease_enabled %xdefine _SSL_CTX_set_info_callback _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_info_callback %xdefine _SSL_CTX_set_keylog_callback _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_keylog_callback %xdefine _SSL_CTX_set_max_cert_list _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_max_cert_list %xdefine _SSL_CTX_set_max_proto_version _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_max_proto_version %xdefine 
_SSL_CTX_set_max_send_fragment _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_max_send_fragment %xdefine _SSL_CTX_set_min_proto_version _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_min_proto_version %xdefine _SSL_CTX_set_mode _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_mode %xdefine _SSL_CTX_set_msg_callback _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_msg_callback %xdefine _SSL_CTX_set_msg_callback_arg _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_msg_callback_arg %xdefine _SSL_CTX_set_next_proto_select_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_next_proto_select_cb %xdefine _SSL_CTX_set_next_protos_advertised_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_next_protos_advertised_cb %xdefine _SSL_CTX_set_num_tickets _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_num_tickets %xdefine _SSL_CTX_set_ocsp_response _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_ocsp_response %xdefine _SSL_CTX_set_options _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_options %xdefine _SSL_CTX_set_permute_extensions _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_permute_extensions %xdefine _SSL_CTX_set_private_key_method _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_private_key_method %xdefine _SSL_CTX_set_psk_client_callback _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_psk_client_callback %xdefine _SSL_CTX_set_psk_server_callback _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_psk_server_callback %xdefine _SSL_CTX_set_purpose _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_purpose %xdefine _SSL_CTX_set_quic_method _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_quic_method %xdefine _SSL_CTX_set_quiet_shutdown _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_quiet_shutdown %xdefine _SSL_CTX_set_read_ahead _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_read_ahead %xdefine _SSL_CTX_set_record_protocol_version _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_record_protocol_version %xdefine _SSL_CTX_set_retain_only_sha256_of_client_certs _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_retain_only_sha256_of_client_certs %xdefine _SSL_CTX_set_reverify_on_resume _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_reverify_on_resume %xdefine 
_SSL_CTX_set_select_certificate_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_select_certificate_cb %xdefine _SSL_CTX_set_session_cache_mode _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_session_cache_mode %xdefine _SSL_CTX_set_session_id_context _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_session_id_context %xdefine _SSL_CTX_set_session_psk_dhe_timeout _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_session_psk_dhe_timeout %xdefine _SSL_CTX_set_signed_cert_timestamp_list _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_signed_cert_timestamp_list %xdefine _SSL_CTX_set_signing_algorithm_prefs _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_signing_algorithm_prefs %xdefine _SSL_CTX_set_srtp_profiles _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_srtp_profiles %xdefine _SSL_CTX_set_strict_cipher_list _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_strict_cipher_list %xdefine _SSL_CTX_set_ticket_aead_method _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_ticket_aead_method %xdefine _SSL_CTX_set_timeout _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_timeout %xdefine _SSL_CTX_set_tls_channel_id_enabled _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_tls_channel_id_enabled %xdefine _SSL_CTX_set_tlsext_servername_arg _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_servername_arg %xdefine _SSL_CTX_set_tlsext_servername_callback _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_servername_callback %xdefine _SSL_CTX_set_tlsext_status_arg _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_status_arg %xdefine _SSL_CTX_set_tlsext_status_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_status_cb %xdefine _SSL_CTX_set_tlsext_ticket_key_cb _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_ticket_key_cb %xdefine _SSL_CTX_set_tlsext_ticket_keys _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_ticket_keys %xdefine _SSL_CTX_set_tlsext_use_srtp _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_use_srtp %xdefine _SSL_CTX_set_tmp_dh _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_tmp_dh %xdefine _SSL_CTX_set_tmp_dh_callback _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_tmp_dh_callback %xdefine _SSL_CTX_set_tmp_ecdh _ %+ 
BORINGSSL_PREFIX %+ _SSL_CTX_set_tmp_ecdh %xdefine _SSL_CTX_set_tmp_rsa _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_tmp_rsa %xdefine _SSL_CTX_set_tmp_rsa_callback _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_tmp_rsa_callback %xdefine _SSL_CTX_set_trust _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_trust %xdefine _SSL_CTX_set_verify _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_verify %xdefine _SSL_CTX_set_verify_algorithm_prefs _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_verify_algorithm_prefs %xdefine _SSL_CTX_set_verify_depth _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_set_verify_depth %xdefine _SSL_CTX_up_ref _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_up_ref %xdefine _SSL_CTX_use_PrivateKey _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_use_PrivateKey %xdefine _SSL_CTX_use_PrivateKey_ASN1 _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_use_PrivateKey_ASN1 %xdefine _SSL_CTX_use_PrivateKey_file _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_use_PrivateKey_file %xdefine _SSL_CTX_use_RSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_use_RSAPrivateKey %xdefine _SSL_CTX_use_RSAPrivateKey_ASN1 _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_use_RSAPrivateKey_ASN1 %xdefine _SSL_CTX_use_RSAPrivateKey_file _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_use_RSAPrivateKey_file %xdefine _SSL_CTX_use_certificate _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_use_certificate %xdefine _SSL_CTX_use_certificate_ASN1 _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_use_certificate_ASN1 %xdefine _SSL_CTX_use_certificate_chain_file _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_use_certificate_chain_file %xdefine _SSL_CTX_use_certificate_file _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_use_certificate_file %xdefine _SSL_CTX_use_psk_identity_hint _ %+ BORINGSSL_PREFIX %+ _SSL_CTX_use_psk_identity_hint %xdefine _SSL_ECH_KEYS_add _ %+ BORINGSSL_PREFIX %+ _SSL_ECH_KEYS_add %xdefine _SSL_ECH_KEYS_free _ %+ BORINGSSL_PREFIX %+ _SSL_ECH_KEYS_free %xdefine _SSL_ECH_KEYS_has_duplicate_config_id _ %+ BORINGSSL_PREFIX %+ _SSL_ECH_KEYS_has_duplicate_config_id %xdefine _SSL_ECH_KEYS_marshal_retry_configs _ %+ BORINGSSL_PREFIX %+ _SSL_ECH_KEYS_marshal_retry_configs 
%xdefine _SSL_ECH_KEYS_new _ %+ BORINGSSL_PREFIX %+ _SSL_ECH_KEYS_new %xdefine _SSL_ECH_KEYS_up_ref _ %+ BORINGSSL_PREFIX %+ _SSL_ECH_KEYS_up_ref %xdefine _SSL_SESSION_copy_without_early_data _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_copy_without_early_data %xdefine _SSL_SESSION_early_data_capable _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_early_data_capable %xdefine _SSL_SESSION_free _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_free %xdefine _SSL_SESSION_from_bytes _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_from_bytes %xdefine _SSL_SESSION_get0_cipher _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get0_cipher %xdefine _SSL_SESSION_get0_id_context _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get0_id_context %xdefine _SSL_SESSION_get0_ocsp_response _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get0_ocsp_response %xdefine _SSL_SESSION_get0_peer _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get0_peer %xdefine _SSL_SESSION_get0_peer_certificates _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get0_peer_certificates %xdefine _SSL_SESSION_get0_peer_sha256 _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get0_peer_sha256 %xdefine _SSL_SESSION_get0_signed_cert_timestamp_list _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get0_signed_cert_timestamp_list %xdefine _SSL_SESSION_get0_ticket _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get0_ticket %xdefine _SSL_SESSION_get_ex_data _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get_ex_data %xdefine _SSL_SESSION_get_ex_new_index _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get_ex_new_index %xdefine _SSL_SESSION_get_id _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get_id %xdefine _SSL_SESSION_get_master_key _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get_master_key %xdefine _SSL_SESSION_get_protocol_version _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get_protocol_version %xdefine _SSL_SESSION_get_ticket_lifetime_hint _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get_ticket_lifetime_hint %xdefine _SSL_SESSION_get_time _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get_time %xdefine _SSL_SESSION_get_timeout _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get_timeout %xdefine 
_SSL_SESSION_get_version _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_get_version %xdefine _SSL_SESSION_has_peer_sha256 _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_has_peer_sha256 %xdefine _SSL_SESSION_has_ticket _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_has_ticket %xdefine _SSL_SESSION_is_resumable _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_is_resumable %xdefine _SSL_SESSION_new _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_new %xdefine _SSL_SESSION_set1_id _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_set1_id %xdefine _SSL_SESSION_set1_id_context _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_set1_id_context %xdefine _SSL_SESSION_set_ex_data _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_set_ex_data %xdefine _SSL_SESSION_set_protocol_version _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_set_protocol_version %xdefine _SSL_SESSION_set_ticket _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_set_ticket %xdefine _SSL_SESSION_set_time _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_set_time %xdefine _SSL_SESSION_set_timeout _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_set_timeout %xdefine _SSL_SESSION_should_be_single_use _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_should_be_single_use %xdefine _SSL_SESSION_to_bytes _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_to_bytes %xdefine _SSL_SESSION_to_bytes_for_ticket _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_to_bytes_for_ticket %xdefine _SSL_SESSION_up_ref _ %+ BORINGSSL_PREFIX %+ _SSL_SESSION_up_ref %xdefine _SSL_accept _ %+ BORINGSSL_PREFIX %+ _SSL_accept %xdefine _SSL_add0_chain_cert _ %+ BORINGSSL_PREFIX %+ _SSL_add0_chain_cert %xdefine _SSL_add1_chain_cert _ %+ BORINGSSL_PREFIX %+ _SSL_add1_chain_cert %xdefine _SSL_add1_credential _ %+ BORINGSSL_PREFIX %+ _SSL_add1_credential %xdefine _SSL_add_application_settings _ %+ BORINGSSL_PREFIX %+ _SSL_add_application_settings %xdefine _SSL_add_bio_cert_subjects_to_stack _ %+ BORINGSSL_PREFIX %+ _SSL_add_bio_cert_subjects_to_stack %xdefine _SSL_add_client_CA _ %+ BORINGSSL_PREFIX %+ _SSL_add_client_CA %xdefine _SSL_add_file_cert_subjects_to_stack _ %+ BORINGSSL_PREFIX %+ 
_SSL_add_file_cert_subjects_to_stack %xdefine _SSL_alert_desc_string _ %+ BORINGSSL_PREFIX %+ _SSL_alert_desc_string %xdefine _SSL_alert_desc_string_long _ %+ BORINGSSL_PREFIX %+ _SSL_alert_desc_string_long %xdefine _SSL_alert_from_verify_result _ %+ BORINGSSL_PREFIX %+ _SSL_alert_from_verify_result %xdefine _SSL_alert_type_string _ %+ BORINGSSL_PREFIX %+ _SSL_alert_type_string %xdefine _SSL_alert_type_string_long _ %+ BORINGSSL_PREFIX %+ _SSL_alert_type_string_long %xdefine _SSL_cache_hit _ %+ BORINGSSL_PREFIX %+ _SSL_cache_hit %xdefine _SSL_can_release_private_key _ %+ BORINGSSL_PREFIX %+ _SSL_can_release_private_key %xdefine _SSL_certs_clear _ %+ BORINGSSL_PREFIX %+ _SSL_certs_clear %xdefine _SSL_check_private_key _ %+ BORINGSSL_PREFIX %+ _SSL_check_private_key %xdefine _SSL_clear _ %+ BORINGSSL_PREFIX %+ _SSL_clear %xdefine _SSL_clear_chain_certs _ %+ BORINGSSL_PREFIX %+ _SSL_clear_chain_certs %xdefine _SSL_clear_mode _ %+ BORINGSSL_PREFIX %+ _SSL_clear_mode %xdefine _SSL_clear_options _ %+ BORINGSSL_PREFIX %+ _SSL_clear_options %xdefine _SSL_connect _ %+ BORINGSSL_PREFIX %+ _SSL_connect %xdefine _SSL_cutthrough_complete _ %+ BORINGSSL_PREFIX %+ _SSL_cutthrough_complete %xdefine _SSL_do_handshake _ %+ BORINGSSL_PREFIX %+ _SSL_do_handshake %xdefine _SSL_dup_CA_list _ %+ BORINGSSL_PREFIX %+ _SSL_dup_CA_list %xdefine _SSL_early_callback_ctx_extension_get _ %+ BORINGSSL_PREFIX %+ _SSL_early_callback_ctx_extension_get %xdefine _SSL_early_data_accepted _ %+ BORINGSSL_PREFIX %+ _SSL_early_data_accepted %xdefine _SSL_early_data_reason_string _ %+ BORINGSSL_PREFIX %+ _SSL_early_data_reason_string %xdefine _SSL_ech_accepted _ %+ BORINGSSL_PREFIX %+ _SSL_ech_accepted %xdefine _SSL_enable_ocsp_stapling _ %+ BORINGSSL_PREFIX %+ _SSL_enable_ocsp_stapling %xdefine _SSL_enable_signed_cert_timestamps _ %+ BORINGSSL_PREFIX %+ _SSL_enable_signed_cert_timestamps %xdefine _SSL_enable_tls_channel_id _ %+ BORINGSSL_PREFIX %+ _SSL_enable_tls_channel_id %xdefine _SSL_error_description 
_ %+ BORINGSSL_PREFIX %+ _SSL_error_description %xdefine _SSL_export_keying_material _ %+ BORINGSSL_PREFIX %+ _SSL_export_keying_material %xdefine _SSL_free _ %+ BORINGSSL_PREFIX %+ _SSL_free %xdefine _SSL_generate_key_block _ %+ BORINGSSL_PREFIX %+ _SSL_generate_key_block %xdefine _SSL_get0_alpn_selected _ %+ BORINGSSL_PREFIX %+ _SSL_get0_alpn_selected %xdefine _SSL_get0_certificate_types _ %+ BORINGSSL_PREFIX %+ _SSL_get0_certificate_types %xdefine _SSL_get0_chain _ %+ BORINGSSL_PREFIX %+ _SSL_get0_chain %xdefine _SSL_get0_chain_certs _ %+ BORINGSSL_PREFIX %+ _SSL_get0_chain_certs %xdefine _SSL_get0_ech_name_override _ %+ BORINGSSL_PREFIX %+ _SSL_get0_ech_name_override %xdefine _SSL_get0_ech_retry_configs _ %+ BORINGSSL_PREFIX %+ _SSL_get0_ech_retry_configs %xdefine _SSL_get0_next_proto_negotiated _ %+ BORINGSSL_PREFIX %+ _SSL_get0_next_proto_negotiated %xdefine _SSL_get0_ocsp_response _ %+ BORINGSSL_PREFIX %+ _SSL_get0_ocsp_response %xdefine _SSL_get0_param _ %+ BORINGSSL_PREFIX %+ _SSL_get0_param %xdefine _SSL_get0_peer_application_settings _ %+ BORINGSSL_PREFIX %+ _SSL_get0_peer_application_settings %xdefine _SSL_get0_peer_certificates _ %+ BORINGSSL_PREFIX %+ _SSL_get0_peer_certificates %xdefine _SSL_get0_peer_delegation_algorithms _ %+ BORINGSSL_PREFIX %+ _SSL_get0_peer_delegation_algorithms %xdefine _SSL_get0_peer_verify_algorithms _ %+ BORINGSSL_PREFIX %+ _SSL_get0_peer_verify_algorithms %xdefine _SSL_get0_selected_credential _ %+ BORINGSSL_PREFIX %+ _SSL_get0_selected_credential %xdefine _SSL_get0_server_requested_CAs _ %+ BORINGSSL_PREFIX %+ _SSL_get0_server_requested_CAs %xdefine _SSL_get0_session_id_context _ %+ BORINGSSL_PREFIX %+ _SSL_get0_session_id_context %xdefine _SSL_get0_signed_cert_timestamp_list _ %+ BORINGSSL_PREFIX %+ _SSL_get0_signed_cert_timestamp_list %xdefine _SSL_get1_session _ %+ BORINGSSL_PREFIX %+ _SSL_get1_session %xdefine _SSL_get_SSL_CTX _ %+ BORINGSSL_PREFIX %+ _SSL_get_SSL_CTX %xdefine _SSL_get_all_cipher_names _ %+ 
BORINGSSL_PREFIX %+ _SSL_get_all_cipher_names %xdefine _SSL_get_all_curve_names _ %+ BORINGSSL_PREFIX %+ _SSL_get_all_curve_names %xdefine _SSL_get_all_group_names _ %+ BORINGSSL_PREFIX %+ _SSL_get_all_group_names %xdefine _SSL_get_all_signature_algorithm_names _ %+ BORINGSSL_PREFIX %+ _SSL_get_all_signature_algorithm_names %xdefine _SSL_get_all_standard_cipher_names _ %+ BORINGSSL_PREFIX %+ _SSL_get_all_standard_cipher_names %xdefine _SSL_get_all_version_names _ %+ BORINGSSL_PREFIX %+ _SSL_get_all_version_names %xdefine _SSL_get_certificate _ %+ BORINGSSL_PREFIX %+ _SSL_get_certificate %xdefine _SSL_get_cipher_by_value _ %+ BORINGSSL_PREFIX %+ _SSL_get_cipher_by_value %xdefine _SSL_get_cipher_list _ %+ BORINGSSL_PREFIX %+ _SSL_get_cipher_list %xdefine _SSL_get_ciphers _ %+ BORINGSSL_PREFIX %+ _SSL_get_ciphers %xdefine _SSL_get_client_CA_list _ %+ BORINGSSL_PREFIX %+ _SSL_get_client_CA_list %xdefine _SSL_get_client_random _ %+ BORINGSSL_PREFIX %+ _SSL_get_client_random %xdefine _SSL_get_compliance_policy _ %+ BORINGSSL_PREFIX %+ _SSL_get_compliance_policy %xdefine _SSL_get_current_cipher _ %+ BORINGSSL_PREFIX %+ _SSL_get_current_cipher %xdefine _SSL_get_current_compression _ %+ BORINGSSL_PREFIX %+ _SSL_get_current_compression %xdefine _SSL_get_current_expansion _ %+ BORINGSSL_PREFIX %+ _SSL_get_current_expansion %xdefine _SSL_get_curve_id _ %+ BORINGSSL_PREFIX %+ _SSL_get_curve_id %xdefine _SSL_get_curve_name _ %+ BORINGSSL_PREFIX %+ _SSL_get_curve_name %xdefine _SSL_get_default_timeout _ %+ BORINGSSL_PREFIX %+ _SSL_get_default_timeout %xdefine _SSL_get_early_data_reason _ %+ BORINGSSL_PREFIX %+ _SSL_get_early_data_reason %xdefine _SSL_get_error _ %+ BORINGSSL_PREFIX %+ _SSL_get_error %xdefine _SSL_get_ex_data _ %+ BORINGSSL_PREFIX %+ _SSL_get_ex_data %xdefine _SSL_get_ex_data_X509_STORE_CTX_idx _ %+ BORINGSSL_PREFIX %+ _SSL_get_ex_data_X509_STORE_CTX_idx %xdefine _SSL_get_ex_new_index _ %+ BORINGSSL_PREFIX %+ _SSL_get_ex_new_index %xdefine _SSL_get_extms_support _ 
%+ BORINGSSL_PREFIX %+ _SSL_get_extms_support %xdefine _SSL_get_fd _ %+ BORINGSSL_PREFIX %+ _SSL_get_fd %xdefine _SSL_get_finished _ %+ BORINGSSL_PREFIX %+ _SSL_get_finished %xdefine _SSL_get_group_id _ %+ BORINGSSL_PREFIX %+ _SSL_get_group_id %xdefine _SSL_get_group_name _ %+ BORINGSSL_PREFIX %+ _SSL_get_group_name %xdefine _SSL_get_info_callback _ %+ BORINGSSL_PREFIX %+ _SSL_get_info_callback %xdefine _SSL_get_ivs _ %+ BORINGSSL_PREFIX %+ _SSL_get_ivs %xdefine _SSL_get_key_block_len _ %+ BORINGSSL_PREFIX %+ _SSL_get_key_block_len %xdefine _SSL_get_max_cert_list _ %+ BORINGSSL_PREFIX %+ _SSL_get_max_cert_list %xdefine _SSL_get_max_proto_version _ %+ BORINGSSL_PREFIX %+ _SSL_get_max_proto_version %xdefine _SSL_get_min_proto_version _ %+ BORINGSSL_PREFIX %+ _SSL_get_min_proto_version %xdefine _SSL_get_mode _ %+ BORINGSSL_PREFIX %+ _SSL_get_mode %xdefine _SSL_get_negotiated_group _ %+ BORINGSSL_PREFIX %+ _SSL_get_negotiated_group %xdefine _SSL_get_options _ %+ BORINGSSL_PREFIX %+ _SSL_get_options %xdefine _SSL_get_peer_cert_chain _ %+ BORINGSSL_PREFIX %+ _SSL_get_peer_cert_chain %xdefine _SSL_get_peer_certificate _ %+ BORINGSSL_PREFIX %+ _SSL_get_peer_certificate %xdefine _SSL_get_peer_finished _ %+ BORINGSSL_PREFIX %+ _SSL_get_peer_finished %xdefine _SSL_get_peer_full_cert_chain _ %+ BORINGSSL_PREFIX %+ _SSL_get_peer_full_cert_chain %xdefine _SSL_get_peer_quic_transport_params _ %+ BORINGSSL_PREFIX %+ _SSL_get_peer_quic_transport_params %xdefine _SSL_get_peer_signature_algorithm _ %+ BORINGSSL_PREFIX %+ _SSL_get_peer_signature_algorithm %xdefine _SSL_get_pending_cipher _ %+ BORINGSSL_PREFIX %+ _SSL_get_pending_cipher %xdefine _SSL_get_privatekey _ %+ BORINGSSL_PREFIX %+ _SSL_get_privatekey %xdefine _SSL_get_psk_identity _ %+ BORINGSSL_PREFIX %+ _SSL_get_psk_identity %xdefine _SSL_get_psk_identity_hint _ %+ BORINGSSL_PREFIX %+ _SSL_get_psk_identity_hint %xdefine _SSL_get_quiet_shutdown _ %+ BORINGSSL_PREFIX %+ _SSL_get_quiet_shutdown %xdefine _SSL_get_rbio _ %+ 
BORINGSSL_PREFIX %+ _SSL_get_rbio %xdefine _SSL_get_read_ahead _ %+ BORINGSSL_PREFIX %+ _SSL_get_read_ahead %xdefine _SSL_get_read_sequence _ %+ BORINGSSL_PREFIX %+ _SSL_get_read_sequence %xdefine _SSL_get_rfd _ %+ BORINGSSL_PREFIX %+ _SSL_get_rfd %xdefine _SSL_get_secure_renegotiation_support _ %+ BORINGSSL_PREFIX %+ _SSL_get_secure_renegotiation_support %xdefine _SSL_get_selected_srtp_profile _ %+ BORINGSSL_PREFIX %+ _SSL_get_selected_srtp_profile %xdefine _SSL_get_server_random _ %+ BORINGSSL_PREFIX %+ _SSL_get_server_random %xdefine _SSL_get_server_tmp_key _ %+ BORINGSSL_PREFIX %+ _SSL_get_server_tmp_key %xdefine _SSL_get_servername _ %+ BORINGSSL_PREFIX %+ _SSL_get_servername %xdefine _SSL_get_servername_type _ %+ BORINGSSL_PREFIX %+ _SSL_get_servername_type %xdefine _SSL_get_session _ %+ BORINGSSL_PREFIX %+ _SSL_get_session %xdefine _SSL_get_shared_ciphers _ %+ BORINGSSL_PREFIX %+ _SSL_get_shared_ciphers %xdefine _SSL_get_shared_sigalgs _ %+ BORINGSSL_PREFIX %+ _SSL_get_shared_sigalgs %xdefine _SSL_get_shutdown _ %+ BORINGSSL_PREFIX %+ _SSL_get_shutdown %xdefine _SSL_get_signature_algorithm_digest _ %+ BORINGSSL_PREFIX %+ _SSL_get_signature_algorithm_digest %xdefine _SSL_get_signature_algorithm_key_type _ %+ BORINGSSL_PREFIX %+ _SSL_get_signature_algorithm_key_type %xdefine _SSL_get_signature_algorithm_name _ %+ BORINGSSL_PREFIX %+ _SSL_get_signature_algorithm_name %xdefine _SSL_get_srtp_profiles _ %+ BORINGSSL_PREFIX %+ _SSL_get_srtp_profiles %xdefine _SSL_get_ticket_age_skew _ %+ BORINGSSL_PREFIX %+ _SSL_get_ticket_age_skew %xdefine _SSL_get_tls_channel_id _ %+ BORINGSSL_PREFIX %+ _SSL_get_tls_channel_id %xdefine _SSL_get_tls_unique _ %+ BORINGSSL_PREFIX %+ _SSL_get_tls_unique %xdefine _SSL_get_tlsext_status_ocsp_resp _ %+ BORINGSSL_PREFIX %+ _SSL_get_tlsext_status_ocsp_resp %xdefine _SSL_get_tlsext_status_type _ %+ BORINGSSL_PREFIX %+ _SSL_get_tlsext_status_type %xdefine _SSL_get_verify_callback _ %+ BORINGSSL_PREFIX %+ _SSL_get_verify_callback %xdefine 
_SSL_get_verify_depth _ %+ BORINGSSL_PREFIX %+ _SSL_get_verify_depth %xdefine _SSL_get_verify_mode _ %+ BORINGSSL_PREFIX %+ _SSL_get_verify_mode %xdefine _SSL_get_verify_result _ %+ BORINGSSL_PREFIX %+ _SSL_get_verify_result %xdefine _SSL_get_version _ %+ BORINGSSL_PREFIX %+ _SSL_get_version %xdefine _SSL_get_wbio _ %+ BORINGSSL_PREFIX %+ _SSL_get_wbio %xdefine _SSL_get_wfd _ %+ BORINGSSL_PREFIX %+ _SSL_get_wfd %xdefine _SSL_get_write_sequence _ %+ BORINGSSL_PREFIX %+ _SSL_get_write_sequence %xdefine _SSL_has_application_settings _ %+ BORINGSSL_PREFIX %+ _SSL_has_application_settings %xdefine _SSL_has_pending _ %+ BORINGSSL_PREFIX %+ _SSL_has_pending %xdefine _SSL_in_early_data _ %+ BORINGSSL_PREFIX %+ _SSL_in_early_data %xdefine _SSL_in_false_start _ %+ BORINGSSL_PREFIX %+ _SSL_in_false_start %xdefine _SSL_in_init _ %+ BORINGSSL_PREFIX %+ _SSL_in_init %xdefine _SSL_is_dtls _ %+ BORINGSSL_PREFIX %+ _SSL_is_dtls %xdefine _SSL_is_init_finished _ %+ BORINGSSL_PREFIX %+ _SSL_is_init_finished %xdefine _SSL_is_quic _ %+ BORINGSSL_PREFIX %+ _SSL_is_quic %xdefine _SSL_is_server _ %+ BORINGSSL_PREFIX %+ _SSL_is_server %xdefine _SSL_is_signature_algorithm_rsa_pss _ %+ BORINGSSL_PREFIX %+ _SSL_is_signature_algorithm_rsa_pss %xdefine _SSL_key_update _ %+ BORINGSSL_PREFIX %+ _SSL_key_update %xdefine _SSL_library_init _ %+ BORINGSSL_PREFIX %+ _SSL_library_init %xdefine _SSL_load_client_CA_file _ %+ BORINGSSL_PREFIX %+ _SSL_load_client_CA_file %xdefine _SSL_load_error_strings _ %+ BORINGSSL_PREFIX %+ _SSL_load_error_strings %xdefine _SSL_magic_pending_session_ptr _ %+ BORINGSSL_PREFIX %+ _SSL_magic_pending_session_ptr %xdefine _SSL_marshal_ech_config _ %+ BORINGSSL_PREFIX %+ _SSL_marshal_ech_config %xdefine _SSL_max_seal_overhead _ %+ BORINGSSL_PREFIX %+ _SSL_max_seal_overhead %xdefine _SSL_need_tmp_RSA _ %+ BORINGSSL_PREFIX %+ _SSL_need_tmp_RSA %xdefine _SSL_new _ %+ BORINGSSL_PREFIX %+ _SSL_new %xdefine _SSL_num_renegotiations _ %+ BORINGSSL_PREFIX %+ _SSL_num_renegotiations 
%xdefine _SSL_peek _ %+ BORINGSSL_PREFIX %+ _SSL_peek %xdefine _SSL_pending _ %+ BORINGSSL_PREFIX %+ _SSL_pending %xdefine _SSL_process_quic_post_handshake _ %+ BORINGSSL_PREFIX %+ _SSL_process_quic_post_handshake %xdefine _SSL_process_tls13_new_session_ticket _ %+ BORINGSSL_PREFIX %+ _SSL_process_tls13_new_session_ticket %xdefine _SSL_provide_quic_data _ %+ BORINGSSL_PREFIX %+ _SSL_provide_quic_data %xdefine _SSL_quic_max_handshake_flight_len _ %+ BORINGSSL_PREFIX %+ _SSL_quic_max_handshake_flight_len %xdefine _SSL_quic_read_level _ %+ BORINGSSL_PREFIX %+ _SSL_quic_read_level %xdefine _SSL_quic_write_level _ %+ BORINGSSL_PREFIX %+ _SSL_quic_write_level %xdefine _SSL_read _ %+ BORINGSSL_PREFIX %+ _SSL_read %xdefine _SSL_renegotiate _ %+ BORINGSSL_PREFIX %+ _SSL_renegotiate %xdefine _SSL_renegotiate_pending _ %+ BORINGSSL_PREFIX %+ _SSL_renegotiate_pending %xdefine _SSL_request_handshake_hints _ %+ BORINGSSL_PREFIX %+ _SSL_request_handshake_hints %xdefine _SSL_reset_early_data_reject _ %+ BORINGSSL_PREFIX %+ _SSL_reset_early_data_reject %xdefine _SSL_select_next_proto _ %+ BORINGSSL_PREFIX %+ _SSL_select_next_proto %xdefine _SSL_send_fatal_alert _ %+ BORINGSSL_PREFIX %+ _SSL_send_fatal_alert %xdefine _SSL_serialize_capabilities _ %+ BORINGSSL_PREFIX %+ _SSL_serialize_capabilities %xdefine _SSL_serialize_handshake_hints _ %+ BORINGSSL_PREFIX %+ _SSL_serialize_handshake_hints %xdefine _SSL_session_reused _ %+ BORINGSSL_PREFIX %+ _SSL_session_reused %xdefine _SSL_set0_CA_names _ %+ BORINGSSL_PREFIX %+ _SSL_set0_CA_names %xdefine _SSL_set0_chain _ %+ BORINGSSL_PREFIX %+ _SSL_set0_chain %xdefine _SSL_set0_client_CAs _ %+ BORINGSSL_PREFIX %+ _SSL_set0_client_CAs %xdefine _SSL_set0_rbio _ %+ BORINGSSL_PREFIX %+ _SSL_set0_rbio %xdefine _SSL_set0_verify_cert_store _ %+ BORINGSSL_PREFIX %+ _SSL_set0_verify_cert_store %xdefine _SSL_set0_wbio _ %+ BORINGSSL_PREFIX %+ _SSL_set0_wbio %xdefine _SSL_set1_chain _ %+ BORINGSSL_PREFIX %+ _SSL_set1_chain %xdefine _SSL_set1_curves _ %+ 
BORINGSSL_PREFIX %+ _SSL_set1_curves %xdefine _SSL_set1_curves_list _ %+ BORINGSSL_PREFIX %+ _SSL_set1_curves_list %xdefine _SSL_set1_ech_config_list _ %+ BORINGSSL_PREFIX %+ _SSL_set1_ech_config_list %xdefine _SSL_set1_group_ids _ %+ BORINGSSL_PREFIX %+ _SSL_set1_group_ids %xdefine _SSL_set1_groups _ %+ BORINGSSL_PREFIX %+ _SSL_set1_groups %xdefine _SSL_set1_groups_list _ %+ BORINGSSL_PREFIX %+ _SSL_set1_groups_list %xdefine _SSL_set1_host _ %+ BORINGSSL_PREFIX %+ _SSL_set1_host %xdefine _SSL_set1_param _ %+ BORINGSSL_PREFIX %+ _SSL_set1_param %xdefine _SSL_set1_sigalgs _ %+ BORINGSSL_PREFIX %+ _SSL_set1_sigalgs %xdefine _SSL_set1_sigalgs_list _ %+ BORINGSSL_PREFIX %+ _SSL_set1_sigalgs_list %xdefine _SSL_set1_tls_channel_id _ %+ BORINGSSL_PREFIX %+ _SSL_set1_tls_channel_id %xdefine _SSL_set1_verify_cert_store _ %+ BORINGSSL_PREFIX %+ _SSL_set1_verify_cert_store %xdefine _SSL_set_SSL_CTX _ %+ BORINGSSL_PREFIX %+ _SSL_set_SSL_CTX %xdefine _SSL_set_accept_state _ %+ BORINGSSL_PREFIX %+ _SSL_set_accept_state %xdefine _SSL_set_alpn_protos _ %+ BORINGSSL_PREFIX %+ _SSL_set_alpn_protos %xdefine _SSL_set_alps_use_new_codepoint _ %+ BORINGSSL_PREFIX %+ _SSL_set_alps_use_new_codepoint %xdefine _SSL_set_bio _ %+ BORINGSSL_PREFIX %+ _SSL_set_bio %xdefine _SSL_set_cert_cb _ %+ BORINGSSL_PREFIX %+ _SSL_set_cert_cb %xdefine _SSL_set_chain_and_key _ %+ BORINGSSL_PREFIX %+ _SSL_set_chain_and_key %xdefine _SSL_set_check_client_certificate_type _ %+ BORINGSSL_PREFIX %+ _SSL_set_check_client_certificate_type %xdefine _SSL_set_check_ecdsa_curve _ %+ BORINGSSL_PREFIX %+ _SSL_set_check_ecdsa_curve %xdefine _SSL_set_cipher_list _ %+ BORINGSSL_PREFIX %+ _SSL_set_cipher_list %xdefine _SSL_set_client_CA_list _ %+ BORINGSSL_PREFIX %+ _SSL_set_client_CA_list %xdefine _SSL_set_compliance_policy _ %+ BORINGSSL_PREFIX %+ _SSL_set_compliance_policy %xdefine _SSL_set_connect_state _ %+ BORINGSSL_PREFIX %+ _SSL_set_connect_state %xdefine _SSL_set_custom_verify _ %+ BORINGSSL_PREFIX %+ 
_SSL_set_custom_verify %xdefine _SSL_set_early_data_enabled _ %+ BORINGSSL_PREFIX %+ _SSL_set_early_data_enabled %xdefine _SSL_set_enable_ech_grease _ %+ BORINGSSL_PREFIX %+ _SSL_set_enable_ech_grease %xdefine _SSL_set_enforce_rsa_key_usage _ %+ BORINGSSL_PREFIX %+ _SSL_set_enforce_rsa_key_usage %xdefine _SSL_set_ex_data _ %+ BORINGSSL_PREFIX %+ _SSL_set_ex_data %xdefine _SSL_set_fd _ %+ BORINGSSL_PREFIX %+ _SSL_set_fd %xdefine _SSL_set_handshake_hints _ %+ BORINGSSL_PREFIX %+ _SSL_set_handshake_hints %xdefine _SSL_set_hostflags _ %+ BORINGSSL_PREFIX %+ _SSL_set_hostflags %xdefine _SSL_set_info_callback _ %+ BORINGSSL_PREFIX %+ _SSL_set_info_callback %xdefine _SSL_set_jdk11_workaround _ %+ BORINGSSL_PREFIX %+ _SSL_set_jdk11_workaround %xdefine _SSL_set_max_cert_list _ %+ BORINGSSL_PREFIX %+ _SSL_set_max_cert_list %xdefine _SSL_set_max_proto_version _ %+ BORINGSSL_PREFIX %+ _SSL_set_max_proto_version %xdefine _SSL_set_max_send_fragment _ %+ BORINGSSL_PREFIX %+ _SSL_set_max_send_fragment %xdefine _SSL_set_min_proto_version _ %+ BORINGSSL_PREFIX %+ _SSL_set_min_proto_version %xdefine _SSL_set_mode _ %+ BORINGSSL_PREFIX %+ _SSL_set_mode %xdefine _SSL_set_msg_callback _ %+ BORINGSSL_PREFIX %+ _SSL_set_msg_callback %xdefine _SSL_set_msg_callback_arg _ %+ BORINGSSL_PREFIX %+ _SSL_set_msg_callback_arg %xdefine _SSL_set_mtu _ %+ BORINGSSL_PREFIX %+ _SSL_set_mtu %xdefine _SSL_set_ocsp_response _ %+ BORINGSSL_PREFIX %+ _SSL_set_ocsp_response %xdefine _SSL_set_options _ %+ BORINGSSL_PREFIX %+ _SSL_set_options %xdefine _SSL_set_permute_extensions _ %+ BORINGSSL_PREFIX %+ _SSL_set_permute_extensions %xdefine _SSL_set_private_key_method _ %+ BORINGSSL_PREFIX %+ _SSL_set_private_key_method %xdefine _SSL_set_psk_client_callback _ %+ BORINGSSL_PREFIX %+ _SSL_set_psk_client_callback %xdefine _SSL_set_psk_server_callback _ %+ BORINGSSL_PREFIX %+ _SSL_set_psk_server_callback %xdefine _SSL_set_purpose _ %+ BORINGSSL_PREFIX %+ _SSL_set_purpose %xdefine _SSL_set_quic_early_data_context _ 
%+ BORINGSSL_PREFIX %+ _SSL_set_quic_early_data_context %xdefine _SSL_set_quic_method _ %+ BORINGSSL_PREFIX %+ _SSL_set_quic_method %xdefine _SSL_set_quic_transport_params _ %+ BORINGSSL_PREFIX %+ _SSL_set_quic_transport_params %xdefine _SSL_set_quic_use_legacy_codepoint _ %+ BORINGSSL_PREFIX %+ _SSL_set_quic_use_legacy_codepoint %xdefine _SSL_set_quiet_shutdown _ %+ BORINGSSL_PREFIX %+ _SSL_set_quiet_shutdown %xdefine _SSL_set_read_ahead _ %+ BORINGSSL_PREFIX %+ _SSL_set_read_ahead %xdefine _SSL_set_renegotiate_mode _ %+ BORINGSSL_PREFIX %+ _SSL_set_renegotiate_mode %xdefine _SSL_set_retain_only_sha256_of_client_certs _ %+ BORINGSSL_PREFIX %+ _SSL_set_retain_only_sha256_of_client_certs %xdefine _SSL_set_rfd _ %+ BORINGSSL_PREFIX %+ _SSL_set_rfd %xdefine _SSL_set_session _ %+ BORINGSSL_PREFIX %+ _SSL_set_session %xdefine _SSL_set_session_id_context _ %+ BORINGSSL_PREFIX %+ _SSL_set_session_id_context %xdefine _SSL_set_shed_handshake_config _ %+ BORINGSSL_PREFIX %+ _SSL_set_shed_handshake_config %xdefine _SSL_set_shutdown _ %+ BORINGSSL_PREFIX %+ _SSL_set_shutdown %xdefine _SSL_set_signed_cert_timestamp_list _ %+ BORINGSSL_PREFIX %+ _SSL_set_signed_cert_timestamp_list %xdefine _SSL_set_signing_algorithm_prefs _ %+ BORINGSSL_PREFIX %+ _SSL_set_signing_algorithm_prefs %xdefine _SSL_set_srtp_profiles _ %+ BORINGSSL_PREFIX %+ _SSL_set_srtp_profiles %xdefine _SSL_set_state _ %+ BORINGSSL_PREFIX %+ _SSL_set_state %xdefine _SSL_set_strict_cipher_list _ %+ BORINGSSL_PREFIX %+ _SSL_set_strict_cipher_list %xdefine _SSL_set_tls_channel_id_enabled _ %+ BORINGSSL_PREFIX %+ _SSL_set_tls_channel_id_enabled %xdefine _SSL_set_tlsext_host_name _ %+ BORINGSSL_PREFIX %+ _SSL_set_tlsext_host_name %xdefine _SSL_set_tlsext_status_ocsp_resp _ %+ BORINGSSL_PREFIX %+ _SSL_set_tlsext_status_ocsp_resp %xdefine _SSL_set_tlsext_status_type _ %+ BORINGSSL_PREFIX %+ _SSL_set_tlsext_status_type %xdefine _SSL_set_tlsext_use_srtp _ %+ BORINGSSL_PREFIX %+ _SSL_set_tlsext_use_srtp %xdefine 
_SSL_set_tmp_dh _ %+ BORINGSSL_PREFIX %+ _SSL_set_tmp_dh %xdefine _SSL_set_tmp_dh_callback _ %+ BORINGSSL_PREFIX %+ _SSL_set_tmp_dh_callback %xdefine _SSL_set_tmp_ecdh _ %+ BORINGSSL_PREFIX %+ _SSL_set_tmp_ecdh %xdefine _SSL_set_tmp_rsa _ %+ BORINGSSL_PREFIX %+ _SSL_set_tmp_rsa %xdefine _SSL_set_tmp_rsa_callback _ %+ BORINGSSL_PREFIX %+ _SSL_set_tmp_rsa_callback %xdefine _SSL_set_trust _ %+ BORINGSSL_PREFIX %+ _SSL_set_trust %xdefine _SSL_set_verify _ %+ BORINGSSL_PREFIX %+ _SSL_set_verify %xdefine _SSL_set_verify_algorithm_prefs _ %+ BORINGSSL_PREFIX %+ _SSL_set_verify_algorithm_prefs %xdefine _SSL_set_verify_depth _ %+ BORINGSSL_PREFIX %+ _SSL_set_verify_depth %xdefine _SSL_set_wfd _ %+ BORINGSSL_PREFIX %+ _SSL_set_wfd %xdefine _SSL_shutdown _ %+ BORINGSSL_PREFIX %+ _SSL_shutdown %xdefine _SSL_state _ %+ BORINGSSL_PREFIX %+ _SSL_state %xdefine _SSL_state_string _ %+ BORINGSSL_PREFIX %+ _SSL_state_string %xdefine _SSL_state_string_long _ %+ BORINGSSL_PREFIX %+ _SSL_state_string_long %xdefine _SSL_total_renegotiations _ %+ BORINGSSL_PREFIX %+ _SSL_total_renegotiations %xdefine _SSL_use_PrivateKey _ %+ BORINGSSL_PREFIX %+ _SSL_use_PrivateKey %xdefine _SSL_use_PrivateKey_ASN1 _ %+ BORINGSSL_PREFIX %+ _SSL_use_PrivateKey_ASN1 %xdefine _SSL_use_PrivateKey_file _ %+ BORINGSSL_PREFIX %+ _SSL_use_PrivateKey_file %xdefine _SSL_use_RSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _SSL_use_RSAPrivateKey %xdefine _SSL_use_RSAPrivateKey_ASN1 _ %+ BORINGSSL_PREFIX %+ _SSL_use_RSAPrivateKey_ASN1 %xdefine _SSL_use_RSAPrivateKey_file _ %+ BORINGSSL_PREFIX %+ _SSL_use_RSAPrivateKey_file %xdefine _SSL_use_certificate _ %+ BORINGSSL_PREFIX %+ _SSL_use_certificate %xdefine _SSL_use_certificate_ASN1 _ %+ BORINGSSL_PREFIX %+ _SSL_use_certificate_ASN1 %xdefine _SSL_use_certificate_file _ %+ BORINGSSL_PREFIX %+ _SSL_use_certificate_file %xdefine _SSL_use_psk_identity_hint _ %+ BORINGSSL_PREFIX %+ _SSL_use_psk_identity_hint %xdefine _SSL_used_hello_retry_request _ %+ BORINGSSL_PREFIX %+ 
_SSL_used_hello_retry_request %xdefine _SSL_version _ %+ BORINGSSL_PREFIX %+ _SSL_version %xdefine _SSL_want _ %+ BORINGSSL_PREFIX %+ _SSL_want %xdefine _SSL_was_key_usage_invalid _ %+ BORINGSSL_PREFIX %+ _SSL_was_key_usage_invalid %xdefine _SSL_write _ %+ BORINGSSL_PREFIX %+ _SSL_write %xdefine _SSLeay _ %+ BORINGSSL_PREFIX %+ _SSLeay %xdefine _SSLeay_version _ %+ BORINGSSL_PREFIX %+ _SSLeay_version %xdefine _SSLv23_client_method _ %+ BORINGSSL_PREFIX %+ _SSLv23_client_method %xdefine _SSLv23_method _ %+ BORINGSSL_PREFIX %+ _SSLv23_method %xdefine _SSLv23_server_method _ %+ BORINGSSL_PREFIX %+ _SSLv23_server_method %xdefine _TLS_client_method _ %+ BORINGSSL_PREFIX %+ _TLS_client_method %xdefine _TLS_method _ %+ BORINGSSL_PREFIX %+ _TLS_method %xdefine _TLS_server_method _ %+ BORINGSSL_PREFIX %+ _TLS_server_method %xdefine _TLS_with_buffers_method _ %+ BORINGSSL_PREFIX %+ _TLS_with_buffers_method %xdefine _TLSv1_1_client_method _ %+ BORINGSSL_PREFIX %+ _TLSv1_1_client_method %xdefine _TLSv1_1_method _ %+ BORINGSSL_PREFIX %+ _TLSv1_1_method %xdefine _TLSv1_1_server_method _ %+ BORINGSSL_PREFIX %+ _TLSv1_1_server_method %xdefine _TLSv1_2_client_method _ %+ BORINGSSL_PREFIX %+ _TLSv1_2_client_method %xdefine _TLSv1_2_method _ %+ BORINGSSL_PREFIX %+ _TLSv1_2_method %xdefine _TLSv1_2_server_method _ %+ BORINGSSL_PREFIX %+ _TLSv1_2_server_method %xdefine _TLSv1_client_method _ %+ BORINGSSL_PREFIX %+ _TLSv1_client_method %xdefine _TLSv1_method _ %+ BORINGSSL_PREFIX %+ _TLSv1_method %xdefine _TLSv1_server_method _ %+ BORINGSSL_PREFIX %+ _TLSv1_server_method %xdefine _TRUST_TOKEN_CLIENT_add_key _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_add_key %xdefine _TRUST_TOKEN_CLIENT_begin_issuance _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_begin_issuance %xdefine _TRUST_TOKEN_CLIENT_begin_issuance_over_message _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_begin_issuance_over_message %xdefine _TRUST_TOKEN_CLIENT_begin_redemption _ %+ BORINGSSL_PREFIX %+ 
_TRUST_TOKEN_CLIENT_begin_redemption %xdefine _TRUST_TOKEN_CLIENT_finish_issuance _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_finish_issuance %xdefine _TRUST_TOKEN_CLIENT_finish_redemption _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_finish_redemption %xdefine _TRUST_TOKEN_CLIENT_free _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_free %xdefine _TRUST_TOKEN_CLIENT_new _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_new %xdefine _TRUST_TOKEN_CLIENT_set_srr_key _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_set_srr_key %xdefine _TRUST_TOKEN_ISSUER_add_key _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_add_key %xdefine _TRUST_TOKEN_ISSUER_free _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_free %xdefine _TRUST_TOKEN_ISSUER_issue _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_issue %xdefine _TRUST_TOKEN_ISSUER_new _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_new %xdefine _TRUST_TOKEN_ISSUER_redeem _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_redeem %xdefine _TRUST_TOKEN_ISSUER_redeem_over_message _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_redeem_over_message %xdefine _TRUST_TOKEN_ISSUER_set_metadata_key _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_set_metadata_key %xdefine _TRUST_TOKEN_ISSUER_set_srr_key _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_set_srr_key %xdefine _TRUST_TOKEN_PRETOKEN_free _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_PRETOKEN_free %xdefine _TRUST_TOKEN_decode_private_metadata _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_decode_private_metadata %xdefine _TRUST_TOKEN_derive_key_from_secret _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_derive_key_from_secret %xdefine _TRUST_TOKEN_experiment_v1 _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_experiment_v1 %xdefine _TRUST_TOKEN_experiment_v2_pmb _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_experiment_v2_pmb %xdefine _TRUST_TOKEN_experiment_v2_voprf _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_experiment_v2_voprf %xdefine _TRUST_TOKEN_free _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_free %xdefine _TRUST_TOKEN_generate_key _ %+ BORINGSSL_PREFIX %+ 
_TRUST_TOKEN_generate_key %xdefine _TRUST_TOKEN_new _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_new %xdefine _TRUST_TOKEN_pst_v1_pmb _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_pst_v1_pmb %xdefine _TRUST_TOKEN_pst_v1_voprf _ %+ BORINGSSL_PREFIX %+ _TRUST_TOKEN_pst_v1_voprf %xdefine _USERNOTICE_free _ %+ BORINGSSL_PREFIX %+ _USERNOTICE_free %xdefine _USERNOTICE_it _ %+ BORINGSSL_PREFIX %+ _USERNOTICE_it %xdefine _USERNOTICE_new _ %+ BORINGSSL_PREFIX %+ _USERNOTICE_new %xdefine _X25519 _ %+ BORINGSSL_PREFIX %+ _X25519 %xdefine _X25519_keypair _ %+ BORINGSSL_PREFIX %+ _X25519_keypair %xdefine _X25519_public_from_private _ %+ BORINGSSL_PREFIX %+ _X25519_public_from_private %xdefine _X509V3_EXT_CRL_add_nconf _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_CRL_add_nconf %xdefine _X509V3_EXT_REQ_add_nconf _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_REQ_add_nconf %xdefine _X509V3_EXT_add _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_add %xdefine _X509V3_EXT_add_alias _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_add_alias %xdefine _X509V3_EXT_add_nconf _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_add_nconf %xdefine _X509V3_EXT_add_nconf_sk _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_add_nconf_sk %xdefine _X509V3_EXT_d2i _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_d2i %xdefine _X509V3_EXT_free _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_free %xdefine _X509V3_EXT_get _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_get %xdefine _X509V3_EXT_get_nid _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_get_nid %xdefine _X509V3_EXT_i2d _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_i2d %xdefine _X509V3_EXT_nconf _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_nconf %xdefine _X509V3_EXT_nconf_nid _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_nconf_nid %xdefine _X509V3_EXT_print _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_print %xdefine _X509V3_EXT_print_fp _ %+ BORINGSSL_PREFIX %+ _X509V3_EXT_print_fp %xdefine _X509V3_NAME_from_section _ %+ BORINGSSL_PREFIX %+ _X509V3_NAME_from_section %xdefine _X509V3_add1_i2d _ %+ BORINGSSL_PREFIX %+ _X509V3_add1_i2d %xdefine _X509V3_add_standard_extensions _ %+ BORINGSSL_PREFIX %+ 
_X509V3_add_standard_extensions %xdefine _X509V3_add_value _ %+ BORINGSSL_PREFIX %+ _X509V3_add_value %xdefine _X509V3_add_value_bool _ %+ BORINGSSL_PREFIX %+ _X509V3_add_value_bool %xdefine _X509V3_add_value_int _ %+ BORINGSSL_PREFIX %+ _X509V3_add_value_int %xdefine _X509V3_bool_from_string _ %+ BORINGSSL_PREFIX %+ _X509V3_bool_from_string %xdefine _X509V3_conf_free _ %+ BORINGSSL_PREFIX %+ _X509V3_conf_free %xdefine _X509V3_extensions_print _ %+ BORINGSSL_PREFIX %+ _X509V3_extensions_print %xdefine _X509V3_get_d2i _ %+ BORINGSSL_PREFIX %+ _X509V3_get_d2i %xdefine _X509V3_get_section _ %+ BORINGSSL_PREFIX %+ _X509V3_get_section %xdefine _X509V3_get_value_bool _ %+ BORINGSSL_PREFIX %+ _X509V3_get_value_bool %xdefine _X509V3_get_value_int _ %+ BORINGSSL_PREFIX %+ _X509V3_get_value_int %xdefine _X509V3_parse_list _ %+ BORINGSSL_PREFIX %+ _X509V3_parse_list %xdefine _X509V3_set_ctx _ %+ BORINGSSL_PREFIX %+ _X509V3_set_ctx %xdefine _X509V3_set_nconf _ %+ BORINGSSL_PREFIX %+ _X509V3_set_nconf %xdefine _X509_ALGOR_cmp _ %+ BORINGSSL_PREFIX %+ _X509_ALGOR_cmp %xdefine _X509_ALGOR_dup _ %+ BORINGSSL_PREFIX %+ _X509_ALGOR_dup %xdefine _X509_ALGOR_free _ %+ BORINGSSL_PREFIX %+ _X509_ALGOR_free %xdefine _X509_ALGOR_get0 _ %+ BORINGSSL_PREFIX %+ _X509_ALGOR_get0 %xdefine _X509_ALGOR_it _ %+ BORINGSSL_PREFIX %+ _X509_ALGOR_it %xdefine _X509_ALGOR_new _ %+ BORINGSSL_PREFIX %+ _X509_ALGOR_new %xdefine _X509_ALGOR_set0 _ %+ BORINGSSL_PREFIX %+ _X509_ALGOR_set0 %xdefine _X509_ALGOR_set_md _ %+ BORINGSSL_PREFIX %+ _X509_ALGOR_set_md %xdefine _X509_ATTRIBUTE_count _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_count %xdefine _X509_ATTRIBUTE_create _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_create %xdefine _X509_ATTRIBUTE_create_by_NID _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_create_by_NID %xdefine _X509_ATTRIBUTE_create_by_OBJ _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_create_by_OBJ %xdefine _X509_ATTRIBUTE_create_by_txt _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_create_by_txt %xdefine 
_X509_ATTRIBUTE_dup _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_dup %xdefine _X509_ATTRIBUTE_free _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_free %xdefine _X509_ATTRIBUTE_get0_data _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_get0_data %xdefine _X509_ATTRIBUTE_get0_object _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_get0_object %xdefine _X509_ATTRIBUTE_get0_type _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_get0_type %xdefine _X509_ATTRIBUTE_it _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_it %xdefine _X509_ATTRIBUTE_new _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_new %xdefine _X509_ATTRIBUTE_set1_data _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_set1_data %xdefine _X509_ATTRIBUTE_set1_object _ %+ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_set1_object %xdefine _X509_CERT_AUX_free _ %+ BORINGSSL_PREFIX %+ _X509_CERT_AUX_free %xdefine _X509_CERT_AUX_it _ %+ BORINGSSL_PREFIX %+ _X509_CERT_AUX_it %xdefine _X509_CERT_AUX_new _ %+ BORINGSSL_PREFIX %+ _X509_CERT_AUX_new %xdefine _X509_CERT_AUX_print _ %+ BORINGSSL_PREFIX %+ _X509_CERT_AUX_print %xdefine _X509_CINF_free _ %+ BORINGSSL_PREFIX %+ _X509_CINF_free %xdefine _X509_CINF_it _ %+ BORINGSSL_PREFIX %+ _X509_CINF_it %xdefine _X509_CINF_new _ %+ BORINGSSL_PREFIX %+ _X509_CINF_new %xdefine _X509_CRL_INFO_free _ %+ BORINGSSL_PREFIX %+ _X509_CRL_INFO_free %xdefine _X509_CRL_INFO_it _ %+ BORINGSSL_PREFIX %+ _X509_CRL_INFO_it %xdefine _X509_CRL_INFO_new _ %+ BORINGSSL_PREFIX %+ _X509_CRL_INFO_new %xdefine _X509_CRL_add0_revoked _ %+ BORINGSSL_PREFIX %+ _X509_CRL_add0_revoked %xdefine _X509_CRL_add1_ext_i2d _ %+ BORINGSSL_PREFIX %+ _X509_CRL_add1_ext_i2d %xdefine _X509_CRL_add_ext _ %+ BORINGSSL_PREFIX %+ _X509_CRL_add_ext %xdefine _X509_CRL_cmp _ %+ BORINGSSL_PREFIX %+ _X509_CRL_cmp %xdefine _X509_CRL_delete_ext _ %+ BORINGSSL_PREFIX %+ _X509_CRL_delete_ext %xdefine _X509_CRL_digest _ %+ BORINGSSL_PREFIX %+ _X509_CRL_digest %xdefine _X509_CRL_dup _ %+ BORINGSSL_PREFIX %+ _X509_CRL_dup %xdefine _X509_CRL_free _ %+ BORINGSSL_PREFIX %+ _X509_CRL_free %xdefine 
_X509_CRL_get0_by_cert _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get0_by_cert %xdefine _X509_CRL_get0_by_serial _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get0_by_serial %xdefine _X509_CRL_get0_extensions _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get0_extensions %xdefine _X509_CRL_get0_lastUpdate _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get0_lastUpdate %xdefine _X509_CRL_get0_nextUpdate _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get0_nextUpdate %xdefine _X509_CRL_get0_signature _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get0_signature %xdefine _X509_CRL_get_REVOKED _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get_REVOKED %xdefine _X509_CRL_get_ext _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get_ext %xdefine _X509_CRL_get_ext_by_NID _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get_ext_by_NID %xdefine _X509_CRL_get_ext_by_OBJ _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get_ext_by_OBJ %xdefine _X509_CRL_get_ext_by_critical _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get_ext_by_critical %xdefine _X509_CRL_get_ext_count _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get_ext_count %xdefine _X509_CRL_get_ext_d2i _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get_ext_d2i %xdefine _X509_CRL_get_issuer _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get_issuer %xdefine _X509_CRL_get_lastUpdate _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get_lastUpdate %xdefine _X509_CRL_get_nextUpdate _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get_nextUpdate %xdefine _X509_CRL_get_signature_nid _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get_signature_nid %xdefine _X509_CRL_get_version _ %+ BORINGSSL_PREFIX %+ _X509_CRL_get_version %xdefine _X509_CRL_it _ %+ BORINGSSL_PREFIX %+ _X509_CRL_it %xdefine _X509_CRL_match _ %+ BORINGSSL_PREFIX %+ _X509_CRL_match %xdefine _X509_CRL_new _ %+ BORINGSSL_PREFIX %+ _X509_CRL_new %xdefine _X509_CRL_print _ %+ BORINGSSL_PREFIX %+ _X509_CRL_print %xdefine _X509_CRL_print_fp _ %+ BORINGSSL_PREFIX %+ _X509_CRL_print_fp %xdefine _X509_CRL_set1_lastUpdate _ %+ BORINGSSL_PREFIX %+ _X509_CRL_set1_lastUpdate %xdefine _X509_CRL_set1_nextUpdate _ %+ BORINGSSL_PREFIX %+ _X509_CRL_set1_nextUpdate %xdefine 
_X509_CRL_set1_signature_algo _ %+ BORINGSSL_PREFIX %+ _X509_CRL_set1_signature_algo %xdefine _X509_CRL_set1_signature_value _ %+ BORINGSSL_PREFIX %+ _X509_CRL_set1_signature_value %xdefine _X509_CRL_set_issuer_name _ %+ BORINGSSL_PREFIX %+ _X509_CRL_set_issuer_name %xdefine _X509_CRL_set_version _ %+ BORINGSSL_PREFIX %+ _X509_CRL_set_version %xdefine _X509_CRL_sign _ %+ BORINGSSL_PREFIX %+ _X509_CRL_sign %xdefine _X509_CRL_sign_ctx _ %+ BORINGSSL_PREFIX %+ _X509_CRL_sign_ctx %xdefine _X509_CRL_sort _ %+ BORINGSSL_PREFIX %+ _X509_CRL_sort %xdefine _X509_CRL_up_ref _ %+ BORINGSSL_PREFIX %+ _X509_CRL_up_ref %xdefine _X509_CRL_verify _ %+ BORINGSSL_PREFIX %+ _X509_CRL_verify %xdefine _X509_EXTENSIONS_it _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSIONS_it %xdefine _X509_EXTENSION_create_by_NID _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSION_create_by_NID %xdefine _X509_EXTENSION_create_by_OBJ _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSION_create_by_OBJ %xdefine _X509_EXTENSION_dup _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSION_dup %xdefine _X509_EXTENSION_free _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSION_free %xdefine _X509_EXTENSION_get_critical _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSION_get_critical %xdefine _X509_EXTENSION_get_data _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSION_get_data %xdefine _X509_EXTENSION_get_object _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSION_get_object %xdefine _X509_EXTENSION_it _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSION_it %xdefine _X509_EXTENSION_new _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSION_new %xdefine _X509_EXTENSION_set_critical _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSION_set_critical %xdefine _X509_EXTENSION_set_data _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSION_set_data %xdefine _X509_EXTENSION_set_object _ %+ BORINGSSL_PREFIX %+ _X509_EXTENSION_set_object %xdefine _X509_INFO_free _ %+ BORINGSSL_PREFIX %+ _X509_INFO_free %xdefine _X509_LOOKUP_add_dir _ %+ BORINGSSL_PREFIX %+ _X509_LOOKUP_add_dir %xdefine _X509_LOOKUP_ctrl _ %+ BORINGSSL_PREFIX %+ _X509_LOOKUP_ctrl %xdefine 
_X509_LOOKUP_file _ %+ BORINGSSL_PREFIX %+ _X509_LOOKUP_file %xdefine _X509_LOOKUP_free _ %+ BORINGSSL_PREFIX %+ _X509_LOOKUP_free %xdefine _X509_LOOKUP_hash_dir _ %+ BORINGSSL_PREFIX %+ _X509_LOOKUP_hash_dir %xdefine _X509_LOOKUP_load_file _ %+ BORINGSSL_PREFIX %+ _X509_LOOKUP_load_file %xdefine _X509_NAME_ENTRY_create_by_NID _ %+ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_create_by_NID %xdefine _X509_NAME_ENTRY_create_by_OBJ _ %+ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_create_by_OBJ %xdefine _X509_NAME_ENTRY_create_by_txt _ %+ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_create_by_txt %xdefine _X509_NAME_ENTRY_dup _ %+ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_dup %xdefine _X509_NAME_ENTRY_free _ %+ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_free %xdefine _X509_NAME_ENTRY_get_data _ %+ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_get_data %xdefine _X509_NAME_ENTRY_get_object _ %+ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_get_object %xdefine _X509_NAME_ENTRY_it _ %+ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_it %xdefine _X509_NAME_ENTRY_new _ %+ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_new %xdefine _X509_NAME_ENTRY_set _ %+ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_set %xdefine _X509_NAME_ENTRY_set_data _ %+ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_set_data %xdefine _X509_NAME_ENTRY_set_object _ %+ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_set_object %xdefine _X509_NAME_add_entry _ %+ BORINGSSL_PREFIX %+ _X509_NAME_add_entry %xdefine _X509_NAME_add_entry_by_NID _ %+ BORINGSSL_PREFIX %+ _X509_NAME_add_entry_by_NID %xdefine _X509_NAME_add_entry_by_OBJ _ %+ BORINGSSL_PREFIX %+ _X509_NAME_add_entry_by_OBJ %xdefine _X509_NAME_add_entry_by_txt _ %+ BORINGSSL_PREFIX %+ _X509_NAME_add_entry_by_txt %xdefine _X509_NAME_cmp _ %+ BORINGSSL_PREFIX %+ _X509_NAME_cmp %xdefine _X509_NAME_delete_entry _ %+ BORINGSSL_PREFIX %+ _X509_NAME_delete_entry %xdefine _X509_NAME_digest _ %+ BORINGSSL_PREFIX %+ _X509_NAME_digest %xdefine _X509_NAME_dup _ %+ BORINGSSL_PREFIX %+ _X509_NAME_dup %xdefine _X509_NAME_entry_count _ %+ BORINGSSL_PREFIX %+ 
_X509_NAME_entry_count %xdefine _X509_NAME_free _ %+ BORINGSSL_PREFIX %+ _X509_NAME_free %xdefine _X509_NAME_get0_der _ %+ BORINGSSL_PREFIX %+ _X509_NAME_get0_der %xdefine _X509_NAME_get_entry _ %+ BORINGSSL_PREFIX %+ _X509_NAME_get_entry %xdefine _X509_NAME_get_index_by_NID _ %+ BORINGSSL_PREFIX %+ _X509_NAME_get_index_by_NID %xdefine _X509_NAME_get_index_by_OBJ _ %+ BORINGSSL_PREFIX %+ _X509_NAME_get_index_by_OBJ %xdefine _X509_NAME_get_text_by_NID _ %+ BORINGSSL_PREFIX %+ _X509_NAME_get_text_by_NID %xdefine _X509_NAME_get_text_by_OBJ _ %+ BORINGSSL_PREFIX %+ _X509_NAME_get_text_by_OBJ %xdefine _X509_NAME_hash _ %+ BORINGSSL_PREFIX %+ _X509_NAME_hash %xdefine _X509_NAME_hash_old _ %+ BORINGSSL_PREFIX %+ _X509_NAME_hash_old %xdefine _X509_NAME_it _ %+ BORINGSSL_PREFIX %+ _X509_NAME_it %xdefine _X509_NAME_new _ %+ BORINGSSL_PREFIX %+ _X509_NAME_new %xdefine _X509_NAME_oneline _ %+ BORINGSSL_PREFIX %+ _X509_NAME_oneline %xdefine _X509_NAME_print _ %+ BORINGSSL_PREFIX %+ _X509_NAME_print %xdefine _X509_NAME_print_ex _ %+ BORINGSSL_PREFIX %+ _X509_NAME_print_ex %xdefine _X509_NAME_print_ex_fp _ %+ BORINGSSL_PREFIX %+ _X509_NAME_print_ex_fp %xdefine _X509_NAME_set _ %+ BORINGSSL_PREFIX %+ _X509_NAME_set %xdefine _X509_OBJECT_free _ %+ BORINGSSL_PREFIX %+ _X509_OBJECT_free %xdefine _X509_OBJECT_free_contents _ %+ BORINGSSL_PREFIX %+ _X509_OBJECT_free_contents %xdefine _X509_OBJECT_get0_X509 _ %+ BORINGSSL_PREFIX %+ _X509_OBJECT_get0_X509 %xdefine _X509_OBJECT_get_type _ %+ BORINGSSL_PREFIX %+ _X509_OBJECT_get_type %xdefine _X509_OBJECT_new _ %+ BORINGSSL_PREFIX %+ _X509_OBJECT_new %xdefine _X509_PUBKEY_free _ %+ BORINGSSL_PREFIX %+ _X509_PUBKEY_free %xdefine _X509_PUBKEY_get _ %+ BORINGSSL_PREFIX %+ _X509_PUBKEY_get %xdefine _X509_PUBKEY_get0 _ %+ BORINGSSL_PREFIX %+ _X509_PUBKEY_get0 %xdefine _X509_PUBKEY_get0_param _ %+ BORINGSSL_PREFIX %+ _X509_PUBKEY_get0_param %xdefine _X509_PUBKEY_get0_public_key _ %+ BORINGSSL_PREFIX %+ _X509_PUBKEY_get0_public_key %xdefine 
_X509_PUBKEY_it _ %+ BORINGSSL_PREFIX %+ _X509_PUBKEY_it %xdefine _X509_PUBKEY_new _ %+ BORINGSSL_PREFIX %+ _X509_PUBKEY_new %xdefine _X509_PUBKEY_set _ %+ BORINGSSL_PREFIX %+ _X509_PUBKEY_set %xdefine _X509_PUBKEY_set0_param _ %+ BORINGSSL_PREFIX %+ _X509_PUBKEY_set0_param %xdefine _X509_PURPOSE_get0 _ %+ BORINGSSL_PREFIX %+ _X509_PURPOSE_get0 %xdefine _X509_PURPOSE_get_by_sname _ %+ BORINGSSL_PREFIX %+ _X509_PURPOSE_get_by_sname %xdefine _X509_PURPOSE_get_id _ %+ BORINGSSL_PREFIX %+ _X509_PURPOSE_get_id %xdefine _X509_PURPOSE_get_trust _ %+ BORINGSSL_PREFIX %+ _X509_PURPOSE_get_trust %xdefine _X509_REQ_INFO_free _ %+ BORINGSSL_PREFIX %+ _X509_REQ_INFO_free %xdefine _X509_REQ_INFO_it _ %+ BORINGSSL_PREFIX %+ _X509_REQ_INFO_it %xdefine _X509_REQ_INFO_new _ %+ BORINGSSL_PREFIX %+ _X509_REQ_INFO_new %xdefine _X509_REQ_add1_attr _ %+ BORINGSSL_PREFIX %+ _X509_REQ_add1_attr %xdefine _X509_REQ_add1_attr_by_NID _ %+ BORINGSSL_PREFIX %+ _X509_REQ_add1_attr_by_NID %xdefine _X509_REQ_add1_attr_by_OBJ _ %+ BORINGSSL_PREFIX %+ _X509_REQ_add1_attr_by_OBJ %xdefine _X509_REQ_add1_attr_by_txt _ %+ BORINGSSL_PREFIX %+ _X509_REQ_add1_attr_by_txt %xdefine _X509_REQ_add_extensions _ %+ BORINGSSL_PREFIX %+ _X509_REQ_add_extensions %xdefine _X509_REQ_add_extensions_nid _ %+ BORINGSSL_PREFIX %+ _X509_REQ_add_extensions_nid %xdefine _X509_REQ_check_private_key _ %+ BORINGSSL_PREFIX %+ _X509_REQ_check_private_key %xdefine _X509_REQ_delete_attr _ %+ BORINGSSL_PREFIX %+ _X509_REQ_delete_attr %xdefine _X509_REQ_digest _ %+ BORINGSSL_PREFIX %+ _X509_REQ_digest %xdefine _X509_REQ_dup _ %+ BORINGSSL_PREFIX %+ _X509_REQ_dup %xdefine _X509_REQ_extension_nid _ %+ BORINGSSL_PREFIX %+ _X509_REQ_extension_nid %xdefine _X509_REQ_free _ %+ BORINGSSL_PREFIX %+ _X509_REQ_free %xdefine _X509_REQ_get0_pubkey _ %+ BORINGSSL_PREFIX %+ _X509_REQ_get0_pubkey %xdefine _X509_REQ_get0_signature _ %+ BORINGSSL_PREFIX %+ _X509_REQ_get0_signature %xdefine _X509_REQ_get1_email _ %+ BORINGSSL_PREFIX %+ 
_X509_REQ_get1_email %xdefine _X509_REQ_get_attr _ %+ BORINGSSL_PREFIX %+ _X509_REQ_get_attr %xdefine _X509_REQ_get_attr_by_NID _ %+ BORINGSSL_PREFIX %+ _X509_REQ_get_attr_by_NID %xdefine _X509_REQ_get_attr_by_OBJ _ %+ BORINGSSL_PREFIX %+ _X509_REQ_get_attr_by_OBJ %xdefine _X509_REQ_get_attr_count _ %+ BORINGSSL_PREFIX %+ _X509_REQ_get_attr_count %xdefine _X509_REQ_get_extensions _ %+ BORINGSSL_PREFIX %+ _X509_REQ_get_extensions %xdefine _X509_REQ_get_pubkey _ %+ BORINGSSL_PREFIX %+ _X509_REQ_get_pubkey %xdefine _X509_REQ_get_signature_nid _ %+ BORINGSSL_PREFIX %+ _X509_REQ_get_signature_nid %xdefine _X509_REQ_get_subject_name _ %+ BORINGSSL_PREFIX %+ _X509_REQ_get_subject_name %xdefine _X509_REQ_get_version _ %+ BORINGSSL_PREFIX %+ _X509_REQ_get_version %xdefine _X509_REQ_it _ %+ BORINGSSL_PREFIX %+ _X509_REQ_it %xdefine _X509_REQ_new _ %+ BORINGSSL_PREFIX %+ _X509_REQ_new %xdefine _X509_REQ_print _ %+ BORINGSSL_PREFIX %+ _X509_REQ_print %xdefine _X509_REQ_print_ex _ %+ BORINGSSL_PREFIX %+ _X509_REQ_print_ex %xdefine _X509_REQ_print_fp _ %+ BORINGSSL_PREFIX %+ _X509_REQ_print_fp %xdefine _X509_REQ_set1_signature_algo _ %+ BORINGSSL_PREFIX %+ _X509_REQ_set1_signature_algo %xdefine _X509_REQ_set1_signature_value _ %+ BORINGSSL_PREFIX %+ _X509_REQ_set1_signature_value %xdefine _X509_REQ_set_pubkey _ %+ BORINGSSL_PREFIX %+ _X509_REQ_set_pubkey %xdefine _X509_REQ_set_subject_name _ %+ BORINGSSL_PREFIX %+ _X509_REQ_set_subject_name %xdefine _X509_REQ_set_version _ %+ BORINGSSL_PREFIX %+ _X509_REQ_set_version %xdefine _X509_REQ_sign _ %+ BORINGSSL_PREFIX %+ _X509_REQ_sign %xdefine _X509_REQ_sign_ctx _ %+ BORINGSSL_PREFIX %+ _X509_REQ_sign_ctx %xdefine _X509_REQ_verify _ %+ BORINGSSL_PREFIX %+ _X509_REQ_verify %xdefine _X509_REVOKED_add1_ext_i2d _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_add1_ext_i2d %xdefine _X509_REVOKED_add_ext _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_add_ext %xdefine _X509_REVOKED_delete_ext _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_delete_ext %xdefine 
_X509_REVOKED_dup _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_dup %xdefine _X509_REVOKED_free _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_free %xdefine _X509_REVOKED_get0_extensions _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_get0_extensions %xdefine _X509_REVOKED_get0_revocationDate _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_get0_revocationDate %xdefine _X509_REVOKED_get0_serialNumber _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_get0_serialNumber %xdefine _X509_REVOKED_get_ext _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_get_ext %xdefine _X509_REVOKED_get_ext_by_NID _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_get_ext_by_NID %xdefine _X509_REVOKED_get_ext_by_OBJ _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_get_ext_by_OBJ %xdefine _X509_REVOKED_get_ext_by_critical _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_get_ext_by_critical %xdefine _X509_REVOKED_get_ext_count _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_get_ext_count %xdefine _X509_REVOKED_get_ext_d2i _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_get_ext_d2i %xdefine _X509_REVOKED_it _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_it %xdefine _X509_REVOKED_new _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_new %xdefine _X509_REVOKED_set_revocationDate _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_set_revocationDate %xdefine _X509_REVOKED_set_serialNumber _ %+ BORINGSSL_PREFIX %+ _X509_REVOKED_set_serialNumber %xdefine _X509_SIG_free _ %+ BORINGSSL_PREFIX %+ _X509_SIG_free %xdefine _X509_SIG_get0 _ %+ BORINGSSL_PREFIX %+ _X509_SIG_get0 %xdefine _X509_SIG_getm _ %+ BORINGSSL_PREFIX %+ _X509_SIG_getm %xdefine _X509_SIG_new _ %+ BORINGSSL_PREFIX %+ _X509_SIG_new %xdefine _X509_STORE_CTX_cleanup _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_cleanup %xdefine _X509_STORE_CTX_free _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_free %xdefine _X509_STORE_CTX_get0_cert _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_cert %xdefine _X509_STORE_CTX_get0_chain _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_chain %xdefine _X509_STORE_CTX_get0_current_crl _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_current_crl %xdefine 
_X509_STORE_CTX_get0_param _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_param %xdefine _X509_STORE_CTX_get0_parent_ctx _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_parent_ctx %xdefine _X509_STORE_CTX_get0_store _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_store %xdefine _X509_STORE_CTX_get0_untrusted _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_untrusted %xdefine _X509_STORE_CTX_get1_certs _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get1_certs %xdefine _X509_STORE_CTX_get1_chain _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get1_chain %xdefine _X509_STORE_CTX_get1_crls _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get1_crls %xdefine _X509_STORE_CTX_get1_issuer _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get1_issuer %xdefine _X509_STORE_CTX_get_by_subject _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_by_subject %xdefine _X509_STORE_CTX_get_chain _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_chain %xdefine _X509_STORE_CTX_get_current_cert _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_current_cert %xdefine _X509_STORE_CTX_get_error _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_error %xdefine _X509_STORE_CTX_get_error_depth _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_error_depth %xdefine _X509_STORE_CTX_get_ex_data _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_ex_data %xdefine _X509_STORE_CTX_get_ex_new_index _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_ex_new_index %xdefine _X509_STORE_CTX_init _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_init %xdefine _X509_STORE_CTX_new _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_new %xdefine _X509_STORE_CTX_set0_crls _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set0_crls %xdefine _X509_STORE_CTX_set0_param _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set0_param %xdefine _X509_STORE_CTX_set0_trusted_stack _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set0_trusted_stack %xdefine _X509_STORE_CTX_set_chain _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_chain %xdefine _X509_STORE_CTX_set_default _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_default %xdefine 
_X509_STORE_CTX_set_depth _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_depth %xdefine _X509_STORE_CTX_set_error _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_error %xdefine _X509_STORE_CTX_set_ex_data _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_ex_data %xdefine _X509_STORE_CTX_set_flags _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_flags %xdefine _X509_STORE_CTX_set_purpose _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_purpose %xdefine _X509_STORE_CTX_set_time _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_time %xdefine _X509_STORE_CTX_set_time_posix _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_time_posix %xdefine _X509_STORE_CTX_set_trust _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_trust %xdefine _X509_STORE_CTX_set_verify_cb _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_verify_cb %xdefine _X509_STORE_CTX_trusted_stack _ %+ BORINGSSL_PREFIX %+ _X509_STORE_CTX_trusted_stack %xdefine _X509_STORE_add_cert _ %+ BORINGSSL_PREFIX %+ _X509_STORE_add_cert %xdefine _X509_STORE_add_crl _ %+ BORINGSSL_PREFIX %+ _X509_STORE_add_crl %xdefine _X509_STORE_add_lookup _ %+ BORINGSSL_PREFIX %+ _X509_STORE_add_lookup %xdefine _X509_STORE_free _ %+ BORINGSSL_PREFIX %+ _X509_STORE_free %xdefine _X509_STORE_get0_objects _ %+ BORINGSSL_PREFIX %+ _X509_STORE_get0_objects %xdefine _X509_STORE_get0_param _ %+ BORINGSSL_PREFIX %+ _X509_STORE_get0_param %xdefine _X509_STORE_get1_objects _ %+ BORINGSSL_PREFIX %+ _X509_STORE_get1_objects %xdefine _X509_STORE_load_locations _ %+ BORINGSSL_PREFIX %+ _X509_STORE_load_locations %xdefine _X509_STORE_new _ %+ BORINGSSL_PREFIX %+ _X509_STORE_new %xdefine _X509_STORE_set1_param _ %+ BORINGSSL_PREFIX %+ _X509_STORE_set1_param %xdefine _X509_STORE_set_default_paths _ %+ BORINGSSL_PREFIX %+ _X509_STORE_set_default_paths %xdefine _X509_STORE_set_depth _ %+ BORINGSSL_PREFIX %+ _X509_STORE_set_depth %xdefine _X509_STORE_set_flags _ %+ BORINGSSL_PREFIX %+ _X509_STORE_set_flags %xdefine _X509_STORE_set_purpose _ %+ BORINGSSL_PREFIX %+ _X509_STORE_set_purpose 
%xdefine _X509_STORE_set_trust _ %+ BORINGSSL_PREFIX %+ _X509_STORE_set_trust %xdefine _X509_STORE_set_verify_cb _ %+ BORINGSSL_PREFIX %+ _X509_STORE_set_verify_cb %xdefine _X509_STORE_up_ref _ %+ BORINGSSL_PREFIX %+ _X509_STORE_up_ref %xdefine _X509_VAL_free _ %+ BORINGSSL_PREFIX %+ _X509_VAL_free %xdefine _X509_VAL_it _ %+ BORINGSSL_PREFIX %+ _X509_VAL_it %xdefine _X509_VAL_new _ %+ BORINGSSL_PREFIX %+ _X509_VAL_new %xdefine _X509_VERIFY_PARAM_add0_policy _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_add0_policy %xdefine _X509_VERIFY_PARAM_add1_host _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_add1_host %xdefine _X509_VERIFY_PARAM_clear_flags _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_clear_flags %xdefine _X509_VERIFY_PARAM_free _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_free %xdefine _X509_VERIFY_PARAM_get_depth _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_get_depth %xdefine _X509_VERIFY_PARAM_get_flags _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_get_flags %xdefine _X509_VERIFY_PARAM_inherit _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_inherit %xdefine _X509_VERIFY_PARAM_lookup _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_lookup %xdefine _X509_VERIFY_PARAM_new _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_new %xdefine _X509_VERIFY_PARAM_set1 _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set1 %xdefine _X509_VERIFY_PARAM_set1_email _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set1_email %xdefine _X509_VERIFY_PARAM_set1_host _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set1_host %xdefine _X509_VERIFY_PARAM_set1_ip _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set1_ip %xdefine _X509_VERIFY_PARAM_set1_ip_asc _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set1_ip_asc %xdefine _X509_VERIFY_PARAM_set1_policies _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set1_policies %xdefine _X509_VERIFY_PARAM_set_depth _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_depth %xdefine _X509_VERIFY_PARAM_set_flags _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_flags %xdefine 
_X509_VERIFY_PARAM_set_hostflags _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_hostflags %xdefine _X509_VERIFY_PARAM_set_purpose _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_purpose %xdefine _X509_VERIFY_PARAM_set_time _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_time %xdefine _X509_VERIFY_PARAM_set_time_posix _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_time_posix %xdefine _X509_VERIFY_PARAM_set_trust _ %+ BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_trust %xdefine _X509_add1_ext_i2d _ %+ BORINGSSL_PREFIX %+ _X509_add1_ext_i2d %xdefine _X509_add1_reject_object _ %+ BORINGSSL_PREFIX %+ _X509_add1_reject_object %xdefine _X509_add1_trust_object _ %+ BORINGSSL_PREFIX %+ _X509_add1_trust_object %xdefine _X509_add_ext _ %+ BORINGSSL_PREFIX %+ _X509_add_ext %xdefine _X509_alias_get0 _ %+ BORINGSSL_PREFIX %+ _X509_alias_get0 %xdefine _X509_alias_set1 _ %+ BORINGSSL_PREFIX %+ _X509_alias_set1 %xdefine _X509_chain_up_ref _ %+ BORINGSSL_PREFIX %+ _X509_chain_up_ref %xdefine _X509_check_akid _ %+ BORINGSSL_PREFIX %+ _X509_check_akid %xdefine _X509_check_ca _ %+ BORINGSSL_PREFIX %+ _X509_check_ca %xdefine _X509_check_email _ %+ BORINGSSL_PREFIX %+ _X509_check_email %xdefine _X509_check_host _ %+ BORINGSSL_PREFIX %+ _X509_check_host %xdefine _X509_check_ip _ %+ BORINGSSL_PREFIX %+ _X509_check_ip %xdefine _X509_check_ip_asc _ %+ BORINGSSL_PREFIX %+ _X509_check_ip_asc %xdefine _X509_check_issued _ %+ BORINGSSL_PREFIX %+ _X509_check_issued %xdefine _X509_check_private_key _ %+ BORINGSSL_PREFIX %+ _X509_check_private_key %xdefine _X509_check_purpose _ %+ BORINGSSL_PREFIX %+ _X509_check_purpose %xdefine _X509_check_trust _ %+ BORINGSSL_PREFIX %+ _X509_check_trust %xdefine _X509_cmp _ %+ BORINGSSL_PREFIX %+ _X509_cmp %xdefine _X509_cmp_current_time _ %+ BORINGSSL_PREFIX %+ _X509_cmp_current_time %xdefine _X509_cmp_time _ %+ BORINGSSL_PREFIX %+ _X509_cmp_time %xdefine _X509_cmp_time_posix _ %+ BORINGSSL_PREFIX %+ _X509_cmp_time_posix %xdefine _X509_delete_ext _ %+ 
BORINGSSL_PREFIX %+ _X509_delete_ext %xdefine _X509_digest _ %+ BORINGSSL_PREFIX %+ _X509_digest %xdefine _X509_dup _ %+ BORINGSSL_PREFIX %+ _X509_dup %xdefine _X509_email_free _ %+ BORINGSSL_PREFIX %+ _X509_email_free %xdefine _X509_find_by_issuer_and_serial _ %+ BORINGSSL_PREFIX %+ _X509_find_by_issuer_and_serial %xdefine _X509_find_by_subject _ %+ BORINGSSL_PREFIX %+ _X509_find_by_subject %xdefine _X509_free _ %+ BORINGSSL_PREFIX %+ _X509_free %xdefine _X509_get0_authority_issuer _ %+ BORINGSSL_PREFIX %+ _X509_get0_authority_issuer %xdefine _X509_get0_authority_key_id _ %+ BORINGSSL_PREFIX %+ _X509_get0_authority_key_id %xdefine _X509_get0_authority_serial _ %+ BORINGSSL_PREFIX %+ _X509_get0_authority_serial %xdefine _X509_get0_extensions _ %+ BORINGSSL_PREFIX %+ _X509_get0_extensions %xdefine _X509_get0_notAfter _ %+ BORINGSSL_PREFIX %+ _X509_get0_notAfter %xdefine _X509_get0_notBefore _ %+ BORINGSSL_PREFIX %+ _X509_get0_notBefore %xdefine _X509_get0_pubkey _ %+ BORINGSSL_PREFIX %+ _X509_get0_pubkey %xdefine _X509_get0_pubkey_bitstr _ %+ BORINGSSL_PREFIX %+ _X509_get0_pubkey_bitstr %xdefine _X509_get0_serialNumber _ %+ BORINGSSL_PREFIX %+ _X509_get0_serialNumber %xdefine _X509_get0_signature _ %+ BORINGSSL_PREFIX %+ _X509_get0_signature %xdefine _X509_get0_subject_key_id _ %+ BORINGSSL_PREFIX %+ _X509_get0_subject_key_id %xdefine _X509_get0_tbs_sigalg _ %+ BORINGSSL_PREFIX %+ _X509_get0_tbs_sigalg %xdefine _X509_get0_uids _ %+ BORINGSSL_PREFIX %+ _X509_get0_uids %xdefine _X509_get1_email _ %+ BORINGSSL_PREFIX %+ _X509_get1_email %xdefine _X509_get1_ocsp _ %+ BORINGSSL_PREFIX %+ _X509_get1_ocsp %xdefine _X509_get_X509_PUBKEY _ %+ BORINGSSL_PREFIX %+ _X509_get_X509_PUBKEY %xdefine _X509_get_default_cert_area _ %+ BORINGSSL_PREFIX %+ _X509_get_default_cert_area %xdefine _X509_get_default_cert_dir _ %+ BORINGSSL_PREFIX %+ _X509_get_default_cert_dir %xdefine _X509_get_default_cert_dir_env _ %+ BORINGSSL_PREFIX %+ _X509_get_default_cert_dir_env %xdefine 
_X509_get_default_cert_file _ %+ BORINGSSL_PREFIX %+ _X509_get_default_cert_file %xdefine _X509_get_default_cert_file_env _ %+ BORINGSSL_PREFIX %+ _X509_get_default_cert_file_env %xdefine _X509_get_default_private_dir _ %+ BORINGSSL_PREFIX %+ _X509_get_default_private_dir %xdefine _X509_get_ex_data _ %+ BORINGSSL_PREFIX %+ _X509_get_ex_data %xdefine _X509_get_ex_new_index _ %+ BORINGSSL_PREFIX %+ _X509_get_ex_new_index %xdefine _X509_get_ext _ %+ BORINGSSL_PREFIX %+ _X509_get_ext %xdefine _X509_get_ext_by_NID _ %+ BORINGSSL_PREFIX %+ _X509_get_ext_by_NID %xdefine _X509_get_ext_by_OBJ _ %+ BORINGSSL_PREFIX %+ _X509_get_ext_by_OBJ %xdefine _X509_get_ext_by_critical _ %+ BORINGSSL_PREFIX %+ _X509_get_ext_by_critical %xdefine _X509_get_ext_count _ %+ BORINGSSL_PREFIX %+ _X509_get_ext_count %xdefine _X509_get_ext_d2i _ %+ BORINGSSL_PREFIX %+ _X509_get_ext_d2i %xdefine _X509_get_extended_key_usage _ %+ BORINGSSL_PREFIX %+ _X509_get_extended_key_usage %xdefine _X509_get_extension_flags _ %+ BORINGSSL_PREFIX %+ _X509_get_extension_flags %xdefine _X509_get_issuer_name _ %+ BORINGSSL_PREFIX %+ _X509_get_issuer_name %xdefine _X509_get_key_usage _ %+ BORINGSSL_PREFIX %+ _X509_get_key_usage %xdefine _X509_get_notAfter _ %+ BORINGSSL_PREFIX %+ _X509_get_notAfter %xdefine _X509_get_notBefore _ %+ BORINGSSL_PREFIX %+ _X509_get_notBefore %xdefine _X509_get_pathlen _ %+ BORINGSSL_PREFIX %+ _X509_get_pathlen %xdefine _X509_get_pubkey _ %+ BORINGSSL_PREFIX %+ _X509_get_pubkey %xdefine _X509_get_serialNumber _ %+ BORINGSSL_PREFIX %+ _X509_get_serialNumber %xdefine _X509_get_signature_nid _ %+ BORINGSSL_PREFIX %+ _X509_get_signature_nid %xdefine _X509_get_subject_name _ %+ BORINGSSL_PREFIX %+ _X509_get_subject_name %xdefine _X509_get_version _ %+ BORINGSSL_PREFIX %+ _X509_get_version %xdefine _X509_getm_notAfter _ %+ BORINGSSL_PREFIX %+ _X509_getm_notAfter %xdefine _X509_getm_notBefore _ %+ BORINGSSL_PREFIX %+ _X509_getm_notBefore %xdefine _X509_gmtime_adj _ %+ BORINGSSL_PREFIX %+ 
_X509_gmtime_adj %xdefine _X509_is_valid_trust_id _ %+ BORINGSSL_PREFIX %+ _X509_is_valid_trust_id %xdefine _X509_issuer_name_cmp _ %+ BORINGSSL_PREFIX %+ _X509_issuer_name_cmp %xdefine _X509_issuer_name_hash _ %+ BORINGSSL_PREFIX %+ _X509_issuer_name_hash %xdefine _X509_issuer_name_hash_old _ %+ BORINGSSL_PREFIX %+ _X509_issuer_name_hash_old %xdefine _X509_it _ %+ BORINGSSL_PREFIX %+ _X509_it %xdefine _X509_keyid_get0 _ %+ BORINGSSL_PREFIX %+ _X509_keyid_get0 %xdefine _X509_keyid_set1 _ %+ BORINGSSL_PREFIX %+ _X509_keyid_set1 %xdefine _X509_load_cert_crl_file _ %+ BORINGSSL_PREFIX %+ _X509_load_cert_crl_file %xdefine _X509_load_cert_file _ %+ BORINGSSL_PREFIX %+ _X509_load_cert_file %xdefine _X509_load_crl_file _ %+ BORINGSSL_PREFIX %+ _X509_load_crl_file %xdefine _X509_new _ %+ BORINGSSL_PREFIX %+ _X509_new %xdefine _X509_parse_from_buffer _ %+ BORINGSSL_PREFIX %+ _X509_parse_from_buffer %xdefine _X509_policy_check _ %+ BORINGSSL_PREFIX %+ _X509_policy_check %xdefine _X509_print _ %+ BORINGSSL_PREFIX %+ _X509_print %xdefine _X509_print_ex _ %+ BORINGSSL_PREFIX %+ _X509_print_ex %xdefine _X509_print_ex_fp _ %+ BORINGSSL_PREFIX %+ _X509_print_ex_fp %xdefine _X509_print_fp _ %+ BORINGSSL_PREFIX %+ _X509_print_fp %xdefine _X509_pubkey_digest _ %+ BORINGSSL_PREFIX %+ _X509_pubkey_digest %xdefine _X509_reject_clear _ %+ BORINGSSL_PREFIX %+ _X509_reject_clear %xdefine _X509_set1_notAfter _ %+ BORINGSSL_PREFIX %+ _X509_set1_notAfter %xdefine _X509_set1_notBefore _ %+ BORINGSSL_PREFIX %+ _X509_set1_notBefore %xdefine _X509_set1_signature_algo _ %+ BORINGSSL_PREFIX %+ _X509_set1_signature_algo %xdefine _X509_set1_signature_value _ %+ BORINGSSL_PREFIX %+ _X509_set1_signature_value %xdefine _X509_set_ex_data _ %+ BORINGSSL_PREFIX %+ _X509_set_ex_data %xdefine _X509_set_issuer_name _ %+ BORINGSSL_PREFIX %+ _X509_set_issuer_name %xdefine _X509_set_notAfter _ %+ BORINGSSL_PREFIX %+ _X509_set_notAfter %xdefine _X509_set_notBefore _ %+ BORINGSSL_PREFIX %+ _X509_set_notBefore 
%xdefine _X509_set_pubkey _ %+ BORINGSSL_PREFIX %+ _X509_set_pubkey %xdefine _X509_set_serialNumber _ %+ BORINGSSL_PREFIX %+ _X509_set_serialNumber %xdefine _X509_set_subject_name _ %+ BORINGSSL_PREFIX %+ _X509_set_subject_name %xdefine _X509_set_version _ %+ BORINGSSL_PREFIX %+ _X509_set_version %xdefine _X509_sign _ %+ BORINGSSL_PREFIX %+ _X509_sign %xdefine _X509_sign_ctx _ %+ BORINGSSL_PREFIX %+ _X509_sign_ctx %xdefine _X509_signature_dump _ %+ BORINGSSL_PREFIX %+ _X509_signature_dump %xdefine _X509_signature_print _ %+ BORINGSSL_PREFIX %+ _X509_signature_print %xdefine _X509_subject_name_cmp _ %+ BORINGSSL_PREFIX %+ _X509_subject_name_cmp %xdefine _X509_subject_name_hash _ %+ BORINGSSL_PREFIX %+ _X509_subject_name_hash %xdefine _X509_subject_name_hash_old _ %+ BORINGSSL_PREFIX %+ _X509_subject_name_hash_old %xdefine _X509_supported_extension _ %+ BORINGSSL_PREFIX %+ _X509_supported_extension %xdefine _X509_time_adj _ %+ BORINGSSL_PREFIX %+ _X509_time_adj %xdefine _X509_time_adj_ex _ %+ BORINGSSL_PREFIX %+ _X509_time_adj_ex %xdefine _X509_trust_clear _ %+ BORINGSSL_PREFIX %+ _X509_trust_clear %xdefine _X509_up_ref _ %+ BORINGSSL_PREFIX %+ _X509_up_ref %xdefine _X509_verify _ %+ BORINGSSL_PREFIX %+ _X509_verify %xdefine _X509_verify_cert _ %+ BORINGSSL_PREFIX %+ _X509_verify_cert %xdefine _X509_verify_cert_error_string _ %+ BORINGSSL_PREFIX %+ _X509_verify_cert_error_string %xdefine _X509v3_add_ext _ %+ BORINGSSL_PREFIX %+ _X509v3_add_ext %xdefine _X509v3_delete_ext _ %+ BORINGSSL_PREFIX %+ _X509v3_delete_ext %xdefine _X509v3_get_ext _ %+ BORINGSSL_PREFIX %+ _X509v3_get_ext %xdefine _X509v3_get_ext_by_NID _ %+ BORINGSSL_PREFIX %+ _X509v3_get_ext_by_NID %xdefine _X509v3_get_ext_by_OBJ _ %+ BORINGSSL_PREFIX %+ _X509v3_get_ext_by_OBJ %xdefine _X509v3_get_ext_by_critical _ %+ BORINGSSL_PREFIX %+ _X509v3_get_ext_by_critical %xdefine _X509v3_get_ext_count _ %+ BORINGSSL_PREFIX %+ _X509v3_get_ext_count %xdefine ___clang_call_terminate _ %+ BORINGSSL_PREFIX %+ 
___clang_call_terminate %xdefine _a2i_IPADDRESS _ %+ BORINGSSL_PREFIX %+ _a2i_IPADDRESS %xdefine _a2i_IPADDRESS_NC _ %+ BORINGSSL_PREFIX %+ _a2i_IPADDRESS_NC %xdefine _aes128gcmsiv_aes_ks _ %+ BORINGSSL_PREFIX %+ _aes128gcmsiv_aes_ks %xdefine _aes128gcmsiv_aes_ks_enc_x1 _ %+ BORINGSSL_PREFIX %+ _aes128gcmsiv_aes_ks_enc_x1 %xdefine _aes128gcmsiv_dec _ %+ BORINGSSL_PREFIX %+ _aes128gcmsiv_dec %xdefine _aes128gcmsiv_ecb_enc_block _ %+ BORINGSSL_PREFIX %+ _aes128gcmsiv_ecb_enc_block %xdefine _aes128gcmsiv_enc_msg_x4 _ %+ BORINGSSL_PREFIX %+ _aes128gcmsiv_enc_msg_x4 %xdefine _aes128gcmsiv_enc_msg_x8 _ %+ BORINGSSL_PREFIX %+ _aes128gcmsiv_enc_msg_x8 %xdefine _aes128gcmsiv_kdf _ %+ BORINGSSL_PREFIX %+ _aes128gcmsiv_kdf %xdefine _aes256gcmsiv_aes_ks _ %+ BORINGSSL_PREFIX %+ _aes256gcmsiv_aes_ks %xdefine _aes256gcmsiv_aes_ks_enc_x1 _ %+ BORINGSSL_PREFIX %+ _aes256gcmsiv_aes_ks_enc_x1 %xdefine _aes256gcmsiv_dec _ %+ BORINGSSL_PREFIX %+ _aes256gcmsiv_dec %xdefine _aes256gcmsiv_ecb_enc_block _ %+ BORINGSSL_PREFIX %+ _aes256gcmsiv_ecb_enc_block %xdefine _aes256gcmsiv_enc_msg_x4 _ %+ BORINGSSL_PREFIX %+ _aes256gcmsiv_enc_msg_x4 %xdefine _aes256gcmsiv_enc_msg_x8 _ %+ BORINGSSL_PREFIX %+ _aes256gcmsiv_enc_msg_x8 %xdefine _aes256gcmsiv_kdf _ %+ BORINGSSL_PREFIX %+ _aes256gcmsiv_kdf %xdefine _aes_ctr_set_key _ %+ BORINGSSL_PREFIX %+ _aes_ctr_set_key %xdefine _aes_gcm_dec_kernel _ %+ BORINGSSL_PREFIX %+ _aes_gcm_dec_kernel %xdefine _aes_gcm_dec_update_vaes_avx10_512 _ %+ BORINGSSL_PREFIX %+ _aes_gcm_dec_update_vaes_avx10_512 %xdefine _aes_gcm_dec_update_vaes_avx2 _ %+ BORINGSSL_PREFIX %+ _aes_gcm_dec_update_vaes_avx2 %xdefine _aes_gcm_enc_kernel _ %+ BORINGSSL_PREFIX %+ _aes_gcm_enc_kernel %xdefine _aes_gcm_enc_update_vaes_avx10_512 _ %+ BORINGSSL_PREFIX %+ _aes_gcm_enc_update_vaes_avx10_512 %xdefine _aes_gcm_enc_update_vaes_avx2 _ %+ BORINGSSL_PREFIX %+ _aes_gcm_enc_update_vaes_avx2 %xdefine _aes_hw_cbc_encrypt _ %+ BORINGSSL_PREFIX %+ _aes_hw_cbc_encrypt %xdefine 
_aes_hw_ctr32_encrypt_blocks _ %+ BORINGSSL_PREFIX %+ _aes_hw_ctr32_encrypt_blocks %xdefine _aes_hw_decrypt _ %+ BORINGSSL_PREFIX %+ _aes_hw_decrypt %xdefine _aes_hw_ecb_encrypt _ %+ BORINGSSL_PREFIX %+ _aes_hw_ecb_encrypt %xdefine _aes_hw_encrypt _ %+ BORINGSSL_PREFIX %+ _aes_hw_encrypt %xdefine _aes_hw_encrypt_key_to_decrypt_key _ %+ BORINGSSL_PREFIX %+ _aes_hw_encrypt_key_to_decrypt_key %xdefine _aes_hw_set_decrypt_key _ %+ BORINGSSL_PREFIX %+ _aes_hw_set_decrypt_key %xdefine _aes_hw_set_encrypt_key _ %+ BORINGSSL_PREFIX %+ _aes_hw_set_encrypt_key %xdefine _aes_hw_set_encrypt_key_alt _ %+ BORINGSSL_PREFIX %+ _aes_hw_set_encrypt_key_alt %xdefine _aes_hw_set_encrypt_key_alt_preferred _ %+ BORINGSSL_PREFIX %+ _aes_hw_set_encrypt_key_alt_preferred %xdefine _aes_hw_set_encrypt_key_base _ %+ BORINGSSL_PREFIX %+ _aes_hw_set_encrypt_key_base %xdefine _aes_nohw_cbc_encrypt _ %+ BORINGSSL_PREFIX %+ _aes_nohw_cbc_encrypt %xdefine _aes_nohw_ctr32_encrypt_blocks _ %+ BORINGSSL_PREFIX %+ _aes_nohw_ctr32_encrypt_blocks %xdefine _aes_nohw_decrypt _ %+ BORINGSSL_PREFIX %+ _aes_nohw_decrypt %xdefine _aes_nohw_encrypt _ %+ BORINGSSL_PREFIX %+ _aes_nohw_encrypt %xdefine _aes_nohw_set_decrypt_key _ %+ BORINGSSL_PREFIX %+ _aes_nohw_set_decrypt_key %xdefine _aes_nohw_set_encrypt_key _ %+ BORINGSSL_PREFIX %+ _aes_nohw_set_encrypt_key %xdefine _aesgcmsiv_htable6_init _ %+ BORINGSSL_PREFIX %+ _aesgcmsiv_htable6_init %xdefine _aesgcmsiv_htable_init _ %+ BORINGSSL_PREFIX %+ _aesgcmsiv_htable_init %xdefine _aesgcmsiv_htable_polyval _ %+ BORINGSSL_PREFIX %+ _aesgcmsiv_htable_polyval %xdefine _aesgcmsiv_polyval_horner _ %+ BORINGSSL_PREFIX %+ _aesgcmsiv_polyval_horner %xdefine _aesni_gcm_decrypt _ %+ BORINGSSL_PREFIX %+ _aesni_gcm_decrypt %xdefine _aesni_gcm_encrypt _ %+ BORINGSSL_PREFIX %+ _aesni_gcm_encrypt %xdefine _asn1_bit_string_length _ %+ BORINGSSL_PREFIX %+ _asn1_bit_string_length %xdefine _asn1_do_adb _ %+ BORINGSSL_PREFIX %+ _asn1_do_adb %xdefine _asn1_enc_free _ %+ 
BORINGSSL_PREFIX %+ _asn1_enc_free %xdefine _asn1_enc_init _ %+ BORINGSSL_PREFIX %+ _asn1_enc_init %xdefine _asn1_enc_restore _ %+ BORINGSSL_PREFIX %+ _asn1_enc_restore %xdefine _asn1_enc_save _ %+ BORINGSSL_PREFIX %+ _asn1_enc_save %xdefine _asn1_encoding_clear _ %+ BORINGSSL_PREFIX %+ _asn1_encoding_clear %xdefine _asn1_generalizedtime_to_tm _ %+ BORINGSSL_PREFIX %+ _asn1_generalizedtime_to_tm %xdefine _asn1_get_choice_selector _ %+ BORINGSSL_PREFIX %+ _asn1_get_choice_selector %xdefine _asn1_get_field_ptr _ %+ BORINGSSL_PREFIX %+ _asn1_get_field_ptr %xdefine _asn1_get_string_table_for_testing _ %+ BORINGSSL_PREFIX %+ _asn1_get_string_table_for_testing %xdefine _asn1_is_printable _ %+ BORINGSSL_PREFIX %+ _asn1_is_printable %xdefine _asn1_refcount_dec_and_test_zero _ %+ BORINGSSL_PREFIX %+ _asn1_refcount_dec_and_test_zero %xdefine _asn1_refcount_set_one _ %+ BORINGSSL_PREFIX %+ _asn1_refcount_set_one %xdefine _asn1_set_choice_selector _ %+ BORINGSSL_PREFIX %+ _asn1_set_choice_selector %xdefine _asn1_type_cleanup _ %+ BORINGSSL_PREFIX %+ _asn1_type_cleanup %xdefine _asn1_type_set0_string _ %+ BORINGSSL_PREFIX %+ _asn1_type_set0_string %xdefine _asn1_type_value_as_pointer _ %+ BORINGSSL_PREFIX %+ _asn1_type_value_as_pointer %xdefine _asn1_utctime_to_tm _ %+ BORINGSSL_PREFIX %+ _asn1_utctime_to_tm %xdefine _bcm_as_approved_status _ %+ BORINGSSL_PREFIX %+ _bcm_as_approved_status %xdefine _bcm_success _ %+ BORINGSSL_PREFIX %+ _bcm_success %xdefine _beeu_mod_inverse_vartime _ %+ BORINGSSL_PREFIX %+ _beeu_mod_inverse_vartime %xdefine _bio_clear_socket_error _ %+ BORINGSSL_PREFIX %+ _bio_clear_socket_error %xdefine _bio_errno_should_retry _ %+ BORINGSSL_PREFIX %+ _bio_errno_should_retry %xdefine _bio_ip_and_port_to_socket_and_addr _ %+ BORINGSSL_PREFIX %+ _bio_ip_and_port_to_socket_and_addr %xdefine _bio_sock_error _ %+ BORINGSSL_PREFIX %+ _bio_sock_error %xdefine _bio_socket_nbio _ %+ BORINGSSL_PREFIX %+ _bio_socket_nbio %xdefine _bio_socket_should_retry _ %+ 
BORINGSSL_PREFIX %+ _bio_socket_should_retry %xdefine _bn_abs_sub_consttime _ %+ BORINGSSL_PREFIX %+ _bn_abs_sub_consttime %xdefine _bn_add_words _ %+ BORINGSSL_PREFIX %+ _bn_add_words %xdefine _bn_assert_fits_in_bytes _ %+ BORINGSSL_PREFIX %+ _bn_assert_fits_in_bytes %xdefine _bn_big_endian_to_words _ %+ BORINGSSL_PREFIX %+ _bn_big_endian_to_words %xdefine _bn_copy_words _ %+ BORINGSSL_PREFIX %+ _bn_copy_words %xdefine _bn_declassify _ %+ BORINGSSL_PREFIX %+ _bn_declassify %xdefine _bn_div_consttime _ %+ BORINGSSL_PREFIX %+ _bn_div_consttime %xdefine _bn_expand _ %+ BORINGSSL_PREFIX %+ _bn_expand %xdefine _bn_fits_in_words _ %+ BORINGSSL_PREFIX %+ _bn_fits_in_words %xdefine _bn_from_montgomery_small _ %+ BORINGSSL_PREFIX %+ _bn_from_montgomery_small %xdefine _bn_gather5 _ %+ BORINGSSL_PREFIX %+ _bn_gather5 %xdefine _bn_in_range_words _ %+ BORINGSSL_PREFIX %+ _bn_in_range_words %xdefine _bn_is_bit_set_words _ %+ BORINGSSL_PREFIX %+ _bn_is_bit_set_words %xdefine _bn_is_relatively_prime _ %+ BORINGSSL_PREFIX %+ _bn_is_relatively_prime %xdefine _bn_jacobi _ %+ BORINGSSL_PREFIX %+ _bn_jacobi %xdefine _bn_lcm_consttime _ %+ BORINGSSL_PREFIX %+ _bn_lcm_consttime %xdefine _bn_less_than_montgomery_R _ %+ BORINGSSL_PREFIX %+ _bn_less_than_montgomery_R %xdefine _bn_less_than_words _ %+ BORINGSSL_PREFIX %+ _bn_less_than_words %xdefine _bn_miller_rabin_init _ %+ BORINGSSL_PREFIX %+ _bn_miller_rabin_init %xdefine _bn_miller_rabin_iteration _ %+ BORINGSSL_PREFIX %+ _bn_miller_rabin_iteration %xdefine _bn_minimal_width _ %+ BORINGSSL_PREFIX %+ _bn_minimal_width %xdefine _bn_mod_add_consttime _ %+ BORINGSSL_PREFIX %+ _bn_mod_add_consttime %xdefine _bn_mod_add_words _ %+ BORINGSSL_PREFIX %+ _bn_mod_add_words %xdefine _bn_mod_exp_mont_small _ %+ BORINGSSL_PREFIX %+ _bn_mod_exp_mont_small %xdefine _bn_mod_inverse0_prime_mont_small _ %+ BORINGSSL_PREFIX %+ _bn_mod_inverse0_prime_mont_small %xdefine _bn_mod_inverse_consttime _ %+ BORINGSSL_PREFIX %+ _bn_mod_inverse_consttime %xdefine 
_bn_mod_inverse_prime _ %+ BORINGSSL_PREFIX %+ _bn_mod_inverse_prime %xdefine _bn_mod_inverse_secret_prime _ %+ BORINGSSL_PREFIX %+ _bn_mod_inverse_secret_prime %xdefine _bn_mod_lshift1_consttime _ %+ BORINGSSL_PREFIX %+ _bn_mod_lshift1_consttime %xdefine _bn_mod_lshift_consttime _ %+ BORINGSSL_PREFIX %+ _bn_mod_lshift_consttime %xdefine _bn_mod_mul_montgomery_small _ %+ BORINGSSL_PREFIX %+ _bn_mod_mul_montgomery_small %xdefine _bn_mod_sub_consttime _ %+ BORINGSSL_PREFIX %+ _bn_mod_sub_consttime %xdefine _bn_mod_sub_words _ %+ BORINGSSL_PREFIX %+ _bn_mod_sub_words %xdefine _bn_mod_u16_consttime _ %+ BORINGSSL_PREFIX %+ _bn_mod_u16_consttime %xdefine _bn_mont_ctx_cleanup _ %+ BORINGSSL_PREFIX %+ _bn_mont_ctx_cleanup %xdefine _bn_mont_ctx_init _ %+ BORINGSSL_PREFIX %+ _bn_mont_ctx_init %xdefine _bn_mont_ctx_set_RR_consttime _ %+ BORINGSSL_PREFIX %+ _bn_mont_ctx_set_RR_consttime %xdefine _bn_mont_n0 _ %+ BORINGSSL_PREFIX %+ _bn_mont_n0 %xdefine _bn_mul4x_mont _ %+ BORINGSSL_PREFIX %+ _bn_mul4x_mont %xdefine _bn_mul4x_mont_capable _ %+ BORINGSSL_PREFIX %+ _bn_mul4x_mont_capable %xdefine _bn_mul4x_mont_gather5 _ %+ BORINGSSL_PREFIX %+ _bn_mul4x_mont_gather5 %xdefine _bn_mul4x_mont_gather5_capable _ %+ BORINGSSL_PREFIX %+ _bn_mul4x_mont_gather5_capable %xdefine _bn_mul_add_words _ %+ BORINGSSL_PREFIX %+ _bn_mul_add_words %xdefine _bn_mul_comba4 _ %+ BORINGSSL_PREFIX %+ _bn_mul_comba4 %xdefine _bn_mul_comba8 _ %+ BORINGSSL_PREFIX %+ _bn_mul_comba8 %xdefine _bn_mul_consttime _ %+ BORINGSSL_PREFIX %+ _bn_mul_consttime %xdefine _bn_mul_mont _ %+ BORINGSSL_PREFIX %+ _bn_mul_mont %xdefine _bn_mul_mont_gather5_nohw _ %+ BORINGSSL_PREFIX %+ _bn_mul_mont_gather5_nohw %xdefine _bn_mul_mont_nohw _ %+ BORINGSSL_PREFIX %+ _bn_mul_mont_nohw %xdefine _bn_mul_small _ %+ BORINGSSL_PREFIX %+ _bn_mul_small %xdefine _bn_mul_words _ %+ BORINGSSL_PREFIX %+ _bn_mul_words %xdefine _bn_mulx4x_mont _ %+ BORINGSSL_PREFIX %+ _bn_mulx4x_mont %xdefine _bn_mulx4x_mont_capable _ %+ BORINGSSL_PREFIX %+ 
_bn_mulx4x_mont_capable %xdefine _bn_mulx4x_mont_gather5 _ %+ BORINGSSL_PREFIX %+ _bn_mulx4x_mont_gather5 %xdefine _bn_mulx4x_mont_gather5_capable _ %+ BORINGSSL_PREFIX %+ _bn_mulx4x_mont_gather5_capable %xdefine _bn_mulx_adx_capable _ %+ BORINGSSL_PREFIX %+ _bn_mulx_adx_capable %xdefine _bn_odd_number_is_obviously_composite _ %+ BORINGSSL_PREFIX %+ _bn_odd_number_is_obviously_composite %xdefine _bn_one_to_montgomery _ %+ BORINGSSL_PREFIX %+ _bn_one_to_montgomery %xdefine _bn_power5_capable _ %+ BORINGSSL_PREFIX %+ _bn_power5_capable %xdefine _bn_power5_nohw _ %+ BORINGSSL_PREFIX %+ _bn_power5_nohw %xdefine _bn_powerx5 _ %+ BORINGSSL_PREFIX %+ _bn_powerx5 %xdefine _bn_powerx5_capable _ %+ BORINGSSL_PREFIX %+ _bn_powerx5_capable %xdefine _bn_rand_range_words _ %+ BORINGSSL_PREFIX %+ _bn_rand_range_words %xdefine _bn_rand_secret_range _ %+ BORINGSSL_PREFIX %+ _bn_rand_secret_range %xdefine _bn_reduce_once _ %+ BORINGSSL_PREFIX %+ _bn_reduce_once %xdefine _bn_reduce_once_in_place _ %+ BORINGSSL_PREFIX %+ _bn_reduce_once_in_place %xdefine _bn_resize_words _ %+ BORINGSSL_PREFIX %+ _bn_resize_words %xdefine _bn_rshift1_words _ %+ BORINGSSL_PREFIX %+ _bn_rshift1_words %xdefine _bn_rshift_secret_shift _ %+ BORINGSSL_PREFIX %+ _bn_rshift_secret_shift %xdefine _bn_rshift_words _ %+ BORINGSSL_PREFIX %+ _bn_rshift_words %xdefine _bn_scatter5 _ %+ BORINGSSL_PREFIX %+ _bn_scatter5 %xdefine _bn_secret _ %+ BORINGSSL_PREFIX %+ _bn_secret %xdefine _bn_select_words _ %+ BORINGSSL_PREFIX %+ _bn_select_words %xdefine _bn_set_minimal_width _ %+ BORINGSSL_PREFIX %+ _bn_set_minimal_width %xdefine _bn_set_static_words _ %+ BORINGSSL_PREFIX %+ _bn_set_static_words %xdefine _bn_set_words _ %+ BORINGSSL_PREFIX %+ _bn_set_words %xdefine _bn_sqr8x_internal _ %+ BORINGSSL_PREFIX %+ _bn_sqr8x_internal %xdefine _bn_sqr8x_mont _ %+ BORINGSSL_PREFIX %+ _bn_sqr8x_mont %xdefine _bn_sqr8x_mont_capable _ %+ BORINGSSL_PREFIX %+ _bn_sqr8x_mont_capable %xdefine _bn_sqr_comba4 _ %+ BORINGSSL_PREFIX %+ 
_bn_sqr_comba4 %xdefine _bn_sqr_comba8 _ %+ BORINGSSL_PREFIX %+ _bn_sqr_comba8 %xdefine _bn_sqr_consttime _ %+ BORINGSSL_PREFIX %+ _bn_sqr_consttime %xdefine _bn_sqr_small _ %+ BORINGSSL_PREFIX %+ _bn_sqr_small %xdefine _bn_sqr_words _ %+ BORINGSSL_PREFIX %+ _bn_sqr_words %xdefine _bn_sqrx8x_internal _ %+ BORINGSSL_PREFIX %+ _bn_sqrx8x_internal %xdefine _bn_sub_words _ %+ BORINGSSL_PREFIX %+ _bn_sub_words %xdefine _bn_to_montgomery_small _ %+ BORINGSSL_PREFIX %+ _bn_to_montgomery_small %xdefine _bn_uadd_consttime _ %+ BORINGSSL_PREFIX %+ _bn_uadd_consttime %xdefine _bn_usub_consttime _ %+ BORINGSSL_PREFIX %+ _bn_usub_consttime %xdefine _bn_wexpand _ %+ BORINGSSL_PREFIX %+ _bn_wexpand %xdefine _bn_words_to_big_endian _ %+ BORINGSSL_PREFIX %+ _bn_words_to_big_endian %xdefine _boringssl_ensure_ecc_self_test _ %+ BORINGSSL_PREFIX %+ _boringssl_ensure_ecc_self_test %xdefine _boringssl_ensure_ffdh_self_test _ %+ BORINGSSL_PREFIX %+ _boringssl_ensure_ffdh_self_test %xdefine _boringssl_ensure_rsa_self_test _ %+ BORINGSSL_PREFIX %+ _boringssl_ensure_rsa_self_test %xdefine _boringssl_fips_break_test _ %+ BORINGSSL_PREFIX %+ _boringssl_fips_break_test %xdefine _boringssl_fips_inc_counter _ %+ BORINGSSL_PREFIX %+ _boringssl_fips_inc_counter %xdefine _boringssl_self_test_hmac_sha256 _ %+ BORINGSSL_PREFIX %+ _boringssl_self_test_hmac_sha256 %xdefine _boringssl_self_test_sha256 _ %+ BORINGSSL_PREFIX %+ _boringssl_self_test_sha256 %xdefine _boringssl_self_test_sha512 _ %+ BORINGSSL_PREFIX %+ _boringssl_self_test_sha512 %xdefine _bsaes_capable _ %+ BORINGSSL_PREFIX %+ _bsaes_capable %xdefine _bsaes_cbc_encrypt _ %+ BORINGSSL_PREFIX %+ _bsaes_cbc_encrypt %xdefine _c2i_ASN1_BIT_STRING _ %+ BORINGSSL_PREFIX %+ _c2i_ASN1_BIT_STRING %xdefine _c2i_ASN1_INTEGER _ %+ BORINGSSL_PREFIX %+ _c2i_ASN1_INTEGER %xdefine _c2i_ASN1_OBJECT _ %+ BORINGSSL_PREFIX %+ _c2i_ASN1_OBJECT %xdefine _chacha20_poly1305_asm_capable _ %+ BORINGSSL_PREFIX %+ _chacha20_poly1305_asm_capable %xdefine 
_chacha20_poly1305_open _ %+ BORINGSSL_PREFIX %+ _chacha20_poly1305_open %xdefine _chacha20_poly1305_open_avx2 _ %+ BORINGSSL_PREFIX %+ _chacha20_poly1305_open_avx2 %xdefine _chacha20_poly1305_open_nohw _ %+ BORINGSSL_PREFIX %+ _chacha20_poly1305_open_nohw %xdefine _chacha20_poly1305_seal _ %+ BORINGSSL_PREFIX %+ _chacha20_poly1305_seal %xdefine _chacha20_poly1305_seal_avx2 _ %+ BORINGSSL_PREFIX %+ _chacha20_poly1305_seal_avx2 %xdefine _chacha20_poly1305_seal_nohw _ %+ BORINGSSL_PREFIX %+ _chacha20_poly1305_seal_nohw %xdefine _crypto_gcm_clmul_enabled _ %+ BORINGSSL_PREFIX %+ _crypto_gcm_clmul_enabled %xdefine _d2i_ASN1_BIT_STRING _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_BIT_STRING %xdefine _d2i_ASN1_BMPSTRING _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_BMPSTRING %xdefine _d2i_ASN1_BOOLEAN _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_BOOLEAN %xdefine _d2i_ASN1_ENUMERATED _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_ENUMERATED %xdefine _d2i_ASN1_GENERALIZEDTIME _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_GENERALIZEDTIME %xdefine _d2i_ASN1_GENERALSTRING _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_GENERALSTRING %xdefine _d2i_ASN1_IA5STRING _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_IA5STRING %xdefine _d2i_ASN1_INTEGER _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_INTEGER %xdefine _d2i_ASN1_NULL _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_NULL %xdefine _d2i_ASN1_OBJECT _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_OBJECT %xdefine _d2i_ASN1_OCTET_STRING _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_OCTET_STRING %xdefine _d2i_ASN1_PRINTABLE _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_PRINTABLE %xdefine _d2i_ASN1_PRINTABLESTRING _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_PRINTABLESTRING %xdefine _d2i_ASN1_SEQUENCE_ANY _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_SEQUENCE_ANY %xdefine _d2i_ASN1_SET_ANY _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_SET_ANY %xdefine _d2i_ASN1_T61STRING _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_T61STRING %xdefine _d2i_ASN1_TIME _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_TIME %xdefine _d2i_ASN1_TYPE _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_TYPE %xdefine _d2i_ASN1_UNIVERSALSTRING _ %+ BORINGSSL_PREFIX 
%+ _d2i_ASN1_UNIVERSALSTRING %xdefine _d2i_ASN1_UTCTIME _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_UTCTIME %xdefine _d2i_ASN1_UTF8STRING _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_UTF8STRING %xdefine _d2i_ASN1_VISIBLESTRING _ %+ BORINGSSL_PREFIX %+ _d2i_ASN1_VISIBLESTRING %xdefine _d2i_AUTHORITY_INFO_ACCESS _ %+ BORINGSSL_PREFIX %+ _d2i_AUTHORITY_INFO_ACCESS %xdefine _d2i_AUTHORITY_KEYID _ %+ BORINGSSL_PREFIX %+ _d2i_AUTHORITY_KEYID %xdefine _d2i_AutoPrivateKey _ %+ BORINGSSL_PREFIX %+ _d2i_AutoPrivateKey %xdefine _d2i_BASIC_CONSTRAINTS _ %+ BORINGSSL_PREFIX %+ _d2i_BASIC_CONSTRAINTS %xdefine _d2i_CERTIFICATEPOLICIES _ %+ BORINGSSL_PREFIX %+ _d2i_CERTIFICATEPOLICIES %xdefine _d2i_CRL_DIST_POINTS _ %+ BORINGSSL_PREFIX %+ _d2i_CRL_DIST_POINTS %xdefine _d2i_DHparams _ %+ BORINGSSL_PREFIX %+ _d2i_DHparams %xdefine _d2i_DHparams_bio _ %+ BORINGSSL_PREFIX %+ _d2i_DHparams_bio %xdefine _d2i_DIRECTORYSTRING _ %+ BORINGSSL_PREFIX %+ _d2i_DIRECTORYSTRING %xdefine _d2i_DISPLAYTEXT _ %+ BORINGSSL_PREFIX %+ _d2i_DISPLAYTEXT %xdefine _d2i_DSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _d2i_DSAPrivateKey %xdefine _d2i_DSAPrivateKey_bio _ %+ BORINGSSL_PREFIX %+ _d2i_DSAPrivateKey_bio %xdefine _d2i_DSAPrivateKey_fp _ %+ BORINGSSL_PREFIX %+ _d2i_DSAPrivateKey_fp %xdefine _d2i_DSAPublicKey _ %+ BORINGSSL_PREFIX %+ _d2i_DSAPublicKey %xdefine _d2i_DSA_PUBKEY _ %+ BORINGSSL_PREFIX %+ _d2i_DSA_PUBKEY %xdefine _d2i_DSA_PUBKEY_bio _ %+ BORINGSSL_PREFIX %+ _d2i_DSA_PUBKEY_bio %xdefine _d2i_DSA_PUBKEY_fp _ %+ BORINGSSL_PREFIX %+ _d2i_DSA_PUBKEY_fp %xdefine _d2i_DSA_SIG _ %+ BORINGSSL_PREFIX %+ _d2i_DSA_SIG %xdefine _d2i_DSAparams _ %+ BORINGSSL_PREFIX %+ _d2i_DSAparams %xdefine _d2i_ECDSA_SIG _ %+ BORINGSSL_PREFIX %+ _d2i_ECDSA_SIG %xdefine _d2i_ECPKParameters _ %+ BORINGSSL_PREFIX %+ _d2i_ECPKParameters %xdefine _d2i_ECParameters _ %+ BORINGSSL_PREFIX %+ _d2i_ECParameters %xdefine _d2i_ECPrivateKey _ %+ BORINGSSL_PREFIX %+ _d2i_ECPrivateKey %xdefine _d2i_ECPrivateKey_bio _ %+ BORINGSSL_PREFIX %+ 
_d2i_ECPrivateKey_bio %xdefine _d2i_ECPrivateKey_fp _ %+ BORINGSSL_PREFIX %+ _d2i_ECPrivateKey_fp %xdefine _d2i_EC_PUBKEY _ %+ BORINGSSL_PREFIX %+ _d2i_EC_PUBKEY %xdefine _d2i_EC_PUBKEY_bio _ %+ BORINGSSL_PREFIX %+ _d2i_EC_PUBKEY_bio %xdefine _d2i_EC_PUBKEY_fp _ %+ BORINGSSL_PREFIX %+ _d2i_EC_PUBKEY_fp %xdefine _d2i_EXTENDED_KEY_USAGE _ %+ BORINGSSL_PREFIX %+ _d2i_EXTENDED_KEY_USAGE %xdefine _d2i_GENERAL_NAME _ %+ BORINGSSL_PREFIX %+ _d2i_GENERAL_NAME %xdefine _d2i_GENERAL_NAMES _ %+ BORINGSSL_PREFIX %+ _d2i_GENERAL_NAMES %xdefine _d2i_ISSUING_DIST_POINT _ %+ BORINGSSL_PREFIX %+ _d2i_ISSUING_DIST_POINT %xdefine _d2i_NETSCAPE_SPKAC _ %+ BORINGSSL_PREFIX %+ _d2i_NETSCAPE_SPKAC %xdefine _d2i_NETSCAPE_SPKI _ %+ BORINGSSL_PREFIX %+ _d2i_NETSCAPE_SPKI %xdefine _d2i_PKCS12 _ %+ BORINGSSL_PREFIX %+ _d2i_PKCS12 %xdefine _d2i_PKCS12_bio _ %+ BORINGSSL_PREFIX %+ _d2i_PKCS12_bio %xdefine _d2i_PKCS12_fp _ %+ BORINGSSL_PREFIX %+ _d2i_PKCS12_fp %xdefine _d2i_PKCS7 _ %+ BORINGSSL_PREFIX %+ _d2i_PKCS7 %xdefine _d2i_PKCS7_bio _ %+ BORINGSSL_PREFIX %+ _d2i_PKCS7_bio %xdefine _d2i_PKCS8PrivateKey_bio _ %+ BORINGSSL_PREFIX %+ _d2i_PKCS8PrivateKey_bio %xdefine _d2i_PKCS8PrivateKey_fp _ %+ BORINGSSL_PREFIX %+ _d2i_PKCS8PrivateKey_fp %xdefine _d2i_PKCS8_PRIV_KEY_INFO _ %+ BORINGSSL_PREFIX %+ _d2i_PKCS8_PRIV_KEY_INFO %xdefine _d2i_PKCS8_PRIV_KEY_INFO_bio _ %+ BORINGSSL_PREFIX %+ _d2i_PKCS8_PRIV_KEY_INFO_bio %xdefine _d2i_PKCS8_PRIV_KEY_INFO_fp _ %+ BORINGSSL_PREFIX %+ _d2i_PKCS8_PRIV_KEY_INFO_fp %xdefine _d2i_PKCS8_bio _ %+ BORINGSSL_PREFIX %+ _d2i_PKCS8_bio %xdefine _d2i_PKCS8_fp _ %+ BORINGSSL_PREFIX %+ _d2i_PKCS8_fp %xdefine _d2i_PUBKEY _ %+ BORINGSSL_PREFIX %+ _d2i_PUBKEY %xdefine _d2i_PUBKEY_bio _ %+ BORINGSSL_PREFIX %+ _d2i_PUBKEY_bio %xdefine _d2i_PUBKEY_fp _ %+ BORINGSSL_PREFIX %+ _d2i_PUBKEY_fp %xdefine _d2i_PrivateKey _ %+ BORINGSSL_PREFIX %+ _d2i_PrivateKey %xdefine _d2i_PrivateKey_bio _ %+ BORINGSSL_PREFIX %+ _d2i_PrivateKey_bio %xdefine _d2i_PrivateKey_fp _ %+ BORINGSSL_PREFIX 
%+ _d2i_PrivateKey_fp %xdefine _d2i_PublicKey _ %+ BORINGSSL_PREFIX %+ _d2i_PublicKey %xdefine _d2i_RSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _d2i_RSAPrivateKey %xdefine _d2i_RSAPrivateKey_bio _ %+ BORINGSSL_PREFIX %+ _d2i_RSAPrivateKey_bio %xdefine _d2i_RSAPrivateKey_fp _ %+ BORINGSSL_PREFIX %+ _d2i_RSAPrivateKey_fp %xdefine _d2i_RSAPublicKey _ %+ BORINGSSL_PREFIX %+ _d2i_RSAPublicKey %xdefine _d2i_RSAPublicKey_bio _ %+ BORINGSSL_PREFIX %+ _d2i_RSAPublicKey_bio %xdefine _d2i_RSAPublicKey_fp _ %+ BORINGSSL_PREFIX %+ _d2i_RSAPublicKey_fp %xdefine _d2i_RSA_PSS_PARAMS _ %+ BORINGSSL_PREFIX %+ _d2i_RSA_PSS_PARAMS %xdefine _d2i_RSA_PUBKEY _ %+ BORINGSSL_PREFIX %+ _d2i_RSA_PUBKEY %xdefine _d2i_RSA_PUBKEY_bio _ %+ BORINGSSL_PREFIX %+ _d2i_RSA_PUBKEY_bio %xdefine _d2i_RSA_PUBKEY_fp _ %+ BORINGSSL_PREFIX %+ _d2i_RSA_PUBKEY_fp %xdefine _d2i_SSL_SESSION _ %+ BORINGSSL_PREFIX %+ _d2i_SSL_SESSION %xdefine _d2i_SSL_SESSION_bio _ %+ BORINGSSL_PREFIX %+ _d2i_SSL_SESSION_bio %xdefine _d2i_X509 _ %+ BORINGSSL_PREFIX %+ _d2i_X509 %xdefine _d2i_X509_ALGOR _ %+ BORINGSSL_PREFIX %+ _d2i_X509_ALGOR %xdefine _d2i_X509_ATTRIBUTE _ %+ BORINGSSL_PREFIX %+ _d2i_X509_ATTRIBUTE %xdefine _d2i_X509_AUX _ %+ BORINGSSL_PREFIX %+ _d2i_X509_AUX %xdefine _d2i_X509_CERT_AUX _ %+ BORINGSSL_PREFIX %+ _d2i_X509_CERT_AUX %xdefine _d2i_X509_CINF _ %+ BORINGSSL_PREFIX %+ _d2i_X509_CINF %xdefine _d2i_X509_CRL _ %+ BORINGSSL_PREFIX %+ _d2i_X509_CRL %xdefine _d2i_X509_CRL_INFO _ %+ BORINGSSL_PREFIX %+ _d2i_X509_CRL_INFO %xdefine _d2i_X509_CRL_bio _ %+ BORINGSSL_PREFIX %+ _d2i_X509_CRL_bio %xdefine _d2i_X509_CRL_fp _ %+ BORINGSSL_PREFIX %+ _d2i_X509_CRL_fp %xdefine _d2i_X509_EXTENSION _ %+ BORINGSSL_PREFIX %+ _d2i_X509_EXTENSION %xdefine _d2i_X509_EXTENSIONS _ %+ BORINGSSL_PREFIX %+ _d2i_X509_EXTENSIONS %xdefine _d2i_X509_NAME _ %+ BORINGSSL_PREFIX %+ _d2i_X509_NAME %xdefine _d2i_X509_PUBKEY _ %+ BORINGSSL_PREFIX %+ _d2i_X509_PUBKEY %xdefine _d2i_X509_REQ _ %+ BORINGSSL_PREFIX %+ _d2i_X509_REQ %xdefine 
_d2i_X509_REQ_INFO _ %+ BORINGSSL_PREFIX %+ _d2i_X509_REQ_INFO %xdefine _d2i_X509_REQ_bio _ %+ BORINGSSL_PREFIX %+ _d2i_X509_REQ_bio %xdefine _d2i_X509_REQ_fp _ %+ BORINGSSL_PREFIX %+ _d2i_X509_REQ_fp %xdefine _d2i_X509_REVOKED _ %+ BORINGSSL_PREFIX %+ _d2i_X509_REVOKED %xdefine _d2i_X509_SIG _ %+ BORINGSSL_PREFIX %+ _d2i_X509_SIG %xdefine _d2i_X509_VAL _ %+ BORINGSSL_PREFIX %+ _d2i_X509_VAL %xdefine _d2i_X509_bio _ %+ BORINGSSL_PREFIX %+ _d2i_X509_bio %xdefine _d2i_X509_fp _ %+ BORINGSSL_PREFIX %+ _d2i_X509_fp %xdefine _dh_asn1_meth _ %+ BORINGSSL_PREFIX %+ _dh_asn1_meth %xdefine _dh_check_params_fast _ %+ BORINGSSL_PREFIX %+ _dh_check_params_fast %xdefine _dh_compute_key_padded_no_self_test _ %+ BORINGSSL_PREFIX %+ _dh_compute_key_padded_no_self_test %xdefine _dh_pkey_meth _ %+ BORINGSSL_PREFIX %+ _dh_pkey_meth %xdefine _dsa_asn1_meth _ %+ BORINGSSL_PREFIX %+ _dsa_asn1_meth %xdefine _dsa_check_key _ %+ BORINGSSL_PREFIX %+ _dsa_check_key %xdefine _ec_GFp_mont_add _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_add %xdefine _ec_GFp_mont_dbl _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_dbl %xdefine _ec_GFp_mont_felem_exp _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_felem_exp %xdefine _ec_GFp_mont_felem_from_bytes _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_felem_from_bytes %xdefine _ec_GFp_mont_felem_mul _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_felem_mul %xdefine _ec_GFp_mont_felem_reduce _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_felem_reduce %xdefine _ec_GFp_mont_felem_sqr _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_felem_sqr %xdefine _ec_GFp_mont_felem_to_bytes _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_felem_to_bytes %xdefine _ec_GFp_mont_init_precomp _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_init_precomp %xdefine _ec_GFp_mont_mul _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_mul %xdefine _ec_GFp_mont_mul_base _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_mul_base %xdefine _ec_GFp_mont_mul_batch _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_mul_batch %xdefine _ec_GFp_mont_mul_precomp _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_mul_precomp 
%xdefine _ec_GFp_mont_mul_public_batch _ %+ BORINGSSL_PREFIX %+ _ec_GFp_mont_mul_public_batch %xdefine _ec_GFp_nistp_recode_scalar_bits _ %+ BORINGSSL_PREFIX %+ _ec_GFp_nistp_recode_scalar_bits %xdefine _ec_GFp_simple_cmp_x_coordinate _ %+ BORINGSSL_PREFIX %+ _ec_GFp_simple_cmp_x_coordinate %xdefine _ec_GFp_simple_felem_from_bytes _ %+ BORINGSSL_PREFIX %+ _ec_GFp_simple_felem_from_bytes %xdefine _ec_GFp_simple_felem_to_bytes _ %+ BORINGSSL_PREFIX %+ _ec_GFp_simple_felem_to_bytes %xdefine _ec_GFp_simple_group_get_curve _ %+ BORINGSSL_PREFIX %+ _ec_GFp_simple_group_get_curve %xdefine _ec_GFp_simple_group_set_curve _ %+ BORINGSSL_PREFIX %+ _ec_GFp_simple_group_set_curve %xdefine _ec_GFp_simple_invert _ %+ BORINGSSL_PREFIX %+ _ec_GFp_simple_invert %xdefine _ec_GFp_simple_is_at_infinity _ %+ BORINGSSL_PREFIX %+ _ec_GFp_simple_is_at_infinity %xdefine _ec_GFp_simple_is_on_curve _ %+ BORINGSSL_PREFIX %+ _ec_GFp_simple_is_on_curve %xdefine _ec_GFp_simple_point_copy _ %+ BORINGSSL_PREFIX %+ _ec_GFp_simple_point_copy %xdefine _ec_GFp_simple_point_init _ %+ BORINGSSL_PREFIX %+ _ec_GFp_simple_point_init %xdefine _ec_GFp_simple_point_set_to_infinity _ %+ BORINGSSL_PREFIX %+ _ec_GFp_simple_point_set_to_infinity %xdefine _ec_GFp_simple_points_equal _ %+ BORINGSSL_PREFIX %+ _ec_GFp_simple_points_equal %xdefine _ec_affine_jacobian_equal _ %+ BORINGSSL_PREFIX %+ _ec_affine_jacobian_equal %xdefine _ec_affine_select _ %+ BORINGSSL_PREFIX %+ _ec_affine_select %xdefine _ec_affine_to_jacobian _ %+ BORINGSSL_PREFIX %+ _ec_affine_to_jacobian %xdefine _ec_asn1_meth _ %+ BORINGSSL_PREFIX %+ _ec_asn1_meth %xdefine _ec_bignum_to_felem _ %+ BORINGSSL_PREFIX %+ _ec_bignum_to_felem %xdefine _ec_bignum_to_scalar _ %+ BORINGSSL_PREFIX %+ _ec_bignum_to_scalar %xdefine _ec_cmp_x_coordinate _ %+ BORINGSSL_PREFIX %+ _ec_cmp_x_coordinate %xdefine _ec_compute_wNAF _ %+ BORINGSSL_PREFIX %+ _ec_compute_wNAF %xdefine _ec_felem_add _ %+ BORINGSSL_PREFIX %+ _ec_felem_add %xdefine _ec_felem_equal _ %+ 
BORINGSSL_PREFIX %+ _ec_felem_equal %xdefine _ec_felem_from_bytes _ %+ BORINGSSL_PREFIX %+ _ec_felem_from_bytes %xdefine _ec_felem_neg _ %+ BORINGSSL_PREFIX %+ _ec_felem_neg %xdefine _ec_felem_non_zero_mask _ %+ BORINGSSL_PREFIX %+ _ec_felem_non_zero_mask %xdefine _ec_felem_one _ %+ BORINGSSL_PREFIX %+ _ec_felem_one %xdefine _ec_felem_select _ %+ BORINGSSL_PREFIX %+ _ec_felem_select %xdefine _ec_felem_sub _ %+ BORINGSSL_PREFIX %+ _ec_felem_sub %xdefine _ec_felem_to_bignum _ %+ BORINGSSL_PREFIX %+ _ec_felem_to_bignum %xdefine _ec_felem_to_bytes _ %+ BORINGSSL_PREFIX %+ _ec_felem_to_bytes %xdefine _ec_get_x_coordinate_as_bytes _ %+ BORINGSSL_PREFIX %+ _ec_get_x_coordinate_as_bytes %xdefine _ec_get_x_coordinate_as_scalar _ %+ BORINGSSL_PREFIX %+ _ec_get_x_coordinate_as_scalar %xdefine _ec_hash_to_curve_p256_xmd_sha256_sswu _ %+ BORINGSSL_PREFIX %+ _ec_hash_to_curve_p256_xmd_sha256_sswu %xdefine _ec_hash_to_curve_p384_xmd_sha384_sswu _ %+ BORINGSSL_PREFIX %+ _ec_hash_to_curve_p384_xmd_sha384_sswu %xdefine _ec_hash_to_curve_p384_xmd_sha512_sswu_draft07 _ %+ BORINGSSL_PREFIX %+ _ec_hash_to_curve_p384_xmd_sha512_sswu_draft07 %xdefine _ec_hash_to_scalar_p384_xmd_sha384 _ %+ BORINGSSL_PREFIX %+ _ec_hash_to_scalar_p384_xmd_sha384 %xdefine _ec_hash_to_scalar_p384_xmd_sha512_draft07 _ %+ BORINGSSL_PREFIX %+ _ec_hash_to_scalar_p384_xmd_sha512_draft07 %xdefine _ec_init_precomp _ %+ BORINGSSL_PREFIX %+ _ec_init_precomp %xdefine _ec_jacobian_to_affine _ %+ BORINGSSL_PREFIX %+ _ec_jacobian_to_affine %xdefine _ec_jacobian_to_affine_batch _ %+ BORINGSSL_PREFIX %+ _ec_jacobian_to_affine_batch %xdefine _ec_pkey_meth _ %+ BORINGSSL_PREFIX %+ _ec_pkey_meth %xdefine _ec_point_byte_len _ %+ BORINGSSL_PREFIX %+ _ec_point_byte_len %xdefine _ec_point_from_uncompressed _ %+ BORINGSSL_PREFIX %+ _ec_point_from_uncompressed %xdefine _ec_point_mul_no_self_test _ %+ BORINGSSL_PREFIX %+ _ec_point_mul_no_self_test %xdefine _ec_point_mul_scalar _ %+ BORINGSSL_PREFIX %+ _ec_point_mul_scalar %xdefine 
_ec_point_mul_scalar_base _ %+ BORINGSSL_PREFIX %+ _ec_point_mul_scalar_base %xdefine _ec_point_mul_scalar_batch _ %+ BORINGSSL_PREFIX %+ _ec_point_mul_scalar_batch %xdefine _ec_point_mul_scalar_precomp _ %+ BORINGSSL_PREFIX %+ _ec_point_mul_scalar_precomp %xdefine _ec_point_mul_scalar_public _ %+ BORINGSSL_PREFIX %+ _ec_point_mul_scalar_public %xdefine _ec_point_mul_scalar_public_batch _ %+ BORINGSSL_PREFIX %+ _ec_point_mul_scalar_public_batch %xdefine _ec_point_select _ %+ BORINGSSL_PREFIX %+ _ec_point_select %xdefine _ec_point_set_affine_coordinates _ %+ BORINGSSL_PREFIX %+ _ec_point_set_affine_coordinates %xdefine _ec_point_to_bytes _ %+ BORINGSSL_PREFIX %+ _ec_point_to_bytes %xdefine _ec_precomp_select _ %+ BORINGSSL_PREFIX %+ _ec_precomp_select %xdefine _ec_random_nonzero_scalar _ %+ BORINGSSL_PREFIX %+ _ec_random_nonzero_scalar %xdefine _ec_random_scalar _ %+ BORINGSSL_PREFIX %+ _ec_random_scalar %xdefine _ec_scalar_add _ %+ BORINGSSL_PREFIX %+ _ec_scalar_add %xdefine _ec_scalar_equal_vartime _ %+ BORINGSSL_PREFIX %+ _ec_scalar_equal_vartime %xdefine _ec_scalar_from_bytes _ %+ BORINGSSL_PREFIX %+ _ec_scalar_from_bytes %xdefine _ec_scalar_from_montgomery _ %+ BORINGSSL_PREFIX %+ _ec_scalar_from_montgomery %xdefine _ec_scalar_inv0_montgomery _ %+ BORINGSSL_PREFIX %+ _ec_scalar_inv0_montgomery %xdefine _ec_scalar_is_zero _ %+ BORINGSSL_PREFIX %+ _ec_scalar_is_zero %xdefine _ec_scalar_mul_montgomery _ %+ BORINGSSL_PREFIX %+ _ec_scalar_mul_montgomery %xdefine _ec_scalar_neg _ %+ BORINGSSL_PREFIX %+ _ec_scalar_neg %xdefine _ec_scalar_reduce _ %+ BORINGSSL_PREFIX %+ _ec_scalar_reduce %xdefine _ec_scalar_select _ %+ BORINGSSL_PREFIX %+ _ec_scalar_select %xdefine _ec_scalar_sub _ %+ BORINGSSL_PREFIX %+ _ec_scalar_sub %xdefine _ec_scalar_to_bytes _ %+ BORINGSSL_PREFIX %+ _ec_scalar_to_bytes %xdefine _ec_scalar_to_montgomery _ %+ BORINGSSL_PREFIX %+ _ec_scalar_to_montgomery %xdefine _ec_scalar_to_montgomery_inv_vartime _ %+ BORINGSSL_PREFIX %+ 
_ec_scalar_to_montgomery_inv_vartime %xdefine _ec_set_to_safe_point _ %+ BORINGSSL_PREFIX %+ _ec_set_to_safe_point %xdefine _ec_simple_scalar_inv0_montgomery _ %+ BORINGSSL_PREFIX %+ _ec_simple_scalar_inv0_montgomery %xdefine _ec_simple_scalar_to_montgomery_inv_vartime _ %+ BORINGSSL_PREFIX %+ _ec_simple_scalar_to_montgomery_inv_vartime %xdefine _ecdsa_sign_fixed _ %+ BORINGSSL_PREFIX %+ _ecdsa_sign_fixed %xdefine _ecdsa_sign_fixed_with_nonce_for_known_answer_test _ %+ BORINGSSL_PREFIX %+ _ecdsa_sign_fixed_with_nonce_for_known_answer_test %xdefine _ecdsa_verify_fixed _ %+ BORINGSSL_PREFIX %+ _ecdsa_verify_fixed %xdefine _ecdsa_verify_fixed_no_self_test _ %+ BORINGSSL_PREFIX %+ _ecdsa_verify_fixed_no_self_test %xdefine _ecp_nistz256_div_by_2 _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_div_by_2 %xdefine _ecp_nistz256_mul_by_2 _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_mul_by_2 %xdefine _ecp_nistz256_mul_by_3 _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_mul_by_3 %xdefine _ecp_nistz256_mul_mont _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_mul_mont %xdefine _ecp_nistz256_mul_mont_adx _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_mul_mont_adx %xdefine _ecp_nistz256_mul_mont_nohw _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_mul_mont_nohw %xdefine _ecp_nistz256_neg _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_neg %xdefine _ecp_nistz256_ord_mul_mont _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_ord_mul_mont %xdefine _ecp_nistz256_ord_mul_mont_adx _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_ord_mul_mont_adx %xdefine _ecp_nistz256_ord_mul_mont_nohw _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_ord_mul_mont_nohw %xdefine _ecp_nistz256_ord_sqr_mont _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_ord_sqr_mont %xdefine _ecp_nistz256_ord_sqr_mont_adx _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_ord_sqr_mont_adx %xdefine _ecp_nistz256_ord_sqr_mont_nohw _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_ord_sqr_mont_nohw %xdefine _ecp_nistz256_point_add _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_point_add %xdefine _ecp_nistz256_point_add_adx _ %+ BORINGSSL_PREFIX %+ 
_ecp_nistz256_point_add_adx %xdefine _ecp_nistz256_point_add_affine _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_point_add_affine %xdefine _ecp_nistz256_point_add_affine_adx _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_point_add_affine_adx %xdefine _ecp_nistz256_point_add_affine_nohw _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_point_add_affine_nohw %xdefine _ecp_nistz256_point_add_nohw _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_point_add_nohw %xdefine _ecp_nistz256_point_double _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_point_double %xdefine _ecp_nistz256_point_double_adx _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_point_double_adx %xdefine _ecp_nistz256_point_double_nohw _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_point_double_nohw %xdefine _ecp_nistz256_select_w5 _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_select_w5 %xdefine _ecp_nistz256_select_w5_avx2 _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_select_w5_avx2 %xdefine _ecp_nistz256_select_w5_nohw _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_select_w5_nohw %xdefine _ecp_nistz256_select_w7 _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_select_w7 %xdefine _ecp_nistz256_select_w7_avx2 _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_select_w7_avx2 %xdefine _ecp_nistz256_select_w7_nohw _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_select_w7_nohw %xdefine _ecp_nistz256_sqr_mont _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_sqr_mont %xdefine _ecp_nistz256_sqr_mont_adx _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_sqr_mont_adx %xdefine _ecp_nistz256_sqr_mont_nohw _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_sqr_mont_nohw %xdefine _ecp_nistz256_sub _ %+ BORINGSSL_PREFIX %+ _ecp_nistz256_sub %xdefine _ed25519_asn1_meth _ %+ BORINGSSL_PREFIX %+ _ed25519_asn1_meth %xdefine _ed25519_pkey_meth _ %+ BORINGSSL_PREFIX %+ _ed25519_pkey_meth %xdefine _evp_pkey_set_method _ %+ BORINGSSL_PREFIX %+ _evp_pkey_set_method %xdefine _fiat_curve25519_adx_mul _ %+ BORINGSSL_PREFIX %+ _fiat_curve25519_adx_mul %xdefine _fiat_curve25519_adx_square _ %+ BORINGSSL_PREFIX %+ _fiat_curve25519_adx_square %xdefine _fiat_p256_adx_mul _ %+ 
BORINGSSL_PREFIX %+ _fiat_p256_adx_mul %xdefine _fiat_p256_adx_sqr _ %+ BORINGSSL_PREFIX %+ _fiat_p256_adx_sqr %xdefine _gcm_ghash_avx _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_avx %xdefine _gcm_ghash_clmul _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_clmul %xdefine _gcm_ghash_neon _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_neon %xdefine _gcm_ghash_nohw _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_nohw %xdefine _gcm_ghash_ssse3 _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_ssse3 %xdefine _gcm_ghash_v8 _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_v8 %xdefine _gcm_ghash_vpclmulqdq_avx10_512 _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_vpclmulqdq_avx10_512 %xdefine _gcm_ghash_vpclmulqdq_avx2 _ %+ BORINGSSL_PREFIX %+ _gcm_ghash_vpclmulqdq_avx2 %xdefine _gcm_gmult_avx _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_avx %xdefine _gcm_gmult_clmul _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_clmul %xdefine _gcm_gmult_neon _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_neon %xdefine _gcm_gmult_nohw _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_nohw %xdefine _gcm_gmult_ssse3 _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_ssse3 %xdefine _gcm_gmult_v8 _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_v8 %xdefine _gcm_gmult_vpclmulqdq_avx10 _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_vpclmulqdq_avx10 %xdefine _gcm_gmult_vpclmulqdq_avx2 _ %+ BORINGSSL_PREFIX %+ _gcm_gmult_vpclmulqdq_avx2 %xdefine _gcm_init_avx _ %+ BORINGSSL_PREFIX %+ _gcm_init_avx %xdefine _gcm_init_clmul _ %+ BORINGSSL_PREFIX %+ _gcm_init_clmul %xdefine _gcm_init_neon _ %+ BORINGSSL_PREFIX %+ _gcm_init_neon %xdefine _gcm_init_nohw _ %+ BORINGSSL_PREFIX %+ _gcm_init_nohw %xdefine _gcm_init_ssse3 _ %+ BORINGSSL_PREFIX %+ _gcm_init_ssse3 %xdefine _gcm_init_v8 _ %+ BORINGSSL_PREFIX %+ _gcm_init_v8 %xdefine _gcm_init_vpclmulqdq_avx10_512 _ %+ BORINGSSL_PREFIX %+ _gcm_init_vpclmulqdq_avx10_512 %xdefine _gcm_init_vpclmulqdq_avx2 _ %+ BORINGSSL_PREFIX %+ _gcm_init_vpclmulqdq_avx2 %xdefine _gcm_neon_capable _ %+ BORINGSSL_PREFIX %+ _gcm_neon_capable %xdefine _gcm_pmull_capable _ %+ BORINGSSL_PREFIX %+ _gcm_pmull_capable %xdefine _have_fast_rdrand _ 
%+ BORINGSSL_PREFIX %+ _have_fast_rdrand %xdefine _have_rdrand _ %+ BORINGSSL_PREFIX %+ _have_rdrand %xdefine _hkdf_pkey_meth _ %+ BORINGSSL_PREFIX %+ _hkdf_pkey_meth %xdefine _hwaes_capable _ %+ BORINGSSL_PREFIX %+ _hwaes_capable %xdefine _i2a_ASN1_ENUMERATED _ %+ BORINGSSL_PREFIX %+ _i2a_ASN1_ENUMERATED %xdefine _i2a_ASN1_INTEGER _ %+ BORINGSSL_PREFIX %+ _i2a_ASN1_INTEGER %xdefine _i2a_ASN1_OBJECT _ %+ BORINGSSL_PREFIX %+ _i2a_ASN1_OBJECT %xdefine _i2a_ASN1_STRING _ %+ BORINGSSL_PREFIX %+ _i2a_ASN1_STRING %xdefine _i2c_ASN1_BIT_STRING _ %+ BORINGSSL_PREFIX %+ _i2c_ASN1_BIT_STRING %xdefine _i2c_ASN1_INTEGER _ %+ BORINGSSL_PREFIX %+ _i2c_ASN1_INTEGER %xdefine _i2d_ASN1_BIT_STRING _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_BIT_STRING %xdefine _i2d_ASN1_BMPSTRING _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_BMPSTRING %xdefine _i2d_ASN1_BOOLEAN _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_BOOLEAN %xdefine _i2d_ASN1_ENUMERATED _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_ENUMERATED %xdefine _i2d_ASN1_GENERALIZEDTIME _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_GENERALIZEDTIME %xdefine _i2d_ASN1_GENERALSTRING _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_GENERALSTRING %xdefine _i2d_ASN1_IA5STRING _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_IA5STRING %xdefine _i2d_ASN1_INTEGER _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_INTEGER %xdefine _i2d_ASN1_NULL _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_NULL %xdefine _i2d_ASN1_OBJECT _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_OBJECT %xdefine _i2d_ASN1_OCTET_STRING _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_OCTET_STRING %xdefine _i2d_ASN1_PRINTABLE _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_PRINTABLE %xdefine _i2d_ASN1_PRINTABLESTRING _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_PRINTABLESTRING %xdefine _i2d_ASN1_SEQUENCE_ANY _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_SEQUENCE_ANY %xdefine _i2d_ASN1_SET_ANY _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_SET_ANY %xdefine _i2d_ASN1_T61STRING _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_T61STRING %xdefine _i2d_ASN1_TIME _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_TIME %xdefine _i2d_ASN1_TYPE _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_TYPE %xdefine 
_i2d_ASN1_UNIVERSALSTRING _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_UNIVERSALSTRING %xdefine _i2d_ASN1_UTCTIME _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_UTCTIME %xdefine _i2d_ASN1_UTF8STRING _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_UTF8STRING %xdefine _i2d_ASN1_VISIBLESTRING _ %+ BORINGSSL_PREFIX %+ _i2d_ASN1_VISIBLESTRING %xdefine _i2d_AUTHORITY_INFO_ACCESS _ %+ BORINGSSL_PREFIX %+ _i2d_AUTHORITY_INFO_ACCESS %xdefine _i2d_AUTHORITY_KEYID _ %+ BORINGSSL_PREFIX %+ _i2d_AUTHORITY_KEYID %xdefine _i2d_BASIC_CONSTRAINTS _ %+ BORINGSSL_PREFIX %+ _i2d_BASIC_CONSTRAINTS %xdefine _i2d_CERTIFICATEPOLICIES _ %+ BORINGSSL_PREFIX %+ _i2d_CERTIFICATEPOLICIES %xdefine _i2d_CRL_DIST_POINTS _ %+ BORINGSSL_PREFIX %+ _i2d_CRL_DIST_POINTS %xdefine _i2d_DHparams _ %+ BORINGSSL_PREFIX %+ _i2d_DHparams %xdefine _i2d_DHparams_bio _ %+ BORINGSSL_PREFIX %+ _i2d_DHparams_bio %xdefine _i2d_DIRECTORYSTRING _ %+ BORINGSSL_PREFIX %+ _i2d_DIRECTORYSTRING %xdefine _i2d_DISPLAYTEXT _ %+ BORINGSSL_PREFIX %+ _i2d_DISPLAYTEXT %xdefine _i2d_DSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _i2d_DSAPrivateKey %xdefine _i2d_DSAPrivateKey_bio _ %+ BORINGSSL_PREFIX %+ _i2d_DSAPrivateKey_bio %xdefine _i2d_DSAPrivateKey_fp _ %+ BORINGSSL_PREFIX %+ _i2d_DSAPrivateKey_fp %xdefine _i2d_DSAPublicKey _ %+ BORINGSSL_PREFIX %+ _i2d_DSAPublicKey %xdefine _i2d_DSA_PUBKEY _ %+ BORINGSSL_PREFIX %+ _i2d_DSA_PUBKEY %xdefine _i2d_DSA_PUBKEY_bio _ %+ BORINGSSL_PREFIX %+ _i2d_DSA_PUBKEY_bio %xdefine _i2d_DSA_PUBKEY_fp _ %+ BORINGSSL_PREFIX %+ _i2d_DSA_PUBKEY_fp %xdefine _i2d_DSA_SIG _ %+ BORINGSSL_PREFIX %+ _i2d_DSA_SIG %xdefine _i2d_DSAparams _ %+ BORINGSSL_PREFIX %+ _i2d_DSAparams %xdefine _i2d_ECDSA_SIG _ %+ BORINGSSL_PREFIX %+ _i2d_ECDSA_SIG %xdefine _i2d_ECPKParameters _ %+ BORINGSSL_PREFIX %+ _i2d_ECPKParameters %xdefine _i2d_ECParameters _ %+ BORINGSSL_PREFIX %+ _i2d_ECParameters %xdefine _i2d_ECPrivateKey _ %+ BORINGSSL_PREFIX %+ _i2d_ECPrivateKey %xdefine _i2d_ECPrivateKey_bio _ %+ BORINGSSL_PREFIX %+ _i2d_ECPrivateKey_bio %xdefine 
_i2d_ECPrivateKey_fp _ %+ BORINGSSL_PREFIX %+ _i2d_ECPrivateKey_fp %xdefine _i2d_EC_PUBKEY _ %+ BORINGSSL_PREFIX %+ _i2d_EC_PUBKEY %xdefine _i2d_EC_PUBKEY_bio _ %+ BORINGSSL_PREFIX %+ _i2d_EC_PUBKEY_bio %xdefine _i2d_EC_PUBKEY_fp _ %+ BORINGSSL_PREFIX %+ _i2d_EC_PUBKEY_fp %xdefine _i2d_EXTENDED_KEY_USAGE _ %+ BORINGSSL_PREFIX %+ _i2d_EXTENDED_KEY_USAGE %xdefine _i2d_GENERAL_NAME _ %+ BORINGSSL_PREFIX %+ _i2d_GENERAL_NAME %xdefine _i2d_GENERAL_NAMES _ %+ BORINGSSL_PREFIX %+ _i2d_GENERAL_NAMES %xdefine _i2d_ISSUING_DIST_POINT _ %+ BORINGSSL_PREFIX %+ _i2d_ISSUING_DIST_POINT %xdefine _i2d_NETSCAPE_SPKAC _ %+ BORINGSSL_PREFIX %+ _i2d_NETSCAPE_SPKAC %xdefine _i2d_NETSCAPE_SPKI _ %+ BORINGSSL_PREFIX %+ _i2d_NETSCAPE_SPKI %xdefine _i2d_PKCS12 _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS12 %xdefine _i2d_PKCS12_bio _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS12_bio %xdefine _i2d_PKCS12_fp _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS12_fp %xdefine _i2d_PKCS7 _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS7 %xdefine _i2d_PKCS7_bio _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS7_bio %xdefine _i2d_PKCS8PrivateKeyInfo_bio _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS8PrivateKeyInfo_bio %xdefine _i2d_PKCS8PrivateKeyInfo_fp _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS8PrivateKeyInfo_fp %xdefine _i2d_PKCS8PrivateKey_bio _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS8PrivateKey_bio %xdefine _i2d_PKCS8PrivateKey_fp _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS8PrivateKey_fp %xdefine _i2d_PKCS8PrivateKey_nid_bio _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS8PrivateKey_nid_bio %xdefine _i2d_PKCS8PrivateKey_nid_fp _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS8PrivateKey_nid_fp %xdefine _i2d_PKCS8_PRIV_KEY_INFO _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS8_PRIV_KEY_INFO %xdefine _i2d_PKCS8_PRIV_KEY_INFO_bio _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS8_PRIV_KEY_INFO_bio %xdefine _i2d_PKCS8_PRIV_KEY_INFO_fp _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS8_PRIV_KEY_INFO_fp %xdefine _i2d_PKCS8_bio _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS8_bio %xdefine _i2d_PKCS8_fp _ %+ BORINGSSL_PREFIX %+ _i2d_PKCS8_fp %xdefine _i2d_PUBKEY _ %+ BORINGSSL_PREFIX 
%+ _i2d_PUBKEY %xdefine _i2d_PUBKEY_bio _ %+ BORINGSSL_PREFIX %+ _i2d_PUBKEY_bio %xdefine _i2d_PUBKEY_fp _ %+ BORINGSSL_PREFIX %+ _i2d_PUBKEY_fp %xdefine _i2d_PrivateKey _ %+ BORINGSSL_PREFIX %+ _i2d_PrivateKey %xdefine _i2d_PrivateKey_bio _ %+ BORINGSSL_PREFIX %+ _i2d_PrivateKey_bio %xdefine _i2d_PrivateKey_fp _ %+ BORINGSSL_PREFIX %+ _i2d_PrivateKey_fp %xdefine _i2d_PublicKey _ %+ BORINGSSL_PREFIX %+ _i2d_PublicKey %xdefine _i2d_RSAPrivateKey _ %+ BORINGSSL_PREFIX %+ _i2d_RSAPrivateKey %xdefine _i2d_RSAPrivateKey_bio _ %+ BORINGSSL_PREFIX %+ _i2d_RSAPrivateKey_bio %xdefine _i2d_RSAPrivateKey_fp _ %+ BORINGSSL_PREFIX %+ _i2d_RSAPrivateKey_fp %xdefine _i2d_RSAPublicKey _ %+ BORINGSSL_PREFIX %+ _i2d_RSAPublicKey %xdefine _i2d_RSAPublicKey_bio _ %+ BORINGSSL_PREFIX %+ _i2d_RSAPublicKey_bio %xdefine _i2d_RSAPublicKey_fp _ %+ BORINGSSL_PREFIX %+ _i2d_RSAPublicKey_fp %xdefine _i2d_RSA_PSS_PARAMS _ %+ BORINGSSL_PREFIX %+ _i2d_RSA_PSS_PARAMS %xdefine _i2d_RSA_PUBKEY _ %+ BORINGSSL_PREFIX %+ _i2d_RSA_PUBKEY %xdefine _i2d_RSA_PUBKEY_bio _ %+ BORINGSSL_PREFIX %+ _i2d_RSA_PUBKEY_bio %xdefine _i2d_RSA_PUBKEY_fp _ %+ BORINGSSL_PREFIX %+ _i2d_RSA_PUBKEY_fp %xdefine _i2d_SSL_SESSION _ %+ BORINGSSL_PREFIX %+ _i2d_SSL_SESSION %xdefine _i2d_SSL_SESSION_bio _ %+ BORINGSSL_PREFIX %+ _i2d_SSL_SESSION_bio %xdefine _i2d_X509 _ %+ BORINGSSL_PREFIX %+ _i2d_X509 %xdefine _i2d_X509_ALGOR _ %+ BORINGSSL_PREFIX %+ _i2d_X509_ALGOR %xdefine _i2d_X509_ATTRIBUTE _ %+ BORINGSSL_PREFIX %+ _i2d_X509_ATTRIBUTE %xdefine _i2d_X509_AUX _ %+ BORINGSSL_PREFIX %+ _i2d_X509_AUX %xdefine _i2d_X509_CERT_AUX _ %+ BORINGSSL_PREFIX %+ _i2d_X509_CERT_AUX %xdefine _i2d_X509_CINF _ %+ BORINGSSL_PREFIX %+ _i2d_X509_CINF %xdefine _i2d_X509_CRL _ %+ BORINGSSL_PREFIX %+ _i2d_X509_CRL %xdefine _i2d_X509_CRL_INFO _ %+ BORINGSSL_PREFIX %+ _i2d_X509_CRL_INFO %xdefine _i2d_X509_CRL_bio _ %+ BORINGSSL_PREFIX %+ _i2d_X509_CRL_bio %xdefine _i2d_X509_CRL_fp _ %+ BORINGSSL_PREFIX %+ _i2d_X509_CRL_fp %xdefine _i2d_X509_CRL_tbs _ 
%+ BORINGSSL_PREFIX %+ _i2d_X509_CRL_tbs %xdefine _i2d_X509_EXTENSION _ %+ BORINGSSL_PREFIX %+ _i2d_X509_EXTENSION %xdefine _i2d_X509_EXTENSIONS _ %+ BORINGSSL_PREFIX %+ _i2d_X509_EXTENSIONS %xdefine _i2d_X509_NAME _ %+ BORINGSSL_PREFIX %+ _i2d_X509_NAME %xdefine _i2d_X509_PUBKEY _ %+ BORINGSSL_PREFIX %+ _i2d_X509_PUBKEY %xdefine _i2d_X509_REQ _ %+ BORINGSSL_PREFIX %+ _i2d_X509_REQ %xdefine _i2d_X509_REQ_INFO _ %+ BORINGSSL_PREFIX %+ _i2d_X509_REQ_INFO %xdefine _i2d_X509_REQ_bio _ %+ BORINGSSL_PREFIX %+ _i2d_X509_REQ_bio %xdefine _i2d_X509_REQ_fp _ %+ BORINGSSL_PREFIX %+ _i2d_X509_REQ_fp %xdefine _i2d_X509_REVOKED _ %+ BORINGSSL_PREFIX %+ _i2d_X509_REVOKED %xdefine _i2d_X509_SIG _ %+ BORINGSSL_PREFIX %+ _i2d_X509_SIG %xdefine _i2d_X509_VAL _ %+ BORINGSSL_PREFIX %+ _i2d_X509_VAL %xdefine _i2d_X509_bio _ %+ BORINGSSL_PREFIX %+ _i2d_X509_bio %xdefine _i2d_X509_fp _ %+ BORINGSSL_PREFIX %+ _i2d_X509_fp %xdefine _i2d_X509_tbs _ %+ BORINGSSL_PREFIX %+ _i2d_X509_tbs %xdefine _i2d_re_X509_CRL_tbs _ %+ BORINGSSL_PREFIX %+ _i2d_re_X509_CRL_tbs %xdefine _i2d_re_X509_REQ_tbs _ %+ BORINGSSL_PREFIX %+ _i2d_re_X509_REQ_tbs %xdefine _i2d_re_X509_tbs _ %+ BORINGSSL_PREFIX %+ _i2d_re_X509_tbs %xdefine _i2o_ECPublicKey _ %+ BORINGSSL_PREFIX %+ _i2o_ECPublicKey %xdefine _i2s_ASN1_ENUMERATED _ %+ BORINGSSL_PREFIX %+ _i2s_ASN1_ENUMERATED %xdefine _i2s_ASN1_INTEGER _ %+ BORINGSSL_PREFIX %+ _i2s_ASN1_INTEGER %xdefine _i2s_ASN1_OCTET_STRING _ %+ BORINGSSL_PREFIX %+ _i2s_ASN1_OCTET_STRING %xdefine _i2t_ASN1_OBJECT _ %+ BORINGSSL_PREFIX %+ _i2t_ASN1_OBJECT %xdefine _i2v_GENERAL_NAME _ %+ BORINGSSL_PREFIX %+ _i2v_GENERAL_NAME %xdefine _i2v_GENERAL_NAMES _ %+ BORINGSSL_PREFIX %+ _i2v_GENERAL_NAMES %xdefine _k25519Precomp _ %+ BORINGSSL_PREFIX %+ _k25519Precomp %xdefine _kBoringSSLRSASqrtTwo _ %+ BORINGSSL_PREFIX %+ _kBoringSSLRSASqrtTwo %xdefine _kBoringSSLRSASqrtTwoLen _ %+ BORINGSSL_PREFIX %+ _kBoringSSLRSASqrtTwoLen %xdefine _kOpenSSLReasonStringData _ %+ BORINGSSL_PREFIX %+ 
_kOpenSSLReasonStringData %xdefine _kOpenSSLReasonValues _ %+ BORINGSSL_PREFIX %+ _kOpenSSLReasonValues %xdefine _kOpenSSLReasonValuesLen _ %+ BORINGSSL_PREFIX %+ _kOpenSSLReasonValuesLen %xdefine _lh_CONF_SECTION_call_cmp_func _ %+ BORINGSSL_PREFIX %+ _lh_CONF_SECTION_call_cmp_func %xdefine _lh_CONF_SECTION_call_doall_arg _ %+ BORINGSSL_PREFIX %+ _lh_CONF_SECTION_call_doall_arg %xdefine _lh_CONF_SECTION_call_hash_func _ %+ BORINGSSL_PREFIX %+ _lh_CONF_SECTION_call_hash_func %xdefine _lh_CONF_SECTION_doall_arg _ %+ BORINGSSL_PREFIX %+ _lh_CONF_SECTION_doall_arg %xdefine _lh_CONF_SECTION_free _ %+ BORINGSSL_PREFIX %+ _lh_CONF_SECTION_free %xdefine _lh_CONF_SECTION_insert _ %+ BORINGSSL_PREFIX %+ _lh_CONF_SECTION_insert %xdefine _lh_CONF_SECTION_new _ %+ BORINGSSL_PREFIX %+ _lh_CONF_SECTION_new %xdefine _lh_CONF_SECTION_retrieve _ %+ BORINGSSL_PREFIX %+ _lh_CONF_SECTION_retrieve %xdefine _lh_CONF_VALUE_call_cmp_func _ %+ BORINGSSL_PREFIX %+ _lh_CONF_VALUE_call_cmp_func %xdefine _lh_CONF_VALUE_call_doall_arg _ %+ BORINGSSL_PREFIX %+ _lh_CONF_VALUE_call_doall_arg %xdefine _lh_CONF_VALUE_call_hash_func _ %+ BORINGSSL_PREFIX %+ _lh_CONF_VALUE_call_hash_func %xdefine _lh_CONF_VALUE_doall_arg _ %+ BORINGSSL_PREFIX %+ _lh_CONF_VALUE_doall_arg %xdefine _lh_CONF_VALUE_free _ %+ BORINGSSL_PREFIX %+ _lh_CONF_VALUE_free %xdefine _lh_CONF_VALUE_insert _ %+ BORINGSSL_PREFIX %+ _lh_CONF_VALUE_insert %xdefine _lh_CONF_VALUE_new _ %+ BORINGSSL_PREFIX %+ _lh_CONF_VALUE_new %xdefine _lh_CONF_VALUE_retrieve _ %+ BORINGSSL_PREFIX %+ _lh_CONF_VALUE_retrieve %xdefine _lh_CRYPTO_BUFFER_call_cmp_func _ %+ BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_call_cmp_func %xdefine _lh_CRYPTO_BUFFER_call_hash_func _ %+ BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_call_hash_func %xdefine _lh_CRYPTO_BUFFER_delete _ %+ BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_delete %xdefine _lh_CRYPTO_BUFFER_free _ %+ BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_free %xdefine _lh_CRYPTO_BUFFER_insert _ %+ BORINGSSL_PREFIX %+ 
_lh_CRYPTO_BUFFER_insert %xdefine _lh_CRYPTO_BUFFER_new _ %+ BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_new %xdefine _lh_CRYPTO_BUFFER_num_items _ %+ BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_num_items %xdefine _lh_CRYPTO_BUFFER_retrieve _ %+ BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_retrieve %xdefine _md5_block_asm_data_order _ %+ BORINGSSL_PREFIX %+ _md5_block_asm_data_order %xdefine _o2i_ECPublicKey _ %+ BORINGSSL_PREFIX %+ _o2i_ECPublicKey %xdefine _pkcs12_iterations_acceptable _ %+ BORINGSSL_PREFIX %+ _pkcs12_iterations_acceptable %xdefine _pkcs12_key_gen _ %+ BORINGSSL_PREFIX %+ _pkcs12_key_gen %xdefine _pkcs12_pbe_encrypt_init _ %+ BORINGSSL_PREFIX %+ _pkcs12_pbe_encrypt_init %xdefine _pkcs7_add_signed_data _ %+ BORINGSSL_PREFIX %+ _pkcs7_add_signed_data %xdefine _pkcs7_parse_header _ %+ BORINGSSL_PREFIX %+ _pkcs7_parse_header %xdefine _pkcs8_pbe_decrypt _ %+ BORINGSSL_PREFIX %+ _pkcs8_pbe_decrypt %xdefine _pmbtoken_exp1_blind _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp1_blind %xdefine _pmbtoken_exp1_client_key_from_bytes _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp1_client_key_from_bytes %xdefine _pmbtoken_exp1_derive_key_from_secret _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp1_derive_key_from_secret %xdefine _pmbtoken_exp1_generate_key _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp1_generate_key %xdefine _pmbtoken_exp1_get_h_for_testing _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp1_get_h_for_testing %xdefine _pmbtoken_exp1_issuer_key_from_bytes _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp1_issuer_key_from_bytes %xdefine _pmbtoken_exp1_read _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp1_read %xdefine _pmbtoken_exp1_sign _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp1_sign %xdefine _pmbtoken_exp1_unblind _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp1_unblind %xdefine _pmbtoken_exp2_blind _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp2_blind %xdefine _pmbtoken_exp2_client_key_from_bytes _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp2_client_key_from_bytes %xdefine _pmbtoken_exp2_derive_key_from_secret _ %+ BORINGSSL_PREFIX %+ 
_pmbtoken_exp2_derive_key_from_secret %xdefine _pmbtoken_exp2_generate_key _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp2_generate_key %xdefine _pmbtoken_exp2_get_h_for_testing _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp2_get_h_for_testing %xdefine _pmbtoken_exp2_issuer_key_from_bytes _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp2_issuer_key_from_bytes %xdefine _pmbtoken_exp2_read _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp2_read %xdefine _pmbtoken_exp2_sign _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp2_sign %xdefine _pmbtoken_exp2_unblind _ %+ BORINGSSL_PREFIX %+ _pmbtoken_exp2_unblind %xdefine _pmbtoken_pst1_blind _ %+ BORINGSSL_PREFIX %+ _pmbtoken_pst1_blind %xdefine _pmbtoken_pst1_client_key_from_bytes _ %+ BORINGSSL_PREFIX %+ _pmbtoken_pst1_client_key_from_bytes %xdefine _pmbtoken_pst1_derive_key_from_secret _ %+ BORINGSSL_PREFIX %+ _pmbtoken_pst1_derive_key_from_secret %xdefine _pmbtoken_pst1_generate_key _ %+ BORINGSSL_PREFIX %+ _pmbtoken_pst1_generate_key %xdefine _pmbtoken_pst1_get_h_for_testing _ %+ BORINGSSL_PREFIX %+ _pmbtoken_pst1_get_h_for_testing %xdefine _pmbtoken_pst1_issuer_key_from_bytes _ %+ BORINGSSL_PREFIX %+ _pmbtoken_pst1_issuer_key_from_bytes %xdefine _pmbtoken_pst1_read _ %+ BORINGSSL_PREFIX %+ _pmbtoken_pst1_read %xdefine _pmbtoken_pst1_sign _ %+ BORINGSSL_PREFIX %+ _pmbtoken_pst1_sign %xdefine _pmbtoken_pst1_unblind _ %+ BORINGSSL_PREFIX %+ _pmbtoken_pst1_unblind %xdefine _poly_Rq_mul _ %+ BORINGSSL_PREFIX %+ _poly_Rq_mul %xdefine _rand_fork_unsafe_buffering_enabled _ %+ BORINGSSL_PREFIX %+ _rand_fork_unsafe_buffering_enabled %xdefine _rsa_asn1_meth _ %+ BORINGSSL_PREFIX %+ _rsa_asn1_meth %xdefine _rsa_check_public_key _ %+ BORINGSSL_PREFIX %+ _rsa_check_public_key %xdefine _rsa_default_private_transform _ %+ BORINGSSL_PREFIX %+ _rsa_default_private_transform %xdefine _rsa_default_sign_raw _ %+ BORINGSSL_PREFIX %+ _rsa_default_sign_raw %xdefine _rsa_invalidate_key _ %+ BORINGSSL_PREFIX %+ _rsa_invalidate_key %xdefine _rsa_pkey_meth _ %+ BORINGSSL_PREFIX %+ 
_rsa_pkey_meth %xdefine _rsa_private_transform _ %+ BORINGSSL_PREFIX %+ _rsa_private_transform %xdefine _rsa_private_transform_no_self_test _ %+ BORINGSSL_PREFIX %+ _rsa_private_transform_no_self_test %xdefine _rsa_sign_no_self_test _ %+ BORINGSSL_PREFIX %+ _rsa_sign_no_self_test %xdefine _rsa_verify_no_self_test _ %+ BORINGSSL_PREFIX %+ _rsa_verify_no_self_test %xdefine _rsa_verify_raw_no_self_test _ %+ BORINGSSL_PREFIX %+ _rsa_verify_raw_no_self_test %xdefine _rsaz_1024_gather5_avx2 _ %+ BORINGSSL_PREFIX %+ _rsaz_1024_gather5_avx2 %xdefine _rsaz_1024_mul_avx2 _ %+ BORINGSSL_PREFIX %+ _rsaz_1024_mul_avx2 %xdefine _rsaz_1024_norm2red_avx2 _ %+ BORINGSSL_PREFIX %+ _rsaz_1024_norm2red_avx2 %xdefine _rsaz_1024_red2norm_avx2 _ %+ BORINGSSL_PREFIX %+ _rsaz_1024_red2norm_avx2 %xdefine _rsaz_1024_scatter5_avx2 _ %+ BORINGSSL_PREFIX %+ _rsaz_1024_scatter5_avx2 %xdefine _rsaz_1024_sqr_avx2 _ %+ BORINGSSL_PREFIX %+ _rsaz_1024_sqr_avx2 %xdefine _rsaz_avx2_preferred _ %+ BORINGSSL_PREFIX %+ _rsaz_avx2_preferred %xdefine _s2i_ASN1_INTEGER _ %+ BORINGSSL_PREFIX %+ _s2i_ASN1_INTEGER %xdefine _s2i_ASN1_OCTET_STRING _ %+ BORINGSSL_PREFIX %+ _s2i_ASN1_OCTET_STRING %xdefine _sha1_avx2_capable _ %+ BORINGSSL_PREFIX %+ _sha1_avx2_capable %xdefine _sha1_avx_capable _ %+ BORINGSSL_PREFIX %+ _sha1_avx_capable %xdefine _sha1_block_data_order_avx _ %+ BORINGSSL_PREFIX %+ _sha1_block_data_order_avx %xdefine _sha1_block_data_order_avx2 _ %+ BORINGSSL_PREFIX %+ _sha1_block_data_order_avx2 %xdefine _sha1_block_data_order_hw _ %+ BORINGSSL_PREFIX %+ _sha1_block_data_order_hw %xdefine _sha1_block_data_order_nohw _ %+ BORINGSSL_PREFIX %+ _sha1_block_data_order_nohw %xdefine _sha1_block_data_order_ssse3 _ %+ BORINGSSL_PREFIX %+ _sha1_block_data_order_ssse3 %xdefine _sha1_hw_capable _ %+ BORINGSSL_PREFIX %+ _sha1_hw_capable %xdefine _sha1_ssse3_capable _ %+ BORINGSSL_PREFIX %+ _sha1_ssse3_capable %xdefine _sha256_avx_capable _ %+ BORINGSSL_PREFIX %+ _sha256_avx_capable %xdefine 
_sha256_block_data_order_avx _ %+ BORINGSSL_PREFIX %+ _sha256_block_data_order_avx %xdefine _sha256_block_data_order_hw _ %+ BORINGSSL_PREFIX %+ _sha256_block_data_order_hw %xdefine _sha256_block_data_order_nohw _ %+ BORINGSSL_PREFIX %+ _sha256_block_data_order_nohw %xdefine _sha256_block_data_order_ssse3 _ %+ BORINGSSL_PREFIX %+ _sha256_block_data_order_ssse3 %xdefine _sha256_hw_capable _ %+ BORINGSSL_PREFIX %+ _sha256_hw_capable %xdefine _sha256_ssse3_capable _ %+ BORINGSSL_PREFIX %+ _sha256_ssse3_capable %xdefine _sha512_avx_capable _ %+ BORINGSSL_PREFIX %+ _sha512_avx_capable %xdefine _sha512_block_data_order_avx _ %+ BORINGSSL_PREFIX %+ _sha512_block_data_order_avx %xdefine _sha512_block_data_order_hw _ %+ BORINGSSL_PREFIX %+ _sha512_block_data_order_hw %xdefine _sha512_block_data_order_nohw _ %+ BORINGSSL_PREFIX %+ _sha512_block_data_order_nohw %xdefine _sha512_hw_capable _ %+ BORINGSSL_PREFIX %+ _sha512_hw_capable %xdefine _sk_ACCESS_DESCRIPTION_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_ACCESS_DESCRIPTION_call_free_func %xdefine _sk_ACCESS_DESCRIPTION_new_null _ %+ BORINGSSL_PREFIX %+ _sk_ACCESS_DESCRIPTION_new_null %xdefine _sk_ACCESS_DESCRIPTION_num _ %+ BORINGSSL_PREFIX %+ _sk_ACCESS_DESCRIPTION_num %xdefine _sk_ACCESS_DESCRIPTION_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_ACCESS_DESCRIPTION_pop_free %xdefine _sk_ACCESS_DESCRIPTION_push _ %+ BORINGSSL_PREFIX %+ _sk_ACCESS_DESCRIPTION_push %xdefine _sk_ACCESS_DESCRIPTION_value _ %+ BORINGSSL_PREFIX %+ _sk_ACCESS_DESCRIPTION_value %xdefine _sk_ASN1_INTEGER_num _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_INTEGER_num %xdefine _sk_ASN1_INTEGER_push _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_INTEGER_push %xdefine _sk_ASN1_INTEGER_value _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_INTEGER_value %xdefine _sk_ASN1_OBJECT_call_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_call_cmp_func %xdefine _sk_ASN1_OBJECT_call_copy_func _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_call_copy_func %xdefine _sk_ASN1_OBJECT_call_free_func _ %+ BORINGSSL_PREFIX %+ 
_sk_ASN1_OBJECT_call_free_func %xdefine _sk_ASN1_OBJECT_deep_copy _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_deep_copy %xdefine _sk_ASN1_OBJECT_dup _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_dup %xdefine _sk_ASN1_OBJECT_find _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_find %xdefine _sk_ASN1_OBJECT_free _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_free %xdefine _sk_ASN1_OBJECT_is_sorted _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_is_sorted %xdefine _sk_ASN1_OBJECT_new_null _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_new_null %xdefine _sk_ASN1_OBJECT_num _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_num %xdefine _sk_ASN1_OBJECT_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_pop_free %xdefine _sk_ASN1_OBJECT_push _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_push %xdefine _sk_ASN1_OBJECT_set_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_set_cmp_func %xdefine _sk_ASN1_OBJECT_sort _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_sort %xdefine _sk_ASN1_OBJECT_value _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_value %xdefine _sk_ASN1_TYPE_num _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_TYPE_num %xdefine _sk_ASN1_TYPE_push _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_TYPE_push %xdefine _sk_ASN1_TYPE_value _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_TYPE_value %xdefine _sk_ASN1_VALUE_free _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_VALUE_free %xdefine _sk_ASN1_VALUE_new_null _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_VALUE_new_null %xdefine _sk_ASN1_VALUE_num _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_VALUE_num %xdefine _sk_ASN1_VALUE_pop _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_VALUE_pop %xdefine _sk_ASN1_VALUE_push _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_VALUE_push %xdefine _sk_ASN1_VALUE_value _ %+ BORINGSSL_PREFIX %+ _sk_ASN1_VALUE_value %xdefine _sk_CONF_VALUE_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_CONF_VALUE_call_free_func %xdefine _sk_CONF_VALUE_delete_ptr _ %+ BORINGSSL_PREFIX %+ _sk_CONF_VALUE_delete_ptr %xdefine _sk_CONF_VALUE_free _ %+ BORINGSSL_PREFIX %+ _sk_CONF_VALUE_free %xdefine _sk_CONF_VALUE_new_null _ %+ BORINGSSL_PREFIX %+ _sk_CONF_VALUE_new_null 
%xdefine _sk_CONF_VALUE_num _ %+ BORINGSSL_PREFIX %+ _sk_CONF_VALUE_num %xdefine _sk_CONF_VALUE_pop _ %+ BORINGSSL_PREFIX %+ _sk_CONF_VALUE_pop %xdefine _sk_CONF_VALUE_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_CONF_VALUE_pop_free %xdefine _sk_CONF_VALUE_push _ %+ BORINGSSL_PREFIX %+ _sk_CONF_VALUE_push %xdefine _sk_CONF_VALUE_value _ %+ BORINGSSL_PREFIX %+ _sk_CONF_VALUE_value %xdefine _sk_CRYPTO_BUFFER_call_copy_func _ %+ BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_call_copy_func %xdefine _sk_CRYPTO_BUFFER_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_call_free_func %xdefine _sk_CRYPTO_BUFFER_deep_copy _ %+ BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_deep_copy %xdefine _sk_CRYPTO_BUFFER_new_null _ %+ BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_new_null %xdefine _sk_CRYPTO_BUFFER_num _ %+ BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_num %xdefine _sk_CRYPTO_BUFFER_pop _ %+ BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_pop %xdefine _sk_CRYPTO_BUFFER_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_pop_free %xdefine _sk_CRYPTO_BUFFER_push _ %+ BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_push %xdefine _sk_CRYPTO_BUFFER_set _ %+ BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_set %xdefine _sk_CRYPTO_BUFFER_value _ %+ BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_value %xdefine _sk_DIST_POINT_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_DIST_POINT_call_free_func %xdefine _sk_DIST_POINT_new_null _ %+ BORINGSSL_PREFIX %+ _sk_DIST_POINT_new_null %xdefine _sk_DIST_POINT_num _ %+ BORINGSSL_PREFIX %+ _sk_DIST_POINT_num %xdefine _sk_DIST_POINT_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_DIST_POINT_pop_free %xdefine _sk_DIST_POINT_push _ %+ BORINGSSL_PREFIX %+ _sk_DIST_POINT_push %xdefine _sk_DIST_POINT_value _ %+ BORINGSSL_PREFIX %+ _sk_DIST_POINT_value %xdefine _sk_GENERAL_NAME_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_call_free_func %xdefine _sk_GENERAL_NAME_new_null _ %+ BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_new_null %xdefine _sk_GENERAL_NAME_num _ %+ BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_num %xdefine 
_sk_GENERAL_NAME_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_pop_free %xdefine _sk_GENERAL_NAME_push _ %+ BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_push %xdefine _sk_GENERAL_NAME_set _ %+ BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_set %xdefine _sk_GENERAL_NAME_value _ %+ BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_value %xdefine _sk_GENERAL_SUBTREE_new_null _ %+ BORINGSSL_PREFIX %+ _sk_GENERAL_SUBTREE_new_null %xdefine _sk_GENERAL_SUBTREE_num _ %+ BORINGSSL_PREFIX %+ _sk_GENERAL_SUBTREE_num %xdefine _sk_GENERAL_SUBTREE_push _ %+ BORINGSSL_PREFIX %+ _sk_GENERAL_SUBTREE_push %xdefine _sk_GENERAL_SUBTREE_value _ %+ BORINGSSL_PREFIX %+ _sk_GENERAL_SUBTREE_value %xdefine _sk_OPENSSL_STRING_call_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_call_cmp_func %xdefine _sk_OPENSSL_STRING_call_copy_func _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_call_copy_func %xdefine _sk_OPENSSL_STRING_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_call_free_func %xdefine _sk_OPENSSL_STRING_deep_copy _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_deep_copy %xdefine _sk_OPENSSL_STRING_find _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_find %xdefine _sk_OPENSSL_STRING_free _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_free %xdefine _sk_OPENSSL_STRING_new _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_new %xdefine _sk_OPENSSL_STRING_new_null _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_new_null %xdefine _sk_OPENSSL_STRING_num _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_num %xdefine _sk_OPENSSL_STRING_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_pop_free %xdefine _sk_OPENSSL_STRING_push _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_push %xdefine _sk_OPENSSL_STRING_sort _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_sort %xdefine _sk_OPENSSL_STRING_value _ %+ BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_value %xdefine _sk_POLICYINFO_call_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_POLICYINFO_call_cmp_func %xdefine _sk_POLICYINFO_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_POLICYINFO_call_free_func %xdefine 
_sk_POLICYINFO_find _ %+ BORINGSSL_PREFIX %+ _sk_POLICYINFO_find %xdefine _sk_POLICYINFO_is_sorted _ %+ BORINGSSL_PREFIX %+ _sk_POLICYINFO_is_sorted %xdefine _sk_POLICYINFO_new_null _ %+ BORINGSSL_PREFIX %+ _sk_POLICYINFO_new_null %xdefine _sk_POLICYINFO_num _ %+ BORINGSSL_PREFIX %+ _sk_POLICYINFO_num %xdefine _sk_POLICYINFO_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_POLICYINFO_pop_free %xdefine _sk_POLICYINFO_push _ %+ BORINGSSL_PREFIX %+ _sk_POLICYINFO_push %xdefine _sk_POLICYINFO_set_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_POLICYINFO_set_cmp_func %xdefine _sk_POLICYINFO_sort _ %+ BORINGSSL_PREFIX %+ _sk_POLICYINFO_sort %xdefine _sk_POLICYINFO_value _ %+ BORINGSSL_PREFIX %+ _sk_POLICYINFO_value %xdefine _sk_POLICYQUALINFO_new_null _ %+ BORINGSSL_PREFIX %+ _sk_POLICYQUALINFO_new_null %xdefine _sk_POLICYQUALINFO_num _ %+ BORINGSSL_PREFIX %+ _sk_POLICYQUALINFO_num %xdefine _sk_POLICYQUALINFO_push _ %+ BORINGSSL_PREFIX %+ _sk_POLICYQUALINFO_push %xdefine _sk_POLICYQUALINFO_value _ %+ BORINGSSL_PREFIX %+ _sk_POLICYQUALINFO_value %xdefine _sk_POLICY_MAPPING_call_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_call_cmp_func %xdefine _sk_POLICY_MAPPING_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_call_free_func %xdefine _sk_POLICY_MAPPING_find _ %+ BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_find %xdefine _sk_POLICY_MAPPING_is_sorted _ %+ BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_is_sorted %xdefine _sk_POLICY_MAPPING_new_null _ %+ BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_new_null %xdefine _sk_POLICY_MAPPING_num _ %+ BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_num %xdefine _sk_POLICY_MAPPING_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_pop_free %xdefine _sk_POLICY_MAPPING_push _ %+ BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_push %xdefine _sk_POLICY_MAPPING_set_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_set_cmp_func %xdefine _sk_POLICY_MAPPING_sort _ %+ BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_sort %xdefine _sk_POLICY_MAPPING_value _ %+ BORINGSSL_PREFIX %+ 
_sk_POLICY_MAPPING_value %xdefine _sk_SRTP_PROTECTION_PROFILE_new_null _ %+ BORINGSSL_PREFIX %+ _sk_SRTP_PROTECTION_PROFILE_new_null %xdefine _sk_SRTP_PROTECTION_PROFILE_num _ %+ BORINGSSL_PREFIX %+ _sk_SRTP_PROTECTION_PROFILE_num %xdefine _sk_SRTP_PROTECTION_PROFILE_push _ %+ BORINGSSL_PREFIX %+ _sk_SRTP_PROTECTION_PROFILE_push %xdefine _sk_SSL_CIPHER_call_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_call_cmp_func %xdefine _sk_SSL_CIPHER_delete _ %+ BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_delete %xdefine _sk_SSL_CIPHER_dup _ %+ BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_dup %xdefine _sk_SSL_CIPHER_find _ %+ BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_find %xdefine _sk_SSL_CIPHER_new_null _ %+ BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_new_null %xdefine _sk_SSL_CIPHER_num _ %+ BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_num %xdefine _sk_SSL_CIPHER_push _ %+ BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_push %xdefine _sk_SSL_CIPHER_value _ %+ BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_value %xdefine _sk_TRUST_TOKEN_PRETOKEN_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_PRETOKEN_call_free_func %xdefine _sk_TRUST_TOKEN_PRETOKEN_new_null _ %+ BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_PRETOKEN_new_null %xdefine _sk_TRUST_TOKEN_PRETOKEN_num _ %+ BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_PRETOKEN_num %xdefine _sk_TRUST_TOKEN_PRETOKEN_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_PRETOKEN_pop_free %xdefine _sk_TRUST_TOKEN_PRETOKEN_push _ %+ BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_PRETOKEN_push %xdefine _sk_TRUST_TOKEN_PRETOKEN_value _ %+ BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_PRETOKEN_value %xdefine _sk_TRUST_TOKEN_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_call_free_func %xdefine _sk_TRUST_TOKEN_new_null _ %+ BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_new_null %xdefine _sk_TRUST_TOKEN_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_pop_free %xdefine _sk_TRUST_TOKEN_push _ %+ BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_push %xdefine _sk_X509_ATTRIBUTE_delete _ %+ BORINGSSL_PREFIX %+ _sk_X509_ATTRIBUTE_delete %xdefine 
_sk_X509_ATTRIBUTE_new_null _ %+ BORINGSSL_PREFIX %+ _sk_X509_ATTRIBUTE_new_null %xdefine _sk_X509_ATTRIBUTE_num _ %+ BORINGSSL_PREFIX %+ _sk_X509_ATTRIBUTE_num %xdefine _sk_X509_ATTRIBUTE_push _ %+ BORINGSSL_PREFIX %+ _sk_X509_ATTRIBUTE_push %xdefine _sk_X509_ATTRIBUTE_value _ %+ BORINGSSL_PREFIX %+ _sk_X509_ATTRIBUTE_value %xdefine _sk_X509_CRL_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_CRL_call_free_func %xdefine _sk_X509_CRL_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_CRL_free %xdefine _sk_X509_CRL_new_null _ %+ BORINGSSL_PREFIX %+ _sk_X509_CRL_new_null %xdefine _sk_X509_CRL_num _ %+ BORINGSSL_PREFIX %+ _sk_X509_CRL_num %xdefine _sk_X509_CRL_pop _ %+ BORINGSSL_PREFIX %+ _sk_X509_CRL_pop %xdefine _sk_X509_CRL_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_CRL_pop_free %xdefine _sk_X509_CRL_push _ %+ BORINGSSL_PREFIX %+ _sk_X509_CRL_push %xdefine _sk_X509_CRL_value _ %+ BORINGSSL_PREFIX %+ _sk_X509_CRL_value %xdefine _sk_X509_EXTENSION_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_call_free_func %xdefine _sk_X509_EXTENSION_delete _ %+ BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_delete %xdefine _sk_X509_EXTENSION_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_free %xdefine _sk_X509_EXTENSION_insert _ %+ BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_insert %xdefine _sk_X509_EXTENSION_new_null _ %+ BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_new_null %xdefine _sk_X509_EXTENSION_num _ %+ BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_num %xdefine _sk_X509_EXTENSION_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_pop_free %xdefine _sk_X509_EXTENSION_push _ %+ BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_push %xdefine _sk_X509_EXTENSION_set _ %+ BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_set %xdefine _sk_X509_EXTENSION_value _ %+ BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_value %xdefine _sk_X509_INFO_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_INFO_call_free_func %xdefine _sk_X509_INFO_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_INFO_free %xdefine _sk_X509_INFO_new_null _ %+ 
BORINGSSL_PREFIX %+ _sk_X509_INFO_new_null %xdefine _sk_X509_INFO_num _ %+ BORINGSSL_PREFIX %+ _sk_X509_INFO_num %xdefine _sk_X509_INFO_pop _ %+ BORINGSSL_PREFIX %+ _sk_X509_INFO_pop %xdefine _sk_X509_INFO_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_INFO_pop_free %xdefine _sk_X509_INFO_push _ %+ BORINGSSL_PREFIX %+ _sk_X509_INFO_push %xdefine _sk_X509_INFO_value _ %+ BORINGSSL_PREFIX %+ _sk_X509_INFO_value %xdefine _sk_X509_LOOKUP_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_LOOKUP_call_free_func %xdefine _sk_X509_LOOKUP_new_null _ %+ BORINGSSL_PREFIX %+ _sk_X509_LOOKUP_new_null %xdefine _sk_X509_LOOKUP_num _ %+ BORINGSSL_PREFIX %+ _sk_X509_LOOKUP_num %xdefine _sk_X509_LOOKUP_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_LOOKUP_pop_free %xdefine _sk_X509_LOOKUP_push _ %+ BORINGSSL_PREFIX %+ _sk_X509_LOOKUP_push %xdefine _sk_X509_LOOKUP_value _ %+ BORINGSSL_PREFIX %+ _sk_X509_LOOKUP_value %xdefine _sk_X509_NAME_ENTRY_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_call_free_func %xdefine _sk_X509_NAME_ENTRY_delete _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_delete %xdefine _sk_X509_NAME_ENTRY_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_free %xdefine _sk_X509_NAME_ENTRY_insert _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_insert %xdefine _sk_X509_NAME_ENTRY_new_null _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_new_null %xdefine _sk_X509_NAME_ENTRY_num _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_num %xdefine _sk_X509_NAME_ENTRY_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_pop_free %xdefine _sk_X509_NAME_ENTRY_push _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_push %xdefine _sk_X509_NAME_ENTRY_set _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_set %xdefine _sk_X509_NAME_ENTRY_value _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_value %xdefine _sk_X509_NAME_call_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_call_cmp_func %xdefine _sk_X509_NAME_call_copy_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_call_copy_func %xdefine 
_sk_X509_NAME_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_call_free_func %xdefine _sk_X509_NAME_deep_copy _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_deep_copy %xdefine _sk_X509_NAME_find _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_find %xdefine _sk_X509_NAME_new _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_new %xdefine _sk_X509_NAME_new_null _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_new_null %xdefine _sk_X509_NAME_num _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_num %xdefine _sk_X509_NAME_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_pop_free %xdefine _sk_X509_NAME_set _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_set %xdefine _sk_X509_NAME_set_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_set_cmp_func %xdefine _sk_X509_NAME_sort _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_sort %xdefine _sk_X509_NAME_value _ %+ BORINGSSL_PREFIX %+ _sk_X509_NAME_value %xdefine _sk_X509_OBJECT_call_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_OBJECT_call_cmp_func %xdefine _sk_X509_OBJECT_call_copy_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_OBJECT_call_copy_func %xdefine _sk_X509_OBJECT_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_OBJECT_call_free_func %xdefine _sk_X509_OBJECT_deep_copy _ %+ BORINGSSL_PREFIX %+ _sk_X509_OBJECT_deep_copy %xdefine _sk_X509_OBJECT_find _ %+ BORINGSSL_PREFIX %+ _sk_X509_OBJECT_find %xdefine _sk_X509_OBJECT_new _ %+ BORINGSSL_PREFIX %+ _sk_X509_OBJECT_new %xdefine _sk_X509_OBJECT_num _ %+ BORINGSSL_PREFIX %+ _sk_X509_OBJECT_num %xdefine _sk_X509_OBJECT_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_OBJECT_pop_free %xdefine _sk_X509_OBJECT_push _ %+ BORINGSSL_PREFIX %+ _sk_X509_OBJECT_push %xdefine _sk_X509_OBJECT_sort _ %+ BORINGSSL_PREFIX %+ _sk_X509_OBJECT_sort %xdefine _sk_X509_OBJECT_value _ %+ BORINGSSL_PREFIX %+ _sk_X509_OBJECT_value %xdefine _sk_X509_REVOKED_call_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_REVOKED_call_cmp_func %xdefine _sk_X509_REVOKED_find _ %+ BORINGSSL_PREFIX %+ _sk_X509_REVOKED_find %xdefine _sk_X509_REVOKED_is_sorted _ %+ BORINGSSL_PREFIX %+ 
_sk_X509_REVOKED_is_sorted %xdefine _sk_X509_REVOKED_new _ %+ BORINGSSL_PREFIX %+ _sk_X509_REVOKED_new %xdefine _sk_X509_REVOKED_num _ %+ BORINGSSL_PREFIX %+ _sk_X509_REVOKED_num %xdefine _sk_X509_REVOKED_push _ %+ BORINGSSL_PREFIX %+ _sk_X509_REVOKED_push %xdefine _sk_X509_REVOKED_set_cmp_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_REVOKED_set_cmp_func %xdefine _sk_X509_REVOKED_sort _ %+ BORINGSSL_PREFIX %+ _sk_X509_REVOKED_sort %xdefine _sk_X509_REVOKED_value _ %+ BORINGSSL_PREFIX %+ _sk_X509_REVOKED_value %xdefine _sk_X509_call_free_func _ %+ BORINGSSL_PREFIX %+ _sk_X509_call_free_func %xdefine _sk_X509_delete _ %+ BORINGSSL_PREFIX %+ _sk_X509_delete %xdefine _sk_X509_delete_ptr _ %+ BORINGSSL_PREFIX %+ _sk_X509_delete_ptr %xdefine _sk_X509_dup _ %+ BORINGSSL_PREFIX %+ _sk_X509_dup %xdefine _sk_X509_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_free %xdefine _sk_X509_new_null _ %+ BORINGSSL_PREFIX %+ _sk_X509_new_null %xdefine _sk_X509_num _ %+ BORINGSSL_PREFIX %+ _sk_X509_num %xdefine _sk_X509_pop _ %+ BORINGSSL_PREFIX %+ _sk_X509_pop %xdefine _sk_X509_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_X509_pop_free %xdefine _sk_X509_push _ %+ BORINGSSL_PREFIX %+ _sk_X509_push %xdefine _sk_X509_set _ %+ BORINGSSL_PREFIX %+ _sk_X509_set %xdefine _sk_X509_shift _ %+ BORINGSSL_PREFIX %+ _sk_X509_shift %xdefine _sk_X509_value _ %+ BORINGSSL_PREFIX %+ _sk_X509_value %xdefine _sk_free _ %+ BORINGSSL_PREFIX %+ _sk_free %xdefine _sk_new_null _ %+ BORINGSSL_PREFIX %+ _sk_new_null %xdefine _sk_num _ %+ BORINGSSL_PREFIX %+ _sk_num %xdefine _sk_pop _ %+ BORINGSSL_PREFIX %+ _sk_pop %xdefine _sk_pop_free _ %+ BORINGSSL_PREFIX %+ _sk_pop_free %xdefine _sk_pop_free_ex _ %+ BORINGSSL_PREFIX %+ _sk_pop_free_ex %xdefine _sk_push _ %+ BORINGSSL_PREFIX %+ _sk_push %xdefine _sk_value _ %+ BORINGSSL_PREFIX %+ _sk_value %xdefine _sk_void_free _ %+ BORINGSSL_PREFIX %+ _sk_void_free %xdefine _sk_void_new_null _ %+ BORINGSSL_PREFIX %+ _sk_void_new_null %xdefine _sk_void_num _ %+ BORINGSSL_PREFIX %+ _sk_void_num 
%xdefine _sk_void_push _ %+ BORINGSSL_PREFIX %+ _sk_void_push %xdefine _sk_void_set _ %+ BORINGSSL_PREFIX %+ _sk_void_set %xdefine _sk_void_value _ %+ BORINGSSL_PREFIX %+ _sk_void_value %xdefine _slhdsa_copy_keypair_addr _ %+ BORINGSSL_PREFIX %+ _slhdsa_copy_keypair_addr %xdefine _slhdsa_fors_pk_from_sig _ %+ BORINGSSL_PREFIX %+ _slhdsa_fors_pk_from_sig %xdefine _slhdsa_fors_sign _ %+ BORINGSSL_PREFIX %+ _slhdsa_fors_sign %xdefine _slhdsa_fors_sk_gen _ %+ BORINGSSL_PREFIX %+ _slhdsa_fors_sk_gen %xdefine _slhdsa_fors_treehash _ %+ BORINGSSL_PREFIX %+ _slhdsa_fors_treehash %xdefine _slhdsa_get_tree_index _ %+ BORINGSSL_PREFIX %+ _slhdsa_get_tree_index %xdefine _slhdsa_ht_sign _ %+ BORINGSSL_PREFIX %+ _slhdsa_ht_sign %xdefine _slhdsa_ht_verify _ %+ BORINGSSL_PREFIX %+ _slhdsa_ht_verify %xdefine _slhdsa_set_chain_addr _ %+ BORINGSSL_PREFIX %+ _slhdsa_set_chain_addr %xdefine _slhdsa_set_hash_addr _ %+ BORINGSSL_PREFIX %+ _slhdsa_set_hash_addr %xdefine _slhdsa_set_keypair_addr _ %+ BORINGSSL_PREFIX %+ _slhdsa_set_keypair_addr %xdefine _slhdsa_set_layer_addr _ %+ BORINGSSL_PREFIX %+ _slhdsa_set_layer_addr %xdefine _slhdsa_set_tree_addr _ %+ BORINGSSL_PREFIX %+ _slhdsa_set_tree_addr %xdefine _slhdsa_set_tree_height _ %+ BORINGSSL_PREFIX %+ _slhdsa_set_tree_height %xdefine _slhdsa_set_tree_index _ %+ BORINGSSL_PREFIX %+ _slhdsa_set_tree_index %xdefine _slhdsa_set_type _ %+ BORINGSSL_PREFIX %+ _slhdsa_set_type %xdefine _slhdsa_thash_f _ %+ BORINGSSL_PREFIX %+ _slhdsa_thash_f %xdefine _slhdsa_thash_h _ %+ BORINGSSL_PREFIX %+ _slhdsa_thash_h %xdefine _slhdsa_thash_hmsg _ %+ BORINGSSL_PREFIX %+ _slhdsa_thash_hmsg %xdefine _slhdsa_thash_prf _ %+ BORINGSSL_PREFIX %+ _slhdsa_thash_prf %xdefine _slhdsa_thash_prfmsg _ %+ BORINGSSL_PREFIX %+ _slhdsa_thash_prfmsg %xdefine _slhdsa_thash_tk _ %+ BORINGSSL_PREFIX %+ _slhdsa_thash_tk %xdefine _slhdsa_thash_tl _ %+ BORINGSSL_PREFIX %+ _slhdsa_thash_tl %xdefine _slhdsa_treehash _ %+ BORINGSSL_PREFIX %+ _slhdsa_treehash %xdefine 
_slhdsa_wots_pk_from_sig _ %+ BORINGSSL_PREFIX %+ _slhdsa_wots_pk_from_sig %xdefine _slhdsa_wots_pk_gen _ %+ BORINGSSL_PREFIX %+ _slhdsa_wots_pk_gen %xdefine _slhdsa_wots_sign _ %+ BORINGSSL_PREFIX %+ _slhdsa_wots_sign %xdefine _slhdsa_xmss_pk_from_sig _ %+ BORINGSSL_PREFIX %+ _slhdsa_xmss_pk_from_sig %xdefine _slhdsa_xmss_sign _ %+ BORINGSSL_PREFIX %+ _slhdsa_xmss_sign %xdefine _v2i_GENERAL_NAME _ %+ BORINGSSL_PREFIX %+ _v2i_GENERAL_NAME %xdefine _v2i_GENERAL_NAMES _ %+ BORINGSSL_PREFIX %+ _v2i_GENERAL_NAMES %xdefine _v2i_GENERAL_NAME_ex _ %+ BORINGSSL_PREFIX %+ _v2i_GENERAL_NAME_ex %xdefine _v3_akey_id _ %+ BORINGSSL_PREFIX %+ _v3_akey_id %xdefine _v3_alt _ %+ BORINGSSL_PREFIX %+ _v3_alt %xdefine _v3_bcons _ %+ BORINGSSL_PREFIX %+ _v3_bcons %xdefine _v3_cpols _ %+ BORINGSSL_PREFIX %+ _v3_cpols %xdefine _v3_crl_invdate _ %+ BORINGSSL_PREFIX %+ _v3_crl_invdate %xdefine _v3_crl_num _ %+ BORINGSSL_PREFIX %+ _v3_crl_num %xdefine _v3_crl_reason _ %+ BORINGSSL_PREFIX %+ _v3_crl_reason %xdefine _v3_crld _ %+ BORINGSSL_PREFIX %+ _v3_crld %xdefine _v3_delta_crl _ %+ BORINGSSL_PREFIX %+ _v3_delta_crl %xdefine _v3_ext_ku _ %+ BORINGSSL_PREFIX %+ _v3_ext_ku %xdefine _v3_freshest_crl _ %+ BORINGSSL_PREFIX %+ _v3_freshest_crl %xdefine _v3_idp _ %+ BORINGSSL_PREFIX %+ _v3_idp %xdefine _v3_info _ %+ BORINGSSL_PREFIX %+ _v3_info %xdefine _v3_inhibit_anyp _ %+ BORINGSSL_PREFIX %+ _v3_inhibit_anyp %xdefine _v3_key_usage _ %+ BORINGSSL_PREFIX %+ _v3_key_usage %xdefine _v3_name_constraints _ %+ BORINGSSL_PREFIX %+ _v3_name_constraints %xdefine _v3_ns_ia5_list _ %+ BORINGSSL_PREFIX %+ _v3_ns_ia5_list %xdefine _v3_nscert _ %+ BORINGSSL_PREFIX %+ _v3_nscert %xdefine _v3_ocsp_accresp _ %+ BORINGSSL_PREFIX %+ _v3_ocsp_accresp %xdefine _v3_ocsp_nocheck _ %+ BORINGSSL_PREFIX %+ _v3_ocsp_nocheck %xdefine _v3_policy_constraints _ %+ BORINGSSL_PREFIX %+ _v3_policy_constraints %xdefine _v3_policy_mappings _ %+ BORINGSSL_PREFIX %+ _v3_policy_mappings %xdefine _v3_sinfo _ %+ BORINGSSL_PREFIX %+ 
_v3_sinfo %xdefine _v3_skey_id _ %+ BORINGSSL_PREFIX %+ _v3_skey_id %xdefine _voprf_exp2_blind _ %+ BORINGSSL_PREFIX %+ _voprf_exp2_blind %xdefine _voprf_exp2_client_key_from_bytes _ %+ BORINGSSL_PREFIX %+ _voprf_exp2_client_key_from_bytes %xdefine _voprf_exp2_derive_key_from_secret _ %+ BORINGSSL_PREFIX %+ _voprf_exp2_derive_key_from_secret %xdefine _voprf_exp2_generate_key _ %+ BORINGSSL_PREFIX %+ _voprf_exp2_generate_key %xdefine _voprf_exp2_issuer_key_from_bytes _ %+ BORINGSSL_PREFIX %+ _voprf_exp2_issuer_key_from_bytes %xdefine _voprf_exp2_read _ %+ BORINGSSL_PREFIX %+ _voprf_exp2_read %xdefine _voprf_exp2_sign _ %+ BORINGSSL_PREFIX %+ _voprf_exp2_sign %xdefine _voprf_exp2_unblind _ %+ BORINGSSL_PREFIX %+ _voprf_exp2_unblind %xdefine _voprf_pst1_blind _ %+ BORINGSSL_PREFIX %+ _voprf_pst1_blind %xdefine _voprf_pst1_client_key_from_bytes _ %+ BORINGSSL_PREFIX %+ _voprf_pst1_client_key_from_bytes %xdefine _voprf_pst1_derive_key_from_secret _ %+ BORINGSSL_PREFIX %+ _voprf_pst1_derive_key_from_secret %xdefine _voprf_pst1_generate_key _ %+ BORINGSSL_PREFIX %+ _voprf_pst1_generate_key %xdefine _voprf_pst1_issuer_key_from_bytes _ %+ BORINGSSL_PREFIX %+ _voprf_pst1_issuer_key_from_bytes %xdefine _voprf_pst1_read _ %+ BORINGSSL_PREFIX %+ _voprf_pst1_read %xdefine _voprf_pst1_sign _ %+ BORINGSSL_PREFIX %+ _voprf_pst1_sign %xdefine _voprf_pst1_sign_with_proof_scalar_for_testing _ %+ BORINGSSL_PREFIX %+ _voprf_pst1_sign_with_proof_scalar_for_testing %xdefine _voprf_pst1_unblind _ %+ BORINGSSL_PREFIX %+ _voprf_pst1_unblind %xdefine _vpaes_capable _ %+ BORINGSSL_PREFIX %+ _vpaes_capable %xdefine _vpaes_cbc_encrypt _ %+ BORINGSSL_PREFIX %+ _vpaes_cbc_encrypt %xdefine _vpaes_ctr32_encrypt_blocks _ %+ BORINGSSL_PREFIX %+ _vpaes_ctr32_encrypt_blocks %xdefine _vpaes_decrypt _ %+ BORINGSSL_PREFIX %+ _vpaes_decrypt %xdefine _vpaes_decrypt_key_to_bsaes _ %+ BORINGSSL_PREFIX %+ _vpaes_decrypt_key_to_bsaes %xdefine _vpaes_encrypt _ %+ BORINGSSL_PREFIX %+ _vpaes_encrypt %xdefine 
_vpaes_set_decrypt_key _ %+ BORINGSSL_PREFIX %+ _vpaes_set_decrypt_key %xdefine _vpaes_set_encrypt_key _ %+ BORINGSSL_PREFIX %+ _vpaes_set_encrypt_key %xdefine _x25519_asn1_meth _ %+ BORINGSSL_PREFIX %+ _x25519_asn1_meth %xdefine _x25519_ge_add _ %+ BORINGSSL_PREFIX %+ _x25519_ge_add %xdefine _x25519_ge_frombytes_vartime _ %+ BORINGSSL_PREFIX %+ _x25519_ge_frombytes_vartime %xdefine _x25519_ge_p1p1_to_p2 _ %+ BORINGSSL_PREFIX %+ _x25519_ge_p1p1_to_p2 %xdefine _x25519_ge_p1p1_to_p3 _ %+ BORINGSSL_PREFIX %+ _x25519_ge_p1p1_to_p3 %xdefine _x25519_ge_p3_to_cached _ %+ BORINGSSL_PREFIX %+ _x25519_ge_p3_to_cached %xdefine _x25519_ge_scalarmult _ %+ BORINGSSL_PREFIX %+ _x25519_ge_scalarmult %xdefine _x25519_ge_scalarmult_base _ %+ BORINGSSL_PREFIX %+ _x25519_ge_scalarmult_base %xdefine _x25519_ge_scalarmult_base_adx _ %+ BORINGSSL_PREFIX %+ _x25519_ge_scalarmult_base_adx %xdefine _x25519_ge_scalarmult_small_precomp _ %+ BORINGSSL_PREFIX %+ _x25519_ge_scalarmult_small_precomp %xdefine _x25519_ge_sub _ %+ BORINGSSL_PREFIX %+ _x25519_ge_sub %xdefine _x25519_ge_tobytes _ %+ BORINGSSL_PREFIX %+ _x25519_ge_tobytes %xdefine _x25519_pkey_meth _ %+ BORINGSSL_PREFIX %+ _x25519_pkey_meth %xdefine _x25519_sc_reduce _ %+ BORINGSSL_PREFIX %+ _x25519_sc_reduce %xdefine _x25519_scalar_mult_adx _ %+ BORINGSSL_PREFIX %+ _x25519_scalar_mult_adx %xdefine _x509V3_add_value_asn1_string _ %+ BORINGSSL_PREFIX %+ _x509V3_add_value_asn1_string %xdefine _x509_check_issued_with_callback _ %+ BORINGSSL_PREFIX %+ _x509_check_issued_with_callback %xdefine _x509_digest_sign_algorithm _ %+ BORINGSSL_PREFIX %+ _x509_digest_sign_algorithm %xdefine _x509_digest_verify_init _ %+ BORINGSSL_PREFIX %+ _x509_digest_verify_init %xdefine _x509_print_rsa_pss_params _ %+ BORINGSSL_PREFIX %+ _x509_print_rsa_pss_params %xdefine _x509_rsa_ctx_to_pss _ %+ BORINGSSL_PREFIX %+ _x509_rsa_ctx_to_pss %xdefine _x509_rsa_pss_to_ctx _ %+ BORINGSSL_PREFIX %+ _x509_rsa_pss_to_ctx %xdefine _x509v3_a2i_ipadd _ %+ BORINGSSL_PREFIX 
%+ _x509v3_a2i_ipadd %xdefine _x509v3_bytes_to_hex _ %+ BORINGSSL_PREFIX %+ _x509v3_bytes_to_hex %xdefine _x509v3_cache_extensions _ %+ BORINGSSL_PREFIX %+ _x509v3_cache_extensions %xdefine _x509v3_conf_name_matches _ %+ BORINGSSL_PREFIX %+ _x509v3_conf_name_matches %xdefine _x509v3_hex_to_bytes _ %+ BORINGSSL_PREFIX %+ _x509v3_hex_to_bytes %xdefine _x509v3_looks_like_dns_name _ %+ BORINGSSL_PREFIX %+ _x509v3_looks_like_dns_name %else %xdefine ACCESS_DESCRIPTION_free BORINGSSL_PREFIX %+ _ACCESS_DESCRIPTION_free %xdefine ACCESS_DESCRIPTION_new BORINGSSL_PREFIX %+ _ACCESS_DESCRIPTION_new %xdefine AES_CMAC BORINGSSL_PREFIX %+ _AES_CMAC %xdefine AES_cbc_encrypt BORINGSSL_PREFIX %+ _AES_cbc_encrypt %xdefine AES_cfb128_encrypt BORINGSSL_PREFIX %+ _AES_cfb128_encrypt %xdefine AES_ctr128_encrypt BORINGSSL_PREFIX %+ _AES_ctr128_encrypt %xdefine AES_decrypt BORINGSSL_PREFIX %+ _AES_decrypt %xdefine AES_ecb_encrypt BORINGSSL_PREFIX %+ _AES_ecb_encrypt %xdefine AES_encrypt BORINGSSL_PREFIX %+ _AES_encrypt %xdefine AES_ofb128_encrypt BORINGSSL_PREFIX %+ _AES_ofb128_encrypt %xdefine AES_set_decrypt_key BORINGSSL_PREFIX %+ _AES_set_decrypt_key %xdefine AES_set_encrypt_key BORINGSSL_PREFIX %+ _AES_set_encrypt_key %xdefine AES_unwrap_key BORINGSSL_PREFIX %+ _AES_unwrap_key %xdefine AES_unwrap_key_padded BORINGSSL_PREFIX %+ _AES_unwrap_key_padded %xdefine AES_wrap_key BORINGSSL_PREFIX %+ _AES_wrap_key %xdefine AES_wrap_key_padded BORINGSSL_PREFIX %+ _AES_wrap_key_padded %xdefine ASN1_ANY_it BORINGSSL_PREFIX %+ _ASN1_ANY_it %xdefine ASN1_BIT_STRING_check BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_check %xdefine ASN1_BIT_STRING_free BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_free %xdefine ASN1_BIT_STRING_get_bit BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_get_bit %xdefine ASN1_BIT_STRING_it BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_it %xdefine ASN1_BIT_STRING_new BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_new %xdefine ASN1_BIT_STRING_num_bytes BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_num_bytes %xdefine 
ASN1_BIT_STRING_set BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_set %xdefine ASN1_BIT_STRING_set_bit BORINGSSL_PREFIX %+ _ASN1_BIT_STRING_set_bit %xdefine ASN1_BMPSTRING_free BORINGSSL_PREFIX %+ _ASN1_BMPSTRING_free %xdefine ASN1_BMPSTRING_it BORINGSSL_PREFIX %+ _ASN1_BMPSTRING_it %xdefine ASN1_BMPSTRING_new BORINGSSL_PREFIX %+ _ASN1_BMPSTRING_new %xdefine ASN1_BOOLEAN_it BORINGSSL_PREFIX %+ _ASN1_BOOLEAN_it %xdefine ASN1_ENUMERATED_free BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_free %xdefine ASN1_ENUMERATED_get BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_get %xdefine ASN1_ENUMERATED_get_int64 BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_get_int64 %xdefine ASN1_ENUMERATED_get_uint64 BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_get_uint64 %xdefine ASN1_ENUMERATED_it BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_it %xdefine ASN1_ENUMERATED_new BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_new %xdefine ASN1_ENUMERATED_set BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_set %xdefine ASN1_ENUMERATED_set_int64 BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_set_int64 %xdefine ASN1_ENUMERATED_set_uint64 BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_set_uint64 %xdefine ASN1_ENUMERATED_to_BN BORINGSSL_PREFIX %+ _ASN1_ENUMERATED_to_BN %xdefine ASN1_FBOOLEAN_it BORINGSSL_PREFIX %+ _ASN1_FBOOLEAN_it %xdefine ASN1_GENERALIZEDTIME_adj BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_adj %xdefine ASN1_GENERALIZEDTIME_check BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_check %xdefine ASN1_GENERALIZEDTIME_free BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_free %xdefine ASN1_GENERALIZEDTIME_it BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_it %xdefine ASN1_GENERALIZEDTIME_new BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_new %xdefine ASN1_GENERALIZEDTIME_print BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_print %xdefine ASN1_GENERALIZEDTIME_set BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_set %xdefine ASN1_GENERALIZEDTIME_set_string BORINGSSL_PREFIX %+ _ASN1_GENERALIZEDTIME_set_string %xdefine ASN1_GENERALSTRING_free BORINGSSL_PREFIX %+ _ASN1_GENERALSTRING_free %xdefine ASN1_GENERALSTRING_it 
BORINGSSL_PREFIX %+ _ASN1_GENERALSTRING_it %xdefine ASN1_GENERALSTRING_new BORINGSSL_PREFIX %+ _ASN1_GENERALSTRING_new %xdefine ASN1_IA5STRING_free BORINGSSL_PREFIX %+ _ASN1_IA5STRING_free %xdefine ASN1_IA5STRING_it BORINGSSL_PREFIX %+ _ASN1_IA5STRING_it %xdefine ASN1_IA5STRING_new BORINGSSL_PREFIX %+ _ASN1_IA5STRING_new %xdefine ASN1_INTEGER_cmp BORINGSSL_PREFIX %+ _ASN1_INTEGER_cmp %xdefine ASN1_INTEGER_dup BORINGSSL_PREFIX %+ _ASN1_INTEGER_dup %xdefine ASN1_INTEGER_free BORINGSSL_PREFIX %+ _ASN1_INTEGER_free %xdefine ASN1_INTEGER_get BORINGSSL_PREFIX %+ _ASN1_INTEGER_get %xdefine ASN1_INTEGER_get_int64 BORINGSSL_PREFIX %+ _ASN1_INTEGER_get_int64 %xdefine ASN1_INTEGER_get_uint64 BORINGSSL_PREFIX %+ _ASN1_INTEGER_get_uint64 %xdefine ASN1_INTEGER_it BORINGSSL_PREFIX %+ _ASN1_INTEGER_it %xdefine ASN1_INTEGER_new BORINGSSL_PREFIX %+ _ASN1_INTEGER_new %xdefine ASN1_INTEGER_set BORINGSSL_PREFIX %+ _ASN1_INTEGER_set %xdefine ASN1_INTEGER_set_int64 BORINGSSL_PREFIX %+ _ASN1_INTEGER_set_int64 %xdefine ASN1_INTEGER_set_uint64 BORINGSSL_PREFIX %+ _ASN1_INTEGER_set_uint64 %xdefine ASN1_INTEGER_to_BN BORINGSSL_PREFIX %+ _ASN1_INTEGER_to_BN %xdefine ASN1_NULL_free BORINGSSL_PREFIX %+ _ASN1_NULL_free %xdefine ASN1_NULL_it BORINGSSL_PREFIX %+ _ASN1_NULL_it %xdefine ASN1_NULL_new BORINGSSL_PREFIX %+ _ASN1_NULL_new %xdefine ASN1_OBJECT_create BORINGSSL_PREFIX %+ _ASN1_OBJECT_create %xdefine ASN1_OBJECT_free BORINGSSL_PREFIX %+ _ASN1_OBJECT_free %xdefine ASN1_OBJECT_it BORINGSSL_PREFIX %+ _ASN1_OBJECT_it %xdefine ASN1_OBJECT_new BORINGSSL_PREFIX %+ _ASN1_OBJECT_new %xdefine ASN1_OCTET_STRING_cmp BORINGSSL_PREFIX %+ _ASN1_OCTET_STRING_cmp %xdefine ASN1_OCTET_STRING_dup BORINGSSL_PREFIX %+ _ASN1_OCTET_STRING_dup %xdefine ASN1_OCTET_STRING_free BORINGSSL_PREFIX %+ _ASN1_OCTET_STRING_free %xdefine ASN1_OCTET_STRING_it BORINGSSL_PREFIX %+ _ASN1_OCTET_STRING_it %xdefine ASN1_OCTET_STRING_new BORINGSSL_PREFIX %+ _ASN1_OCTET_STRING_new %xdefine ASN1_OCTET_STRING_set BORINGSSL_PREFIX %+ 
_ASN1_OCTET_STRING_set %xdefine ASN1_PRINTABLESTRING_free BORINGSSL_PREFIX %+ _ASN1_PRINTABLESTRING_free %xdefine ASN1_PRINTABLESTRING_it BORINGSSL_PREFIX %+ _ASN1_PRINTABLESTRING_it %xdefine ASN1_PRINTABLESTRING_new BORINGSSL_PREFIX %+ _ASN1_PRINTABLESTRING_new %xdefine ASN1_PRINTABLE_free BORINGSSL_PREFIX %+ _ASN1_PRINTABLE_free %xdefine ASN1_PRINTABLE_it BORINGSSL_PREFIX %+ _ASN1_PRINTABLE_it %xdefine ASN1_PRINTABLE_new BORINGSSL_PREFIX %+ _ASN1_PRINTABLE_new %xdefine ASN1_SEQUENCE_it BORINGSSL_PREFIX %+ _ASN1_SEQUENCE_it %xdefine ASN1_STRING_TABLE_add BORINGSSL_PREFIX %+ _ASN1_STRING_TABLE_add %xdefine ASN1_STRING_TABLE_cleanup BORINGSSL_PREFIX %+ _ASN1_STRING_TABLE_cleanup %xdefine ASN1_STRING_cmp BORINGSSL_PREFIX %+ _ASN1_STRING_cmp %xdefine ASN1_STRING_copy BORINGSSL_PREFIX %+ _ASN1_STRING_copy %xdefine ASN1_STRING_data BORINGSSL_PREFIX %+ _ASN1_STRING_data %xdefine ASN1_STRING_dup BORINGSSL_PREFIX %+ _ASN1_STRING_dup %xdefine ASN1_STRING_free BORINGSSL_PREFIX %+ _ASN1_STRING_free %xdefine ASN1_STRING_get0_data BORINGSSL_PREFIX %+ _ASN1_STRING_get0_data %xdefine ASN1_STRING_get_default_mask BORINGSSL_PREFIX %+ _ASN1_STRING_get_default_mask %xdefine ASN1_STRING_length BORINGSSL_PREFIX %+ _ASN1_STRING_length %xdefine ASN1_STRING_new BORINGSSL_PREFIX %+ _ASN1_STRING_new %xdefine ASN1_STRING_print BORINGSSL_PREFIX %+ _ASN1_STRING_print %xdefine ASN1_STRING_print_ex BORINGSSL_PREFIX %+ _ASN1_STRING_print_ex %xdefine ASN1_STRING_print_ex_fp BORINGSSL_PREFIX %+ _ASN1_STRING_print_ex_fp %xdefine ASN1_STRING_set BORINGSSL_PREFIX %+ _ASN1_STRING_set %xdefine ASN1_STRING_set0 BORINGSSL_PREFIX %+ _ASN1_STRING_set0 %xdefine ASN1_STRING_set_by_NID BORINGSSL_PREFIX %+ _ASN1_STRING_set_by_NID %xdefine ASN1_STRING_set_default_mask BORINGSSL_PREFIX %+ _ASN1_STRING_set_default_mask %xdefine ASN1_STRING_set_default_mask_asc BORINGSSL_PREFIX %+ _ASN1_STRING_set_default_mask_asc %xdefine ASN1_STRING_to_UTF8 BORINGSSL_PREFIX %+ _ASN1_STRING_to_UTF8 %xdefine ASN1_STRING_type 
BORINGSSL_PREFIX %+ _ASN1_STRING_type %xdefine ASN1_STRING_type_new BORINGSSL_PREFIX %+ _ASN1_STRING_type_new %xdefine ASN1_T61STRING_free BORINGSSL_PREFIX %+ _ASN1_T61STRING_free %xdefine ASN1_T61STRING_it BORINGSSL_PREFIX %+ _ASN1_T61STRING_it %xdefine ASN1_T61STRING_new BORINGSSL_PREFIX %+ _ASN1_T61STRING_new %xdefine ASN1_TBOOLEAN_it BORINGSSL_PREFIX %+ _ASN1_TBOOLEAN_it %xdefine ASN1_TIME_adj BORINGSSL_PREFIX %+ _ASN1_TIME_adj %xdefine ASN1_TIME_check BORINGSSL_PREFIX %+ _ASN1_TIME_check %xdefine ASN1_TIME_diff BORINGSSL_PREFIX %+ _ASN1_TIME_diff %xdefine ASN1_TIME_free BORINGSSL_PREFIX %+ _ASN1_TIME_free %xdefine ASN1_TIME_it BORINGSSL_PREFIX %+ _ASN1_TIME_it %xdefine ASN1_TIME_new BORINGSSL_PREFIX %+ _ASN1_TIME_new %xdefine ASN1_TIME_print BORINGSSL_PREFIX %+ _ASN1_TIME_print %xdefine ASN1_TIME_set BORINGSSL_PREFIX %+ _ASN1_TIME_set %xdefine ASN1_TIME_set_posix BORINGSSL_PREFIX %+ _ASN1_TIME_set_posix %xdefine ASN1_TIME_set_string BORINGSSL_PREFIX %+ _ASN1_TIME_set_string %xdefine ASN1_TIME_set_string_X509 BORINGSSL_PREFIX %+ _ASN1_TIME_set_string_X509 %xdefine ASN1_TIME_to_generalizedtime BORINGSSL_PREFIX %+ _ASN1_TIME_to_generalizedtime %xdefine ASN1_TIME_to_posix BORINGSSL_PREFIX %+ _ASN1_TIME_to_posix %xdefine ASN1_TIME_to_posix_nonstandard BORINGSSL_PREFIX %+ _ASN1_TIME_to_posix_nonstandard %xdefine ASN1_TIME_to_time_t BORINGSSL_PREFIX %+ _ASN1_TIME_to_time_t %xdefine ASN1_TYPE_cmp BORINGSSL_PREFIX %+ _ASN1_TYPE_cmp %xdefine ASN1_TYPE_free BORINGSSL_PREFIX %+ _ASN1_TYPE_free %xdefine ASN1_TYPE_get BORINGSSL_PREFIX %+ _ASN1_TYPE_get %xdefine ASN1_TYPE_new BORINGSSL_PREFIX %+ _ASN1_TYPE_new %xdefine ASN1_TYPE_set BORINGSSL_PREFIX %+ _ASN1_TYPE_set %xdefine ASN1_TYPE_set1 BORINGSSL_PREFIX %+ _ASN1_TYPE_set1 %xdefine ASN1_UNIVERSALSTRING_free BORINGSSL_PREFIX %+ _ASN1_UNIVERSALSTRING_free %xdefine ASN1_UNIVERSALSTRING_it BORINGSSL_PREFIX %+ _ASN1_UNIVERSALSTRING_it %xdefine ASN1_UNIVERSALSTRING_new BORINGSSL_PREFIX %+ _ASN1_UNIVERSALSTRING_new %xdefine 
ASN1_UTCTIME_adj BORINGSSL_PREFIX %+ _ASN1_UTCTIME_adj %xdefine ASN1_UTCTIME_check BORINGSSL_PREFIX %+ _ASN1_UTCTIME_check %xdefine ASN1_UTCTIME_free BORINGSSL_PREFIX %+ _ASN1_UTCTIME_free %xdefine ASN1_UTCTIME_it BORINGSSL_PREFIX %+ _ASN1_UTCTIME_it %xdefine ASN1_UTCTIME_new BORINGSSL_PREFIX %+ _ASN1_UTCTIME_new %xdefine ASN1_UTCTIME_print BORINGSSL_PREFIX %+ _ASN1_UTCTIME_print %xdefine ASN1_UTCTIME_set BORINGSSL_PREFIX %+ _ASN1_UTCTIME_set %xdefine ASN1_UTCTIME_set_string BORINGSSL_PREFIX %+ _ASN1_UTCTIME_set_string %xdefine ASN1_UTF8STRING_free BORINGSSL_PREFIX %+ _ASN1_UTF8STRING_free %xdefine ASN1_UTF8STRING_it BORINGSSL_PREFIX %+ _ASN1_UTF8STRING_it %xdefine ASN1_UTF8STRING_new BORINGSSL_PREFIX %+ _ASN1_UTF8STRING_new %xdefine ASN1_VISIBLESTRING_free BORINGSSL_PREFIX %+ _ASN1_VISIBLESTRING_free %xdefine ASN1_VISIBLESTRING_it BORINGSSL_PREFIX %+ _ASN1_VISIBLESTRING_it %xdefine ASN1_VISIBLESTRING_new BORINGSSL_PREFIX %+ _ASN1_VISIBLESTRING_new %xdefine ASN1_digest BORINGSSL_PREFIX %+ _ASN1_digest %xdefine ASN1_generate_v3 BORINGSSL_PREFIX %+ _ASN1_generate_v3 %xdefine ASN1_get_object BORINGSSL_PREFIX %+ _ASN1_get_object %xdefine ASN1_item_d2i BORINGSSL_PREFIX %+ _ASN1_item_d2i %xdefine ASN1_item_d2i_bio BORINGSSL_PREFIX %+ _ASN1_item_d2i_bio %xdefine ASN1_item_d2i_fp BORINGSSL_PREFIX %+ _ASN1_item_d2i_fp %xdefine ASN1_item_digest BORINGSSL_PREFIX %+ _ASN1_item_digest %xdefine ASN1_item_dup BORINGSSL_PREFIX %+ _ASN1_item_dup %xdefine ASN1_item_ex_d2i BORINGSSL_PREFIX %+ _ASN1_item_ex_d2i %xdefine ASN1_item_ex_free BORINGSSL_PREFIX %+ _ASN1_item_ex_free %xdefine ASN1_item_ex_i2d BORINGSSL_PREFIX %+ _ASN1_item_ex_i2d %xdefine ASN1_item_ex_new BORINGSSL_PREFIX %+ _ASN1_item_ex_new %xdefine ASN1_item_free BORINGSSL_PREFIX %+ _ASN1_item_free %xdefine ASN1_item_i2d BORINGSSL_PREFIX %+ _ASN1_item_i2d %xdefine ASN1_item_i2d_bio BORINGSSL_PREFIX %+ _ASN1_item_i2d_bio %xdefine ASN1_item_i2d_fp BORINGSSL_PREFIX %+ _ASN1_item_i2d_fp %xdefine ASN1_item_new BORINGSSL_PREFIX 
%+ _ASN1_item_new %xdefine ASN1_item_pack BORINGSSL_PREFIX %+ _ASN1_item_pack %xdefine ASN1_item_sign BORINGSSL_PREFIX %+ _ASN1_item_sign %xdefine ASN1_item_sign_ctx BORINGSSL_PREFIX %+ _ASN1_item_sign_ctx %xdefine ASN1_item_unpack BORINGSSL_PREFIX %+ _ASN1_item_unpack %xdefine ASN1_item_verify BORINGSSL_PREFIX %+ _ASN1_item_verify %xdefine ASN1_mbstring_copy BORINGSSL_PREFIX %+ _ASN1_mbstring_copy %xdefine ASN1_mbstring_ncopy BORINGSSL_PREFIX %+ _ASN1_mbstring_ncopy %xdefine ASN1_object_size BORINGSSL_PREFIX %+ _ASN1_object_size %xdefine ASN1_primitive_free BORINGSSL_PREFIX %+ _ASN1_primitive_free %xdefine ASN1_put_eoc BORINGSSL_PREFIX %+ _ASN1_put_eoc %xdefine ASN1_put_object BORINGSSL_PREFIX %+ _ASN1_put_object %xdefine ASN1_tag2bit BORINGSSL_PREFIX %+ _ASN1_tag2bit %xdefine ASN1_tag2str BORINGSSL_PREFIX %+ _ASN1_tag2str %xdefine ASN1_template_free BORINGSSL_PREFIX %+ _ASN1_template_free %xdefine AUTHORITY_INFO_ACCESS_free BORINGSSL_PREFIX %+ _AUTHORITY_INFO_ACCESS_free %xdefine AUTHORITY_INFO_ACCESS_it BORINGSSL_PREFIX %+ _AUTHORITY_INFO_ACCESS_it %xdefine AUTHORITY_INFO_ACCESS_new BORINGSSL_PREFIX %+ _AUTHORITY_INFO_ACCESS_new %xdefine AUTHORITY_KEYID_free BORINGSSL_PREFIX %+ _AUTHORITY_KEYID_free %xdefine AUTHORITY_KEYID_it BORINGSSL_PREFIX %+ _AUTHORITY_KEYID_it %xdefine AUTHORITY_KEYID_new BORINGSSL_PREFIX %+ _AUTHORITY_KEYID_new %xdefine BASIC_CONSTRAINTS_free BORINGSSL_PREFIX %+ _BASIC_CONSTRAINTS_free %xdefine BASIC_CONSTRAINTS_it BORINGSSL_PREFIX %+ _BASIC_CONSTRAINTS_it %xdefine BASIC_CONSTRAINTS_new BORINGSSL_PREFIX %+ _BASIC_CONSTRAINTS_new %xdefine BCM_fips_186_2_prf BORINGSSL_PREFIX %+ _BCM_fips_186_2_prf %xdefine BCM_mldsa65_generate_key BORINGSSL_PREFIX %+ _BCM_mldsa65_generate_key %xdefine BCM_mldsa65_generate_key_external_entropy BORINGSSL_PREFIX %+ _BCM_mldsa65_generate_key_external_entropy %xdefine BCM_mldsa65_marshal_private_key BORINGSSL_PREFIX %+ _BCM_mldsa65_marshal_private_key %xdefine BCM_mldsa65_marshal_public_key BORINGSSL_PREFIX %+ 
_BCM_mldsa65_marshal_public_key %xdefine BCM_mldsa65_parse_private_key BORINGSSL_PREFIX %+ _BCM_mldsa65_parse_private_key %xdefine BCM_mldsa65_parse_public_key BORINGSSL_PREFIX %+ _BCM_mldsa65_parse_public_key %xdefine BCM_mldsa65_private_key_from_seed BORINGSSL_PREFIX %+ _BCM_mldsa65_private_key_from_seed %xdefine BCM_mldsa65_public_from_private BORINGSSL_PREFIX %+ _BCM_mldsa65_public_from_private %xdefine BCM_mldsa65_sign BORINGSSL_PREFIX %+ _BCM_mldsa65_sign %xdefine BCM_mldsa65_sign_internal BORINGSSL_PREFIX %+ _BCM_mldsa65_sign_internal %xdefine BCM_mldsa65_verify BORINGSSL_PREFIX %+ _BCM_mldsa65_verify %xdefine BCM_mldsa65_verify_internal BORINGSSL_PREFIX %+ _BCM_mldsa65_verify_internal %xdefine BCM_mldsa87_generate_key BORINGSSL_PREFIX %+ _BCM_mldsa87_generate_key %xdefine BCM_mldsa87_generate_key_external_entropy BORINGSSL_PREFIX %+ _BCM_mldsa87_generate_key_external_entropy %xdefine BCM_mldsa87_marshal_private_key BORINGSSL_PREFIX %+ _BCM_mldsa87_marshal_private_key %xdefine BCM_mldsa87_marshal_public_key BORINGSSL_PREFIX %+ _BCM_mldsa87_marshal_public_key %xdefine BCM_mldsa87_parse_private_key BORINGSSL_PREFIX %+ _BCM_mldsa87_parse_private_key %xdefine BCM_mldsa87_parse_public_key BORINGSSL_PREFIX %+ _BCM_mldsa87_parse_public_key %xdefine BCM_mldsa87_private_key_from_seed BORINGSSL_PREFIX %+ _BCM_mldsa87_private_key_from_seed %xdefine BCM_mldsa87_public_from_private BORINGSSL_PREFIX %+ _BCM_mldsa87_public_from_private %xdefine BCM_mldsa87_sign BORINGSSL_PREFIX %+ _BCM_mldsa87_sign %xdefine BCM_mldsa87_sign_internal BORINGSSL_PREFIX %+ _BCM_mldsa87_sign_internal %xdefine BCM_mldsa87_verify BORINGSSL_PREFIX %+ _BCM_mldsa87_verify %xdefine BCM_mldsa87_verify_internal BORINGSSL_PREFIX %+ _BCM_mldsa87_verify_internal %xdefine BCM_mlkem1024_decap BORINGSSL_PREFIX %+ _BCM_mlkem1024_decap %xdefine BCM_mlkem1024_encap BORINGSSL_PREFIX %+ _BCM_mlkem1024_encap %xdefine BCM_mlkem1024_encap_external_entropy BORINGSSL_PREFIX %+ _BCM_mlkem1024_encap_external_entropy 
%xdefine BCM_mlkem1024_generate_key BORINGSSL_PREFIX %+ _BCM_mlkem1024_generate_key %xdefine BCM_mlkem1024_generate_key_external_seed BORINGSSL_PREFIX %+ _BCM_mlkem1024_generate_key_external_seed %xdefine BCM_mlkem1024_marshal_private_key BORINGSSL_PREFIX %+ _BCM_mlkem1024_marshal_private_key %xdefine BCM_mlkem1024_marshal_public_key BORINGSSL_PREFIX %+ _BCM_mlkem1024_marshal_public_key %xdefine BCM_mlkem1024_parse_private_key BORINGSSL_PREFIX %+ _BCM_mlkem1024_parse_private_key %xdefine BCM_mlkem1024_parse_public_key BORINGSSL_PREFIX %+ _BCM_mlkem1024_parse_public_key %xdefine BCM_mlkem1024_private_key_from_seed BORINGSSL_PREFIX %+ _BCM_mlkem1024_private_key_from_seed %xdefine BCM_mlkem1024_public_from_private BORINGSSL_PREFIX %+ _BCM_mlkem1024_public_from_private %xdefine BCM_mlkem768_decap BORINGSSL_PREFIX %+ _BCM_mlkem768_decap %xdefine BCM_mlkem768_encap BORINGSSL_PREFIX %+ _BCM_mlkem768_encap %xdefine BCM_mlkem768_encap_external_entropy BORINGSSL_PREFIX %+ _BCM_mlkem768_encap_external_entropy %xdefine BCM_mlkem768_generate_key BORINGSSL_PREFIX %+ _BCM_mlkem768_generate_key %xdefine BCM_mlkem768_generate_key_external_seed BORINGSSL_PREFIX %+ _BCM_mlkem768_generate_key_external_seed %xdefine BCM_mlkem768_marshal_private_key BORINGSSL_PREFIX %+ _BCM_mlkem768_marshal_private_key %xdefine BCM_mlkem768_marshal_public_key BORINGSSL_PREFIX %+ _BCM_mlkem768_marshal_public_key %xdefine BCM_mlkem768_parse_private_key BORINGSSL_PREFIX %+ _BCM_mlkem768_parse_private_key %xdefine BCM_mlkem768_parse_public_key BORINGSSL_PREFIX %+ _BCM_mlkem768_parse_public_key %xdefine BCM_mlkem768_private_key_from_seed BORINGSSL_PREFIX %+ _BCM_mlkem768_private_key_from_seed %xdefine BCM_mlkem768_public_from_private BORINGSSL_PREFIX %+ _BCM_mlkem768_public_from_private %xdefine BCM_rand_bytes BORINGSSL_PREFIX %+ _BCM_rand_bytes %xdefine BCM_rand_bytes_hwrng BORINGSSL_PREFIX %+ _BCM_rand_bytes_hwrng %xdefine BCM_rand_bytes_with_additional_data BORINGSSL_PREFIX %+ 
_BCM_rand_bytes_with_additional_data %xdefine BCM_sha1_final BORINGSSL_PREFIX %+ _BCM_sha1_final %xdefine BCM_sha1_init BORINGSSL_PREFIX %+ _BCM_sha1_init %xdefine BCM_sha1_transform BORINGSSL_PREFIX %+ _BCM_sha1_transform %xdefine BCM_sha1_update BORINGSSL_PREFIX %+ _BCM_sha1_update %xdefine BCM_sha224_final BORINGSSL_PREFIX %+ _BCM_sha224_final %xdefine BCM_sha224_init BORINGSSL_PREFIX %+ _BCM_sha224_init %xdefine BCM_sha224_update BORINGSSL_PREFIX %+ _BCM_sha224_update %xdefine BCM_sha256_final BORINGSSL_PREFIX %+ _BCM_sha256_final %xdefine BCM_sha256_init BORINGSSL_PREFIX %+ _BCM_sha256_init %xdefine BCM_sha256_transform BORINGSSL_PREFIX %+ _BCM_sha256_transform %xdefine BCM_sha256_transform_blocks BORINGSSL_PREFIX %+ _BCM_sha256_transform_blocks %xdefine BCM_sha256_update BORINGSSL_PREFIX %+ _BCM_sha256_update %xdefine BCM_sha384_final BORINGSSL_PREFIX %+ _BCM_sha384_final %xdefine BCM_sha384_init BORINGSSL_PREFIX %+ _BCM_sha384_init %xdefine BCM_sha384_update BORINGSSL_PREFIX %+ _BCM_sha384_update %xdefine BCM_sha512_256_final BORINGSSL_PREFIX %+ _BCM_sha512_256_final %xdefine BCM_sha512_256_init BORINGSSL_PREFIX %+ _BCM_sha512_256_init %xdefine BCM_sha512_256_update BORINGSSL_PREFIX %+ _BCM_sha512_256_update %xdefine BCM_sha512_final BORINGSSL_PREFIX %+ _BCM_sha512_final %xdefine BCM_sha512_init BORINGSSL_PREFIX %+ _BCM_sha512_init %xdefine BCM_sha512_transform BORINGSSL_PREFIX %+ _BCM_sha512_transform %xdefine BCM_sha512_update BORINGSSL_PREFIX %+ _BCM_sha512_update %xdefine BCM_slhdsa_sha2_128s_generate_key BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_generate_key %xdefine BCM_slhdsa_sha2_128s_generate_key_from_seed BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_generate_key_from_seed %xdefine BCM_slhdsa_sha2_128s_prehash_sign BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_prehash_sign %xdefine BCM_slhdsa_sha2_128s_prehash_verify BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_prehash_verify %xdefine BCM_slhdsa_sha2_128s_public_from_private BORINGSSL_PREFIX %+ 
_BCM_slhdsa_sha2_128s_public_from_private %xdefine BCM_slhdsa_sha2_128s_sign BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_sign %xdefine BCM_slhdsa_sha2_128s_sign_internal BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_sign_internal %xdefine BCM_slhdsa_sha2_128s_verify BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_verify %xdefine BCM_slhdsa_sha2_128s_verify_internal BORINGSSL_PREFIX %+ _BCM_slhdsa_sha2_128s_verify_internal %xdefine BIO_append_filename BORINGSSL_PREFIX %+ _BIO_append_filename %xdefine BIO_callback_ctrl BORINGSSL_PREFIX %+ _BIO_callback_ctrl %xdefine BIO_clear_flags BORINGSSL_PREFIX %+ _BIO_clear_flags %xdefine BIO_clear_retry_flags BORINGSSL_PREFIX %+ _BIO_clear_retry_flags %xdefine BIO_copy_next_retry BORINGSSL_PREFIX %+ _BIO_copy_next_retry %xdefine BIO_ctrl BORINGSSL_PREFIX %+ _BIO_ctrl %xdefine BIO_ctrl_get_read_request BORINGSSL_PREFIX %+ _BIO_ctrl_get_read_request %xdefine BIO_ctrl_get_write_guarantee BORINGSSL_PREFIX %+ _BIO_ctrl_get_write_guarantee %xdefine BIO_ctrl_pending BORINGSSL_PREFIX %+ _BIO_ctrl_pending %xdefine BIO_do_connect BORINGSSL_PREFIX %+ _BIO_do_connect %xdefine BIO_eof BORINGSSL_PREFIX %+ _BIO_eof %xdefine BIO_f_ssl BORINGSSL_PREFIX %+ _BIO_f_ssl %xdefine BIO_find_type BORINGSSL_PREFIX %+ _BIO_find_type %xdefine BIO_flush BORINGSSL_PREFIX %+ _BIO_flush %xdefine BIO_free BORINGSSL_PREFIX %+ _BIO_free %xdefine BIO_free_all BORINGSSL_PREFIX %+ _BIO_free_all %xdefine BIO_get_data BORINGSSL_PREFIX %+ _BIO_get_data %xdefine BIO_get_ex_data BORINGSSL_PREFIX %+ _BIO_get_ex_data %xdefine BIO_get_ex_new_index BORINGSSL_PREFIX %+ _BIO_get_ex_new_index %xdefine BIO_get_fd BORINGSSL_PREFIX %+ _BIO_get_fd %xdefine BIO_get_fp BORINGSSL_PREFIX %+ _BIO_get_fp %xdefine BIO_get_init BORINGSSL_PREFIX %+ _BIO_get_init %xdefine BIO_get_mem_data BORINGSSL_PREFIX %+ _BIO_get_mem_data %xdefine BIO_get_mem_ptr BORINGSSL_PREFIX %+ _BIO_get_mem_ptr %xdefine BIO_get_new_index BORINGSSL_PREFIX %+ _BIO_get_new_index %xdefine BIO_get_retry_flags BORINGSSL_PREFIX %+ 
_BIO_get_retry_flags %xdefine BIO_get_retry_reason BORINGSSL_PREFIX %+ _BIO_get_retry_reason %xdefine BIO_get_shutdown BORINGSSL_PREFIX %+ _BIO_get_shutdown %xdefine BIO_gets BORINGSSL_PREFIX %+ _BIO_gets %xdefine BIO_hexdump BORINGSSL_PREFIX %+ _BIO_hexdump %xdefine BIO_indent BORINGSSL_PREFIX %+ _BIO_indent %xdefine BIO_int_ctrl BORINGSSL_PREFIX %+ _BIO_int_ctrl %xdefine BIO_mem_contents BORINGSSL_PREFIX %+ _BIO_mem_contents %xdefine BIO_meth_free BORINGSSL_PREFIX %+ _BIO_meth_free %xdefine BIO_meth_new BORINGSSL_PREFIX %+ _BIO_meth_new %xdefine BIO_meth_set_create BORINGSSL_PREFIX %+ _BIO_meth_set_create %xdefine BIO_meth_set_ctrl BORINGSSL_PREFIX %+ _BIO_meth_set_ctrl %xdefine BIO_meth_set_destroy BORINGSSL_PREFIX %+ _BIO_meth_set_destroy %xdefine BIO_meth_set_gets BORINGSSL_PREFIX %+ _BIO_meth_set_gets %xdefine BIO_meth_set_puts BORINGSSL_PREFIX %+ _BIO_meth_set_puts %xdefine BIO_meth_set_read BORINGSSL_PREFIX %+ _BIO_meth_set_read %xdefine BIO_meth_set_write BORINGSSL_PREFIX %+ _BIO_meth_set_write %xdefine BIO_method_type BORINGSSL_PREFIX %+ _BIO_method_type %xdefine BIO_new BORINGSSL_PREFIX %+ _BIO_new %xdefine BIO_new_bio_pair BORINGSSL_PREFIX %+ _BIO_new_bio_pair %xdefine BIO_new_connect BORINGSSL_PREFIX %+ _BIO_new_connect %xdefine BIO_new_fd BORINGSSL_PREFIX %+ _BIO_new_fd %xdefine BIO_new_file BORINGSSL_PREFIX %+ _BIO_new_file %xdefine BIO_new_fp BORINGSSL_PREFIX %+ _BIO_new_fp %xdefine BIO_new_mem_buf BORINGSSL_PREFIX %+ _BIO_new_mem_buf %xdefine BIO_new_socket BORINGSSL_PREFIX %+ _BIO_new_socket %xdefine BIO_next BORINGSSL_PREFIX %+ _BIO_next %xdefine BIO_number_read BORINGSSL_PREFIX %+ _BIO_number_read %xdefine BIO_number_written BORINGSSL_PREFIX %+ _BIO_number_written %xdefine BIO_pending BORINGSSL_PREFIX %+ _BIO_pending %xdefine BIO_pop BORINGSSL_PREFIX %+ _BIO_pop %xdefine BIO_printf BORINGSSL_PREFIX %+ _BIO_printf %xdefine BIO_ptr_ctrl BORINGSSL_PREFIX %+ _BIO_ptr_ctrl %xdefine BIO_push BORINGSSL_PREFIX %+ _BIO_push %xdefine BIO_puts 
BORINGSSL_PREFIX %+ _BIO_puts %xdefine BIO_read BORINGSSL_PREFIX %+ _BIO_read %xdefine BIO_read_asn1 BORINGSSL_PREFIX %+ _BIO_read_asn1 %xdefine BIO_read_filename BORINGSSL_PREFIX %+ _BIO_read_filename %xdefine BIO_reset BORINGSSL_PREFIX %+ _BIO_reset %xdefine BIO_rw_filename BORINGSSL_PREFIX %+ _BIO_rw_filename %xdefine BIO_s_connect BORINGSSL_PREFIX %+ _BIO_s_connect %xdefine BIO_s_fd BORINGSSL_PREFIX %+ _BIO_s_fd %xdefine BIO_s_file BORINGSSL_PREFIX %+ _BIO_s_file %xdefine BIO_s_mem BORINGSSL_PREFIX %+ _BIO_s_mem %xdefine BIO_s_socket BORINGSSL_PREFIX %+ _BIO_s_socket %xdefine BIO_seek BORINGSSL_PREFIX %+ _BIO_seek %xdefine BIO_set_close BORINGSSL_PREFIX %+ _BIO_set_close %xdefine BIO_set_conn_hostname BORINGSSL_PREFIX %+ _BIO_set_conn_hostname %xdefine BIO_set_conn_int_port BORINGSSL_PREFIX %+ _BIO_set_conn_int_port %xdefine BIO_set_conn_port BORINGSSL_PREFIX %+ _BIO_set_conn_port %xdefine BIO_set_data BORINGSSL_PREFIX %+ _BIO_set_data %xdefine BIO_set_ex_data BORINGSSL_PREFIX %+ _BIO_set_ex_data %xdefine BIO_set_fd BORINGSSL_PREFIX %+ _BIO_set_fd %xdefine BIO_set_flags BORINGSSL_PREFIX %+ _BIO_set_flags %xdefine BIO_set_fp BORINGSSL_PREFIX %+ _BIO_set_fp %xdefine BIO_set_init BORINGSSL_PREFIX %+ _BIO_set_init %xdefine BIO_set_mem_buf BORINGSSL_PREFIX %+ _BIO_set_mem_buf %xdefine BIO_set_mem_eof_return BORINGSSL_PREFIX %+ _BIO_set_mem_eof_return %xdefine BIO_set_nbio BORINGSSL_PREFIX %+ _BIO_set_nbio %xdefine BIO_set_retry_read BORINGSSL_PREFIX %+ _BIO_set_retry_read %xdefine BIO_set_retry_reason BORINGSSL_PREFIX %+ _BIO_set_retry_reason %xdefine BIO_set_retry_special BORINGSSL_PREFIX %+ _BIO_set_retry_special %xdefine BIO_set_retry_write BORINGSSL_PREFIX %+ _BIO_set_retry_write %xdefine BIO_set_shutdown BORINGSSL_PREFIX %+ _BIO_set_shutdown %xdefine BIO_set_ssl BORINGSSL_PREFIX %+ _BIO_set_ssl %xdefine BIO_set_write_buffer_size BORINGSSL_PREFIX %+ _BIO_set_write_buffer_size %xdefine BIO_should_io_special BORINGSSL_PREFIX %+ _BIO_should_io_special %xdefine 
BIO_should_read BORINGSSL_PREFIX %+ _BIO_should_read %xdefine BIO_should_retry BORINGSSL_PREFIX %+ _BIO_should_retry %xdefine BIO_should_write BORINGSSL_PREFIX %+ _BIO_should_write %xdefine BIO_shutdown_wr BORINGSSL_PREFIX %+ _BIO_shutdown_wr %xdefine BIO_snprintf BORINGSSL_PREFIX %+ _BIO_snprintf %xdefine BIO_tell BORINGSSL_PREFIX %+ _BIO_tell %xdefine BIO_test_flags BORINGSSL_PREFIX %+ _BIO_test_flags %xdefine BIO_up_ref BORINGSSL_PREFIX %+ _BIO_up_ref %xdefine BIO_vfree BORINGSSL_PREFIX %+ _BIO_vfree %xdefine BIO_vsnprintf BORINGSSL_PREFIX %+ _BIO_vsnprintf %xdefine BIO_wpending BORINGSSL_PREFIX %+ _BIO_wpending %xdefine BIO_write BORINGSSL_PREFIX %+ _BIO_write %xdefine BIO_write_all BORINGSSL_PREFIX %+ _BIO_write_all %xdefine BIO_write_filename BORINGSSL_PREFIX %+ _BIO_write_filename %xdefine BLAKE2B256 BORINGSSL_PREFIX %+ _BLAKE2B256 %xdefine BLAKE2B256_Final BORINGSSL_PREFIX %+ _BLAKE2B256_Final %xdefine BLAKE2B256_Init BORINGSSL_PREFIX %+ _BLAKE2B256_Init %xdefine BLAKE2B256_Update BORINGSSL_PREFIX %+ _BLAKE2B256_Update %xdefine BN_BLINDING_convert BORINGSSL_PREFIX %+ _BN_BLINDING_convert %xdefine BN_BLINDING_free BORINGSSL_PREFIX %+ _BN_BLINDING_free %xdefine BN_BLINDING_invalidate BORINGSSL_PREFIX %+ _BN_BLINDING_invalidate %xdefine BN_BLINDING_invert BORINGSSL_PREFIX %+ _BN_BLINDING_invert %xdefine BN_BLINDING_new BORINGSSL_PREFIX %+ _BN_BLINDING_new %xdefine BN_CTX_end BORINGSSL_PREFIX %+ _BN_CTX_end %xdefine BN_CTX_free BORINGSSL_PREFIX %+ _BN_CTX_free %xdefine BN_CTX_get BORINGSSL_PREFIX %+ _BN_CTX_get %xdefine BN_CTX_new BORINGSSL_PREFIX %+ _BN_CTX_new %xdefine BN_CTX_start BORINGSSL_PREFIX %+ _BN_CTX_start %xdefine BN_GENCB_call BORINGSSL_PREFIX %+ _BN_GENCB_call %xdefine BN_GENCB_free BORINGSSL_PREFIX %+ _BN_GENCB_free %xdefine BN_GENCB_get_arg BORINGSSL_PREFIX %+ _BN_GENCB_get_arg %xdefine BN_GENCB_new BORINGSSL_PREFIX %+ _BN_GENCB_new %xdefine BN_GENCB_set BORINGSSL_PREFIX %+ _BN_GENCB_set %xdefine BN_MONT_CTX_copy BORINGSSL_PREFIX %+ 
_BN_MONT_CTX_copy %xdefine BN_MONT_CTX_free BORINGSSL_PREFIX %+ _BN_MONT_CTX_free %xdefine BN_MONT_CTX_new BORINGSSL_PREFIX %+ _BN_MONT_CTX_new %xdefine BN_MONT_CTX_new_consttime BORINGSSL_PREFIX %+ _BN_MONT_CTX_new_consttime %xdefine BN_MONT_CTX_new_for_modulus BORINGSSL_PREFIX %+ _BN_MONT_CTX_new_for_modulus %xdefine BN_MONT_CTX_set BORINGSSL_PREFIX %+ _BN_MONT_CTX_set %xdefine BN_MONT_CTX_set_locked BORINGSSL_PREFIX %+ _BN_MONT_CTX_set_locked %xdefine BN_abs_is_word BORINGSSL_PREFIX %+ _BN_abs_is_word %xdefine BN_add BORINGSSL_PREFIX %+ _BN_add %xdefine BN_add_word BORINGSSL_PREFIX %+ _BN_add_word %xdefine BN_asc2bn BORINGSSL_PREFIX %+ _BN_asc2bn %xdefine BN_bin2bn BORINGSSL_PREFIX %+ _BN_bin2bn %xdefine BN_bn2bin BORINGSSL_PREFIX %+ _BN_bn2bin %xdefine BN_bn2bin_padded BORINGSSL_PREFIX %+ _BN_bn2bin_padded %xdefine BN_bn2binpad BORINGSSL_PREFIX %+ _BN_bn2binpad %xdefine BN_bn2cbb_padded BORINGSSL_PREFIX %+ _BN_bn2cbb_padded %xdefine BN_bn2dec BORINGSSL_PREFIX %+ _BN_bn2dec %xdefine BN_bn2hex BORINGSSL_PREFIX %+ _BN_bn2hex %xdefine BN_bn2le_padded BORINGSSL_PREFIX %+ _BN_bn2le_padded %xdefine BN_bn2lebinpad BORINGSSL_PREFIX %+ _BN_bn2lebinpad %xdefine BN_bn2mpi BORINGSSL_PREFIX %+ _BN_bn2mpi %xdefine BN_clear BORINGSSL_PREFIX %+ _BN_clear %xdefine BN_clear_bit BORINGSSL_PREFIX %+ _BN_clear_bit %xdefine BN_clear_free BORINGSSL_PREFIX %+ _BN_clear_free %xdefine BN_cmp BORINGSSL_PREFIX %+ _BN_cmp %xdefine BN_cmp_word BORINGSSL_PREFIX %+ _BN_cmp_word %xdefine BN_copy BORINGSSL_PREFIX %+ _BN_copy %xdefine BN_count_low_zero_bits BORINGSSL_PREFIX %+ _BN_count_low_zero_bits %xdefine BN_dec2bn BORINGSSL_PREFIX %+ _BN_dec2bn %xdefine BN_div BORINGSSL_PREFIX %+ _BN_div %xdefine BN_div_word BORINGSSL_PREFIX %+ _BN_div_word %xdefine BN_dup BORINGSSL_PREFIX %+ _BN_dup %xdefine BN_enhanced_miller_rabin_primality_test BORINGSSL_PREFIX %+ _BN_enhanced_miller_rabin_primality_test %xdefine BN_equal_consttime BORINGSSL_PREFIX %+ _BN_equal_consttime %xdefine BN_exp BORINGSSL_PREFIX 
%+ _BN_exp %xdefine BN_free BORINGSSL_PREFIX %+ _BN_free %xdefine BN_from_montgomery BORINGSSL_PREFIX %+ _BN_from_montgomery %xdefine BN_gcd BORINGSSL_PREFIX %+ _BN_gcd %xdefine BN_generate_prime_ex BORINGSSL_PREFIX %+ _BN_generate_prime_ex %xdefine BN_get_rfc3526_prime_1536 BORINGSSL_PREFIX %+ _BN_get_rfc3526_prime_1536 %xdefine BN_get_rfc3526_prime_2048 BORINGSSL_PREFIX %+ _BN_get_rfc3526_prime_2048 %xdefine BN_get_rfc3526_prime_3072 BORINGSSL_PREFIX %+ _BN_get_rfc3526_prime_3072 %xdefine BN_get_rfc3526_prime_4096 BORINGSSL_PREFIX %+ _BN_get_rfc3526_prime_4096 %xdefine BN_get_rfc3526_prime_6144 BORINGSSL_PREFIX %+ _BN_get_rfc3526_prime_6144 %xdefine BN_get_rfc3526_prime_8192 BORINGSSL_PREFIX %+ _BN_get_rfc3526_prime_8192 %xdefine BN_get_u64 BORINGSSL_PREFIX %+ _BN_get_u64 %xdefine BN_get_word BORINGSSL_PREFIX %+ _BN_get_word %xdefine BN_hex2bn BORINGSSL_PREFIX %+ _BN_hex2bn %xdefine BN_init BORINGSSL_PREFIX %+ _BN_init %xdefine BN_is_bit_set BORINGSSL_PREFIX %+ _BN_is_bit_set %xdefine BN_is_negative BORINGSSL_PREFIX %+ _BN_is_negative %xdefine BN_is_odd BORINGSSL_PREFIX %+ _BN_is_odd %xdefine BN_is_one BORINGSSL_PREFIX %+ _BN_is_one %xdefine BN_is_pow2 BORINGSSL_PREFIX %+ _BN_is_pow2 %xdefine BN_is_prime_ex BORINGSSL_PREFIX %+ _BN_is_prime_ex %xdefine BN_is_prime_fasttest_ex BORINGSSL_PREFIX %+ _BN_is_prime_fasttest_ex %xdefine BN_is_word BORINGSSL_PREFIX %+ _BN_is_word %xdefine BN_is_zero BORINGSSL_PREFIX %+ _BN_is_zero %xdefine BN_le2bn BORINGSSL_PREFIX %+ _BN_le2bn %xdefine BN_lebin2bn BORINGSSL_PREFIX %+ _BN_lebin2bn %xdefine BN_lshift BORINGSSL_PREFIX %+ _BN_lshift %xdefine BN_lshift1 BORINGSSL_PREFIX %+ _BN_lshift1 %xdefine BN_marshal_asn1 BORINGSSL_PREFIX %+ _BN_marshal_asn1 %xdefine BN_mask_bits BORINGSSL_PREFIX %+ _BN_mask_bits %xdefine BN_mod_add BORINGSSL_PREFIX %+ _BN_mod_add %xdefine BN_mod_add_quick BORINGSSL_PREFIX %+ _BN_mod_add_quick %xdefine BN_mod_exp BORINGSSL_PREFIX %+ _BN_mod_exp %xdefine BN_mod_exp2_mont BORINGSSL_PREFIX %+ 
_BN_mod_exp2_mont %xdefine BN_mod_exp_mont BORINGSSL_PREFIX %+ _BN_mod_exp_mont %xdefine BN_mod_exp_mont_consttime BORINGSSL_PREFIX %+ _BN_mod_exp_mont_consttime %xdefine BN_mod_exp_mont_word BORINGSSL_PREFIX %+ _BN_mod_exp_mont_word %xdefine BN_mod_inverse BORINGSSL_PREFIX %+ _BN_mod_inverse %xdefine BN_mod_inverse_blinded BORINGSSL_PREFIX %+ _BN_mod_inverse_blinded %xdefine BN_mod_inverse_odd BORINGSSL_PREFIX %+ _BN_mod_inverse_odd %xdefine BN_mod_lshift BORINGSSL_PREFIX %+ _BN_mod_lshift %xdefine BN_mod_lshift1 BORINGSSL_PREFIX %+ _BN_mod_lshift1 %xdefine BN_mod_lshift1_quick BORINGSSL_PREFIX %+ _BN_mod_lshift1_quick %xdefine BN_mod_lshift_quick BORINGSSL_PREFIX %+ _BN_mod_lshift_quick %xdefine BN_mod_mul BORINGSSL_PREFIX %+ _BN_mod_mul %xdefine BN_mod_mul_montgomery BORINGSSL_PREFIX %+ _BN_mod_mul_montgomery %xdefine BN_mod_pow2 BORINGSSL_PREFIX %+ _BN_mod_pow2 %xdefine BN_mod_sqr BORINGSSL_PREFIX %+ _BN_mod_sqr %xdefine BN_mod_sqrt BORINGSSL_PREFIX %+ _BN_mod_sqrt %xdefine BN_mod_sub BORINGSSL_PREFIX %+ _BN_mod_sub %xdefine BN_mod_sub_quick BORINGSSL_PREFIX %+ _BN_mod_sub_quick %xdefine BN_mod_word BORINGSSL_PREFIX %+ _BN_mod_word %xdefine BN_mpi2bn BORINGSSL_PREFIX %+ _BN_mpi2bn %xdefine BN_mul BORINGSSL_PREFIX %+ _BN_mul %xdefine BN_mul_word BORINGSSL_PREFIX %+ _BN_mul_word %xdefine BN_new BORINGSSL_PREFIX %+ _BN_new %xdefine BN_nnmod BORINGSSL_PREFIX %+ _BN_nnmod %xdefine BN_nnmod_pow2 BORINGSSL_PREFIX %+ _BN_nnmod_pow2 %xdefine BN_num_bits BORINGSSL_PREFIX %+ _BN_num_bits %xdefine BN_num_bits_word BORINGSSL_PREFIX %+ _BN_num_bits_word %xdefine BN_num_bytes BORINGSSL_PREFIX %+ _BN_num_bytes %xdefine BN_one BORINGSSL_PREFIX %+ _BN_one %xdefine BN_parse_asn1_unsigned BORINGSSL_PREFIX %+ _BN_parse_asn1_unsigned %xdefine BN_primality_test BORINGSSL_PREFIX %+ _BN_primality_test %xdefine BN_print BORINGSSL_PREFIX %+ _BN_print %xdefine BN_print_fp BORINGSSL_PREFIX %+ _BN_print_fp %xdefine BN_pseudo_rand BORINGSSL_PREFIX %+ _BN_pseudo_rand %xdefine 
BN_pseudo_rand_range BORINGSSL_PREFIX %+ _BN_pseudo_rand_range %xdefine BN_rand BORINGSSL_PREFIX %+ _BN_rand %xdefine BN_rand_range BORINGSSL_PREFIX %+ _BN_rand_range %xdefine BN_rand_range_ex BORINGSSL_PREFIX %+ _BN_rand_range_ex %xdefine BN_rshift BORINGSSL_PREFIX %+ _BN_rshift %xdefine BN_rshift1 BORINGSSL_PREFIX %+ _BN_rshift1 %xdefine BN_secure_new BORINGSSL_PREFIX %+ _BN_secure_new %xdefine BN_set_bit BORINGSSL_PREFIX %+ _BN_set_bit %xdefine BN_set_negative BORINGSSL_PREFIX %+ _BN_set_negative %xdefine BN_set_u64 BORINGSSL_PREFIX %+ _BN_set_u64 %xdefine BN_set_word BORINGSSL_PREFIX %+ _BN_set_word %xdefine BN_sqr BORINGSSL_PREFIX %+ _BN_sqr %xdefine BN_sqrt BORINGSSL_PREFIX %+ _BN_sqrt %xdefine BN_sub BORINGSSL_PREFIX %+ _BN_sub %xdefine BN_sub_word BORINGSSL_PREFIX %+ _BN_sub_word %xdefine BN_to_ASN1_ENUMERATED BORINGSSL_PREFIX %+ _BN_to_ASN1_ENUMERATED %xdefine BN_to_ASN1_INTEGER BORINGSSL_PREFIX %+ _BN_to_ASN1_INTEGER %xdefine BN_to_montgomery BORINGSSL_PREFIX %+ _BN_to_montgomery %xdefine BN_uadd BORINGSSL_PREFIX %+ _BN_uadd %xdefine BN_ucmp BORINGSSL_PREFIX %+ _BN_ucmp %xdefine BN_usub BORINGSSL_PREFIX %+ _BN_usub %xdefine BN_value_one BORINGSSL_PREFIX %+ _BN_value_one %xdefine BN_zero BORINGSSL_PREFIX %+ _BN_zero %xdefine BORINGSSL_keccak BORINGSSL_PREFIX %+ _BORINGSSL_keccak %xdefine BORINGSSL_keccak_absorb BORINGSSL_PREFIX %+ _BORINGSSL_keccak_absorb %xdefine BORINGSSL_keccak_init BORINGSSL_PREFIX %+ _BORINGSSL_keccak_init %xdefine BORINGSSL_keccak_squeeze BORINGSSL_PREFIX %+ _BORINGSSL_keccak_squeeze %xdefine BORINGSSL_self_test BORINGSSL_PREFIX %+ _BORINGSSL_self_test %xdefine BUF_MEM_append BORINGSSL_PREFIX %+ _BUF_MEM_append %xdefine BUF_MEM_free BORINGSSL_PREFIX %+ _BUF_MEM_free %xdefine BUF_MEM_grow BORINGSSL_PREFIX %+ _BUF_MEM_grow %xdefine BUF_MEM_grow_clean BORINGSSL_PREFIX %+ _BUF_MEM_grow_clean %xdefine BUF_MEM_new BORINGSSL_PREFIX %+ _BUF_MEM_new %xdefine BUF_MEM_reserve BORINGSSL_PREFIX %+ _BUF_MEM_reserve %xdefine BUF_memdup 
BORINGSSL_PREFIX %+ _BUF_memdup %xdefine BUF_strdup BORINGSSL_PREFIX %+ _BUF_strdup %xdefine BUF_strlcat BORINGSSL_PREFIX %+ _BUF_strlcat %xdefine BUF_strlcpy BORINGSSL_PREFIX %+ _BUF_strlcpy %xdefine BUF_strndup BORINGSSL_PREFIX %+ _BUF_strndup %xdefine BUF_strnlen BORINGSSL_PREFIX %+ _BUF_strnlen %xdefine CBB_add_asn1 BORINGSSL_PREFIX %+ _CBB_add_asn1 %xdefine CBB_add_asn1_bool BORINGSSL_PREFIX %+ _CBB_add_asn1_bool %xdefine CBB_add_asn1_int64 BORINGSSL_PREFIX %+ _CBB_add_asn1_int64 %xdefine CBB_add_asn1_int64_with_tag BORINGSSL_PREFIX %+ _CBB_add_asn1_int64_with_tag %xdefine CBB_add_asn1_octet_string BORINGSSL_PREFIX %+ _CBB_add_asn1_octet_string %xdefine CBB_add_asn1_oid_from_text BORINGSSL_PREFIX %+ _CBB_add_asn1_oid_from_text %xdefine CBB_add_asn1_uint64 BORINGSSL_PREFIX %+ _CBB_add_asn1_uint64 %xdefine CBB_add_asn1_uint64_with_tag BORINGSSL_PREFIX %+ _CBB_add_asn1_uint64_with_tag %xdefine CBB_add_bytes BORINGSSL_PREFIX %+ _CBB_add_bytes %xdefine CBB_add_latin1 BORINGSSL_PREFIX %+ _CBB_add_latin1 %xdefine CBB_add_space BORINGSSL_PREFIX %+ _CBB_add_space %xdefine CBB_add_u16 BORINGSSL_PREFIX %+ _CBB_add_u16 %xdefine CBB_add_u16_length_prefixed BORINGSSL_PREFIX %+ _CBB_add_u16_length_prefixed %xdefine CBB_add_u16le BORINGSSL_PREFIX %+ _CBB_add_u16le %xdefine CBB_add_u24 BORINGSSL_PREFIX %+ _CBB_add_u24 %xdefine CBB_add_u24_length_prefixed BORINGSSL_PREFIX %+ _CBB_add_u24_length_prefixed %xdefine CBB_add_u32 BORINGSSL_PREFIX %+ _CBB_add_u32 %xdefine CBB_add_u32le BORINGSSL_PREFIX %+ _CBB_add_u32le %xdefine CBB_add_u64 BORINGSSL_PREFIX %+ _CBB_add_u64 %xdefine CBB_add_u64le BORINGSSL_PREFIX %+ _CBB_add_u64le %xdefine CBB_add_u8 BORINGSSL_PREFIX %+ _CBB_add_u8 %xdefine CBB_add_u8_length_prefixed BORINGSSL_PREFIX %+ _CBB_add_u8_length_prefixed %xdefine CBB_add_ucs2_be BORINGSSL_PREFIX %+ _CBB_add_ucs2_be %xdefine CBB_add_utf32_be BORINGSSL_PREFIX %+ _CBB_add_utf32_be %xdefine CBB_add_utf8 BORINGSSL_PREFIX %+ _CBB_add_utf8 %xdefine CBB_add_zeros BORINGSSL_PREFIX %+ 
_CBB_add_zeros %xdefine CBB_cleanup BORINGSSL_PREFIX %+ _CBB_cleanup %xdefine CBB_data BORINGSSL_PREFIX %+ _CBB_data %xdefine CBB_did_write BORINGSSL_PREFIX %+ _CBB_did_write %xdefine CBB_discard_child BORINGSSL_PREFIX %+ _CBB_discard_child %xdefine CBB_finish BORINGSSL_PREFIX %+ _CBB_finish %xdefine CBB_finish_i2d BORINGSSL_PREFIX %+ _CBB_finish_i2d %xdefine CBB_flush BORINGSSL_PREFIX %+ _CBB_flush %xdefine CBB_flush_asn1_set_of BORINGSSL_PREFIX %+ _CBB_flush_asn1_set_of %xdefine CBB_get_utf8_len BORINGSSL_PREFIX %+ _CBB_get_utf8_len %xdefine CBB_init BORINGSSL_PREFIX %+ _CBB_init %xdefine CBB_init_fixed BORINGSSL_PREFIX %+ _CBB_init_fixed %xdefine CBB_len BORINGSSL_PREFIX %+ _CBB_len %xdefine CBB_reserve BORINGSSL_PREFIX %+ _CBB_reserve %xdefine CBB_zero BORINGSSL_PREFIX %+ _CBB_zero %xdefine CBS_asn1_ber_to_der BORINGSSL_PREFIX %+ _CBS_asn1_ber_to_der %xdefine CBS_asn1_bitstring_has_bit BORINGSSL_PREFIX %+ _CBS_asn1_bitstring_has_bit %xdefine CBS_asn1_oid_to_text BORINGSSL_PREFIX %+ _CBS_asn1_oid_to_text %xdefine CBS_contains_zero_byte BORINGSSL_PREFIX %+ _CBS_contains_zero_byte %xdefine CBS_copy_bytes BORINGSSL_PREFIX %+ _CBS_copy_bytes %xdefine CBS_data BORINGSSL_PREFIX %+ _CBS_data %xdefine CBS_get_any_asn1 BORINGSSL_PREFIX %+ _CBS_get_any_asn1 %xdefine CBS_get_any_asn1_element BORINGSSL_PREFIX %+ _CBS_get_any_asn1_element %xdefine CBS_get_any_ber_asn1_element BORINGSSL_PREFIX %+ _CBS_get_any_ber_asn1_element %xdefine CBS_get_asn1 BORINGSSL_PREFIX %+ _CBS_get_asn1 %xdefine CBS_get_asn1_bool BORINGSSL_PREFIX %+ _CBS_get_asn1_bool %xdefine CBS_get_asn1_element BORINGSSL_PREFIX %+ _CBS_get_asn1_element %xdefine CBS_get_asn1_implicit_string BORINGSSL_PREFIX %+ _CBS_get_asn1_implicit_string %xdefine CBS_get_asn1_int64 BORINGSSL_PREFIX %+ _CBS_get_asn1_int64 %xdefine CBS_get_asn1_uint64 BORINGSSL_PREFIX %+ _CBS_get_asn1_uint64 %xdefine CBS_get_bytes BORINGSSL_PREFIX %+ _CBS_get_bytes %xdefine CBS_get_last_u8 BORINGSSL_PREFIX %+ _CBS_get_last_u8 %xdefine 
CBS_get_latin1 BORINGSSL_PREFIX %+ _CBS_get_latin1 %xdefine CBS_get_optional_asn1 BORINGSSL_PREFIX %+ _CBS_get_optional_asn1 %xdefine CBS_get_optional_asn1_bool BORINGSSL_PREFIX %+ _CBS_get_optional_asn1_bool %xdefine CBS_get_optional_asn1_octet_string BORINGSSL_PREFIX %+ _CBS_get_optional_asn1_octet_string %xdefine CBS_get_optional_asn1_uint64 BORINGSSL_PREFIX %+ _CBS_get_optional_asn1_uint64 %xdefine CBS_get_u16 BORINGSSL_PREFIX %+ _CBS_get_u16 %xdefine CBS_get_u16_length_prefixed BORINGSSL_PREFIX %+ _CBS_get_u16_length_prefixed %xdefine CBS_get_u16le BORINGSSL_PREFIX %+ _CBS_get_u16le %xdefine CBS_get_u24 BORINGSSL_PREFIX %+ _CBS_get_u24 %xdefine CBS_get_u24_length_prefixed BORINGSSL_PREFIX %+ _CBS_get_u24_length_prefixed %xdefine CBS_get_u32 BORINGSSL_PREFIX %+ _CBS_get_u32 %xdefine CBS_get_u32le BORINGSSL_PREFIX %+ _CBS_get_u32le %xdefine CBS_get_u64 BORINGSSL_PREFIX %+ _CBS_get_u64 %xdefine CBS_get_u64_decimal BORINGSSL_PREFIX %+ _CBS_get_u64_decimal %xdefine CBS_get_u64le BORINGSSL_PREFIX %+ _CBS_get_u64le %xdefine CBS_get_u8 BORINGSSL_PREFIX %+ _CBS_get_u8 %xdefine CBS_get_u8_length_prefixed BORINGSSL_PREFIX %+ _CBS_get_u8_length_prefixed %xdefine CBS_get_ucs2_be BORINGSSL_PREFIX %+ _CBS_get_ucs2_be %xdefine CBS_get_until_first BORINGSSL_PREFIX %+ _CBS_get_until_first %xdefine CBS_get_utf32_be BORINGSSL_PREFIX %+ _CBS_get_utf32_be %xdefine CBS_get_utf8 BORINGSSL_PREFIX %+ _CBS_get_utf8 %xdefine CBS_init BORINGSSL_PREFIX %+ _CBS_init %xdefine CBS_is_unsigned_asn1_integer BORINGSSL_PREFIX %+ _CBS_is_unsigned_asn1_integer %xdefine CBS_is_valid_asn1_bitstring BORINGSSL_PREFIX %+ _CBS_is_valid_asn1_bitstring %xdefine CBS_is_valid_asn1_integer BORINGSSL_PREFIX %+ _CBS_is_valid_asn1_integer %xdefine CBS_is_valid_asn1_oid BORINGSSL_PREFIX %+ _CBS_is_valid_asn1_oid %xdefine CBS_len BORINGSSL_PREFIX %+ _CBS_len %xdefine CBS_mem_equal BORINGSSL_PREFIX %+ _CBS_mem_equal %xdefine CBS_parse_generalized_time BORINGSSL_PREFIX %+ _CBS_parse_generalized_time %xdefine 
CBS_parse_utc_time BORINGSSL_PREFIX %+ _CBS_parse_utc_time %xdefine CBS_peek_asn1_tag BORINGSSL_PREFIX %+ _CBS_peek_asn1_tag %xdefine CBS_skip BORINGSSL_PREFIX %+ _CBS_skip %xdefine CBS_stow BORINGSSL_PREFIX %+ _CBS_stow %xdefine CBS_strdup BORINGSSL_PREFIX %+ _CBS_strdup %xdefine CERTIFICATEPOLICIES_free BORINGSSL_PREFIX %+ _CERTIFICATEPOLICIES_free %xdefine CERTIFICATEPOLICIES_it BORINGSSL_PREFIX %+ _CERTIFICATEPOLICIES_it %xdefine CERTIFICATEPOLICIES_new BORINGSSL_PREFIX %+ _CERTIFICATEPOLICIES_new %xdefine CMAC_CTX_copy BORINGSSL_PREFIX %+ _CMAC_CTX_copy %xdefine CMAC_CTX_free BORINGSSL_PREFIX %+ _CMAC_CTX_free %xdefine CMAC_CTX_new BORINGSSL_PREFIX %+ _CMAC_CTX_new %xdefine CMAC_Final BORINGSSL_PREFIX %+ _CMAC_Final %xdefine CMAC_Init BORINGSSL_PREFIX %+ _CMAC_Init %xdefine CMAC_Reset BORINGSSL_PREFIX %+ _CMAC_Reset %xdefine CMAC_Update BORINGSSL_PREFIX %+ _CMAC_Update %xdefine CONF_VALUE_new BORINGSSL_PREFIX %+ _CONF_VALUE_new %xdefine CONF_modules_free BORINGSSL_PREFIX %+ _CONF_modules_free %xdefine CONF_modules_load_file BORINGSSL_PREFIX %+ _CONF_modules_load_file %xdefine CONF_parse_list BORINGSSL_PREFIX %+ _CONF_parse_list %xdefine CRL_DIST_POINTS_free BORINGSSL_PREFIX %+ _CRL_DIST_POINTS_free %xdefine CRL_DIST_POINTS_it BORINGSSL_PREFIX %+ _CRL_DIST_POINTS_it %xdefine CRL_DIST_POINTS_new BORINGSSL_PREFIX %+ _CRL_DIST_POINTS_new %xdefine CRYPTO_BUFFER_POOL_free BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_POOL_free %xdefine CRYPTO_BUFFER_POOL_new BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_POOL_new %xdefine CRYPTO_BUFFER_alloc BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_alloc %xdefine CRYPTO_BUFFER_data BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_data %xdefine CRYPTO_BUFFER_free BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_free %xdefine CRYPTO_BUFFER_init_CBS BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_init_CBS %xdefine CRYPTO_BUFFER_len BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_len %xdefine CRYPTO_BUFFER_new BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_new %xdefine CRYPTO_BUFFER_new_from_CBS BORINGSSL_PREFIX %+ 
_CRYPTO_BUFFER_new_from_CBS %xdefine CRYPTO_BUFFER_new_from_static_data_unsafe BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_new_from_static_data_unsafe %xdefine CRYPTO_BUFFER_up_ref BORINGSSL_PREFIX %+ _CRYPTO_BUFFER_up_ref %xdefine CRYPTO_MUTEX_cleanup BORINGSSL_PREFIX %+ _CRYPTO_MUTEX_cleanup %xdefine CRYPTO_MUTEX_init BORINGSSL_PREFIX %+ _CRYPTO_MUTEX_init %xdefine CRYPTO_MUTEX_lock_read BORINGSSL_PREFIX %+ _CRYPTO_MUTEX_lock_read %xdefine CRYPTO_MUTEX_lock_write BORINGSSL_PREFIX %+ _CRYPTO_MUTEX_lock_write %xdefine CRYPTO_MUTEX_unlock_read BORINGSSL_PREFIX %+ _CRYPTO_MUTEX_unlock_read %xdefine CRYPTO_MUTEX_unlock_write BORINGSSL_PREFIX %+ _CRYPTO_MUTEX_unlock_write %xdefine CRYPTO_POLYVAL_finish BORINGSSL_PREFIX %+ _CRYPTO_POLYVAL_finish %xdefine CRYPTO_POLYVAL_init BORINGSSL_PREFIX %+ _CRYPTO_POLYVAL_init %xdefine CRYPTO_POLYVAL_update_blocks BORINGSSL_PREFIX %+ _CRYPTO_POLYVAL_update_blocks %xdefine CRYPTO_THREADID_current BORINGSSL_PREFIX %+ _CRYPTO_THREADID_current %xdefine CRYPTO_THREADID_set_callback BORINGSSL_PREFIX %+ _CRYPTO_THREADID_set_callback %xdefine CRYPTO_THREADID_set_numeric BORINGSSL_PREFIX %+ _CRYPTO_THREADID_set_numeric %xdefine CRYPTO_THREADID_set_pointer BORINGSSL_PREFIX %+ _CRYPTO_THREADID_set_pointer %xdefine CRYPTO_atomic_compare_exchange_weak_u32 BORINGSSL_PREFIX %+ _CRYPTO_atomic_compare_exchange_weak_u32 %xdefine CRYPTO_atomic_load_u32 BORINGSSL_PREFIX %+ _CRYPTO_atomic_load_u32 %xdefine CRYPTO_atomic_store_u32 BORINGSSL_PREFIX %+ _CRYPTO_atomic_store_u32 %xdefine CRYPTO_cbc128_decrypt BORINGSSL_PREFIX %+ _CRYPTO_cbc128_decrypt %xdefine CRYPTO_cbc128_encrypt BORINGSSL_PREFIX %+ _CRYPTO_cbc128_encrypt %xdefine CRYPTO_cfb128_1_encrypt BORINGSSL_PREFIX %+ _CRYPTO_cfb128_1_encrypt %xdefine CRYPTO_cfb128_8_encrypt BORINGSSL_PREFIX %+ _CRYPTO_cfb128_8_encrypt %xdefine CRYPTO_cfb128_encrypt BORINGSSL_PREFIX %+ _CRYPTO_cfb128_encrypt %xdefine CRYPTO_chacha_20 BORINGSSL_PREFIX %+ _CRYPTO_chacha_20 %xdefine CRYPTO_cleanup_all_ex_data BORINGSSL_PREFIX %+ 
_CRYPTO_cleanup_all_ex_data %xdefine CRYPTO_cpu_avoid_zmm_registers BORINGSSL_PREFIX %+ _CRYPTO_cpu_avoid_zmm_registers %xdefine CRYPTO_cpu_perf_is_like_silvermont BORINGSSL_PREFIX %+ _CRYPTO_cpu_perf_is_like_silvermont %xdefine CRYPTO_ctr128_encrypt_ctr32 BORINGSSL_PREFIX %+ _CRYPTO_ctr128_encrypt_ctr32 %xdefine CRYPTO_fips_186_2_prf BORINGSSL_PREFIX %+ _CRYPTO_fips_186_2_prf %xdefine CRYPTO_fork_detect_force_madv_wipeonfork_for_testing BORINGSSL_PREFIX %+ _CRYPTO_fork_detect_force_madv_wipeonfork_for_testing %xdefine CRYPTO_free BORINGSSL_PREFIX %+ _CRYPTO_free %xdefine CRYPTO_free_ex_data BORINGSSL_PREFIX %+ _CRYPTO_free_ex_data %xdefine CRYPTO_gcm128_aad BORINGSSL_PREFIX %+ _CRYPTO_gcm128_aad %xdefine CRYPTO_gcm128_decrypt BORINGSSL_PREFIX %+ _CRYPTO_gcm128_decrypt %xdefine CRYPTO_gcm128_encrypt BORINGSSL_PREFIX %+ _CRYPTO_gcm128_encrypt %xdefine CRYPTO_gcm128_finish BORINGSSL_PREFIX %+ _CRYPTO_gcm128_finish %xdefine CRYPTO_gcm128_init_aes_key BORINGSSL_PREFIX %+ _CRYPTO_gcm128_init_aes_key %xdefine CRYPTO_gcm128_init_ctx BORINGSSL_PREFIX %+ _CRYPTO_gcm128_init_ctx %xdefine CRYPTO_gcm128_tag BORINGSSL_PREFIX %+ _CRYPTO_gcm128_tag %xdefine CRYPTO_get_dynlock_create_callback BORINGSSL_PREFIX %+ _CRYPTO_get_dynlock_create_callback %xdefine CRYPTO_get_dynlock_destroy_callback BORINGSSL_PREFIX %+ _CRYPTO_get_dynlock_destroy_callback %xdefine CRYPTO_get_dynlock_lock_callback BORINGSSL_PREFIX %+ _CRYPTO_get_dynlock_lock_callback %xdefine CRYPTO_get_ex_data BORINGSSL_PREFIX %+ _CRYPTO_get_ex_data %xdefine CRYPTO_get_ex_new_index_ex BORINGSSL_PREFIX %+ _CRYPTO_get_ex_new_index_ex %xdefine CRYPTO_get_fork_generation BORINGSSL_PREFIX %+ _CRYPTO_get_fork_generation %xdefine CRYPTO_get_lock_name BORINGSSL_PREFIX %+ _CRYPTO_get_lock_name %xdefine CRYPTO_get_locking_callback BORINGSSL_PREFIX %+ _CRYPTO_get_locking_callback %xdefine CRYPTO_get_stderr BORINGSSL_PREFIX %+ _CRYPTO_get_stderr %xdefine CRYPTO_get_thread_local BORINGSSL_PREFIX %+ _CRYPTO_get_thread_local %xdefine 
CRYPTO_ghash_init BORINGSSL_PREFIX %+ _CRYPTO_ghash_init %xdefine CRYPTO_has_asm BORINGSSL_PREFIX %+ _CRYPTO_has_asm %xdefine CRYPTO_hchacha20 BORINGSSL_PREFIX %+ _CRYPTO_hchacha20 %xdefine CRYPTO_init_sysrand BORINGSSL_PREFIX %+ _CRYPTO_init_sysrand %xdefine CRYPTO_is_ADX_capable BORINGSSL_PREFIX %+ _CRYPTO_is_ADX_capable %xdefine CRYPTO_is_AESNI_capable BORINGSSL_PREFIX %+ _CRYPTO_is_AESNI_capable %xdefine CRYPTO_is_ARMv8_AES_capable BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_AES_capable %xdefine CRYPTO_is_ARMv8_PMULL_capable BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_PMULL_capable %xdefine CRYPTO_is_ARMv8_SHA1_capable BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_SHA1_capable %xdefine CRYPTO_is_ARMv8_SHA256_capable BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_SHA256_capable %xdefine CRYPTO_is_ARMv8_SHA512_capable BORINGSSL_PREFIX %+ _CRYPTO_is_ARMv8_SHA512_capable %xdefine CRYPTO_is_AVX2_capable BORINGSSL_PREFIX %+ _CRYPTO_is_AVX2_capable %xdefine CRYPTO_is_AVX512BW_capable BORINGSSL_PREFIX %+ _CRYPTO_is_AVX512BW_capable %xdefine CRYPTO_is_AVX512VL_capable BORINGSSL_PREFIX %+ _CRYPTO_is_AVX512VL_capable %xdefine CRYPTO_is_AVX_capable BORINGSSL_PREFIX %+ _CRYPTO_is_AVX_capable %xdefine CRYPTO_is_BMI1_capable BORINGSSL_PREFIX %+ _CRYPTO_is_BMI1_capable %xdefine CRYPTO_is_BMI2_capable BORINGSSL_PREFIX %+ _CRYPTO_is_BMI2_capable %xdefine CRYPTO_is_FXSR_capable BORINGSSL_PREFIX %+ _CRYPTO_is_FXSR_capable %xdefine CRYPTO_is_MOVBE_capable BORINGSSL_PREFIX %+ _CRYPTO_is_MOVBE_capable %xdefine CRYPTO_is_NEON_capable BORINGSSL_PREFIX %+ _CRYPTO_is_NEON_capable %xdefine CRYPTO_is_PCLMUL_capable BORINGSSL_PREFIX %+ _CRYPTO_is_PCLMUL_capable %xdefine CRYPTO_is_RDRAND_capable BORINGSSL_PREFIX %+ _CRYPTO_is_RDRAND_capable %xdefine CRYPTO_is_SSE4_1_capable BORINGSSL_PREFIX %+ _CRYPTO_is_SSE4_1_capable %xdefine CRYPTO_is_SSSE3_capable BORINGSSL_PREFIX %+ _CRYPTO_is_SSSE3_capable %xdefine CRYPTO_is_VAES_capable BORINGSSL_PREFIX %+ _CRYPTO_is_VAES_capable %xdefine CRYPTO_is_VPCLMULQDQ_capable BORINGSSL_PREFIX 
%+ _CRYPTO_is_VPCLMULQDQ_capable %xdefine CRYPTO_is_confidential_build BORINGSSL_PREFIX %+ _CRYPTO_is_confidential_build %xdefine CRYPTO_is_intel_cpu BORINGSSL_PREFIX %+ _CRYPTO_is_intel_cpu %xdefine CRYPTO_is_x86_SHA_capable BORINGSSL_PREFIX %+ _CRYPTO_is_x86_SHA_capable %xdefine CRYPTO_library_init BORINGSSL_PREFIX %+ _CRYPTO_library_init %xdefine CRYPTO_malloc BORINGSSL_PREFIX %+ _CRYPTO_malloc %xdefine CRYPTO_malloc_init BORINGSSL_PREFIX %+ _CRYPTO_malloc_init %xdefine CRYPTO_memcmp BORINGSSL_PREFIX %+ _CRYPTO_memcmp %xdefine CRYPTO_new_ex_data BORINGSSL_PREFIX %+ _CRYPTO_new_ex_data %xdefine CRYPTO_num_locks BORINGSSL_PREFIX %+ _CRYPTO_num_locks %xdefine CRYPTO_ofb128_encrypt BORINGSSL_PREFIX %+ _CRYPTO_ofb128_encrypt %xdefine CRYPTO_once BORINGSSL_PREFIX %+ _CRYPTO_once %xdefine CRYPTO_poly1305_finish BORINGSSL_PREFIX %+ _CRYPTO_poly1305_finish %xdefine CRYPTO_poly1305_init BORINGSSL_PREFIX %+ _CRYPTO_poly1305_init %xdefine CRYPTO_poly1305_update BORINGSSL_PREFIX %+ _CRYPTO_poly1305_update %xdefine CRYPTO_pre_sandbox_init BORINGSSL_PREFIX %+ _CRYPTO_pre_sandbox_init %xdefine CRYPTO_rdrand BORINGSSL_PREFIX %+ _CRYPTO_rdrand %xdefine CRYPTO_rdrand_multiple8_buf BORINGSSL_PREFIX %+ _CRYPTO_rdrand_multiple8_buf %xdefine CRYPTO_realloc BORINGSSL_PREFIX %+ _CRYPTO_realloc %xdefine CRYPTO_refcount_dec_and_test_zero BORINGSSL_PREFIX %+ _CRYPTO_refcount_dec_and_test_zero %xdefine CRYPTO_refcount_inc BORINGSSL_PREFIX %+ _CRYPTO_refcount_inc %xdefine CRYPTO_secure_malloc_init BORINGSSL_PREFIX %+ _CRYPTO_secure_malloc_init %xdefine CRYPTO_secure_malloc_initialized BORINGSSL_PREFIX %+ _CRYPTO_secure_malloc_initialized %xdefine CRYPTO_secure_used BORINGSSL_PREFIX %+ _CRYPTO_secure_used %xdefine CRYPTO_set_add_lock_callback BORINGSSL_PREFIX %+ _CRYPTO_set_add_lock_callback %xdefine CRYPTO_set_dynlock_create_callback BORINGSSL_PREFIX %+ _CRYPTO_set_dynlock_create_callback %xdefine CRYPTO_set_dynlock_destroy_callback BORINGSSL_PREFIX %+ _CRYPTO_set_dynlock_destroy_callback 
%xdefine CRYPTO_set_dynlock_lock_callback BORINGSSL_PREFIX %+ _CRYPTO_set_dynlock_lock_callback %xdefine CRYPTO_set_ex_data BORINGSSL_PREFIX %+ _CRYPTO_set_ex_data %xdefine CRYPTO_set_id_callback BORINGSSL_PREFIX %+ _CRYPTO_set_id_callback %xdefine CRYPTO_set_locking_callback BORINGSSL_PREFIX %+ _CRYPTO_set_locking_callback %xdefine CRYPTO_set_thread_local BORINGSSL_PREFIX %+ _CRYPTO_set_thread_local %xdefine CRYPTO_sysrand BORINGSSL_PREFIX %+ _CRYPTO_sysrand %xdefine CRYPTO_sysrand_for_seed BORINGSSL_PREFIX %+ _CRYPTO_sysrand_for_seed %xdefine CRYPTO_sysrand_if_available BORINGSSL_PREFIX %+ _CRYPTO_sysrand_if_available %xdefine CRYPTO_tls13_hkdf_expand_label BORINGSSL_PREFIX %+ _CRYPTO_tls13_hkdf_expand_label %xdefine CRYPTO_tls1_prf BORINGSSL_PREFIX %+ _CRYPTO_tls1_prf %xdefine CRYPTO_xor16 BORINGSSL_PREFIX %+ _CRYPTO_xor16 %xdefine CTR_DRBG_clear BORINGSSL_PREFIX %+ _CTR_DRBG_clear %xdefine CTR_DRBG_free BORINGSSL_PREFIX %+ _CTR_DRBG_free %xdefine CTR_DRBG_generate BORINGSSL_PREFIX %+ _CTR_DRBG_generate %xdefine CTR_DRBG_init BORINGSSL_PREFIX %+ _CTR_DRBG_init %xdefine CTR_DRBG_new BORINGSSL_PREFIX %+ _CTR_DRBG_new %xdefine CTR_DRBG_reseed BORINGSSL_PREFIX %+ _CTR_DRBG_reseed %xdefine ChaCha20_ctr32_avx2 BORINGSSL_PREFIX %+ _ChaCha20_ctr32_avx2 %xdefine ChaCha20_ctr32_avx2_capable BORINGSSL_PREFIX %+ _ChaCha20_ctr32_avx2_capable %xdefine ChaCha20_ctr32_neon BORINGSSL_PREFIX %+ _ChaCha20_ctr32_neon %xdefine ChaCha20_ctr32_neon_capable BORINGSSL_PREFIX %+ _ChaCha20_ctr32_neon_capable %xdefine ChaCha20_ctr32_nohw BORINGSSL_PREFIX %+ _ChaCha20_ctr32_nohw %xdefine ChaCha20_ctr32_ssse3 BORINGSSL_PREFIX %+ _ChaCha20_ctr32_ssse3 %xdefine ChaCha20_ctr32_ssse3_4x BORINGSSL_PREFIX %+ _ChaCha20_ctr32_ssse3_4x %xdefine ChaCha20_ctr32_ssse3_4x_capable BORINGSSL_PREFIX %+ _ChaCha20_ctr32_ssse3_4x_capable %xdefine ChaCha20_ctr32_ssse3_capable BORINGSSL_PREFIX %+ _ChaCha20_ctr32_ssse3_capable %xdefine DES_decrypt3 BORINGSSL_PREFIX %+ _DES_decrypt3 %xdefine DES_ecb3_encrypt 
BORINGSSL_PREFIX %+ _DES_ecb3_encrypt %xdefine DES_ecb3_encrypt_ex BORINGSSL_PREFIX %+ _DES_ecb3_encrypt_ex %xdefine DES_ecb_encrypt BORINGSSL_PREFIX %+ _DES_ecb_encrypt %xdefine DES_ecb_encrypt_ex BORINGSSL_PREFIX %+ _DES_ecb_encrypt_ex %xdefine DES_ede2_cbc_encrypt BORINGSSL_PREFIX %+ _DES_ede2_cbc_encrypt %xdefine DES_ede3_cbc_encrypt BORINGSSL_PREFIX %+ _DES_ede3_cbc_encrypt %xdefine DES_ede3_cbc_encrypt_ex BORINGSSL_PREFIX %+ _DES_ede3_cbc_encrypt_ex %xdefine DES_encrypt3 BORINGSSL_PREFIX %+ _DES_encrypt3 %xdefine DES_ncbc_encrypt BORINGSSL_PREFIX %+ _DES_ncbc_encrypt %xdefine DES_ncbc_encrypt_ex BORINGSSL_PREFIX %+ _DES_ncbc_encrypt_ex %xdefine DES_set_key BORINGSSL_PREFIX %+ _DES_set_key %xdefine DES_set_key_ex BORINGSSL_PREFIX %+ _DES_set_key_ex %xdefine DES_set_key_unchecked BORINGSSL_PREFIX %+ _DES_set_key_unchecked %xdefine DES_set_odd_parity BORINGSSL_PREFIX %+ _DES_set_odd_parity %xdefine DH_bits BORINGSSL_PREFIX %+ _DH_bits %xdefine DH_check BORINGSSL_PREFIX %+ _DH_check %xdefine DH_check_pub_key BORINGSSL_PREFIX %+ _DH_check_pub_key %xdefine DH_compute_key BORINGSSL_PREFIX %+ _DH_compute_key %xdefine DH_compute_key_hashed BORINGSSL_PREFIX %+ _DH_compute_key_hashed %xdefine DH_compute_key_padded BORINGSSL_PREFIX %+ _DH_compute_key_padded %xdefine DH_free BORINGSSL_PREFIX %+ _DH_free %xdefine DH_generate_key BORINGSSL_PREFIX %+ _DH_generate_key %xdefine DH_generate_parameters_ex BORINGSSL_PREFIX %+ _DH_generate_parameters_ex %xdefine DH_get0_g BORINGSSL_PREFIX %+ _DH_get0_g %xdefine DH_get0_key BORINGSSL_PREFIX %+ _DH_get0_key %xdefine DH_get0_p BORINGSSL_PREFIX %+ _DH_get0_p %xdefine DH_get0_pqg BORINGSSL_PREFIX %+ _DH_get0_pqg %xdefine DH_get0_priv_key BORINGSSL_PREFIX %+ _DH_get0_priv_key %xdefine DH_get0_pub_key BORINGSSL_PREFIX %+ _DH_get0_pub_key %xdefine DH_get0_q BORINGSSL_PREFIX %+ _DH_get0_q %xdefine DH_get_rfc7919_2048 BORINGSSL_PREFIX %+ _DH_get_rfc7919_2048 %xdefine DH_marshal_parameters BORINGSSL_PREFIX %+ _DH_marshal_parameters %xdefine 
DH_new BORINGSSL_PREFIX %+ _DH_new %xdefine DH_num_bits BORINGSSL_PREFIX %+ _DH_num_bits %xdefine DH_parse_parameters BORINGSSL_PREFIX %+ _DH_parse_parameters %xdefine DH_set0_key BORINGSSL_PREFIX %+ _DH_set0_key %xdefine DH_set0_pqg BORINGSSL_PREFIX %+ _DH_set0_pqg %xdefine DH_set_length BORINGSSL_PREFIX %+ _DH_set_length %xdefine DH_size BORINGSSL_PREFIX %+ _DH_size %xdefine DH_up_ref BORINGSSL_PREFIX %+ _DH_up_ref %xdefine DHparams_dup BORINGSSL_PREFIX %+ _DHparams_dup %xdefine DIRECTORYSTRING_free BORINGSSL_PREFIX %+ _DIRECTORYSTRING_free %xdefine DIRECTORYSTRING_it BORINGSSL_PREFIX %+ _DIRECTORYSTRING_it %xdefine DIRECTORYSTRING_new BORINGSSL_PREFIX %+ _DIRECTORYSTRING_new %xdefine DISPLAYTEXT_free BORINGSSL_PREFIX %+ _DISPLAYTEXT_free %xdefine DISPLAYTEXT_it BORINGSSL_PREFIX %+ _DISPLAYTEXT_it %xdefine DISPLAYTEXT_new BORINGSSL_PREFIX %+ _DISPLAYTEXT_new %xdefine DIST_POINT_NAME_free BORINGSSL_PREFIX %+ _DIST_POINT_NAME_free %xdefine DIST_POINT_NAME_new BORINGSSL_PREFIX %+ _DIST_POINT_NAME_new %xdefine DIST_POINT_free BORINGSSL_PREFIX %+ _DIST_POINT_free %xdefine DIST_POINT_new BORINGSSL_PREFIX %+ _DIST_POINT_new %xdefine DIST_POINT_set_dpname BORINGSSL_PREFIX %+ _DIST_POINT_set_dpname %xdefine DSA_SIG_free BORINGSSL_PREFIX %+ _DSA_SIG_free %xdefine DSA_SIG_get0 BORINGSSL_PREFIX %+ _DSA_SIG_get0 %xdefine DSA_SIG_marshal BORINGSSL_PREFIX %+ _DSA_SIG_marshal %xdefine DSA_SIG_new BORINGSSL_PREFIX %+ _DSA_SIG_new %xdefine DSA_SIG_parse BORINGSSL_PREFIX %+ _DSA_SIG_parse %xdefine DSA_SIG_set0 BORINGSSL_PREFIX %+ _DSA_SIG_set0 %xdefine DSA_bits BORINGSSL_PREFIX %+ _DSA_bits %xdefine DSA_check_signature BORINGSSL_PREFIX %+ _DSA_check_signature %xdefine DSA_do_check_signature BORINGSSL_PREFIX %+ _DSA_do_check_signature %xdefine DSA_do_sign BORINGSSL_PREFIX %+ _DSA_do_sign %xdefine DSA_do_verify BORINGSSL_PREFIX %+ _DSA_do_verify %xdefine DSA_dup_DH BORINGSSL_PREFIX %+ _DSA_dup_DH %xdefine DSA_free BORINGSSL_PREFIX %+ _DSA_free %xdefine DSA_generate_key 
BORINGSSL_PREFIX %+ _DSA_generate_key %xdefine DSA_generate_parameters_ex BORINGSSL_PREFIX %+ _DSA_generate_parameters_ex %xdefine DSA_get0_g BORINGSSL_PREFIX %+ _DSA_get0_g %xdefine DSA_get0_key BORINGSSL_PREFIX %+ _DSA_get0_key %xdefine DSA_get0_p BORINGSSL_PREFIX %+ _DSA_get0_p %xdefine DSA_get0_pqg BORINGSSL_PREFIX %+ _DSA_get0_pqg %xdefine DSA_get0_priv_key BORINGSSL_PREFIX %+ _DSA_get0_priv_key %xdefine DSA_get0_pub_key BORINGSSL_PREFIX %+ _DSA_get0_pub_key %xdefine DSA_get0_q BORINGSSL_PREFIX %+ _DSA_get0_q %xdefine DSA_get_ex_data BORINGSSL_PREFIX %+ _DSA_get_ex_data %xdefine DSA_get_ex_new_index BORINGSSL_PREFIX %+ _DSA_get_ex_new_index %xdefine DSA_marshal_parameters BORINGSSL_PREFIX %+ _DSA_marshal_parameters %xdefine DSA_marshal_private_key BORINGSSL_PREFIX %+ _DSA_marshal_private_key %xdefine DSA_marshal_public_key BORINGSSL_PREFIX %+ _DSA_marshal_public_key %xdefine DSA_new BORINGSSL_PREFIX %+ _DSA_new %xdefine DSA_parse_parameters BORINGSSL_PREFIX %+ _DSA_parse_parameters %xdefine DSA_parse_private_key BORINGSSL_PREFIX %+ _DSA_parse_private_key %xdefine DSA_parse_public_key BORINGSSL_PREFIX %+ _DSA_parse_public_key %xdefine DSA_set0_key BORINGSSL_PREFIX %+ _DSA_set0_key %xdefine DSA_set0_pqg BORINGSSL_PREFIX %+ _DSA_set0_pqg %xdefine DSA_set_ex_data BORINGSSL_PREFIX %+ _DSA_set_ex_data %xdefine DSA_sign BORINGSSL_PREFIX %+ _DSA_sign %xdefine DSA_size BORINGSSL_PREFIX %+ _DSA_size %xdefine DSA_up_ref BORINGSSL_PREFIX %+ _DSA_up_ref %xdefine DSA_verify BORINGSSL_PREFIX %+ _DSA_verify %xdefine DSAparams_dup BORINGSSL_PREFIX %+ _DSAparams_dup %xdefine DTLS_client_method BORINGSSL_PREFIX %+ _DTLS_client_method %xdefine DTLS_method BORINGSSL_PREFIX %+ _DTLS_method %xdefine DTLS_server_method BORINGSSL_PREFIX %+ _DTLS_server_method %xdefine DTLS_with_buffers_method BORINGSSL_PREFIX %+ _DTLS_with_buffers_method %xdefine DTLSv1_2_client_method BORINGSSL_PREFIX %+ _DTLSv1_2_client_method %xdefine DTLSv1_2_method BORINGSSL_PREFIX %+ _DTLSv1_2_method %xdefine 
DTLSv1_2_server_method BORINGSSL_PREFIX %+ _DTLSv1_2_server_method %xdefine DTLSv1_client_method BORINGSSL_PREFIX %+ _DTLSv1_client_method %xdefine DTLSv1_get_timeout BORINGSSL_PREFIX %+ _DTLSv1_get_timeout %xdefine DTLSv1_handle_timeout BORINGSSL_PREFIX %+ _DTLSv1_handle_timeout %xdefine DTLSv1_method BORINGSSL_PREFIX %+ _DTLSv1_method %xdefine DTLSv1_server_method BORINGSSL_PREFIX %+ _DTLSv1_server_method %xdefine DTLSv1_set_initial_timeout_duration BORINGSSL_PREFIX %+ _DTLSv1_set_initial_timeout_duration %xdefine ECDH_compute_key BORINGSSL_PREFIX %+ _ECDH_compute_key %xdefine ECDH_compute_key_fips BORINGSSL_PREFIX %+ _ECDH_compute_key_fips %xdefine ECDSA_SIG_free BORINGSSL_PREFIX %+ _ECDSA_SIG_free %xdefine ECDSA_SIG_from_bytes BORINGSSL_PREFIX %+ _ECDSA_SIG_from_bytes %xdefine ECDSA_SIG_get0 BORINGSSL_PREFIX %+ _ECDSA_SIG_get0 %xdefine ECDSA_SIG_get0_r BORINGSSL_PREFIX %+ _ECDSA_SIG_get0_r %xdefine ECDSA_SIG_get0_s BORINGSSL_PREFIX %+ _ECDSA_SIG_get0_s %xdefine ECDSA_SIG_marshal BORINGSSL_PREFIX %+ _ECDSA_SIG_marshal %xdefine ECDSA_SIG_max_len BORINGSSL_PREFIX %+ _ECDSA_SIG_max_len %xdefine ECDSA_SIG_new BORINGSSL_PREFIX %+ _ECDSA_SIG_new %xdefine ECDSA_SIG_parse BORINGSSL_PREFIX %+ _ECDSA_SIG_parse %xdefine ECDSA_SIG_set0 BORINGSSL_PREFIX %+ _ECDSA_SIG_set0 %xdefine ECDSA_SIG_to_bytes BORINGSSL_PREFIX %+ _ECDSA_SIG_to_bytes %xdefine ECDSA_do_sign BORINGSSL_PREFIX %+ _ECDSA_do_sign %xdefine ECDSA_do_verify BORINGSSL_PREFIX %+ _ECDSA_do_verify %xdefine ECDSA_sign BORINGSSL_PREFIX %+ _ECDSA_sign %xdefine ECDSA_sign_with_nonce_and_leak_private_key_for_testing BORINGSSL_PREFIX %+ _ECDSA_sign_with_nonce_and_leak_private_key_for_testing %xdefine ECDSA_size BORINGSSL_PREFIX %+ _ECDSA_size %xdefine ECDSA_verify BORINGSSL_PREFIX %+ _ECDSA_verify %xdefine EC_GFp_mont_method BORINGSSL_PREFIX %+ _EC_GFp_mont_method %xdefine EC_GFp_nistp224_method BORINGSSL_PREFIX %+ _EC_GFp_nistp224_method %xdefine EC_GFp_nistp256_method BORINGSSL_PREFIX %+ _EC_GFp_nistp256_method %xdefine 
EC_GFp_nistz256_method BORINGSSL_PREFIX %+ _EC_GFp_nistz256_method %xdefine EC_GROUP_cmp BORINGSSL_PREFIX %+ _EC_GROUP_cmp %xdefine EC_GROUP_dup BORINGSSL_PREFIX %+ _EC_GROUP_dup %xdefine EC_GROUP_free BORINGSSL_PREFIX %+ _EC_GROUP_free %xdefine EC_GROUP_get0_generator BORINGSSL_PREFIX %+ _EC_GROUP_get0_generator %xdefine EC_GROUP_get0_order BORINGSSL_PREFIX %+ _EC_GROUP_get0_order %xdefine EC_GROUP_get_asn1_flag BORINGSSL_PREFIX %+ _EC_GROUP_get_asn1_flag %xdefine EC_GROUP_get_cofactor BORINGSSL_PREFIX %+ _EC_GROUP_get_cofactor %xdefine EC_GROUP_get_curve_GFp BORINGSSL_PREFIX %+ _EC_GROUP_get_curve_GFp %xdefine EC_GROUP_get_curve_name BORINGSSL_PREFIX %+ _EC_GROUP_get_curve_name %xdefine EC_GROUP_get_degree BORINGSSL_PREFIX %+ _EC_GROUP_get_degree %xdefine EC_GROUP_get_order BORINGSSL_PREFIX %+ _EC_GROUP_get_order %xdefine EC_GROUP_method_of BORINGSSL_PREFIX %+ _EC_GROUP_method_of %xdefine EC_GROUP_new_by_curve_name BORINGSSL_PREFIX %+ _EC_GROUP_new_by_curve_name %xdefine EC_GROUP_new_curve_GFp BORINGSSL_PREFIX %+ _EC_GROUP_new_curve_GFp %xdefine EC_GROUP_order_bits BORINGSSL_PREFIX %+ _EC_GROUP_order_bits %xdefine EC_GROUP_set_asn1_flag BORINGSSL_PREFIX %+ _EC_GROUP_set_asn1_flag %xdefine EC_GROUP_set_generator BORINGSSL_PREFIX %+ _EC_GROUP_set_generator %xdefine EC_GROUP_set_point_conversion_form BORINGSSL_PREFIX %+ _EC_GROUP_set_point_conversion_form %xdefine EC_KEY_check_fips BORINGSSL_PREFIX %+ _EC_KEY_check_fips %xdefine EC_KEY_check_key BORINGSSL_PREFIX %+ _EC_KEY_check_key %xdefine EC_KEY_derive_from_secret BORINGSSL_PREFIX %+ _EC_KEY_derive_from_secret %xdefine EC_KEY_dup BORINGSSL_PREFIX %+ _EC_KEY_dup %xdefine EC_KEY_free BORINGSSL_PREFIX %+ _EC_KEY_free %xdefine EC_KEY_generate_key BORINGSSL_PREFIX %+ _EC_KEY_generate_key %xdefine EC_KEY_generate_key_fips BORINGSSL_PREFIX %+ _EC_KEY_generate_key_fips %xdefine EC_KEY_get0_group BORINGSSL_PREFIX %+ _EC_KEY_get0_group %xdefine EC_KEY_get0_private_key BORINGSSL_PREFIX %+ _EC_KEY_get0_private_key %xdefine 
EC_KEY_get0_public_key BORINGSSL_PREFIX %+ _EC_KEY_get0_public_key %xdefine EC_KEY_get_conv_form BORINGSSL_PREFIX %+ _EC_KEY_get_conv_form %xdefine EC_KEY_get_enc_flags BORINGSSL_PREFIX %+ _EC_KEY_get_enc_flags %xdefine EC_KEY_get_ex_data BORINGSSL_PREFIX %+ _EC_KEY_get_ex_data %xdefine EC_KEY_get_ex_new_index BORINGSSL_PREFIX %+ _EC_KEY_get_ex_new_index %xdefine EC_KEY_is_opaque BORINGSSL_PREFIX %+ _EC_KEY_is_opaque %xdefine EC_KEY_key2buf BORINGSSL_PREFIX %+ _EC_KEY_key2buf %xdefine EC_KEY_marshal_curve_name BORINGSSL_PREFIX %+ _EC_KEY_marshal_curve_name %xdefine EC_KEY_marshal_private_key BORINGSSL_PREFIX %+ _EC_KEY_marshal_private_key %xdefine EC_KEY_new BORINGSSL_PREFIX %+ _EC_KEY_new %xdefine EC_KEY_new_by_curve_name BORINGSSL_PREFIX %+ _EC_KEY_new_by_curve_name %xdefine EC_KEY_new_method BORINGSSL_PREFIX %+ _EC_KEY_new_method %xdefine EC_KEY_oct2key BORINGSSL_PREFIX %+ _EC_KEY_oct2key %xdefine EC_KEY_oct2priv BORINGSSL_PREFIX %+ _EC_KEY_oct2priv %xdefine EC_KEY_parse_curve_name BORINGSSL_PREFIX %+ _EC_KEY_parse_curve_name %xdefine EC_KEY_parse_parameters BORINGSSL_PREFIX %+ _EC_KEY_parse_parameters %xdefine EC_KEY_parse_private_key BORINGSSL_PREFIX %+ _EC_KEY_parse_private_key %xdefine EC_KEY_priv2buf BORINGSSL_PREFIX %+ _EC_KEY_priv2buf %xdefine EC_KEY_priv2oct BORINGSSL_PREFIX %+ _EC_KEY_priv2oct %xdefine EC_KEY_set_asn1_flag BORINGSSL_PREFIX %+ _EC_KEY_set_asn1_flag %xdefine EC_KEY_set_conv_form BORINGSSL_PREFIX %+ _EC_KEY_set_conv_form %xdefine EC_KEY_set_enc_flags BORINGSSL_PREFIX %+ _EC_KEY_set_enc_flags %xdefine EC_KEY_set_ex_data BORINGSSL_PREFIX %+ _EC_KEY_set_ex_data %xdefine EC_KEY_set_group BORINGSSL_PREFIX %+ _EC_KEY_set_group %xdefine EC_KEY_set_private_key BORINGSSL_PREFIX %+ _EC_KEY_set_private_key %xdefine EC_KEY_set_public_key BORINGSSL_PREFIX %+ _EC_KEY_set_public_key %xdefine EC_KEY_set_public_key_affine_coordinates BORINGSSL_PREFIX %+ _EC_KEY_set_public_key_affine_coordinates %xdefine EC_KEY_up_ref BORINGSSL_PREFIX %+ _EC_KEY_up_ref 
%xdefine EC_METHOD_get_field_type BORINGSSL_PREFIX %+ _EC_METHOD_get_field_type %xdefine EC_POINT_add BORINGSSL_PREFIX %+ _EC_POINT_add %xdefine EC_POINT_clear_free BORINGSSL_PREFIX %+ _EC_POINT_clear_free %xdefine EC_POINT_cmp BORINGSSL_PREFIX %+ _EC_POINT_cmp %xdefine EC_POINT_copy BORINGSSL_PREFIX %+ _EC_POINT_copy %xdefine EC_POINT_dbl BORINGSSL_PREFIX %+ _EC_POINT_dbl %xdefine EC_POINT_dup BORINGSSL_PREFIX %+ _EC_POINT_dup %xdefine EC_POINT_free BORINGSSL_PREFIX %+ _EC_POINT_free %xdefine EC_POINT_get_affine_coordinates BORINGSSL_PREFIX %+ _EC_POINT_get_affine_coordinates %xdefine EC_POINT_get_affine_coordinates_GFp BORINGSSL_PREFIX %+ _EC_POINT_get_affine_coordinates_GFp %xdefine EC_POINT_invert BORINGSSL_PREFIX %+ _EC_POINT_invert %xdefine EC_POINT_is_at_infinity BORINGSSL_PREFIX %+ _EC_POINT_is_at_infinity %xdefine EC_POINT_is_on_curve BORINGSSL_PREFIX %+ _EC_POINT_is_on_curve %xdefine EC_POINT_mul BORINGSSL_PREFIX %+ _EC_POINT_mul %xdefine EC_POINT_new BORINGSSL_PREFIX %+ _EC_POINT_new %xdefine EC_POINT_oct2point BORINGSSL_PREFIX %+ _EC_POINT_oct2point %xdefine EC_POINT_point2buf BORINGSSL_PREFIX %+ _EC_POINT_point2buf %xdefine EC_POINT_point2cbb BORINGSSL_PREFIX %+ _EC_POINT_point2cbb %xdefine EC_POINT_point2oct BORINGSSL_PREFIX %+ _EC_POINT_point2oct %xdefine EC_POINT_set_affine_coordinates BORINGSSL_PREFIX %+ _EC_POINT_set_affine_coordinates %xdefine EC_POINT_set_affine_coordinates_GFp BORINGSSL_PREFIX %+ _EC_POINT_set_affine_coordinates_GFp %xdefine EC_POINT_set_compressed_coordinates_GFp BORINGSSL_PREFIX %+ _EC_POINT_set_compressed_coordinates_GFp %xdefine EC_POINT_set_to_infinity BORINGSSL_PREFIX %+ _EC_POINT_set_to_infinity %xdefine EC_curve_nid2nist BORINGSSL_PREFIX %+ _EC_curve_nid2nist %xdefine EC_curve_nist2nid BORINGSSL_PREFIX %+ _EC_curve_nist2nid %xdefine EC_get_builtin_curves BORINGSSL_PREFIX %+ _EC_get_builtin_curves %xdefine EC_group_p224 BORINGSSL_PREFIX %+ _EC_group_p224 %xdefine EC_group_p256 BORINGSSL_PREFIX %+ _EC_group_p256 %xdefine 
EC_group_p384 BORINGSSL_PREFIX %+ _EC_group_p384 %xdefine EC_group_p521 BORINGSSL_PREFIX %+ _EC_group_p521 %xdefine EC_hash_to_curve_p256_xmd_sha256_sswu BORINGSSL_PREFIX %+ _EC_hash_to_curve_p256_xmd_sha256_sswu %xdefine EC_hash_to_curve_p384_xmd_sha384_sswu BORINGSSL_PREFIX %+ _EC_hash_to_curve_p384_xmd_sha384_sswu %xdefine ED25519_keypair BORINGSSL_PREFIX %+ _ED25519_keypair %xdefine ED25519_keypair_from_seed BORINGSSL_PREFIX %+ _ED25519_keypair_from_seed %xdefine ED25519_sign BORINGSSL_PREFIX %+ _ED25519_sign %xdefine ED25519_verify BORINGSSL_PREFIX %+ _ED25519_verify %xdefine EDIPARTYNAME_free BORINGSSL_PREFIX %+ _EDIPARTYNAME_free %xdefine EDIPARTYNAME_new BORINGSSL_PREFIX %+ _EDIPARTYNAME_new %xdefine ENGINE_free BORINGSSL_PREFIX %+ _ENGINE_free %xdefine ENGINE_get_ECDSA_method BORINGSSL_PREFIX %+ _ENGINE_get_ECDSA_method %xdefine ENGINE_get_RSA_method BORINGSSL_PREFIX %+ _ENGINE_get_RSA_method %xdefine ENGINE_load_builtin_engines BORINGSSL_PREFIX %+ _ENGINE_load_builtin_engines %xdefine ENGINE_new BORINGSSL_PREFIX %+ _ENGINE_new %xdefine ENGINE_register_all_complete BORINGSSL_PREFIX %+ _ENGINE_register_all_complete %xdefine ENGINE_set_ECDSA_method BORINGSSL_PREFIX %+ _ENGINE_set_ECDSA_method %xdefine ENGINE_set_RSA_method BORINGSSL_PREFIX %+ _ENGINE_set_RSA_method %xdefine ERR_GET_LIB BORINGSSL_PREFIX %+ _ERR_GET_LIB %xdefine ERR_GET_REASON BORINGSSL_PREFIX %+ _ERR_GET_REASON %xdefine ERR_SAVE_STATE_free BORINGSSL_PREFIX %+ _ERR_SAVE_STATE_free %xdefine ERR_add_error_data BORINGSSL_PREFIX %+ _ERR_add_error_data %xdefine ERR_add_error_dataf BORINGSSL_PREFIX %+ _ERR_add_error_dataf %xdefine ERR_clear_error BORINGSSL_PREFIX %+ _ERR_clear_error %xdefine ERR_clear_system_error BORINGSSL_PREFIX %+ _ERR_clear_system_error %xdefine ERR_error_string BORINGSSL_PREFIX %+ _ERR_error_string %xdefine ERR_error_string_n BORINGSSL_PREFIX %+ _ERR_error_string_n %xdefine ERR_free_strings BORINGSSL_PREFIX %+ _ERR_free_strings %xdefine ERR_func_error_string BORINGSSL_PREFIX %+ 
_ERR_func_error_string %xdefine ERR_get_error BORINGSSL_PREFIX %+ _ERR_get_error %xdefine ERR_get_error_line BORINGSSL_PREFIX %+ _ERR_get_error_line %xdefine ERR_get_error_line_data BORINGSSL_PREFIX %+ _ERR_get_error_line_data %xdefine ERR_get_next_error_library BORINGSSL_PREFIX %+ _ERR_get_next_error_library %xdefine ERR_lib_error_string BORINGSSL_PREFIX %+ _ERR_lib_error_string %xdefine ERR_lib_symbol_name BORINGSSL_PREFIX %+ _ERR_lib_symbol_name %xdefine ERR_load_BIO_strings BORINGSSL_PREFIX %+ _ERR_load_BIO_strings %xdefine ERR_load_ERR_strings BORINGSSL_PREFIX %+ _ERR_load_ERR_strings %xdefine ERR_load_RAND_strings BORINGSSL_PREFIX %+ _ERR_load_RAND_strings %xdefine ERR_load_SSL_strings BORINGSSL_PREFIX %+ _ERR_load_SSL_strings %xdefine ERR_load_crypto_strings BORINGSSL_PREFIX %+ _ERR_load_crypto_strings %xdefine ERR_peek_error BORINGSSL_PREFIX %+ _ERR_peek_error %xdefine ERR_peek_error_line BORINGSSL_PREFIX %+ _ERR_peek_error_line %xdefine ERR_peek_error_line_data BORINGSSL_PREFIX %+ _ERR_peek_error_line_data %xdefine ERR_peek_last_error BORINGSSL_PREFIX %+ _ERR_peek_last_error %xdefine ERR_peek_last_error_line BORINGSSL_PREFIX %+ _ERR_peek_last_error_line %xdefine ERR_peek_last_error_line_data BORINGSSL_PREFIX %+ _ERR_peek_last_error_line_data %xdefine ERR_pop_to_mark BORINGSSL_PREFIX %+ _ERR_pop_to_mark %xdefine ERR_print_errors BORINGSSL_PREFIX %+ _ERR_print_errors %xdefine ERR_print_errors_cb BORINGSSL_PREFIX %+ _ERR_print_errors_cb %xdefine ERR_print_errors_fp BORINGSSL_PREFIX %+ _ERR_print_errors_fp %xdefine ERR_put_error BORINGSSL_PREFIX %+ _ERR_put_error %xdefine ERR_reason_error_string BORINGSSL_PREFIX %+ _ERR_reason_error_string %xdefine ERR_reason_symbol_name BORINGSSL_PREFIX %+ _ERR_reason_symbol_name %xdefine ERR_remove_state BORINGSSL_PREFIX %+ _ERR_remove_state %xdefine ERR_remove_thread_state BORINGSSL_PREFIX %+ _ERR_remove_thread_state %xdefine ERR_restore_state BORINGSSL_PREFIX %+ _ERR_restore_state %xdefine ERR_save_state BORINGSSL_PREFIX 
%+ _ERR_save_state %xdefine ERR_set_error_data BORINGSSL_PREFIX %+ _ERR_set_error_data %xdefine ERR_set_mark BORINGSSL_PREFIX %+ _ERR_set_mark %xdefine EVP_AEAD_CTX_aead BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_aead %xdefine EVP_AEAD_CTX_cleanup BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_cleanup %xdefine EVP_AEAD_CTX_free BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_free %xdefine EVP_AEAD_CTX_get_iv BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_get_iv %xdefine EVP_AEAD_CTX_init BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_init %xdefine EVP_AEAD_CTX_init_with_direction BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_init_with_direction %xdefine EVP_AEAD_CTX_new BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_new %xdefine EVP_AEAD_CTX_open BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_open %xdefine EVP_AEAD_CTX_open_gather BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_open_gather %xdefine EVP_AEAD_CTX_seal BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_seal %xdefine EVP_AEAD_CTX_seal_scatter BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_seal_scatter %xdefine EVP_AEAD_CTX_tag_len BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_tag_len %xdefine EVP_AEAD_CTX_zero BORINGSSL_PREFIX %+ _EVP_AEAD_CTX_zero %xdefine EVP_AEAD_key_length BORINGSSL_PREFIX %+ _EVP_AEAD_key_length %xdefine EVP_AEAD_max_overhead BORINGSSL_PREFIX %+ _EVP_AEAD_max_overhead %xdefine EVP_AEAD_max_tag_len BORINGSSL_PREFIX %+ _EVP_AEAD_max_tag_len %xdefine EVP_AEAD_nonce_length BORINGSSL_PREFIX %+ _EVP_AEAD_nonce_length %xdefine EVP_BytesToKey BORINGSSL_PREFIX %+ _EVP_BytesToKey %xdefine EVP_CIPHER_CTX_block_size BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_block_size %xdefine EVP_CIPHER_CTX_cipher BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_cipher %xdefine EVP_CIPHER_CTX_cleanup BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_cleanup %xdefine EVP_CIPHER_CTX_copy BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_copy %xdefine EVP_CIPHER_CTX_ctrl BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_ctrl %xdefine EVP_CIPHER_CTX_encrypting BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_encrypting %xdefine EVP_CIPHER_CTX_flags BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_flags %xdefine EVP_CIPHER_CTX_free BORINGSSL_PREFIX %+ 
_EVP_CIPHER_CTX_free %xdefine EVP_CIPHER_CTX_get_app_data BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_get_app_data %xdefine EVP_CIPHER_CTX_init BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_init %xdefine EVP_CIPHER_CTX_iv_length BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_iv_length %xdefine EVP_CIPHER_CTX_key_length BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_key_length %xdefine EVP_CIPHER_CTX_mode BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_mode %xdefine EVP_CIPHER_CTX_new BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_new %xdefine EVP_CIPHER_CTX_nid BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_nid %xdefine EVP_CIPHER_CTX_reset BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_reset %xdefine EVP_CIPHER_CTX_set_app_data BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_set_app_data %xdefine EVP_CIPHER_CTX_set_flags BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_set_flags %xdefine EVP_CIPHER_CTX_set_key_length BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_set_key_length %xdefine EVP_CIPHER_CTX_set_padding BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_set_padding %xdefine EVP_CIPHER_block_size BORINGSSL_PREFIX %+ _EVP_CIPHER_block_size %xdefine EVP_CIPHER_flags BORINGSSL_PREFIX %+ _EVP_CIPHER_flags %xdefine EVP_CIPHER_iv_length BORINGSSL_PREFIX %+ _EVP_CIPHER_iv_length %xdefine EVP_CIPHER_key_length BORINGSSL_PREFIX %+ _EVP_CIPHER_key_length %xdefine EVP_CIPHER_mode BORINGSSL_PREFIX %+ _EVP_CIPHER_mode %xdefine EVP_CIPHER_nid BORINGSSL_PREFIX %+ _EVP_CIPHER_nid %xdefine EVP_Cipher BORINGSSL_PREFIX %+ _EVP_Cipher %xdefine EVP_CipherFinal BORINGSSL_PREFIX %+ _EVP_CipherFinal %xdefine EVP_CipherFinal_ex BORINGSSL_PREFIX %+ _EVP_CipherFinal_ex %xdefine EVP_CipherInit BORINGSSL_PREFIX %+ _EVP_CipherInit %xdefine EVP_CipherInit_ex BORINGSSL_PREFIX %+ _EVP_CipherInit_ex %xdefine EVP_CipherUpdate BORINGSSL_PREFIX %+ _EVP_CipherUpdate %xdefine EVP_DecodeBase64 BORINGSSL_PREFIX %+ _EVP_DecodeBase64 %xdefine EVP_DecodeBlock BORINGSSL_PREFIX %+ _EVP_DecodeBlock %xdefine EVP_DecodeFinal BORINGSSL_PREFIX %+ _EVP_DecodeFinal %xdefine EVP_DecodeInit BORINGSSL_PREFIX %+ _EVP_DecodeInit %xdefine 
EVP_DecodeUpdate BORINGSSL_PREFIX %+ _EVP_DecodeUpdate %xdefine EVP_DecodedLength BORINGSSL_PREFIX %+ _EVP_DecodedLength %xdefine EVP_DecryptFinal BORINGSSL_PREFIX %+ _EVP_DecryptFinal %xdefine EVP_DecryptFinal_ex BORINGSSL_PREFIX %+ _EVP_DecryptFinal_ex %xdefine EVP_DecryptInit BORINGSSL_PREFIX %+ _EVP_DecryptInit %xdefine EVP_DecryptInit_ex BORINGSSL_PREFIX %+ _EVP_DecryptInit_ex %xdefine EVP_DecryptUpdate BORINGSSL_PREFIX %+ _EVP_DecryptUpdate %xdefine EVP_Digest BORINGSSL_PREFIX %+ _EVP_Digest %xdefine EVP_DigestFinal BORINGSSL_PREFIX %+ _EVP_DigestFinal %xdefine EVP_DigestFinalXOF BORINGSSL_PREFIX %+ _EVP_DigestFinalXOF %xdefine EVP_DigestFinal_ex BORINGSSL_PREFIX %+ _EVP_DigestFinal_ex %xdefine EVP_DigestInit BORINGSSL_PREFIX %+ _EVP_DigestInit %xdefine EVP_DigestInit_ex BORINGSSL_PREFIX %+ _EVP_DigestInit_ex %xdefine EVP_DigestSign BORINGSSL_PREFIX %+ _EVP_DigestSign %xdefine EVP_DigestSignFinal BORINGSSL_PREFIX %+ _EVP_DigestSignFinal %xdefine EVP_DigestSignInit BORINGSSL_PREFIX %+ _EVP_DigestSignInit %xdefine EVP_DigestSignUpdate BORINGSSL_PREFIX %+ _EVP_DigestSignUpdate %xdefine EVP_DigestUpdate BORINGSSL_PREFIX %+ _EVP_DigestUpdate %xdefine EVP_DigestVerify BORINGSSL_PREFIX %+ _EVP_DigestVerify %xdefine EVP_DigestVerifyFinal BORINGSSL_PREFIX %+ _EVP_DigestVerifyFinal %xdefine EVP_DigestVerifyInit BORINGSSL_PREFIX %+ _EVP_DigestVerifyInit %xdefine EVP_DigestVerifyUpdate BORINGSSL_PREFIX %+ _EVP_DigestVerifyUpdate %xdefine EVP_ENCODE_CTX_free BORINGSSL_PREFIX %+ _EVP_ENCODE_CTX_free %xdefine EVP_ENCODE_CTX_new BORINGSSL_PREFIX %+ _EVP_ENCODE_CTX_new %xdefine EVP_EncodeBlock BORINGSSL_PREFIX %+ _EVP_EncodeBlock %xdefine EVP_EncodeFinal BORINGSSL_PREFIX %+ _EVP_EncodeFinal %xdefine EVP_EncodeInit BORINGSSL_PREFIX %+ _EVP_EncodeInit %xdefine EVP_EncodeUpdate BORINGSSL_PREFIX %+ _EVP_EncodeUpdate %xdefine EVP_EncodedLength BORINGSSL_PREFIX %+ _EVP_EncodedLength %xdefine EVP_EncryptFinal BORINGSSL_PREFIX %+ _EVP_EncryptFinal %xdefine EVP_EncryptFinal_ex 
BORINGSSL_PREFIX %+ _EVP_EncryptFinal_ex %xdefine EVP_EncryptInit BORINGSSL_PREFIX %+ _EVP_EncryptInit %xdefine EVP_EncryptInit_ex BORINGSSL_PREFIX %+ _EVP_EncryptInit_ex %xdefine EVP_EncryptUpdate BORINGSSL_PREFIX %+ _EVP_EncryptUpdate %xdefine EVP_HPKE_AEAD_aead BORINGSSL_PREFIX %+ _EVP_HPKE_AEAD_aead %xdefine EVP_HPKE_AEAD_id BORINGSSL_PREFIX %+ _EVP_HPKE_AEAD_id %xdefine EVP_HPKE_CTX_aead BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_aead %xdefine EVP_HPKE_CTX_cleanup BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_cleanup %xdefine EVP_HPKE_CTX_export BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_export %xdefine EVP_HPKE_CTX_free BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_free %xdefine EVP_HPKE_CTX_kdf BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_kdf %xdefine EVP_HPKE_CTX_kem BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_kem %xdefine EVP_HPKE_CTX_max_overhead BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_max_overhead %xdefine EVP_HPKE_CTX_new BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_new %xdefine EVP_HPKE_CTX_open BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_open %xdefine EVP_HPKE_CTX_seal BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_seal %xdefine EVP_HPKE_CTX_setup_auth_recipient BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_setup_auth_recipient %xdefine EVP_HPKE_CTX_setup_auth_sender BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_setup_auth_sender %xdefine EVP_HPKE_CTX_setup_auth_sender_with_seed_for_testing BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_setup_auth_sender_with_seed_for_testing %xdefine EVP_HPKE_CTX_setup_recipient BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_setup_recipient %xdefine EVP_HPKE_CTX_setup_sender BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_setup_sender %xdefine EVP_HPKE_CTX_setup_sender_with_seed_for_testing BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_setup_sender_with_seed_for_testing %xdefine EVP_HPKE_CTX_zero BORINGSSL_PREFIX %+ _EVP_HPKE_CTX_zero %xdefine EVP_HPKE_KDF_hkdf_md BORINGSSL_PREFIX %+ _EVP_HPKE_KDF_hkdf_md %xdefine EVP_HPKE_KDF_id BORINGSSL_PREFIX %+ _EVP_HPKE_KDF_id %xdefine EVP_HPKE_KEM_enc_len BORINGSSL_PREFIX %+ _EVP_HPKE_KEM_enc_len %xdefine EVP_HPKE_KEM_id BORINGSSL_PREFIX %+ 
_EVP_HPKE_KEM_id %xdefine EVP_HPKE_KEM_private_key_len BORINGSSL_PREFIX %+ _EVP_HPKE_KEM_private_key_len %xdefine EVP_HPKE_KEM_public_key_len BORINGSSL_PREFIX %+ _EVP_HPKE_KEM_public_key_len %xdefine EVP_HPKE_KEY_cleanup BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_cleanup %xdefine EVP_HPKE_KEY_copy BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_copy %xdefine EVP_HPKE_KEY_free BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_free %xdefine EVP_HPKE_KEY_generate BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_generate %xdefine EVP_HPKE_KEY_init BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_init %xdefine EVP_HPKE_KEY_kem BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_kem %xdefine EVP_HPKE_KEY_move BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_move %xdefine EVP_HPKE_KEY_new BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_new %xdefine EVP_HPKE_KEY_private_key BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_private_key %xdefine EVP_HPKE_KEY_public_key BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_public_key %xdefine EVP_HPKE_KEY_zero BORINGSSL_PREFIX %+ _EVP_HPKE_KEY_zero %xdefine EVP_MD_CTX_block_size BORINGSSL_PREFIX %+ _EVP_MD_CTX_block_size %xdefine EVP_MD_CTX_cleanse BORINGSSL_PREFIX %+ _EVP_MD_CTX_cleanse %xdefine EVP_MD_CTX_cleanup BORINGSSL_PREFIX %+ _EVP_MD_CTX_cleanup %xdefine EVP_MD_CTX_copy BORINGSSL_PREFIX %+ _EVP_MD_CTX_copy %xdefine EVP_MD_CTX_copy_ex BORINGSSL_PREFIX %+ _EVP_MD_CTX_copy_ex %xdefine EVP_MD_CTX_create BORINGSSL_PREFIX %+ _EVP_MD_CTX_create %xdefine EVP_MD_CTX_destroy BORINGSSL_PREFIX %+ _EVP_MD_CTX_destroy %xdefine EVP_MD_CTX_free BORINGSSL_PREFIX %+ _EVP_MD_CTX_free %xdefine EVP_MD_CTX_get0_md BORINGSSL_PREFIX %+ _EVP_MD_CTX_get0_md %xdefine EVP_MD_CTX_init BORINGSSL_PREFIX %+ _EVP_MD_CTX_init %xdefine EVP_MD_CTX_md BORINGSSL_PREFIX %+ _EVP_MD_CTX_md %xdefine EVP_MD_CTX_move BORINGSSL_PREFIX %+ _EVP_MD_CTX_move %xdefine EVP_MD_CTX_new BORINGSSL_PREFIX %+ _EVP_MD_CTX_new %xdefine EVP_MD_CTX_reset BORINGSSL_PREFIX %+ _EVP_MD_CTX_reset %xdefine EVP_MD_CTX_set_flags BORINGSSL_PREFIX %+ _EVP_MD_CTX_set_flags %xdefine EVP_MD_CTX_size BORINGSSL_PREFIX %+ _EVP_MD_CTX_size 
%xdefine EVP_MD_CTX_type BORINGSSL_PREFIX %+ _EVP_MD_CTX_type %xdefine EVP_MD_block_size BORINGSSL_PREFIX %+ _EVP_MD_block_size %xdefine EVP_MD_flags BORINGSSL_PREFIX %+ _EVP_MD_flags %xdefine EVP_MD_meth_get_flags BORINGSSL_PREFIX %+ _EVP_MD_meth_get_flags %xdefine EVP_MD_nid BORINGSSL_PREFIX %+ _EVP_MD_nid %xdefine EVP_MD_size BORINGSSL_PREFIX %+ _EVP_MD_size %xdefine EVP_MD_type BORINGSSL_PREFIX %+ _EVP_MD_type %xdefine EVP_PBE_scrypt BORINGSSL_PREFIX %+ _EVP_PBE_scrypt %xdefine EVP_PKCS82PKEY BORINGSSL_PREFIX %+ _EVP_PKCS82PKEY %xdefine EVP_PKEY2PKCS8 BORINGSSL_PREFIX %+ _EVP_PKEY2PKCS8 %xdefine EVP_PKEY_CTX_add1_hkdf_info BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_add1_hkdf_info %xdefine EVP_PKEY_CTX_ctrl BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_ctrl %xdefine EVP_PKEY_CTX_dup BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_dup %xdefine EVP_PKEY_CTX_free BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_free %xdefine EVP_PKEY_CTX_get0_pkey BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get0_pkey %xdefine EVP_PKEY_CTX_get0_rsa_oaep_label BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get0_rsa_oaep_label %xdefine EVP_PKEY_CTX_get_rsa_mgf1_md BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get_rsa_mgf1_md %xdefine EVP_PKEY_CTX_get_rsa_oaep_md BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get_rsa_oaep_md %xdefine EVP_PKEY_CTX_get_rsa_padding BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get_rsa_padding %xdefine EVP_PKEY_CTX_get_rsa_pss_saltlen BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get_rsa_pss_saltlen %xdefine EVP_PKEY_CTX_get_signature_md BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_get_signature_md %xdefine EVP_PKEY_CTX_hkdf_mode BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_hkdf_mode %xdefine EVP_PKEY_CTX_new BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_new %xdefine EVP_PKEY_CTX_new_id BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_new_id %xdefine EVP_PKEY_CTX_set0_rsa_oaep_label BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set0_rsa_oaep_label %xdefine EVP_PKEY_CTX_set1_hkdf_key BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set1_hkdf_key %xdefine EVP_PKEY_CTX_set1_hkdf_salt BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set1_hkdf_salt %xdefine 
EVP_PKEY_CTX_set_dh_pad BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_dh_pad %xdefine EVP_PKEY_CTX_set_dsa_paramgen_bits BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_dsa_paramgen_bits %xdefine EVP_PKEY_CTX_set_dsa_paramgen_q_bits BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_dsa_paramgen_q_bits %xdefine EVP_PKEY_CTX_set_ec_param_enc BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_ec_param_enc %xdefine EVP_PKEY_CTX_set_ec_paramgen_curve_nid BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_ec_paramgen_curve_nid %xdefine EVP_PKEY_CTX_set_hkdf_md BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_hkdf_md %xdefine EVP_PKEY_CTX_set_rsa_keygen_bits BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_keygen_bits %xdefine EVP_PKEY_CTX_set_rsa_keygen_pubexp BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_keygen_pubexp %xdefine EVP_PKEY_CTX_set_rsa_mgf1_md BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_mgf1_md %xdefine EVP_PKEY_CTX_set_rsa_oaep_md BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_oaep_md %xdefine EVP_PKEY_CTX_set_rsa_padding BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_padding %xdefine EVP_PKEY_CTX_set_rsa_pss_keygen_md BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_pss_keygen_md %xdefine EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md %xdefine EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen %xdefine EVP_PKEY_CTX_set_rsa_pss_saltlen BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_rsa_pss_saltlen %xdefine EVP_PKEY_CTX_set_signature_md BORINGSSL_PREFIX %+ _EVP_PKEY_CTX_set_signature_md %xdefine EVP_PKEY_assign BORINGSSL_PREFIX %+ _EVP_PKEY_assign %xdefine EVP_PKEY_assign_DH BORINGSSL_PREFIX %+ _EVP_PKEY_assign_DH %xdefine EVP_PKEY_assign_DSA BORINGSSL_PREFIX %+ _EVP_PKEY_assign_DSA %xdefine EVP_PKEY_assign_EC_KEY BORINGSSL_PREFIX %+ _EVP_PKEY_assign_EC_KEY %xdefine EVP_PKEY_assign_RSA BORINGSSL_PREFIX %+ _EVP_PKEY_assign_RSA %xdefine EVP_PKEY_base_id BORINGSSL_PREFIX %+ _EVP_PKEY_base_id %xdefine EVP_PKEY_bits BORINGSSL_PREFIX %+ _EVP_PKEY_bits %xdefine 
EVP_PKEY_cmp BORINGSSL_PREFIX %+ _EVP_PKEY_cmp %xdefine EVP_PKEY_cmp_parameters BORINGSSL_PREFIX %+ _EVP_PKEY_cmp_parameters %xdefine EVP_PKEY_copy_parameters BORINGSSL_PREFIX %+ _EVP_PKEY_copy_parameters %xdefine EVP_PKEY_decrypt BORINGSSL_PREFIX %+ _EVP_PKEY_decrypt %xdefine EVP_PKEY_decrypt_init BORINGSSL_PREFIX %+ _EVP_PKEY_decrypt_init %xdefine EVP_PKEY_derive BORINGSSL_PREFIX %+ _EVP_PKEY_derive %xdefine EVP_PKEY_derive_init BORINGSSL_PREFIX %+ _EVP_PKEY_derive_init %xdefine EVP_PKEY_derive_set_peer BORINGSSL_PREFIX %+ _EVP_PKEY_derive_set_peer %xdefine EVP_PKEY_encrypt BORINGSSL_PREFIX %+ _EVP_PKEY_encrypt %xdefine EVP_PKEY_encrypt_init BORINGSSL_PREFIX %+ _EVP_PKEY_encrypt_init %xdefine EVP_PKEY_free BORINGSSL_PREFIX %+ _EVP_PKEY_free %xdefine EVP_PKEY_get0 BORINGSSL_PREFIX %+ _EVP_PKEY_get0 %xdefine EVP_PKEY_get0_DH BORINGSSL_PREFIX %+ _EVP_PKEY_get0_DH %xdefine EVP_PKEY_get0_DSA BORINGSSL_PREFIX %+ _EVP_PKEY_get0_DSA %xdefine EVP_PKEY_get0_EC_KEY BORINGSSL_PREFIX %+ _EVP_PKEY_get0_EC_KEY %xdefine EVP_PKEY_get0_RSA BORINGSSL_PREFIX %+ _EVP_PKEY_get0_RSA %xdefine EVP_PKEY_get1_DH BORINGSSL_PREFIX %+ _EVP_PKEY_get1_DH %xdefine EVP_PKEY_get1_DSA BORINGSSL_PREFIX %+ _EVP_PKEY_get1_DSA %xdefine EVP_PKEY_get1_EC_KEY BORINGSSL_PREFIX %+ _EVP_PKEY_get1_EC_KEY %xdefine EVP_PKEY_get1_RSA BORINGSSL_PREFIX %+ _EVP_PKEY_get1_RSA %xdefine EVP_PKEY_get1_tls_encodedpoint BORINGSSL_PREFIX %+ _EVP_PKEY_get1_tls_encodedpoint %xdefine EVP_PKEY_get_raw_private_key BORINGSSL_PREFIX %+ _EVP_PKEY_get_raw_private_key %xdefine EVP_PKEY_get_raw_public_key BORINGSSL_PREFIX %+ _EVP_PKEY_get_raw_public_key %xdefine EVP_PKEY_id BORINGSSL_PREFIX %+ _EVP_PKEY_id %xdefine EVP_PKEY_is_opaque BORINGSSL_PREFIX %+ _EVP_PKEY_is_opaque %xdefine EVP_PKEY_keygen BORINGSSL_PREFIX %+ _EVP_PKEY_keygen %xdefine EVP_PKEY_keygen_init BORINGSSL_PREFIX %+ _EVP_PKEY_keygen_init %xdefine EVP_PKEY_missing_parameters BORINGSSL_PREFIX %+ _EVP_PKEY_missing_parameters %xdefine EVP_PKEY_new BORINGSSL_PREFIX %+ 
_EVP_PKEY_new %xdefine EVP_PKEY_new_raw_private_key BORINGSSL_PREFIX %+ _EVP_PKEY_new_raw_private_key %xdefine EVP_PKEY_new_raw_public_key BORINGSSL_PREFIX %+ _EVP_PKEY_new_raw_public_key %xdefine EVP_PKEY_paramgen BORINGSSL_PREFIX %+ _EVP_PKEY_paramgen %xdefine EVP_PKEY_paramgen_init BORINGSSL_PREFIX %+ _EVP_PKEY_paramgen_init %xdefine EVP_PKEY_print_params BORINGSSL_PREFIX %+ _EVP_PKEY_print_params %xdefine EVP_PKEY_print_private BORINGSSL_PREFIX %+ _EVP_PKEY_print_private %xdefine EVP_PKEY_print_public BORINGSSL_PREFIX %+ _EVP_PKEY_print_public %xdefine EVP_PKEY_set1_DH BORINGSSL_PREFIX %+ _EVP_PKEY_set1_DH %xdefine EVP_PKEY_set1_DSA BORINGSSL_PREFIX %+ _EVP_PKEY_set1_DSA %xdefine EVP_PKEY_set1_EC_KEY BORINGSSL_PREFIX %+ _EVP_PKEY_set1_EC_KEY %xdefine EVP_PKEY_set1_RSA BORINGSSL_PREFIX %+ _EVP_PKEY_set1_RSA %xdefine EVP_PKEY_set1_tls_encodedpoint BORINGSSL_PREFIX %+ _EVP_PKEY_set1_tls_encodedpoint %xdefine EVP_PKEY_set_type BORINGSSL_PREFIX %+ _EVP_PKEY_set_type %xdefine EVP_PKEY_sign BORINGSSL_PREFIX %+ _EVP_PKEY_sign %xdefine EVP_PKEY_sign_init BORINGSSL_PREFIX %+ _EVP_PKEY_sign_init %xdefine EVP_PKEY_size BORINGSSL_PREFIX %+ _EVP_PKEY_size %xdefine EVP_PKEY_type BORINGSSL_PREFIX %+ _EVP_PKEY_type %xdefine EVP_PKEY_up_ref BORINGSSL_PREFIX %+ _EVP_PKEY_up_ref %xdefine EVP_PKEY_verify BORINGSSL_PREFIX %+ _EVP_PKEY_verify %xdefine EVP_PKEY_verify_init BORINGSSL_PREFIX %+ _EVP_PKEY_verify_init %xdefine EVP_PKEY_verify_recover BORINGSSL_PREFIX %+ _EVP_PKEY_verify_recover %xdefine EVP_PKEY_verify_recover_init BORINGSSL_PREFIX %+ _EVP_PKEY_verify_recover_init %xdefine EVP_SignFinal BORINGSSL_PREFIX %+ _EVP_SignFinal %xdefine EVP_SignInit BORINGSSL_PREFIX %+ _EVP_SignInit %xdefine EVP_SignInit_ex BORINGSSL_PREFIX %+ _EVP_SignInit_ex %xdefine EVP_SignUpdate BORINGSSL_PREFIX %+ _EVP_SignUpdate %xdefine EVP_VerifyFinal BORINGSSL_PREFIX %+ _EVP_VerifyFinal %xdefine EVP_VerifyInit BORINGSSL_PREFIX %+ _EVP_VerifyInit %xdefine EVP_VerifyInit_ex BORINGSSL_PREFIX %+ 
_EVP_VerifyInit_ex %xdefine EVP_VerifyUpdate BORINGSSL_PREFIX %+ _EVP_VerifyUpdate %xdefine EVP_add_cipher_alias BORINGSSL_PREFIX %+ _EVP_add_cipher_alias %xdefine EVP_add_digest BORINGSSL_PREFIX %+ _EVP_add_digest %xdefine EVP_aead_aes_128_cbc_sha1_tls BORINGSSL_PREFIX %+ _EVP_aead_aes_128_cbc_sha1_tls %xdefine EVP_aead_aes_128_cbc_sha1_tls_implicit_iv BORINGSSL_PREFIX %+ _EVP_aead_aes_128_cbc_sha1_tls_implicit_iv %xdefine EVP_aead_aes_128_cbc_sha256_tls BORINGSSL_PREFIX %+ _EVP_aead_aes_128_cbc_sha256_tls %xdefine EVP_aead_aes_128_ccm_bluetooth BORINGSSL_PREFIX %+ _EVP_aead_aes_128_ccm_bluetooth %xdefine EVP_aead_aes_128_ccm_bluetooth_8 BORINGSSL_PREFIX %+ _EVP_aead_aes_128_ccm_bluetooth_8 %xdefine EVP_aead_aes_128_ccm_matter BORINGSSL_PREFIX %+ _EVP_aead_aes_128_ccm_matter %xdefine EVP_aead_aes_128_ctr_hmac_sha256 BORINGSSL_PREFIX %+ _EVP_aead_aes_128_ctr_hmac_sha256 %xdefine EVP_aead_aes_128_gcm BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm %xdefine EVP_aead_aes_128_gcm_randnonce BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_randnonce %xdefine EVP_aead_aes_128_gcm_siv BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_siv %xdefine EVP_aead_aes_128_gcm_tls12 BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_tls12 %xdefine EVP_aead_aes_128_gcm_tls13 BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_tls13 %xdefine EVP_aead_aes_192_gcm BORINGSSL_PREFIX %+ _EVP_aead_aes_192_gcm %xdefine EVP_aead_aes_256_cbc_sha1_tls BORINGSSL_PREFIX %+ _EVP_aead_aes_256_cbc_sha1_tls %xdefine EVP_aead_aes_256_cbc_sha1_tls_implicit_iv BORINGSSL_PREFIX %+ _EVP_aead_aes_256_cbc_sha1_tls_implicit_iv %xdefine EVP_aead_aes_256_ctr_hmac_sha256 BORINGSSL_PREFIX %+ _EVP_aead_aes_256_ctr_hmac_sha256 %xdefine EVP_aead_aes_256_gcm BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm %xdefine EVP_aead_aes_256_gcm_randnonce BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_randnonce %xdefine EVP_aead_aes_256_gcm_siv BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_siv %xdefine EVP_aead_aes_256_gcm_tls12 BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_tls12 
%xdefine EVP_aead_aes_256_gcm_tls13 BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_tls13 %xdefine EVP_aead_chacha20_poly1305 BORINGSSL_PREFIX %+ _EVP_aead_chacha20_poly1305 %xdefine EVP_aead_des_ede3_cbc_sha1_tls BORINGSSL_PREFIX %+ _EVP_aead_des_ede3_cbc_sha1_tls %xdefine EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv BORINGSSL_PREFIX %+ _EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv %xdefine EVP_aead_xchacha20_poly1305 BORINGSSL_PREFIX %+ _EVP_aead_xchacha20_poly1305 %xdefine EVP_aes_128_cbc BORINGSSL_PREFIX %+ _EVP_aes_128_cbc %xdefine EVP_aes_128_ctr BORINGSSL_PREFIX %+ _EVP_aes_128_ctr %xdefine EVP_aes_128_ecb BORINGSSL_PREFIX %+ _EVP_aes_128_ecb %xdefine EVP_aes_128_gcm BORINGSSL_PREFIX %+ _EVP_aes_128_gcm %xdefine EVP_aes_128_ofb BORINGSSL_PREFIX %+ _EVP_aes_128_ofb %xdefine EVP_aes_192_cbc BORINGSSL_PREFIX %+ _EVP_aes_192_cbc %xdefine EVP_aes_192_ctr BORINGSSL_PREFIX %+ _EVP_aes_192_ctr %xdefine EVP_aes_192_ecb BORINGSSL_PREFIX %+ _EVP_aes_192_ecb %xdefine EVP_aes_192_gcm BORINGSSL_PREFIX %+ _EVP_aes_192_gcm %xdefine EVP_aes_192_ofb BORINGSSL_PREFIX %+ _EVP_aes_192_ofb %xdefine EVP_aes_256_cbc BORINGSSL_PREFIX %+ _EVP_aes_256_cbc %xdefine EVP_aes_256_ctr BORINGSSL_PREFIX %+ _EVP_aes_256_ctr %xdefine EVP_aes_256_ecb BORINGSSL_PREFIX %+ _EVP_aes_256_ecb %xdefine EVP_aes_256_gcm BORINGSSL_PREFIX %+ _EVP_aes_256_gcm %xdefine EVP_aes_256_ofb BORINGSSL_PREFIX %+ _EVP_aes_256_ofb %xdefine EVP_blake2b256 BORINGSSL_PREFIX %+ _EVP_blake2b256 %xdefine EVP_cleanup BORINGSSL_PREFIX %+ _EVP_cleanup %xdefine EVP_des_cbc BORINGSSL_PREFIX %+ _EVP_des_cbc %xdefine EVP_des_ecb BORINGSSL_PREFIX %+ _EVP_des_ecb %xdefine EVP_des_ede BORINGSSL_PREFIX %+ _EVP_des_ede %xdefine EVP_des_ede3 BORINGSSL_PREFIX %+ _EVP_des_ede3 %xdefine EVP_des_ede3_cbc BORINGSSL_PREFIX %+ _EVP_des_ede3_cbc %xdefine EVP_des_ede3_ecb BORINGSSL_PREFIX %+ _EVP_des_ede3_ecb %xdefine EVP_des_ede_cbc BORINGSSL_PREFIX %+ _EVP_des_ede_cbc %xdefine EVP_enc_null BORINGSSL_PREFIX %+ _EVP_enc_null %xdefine EVP_get_cipherbyname 
BORINGSSL_PREFIX %+ _EVP_get_cipherbyname %xdefine EVP_get_cipherbynid BORINGSSL_PREFIX %+ _EVP_get_cipherbynid %xdefine EVP_get_digestbyname BORINGSSL_PREFIX %+ _EVP_get_digestbyname %xdefine EVP_get_digestbynid BORINGSSL_PREFIX %+ _EVP_get_digestbynid %xdefine EVP_get_digestbyobj BORINGSSL_PREFIX %+ _EVP_get_digestbyobj %xdefine EVP_has_aes_hardware BORINGSSL_PREFIX %+ _EVP_has_aes_hardware %xdefine EVP_hpke_aes_128_gcm BORINGSSL_PREFIX %+ _EVP_hpke_aes_128_gcm %xdefine EVP_hpke_aes_256_gcm BORINGSSL_PREFIX %+ _EVP_hpke_aes_256_gcm %xdefine EVP_hpke_chacha20_poly1305 BORINGSSL_PREFIX %+ _EVP_hpke_chacha20_poly1305 %xdefine EVP_hpke_hkdf_sha256 BORINGSSL_PREFIX %+ _EVP_hpke_hkdf_sha256 %xdefine EVP_hpke_p256_hkdf_sha256 BORINGSSL_PREFIX %+ _EVP_hpke_p256_hkdf_sha256 %xdefine EVP_hpke_x25519_hkdf_sha256 BORINGSSL_PREFIX %+ _EVP_hpke_x25519_hkdf_sha256 %xdefine EVP_marshal_digest_algorithm BORINGSSL_PREFIX %+ _EVP_marshal_digest_algorithm %xdefine EVP_marshal_private_key BORINGSSL_PREFIX %+ _EVP_marshal_private_key %xdefine EVP_marshal_public_key BORINGSSL_PREFIX %+ _EVP_marshal_public_key %xdefine EVP_md4 BORINGSSL_PREFIX %+ _EVP_md4 %xdefine EVP_md5 BORINGSSL_PREFIX %+ _EVP_md5 %xdefine EVP_md5_sha1 BORINGSSL_PREFIX %+ _EVP_md5_sha1 %xdefine EVP_parse_digest_algorithm BORINGSSL_PREFIX %+ _EVP_parse_digest_algorithm %xdefine EVP_parse_private_key BORINGSSL_PREFIX %+ _EVP_parse_private_key %xdefine EVP_parse_public_key BORINGSSL_PREFIX %+ _EVP_parse_public_key %xdefine EVP_rc2_40_cbc BORINGSSL_PREFIX %+ _EVP_rc2_40_cbc %xdefine EVP_rc2_cbc BORINGSSL_PREFIX %+ _EVP_rc2_cbc %xdefine EVP_rc4 BORINGSSL_PREFIX %+ _EVP_rc4 %xdefine EVP_sha1 BORINGSSL_PREFIX %+ _EVP_sha1 %xdefine EVP_sha1_final_with_secret_suffix BORINGSSL_PREFIX %+ _EVP_sha1_final_with_secret_suffix %xdefine EVP_sha224 BORINGSSL_PREFIX %+ _EVP_sha224 %xdefine EVP_sha256 BORINGSSL_PREFIX %+ _EVP_sha256 %xdefine EVP_sha256_final_with_secret_suffix BORINGSSL_PREFIX %+ _EVP_sha256_final_with_secret_suffix 
%xdefine EVP_sha384 BORINGSSL_PREFIX %+ _EVP_sha384 %xdefine EVP_sha512 BORINGSSL_PREFIX %+ _EVP_sha512 %xdefine EVP_sha512_256 BORINGSSL_PREFIX %+ _EVP_sha512_256 %xdefine EVP_tls_cbc_copy_mac BORINGSSL_PREFIX %+ _EVP_tls_cbc_copy_mac %xdefine EVP_tls_cbc_digest_record BORINGSSL_PREFIX %+ _EVP_tls_cbc_digest_record %xdefine EVP_tls_cbc_record_digest_supported BORINGSSL_PREFIX %+ _EVP_tls_cbc_record_digest_supported %xdefine EVP_tls_cbc_remove_padding BORINGSSL_PREFIX %+ _EVP_tls_cbc_remove_padding %xdefine EXTENDED_KEY_USAGE_free BORINGSSL_PREFIX %+ _EXTENDED_KEY_USAGE_free %xdefine EXTENDED_KEY_USAGE_it BORINGSSL_PREFIX %+ _EXTENDED_KEY_USAGE_it %xdefine EXTENDED_KEY_USAGE_new BORINGSSL_PREFIX %+ _EXTENDED_KEY_USAGE_new %xdefine FIPS_mode BORINGSSL_PREFIX %+ _FIPS_mode %xdefine FIPS_mode_set BORINGSSL_PREFIX %+ _FIPS_mode_set %xdefine FIPS_module_name BORINGSSL_PREFIX %+ _FIPS_module_name %xdefine FIPS_query_algorithm_status BORINGSSL_PREFIX %+ _FIPS_query_algorithm_status %xdefine FIPS_read_counter BORINGSSL_PREFIX %+ _FIPS_read_counter %xdefine FIPS_service_indicator_after_call BORINGSSL_PREFIX %+ _FIPS_service_indicator_after_call %xdefine FIPS_service_indicator_before_call BORINGSSL_PREFIX %+ _FIPS_service_indicator_before_call %xdefine FIPS_version BORINGSSL_PREFIX %+ _FIPS_version %xdefine GENERAL_NAMES_free BORINGSSL_PREFIX %+ _GENERAL_NAMES_free %xdefine GENERAL_NAMES_it BORINGSSL_PREFIX %+ _GENERAL_NAMES_it %xdefine GENERAL_NAMES_new BORINGSSL_PREFIX %+ _GENERAL_NAMES_new %xdefine GENERAL_NAME_cmp BORINGSSL_PREFIX %+ _GENERAL_NAME_cmp %xdefine GENERAL_NAME_dup BORINGSSL_PREFIX %+ _GENERAL_NAME_dup %xdefine GENERAL_NAME_free BORINGSSL_PREFIX %+ _GENERAL_NAME_free %xdefine GENERAL_NAME_get0_otherName BORINGSSL_PREFIX %+ _GENERAL_NAME_get0_otherName %xdefine GENERAL_NAME_get0_value BORINGSSL_PREFIX %+ _GENERAL_NAME_get0_value %xdefine GENERAL_NAME_it BORINGSSL_PREFIX %+ _GENERAL_NAME_it %xdefine GENERAL_NAME_new BORINGSSL_PREFIX %+ _GENERAL_NAME_new 
%xdefine GENERAL_NAME_print BORINGSSL_PREFIX %+ _GENERAL_NAME_print %xdefine GENERAL_NAME_set0_othername BORINGSSL_PREFIX %+ _GENERAL_NAME_set0_othername %xdefine GENERAL_NAME_set0_value BORINGSSL_PREFIX %+ _GENERAL_NAME_set0_value %xdefine GENERAL_SUBTREE_free BORINGSSL_PREFIX %+ _GENERAL_SUBTREE_free %xdefine GENERAL_SUBTREE_new BORINGSSL_PREFIX %+ _GENERAL_SUBTREE_new %xdefine HKDF BORINGSSL_PREFIX %+ _HKDF %xdefine HKDF_expand BORINGSSL_PREFIX %+ _HKDF_expand %xdefine HKDF_extract BORINGSSL_PREFIX %+ _HKDF_extract %xdefine HMAC BORINGSSL_PREFIX %+ _HMAC %xdefine HMAC_CTX_cleanse BORINGSSL_PREFIX %+ _HMAC_CTX_cleanse %xdefine HMAC_CTX_cleanup BORINGSSL_PREFIX %+ _HMAC_CTX_cleanup %xdefine HMAC_CTX_copy BORINGSSL_PREFIX %+ _HMAC_CTX_copy %xdefine HMAC_CTX_copy_ex BORINGSSL_PREFIX %+ _HMAC_CTX_copy_ex %xdefine HMAC_CTX_free BORINGSSL_PREFIX %+ _HMAC_CTX_free %xdefine HMAC_CTX_get_md BORINGSSL_PREFIX %+ _HMAC_CTX_get_md %xdefine HMAC_CTX_init BORINGSSL_PREFIX %+ _HMAC_CTX_init %xdefine HMAC_CTX_new BORINGSSL_PREFIX %+ _HMAC_CTX_new %xdefine HMAC_CTX_reset BORINGSSL_PREFIX %+ _HMAC_CTX_reset %xdefine HMAC_Final BORINGSSL_PREFIX %+ _HMAC_Final %xdefine HMAC_Init BORINGSSL_PREFIX %+ _HMAC_Init %xdefine HMAC_Init_ex BORINGSSL_PREFIX %+ _HMAC_Init_ex %xdefine HMAC_Update BORINGSSL_PREFIX %+ _HMAC_Update %xdefine HMAC_size BORINGSSL_PREFIX %+ _HMAC_size %xdefine HRSS_decap BORINGSSL_PREFIX %+ _HRSS_decap %xdefine HRSS_encap BORINGSSL_PREFIX %+ _HRSS_encap %xdefine HRSS_generate_key BORINGSSL_PREFIX %+ _HRSS_generate_key %xdefine HRSS_marshal_public_key BORINGSSL_PREFIX %+ _HRSS_marshal_public_key %xdefine HRSS_parse_public_key BORINGSSL_PREFIX %+ _HRSS_parse_public_key %xdefine HRSS_poly3_invert BORINGSSL_PREFIX %+ _HRSS_poly3_invert %xdefine HRSS_poly3_mul BORINGSSL_PREFIX %+ _HRSS_poly3_mul %xdefine ISSUING_DIST_POINT_free BORINGSSL_PREFIX %+ _ISSUING_DIST_POINT_free %xdefine ISSUING_DIST_POINT_it BORINGSSL_PREFIX %+ _ISSUING_DIST_POINT_it %xdefine 
ISSUING_DIST_POINT_new BORINGSSL_PREFIX %+ _ISSUING_DIST_POINT_new %xdefine KYBER_decap BORINGSSL_PREFIX %+ _KYBER_decap %xdefine KYBER_encap BORINGSSL_PREFIX %+ _KYBER_encap %xdefine KYBER_encap_external_entropy BORINGSSL_PREFIX %+ _KYBER_encap_external_entropy %xdefine KYBER_generate_key BORINGSSL_PREFIX %+ _KYBER_generate_key %xdefine KYBER_generate_key_external_entropy BORINGSSL_PREFIX %+ _KYBER_generate_key_external_entropy %xdefine KYBER_marshal_private_key BORINGSSL_PREFIX %+ _KYBER_marshal_private_key %xdefine KYBER_marshal_public_key BORINGSSL_PREFIX %+ _KYBER_marshal_public_key %xdefine KYBER_parse_private_key BORINGSSL_PREFIX %+ _KYBER_parse_private_key %xdefine KYBER_parse_public_key BORINGSSL_PREFIX %+ _KYBER_parse_public_key %xdefine KYBER_public_from_private BORINGSSL_PREFIX %+ _KYBER_public_from_private %xdefine MD4 BORINGSSL_PREFIX %+ _MD4 %xdefine MD4_Final BORINGSSL_PREFIX %+ _MD4_Final %xdefine MD4_Init BORINGSSL_PREFIX %+ _MD4_Init %xdefine MD4_Transform BORINGSSL_PREFIX %+ _MD4_Transform %xdefine MD4_Update BORINGSSL_PREFIX %+ _MD4_Update %xdefine MD5 BORINGSSL_PREFIX %+ _MD5 %xdefine MD5_Final BORINGSSL_PREFIX %+ _MD5_Final %xdefine MD5_Init BORINGSSL_PREFIX %+ _MD5_Init %xdefine MD5_Transform BORINGSSL_PREFIX %+ _MD5_Transform %xdefine MD5_Update BORINGSSL_PREFIX %+ _MD5_Update %xdefine METHOD_ref BORINGSSL_PREFIX %+ _METHOD_ref %xdefine METHOD_unref BORINGSSL_PREFIX %+ _METHOD_unref %xdefine MLDSA65_generate_key BORINGSSL_PREFIX %+ _MLDSA65_generate_key %xdefine MLDSA65_marshal_public_key BORINGSSL_PREFIX %+ _MLDSA65_marshal_public_key %xdefine MLDSA65_parse_public_key BORINGSSL_PREFIX %+ _MLDSA65_parse_public_key %xdefine MLDSA65_private_key_from_seed BORINGSSL_PREFIX %+ _MLDSA65_private_key_from_seed %xdefine MLDSA65_public_from_private BORINGSSL_PREFIX %+ _MLDSA65_public_from_private %xdefine MLDSA65_sign BORINGSSL_PREFIX %+ _MLDSA65_sign %xdefine MLDSA65_verify BORINGSSL_PREFIX %+ _MLDSA65_verify %xdefine MLKEM1024_decap 
BORINGSSL_PREFIX %+ _MLKEM1024_decap %xdefine MLKEM1024_encap BORINGSSL_PREFIX %+ _MLKEM1024_encap %xdefine MLKEM1024_generate_key BORINGSSL_PREFIX %+ _MLKEM1024_generate_key %xdefine MLKEM1024_marshal_public_key BORINGSSL_PREFIX %+ _MLKEM1024_marshal_public_key %xdefine MLKEM1024_parse_public_key BORINGSSL_PREFIX %+ _MLKEM1024_parse_public_key %xdefine MLKEM1024_private_key_from_seed BORINGSSL_PREFIX %+ _MLKEM1024_private_key_from_seed %xdefine MLKEM1024_public_from_private BORINGSSL_PREFIX %+ _MLKEM1024_public_from_private %xdefine MLKEM768_decap BORINGSSL_PREFIX %+ _MLKEM768_decap %xdefine MLKEM768_encap BORINGSSL_PREFIX %+ _MLKEM768_encap %xdefine MLKEM768_generate_key BORINGSSL_PREFIX %+ _MLKEM768_generate_key %xdefine MLKEM768_marshal_public_key BORINGSSL_PREFIX %+ _MLKEM768_marshal_public_key %xdefine MLKEM768_parse_public_key BORINGSSL_PREFIX %+ _MLKEM768_parse_public_key %xdefine MLKEM768_private_key_from_seed BORINGSSL_PREFIX %+ _MLKEM768_private_key_from_seed %xdefine MLKEM768_public_from_private BORINGSSL_PREFIX %+ _MLKEM768_public_from_private %xdefine NAME_CONSTRAINTS_check BORINGSSL_PREFIX %+ _NAME_CONSTRAINTS_check %xdefine NAME_CONSTRAINTS_free BORINGSSL_PREFIX %+ _NAME_CONSTRAINTS_free %xdefine NAME_CONSTRAINTS_it BORINGSSL_PREFIX %+ _NAME_CONSTRAINTS_it %xdefine NAME_CONSTRAINTS_new BORINGSSL_PREFIX %+ _NAME_CONSTRAINTS_new %xdefine NCONF_free BORINGSSL_PREFIX %+ _NCONF_free %xdefine NCONF_get_section BORINGSSL_PREFIX %+ _NCONF_get_section %xdefine NCONF_get_string BORINGSSL_PREFIX %+ _NCONF_get_string %xdefine NCONF_load BORINGSSL_PREFIX %+ _NCONF_load %xdefine NCONF_load_bio BORINGSSL_PREFIX %+ _NCONF_load_bio %xdefine NCONF_new BORINGSSL_PREFIX %+ _NCONF_new %xdefine NETSCAPE_SPKAC_free BORINGSSL_PREFIX %+ _NETSCAPE_SPKAC_free %xdefine NETSCAPE_SPKAC_it BORINGSSL_PREFIX %+ _NETSCAPE_SPKAC_it %xdefine NETSCAPE_SPKAC_new BORINGSSL_PREFIX %+ _NETSCAPE_SPKAC_new %xdefine NETSCAPE_SPKI_b64_decode BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_b64_decode 
%xdefine NETSCAPE_SPKI_b64_encode BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_b64_encode %xdefine NETSCAPE_SPKI_free BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_free %xdefine NETSCAPE_SPKI_get_pubkey BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_get_pubkey %xdefine NETSCAPE_SPKI_it BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_it %xdefine NETSCAPE_SPKI_new BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_new %xdefine NETSCAPE_SPKI_set_pubkey BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_set_pubkey %xdefine NETSCAPE_SPKI_sign BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_sign %xdefine NETSCAPE_SPKI_verify BORINGSSL_PREFIX %+ _NETSCAPE_SPKI_verify %xdefine NOTICEREF_free BORINGSSL_PREFIX %+ _NOTICEREF_free %xdefine NOTICEREF_it BORINGSSL_PREFIX %+ _NOTICEREF_it %xdefine NOTICEREF_new BORINGSSL_PREFIX %+ _NOTICEREF_new %xdefine OBJ_cbs2nid BORINGSSL_PREFIX %+ _OBJ_cbs2nid %xdefine OBJ_cleanup BORINGSSL_PREFIX %+ _OBJ_cleanup %xdefine OBJ_cmp BORINGSSL_PREFIX %+ _OBJ_cmp %xdefine OBJ_create BORINGSSL_PREFIX %+ _OBJ_create %xdefine OBJ_dup BORINGSSL_PREFIX %+ _OBJ_dup %xdefine OBJ_find_sigid_algs BORINGSSL_PREFIX %+ _OBJ_find_sigid_algs %xdefine OBJ_find_sigid_by_algs BORINGSSL_PREFIX %+ _OBJ_find_sigid_by_algs %xdefine OBJ_get0_data BORINGSSL_PREFIX %+ _OBJ_get0_data %xdefine OBJ_get_undef BORINGSSL_PREFIX %+ _OBJ_get_undef %xdefine OBJ_length BORINGSSL_PREFIX %+ _OBJ_length %xdefine OBJ_ln2nid BORINGSSL_PREFIX %+ _OBJ_ln2nid %xdefine OBJ_nid2cbb BORINGSSL_PREFIX %+ _OBJ_nid2cbb %xdefine OBJ_nid2ln BORINGSSL_PREFIX %+ _OBJ_nid2ln %xdefine OBJ_nid2obj BORINGSSL_PREFIX %+ _OBJ_nid2obj %xdefine OBJ_nid2sn BORINGSSL_PREFIX %+ _OBJ_nid2sn %xdefine OBJ_obj2nid BORINGSSL_PREFIX %+ _OBJ_obj2nid %xdefine OBJ_obj2txt BORINGSSL_PREFIX %+ _OBJ_obj2txt %xdefine OBJ_sn2nid BORINGSSL_PREFIX %+ _OBJ_sn2nid %xdefine OBJ_txt2nid BORINGSSL_PREFIX %+ _OBJ_txt2nid %xdefine OBJ_txt2obj BORINGSSL_PREFIX %+ _OBJ_txt2obj %xdefine OPENSSL_add_all_algorithms_conf BORINGSSL_PREFIX %+ _OPENSSL_add_all_algorithms_conf %xdefine OPENSSL_armcap_P BORINGSSL_PREFIX %+ 
_OPENSSL_armcap_P %xdefine OPENSSL_asprintf BORINGSSL_PREFIX %+ _OPENSSL_asprintf %xdefine OPENSSL_calloc BORINGSSL_PREFIX %+ _OPENSSL_calloc %xdefine OPENSSL_cleanse BORINGSSL_PREFIX %+ _OPENSSL_cleanse %xdefine OPENSSL_cleanup BORINGSSL_PREFIX %+ _OPENSSL_cleanup %xdefine OPENSSL_clear_free BORINGSSL_PREFIX %+ _OPENSSL_clear_free %xdefine OPENSSL_config BORINGSSL_PREFIX %+ _OPENSSL_config %xdefine OPENSSL_cpuid_setup BORINGSSL_PREFIX %+ _OPENSSL_cpuid_setup %xdefine OPENSSL_free BORINGSSL_PREFIX %+ _OPENSSL_free %xdefine OPENSSL_fromxdigit BORINGSSL_PREFIX %+ _OPENSSL_fromxdigit %xdefine OPENSSL_get_armcap BORINGSSL_PREFIX %+ _OPENSSL_get_armcap %xdefine OPENSSL_get_armcap_pointer_for_test BORINGSSL_PREFIX %+ _OPENSSL_get_armcap_pointer_for_test %xdefine OPENSSL_get_ia32cap BORINGSSL_PREFIX %+ _OPENSSL_get_ia32cap %xdefine OPENSSL_gmtime BORINGSSL_PREFIX %+ _OPENSSL_gmtime %xdefine OPENSSL_gmtime_adj BORINGSSL_PREFIX %+ _OPENSSL_gmtime_adj %xdefine OPENSSL_gmtime_diff BORINGSSL_PREFIX %+ _OPENSSL_gmtime_diff %xdefine OPENSSL_hash32 BORINGSSL_PREFIX %+ _OPENSSL_hash32 %xdefine OPENSSL_ia32cap_P BORINGSSL_PREFIX %+ _OPENSSL_ia32cap_P %xdefine OPENSSL_init_cpuid BORINGSSL_PREFIX %+ _OPENSSL_init_cpuid %xdefine OPENSSL_init_crypto BORINGSSL_PREFIX %+ _OPENSSL_init_crypto %xdefine OPENSSL_init_ssl BORINGSSL_PREFIX %+ _OPENSSL_init_ssl %xdefine OPENSSL_isalnum BORINGSSL_PREFIX %+ _OPENSSL_isalnum %xdefine OPENSSL_isalpha BORINGSSL_PREFIX %+ _OPENSSL_isalpha %xdefine OPENSSL_isdigit BORINGSSL_PREFIX %+ _OPENSSL_isdigit %xdefine OPENSSL_isspace BORINGSSL_PREFIX %+ _OPENSSL_isspace %xdefine OPENSSL_isxdigit BORINGSSL_PREFIX %+ _OPENSSL_isxdigit %xdefine OPENSSL_lh_delete BORINGSSL_PREFIX %+ _OPENSSL_lh_delete %xdefine OPENSSL_lh_doall_arg BORINGSSL_PREFIX %+ _OPENSSL_lh_doall_arg %xdefine OPENSSL_lh_free BORINGSSL_PREFIX %+ _OPENSSL_lh_free %xdefine OPENSSL_lh_insert BORINGSSL_PREFIX %+ _OPENSSL_lh_insert %xdefine OPENSSL_lh_new BORINGSSL_PREFIX %+ _OPENSSL_lh_new 
%xdefine OPENSSL_lh_num_items BORINGSSL_PREFIX %+ _OPENSSL_lh_num_items %xdefine OPENSSL_lh_retrieve BORINGSSL_PREFIX %+ _OPENSSL_lh_retrieve %xdefine OPENSSL_lh_retrieve_key BORINGSSL_PREFIX %+ _OPENSSL_lh_retrieve_key %xdefine OPENSSL_load_builtin_modules BORINGSSL_PREFIX %+ _OPENSSL_load_builtin_modules %xdefine OPENSSL_malloc BORINGSSL_PREFIX %+ _OPENSSL_malloc %xdefine OPENSSL_malloc_init BORINGSSL_PREFIX %+ _OPENSSL_malloc_init %xdefine OPENSSL_memdup BORINGSSL_PREFIX %+ _OPENSSL_memdup %xdefine OPENSSL_no_config BORINGSSL_PREFIX %+ _OPENSSL_no_config %xdefine OPENSSL_posix_to_tm BORINGSSL_PREFIX %+ _OPENSSL_posix_to_tm %xdefine OPENSSL_realloc BORINGSSL_PREFIX %+ _OPENSSL_realloc %xdefine OPENSSL_secure_clear_free BORINGSSL_PREFIX %+ _OPENSSL_secure_clear_free %xdefine OPENSSL_secure_malloc BORINGSSL_PREFIX %+ _OPENSSL_secure_malloc %xdefine OPENSSL_sk_deep_copy BORINGSSL_PREFIX %+ _OPENSSL_sk_deep_copy %xdefine OPENSSL_sk_delete BORINGSSL_PREFIX %+ _OPENSSL_sk_delete %xdefine OPENSSL_sk_delete_if BORINGSSL_PREFIX %+ _OPENSSL_sk_delete_if %xdefine OPENSSL_sk_delete_ptr BORINGSSL_PREFIX %+ _OPENSSL_sk_delete_ptr %xdefine OPENSSL_sk_dup BORINGSSL_PREFIX %+ _OPENSSL_sk_dup %xdefine OPENSSL_sk_find BORINGSSL_PREFIX %+ _OPENSSL_sk_find %xdefine OPENSSL_sk_free BORINGSSL_PREFIX %+ _OPENSSL_sk_free %xdefine OPENSSL_sk_insert BORINGSSL_PREFIX %+ _OPENSSL_sk_insert %xdefine OPENSSL_sk_is_sorted BORINGSSL_PREFIX %+ _OPENSSL_sk_is_sorted %xdefine OPENSSL_sk_new BORINGSSL_PREFIX %+ _OPENSSL_sk_new %xdefine OPENSSL_sk_new_null BORINGSSL_PREFIX %+ _OPENSSL_sk_new_null %xdefine OPENSSL_sk_num BORINGSSL_PREFIX %+ _OPENSSL_sk_num %xdefine OPENSSL_sk_pop BORINGSSL_PREFIX %+ _OPENSSL_sk_pop %xdefine OPENSSL_sk_pop_free_ex BORINGSSL_PREFIX %+ _OPENSSL_sk_pop_free_ex %xdefine OPENSSL_sk_push BORINGSSL_PREFIX %+ _OPENSSL_sk_push %xdefine OPENSSL_sk_set BORINGSSL_PREFIX %+ _OPENSSL_sk_set %xdefine OPENSSL_sk_set_cmp_func BORINGSSL_PREFIX %+ _OPENSSL_sk_set_cmp_func %xdefine 
OPENSSL_sk_shift BORINGSSL_PREFIX %+ _OPENSSL_sk_shift %xdefine OPENSSL_sk_sort BORINGSSL_PREFIX %+ _OPENSSL_sk_sort %xdefine OPENSSL_sk_value BORINGSSL_PREFIX %+ _OPENSSL_sk_value %xdefine OPENSSL_sk_zero BORINGSSL_PREFIX %+ _OPENSSL_sk_zero %xdefine OPENSSL_strcasecmp BORINGSSL_PREFIX %+ _OPENSSL_strcasecmp %xdefine OPENSSL_strdup BORINGSSL_PREFIX %+ _OPENSSL_strdup %xdefine OPENSSL_strhash BORINGSSL_PREFIX %+ _OPENSSL_strhash %xdefine OPENSSL_strlcat BORINGSSL_PREFIX %+ _OPENSSL_strlcat %xdefine OPENSSL_strlcpy BORINGSSL_PREFIX %+ _OPENSSL_strlcpy %xdefine OPENSSL_strncasecmp BORINGSSL_PREFIX %+ _OPENSSL_strncasecmp %xdefine OPENSSL_strndup BORINGSSL_PREFIX %+ _OPENSSL_strndup %xdefine OPENSSL_strnlen BORINGSSL_PREFIX %+ _OPENSSL_strnlen %xdefine OPENSSL_timegm BORINGSSL_PREFIX %+ _OPENSSL_timegm %xdefine OPENSSL_tm_to_posix BORINGSSL_PREFIX %+ _OPENSSL_tm_to_posix %xdefine OPENSSL_tolower BORINGSSL_PREFIX %+ _OPENSSL_tolower %xdefine OPENSSL_vasprintf BORINGSSL_PREFIX %+ _OPENSSL_vasprintf %xdefine OPENSSL_vasprintf_internal BORINGSSL_PREFIX %+ _OPENSSL_vasprintf_internal %xdefine OPENSSL_zalloc BORINGSSL_PREFIX %+ _OPENSSL_zalloc %xdefine OTHERNAME_free BORINGSSL_PREFIX %+ _OTHERNAME_free %xdefine OTHERNAME_new BORINGSSL_PREFIX %+ _OTHERNAME_new %xdefine OpenSSL_add_all_algorithms BORINGSSL_PREFIX %+ _OpenSSL_add_all_algorithms %xdefine OpenSSL_add_all_ciphers BORINGSSL_PREFIX %+ _OpenSSL_add_all_ciphers %xdefine OpenSSL_add_all_digests BORINGSSL_PREFIX %+ _OpenSSL_add_all_digests %xdefine OpenSSL_version BORINGSSL_PREFIX %+ _OpenSSL_version %xdefine OpenSSL_version_num BORINGSSL_PREFIX %+ _OpenSSL_version_num %xdefine PEM_ASN1_read BORINGSSL_PREFIX %+ _PEM_ASN1_read %xdefine PEM_ASN1_read_bio BORINGSSL_PREFIX %+ _PEM_ASN1_read_bio %xdefine PEM_ASN1_write BORINGSSL_PREFIX %+ _PEM_ASN1_write %xdefine PEM_ASN1_write_bio BORINGSSL_PREFIX %+ _PEM_ASN1_write_bio %xdefine PEM_X509_INFO_read BORINGSSL_PREFIX %+ _PEM_X509_INFO_read %xdefine PEM_X509_INFO_read_bio 
BORINGSSL_PREFIX %+ _PEM_X509_INFO_read_bio %xdefine PEM_bytes_read_bio BORINGSSL_PREFIX %+ _PEM_bytes_read_bio %xdefine PEM_def_callback BORINGSSL_PREFIX %+ _PEM_def_callback %xdefine PEM_do_header BORINGSSL_PREFIX %+ _PEM_do_header %xdefine PEM_get_EVP_CIPHER_INFO BORINGSSL_PREFIX %+ _PEM_get_EVP_CIPHER_INFO %xdefine PEM_read BORINGSSL_PREFIX %+ _PEM_read %xdefine PEM_read_DHparams BORINGSSL_PREFIX %+ _PEM_read_DHparams %xdefine PEM_read_DSAPrivateKey BORINGSSL_PREFIX %+ _PEM_read_DSAPrivateKey %xdefine PEM_read_DSA_PUBKEY BORINGSSL_PREFIX %+ _PEM_read_DSA_PUBKEY %xdefine PEM_read_DSAparams BORINGSSL_PREFIX %+ _PEM_read_DSAparams %xdefine PEM_read_ECPrivateKey BORINGSSL_PREFIX %+ _PEM_read_ECPrivateKey %xdefine PEM_read_EC_PUBKEY BORINGSSL_PREFIX %+ _PEM_read_EC_PUBKEY %xdefine PEM_read_PKCS7 BORINGSSL_PREFIX %+ _PEM_read_PKCS7 %xdefine PEM_read_PKCS8 BORINGSSL_PREFIX %+ _PEM_read_PKCS8 %xdefine PEM_read_PKCS8_PRIV_KEY_INFO BORINGSSL_PREFIX %+ _PEM_read_PKCS8_PRIV_KEY_INFO %xdefine PEM_read_PUBKEY BORINGSSL_PREFIX %+ _PEM_read_PUBKEY %xdefine PEM_read_PrivateKey BORINGSSL_PREFIX %+ _PEM_read_PrivateKey %xdefine PEM_read_RSAPrivateKey BORINGSSL_PREFIX %+ _PEM_read_RSAPrivateKey %xdefine PEM_read_RSAPublicKey BORINGSSL_PREFIX %+ _PEM_read_RSAPublicKey %xdefine PEM_read_RSA_PUBKEY BORINGSSL_PREFIX %+ _PEM_read_RSA_PUBKEY %xdefine PEM_read_SSL_SESSION BORINGSSL_PREFIX %+ _PEM_read_SSL_SESSION %xdefine PEM_read_X509 BORINGSSL_PREFIX %+ _PEM_read_X509 %xdefine PEM_read_X509_AUX BORINGSSL_PREFIX %+ _PEM_read_X509_AUX %xdefine PEM_read_X509_CRL BORINGSSL_PREFIX %+ _PEM_read_X509_CRL %xdefine PEM_read_X509_REQ BORINGSSL_PREFIX %+ _PEM_read_X509_REQ %xdefine PEM_read_bio BORINGSSL_PREFIX %+ _PEM_read_bio %xdefine PEM_read_bio_DHparams BORINGSSL_PREFIX %+ _PEM_read_bio_DHparams %xdefine PEM_read_bio_DSAPrivateKey BORINGSSL_PREFIX %+ _PEM_read_bio_DSAPrivateKey %xdefine PEM_read_bio_DSA_PUBKEY BORINGSSL_PREFIX %+ _PEM_read_bio_DSA_PUBKEY %xdefine PEM_read_bio_DSAparams 
BORINGSSL_PREFIX %+ _PEM_read_bio_DSAparams %xdefine PEM_read_bio_ECPrivateKey BORINGSSL_PREFIX %+ _PEM_read_bio_ECPrivateKey %xdefine PEM_read_bio_EC_PUBKEY BORINGSSL_PREFIX %+ _PEM_read_bio_EC_PUBKEY %xdefine PEM_read_bio_PKCS7 BORINGSSL_PREFIX %+ _PEM_read_bio_PKCS7 %xdefine PEM_read_bio_PKCS8 BORINGSSL_PREFIX %+ _PEM_read_bio_PKCS8 %xdefine PEM_read_bio_PKCS8_PRIV_KEY_INFO BORINGSSL_PREFIX %+ _PEM_read_bio_PKCS8_PRIV_KEY_INFO %xdefine PEM_read_bio_PUBKEY BORINGSSL_PREFIX %+ _PEM_read_bio_PUBKEY %xdefine PEM_read_bio_PrivateKey BORINGSSL_PREFIX %+ _PEM_read_bio_PrivateKey %xdefine PEM_read_bio_RSAPrivateKey BORINGSSL_PREFIX %+ _PEM_read_bio_RSAPrivateKey %xdefine PEM_read_bio_RSAPublicKey BORINGSSL_PREFIX %+ _PEM_read_bio_RSAPublicKey %xdefine PEM_read_bio_RSA_PUBKEY BORINGSSL_PREFIX %+ _PEM_read_bio_RSA_PUBKEY %xdefine PEM_read_bio_SSL_SESSION BORINGSSL_PREFIX %+ _PEM_read_bio_SSL_SESSION %xdefine PEM_read_bio_X509 BORINGSSL_PREFIX %+ _PEM_read_bio_X509 %xdefine PEM_read_bio_X509_AUX BORINGSSL_PREFIX %+ _PEM_read_bio_X509_AUX %xdefine PEM_read_bio_X509_CRL BORINGSSL_PREFIX %+ _PEM_read_bio_X509_CRL %xdefine PEM_read_bio_X509_REQ BORINGSSL_PREFIX %+ _PEM_read_bio_X509_REQ %xdefine PEM_write BORINGSSL_PREFIX %+ _PEM_write %xdefine PEM_write_DHparams BORINGSSL_PREFIX %+ _PEM_write_DHparams %xdefine PEM_write_DSAPrivateKey BORINGSSL_PREFIX %+ _PEM_write_DSAPrivateKey %xdefine PEM_write_DSA_PUBKEY BORINGSSL_PREFIX %+ _PEM_write_DSA_PUBKEY %xdefine PEM_write_DSAparams BORINGSSL_PREFIX %+ _PEM_write_DSAparams %xdefine PEM_write_ECPrivateKey BORINGSSL_PREFIX %+ _PEM_write_ECPrivateKey %xdefine PEM_write_EC_PUBKEY BORINGSSL_PREFIX %+ _PEM_write_EC_PUBKEY %xdefine PEM_write_PKCS7 BORINGSSL_PREFIX %+ _PEM_write_PKCS7 %xdefine PEM_write_PKCS8 BORINGSSL_PREFIX %+ _PEM_write_PKCS8 %xdefine PEM_write_PKCS8PrivateKey BORINGSSL_PREFIX %+ _PEM_write_PKCS8PrivateKey %xdefine PEM_write_PKCS8PrivateKey_nid BORINGSSL_PREFIX %+ _PEM_write_PKCS8PrivateKey_nid %xdefine 
PEM_write_PKCS8_PRIV_KEY_INFO BORINGSSL_PREFIX %+ _PEM_write_PKCS8_PRIV_KEY_INFO %xdefine PEM_write_PUBKEY BORINGSSL_PREFIX %+ _PEM_write_PUBKEY %xdefine PEM_write_PrivateKey BORINGSSL_PREFIX %+ _PEM_write_PrivateKey %xdefine PEM_write_RSAPrivateKey BORINGSSL_PREFIX %+ _PEM_write_RSAPrivateKey %xdefine PEM_write_RSAPublicKey BORINGSSL_PREFIX %+ _PEM_write_RSAPublicKey %xdefine PEM_write_RSA_PUBKEY BORINGSSL_PREFIX %+ _PEM_write_RSA_PUBKEY %xdefine PEM_write_SSL_SESSION BORINGSSL_PREFIX %+ _PEM_write_SSL_SESSION %xdefine PEM_write_X509 BORINGSSL_PREFIX %+ _PEM_write_X509 %xdefine PEM_write_X509_AUX BORINGSSL_PREFIX %+ _PEM_write_X509_AUX %xdefine PEM_write_X509_CRL BORINGSSL_PREFIX %+ _PEM_write_X509_CRL %xdefine PEM_write_X509_REQ BORINGSSL_PREFIX %+ _PEM_write_X509_REQ %xdefine PEM_write_X509_REQ_NEW BORINGSSL_PREFIX %+ _PEM_write_X509_REQ_NEW %xdefine PEM_write_bio BORINGSSL_PREFIX %+ _PEM_write_bio %xdefine PEM_write_bio_DHparams BORINGSSL_PREFIX %+ _PEM_write_bio_DHparams %xdefine PEM_write_bio_DSAPrivateKey BORINGSSL_PREFIX %+ _PEM_write_bio_DSAPrivateKey %xdefine PEM_write_bio_DSA_PUBKEY BORINGSSL_PREFIX %+ _PEM_write_bio_DSA_PUBKEY %xdefine PEM_write_bio_DSAparams BORINGSSL_PREFIX %+ _PEM_write_bio_DSAparams %xdefine PEM_write_bio_ECPrivateKey BORINGSSL_PREFIX %+ _PEM_write_bio_ECPrivateKey %xdefine PEM_write_bio_EC_PUBKEY BORINGSSL_PREFIX %+ _PEM_write_bio_EC_PUBKEY %xdefine PEM_write_bio_PKCS7 BORINGSSL_PREFIX %+ _PEM_write_bio_PKCS7 %xdefine PEM_write_bio_PKCS8 BORINGSSL_PREFIX %+ _PEM_write_bio_PKCS8 %xdefine PEM_write_bio_PKCS8PrivateKey BORINGSSL_PREFIX %+ _PEM_write_bio_PKCS8PrivateKey %xdefine PEM_write_bio_PKCS8PrivateKey_nid BORINGSSL_PREFIX %+ _PEM_write_bio_PKCS8PrivateKey_nid %xdefine PEM_write_bio_PKCS8_PRIV_KEY_INFO BORINGSSL_PREFIX %+ _PEM_write_bio_PKCS8_PRIV_KEY_INFO %xdefine PEM_write_bio_PUBKEY BORINGSSL_PREFIX %+ _PEM_write_bio_PUBKEY %xdefine PEM_write_bio_PrivateKey BORINGSSL_PREFIX %+ _PEM_write_bio_PrivateKey %xdefine 
PEM_write_bio_RSAPrivateKey BORINGSSL_PREFIX %+ _PEM_write_bio_RSAPrivateKey %xdefine PEM_write_bio_RSAPublicKey BORINGSSL_PREFIX %+ _PEM_write_bio_RSAPublicKey %xdefine PEM_write_bio_RSA_PUBKEY BORINGSSL_PREFIX %+ _PEM_write_bio_RSA_PUBKEY %xdefine PEM_write_bio_SSL_SESSION BORINGSSL_PREFIX %+ _PEM_write_bio_SSL_SESSION %xdefine PEM_write_bio_X509 BORINGSSL_PREFIX %+ _PEM_write_bio_X509 %xdefine PEM_write_bio_X509_AUX BORINGSSL_PREFIX %+ _PEM_write_bio_X509_AUX %xdefine PEM_write_bio_X509_CRL BORINGSSL_PREFIX %+ _PEM_write_bio_X509_CRL %xdefine PEM_write_bio_X509_REQ BORINGSSL_PREFIX %+ _PEM_write_bio_X509_REQ %xdefine PEM_write_bio_X509_REQ_NEW BORINGSSL_PREFIX %+ _PEM_write_bio_X509_REQ_NEW %xdefine PKCS12_PBE_add BORINGSSL_PREFIX %+ _PKCS12_PBE_add %xdefine PKCS12_create BORINGSSL_PREFIX %+ _PKCS12_create %xdefine PKCS12_free BORINGSSL_PREFIX %+ _PKCS12_free %xdefine PKCS12_get_key_and_certs BORINGSSL_PREFIX %+ _PKCS12_get_key_and_certs %xdefine PKCS12_parse BORINGSSL_PREFIX %+ _PKCS12_parse %xdefine PKCS12_verify_mac BORINGSSL_PREFIX %+ _PKCS12_verify_mac %xdefine PKCS1_MGF1 BORINGSSL_PREFIX %+ _PKCS1_MGF1 %xdefine PKCS5_PBKDF2_HMAC BORINGSSL_PREFIX %+ _PKCS5_PBKDF2_HMAC %xdefine PKCS5_PBKDF2_HMAC_SHA1 BORINGSSL_PREFIX %+ _PKCS5_PBKDF2_HMAC_SHA1 %xdefine PKCS5_pbe2_decrypt_init BORINGSSL_PREFIX %+ _PKCS5_pbe2_decrypt_init %xdefine PKCS5_pbe2_encrypt_init BORINGSSL_PREFIX %+ _PKCS5_pbe2_encrypt_init %xdefine PKCS7_bundle_CRLs BORINGSSL_PREFIX %+ _PKCS7_bundle_CRLs %xdefine PKCS7_bundle_certificates BORINGSSL_PREFIX %+ _PKCS7_bundle_certificates %xdefine PKCS7_bundle_raw_certificates BORINGSSL_PREFIX %+ _PKCS7_bundle_raw_certificates %xdefine PKCS7_free BORINGSSL_PREFIX %+ _PKCS7_free %xdefine PKCS7_get_CRLs BORINGSSL_PREFIX %+ _PKCS7_get_CRLs %xdefine PKCS7_get_PEM_CRLs BORINGSSL_PREFIX %+ _PKCS7_get_PEM_CRLs %xdefine PKCS7_get_PEM_certificates BORINGSSL_PREFIX %+ _PKCS7_get_PEM_certificates %xdefine PKCS7_get_certificates BORINGSSL_PREFIX %+ 
_PKCS7_get_certificates %xdefine PKCS7_get_raw_certificates BORINGSSL_PREFIX %+ _PKCS7_get_raw_certificates %xdefine PKCS7_sign BORINGSSL_PREFIX %+ _PKCS7_sign %xdefine PKCS7_type_is_data BORINGSSL_PREFIX %+ _PKCS7_type_is_data %xdefine PKCS7_type_is_digest BORINGSSL_PREFIX %+ _PKCS7_type_is_digest %xdefine PKCS7_type_is_encrypted BORINGSSL_PREFIX %+ _PKCS7_type_is_encrypted %xdefine PKCS7_type_is_enveloped BORINGSSL_PREFIX %+ _PKCS7_type_is_enveloped %xdefine PKCS7_type_is_signed BORINGSSL_PREFIX %+ _PKCS7_type_is_signed %xdefine PKCS7_type_is_signedAndEnveloped BORINGSSL_PREFIX %+ _PKCS7_type_is_signedAndEnveloped %xdefine PKCS8_PRIV_KEY_INFO_free BORINGSSL_PREFIX %+ _PKCS8_PRIV_KEY_INFO_free %xdefine PKCS8_PRIV_KEY_INFO_new BORINGSSL_PREFIX %+ _PKCS8_PRIV_KEY_INFO_new %xdefine PKCS8_decrypt BORINGSSL_PREFIX %+ _PKCS8_decrypt %xdefine PKCS8_encrypt BORINGSSL_PREFIX %+ _PKCS8_encrypt %xdefine PKCS8_marshal_encrypted_private_key BORINGSSL_PREFIX %+ _PKCS8_marshal_encrypted_private_key %xdefine PKCS8_parse_encrypted_private_key BORINGSSL_PREFIX %+ _PKCS8_parse_encrypted_private_key %xdefine POLICYINFO_free BORINGSSL_PREFIX %+ _POLICYINFO_free %xdefine POLICYINFO_it BORINGSSL_PREFIX %+ _POLICYINFO_it %xdefine POLICYINFO_new BORINGSSL_PREFIX %+ _POLICYINFO_new %xdefine POLICYQUALINFO_free BORINGSSL_PREFIX %+ _POLICYQUALINFO_free %xdefine POLICYQUALINFO_it BORINGSSL_PREFIX %+ _POLICYQUALINFO_it %xdefine POLICYQUALINFO_new BORINGSSL_PREFIX %+ _POLICYQUALINFO_new %xdefine POLICY_CONSTRAINTS_free BORINGSSL_PREFIX %+ _POLICY_CONSTRAINTS_free %xdefine POLICY_CONSTRAINTS_it BORINGSSL_PREFIX %+ _POLICY_CONSTRAINTS_it %xdefine POLICY_CONSTRAINTS_new BORINGSSL_PREFIX %+ _POLICY_CONSTRAINTS_new %xdefine POLICY_MAPPINGS_it BORINGSSL_PREFIX %+ _POLICY_MAPPINGS_it %xdefine POLICY_MAPPING_free BORINGSSL_PREFIX %+ _POLICY_MAPPING_free %xdefine POLICY_MAPPING_new BORINGSSL_PREFIX %+ _POLICY_MAPPING_new %xdefine RAND_OpenSSL BORINGSSL_PREFIX %+ _RAND_OpenSSL %xdefine RAND_SSLeay 
BORINGSSL_PREFIX %+ _RAND_SSLeay %xdefine RAND_add BORINGSSL_PREFIX %+ _RAND_add %xdefine RAND_bytes BORINGSSL_PREFIX %+ _RAND_bytes %xdefine RAND_cleanup BORINGSSL_PREFIX %+ _RAND_cleanup %xdefine RAND_disable_fork_unsafe_buffering BORINGSSL_PREFIX %+ _RAND_disable_fork_unsafe_buffering %xdefine RAND_egd BORINGSSL_PREFIX %+ _RAND_egd %xdefine RAND_enable_fork_unsafe_buffering BORINGSSL_PREFIX %+ _RAND_enable_fork_unsafe_buffering %xdefine RAND_file_name BORINGSSL_PREFIX %+ _RAND_file_name %xdefine RAND_get_rand_method BORINGSSL_PREFIX %+ _RAND_get_rand_method %xdefine RAND_get_system_entropy_for_custom_prng BORINGSSL_PREFIX %+ _RAND_get_system_entropy_for_custom_prng %xdefine RAND_load_file BORINGSSL_PREFIX %+ _RAND_load_file %xdefine RAND_poll BORINGSSL_PREFIX %+ _RAND_poll %xdefine RAND_pseudo_bytes BORINGSSL_PREFIX %+ _RAND_pseudo_bytes %xdefine RAND_seed BORINGSSL_PREFIX %+ _RAND_seed %xdefine RAND_set_rand_method BORINGSSL_PREFIX %+ _RAND_set_rand_method %xdefine RAND_status BORINGSSL_PREFIX %+ _RAND_status %xdefine RC4 BORINGSSL_PREFIX %+ _RC4 %xdefine RC4_set_key BORINGSSL_PREFIX %+ _RC4_set_key %xdefine RSAPrivateKey_dup BORINGSSL_PREFIX %+ _RSAPrivateKey_dup %xdefine RSAPublicKey_dup BORINGSSL_PREFIX %+ _RSAPublicKey_dup %xdefine RSAZ_1024_mod_exp_avx2 BORINGSSL_PREFIX %+ _RSAZ_1024_mod_exp_avx2 %xdefine RSA_PSS_PARAMS_free BORINGSSL_PREFIX %+ _RSA_PSS_PARAMS_free %xdefine RSA_PSS_PARAMS_it BORINGSSL_PREFIX %+ _RSA_PSS_PARAMS_it %xdefine RSA_PSS_PARAMS_new BORINGSSL_PREFIX %+ _RSA_PSS_PARAMS_new %xdefine RSA_add_pkcs1_prefix BORINGSSL_PREFIX %+ _RSA_add_pkcs1_prefix %xdefine RSA_bits BORINGSSL_PREFIX %+ _RSA_bits %xdefine RSA_blinding_off BORINGSSL_PREFIX %+ _RSA_blinding_off %xdefine RSA_blinding_on BORINGSSL_PREFIX %+ _RSA_blinding_on %xdefine RSA_check_fips BORINGSSL_PREFIX %+ _RSA_check_fips %xdefine RSA_check_key BORINGSSL_PREFIX %+ _RSA_check_key %xdefine RSA_decrypt BORINGSSL_PREFIX %+ _RSA_decrypt %xdefine RSA_default_method BORINGSSL_PREFIX %+ 
_RSA_default_method %xdefine RSA_encrypt BORINGSSL_PREFIX %+ _RSA_encrypt %xdefine RSA_flags BORINGSSL_PREFIX %+ _RSA_flags %xdefine RSA_free BORINGSSL_PREFIX %+ _RSA_free %xdefine RSA_generate_key_ex BORINGSSL_PREFIX %+ _RSA_generate_key_ex %xdefine RSA_generate_key_fips BORINGSSL_PREFIX %+ _RSA_generate_key_fips %xdefine RSA_get0_crt_params BORINGSSL_PREFIX %+ _RSA_get0_crt_params %xdefine RSA_get0_d BORINGSSL_PREFIX %+ _RSA_get0_d %xdefine RSA_get0_dmp1 BORINGSSL_PREFIX %+ _RSA_get0_dmp1 %xdefine RSA_get0_dmq1 BORINGSSL_PREFIX %+ _RSA_get0_dmq1 %xdefine RSA_get0_e BORINGSSL_PREFIX %+ _RSA_get0_e %xdefine RSA_get0_factors BORINGSSL_PREFIX %+ _RSA_get0_factors %xdefine RSA_get0_iqmp BORINGSSL_PREFIX %+ _RSA_get0_iqmp %xdefine RSA_get0_key BORINGSSL_PREFIX %+ _RSA_get0_key %xdefine RSA_get0_n BORINGSSL_PREFIX %+ _RSA_get0_n %xdefine RSA_get0_p BORINGSSL_PREFIX %+ _RSA_get0_p %xdefine RSA_get0_pss_params BORINGSSL_PREFIX %+ _RSA_get0_pss_params %xdefine RSA_get0_q BORINGSSL_PREFIX %+ _RSA_get0_q %xdefine RSA_get_ex_data BORINGSSL_PREFIX %+ _RSA_get_ex_data %xdefine RSA_get_ex_new_index BORINGSSL_PREFIX %+ _RSA_get_ex_new_index %xdefine RSA_is_opaque BORINGSSL_PREFIX %+ _RSA_is_opaque %xdefine RSA_marshal_private_key BORINGSSL_PREFIX %+ _RSA_marshal_private_key %xdefine RSA_marshal_public_key BORINGSSL_PREFIX %+ _RSA_marshal_public_key %xdefine RSA_new BORINGSSL_PREFIX %+ _RSA_new %xdefine RSA_new_method BORINGSSL_PREFIX %+ _RSA_new_method %xdefine RSA_new_method_no_e BORINGSSL_PREFIX %+ _RSA_new_method_no_e %xdefine RSA_new_private_key BORINGSSL_PREFIX %+ _RSA_new_private_key %xdefine RSA_new_private_key_large_e BORINGSSL_PREFIX %+ _RSA_new_private_key_large_e %xdefine RSA_new_private_key_no_crt BORINGSSL_PREFIX %+ _RSA_new_private_key_no_crt %xdefine RSA_new_private_key_no_e BORINGSSL_PREFIX %+ _RSA_new_private_key_no_e %xdefine RSA_new_public_key BORINGSSL_PREFIX %+ _RSA_new_public_key %xdefine RSA_new_public_key_large_e BORINGSSL_PREFIX %+ 
_RSA_new_public_key_large_e %xdefine RSA_padding_add_PKCS1_OAEP_mgf1 BORINGSSL_PREFIX %+ _RSA_padding_add_PKCS1_OAEP_mgf1 %xdefine RSA_padding_add_PKCS1_PSS_mgf1 BORINGSSL_PREFIX %+ _RSA_padding_add_PKCS1_PSS_mgf1 %xdefine RSA_padding_add_PKCS1_type_1 BORINGSSL_PREFIX %+ _RSA_padding_add_PKCS1_type_1 %xdefine RSA_padding_add_none BORINGSSL_PREFIX %+ _RSA_padding_add_none %xdefine RSA_padding_check_PKCS1_OAEP_mgf1 BORINGSSL_PREFIX %+ _RSA_padding_check_PKCS1_OAEP_mgf1 %xdefine RSA_padding_check_PKCS1_type_1 BORINGSSL_PREFIX %+ _RSA_padding_check_PKCS1_type_1 %xdefine RSA_parse_private_key BORINGSSL_PREFIX %+ _RSA_parse_private_key %xdefine RSA_parse_public_key BORINGSSL_PREFIX %+ _RSA_parse_public_key %xdefine RSA_print BORINGSSL_PREFIX %+ _RSA_print %xdefine RSA_private_decrypt BORINGSSL_PREFIX %+ _RSA_private_decrypt %xdefine RSA_private_encrypt BORINGSSL_PREFIX %+ _RSA_private_encrypt %xdefine RSA_private_key_from_bytes BORINGSSL_PREFIX %+ _RSA_private_key_from_bytes %xdefine RSA_private_key_to_bytes BORINGSSL_PREFIX %+ _RSA_private_key_to_bytes %xdefine RSA_public_decrypt BORINGSSL_PREFIX %+ _RSA_public_decrypt %xdefine RSA_public_encrypt BORINGSSL_PREFIX %+ _RSA_public_encrypt %xdefine RSA_public_key_from_bytes BORINGSSL_PREFIX %+ _RSA_public_key_from_bytes %xdefine RSA_public_key_to_bytes BORINGSSL_PREFIX %+ _RSA_public_key_to_bytes %xdefine RSA_set0_crt_params BORINGSSL_PREFIX %+ _RSA_set0_crt_params %xdefine RSA_set0_factors BORINGSSL_PREFIX %+ _RSA_set0_factors %xdefine RSA_set0_key BORINGSSL_PREFIX %+ _RSA_set0_key %xdefine RSA_set_ex_data BORINGSSL_PREFIX %+ _RSA_set_ex_data %xdefine RSA_sign BORINGSSL_PREFIX %+ _RSA_sign %xdefine RSA_sign_pss_mgf1 BORINGSSL_PREFIX %+ _RSA_sign_pss_mgf1 %xdefine RSA_sign_raw BORINGSSL_PREFIX %+ _RSA_sign_raw %xdefine RSA_size BORINGSSL_PREFIX %+ _RSA_size %xdefine RSA_test_flags BORINGSSL_PREFIX %+ _RSA_test_flags %xdefine RSA_up_ref BORINGSSL_PREFIX %+ _RSA_up_ref %xdefine RSA_verify BORINGSSL_PREFIX %+ _RSA_verify 
%xdefine RSA_verify_PKCS1_PSS_mgf1 BORINGSSL_PREFIX %+ _RSA_verify_PKCS1_PSS_mgf1 %xdefine RSA_verify_pss_mgf1 BORINGSSL_PREFIX %+ _RSA_verify_pss_mgf1 %xdefine RSA_verify_raw BORINGSSL_PREFIX %+ _RSA_verify_raw %xdefine SHA1 BORINGSSL_PREFIX %+ _SHA1 %xdefine SHA1_Final BORINGSSL_PREFIX %+ _SHA1_Final %xdefine SHA1_Init BORINGSSL_PREFIX %+ _SHA1_Init %xdefine SHA1_Transform BORINGSSL_PREFIX %+ _SHA1_Transform %xdefine SHA1_Update BORINGSSL_PREFIX %+ _SHA1_Update %xdefine SHA224 BORINGSSL_PREFIX %+ _SHA224 %xdefine SHA224_Final BORINGSSL_PREFIX %+ _SHA224_Final %xdefine SHA224_Init BORINGSSL_PREFIX %+ _SHA224_Init %xdefine SHA224_Update BORINGSSL_PREFIX %+ _SHA224_Update %xdefine SHA256 BORINGSSL_PREFIX %+ _SHA256 %xdefine SHA256_Final BORINGSSL_PREFIX %+ _SHA256_Final %xdefine SHA256_Init BORINGSSL_PREFIX %+ _SHA256_Init %xdefine SHA256_Transform BORINGSSL_PREFIX %+ _SHA256_Transform %xdefine SHA256_TransformBlocks BORINGSSL_PREFIX %+ _SHA256_TransformBlocks %xdefine SHA256_Update BORINGSSL_PREFIX %+ _SHA256_Update %xdefine SHA384 BORINGSSL_PREFIX %+ _SHA384 %xdefine SHA384_Final BORINGSSL_PREFIX %+ _SHA384_Final %xdefine SHA384_Init BORINGSSL_PREFIX %+ _SHA384_Init %xdefine SHA384_Update BORINGSSL_PREFIX %+ _SHA384_Update %xdefine SHA512 BORINGSSL_PREFIX %+ _SHA512 %xdefine SHA512_256 BORINGSSL_PREFIX %+ _SHA512_256 %xdefine SHA512_256_Final BORINGSSL_PREFIX %+ _SHA512_256_Final %xdefine SHA512_256_Init BORINGSSL_PREFIX %+ _SHA512_256_Init %xdefine SHA512_256_Update BORINGSSL_PREFIX %+ _SHA512_256_Update %xdefine SHA512_Final BORINGSSL_PREFIX %+ _SHA512_Final %xdefine SHA512_Init BORINGSSL_PREFIX %+ _SHA512_Init %xdefine SHA512_Transform BORINGSSL_PREFIX %+ _SHA512_Transform %xdefine SHA512_Update BORINGSSL_PREFIX %+ _SHA512_Update %xdefine SIPHASH_24 BORINGSSL_PREFIX %+ _SIPHASH_24 %xdefine SLHDSA_SHA2_128S_generate_key BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_generate_key %xdefine SLHDSA_SHA2_128S_prehash_sign BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_prehash_sign 
%xdefine SLHDSA_SHA2_128S_prehash_verify BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_prehash_verify %xdefine SLHDSA_SHA2_128S_prehash_warning_nonstandard_sign BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_prehash_warning_nonstandard_sign %xdefine SLHDSA_SHA2_128S_prehash_warning_nonstandard_verify BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_prehash_warning_nonstandard_verify %xdefine SLHDSA_SHA2_128S_public_from_private BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_public_from_private %xdefine SLHDSA_SHA2_128S_sign BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_sign %xdefine SLHDSA_SHA2_128S_verify BORINGSSL_PREFIX %+ _SLHDSA_SHA2_128S_verify %xdefine SPAKE2_CTX_free BORINGSSL_PREFIX %+ _SPAKE2_CTX_free %xdefine SPAKE2_CTX_new BORINGSSL_PREFIX %+ _SPAKE2_CTX_new %xdefine SPAKE2_generate_msg BORINGSSL_PREFIX %+ _SPAKE2_generate_msg %xdefine SPAKE2_process_msg BORINGSSL_PREFIX %+ _SPAKE2_process_msg %xdefine SSL_CIPHER_description BORINGSSL_PREFIX %+ _SSL_CIPHER_description %xdefine SSL_CIPHER_get_auth_nid BORINGSSL_PREFIX %+ _SSL_CIPHER_get_auth_nid %xdefine SSL_CIPHER_get_bits BORINGSSL_PREFIX %+ _SSL_CIPHER_get_bits %xdefine SSL_CIPHER_get_cipher_nid BORINGSSL_PREFIX %+ _SSL_CIPHER_get_cipher_nid %xdefine SSL_CIPHER_get_digest_nid BORINGSSL_PREFIX %+ _SSL_CIPHER_get_digest_nid %xdefine SSL_CIPHER_get_handshake_digest BORINGSSL_PREFIX %+ _SSL_CIPHER_get_handshake_digest %xdefine SSL_CIPHER_get_id BORINGSSL_PREFIX %+ _SSL_CIPHER_get_id %xdefine SSL_CIPHER_get_kx_name BORINGSSL_PREFIX %+ _SSL_CIPHER_get_kx_name %xdefine SSL_CIPHER_get_kx_nid BORINGSSL_PREFIX %+ _SSL_CIPHER_get_kx_nid %xdefine SSL_CIPHER_get_max_version BORINGSSL_PREFIX %+ _SSL_CIPHER_get_max_version %xdefine SSL_CIPHER_get_min_version BORINGSSL_PREFIX %+ _SSL_CIPHER_get_min_version %xdefine SSL_CIPHER_get_name BORINGSSL_PREFIX %+ _SSL_CIPHER_get_name %xdefine SSL_CIPHER_get_prf_nid BORINGSSL_PREFIX %+ _SSL_CIPHER_get_prf_nid %xdefine SSL_CIPHER_get_protocol_id BORINGSSL_PREFIX %+ _SSL_CIPHER_get_protocol_id %xdefine 
SSL_CIPHER_get_version BORINGSSL_PREFIX %+ _SSL_CIPHER_get_version %xdefine SSL_CIPHER_is_aead BORINGSSL_PREFIX %+ _SSL_CIPHER_is_aead %xdefine SSL_CIPHER_is_block_cipher BORINGSSL_PREFIX %+ _SSL_CIPHER_is_block_cipher %xdefine SSL_CIPHER_standard_name BORINGSSL_PREFIX %+ _SSL_CIPHER_standard_name %xdefine SSL_COMP_add_compression_method BORINGSSL_PREFIX %+ _SSL_COMP_add_compression_method %xdefine SSL_COMP_free_compression_methods BORINGSSL_PREFIX %+ _SSL_COMP_free_compression_methods %xdefine SSL_COMP_get0_name BORINGSSL_PREFIX %+ _SSL_COMP_get0_name %xdefine SSL_COMP_get_compression_methods BORINGSSL_PREFIX %+ _SSL_COMP_get_compression_methods %xdefine SSL_COMP_get_id BORINGSSL_PREFIX %+ _SSL_COMP_get_id %xdefine SSL_COMP_get_name BORINGSSL_PREFIX %+ _SSL_COMP_get_name %xdefine SSL_CREDENTIAL_clear_must_match_issuer BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_clear_must_match_issuer %xdefine SSL_CREDENTIAL_free BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_free %xdefine SSL_CREDENTIAL_get_ex_data BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_get_ex_data %xdefine SSL_CREDENTIAL_get_ex_new_index BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_get_ex_new_index %xdefine SSL_CREDENTIAL_must_match_issuer BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_must_match_issuer %xdefine SSL_CREDENTIAL_new_delegated BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_new_delegated %xdefine SSL_CREDENTIAL_new_x509 BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_new_x509 %xdefine SSL_CREDENTIAL_set1_cert_chain BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set1_cert_chain %xdefine SSL_CREDENTIAL_set1_delegated_credential BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set1_delegated_credential %xdefine SSL_CREDENTIAL_set1_ocsp_response BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set1_ocsp_response %xdefine SSL_CREDENTIAL_set1_private_key BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set1_private_key %xdefine SSL_CREDENTIAL_set1_signed_cert_timestamp_list BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set1_signed_cert_timestamp_list %xdefine SSL_CREDENTIAL_set1_signing_algorithm_prefs BORINGSSL_PREFIX %+ 
_SSL_CREDENTIAL_set1_signing_algorithm_prefs %xdefine SSL_CREDENTIAL_set_ex_data BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set_ex_data %xdefine SSL_CREDENTIAL_set_must_match_issuer BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set_must_match_issuer %xdefine SSL_CREDENTIAL_set_private_key_method BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_set_private_key_method %xdefine SSL_CREDENTIAL_up_ref BORINGSSL_PREFIX %+ _SSL_CREDENTIAL_up_ref %xdefine SSL_CTX_add0_chain_cert BORINGSSL_PREFIX %+ _SSL_CTX_add0_chain_cert %xdefine SSL_CTX_add1_chain_cert BORINGSSL_PREFIX %+ _SSL_CTX_add1_chain_cert %xdefine SSL_CTX_add1_credential BORINGSSL_PREFIX %+ _SSL_CTX_add1_credential %xdefine SSL_CTX_add_cert_compression_alg BORINGSSL_PREFIX %+ _SSL_CTX_add_cert_compression_alg %xdefine SSL_CTX_add_client_CA BORINGSSL_PREFIX %+ _SSL_CTX_add_client_CA %xdefine SSL_CTX_add_extra_chain_cert BORINGSSL_PREFIX %+ _SSL_CTX_add_extra_chain_cert %xdefine SSL_CTX_add_session BORINGSSL_PREFIX %+ _SSL_CTX_add_session %xdefine SSL_CTX_check_private_key BORINGSSL_PREFIX %+ _SSL_CTX_check_private_key %xdefine SSL_CTX_cipher_in_group BORINGSSL_PREFIX %+ _SSL_CTX_cipher_in_group %xdefine SSL_CTX_clear_chain_certs BORINGSSL_PREFIX %+ _SSL_CTX_clear_chain_certs %xdefine SSL_CTX_clear_extra_chain_certs BORINGSSL_PREFIX %+ _SSL_CTX_clear_extra_chain_certs %xdefine SSL_CTX_clear_mode BORINGSSL_PREFIX %+ _SSL_CTX_clear_mode %xdefine SSL_CTX_clear_options BORINGSSL_PREFIX %+ _SSL_CTX_clear_options %xdefine SSL_CTX_enable_ocsp_stapling BORINGSSL_PREFIX %+ _SSL_CTX_enable_ocsp_stapling %xdefine SSL_CTX_enable_signed_cert_timestamps BORINGSSL_PREFIX %+ _SSL_CTX_enable_signed_cert_timestamps %xdefine SSL_CTX_enable_tls_channel_id BORINGSSL_PREFIX %+ _SSL_CTX_enable_tls_channel_id %xdefine SSL_CTX_flush_sessions BORINGSSL_PREFIX %+ _SSL_CTX_flush_sessions %xdefine SSL_CTX_free BORINGSSL_PREFIX %+ _SSL_CTX_free %xdefine SSL_CTX_get0_certificate BORINGSSL_PREFIX %+ _SSL_CTX_get0_certificate %xdefine SSL_CTX_get0_chain BORINGSSL_PREFIX %+ 
_SSL_CTX_get0_chain %xdefine SSL_CTX_get0_chain_certs BORINGSSL_PREFIX %+ _SSL_CTX_get0_chain_certs %xdefine SSL_CTX_get0_param BORINGSSL_PREFIX %+ _SSL_CTX_get0_param %xdefine SSL_CTX_get0_privatekey BORINGSSL_PREFIX %+ _SSL_CTX_get0_privatekey %xdefine SSL_CTX_get_cert_store BORINGSSL_PREFIX %+ _SSL_CTX_get_cert_store %xdefine SSL_CTX_get_ciphers BORINGSSL_PREFIX %+ _SSL_CTX_get_ciphers %xdefine SSL_CTX_get_client_CA_list BORINGSSL_PREFIX %+ _SSL_CTX_get_client_CA_list %xdefine SSL_CTX_get_compliance_policy BORINGSSL_PREFIX %+ _SSL_CTX_get_compliance_policy %xdefine SSL_CTX_get_default_passwd_cb BORINGSSL_PREFIX %+ _SSL_CTX_get_default_passwd_cb %xdefine SSL_CTX_get_default_passwd_cb_userdata BORINGSSL_PREFIX %+ _SSL_CTX_get_default_passwd_cb_userdata %xdefine SSL_CTX_get_ex_data BORINGSSL_PREFIX %+ _SSL_CTX_get_ex_data %xdefine SSL_CTX_get_ex_new_index BORINGSSL_PREFIX %+ _SSL_CTX_get_ex_new_index %xdefine SSL_CTX_get_extra_chain_certs BORINGSSL_PREFIX %+ _SSL_CTX_get_extra_chain_certs %xdefine SSL_CTX_get_info_callback BORINGSSL_PREFIX %+ _SSL_CTX_get_info_callback %xdefine SSL_CTX_get_keylog_callback BORINGSSL_PREFIX %+ _SSL_CTX_get_keylog_callback %xdefine SSL_CTX_get_max_cert_list BORINGSSL_PREFIX %+ _SSL_CTX_get_max_cert_list %xdefine SSL_CTX_get_max_proto_version BORINGSSL_PREFIX %+ _SSL_CTX_get_max_proto_version %xdefine SSL_CTX_get_min_proto_version BORINGSSL_PREFIX %+ _SSL_CTX_get_min_proto_version %xdefine SSL_CTX_get_mode BORINGSSL_PREFIX %+ _SSL_CTX_get_mode %xdefine SSL_CTX_get_num_tickets BORINGSSL_PREFIX %+ _SSL_CTX_get_num_tickets %xdefine SSL_CTX_get_options BORINGSSL_PREFIX %+ _SSL_CTX_get_options %xdefine SSL_CTX_get_quiet_shutdown BORINGSSL_PREFIX %+ _SSL_CTX_get_quiet_shutdown %xdefine SSL_CTX_get_read_ahead BORINGSSL_PREFIX %+ _SSL_CTX_get_read_ahead %xdefine SSL_CTX_get_session_cache_mode BORINGSSL_PREFIX %+ _SSL_CTX_get_session_cache_mode %xdefine SSL_CTX_get_timeout BORINGSSL_PREFIX %+ _SSL_CTX_get_timeout %xdefine 
SSL_CTX_get_tlsext_ticket_keys BORINGSSL_PREFIX %+ _SSL_CTX_get_tlsext_ticket_keys %xdefine SSL_CTX_get_verify_callback BORINGSSL_PREFIX %+ _SSL_CTX_get_verify_callback %xdefine SSL_CTX_get_verify_depth BORINGSSL_PREFIX %+ _SSL_CTX_get_verify_depth %xdefine SSL_CTX_get_verify_mode BORINGSSL_PREFIX %+ _SSL_CTX_get_verify_mode %xdefine SSL_CTX_load_verify_locations BORINGSSL_PREFIX %+ _SSL_CTX_load_verify_locations %xdefine SSL_CTX_need_tmp_RSA BORINGSSL_PREFIX %+ _SSL_CTX_need_tmp_RSA %xdefine SSL_CTX_new BORINGSSL_PREFIX %+ _SSL_CTX_new %xdefine SSL_CTX_remove_session BORINGSSL_PREFIX %+ _SSL_CTX_remove_session %xdefine SSL_CTX_sess_accept BORINGSSL_PREFIX %+ _SSL_CTX_sess_accept %xdefine SSL_CTX_sess_accept_good BORINGSSL_PREFIX %+ _SSL_CTX_sess_accept_good %xdefine SSL_CTX_sess_accept_renegotiate BORINGSSL_PREFIX %+ _SSL_CTX_sess_accept_renegotiate %xdefine SSL_CTX_sess_cache_full BORINGSSL_PREFIX %+ _SSL_CTX_sess_cache_full %xdefine SSL_CTX_sess_cb_hits BORINGSSL_PREFIX %+ _SSL_CTX_sess_cb_hits %xdefine SSL_CTX_sess_connect BORINGSSL_PREFIX %+ _SSL_CTX_sess_connect %xdefine SSL_CTX_sess_connect_good BORINGSSL_PREFIX %+ _SSL_CTX_sess_connect_good %xdefine SSL_CTX_sess_connect_renegotiate BORINGSSL_PREFIX %+ _SSL_CTX_sess_connect_renegotiate %xdefine SSL_CTX_sess_get_cache_size BORINGSSL_PREFIX %+ _SSL_CTX_sess_get_cache_size %xdefine SSL_CTX_sess_get_get_cb BORINGSSL_PREFIX %+ _SSL_CTX_sess_get_get_cb %xdefine SSL_CTX_sess_get_new_cb BORINGSSL_PREFIX %+ _SSL_CTX_sess_get_new_cb %xdefine SSL_CTX_sess_get_remove_cb BORINGSSL_PREFIX %+ _SSL_CTX_sess_get_remove_cb %xdefine SSL_CTX_sess_hits BORINGSSL_PREFIX %+ _SSL_CTX_sess_hits %xdefine SSL_CTX_sess_misses BORINGSSL_PREFIX %+ _SSL_CTX_sess_misses %xdefine SSL_CTX_sess_number BORINGSSL_PREFIX %+ _SSL_CTX_sess_number %xdefine SSL_CTX_sess_set_cache_size BORINGSSL_PREFIX %+ _SSL_CTX_sess_set_cache_size %xdefine SSL_CTX_sess_set_get_cb BORINGSSL_PREFIX %+ _SSL_CTX_sess_set_get_cb %xdefine SSL_CTX_sess_set_new_cb 
BORINGSSL_PREFIX %+ _SSL_CTX_sess_set_new_cb %xdefine SSL_CTX_sess_set_remove_cb BORINGSSL_PREFIX %+ _SSL_CTX_sess_set_remove_cb %xdefine SSL_CTX_sess_timeouts BORINGSSL_PREFIX %+ _SSL_CTX_sess_timeouts %xdefine SSL_CTX_set0_buffer_pool BORINGSSL_PREFIX %+ _SSL_CTX_set0_buffer_pool %xdefine SSL_CTX_set0_chain BORINGSSL_PREFIX %+ _SSL_CTX_set0_chain %xdefine SSL_CTX_set0_client_CAs BORINGSSL_PREFIX %+ _SSL_CTX_set0_client_CAs %xdefine SSL_CTX_set0_verify_cert_store BORINGSSL_PREFIX %+ _SSL_CTX_set0_verify_cert_store %xdefine SSL_CTX_set1_chain BORINGSSL_PREFIX %+ _SSL_CTX_set1_chain %xdefine SSL_CTX_set1_curves BORINGSSL_PREFIX %+ _SSL_CTX_set1_curves %xdefine SSL_CTX_set1_curves_list BORINGSSL_PREFIX %+ _SSL_CTX_set1_curves_list %xdefine SSL_CTX_set1_ech_keys BORINGSSL_PREFIX %+ _SSL_CTX_set1_ech_keys %xdefine SSL_CTX_set1_group_ids BORINGSSL_PREFIX %+ _SSL_CTX_set1_group_ids %xdefine SSL_CTX_set1_groups BORINGSSL_PREFIX %+ _SSL_CTX_set1_groups %xdefine SSL_CTX_set1_groups_list BORINGSSL_PREFIX %+ _SSL_CTX_set1_groups_list %xdefine SSL_CTX_set1_param BORINGSSL_PREFIX %+ _SSL_CTX_set1_param %xdefine SSL_CTX_set1_sigalgs BORINGSSL_PREFIX %+ _SSL_CTX_set1_sigalgs %xdefine SSL_CTX_set1_sigalgs_list BORINGSSL_PREFIX %+ _SSL_CTX_set1_sigalgs_list %xdefine SSL_CTX_set1_tls_channel_id BORINGSSL_PREFIX %+ _SSL_CTX_set1_tls_channel_id %xdefine SSL_CTX_set1_verify_cert_store BORINGSSL_PREFIX %+ _SSL_CTX_set1_verify_cert_store %xdefine SSL_CTX_set_allow_unknown_alpn_protos BORINGSSL_PREFIX %+ _SSL_CTX_set_allow_unknown_alpn_protos %xdefine SSL_CTX_set_alpn_protos BORINGSSL_PREFIX %+ _SSL_CTX_set_alpn_protos %xdefine SSL_CTX_set_alpn_select_cb BORINGSSL_PREFIX %+ _SSL_CTX_set_alpn_select_cb %xdefine SSL_CTX_set_cert_cb BORINGSSL_PREFIX %+ _SSL_CTX_set_cert_cb %xdefine SSL_CTX_set_cert_store BORINGSSL_PREFIX %+ _SSL_CTX_set_cert_store %xdefine SSL_CTX_set_cert_verify_callback BORINGSSL_PREFIX %+ _SSL_CTX_set_cert_verify_callback %xdefine SSL_CTX_set_chain_and_key 
BORINGSSL_PREFIX %+ _SSL_CTX_set_chain_and_key %xdefine SSL_CTX_set_cipher_list BORINGSSL_PREFIX %+ _SSL_CTX_set_cipher_list %xdefine SSL_CTX_set_client_CA_list BORINGSSL_PREFIX %+ _SSL_CTX_set_client_CA_list %xdefine SSL_CTX_set_client_cert_cb BORINGSSL_PREFIX %+ _SSL_CTX_set_client_cert_cb %xdefine SSL_CTX_set_compliance_policy BORINGSSL_PREFIX %+ _SSL_CTX_set_compliance_policy %xdefine SSL_CTX_set_current_time_cb BORINGSSL_PREFIX %+ _SSL_CTX_set_current_time_cb %xdefine SSL_CTX_set_custom_verify BORINGSSL_PREFIX %+ _SSL_CTX_set_custom_verify %xdefine SSL_CTX_set_default_passwd_cb BORINGSSL_PREFIX %+ _SSL_CTX_set_default_passwd_cb %xdefine SSL_CTX_set_default_passwd_cb_userdata BORINGSSL_PREFIX %+ _SSL_CTX_set_default_passwd_cb_userdata %xdefine SSL_CTX_set_default_verify_paths BORINGSSL_PREFIX %+ _SSL_CTX_set_default_verify_paths %xdefine SSL_CTX_set_dos_protection_cb BORINGSSL_PREFIX %+ _SSL_CTX_set_dos_protection_cb %xdefine SSL_CTX_set_early_data_enabled BORINGSSL_PREFIX %+ _SSL_CTX_set_early_data_enabled %xdefine SSL_CTX_set_ex_data BORINGSSL_PREFIX %+ _SSL_CTX_set_ex_data %xdefine SSL_CTX_set_false_start_allowed_without_alpn BORINGSSL_PREFIX %+ _SSL_CTX_set_false_start_allowed_without_alpn %xdefine SSL_CTX_set_grease_enabled BORINGSSL_PREFIX %+ _SSL_CTX_set_grease_enabled %xdefine SSL_CTX_set_info_callback BORINGSSL_PREFIX %+ _SSL_CTX_set_info_callback %xdefine SSL_CTX_set_keylog_callback BORINGSSL_PREFIX %+ _SSL_CTX_set_keylog_callback %xdefine SSL_CTX_set_max_cert_list BORINGSSL_PREFIX %+ _SSL_CTX_set_max_cert_list %xdefine SSL_CTX_set_max_proto_version BORINGSSL_PREFIX %+ _SSL_CTX_set_max_proto_version %xdefine SSL_CTX_set_max_send_fragment BORINGSSL_PREFIX %+ _SSL_CTX_set_max_send_fragment %xdefine SSL_CTX_set_min_proto_version BORINGSSL_PREFIX %+ _SSL_CTX_set_min_proto_version %xdefine SSL_CTX_set_mode BORINGSSL_PREFIX %+ _SSL_CTX_set_mode %xdefine SSL_CTX_set_msg_callback BORINGSSL_PREFIX %+ _SSL_CTX_set_msg_callback %xdefine 
SSL_CTX_set_msg_callback_arg BORINGSSL_PREFIX %+ _SSL_CTX_set_msg_callback_arg %xdefine SSL_CTX_set_next_proto_select_cb BORINGSSL_PREFIX %+ _SSL_CTX_set_next_proto_select_cb %xdefine SSL_CTX_set_next_protos_advertised_cb BORINGSSL_PREFIX %+ _SSL_CTX_set_next_protos_advertised_cb %xdefine SSL_CTX_set_num_tickets BORINGSSL_PREFIX %+ _SSL_CTX_set_num_tickets %xdefine SSL_CTX_set_ocsp_response BORINGSSL_PREFIX %+ _SSL_CTX_set_ocsp_response %xdefine SSL_CTX_set_options BORINGSSL_PREFIX %+ _SSL_CTX_set_options %xdefine SSL_CTX_set_permute_extensions BORINGSSL_PREFIX %+ _SSL_CTX_set_permute_extensions %xdefine SSL_CTX_set_private_key_method BORINGSSL_PREFIX %+ _SSL_CTX_set_private_key_method %xdefine SSL_CTX_set_psk_client_callback BORINGSSL_PREFIX %+ _SSL_CTX_set_psk_client_callback %xdefine SSL_CTX_set_psk_server_callback BORINGSSL_PREFIX %+ _SSL_CTX_set_psk_server_callback %xdefine SSL_CTX_set_purpose BORINGSSL_PREFIX %+ _SSL_CTX_set_purpose %xdefine SSL_CTX_set_quic_method BORINGSSL_PREFIX %+ _SSL_CTX_set_quic_method %xdefine SSL_CTX_set_quiet_shutdown BORINGSSL_PREFIX %+ _SSL_CTX_set_quiet_shutdown %xdefine SSL_CTX_set_read_ahead BORINGSSL_PREFIX %+ _SSL_CTX_set_read_ahead %xdefine SSL_CTX_set_record_protocol_version BORINGSSL_PREFIX %+ _SSL_CTX_set_record_protocol_version %xdefine SSL_CTX_set_retain_only_sha256_of_client_certs BORINGSSL_PREFIX %+ _SSL_CTX_set_retain_only_sha256_of_client_certs %xdefine SSL_CTX_set_reverify_on_resume BORINGSSL_PREFIX %+ _SSL_CTX_set_reverify_on_resume %xdefine SSL_CTX_set_select_certificate_cb BORINGSSL_PREFIX %+ _SSL_CTX_set_select_certificate_cb %xdefine SSL_CTX_set_session_cache_mode BORINGSSL_PREFIX %+ _SSL_CTX_set_session_cache_mode %xdefine SSL_CTX_set_session_id_context BORINGSSL_PREFIX %+ _SSL_CTX_set_session_id_context %xdefine SSL_CTX_set_session_psk_dhe_timeout BORINGSSL_PREFIX %+ _SSL_CTX_set_session_psk_dhe_timeout %xdefine SSL_CTX_set_signed_cert_timestamp_list BORINGSSL_PREFIX %+ 
_SSL_CTX_set_signed_cert_timestamp_list %xdefine SSL_CTX_set_signing_algorithm_prefs BORINGSSL_PREFIX %+ _SSL_CTX_set_signing_algorithm_prefs %xdefine SSL_CTX_set_srtp_profiles BORINGSSL_PREFIX %+ _SSL_CTX_set_srtp_profiles %xdefine SSL_CTX_set_strict_cipher_list BORINGSSL_PREFIX %+ _SSL_CTX_set_strict_cipher_list %xdefine SSL_CTX_set_ticket_aead_method BORINGSSL_PREFIX %+ _SSL_CTX_set_ticket_aead_method %xdefine SSL_CTX_set_timeout BORINGSSL_PREFIX %+ _SSL_CTX_set_timeout %xdefine SSL_CTX_set_tls_channel_id_enabled BORINGSSL_PREFIX %+ _SSL_CTX_set_tls_channel_id_enabled %xdefine SSL_CTX_set_tlsext_servername_arg BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_servername_arg %xdefine SSL_CTX_set_tlsext_servername_callback BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_servername_callback %xdefine SSL_CTX_set_tlsext_status_arg BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_status_arg %xdefine SSL_CTX_set_tlsext_status_cb BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_status_cb %xdefine SSL_CTX_set_tlsext_ticket_key_cb BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_ticket_key_cb %xdefine SSL_CTX_set_tlsext_ticket_keys BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_ticket_keys %xdefine SSL_CTX_set_tlsext_use_srtp BORINGSSL_PREFIX %+ _SSL_CTX_set_tlsext_use_srtp %xdefine SSL_CTX_set_tmp_dh BORINGSSL_PREFIX %+ _SSL_CTX_set_tmp_dh %xdefine SSL_CTX_set_tmp_dh_callback BORINGSSL_PREFIX %+ _SSL_CTX_set_tmp_dh_callback %xdefine SSL_CTX_set_tmp_ecdh BORINGSSL_PREFIX %+ _SSL_CTX_set_tmp_ecdh %xdefine SSL_CTX_set_tmp_rsa BORINGSSL_PREFIX %+ _SSL_CTX_set_tmp_rsa %xdefine SSL_CTX_set_tmp_rsa_callback BORINGSSL_PREFIX %+ _SSL_CTX_set_tmp_rsa_callback %xdefine SSL_CTX_set_trust BORINGSSL_PREFIX %+ _SSL_CTX_set_trust %xdefine SSL_CTX_set_verify BORINGSSL_PREFIX %+ _SSL_CTX_set_verify %xdefine SSL_CTX_set_verify_algorithm_prefs BORINGSSL_PREFIX %+ _SSL_CTX_set_verify_algorithm_prefs %xdefine SSL_CTX_set_verify_depth BORINGSSL_PREFIX %+ _SSL_CTX_set_verify_depth %xdefine SSL_CTX_up_ref BORINGSSL_PREFIX %+ _SSL_CTX_up_ref 
%xdefine SSL_CTX_use_PrivateKey BORINGSSL_PREFIX %+ _SSL_CTX_use_PrivateKey %xdefine SSL_CTX_use_PrivateKey_ASN1 BORINGSSL_PREFIX %+ _SSL_CTX_use_PrivateKey_ASN1 %xdefine SSL_CTX_use_PrivateKey_file BORINGSSL_PREFIX %+ _SSL_CTX_use_PrivateKey_file %xdefine SSL_CTX_use_RSAPrivateKey BORINGSSL_PREFIX %+ _SSL_CTX_use_RSAPrivateKey %xdefine SSL_CTX_use_RSAPrivateKey_ASN1 BORINGSSL_PREFIX %+ _SSL_CTX_use_RSAPrivateKey_ASN1 %xdefine SSL_CTX_use_RSAPrivateKey_file BORINGSSL_PREFIX %+ _SSL_CTX_use_RSAPrivateKey_file %xdefine SSL_CTX_use_certificate BORINGSSL_PREFIX %+ _SSL_CTX_use_certificate %xdefine SSL_CTX_use_certificate_ASN1 BORINGSSL_PREFIX %+ _SSL_CTX_use_certificate_ASN1 %xdefine SSL_CTX_use_certificate_chain_file BORINGSSL_PREFIX %+ _SSL_CTX_use_certificate_chain_file %xdefine SSL_CTX_use_certificate_file BORINGSSL_PREFIX %+ _SSL_CTX_use_certificate_file %xdefine SSL_CTX_use_psk_identity_hint BORINGSSL_PREFIX %+ _SSL_CTX_use_psk_identity_hint %xdefine SSL_ECH_KEYS_add BORINGSSL_PREFIX %+ _SSL_ECH_KEYS_add %xdefine SSL_ECH_KEYS_free BORINGSSL_PREFIX %+ _SSL_ECH_KEYS_free %xdefine SSL_ECH_KEYS_has_duplicate_config_id BORINGSSL_PREFIX %+ _SSL_ECH_KEYS_has_duplicate_config_id %xdefine SSL_ECH_KEYS_marshal_retry_configs BORINGSSL_PREFIX %+ _SSL_ECH_KEYS_marshal_retry_configs %xdefine SSL_ECH_KEYS_new BORINGSSL_PREFIX %+ _SSL_ECH_KEYS_new %xdefine SSL_ECH_KEYS_up_ref BORINGSSL_PREFIX %+ _SSL_ECH_KEYS_up_ref %xdefine SSL_SESSION_copy_without_early_data BORINGSSL_PREFIX %+ _SSL_SESSION_copy_without_early_data %xdefine SSL_SESSION_early_data_capable BORINGSSL_PREFIX %+ _SSL_SESSION_early_data_capable %xdefine SSL_SESSION_free BORINGSSL_PREFIX %+ _SSL_SESSION_free %xdefine SSL_SESSION_from_bytes BORINGSSL_PREFIX %+ _SSL_SESSION_from_bytes %xdefine SSL_SESSION_get0_cipher BORINGSSL_PREFIX %+ _SSL_SESSION_get0_cipher %xdefine SSL_SESSION_get0_id_context BORINGSSL_PREFIX %+ _SSL_SESSION_get0_id_context %xdefine SSL_SESSION_get0_ocsp_response BORINGSSL_PREFIX %+ 
_SSL_SESSION_get0_ocsp_response %xdefine SSL_SESSION_get0_peer BORINGSSL_PREFIX %+ _SSL_SESSION_get0_peer %xdefine SSL_SESSION_get0_peer_certificates BORINGSSL_PREFIX %+ _SSL_SESSION_get0_peer_certificates %xdefine SSL_SESSION_get0_peer_sha256 BORINGSSL_PREFIX %+ _SSL_SESSION_get0_peer_sha256 %xdefine SSL_SESSION_get0_signed_cert_timestamp_list BORINGSSL_PREFIX %+ _SSL_SESSION_get0_signed_cert_timestamp_list %xdefine SSL_SESSION_get0_ticket BORINGSSL_PREFIX %+ _SSL_SESSION_get0_ticket %xdefine SSL_SESSION_get_ex_data BORINGSSL_PREFIX %+ _SSL_SESSION_get_ex_data %xdefine SSL_SESSION_get_ex_new_index BORINGSSL_PREFIX %+ _SSL_SESSION_get_ex_new_index %xdefine SSL_SESSION_get_id BORINGSSL_PREFIX %+ _SSL_SESSION_get_id %xdefine SSL_SESSION_get_master_key BORINGSSL_PREFIX %+ _SSL_SESSION_get_master_key %xdefine SSL_SESSION_get_protocol_version BORINGSSL_PREFIX %+ _SSL_SESSION_get_protocol_version %xdefine SSL_SESSION_get_ticket_lifetime_hint BORINGSSL_PREFIX %+ _SSL_SESSION_get_ticket_lifetime_hint %xdefine SSL_SESSION_get_time BORINGSSL_PREFIX %+ _SSL_SESSION_get_time %xdefine SSL_SESSION_get_timeout BORINGSSL_PREFIX %+ _SSL_SESSION_get_timeout %xdefine SSL_SESSION_get_version BORINGSSL_PREFIX %+ _SSL_SESSION_get_version %xdefine SSL_SESSION_has_peer_sha256 BORINGSSL_PREFIX %+ _SSL_SESSION_has_peer_sha256 %xdefine SSL_SESSION_has_ticket BORINGSSL_PREFIX %+ _SSL_SESSION_has_ticket %xdefine SSL_SESSION_is_resumable BORINGSSL_PREFIX %+ _SSL_SESSION_is_resumable %xdefine SSL_SESSION_new BORINGSSL_PREFIX %+ _SSL_SESSION_new %xdefine SSL_SESSION_set1_id BORINGSSL_PREFIX %+ _SSL_SESSION_set1_id %xdefine SSL_SESSION_set1_id_context BORINGSSL_PREFIX %+ _SSL_SESSION_set1_id_context %xdefine SSL_SESSION_set_ex_data BORINGSSL_PREFIX %+ _SSL_SESSION_set_ex_data %xdefine SSL_SESSION_set_protocol_version BORINGSSL_PREFIX %+ _SSL_SESSION_set_protocol_version %xdefine SSL_SESSION_set_ticket BORINGSSL_PREFIX %+ _SSL_SESSION_set_ticket %xdefine SSL_SESSION_set_time BORINGSSL_PREFIX %+ 
_SSL_SESSION_set_time %xdefine SSL_SESSION_set_timeout BORINGSSL_PREFIX %+ _SSL_SESSION_set_timeout %xdefine SSL_SESSION_should_be_single_use BORINGSSL_PREFIX %+ _SSL_SESSION_should_be_single_use %xdefine SSL_SESSION_to_bytes BORINGSSL_PREFIX %+ _SSL_SESSION_to_bytes %xdefine SSL_SESSION_to_bytes_for_ticket BORINGSSL_PREFIX %+ _SSL_SESSION_to_bytes_for_ticket %xdefine SSL_SESSION_up_ref BORINGSSL_PREFIX %+ _SSL_SESSION_up_ref %xdefine SSL_accept BORINGSSL_PREFIX %+ _SSL_accept %xdefine SSL_add0_chain_cert BORINGSSL_PREFIX %+ _SSL_add0_chain_cert %xdefine SSL_add1_chain_cert BORINGSSL_PREFIX %+ _SSL_add1_chain_cert %xdefine SSL_add1_credential BORINGSSL_PREFIX %+ _SSL_add1_credential %xdefine SSL_add_application_settings BORINGSSL_PREFIX %+ _SSL_add_application_settings %xdefine SSL_add_bio_cert_subjects_to_stack BORINGSSL_PREFIX %+ _SSL_add_bio_cert_subjects_to_stack %xdefine SSL_add_client_CA BORINGSSL_PREFIX %+ _SSL_add_client_CA %xdefine SSL_add_file_cert_subjects_to_stack BORINGSSL_PREFIX %+ _SSL_add_file_cert_subjects_to_stack %xdefine SSL_alert_desc_string BORINGSSL_PREFIX %+ _SSL_alert_desc_string %xdefine SSL_alert_desc_string_long BORINGSSL_PREFIX %+ _SSL_alert_desc_string_long %xdefine SSL_alert_from_verify_result BORINGSSL_PREFIX %+ _SSL_alert_from_verify_result %xdefine SSL_alert_type_string BORINGSSL_PREFIX %+ _SSL_alert_type_string %xdefine SSL_alert_type_string_long BORINGSSL_PREFIX %+ _SSL_alert_type_string_long %xdefine SSL_cache_hit BORINGSSL_PREFIX %+ _SSL_cache_hit %xdefine SSL_can_release_private_key BORINGSSL_PREFIX %+ _SSL_can_release_private_key %xdefine SSL_certs_clear BORINGSSL_PREFIX %+ _SSL_certs_clear %xdefine SSL_check_private_key BORINGSSL_PREFIX %+ _SSL_check_private_key %xdefine SSL_clear BORINGSSL_PREFIX %+ _SSL_clear %xdefine SSL_clear_chain_certs BORINGSSL_PREFIX %+ _SSL_clear_chain_certs %xdefine SSL_clear_mode BORINGSSL_PREFIX %+ _SSL_clear_mode %xdefine SSL_clear_options BORINGSSL_PREFIX %+ _SSL_clear_options %xdefine 
SSL_connect BORINGSSL_PREFIX %+ _SSL_connect %xdefine SSL_cutthrough_complete BORINGSSL_PREFIX %+ _SSL_cutthrough_complete %xdefine SSL_do_handshake BORINGSSL_PREFIX %+ _SSL_do_handshake %xdefine SSL_dup_CA_list BORINGSSL_PREFIX %+ _SSL_dup_CA_list %xdefine SSL_early_callback_ctx_extension_get BORINGSSL_PREFIX %+ _SSL_early_callback_ctx_extension_get %xdefine SSL_early_data_accepted BORINGSSL_PREFIX %+ _SSL_early_data_accepted %xdefine SSL_early_data_reason_string BORINGSSL_PREFIX %+ _SSL_early_data_reason_string %xdefine SSL_ech_accepted BORINGSSL_PREFIX %+ _SSL_ech_accepted %xdefine SSL_enable_ocsp_stapling BORINGSSL_PREFIX %+ _SSL_enable_ocsp_stapling %xdefine SSL_enable_signed_cert_timestamps BORINGSSL_PREFIX %+ _SSL_enable_signed_cert_timestamps %xdefine SSL_enable_tls_channel_id BORINGSSL_PREFIX %+ _SSL_enable_tls_channel_id %xdefine SSL_error_description BORINGSSL_PREFIX %+ _SSL_error_description %xdefine SSL_export_keying_material BORINGSSL_PREFIX %+ _SSL_export_keying_material %xdefine SSL_free BORINGSSL_PREFIX %+ _SSL_free %xdefine SSL_generate_key_block BORINGSSL_PREFIX %+ _SSL_generate_key_block %xdefine SSL_get0_alpn_selected BORINGSSL_PREFIX %+ _SSL_get0_alpn_selected %xdefine SSL_get0_certificate_types BORINGSSL_PREFIX %+ _SSL_get0_certificate_types %xdefine SSL_get0_chain BORINGSSL_PREFIX %+ _SSL_get0_chain %xdefine SSL_get0_chain_certs BORINGSSL_PREFIX %+ _SSL_get0_chain_certs %xdefine SSL_get0_ech_name_override BORINGSSL_PREFIX %+ _SSL_get0_ech_name_override %xdefine SSL_get0_ech_retry_configs BORINGSSL_PREFIX %+ _SSL_get0_ech_retry_configs %xdefine SSL_get0_next_proto_negotiated BORINGSSL_PREFIX %+ _SSL_get0_next_proto_negotiated %xdefine SSL_get0_ocsp_response BORINGSSL_PREFIX %+ _SSL_get0_ocsp_response %xdefine SSL_get0_param BORINGSSL_PREFIX %+ _SSL_get0_param %xdefine SSL_get0_peer_application_settings BORINGSSL_PREFIX %+ _SSL_get0_peer_application_settings %xdefine SSL_get0_peer_certificates BORINGSSL_PREFIX %+ _SSL_get0_peer_certificates 
%xdefine SSL_get0_peer_delegation_algorithms BORINGSSL_PREFIX %+ _SSL_get0_peer_delegation_algorithms %xdefine SSL_get0_peer_verify_algorithms BORINGSSL_PREFIX %+ _SSL_get0_peer_verify_algorithms %xdefine SSL_get0_selected_credential BORINGSSL_PREFIX %+ _SSL_get0_selected_credential %xdefine SSL_get0_server_requested_CAs BORINGSSL_PREFIX %+ _SSL_get0_server_requested_CAs %xdefine SSL_get0_session_id_context BORINGSSL_PREFIX %+ _SSL_get0_session_id_context %xdefine SSL_get0_signed_cert_timestamp_list BORINGSSL_PREFIX %+ _SSL_get0_signed_cert_timestamp_list %xdefine SSL_get1_session BORINGSSL_PREFIX %+ _SSL_get1_session %xdefine SSL_get_SSL_CTX BORINGSSL_PREFIX %+ _SSL_get_SSL_CTX %xdefine SSL_get_all_cipher_names BORINGSSL_PREFIX %+ _SSL_get_all_cipher_names %xdefine SSL_get_all_curve_names BORINGSSL_PREFIX %+ _SSL_get_all_curve_names %xdefine SSL_get_all_group_names BORINGSSL_PREFIX %+ _SSL_get_all_group_names %xdefine SSL_get_all_signature_algorithm_names BORINGSSL_PREFIX %+ _SSL_get_all_signature_algorithm_names %xdefine SSL_get_all_standard_cipher_names BORINGSSL_PREFIX %+ _SSL_get_all_standard_cipher_names %xdefine SSL_get_all_version_names BORINGSSL_PREFIX %+ _SSL_get_all_version_names %xdefine SSL_get_certificate BORINGSSL_PREFIX %+ _SSL_get_certificate %xdefine SSL_get_cipher_by_value BORINGSSL_PREFIX %+ _SSL_get_cipher_by_value %xdefine SSL_get_cipher_list BORINGSSL_PREFIX %+ _SSL_get_cipher_list %xdefine SSL_get_ciphers BORINGSSL_PREFIX %+ _SSL_get_ciphers %xdefine SSL_get_client_CA_list BORINGSSL_PREFIX %+ _SSL_get_client_CA_list %xdefine SSL_get_client_random BORINGSSL_PREFIX %+ _SSL_get_client_random %xdefine SSL_get_compliance_policy BORINGSSL_PREFIX %+ _SSL_get_compliance_policy %xdefine SSL_get_current_cipher BORINGSSL_PREFIX %+ _SSL_get_current_cipher %xdefine SSL_get_current_compression BORINGSSL_PREFIX %+ _SSL_get_current_compression %xdefine SSL_get_current_expansion BORINGSSL_PREFIX %+ _SSL_get_current_expansion %xdefine SSL_get_curve_id 
BORINGSSL_PREFIX %+ _SSL_get_curve_id %xdefine SSL_get_curve_name BORINGSSL_PREFIX %+ _SSL_get_curve_name %xdefine SSL_get_default_timeout BORINGSSL_PREFIX %+ _SSL_get_default_timeout %xdefine SSL_get_early_data_reason BORINGSSL_PREFIX %+ _SSL_get_early_data_reason %xdefine SSL_get_error BORINGSSL_PREFIX %+ _SSL_get_error %xdefine SSL_get_ex_data BORINGSSL_PREFIX %+ _SSL_get_ex_data %xdefine SSL_get_ex_data_X509_STORE_CTX_idx BORINGSSL_PREFIX %+ _SSL_get_ex_data_X509_STORE_CTX_idx %xdefine SSL_get_ex_new_index BORINGSSL_PREFIX %+ _SSL_get_ex_new_index %xdefine SSL_get_extms_support BORINGSSL_PREFIX %+ _SSL_get_extms_support %xdefine SSL_get_fd BORINGSSL_PREFIX %+ _SSL_get_fd %xdefine SSL_get_finished BORINGSSL_PREFIX %+ _SSL_get_finished %xdefine SSL_get_group_id BORINGSSL_PREFIX %+ _SSL_get_group_id %xdefine SSL_get_group_name BORINGSSL_PREFIX %+ _SSL_get_group_name %xdefine SSL_get_info_callback BORINGSSL_PREFIX %+ _SSL_get_info_callback %xdefine SSL_get_ivs BORINGSSL_PREFIX %+ _SSL_get_ivs %xdefine SSL_get_key_block_len BORINGSSL_PREFIX %+ _SSL_get_key_block_len %xdefine SSL_get_max_cert_list BORINGSSL_PREFIX %+ _SSL_get_max_cert_list %xdefine SSL_get_max_proto_version BORINGSSL_PREFIX %+ _SSL_get_max_proto_version %xdefine SSL_get_min_proto_version BORINGSSL_PREFIX %+ _SSL_get_min_proto_version %xdefine SSL_get_mode BORINGSSL_PREFIX %+ _SSL_get_mode %xdefine SSL_get_negotiated_group BORINGSSL_PREFIX %+ _SSL_get_negotiated_group %xdefine SSL_get_options BORINGSSL_PREFIX %+ _SSL_get_options %xdefine SSL_get_peer_cert_chain BORINGSSL_PREFIX %+ _SSL_get_peer_cert_chain %xdefine SSL_get_peer_certificate BORINGSSL_PREFIX %+ _SSL_get_peer_certificate %xdefine SSL_get_peer_finished BORINGSSL_PREFIX %+ _SSL_get_peer_finished %xdefine SSL_get_peer_full_cert_chain BORINGSSL_PREFIX %+ _SSL_get_peer_full_cert_chain %xdefine SSL_get_peer_quic_transport_params BORINGSSL_PREFIX %+ _SSL_get_peer_quic_transport_params %xdefine SSL_get_peer_signature_algorithm BORINGSSL_PREFIX %+ 
_SSL_get_peer_signature_algorithm %xdefine SSL_get_pending_cipher BORINGSSL_PREFIX %+ _SSL_get_pending_cipher %xdefine SSL_get_privatekey BORINGSSL_PREFIX %+ _SSL_get_privatekey %xdefine SSL_get_psk_identity BORINGSSL_PREFIX %+ _SSL_get_psk_identity %xdefine SSL_get_psk_identity_hint BORINGSSL_PREFIX %+ _SSL_get_psk_identity_hint %xdefine SSL_get_quiet_shutdown BORINGSSL_PREFIX %+ _SSL_get_quiet_shutdown %xdefine SSL_get_rbio BORINGSSL_PREFIX %+ _SSL_get_rbio %xdefine SSL_get_read_ahead BORINGSSL_PREFIX %+ _SSL_get_read_ahead %xdefine SSL_get_read_sequence BORINGSSL_PREFIX %+ _SSL_get_read_sequence %xdefine SSL_get_rfd BORINGSSL_PREFIX %+ _SSL_get_rfd %xdefine SSL_get_secure_renegotiation_support BORINGSSL_PREFIX %+ _SSL_get_secure_renegotiation_support %xdefine SSL_get_selected_srtp_profile BORINGSSL_PREFIX %+ _SSL_get_selected_srtp_profile %xdefine SSL_get_server_random BORINGSSL_PREFIX %+ _SSL_get_server_random %xdefine SSL_get_server_tmp_key BORINGSSL_PREFIX %+ _SSL_get_server_tmp_key %xdefine SSL_get_servername BORINGSSL_PREFIX %+ _SSL_get_servername %xdefine SSL_get_servername_type BORINGSSL_PREFIX %+ _SSL_get_servername_type %xdefine SSL_get_session BORINGSSL_PREFIX %+ _SSL_get_session %xdefine SSL_get_shared_ciphers BORINGSSL_PREFIX %+ _SSL_get_shared_ciphers %xdefine SSL_get_shared_sigalgs BORINGSSL_PREFIX %+ _SSL_get_shared_sigalgs %xdefine SSL_get_shutdown BORINGSSL_PREFIX %+ _SSL_get_shutdown %xdefine SSL_get_signature_algorithm_digest BORINGSSL_PREFIX %+ _SSL_get_signature_algorithm_digest %xdefine SSL_get_signature_algorithm_key_type BORINGSSL_PREFIX %+ _SSL_get_signature_algorithm_key_type %xdefine SSL_get_signature_algorithm_name BORINGSSL_PREFIX %+ _SSL_get_signature_algorithm_name %xdefine SSL_get_srtp_profiles BORINGSSL_PREFIX %+ _SSL_get_srtp_profiles %xdefine SSL_get_ticket_age_skew BORINGSSL_PREFIX %+ _SSL_get_ticket_age_skew %xdefine SSL_get_tls_channel_id BORINGSSL_PREFIX %+ _SSL_get_tls_channel_id %xdefine SSL_get_tls_unique 
BORINGSSL_PREFIX %+ _SSL_get_tls_unique %xdefine SSL_get_tlsext_status_ocsp_resp BORINGSSL_PREFIX %+ _SSL_get_tlsext_status_ocsp_resp %xdefine SSL_get_tlsext_status_type BORINGSSL_PREFIX %+ _SSL_get_tlsext_status_type %xdefine SSL_get_verify_callback BORINGSSL_PREFIX %+ _SSL_get_verify_callback %xdefine SSL_get_verify_depth BORINGSSL_PREFIX %+ _SSL_get_verify_depth %xdefine SSL_get_verify_mode BORINGSSL_PREFIX %+ _SSL_get_verify_mode %xdefine SSL_get_verify_result BORINGSSL_PREFIX %+ _SSL_get_verify_result %xdefine SSL_get_version BORINGSSL_PREFIX %+ _SSL_get_version %xdefine SSL_get_wbio BORINGSSL_PREFIX %+ _SSL_get_wbio %xdefine SSL_get_wfd BORINGSSL_PREFIX %+ _SSL_get_wfd %xdefine SSL_get_write_sequence BORINGSSL_PREFIX %+ _SSL_get_write_sequence %xdefine SSL_has_application_settings BORINGSSL_PREFIX %+ _SSL_has_application_settings %xdefine SSL_has_pending BORINGSSL_PREFIX %+ _SSL_has_pending %xdefine SSL_in_early_data BORINGSSL_PREFIX %+ _SSL_in_early_data %xdefine SSL_in_false_start BORINGSSL_PREFIX %+ _SSL_in_false_start %xdefine SSL_in_init BORINGSSL_PREFIX %+ _SSL_in_init %xdefine SSL_is_dtls BORINGSSL_PREFIX %+ _SSL_is_dtls %xdefine SSL_is_init_finished BORINGSSL_PREFIX %+ _SSL_is_init_finished %xdefine SSL_is_quic BORINGSSL_PREFIX %+ _SSL_is_quic %xdefine SSL_is_server BORINGSSL_PREFIX %+ _SSL_is_server %xdefine SSL_is_signature_algorithm_rsa_pss BORINGSSL_PREFIX %+ _SSL_is_signature_algorithm_rsa_pss %xdefine SSL_key_update BORINGSSL_PREFIX %+ _SSL_key_update %xdefine SSL_library_init BORINGSSL_PREFIX %+ _SSL_library_init %xdefine SSL_load_client_CA_file BORINGSSL_PREFIX %+ _SSL_load_client_CA_file %xdefine SSL_load_error_strings BORINGSSL_PREFIX %+ _SSL_load_error_strings %xdefine SSL_magic_pending_session_ptr BORINGSSL_PREFIX %+ _SSL_magic_pending_session_ptr %xdefine SSL_marshal_ech_config BORINGSSL_PREFIX %+ _SSL_marshal_ech_config %xdefine SSL_max_seal_overhead BORINGSSL_PREFIX %+ _SSL_max_seal_overhead %xdefine SSL_need_tmp_RSA BORINGSSL_PREFIX %+ 
_SSL_need_tmp_RSA %xdefine SSL_new BORINGSSL_PREFIX %+ _SSL_new %xdefine SSL_num_renegotiations BORINGSSL_PREFIX %+ _SSL_num_renegotiations %xdefine SSL_peek BORINGSSL_PREFIX %+ _SSL_peek %xdefine SSL_pending BORINGSSL_PREFIX %+ _SSL_pending %xdefine SSL_process_quic_post_handshake BORINGSSL_PREFIX %+ _SSL_process_quic_post_handshake %xdefine SSL_process_tls13_new_session_ticket BORINGSSL_PREFIX %+ _SSL_process_tls13_new_session_ticket %xdefine SSL_provide_quic_data BORINGSSL_PREFIX %+ _SSL_provide_quic_data %xdefine SSL_quic_max_handshake_flight_len BORINGSSL_PREFIX %+ _SSL_quic_max_handshake_flight_len %xdefine SSL_quic_read_level BORINGSSL_PREFIX %+ _SSL_quic_read_level %xdefine SSL_quic_write_level BORINGSSL_PREFIX %+ _SSL_quic_write_level %xdefine SSL_read BORINGSSL_PREFIX %+ _SSL_read %xdefine SSL_renegotiate BORINGSSL_PREFIX %+ _SSL_renegotiate %xdefine SSL_renegotiate_pending BORINGSSL_PREFIX %+ _SSL_renegotiate_pending %xdefine SSL_request_handshake_hints BORINGSSL_PREFIX %+ _SSL_request_handshake_hints %xdefine SSL_reset_early_data_reject BORINGSSL_PREFIX %+ _SSL_reset_early_data_reject %xdefine SSL_select_next_proto BORINGSSL_PREFIX %+ _SSL_select_next_proto %xdefine SSL_send_fatal_alert BORINGSSL_PREFIX %+ _SSL_send_fatal_alert %xdefine SSL_serialize_capabilities BORINGSSL_PREFIX %+ _SSL_serialize_capabilities %xdefine SSL_serialize_handshake_hints BORINGSSL_PREFIX %+ _SSL_serialize_handshake_hints %xdefine SSL_session_reused BORINGSSL_PREFIX %+ _SSL_session_reused %xdefine SSL_set0_CA_names BORINGSSL_PREFIX %+ _SSL_set0_CA_names %xdefine SSL_set0_chain BORINGSSL_PREFIX %+ _SSL_set0_chain %xdefine SSL_set0_client_CAs BORINGSSL_PREFIX %+ _SSL_set0_client_CAs %xdefine SSL_set0_rbio BORINGSSL_PREFIX %+ _SSL_set0_rbio %xdefine SSL_set0_verify_cert_store BORINGSSL_PREFIX %+ _SSL_set0_verify_cert_store %xdefine SSL_set0_wbio BORINGSSL_PREFIX %+ _SSL_set0_wbio %xdefine SSL_set1_chain BORINGSSL_PREFIX %+ _SSL_set1_chain %xdefine SSL_set1_curves BORINGSSL_PREFIX 
%+ _SSL_set1_curves %xdefine SSL_set1_curves_list BORINGSSL_PREFIX %+ _SSL_set1_curves_list %xdefine SSL_set1_ech_config_list BORINGSSL_PREFIX %+ _SSL_set1_ech_config_list %xdefine SSL_set1_group_ids BORINGSSL_PREFIX %+ _SSL_set1_group_ids %xdefine SSL_set1_groups BORINGSSL_PREFIX %+ _SSL_set1_groups %xdefine SSL_set1_groups_list BORINGSSL_PREFIX %+ _SSL_set1_groups_list %xdefine SSL_set1_host BORINGSSL_PREFIX %+ _SSL_set1_host %xdefine SSL_set1_param BORINGSSL_PREFIX %+ _SSL_set1_param %xdefine SSL_set1_sigalgs BORINGSSL_PREFIX %+ _SSL_set1_sigalgs %xdefine SSL_set1_sigalgs_list BORINGSSL_PREFIX %+ _SSL_set1_sigalgs_list %xdefine SSL_set1_tls_channel_id BORINGSSL_PREFIX %+ _SSL_set1_tls_channel_id %xdefine SSL_set1_verify_cert_store BORINGSSL_PREFIX %+ _SSL_set1_verify_cert_store %xdefine SSL_set_SSL_CTX BORINGSSL_PREFIX %+ _SSL_set_SSL_CTX %xdefine SSL_set_accept_state BORINGSSL_PREFIX %+ _SSL_set_accept_state %xdefine SSL_set_alpn_protos BORINGSSL_PREFIX %+ _SSL_set_alpn_protos %xdefine SSL_set_alps_use_new_codepoint BORINGSSL_PREFIX %+ _SSL_set_alps_use_new_codepoint %xdefine SSL_set_bio BORINGSSL_PREFIX %+ _SSL_set_bio %xdefine SSL_set_cert_cb BORINGSSL_PREFIX %+ _SSL_set_cert_cb %xdefine SSL_set_chain_and_key BORINGSSL_PREFIX %+ _SSL_set_chain_and_key %xdefine SSL_set_check_client_certificate_type BORINGSSL_PREFIX %+ _SSL_set_check_client_certificate_type %xdefine SSL_set_check_ecdsa_curve BORINGSSL_PREFIX %+ _SSL_set_check_ecdsa_curve %xdefine SSL_set_cipher_list BORINGSSL_PREFIX %+ _SSL_set_cipher_list %xdefine SSL_set_client_CA_list BORINGSSL_PREFIX %+ _SSL_set_client_CA_list %xdefine SSL_set_compliance_policy BORINGSSL_PREFIX %+ _SSL_set_compliance_policy %xdefine SSL_set_connect_state BORINGSSL_PREFIX %+ _SSL_set_connect_state %xdefine SSL_set_custom_verify BORINGSSL_PREFIX %+ _SSL_set_custom_verify %xdefine SSL_set_early_data_enabled BORINGSSL_PREFIX %+ _SSL_set_early_data_enabled %xdefine SSL_set_enable_ech_grease BORINGSSL_PREFIX %+ 
_SSL_set_enable_ech_grease %xdefine SSL_set_enforce_rsa_key_usage BORINGSSL_PREFIX %+ _SSL_set_enforce_rsa_key_usage %xdefine SSL_set_ex_data BORINGSSL_PREFIX %+ _SSL_set_ex_data %xdefine SSL_set_fd BORINGSSL_PREFIX %+ _SSL_set_fd %xdefine SSL_set_handshake_hints BORINGSSL_PREFIX %+ _SSL_set_handshake_hints %xdefine SSL_set_hostflags BORINGSSL_PREFIX %+ _SSL_set_hostflags %xdefine SSL_set_info_callback BORINGSSL_PREFIX %+ _SSL_set_info_callback %xdefine SSL_set_jdk11_workaround BORINGSSL_PREFIX %+ _SSL_set_jdk11_workaround %xdefine SSL_set_max_cert_list BORINGSSL_PREFIX %+ _SSL_set_max_cert_list %xdefine SSL_set_max_proto_version BORINGSSL_PREFIX %+ _SSL_set_max_proto_version %xdefine SSL_set_max_send_fragment BORINGSSL_PREFIX %+ _SSL_set_max_send_fragment %xdefine SSL_set_min_proto_version BORINGSSL_PREFIX %+ _SSL_set_min_proto_version %xdefine SSL_set_mode BORINGSSL_PREFIX %+ _SSL_set_mode %xdefine SSL_set_msg_callback BORINGSSL_PREFIX %+ _SSL_set_msg_callback %xdefine SSL_set_msg_callback_arg BORINGSSL_PREFIX %+ _SSL_set_msg_callback_arg %xdefine SSL_set_mtu BORINGSSL_PREFIX %+ _SSL_set_mtu %xdefine SSL_set_ocsp_response BORINGSSL_PREFIX %+ _SSL_set_ocsp_response %xdefine SSL_set_options BORINGSSL_PREFIX %+ _SSL_set_options %xdefine SSL_set_permute_extensions BORINGSSL_PREFIX %+ _SSL_set_permute_extensions %xdefine SSL_set_private_key_method BORINGSSL_PREFIX %+ _SSL_set_private_key_method %xdefine SSL_set_psk_client_callback BORINGSSL_PREFIX %+ _SSL_set_psk_client_callback %xdefine SSL_set_psk_server_callback BORINGSSL_PREFIX %+ _SSL_set_psk_server_callback %xdefine SSL_set_purpose BORINGSSL_PREFIX %+ _SSL_set_purpose %xdefine SSL_set_quic_early_data_context BORINGSSL_PREFIX %+ _SSL_set_quic_early_data_context %xdefine SSL_set_quic_method BORINGSSL_PREFIX %+ _SSL_set_quic_method %xdefine SSL_set_quic_transport_params BORINGSSL_PREFIX %+ _SSL_set_quic_transport_params %xdefine SSL_set_quic_use_legacy_codepoint BORINGSSL_PREFIX %+ 
_SSL_set_quic_use_legacy_codepoint %xdefine SSL_set_quiet_shutdown BORINGSSL_PREFIX %+ _SSL_set_quiet_shutdown %xdefine SSL_set_read_ahead BORINGSSL_PREFIX %+ _SSL_set_read_ahead %xdefine SSL_set_renegotiate_mode BORINGSSL_PREFIX %+ _SSL_set_renegotiate_mode %xdefine SSL_set_retain_only_sha256_of_client_certs BORINGSSL_PREFIX %+ _SSL_set_retain_only_sha256_of_client_certs %xdefine SSL_set_rfd BORINGSSL_PREFIX %+ _SSL_set_rfd %xdefine SSL_set_session BORINGSSL_PREFIX %+ _SSL_set_session %xdefine SSL_set_session_id_context BORINGSSL_PREFIX %+ _SSL_set_session_id_context %xdefine SSL_set_shed_handshake_config BORINGSSL_PREFIX %+ _SSL_set_shed_handshake_config %xdefine SSL_set_shutdown BORINGSSL_PREFIX %+ _SSL_set_shutdown %xdefine SSL_set_signed_cert_timestamp_list BORINGSSL_PREFIX %+ _SSL_set_signed_cert_timestamp_list %xdefine SSL_set_signing_algorithm_prefs BORINGSSL_PREFIX %+ _SSL_set_signing_algorithm_prefs %xdefine SSL_set_srtp_profiles BORINGSSL_PREFIX %+ _SSL_set_srtp_profiles %xdefine SSL_set_state BORINGSSL_PREFIX %+ _SSL_set_state %xdefine SSL_set_strict_cipher_list BORINGSSL_PREFIX %+ _SSL_set_strict_cipher_list %xdefine SSL_set_tls_channel_id_enabled BORINGSSL_PREFIX %+ _SSL_set_tls_channel_id_enabled %xdefine SSL_set_tlsext_host_name BORINGSSL_PREFIX %+ _SSL_set_tlsext_host_name %xdefine SSL_set_tlsext_status_ocsp_resp BORINGSSL_PREFIX %+ _SSL_set_tlsext_status_ocsp_resp %xdefine SSL_set_tlsext_status_type BORINGSSL_PREFIX %+ _SSL_set_tlsext_status_type %xdefine SSL_set_tlsext_use_srtp BORINGSSL_PREFIX %+ _SSL_set_tlsext_use_srtp %xdefine SSL_set_tmp_dh BORINGSSL_PREFIX %+ _SSL_set_tmp_dh %xdefine SSL_set_tmp_dh_callback BORINGSSL_PREFIX %+ _SSL_set_tmp_dh_callback %xdefine SSL_set_tmp_ecdh BORINGSSL_PREFIX %+ _SSL_set_tmp_ecdh %xdefine SSL_set_tmp_rsa BORINGSSL_PREFIX %+ _SSL_set_tmp_rsa %xdefine SSL_set_tmp_rsa_callback BORINGSSL_PREFIX %+ _SSL_set_tmp_rsa_callback %xdefine SSL_set_trust BORINGSSL_PREFIX %+ _SSL_set_trust %xdefine SSL_set_verify 
BORINGSSL_PREFIX %+ _SSL_set_verify %xdefine SSL_set_verify_algorithm_prefs BORINGSSL_PREFIX %+ _SSL_set_verify_algorithm_prefs %xdefine SSL_set_verify_depth BORINGSSL_PREFIX %+ _SSL_set_verify_depth %xdefine SSL_set_wfd BORINGSSL_PREFIX %+ _SSL_set_wfd %xdefine SSL_shutdown BORINGSSL_PREFIX %+ _SSL_shutdown %xdefine SSL_state BORINGSSL_PREFIX %+ _SSL_state %xdefine SSL_state_string BORINGSSL_PREFIX %+ _SSL_state_string %xdefine SSL_state_string_long BORINGSSL_PREFIX %+ _SSL_state_string_long %xdefine SSL_total_renegotiations BORINGSSL_PREFIX %+ _SSL_total_renegotiations %xdefine SSL_use_PrivateKey BORINGSSL_PREFIX %+ _SSL_use_PrivateKey %xdefine SSL_use_PrivateKey_ASN1 BORINGSSL_PREFIX %+ _SSL_use_PrivateKey_ASN1 %xdefine SSL_use_PrivateKey_file BORINGSSL_PREFIX %+ _SSL_use_PrivateKey_file %xdefine SSL_use_RSAPrivateKey BORINGSSL_PREFIX %+ _SSL_use_RSAPrivateKey %xdefine SSL_use_RSAPrivateKey_ASN1 BORINGSSL_PREFIX %+ _SSL_use_RSAPrivateKey_ASN1 %xdefine SSL_use_RSAPrivateKey_file BORINGSSL_PREFIX %+ _SSL_use_RSAPrivateKey_file %xdefine SSL_use_certificate BORINGSSL_PREFIX %+ _SSL_use_certificate %xdefine SSL_use_certificate_ASN1 BORINGSSL_PREFIX %+ _SSL_use_certificate_ASN1 %xdefine SSL_use_certificate_file BORINGSSL_PREFIX %+ _SSL_use_certificate_file %xdefine SSL_use_psk_identity_hint BORINGSSL_PREFIX %+ _SSL_use_psk_identity_hint %xdefine SSL_used_hello_retry_request BORINGSSL_PREFIX %+ _SSL_used_hello_retry_request %xdefine SSL_version BORINGSSL_PREFIX %+ _SSL_version %xdefine SSL_want BORINGSSL_PREFIX %+ _SSL_want %xdefine SSL_was_key_usage_invalid BORINGSSL_PREFIX %+ _SSL_was_key_usage_invalid %xdefine SSL_write BORINGSSL_PREFIX %+ _SSL_write %xdefine SSLeay BORINGSSL_PREFIX %+ _SSLeay %xdefine SSLeay_version BORINGSSL_PREFIX %+ _SSLeay_version %xdefine SSLv23_client_method BORINGSSL_PREFIX %+ _SSLv23_client_method %xdefine SSLv23_method BORINGSSL_PREFIX %+ _SSLv23_method %xdefine SSLv23_server_method BORINGSSL_PREFIX %+ _SSLv23_server_method %xdefine 
TLS_client_method BORINGSSL_PREFIX %+ _TLS_client_method %xdefine TLS_method BORINGSSL_PREFIX %+ _TLS_method %xdefine TLS_server_method BORINGSSL_PREFIX %+ _TLS_server_method %xdefine TLS_with_buffers_method BORINGSSL_PREFIX %+ _TLS_with_buffers_method %xdefine TLSv1_1_client_method BORINGSSL_PREFIX %+ _TLSv1_1_client_method %xdefine TLSv1_1_method BORINGSSL_PREFIX %+ _TLSv1_1_method %xdefine TLSv1_1_server_method BORINGSSL_PREFIX %+ _TLSv1_1_server_method %xdefine TLSv1_2_client_method BORINGSSL_PREFIX %+ _TLSv1_2_client_method %xdefine TLSv1_2_method BORINGSSL_PREFIX %+ _TLSv1_2_method %xdefine TLSv1_2_server_method BORINGSSL_PREFIX %+ _TLSv1_2_server_method %xdefine TLSv1_client_method BORINGSSL_PREFIX %+ _TLSv1_client_method %xdefine TLSv1_method BORINGSSL_PREFIX %+ _TLSv1_method %xdefine TLSv1_server_method BORINGSSL_PREFIX %+ _TLSv1_server_method %xdefine TRUST_TOKEN_CLIENT_add_key BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_add_key %xdefine TRUST_TOKEN_CLIENT_begin_issuance BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_begin_issuance %xdefine TRUST_TOKEN_CLIENT_begin_issuance_over_message BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_begin_issuance_over_message %xdefine TRUST_TOKEN_CLIENT_begin_redemption BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_begin_redemption %xdefine TRUST_TOKEN_CLIENT_finish_issuance BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_finish_issuance %xdefine TRUST_TOKEN_CLIENT_finish_redemption BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_finish_redemption %xdefine TRUST_TOKEN_CLIENT_free BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_free %xdefine TRUST_TOKEN_CLIENT_new BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_new %xdefine TRUST_TOKEN_CLIENT_set_srr_key BORINGSSL_PREFIX %+ _TRUST_TOKEN_CLIENT_set_srr_key %xdefine TRUST_TOKEN_ISSUER_add_key BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_add_key %xdefine TRUST_TOKEN_ISSUER_free BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_free %xdefine TRUST_TOKEN_ISSUER_issue BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_issue %xdefine TRUST_TOKEN_ISSUER_new 
BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_new %xdefine TRUST_TOKEN_ISSUER_redeem BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_redeem %xdefine TRUST_TOKEN_ISSUER_redeem_over_message BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_redeem_over_message %xdefine TRUST_TOKEN_ISSUER_set_metadata_key BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_set_metadata_key %xdefine TRUST_TOKEN_ISSUER_set_srr_key BORINGSSL_PREFIX %+ _TRUST_TOKEN_ISSUER_set_srr_key %xdefine TRUST_TOKEN_PRETOKEN_free BORINGSSL_PREFIX %+ _TRUST_TOKEN_PRETOKEN_free %xdefine TRUST_TOKEN_decode_private_metadata BORINGSSL_PREFIX %+ _TRUST_TOKEN_decode_private_metadata %xdefine TRUST_TOKEN_derive_key_from_secret BORINGSSL_PREFIX %+ _TRUST_TOKEN_derive_key_from_secret %xdefine TRUST_TOKEN_experiment_v1 BORINGSSL_PREFIX %+ _TRUST_TOKEN_experiment_v1 %xdefine TRUST_TOKEN_experiment_v2_pmb BORINGSSL_PREFIX %+ _TRUST_TOKEN_experiment_v2_pmb %xdefine TRUST_TOKEN_experiment_v2_voprf BORINGSSL_PREFIX %+ _TRUST_TOKEN_experiment_v2_voprf %xdefine TRUST_TOKEN_free BORINGSSL_PREFIX %+ _TRUST_TOKEN_free %xdefine TRUST_TOKEN_generate_key BORINGSSL_PREFIX %+ _TRUST_TOKEN_generate_key %xdefine TRUST_TOKEN_new BORINGSSL_PREFIX %+ _TRUST_TOKEN_new %xdefine TRUST_TOKEN_pst_v1_pmb BORINGSSL_PREFIX %+ _TRUST_TOKEN_pst_v1_pmb %xdefine TRUST_TOKEN_pst_v1_voprf BORINGSSL_PREFIX %+ _TRUST_TOKEN_pst_v1_voprf %xdefine USERNOTICE_free BORINGSSL_PREFIX %+ _USERNOTICE_free %xdefine USERNOTICE_it BORINGSSL_PREFIX %+ _USERNOTICE_it %xdefine USERNOTICE_new BORINGSSL_PREFIX %+ _USERNOTICE_new %xdefine X25519 BORINGSSL_PREFIX %+ _X25519 %xdefine X25519_keypair BORINGSSL_PREFIX %+ _X25519_keypair %xdefine X25519_public_from_private BORINGSSL_PREFIX %+ _X25519_public_from_private %xdefine X509V3_EXT_CRL_add_nconf BORINGSSL_PREFIX %+ _X509V3_EXT_CRL_add_nconf %xdefine X509V3_EXT_REQ_add_nconf BORINGSSL_PREFIX %+ _X509V3_EXT_REQ_add_nconf %xdefine X509V3_EXT_add BORINGSSL_PREFIX %+ _X509V3_EXT_add %xdefine X509V3_EXT_add_alias BORINGSSL_PREFIX %+ _X509V3_EXT_add_alias 
%xdefine X509V3_EXT_add_nconf BORINGSSL_PREFIX %+ _X509V3_EXT_add_nconf %xdefine X509V3_EXT_add_nconf_sk BORINGSSL_PREFIX %+ _X509V3_EXT_add_nconf_sk %xdefine X509V3_EXT_d2i BORINGSSL_PREFIX %+ _X509V3_EXT_d2i %xdefine X509V3_EXT_free BORINGSSL_PREFIX %+ _X509V3_EXT_free %xdefine X509V3_EXT_get BORINGSSL_PREFIX %+ _X509V3_EXT_get %xdefine X509V3_EXT_get_nid BORINGSSL_PREFIX %+ _X509V3_EXT_get_nid %xdefine X509V3_EXT_i2d BORINGSSL_PREFIX %+ _X509V3_EXT_i2d %xdefine X509V3_EXT_nconf BORINGSSL_PREFIX %+ _X509V3_EXT_nconf %xdefine X509V3_EXT_nconf_nid BORINGSSL_PREFIX %+ _X509V3_EXT_nconf_nid %xdefine X509V3_EXT_print BORINGSSL_PREFIX %+ _X509V3_EXT_print %xdefine X509V3_EXT_print_fp BORINGSSL_PREFIX %+ _X509V3_EXT_print_fp %xdefine X509V3_NAME_from_section BORINGSSL_PREFIX %+ _X509V3_NAME_from_section %xdefine X509V3_add1_i2d BORINGSSL_PREFIX %+ _X509V3_add1_i2d %xdefine X509V3_add_standard_extensions BORINGSSL_PREFIX %+ _X509V3_add_standard_extensions %xdefine X509V3_add_value BORINGSSL_PREFIX %+ _X509V3_add_value %xdefine X509V3_add_value_bool BORINGSSL_PREFIX %+ _X509V3_add_value_bool %xdefine X509V3_add_value_int BORINGSSL_PREFIX %+ _X509V3_add_value_int %xdefine X509V3_bool_from_string BORINGSSL_PREFIX %+ _X509V3_bool_from_string %xdefine X509V3_conf_free BORINGSSL_PREFIX %+ _X509V3_conf_free %xdefine X509V3_extensions_print BORINGSSL_PREFIX %+ _X509V3_extensions_print %xdefine X509V3_get_d2i BORINGSSL_PREFIX %+ _X509V3_get_d2i %xdefine X509V3_get_section BORINGSSL_PREFIX %+ _X509V3_get_section %xdefine X509V3_get_value_bool BORINGSSL_PREFIX %+ _X509V3_get_value_bool %xdefine X509V3_get_value_int BORINGSSL_PREFIX %+ _X509V3_get_value_int %xdefine X509V3_parse_list BORINGSSL_PREFIX %+ _X509V3_parse_list %xdefine X509V3_set_ctx BORINGSSL_PREFIX %+ _X509V3_set_ctx %xdefine X509V3_set_nconf BORINGSSL_PREFIX %+ _X509V3_set_nconf %xdefine X509_ALGOR_cmp BORINGSSL_PREFIX %+ _X509_ALGOR_cmp %xdefine X509_ALGOR_dup BORINGSSL_PREFIX %+ _X509_ALGOR_dup %xdefine 
X509_ALGOR_free BORINGSSL_PREFIX %+ _X509_ALGOR_free %xdefine X509_ALGOR_get0 BORINGSSL_PREFIX %+ _X509_ALGOR_get0 %xdefine X509_ALGOR_it BORINGSSL_PREFIX %+ _X509_ALGOR_it %xdefine X509_ALGOR_new BORINGSSL_PREFIX %+ _X509_ALGOR_new %xdefine X509_ALGOR_set0 BORINGSSL_PREFIX %+ _X509_ALGOR_set0 %xdefine X509_ALGOR_set_md BORINGSSL_PREFIX %+ _X509_ALGOR_set_md %xdefine X509_ATTRIBUTE_count BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_count %xdefine X509_ATTRIBUTE_create BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_create %xdefine X509_ATTRIBUTE_create_by_NID BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_create_by_NID %xdefine X509_ATTRIBUTE_create_by_OBJ BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_create_by_OBJ %xdefine X509_ATTRIBUTE_create_by_txt BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_create_by_txt %xdefine X509_ATTRIBUTE_dup BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_dup %xdefine X509_ATTRIBUTE_free BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_free %xdefine X509_ATTRIBUTE_get0_data BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_get0_data %xdefine X509_ATTRIBUTE_get0_object BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_get0_object %xdefine X509_ATTRIBUTE_get0_type BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_get0_type %xdefine X509_ATTRIBUTE_it BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_it %xdefine X509_ATTRIBUTE_new BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_new %xdefine X509_ATTRIBUTE_set1_data BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_set1_data %xdefine X509_ATTRIBUTE_set1_object BORINGSSL_PREFIX %+ _X509_ATTRIBUTE_set1_object %xdefine X509_CERT_AUX_free BORINGSSL_PREFIX %+ _X509_CERT_AUX_free %xdefine X509_CERT_AUX_it BORINGSSL_PREFIX %+ _X509_CERT_AUX_it %xdefine X509_CERT_AUX_new BORINGSSL_PREFIX %+ _X509_CERT_AUX_new %xdefine X509_CERT_AUX_print BORINGSSL_PREFIX %+ _X509_CERT_AUX_print %xdefine X509_CINF_free BORINGSSL_PREFIX %+ _X509_CINF_free %xdefine X509_CINF_it BORINGSSL_PREFIX %+ _X509_CINF_it %xdefine X509_CINF_new BORINGSSL_PREFIX %+ _X509_CINF_new %xdefine X509_CRL_INFO_free BORINGSSL_PREFIX %+ _X509_CRL_INFO_free %xdefine X509_CRL_INFO_it 
BORINGSSL_PREFIX %+ _X509_CRL_INFO_it %xdefine X509_CRL_INFO_new BORINGSSL_PREFIX %+ _X509_CRL_INFO_new %xdefine X509_CRL_add0_revoked BORINGSSL_PREFIX %+ _X509_CRL_add0_revoked %xdefine X509_CRL_add1_ext_i2d BORINGSSL_PREFIX %+ _X509_CRL_add1_ext_i2d %xdefine X509_CRL_add_ext BORINGSSL_PREFIX %+ _X509_CRL_add_ext %xdefine X509_CRL_cmp BORINGSSL_PREFIX %+ _X509_CRL_cmp %xdefine X509_CRL_delete_ext BORINGSSL_PREFIX %+ _X509_CRL_delete_ext %xdefine X509_CRL_digest BORINGSSL_PREFIX %+ _X509_CRL_digest %xdefine X509_CRL_dup BORINGSSL_PREFIX %+ _X509_CRL_dup %xdefine X509_CRL_free BORINGSSL_PREFIX %+ _X509_CRL_free %xdefine X509_CRL_get0_by_cert BORINGSSL_PREFIX %+ _X509_CRL_get0_by_cert %xdefine X509_CRL_get0_by_serial BORINGSSL_PREFIX %+ _X509_CRL_get0_by_serial %xdefine X509_CRL_get0_extensions BORINGSSL_PREFIX %+ _X509_CRL_get0_extensions %xdefine X509_CRL_get0_lastUpdate BORINGSSL_PREFIX %+ _X509_CRL_get0_lastUpdate %xdefine X509_CRL_get0_nextUpdate BORINGSSL_PREFIX %+ _X509_CRL_get0_nextUpdate %xdefine X509_CRL_get0_signature BORINGSSL_PREFIX %+ _X509_CRL_get0_signature %xdefine X509_CRL_get_REVOKED BORINGSSL_PREFIX %+ _X509_CRL_get_REVOKED %xdefine X509_CRL_get_ext BORINGSSL_PREFIX %+ _X509_CRL_get_ext %xdefine X509_CRL_get_ext_by_NID BORINGSSL_PREFIX %+ _X509_CRL_get_ext_by_NID %xdefine X509_CRL_get_ext_by_OBJ BORINGSSL_PREFIX %+ _X509_CRL_get_ext_by_OBJ %xdefine X509_CRL_get_ext_by_critical BORINGSSL_PREFIX %+ _X509_CRL_get_ext_by_critical %xdefine X509_CRL_get_ext_count BORINGSSL_PREFIX %+ _X509_CRL_get_ext_count %xdefine X509_CRL_get_ext_d2i BORINGSSL_PREFIX %+ _X509_CRL_get_ext_d2i %xdefine X509_CRL_get_issuer BORINGSSL_PREFIX %+ _X509_CRL_get_issuer %xdefine X509_CRL_get_lastUpdate BORINGSSL_PREFIX %+ _X509_CRL_get_lastUpdate %xdefine X509_CRL_get_nextUpdate BORINGSSL_PREFIX %+ _X509_CRL_get_nextUpdate %xdefine X509_CRL_get_signature_nid BORINGSSL_PREFIX %+ _X509_CRL_get_signature_nid %xdefine X509_CRL_get_version BORINGSSL_PREFIX %+ _X509_CRL_get_version 
%xdefine X509_CRL_it BORINGSSL_PREFIX %+ _X509_CRL_it %xdefine X509_CRL_match BORINGSSL_PREFIX %+ _X509_CRL_match %xdefine X509_CRL_new BORINGSSL_PREFIX %+ _X509_CRL_new %xdefine X509_CRL_print BORINGSSL_PREFIX %+ _X509_CRL_print %xdefine X509_CRL_print_fp BORINGSSL_PREFIX %+ _X509_CRL_print_fp %xdefine X509_CRL_set1_lastUpdate BORINGSSL_PREFIX %+ _X509_CRL_set1_lastUpdate %xdefine X509_CRL_set1_nextUpdate BORINGSSL_PREFIX %+ _X509_CRL_set1_nextUpdate %xdefine X509_CRL_set1_signature_algo BORINGSSL_PREFIX %+ _X509_CRL_set1_signature_algo %xdefine X509_CRL_set1_signature_value BORINGSSL_PREFIX %+ _X509_CRL_set1_signature_value %xdefine X509_CRL_set_issuer_name BORINGSSL_PREFIX %+ _X509_CRL_set_issuer_name %xdefine X509_CRL_set_version BORINGSSL_PREFIX %+ _X509_CRL_set_version %xdefine X509_CRL_sign BORINGSSL_PREFIX %+ _X509_CRL_sign %xdefine X509_CRL_sign_ctx BORINGSSL_PREFIX %+ _X509_CRL_sign_ctx %xdefine X509_CRL_sort BORINGSSL_PREFIX %+ _X509_CRL_sort %xdefine X509_CRL_up_ref BORINGSSL_PREFIX %+ _X509_CRL_up_ref %xdefine X509_CRL_verify BORINGSSL_PREFIX %+ _X509_CRL_verify %xdefine X509_EXTENSIONS_it BORINGSSL_PREFIX %+ _X509_EXTENSIONS_it %xdefine X509_EXTENSION_create_by_NID BORINGSSL_PREFIX %+ _X509_EXTENSION_create_by_NID %xdefine X509_EXTENSION_create_by_OBJ BORINGSSL_PREFIX %+ _X509_EXTENSION_create_by_OBJ %xdefine X509_EXTENSION_dup BORINGSSL_PREFIX %+ _X509_EXTENSION_dup %xdefine X509_EXTENSION_free BORINGSSL_PREFIX %+ _X509_EXTENSION_free %xdefine X509_EXTENSION_get_critical BORINGSSL_PREFIX %+ _X509_EXTENSION_get_critical %xdefine X509_EXTENSION_get_data BORINGSSL_PREFIX %+ _X509_EXTENSION_get_data %xdefine X509_EXTENSION_get_object BORINGSSL_PREFIX %+ _X509_EXTENSION_get_object %xdefine X509_EXTENSION_it BORINGSSL_PREFIX %+ _X509_EXTENSION_it %xdefine X509_EXTENSION_new BORINGSSL_PREFIX %+ _X509_EXTENSION_new %xdefine X509_EXTENSION_set_critical BORINGSSL_PREFIX %+ _X509_EXTENSION_set_critical %xdefine X509_EXTENSION_set_data BORINGSSL_PREFIX %+ 
_X509_EXTENSION_set_data %xdefine X509_EXTENSION_set_object BORINGSSL_PREFIX %+ _X509_EXTENSION_set_object %xdefine X509_INFO_free BORINGSSL_PREFIX %+ _X509_INFO_free %xdefine X509_LOOKUP_add_dir BORINGSSL_PREFIX %+ _X509_LOOKUP_add_dir %xdefine X509_LOOKUP_ctrl BORINGSSL_PREFIX %+ _X509_LOOKUP_ctrl %xdefine X509_LOOKUP_file BORINGSSL_PREFIX %+ _X509_LOOKUP_file %xdefine X509_LOOKUP_free BORINGSSL_PREFIX %+ _X509_LOOKUP_free %xdefine X509_LOOKUP_hash_dir BORINGSSL_PREFIX %+ _X509_LOOKUP_hash_dir %xdefine X509_LOOKUP_load_file BORINGSSL_PREFIX %+ _X509_LOOKUP_load_file %xdefine X509_NAME_ENTRY_create_by_NID BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_create_by_NID %xdefine X509_NAME_ENTRY_create_by_OBJ BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_create_by_OBJ %xdefine X509_NAME_ENTRY_create_by_txt BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_create_by_txt %xdefine X509_NAME_ENTRY_dup BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_dup %xdefine X509_NAME_ENTRY_free BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_free %xdefine X509_NAME_ENTRY_get_data BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_get_data %xdefine X509_NAME_ENTRY_get_object BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_get_object %xdefine X509_NAME_ENTRY_it BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_it %xdefine X509_NAME_ENTRY_new BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_new %xdefine X509_NAME_ENTRY_set BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_set %xdefine X509_NAME_ENTRY_set_data BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_set_data %xdefine X509_NAME_ENTRY_set_object BORINGSSL_PREFIX %+ _X509_NAME_ENTRY_set_object %xdefine X509_NAME_add_entry BORINGSSL_PREFIX %+ _X509_NAME_add_entry %xdefine X509_NAME_add_entry_by_NID BORINGSSL_PREFIX %+ _X509_NAME_add_entry_by_NID %xdefine X509_NAME_add_entry_by_OBJ BORINGSSL_PREFIX %+ _X509_NAME_add_entry_by_OBJ %xdefine X509_NAME_add_entry_by_txt BORINGSSL_PREFIX %+ _X509_NAME_add_entry_by_txt %xdefine X509_NAME_cmp BORINGSSL_PREFIX %+ _X509_NAME_cmp %xdefine X509_NAME_delete_entry BORINGSSL_PREFIX %+ _X509_NAME_delete_entry %xdefine 
X509_NAME_digest BORINGSSL_PREFIX %+ _X509_NAME_digest %xdefine X509_NAME_dup BORINGSSL_PREFIX %+ _X509_NAME_dup %xdefine X509_NAME_entry_count BORINGSSL_PREFIX %+ _X509_NAME_entry_count %xdefine X509_NAME_free BORINGSSL_PREFIX %+ _X509_NAME_free %xdefine X509_NAME_get0_der BORINGSSL_PREFIX %+ _X509_NAME_get0_der %xdefine X509_NAME_get_entry BORINGSSL_PREFIX %+ _X509_NAME_get_entry %xdefine X509_NAME_get_index_by_NID BORINGSSL_PREFIX %+ _X509_NAME_get_index_by_NID %xdefine X509_NAME_get_index_by_OBJ BORINGSSL_PREFIX %+ _X509_NAME_get_index_by_OBJ %xdefine X509_NAME_get_text_by_NID BORINGSSL_PREFIX %+ _X509_NAME_get_text_by_NID %xdefine X509_NAME_get_text_by_OBJ BORINGSSL_PREFIX %+ _X509_NAME_get_text_by_OBJ %xdefine X509_NAME_hash BORINGSSL_PREFIX %+ _X509_NAME_hash %xdefine X509_NAME_hash_old BORINGSSL_PREFIX %+ _X509_NAME_hash_old %xdefine X509_NAME_it BORINGSSL_PREFIX %+ _X509_NAME_it %xdefine X509_NAME_new BORINGSSL_PREFIX %+ _X509_NAME_new %xdefine X509_NAME_oneline BORINGSSL_PREFIX %+ _X509_NAME_oneline %xdefine X509_NAME_print BORINGSSL_PREFIX %+ _X509_NAME_print %xdefine X509_NAME_print_ex BORINGSSL_PREFIX %+ _X509_NAME_print_ex %xdefine X509_NAME_print_ex_fp BORINGSSL_PREFIX %+ _X509_NAME_print_ex_fp %xdefine X509_NAME_set BORINGSSL_PREFIX %+ _X509_NAME_set %xdefine X509_OBJECT_free BORINGSSL_PREFIX %+ _X509_OBJECT_free %xdefine X509_OBJECT_free_contents BORINGSSL_PREFIX %+ _X509_OBJECT_free_contents %xdefine X509_OBJECT_get0_X509 BORINGSSL_PREFIX %+ _X509_OBJECT_get0_X509 %xdefine X509_OBJECT_get_type BORINGSSL_PREFIX %+ _X509_OBJECT_get_type %xdefine X509_OBJECT_new BORINGSSL_PREFIX %+ _X509_OBJECT_new %xdefine X509_PUBKEY_free BORINGSSL_PREFIX %+ _X509_PUBKEY_free %xdefine X509_PUBKEY_get BORINGSSL_PREFIX %+ _X509_PUBKEY_get %xdefine X509_PUBKEY_get0 BORINGSSL_PREFIX %+ _X509_PUBKEY_get0 %xdefine X509_PUBKEY_get0_param BORINGSSL_PREFIX %+ _X509_PUBKEY_get0_param %xdefine X509_PUBKEY_get0_public_key BORINGSSL_PREFIX %+ _X509_PUBKEY_get0_public_key 
%xdefine X509_PUBKEY_it BORINGSSL_PREFIX %+ _X509_PUBKEY_it %xdefine X509_PUBKEY_new BORINGSSL_PREFIX %+ _X509_PUBKEY_new %xdefine X509_PUBKEY_set BORINGSSL_PREFIX %+ _X509_PUBKEY_set %xdefine X509_PUBKEY_set0_param BORINGSSL_PREFIX %+ _X509_PUBKEY_set0_param %xdefine X509_PURPOSE_get0 BORINGSSL_PREFIX %+ _X509_PURPOSE_get0 %xdefine X509_PURPOSE_get_by_sname BORINGSSL_PREFIX %+ _X509_PURPOSE_get_by_sname %xdefine X509_PURPOSE_get_id BORINGSSL_PREFIX %+ _X509_PURPOSE_get_id %xdefine X509_PURPOSE_get_trust BORINGSSL_PREFIX %+ _X509_PURPOSE_get_trust %xdefine X509_REQ_INFO_free BORINGSSL_PREFIX %+ _X509_REQ_INFO_free %xdefine X509_REQ_INFO_it BORINGSSL_PREFIX %+ _X509_REQ_INFO_it %xdefine X509_REQ_INFO_new BORINGSSL_PREFIX %+ _X509_REQ_INFO_new %xdefine X509_REQ_add1_attr BORINGSSL_PREFIX %+ _X509_REQ_add1_attr %xdefine X509_REQ_add1_attr_by_NID BORINGSSL_PREFIX %+ _X509_REQ_add1_attr_by_NID %xdefine X509_REQ_add1_attr_by_OBJ BORINGSSL_PREFIX %+ _X509_REQ_add1_attr_by_OBJ %xdefine X509_REQ_add1_attr_by_txt BORINGSSL_PREFIX %+ _X509_REQ_add1_attr_by_txt %xdefine X509_REQ_add_extensions BORINGSSL_PREFIX %+ _X509_REQ_add_extensions %xdefine X509_REQ_add_extensions_nid BORINGSSL_PREFIX %+ _X509_REQ_add_extensions_nid %xdefine X509_REQ_check_private_key BORINGSSL_PREFIX %+ _X509_REQ_check_private_key %xdefine X509_REQ_delete_attr BORINGSSL_PREFIX %+ _X509_REQ_delete_attr %xdefine X509_REQ_digest BORINGSSL_PREFIX %+ _X509_REQ_digest %xdefine X509_REQ_dup BORINGSSL_PREFIX %+ _X509_REQ_dup %xdefine X509_REQ_extension_nid BORINGSSL_PREFIX %+ _X509_REQ_extension_nid %xdefine X509_REQ_free BORINGSSL_PREFIX %+ _X509_REQ_free %xdefine X509_REQ_get0_pubkey BORINGSSL_PREFIX %+ _X509_REQ_get0_pubkey %xdefine X509_REQ_get0_signature BORINGSSL_PREFIX %+ _X509_REQ_get0_signature %xdefine X509_REQ_get1_email BORINGSSL_PREFIX %+ _X509_REQ_get1_email %xdefine X509_REQ_get_attr BORINGSSL_PREFIX %+ _X509_REQ_get_attr %xdefine X509_REQ_get_attr_by_NID BORINGSSL_PREFIX %+ 
_X509_REQ_get_attr_by_NID %xdefine X509_REQ_get_attr_by_OBJ BORINGSSL_PREFIX %+ _X509_REQ_get_attr_by_OBJ %xdefine X509_REQ_get_attr_count BORINGSSL_PREFIX %+ _X509_REQ_get_attr_count %xdefine X509_REQ_get_extensions BORINGSSL_PREFIX %+ _X509_REQ_get_extensions %xdefine X509_REQ_get_pubkey BORINGSSL_PREFIX %+ _X509_REQ_get_pubkey %xdefine X509_REQ_get_signature_nid BORINGSSL_PREFIX %+ _X509_REQ_get_signature_nid %xdefine X509_REQ_get_subject_name BORINGSSL_PREFIX %+ _X509_REQ_get_subject_name %xdefine X509_REQ_get_version BORINGSSL_PREFIX %+ _X509_REQ_get_version %xdefine X509_REQ_it BORINGSSL_PREFIX %+ _X509_REQ_it %xdefine X509_REQ_new BORINGSSL_PREFIX %+ _X509_REQ_new %xdefine X509_REQ_print BORINGSSL_PREFIX %+ _X509_REQ_print %xdefine X509_REQ_print_ex BORINGSSL_PREFIX %+ _X509_REQ_print_ex %xdefine X509_REQ_print_fp BORINGSSL_PREFIX %+ _X509_REQ_print_fp %xdefine X509_REQ_set1_signature_algo BORINGSSL_PREFIX %+ _X509_REQ_set1_signature_algo %xdefine X509_REQ_set1_signature_value BORINGSSL_PREFIX %+ _X509_REQ_set1_signature_value %xdefine X509_REQ_set_pubkey BORINGSSL_PREFIX %+ _X509_REQ_set_pubkey %xdefine X509_REQ_set_subject_name BORINGSSL_PREFIX %+ _X509_REQ_set_subject_name %xdefine X509_REQ_set_version BORINGSSL_PREFIX %+ _X509_REQ_set_version %xdefine X509_REQ_sign BORINGSSL_PREFIX %+ _X509_REQ_sign %xdefine X509_REQ_sign_ctx BORINGSSL_PREFIX %+ _X509_REQ_sign_ctx %xdefine X509_REQ_verify BORINGSSL_PREFIX %+ _X509_REQ_verify %xdefine X509_REVOKED_add1_ext_i2d BORINGSSL_PREFIX %+ _X509_REVOKED_add1_ext_i2d %xdefine X509_REVOKED_add_ext BORINGSSL_PREFIX %+ _X509_REVOKED_add_ext %xdefine X509_REVOKED_delete_ext BORINGSSL_PREFIX %+ _X509_REVOKED_delete_ext %xdefine X509_REVOKED_dup BORINGSSL_PREFIX %+ _X509_REVOKED_dup %xdefine X509_REVOKED_free BORINGSSL_PREFIX %+ _X509_REVOKED_free %xdefine X509_REVOKED_get0_extensions BORINGSSL_PREFIX %+ _X509_REVOKED_get0_extensions %xdefine X509_REVOKED_get0_revocationDate BORINGSSL_PREFIX %+ 
_X509_REVOKED_get0_revocationDate %xdefine X509_REVOKED_get0_serialNumber BORINGSSL_PREFIX %+ _X509_REVOKED_get0_serialNumber %xdefine X509_REVOKED_get_ext BORINGSSL_PREFIX %+ _X509_REVOKED_get_ext %xdefine X509_REVOKED_get_ext_by_NID BORINGSSL_PREFIX %+ _X509_REVOKED_get_ext_by_NID %xdefine X509_REVOKED_get_ext_by_OBJ BORINGSSL_PREFIX %+ _X509_REVOKED_get_ext_by_OBJ %xdefine X509_REVOKED_get_ext_by_critical BORINGSSL_PREFIX %+ _X509_REVOKED_get_ext_by_critical %xdefine X509_REVOKED_get_ext_count BORINGSSL_PREFIX %+ _X509_REVOKED_get_ext_count %xdefine X509_REVOKED_get_ext_d2i BORINGSSL_PREFIX %+ _X509_REVOKED_get_ext_d2i %xdefine X509_REVOKED_it BORINGSSL_PREFIX %+ _X509_REVOKED_it %xdefine X509_REVOKED_new BORINGSSL_PREFIX %+ _X509_REVOKED_new %xdefine X509_REVOKED_set_revocationDate BORINGSSL_PREFIX %+ _X509_REVOKED_set_revocationDate %xdefine X509_REVOKED_set_serialNumber BORINGSSL_PREFIX %+ _X509_REVOKED_set_serialNumber %xdefine X509_SIG_free BORINGSSL_PREFIX %+ _X509_SIG_free %xdefine X509_SIG_get0 BORINGSSL_PREFIX %+ _X509_SIG_get0 %xdefine X509_SIG_getm BORINGSSL_PREFIX %+ _X509_SIG_getm %xdefine X509_SIG_new BORINGSSL_PREFIX %+ _X509_SIG_new %xdefine X509_STORE_CTX_cleanup BORINGSSL_PREFIX %+ _X509_STORE_CTX_cleanup %xdefine X509_STORE_CTX_free BORINGSSL_PREFIX %+ _X509_STORE_CTX_free %xdefine X509_STORE_CTX_get0_cert BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_cert %xdefine X509_STORE_CTX_get0_chain BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_chain %xdefine X509_STORE_CTX_get0_current_crl BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_current_crl %xdefine X509_STORE_CTX_get0_param BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_param %xdefine X509_STORE_CTX_get0_parent_ctx BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_parent_ctx %xdefine X509_STORE_CTX_get0_store BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_store %xdefine X509_STORE_CTX_get0_untrusted BORINGSSL_PREFIX %+ _X509_STORE_CTX_get0_untrusted %xdefine X509_STORE_CTX_get1_certs BORINGSSL_PREFIX %+ _X509_STORE_CTX_get1_certs 
%xdefine X509_STORE_CTX_get1_chain BORINGSSL_PREFIX %+ _X509_STORE_CTX_get1_chain %xdefine X509_STORE_CTX_get1_crls BORINGSSL_PREFIX %+ _X509_STORE_CTX_get1_crls %xdefine X509_STORE_CTX_get1_issuer BORINGSSL_PREFIX %+ _X509_STORE_CTX_get1_issuer %xdefine X509_STORE_CTX_get_by_subject BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_by_subject %xdefine X509_STORE_CTX_get_chain BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_chain %xdefine X509_STORE_CTX_get_current_cert BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_current_cert %xdefine X509_STORE_CTX_get_error BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_error %xdefine X509_STORE_CTX_get_error_depth BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_error_depth %xdefine X509_STORE_CTX_get_ex_data BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_ex_data %xdefine X509_STORE_CTX_get_ex_new_index BORINGSSL_PREFIX %+ _X509_STORE_CTX_get_ex_new_index %xdefine X509_STORE_CTX_init BORINGSSL_PREFIX %+ _X509_STORE_CTX_init %xdefine X509_STORE_CTX_new BORINGSSL_PREFIX %+ _X509_STORE_CTX_new %xdefine X509_STORE_CTX_set0_crls BORINGSSL_PREFIX %+ _X509_STORE_CTX_set0_crls %xdefine X509_STORE_CTX_set0_param BORINGSSL_PREFIX %+ _X509_STORE_CTX_set0_param %xdefine X509_STORE_CTX_set0_trusted_stack BORINGSSL_PREFIX %+ _X509_STORE_CTX_set0_trusted_stack %xdefine X509_STORE_CTX_set_chain BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_chain %xdefine X509_STORE_CTX_set_default BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_default %xdefine X509_STORE_CTX_set_depth BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_depth %xdefine X509_STORE_CTX_set_error BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_error %xdefine X509_STORE_CTX_set_ex_data BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_ex_data %xdefine X509_STORE_CTX_set_flags BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_flags %xdefine X509_STORE_CTX_set_purpose BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_purpose %xdefine X509_STORE_CTX_set_time BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_time %xdefine X509_STORE_CTX_set_time_posix BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_time_posix 
%xdefine X509_STORE_CTX_set_trust BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_trust %xdefine X509_STORE_CTX_set_verify_cb BORINGSSL_PREFIX %+ _X509_STORE_CTX_set_verify_cb %xdefine X509_STORE_CTX_trusted_stack BORINGSSL_PREFIX %+ _X509_STORE_CTX_trusted_stack %xdefine X509_STORE_add_cert BORINGSSL_PREFIX %+ _X509_STORE_add_cert %xdefine X509_STORE_add_crl BORINGSSL_PREFIX %+ _X509_STORE_add_crl %xdefine X509_STORE_add_lookup BORINGSSL_PREFIX %+ _X509_STORE_add_lookup %xdefine X509_STORE_free BORINGSSL_PREFIX %+ _X509_STORE_free %xdefine X509_STORE_get0_objects BORINGSSL_PREFIX %+ _X509_STORE_get0_objects %xdefine X509_STORE_get0_param BORINGSSL_PREFIX %+ _X509_STORE_get0_param %xdefine X509_STORE_get1_objects BORINGSSL_PREFIX %+ _X509_STORE_get1_objects %xdefine X509_STORE_load_locations BORINGSSL_PREFIX %+ _X509_STORE_load_locations %xdefine X509_STORE_new BORINGSSL_PREFIX %+ _X509_STORE_new %xdefine X509_STORE_set1_param BORINGSSL_PREFIX %+ _X509_STORE_set1_param %xdefine X509_STORE_set_default_paths BORINGSSL_PREFIX %+ _X509_STORE_set_default_paths %xdefine X509_STORE_set_depth BORINGSSL_PREFIX %+ _X509_STORE_set_depth %xdefine X509_STORE_set_flags BORINGSSL_PREFIX %+ _X509_STORE_set_flags %xdefine X509_STORE_set_purpose BORINGSSL_PREFIX %+ _X509_STORE_set_purpose %xdefine X509_STORE_set_trust BORINGSSL_PREFIX %+ _X509_STORE_set_trust %xdefine X509_STORE_set_verify_cb BORINGSSL_PREFIX %+ _X509_STORE_set_verify_cb %xdefine X509_STORE_up_ref BORINGSSL_PREFIX %+ _X509_STORE_up_ref %xdefine X509_VAL_free BORINGSSL_PREFIX %+ _X509_VAL_free %xdefine X509_VAL_it BORINGSSL_PREFIX %+ _X509_VAL_it %xdefine X509_VAL_new BORINGSSL_PREFIX %+ _X509_VAL_new %xdefine X509_VERIFY_PARAM_add0_policy BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_add0_policy %xdefine X509_VERIFY_PARAM_add1_host BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_add1_host %xdefine X509_VERIFY_PARAM_clear_flags BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_clear_flags %xdefine X509_VERIFY_PARAM_free BORINGSSL_PREFIX %+ 
_X509_VERIFY_PARAM_free %xdefine X509_VERIFY_PARAM_get_depth BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_get_depth %xdefine X509_VERIFY_PARAM_get_flags BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_get_flags %xdefine X509_VERIFY_PARAM_inherit BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_inherit %xdefine X509_VERIFY_PARAM_lookup BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_lookup %xdefine X509_VERIFY_PARAM_new BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_new %xdefine X509_VERIFY_PARAM_set1 BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set1 %xdefine X509_VERIFY_PARAM_set1_email BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set1_email %xdefine X509_VERIFY_PARAM_set1_host BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set1_host %xdefine X509_VERIFY_PARAM_set1_ip BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set1_ip %xdefine X509_VERIFY_PARAM_set1_ip_asc BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set1_ip_asc %xdefine X509_VERIFY_PARAM_set1_policies BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set1_policies %xdefine X509_VERIFY_PARAM_set_depth BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_depth %xdefine X509_VERIFY_PARAM_set_flags BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_flags %xdefine X509_VERIFY_PARAM_set_hostflags BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_hostflags %xdefine X509_VERIFY_PARAM_set_purpose BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_purpose %xdefine X509_VERIFY_PARAM_set_time BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_time %xdefine X509_VERIFY_PARAM_set_time_posix BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_time_posix %xdefine X509_VERIFY_PARAM_set_trust BORINGSSL_PREFIX %+ _X509_VERIFY_PARAM_set_trust %xdefine X509_add1_ext_i2d BORINGSSL_PREFIX %+ _X509_add1_ext_i2d %xdefine X509_add1_reject_object BORINGSSL_PREFIX %+ _X509_add1_reject_object %xdefine X509_add1_trust_object BORINGSSL_PREFIX %+ _X509_add1_trust_object %xdefine X509_add_ext BORINGSSL_PREFIX %+ _X509_add_ext %xdefine X509_alias_get0 BORINGSSL_PREFIX %+ _X509_alias_get0 %xdefine X509_alias_set1 BORINGSSL_PREFIX %+ _X509_alias_set1 %xdefine X509_chain_up_ref 
BORINGSSL_PREFIX %+ _X509_chain_up_ref %xdefine X509_check_akid BORINGSSL_PREFIX %+ _X509_check_akid %xdefine X509_check_ca BORINGSSL_PREFIX %+ _X509_check_ca %xdefine X509_check_email BORINGSSL_PREFIX %+ _X509_check_email %xdefine X509_check_host BORINGSSL_PREFIX %+ _X509_check_host %xdefine X509_check_ip BORINGSSL_PREFIX %+ _X509_check_ip %xdefine X509_check_ip_asc BORINGSSL_PREFIX %+ _X509_check_ip_asc %xdefine X509_check_issued BORINGSSL_PREFIX %+ _X509_check_issued %xdefine X509_check_private_key BORINGSSL_PREFIX %+ _X509_check_private_key %xdefine X509_check_purpose BORINGSSL_PREFIX %+ _X509_check_purpose %xdefine X509_check_trust BORINGSSL_PREFIX %+ _X509_check_trust %xdefine X509_cmp BORINGSSL_PREFIX %+ _X509_cmp %xdefine X509_cmp_current_time BORINGSSL_PREFIX %+ _X509_cmp_current_time %xdefine X509_cmp_time BORINGSSL_PREFIX %+ _X509_cmp_time %xdefine X509_cmp_time_posix BORINGSSL_PREFIX %+ _X509_cmp_time_posix %xdefine X509_delete_ext BORINGSSL_PREFIX %+ _X509_delete_ext %xdefine X509_digest BORINGSSL_PREFIX %+ _X509_digest %xdefine X509_dup BORINGSSL_PREFIX %+ _X509_dup %xdefine X509_email_free BORINGSSL_PREFIX %+ _X509_email_free %xdefine X509_find_by_issuer_and_serial BORINGSSL_PREFIX %+ _X509_find_by_issuer_and_serial %xdefine X509_find_by_subject BORINGSSL_PREFIX %+ _X509_find_by_subject %xdefine X509_free BORINGSSL_PREFIX %+ _X509_free %xdefine X509_get0_authority_issuer BORINGSSL_PREFIX %+ _X509_get0_authority_issuer %xdefine X509_get0_authority_key_id BORINGSSL_PREFIX %+ _X509_get0_authority_key_id %xdefine X509_get0_authority_serial BORINGSSL_PREFIX %+ _X509_get0_authority_serial %xdefine X509_get0_extensions BORINGSSL_PREFIX %+ _X509_get0_extensions %xdefine X509_get0_notAfter BORINGSSL_PREFIX %+ _X509_get0_notAfter %xdefine X509_get0_notBefore BORINGSSL_PREFIX %+ _X509_get0_notBefore %xdefine X509_get0_pubkey BORINGSSL_PREFIX %+ _X509_get0_pubkey %xdefine X509_get0_pubkey_bitstr BORINGSSL_PREFIX %+ _X509_get0_pubkey_bitstr %xdefine 
X509_get0_serialNumber BORINGSSL_PREFIX %+ _X509_get0_serialNumber %xdefine X509_get0_signature BORINGSSL_PREFIX %+ _X509_get0_signature %xdefine X509_get0_subject_key_id BORINGSSL_PREFIX %+ _X509_get0_subject_key_id %xdefine X509_get0_tbs_sigalg BORINGSSL_PREFIX %+ _X509_get0_tbs_sigalg %xdefine X509_get0_uids BORINGSSL_PREFIX %+ _X509_get0_uids %xdefine X509_get1_email BORINGSSL_PREFIX %+ _X509_get1_email %xdefine X509_get1_ocsp BORINGSSL_PREFIX %+ _X509_get1_ocsp %xdefine X509_get_X509_PUBKEY BORINGSSL_PREFIX %+ _X509_get_X509_PUBKEY %xdefine X509_get_default_cert_area BORINGSSL_PREFIX %+ _X509_get_default_cert_area %xdefine X509_get_default_cert_dir BORINGSSL_PREFIX %+ _X509_get_default_cert_dir %xdefine X509_get_default_cert_dir_env BORINGSSL_PREFIX %+ _X509_get_default_cert_dir_env %xdefine X509_get_default_cert_file BORINGSSL_PREFIX %+ _X509_get_default_cert_file %xdefine X509_get_default_cert_file_env BORINGSSL_PREFIX %+ _X509_get_default_cert_file_env %xdefine X509_get_default_private_dir BORINGSSL_PREFIX %+ _X509_get_default_private_dir %xdefine X509_get_ex_data BORINGSSL_PREFIX %+ _X509_get_ex_data %xdefine X509_get_ex_new_index BORINGSSL_PREFIX %+ _X509_get_ex_new_index %xdefine X509_get_ext BORINGSSL_PREFIX %+ _X509_get_ext %xdefine X509_get_ext_by_NID BORINGSSL_PREFIX %+ _X509_get_ext_by_NID %xdefine X509_get_ext_by_OBJ BORINGSSL_PREFIX %+ _X509_get_ext_by_OBJ %xdefine X509_get_ext_by_critical BORINGSSL_PREFIX %+ _X509_get_ext_by_critical %xdefine X509_get_ext_count BORINGSSL_PREFIX %+ _X509_get_ext_count %xdefine X509_get_ext_d2i BORINGSSL_PREFIX %+ _X509_get_ext_d2i %xdefine X509_get_extended_key_usage BORINGSSL_PREFIX %+ _X509_get_extended_key_usage %xdefine X509_get_extension_flags BORINGSSL_PREFIX %+ _X509_get_extension_flags %xdefine X509_get_issuer_name BORINGSSL_PREFIX %+ _X509_get_issuer_name %xdefine X509_get_key_usage BORINGSSL_PREFIX %+ _X509_get_key_usage %xdefine X509_get_notAfter BORINGSSL_PREFIX %+ _X509_get_notAfter %xdefine 
X509_get_notBefore BORINGSSL_PREFIX %+ _X509_get_notBefore %xdefine X509_get_pathlen BORINGSSL_PREFIX %+ _X509_get_pathlen %xdefine X509_get_pubkey BORINGSSL_PREFIX %+ _X509_get_pubkey %xdefine X509_get_serialNumber BORINGSSL_PREFIX %+ _X509_get_serialNumber %xdefine X509_get_signature_nid BORINGSSL_PREFIX %+ _X509_get_signature_nid %xdefine X509_get_subject_name BORINGSSL_PREFIX %+ _X509_get_subject_name %xdefine X509_get_version BORINGSSL_PREFIX %+ _X509_get_version %xdefine X509_getm_notAfter BORINGSSL_PREFIX %+ _X509_getm_notAfter %xdefine X509_getm_notBefore BORINGSSL_PREFIX %+ _X509_getm_notBefore %xdefine X509_gmtime_adj BORINGSSL_PREFIX %+ _X509_gmtime_adj %xdefine X509_is_valid_trust_id BORINGSSL_PREFIX %+ _X509_is_valid_trust_id %xdefine X509_issuer_name_cmp BORINGSSL_PREFIX %+ _X509_issuer_name_cmp %xdefine X509_issuer_name_hash BORINGSSL_PREFIX %+ _X509_issuer_name_hash %xdefine X509_issuer_name_hash_old BORINGSSL_PREFIX %+ _X509_issuer_name_hash_old %xdefine X509_it BORINGSSL_PREFIX %+ _X509_it %xdefine X509_keyid_get0 BORINGSSL_PREFIX %+ _X509_keyid_get0 %xdefine X509_keyid_set1 BORINGSSL_PREFIX %+ _X509_keyid_set1 %xdefine X509_load_cert_crl_file BORINGSSL_PREFIX %+ _X509_load_cert_crl_file %xdefine X509_load_cert_file BORINGSSL_PREFIX %+ _X509_load_cert_file %xdefine X509_load_crl_file BORINGSSL_PREFIX %+ _X509_load_crl_file %xdefine X509_new BORINGSSL_PREFIX %+ _X509_new %xdefine X509_parse_from_buffer BORINGSSL_PREFIX %+ _X509_parse_from_buffer %xdefine X509_policy_check BORINGSSL_PREFIX %+ _X509_policy_check %xdefine X509_print BORINGSSL_PREFIX %+ _X509_print %xdefine X509_print_ex BORINGSSL_PREFIX %+ _X509_print_ex %xdefine X509_print_ex_fp BORINGSSL_PREFIX %+ _X509_print_ex_fp %xdefine X509_print_fp BORINGSSL_PREFIX %+ _X509_print_fp %xdefine X509_pubkey_digest BORINGSSL_PREFIX %+ _X509_pubkey_digest %xdefine X509_reject_clear BORINGSSL_PREFIX %+ _X509_reject_clear %xdefine X509_set1_notAfter BORINGSSL_PREFIX %+ _X509_set1_notAfter %xdefine 
X509_set1_notBefore BORINGSSL_PREFIX %+ _X509_set1_notBefore %xdefine X509_set1_signature_algo BORINGSSL_PREFIX %+ _X509_set1_signature_algo %xdefine X509_set1_signature_value BORINGSSL_PREFIX %+ _X509_set1_signature_value %xdefine X509_set_ex_data BORINGSSL_PREFIX %+ _X509_set_ex_data %xdefine X509_set_issuer_name BORINGSSL_PREFIX %+ _X509_set_issuer_name %xdefine X509_set_notAfter BORINGSSL_PREFIX %+ _X509_set_notAfter %xdefine X509_set_notBefore BORINGSSL_PREFIX %+ _X509_set_notBefore %xdefine X509_set_pubkey BORINGSSL_PREFIX %+ _X509_set_pubkey %xdefine X509_set_serialNumber BORINGSSL_PREFIX %+ _X509_set_serialNumber %xdefine X509_set_subject_name BORINGSSL_PREFIX %+ _X509_set_subject_name %xdefine X509_set_version BORINGSSL_PREFIX %+ _X509_set_version %xdefine X509_sign BORINGSSL_PREFIX %+ _X509_sign %xdefine X509_sign_ctx BORINGSSL_PREFIX %+ _X509_sign_ctx %xdefine X509_signature_dump BORINGSSL_PREFIX %+ _X509_signature_dump %xdefine X509_signature_print BORINGSSL_PREFIX %+ _X509_signature_print %xdefine X509_subject_name_cmp BORINGSSL_PREFIX %+ _X509_subject_name_cmp %xdefine X509_subject_name_hash BORINGSSL_PREFIX %+ _X509_subject_name_hash %xdefine X509_subject_name_hash_old BORINGSSL_PREFIX %+ _X509_subject_name_hash_old %xdefine X509_supported_extension BORINGSSL_PREFIX %+ _X509_supported_extension %xdefine X509_time_adj BORINGSSL_PREFIX %+ _X509_time_adj %xdefine X509_time_adj_ex BORINGSSL_PREFIX %+ _X509_time_adj_ex %xdefine X509_trust_clear BORINGSSL_PREFIX %+ _X509_trust_clear %xdefine X509_up_ref BORINGSSL_PREFIX %+ _X509_up_ref %xdefine X509_verify BORINGSSL_PREFIX %+ _X509_verify %xdefine X509_verify_cert BORINGSSL_PREFIX %+ _X509_verify_cert %xdefine X509_verify_cert_error_string BORINGSSL_PREFIX %+ _X509_verify_cert_error_string %xdefine X509v3_add_ext BORINGSSL_PREFIX %+ _X509v3_add_ext %xdefine X509v3_delete_ext BORINGSSL_PREFIX %+ _X509v3_delete_ext %xdefine X509v3_get_ext BORINGSSL_PREFIX %+ _X509v3_get_ext %xdefine X509v3_get_ext_by_NID 
BORINGSSL_PREFIX %+ _X509v3_get_ext_by_NID %xdefine X509v3_get_ext_by_OBJ BORINGSSL_PREFIX %+ _X509v3_get_ext_by_OBJ %xdefine X509v3_get_ext_by_critical BORINGSSL_PREFIX %+ _X509v3_get_ext_by_critical %xdefine X509v3_get_ext_count BORINGSSL_PREFIX %+ _X509v3_get_ext_count %xdefine __clang_call_terminate BORINGSSL_PREFIX %+ ___clang_call_terminate %xdefine a2i_IPADDRESS BORINGSSL_PREFIX %+ _a2i_IPADDRESS %xdefine a2i_IPADDRESS_NC BORINGSSL_PREFIX %+ _a2i_IPADDRESS_NC %xdefine aes128gcmsiv_aes_ks BORINGSSL_PREFIX %+ _aes128gcmsiv_aes_ks %xdefine aes128gcmsiv_aes_ks_enc_x1 BORINGSSL_PREFIX %+ _aes128gcmsiv_aes_ks_enc_x1 %xdefine aes128gcmsiv_dec BORINGSSL_PREFIX %+ _aes128gcmsiv_dec %xdefine aes128gcmsiv_ecb_enc_block BORINGSSL_PREFIX %+ _aes128gcmsiv_ecb_enc_block %xdefine aes128gcmsiv_enc_msg_x4 BORINGSSL_PREFIX %+ _aes128gcmsiv_enc_msg_x4 %xdefine aes128gcmsiv_enc_msg_x8 BORINGSSL_PREFIX %+ _aes128gcmsiv_enc_msg_x8 %xdefine aes128gcmsiv_kdf BORINGSSL_PREFIX %+ _aes128gcmsiv_kdf %xdefine aes256gcmsiv_aes_ks BORINGSSL_PREFIX %+ _aes256gcmsiv_aes_ks %xdefine aes256gcmsiv_aes_ks_enc_x1 BORINGSSL_PREFIX %+ _aes256gcmsiv_aes_ks_enc_x1 %xdefine aes256gcmsiv_dec BORINGSSL_PREFIX %+ _aes256gcmsiv_dec %xdefine aes256gcmsiv_ecb_enc_block BORINGSSL_PREFIX %+ _aes256gcmsiv_ecb_enc_block %xdefine aes256gcmsiv_enc_msg_x4 BORINGSSL_PREFIX %+ _aes256gcmsiv_enc_msg_x4 %xdefine aes256gcmsiv_enc_msg_x8 BORINGSSL_PREFIX %+ _aes256gcmsiv_enc_msg_x8 %xdefine aes256gcmsiv_kdf BORINGSSL_PREFIX %+ _aes256gcmsiv_kdf %xdefine aes_ctr_set_key BORINGSSL_PREFIX %+ _aes_ctr_set_key %xdefine aes_gcm_dec_kernel BORINGSSL_PREFIX %+ _aes_gcm_dec_kernel %xdefine aes_gcm_dec_update_vaes_avx10_512 BORINGSSL_PREFIX %+ _aes_gcm_dec_update_vaes_avx10_512 %xdefine aes_gcm_dec_update_vaes_avx2 BORINGSSL_PREFIX %+ _aes_gcm_dec_update_vaes_avx2 %xdefine aes_gcm_enc_kernel BORINGSSL_PREFIX %+ _aes_gcm_enc_kernel %xdefine aes_gcm_enc_update_vaes_avx10_512 BORINGSSL_PREFIX %+ _aes_gcm_enc_update_vaes_avx10_512 
%xdefine aes_gcm_enc_update_vaes_avx2 BORINGSSL_PREFIX %+ _aes_gcm_enc_update_vaes_avx2 %xdefine aes_hw_cbc_encrypt BORINGSSL_PREFIX %+ _aes_hw_cbc_encrypt %xdefine aes_hw_ctr32_encrypt_blocks BORINGSSL_PREFIX %+ _aes_hw_ctr32_encrypt_blocks %xdefine aes_hw_decrypt BORINGSSL_PREFIX %+ _aes_hw_decrypt %xdefine aes_hw_ecb_encrypt BORINGSSL_PREFIX %+ _aes_hw_ecb_encrypt %xdefine aes_hw_encrypt BORINGSSL_PREFIX %+ _aes_hw_encrypt %xdefine aes_hw_encrypt_key_to_decrypt_key BORINGSSL_PREFIX %+ _aes_hw_encrypt_key_to_decrypt_key %xdefine aes_hw_set_decrypt_key BORINGSSL_PREFIX %+ _aes_hw_set_decrypt_key %xdefine aes_hw_set_encrypt_key BORINGSSL_PREFIX %+ _aes_hw_set_encrypt_key %xdefine aes_hw_set_encrypt_key_alt BORINGSSL_PREFIX %+ _aes_hw_set_encrypt_key_alt %xdefine aes_hw_set_encrypt_key_alt_preferred BORINGSSL_PREFIX %+ _aes_hw_set_encrypt_key_alt_preferred %xdefine aes_hw_set_encrypt_key_base BORINGSSL_PREFIX %+ _aes_hw_set_encrypt_key_base %xdefine aes_nohw_cbc_encrypt BORINGSSL_PREFIX %+ _aes_nohw_cbc_encrypt %xdefine aes_nohw_ctr32_encrypt_blocks BORINGSSL_PREFIX %+ _aes_nohw_ctr32_encrypt_blocks %xdefine aes_nohw_decrypt BORINGSSL_PREFIX %+ _aes_nohw_decrypt %xdefine aes_nohw_encrypt BORINGSSL_PREFIX %+ _aes_nohw_encrypt %xdefine aes_nohw_set_decrypt_key BORINGSSL_PREFIX %+ _aes_nohw_set_decrypt_key %xdefine aes_nohw_set_encrypt_key BORINGSSL_PREFIX %+ _aes_nohw_set_encrypt_key %xdefine aesgcmsiv_htable6_init BORINGSSL_PREFIX %+ _aesgcmsiv_htable6_init %xdefine aesgcmsiv_htable_init BORINGSSL_PREFIX %+ _aesgcmsiv_htable_init %xdefine aesgcmsiv_htable_polyval BORINGSSL_PREFIX %+ _aesgcmsiv_htable_polyval %xdefine aesgcmsiv_polyval_horner BORINGSSL_PREFIX %+ _aesgcmsiv_polyval_horner %xdefine aesni_gcm_decrypt BORINGSSL_PREFIX %+ _aesni_gcm_decrypt %xdefine aesni_gcm_encrypt BORINGSSL_PREFIX %+ _aesni_gcm_encrypt %xdefine asn1_bit_string_length BORINGSSL_PREFIX %+ _asn1_bit_string_length %xdefine asn1_do_adb BORINGSSL_PREFIX %+ _asn1_do_adb %xdefine asn1_enc_free 
BORINGSSL_PREFIX %+ _asn1_enc_free %xdefine asn1_enc_init BORINGSSL_PREFIX %+ _asn1_enc_init %xdefine asn1_enc_restore BORINGSSL_PREFIX %+ _asn1_enc_restore %xdefine asn1_enc_save BORINGSSL_PREFIX %+ _asn1_enc_save %xdefine asn1_encoding_clear BORINGSSL_PREFIX %+ _asn1_encoding_clear %xdefine asn1_generalizedtime_to_tm BORINGSSL_PREFIX %+ _asn1_generalizedtime_to_tm %xdefine asn1_get_choice_selector BORINGSSL_PREFIX %+ _asn1_get_choice_selector %xdefine asn1_get_field_ptr BORINGSSL_PREFIX %+ _asn1_get_field_ptr %xdefine asn1_get_string_table_for_testing BORINGSSL_PREFIX %+ _asn1_get_string_table_for_testing %xdefine asn1_is_printable BORINGSSL_PREFIX %+ _asn1_is_printable %xdefine asn1_refcount_dec_and_test_zero BORINGSSL_PREFIX %+ _asn1_refcount_dec_and_test_zero %xdefine asn1_refcount_set_one BORINGSSL_PREFIX %+ _asn1_refcount_set_one %xdefine asn1_set_choice_selector BORINGSSL_PREFIX %+ _asn1_set_choice_selector %xdefine asn1_type_cleanup BORINGSSL_PREFIX %+ _asn1_type_cleanup %xdefine asn1_type_set0_string BORINGSSL_PREFIX %+ _asn1_type_set0_string %xdefine asn1_type_value_as_pointer BORINGSSL_PREFIX %+ _asn1_type_value_as_pointer %xdefine asn1_utctime_to_tm BORINGSSL_PREFIX %+ _asn1_utctime_to_tm %xdefine bcm_as_approved_status BORINGSSL_PREFIX %+ _bcm_as_approved_status %xdefine bcm_success BORINGSSL_PREFIX %+ _bcm_success %xdefine beeu_mod_inverse_vartime BORINGSSL_PREFIX %+ _beeu_mod_inverse_vartime %xdefine bio_clear_socket_error BORINGSSL_PREFIX %+ _bio_clear_socket_error %xdefine bio_errno_should_retry BORINGSSL_PREFIX %+ _bio_errno_should_retry %xdefine bio_ip_and_port_to_socket_and_addr BORINGSSL_PREFIX %+ _bio_ip_and_port_to_socket_and_addr %xdefine bio_sock_error BORINGSSL_PREFIX %+ _bio_sock_error %xdefine bio_socket_nbio BORINGSSL_PREFIX %+ _bio_socket_nbio %xdefine bio_socket_should_retry BORINGSSL_PREFIX %+ _bio_socket_should_retry %xdefine bn_abs_sub_consttime BORINGSSL_PREFIX %+ _bn_abs_sub_consttime %xdefine bn_add_words BORINGSSL_PREFIX %+ 
_bn_add_words %xdefine bn_assert_fits_in_bytes BORINGSSL_PREFIX %+ _bn_assert_fits_in_bytes %xdefine bn_big_endian_to_words BORINGSSL_PREFIX %+ _bn_big_endian_to_words %xdefine bn_copy_words BORINGSSL_PREFIX %+ _bn_copy_words %xdefine bn_declassify BORINGSSL_PREFIX %+ _bn_declassify %xdefine bn_div_consttime BORINGSSL_PREFIX %+ _bn_div_consttime %xdefine bn_expand BORINGSSL_PREFIX %+ _bn_expand %xdefine bn_fits_in_words BORINGSSL_PREFIX %+ _bn_fits_in_words %xdefine bn_from_montgomery_small BORINGSSL_PREFIX %+ _bn_from_montgomery_small %xdefine bn_gather5 BORINGSSL_PREFIX %+ _bn_gather5 %xdefine bn_in_range_words BORINGSSL_PREFIX %+ _bn_in_range_words %xdefine bn_is_bit_set_words BORINGSSL_PREFIX %+ _bn_is_bit_set_words %xdefine bn_is_relatively_prime BORINGSSL_PREFIX %+ _bn_is_relatively_prime %xdefine bn_jacobi BORINGSSL_PREFIX %+ _bn_jacobi %xdefine bn_lcm_consttime BORINGSSL_PREFIX %+ _bn_lcm_consttime %xdefine bn_less_than_montgomery_R BORINGSSL_PREFIX %+ _bn_less_than_montgomery_R %xdefine bn_less_than_words BORINGSSL_PREFIX %+ _bn_less_than_words %xdefine bn_miller_rabin_init BORINGSSL_PREFIX %+ _bn_miller_rabin_init %xdefine bn_miller_rabin_iteration BORINGSSL_PREFIX %+ _bn_miller_rabin_iteration %xdefine bn_minimal_width BORINGSSL_PREFIX %+ _bn_minimal_width %xdefine bn_mod_add_consttime BORINGSSL_PREFIX %+ _bn_mod_add_consttime %xdefine bn_mod_add_words BORINGSSL_PREFIX %+ _bn_mod_add_words %xdefine bn_mod_exp_mont_small BORINGSSL_PREFIX %+ _bn_mod_exp_mont_small %xdefine bn_mod_inverse0_prime_mont_small BORINGSSL_PREFIX %+ _bn_mod_inverse0_prime_mont_small %xdefine bn_mod_inverse_consttime BORINGSSL_PREFIX %+ _bn_mod_inverse_consttime %xdefine bn_mod_inverse_prime BORINGSSL_PREFIX %+ _bn_mod_inverse_prime %xdefine bn_mod_inverse_secret_prime BORINGSSL_PREFIX %+ _bn_mod_inverse_secret_prime %xdefine bn_mod_lshift1_consttime BORINGSSL_PREFIX %+ _bn_mod_lshift1_consttime %xdefine bn_mod_lshift_consttime BORINGSSL_PREFIX %+ _bn_mod_lshift_consttime %xdefine 
bn_mod_mul_montgomery_small BORINGSSL_PREFIX %+ _bn_mod_mul_montgomery_small %xdefine bn_mod_sub_consttime BORINGSSL_PREFIX %+ _bn_mod_sub_consttime %xdefine bn_mod_sub_words BORINGSSL_PREFIX %+ _bn_mod_sub_words %xdefine bn_mod_u16_consttime BORINGSSL_PREFIX %+ _bn_mod_u16_consttime %xdefine bn_mont_ctx_cleanup BORINGSSL_PREFIX %+ _bn_mont_ctx_cleanup %xdefine bn_mont_ctx_init BORINGSSL_PREFIX %+ _bn_mont_ctx_init %xdefine bn_mont_ctx_set_RR_consttime BORINGSSL_PREFIX %+ _bn_mont_ctx_set_RR_consttime %xdefine bn_mont_n0 BORINGSSL_PREFIX %+ _bn_mont_n0 %xdefine bn_mul4x_mont BORINGSSL_PREFIX %+ _bn_mul4x_mont %xdefine bn_mul4x_mont_capable BORINGSSL_PREFIX %+ _bn_mul4x_mont_capable %xdefine bn_mul4x_mont_gather5 BORINGSSL_PREFIX %+ _bn_mul4x_mont_gather5 %xdefine bn_mul4x_mont_gather5_capable BORINGSSL_PREFIX %+ _bn_mul4x_mont_gather5_capable %xdefine bn_mul_add_words BORINGSSL_PREFIX %+ _bn_mul_add_words %xdefine bn_mul_comba4 BORINGSSL_PREFIX %+ _bn_mul_comba4 %xdefine bn_mul_comba8 BORINGSSL_PREFIX %+ _bn_mul_comba8 %xdefine bn_mul_consttime BORINGSSL_PREFIX %+ _bn_mul_consttime %xdefine bn_mul_mont BORINGSSL_PREFIX %+ _bn_mul_mont %xdefine bn_mul_mont_gather5_nohw BORINGSSL_PREFIX %+ _bn_mul_mont_gather5_nohw %xdefine bn_mul_mont_nohw BORINGSSL_PREFIX %+ _bn_mul_mont_nohw %xdefine bn_mul_small BORINGSSL_PREFIX %+ _bn_mul_small %xdefine bn_mul_words BORINGSSL_PREFIX %+ _bn_mul_words %xdefine bn_mulx4x_mont BORINGSSL_PREFIX %+ _bn_mulx4x_mont %xdefine bn_mulx4x_mont_capable BORINGSSL_PREFIX %+ _bn_mulx4x_mont_capable %xdefine bn_mulx4x_mont_gather5 BORINGSSL_PREFIX %+ _bn_mulx4x_mont_gather5 %xdefine bn_mulx4x_mont_gather5_capable BORINGSSL_PREFIX %+ _bn_mulx4x_mont_gather5_capable %xdefine bn_mulx_adx_capable BORINGSSL_PREFIX %+ _bn_mulx_adx_capable %xdefine bn_odd_number_is_obviously_composite BORINGSSL_PREFIX %+ _bn_odd_number_is_obviously_composite %xdefine bn_one_to_montgomery BORINGSSL_PREFIX %+ _bn_one_to_montgomery %xdefine bn_power5_capable 
BORINGSSL_PREFIX %+ _bn_power5_capable %xdefine bn_power5_nohw BORINGSSL_PREFIX %+ _bn_power5_nohw %xdefine bn_powerx5 BORINGSSL_PREFIX %+ _bn_powerx5 %xdefine bn_powerx5_capable BORINGSSL_PREFIX %+ _bn_powerx5_capable %xdefine bn_rand_range_words BORINGSSL_PREFIX %+ _bn_rand_range_words %xdefine bn_rand_secret_range BORINGSSL_PREFIX %+ _bn_rand_secret_range %xdefine bn_reduce_once BORINGSSL_PREFIX %+ _bn_reduce_once %xdefine bn_reduce_once_in_place BORINGSSL_PREFIX %+ _bn_reduce_once_in_place %xdefine bn_resize_words BORINGSSL_PREFIX %+ _bn_resize_words %xdefine bn_rshift1_words BORINGSSL_PREFIX %+ _bn_rshift1_words %xdefine bn_rshift_secret_shift BORINGSSL_PREFIX %+ _bn_rshift_secret_shift %xdefine bn_rshift_words BORINGSSL_PREFIX %+ _bn_rshift_words %xdefine bn_scatter5 BORINGSSL_PREFIX %+ _bn_scatter5 %xdefine bn_secret BORINGSSL_PREFIX %+ _bn_secret %xdefine bn_select_words BORINGSSL_PREFIX %+ _bn_select_words %xdefine bn_set_minimal_width BORINGSSL_PREFIX %+ _bn_set_minimal_width %xdefine bn_set_static_words BORINGSSL_PREFIX %+ _bn_set_static_words %xdefine bn_set_words BORINGSSL_PREFIX %+ _bn_set_words %xdefine bn_sqr8x_internal BORINGSSL_PREFIX %+ _bn_sqr8x_internal %xdefine bn_sqr8x_mont BORINGSSL_PREFIX %+ _bn_sqr8x_mont %xdefine bn_sqr8x_mont_capable BORINGSSL_PREFIX %+ _bn_sqr8x_mont_capable %xdefine bn_sqr_comba4 BORINGSSL_PREFIX %+ _bn_sqr_comba4 %xdefine bn_sqr_comba8 BORINGSSL_PREFIX %+ _bn_sqr_comba8 %xdefine bn_sqr_consttime BORINGSSL_PREFIX %+ _bn_sqr_consttime %xdefine bn_sqr_small BORINGSSL_PREFIX %+ _bn_sqr_small %xdefine bn_sqr_words BORINGSSL_PREFIX %+ _bn_sqr_words %xdefine bn_sqrx8x_internal BORINGSSL_PREFIX %+ _bn_sqrx8x_internal %xdefine bn_sub_words BORINGSSL_PREFIX %+ _bn_sub_words %xdefine bn_to_montgomery_small BORINGSSL_PREFIX %+ _bn_to_montgomery_small %xdefine bn_uadd_consttime BORINGSSL_PREFIX %+ _bn_uadd_consttime %xdefine bn_usub_consttime BORINGSSL_PREFIX %+ _bn_usub_consttime %xdefine bn_wexpand BORINGSSL_PREFIX %+ 
_bn_wexpand %xdefine bn_words_to_big_endian BORINGSSL_PREFIX %+ _bn_words_to_big_endian %xdefine boringssl_ensure_ecc_self_test BORINGSSL_PREFIX %+ _boringssl_ensure_ecc_self_test %xdefine boringssl_ensure_ffdh_self_test BORINGSSL_PREFIX %+ _boringssl_ensure_ffdh_self_test %xdefine boringssl_ensure_rsa_self_test BORINGSSL_PREFIX %+ _boringssl_ensure_rsa_self_test %xdefine boringssl_fips_break_test BORINGSSL_PREFIX %+ _boringssl_fips_break_test %xdefine boringssl_fips_inc_counter BORINGSSL_PREFIX %+ _boringssl_fips_inc_counter %xdefine boringssl_self_test_hmac_sha256 BORINGSSL_PREFIX %+ _boringssl_self_test_hmac_sha256 %xdefine boringssl_self_test_sha256 BORINGSSL_PREFIX %+ _boringssl_self_test_sha256 %xdefine boringssl_self_test_sha512 BORINGSSL_PREFIX %+ _boringssl_self_test_sha512 %xdefine bsaes_capable BORINGSSL_PREFIX %+ _bsaes_capable %xdefine bsaes_cbc_encrypt BORINGSSL_PREFIX %+ _bsaes_cbc_encrypt %xdefine c2i_ASN1_BIT_STRING BORINGSSL_PREFIX %+ _c2i_ASN1_BIT_STRING %xdefine c2i_ASN1_INTEGER BORINGSSL_PREFIX %+ _c2i_ASN1_INTEGER %xdefine c2i_ASN1_OBJECT BORINGSSL_PREFIX %+ _c2i_ASN1_OBJECT %xdefine chacha20_poly1305_asm_capable BORINGSSL_PREFIX %+ _chacha20_poly1305_asm_capable %xdefine chacha20_poly1305_open BORINGSSL_PREFIX %+ _chacha20_poly1305_open %xdefine chacha20_poly1305_open_avx2 BORINGSSL_PREFIX %+ _chacha20_poly1305_open_avx2 %xdefine chacha20_poly1305_open_nohw BORINGSSL_PREFIX %+ _chacha20_poly1305_open_nohw %xdefine chacha20_poly1305_seal BORINGSSL_PREFIX %+ _chacha20_poly1305_seal %xdefine chacha20_poly1305_seal_avx2 BORINGSSL_PREFIX %+ _chacha20_poly1305_seal_avx2 %xdefine chacha20_poly1305_seal_nohw BORINGSSL_PREFIX %+ _chacha20_poly1305_seal_nohw %xdefine crypto_gcm_clmul_enabled BORINGSSL_PREFIX %+ _crypto_gcm_clmul_enabled %xdefine d2i_ASN1_BIT_STRING BORINGSSL_PREFIX %+ _d2i_ASN1_BIT_STRING %xdefine d2i_ASN1_BMPSTRING BORINGSSL_PREFIX %+ _d2i_ASN1_BMPSTRING %xdefine d2i_ASN1_BOOLEAN BORINGSSL_PREFIX %+ _d2i_ASN1_BOOLEAN %xdefine 
d2i_ASN1_ENUMERATED BORINGSSL_PREFIX %+ _d2i_ASN1_ENUMERATED %xdefine d2i_ASN1_GENERALIZEDTIME BORINGSSL_PREFIX %+ _d2i_ASN1_GENERALIZEDTIME %xdefine d2i_ASN1_GENERALSTRING BORINGSSL_PREFIX %+ _d2i_ASN1_GENERALSTRING %xdefine d2i_ASN1_IA5STRING BORINGSSL_PREFIX %+ _d2i_ASN1_IA5STRING %xdefine d2i_ASN1_INTEGER BORINGSSL_PREFIX %+ _d2i_ASN1_INTEGER %xdefine d2i_ASN1_NULL BORINGSSL_PREFIX %+ _d2i_ASN1_NULL %xdefine d2i_ASN1_OBJECT BORINGSSL_PREFIX %+ _d2i_ASN1_OBJECT %xdefine d2i_ASN1_OCTET_STRING BORINGSSL_PREFIX %+ _d2i_ASN1_OCTET_STRING %xdefine d2i_ASN1_PRINTABLE BORINGSSL_PREFIX %+ _d2i_ASN1_PRINTABLE %xdefine d2i_ASN1_PRINTABLESTRING BORINGSSL_PREFIX %+ _d2i_ASN1_PRINTABLESTRING %xdefine d2i_ASN1_SEQUENCE_ANY BORINGSSL_PREFIX %+ _d2i_ASN1_SEQUENCE_ANY %xdefine d2i_ASN1_SET_ANY BORINGSSL_PREFIX %+ _d2i_ASN1_SET_ANY %xdefine d2i_ASN1_T61STRING BORINGSSL_PREFIX %+ _d2i_ASN1_T61STRING %xdefine d2i_ASN1_TIME BORINGSSL_PREFIX %+ _d2i_ASN1_TIME %xdefine d2i_ASN1_TYPE BORINGSSL_PREFIX %+ _d2i_ASN1_TYPE %xdefine d2i_ASN1_UNIVERSALSTRING BORINGSSL_PREFIX %+ _d2i_ASN1_UNIVERSALSTRING %xdefine d2i_ASN1_UTCTIME BORINGSSL_PREFIX %+ _d2i_ASN1_UTCTIME %xdefine d2i_ASN1_UTF8STRING BORINGSSL_PREFIX %+ _d2i_ASN1_UTF8STRING %xdefine d2i_ASN1_VISIBLESTRING BORINGSSL_PREFIX %+ _d2i_ASN1_VISIBLESTRING %xdefine d2i_AUTHORITY_INFO_ACCESS BORINGSSL_PREFIX %+ _d2i_AUTHORITY_INFO_ACCESS %xdefine d2i_AUTHORITY_KEYID BORINGSSL_PREFIX %+ _d2i_AUTHORITY_KEYID %xdefine d2i_AutoPrivateKey BORINGSSL_PREFIX %+ _d2i_AutoPrivateKey %xdefine d2i_BASIC_CONSTRAINTS BORINGSSL_PREFIX %+ _d2i_BASIC_CONSTRAINTS %xdefine d2i_CERTIFICATEPOLICIES BORINGSSL_PREFIX %+ _d2i_CERTIFICATEPOLICIES %xdefine d2i_CRL_DIST_POINTS BORINGSSL_PREFIX %+ _d2i_CRL_DIST_POINTS %xdefine d2i_DHparams BORINGSSL_PREFIX %+ _d2i_DHparams %xdefine d2i_DHparams_bio BORINGSSL_PREFIX %+ _d2i_DHparams_bio %xdefine d2i_DIRECTORYSTRING BORINGSSL_PREFIX %+ _d2i_DIRECTORYSTRING %xdefine d2i_DISPLAYTEXT BORINGSSL_PREFIX %+ _d2i_DISPLAYTEXT 
%xdefine d2i_DSAPrivateKey BORINGSSL_PREFIX %+ _d2i_DSAPrivateKey %xdefine d2i_DSAPrivateKey_bio BORINGSSL_PREFIX %+ _d2i_DSAPrivateKey_bio %xdefine d2i_DSAPrivateKey_fp BORINGSSL_PREFIX %+ _d2i_DSAPrivateKey_fp %xdefine d2i_DSAPublicKey BORINGSSL_PREFIX %+ _d2i_DSAPublicKey %xdefine d2i_DSA_PUBKEY BORINGSSL_PREFIX %+ _d2i_DSA_PUBKEY %xdefine d2i_DSA_PUBKEY_bio BORINGSSL_PREFIX %+ _d2i_DSA_PUBKEY_bio %xdefine d2i_DSA_PUBKEY_fp BORINGSSL_PREFIX %+ _d2i_DSA_PUBKEY_fp %xdefine d2i_DSA_SIG BORINGSSL_PREFIX %+ _d2i_DSA_SIG %xdefine d2i_DSAparams BORINGSSL_PREFIX %+ _d2i_DSAparams %xdefine d2i_ECDSA_SIG BORINGSSL_PREFIX %+ _d2i_ECDSA_SIG %xdefine d2i_ECPKParameters BORINGSSL_PREFIX %+ _d2i_ECPKParameters %xdefine d2i_ECParameters BORINGSSL_PREFIX %+ _d2i_ECParameters %xdefine d2i_ECPrivateKey BORINGSSL_PREFIX %+ _d2i_ECPrivateKey %xdefine d2i_ECPrivateKey_bio BORINGSSL_PREFIX %+ _d2i_ECPrivateKey_bio %xdefine d2i_ECPrivateKey_fp BORINGSSL_PREFIX %+ _d2i_ECPrivateKey_fp %xdefine d2i_EC_PUBKEY BORINGSSL_PREFIX %+ _d2i_EC_PUBKEY %xdefine d2i_EC_PUBKEY_bio BORINGSSL_PREFIX %+ _d2i_EC_PUBKEY_bio %xdefine d2i_EC_PUBKEY_fp BORINGSSL_PREFIX %+ _d2i_EC_PUBKEY_fp %xdefine d2i_EXTENDED_KEY_USAGE BORINGSSL_PREFIX %+ _d2i_EXTENDED_KEY_USAGE %xdefine d2i_GENERAL_NAME BORINGSSL_PREFIX %+ _d2i_GENERAL_NAME %xdefine d2i_GENERAL_NAMES BORINGSSL_PREFIX %+ _d2i_GENERAL_NAMES %xdefine d2i_ISSUING_DIST_POINT BORINGSSL_PREFIX %+ _d2i_ISSUING_DIST_POINT %xdefine d2i_NETSCAPE_SPKAC BORINGSSL_PREFIX %+ _d2i_NETSCAPE_SPKAC %xdefine d2i_NETSCAPE_SPKI BORINGSSL_PREFIX %+ _d2i_NETSCAPE_SPKI %xdefine d2i_PKCS12 BORINGSSL_PREFIX %+ _d2i_PKCS12 %xdefine d2i_PKCS12_bio BORINGSSL_PREFIX %+ _d2i_PKCS12_bio %xdefine d2i_PKCS12_fp BORINGSSL_PREFIX %+ _d2i_PKCS12_fp %xdefine d2i_PKCS7 BORINGSSL_PREFIX %+ _d2i_PKCS7 %xdefine d2i_PKCS7_bio BORINGSSL_PREFIX %+ _d2i_PKCS7_bio %xdefine d2i_PKCS8PrivateKey_bio BORINGSSL_PREFIX %+ _d2i_PKCS8PrivateKey_bio %xdefine d2i_PKCS8PrivateKey_fp BORINGSSL_PREFIX %+ 
_d2i_PKCS8PrivateKey_fp %xdefine d2i_PKCS8_PRIV_KEY_INFO BORINGSSL_PREFIX %+ _d2i_PKCS8_PRIV_KEY_INFO %xdefine d2i_PKCS8_PRIV_KEY_INFO_bio BORINGSSL_PREFIX %+ _d2i_PKCS8_PRIV_KEY_INFO_bio %xdefine d2i_PKCS8_PRIV_KEY_INFO_fp BORINGSSL_PREFIX %+ _d2i_PKCS8_PRIV_KEY_INFO_fp %xdefine d2i_PKCS8_bio BORINGSSL_PREFIX %+ _d2i_PKCS8_bio %xdefine d2i_PKCS8_fp BORINGSSL_PREFIX %+ _d2i_PKCS8_fp %xdefine d2i_PUBKEY BORINGSSL_PREFIX %+ _d2i_PUBKEY %xdefine d2i_PUBKEY_bio BORINGSSL_PREFIX %+ _d2i_PUBKEY_bio %xdefine d2i_PUBKEY_fp BORINGSSL_PREFIX %+ _d2i_PUBKEY_fp %xdefine d2i_PrivateKey BORINGSSL_PREFIX %+ _d2i_PrivateKey %xdefine d2i_PrivateKey_bio BORINGSSL_PREFIX %+ _d2i_PrivateKey_bio %xdefine d2i_PrivateKey_fp BORINGSSL_PREFIX %+ _d2i_PrivateKey_fp %xdefine d2i_PublicKey BORINGSSL_PREFIX %+ _d2i_PublicKey %xdefine d2i_RSAPrivateKey BORINGSSL_PREFIX %+ _d2i_RSAPrivateKey %xdefine d2i_RSAPrivateKey_bio BORINGSSL_PREFIX %+ _d2i_RSAPrivateKey_bio %xdefine d2i_RSAPrivateKey_fp BORINGSSL_PREFIX %+ _d2i_RSAPrivateKey_fp %xdefine d2i_RSAPublicKey BORINGSSL_PREFIX %+ _d2i_RSAPublicKey %xdefine d2i_RSAPublicKey_bio BORINGSSL_PREFIX %+ _d2i_RSAPublicKey_bio %xdefine d2i_RSAPublicKey_fp BORINGSSL_PREFIX %+ _d2i_RSAPublicKey_fp %xdefine d2i_RSA_PSS_PARAMS BORINGSSL_PREFIX %+ _d2i_RSA_PSS_PARAMS %xdefine d2i_RSA_PUBKEY BORINGSSL_PREFIX %+ _d2i_RSA_PUBKEY %xdefine d2i_RSA_PUBKEY_bio BORINGSSL_PREFIX %+ _d2i_RSA_PUBKEY_bio %xdefine d2i_RSA_PUBKEY_fp BORINGSSL_PREFIX %+ _d2i_RSA_PUBKEY_fp %xdefine d2i_SSL_SESSION BORINGSSL_PREFIX %+ _d2i_SSL_SESSION %xdefine d2i_SSL_SESSION_bio BORINGSSL_PREFIX %+ _d2i_SSL_SESSION_bio %xdefine d2i_X509 BORINGSSL_PREFIX %+ _d2i_X509 %xdefine d2i_X509_ALGOR BORINGSSL_PREFIX %+ _d2i_X509_ALGOR %xdefine d2i_X509_ATTRIBUTE BORINGSSL_PREFIX %+ _d2i_X509_ATTRIBUTE %xdefine d2i_X509_AUX BORINGSSL_PREFIX %+ _d2i_X509_AUX %xdefine d2i_X509_CERT_AUX BORINGSSL_PREFIX %+ _d2i_X509_CERT_AUX %xdefine d2i_X509_CINF BORINGSSL_PREFIX %+ _d2i_X509_CINF %xdefine d2i_X509_CRL 
BORINGSSL_PREFIX %+ _d2i_X509_CRL %xdefine d2i_X509_CRL_INFO BORINGSSL_PREFIX %+ _d2i_X509_CRL_INFO %xdefine d2i_X509_CRL_bio BORINGSSL_PREFIX %+ _d2i_X509_CRL_bio %xdefine d2i_X509_CRL_fp BORINGSSL_PREFIX %+ _d2i_X509_CRL_fp %xdefine d2i_X509_EXTENSION BORINGSSL_PREFIX %+ _d2i_X509_EXTENSION %xdefine d2i_X509_EXTENSIONS BORINGSSL_PREFIX %+ _d2i_X509_EXTENSIONS %xdefine d2i_X509_NAME BORINGSSL_PREFIX %+ _d2i_X509_NAME %xdefine d2i_X509_PUBKEY BORINGSSL_PREFIX %+ _d2i_X509_PUBKEY %xdefine d2i_X509_REQ BORINGSSL_PREFIX %+ _d2i_X509_REQ %xdefine d2i_X509_REQ_INFO BORINGSSL_PREFIX %+ _d2i_X509_REQ_INFO %xdefine d2i_X509_REQ_bio BORINGSSL_PREFIX %+ _d2i_X509_REQ_bio %xdefine d2i_X509_REQ_fp BORINGSSL_PREFIX %+ _d2i_X509_REQ_fp %xdefine d2i_X509_REVOKED BORINGSSL_PREFIX %+ _d2i_X509_REVOKED %xdefine d2i_X509_SIG BORINGSSL_PREFIX %+ _d2i_X509_SIG %xdefine d2i_X509_VAL BORINGSSL_PREFIX %+ _d2i_X509_VAL %xdefine d2i_X509_bio BORINGSSL_PREFIX %+ _d2i_X509_bio %xdefine d2i_X509_fp BORINGSSL_PREFIX %+ _d2i_X509_fp %xdefine dh_asn1_meth BORINGSSL_PREFIX %+ _dh_asn1_meth %xdefine dh_check_params_fast BORINGSSL_PREFIX %+ _dh_check_params_fast %xdefine dh_compute_key_padded_no_self_test BORINGSSL_PREFIX %+ _dh_compute_key_padded_no_self_test %xdefine dh_pkey_meth BORINGSSL_PREFIX %+ _dh_pkey_meth %xdefine dsa_asn1_meth BORINGSSL_PREFIX %+ _dsa_asn1_meth %xdefine dsa_check_key BORINGSSL_PREFIX %+ _dsa_check_key %xdefine ec_GFp_mont_add BORINGSSL_PREFIX %+ _ec_GFp_mont_add %xdefine ec_GFp_mont_dbl BORINGSSL_PREFIX %+ _ec_GFp_mont_dbl %xdefine ec_GFp_mont_felem_exp BORINGSSL_PREFIX %+ _ec_GFp_mont_felem_exp %xdefine ec_GFp_mont_felem_from_bytes BORINGSSL_PREFIX %+ _ec_GFp_mont_felem_from_bytes %xdefine ec_GFp_mont_felem_mul BORINGSSL_PREFIX %+ _ec_GFp_mont_felem_mul %xdefine ec_GFp_mont_felem_reduce BORINGSSL_PREFIX %+ _ec_GFp_mont_felem_reduce %xdefine ec_GFp_mont_felem_sqr BORINGSSL_PREFIX %+ _ec_GFp_mont_felem_sqr %xdefine ec_GFp_mont_felem_to_bytes BORINGSSL_PREFIX %+ 
_ec_GFp_mont_felem_to_bytes %xdefine ec_GFp_mont_init_precomp BORINGSSL_PREFIX %+ _ec_GFp_mont_init_precomp %xdefine ec_GFp_mont_mul BORINGSSL_PREFIX %+ _ec_GFp_mont_mul %xdefine ec_GFp_mont_mul_base BORINGSSL_PREFIX %+ _ec_GFp_mont_mul_base %xdefine ec_GFp_mont_mul_batch BORINGSSL_PREFIX %+ _ec_GFp_mont_mul_batch %xdefine ec_GFp_mont_mul_precomp BORINGSSL_PREFIX %+ _ec_GFp_mont_mul_precomp %xdefine ec_GFp_mont_mul_public_batch BORINGSSL_PREFIX %+ _ec_GFp_mont_mul_public_batch %xdefine ec_GFp_nistp_recode_scalar_bits BORINGSSL_PREFIX %+ _ec_GFp_nistp_recode_scalar_bits %xdefine ec_GFp_simple_cmp_x_coordinate BORINGSSL_PREFIX %+ _ec_GFp_simple_cmp_x_coordinate %xdefine ec_GFp_simple_felem_from_bytes BORINGSSL_PREFIX %+ _ec_GFp_simple_felem_from_bytes %xdefine ec_GFp_simple_felem_to_bytes BORINGSSL_PREFIX %+ _ec_GFp_simple_felem_to_bytes %xdefine ec_GFp_simple_group_get_curve BORINGSSL_PREFIX %+ _ec_GFp_simple_group_get_curve %xdefine ec_GFp_simple_group_set_curve BORINGSSL_PREFIX %+ _ec_GFp_simple_group_set_curve %xdefine ec_GFp_simple_invert BORINGSSL_PREFIX %+ _ec_GFp_simple_invert %xdefine ec_GFp_simple_is_at_infinity BORINGSSL_PREFIX %+ _ec_GFp_simple_is_at_infinity %xdefine ec_GFp_simple_is_on_curve BORINGSSL_PREFIX %+ _ec_GFp_simple_is_on_curve %xdefine ec_GFp_simple_point_copy BORINGSSL_PREFIX %+ _ec_GFp_simple_point_copy %xdefine ec_GFp_simple_point_init BORINGSSL_PREFIX %+ _ec_GFp_simple_point_init %xdefine ec_GFp_simple_point_set_to_infinity BORINGSSL_PREFIX %+ _ec_GFp_simple_point_set_to_infinity %xdefine ec_GFp_simple_points_equal BORINGSSL_PREFIX %+ _ec_GFp_simple_points_equal %xdefine ec_affine_jacobian_equal BORINGSSL_PREFIX %+ _ec_affine_jacobian_equal %xdefine ec_affine_select BORINGSSL_PREFIX %+ _ec_affine_select %xdefine ec_affine_to_jacobian BORINGSSL_PREFIX %+ _ec_affine_to_jacobian %xdefine ec_asn1_meth BORINGSSL_PREFIX %+ _ec_asn1_meth %xdefine ec_bignum_to_felem BORINGSSL_PREFIX %+ _ec_bignum_to_felem %xdefine ec_bignum_to_scalar 
BORINGSSL_PREFIX %+ _ec_bignum_to_scalar %xdefine ec_cmp_x_coordinate BORINGSSL_PREFIX %+ _ec_cmp_x_coordinate %xdefine ec_compute_wNAF BORINGSSL_PREFIX %+ _ec_compute_wNAF %xdefine ec_felem_add BORINGSSL_PREFIX %+ _ec_felem_add %xdefine ec_felem_equal BORINGSSL_PREFIX %+ _ec_felem_equal %xdefine ec_felem_from_bytes BORINGSSL_PREFIX %+ _ec_felem_from_bytes %xdefine ec_felem_neg BORINGSSL_PREFIX %+ _ec_felem_neg %xdefine ec_felem_non_zero_mask BORINGSSL_PREFIX %+ _ec_felem_non_zero_mask %xdefine ec_felem_one BORINGSSL_PREFIX %+ _ec_felem_one %xdefine ec_felem_select BORINGSSL_PREFIX %+ _ec_felem_select %xdefine ec_felem_sub BORINGSSL_PREFIX %+ _ec_felem_sub %xdefine ec_felem_to_bignum BORINGSSL_PREFIX %+ _ec_felem_to_bignum %xdefine ec_felem_to_bytes BORINGSSL_PREFIX %+ _ec_felem_to_bytes %xdefine ec_get_x_coordinate_as_bytes BORINGSSL_PREFIX %+ _ec_get_x_coordinate_as_bytes %xdefine ec_get_x_coordinate_as_scalar BORINGSSL_PREFIX %+ _ec_get_x_coordinate_as_scalar %xdefine ec_hash_to_curve_p256_xmd_sha256_sswu BORINGSSL_PREFIX %+ _ec_hash_to_curve_p256_xmd_sha256_sswu %xdefine ec_hash_to_curve_p384_xmd_sha384_sswu BORINGSSL_PREFIX %+ _ec_hash_to_curve_p384_xmd_sha384_sswu %xdefine ec_hash_to_curve_p384_xmd_sha512_sswu_draft07 BORINGSSL_PREFIX %+ _ec_hash_to_curve_p384_xmd_sha512_sswu_draft07 %xdefine ec_hash_to_scalar_p384_xmd_sha384 BORINGSSL_PREFIX %+ _ec_hash_to_scalar_p384_xmd_sha384 %xdefine ec_hash_to_scalar_p384_xmd_sha512_draft07 BORINGSSL_PREFIX %+ _ec_hash_to_scalar_p384_xmd_sha512_draft07 %xdefine ec_init_precomp BORINGSSL_PREFIX %+ _ec_init_precomp %xdefine ec_jacobian_to_affine BORINGSSL_PREFIX %+ _ec_jacobian_to_affine %xdefine ec_jacobian_to_affine_batch BORINGSSL_PREFIX %+ _ec_jacobian_to_affine_batch %xdefine ec_pkey_meth BORINGSSL_PREFIX %+ _ec_pkey_meth %xdefine ec_point_byte_len BORINGSSL_PREFIX %+ _ec_point_byte_len %xdefine ec_point_from_uncompressed BORINGSSL_PREFIX %+ _ec_point_from_uncompressed %xdefine ec_point_mul_no_self_test 
BORINGSSL_PREFIX %+ _ec_point_mul_no_self_test %xdefine ec_point_mul_scalar BORINGSSL_PREFIX %+ _ec_point_mul_scalar %xdefine ec_point_mul_scalar_base BORINGSSL_PREFIX %+ _ec_point_mul_scalar_base %xdefine ec_point_mul_scalar_batch BORINGSSL_PREFIX %+ _ec_point_mul_scalar_batch %xdefine ec_point_mul_scalar_precomp BORINGSSL_PREFIX %+ _ec_point_mul_scalar_precomp %xdefine ec_point_mul_scalar_public BORINGSSL_PREFIX %+ _ec_point_mul_scalar_public %xdefine ec_point_mul_scalar_public_batch BORINGSSL_PREFIX %+ _ec_point_mul_scalar_public_batch %xdefine ec_point_select BORINGSSL_PREFIX %+ _ec_point_select %xdefine ec_point_set_affine_coordinates BORINGSSL_PREFIX %+ _ec_point_set_affine_coordinates %xdefine ec_point_to_bytes BORINGSSL_PREFIX %+ _ec_point_to_bytes %xdefine ec_precomp_select BORINGSSL_PREFIX %+ _ec_precomp_select %xdefine ec_random_nonzero_scalar BORINGSSL_PREFIX %+ _ec_random_nonzero_scalar %xdefine ec_random_scalar BORINGSSL_PREFIX %+ _ec_random_scalar %xdefine ec_scalar_add BORINGSSL_PREFIX %+ _ec_scalar_add %xdefine ec_scalar_equal_vartime BORINGSSL_PREFIX %+ _ec_scalar_equal_vartime %xdefine ec_scalar_from_bytes BORINGSSL_PREFIX %+ _ec_scalar_from_bytes %xdefine ec_scalar_from_montgomery BORINGSSL_PREFIX %+ _ec_scalar_from_montgomery %xdefine ec_scalar_inv0_montgomery BORINGSSL_PREFIX %+ _ec_scalar_inv0_montgomery %xdefine ec_scalar_is_zero BORINGSSL_PREFIX %+ _ec_scalar_is_zero %xdefine ec_scalar_mul_montgomery BORINGSSL_PREFIX %+ _ec_scalar_mul_montgomery %xdefine ec_scalar_neg BORINGSSL_PREFIX %+ _ec_scalar_neg %xdefine ec_scalar_reduce BORINGSSL_PREFIX %+ _ec_scalar_reduce %xdefine ec_scalar_select BORINGSSL_PREFIX %+ _ec_scalar_select %xdefine ec_scalar_sub BORINGSSL_PREFIX %+ _ec_scalar_sub %xdefine ec_scalar_to_bytes BORINGSSL_PREFIX %+ _ec_scalar_to_bytes %xdefine ec_scalar_to_montgomery BORINGSSL_PREFIX %+ _ec_scalar_to_montgomery %xdefine ec_scalar_to_montgomery_inv_vartime BORINGSSL_PREFIX %+ _ec_scalar_to_montgomery_inv_vartime %xdefine 
ec_set_to_safe_point BORINGSSL_PREFIX %+ _ec_set_to_safe_point %xdefine ec_simple_scalar_inv0_montgomery BORINGSSL_PREFIX %+ _ec_simple_scalar_inv0_montgomery %xdefine ec_simple_scalar_to_montgomery_inv_vartime BORINGSSL_PREFIX %+ _ec_simple_scalar_to_montgomery_inv_vartime %xdefine ecdsa_sign_fixed BORINGSSL_PREFIX %+ _ecdsa_sign_fixed %xdefine ecdsa_sign_fixed_with_nonce_for_known_answer_test BORINGSSL_PREFIX %+ _ecdsa_sign_fixed_with_nonce_for_known_answer_test %xdefine ecdsa_verify_fixed BORINGSSL_PREFIX %+ _ecdsa_verify_fixed %xdefine ecdsa_verify_fixed_no_self_test BORINGSSL_PREFIX %+ _ecdsa_verify_fixed_no_self_test %xdefine ecp_nistz256_div_by_2 BORINGSSL_PREFIX %+ _ecp_nistz256_div_by_2 %xdefine ecp_nistz256_mul_by_2 BORINGSSL_PREFIX %+ _ecp_nistz256_mul_by_2 %xdefine ecp_nistz256_mul_by_3 BORINGSSL_PREFIX %+ _ecp_nistz256_mul_by_3 %xdefine ecp_nistz256_mul_mont BORINGSSL_PREFIX %+ _ecp_nistz256_mul_mont %xdefine ecp_nistz256_mul_mont_adx BORINGSSL_PREFIX %+ _ecp_nistz256_mul_mont_adx %xdefine ecp_nistz256_mul_mont_nohw BORINGSSL_PREFIX %+ _ecp_nistz256_mul_mont_nohw %xdefine ecp_nistz256_neg BORINGSSL_PREFIX %+ _ecp_nistz256_neg %xdefine ecp_nistz256_ord_mul_mont BORINGSSL_PREFIX %+ _ecp_nistz256_ord_mul_mont %xdefine ecp_nistz256_ord_mul_mont_adx BORINGSSL_PREFIX %+ _ecp_nistz256_ord_mul_mont_adx %xdefine ecp_nistz256_ord_mul_mont_nohw BORINGSSL_PREFIX %+ _ecp_nistz256_ord_mul_mont_nohw %xdefine ecp_nistz256_ord_sqr_mont BORINGSSL_PREFIX %+ _ecp_nistz256_ord_sqr_mont %xdefine ecp_nistz256_ord_sqr_mont_adx BORINGSSL_PREFIX %+ _ecp_nistz256_ord_sqr_mont_adx %xdefine ecp_nistz256_ord_sqr_mont_nohw BORINGSSL_PREFIX %+ _ecp_nistz256_ord_sqr_mont_nohw %xdefine ecp_nistz256_point_add BORINGSSL_PREFIX %+ _ecp_nistz256_point_add %xdefine ecp_nistz256_point_add_adx BORINGSSL_PREFIX %+ _ecp_nistz256_point_add_adx %xdefine ecp_nistz256_point_add_affine BORINGSSL_PREFIX %+ _ecp_nistz256_point_add_affine %xdefine ecp_nistz256_point_add_affine_adx BORINGSSL_PREFIX %+ 
_ecp_nistz256_point_add_affine_adx %xdefine ecp_nistz256_point_add_affine_nohw BORINGSSL_PREFIX %+ _ecp_nistz256_point_add_affine_nohw %xdefine ecp_nistz256_point_add_nohw BORINGSSL_PREFIX %+ _ecp_nistz256_point_add_nohw %xdefine ecp_nistz256_point_double BORINGSSL_PREFIX %+ _ecp_nistz256_point_double %xdefine ecp_nistz256_point_double_adx BORINGSSL_PREFIX %+ _ecp_nistz256_point_double_adx %xdefine ecp_nistz256_point_double_nohw BORINGSSL_PREFIX %+ _ecp_nistz256_point_double_nohw %xdefine ecp_nistz256_select_w5 BORINGSSL_PREFIX %+ _ecp_nistz256_select_w5 %xdefine ecp_nistz256_select_w5_avx2 BORINGSSL_PREFIX %+ _ecp_nistz256_select_w5_avx2 %xdefine ecp_nistz256_select_w5_nohw BORINGSSL_PREFIX %+ _ecp_nistz256_select_w5_nohw %xdefine ecp_nistz256_select_w7 BORINGSSL_PREFIX %+ _ecp_nistz256_select_w7 %xdefine ecp_nistz256_select_w7_avx2 BORINGSSL_PREFIX %+ _ecp_nistz256_select_w7_avx2 %xdefine ecp_nistz256_select_w7_nohw BORINGSSL_PREFIX %+ _ecp_nistz256_select_w7_nohw %xdefine ecp_nistz256_sqr_mont BORINGSSL_PREFIX %+ _ecp_nistz256_sqr_mont %xdefine ecp_nistz256_sqr_mont_adx BORINGSSL_PREFIX %+ _ecp_nistz256_sqr_mont_adx %xdefine ecp_nistz256_sqr_mont_nohw BORINGSSL_PREFIX %+ _ecp_nistz256_sqr_mont_nohw %xdefine ecp_nistz256_sub BORINGSSL_PREFIX %+ _ecp_nistz256_sub %xdefine ed25519_asn1_meth BORINGSSL_PREFIX %+ _ed25519_asn1_meth %xdefine ed25519_pkey_meth BORINGSSL_PREFIX %+ _ed25519_pkey_meth %xdefine evp_pkey_set_method BORINGSSL_PREFIX %+ _evp_pkey_set_method %xdefine fiat_curve25519_adx_mul BORINGSSL_PREFIX %+ _fiat_curve25519_adx_mul %xdefine fiat_curve25519_adx_square BORINGSSL_PREFIX %+ _fiat_curve25519_adx_square %xdefine fiat_p256_adx_mul BORINGSSL_PREFIX %+ _fiat_p256_adx_mul %xdefine fiat_p256_adx_sqr BORINGSSL_PREFIX %+ _fiat_p256_adx_sqr %xdefine gcm_ghash_avx BORINGSSL_PREFIX %+ _gcm_ghash_avx %xdefine gcm_ghash_clmul BORINGSSL_PREFIX %+ _gcm_ghash_clmul %xdefine gcm_ghash_neon BORINGSSL_PREFIX %+ _gcm_ghash_neon %xdefine gcm_ghash_nohw 
BORINGSSL_PREFIX %+ _gcm_ghash_nohw %xdefine gcm_ghash_ssse3 BORINGSSL_PREFIX %+ _gcm_ghash_ssse3 %xdefine gcm_ghash_v8 BORINGSSL_PREFIX %+ _gcm_ghash_v8 %xdefine gcm_ghash_vpclmulqdq_avx10_512 BORINGSSL_PREFIX %+ _gcm_ghash_vpclmulqdq_avx10_512 %xdefine gcm_ghash_vpclmulqdq_avx2 BORINGSSL_PREFIX %+ _gcm_ghash_vpclmulqdq_avx2 %xdefine gcm_gmult_avx BORINGSSL_PREFIX %+ _gcm_gmult_avx %xdefine gcm_gmult_clmul BORINGSSL_PREFIX %+ _gcm_gmult_clmul %xdefine gcm_gmult_neon BORINGSSL_PREFIX %+ _gcm_gmult_neon %xdefine gcm_gmult_nohw BORINGSSL_PREFIX %+ _gcm_gmult_nohw %xdefine gcm_gmult_ssse3 BORINGSSL_PREFIX %+ _gcm_gmult_ssse3 %xdefine gcm_gmult_v8 BORINGSSL_PREFIX %+ _gcm_gmult_v8 %xdefine gcm_gmult_vpclmulqdq_avx10 BORINGSSL_PREFIX %+ _gcm_gmult_vpclmulqdq_avx10 %xdefine gcm_gmult_vpclmulqdq_avx2 BORINGSSL_PREFIX %+ _gcm_gmult_vpclmulqdq_avx2 %xdefine gcm_init_avx BORINGSSL_PREFIX %+ _gcm_init_avx %xdefine gcm_init_clmul BORINGSSL_PREFIX %+ _gcm_init_clmul %xdefine gcm_init_neon BORINGSSL_PREFIX %+ _gcm_init_neon %xdefine gcm_init_nohw BORINGSSL_PREFIX %+ _gcm_init_nohw %xdefine gcm_init_ssse3 BORINGSSL_PREFIX %+ _gcm_init_ssse3 %xdefine gcm_init_v8 BORINGSSL_PREFIX %+ _gcm_init_v8 %xdefine gcm_init_vpclmulqdq_avx10_512 BORINGSSL_PREFIX %+ _gcm_init_vpclmulqdq_avx10_512 %xdefine gcm_init_vpclmulqdq_avx2 BORINGSSL_PREFIX %+ _gcm_init_vpclmulqdq_avx2 %xdefine gcm_neon_capable BORINGSSL_PREFIX %+ _gcm_neon_capable %xdefine gcm_pmull_capable BORINGSSL_PREFIX %+ _gcm_pmull_capable %xdefine have_fast_rdrand BORINGSSL_PREFIX %+ _have_fast_rdrand %xdefine have_rdrand BORINGSSL_PREFIX %+ _have_rdrand %xdefine hkdf_pkey_meth BORINGSSL_PREFIX %+ _hkdf_pkey_meth %xdefine hwaes_capable BORINGSSL_PREFIX %+ _hwaes_capable %xdefine i2a_ASN1_ENUMERATED BORINGSSL_PREFIX %+ _i2a_ASN1_ENUMERATED %xdefine i2a_ASN1_INTEGER BORINGSSL_PREFIX %+ _i2a_ASN1_INTEGER %xdefine i2a_ASN1_OBJECT BORINGSSL_PREFIX %+ _i2a_ASN1_OBJECT %xdefine i2a_ASN1_STRING BORINGSSL_PREFIX %+ _i2a_ASN1_STRING 
%xdefine i2c_ASN1_BIT_STRING BORINGSSL_PREFIX %+ _i2c_ASN1_BIT_STRING %xdefine i2c_ASN1_INTEGER BORINGSSL_PREFIX %+ _i2c_ASN1_INTEGER %xdefine i2d_ASN1_BIT_STRING BORINGSSL_PREFIX %+ _i2d_ASN1_BIT_STRING %xdefine i2d_ASN1_BMPSTRING BORINGSSL_PREFIX %+ _i2d_ASN1_BMPSTRING %xdefine i2d_ASN1_BOOLEAN BORINGSSL_PREFIX %+ _i2d_ASN1_BOOLEAN %xdefine i2d_ASN1_ENUMERATED BORINGSSL_PREFIX %+ _i2d_ASN1_ENUMERATED %xdefine i2d_ASN1_GENERALIZEDTIME BORINGSSL_PREFIX %+ _i2d_ASN1_GENERALIZEDTIME %xdefine i2d_ASN1_GENERALSTRING BORINGSSL_PREFIX %+ _i2d_ASN1_GENERALSTRING %xdefine i2d_ASN1_IA5STRING BORINGSSL_PREFIX %+ _i2d_ASN1_IA5STRING %xdefine i2d_ASN1_INTEGER BORINGSSL_PREFIX %+ _i2d_ASN1_INTEGER %xdefine i2d_ASN1_NULL BORINGSSL_PREFIX %+ _i2d_ASN1_NULL %xdefine i2d_ASN1_OBJECT BORINGSSL_PREFIX %+ _i2d_ASN1_OBJECT %xdefine i2d_ASN1_OCTET_STRING BORINGSSL_PREFIX %+ _i2d_ASN1_OCTET_STRING %xdefine i2d_ASN1_PRINTABLE BORINGSSL_PREFIX %+ _i2d_ASN1_PRINTABLE %xdefine i2d_ASN1_PRINTABLESTRING BORINGSSL_PREFIX %+ _i2d_ASN1_PRINTABLESTRING %xdefine i2d_ASN1_SEQUENCE_ANY BORINGSSL_PREFIX %+ _i2d_ASN1_SEQUENCE_ANY %xdefine i2d_ASN1_SET_ANY BORINGSSL_PREFIX %+ _i2d_ASN1_SET_ANY %xdefine i2d_ASN1_T61STRING BORINGSSL_PREFIX %+ _i2d_ASN1_T61STRING %xdefine i2d_ASN1_TIME BORINGSSL_PREFIX %+ _i2d_ASN1_TIME %xdefine i2d_ASN1_TYPE BORINGSSL_PREFIX %+ _i2d_ASN1_TYPE %xdefine i2d_ASN1_UNIVERSALSTRING BORINGSSL_PREFIX %+ _i2d_ASN1_UNIVERSALSTRING %xdefine i2d_ASN1_UTCTIME BORINGSSL_PREFIX %+ _i2d_ASN1_UTCTIME %xdefine i2d_ASN1_UTF8STRING BORINGSSL_PREFIX %+ _i2d_ASN1_UTF8STRING %xdefine i2d_ASN1_VISIBLESTRING BORINGSSL_PREFIX %+ _i2d_ASN1_VISIBLESTRING %xdefine i2d_AUTHORITY_INFO_ACCESS BORINGSSL_PREFIX %+ _i2d_AUTHORITY_INFO_ACCESS %xdefine i2d_AUTHORITY_KEYID BORINGSSL_PREFIX %+ _i2d_AUTHORITY_KEYID %xdefine i2d_BASIC_CONSTRAINTS BORINGSSL_PREFIX %+ _i2d_BASIC_CONSTRAINTS %xdefine i2d_CERTIFICATEPOLICIES BORINGSSL_PREFIX %+ _i2d_CERTIFICATEPOLICIES %xdefine i2d_CRL_DIST_POINTS BORINGSSL_PREFIX 
%+ _i2d_CRL_DIST_POINTS %xdefine i2d_DHparams BORINGSSL_PREFIX %+ _i2d_DHparams %xdefine i2d_DHparams_bio BORINGSSL_PREFIX %+ _i2d_DHparams_bio %xdefine i2d_DIRECTORYSTRING BORINGSSL_PREFIX %+ _i2d_DIRECTORYSTRING %xdefine i2d_DISPLAYTEXT BORINGSSL_PREFIX %+ _i2d_DISPLAYTEXT %xdefine i2d_DSAPrivateKey BORINGSSL_PREFIX %+ _i2d_DSAPrivateKey %xdefine i2d_DSAPrivateKey_bio BORINGSSL_PREFIX %+ _i2d_DSAPrivateKey_bio %xdefine i2d_DSAPrivateKey_fp BORINGSSL_PREFIX %+ _i2d_DSAPrivateKey_fp %xdefine i2d_DSAPublicKey BORINGSSL_PREFIX %+ _i2d_DSAPublicKey %xdefine i2d_DSA_PUBKEY BORINGSSL_PREFIX %+ _i2d_DSA_PUBKEY %xdefine i2d_DSA_PUBKEY_bio BORINGSSL_PREFIX %+ _i2d_DSA_PUBKEY_bio %xdefine i2d_DSA_PUBKEY_fp BORINGSSL_PREFIX %+ _i2d_DSA_PUBKEY_fp %xdefine i2d_DSA_SIG BORINGSSL_PREFIX %+ _i2d_DSA_SIG %xdefine i2d_DSAparams BORINGSSL_PREFIX %+ _i2d_DSAparams %xdefine i2d_ECDSA_SIG BORINGSSL_PREFIX %+ _i2d_ECDSA_SIG %xdefine i2d_ECPKParameters BORINGSSL_PREFIX %+ _i2d_ECPKParameters %xdefine i2d_ECParameters BORINGSSL_PREFIX %+ _i2d_ECParameters %xdefine i2d_ECPrivateKey BORINGSSL_PREFIX %+ _i2d_ECPrivateKey %xdefine i2d_ECPrivateKey_bio BORINGSSL_PREFIX %+ _i2d_ECPrivateKey_bio %xdefine i2d_ECPrivateKey_fp BORINGSSL_PREFIX %+ _i2d_ECPrivateKey_fp %xdefine i2d_EC_PUBKEY BORINGSSL_PREFIX %+ _i2d_EC_PUBKEY %xdefine i2d_EC_PUBKEY_bio BORINGSSL_PREFIX %+ _i2d_EC_PUBKEY_bio %xdefine i2d_EC_PUBKEY_fp BORINGSSL_PREFIX %+ _i2d_EC_PUBKEY_fp %xdefine i2d_EXTENDED_KEY_USAGE BORINGSSL_PREFIX %+ _i2d_EXTENDED_KEY_USAGE %xdefine i2d_GENERAL_NAME BORINGSSL_PREFIX %+ _i2d_GENERAL_NAME %xdefine i2d_GENERAL_NAMES BORINGSSL_PREFIX %+ _i2d_GENERAL_NAMES %xdefine i2d_ISSUING_DIST_POINT BORINGSSL_PREFIX %+ _i2d_ISSUING_DIST_POINT %xdefine i2d_NETSCAPE_SPKAC BORINGSSL_PREFIX %+ _i2d_NETSCAPE_SPKAC %xdefine i2d_NETSCAPE_SPKI BORINGSSL_PREFIX %+ _i2d_NETSCAPE_SPKI %xdefine i2d_PKCS12 BORINGSSL_PREFIX %+ _i2d_PKCS12 %xdefine i2d_PKCS12_bio BORINGSSL_PREFIX %+ _i2d_PKCS12_bio %xdefine i2d_PKCS12_fp 
BORINGSSL_PREFIX %+ _i2d_PKCS12_fp %xdefine i2d_PKCS7 BORINGSSL_PREFIX %+ _i2d_PKCS7 %xdefine i2d_PKCS7_bio BORINGSSL_PREFIX %+ _i2d_PKCS7_bio %xdefine i2d_PKCS8PrivateKeyInfo_bio BORINGSSL_PREFIX %+ _i2d_PKCS8PrivateKeyInfo_bio %xdefine i2d_PKCS8PrivateKeyInfo_fp BORINGSSL_PREFIX %+ _i2d_PKCS8PrivateKeyInfo_fp %xdefine i2d_PKCS8PrivateKey_bio BORINGSSL_PREFIX %+ _i2d_PKCS8PrivateKey_bio %xdefine i2d_PKCS8PrivateKey_fp BORINGSSL_PREFIX %+ _i2d_PKCS8PrivateKey_fp %xdefine i2d_PKCS8PrivateKey_nid_bio BORINGSSL_PREFIX %+ _i2d_PKCS8PrivateKey_nid_bio %xdefine i2d_PKCS8PrivateKey_nid_fp BORINGSSL_PREFIX %+ _i2d_PKCS8PrivateKey_nid_fp %xdefine i2d_PKCS8_PRIV_KEY_INFO BORINGSSL_PREFIX %+ _i2d_PKCS8_PRIV_KEY_INFO %xdefine i2d_PKCS8_PRIV_KEY_INFO_bio BORINGSSL_PREFIX %+ _i2d_PKCS8_PRIV_KEY_INFO_bio %xdefine i2d_PKCS8_PRIV_KEY_INFO_fp BORINGSSL_PREFIX %+ _i2d_PKCS8_PRIV_KEY_INFO_fp %xdefine i2d_PKCS8_bio BORINGSSL_PREFIX %+ _i2d_PKCS8_bio %xdefine i2d_PKCS8_fp BORINGSSL_PREFIX %+ _i2d_PKCS8_fp %xdefine i2d_PUBKEY BORINGSSL_PREFIX %+ _i2d_PUBKEY %xdefine i2d_PUBKEY_bio BORINGSSL_PREFIX %+ _i2d_PUBKEY_bio %xdefine i2d_PUBKEY_fp BORINGSSL_PREFIX %+ _i2d_PUBKEY_fp %xdefine i2d_PrivateKey BORINGSSL_PREFIX %+ _i2d_PrivateKey %xdefine i2d_PrivateKey_bio BORINGSSL_PREFIX %+ _i2d_PrivateKey_bio %xdefine i2d_PrivateKey_fp BORINGSSL_PREFIX %+ _i2d_PrivateKey_fp %xdefine i2d_PublicKey BORINGSSL_PREFIX %+ _i2d_PublicKey %xdefine i2d_RSAPrivateKey BORINGSSL_PREFIX %+ _i2d_RSAPrivateKey %xdefine i2d_RSAPrivateKey_bio BORINGSSL_PREFIX %+ _i2d_RSAPrivateKey_bio %xdefine i2d_RSAPrivateKey_fp BORINGSSL_PREFIX %+ _i2d_RSAPrivateKey_fp %xdefine i2d_RSAPublicKey BORINGSSL_PREFIX %+ _i2d_RSAPublicKey %xdefine i2d_RSAPublicKey_bio BORINGSSL_PREFIX %+ _i2d_RSAPublicKey_bio %xdefine i2d_RSAPublicKey_fp BORINGSSL_PREFIX %+ _i2d_RSAPublicKey_fp %xdefine i2d_RSA_PSS_PARAMS BORINGSSL_PREFIX %+ _i2d_RSA_PSS_PARAMS %xdefine i2d_RSA_PUBKEY BORINGSSL_PREFIX %+ _i2d_RSA_PUBKEY %xdefine i2d_RSA_PUBKEY_bio 
BORINGSSL_PREFIX %+ _i2d_RSA_PUBKEY_bio %xdefine i2d_RSA_PUBKEY_fp BORINGSSL_PREFIX %+ _i2d_RSA_PUBKEY_fp %xdefine i2d_SSL_SESSION BORINGSSL_PREFIX %+ _i2d_SSL_SESSION %xdefine i2d_SSL_SESSION_bio BORINGSSL_PREFIX %+ _i2d_SSL_SESSION_bio %xdefine i2d_X509 BORINGSSL_PREFIX %+ _i2d_X509 %xdefine i2d_X509_ALGOR BORINGSSL_PREFIX %+ _i2d_X509_ALGOR %xdefine i2d_X509_ATTRIBUTE BORINGSSL_PREFIX %+ _i2d_X509_ATTRIBUTE %xdefine i2d_X509_AUX BORINGSSL_PREFIX %+ _i2d_X509_AUX %xdefine i2d_X509_CERT_AUX BORINGSSL_PREFIX %+ _i2d_X509_CERT_AUX %xdefine i2d_X509_CINF BORINGSSL_PREFIX %+ _i2d_X509_CINF %xdefine i2d_X509_CRL BORINGSSL_PREFIX %+ _i2d_X509_CRL %xdefine i2d_X509_CRL_INFO BORINGSSL_PREFIX %+ _i2d_X509_CRL_INFO %xdefine i2d_X509_CRL_bio BORINGSSL_PREFIX %+ _i2d_X509_CRL_bio %xdefine i2d_X509_CRL_fp BORINGSSL_PREFIX %+ _i2d_X509_CRL_fp %xdefine i2d_X509_CRL_tbs BORINGSSL_PREFIX %+ _i2d_X509_CRL_tbs %xdefine i2d_X509_EXTENSION BORINGSSL_PREFIX %+ _i2d_X509_EXTENSION %xdefine i2d_X509_EXTENSIONS BORINGSSL_PREFIX %+ _i2d_X509_EXTENSIONS %xdefine i2d_X509_NAME BORINGSSL_PREFIX %+ _i2d_X509_NAME %xdefine i2d_X509_PUBKEY BORINGSSL_PREFIX %+ _i2d_X509_PUBKEY %xdefine i2d_X509_REQ BORINGSSL_PREFIX %+ _i2d_X509_REQ %xdefine i2d_X509_REQ_INFO BORINGSSL_PREFIX %+ _i2d_X509_REQ_INFO %xdefine i2d_X509_REQ_bio BORINGSSL_PREFIX %+ _i2d_X509_REQ_bio %xdefine i2d_X509_REQ_fp BORINGSSL_PREFIX %+ _i2d_X509_REQ_fp %xdefine i2d_X509_REVOKED BORINGSSL_PREFIX %+ _i2d_X509_REVOKED %xdefine i2d_X509_SIG BORINGSSL_PREFIX %+ _i2d_X509_SIG %xdefine i2d_X509_VAL BORINGSSL_PREFIX %+ _i2d_X509_VAL %xdefine i2d_X509_bio BORINGSSL_PREFIX %+ _i2d_X509_bio %xdefine i2d_X509_fp BORINGSSL_PREFIX %+ _i2d_X509_fp %xdefine i2d_X509_tbs BORINGSSL_PREFIX %+ _i2d_X509_tbs %xdefine i2d_re_X509_CRL_tbs BORINGSSL_PREFIX %+ _i2d_re_X509_CRL_tbs %xdefine i2d_re_X509_REQ_tbs BORINGSSL_PREFIX %+ _i2d_re_X509_REQ_tbs %xdefine i2d_re_X509_tbs BORINGSSL_PREFIX %+ _i2d_re_X509_tbs %xdefine i2o_ECPublicKey BORINGSSL_PREFIX 
%+ _i2o_ECPublicKey %xdefine i2s_ASN1_ENUMERATED BORINGSSL_PREFIX %+ _i2s_ASN1_ENUMERATED %xdefine i2s_ASN1_INTEGER BORINGSSL_PREFIX %+ _i2s_ASN1_INTEGER %xdefine i2s_ASN1_OCTET_STRING BORINGSSL_PREFIX %+ _i2s_ASN1_OCTET_STRING %xdefine i2t_ASN1_OBJECT BORINGSSL_PREFIX %+ _i2t_ASN1_OBJECT %xdefine i2v_GENERAL_NAME BORINGSSL_PREFIX %+ _i2v_GENERAL_NAME %xdefine i2v_GENERAL_NAMES BORINGSSL_PREFIX %+ _i2v_GENERAL_NAMES %xdefine k25519Precomp BORINGSSL_PREFIX %+ _k25519Precomp %xdefine kBoringSSLRSASqrtTwo BORINGSSL_PREFIX %+ _kBoringSSLRSASqrtTwo %xdefine kBoringSSLRSASqrtTwoLen BORINGSSL_PREFIX %+ _kBoringSSLRSASqrtTwoLen %xdefine kOpenSSLReasonStringData BORINGSSL_PREFIX %+ _kOpenSSLReasonStringData %xdefine kOpenSSLReasonValues BORINGSSL_PREFIX %+ _kOpenSSLReasonValues %xdefine kOpenSSLReasonValuesLen BORINGSSL_PREFIX %+ _kOpenSSLReasonValuesLen %xdefine lh_CONF_SECTION_call_cmp_func BORINGSSL_PREFIX %+ _lh_CONF_SECTION_call_cmp_func %xdefine lh_CONF_SECTION_call_doall_arg BORINGSSL_PREFIX %+ _lh_CONF_SECTION_call_doall_arg %xdefine lh_CONF_SECTION_call_hash_func BORINGSSL_PREFIX %+ _lh_CONF_SECTION_call_hash_func %xdefine lh_CONF_SECTION_doall_arg BORINGSSL_PREFIX %+ _lh_CONF_SECTION_doall_arg %xdefine lh_CONF_SECTION_free BORINGSSL_PREFIX %+ _lh_CONF_SECTION_free %xdefine lh_CONF_SECTION_insert BORINGSSL_PREFIX %+ _lh_CONF_SECTION_insert %xdefine lh_CONF_SECTION_new BORINGSSL_PREFIX %+ _lh_CONF_SECTION_new %xdefine lh_CONF_SECTION_retrieve BORINGSSL_PREFIX %+ _lh_CONF_SECTION_retrieve %xdefine lh_CONF_VALUE_call_cmp_func BORINGSSL_PREFIX %+ _lh_CONF_VALUE_call_cmp_func %xdefine lh_CONF_VALUE_call_doall_arg BORINGSSL_PREFIX %+ _lh_CONF_VALUE_call_doall_arg %xdefine lh_CONF_VALUE_call_hash_func BORINGSSL_PREFIX %+ _lh_CONF_VALUE_call_hash_func %xdefine lh_CONF_VALUE_doall_arg BORINGSSL_PREFIX %+ _lh_CONF_VALUE_doall_arg %xdefine lh_CONF_VALUE_free BORINGSSL_PREFIX %+ _lh_CONF_VALUE_free %xdefine lh_CONF_VALUE_insert BORINGSSL_PREFIX %+ _lh_CONF_VALUE_insert 
%xdefine lh_CONF_VALUE_new BORINGSSL_PREFIX %+ _lh_CONF_VALUE_new %xdefine lh_CONF_VALUE_retrieve BORINGSSL_PREFIX %+ _lh_CONF_VALUE_retrieve %xdefine lh_CRYPTO_BUFFER_call_cmp_func BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_call_cmp_func %xdefine lh_CRYPTO_BUFFER_call_hash_func BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_call_hash_func %xdefine lh_CRYPTO_BUFFER_delete BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_delete %xdefine lh_CRYPTO_BUFFER_free BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_free %xdefine lh_CRYPTO_BUFFER_insert BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_insert %xdefine lh_CRYPTO_BUFFER_new BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_new %xdefine lh_CRYPTO_BUFFER_num_items BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_num_items %xdefine lh_CRYPTO_BUFFER_retrieve BORINGSSL_PREFIX %+ _lh_CRYPTO_BUFFER_retrieve %xdefine md5_block_asm_data_order BORINGSSL_PREFIX %+ _md5_block_asm_data_order %xdefine o2i_ECPublicKey BORINGSSL_PREFIX %+ _o2i_ECPublicKey %xdefine pkcs12_iterations_acceptable BORINGSSL_PREFIX %+ _pkcs12_iterations_acceptable %xdefine pkcs12_key_gen BORINGSSL_PREFIX %+ _pkcs12_key_gen %xdefine pkcs12_pbe_encrypt_init BORINGSSL_PREFIX %+ _pkcs12_pbe_encrypt_init %xdefine pkcs7_add_signed_data BORINGSSL_PREFIX %+ _pkcs7_add_signed_data %xdefine pkcs7_parse_header BORINGSSL_PREFIX %+ _pkcs7_parse_header %xdefine pkcs8_pbe_decrypt BORINGSSL_PREFIX %+ _pkcs8_pbe_decrypt %xdefine pmbtoken_exp1_blind BORINGSSL_PREFIX %+ _pmbtoken_exp1_blind %xdefine pmbtoken_exp1_client_key_from_bytes BORINGSSL_PREFIX %+ _pmbtoken_exp1_client_key_from_bytes %xdefine pmbtoken_exp1_derive_key_from_secret BORINGSSL_PREFIX %+ _pmbtoken_exp1_derive_key_from_secret %xdefine pmbtoken_exp1_generate_key BORINGSSL_PREFIX %+ _pmbtoken_exp1_generate_key %xdefine pmbtoken_exp1_get_h_for_testing BORINGSSL_PREFIX %+ _pmbtoken_exp1_get_h_for_testing %xdefine pmbtoken_exp1_issuer_key_from_bytes BORINGSSL_PREFIX %+ _pmbtoken_exp1_issuer_key_from_bytes %xdefine pmbtoken_exp1_read BORINGSSL_PREFIX %+ _pmbtoken_exp1_read 
%xdefine pmbtoken_exp1_sign BORINGSSL_PREFIX %+ _pmbtoken_exp1_sign %xdefine pmbtoken_exp1_unblind BORINGSSL_PREFIX %+ _pmbtoken_exp1_unblind %xdefine pmbtoken_exp2_blind BORINGSSL_PREFIX %+ _pmbtoken_exp2_blind %xdefine pmbtoken_exp2_client_key_from_bytes BORINGSSL_PREFIX %+ _pmbtoken_exp2_client_key_from_bytes %xdefine pmbtoken_exp2_derive_key_from_secret BORINGSSL_PREFIX %+ _pmbtoken_exp2_derive_key_from_secret %xdefine pmbtoken_exp2_generate_key BORINGSSL_PREFIX %+ _pmbtoken_exp2_generate_key %xdefine pmbtoken_exp2_get_h_for_testing BORINGSSL_PREFIX %+ _pmbtoken_exp2_get_h_for_testing %xdefine pmbtoken_exp2_issuer_key_from_bytes BORINGSSL_PREFIX %+ _pmbtoken_exp2_issuer_key_from_bytes %xdefine pmbtoken_exp2_read BORINGSSL_PREFIX %+ _pmbtoken_exp2_read %xdefine pmbtoken_exp2_sign BORINGSSL_PREFIX %+ _pmbtoken_exp2_sign %xdefine pmbtoken_exp2_unblind BORINGSSL_PREFIX %+ _pmbtoken_exp2_unblind %xdefine pmbtoken_pst1_blind BORINGSSL_PREFIX %+ _pmbtoken_pst1_blind %xdefine pmbtoken_pst1_client_key_from_bytes BORINGSSL_PREFIX %+ _pmbtoken_pst1_client_key_from_bytes %xdefine pmbtoken_pst1_derive_key_from_secret BORINGSSL_PREFIX %+ _pmbtoken_pst1_derive_key_from_secret %xdefine pmbtoken_pst1_generate_key BORINGSSL_PREFIX %+ _pmbtoken_pst1_generate_key %xdefine pmbtoken_pst1_get_h_for_testing BORINGSSL_PREFIX %+ _pmbtoken_pst1_get_h_for_testing %xdefine pmbtoken_pst1_issuer_key_from_bytes BORINGSSL_PREFIX %+ _pmbtoken_pst1_issuer_key_from_bytes %xdefine pmbtoken_pst1_read BORINGSSL_PREFIX %+ _pmbtoken_pst1_read %xdefine pmbtoken_pst1_sign BORINGSSL_PREFIX %+ _pmbtoken_pst1_sign %xdefine pmbtoken_pst1_unblind BORINGSSL_PREFIX %+ _pmbtoken_pst1_unblind %xdefine poly_Rq_mul BORINGSSL_PREFIX %+ _poly_Rq_mul %xdefine rand_fork_unsafe_buffering_enabled BORINGSSL_PREFIX %+ _rand_fork_unsafe_buffering_enabled %xdefine rsa_asn1_meth BORINGSSL_PREFIX %+ _rsa_asn1_meth %xdefine rsa_check_public_key BORINGSSL_PREFIX %+ _rsa_check_public_key %xdefine rsa_default_private_transform 
BORINGSSL_PREFIX %+ _rsa_default_private_transform %xdefine rsa_default_sign_raw BORINGSSL_PREFIX %+ _rsa_default_sign_raw %xdefine rsa_invalidate_key BORINGSSL_PREFIX %+ _rsa_invalidate_key %xdefine rsa_pkey_meth BORINGSSL_PREFIX %+ _rsa_pkey_meth %xdefine rsa_private_transform BORINGSSL_PREFIX %+ _rsa_private_transform %xdefine rsa_private_transform_no_self_test BORINGSSL_PREFIX %+ _rsa_private_transform_no_self_test %xdefine rsa_sign_no_self_test BORINGSSL_PREFIX %+ _rsa_sign_no_self_test %xdefine rsa_verify_no_self_test BORINGSSL_PREFIX %+ _rsa_verify_no_self_test %xdefine rsa_verify_raw_no_self_test BORINGSSL_PREFIX %+ _rsa_verify_raw_no_self_test %xdefine rsaz_1024_gather5_avx2 BORINGSSL_PREFIX %+ _rsaz_1024_gather5_avx2 %xdefine rsaz_1024_mul_avx2 BORINGSSL_PREFIX %+ _rsaz_1024_mul_avx2 %xdefine rsaz_1024_norm2red_avx2 BORINGSSL_PREFIX %+ _rsaz_1024_norm2red_avx2 %xdefine rsaz_1024_red2norm_avx2 BORINGSSL_PREFIX %+ _rsaz_1024_red2norm_avx2 %xdefine rsaz_1024_scatter5_avx2 BORINGSSL_PREFIX %+ _rsaz_1024_scatter5_avx2 %xdefine rsaz_1024_sqr_avx2 BORINGSSL_PREFIX %+ _rsaz_1024_sqr_avx2 %xdefine rsaz_avx2_preferred BORINGSSL_PREFIX %+ _rsaz_avx2_preferred %xdefine s2i_ASN1_INTEGER BORINGSSL_PREFIX %+ _s2i_ASN1_INTEGER %xdefine s2i_ASN1_OCTET_STRING BORINGSSL_PREFIX %+ _s2i_ASN1_OCTET_STRING %xdefine sha1_avx2_capable BORINGSSL_PREFIX %+ _sha1_avx2_capable %xdefine sha1_avx_capable BORINGSSL_PREFIX %+ _sha1_avx_capable %xdefine sha1_block_data_order_avx BORINGSSL_PREFIX %+ _sha1_block_data_order_avx %xdefine sha1_block_data_order_avx2 BORINGSSL_PREFIX %+ _sha1_block_data_order_avx2 %xdefine sha1_block_data_order_hw BORINGSSL_PREFIX %+ _sha1_block_data_order_hw %xdefine sha1_block_data_order_nohw BORINGSSL_PREFIX %+ _sha1_block_data_order_nohw %xdefine sha1_block_data_order_ssse3 BORINGSSL_PREFIX %+ _sha1_block_data_order_ssse3 %xdefine sha1_hw_capable BORINGSSL_PREFIX %+ _sha1_hw_capable %xdefine sha1_ssse3_capable BORINGSSL_PREFIX %+ _sha1_ssse3_capable %xdefine 
sha256_avx_capable BORINGSSL_PREFIX %+ _sha256_avx_capable %xdefine sha256_block_data_order_avx BORINGSSL_PREFIX %+ _sha256_block_data_order_avx %xdefine sha256_block_data_order_hw BORINGSSL_PREFIX %+ _sha256_block_data_order_hw %xdefine sha256_block_data_order_nohw BORINGSSL_PREFIX %+ _sha256_block_data_order_nohw %xdefine sha256_block_data_order_ssse3 BORINGSSL_PREFIX %+ _sha256_block_data_order_ssse3 %xdefine sha256_hw_capable BORINGSSL_PREFIX %+ _sha256_hw_capable %xdefine sha256_ssse3_capable BORINGSSL_PREFIX %+ _sha256_ssse3_capable %xdefine sha512_avx_capable BORINGSSL_PREFIX %+ _sha512_avx_capable %xdefine sha512_block_data_order_avx BORINGSSL_PREFIX %+ _sha512_block_data_order_avx %xdefine sha512_block_data_order_hw BORINGSSL_PREFIX %+ _sha512_block_data_order_hw %xdefine sha512_block_data_order_nohw BORINGSSL_PREFIX %+ _sha512_block_data_order_nohw %xdefine sha512_hw_capable BORINGSSL_PREFIX %+ _sha512_hw_capable %xdefine sk_ACCESS_DESCRIPTION_call_free_func BORINGSSL_PREFIX %+ _sk_ACCESS_DESCRIPTION_call_free_func %xdefine sk_ACCESS_DESCRIPTION_new_null BORINGSSL_PREFIX %+ _sk_ACCESS_DESCRIPTION_new_null %xdefine sk_ACCESS_DESCRIPTION_num BORINGSSL_PREFIX %+ _sk_ACCESS_DESCRIPTION_num %xdefine sk_ACCESS_DESCRIPTION_pop_free BORINGSSL_PREFIX %+ _sk_ACCESS_DESCRIPTION_pop_free %xdefine sk_ACCESS_DESCRIPTION_push BORINGSSL_PREFIX %+ _sk_ACCESS_DESCRIPTION_push %xdefine sk_ACCESS_DESCRIPTION_value BORINGSSL_PREFIX %+ _sk_ACCESS_DESCRIPTION_value %xdefine sk_ASN1_INTEGER_num BORINGSSL_PREFIX %+ _sk_ASN1_INTEGER_num %xdefine sk_ASN1_INTEGER_push BORINGSSL_PREFIX %+ _sk_ASN1_INTEGER_push %xdefine sk_ASN1_INTEGER_value BORINGSSL_PREFIX %+ _sk_ASN1_INTEGER_value %xdefine sk_ASN1_OBJECT_call_cmp_func BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_call_cmp_func %xdefine sk_ASN1_OBJECT_call_copy_func BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_call_copy_func %xdefine sk_ASN1_OBJECT_call_free_func BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_call_free_func %xdefine sk_ASN1_OBJECT_deep_copy 
BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_deep_copy %xdefine sk_ASN1_OBJECT_dup BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_dup %xdefine sk_ASN1_OBJECT_find BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_find %xdefine sk_ASN1_OBJECT_free BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_free %xdefine sk_ASN1_OBJECT_is_sorted BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_is_sorted %xdefine sk_ASN1_OBJECT_new_null BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_new_null %xdefine sk_ASN1_OBJECT_num BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_num %xdefine sk_ASN1_OBJECT_pop_free BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_pop_free %xdefine sk_ASN1_OBJECT_push BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_push %xdefine sk_ASN1_OBJECT_set_cmp_func BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_set_cmp_func %xdefine sk_ASN1_OBJECT_sort BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_sort %xdefine sk_ASN1_OBJECT_value BORINGSSL_PREFIX %+ _sk_ASN1_OBJECT_value %xdefine sk_ASN1_TYPE_num BORINGSSL_PREFIX %+ _sk_ASN1_TYPE_num %xdefine sk_ASN1_TYPE_push BORINGSSL_PREFIX %+ _sk_ASN1_TYPE_push %xdefine sk_ASN1_TYPE_value BORINGSSL_PREFIX %+ _sk_ASN1_TYPE_value %xdefine sk_ASN1_VALUE_free BORINGSSL_PREFIX %+ _sk_ASN1_VALUE_free %xdefine sk_ASN1_VALUE_new_null BORINGSSL_PREFIX %+ _sk_ASN1_VALUE_new_null %xdefine sk_ASN1_VALUE_num BORINGSSL_PREFIX %+ _sk_ASN1_VALUE_num %xdefine sk_ASN1_VALUE_pop BORINGSSL_PREFIX %+ _sk_ASN1_VALUE_pop %xdefine sk_ASN1_VALUE_push BORINGSSL_PREFIX %+ _sk_ASN1_VALUE_push %xdefine sk_ASN1_VALUE_value BORINGSSL_PREFIX %+ _sk_ASN1_VALUE_value %xdefine sk_CONF_VALUE_call_free_func BORINGSSL_PREFIX %+ _sk_CONF_VALUE_call_free_func %xdefine sk_CONF_VALUE_delete_ptr BORINGSSL_PREFIX %+ _sk_CONF_VALUE_delete_ptr %xdefine sk_CONF_VALUE_free BORINGSSL_PREFIX %+ _sk_CONF_VALUE_free %xdefine sk_CONF_VALUE_new_null BORINGSSL_PREFIX %+ _sk_CONF_VALUE_new_null %xdefine sk_CONF_VALUE_num BORINGSSL_PREFIX %+ _sk_CONF_VALUE_num %xdefine sk_CONF_VALUE_pop BORINGSSL_PREFIX %+ _sk_CONF_VALUE_pop %xdefine sk_CONF_VALUE_pop_free BORINGSSL_PREFIX %+ _sk_CONF_VALUE_pop_free %xdefine 
sk_CONF_VALUE_push BORINGSSL_PREFIX %+ _sk_CONF_VALUE_push %xdefine sk_CONF_VALUE_value BORINGSSL_PREFIX %+ _sk_CONF_VALUE_value %xdefine sk_CRYPTO_BUFFER_call_copy_func BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_call_copy_func %xdefine sk_CRYPTO_BUFFER_call_free_func BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_call_free_func %xdefine sk_CRYPTO_BUFFER_deep_copy BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_deep_copy %xdefine sk_CRYPTO_BUFFER_new_null BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_new_null %xdefine sk_CRYPTO_BUFFER_num BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_num %xdefine sk_CRYPTO_BUFFER_pop BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_pop %xdefine sk_CRYPTO_BUFFER_pop_free BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_pop_free %xdefine sk_CRYPTO_BUFFER_push BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_push %xdefine sk_CRYPTO_BUFFER_set BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_set %xdefine sk_CRYPTO_BUFFER_value BORINGSSL_PREFIX %+ _sk_CRYPTO_BUFFER_value %xdefine sk_DIST_POINT_call_free_func BORINGSSL_PREFIX %+ _sk_DIST_POINT_call_free_func %xdefine sk_DIST_POINT_new_null BORINGSSL_PREFIX %+ _sk_DIST_POINT_new_null %xdefine sk_DIST_POINT_num BORINGSSL_PREFIX %+ _sk_DIST_POINT_num %xdefine sk_DIST_POINT_pop_free BORINGSSL_PREFIX %+ _sk_DIST_POINT_pop_free %xdefine sk_DIST_POINT_push BORINGSSL_PREFIX %+ _sk_DIST_POINT_push %xdefine sk_DIST_POINT_value BORINGSSL_PREFIX %+ _sk_DIST_POINT_value %xdefine sk_GENERAL_NAME_call_free_func BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_call_free_func %xdefine sk_GENERAL_NAME_new_null BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_new_null %xdefine sk_GENERAL_NAME_num BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_num %xdefine sk_GENERAL_NAME_pop_free BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_pop_free %xdefine sk_GENERAL_NAME_push BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_push %xdefine sk_GENERAL_NAME_set BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_set %xdefine sk_GENERAL_NAME_value BORINGSSL_PREFIX %+ _sk_GENERAL_NAME_value %xdefine sk_GENERAL_SUBTREE_new_null BORINGSSL_PREFIX %+ _sk_GENERAL_SUBTREE_new_null 
%xdefine sk_GENERAL_SUBTREE_num BORINGSSL_PREFIX %+ _sk_GENERAL_SUBTREE_num %xdefine sk_GENERAL_SUBTREE_push BORINGSSL_PREFIX %+ _sk_GENERAL_SUBTREE_push %xdefine sk_GENERAL_SUBTREE_value BORINGSSL_PREFIX %+ _sk_GENERAL_SUBTREE_value %xdefine sk_OPENSSL_STRING_call_cmp_func BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_call_cmp_func %xdefine sk_OPENSSL_STRING_call_copy_func BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_call_copy_func %xdefine sk_OPENSSL_STRING_call_free_func BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_call_free_func %xdefine sk_OPENSSL_STRING_deep_copy BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_deep_copy %xdefine sk_OPENSSL_STRING_find BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_find %xdefine sk_OPENSSL_STRING_free BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_free %xdefine sk_OPENSSL_STRING_new BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_new %xdefine sk_OPENSSL_STRING_new_null BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_new_null %xdefine sk_OPENSSL_STRING_num BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_num %xdefine sk_OPENSSL_STRING_pop_free BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_pop_free %xdefine sk_OPENSSL_STRING_push BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_push %xdefine sk_OPENSSL_STRING_sort BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_sort %xdefine sk_OPENSSL_STRING_value BORINGSSL_PREFIX %+ _sk_OPENSSL_STRING_value %xdefine sk_POLICYINFO_call_cmp_func BORINGSSL_PREFIX %+ _sk_POLICYINFO_call_cmp_func %xdefine sk_POLICYINFO_call_free_func BORINGSSL_PREFIX %+ _sk_POLICYINFO_call_free_func %xdefine sk_POLICYINFO_find BORINGSSL_PREFIX %+ _sk_POLICYINFO_find %xdefine sk_POLICYINFO_is_sorted BORINGSSL_PREFIX %+ _sk_POLICYINFO_is_sorted %xdefine sk_POLICYINFO_new_null BORINGSSL_PREFIX %+ _sk_POLICYINFO_new_null %xdefine sk_POLICYINFO_num BORINGSSL_PREFIX %+ _sk_POLICYINFO_num %xdefine sk_POLICYINFO_pop_free BORINGSSL_PREFIX %+ _sk_POLICYINFO_pop_free %xdefine sk_POLICYINFO_push BORINGSSL_PREFIX %+ _sk_POLICYINFO_push %xdefine sk_POLICYINFO_set_cmp_func BORINGSSL_PREFIX %+ _sk_POLICYINFO_set_cmp_func 
%xdefine sk_POLICYINFO_sort BORINGSSL_PREFIX %+ _sk_POLICYINFO_sort %xdefine sk_POLICYINFO_value BORINGSSL_PREFIX %+ _sk_POLICYINFO_value %xdefine sk_POLICYQUALINFO_new_null BORINGSSL_PREFIX %+ _sk_POLICYQUALINFO_new_null %xdefine sk_POLICYQUALINFO_num BORINGSSL_PREFIX %+ _sk_POLICYQUALINFO_num %xdefine sk_POLICYQUALINFO_push BORINGSSL_PREFIX %+ _sk_POLICYQUALINFO_push %xdefine sk_POLICYQUALINFO_value BORINGSSL_PREFIX %+ _sk_POLICYQUALINFO_value %xdefine sk_POLICY_MAPPING_call_cmp_func BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_call_cmp_func %xdefine sk_POLICY_MAPPING_call_free_func BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_call_free_func %xdefine sk_POLICY_MAPPING_find BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_find %xdefine sk_POLICY_MAPPING_is_sorted BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_is_sorted %xdefine sk_POLICY_MAPPING_new_null BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_new_null %xdefine sk_POLICY_MAPPING_num BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_num %xdefine sk_POLICY_MAPPING_pop_free BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_pop_free %xdefine sk_POLICY_MAPPING_push BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_push %xdefine sk_POLICY_MAPPING_set_cmp_func BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_set_cmp_func %xdefine sk_POLICY_MAPPING_sort BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_sort %xdefine sk_POLICY_MAPPING_value BORINGSSL_PREFIX %+ _sk_POLICY_MAPPING_value %xdefine sk_SRTP_PROTECTION_PROFILE_new_null BORINGSSL_PREFIX %+ _sk_SRTP_PROTECTION_PROFILE_new_null %xdefine sk_SRTP_PROTECTION_PROFILE_num BORINGSSL_PREFIX %+ _sk_SRTP_PROTECTION_PROFILE_num %xdefine sk_SRTP_PROTECTION_PROFILE_push BORINGSSL_PREFIX %+ _sk_SRTP_PROTECTION_PROFILE_push %xdefine sk_SSL_CIPHER_call_cmp_func BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_call_cmp_func %xdefine sk_SSL_CIPHER_delete BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_delete %xdefine sk_SSL_CIPHER_dup BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_dup %xdefine sk_SSL_CIPHER_find BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_find %xdefine sk_SSL_CIPHER_new_null BORINGSSL_PREFIX %+ 
_sk_SSL_CIPHER_new_null %xdefine sk_SSL_CIPHER_num BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_num %xdefine sk_SSL_CIPHER_push BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_push %xdefine sk_SSL_CIPHER_value BORINGSSL_PREFIX %+ _sk_SSL_CIPHER_value %xdefine sk_TRUST_TOKEN_PRETOKEN_call_free_func BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_PRETOKEN_call_free_func %xdefine sk_TRUST_TOKEN_PRETOKEN_new_null BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_PRETOKEN_new_null %xdefine sk_TRUST_TOKEN_PRETOKEN_num BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_PRETOKEN_num %xdefine sk_TRUST_TOKEN_PRETOKEN_pop_free BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_PRETOKEN_pop_free %xdefine sk_TRUST_TOKEN_PRETOKEN_push BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_PRETOKEN_push %xdefine sk_TRUST_TOKEN_PRETOKEN_value BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_PRETOKEN_value %xdefine sk_TRUST_TOKEN_call_free_func BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_call_free_func %xdefine sk_TRUST_TOKEN_new_null BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_new_null %xdefine sk_TRUST_TOKEN_pop_free BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_pop_free %xdefine sk_TRUST_TOKEN_push BORINGSSL_PREFIX %+ _sk_TRUST_TOKEN_push %xdefine sk_X509_ATTRIBUTE_delete BORINGSSL_PREFIX %+ _sk_X509_ATTRIBUTE_delete %xdefine sk_X509_ATTRIBUTE_new_null BORINGSSL_PREFIX %+ _sk_X509_ATTRIBUTE_new_null %xdefine sk_X509_ATTRIBUTE_num BORINGSSL_PREFIX %+ _sk_X509_ATTRIBUTE_num %xdefine sk_X509_ATTRIBUTE_push BORINGSSL_PREFIX %+ _sk_X509_ATTRIBUTE_push %xdefine sk_X509_ATTRIBUTE_value BORINGSSL_PREFIX %+ _sk_X509_ATTRIBUTE_value %xdefine sk_X509_CRL_call_free_func BORINGSSL_PREFIX %+ _sk_X509_CRL_call_free_func %xdefine sk_X509_CRL_free BORINGSSL_PREFIX %+ _sk_X509_CRL_free %xdefine sk_X509_CRL_new_null BORINGSSL_PREFIX %+ _sk_X509_CRL_new_null %xdefine sk_X509_CRL_num BORINGSSL_PREFIX %+ _sk_X509_CRL_num %xdefine sk_X509_CRL_pop BORINGSSL_PREFIX %+ _sk_X509_CRL_pop %xdefine sk_X509_CRL_pop_free BORINGSSL_PREFIX %+ _sk_X509_CRL_pop_free %xdefine sk_X509_CRL_push BORINGSSL_PREFIX %+ _sk_X509_CRL_push %xdefine 
sk_X509_CRL_value BORINGSSL_PREFIX %+ _sk_X509_CRL_value %xdefine sk_X509_EXTENSION_call_free_func BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_call_free_func %xdefine sk_X509_EXTENSION_delete BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_delete %xdefine sk_X509_EXTENSION_free BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_free %xdefine sk_X509_EXTENSION_insert BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_insert %xdefine sk_X509_EXTENSION_new_null BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_new_null %xdefine sk_X509_EXTENSION_num BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_num %xdefine sk_X509_EXTENSION_pop_free BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_pop_free %xdefine sk_X509_EXTENSION_push BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_push %xdefine sk_X509_EXTENSION_set BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_set %xdefine sk_X509_EXTENSION_value BORINGSSL_PREFIX %+ _sk_X509_EXTENSION_value %xdefine sk_X509_INFO_call_free_func BORINGSSL_PREFIX %+ _sk_X509_INFO_call_free_func %xdefine sk_X509_INFO_free BORINGSSL_PREFIX %+ _sk_X509_INFO_free %xdefine sk_X509_INFO_new_null BORINGSSL_PREFIX %+ _sk_X509_INFO_new_null %xdefine sk_X509_INFO_num BORINGSSL_PREFIX %+ _sk_X509_INFO_num %xdefine sk_X509_INFO_pop BORINGSSL_PREFIX %+ _sk_X509_INFO_pop %xdefine sk_X509_INFO_pop_free BORINGSSL_PREFIX %+ _sk_X509_INFO_pop_free %xdefine sk_X509_INFO_push BORINGSSL_PREFIX %+ _sk_X509_INFO_push %xdefine sk_X509_INFO_value BORINGSSL_PREFIX %+ _sk_X509_INFO_value %xdefine sk_X509_LOOKUP_call_free_func BORINGSSL_PREFIX %+ _sk_X509_LOOKUP_call_free_func %xdefine sk_X509_LOOKUP_new_null BORINGSSL_PREFIX %+ _sk_X509_LOOKUP_new_null %xdefine sk_X509_LOOKUP_num BORINGSSL_PREFIX %+ _sk_X509_LOOKUP_num %xdefine sk_X509_LOOKUP_pop_free BORINGSSL_PREFIX %+ _sk_X509_LOOKUP_pop_free %xdefine sk_X509_LOOKUP_push BORINGSSL_PREFIX %+ _sk_X509_LOOKUP_push %xdefine sk_X509_LOOKUP_value BORINGSSL_PREFIX %+ _sk_X509_LOOKUP_value %xdefine sk_X509_NAME_ENTRY_call_free_func BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_call_free_func %xdefine 
sk_X509_NAME_ENTRY_delete BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_delete %xdefine sk_X509_NAME_ENTRY_free BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_free %xdefine sk_X509_NAME_ENTRY_insert BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_insert %xdefine sk_X509_NAME_ENTRY_new_null BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_new_null %xdefine sk_X509_NAME_ENTRY_num BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_num %xdefine sk_X509_NAME_ENTRY_pop_free BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_pop_free %xdefine sk_X509_NAME_ENTRY_push BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_push %xdefine sk_X509_NAME_ENTRY_set BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_set %xdefine sk_X509_NAME_ENTRY_value BORINGSSL_PREFIX %+ _sk_X509_NAME_ENTRY_value %xdefine sk_X509_NAME_call_cmp_func BORINGSSL_PREFIX %+ _sk_X509_NAME_call_cmp_func %xdefine sk_X509_NAME_call_copy_func BORINGSSL_PREFIX %+ _sk_X509_NAME_call_copy_func %xdefine sk_X509_NAME_call_free_func BORINGSSL_PREFIX %+ _sk_X509_NAME_call_free_func %xdefine sk_X509_NAME_deep_copy BORINGSSL_PREFIX %+ _sk_X509_NAME_deep_copy %xdefine sk_X509_NAME_find BORINGSSL_PREFIX %+ _sk_X509_NAME_find %xdefine sk_X509_NAME_new BORINGSSL_PREFIX %+ _sk_X509_NAME_new %xdefine sk_X509_NAME_new_null BORINGSSL_PREFIX %+ _sk_X509_NAME_new_null %xdefine sk_X509_NAME_num BORINGSSL_PREFIX %+ _sk_X509_NAME_num %xdefine sk_X509_NAME_pop_free BORINGSSL_PREFIX %+ _sk_X509_NAME_pop_free %xdefine sk_X509_NAME_set BORINGSSL_PREFIX %+ _sk_X509_NAME_set %xdefine sk_X509_NAME_set_cmp_func BORINGSSL_PREFIX %+ _sk_X509_NAME_set_cmp_func %xdefine sk_X509_NAME_sort BORINGSSL_PREFIX %+ _sk_X509_NAME_sort %xdefine sk_X509_NAME_value BORINGSSL_PREFIX %+ _sk_X509_NAME_value %xdefine sk_X509_OBJECT_call_cmp_func BORINGSSL_PREFIX %+ _sk_X509_OBJECT_call_cmp_func %xdefine sk_X509_OBJECT_call_copy_func BORINGSSL_PREFIX %+ _sk_X509_OBJECT_call_copy_func %xdefine sk_X509_OBJECT_call_free_func BORINGSSL_PREFIX %+ _sk_X509_OBJECT_call_free_func %xdefine sk_X509_OBJECT_deep_copy BORINGSSL_PREFIX %+ 
_sk_X509_OBJECT_deep_copy %xdefine sk_X509_OBJECT_find BORINGSSL_PREFIX %+ _sk_X509_OBJECT_find %xdefine sk_X509_OBJECT_new BORINGSSL_PREFIX %+ _sk_X509_OBJECT_new %xdefine sk_X509_OBJECT_num BORINGSSL_PREFIX %+ _sk_X509_OBJECT_num %xdefine sk_X509_OBJECT_pop_free BORINGSSL_PREFIX %+ _sk_X509_OBJECT_pop_free %xdefine sk_X509_OBJECT_push BORINGSSL_PREFIX %+ _sk_X509_OBJECT_push %xdefine sk_X509_OBJECT_sort BORINGSSL_PREFIX %+ _sk_X509_OBJECT_sort %xdefine sk_X509_OBJECT_value BORINGSSL_PREFIX %+ _sk_X509_OBJECT_value %xdefine sk_X509_REVOKED_call_cmp_func BORINGSSL_PREFIX %+ _sk_X509_REVOKED_call_cmp_func %xdefine sk_X509_REVOKED_find BORINGSSL_PREFIX %+ _sk_X509_REVOKED_find %xdefine sk_X509_REVOKED_is_sorted BORINGSSL_PREFIX %+ _sk_X509_REVOKED_is_sorted %xdefine sk_X509_REVOKED_new BORINGSSL_PREFIX %+ _sk_X509_REVOKED_new %xdefine sk_X509_REVOKED_num BORINGSSL_PREFIX %+ _sk_X509_REVOKED_num %xdefine sk_X509_REVOKED_push BORINGSSL_PREFIX %+ _sk_X509_REVOKED_push %xdefine sk_X509_REVOKED_set_cmp_func BORINGSSL_PREFIX %+ _sk_X509_REVOKED_set_cmp_func %xdefine sk_X509_REVOKED_sort BORINGSSL_PREFIX %+ _sk_X509_REVOKED_sort %xdefine sk_X509_REVOKED_value BORINGSSL_PREFIX %+ _sk_X509_REVOKED_value %xdefine sk_X509_call_free_func BORINGSSL_PREFIX %+ _sk_X509_call_free_func %xdefine sk_X509_delete BORINGSSL_PREFIX %+ _sk_X509_delete %xdefine sk_X509_delete_ptr BORINGSSL_PREFIX %+ _sk_X509_delete_ptr %xdefine sk_X509_dup BORINGSSL_PREFIX %+ _sk_X509_dup %xdefine sk_X509_free BORINGSSL_PREFIX %+ _sk_X509_free %xdefine sk_X509_new_null BORINGSSL_PREFIX %+ _sk_X509_new_null %xdefine sk_X509_num BORINGSSL_PREFIX %+ _sk_X509_num %xdefine sk_X509_pop BORINGSSL_PREFIX %+ _sk_X509_pop %xdefine sk_X509_pop_free BORINGSSL_PREFIX %+ _sk_X509_pop_free %xdefine sk_X509_push BORINGSSL_PREFIX %+ _sk_X509_push %xdefine sk_X509_set BORINGSSL_PREFIX %+ _sk_X509_set %xdefine sk_X509_shift BORINGSSL_PREFIX %+ _sk_X509_shift %xdefine sk_X509_value BORINGSSL_PREFIX %+ _sk_X509_value %xdefine 
sk_free BORINGSSL_PREFIX %+ _sk_free %xdefine sk_new_null BORINGSSL_PREFIX %+ _sk_new_null %xdefine sk_num BORINGSSL_PREFIX %+ _sk_num %xdefine sk_pop BORINGSSL_PREFIX %+ _sk_pop %xdefine sk_pop_free BORINGSSL_PREFIX %+ _sk_pop_free %xdefine sk_pop_free_ex BORINGSSL_PREFIX %+ _sk_pop_free_ex %xdefine sk_push BORINGSSL_PREFIX %+ _sk_push %xdefine sk_value BORINGSSL_PREFIX %+ _sk_value %xdefine sk_void_free BORINGSSL_PREFIX %+ _sk_void_free %xdefine sk_void_new_null BORINGSSL_PREFIX %+ _sk_void_new_null %xdefine sk_void_num BORINGSSL_PREFIX %+ _sk_void_num %xdefine sk_void_push BORINGSSL_PREFIX %+ _sk_void_push %xdefine sk_void_set BORINGSSL_PREFIX %+ _sk_void_set %xdefine sk_void_value BORINGSSL_PREFIX %+ _sk_void_value %xdefine slhdsa_copy_keypair_addr BORINGSSL_PREFIX %+ _slhdsa_copy_keypair_addr %xdefine slhdsa_fors_pk_from_sig BORINGSSL_PREFIX %+ _slhdsa_fors_pk_from_sig %xdefine slhdsa_fors_sign BORINGSSL_PREFIX %+ _slhdsa_fors_sign %xdefine slhdsa_fors_sk_gen BORINGSSL_PREFIX %+ _slhdsa_fors_sk_gen %xdefine slhdsa_fors_treehash BORINGSSL_PREFIX %+ _slhdsa_fors_treehash %xdefine slhdsa_get_tree_index BORINGSSL_PREFIX %+ _slhdsa_get_tree_index %xdefine slhdsa_ht_sign BORINGSSL_PREFIX %+ _slhdsa_ht_sign %xdefine slhdsa_ht_verify BORINGSSL_PREFIX %+ _slhdsa_ht_verify %xdefine slhdsa_set_chain_addr BORINGSSL_PREFIX %+ _slhdsa_set_chain_addr %xdefine slhdsa_set_hash_addr BORINGSSL_PREFIX %+ _slhdsa_set_hash_addr %xdefine slhdsa_set_keypair_addr BORINGSSL_PREFIX %+ _slhdsa_set_keypair_addr %xdefine slhdsa_set_layer_addr BORINGSSL_PREFIX %+ _slhdsa_set_layer_addr %xdefine slhdsa_set_tree_addr BORINGSSL_PREFIX %+ _slhdsa_set_tree_addr %xdefine slhdsa_set_tree_height BORINGSSL_PREFIX %+ _slhdsa_set_tree_height %xdefine slhdsa_set_tree_index BORINGSSL_PREFIX %+ _slhdsa_set_tree_index %xdefine slhdsa_set_type BORINGSSL_PREFIX %+ _slhdsa_set_type %xdefine slhdsa_thash_f BORINGSSL_PREFIX %+ _slhdsa_thash_f %xdefine slhdsa_thash_h BORINGSSL_PREFIX %+ _slhdsa_thash_h %xdefine 
slhdsa_thash_hmsg BORINGSSL_PREFIX %+ _slhdsa_thash_hmsg %xdefine slhdsa_thash_prf BORINGSSL_PREFIX %+ _slhdsa_thash_prf %xdefine slhdsa_thash_prfmsg BORINGSSL_PREFIX %+ _slhdsa_thash_prfmsg %xdefine slhdsa_thash_tk BORINGSSL_PREFIX %+ _slhdsa_thash_tk %xdefine slhdsa_thash_tl BORINGSSL_PREFIX %+ _slhdsa_thash_tl %xdefine slhdsa_treehash BORINGSSL_PREFIX %+ _slhdsa_treehash %xdefine slhdsa_wots_pk_from_sig BORINGSSL_PREFIX %+ _slhdsa_wots_pk_from_sig %xdefine slhdsa_wots_pk_gen BORINGSSL_PREFIX %+ _slhdsa_wots_pk_gen %xdefine slhdsa_wots_sign BORINGSSL_PREFIX %+ _slhdsa_wots_sign %xdefine slhdsa_xmss_pk_from_sig BORINGSSL_PREFIX %+ _slhdsa_xmss_pk_from_sig %xdefine slhdsa_xmss_sign BORINGSSL_PREFIX %+ _slhdsa_xmss_sign %xdefine v2i_GENERAL_NAME BORINGSSL_PREFIX %+ _v2i_GENERAL_NAME %xdefine v2i_GENERAL_NAMES BORINGSSL_PREFIX %+ _v2i_GENERAL_NAMES %xdefine v2i_GENERAL_NAME_ex BORINGSSL_PREFIX %+ _v2i_GENERAL_NAME_ex %xdefine v3_akey_id BORINGSSL_PREFIX %+ _v3_akey_id %xdefine v3_alt BORINGSSL_PREFIX %+ _v3_alt %xdefine v3_bcons BORINGSSL_PREFIX %+ _v3_bcons %xdefine v3_cpols BORINGSSL_PREFIX %+ _v3_cpols %xdefine v3_crl_invdate BORINGSSL_PREFIX %+ _v3_crl_invdate %xdefine v3_crl_num BORINGSSL_PREFIX %+ _v3_crl_num %xdefine v3_crl_reason BORINGSSL_PREFIX %+ _v3_crl_reason %xdefine v3_crld BORINGSSL_PREFIX %+ _v3_crld %xdefine v3_delta_crl BORINGSSL_PREFIX %+ _v3_delta_crl %xdefine v3_ext_ku BORINGSSL_PREFIX %+ _v3_ext_ku %xdefine v3_freshest_crl BORINGSSL_PREFIX %+ _v3_freshest_crl %xdefine v3_idp BORINGSSL_PREFIX %+ _v3_idp %xdefine v3_info BORINGSSL_PREFIX %+ _v3_info %xdefine v3_inhibit_anyp BORINGSSL_PREFIX %+ _v3_inhibit_anyp %xdefine v3_key_usage BORINGSSL_PREFIX %+ _v3_key_usage %xdefine v3_name_constraints BORINGSSL_PREFIX %+ _v3_name_constraints %xdefine v3_ns_ia5_list BORINGSSL_PREFIX %+ _v3_ns_ia5_list %xdefine v3_nscert BORINGSSL_PREFIX %+ _v3_nscert %xdefine v3_ocsp_accresp BORINGSSL_PREFIX %+ _v3_ocsp_accresp %xdefine v3_ocsp_nocheck BORINGSSL_PREFIX %+ 
_v3_ocsp_nocheck %xdefine v3_policy_constraints BORINGSSL_PREFIX %+ _v3_policy_constraints %xdefine v3_policy_mappings BORINGSSL_PREFIX %+ _v3_policy_mappings %xdefine v3_sinfo BORINGSSL_PREFIX %+ _v3_sinfo %xdefine v3_skey_id BORINGSSL_PREFIX %+ _v3_skey_id %xdefine voprf_exp2_blind BORINGSSL_PREFIX %+ _voprf_exp2_blind %xdefine voprf_exp2_client_key_from_bytes BORINGSSL_PREFIX %+ _voprf_exp2_client_key_from_bytes %xdefine voprf_exp2_derive_key_from_secret BORINGSSL_PREFIX %+ _voprf_exp2_derive_key_from_secret %xdefine voprf_exp2_generate_key BORINGSSL_PREFIX %+ _voprf_exp2_generate_key %xdefine voprf_exp2_issuer_key_from_bytes BORINGSSL_PREFIX %+ _voprf_exp2_issuer_key_from_bytes %xdefine voprf_exp2_read BORINGSSL_PREFIX %+ _voprf_exp2_read %xdefine voprf_exp2_sign BORINGSSL_PREFIX %+ _voprf_exp2_sign %xdefine voprf_exp2_unblind BORINGSSL_PREFIX %+ _voprf_exp2_unblind %xdefine voprf_pst1_blind BORINGSSL_PREFIX %+ _voprf_pst1_blind %xdefine voprf_pst1_client_key_from_bytes BORINGSSL_PREFIX %+ _voprf_pst1_client_key_from_bytes %xdefine voprf_pst1_derive_key_from_secret BORINGSSL_PREFIX %+ _voprf_pst1_derive_key_from_secret %xdefine voprf_pst1_generate_key BORINGSSL_PREFIX %+ _voprf_pst1_generate_key %xdefine voprf_pst1_issuer_key_from_bytes BORINGSSL_PREFIX %+ _voprf_pst1_issuer_key_from_bytes %xdefine voprf_pst1_read BORINGSSL_PREFIX %+ _voprf_pst1_read %xdefine voprf_pst1_sign BORINGSSL_PREFIX %+ _voprf_pst1_sign %xdefine voprf_pst1_sign_with_proof_scalar_for_testing BORINGSSL_PREFIX %+ _voprf_pst1_sign_with_proof_scalar_for_testing %xdefine voprf_pst1_unblind BORINGSSL_PREFIX %+ _voprf_pst1_unblind %xdefine vpaes_capable BORINGSSL_PREFIX %+ _vpaes_capable %xdefine vpaes_cbc_encrypt BORINGSSL_PREFIX %+ _vpaes_cbc_encrypt %xdefine vpaes_ctr32_encrypt_blocks BORINGSSL_PREFIX %+ _vpaes_ctr32_encrypt_blocks %xdefine vpaes_decrypt BORINGSSL_PREFIX %+ _vpaes_decrypt %xdefine vpaes_decrypt_key_to_bsaes BORINGSSL_PREFIX %+ _vpaes_decrypt_key_to_bsaes %xdefine 
vpaes_encrypt BORINGSSL_PREFIX %+ _vpaes_encrypt %xdefine vpaes_set_decrypt_key BORINGSSL_PREFIX %+ _vpaes_set_decrypt_key %xdefine vpaes_set_encrypt_key BORINGSSL_PREFIX %+ _vpaes_set_encrypt_key %xdefine x25519_asn1_meth BORINGSSL_PREFIX %+ _x25519_asn1_meth %xdefine x25519_ge_add BORINGSSL_PREFIX %+ _x25519_ge_add %xdefine x25519_ge_frombytes_vartime BORINGSSL_PREFIX %+ _x25519_ge_frombytes_vartime %xdefine x25519_ge_p1p1_to_p2 BORINGSSL_PREFIX %+ _x25519_ge_p1p1_to_p2 %xdefine x25519_ge_p1p1_to_p3 BORINGSSL_PREFIX %+ _x25519_ge_p1p1_to_p3 %xdefine x25519_ge_p3_to_cached BORINGSSL_PREFIX %+ _x25519_ge_p3_to_cached %xdefine x25519_ge_scalarmult BORINGSSL_PREFIX %+ _x25519_ge_scalarmult %xdefine x25519_ge_scalarmult_base BORINGSSL_PREFIX %+ _x25519_ge_scalarmult_base %xdefine x25519_ge_scalarmult_base_adx BORINGSSL_PREFIX %+ _x25519_ge_scalarmult_base_adx %xdefine x25519_ge_scalarmult_small_precomp BORINGSSL_PREFIX %+ _x25519_ge_scalarmult_small_precomp %xdefine x25519_ge_sub BORINGSSL_PREFIX %+ _x25519_ge_sub %xdefine x25519_ge_tobytes BORINGSSL_PREFIX %+ _x25519_ge_tobytes %xdefine x25519_pkey_meth BORINGSSL_PREFIX %+ _x25519_pkey_meth %xdefine x25519_sc_reduce BORINGSSL_PREFIX %+ _x25519_sc_reduce %xdefine x25519_scalar_mult_adx BORINGSSL_PREFIX %+ _x25519_scalar_mult_adx %xdefine x509V3_add_value_asn1_string BORINGSSL_PREFIX %+ _x509V3_add_value_asn1_string %xdefine x509_check_issued_with_callback BORINGSSL_PREFIX %+ _x509_check_issued_with_callback %xdefine x509_digest_sign_algorithm BORINGSSL_PREFIX %+ _x509_digest_sign_algorithm %xdefine x509_digest_verify_init BORINGSSL_PREFIX %+ _x509_digest_verify_init %xdefine x509_print_rsa_pss_params BORINGSSL_PREFIX %+ _x509_print_rsa_pss_params %xdefine x509_rsa_ctx_to_pss BORINGSSL_PREFIX %+ _x509_rsa_ctx_to_pss %xdefine x509_rsa_pss_to_ctx BORINGSSL_PREFIX %+ _x509_rsa_pss_to_ctx %xdefine x509v3_a2i_ipadd BORINGSSL_PREFIX %+ _x509v3_a2i_ipadd %xdefine x509v3_bytes_to_hex BORINGSSL_PREFIX %+ _x509v3_bytes_to_hex 
%xdefine x509v3_cache_extensions BORINGSSL_PREFIX %+ _x509v3_cache_extensions %xdefine x509v3_conf_name_matches BORINGSSL_PREFIX %+ _x509v3_conf_name_matches %xdefine x509v3_hex_to_bytes BORINGSSL_PREFIX %+ _x509v3_hex_to_bytes %xdefine x509v3_looks_like_dns_name BORINGSSL_PREFIX %+ _x509v3_looks_like_dns_name %endif ================================================ FILE: Sources/CNIOBoringSSL/include/experimental/CNIOBoringSSL_kyber.h ================================================ /* Copyright 2023 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef OPENSSL_HEADER_KYBER_H #define OPENSSL_HEADER_KYBER_H #include "CNIOBoringSSL_base.h" #if defined(__cplusplus) extern "C" { #endif #if defined(OPENSSL_UNSTABLE_EXPERIMENTAL_KYBER) // This header implements experimental, draft versions of not-yet-standardized // primitives. When the standard is complete, these functions will be removed // and replaced with the final, incompatible standard version. They are // available now for short-lived experiments, but must not be deployed anywhere // durable, such as a long-lived key store. To use these functions define // OPENSSL_UNSTABLE_EXPERIMENTAL_KYBER // Kyber768. 
// // This implements the round-3 specification of Kyber, defined at // https://pq-crystals.org/kyber/data/kyber-specification-round3-20210804.pdf // KYBER_public_key contains a Kyber768 public key. The contents of this // object should never leave the address space since the format is unstable. struct KYBER_public_key { union { uint8_t bytes[512 * (3 + 9) + 32 + 32]; uint16_t alignment; } opaque; }; // KYBER_private_key contains a Kyber768 private key. The contents of this // object should never leave the address space since the format is unstable. struct KYBER_private_key { union { uint8_t bytes[512 * (3 + 3 + 9) + 32 + 32 + 32]; uint16_t alignment; } opaque; }; // KYBER_PUBLIC_KEY_BYTES is the number of bytes in an encoded Kyber768 public // key. #define KYBER_PUBLIC_KEY_BYTES 1184 // KYBER_SHARED_SECRET_BYTES is the number of bytes in the Kyber768 shared // secret. Although the round-3 specification has a variable-length output, the // final ML-KEM construction is expected to use a fixed 32-byte output. To // simplify the future transition, we apply the same restriction. #define KYBER_SHARED_SECRET_BYTES 32 // KYBER_generate_key generates a random public/private key pair, writes the // encoded public key to |out_encoded_public_key| and sets |out_private_key| to // the private key. OPENSSL_EXPORT void KYBER_generate_key( uint8_t out_encoded_public_key[KYBER_PUBLIC_KEY_BYTES], struct KYBER_private_key *out_private_key); // KYBER_public_from_private sets |*out_public_key| to the public key that // corresponds to |private_key|. (This is faster than parsing the output of // |KYBER_generate_key| if, for some reason, you need to encapsulate to a key // that was just generated.) OPENSSL_EXPORT void KYBER_public_from_private( struct KYBER_public_key *out_public_key, const struct KYBER_private_key *private_key); // KYBER_CIPHERTEXT_BYTES is number of bytes in the Kyber768 ciphertext. 
#define KYBER_CIPHERTEXT_BYTES 1088 // KYBER_encap encrypts a random shared secret for |public_key|, writes the // ciphertext to |out_ciphertext|, and writes the random shared secret to // |out_shared_secret|. OPENSSL_EXPORT void KYBER_encap( uint8_t out_ciphertext[KYBER_CIPHERTEXT_BYTES], uint8_t out_shared_secret[KYBER_SHARED_SECRET_BYTES], const struct KYBER_public_key *public_key); // KYBER_decap decrypts a shared secret from |ciphertext| using |private_key| // and writes it to |out_shared_secret|. If |ciphertext| is invalid, // |out_shared_secret| is filled with a key that will always be the same for the // same |ciphertext| and |private_key|, but which appears to be random unless // one has access to |private_key|. These alternatives occur in constant time. // Any subsequent symmetric encryption using |out_shared_secret| must use an // authenticated encryption scheme in order to discover the decapsulation // failure. OPENSSL_EXPORT void KYBER_decap( uint8_t out_shared_secret[KYBER_SHARED_SECRET_BYTES], const uint8_t ciphertext[KYBER_CIPHERTEXT_BYTES], const struct KYBER_private_key *private_key); // Serialisation of keys. // KYBER_marshal_public_key serializes |public_key| to |out| in the standard // format for Kyber public keys. It returns one on success or zero on allocation // error. OPENSSL_EXPORT int KYBER_marshal_public_key( CBB *out, const struct KYBER_public_key *public_key); // KYBER_parse_public_key parses a public key, in the format generated by // |KYBER_marshal_public_key|, from |in| and writes the result to // |out_public_key|. It returns one on success or zero on parse error or if // there are trailing bytes in |in|. OPENSSL_EXPORT int KYBER_parse_public_key( struct KYBER_public_key *out_public_key, CBS *in); // KYBER_marshal_private_key serializes |private_key| to |out| in the standard // format for Kyber private keys. It returns one on success or zero on // allocation error. 
OPENSSL_EXPORT int KYBER_marshal_private_key( CBB *out, const struct KYBER_private_key *private_key); // KYBER_PRIVATE_KEY_BYTES is the length of the data produced by // |KYBER_marshal_private_key|. #define KYBER_PRIVATE_KEY_BYTES 2400 // KYBER_parse_private_key parses a private key, in the format generated by // |KYBER_marshal_private_key|, from |in| and writes the result to // |out_private_key|. It returns one on success or zero on parse error or if // there are trailing bytes in |in|. OPENSSL_EXPORT int KYBER_parse_private_key( struct KYBER_private_key *out_private_key, CBS *in); #endif // OPENSSL_UNSTABLE_EXPERIMENTAL_KYBER #if defined(__cplusplus) } // extern C #endif #endif // OPENSSL_HEADER_KYBER_H ================================================ FILE: Sources/CNIOBoringSSL/include/module.modulemap ================================================ module CNIOBoringSSL { umbrella header "CNIOBoringSSL.h" export * } ================================================ FILE: Sources/CNIOBoringSSL/ssl/bio_ssl.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include static SSL *get_ssl(BIO *bio) { return reinterpret_cast(bio->ptr); } static int ssl_read(BIO *bio, char *out, int outl) { SSL *ssl = get_ssl(bio); if (ssl == NULL) { return 0; } BIO_clear_retry_flags(bio); const int ret = SSL_read(ssl, out, outl); switch (SSL_get_error(ssl, ret)) { case SSL_ERROR_WANT_READ: BIO_set_retry_read(bio); break; case SSL_ERROR_WANT_WRITE: BIO_set_retry_write(bio); break; case SSL_ERROR_WANT_ACCEPT: BIO_set_retry_special(bio); BIO_set_retry_reason(bio, BIO_RR_ACCEPT); break; case SSL_ERROR_WANT_CONNECT: BIO_set_retry_special(bio); BIO_set_retry_reason(bio, BIO_RR_CONNECT); break; case SSL_ERROR_NONE: case SSL_ERROR_SYSCALL: case SSL_ERROR_SSL: case SSL_ERROR_ZERO_RETURN: default: break; } return ret; } static int ssl_write(BIO *bio, const char *out, int outl) { SSL *ssl = get_ssl(bio); if (ssl == NULL) { return 0; } BIO_clear_retry_flags(bio); const int ret = SSL_write(ssl, out, outl); switch (SSL_get_error(ssl, ret)) { case SSL_ERROR_WANT_WRITE: BIO_set_retry_write(bio); break; case SSL_ERROR_WANT_READ: BIO_set_retry_read(bio); break; case SSL_ERROR_WANT_CONNECT: BIO_set_retry_special(bio); BIO_set_retry_reason(bio, BIO_RR_CONNECT); break; case SSL_ERROR_NONE: case SSL_ERROR_SYSCALL: case SSL_ERROR_SSL: default: break; } return ret; } static long ssl_ctrl(BIO *bio, int cmd, long num, void *ptr) { SSL *ssl = get_ssl(bio); if (ssl == NULL && cmd != BIO_C_SET_SSL) { return 0; } switch (cmd) { case BIO_C_SET_SSL: if (ssl != NULL) { // OpenSSL allows reusing an SSL BIO with a different SSL object. We do // not support this. OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } // Note this differs from upstream OpenSSL, which synchronizes // |bio->next_bio| with |ssl|'s rbio here, and on |BIO_CTRL_PUSH|. We call // into the corresponding |BIO| directly. 
(We can implement the upstream // behavior if it ends up necessary.) bio->shutdown = static_cast(num); bio->ptr = ptr; bio->init = 1; return 1; case BIO_CTRL_GET_CLOSE: return bio->shutdown; case BIO_CTRL_SET_CLOSE: bio->shutdown = static_cast(num); return 1; case BIO_CTRL_WPENDING: return BIO_ctrl(SSL_get_wbio(ssl), cmd, num, ptr); case BIO_CTRL_PENDING: return SSL_pending(ssl); case BIO_CTRL_FLUSH: { BIO *wbio = SSL_get_wbio(ssl); BIO_clear_retry_flags(bio); long ret = BIO_ctrl(wbio, cmd, num, ptr); BIO_set_flags(bio, BIO_get_retry_flags(wbio)); BIO_set_retry_reason(bio, BIO_get_retry_reason(wbio)); return ret; } case BIO_CTRL_PUSH: case BIO_CTRL_POP: case BIO_CTRL_DUP: return -1; default: return BIO_ctrl(SSL_get_rbio(ssl), cmd, num, ptr); } } static int ssl_new(BIO *bio) { return 1; } static int ssl_free(BIO *bio) { SSL *ssl = get_ssl(bio); if (ssl == NULL) { return 1; } SSL_shutdown(ssl); if (bio->shutdown) { SSL_free(ssl); } return 1; } static long ssl_callback_ctrl(BIO *bio, int cmd, bio_info_cb fp) { SSL *ssl = get_ssl(bio); if (ssl == NULL) { return 0; } switch (cmd) { case BIO_CTRL_SET_CALLBACK: return -1; default: return BIO_callback_ctrl(SSL_get_rbio(ssl), cmd, fp); } } static const BIO_METHOD ssl_method = { BIO_TYPE_SSL, "SSL", ssl_write, ssl_read, NULL, NULL, ssl_ctrl, ssl_new, ssl_free, ssl_callback_ctrl, }; const BIO_METHOD *BIO_f_ssl(void) { return &ssl_method; } long BIO_set_ssl(BIO *bio, SSL *ssl, int take_owership) { return BIO_ctrl(bio, BIO_C_SET_SSL, take_owership, ssl); } ================================================ FILE: Sources/CNIOBoringSSL/ssl/d1_both.cc ================================================ /* * Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN // TODO(davidben): 28 comes from the size of IP + UDP header. Is this reasonable // for these values? Notably, why is kMinMTU a function of the transport // protocol's overhead rather than, say, what's needed to hold a minimally-sized // handshake fragment plus protocol overhead. // kMinMTU is the minimum acceptable MTU value. static const unsigned int kMinMTU = 256 - 28; // kDefaultMTU is the default MTU value to use if neither the user nor // the underlying BIO supplies one. static const unsigned int kDefaultMTU = 1500 - 28; // BitRange returns a |uint8_t| with bits |start|, inclusive, to |end|, // exclusive, set. static uint8_t BitRange(size_t start, size_t end) { assert(start <= end && end <= 8); return static_cast(~((1u << start) - 1) & ((1u << end) - 1)); } // FirstUnmarkedRangeInByte returns the first unmarked range in bits |b|. static DTLSMessageBitmap::Range FirstUnmarkedRangeInByte(uint8_t b) { size_t start, end; for (start = 0; start < 8; start++) { if ((b & (1u << start)) == 0) { break; } } for (end = start; end < 8; end++) { if ((b & (1u << end)) != 0) { break; } } return DTLSMessageBitmap::Range{start, end}; } bool DTLSMessageBitmap::Init(size_t num_bits) { if (num_bits + 7 < num_bits) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); return false; } size_t num_bytes = (num_bits + 7) / 8; size_t bits_rounded = num_bytes * 8; if (!bytes_.Init(num_bytes)) { return false; } MarkRange(num_bits, bits_rounded); first_unmarked_byte_ = 0; return true; } void DTLSMessageBitmap::MarkRange(size_t start, size_t end) { assert(start <= end); // Don't bother touching bytes that have already been marked. start = std::max(start, first_unmarked_byte_ << 3); // Clamp everything within range. 
start = std::min(start, bytes_.size() << 3); end = std::min(end, bytes_.size() << 3); if (start >= end) { return; } if ((start >> 3) == (end >> 3)) { bytes_[start >> 3] |= BitRange(start & 7, end & 7); } else { bytes_[start >> 3] |= BitRange(start & 7, 8); for (size_t i = (start >> 3) + 1; i < (end >> 3); i++) { bytes_[i] = 0xff; } if ((end & 7) != 0) { bytes_[end >> 3] |= BitRange(0, end & 7); } } // Maintain the |first_unmarked_byte_| invariant. This work is amortized // across all |MarkRange| calls. while (first_unmarked_byte_ < bytes_.size() && bytes_[first_unmarked_byte_] == 0xff) { first_unmarked_byte_++; } // If the whole message is marked, we no longer need to spend memory on the // bitmap. if (first_unmarked_byte_ >= bytes_.size()) { bytes_.Reset(); first_unmarked_byte_ = 0; } } DTLSMessageBitmap::Range DTLSMessageBitmap::NextUnmarkedRange( size_t start) const { // Don't bother looking at bytes that are known to be fully marked. start = std::max(start, first_unmarked_byte_ << 3); size_t idx = start >> 3; if (idx >= bytes_.size()) { return Range{0, 0}; } // Look at the bits from |start| up to a byte boundary. uint8_t byte = bytes_[idx] | BitRange(0, start & 7); if (byte == 0xff) { // Nothing unmarked at this byte. Keep searching for an unmarked bit. for (idx = idx + 1; idx < bytes_.size(); idx++) { if (bytes_[idx] != 0xff) { byte = bytes_[idx]; break; } } if (idx >= bytes_.size()) { return Range{0, 0}; } } Range range = FirstUnmarkedRangeInByte(byte); assert(!range.empty()); bool should_extend = range.end == 8; range.start += idx << 3; range.end += idx << 3; if (!should_extend) { // The range did not end at a byte boundary. We're done. return range; } // Collect all fully unmarked bytes. for (idx = idx + 1; idx < bytes_.size(); idx++) { if (bytes_[idx] != 0) { break; } } range.end = idx << 3; // Add any bits from the remaining byte, if any. 
if (idx < bytes_.size()) { Range extra = FirstUnmarkedRangeInByte(bytes_[idx]); if (extra.start == 0) { range.end += extra.end; } } return range; } // Receiving handshake messages. static UniquePtr dtls_new_incoming_message( const struct hm_header_st *msg_hdr) { ScopedCBB cbb; UniquePtr frag = MakeUnique(); if (!frag) { return nullptr; } frag->type = msg_hdr->type; frag->seq = msg_hdr->seq; // Allocate space for the reassembled message and fill in the header. if (!frag->data.InitForOverwrite(DTLS1_HM_HEADER_LENGTH + msg_hdr->msg_len)) { return nullptr; } if (!CBB_init_fixed(cbb.get(), frag->data.data(), DTLS1_HM_HEADER_LENGTH) || !CBB_add_u8(cbb.get(), msg_hdr->type) || !CBB_add_u24(cbb.get(), msg_hdr->msg_len) || !CBB_add_u16(cbb.get(), msg_hdr->seq) || !CBB_add_u24(cbb.get(), 0 /* frag_off */) || !CBB_add_u24(cbb.get(), msg_hdr->msg_len) || !CBB_finish(cbb.get(), NULL, NULL)) { return nullptr; } if (!frag->reassembly.Init(msg_hdr->msg_len)) { return nullptr; } return frag; } // dtls1_is_current_message_complete returns whether the current handshake // message is complete. static bool dtls1_is_current_message_complete(const SSL *ssl) { size_t idx = ssl->d1->handshake_read_seq % SSL_MAX_HANDSHAKE_FLIGHT; DTLSIncomingMessage *frag = ssl->d1->incoming_messages[idx].get(); return frag != nullptr && frag->reassembly.IsComplete(); } // dtls1_get_incoming_message returns the incoming message corresponding to // |msg_hdr|. If none exists, it creates a new one and inserts it in the // queue. Otherwise, it checks |msg_hdr| is consistent with the existing one. It // returns NULL on failure. The caller does not take ownership of the result. 
static DTLSIncomingMessage *dtls1_get_incoming_message( SSL *ssl, uint8_t *out_alert, const struct hm_header_st *msg_hdr) { if (msg_hdr->seq < ssl->d1->handshake_read_seq || msg_hdr->seq - ssl->d1->handshake_read_seq >= SSL_MAX_HANDSHAKE_FLIGHT) { *out_alert = SSL_AD_INTERNAL_ERROR; return NULL; } size_t idx = msg_hdr->seq % SSL_MAX_HANDSHAKE_FLIGHT; DTLSIncomingMessage *frag = ssl->d1->incoming_messages[idx].get(); if (frag != NULL) { assert(frag->seq == msg_hdr->seq); // The new fragment must be compatible with the previous fragments from this // message. if (frag->type != msg_hdr->type || // frag->msg_len() != msg_hdr->msg_len) { OPENSSL_PUT_ERROR(SSL, SSL_R_FRAGMENT_MISMATCH); *out_alert = SSL_AD_ILLEGAL_PARAMETER; return NULL; } return frag; } // This is the first fragment from this message. ssl->d1->incoming_messages[idx] = dtls_new_incoming_message(msg_hdr); if (!ssl->d1->incoming_messages[idx]) { *out_alert = SSL_AD_INTERNAL_ERROR; return NULL; } return ssl->d1->incoming_messages[idx].get(); } bool dtls1_process_handshake_fragments(SSL *ssl, uint8_t *out_alert, DTLSRecordNumber record_number, Span record) { bool implicit_ack = false; bool skipped_fragments = false; CBS cbs = record; while (CBS_len(&cbs) > 0) { // Read a handshake fragment. struct hm_header_st msg_hdr; CBS body; if (!dtls1_parse_fragment(&cbs, &msg_hdr, &body)) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_HANDSHAKE_RECORD); *out_alert = SSL_AD_DECODE_ERROR; return false; } const size_t frag_off = msg_hdr.frag_off; const size_t frag_len = msg_hdr.frag_len; const size_t msg_len = msg_hdr.msg_len; if (frag_off > msg_len || frag_len > msg_len - frag_off) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_HANDSHAKE_RECORD); *out_alert = SSL_AD_ILLEGAL_PARAMETER; return false; } if (msg_hdr.seq < ssl->d1->handshake_read_seq || ssl->d1->handshake_read_overflow) { // Ignore fragments from the past. This is a retransmit of data we already // received. // // TODO(crbug.com/42290594): Use this to drive retransmits. 
continue; } if (record_number.epoch() != ssl->d1->read_epoch.epoch || ssl->d1->next_read_epoch != nullptr) { // New messages can only arrive in the latest epoch. This can fail if the // record came from |prev_read_epoch|, or if it came from |read_epoch| but // |next_read_epoch| exists. (It cannot come from |next_read_epoch| // because |next_read_epoch| becomes |read_epoch| once it receives a // record.) OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESS_HANDSHAKE_DATA); *out_alert = SSL_AD_UNEXPECTED_MESSAGE; return false; } if (msg_len > ssl_max_handshake_message_len(ssl)) { OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESSIVE_MESSAGE_SIZE); *out_alert = SSL_AD_ILLEGAL_PARAMETER; return false; } if (SSL_in_init(ssl) && ssl_has_final_version(ssl) && ssl_protocol_version(ssl) >= TLS1_3_VERSION) { // During the handshake, if we receive any portion of the next flight, the // peer must have received our most recent flight. In DTLS 1.3, this is an // implicit ACK. See RFC 9147, Section 7.1. // // This only applies during the handshake. After the handshake, the next // message may be part of a post-handshake transaction. It also does not // apply immediately after the handshake. As a client, receiving a // KeyUpdate or NewSessionTicket does not imply the server has received // our Finished. The server may have sent those messages in half-RTT. implicit_ack = true; } if (msg_hdr.seq - ssl->d1->handshake_read_seq > SSL_MAX_HANDSHAKE_FLIGHT) { // Ignore fragments too far in the future. skipped_fragments = true; continue; } DTLSIncomingMessage *frag = dtls1_get_incoming_message(ssl, out_alert, &msg_hdr); if (frag == nullptr) { return false; } assert(frag->msg_len() == msg_len); if (frag->reassembly.IsComplete()) { // The message is already assembled. continue; } assert(msg_len > 0); // Copy the body into the fragment. 
Span dest = frag->msg().subspan(frag_off, CBS_len(&body)); OPENSSL_memcpy(dest.data(), CBS_data(&body), CBS_len(&body)); frag->reassembly.MarkRange(frag_off, frag_off + frag_len); } if (implicit_ack) { dtls1_stop_timer(ssl); dtls_clear_outgoing_messages(ssl); } if (!skipped_fragments) { ssl->d1->records_to_ack.PushBack(record_number); if (ssl_has_final_version(ssl) && ssl_protocol_version(ssl) >= TLS1_3_VERSION && !ssl->d1->ack_timer.IsSet() && !ssl->d1->sending_ack) { // Schedule sending an ACK. The delay serves several purposes: // - If there are more records to come, we send only one ACK. // - If there are more records to come and the flight is now complete, we // will send the reply (which implicitly ACKs the previous flight) and // cancel the timer. // - If there are more records to come, the flight is now complete, but // generating the response is delayed (e.g. a slow, async private key), // the timer will fire and we send an ACK anyway. OPENSSL_timeval now = ssl_ctx_get_current_time(ssl->ctx.get()); ssl->d1->ack_timer.StartMicroseconds( now, uint64_t{ssl->d1->timeout_duration_ms} * 1000 / 4); } } return true; } ssl_open_record_t dtls1_open_handshake(SSL *ssl, size_t *out_consumed, uint8_t *out_alert, Span in) { uint8_t type; DTLSRecordNumber record_number; Span record; auto ret = dtls_open_record(ssl, &type, &record_number, &record, out_consumed, out_alert, in); if (ret != ssl_open_record_success) { return ret; } switch (type) { case SSL3_RT_APPLICATION_DATA: // In DTLS 1.2, out-of-order application data may be received between // ChangeCipherSpec and Finished. Discard it. return ssl_open_record_discard; case SSL3_RT_CHANGE_CIPHER_SPEC: if (record.size() != 1u || record[0] != SSL3_MT_CCS) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_CHANGE_CIPHER_SPEC); *out_alert = SSL_AD_ILLEGAL_PARAMETER; return ssl_open_record_error; } // We do not support renegotiation, so encrypted ChangeCipherSpec records // are illegal. 
if (record_number.epoch() != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); *out_alert = SSL_AD_UNEXPECTED_MESSAGE; return ssl_open_record_error; } // Ignore ChangeCipherSpec from a previous epoch. if (record_number.epoch() != ssl->d1->read_epoch.epoch) { return ssl_open_record_discard; } // Flag the ChangeCipherSpec for later. // TODO(crbug.com/42290594): Should we reject this in DTLS 1.3? ssl->d1->has_change_cipher_spec = true; ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_CHANGE_CIPHER_SPEC, record); return ssl_open_record_success; case SSL3_RT_ACK: return dtls1_process_ack(ssl, out_alert, record_number, record); case SSL3_RT_HANDSHAKE: if (!dtls1_process_handshake_fragments(ssl, out_alert, record_number, record)) { return ssl_open_record_error; } return ssl_open_record_success; default: OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); *out_alert = SSL_AD_UNEXPECTED_MESSAGE; return ssl_open_record_error; } } bool dtls1_get_message(const SSL *ssl, SSLMessage *out) { if (!dtls1_is_current_message_complete(ssl)) { return false; } size_t idx = ssl->d1->handshake_read_seq % SSL_MAX_HANDSHAKE_FLIGHT; const DTLSIncomingMessage *frag = ssl->d1->incoming_messages[idx].get(); out->type = frag->type; out->raw = CBS(frag->data); out->body = CBS(frag->msg()); out->is_v2_hello = false; if (!ssl->s3->has_message) { ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_HANDSHAKE, out->raw); ssl->s3->has_message = true; } return true; } void dtls1_next_message(SSL *ssl) { assert(ssl->s3->has_message); assert(dtls1_is_current_message_complete(ssl)); size_t index = ssl->d1->handshake_read_seq % SSL_MAX_HANDSHAKE_FLIGHT; ssl->d1->incoming_messages[index].reset(); ssl->d1->handshake_read_seq++; if (ssl->d1->handshake_read_seq == 0) { ssl->d1->handshake_read_overflow = true; } ssl->s3->has_message = false; // If we previously sent a flight, mark it as having a reply, so // |on_handshake_complete| can manage post-handshake retransmission. 
if (ssl->d1->outgoing_messages_complete) { ssl->d1->flight_has_reply = true; } } bool dtls_has_unprocessed_handshake_data(const SSL *ssl) { size_t current = ssl->d1->handshake_read_seq % SSL_MAX_HANDSHAKE_FLIGHT; for (size_t i = 0; i < SSL_MAX_HANDSHAKE_FLIGHT; i++) { // Skip the current message. if (ssl->s3->has_message && i == current) { assert(dtls1_is_current_message_complete(ssl)); continue; } if (ssl->d1->incoming_messages[i] != nullptr) { return true; } } return false; } bool dtls1_parse_fragment(CBS *cbs, struct hm_header_st *out_hdr, CBS *out_body) { OPENSSL_memset(out_hdr, 0x00, sizeof(struct hm_header_st)); if (!CBS_get_u8(cbs, &out_hdr->type) || !CBS_get_u24(cbs, &out_hdr->msg_len) || !CBS_get_u16(cbs, &out_hdr->seq) || !CBS_get_u24(cbs, &out_hdr->frag_off) || !CBS_get_u24(cbs, &out_hdr->frag_len) || !CBS_get_bytes(cbs, out_body, out_hdr->frag_len)) { return false; } return true; } ssl_open_record_t dtls1_open_change_cipher_spec(SSL *ssl, size_t *out_consumed, uint8_t *out_alert, Span in) { if (!ssl->d1->has_change_cipher_spec) { // dtls1_open_handshake processes both handshake and ChangeCipherSpec. auto ret = dtls1_open_handshake(ssl, out_consumed, out_alert, in); if (ret != ssl_open_record_success) { return ret; } } if (ssl->d1->has_change_cipher_spec) { ssl->d1->has_change_cipher_spec = false; return ssl_open_record_success; } return ssl_open_record_discard; } // Sending handshake messages. void dtls_clear_outgoing_messages(SSL *ssl) { ssl->d1->outgoing_messages.clear(); ssl->d1->sent_records = nullptr; ssl->d1->outgoing_written = 0; ssl->d1->outgoing_offset = 0; ssl->d1->outgoing_messages_complete = false; ssl->d1->flight_has_reply = false; ssl->d1->sending_flight = false; dtls_clear_unused_write_epochs(ssl); } void dtls_clear_unused_write_epochs(SSL *ssl) { ssl->d1->extra_write_epochs.EraseIf( [ssl](const UniquePtr &write_epoch) -> bool { // Non-current epochs may be discarded once there are no incomplete // outgoing messages that reference them. 
// // TODO(crbug.com/42290594): Epoch 1 (0-RTT) should be retained until // epoch 3 (app data) is available. for (const auto &msg : ssl->d1->outgoing_messages) { if (msg.epoch == write_epoch->epoch() && !msg.IsFullyAcked()) { return false; } } return true; }); } bool dtls1_init_message(const SSL *ssl, CBB *cbb, CBB *body, uint8_t type) { // Pick a modest size hint to save most of the |realloc| calls. if (!CBB_init(cbb, 64) || // !CBB_add_u8(cbb, type) || // !CBB_add_u24(cbb, 0 /* length (filled in later) */) || // !CBB_add_u16(cbb, ssl->d1->handshake_write_seq) || // !CBB_add_u24(cbb, 0 /* offset */) || // !CBB_add_u24_length_prefixed(cbb, body)) { return false; } return true; } bool dtls1_finish_message(const SSL *ssl, CBB *cbb, Array *out_msg) { if (!CBBFinishArray(cbb, out_msg) || out_msg->size() < DTLS1_HM_HEADER_LENGTH) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } // Fix up the header. Copy the fragment length into the total message // length. OPENSSL_memcpy(out_msg->data() + 1, out_msg->data() + DTLS1_HM_HEADER_LENGTH - 3, 3); return true; } // add_outgoing adds a new handshake message or ChangeCipherSpec to the current // outgoing flight. It returns true on success and false on error. static bool add_outgoing(SSL *ssl, bool is_ccs, Array data) { if (ssl->d1->outgoing_messages_complete) { // If we've begun writing a new flight, we received the peer flight. Discard // the timer and our flight. dtls1_stop_timer(ssl); dtls_clear_outgoing_messages(ssl); } if (!is_ccs) { if (ssl->d1->handshake_write_overflow) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); return false; } // TODO(svaldez): Move this up a layer to fix abstraction for SSLTranscript // on hs. 
if (ssl->s3->hs != NULL && !ssl->s3->hs->transcript.Update(data)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } ssl->d1->handshake_write_seq++; if (ssl->d1->handshake_write_seq == 0) { ssl->d1->handshake_write_overflow = true; } } DTLSOutgoingMessage msg; msg.data = std::move(data); msg.epoch = ssl->d1->write_epoch.epoch(); msg.is_ccs = is_ccs; // Zero-length messages need 1 bit to track whether the peer has received the // message header. (Normally the message header is implicitly received when // any fragment of the message is received at all.) if (!is_ccs && !msg.acked.Init(std::max(msg.msg_len(), size_t{1}))) { return false; } // This should not fail if |SSL_MAX_HANDSHAKE_FLIGHT| was sized correctly. // // TODO(crbug.com/42290594): This can currently fail in DTLS 1.3. The caller // can configure how many tickets to send, up to kMaxTickets. Additionally, if // we send 0.5-RTT tickets in 0-RTT, we may even have tickets queued up with // the server flight. if (!ssl->d1->outgoing_messages.TryPushBack(std::move(msg))) { assert(false); OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } return true; } bool dtls1_add_message(SSL *ssl, Array data) { return add_outgoing(ssl, false /* handshake */, std::move(data)); } bool dtls1_add_change_cipher_spec(SSL *ssl) { // DTLS 1.3 disables compatibility mode, which means that DTLS 1.3 never sends // a ChangeCipherSpec message. if (ssl_protocol_version(ssl) > TLS1_2_VERSION) { return true; } return add_outgoing(ssl, true /* ChangeCipherSpec */, Array()); } // dtls1_update_mtu updates the current MTU from the BIO, ensuring it is above // the minimum. static void dtls1_update_mtu(SSL *ssl) { // TODO(davidben): No consumer implements |BIO_CTRL_DGRAM_SET_MTU| and the // only |BIO_CTRL_DGRAM_QUERY_MTU| implementation could use // |SSL_set_mtu|. Does this need to be so complex? 
if (ssl->d1->mtu < dtls1_min_mtu() && !(SSL_get_options(ssl) & SSL_OP_NO_QUERY_MTU)) { long mtu = BIO_ctrl(ssl->wbio.get(), BIO_CTRL_DGRAM_QUERY_MTU, 0, NULL); if (mtu >= 0 && mtu <= (1 << 30) && (unsigned)mtu >= dtls1_min_mtu()) { ssl->d1->mtu = (unsigned)mtu; } else { ssl->d1->mtu = kDefaultMTU; BIO_ctrl(ssl->wbio.get(), BIO_CTRL_DGRAM_SET_MTU, ssl->d1->mtu, NULL); } } // The MTU should be above the minimum now. assert(ssl->d1->mtu >= dtls1_min_mtu()); } enum seal_result_t { seal_error, seal_continue, seal_flush, }; // seal_next_record seals one record's worth of messages to |out| and advances // |ssl|'s internal state past the data that was sealed. If progress was made, // it returns |seal_flush| or |seal_continue| and sets // |*out_len| to the number of bytes written. // // If the function stopped because the next message could not be combined into // this record, it returns |seal_continue| and the caller should loop again. // Otherwise, it returns |seal_flush| and the packet is complete (either because // there are no more messages or the packet is full). static seal_result_t seal_next_record(SSL *ssl, Span out, size_t *out_len) { *out_len = 0; // Skip any fully acked messages. while (ssl->d1->outgoing_written < ssl->d1->outgoing_messages.size() && ssl->d1->outgoing_messages[ssl->d1->outgoing_written].IsFullyAcked()) { ssl->d1->outgoing_offset = 0; ssl->d1->outgoing_written++; } // There was nothing left to write. if (ssl->d1->outgoing_written >= ssl->d1->outgoing_messages.size()) { return seal_flush; } const auto &first_msg = ssl->d1->outgoing_messages[ssl->d1->outgoing_written]; size_t prefix_len = dtls_seal_prefix_len(ssl, first_msg.epoch); size_t max_in_len = dtls_seal_max_input_len(ssl, first_msg.epoch, out.size()); if (max_in_len == 0) { // There is no room for a single record. 
return seal_flush; } if (first_msg.is_ccs) { static const uint8_t kChangeCipherSpec[1] = {SSL3_MT_CCS}; DTLSRecordNumber record_number; if (!dtls_seal_record(ssl, &record_number, out.data(), out_len, out.size(), SSL3_RT_CHANGE_CIPHER_SPEC, kChangeCipherSpec, sizeof(kChangeCipherSpec), first_msg.epoch)) { return seal_error; } ssl_do_msg_callback(ssl, /*is_write=*/1, SSL3_RT_CHANGE_CIPHER_SPEC, kChangeCipherSpec); ssl->d1->outgoing_offset = 0; ssl->d1->outgoing_written++; return seal_continue; } // TODO(crbug.com/374991962): For now, only send one message per record in // epoch 0. Sending multiple is allowed and more efficient, but breaks // b/378742138. const bool allow_multiple_messages = first_msg.epoch != 0; // Pack as many handshake fragments into one record as we can. We stage the // fragments in the output buffer, to be sealed in-place. bool should_continue = false; Span fragments = out.subspan(prefix_len, max_in_len); CBB cbb; CBB_init_fixed(&cbb, fragments.data(), fragments.size()); DTLSSentRecord sent_record; sent_record.first_msg = ssl->d1->outgoing_written; sent_record.first_msg_start = ssl->d1->outgoing_offset; while (ssl->d1->outgoing_written < ssl->d1->outgoing_messages.size()) { const auto &msg = ssl->d1->outgoing_messages[ssl->d1->outgoing_written]; if (msg.epoch != first_msg.epoch || msg.is_ccs) { // We can only pack messages if the epoch matches. There may be more room // in the packet, so tell the caller to keep going. should_continue = true; break; } // Decode |msg|'s header. CBS cbs(msg.data), body_cbs; struct hm_header_st hdr; if (!dtls1_parse_fragment(&cbs, &hdr, &body_cbs) || // hdr.frag_off != 0 || // hdr.frag_len != CBS_len(&body_cbs) || // hdr.msg_len != CBS_len(&body_cbs) || // CBS_len(&cbs) != 0) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return seal_error; } // Iterate over every un-acked range in the message, if any. 
Span body = body_cbs; for (;;) { auto range = msg.acked.NextUnmarkedRange(ssl->d1->outgoing_offset); if (range.empty()) { // Advance to the next message. ssl->d1->outgoing_offset = 0; ssl->d1->outgoing_written++; break; } // Determine how much progress can be made (minimum one byte of progress). size_t capacity = fragments.size() - CBB_len(&cbb); if (capacity < DTLS1_HM_HEADER_LENGTH + 1) { goto packet_full; } size_t todo = std::min(range.size(), capacity - DTLS1_HM_HEADER_LENGTH); // Empty messages are special-cased in ACK tracking. We act as if they // have one byte, but in reality that byte is tracking the header. Span frag; if (!body.empty()) { frag = body.subspan(range.start, todo); } // Assemble the fragment. size_t frag_start = CBB_len(&cbb); CBB child; if (!CBB_add_u8(&cbb, hdr.type) || // !CBB_add_u24(&cbb, hdr.msg_len) || // !CBB_add_u16(&cbb, hdr.seq) || // !CBB_add_u24(&cbb, range.start) || // !CBB_add_u24_length_prefixed(&cbb, &child) || // !CBB_add_bytes(&child, frag.data(), frag.size()) || // !CBB_flush(&cbb)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return seal_error; } size_t frag_end = CBB_len(&cbb); // TODO(davidben): It is odd that, on output, we inform the caller of // retransmits and individual fragments, but on input we only inform the // caller of complete messages. ssl_do_msg_callback(ssl, /*is_write=*/1, SSL3_RT_HANDSHAKE, fragments.subspan(frag_start, frag_end - frag_start)); ssl->d1->outgoing_offset = range.start + todo; if (todo < range.size()) { // The packet was the limiting factor. goto packet_full; } } if (!allow_multiple_messages) { should_continue = true; break; } } packet_full: sent_record.last_msg = ssl->d1->outgoing_written; sent_record.last_msg_end = ssl->d1->outgoing_offset; // We could not fit anything. Don't try to make a record. 
if (CBB_len(&cbb) == 0) { assert(!should_continue); return seal_flush; } if (!dtls_seal_record(ssl, &sent_record.number, out.data(), out_len, out.size(), SSL3_RT_HANDSHAKE, CBB_data(&cbb), CBB_len(&cbb), first_msg.epoch)) { return seal_error; } // If DTLS 1.3 (or if the version is not yet known and it may be DTLS 1.3), // save the record number to match against ACKs later. if (ssl->s3->version == 0 || ssl_protocol_version(ssl) >= TLS1_3_VERSION) { if (ssl->d1->sent_records == nullptr) { ssl->d1->sent_records = MakeUnique>(); if (ssl->d1->sent_records == nullptr) { return seal_error; } } ssl->d1->sent_records->PushBack(sent_record); } return should_continue ? seal_continue : seal_flush; } // seal_next_packet writes as much of the next flight as possible to |out| and // advances |ssl->d1->outgoing_written| and |ssl->d1->outgoing_offset| as // appropriate. static bool seal_next_packet(SSL *ssl, Span out, size_t *out_len) { size_t total = 0; for (;;) { size_t len; seal_result_t ret = seal_next_record(ssl, out, &len); switch (ret) { case seal_error: return false; case seal_flush: case seal_continue: out = out.subspan(len); total += len; break; } if (ret == seal_flush) { break; } } *out_len = total; return true; } static int send_flight(SSL *ssl) { if (ssl->s3->write_shutdown != ssl_shutdown_none) { OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN); return -1; } if (ssl->wbio == nullptr) { OPENSSL_PUT_ERROR(SSL, SSL_R_BIO_NOT_SET); return -1; } if (ssl->d1->num_timeouts > DTLS1_MAX_TIMEOUTS) { OPENSSL_PUT_ERROR(SSL, SSL_R_READ_TIMEOUT_EXPIRED); return -1; } dtls1_update_mtu(ssl); Array packet; if (!packet.InitForOverwrite(ssl->d1->mtu)) { return -1; } while (ssl->d1->outgoing_written < ssl->d1->outgoing_messages.size()) { uint8_t old_written = ssl->d1->outgoing_written; uint32_t old_offset = ssl->d1->outgoing_offset; size_t packet_len; if (!seal_next_packet(ssl, Span(packet), &packet_len)) { return -1; } if (packet_len == 0 && ssl->d1->outgoing_written < 
ssl->d1->outgoing_messages.size()) { // We made no progress with the packet size available, but did not reach // the end. OPENSSL_PUT_ERROR(SSL, SSL_R_MTU_TOO_SMALL); return false; } if (packet_len != 0) { int bio_ret = BIO_write(ssl->wbio.get(), packet.data(), packet_len); if (bio_ret <= 0) { // Retry this packet the next time around. ssl->d1->outgoing_written = old_written; ssl->d1->outgoing_offset = old_offset; ssl->s3->rwstate = SSL_ERROR_WANT_WRITE; return bio_ret; } } } if (BIO_flush(ssl->wbio.get()) <= 0) { ssl->s3->rwstate = SSL_ERROR_WANT_WRITE; return -1; } return 1; } void dtls1_finish_flight(SSL *ssl) { if (ssl->d1->outgoing_messages.empty() || ssl->d1->outgoing_messages_complete) { return; // Nothing to do. } if (ssl->d1->outgoing_messages[0].epoch <= 2) { // DTLS 1.3 handshake messages (epoch 2 and below) implicitly ACK the // previous flight, so there is no need to ACK previous records. This // clears the ACK buffer slightly earlier than the specification suggests. // See the discussion in // https://mailarchive.ietf.org/arch/msg/tls/kjJnquJOVaWxu5hUCmNzB35eqY0/ ssl->d1->records_to_ack.Clear(); ssl->d1->ack_timer.Stop(); ssl->d1->sending_ack = false; } ssl->d1->outgoing_messages_complete = true; ssl->d1->sending_flight = true; // Stop retransmitting the previous flight. In DTLS 1.3, we'll have stopped // the timer already, but DTLS 1.2 keeps it running until the next flight is // ready. dtls1_stop_timer(ssl); } void dtls1_schedule_ack(SSL *ssl) { ssl->d1->ack_timer.Stop(); ssl->d1->sending_ack = !ssl->d1->records_to_ack.empty(); } static int send_ack(SSL *ssl) { assert(ssl_protocol_version(ssl) >= TLS1_3_VERSION); // Ensure we don't send so many ACKs that we overflow the MTU. There is a // 2-byte length prefix and each ACK is 16 bytes. 
dtls1_update_mtu(ssl); size_t max_plaintext = dtls_seal_max_input_len(ssl, ssl->d1->write_epoch.epoch(), ssl->d1->mtu); if (max_plaintext < 2 + 16) { OPENSSL_PUT_ERROR(SSL, SSL_R_MTU_TOO_SMALL); // No room for even one ACK. return -1; } size_t num_acks = std::min((max_plaintext - 2) / 16, ssl->d1->records_to_ack.size()); // Assemble the ACK. RFC 9147 says to sort ACKs numerically. It is unclear if // other implementations do this, but go ahead and sort for now. See // https://mailarchive.ietf.org/arch/msg/tls/kjJnquJOVaWxu5hUCmNzB35eqY0/. // Remove this if rfc9147bis removes this requirement. InplaceVector sorted; for (size_t i = ssl->d1->records_to_ack.size() - num_acks; i < ssl->d1->records_to_ack.size(); i++) { sorted.PushBack(ssl->d1->records_to_ack[i]); } std::sort(sorted.begin(), sorted.end()); uint8_t buf[2 + 16 * DTLS_MAX_ACK_BUFFER]; CBB cbb, child; CBB_init_fixed(&cbb, buf, sizeof(buf)); BSSL_CHECK(CBB_add_u16_length_prefixed(&cbb, &child)); for (const auto &number : sorted) { BSSL_CHECK(CBB_add_u64(&child, number.epoch())); BSSL_CHECK(CBB_add_u64(&child, number.sequence())); } BSSL_CHECK(CBB_flush(&cbb)); // Encrypt it. uint8_t record[DTLS1_3_RECORD_HEADER_WRITE_LENGTH + sizeof(buf) + 1 /* record type */ + EVP_AEAD_MAX_OVERHEAD]; size_t record_len; DTLSRecordNumber record_number; if (!dtls_seal_record(ssl, &record_number, record, &record_len, sizeof(record), SSL3_RT_ACK, CBB_data(&cbb), CBB_len(&cbb), ssl->d1->write_epoch.epoch())) { return -1; } ssl_do_msg_callback(ssl, /*is_write=*/1, SSL3_RT_ACK, Span(CBB_data(&cbb), CBB_len(&cbb))); int bio_ret = BIO_write(ssl->wbio.get(), record, static_cast(record_len)); if (bio_ret <= 0) { ssl->s3->rwstate = SSL_ERROR_WANT_WRITE; return bio_ret; } if (BIO_flush(ssl->wbio.get()) <= 0) { ssl->s3->rwstate = SSL_ERROR_WANT_WRITE; return -1; } return 1; } int dtls1_flush(SSL *ssl) { // Send the pending ACK, if any. 
if (ssl->d1->sending_ack) { int ret = send_ack(ssl); if (ret <= 0) { return ret; } ssl->d1->sending_ack = false; } // Send the pending flight, if any. if (ssl->d1->sending_flight) { int ret = send_flight(ssl); if (ret <= 0) { return ret; } // Reset state for the next send. ssl->d1->outgoing_written = 0; ssl->d1->outgoing_offset = 0; ssl->d1->sending_flight = false; // Schedule the next retransmit timer. In DTLS 1.3, we retransmit all // flights until ACKed. In DTLS 1.2, the final Finished flight is never // ACKed, so we do not keep the timer running after the handshake. if (SSL_in_init(ssl) || ssl_protocol_version(ssl) >= TLS1_3_VERSION) { if (ssl->d1->num_timeouts == 0) { ssl->d1->timeout_duration_ms = ssl->initial_timeout_duration_ms; } else { ssl->d1->timeout_duration_ms = std::min(ssl->d1->timeout_duration_ms * 2, uint32_t{60000}); } OPENSSL_timeval now = ssl_ctx_get_current_time(ssl->ctx.get()); ssl->d1->retransmit_timer.StartMicroseconds( now, uint64_t{ssl->d1->timeout_duration_ms} * 1000); } } return 1; } unsigned int dtls1_min_mtu(void) { return kMinMTU; } BSSL_NAMESPACE_END ================================================ FILE: Sources/CNIOBoringSSL/ssl/d1_lib.cc ================================================ /* * Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html */

// NOTE(review): the bare "#include" directives below lost their header names
// during text extraction (as did angle-bracket template arguments throughout
// this vendored file); restore them from upstream BoringSSL.
#include
#include
#include
#include
#include
#include
#include
#include "../crypto/internal.h"
#include "internal.h"

BSSL_NAMESPACE_BEGIN

// Zero-initialize the DTLS-specific handshake flags; the epochs are set up in
// |Init| because AEAD construction can fail.
DTLS1_STATE::DTLS1_STATE()
    : has_change_cipher_spec(false),
      outgoing_messages_complete(false),
      flight_has_reply(false),
      handshake_write_overflow(false),
      handshake_read_overflow(false),
      sending_flight(false),
      sending_ack(false),
      queued_key_update(QueuedKeyUpdate::kNone) {}

DTLS1_STATE::~DTLS1_STATE() {}

// Installs null-cipher AEAD contexts for the initial plaintext read and write
// epochs. Returns false on allocation failure.
bool DTLS1_STATE::Init() {
  // Set up the initial epochs.
  read_epoch.aead = SSLAEADContext::CreateNullCipher();
  write_epoch.aead = SSLAEADContext::CreateNullCipher();
  if (read_epoch.aead == nullptr || write_epoch.aead == nullptr) {
    return false;
  }
  return true;
}

bool dtls1_new(SSL *ssl) {
  if (!tls_new(ssl)) {
    return false;
  }
  UniquePtr d1 = MakeUnique();
  if (!d1 || !d1->Init()) {
    tls_free(ssl);
    return false;
  }
  ssl->d1 = d1.release();
  return true;
}

void dtls1_free(SSL *ssl) {
  tls_free(ssl);
  if (ssl == NULL) {
    return;
  }
  Delete(ssl->d1);
  ssl->d1 = NULL;
}

// Arms the timer to expire |microseconds| after |now|, stopping it instead if
// the expiry time would overflow.
void DTLSTimer::StartMicroseconds(OPENSSL_timeval now, uint64_t microseconds) {
  uint64_t seconds = microseconds / 1000000;
  microseconds %= 1000000;

  now.tv_usec += microseconds;
  if (now.tv_usec >= 1000000) {
    now.tv_usec -= 1000000;
    seconds++;
  }

  if (now.tv_sec > UINT64_MAX - seconds) {
    // On overflow, fail closed by stopping the timer rather than wrapping.
    Stop();
    return;
  }
  now.tv_sec += seconds;

  expire_time_ = now;
}

void DTLSTimer::Stop() { expire_time_ = {0, 0}; }

bool DTLSTimer::IsExpired(OPENSSL_timeval now) const {
  return MicrosecondsRemaining(now) == 0;
}

// A zeroed |expire_time_| doubles as the "not set" sentinel.
bool DTLSTimer::IsSet() const {
  return expire_time_.tv_sec != 0 || expire_time_.tv_usec != 0;
}

// Returns microseconds until expiry, 0 if already expired (or within 15 ms),
// or |kNever| if the timer is not set or the result would overflow.
uint64_t DTLSTimer::MicrosecondsRemaining(OPENSSL_timeval now) const {
  if (!IsSet()) {
    return kNever;
  }

  // Expired already?
  if (now.tv_sec > expire_time_.tv_sec ||
      (now.tv_sec == expire_time_.tv_sec &&
       now.tv_usec >= expire_time_.tv_usec)) {
    return 0;
  }

  uint64_t sec = expire_time_.tv_sec - now.tv_sec;
  uint32_t usec;
  if (expire_time_.tv_usec >= now.tv_usec) {
    usec = expire_time_.tv_usec - now.tv_usec;
  } else {
    // Borrow one second for the microsecond subtraction.
    sec--;
    usec = expire_time_.tv_usec + 1000000 - now.tv_usec;
  }

  // If remaining time is less than 15 ms, return 0 to prevent issues because of
  // small divergences with socket timeouts.
  if (sec == 0 && usec < 15000) {
    return 0;
  }

  if (sec > UINT64_MAX / 1000000) {
    return kNever;
  }
  sec *= 1000000;
  if (sec > UINT64_MAX - usec) {
    return kNever;
  }
  return sec + usec;
}

// Resets retransmit state: clears the timeout count, stops the timer, and
// restores the initial timeout duration.
void dtls1_stop_timer(SSL *ssl) {
  ssl->d1->num_timeouts = 0;
  ssl->d1->retransmit_timer.Stop();
  ssl->d1->timeout_duration_ms = ssl->initial_timeout_duration_ms;
}

BSSL_NAMESPACE_END

using namespace bssl;

void DTLSv1_set_initial_timeout_duration(SSL *ssl, uint32_t duration_ms) {
  ssl->initial_timeout_duration_ms = duration_ms;
}

int DTLSv1_get_timeout(const SSL *ssl, struct timeval *out) {
  if (!SSL_is_dtls(ssl)) {
    return 0;
  }

  OPENSSL_timeval now = ssl_ctx_get_current_time(ssl->ctx.get());
  // Report whichever of the retransmit and ACK timers fires sooner.
  uint64_t remaining_usec =
      ssl->d1->retransmit_timer.MicrosecondsRemaining(now);
  remaining_usec =
      std::min(remaining_usec, ssl->d1->ack_timer.MicrosecondsRemaining(now));
  if (remaining_usec == DTLSTimer::kNever) {
    return 0;  // No timeout is set.
  }

  uint64_t remaining_sec = remaining_usec / 1000000;
  remaining_usec %= 1000000;

  // |timeval| uses |time_t|, which may be 32-bit.
  // NOTE(review): the decltype/static_cast template arguments on the next
  // three lines were stripped during extraction; restore from upstream.
  const auto kTvSecMax = std::numeric_limitstv_sec)>::max();
  if (remaining_sec > static_cast(kTvSecMax)) {
    out->tv_sec = kTvSecMax;  // Saturate the output.
    out->tv_usec = 999999;
  } else {
    out->tv_sec = static_casttv_sec)>(remaining_sec);
  }
  // NOTE(review): this unconditional assignment overwrites the 999999 written
  // in the saturating branch above — in upstream BoringSSL it sits inside the
  // |else|; verify before relying on this code.
  out->tv_usec = remaining_usec;

  return 1;
}

int DTLSv1_handle_timeout(SSL *ssl) {
  ssl_reset_error_state(ssl);

  if (!SSL_is_dtls(ssl)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return -1;
  }

  if (!ssl->d1->ack_timer.IsSet() && !ssl->d1->retransmit_timer.IsSet()) {
    // No timers are running. Don't bother querying the clock.
    return 0;
  }

  OPENSSL_timeval now = ssl_ctx_get_current_time(ssl->ctx.get());
  bool any_timer_expired = false;
  // An expired ACK timer means a delayed ACK is now due.
  if (ssl->d1->ack_timer.IsExpired(now)) {
    any_timer_expired = true;
    ssl->d1->sending_ack = true;
    ssl->d1->ack_timer.Stop();
  }

  // An expired retransmit timer means the flight must be sent again.
  if (ssl->d1->retransmit_timer.IsExpired(now)) {
    any_timer_expired = true;
    ssl->d1->sending_flight = true;
    ssl->d1->retransmit_timer.Stop();
    ssl->d1->num_timeouts++;
    // Reduce MTU after 2 unsuccessful retransmissions.
    if (ssl->d1->num_timeouts > DTLS1_MTU_TIMEOUTS &&
        !(SSL_get_options(ssl) & SSL_OP_NO_QUERY_MTU)) {
      long mtu = BIO_ctrl(ssl->wbio.get(), BIO_CTRL_DGRAM_GET_FALLBACK_MTU, 0,
                          nullptr);
      if (mtu >= 0 && mtu <= (1 << 30) && (unsigned)mtu >= dtls1_min_mtu()) {
        ssl->d1->mtu = (unsigned)mtu;
      }
    }
  }

  if (!any_timer_expired) {
    return 0;
  }
  return dtls1_flush(ssl);
}



================================================
FILE: Sources/CNIOBoringSSL/ssl/d1_pkt.cc
================================================
/*
 * Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the bare "#include" directives below lost their header names
// during text extraction; restore them from upstream BoringSSL.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "../crypto/internal.h"
#include "internal.h"

BSSL_NAMESPACE_BEGIN

// dtls1_process_ack processes a DTLS 1.3 ACK record: it marks the ACKed
// ranges of the outgoing flight and, once the whole flight is ACKed, stops
// retransmission and releases any deferred key update.
ssl_open_record_t dtls1_process_ack(SSL *ssl, uint8_t *out_alert,
                                    DTLSRecordNumber ack_record_number,
                                    Span data) {
  // As a DTLS-1.3-capable client, it is possible to receive an ACK before we
  // receive ServerHello and learned the server picked DTLS 1.3. Thus, tolerate
  // but ignore ACKs before the version is set.
  if (!ssl_has_final_version(ssl)) {
    return ssl_open_record_discard;
  }

  // ACKs are only allowed in DTLS 1.3. Reject them if we've negotiated a
  // version and it's not 1.3.
  if (ssl_protocol_version(ssl) < TLS1_3_VERSION) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD);
    *out_alert = SSL_AD_UNEXPECTED_MESSAGE;
    return ssl_open_record_error;
  }

  CBS cbs = data, record_numbers;
  if (!CBS_get_u16_length_prefixed(&cbs, &record_numbers) ||
      CBS_len(&cbs) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    *out_alert = SSL_AD_DECODE_ERROR;
    return ssl_open_record_error;
  }

  while (CBS_len(&record_numbers) != 0) {
    uint64_t epoch, seq;
    if (!CBS_get_u64(&record_numbers, &epoch) ||
        !CBS_get_u64(&record_numbers, &seq)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      *out_alert = SSL_AD_DECODE_ERROR;
      return ssl_open_record_error;
    }

    // During the handshake, records must be ACKed at the same or higher epoch.
    // See https://www.rfc-editor.org/errata/eid8108. Additionally, if the
    // record does not fit in DTLSRecordNumber, it is definitely not a record
    // number that we sent.
    if ((ack_record_number.epoch() < ssl_encryption_application &&
         epoch > ack_record_number.epoch()) ||
        epoch > UINT16_MAX || seq > DTLSRecordNumber::kMaxSequence) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      *out_alert = SSL_AD_ILLEGAL_PARAMETER;
      return ssl_open_record_error;
    }

    // Find the sent record that matches this ACK.
    // NOTE(review): the cast's template argument was stripped in extraction.
    DTLSRecordNumber number(static_cast(epoch), seq);
    DTLSSentRecord *sent_record = nullptr;
    if (ssl->d1->sent_records != nullptr) {
      for (size_t i = 0; i < ssl->d1->sent_records->size(); i++) {
        if ((*ssl->d1->sent_records)[i].number == number) {
          sent_record = &(*ssl->d1->sent_records)[i];
          break;
        }
      }
    }
    if (sent_record == nullptr) {
      // We may have sent this record and forgotten it, so this is not an
      // error.
      continue;
    }

    // Mark each message as ACKed. A record may span several messages; the
    // first and last are partially covered, interior ones fully.
    if (sent_record->first_msg == sent_record->last_msg) {
      ssl->d1->outgoing_messages[sent_record->first_msg].acked.MarkRange(
          sent_record->first_msg_start, sent_record->last_msg_end);
    } else {
      ssl->d1->outgoing_messages[sent_record->first_msg].acked.MarkRange(
          sent_record->first_msg_start, SIZE_MAX);
      for (size_t i = size_t{sent_record->first_msg} + 1;
           i < sent_record->last_msg; i++) {
        ssl->d1->outgoing_messages[i].acked.MarkRange(0, SIZE_MAX);
      }
      if (sent_record->last_msg_end != 0) {
        ssl->d1->outgoing_messages[sent_record->last_msg].acked.MarkRange(
            0, sent_record->last_msg_end);
      }
    }

    // Clear the state so we don't bother re-marking the messages next time.
    sent_record->first_msg = 0;
    sent_record->first_msg_start = 0;
    sent_record->last_msg = 0;
    sent_record->last_msg_end = 0;
  }

  // If the outgoing flight is now fully ACKed, we are done retransmitting.
  if (std::all_of(ssl->d1->outgoing_messages.begin(),
                  ssl->d1->outgoing_messages.end(),
                  [](const auto &msg) { return msg.IsFullyAcked(); })) {
    dtls1_stop_timer(ssl);
    dtls_clear_outgoing_messages(ssl);

    // DTLS 1.3 defers the key update to when the message is ACKed.
    if (ssl->s3->key_update_pending) {
      if (!tls13_rotate_traffic_key(ssl, evp_aead_seal)) {
        return ssl_open_record_error;
      }
      ssl->s3->key_update_pending = false;
    }

    // Check for deferred messages.
    if (ssl->d1->queued_key_update != QueuedKeyUpdate::kNone) {
      int request_type =
          ssl->d1->queued_key_update == QueuedKeyUpdate::kUpdateRequested
              ? SSL_KEY_UPDATE_REQUESTED
              : SSL_KEY_UPDATE_NOT_REQUESTED;
      ssl->d1->queued_key_update = QueuedKeyUpdate::kNone;
      if (!tls13_add_key_update(ssl, request_type)) {
        return ssl_open_record_error;
      }
    }
  } else {
    // We may still be able to drop unused write epochs.
    dtls_clear_unused_write_epochs(ssl);
    // TODO(crbug.com/42290594): Schedule a retransmit. The peer will have
    // waited before sending the ACK, so a partial ACK suggests packet loss.
  }

  ssl_do_msg_callback(ssl, /*is_write=*/0, SSL3_RT_ACK, data);
  return ssl_open_record_discard;
}

ssl_open_record_t dtls1_open_app_data(SSL *ssl, Span *out,
                                      size_t *out_consumed, uint8_t *out_alert,
                                      Span in) {
  assert(!SSL_in_init(ssl));

  uint8_t type;
  DTLSRecordNumber record_number;
  Span record;
  auto ret = dtls_open_record(ssl, &type, &record_number, &record,
                              out_consumed, out_alert, in);
  if (ret != ssl_open_record_success) {
    return ret;
  }

  if (type == SSL3_RT_HANDSHAKE) {
    // Process handshake fragments for DTLS 1.3 post-handshake messages.
    if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) {
      if (!dtls1_process_handshake_fragments(ssl, out_alert, record_number,
                                             record)) {
        return ssl_open_record_error;
      }
      return ssl_open_record_discard;
    }

    // Parse the first fragment header to determine if this is a pre-CCS or
    // post-CCS handshake record. DTLS resets handshake message numbers on each
    // handshake, so renegotiations and retransmissions are ambiguous.
    //
    // TODO(crbug.com/42290594): Move this logic into
    // |dtls1_process_handshake_fragments| and integrate it into DTLS 1.3
    // retransmit conditions.
    CBS cbs, body;
    struct hm_header_st msg_hdr;
    CBS_init(&cbs, record.data(), record.size());
    if (!dtls1_parse_fragment(&cbs, &msg_hdr, &body)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_HANDSHAKE_RECORD);
      *out_alert = SSL_AD_DECODE_ERROR;
      return ssl_open_record_error;
    }

    if (msg_hdr.type == SSL3_MT_FINISHED &&
        msg_hdr.seq == ssl->d1->handshake_read_seq - 1) {
      if (!ssl->d1->sending_flight && msg_hdr.frag_off == 0) {
        // Retransmit our last flight of messages. If the peer sends the second
        // Finished, they may not have received ours. Only do this for the
        // first fragment, in case the Finished was fragmented.
        //
        // This is not really a timeout, but increment the timeout count so we
        // eventually give up.
        ssl->d1->num_timeouts++;
        ssl->d1->sending_flight = true;
      }
      return ssl_open_record_discard;
    }

    // Otherwise, this is a pre-CCS handshake message from an unsupported
    // renegotiation attempt.
    // Fall through to the error path.
  }

  if (type == SSL3_RT_ACK) {
    return dtls1_process_ack(ssl, out_alert, record_number, record);
  }

  if (type != SSL3_RT_APPLICATION_DATA) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD);
    *out_alert = SSL_AD_UNEXPECTED_MESSAGE;
    return ssl_open_record_error;
  }

  if (record.empty()) {
    return ssl_open_record_discard;
  }

  *out = record;
  return ssl_open_record_success;
}

// Writes |in| as a single application-data record. Returns 1 on success and
// zero or a negative number on error.
int dtls1_write_app_data(SSL *ssl, bool *out_needs_handshake,
                         size_t *out_bytes_written, Span in) {
  assert(!SSL_in_init(ssl));
  *out_needs_handshake = false;

  if (ssl->s3->write_shutdown != ssl_shutdown_none) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN);
    return -1;
  }

  // DTLS does not split the input across records.
  if (in.size() > SSL3_RT_MAX_PLAIN_LENGTH) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_DTLS_MESSAGE_TOO_BIG);
    return -1;
  }

  if (in.empty()) {
    *out_bytes_written = 0;
    return 1;
  }

  // TODO(crbug.com/381113363): Use the 0-RTT epoch if writing 0-RTT.
  int ret = dtls1_write_record(ssl, SSL3_RT_APPLICATION_DATA, in,
                               ssl->d1->write_epoch.epoch());
  if (ret <= 0) {
    return ret;
  }
  *out_bytes_written = in.size();
  return 1;
}

// Seals |in| into one record under |epoch|'s keys and flushes it to the
// transport. Returns 1 on success and zero or a negative number on error.
int dtls1_write_record(SSL *ssl, int type, Span in,
                       uint16_t epoch) {
  SSLBuffer *buf = &ssl->s3->write_buffer;
  assert(in.size() <= SSL3_RT_MAX_PLAIN_LENGTH);
  // There should never be a pending write buffer in DTLS. One can't write half
  // a datagram, so the write buffer is always dropped in
  // |ssl_write_buffer_flush|.
  assert(buf->empty());

  if (in.size() > SSL3_RT_MAX_PLAIN_LENGTH) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return -1;
  }

  DTLSRecordNumber record_number;
  size_t ciphertext_len;
  if (!buf->EnsureCap(dtls_seal_prefix_len(ssl, epoch),
                      in.size() + SSL_max_seal_overhead(ssl)) ||
      !dtls_seal_record(ssl, &record_number, buf->remaining().data(),
                        &ciphertext_len, buf->remaining().size(), type,
                        in.data(), in.size(), epoch)) {
    buf->Clear();
    return -1;
  }
  buf->DidWrite(ciphertext_len);

  int ret = ssl_write_buffer_flush(ssl);
  if (ret <= 0) {
    return ret;
  }
  return 1;
}

int dtls1_dispatch_alert(SSL *ssl) {
  int ret = dtls1_write_record(ssl, SSL3_RT_ALERT, ssl->s3->send_alert,
                               ssl->d1->write_epoch.epoch());
  if (ret <= 0) {
    return ret;
  }
  ssl->s3->alert_dispatch = false;

  // If the alert is fatal, flush the BIO now.
  if (ssl->s3->send_alert[0] == SSL3_AL_FATAL) {
    BIO_flush(ssl->wbio.get());
  }

  ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_ALERT, ssl->s3->send_alert);

  int alert = (ssl->s3->send_alert[0] << 8) | ssl->s3->send_alert[1];
  ssl_do_info_callback(ssl, SSL_CB_WRITE_ALERT, alert);

  return 1;
}

BSSL_NAMESPACE_END



================================================
FILE: Sources/CNIOBoringSSL/ssl/d1_srtp.cc
================================================
/*
 * Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/* DTLS code by Eric Rescorla
   Copyright (C) 2006, Network Resonance, Inc.
   Copyright (C) 2011, RTFM, Inc.
*/

// NOTE(review): the bare "#include" directives below lost their header names
// during text extraction; restore them from upstream BoringSSL.
#include
#include
#include
#include
#include "internal.h"

using namespace bssl;

// The SRTP protection profiles this implementation supports, terminated by a
// zero entry.
static const SRTP_PROTECTION_PROFILE kSRTPProfiles[] = {
    {"SRTP_AES128_CM_SHA1_80", SRTP_AES128_CM_SHA1_80},
    {"SRTP_AES128_CM_SHA1_32", SRTP_AES128_CM_SHA1_32},
    {"SRTP_AEAD_AES_128_GCM", SRTP_AEAD_AES_128_GCM},
    {"SRTP_AEAD_AES_256_GCM", SRTP_AEAD_AES_256_GCM},
    {0, 0},
};

// find_profile_by_name looks up the profile whose name matches the first
// |len| bytes of |profile_name| and writes it to |*pptr|. Returns 1 on match
// and 0 otherwise.
static int find_profile_by_name(const char *profile_name,
                                const SRTP_PROTECTION_PROFILE **pptr,
                                size_t len) {
  const SRTP_PROTECTION_PROFILE *p = kSRTPProfiles;
  while (p->name) {
    if (len == strlen(p->name) && !strncmp(p->name, profile_name, len)) {
      *pptr = p;
      return 1;
    }
    p++;
  }
  return 0;
}

// ssl_ctx_make_profiles parses the colon-separated list |profiles_string|
// into a freshly allocated stack in |*out|. Returns 1 on success and 0 on
// parse or allocation failure.
// NOTE(review): the UniquePtr template arguments below were stripped in
// extraction; restore from upstream BoringSSL.
static int ssl_ctx_make_profiles(
    const char *profiles_string,
    UniquePtr *out) {
  UniquePtr profiles(
      sk_SRTP_PROTECTION_PROFILE_new_null());
  if (profiles == nullptr) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_SRTP_COULD_NOT_ALLOCATE_PROFILES);
    return 0;
  }

  const char *col;
  const char *ptr = profiles_string;
  do {
    col = strchr(ptr, ':');

    const SRTP_PROTECTION_PROFILE *profile;
    if (!find_profile_by_name(ptr, &profile,
                              col ? (size_t)(col - ptr) : strlen(ptr))) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_SRTP_UNKNOWN_PROTECTION_PROFILE);
      return 0;
    }

    if (!sk_SRTP_PROTECTION_PROFILE_push(profiles.get(), profile)) {
      return 0;
    }

    if (col) {
      ptr = col + 1;
    }
  } while (col);

  *out = std::move(profiles);
  return 1;
}

int SSL_CTX_set_srtp_profiles(SSL_CTX *ctx, const char *profiles) {
  return ssl_ctx_make_profiles(profiles, &ctx->srtp_profiles);
}

int SSL_set_srtp_profiles(SSL *ssl, const char *profiles) {
  return ssl->config != nullptr &&
         ssl_ctx_make_profiles(profiles, &ssl->config->srtp_profiles);
}

const STACK_OF(SRTP_PROTECTION_PROFILE) *SSL_get_srtp_profiles(const SSL *ssl) {
  if (ssl == nullptr) {
    return nullptr;
  }
  if (ssl->config == nullptr) {
    assert(0);
    return nullptr;
  }
  // Per-connection profiles take precedence over the context's.
  return ssl->config->srtp_profiles != nullptr
             ? ssl->config->srtp_profiles.get()
             : ssl->ctx->srtp_profiles.get();
}

const SRTP_PROTECTION_PROFILE *SSL_get_selected_srtp_profile(SSL *ssl) {
  return ssl->s3->srtp_profile;
}

int SSL_CTX_set_tlsext_use_srtp(SSL_CTX *ctx, const char *profiles) {
  // This API inverts its return value.
  return !SSL_CTX_set_srtp_profiles(ctx, profiles);
}

int SSL_set_tlsext_use_srtp(SSL *ssl, const char *profiles) {
  // This API inverts its return value.
  return !SSL_set_srtp_profiles(ssl, profiles);
}



================================================
FILE: Sources/CNIOBoringSSL/ssl/dtls_method.cc
================================================
/*
 * Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the bare "#include" directives below lost their header names
// during text extraction; restore them from upstream BoringSSL.
#include
#include
#include
#include
#include "../crypto/internal.h"
#include "internal.h"

using namespace bssl;

static void dtls1_on_handshake_complete(SSL *ssl) {
  if (ssl_protocol_version(ssl) <= TLS1_2_VERSION) {
    // Stop the reply timer left by the last flight we sent. In DTLS 1.2, the
    // retransmission timer ends when the handshake completes. If we sent the
    // final flight, we may still need to retransmit it, but that is driven by
    // messages from the peer.
    dtls1_stop_timer(ssl);
    // If the final flight had a reply, we know the peer has received it. If
    // not, we must leave the flight around for post-handshake retransmission.
    if (ssl->d1->flight_has_reply) {
      dtls_clear_outgoing_messages(ssl);
    }
  }
}

// next_epoch computes the epoch number for keys installed at |level|, given
// the previous epoch |prev|. Returns false on too many DTLS 1.3 key updates.
// NOTE(review): the static_cast template arguments in this function were
// stripped in extraction; restore from upstream BoringSSL.
static bool next_epoch(const SSL *ssl, uint16_t *out,
                       ssl_encryption_level_t level, uint16_t prev) {
  switch (level) {
    case ssl_encryption_initial:
    case ssl_encryption_early_data:
    case ssl_encryption_handshake:
      // These fixed epochs match the encryption level numerically.
      *out = static_cast(level);
      return true;

    case ssl_encryption_application:
      if (prev < ssl_encryption_application &&
          ssl_protocol_version(ssl) >= TLS1_3_VERSION) {
        *out = static_cast(level);
        return true;
      }
      // A DTLS 1.3 KeyUpdate increments the epoch; guard against wraparound.
      if (prev == 0xffff) {
        OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MANY_KEY_UPDATES);
        return false;
      }
      *out = prev + 1;
      return true;
  }

  assert(0);
  return false;
}

static bool dtls1_set_read_state(SSL *ssl, ssl_encryption_level_t level,
                                 UniquePtr aead_ctx,
                                 Span traffic_secret) {
  // Cipher changes are forbidden if the current epoch has leftover data.
  if (dtls_has_unprocessed_handshake_data(ssl)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESS_HANDSHAKE_DATA);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE);
    return false;
  }

  DTLSReadEpoch new_epoch;
  new_epoch.aead = std::move(aead_ctx);
  if (!next_epoch(ssl, &new_epoch.epoch, level, ssl->d1->read_epoch.epoch)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE);
    return false;
  }
  if (ssl_protocol_version(ssl) > TLS1_2_VERSION) {
    // DTLS 1.3 additionally derives a record-number-encryption key from the
    // traffic secret.
    new_epoch.rn_encrypter =
        RecordNumberEncrypter::Create(new_epoch.aead->cipher(), traffic_secret);
    if (new_epoch.rn_encrypter == nullptr) {
      return false;
    }
    // In DTLS 1.3, new read epochs are not applied immediately. In principle,
    // we could do the same in DTLS 1.2, but we would ignore every record from
    // the previous epoch anyway.
    assert(ssl->d1->next_read_epoch == nullptr);
    ssl->d1->next_read_epoch = MakeUnique(std::move(new_epoch));
    if (ssl->d1->next_read_epoch == nullptr) {
      return false;
    }
  } else {
    ssl->d1->read_epoch = std::move(new_epoch);
    ssl->d1->has_change_cipher_spec = false;
  }
  return true;
}

static bool dtls1_set_write_state(SSL *ssl, ssl_encryption_level_t level,
                                  UniquePtr aead_ctx,
                                  Span traffic_secret) {
  uint16_t epoch;
  if (!next_epoch(ssl, &epoch, level, ssl->d1->write_epoch.epoch())) {
    return false;
  }

  DTLSWriteEpoch new_epoch;
  new_epoch.aead = std::move(aead_ctx);
  new_epoch.next_record = DTLSRecordNumber(epoch, 0);
  if (ssl_protocol_version(ssl) > TLS1_2_VERSION) {
    new_epoch.rn_encrypter =
        RecordNumberEncrypter::Create(new_epoch.aead->cipher(), traffic_secret);
    if (new_epoch.rn_encrypter == nullptr) {
      return false;
    }
  }

  // Retain the outgoing write epoch so in-flight data can still be sealed;
  // unused ones are pruned below.
  auto current = MakeUnique(std::move(ssl->d1->write_epoch));
  if (current == nullptr) {
    return false;
  }

  ssl->d1->write_epoch = std::move(new_epoch);
  ssl->d1->extra_write_epochs.PushBack(std::move(current));
  dtls_clear_unused_write_epochs(ssl);
  return true;
}

static const SSL_PROTOCOL_METHOD kDTLSProtocolMethod = {
    true /* is_dtls */,
    dtls1_new,
    dtls1_free,
    dtls1_get_message,
    dtls1_next_message,
    dtls_has_unprocessed_handshake_data,
    dtls1_open_handshake,
    dtls1_open_change_cipher_spec,
    dtls1_open_app_data,
    dtls1_write_app_data,
    dtls1_dispatch_alert,
    dtls1_init_message,
    dtls1_finish_message,
    dtls1_add_message,
    dtls1_add_change_cipher_spec,
    dtls1_finish_flight,
    dtls1_schedule_ack,
    dtls1_flush,
    dtls1_on_handshake_complete,
    dtls1_set_read_state,
    dtls1_set_write_state,
};

const SSL_METHOD *DTLS_method(void) {
  static const SSL_METHOD kMethod = {
      0,
      &kDTLSProtocolMethod,
      &ssl_crypto_x509_method,
  };
  return &kMethod;
}

const SSL_METHOD *DTLS_with_buffers_method(void) {
  static const SSL_METHOD kMethod = {
      0,
      &kDTLSProtocolMethod,
      &ssl_noop_x509_method,
  };
  return &kMethod;
}

// Legacy version-locked methods.

const SSL_METHOD *DTLSv1_2_method(void) {
  static const SSL_METHOD kMethod = {
      DTLS1_2_VERSION,
      &kDTLSProtocolMethod,
      &ssl_crypto_x509_method,
  };
  return &kMethod;
}

const SSL_METHOD *DTLSv1_method(void) {
  static const SSL_METHOD kMethod = {
      DTLS1_VERSION,
      &kDTLSProtocolMethod,
      &ssl_crypto_x509_method,
  };
  return &kMethod;
}

// Legacy side-specific methods.

const SSL_METHOD *DTLSv1_2_server_method(void) { return DTLSv1_2_method(); }

const SSL_METHOD *DTLSv1_server_method(void) { return DTLSv1_method(); }

const SSL_METHOD *DTLSv1_2_client_method(void) { return DTLSv1_2_method(); }

const SSL_METHOD *DTLSv1_client_method(void) { return DTLSv1_method(); }

const SSL_METHOD *DTLS_server_method(void) { return DTLS_method(); }

const SSL_METHOD *DTLS_client_method(void) { return DTLS_method(); }



================================================
FILE: Sources/CNIOBoringSSL/ssl/dtls_record.cc
================================================
/*
 * Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the bare "#include" directives below lost their header names
// during text extraction; restore them from upstream BoringSSL.
#include
#include
#include
#include
#include
#include "../crypto/internal.h"
#include "internal.h"

BSSL_NAMESPACE_BEGIN

// Returns true if |seq_num| is a replay or falls outside the sliding window.
bool DTLSReplayBitmap::ShouldDiscard(uint64_t seq_num) const {
  const size_t kWindowSize = map_.size();
  if (seq_num > max_seq_num_) {
    return false;
  }
  uint64_t idx = max_seq_num_ - seq_num;
  return idx >= kWindowSize || map_[idx];
}

void DTLSReplayBitmap::Record(uint64_t seq_num) {
  const size_t kWindowSize = map_.size();

  // Shift the window if necessary.
if (seq_num > max_seq_num_) { uint64_t shift = seq_num - max_seq_num_; if (shift >= kWindowSize) { map_.reset(); } else { map_ <<= shift; } max_seq_num_ = seq_num; } uint64_t idx = max_seq_num_ - seq_num; if (idx < kWindowSize) { map_[idx] = true; } } static uint16_t dtls_record_version(const SSL *ssl) { if (ssl->s3->version == 0) { // Before the version is determined, outgoing records use dTLS 1.0 for // historical compatibility requirements. return DTLS1_VERSION; } // DTLS 1.3 freezes the record version at DTLS 1.2. Previous ones use the // version itself. return ssl_protocol_version(ssl) >= TLS1_3_VERSION ? DTLS1_2_VERSION : ssl->s3->version; } static uint64_t dtls_aead_sequence(const SSL *ssl, DTLSRecordNumber num) { // DTLS 1.3 uses the sequence number with the AEAD, while DTLS 1.2 uses the // combined value. If the version is not known, the epoch is unencrypted and // the value is ignored. return (ssl->s3->version != 0 && ssl_protocol_version(ssl) >= TLS1_3_VERSION) ? num.sequence() : num.combined(); } // reconstruct_epoch finds the largest epoch that ends with the epoch bits from // |wire_epoch| that is less than or equal to |current_epoch|, to match the // epoch reconstruction algorithm described in RFC 9147 section 4.2.2. static uint16_t reconstruct_epoch(uint8_t wire_epoch, uint16_t current_epoch) { uint16_t current_epoch_high = current_epoch & 0xfffc; uint16_t epoch = (wire_epoch & 0x3) | current_epoch_high; if (epoch > current_epoch && current_epoch_high > 0) { epoch -= 0x4; } return epoch; } uint64_t reconstruct_seqnum(uint16_t wire_seq, uint64_t seq_mask, uint64_t max_valid_seqnum) { // Although DTLS 1.3 can support sequence numbers up to 2^64-1, we continue to // enforce the DTLS 1.2 2^48-1 limit. With a minimal DTLS 1.3 record header (2 // bytes), no payload, and 16 byte AEAD overhead, sending 2^48 records would // require 5 petabytes. This allows us to continue to pack a DTLS record // number into an 8-byte structure. 
assert(max_valid_seqnum <= DTLSRecordNumber::kMaxSequence); assert(seq_mask == 0xff || seq_mask == 0xffff); uint64_t max_seqnum_plus_one = max_valid_seqnum + 1; uint64_t diff = (wire_seq - max_seqnum_plus_one) & seq_mask; uint64_t step = seq_mask + 1; // This addition cannot overflow. It is at most 2^48 + seq_mask. It, however, // may exceed 2^48-1. uint64_t seqnum = max_seqnum_plus_one + diff; bool too_large = seqnum > DTLSRecordNumber::kMaxSequence; // If the diff is larger than half the step size, then the closest seqnum // to max_seqnum_plus_one (in Z_{2^64}) is seqnum minus step instead of // seqnum. bool closer_is_less = diff > step / 2; // Subtracting step from seqnum will cause underflow if seqnum is too small. bool would_underflow = seqnum < step; if (too_large || (closer_is_less && !would_underflow)) { seqnum -= step; } assert(seqnum <= DTLSRecordNumber::kMaxSequence); return seqnum; } static Span cbs_to_writable_bytes(CBS cbs) { return Span(const_cast(CBS_data(&cbs)), CBS_len(&cbs)); } struct ParsedDTLSRecord { // read_epoch will be null if the record is for an unrecognized epoch. In that // case, |number| may be unset. DTLSReadEpoch *read_epoch = nullptr; DTLSRecordNumber number; CBS header, body; uint8_t type = 0; uint16_t version = 0; }; static bool use_dtls13_record_header(const SSL *ssl, uint16_t epoch) { // Plaintext records in DTLS 1.3 also use the DTLSPlaintext structure for // backwards compatibility. return ssl->s3->version != 0 && ssl_protocol_version(ssl) > TLS1_2_VERSION && epoch > 0; } static bool parse_dtls13_record(SSL *ssl, CBS *in, ParsedDTLSRecord *out) { if (out->type & 0x10) { // Connection ID bit set, which we didn't negotiate. return false; } uint16_t max_epoch = ssl->d1->read_epoch.epoch; if (ssl->d1->next_read_epoch != nullptr) { max_epoch = std::max(max_epoch, ssl->d1->next_read_epoch->epoch); } uint16_t epoch = reconstruct_epoch(out->type, max_epoch); size_t seq_len = (out->type & 0x08) ? 
2 : 1; CBS seq_bytes; if (!CBS_get_bytes(in, &seq_bytes, seq_len)) { return false; } if (out->type & 0x04) { // 16-bit length present if (!CBS_get_u16_length_prefixed(in, &out->body)) { return false; } } else { // No length present - the remaining contents are the whole packet. // CBS_get_bytes is used here to advance |in| to the end so that future // code that computes the number of consumed bytes functions correctly. BSSL_CHECK(CBS_get_bytes(in, &out->body, CBS_len(in))); } // Drop the previous read epoch if expired. if (ssl->d1->prev_read_epoch != nullptr && ssl_ctx_get_current_time(ssl->ctx.get()).tv_sec > ssl->d1->prev_read_epoch->expire) { ssl->d1->prev_read_epoch = nullptr; } // Look up the corresponding epoch. This header form only matches encrypted // DTLS 1.3 epochs. DTLSReadEpoch *read_epoch = nullptr; if (epoch == ssl->d1->read_epoch.epoch) { read_epoch = &ssl->d1->read_epoch; } else if (ssl->d1->next_read_epoch != nullptr && epoch == ssl->d1->next_read_epoch->epoch) { read_epoch = ssl->d1->next_read_epoch.get(); } else if (ssl->d1->prev_read_epoch != nullptr && epoch == ssl->d1->prev_read_epoch->epoch.epoch) { read_epoch = &ssl->d1->prev_read_epoch->epoch; } if (read_epoch != nullptr && use_dtls13_record_header(ssl, epoch)) { out->read_epoch = read_epoch; // Decrypt and reconstruct the sequence number: uint8_t mask[2]; if (!read_epoch->rn_encrypter->GenerateMask(mask, out->body)) { // GenerateMask most likely failed because the record body was not long // enough. return false; } // Apply the mask to the sequence number in-place. The header (with the // decrypted sequence number bytes) is used as the additional data for the // AEAD function. 
auto writable_seq = cbs_to_writable_bytes(seq_bytes); uint64_t seq = 0; for (size_t i = 0; i < writable_seq.size(); i++) { writable_seq[i] ^= mask[i]; seq = (seq << 8) | writable_seq[i]; } uint64_t full_seq = reconstruct_seqnum(seq, (1 << (seq_len * 8)) - 1, read_epoch->bitmap.max_seq_num()); out->number = DTLSRecordNumber(epoch, full_seq); } return true; } static bool parse_dtls12_record(SSL *ssl, CBS *in, ParsedDTLSRecord *out) { uint64_t epoch_and_seq; if (!CBS_get_u16(in, &out->version) || // !CBS_get_u64(in, &epoch_and_seq) || !CBS_get_u16_length_prefixed(in, &out->body)) { return false; } out->number = DTLSRecordNumber::FromCombined(epoch_and_seq); uint16_t epoch = out->number.epoch(); bool version_ok; if (epoch == 0) { // Only check the first byte. Enforcing beyond that can prevent decoding // version negotiation failure alerts. version_ok = (out->version >> 8) == DTLS1_VERSION_MAJOR; } else { version_ok = out->version == dtls_record_version(ssl); } if (!version_ok) { return false; } // Look up the corresponding epoch. In DTLS 1.2, we only need to consider one // epoch. 
if (epoch == ssl->d1->read_epoch.epoch && !use_dtls13_record_header(ssl, epoch)) { out->read_epoch = &ssl->d1->read_epoch; } return true; } static bool parse_dtls_record(SSL *ssl, CBS *cbs, ParsedDTLSRecord *out) { CBS copy = *cbs; if (!CBS_get_u8(cbs, &out->type)) { return false; } bool ok; if ((out->type & 0xe0) == 0x20) { ok = parse_dtls13_record(ssl, cbs, out); } else { ok = parse_dtls12_record(ssl, cbs, out); } if (!ok) { return false; } if (CBS_len(&out->body) > SSL3_RT_MAX_ENCRYPTED_LENGTH) { return false; } size_t header_len = CBS_data(&out->body) - CBS_data(©); BSSL_CHECK(CBS_get_bytes(©, &out->header, header_len)); return true; } enum ssl_open_record_t dtls_open_record(SSL *ssl, uint8_t *out_type, DTLSRecordNumber *out_number, Span *out, size_t *out_consumed, uint8_t *out_alert, Span in) { *out_consumed = 0; if (ssl->s3->read_shutdown == ssl_shutdown_close_notify) { return ssl_open_record_close_notify; } if (in.empty()) { return ssl_open_record_partial; } CBS cbs(in); ParsedDTLSRecord record; if (!parse_dtls_record(ssl, &cbs, &record)) { // The record header was incomplete or malformed. Drop the entire packet. *out_consumed = in.size(); return ssl_open_record_discard; } ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_HEADER, record.header); if (record.read_epoch == nullptr || record.read_epoch->bitmap.ShouldDiscard(record.number.sequence())) { // Drop this record. It's from an unknown epoch or is a replay. Note that if // the record is from next epoch, it could be buffered for later. For // simplicity, drop it and expect retransmit to handle it later; DTLS must // handle packet loss anyway. *out_consumed = in.size() - CBS_len(&cbs); return ssl_open_record_discard; } // Decrypt the body in-place. if (!record.read_epoch->aead->Open(out, record.type, record.version, dtls_aead_sequence(ssl, record.number), record.header, cbs_to_writable_bytes(record.body))) { // Bad packets are silently dropped in DTLS. See section 4.2.1 of RFC 6347. 
// Clear the error queue of any errors decryption may have added. Drop the // entire packet as it must not have come from the peer. // // TODO(davidben): This doesn't distinguish malloc failures from encryption // failures. ERR_clear_error(); *out_consumed = in.size() - CBS_len(&cbs); return ssl_open_record_discard; } *out_consumed = in.size() - CBS_len(&cbs); // DTLS 1.3 hides the record type inside the encrypted data. bool has_padding = !record.read_epoch->aead->is_null_cipher() && ssl_protocol_version(ssl) >= TLS1_3_VERSION; // Check the plaintext length. size_t plaintext_limit = SSL3_RT_MAX_PLAIN_LENGTH + (has_padding ? 1 : 0); if (out->size() > plaintext_limit) { OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG); *out_alert = SSL_AD_RECORD_OVERFLOW; return ssl_open_record_error; } if (has_padding) { do { if (out->empty()) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC); *out_alert = SSL_AD_DECRYPT_ERROR; return ssl_open_record_error; } record.type = out->back(); *out = out->subspan(0, out->size() - 1); } while (record.type == 0); } record.read_epoch->bitmap.Record(record.number.sequence()); // Once we receive a record from the next epoch in DTLS 1.3, it becomes the // current epoch. Also save the previous epoch. This allows us to handle // packet reordering on KeyUpdate, as well as ACK retransmissions of the // Finished flight. if (record.read_epoch == ssl->d1->next_read_epoch.get()) { assert(ssl_protocol_version(ssl) >= TLS1_3_VERSION); auto prev = MakeUnique(); if (prev == nullptr) { *out_alert = SSL_AD_INTERNAL_ERROR; return ssl_open_record_error; } // Release the epoch after a timeout. prev->expire = ssl_ctx_get_current_time(ssl->ctx.get()).tv_sec; if (prev->expire >= UINT64_MAX - DTLS_PREV_READ_EPOCH_EXPIRE_SECONDS) { prev->expire = UINT64_MAX; // Saturate on overflow. 
} else { prev->expire += DTLS_PREV_READ_EPOCH_EXPIRE_SECONDS; } prev->epoch = std::move(ssl->d1->read_epoch); ssl->d1->prev_read_epoch = std::move(prev); ssl->d1->read_epoch = std::move(*ssl->d1->next_read_epoch); ssl->d1->next_read_epoch = nullptr; } // TODO(davidben): Limit the number of empty records as in TLS? This is only // useful if we also limit discarded packets. if (record.type == SSL3_RT_ALERT) { return ssl_process_alert(ssl, out_alert, *out); } // Reject application data in epochs that do not allow it. if (record.type == SSL3_RT_APPLICATION_DATA) { bool app_data_allowed; if (ssl->s3->version != 0 && ssl_protocol_version(ssl) >= TLS1_3_VERSION) { // Application data is allowed in 0-RTT (epoch 1) and after the handshake // (3 and up). app_data_allowed = record.number.epoch() == 1 || record.number.epoch() >= 3; } else { // Application data is allowed starting epoch 1. app_data_allowed = record.number.epoch() >= 1; } if (!app_data_allowed) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); *out_alert = SSL_AD_UNEXPECTED_MESSAGE; return ssl_open_record_error; } } ssl->s3->warning_alert_count = 0; *out_type = record.type; *out_number = record.number; return ssl_open_record_success; } static DTLSWriteEpoch *get_write_epoch(const SSL *ssl, uint16_t epoch) { if (ssl->d1->write_epoch.epoch() == epoch) { return &ssl->d1->write_epoch; } for (const auto &e : ssl->d1->extra_write_epochs) { if (e->epoch() == epoch) { return e.get(); } } return nullptr; } size_t dtls_record_header_write_len(const SSL *ssl, uint16_t epoch) { if (!use_dtls13_record_header(ssl, epoch)) { return DTLS_PLAINTEXT_RECORD_HEADER_LENGTH; } // The DTLS 1.3 has a variable length record header. We never send Connection // ID, we always send 16-bit sequence numbers, and we send a length. (Length // can be omitted, but only for the last record of a packet. Since we send // multiple records in one packet, it's easier to implement always sending the // length.) 
return DTLS1_3_RECORD_HEADER_WRITE_LENGTH; } size_t dtls_max_seal_overhead(const SSL *ssl, uint16_t epoch) { DTLSWriteEpoch *write_epoch = get_write_epoch(ssl, epoch); if (write_epoch == nullptr) { return 0; } size_t ret = dtls_record_header_write_len(ssl, epoch) + write_epoch->aead->MaxOverhead(); if (use_dtls13_record_header(ssl, epoch)) { // Add 1 byte for the encrypted record type. ret++; } return ret; } size_t dtls_seal_prefix_len(const SSL *ssl, uint16_t epoch) { DTLSWriteEpoch *write_epoch = get_write_epoch(ssl, epoch); if (write_epoch == nullptr) { return 0; } return dtls_record_header_write_len(ssl, epoch) + write_epoch->aead->ExplicitNonceLen(); } size_t dtls_seal_max_input_len(const SSL *ssl, uint16_t epoch, size_t max_out) { DTLSWriteEpoch *write_epoch = get_write_epoch(ssl, epoch); if (write_epoch == nullptr) { return 0; } size_t header_len = dtls_record_header_write_len(ssl, epoch); if (max_out <= header_len) { return 0; } max_out -= header_len; max_out = write_epoch->aead->MaxSealInputLen(max_out); if (max_out > 0 && use_dtls13_record_header(ssl, epoch)) { // Remove 1 byte for the encrypted record type. max_out--; } return max_out; } bool dtls_seal_record(SSL *ssl, DTLSRecordNumber *out_number, uint8_t *out, size_t *out_len, size_t max_out, uint8_t type, const uint8_t *in, size_t in_len, uint16_t epoch) { const size_t prefix = dtls_seal_prefix_len(ssl, epoch); if (buffers_alias(in, in_len, out, max_out) && (max_out < prefix || out + prefix != in)) { OPENSSL_PUT_ERROR(SSL, SSL_R_OUTPUT_ALIASES_INPUT); return false; } // Determine the parameters for the current epoch. DTLSWriteEpoch *write_epoch = get_write_epoch(ssl, epoch); if (write_epoch == nullptr) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } const size_t record_header_len = dtls_record_header_write_len(ssl, epoch); // Ensure the sequence number update does not overflow. 
DTLSRecordNumber record_number = write_epoch->next_record; if (!record_number.HasNext()) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); return false; } bool dtls13_header = use_dtls13_record_header(ssl, epoch); uint8_t *extra_in = NULL; size_t extra_in_len = 0; if (dtls13_header) { extra_in = &type; extra_in_len = 1; } size_t ciphertext_len; if (!write_epoch->aead->CiphertextLen(&ciphertext_len, in_len, extra_in_len)) { OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE); return false; } if (max_out < record_header_len + ciphertext_len) { OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); return false; } uint16_t record_version = dtls_record_version(ssl); if (dtls13_header) { // The first byte of the DTLS 1.3 record header has the following format: // 0 1 2 3 4 5 6 7 // +-+-+-+-+-+-+-+-+ // |0|0|1|C|S|L|E E| // +-+-+-+-+-+-+-+-+ // // We set C=0 (no Connection ID), S=1 (16-bit sequence number), L=1 (length // is present), which is a mask of 0x2c. The E E bits are the low-order two // bits of the epoch. // // +-+-+-+-+-+-+-+-+ // |0|0|1|0|1|1|E E| // +-+-+-+-+-+-+-+-+ out[0] = 0x2c | (epoch & 0x3); // We always use a two-byte sequence number. A one-byte sequence number // would require coordinating with the application on ACK feedback to know // that the peer is not too far behind. CRYPTO_store_u16_be(out + 1, write_epoch->next_record.sequence()); // TODO(crbug.com/42290594): When we know the record is last in the packet, // omit the length. CRYPTO_store_u16_be(out + 3, ciphertext_len); } else { out[0] = type; CRYPTO_store_u16_be(out + 1, record_version); CRYPTO_store_u64_be(out + 3, record_number.combined()); CRYPTO_store_u16_be(out + 11, ciphertext_len); } Span header(out, record_header_len); if (!write_epoch->aead->SealScatter( out + record_header_len, out + prefix, out + prefix + in_len, type, record_version, dtls_aead_sequence(ssl, record_number), header, in, in_len, extra_in, extra_in_len)) { return false; } // Perform record number encryption (RFC 9147 section 4.2.3). 
if (dtls13_header) { // Record number encryption uses bytes from the ciphertext as a sample to // generate the mask used for encryption. For simplicity, pass in the whole // ciphertext as the sample - GenerateRecordNumberMask will read only what // it needs (and error if |sample| is too short). Span sample(out + record_header_len, ciphertext_len); uint8_t mask[2]; if (!write_epoch->rn_encrypter->GenerateMask(mask, sample)) { return false; } out[1] ^= mask[0]; out[2] ^= mask[1]; } *out_number = record_number; write_epoch->next_record = record_number.Next(); *out_len = record_header_len + ciphertext_len; ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_HEADER, header); return true; } BSSL_NAMESPACE_END ================================================ FILE: Sources/CNIOBoringSSL/ssl/encrypted_client_hello.cc ================================================ /* Copyright 2021 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include "internal.h" BSSL_NAMESPACE_BEGIN // ECH reuses the extension code point for the version number. 
static constexpr uint16_t kECHConfigVersion = TLSEXT_TYPE_encrypted_client_hello; static const decltype(&EVP_hpke_aes_128_gcm) kSupportedAEADs[] = { &EVP_hpke_aes_128_gcm, &EVP_hpke_aes_256_gcm, &EVP_hpke_chacha20_poly1305, }; static const EVP_HPKE_AEAD *get_ech_aead(uint16_t aead_id) { for (const auto aead_func : kSupportedAEADs) { const EVP_HPKE_AEAD *aead = aead_func(); if (aead_id == EVP_HPKE_AEAD_id(aead)) { return aead; } } return nullptr; } // ssl_client_hello_write_without_extensions serializes |client_hello| into // |out|, omitting the length-prefixed extensions. It serializes individual // fields, starting with |client_hello->version|, and ignores the // |client_hello->client_hello| field. It returns true on success and false on // failure. static bool ssl_client_hello_write_without_extensions( const SSL_CLIENT_HELLO *client_hello, CBB *out) { CBB cbb; if (!CBB_add_u16(out, client_hello->version) || !CBB_add_bytes(out, client_hello->random, client_hello->random_len) || !CBB_add_u8_length_prefixed(out, &cbb) || !CBB_add_bytes(&cbb, client_hello->session_id, client_hello->session_id_len)) { return false; } if (SSL_is_dtls(client_hello->ssl)) { if (!CBB_add_u8_length_prefixed(out, &cbb) || !CBB_add_bytes(&cbb, client_hello->dtls_cookie, client_hello->dtls_cookie_len)) { return false; } } if (!CBB_add_u16_length_prefixed(out, &cbb) || !CBB_add_bytes(&cbb, client_hello->cipher_suites, client_hello->cipher_suites_len) || !CBB_add_u8_length_prefixed(out, &cbb) || !CBB_add_bytes(&cbb, client_hello->compression_methods, client_hello->compression_methods_len) || !CBB_flush(out)) { return false; } return true; } static bool is_valid_client_hello_inner(SSL *ssl, uint8_t *out_alert, Span body) { // See draft-ietf-tls-esni-13, section 7.1. 
SSL_CLIENT_HELLO client_hello; CBS extension; if (!ssl_client_hello_init(ssl, &client_hello, body) || !ssl_client_hello_get_extension(&client_hello, &extension, TLSEXT_TYPE_encrypted_client_hello) || CBS_len(&extension) != 1 || // CBS_data(&extension)[0] != ECH_CLIENT_INNER || !ssl_client_hello_get_extension(&client_hello, &extension, TLSEXT_TYPE_supported_versions)) { *out_alert = SSL_AD_ILLEGAL_PARAMETER; OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_CLIENT_HELLO_INNER); return false; } // Parse supported_versions and reject TLS versions prior to TLS 1.3. Older // versions are incompatible with ECH. CBS versions; if (!CBS_get_u8_length_prefixed(&extension, &versions) || CBS_len(&extension) != 0 || // CBS_len(&versions) == 0) { *out_alert = SSL_AD_DECODE_ERROR; OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } while (CBS_len(&versions) != 0) { uint16_t version; if (!CBS_get_u16(&versions, &version)) { *out_alert = SSL_AD_DECODE_ERROR; OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } if (version == SSL3_VERSION || version == TLS1_VERSION || version == TLS1_1_VERSION || version == TLS1_2_VERSION || version == DTLS1_VERSION || version == DTLS1_2_VERSION) { *out_alert = SSL_AD_ILLEGAL_PARAMETER; OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_CLIENT_HELLO_INNER); return false; } } return true; } bool ssl_decode_client_hello_inner( SSL *ssl, uint8_t *out_alert, Array *out_client_hello_inner, Span encoded_client_hello_inner, const SSL_CLIENT_HELLO *client_hello_outer) { SSL_CLIENT_HELLO client_hello_inner; CBS cbs = encoded_client_hello_inner; if (!ssl_parse_client_hello_with_trailing_data(ssl, &cbs, &client_hello_inner)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } // The remaining data is padding. 
uint8_t padding; while (CBS_get_u8(&cbs, &padding)) { if (padding != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_ILLEGAL_PARAMETER; return false; } } // TLS 1.3 ClientHellos must have extensions, and EncodedClientHelloInners use // ClientHelloOuter's session_id. if (client_hello_inner.extensions_len == 0 || client_hello_inner.session_id_len != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } client_hello_inner.session_id = client_hello_outer->session_id; client_hello_inner.session_id_len = client_hello_outer->session_id_len; // Begin serializing a message containing the ClientHelloInner in |cbb|. ScopedCBB cbb; CBB body, extensions_cbb; if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CLIENT_HELLO) || !ssl_client_hello_write_without_extensions(&client_hello_inner, &body) || !CBB_add_u16_length_prefixed(&body, &extensions_cbb)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } auto inner_extensions = Span(client_hello_inner.extensions, client_hello_inner.extensions_len); CBS ext_list_wrapper; if (!ssl_client_hello_get_extension(&client_hello_inner, &ext_list_wrapper, TLSEXT_TYPE_ech_outer_extensions)) { // No ech_outer_extensions. Copy everything. if (!CBB_add_bytes(&extensions_cbb, inner_extensions.data(), inner_extensions.size())) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } } else { const size_t offset = CBS_data(&ext_list_wrapper) - inner_extensions.data(); auto inner_extensions_before = inner_extensions.subspan(0, offset - 4 /* extension header */); auto inner_extensions_after = inner_extensions.subspan(offset + CBS_len(&ext_list_wrapper)); if (!CBB_add_bytes(&extensions_cbb, inner_extensions_before.data(), inner_extensions_before.size())) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } // Expand ech_outer_extensions. See draft-ietf-tls-esni-13, Appendix B. 
CBS ext_list; if (!CBS_get_u8_length_prefixed(&ext_list_wrapper, &ext_list) || CBS_len(&ext_list) == 0 || CBS_len(&ext_list_wrapper) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } CBS outer_extensions; CBS_init(&outer_extensions, client_hello_outer->extensions, client_hello_outer->extensions_len); while (CBS_len(&ext_list) != 0) { // Find the next extension to copy. uint16_t want; if (!CBS_get_u16(&ext_list, &want)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } // The ECH extension itself is not in the AAD and may not be referenced. if (want == TLSEXT_TYPE_encrypted_client_hello) { *out_alert = SSL_AD_ILLEGAL_PARAMETER; OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_OUTER_EXTENSION); return false; } // Seek to |want| in |outer_extensions|. |ext_list| is required to match // ClientHelloOuter in order. uint16_t found; CBS ext_body; do { if (CBS_len(&outer_extensions) == 0) { *out_alert = SSL_AD_ILLEGAL_PARAMETER; OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_OUTER_EXTENSION); return false; } if (!CBS_get_u16(&outer_extensions, &found) || !CBS_get_u16_length_prefixed(&outer_extensions, &ext_body)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } } while (found != want); // Copy the extension. 
if (!CBB_add_u16(&extensions_cbb, found) || !CBB_add_u16(&extensions_cbb, CBS_len(&ext_body)) || !CBB_add_bytes(&extensions_cbb, CBS_data(&ext_body), CBS_len(&ext_body))) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } } if (!CBB_add_bytes(&extensions_cbb, inner_extensions_after.data(), inner_extensions_after.size())) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } } if (!CBB_flush(&body)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } if (!is_valid_client_hello_inner(ssl, out_alert, Span(CBB_data(&body), CBB_len(&body)))) { return false; } if (!ssl->method->finish_message(ssl, cbb.get(), out_client_hello_inner)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } return true; } bool ssl_client_hello_decrypt(SSL_HANDSHAKE *hs, uint8_t *out_alert, bool *out_is_decrypt_error, Array *out, const SSL_CLIENT_HELLO *client_hello_outer, Span payload) { *out_is_decrypt_error = false; // The ClientHelloOuterAAD is |client_hello_outer| with |payload| (which must // point within |client_hello_outer->extensions|) replaced with zeros. See // draft-ietf-tls-esni-13, section 5.2. Array aad; if (!aad.CopyFrom(Span(client_hello_outer->client_hello, client_hello_outer->client_hello_len))) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } // We assert with |uintptr_t| because the comparison would be UB if they // didn't alias. assert(reinterpret_cast(client_hello_outer->extensions) <= reinterpret_cast(payload.data())); assert(reinterpret_cast(client_hello_outer->extensions + client_hello_outer->extensions_len) >= reinterpret_cast(payload.data() + payload.size())); Span payload_aad = Span(aad).subspan( payload.data() - client_hello_outer->client_hello, payload.size()); OPENSSL_memset(payload_aad.data(), 0, payload_aad.size()); // Decrypt the EncodedClientHelloInner. Array encoded; #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) // In fuzzer mode, disable encryption to improve coverage. 
We reserve a short // input to signal decryption failure, so the fuzzer can explore fallback to // ClientHelloOuter. const uint8_t kBadPayload[] = {0xff}; if (payload == kBadPayload) { *out_alert = SSL_AD_DECRYPT_ERROR; *out_is_decrypt_error = true; OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED); return false; } if (!encoded.CopyFrom(payload)) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } #else if (!encoded.InitForOverwrite(payload.size())) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } size_t len; if (!EVP_HPKE_CTX_open(hs->ech_hpke_ctx.get(), encoded.data(), &len, encoded.size(), payload.data(), payload.size(), aad.data(), aad.size())) { *out_alert = SSL_AD_DECRYPT_ERROR; *out_is_decrypt_error = true; OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED); return false; } encoded.Shrink(len); #endif if (!ssl_decode_client_hello_inner(hs->ssl, out_alert, out, encoded, client_hello_outer)) { return false; } ssl_do_msg_callback(hs->ssl, /*is_write=*/0, SSL3_RT_CLIENT_HELLO_INNER, *out); return true; } static bool is_hex_component(Span in) { if (in.size() < 2 || in[0] != '0' || (in[1] != 'x' && in[1] != 'X')) { return false; } for (uint8_t b : in.subspan(2)) { if (!OPENSSL_isxdigit(b)) { return false; } } return true; } static bool is_decimal_component(Span in) { if (in.empty()) { return false; } for (uint8_t b : in) { if (!('0' <= b && b <= '9')) { return false; } } return true; } bool ssl_is_valid_ech_public_name(Span public_name) { // See draft-ietf-tls-esni-13, Section 4 and RFC 5890, Section 2.3.1. The // public name must be a dot-separated sequence of LDH labels and not begin or // end with a dot. auto remaining = public_name; if (remaining.empty()) { return false; } Span last; while (!remaining.empty()) { // Find the next dot-separated component. 
auto dot = std::find(remaining.begin(), remaining.end(), '.'); Span component; if (dot == remaining.end()) { component = remaining; last = component; remaining = Span(); } else { component = remaining.subspan(0, dot - remaining.begin()); // Skip the dot. remaining = remaining.subspan(dot - remaining.begin() + 1); if (remaining.empty()) { // Trailing dots are not allowed. return false; } } // |component| must be a valid LDH label. Checking for empty components also // rejects leading dots. if (component.empty() || component.size() > 63 || component.front() == '-' || component.back() == '-') { return false; } for (uint8_t c : component) { if (!OPENSSL_isalnum(c) && c != '-') { return false; } } } // The WHATWG URL parser additionally does not allow any DNS names that end in // a numeric component. See: // https://url.spec.whatwg.org/#concept-host-parser // https://url.spec.whatwg.org/#ends-in-a-number-checker // // The WHATWG parser is formulated in terms of parsing decimal, octal, and // hex, along with a separate ASCII digits check. The ASCII digits check // subsumes the decimal and octal check, so we only need to check two cases. return !is_hex_component(last) && !is_decimal_component(last); } static bool parse_ech_config(CBS *cbs, ECHConfig *out, bool *out_supported, bool all_extensions_mandatory) { uint16_t version; CBS orig = *cbs; CBS contents; if (!CBS_get_u16(cbs, &version) || !CBS_get_u16_length_prefixed(cbs, &contents)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } if (version != kECHConfigVersion) { *out_supported = false; return true; } // Make a copy of the ECHConfig and parse from it, so the results alias into // the saved copy. 
if (!out->raw.CopyFrom( Span(CBS_data(&orig), CBS_len(&orig) - CBS_len(cbs)))) { return false; } CBS ech_config(out->raw); CBS public_name, public_key, cipher_suites, extensions; if (!CBS_skip(&ech_config, 2) || // version !CBS_get_u16_length_prefixed(&ech_config, &contents) || !CBS_get_u8(&contents, &out->config_id) || !CBS_get_u16(&contents, &out->kem_id) || !CBS_get_u16_length_prefixed(&contents, &public_key) || CBS_len(&public_key) == 0 || !CBS_get_u16_length_prefixed(&contents, &cipher_suites) || CBS_len(&cipher_suites) == 0 || CBS_len(&cipher_suites) % 4 != 0 || !CBS_get_u8(&contents, &out->maximum_name_length) || !CBS_get_u8_length_prefixed(&contents, &public_name) || CBS_len(&public_name) == 0 || !CBS_get_u16_length_prefixed(&contents, &extensions) || CBS_len(&contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } if (!ssl_is_valid_ech_public_name(public_name)) { // TODO(https://crbug.com/boringssl/275): The draft says ECHConfigs with // invalid public names should be ignored, but LDH syntax failures are // unambiguously invalid. *out_supported = false; return true; } out->public_key = public_key; out->public_name = public_name; // This function does not ensure |out->kem_id| and |out->cipher_suites| use // supported algorithms. The caller must do this. out->cipher_suites = cipher_suites; bool has_unknown_mandatory_extension = false; while (CBS_len(&extensions) != 0) { uint16_t type; CBS body; if (!CBS_get_u16(&extensions, &type) || !CBS_get_u16_length_prefixed(&extensions, &body)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } // We currently do not support any extensions. if (type & 0x8000 || all_extensions_mandatory) { // Extension numbers with the high bit set are mandatory. Continue parsing // to enforce syntax, but we will ultimately ignore this ECHConfig as a // client and reject it as a server. 
has_unknown_mandatory_extension = true; } } *out_supported = !has_unknown_mandatory_extension; return true; } bool ECHServerConfig::Init(Span ech_config, const EVP_HPKE_KEY *key, bool is_retry_config) { is_retry_config_ = is_retry_config; // Parse the ECHConfig, rejecting all unsupported parameters and extensions. // Unlike most server options, ECH's server configuration is serialized and // configured in both the server and DNS. If the caller configures an // unsupported parameter, this is a deployment error. To catch these errors, // we fail early. CBS cbs = ech_config; bool supported; if (!parse_ech_config(&cbs, &ech_config_, &supported, /*all_extensions_mandatory=*/true)) { return false; } if (CBS_len(&cbs) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } if (!supported) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_ECH_SERVER_CONFIG); return false; } CBS cipher_suites = ech_config_.cipher_suites; while (CBS_len(&cipher_suites) > 0) { uint16_t kdf_id, aead_id; if (!CBS_get_u16(&cipher_suites, &kdf_id) || !CBS_get_u16(&cipher_suites, &aead_id)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } // The server promises to support every option in the ECHConfig, so reject // any unsupported cipher suites. if (kdf_id != EVP_HPKE_HKDF_SHA256 || get_ech_aead(aead_id) == nullptr) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_ECH_SERVER_CONFIG); return false; } } // Check the public key in the ECHConfig matches |key|. 
uint8_t expected_public_key[EVP_HPKE_MAX_PUBLIC_KEY_LENGTH]; size_t expected_public_key_len; if (!EVP_HPKE_KEY_public_key(key, expected_public_key, &expected_public_key_len, sizeof(expected_public_key))) { return false; } if (ech_config_.kem_id != EVP_HPKE_KEM_id(EVP_HPKE_KEY_kem(key)) || Span(expected_public_key, expected_public_key_len) != ech_config_.public_key) { OPENSSL_PUT_ERROR(SSL, SSL_R_ECH_SERVER_CONFIG_AND_PRIVATE_KEY_MISMATCH); return false; } if (!EVP_HPKE_KEY_copy(key_.get(), key)) { return false; } return true; } bool ECHServerConfig::SetupContext(EVP_HPKE_CTX *ctx, uint16_t kdf_id, uint16_t aead_id, Span enc) const { // Check the cipher suite is supported by this ECHServerConfig. CBS cbs(ech_config_.cipher_suites); bool cipher_ok = false; while (CBS_len(&cbs) != 0) { uint16_t supported_kdf_id, supported_aead_id; if (!CBS_get_u16(&cbs, &supported_kdf_id) || !CBS_get_u16(&cbs, &supported_aead_id)) { return false; } if (kdf_id == supported_kdf_id && aead_id == supported_aead_id) { cipher_ok = true; break; } } if (!cipher_ok) { return false; } static const uint8_t kInfoLabel[] = "tls ech"; ScopedCBB info_cbb; if (!CBB_init(info_cbb.get(), sizeof(kInfoLabel) + ech_config_.raw.size()) || !CBB_add_bytes(info_cbb.get(), kInfoLabel, sizeof(kInfoLabel) /* includes trailing NUL */) || !CBB_add_bytes(info_cbb.get(), ech_config_.raw.data(), ech_config_.raw.size())) { return false; } assert(kdf_id == EVP_HPKE_HKDF_SHA256); assert(get_ech_aead(aead_id) != NULL); return EVP_HPKE_CTX_setup_recipient(ctx, key_.get(), EVP_hpke_hkdf_sha256(), get_ech_aead(aead_id), enc.data(), enc.size(), CBB_data(info_cbb.get()), CBB_len(info_cbb.get())); } bool ssl_is_valid_ech_config_list(Span ech_config_list) { CBS cbs = ech_config_list, child; if (!CBS_get_u16_length_prefixed(&cbs, &child) || // CBS_len(&child) == 0 || // CBS_len(&cbs) > 0) { return false; } while (CBS_len(&child) > 0) { ECHConfig ech_config; bool supported; if (!parse_ech_config(&child, &ech_config, &supported, 
/*all_extensions_mandatory=*/false)) { return false; } } return true; } static bool select_ech_cipher_suite(const EVP_HPKE_KDF **out_kdf, const EVP_HPKE_AEAD **out_aead, Span cipher_suites, const bool has_aes_hardware) { const EVP_HPKE_AEAD *aead = nullptr; CBS cbs = cipher_suites; while (CBS_len(&cbs) != 0) { uint16_t kdf_id, aead_id; if (!CBS_get_u16(&cbs, &kdf_id) || // !CBS_get_u16(&cbs, &aead_id)) { return false; } // Pick the first common cipher suite, but prefer ChaCha20-Poly1305 if we // don't have AES hardware. const EVP_HPKE_AEAD *candidate = get_ech_aead(aead_id); if (kdf_id != EVP_HPKE_HKDF_SHA256 || candidate == nullptr) { continue; } if (aead == nullptr || (!has_aes_hardware && aead_id == EVP_HPKE_CHACHA20_POLY1305)) { aead = candidate; } } if (aead == nullptr) { return false; } *out_kdf = EVP_hpke_hkdf_sha256(); *out_aead = aead; return true; } bool ssl_select_ech_config(SSL_HANDSHAKE *hs, Span out_enc, size_t *out_enc_len) { *out_enc_len = 0; if (hs->max_version < TLS1_3_VERSION) { // ECH requires TLS 1.3. return true; } if (!hs->config->client_ech_config_list.empty()) { CBS cbs = CBS(hs->config->client_ech_config_list); CBS child; if (!CBS_get_u16_length_prefixed(&cbs, &child) || // CBS_len(&child) == 0 || // CBS_len(&cbs) > 0) { return false; } // Look for the first ECHConfig with supported parameters. while (CBS_len(&child) > 0) { ECHConfig ech_config; bool supported; if (!parse_ech_config(&child, &ech_config, &supported, /*all_extensions_mandatory=*/false)) { return false; } const EVP_HPKE_KEM *kem = EVP_hpke_x25519_hkdf_sha256(); const EVP_HPKE_KDF *kdf; const EVP_HPKE_AEAD *aead; if (supported && // ech_config.kem_id == EVP_HPKE_DHKEM_X25519_HKDF_SHA256 && select_ech_cipher_suite(&kdf, &aead, ech_config.cipher_suites, hs->ssl->config->aes_hw_override ? 
hs->ssl->config->aes_hw_override_value : EVP_has_aes_hardware())) { ScopedCBB info; static const uint8_t kInfoLabel[] = "tls ech"; // includes trailing NUL if (!CBB_init(info.get(), sizeof(kInfoLabel) + ech_config.raw.size()) || !CBB_add_bytes(info.get(), kInfoLabel, sizeof(kInfoLabel)) || !CBB_add_bytes(info.get(), ech_config.raw.data(), ech_config.raw.size())) { return false; } if (!EVP_HPKE_CTX_setup_sender( hs->ech_hpke_ctx.get(), out_enc.data(), out_enc_len, out_enc.size(), kem, kdf, aead, ech_config.public_key.data(), ech_config.public_key.size(), CBB_data(info.get()), CBB_len(info.get())) || !hs->inner_transcript.Init()) { return false; } hs->selected_ech_config = MakeUnique(std::move(ech_config)); return hs->selected_ech_config != nullptr; } } } return true; } static size_t aead_overhead(const EVP_HPKE_AEAD *aead) { #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) // TODO(https://crbug.com/boringssl/275): Having to adjust the overhead // everywhere is tedious. Change fuzzer mode to append a fake tag but still // otherwise be cleartext, refresh corpora, and then inline this function. return 0; #else return EVP_AEAD_max_overhead(EVP_HPKE_AEAD_aead(aead)); #endif } // random_size returns a random value between |min| and |max|, inclusive. static size_t random_size(size_t min, size_t max) { assert(min < max); size_t value; RAND_bytes(reinterpret_cast(&value), sizeof(value)); return value % (max - min + 1) + min; } static bool setup_ech_grease(SSL_HANDSHAKE *hs) { assert(!hs->selected_ech_config); if (hs->max_version < TLS1_3_VERSION || !hs->config->ech_grease_enabled) { return true; } const uint16_t kdf_id = EVP_HPKE_HKDF_SHA256; const bool has_aes_hw = hs->ssl->config->aes_hw_override ? hs->ssl->config->aes_hw_override_value : EVP_has_aes_hardware(); const EVP_HPKE_AEAD *aead = has_aes_hw ? 
EVP_hpke_aes_128_gcm() : EVP_hpke_chacha20_poly1305(); static_assert(ssl_grease_ech_config_id < sizeof(hs->grease_seed), "hs->grease_seed is too small"); uint8_t config_id = hs->grease_seed[ssl_grease_ech_config_id]; uint8_t enc[X25519_PUBLIC_VALUE_LEN]; uint8_t private_key_unused[X25519_PRIVATE_KEY_LEN]; X25519_keypair(enc, private_key_unused); // To determine a plausible length for the payload, we estimate the size of a // typical EncodedClientHelloInner without resumption: // // 2+32+1+2 version, random, legacy_session_id, legacy_compression_methods // 2+4*2 cipher_suites (three TLS 1.3 ciphers, GREASE) // 2 extensions prefix // 5 inner encrypted_client_hello // 4+1+2*2 supported_versions (TLS 1.3, GREASE) // 4+1+10*2 outer_extensions (key_share, sigalgs, sct, alpn, // supported_groups, status_request, psk_key_exchange_modes, // compress_certificate, GREASE x2) // // The server_name extension has an overhead of 9 bytes. For now, arbitrarily // estimate maximum_name_length to be between 32 and 100 bytes. Then round up // to a multiple of 32, to match draft-ietf-tls-esni-13, section 6.1.3. const size_t payload_len = 32 * random_size(128 / 32, 224 / 32) + aead_overhead(aead); bssl::ScopedCBB cbb; CBB enc_cbb, payload_cbb; uint8_t *payload; if (!CBB_init(cbb.get(), 256) || !CBB_add_u16(cbb.get(), kdf_id) || !CBB_add_u16(cbb.get(), EVP_HPKE_AEAD_id(aead)) || !CBB_add_u8(cbb.get(), config_id) || !CBB_add_u16_length_prefixed(cbb.get(), &enc_cbb) || !CBB_add_bytes(&enc_cbb, enc, sizeof(enc)) || !CBB_add_u16_length_prefixed(cbb.get(), &payload_cbb) || !CBB_add_space(&payload_cbb, &payload, payload_len) || !RAND_bytes(payload, payload_len) || !CBBFinishArray(cbb.get(), &hs->ech_client_outer)) { return false; } return true; } bool ssl_encrypt_client_hello(SSL_HANDSHAKE *hs, Span enc) { SSL *const ssl = hs->ssl; if (!hs->selected_ech_config) { return setup_ech_grease(hs); } // Construct ClientHelloInner and EncodedClientHelloInner. 
See // draft-ietf-tls-esni-13, sections 5.1 and 6.1. ScopedCBB cbb, encoded_cbb; CBB body; bool needs_psk_binder; Array hello_inner; if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CLIENT_HELLO) || !CBB_init(encoded_cbb.get(), 256) || !ssl_write_client_hello_without_extensions(hs, &body, ssl_client_hello_inner, /*empty_session_id=*/false) || !ssl_write_client_hello_without_extensions(hs, encoded_cbb.get(), ssl_client_hello_inner, /*empty_session_id=*/true) || !ssl_add_clienthello_tlsext(hs, &body, encoded_cbb.get(), &needs_psk_binder, ssl_client_hello_inner, CBB_len(&body)) || !ssl->method->finish_message(ssl, cbb.get(), &hello_inner)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } if (needs_psk_binder) { size_t binder_len; if (!tls13_write_psk_binder(hs, hs->inner_transcript, Span(hello_inner), &binder_len)) { return false; } // Also update the EncodedClientHelloInner. auto encoded_binder = Span(const_cast(CBB_data(encoded_cbb.get())), CBB_len(encoded_cbb.get())) .last(binder_len); auto hello_inner_binder = Span(hello_inner).last(binder_len); OPENSSL_memcpy(encoded_binder.data(), hello_inner_binder.data(), binder_len); } ssl_do_msg_callback(ssl, /*is_write=*/1, SSL3_RT_CLIENT_HELLO_INNER, hello_inner); if (!hs->inner_transcript.Update(hello_inner)) { return false; } // Pad the EncodedClientHelloInner. See draft-ietf-tls-esni-13, section 6.1.3. size_t padding_len = 0; size_t maximum_name_length = hs->selected_ech_config->maximum_name_length; if (ssl->hostname) { size_t hostname_len = strlen(ssl->hostname.get()); if (hostname_len <= maximum_name_length) { padding_len = maximum_name_length - hostname_len; } } else { // No SNI. Pad up to |maximum_name_length|, including server_name extension // overhead. padding_len = 9 + maximum_name_length; } // Pad the whole thing to a multiple of 32 bytes. 
padding_len += 31 - ((CBB_len(encoded_cbb.get()) + padding_len - 1) % 32); Array encoded; if (!CBB_add_zeros(encoded_cbb.get(), padding_len) || !CBBFinishArray(encoded_cbb.get(), &encoded)) { return false; } // Encrypt |encoded|. See draft-ietf-tls-esni-13, section 6.1.1. First, // assemble the extension with a placeholder value for ClientHelloOuterAAD. // See draft-ietf-tls-esni-13, section 5.2. const EVP_HPKE_KDF *kdf = EVP_HPKE_CTX_kdf(hs->ech_hpke_ctx.get()); const EVP_HPKE_AEAD *aead = EVP_HPKE_CTX_aead(hs->ech_hpke_ctx.get()); size_t payload_len = encoded.size() + aead_overhead(aead); CBB enc_cbb, payload_cbb; if (!CBB_init(cbb.get(), 256) || !CBB_add_u16(cbb.get(), EVP_HPKE_KDF_id(kdf)) || !CBB_add_u16(cbb.get(), EVP_HPKE_AEAD_id(aead)) || !CBB_add_u8(cbb.get(), hs->selected_ech_config->config_id) || !CBB_add_u16_length_prefixed(cbb.get(), &enc_cbb) || !CBB_add_bytes(&enc_cbb, enc.data(), enc.size()) || !CBB_add_u16_length_prefixed(cbb.get(), &payload_cbb) || !CBB_add_zeros(&payload_cbb, payload_len) || !CBBFinishArray(cbb.get(), &hs->ech_client_outer)) { return false; } // Construct ClientHelloOuterAAD. // TODO(https://crbug.com/boringssl/275): This ends up constructing the // ClientHelloOuter twice. Instead, reuse |aad| for the ClientHello, now that // draft-12 made the length prefixes match. bssl::ScopedCBB aad; if (!CBB_init(aad.get(), 256) || !ssl_write_client_hello_without_extensions(hs, aad.get(), ssl_client_hello_outer, /*empty_session_id=*/false) || !ssl_add_clienthello_tlsext(hs, aad.get(), /*out_encoded=*/nullptr, &needs_psk_binder, ssl_client_hello_outer, CBB_len(aad.get()))) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } // ClientHelloOuter may not require a PSK binder. Otherwise, we have a // circular dependency. assert(!needs_psk_binder); // Replace the payload in |hs->ech_client_outer| with the encrypted value. 
  auto payload_span = Span(hs->ech_client_outer).last(payload_len);
#if defined(BORINGSSL_UNSAFE_FUZZER_MODE)
  // In fuzzer mode, the server expects a cleartext payload.
  assert(payload_span.size() == encoded.size());
  OPENSSL_memcpy(payload_span.data(), encoded.data(), encoded.size());
#else
  // Seal in place; the sealed length must exactly match the placeholder
  // reserved above.
  if (!EVP_HPKE_CTX_seal(hs->ech_hpke_ctx.get(), payload_span.data(),
                         &payload_len, payload_span.size(), encoded.data(),
                         encoded.size(), CBB_data(aad.get()),
                         CBB_len(aad.get())) ||
      payload_len != payload_span.size()) {
    return false;
  }
#endif  // BORINGSSL_UNSAFE_FUZZER_MODE

  return true;
}

BSSL_NAMESPACE_END

using namespace bssl;

// Enables or disables GREASE ECH on |ssl|. No-op if the connection has no
// config (e.g. after the handshake configuration was shed).
void SSL_set_enable_ech_grease(SSL *ssl, int enable) {
  if (!ssl->config) {
    return;
  }
  ssl->config->ech_grease_enabled = !!enable;
}

// Installs a serialized ECHConfigList for the client. The list is validated
// up front so later handshake code may assume it is well-formed.
int SSL_set1_ech_config_list(SSL *ssl, const uint8_t *ech_config_list,
                             size_t ech_config_list_len) {
  if (!ssl->config) {
    return 0;
  }
  auto span = Span(ech_config_list, ech_config_list_len);
  if (!ssl_is_valid_ech_config_list(span)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_ECH_CONFIG_LIST);
    return 0;
  }
  return ssl->config->client_ech_config_list.CopyFrom(span);
}

void SSL_get0_ech_name_override(const SSL *ssl, const char **out_name,
                                size_t *out_name_len) {
  // When ECH is rejected, we use the public name. Note that, if
  // |SSL_CTX_set_reverify_on_resume| is enabled, we reverify the certificate
  // before the 0-RTT point. If also offering ECH, we verify as if
  // ClientHelloInner was accepted and do not override. This works because, at
  // this point, |ech_status| will be |ssl_ech_none|. See the
  // ECH-Client-Reject-EarlyDataReject-OverrideNameOnRetry tests in runner.go.
const SSL_HANDSHAKE *hs = ssl->s3->hs.get(); if (!ssl->server && hs && ssl->s3->ech_status == ssl_ech_rejected) { *out_name = reinterpret_cast( hs->selected_ech_config->public_name.data()); *out_name_len = hs->selected_ech_config->public_name.size(); } else { *out_name = nullptr; *out_name_len = 0; } } void SSL_get0_ech_retry_configs(const SSL *ssl, const uint8_t **out_retry_configs, size_t *out_retry_configs_len) { const SSL_HANDSHAKE *hs = ssl->s3->hs.get(); if (!hs || !hs->ech_authenticated_reject) { // It is an error to call this function except in response to // |SSL_R_ECH_REJECTED|. Returning an empty string risks the caller // mistakenly believing the server has disabled ECH. Instead, return a // non-empty ECHConfigList with a syntax error, so the subsequent // |SSL_set1_ech_config_list| call will fail. assert(0); static const uint8_t kPlaceholder[] = { kECHConfigVersion >> 8, kECHConfigVersion & 0xff, 0xff, 0xff, 0xff}; *out_retry_configs = kPlaceholder; *out_retry_configs_len = sizeof(kPlaceholder); return; } *out_retry_configs = hs->ech_retry_configs.data(); *out_retry_configs_len = hs->ech_retry_configs.size(); } int SSL_marshal_ech_config(uint8_t **out, size_t *out_len, uint8_t config_id, const EVP_HPKE_KEY *key, const char *public_name, size_t max_name_len) { Span public_name_u8 = StringAsBytes(public_name); if (!ssl_is_valid_ech_public_name(public_name_u8)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_ECH_PUBLIC_NAME); return 0; } // The maximum name length is encoded in one byte. if (max_name_len > 0xff) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_LENGTH); return 0; } // See draft-ietf-tls-esni-13, section 4. 
ScopedCBB cbb; CBB contents, child; uint8_t *public_key; size_t public_key_len; if (!CBB_init(cbb.get(), 128) || // !CBB_add_u16(cbb.get(), kECHConfigVersion) || !CBB_add_u16_length_prefixed(cbb.get(), &contents) || !CBB_add_u8(&contents, config_id) || !CBB_add_u16(&contents, EVP_HPKE_KEM_id(EVP_HPKE_KEY_kem(key))) || !CBB_add_u16_length_prefixed(&contents, &child) || !CBB_reserve(&child, &public_key, EVP_HPKE_MAX_PUBLIC_KEY_LENGTH) || !EVP_HPKE_KEY_public_key(key, public_key, &public_key_len, EVP_HPKE_MAX_PUBLIC_KEY_LENGTH) || !CBB_did_write(&child, public_key_len) || !CBB_add_u16_length_prefixed(&contents, &child) || // Write a default cipher suite configuration. !CBB_add_u16(&child, EVP_HPKE_HKDF_SHA256) || !CBB_add_u16(&child, EVP_HPKE_AES_128_GCM) || !CBB_add_u16(&child, EVP_HPKE_HKDF_SHA256) || !CBB_add_u16(&child, EVP_HPKE_CHACHA20_POLY1305) || !CBB_add_u8(&contents, max_name_len) || !CBB_add_u8_length_prefixed(&contents, &child) || !CBB_add_bytes(&child, public_name_u8.data(), public_name_u8.size()) || // TODO(https://crbug.com/boringssl/275): Reserve some GREASE extensions // and include some. 
!CBB_add_u16(&contents, 0 /* no extensions */) || !CBB_finish(cbb.get(), out, out_len)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return 0; } return 1; } SSL_ECH_KEYS *SSL_ECH_KEYS_new() { return New(); } void SSL_ECH_KEYS_up_ref(SSL_ECH_KEYS *keys) { keys->UpRefInternal(); } void SSL_ECH_KEYS_free(SSL_ECH_KEYS *keys) { if (keys != nullptr) { keys->DecRefInternal(); } } int SSL_ECH_KEYS_add(SSL_ECH_KEYS *configs, int is_retry_config, const uint8_t *ech_config, size_t ech_config_len, const EVP_HPKE_KEY *key) { UniquePtr parsed_config = MakeUnique(); if (!parsed_config) { return 0; } if (!parsed_config->Init(Span(ech_config, ech_config_len), key, !!is_retry_config)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return 0; } if (!configs->configs.Push(std::move(parsed_config))) { return 0; } return 1; } int SSL_ECH_KEYS_has_duplicate_config_id(const SSL_ECH_KEYS *keys) { bool seen[256] = {false}; for (const auto &config : keys->configs) { if (seen[config->ech_config().config_id]) { return 1; } seen[config->ech_config().config_id] = true; } return 0; } int SSL_ECH_KEYS_marshal_retry_configs(const SSL_ECH_KEYS *keys, uint8_t **out, size_t *out_len) { ScopedCBB cbb; CBB child; if (!CBB_init(cbb.get(), 128) || !CBB_add_u16_length_prefixed(cbb.get(), &child)) { return false; } for (const auto &config : keys->configs) { if (config->is_retry_config() && !CBB_add_bytes(&child, config->ech_config().raw.data(), config->ech_config().raw.size())) { return false; } } return CBB_finish(cbb.get(), out, out_len); } int SSL_CTX_set1_ech_keys(SSL_CTX *ctx, SSL_ECH_KEYS *keys) { bool has_retry_config = false; for (const auto &config : keys->configs) { if (config->is_retry_config()) { has_retry_config = true; break; } } if (!has_retry_config) { OPENSSL_PUT_ERROR(SSL, SSL_R_ECH_SERVER_WOULD_HAVE_NO_RETRY_CONFIGS); return 0; } UniquePtr owned_keys = UpRef(keys); MutexWriteLock lock(&ctx->lock); ctx->ech_keys.swap(owned_keys); return 1; } int SSL_ech_accepted(const SSL *ssl) { if 
(SSL_in_early_data(ssl) && !ssl->server) { // In the client early data state, we report properties as if the server // accepted early data. The server can only accept early data with // ClientHelloInner. return ssl->s3->hs->selected_ech_config != nullptr; } return ssl->s3->ech_status == ssl_ech_accepted; } ================================================ FILE: Sources/CNIOBoringSSL/ssl/extensions.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN static bool ssl_check_clienthello_tlsext(SSL_HANDSHAKE *hs); static bool ssl_check_serverhello_tlsext(SSL_HANDSHAKE *hs); static int compare_uint16_t(const void *p1, const void *p2) { uint16_t u1 = *((const uint16_t *)p1); uint16_t u2 = *((const uint16_t *)p2); if (u1 < u2) { return -1; } else if (u1 > u2) { return 1; } else { return 0; } } // Per http://tools.ietf.org/html/rfc5246#section-7.4.1.4, there may not be // more than one extension of the same type in a ClientHello or ServerHello. // This function does an initial scan over the extensions block to filter those // out. static bool tls1_check_duplicate_extensions(const CBS *cbs) { // First pass: count the extensions. 
size_t num_extensions = 0; CBS extensions = *cbs; while (CBS_len(&extensions) > 0) { uint16_t type; CBS extension; if (!CBS_get_u16(&extensions, &type) || !CBS_get_u16_length_prefixed(&extensions, &extension)) { return false; } num_extensions++; } if (num_extensions == 0) { return true; } Array extension_types; if (!extension_types.InitForOverwrite(num_extensions)) { return false; } // Second pass: gather the extension types. extensions = *cbs; for (size_t i = 0; i < extension_types.size(); i++) { CBS extension; if (!CBS_get_u16(&extensions, &extension_types[i]) || !CBS_get_u16_length_prefixed(&extensions, &extension)) { // This should not happen. return false; } } assert(CBS_len(&extensions) == 0); // Sort the extensions and make sure there are no duplicates. qsort(extension_types.data(), extension_types.size(), sizeof(uint16_t), compare_uint16_t); for (size_t i = 1; i < num_extensions; i++) { if (extension_types[i - 1] == extension_types[i]) { return false; } } return true; } static bool is_post_quantum_group(uint16_t id) { switch (id) { case SSL_GROUP_X25519_KYBER768_DRAFT00: case SSL_GROUP_X25519_MLKEM768: return true; default: return false; } } bool ssl_client_hello_init(const SSL *ssl, SSL_CLIENT_HELLO *out, Span body) { CBS cbs = body; if (!ssl_parse_client_hello_with_trailing_data(ssl, &cbs, out) || CBS_len(&cbs) != 0) { return false; } return true; } bool ssl_parse_client_hello_with_trailing_data(const SSL *ssl, CBS *cbs, SSL_CLIENT_HELLO *out) { OPENSSL_memset(out, 0, sizeof(*out)); out->ssl = const_cast(ssl); CBS copy = *cbs; CBS random, session_id; if (!CBS_get_u16(cbs, &out->version) || !CBS_get_bytes(cbs, &random, SSL3_RANDOM_SIZE) || !CBS_get_u8_length_prefixed(cbs, &session_id) || CBS_len(&session_id) > SSL_MAX_SSL_SESSION_ID_LENGTH) { return false; } out->random = CBS_data(&random); out->random_len = CBS_len(&random); out->session_id = CBS_data(&session_id); out->session_id_len = CBS_len(&session_id); if (SSL_is_dtls(out->ssl)) { CBS cookie; if 
(!CBS_get_u8_length_prefixed(cbs, &cookie)) { return false; } out->dtls_cookie = CBS_data(&cookie); out->dtls_cookie_len = CBS_len(&cookie); } else { out->dtls_cookie = nullptr; out->dtls_cookie_len = 0; } CBS cipher_suites, compression_methods; if (!CBS_get_u16_length_prefixed(cbs, &cipher_suites) || CBS_len(&cipher_suites) < 2 || (CBS_len(&cipher_suites) & 1) != 0 || !CBS_get_u8_length_prefixed(cbs, &compression_methods) || CBS_len(&compression_methods) < 1) { return false; } out->cipher_suites = CBS_data(&cipher_suites); out->cipher_suites_len = CBS_len(&cipher_suites); out->compression_methods = CBS_data(&compression_methods); out->compression_methods_len = CBS_len(&compression_methods); // If the ClientHello ends here then it's valid, but doesn't have any // extensions. if (CBS_len(cbs) == 0) { out->extensions = nullptr; out->extensions_len = 0; } else { // Extract extensions and check it is valid. CBS extensions; if (!CBS_get_u16_length_prefixed(cbs, &extensions) || !tls1_check_duplicate_extensions(&extensions)) { return false; } out->extensions = CBS_data(&extensions); out->extensions_len = CBS_len(&extensions); } out->client_hello = CBS_data(©); out->client_hello_len = CBS_len(©) - CBS_len(cbs); return true; } bool ssl_client_hello_get_extension(const SSL_CLIENT_HELLO *client_hello, CBS *out, uint16_t extension_type) { CBS extensions; CBS_init(&extensions, client_hello->extensions, client_hello->extensions_len); while (CBS_len(&extensions) != 0) { // Decode the next extension. 
uint16_t type; CBS extension; if (!CBS_get_u16(&extensions, &type) || !CBS_get_u16_length_prefixed(&extensions, &extension)) { return false; } if (type == extension_type) { *out = extension; return true; } } return false; } static const uint16_t kDefaultGroups[] = { SSL_GROUP_X25519, SSL_GROUP_SECP256R1, SSL_GROUP_SECP384R1, }; Span tls1_get_grouplist(const SSL_HANDSHAKE *hs) { if (!hs->config->supported_group_list.empty()) { return hs->config->supported_group_list; } return Span(kDefaultGroups); } bool tls1_get_shared_group(SSL_HANDSHAKE *hs, uint16_t *out_group_id) { SSL *const ssl = hs->ssl; assert(ssl->server); // Clients are not required to send a supported_groups extension. In this // case, the server is free to pick any group it likes. See RFC 4492, // section 4, paragraph 3. // // However, in the interests of compatibility, we will skip ECDH if the // client didn't send an extension because we can't be sure that they'll // support our favoured group. Thus we do not special-case an emtpy // |peer_supported_group_list|. Span groups = tls1_get_grouplist(hs); Span pref, supp; if (ssl->options & SSL_OP_CIPHER_SERVER_PREFERENCE) { pref = groups; supp = hs->peer_supported_group_list; } else { pref = hs->peer_supported_group_list; supp = groups; } for (uint16_t pref_group : pref) { for (uint16_t supp_group : supp) { if (pref_group == supp_group && // Post-quantum key agreements don't fit in the u8-length-prefixed // ECPoint field in TLS 1.2 and below. (ssl_protocol_version(ssl) >= TLS1_3_VERSION || !is_post_quantum_group(pref_group))) { *out_group_id = pref_group; return true; } } } return false; } bool tls1_check_group_id(const SSL_HANDSHAKE *hs, uint16_t group_id) { if (is_post_quantum_group(group_id) && ssl_protocol_version(hs->ssl) < TLS1_3_VERSION) { // Post-quantum "groups" require TLS 1.3. return false; } // We internally assume zero is never allocated as a group ID. 
if (group_id == 0) { return false; } for (uint16_t supported : tls1_get_grouplist(hs)) { if (supported == group_id) { return true; } } return false; } // kVerifySignatureAlgorithms is the default list of accepted signature // algorithms for verifying. static const uint16_t kVerifySignatureAlgorithms[] = { // List our preferred algorithms first. SSL_SIGN_ECDSA_SECP256R1_SHA256, SSL_SIGN_RSA_PSS_RSAE_SHA256, SSL_SIGN_RSA_PKCS1_SHA256, // Larger hashes are acceptable. SSL_SIGN_ECDSA_SECP384R1_SHA384, SSL_SIGN_RSA_PSS_RSAE_SHA384, SSL_SIGN_RSA_PKCS1_SHA384, SSL_SIGN_RSA_PSS_RSAE_SHA512, SSL_SIGN_RSA_PKCS1_SHA512, // For now, SHA-1 is still accepted but least preferable. SSL_SIGN_RSA_PKCS1_SHA1, }; // kSignSignatureAlgorithms is the default list of supported signature // algorithms for signing. static const uint16_t kSignSignatureAlgorithms[] = { // List our preferred algorithms first. SSL_SIGN_ED25519, SSL_SIGN_ECDSA_SECP256R1_SHA256, SSL_SIGN_RSA_PSS_RSAE_SHA256, SSL_SIGN_RSA_PKCS1_SHA256, // If needed, sign larger hashes. // // TODO(davidben): Determine which of these may be pruned. SSL_SIGN_ECDSA_SECP384R1_SHA384, SSL_SIGN_RSA_PSS_RSAE_SHA384, SSL_SIGN_RSA_PKCS1_SHA384, SSL_SIGN_ECDSA_SECP521R1_SHA512, SSL_SIGN_RSA_PSS_RSAE_SHA512, SSL_SIGN_RSA_PKCS1_SHA512, // If the peer supports nothing else, sign with SHA-1. SSL_SIGN_ECDSA_SHA1, SSL_SIGN_RSA_PKCS1_SHA1, }; static Span tls12_get_verify_sigalgs(const SSL_HANDSHAKE *hs) { if (hs->config->verify_sigalgs.empty()) { return Span(kVerifySignatureAlgorithms); } return hs->config->verify_sigalgs; } bool tls12_add_verify_sigalgs(const SSL_HANDSHAKE *hs, CBB *out) { for (uint16_t sigalg : tls12_get_verify_sigalgs(hs)) { if (!CBB_add_u16(out, sigalg)) { return false; } } return true; } bool tls12_check_peer_sigalg(const SSL_HANDSHAKE *hs, uint8_t *out_alert, uint16_t sigalg, EVP_PKEY *pkey) { // The peer must have selected an algorithm that is consistent with its public // key, the TLS version, and what we advertised. 
Span sigalgs = tls12_get_verify_sigalgs(hs); if (std::find(sigalgs.begin(), sigalgs.end(), sigalg) == sigalgs.end() || !ssl_pkey_supports_algorithm(hs->ssl, pkey, sigalg, /*is_verify=*/true)) { OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SIGNATURE_TYPE); *out_alert = SSL_AD_ILLEGAL_PARAMETER; return false; } return true; } // tls_extension represents a TLS extension that is handled internally. // // The parse callbacks receive a |CBS| that contains the contents of the // extension (i.e. not including the type and length bytes). If an extension is // not received then the parse callbacks will be called with a NULL CBS so that // they can do any processing needed to handle the absence of an extension. // // The add callbacks receive a |CBB| to which the extension can be appended but // the function is responsible for appending the type and length bytes too. // // |add_clienthello| may be called multiple times and must not mutate |hs|. It // is additionally passed two output |CBB|s. If the extension is the same // independent of the value of |type|, the callback may write to // |out_compressible| instead of |out|. When serializing the ClientHelloInner, // all compressible extensions will be made continguous and replaced with // ech_outer_extensions when encrypted. When serializing the ClientHelloOuter // or not offering ECH, |out| will be equal to |out_compressible|, so writing to // |out_compressible| still works. // // Note the |parse_serverhello| and |add_serverhello| callbacks refer to the // TLS 1.2 ServerHello. In TLS 1.3, these callbacks act on EncryptedExtensions, // with ServerHello extensions handled elsewhere in the handshake. // // All callbacks return true for success and false for error. If a parse // function returns zero then a fatal alert with value |*out_alert| will be // sent. If |*out_alert| isn't set, then a |decode_error| alert will be sent. 
struct tls_extension { uint16_t value; bool (*add_clienthello)(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type); bool (*parse_serverhello)(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents); bool (*parse_clienthello)(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents); bool (*add_serverhello)(SSL_HANDSHAKE *hs, CBB *out); }; static bool forbid_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { if (contents != NULL) { // Servers MUST NOT send this extension. *out_alert = SSL_AD_UNSUPPORTED_EXTENSION; OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); return false; } return true; } static bool ignore_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { // This extension from the client is handled elsewhere. return true; } static bool dont_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { return true; } // Server name indication (SNI). // // https://tools.ietf.org/html/rfc6066#section-3. static bool ext_sni_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { const SSL *const ssl = hs->ssl; // If offering ECH, send the public name instead of the configured name. 
Span hostname; if (type == ssl_client_hello_outer) { hostname = hs->selected_ech_config->public_name; } else { if (ssl->hostname == nullptr) { return true; } hostname = StringAsBytes(ssl->hostname.get()); } CBB contents, server_name_list, name; if (!CBB_add_u16(out, TLSEXT_TYPE_server_name) || !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u16_length_prefixed(&contents, &server_name_list) || !CBB_add_u8(&server_name_list, TLSEXT_NAMETYPE_host_name) || !CBB_add_u16_length_prefixed(&server_name_list, &name) || !CBB_add_bytes(&name, hostname.data(), hostname.size()) || !CBB_flush(out)) { return false; } return true; } static bool ext_sni_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { // The server may acknowledge SNI with an empty extension. We check the syntax // but otherwise ignore this signal. return contents == NULL || CBS_len(contents) == 0; } static bool ext_sni_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { // SNI has already been parsed earlier in the handshake. See |extract_sni|. 
return true; } static bool ext_sni_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { if (hs->ssl->s3->session_reused || // !hs->should_ack_sni) { return true; } if (!CBB_add_u16(out, TLSEXT_TYPE_server_name) || !CBB_add_u16(out, 0 /* length */)) { return false; } return true; } // Encrypted ClientHello (ECH) // // https://tools.ietf.org/html/draft-ietf-tls-esni-13 static bool ext_ech_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { if (type == ssl_client_hello_inner) { if (!CBB_add_u16(out, TLSEXT_TYPE_encrypted_client_hello) || !CBB_add_u16(out, /* length */ 1) || !CBB_add_u8(out, ECH_CLIENT_INNER)) { return false; } return true; } if (hs->ech_client_outer.empty()) { return true; } CBB ech_body; if (!CBB_add_u16(out, TLSEXT_TYPE_encrypted_client_hello) || !CBB_add_u16_length_prefixed(out, &ech_body) || !CBB_add_u8(&ech_body, ECH_CLIENT_OUTER) || !CBB_add_bytes(&ech_body, hs->ech_client_outer.data(), hs->ech_client_outer.size()) || !CBB_flush(out)) { return false; } return true; } static bool ext_ech_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { return true; } // The ECH extension may not be sent in TLS 1.2 ServerHello, only TLS 1.3 // EncryptedExtensions. It also may not be sent in response to an inner ECH // extension. 
if (ssl_protocol_version(ssl) < TLS1_3_VERSION || ssl->s3->ech_status == ssl_ech_accepted) { *out_alert = SSL_AD_UNSUPPORTED_EXTENSION; OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); return false; } if (!ssl_is_valid_ech_config_list(*contents)) { *out_alert = SSL_AD_DECODE_ERROR; return false; } if (ssl->s3->ech_status == ssl_ech_rejected && !hs->ech_retry_configs.CopyFrom(*contents)) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } return true; } static bool ext_ech_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { if (contents == nullptr) { return true; } uint8_t type; if (!CBS_get_u8(contents, &type)) { return false; } if (type == ECH_CLIENT_OUTER) { // Outer ECH extensions are handled outside the callback. return true; } if (type != ECH_CLIENT_INNER || CBS_len(contents) != 0) { return false; } hs->ech_is_inner = true; return true; } static bool ext_ech_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (ssl_protocol_version(ssl) < TLS1_3_VERSION || ssl->s3->ech_status == ssl_ech_accepted || // hs->ech_keys == nullptr) { return true; } // Write the list of retry configs to |out|. Note |SSL_CTX_set1_ech_keys| // ensures |ech_keys| contains at least one retry config. CBB body, retry_configs; if (!CBB_add_u16(out, TLSEXT_TYPE_encrypted_client_hello) || !CBB_add_u16_length_prefixed(out, &body) || !CBB_add_u16_length_prefixed(&body, &retry_configs)) { return false; } for (const auto &config : hs->ech_keys->configs) { if (!config->is_retry_config()) { continue; } if (!CBB_add_bytes(&retry_configs, config->ech_config().raw.data(), config->ech_config().raw.size())) { return false; } } return CBB_flush(out); } // Renegotiation indication. // // https://tools.ietf.org/html/rfc5746 static bool ext_ri_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { const SSL *const ssl = hs->ssl; // Renegotiation indication is not necessary in TLS 1.3. 
  if (hs->min_version >= TLS1_3_VERSION ||  //
      type == ssl_client_hello_inner) {
    return true;
  }

  assert(ssl->s3->initial_handshake_complete ==
         !ssl->s3->previous_client_finished.empty());

  // On renegotiation, echo our previous client Finished value; on the initial
  // handshake this is the empty renegotiated_connection field.
  CBB contents, prev_finished;
  if (!CBB_add_u16(out, TLSEXT_TYPE_renegotiate) ||
      !CBB_add_u16_length_prefixed(out, &contents) ||
      !CBB_add_u8_length_prefixed(&contents, &prev_finished) ||
      !CBB_add_bytes(&prev_finished, ssl->s3->previous_client_finished.data(),
                     ssl->s3->previous_client_finished.size()) ||
      !CBB_flush(out)) {
    return false;
  }

  return true;
}

static bool ext_ri_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert,
                                     CBS *contents) {
  SSL *const ssl = hs->ssl;
  if (contents != NULL && ssl_protocol_version(ssl) >= TLS1_3_VERSION) {
    *out_alert = SSL_AD_ILLEGAL_PARAMETER;
    return false;
  }

  // Servers may not switch between omitting the extension and supporting it.
  // See RFC 5746, sections 3.5 and 4.2.
  if (ssl->s3->initial_handshake_complete &&
      (contents != NULL) != ssl->s3->send_connection_binding) {
    *out_alert = SSL_AD_HANDSHAKE_FAILURE;
    OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_MISMATCH);
    return false;
  }

  if (contents == NULL) {
    // Strictly speaking, if we want to avoid an attack we should *always* see
    // RI even on initial ServerHello because the client doesn't see any
    // renegotiation during an attack. However this would mean we could not
    // connect to any server which doesn't support RI.
    //
    // OpenSSL has |SSL_OP_LEGACY_SERVER_CONNECT| to control this, but in
    // practical terms every client sets it so it's just assumed here.
    return true;
  }

  // Check for logic errors.
  assert(ssl->s3->previous_client_finished.size() ==
         ssl->s3->previous_server_finished.size());
  assert(ssl->s3->initial_handshake_complete ==
         !ssl->s3->previous_client_finished.empty());

  // Parse out the extension contents.
  CBS renegotiated_connection;
  if (!CBS_get_u8_length_prefixed(contents, &renegotiated_connection) ||
      CBS_len(contents) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_ENCODING_ERR);
    *out_alert = SSL_AD_ILLEGAL_PARAMETER;
    return false;
  }

  // Check that the extension matches: the server must echo our previous
  // client Finished followed by its previous server Finished, exactly.
  CBS client_verify, server_verify;
  if (!CBS_get_bytes(&renegotiated_connection, &client_verify,
                     ssl->s3->previous_client_finished.size()) ||
      !CBS_get_bytes(&renegotiated_connection, &server_verify,
                     ssl->s3->previous_server_finished.size()) ||
      CBS_len(&renegotiated_connection) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_MISMATCH);
    *out_alert = SSL_AD_HANDSHAKE_FAILURE;
    return false;
  }

  bool ok =
      CBS_mem_equal(&client_verify, ssl->s3->previous_client_finished.data(),
                    ssl->s3->previous_client_finished.size()) &&
      CBS_mem_equal(&server_verify, ssl->s3->previous_server_finished.data(),
                    ssl->s3->previous_server_finished.size());
#if defined(BORINGSSL_UNSAFE_FUZZER_MODE)
  ok = true;
#endif
  if (!ok) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_MISMATCH);
    *out_alert = SSL_AD_HANDSHAKE_FAILURE;
    return false;
  }

  ssl->s3->send_connection_binding = true;

  return true;
}

static bool ext_ri_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert,
                                     CBS *contents) {
  SSL *const ssl = hs->ssl;
  // Renegotiation isn't supported as a server so this function should never be
  // called after the initial handshake.
  assert(!ssl->s3->initial_handshake_complete);

  if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) {
    return true;
  }

  if (contents == NULL) {
    return true;
  }

  CBS renegotiated_connection;
  if (!CBS_get_u8_length_prefixed(contents, &renegotiated_connection) ||
      CBS_len(contents) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_ENCODING_ERR);
    return false;
  }

  // Check that the extension matches. We do not support renegotiation as a
  // server, so this must be empty.
  if (CBS_len(&renegotiated_connection) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_MISMATCH);
    *out_alert = SSL_AD_HANDSHAKE_FAILURE;
    return false;
  }

  ssl->s3->send_connection_binding = true;

  return true;
}

static bool ext_ri_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) {
  SSL *const ssl = hs->ssl;
  // Renegotiation isn't supported as a server so this function should never be
  // called after the initial handshake.
  assert(!ssl->s3->initial_handshake_complete);

  if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) {
    return true;
  }

  if (!CBB_add_u16(out, TLSEXT_TYPE_renegotiate) ||
      !CBB_add_u16(out, 1 /* length */) ||
      !CBB_add_u8(out, 0 /* empty renegotiation info */)) {
    return false;
  }

  return true;
}

// Extended Master Secret.
//
// https://tools.ietf.org/html/rfc7627

static bool ext_ems_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out,
                                    CBB *out_compressible,
                                    ssl_client_hello_type_t type) {
  // Extended master secret is not necessary in TLS 1.3.
  if (hs->min_version >= TLS1_3_VERSION || type == ssl_client_hello_inner) {
    return true;
  }

  if (!CBB_add_u16(out, TLSEXT_TYPE_extended_master_secret) ||
      !CBB_add_u16(out, 0 /* length */)) {
    return false;
  }

  return true;
}

static bool ext_ems_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert,
                                      CBS *contents) {
  SSL *const ssl = hs->ssl;

  if (contents != NULL) {
    // EMS is TLS-1.2-only and must be empty.
    if (ssl_protocol_version(ssl) >= TLS1_3_VERSION ||  //
        CBS_len(contents) != 0) {
      return false;
    }

    hs->extended_master_secret = true;
  }

  // Whether EMS is negotiated may not change on renegotiation.
  if (ssl->s3->established_session != nullptr &&
      hs->extended_master_secret !=
          !!ssl->s3->established_session->extended_master_secret) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_EMS_MISMATCH);
    *out_alert = SSL_AD_ILLEGAL_PARAMETER;
    return false;
  }

  return true;
}

static bool ext_ems_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert,
                                      CBS *contents) {
  if (ssl_protocol_version(hs->ssl) >= TLS1_3_VERSION) {
    return true;
  }

  if (contents == NULL) {
    return true;
  }

  if (CBS_len(contents) != 0) {
    return false;
  }

  hs->extended_master_secret = true;
  return true;
}

static bool ext_ems_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) {
  // Only acknowledge EMS when it was negotiated.
  if (!hs->extended_master_secret) {
    return true;
  }

  if (!CBB_add_u16(out, TLSEXT_TYPE_extended_master_secret) ||
      !CBB_add_u16(out, 0 /* length */)) {
    return false;
  }

  return true;
}

// Session tickets.
//
// https://tools.ietf.org/html/rfc5077

static bool ext_ticket_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out,
                                       CBB *out_compressible,
                                       ssl_client_hello_type_t type) {
  const SSL *const ssl = hs->ssl;
  // TLS 1.3 uses a different ticket extension.
  if (hs->min_version >= TLS1_3_VERSION || type == ssl_client_hello_inner ||
      SSL_get_options(ssl) & SSL_OP_NO_TICKET) {
    return true;
  }

  // Renegotiation does not participate in session resumption. However, still
  // advertise the extension to avoid potentially breaking servers which carry
  // over the state from the previous handshake, such as OpenSSL servers
  // without upstream's 3c3f0259238594d77264a78944d409f2127642c4.
Span ticket; if (!ssl->s3->initial_handshake_complete && // ssl->session != nullptr && ssl_session_get_type(ssl->session.get()) == SSLSessionType::kTicket) { ticket = ssl->session->ticket; } CBB ticket_cbb; if (!CBB_add_u16(out, TLSEXT_TYPE_session_ticket) || !CBB_add_u16_length_prefixed(out, &ticket_cbb) || !CBB_add_bytes(&ticket_cbb, ticket.data(), ticket.size()) || !CBB_flush(out)) { return false; } return true; } static bool ext_ticket_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { return true; } if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { return false; } // If |SSL_OP_NO_TICKET| is set then no extension will have been sent and // this function should never be called, even if the server tries to send the // extension. assert((SSL_get_options(ssl) & SSL_OP_NO_TICKET) == 0); if (CBS_len(contents) != 0) { return false; } hs->ticket_expected = true; return true; } static bool ext_ticket_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { if (!hs->ticket_expected) { return true; } // If |SSL_OP_NO_TICKET| is set, |ticket_expected| should never be true. assert((SSL_get_options(hs->ssl) & SSL_OP_NO_TICKET) == 0); if (!CBB_add_u16(out, TLSEXT_TYPE_session_ticket) || !CBB_add_u16(out, 0 /* length */)) { return false; } return true; } // Signature Algorithms. 
// // https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 static bool ext_sigalgs_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { if (hs->max_version < TLS1_2_VERSION) { return true; } CBB contents, sigalgs_cbb; if (!CBB_add_u16(out_compressible, TLSEXT_TYPE_signature_algorithms) || !CBB_add_u16_length_prefixed(out_compressible, &contents) || !CBB_add_u16_length_prefixed(&contents, &sigalgs_cbb) || !tls12_add_verify_sigalgs(hs, &sigalgs_cbb) || !CBB_flush(out_compressible)) { return false; } return true; } static bool ext_sigalgs_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { hs->peer_sigalgs.Reset(); if (contents == NULL) { return true; } CBS supported_signature_algorithms; if (!CBS_get_u16_length_prefixed(contents, &supported_signature_algorithms) || CBS_len(contents) != 0 || !tls1_parse_peer_sigalgs(hs, &supported_signature_algorithms)) { return false; } return true; } // OCSP Stapling. // // https://tools.ietf.org/html/rfc6066#section-8 static bool ext_ocsp_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { if (!hs->config->ocsp_stapling_enabled) { return true; } CBB contents; if (!CBB_add_u16(out_compressible, TLSEXT_TYPE_status_request) || !CBB_add_u16_length_prefixed(out_compressible, &contents) || !CBB_add_u8(&contents, TLSEXT_STATUSTYPE_ocsp) || !CBB_add_u16(&contents, 0 /* empty responder ID list */) || !CBB_add_u16(&contents, 0 /* empty request extensions */) || !CBB_flush(out_compressible)) { return false; } return true; } static bool ext_ocsp_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { return true; } // TLS 1.3 OCSP responses are included in the Certificate extensions. if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { return false; } // OCSP stapling is forbidden on non-certificate ciphers. 
if (CBS_len(contents) != 0 || !ssl_cipher_uses_certificate_auth(hs->new_cipher)) { return false; } // Note this does not check for resumption in TLS 1.2. Sending // status_request here does not make sense, but OpenSSL does so and the // specification does not say anything. Tolerate it but ignore it. hs->certificate_status_expected = true; return true; } static bool ext_ocsp_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { if (contents == NULL) { return true; } uint8_t status_type; if (!CBS_get_u8(contents, &status_type)) { return false; } // We cannot decide whether OCSP stapling will occur yet because the correct // SSL_CTX might not have been selected. hs->ocsp_stapling_requested = status_type == TLSEXT_STATUSTYPE_ocsp; return true; } static bool ext_ocsp_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (ssl_protocol_version(ssl) >= TLS1_3_VERSION || !hs->ocsp_stapling_requested || ssl->s3->session_reused || !ssl_cipher_uses_certificate_auth(hs->new_cipher) || hs->credential->ocsp_response == nullptr) { return true; } hs->certificate_status_expected = true; return CBB_add_u16(out, TLSEXT_TYPE_status_request) && CBB_add_u16(out, 0 /* length */); } // Next protocol negotiation. // // https://htmlpreview.github.io/?https://github.com/agl/technotes/blob/master/nextprotoneg.html static bool ext_npn_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { const SSL *const ssl = hs->ssl; if (ssl->ctx->next_proto_select_cb == NULL || // Do not allow NPN to change on renegotiation. ssl->s3->initial_handshake_complete || // NPN is not defined in DTLS or TLS 1.3. 
SSL_is_dtls(ssl) || hs->min_version >= TLS1_3_VERSION || type == ssl_client_hello_inner) { return true; } if (!CBB_add_u16(out, TLSEXT_TYPE_next_proto_neg) || !CBB_add_u16(out, 0 /* length */)) { return false; } return true; } static bool ext_npn_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { return true; } if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { return false; } // If any of these are false then we should never have sent the NPN // extension in the ClientHello and thus this function should never have been // called. assert(!ssl->s3->initial_handshake_complete); assert(!SSL_is_dtls(ssl)); assert(ssl->ctx->next_proto_select_cb != NULL); if (!ssl->s3->alpn_selected.empty()) { // NPN and ALPN may not be negotiated in the same connection. *out_alert = SSL_AD_ILLEGAL_PARAMETER; OPENSSL_PUT_ERROR(SSL, SSL_R_NEGOTIATED_BOTH_NPN_AND_ALPN); return false; } const uint8_t *const orig_contents = CBS_data(contents); const size_t orig_len = CBS_len(contents); while (CBS_len(contents) != 0) { CBS proto; if (!CBS_get_u8_length_prefixed(contents, &proto) || // CBS_len(&proto) == 0) { return false; } } // |orig_len| fits in |unsigned| because TLS extensions use 16-bit lengths. 
uint8_t *selected; uint8_t selected_len; if (ssl->ctx->next_proto_select_cb( ssl, &selected, &selected_len, orig_contents, static_cast(orig_len), ssl->ctx->next_proto_select_cb_arg) != SSL_TLSEXT_ERR_OK || !ssl->s3->next_proto_negotiated.CopyFrom(Span(selected, selected_len))) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } hs->next_proto_neg_seen = true; return true; } static bool ext_npn_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { SSL *const ssl = hs->ssl; if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { return true; } if (contents != NULL && CBS_len(contents) != 0) { return false; } if (contents == NULL || // ssl->s3->initial_handshake_complete || // ssl->ctx->next_protos_advertised_cb == NULL || // SSL_is_dtls(ssl)) { return true; } hs->next_proto_neg_seen = true; return true; } static bool ext_npn_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; // |next_proto_neg_seen| might have been cleared when an ALPN extension was // parsed. if (!hs->next_proto_neg_seen) { return true; } const uint8_t *npa; unsigned npa_len; if (ssl->ctx->next_protos_advertised_cb( ssl, &npa, &npa_len, ssl->ctx->next_protos_advertised_cb_arg) != SSL_TLSEXT_ERR_OK) { hs->next_proto_neg_seen = false; return true; } CBB contents; if (!CBB_add_u16(out, TLSEXT_TYPE_next_proto_neg) || // !CBB_add_u16_length_prefixed(out, &contents) || // !CBB_add_bytes(&contents, npa, npa_len) || // !CBB_flush(out)) { return false; } return true; } // Signed certificate timestamps. 
// // https://tools.ietf.org/html/rfc6962#section-3.3.1 static bool ext_sct_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { if (!hs->config->signed_cert_timestamps_enabled) { return true; } if (!CBB_add_u16(out_compressible, TLSEXT_TYPE_certificate_timestamp) || !CBB_add_u16(out_compressible, 0 /* length */)) { return false; } return true; } static bool ext_sct_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { return true; } // TLS 1.3 SCTs are included in the Certificate extensions. if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { *out_alert = SSL_AD_DECODE_ERROR; return false; } // If this is false then we should never have sent the SCT extension in the // ClientHello and thus this function should never have been called. assert(hs->config->signed_cert_timestamps_enabled); if (!ssl_is_sct_list_valid(contents)) { *out_alert = SSL_AD_DECODE_ERROR; return false; } // Session resumption uses the original session information. The extension // should not be sent on resumption, but RFC 6962 did not make it a // requirement, so tolerate this. // // TODO(davidben): Enforce this anyway. if (!ssl->s3->session_reused) { hs->new_session->signed_cert_timestamp_list.reset( CRYPTO_BUFFER_new_from_CBS(contents, ssl->ctx->pool)); if (hs->new_session->signed_cert_timestamp_list == nullptr) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } } return true; } static bool ext_sct_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { if (contents == NULL) { return true; } if (CBS_len(contents) != 0) { return false; } hs->scts_requested = true; return true; } static bool ext_sct_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; assert(hs->scts_requested); // The extension shouldn't be sent when resuming sessions. 
if (ssl_protocol_version(ssl) >= TLS1_3_VERSION || ssl->s3->session_reused || !ssl_cipher_uses_certificate_auth(hs->new_cipher) || hs->credential->signed_cert_timestamp_list == nullptr) { return true; } CBB contents; return CBB_add_u16(out, TLSEXT_TYPE_certificate_timestamp) && CBB_add_u16_length_prefixed(out, &contents) && CBB_add_bytes(&contents, CRYPTO_BUFFER_data( hs->credential->signed_cert_timestamp_list.get()), CRYPTO_BUFFER_len( hs->credential->signed_cert_timestamp_list.get())) && CBB_flush(out); } // Application-level Protocol Negotiation. // // https://tools.ietf.org/html/rfc7301 static bool ext_alpn_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { const SSL *const ssl = hs->ssl; if (hs->config->alpn_client_proto_list.empty() && SSL_is_quic(ssl)) { // ALPN MUST be used with QUIC. OPENSSL_PUT_ERROR(SSL, SSL_R_NO_APPLICATION_PROTOCOL); return false; } if (hs->config->alpn_client_proto_list.empty() || ssl->s3->initial_handshake_complete) { return true; } CBB contents, proto_list; if (!CBB_add_u16(out_compressible, TLSEXT_TYPE_application_layer_protocol_negotiation) || !CBB_add_u16_length_prefixed(out_compressible, &contents) || !CBB_add_u16_length_prefixed(&contents, &proto_list) || !CBB_add_bytes(&proto_list, hs->config->alpn_client_proto_list.data(), hs->config->alpn_client_proto_list.size()) || !CBB_flush(out_compressible)) { return false; } return true; } static bool ext_alpn_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { if (SSL_is_quic(ssl)) { // ALPN is required when QUIC is used. OPENSSL_PUT_ERROR(SSL, SSL_R_NO_APPLICATION_PROTOCOL); *out_alert = SSL_AD_NO_APPLICATION_PROTOCOL; return false; } return true; } assert(!ssl->s3->initial_handshake_complete); assert(!hs->config->alpn_client_proto_list.empty()); if (hs->next_proto_neg_seen) { // NPN and ALPN may not be negotiated in the same connection. 
*out_alert = SSL_AD_ILLEGAL_PARAMETER; OPENSSL_PUT_ERROR(SSL, SSL_R_NEGOTIATED_BOTH_NPN_AND_ALPN); return false; } // The extension data consists of a ProtocolNameList which must have // exactly one ProtocolName. Each of these is length-prefixed. CBS protocol_name_list, protocol_name; if (!CBS_get_u16_length_prefixed(contents, &protocol_name_list) || // CBS_len(contents) != 0 || // !CBS_get_u8_length_prefixed(&protocol_name_list, &protocol_name) || // // Empty protocol names are forbidden. CBS_len(&protocol_name) == 0 || // CBS_len(&protocol_name_list) != 0) { return false; } if (!ssl_is_alpn_protocol_allowed(hs, protocol_name)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_ALPN_PROTOCOL); *out_alert = SSL_AD_ILLEGAL_PARAMETER; return false; } if (!ssl->s3->alpn_selected.CopyFrom(protocol_name)) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } return true; } bool ssl_is_valid_alpn_list(Span in) { CBS protocol_name_list = in; if (CBS_len(&protocol_name_list) == 0) { return false; } while (CBS_len(&protocol_name_list) > 0) { CBS protocol_name; if (!CBS_get_u8_length_prefixed(&protocol_name_list, &protocol_name) || // Empty protocol names are forbidden. CBS_len(&protocol_name) == 0) { return false; } } return true; } bool ssl_is_alpn_protocol_allowed(const SSL_HANDSHAKE *hs, Span protocol) { if (hs->config->alpn_client_proto_list.empty()) { return false; } if (hs->ssl->ctx->allow_unknown_alpn_protos) { return true; } // Check that the protocol name is one of the ones we advertised. 
return ssl_alpn_list_contains_protocol(hs->config->alpn_client_proto_list, protocol); } bool ssl_alpn_list_contains_protocol(Span list, Span protocol) { CBS cbs = list, candidate; while (CBS_len(&cbs) > 0) { if (!CBS_get_u8_length_prefixed(&cbs, &candidate)) { return false; } if (candidate == protocol) { return true; } } return false; } bool ssl_negotiate_alpn(SSL_HANDSHAKE *hs, uint8_t *out_alert, const SSL_CLIENT_HELLO *client_hello) { SSL *const ssl = hs->ssl; CBS contents; if (ssl->ctx->alpn_select_cb == NULL || !ssl_client_hello_get_extension( client_hello, &contents, TLSEXT_TYPE_application_layer_protocol_negotiation)) { if (SSL_is_quic(ssl)) { // ALPN is required when QUIC is used. OPENSSL_PUT_ERROR(SSL, SSL_R_NO_APPLICATION_PROTOCOL); *out_alert = SSL_AD_NO_APPLICATION_PROTOCOL; return false; } // Ignore ALPN if not configured or no extension was supplied. return true; } // ALPN takes precedence over NPN. hs->next_proto_neg_seen = false; CBS protocol_name_list; if (!CBS_get_u16_length_prefixed(&contents, &protocol_name_list) || // CBS_len(&contents) != 0 || // !ssl_is_valid_alpn_list(protocol_name_list)) { OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); *out_alert = SSL_AD_DECODE_ERROR; return false; } // |protocol_name_list| fits in |unsigned| because TLS extensions use 16-bit // lengths. const uint8_t *selected; uint8_t selected_len; int ret = ssl->ctx->alpn_select_cb( ssl, &selected, &selected_len, CBS_data(&protocol_name_list), static_cast(CBS_len(&protocol_name_list)), ssl->ctx->alpn_select_cb_arg); // ALPN is required when QUIC is used. 
if (SSL_is_quic(ssl) && (ret == SSL_TLSEXT_ERR_NOACK || ret == SSL_TLSEXT_ERR_ALERT_WARNING)) { ret = SSL_TLSEXT_ERR_ALERT_FATAL; } switch (ret) { case SSL_TLSEXT_ERR_OK: if (selected_len == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_ALPN_PROTOCOL); *out_alert = SSL_AD_INTERNAL_ERROR; return false; } if (!ssl->s3->alpn_selected.CopyFrom(Span(selected, selected_len))) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } break; case SSL_TLSEXT_ERR_NOACK: case SSL_TLSEXT_ERR_ALERT_WARNING: break; case SSL_TLSEXT_ERR_ALERT_FATAL: *out_alert = SSL_AD_NO_APPLICATION_PROTOCOL; OPENSSL_PUT_ERROR(SSL, SSL_R_NO_APPLICATION_PROTOCOL); return false; default: // Invalid return value. *out_alert = SSL_AD_INTERNAL_ERROR; OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } return true; } static bool ext_alpn_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (ssl->s3->alpn_selected.empty()) { return true; } CBB contents, proto_list, proto; if (!CBB_add_u16(out, TLSEXT_TYPE_application_layer_protocol_negotiation) || !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u16_length_prefixed(&contents, &proto_list) || !CBB_add_u8_length_prefixed(&proto_list, &proto) || !CBB_add_bytes(&proto, ssl->s3->alpn_selected.data(), ssl->s3->alpn_selected.size()) || !CBB_flush(out)) { return false; } return true; } // Channel ID. // // https://tools.ietf.org/html/draft-balfanz-tls-channelid-01 static bool ext_channel_id_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { const SSL *const ssl = hs->ssl; if (!hs->config->channel_id_private || SSL_is_dtls(ssl) || // Don't offer Channel ID in ClientHelloOuter. ClientHelloOuter handshakes // are not authenticated for the name that can learn the Channel ID. // // We could alternatively offer the extension but sign with a random key. 
// For other extensions, we try to align |ssl_client_hello_outer| and // |ssl_client_hello_unencrypted|, to improve the effectiveness of ECH // GREASE. However, Channel ID is deprecated and unlikely to be used with // ECH, so do the simplest thing. type == ssl_client_hello_outer) { return true; } if (!CBB_add_u16(out, TLSEXT_TYPE_channel_id) || !CBB_add_u16(out, 0 /* length */)) { return false; } return true; } static bool ext_channel_id_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { if (contents == NULL) { return true; } assert(!SSL_is_dtls(hs->ssl)); assert(hs->config->channel_id_private); if (CBS_len(contents) != 0) { return false; } hs->channel_id_negotiated = true; return true; } static bool ext_channel_id_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL || !hs->config->channel_id_enabled || SSL_is_dtls(ssl)) { return true; } if (CBS_len(contents) != 0) { return false; } hs->channel_id_negotiated = true; return true; } static bool ext_channel_id_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { if (!hs->channel_id_negotiated) { return true; } if (!CBB_add_u16(out, TLSEXT_TYPE_channel_id) || !CBB_add_u16(out, 0 /* length */)) { return false; } return true; } // Secure Real-time Transport Protocol (SRTP) extension. 
// // https://tools.ietf.org/html/rfc5764 static bool ext_srtp_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { const SSL *const ssl = hs->ssl; const STACK_OF(SRTP_PROTECTION_PROFILE) *profiles = SSL_get_srtp_profiles(ssl); if (profiles == NULL || // sk_SRTP_PROTECTION_PROFILE_num(profiles) == 0 || // !SSL_is_dtls(ssl)) { return true; } CBB contents, profile_ids; if (!CBB_add_u16(out_compressible, TLSEXT_TYPE_srtp) || !CBB_add_u16_length_prefixed(out_compressible, &contents) || !CBB_add_u16_length_prefixed(&contents, &profile_ids)) { return false; } for (const SRTP_PROTECTION_PROFILE *profile : profiles) { if (!CBB_add_u16(&profile_ids, profile->id)) { return false; } } if (!CBB_add_u8(&contents, 0 /* empty use_mki value */) || !CBB_flush(out_compressible)) { return false; } return true; } static bool ext_srtp_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { return true; } // The extension consists of a u16-prefixed profile ID list containing a // single uint16_t profile ID, then followed by a u8-prefixed srtp_mki field. // // See https://tools.ietf.org/html/rfc5764#section-4.1.1 assert(SSL_is_dtls(ssl)); CBS profile_ids, srtp_mki; uint16_t profile_id; if (!CBS_get_u16_length_prefixed(contents, &profile_ids) || // !CBS_get_u16(&profile_ids, &profile_id) || // CBS_len(&profile_ids) != 0 || // !CBS_get_u8_length_prefixed(contents, &srtp_mki) || // CBS_len(contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST); return false; } if (CBS_len(&srtp_mki) != 0) { // Must be no MKI, since we never offer one. OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SRTP_MKI_VALUE); *out_alert = SSL_AD_ILLEGAL_PARAMETER; return false; } // Check to see if the server gave us something we support and offered. 
for (const SRTP_PROTECTION_PROFILE *profile : SSL_get_srtp_profiles(ssl)) { if (profile->id == profile_id) { ssl->s3->srtp_profile = profile; return true; } } OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST); *out_alert = SSL_AD_ILLEGAL_PARAMETER; return false; } static bool ext_srtp_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { SSL *const ssl = hs->ssl; // DTLS-SRTP is only defined for DTLS. if (contents == NULL || !SSL_is_dtls(ssl)) { return true; } CBS profile_ids, srtp_mki; if (!CBS_get_u16_length_prefixed(contents, &profile_ids) || CBS_len(&profile_ids) < 2 || !CBS_get_u8_length_prefixed(contents, &srtp_mki) || CBS_len(contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST); return false; } // Discard the MKI value for now. const STACK_OF(SRTP_PROTECTION_PROFILE) *server_profiles = SSL_get_srtp_profiles(ssl); // Pick the server's most preferred profile. for (const SRTP_PROTECTION_PROFILE *server_profile : server_profiles) { CBS profile_ids_tmp; CBS_init(&profile_ids_tmp, CBS_data(&profile_ids), CBS_len(&profile_ids)); while (CBS_len(&profile_ids_tmp) > 0) { uint16_t profile_id; if (!CBS_get_u16(&profile_ids_tmp, &profile_id)) { return false; } if (server_profile->id == profile_id) { ssl->s3->srtp_profile = server_profile; return true; } } } return true; } static bool ext_srtp_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (ssl->s3->srtp_profile == NULL) { return true; } assert(SSL_is_dtls(ssl)); CBB contents, profile_ids; if (!CBB_add_u16(out, TLSEXT_TYPE_srtp) || !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u16_length_prefixed(&contents, &profile_ids) || !CBB_add_u16(&profile_ids, ssl->s3->srtp_profile->id) || !CBB_add_u8(&contents, 0 /* empty MKI */) || !CBB_flush(out)) { return false; } return true; } // EC point formats. 
// // https://tools.ietf.org/html/rfc4492#section-5.1.2 static bool ext_ec_point_add_extension(const SSL_HANDSHAKE *hs, CBB *out) { CBB contents, formats; if (!CBB_add_u16(out, TLSEXT_TYPE_ec_point_formats) || !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u8_length_prefixed(&contents, &formats) || !CBB_add_u8(&formats, TLSEXT_ECPOINTFORMAT_uncompressed) || !CBB_flush(out)) { return false; } return true; } static bool ext_ec_point_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { // The point format extension is unnecessary in TLS 1.3. if (hs->min_version >= TLS1_3_VERSION || type == ssl_client_hello_inner) { return true; } return ext_ec_point_add_extension(hs, out); } static bool ext_ec_point_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { if (contents == NULL) { return true; } if (ssl_protocol_version(hs->ssl) >= TLS1_3_VERSION) { return false; } CBS ec_point_format_list; if (!CBS_get_u8_length_prefixed(contents, &ec_point_format_list) || CBS_len(contents) != 0) { return false; } // Per RFC 4492, section 5.1.2, implementations MUST support the uncompressed // point format. 
if (OPENSSL_memchr(CBS_data(&ec_point_format_list), TLSEXT_ECPOINTFORMAT_uncompressed, CBS_len(&ec_point_format_list)) == NULL) { *out_alert = SSL_AD_ILLEGAL_PARAMETER; return false; } return true; } static bool ext_ec_point_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { if (ssl_protocol_version(hs->ssl) >= TLS1_3_VERSION) { return true; } return ext_ec_point_parse_serverhello(hs, out_alert, contents); } static bool ext_ec_point_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { return true; } const uint32_t alg_k = hs->new_cipher->algorithm_mkey; const uint32_t alg_a = hs->new_cipher->algorithm_auth; const bool using_ecc = (alg_k & SSL_kECDHE) || (alg_a & SSL_aECDSA); if (!using_ecc) { return true; } return ext_ec_point_add_extension(hs, out); } // Pre Shared Key // // https://tools.ietf.org/html/rfc8446#section-4.2.11 static bool should_offer_psk(const SSL_HANDSHAKE *hs, ssl_client_hello_type_t type) { const SSL *const ssl = hs->ssl; if (hs->max_version < TLS1_3_VERSION || ssl->session == nullptr || ssl_session_get_type(ssl->session.get()) != SSLSessionType::kPreSharedKey || // TODO(https://crbug.com/boringssl/275): Should we synthesize a // placeholder PSK, at least when we offer early data? Otherwise // ClientHelloOuter will contain an early_data extension without a // pre_shared_key extension and potentially break the recovery flow. type == ssl_client_hello_outer) { return false; } // Per RFC 8446 section 4.1.4, skip offering the session if the selected // cipher in HelloRetryRequest does not match. This avoids performing the // transcript hash transformation for multiple hashes. 
if (ssl->s3->used_hello_retry_request && ssl->session->cipher->algorithm_prf != hs->new_cipher->algorithm_prf) { return false; } return true; } static size_t ext_pre_shared_key_clienthello_length( const SSL_HANDSHAKE *hs, ssl_client_hello_type_t type) { const SSL *const ssl = hs->ssl; if (!should_offer_psk(hs, type)) { return 0; } size_t binder_len = EVP_MD_size(ssl_session_get_digest(ssl->session.get())); return 15 + ssl->session->ticket.size() + binder_len; } static bool ext_pre_shared_key_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, bool *out_needs_binder, ssl_client_hello_type_t type) { const SSL *const ssl = hs->ssl; *out_needs_binder = false; if (!should_offer_psk(hs, type)) { return true; } OPENSSL_timeval now = ssl_ctx_get_current_time(ssl->ctx.get()); uint32_t ticket_age = 1000 * (now.tv_sec - ssl->session->time); uint32_t obfuscated_ticket_age = ticket_age + ssl->session->ticket_age_add; // Fill in a placeholder zero binder of the appropriate length. It will be // computed and filled in later after length prefixes are computed. 
size_t binder_len = EVP_MD_size(ssl_session_get_digest(ssl->session.get())); CBB contents, identity, ticket, binders, binder; if (!CBB_add_u16(out, TLSEXT_TYPE_pre_shared_key) || !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u16_length_prefixed(&contents, &identity) || !CBB_add_u16_length_prefixed(&identity, &ticket) || !CBB_add_bytes(&ticket, ssl->session->ticket.data(), ssl->session->ticket.size()) || !CBB_add_u32(&identity, obfuscated_ticket_age) || !CBB_add_u16_length_prefixed(&contents, &binders) || !CBB_add_u8_length_prefixed(&binders, &binder) || !CBB_add_zeros(&binder, binder_len)) { return false; } *out_needs_binder = true; return CBB_flush(out); } bool ssl_ext_pre_shared_key_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { uint16_t psk_id; if (!CBS_get_u16(contents, &psk_id) || // CBS_len(contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } // We only advertise one PSK identity, so the only legal index is zero. if (psk_id != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_IDENTITY_NOT_FOUND); *out_alert = SSL_AD_UNKNOWN_PSK_IDENTITY; return false; } return true; } bool ssl_ext_pre_shared_key_parse_clienthello( SSL_HANDSHAKE *hs, CBS *out_ticket, CBS *out_binders, uint32_t *out_obfuscated_ticket_age, uint8_t *out_alert, const SSL_CLIENT_HELLO *client_hello, CBS *contents) { // Verify that the pre_shared_key extension is the last extension in // ClientHello. if (CBS_data(contents) + CBS_len(contents) != client_hello->extensions + client_hello->extensions_len) { OPENSSL_PUT_ERROR(SSL, SSL_R_PRE_SHARED_KEY_MUST_BE_LAST); *out_alert = SSL_AD_ILLEGAL_PARAMETER; return false; } // We only process the first PSK identity since we don't support pure PSK. 
CBS identities, binders; if (!CBS_get_u16_length_prefixed(contents, &identities) || // !CBS_get_u16_length_prefixed(&identities, out_ticket) || // !CBS_get_u32(&identities, out_obfuscated_ticket_age) || // !CBS_get_u16_length_prefixed(contents, &binders) || // CBS_len(&binders) == 0 || // CBS_len(contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } *out_binders = binders; // Check the syntax of the remaining identities, but do not process them. size_t num_identities = 1; while (CBS_len(&identities) != 0) { CBS unused_ticket; uint32_t unused_obfuscated_ticket_age; if (!CBS_get_u16_length_prefixed(&identities, &unused_ticket) || !CBS_get_u32(&identities, &unused_obfuscated_ticket_age)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } num_identities++; } // Check the syntax of the binders. The value will be checked later if // resuming. size_t num_binders = 0; while (CBS_len(&binders) != 0) { CBS binder; if (!CBS_get_u8_length_prefixed(&binders, &binder)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } num_binders++; } if (num_identities != num_binders) { OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_IDENTITY_BINDER_COUNT_MISMATCH); *out_alert = SSL_AD_ILLEGAL_PARAMETER; return false; } return true; } bool ssl_ext_pre_shared_key_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { if (!hs->ssl->s3->session_reused) { return true; } CBB contents; if (!CBB_add_u16(out, TLSEXT_TYPE_pre_shared_key) || // !CBB_add_u16_length_prefixed(out, &contents) || // // We only consider the first identity for resumption !CBB_add_u16(&contents, 0) || // !CBB_flush(out)) { return false; } return true; } // Pre-Shared Key Exchange Modes // // https://tools.ietf.org/html/rfc8446#section-4.2.9 static bool ext_psk_key_exchange_modes_add_clienthello( const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { if (hs->max_version < 
TLS1_3_VERSION) {
    return true;
  }
  // Offer only PSK with (EC)DHE; we never do pure-PSK resumption.
  CBB contents, ke_modes;
  if (!CBB_add_u16(out_compressible, TLSEXT_TYPE_psk_key_exchange_modes) ||
      !CBB_add_u16_length_prefixed(out_compressible, &contents) ||
      !CBB_add_u8_length_prefixed(&contents, &ke_modes) ||
      !CBB_add_u8(&ke_modes, SSL_PSK_DHE_KE)) {
    return false;
  }
  return CBB_flush(out_compressible);
}

// Parses the client's psk_key_exchange_modes list and records whether
// PSK_DHE_KE is among the offered modes.
static bool ext_psk_key_exchange_modes_parse_clienthello(SSL_HANDSHAKE *hs,
                                                         uint8_t *out_alert,
                                                         CBS *contents) {
  if (contents == NULL) {
    return true;
  }
  CBS ke_modes;
  if (!CBS_get_u8_length_prefixed(contents, &ke_modes) ||  //
      CBS_len(&ke_modes) == 0 ||                           //
      CBS_len(contents) != 0) {
    *out_alert = SSL_AD_DECODE_ERROR;
    return false;
  }
  // We only support tickets with PSK_DHE_KE.
  hs->accept_psk_mode = OPENSSL_memchr(CBS_data(&ke_modes), SSL_PSK_DHE_KE,
                                       CBS_len(&ke_modes)) != NULL;
  return true;
}

// Early Data Indication
//
// https://tools.ietf.org/html/rfc8446#section-4.2.10

static bool ext_early_data_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out,
                                           CBB *out_compressible,
                                           ssl_client_hello_type_t type) {
  const SSL *const ssl = hs->ssl;
  // The second ClientHello never offers early data, and we must have already
  // filled in |early_data_reason| by this point.
  if (ssl->s3->used_hello_retry_request) {
    assert(ssl->s3->early_data_reason != ssl_early_data_unknown);
    return true;
  }
  if (!hs->early_data_offered) {
    return true;
  }

  // If offering ECH, the extension only applies to ClientHelloInner, but we
  // send the extension in both ClientHellos. This ensures that, if the server
  // handshakes with ClientHelloOuter, it can skip past early data. See
  // draft-ietf-tls-esni-13, section 6.1.
if (!CBB_add_u16(out_compressible, TLSEXT_TYPE_early_data) ||  //
      !CBB_add_u16(out_compressible, 0) ||                       //
      !CBB_flush(out_compressible)) {
    return false;
  }
  return true;
}

// Handles the server's early_data response and records the early data
// outcome in |early_data_reason| / |early_data_accepted|.
static bool ext_early_data_parse_serverhello(SSL_HANDSHAKE *hs,
                                             uint8_t *out_alert,
                                             CBS *contents) {
  SSL *const ssl = hs->ssl;
  if (contents == NULL) {
    if (hs->early_data_offered && !ssl->s3->used_hello_retry_request) {
      ssl->s3->early_data_reason = ssl->s3->session_reused
                                       ? ssl_early_data_peer_declined
                                       : ssl_early_data_session_not_resumed;
    } else {
      // We already filled in |early_data_reason| when declining to offer 0-RTT
      // or handling the implicit HelloRetryRequest reject.
      assert(ssl->s3->early_data_reason != ssl_early_data_unknown);
    }
    return true;
  }

  // If we received an HRR, the second ClientHello never offers early data, so
  // the extensions logic will automatically reject early data extensions as
  // unsolicited. This covered by the ServerAcceptsEarlyDataOnHRR test.
  assert(!ssl->s3->used_hello_retry_request);

  // The extension body must be empty.
  if (CBS_len(contents) != 0) {
    *out_alert = SSL_AD_DECODE_ERROR;
    return false;
  }
  // The server may only accept early data when resuming.
  if (!ssl->s3->session_reused) {
    *out_alert = SSL_AD_UNSUPPORTED_EXTENSION;
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION);
    return false;
  }
  ssl->s3->early_data_reason = ssl_early_data_accepted;
  ssl->s3->early_data_accepted = true;
  return true;
}

// Records that the client offered early data (TLS 1.3 only; body must be
// empty).
static bool ext_early_data_parse_clienthello(SSL_HANDSHAKE *hs,
                                             uint8_t *out_alert,
                                             CBS *contents) {
  SSL *const ssl = hs->ssl;
  if (contents == NULL || ssl_protocol_version(ssl) < TLS1_3_VERSION) {
    return true;
  }
  if (CBS_len(contents) != 0) {
    *out_alert = SSL_AD_DECODE_ERROR;
    return false;
  }
  hs->early_data_offered = true;
  return true;
}

// Echoes an empty early_data extension when the server accepted 0-RTT.
static bool ext_early_data_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) {
  if (!hs->ssl->s3->early_data_accepted) {
    return true;
  }
  if (!CBB_add_u16(out, TLSEXT_TYPE_early_data) ||  //
      !CBB_add_u16(out, 0) ||                       //
      !CBB_flush(out)) {
    return false;
  }
  return true;
}

// Key Share
//
// https://tools.ietf.org/html/rfc8446#section-4.2.8

bool
ssl_setup_key_shares(SSL_HANDSHAKE *hs, uint16_t override_group_id) { SSL *const ssl = hs->ssl; hs->key_shares[0].reset(); hs->key_shares[1].reset(); hs->key_share_bytes.Reset(); if (hs->max_version < TLS1_3_VERSION) { return true; } bssl::ScopedCBB cbb; if (!CBB_init(cbb.get(), 64)) { return false; } if (override_group_id == 0 && ssl->ctx->grease_enabled) { // Add a fake group. See RFC 8701. if (!CBB_add_u16(cbb.get(), ssl_get_grease_value(hs, ssl_grease_group)) || !CBB_add_u16(cbb.get(), 1 /* length */) || !CBB_add_u8(cbb.get(), 0 /* one byte key share */)) { return false; } } uint16_t group_id = override_group_id; uint16_t second_group_id = 0; if (override_group_id == 0) { // Predict the most preferred group. Span groups = tls1_get_grouplist(hs); if (groups.empty()) { OPENSSL_PUT_ERROR(SSL, SSL_R_NO_GROUPS_SPECIFIED); return false; } group_id = groups[0]; // We'll try to include one post-quantum and one classical initial key // share. for (size_t i = 1; i < groups.size() && second_group_id == 0; i++) { if (is_post_quantum_group(group_id) != is_post_quantum_group(groups[i])) { second_group_id = groups[i]; assert(second_group_id != group_id); } } } CBB key_exchange; hs->key_shares[0] = SSLKeyShare::Create(group_id); if (!hs->key_shares[0] || // !CBB_add_u16(cbb.get(), group_id) || !CBB_add_u16_length_prefixed(cbb.get(), &key_exchange) || !hs->key_shares[0]->Generate(&key_exchange)) { return false; } if (second_group_id != 0) { hs->key_shares[1] = SSLKeyShare::Create(second_group_id); if (!hs->key_shares[1] || // !CBB_add_u16(cbb.get(), second_group_id) || !CBB_add_u16_length_prefixed(cbb.get(), &key_exchange) || !hs->key_shares[1]->Generate(&key_exchange)) { return false; } } return CBBFinishArray(cbb.get(), &hs->key_share_bytes); } static bool ext_key_share_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type) { if (hs->max_version < TLS1_3_VERSION) { return true; } assert(!hs->key_share_bytes.empty()); CBB 
contents, kse_bytes; if (!CBB_add_u16(out_compressible, TLSEXT_TYPE_key_share) || !CBB_add_u16_length_prefixed(out_compressible, &contents) || !CBB_add_u16_length_prefixed(&contents, &kse_bytes) || !CBB_add_bytes(&kse_bytes, hs->key_share_bytes.data(), hs->key_share_bytes.size()) || !CBB_flush(out_compressible)) { return false; } return true; } bool ssl_ext_key_share_parse_serverhello(SSL_HANDSHAKE *hs, Array *out_secret, uint8_t *out_alert, CBS *contents) { CBS ciphertext; uint16_t group_id; if (!CBS_get_u16(contents, &group_id) || !CBS_get_u16_length_prefixed(contents, &ciphertext) || CBS_len(contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } SSLKeyShare *key_share = hs->key_shares[0].get(); if (key_share->GroupID() != group_id) { if (!hs->key_shares[1] || hs->key_shares[1]->GroupID() != group_id) { *out_alert = SSL_AD_ILLEGAL_PARAMETER; OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); return false; } key_share = hs->key_shares[1].get(); } if (!key_share->Decap(out_secret, out_alert, ciphertext)) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } hs->new_session->group_id = group_id; hs->key_shares[0].reset(); hs->key_shares[1].reset(); return true; } bool ssl_ext_key_share_parse_clienthello(SSL_HANDSHAKE *hs, bool *out_found, Span *out_peer_key, uint8_t *out_alert, const SSL_CLIENT_HELLO *client_hello) { // We only support connections that include an ECDHE key exchange. CBS contents; if (!ssl_client_hello_get_extension(client_hello, &contents, TLSEXT_TYPE_key_share)) { OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_KEY_SHARE); *out_alert = SSL_AD_MISSING_EXTENSION; return false; } CBS key_shares; if (!CBS_get_u16_length_prefixed(&contents, &key_shares) || CBS_len(&contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } // Find the corresponding key share. 
const uint16_t group_id = hs->new_session->group_id;
  CBS peer_key;
  CBS_init(&peer_key, nullptr, 0);
  while (CBS_len(&key_shares) > 0) {
    uint16_t id;
    CBS peer_key_tmp;
    if (!CBS_get_u16(&key_shares, &id) ||
        !CBS_get_u16_length_prefixed(&key_shares, &peer_key_tmp) ||
        CBS_len(&peer_key_tmp) == 0) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      return false;
    }

    if (id == group_id) {
      // Reject duplicate shares for the same group.
      if (CBS_len(&peer_key) != 0) {
        OPENSSL_PUT_ERROR(SSL, SSL_R_DUPLICATE_KEY_SHARE);
        *out_alert = SSL_AD_ILLEGAL_PARAMETER;
        return false;
      }

      peer_key = peer_key_tmp;
      // Continue parsing the structure to keep peers honest.
    }
  }

  if (out_peer_key != nullptr) {
    *out_peer_key = peer_key;
  }
  *out_found = CBS_len(&peer_key) != 0;
  return true;
}

// Writes the server's key_share entry using the ciphertext computed earlier
// in the handshake (|hs->key_share_ciphertext|).
bool ssl_ext_key_share_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) {
  CBB entry, ciphertext;
  if (!CBB_add_u16(out, TLSEXT_TYPE_key_share) ||
      !CBB_add_u16_length_prefixed(out, &entry) ||
      !CBB_add_u16(&entry, hs->new_session->group_id) ||
      !CBB_add_u16_length_prefixed(&entry, &ciphertext) ||
      !CBB_add_bytes(&ciphertext, hs->key_share_ciphertext.data(),
                     hs->key_share_ciphertext.size()) ||
      !CBB_flush(out)) {
    return false;
  }
  return true;
}

// Supported Versions
//
// https://tools.ietf.org/html/rfc8446#section-4.2.1

static bool ext_supported_versions_add_clienthello(
    const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible,
    ssl_client_hello_type_t type) {
  const SSL *const ssl = hs->ssl;
  if (hs->max_version <= TLS1_2_VERSION) {
    return true;
  }

  // supported_versions is compressible in ECH if ClientHelloOuter already
  // requires TLS 1.3. Otherwise the extensions differ in the older versions.
  if (hs->min_version >= TLS1_3_VERSION) {
    out = out_compressible;
  }

  CBB contents, versions;
  if (!CBB_add_u16(out, TLSEXT_TYPE_supported_versions) ||
      !CBB_add_u16_length_prefixed(out, &contents) ||
      !CBB_add_u8_length_prefixed(&contents, &versions)) {
    return false;
  }

  // Add a fake version. See RFC 8701.
  if (ssl->ctx->grease_enabled &&
      !CBB_add_u16(&versions, ssl_get_grease_value(hs, ssl_grease_version))) {
    return false;
  }

  // Encrypted ClientHellos requires TLS 1.3 or later.
  uint16_t extra_min_version =
      type == ssl_client_hello_inner ? TLS1_3_VERSION : 0;
  if (!ssl_add_supported_versions(hs, &versions, extra_min_version) ||
      !CBB_flush(out)) {
    return false;
  }
  return true;
}

// Cookie
//
// https://tools.ietf.org/html/rfc8446#section-4.2.2

// Echoes a cookie previously received in HelloRetryRequest, if any.
static bool ext_cookie_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out,
                                       CBB *out_compressible,
                                       ssl_client_hello_type_t type) {
  if (hs->cookie.empty()) {
    return true;
  }

  CBB contents, cookie;
  if (!CBB_add_u16(out_compressible, TLSEXT_TYPE_cookie) ||
      !CBB_add_u16_length_prefixed(out_compressible, &contents) ||
      !CBB_add_u16_length_prefixed(&contents, &cookie) ||
      !CBB_add_bytes(&cookie, hs->cookie.data(), hs->cookie.size()) ||
      !CBB_flush(out_compressible)) {
    return false;
  }

  return true;
}

// Supported Groups
//
// https://tools.ietf.org/html/rfc4492#section-5.1.1
// https://tools.ietf.org/html/rfc8446#section-4.2.7

static bool ext_supported_groups_add_clienthello(const SSL_HANDSHAKE *hs,
                                                 CBB *out,
                                                 CBB *out_compressible,
                                                 ssl_client_hello_type_t type) {
  const SSL *const ssl = hs->ssl;
  CBB contents, groups_bytes;
  if (!CBB_add_u16(out_compressible, TLSEXT_TYPE_supported_groups) ||
      !CBB_add_u16_length_prefixed(out_compressible, &contents) ||
      !CBB_add_u16_length_prefixed(&contents, &groups_bytes)) {
    return false;
  }

  // Add a fake group. See RFC 8701.
if (ssl->ctx->grease_enabled && !CBB_add_u16(&groups_bytes, ssl_get_grease_value(hs, ssl_grease_group))) { return false; } for (uint16_t group : tls1_get_grouplist(hs)) { if (is_post_quantum_group(group) && hs->max_version < TLS1_3_VERSION) { continue; } if (!CBB_add_u16(&groups_bytes, group)) { return false; } } return CBB_flush(out_compressible); } static bool ext_supported_groups_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { // This extension is not expected to be echoed by servers in TLS 1.2, but some // BigIP servers send it nonetheless, so do not enforce this. return true; } static bool parse_u16_array(const CBS *cbs, Array *out) { CBS copy = *cbs; if ((CBS_len(©) & 1) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } Array ret; if (!ret.InitForOverwrite(CBS_len(©) / 2)) { return false; } for (size_t i = 0; i < ret.size(); i++) { if (!CBS_get_u16(©, &ret[i])) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } } assert(CBS_len(©) == 0); *out = std::move(ret); return true; } static bool ext_supported_groups_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { if (contents == NULL) { return true; } CBS supported_group_list; if (!CBS_get_u16_length_prefixed(contents, &supported_group_list) || // CBS_len(&supported_group_list) == 0 || // CBS_len(contents) != 0 || // !parse_u16_array(&supported_group_list, &hs->peer_supported_group_list)) { return false; } return true; } // Certificate Authorities. 
//
// https://tools.ietf.org/html/rfc8446#section-4.2.4

static bool ext_certificate_authorities_add_clienthello(
    const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible,
    ssl_client_hello_type_t type) {
  if (ssl_has_CA_names(hs->config)) {
    CBB ca_contents;
    if (!CBB_add_u16(out, TLSEXT_TYPE_certificate_authorities) ||  //
        !CBB_add_u16_length_prefixed(out, &ca_contents) ||         //
        !ssl_add_CA_names(hs, &ca_contents) ||                     //
        !CBB_flush(out)) {
      return false;
    }
  }
  return true;
}

// Parses the client's certificate_authorities list into |hs->ca_names|.
static bool ext_certificate_authorities_parse_clienthello(SSL_HANDSHAKE *hs,
                                                          uint8_t *out_alert,
                                                          CBS *contents) {
  if (contents == NULL) {
    return true;
  }

  // The list may not be empty.
  if (CBS_len(contents) == 0) {
    return false;
  }

  hs->ca_names = SSL_parse_CA_list(hs->ssl, out_alert, contents);
  if (!hs->ca_names) {
    return false;
  }
  return true;
}

// QUIC Transport Parameters

// Shared ClientHello writer for both the IANA and the legacy private-use
// QUIC transport parameter codepoints.
static bool ext_quic_transport_params_add_clienthello_impl(
    const SSL_HANDSHAKE *hs, CBB *out, bool use_legacy_codepoint) {
  if (hs->config->quic_transport_params.empty() && !SSL_is_quic(hs->ssl)) {
    return true;
  }
  if (hs->config->quic_transport_params.empty() || !SSL_is_quic(hs->ssl)) {
    // QUIC Transport Parameters must be sent over QUIC, and they must not be
    // sent over non-QUIC transports. If transport params are set, then
    // SSL(_CTX)_set_quic_method must also be called.
    OPENSSL_PUT_ERROR(SSL, SSL_R_QUIC_TRANSPORT_PARAMETERS_MISCONFIGURED);
    return false;
  }
  assert(hs->min_version > TLS1_2_VERSION);
  if (use_legacy_codepoint != hs->config->quic_use_legacy_codepoint) {
    // Do nothing, we'll send the other codepoint.
    return true;
  }

  uint16_t extension_type = TLSEXT_TYPE_quic_transport_parameters;
  if (hs->config->quic_use_legacy_codepoint) {
    extension_type = TLSEXT_TYPE_quic_transport_parameters_legacy;
  }

  CBB contents;
  if (!CBB_add_u16(out, extension_type) ||
      !CBB_add_u16_length_prefixed(out, &contents) ||
      !CBB_add_bytes(&contents, hs->config->quic_transport_params.data(),
                     hs->config->quic_transport_params.size()) ||
      !CBB_flush(out)) {
    return false;
  }
  return true;
}

static bool ext_quic_transport_params_add_clienthello(
    const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible,
    ssl_client_hello_type_t type) {
  return ext_quic_transport_params_add_clienthello_impl(
      hs, out_compressible, /*use_legacy_codepoint=*/false);
}

static bool ext_quic_transport_params_add_clienthello_legacy(
    const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible,
    ssl_client_hello_type_t type) {
  return ext_quic_transport_params_add_clienthello_impl(
      hs, out_compressible, /*use_legacy_codepoint=*/true);
}

// Shared ServerHello parser for both QUIC transport parameter codepoints.
static bool ext_quic_transport_params_parse_serverhello_impl(
    SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents,
    bool used_legacy_codepoint) {
  SSL *const ssl = hs->ssl;
  if (contents == nullptr) {
    if (used_legacy_codepoint != hs->config->quic_use_legacy_codepoint) {
      // Silently ignore because we expect the other QUIC codepoint.
      return true;
    }
    if (!SSL_is_quic(ssl)) {
      return true;
    }
    *out_alert = SSL_AD_MISSING_EXTENSION;
    return false;
  }
  // The extensions parser will check for unsolicited extensions before
  // calling the callback.
assert(SSL_is_quic(ssl));
  assert(ssl_protocol_version(ssl) == TLS1_3_VERSION);
  assert(used_legacy_codepoint == hs->config->quic_use_legacy_codepoint);
  return ssl->s3->peer_quic_transport_params.CopyFrom(*contents);
}

static bool ext_quic_transport_params_parse_serverhello(SSL_HANDSHAKE *hs,
                                                        uint8_t *out_alert,
                                                        CBS *contents) {
  return ext_quic_transport_params_parse_serverhello_impl(
      hs, out_alert, contents, /*used_legacy_codepoint=*/false);
}

static bool ext_quic_transport_params_parse_serverhello_legacy(
    SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) {
  return ext_quic_transport_params_parse_serverhello_impl(
      hs, out_alert, contents, /*used_legacy_codepoint=*/true);
}

// Shared ClientHello parser for both QUIC transport parameter codepoints.
static bool ext_quic_transport_params_parse_clienthello_impl(
    SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents,
    bool used_legacy_codepoint) {
  SSL *const ssl = hs->ssl;
  if (!contents) {
    if (!SSL_is_quic(ssl)) {
      if (hs->config->quic_transport_params.empty()) {
        return true;
      }
      // QUIC transport parameters must not be set if |ssl| is not configured
      // for QUIC.
      OPENSSL_PUT_ERROR(SSL, SSL_R_QUIC_TRANSPORT_PARAMETERS_MISCONFIGURED);
      *out_alert = SSL_AD_INTERNAL_ERROR;
      return false;
    }
    if (used_legacy_codepoint != hs->config->quic_use_legacy_codepoint) {
      // Silently ignore because we expect the other QUIC codepoint.
      return true;
    }
    *out_alert = SSL_AD_MISSING_EXTENSION;
    return false;
  }
  if (!SSL_is_quic(ssl)) {
    if (used_legacy_codepoint) {
      // Ignore the legacy private-use codepoint because that could be sent
      // to mean something else than QUIC transport parameters.
      return true;
    }
    // Fail if we received the codepoint registered with IANA for QUIC
    // because that is not allowed outside of QUIC.
    *out_alert = SSL_AD_UNSUPPORTED_EXTENSION;
    return false;
  }
  assert(ssl_protocol_version(ssl) == TLS1_3_VERSION);
  if (used_legacy_codepoint != hs->config->quic_use_legacy_codepoint) {
    // Silently ignore because we expect the other QUIC codepoint.
    return true;
  }
  return ssl->s3->peer_quic_transport_params.CopyFrom(*contents);
}

static bool ext_quic_transport_params_parse_clienthello(SSL_HANDSHAKE *hs,
                                                        uint8_t *out_alert,
                                                        CBS *contents) {
  return ext_quic_transport_params_parse_clienthello_impl(
      hs, out_alert, contents, /*used_legacy_codepoint=*/false);
}

static bool ext_quic_transport_params_parse_clienthello_legacy(
    SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) {
  return ext_quic_transport_params_parse_clienthello_impl(
      hs, out_alert, contents, /*used_legacy_codepoint=*/true);
}

// Shared ServerHello/EncryptedExtensions writer for both QUIC transport
// parameter codepoints.
static bool ext_quic_transport_params_add_serverhello_impl(
    SSL_HANDSHAKE *hs, CBB *out, bool use_legacy_codepoint) {
  if (!SSL_is_quic(hs->ssl) && use_legacy_codepoint) {
    // Ignore the legacy private-use codepoint because that could be sent
    // to mean something else than QUIC transport parameters.
    return true;
  }
  assert(SSL_is_quic(hs->ssl));
  if (hs->config->quic_transport_params.empty()) {
    // Transport parameters must be set when using QUIC.
    OPENSSL_PUT_ERROR(SSL, SSL_R_QUIC_TRANSPORT_PARAMETERS_MISCONFIGURED);
    return false;
  }
  if (use_legacy_codepoint != hs->config->quic_use_legacy_codepoint) {
    // Do nothing, we'll send the other codepoint.
    return true;
  }

  uint16_t extension_type = TLSEXT_TYPE_quic_transport_parameters;
  if (hs->config->quic_use_legacy_codepoint) {
    extension_type = TLSEXT_TYPE_quic_transport_parameters_legacy;
  }

  CBB contents;
  if (!CBB_add_u16(out, extension_type) ||
      !CBB_add_u16_length_prefixed(out, &contents) ||
      !CBB_add_bytes(&contents, hs->config->quic_transport_params.data(),
                     hs->config->quic_transport_params.size()) ||
      !CBB_flush(out)) {
    return false;
  }
  return true;
}

static bool ext_quic_transport_params_add_serverhello(SSL_HANDSHAKE *hs,
                                                      CBB *out) {
  return ext_quic_transport_params_add_serverhello_impl(
      hs, out, /*use_legacy_codepoint=*/false);
}

static bool ext_quic_transport_params_add_serverhello_legacy(SSL_HANDSHAKE *hs,
                                                             CBB *out) {
  return ext_quic_transport_params_add_serverhello_impl(
      hs, out, /*use_legacy_codepoint=*/true);
}

// Delegated credentials.
//
// https://www.rfc-editor.org/rfc/rfc9345.html

// Clients never offer this extension; servers advertise support implicitly.
static bool ext_delegated_credential_add_clienthello(
    const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible,
    ssl_client_hello_type_t type) {
  return true;
}

static bool ext_delegated_credential_parse_clienthello(SSL_HANDSHAKE *hs,
                                                       uint8_t *out_alert,
                                                       CBS *contents) {
  if (contents == nullptr || ssl_protocol_version(hs->ssl) < TLS1_3_VERSION) {
    // Don't use delegated credentials unless we're negotiating TLS 1.3 or
    // higher.
    return true;
  }

  // The contents of the extension are the signature algorithms the client will
  // accept for a delegated credential.
  CBS sigalg_list;
  if (!CBS_get_u16_length_prefixed(contents, &sigalg_list) ||  //
      CBS_len(&sigalg_list) == 0 ||                            //
      CBS_len(contents) != 0 ||                                //
      !parse_u16_array(&sigalg_list,
                       &hs->peer_delegated_credential_sigalgs)) {
    return false;
  }

  return true;
}

// Certificate compression

// Offers every algorithm for which we have a decompression callback. The
// extension header is written lazily on the first such algorithm.
static bool cert_compression_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out,
                                             CBB *out_compressible,
                                             ssl_client_hello_type_t type) {
  bool first = true;
  CBB contents, algs;

  for (const auto &alg : hs->ssl->ctx->cert_compression_algs) {
    if (alg.decompress == nullptr) {
      continue;
    }

    if (first &&
        (!CBB_add_u16(out_compressible, TLSEXT_TYPE_cert_compression) ||
         !CBB_add_u16_length_prefixed(out_compressible, &contents) ||
         !CBB_add_u8_length_prefixed(&contents, &algs))) {
      return false;
    }
    first = false;
    if (!CBB_add_u16(&algs, alg.alg_id)) {
      return false;
    }
  }

  return first || CBB_flush(out_compressible);
}

static bool cert_compression_parse_serverhello(SSL_HANDSHAKE *hs,
                                               uint8_t *out_alert,
                                               CBS *contents) {
  if (contents == nullptr) {
    return true;
  }

  // The server may not echo this extension. Any server to client negotiation
  // is advertised in the CertificateRequest message.
return false; } static bool cert_compression_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { if (contents == nullptr) { return true; } const SSL_CTX *ctx = hs->ssl->ctx.get(); const size_t num_algs = ctx->cert_compression_algs.size(); CBS alg_ids; if (!CBS_get_u8_length_prefixed(contents, &alg_ids) || // CBS_len(contents) != 0 || // CBS_len(&alg_ids) == 0 || // CBS_len(&alg_ids) % 2 == 1) { return false; } const size_t num_given_alg_ids = CBS_len(&alg_ids) / 2; Array given_alg_ids; if (!given_alg_ids.InitForOverwrite(num_given_alg_ids)) { return false; } size_t best_index = num_algs; size_t given_alg_idx = 0; while (CBS_len(&alg_ids) > 0) { uint16_t alg_id; if (!CBS_get_u16(&alg_ids, &alg_id)) { return false; } given_alg_ids[given_alg_idx++] = alg_id; for (size_t i = 0; i < num_algs; i++) { const auto &alg = ctx->cert_compression_algs[i]; if (alg.alg_id == alg_id && alg.compress != nullptr) { if (i < best_index) { best_index = i; } break; } } } qsort(given_alg_ids.data(), given_alg_ids.size(), sizeof(uint16_t), compare_uint16_t); for (size_t i = 1; i < num_given_alg_ids; i++) { if (given_alg_ids[i - 1] == given_alg_ids[i]) { return false; } } if (best_index < num_algs && ssl_protocol_version(hs->ssl) >= TLS1_3_VERSION) { hs->cert_compression_negotiated = true; hs->cert_compression_alg_id = ctx->cert_compression_algs[best_index].alg_id; } return true; } static bool cert_compression_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { return true; } // Application-level Protocol Settings // // https://tools.ietf.org/html/draft-vvv-tls-alps-01 bool ssl_get_local_application_settings(const SSL_HANDSHAKE *hs, Span *out_settings, Span protocol) { for (const ALPSConfig &config : hs->config->alps_configs) { if (protocol == config.protocol) { *out_settings = config.settings; return true; } } return false; } static bool ext_alps_add_clienthello_impl(const SSL_HANDSHAKE *hs, CBB *out, CBB *out_compressible, ssl_client_hello_type_t type, bool 
                                          use_new_codepoint) {
  const SSL *const ssl = hs->ssl;
  if (  // ALPS requires TLS 1.3.
      hs->max_version < TLS1_3_VERSION ||
      // Do not offer ALPS without ALPN.
      hs->config->alpn_client_proto_list.empty() ||
      // Do not offer ALPS if not configured.
      hs->config->alps_configs.empty() ||
      // Do not offer ALPS on renegotiation handshakes.
      ssl->s3->initial_handshake_complete) {
    return true;
  }
  if (use_new_codepoint != hs->config->alps_use_new_codepoint) {
    // Do nothing, we'll send the other codepoint.
    return true;
  }

  uint16_t extension_type = TLSEXT_TYPE_application_settings_old;
  if (hs->config->alps_use_new_codepoint) {
    extension_type = TLSEXT_TYPE_application_settings;
  }

  CBB contents, proto_list, proto;
  if (!CBB_add_u16(out_compressible, extension_type) ||
      !CBB_add_u16_length_prefixed(out_compressible, &contents) ||
      !CBB_add_u16_length_prefixed(&contents, &proto_list)) {
    return false;
  }

  // List every protocol for which we have ALPS settings configured.
  for (const ALPSConfig &config : hs->config->alps_configs) {
    if (!CBB_add_u8_length_prefixed(&proto_list, &proto) ||
        !CBB_add_bytes(&proto, config.protocol.data(),
                       config.protocol.size())) {
      return false;
    }
  }

  return CBB_flush(out_compressible);
}

static bool ext_alps_add_clienthello(const SSL_HANDSHAKE *hs, CBB *out,
                                     CBB *out_compressible,
                                     ssl_client_hello_type_t type) {
  return ext_alps_add_clienthello_impl(hs, out, out_compressible, type,
                                       /*use_new_codepoint=*/true);
}

static bool ext_alps_add_clienthello_old(const SSL_HANDSHAKE *hs, CBB *out,
                                         CBB *out_compressible,
                                         ssl_client_hello_type_t type) {
  return ext_alps_add_clienthello_impl(hs, out, out_compressible, type,
                                       /*use_new_codepoint=*/false);
}

// Shared ServerHello parser for both ALPS codepoints; stores the server's
// settings in |hs->new_session|.
static bool ext_alps_parse_serverhello_impl(SSL_HANDSHAKE *hs,
                                            uint8_t *out_alert, CBS *contents,
                                            bool use_new_codepoint) {
  SSL *const ssl = hs->ssl;
  if (contents == nullptr) {
    return true;
  }

  assert(!ssl->s3->initial_handshake_complete);
  assert(!hs->config->alpn_client_proto_list.empty());
  assert(!hs->config->alps_configs.empty());
  assert(use_new_codepoint == hs->config->alps_use_new_codepoint);

  // ALPS requires TLS 1.3.
  if (ssl_protocol_version(ssl) < TLS1_3_VERSION) {
    *out_alert = SSL_AD_UNSUPPORTED_EXTENSION;
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION);
    return false;
  }

  // Note extension callbacks may run in any order, so we defer checking
  // consistency with ALPN to |ssl_check_serverhello_tlsext|.
  if (!hs->new_session->peer_application_settings.CopyFrom(*contents)) {
    *out_alert = SSL_AD_INTERNAL_ERROR;
    return false;
  }

  hs->new_session->has_application_settings = true;
  return true;
}

static bool ext_alps_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert,
                                       CBS *contents) {
  return ext_alps_parse_serverhello_impl(hs, out_alert, contents,
                                         /*use_new_codepoint=*/true);
}

static bool ext_alps_parse_serverhello_old(SSL_HANDSHAKE *hs,
                                           uint8_t *out_alert, CBS *contents) {
  return ext_alps_parse_serverhello_impl(hs, out_alert, contents,
                                         /*use_new_codepoint=*/false);
}

// Shared ServerHello/EncryptedExtensions writer for both ALPS codepoints.
static bool ext_alps_add_serverhello_impl(SSL_HANDSHAKE *hs, CBB *out,
                                          bool use_new_codepoint) {
  SSL *const ssl = hs->ssl;
  // If early data is accepted, we omit the ALPS extension. It is implicitly
  // carried over from the previous connection.
  if (hs->new_session == nullptr ||
      !hs->new_session->has_application_settings ||
      ssl->s3->early_data_accepted) {
    return true;
  }

  if (use_new_codepoint != hs->config->alps_use_new_codepoint) {
    // Do nothing, we'll send the other codepoint.
return true; } uint16_t extension_type = TLSEXT_TYPE_application_settings_old; if (hs->config->alps_use_new_codepoint) { extension_type = TLSEXT_TYPE_application_settings; } CBB contents; if (!CBB_add_u16(out, extension_type) || !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_bytes(&contents, hs->new_session->local_application_settings.data(), hs->new_session->local_application_settings.size()) || !CBB_flush(out)) { return false; } return true; } static bool ext_alps_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { return ext_alps_add_serverhello_impl(hs, out, /*use_new_codepoint=*/true); } static bool ext_alps_add_serverhello_old(SSL_HANDSHAKE *hs, CBB *out) { return ext_alps_add_serverhello_impl(hs, out, /*use_new_codepoint=*/false); } bool ssl_negotiate_alps(SSL_HANDSHAKE *hs, uint8_t *out_alert, const SSL_CLIENT_HELLO *client_hello) { SSL *const ssl = hs->ssl; if (ssl->s3->alpn_selected.empty()) { return true; } // If we negotiate ALPN over TLS 1.3, try to negotiate ALPS. CBS alps_contents; Span settings; uint16_t extension_type = TLSEXT_TYPE_application_settings_old; if (hs->config->alps_use_new_codepoint) { extension_type = TLSEXT_TYPE_application_settings; } if (ssl_protocol_version(ssl) >= TLS1_3_VERSION && ssl_get_local_application_settings(hs, &settings, ssl->s3->alpn_selected) && ssl_client_hello_get_extension(client_hello, &alps_contents, extension_type)) { // Check if the client supports ALPS with the selected ALPN. bool found = false; CBS alps_list; if (!CBS_get_u16_length_prefixed(&alps_contents, &alps_list) || // CBS_len(&alps_contents) != 0 || // CBS_len(&alps_list) == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } while (CBS_len(&alps_list) > 0) { CBS protocol_name; if (!CBS_get_u8_length_prefixed(&alps_list, &protocol_name) || // Empty protocol names are forbidden. 
CBS_len(&protocol_name) == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } if (protocol_name == Span(ssl->s3->alpn_selected)) { found = true; } } // Negotiate ALPS if both client also supports ALPS for this protocol. if (found) { hs->new_session->has_application_settings = true; if (!hs->new_session->local_application_settings.CopyFrom(settings)) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } } } return true; } // kExtensions contains all the supported extensions. static const struct tls_extension kExtensions[] = { { TLSEXT_TYPE_server_name, ext_sni_add_clienthello, ext_sni_parse_serverhello, ext_sni_parse_clienthello, ext_sni_add_serverhello, }, { TLSEXT_TYPE_encrypted_client_hello, ext_ech_add_clienthello, ext_ech_parse_serverhello, ext_ech_parse_clienthello, ext_ech_add_serverhello, }, { TLSEXT_TYPE_extended_master_secret, ext_ems_add_clienthello, ext_ems_parse_serverhello, ext_ems_parse_clienthello, ext_ems_add_serverhello, }, { TLSEXT_TYPE_renegotiate, ext_ri_add_clienthello, ext_ri_parse_serverhello, ext_ri_parse_clienthello, ext_ri_add_serverhello, }, { TLSEXT_TYPE_supported_groups, ext_supported_groups_add_clienthello, ext_supported_groups_parse_serverhello, ext_supported_groups_parse_clienthello, dont_add_serverhello, }, { TLSEXT_TYPE_ec_point_formats, ext_ec_point_add_clienthello, ext_ec_point_parse_serverhello, ext_ec_point_parse_clienthello, ext_ec_point_add_serverhello, }, { TLSEXT_TYPE_session_ticket, ext_ticket_add_clienthello, ext_ticket_parse_serverhello, // Ticket extension client parsing is handled in ssl_session.c ignore_parse_clienthello, ext_ticket_add_serverhello, }, { TLSEXT_TYPE_application_layer_protocol_negotiation, ext_alpn_add_clienthello, ext_alpn_parse_serverhello, // ALPN is negotiated late in |ssl_negotiate_alpn|. 
        ignore_parse_clienthello,
        ext_alpn_add_serverhello,
    },
    {
        TLSEXT_TYPE_status_request,
        ext_ocsp_add_clienthello,
        ext_ocsp_parse_serverhello,
        ext_ocsp_parse_clienthello,
        ext_ocsp_add_serverhello,
    },
    {
        TLSEXT_TYPE_signature_algorithms,
        ext_sigalgs_add_clienthello,
        forbid_parse_serverhello,
        ext_sigalgs_parse_clienthello,
        dont_add_serverhello,
    },
    {
        TLSEXT_TYPE_next_proto_neg,
        ext_npn_add_clienthello,
        ext_npn_parse_serverhello,
        ext_npn_parse_clienthello,
        ext_npn_add_serverhello,
    },
    {
        TLSEXT_TYPE_certificate_timestamp,
        ext_sct_add_clienthello,
        ext_sct_parse_serverhello,
        ext_sct_parse_clienthello,
        ext_sct_add_serverhello,
    },
    {
        TLSEXT_TYPE_channel_id,
        ext_channel_id_add_clienthello,
        ext_channel_id_parse_serverhello,
        ext_channel_id_parse_clienthello,
        ext_channel_id_add_serverhello,
    },
    {
        TLSEXT_TYPE_srtp,
        ext_srtp_add_clienthello,
        ext_srtp_parse_serverhello,
        ext_srtp_parse_clienthello,
        ext_srtp_add_serverhello,
    },
    {
        TLSEXT_TYPE_key_share,
        ext_key_share_add_clienthello,
        // key_share is parsed explicitly by the TLS 1.3 state machine, not
        // via this table.
        forbid_parse_serverhello,
        ignore_parse_clienthello,
        dont_add_serverhello,
    },
    {
        TLSEXT_TYPE_psk_key_exchange_modes,
        ext_psk_key_exchange_modes_add_clienthello,
        forbid_parse_serverhello,
        ext_psk_key_exchange_modes_parse_clienthello,
        dont_add_serverhello,
    },
    {
        TLSEXT_TYPE_early_data,
        ext_early_data_add_clienthello,
        ext_early_data_parse_serverhello,
        ext_early_data_parse_clienthello,
        ext_early_data_add_serverhello,
    },
    {
        TLSEXT_TYPE_supported_versions,
        ext_supported_versions_add_clienthello,
        forbid_parse_serverhello,
        ignore_parse_clienthello,
        dont_add_serverhello,
    },
    {
        TLSEXT_TYPE_cookie,
        ext_cookie_add_clienthello,
        forbid_parse_serverhello,
        ignore_parse_clienthello,
        dont_add_serverhello,
    },
    {
        TLSEXT_TYPE_quic_transport_parameters,
        ext_quic_transport_params_add_clienthello,
        ext_quic_transport_params_parse_serverhello,
        ext_quic_transport_params_parse_clienthello,
        ext_quic_transport_params_add_serverhello,
    },
    {
        TLSEXT_TYPE_quic_transport_parameters_legacy,
        ext_quic_transport_params_add_clienthello_legacy,
ext_quic_transport_params_parse_serverhello_legacy, ext_quic_transport_params_parse_clienthello_legacy, ext_quic_transport_params_add_serverhello_legacy, }, { TLSEXT_TYPE_cert_compression, cert_compression_add_clienthello, cert_compression_parse_serverhello, cert_compression_parse_clienthello, cert_compression_add_serverhello, }, { TLSEXT_TYPE_delegated_credential, ext_delegated_credential_add_clienthello, forbid_parse_serverhello, ext_delegated_credential_parse_clienthello, dont_add_serverhello, }, { TLSEXT_TYPE_application_settings, ext_alps_add_clienthello, ext_alps_parse_serverhello, // ALPS is negotiated late in |ssl_negotiate_alpn|. ignore_parse_clienthello, ext_alps_add_serverhello, }, { TLSEXT_TYPE_application_settings_old, ext_alps_add_clienthello_old, ext_alps_parse_serverhello_old, // ALPS is negotiated late in |ssl_negotiate_alpn|. ignore_parse_clienthello, ext_alps_add_serverhello_old, }, { TLSEXT_TYPE_certificate_authorities, ext_certificate_authorities_add_clienthello, forbid_parse_serverhello, ext_certificate_authorities_parse_clienthello, dont_add_serverhello, }, }; #define kNumExtensions (sizeof(kExtensions) / sizeof(struct tls_extension)) static_assert(kNumExtensions <= sizeof(((SSL_HANDSHAKE *)NULL)->extensions.sent) * 8, "too many extensions for sent bitset"); static_assert(kNumExtensions <= sizeof(((SSL_HANDSHAKE *)NULL)->extensions.received) * 8, "too many extensions for received bitset"); bool ssl_setup_extension_permutation(SSL_HANDSHAKE *hs) { if (!hs->config->permute_extensions) { return true; } static_assert(kNumExtensions <= UINT8_MAX, "extensions_permutation type is too small"); uint32_t seeds[kNumExtensions - 1]; Array permutation; if (!RAND_bytes(reinterpret_cast(seeds), sizeof(seeds)) || !permutation.InitForOverwrite(kNumExtensions)) { return false; } for (size_t i = 0; i < kNumExtensions; i++) { permutation[i] = i; } for (size_t i = kNumExtensions - 1; i > 0; i--) { // Set element |i| to a randomly-selected element 0 <= j <= i. 
std::swap(permutation[i], permutation[seeds[i - 1] % (i + 1)]); } hs->extension_permutation = std::move(permutation); return true; } static const struct tls_extension *tls_extension_find(uint32_t *out_index, uint16_t value) { unsigned i; for (i = 0; i < kNumExtensions; i++) { if (kExtensions[i].value == value) { *out_index = i; return &kExtensions[i]; } } return NULL; } static bool add_padding_extension(CBB *cbb, uint16_t ext, size_t len) { CBB child; if (!CBB_add_u16(cbb, ext) || // !CBB_add_u16_length_prefixed(cbb, &child) || !CBB_add_zeros(&child, len)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } return CBB_flush(cbb); } static bool ssl_add_clienthello_tlsext_inner(SSL_HANDSHAKE *hs, CBB *out, CBB *out_encoded, bool *out_needs_psk_binder) { // When writing ClientHelloInner, we construct the real and encoded // ClientHellos concurrently, to handle compression. Uncompressed extensions // are written to |extensions| and copied to |extensions_encoded|. Compressed // extensions are buffered in |compressed| and written to the end. (ECH can // only compress continguous extensions.) SSL *const ssl = hs->ssl; bssl::ScopedCBB compressed, outer_extensions; CBB extensions, extensions_encoded; if (!CBB_add_u16_length_prefixed(out, &extensions) || !CBB_add_u16_length_prefixed(out_encoded, &extensions_encoded) || !CBB_init(compressed.get(), 64) || !CBB_init(outer_extensions.get(), 64)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } hs->inner_extensions_sent = 0; if (ssl->ctx->grease_enabled) { // Add a fake empty extension. See RFC 8701. This always matches // |ssl_add_clienthello_tlsext|, so compress it. uint16_t grease_ext = ssl_get_grease_value(hs, ssl_grease_extension1); if (!add_padding_extension(compressed.get(), grease_ext, 0) || !CBB_add_u16(outer_extensions.get(), grease_ext)) { return false; } } for (size_t unpermuted = 0; unpermuted < kNumExtensions; unpermuted++) { size_t i = hs->extension_permutation.empty() ? 
unpermuted : hs->extension_permutation[unpermuted]; const size_t len_before = CBB_len(&extensions); const size_t len_compressed_before = CBB_len(compressed.get()); if (!kExtensions[i].add_clienthello(hs, &extensions, compressed.get(), ssl_client_hello_inner)) { OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_ADDING_EXTENSION); ERR_add_error_dataf("extension %u", (unsigned)kExtensions[i].value); return false; } const size_t bytes_written = CBB_len(&extensions) - len_before; const size_t bytes_written_compressed = CBB_len(compressed.get()) - len_compressed_before; // The callback may write to at most one output. assert(bytes_written == 0 || bytes_written_compressed == 0); if (bytes_written != 0 || bytes_written_compressed != 0) { hs->inner_extensions_sent |= (1u << i); } // If compressed, update the running ech_outer_extensions extension. if (bytes_written_compressed != 0 && !CBB_add_u16(outer_extensions.get(), kExtensions[i].value)) { return false; } } if (ssl->ctx->grease_enabled) { // Add a fake non-empty extension. See RFC 8701. This always matches // |ssl_add_clienthello_tlsext|, so compress it. uint16_t grease_ext = ssl_get_grease_value(hs, ssl_grease_extension2); if (!add_padding_extension(compressed.get(), grease_ext, 1) || !CBB_add_u16(outer_extensions.get(), grease_ext)) { return false; } } // Uncompressed extensions are encoded as-is. if (!CBB_add_bytes(&extensions_encoded, CBB_data(&extensions), CBB_len(&extensions))) { return false; } // Flush all the compressed extensions. if (CBB_len(compressed.get()) != 0) { CBB extension, child; // Copy them as-is in the real ClientHelloInner. if (!CBB_add_bytes(&extensions, CBB_data(compressed.get()), CBB_len(compressed.get())) || // Replace with ech_outer_extensions in the encoded form. 
!CBB_add_u16(&extensions_encoded, TLSEXT_TYPE_ech_outer_extensions) || !CBB_add_u16_length_prefixed(&extensions_encoded, &extension) || !CBB_add_u8_length_prefixed(&extension, &child) || !CBB_add_bytes(&child, CBB_data(outer_extensions.get()), CBB_len(outer_extensions.get())) || !CBB_flush(&extensions_encoded)) { return false; } } // The PSK extension must be last. It is never compressed. Note, if there is a // binder, the caller will need to update both ClientHelloInner and // EncodedClientHelloInner after computing it. const size_t len_before = CBB_len(&extensions); if (!ext_pre_shared_key_add_clienthello(hs, &extensions, out_needs_psk_binder, ssl_client_hello_inner) || !CBB_add_bytes(&extensions_encoded, CBB_data(&extensions) + len_before, CBB_len(&extensions) - len_before) || !CBB_flush(out) || // !CBB_flush(out_encoded)) { return false; } return true; } bool ssl_add_clienthello_tlsext(SSL_HANDSHAKE *hs, CBB *out, CBB *out_encoded, bool *out_needs_psk_binder, ssl_client_hello_type_t type, size_t header_len) { *out_needs_psk_binder = false; if (type == ssl_client_hello_inner) { return ssl_add_clienthello_tlsext_inner(hs, out, out_encoded, out_needs_psk_binder); } assert(out_encoded == nullptr); // Only ClientHelloInner needs two outputs. SSL *const ssl = hs->ssl; CBB extensions; if (!CBB_add_u16_length_prefixed(out, &extensions)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } // Note we may send multiple ClientHellos for DTLS HelloVerifyRequest and TLS // 1.3 HelloRetryRequest. For the latter, the extensions may change, so it is // important to reset this value. hs->extensions.sent = 0; // Add a fake empty extension. See RFC 8701. if (ssl->ctx->grease_enabled && !add_padding_extension( &extensions, ssl_get_grease_value(hs, ssl_grease_extension1), 0)) { return false; } bool last_was_empty = false; for (size_t unpermuted = 0; unpermuted < kNumExtensions; unpermuted++) { size_t i = hs->extension_permutation.empty() ? 
unpermuted : hs->extension_permutation[unpermuted]; const size_t len_before = CBB_len(&extensions); if (!kExtensions[i].add_clienthello(hs, &extensions, &extensions, type)) { OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_ADDING_EXTENSION); ERR_add_error_dataf("extension %u", (unsigned)kExtensions[i].value); return false; } const size_t bytes_written = CBB_len(&extensions) - len_before; if (bytes_written != 0) { hs->extensions.sent |= (1u << i); } // If the difference in lengths is only four bytes then the extension had // an empty body. last_was_empty = (bytes_written == 4); } if (ssl->ctx->grease_enabled) { // Add a fake non-empty extension. See RFC 8701. if (!add_padding_extension( &extensions, ssl_get_grease_value(hs, ssl_grease_extension2), 1)) { return false; } last_was_empty = false; } // In cleartext ClientHellos, we add the padding extension to work around // bugs. We also apply this padding to ClientHelloOuter, to keep the wire // images aligned. size_t psk_extension_len = ext_pre_shared_key_clienthello_length(hs, type); if (!SSL_is_dtls(ssl) && !SSL_is_quic(ssl) && !ssl->s3->used_hello_retry_request) { header_len += SSL3_HM_HEADER_LENGTH + 2 + CBB_len(&extensions) + psk_extension_len; size_t padding_len = 0; // The final extension must be non-empty. WebSphere Application // Server 7.0 is intolerant to the last extension being zero-length. See // https://crbug.com/363583. if (last_was_empty && psk_extension_len == 0) { padding_len = 1; // The addition of the padding extension may push us into the F5 bug. header_len += 4 + padding_len; } // Add padding to workaround bugs in F5 terminators. See RFC 7685. // // NB: because this code works out the length of all existing extensions // it MUST always appear last (save for any PSK extension). if (header_len > 0xff && header_len < 0x200) { // If our calculations already included a padding extension, remove that // factor because we're about to change its length. 
if (padding_len != 0) { header_len -= 4 + padding_len; } padding_len = 0x200 - header_len; // Extensions take at least four bytes to encode. Always include at least // one byte of data if including the extension. WebSphere Application // Server 7.0 is intolerant to the last extension being zero-length. See // https://crbug.com/363583. if (padding_len >= 4 + 1) { padding_len -= 4; } else { padding_len = 1; } } if (padding_len != 0 && !add_padding_extension(&extensions, TLSEXT_TYPE_padding, padding_len)) { return false; } } // The PSK extension must be last, including after the padding. const size_t len_before = CBB_len(&extensions); if (!ext_pre_shared_key_add_clienthello(hs, &extensions, out_needs_psk_binder, type)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } assert(psk_extension_len == CBB_len(&extensions) - len_before); (void)len_before; // |assert| is omitted in release builds. // Discard empty extensions blocks. if (CBB_len(&extensions) == 0) { CBB_discard_child(out); } return CBB_flush(out); } bool ssl_add_serverhello_tlsext(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; CBB extensions; if (!CBB_add_u16_length_prefixed(out, &extensions)) { goto err; } for (unsigned i = 0; i < kNumExtensions; i++) { if (!(hs->extensions.received & (1u << i))) { // Don't send extensions that were not received. continue; } if (!kExtensions[i].add_serverhello(hs, &extensions)) { OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_ADDING_EXTENSION); ERR_add_error_dataf("extension %u", (unsigned)kExtensions[i].value); goto err; } } // Discard empty extensions blocks before TLS 1.3. 
if (ssl_protocol_version(ssl) < TLS1_3_VERSION && // CBB_len(&extensions) == 0) { CBB_discard_child(out); } return CBB_flush(out); err: OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } static bool ssl_scan_clienthello_tlsext(SSL_HANDSHAKE *hs, const SSL_CLIENT_HELLO *client_hello, int *out_alert) { hs->extensions.received = 0; CBS extensions; CBS_init(&extensions, client_hello->extensions, client_hello->extensions_len); while (CBS_len(&extensions) != 0) { uint16_t type; CBS extension; // Decode the next extension. if (!CBS_get_u16(&extensions, &type) || !CBS_get_u16_length_prefixed(&extensions, &extension)) { *out_alert = SSL_AD_DECODE_ERROR; return false; } unsigned ext_index; const struct tls_extension *const ext = tls_extension_find(&ext_index, type); if (ext == NULL) { continue; } hs->extensions.received |= (1u << ext_index); uint8_t alert = SSL_AD_DECODE_ERROR; if (!ext->parse_clienthello(hs, &alert, &extension)) { *out_alert = alert; OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_PARSING_EXTENSION); ERR_add_error_dataf("extension %u", (unsigned)type); return false; } } for (size_t i = 0; i < kNumExtensions; i++) { if (hs->extensions.received & (1u << i)) { continue; } CBS *contents = NULL, fake_contents; static const uint8_t kFakeRenegotiateExtension[] = {0}; if (kExtensions[i].value == TLSEXT_TYPE_renegotiate && ssl_client_cipher_list_contains_cipher(client_hello, SSL3_CK_SCSV & 0xffff)) { // The renegotiation SCSV was received so pretend that we received a // renegotiation extension. CBS_init(&fake_contents, kFakeRenegotiateExtension, sizeof(kFakeRenegotiateExtension)); contents = &fake_contents; hs->extensions.received |= (1u << i); } // Extension wasn't observed so call the callback with a NULL // parameter. 
uint8_t alert = SSL_AD_DECODE_ERROR; if (!kExtensions[i].parse_clienthello(hs, &alert, contents)) { OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_EXTENSION); ERR_add_error_dataf("extension %u", (unsigned)kExtensions[i].value); *out_alert = alert; return false; } } return true; } bool ssl_parse_clienthello_tlsext(SSL_HANDSHAKE *hs, const SSL_CLIENT_HELLO *client_hello) { SSL *const ssl = hs->ssl; int alert = SSL_AD_DECODE_ERROR; if (!ssl_scan_clienthello_tlsext(hs, client_hello, &alert)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return false; } if (!ssl_check_clienthello_tlsext(hs)) { OPENSSL_PUT_ERROR(SSL, SSL_R_CLIENTHELLO_TLSEXT); return false; } return true; } static bool ssl_scan_serverhello_tlsext(SSL_HANDSHAKE *hs, const CBS *cbs, int *out_alert) { CBS extensions = *cbs; if (!tls1_check_duplicate_extensions(&extensions)) { *out_alert = SSL_AD_DECODE_ERROR; return false; } uint32_t received = 0; while (CBS_len(&extensions) != 0) { uint16_t type; CBS extension; // Decode the next extension. if (!CBS_get_u16(&extensions, &type) || !CBS_get_u16_length_prefixed(&extensions, &extension)) { *out_alert = SSL_AD_DECODE_ERROR; return false; } unsigned ext_index; const struct tls_extension *const ext = tls_extension_find(&ext_index, type); if (ext == NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); ERR_add_error_dataf("extension %u", (unsigned)type); *out_alert = SSL_AD_UNSUPPORTED_EXTENSION; return false; } static_assert(kNumExtensions <= sizeof(hs->extensions.sent) * 8, "too many bits"); if (!(hs->extensions.sent & (1u << ext_index))) { // If the extension was never sent then it is illegal. 
OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); ERR_add_error_dataf("extension %u", (unsigned)type); *out_alert = SSL_AD_UNSUPPORTED_EXTENSION; return false; } received |= (1u << ext_index); uint8_t alert = SSL_AD_DECODE_ERROR; if (!ext->parse_serverhello(hs, &alert, &extension)) { OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_PARSING_EXTENSION); ERR_add_error_dataf("extension %u", (unsigned)type); *out_alert = alert; return false; } } for (size_t i = 0; i < kNumExtensions; i++) { if (!(received & (1u << i))) { // Extension wasn't observed so call the callback with a NULL // parameter. uint8_t alert = SSL_AD_DECODE_ERROR; if (!kExtensions[i].parse_serverhello(hs, &alert, NULL)) { OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_EXTENSION); ERR_add_error_dataf("extension %u", (unsigned)kExtensions[i].value); *out_alert = alert; return false; } } } return true; } static bool ssl_check_clienthello_tlsext(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; int ret = SSL_TLSEXT_ERR_NOACK; int al = SSL_AD_UNRECOGNIZED_NAME; if (ssl->ctx->servername_callback != 0) { ret = ssl->ctx->servername_callback(ssl, &al, ssl->ctx->servername_arg); } else if (ssl->session_ctx->servername_callback != 0) { ret = ssl->session_ctx->servername_callback( ssl, &al, ssl->session_ctx->servername_arg); } switch (ret) { case SSL_TLSEXT_ERR_ALERT_FATAL: ssl_send_alert(ssl, SSL3_AL_FATAL, al); return false; case SSL_TLSEXT_ERR_NOACK: hs->should_ack_sni = false; return true; default: hs->should_ack_sni = ssl->s3->hostname != nullptr; return true; } } static bool ssl_check_serverhello_tlsext(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; // ALPS and ALPN have a dependency between each other, so we defer checking // consistency to after the callbacks run. if (hs->new_session != nullptr && hs->new_session->has_application_settings) { // ALPN must be negotiated. 
if (ssl->s3->alpn_selected.empty()) { OPENSSL_PUT_ERROR(SSL, SSL_R_NEGOTIATED_ALPS_WITHOUT_ALPN); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); return false; } // The negotiated protocol must be one of the ones we advertised for ALPS. Span settings; if (!ssl_get_local_application_settings(hs, &settings, ssl->s3->alpn_selected)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_ALPN_PROTOCOL); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); return false; } if (!hs->new_session->local_application_settings.CopyFrom(settings)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return false; } } return true; } bool ssl_parse_serverhello_tlsext(SSL_HANDSHAKE *hs, const CBS *cbs) { SSL *const ssl = hs->ssl; int alert = SSL_AD_DECODE_ERROR; if (!ssl_scan_serverhello_tlsext(hs, cbs, &alert)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return false; } if (!ssl_check_serverhello_tlsext(hs)) { return false; } return true; } static enum ssl_ticket_aead_result_t decrypt_ticket_with_cipher_ctx( Array *out, EVP_CIPHER_CTX *cipher_ctx, HMAC_CTX *hmac_ctx, Span ticket) { size_t iv_len = EVP_CIPHER_CTX_iv_length(cipher_ctx); // Check the MAC at the end of the ticket. uint8_t mac[EVP_MAX_MD_SIZE]; size_t mac_len = HMAC_size(hmac_ctx); if (ticket.size() < SSL_TICKET_KEY_NAME_LEN + iv_len + 1 + mac_len) { // The ticket must be large enough for key name, IV, data, and MAC. return ssl_ticket_aead_ignore_ticket; } // Split the ticket into the ticket and the MAC. auto ticket_mac = ticket.last(mac_len); ticket = ticket.first(ticket.size() - mac_len); HMAC_Update(hmac_ctx, ticket.data(), ticket.size()); HMAC_Final(hmac_ctx, mac, NULL); assert(mac_len == ticket_mac.size()); bool mac_ok = CRYPTO_memcmp(mac, ticket_mac.data(), mac_len) == 0; #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) mac_ok = true; #endif if (!mac_ok) { return ssl_ticket_aead_ignore_ticket; } // Decrypt the session data. 
auto ciphertext = ticket.subspan(SSL_TICKET_KEY_NAME_LEN + iv_len); Array plaintext; #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) if (!plaintext.CopyFrom(ciphertext)) { return ssl_ticket_aead_error; } #else if (ciphertext.size() >= INT_MAX) { return ssl_ticket_aead_ignore_ticket; } if (!plaintext.InitForOverwrite(ciphertext.size())) { return ssl_ticket_aead_error; } int len1, len2; if (!EVP_DecryptUpdate(cipher_ctx, plaintext.data(), &len1, ciphertext.data(), (int)ciphertext.size()) || !EVP_DecryptFinal_ex(cipher_ctx, plaintext.data() + len1, &len2)) { ERR_clear_error(); return ssl_ticket_aead_ignore_ticket; } plaintext.Shrink(static_cast(len1) + len2); #endif *out = std::move(plaintext); return ssl_ticket_aead_success; } static enum ssl_ticket_aead_result_t ssl_decrypt_ticket_with_cb( SSL_HANDSHAKE *hs, Array *out, bool *out_renew_ticket, Span ticket) { assert(ticket.size() >= SSL_TICKET_KEY_NAME_LEN + EVP_MAX_IV_LENGTH); ScopedEVP_CIPHER_CTX cipher_ctx; ScopedHMAC_CTX hmac_ctx; auto name = ticket.subspan(0, SSL_TICKET_KEY_NAME_LEN); // The actual IV is shorter, but the length is determined by the callback's // chosen cipher. Instead we pass in |EVP_MAX_IV_LENGTH| worth of IV to ensure // the callback has enough. 
auto iv = ticket.subspan(SSL_TICKET_KEY_NAME_LEN, EVP_MAX_IV_LENGTH); int cb_ret = hs->ssl->session_ctx->ticket_key_cb( hs->ssl, const_cast(name.data()), const_cast(iv.data()), cipher_ctx.get(), hmac_ctx.get(), 0 /* decrypt */); if (cb_ret < 0) { return ssl_ticket_aead_error; } else if (cb_ret == 0) { return ssl_ticket_aead_ignore_ticket; } else if (cb_ret == 2) { *out_renew_ticket = true; } else { assert(cb_ret == 1); } return decrypt_ticket_with_cipher_ctx(out, cipher_ctx.get(), hmac_ctx.get(), ticket); } static enum ssl_ticket_aead_result_t ssl_decrypt_ticket_with_ticket_keys( SSL_HANDSHAKE *hs, Array *out, Span ticket) { assert(ticket.size() >= SSL_TICKET_KEY_NAME_LEN + EVP_MAX_IV_LENGTH); SSL_CTX *ctx = hs->ssl->session_ctx.get(); // Rotate the ticket key if necessary. if (!ssl_ctx_rotate_ticket_encryption_key(ctx)) { return ssl_ticket_aead_error; } const EVP_CIPHER *cipher = EVP_aes_128_cbc(); auto name = ticket.subspan(0, SSL_TICKET_KEY_NAME_LEN); auto iv = ticket.subspan(SSL_TICKET_KEY_NAME_LEN, EVP_CIPHER_iv_length(cipher)); // Pick the matching ticket key and decrypt. 
ScopedEVP_CIPHER_CTX cipher_ctx; ScopedHMAC_CTX hmac_ctx; { MutexReadLock lock(&ctx->lock); const TicketKey *key; if (ctx->ticket_key_current && name == ctx->ticket_key_current->name) { key = ctx->ticket_key_current.get(); } else if (ctx->ticket_key_prev && name == ctx->ticket_key_prev->name) { key = ctx->ticket_key_prev.get(); } else { return ssl_ticket_aead_ignore_ticket; } if (!HMAC_Init_ex(hmac_ctx.get(), key->hmac_key, sizeof(key->hmac_key), tlsext_tick_md(), NULL) || !EVP_DecryptInit_ex(cipher_ctx.get(), cipher, NULL, key->aes_key, iv.data())) { return ssl_ticket_aead_error; } } return decrypt_ticket_with_cipher_ctx(out, cipher_ctx.get(), hmac_ctx.get(), ticket); } static enum ssl_ticket_aead_result_t ssl_decrypt_ticket_with_method( SSL_HANDSHAKE *hs, Array *out, bool *out_renew_ticket, Span ticket) { Array plaintext; if (!plaintext.InitForOverwrite(ticket.size())) { return ssl_ticket_aead_error; } size_t plaintext_len; const enum ssl_ticket_aead_result_t result = hs->ssl->session_ctx->ticket_aead_method->open( hs->ssl, plaintext.data(), &plaintext_len, ticket.size(), ticket.data(), ticket.size()); if (result != ssl_ticket_aead_success) { return result; } plaintext.Shrink(plaintext_len); *out = std::move(plaintext); return ssl_ticket_aead_success; } enum ssl_ticket_aead_result_t ssl_process_ticket( SSL_HANDSHAKE *hs, UniquePtr *out_session, bool *out_renew_ticket, Span ticket, Span session_id) { SSL *const ssl = hs->ssl; *out_renew_ticket = false; out_session->reset(); if ((SSL_get_options(hs->ssl) & SSL_OP_NO_TICKET) || session_id.size() > SSL_MAX_SSL_SESSION_ID_LENGTH) { return ssl_ticket_aead_ignore_ticket; } // Tickets in TLS 1.3 are tied into pre-shared keys (PSKs), unlike in TLS 1.2 // where that concept doesn't exist. The |decrypted_psk| and |ignore_psk| // hints only apply to PSKs. We check the version to determine which this is. 
const bool is_psk = ssl_protocol_version(ssl) >= TLS1_3_VERSION; Array plaintext; enum ssl_ticket_aead_result_t result; SSL_HANDSHAKE_HINTS *const hints = hs->hints.get(); if (is_psk && hints && !hs->hints_requested && !hints->decrypted_psk.empty()) { result = plaintext.CopyFrom(hints->decrypted_psk) ? ssl_ticket_aead_success : ssl_ticket_aead_error; } else if (is_psk && hints && !hs->hints_requested && hints->ignore_psk) { result = ssl_ticket_aead_ignore_ticket; } else if (!is_psk && hints && !hs->hints_requested && !hints->decrypted_ticket.empty()) { if (plaintext.CopyFrom(hints->decrypted_ticket)) { result = ssl_ticket_aead_success; *out_renew_ticket = hints->renew_ticket; } else { result = ssl_ticket_aead_error; } } else if (!is_psk && hints && !hs->hints_requested && hints->ignore_ticket) { result = ssl_ticket_aead_ignore_ticket; } else if (ssl->session_ctx->ticket_aead_method != NULL) { result = ssl_decrypt_ticket_with_method(hs, &plaintext, out_renew_ticket, ticket); } else { // Ensure there is room for the key name and the largest IV |ticket_key_cb| // may try to consume. The real limit may be lower, but the maximum IV // length should be well under the minimum size for the session material and // HMAC. 
if (ticket.size() < SSL_TICKET_KEY_NAME_LEN + EVP_MAX_IV_LENGTH) { result = ssl_ticket_aead_ignore_ticket; } else if (ssl->session_ctx->ticket_key_cb != NULL) { result = ssl_decrypt_ticket_with_cb(hs, &plaintext, out_renew_ticket, ticket); } else { result = ssl_decrypt_ticket_with_ticket_keys(hs, &plaintext, ticket); } } if (hints && hs->hints_requested) { if (result == ssl_ticket_aead_ignore_ticket) { if (is_psk) { hints->ignore_psk = true; } else { hints->ignore_ticket = true; } } else if (result == ssl_ticket_aead_success) { if (is_psk) { if (!hints->decrypted_psk.CopyFrom(plaintext)) { return ssl_ticket_aead_error; } } else { if (!hints->decrypted_ticket.CopyFrom(plaintext)) { return ssl_ticket_aead_error; } hints->renew_ticket = *out_renew_ticket; } } } if (result != ssl_ticket_aead_success) { return result; } // Decode the session. UniquePtr session(SSL_SESSION_from_bytes( plaintext.data(), plaintext.size(), ssl->ctx.get())); if (!session) { ERR_clear_error(); // Don't leave an error on the queue. return ssl_ticket_aead_ignore_ticket; } // Envoy's tests expect the session to have a session ID that matches the // placeholder used by the client. It's unclear whether this is a good idea, // but we maintain it for now. session->session_id.ResizeForOverwrite(SHA256_DIGEST_LENGTH); SHA256(ticket.data(), ticket.size(), session->session_id.data()); *out_session = std::move(session); return ssl_ticket_aead_success; } bool tls1_parse_peer_sigalgs(SSL_HANDSHAKE *hs, const CBS *in_sigalgs) { // Extension ignored for inappropriate versions if (ssl_protocol_version(hs->ssl) < TLS1_2_VERSION) { return true; } // In all contexts, the signature algorithms list may not be empty. (It may be // omitted by clients in TLS 1.2, but then the entire extension is omitted.) 
return CBS_len(in_sigalgs) != 0 && parse_u16_array(in_sigalgs, &hs->peer_sigalgs); } bool tls1_get_legacy_signature_algorithm(uint16_t *out, const EVP_PKEY *pkey) { switch (EVP_PKEY_id(pkey)) { case EVP_PKEY_RSA: *out = SSL_SIGN_RSA_PKCS1_MD5_SHA1; return true; case EVP_PKEY_EC: *out = SSL_SIGN_ECDSA_SHA1; return true; default: return false; } } bool tls1_choose_signature_algorithm(SSL_HANDSHAKE *hs, const SSL_CREDENTIAL *cred, uint16_t *out) { SSL *const ssl = hs->ssl; if (!cred->UsesPrivateKey()) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CERTIFICATE_TYPE); return false; } // Before TLS 1.2, the signature algorithm isn't negotiated as part of the // handshake. uint16_t version = ssl_protocol_version(ssl); if (version < TLS1_2_VERSION) { if (!tls1_get_legacy_signature_algorithm(out, cred->pubkey.get())) { OPENSSL_PUT_ERROR(SSL, SSL_R_NO_COMMON_SIGNATURE_ALGORITHMS); return false; } return true; } Span peer_sigalgs; if (cred->type == SSLCredentialType::kDelegated) { peer_sigalgs = hs->peer_delegated_credential_sigalgs; } else { peer_sigalgs = hs->peer_sigalgs; if (peer_sigalgs.empty() && version == TLS1_2_VERSION) { // If the client didn't specify any signature_algorithms extension, it is // interpreted as SHA-1. See // http://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 static const uint16_t kTLS12Default[] = {SSL_SIGN_RSA_PKCS1_SHA1, SSL_SIGN_ECDSA_SHA1}; peer_sigalgs = kTLS12Default; } } Span sigalgs = cred->sigalgs.empty() ? 
Span(kSignSignatureAlgorithms) : cred->sigalgs; for (uint16_t sigalg : sigalgs) { if (!ssl_pkey_supports_algorithm(ssl, cred->pubkey.get(), sigalg, /*is_verify=*/false)) { continue; } if (std::find(peer_sigalgs.begin(), peer_sigalgs.end(), sigalg) != peer_sigalgs.end()) { *out = sigalg; return true; } } OPENSSL_PUT_ERROR(SSL, SSL_R_NO_COMMON_SIGNATURE_ALGORITHMS); return false; } bool tls1_verify_channel_id(SSL_HANDSHAKE *hs, const SSLMessage &msg) { SSL *const ssl = hs->ssl; // A Channel ID handshake message is structured to contain multiple // extensions, but the only one that can be present is Channel ID. uint16_t extension_type; CBS channel_id = msg.body, extension; if (!CBS_get_u16(&channel_id, &extension_type) || // !CBS_get_u16_length_prefixed(&channel_id, &extension) || // CBS_len(&channel_id) != 0 || // extension_type != TLSEXT_TYPE_channel_id || // CBS_len(&extension) != TLSEXT_CHANNEL_ID_SIZE) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return false; } const EC_GROUP *p256 = EC_group_p256(); UniquePtr sig(ECDSA_SIG_new()); UniquePtr x(BN_new()), y(BN_new()); if (!sig || !x || !y) { return false; } const uint8_t *p = CBS_data(&extension); if (BN_bin2bn(p + 0, 32, x.get()) == NULL || BN_bin2bn(p + 32, 32, y.get()) == NULL || BN_bin2bn(p + 64, 32, sig->r) == NULL || BN_bin2bn(p + 96, 32, sig->s) == NULL) { return false; } UniquePtr key(EC_KEY_new()); UniquePtr point(EC_POINT_new(p256)); if (!key || !point || !EC_POINT_set_affine_coordinates_GFp(p256, point.get(), x.get(), y.get(), nullptr) || !EC_KEY_set_group(key.get(), p256) || !EC_KEY_set_public_key(key.get(), point.get())) { return false; } uint8_t digest[EVP_MAX_MD_SIZE]; size_t digest_len; if (!tls1_channel_id_hash(hs, digest, &digest_len)) { return false; } bool sig_ok = ECDSA_do_verify(digest, digest_len, sig.get(), key.get()); #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) sig_ok = true; ERR_clear_error(); #endif if (!sig_ok) { 
OPENSSL_PUT_ERROR(SSL, SSL_R_CHANNEL_ID_SIGNATURE_INVALID); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); return false; } OPENSSL_memcpy(ssl->s3->channel_id, p, 64); ssl->s3->channel_id_valid = true; return true; } bool tls1_write_channel_id(SSL_HANDSHAKE *hs, CBB *cbb) { uint8_t digest[EVP_MAX_MD_SIZE]; size_t digest_len; if (!tls1_channel_id_hash(hs, digest, &digest_len)) { return false; } EC_KEY *ec_key = EVP_PKEY_get0_EC_KEY(hs->config->channel_id_private.get()); if (ec_key == nullptr) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } UniquePtr x(BN_new()), y(BN_new()); if (!x || !y || !EC_POINT_get_affine_coordinates_GFp(EC_KEY_get0_group(ec_key), EC_KEY_get0_public_key(ec_key), x.get(), y.get(), nullptr)) { return false; } UniquePtr sig(ECDSA_do_sign(digest, digest_len, ec_key)); if (!sig) { return false; } CBB child; if (!CBB_add_u16(cbb, TLSEXT_TYPE_channel_id) || // !CBB_add_u16_length_prefixed(cbb, &child) || // !BN_bn2cbb_padded(&child, 32, x.get()) || // !BN_bn2cbb_padded(&child, 32, y.get()) || // !BN_bn2cbb_padded(&child, 32, sig->r) || // !BN_bn2cbb_padded(&child, 32, sig->s) || // !CBB_flush(cbb)) { return false; } return true; } bool tls1_channel_id_hash(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len) { SSL *const ssl = hs->ssl; if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { Array msg; if (!tls13_get_cert_verify_signature_input(hs, &msg, ssl_cert_verify_channel_id)) { return false; } SHA256(msg.data(), msg.size(), out); *out_len = SHA256_DIGEST_LENGTH; return true; } SHA256_CTX ctx; SHA256_Init(&ctx); static const char kClientIDMagic[] = "TLS Channel ID signature"; SHA256_Update(&ctx, kClientIDMagic, sizeof(kClientIDMagic)); if (ssl->session != NULL) { static const char kResumptionMagic[] = "Resumption"; SHA256_Update(&ctx, kResumptionMagic, sizeof(kResumptionMagic)); if (ssl->session->original_handshake_hash.empty()) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } SHA256_Update(&ctx, 
ssl->session->original_handshake_hash.data(), ssl->session->original_handshake_hash.size()); } uint8_t hs_hash[EVP_MAX_MD_SIZE]; size_t hs_hash_len; if (!hs->transcript.GetHash(hs_hash, &hs_hash_len)) { return false; } SHA256_Update(&ctx, hs_hash, (size_t)hs_hash_len); SHA256_Final(out, &ctx); *out_len = SHA256_DIGEST_LENGTH; return true; } bool tls1_record_handshake_hashes_for_channel_id(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; // This function should never be called for a resumed session because the // handshake hashes that we wish to record are for the original, full // handshake. if (ssl->session != NULL) { return false; } size_t digest_len; hs->new_session->original_handshake_hash.ResizeForOverwrite( hs->transcript.DigestLen()); if (!hs->transcript.GetHash(hs->new_session->original_handshake_hash.data(), &digest_len)) { return false; } assert(digest_len == hs->new_session->original_handshake_hash.size()); return true; } bool ssl_is_sct_list_valid(const CBS *contents) { // Shallow parse the SCT list for sanity. By the RFC // (https://tools.ietf.org/html/rfc6962#section-3.3) neither the list nor any // of the SCTs may be empty. 
CBS copy = *contents; CBS sct_list; if (!CBS_get_u16_length_prefixed(©, &sct_list) || CBS_len(©) != 0 || CBS_len(&sct_list) == 0) { return false; } while (CBS_len(&sct_list) > 0) { CBS sct; if (!CBS_get_u16_length_prefixed(&sct_list, &sct) || CBS_len(&sct) == 0) { return false; } } return true; } BSSL_NAMESPACE_END using namespace bssl; int SSL_early_callback_ctx_extension_get(const SSL_CLIENT_HELLO *client_hello, uint16_t extension_type, const uint8_t **out_data, size_t *out_len) { CBS cbs; if (!ssl_client_hello_get_extension(client_hello, &cbs, extension_type)) { return 0; } *out_data = CBS_data(&cbs); *out_len = CBS_len(&cbs); return 1; } ================================================ FILE: Sources/CNIOBoringSSL/ssl/handoff.cc ================================================ /* Copyright 2018 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN constexpr int kHandoffVersion = 0; constexpr int kHandbackVersion = 0; static const CBS_ASN1_TAG kHandoffTagALPS = CBS_ASN1_CONTEXT_SPECIFIC | 0; // early_data_t represents the state of early data in a more compact way than // the 3 bits used by the implementation. 
// early_data_t encodes the early-data state as a single value for the
// handback, rather than the three separate flags (|early_data_offered|,
// |early_data_accepted|, |skip_early_data|) used by the implementation.
enum early_data_t {
  early_data_not_offered = 0,
  early_data_accepted = 1,
  early_data_rejected_hrr = 2,
  early_data_skipped = 3,

  early_data_max_value = early_data_skipped,
};

// serialize_features adds a description of features supported by this binary
// to |out|. Returns true on success and false on error.
static bool serialize_features(CBB *out) {
  // First OCTET STRING: the ids of every cipher this binary implements.
  CBB ciphers;
  if (!CBB_add_asn1(out, &ciphers, CBS_ASN1_OCTETSTRING)) {
    return false;
  }
  Span<const SSL_CIPHER> all_ciphers = AllCiphers();
  for (const SSL_CIPHER &cipher : all_ciphers) {
    if (!CBB_add_u16(&ciphers, static_cast<uint16_t>(cipher.id))) {
      return false;
    }
  }
  // Second OCTET STRING: the ids of every named group this binary implements.
  CBB groups;
  if (!CBB_add_asn1(out, &groups, CBS_ASN1_OCTETSTRING)) {
    return false;
  }
  for (const NamedGroup &g : NamedGroups()) {
    if (!CBB_add_u16(&groups, g.group_id)) {
      return false;
    }
  }
  // ALPS is a draft protocol and may change over time. The handoff structure
  // contains a [0] IMPLICIT OCTET STRING OPTIONAL, containing a list of u16
  // ALPS versions that the binary supports. For now we name them by codepoint.
  // Once ALPS is finalized and past the support horizon, this field can be
  // removed.
  CBB alps;
  if (!CBB_add_asn1(out, &alps, kHandoffTagALPS) ||
      !CBB_add_u16(&alps, TLSEXT_TYPE_application_settings_old) ||
      !CBB_add_u16(&alps, TLSEXT_TYPE_application_settings)) {
    return false;
  }
  return CBB_flush(out);
}

// SSL_serialize_handoff writes the handoff structure (version, transcript,
// buffered handshake bytes, and supported features) to |out| and exposes the
// pending ClientHello via |out_hello|. Only valid on a server connection that
// is currently paused in the |SSL_ERROR_HANDOFF| state.
bool SSL_serialize_handoff(const SSL *ssl, CBB *out,
                           SSL_CLIENT_HELLO *out_hello) {
  const SSL3_STATE *const s3 = ssl->s3;
  if (!ssl->server ||             //
      s3->hs == nullptr ||        //
      s3->rwstate != SSL_ERROR_HANDOFF) {
    return false;
  }

  CBB seq;
  SSLMessage msg;
  Span<const uint8_t> transcript = s3->hs->transcript.buffer();
  if (!CBB_add_asn1(out, &seq, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1_uint64(&seq, kHandoffVersion) ||
      !CBB_add_asn1_octet_string(&seq, transcript.data(), transcript.size()) ||
      !CBB_add_asn1_octet_string(
          &seq, reinterpret_cast<uint8_t *>(s3->hs_buf->data),
          s3->hs_buf->length) ||
      !serialize_features(&seq) ||  //
      !CBB_flush(out) ||            //
      !ssl->method->get_message(ssl, &msg) ||
      !ssl_client_hello_init(ssl, out_hello, msg.body)) {
    return false;
  }

  return true;
}

// SSL_decline_handoff clears the handoff flag on a server connection paused
// in |SSL_ERROR_HANDOFF|, so the handshake continues locally instead.
bool SSL_decline_handoff(SSL *ssl) {
  const SSL3_STATE *const s3 = ssl->s3;
  if (!ssl->server ||       //
      s3->hs == nullptr ||  //
      s3->rwstate != SSL_ERROR_HANDOFF) {
    return false;
  }

  s3->hs->config->handoff = false;
  return true;
}

// apply_remote_features reads a list of supported features from |in| and
// (possibly) reconfigures |ssl| to disallow the negotiation of features whose
// support has not been indicated. (This prevents the handshake from
// committing to features that are not supported on the handoff/handback
// side.)
static bool apply_remote_features(SSL *ssl, CBS *in) {
  // First feature list: ciphers the remote side supports, as u16 ids.
  CBS ciphers;
  if (!CBS_get_asn1(in, &ciphers, CBS_ASN1_OCTETSTRING)) {
    return false;
  }
  bssl::UniquePtr<STACK_OF(SSL_CIPHER)> supported(sk_SSL_CIPHER_new_null());
  if (!supported) {
    return false;
  }
  while (CBS_len(&ciphers)) {
    uint16_t id;
    if (!CBS_get_u16(&ciphers, &id)) {
      return false;
    }
    const SSL_CIPHER *cipher = SSL_get_cipher_by_value(id);
    if (!cipher) {
      // Ids this binary does not recognize are skipped, not an error.
      continue;
    }
    if (!sk_SSL_CIPHER_push(supported.get(), cipher)) {
      return false;
    }
  }
  // Collect every configured cipher that the remote side did not list.
  STACK_OF(SSL_CIPHER) *configured =
      ssl->config->cipher_list ? ssl->config->cipher_list->ciphers.get()
                               : ssl->ctx->cipher_list->ciphers.get();
  bssl::UniquePtr<STACK_OF(SSL_CIPHER)> unsupported(sk_SSL_CIPHER_new_null());
  if (!unsupported) {
    return false;
  }
  for (const SSL_CIPHER *configured_cipher : configured) {
    if (sk_SSL_CIPHER_find(supported.get(), nullptr, configured_cipher)) {
      continue;
    }
    if (!sk_SSL_CIPHER_push(unsupported.get(), configured_cipher)) {
      return false;
    }
  }
  // If ciphers must be removed but the connection is still using the
  // context-wide list, make a connection-local copy first so the context's
  // list is left untouched.
  if (sk_SSL_CIPHER_num(unsupported.get()) && !ssl->config->cipher_list) {
    ssl->config->cipher_list = bssl::MakeUnique<SSLCipherPreferenceList>();
    if (!ssl->config->cipher_list ||
        !ssl->config->cipher_list->Init(*ssl->ctx->cipher_list)) {
      return false;
    }
  }
  for (const SSL_CIPHER *unsupported_cipher : unsupported.get()) {
    ssl->config->cipher_list->Remove(unsupported_cipher);
  }
  // Fail if filtering left no cipher in common.
  if (sk_SSL_CIPHER_num(SSL_get_ciphers(ssl)) == 0) {
    return false;
  }

  // Second feature list: named groups the remote side supports, as u16 ids.
  CBS groups;
  if (!CBS_get_asn1(in, &groups, CBS_ASN1_OCTETSTRING)) {
    return false;
  }
  Array<uint16_t> supported_groups;
  if (!supported_groups.InitForOverwrite(CBS_len(&groups) / 2)) {
    return false;
  }
  size_t idx = 0;
  while (CBS_len(&groups)) {
    uint16_t group;
    if (!CBS_get_u16(&groups, &group)) {
      return false;
    }
    supported_groups[idx++] = group;
  }
  // Keep only the configured groups that also appear in the remote list,
  // preserving the configured preference order.
  Span<const uint16_t> configured_groups =
      tls1_get_grouplist(ssl->s3->hs.get());
  Array<uint16_t> new_configured_groups;
  if (!new_configured_groups.InitForOverwrite(configured_groups.size())) {
    return false;
  }
  idx = 0;
  for (uint16_t configured_group : configured_groups) {
    bool ok = false;
    for (uint16_t supported_group : supported_groups) {
      if (supported_group == configured_group) {
        ok = true;
        break;
      }
    }
    if (ok) {
      new_configured_groups[idx++] = configured_group;
    }
  }
  // Fail if there is no group in common.
  if (idx == 0) {
    return false;
  }
  new_configured_groups.Shrink(idx);
  ssl->config->supported_group_list = std::move(new_configured_groups);

  // Optional [0] field: the ALPS codepoints the remote side supports. Absent
  // means no ALPS support (|alps| stays empty).
  CBS alps;
  CBS_init(&alps, nullptr, 0);
  if (!CBS_get_optional_asn1(in, &alps, /*out_present=*/nullptr,
                             kHandoffTagALPS)) {
    return false;
  }
  bool supports_alps = false;
  while (CBS_len(&alps) != 0) {
    uint16_t id;
    if (!CBS_get_u16(&alps, &id)) {
      return false;
    }
    // For now, we support two ALPS codepoints, so we need to extract both
    // codepoints, and then filter what the handshaker might try to send.
    if ((id == TLSEXT_TYPE_application_settings &&
         ssl->config->alps_use_new_codepoint) ||
        (id == TLSEXT_TYPE_application_settings_old &&
         !ssl->config->alps_use_new_codepoint)) {
      supports_alps = true;
      break;
    }
  }
  if (!supports_alps) {
    // Drop any configured ALPS settings so the handshake cannot commit to
    // ALPS that the remote side cannot handle.
    ssl->config->alps_configs.clear();
  }

  return true;
}

// uses_disallowed_feature returns true iff |ssl| enables a feature that
// disqualifies it for split handshakes.
static bool uses_disallowed_feature(const SSL *ssl) {
  // DTLS, multiple credentials, QUIC transport params, and ECH keys are all
  // incompatible with split handshakes.
  return ssl->method->is_dtls ||  //
         !ssl->config->cert->credentials.empty() ||
         ssl->config->quic_transport_params.size() > 0 ||  //
         ssl->ctx->ech_keys;
}

// SSL_apply_handoff restores a connection from a handoff structure produced
// by |SSL_serialize_handoff| on another SSL object: it replays the buffered
// handshake bytes and transcript, filters features via
// |apply_remote_features|, and marks the handshake for eventual handback.
bool SSL_apply_handoff(SSL *ssl, Span<const uint8_t> handoff) {
  if (uses_disallowed_feature(ssl)) {
    return false;
  }
  CBS seq, handoff_cbs(handoff);
  uint64_t handoff_version;
  if (!CBS_get_asn1(&handoff_cbs, &seq, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1_uint64(&seq, &handoff_version) ||
      handoff_version != kHandoffVersion) {
    return false;
  }

  CBS transcript, hs_buf;
  if (!CBS_get_asn1(&seq, &transcript, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1(&seq, &hs_buf, CBS_ASN1_OCTETSTRING) ||
      !apply_remote_features(ssl, &seq)) {
    return false;
  }

  SSL_set_accept_state(ssl);

  SSL3_STATE *const s3 = ssl->s3;
  s3->v2_hello_done = true;
  s3->has_message = true;

  s3->hs_buf.reset(BUF_MEM_new());
  if (!s3->hs_buf || !BUF_MEM_append(s3->hs_buf.get(), CBS_data(&hs_buf),
                                     CBS_len(&hs_buf))) {
    return false;
  }

  // A non-empty serialized transcript means the original ClientHello was a
  // V2ClientHello: its bytes were folded into the transcript rather than
  // |hs_buf|.
  if (CBS_len(&transcript) != 0) {
    s3->hs->transcript.Update(transcript);
    s3->is_v2_hello = true;
  }
  s3->hs->handback = true;

  return true;
}

// SSL_serialize_handback serializes the post-handoff handshake state to
// |out| so the original process can resume the connection. The handshake
// must be paused at one of four supported points, which determine the
// handback |type|.
bool SSL_serialize_handback(const SSL *ssl, CBB *out) {
  if (!ssl->server || uses_disallowed_feature(ssl)) {
    return false;
  }
  const SSL3_STATE *const s3 = ssl->s3;
  SSL_HANDSHAKE *const hs = s3->hs.get();
  handback_t type;
  // Map the current state-machine position to a handback type; any other
  // position is not a supported handback point.
  switch (hs->state) {
    case state12_read_change_cipher_spec:
      type = handback_after_session_resumption;
      break;
    case state12_read_client_certificate:
      type = handback_after_ecdhe;
      break;
    case state12_finish_server_handshake:
      type = handback_after_handshake;
      break;
    case state12_tls13:
      if (hs->tls13_state != state13_send_half_rtt_ticket) {
        return false;
      }
      type = handback_tls13;
      break;
    default:
      return false;
  }

  size_t hostname_len = 0;
  if (s3->hostname) {
    hostname_len = strlen(s3->hostname.get());
  }

  // The transcript is only needed while the handshake is still in progress.
  Span<const uint8_t> transcript;
  if (type != handback_after_handshake) {
    transcript = s3->hs->transcript.buffer();
  }
  // TLS 1.0 block ciphers carry an explicit IV per direction; capture each
  // IV only for the types where that direction's keys are already installed.
  size_t write_iv_len = 0;
  const uint8_t *write_iv = nullptr;
  if ((type == handback_after_session_resumption ||
       type == handback_after_handshake) &&
      ssl->s3->version == TLS1_VERSION &&
      SSL_CIPHER_is_block_cipher(s3->aead_write_ctx->cipher()) &&
      !s3->aead_write_ctx->GetIV(&write_iv, &write_iv_len)) {
    return false;
  }
  size_t read_iv_len = 0;
  const uint8_t *read_iv = nullptr;
  if (type == handback_after_handshake &&  //
      ssl->s3->version == TLS1_VERSION &&  //
      SSL_CIPHER_is_block_cipher(s3->aead_read_ctx->cipher()) &&  //
      !s3->aead_read_ctx->GetIV(&read_iv, &read_iv_len)) {
    return false;
  }

  // TODO(mab): make sure everything is serialized.
  CBB seq, key_share;
  const SSL_SESSION *session;
  if (type == handback_tls13) {
    session = hs->new_session.get();
  } else {
    session = s3->session_reused ? ssl->session.get() : hs->new_session.get();
  }
  uint8_t read_sequence[8], write_sequence[8];
  CRYPTO_store_u64_be(read_sequence, s3->read_sequence);
  CRYPTO_store_u64_be(write_sequence, s3->write_sequence);
  static const uint8_t kUnusedChannelID[64] = {0};
  // NOTE: field order here is the wire format; it must match the parse order
  // in |SSL_apply_handback| exactly.
  if (!CBB_add_asn1(out, &seq, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1_uint64(&seq, kHandbackVersion) ||
      !CBB_add_asn1_uint64(&seq, type) ||
      !CBB_add_asn1_octet_string(&seq, read_sequence, sizeof(read_sequence)) ||
      !CBB_add_asn1_octet_string(&seq, write_sequence,
                                 sizeof(write_sequence)) ||
      !CBB_add_asn1_octet_string(&seq, s3->server_random,
                                 sizeof(s3->server_random)) ||
      !CBB_add_asn1_octet_string(&seq, s3->client_random,
                                 sizeof(s3->client_random)) ||
      !CBB_add_asn1_octet_string(&seq, read_iv, read_iv_len) ||
      !CBB_add_asn1_octet_string(&seq, write_iv, write_iv_len) ||
      !CBB_add_asn1_bool(&seq, s3->session_reused) ||
      !CBB_add_asn1_bool(&seq, hs->channel_id_negotiated) ||
      !ssl_session_serialize(session, &seq) ||
      !CBB_add_asn1_octet_string(&seq, s3->next_proto_negotiated.data(),
                                 s3->next_proto_negotiated.size()) ||
      !CBB_add_asn1_octet_string(&seq, s3->alpn_selected.data(),
                                 s3->alpn_selected.size()) ||
      !CBB_add_asn1_octet_string(
          &seq, reinterpret_cast<uint8_t *>(s3->hostname.get()),
          hostname_len) ||
      !CBB_add_asn1_octet_string(&seq, kUnusedChannelID,
                                 sizeof(kUnusedChannelID)) ||
      // These two fields were historically |token_binding_negotiated| and
      // |negotiated_token_binding_param|.
      !CBB_add_asn1_bool(&seq, 0) ||    //
      !CBB_add_asn1_uint64(&seq, 0) ||  //
      !CBB_add_asn1_bool(&seq, s3->hs->next_proto_neg_seen) ||
      !CBB_add_asn1_bool(&seq, s3->hs->cert_request) ||
      !CBB_add_asn1_bool(&seq, s3->hs->extended_master_secret) ||
      !CBB_add_asn1_bool(&seq, s3->hs->ticket_expected) ||
      !CBB_add_asn1_uint64(&seq, SSL_CIPHER_get_id(s3->hs->new_cipher)) ||
      !CBB_add_asn1_octet_string(&seq, transcript.data(), transcript.size()) ||
      !CBB_add_asn1(&seq, &key_share, CBS_ASN1_SEQUENCE)) {
    return false;
  }
  if (type == handback_after_ecdhe) {
    // The ECDHE private key is needed to finish the key exchange after
    // handback.
    CBB private_key;
    if (!CBB_add_asn1_uint64(&key_share, s3->hs->key_shares[0]->GroupID()) ||
        !CBB_add_asn1(&key_share, &private_key, CBS_ASN1_OCTETSTRING) ||
        !s3->hs->key_shares[0]->SerializePrivateKey(&private_key) ||
        !CBB_flush(&key_share)) {
      return false;
    }
  }
  if (type == handback_tls13) {
    early_data_t early_data;
    // Check early data invariants.
    if (ssl->enable_early_data ==
        (s3->early_data_reason == ssl_early_data_disabled)) {
      return false;
    }
    // Collapse the three early-data flags into one |early_data_t| value,
    // rejecting any inconsistent combination.
    if (hs->early_data_offered) {
      if (s3->early_data_accepted && !s3->skip_early_data) {
        early_data = early_data_accepted;
      } else if (!s3->early_data_accepted && !s3->skip_early_data) {
        early_data = early_data_rejected_hrr;
      } else if (!s3->early_data_accepted && s3->skip_early_data) {
        early_data = early_data_skipped;
      } else {
        return false;
      }
    } else if (!s3->early_data_accepted && !s3->skip_early_data) {
      early_data = early_data_not_offered;
    } else {
      return false;
    }
    if (!CBB_add_asn1_octet_string(&seq, hs->client_traffic_secret_0.data(),
                                   hs->client_traffic_secret_0.size()) ||
        !CBB_add_asn1_octet_string(&seq, hs->server_traffic_secret_0.data(),
                                   hs->server_traffic_secret_0.size()) ||
        !CBB_add_asn1_octet_string(&seq, hs->client_handshake_secret.data(),
                                   hs->client_handshake_secret.size()) ||
        !CBB_add_asn1_octet_string(&seq, hs->server_handshake_secret.data(),
                                   hs->server_handshake_secret.size()) ||
        !CBB_add_asn1_octet_string(&seq, hs->secret.data(),
                                   hs->secret.size()) ||
        !CBB_add_asn1_octet_string(&seq, s3->exporter_secret.data(),
                                   s3->exporter_secret.size()) ||
        !CBB_add_asn1_bool(&seq, s3->used_hello_retry_request) ||
        !CBB_add_asn1_bool(&seq, hs->accept_psk_mode) ||
        !CBB_add_asn1_int64(&seq, s3->ticket_age_skew) ||
        !CBB_add_asn1_uint64(&seq, s3->early_data_reason) ||
        !CBB_add_asn1_uint64(&seq, early_data)) {
      return false;
    }
    // The early traffic secret is only present when early data was accepted.
    if (early_data == early_data_accepted &&
        !CBB_add_asn1_octet_string(&seq, hs->early_traffic_secret.data(),
                                   hs->early_traffic_secret.size())) {
      return false;
    }
    // Record which ALPS codepoint is in use, but only when the session
    // carries application settings (older parsers treat this field as
    // optional).
    if (session->has_application_settings) {
      uint16_t alps_codepoint = TLSEXT_TYPE_application_settings_old;
      if (hs->config->alps_use_new_codepoint) {
        alps_codepoint = TLSEXT_TYPE_application_settings;
      }
      if (!CBB_add_asn1_uint64(&seq, alps_codepoint)) {
        return false;
      }
    }
  }
  return CBB_flush(out);
}

// CopyExact copies |in| into |out| iff both are the same length; otherwise it
// returns false and leaves |out| untouched.
static bool CopyExact(Span<uint8_t> out, const CBS *in) {
  if (CBS_len(in) != out.size()) {
    return false;
  }
  OPENSSL_memcpy(out.data(), CBS_data(in), out.size());
  return true;
}

// SSL_apply_handback restores a fresh server connection from a handback
// structure produced by |SSL_serialize_handback|, parsing the fields in the
// exact order they were written and re-installing the appropriate traffic
// keys for the recorded state-machine position.
bool SSL_apply_handback(SSL *ssl, Span<const uint8_t> handback) {
  // Only a fresh (pre-handshake), non-DTLS connection may receive a handback.
  if (ssl->do_handshake != nullptr ||  //
      ssl->method->is_dtls) {
    return false;
  }

  SSL3_STATE *const s3 = ssl->s3;
  uint64_t handback_version, unused_token_binding_param, cipher, type_u64,
      alps_codepoint;

  CBS seq, read_seq, write_seq, server_rand, client_rand, read_iv, write_iv,
      next_proto, alpn, hostname, unused_channel_id, transcript, key_share;
  int session_reused, channel_id_negotiated, cert_request,
      extended_master_secret, ticket_expected, unused_token_binding,
      next_proto_neg_seen;
  SSL_SESSION *session = nullptr;

  CBS handback_cbs(handback);
  if (!CBS_get_asn1(&handback_cbs, &seq, CBS_ASN1_SEQUENCE) ||  //
      !CBS_get_asn1_uint64(&seq, &handback_version) ||          //
      handback_version != kHandbackVersion ||                   //
      !CBS_get_asn1_uint64(&seq, &type_u64) ||                  //
      type_u64 > handback_max_value) {
    return false;
  }

  handback_t type = static_cast<handback_t>(type_u64);
  if (!CBS_get_asn1(&seq, &read_seq, CBS_ASN1_OCTETSTRING) ||
      CBS_len(&read_seq) != sizeof(s3->read_sequence) ||
      !CBS_get_asn1(&seq, &write_seq, CBS_ASN1_OCTETSTRING) ||
      CBS_len(&write_seq) != sizeof(s3->write_sequence) ||
      !CBS_get_asn1(&seq, &server_rand, CBS_ASN1_OCTETSTRING) ||
      CBS_len(&server_rand) != sizeof(s3->server_random) ||
      !CBS_copy_bytes(&server_rand, s3->server_random,
                      sizeof(s3->server_random)) ||
      !CBS_get_asn1(&seq, &client_rand, CBS_ASN1_OCTETSTRING) ||
      CBS_len(&client_rand) != sizeof(s3->client_random) ||
      !CBS_copy_bytes(&client_rand, s3->client_random,
                      sizeof(s3->client_random)) ||
      !CBS_get_asn1(&seq, &read_iv, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1(&seq, &write_iv, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1_bool(&seq, &session_reused) ||
      !CBS_get_asn1_bool(&seq, &channel_id_negotiated)) {
    return false;
  }

  s3->hs = ssl_handshake_new(ssl);
  if (!s3->hs) {
    return false;
  }
  SSL_HANDSHAKE *const hs = s3->hs.get();
  // A resumed TLS 1.2 session goes into |ssl->session|; a fresh session (or
  // TLS 1.3, where the serialized session is the new one) into
  // |hs->new_session|.
  if (!session_reused || type == handback_tls13) {
    hs->new_session =
        SSL_SESSION_parse(&seq, ssl->ctx->x509_method, ssl->ctx->pool);
    session = hs->new_session.get();
  } else {
    ssl->session =
        SSL_SESSION_parse(&seq, ssl->ctx->x509_method, ssl->ctx->pool);
    session = ssl->session.get();
  }

  if (!session ||  //
      !CBS_get_asn1(&seq, &next_proto, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1(&seq, &alpn, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1(&seq, &hostname, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1(&seq, &unused_channel_id, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1_bool(&seq, &unused_token_binding) ||
      !CBS_get_asn1_uint64(&seq, &unused_token_binding_param) ||
      !CBS_get_asn1_bool(&seq, &next_proto_neg_seen) ||
      !CBS_get_asn1_bool(&seq, &cert_request) ||
      !CBS_get_asn1_bool(&seq, &extended_master_secret) ||
      !CBS_get_asn1_bool(&seq, &ticket_expected) ||
      !CBS_get_asn1_uint64(&seq, &cipher)) {
    return false;
  }
  if ((hs->new_cipher = SSL_get_cipher_by_value(
           static_cast<uint16_t>(cipher))) == nullptr) {
    return false;
  }
  if (!CBS_get_asn1(&seq, &transcript, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1(&seq, &key_share, CBS_ASN1_SEQUENCE)) {
    return false;
  }
  CBS client_handshake_secret, server_handshake_secret,
      client_traffic_secret_0, server_traffic_secret_0, secret,
      exporter_secret, early_traffic_secret;
  if (type == handback_tls13) {
    int used_hello_retry_request, accept_psk_mode;
    uint64_t early_data, early_data_reason;
    int64_t ticket_age_skew;
    if (!CBS_get_asn1(&seq, &client_traffic_secret_0, CBS_ASN1_OCTETSTRING) ||
        !CBS_get_asn1(&seq, &server_traffic_secret_0, CBS_ASN1_OCTETSTRING) ||
        !CBS_get_asn1(&seq, &client_handshake_secret, CBS_ASN1_OCTETSTRING) ||
        !CBS_get_asn1(&seq, &server_handshake_secret, CBS_ASN1_OCTETSTRING) ||
        !CBS_get_asn1(&seq, &secret, CBS_ASN1_OCTETSTRING) ||
        !CBS_get_asn1(&seq, &exporter_secret, CBS_ASN1_OCTETSTRING) ||
        !CBS_get_asn1_bool(&seq, &used_hello_retry_request) ||
        !CBS_get_asn1_bool(&seq, &accept_psk_mode) ||
        !CBS_get_asn1_int64(&seq, &ticket_age_skew) ||
        !CBS_get_asn1_uint64(&seq, &early_data_reason) ||
        early_data_reason > ssl_early_data_reason_max_value ||
        !CBS_get_asn1_uint64(&seq, &early_data) ||
        early_data > early_data_max_value) {
      return false;
    }
    early_data_t early_data_type = static_cast<early_data_t>(early_data);
    if (early_data_type == early_data_accepted &&
        !CBS_get_asn1(&seq, &early_traffic_secret, CBS_ASN1_OCTETSTRING)) {
      return false;
    }
    if (session->has_application_settings) {
      // Making it optional to keep compatibility with older handshakers.
      // Older handshakers won't send the field.
      if (CBS_len(&seq) == 0) {
        hs->config->alps_use_new_codepoint = false;
      } else {
        if (!CBS_get_asn1_uint64(&seq, &alps_codepoint)) {
          return false;
        }
        if (alps_codepoint == TLSEXT_TYPE_application_settings) {
          hs->config->alps_use_new_codepoint = true;
        } else if (alps_codepoint == TLSEXT_TYPE_application_settings_old) {
          hs->config->alps_use_new_codepoint = false;
        } else {
          OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_ALPS_CODEPOINT);
          return false;
        }
      }
    }

    // |ticket_age_skew| travels as int64 but is stored as int32; reject
    // out-of-range values instead of truncating.
    if (ticket_age_skew > std::numeric_limits<int32_t>::max() ||
        ticket_age_skew < std::numeric_limits<int32_t>::min()) {
      return false;
    }
    s3->ticket_age_skew = static_cast<int32_t>(ticket_age_skew);
    s3->used_hello_retry_request = used_hello_retry_request;
    hs->accept_psk_mode = accept_psk_mode;

    s3->early_data_reason =
        static_cast<ssl_early_data_reason_t>(early_data_reason);
    ssl->enable_early_data = s3->early_data_reason != ssl_early_data_disabled;
    // Expand the compact |early_data_t| value back into the three flags.
    s3->skip_early_data = false;
    s3->early_data_accepted = false;
    hs->early_data_offered = false;
    switch (early_data_type) {
      case early_data_not_offered:
        break;
      case early_data_accepted:
        s3->early_data_accepted = true;
        hs->early_data_offered = true;
        hs->can_early_write = true;
        hs->can_early_read = true;
        hs->in_early_data = true;
        break;
      case early_data_rejected_hrr:
        hs->early_data_offered = true;
        break;
      case early_data_skipped:
        s3->skip_early_data = true;
        hs->early_data_offered = true;
        break;
      default:
        return false;
    }
  } else {
    s3->early_data_reason = ssl_early_data_protocol_version;
  }

  // Sanity-check the deserialized session against this binary's view of the
  // negotiated version and cipher.
  ssl->s3->version = session->ssl_version;
  if (!ssl_method_supports_version(ssl->method, ssl->s3->version) ||
      session->cipher != hs->new_cipher ||
      ssl_protocol_version(ssl) <
          SSL_CIPHER_get_min_version(session->cipher) ||
      SSL_CIPHER_get_max_version(session->cipher) <
          ssl_protocol_version(ssl)) {
    return false;
  }
  ssl->do_handshake = ssl_server_handshake;
  ssl->server = true;
  // Re-enter the state machine at the position recorded by the serializer,
  // checking |session_reused| is consistent with the handback type.
  switch (type) {
    case handback_after_session_resumption:
      hs->state = state12_read_change_cipher_spec;
      if (!session_reused) {
        return false;
      }
      break;
    case handback_after_ecdhe:
      hs->state = state12_read_client_certificate;
      if (session_reused) {
        return false;
      }
      break;
    case handback_after_handshake:
      hs->state = state12_finish_server_handshake;
      break;
    case handback_tls13:
      hs->state = state12_tls13;
      hs->tls13_state = state13_send_half_rtt_ticket;
      break;
    default:
      return false;
  }
  s3->session_reused = session_reused;
  hs->channel_id_negotiated = channel_id_negotiated;
  if (!s3->next_proto_negotiated.CopyFrom(next_proto) ||
      !s3->alpn_selected.CopyFrom(alpn)) {
    return false;
  }

  const size_t hostname_len = CBS_len(&hostname);
  if (hostname_len == 0) {
    s3->hostname.reset();
  } else {
    char *hostname_str = nullptr;
    if (!CBS_strdup(&hostname, &hostname_str)) {
      return false;
    }
    s3->hostname.reset(hostname_str);
  }

  hs->next_proto_neg_seen = next_proto_neg_seen;
  hs->wait = ssl_hs_flush;
  hs->extended_master_secret = extended_master_secret;
  hs->ticket_expected = ticket_expected;
  hs->cert_request = cert_request;

  // Rebuild the running transcript unless the handshake already finished.
  if (type != handback_after_handshake &&
      (!hs->transcript.Init() ||
       !hs->transcript.InitHash(ssl_protocol_version(ssl), hs->new_cipher) ||
       !hs->transcript.Update(transcript))) {
    return false;
  }
  if (type == handback_tls13) {
    if (!hs->client_traffic_secret_0.TryCopyFrom(client_traffic_secret_0) ||
        !hs->server_traffic_secret_0.TryCopyFrom(server_traffic_secret_0) ||
        !hs->client_handshake_secret.TryCopyFrom(client_handshake_secret) ||
        !hs->server_handshake_secret.TryCopyFrom(server_handshake_secret) ||
        !hs->secret.TryCopyFrom(secret) ||
        !s3->exporter_secret.TryCopyFrom(exporter_secret)) {
      return false;
    }
    if (s3->early_data_accepted &&
        !hs->early_traffic_secret.TryCopyFrom(early_traffic_secret)) {
      return false;
    }
  }
  Array<uint8_t> key_block;
  switch (type) {
    case handback_after_session_resumption:
      // The write keys are installed after server Finished, but the client
      // keys must wait for ChangeCipherSpec.
      if (!tls1_configure_aead(ssl, evp_aead_seal, &key_block, session,
                               write_iv)) {
        return false;
      }
      break;
    case handback_after_ecdhe:
      // The premaster secret is not yet computed, so install no keys.
      break;
    case handback_after_handshake:
      // The handshake is complete, so both keys are installed.
      if (!tls1_configure_aead(ssl, evp_aead_seal, &key_block, session,
                               write_iv) ||
          !tls1_configure_aead(ssl, evp_aead_open, &key_block, session,
                               read_iv)) {
        return false;
      }
      break;
    case handback_tls13:
      // After server Finished, the application write keys are installed, but
      // none of the read keys. The read keys are installed in the state
      // machine immediately after processing handback.
      if (!tls13_set_traffic_key(ssl, ssl_encryption_application,
                                 evp_aead_seal, hs->new_session.get(),
                                 hs->server_traffic_secret_0)) {
        return false;
      }
      break;
  }
  uint8_t read_sequence[8], write_sequence[8];
  if (!CopyExact(read_sequence, &read_seq) ||
      !CopyExact(write_sequence, &write_seq)) {
    return false;
  }
  s3->read_sequence = CRYPTO_load_u64_be(read_sequence);
  s3->write_sequence = CRYPTO_load_u64_be(write_sequence);
  if (type == handback_after_ecdhe) {
    uint64_t group_id;
    CBS private_key;
    if (!CBS_get_asn1_uint64(&key_share, &group_id) ||  //
        group_id > 0xffff ||
        !CBS_get_asn1(&key_share, &private_key, CBS_ASN1_OCTETSTRING)) {
      return false;
    }
    hs->key_shares[0] = SSLKeyShare::Create(group_id);
    if (!hs->key_shares[0] ||
        !hs->key_shares[0]->DeserializePrivateKey(&private_key)) {
      return false;
    }
  }
  return true;  // Trailing data allowed for extensibility.
}

BSSL_NAMESPACE_END

using namespace bssl;

// SSL_serialize_capabilities writes a SEQUENCE containing this binary's
// supported features (see |serialize_features|) to |out|.
int SSL_serialize_capabilities(const SSL *ssl, CBB *out) {
  CBB seq;
  if (!CBB_add_asn1(out, &seq, CBS_ASN1_SEQUENCE) ||
      !serialize_features(&seq) ||  //
      !CBB_flush(out)) {
    return 0;
  }
  return 1;
}

// SSL_request_handshake_hints primes |ssl| to produce handshake hints: it
// applies the remote |capabilities|, re-wraps |client_hello| as a buffered
// handshake message, and allocates an empty hints object to be filled in by
// the handshake.
int SSL_request_handshake_hints(SSL *ssl, const uint8_t *client_hello,
                                size_t client_hello_len,
                                const uint8_t *capabilities,
                                size_t capabilities_len) {
  if (SSL_is_dtls(ssl)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  CBS cbs, seq;
  CBS_init(&cbs, capabilities, capabilities_len);
  UniquePtr<SSL_HANDSHAKE_HINTS> hints = MakeUnique<SSL_HANDSHAKE_HINTS>();
  if (hints == nullptr ||                            //
      !CBS_get_asn1(&cbs, &seq, CBS_ASN1_SEQUENCE) ||  //
      !apply_remote_features(ssl, &seq)) {
    return 0;
  }

  SSL3_STATE *const s3 = ssl->s3;
  s3->v2_hello_done = true;
  s3->has_message = true;

  // |client_hello| is the raw ClientHello body; wrap it in a handshake
  // message header before buffering it for the state machine.
  Array<uint8_t> client_hello_msg;
  ScopedCBB client_hello_cbb;
  CBB client_hello_body;
  if (!ssl->method->init_message(ssl, client_hello_cbb.get(),
                                 &client_hello_body, SSL3_MT_CLIENT_HELLO) ||
      !CBB_add_bytes(&client_hello_body, client_hello, client_hello_len) ||
      !ssl->method->finish_message(ssl, client_hello_cbb.get(),
                                   &client_hello_msg)) {
    return 0;
  }

  s3->hs_buf.reset(BUF_MEM_new());
  if (!s3->hs_buf || !BUF_MEM_append(s3->hs_buf.get(), client_hello_msg.data(),
                                     client_hello_msg.size())) {
    return 0;
  }

  s3->hs->hints_requested = true;
  s3->hs->hints = std::move(hints);
  return 1;
}

// |SSL_HANDSHAKE_HINTS| is serialized as the following ASN.1 structure. We use
// implicit tagging to make it a little more compact.
//
// HandshakeHints ::= SEQUENCE {
//     serverRandomTLS13       [0] IMPLICIT OCTET STRING OPTIONAL,
//     keyShareHint            [1] IMPLICIT KeyShareHint OPTIONAL,
//     signatureHint           [2] IMPLICIT SignatureHint OPTIONAL,
//     -- At most one of decryptedPSKHint or ignorePSKHint may be present. It
//     -- corresponds to the first entry in pre_shared_keys. TLS 1.2 session
//     -- tickets use a separate hint, to ensure the caller does not apply the
//     -- hint to the wrong field.
//     decryptedPSKHint        [3] IMPLICIT OCTET STRING OPTIONAL,
//     ignorePSKHint           [4] IMPLICIT NULL OPTIONAL,
//     compressCertificateHint [5] IMPLICIT CompressCertificateHint OPTIONAL,
//     -- TLS 1.2 and 1.3 use different server random hints because one
//     -- contains a timestamp while the other doesn't. If the hint was
//     -- generated assuming TLS 1.3 but we actually negotiate TLS 1.2, mixing
//     -- the two will break this.
//     serverRandomTLS12       [6] IMPLICIT OCTET STRING OPTIONAL,
//     ecdheHint               [7] IMPLICIT ECDHEHint OPTIONAL,
//     -- At most one of decryptedTicketHint or ignoreTicketHint may be
//     -- present. renewTicketHint requires decryptedTicketHint.
//     decryptedTicketHint     [8] IMPLICIT OCTET STRING OPTIONAL,
//     renewTicketHint         [9] IMPLICIT NULL OPTIONAL,
//     ignoreTicketHint       [10] IMPLICIT NULL OPTIONAL,
// }
//
// KeyShareHint ::= SEQUENCE {
//     groupId                 INTEGER,
//     ciphertext              OCTET STRING,
//     secret                  OCTET STRING,
// }
//
// SignatureHint ::= SEQUENCE {
//     algorithm               INTEGER,
//     input                   OCTET STRING,
//     subjectPublicKeyInfo    OCTET STRING,
//     signature               OCTET STRING,
// }
//
// CompressCertificateHint ::= SEQUENCE {
//     algorithm               INTEGER,
//     input                   OCTET STRING,
//     compressed              OCTET STRING,
// }
//
// ECDHEHint ::= SEQUENCE {
//     groupId                 INTEGER,
//     publicKey               OCTET STRING,
//     privateKey              OCTET STRING,
// }

// HandshakeHints tags.
static const CBS_ASN1_TAG kServerRandomTLS13Tag = CBS_ASN1_CONTEXT_SPECIFIC | 0;
static const CBS_ASN1_TAG kKeyShareHintTag =
    CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 1;
static const CBS_ASN1_TAG kSignatureHintTag =
    CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 2;
static const CBS_ASN1_TAG kDecryptedPSKTag = CBS_ASN1_CONTEXT_SPECIFIC | 3;
static const CBS_ASN1_TAG kIgnorePSKTag = CBS_ASN1_CONTEXT_SPECIFIC | 4;
static const CBS_ASN1_TAG kCompressCertificateTag =
    CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 5;
static const CBS_ASN1_TAG kServerRandomTLS12Tag = CBS_ASN1_CONTEXT_SPECIFIC | 6;
static const CBS_ASN1_TAG kECDHEHintTag =
    CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 7;
static const CBS_ASN1_TAG kDecryptedTicketTag = CBS_ASN1_CONTEXT_SPECIFIC | 8;
static const CBS_ASN1_TAG kRenewTicketTag = CBS_ASN1_CONTEXT_SPECIFIC | 9;
static const CBS_ASN1_TAG kIgnoreTicketTag = CBS_ASN1_CONTEXT_SPECIFIC | 10;

// SSL_serialize_handshake_hints writes the hints collected during a
// hints-requested handshake (see |SSL_request_handshake_hints|) to |out|,
// using the HandshakeHints structure documented above. Each optional field is
// emitted only when its hint was populated.
int SSL_serialize_handshake_hints(const SSL *ssl, CBB *out) {
  const SSL_HANDSHAKE *hs = ssl->s3->hs.get();
  if (!ssl->server || !hs->hints_requested) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  const SSL_HANDSHAKE_HINTS *hints = hs->hints.get();
  CBB seq, child;
  if (!CBB_add_asn1(out, &seq, CBS_ASN1_SEQUENCE)) {
    return 0;
  }

  if (!hints->server_random_tls13.empty()) {
    if (!CBB_add_asn1(&seq, &child, kServerRandomTLS13Tag) ||
        !CBB_add_bytes(&child, hints->server_random_tls13.data(),
                       hints->server_random_tls13.size())) {
      return 0;
    }
  }

  if (hints->key_share_group_id != 0 && !hints->key_share_ciphertext.empty() &&
      !hints->key_share_secret.empty()) {
    if (!CBB_add_asn1(&seq, &child, kKeyShareHintTag) ||
        !CBB_add_asn1_uint64(&child, hints->key_share_group_id) ||
        !CBB_add_asn1_octet_string(&child, hints->key_share_ciphertext.data(),
                                   hints->key_share_ciphertext.size()) ||
        !CBB_add_asn1_octet_string(&child, hints->key_share_secret.data(),
                                   hints->key_share_secret.size())) {
      return 0;
    }
  }

  if (hints->signature_algorithm != 0 && !hints->signature_input.empty() &&
      !hints->signature.empty()) {
    if (!CBB_add_asn1(&seq, &child, kSignatureHintTag) ||
        !CBB_add_asn1_uint64(&child, hints->signature_algorithm) ||
        !CBB_add_asn1_octet_string(&child, hints->signature_input.data(),
                                   hints->signature_input.size()) ||
        !CBB_add_asn1_octet_string(&child, hints->signature_spki.data(),
                                   hints->signature_spki.size()) ||
        !CBB_add_asn1_octet_string(&child, hints->signature.data(),
                                   hints->signature.size())) {
      return 0;
    }
  }

  if (!hints->decrypted_psk.empty()) {
    if (!CBB_add_asn1(&seq, &child, kDecryptedPSKTag) ||
        !CBB_add_bytes(&child, hints->decrypted_psk.data(),
                       hints->decrypted_psk.size())) {
      return 0;
    }
  }

  // |ignore_psk| is an empty (NULL-valued) marker field.
  if (hints->ignore_psk &&  //
      !CBB_add_asn1(&seq, &child, kIgnorePSKTag)) {
    return 0;
  }

  if (hints->cert_compression_alg_id != 0 &&
      !hints->cert_compression_input.empty() &&
      !hints->cert_compression_output.empty()) {
    if (!CBB_add_asn1(&seq, &child, kCompressCertificateTag) ||
        !CBB_add_asn1_uint64(&child, hints->cert_compression_alg_id) ||
        !CBB_add_asn1_octet_string(&child,
                                   hints->cert_compression_input.data(),
                                   hints->cert_compression_input.size()) ||
        !CBB_add_asn1_octet_string(&child,
                                   hints->cert_compression_output.data(),
                                   hints->cert_compression_output.size())) {
      return 0;
    }
  }

  if (!hints->server_random_tls12.empty()) {
    if (!CBB_add_asn1(&seq, &child, kServerRandomTLS12Tag) ||
        !CBB_add_bytes(&child, hints->server_random_tls12.data(),
                       hints->server_random_tls12.size())) {
      return 0;
    }
  }

  if (hints->ecdhe_group_id != 0 && !hints->ecdhe_public_key.empty() &&
      !hints->ecdhe_private_key.empty()) {
    if (!CBB_add_asn1(&seq, &child, kECDHEHintTag) ||
        !CBB_add_asn1_uint64(&child, hints->ecdhe_group_id) ||
        !CBB_add_asn1_octet_string(&child, hints->ecdhe_public_key.data(),
                                   hints->ecdhe_public_key.size()) ||
        !CBB_add_asn1_octet_string(&child, hints->ecdhe_private_key.data(),
                                   hints->ecdhe_private_key.size())) {
      return 0;
    }
  }

  if (!hints->decrypted_ticket.empty()) {
    if (!CBB_add_asn1(&seq, &child, kDecryptedTicketTag) ||
        !CBB_add_bytes(&child, hints->decrypted_ticket.data(),
                       hints->decrypted_ticket.size())) {
      return 0;
    }
  }

  // |renew_ticket| and |ignore_ticket| are empty marker fields.
  if (hints->renew_ticket &&  //
      !CBB_add_asn1(&seq, &child, kRenewTicketTag)) {
    return 0;
  }
  if (hints->ignore_ticket &&  //
      !CBB_add_asn1(&seq, &child, kIgnoreTicketTag)) {
    return 0;
  }

  return CBB_flush(out);
}

// get_optional_implicit_null parses an optional IMPLICIT NULL field tagged
// |tag| from |cbs|, setting |*out_present| to whether it was present. The
// field, if present, must be empty.
static bool get_optional_implicit_null(CBS *cbs, bool *out_present,
                                       CBS_ASN1_TAG tag) {
  CBS value;
  int present;
  if (!CBS_get_optional_asn1(cbs, &value, &present, tag) ||
      (present && CBS_len(&value) != 0)) {
    return false;
  }
  *out_present = present;
  return true;
}

// SSL_set_handshake_hints parses a serialized HandshakeHints structure and
// installs it on |ssl|, validating field ranges and the mutual-exclusion
// rules documented in the schema (PSK vs. ignore-PSK, ticket vs.
// ignore-ticket, renew-ticket requires a decrypted ticket).
int SSL_set_handshake_hints(SSL *ssl, const uint8_t *hints, size_t hints_len) {
  if (SSL_is_dtls(ssl)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  UniquePtr<SSL_HANDSHAKE_HINTS> hints_obj = MakeUnique<SSL_HANDSHAKE_HINTS>();
  if (hints_obj == nullptr) {
    return 0;
  }

  CBS cbs, seq, server_random_tls13, key_share, signature_hint, psk,
      cert_compression, server_random_tls12, ecdhe, ticket;
  int has_server_random_tls13, has_key_share, has_signature_hint, has_psk,
      has_cert_compression, has_server_random_tls12, has_ecdhe, has_ticket;
  CBS_init(&cbs, hints, hints_len);
  if (!CBS_get_asn1(&cbs, &seq, CBS_ASN1_SEQUENCE) ||
      !CBS_get_optional_asn1(&seq, &server_random_tls13,
                             &has_server_random_tls13,
                             kServerRandomTLS13Tag) ||
      !CBS_get_optional_asn1(&seq, &key_share, &has_key_share,
                             kKeyShareHintTag) ||
      !CBS_get_optional_asn1(&seq, &signature_hint, &has_signature_hint,
                             kSignatureHintTag) ||
      !CBS_get_optional_asn1(&seq, &psk, &has_psk, kDecryptedPSKTag) ||
      !get_optional_implicit_null(&seq, &hints_obj->ignore_psk,
                                  kIgnorePSKTag) ||
      !CBS_get_optional_asn1(&seq, &cert_compression, &has_cert_compression,
                             kCompressCertificateTag) ||
      !CBS_get_optional_asn1(&seq, &server_random_tls12,
                             &has_server_random_tls12,
                             kServerRandomTLS12Tag) ||
      !CBS_get_optional_asn1(&seq, &ecdhe, &has_ecdhe, kECDHEHintTag) ||
      !CBS_get_optional_asn1(&seq, &ticket, &has_ticket,
                             kDecryptedTicketTag) ||
      !get_optional_implicit_null(&seq, &hints_obj->renew_ticket,
                                  kRenewTicketTag) ||
      !get_optional_implicit_null(&seq, &hints_obj->ignore_ticket,
                                  kIgnoreTicketTag)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_COULD_NOT_PARSE_HINTS);
    return 0;
  }

  if (has_server_random_tls13 &&
      !hints_obj->server_random_tls13.CopyFrom(server_random_tls13)) {
    return 0;
  }

  if (has_key_share) {
    uint64_t group_id;
    CBS ciphertext, secret;
    if (!CBS_get_asn1_uint64(&key_share, &group_id) ||  //
        group_id == 0 || group_id > 0xffff ||
        !CBS_get_asn1(&key_share, &ciphertext, CBS_ASN1_OCTETSTRING) ||
        !hints_obj->key_share_ciphertext.CopyFrom(ciphertext) ||
        !CBS_get_asn1(&key_share, &secret, CBS_ASN1_OCTETSTRING) ||
        !hints_obj->key_share_secret.CopyFrom(secret)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_COULD_NOT_PARSE_HINTS);
      return 0;
    }
    hints_obj->key_share_group_id = static_cast<uint16_t>(group_id);
  }

  if (has_signature_hint) {
    uint64_t sig_alg;
    CBS input, spki, signature;
    if (!CBS_get_asn1_uint64(&signature_hint, &sig_alg) ||  //
        sig_alg == 0 || sig_alg > 0xffff ||
        !CBS_get_asn1(&signature_hint, &input, CBS_ASN1_OCTETSTRING) ||
        !hints_obj->signature_input.CopyFrom(input) ||
        !CBS_get_asn1(&signature_hint, &spki, CBS_ASN1_OCTETSTRING) ||
        !hints_obj->signature_spki.CopyFrom(spki) ||
        !CBS_get_asn1(&signature_hint, &signature, CBS_ASN1_OCTETSTRING) ||
        !hints_obj->signature.CopyFrom(signature)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_COULD_NOT_PARSE_HINTS);
      return 0;
    }
    hints_obj->signature_algorithm = static_cast<uint16_t>(sig_alg);
  }

  if (has_psk && !hints_obj->decrypted_psk.CopyFrom(psk)) {
    return 0;
  }
  // A decrypted PSK and an ignore-PSK marker are mutually exclusive.
  if (has_psk && hints_obj->ignore_psk) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_COULD_NOT_PARSE_HINTS);
    return 0;
  }

  if (has_cert_compression) {
    uint64_t alg;
    CBS input, output;
    if (!CBS_get_asn1_uint64(&cert_compression, &alg) ||  //
        alg == 0 || alg > 0xffff ||
        !CBS_get_asn1(&cert_compression, &input, CBS_ASN1_OCTETSTRING) ||
        !hints_obj->cert_compression_input.CopyFrom(input) ||
        !CBS_get_asn1(&cert_compression, &output, CBS_ASN1_OCTETSTRING) ||
        !hints_obj->cert_compression_output.CopyFrom(output)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_COULD_NOT_PARSE_HINTS);
      return 0;
    }
    hints_obj->cert_compression_alg_id = static_cast<uint16_t>(alg);
  }

  if (has_server_random_tls12 &&
      !hints_obj->server_random_tls12.CopyFrom(server_random_tls12)) {
    return 0;
  }

  if (has_ecdhe) {
    uint64_t group_id;
    CBS public_key, private_key;
    if (!CBS_get_asn1_uint64(&ecdhe, &group_id) ||  //
        group_id == 0 || group_id > 0xffff ||
        !CBS_get_asn1(&ecdhe, &public_key, CBS_ASN1_OCTETSTRING) ||
        !hints_obj->ecdhe_public_key.CopyFrom(public_key) ||
        !CBS_get_asn1(&ecdhe, &private_key, CBS_ASN1_OCTETSTRING) ||
        !hints_obj->ecdhe_private_key.CopyFrom(private_key)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_COULD_NOT_PARSE_HINTS);
      return 0;
    }
    hints_obj->ecdhe_group_id = static_cast<uint16_t>(group_id);
  }

  if (has_ticket && !hints_obj->decrypted_ticket.CopyFrom(ticket)) {
    return 0;
  }
  // A decrypted ticket and an ignore-ticket marker are mutually exclusive,
  // and a renew-ticket marker requires a decrypted ticket.
  if (has_ticket && hints_obj->ignore_ticket) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_COULD_NOT_PARSE_HINTS);
    return 0;
  }
  if (!has_ticket && hints_obj->renew_ticket) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_COULD_NOT_PARSE_HINTS);
    return 0;
  }

  ssl->s3->hs->hints = std::move(hints_obj);
  return 1;
}


================================================
FILE: Sources/CNIOBoringSSL/ssl/handshake.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html */

// NOTE(review): the four header names below were lost in extraction
// (angle-bracketed text stripped). Restore them from upstream BoringSSL
// ssl/handshake.cc before building — TODO confirm against upstream.
#include
#include
#include
#include

#include "../crypto/internal.h"
#include "internal.h"

BSSL_NAMESPACE_BEGIN

// Per-connection handshake state. Every progress/feature flag starts false;
// both transcripts are put into DTLS mode iff |ssl_arg| is a DTLS connection.
SSL_HANDSHAKE::SSL_HANDSHAKE(SSL *ssl_arg)
    : ssl(ssl_arg),
      transcript(SSL_is_dtls(ssl_arg)),
      inner_transcript(SSL_is_dtls(ssl_arg)),
      ech_is_inner(false),
      ech_authenticated_reject(false),
      scts_requested(false),
      handshake_finalized(false),
      accept_psk_mode(false),
      cert_request(false),
      certificate_status_expected(false),
      ocsp_stapling_requested(false),
      should_ack_sni(false),
      in_false_start(false),
      in_early_data(false),
      early_data_offered(false),
      can_early_read(false),
      can_early_write(false),
      is_early_version(false),
      next_proto_neg_seen(false),
      ticket_expected(false),
      extended_master_secret(false),
      pending_private_key_op(false),
      handback(false),
      hints_requested(false),
      cert_compression_negotiated(false),
      apply_jdk11_workaround(false),
      can_release_private_key(false),
      channel_id_negotiated(false),
      received_hello_verify_request(false) {
  assert(ssl);
  // Draw entropy for all GREASE values at once. This avoids calling
  // |RAND_bytes| repeatedly and makes the values consistent within a
  // connection. The latter is so the second ClientHello matches after
  // HelloRetryRequest and so supported_groups and key_shares are consistent.
  RAND_bytes(grease_seed, sizeof(grease_seed));
}

SSL_HANDSHAKE::~SSL_HANDSHAKE() {
  // Drop any CA names the X.509 layer cached on this handshake.
  ssl->ctx->x509_method->hs_flush_cached_ca_names(this);
}

// Returns the current ClientHello as a parsed message. When an encoded
// ClientHelloInner is buffered (ECH), that buffer is served; otherwise the
// already-received message is re-fetched from the transport method.
bool SSL_HANDSHAKE::GetClientHello(SSLMessage *out_msg,
                                   SSL_CLIENT_HELLO *out_client_hello) {
  if (!ech_client_hello_buf.empty()) {
    // If the backing buffer is non-empty, the ClientHelloInner has been set.
    out_msg->is_v2_hello = false;
    out_msg->type = SSL3_MT_CLIENT_HELLO;
    out_msg->raw = CBS(ech_client_hello_buf);
    size_t header_len = SSL_is_dtls(ssl) ?
DTLS1_HM_HEADER_LENGTH : SSL3_HM_HEADER_LENGTH; out_msg->body = CBS(Span(ech_client_hello_buf).subspan(header_len)); } else if (!ssl->method->get_message(ssl, out_msg)) { // The message has already been read, so this cannot fail. OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } if (!ssl_client_hello_init(ssl, out_client_hello, out_msg->body)) { OPENSSL_PUT_ERROR(SSL, SSL_R_CLIENTHELLO_PARSE_FAILED); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return false; } return true; } UniquePtr ssl_handshake_new(SSL *ssl) { UniquePtr hs = MakeUnique(ssl); if (!hs || !hs->transcript.Init()) { return nullptr; } hs->config = ssl->config.get(); if (!hs->config) { assert(hs->config); return nullptr; } return hs; } bool ssl_check_message_type(SSL *ssl, const SSLMessage &msg, int type) { if (msg.type != type) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); ERR_add_error_dataf("got type %d, wanted type %d", msg.type, type); return false; } return true; } bool ssl_add_message_cbb(SSL *ssl, CBB *cbb) { Array msg; if (!ssl->method->finish_message(ssl, cbb, &msg) || !ssl->method->add_message(ssl, std::move(msg))) { return false; } return true; } size_t ssl_max_handshake_message_len(const SSL *ssl) { // kMaxMessageLen is the default maximum message size for handshakes which do // not accept peer certificate chains. static const size_t kMaxMessageLen = 16384; if (SSL_in_init(ssl)) { SSL_CONFIG *config = ssl->config.get(); // SSL_in_init() implies not NULL. if ((!ssl->server || (config->verify_mode & SSL_VERIFY_PEER)) && kMaxMessageLen < ssl->max_cert_list) { return ssl->max_cert_list; } return kMaxMessageLen; } if (ssl_protocol_version(ssl) < TLS1_3_VERSION) { // In TLS 1.2 and below, the largest acceptable post-handshake message is // a HelloRequest. return 0; } if (ssl->server) { // The largest acceptable post-handshake message for a server is a // KeyUpdate. 
We will never initiate post-handshake auth. return 1; } // Clients must accept NewSessionTicket, so allow the default size. return kMaxMessageLen; } bool ssl_hash_message(SSL_HANDSHAKE *hs, const SSLMessage &msg) { // V2ClientHello messages are pre-hashed. if (msg.is_v2_hello) { return true; } return hs->transcript.Update(msg.raw); } bool ssl_parse_extensions(const CBS *cbs, uint8_t *out_alert, std::initializer_list extensions, bool ignore_unknown) { // Reset everything. for (SSLExtension *ext : extensions) { ext->present = false; CBS_init(&ext->data, nullptr, 0); if (!ext->allowed) { assert(!ignore_unknown); } } CBS copy = *cbs; while (CBS_len(©) != 0) { uint16_t type; CBS data; if (!CBS_get_u16(©, &type) || !CBS_get_u16_length_prefixed(©, &data)) { OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); *out_alert = SSL_AD_DECODE_ERROR; return false; } SSLExtension *found = nullptr; for (SSLExtension *ext : extensions) { if (type == ext->type && ext->allowed) { found = ext; break; } } if (found == nullptr) { if (ignore_unknown) { continue; } OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); ERR_add_error_dataf("extension %u", unsigned{type}); *out_alert = SSL_AD_UNSUPPORTED_EXTENSION; return false; } // Duplicate ext_types are forbidden. if (found->present) { OPENSSL_PUT_ERROR(SSL, SSL_R_DUPLICATE_EXTENSION); *out_alert = SSL_AD_ILLEGAL_PARAMETER; return false; } found->present = true; found->data = data; } return true; } enum ssl_verify_result_t ssl_verify_peer_cert(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; const SSL_SESSION *prev_session = ssl->s3->established_session.get(); if (prev_session != NULL) { // If renegotiating, the server must not change the server certificate. See // https://mitls.org/pages/attacks/3SHAKE. We never resume on renegotiation, // so this check is sufficient to ensure the reported peer certificate never // changes on renegotiation. 
assert(!ssl->server);
    if (sk_CRYPTO_BUFFER_num(prev_session->certs.get()) !=
        sk_CRYPTO_BUFFER_num(hs->new_session->certs.get())) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_SERVER_CERT_CHANGED);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_verify_invalid;
    }

    // Compare the renegotiated chain byte-for-byte against the established
    // one; any difference is fatal.
    // NOTE(review): |Span| below may have lost template arguments in
    // extraction — confirm against upstream BoringSSL.
    for (size_t i = 0; i < sk_CRYPTO_BUFFER_num(hs->new_session->certs.get());
         i++) {
      const CRYPTO_BUFFER *old_cert =
          sk_CRYPTO_BUFFER_value(prev_session->certs.get(), i);
      const CRYPTO_BUFFER *new_cert =
          sk_CRYPTO_BUFFER_value(hs->new_session->certs.get(), i);
      if (Span(CRYPTO_BUFFER_data(old_cert), CRYPTO_BUFFER_len(old_cert)) !=
          Span(CRYPTO_BUFFER_data(new_cert), CRYPTO_BUFFER_len(new_cert))) {
        OPENSSL_PUT_ERROR(SSL, SSL_R_SERVER_CERT_CHANGED);
        ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
        return ssl_verify_invalid;
      }
    }

    // The certificate is identical, so we may skip re-verifying the
    // certificate. Since we only authenticated the previous one, copy other
    // authentication from the established session and ignore what was newly
    // received.
    hs->new_session->ocsp_response = UpRef(prev_session->ocsp_response);
    hs->new_session->signed_cert_timestamp_list =
        UpRef(prev_session->signed_cert_timestamp_list);
    hs->new_session->verify_result = prev_session->verify_result;
    return ssl_verify_ok;
  }

  uint8_t alert = SSL_AD_CERTIFICATE_UNKNOWN;
  enum ssl_verify_result_t ret;
  if (hs->config->custom_verify_callback != nullptr) {
    ret = hs->config->custom_verify_callback(ssl, &alert);
    switch (ret) {
      case ssl_verify_ok:
        hs->new_session->verify_result = X509_V_OK;
        break;
      case ssl_verify_invalid:
        // If |SSL_VERIFY_NONE|, the error is non-fatal, but we keep the result.
        if (hs->config->verify_mode == SSL_VERIFY_NONE) {
          ERR_clear_error();
          ret = ssl_verify_ok;
        }
        hs->new_session->verify_result = X509_V_ERR_APPLICATION_VERIFICATION;
        break;
      case ssl_verify_retry:
        // Callback wants to be re-invoked later; leave |ret| as-is.
        break;
    }
  } else {
    // No custom callback: fall back to the X.509 method's chain verifier.
    ret = ssl->ctx->x509_method->session_verify_cert_chain(
              hs->new_session.get(), hs, &alert) ?
ssl_verify_ok : ssl_verify_invalid;
  }

  if (ret == ssl_verify_invalid) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_CERTIFICATE_VERIFY_FAILED);
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
  }

  // Emulate OpenSSL's client OCSP callback. OpenSSL verifies certificates
  // before it receives the OCSP, so it needs a second callback for OCSP.
  if (ret == ssl_verify_ok && !ssl->server &&
      hs->config->ocsp_stapling_enabled &&
      ssl->ctx->legacy_ocsp_callback != nullptr) {
    int cb_ret =
        ssl->ctx->legacy_ocsp_callback(ssl, ssl->ctx->legacy_ocsp_callback_arg);
    if (cb_ret <= 0) {
      // 0 means "bad OCSP response"; negative means internal failure.
      OPENSSL_PUT_ERROR(SSL, SSL_R_OCSP_CB_ERROR);
      ssl_send_alert(ssl, SSL3_AL_FATAL,
                     cb_ret == 0 ? SSL_AD_BAD_CERTIFICATE_STATUS_RESPONSE
                                 : SSL_AD_INTERNAL_ERROR);
      ret = ssl_verify_invalid;
    }
  }

  return ret;
}

// Verifies a stored certificate when resuming a session. A few things are
// different from verify_peer_cert:
// 1. We can't be renegotiating if we're resuming a session.
// 2. The session is immutable, so we don't support verify_mode ==
//    SSL_VERIFY_NONE
// 3. We don't call the OCSP callback.
// 4. We only support custom verify callbacks.
enum ssl_verify_result_t ssl_reverify_peer_cert(SSL_HANDSHAKE *hs,
                                                bool send_alert) {
  SSL *const ssl = hs->ssl;
  assert(ssl->s3->established_session == nullptr);
  assert(hs->config->verify_mode != SSL_VERIFY_NONE);

  uint8_t alert = SSL_AD_CERTIFICATE_UNKNOWN;
  enum ssl_verify_result_t ret = ssl_verify_invalid;
  if (hs->config->custom_verify_callback != nullptr) {
    ret = hs->config->custom_verify_callback(ssl, &alert);
  }

  if (ret == ssl_verify_invalid) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_CERTIFICATE_VERIFY_FAILED);
    if (send_alert) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    }
  }

  return ret;
}

static uint16_t grease_index_to_value(const SSL_HANDSHAKE *hs,
                                      enum ssl_grease_index_t index) {
  // This generates a random value of the form 0xωaωa, for all 0 ≤ ω < 16.
uint16_t ret = hs->grease_seed[index];
  ret = (ret & 0xf0) | 0x0a;
  ret |= ret << 8;
  return ret;
}

// Returns the GREASE value for |index|, ensuring the two GREASE extension
// slots never produce the same value.
uint16_t ssl_get_grease_value(const SSL_HANDSHAKE *hs,
                              enum ssl_grease_index_t index) {
  uint16_t ret = grease_index_to_value(hs, index);
  if (index == ssl_grease_extension2 &&
      ret == grease_index_to_value(hs, ssl_grease_extension1)) {
    // The two fake extensions must not have the same value. GREASE values are
    // of the form 0x1a1a, 0x2a2a, 0x3a3a, etc., so XOR to generate a different
    // one.
    ret ^= 0x1010;
  }
  return ret;
}

// Reads the peer's Finished message, checks it against the transcript MAC,
// and records the verify_data for later renegotiation checks.
enum ssl_hs_wait_t ssl_get_finished(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }
  if (!ssl_check_message_type(ssl, msg, SSL3_MT_FINISHED)) {
    return ssl_hs_error;
  }

  // Snapshot the finished hash before incorporating the new message.
  uint8_t finished[EVP_MAX_MD_SIZE];
  size_t finished_len;
  if (!hs->transcript.GetFinishedMAC(finished, &finished_len,
                                     ssl_handshake_session(hs), !ssl->server) ||
      !ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  int finished_ok = CBS_mem_equal(&msg.body, finished, finished_len);
#if defined(BORINGSSL_UNSAFE_FUZZER_MODE)
  finished_ok = 1;
#endif
  if (!finished_ok) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR);
    OPENSSL_PUT_ERROR(SSL, SSL_R_DIGEST_CHECK_FAILED);
    return ssl_hs_error;
  }

  // Copy the Finished so we can use it for renegotiation checks.
  if (finished_len > ssl->s3->previous_client_finished.capacity() ||
      finished_len > ssl->s3->previous_server_finished.capacity()) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return ssl_hs_error;
  }
  if (ssl->server) {
    ssl->s3->previous_client_finished.CopyFrom(Span(finished, finished_len));
  } else {
    ssl->s3->previous_server_finished.CopyFrom(Span(finished, finished_len));
  }

  // The Finished message should be the end of a flight.
if (ssl->method->has_unprocessed_handshake_data(ssl)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESS_HANDSHAKE_DATA); return ssl_hs_error; } ssl->method->next_message(ssl); return ssl_hs_ok; } bool ssl_send_finished(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; const SSL_SESSION *session = ssl_handshake_session(hs); uint8_t finished_buf[EVP_MAX_MD_SIZE]; size_t finished_len; if (!hs->transcript.GetFinishedMAC(finished_buf, &finished_len, session, ssl->server)) { return false; } auto finished = Span(finished_buf, finished_len); // Log the master secret, if logging is enabled. if (!ssl_log_secret(ssl, "CLIENT_RANDOM", session->secret)) { return false; } // Copy the Finished so we can use it for renegotiation checks. bool ok = ssl->server ? ssl->s3->previous_server_finished.TryCopyFrom(finished) : ssl->s3->previous_client_finished.TryCopyFrom(finished); if (!ok) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return ssl_hs_error; } ScopedCBB cbb; CBB body; if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_FINISHED) || !CBB_add_bytes(&body, finished.data(), finished.size()) || !ssl_add_message_cbb(ssl, cbb.get())) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } return true; } bool ssl_send_tls12_certificate(SSL_HANDSHAKE *hs) { ScopedCBB cbb; CBB body, certs, cert; if (!hs->ssl->method->init_message(hs->ssl, cbb.get(), &body, SSL3_MT_CERTIFICATE) || !CBB_add_u24_length_prefixed(&body, &certs)) { return false; } if (hs->credential != nullptr) { assert(hs->credential->type == SSLCredentialType::kX509); STACK_OF(CRYPTO_BUFFER) *chain = hs->credential->chain.get(); for (size_t i = 0; i < sk_CRYPTO_BUFFER_num(chain); i++) { CRYPTO_BUFFER *buffer = sk_CRYPTO_BUFFER_value(chain, i); if (!CBB_add_u24_length_prefixed(&certs, &cert) || !CBB_add_bytes(&cert, CRYPTO_BUFFER_data(buffer), CRYPTO_BUFFER_len(buffer))) { return false; } } } return ssl_add_message_cbb(hs->ssl, cbb.get()); } const 
SSL_SESSION *ssl_handshake_session(const SSL_HANDSHAKE *hs) {
  // Prefer the in-progress session; fall back to the offered/resumed one.
  if (hs->new_session) {
    return hs->new_session.get();
  }
  return hs->ssl->session.get();
}

// Drives the handshake state machine until it completes, errors, or blocks.
// Returns 1 on success (or early return, with |*out_early_return| set), -1
// or 0 when blocked/failed, matching SSL_get_error() semantics.
int ssl_run_handshake(SSL_HANDSHAKE *hs, bool *out_early_return) {
  SSL *const ssl = hs->ssl;
  for (;;) {
    // If a timeout during the handshake triggered a DTLS ACK or retransmit, we
    // resolve that first. E.g., if |ssl_hs_private_key_operation| is slow, the
    // ACK timer may fire.
    if (hs->wait != ssl_hs_error && SSL_is_dtls(ssl)) {
      int ret = ssl->method->flush(ssl);
      if (ret <= 0) {
        return ret;
      }
    }

    // Resolve the operation the handshake was waiting on. Each condition may
    // halt the handshake by returning, or continue executing if the handshake
    // may immediately proceed. Cases which halt the handshake can clear
    // |hs->wait| to re-enter the state machine on the next iteration, or leave
    // it set to keep the condition sticky.
    switch (hs->wait) {
      case ssl_hs_error:
        ERR_restore_state(hs->error.get());
        return -1;

      case ssl_hs_flush: {
        int ret = ssl->method->flush(ssl);
        if (ret <= 0) {
          return ret;
        }
        break;
      }

      case ssl_hs_read_server_hello:
      case ssl_hs_read_message:
      case ssl_hs_read_change_cipher_spec: {
        if (SSL_is_quic(ssl)) {
          // QUIC has no ChangeCipherSpec messages.
          assert(hs->wait != ssl_hs_read_change_cipher_spec);
          // The caller should call |SSL_provide_quic_data|. Clear |hs->wait|
          // so the handshake can check if there is sufficient data next
          // iteration.
          ssl->s3->rwstate = SSL_ERROR_WANT_READ;
          hs->wait = ssl_hs_ok;
          return -1;
        }

        uint8_t alert = SSL_AD_DECODE_ERROR;
        size_t consumed = 0;
        ssl_open_record_t ret;
        if (hs->wait == ssl_hs_read_change_cipher_spec) {
          ret = ssl_open_change_cipher_spec(ssl, &consumed, &alert,
                                            ssl->s3->read_buffer.span());
        } else {
          ret = ssl_open_handshake(ssl, &consumed, &alert,
                                   ssl->s3->read_buffer.span());
        }
        if (ret == ssl_open_record_error &&
            hs->wait == ssl_hs_read_server_hello) {
          uint32_t err = ERR_peek_error();
          if (ERR_GET_LIB(err) == ERR_LIB_SSL &&
              ERR_GET_REASON(err) == SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE) {
            // Add a dedicated error code to the queue for a handshake_failure
            // alert in response to ClientHello. This matches NSS's client
            // behavior and gives a better error on a (probable) failure to
            // negotiate initial parameters. Note: this error code comes after
            // the original one.
            //
            // See https://crbug.com/446505.
            OPENSSL_PUT_ERROR(SSL, SSL_R_HANDSHAKE_FAILURE_ON_CLIENT_HELLO);
          }
        }
        bool retry;
        int bio_ret = ssl_handle_open_record(ssl, &retry, ret, consumed, alert);
        if (bio_ret <= 0) {
          return bio_ret;
        }
        if (retry) {
          continue;
        }
        ssl->s3->read_buffer.DiscardConsumed();
        break;
      }

      case ssl_hs_read_end_of_early_data: {
        if (ssl->s3->hs->can_early_read) {
          // While we are processing early data, the handshake returns early.
          *out_early_return = true;
          return 1;
        }
        hs->wait = ssl_hs_ok;
        break;
      }

      case ssl_hs_certificate_selection_pending:
        ssl->s3->rwstate = SSL_ERROR_PENDING_CERTIFICATE;
        hs->wait = ssl_hs_ok;
        return -1;

      case ssl_hs_handoff:
        ssl->s3->rwstate = SSL_ERROR_HANDOFF;
        hs->wait = ssl_hs_ok;
        return -1;

      case ssl_hs_handback: {
        int ret = ssl->method->flush(ssl);
        if (ret <= 0) {
          return ret;
        }
        ssl->s3->rwstate = SSL_ERROR_HANDBACK;
        // Sticky: remain in handback until the caller resolves it.
        hs->wait = ssl_hs_handback;
        return -1;
      }

      // The following cases are associated with callback APIs which expect to
      // be called each time the state machine runs. Thus they set |hs->wait|
      // to |ssl_hs_ok| so that, next time, we re-enter the state machine and
      // call the callback again.
      case ssl_hs_x509_lookup:
        ssl->s3->rwstate = SSL_ERROR_WANT_X509_LOOKUP;
        hs->wait = ssl_hs_ok;
        return -1;

      case ssl_hs_private_key_operation:
        ssl->s3->rwstate = SSL_ERROR_WANT_PRIVATE_KEY_OPERATION;
        hs->wait = ssl_hs_ok;
        return -1;

      case ssl_hs_pending_session:
        ssl->s3->rwstate = SSL_ERROR_PENDING_SESSION;
        hs->wait = ssl_hs_ok;
        return -1;

      case ssl_hs_pending_ticket:
        ssl->s3->rwstate = SSL_ERROR_PENDING_TICKET;
        hs->wait = ssl_hs_ok;
        return -1;

      case ssl_hs_certificate_verify:
        ssl->s3->rwstate = SSL_ERROR_WANT_CERTIFICATE_VERIFY;
        hs->wait = ssl_hs_ok;
        return -1;

      case ssl_hs_early_data_rejected:
        assert(ssl->s3->early_data_reason != ssl_early_data_unknown);
        assert(!hs->can_early_write);
        ssl->s3->rwstate = SSL_ERROR_EARLY_DATA_REJECTED;
        return -1;

      case ssl_hs_early_return:
        if (!ssl->server) {
          // On ECH reject, the handshake should never complete.
          assert(ssl->s3->ech_status != ssl_ech_rejected);
        }
        *out_early_return = true;
        hs->wait = ssl_hs_ok;
        return 1;

      case ssl_hs_hints_ready:
        ssl->s3->rwstate = SSL_ERROR_HANDSHAKE_HINTS_READY;
        return -1;

      case ssl_hs_ok:
        break;
    }

    // Run the state machine again.
    hs->wait = ssl->do_handshake(hs);
    if (hs->wait == ssl_hs_error) {
      hs->error.reset(ERR_save_state());
      return -1;
    }
    if (hs->wait == ssl_hs_ok) {
      if (!ssl->server) {
        // On ECH reject, the handshake should never complete.
        assert(ssl->s3->ech_status != ssl_ech_rejected);
      }
      // The handshake has completed.
      *out_early_return = false;
      return 1;
    }

    // If the handshake returns |ssl_hs_flush|, implicitly finish the flight.
    // This is a convenience so we do not need to manually insert this
    // throughout the handshake.
    if (hs->wait == ssl_hs_flush) {
      ssl->method->finish_flight(ssl);
    }

    // Loop to the beginning and resolve what was blocking the handshake.
  }
}

BSSL_NAMESPACE_END



================================================
FILE: Sources/CNIOBoringSSL/ssl/handshake_client.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2005 Nokia. All rights reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the eighteen header names below were lost in extraction
// (angle-bracketed text stripped). Restore them from upstream BoringSSL
// ssl/handshake_client.cc before building — TODO confirm against upstream.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../crypto/internal.h"
#include "internal.h"

BSSL_NAMESPACE_BEGIN

// States of the TLS <= 1.2 client handshake state machine, in rough wire
// order. TLS 1.3 branches off at |state_tls13|.
enum ssl_client_hs_state_t {
  state_start_connect = 0,
  state_enter_early_data,
  state_early_reverify_server_certificate,
  state_read_server_hello,
  state_tls13,
  state_read_server_certificate,
  state_read_certificate_status,
  state_verify_server_certificate,
  state_reverify_server_certificate,
  state_read_server_key_exchange,
  state_read_certificate_request,
  state_read_server_hello_done,
  state_send_client_certificate,
  state_send_client_key_exchange,
  state_send_client_certificate_verify,
  state_send_client_finished,
  state_finish_flight,
  state_read_session_ticket,
  state_process_change_cipher_spec,
  state_read_server_finished,
  state_finish_client_handshake,
  state_done,
};

// ssl_get_client_disabled sets |*out_mask_a| and |*out_mask_k| to masks of
// disabled algorithms.
static void ssl_get_client_disabled(const SSL_HANDSHAKE *hs,
                                    uint32_t *out_mask_a,
                                    uint32_t *out_mask_k) {
  *out_mask_a = 0;
  *out_mask_k = 0;

  // PSK requires a client callback.
if (hs->config->psk_client_callback == NULL) { *out_mask_a |= SSL_aPSK; *out_mask_k |= SSL_kPSK; } } static bool ssl_add_tls13_cipher(CBB *cbb, uint16_t cipher_id, ssl_compliance_policy_t policy) { if (ssl_tls13_cipher_meets_policy(cipher_id, policy)) { return CBB_add_u16(cbb, cipher_id); } return true; } static bool ssl_write_client_cipher_list(const SSL_HANDSHAKE *hs, CBB *out, ssl_client_hello_type_t type) { const SSL *const ssl = hs->ssl; uint32_t mask_a, mask_k; ssl_get_client_disabled(hs, &mask_a, &mask_k); CBB child; if (!CBB_add_u16_length_prefixed(out, &child)) { return false; } // Add a fake cipher suite. See RFC 8701. if (ssl->ctx->grease_enabled && !CBB_add_u16(&child, ssl_get_grease_value(hs, ssl_grease_cipher))) { return false; } // Add TLS 1.3 ciphers. Order ChaCha20-Poly1305 relative to AES-GCM based on // hardware support. if (hs->max_version >= TLS1_3_VERSION) { static const uint16_t kCiphersNoAESHardware[] = { TLS1_3_CK_CHACHA20_POLY1305_SHA256 & 0xffff, TLS1_3_CK_AES_128_GCM_SHA256 & 0xffff, TLS1_3_CK_AES_256_GCM_SHA384 & 0xffff, }; static const uint16_t kCiphersAESHardware[] = { TLS1_3_CK_AES_128_GCM_SHA256 & 0xffff, TLS1_3_CK_AES_256_GCM_SHA384 & 0xffff, TLS1_3_CK_CHACHA20_POLY1305_SHA256 & 0xffff, }; static const uint16_t kCiphersCNSA[] = { TLS1_3_CK_AES_256_GCM_SHA384 & 0xffff, TLS1_3_CK_AES_128_GCM_SHA256 & 0xffff, TLS1_3_CK_CHACHA20_POLY1305_SHA256 & 0xffff, }; const bool has_aes_hw = ssl->config->aes_hw_override ? ssl->config->aes_hw_override_value : EVP_has_aes_hardware(); const bssl::Span ciphers = ssl->config->compliance_policy == ssl_compliance_policy_cnsa_202407 ? bssl::Span(kCiphersCNSA) : (has_aes_hw ? 
bssl::Span(kCiphersAESHardware) : bssl::Span(kCiphersNoAESHardware)); for (auto cipher : ciphers) { if (!ssl_add_tls13_cipher(&child, cipher, ssl->config->compliance_policy)) { return false; } } } if (hs->min_version < TLS1_3_VERSION && type != ssl_client_hello_inner) { bool any_enabled = false; for (const SSL_CIPHER *cipher : SSL_get_ciphers(ssl)) { // Skip disabled ciphers if ((cipher->algorithm_mkey & mask_k) || (cipher->algorithm_auth & mask_a)) { continue; } if (SSL_CIPHER_get_min_version(cipher) > hs->max_version || SSL_CIPHER_get_max_version(cipher) < hs->min_version) { continue; } any_enabled = true; if (!CBB_add_u16(&child, SSL_CIPHER_get_protocol_id(cipher))) { return false; } } // If all ciphers were disabled, return the error to the caller. if (!any_enabled && hs->max_version < TLS1_3_VERSION) { OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CIPHERS_AVAILABLE); return false; } } if (ssl->mode & SSL_MODE_SEND_FALLBACK_SCSV) { if (!CBB_add_u16(&child, SSL3_CK_FALLBACK_SCSV & 0xffff)) { return false; } } return CBB_flush(out); } bool ssl_write_client_hello_without_extensions(const SSL_HANDSHAKE *hs, CBB *cbb, ssl_client_hello_type_t type, bool empty_session_id) { const SSL *const ssl = hs->ssl; CBB child; if (!CBB_add_u16(cbb, hs->client_version) || !CBB_add_bytes(cbb, type == ssl_client_hello_inner ? hs->inner_client_random : ssl->s3->client_random, SSL3_RANDOM_SIZE) || !CBB_add_u8_length_prefixed(cbb, &child)) { return false; } // Do not send a session ID on renegotiation. 
if (!ssl->s3->initial_handshake_complete && // !empty_session_id && // !CBB_add_bytes(&child, hs->session_id.data(), hs->session_id.size())) { return false; } if (SSL_is_dtls(ssl)) { if (!CBB_add_u8_length_prefixed(cbb, &child) || !CBB_add_bytes(&child, hs->dtls_cookie.data(), hs->dtls_cookie.size())) { return false; } } if (!ssl_write_client_cipher_list(hs, cbb, type) || !CBB_add_u8(cbb, 1 /* one compression method */) || !CBB_add_u8(cbb, 0 /* null compression */)) { return false; } return true; } bool ssl_add_client_hello(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; ScopedCBB cbb; CBB body; ssl_client_hello_type_t type = hs->selected_ech_config ? ssl_client_hello_outer : ssl_client_hello_unencrypted; bool needs_psk_binder; Array msg; if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CLIENT_HELLO) || !ssl_write_client_hello_without_extensions(hs, &body, type, /*empty_session_id=*/false) || !ssl_add_clienthello_tlsext(hs, &body, /*out_encoded=*/nullptr, &needs_psk_binder, type, CBB_len(&body)) || !ssl->method->finish_message(ssl, cbb.get(), &msg)) { return false; } // Now that the length prefixes have been computed, fill in the placeholder // PSK binder. if (needs_psk_binder) { // ClientHelloOuter cannot have a PSK binder. Otherwise the // ClientHellOuterAAD computation would break. assert(type != ssl_client_hello_outer); if (!tls13_write_psk_binder(hs, hs->transcript, Span(msg), /*out_binder_len=*/0)) { return false; } } return ssl->method->add_message(ssl, std::move(msg)); } static bool parse_server_version(const SSL_HANDSHAKE *hs, uint16_t *out_version, uint8_t *out_alert, const ParsedServerHello &server_hello) { uint16_t legacy_version = TLS1_2_VERSION; if (SSL_is_dtls(hs->ssl)) { legacy_version = DTLS1_2_VERSION; } // If the outer version is not TLS 1.2, use it. // TODO(davidben): This function doesn't quite match the RFC8446 formulation. 
if (server_hello.legacy_version != legacy_version) { *out_version = server_hello.legacy_version; return true; } SSLExtension supported_versions(TLSEXT_TYPE_supported_versions); CBS extensions = server_hello.extensions; if (!ssl_parse_extensions(&extensions, out_alert, {&supported_versions}, /*ignore_unknown=*/true)) { return false; } if (!supported_versions.present) { *out_version = server_hello.legacy_version; return true; } if (!CBS_get_u16(&supported_versions.data, out_version) || // CBS_len(&supported_versions.data) != 0) { *out_alert = SSL_AD_DECODE_ERROR; return false; } return true; } // should_offer_early_data returns |ssl_early_data_accepted| if |hs| should // offer early data, and some other reason code otherwise. static ssl_early_data_reason_t should_offer_early_data( const SSL_HANDSHAKE *hs) { const SSL *const ssl = hs->ssl; assert(!ssl->server); if (!ssl->enable_early_data) { return ssl_early_data_disabled; } if (hs->max_version < TLS1_3_VERSION || SSL_is_dtls(ssl)) { // We discard inapplicable sessions, so this is redundant with the session // checks below, but reporting that TLS 1.3 was disabled is more useful. // // TODO(crbug.com/381113363): Support early data in DTLS 1.3. return ssl_early_data_protocol_version; } if (ssl->session == nullptr) { return ssl_early_data_no_session_offered; } if (ssl_session_protocol_version(ssl->session.get()) < TLS1_3_VERSION || ssl->session->ticket_max_early_data == 0) { return ssl_early_data_unsupported_for_session; } if (!ssl->session->early_alpn.empty()) { if (!ssl_is_alpn_protocol_allowed(hs, ssl->session->early_alpn)) { // Avoid reporting a confusing value in |SSL_get0_alpn_selected|. return ssl_early_data_alpn_mismatch; } // If the previous connection negotiated ALPS, only offer 0-RTT when the // local are settings are consistent with what we'd offer for this // connection. 
if (ssl->session->has_application_settings) { Span settings; if (!ssl_get_local_application_settings(hs, &settings, ssl->session->early_alpn) || settings != ssl->session->local_application_settings) { return ssl_early_data_alps_mismatch; } } } // Early data has not yet been accepted, but we use it as a success code. return ssl_early_data_accepted; } void ssl_done_writing_client_hello(SSL_HANDSHAKE *hs) { hs->ech_client_outer.Reset(); hs->cookie.Reset(); hs->key_share_bytes.Reset(); } static enum ssl_hs_wait_t do_start_connect(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; ssl_do_info_callback(ssl, SSL_CB_HANDSHAKE_START, 1); // |session_reused| must be reset in case this is a renegotiation. ssl->s3->session_reused = false; // Freeze the version range. if (!ssl_get_version_range(hs, &hs->min_version, &hs->max_version)) { return ssl_hs_error; } uint8_t ech_enc[EVP_HPKE_MAX_ENC_LENGTH]; size_t ech_enc_len; if (!ssl_select_ech_config(hs, ech_enc, &ech_enc_len)) { return ssl_hs_error; } // Always advertise the ClientHello version from the original maximum version, // even on renegotiation. The static RSA key exchange uses this field, and // some servers fail when it changes across handshakes. if (SSL_is_dtls(hs->ssl)) { hs->client_version = hs->max_version >= TLS1_2_VERSION ? DTLS1_2_VERSION : DTLS1_VERSION; } else { hs->client_version = hs->max_version >= TLS1_2_VERSION ? TLS1_2_VERSION : hs->max_version; } // If the configured session has expired or is not usable, drop it. We also do // not offer sessions on renegotiation. SSLSessionType session_type = SSLSessionType::kNotResumable; if (ssl->session != nullptr) { session_type = ssl_session_get_type(ssl->session.get()); if (ssl->session->is_server || !ssl_supports_version(hs, ssl->session->ssl_version) || // Do not offer TLS 1.2 sessions with ECH. ClientHelloInner does not // offer TLS 1.2, and the cleartext session ID may leak the server // identity. 
(hs->selected_ech_config &&
         ssl_session_protocol_version(ssl->session.get()) < TLS1_3_VERSION) ||
        session_type == SSLSessionType::kNotResumable ||
        // Don't offer TLS 1.2 tickets if disabled.
        (session_type == SSLSessionType::kTicket &&
         (SSL_get_options(ssl) & SSL_OP_NO_TICKET)) ||
        !ssl_session_is_time_valid(ssl, ssl->session.get()) ||
        SSL_is_quic(ssl) != int{ssl->session->is_quic} ||
        ssl->s3->initial_handshake_complete) {
      ssl_set_session(ssl, nullptr);
      session_type = SSLSessionType::kNotResumable;
    }
  }

  if (!RAND_bytes(ssl->s3->client_random, sizeof(ssl->s3->client_random))) {
    return ssl_hs_error;
  }
  if (hs->selected_ech_config &&
      !RAND_bytes(hs->inner_client_random, sizeof(hs->inner_client_random))) {
    return ssl_hs_error;
  }

  // Compatibility mode sends a random session ID. Compatibility mode is
  // enabled for TLS 1.3, but not when it's run over QUIC or DTLS.
  const bool enable_compatibility_mode = hs->max_version >= TLS1_3_VERSION &&
                                         !SSL_is_quic(ssl) &&
                                         !SSL_is_dtls(ssl);
  if (session_type == SSLSessionType::kID) {
    hs->session_id = ssl->session->session_id;
  } else if (session_type == SSLSessionType::kTicket ||
             enable_compatibility_mode) {
    // TLS 1.2 session tickets require a placeholder value to signal
    // resumption.
    hs->session_id.ResizeForOverwrite(SSL_MAX_SSL_SESSION_ID_LENGTH);
    if (!RAND_bytes(hs->session_id.data(), hs->session_id.size())) {
      return ssl_hs_error;
    }
  }

  // Record why early data was not offered, or mark it offered.
  ssl_early_data_reason_t reason = should_offer_early_data(hs);
  if (reason != ssl_early_data_accepted) {
    ssl->s3->early_data_reason = reason;
  } else {
    hs->early_data_offered = true;
  }

  if (!ssl_setup_key_shares(hs, /*override_group_id=*/0) ||
      !ssl_setup_extension_permutation(hs) ||
      !ssl_encrypt_client_hello(hs, Span(ech_enc, ech_enc_len)) ||
      !ssl_add_client_hello(hs)) {
    return ssl_hs_error;
  }

  hs->state = state_enter_early_data;
  return ssl_hs_flush;
}

// Skips straight to reading the ServerHello unless early data was offered;
// otherwise activates the early session/version before 0-RTT setup.
static enum ssl_hs_wait_t do_enter_early_data(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  if (!hs->early_data_offered) {
    hs->state = state_read_server_hello;
    return ssl_hs_ok;
  }

  // Stash the early data session and activate the early version. This must
  // happen before |do_early_reverify_server_certificate|, so early connection
  // properties are available to the callback. Note the early version may be
  // overwritten later by the final version.
  hs->early_session = UpRef(ssl->session);
  ssl->s3->version = hs->early_session->ssl_version;
  hs->is_early_version = true;

  hs->state = state_early_reverify_server_certificate;
  return ssl_hs_ok;
}

static enum ssl_hs_wait_t do_early_reverify_server_certificate(
    SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  if (ssl->ctx->reverify_on_resume) {
    // Don't send an alert on error. The alert would be in the clear, which the
    // server is not expecting anyway. Alerts in between ClientHello and
    // ServerHello cannot usefully be delivered in TLS 1.3.
    //
    // TODO(davidben): The client behavior should be to verify the certificate
    // before deciding whether to offer the session and, if invalid, decline to
    // send the session.
    switch (ssl_reverify_peer_cert(hs, /*send_alert=*/false)) {
      case ssl_verify_ok:
        break;
      case ssl_verify_invalid:
        return ssl_hs_error;
      case ssl_verify_retry:
        // Re-enter this state once the async verifier completes.
        hs->state = state_early_reverify_server_certificate;
        return ssl_hs_certificate_verify;
    }
  }

  if (!ssl->method->add_change_cipher_spec(ssl)) {
    return ssl_hs_error;
  }

  // Defer releasing the 0-RTT key to after certificate reverification, so the
  // QUIC implementation does not accidentally write data too early.
  if (!tls13_init_early_key_schedule(hs, hs->early_session.get()) ||
      !tls13_derive_early_secret(hs) ||
      !tls13_set_traffic_key(hs->ssl, ssl_encryption_early_data, evp_aead_seal,
                             hs->early_session.get(),
                             hs->early_traffic_secret)) {
    return ssl_hs_error;
  }

  hs->in_early_data = true;
  hs->can_early_write = true;
  hs->state = state_read_server_hello;
  return ssl_hs_early_return;
}

// handle_hello_verify_request processes a DTLS HelloVerifyRequest, saving the
// server's cookie and re-sending the ClientHello with it attached. Returns
// true on success and false on error.
static bool handle_hello_verify_request(SSL_HANDSHAKE *hs,
                                        const SSLMessage &msg) {
  SSL *const ssl = hs->ssl;
  assert(SSL_is_dtls(ssl));
  assert(msg.type == DTLS1_MT_HELLO_VERIFY_REQUEST);
  assert(!hs->received_hello_verify_request);

  CBS hello_verify_request = msg.body, cookie;
  uint16_t server_version;
  if (!CBS_get_u16(&hello_verify_request, &server_version) ||
      !CBS_get_u8_length_prefixed(&hello_verify_request, &cookie) ||
      CBS_len(&hello_verify_request) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
    return false;
  }

  if (!hs->dtls_cookie.CopyFrom(cookie)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
    return false;
  }
  hs->received_hello_verify_request = true;

  ssl->method->next_message(ssl);

  // DTLS resets the handshake buffer after HelloVerifyRequest.
  if (!hs->transcript.Init()) {
    return false;
  }
  return ssl_add_client_hello(hs);
}

// ssl_parse_server_hello parses |msg| as a ServerHello into |out|. On parse
// failure it sets |*out_alert| to the alert to send and returns false.
bool ssl_parse_server_hello(ParsedServerHello *out, uint8_t *out_alert,
                            const SSLMessage &msg) {
  if (msg.type != SSL3_MT_SERVER_HELLO) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE);
    *out_alert = SSL_AD_UNEXPECTED_MESSAGE;
    return false;
  }
  out->raw = msg.raw;
  CBS body = msg.body;
  if (!CBS_get_u16(&body, &out->legacy_version) ||
      !CBS_get_bytes(&body, &out->random, SSL3_RANDOM_SIZE) ||
      !CBS_get_u8_length_prefixed(&body, &out->session_id) ||
      CBS_len(&out->session_id) > SSL3_SESSION_ID_SIZE ||
      !CBS_get_u16(&body, &out->cipher_suite) ||
      !CBS_get_u8(&body, &out->compression_method)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    *out_alert = SSL_AD_DECODE_ERROR;
    return false;
  }
  // In TLS 1.2 and below, empty extensions blocks may be omitted. In TLS 1.3,
  // ServerHellos always have extensions, so this can be applied generically.
  CBS_init(&out->extensions, nullptr, 0);
  if ((CBS_len(&body) != 0 &&
       !CBS_get_u16_length_prefixed(&body, &out->extensions)) ||
      CBS_len(&body) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    *out_alert = SSL_AD_DECODE_ERROR;
    return false;
  }
  return true;
}

// do_read_server_hello consumes the ServerHello (or, in DTLS, a
// HelloVerifyRequest), negotiates the protocol version, and dispatches to the
// TLS 1.3 or TLS 1.2 handshake accordingly.
static enum ssl_hs_wait_t do_read_server_hello(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_server_hello;
  }

  if (SSL_is_dtls(ssl) && !hs->received_hello_verify_request &&
      msg.type == DTLS1_MT_HELLO_VERIFY_REQUEST) {
    if (!handle_hello_verify_request(hs, msg)) {
      return ssl_hs_error;
    }
    hs->received_hello_verify_request = true;
    hs->state = state_read_server_hello;
    return ssl_hs_flush;
  }

  ParsedServerHello server_hello;
  uint16_t server_version;
  uint8_t alert = SSL_AD_DECODE_ERROR;
  if (!ssl_parse_server_hello(&server_hello, &alert, msg) ||
      !parse_server_version(hs, &server_version, &alert, server_hello)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    return ssl_hs_error;
  }

  if (!ssl_supports_version(hs, server_version)) {
    OPENSSL_PUT_ERROR(SSL,
SSL_R_UNSUPPORTED_PROTOCOL);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_PROTOCOL_VERSION);
    return ssl_hs_error;
  }

  if (!ssl->s3->initial_handshake_complete) {
    // |ssl->s3->version| may be set due to 0-RTT. If it was to a different
    // value, the check below will fire.
    assert(ssl->s3->version == 0 ||
           (hs->is_early_version &&
            ssl->s3->version == hs->early_session->ssl_version));
    ssl->s3->version = server_version;
    hs->is_early_version = false;
  } else if (server_version != ssl->s3->version) {
    // On renegotiation, the server may not change the negotiated version.
    OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SSL_VERSION);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_PROTOCOL_VERSION);
    return ssl_hs_error;
  }

  // If the version did not match, stop sending 0-RTT data.
  if (hs->early_data_offered &&
      ssl->s3->version != hs->early_session->ssl_version) {
    // This is currently only possible by reading a TLS 1.2 (or earlier)
    // ServerHello in response to TLS 1.3. If there is ever a TLS 1.4, or
    // another variant of TLS 1.3, the fatal error below will need to be a clean
    // 0-RTT reject.
    assert(ssl_protocol_version(ssl) < TLS1_3_VERSION);
    assert(ssl_session_protocol_version(hs->early_session.get()) >=
           TLS1_3_VERSION);

    // A TLS 1.2 server would not know to skip the early data we offered, so
    // there is no point in continuing the handshake. Report an error code as
    // soon as we detect this. The caller may use this error code to implement
    // the fallback described in RFC 8446 appendix D.3.
    //
    // Disconnect early writes. This ensures subsequent |SSL_write| calls query
    // the handshake which, in turn, will replay the error code rather than fail
    // at the |write_shutdown| check. See https://crbug.com/1078515.
    // TODO(davidben): Should all handshake errors do this? What about record
    // decryption failures?
    //
    // TODO(crbug.com/381113363): Although missing from the spec, a DTLS 1.2
    // server will already naturally skip 0-RTT data. If we implement DTLS 1.3
    // 0-RTT, we may want a clean reject.
    assert(!SSL_is_dtls(ssl));
    hs->can_early_write = false;
    OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_VERSION_ON_EARLY_DATA);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_PROTOCOL_VERSION);
    return ssl_hs_error;
  }

  if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) {
    // HelloVerifyRequest is a DTLS-1.2-only message; it may not precede a
    // TLS 1.3 ServerHello.
    if (hs->received_hello_verify_request) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_MESSAGE);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_PROTOCOL_VERSION);
      return ssl_hs_error;
    }
    hs->state = state_tls13;
    return ssl_hs_ok;
  }

  // Clear some TLS 1.3 state that no longer needs to be retained.
  hs->key_shares[0].reset();
  hs->key_shares[1].reset();
  ssl_done_writing_client_hello(hs);

  // TLS 1.2 handshakes cannot accept ECH.
  if (hs->selected_ech_config) {
    ssl->s3->ech_status = ssl_ech_rejected;
  }

  // Copy over the server random.
  OPENSSL_memcpy(ssl->s3->server_random, CBS_data(&server_hello.random),
                 SSL3_RANDOM_SIZE);

  // Enforce the TLS 1.3 anti-downgrade feature.
  if (!ssl->s3->initial_handshake_complete &&
      hs->max_version >= TLS1_3_VERSION) {
    static_assert(
        sizeof(kTLS12DowngradeRandom) == sizeof(kTLS13DowngradeRandom),
        "downgrade signals have different size");
    static_assert(
        sizeof(kJDK11DowngradeRandom) == sizeof(kTLS13DowngradeRandom),
        "downgrade signals have different size");
    // The downgrade sentinel lives in the trailing bytes of the server random.
    auto suffix =
        Span(ssl->s3->server_random).last(sizeof(kTLS13DowngradeRandom));
    if (suffix == kTLS12DowngradeRandom || suffix == kTLS13DowngradeRandom ||
        suffix == kJDK11DowngradeRandom) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_TLS13_DOWNGRADE);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_hs_error;
    }
  }

  // The cipher must be allowed in the selected version and enabled.
  const SSL_CIPHER *cipher = SSL_get_cipher_by_value(server_hello.cipher_suite);
  uint32_t mask_a, mask_k;
  ssl_get_client_disabled(hs, &mask_a, &mask_k);
  // Reject ciphers that are unknown, disabled by configuration, outside the
  // negotiated version's range, or not in our offered cipher list.
  if (cipher == nullptr ||                                                //
      (cipher->algorithm_mkey & mask_k) ||                                //
      (cipher->algorithm_auth & mask_a) ||                                //
      SSL_CIPHER_get_min_version(cipher) > ssl_protocol_version(ssl) ||   //
      SSL_CIPHER_get_max_version(cipher) < ssl_protocol_version(ssl) ||   //
      !sk_SSL_CIPHER_find(SSL_get_ciphers(ssl), nullptr, cipher)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CIPHER_RETURNED);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
    return ssl_hs_error;
  }
  hs->new_cipher = cipher;

  if (!hs->session_id.empty() &&
      Span(server_hello.session_id) == hs->session_id) {
    // Echoing the ClientHello session ID in TLS 1.2, whether from the session
    // or a synthetic one, indicates resumption. If there was no session (or if
    // the session was only offered in ECH ClientHelloInner), this was the
    // TLS 1.3 compatibility mode session ID. As we know this is not a session
    // the server knows about, any server resuming it is in error. Reject the
    // first connection deterministically, rather than installing an invalid
    // session into the session cache. https://crbug.com/796910
    if (ssl->session == nullptr || ssl->s3->ech_status == ssl_ech_rejected) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_SERVER_ECHOED_INVALID_SESSION_ID);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_hs_error;
    }
    // A resumed session must keep its original version and cipher.
    if (ssl->session->ssl_version != ssl->s3->version) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_OLD_SESSION_VERSION_NOT_RETURNED);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_hs_error;
    }
    if (ssl->session->cipher != hs->new_cipher) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_hs_error;
    }
    if (!ssl_session_is_context_valid(hs, ssl->session.get())) {
      // This is actually a client application bug.
      OPENSSL_PUT_ERROR(SSL,
                        SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_hs_error;
    }
    // We never offer sessions on renegotiation.
    assert(!ssl->s3->initial_handshake_complete);
    ssl->s3->session_reused = true;
  } else {
    // The session wasn't resumed. Create a fresh SSL_SESSION to fill out.
    ssl_set_session(ssl, NULL);
    if (!ssl_get_new_session(hs)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
      return ssl_hs_error;
    }
    // Save the session ID from the server. This may be empty if the session
    // isn't resumable, or if we'll receive a session ticket later. The
    // ServerHello parser ensures |server_hello.session_id| is within bounds.
    hs->new_session->session_id.CopyFrom(server_hello.session_id);
    hs->new_session->cipher = hs->new_cipher;
  }

  // Now that the cipher is known, initialize the handshake hash and hash the
  // ServerHello.
  if (!hs->transcript.InitHash(ssl_protocol_version(ssl), hs->new_cipher) ||
      !ssl_hash_message(hs, msg)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
    return ssl_hs_error;
  }

  // If doing a full handshake, the server may request a client certificate
  // which requires hashing the handshake transcript. Otherwise, the handshake
  // buffer may be released.
  if (ssl->session != NULL ||
      !ssl_cipher_uses_certificate_auth(hs->new_cipher)) {
    hs->transcript.FreeBuffer();
  }

  // Only the NULL compression algorithm is supported.
  if (server_hello.compression_method != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
    return ssl_hs_error;
  }

  if (!ssl_parse_serverhello_tlsext(hs, &server_hello.extensions)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT);
    return ssl_hs_error;
  }

  // The extended_master_secret state must match between the offered session
  // and what the server echoed back.
  if (ssl->session != NULL &&
      hs->extended_master_secret != ssl->session->extended_master_secret) {
    if (ssl->session->extended_master_secret) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_RESUMED_EMS_SESSION_WITHOUT_EMS_EXTENSION);
    } else {
      OPENSSL_PUT_ERROR(SSL, SSL_R_RESUMED_NON_EMS_SESSION_WITH_EMS_EXTENSION);
    }
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);

  if (ssl->session != NULL) {
    // Resumption: optionally re-verify the cached certificate, otherwise skip
    // straight to the session ticket.
    if (ssl->ctx->reverify_on_resume &&
        ssl_cipher_uses_certificate_auth(hs->new_cipher)) {
      hs->state = state_reverify_server_certificate;
    } else {
      hs->state = state_read_session_ticket;
    }
    return ssl_hs_ok;
  }

  hs->state = state_read_server_certificate;
  return ssl_hs_ok;
}

// do_tls13 delegates to the TLS 1.3 client handshake until it completes.
static enum ssl_hs_wait_t do_tls13(SSL_HANDSHAKE *hs) {
  enum ssl_hs_wait_t wait = tls13_client_handshake(hs);
  if (wait == ssl_hs_ok) {
    hs->state = state_finish_client_handshake;
    return ssl_hs_ok;
  }
  return wait;
}

// do_read_server_certificate reads and parses the server Certificate message
// for certificate-authenticated TLS 1.2 ciphers.
static enum ssl_hs_wait_t do_read_server_certificate(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  if (!ssl_cipher_uses_certificate_auth(hs->new_cipher)) {
    hs->state = state_read_certificate_status;
    return ssl_hs_ok;
  }

  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }

  if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE) ||
      !ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  CBS body = msg.body;
  uint8_t alert = SSL_AD_DECODE_ERROR;
  if (!ssl_parse_cert_chain(&alert, &hs->new_session->certs, &hs->peer_pubkey,
                            NULL, &body, ssl->ctx->pool)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    return ssl_hs_error;
  }

  // The chain must be non-empty and the message fully consumed.
  if (sk_CRYPTO_BUFFER_num(hs->new_session->certs.get()) == 0 ||
      CBS_len(&body) != 0 ||
      !ssl->ctx->x509_method->session_cache_objects(hs->new_session.get())) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
    return ssl_hs_error;
  }

  if (!ssl_check_leaf_certificate(
          hs, hs->peer_pubkey.get(),
          sk_CRYPTO_BUFFER_value(hs->new_session->certs.get(), 0))) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);
  hs->state = state_read_certificate_status;
  return ssl_hs_ok;
}

// do_read_certificate_status reads an optional CertificateStatus message
// carrying a stapled OCSP response.
static enum ssl_hs_wait_t do_read_certificate_status(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  if (!hs->certificate_status_expected) {
    hs->state = state_verify_server_certificate;
    return ssl_hs_ok;
  }

  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }

  if (msg.type != SSL3_MT_CERTIFICATE_STATUS) {
    // A server may send status_request in ServerHello and then change its mind
    // about sending CertificateStatus.
    hs->state = state_verify_server_certificate;
    return ssl_hs_ok;
  }

  if (!ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  CBS certificate_status = msg.body, ocsp_response;
  uint8_t status_type;
  if (!CBS_get_u8(&certificate_status, &status_type) ||                   //
      status_type != TLSEXT_STATUSTYPE_ocsp ||                            //
      !CBS_get_u24_length_prefixed(&certificate_status, &ocsp_response) ||  //
      CBS_len(&ocsp_response) == 0 ||                                     //
      CBS_len(&certificate_status) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
    return ssl_hs_error;
  }

  hs->new_session->ocsp_response.reset(
      CRYPTO_BUFFER_new_from_CBS(&ocsp_response, ssl->ctx->pool));
  if (hs->new_session->ocsp_response == nullptr) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);
  hs->state = state_verify_server_certificate;
  return ssl_hs_ok;
}

// do_verify_server_certificate runs certificate verification for a full
// handshake, possibly suspending for an asynchronous verifier.
static enum ssl_hs_wait_t do_verify_server_certificate(SSL_HANDSHAKE *hs) {
  if (!ssl_cipher_uses_certificate_auth(hs->new_cipher)) {
    hs->state =
        state_read_server_key_exchange;
    return ssl_hs_ok;
  }

  switch (ssl_verify_peer_cert(hs)) {
    case ssl_verify_ok:
      break;
    case ssl_verify_invalid:
      return ssl_hs_error;
    case ssl_verify_retry:
      // Re-enter this state once the async verifier completes.
      hs->state = state_verify_server_certificate;
      return ssl_hs_certificate_verify;
  }

  hs->state = state_read_server_key_exchange;
  return ssl_hs_ok;
}

// do_reverify_server_certificate re-verifies the certificate of a resumed
// session when |reverify_on_resume| is configured.
static enum ssl_hs_wait_t do_reverify_server_certificate(SSL_HANDSHAKE *hs) {
  assert(hs->ssl->ctx->reverify_on_resume);

  switch (ssl_reverify_peer_cert(hs, /*send_alert=*/true)) {
    case ssl_verify_ok:
      break;
    case ssl_verify_invalid:
      return ssl_hs_error;
    case ssl_verify_retry:
      hs->state = state_reverify_server_certificate;
      return ssl_hs_certificate_verify;
  }

  hs->state = state_read_session_ticket;
  return ssl_hs_ok;
}

// do_read_server_key_exchange reads the (possibly optional) ServerKeyExchange
// message, extracting the PSK identity hint and/or ECDHE parameters and
// verifying the server's signature over them.
static enum ssl_hs_wait_t do_read_server_key_exchange(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }

  if (msg.type != SSL3_MT_SERVER_KEY_EXCHANGE) {
    // Some ciphers (pure PSK) have an optional ServerKeyExchange message.
    if (ssl_cipher_requires_server_key_exchange(hs->new_cipher)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE);
      return ssl_hs_error;
    }

    hs->state = state_read_certificate_request;
    return ssl_hs_ok;
  }

  if (!ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  uint32_t alg_k = hs->new_cipher->algorithm_mkey;
  uint32_t alg_a = hs->new_cipher->algorithm_auth;

  CBS server_key_exchange = msg.body;
  if (alg_a & SSL_aPSK) {
    CBS psk_identity_hint;

    // Each of the PSK key exchanges begins with a psk_identity_hint.
    if (!CBS_get_u16_length_prefixed(&server_key_exchange,
                                     &psk_identity_hint)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
      return ssl_hs_error;
    }

    // Store the PSK identity hint for the ClientKeyExchange. Assume that the
    // maximum length of a PSK identity hint can be as long as the maximum
    // length of a PSK identity. Also do not allow NULL characters; identities
    // are saved as C strings.
    //
    // TODO(davidben): Should invalid hints be ignored? It's a hint rather than
    // a specific identity.
    if (CBS_len(&psk_identity_hint) > PSK_MAX_IDENTITY_LEN ||
        CBS_contains_zero_byte(&psk_identity_hint)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
      return ssl_hs_error;
    }

    // Save non-empty identity hints as a C string. Empty identity hints we
    // treat as missing. Plain PSK makes it possible to send either no hint
    // (omit ServerKeyExchange) or an empty hint, while ECDHE_PSK can only spell
    // empty hint. Having different capabilities is odd, so we interpret empty
    // and missing as identical.
    char *raw = nullptr;
    if (CBS_len(&psk_identity_hint) != 0 &&
        !CBS_strdup(&psk_identity_hint, &raw)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
      return ssl_hs_error;
    }
    hs->peer_psk_identity_hint.reset(raw);
  }

  if (alg_k & SSL_kECDHE) {
    // Parse the server parameters.
    uint8_t group_type;
    uint16_t group_id;
    CBS point;
    if (!CBS_get_u8(&server_key_exchange, &group_type) ||
        group_type != NAMED_CURVE_TYPE ||
        !CBS_get_u16(&server_key_exchange, &group_id) ||
        !CBS_get_u8_length_prefixed(&server_key_exchange, &point)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
      return ssl_hs_error;
    }

    // Ensure the group is consistent with preferences.
    if (!tls1_check_group_id(hs, group_id)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_hs_error;
    }

    // Save the group and peer public key for later.
    hs->new_session->group_id = group_id;
    if (!hs->peer_key.CopyFrom(point)) {
      return ssl_hs_error;
    }
  } else if (!(alg_k & SSL_kPSK)) {
    // Only ECDHE and plain PSK key exchanges may carry ServerKeyExchange.
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE);
    return ssl_hs_error;
  }

  // At this point, |server_key_exchange| contains the signature, if any, while
  // |msg.body| contains the entire message. From that, derive a CBS containing
  // just the parameter.
  CBS parameter;
  CBS_init(&parameter, CBS_data(&msg.body),
           CBS_len(&msg.body) - CBS_len(&server_key_exchange));

  // ServerKeyExchange should be signed by the server's public key.
  if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) {
    uint16_t signature_algorithm = 0;
    if (ssl_protocol_version(ssl) >= TLS1_2_VERSION) {
      // TLS 1.2 prefixes the signature with an explicit algorithm identifier.
      if (!CBS_get_u16(&server_key_exchange, &signature_algorithm)) {
        OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
        ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
        return ssl_hs_error;
      }
      uint8_t alert = SSL_AD_DECODE_ERROR;
      if (!tls12_check_peer_sigalg(hs, &alert, signature_algorithm,
                                   hs->peer_pubkey.get())) {
        ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
        return ssl_hs_error;
      }
      hs->new_session->peer_signature_algorithm = signature_algorithm;
    } else if (!tls1_get_legacy_signature_algorithm(&signature_algorithm,
                                                    hs->peer_pubkey.get())) {
      // Pre-TLS-1.2, the algorithm is implied by the certificate key type.
      OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_CERTIFICATE);
      return ssl_hs_error;
    }

    // The last field in |server_key_exchange| is the signature.
    CBS signature;
    if (!CBS_get_u16_length_prefixed(&server_key_exchange, &signature) ||
        CBS_len(&server_key_exchange) != 0) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
      return ssl_hs_error;
    }

    // The signed payload is client_random || server_random || parameters.
    ScopedCBB transcript;
    Array<uint8_t> transcript_data;
    if (!CBB_init(transcript.get(),
                  2 * SSL3_RANDOM_SIZE + CBS_len(&parameter)) ||
        !CBB_add_bytes(transcript.get(), ssl->s3->client_random,
                       SSL3_RANDOM_SIZE) ||
        !CBB_add_bytes(transcript.get(), ssl->s3->server_random,
                       SSL3_RANDOM_SIZE) ||
        !CBB_add_bytes(transcript.get(), CBS_data(&parameter),
                       CBS_len(&parameter)) ||
        !CBBFinishArray(transcript.get(), &transcript_data)) {
      OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
      return ssl_hs_error;
    }

    if (!ssl_public_key_verify(ssl, signature, signature_algorithm,
                               hs->peer_pubkey.get(), transcript_data)) {
      // bad signature
      OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SIGNATURE);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR);
      return ssl_hs_error;
    }
  } else {
    // PSK ciphers are the only supported certificate-less ciphers.
    assert(alg_a == SSL_aPSK);

    if (CBS_len(&server_key_exchange) > 0) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_EXTRA_DATA_IN_MESSAGE);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
      return ssl_hs_error;
    }
  }
  ssl->method->next_message(ssl);
  hs->state = state_read_certificate_request;
  return ssl_hs_ok;
}

// do_read_certificate_request reads an optional CertificateRequest message,
// recording the advertised certificate types, signature algorithms, and CA
// names for client authentication.
static enum ssl_hs_wait_t do_read_certificate_request(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  if (!ssl_cipher_uses_certificate_auth(hs->new_cipher)) {
    hs->state = state_read_server_hello_done;
    return ssl_hs_ok;
  }

  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }

  if (msg.type == SSL3_MT_SERVER_HELLO_DONE) {
    // If we get here we don't need the handshake buffer as we won't be doing
    // client auth.
    hs->transcript.FreeBuffer();
    hs->state = state_read_server_hello_done;
    return ssl_hs_ok;
  }

  if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE_REQUEST) ||
      !ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  // Get the certificate types.
  CBS body = msg.body, certificate_types;
  if (!CBS_get_u8_length_prefixed(&body, &certificate_types)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    return ssl_hs_error;
  }

  if (!hs->certificate_types.CopyFrom(certificate_types)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
    return ssl_hs_error;
  }

  if (ssl_protocol_version(ssl) >= TLS1_2_VERSION) {
    // TLS 1.2 adds a supported_signature_algorithms field.
    CBS supported_signature_algorithms;
    if (!CBS_get_u16_length_prefixed(&body, &supported_signature_algorithms) ||
        !tls1_parse_peer_sigalgs(hs, &supported_signature_algorithms)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      return ssl_hs_error;
    }
  }

  uint8_t alert = SSL_AD_DECODE_ERROR;
  UniquePtr<STACK_OF(CRYPTO_BUFFER)> ca_names =
      SSL_parse_CA_list(ssl, &alert, &body);
  if (!ca_names) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    return ssl_hs_error;
  }

  if (CBS_len(&body) != 0) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    return ssl_hs_error;
  }

  hs->cert_request = true;
  hs->ca_names = std::move(ca_names);
  ssl->ctx->x509_method->hs_flush_cached_ca_names(hs);

  ssl->method->next_message(ssl);
  hs->state = state_read_server_hello_done;
  return ssl_hs_ok;
}

// do_read_server_hello_done reads ServerHelloDone, which must be empty and
// must terminate the server's flight.
static enum ssl_hs_wait_t do_read_server_hello_done(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }

  if (!ssl_check_message_type(ssl, msg, SSL3_MT_SERVER_HELLO_DONE) ||
      !ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  // ServerHelloDone is empty.
  if (CBS_len(&msg.body) != 0) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    return ssl_hs_error;
  }

  // ServerHelloDone should be the end of the flight.
  if (ssl->method->has_unprocessed_handshake_data(ssl)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE);
    OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESS_HANDSHAKE_DATA);
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);
  hs->state = state_send_client_certificate;
  return ssl_hs_ok;
}

// check_credential returns true and sets |*out_sigalg| if |cred| is usable
// against the server's CertificateRequest (certificate type and signature
// algorithm constraints), and false otherwise.
static bool check_credential(SSL_HANDSHAKE *hs, const SSL_CREDENTIAL *cred,
                             uint16_t *out_sigalg) {
  if (cred->type != SSLCredentialType::kX509) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CERTIFICATE_TYPE);
    return false;
  }

  if (hs->config->check_client_certificate_type) {
    // Check the certificate types advertised by the peer.
    uint8_t cert_type;
    switch (EVP_PKEY_id(cred->pubkey.get())) {
      case EVP_PKEY_RSA:
        cert_type = SSL3_CT_RSA_SIGN;
        break;
      case EVP_PKEY_EC:
      case EVP_PKEY_ED25519:
        cert_type = TLS_CT_ECDSA_SIGN;
        break;
      default:
        OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CERTIFICATE_TYPE);
        return false;
    }
    if (std::find(hs->certificate_types.begin(), hs->certificate_types.end(),
                  cert_type) == hs->certificate_types.end()) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CERTIFICATE_TYPE);
      return false;
    }
  }

  // All currently supported credentials require a signature. Note this does not
  // check the ECDSA curve. Prior to TLS 1.3, there is no way to determine which
  // ECDSA curves are supported by the peer, so we must assume all curves are
  // supported.
  return tls1_choose_signature_algorithm(hs, cred, out_sigalg);
}

// do_send_client_certificate selects a client credential (if requested and
// available) and sends the Certificate message.
static enum ssl_hs_wait_t do_send_client_certificate(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  // The peer didn't request a certificate.
  if (!hs->cert_request) {
    hs->state = state_send_client_key_exchange;
    return ssl_hs_ok;
  }

  if (ssl->s3->ech_status == ssl_ech_rejected) {
    // Do not send client certificates on ECH reject.
    // We have not authenticated
    // the server for the name that can learn the certificate.
    SSL_certs_clear(ssl);
  } else if (hs->config->cert->cert_cb != nullptr) {
    // Call cert_cb to update the certificate.
    int rv = hs->config->cert->cert_cb(ssl, hs->config->cert->cert_cb_arg);
    if (rv == 0) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
      OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_CB_ERROR);
      return ssl_hs_error;
    }
    if (rv < 0) {
      // The callback wants to be retried; suspend the handshake.
      hs->state = state_send_client_certificate;
      return ssl_hs_x509_lookup;
    }
  }

  Array<SSL_CREDENTIAL *> creds;
  if (!ssl_get_credential_list(hs, &creds)) {
    return ssl_hs_error;
  }

  if (creds.empty()) {
    // If there were no credentials, proceed without a client certificate. In
    // this case, the handshake buffer may be released early.
    hs->transcript.FreeBuffer();
  } else {
    // Select the first credential compatible with the CertificateRequest.
    for (SSL_CREDENTIAL *cred : creds) {
      ERR_clear_error();
      uint16_t sigalg;
      if (check_credential(hs, cred, &sigalg)) {
        hs->credential = UpRef(cred);
        hs->signature_algorithm = sigalg;
        break;
      }
    }
    if (hs->credential == nullptr) {
      // The error from the last attempt is in the error queue.
      assert(ERR_peek_error() != 0);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
      return ssl_hs_error;
    }
  }

  if (!ssl_send_tls12_certificate(hs)) {
    return ssl_hs_error;
  }

  hs->state = state_send_client_key_exchange;
  return ssl_hs_ok;
}

static_assert(sizeof(size_t) >= sizeof(unsigned),
              "size_t is smaller than unsigned");

// do_send_client_key_exchange constructs and sends the ClientKeyExchange
// message and computes the premaster secret for the negotiated key exchange
// (RSA, ECDHE, and/or PSK).
static enum ssl_hs_wait_t do_send_client_key_exchange(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  ScopedCBB cbb;
  CBB body;
  if (!ssl->method->init_message(ssl, cbb.get(), &body,
                                 SSL3_MT_CLIENT_KEY_EXCHANGE)) {
    return ssl_hs_error;
  }

  Array<uint8_t> pms;
  uint32_t alg_k = hs->new_cipher->algorithm_mkey;
  uint32_t alg_a = hs->new_cipher->algorithm_auth;

  if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) {
    const CRYPTO_BUFFER *leaf =
        sk_CRYPTO_BUFFER_value(hs->new_session->certs.get(), 0);
    CBS leaf_cbs;
    CRYPTO_BUFFER_init_CBS(leaf, &leaf_cbs);

    // Check the key usage matches the cipher suite. We do this unconditionally
    // for non-RSA certificates. In particular, it's needed to distinguish ECDH
    // certificates, which we do not support, from ECDSA certificates.
    // Historically, we have not checked RSA key usages, so it is controlled by
    // a flag for now. See https://crbug.com/795089.
    ssl_key_usage_t intended_use = (alg_k & SSL_kRSA)
                                       ? key_usage_encipherment
                                       : key_usage_digital_signature;
    if (!ssl_cert_check_key_usage(&leaf_cbs, intended_use)) {
      if (hs->config->enforce_rsa_key_usage ||
          EVP_PKEY_id(hs->peer_pubkey.get()) != EVP_PKEY_RSA) {
        return ssl_hs_error;
      }
      ERR_clear_error();
      ssl->s3->was_key_usage_invalid = true;
    }
  }

  // If using a PSK key exchange, prepare the pre-shared key.
unsigned psk_len = 0; uint8_t psk[PSK_MAX_PSK_LEN]; if (alg_a & SSL_aPSK) { if (hs->config->psk_client_callback == NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_NO_CLIENT_CB); return ssl_hs_error; } char identity[PSK_MAX_IDENTITY_LEN + 1]; OPENSSL_memset(identity, 0, sizeof(identity)); psk_len = hs->config->psk_client_callback( ssl, hs->peer_psk_identity_hint.get(), identity, sizeof(identity), psk, sizeof(psk)); if (psk_len == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_IDENTITY_NOT_FOUND); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); return ssl_hs_error; } assert(psk_len <= PSK_MAX_PSK_LEN); hs->new_session->psk_identity.reset(OPENSSL_strdup(identity)); if (hs->new_session->psk_identity == nullptr) { return ssl_hs_error; } // Write out psk_identity. CBB child; if (!CBB_add_u16_length_prefixed(&body, &child) || !CBB_add_bytes(&child, (const uint8_t *)identity, OPENSSL_strnlen(identity, sizeof(identity))) || !CBB_flush(&body)) { return ssl_hs_error; } } // Depending on the key exchange method, compute |pms|. if (alg_k & SSL_kRSA) { RSA *rsa = EVP_PKEY_get0_RSA(hs->peer_pubkey.get()); if (rsa == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return ssl_hs_error; } if (!pms.InitForOverwrite(SSL_MAX_MASTER_KEY_LENGTH)) { return ssl_hs_error; } pms[0] = hs->client_version >> 8; pms[1] = hs->client_version & 0xff; if (!RAND_bytes(&pms[2], SSL_MAX_MASTER_KEY_LENGTH - 2)) { return ssl_hs_error; } CBB enc_pms; uint8_t *ptr; size_t enc_pms_len; if (!CBB_add_u16_length_prefixed(&body, &enc_pms) || // !CBB_reserve(&enc_pms, &ptr, RSA_size(rsa)) || // !RSA_encrypt(rsa, &enc_pms_len, ptr, RSA_size(rsa), pms.data(), pms.size(), RSA_PKCS1_PADDING) || // !CBB_did_write(&enc_pms, enc_pms_len) || // !CBB_flush(&body)) { return ssl_hs_error; } } else if (alg_k & SSL_kECDHE) { CBB child; if (!CBB_add_u8_length_prefixed(&body, &child)) { return ssl_hs_error; } // Generate a premaster secret and encapsulate it. 
bssl::UniquePtr kem = SSLKeyShare::Create(hs->new_session->group_id); uint8_t alert = SSL_AD_DECODE_ERROR; if (!kem || !kem->Encap(&child, &pms, &alert, hs->peer_key)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return ssl_hs_error; } if (!CBB_flush(&body)) { return ssl_hs_error; } // The peer key can now be discarded. hs->peer_key.Reset(); } else if (alg_k & SSL_kPSK) { // For plain PSK, other_secret is a block of 0s with the same length as // the pre-shared key. if (!pms.Init(psk_len)) { return ssl_hs_error; } } else { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return ssl_hs_error; } // For a PSK cipher suite, other_secret is combined with the pre-shared // key. if (alg_a & SSL_aPSK) { ScopedCBB pms_cbb; CBB child; if (!CBB_init(pms_cbb.get(), 2 + psk_len + 2 + pms.size()) || !CBB_add_u16_length_prefixed(pms_cbb.get(), &child) || !CBB_add_bytes(&child, pms.data(), pms.size()) || !CBB_add_u16_length_prefixed(pms_cbb.get(), &child) || !CBB_add_bytes(&child, psk, psk_len) || !CBBFinishArray(pms_cbb.get(), &pms)) { return ssl_hs_error; } } // The message must be added to the finished hash before calculating the // master secret. 
if (!ssl_add_message_cbb(ssl, cbb.get())) { return ssl_hs_error; } hs->new_session->secret.ResizeForOverwrite(SSL3_MASTER_SECRET_SIZE); if (!tls1_generate_master_secret(hs, Span(hs->new_session->secret), pms)) { return ssl_hs_error; } hs->new_session->extended_master_secret = hs->extended_master_secret; hs->state = state_send_client_certificate_verify; return ssl_hs_ok; } static enum ssl_hs_wait_t do_send_client_certificate_verify(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; if (!hs->cert_request || hs->credential == nullptr) { hs->state = state_send_client_finished; return ssl_hs_ok; } ScopedCBB cbb; CBB body, child; if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CERTIFICATE_VERIFY)) { return ssl_hs_error; } assert(hs->signature_algorithm != 0); if (ssl_protocol_version(ssl) >= TLS1_2_VERSION) { // Write out the digest type in TLS 1.2. if (!CBB_add_u16(&body, hs->signature_algorithm)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return ssl_hs_error; } } // Set aside space for the signature. const size_t max_sig_len = EVP_PKEY_size(hs->credential->pubkey.get()); uint8_t *ptr; if (!CBB_add_u16_length_prefixed(&body, &child) || !CBB_reserve(&child, &ptr, max_sig_len)) { return ssl_hs_error; } size_t sig_len = max_sig_len; switch (ssl_private_key_sign(hs, ptr, &sig_len, max_sig_len, hs->signature_algorithm, hs->transcript.buffer())) { case ssl_private_key_success: break; case ssl_private_key_failure: return ssl_hs_error; case ssl_private_key_retry: hs->state = state_send_client_certificate_verify; return ssl_hs_private_key_operation; } if (!CBB_did_write(&child, sig_len) || // !ssl_add_message_cbb(ssl, cbb.get())) { return ssl_hs_error; } // The handshake buffer is no longer necessary. 
hs->transcript.FreeBuffer(); hs->state = state_send_client_finished; return ssl_hs_ok; } static enum ssl_hs_wait_t do_send_client_finished(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; hs->can_release_private_key = true; if (!ssl->method->add_change_cipher_spec(ssl) || !tls1_change_cipher_state(hs, evp_aead_seal)) { return ssl_hs_error; } if (hs->next_proto_neg_seen) { static const uint8_t kZero[32] = {0}; size_t padding_len = 32 - ((ssl->s3->next_proto_negotiated.size() + 2) % 32); ScopedCBB cbb; CBB body, child; if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_NEXT_PROTO) || !CBB_add_u8_length_prefixed(&body, &child) || !CBB_add_bytes(&child, ssl->s3->next_proto_negotiated.data(), ssl->s3->next_proto_negotiated.size()) || !CBB_add_u8_length_prefixed(&body, &child) || !CBB_add_bytes(&child, kZero, padding_len) || !ssl_add_message_cbb(ssl, cbb.get())) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return ssl_hs_error; } } if (hs->channel_id_negotiated) { ScopedCBB cbb; CBB body; if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CHANNEL_ID) || !tls1_write_channel_id(hs, &body) || !ssl_add_message_cbb(ssl, cbb.get())) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return ssl_hs_error; } } if (!ssl_send_finished(hs)) { return ssl_hs_error; } hs->state = state_finish_flight; return ssl_hs_flush; } static bool can_false_start(const SSL_HANDSHAKE *hs) { const SSL *const ssl = hs->ssl; // False Start bypasses the Finished check's downgrade protection. This can // enable attacks where we send data under weaker settings than supported // (e.g. the Logjam attack). Thus we require TLS 1.2 with an ECDHE+AEAD // cipher, our strongest settings before TLS 1.3. // // Now that TLS 1.3 exists, we would like to avoid similar attacks between // TLS 1.2 and TLS 1.3, but there are too many TLS 1.2 deployments to // sacrifice False Start on them. Instead, we rely on the ServerHello.random // downgrade signal, which we unconditionally enforce. 
if (SSL_is_dtls(ssl) || // SSL_version(ssl) != TLS1_2_VERSION || // hs->new_cipher->algorithm_mkey != SSL_kECDHE || // hs->new_cipher->algorithm_mac != SSL_AEAD) { return false; } // If ECH was rejected, disable False Start. We run the handshake to // completion, including the Finished downgrade check, to authenticate the // recovery flow. if (ssl->s3->ech_status == ssl_ech_rejected) { return false; } // Additionally require ALPN or NPN by default. // // TODO(davidben): Can this constraint be relaxed globally now that cipher // suite requirements have been tightened? if (!ssl->ctx->false_start_allowed_without_alpn && ssl->s3->alpn_selected.empty() && ssl->s3->next_proto_negotiated.empty()) { return false; } return true; } static enum ssl_hs_wait_t do_finish_flight(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; if (ssl->session != NULL) { hs->state = state_finish_client_handshake; return ssl_hs_ok; } // This is a full handshake. If it involves ChannelID, then record the // handshake hashes at this point in the session so that any resumption of // this session with ChannelID can sign those hashes. if (!tls1_record_handshake_hashes_for_channel_id(hs)) { return ssl_hs_error; } hs->state = state_read_session_ticket; if ((SSL_get_mode(ssl) & SSL_MODE_ENABLE_FALSE_START) && can_false_start(hs) && // No False Start on renegotiation (would complicate the state machine). 
!ssl->s3->initial_handshake_complete) { hs->in_false_start = true; hs->can_early_write = true; return ssl_hs_early_return; } return ssl_hs_ok; } static enum ssl_hs_wait_t do_read_session_ticket(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; if (!hs->ticket_expected) { hs->state = state_process_change_cipher_spec; return ssl_hs_read_change_cipher_spec; } SSLMessage msg; if (!ssl->method->get_message(ssl, &msg)) { return ssl_hs_read_message; } if (!ssl_check_message_type(ssl, msg, SSL3_MT_NEW_SESSION_TICKET) || !ssl_hash_message(hs, msg)) { return ssl_hs_error; } CBS new_session_ticket = msg.body, ticket; uint32_t ticket_lifetime_hint; if (!CBS_get_u32(&new_session_ticket, &ticket_lifetime_hint) || !CBS_get_u16_length_prefixed(&new_session_ticket, &ticket) || CBS_len(&new_session_ticket) != 0) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return ssl_hs_error; } if (CBS_len(&ticket) == 0) { // RFC 5077 allows a server to change its mind and send no ticket after // negotiating the extension. The value of |ticket_expected| is checked in // |ssl_update_cache| so is cleared here to avoid an unnecessary update. hs->ticket_expected = false; ssl->method->next_message(ssl); hs->state = state_process_change_cipher_spec; return ssl_hs_read_change_cipher_spec; } if (ssl->session != nullptr) { // The server is sending a new ticket for an existing session. Sessions are // immutable once established, so duplicate all but the ticket of the // existing session. assert(!hs->new_session); hs->new_session = SSL_SESSION_dup(ssl->session.get(), SSL_SESSION_INCLUDE_NONAUTH); if (!hs->new_session) { return ssl_hs_error; } } // |ticket_lifetime_hint| is measured from when the ticket was issued. 
ssl_session_rebase_time(ssl, hs->new_session.get()); if (!hs->new_session->ticket.CopyFrom(ticket)) { return ssl_hs_error; } hs->new_session->ticket_lifetime_hint = ticket_lifetime_hint; // Historically, OpenSSL filled in fake session IDs for ticket-based sessions. // TODO(davidben): Are external callers relying on this? Try removing this. hs->new_session->session_id.ResizeForOverwrite(SHA256_DIGEST_LENGTH); SHA256(CBS_data(&ticket), CBS_len(&ticket), hs->new_session->session_id.data()); ssl->method->next_message(ssl); hs->state = state_process_change_cipher_spec; return ssl_hs_read_change_cipher_spec; } static enum ssl_hs_wait_t do_process_change_cipher_spec(SSL_HANDSHAKE *hs) { if (!tls1_change_cipher_state(hs, evp_aead_open)) { return ssl_hs_error; } hs->state = state_read_server_finished; return ssl_hs_ok; } static enum ssl_hs_wait_t do_read_server_finished(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; enum ssl_hs_wait_t wait = ssl_get_finished(hs); if (wait != ssl_hs_ok) { return wait; } if (ssl->session != NULL) { hs->state = state_send_client_finished; return ssl_hs_ok; } hs->state = state_finish_client_handshake; return ssl_hs_ok; } static enum ssl_hs_wait_t do_finish_client_handshake(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; if (ssl->s3->ech_status == ssl_ech_rejected) { // Release the retry configs. hs->ech_authenticated_reject = true; ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ECH_REQUIRED); OPENSSL_PUT_ERROR(SSL, SSL_R_ECH_REJECTED); return ssl_hs_error; } ssl->method->on_handshake_complete(ssl); // Note TLS 1.2 resumptions with ticket renewal have both |ssl->session| (the // resumed session) and |hs->new_session| (the session with the new ticket). bool has_new_session = hs->new_session != nullptr; if (has_new_session) { // When False Start is enabled, the handshake reports completion early. The // caller may then have passed the (then unresuable) |hs->new_session| to // another thread via |SSL_get0_session| for resumption. 
To avoid potential // race conditions in such callers, we duplicate the session before // clearing |not_resumable|. ssl->s3->established_session = SSL_SESSION_dup(hs->new_session.get(), SSL_SESSION_DUP_ALL); if (!ssl->s3->established_session) { return ssl_hs_error; } // Renegotiations do not participate in session resumption. if (!ssl->s3->initial_handshake_complete) { ssl->s3->established_session->not_resumable = false; } hs->new_session.reset(); } else { assert(ssl->session != nullptr); ssl->s3->established_session = UpRef(ssl->session); } hs->handshake_finalized = true; ssl->s3->initial_handshake_complete = true; if (has_new_session) { ssl_update_cache(ssl); } hs->state = state_done; return ssl_hs_ok; } enum ssl_hs_wait_t ssl_client_handshake(SSL_HANDSHAKE *hs) { while (hs->state != state_done) { enum ssl_hs_wait_t ret = ssl_hs_error; enum ssl_client_hs_state_t state = static_cast(hs->state); switch (state) { case state_start_connect: ret = do_start_connect(hs); break; case state_enter_early_data: ret = do_enter_early_data(hs); break; case state_early_reverify_server_certificate: ret = do_early_reverify_server_certificate(hs); break; case state_read_server_hello: ret = do_read_server_hello(hs); break; case state_tls13: ret = do_tls13(hs); break; case state_read_server_certificate: ret = do_read_server_certificate(hs); break; case state_read_certificate_status: ret = do_read_certificate_status(hs); break; case state_verify_server_certificate: ret = do_verify_server_certificate(hs); break; case state_reverify_server_certificate: ret = do_reverify_server_certificate(hs); break; case state_read_server_key_exchange: ret = do_read_server_key_exchange(hs); break; case state_read_certificate_request: ret = do_read_certificate_request(hs); break; case state_read_server_hello_done: ret = do_read_server_hello_done(hs); break; case state_send_client_certificate: ret = do_send_client_certificate(hs); break; case state_send_client_key_exchange: ret = 
do_send_client_key_exchange(hs); break; case state_send_client_certificate_verify: ret = do_send_client_certificate_verify(hs); break; case state_send_client_finished: ret = do_send_client_finished(hs); break; case state_finish_flight: ret = do_finish_flight(hs); break; case state_read_session_ticket: ret = do_read_session_ticket(hs); break; case state_process_change_cipher_spec: ret = do_process_change_cipher_spec(hs); break; case state_read_server_finished: ret = do_read_server_finished(hs); break; case state_finish_client_handshake: ret = do_finish_client_handshake(hs); break; case state_done: ret = ssl_hs_ok; break; } if (hs->state != state) { ssl_do_info_callback(hs->ssl, SSL_CB_CONNECT_LOOP, 1); } if (ret != ssl_hs_ok) { return ret; } } ssl_do_info_callback(hs->ssl, SSL_CB_HANDSHAKE_DONE, 1); return ssl_hs_ok; } const char *ssl_client_handshake_state(SSL_HANDSHAKE *hs) { enum ssl_client_hs_state_t state = static_cast(hs->state); switch (state) { case state_start_connect: return "TLS client start_connect"; case state_enter_early_data: return "TLS client enter_early_data"; case state_early_reverify_server_certificate: return "TLS client early_reverify_server_certificate"; case state_read_server_hello: return "TLS client read_server_hello"; case state_tls13: return tls13_client_handshake_state(hs); case state_read_server_certificate: return "TLS client read_server_certificate"; case state_read_certificate_status: return "TLS client read_certificate_status"; case state_verify_server_certificate: return "TLS client verify_server_certificate"; case state_reverify_server_certificate: return "TLS client reverify_server_certificate"; case state_read_server_key_exchange: return "TLS client read_server_key_exchange"; case state_read_certificate_request: return "TLS client read_certificate_request"; case state_read_server_hello_done: return "TLS client read_server_hello_done"; case state_send_client_certificate: return "TLS client send_client_certificate"; case 
state_send_client_key_exchange: return "TLS client send_client_key_exchange"; case state_send_client_certificate_verify: return "TLS client send_client_certificate_verify"; case state_send_client_finished: return "TLS client send_client_finished"; case state_finish_flight: return "TLS client finish_flight"; case state_read_session_ticket: return "TLS client read_session_ticket"; case state_process_change_cipher_spec: return "TLS client process_change_cipher_spec"; case state_read_server_finished: return "TLS client read_server_finished"; case state_finish_client_handshake: return "TLS client finish_client_handshake"; case state_done: return "TLS client done"; } return "TLS client unknown"; } BSSL_NAMESPACE_END ================================================ FILE: Sources/CNIOBoringSSL/ssl/handshake_server.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * Copyright 2005 Nokia. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN bool ssl_client_cipher_list_contains_cipher( const SSL_CLIENT_HELLO *client_hello, uint16_t id) { CBS cipher_suites; CBS_init(&cipher_suites, client_hello->cipher_suites, client_hello->cipher_suites_len); while (CBS_len(&cipher_suites) > 0) { uint16_t got_id; if (!CBS_get_u16(&cipher_suites, &got_id)) { return false; } if (got_id == id) { return true; } } return false; } static bool negotiate_version(SSL_HANDSHAKE *hs, uint8_t *out_alert, const SSL_CLIENT_HELLO *client_hello) { SSL *const ssl = hs->ssl; assert(ssl->s3->version == 0); CBS supported_versions, versions; if (ssl_client_hello_get_extension(client_hello, &supported_versions, TLSEXT_TYPE_supported_versions)) { if (!CBS_get_u8_length_prefixed(&supported_versions, &versions) || // CBS_len(&supported_versions) != 0 || // CBS_len(&versions) == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } } else { // Convert the ClientHello version to an equivalent supported_versions // extension. 
static const uint8_t kTLSVersions[] = { 0x03, 0x03, // TLS 1.2 0x03, 0x02, // TLS 1.1 0x03, 0x01, // TLS 1 }; static const uint8_t kDTLSVersions[] = { 0xfe, 0xfd, // DTLS 1.2 0xfe, 0xff, // DTLS 1.0 }; size_t versions_len = 0; if (SSL_is_dtls(ssl)) { if (client_hello->version <= DTLS1_2_VERSION) { versions_len = 4; } else if (client_hello->version <= DTLS1_VERSION) { versions_len = 2; } versions = Span(kDTLSVersions).last(versions_len); } else { if (client_hello->version >= TLS1_2_VERSION) { versions_len = 6; } else if (client_hello->version >= TLS1_1_VERSION) { versions_len = 4; } else if (client_hello->version >= TLS1_VERSION) { versions_len = 2; } versions = Span(kTLSVersions).last(versions_len); } } if (!ssl_negotiate_version(hs, out_alert, &ssl->s3->version, &versions)) { return false; } // Handle FALLBACK_SCSV. if (ssl_client_cipher_list_contains_cipher(client_hello, SSL3_CK_FALLBACK_SCSV & 0xffff) && ssl_protocol_version(ssl) < hs->max_version) { OPENSSL_PUT_ERROR(SSL, SSL_R_INAPPROPRIATE_FALLBACK); *out_alert = SSL3_AD_INAPPROPRIATE_FALLBACK; return false; } return true; } static UniquePtr ssl_parse_client_cipher_list( const SSL_CLIENT_HELLO *client_hello) { CBS cipher_suites; CBS_init(&cipher_suites, client_hello->cipher_suites, client_hello->cipher_suites_len); UniquePtr sk(sk_SSL_CIPHER_new_null()); if (!sk) { return nullptr; } while (CBS_len(&cipher_suites) > 0) { uint16_t cipher_suite; if (!CBS_get_u16(&cipher_suites, &cipher_suite)) { OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_IN_RECEIVED_CIPHER_LIST); return nullptr; } const SSL_CIPHER *c = SSL_get_cipher_by_value(cipher_suite); if (c != NULL && !sk_SSL_CIPHER_push(sk.get(), c)) { return nullptr; } } return sk; } static const SSL_CIPHER *choose_cipher(SSL_HANDSHAKE *hs, const STACK_OF(SSL_CIPHER) *client_pref, uint32_t mask_k, uint32_t mask_a) { SSL *const ssl = hs->ssl; const STACK_OF(SSL_CIPHER) *prio, *allow; // in_group_flags will either be NULL, or will point to an array of bytes // which indicate 
equal-preference groups in the |prio| stack. See the // comment about |in_group_flags| in the |SSLCipherPreferenceList| // struct. const bool *in_group_flags; // group_min contains the minimal index so far found in a group, or -1 if no // such value exists yet. int group_min = -1; const SSLCipherPreferenceList *server_pref = hs->config->cipher_list ? hs->config->cipher_list.get() : ssl->ctx->cipher_list.get(); if (ssl->options & SSL_OP_CIPHER_SERVER_PREFERENCE) { prio = server_pref->ciphers.get(); in_group_flags = server_pref->in_group_flags; allow = client_pref; } else { prio = client_pref; in_group_flags = NULL; allow = server_pref->ciphers.get(); } for (size_t i = 0; i < sk_SSL_CIPHER_num(prio); i++) { const SSL_CIPHER *c = sk_SSL_CIPHER_value(prio, i); size_t cipher_index; if ( // Check if the cipher is supported for the current version. SSL_CIPHER_get_min_version(c) <= ssl_protocol_version(ssl) && // ssl_protocol_version(ssl) <= SSL_CIPHER_get_max_version(c) && // // Check the cipher is supported for the server configuration. (c->algorithm_mkey & mask_k) && // (c->algorithm_auth & mask_a) && // // Check the cipher is in the |allow| list. sk_SSL_CIPHER_find(allow, &cipher_index, c)) { if (in_group_flags != NULL && in_group_flags[i]) { // This element of |prio| is in a group. Update the minimum index found // so far and continue looking. if (group_min == -1 || (size_t)group_min > cipher_index) { group_min = cipher_index; } } else { if (group_min != -1 && (size_t)group_min < cipher_index) { cipher_index = group_min; } return sk_SSL_CIPHER_value(allow, cipher_index); } } if (in_group_flags != NULL && !in_group_flags[i] && group_min != -1) { // We are about to leave a group, but we found a match in it, so that's // our answer. 
return sk_SSL_CIPHER_value(allow, group_min); } } OPENSSL_PUT_ERROR(SSL, SSL_R_NO_SHARED_CIPHER); return nullptr; } struct TLS12ServerParams { bool ok() const { return cipher != nullptr; } const SSL_CIPHER *cipher = nullptr; uint16_t signature_algorithm = 0; }; static TLS12ServerParams choose_params(SSL_HANDSHAKE *hs, const SSL_CREDENTIAL *cred, const STACK_OF(SSL_CIPHER) *client_pref, bool has_ecdhe_group) { // Determine the usable cipher suites. uint32_t mask_k = 0, mask_a = 0; if (has_ecdhe_group) { mask_k |= SSL_kECDHE; } if (hs->config->psk_server_callback != nullptr) { mask_k |= SSL_kPSK; mask_a |= SSL_aPSK; } uint16_t sigalg = 0; if (cred != nullptr && cred->type == SSLCredentialType::kX509) { bool sign_ok = tls1_choose_signature_algorithm(hs, cred, &sigalg); ERR_clear_error(); // ECDSA keys must additionally be checked against the peer's supported // curve list. int key_type = EVP_PKEY_id(cred->pubkey.get()); if (hs->config->check_ecdsa_curve && key_type == EVP_PKEY_EC) { EC_KEY *ec_key = EVP_PKEY_get0_EC_KEY(cred->pubkey.get()); uint16_t group_id; if (!ssl_nid_to_group_id( &group_id, EC_GROUP_get_curve_name(EC_KEY_get0_group(ec_key))) || std::find(hs->peer_supported_group_list.begin(), hs->peer_supported_group_list.end(), group_id) == hs->peer_supported_group_list.end()) { sign_ok = false; // If this would make us unable to pick any cipher, return an error. // This is not strictly necessary, but it gives us a more specific // error to help the caller diagnose issues. 
if (mask_a == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); return TLS12ServerParams(); } } } mask_a |= ssl_cipher_auth_mask_for_key(cred->pubkey.get(), sign_ok); if (key_type == EVP_PKEY_RSA) { mask_k |= SSL_kRSA; } } TLS12ServerParams params; params.cipher = choose_cipher(hs, client_pref, mask_k, mask_a); if (params.cipher == nullptr) { return TLS12ServerParams(); } if (ssl_cipher_requires_server_key_exchange(params.cipher) && ssl_cipher_uses_certificate_auth(params.cipher)) { params.signature_algorithm = sigalg; } return params; } static enum ssl_hs_wait_t do_start_accept(SSL_HANDSHAKE *hs) { ssl_do_info_callback(hs->ssl, SSL_CB_HANDSHAKE_START, 1); hs->state = state12_read_client_hello; return ssl_hs_ok; } // is_probably_jdk11_with_tls13 returns whether |client_hello| was probably sent // from a JDK 11 client with both TLS 1.3 and a prior version enabled. static bool is_probably_jdk11_with_tls13(const SSL_CLIENT_HELLO *client_hello) { // JDK 11 ClientHellos contain a number of unusual properties which should // limit false positives. // JDK 11 does not support ChaCha20-Poly1305. This is unusual: many modern // clients implement ChaCha20-Poly1305. if (ssl_client_cipher_list_contains_cipher( client_hello, TLS1_3_CK_CHACHA20_POLY1305_SHA256 & 0xffff)) { return false; } // JDK 11 always sends extensions in a particular order. constexpr uint16_t kMaxFragmentLength = 0x0001; constexpr uint16_t kStatusRequestV2 = 0x0011; static constexpr struct { uint16_t id; bool required; } kJavaExtensions[] = { {TLSEXT_TYPE_server_name, false}, {kMaxFragmentLength, false}, {TLSEXT_TYPE_status_request, false}, {TLSEXT_TYPE_supported_groups, true}, {TLSEXT_TYPE_ec_point_formats, false}, {TLSEXT_TYPE_signature_algorithms, true}, // Java always sends signature_algorithms_cert. 
{TLSEXT_TYPE_signature_algorithms_cert, true}, {TLSEXT_TYPE_application_layer_protocol_negotiation, false}, {kStatusRequestV2, false}, {TLSEXT_TYPE_extended_master_secret, false}, {TLSEXT_TYPE_supported_versions, true}, {TLSEXT_TYPE_cookie, false}, {TLSEXT_TYPE_psk_key_exchange_modes, true}, {TLSEXT_TYPE_key_share, true}, {TLSEXT_TYPE_renegotiate, false}, {TLSEXT_TYPE_pre_shared_key, false}, }; Span sigalgs, sigalgs_cert; bool has_status_request = false, has_status_request_v2 = false; CBS extensions, supported_groups; CBS_init(&extensions, client_hello->extensions, client_hello->extensions_len); for (const auto &java_extension : kJavaExtensions) { CBS copy = extensions; uint16_t id; if (CBS_get_u16(©, &id) && id == java_extension.id) { // The next extension is the one we expected. extensions = copy; CBS body; if (!CBS_get_u16_length_prefixed(&extensions, &body)) { return false; } switch (id) { case TLSEXT_TYPE_status_request: has_status_request = true; break; case kStatusRequestV2: has_status_request_v2 = true; break; case TLSEXT_TYPE_signature_algorithms: sigalgs = body; break; case TLSEXT_TYPE_signature_algorithms_cert: sigalgs_cert = body; break; case TLSEXT_TYPE_supported_groups: supported_groups = body; break; } } else if (java_extension.required) { return false; } } if (CBS_len(&extensions) != 0) { return false; } // JDK 11 never advertises X25519. It is not offered by default, and // -Djdk.tls.namedGroups=x25519 does not work. This is unusual: many modern // clients implement X25519. while (CBS_len(&supported_groups) > 0) { uint16_t group; if (!CBS_get_u16(&supported_groups, &group) || // group == SSL_GROUP_X25519) { return false; } } if ( // JDK 11 always sends the same contents in signature_algorithms and // signature_algorithms_cert. This is unusual: // signature_algorithms_cert, if omitted, is treated as if it were // signature_algorithms. 
sigalgs != sigalgs_cert || // When TLS 1.2 or below is enabled, JDK 11 sends status_request_v2 iff it // sends status_request. This is unusual: status_request_v2 is not widely // implemented. has_status_request != has_status_request_v2) { return false; } return true; } static bool decrypt_ech(SSL_HANDSHAKE *hs, uint8_t *out_alert, const SSL_CLIENT_HELLO *client_hello) { SSL *const ssl = hs->ssl; CBS body; if (!ssl_client_hello_get_extension(client_hello, &body, TLSEXT_TYPE_encrypted_client_hello)) { return true; } uint8_t type; if (!CBS_get_u8(&body, &type)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } if (type != ECH_CLIENT_OUTER) { return true; } // This is a ClientHelloOuter ECH extension. Attempt to decrypt it. uint8_t config_id; uint16_t kdf_id, aead_id; CBS enc, payload; if (!CBS_get_u16(&body, &kdf_id) || // !CBS_get_u16(&body, &aead_id) || // !CBS_get_u8(&body, &config_id) || !CBS_get_u16_length_prefixed(&body, &enc) || !CBS_get_u16_length_prefixed(&body, &payload) || // CBS_len(&body) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } { MutexReadLock lock(&ssl->ctx->lock); hs->ech_keys = UpRef(ssl->ctx->ech_keys); } if (!hs->ech_keys) { ssl->s3->ech_status = ssl_ech_rejected; return true; } for (const auto &config : hs->ech_keys->configs) { hs->ech_hpke_ctx.Reset(); if (config_id != config->ech_config().config_id || !config->SetupContext(hs->ech_hpke_ctx.get(), kdf_id, aead_id, enc)) { // Ignore the error and try another ECHConfig. ERR_clear_error(); continue; } bool is_decrypt_error; if (!ssl_client_hello_decrypt(hs, out_alert, &is_decrypt_error, &hs->ech_client_hello_buf, client_hello, payload)) { if (is_decrypt_error) { // Ignore the error and try another ECHConfig. ERR_clear_error(); // The |out_alert| calling convention currently relies on a default of // |SSL_AD_DECODE_ERROR|. 
https://crbug.com/boringssl/373 tracks // switching to sum types, which avoids this. *out_alert = SSL_AD_DECODE_ERROR; continue; } OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED); return false; } hs->ech_config_id = config_id; ssl->s3->ech_status = ssl_ech_accepted; return true; } // If we did not accept ECH, proceed with the ClientHelloOuter. Note this // could be key mismatch or ECH GREASE, so we must complete the handshake // as usual, except EncryptedExtensions will contain retry configs. ssl->s3->ech_status = ssl_ech_rejected; return true; } static bool extract_sni(SSL_HANDSHAKE *hs, uint8_t *out_alert, const SSL_CLIENT_HELLO *client_hello) { SSL *const ssl = hs->ssl; CBS sni; if (!ssl_client_hello_get_extension(client_hello, &sni, TLSEXT_TYPE_server_name)) { // No SNI extension to parse. // // Clear state in case we previously extracted SNI from ClientHelloOuter. ssl->s3->hostname.reset(); return true; } CBS server_name_list, host_name; uint8_t name_type; if (!CBS_get_u16_length_prefixed(&sni, &server_name_list) || // !CBS_get_u8(&server_name_list, &name_type) || // // Although the server_name extension was intended to be extensible to // new name types and multiple names, OpenSSL 1.0.x had a bug which meant // different name types will cause an error. Further, RFC 4366 originally // defined syntax inextensibly. RFC 6066 corrected this mistake, but // adding new name types is no longer feasible. // // Act as if the extensibility does not exist to simplify parsing. !CBS_get_u16_length_prefixed(&server_name_list, &host_name) || // CBS_len(&server_name_list) != 0 || // CBS_len(&sni) != 0) { *out_alert = SSL_AD_DECODE_ERROR; return false; } if (name_type != TLSEXT_NAMETYPE_host_name || // CBS_len(&host_name) == 0 || // CBS_len(&host_name) > TLSEXT_MAXLEN_host_name || // CBS_contains_zero_byte(&host_name)) { *out_alert = SSL_AD_UNRECOGNIZED_NAME; return false; } // Copy the hostname as a string. 
char *raw = nullptr;
  if (!CBS_strdup(&host_name, &raw)) {
    *out_alert = SSL_AD_INTERNAL_ERROR;
    return false;
  }
  ssl->s3->hostname.reset(raw);
  return true;
}

// do_read_client_hello reads the ClientHello, performs early flight checks,
// applies ECH decryption if configured, and extracts SNI before advancing to
// the post-ECH processing state.
static enum ssl_hs_wait_t do_read_client_hello(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }
  if (!ssl_check_message_type(ssl, msg, SSL3_MT_CLIENT_HELLO)) {
    return ssl_hs_error;
  }

  SSL_CLIENT_HELLO client_hello;
  if (!ssl_client_hello_init(ssl, &client_hello, msg.body)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
    return ssl_hs_error;
  }

  // ClientHello should be the end of the flight. We check this early to cover
  // all protocol versions.
  if (ssl->method->has_unprocessed_handshake_data(ssl)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE);
    OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESS_HANDSHAKE_DATA);
    return ssl_hs_error;
  }

  if (hs->config->handoff) {
    return ssl_hs_handoff;
  }

  uint8_t alert = SSL_AD_DECODE_ERROR;
  // We check for rejection status in case we've rewound the state machine
  // after determining `ClientHelloInner` is invalid.
  if (ssl->s3->ech_status != ssl_ech_rejected &&
      !decrypt_ech(hs, &alert, &client_hello)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    return ssl_hs_error;
  }

  // ECH may have changed which ClientHello we process. Update |msg| and
  // |client_hello| in case.
  if (!hs->GetClientHello(&msg, &client_hello)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return ssl_hs_error;
  }

  if (!extract_sni(hs, &alert, &client_hello)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    return ssl_hs_error;
  }

  hs->state = state12_read_client_hello_after_ech;
  return ssl_hs_ok;
}

// do_read_client_hello_after_ech continues ClientHello processing once the
// effective ClientHello (inner or outer) is known: it runs the early
// certificate-selection callback, freezes the version range, negotiates the
// version, and validates the client random and compression methods.
static enum ssl_hs_wait_t do_read_client_hello_after_ech(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  SSLMessage msg_unused;
  SSL_CLIENT_HELLO client_hello;
  if (!hs->GetClientHello(&msg_unused, &client_hello)) {
    return ssl_hs_error;
  }

  // Run the early callback.
  if (ssl->ctx->select_certificate_cb != NULL) {
    switch (ssl->ctx->select_certificate_cb(&client_hello)) {
      case ssl_select_cert_retry:
        return ssl_hs_certificate_selection_pending;

      case ssl_select_cert_disable_ech:
        // Discard the decrypted ClientHelloInner and reprocess the
        // ClientHelloOuter with ECH rejected.
        hs->ech_client_hello_buf.Reset();
        hs->ech_keys = nullptr;
        hs->state = state12_read_client_hello;
        ssl->s3->ech_status = ssl_ech_rejected;
        return ssl_hs_ok;

      case ssl_select_cert_error:
        // Connection rejected.
        OPENSSL_PUT_ERROR(SSL, SSL_R_CONNECTION_REJECTED);
        ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
        return ssl_hs_error;

      default:
        /* fallthrough */;
    }
  }

  // Freeze the version range after the early callback.
  if (!ssl_get_version_range(hs, &hs->min_version, &hs->max_version)) {
    return ssl_hs_error;
  }

  if (hs->config->jdk11_workaround &&
      is_probably_jdk11_with_tls13(&client_hello)) {
    hs->apply_jdk11_workaround = true;
  }

  uint8_t alert = SSL_AD_DECODE_ERROR;
  if (!negotiate_version(hs, &alert, &client_hello)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    return ssl_hs_error;
  }

  hs->client_version = client_hello.version;

  if (client_hello.random_len != SSL3_RANDOM_SIZE) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return ssl_hs_error;
  }
  OPENSSL_memcpy(ssl->s3->client_random, client_hello.random,
                 client_hello.random_len);

  // Only null compression is supported. TLS 1.3 further requires the peer
  // advertise no other compression.
  if (OPENSSL_memchr(client_hello.compression_methods, 0,
                     client_hello.compression_methods_len) == NULL ||
      (ssl_protocol_version(ssl) >= TLS1_3_VERSION &&
       client_hello.compression_methods_len != 1)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_COMPRESSION_LIST);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
    return ssl_hs_error;
  }

  // TLS extensions.
if (!ssl_parse_clienthello_tlsext(hs, &client_hello)) { OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); return ssl_hs_error; } hs->state = state12_cert_callback; return ssl_hs_ok; } static enum ssl_hs_wait_t do_cert_callback(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; // Call |cert_cb| to update server certificates if required. if (hs->config->cert->cert_cb != NULL) { int rv = hs->config->cert->cert_cb(ssl, hs->config->cert->cert_cb_arg); if (rv == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_CB_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return ssl_hs_error; } if (rv < 0) { return ssl_hs_x509_lookup; } } if (hs->ocsp_stapling_requested && ssl->ctx->legacy_ocsp_callback != nullptr) { switch (ssl->ctx->legacy_ocsp_callback( ssl, ssl->ctx->legacy_ocsp_callback_arg)) { case SSL_TLSEXT_ERR_OK: break; case SSL_TLSEXT_ERR_NOACK: hs->ocsp_stapling_requested = false; break; default: OPENSSL_PUT_ERROR(SSL, SSL_R_OCSP_CB_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return ssl_hs_error; } } if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { // Jump to the TLS 1.3 state machine. hs->state = state12_tls13; return ssl_hs_ok; } // It should not be possible to negotiate TLS 1.2 with ECH. The // ClientHelloInner decoding function rejects ClientHellos which offer TLS 1.2 // or below. assert(ssl->s3->ech_status != ssl_ech_accepted); ssl->s3->early_data_reason = ssl_early_data_protocol_version; hs->state = state12_select_parameters; return ssl_hs_ok; } static enum ssl_hs_wait_t do_tls13(SSL_HANDSHAKE *hs) { enum ssl_hs_wait_t wait = tls13_server_handshake(hs); if (wait == ssl_hs_ok) { hs->state = state12_finish_server_handshake; return ssl_hs_ok; } return wait; } static enum ssl_hs_wait_t do_select_parameters(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; SSLMessage msg; SSL_CLIENT_HELLO client_hello; if (!hs->GetClientHello(&msg, &client_hello)) { return ssl_hs_error; } // Determine the ECDHE group to use, if we are to use ECDHE. 
uint16_t group_id = 0; bool has_ecdhe_group = tls1_get_shared_group(hs, &group_id); // Select the credential and cipher suite. This must be done after |cert_cb| // runs, so the final credential list is known. // // TODO(davidben): In the course of picking these, we also pick the ECDHE // group and signature algorithm. It would be tidier if we saved that decision // and avoided redoing it later. UniquePtr client_pref = ssl_parse_client_cipher_list(&client_hello); if (client_pref == nullptr) { return ssl_hs_error; } Array creds; if (!ssl_get_credential_list(hs, &creds)) { return ssl_hs_error; } TLS12ServerParams params; if (creds.empty()) { // The caller may have configured no credentials, but set a PSK callback. params = choose_params(hs, /*cred=*/nullptr, client_pref.get(), has_ecdhe_group); } else { // Select the first credential which works. for (SSL_CREDENTIAL *cred : creds) { ERR_clear_error(); params = choose_params(hs, cred, client_pref.get(), has_ecdhe_group); if (params.ok()) { hs->credential = UpRef(cred); break; } } } if (!params.ok()) { // The error from the last attempt is in the error queue. assert(ERR_peek_error() != 0); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); return ssl_hs_error; } hs->new_cipher = params.cipher; hs->signature_algorithm = params.signature_algorithm; // |ssl_client_hello_init| checks that |client_hello.session_id| is not too // large. hs->session_id.CopyFrom( Span(client_hello.session_id, client_hello.session_id_len)); // Determine whether we are doing session resumption. UniquePtr session; bool tickets_supported = false, renew_ticket = false; enum ssl_hs_wait_t wait = ssl_get_prev_session( hs, &session, &tickets_supported, &renew_ticket, &client_hello); if (wait != ssl_hs_ok) { return wait; } if (session) { if (session->extended_master_secret && !hs->extended_master_secret) { // A ClientHello without EMS that attempts to resume a session with EMS // is fatal to the connection. 
OPENSSL_PUT_ERROR(SSL, SSL_R_RESUMED_EMS_SESSION_WITHOUT_EMS_EXTENSION); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); return ssl_hs_error; } if (!ssl_session_is_resumable(hs, session.get()) || // If the client offers the EMS extension, but the previous session // didn't use it, then negotiate a new session. hs->extended_master_secret != session->extended_master_secret) { session.reset(); } } if (session) { // Use the old session. hs->ticket_expected = renew_ticket; ssl->session = std::move(session); ssl->s3->session_reused = true; hs->can_release_private_key = true; } else { hs->ticket_expected = tickets_supported; ssl_set_session(ssl, nullptr); if (!ssl_get_new_session(hs)) { return ssl_hs_error; } // Assign a session ID if not using session tickets. if (!hs->ticket_expected && (ssl->ctx->session_cache_mode & SSL_SESS_CACHE_SERVER)) { hs->new_session->session_id.ResizeForOverwrite( SSL3_SSL_SESSION_ID_LENGTH); RAND_bytes(hs->new_session->session_id.data(), hs->new_session->session_id.size()); } } if (ssl->ctx->dos_protection_cb != NULL && ssl->ctx->dos_protection_cb(&client_hello) == 0) { // Connection rejected for DOS reasons. OPENSSL_PUT_ERROR(SSL, SSL_R_CONNECTION_REJECTED); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return ssl_hs_error; } if (ssl->session == NULL) { hs->new_session->cipher = hs->new_cipher; if (hs->new_session->cipher->algorithm_mkey & SSL_kECDHE) { assert(has_ecdhe_group); hs->new_session->group_id = group_id; } // Determine whether to request a client certificate. hs->cert_request = !!(hs->config->verify_mode & SSL_VERIFY_PEER); // Only request a certificate if Channel ID isn't negotiated. if ((hs->config->verify_mode & SSL_VERIFY_PEER_IF_NO_OBC) && hs->channel_id_negotiated) { hs->cert_request = false; } // CertificateRequest may only be sent in certificate-based ciphers. 
if (!ssl_cipher_uses_certificate_auth(hs->new_cipher)) { hs->cert_request = false; } if (!hs->cert_request) { // OpenSSL returns X509_V_OK when no certificates are requested. This is // classed by them as a bug, but it's assumed by at least NGINX. hs->new_session->verify_result = X509_V_OK; } } // HTTP/2 negotiation depends on the cipher suite, so ALPN negotiation was // deferred. Complete it now. uint8_t alert = SSL_AD_DECODE_ERROR; if (!ssl_negotiate_alpn(hs, &alert, &client_hello)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return ssl_hs_error; } // Now that all parameters are known, initialize the handshake hash and hash // the ClientHello. if (!hs->transcript.InitHash(ssl_protocol_version(ssl), hs->new_cipher)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return ssl_hs_error; } // Handback includes the whole handshake transcript, so we cannot free the // transcript buffer in the handback case. if (!hs->cert_request && !hs->handback) { hs->transcript.FreeBuffer(); } if (!ssl_hash_message(hs, msg)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return ssl_hs_error; } ssl->method->next_message(ssl); hs->state = state12_send_server_hello; return ssl_hs_ok; } static void copy_suffix(Span out, Span in) { out = out.last(in.size()); OPENSSL_memcpy(out.data(), in.data(), in.size()); } static enum ssl_hs_wait_t do_send_server_hello(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; // We only accept ChannelIDs on connections with ECDHE in order to avoid a // known attack while we fix ChannelID itself. if (hs->channel_id_negotiated && (hs->new_cipher->algorithm_mkey & SSL_kECDHE) == 0) { hs->channel_id_negotiated = false; } // If this is a resumption and the original handshake didn't support // ChannelID then we didn't record the original handshake hashes in the // session and so cannot resume with ChannelIDs. 
if (ssl->session != nullptr && ssl->session->original_handshake_hash.empty()) { hs->channel_id_negotiated = false; } SSL_HANDSHAKE_HINTS *const hints = hs->hints.get(); if (hints && !hs->hints_requested && hints->server_random_tls12.size() == SSL3_RANDOM_SIZE) { OPENSSL_memcpy(ssl->s3->server_random, hints->server_random_tls12.data(), SSL3_RANDOM_SIZE); } else { OPENSSL_timeval now = ssl_ctx_get_current_time(ssl->ctx.get()); CRYPTO_store_u32_be(ssl->s3->server_random, static_cast(now.tv_sec)); if (!RAND_bytes(ssl->s3->server_random + 4, SSL3_RANDOM_SIZE - 4)) { return ssl_hs_error; } if (hints && hs->hints_requested && !hints->server_random_tls12.CopyFrom(ssl->s3->server_random)) { return ssl_hs_error; } } // Implement the TLS 1.3 anti-downgrade feature. if (hs->max_version >= TLS1_3_VERSION) { if (ssl_protocol_version(ssl) == TLS1_2_VERSION) { if (hs->apply_jdk11_workaround) { // JDK 11 implements the TLS 1.3 downgrade signal, so we cannot send it // here. However, the signal is only effective if all TLS 1.2 // ServerHellos produced by the server are marked. Thus we send a // different non-standard signal for the time being, until JDK 11.0.2 is // released and clients have updated. copy_suffix(ssl->s3->server_random, kJDK11DowngradeRandom); } else { copy_suffix(ssl->s3->server_random, kTLS13DowngradeRandom); } } else { copy_suffix(ssl->s3->server_random, kTLS12DowngradeRandom); } } Span session_id; if (ssl->session != nullptr) { // Echo the session ID from the ClientHello to indicate resumption. 
session_id = hs->session_id; } else { session_id = hs->new_session->session_id; } ScopedCBB cbb; CBB body, session_id_bytes; if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_SERVER_HELLO) || !CBB_add_u16(&body, ssl->s3->version) || !CBB_add_bytes(&body, ssl->s3->server_random, SSL3_RANDOM_SIZE) || !CBB_add_u8_length_prefixed(&body, &session_id_bytes) || !CBB_add_bytes(&session_id_bytes, session_id.data(), session_id.size()) || !CBB_add_u16(&body, SSL_CIPHER_get_protocol_id(hs->new_cipher)) || !CBB_add_u8(&body, 0 /* no compression */) || !ssl_add_serverhello_tlsext(hs, &body) || !ssl_add_message_cbb(ssl, cbb.get())) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return ssl_hs_error; } if (ssl->session != nullptr) { // No additional hints to generate in resumption. if (hs->hints_requested) { return ssl_hs_hints_ready; } hs->state = state12_send_server_finished; } else { hs->state = state12_send_server_certificate; } return ssl_hs_ok; } static enum ssl_hs_wait_t do_send_server_certificate(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; ScopedCBB cbb; if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) { assert(hs->credential != nullptr); if (!ssl_send_tls12_certificate(hs)) { return ssl_hs_error; } if (hs->certificate_status_expected) { CBB body, ocsp_response; if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CERTIFICATE_STATUS) || !CBB_add_u8(&body, TLSEXT_STATUSTYPE_ocsp) || !CBB_add_u24_length_prefixed(&body, &ocsp_response) || !CBB_add_bytes( &ocsp_response, CRYPTO_BUFFER_data(hs->credential->ocsp_response.get()), CRYPTO_BUFFER_len(hs->credential->ocsp_response.get())) || !ssl_add_message_cbb(ssl, cbb.get())) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return ssl_hs_error; } } } // Assemble ServerKeyExchange parameters if needed. 
uint32_t alg_k = hs->new_cipher->algorithm_mkey;
  uint32_t alg_a = hs->new_cipher->algorithm_auth;
  if (ssl_cipher_requires_server_key_exchange(hs->new_cipher) ||
      ((alg_a & SSL_aPSK) && hs->config->psk_identity_hint)) {
    // Pre-allocate enough room to comfortably fit an ECDHE public key. Prepend
    // the client and server randoms for the signing transcript.
    CBB child;
    if (!CBB_init(cbb.get(), SSL3_RANDOM_SIZE * 2 + 128) ||
        !CBB_add_bytes(cbb.get(), ssl->s3->client_random, SSL3_RANDOM_SIZE) ||
        !CBB_add_bytes(cbb.get(), ssl->s3->server_random, SSL3_RANDOM_SIZE)) {
      return ssl_hs_error;
    }

    // PSK ciphers begin with an identity hint.
    if (alg_a & SSL_aPSK) {
      size_t len = hs->config->psk_identity_hint == nullptr
                       ? 0
                       : strlen(hs->config->psk_identity_hint.get());
      if (!CBB_add_u16_length_prefixed(cbb.get(), &child) ||
          !CBB_add_bytes(&child,
                         (const uint8_t *)hs->config->psk_identity_hint.get(),
                         len)) {
        return ssl_hs_error;
      }
    }

    if (alg_k & SSL_kECDHE) {
      assert(hs->new_session->group_id != 0);
      hs->key_shares[0] = SSLKeyShare::Create(hs->new_session->group_id);
      if (!hs->key_shares[0] ||                                //
          !CBB_add_u8(cbb.get(), NAMED_CURVE_TYPE) ||          //
          !CBB_add_u16(cbb.get(), hs->new_session->group_id) ||  //
          !CBB_add_u8_length_prefixed(cbb.get(), &child)) {
        return ssl_hs_error;
      }
      // Prefer the ECDHE key recorded in handshake hints, if it matches the
      // selected group and deserializes correctly.
      SSL_HANDSHAKE_HINTS *const hints = hs->hints.get();
      bool hint_ok = false;
      if (hints && !hs->hints_requested &&
          hints->ecdhe_group_id == hs->new_session->group_id &&
          !hints->ecdhe_public_key.empty() &&
          !hints->ecdhe_private_key.empty()) {
        CBS cbs = CBS(hints->ecdhe_private_key);
        hint_ok = hs->key_shares[0]->DeserializePrivateKey(&cbs);
      }
      if (hint_ok) {
        // Reuse the ECDH key from handshake hints.
        if (!CBB_add_bytes(&child, hints->ecdhe_public_key.data(),
                           hints->ecdhe_public_key.size())) {
          return ssl_hs_error;
        }
      } else {
        // Generate a key, and emit the public half.
        if (!hs->key_shares[0]->Generate(&child)) {
          return ssl_hs_error;
        }
        // If generating hints, save the ECDHE key.
        if (hints && hs->hints_requested) {
          bssl::ScopedCBB private_key_cbb;
          if (!hints->ecdhe_public_key.CopyFrom(
                  Span(CBB_data(&child), CBB_len(&child))) ||
              !CBB_init(private_key_cbb.get(), 32) ||
              !hs->key_shares[0]->SerializePrivateKey(private_key_cbb.get()) ||
              !CBBFinishArray(private_key_cbb.get(),
                              &hints->ecdhe_private_key)) {
            return ssl_hs_error;
          }
          hints->ecdhe_group_id = hs->new_session->group_id;
        }
      }
    } else {
      assert(alg_k & SSL_kPSK);
    }

    if (!CBBFinishArray(cbb.get(), &hs->server_params)) {
      return ssl_hs_error;
    }
  }

  hs->state = state12_send_server_key_exchange;
  return ssl_hs_ok;
}

// do_send_server_key_exchange emits the ServerKeyExchange message from the
// parameters assembled in |hs->server_params| (which carries the client and
// server randoms as a signing prefix), signing them when the cipher uses
// certificate authentication.
static enum ssl_hs_wait_t do_send_server_key_exchange(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  if (hs->server_params.size() == 0) {
    // No ServerKeyExchange needed for this cipher.
    hs->state = state12_send_server_hello_done;
    return ssl_hs_ok;
  }

  ScopedCBB cbb;
  CBB body, child;
  if (!ssl->method->init_message(ssl, cbb.get(), &body,
                                 SSL3_MT_SERVER_KEY_EXCHANGE) ||
      // |hs->server_params| contains a prefix for signing.
      hs->server_params.size() < 2 * SSL3_RANDOM_SIZE ||
      !CBB_add_bytes(&body, hs->server_params.data() + 2 * SSL3_RANDOM_SIZE,
                     hs->server_params.size() - 2 * SSL3_RANDOM_SIZE)) {
    return ssl_hs_error;
  }

  // Add a signature.
  if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) {
    // Determine the signature algorithm.
    uint16_t signature_algorithm;
    if (!tls1_choose_signature_algorithm(hs, hs->credential.get(),
                                         &signature_algorithm)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
      return ssl_hs_error;
    }
    if (ssl_protocol_version(ssl) >= TLS1_2_VERSION) {
      if (!CBB_add_u16(&body, signature_algorithm)) {
        OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
        ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
        return ssl_hs_error;
      }
    }

    // Add space for the signature.
const size_t max_sig_len = EVP_PKEY_size(hs->credential->pubkey.get());
    uint8_t *ptr;
    if (!CBB_add_u16_length_prefixed(&body, &child) ||
        !CBB_reserve(&child, &ptr, max_sig_len)) {
      return ssl_hs_error;
    }
    size_t sig_len;
    switch (ssl_private_key_sign(hs, ptr, &sig_len, max_sig_len,
                                 signature_algorithm, hs->server_params)) {
      case ssl_private_key_success:
        if (!CBB_did_write(&child, sig_len)) {
          return ssl_hs_error;
        }
        break;
      case ssl_private_key_failure:
        return ssl_hs_error;
      case ssl_private_key_retry:
        // Asynchronous key operation; resume later.
        return ssl_hs_private_key_operation;
    }
  }

  hs->can_release_private_key = true;
  if (!ssl_add_message_cbb(ssl, cbb.get())) {
    return ssl_hs_error;
  }

  // The signing prefix is no longer needed.
  hs->server_params.Reset();

  hs->state = state12_send_server_hello_done;
  return ssl_hs_ok;
}

// do_send_server_hello_done sends CertificateRequest (if client auth was
// enabled) followed by ServerHelloDone, then flushes the flight.
static enum ssl_hs_wait_t do_send_server_hello_done(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  if (hs->hints_requested) {
    return ssl_hs_hints_ready;
  }

  ScopedCBB cbb;
  CBB body;

  if (hs->cert_request) {
    CBB cert_types, sigalgs_cbb;
    if (!ssl->method->init_message(ssl, cbb.get(), &body,
                                   SSL3_MT_CERTIFICATE_REQUEST) ||
        !CBB_add_u8_length_prefixed(&body, &cert_types) ||
        !CBB_add_u8(&cert_types, SSL3_CT_RSA_SIGN) ||
        !CBB_add_u8(&cert_types, TLS_CT_ECDSA_SIGN) ||
        // signature_algorithms is only present from TLS 1.2 on.
        (ssl_protocol_version(ssl) >= TLS1_2_VERSION &&
         (!CBB_add_u16_length_prefixed(&body, &sigalgs_cbb) ||
          !tls12_add_verify_sigalgs(hs, &sigalgs_cbb))) ||
        !ssl_add_client_CA_list(hs, &body) ||
        !ssl_add_message_cbb(ssl, cbb.get())) {
      OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
      return ssl_hs_error;
    }
  }

  if (!ssl->method->init_message(ssl, cbb.get(), &body,
                                 SSL3_MT_SERVER_HELLO_DONE) ||
      !ssl_add_message_cbb(ssl, cbb.get())) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return ssl_hs_error;
  }

  hs->state = state12_read_client_certificate;
  return ssl_hs_flush;
}

// do_read_client_certificate reads the client Certificate message (when one
// was requested), parses the chain, and records it in |hs->new_session|.
static enum ssl_hs_wait_t do_read_client_certificate(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  if (hs->handback && hs->new_cipher->algorithm_mkey == SSL_kECDHE) {
    return ssl_hs_handback;
  }
  if (!hs->cert_request) {
    hs->state = state12_verify_client_certificate;
    return ssl_hs_ok;
  }

  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }

  if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE)) {
    return ssl_hs_error;
  }

  if (!ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  CBS certificate_msg = msg.body;
  uint8_t alert = SSL_AD_DECODE_ERROR;
  if (!ssl_parse_cert_chain(&alert, &hs->new_session->certs, &hs->peer_pubkey,
                            hs->config->retain_only_sha256_of_client_certs
                                ? hs->new_session->peer_sha256
                                : nullptr,
                            &certificate_msg, ssl->ctx->pool)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    return ssl_hs_error;
  }

  if (CBS_len(&certificate_msg) != 0 ||
      !ssl->ctx->x509_method->session_cache_objects(hs->new_session.get())) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
    return ssl_hs_error;
  }

  if (sk_CRYPTO_BUFFER_num(hs->new_session->certs.get()) == 0) {
    // No client certificate so the handshake buffer may be discarded.
    hs->transcript.FreeBuffer();

    if (hs->config->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) {
      // Fail for TLS only if we required a certificate
      OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
      return ssl_hs_error;
    }

    // OpenSSL returns X509_V_OK when no certificates are received. This is
    // classed by them as a bug, but it's assumed by at least NGINX.
    hs->new_session->verify_result = X509_V_OK;
  } else if (hs->config->retain_only_sha256_of_client_certs) {
    // The hash will have been filled in.
    hs->new_session->peer_sha256_valid = true;
  }

  ssl->method->next_message(ssl);
  hs->state = state12_verify_client_certificate;
  return ssl_hs_ok;
}

// do_verify_client_certificate verifies any received client certificate chain
// (possibly asynchronously) before moving on to the ClientKeyExchange.
static enum ssl_hs_wait_t do_verify_client_certificate(SSL_HANDSHAKE *hs) {
  if (sk_CRYPTO_BUFFER_num(hs->new_session->certs.get()) > 0) {
    switch (ssl_verify_peer_cert(hs)) {
      case ssl_verify_ok:
        break;
      case ssl_verify_invalid:
        return ssl_hs_error;
      case ssl_verify_retry:
        return ssl_hs_certificate_verify;
    }
  }

  hs->state = state12_read_client_key_exchange;
  return ssl_hs_ok;
}

// do_read_client_key_exchange reads the ClientKeyExchange and derives the
// premaster and master secrets for the selected key exchange (RSA, ECDHE,
// and/or PSK).
static enum ssl_hs_wait_t do_read_client_key_exchange(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }

  if (!ssl_check_message_type(ssl, msg, SSL3_MT_CLIENT_KEY_EXCHANGE)) {
    return ssl_hs_error;
  }

  CBS client_key_exchange = msg.body;
  uint32_t alg_k = hs->new_cipher->algorithm_mkey;
  uint32_t alg_a = hs->new_cipher->algorithm_auth;

  // If using a PSK key exchange, parse the PSK identity.
  if (alg_a & SSL_aPSK) {
    CBS psk_identity;

    // If using PSK, the ClientKeyExchange contains a psk_identity. If PSK,
    // then this is the only field in the message.
    if (!CBS_get_u16_length_prefixed(&client_key_exchange, &psk_identity) ||
        ((alg_k & SSL_kPSK) && CBS_len(&client_key_exchange) != 0)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
      return ssl_hs_error;
    }

    if (CBS_len(&psk_identity) > PSK_MAX_IDENTITY_LEN ||
        CBS_contains_zero_byte(&psk_identity)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_hs_error;
    }
    char *raw = nullptr;
    if (!CBS_strdup(&psk_identity, &raw)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
      return ssl_hs_error;
    }
    hs->new_session->psk_identity.reset(raw);
  }

  // Depending on the key exchange method, compute |premaster_secret|.
Array premaster_secret; if (alg_k & SSL_kRSA) { CBS encrypted_premaster_secret; if (!CBS_get_u16_length_prefixed(&client_key_exchange, &encrypted_premaster_secret) || CBS_len(&client_key_exchange) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return ssl_hs_error; } // Allocate a buffer large enough for an RSA decryption. Array decrypt_buf; if (!decrypt_buf.InitForOverwrite( EVP_PKEY_size(hs->credential->pubkey.get()))) { return ssl_hs_error; } // Decrypt with no padding. PKCS#1 padding will be removed as part of the // timing-sensitive code below. size_t decrypt_len; switch (ssl_private_key_decrypt(hs, decrypt_buf.data(), &decrypt_len, decrypt_buf.size(), encrypted_premaster_secret)) { case ssl_private_key_success: break; case ssl_private_key_failure: return ssl_hs_error; case ssl_private_key_retry: return ssl_hs_private_key_operation; } if (decrypt_len != decrypt_buf.size()) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); return ssl_hs_error; } CONSTTIME_SECRET(decrypt_buf.data(), decrypt_len); // Prepare a random premaster, to be used on invalid padding. See RFC 5246, // section 7.4.7.1. if (!premaster_secret.InitForOverwrite(SSL_MAX_MASTER_KEY_LENGTH) || !RAND_bytes(premaster_secret.data(), premaster_secret.size())) { return ssl_hs_error; } // The smallest padded premaster is 11 bytes of overhead. Small keys are // publicly invalid. if (decrypt_len < 11 + premaster_secret.size()) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); return ssl_hs_error; } // Check the padding. See RFC 3447, section 7.2.2. 
size_t padding_len = decrypt_len - premaster_secret.size(); uint8_t good = constant_time_eq_int_8(decrypt_buf[0], 0) & constant_time_eq_int_8(decrypt_buf[1], 2); for (size_t i = 2; i < padding_len - 1; i++) { good &= ~constant_time_is_zero_8(decrypt_buf[i]); } good &= constant_time_is_zero_8(decrypt_buf[padding_len - 1]); // The premaster secret must begin with |client_version|. This too must be // checked in constant time (http://eprint.iacr.org/2003/052/). good &= constant_time_eq_8(decrypt_buf[padding_len], (unsigned)(hs->client_version >> 8)); good &= constant_time_eq_8(decrypt_buf[padding_len + 1], (unsigned)(hs->client_version & 0xff)); // Select, in constant time, either the decrypted premaster or the random // premaster based on |good|. for (size_t i = 0; i < premaster_secret.size(); i++) { premaster_secret[i] = constant_time_select_8( good, decrypt_buf[padding_len + i], premaster_secret[i]); } } else if (alg_k & SSL_kECDHE) { // Parse the ClientKeyExchange. CBS ciphertext; if (!CBS_get_u8_length_prefixed(&client_key_exchange, &ciphertext) || CBS_len(&client_key_exchange) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return ssl_hs_error; } // Decapsulate the premaster secret. uint8_t alert = SSL_AD_DECODE_ERROR; if (!hs->key_shares[0]->Decap(&premaster_secret, &alert, ciphertext)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return ssl_hs_error; } // The key exchange state may now be discarded. hs->key_shares[0].reset(); hs->key_shares[1].reset(); } else if (!(alg_k & SSL_kPSK)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); return ssl_hs_error; } // For a PSK cipher suite, the actual pre-master secret is combined with the // pre-shared key. 
if (alg_a & SSL_aPSK) { if (hs->config->psk_server_callback == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return ssl_hs_error; } // Look up the key for the identity. uint8_t psk[PSK_MAX_PSK_LEN]; unsigned psk_len = hs->config->psk_server_callback( ssl, hs->new_session->psk_identity.get(), psk, sizeof(psk)); if (psk_len > PSK_MAX_PSK_LEN) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return ssl_hs_error; } else if (psk_len == 0) { // PSK related to the given identity not found. OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_IDENTITY_NOT_FOUND); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNKNOWN_PSK_IDENTITY); return ssl_hs_error; } if (alg_k & SSL_kPSK) { // In plain PSK, other_secret is a block of 0s with the same length as the // pre-shared key. if (!premaster_secret.Init(psk_len)) { return ssl_hs_error; } } ScopedCBB new_premaster; CBB child; if (!CBB_init(new_premaster.get(), 2 + psk_len + 2 + premaster_secret.size()) || !CBB_add_u16_length_prefixed(new_premaster.get(), &child) || !CBB_add_bytes(&child, premaster_secret.data(), premaster_secret.size()) || !CBB_add_u16_length_prefixed(new_premaster.get(), &child) || !CBB_add_bytes(&child, psk, psk_len) || !CBBFinishArray(new_premaster.get(), &premaster_secret)) { return ssl_hs_error; } } if (!ssl_hash_message(hs, msg)) { return ssl_hs_error; } // Compute the master secret. hs->new_session->secret.ResizeForOverwrite(SSL3_MASTER_SECRET_SIZE); if (!tls1_generate_master_secret(hs, Span(hs->new_session->secret), premaster_secret)) { return ssl_hs_error; } hs->new_session->extended_master_secret = hs->extended_master_secret; // Declassify the secret to undo the RSA decryption validation above. We are // not currently running most of the TLS library with constant-time // validation. // TODO(crbug.com/42290551): Remove this and cover the TLS library too. 
CONSTTIME_DECLASSIFY(hs->new_session->secret.data(), hs->new_session->secret.size()); hs->can_release_private_key = true; ssl->method->next_message(ssl); hs->state = state12_read_client_certificate_verify; return ssl_hs_ok; } static enum ssl_hs_wait_t do_read_client_certificate_verify(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; // Only RSA and ECDSA client certificates are supported, so a // CertificateVerify is required if and only if there's a client certificate. if (!hs->peer_pubkey) { hs->transcript.FreeBuffer(); hs->state = state12_read_change_cipher_spec; return ssl_hs_ok; } SSLMessage msg; if (!ssl->method->get_message(ssl, &msg)) { return ssl_hs_read_message; } if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE_VERIFY)) { return ssl_hs_error; } // The peer certificate must be valid for signing. const CRYPTO_BUFFER *leaf = sk_CRYPTO_BUFFER_value(hs->new_session->certs.get(), 0); CBS leaf_cbs; CRYPTO_BUFFER_init_CBS(leaf, &leaf_cbs); if (!ssl_cert_check_key_usage(&leaf_cbs, key_usage_digital_signature)) { return ssl_hs_error; } CBS certificate_verify = msg.body, signature; // Determine the signature algorithm. uint16_t signature_algorithm = 0; if (ssl_protocol_version(ssl) >= TLS1_2_VERSION) { if (!CBS_get_u16(&certificate_verify, &signature_algorithm)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return ssl_hs_error; } uint8_t alert = SSL_AD_DECODE_ERROR; if (!tls12_check_peer_sigalg(hs, &alert, signature_algorithm, hs->peer_pubkey.get())) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return ssl_hs_error; } hs->new_session->peer_signature_algorithm = signature_algorithm; } else if (!tls1_get_legacy_signature_algorithm(&signature_algorithm, hs->peer_pubkey.get())) { OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_CERTIFICATE); return ssl_hs_error; } // Parse and verify the signature. 
if (!CBS_get_u16_length_prefixed(&certificate_verify, &signature) || CBS_len(&certificate_verify) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return ssl_hs_error; } if (!ssl_public_key_verify(ssl, signature, signature_algorithm, hs->peer_pubkey.get(), hs->transcript.buffer())) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SIGNATURE); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); return ssl_hs_error; } // The handshake buffer is no longer necessary, and we may hash the current // message. hs->transcript.FreeBuffer(); if (!ssl_hash_message(hs, msg)) { return ssl_hs_error; } ssl->method->next_message(ssl); hs->state = state12_read_change_cipher_spec; return ssl_hs_ok; } static enum ssl_hs_wait_t do_read_change_cipher_spec(SSL_HANDSHAKE *hs) { if (hs->handback && hs->ssl->session != NULL) { return ssl_hs_handback; } hs->state = state12_process_change_cipher_spec; return ssl_hs_read_change_cipher_spec; } static enum ssl_hs_wait_t do_process_change_cipher_spec(SSL_HANDSHAKE *hs) { if (!tls1_change_cipher_state(hs, evp_aead_open)) { return ssl_hs_error; } hs->state = state12_read_next_proto; return ssl_hs_ok; } static enum ssl_hs_wait_t do_read_next_proto(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; if (!hs->next_proto_neg_seen) { hs->state = state12_read_channel_id; return ssl_hs_ok; } SSLMessage msg; if (!ssl->method->get_message(ssl, &msg)) { return ssl_hs_read_message; } if (!ssl_check_message_type(ssl, msg, SSL3_MT_NEXT_PROTO) || !ssl_hash_message(hs, msg)) { return ssl_hs_error; } CBS next_protocol = msg.body, selected_protocol, padding; if (!CBS_get_u8_length_prefixed(&next_protocol, &selected_protocol) || !CBS_get_u8_length_prefixed(&next_protocol, &padding) || CBS_len(&next_protocol) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return ssl_hs_error; } if (!ssl->s3->next_proto_negotiated.CopyFrom(selected_protocol)) { return 
ssl_hs_error; } ssl->method->next_message(ssl); hs->state = state12_read_channel_id; return ssl_hs_ok; } static enum ssl_hs_wait_t do_read_channel_id(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; if (!hs->channel_id_negotiated) { hs->state = state12_read_client_finished; return ssl_hs_ok; } SSLMessage msg; if (!ssl->method->get_message(ssl, &msg)) { return ssl_hs_read_message; } if (!ssl_check_message_type(ssl, msg, SSL3_MT_CHANNEL_ID) || !tls1_verify_channel_id(hs, msg) || // !ssl_hash_message(hs, msg)) { return ssl_hs_error; } ssl->method->next_message(ssl); hs->state = state12_read_client_finished; return ssl_hs_ok; } static enum ssl_hs_wait_t do_read_client_finished(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; enum ssl_hs_wait_t wait = ssl_get_finished(hs); if (wait != ssl_hs_ok) { return wait; } if (ssl->session != NULL) { hs->state = state12_finish_server_handshake; } else { hs->state = state12_send_server_finished; } // If this is a full handshake with ChannelID then record the handshake // hashes in |hs->new_session| in case we need them to verify a // ChannelID signature on a resumption of this session in the future. if (ssl->session == NULL && ssl->s3->channel_id_valid && !tls1_record_handshake_hashes_for_channel_id(hs)) { return ssl_hs_error; } return ssl_hs_ok; } static enum ssl_hs_wait_t do_send_server_finished(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; if (hs->ticket_expected) { const SSL_SESSION *session; UniquePtr session_copy; if (ssl->session == NULL) { // Fix the timeout to measure from the ticket issuance time. ssl_session_rebase_time(ssl, hs->new_session.get()); session = hs->new_session.get(); } else { // We are renewing an existing session. Duplicate the session to adjust // the timeout. 
session_copy = SSL_SESSION_dup(ssl->session.get(), SSL_SESSION_INCLUDE_NONAUTH); if (!session_copy) { return ssl_hs_error; } ssl_session_rebase_time(ssl, session_copy.get()); session = session_copy.get(); } ScopedCBB cbb; CBB body, ticket; if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_NEW_SESSION_TICKET) || !CBB_add_u32(&body, session->timeout) || !CBB_add_u16_length_prefixed(&body, &ticket) || !ssl_encrypt_ticket(hs, &ticket, session) || // |ticket| may be empty to skip sending a ticket. In TLS 1.2, servers // skip sending tickets by sending empty NewSessionTicket, so no special // handling is needed. !ssl_add_message_cbb(ssl, cbb.get())) { return ssl_hs_error; } } if (!ssl->method->add_change_cipher_spec(ssl) || // !tls1_change_cipher_state(hs, evp_aead_seal) || // !ssl_send_finished(hs)) { return ssl_hs_error; } if (ssl->session != NULL) { hs->state = state12_read_change_cipher_spec; } else { hs->state = state12_finish_server_handshake; } return ssl_hs_flush; } static enum ssl_hs_wait_t do_finish_server_handshake(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; if (hs->handback) { return ssl_hs_handback; } ssl->method->on_handshake_complete(ssl); // If we aren't retaining peer certificates then we can discard it now. 
if (hs->new_session != NULL && hs->config->retain_only_sha256_of_client_certs) { hs->new_session->certs.reset(); ssl->ctx->x509_method->session_clear(hs->new_session.get()); } bool has_new_session = hs->new_session != nullptr; if (has_new_session) { assert(ssl->session == nullptr); ssl->s3->established_session = std::move(hs->new_session); ssl->s3->established_session->not_resumable = false; } else { assert(ssl->session != nullptr); ssl->s3->established_session = UpRef(ssl->session); } hs->handshake_finalized = true; ssl->s3->initial_handshake_complete = true; if (has_new_session) { ssl_update_cache(ssl); } hs->state = state12_done; return ssl_hs_ok; } enum ssl_hs_wait_t ssl_server_handshake(SSL_HANDSHAKE *hs) { while (hs->state != state12_done) { enum ssl_hs_wait_t ret = ssl_hs_error; enum tls12_server_hs_state_t state = static_cast(hs->state); switch (state) { case state12_start_accept: ret = do_start_accept(hs); break; case state12_read_client_hello: ret = do_read_client_hello(hs); break; case state12_read_client_hello_after_ech: ret = do_read_client_hello_after_ech(hs); break; case state12_cert_callback: ret = do_cert_callback(hs); break; case state12_tls13: ret = do_tls13(hs); break; case state12_select_parameters: ret = do_select_parameters(hs); break; case state12_send_server_hello: ret = do_send_server_hello(hs); break; case state12_send_server_certificate: ret = do_send_server_certificate(hs); break; case state12_send_server_key_exchange: ret = do_send_server_key_exchange(hs); break; case state12_send_server_hello_done: ret = do_send_server_hello_done(hs); break; case state12_read_client_certificate: ret = do_read_client_certificate(hs); break; case state12_verify_client_certificate: ret = do_verify_client_certificate(hs); break; case state12_read_client_key_exchange: ret = do_read_client_key_exchange(hs); break; case state12_read_client_certificate_verify: ret = do_read_client_certificate_verify(hs); break; case state12_read_change_cipher_spec: ret = 
do_read_change_cipher_spec(hs); break; case state12_process_change_cipher_spec: ret = do_process_change_cipher_spec(hs); break; case state12_read_next_proto: ret = do_read_next_proto(hs); break; case state12_read_channel_id: ret = do_read_channel_id(hs); break; case state12_read_client_finished: ret = do_read_client_finished(hs); break; case state12_send_server_finished: ret = do_send_server_finished(hs); break; case state12_finish_server_handshake: ret = do_finish_server_handshake(hs); break; case state12_done: ret = ssl_hs_ok; break; } if (hs->state != state) { ssl_do_info_callback(hs->ssl, SSL_CB_ACCEPT_LOOP, 1); } if (ret != ssl_hs_ok) { return ret; } } ssl_do_info_callback(hs->ssl, SSL_CB_HANDSHAKE_DONE, 1); return ssl_hs_ok; } const char *ssl_server_handshake_state(SSL_HANDSHAKE *hs) { enum tls12_server_hs_state_t state = static_cast(hs->state); switch (state) { case state12_start_accept: return "TLS server start_accept"; case state12_read_client_hello: return "TLS server read_client_hello"; case state12_read_client_hello_after_ech: return "TLS server read_client_hello_after_ech"; case state12_cert_callback: return "TLS server cert_callback"; case state12_tls13: return tls13_server_handshake_state(hs); case state12_select_parameters: return "TLS server select_parameters"; case state12_send_server_hello: return "TLS server send_server_hello"; case state12_send_server_certificate: return "TLS server send_server_certificate"; case state12_send_server_key_exchange: return "TLS server send_server_key_exchange"; case state12_send_server_hello_done: return "TLS server send_server_hello_done"; case state12_read_client_certificate: return "TLS server read_client_certificate"; case state12_verify_client_certificate: return "TLS server verify_client_certificate"; case state12_read_client_key_exchange: return "TLS server read_client_key_exchange"; case state12_read_client_certificate_verify: return "TLS server read_client_certificate_verify"; case 
state12_read_change_cipher_spec: return "TLS server read_change_cipher_spec"; case state12_process_change_cipher_spec: return "TLS server process_change_cipher_spec"; case state12_read_next_proto: return "TLS server read_next_proto"; case state12_read_channel_id: return "TLS server read_channel_id"; case state12_read_client_finished: return "TLS server read_client_finished"; case state12_send_server_finished: return "TLS server send_server_finished"; case state12_finish_server_handshake: return "TLS server finish_server_handshake"; case state12_done: return "TLS server done"; } return "TLS server unknown"; } BSSL_NAMESPACE_END ================================================ FILE: Sources/CNIOBoringSSL/ssl/internal.h ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * Copyright 2005 Nokia. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #ifndef OPENSSL_HEADER_SSL_INTERNAL_H #define OPENSSL_HEADER_SSL_INTERNAL_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../crypto/err/internal.h" #include "../crypto/internal.h" #include "../crypto/lhash/internal.h" #if defined(OPENSSL_WINDOWS) // Windows defines struct timeval in winsock2.h. OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include OPENSSL_MSVC_PRAGMA(warning(pop)) #else #include #endif BSSL_NAMESPACE_BEGIN struct SSL_CONFIG; struct SSL_HANDSHAKE; struct SSL_PROTOCOL_METHOD; struct SSL_X509_METHOD; // C++ utilities. // New behaves like |new| but uses |OPENSSL_malloc| for memory allocation. 
It // returns nullptr on allocation error. It only implements single-object // allocation and not new T[n]. // // Note: unlike |new|, this does not support non-public constructors. template T *New(Args &&...args) { void *t = OPENSSL_malloc(sizeof(T)); if (t == nullptr) { return nullptr; } return new (t) T(std::forward(args)...); } // Delete behaves like |delete| but uses |OPENSSL_free| to release memory. // // Note: unlike |delete| this does not support non-public destructors. template void Delete(T *t) { if (t != nullptr) { t->~T(); OPENSSL_free(t); } } // All types with kAllowUniquePtr set may be used with UniquePtr. Other types // may be C structs which require a |BORINGSSL_MAKE_DELETER| registration. namespace internal { template struct DeleterImpl> { static void Free(T *t) { Delete(t); } }; } // namespace internal // MakeUnique behaves like |std::make_unique| but returns nullptr on allocation // error. template UniquePtr MakeUnique(Args &&...args) { return UniquePtr(New(std::forward(args)...)); } // Array is an owning array of elements of |T|. template class Array { public: // Array's default constructor creates an empty array. Array() {} Array(const Array &) = delete; Array(Array &&other) { *this = std::move(other); } ~Array() { Reset(); } Array &operator=(const Array &) = delete; Array &operator=(Array &&other) { Reset(); other.Release(&data_, &size_); return *this; } const T *data() const { return data_; } T *data() { return data_; } size_t size() const { return size_; } bool empty() const { return size_ == 0; } const T &operator[](size_t i) const { BSSL_CHECK(i < size_); return data_[i]; } T &operator[](size_t i) { BSSL_CHECK(i < size_); return data_[i]; } T *begin() { return data_; } const T *begin() const { return data_; } T *end() { return data_ + size_; } const T *end() const { return data_ + size_; } void Reset() { Reset(nullptr, 0); } // Reset releases the current contents of the array and takes ownership of the // raw pointer supplied by the caller. 
void Reset(T *new_data, size_t new_size) { std::destroy_n(data_, size_); OPENSSL_free(data_); data_ = new_data; size_ = new_size; } // Release releases ownership of the array to a raw pointer supplied by the // caller. void Release(T **out, size_t *out_size) { *out = data_; *out_size = size_; data_ = nullptr; size_ = 0; } // Init replaces the array with a newly-allocated array of |new_size| // value-constructed copies of |T|. It returns true on success and false on // error. If |T| is a primitive type like |uint8_t|, value-construction means // it will be zero-initialized. [[nodiscard]] bool Init(size_t new_size) { if (!InitUninitialized(new_size)) { return false; } std::uninitialized_value_construct_n(data_, size_); return true; } // InitForOverwrite behaves like |Init| but it default-constructs each element // instead. This means that, if |T| is a primitive type, the array will be // uninitialized and thus must be filled in by the caller. [[nodiscard]] bool InitForOverwrite(size_t new_size) { if (!InitUninitialized(new_size)) { return false; } std::uninitialized_default_construct_n(data_, size_); return true; } // CopyFrom replaces the array with a newly-allocated copy of |in|. It returns // true on success and false on error. [[nodiscard]] bool CopyFrom(Span in) { if (!InitUninitialized(in.size())) { return false; } std::uninitialized_copy(in.begin(), in.end(), data_); return true; } // Shrink shrinks the stored size of the array to |new_size|. It crashes if // the new size is larger. Note this does not shrink the allocation itself. void Shrink(size_t new_size) { if (new_size > size_) { abort(); } std::destroy_n(data_ + new_size, size_ - new_size); size_ = new_size; } private: // InitUninitialized replaces the array with a newly-allocated array of // |new_size| elements, but whose constructor has not yet run. On success, the // elements must be constructed before returning control to the caller. 
bool InitUninitialized(size_t new_size) { Reset(); if (new_size == 0) { return true; } if (new_size > std::numeric_limits::max() / sizeof(T)) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); return false; } data_ = reinterpret_cast(OPENSSL_malloc(new_size * sizeof(T))); if (data_ == nullptr) { return false; } size_ = new_size; return true; } T *data_ = nullptr; size_t size_ = 0; }; // Vector is a resizable array of elements of |T|. template class Vector { public: Vector() = default; Vector(const Vector &) = delete; Vector(Vector &&other) { *this = std::move(other); } ~Vector() { clear(); } Vector &operator=(const Vector &) = delete; Vector &operator=(Vector &&other) { clear(); std::swap(data_, other.data_); std::swap(size_, other.size_); std::swap(capacity_, other.capacity_); return *this; } const T *data() const { return data_; } T *data() { return data_; } size_t size() const { return size_; } bool empty() const { return size_ == 0; } const T &operator[](size_t i) const { BSSL_CHECK(i < size_); return data_[i]; } T &operator[](size_t i) { BSSL_CHECK(i < size_); return data_[i]; } T *begin() { return data_; } const T *begin() const { return data_; } T *end() { return data_ + size_; } const T *end() const { return data_ + size_; } void clear() { std::destroy_n(data_, size_); OPENSSL_free(data_); data_ = nullptr; size_ = 0; capacity_ = 0; } // Push adds |elem| at the end of the internal array, growing if necessary. It // returns false when allocation fails. [[nodiscard]] bool Push(T elem) { if (!MaybeGrow()) { return false; } new (&data_[size_]) T(std::move(elem)); size_++; return true; } // CopyFrom replaces the contents of the array with a copy of |in|. It returns // true on success and false on allocation error. 
[[nodiscard]] bool CopyFrom(Span in) { Array copy; if (!copy.CopyFrom(in)) { return false; } clear(); copy.Release(&data_, &size_); capacity_ = size_; return true; } private: // If there is no room for one more element, creates a new backing array with // double the size of the old one and copies elements over. bool MaybeGrow() { // No need to grow if we have room for one more T. if (size_ < capacity_) { return true; } size_t new_capacity = kDefaultSize; if (capacity_ > 0) { // Double the array's size if it's safe to do so. if (capacity_ > std::numeric_limits::max() / 2) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); return false; } new_capacity = capacity_ * 2; } if (new_capacity > std::numeric_limits::max() / sizeof(T)) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); return false; } T *new_data = reinterpret_cast(OPENSSL_malloc(new_capacity * sizeof(T))); if (new_data == nullptr) { return false; } size_t new_size = size_; std::uninitialized_move(begin(), end(), new_data); clear(); data_ = new_data; size_ = new_size; capacity_ = new_capacity; return true; } // data_ is a pointer to |capacity_| objects of size |T|, the first |size_| of // which are constructed. T *data_ = nullptr; // |size_| is the number of elements stored in this Vector. size_t size_ = 0; // |capacity_| is the number of elements allocated in this Vector. size_t capacity_ = 0; // |kDefaultSize| is the default initial size of the backing array. static constexpr size_t kDefaultSize = 16; }; // A PackedSize is an integer that can store values from 0 to N, represented as // a minimal-width integer. template using PackedSize = std::conditional_t< N <= 0xff, uint8_t, std::conditional_t>>; // An InplaceVector is like a Vector, but stores up to N elements inline in the // object. It is inspired by std::inplace_vector in C++26. 
template class InplaceVector { public: InplaceVector() = default; InplaceVector(const InplaceVector &other) { *this = other; } InplaceVector(InplaceVector &&other) { *this = std::move(other); } ~InplaceVector() { clear(); } InplaceVector &operator=(const InplaceVector &other) { if (this != &other) { CopyFrom(other); } return *this; } InplaceVector &operator=(InplaceVector &&other) { clear(); std::uninitialized_move(other.begin(), other.end(), data()); size_ = other.size(); return *this; } const T *data() const { return reinterpret_cast(storage_); } T *data() { return reinterpret_cast(storage_); } size_t size() const { return size_; } static constexpr size_t capacity() { return N; } bool empty() const { return size_ == 0; } const T &operator[](size_t i) const { BSSL_CHECK(i < size_); return data()[i]; } T &operator[](size_t i) { BSSL_CHECK(i < size_); return data()[i]; } T *begin() { return data(); } const T *begin() const { return data(); } T *end() { return data() + size_; } const T *end() const { return data() + size_; } void clear() { Shrink(0); } // Shrink resizes the vector to |new_size|, which must not be larger than the // current size. Unlike |Resize|, this can be called when |T| is not // default-constructible. void Shrink(size_t new_size) { BSSL_CHECK(new_size <= size_); std::destroy_n(data() + new_size, size_ - new_size); size_ = static_cast>(new_size); } // TryResize resizes the vector to |new_size| and returns true, or returns // false if |new_size| is too large. Any newly-added elements are // value-initialized. 
[[nodiscard]] bool TryResize(size_t new_size) { if (new_size <= size_) { Shrink(new_size); return true; } if (new_size > capacity()) { return false; } std::uninitialized_value_construct_n(data() + size_, new_size - size_); size_ = static_cast>(new_size); return true; } // TryResizeForOverwrite behaves like |TryResize|, but newly-added elements // are default-initialized, so POD types may contain uninitialized values that // the caller is responsible for filling in. [[nodiscard]] bool TryResizeForOverwrite(size_t new_size) { if (new_size <= size_) { Shrink(new_size); return true; } if (new_size > capacity()) { return false; } std::uninitialized_default_construct_n(data() + size_, new_size - size_); size_ = static_cast>(new_size); return true; } // TryCopyFrom sets the vector to a copy of |in| and returns true, or returns // false if |in| is too large. [[nodiscard]] bool TryCopyFrom(Span in) { if (in.size() > capacity()) { return false; } clear(); std::uninitialized_copy(in.begin(), in.end(), data()); size_ = in.size(); return true; } // TryPushBack appends |val| to the vector and returns a pointer to the // newly-inserted value, or nullptr if the vector is at capacity. [[nodiscard]] T *TryPushBack(T val) { if (size() >= capacity()) { return nullptr; } T *ret = &data()[size_]; new (ret) T(std::move(val)); size_++; return ret; } // The following methods behave like their |Try*| counterparts, but abort the // program on failure. void Resize(size_t size) { BSSL_CHECK(TryResize(size)); } void ResizeForOverwrite(size_t size) { BSSL_CHECK(TryResizeForOverwrite(size)); } void CopyFrom(Span in) { BSSL_CHECK(TryCopyFrom(in)); } T &PushBack(T val) { T *ret = TryPushBack(std::move(val)); BSSL_CHECK(ret != nullptr); return *ret; } template void EraseIf(Pred pred) { // See if anything needs to be erased at all. This avoids a self-move. auto iter = std::find_if(begin(), end(), pred); if (iter == end()) { return; } // Elements before the first to be erased may be left as-is. 
size_t new_size = iter - begin(); // Swap all subsequent elements in if they are to be kept. for (size_t i = new_size + 1; i < size(); i++) { if (!pred((*this)[i])) { (*this)[new_size] = std::move((*this)[i]); new_size++; } } Shrink(new_size); } private: alignas(T) char storage_[sizeof(T[N])]; PackedSize size_ = 0; }; // An MRUQueue maintains a queue of up to |N| objects of type |T|. If the queue // is at capacity, adding to the queue pops the least recently added element. template class MRUQueue { public: static constexpr bool kAllowUniquePtr = true; MRUQueue() = default; // If we ever need to make this type movable, we could. (The defaults almost // work except we need |start_| to be reset when moved-from.) MRUQueue(const MRUQueue &other) = delete; MRUQueue &operator=(const MRUQueue &other) = delete; bool empty() const { return size() == 0; } size_t size() const { return storage_.size(); } T &operator[](size_t i) { BSSL_CHECK(i < size()); return storage_[(start_ + i) % N]; } const T &operator[](size_t i) const { return (*const_cast(this))[i]; } void Clear() { storage_.clear(); start_ = 0; } void PushBack(T t) { if (storage_.size() < N) { assert(start_ == 0); storage_.PushBack(std::move(t)); } else { (*this)[0] = std::move(t); start_ = (start_ + 1) % N; } } private: InplaceVector storage_; PackedSize start_ = 0; }; // CBBFinishArray behaves like |CBB_finish| but stores the result in an Array. OPENSSL_EXPORT bool CBBFinishArray(CBB *cbb, Array *out); // GetAllNames helps to implement |*_get_all_*_names| style functions. It // writes at most |max_out| string pointers to |out| and returns the number that // it would have liked to have written. The strings written consist of // |fixed_names_len| strings from |fixed_names| followed by |objects_len| // strings taken by projecting |objects| through |name|. 
template inline size_t GetAllNames(const char **out, size_t max_out, Span fixed_names, Name(T::*name), Span objects) { auto span = bssl::Span(out, max_out); for (size_t i = 0; !span.empty() && i < fixed_names.size(); i++) { span[0] = fixed_names[i]; span = span.subspan(1); } span = span.subspan(0, objects.size()); for (size_t i = 0; i < span.size(); i++) { span[i] = objects[i].*name; } return fixed_names.size() + objects.size(); } // RefCounted is a common base for ref-counted types. This is an instance of the // C++ curiously-recurring template pattern, so a type Foo must subclass // RefCounted. It additionally must friend RefCounted to allow calling // the destructor. template class RefCounted { public: RefCounted(const RefCounted &) = delete; RefCounted &operator=(const RefCounted &) = delete; // These methods are intentionally named differently from `bssl::UpRef` to // avoid a collision. Only the implementations of `FOO_up_ref` and `FOO_free` // should call these. void UpRefInternal() { CRYPTO_refcount_inc(&references_); } void DecRefInternal() { if (CRYPTO_refcount_dec_and_test_zero(&references_)) { Derived *d = static_cast(this); d->~Derived(); OPENSSL_free(d); } } protected: // Ensure that only `Derived`, which must inherit from `RefCounted`, // can call the constructor. This catches bugs where someone inherited from // the wrong base. class CheckSubClass { private: friend Derived; CheckSubClass() = default; }; RefCounted(CheckSubClass) { static_assert(std::is_base_of::value, "Derived must subclass RefCounted"); } ~RefCounted() = default; private: CRYPTO_refcount_t references_ = 1; }; // Protocol versions. // // Due to DTLS's historical wire version differences, we maintain two notions of // version. // // The "version" or "wire version" is the actual 16-bit value that appears on // the wire. It uniquely identifies a version and is also used at API // boundaries. The set of supported versions differs between TLS and DTLS. 
Wire // versions are opaque values and may not be compared numerically. // // The "protocol version" identifies the high-level handshake variant being // used. DTLS versions map to the corresponding TLS versions. Protocol versions // are sequential and may be compared numerically. // ssl_protocol_version_from_wire sets |*out| to the protocol version // corresponding to wire version |version| and returns true. If |version| is not // a valid TLS or DTLS version, it returns false. // // Note this simultaneously handles both DTLS and TLS. Use one of the // higher-level functions below for most operations. bool ssl_protocol_version_from_wire(uint16_t *out, uint16_t version); // ssl_get_version_range sets |*out_min_version| and |*out_max_version| to the // minimum and maximum enabled protocol versions, respectively. bool ssl_get_version_range(const SSL_HANDSHAKE *hs, uint16_t *out_min_version, uint16_t *out_max_version); // ssl_supports_version returns whether |hs| supports |version|. bool ssl_supports_version(const SSL_HANDSHAKE *hs, uint16_t version); // ssl_method_supports_version returns whether |method| supports |version|. bool ssl_method_supports_version(const SSL_PROTOCOL_METHOD *method, uint16_t version); // ssl_add_supported_versions writes the supported versions of |hs| to |cbb|, in // decreasing preference order. The version list is filtered to those whose // protocol version is at least |extra_min_version|. bool ssl_add_supported_versions(const SSL_HANDSHAKE *hs, CBB *cbb, uint16_t extra_min_version); // ssl_negotiate_version negotiates a common version based on |hs|'s preferences // and the peer preference list in |peer_versions|. On success, it returns true // and sets |*out_version| to the selected version. Otherwise, it returns false // and sets |*out_alert| to an alert to send. 
bool ssl_negotiate_version(SSL_HANDSHAKE *hs, uint8_t *out_alert, uint16_t *out_version, const CBS *peer_versions); // ssl_has_final_version returns whether |ssl| has determined the final version. // This may be used to distinguish the predictive 0-RTT version from the final // one. bool ssl_has_final_version(const SSL *ssl); // ssl_protocol_version returns |ssl|'s protocol version. It is an error to // call this function before the version is determined. uint16_t ssl_protocol_version(const SSL *ssl); // Cipher suites. BSSL_NAMESPACE_END struct ssl_cipher_st { // name is the OpenSSL name for the cipher. const char *name; // standard_name is the IETF name for the cipher. const char *standard_name; // id is the cipher suite value bitwise OR-d with 0x03000000. uint32_t id; // algorithm_* determine the cipher suite. See constants below for the values. uint32_t algorithm_mkey; uint32_t algorithm_auth; uint32_t algorithm_enc; uint32_t algorithm_mac; uint32_t algorithm_prf; }; BSSL_NAMESPACE_BEGIN // Bits for |algorithm_mkey| (key exchange algorithm). #define SSL_kRSA 0x00000001u #define SSL_kECDHE 0x00000002u // SSL_kPSK is only set for plain PSK, not ECDHE_PSK. #define SSL_kPSK 0x00000004u #define SSL_kGENERIC 0x00000008u // Bits for |algorithm_auth| (server authentication). #define SSL_aRSA_SIGN 0x00000001u #define SSL_aRSA_DECRYPT 0x00000002u #define SSL_aECDSA 0x00000004u // SSL_aPSK is set for both PSK and ECDHE_PSK. #define SSL_aPSK 0x00000008u #define SSL_aGENERIC 0x00000010u #define SSL_aCERT (SSL_aRSA_SIGN | SSL_aRSA_DECRYPT | SSL_aECDSA) // Bits for |algorithm_enc| (symmetric encryption). #define SSL_3DES 0x00000001u #define SSL_AES128 0x00000002u #define SSL_AES256 0x00000004u #define SSL_AES128GCM 0x00000008u #define SSL_AES256GCM 0x00000010u #define SSL_CHACHA20POLY1305 0x00000020u #define SSL_AES (SSL_AES128 | SSL_AES256 | SSL_AES128GCM | SSL_AES256GCM) // Bits for |algorithm_mac| (symmetric authentication). 
#define SSL_SHA1 0x00000001u #define SSL_SHA256 0x00000002u // SSL_AEAD is set for all AEADs. #define SSL_AEAD 0x00000004u // Bits for |algorithm_prf| (handshake digest). #define SSL_HANDSHAKE_MAC_DEFAULT 0x1 #define SSL_HANDSHAKE_MAC_SHA256 0x2 #define SSL_HANDSHAKE_MAC_SHA384 0x4 // SSL_MAX_MD_SIZE is size of the largest hash function used in TLS, SHA-384. #define SSL_MAX_MD_SIZE 48 // An SSLCipherPreferenceList contains a list of SSL_CIPHERs with equal- // preference groups. For TLS clients, the groups are moot because the server // picks the cipher and groups cannot be expressed on the wire. However, for // servers, the equal-preference groups allow the client's preferences to be // partially respected. (This only has an effect with // SSL_OP_CIPHER_SERVER_PREFERENCE). // // The equal-preference groups are expressed by grouping SSL_CIPHERs together. // All elements of a group have the same priority: no ordering is expressed // within a group. // // The values in |ciphers| are in one-to-one correspondence with // |in_group_flags|. (That is, sk_SSL_CIPHER_num(ciphers) is the number of // bytes in |in_group_flags|.) The bytes in |in_group_flags| are either 1, to // indicate that the corresponding SSL_CIPHER is not the last element of a // group, or 0 to indicate that it is. // // For example, if |in_group_flags| contains all zeros then that indicates a // traditional, fully-ordered preference. Every SSL_CIPHER is the last element // of the group (i.e. they are all in a one-element group). 
// // For a more complex example, consider: // ciphers: A B C D E F // in_group_flags: 1 1 0 0 1 0 // // That would express the following, order: // // A E // B -> D -> F // C struct SSLCipherPreferenceList { static constexpr bool kAllowUniquePtr = true; SSLCipherPreferenceList() = default; ~SSLCipherPreferenceList(); bool Init(UniquePtr ciphers, Span in_group_flags); bool Init(const SSLCipherPreferenceList &); void Remove(const SSL_CIPHER *cipher); UniquePtr ciphers; bool *in_group_flags = nullptr; }; // AllCiphers returns an array of all supported ciphers, sorted by id. Span AllCiphers(); // ssl_cipher_get_evp_aead sets |*out_aead| to point to the correct EVP_AEAD // object for |cipher| protocol version |version|. It sets |*out_mac_secret_len| // and |*out_fixed_iv_len| to the MAC key length and fixed IV length, // respectively. The MAC key length is zero except for legacy block and stream // ciphers. It returns true on success and false on error. bool ssl_cipher_get_evp_aead(const EVP_AEAD **out_aead, size_t *out_mac_secret_len, size_t *out_fixed_iv_len, const SSL_CIPHER *cipher, uint16_t version); // ssl_get_handshake_digest returns the |EVP_MD| corresponding to |version| and // |cipher|. const EVP_MD *ssl_get_handshake_digest(uint16_t version, const SSL_CIPHER *cipher); // ssl_create_cipher_list evaluates |rule_str|. It sets |*out_cipher_list| to a // newly-allocated |SSLCipherPreferenceList| containing the result. It returns // true on success and false on failure. If |strict| is true, nonsense will be // rejected. If false, nonsense will be silently ignored. An empty result is // considered an error regardless of |strict|. |has_aes_hw| indicates if the // list should be ordered based on having support for AES in hardware or not. 
bool ssl_create_cipher_list(UniquePtr *out_cipher_list, const bool has_aes_hw, const char *rule_str, bool strict); // ssl_cipher_auth_mask_for_key returns the mask of cipher |algorithm_auth| // values suitable for use with |key| in TLS 1.2 and below. |sign_ok| indicates // whether |key| may be used for signing. uint32_t ssl_cipher_auth_mask_for_key(const EVP_PKEY *key, bool sign_ok); // ssl_cipher_uses_certificate_auth returns whether |cipher| authenticates the // server and, optionally, the client with a certificate. bool ssl_cipher_uses_certificate_auth(const SSL_CIPHER *cipher); // ssl_cipher_requires_server_key_exchange returns whether |cipher| requires a // ServerKeyExchange message. // // This function may return false while still allowing |cipher| an optional // ServerKeyExchange. This is the case for plain PSK ciphers. bool ssl_cipher_requires_server_key_exchange(const SSL_CIPHER *cipher); // ssl_cipher_get_record_split_len, for TLS 1.0 CBC mode ciphers, returns the // length of an encrypted 1-byte record, for use in record-splitting. Otherwise // it returns zero. size_t ssl_cipher_get_record_split_len(const SSL_CIPHER *cipher); // ssl_choose_tls13_cipher returns an |SSL_CIPHER| corresponding with the best // available from |cipher_suites| compatible with |version| and |policy|. It // returns NULL if there isn't a compatible cipher. |has_aes_hw| indicates if // the choice should be made as if support for AES in hardware is available. const SSL_CIPHER *ssl_choose_tls13_cipher(CBS cipher_suites, bool has_aes_hw, uint16_t version, enum ssl_compliance_policy_t policy); // ssl_tls13_cipher_meets_policy returns true if |cipher_id| is acceptable given // |policy|. bool ssl_tls13_cipher_meets_policy(uint16_t cipher_id, enum ssl_compliance_policy_t policy); // ssl_cipher_is_deprecated returns true if |cipher| is deprecated. OPENSSL_EXPORT bool ssl_cipher_is_deprecated(const SSL_CIPHER *cipher); // Transcript layer. 
// SSLTranscript maintains the handshake transcript as a combination of a
// buffer and running hash.
//
// NOTE(review): the <...> template arguments on |Span|, |UniquePtr|, and
// |reinterpret_cast| in this declaration appear to have been stripped by text
// extraction; the tokens below are kept as-is. Confirm against upstream
// before editing further.
class SSLTranscript {
 public:
  explicit SSLTranscript(bool is_dtls);
  ~SSLTranscript();

  SSLTranscript(SSLTranscript &&other) = default;
  SSLTranscript &operator=(SSLTranscript &&other) = default;

  // Init initializes the handshake transcript. If called on an existing
  // transcript, it resets the transcript and hash. It returns true on success
  // and false on failure.
  bool Init();

  // InitHash initializes the handshake hash based on the PRF and contents of
  // the handshake transcript. Subsequent calls to |Update| will update the
  // rolling hash. It returns true on success and false on failure. It is an
  // error to call this function after the handshake buffer is released. This
  // may be called multiple times to change the hash function.
  bool InitHash(uint16_t version, const SSL_CIPHER *cipher);

  // UpdateForHelloRetryRequest resets the rolling hash with the
  // HelloRetryRequest construction. It returns true on success and false on
  // failure. It is an error to call this function before the handshake buffer
  // is released.
  bool UpdateForHelloRetryRequest();

  // CopyToHashContext initializes |ctx| with |digest| and the data thus far
  // in the transcript. It returns true on success and false on failure. If
  // the handshake buffer is still present, |digest| may be any supported
  // digest. Otherwise, |digest| must match the transcript hash.
  bool CopyToHashContext(EVP_MD_CTX *ctx, const EVP_MD *digest) const;

  // buffer returns the contents of the handshake buffer. NOTE(review): it
  // dereferences |buffer_|, so this presumably may only be called while the
  // buffer is present (before |FreeBuffer|) — confirm with callers.
  Span buffer() const {
    return Span(reinterpret_cast(buffer_->data), buffer_->length);
  }

  // FreeBuffer releases the handshake buffer. Subsequent calls to
  // |Update| will not update the handshake buffer.
  void FreeBuffer();

  // DigestLen returns the length of the PRF hash.
  size_t DigestLen() const;

  // Digest returns the PRF hash. For TLS 1.1 and below, this is
  // |EVP_md5_sha1|.
  const EVP_MD *Digest() const;

  // Update adds |in| to the handshake buffer and handshake hash, whichever is
  // enabled. It returns true on success and false on failure.
  bool Update(Span in);

  // GetHash writes the handshake hash to |out| which must have room for at
  // least |DigestLen| bytes. On success, it returns true and sets |*out_len|
  // to the number of bytes written. Otherwise, it returns false.
  bool GetHash(uint8_t *out, size_t *out_len) const;

  // GetFinishedMAC computes the MAC for the Finished message into the bytes
  // pointed by |out| and writes the number of bytes to |*out_len|. |out| must
  // have room for |EVP_MAX_MD_SIZE| bytes. It returns true on success and
  // false on failure.
  bool GetFinishedMAC(uint8_t *out, size_t *out_len,
                      const SSL_SESSION *session, bool from_server) const;

 private:
  // HashBuffer initializes |ctx| to use |digest| and writes the contents of
  // |buffer_| to |ctx|. If this SSLTranscript is for DTLS 1.3, the
  // appropriate bytes in |buffer_| will be skipped when hashing the buffer.
  bool HashBuffer(EVP_MD_CTX *ctx, const EVP_MD *digest) const;

  // AddToBufferOrHash directly adds the contents of |in| to |buffer_| and/or
  // |hash_|.
  bool AddToBufferOrHash(Span in);

  // buffer_, if non-null, contains the handshake transcript.
  UniquePtr buffer_;
  // hash_, if initialized with an |EVP_MD|, maintains the handshake hash.
  ScopedEVP_MD_CTX hash_;
  // is_dtls_ indicates whether this is a transcript for a DTLS connection.
  bool is_dtls_ : 1;
  // version_ contains the version for the connection (if known).
  uint16_t version_ = 0;
};

// tls1_prf computes the PRF function for |ssl|. It fills |out|, using
// |secret| as the secret and |label| as the label. |seed1| and |seed2| are
// concatenated to form the seed parameter. It returns true on success and
// false on failure.
bool tls1_prf(const EVP_MD *digest, Span out, Span secret,
              std::string_view label, Span seed1, Span seed2);

// Encryption layer.
// SSLAEADContext contains information about an AEAD that is being used to // encrypt an SSL connection. class SSLAEADContext { public: explicit SSLAEADContext(const SSL_CIPHER *cipher); ~SSLAEADContext(); static constexpr bool kAllowUniquePtr = true; SSLAEADContext(const SSLAEADContext &&) = delete; SSLAEADContext &operator=(const SSLAEADContext &&) = delete; // CreateNullCipher creates an |SSLAEADContext| for the null cipher. static UniquePtr CreateNullCipher(); // Create creates an |SSLAEADContext| using the supplied key material. It // returns nullptr on error. Only one of |Open| or |Seal| may be used with the // resulting object, depending on |direction|. |version| is the wire version. static UniquePtr Create(enum evp_aead_direction_t direction, uint16_t version, const SSL_CIPHER *cipher, Span enc_key, Span mac_key, Span fixed_iv); // CreatePlaceholderForQUIC creates a placeholder |SSLAEADContext| for the // given cipher. The resulting object can be queried for various properties // but cannot encrypt or decrypt data. static UniquePtr CreatePlaceholderForQUIC( const SSL_CIPHER *cipher); const SSL_CIPHER *cipher() const { return cipher_; } // is_null_cipher returns true if this is the null cipher. bool is_null_cipher() const { return !cipher_; } // ExplicitNonceLen returns the length of the explicit nonce. size_t ExplicitNonceLen() const; // MaxOverhead returns the maximum overhead of calling |Seal|. size_t MaxOverhead() const; // MaxSealInputLen returns the maximum length for |Seal| that can fit in // |max_out| output bytes, or zero if no input may fit. size_t MaxSealInputLen(size_t max_out) const; // SuffixLen calculates the suffix length written by |SealScatter| and writes // it to |*out_suffix_len|. It returns true on success and false on error. // |in_len| and |extra_in_len| should equal the argument of the same names // passed to |SealScatter|. 
bool SuffixLen(size_t *out_suffix_len, size_t in_len, size_t extra_in_len) const; // CiphertextLen calculates the total ciphertext length written by // |SealScatter| and writes it to |*out_len|. It returns true on success and // false on error. |in_len| and |extra_in_len| should equal the argument of // the same names passed to |SealScatter|. bool CiphertextLen(size_t *out_len, size_t in_len, size_t extra_in_len) const; // Open authenticates and decrypts |in| in-place. On success, it sets |*out| // to the plaintext in |in| and returns true. Otherwise, it returns // false. The output will always be |ExplicitNonceLen| bytes ahead of |in|. bool Open(Span *out, uint8_t type, uint16_t record_version, uint64_t seqnum, Span header, Span in); // Seal encrypts and authenticates |in_len| bytes from |in| and writes the // result to |out|. It returns true on success and false on error. // // If |in| and |out| alias then |out| + |ExplicitNonceLen| must be == |in|. bool Seal(uint8_t *out, size_t *out_len, size_t max_out, uint8_t type, uint16_t record_version, uint64_t seqnum, Span header, const uint8_t *in, size_t in_len); // SealScatter encrypts and authenticates |in_len| bytes from |in| and splits // the result between |out_prefix|, |out| and |out_suffix|. It returns one on // success and zero on error. // // On successful return, exactly |ExplicitNonceLen| bytes are written to // |out_prefix|, |in_len| bytes to |out|, and |SuffixLen| bytes to // |out_suffix|. // // |extra_in| may point to an additional plaintext buffer. If present, // |extra_in_len| additional bytes are encrypted and authenticated, and the // ciphertext is written to the beginning of |out_suffix|. |SuffixLen| should // be used to size |out_suffix| accordingly. // // If |in| and |out| alias then |out| must be == |in|. Other arguments may not // alias anything. 
bool SealScatter(uint8_t *out_prefix, uint8_t *out, uint8_t *out_suffix, uint8_t type, uint16_t record_version, uint64_t seqnum, Span header, const uint8_t *in, size_t in_len, const uint8_t *extra_in, size_t extra_in_len); bool GetIV(const uint8_t **out_iv, size_t *out_iv_len) const; private: // GetAdditionalData returns the additional data, writing into |storage| if // necessary. Span GetAdditionalData(uint8_t storage[13], uint8_t type, uint16_t record_version, uint64_t seqnum, size_t plaintext_len, Span header); const SSL_CIPHER *cipher_; ScopedEVP_AEAD_CTX ctx_; // fixed_nonce_ contains any bytes of the nonce that are fixed for all // records. InplaceVector fixed_nonce_; uint8_t variable_nonce_len_ = 0; // variable_nonce_included_in_record_ is true if the variable nonce // for a record is included as a prefix before the ciphertext. bool variable_nonce_included_in_record_ : 1; // random_variable_nonce_ is true if the variable nonce is // randomly generated, rather than derived from the sequence // number. bool random_variable_nonce_ : 1; // xor_fixed_nonce_ is true if the fixed nonce should be XOR'd into the // variable nonce rather than prepended. bool xor_fixed_nonce_ : 1; // omit_length_in_ad_ is true if the length should be omitted in the // AEAD's ad parameter. bool omit_length_in_ad_ : 1; // ad_is_header_ is true if the AEAD's ad parameter is the record header. bool ad_is_header_ : 1; }; // DTLS replay bitmap. // DTLSReplayBitmap maintains a sliding window of sequence numbers to detect // replayed packets. class DTLSReplayBitmap { public: // ShouldDiscard returns true if |seq_num| has been seen in // |bitmap| or is stale. Otherwise it returns false. bool ShouldDiscard(uint64_t seqnum) const; // Record updates the bitmap to record receipt of sequence number // |seq_num|. It slides the window forward if needed. It is an error to call // this function on a stale sequence number. 
void Record(uint64_t seqnum); uint64_t max_seq_num() const { return max_seq_num_; } private: // map is a bitset of sequence numbers that have been seen. Bit i corresponds // to |max_seq_num_ - i|. std::bitset<256> map_; // max_seq_num_ is the largest sequence number seen so far as a 64-bit // integer. uint64_t max_seq_num_ = 0; }; // reconstruct_seqnum takes the low order bits of a record sequence number from // the wire and reconstructs the full sequence number. It does so using the // algorithm described in section 4.2.2 of RFC 9147, where |wire_seq| is the // low bits of the sequence number as seen on the wire, |seq_mask| is a bitmask // of 8 or 16 1 bits corresponding to the length of the sequence number on the // wire, and |max_valid_seqnum| is the largest sequence number of a record // successfully deprotected in this epoch. This function returns the sequence // number that is numerically closest to one plus |max_valid_seqnum| that when // bitwise and-ed with |seq_mask| equals |wire_seq|. // // |max_valid_seqnum| must be at most 2^48-1, in which case the output will also be // at most 2^48-1. OPENSSL_EXPORT uint64_t reconstruct_seqnum(uint16_t wire_seq, uint64_t seq_mask, uint64_t max_valid_seqnum); // Record layer.
class DTLSRecordNumber { public: static constexpr uint64_t kMaxSequence = (uint64_t{1} << 48) - 1; DTLSRecordNumber() = default; DTLSRecordNumber(uint16_t epoch, uint64_t sequence) { BSSL_CHECK(sequence <= kMaxSequence); combined_ = (uint64_t{epoch} << 48) | sequence; } static DTLSRecordNumber FromCombined(uint64_t combined) { return DTLSRecordNumber(combined); } bool operator==(DTLSRecordNumber r) const { return combined() == r.combined(); } bool operator!=(DTLSRecordNumber r) const { return !((*this) == r); } bool operator<(DTLSRecordNumber r) const { return combined() < r.combined(); } uint64_t combined() const { return combined_; } uint16_t epoch() const { return combined_ >> 48; } uint64_t sequence() const { return combined_ & kMaxSequence; } bool HasNext() const { return sequence() < kMaxSequence; } DTLSRecordNumber Next() const { BSSL_CHECK(HasNext()); // This will not overflow into the epoch. return DTLSRecordNumber::FromCombined(combined_ + 1); } private: explicit DTLSRecordNumber(uint64_t combined) : combined_(combined) {} uint64_t combined_ = 0; }; class RecordNumberEncrypter { public: static constexpr bool kAllowUniquePtr = true; static constexpr size_t kMaxKeySize = 32; // Create returns a DTLS 1.3 record number encrypter for |traffic_secret|, or // nullptr on error. static UniquePtr Create( const SSL_CIPHER *cipher, Span traffic_secret); virtual ~RecordNumberEncrypter() = default; virtual size_t KeySize() = 0; virtual bool SetKey(Span key) = 0; virtual bool GenerateMask(Span out, Span sample) = 0; }; struct DTLSReadEpoch { static constexpr bool kAllowUniquePtr = true; // TODO(davidben): This could be made slightly more compact if |bitmap| stored // a DTLSRecordNumber. 
uint16_t epoch = 0; UniquePtr aead; UniquePtr rn_encrypter; DTLSReplayBitmap bitmap; }; struct DTLSWriteEpoch { static constexpr bool kAllowUniquePtr = true; uint16_t epoch() const { return next_record.epoch(); } DTLSRecordNumber next_record; UniquePtr aead; UniquePtr rn_encrypter; }; // ssl_record_prefix_len returns the length of the prefix before the ciphertext // of a record for |ssl|. // // TODO(davidben): Expose this as part of public API once the high-level // buffer-free APIs are available. size_t ssl_record_prefix_len(const SSL *ssl); enum ssl_open_record_t { ssl_open_record_success, ssl_open_record_discard, ssl_open_record_partial, ssl_open_record_close_notify, ssl_open_record_error, }; // tls_open_record decrypts a record from |in| in-place. // // If the input did not contain a complete record, it returns // |ssl_open_record_partial|. It sets |*out_consumed| to the total number of // bytes necessary. It is guaranteed that a successful call to |tls_open_record| // will consume at least that many bytes. // // Otherwise, it sets |*out_consumed| to the number of bytes of input // consumed. Note that input may be consumed on all return codes if a record was // decrypted. // // On success, it returns |ssl_open_record_success|. It sets |*out_type| to the // record type and |*out| to the record body in |in|. Note that |*out| may be // empty. // // If a record was successfully processed but should be discarded, it returns // |ssl_open_record_discard|. // // If a record was successfully processed but is a close_notify, it returns // |ssl_open_record_close_notify|. // // On failure or fatal alert, it returns |ssl_open_record_error| and sets // |*out_alert| to an alert to emit, or zero if no alert should be emitted. enum ssl_open_record_t tls_open_record(SSL *ssl, uint8_t *out_type, Span *out, size_t *out_consumed, uint8_t *out_alert, Span in); // dtls_open_record implements |tls_open_record| for DTLS. 
It only returns // |ssl_open_record_partial| if |in| was empty and sets |*out_consumed| to // zero. The caller should read one packet and try again. On success, // |*out_number| is set to the record number of the record. enum ssl_open_record_t dtls_open_record(SSL *ssl, uint8_t *out_type, DTLSRecordNumber *out_number, Span *out, size_t *out_consumed, uint8_t *out_alert, Span in); // ssl_needs_record_splitting returns one if |ssl|'s current outgoing cipher // state needs record-splitting and zero otherwise. bool ssl_needs_record_splitting(const SSL *ssl); // tls_seal_record seals a new record of type |type| and body |in| and writes it // to |out|. At most |max_out| bytes will be written. It returns true on success // and false on error. If enabled, |tls_seal_record| implements TLS 1.0 CBC // 1/n-1 record splitting and may write two records concatenated. // // For a large record, the bulk of the ciphertext will begin // |tls_seal_align_prefix_len| bytes into out. Aligning |out| appropriately may // improve performance. It writes at most |in_len| + |SSL_max_seal_overhead| // bytes to |out|. // // |in| and |out| may not alias. bool tls_seal_record(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, uint8_t type, const uint8_t *in, size_t in_len); // dtls_record_header_write_len returns the length of the record header that // will be written at |epoch|. size_t dtls_record_header_write_len(const SSL *ssl, uint16_t epoch); // dtls_max_seal_overhead returns the maximum overhead, in bytes, of sealing a // record. size_t dtls_max_seal_overhead(const SSL *ssl, uint16_t epoch); // dtls_seal_prefix_len returns the number of bytes of prefix to reserve in // front of the plaintext when sealing a record in-place. size_t dtls_seal_prefix_len(const SSL *ssl, uint16_t epoch); // dtls_seal_max_input_len returns the maximum number of input bytes that can // fit in a record of up to |max_out| bytes, or zero if none may fit. 
size_t dtls_seal_max_input_len(const SSL *ssl, uint16_t epoch, size_t max_out); // dtls_seal_record implements |tls_seal_record| for DTLS. |epoch| selects which // epoch's cipher state to use. Unlike |tls_seal_record|, |in| and |out| may // alias but, if they do, |in| must be exactly |dtls_seal_prefix_len| bytes // ahead of |out|. On success, |*out_number| is set to the record number of the // record. bool dtls_seal_record(SSL *ssl, DTLSRecordNumber *out_number, uint8_t *out, size_t *out_len, size_t max_out, uint8_t type, const uint8_t *in, size_t in_len, uint16_t epoch); // ssl_process_alert processes |in| as an alert and updates |ssl|'s shutdown // state. It returns one of |ssl_open_record_discard|, |ssl_open_record_error|, // or |ssl_open_record_close_notify| as // appropriate. enum ssl_open_record_t ssl_process_alert(SSL *ssl, uint8_t *out_alert, Span in); // Private key operations. // ssl_private_key_* perform the corresponding operation on // |SSL_PRIVATE_KEY_METHOD|. If there is a custom private key configured, they // call the corresponding function or |complete| depending on whether there is a // pending operation. Otherwise, they implement the operation with // |EVP_PKEY|. enum ssl_private_key_result_t ssl_private_key_sign( SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len, size_t max_out, uint16_t sigalg, Span in); enum ssl_private_key_result_t ssl_private_key_decrypt(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len, size_t max_out, Span in); // ssl_pkey_supports_algorithm returns whether |pkey| may be used to sign // |sigalg|. bool ssl_pkey_supports_algorithm(const SSL *ssl, EVP_PKEY *pkey, uint16_t sigalg, bool is_verify); // ssl_public_key_verify verifies that the |signature| is valid for the public // key |pkey| and input |in|, using the signature algorithm |sigalg|. bool ssl_public_key_verify(SSL *ssl, Span signature, uint16_t sigalg, EVP_PKEY *pkey, Span in); // Key shares.
// SSLKeyShare abstracts over KEM-like constructions, for use with TLS 1.2 ECDHE // cipher suites and the TLS 1.3 key_share extension. // // TODO(davidben): This class is named SSLKeyShare after the TLS 1.3 key_share // extension, but it really implements a KEM abstraction. Additionally, we use // the same type for Encap, which is a one-off, stateless operation, as Generate // and Decap. Slightly tidier would be for Generate to return a new SSLKEMKey // (or we introduce EVP_KEM and EVP_KEM_KEY), with a Decap method, and for Encap // to be static function. class SSLKeyShare { public: virtual ~SSLKeyShare() {} static constexpr bool kAllowUniquePtr = true; // Create returns a SSLKeyShare instance for use with group |group_id| or // nullptr on error. static UniquePtr Create(uint16_t group_id); // GroupID returns the group ID. virtual uint16_t GroupID() const = 0; // Generate generates a keypair and writes the public key to |out_public_key|. // It returns true on success and false on error. virtual bool Generate(CBB *out_public_key) = 0; // Encap generates an ephemeral, symmetric secret and encapsulates it with // |peer_key|. On success, it returns true, writes the encapsulated secret to // |out_ciphertext|, and sets |*out_secret| to the shared secret. On failure, // it returns false and sets |*out_alert| to an alert to send to the peer. virtual bool Encap(CBB *out_ciphertext, Array *out_secret, uint8_t *out_alert, Span peer_key) = 0; // Decap decapsulates the symmetric secret in |ciphertext|. On success, it // returns true and sets |*out_secret| to the shared secret. On failure, it // returns false and sets |*out_alert| to an alert to send to the peer. virtual bool Decap(Array *out_secret, uint8_t *out_alert, Span ciphertext) = 0; // SerializePrivateKey writes the private key to |out|, returning true if // successful and false otherwise. It should be called after |Generate|. 
virtual bool SerializePrivateKey(CBB *out) { return false; } // DeserializePrivateKey initializes the state of the key exchange from |in|, // returning true if successful and false otherwise. virtual bool DeserializePrivateKey(CBS *in) { return false; } }; struct NamedGroup { int nid; uint16_t group_id; const char name[32], alias[32]; }; // NamedGroups returns all supported groups. Span NamedGroups(); // ssl_nid_to_group_id looks up the group corresponding to |nid|. On success, it // sets |*out_group_id| to the group ID and returns true. Otherwise, it returns // false. bool ssl_nid_to_group_id(uint16_t *out_group_id, int nid); // ssl_name_to_group_id looks up the group corresponding to the |name| string of // length |len|. On success, it sets |*out_group_id| to the group ID and returns // true. Otherwise, it returns false. bool ssl_name_to_group_id(uint16_t *out_group_id, const char *name, size_t len); // ssl_group_id_to_nid returns the NID corresponding to |group_id| or // |NID_undef| if unknown. int ssl_group_id_to_nid(uint16_t group_id); // Handshake messages. struct SSLMessage { bool is_v2_hello; uint8_t type; CBS body; // raw is the entire serialized handshake message, including the TLS or DTLS // message header. CBS raw; }; // SSL_MAX_HANDSHAKE_FLIGHT is the number of messages, including // ChangeCipherSpec, in the longest handshake flight. Currently this is the // client's second leg in a full handshake when client certificates, NPN, and // Channel ID, are all enabled. #define SSL_MAX_HANDSHAKE_FLIGHT 7 extern const uint8_t kHelloRetryRequest[SSL3_RANDOM_SIZE]; extern const uint8_t kTLS12DowngradeRandom[8]; extern const uint8_t kTLS13DowngradeRandom[8]; extern const uint8_t kJDK11DowngradeRandom[8]; // ssl_max_handshake_message_len returns the maximum number of bytes permitted // in a handshake message for |ssl|. 
size_t ssl_max_handshake_message_len(const SSL *ssl); // tls_can_accept_handshake_data returns whether |ssl| is able to accept more // data into handshake buffer. bool tls_can_accept_handshake_data(const SSL *ssl, uint8_t *out_alert); // tls_has_unprocessed_handshake_data returns whether there is buffered // handshake data that has not been consumed by |get_message|. bool tls_has_unprocessed_handshake_data(const SSL *ssl); // tls_append_handshake_data appends |data| to the handshake buffer. It returns // true on success and false on allocation failure. bool tls_append_handshake_data(SSL *ssl, Span data); // dtls_has_unprocessed_handshake_data behaves like // |tls_has_unprocessed_handshake_data| for DTLS. bool dtls_has_unprocessed_handshake_data(const SSL *ssl); // tls_flush_pending_hs_data flushes any handshake plaintext data. bool tls_flush_pending_hs_data(SSL *ssl); // dtls_clear_outgoing_messages releases all buffered outgoing messages. void dtls_clear_outgoing_messages(SSL *ssl); // dtls_clear_unused_write_epochs releases any write epochs that are no longer // needed. void dtls_clear_unused_write_epochs(SSL *ssl); // Callbacks. // ssl_do_info_callback calls |ssl|'s info callback, if set. void ssl_do_info_callback(const SSL *ssl, int type, int value); // ssl_do_msg_callback calls |ssl|'s message callback, if set. void ssl_do_msg_callback(const SSL *ssl, int is_write, int content_type, Span in); // Transport buffers. class SSLBuffer { public: SSLBuffer() {} ~SSLBuffer() { Clear(); } SSLBuffer(const SSLBuffer &) = delete; SSLBuffer &operator=(const SSLBuffer &) = delete; uint8_t *data() { return buf_ + offset_; } size_t size() const { return size_; } bool empty() const { return size_ == 0; } size_t cap() const { return cap_; } Span span() { return Span(data(), size()); } Span remaining() { return Span(data() + size(), cap() - size()); } // Clear releases the buffer. 
void Clear(); // EnsureCap ensures the buffer has capacity at least |new_cap|, aligned such // that data written after |header_len| is aligned to a // |SSL3_ALIGN_PAYLOAD|-byte boundary. It returns true on success and false // on error. bool EnsureCap(size_t header_len, size_t new_cap); // DidWrite extends the buffer by |len|. The caller must have filled in to // this point. void DidWrite(size_t len); // Consume consumes |len| bytes from the front of the buffer. The memory // consumed will remain valid until the next call to |DiscardConsumed| or // |Clear|. void Consume(size_t len); // DiscardConsumed discards the consumed bytes from the buffer. If the buffer // is now empty, it releases memory used by it. void DiscardConsumed(); private: // buf_ is the memory allocated for this buffer. uint8_t *buf_ = nullptr; // offset_ is the offset into |buf_| which the buffer contents start at. uint16_t offset_ = 0; // size_ is the size of the buffer contents from |buf_| + |offset_|. uint16_t size_ = 0; // cap_ is how much memory beyond |buf_| + |offset_| is available. uint16_t cap_ = 0; // inline_buf_ is a static buffer for short reads. uint8_t inline_buf_[SSL3_RT_HEADER_LENGTH]; }; // ssl_read_buffer_extend_to extends the read buffer to the desired length. For // TLS, it reads to the end of the buffer until the buffer is |len| bytes // long. For DTLS, it reads a new packet and ignores |len|. It returns one on // success, zero on EOF, and a negative number on error. // // It is an error to call |ssl_read_buffer_extend_to| in DTLS when the buffer is // non-empty. int ssl_read_buffer_extend_to(SSL *ssl, size_t len); // ssl_handle_open_record handles the result of passing |ssl->s3->read_buffer| // to a record-processing function. If |ret| is a success or if the caller // should retry, it returns one and sets |*out_retry|. Otherwise, it returns <= // 0. 
int ssl_handle_open_record(SSL *ssl, bool *out_retry, ssl_open_record_t ret, size_t consumed, uint8_t alert); // ssl_write_buffer_flush flushes the write buffer to the transport. It returns // one on success and <= 0 on error. For DTLS, whether or not the write // succeeds, the write buffer will be cleared. int ssl_write_buffer_flush(SSL *ssl); // Certificate functions. // ssl_parse_cert_chain parses a certificate list from |cbs| in the format used // by a TLS Certificate message. On success, it advances |cbs| and returns // true. Otherwise, it returns false and sets |*out_alert| to an alert to send // to the peer. // // If the list is non-empty then |*out_chain| and |*out_pubkey| will be set to // the certificate chain and the leaf certificate's public key // respectively. Otherwise, both will be set to nullptr. // // If the list is non-empty and |out_leaf_sha256| is non-NULL, it writes the // SHA-256 hash of the leaf to |out_leaf_sha256|. bool ssl_parse_cert_chain(uint8_t *out_alert, UniquePtr *out_chain, UniquePtr *out_pubkey, uint8_t *out_leaf_sha256, CBS *cbs, CRYPTO_BUFFER_POOL *pool); enum ssl_key_usage_t { key_usage_digital_signature = 0, key_usage_encipherment = 2, }; // ssl_cert_check_key_usage parses the DER-encoded, X.509 certificate in |in| // and returns true if doesn't specify a key usage or, if it does, if it // includes |bit|. Otherwise it pushes to the error queue and returns false. OPENSSL_EXPORT bool ssl_cert_check_key_usage(const CBS *in, enum ssl_key_usage_t bit); // ssl_cert_extract_issuer parses the DER-encoded, X.509 certificate in |in| // and extracts the issuer. On success it returns true and the DER encoded // issuer is in |out_dn|, otherwise it returns false. 
OPENSSL_EXPORT bool ssl_cert_extract_issuer(const CBS *in, CBS *out_dn); // ssl_cert_matches_issuer parses the DER-encoded, X.509 certificate in |in| // and returns true if its issuer is an exact match for the DER encoded // distinguished name in |dn|. bool ssl_cert_matches_issuer(const CBS *in, const CBS *dn); // ssl_cert_parse_pubkey extracts the public key from the DER-encoded, X.509 // certificate in |in|. It returns an allocated |EVP_PKEY| or else returns // nullptr and pushes to the error queue. UniquePtr ssl_cert_parse_pubkey(const CBS *in); // SSL_parse_CA_list parses a CA list from |cbs| in the format used by a TLS // CertificateRequest message and Certificate Authorities extension. On success, // it returns a newly-allocated |CRYPTO_BUFFER| list and advances // |cbs|. Otherwise, it returns nullptr and sets |*out_alert| to an alert to // send to the peer. UniquePtr SSL_parse_CA_list(SSL *ssl, uint8_t *out_alert, CBS *cbs); // ssl_has_client_CAs returns whether there are configured CAs. bool ssl_has_client_CAs(const SSL_CONFIG *cfg); // ssl_add_client_CA_list adds the configured CA list to |cbb| in the format // used by a TLS CertificateRequest message. It returns true on success and // false on error. bool ssl_add_client_CA_list(const SSL_HANDSHAKE *hs, CBB *cbb); // ssl_has_CA_names returns whether there are configured CA names. bool ssl_has_CA_names(const SSL_CONFIG *cfg); // ssl_add_CA_names adds the configured CA_names list to |cbb| in the format // used by a TLS Certificate Authorities extension. It returns true on success // and false on error. bool ssl_add_CA_names(const SSL_HANDSHAKE *hs, CBB *cbb); // ssl_check_leaf_certificate returns true if |pkey| and |leaf| are suitable as // a server's leaf certificate for |hs|. Otherwise, it returns false and pushes // an error on the error queue. bool ssl_check_leaf_certificate(SSL_HANDSHAKE *hs, EVP_PKEY *pkey, const CRYPTO_BUFFER *leaf); // TLS 1.3 key derivation.
// tls13_init_key_schedule initializes the handshake hash and key derivation // state, and incorporates the PSK. The cipher suite and PRF hash must have been // selected at this point. It returns true on success and false on error. bool tls13_init_key_schedule(SSL_HANDSHAKE *hs, Span psk); // tls13_init_early_key_schedule initializes the handshake hash and key // derivation state from |session| for use with 0-RTT. It returns one on success // and zero on error. bool tls13_init_early_key_schedule(SSL_HANDSHAKE *hs, const SSL_SESSION *session); // tls13_advance_key_schedule incorporates |in| into the key schedule with // HKDF-Extract. It returns true on success and false on error. bool tls13_advance_key_schedule(SSL_HANDSHAKE *hs, Span in); // tls13_set_traffic_key sets the read or write traffic keys to // |traffic_secret|. The version and cipher suite are determined from |session|. // It returns true on success and false on error. bool tls13_set_traffic_key(SSL *ssl, enum ssl_encryption_level_t level, enum evp_aead_direction_t direction, const SSL_SESSION *session, Span traffic_secret); // tls13_derive_early_secret derives the early traffic secret. It returns true // on success and false on error. bool tls13_derive_early_secret(SSL_HANDSHAKE *hs); // tls13_derive_handshake_secrets derives the handshake traffic secret. It // returns true on success and false on error. bool tls13_derive_handshake_secrets(SSL_HANDSHAKE *hs); // tls13_rotate_traffic_key derives the next read or write traffic secret. It // returns true on success and false on error. bool tls13_rotate_traffic_key(SSL *ssl, enum evp_aead_direction_t direction); // tls13_derive_application_secrets derives the initial application data traffic // and exporter secrets based on the handshake transcripts and |master_secret|. // It returns true on success and false on error. bool tls13_derive_application_secrets(SSL_HANDSHAKE *hs); // tls13_derive_resumption_secret derives the |resumption_secret|. 
bool tls13_derive_resumption_secret(SSL_HANDSHAKE *hs); // tls13_export_keying_material provides an exporter interface to use the // |exporter_secret|. bool tls13_export_keying_material(SSL *ssl, Span out, Span secret, std::string_view label, Span context); // tls13_finished_mac calculates the MAC of the handshake transcript to verify // the integrity of the Finished message, and stores the result in |out| and // length in |out_len|. |is_server| is true if this is for the Server Finished // and false for the Client Finished. bool tls13_finished_mac(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len, bool is_server); // tls13_derive_session_psk calculates the PSK for this session based on the // resumption master secret and |nonce|. It returns true on success, and false // on failure. bool tls13_derive_session_psk(SSL_SESSION *session, Span nonce, bool is_dtls); // tls13_write_psk_binder calculates the PSK binder value over |transcript| and // |msg|, and replaces the last bytes of |msg| with the resulting value. It // returns true on success, and false on failure. If |out_binder_len| is // non-NULL, it sets |*out_binder_len| to the length of the value computed. bool tls13_write_psk_binder(const SSL_HANDSHAKE *hs, const SSLTranscript &transcript, Span msg, size_t *out_binder_len); // tls13_verify_psk_binder verifies that the handshake transcript, truncated up // to the binders has a valid signature using the value of |session|'s // resumption secret. It returns true on success, and false on failure. bool tls13_verify_psk_binder(const SSL_HANDSHAKE *hs, const SSL_SESSION *session, const SSLMessage &msg, CBS *binders); // Encrypted ClientHello. struct ECHConfig { static constexpr bool kAllowUniquePtr = true; // raw contains the serialized ECHConfig. Array raw; // The following fields alias into |raw|. 
Span public_key; Span public_name; Span cipher_suites; uint16_t kem_id = 0; uint8_t maximum_name_length = 0; uint8_t config_id = 0; }; class ECHServerConfig { public: static constexpr bool kAllowUniquePtr = true; ECHServerConfig() = default; ECHServerConfig(const ECHServerConfig &other) = delete; ECHServerConfig &operator=(ECHServerConfig &&) = delete; // Init parses |ech_config| as an ECHConfig and saves a copy of |key|. // It returns true on success and false on error. bool Init(Span ech_config, const EVP_HPKE_KEY *key, bool is_retry_config); // SetupContext sets up |ctx| for a new connection, given the specified // HPKE ciphersuite and encapsulated KEM key. It returns true on success and // false on error. This function may only be called on an initialized object. bool SetupContext(EVP_HPKE_CTX *ctx, uint16_t kdf_id, uint16_t aead_id, Span enc) const; const ECHConfig &ech_config() const { return ech_config_; } bool is_retry_config() const { return is_retry_config_; } private: ECHConfig ech_config_; ScopedEVP_HPKE_KEY key_; bool is_retry_config_ = false; }; enum ssl_client_hello_type_t { ssl_client_hello_unencrypted, ssl_client_hello_inner, ssl_client_hello_outer, }; // ECH_CLIENT_* are types for the ClientHello encrypted_client_hello extension. #define ECH_CLIENT_OUTER 0 #define ECH_CLIENT_INNER 1 // ssl_decode_client_hello_inner recovers the full ClientHelloInner from the // EncodedClientHelloInner |encoded_client_hello_inner| by replacing its // outer_extensions extension with the referenced extensions from the // ClientHelloOuter |client_hello_outer|. If successful, it writes the recovered // ClientHelloInner to |out_client_hello_inner|. It returns true on success and // false on failure. // // This function is exported for fuzzing. 
OPENSSL_EXPORT bool ssl_decode_client_hello_inner( SSL *ssl, uint8_t *out_alert, Array *out_client_hello_inner, Span encoded_client_hello_inner, const SSL_CLIENT_HELLO *client_hello_outer); // ssl_client_hello_decrypt attempts to decrypt and decode the |payload|. It // writes the result to |*out|. |payload| must point into |client_hello_outer|. // It returns true on success and false on error. On error, it sets // |*out_is_decrypt_error| to whether the failure was due to a bad ciphertext. bool ssl_client_hello_decrypt(SSL_HANDSHAKE *hs, uint8_t *out_alert, bool *out_is_decrypt_error, Array *out, const SSL_CLIENT_HELLO *client_hello_outer, Span payload); #define ECH_CONFIRMATION_SIGNAL_LEN 8 // ssl_ech_confirmation_signal_hello_offset returns the offset of the ECH // confirmation signal in a ServerHello message, including the handshake header. size_t ssl_ech_confirmation_signal_hello_offset(const SSL *ssl); // ssl_ech_accept_confirmation computes the server's ECH acceptance signal, // writing it to |out|. The transcript portion is the concatenation of // |transcript| with |msg|. The |ECH_CONFIRMATION_SIGNAL_LEN| bytes from // |offset| in |msg| are replaced with zeros before hashing. This function // returns true on success, and false on failure. bool ssl_ech_accept_confirmation(const SSL_HANDSHAKE *hs, Span out, Span client_random, const SSLTranscript &transcript, bool is_hrr, Span msg, size_t offset); // ssl_is_valid_ech_public_name returns true if |public_name| is a valid ECH // public name and false otherwise. It is exported for testing. OPENSSL_EXPORT bool ssl_is_valid_ech_public_name( Span public_name); // ssl_is_valid_ech_config_list returns true if |ech_config_list| is a valid // ECHConfigList structure and false otherwise. bool ssl_is_valid_ech_config_list(Span ech_config_list); // ssl_select_ech_config selects an ECHConfig and associated parameters to offer // on the client and updates |hs|. 
It returns true on success, whether an // ECHConfig was found or not, and false on internal error. On success, the // encapsulated key is written to |out_enc| and |*out_enc_len| is set to the // number of bytes written. If the function did not select an ECHConfig, the // encapsulated key is the empty string. bool ssl_select_ech_config(SSL_HANDSHAKE *hs, Span out_enc, size_t *out_enc_len); // ssl_ech_extension_body_length returns the length of the body of a ClientHello // ECH extension that encrypts |in_len| bytes with |aead| and an 'enc' value of // length |enc_len|. The result does not include the four-byte extension header. size_t ssl_ech_extension_body_length(const EVP_HPKE_AEAD *aead, size_t enc_len, size_t in_len); // ssl_encrypt_client_hello constructs a new ClientHelloInner, adds it to the // inner transcript, and encrypts for inclusion in the ClientHelloOuter. |enc| // is the encapsulated key to include in the extension. It returns true on // success and false on error. If not offering ECH, |enc| is ignored and the // function will compute a GREASE ECH extension if necessary, and otherwise // return success while doing nothing. // // Encrypting the ClientHelloInner incorporates all extensions in the // ClientHelloOuter, so all other state necessary for |ssl_add_client_hello| // must already be computed. bool ssl_encrypt_client_hello(SSL_HANDSHAKE *hs, Span enc); // Credentials. enum class SSLCredentialType { kX509, kDelegated, }; BSSL_NAMESPACE_END // SSL_CREDENTIAL is exported to C, so it must be defined outside the namespace. struct ssl_credential_st : public bssl::RefCounted { explicit ssl_credential_st(bssl::SSLCredentialType type); ssl_credential_st(const ssl_credential_st &) = delete; ssl_credential_st &operator=(const ssl_credential_st &) = delete; // Dup returns a copy of the credential, or nullptr on error. The |ex_data| // values are not copied. This is only used on the legacy credential, whose // |ex_data| is inaccessible. 
bssl::UniquePtr Dup() const; // ClearCertAndKey erases any certificate and private key on the credential. void ClearCertAndKey(); // UsesX509 returns true if the credential type uses an X.509 certificate. bool UsesX509() const; // UsesPrivateKey returns true if the credential type uses an asymmetric // private key. bool UsesPrivateKey() const; // IsComplete returns whether all required fields in the credential have been // filled in. bool IsComplete() const; // SetLeafCert sets the leaf certificate to |leaf|, leaving the remaining // certificates unmodified. It returns true on success and false on error. If // |discard_key_on_mismatch| is true and the private key is inconsistent with // the new leaf certificate, it is silently discarded. bool SetLeafCert(bssl::UniquePtr leaf, bool discard_key_on_mismatch); // ClearIntermediateCerts clears intermediate certificates in the certificate // chain, while preserving the leaf. void ClearIntermediateCerts(); // AppendIntermediateCert appends |cert| to the certificate chain. If there is // no leaf certificate configured, it leaves a placeholder null in |chain|. It // returns one on success and zero on error. bool AppendIntermediateCert(bssl::UniquePtr cert); // ChainContainsIssuer returns true if |dn| is a byte for byte match with the // issuer of any certificate in |chain|, false otherwise. bool ChainContainsIssuer(bssl::Span dn) const; // type is the credential type and determines which other fields apply. bssl::SSLCredentialType type; // pubkey is the cached public key of the credential. Unlike |privkey|, it is // always present and is extracted from the certificate, delegated credential, // etc. bssl::UniquePtr pubkey; // privkey is the private key of the credential. It may be omitted in favor of // |key_method|. bssl::UniquePtr privkey; // key_method, if non-null, is a set of callbacks to call for private key // operations. 
const SSL_PRIVATE_KEY_METHOD *key_method = nullptr; // sigalgs, if non-empty, is the set of signature algorithms supported by the // private key in decreasing order of preference. If empty, the default list // is used. // // In delegated credentials, this field is not configurable and is instead // computed from the dc_cert_verify_algorithm field. bssl::Array sigalgs; // chain contains the certificate chain, with the leaf at the beginning. The // first element of |chain| may be nullptr to indicate that the leaf // certificate has not yet been set. // If |chain| != nullptr -> len(chain) >= 1 // If |chain[0]| == nullptr -> len(chain) >= 2. // |chain[1..]| != nullptr bssl::UniquePtr chain; // dc is the DelegatedCredential structure, if this is a delegated credential. bssl::UniquePtr dc; // dc_algorithm is the signature scheme of the signature over the delegated // credential itself, made by the end-entity certificate's public key. uint16_t dc_algorithm = 0; // Signed certificate timestamp list to be sent to the client, if requested bssl::UniquePtr signed_cert_timestamp_list; // OCSP response to be sent to the client, if requested. bssl::UniquePtr ocsp_response; CRYPTO_EX_DATA ex_data; // must_match_issuer is a flag indicating that this credential should be // considered only when it matches a peer request for a particular issuer via // a negotiation mechanism (such as the certificate_authorities extension). bool must_match_issuer = false; private: friend RefCounted; ~ssl_credential_st(); }; BSSL_NAMESPACE_BEGIN // ssl_get_credential_list computes |hs|'s credential list. On success, it // writes it to |*out| and returns true. Otherwise, it returns false. The // credential list may be empty, in which case this function will successfully // return an empty array. // // The pointers in the result are only valid until |hs| is next mutated. 
bool ssl_get_credential_list(SSL_HANDSHAKE *hs, Array *out); // ssl_credential_matches_requested_issuers returns true if |cred| is a // usable match for any requested issuers in |hs|, and false with an error // otherwise. bool ssl_credential_matches_requested_issuers(SSL_HANDSHAKE *hs, const SSL_CREDENTIAL *cred); // Handshake functions. enum ssl_hs_wait_t { ssl_hs_error, ssl_hs_ok, ssl_hs_read_server_hello, ssl_hs_read_message, ssl_hs_flush, ssl_hs_certificate_selection_pending, ssl_hs_handoff, ssl_hs_handback, ssl_hs_x509_lookup, ssl_hs_private_key_operation, ssl_hs_pending_session, ssl_hs_pending_ticket, ssl_hs_early_return, ssl_hs_early_data_rejected, ssl_hs_read_end_of_early_data, ssl_hs_read_change_cipher_spec, ssl_hs_certificate_verify, ssl_hs_hints_ready, }; enum ssl_grease_index_t { ssl_grease_cipher = 0, ssl_grease_group, ssl_grease_extension1, ssl_grease_extension2, ssl_grease_version, ssl_grease_ticket_extension, ssl_grease_ech_config_id, ssl_grease_last_index = ssl_grease_ech_config_id, }; enum tls12_server_hs_state_t { state12_start_accept = 0, state12_read_client_hello, state12_read_client_hello_after_ech, state12_cert_callback, state12_tls13, state12_select_parameters, state12_send_server_hello, state12_send_server_certificate, state12_send_server_key_exchange, state12_send_server_hello_done, state12_read_client_certificate, state12_verify_client_certificate, state12_read_client_key_exchange, state12_read_client_certificate_verify, state12_read_change_cipher_spec, state12_process_change_cipher_spec, state12_read_next_proto, state12_read_channel_id, state12_read_client_finished, state12_send_server_finished, state12_finish_server_handshake, state12_done, }; enum tls13_server_hs_state_t { state13_select_parameters = 0, state13_select_session, state13_send_hello_retry_request, state13_read_second_client_hello, state13_send_server_hello, state13_send_server_certificate_verify, state13_send_server_finished, state13_send_half_rtt_ticket, 
state13_read_second_client_flight, state13_process_end_of_early_data, state13_read_client_encrypted_extensions, state13_read_client_certificate, state13_read_client_certificate_verify, state13_read_channel_id, state13_read_client_finished, state13_send_new_session_ticket, state13_done, }; // handback_t lists the points in the state machine where a handback can occur. // These are the different points at which key material is no longer needed. enum handback_t { handback_after_session_resumption = 0, handback_after_ecdhe = 1, handback_after_handshake = 2, handback_tls13 = 3, handback_max_value = handback_tls13, }; // SSL_HANDSHAKE_HINTS contains handshake hints for a connection. See // |SSL_request_handshake_hints| and related functions. struct SSL_HANDSHAKE_HINTS { static constexpr bool kAllowUniquePtr = true; Array server_random_tls12; Array server_random_tls13; uint16_t key_share_group_id = 0; Array key_share_ciphertext; Array key_share_secret; uint16_t signature_algorithm = 0; Array signature_input; Array signature_spki; Array signature; Array decrypted_psk; bool ignore_psk = false; uint16_t cert_compression_alg_id = 0; Array cert_compression_input; Array cert_compression_output; uint16_t ecdhe_group_id = 0; Array ecdhe_public_key; Array ecdhe_private_key; Array decrypted_ticket; bool renew_ticket = false; bool ignore_ticket = false; }; struct SSL_HANDSHAKE { explicit SSL_HANDSHAKE(SSL *ssl); ~SSL_HANDSHAKE(); static constexpr bool kAllowUniquePtr = true; // ssl is a non-owning pointer to the parent |SSL| object. SSL *ssl; // config is a non-owning pointer to the handshake configuration. SSL_CONFIG *config; // wait contains the operation the handshake is currently blocking on or // |ssl_hs_ok| if none. enum ssl_hs_wait_t wait = ssl_hs_ok; // state is the internal state for the TLS 1.2 and below handshake. Its // values depend on |do_handshake| but the starting state is always zero. int state = 0; // tls13_state is the internal state for the TLS 1.3 handshake. 
Its values // depend on |do_handshake| but the starting state is always zero. int tls13_state = 0; // min_version is the minimum accepted protocol version, taking account both // |SSL_OP_NO_*| and |SSL_CTX_set_min_proto_version| APIs. uint16_t min_version = 0; // max_version is the maximum accepted protocol version, taking account both // |SSL_OP_NO_*| and |SSL_CTX_set_max_proto_version| APIs. uint16_t max_version = 0; InplaceVector secret; InplaceVector early_traffic_secret; InplaceVector client_handshake_secret; InplaceVector server_handshake_secret; InplaceVector client_traffic_secret_0; InplaceVector server_traffic_secret_0; InplaceVector expected_client_finished; // GetClientHello, on the server, returns either the normal ClientHello // message or the ClientHelloInner if it has been serialized to // |ech_client_hello_buf|. This function should only be called when the // current message is a ClientHello. It returns true on success and false on // error. // // Note that fields of the returned |out_msg| and |out_client_hello| point // into a handshake-owned buffer, so their lifetimes should not exceed this // SSL_HANDSHAKE. bool GetClientHello(SSLMessage *out_msg, SSL_CLIENT_HELLO *out_client_hello); union { // sent is a bitset where the bits correspond to elements of kExtensions // in extensions.cc. Each bit is set if that extension was sent in a // ClientHello. It's not used by servers. uint32_t sent = 0; // received is a bitset, like |sent|, but is used by servers to record // which extensions were received from a client. uint32_t received; } extensions; // inner_extensions_sent, on clients that offer ECH, is |extensions.sent| for // the ClientHelloInner. uint32_t inner_extensions_sent = 0; // error, if |wait| is |ssl_hs_error|, is the error the handshake failed on. UniquePtr error; // key_shares are the current key exchange instances. The second is only used // as a client if we believe that we should offer two key shares in a // ClientHello. 
UniquePtr key_shares[2]; // transcript is the current handshake transcript. SSLTranscript transcript; // inner_transcript, on the client, is the handshake transcript for the // ClientHelloInner handshake. It is moved to |transcript| if the server // accepts ECH. SSLTranscript inner_transcript; // inner_client_random is the ClientHello random value used with // ClientHelloInner. uint8_t inner_client_random[SSL3_RANDOM_SIZE] = {0}; // cookie is the value of the cookie in HelloRetryRequest, or empty if none // was received. Array cookie; // dtls_cookie is the value of the cookie in DTLS HelloVerifyRequest. If // empty, either none was received or HelloVerifyRequest contained an empty // cookie. Check the received_hello_verify_request field to distinguish an // empty cookie from no HelloVerifyRequest message being received. Array dtls_cookie; // ech_client_outer contains the outer ECH extension to send in the // ClientHello, excluding the header and type byte. Array ech_client_outer; // ech_retry_configs, on the client, contains the retry configs from the // server as a serialized ECHConfigList. Array ech_retry_configs; // ech_client_hello_buf, on the server, contains the bytes of the // reconstructed ClientHelloInner message. Array ech_client_hello_buf; // key_share_bytes is the key_share extension that the client should send. Array key_share_bytes; // key_share_ciphertext, for servers, is encapsulated shared secret to be sent // to the client in the TLS 1.3 key_share extension. Array key_share_ciphertext; // peer_sigalgs are the signature algorithms that the peer supports. These are // taken from the contents of the signature algorithms extension for a server // or from the CertificateRequest for a client. Array peer_sigalgs; // peer_supported_group_list contains the supported group IDs advertised by // the peer. This is only set on the server's end. The server does not // advertise this extension to the client. 
Array peer_supported_group_list; // peer_delegated_credential_sigalgs are the signature algorithms the peer // supports with delegated credentials, or empty if the peer does not support // delegated credentials. Array peer_delegated_credential_sigalgs; // peer_key is the peer's ECDH key for a TLS 1.2 client. Array peer_key; // extension_permutation is the permutation to apply to ClientHello // extensions. It maps indices into the |kExtensions| table into other // indices. Array extension_permutation; // cert_compression_alg_id, for a server, contains the negotiated certificate // compression algorithm for this client. It is only valid if // |cert_compression_negotiated| is true. uint16_t cert_compression_alg_id; // ech_hpke_ctx is the HPKE context used in ECH. On the server, it is // initialized if |ech_status| is |ssl_ech_accepted|. On the client, it is // initialized if |selected_ech_config| is not nullptr. ScopedEVP_HPKE_CTX ech_hpke_ctx; // server_params, in a TLS 1.2 server, stores the ServerKeyExchange // parameters. It has client and server randoms prepended for signing // convenience. Array server_params; // peer_psk_identity_hint, on the client, is the psk_identity_hint sent by the // server when using a TLS 1.2 PSK key exchange. UniquePtr peer_psk_identity_hint; // ca_names contains the list of CAs received via the Certificate Authorities // extension in our peer's CertificateRequest or ClientHello message UniquePtr ca_names; // cached_x509_ca_names contains a cache of parsed versions of the elements of // |ca_names|. This pointer is left non-owning so only // |ssl_crypto_x509_method| needs to link against crypto/x509. STACK_OF(X509_NAME) *cached_x509_ca_names = nullptr; // certificate_types, on the client, contains the set of certificate types // received in a CertificateRequest message. Array certificate_types; // credential is the credential we are using for the handshake. 
UniquePtr credential; // peer_pubkey is the public key parsed from the peer's leaf certificate. UniquePtr peer_pubkey; // new_session is the new mutable session being established by the current // handshake. It should not be cached. UniquePtr new_session; // early_session is the session corresponding to the current 0-RTT state on // the client if |in_early_data| is true. UniquePtr early_session; // ssl_ech_keys, for servers, is the set of ECH keys to use with this // handshake. This is copied from |SSL_CTX| to ensure consistent behavior as // |SSL_CTX| rotates keys. UniquePtr ech_keys; // selected_ech_config, for clients, is the ECHConfig the client uses to offer // ECH, or nullptr if ECH is not being offered. If non-NULL, |ech_hpke_ctx| // will be initialized. UniquePtr selected_ech_config; // new_cipher is the cipher being negotiated in this handshake. const SSL_CIPHER *new_cipher = nullptr; // key_block is the record-layer key block for TLS 1.2 and earlier. Array key_block; // hints contains the handshake hints for this connection. If // |hints_requested| is true, this field is non-null and contains the pending // hints to filled as the predicted handshake progresses. Otherwise, this // field, if non-null, contains hints configured by the caller and will // influence the handshake on match. UniquePtr hints; // ech_is_inner, on the server, indicates whether the ClientHello contained an // inner ECH extension. bool ech_is_inner : 1; // ech_authenticated_reject, on the client, indicates whether an ECH rejection // handshake has been authenticated. bool ech_authenticated_reject : 1; // scts_requested is true if the SCT extension is in the ClientHello. bool scts_requested : 1; // handshake_finalized is true once the handshake has completed, at which // point accessors should use the established state. bool handshake_finalized : 1; // accept_psk_mode stores whether the client's PSK mode is compatible with our // preferences. 
bool accept_psk_mode : 1; // cert_request is true if a client certificate was requested. bool cert_request : 1; // certificate_status_expected is true if OCSP stapling was negotiated and the // server is expected to send a CertificateStatus message. (This is used on // both the client and server sides.) bool certificate_status_expected : 1; // ocsp_stapling_requested is true if a client requested OCSP stapling. bool ocsp_stapling_requested : 1; // should_ack_sni is used by a server and indicates that the SNI extension // should be echoed in the ServerHello. bool should_ack_sni : 1; // in_false_start is true if there is a pending client handshake in False // Start. The client may write data at this point. bool in_false_start : 1; // in_early_data is true if there is a pending handshake that has progressed // enough to send and receive early data. bool in_early_data : 1; // early_data_offered is true if the client sent the early_data extension. bool early_data_offered : 1; // can_early_read is true if application data may be read at this point in the // handshake. bool can_early_read : 1; // can_early_write is true if application data may be written at this point in // the handshake. bool can_early_write : 1; // is_early_version is true if the protocol version configured is not // necessarily the final version and is just the predicted 0-RTT version. bool is_early_version : 1; // next_proto_neg_seen is one of NPN was negotiated. bool next_proto_neg_seen : 1; // ticket_expected is true if a TLS 1.2 NewSessionTicket message is to be sent // or received. bool ticket_expected : 1; // extended_master_secret is true if the extended master secret extension is // negotiated in this handshake. bool extended_master_secret : 1; // pending_private_key_op is true if there is a pending private key operation // in progress. 
bool pending_private_key_op : 1; // handback indicates that a server should pause the handshake after // finishing operations that require private key material, in such a way that // |SSL_get_error| returns |SSL_ERROR_HANDBACK|. It is set by // |SSL_apply_handoff|. bool handback : 1; // hints_requested indicates the caller has requested handshake hints. Only // the first round-trip of the handshake will complete, after which the // |hints| structure can be serialized. bool hints_requested : 1; // cert_compression_negotiated is true iff |cert_compression_alg_id| is valid. bool cert_compression_negotiated : 1; // apply_jdk11_workaround is true if the peer is probably a JDK 11 client // which implemented TLS 1.3 incorrectly. bool apply_jdk11_workaround : 1; // can_release_private_key is true if the private key will no longer be used // in this handshake. bool can_release_private_key : 1; // channel_id_negotiated is true if Channel ID should be used in this // handshake. bool channel_id_negotiated : 1; // received_hello_verify_request is true if we received a HelloVerifyRequest // message from the server. bool received_hello_verify_request : 1; // client_version is the value sent or received in the ClientHello version. uint16_t client_version = 0; // early_data_read is the amount of early data that has been read by the // record layer. uint16_t early_data_read = 0; // early_data_written is the amount of early data that has been written by the // record layer. uint16_t early_data_written = 0; // signature_algorithm is the signature algorithm to be used in signing with // the selected credential, or zero if not applicable or not yet selected. uint16_t signature_algorithm = 0; // ech_config_id is the ECH config sent by the client. uint8_t ech_config_id = 0; // session_id is the session ID in the ClientHello. InplaceVector session_id; // grease_seed is the entropy for GREASE values. 
uint8_t grease_seed[ssl_grease_last_index + 1] = {0}; }; // kMaxTickets is the maximum number of tickets to send immediately after the // handshake. We use a one-byte ticket nonce, and there is no point in sending // so many tickets. constexpr size_t kMaxTickets = 16; UniquePtr ssl_handshake_new(SSL *ssl); // ssl_check_message_type checks if |msg| has type |type|. If so it returns // one. Otherwise, it sends an alert and returns zero. bool ssl_check_message_type(SSL *ssl, const SSLMessage &msg, int type); // ssl_run_handshake runs the TLS handshake. It returns one on success and <= 0 // on error. It sets |out_early_return| to one if we've completed the handshake // early. int ssl_run_handshake(SSL_HANDSHAKE *hs, bool *out_early_return); // The following are implementations of |do_handshake| for the client and // server. enum ssl_hs_wait_t ssl_client_handshake(SSL_HANDSHAKE *hs); enum ssl_hs_wait_t ssl_server_handshake(SSL_HANDSHAKE *hs); enum ssl_hs_wait_t tls13_client_handshake(SSL_HANDSHAKE *hs); enum ssl_hs_wait_t tls13_server_handshake(SSL_HANDSHAKE *hs); // The following functions return human-readable representations of the TLS // handshake states for debugging. const char *ssl_client_handshake_state(SSL_HANDSHAKE *hs); const char *ssl_server_handshake_state(SSL_HANDSHAKE *hs); const char *tls13_client_handshake_state(SSL_HANDSHAKE *hs); const char *tls13_server_handshake_state(SSL_HANDSHAKE *hs); // tls13_add_key_update queues a KeyUpdate message on |ssl|. |request_type| must // be one of |SSL_KEY_UPDATE_REQUESTED| or |SSL_KEY_UPDATE_NOT_REQUESTED|. bool tls13_add_key_update(SSL *ssl, int request_type); // tls13_post_handshake processes a post-handshake message. It returns true on // success and false on failure. 
bool tls13_post_handshake(SSL *ssl, const SSLMessage &msg); bool tls13_process_certificate(SSL_HANDSHAKE *hs, const SSLMessage &msg, bool allow_anonymous); bool tls13_process_certificate_verify(SSL_HANDSHAKE *hs, const SSLMessage &msg); // tls13_process_finished processes |msg| as a Finished message from the // peer. If |use_saved_value| is true, the verify_data is compared against // |hs->expected_client_finished| rather than computed fresh. bool tls13_process_finished(SSL_HANDSHAKE *hs, const SSLMessage &msg, bool use_saved_value); bool tls13_add_certificate(SSL_HANDSHAKE *hs); // tls13_add_certificate_verify adds a TLS 1.3 CertificateVerify message to the // handshake. If it returns |ssl_private_key_retry|, it should be called again // to retry when the signing operation is completed. enum ssl_private_key_result_t tls13_add_certificate_verify(SSL_HANDSHAKE *hs); bool tls13_add_finished(SSL_HANDSHAKE *hs); bool tls13_process_new_session_ticket(SSL *ssl, const SSLMessage &msg); bssl::UniquePtr tls13_create_session_with_ticket(SSL *ssl, CBS *body); // ssl_setup_extension_permutation computes a ClientHello extension permutation // for |hs|, if applicable. It returns true on success and false on error. bool ssl_setup_extension_permutation(SSL_HANDSHAKE *hs); // ssl_setup_key_shares computes client key shares and saves them in |hs|. It // returns true on success and false on failure. If |override_group_id| is zero, // it offers the default groups, including GREASE. If it is non-zero, it offers // a single key share of the specified group. 
bool ssl_setup_key_shares(SSL_HANDSHAKE *hs, uint16_t override_group_id); bool ssl_ext_key_share_parse_serverhello(SSL_HANDSHAKE *hs, Array *out_secret, uint8_t *out_alert, CBS *contents); bool ssl_ext_key_share_parse_clienthello(SSL_HANDSHAKE *hs, bool *out_found, Span *out_peer_key, uint8_t *out_alert, const SSL_CLIENT_HELLO *client_hello); bool ssl_ext_key_share_add_serverhello(SSL_HANDSHAKE *hs, CBB *out); bool ssl_ext_pre_shared_key_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents); bool ssl_ext_pre_shared_key_parse_clienthello( SSL_HANDSHAKE *hs, CBS *out_ticket, CBS *out_binders, uint32_t *out_obfuscated_ticket_age, uint8_t *out_alert, const SSL_CLIENT_HELLO *client_hello, CBS *contents); bool ssl_ext_pre_shared_key_add_serverhello(SSL_HANDSHAKE *hs, CBB *out); // ssl_is_sct_list_valid does a shallow parse of the SCT list in |contents| and // returns whether it's valid. bool ssl_is_sct_list_valid(const CBS *contents); // ssl_write_client_hello_without_extensions writes a ClientHello to |out|, // up to the extensions field. |type| determines the type of ClientHello to // write. If |omit_session_id| is true, the session ID is empty. bool ssl_write_client_hello_without_extensions(const SSL_HANDSHAKE *hs, CBB *cbb, ssl_client_hello_type_t type, bool empty_session_id); // ssl_add_client_hello constructs a ClientHello and adds it to the outgoing // flight. It returns true on success and false on error. bool ssl_add_client_hello(SSL_HANDSHAKE *hs); struct ParsedServerHello { CBS raw; uint16_t legacy_version = 0; CBS random; CBS session_id; uint16_t cipher_suite = 0; uint8_t compression_method = 0; CBS extensions; }; // ssl_parse_server_hello parses |msg| as a ServerHello. On success, it writes // the result to |*out| and returns true. Otherwise, it returns false and sets // |*out_alert| to an alert to send to the peer. 
bool ssl_parse_server_hello(ParsedServerHello *out, uint8_t *out_alert, const SSLMessage &msg); enum ssl_cert_verify_context_t { ssl_cert_verify_server, ssl_cert_verify_client, ssl_cert_verify_channel_id, }; // tls13_get_cert_verify_signature_input generates the message to be signed for // TLS 1.3's CertificateVerify message. |cert_verify_context| determines the // type of signature. It sets |*out| to a newly allocated buffer containing the // result. This function returns true on success and false on failure. bool tls13_get_cert_verify_signature_input( SSL_HANDSHAKE *hs, Array *out, enum ssl_cert_verify_context_t cert_verify_context); // ssl_is_valid_alpn_list returns whether |in| is a valid ALPN protocol list. bool ssl_is_valid_alpn_list(Span in); // ssl_is_alpn_protocol_allowed returns whether |protocol| is a valid server // selection for |hs->ssl|'s client preferences. bool ssl_is_alpn_protocol_allowed(const SSL_HANDSHAKE *hs, Span protocol); // ssl_alpn_list_contains_protocol returns whether |list|, a serialized ALPN // protocol list, contains |protocol|. bool ssl_alpn_list_contains_protocol(Span list, Span protocol); // ssl_negotiate_alpn negotiates the ALPN extension, if applicable. It returns // true on successful negotiation or if nothing was negotiated. It returns false // and sets |*out_alert| to an alert on error. bool ssl_negotiate_alpn(SSL_HANDSHAKE *hs, uint8_t *out_alert, const SSL_CLIENT_HELLO *client_hello); // ssl_get_local_application_settings looks up the configured ALPS value for // |protocol|. If found, it sets |*out_settings| to the value and returns true. // Otherwise, it returns false. bool ssl_get_local_application_settings(const SSL_HANDSHAKE *hs, Span *out_settings, Span protocol); // ssl_negotiate_alps negotiates the ALPS extension, if applicable. It returns // true on successful negotiation or if nothing was negotiated. It returns false // and sets |*out_alert| to an alert on error. 
bool ssl_negotiate_alps(SSL_HANDSHAKE *hs, uint8_t *out_alert, const SSL_CLIENT_HELLO *client_hello); struct SSLExtension { SSLExtension(uint16_t type_arg, bool allowed_arg = true) : type(type_arg), allowed(allowed_arg), present(false) { CBS_init(&data, nullptr, 0); } uint16_t type; bool allowed; bool present; CBS data; }; // ssl_parse_extensions parses a TLS extensions block out of |cbs| and advances // it. It writes the parsed extensions to pointers in |extensions|. On success, // it fills in the |present| and |data| fields and returns true. Otherwise, it // sets |*out_alert| to an alert to send and returns false. Unknown extensions // are rejected unless |ignore_unknown| is true. bool ssl_parse_extensions(const CBS *cbs, uint8_t *out_alert, std::initializer_list extensions, bool ignore_unknown); // ssl_verify_peer_cert verifies the peer certificate for |hs|. enum ssl_verify_result_t ssl_verify_peer_cert(SSL_HANDSHAKE *hs); // ssl_reverify_peer_cert verifies the peer certificate for |hs| when resuming a // session. enum ssl_verify_result_t ssl_reverify_peer_cert(SSL_HANDSHAKE *hs, bool send_alert); enum ssl_hs_wait_t ssl_get_finished(SSL_HANDSHAKE *hs); // ssl_send_finished adds a Finished message to the current flight of messages. // It returns true on success and false on error. bool ssl_send_finished(SSL_HANDSHAKE *hs); // ssl_send_tls12_certificate adds a TLS 1.2 Certificate message to the current // flight of messages. It returns true on success and false on error. bool ssl_send_tls12_certificate(SSL_HANDSHAKE *hs); // ssl_handshake_session returns the |SSL_SESSION| corresponding to the current // handshake. Note, in TLS 1.2 resumptions, this session is immutable. const SSL_SESSION *ssl_handshake_session(const SSL_HANDSHAKE *hs); // ssl_done_writing_client_hello is called after the last ClientHello is written // by |hs|. It releases some memory that is no longer needed. void ssl_done_writing_client_hello(SSL_HANDSHAKE *hs); // SSLKEYLOGFILE functions. 
// ssl_log_secret logs |secret| with label |label|, if logging is enabled for // |ssl|. It returns true on success and false on failure. bool ssl_log_secret(const SSL *ssl, const char *label, Span secret); // ClientHello functions. // ssl_client_hello_init parses |body| as a ClientHello message, excluding the // message header, and writes the result to |*out|. It returns true on success // and false on error. This function is exported for testing. OPENSSL_EXPORT bool ssl_client_hello_init(const SSL *ssl, SSL_CLIENT_HELLO *out, Span body); bool ssl_parse_client_hello_with_trailing_data(const SSL *ssl, CBS *cbs, SSL_CLIENT_HELLO *out); bool ssl_client_hello_get_extension(const SSL_CLIENT_HELLO *client_hello, CBS *out, uint16_t extension_type); bool ssl_client_cipher_list_contains_cipher( const SSL_CLIENT_HELLO *client_hello, uint16_t id); // GREASE. // ssl_get_grease_value returns a GREASE value for |hs|. For a given // connection, the values for each index will be deterministic. This allows the // same ClientHello be sent twice for a HelloRetryRequest or the same group be // advertised in both supported_groups and key_shares. uint16_t ssl_get_grease_value(const SSL_HANDSHAKE *hs, enum ssl_grease_index_t index); // Signature algorithms. // tls1_parse_peer_sigalgs parses |sigalgs| as the list of peer signature // algorithms and saves them on |hs|. It returns true on success and false on // error. bool tls1_parse_peer_sigalgs(SSL_HANDSHAKE *hs, const CBS *sigalgs); // tls1_get_legacy_signature_algorithm sets |*out| to the signature algorithm // that should be used with |pkey| in TLS 1.1 and earlier. It returns true on // success and false if |pkey| may not be used at those versions. bool tls1_get_legacy_signature_algorithm(uint16_t *out, const EVP_PKEY *pkey); // tls1_choose_signature_algorithm sets |*out| to a signature algorithm for use // with |cred| based on the peer's preferences and the algorithms supported. It // returns true on success and false on error. 
bool tls1_choose_signature_algorithm(SSL_HANDSHAKE *hs, const SSL_CREDENTIAL *cred, uint16_t *out); // tls12_add_verify_sigalgs adds the signature algorithms acceptable for the // peer signature to |out|. It returns true on success and false on error. bool tls12_add_verify_sigalgs(const SSL_HANDSHAKE *hs, CBB *out); // tls12_check_peer_sigalg checks if |sigalg| is acceptable for the peer // signature from |pkey|. It returns true on success and false on error, setting // |*out_alert| to an alert to send. bool tls12_check_peer_sigalg(const SSL_HANDSHAKE *hs, uint8_t *out_alert, uint16_t sigalg, EVP_PKEY *pkey); // Underdocumented functions. // // Functions below here haven't been touched up and may be underdocumented. #define TLSEXT_CHANNEL_ID_SIZE 128 // From RFC 4492, used in encoding the curve type in ECParameters #define NAMED_CURVE_TYPE 3 struct CERT { static constexpr bool kAllowUniquePtr = true; explicit CERT(const SSL_X509_METHOD *x509_method); ~CERT(); bool is_valid() const { return legacy_credential != nullptr; } // credentials is the list of credentials to select between. Elements of this // array immutable. Vector> credentials; // legacy_credential is the credential configured by the legacy // non-credential-based APIs. If IsComplete() returns true, it is appended to // the list of credentials. UniquePtr legacy_credential; // x509_method contains pointers to functions that might deal with |X509| // compatibility, or might be a no-op, depending on the application. const SSL_X509_METHOD *x509_method = nullptr; // x509_chain may contain a parsed copy of |chain[1..]| from the legacy // credential. This is only used as a cache in order to implement “get0” // functions that return a non-owning pointer to the certificate chain. STACK_OF(X509) *x509_chain = nullptr; // x509_leaf may contain a parsed copy of the first element of |chain| from // the legacy credential. 
This is only used as a cache in order to implement // “get0” functions that return a non-owning pointer to the certificate chain. X509 *x509_leaf = nullptr; // x509_stash contains the last |X509| object append to the legacy // credential's chain. This is a workaround for some third-party code that // continue to use an |X509| object even after passing ownership with an // “add0” function. X509 *x509_stash = nullptr; // Certificate setup callback: if set is called whenever a // certificate may be required (client or server). the callback // can then examine any appropriate parameters and setup any // certificates required. This allows advanced applications // to select certificates on the fly: for example based on // supported signature algorithms or curves. int (*cert_cb)(SSL *ssl, void *arg) = nullptr; void *cert_cb_arg = nullptr; // Optional X509_STORE for certificate validation. If NULL the parent SSL_CTX // store is used instead. X509_STORE *verify_store = nullptr; // sid_ctx partitions the session space within a shared session cache or // ticket key. Only sessions with a matching value will be accepted. InplaceVector sid_ctx; }; // |SSL_PROTOCOL_METHOD| abstracts between TLS and DTLS. struct SSL_PROTOCOL_METHOD { bool is_dtls; bool (*ssl_new)(SSL *ssl); void (*ssl_free)(SSL *ssl); // get_message sets |*out| to the current handshake message and returns true // if one has been received. It returns false if more input is needed. bool (*get_message)(const SSL *ssl, SSLMessage *out); // next_message is called to release the current handshake message. void (*next_message)(SSL *ssl); // has_unprocessed_handshake_data returns whether there is buffered // handshake data that has not been consumed by |get_message|. bool (*has_unprocessed_handshake_data)(const SSL *ssl); // Use the |ssl_open_handshake| wrapper. ssl_open_record_t (*open_handshake)(SSL *ssl, size_t *out_consumed, uint8_t *out_alert, Span in); // Use the |ssl_open_change_cipher_spec| wrapper. 
ssl_open_record_t (*open_change_cipher_spec)(SSL *ssl, size_t *out_consumed, uint8_t *out_alert, Span in); // Use the |ssl_open_app_data| wrapper. ssl_open_record_t (*open_app_data)(SSL *ssl, Span *out, size_t *out_consumed, uint8_t *out_alert, Span in); // write_app_data encrypts and writes |in| as application data. On success, it // returns one and sets |*out_bytes_written| to the number of bytes of |in| // written. Otherwise, it returns <= 0 and sets |*out_needs_handshake| to // whether the operation failed because the caller needs to drive the // handshake. int (*write_app_data)(SSL *ssl, bool *out_needs_handshake, size_t *out_bytes_written, Span in); int (*dispatch_alert)(SSL *ssl); // init_message begins a new handshake message of type |type|. |cbb| is the // root CBB to be passed into |finish_message|. |*body| is set to a child CBB // the caller should write to. It returns true on success and false on error. bool (*init_message)(const SSL *ssl, CBB *cbb, CBB *body, uint8_t type); // finish_message finishes a handshake message. It sets |*out_msg| to the // serialized message. It returns true on success and false on error. bool (*finish_message)(const SSL *ssl, CBB *cbb, bssl::Array *out_msg); // add_message adds a handshake message to the pending flight. It returns // true on success and false on error. bool (*add_message)(SSL *ssl, bssl::Array msg); // add_change_cipher_spec adds a ChangeCipherSpec record to the pending // flight. It returns true on success and false on error. bool (*add_change_cipher_spec)(SSL *ssl); // finish_flight marks the pending flight as finished and ready to send. // |flush| must be called to write it. void (*finish_flight)(SSL *ssl); // schedule_ack schedules a DTLS 1.3 ACK to be sent, without an ACK delay. // |flush| must be called to write it. void (*schedule_ack)(SSL *ssl); // flush writes any scheduled data to the transport. It returns one on success // and <= 0 on error. 
int (*flush)(SSL *ssl); // on_handshake_complete is called when the handshake is complete. void (*on_handshake_complete)(SSL *ssl); // set_read_state sets |ssl|'s read cipher state and level to |aead_ctx| and // |level|. In QUIC, |aead_ctx| is a placeholder object. In TLS 1.3, // |traffic_secret| is the original traffic secret. This function returns true // on success and false on error. // // TODO(crbug.com/371998381): Take the traffic secrets as input and let the // function create the SSLAEADContext. bool (*set_read_state)(SSL *ssl, ssl_encryption_level_t level, UniquePtr aead_ctx, Span traffic_secret); // set_write_state sets |ssl|'s write cipher state and level to |aead_ctx| and // |level|. In QUIC, |aead_ctx| is a placeholder object In TLS 1.3, // |traffic_secret| is the original traffic secret. This function returns true // on success and false on error. // // TODO(crbug.com/371998381): Take the traffic secrets as input and let the // function create the SSLAEADContext. bool (*set_write_state)(SSL *ssl, ssl_encryption_level_t level, UniquePtr aead_ctx, Span traffic_secret); }; // The following wrappers call |open_*| but handle |read_shutdown| correctly. // ssl_open_handshake processes a record from |in| for reading a handshake // message. ssl_open_record_t ssl_open_handshake(SSL *ssl, size_t *out_consumed, uint8_t *out_alert, Span in); // ssl_open_change_cipher_spec processes a record from |in| for reading a // ChangeCipherSpec. ssl_open_record_t ssl_open_change_cipher_spec(SSL *ssl, size_t *out_consumed, uint8_t *out_alert, Span in); // ssl_open_app_data processes a record from |in| for reading application data. // On success, it returns |ssl_open_record_success| and sets |*out| to the // input. If it encounters a post-handshake message, it returns // |ssl_open_record_discard|. The caller should then retry, after processing any // messages received with |get_message|. 
ssl_open_record_t ssl_open_app_data(SSL *ssl, Span *out, size_t *out_consumed, uint8_t *out_alert, Span in); struct SSL_X509_METHOD { // check_CA_list returns one if |names| is a good list of X.509 distinguished // names and zero otherwise. This is used to ensure that we can reject // unparsable values at handshake time when using crypto/x509. bool (*check_CA_list)(STACK_OF(CRYPTO_BUFFER) *names); // cert_clear frees and NULLs all X509 certificate-related state. void (*cert_clear)(CERT *cert); // cert_free frees all X509-related state. void (*cert_free)(CERT *cert); // cert_flush_cached_chain drops any cached |X509|-based certificate chain // from |cert|. // cert_dup duplicates any needed fields from |cert| to |new_cert|. void (*cert_dup)(CERT *new_cert, const CERT *cert); void (*cert_flush_cached_chain)(CERT *cert); // cert_flush_cached_chain drops any cached |X509|-based leaf certificate // from |cert|. void (*cert_flush_cached_leaf)(CERT *cert); // session_cache_objects fills out |sess->x509_peer| and |sess->x509_chain| // from |sess->certs| and erases |sess->x509_chain_without_leaf|. It returns // true on success or false on error. bool (*session_cache_objects)(SSL_SESSION *session); // session_dup duplicates any needed fields from |session| to |new_session|. // It returns true on success or false on error. bool (*session_dup)(SSL_SESSION *new_session, const SSL_SESSION *session); // session_clear frees any X509-related state from |session|. void (*session_clear)(SSL_SESSION *session); // session_verify_cert_chain verifies the certificate chain in |session|, // sets |session->verify_result| and returns true on success or false on // error. bool (*session_verify_cert_chain)(SSL_SESSION *session, SSL_HANDSHAKE *ssl, uint8_t *out_alert); // hs_flush_cached_ca_names drops any cached |X509_NAME|s from |hs|. void (*hs_flush_cached_ca_names)(SSL_HANDSHAKE *hs); // ssl_new does any necessary initialisation of |hs|. It returns true on // success or false on error. 
bool (*ssl_new)(SSL_HANDSHAKE *hs); // ssl_free frees anything created by |ssl_new|. void (*ssl_config_free)(SSL_CONFIG *cfg); // ssl_flush_cached_client_CA drops any cached |X509_NAME|s from |ssl|. void (*ssl_flush_cached_client_CA)(SSL_CONFIG *cfg); // ssl_auto_chain_if_needed runs the deprecated auto-chaining logic if // necessary. On success, it updates |ssl|'s certificate configuration as // needed and returns true. Otherwise, it returns false. bool (*ssl_auto_chain_if_needed)(SSL_HANDSHAKE *hs); // ssl_ctx_new does any necessary initialisation of |ctx|. It returns true on // success or false on error. bool (*ssl_ctx_new)(SSL_CTX *ctx); // ssl_ctx_free frees anything created by |ssl_ctx_new|. void (*ssl_ctx_free)(SSL_CTX *ctx); // ssl_ctx_flush_cached_client_CA drops any cached |X509_NAME|s from |ctx|. void (*ssl_ctx_flush_cached_client_CA)(SSL_CTX *ssl); }; // ssl_crypto_x509_method provides the |SSL_X509_METHOD| functions using // crypto/x509. extern const SSL_X509_METHOD ssl_crypto_x509_method; // ssl_noop_x509_method provides the |SSL_X509_METHOD| functions that avoid // crypto/x509. extern const SSL_X509_METHOD ssl_noop_x509_method; struct TicketKey { static constexpr bool kAllowUniquePtr = true; uint8_t name[SSL_TICKET_KEY_NAME_LEN] = {0}; uint8_t hmac_key[16] = {0}; uint8_t aes_key[16] = {0}; // next_rotation_tv_sec is the time (in seconds from the epoch) when the // current key should be superseded by a new key, or the time when a previous // key should be dropped. If zero, then the key should not be automatically // rotated. 
uint64_t next_rotation_tv_sec = 0; }; struct CertCompressionAlg { static constexpr bool kAllowUniquePtr = true; ssl_cert_compression_func_t compress = nullptr; ssl_cert_decompression_func_t decompress = nullptr; uint16_t alg_id = 0; }; BSSL_NAMESPACE_END DEFINE_LHASH_OF(SSL_SESSION) BSSL_NAMESPACE_BEGIN // An ssl_shutdown_t describes the shutdown state of one end of the connection, // whether it is alive or has been shutdown via close_notify or fatal alert. enum ssl_shutdown_t { ssl_shutdown_none = 0, ssl_shutdown_close_notify = 1, ssl_shutdown_error = 2, }; enum ssl_ech_status_t { // ssl_ech_none indicates ECH was not offered, or we have not gotten far // enough in the handshake to determine the status. ssl_ech_none, // ssl_ech_accepted indicates the server accepted ECH. ssl_ech_accepted, // ssl_ech_rejected indicates the server was offered ECH but rejected it. ssl_ech_rejected, }; struct SSL3_STATE { static constexpr bool kAllowUniquePtr = true; SSL3_STATE(); ~SSL3_STATE(); uint64_t read_sequence = 0; uint64_t write_sequence = 0; uint8_t server_random[SSL3_RANDOM_SIZE] = {0}; uint8_t client_random[SSL3_RANDOM_SIZE] = {0}; // read_buffer holds data from the transport to be processed. SSLBuffer read_buffer; // write_buffer holds data to be written to the transport. SSLBuffer write_buffer; // pending_app_data is the unconsumed application data. It points into // |read_buffer|. Span pending_app_data; // unreported_bytes_written is the number of bytes successfully written to the // transport, but not yet reported to the caller. The next |SSL_write| will // skip this many bytes from the input. This is used if // |SSL_MODE_ENABLE_PARTIAL_WRITE| is disabled, in which case |SSL_write| only // reports bytes written when the full caller input is written. size_t unreported_bytes_written = 0; // pending_write, if |has_pending_write| is true, is the caller-supplied data // corresponding to the current pending write. 
This is used to check the // caller retried with a compatible buffer. Span pending_write; // pending_write_type, if |has_pending_write| is true, is the record type // for the current pending write. // // TODO(davidben): Remove this when alerts are moved out of this write path. uint8_t pending_write_type = 0; // read_shutdown is the shutdown state for the read half of the connection. enum ssl_shutdown_t read_shutdown = ssl_shutdown_none; // write_shutdown is the shutdown state for the write half of the connection. enum ssl_shutdown_t write_shutdown = ssl_shutdown_none; // read_error, if |read_shutdown| is |ssl_shutdown_error|, is the error for // the receive half of the connection. UniquePtr read_error; int total_renegotiations = 0; // This holds a variable that indicates what we were doing when a 0 or -1 is // returned. This is needed for non-blocking IO so we know what request // needs re-doing when in SSL_accept or SSL_connect int rwstate = SSL_ERROR_NONE; enum ssl_encryption_level_t quic_read_level = ssl_encryption_initial; enum ssl_encryption_level_t quic_write_level = ssl_encryption_initial; // version is the protocol version, or zero if the version has not yet been // set. In clients offering 0-RTT, this version will initially be set to the // early version, then switched to the final version. To distinguish these // cases, use |ssl_has_final_version|. uint16_t version = 0; // early_data_skipped is the amount of early data that has been skipped by the // record layer. uint16_t early_data_skipped = 0; // empty_record_count is the number of consecutive empty records received. uint8_t empty_record_count = 0; // warning_alert_count is the number of consecutive warning alerts // received. uint8_t warning_alert_count = 0; // key_update_count is the number of consecutive KeyUpdates received. uint8_t key_update_count = 0; // ech_status indicates whether ECH was accepted by the server. 
ssl_ech_status_t ech_status = ssl_ech_none; // skip_early_data instructs the record layer to skip unexpected early data // messages when 0RTT is rejected. bool skip_early_data : 1; // v2_hello_done is true if the peer's V2ClientHello, if any, has been handled // and future messages should use the record layer. bool v2_hello_done : 1; // is_v2_hello is true if the current handshake message was derived from a // V2ClientHello rather than received from the peer directly. bool is_v2_hello : 1; // has_message is true if the current handshake message has been returned // at least once by |get_message| and false otherwise. bool has_message : 1; // initial_handshake_complete is true if the initial handshake has // completed. bool initial_handshake_complete : 1; // session_reused indicates whether a session was resumed. bool session_reused : 1; bool send_connection_binding : 1; // channel_id_valid is true if, on the server, the client has negotiated a // Channel ID and the |channel_id| field is filled in. bool channel_id_valid : 1; // key_update_pending is true if we are in the process of sending a KeyUpdate // message. As a DoS mitigation (and a requirement in DTLS), we never send // more than one KeyUpdate at once. In DTLS, this tracks whether there is an // unACKed KeyUpdate. bool key_update_pending : 1; // early_data_accepted is true if early data was accepted by the server. bool early_data_accepted : 1; // alert_dispatch is true there is an alert in |send_alert| to be sent. bool alert_dispatch : 1; // renegotiate_pending is whether the read half of the channel is blocked on a // HelloRequest. bool renegotiate_pending : 1; // used_hello_retry_request is whether the handshake used a TLS 1.3 // HelloRetryRequest message. bool used_hello_retry_request : 1; // was_key_usage_invalid is whether the handshake succeeded despite using a // TLS mode which was incompatible with the leaf certificate's keyUsage // extension. 
bool was_key_usage_invalid : 1; // hs_buf is the buffer of handshake data to process. UniquePtr hs_buf; // pending_hs_data contains the pending handshake data that has not yet // been encrypted to |pending_flight|. This allows packing the handshake into // fewer records. UniquePtr pending_hs_data; // pending_flight is the pending outgoing flight. This is used to flush each // handshake flight in a single write. |write_buffer| must be written out // before this data. UniquePtr pending_flight; // pending_flight_offset is the number of bytes of |pending_flight| which have // been successfully written. uint32_t pending_flight_offset = 0; // ticket_age_skew is the difference, in seconds, between the client-sent // ticket age and the server-computed value in TLS 1.3 server connections // which resumed a session. int32_t ticket_age_skew = 0; // ssl_early_data_reason stores details on why 0-RTT was accepted or rejected. enum ssl_early_data_reason_t early_data_reason = ssl_early_data_unknown; // aead_read_ctx is the current read cipher state. UniquePtr aead_read_ctx; // aead_write_ctx is the current write cipher state. UniquePtr aead_write_ctx; // hs is the handshake state for the current handshake or NULL if there isn't // one. UniquePtr hs; InplaceVector write_traffic_secret; InplaceVector read_traffic_secret; InplaceVector exporter_secret; // Connection binding to prevent renegotiation attacks InplaceVector previous_client_finished; InplaceVector previous_server_finished; uint8_t send_alert[2] = {0}; // established_session is the session established by the connection. This // session is only filled upon the completion of the handshake and is // immutable. UniquePtr established_session; // Next protocol negotiation. For the client, this is the protocol that we // sent in NextProtocol and is set when handling ServerHello extensions. 
// // For a server, this is the client's selected_protocol from NextProtocol and // is set when handling the NextProtocol message, before the Finished // message. Array next_proto_negotiated; // ALPN information // (we are in the process of transitioning from NPN to ALPN.) // In a server these point to the selected ALPN protocol after the // ClientHello has been processed. In a client these contain the protocol // that the server selected once the ServerHello has been processed. Array alpn_selected; // hostname, on the server, is the value of the SNI extension. UniquePtr hostname; // For a server: // If |channel_id_valid| is true, then this contains the // verified Channel ID from the client: a P256 point, (x,y), where // each are big-endian values. uint8_t channel_id[64] = {0}; // Contains the QUIC transport params received by the peer. Array peer_quic_transport_params; // srtp_profile is the selected SRTP protection profile for // DTLS-SRTP. const SRTP_PROTECTION_PROFILE *srtp_profile = nullptr; }; // lengths of messages #define DTLS1_RT_MAX_HEADER_LENGTH 13 // DTLS_PLAINTEXT_RECORD_HEADER_LENGTH is the length of the DTLS record header // for plaintext records (in DTLS 1.3) or DTLS versions <= 1.2. #define DTLS_PLAINTEXT_RECORD_HEADER_LENGTH 13 // DTLS1_3_RECORD_HEADER_LENGTH is the length of the DTLS 1.3 record header // sent by BoringSSL for encrypted records. Note that received encrypted DTLS // 1.3 records might have a different length header. 
#define DTLS1_3_RECORD_HEADER_WRITE_LENGTH 5

static_assert(DTLS1_RT_MAX_HEADER_LENGTH >= DTLS_PLAINTEXT_RECORD_HEADER_LENGTH,
              "DTLS1_RT_MAX_HEADER_LENGTH must not be smaller than defined "
              "record header lengths");
static_assert(DTLS1_RT_MAX_HEADER_LENGTH >= DTLS1_3_RECORD_HEADER_WRITE_LENGTH,
              "DTLS1_RT_MAX_HEADER_LENGTH must not be smaller than defined "
              "record header lengths");

#define DTLS1_HM_HEADER_LENGTH 12

// NOTE(review): throughout this region, template argument lists appear to have
// been stripped by text extraction (e.g. |Array|, |Span|, |UniquePtr| are
// written without their element types). Restore from the upstream BoringSSL
// header before compiling — TODO confirm against upstream.

// A DTLSMessageBitmap maintains a list of bits which may be marked to indicate
// a portion of a message was received or ACKed.
class DTLSMessageBitmap {
 public:
  // A Range represents a range of bits from |start|, inclusive, to |end|,
  // exclusive.
  struct Range {
    size_t start = 0;
    size_t end = 0;

    bool empty() const { return start == end; }
    size_t size() const { return end - start; }
    bool operator==(const Range &r) const {
      return start == r.start && end == r.end;
    }
    bool operator!=(const Range &r) const { return !(*this == r); }
  };

  // Init initializes the structure with |num_bits| unmarked bits, from zero
  // to |num_bits - 1|.
  bool Init(size_t num_bits);

  // MarkRange marks the bits from |start|, inclusive, to |end|, exclusive.
  void MarkRange(size_t start, size_t end);

  // NextUnmarkedRange returns the next range of unmarked bits, starting from
  // |start|, inclusive. If all bits after |start| are marked, it returns an
  // empty range.
  Range NextUnmarkedRange(size_t start) const;

  // IsComplete returns whether every bit in the bitmask has been marked.
  bool IsComplete() const { return bytes_.empty(); }

 private:
  // bytes_ contains the unmarked bits. We maintain an invariant: if |bytes_| is
  // not empty, some bit is unset.
  Array bytes_;
  // first_unmarked_byte_ is the index of first byte in |bytes_| that is not
  // 0xff. This is maintained to amortize checking if the message is complete.
  size_t first_unmarked_byte_ = 0;
};

// hm_header_st is a parsed DTLS handshake-fragment header (see
// |dtls1_parse_fragment|).
struct hm_header_st {
  uint8_t type;
  uint32_t msg_len;
  uint16_t seq;
  uint32_t frag_off;
  uint32_t frag_len;
};

// A DTLSIncomingMessage is an incoming DTLS message, possibly not yet
// assembled.
struct DTLSIncomingMessage {
  static constexpr bool kAllowUniquePtr = true;

  Span msg() { return Span(data).subspan(DTLS1_HM_HEADER_LENGTH); }
  Span msg() const { return Span(data).subspan(DTLS1_HM_HEADER_LENGTH); }
  size_t msg_len() const { return msg().size(); }

  // type is the type of the message.
  uint8_t type = 0;

  // seq is the sequence number of this message.
  uint16_t seq = 0;

  // data contains the message, including the message header of length
  // |DTLS1_HM_HEADER_LENGTH|.
  Array data;

  // reassembly tracks which parts of the message have been received.
  DTLSMessageBitmap reassembly;
};

// A DTLSOutgoingMessage is a handshake message (or ChangeCipherSpec) queued
// for transmission, together with ACK-tracking state.
struct DTLSOutgoingMessage {
  size_t msg_len() const {
    assert(!is_ccs);
    assert(data.size() >= DTLS1_HM_HEADER_LENGTH);
    return data.size() - DTLS1_HM_HEADER_LENGTH;
  }

  bool IsFullyAcked() const {
    // ACKs only exist in DTLS 1.3, which does not send ChangeCipherSpec.
    return !is_ccs && acked.IsComplete();
  }

  Array data;
  uint16_t epoch = 0;
  bool is_ccs = false;
  // acked tracks which bits of the message have been ACKed by the peer. If
  // |msg_len| is zero, it tracks one bit for whether the header has been
  // received.
  DTLSMessageBitmap acked;
};

struct OPENSSL_timeval {
  uint64_t tv_sec;
  uint32_t tv_usec;
};

// DTLSTimer tracks a single scheduled timeout (retransmit or ACK) against the
// |OPENSSL_timeval| clock.
struct DTLSTimer {
 public:
  static constexpr uint64_t kNever = UINT64_MAX;

  // StartMicroseconds schedules the timer to expire the specified number of
  // microseconds from |now|.
  void StartMicroseconds(OPENSSL_timeval now, uint64_t microseconds);

  // Stop disables the timer.
  void Stop();

  // IsExpired returns true if the timer was set and is expired at time |now|.
  bool IsExpired(OPENSSL_timeval now) const;

  // IsSet returns true if the timer is scheduled or expired, and false if it is
  // stopped.
  bool IsSet() const;

  // MicrosecondsRemaining returns the time remaining, in microseconds, at
  // |now|, or |kNever| if the timer is unset.
  uint64_t MicrosecondsRemaining(OPENSSL_timeval now) const;

 private:
  // expire_time_ is the time when the timer expires, or zero if the timer is
  // unset.
  //
  // TODO(crbug.com/366284846): This is an extremely inconvenient time
  // representation. Switch libssl to something like a 64-bit count of
  // microseconds. While it's decidedly past 1970 now, zero is a less obviously
  // sound distinguished value for the monotonic clock, so maybe we should use a
  // different distinguished time, like |INT64_MAX| in the microseconds
  // representation.
  OPENSSL_timeval expire_time_ = {0, 0};
};

// DTLS_MAX_EXTRA_WRITE_EPOCHS is the maximum number of additional write epochs
// that DTLS may need to retain.
//
// The maximum is, as a DTLS 1.3 server, immediately after sending Finished. At
// this point, the current epoch is the application write keys (epoch 3), but we
// may have ServerHello (epoch 0) and EncryptedExtensions (epoch 1) to
// retransmit. KeyUpdate does not increase this count. If the server were to
// initiate KeyUpdate from this state, it would not apply the new epoch until
// the client's ACKs have caught up. At that point, epochs 0 and 1 can be
// discarded.
#define DTLS_MAX_EXTRA_WRITE_EPOCHS 2

// DTLS_MAX_ACK_BUFFER is the maximum number of records worth of data we'll keep
// track of with DTLS 1.3 ACKs. When we exceed this value, information about
// stale records will be dropped. This will not break the connection but may
// cause ACKs to perform worse and retransmit unnecessary information.
#define DTLS_MAX_ACK_BUFFER 32

// A DTLSSentRecord records information about a record we sent. Each record
// covers all bytes from |first_msg_start| (inclusive) of |first_msg| to
// |last_msg_end| (exclusive) of |last_msg|. Messages are referenced by index
// into |outgoing_messages|.
// |last_msg| may be |outgoing_messages.size()| if |last_msg_end| is zero.
//
// When the message is empty, |first_msg_start| and |last_msg_end| are
// maintained as if there is a single bit in the message representing the
// header. See |acked| in DTLSOutgoingMessage.
struct DTLSSentRecord {
  DTLSRecordNumber number;
  // NOTE(review): the |PackedSize| template arguments appear stripped by
  // extraction — restore from upstream before compiling (TODO confirm).
  PackedSize first_msg = 0;
  PackedSize last_msg = 0;
  uint32_t first_msg_start = 0;
  uint32_t last_msg_end = 0;
};

// QueuedKeyUpdate describes a KeyUpdate that is waiting to be sent, and
// whether it asks the peer to update in turn.
enum class QueuedKeyUpdate {
  kNone,
  kUpdateNotRequested,
  kUpdateRequested,
};

// DTLS_PREV_READ_EPOCH_EXPIRE_SECONDS is how long to retain the previous read
// epoch in DTLS 1.3. This value is set based on the following:
//
// - Section 4.2.1 of RFC 9147 recommends retaining past read epochs for the
//   default TCP MSL. This accommodates packet reordering with KeyUpdate.
//
// - Section 5.8.1 of RFC 9147 requires being capable of ACKing the client's
//   final flight for at least twice the default MSL. That requires retaining
//   epoch 2 after the handshake.
//
// - Section 4 of RFC 9293 defines the MSL to be two minutes.
#define DTLS_PREV_READ_EPOCH_EXPIRE_SECONDS (4 * 60)

// DTLSPrevReadEpoch is a previous read epoch retained for a limited time so
// reordered records can still be decrypted and ACKed.
struct DTLSPrevReadEpoch {
  static constexpr bool kAllowUniquePtr = true;

  DTLSReadEpoch epoch;

  // expire is the expiration time of the read epoch, expressed as a POSIX
  // timestamp in seconds.
  uint64_t expire;
};

// DTLS1_STATE is the per-connection DTLS-specific state: epochs, handshake
// message reassembly/retransmission, and ACK bookkeeping.
struct DTLS1_STATE {
  static constexpr bool kAllowUniquePtr = true;

  DTLS1_STATE();
  ~DTLS1_STATE();

  bool Init();

  // has_change_cipher_spec is true if we have received a ChangeCipherSpec from
  // the peer in this epoch.
  bool has_change_cipher_spec : 1;

  // outgoing_messages_complete is true if |outgoing_messages| has been
  // completed by an attempt to flush it. Future calls to |add_message| and
  // |add_change_cipher_spec| will start a new flight.
  bool outgoing_messages_complete : 1;

  // flight_has_reply is true if the current outgoing flight is complete and has
  // processed at least one message. This is used to detect whether we or the
  // peer sent the final flight.
  bool flight_has_reply : 1;

  // handshake_write_overflow and handshake_read_overflow are true if
  // handshake_write_seq and handshake_read_seq, respectively have overflowed.
  bool handshake_write_overflow : 1;
  bool handshake_read_overflow : 1;

  // sending_flight and sending_ack are true if we are in the process of sending
  // a handshake flight and ACK, respectively.
  bool sending_flight : 1;
  bool sending_ack : 1;

  // queued_key_update, if not kNone, indicates we've queued a KeyUpdate message
  // to send after the current flight is ACKed.
  QueuedKeyUpdate queued_key_update : 2;

  uint16_t handshake_write_seq = 0;
  uint16_t handshake_read_seq = 0;

  // read_epoch is the current read epoch.
  DTLSReadEpoch read_epoch;

  // next_read_epoch is the next read epoch in DTLS 1.3. It will become
  // current once a record is received from it.
  // NOTE(review): template arguments on |UniquePtr|, |InplaceVector| and
  // |MRUQueue| below appear stripped by extraction — TODO confirm upstream.
  UniquePtr next_read_epoch;

  // prev_read_epoch is the previous read epoch in DTLS 1.3.
  UniquePtr prev_read_epoch;

  // write_epoch is the current DTLS write epoch. Non-retransmit records will
  // generally use this epoch.
  // TODO(crbug.com/381113363): 0-RTT will be the exception, when implemented.
  DTLSWriteEpoch write_epoch;

  // extra_write_epochs is the collection of available write epochs.
  InplaceVector, DTLS_MAX_EXTRA_WRITE_EPOCHS> extra_write_epochs;

  // incoming_messages is a ring buffer of incoming handshake messages that have
  // yet to be processed. The front of the ring buffer is message number
  // |handshake_read_seq|, at position |handshake_read_seq| %
  // |SSL_MAX_HANDSHAKE_FLIGHT|.
  UniquePtr incoming_messages[SSL_MAX_HANDSHAKE_FLIGHT];

  // outgoing_messages is the queue of outgoing messages from the last handshake
  // flight.
  InplaceVector outgoing_messages;

  // sent_records is a queue of records we sent, for processing ACKs. To save
  // memory in the steady state, the structure is stored on the heap and dropped
  // when empty.
  UniquePtr> sent_records;

  // records_to_ack is a queue of received records that we should ACK. This is
  // not stored on the heap because, in the steady state, DTLS 1.3 does not
  // necessarily empty this list. (We probably could drop records from here once
  // they are sufficiently old.)
  MRUQueue records_to_ack;

  // outgoing_written is the number of outgoing messages that have been
  // written.
  uint8_t outgoing_written = 0;

  // outgoing_offset is the number of bytes of the next outgoing message have
  // been written.
  uint32_t outgoing_offset = 0;

  unsigned mtu = 0;  // max DTLS packet size

  // num_timeouts is the number of times the retransmit timer has fired since
  // the last time it was reset.
  unsigned num_timeouts = 0;

  // retransmit_timer tracks when to schedule the next DTLS retransmit if we do
  // not hear from the peer.
  DTLSTimer retransmit_timer;

  // ack_timer tracks when to send an ACK.
  DTLSTimer ack_timer;

  // timeout_duration_ms is the timeout duration in milliseconds.
  uint32_t timeout_duration_ms = 0;
};

// An ALPSConfig is a pair of ALPN protocol and settings value to use with ALPS.
struct ALPSConfig {
  Array protocol;
  Array settings;
};

// SSL_CONFIG contains configuration bits that can be shed after the handshake
// completes. Objects of this type are not shared; they are unique to a
// particular |SSL|.
//
// See SSL_shed_handshake_config() for more about the conditions under which
// configuration can be shed.
struct SSL_CONFIG {
  static constexpr bool kAllowUniquePtr = true;

  explicit SSL_CONFIG(SSL *ssl_arg);
  ~SSL_CONFIG();

  // ssl is a non-owning pointer to the parent |SSL| object.
  SSL *const ssl = nullptr;

  // conf_max_version is the maximum acceptable version configured by
  // |SSL_set_max_proto_version|. Note this version is not normalized in DTLS
  // and is further constrained by |SSL_OP_NO_*|.
  uint16_t conf_max_version = 0;

  // conf_min_version is the minimum acceptable version configured by
  // |SSL_set_min_proto_version|. Note this version is not normalized in DTLS
  // and is further constrained by |SSL_OP_NO_*|.
  uint16_t conf_min_version = 0;

  X509_VERIFY_PARAM *param = nullptr;

  // crypto
  UniquePtr cipher_list;

  // This is used to hold the local certificate used (i.e. the server
  // certificate for a server or the client certificate for a client).
  UniquePtr cert;

  int (*verify_callback)(int ok, X509_STORE_CTX *ctx) =
      nullptr;  // fail if callback returns 0

  enum ssl_verify_result_t (*custom_verify_callback)(
      SSL *ssl, uint8_t *out_alert) = nullptr;

  // Server-only: psk_identity_hint is the identity hint to send in
  // PSK-based key exchanges.
  UniquePtr psk_identity_hint;

  unsigned (*psk_client_callback)(SSL *ssl, const char *hint, char *identity,
                                  unsigned max_identity_len, uint8_t *psk,
                                  unsigned max_psk_len) = nullptr;
  unsigned (*psk_server_callback)(SSL *ssl, const char *identity, uint8_t *psk,
                                  unsigned max_psk_len) = nullptr;

  // for server side, keep the list of CA_dn we can use
  UniquePtr client_CA;

  // cached_x509_client_CA is a cache of parsed versions of the elements of
  // |client_CA|.
  STACK_OF(X509_NAME) *cached_x509_client_CA = nullptr;

  // For client side, keep the list of CA distinguished names we can use
  // for the Certificate Authorities extension.
  // TODO(bbe) having this separate from the client side (above) is mildly
  // silly, but OpenSSL has *_client_CA API's for this exposed, and for the
  // moment we are not crossing those streams.
  UniquePtr CA_names;

  Array supported_group_list;  // our list

  // channel_id_private is the client's Channel ID private key, or null if
  // Channel ID should not be offered on this connection.
  UniquePtr channel_id_private;

  // For a client, this contains the list of supported protocols in wire
  // format.
  Array alpn_client_proto_list;

  // alps_configs contains the list of supported protocols to use with ALPS,
  // along with their corresponding ALPS values.
  Vector alps_configs;

  // Contains the QUIC transport params that this endpoint will send.
  Array quic_transport_params;

  // Contains the context used to decide whether to accept early data in QUIC.
  Array quic_early_data_context;

  // verify_sigalgs, if not empty, is the set of signature algorithms
  // accepted from the peer in decreasing order of preference.
  Array verify_sigalgs;

  // srtp_profiles is the list of configured SRTP protection profiles for
  // DTLS-SRTP.
  UniquePtr srtp_profiles;

  // client_ech_config_list, if not empty, is a serialized ECHConfigList
  // structure for the client to use when negotiating ECH.
  Array client_ech_config_list;

  // compliance_policy limits the set of ciphers that can be selected when
  // negotiating a TLS 1.3 connection.
  enum ssl_compliance_policy_t compliance_policy = ssl_compliance_policy_none;

  // verify_mode is a bitmask of |SSL_VERIFY_*| values.
  uint8_t verify_mode = SSL_VERIFY_NONE;

  // ech_grease_enabled controls whether ECH GREASE may be sent in the
  // ClientHello.
  bool ech_grease_enabled : 1;

  // Enable signed certificate time stamps. Currently client only.
  bool signed_cert_timestamps_enabled : 1;

  // ocsp_stapling_enabled is only used by client connections and indicates
  // whether OCSP stapling will be requested.
  bool ocsp_stapling_enabled : 1;

  // channel_id_enabled is copied from the |SSL_CTX|. For a server, it means
  // that we'll accept Channel IDs from clients. It is ignored on the client.
  bool channel_id_enabled : 1;

  // If enforce_rsa_key_usage is true, the handshake will fail if the
  // keyUsage extension is present and incompatible with the TLS usage.
  // This field is not read until after certificate verification.
  bool enforce_rsa_key_usage : 1;

  // retain_only_sha256_of_client_certs is true if we should compute the SHA256
  // hash of the peer's certificate and then discard it to save memory and
  // session space. Only effective on the server side.
  bool retain_only_sha256_of_client_certs : 1;

  // handoff indicates that a server should stop after receiving the
  // ClientHello and pause the handshake in such a way that |SSL_get_error|
  // returns |SSL_ERROR_HANDOFF|. This is copied in |SSL_new| from the |SSL_CTX|
  // element of the same name and may be cleared if the handoff is declined.
  bool handoff : 1;

  // shed_handshake_config indicates that the handshake config (this object!)
  // should be freed after the handshake completes.
  bool shed_handshake_config : 1;

  // jdk11_workaround is whether to disable TLS 1.3 for JDK 11 clients, as a
  // workaround for https://bugs.openjdk.java.net/browse/JDK-8211806.
  bool jdk11_workaround : 1;

  // QUIC drafts up to and including 32 used a different TLS extension
  // codepoint to convey QUIC's transport parameters.
  bool quic_use_legacy_codepoint : 1;

  // permute_extensions is whether to permute extensions when sending messages.
  bool permute_extensions : 1;

  // aes_hw_override if set indicates we should override checking for aes
  // hardware support, and use the value in aes_hw_override_value instead.
  bool aes_hw_override : 1;

  // aes_hw_override_value is used for testing to indicate the support or lack
  // of support for AES hw. The value is only considered if |aes_hw_override| is
  // true.
  bool aes_hw_override_value : 1;

  // alps_use_new_codepoint if set indicates we use new ALPS extension codepoint
  // to negotiate and convey application settings.
  bool alps_use_new_codepoint : 1;

  // check_client_certificate_type indicates whether the client, in TLS 1.2 and
  // below, will check its certificate against the server's requested
  // certificate types.
  bool check_client_certificate_type : 1;

  // check_ecdsa_curve indicates whether the server, in TLS 1.2 and below, will
  // check its certificate against the client's supported ECDSA curves.
  bool check_ecdsa_curve : 1;
};

// From RFC 8446, used in determining PSK modes.
#define SSL_PSK_DHE_KE 0x1

// kMaxEarlyDataAccepted is the advertised number of plaintext bytes of early
// data that will be accepted. This value should be slightly below
// kMaxEarlyDataSkipped in tls_record.c, which is measured in ciphertext.
static const size_t kMaxEarlyDataAccepted = 14336;

// NOTE(review): several declarations below are missing template arguments
// (|UniquePtr|, |Span|, |Array|), apparently stripped by extraction — restore
// from upstream before compiling (TODO confirm).

UniquePtr ssl_cert_dup(CERT *cert);
bool ssl_set_cert(CERT *cert, UniquePtr buffer);
bool ssl_is_key_type_supported(int key_type);

// ssl_compare_public_and_private_key returns true if |pubkey| is the public
// counterpart to |privkey|. Otherwise it returns false and pushes a helpful
// message on the error queue.
bool ssl_compare_public_and_private_key(const EVP_PKEY *pubkey,
                                        const EVP_PKEY *privkey);

bool ssl_get_new_session(SSL_HANDSHAKE *hs);

// ssl_encrypt_ticket encrypts a ticket for |session| and writes the result to
// |out|. It returns true on success and false on error. If, on success, nothing
// was written to |out|, the caller should skip sending a ticket.
bool ssl_encrypt_ticket(SSL_HANDSHAKE *hs, CBB *out,
                        const SSL_SESSION *session);

bool ssl_ctx_rotate_ticket_encryption_key(SSL_CTX *ctx);

// ssl_session_new returns a newly-allocated blank |SSL_SESSION| or nullptr on
// error.
UniquePtr ssl_session_new(const SSL_X509_METHOD *x509_method);

// ssl_hash_session_id returns a hash of |session_id|, suitable for a hash table
// keyed on session IDs.
uint32_t ssl_hash_session_id(Span session_id);

// SSL_SESSION_parse parses an |SSL_SESSION| from |cbs| and advances |cbs| over
// the parsed data.
OPENSSL_EXPORT UniquePtr SSL_SESSION_parse(
    CBS *cbs, const SSL_X509_METHOD *x509_method, CRYPTO_BUFFER_POOL *pool);

// ssl_session_serialize writes |in| to |cbb| as if it were serialising a
// session for Session-ID resumption. It returns true on success and false on
// error.
OPENSSL_EXPORT bool ssl_session_serialize(const SSL_SESSION *in, CBB *cbb);

enum class SSLSessionType {
  // The session is not resumable.
  kNotResumable,
  // The session uses a TLS 1.2 session ID.
  kID,
  // The session uses a TLS 1.2 ticket.
  kTicket,
  // The session uses a TLS 1.3 pre-shared key.
  kPreSharedKey,
};

// ssl_session_get_type returns the type of |session|.
SSLSessionType ssl_session_get_type(const SSL_SESSION *session);

// ssl_session_is_context_valid returns whether |session|'s session ID context
// matches the one set on |hs|.
bool ssl_session_is_context_valid(const SSL_HANDSHAKE *hs,
                                  const SSL_SESSION *session);

// ssl_session_is_time_valid returns true if |session| is still valid and false
// if it has expired.
bool ssl_session_is_time_valid(const SSL *ssl, const SSL_SESSION *session);

// ssl_session_is_resumable returns whether |session| is resumable for |hs|.
bool ssl_session_is_resumable(const SSL_HANDSHAKE *hs,
                              const SSL_SESSION *session);

// ssl_session_protocol_version returns the protocol version associated with
// |session|. Note that despite the name, this is not the same as
// |SSL_SESSION_get_protocol_version|. The latter is based on upstream's name.
uint16_t ssl_session_protocol_version(const SSL_SESSION *session);

// ssl_session_get_digest returns the digest used in |session|.
const EVP_MD *ssl_session_get_digest(const SSL_SESSION *session);

void ssl_set_session(SSL *ssl, SSL_SESSION *session);

// ssl_get_prev_session looks up the previous session based on |client_hello|.
// On success, it sets |*out_session| to the session or nullptr if none was
// found. If the session could not be looked up synchronously, it returns
// |ssl_hs_pending_session| and should be called again. If a ticket could not be
// decrypted immediately it returns |ssl_hs_pending_ticket| and should also
// be called again. Otherwise, it returns |ssl_hs_error|.
enum ssl_hs_wait_t ssl_get_prev_session(SSL_HANDSHAKE *hs,
                                        UniquePtr *out_session,
                                        bool *out_tickets_supported,
                                        bool *out_renew_ticket,
                                        const SSL_CLIENT_HELLO *client_hello);

// The following flags determine which parts of the session are duplicated.
#define SSL_SESSION_DUP_AUTH_ONLY 0x0
#define SSL_SESSION_INCLUDE_TICKET 0x1
#define SSL_SESSION_INCLUDE_NONAUTH 0x2
#define SSL_SESSION_DUP_ALL \
  (SSL_SESSION_INCLUDE_TICKET | SSL_SESSION_INCLUDE_NONAUTH)

// SSL_SESSION_dup returns a newly-allocated |SSL_SESSION| with a copy of the
// fields in |session| or nullptr on error. The new session is non-resumable and
// must be explicitly marked resumable once it has been filled in.
OPENSSL_EXPORT UniquePtr SSL_SESSION_dup(SSL_SESSION *session,
                                         int dup_flags);

// ssl_session_rebase_time updates |session|'s start time to the current time,
// adjusting the timeout so the expiration time is unchanged.
void ssl_session_rebase_time(SSL *ssl, SSL_SESSION *session);

// ssl_session_renew_timeout calls |ssl_session_rebase_time| and renews
// |session|'s timeout to |timeout| (measured from the current time). The
// renewal is clamped to the session's auth_timeout.
void ssl_session_renew_timeout(SSL *ssl, SSL_SESSION *session,
                               uint32_t timeout);

void ssl_update_cache(SSL *ssl);

void ssl_send_alert(SSL *ssl, int level, int desc);
int ssl_send_alert_impl(SSL *ssl, int level, int desc);
bool tls_get_message(const SSL *ssl, SSLMessage *out);
ssl_open_record_t tls_open_handshake(SSL *ssl, size_t *out_consumed,
                                     uint8_t *out_alert, Span in);
void tls_next_message(SSL *ssl);

int tls_dispatch_alert(SSL *ssl);
ssl_open_record_t tls_open_app_data(SSL *ssl, Span *out,
                                    size_t *out_consumed, uint8_t *out_alert,
                                    Span in);
ssl_open_record_t tls_open_change_cipher_spec(SSL *ssl, size_t *out_consumed,
                                              uint8_t *out_alert, Span in);
int tls_write_app_data(SSL *ssl, bool *out_needs_handshake,
                       size_t *out_bytes_written, Span in);

bool tls_new(SSL *ssl);
void tls_free(SSL *ssl);

bool tls_init_message(const SSL *ssl, CBB *cbb, CBB *body, uint8_t type);
bool tls_finish_message(const SSL *ssl, CBB *cbb, Array *out_msg);
bool tls_add_message(SSL *ssl, Array msg);
bool tls_add_change_cipher_spec(SSL *ssl);
int tls_flush(SSL *ssl);

bool dtls1_init_message(const SSL *ssl, CBB *cbb, CBB *body, uint8_t type);
bool dtls1_finish_message(const SSL *ssl, CBB *cbb, Array *out_msg);
bool dtls1_add_message(SSL *ssl, Array msg);
bool dtls1_add_change_cipher_spec(SSL *ssl);
void dtls1_finish_flight(SSL *ssl);
void dtls1_schedule_ack(SSL *ssl);
int dtls1_flush(SSL *ssl);

// ssl_add_message_cbb finishes the handshake message in |cbb| and adds it to
// the pending flight. It returns true on success and false on error.
bool ssl_add_message_cbb(SSL *ssl, CBB *cbb);

// ssl_hash_message incorporates |msg| into the handshake hash. It returns true
// on success and false on allocation failure.
bool ssl_hash_message(SSL_HANDSHAKE *hs, const SSLMessage &msg);

ssl_open_record_t dtls1_process_ack(SSL *ssl, uint8_t *out_alert,
                                    DTLSRecordNumber ack_record_number,
                                    Span data);
ssl_open_record_t dtls1_open_app_data(SSL *ssl, Span *out,
                                      size_t *out_consumed, uint8_t *out_alert,
                                      Span in);
ssl_open_record_t dtls1_open_change_cipher_spec(SSL *ssl, size_t *out_consumed,
                                                uint8_t *out_alert, Span in);

int dtls1_write_app_data(SSL *ssl, bool *out_needs_handshake,
                         size_t *out_bytes_written, Span in);

// dtls1_write_record sends a record. It returns one on success and <= 0 on
// error.
int dtls1_write_record(SSL *ssl, int type, Span in, uint16_t epoch);

bool dtls1_parse_fragment(CBS *cbs, struct hm_header_st *out_hdr,
                          CBS *out_body);

// DTLS1_MTU_TIMEOUTS is the maximum number of retransmit timeouts to expire
// before starting to decrease the MTU.
#define DTLS1_MTU_TIMEOUTS 2

// DTLS1_MAX_TIMEOUTS is the maximum number of retransmit timeouts to expire
// before failing the DTLS handshake.
#define DTLS1_MAX_TIMEOUTS 12 void dtls1_stop_timer(SSL *ssl); unsigned int dtls1_min_mtu(void); bool dtls1_new(SSL *ssl); void dtls1_free(SSL *ssl); bool dtls1_process_handshake_fragments(SSL *ssl, uint8_t *out_alert, DTLSRecordNumber record_number, Span record); bool dtls1_get_message(const SSL *ssl, SSLMessage *out); ssl_open_record_t dtls1_open_handshake(SSL *ssl, size_t *out_consumed, uint8_t *out_alert, Span in); void dtls1_next_message(SSL *ssl); int dtls1_dispatch_alert(SSL *ssl); // tls1_configure_aead configures either the read or write direction AEAD (as // determined by |direction|) using the keys generated by the TLS KDF. The // |key_block_cache| argument is used to store the generated key block, if // empty. Otherwise it's assumed that the key block is already contained within // it. It returns true on success or false on error. bool tls1_configure_aead(SSL *ssl, evp_aead_direction_t direction, Array *key_block_cache, const SSL_SESSION *session, Span iv_override); bool tls1_change_cipher_state(SSL_HANDSHAKE *hs, evp_aead_direction_t direction); // tls1_generate_master_secret computes the master secret from |premaster| and // writes it to |out|. |out| must have size |SSL3_MASTER_SECRET_SIZE|. bool tls1_generate_master_secret(SSL_HANDSHAKE *hs, Span out, Span premaster); // tls1_get_grouplist returns the locally-configured group preference list. Span tls1_get_grouplist(const SSL_HANDSHAKE *ssl); // tls1_check_group_id returns whether |group_id| is consistent with locally- // configured group preferences. bool tls1_check_group_id(const SSL_HANDSHAKE *ssl, uint16_t group_id); // tls1_get_shared_group sets |*out_group_id| to the first preferred shared // group between client and server preferences and returns true. If none may be // found, it returns false. bool tls1_get_shared_group(SSL_HANDSHAKE *hs, uint16_t *out_group_id); // ssl_add_clienthello_tlsext writes ClientHello extensions to |out| for |type|. 
// It returns true on success and false on failure. The |header_len| argument is // the length of the ClientHello written so far and is used to compute the // padding length. (It does not include the record header or handshake headers.) // // If |type| is |ssl_client_hello_inner|, this function also writes the // compressed extensions to |out_encoded|. Otherwise, |out_encoded| should be // nullptr. // // On success, the function sets |*out_needs_psk_binder| to whether the last // ClientHello extension was the pre_shared_key extension and needs a PSK binder // filled in. The caller should then update |out| and, if applicable, // |out_encoded| with the binder after completing the whole message. bool ssl_add_clienthello_tlsext(SSL_HANDSHAKE *hs, CBB *out, CBB *out_encoded, bool *out_needs_psk_binder, ssl_client_hello_type_t type, size_t header_len); bool ssl_add_serverhello_tlsext(SSL_HANDSHAKE *hs, CBB *out); bool ssl_parse_clienthello_tlsext(SSL_HANDSHAKE *hs, const SSL_CLIENT_HELLO *client_hello); bool ssl_parse_serverhello_tlsext(SSL_HANDSHAKE *hs, const CBS *extensions); #define tlsext_tick_md EVP_sha256 // ssl_process_ticket processes a session ticket from the client. It returns // one of: // |ssl_ticket_aead_success|: |*out_session| is set to the parsed session and // |*out_renew_ticket| is set to whether the ticket should be renewed. // |ssl_ticket_aead_ignore_ticket|: |*out_renew_ticket| is set to whether a // fresh ticket should be sent, but the given ticket cannot be used. // |ssl_ticket_aead_retry|: the ticket could not be immediately decrypted. // Retry later. // |ssl_ticket_aead_error|: an error occured that is fatal to the connection. enum ssl_ticket_aead_result_t ssl_process_ticket( SSL_HANDSHAKE *hs, UniquePtr *out_session, bool *out_renew_ticket, Span ticket, Span session_id); // tls1_verify_channel_id processes |msg| as a Channel ID message, and verifies // the signature. If the key is valid, it saves the Channel ID and returns true. 
// Otherwise, it returns false.
bool tls1_verify_channel_id(SSL_HANDSHAKE *hs, const SSLMessage &msg);

// tls1_write_channel_id generates a Channel ID message and puts the output in
// |cbb|. |ssl->channel_id_private| must already be set before calling. This
// function returns true on success and false on error.
bool tls1_write_channel_id(SSL_HANDSHAKE *hs, CBB *cbb);

// tls1_channel_id_hash computes the hash to be signed by Channel ID and writes
// it to |out|, which must contain at least |EVP_MAX_MD_SIZE| bytes. It returns
// true on success and false on failure.
bool tls1_channel_id_hash(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len);

// tls1_record_handshake_hashes_for_channel_id records the current handshake
// hashes in |hs->new_session| so that Channel ID resumptions can sign that
// data.
bool tls1_record_handshake_hashes_for_channel_id(SSL_HANDSHAKE *hs);

// ssl_can_write returns whether |ssl| is allowed to write.
bool ssl_can_write(const SSL *ssl);

// ssl_can_read returns whether |ssl| is allowed to read.
bool ssl_can_read(const SSL *ssl);

OPENSSL_timeval ssl_ctx_get_current_time(const SSL_CTX *ctx);

// ssl_reset_error_state resets state for |SSL_get_error|.
void ssl_reset_error_state(SSL *ssl);

// ssl_set_read_error sets |ssl|'s read half into an error state, saving the
// current state of the error queue.
void ssl_set_read_error(SSL *ssl);

BSSL_NAMESPACE_END


// Opaque C types.
//
// The following types are exported to C code as public typedefs, so they must
// be defined outside of the namespace.

// ssl_method_st backs the public |SSL_METHOD| type. It is a compatibility
// structure to support the legacy version-locked methods.
struct ssl_method_st {
  // version, if non-zero, is the only protocol version acceptable to an
  // SSL_CTX initialized from this method.
  uint16_t version;
  // method is the underlying SSL_PROTOCOL_METHOD that initializes the
  // SSL_CTX.
  const bssl::SSL_PROTOCOL_METHOD *method;
  // x509_method contains pointers to functions that might deal with |X509|
  // compatibility, or might be a no-op, depending on the application.
  const bssl::SSL_X509_METHOD *x509_method;
};

// ssl_ctx_st backs the public |SSL_CTX| type.
// NOTE(review): the |RefCounted| base and |bssl::UniquePtr| member below are
// missing template arguments, apparently stripped by extraction — restore from
// upstream (TODO confirm).
struct ssl_ctx_st : public bssl::RefCounted {
  explicit ssl_ctx_st(const SSL_METHOD *ssl_method);
  ssl_ctx_st(const ssl_ctx_st &) = delete;
  ssl_ctx_st &operator=(const ssl_ctx_st &) = delete;

  const bssl::SSL_PROTOCOL_METHOD *method = nullptr;
  const bssl::SSL_X509_METHOD *x509_method = nullptr;

  // lock is used to protect various operations on this object.
  CRYPTO_MUTEX lock;

  // conf_max_version is the maximum acceptable protocol version configured by
  // |SSL_CTX_set_max_proto_version|. Note this version is normalized in DTLS
  // and is further constrained by |SSL_OP_NO_*|.
  uint16_t conf_max_version = 0;

  // conf_min_version is the minimum acceptable protocol version configured by
  // |SSL_CTX_set_min_proto_version|. Note this version is normalized in DTLS
  // and is further constrained by |SSL_OP_NO_*|.
  uint16_t conf_min_version = 0;

  // num_tickets is the number of tickets to send immediately after the TLS 1.3
  // handshake. TLS 1.3 recommends single-use tickets so, by default, issue two
  // in case the client makes several connections before getting a renewal.
  uint8_t num_tickets = 2;

  // quic_method is the method table corresponding to the QUIC hooks.
  const SSL_QUIC_METHOD *quic_method = nullptr;

  bssl::UniquePtr cipher_list;

  X509_STORE *cert_store = nullptr;
  LHASH_OF(SSL_SESSION) *sessions = nullptr;
  // Most session-ids that will be cached, default is
  // SSL_SESSION_CACHE_MAX_SIZE_DEFAULT. 0 is unlimited.
  unsigned long session_cache_size = SSL_SESSION_CACHE_MAX_SIZE_DEFAULT;
  SSL_SESSION *session_cache_head = nullptr;
  SSL_SESSION *session_cache_tail = nullptr;

  // handshakes_since_cache_flush is the number of successful handshakes since
  // the last cache flush.
int handshakes_since_cache_flush = 0; // This can have one of 2 values, ored together, // SSL_SESS_CACHE_CLIENT, // SSL_SESS_CACHE_SERVER, // Default is SSL_SESSION_CACHE_SERVER, which means only // SSL_accept which cache SSL_SESSIONS. int session_cache_mode = SSL_SESS_CACHE_SERVER; // session_timeout is the default lifetime for new sessions in TLS 1.2 and // earlier, in seconds. uint32_t session_timeout = SSL_DEFAULT_SESSION_TIMEOUT; // session_psk_dhe_timeout is the default lifetime for new sessions in TLS // 1.3, in seconds. uint32_t session_psk_dhe_timeout = SSL_DEFAULT_SESSION_PSK_DHE_TIMEOUT; // If this callback is not null, it will be called each time a session id is // added to the cache. If this function returns 1, it means that the // callback will do a SSL_SESSION_free() when it has finished using it. // Otherwise, on 0, it means the callback has finished with it. If // remove_session_cb is not null, it will be called when a session-id is // removed from the cache. After the call, OpenSSL will SSL_SESSION_free() // it. int (*new_session_cb)(SSL *ssl, SSL_SESSION *sess) = nullptr; void (*remove_session_cb)(SSL_CTX *ctx, SSL_SESSION *sess) = nullptr; SSL_SESSION *(*get_session_cb)(SSL *ssl, const uint8_t *data, int len, int *copy) = nullptr; // if defined, these override the X509_verify_cert() calls int (*app_verify_callback)(X509_STORE_CTX *store_ctx, void *arg) = nullptr; void *app_verify_arg = nullptr; ssl_verify_result_t (*custom_verify_callback)(SSL *ssl, uint8_t *out_alert) = nullptr; // Default password callback. pem_password_cb *default_passwd_callback = nullptr; // Default password callback user data. 
void *default_passwd_callback_userdata = nullptr; // get client cert callback int (*client_cert_cb)(SSL *ssl, X509 **out_x509, EVP_PKEY **out_pkey) = nullptr; CRYPTO_EX_DATA ex_data; // Default values used when no per-SSL value is defined follow void (*info_callback)(const SSL *ssl, int type, int value) = nullptr; // what we put in client cert requests bssl::UniquePtr client_CA; // cached_x509_client_CA is a cache of parsed versions of the elements of // |client_CA|. STACK_OF(X509_NAME) *cached_x509_client_CA = nullptr; // What we put in client hello in the CA extension. bssl::UniquePtr CA_names; // Default values to use in SSL structures follow (these are copied by // SSL_new) uint32_t options = 0; // Disable the auto-chaining feature by default. wpa_supplicant relies on this // feature, but require callers opt into it. uint32_t mode = SSL_MODE_NO_AUTO_CHAIN; uint32_t max_cert_list = SSL_MAX_CERT_LIST_DEFAULT; bssl::UniquePtr cert; // callback that allows applications to peek at protocol messages void (*msg_callback)(int is_write, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg) = nullptr; void *msg_callback_arg = nullptr; int verify_mode = SSL_VERIFY_NONE; int (*default_verify_callback)(int ok, X509_STORE_CTX *ctx) = nullptr; // called 'verify_callback' in the SSL X509_VERIFY_PARAM *param = nullptr; // select_certificate_cb is called before most ClientHello processing and // before the decision whether to resume a session is made. See // |ssl_select_cert_result_t| for details of the return values. ssl_select_cert_result_t (*select_certificate_cb)(const SSL_CLIENT_HELLO *) = nullptr; // dos_protection_cb is called once the resumption decision for a ClientHello // has been made. It returns one to continue the handshake or zero to // abort. int (*dos_protection_cb)(const SSL_CLIENT_HELLO *) = nullptr; // Controls whether to verify certificates when resuming connections. 
They // were already verified when the connection was first made, so the default is // false. For now, this is only respected on clients, not servers. bool reverify_on_resume = false; // Maximum amount of data to send in one fragment. actual record size can be // more than this due to padding and MAC overheads. uint16_t max_send_fragment = SSL3_RT_MAX_PLAIN_LENGTH; // TLS extensions servername callback int (*servername_callback)(SSL *, int *, void *) = nullptr; void *servername_arg = nullptr; // RFC 4507 session ticket keys. |ticket_key_current| may be NULL before the // first handshake and |ticket_key_prev| may be NULL at any time. // Automatically generated ticket keys are rotated as needed at handshake // time. Hence, all access must be synchronized through |lock|. bssl::UniquePtr ticket_key_current; bssl::UniquePtr ticket_key_prev; // Callback to support customisation of ticket key setting int (*ticket_key_cb)(SSL *ssl, uint8_t *name, uint8_t *iv, EVP_CIPHER_CTX *ectx, HMAC_CTX *hctx, int enc) = nullptr; // Server-only: psk_identity_hint is the default identity hint to send in // PSK-based key exchanges. bssl::UniquePtr psk_identity_hint; unsigned (*psk_client_callback)(SSL *ssl, const char *hint, char *identity, unsigned max_identity_len, uint8_t *psk, unsigned max_psk_len) = nullptr; unsigned (*psk_server_callback)(SSL *ssl, const char *identity, uint8_t *psk, unsigned max_psk_len) = nullptr; // Next protocol negotiation information // (for experimental NPN extension). // For a server, this contains a callback function by which the set of // advertised protocols can be provided. int (*next_protos_advertised_cb)(SSL *ssl, const uint8_t **out, unsigned *out_len, void *arg) = nullptr; void *next_protos_advertised_cb_arg = nullptr; // For a client, this contains a callback function that selects the // next protocol from the list provided by the server. 
int (*next_proto_select_cb)(SSL *ssl, uint8_t **out, uint8_t *out_len, const uint8_t *in, unsigned in_len, void *arg) = nullptr; void *next_proto_select_cb_arg = nullptr; // ALPN information // (we are in the process of transitioning from NPN to ALPN.) // For a server, this contains a callback function that allows the // server to select the protocol for the connection. // out: on successful return, this must point to the raw protocol // name (without the length prefix). // outlen: on successful return, this contains the length of |*out|. // in: points to the client's list of supported protocols in // wire-format. // inlen: the length of |in|. int (*alpn_select_cb)(SSL *ssl, const uint8_t **out, uint8_t *out_len, const uint8_t *in, unsigned in_len, void *arg) = nullptr; void *alpn_select_cb_arg = nullptr; // For a client, this contains the list of supported protocols in wire // format. bssl::Array alpn_client_proto_list; // SRTP profiles we are willing to do from RFC 5764 bssl::UniquePtr srtp_profiles; // Defined compression algorithms for certificates. bssl::Vector cert_compression_algs; // Supported group values inherited by SSL structure bssl::Array supported_group_list; // channel_id_private is the client's Channel ID private key, or null if // Channel ID should not be offered on this connection. bssl::UniquePtr channel_id_private; // ech_keys contains the server's list of ECHConfig values and associated // private keys. This list may be swapped out at any time, so all access must // be synchronized through |lock|. bssl::UniquePtr ech_keys; // keylog_callback, if not NULL, is the key logging callback. See // |SSL_CTX_set_keylog_callback|. void (*keylog_callback)(const SSL *ssl, const char *line) = nullptr; // current_time_cb, if not NULL, is the function to use to get the current // time. It sets |*out_clock| to the current time. The |ssl| argument is // always NULL. See |SSL_CTX_set_current_time_cb|. 
void (*current_time_cb)(const SSL *ssl, struct timeval *out_clock) = nullptr; // pool is used for all |CRYPTO_BUFFER|s in case we wish to share certificate // memory. CRYPTO_BUFFER_POOL *pool = nullptr; // ticket_aead_method contains function pointers for opening and sealing // session tickets. const SSL_TICKET_AEAD_METHOD *ticket_aead_method = nullptr; // legacy_ocsp_callback implements an OCSP-related callback for OpenSSL // compatibility. int (*legacy_ocsp_callback)(SSL *ssl, void *arg) = nullptr; void *legacy_ocsp_callback_arg = nullptr; // compliance_policy limits the set of ciphers that can be selected when // negotiating a TLS 1.3 connection. enum ssl_compliance_policy_t compliance_policy = ssl_compliance_policy_none; // verify_sigalgs, if not empty, is the set of signature algorithms // accepted from the peer in decreasing order of preference. bssl::Array verify_sigalgs; // retain_only_sha256_of_client_certs is true if we should compute the SHA256 // hash of the peer's certificate and then discard it to save memory and // session space. Only effective on the server side. bool retain_only_sha256_of_client_certs : 1; // quiet_shutdown is true if the connection should not send a close_notify on // shutdown. bool quiet_shutdown : 1; // ocsp_stapling_enabled is only used by client connections and indicates // whether OCSP stapling will be requested. bool ocsp_stapling_enabled : 1; // If true, a client will request certificate timestamps. bool signed_cert_timestamps_enabled : 1; // channel_id_enabled is whether Channel ID is enabled. For a server, means // that we'll accept Channel IDs from clients. For a client, means that we'll // advertise support. bool channel_id_enabled : 1; // grease_enabled is whether GREASE (RFC 8701) is enabled. bool grease_enabled : 1; // permute_extensions is whether to permute extensions when sending messages. 
bool permute_extensions : 1; // allow_unknown_alpn_protos is whether the client allows unsolicited ALPN // protocols from the peer. bool allow_unknown_alpn_protos : 1; // false_start_allowed_without_alpn is whether False Start (if // |SSL_MODE_ENABLE_FALSE_START| is enabled) is allowed without ALPN. bool false_start_allowed_without_alpn : 1; // handoff indicates that a server should stop after receiving the // ClientHello and pause the handshake in such a way that |SSL_get_error| // returns |SSL_ERROR_HANDOFF|. bool handoff : 1; // If enable_early_data is true, early data can be sent and accepted. bool enable_early_data : 1; // aes_hw_override if set indicates we should override checking for AES // hardware support, and use the value in aes_hw_override_value instead. bool aes_hw_override : 1; // aes_hw_override_value is used for testing to indicate the support or lack // of support for AES hardware. The value is only considered if // |aes_hw_override| is true. bool aes_hw_override_value : 1; private: friend RefCounted; ~ssl_ctx_st(); }; struct ssl_st { explicit ssl_st(SSL_CTX *ctx_arg); ssl_st(const ssl_st &) = delete; ssl_st &operator=(const ssl_st &) = delete; ~ssl_st(); // method is the method table corresponding to the current protocol (DTLS or // TLS). const bssl::SSL_PROTOCOL_METHOD *method = nullptr; // config is a container for handshake configuration. Accesses to this field // should check for nullptr, since configuration may be shed after the // handshake completes. (If you have the |SSL_HANDSHAKE| object at hand, use // that instead, and skip the null check.) bssl::UniquePtr config; uint16_t max_send_fragment = 0; // There are 2 BIO's even though they are normally both the same. This is so // data can be read and written to different handlers bssl::UniquePtr rbio; // used by SSL_read bssl::UniquePtr wbio; // used by SSL_write // do_handshake runs the handshake. On completion, it returns |ssl_hs_ok|. 
// Otherwise, it returns a value corresponding to what operation is needed to // progress. bssl::ssl_hs_wait_t (*do_handshake)(bssl::SSL_HANDSHAKE *hs) = nullptr; bssl::SSL3_STATE *s3 = nullptr; // TLS variables bssl::DTLS1_STATE *d1 = nullptr; // DTLS variables // callback that allows applications to peek at protocol messages void (*msg_callback)(int write_p, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg) = nullptr; void *msg_callback_arg = nullptr; // session info // initial_timeout_duration_ms is the default DTLS timeout duration in // milliseconds. It's used to initialize the timer any time it's restarted. We // default to RFC 9147's recommendation for real-time applications, 400ms. uint32_t initial_timeout_duration_ms = 400; // session is the configured session to be offered by the client. This session // is immutable. bssl::UniquePtr session; void (*info_callback)(const SSL *ssl, int type, int value) = nullptr; bssl::UniquePtr ctx; // session_ctx is the |SSL_CTX| used for the session cache and related // settings. bssl::UniquePtr session_ctx; // extra application data CRYPTO_EX_DATA ex_data; uint32_t options = 0; // protocol behaviour uint32_t mode = 0; // API behaviour uint32_t max_cert_list = 0; bssl::UniquePtr hostname; // quic_method is the method table corresponding to the QUIC hooks. const SSL_QUIC_METHOD *quic_method = nullptr; // renegotiate_mode controls how peer renegotiation attempts are handled. ssl_renegotiate_mode_t renegotiate_mode = ssl_renegotiate_never; // server is true iff the this SSL* is the server half. Note: before the SSL* // is initialized by either SSL_set_accept_state or SSL_set_connect_state, // the side is not determined. In this state, server is always false. bool server : 1; // quiet_shutdown is true if the connection should not send a close_notify on // shutdown. bool quiet_shutdown : 1; // If enable_early_data is true, early data can be sent and accepted. 
bool enable_early_data : 1; }; struct ssl_session_st : public bssl::RefCounted { explicit ssl_session_st(const bssl::SSL_X509_METHOD *method); ssl_session_st(const ssl_session_st &) = delete; ssl_session_st &operator=(const ssl_session_st &) = delete; // ssl_version is the (D)TLS version that established the session. uint16_t ssl_version = 0; // group_id is the ID of the ECDH group used to establish this session or zero // if not applicable or unknown. uint16_t group_id = 0; // peer_signature_algorithm is the signature algorithm used to authenticate // the peer, or zero if not applicable or unknown. uint16_t peer_signature_algorithm = 0; // secret, in TLS 1.2 and below, is the master secret associated with the // session. In TLS 1.3 and up, it is the resumption PSK for sessions handed to // the caller, but it stores the resumption secret when stored on |SSL| // objects. bssl::InplaceVector secret; bssl::InplaceVector session_id; // this is used to determine whether the session is being reused in // the appropriate context. It is up to the application to set this, // via SSL_new bssl::InplaceVector sid_ctx; bssl::UniquePtr psk_identity; // certs contains the certificate chain from the peer, starting with the leaf // certificate. bssl::UniquePtr certs; const bssl::SSL_X509_METHOD *x509_method = nullptr; // x509_peer is the peer's certificate. X509 *x509_peer = nullptr; // x509_chain is the certificate chain sent by the peer. NOTE: for historical // reasons, when a client (so the peer is a server), the chain includes // |peer|, but when a server it does not. STACK_OF(X509) *x509_chain = nullptr; // x509_chain_without_leaf is a lazily constructed copy of |x509_chain| that // omits the leaf certificate. This exists because OpenSSL, historically, // didn't include the leaf certificate in the chain for a server, but did for // a client. The |x509_chain| always includes it and, if an API call requires // a chain without, it is stored here. 
STACK_OF(X509) *x509_chain_without_leaf = nullptr; // verify_result is the result of certificate verification in the case of // non-fatal certificate errors. long verify_result = X509_V_ERR_INVALID_CALL; // timeout is the lifetime of the session in seconds, measured from |time|. // This is renewable up to |auth_timeout|. uint32_t timeout = SSL_DEFAULT_SESSION_TIMEOUT; // auth_timeout is the non-renewable lifetime of the session in seconds, // measured from |time|. uint32_t auth_timeout = SSL_DEFAULT_SESSION_TIMEOUT; // time is the time the session was issued, measured in seconds from the UNIX // epoch. uint64_t time = 0; const SSL_CIPHER *cipher = nullptr; CRYPTO_EX_DATA ex_data; // application specific data // These are used to make removal of session-ids more efficient and to // implement a maximum cache size. SSL_SESSION *prev = nullptr, *next = nullptr; bssl::Array ticket; bssl::UniquePtr signed_cert_timestamp_list; // The OCSP response that came with the session. bssl::UniquePtr ocsp_response; // peer_sha256 contains the SHA-256 hash of the peer's certificate if // |peer_sha256_valid| is true. uint8_t peer_sha256[SHA256_DIGEST_LENGTH] = {0}; // original_handshake_hash contains the handshake hash (either SHA-1+MD5 or // SHA-2, depending on TLS version) for the original, full handshake that // created a session. This is used by Channel IDs during resumption. bssl::InplaceVector original_handshake_hash; uint32_t ticket_lifetime_hint = 0; // Session lifetime hint in seconds uint32_t ticket_age_add = 0; // ticket_max_early_data is the maximum amount of data allowed to be sent as // early data. If zero, 0-RTT is disallowed. uint32_t ticket_max_early_data = 0; // early_alpn is the ALPN protocol from the initial handshake. This is only // stored for TLS 1.3 and above in order to enforce ALPN matching for 0-RTT // resumptions. For the current connection's ALPN protocol, see // |alpn_selected| on |SSL3_STATE|. 
bssl::Array early_alpn; // local_application_settings, if |has_application_settings| is true, is the // local ALPS value for this connection. bssl::Array local_application_settings; // peer_application_settings, if |has_application_settings| is true, is the // peer ALPS value for this connection. bssl::Array peer_application_settings; // extended_master_secret is whether the master secret in this session was // generated using EMS and thus isn't vulnerable to the Triple Handshake // attack. bool extended_master_secret : 1; // peer_sha256_valid is whether |peer_sha256| is valid. bool peer_sha256_valid : 1; // Non-zero if peer_sha256 is valid // not_resumable is used to indicate that session resumption is disallowed. bool not_resumable : 1; // ticket_age_add_valid is whether |ticket_age_add| is valid. bool ticket_age_add_valid : 1; // is_server is whether this session was created by a server. bool is_server : 1; // is_quic indicates whether this session was created using QUIC. bool is_quic : 1; // has_application_settings indicates whether ALPS was negotiated in this // session. bool has_application_settings : 1; // quic_early_data_context is used to determine whether early data must be // rejected when performing a QUIC handshake. bssl::Array quic_early_data_context; private: friend RefCounted; ~ssl_session_st(); }; struct ssl_ech_keys_st : public bssl::RefCounted { ssl_ech_keys_st() : RefCounted(CheckSubClass()) {} bssl::Vector> configs; private: friend RefCounted; ~ssl_ech_keys_st() = default; }; #endif // OPENSSL_HEADER_SSL_INTERNAL_H ================================================ FILE: Sources/CNIOBoringSSL/ssl/s3_both.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the angle-bracketed header names on the bare #include lines
// below (and template arguments on |Span|, |Array|, |UniquePtr|,
// |reinterpret_cast| and |static_cast| throughout this file) appear to have
// been stripped by text extraction — restore them from upstream BoringSSL.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../crypto/internal.h"
#include "internal.h"

BSSL_NAMESPACE_BEGIN

// add_record_to_flight seals |in| as a single record of type |type| and
// appends the sealed record to |pending_flight|, allocating the buffer on
// first use.
static bool add_record_to_flight(SSL *ssl, uint8_t type, Span in) {
  // The caller should have flushed |pending_hs_data| first.
  assert(!ssl->s3->pending_hs_data);
  // We'll never add a flight while in the process of writing it out.
  assert(ssl->s3->pending_flight_offset == 0);

  if (ssl->s3->pending_flight == nullptr) {
    ssl->s3->pending_flight.reset(BUF_MEM_new());
    if (ssl->s3->pending_flight == nullptr) {
      return false;
    }
  }

  // Guard the capacity computation against size_t overflow before reserving.
  size_t max_out = in.size() + SSL_max_seal_overhead(ssl);
  size_t new_cap = ssl->s3->pending_flight->length + max_out;
  if (max_out < in.size() || new_cap < max_out) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW);
    return false;
  }

  size_t len;
  if (!BUF_MEM_reserve(ssl->s3->pending_flight.get(), new_cap) ||
      !tls_seal_record(ssl,
                       (uint8_t *)ssl->s3->pending_flight->data +
                           ssl->s3->pending_flight->length,
                       &len, max_out, type, in.data(), in.size())) {
    return false;
  }

  ssl->s3->pending_flight->length += len;
  return true;
}

bool tls_init_message(const SSL *ssl, CBB *cbb, CBB *body, uint8_t type) {
  // Pick a modest size hint to save most of the |realloc| calls.
  if (!CBB_init(cbb, 64) ||  //
      !CBB_add_u8(cbb, type) ||  //
      !CBB_add_u24_length_prefixed(cbb, body)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    CBB_cleanup(cbb);
    return false;
  }

  return true;
}

bool tls_finish_message(const SSL *ssl, CBB *cbb, Array *out_msg) {
  return CBBFinishArray(cbb, out_msg);
}

bool tls_add_message(SSL *ssl, Array msg) {
  // Pack handshake data into the minimal number of records. This avoids
  // unnecessary encryption overhead, notably in TLS 1.3 where we send several
  // encrypted messages in a row. For now, we do not do this for the null
  // cipher. The benefit is smaller and there is a risk of breaking buggy
  // implementations.
  //
  // TODO(crbug.com/374991962): See if we can do this uniformly.
  Span rest = msg;
  if (!SSL_is_quic(ssl) && ssl->s3->aead_write_ctx->is_null_cipher()) {
    // Null cipher: emit one record per |max_send_fragment| chunk directly.
    while (!rest.empty()) {
      Span chunk = rest.subspan(0, ssl->max_send_fragment);
      rest = rest.subspan(chunk.size());

      if (!add_record_to_flight(ssl, SSL3_RT_HANDSHAKE, chunk)) {
        return false;
      }
    }
  } else {
    // Otherwise buffer into |pending_hs_data| so consecutive messages can
    // share records.
    while (!rest.empty()) {
      // Flush if |pending_hs_data| is full.
      if (ssl->s3->pending_hs_data &&
          ssl->s3->pending_hs_data->length >= ssl->max_send_fragment &&
          !tls_flush_pending_hs_data(ssl)) {
        return false;
      }

      size_t pending_len =
          ssl->s3->pending_hs_data ? ssl->s3->pending_hs_data->length : 0;
      Span chunk = rest.subspan(0, ssl->max_send_fragment - pending_len);
      assert(!chunk.empty());
      rest = rest.subspan(chunk.size());

      if (!ssl->s3->pending_hs_data) {
        ssl->s3->pending_hs_data.reset(BUF_MEM_new());
      }

      if (!ssl->s3->pending_hs_data ||
          !BUF_MEM_append(ssl->s3->pending_hs_data.get(), chunk.data(),
                          chunk.size())) {
        return false;
      }
    }
  }

  ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_HANDSHAKE, msg);
  // TODO(svaldez): Move this up a layer to fix abstraction for SSLTranscript
  // on hs.
  if (ssl->s3->hs != NULL &&  //
      !ssl->s3->hs->transcript.Update(msg)) {
    return false;
  }
  return true;
}

bool tls_flush_pending_hs_data(SSL *ssl) {
  if (!ssl->s3->pending_hs_data || ssl->s3->pending_hs_data->length == 0) {
    return true;
  }

  UniquePtr pending_hs_data = std::move(ssl->s3->pending_hs_data);
  auto data = Span(reinterpret_cast(pending_hs_data->data),
                   pending_hs_data->length);
  if (SSL_is_quic(ssl)) {
    // In QUIC, pass the data to the QUIC method rather than the record layer,
    // unless handshake hints were requested.
    if ((ssl->s3->hs == nullptr || !ssl->s3->hs->hints_requested) &&
        !ssl->quic_method->add_handshake_data(ssl, ssl->s3->quic_write_level,
                                              data.data(), data.size())) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_QUIC_INTERNAL_ERROR);
      return false;
    }
    return true;
  }

  return add_record_to_flight(ssl, SSL3_RT_HANDSHAKE, data);
}

bool tls_add_change_cipher_spec(SSL *ssl) {
  // ChangeCipherSpec is not needed in QUIC.
  if (SSL_is_quic(ssl)) {
    return true;
  }

  static const uint8_t kChangeCipherSpec[1] = {SSL3_MT_CCS};
  if (!tls_flush_pending_hs_data(ssl) ||
      !add_record_to_flight(ssl, SSL3_RT_CHANGE_CIPHER_SPEC,
                            kChangeCipherSpec)) {
    return false;
  }

  ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_CHANGE_CIPHER_SPEC,
                      kChangeCipherSpec);
  return true;
}

int tls_flush(SSL *ssl) {
  if (!tls_flush_pending_hs_data(ssl)) {
    return -1;
  }

  if (SSL_is_quic(ssl)) {
    if (ssl->s3->write_shutdown != ssl_shutdown_none) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN);
      return -1;
    }

    if (!ssl->quic_method->flush_flight(ssl)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_QUIC_INTERNAL_ERROR);
      return -1;
    }
  }

  if (ssl->s3->pending_flight == nullptr) {
    return 1;
  }

  if (ssl->s3->write_shutdown != ssl_shutdown_none) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN);
    return -1;
  }

  static_assert(INT_MAX <= 0xffffffff, "int is larger than 32 bits");
  if (ssl->s3->pending_flight->length > INT_MAX) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return -1;
  }

  // If there is pending data in the write buffer, it must be flushed out
  // before any new data in pending_flight.
  if (!ssl->s3->write_buffer.empty()) {
    int ret = ssl_write_buffer_flush(ssl);
    if (ret <= 0) {
      ssl->s3->rwstate = SSL_ERROR_WANT_WRITE;
      return ret;
    }
  }

  if (ssl->wbio == nullptr) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BIO_NOT_SET);
    return -1;
  }

  // Write the pending flight.
  while (ssl->s3->pending_flight_offset < ssl->s3->pending_flight->length) {
    int ret = BIO_write(
        ssl->wbio.get(),
        ssl->s3->pending_flight->data + ssl->s3->pending_flight_offset,
        ssl->s3->pending_flight->length - ssl->s3->pending_flight_offset);
    if (ret <= 0) {
      ssl->s3->rwstate = SSL_ERROR_WANT_WRITE;
      return ret;
    }

    ssl->s3->pending_flight_offset += ret;
  }

  if (BIO_flush(ssl->wbio.get()) <= 0) {
    ssl->s3->rwstate = SSL_ERROR_WANT_WRITE;
    return -1;
  }

  ssl->s3->pending_flight.reset();
  ssl->s3->pending_flight_offset = 0;
  return 1;
}

// read_v2_client_hello parses a V2ClientHello from |in| and writes an
// equivalent TLS ClientHello into the handshake buffer.
static ssl_open_record_t read_v2_client_hello(SSL *ssl, size_t *out_consumed,
                                              Span in) {
  *out_consumed = 0;
  assert(in.size() >= SSL3_RT_HEADER_LENGTH);

  // Determine the length of the V2ClientHello.
  size_t msg_length = ((in[0] & 0x7f) << 8) | in[1];
  if (msg_length > (1024 * 4)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE);
    return ssl_open_record_error;
  }
  if (msg_length < SSL3_RT_HEADER_LENGTH - 2) {
    // Reject lengths that are too short early. We have already read
    // |SSL3_RT_HEADER_LENGTH| bytes, so we should not attempt to process an
    // (invalid) V2ClientHello which would be shorter than that.
    OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_LENGTH_MISMATCH);
    return ssl_open_record_error;
  }

  // Ask for the remainder of the V2ClientHello.
  if (in.size() < 2 + msg_length) {
    *out_consumed = 2 + msg_length;
    return ssl_open_record_partial;
  }

  CBS v2_client_hello = CBS(in.subspan(2, msg_length));
  // The V2ClientHello without the length is incorporated into the handshake
  // hash. This is only ever called at the start of the handshake, so hs is
  // guaranteed to be non-NULL.
  if (!ssl->s3->hs->transcript.Update(v2_client_hello)) {
    return ssl_open_record_error;
  }

  ssl_do_msg_callback(ssl, 0 /* read */, 0 /* V2ClientHello */,
                      v2_client_hello);

  uint8_t msg_type;
  uint16_t version, cipher_spec_length, session_id_length, challenge_length;
  CBS cipher_specs, session_id, challenge;
  if (!CBS_get_u8(&v2_client_hello, &msg_type) ||
      !CBS_get_u16(&v2_client_hello, &version) ||
      !CBS_get_u16(&v2_client_hello, &cipher_spec_length) ||
      !CBS_get_u16(&v2_client_hello, &session_id_length) ||
      !CBS_get_u16(&v2_client_hello, &challenge_length) ||
      !CBS_get_bytes(&v2_client_hello, &cipher_specs, cipher_spec_length) ||
      !CBS_get_bytes(&v2_client_hello, &session_id, session_id_length) ||
      !CBS_get_bytes(&v2_client_hello, &challenge, challenge_length) ||
      CBS_len(&v2_client_hello) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    return ssl_open_record_error;
  }

  // msg_type has already been checked.
  assert(msg_type == SSL2_MT_CLIENT_HELLO);

  // The client_random is the V2ClientHello challenge. Truncate or left-pad
  // with zeros as needed.
  size_t rand_len = CBS_len(&challenge);
  if (rand_len > SSL3_RANDOM_SIZE) {
    rand_len = SSL3_RANDOM_SIZE;
  }
  uint8_t random[SSL3_RANDOM_SIZE];
  OPENSSL_memset(random, 0, SSL3_RANDOM_SIZE);
  OPENSSL_memcpy(random + (SSL3_RANDOM_SIZE - rand_len), CBS_data(&challenge),
                 rand_len);

  // Write out an equivalent TLS ClientHello directly to the handshake buffer.
  size_t max_v3_client_hello =
      SSL3_HM_HEADER_LENGTH + 2 /* version */ + SSL3_RANDOM_SIZE +
      1 /* session ID length */ + 2 /* cipher list length */ +
      CBS_len(&cipher_specs) / 3 * 2 + 1 /* compression length */ +
      1 /* compression */;
  ScopedCBB client_hello;
  CBB hello_body, cipher_suites;
  if (!ssl->s3->hs_buf) {
    ssl->s3->hs_buf.reset(BUF_MEM_new());
  }
  if (!ssl->s3->hs_buf ||
      !BUF_MEM_reserve(ssl->s3->hs_buf.get(), max_v3_client_hello) ||
      !CBB_init_fixed(client_hello.get(), (uint8_t *)ssl->s3->hs_buf->data,
                      ssl->s3->hs_buf->max) ||
      !CBB_add_u8(client_hello.get(), SSL3_MT_CLIENT_HELLO) ||
      !CBB_add_u24_length_prefixed(client_hello.get(), &hello_body) ||
      !CBB_add_u16(&hello_body, version) ||
      !CBB_add_bytes(&hello_body, random, SSL3_RANDOM_SIZE) ||
      // No session id.
      !CBB_add_u8(&hello_body, 0) ||
      !CBB_add_u16_length_prefixed(&hello_body, &cipher_suites)) {
    return ssl_open_record_error;
  }

  // Copy the cipher suites.
  while (CBS_len(&cipher_specs) > 0) {
    uint32_t cipher_spec;
    if (!CBS_get_u24(&cipher_specs, &cipher_spec)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      return ssl_open_record_error;
    }

    // Skip SSLv2 ciphers.
    if ((cipher_spec & 0xff0000) != 0) {
      continue;
    }
    if (!CBB_add_u16(&cipher_suites, cipher_spec)) {
      OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
      return ssl_open_record_error;
    }
  }

  // Add the null compression scheme and finish.
  if (!CBB_add_u8(&hello_body, 1) ||  //
      !CBB_add_u8(&hello_body, 0) ||  //
      !CBB_finish(client_hello.get(), NULL, &ssl->s3->hs_buf->length)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return ssl_open_record_error;
  }

  *out_consumed = 2 + msg_length;
  ssl->s3->is_v2_hello = true;
  return ssl_open_record_success;
}

// parse_message reads the next complete handshake message out of |hs_buf|.
// On failure, it sets |*out_bytes_needed| to the total bytes required for the
// message (or 4 if even the header is incomplete).
static bool parse_message(const SSL *ssl, SSLMessage *out,
                          size_t *out_bytes_needed) {
  if (!ssl->s3->hs_buf) {
    *out_bytes_needed = 4;
    return false;
  }

  CBS cbs;
  uint32_t len;
  CBS_init(&cbs, reinterpret_cast(ssl->s3->hs_buf->data),
           ssl->s3->hs_buf->length);
  if (!CBS_get_u8(&cbs, &out->type) ||  //
      !CBS_get_u24(&cbs, &len)) {
    *out_bytes_needed = 4;
    return false;
  }

  if (!CBS_get_bytes(&cbs, &out->body, len)) {
    *out_bytes_needed = 4 + len;
    return false;
  }

  CBS_init(&out->raw, reinterpret_cast(ssl->s3->hs_buf->data),
           4 + len);
  out->is_v2_hello = ssl->s3->is_v2_hello;
  return true;
}

bool tls_get_message(const SSL *ssl, SSLMessage *out) {
  size_t unused;
  if (!parse_message(ssl, out, &unused)) {
    return false;
  }

  if (!ssl->s3->has_message) {
    // V2ClientHellos already fired the msg_callback in |read_v2_client_hello|.
    if (!out->is_v2_hello) {
      ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_HANDSHAKE, out->raw);
    }
    ssl->s3->has_message = true;
  }

  return true;
}

bool tls_can_accept_handshake_data(const SSL *ssl, uint8_t *out_alert) {
  // If there is a complete message, the caller must have consumed it first.
  SSLMessage msg;
  size_t bytes_needed;
  if (parse_message(ssl, &msg, &bytes_needed)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    *out_alert = SSL_AD_INTERNAL_ERROR;
    return false;
  }

  // Enforce the limit so the peer cannot force us to buffer 16MB.
  if (bytes_needed > 4 + ssl_max_handshake_message_len(ssl)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESSIVE_MESSAGE_SIZE);
    *out_alert = SSL_AD_ILLEGAL_PARAMETER;
    return false;
  }

  return true;
}

bool tls_has_unprocessed_handshake_data(const SSL *ssl) {
  size_t msg_len = 0;
  if (ssl->s3->has_message) {
    SSLMessage msg;
    size_t unused;
    if (parse_message(ssl, &msg, &unused)) {
      msg_len = CBS_len(&msg.raw);
    }
  }

  return ssl->s3->hs_buf && ssl->s3->hs_buf->length > msg_len;
}

bool tls_append_handshake_data(SSL *ssl, Span data) {
  // Re-create the handshake buffer if needed.
  if (!ssl->s3->hs_buf) {
    ssl->s3->hs_buf.reset(BUF_MEM_new());
  }
  return ssl->s3->hs_buf &&
         BUF_MEM_append(ssl->s3->hs_buf.get(), data.data(), data.size());
}

ssl_open_record_t tls_open_handshake(SSL *ssl, size_t *out_consumed,
                                     uint8_t *out_alert, Span in) {
  *out_consumed = 0;
  // Bypass the record layer for the first message to handle V2ClientHello.
  if (ssl->server && !ssl->s3->v2_hello_done) {
    // Ask for the first 5 bytes, the size of the TLS record header. This is
    // sufficient to detect a V2ClientHello and ensures that we never read
    // beyond the first record.
    if (in.size() < SSL3_RT_HEADER_LENGTH) {
      *out_consumed = SSL3_RT_HEADER_LENGTH;
      return ssl_open_record_partial;
    }

    // Some dedicated error codes for protocol mixups should the application
    // wish to interpret them differently. (These do not overlap with
    // ClientHello or V2ClientHello.)
    auto str = bssl::BytesAsStringView(in);
    if (str.substr(0, 4) == "GET " ||   //
        str.substr(0, 5) == "POST " ||  //
        str.substr(0, 5) == "HEAD " ||  //
        str.substr(0, 4) == "PUT ") {
      OPENSSL_PUT_ERROR(SSL, SSL_R_HTTP_REQUEST);
      *out_alert = 0;
      return ssl_open_record_error;
    }
    if (str.substr(0, 5) == "CONNE") {
      OPENSSL_PUT_ERROR(SSL, SSL_R_HTTPS_PROXY_REQUEST);
      *out_alert = 0;
      return ssl_open_record_error;
    }

    // Check for a V2ClientHello.
    if ((in[0] & 0x80) != 0 && in[2] == SSL2_MT_CLIENT_HELLO &&
        in[3] == SSL3_VERSION_MAJOR) {
      auto ret = read_v2_client_hello(ssl, out_consumed, in);
      if (ret == ssl_open_record_error) {
        *out_alert = 0;
      } else if (ret == ssl_open_record_success) {
        ssl->s3->v2_hello_done = true;
      }
      return ret;
    }

    ssl->s3->v2_hello_done = true;
  }

  uint8_t type;
  Span body;
  auto ret = tls_open_record(ssl, &type, &body, out_consumed, out_alert, in);
  if (ret != ssl_open_record_success) {
    return ret;
  }

  if (type != SSL3_RT_HANDSHAKE) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD);
    *out_alert = SSL_AD_UNEXPECTED_MESSAGE;
    return ssl_open_record_error;
  }

  // Append the entire handshake record to the buffer.
  if (!tls_append_handshake_data(ssl, body)) {
    *out_alert = SSL_AD_INTERNAL_ERROR;
    return ssl_open_record_error;
  }

  return ssl_open_record_success;
}

void tls_next_message(SSL *ssl) {
  SSLMessage msg;
  if (!tls_get_message(ssl, &msg) ||  //
      !ssl->s3->hs_buf ||             //
      ssl->s3->hs_buf->length < CBS_len(&msg.raw)) {
    assert(0);
    return;
  }

  // Shift the remaining buffered data down over the consumed message.
  OPENSSL_memmove(ssl->s3->hs_buf->data,
                  ssl->s3->hs_buf->data + CBS_len(&msg.raw),
                  ssl->s3->hs_buf->length - CBS_len(&msg.raw));
  ssl->s3->hs_buf->length -= CBS_len(&msg.raw);
  ssl->s3->is_v2_hello = false;
  ssl->s3->has_message = false;

  // Post-handshake messages are rare, so release the buffer after every
  // message. During the handshake, |on_handshake_complete| will release it.
  if (!SSL_in_init(ssl) && ssl->s3->hs_buf->length == 0) {
    ssl->s3->hs_buf.reset();
  }
}

namespace {

// CipherScorer is the interface for ranking TLS 1.3 cipher suites; higher
// scores are preferred.
class CipherScorer {
 public:
  using Score = int;
  static constexpr Score kMinScore = 0;

  virtual ~CipherScorer() = default;

  virtual Score Evaluate(const SSL_CIPHER *cipher) const = 0;
};

// AesHwCipherScorer scores cipher suites based on whether AES is supported in
// hardware.
class AesHwCipherScorer : public CipherScorer {
 public:
  explicit AesHwCipherScorer(bool has_aes_hw) : aes_is_fine_(has_aes_hw) {}

  virtual ~AesHwCipherScorer() override = default;

  Score Evaluate(const SSL_CIPHER *a) const override {
    return
        // Something is always preferable to nothing.
        1 +
        // Either AES is fine, or else ChaCha20 is preferred.
        ((aes_is_fine_ || a->algorithm_enc == SSL_CHACHA20POLY1305) ? 1 : 0);
  }

 private:
  const bool aes_is_fine_;
};

// CNsaCipherScorer prefers AES-256-GCM over AES-128-GCM over anything else.
class CNsaCipherScorer : public CipherScorer {
 public:
  virtual ~CNsaCipherScorer() override = default;

  Score Evaluate(const SSL_CIPHER *a) const override {
    if (a->id == TLS1_3_CK_AES_256_GCM_SHA384) {
      return 3;
    } else if (a->id == TLS1_3_CK_AES_128_GCM_SHA256) {
      return 2;
    }
    return 1;
  }
};

}  // namespace

bool ssl_tls13_cipher_meets_policy(uint16_t cipher_id,
                                   enum ssl_compliance_policy_t policy) {
  switch (policy) {
    case ssl_compliance_policy_none:
    case ssl_compliance_policy_cnsa_202407:
      return true;

    case ssl_compliance_policy_fips_202205:
      switch (cipher_id) {
        case TLS1_3_CK_AES_128_GCM_SHA256 & 0xffff:
        case TLS1_3_CK_AES_256_GCM_SHA384 & 0xffff:
          return true;
        case TLS1_3_CK_CHACHA20_POLY1305_SHA256 & 0xffff:
          return false;
        default:
          assert(false);
          return false;
      }

    case ssl_compliance_policy_wpa3_192_202304:
      switch (cipher_id) {
        case TLS1_3_CK_AES_256_GCM_SHA384 & 0xffff:
          return true;
        case TLS1_3_CK_AES_128_GCM_SHA256 & 0xffff:
        case TLS1_3_CK_CHACHA20_POLY1305_SHA256 & 0xffff:
          return false;
        default:
          assert(false);
          return false;
      }
  }

  assert(false);
  return false;
}

const SSL_CIPHER *ssl_choose_tls13_cipher(CBS cipher_suites, bool has_aes_hw,
                                          uint16_t version,
                                          enum ssl_compliance_policy_t policy) {
  if (CBS_len(&cipher_suites) % 2 != 0) {
    return nullptr;
  }

  const SSL_CIPHER *best = nullptr;
  AesHwCipherScorer aes_hw_scorer(has_aes_hw);
  CNsaCipherScorer cnsa_scorer;
  CipherScorer *const scorer = (policy == ssl_compliance_policy_cnsa_202407)
                                   ? static_cast(&cnsa_scorer)
                                   : static_cast(&aes_hw_scorer);
  CipherScorer::Score best_score = CipherScorer::kMinScore;

  while (CBS_len(&cipher_suites) > 0) {
    uint16_t cipher_suite;
    if (!CBS_get_u16(&cipher_suites, &cipher_suite)) {
      return nullptr;
    }

    // Limit to TLS 1.3 ciphers we know about.
    const SSL_CIPHER *candidate = SSL_get_cipher_by_value(cipher_suite);
    if (candidate == nullptr ||
        SSL_CIPHER_get_min_version(candidate) > version ||
        SSL_CIPHER_get_max_version(candidate) < version) {
      continue;
    }

    if (!ssl_tls13_cipher_meets_policy(SSL_CIPHER_get_protocol_id(candidate),
                                       policy)) {
      continue;
    }

    const CipherScorer::Score candidate_score = scorer->Evaluate(candidate);
    // |candidate_score| must be larger to displace the current choice. That
    // way the client's order controls between ciphers with an equal score.
    if (candidate_score > best_score) {
      best = candidate;
      best_score = candidate_score;
    }
  }

  return best;
}

BSSL_NAMESPACE_END


================================================
FILE: Sources/CNIOBoringSSL/ssl/s3_lib.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2005 Nokia. All rights reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN SSL3_STATE::SSL3_STATE() : skip_early_data(false), v2_hello_done(false), is_v2_hello(false), has_message(false), initial_handshake_complete(false), session_reused(false), send_connection_binding(false), channel_id_valid(false), key_update_pending(false), early_data_accepted(false), alert_dispatch(false), renegotiate_pending(false), used_hello_retry_request(false), was_key_usage_invalid(false) {} SSL3_STATE::~SSL3_STATE() {} bool tls_new(SSL *ssl) { UniquePtr s3 = MakeUnique(); if (!s3) { return false; } // TODO(crbug.com/368805255): Fields that aren't used in DTLS should not be // allocated at all. // TODO(crbug.com/371998381): Don't create these in QUIC either, once the // placeholder QUIC ones for subsequent epochs are removed. if (!SSL_is_dtls(ssl)) { s3->aead_read_ctx = SSLAEADContext::CreateNullCipher(); s3->aead_write_ctx = SSLAEADContext::CreateNullCipher(); if (!s3->aead_read_ctx || !s3->aead_write_ctx) { return false; } } s3->hs = ssl_handshake_new(ssl); if (!s3->hs) { return false; } ssl->s3 = s3.release(); return true; } void tls_free(SSL *ssl) { if (ssl->s3 == NULL) { return; } Delete(ssl->s3); ssl->s3 = NULL; } BSSL_NAMESPACE_END ================================================ FILE: Sources/CNIOBoringSSL/ssl/s3_pkt.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the extractor dropped the header names from the
// angle-bracket includes below, and the template arguments of |Span| and
// |reinterpret_cast| throughout this file; restore them from the vendored
// file. Code tokens are otherwise kept exactly as extracted.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "../crypto/err/internal.h"
#include "../crypto/internal.h"
#include "internal.h"

BSSL_NAMESPACE_BEGIN

static int do_tls_write(SSL *ssl, size_t *out_bytes_written, uint8_t type,
                        Span in);

// tls_write_app_data encrypts and writes application data from |in|,
// fragmenting it at |max_send_fragment| (further capped by the 0-RTT limit
// when writing early data). On a partial failure it records how much was
// already written in |unreported_bytes_written| and requires the caller to
// retry with at least as much data. Returns 1 on success, <= 0 on error; when
// the early-data limit is hit it sets |*out_needs_handshake| and returns -1.
int tls_write_app_data(SSL *ssl, bool *out_needs_handshake,
                       size_t *out_bytes_written, Span in) {
  assert(ssl_can_write(ssl));
  assert(!ssl->s3->aead_write_ctx->is_null_cipher());

  *out_needs_handshake = false;

  if (ssl->s3->write_shutdown != ssl_shutdown_none) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN);
    return -1;
  }

  size_t total_bytes_written = ssl->s3->unreported_bytes_written;
  if (in.size() < total_bytes_written) {
    // This can happen if the caller disables |SSL_MODE_ENABLE_PARTIAL_WRITE|,
    // asks us to write some input of length N, we successfully encrypt M bytes
    // and write it, but fail to write the rest. We will report
    // |SSL_ERROR_WANT_WRITE|. If the caller then retries with fewer than M
    // bytes, we cannot satisfy that request. The caller is required to always
    // retry with at least as many bytes as the previous attempt.
    OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_LENGTH);
    return -1;
  }
  in = in.subspan(total_bytes_written);

  const bool is_early_data_write =
      !ssl->server && SSL_in_early_data(ssl) && ssl->s3->hs->can_early_write;
  for (;;) {
    size_t max_send_fragment = ssl->max_send_fragment;
    if (is_early_data_write) {
      SSL_HANDSHAKE *hs = ssl->s3->hs.get();
      if (hs->early_data_written >= hs->early_session->ticket_max_early_data) {
        ssl->s3->unreported_bytes_written = total_bytes_written;
        hs->can_early_write = false;
        *out_needs_handshake = true;
        return -1;
      }
      max_send_fragment = std::min(
          max_send_fragment, size_t{hs->early_session->ticket_max_early_data -
                                    hs->early_data_written});
    }

    const size_t to_write = std::min(max_send_fragment, in.size());
    size_t bytes_written;
    int ret = do_tls_write(ssl, &bytes_written, SSL3_RT_APPLICATION_DATA,
                           in.subspan(0, to_write));
    if (ret <= 0) {
      ssl->s3->unreported_bytes_written = total_bytes_written;
      return ret;
    }

    // Note |bytes_written| may be less than |to_write| if there was a pending
    // record from a smaller write attempt.
    assert(bytes_written <= to_write);
    total_bytes_written += bytes_written;
    in = in.subspan(bytes_written);
    if (is_early_data_write) {
      ssl->s3->hs->early_data_written += bytes_written;
    }

    if (in.empty() || (ssl->mode & SSL_MODE_ENABLE_PARTIAL_WRITE)) {
      ssl->s3->unreported_bytes_written = 0;
      *out_bytes_written = total_bytes_written;
      return 1;
    }
  }
}

// tls_seal_align_prefix_len returns the length of the prefix before the start
// of the bulk of the ciphertext when sealing a record with |ssl|. Callers may
// use this to align buffers.
//
// Note when TLS 1.0 CBC record-splitting is enabled, this includes the one byte
// record and is the offset into second record's ciphertext. Thus sealing a
// small record may result in a smaller output than this value.
//
// TODO(davidben): Is this alignment valuable? Record-splitting makes this a
// mess.
static size_t tls_seal_align_prefix_len(const SSL *ssl) {
  size_t ret =
      SSL3_RT_HEADER_LENGTH + ssl->s3->aead_write_ctx->ExplicitNonceLen();
  if (ssl_needs_record_splitting(ssl)) {
    ret += SSL3_RT_HEADER_LENGTH;
    ret += ssl_cipher_get_record_split_len(ssl->s3->aead_write_ctx->cipher());
  }
  return ret;
}

// do_tls_write writes an SSL record of the given type. On success, it sets
// |*out_bytes_written| to number of bytes successfully written and returns one.
// On error, it returns a value <= 0 from the underlying |BIO|.
static int do_tls_write(SSL *ssl, size_t *out_bytes_written, uint8_t type,
                        Span in) {
  // If there is a pending write, the retry must be consistent.
  if (!ssl->s3->pending_write.empty() &&
      (ssl->s3->pending_write.size() > in.size() ||
       (!(ssl->mode & SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) &&
        ssl->s3->pending_write.data() != in.data()) ||
       ssl->s3->pending_write_type != type)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_WRITE_RETRY);
    return -1;
  }

  // Flush any unwritten data to the transport. There may be data to flush even
  // if |wpend_tot| is zero.
  int ret = ssl_write_buffer_flush(ssl);
  if (ret <= 0) {
    return ret;
  }

  // If there is a pending write, we just completed it. Report it to the caller.
  if (!ssl->s3->pending_write.empty()) {
    *out_bytes_written = ssl->s3->pending_write.size();
    ssl->s3->pending_write = {};
    return 1;
  }

  SSLBuffer *buf = &ssl->s3->write_buffer;
  if (in.size() > SSL3_RT_MAX_PLAIN_LENGTH || buf->size() > 0) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return -1;
  }

  if (!tls_flush_pending_hs_data(ssl)) {
    return -1;
  }

  // We may have unflushed handshake data that must be written before |in|. This
  // may be a KeyUpdate acknowledgment, 0-RTT key change messages, or a
  // NewSessionTicket.
  Span pending_flight;
  if (ssl->s3->pending_flight != nullptr) {
    pending_flight =
        Span(reinterpret_cast(ssl->s3->pending_flight->data),
             ssl->s3->pending_flight->length);
    pending_flight = pending_flight.subspan(ssl->s3->pending_flight_offset);
  }

  size_t max_out = pending_flight.size();
  if (!in.empty()) {
    const size_t max_ciphertext_len = in.size() + SSL_max_seal_overhead(ssl);
    // Guard both additions against size_t overflow before sizing the buffer.
    if (max_ciphertext_len < in.size() ||
        max_out + max_ciphertext_len < max_out) {
      OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW);
      return -1;
    }
    max_out += max_ciphertext_len;
  }

  if (max_out == 0) {
    // Nothing to write.
    *out_bytes_written = 0;
    return 1;
  }

  if (!buf->EnsureCap(pending_flight.size() + tls_seal_align_prefix_len(ssl),
                      max_out)) {
    return -1;
  }

  // Copy |pending_flight| to the output.
  if (!pending_flight.empty()) {
    OPENSSL_memcpy(buf->remaining().data(), pending_flight.data(),
                   pending_flight.size());
    ssl->s3->pending_flight.reset();
    ssl->s3->pending_flight_offset = 0;
    buf->DidWrite(pending_flight.size());
  }

  if (!in.empty()) {
    size_t ciphertext_len;
    if (!tls_seal_record(ssl, buf->remaining().data(), &ciphertext_len,
                         buf->remaining().size(), type, in.data(),
                         in.size())) {
      return -1;
    }
    buf->DidWrite(ciphertext_len);
  }

  // Now that we've made progress on the connection, uncork KeyUpdate
  // acknowledgments.
  ssl->s3->key_update_pending = false;

  // Flush the write buffer.
  ret = ssl_write_buffer_flush(ssl);
  if (ret <= 0) {
    // Track the unfinished write.
    if (!in.empty()) {
      ssl->s3->pending_write = in;
      ssl->s3->pending_write_type = type;
    }
    return ret;
  }

  *out_bytes_written = in.size();
  return 1;
}

// tls_open_app_data opens one record from |in| and returns its application
// data in |*out|. Handshake records are buffered (rejected pre-TLS-1.3 on the
// server, where they could only be renegotiation) and reported as
// |ssl_open_record_discard|; empty application-data records are discarded;
// as a server in early data, the cumulative 0-RTT read is capped at
// |kMaxEarlyDataAccepted|.
ssl_open_record_t tls_open_app_data(SSL *ssl, Span *out,
                                    size_t *out_consumed, uint8_t *out_alert,
                                    Span in) {
  assert(ssl_can_read(ssl));
  assert(!ssl->s3->aead_read_ctx->is_null_cipher());

  uint8_t type;
  Span body;
  auto ret = tls_open_record(ssl, &type, &body, out_consumed, out_alert, in);
  if (ret != ssl_open_record_success) {
    return ret;
  }

  const bool is_early_data_read = ssl->server && SSL_in_early_data(ssl);

  if (type == SSL3_RT_HANDSHAKE) {
    // Post-handshake data prior to TLS 1.3 is always renegotiation, which we
    // never accept as a server. Otherwise |tls_get_message| will send
    // |SSL_R_EXCESSIVE_MESSAGE_SIZE|.
    if (ssl->server && ssl_protocol_version(ssl) < TLS1_3_VERSION) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_NO_RENEGOTIATION);
      *out_alert = SSL_AD_NO_RENEGOTIATION;
      return ssl_open_record_error;
    }

    if (!tls_append_handshake_data(ssl, body)) {
      *out_alert = SSL_AD_INTERNAL_ERROR;
      return ssl_open_record_error;
    }
    return ssl_open_record_discard;
  }

  if (type != SSL3_RT_APPLICATION_DATA) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD);
    *out_alert = SSL_AD_UNEXPECTED_MESSAGE;
    return ssl_open_record_error;
  }

  if (is_early_data_read) {
    if (body.size() > kMaxEarlyDataAccepted - ssl->s3->hs->early_data_read) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MUCH_READ_EARLY_DATA);
      *out_alert = SSL3_AD_UNEXPECTED_MESSAGE;
      return ssl_open_record_error;
    }

    ssl->s3->hs->early_data_read += body.size();
  }

  if (body.empty()) {
    return ssl_open_record_discard;
  }

  *out = body;
  return ssl_open_record_success;
}

// tls_open_change_cipher_spec opens one record from |in| and requires it to be
// a well-formed ChangeCipherSpec: exactly one byte equal to |SSL3_MT_CCS|.
// The message is reported to the msg_callback on success.
ssl_open_record_t tls_open_change_cipher_spec(SSL *ssl, size_t *out_consumed,
                                              uint8_t *out_alert,
                                              Span in) {
  uint8_t type;
  Span body;
  auto ret = tls_open_record(ssl, &type, &body, out_consumed, out_alert, in);
  if (ret != ssl_open_record_success) {
    return ret;
  }

  if (type != SSL3_RT_CHANGE_CIPHER_SPEC) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD);
    *out_alert = SSL_AD_UNEXPECTED_MESSAGE;
    return ssl_open_record_error;
  }

  if (body.size() != 1 || body[0] != SSL3_MT_CCS) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_CHANGE_CIPHER_SPEC);
    *out_alert = SSL_AD_ILLEGAL_PARAMETER;
    return ssl_open_record_error;
  }

  ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_CHANGE_CIPHER_SPEC, body);
  return ssl_open_record_success;
}

void ssl_send_alert(SSL *ssl, int level, int desc) {
  // This function is called in response to a fatal error from the peer. Ignore
  // any failures writing the alert and report only the original error. In
  // particular, if the transport uses |SSL_write|, our existing error will be
  // clobbered so we must save and restore the error queue. See
  // https://crbug.com/959305.
  //
  // TODO(davidben): Return the alert out of the handshake, rather than calling
  // this function internally everywhere.
  //
  // TODO(davidben): This does not allow retrying if the alert hit EAGAIN. See
  // https://crbug.com/boringssl/130.
  UniquePtr err_state(ERR_save_state());
  ssl_send_alert_impl(ssl, level, desc);
  ERR_restore_state(err_state.get());
}

// ssl_send_alert_impl queues the alert (|level|, |desc|) and updates the write
// shutdown state: close_notify for a warning-level close_notify, error
// shutdown for any fatal alert. The alert is dispatched immediately when the
// write buffer is empty; otherwise it is left for a later flush and -1 is
// returned.
int ssl_send_alert_impl(SSL *ssl, int level, int desc) {
  // It is illegal to send an alert when we've already sent a closing one.
  if (ssl->s3->write_shutdown != ssl_shutdown_none) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN);
    return -1;
  }

  if (level == SSL3_AL_WARNING && desc == SSL_AD_CLOSE_NOTIFY) {
    ssl->s3->write_shutdown = ssl_shutdown_close_notify;
  } else {
    assert(level == SSL3_AL_FATAL);
    assert(desc != SSL_AD_CLOSE_NOTIFY);
    ssl->s3->write_shutdown = ssl_shutdown_error;
  }

  ssl->s3->alert_dispatch = true;
  ssl->s3->send_alert[0] = level;
  ssl->s3->send_alert[1] = desc;
  if (ssl->s3->write_buffer.empty()) {
    // Nothing is being written out, so the alert may be dispatched
    // immediately.
    return ssl->method->dispatch_alert(ssl);
  }

  // The alert will be dispatched later.
  return -1;
}

// tls_dispatch_alert writes the queued two-byte alert, via the QUIC callback
// when applicable or as an alert record otherwise, flushes the BIO for fatal
// alerts, and notifies the msg and info callbacks. Returns 1 on success and
// the underlying write result (<= 0) on failure.
int tls_dispatch_alert(SSL *ssl) {
  if (SSL_is_quic(ssl)) {
    if (!ssl->quic_method->send_alert(ssl, ssl->s3->quic_write_level,
                                      ssl->s3->send_alert[1])) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_QUIC_INTERNAL_ERROR);
      return 0;
    }
  } else {
    size_t bytes_written;
    int ret =
        do_tls_write(ssl, &bytes_written, SSL3_RT_ALERT, ssl->s3->send_alert);
    if (ret <= 0) {
      return ret;
    }
    assert(bytes_written == 2);
  }

  ssl->s3->alert_dispatch = false;

  // If the alert is fatal, flush the BIO now.
  if (ssl->s3->send_alert[0] == SSL3_AL_FATAL) {
    BIO_flush(ssl->wbio.get());
  }

  ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_ALERT, ssl->s3->send_alert);

  int alert = (ssl->s3->send_alert[0] << 8) | ssl->s3->send_alert[1];
  ssl_do_info_callback(ssl, SSL_CB_WRITE_ALERT, alert);

  return 1;
}

BSSL_NAMESPACE_END


================================================
FILE: Sources/CNIOBoringSSL/ssl/ssl_aead_ctx.cc
================================================
/* Copyright 2015 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

// NOTE(review): the extractor dropped the header names from the
// angle-bracket includes below, and the template arguments of |Span|,
// |UniquePtr|, |MakeUnique| and |static_cast| throughout this file; restore
// them from the vendored file. Code tokens are otherwise kept exactly as
// extracted.
#include
#include
#include
#include
#include
#include
#include "../crypto/internal.h"
#include "internal.h"

// In fuzzer mode the record layer skips real encryption so fuzzers can
// exercise the protocol logic with plaintext records.
#if defined(BORINGSSL_UNSAFE_FUZZER_MODE)
#define FUZZER_MODE true
#else
#define FUZZER_MODE false
#endif

BSSL_NAMESPACE_BEGIN

// The constructor records the cipher and clears every nonce/AD layout flag;
// |Create| configures the flags per cipher and protocol version.
SSLAEADContext::SSLAEADContext(const SSL_CIPHER *cipher_arg)
    : cipher_(cipher_arg),
      variable_nonce_included_in_record_(false),
      random_variable_nonce_(false),
      xor_fixed_nonce_(false),
      omit_length_in_ad_(false),
      ad_is_header_(false) {}

SSLAEADContext::~SSLAEADContext() {}

// CreateNullCipher returns a context with no cipher configured, used for the
// initial plaintext epoch of a connection.
UniquePtr SSLAEADContext::CreateNullCipher() {
  return MakeUnique(/*cipher=*/nullptr);
}

// Create builds an AEAD context for |cipher| in |direction| at wire
// |version|, validating that |enc_key|, |mac_key| and |fixed_iv| have the
// sizes the cipher expects. It configures the nonce layout: TLS 1.3 and
// ChaCha20-Poly1305 XOR the fixed IV with the sequence number; TLS 1.2
// AES-GCM prepends the fixed IV to an explicit per-record nonce; legacy CBC
// suites use a random per-record nonce and concatenate MAC key, encryption
// key and IV into the |EVP_AEAD| input key. Returns nullptr on failure.
UniquePtr SSLAEADContext::Create(
    enum evp_aead_direction_t direction, uint16_t version,
    const SSL_CIPHER *cipher, Span enc_key, Span mac_key, Span fixed_iv) {
  const EVP_AEAD *aead;
  uint16_t protocol_version;
  size_t expected_mac_key_len, expected_fixed_iv_len;
  if (!ssl_protocol_version_from_wire(&protocol_version, version) ||
      !ssl_cipher_get_evp_aead(&aead, &expected_mac_key_len,
                               &expected_fixed_iv_len, cipher,
                               protocol_version) ||
      // Ensure the caller returned correct key sizes.
      expected_fixed_iv_len != fixed_iv.size() ||
      expected_mac_key_len != mac_key.size()) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return nullptr;
  }

  UniquePtr aead_ctx = MakeUnique(cipher);
  if (!aead_ctx) {
    return nullptr;
  }

  uint8_t merged_key[EVP_AEAD_MAX_KEY_LENGTH];

  assert(EVP_AEAD_nonce_length(aead) <= EVP_AEAD_MAX_NONCE_LENGTH);
  static_assert(EVP_AEAD_MAX_NONCE_LENGTH < 256,
                "variable_nonce_len doesn't fit in uint8_t");
  aead_ctx->variable_nonce_len_ = (uint8_t)EVP_AEAD_nonce_length(aead);
  if (mac_key.empty()) {
    // This is an actual AEAD.
    aead_ctx->fixed_nonce_.CopyFrom(fixed_iv);

    if (protocol_version >= TLS1_3_VERSION ||
        cipher->algorithm_enc & SSL_CHACHA20POLY1305) {
      // TLS 1.3, and TLS 1.2 ChaCha20-Poly1305, XOR the fixed IV with the
      // sequence number to form the nonce.
      aead_ctx->xor_fixed_nonce_ = true;
      aead_ctx->variable_nonce_len_ = 8;
      assert(fixed_iv.size() >= aead_ctx->variable_nonce_len_);
    } else {
      // TLS 1.2 AES-GCM prepends the fixed IV to an explicit nonce.
      assert(fixed_iv.size() <= aead_ctx->variable_nonce_len_);
      assert(cipher->algorithm_enc & (SSL_AES128GCM | SSL_AES256GCM));
      aead_ctx->variable_nonce_len_ -= fixed_iv.size();
      aead_ctx->variable_nonce_included_in_record_ = true;
    }

    // Starting TLS 1.3, the AAD is the whole record header.
    if (protocol_version >= TLS1_3_VERSION) {
      aead_ctx->ad_is_header_ = true;
    }
  } else {
    // This is a CBC cipher suite that implements the |EVP_AEAD| interface. The
    // |EVP_AEAD| takes the MAC key, encryption key, and fixed IV concatenated
    // as its input key.
    assert(protocol_version < TLS1_3_VERSION);
    BSSL_CHECK(mac_key.size() + enc_key.size() + fixed_iv.size() <=
               sizeof(merged_key));
    OPENSSL_memcpy(merged_key, mac_key.data(), mac_key.size());
    OPENSSL_memcpy(merged_key + mac_key.size(), enc_key.data(),
                   enc_key.size());
    OPENSSL_memcpy(merged_key + mac_key.size() + enc_key.size(),
                   fixed_iv.data(), fixed_iv.size());
    enc_key =
        Span(merged_key, enc_key.size() + mac_key.size() + fixed_iv.size());

    // The |EVP_AEAD|'s per-encryption nonce, if any, is actually the CBC IV. It
    // must be generated randomly and prepended to the record.
    aead_ctx->variable_nonce_included_in_record_ = true;
    aead_ctx->random_variable_nonce_ = true;
    aead_ctx->omit_length_in_ad_ = true;
  }

  if (!EVP_AEAD_CTX_init_with_direction(
          aead_ctx->ctx_.get(), aead, enc_key.data(), enc_key.size(),
          EVP_AEAD_DEFAULT_TAG_LENGTH, direction)) {
    return nullptr;
  }

  return aead_ctx;
}

// CreatePlaceholderForQUIC returns a context that records |cipher| but
// performs no record protection (QUIC encrypts at a different layer).
UniquePtr SSLAEADContext::CreatePlaceholderForQUIC(const SSL_CIPHER *cipher) {
  return MakeUnique(cipher);
}

// ExplicitNonceLen returns the number of per-record nonce bytes carried on
// the wire, or zero when the nonce is implicit (or in fuzzer mode).
size_t SSLAEADContext::ExplicitNonceLen() const {
  if (!FUZZER_MODE && variable_nonce_included_in_record_) {
    return variable_nonce_len_;
  }
  return 0;
}

// SuffixLen sets |*out_suffix_len| to the length of the authentication-tag
// suffix for a record of |in_len| plaintext bytes plus |extra_in_len|
// appended bytes. For the null cipher (or fuzzer mode) the suffix is just the
// extra input.
bool SSLAEADContext::SuffixLen(size_t *out_suffix_len, const size_t in_len,
                               const size_t extra_in_len) const {
  if (is_null_cipher() || FUZZER_MODE) {
    *out_suffix_len = extra_in_len;
    return true;
  }
  return !!EVP_AEAD_CTX_tag_len(ctx_.get(), out_suffix_len, in_len,
                                extra_in_len);
}

// CiphertextLen computes the total on-the-wire ciphertext length (explicit
// nonce + plaintext + suffix), rejecting results that overflow or do not fit
// in a record's 16-bit length field.
bool SSLAEADContext::CiphertextLen(size_t *out_len, const size_t in_len,
                                   const size_t extra_in_len) const {
  size_t len;
  if (!SuffixLen(&len, in_len, extra_in_len)) {
    return false;
  }
  len += ExplicitNonceLen();
  len += in_len;
  if (len < in_len || len >= 0xffff) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW);
    return false;
  }
  *out_len = len;
  return true;
}

// MaxOverhead returns an upper bound on the ciphertext-minus-plaintext size
// for this context.
size_t SSLAEADContext::MaxOverhead() const {
  return ExplicitNonceLen() +
         (is_null_cipher() || FUZZER_MODE
              ? 0
              : EVP_AEAD_max_overhead(EVP_AEAD_CTX_aead(ctx_.get())));
}

// MaxSealInputLen returns the largest plaintext length that seals into at
// most |max_out| bytes. For CBC suites the capacity is first rounded down to
// a whole number of blocks, and the worst-case overhead is reduced to one
// padding byte plus the MAC accordingly.
size_t SSLAEADContext::MaxSealInputLen(size_t max_out) const {
  size_t explicit_nonce_len = ExplicitNonceLen();
  if (max_out <= explicit_nonce_len) {
    return 0;
  }
  max_out -= explicit_nonce_len;
  if (is_null_cipher() || FUZZER_MODE) {
    return max_out;
  }

  // TODO(crbug.com/42290602): This should be part of |EVP_AEAD_CTX|.
  size_t overhead = EVP_AEAD_max_overhead(EVP_AEAD_CTX_aead(ctx_.get()));
  if (SSL_CIPHER_is_block_cipher(cipher())) {
    size_t block_size;
    switch (cipher()->algorithm_enc) {
      case SSL_AES128:
      case SSL_AES256:
        block_size = 16;
        break;
      case SSL_3DES:
        block_size = 8;
        break;
      default:
        abort();
    }
    // The output for a CBC cipher is always a whole number of blocks. Round the
    // remaining capacity down.
    max_out &= ~(block_size - 1);
    // The maximum overhead is a full block of padding and the MAC, but the
    // minimum overhead is one byte of padding, once we know the output is
    // rounded down.
    assert(overhead > block_size);
    overhead -= block_size - 1;
  }
  return max_out <= overhead ? 0 : max_out - overhead;
}

// GetAdditionalData assembles the additional authenticated data into
// |storage|: for TLS 1.3 it is the record header itself; otherwise it is the
// 8-byte sequence number, type, and wire version, plus the plaintext length
// unless this context omits it (legacy CBC suites).
Span SSLAEADContext::GetAdditionalData(uint8_t storage[13], uint8_t type,
                                       uint16_t record_version,
                                       uint64_t seqnum, size_t plaintext_len,
                                       Span header) {
  if (ad_is_header_) {
    return header;
  }

  CRYPTO_store_u64_be(storage, seqnum);
  size_t len = 8;
  storage[len++] = type;
  storage[len++] = static_cast((record_version >> 8));
  storage[len++] = static_cast(record_version);
  if (!omit_length_in_ad_) {
    storage[len++] = static_cast((plaintext_len >> 8));
    storage[len++] = static_cast(plaintext_len);
  }
  return Span(storage, len);
}

// Open decrypts the record payload |in| in place and sets |*out| to the
// plaintext. It reconstructs the nonce per the context's layout (explicit
// bytes consumed from the record, or the sequence number XORed with the fixed
// IV) and fails on publicly invalid (too-short) inputs before invoking the
// AEAD.
bool SSLAEADContext::Open(Span *out, uint8_t type, uint16_t record_version,
                          uint64_t seqnum, Span header, Span in) {
  if (is_null_cipher() || FUZZER_MODE) {
    // Handle the initial NULL cipher.
    *out = in;
    return true;
  }

  // TLS 1.2 AEADs include the length in the AD and are assumed to have fixed
  // overhead. Otherwise the parameter is unused.
  size_t plaintext_len = 0;
  if (!omit_length_in_ad_) {
    size_t overhead = MaxOverhead();
    if (in.size() < overhead) {
      // Publicly invalid.
      OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_PACKET_LENGTH);
      return false;
    }
    plaintext_len = in.size() - overhead;
  }

  uint8_t ad_storage[13];
  Span ad = GetAdditionalData(ad_storage, type, record_version, seqnum,
                              plaintext_len, header);

  // Assemble the nonce.
  uint8_t nonce[EVP_AEAD_MAX_NONCE_LENGTH];
  size_t nonce_len = 0;

  // Prepend the fixed nonce, or left-pad with zeros if XORing.
  if (xor_fixed_nonce_) {
    nonce_len = fixed_nonce_.size() - variable_nonce_len_;
    OPENSSL_memset(nonce, 0, nonce_len);
  } else {
    OPENSSL_memcpy(nonce, fixed_nonce_.data(), fixed_nonce_.size());
    nonce_len += fixed_nonce_.size();
  }

  // Add the variable nonce.
  if (variable_nonce_included_in_record_) {
    if (in.size() < variable_nonce_len_) {
      // Publicly invalid.
      OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_PACKET_LENGTH);
      return false;
    }
    OPENSSL_memcpy(nonce + nonce_len, in.data(), variable_nonce_len_);
    in = in.subspan(variable_nonce_len_);
  } else {
    assert(variable_nonce_len_ == 8);
    CRYPTO_store_u64_be(nonce + nonce_len, seqnum);
  }
  nonce_len += variable_nonce_len_;

  // XOR the fixed nonce, if necessary.
  if (xor_fixed_nonce_) {
    assert(nonce_len == fixed_nonce_.size());
    for (size_t i = 0; i < fixed_nonce_.size(); i++) {
      nonce[i] ^= fixed_nonce_[i];
    }
  }

  // Decrypt in-place.
  size_t len;
  if (!EVP_AEAD_CTX_open(ctx_.get(), in.data(), &len, in.size(), nonce,
                         nonce_len, in.data(), in.size(), ad.data(),
                         ad.size())) {
    return false;
  }
  *out = in.subspan(0, len);
  return true;
}

// SealScatter encrypts |in| (plus |extra_in|) into the three output regions:
// |out_prefix| receives the explicit nonce (if any), |out| the bulk
// ciphertext, and |out_suffix| the tag. Aliasing between input and any output
// region other than exact in-place |out| is rejected. The variable nonce is
// random for CBC suites and the sequence number otherwise.
bool SSLAEADContext::SealScatter(uint8_t *out_prefix, uint8_t *out,
                                 uint8_t *out_suffix, uint8_t type,
                                 uint16_t record_version, uint64_t seqnum,
                                 Span header, const uint8_t *in, size_t in_len,
                                 const uint8_t *extra_in,
                                 size_t extra_in_len) {
  const size_t prefix_len = ExplicitNonceLen();
  size_t suffix_len;
  if (!SuffixLen(&suffix_len, in_len, extra_in_len)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE);
    return false;
  }
  if ((in != out && buffers_alias(in, in_len, out, in_len)) ||
      buffers_alias(in, in_len, out_prefix, prefix_len) ||
      buffers_alias(in, in_len, out_suffix, suffix_len)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_OUTPUT_ALIASES_INPUT);
    return false;
  }

  if (is_null_cipher() || FUZZER_MODE) {
    // Handle the initial NULL cipher.
    OPENSSL_memmove(out, in, in_len);
    OPENSSL_memmove(out_suffix, extra_in, extra_in_len);
    return true;
  }

  uint8_t ad_storage[13];
  Span ad = GetAdditionalData(ad_storage, type, record_version, seqnum,
                              in_len, header);

  // Assemble the nonce.
  uint8_t nonce[EVP_AEAD_MAX_NONCE_LENGTH];
  size_t nonce_len = 0;

  // Prepend the fixed nonce, or left-pad with zeros if XORing.
  if (xor_fixed_nonce_) {
    nonce_len = fixed_nonce_.size() - variable_nonce_len_;
    OPENSSL_memset(nonce, 0, nonce_len);
  } else {
    OPENSSL_memcpy(nonce, fixed_nonce_.data(), fixed_nonce_.size());
    nonce_len += fixed_nonce_.size();
  }

  // Select the variable nonce.
  if (random_variable_nonce_) {
    assert(variable_nonce_included_in_record_);
    if (!RAND_bytes(nonce + nonce_len, variable_nonce_len_)) {
      return false;
    }
  } else {
    // When sending we use the sequence number as the variable part of the
    // nonce.
    assert(variable_nonce_len_ == 8);
    CRYPTO_store_u64_be(nonce + nonce_len, seqnum);
  }
  nonce_len += variable_nonce_len_;

  // Emit the variable nonce if included in the record.
  if (variable_nonce_included_in_record_) {
    assert(!xor_fixed_nonce_);
    if (buffers_alias(in, in_len, out_prefix, variable_nonce_len_)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_OUTPUT_ALIASES_INPUT);
      return false;
    }
    OPENSSL_memcpy(out_prefix, nonce + fixed_nonce_.size(),
                   variable_nonce_len_);
  }

  // XOR the fixed nonce, if necessary.
  if (xor_fixed_nonce_) {
    assert(nonce_len == fixed_nonce_.size());
    for (size_t i = 0; i < fixed_nonce_.size(); i++) {
      nonce[i] ^= fixed_nonce_[i];
    }
  }

  size_t written_suffix_len;
  bool result = !!EVP_AEAD_CTX_seal_scatter(
      ctx_.get(), out, out_suffix, &written_suffix_len, suffix_len, nonce,
      nonce_len, in, in_len, extra_in, extra_in_len, ad.data(), ad.size());
  assert(!result || written_suffix_len == suffix_len);
  return result;
}

// Seal encrypts |in| into the contiguous buffer |out| (explicit nonce, then
// ciphertext, then tag), checking length arithmetic for overflow and |out|
// capacity before delegating to |SealScatter|.
bool SSLAEADContext::Seal(uint8_t *out, size_t *out_len, size_t max_out_len,
                          uint8_t type, uint16_t record_version,
                          uint64_t seqnum, Span header, const uint8_t *in,
                          size_t in_len) {
  const size_t prefix_len = ExplicitNonceLen();
  size_t suffix_len;
  if (!SuffixLen(&suffix_len, in_len, 0)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE);
    return false;
  }
  if (in_len + prefix_len < in_len ||
      in_len + prefix_len + suffix_len < in_len + prefix_len) {
    // NOTE(review): this error is filed under the CIPHER module while the
    // surrounding checks use SSL — looks inconsistent; confirm against
    // upstream before changing.
    OPENSSL_PUT_ERROR(CIPHER, SSL_R_RECORD_TOO_LARGE);
    return false;
  }
  if (in_len + prefix_len + suffix_len > max_out_len) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL);
    return false;
  }

  if (!SealScatter(out, out + prefix_len, out + prefix_len + in_len, type,
                   record_version, seqnum, header, in, in_len, 0, 0)) {
    return false;
  }
  *out_len = prefix_len + in_len + suffix_len;
  return true;
}

// GetIV reports the context's IV via |EVP_AEAD_CTX_get_iv|; always false for
// the null cipher.
bool SSLAEADContext::GetIV(const uint8_t **out_iv, size_t *out_iv_len) const {
  return !is_null_cipher() &&
         EVP_AEAD_CTX_get_iv(ctx_.get(), out_iv, out_iv_len);
}

BSSL_NAMESPACE_END


================================================
FILE: Sources/CNIOBoringSSL/ssl/ssl_asn1.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 2005 Nokia. All rights reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN // An SSL_SESSION is serialized as the following ASN.1 structure: // // SSLSession ::= SEQUENCE { // version INTEGER (1), -- session structure version // sslVersion INTEGER, -- protocol version number // cipher OCTET STRING, -- two bytes long // sessionID OCTET STRING, // secret OCTET STRING, // time [1] INTEGER, -- seconds since UNIX epoch // timeout [2] INTEGER, -- in seconds // peer [3] Certificate OPTIONAL, // sessionIDContext [4] OCTET STRING OPTIONAL, // verifyResult [5] INTEGER OPTIONAL, -- one of X509_V_* codes // pskIdentity [8] OCTET STRING OPTIONAL, // ticketLifetimeHint [9] INTEGER OPTIONAL, -- client-only // ticket [10] OCTET STRING OPTIONAL, -- client-only // peerSHA256 [13] OCTET STRING OPTIONAL, // originalHandshakeHash [14] OCTET STRING OPTIONAL, // signedCertTimestampList [15] OCTET STRING OPTIONAL, // -- contents of SCT extension // ocspResponse [16] OCTET STRING OPTIONAL, // -- stapled OCSP response from the server // extendedMasterSecret [17] BOOLEAN OPTIONAL, // groupID [18] INTEGER OPTIONAL, // certChain [19] SEQUENCE OF Certificate OPTIONAL, // ticketAgeAdd [21] OCTET STRING OPTIONAL, // isServer [22] BOOLEAN DEFAULT TRUE, // peerSignatureAlgorithm [23] INTEGER OPTIONAL, // ticketMaxEarlyData [24] INTEGER OPTIONAL, // authTimeout [25] INTEGER OPTIONAL, -- defaults to timeout // earlyALPN [26] OCTET STRING OPTIONAL, // isQuic [27] BOOLEAN OPTIONAL, // quicEarlyDataHash [28] OCTET STRING OPTIONAL, // localALPS [29] OCTET STRING OPTIONAL, // peerALPS [30] OCTET STRING OPTIONAL, // -- Either both or none of localALPS and peerALPS must be present. If both // -- are present, earlyALPN must be present and non-empty. 
// } // // Note: historically this serialization has included other optional // fields. Their presence is currently treated as a parse error, except for // hostName, which is ignored. // // keyArg [0] IMPLICIT OCTET STRING OPTIONAL, // hostName [6] OCTET STRING OPTIONAL, // pskIdentityHint [7] OCTET STRING OPTIONAL, // compressionMethod [11] OCTET STRING OPTIONAL, // srpUsername [12] OCTET STRING OPTIONAL, // ticketFlags [20] INTEGER OPTIONAL, static const unsigned kVersion = 1; static const CBS_ASN1_TAG kTimeTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 1; static const CBS_ASN1_TAG kTimeoutTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 2; static const CBS_ASN1_TAG kPeerTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 3; static const CBS_ASN1_TAG kSessionIDContextTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 4; static const CBS_ASN1_TAG kVerifyResultTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 5; static const CBS_ASN1_TAG kHostNameTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 6; static const CBS_ASN1_TAG kPSKIdentityTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 8; static const CBS_ASN1_TAG kTicketLifetimeHintTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 9; static const CBS_ASN1_TAG kTicketTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 10; static const CBS_ASN1_TAG kPeerSHA256Tag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 13; static const CBS_ASN1_TAG kOriginalHandshakeHashTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 14; static const CBS_ASN1_TAG kSignedCertTimestampListTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 15; static const CBS_ASN1_TAG kOCSPResponseTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 16; static const CBS_ASN1_TAG kExtendedMasterSecretTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 17; static const CBS_ASN1_TAG kGroupIDTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 18; static const 
CBS_ASN1_TAG kCertChainTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 19; static const CBS_ASN1_TAG kTicketAgeAddTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 21; static const CBS_ASN1_TAG kIsServerTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 22; static const CBS_ASN1_TAG kPeerSignatureAlgorithmTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 23; static const CBS_ASN1_TAG kTicketMaxEarlyDataTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 24; static const CBS_ASN1_TAG kAuthTimeoutTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 25; static const CBS_ASN1_TAG kEarlyALPNTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 26; static const CBS_ASN1_TAG kIsQuicTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 27; static const CBS_ASN1_TAG kQuicEarlyDataContextTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 28; static const CBS_ASN1_TAG kLocalALPSTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 29; static const CBS_ASN1_TAG kPeerALPSTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 30; static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, CBB *cbb, int for_ticket) { if (in == NULL || in->cipher == NULL) { return 0; } CBB session, child, child2; if (!CBB_add_asn1(cbb, &session, CBS_ASN1_SEQUENCE) || !CBB_add_asn1_uint64(&session, kVersion) || !CBB_add_asn1_uint64(&session, in->ssl_version) || !CBB_add_asn1(&session, &child, CBS_ASN1_OCTETSTRING) || !CBB_add_u16(&child, (uint16_t)(in->cipher->id & 0xffff)) || // The session ID is irrelevant for a session ticket. !CBB_add_asn1_octet_string(&session, in->session_id.data(), for_ticket ? 
0 : in->session_id.size()) || !CBB_add_asn1_octet_string(&session, in->secret.data(), in->secret.size()) || !CBB_add_asn1(&session, &child, kTimeTag) || !CBB_add_asn1_uint64(&child, in->time) || !CBB_add_asn1(&session, &child, kTimeoutTag) || !CBB_add_asn1_uint64(&child, in->timeout)) { return 0; } // The peer certificate is only serialized if the SHA-256 isn't // serialized instead. if (sk_CRYPTO_BUFFER_num(in->certs.get()) > 0 && !in->peer_sha256_valid) { const CRYPTO_BUFFER *buffer = sk_CRYPTO_BUFFER_value(in->certs.get(), 0); if (!CBB_add_asn1(&session, &child, kPeerTag) || !CBB_add_bytes(&child, CRYPTO_BUFFER_data(buffer), CRYPTO_BUFFER_len(buffer))) { return 0; } } // Although it is OPTIONAL and usually empty, OpenSSL has // historically always encoded the sid_ctx. if (!CBB_add_asn1(&session, &child, kSessionIDContextTag) || !CBB_add_asn1_octet_string(&child, in->sid_ctx.data(), in->sid_ctx.size())) { return 0; } if (in->verify_result != X509_V_OK) { if (!CBB_add_asn1(&session, &child, kVerifyResultTag) || !CBB_add_asn1_uint64(&child, in->verify_result)) { return 0; } } if (in->psk_identity) { if (!CBB_add_asn1(&session, &child, kPSKIdentityTag) || !CBB_add_asn1_octet_string(&child, (const uint8_t *)in->psk_identity.get(), strlen(in->psk_identity.get()))) { return 0; } } if (in->ticket_lifetime_hint > 0) { if (!CBB_add_asn1(&session, &child, kTicketLifetimeHintTag) || !CBB_add_asn1_uint64(&child, in->ticket_lifetime_hint)) { return 0; } } if (!in->ticket.empty() && !for_ticket) { if (!CBB_add_asn1(&session, &child, kTicketTag) || !CBB_add_asn1_octet_string(&child, in->ticket.data(), in->ticket.size())) { return 0; } } if (in->peer_sha256_valid) { if (!CBB_add_asn1(&session, &child, kPeerSHA256Tag) || !CBB_add_asn1_octet_string(&child, in->peer_sha256, sizeof(in->peer_sha256))) { return 0; } } if (!in->original_handshake_hash.empty()) { if (!CBB_add_asn1(&session, &child, kOriginalHandshakeHashTag) || !CBB_add_asn1_octet_string(&child, 
in->original_handshake_hash.data(), in->original_handshake_hash.size())) { return 0; } } if (in->signed_cert_timestamp_list != nullptr) { if (!CBB_add_asn1(&session, &child, kSignedCertTimestampListTag) || !CBB_add_asn1_octet_string( &child, CRYPTO_BUFFER_data(in->signed_cert_timestamp_list.get()), CRYPTO_BUFFER_len(in->signed_cert_timestamp_list.get()))) { return 0; } } if (in->ocsp_response != nullptr) { if (!CBB_add_asn1(&session, &child, kOCSPResponseTag) || !CBB_add_asn1_octet_string( &child, CRYPTO_BUFFER_data(in->ocsp_response.get()), CRYPTO_BUFFER_len(in->ocsp_response.get()))) { return 0; } } if (in->extended_master_secret) { if (!CBB_add_asn1(&session, &child, kExtendedMasterSecretTag) || !CBB_add_asn1_bool(&child, true)) { return 0; } } if (in->group_id > 0 && // (!CBB_add_asn1(&session, &child, kGroupIDTag) || // !CBB_add_asn1_uint64(&child, in->group_id))) { return 0; } // The certificate chain is only serialized if the leaf's SHA-256 isn't // serialized instead. if (in->certs != NULL && // !in->peer_sha256_valid && // sk_CRYPTO_BUFFER_num(in->certs.get()) >= 2) { if (!CBB_add_asn1(&session, &child, kCertChainTag)) { return 0; } for (size_t i = 1; i < sk_CRYPTO_BUFFER_num(in->certs.get()); i++) { const CRYPTO_BUFFER *buffer = sk_CRYPTO_BUFFER_value(in->certs.get(), i); if (!CBB_add_bytes(&child, CRYPTO_BUFFER_data(buffer), CRYPTO_BUFFER_len(buffer))) { return 0; } } } if (in->ticket_age_add_valid) { if (!CBB_add_asn1(&session, &child, kTicketAgeAddTag) || !CBB_add_asn1(&child, &child2, CBS_ASN1_OCTETSTRING) || !CBB_add_u32(&child2, in->ticket_age_add)) { return 0; } } if (!in->is_server) { if (!CBB_add_asn1(&session, &child, kIsServerTag) || !CBB_add_asn1_bool(&child, false)) { return 0; } } if (in->peer_signature_algorithm != 0 && (!CBB_add_asn1(&session, &child, kPeerSignatureAlgorithmTag) || !CBB_add_asn1_uint64(&child, in->peer_signature_algorithm))) { return 0; } if (in->ticket_max_early_data != 0 && (!CBB_add_asn1(&session, &child, 
kTicketMaxEarlyDataTag) || !CBB_add_asn1_uint64(&child, in->ticket_max_early_data))) { return 0; } if (in->timeout != in->auth_timeout && (!CBB_add_asn1(&session, &child, kAuthTimeoutTag) || !CBB_add_asn1_uint64(&child, in->auth_timeout))) { return 0; } if (!in->early_alpn.empty()) { if (!CBB_add_asn1(&session, &child, kEarlyALPNTag) || !CBB_add_asn1_octet_string(&child, in->early_alpn.data(), in->early_alpn.size())) { return 0; } } if (in->is_quic) { if (!CBB_add_asn1(&session, &child, kIsQuicTag) || !CBB_add_asn1_bool(&child, true)) { return 0; } } if (!in->quic_early_data_context.empty()) { if (!CBB_add_asn1(&session, &child, kQuicEarlyDataContextTag) || !CBB_add_asn1_octet_string(&child, in->quic_early_data_context.data(), in->quic_early_data_context.size())) { return 0; } } if (in->has_application_settings) { if (!CBB_add_asn1(&session, &child, kLocalALPSTag) || !CBB_add_asn1_octet_string(&child, in->local_application_settings.data(), in->local_application_settings.size()) || !CBB_add_asn1(&session, &child, kPeerALPSTag) || !CBB_add_asn1_octet_string(&child, in->peer_application_settings.data(), in->peer_application_settings.size())) { return 0; } } return CBB_flush(cbb); } // SSL_SESSION_parse_string gets an optional ASN.1 OCTET STRING explicitly // tagged with |tag| from |cbs| and saves it in |*out|. If the element was not // found, it sets |*out| to NULL. It returns one on success, whether or not the // element was found, and zero on decode error. 
static int SSL_SESSION_parse_string(CBS *cbs, UniquePtr *out,
                                    CBS_ASN1_TAG tag) {
  CBS value;
  int present;
  if (!CBS_get_optional_asn1_octet_string(cbs, &value, &present, tag)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    return 0;
  }
  if (present) {
    // The value is stored as a NUL-terminated C string, so an embedded zero
    // byte would silently truncate it; reject such input as malformed.
    if (CBS_contains_zero_byte(&value)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
      return 0;
    }
    char *raw = nullptr;
    if (!CBS_strdup(&value, &raw)) {
      return 0;
    }
    out->reset(raw);
  } else {
    // Element absent: clear any previous value.
    out->reset();
  }
  return 1;
}

// SSL_SESSION_parse_octet_string gets an optional ASN.1 OCTET STRING explicitly
// tagged with |tag| from |cbs| and stows it in |*out|. It returns one on
// success, whether or not the element was found, and zero on decode error.
static bool SSL_SESSION_parse_octet_string(CBS *cbs, Array *out,
                                           CBS_ASN1_TAG tag) {
  CBS value;
  if (!CBS_get_optional_asn1_octet_string(cbs, &value, NULL, tag)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    return false;
  }
  // NOTE(review): when the element is absent, |value| appears to be left
  // empty, so |*out| is cleared rather than preserved — confirm against
  // |CBS_get_optional_asn1_octet_string|'s contract.
  return out->CopyFrom(value);
}

// SSL_SESSION_parse_crypto_buffer reads an optional ASN.1 OCTET STRING
// explicitly tagged with |tag| from |cbs| into a CRYPTO_BUFFER allocated from
// |pool|. If the element is absent, |*out| is left unchanged. Returns one on
// success, whether or not the element was found, and zero on error.
static int SSL_SESSION_parse_crypto_buffer(CBS *cbs, UniquePtr *out,
                                           CBS_ASN1_TAG tag,
                                           CRYPTO_BUFFER_POOL *pool) {
  if (!CBS_peek_asn1_tag(cbs, tag)) {
    return 1;
  }
  CBS child, value;
  // Trailing data inside the explicitly tagged wrapper is a decode error.
  if (!CBS_get_asn1(cbs, &child, tag) ||
      !CBS_get_asn1(&child, &value, CBS_ASN1_OCTETSTRING) ||
      CBS_len(&child) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    return 0;
  }
  out->reset(CRYPTO_BUFFER_new_from_CBS(&value, pool));
  if (*out == nullptr) {
    return 0;
  }
  return 1;
}

// SSL_SESSION_parse_long reads an optional INTEGER tagged with |tag| into
// |*out|, using |default_value| when the element is absent. Values above
// LONG_MAX are rejected as a decode error.
static int SSL_SESSION_parse_long(CBS *cbs, long *out, CBS_ASN1_TAG tag,
                                  long default_value) {
  uint64_t value;
  if (!CBS_get_optional_asn1_uint64(cbs, &value, tag,
                                    (uint64_t)default_value) ||
      value > LONG_MAX) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    return 0;
  }
  *out = (long)value;
  return 1;
}

// SSL_SESSION_parse_u32 reads an optional INTEGER tagged with |tag| into a
// uint32_t, using |default_value| when the element is absent. Values that do
// not fit in 32 bits are rejected.
static int SSL_SESSION_parse_u32(CBS *cbs, uint32_t *out, CBS_ASN1_TAG tag,
                                 uint32_t default_value) {
  uint64_t value;
  if (!CBS_get_optional_asn1_uint64(cbs, &value, tag,
                                    (uint64_t)default_value) ||
      value > 0xffffffff) {
    OPENSSL_PUT_ERROR(SSL,
SSL_R_INVALID_SSL_SESSION); return 0; } *out = (uint32_t)value; return 1; } static int SSL_SESSION_parse_u16(CBS *cbs, uint16_t *out, CBS_ASN1_TAG tag, uint16_t default_value) { uint64_t value; if (!CBS_get_optional_asn1_uint64(cbs, &value, tag, (uint64_t)default_value) || value > 0xffff) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return 0; } *out = (uint16_t)value; return 1; } UniquePtr SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, CRYPTO_BUFFER_POOL *pool) { UniquePtr ret = ssl_session_new(x509_method); if (!ret) { return nullptr; } CBS session; uint64_t version, ssl_version; uint16_t unused; if (!CBS_get_asn1(cbs, &session, CBS_ASN1_SEQUENCE) || // !CBS_get_asn1_uint64(&session, &version) || // version != kVersion || // !CBS_get_asn1_uint64(&session, &ssl_version) || // // Require sessions have versions valid in either TLS or DTLS. The session // will not be used by the handshake if not applicable, but, for // simplicity, never parse a session that does not pass // |ssl_protocol_version_from_wire|. 
ssl_version > UINT16_MAX || // !ssl_protocol_version_from_wire(&unused, ssl_version)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } ret->ssl_version = ssl_version; CBS cipher; uint16_t cipher_value; if (!CBS_get_asn1(&session, &cipher, CBS_ASN1_OCTETSTRING) || // !CBS_get_u16(&cipher, &cipher_value) || // CBS_len(&cipher) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } ret->cipher = SSL_get_cipher_by_value(cipher_value); if (ret->cipher == NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_CIPHER); return nullptr; } CBS session_id, secret, child; uint64_t timeout; if (!CBS_get_asn1(&session, &session_id, CBS_ASN1_OCTETSTRING) || !ret->session_id.TryCopyFrom(session_id) || !CBS_get_asn1(&session, &secret, CBS_ASN1_OCTETSTRING) || !ret->secret.TryCopyFrom(secret) || !CBS_get_asn1(&session, &child, kTimeTag) || !CBS_get_asn1_uint64(&child, &ret->time) || !CBS_get_asn1(&session, &child, kTimeoutTag) || !CBS_get_asn1_uint64(&child, &timeout) || // timeout > UINT32_MAX) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } ret->timeout = (uint32_t)timeout; CBS peer; int has_peer; if (!CBS_get_optional_asn1(&session, &peer, &has_peer, kPeerTag) || (has_peer && CBS_len(&peer) == 0)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } // |peer| is processed with the certificate chain. CBS sid_ctx; if (!CBS_get_optional_asn1_octet_string( &session, &sid_ctx, /*out_present=*/nullptr, kSessionIDContextTag) || !ret->sid_ctx.TryCopyFrom(sid_ctx) || !SSL_SESSION_parse_long(&session, &ret->verify_result, kVerifyResultTag, X509_V_OK)) { return nullptr; } // Skip the historical hostName field. 
CBS unused_hostname; if (!CBS_get_optional_asn1(&session, &unused_hostname, nullptr, kHostNameTag)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } if (!SSL_SESSION_parse_string(&session, &ret->psk_identity, kPSKIdentityTag) || !SSL_SESSION_parse_u32(&session, &ret->ticket_lifetime_hint, kTicketLifetimeHintTag, 0) || !SSL_SESSION_parse_octet_string(&session, &ret->ticket, kTicketTag)) { return nullptr; } if (CBS_peek_asn1_tag(&session, kPeerSHA256Tag)) { CBS peer_sha256; if (!CBS_get_asn1(&session, &child, kPeerSHA256Tag) || !CBS_get_asn1(&child, &peer_sha256, CBS_ASN1_OCTETSTRING) || CBS_len(&peer_sha256) != sizeof(ret->peer_sha256) || CBS_len(&child) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } OPENSSL_memcpy(ret->peer_sha256, CBS_data(&peer_sha256), sizeof(ret->peer_sha256)); ret->peer_sha256_valid = true; } else { ret->peer_sha256_valid = false; } CBS original_handshake_hash; if (!CBS_get_optional_asn1_octet_string(&session, &original_handshake_hash, /*out_present=*/nullptr, kOriginalHandshakeHashTag) || !ret->original_handshake_hash.TryCopyFrom(original_handshake_hash) || !SSL_SESSION_parse_crypto_buffer(&session, &ret->signed_cert_timestamp_list, kSignedCertTimestampListTag, pool) || !SSL_SESSION_parse_crypto_buffer(&session, &ret->ocsp_response, kOCSPResponseTag, pool)) { return nullptr; } int extended_master_secret; if (!CBS_get_optional_asn1_bool(&session, &extended_master_secret, kExtendedMasterSecretTag, 0 /* default to false */)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } ret->extended_master_secret = !!extended_master_secret; if (!SSL_SESSION_parse_u16(&session, &ret->group_id, kGroupIDTag, 0)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } CBS cert_chain; CBS_init(&cert_chain, NULL, 0); int has_cert_chain; if (!CBS_get_optional_asn1(&session, &cert_chain, &has_cert_chain, kCertChainTag) || (has_cert_chain && CBS_len(&cert_chain) == 0)) { 
OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } if (has_cert_chain && !has_peer) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } if (has_peer || has_cert_chain) { ret->certs.reset(sk_CRYPTO_BUFFER_new_null()); if (ret->certs == nullptr) { return nullptr; } if (has_peer) { UniquePtr buffer(CRYPTO_BUFFER_new_from_CBS(&peer, pool)); if (!buffer || // !PushToStack(ret->certs.get(), std::move(buffer))) { return nullptr; } } while (CBS_len(&cert_chain) > 0) { CBS cert; if (!CBS_get_any_asn1_element(&cert_chain, &cert, NULL, NULL) || CBS_len(&cert) == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } UniquePtr buffer(CRYPTO_BUFFER_new_from_CBS(&cert, pool)); if (buffer == nullptr || !PushToStack(ret->certs.get(), std::move(buffer))) { return nullptr; } } } CBS age_add; int age_add_present; if (!CBS_get_optional_asn1_octet_string(&session, &age_add, &age_add_present, kTicketAgeAddTag) || (age_add_present && // !CBS_get_u32(&age_add, &ret->ticket_age_add)) || // CBS_len(&age_add) != 0) { return nullptr; } ret->ticket_age_add_valid = age_add_present != 0; int is_server; if (!CBS_get_optional_asn1_bool(&session, &is_server, kIsServerTag, 1 /* default to true */)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } /* TODO: in time we can include |is_server| for servers too, then we can enforce that client and server sessions are never mixed up. 
*/ ret->is_server = is_server; int is_quic; if (!SSL_SESSION_parse_u16(&session, &ret->peer_signature_algorithm, kPeerSignatureAlgorithmTag, 0) || !SSL_SESSION_parse_u32(&session, &ret->ticket_max_early_data, kTicketMaxEarlyDataTag, 0) || !SSL_SESSION_parse_u32(&session, &ret->auth_timeout, kAuthTimeoutTag, ret->timeout) || !SSL_SESSION_parse_octet_string(&session, &ret->early_alpn, kEarlyALPNTag) || !CBS_get_optional_asn1_bool(&session, &is_quic, kIsQuicTag, /*default_value=*/false) || !SSL_SESSION_parse_octet_string(&session, &ret->quic_early_data_context, kQuicEarlyDataContextTag)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } CBS settings; int has_local_alps, has_peer_alps; if (!CBS_get_optional_asn1_octet_string(&session, &settings, &has_local_alps, kLocalALPSTag) || !ret->local_application_settings.CopyFrom(settings) || !CBS_get_optional_asn1_octet_string(&session, &settings, &has_peer_alps, kPeerALPSTag) || !ret->peer_application_settings.CopyFrom(settings) || CBS_len(&session) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } ret->is_quic = is_quic; // The two ALPS values and ALPN must be consistent. if (has_local_alps != has_peer_alps || (has_local_alps && ret->early_alpn.empty())) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } ret->has_application_settings = has_local_alps; if (!x509_method->session_cache_objects(ret.get())) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); return nullptr; } return ret; } bool ssl_session_serialize(const SSL_SESSION *in, CBB *cbb) { return SSL_SESSION_to_bytes_full(in, cbb, 0); } BSSL_NAMESPACE_END using namespace bssl; int SSL_SESSION_to_bytes(const SSL_SESSION *in, uint8_t **out_data, size_t *out_len) { if (in->not_resumable) { // If the caller has an unresumable session, e.g. 
// if |SSL_get_session| were
    // called on a TLS 1.3 or False Started connection, serialize with a
    // placeholder value so it is not accidentally deserialized into a resumable
    // one.
    static const char kNotResumableSession[] = "NOT RESUMABLE";
    *out_len = strlen(kNotResumableSession);
    *out_data = (uint8_t *)OPENSSL_memdup(kNotResumableSession, *out_len);
    if (*out_data == NULL) {
      return 0;
    }
    return 1;
  }

  ScopedCBB cbb;
  if (!CBB_init(cbb.get(), 256) ||
      !SSL_SESSION_to_bytes_full(in, cbb.get(), 0) ||
      !CBB_finish(cbb.get(), out_data, out_len)) {
    return 0;
  }
  return 1;
}

// Serializes |in| for embedding in a session ticket; the session ID is
// omitted (|for_ticket| = 1). The caller owns |*out_data| on success.
int SSL_SESSION_to_bytes_for_ticket(const SSL_SESSION *in, uint8_t **out_data,
                                    size_t *out_len) {
  ScopedCBB cbb;
  if (!CBB_init(cbb.get(), 256) ||
      !SSL_SESSION_to_bytes_full(in, cbb.get(), 1) ||
      !CBB_finish(cbb.get(), out_data, out_len)) {
    return 0;
  }
  return 1;
}

// Legacy OpenSSL-style i2d serializer. Returns the encoded length, or -1 on
// error. NOTE(review): when |pp| is non-NULL, this copies into |*pp| without
// a NULL check on |*pp| — unlike the full i2d convention, the caller must
// supply a sufficiently large buffer.
int i2d_SSL_SESSION(SSL_SESSION *in, uint8_t **pp) {
  uint8_t *out;
  size_t len;
  if (!SSL_SESSION_to_bytes(in, &out, &len)) {
    return -1;
  }
  if (len > INT_MAX) {
    OPENSSL_free(out);
    OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW);
    return -1;
  }
  if (pp) {
    OPENSSL_memcpy(*pp, out, len);
    *pp += len;
  }
  OPENSSL_free(out);
  return len;
}

// Deserializes a session from |in_len| bytes at |in|. Trailing bytes after
// the session structure are rejected as a decode error.
SSL_SESSION *SSL_SESSION_from_bytes(const uint8_t *in, size_t in_len,
                                    const SSL_CTX *ctx) {
  CBS cbs;
  CBS_init(&cbs, in, in_len);
  UniquePtr ret = SSL_SESSION_parse(&cbs, ctx->x509_method, ctx->pool);
  if (!ret) {
    return NULL;
  }
  if (CBS_len(&cbs) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION);
    return NULL;
  }
  return ret.release();
}

================================================
FILE: Sources/CNIOBoringSSL/ssl/ssl_buffer.cc
================================================

/* Copyright 2015 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
*
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the system header names between <...> were lost in text
// extraction; compare with upstream ssl_buffer.cc before relying on this copy.
#include
#include
#include
#include
#include
#include
#include
#include
#include "../crypto/internal.h"
#include "internal.h"

BSSL_NAMESPACE_BEGIN

// BIO uses int instead of size_t. No lengths will exceed uint16_t, so this will
// not overflow.
static_assert(0xffff <= INT_MAX, "uint16_t does not fit in int");

static_assert((SSL3_ALIGN_PAYLOAD & (SSL3_ALIGN_PAYLOAD - 1)) == 0,
              "SSL3_ALIGN_PAYLOAD must be a power of 2");

// Releases any heap allocation and resets the buffer to its empty state.
void SSLBuffer::Clear() {
  if (buf_ != inline_buf_) {
    free(buf_);  // Allocated with malloc().
  }
  buf_ = nullptr;
  offset_ = 0;
  size_ = 0;
  cap_ = 0;
}

// Grows the buffer to hold at least |new_cap| bytes, preserving contents.
// |header_len| is used only to choose an offset that aligns the record body.
bool SSLBuffer::EnsureCap(size_t header_len, size_t new_cap) {
  if (new_cap > 0xffff) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return false;
  }

  if (cap_ >= new_cap) {
    return true;
  }

  uint8_t *new_buf;
  size_t new_offset;
  if (new_cap <= sizeof(inline_buf_)) {
    // This function is called twice per TLS record, first for the five-byte
    // header. To avoid allocating twice, use an inline buffer for short inputs.
    new_buf = inline_buf_;
    new_offset = 0;
  } else {
    // Add up to |SSL3_ALIGN_PAYLOAD| - 1 bytes of slack for alignment.
    //
    // Since this buffer gets allocated quite frequently and doesn't contain any
    // sensitive data, we allocate with malloc rather than |OPENSSL_malloc| and
    // avoid zeroing on free.
    new_buf = (uint8_t *)malloc(new_cap + SSL3_ALIGN_PAYLOAD - 1);
    if (new_buf == NULL) {
      OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE);
      return false;
    }

    // Offset the buffer such that the record body is aligned.
    new_offset =
        (0 - header_len - (uintptr_t)new_buf) & (SSL3_ALIGN_PAYLOAD - 1);
  }

  // Note if the both old and new buffer are inline, the source and destination
  // may alias.
  OPENSSL_memmove(new_buf + new_offset, buf_ + offset_, size_);

  if (buf_ != inline_buf_) {
    free(buf_);  // Allocated with malloc().
  }

  buf_ = new_buf;
  offset_ = new_offset;
  cap_ = new_cap;
  return true;
}

// Records that |new_size| additional bytes were written after the current end.
// Aborts on overflow of the remaining capacity rather than corrupting state.
void SSLBuffer::DidWrite(size_t new_size) {
  if (new_size > cap() - size()) {
    abort();
  }
  size_ += new_size;
}

// Consumes |len| bytes from the front of the buffer by advancing the offset.
// Capacity shrinks accordingly; consumed bytes are not reusable.
void SSLBuffer::Consume(size_t len) {
  if (len > size_) {
    abort();
  }
  offset_ += (uint16_t)len;
  size_ -= (uint16_t)len;
  cap_ -= (uint16_t)len;
}

// Frees the buffer once everything in it has been consumed.
void SSLBuffer::DiscardConsumed() {
  if (size_ == 0) {
    Clear();
  }
}

// Reads exactly one datagram from |ssl->rbio| into the (empty) read buffer.
// Returns 1 on success, and <= 0 with |rwstate| set on transport failure.
static int dtls_read_buffer_next_packet(SSL *ssl) {
  SSLBuffer *buf = &ssl->s3->read_buffer;

  if (!buf->empty()) {
    // It is an error to call |dtls_read_buffer_extend| when the read buffer is
    // not empty.
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return -1;
  }

  // Read a single packet from |ssl->rbio|. |buf->cap()| must fit in an int.
  int ret = BIO_read(ssl->rbio.get(), buf->data(), static_cast(buf->cap()));
  if (ret <= 0) {
    ssl->s3->rwstate = SSL_ERROR_WANT_READ;
    return ret;
  }
  buf->DidWrite(static_cast(ret));
  return 1;
}

// Reads from |ssl->rbio| until the read buffer holds at least |len| bytes.
// Returns 1 on success, and <= 0 with |rwstate| set on transport failure.
static int tls_read_buffer_extend_to(SSL *ssl, size_t len) {
  SSLBuffer *buf = &ssl->s3->read_buffer;

  if (len > buf->cap()) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL);
    return -1;
  }

  // Read until the target length is reached.
  while (buf->size() < len) {
    // The amount of data to read is bounded by |buf->cap|, which must fit in an
    // int.
    int ret = BIO_read(ssl->rbio.get(), buf->data() + buf->size(),
                       static_cast(len - buf->size()));
    if (ret <= 0) {
      ssl->s3->rwstate = SSL_ERROR_WANT_READ;
      return ret;
    }
    buf->DidWrite(static_cast(ret));
  }

  return 1;
}

int ssl_read_buffer_extend_to(SSL *ssl, size_t len) {
  // |ssl_read_buffer_extend_to| implicitly discards any consumed data.
ssl->s3->read_buffer.DiscardConsumed(); if (SSL_is_dtls(ssl)) { static_assert( DTLS1_RT_MAX_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH <= 0xffff, "DTLS read buffer is too large"); // The |len| parameter is ignored in DTLS. len = DTLS1_RT_MAX_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH; } // The DTLS record header can have a variable length, so the |header_len| // value provided for buffer alignment only works if the header is the maximum // length. if (!ssl->s3->read_buffer.EnsureCap(DTLS1_RT_MAX_HEADER_LENGTH, len)) { return -1; } if (ssl->rbio == nullptr) { OPENSSL_PUT_ERROR(SSL, SSL_R_BIO_NOT_SET); return -1; } int ret; if (SSL_is_dtls(ssl)) { // |len| is ignored for a datagram transport. ret = dtls_read_buffer_next_packet(ssl); } else { ret = tls_read_buffer_extend_to(ssl, len); } if (ret <= 0) { // If the buffer was empty originally and remained empty after attempting to // extend it, release the buffer until the next attempt. ssl->s3->read_buffer.DiscardConsumed(); } return ret; } int ssl_handle_open_record(SSL *ssl, bool *out_retry, ssl_open_record_t ret, size_t consumed, uint8_t alert) { *out_retry = false; if (ret != ssl_open_record_partial) { ssl->s3->read_buffer.Consume(consumed); } if (ret != ssl_open_record_success) { // Nothing was returned to the caller, so discard anything marked consumed. 
ssl->s3->read_buffer.DiscardConsumed(); } switch (ret) { case ssl_open_record_success: return 1; case ssl_open_record_partial: { int read_ret = ssl_read_buffer_extend_to(ssl, consumed); if (read_ret <= 0) { return read_ret; } *out_retry = true; return 1; } case ssl_open_record_discard: *out_retry = true; return 1; case ssl_open_record_close_notify: ssl->s3->rwstate = SSL_ERROR_ZERO_RETURN; return 0; case ssl_open_record_error: if (alert != 0) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); } return -1; } assert(0); return -1; } static_assert(SSL3_RT_HEADER_LENGTH * 2 + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD * 2 + SSL3_RT_MAX_PLAIN_LENGTH <= 0xffff, "maximum TLS write buffer is too large"); static_assert(DTLS1_RT_MAX_HEADER_LENGTH + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD + SSL3_RT_MAX_PLAIN_LENGTH <= 0xffff, "maximum DTLS write buffer is too large"); static int tls_write_buffer_flush(SSL *ssl) { SSLBuffer *buf = &ssl->s3->write_buffer; while (!buf->empty()) { int ret = BIO_write(ssl->wbio.get(), buf->data(), buf->size()); if (ret <= 0) { ssl->s3->rwstate = SSL_ERROR_WANT_WRITE; return ret; } buf->Consume(static_cast(ret)); } buf->Clear(); return 1; } static int dtls_write_buffer_flush(SSL *ssl) { SSLBuffer *buf = &ssl->s3->write_buffer; if (buf->empty()) { return 1; } int ret = BIO_write(ssl->wbio.get(), buf->data(), buf->size()); if (ret <= 0) { ssl->s3->rwstate = SSL_ERROR_WANT_WRITE; // If the write failed, drop the write buffer anyway. Datagram transports // can't write half a packet, so the caller is expected to retry from the // top. 
buf->Clear(); return ret; } buf->Clear(); return 1; } int ssl_write_buffer_flush(SSL *ssl) { if (ssl->wbio == nullptr) { OPENSSL_PUT_ERROR(SSL, SSL_R_BIO_NOT_SET); return -1; } if (SSL_is_dtls(ssl)) { return dtls_write_buffer_flush(ssl); } else { return tls_write_buffer_flush(ssl); } } BSSL_NAMESPACE_END ================================================ FILE: Sources/CNIOBoringSSL/ssl/ssl_cert.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN CERT::CERT(const SSL_X509_METHOD *x509_method_arg) : legacy_credential(MakeUnique(SSLCredentialType::kX509)), x509_method(x509_method_arg) {} CERT::~CERT() { x509_method->cert_free(this); } UniquePtr ssl_cert_dup(CERT *cert) { UniquePtr ret = MakeUnique(cert->x509_method); if (!ret) { return nullptr; } // TODO(crbug.com/boringssl/431): This should just be |CopyFrom|. for (const auto &cred : cert->credentials) { if (!ret->credentials.Push(UpRef(cred))) { return nullptr; } } // |legacy_credential| is mutable, so it must be copied. We cannot simply // bump the reference count. 
ret->legacy_credential = cert->legacy_credential->Dup(); if (ret->legacy_credential == nullptr) { return nullptr; } ret->cert_cb = cert->cert_cb; ret->cert_cb_arg = cert->cert_cb_arg; ret->x509_method->cert_dup(ret.get(), cert); ret->sid_ctx = cert->sid_ctx; return ret; } static void ssl_cert_set_cert_cb(CERT *cert, int (*cb)(SSL *ssl, void *arg), void *arg) { cert->cert_cb = cb; cert->cert_cb_arg = arg; } static int cert_set_chain_and_key( CERT *cert, CRYPTO_BUFFER *const *certs, size_t num_certs, EVP_PKEY *privkey, const SSL_PRIVATE_KEY_METHOD *privkey_method) { if (num_certs == 0 || // (privkey == NULL && privkey_method == NULL)) { OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); return 0; } if (privkey != NULL && privkey_method != NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_HAVE_BOTH_PRIVKEY_AND_METHOD); return 0; } cert->legacy_credential->ClearCertAndKey(); if (!SSL_CREDENTIAL_set1_cert_chain(cert->legacy_credential.get(), certs, num_certs)) { return 0; } cert->x509_method->cert_flush_cached_leaf(cert); cert->x509_method->cert_flush_cached_chain(cert); return privkey != nullptr ? SSL_CREDENTIAL_set1_private_key(cert->legacy_credential.get(), privkey) : SSL_CREDENTIAL_set_private_key_method( cert->legacy_credential.get(), privkey_method); } bool ssl_set_cert(CERT *cert, UniquePtr buffer) { // Don't fail for a cert/key mismatch, just free the current private key. // (When switching to a different keypair, the caller should switch the // certificate, then the key.) 
if (!cert->legacy_credential->SetLeafCert(std::move(buffer), /*discard_key_on_mismatch=*/true)) { return false; } cert->x509_method->cert_flush_cached_leaf(cert); return true; } bool ssl_parse_cert_chain(uint8_t *out_alert, UniquePtr *out_chain, UniquePtr *out_pubkey, uint8_t *out_leaf_sha256, CBS *cbs, CRYPTO_BUFFER_POOL *pool) { out_chain->reset(); out_pubkey->reset(); CBS certificate_list; if (!CBS_get_u24_length_prefixed(cbs, &certificate_list)) { *out_alert = SSL_AD_DECODE_ERROR; OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } if (CBS_len(&certificate_list) == 0) { return true; } UniquePtr chain(sk_CRYPTO_BUFFER_new_null()); if (!chain) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } UniquePtr pubkey; while (CBS_len(&certificate_list) > 0) { CBS certificate; if (!CBS_get_u24_length_prefixed(&certificate_list, &certificate) || CBS_len(&certificate) == 0) { *out_alert = SSL_AD_DECODE_ERROR; OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_LENGTH_MISMATCH); return false; } if (sk_CRYPTO_BUFFER_num(chain.get()) == 0) { pubkey = ssl_cert_parse_pubkey(&certificate); if (!pubkey) { *out_alert = SSL_AD_DECODE_ERROR; return false; } // Retain the hash of the leaf certificate if requested. if (out_leaf_sha256 != NULL) { SHA256(CBS_data(&certificate), CBS_len(&certificate), out_leaf_sha256); } } UniquePtr buf( CRYPTO_BUFFER_new_from_CBS(&certificate, pool)); if (!buf || // !PushToStack(chain.get(), std::move(buf))) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } } *out_chain = std::move(chain); *out_pubkey = std::move(pubkey); return true; } // ssl_cert_skip_to_spki parses a DER-encoded, X.509 certificate from |in| and // positions |*out_tbs_cert| to cover the TBSCertificate, starting at the // subjectPublicKeyInfo. 
static bool ssl_cert_skip_to_spki(const CBS *in, CBS *out_tbs_cert) {
  /* From RFC 5280, section 4.1
   *    Certificate  ::=  SEQUENCE  {
   *      tbsCertificate       TBSCertificate,
   *      signatureAlgorithm   AlgorithmIdentifier,
   *      signatureValue       BIT STRING  }
   *    TBSCertificate  ::=  SEQUENCE  {
   *      version         [0]  EXPLICIT Version DEFAULT v1,
   *      serialNumber         CertificateSerialNumber,
   *      signature            AlgorithmIdentifier,
   *      issuer               Name,
   *      validity             Validity,
   *      subject              Name,
   *      subjectPublicKeyInfo SubjectPublicKeyInfo,
   *      ... }
   */
  CBS buf = *in;

  CBS toplevel;
  if (!CBS_get_asn1(&buf, &toplevel, CBS_ASN1_SEQUENCE) ||       //
      CBS_len(&buf) != 0 ||                                      //
      !CBS_get_asn1(&toplevel, out_tbs_cert, CBS_ASN1_SEQUENCE) ||
      // version
      !CBS_get_optional_asn1(
          out_tbs_cert, NULL, NULL,
          CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) ||
      // serialNumber
      !CBS_get_asn1(out_tbs_cert, NULL, CBS_ASN1_INTEGER) ||
      // signature algorithm
      !CBS_get_asn1(out_tbs_cert, NULL, CBS_ASN1_SEQUENCE) ||
      // issuer
      !CBS_get_asn1(out_tbs_cert, NULL, CBS_ASN1_SEQUENCE) ||
      // validity
      !CBS_get_asn1(out_tbs_cert, NULL, CBS_ASN1_SEQUENCE) ||
      // subject
      !CBS_get_asn1(out_tbs_cert, NULL, CBS_ASN1_SEQUENCE)) {
    return false;
  }

  return true;
}

// ssl_cert_extract_issuer parses the DER-encoded certificate in |in| and
// sets |*out_dn| to the full ASN.1 element of the issuer distinguished name.
bool ssl_cert_extract_issuer(const CBS *in, CBS *out_dn) {
  CBS buf = *in;

  CBS toplevel;
  CBS cert;
  if (!CBS_get_asn1(&buf, &toplevel, CBS_ASN1_SEQUENCE) ||  //
      CBS_len(&buf) != 0 ||                                 //
      !CBS_get_asn1(&toplevel, &cert, CBS_ASN1_SEQUENCE) ||
      // version
      !CBS_get_optional_asn1(
          &cert, NULL, NULL,
          CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) ||  //
      // serialNumber
      !CBS_get_asn1(&cert, NULL, CBS_ASN1_INTEGER) ||  //
      // signature algorithm
      !CBS_get_asn1(&cert, NULL, CBS_ASN1_SEQUENCE) ||  //
      // issuer
      !CBS_get_asn1_element(&cert, out_dn, CBS_ASN1_SEQUENCE)) {
    return false;
  }
  return true;
}

// ssl_cert_matches_issuer returns whether the certificate in |in| names the
// distinguished name |dn| as its issuer.
bool ssl_cert_matches_issuer(const CBS *in, const CBS *dn) {
  CBS issuer;
  if (!ssl_cert_extract_issuer(in, &issuer)) {
    return false;
  }
  return CBS_mem_equal(&issuer, CBS_data(dn), CBS_len(dn));
}

UniquePtr ssl_cert_parse_pubkey(const
CBS *in) {
  // (ssl_cert_parse_pubkey; its signature begins on the previous line.)
  // Extract and parse the subjectPublicKeyInfo of the DER-encoded certificate
  // in |in|; returns nullptr on parse failure.
  CBS buf = *in, tbs_cert;
  if (!ssl_cert_skip_to_spki(&buf, &tbs_cert)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_PARSE_LEAF_CERT);
    return nullptr;
  }

  return UniquePtr(EVP_parse_public_key(&tbs_cert));
}

// ssl_compare_public_and_private_key returns whether |pubkey| and |privkey|
// form a matching keypair. Opaque private keys cannot be compared and are
// trusted to match.
bool ssl_compare_public_and_private_key(const EVP_PKEY *pubkey,
                                        const EVP_PKEY *privkey) {
  if (EVP_PKEY_is_opaque(privkey)) {
    // We cannot check an opaque private key and have to trust that it
    // matches.
    return true;
  }

  switch (EVP_PKEY_cmp(pubkey, privkey)) {
    case 1:
      return true;
    case 0:
      OPENSSL_PUT_ERROR(X509, X509_R_KEY_VALUES_MISMATCH);
      return false;
    case -1:
      OPENSSL_PUT_ERROR(X509, X509_R_KEY_TYPE_MISMATCH);
      return false;
    case -2:
      OPENSSL_PUT_ERROR(X509, X509_R_UNKNOWN_KEY_TYPE);
      return false;
  }

  assert(0);
  return false;
}

// ssl_cert_check_key_usage walks the extensions of the certificate in |in|
// and, if a KeyUsage extension is present, returns whether |bit| is
// asserted. Certificates without the extension are accepted.
bool ssl_cert_check_key_usage(const CBS *in, enum ssl_key_usage_t bit) {
  CBS buf = *in;

  CBS tbs_cert, outer_extensions;
  int has_extensions;
  if (!ssl_cert_skip_to_spki(&buf, &tbs_cert) ||
      // subjectPublicKeyInfo
      !CBS_get_asn1(&tbs_cert, NULL, CBS_ASN1_SEQUENCE) ||
      // issuerUniqueID
      !CBS_get_optional_asn1(&tbs_cert, NULL, NULL,
                             CBS_ASN1_CONTEXT_SPECIFIC | 1) ||
      // subjectUniqueID
      !CBS_get_optional_asn1(&tbs_cert, NULL, NULL,
                             CBS_ASN1_CONTEXT_SPECIFIC | 2) ||
      !CBS_get_optional_asn1(
          &tbs_cert, &outer_extensions, &has_extensions,
          CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 3)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_PARSE_LEAF_CERT);
    return false;
  }

  if (!has_extensions) {
    return true;
  }

  CBS extensions;
  if (!CBS_get_asn1(&outer_extensions, &extensions, CBS_ASN1_SEQUENCE)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_PARSE_LEAF_CERT);
    return false;
  }

  while (CBS_len(&extensions) > 0) {
    CBS extension, oid, contents;
    if (!CBS_get_asn1(&extensions, &extension, CBS_ASN1_SEQUENCE) ||
        !CBS_get_asn1(&extension, &oid, CBS_ASN1_OBJECT) ||
        (CBS_peek_asn1_tag(&extension, CBS_ASN1_BOOLEAN) &&
         !CBS_get_asn1(&extension, NULL, CBS_ASN1_BOOLEAN)) ||
        !CBS_get_asn1(&extension, &contents, CBS_ASN1_OCTETSTRING) ||
        CBS_len(&extension) != 0) {
      OPENSSL_PUT_ERROR(SSL,
SSL_R_CANNOT_PARSE_LEAF_CERT);
      return false;
    }

    // id-ce-keyUsage OID, 2.5.29.15.
    static const uint8_t kKeyUsageOID[3] = {0x55, 0x1d, 0x0f};
    if (CBS_len(&oid) != sizeof(kKeyUsageOID) ||
        OPENSSL_memcmp(CBS_data(&oid), kKeyUsageOID, sizeof(kKeyUsageOID)) !=
            0) {
      continue;
    }

    CBS bit_string;
    if (!CBS_get_asn1(&contents, &bit_string, CBS_ASN1_BITSTRING) ||
        CBS_len(&contents) != 0) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_PARSE_LEAF_CERT);
      return false;
    }

    // This is the KeyUsage extension. See
    // https://tools.ietf.org/html/rfc5280#section-4.2.1.3
    if (!CBS_is_valid_asn1_bitstring(&bit_string)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_PARSE_LEAF_CERT);
      return false;
    }

    if (!CBS_asn1_bitstring_has_bit(&bit_string, bit)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_KEY_USAGE_BIT_INCORRECT);
      return false;
    }

    return true;
  }

  // No KeyUsage extension found.
  return true;
}

// SSL_parse_CA_list parses a CA name list from |cbs|: a 16-bit
// length-prefixed sequence of 16-bit length-prefixed distinguished names.
// On error, it sets |*out_alert| and returns nullptr.
UniquePtr SSL_parse_CA_list(SSL *ssl, uint8_t *out_alert, CBS *cbs) {
  CRYPTO_BUFFER_POOL *const pool = ssl->ctx->pool;

  UniquePtr ret(sk_CRYPTO_BUFFER_new_null());
  if (!ret) {
    *out_alert = SSL_AD_INTERNAL_ERROR;
    return nullptr;
  }

  CBS child;
  if (!CBS_get_u16_length_prefixed(cbs, &child)) {
    *out_alert = SSL_AD_DECODE_ERROR;
    OPENSSL_PUT_ERROR(SSL, SSL_R_LENGTH_MISMATCH);
    return nullptr;
  }

  while (CBS_len(&child) > 0) {
    CBS distinguished_name;
    if (!CBS_get_u16_length_prefixed(&child, &distinguished_name)) {
      *out_alert = SSL_AD_DECODE_ERROR;
      OPENSSL_PUT_ERROR(SSL, SSL_R_CA_DN_TOO_LONG);
      return nullptr;
    }

    UniquePtr buffer(
        CRYPTO_BUFFER_new_from_CBS(&distinguished_name, pool));
    if (!buffer ||  //
        !PushToStack(ret.get(), std::move(buffer))) {
      *out_alert = SSL_AD_INTERNAL_ERROR;
      return nullptr;
    }
  }

  if (!ssl->ctx->x509_method->check_CA_list(ret.get())) {
    *out_alert = SSL_AD_DECODE_ERROR;
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    return nullptr;
  }

  return ret;
}

// CA_names_non_empty returns whether the effective CA name list — the
// connection-level list if set, otherwise the context-level list — is
// non-empty.
static bool CA_names_non_empty(const STACK_OF(CRYPTO_BUFFER) *config_names,
                               const STACK_OF(CRYPTO_BUFFER) *ctx_names) {
  if (config_names != nullptr) {
    return sk_CRYPTO_BUFFER_num(config_names) > 0;
  }
  if (ctx_names !=
nullptr) {
    return sk_CRYPTO_BUFFER_num(ctx_names) > 0;
  }
  return false;
}

// marshal_CA_names writes the effective CA name list (|config_names| if
// non-null, otherwise |ctx_names|) to |cbb| as a 16-bit length-prefixed list
// of 16-bit length-prefixed names. A null list marshals as an empty list.
static bool marshal_CA_names(const STACK_OF(CRYPTO_BUFFER) *config_names,
                             const STACK_OF(CRYPTO_BUFFER) *ctx_names,
                             CBB *cbb) {
  const STACK_OF(CRYPTO_BUFFER) *names =
      config_names == nullptr ? ctx_names : config_names;
  CBB child, name_cbb;

  if (!CBB_add_u16_length_prefixed(cbb, &child)) {
    return false;
  }

  if (names == nullptr) {
    return CBB_flush(cbb);
  }

  for (const CRYPTO_BUFFER *name : names) {
    if (!CBB_add_u16_length_prefixed(&child, &name_cbb) ||
        !CBB_add_bytes(&name_cbb, CRYPTO_BUFFER_data(name),
                       CRYPTO_BUFFER_len(name))) {
      return false;
    }
  }

  return CBB_flush(cbb);
}

// ssl_has_client_CAs returns whether |cfg| has client CA names configured on
// the connection or its SSL_CTX.
bool ssl_has_client_CAs(const SSL_CONFIG *cfg) {
  return CA_names_non_empty(cfg->client_CA.get(),
                            cfg->ssl->ctx->client_CA.get());
}

// ssl_has_CA_names is the analogous check for the CA-names extension list.
bool ssl_has_CA_names(const SSL_CONFIG *cfg) {
  return CA_names_non_empty(cfg->CA_names.get(),
                            cfg->ssl->ctx->CA_names.get());
}

// ssl_add_client_CA_list marshals the effective client CA list into |cbb|.
bool ssl_add_client_CA_list(const SSL_HANDSHAKE *hs, CBB *cbb) {
  return marshal_CA_names(hs->config->client_CA.get(),
                          hs->ssl->ctx->client_CA.get(), cbb);
}

// ssl_add_CA_names marshals the effective CA-names list into |cbb|.
bool ssl_add_CA_names(const SSL_HANDSHAKE *hs, CBB *cbb) {
  return marshal_CA_names(hs->config->CA_names.get(),
                          hs->ssl->ctx->CA_names.get(), cbb);
}

// ssl_check_leaf_certificate checks, for pre-TLS-1.3 handshakes, that the
// leaf certificate and its public key |pkey| are compatible with the
// negotiated cipher.
bool ssl_check_leaf_certificate(SSL_HANDSHAKE *hs, EVP_PKEY *pkey,
                                const CRYPTO_BUFFER *leaf) {
  assert(ssl_protocol_version(hs->ssl) < TLS1_3_VERSION);

  // Check the certificate's type matches the cipher. This does not check key
  // usage restrictions, which are handled separately.
  //
  // TODO(davidben): Put the key type and key usage checks in one place.
  if (!(hs->new_cipher->algorithm_auth &
        ssl_cipher_auth_mask_for_key(pkey, /*sign_ok=*/true))) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CERTIFICATE_TYPE);
    return false;
  }

  if (EVP_PKEY_id(pkey) == EVP_PKEY_EC) {
    // Check the key's group and point format are acceptable.
EC_KEY *ec_key = EVP_PKEY_get0_EC_KEY(pkey);
    uint16_t group_id;
    if (!ssl_nid_to_group_id(
            &group_id, EC_GROUP_get_curve_name(EC_KEY_get0_group(ec_key))) ||
        !tls1_check_group_id(hs, group_id) ||
        EC_KEY_get_conv_form(ec_key) != POINT_CONVERSION_UNCOMPRESSED) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ECC_CERT);
      return false;
    }
  }

  return true;
}

BSSL_NAMESPACE_END

using namespace bssl;

// Public API: set the chain and key on a single connection's configuration.
int SSL_set_chain_and_key(SSL *ssl, CRYPTO_BUFFER *const *certs,
                          size_t num_certs, EVP_PKEY *privkey,
                          const SSL_PRIVATE_KEY_METHOD *privkey_method) {
  if (!ssl->config) {
    return 0;
  }
  return cert_set_chain_and_key(ssl->config->cert.get(), certs, num_certs,
                                privkey, privkey_method);
}

// Public API: set the chain and key on an SSL_CTX.
int SSL_CTX_set_chain_and_key(SSL_CTX *ctx, CRYPTO_BUFFER *const *certs,
                              size_t num_certs, EVP_PKEY *privkey,
                              const SSL_PRIVATE_KEY_METHOD *privkey_method) {
  return cert_set_chain_and_key(ctx->cert.get(), certs, num_certs, privkey,
                                privkey_method);
}

// SSL_certs_clear resets all certificate state on |ssl|: X.509 caches, the
// credential list, and the legacy credential's chain and key.
void SSL_certs_clear(SSL *ssl) {
  if (!ssl->config) {
    return;
  }

  CERT *cert = ssl->config->cert.get();
  cert->x509_method->cert_clear(cert);
  cert->credentials.clear();
  cert->legacy_credential->ClearCertAndKey();
}

const STACK_OF(CRYPTO_BUFFER) *SSL_CTX_get0_chain(const SSL_CTX *ctx) {
  return ctx->cert->legacy_credential->chain.get();
}

const STACK_OF(CRYPTO_BUFFER) *SSL_get0_chain(const SSL *ssl) {
  if (!ssl->config) {
    return nullptr;
  }
  return ssl->config->cert->legacy_credential->chain.get();
}

// SSL_CTX_use_certificate_ASN1 installs |der| (a DER certificate of
// |der_len| bytes) as the context's leaf certificate.
int SSL_CTX_use_certificate_ASN1(SSL_CTX *ctx, size_t der_len,
                                 const uint8_t *der) {
  UniquePtr buffer(CRYPTO_BUFFER_new(der, der_len, NULL));
  if (!buffer) {
    return 0;
  }

  return ssl_set_cert(ctx->cert.get(), std::move(buffer));
}

// SSL_use_certificate_ASN1 is the per-connection analogue.
int SSL_use_certificate_ASN1(SSL *ssl, const uint8_t *der, size_t der_len) {
  UniquePtr buffer(CRYPTO_BUFFER_new(der, der_len, NULL));
  if (!buffer || !ssl->config) {
    return 0;
  }

  return ssl_set_cert(ssl->config->cert.get(), std::move(buffer));
}

void SSL_CTX_set_cert_cb(SSL_CTX *ctx, int (*cb)(SSL *ssl, void *arg),
                         void *arg) {
ssl_cert_set_cert_cb(ctx->cert.get(), cb, arg);
}

void SSL_set_cert_cb(SSL *ssl, int (*cb)(SSL *ssl, void *arg), void *arg) {
  if (!ssl->config) {
    return;
  }
  ssl_cert_set_cert_cb(ssl->config->cert.get(), cb, arg);
}

// SSL_get0_peer_certificates returns the peer's certificate chain from the
// current session, or NULL if there is no session.
const STACK_OF(CRYPTO_BUFFER) *SSL_get0_peer_certificates(const SSL *ssl) {
  SSL_SESSION *session = SSL_get_session(ssl);
  if (session == NULL) {
    return NULL;
  }

  return session->certs.get();
}

// SSL_get0_server_requested_CAs returns the CA names the server requested in
// the current handshake, or NULL if no handshake is in progress.
const STACK_OF(CRYPTO_BUFFER) *SSL_get0_server_requested_CAs(const SSL *ssl) {
  if (ssl->s3->hs == NULL) {
    return NULL;
  }
  return ssl->s3->hs->ca_names.get();
}

// Signed-certificate-timestamp list setters; the list is stored on the
// legacy credential.
int SSL_CTX_set_signed_cert_timestamp_list(SSL_CTX *ctx, const uint8_t *list,
                                           size_t list_len) {
  UniquePtr buf(CRYPTO_BUFFER_new(list, list_len, nullptr));
  return buf != nullptr && SSL_CREDENTIAL_set1_signed_cert_timestamp_list(
                               ctx->cert->legacy_credential.get(), buf.get());
}

int SSL_set_signed_cert_timestamp_list(SSL *ssl, const uint8_t *list,
                                       size_t list_len) {
  if (!ssl->config) {
    return 0;
  }
  UniquePtr buf(CRYPTO_BUFFER_new(list, list_len, nullptr));
  return buf != nullptr &&
         SSL_CREDENTIAL_set1_signed_cert_timestamp_list(
             ssl->config->cert->legacy_credential.get(), buf.get());
}

// OCSP staple setters; the response is stored on the legacy credential.
int SSL_CTX_set_ocsp_response(SSL_CTX *ctx, const uint8_t *response,
                              size_t response_len) {
  UniquePtr buf(
      CRYPTO_BUFFER_new(response, response_len, nullptr));
  return buf != nullptr &&
         SSL_CREDENTIAL_set1_ocsp_response(ctx->cert->legacy_credential.get(),
                                           buf.get());
}

int SSL_set_ocsp_response(SSL *ssl, const uint8_t *response,
                          size_t response_len) {
  if (!ssl->config) {
    return 0;
  }
  UniquePtr buf(
      CRYPTO_BUFFER_new(response, response_len, nullptr));
  return buf != nullptr &&
         SSL_CREDENTIAL_set1_ocsp_response(
             ssl->config->cert->legacy_credential.get(), buf.get());
}

// SSL_CTX_set0_client_CAs takes ownership of |name_list| as the
// context-level client CA list, dropping any cached X509_NAME view first.
void SSL_CTX_set0_client_CAs(SSL_CTX *ctx, STACK_OF(CRYPTO_BUFFER) *name_list) {
  ctx->x509_method->ssl_ctx_flush_cached_client_CA(ctx);
  ctx->client_CA.reset(name_list);
}

void SSL_set0_client_CAs(SSL *ssl, STACK_OF(CRYPTO_BUFFER) *name_list) {
  if (!ssl->config) {
    return;
  }
ssl->ctx->x509_method->ssl_flush_cached_client_CA(ssl->config.get()); ssl->config->client_CA.reset(name_list); } void SSL_set0_CA_names(SSL *ssl, STACK_OF(CRYPTO_BUFFER) *name_list) { if (!ssl->config) { return; } ssl->config->CA_names.reset(name_list); } ================================================ FILE: Sources/CNIOBoringSSL/ssl/ssl_cipher.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * Copyright 2005 Nokia. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN static constexpr SSL_CIPHER kCiphers[] = { // The RSA ciphers // Cipher 0A { SSL3_TXT_RSA_DES_192_CBC3_SHA, "TLS_RSA_WITH_3DES_EDE_CBC_SHA", SSL3_CK_RSA_DES_192_CBC3_SHA, SSL_kRSA, SSL_aRSA_DECRYPT, SSL_3DES, SSL_SHA1, SSL_HANDSHAKE_MAC_DEFAULT, }, // New AES ciphersuites // Cipher 2F { TLS1_TXT_RSA_WITH_AES_128_SHA, "TLS_RSA_WITH_AES_128_CBC_SHA", TLS1_CK_RSA_WITH_AES_128_SHA, SSL_kRSA, SSL_aRSA_DECRYPT, SSL_AES128, SSL_SHA1, SSL_HANDSHAKE_MAC_DEFAULT, }, // Cipher 35 { TLS1_TXT_RSA_WITH_AES_256_SHA, "TLS_RSA_WITH_AES_256_CBC_SHA", TLS1_CK_RSA_WITH_AES_256_SHA, SSL_kRSA, SSL_aRSA_DECRYPT, SSL_AES256, SSL_SHA1, SSL_HANDSHAKE_MAC_DEFAULT, }, // PSK cipher suites. 
// Cipher 8C { TLS1_TXT_PSK_WITH_AES_128_CBC_SHA, "TLS_PSK_WITH_AES_128_CBC_SHA", TLS1_CK_PSK_WITH_AES_128_CBC_SHA, SSL_kPSK, SSL_aPSK, SSL_AES128, SSL_SHA1, SSL_HANDSHAKE_MAC_DEFAULT, }, // Cipher 8D { TLS1_TXT_PSK_WITH_AES_256_CBC_SHA, "TLS_PSK_WITH_AES_256_CBC_SHA", TLS1_CK_PSK_WITH_AES_256_CBC_SHA, SSL_kPSK, SSL_aPSK, SSL_AES256, SSL_SHA1, SSL_HANDSHAKE_MAC_DEFAULT, }, // GCM ciphersuites from RFC 5288 // Cipher 9C { TLS1_TXT_RSA_WITH_AES_128_GCM_SHA256, "TLS_RSA_WITH_AES_128_GCM_SHA256", TLS1_CK_RSA_WITH_AES_128_GCM_SHA256, SSL_kRSA, SSL_aRSA_DECRYPT, SSL_AES128GCM, SSL_AEAD, SSL_HANDSHAKE_MAC_SHA256, }, // Cipher 9D { TLS1_TXT_RSA_WITH_AES_256_GCM_SHA384, "TLS_RSA_WITH_AES_256_GCM_SHA384", TLS1_CK_RSA_WITH_AES_256_GCM_SHA384, SSL_kRSA, SSL_aRSA_DECRYPT, SSL_AES256GCM, SSL_AEAD, SSL_HANDSHAKE_MAC_SHA384, }, // TLS 1.3 suites. // Cipher 1301 { TLS1_3_RFC_AES_128_GCM_SHA256, "TLS_AES_128_GCM_SHA256", TLS1_3_CK_AES_128_GCM_SHA256, SSL_kGENERIC, SSL_aGENERIC, SSL_AES128GCM, SSL_AEAD, SSL_HANDSHAKE_MAC_SHA256, }, // Cipher 1302 { TLS1_3_RFC_AES_256_GCM_SHA384, "TLS_AES_256_GCM_SHA384", TLS1_3_CK_AES_256_GCM_SHA384, SSL_kGENERIC, SSL_aGENERIC, SSL_AES256GCM, SSL_AEAD, SSL_HANDSHAKE_MAC_SHA384, }, // Cipher 1303 { TLS1_3_RFC_CHACHA20_POLY1305_SHA256, "TLS_CHACHA20_POLY1305_SHA256", TLS1_3_CK_CHACHA20_POLY1305_SHA256, SSL_kGENERIC, SSL_aGENERIC, SSL_CHACHA20POLY1305, SSL_AEAD, SSL_HANDSHAKE_MAC_SHA256, }, // Cipher C009 { TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", TLS1_CK_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, SSL_kECDHE, SSL_aECDSA, SSL_AES128, SSL_SHA1, SSL_HANDSHAKE_MAC_DEFAULT, }, // Cipher C00A { TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", TLS1_CK_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, SSL_kECDHE, SSL_aECDSA, SSL_AES256, SSL_SHA1, SSL_HANDSHAKE_MAC_DEFAULT, }, // Cipher C013 { TLS1_TXT_ECDHE_RSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", TLS1_CK_ECDHE_RSA_WITH_AES_128_CBC_SHA, 
SSL_kECDHE, SSL_aRSA_SIGN, SSL_AES128, SSL_SHA1, SSL_HANDSHAKE_MAC_DEFAULT, }, // Cipher C014 { TLS1_TXT_ECDHE_RSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", TLS1_CK_ECDHE_RSA_WITH_AES_256_CBC_SHA, SSL_kECDHE, SSL_aRSA_SIGN, SSL_AES256, SSL_SHA1, SSL_HANDSHAKE_MAC_DEFAULT, }, // Cipher C027 { TLS1_TXT_ECDHE_RSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", TLS1_CK_ECDHE_RSA_WITH_AES_128_CBC_SHA256, SSL_kECDHE, SSL_aRSA_SIGN, SSL_AES128, SSL_SHA256, SSL_HANDSHAKE_MAC_SHA256, }, // GCM based TLS v1.2 ciphersuites from RFC 5289 // Cipher C02B { TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", TLS1_CK_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, SSL_kECDHE, SSL_aECDSA, SSL_AES128GCM, SSL_AEAD, SSL_HANDSHAKE_MAC_SHA256, }, // Cipher C02C { TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", TLS1_CK_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, SSL_kECDHE, SSL_aECDSA, SSL_AES256GCM, SSL_AEAD, SSL_HANDSHAKE_MAC_SHA384, }, // Cipher C02F { TLS1_TXT_ECDHE_RSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", TLS1_CK_ECDHE_RSA_WITH_AES_128_GCM_SHA256, SSL_kECDHE, SSL_aRSA_SIGN, SSL_AES128GCM, SSL_AEAD, SSL_HANDSHAKE_MAC_SHA256, }, // Cipher C030 { TLS1_TXT_ECDHE_RSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", TLS1_CK_ECDHE_RSA_WITH_AES_256_GCM_SHA384, SSL_kECDHE, SSL_aRSA_SIGN, SSL_AES256GCM, SSL_AEAD, SSL_HANDSHAKE_MAC_SHA384, }, // ECDHE-PSK cipher suites. // Cipher C035 { TLS1_TXT_ECDHE_PSK_WITH_AES_128_CBC_SHA, "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA", TLS1_CK_ECDHE_PSK_WITH_AES_128_CBC_SHA, SSL_kECDHE, SSL_aPSK, SSL_AES128, SSL_SHA1, SSL_HANDSHAKE_MAC_DEFAULT, }, // Cipher C036 { TLS1_TXT_ECDHE_PSK_WITH_AES_256_CBC_SHA, "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA", TLS1_CK_ECDHE_PSK_WITH_AES_256_CBC_SHA, SSL_kECDHE, SSL_aPSK, SSL_AES256, SSL_SHA1, SSL_HANDSHAKE_MAC_DEFAULT, }, // ChaCha20-Poly1305 cipher suites. 
// Cipher CCA8 { TLS1_TXT_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", TLS1_CK_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, SSL_kECDHE, SSL_aRSA_SIGN, SSL_CHACHA20POLY1305, SSL_AEAD, SSL_HANDSHAKE_MAC_SHA256, }, // Cipher CCA9 { TLS1_TXT_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", TLS1_CK_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, SSL_kECDHE, SSL_aECDSA, SSL_CHACHA20POLY1305, SSL_AEAD, SSL_HANDSHAKE_MAC_SHA256, }, // Cipher CCAB { TLS1_TXT_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256", TLS1_CK_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256, SSL_kECDHE, SSL_aPSK, SSL_CHACHA20POLY1305, SSL_AEAD, SSL_HANDSHAKE_MAC_SHA256, }, }; Span AllCiphers() { return kCiphers; } static constexpr size_t NumTLS13Ciphers() { size_t num = 0; for (const auto &cipher : kCiphers) { if (cipher.algorithm_mkey == SSL_kGENERIC) { num++; } } return num; } #define CIPHER_ADD 1 #define CIPHER_KILL 2 #define CIPHER_DEL 3 #define CIPHER_ORD 4 #define CIPHER_SPECIAL 5 typedef struct cipher_order_st { const SSL_CIPHER *cipher; bool active; bool in_group; struct cipher_order_st *next, *prev; } CIPHER_ORDER; typedef struct cipher_alias_st { // name is the name of the cipher alias. const char *name = nullptr; // The following fields are bitmasks for the corresponding fields on // |SSL_CIPHER|. A cipher matches a cipher alias iff, for each bitmask, the // bit corresponding to the cipher's value is set to 1. If any bitmask is // all zeroes, the alias matches nothing. Use |~0u| for the default value. uint32_t algorithm_mkey = ~0u; uint32_t algorithm_auth = ~0u; uint32_t algorithm_enc = ~0u; uint32_t algorithm_mac = ~0u; // min_version, if non-zero, matches all ciphers which were added in that // particular protocol version. uint16_t min_version = 0; // include_deprecated, if true, means this alias includes deprecated ciphers. 
bool include_deprecated = false; } CIPHER_ALIAS; static const CIPHER_ALIAS kCipherAliases[] = { {"ALL", ~0u, ~0u, ~0u, ~0u, 0}, // The "COMPLEMENTOFDEFAULT" rule is omitted. It matches nothing. // key exchange aliases // (some of those using only a single bit here combine // multiple key exchange algs according to the RFCs. {"kRSA", SSL_kRSA, ~0u, ~0u, ~0u, 0}, {"kECDHE", SSL_kECDHE, ~0u, ~0u, ~0u, 0}, {"kEECDH", SSL_kECDHE, ~0u, ~0u, ~0u, 0}, {"ECDH", SSL_kECDHE, ~0u, ~0u, ~0u, 0}, {"kPSK", SSL_kPSK, ~0u, ~0u, ~0u, 0}, // server authentication aliases {"aRSA", ~0u, SSL_aRSA_SIGN | SSL_aRSA_DECRYPT, ~0u, ~0u, 0}, {"aECDSA", ~0u, SSL_aECDSA, ~0u, ~0u, 0}, {"ECDSA", ~0u, SSL_aECDSA, ~0u, ~0u, 0}, {"aPSK", ~0u, SSL_aPSK, ~0u, ~0u, 0}, // aliases combining key exchange and server authentication {"ECDHE", SSL_kECDHE, ~0u, ~0u, ~0u, 0}, {"EECDH", SSL_kECDHE, ~0u, ~0u, ~0u, 0}, {"RSA", SSL_kRSA, SSL_aRSA_SIGN | SSL_aRSA_DECRYPT, ~0u, ~0u, 0}, {"PSK", SSL_kPSK, SSL_aPSK, ~0u, ~0u, 0}, // symmetric encryption aliases {"3DES", ~0u, ~0u, SSL_3DES, ~0u, 0, /*include_deprecated=*/true}, {"AES128", ~0u, ~0u, SSL_AES128 | SSL_AES128GCM, ~0u, 0, /*include_deprecated=*/false}, {"AES256", ~0u, ~0u, SSL_AES256 | SSL_AES256GCM, ~0u, 0, /*include_deprecated=*/false}, {"AES", ~0u, ~0u, SSL_AES, ~0u, 0}, {"AESGCM", ~0u, ~0u, SSL_AES128GCM | SSL_AES256GCM, ~0u, 0, /*include_deprecated=*/false}, {"CHACHA20", ~0u, ~0u, SSL_CHACHA20POLY1305, ~0u, 0, /*include_deprecated=*/false}, // MAC aliases {"SHA1", ~0u, ~0u, ~0u, SSL_SHA1, 0}, {"SHA", ~0u, ~0u, ~0u, SSL_SHA1, 0}, // Legacy protocol minimum version aliases. "TLSv1" is intentionally the // same as "SSLv3". {"SSLv3", ~0u, ~0u, ~0u, ~0u, SSL3_VERSION}, {"TLSv1", ~0u, ~0u, ~0u, ~0u, SSL3_VERSION}, {"TLSv1.2", ~0u, ~0u, ~0u, ~0u, TLS1_2_VERSION}, // Legacy strength classes. {"HIGH", ~0u, ~0u, ~0u, ~0u, 0}, {"FIPS", ~0u, ~0u, ~0u, ~0u, 0}, // Temporary no-op aliases corresponding to removed SHA-2 legacy CBC // ciphers. 
// These should be removed after 2018-05-14.
    {"SHA256", 0, 0, 0, 0, 0},
    {"SHA384", 0, 0, 0, 0, 0},
};

static const size_t kCipherAliasesLen = OPENSSL_ARRAY_SIZE(kCipherAliases);

// ssl_cipher_get_evp_aead looks up the AEAD (and, for legacy CBC
// constructions, the MAC-secret and fixed-IV lengths) implementing |cipher|
// at protocol |version|. Returns false if the combination is unsupported.
bool ssl_cipher_get_evp_aead(const EVP_AEAD **out_aead,
                             size_t *out_mac_secret_len,
                             size_t *out_fixed_iv_len, const SSL_CIPHER *cipher,
                             uint16_t version) {
  *out_aead = NULL;
  *out_mac_secret_len = 0;
  *out_fixed_iv_len = 0;

  if (cipher->algorithm_mac == SSL_AEAD) {
    if (cipher->algorithm_enc == SSL_AES128GCM) {
      if (version < TLS1_3_VERSION) {
        *out_aead = EVP_aead_aes_128_gcm_tls12();
      } else {
        *out_aead = EVP_aead_aes_128_gcm_tls13();
      }
      *out_fixed_iv_len = 4;
    } else if (cipher->algorithm_enc == SSL_AES256GCM) {
      if (version < TLS1_3_VERSION) {
        *out_aead = EVP_aead_aes_256_gcm_tls12();
      } else {
        *out_aead = EVP_aead_aes_256_gcm_tls13();
      }
      *out_fixed_iv_len = 4;
    } else if (cipher->algorithm_enc == SSL_CHACHA20POLY1305) {
      *out_aead = EVP_aead_chacha20_poly1305();
      *out_fixed_iv_len = 12;
    } else {
      return false;
    }

    // In TLS 1.3, the iv_len is equal to the AEAD nonce length whereas the
    // code above computes the TLS 1.2 construction.
    if (version >= TLS1_3_VERSION) {
      *out_fixed_iv_len = EVP_AEAD_nonce_length(*out_aead);
    }
  } else if (cipher->algorithm_mac == SSL_SHA1) {
    if (cipher->algorithm_enc == SSL_3DES) {
      // TLS 1.0 uses implicit IVs; later versions carry an explicit IV.
      if (version == TLS1_VERSION) {
        *out_aead = EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv();
        *out_fixed_iv_len = 8;
      } else {
        *out_aead = EVP_aead_des_ede3_cbc_sha1_tls();
      }
    } else if (cipher->algorithm_enc == SSL_AES128) {
      if (version == TLS1_VERSION) {
        *out_aead = EVP_aead_aes_128_cbc_sha1_tls_implicit_iv();
        *out_fixed_iv_len = 16;
      } else {
        *out_aead = EVP_aead_aes_128_cbc_sha1_tls();
      }
    } else if (cipher->algorithm_enc == SSL_AES256) {
      if (version == TLS1_VERSION) {
        *out_aead = EVP_aead_aes_256_cbc_sha1_tls_implicit_iv();
        *out_fixed_iv_len = 16;
      } else {
        *out_aead = EVP_aead_aes_256_cbc_sha1_tls();
      }
    } else {
      return false;
    }

    *out_mac_secret_len = SHA_DIGEST_LENGTH;
  } else if (cipher->algorithm_mac == SSL_SHA256) {
    if (cipher->algorithm_enc == SSL_AES128) {
      *out_aead = EVP_aead_aes_128_cbc_sha256_tls();
    } else {
      return false;
    }

    *out_mac_secret_len = SHA256_DIGEST_LENGTH;
  } else {
    return false;
  }

  return true;
}

// ssl_get_handshake_digest returns the handshake/PRF digest used by |cipher|
// at protocol |version|.
const EVP_MD *ssl_get_handshake_digest(uint16_t version,
                                       const SSL_CIPHER *cipher) {
  switch (cipher->algorithm_prf) {
    case SSL_HANDSHAKE_MAC_DEFAULT:
      return version >= TLS1_2_VERSION ? EVP_sha256() : EVP_md5_sha1();
    case SSL_HANDSHAKE_MAC_SHA256:
      return EVP_sha256();
    case SSL_HANDSHAKE_MAC_SHA384:
      return EVP_sha384();
    default:
      assert(0);
      return NULL;
  }
}

// is_cipher_list_separator returns whether |c| separates rules in a cipher
// string. In strict mode only ':' is accepted.
static bool is_cipher_list_separator(char c, bool is_strict) {
  if (c == ':') {
    return true;
  }
  return !is_strict && (c == ' ' || c == ';' || c == ',');
}

// rule_equals returns whether the NUL-terminated string |rule| is equal to the
// |buf_len| bytes at |buf|.
static bool rule_equals(const char *rule, const char *buf, size_t buf_len) {
  // |strncmp| alone only checks that |buf| is a prefix of |rule|.
return strncmp(rule, buf, buf_len) == 0 && rule[buf_len] == '\0';
}

// ll_append_tail unlinks |curr| from the doubly-linked cipher list and
// re-attaches it after |*tail|, updating |*head| and |*tail| as needed.
static void ll_append_tail(CIPHER_ORDER **head, CIPHER_ORDER *curr,
                           CIPHER_ORDER **tail) {
  if (curr == *tail) {
    return;
  }
  if (curr == *head) {
    *head = curr->next;
  }
  if (curr->prev != NULL) {
    curr->prev->next = curr->next;
  }
  if (curr->next != NULL) {
    curr->next->prev = curr->prev;
  }
  (*tail)->next = curr;
  curr->prev = *tail;
  curr->next = NULL;
  *tail = curr;
}

// ll_append_head unlinks |curr| and re-attaches it before |*head|, updating
// |*head| and |*tail| as needed.
static void ll_append_head(CIPHER_ORDER **head, CIPHER_ORDER *curr,
                           CIPHER_ORDER **tail) {
  if (curr == *head) {
    return;
  }
  if (curr == *tail) {
    *tail = curr->prev;
  }
  if (curr->next != NULL) {
    curr->next->prev = curr->prev;
  }
  if (curr->prev != NULL) {
    curr->prev->next = curr->next;
  }
  (*head)->prev = curr;
  curr->next = *head;
  curr->prev = NULL;
  *head = curr;
}

SSLCipherPreferenceList::~SSLCipherPreferenceList() {
  OPENSSL_free(in_group_flags);
}

// Init takes ownership of |ciphers_arg| and copies |in_group_flags_arg|,
// which must have exactly one entry per cipher.
bool SSLCipherPreferenceList::Init(UniquePtr ciphers_arg,
                                   Span in_group_flags_arg) {
  if (sk_SSL_CIPHER_num(ciphers_arg.get()) != in_group_flags_arg.size()) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return false;
  }

  Array copy;
  if (!copy.CopyFrom(in_group_flags_arg)) {
    return false;
  }

  ciphers = std::move(ciphers_arg);
  size_t unused_len;
  copy.Release(&in_group_flags, &unused_len);
  return true;
}

// Init (copy form) deep-copies |other|'s cipher stack and group flags.
bool SSLCipherPreferenceList::Init(const SSLCipherPreferenceList &other) {
  size_t size = sk_SSL_CIPHER_num(other.ciphers.get());
  Span other_flags(other.in_group_flags, size);
  UniquePtr other_ciphers(
      sk_SSL_CIPHER_dup(other.ciphers.get()));
  if (!other_ciphers) {
    return false;
  }
  return Init(std::move(other_ciphers), other_flags);
}

// Remove deletes |cipher| from the list, fixing up the equal-preference
// group flags around the removed slot.
void SSLCipherPreferenceList::Remove(const SSL_CIPHER *cipher) {
  size_t index;
  if (!sk_SSL_CIPHER_find(ciphers.get(), &index, cipher)) {
    return;
  }
  if (!in_group_flags[index] /* last element of group */ && index > 0) {
    in_group_flags[index - 1] = false;
  }
  for (size_t i = index; i < sk_SSL_CIPHER_num(ciphers.get()) - 1; ++i) {
    in_group_flags[i] = in_group_flags[i + 1];
  }
sk_SSL_CIPHER_delete(ciphers.get(), index);
}

// ssl_cipher_is_deprecated returns whether |cipher| is excluded from cipher
// aliases unless explicitly requested.
bool ssl_cipher_is_deprecated(const SSL_CIPHER *cipher) {
  return cipher->id == TLS1_CK_ECDHE_RSA_WITH_AES_128_CBC_SHA256 ||
         cipher->algorithm_enc == SSL_3DES;
}

// ssl_cipher_apply_rule applies the rule type |rule| to ciphers matching its
// parameters in the linked list from |*head_p| to |*tail_p|. It writes the new
// head and tail of the list to |*head_p| and |*tail_p|, respectively.
//
// - If |cipher_id| is non-zero, only that cipher is selected.
// - Otherwise, if |strength_bits| is non-negative, it selects ciphers
//   of that strength.
// - Otherwise, |alias| must be non-null. It selects ciphers that matches
//   |*alias|.
static void ssl_cipher_apply_rule(uint32_t cipher_id, const CIPHER_ALIAS *alias,
                                  int rule, int strength_bits, bool in_group,
                                  CIPHER_ORDER **head_p,
                                  CIPHER_ORDER **tail_p) {
  CIPHER_ORDER *head, *tail, *curr, *next, *last;
  const SSL_CIPHER *cp;
  bool reverse = false;

  if (cipher_id == 0 && strength_bits == -1 && alias->min_version == 0 &&
      (alias->algorithm_mkey == 0 || alias->algorithm_auth == 0 ||
       alias->algorithm_enc == 0 || alias->algorithm_mac == 0)) {
    // The rule matches nothing, so bail early.
    return;
  }

  if (rule == CIPHER_DEL) {
    // needed to maintain sorting between currently deleted ciphers
    reverse = true;
  }

  head = *head_p;
  tail = *tail_p;

  if (reverse) {
    next = tail;
    last = head;
  } else {
    next = head;
    last = tail;
  }

  curr = NULL;
  for (;;) {
    if (curr == last) {
      break;
    }

    curr = next;
    if (curr == NULL) {
      break;
    }

    next = reverse ? curr->prev : curr->next;
    cp = curr->cipher;

    // Selection criteria is either a specific cipher, the value of
    // |strength_bits|, or the algorithms used.
    if (cipher_id != 0) {
      if (cipher_id != cp->id) {
        continue;
      }
    } else if (strength_bits >= 0) {
      if (strength_bits != SSL_CIPHER_get_bits(cp, NULL)) {
        continue;
      }
    } else {
      if (!(alias->algorithm_mkey & cp->algorithm_mkey) ||
          !(alias->algorithm_auth & cp->algorithm_auth) ||
          !(alias->algorithm_enc & cp->algorithm_enc) ||
          !(alias->algorithm_mac & cp->algorithm_mac) ||
          (alias->min_version != 0 &&
           SSL_CIPHER_get_min_version(cp) != alias->min_version) ||
          (!alias->include_deprecated && ssl_cipher_is_deprecated(cp))) {
        continue;
      }
    }

    // add the cipher if it has not been added yet.
    if (rule == CIPHER_ADD) {
      // reverse == false
      if (!curr->active) {
        ll_append_tail(&head, curr, &tail);
        curr->active = true;
        curr->in_group = in_group;
      }
    }
    // Move the added cipher to this location
    else if (rule == CIPHER_ORD) {
      // reverse == false
      if (curr->active) {
        ll_append_tail(&head, curr, &tail);
        curr->in_group = false;
      }
    } else if (rule == CIPHER_DEL) {
      // reverse == true
      if (curr->active) {
        // most recently deleted ciphersuites get best positions
        // for any future CIPHER_ADD (note that the CIPHER_DEL loop
        // works in reverse to maintain the order)
        ll_append_head(&head, curr, &tail);
        curr->active = false;
        curr->in_group = false;
      }
    } else if (rule == CIPHER_KILL) {
      // reverse == false
      if (head == curr) {
        head = curr->next;
      } else {
        curr->prev->next = curr->next;
      }

      if (tail == curr) {
        tail = curr->prev;
      }
      curr->active = false;
      if (curr->next != NULL) {
        curr->next->prev = curr->prev;
      }
      if (curr->prev != NULL) {
        curr->prev->next = curr->next;
      }
      curr->next = NULL;
      curr->prev = NULL;
    }
  }

  *head_p = head;
  *tail_p = tail;
}

static bool ssl_cipher_strength_sort(CIPHER_ORDER **head_p,
                                     CIPHER_ORDER **tail_p) {
  // This routine sorts the ciphers with descending strength. The sorting must
  // keep the pre-sorted sequence, so we apply the normal sorting routine as
  // '+' movement to the end of the list.
int max_strength_bits = 0;
  CIPHER_ORDER *curr = *head_p;
  // First pass: find the maximum strength among active ciphers.
  while (curr != NULL) {
    if (curr->active &&
        SSL_CIPHER_get_bits(curr->cipher, NULL) > max_strength_bits) {
      max_strength_bits = SSL_CIPHER_get_bits(curr->cipher, NULL);
    }
    curr = curr->next;
  }

  Array number_uses;
  if (!number_uses.Init(max_strength_bits + 1)) {
    return false;
  }

  // Now find the strength_bits values actually used.
  curr = *head_p;
  while (curr != NULL) {
    if (curr->active) {
      number_uses[SSL_CIPHER_get_bits(curr->cipher, NULL)]++;
    }
    curr = curr->next;
  }

  // Go through the list of used strength_bits values in descending order.
  for (int i = max_strength_bits; i >= 0; i--) {
    if (number_uses[i] > 0) {
      ssl_cipher_apply_rule(/*cipher_id=*/0, /*alias=*/nullptr, CIPHER_ORD, i,
                            false, head_p, tail_p);
    }
  }

  return true;
}

// ssl_cipher_process_rulestr applies each rule in |rule_str| to the cipher
// list bounded by |*head_p| and |*tail_p|. In |strict| mode, unrecognized
// rules and loose separators are errors instead of being ignored.
static bool ssl_cipher_process_rulestr(const char *rule_str,
                                       CIPHER_ORDER **head_p,
                                       CIPHER_ORDER **tail_p, bool strict) {
  const char *l, *buf;
  bool in_group = false, has_group = false;
  size_t j, buf_len;
  char ch;

  l = rule_str;
  for (;;) {
    ch = *l;

    if (ch == '\0') {
      break;  // done
    }

    int rule;
    if (in_group) {
      if (ch == ']') {
        if (*tail_p) {
          (*tail_p)->in_group = false;
        }
        in_group = false;
        l++;
        continue;
      }

      if (ch == '|') {
        rule = CIPHER_ADD;
        l++;
        continue;
      } else if (!OPENSSL_isalnum(ch)) {
        OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_OPERATOR_IN_GROUP);
        return false;
      } else {
        rule = CIPHER_ADD;
      }
    } else if (ch == '-') {
      rule = CIPHER_DEL;
      l++;
    } else if (ch == '+') {
      rule = CIPHER_ORD;
      l++;
    } else if (ch == '!') {
      rule = CIPHER_KILL;
      l++;
    } else if (ch == '@') {
      rule = CIPHER_SPECIAL;
      l++;
    } else if (ch == '[') {
      assert(!in_group);
      in_group = true;
      has_group = true;
      l++;
      continue;
    } else {
      rule = CIPHER_ADD;
    }

    // If preference groups are enabled, the only legal operator is +.
    // Otherwise the in_group bits will get mixed up.
    if (has_group && rule != CIPHER_ADD) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_MIXED_SPECIAL_OPERATOR_WITH_GROUPS);
      return false;
    }

    if (is_cipher_list_separator(ch, strict)) {
      l++;
      continue;
    }

    bool multi = false;
    uint32_t cipher_id = 0;
    CIPHER_ALIAS alias;
    bool skip_rule = false;

    // When adding, exclude deprecated ciphers by default.
    alias.include_deprecated = rule != CIPHER_ADD;

    for (;;) {
      ch = *l;
      buf = l;
      buf_len = 0;
      while (OPENSSL_isalnum(ch) || ch == '-' || ch == '.' || ch == '_') {
        ch = *(++l);
        buf_len++;
      }

      if (buf_len == 0) {
        // We hit something we cannot deal with, it is no command or separator
        // nor alphanumeric, so we call this an error.
        OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_COMMAND);
        return false;
      }

      if (rule == CIPHER_SPECIAL) {
        break;
      }

      // Look for a matching exact cipher. These aren't allowed in multipart
      // rules.
      if (!multi && ch != '+') {
        for (j = 0; j < OPENSSL_ARRAY_SIZE(kCiphers); j++) {
          const SSL_CIPHER *cipher = &kCiphers[j];
          if (rule_equals(cipher->name, buf, buf_len) ||
              rule_equals(cipher->standard_name, buf, buf_len)) {
            cipher_id = cipher->id;
            break;
          }
        }
      }
      if (cipher_id == 0) {
        // If not an exact cipher, look for a matching cipher alias.
        for (j = 0; j < kCipherAliasesLen; j++) {
          if (rule_equals(kCipherAliases[j].name, buf, buf_len)) {
            alias.algorithm_mkey &= kCipherAliases[j].algorithm_mkey;
            alias.algorithm_auth &= kCipherAliases[j].algorithm_auth;
            alias.algorithm_enc &= kCipherAliases[j].algorithm_enc;
            alias.algorithm_mac &= kCipherAliases[j].algorithm_mac;

            // When specifying a combination of aliases, if any aliases
            // enables deprecated ciphers, deprecated ciphers are included.
            // This is slightly different from the bitmasks in that adding
            // aliases can increase the set of matched ciphers. This is so
            // that an alias like "RSA" will only specifiy AES-based RSA
            // ciphers, but "RSA+3DES" will still specify 3DES.
            alias.include_deprecated |= kCipherAliases[j].include_deprecated;

            if (alias.min_version != 0 &&
                alias.min_version != kCipherAliases[j].min_version) {
              skip_rule = true;
            } else {
              alias.min_version = kCipherAliases[j].min_version;
            }
            break;
          }
        }

        if (j == kCipherAliasesLen) {
          skip_rule = true;
          if (strict) {
            OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_COMMAND);
            return false;
          }
        }
      }

      // Check for a multipart rule.
      if (ch != '+') {
        break;
      }
      l++;
      multi = true;
    }

    // Ok, we have the rule, now apply it.
    if (rule == CIPHER_SPECIAL) {
      if (buf_len != 8 || strncmp(buf, "STRENGTH", 8) != 0) {
        OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_COMMAND);
        return false;
      }

      if (!ssl_cipher_strength_sort(head_p, tail_p)) {
        return false;
      }

      // We do not support any "multi" options together with "@", so throw
      // away the rest of the command, if any left, until end or ':' is found.
      while (*l != '\0' && !is_cipher_list_separator(*l, strict)) {
        l++;
      }
    } else if (!skip_rule) {
      ssl_cipher_apply_rule(cipher_id, &alias, rule, -1, in_group, head_p,
                            tail_p);
    }
  }

  if (in_group) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_COMMAND);
    return false;
  }

  return true;
}

bool ssl_create_cipher_list(UniquePtr *out_cipher_list,
                            const bool has_aes_hw, const char *rule_str,
                            bool strict) {
  // Return with error if nothing to do.
  if (rule_str == NULL || out_cipher_list == NULL) {
    return false;
  }

  // We prefer ECDHE ciphers over non-PFS ciphers. Then we prefer AEAD over
  // non-AEAD. The constants are masked by 0xffff to remove the vestigial 0x03
  // byte from SSL 2.0.
  // AEAD AES-GCM suites, in preference order (ECDSA before RSA, 128 before
  // 256). These are continued from ssl_create_cipher_list() above.
  static const uint16_t kAESCiphers[] = {
      TLS1_CK_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 & 0xffff,
      TLS1_CK_ECDHE_RSA_WITH_AES_128_GCM_SHA256 & 0xffff,
      TLS1_CK_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 & 0xffff,
      TLS1_CK_ECDHE_RSA_WITH_AES_256_GCM_SHA384 & 0xffff,
  };
  static const uint16_t kChaChaCiphers[] = {
      TLS1_CK_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 & 0xffff,
      TLS1_CK_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 & 0xffff,
      TLS1_CK_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 & 0xffff,
  };
  // Non-AEAD (CBC) and non-PFS suites, always ordered after the AEAD groups.
  static const uint16_t kLegacyCiphers[] = {
      TLS1_CK_ECDHE_ECDSA_WITH_AES_128_CBC_SHA & 0xffff,
      TLS1_CK_ECDHE_RSA_WITH_AES_128_CBC_SHA & 0xffff,
      TLS1_CK_ECDHE_PSK_WITH_AES_128_CBC_SHA & 0xffff,
      TLS1_CK_ECDHE_ECDSA_WITH_AES_256_CBC_SHA & 0xffff,
      TLS1_CK_ECDHE_RSA_WITH_AES_256_CBC_SHA & 0xffff,
      TLS1_CK_ECDHE_PSK_WITH_AES_256_CBC_SHA & 0xffff,
      TLS1_CK_ECDHE_RSA_WITH_AES_128_CBC_SHA256 & 0xffff,
      TLS1_CK_RSA_WITH_AES_128_GCM_SHA256 & 0xffff,
      TLS1_CK_RSA_WITH_AES_256_GCM_SHA384 & 0xffff,
      TLS1_CK_RSA_WITH_AES_128_SHA & 0xffff,
      TLS1_CK_PSK_WITH_AES_128_CBC_SHA & 0xffff,
      TLS1_CK_RSA_WITH_AES_256_SHA & 0xffff,
      TLS1_CK_PSK_WITH_AES_256_CBC_SHA & 0xffff,
      SSL3_CK_RSA_DES_192_CBC3_SHA & 0xffff,
  };

  // Set up a linked list of ciphers.
  CIPHER_ORDER co_list[OPENSSL_ARRAY_SIZE(kAESCiphers) +
                       OPENSSL_ARRAY_SIZE(kChaChaCiphers) +
                       OPENSSL_ARRAY_SIZE(kLegacyCiphers)];
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(co_list); i++) {
    co_list[i].next =
        i + 1 < OPENSSL_ARRAY_SIZE(co_list) ? &co_list[i + 1] : nullptr;
    co_list[i].prev = i == 0 ? nullptr : &co_list[i - 1];
    co_list[i].active = false;
    co_list[i].in_group = false;
  }
  CIPHER_ORDER *head = &co_list[0];
  CIPHER_ORDER *tail = &co_list[OPENSSL_ARRAY_SIZE(co_list) - 1];

  // Order AES ciphers vs ChaCha ciphers based on whether we have AES hardware.
  //
  // TODO(crbug.com/boringssl/29): We should also set up equipreference groups
  // as a server.
  size_t num = 0;
  if (has_aes_hw) {
    for (uint16_t id : kAESCiphers) {
      co_list[num++].cipher = SSL_get_cipher_by_value(id);
      assert(co_list[num - 1].cipher != nullptr);
    }
  }
  for (uint16_t id : kChaChaCiphers) {
    co_list[num++].cipher = SSL_get_cipher_by_value(id);
    assert(co_list[num - 1].cipher != nullptr);
  }
  if (!has_aes_hw) {
    for (uint16_t id : kAESCiphers) {
      co_list[num++].cipher = SSL_get_cipher_by_value(id);
      assert(co_list[num - 1].cipher != nullptr);
    }
  }
  for (uint16_t id : kLegacyCiphers) {
    co_list[num++].cipher = SSL_get_cipher_by_value(id);
    assert(co_list[num - 1].cipher != nullptr);
  }
  assert(num == OPENSSL_ARRAY_SIZE(co_list));
  static_assert(OPENSSL_ARRAY_SIZE(co_list) + NumTLS13Ciphers() ==
                    OPENSSL_ARRAY_SIZE(kCiphers),
                "Not all ciphers are included in the cipher order");

  // If the rule_string begins with DEFAULT, apply the default rule before
  // using the (possibly available) additional rules.
  const char *rule_p = rule_str;
  if (strncmp(rule_str, "DEFAULT", 7) == 0) {
    if (!ssl_cipher_process_rulestr(SSL_DEFAULT_CIPHER_LIST, &head, &tail,
                                    strict)) {
      return false;
    }
    rule_p += 7;
    if (*rule_p == ':') {
      rule_p++;
    }
  }

  if (*rule_p != '\0' &&
      !ssl_cipher_process_rulestr(rule_p, &head, &tail, strict)) {
    return false;
  }

  // Allocate new "cipherstack" for the result, return with error
  // if we cannot get one.
  // NOTE(review): template arguments look extraction-stripped here (upstream:
  // UniquePtr<STACK_OF(SSL_CIPHER)> and Array<bool>) — confirm upstream.
  UniquePtr cipherstack(sk_SSL_CIPHER_new_null());
  Array in_group_flags;
  if (cipherstack == nullptr ||
      !in_group_flags.InitForOverwrite(OPENSSL_ARRAY_SIZE(kCiphers))) {
    return false;
  }

  // The cipher selection for the list is done. The ciphers are added
  // to the resulting precedence to the STACK_OF(SSL_CIPHER).
  size_t num_in_group_flags = 0;
  for (CIPHER_ORDER *curr = head; curr != NULL; curr = curr->next) {
    if (curr->active) {
      if (!sk_SSL_CIPHER_push(cipherstack.get(), curr->cipher)) {
        return false;
      }
      in_group_flags[num_in_group_flags++] = curr->in_group;
    }
  }
  in_group_flags.Shrink(num_in_group_flags);

  UniquePtr pref_list = MakeUnique();
  if (!pref_list || !pref_list->Init(std::move(cipherstack), in_group_flags)) {
    return false;
  }
  *out_cipher_list = std::move(pref_list);

  // Configuring an empty cipher list is an error but still updates the
  // output.
  if (sk_SSL_CIPHER_num((*out_cipher_list)->ciphers.get()) == 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CIPHER_MATCH);
    return false;
  }

  return true;
}

// ssl_cipher_auth_mask_for_key returns the SSL_a* authentication mask bits
// usable with |key|; |sign_ok| gates the signing-capable bits.
uint32_t ssl_cipher_auth_mask_for_key(const EVP_PKEY *key, bool sign_ok) {
  switch (EVP_PKEY_id(key)) {
    case EVP_PKEY_RSA:
      return sign_ok ? (SSL_aRSA_SIGN | SSL_aRSA_DECRYPT) : SSL_aRSA_DECRYPT;
    case EVP_PKEY_EC:
    case EVP_PKEY_ED25519:
      // Ed25519 keys in TLS 1.2 repurpose the ECDSA ciphers.
      return sign_ok ? SSL_aECDSA : 0;
    default:
      return 0;
  }
}

// Returns whether |cipher| authenticates the server with a certificate.
bool ssl_cipher_uses_certificate_auth(const SSL_CIPHER *cipher) {
  return (cipher->algorithm_auth & SSL_aCERT) != 0;
}

bool ssl_cipher_requires_server_key_exchange(const SSL_CIPHER *cipher) {
  // Ephemeral Diffie-Hellman key exchanges require a ServerKeyExchange. It is
  // optional or omitted in all others.
  return (cipher->algorithm_mkey & SSL_kECDHE) != 0;
}

// ssl_cipher_get_record_split_len returns the length of the leading record
// fragment for 1/n-1 record splitting, or zero if |cipher| is not a CBC suite.
size_t ssl_cipher_get_record_split_len(const SSL_CIPHER *cipher) {
  size_t block_size;
  switch (cipher->algorithm_enc) {
    case SSL_3DES:
      block_size = 8;
      break;
    case SSL_AES128:
    case SSL_AES256:
      block_size = 16;
      break;
    default:
      return 0;
  }

  // All supported TLS 1.0 ciphers use SHA-1.
  // (Continued from ssl_cipher_get_record_split_len above.)
  assert(cipher->algorithm_mac == SSL_SHA1);
  // One byte of plaintext plus MAC, rounded up to a whole cipher block.
  size_t ret = 1 + SHA_DIGEST_LENGTH;
  ret += block_size - (ret % block_size);
  return ret;
}

BSSL_NAMESPACE_END

using namespace bssl;

// Three-way comparison on cipher IDs; the ordering bsearch and the sortedness
// static_assert below rely on.
static constexpr int ssl_cipher_id_cmp(const SSL_CIPHER *a,
                                       const SSL_CIPHER *b) {
  if (a->id > b->id) {
    return 1;
  }
  if (a->id < b->id) {
    return -1;
  }
  return 0;
}

// void*-typed adapter for bsearch().
// NOTE(review): the reinterpret_cast template arguments (upstream:
// reinterpret_cast<const SSL_CIPHER *>) appear to have been stripped by
// extraction — confirm against upstream BoringSSL.
static int ssl_cipher_id_cmp_void(const void *in_a, const void *in_b) {
  return ssl_cipher_id_cmp(reinterpret_cast(in_a), reinterpret_cast(in_b));
}

// Compile-time check that a cipher table is strictly sorted by id.
// NOTE(review): the template parameter list (upstream: template <size_t N>)
// appears extraction-stripped here.
template static constexpr bool ssl_ciphers_sorted(const SSL_CIPHER (&ciphers)[N]) {
  for (size_t i = 1; i < N; i++) {
    if (ssl_cipher_id_cmp(&ciphers[i - 1], &ciphers[i]) >= 0) {
      return false;
    }
  }
  return true;
}

static_assert(ssl_ciphers_sorted(kCiphers),
              "Ciphers are not sorted, bsearch won't work");

// Looks up a cipher by its 16-bit wire value via binary search over kCiphers.
const SSL_CIPHER *SSL_get_cipher_by_value(uint16_t value) {
  SSL_CIPHER c;
  c.id = 0x03000000L | value;
  return reinterpret_cast(
      bsearch(&c, kCiphers, OPENSSL_ARRAY_SIZE(kCiphers), sizeof(SSL_CIPHER),
              ssl_cipher_id_cmp_void));
}

uint32_t SSL_CIPHER_get_id(const SSL_CIPHER *cipher) { return cipher->id; }

uint16_t SSL_CIPHER_get_protocol_id(const SSL_CIPHER *cipher) {
  // All OpenSSL cipher IDs are prefaced with 0x03. Historically this referred
  // to SSLv2 vs SSLv3.
  assert((cipher->id & 0xff000000) == 0x03000000);
  // NOTE(review): static_cast target type (upstream: static_cast<uint16_t>)
  // appears extraction-stripped.
  return static_cast(cipher->id);
}

int SSL_CIPHER_is_aead(const SSL_CIPHER *cipher) {
  return (cipher->algorithm_mac & SSL_AEAD) != 0;
}

// Maps the cipher's bulk-encryption algorithm to its NID.
int SSL_CIPHER_get_cipher_nid(const SSL_CIPHER *cipher) {
  switch (cipher->algorithm_enc) {
    case SSL_3DES:
      return NID_des_ede3_cbc;
    case SSL_AES128:
      return NID_aes_128_cbc;
    case SSL_AES256:
      return NID_aes_256_cbc;
    case SSL_AES128GCM:
      return NID_aes_128_gcm;
    case SSL_AES256GCM:
      return NID_aes_256_gcm;
    case SSL_CHACHA20POLY1305:
      return NID_chacha20_poly1305;
  }
  assert(0);
  return NID_undef;
}

// Maps the cipher's MAC to its digest NID; AEAD suites have no separate MAC.
int SSL_CIPHER_get_digest_nid(const SSL_CIPHER *cipher) {
  switch (cipher->algorithm_mac) {
    case SSL_AEAD:
      return NID_undef;
    case SSL_SHA1:
      return NID_sha1;
    case SSL_SHA256:
      return NID_sha256;
  }
  assert(0);
  return NID_undef;
}

// Maps the cipher's key-exchange algorithm to its NID.
int SSL_CIPHER_get_kx_nid(const SSL_CIPHER *cipher) {
  switch (cipher->algorithm_mkey) {
    case SSL_kRSA:
      return NID_kx_rsa;
    case SSL_kECDHE:
      return NID_kx_ecdhe;
    case SSL_kPSK:
      return NID_kx_psk;
    case SSL_kGENERIC:
      return NID_kx_any;
  }
  assert(0);
  return NID_undef;
}

// Maps the cipher's authentication algorithm to its NID.
int SSL_CIPHER_get_auth_nid(const SSL_CIPHER *cipher) {
  switch (cipher->algorithm_auth) {
    case SSL_aRSA_DECRYPT:
    case SSL_aRSA_SIGN:
      return NID_auth_rsa;
    case SSL_aECDSA:
      return NID_auth_ecdsa;
    case SSL_aPSK:
      return NID_auth_psk;
    case SSL_aGENERIC:
      return NID_auth_any;
  }
  assert(0);
  return NID_undef;
}

// Returns the digest used by the cipher's handshake PRF.
const EVP_MD *SSL_CIPHER_get_handshake_digest(const SSL_CIPHER *cipher) {
  switch (cipher->algorithm_prf) {
    case SSL_HANDSHAKE_MAC_DEFAULT:
      return EVP_md5_sha1();
    case SSL_HANDSHAKE_MAC_SHA256:
      return EVP_sha256();
    case SSL_HANDSHAKE_MAC_SHA384:
      return EVP_sha384();
  }
  assert(0);
  return NULL;
}

int SSL_CIPHER_get_prf_nid(const SSL_CIPHER *cipher) {
  const EVP_MD *md = SSL_CIPHER_get_handshake_digest(cipher);
  if (md == NULL) {
    return NID_undef;
  }
  return EVP_MD_nid(md);
}

// Non-AEAD here implies a CBC block cipher for the supported suites.
int SSL_CIPHER_is_block_cipher(const SSL_CIPHER *cipher) {
  return cipher->algorithm_mac != SSL_AEAD;
}

uint16_t SSL_CIPHER_get_min_version(const SSL_CIPHER
                                        *cipher) {
  // (Continued signature of SSL_CIPHER_get_min_version from the previous
  // chunk.) Generic kx/auth marks TLS 1.3-only suites.
  if (cipher->algorithm_mkey == SSL_kGENERIC ||
      cipher->algorithm_auth == SSL_aGENERIC) {
    return TLS1_3_VERSION;
  }

  if (cipher->algorithm_prf != SSL_HANDSHAKE_MAC_DEFAULT) {
    // Cipher suites before TLS 1.2 use the default PRF, while all those added
    // afterwards specify a particular hash.
    return TLS1_2_VERSION;
  }
  return SSL3_VERSION;
}

uint16_t SSL_CIPHER_get_max_version(const SSL_CIPHER *cipher) {
  if (cipher->algorithm_mkey == SSL_kGENERIC ||
      cipher->algorithm_auth == SSL_aGENERIC) {
    return TLS1_3_VERSION;
  }
  return TLS1_2_VERSION;
}

// Placeholder name returned when no cipher is selected.
static const char *kUnknownCipher = "(NONE)";

// return the actual cipher being used
const char *SSL_CIPHER_get_name(const SSL_CIPHER *cipher) {
  if (cipher != NULL) {
    return cipher->name;
  }

  return kUnknownCipher;
}

const char *SSL_CIPHER_standard_name(const SSL_CIPHER *cipher) {
  return cipher->standard_name;
}

// Returns a short human-readable key-exchange label for |cipher|.
const char *SSL_CIPHER_get_kx_name(const SSL_CIPHER *cipher) {
  if (cipher == NULL) {
    return "";
  }

  switch (cipher->algorithm_mkey) {
    case SSL_kRSA:
      return "RSA";

    case SSL_kECDHE:
      switch (cipher->algorithm_auth) {
        case SSL_aECDSA:
          return "ECDHE_ECDSA";
        case SSL_aRSA_SIGN:
          return "ECDHE_RSA";
        case SSL_aPSK:
          return "ECDHE_PSK";
        default:
          assert(0);
          return "UNKNOWN";
      }

    case SSL_kPSK:
      assert(cipher->algorithm_auth == SSL_aPSK);
      return "PSK";

    case SSL_kGENERIC:
      assert(cipher->algorithm_auth == SSL_aGENERIC);
      return "GENERIC";

    default:
      assert(0);
      return "UNKNOWN";
  }
}

// Returns the cipher's effective strength in bits; also reports the nominal
// algorithm bits through |out_alg_bits| when non-NULL (3DES: 168 nominal,
// 112 effective).
int SSL_CIPHER_get_bits(const SSL_CIPHER *cipher, int *out_alg_bits) {
  if (cipher == NULL) {
    return 0;
  }

  int alg_bits, strength_bits;
  switch (cipher->algorithm_enc) {
    case SSL_AES128:
    case SSL_AES128GCM:
      alg_bits = 128;
      strength_bits = 128;
      break;

    case SSL_AES256:
    case SSL_AES256GCM:
    case SSL_CHACHA20POLY1305:
      alg_bits = 256;
      strength_bits = 256;
      break;

    case SSL_3DES:
      alg_bits = 168;
      strength_bits = 112;
      break;

    default:
      assert(0);
      alg_bits = 0;
      strength_bits = 0;
  }

  if (out_alg_bits != NULL) {
    *out_alg_bits = alg_bits;
  }
  return strength_bits;
}

// Writes a one-line OpenSSL-style description of |cipher| into |buf| (at
// least 128 bytes), or into a freshly malloc'd buffer when |buf| is NULL.
const char
    *SSL_CIPHER_description(const SSL_CIPHER *cipher, char *buf, int len) {
  const char *kx, *au, *enc, *mac;
  uint32_t alg_mkey, alg_auth, alg_enc, alg_mac;

  alg_mkey = cipher->algorithm_mkey;
  alg_auth = cipher->algorithm_auth;
  alg_enc = cipher->algorithm_enc;
  alg_mac = cipher->algorithm_mac;

  switch (alg_mkey) {
    case SSL_kRSA:
      kx = "RSA";
      break;
    case SSL_kECDHE:
      kx = "ECDH";
      break;
    case SSL_kPSK:
      kx = "PSK";
      break;
    case SSL_kGENERIC:
      kx = "GENERIC";
      break;
    default:
      kx = "unknown";
  }

  switch (alg_auth) {
    case SSL_aRSA_DECRYPT:
    case SSL_aRSA_SIGN:
      au = "RSA";
      break;
    case SSL_aECDSA:
      au = "ECDSA";
      break;
    case SSL_aPSK:
      au = "PSK";
      break;
    case SSL_aGENERIC:
      au = "GENERIC";
      break;
    default:
      au = "unknown";
      break;
  }

  switch (alg_enc) {
    case SSL_3DES:
      enc = "3DES(168)";
      break;
    case SSL_AES128:
      enc = "AES(128)";
      break;
    case SSL_AES256:
      enc = "AES(256)";
      break;
    case SSL_AES128GCM:
      enc = "AESGCM(128)";
      break;
    case SSL_AES256GCM:
      enc = "AESGCM(256)";
      break;
    case SSL_CHACHA20POLY1305:
      enc = "ChaCha20-Poly1305";
      break;
    default:
      enc = "unknown";
      break;
  }

  switch (alg_mac) {
    case SSL_SHA1:
      mac = "SHA1";
      break;
    case SSL_SHA256:
      mac = "SHA256";
      break;
    case SSL_AEAD:
      mac = "AEAD";
      break;
    default:
      mac = "unknown";
      break;
  }

  if (buf == NULL) {
    len = 128;
    buf = (char *)OPENSSL_malloc(len);
    if (buf == NULL) {
      return NULL;
    }
  } else if (len < 128) {
    return "Buffer too small";
  }

  snprintf(buf, len, "%-23s Kx=%-8s Au=%-4s Enc=%-9s Mac=%-4s\n", cipher->name,
           kx, au, enc, mac);
  return buf;
}

// Historical OpenSSL behavior: a fixed string regardless of |cipher|.
const char *SSL_CIPHER_get_version(const SSL_CIPHER *cipher) {
  return "TLSv1/SSLv3";
}

// Compression is not supported; the SSL_COMP functions below are no-op stubs
// kept for OpenSSL API compatibility.
STACK_OF(SSL_COMP) *SSL_COMP_get_compression_methods(void) { return NULL; }

int SSL_COMP_add_compression_method(int id, COMP_METHOD *cm) { return 1; }

const char *SSL_COMP_get_name(const COMP_METHOD *comp) { return NULL; }

const char *SSL_COMP_get0_name(const SSL_COMP *comp) { return comp->name; }

int SSL_COMP_get_id(const SSL_COMP *comp) { return comp->id; }

void SSL_COMP_free_compression_methods(void) {}

size_t
    SSL_get_all_cipher_names(const char **out, size_t max_out) {
  // (Continued signature from the previous chunk.) Copies up to |max_out|
  // cipher short names into |out|; "(NONE)" is included as the first entry.
  // NOTE(review): Span template arguments appear extraction-stripped here —
  // confirm against upstream BoringSSL.
  return GetAllNames(out, max_out, Span(&kUnknownCipher, 1), &SSL_CIPHER::name,
                     Span(kCiphers));
}

size_t SSL_get_all_standard_cipher_names(const char **out, size_t max_out) {
  return GetAllNames(out, max_out, Span(), &SSL_CIPHER::standard_name,
                     Span(kCiphers));
}

================================================
FILE: Sources/CNIOBoringSSL/ssl/ssl_credential.cc
================================================

/* Copyright 2024 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the #include targets below appear to have been stripped by
// extraction (upstream includes <openssl/ssl.h> etc.) — confirm upstream.
#include #include #include #include "../crypto/internal.h" #include "internal.h"

BSSL_NAMESPACE_BEGIN

// new_leafless_chain returns a fresh stack of buffers set to {nullptr}.
static UniquePtr new_leafless_chain(void) {
  UniquePtr chain(sk_CRYPTO_BUFFER_new_null());
  if (!chain || !sk_CRYPTO_BUFFER_push(chain.get(), nullptr)) {
    return nullptr;
  }
  return chain;
}

// ssl_get_credential_list fills |out| with the credentials configured on
// |hs|'s CERT: the explicit credential list, plus the legacy credential when
// it is complete (placed last).
bool ssl_get_credential_list(SSL_HANDSHAKE *hs, Array *out) {
  CERT *cert = hs->config->cert.get();
  // Finish filling in the legacy credential if needed.
  if (!cert->x509_method->ssl_auto_chain_if_needed(hs)) {
    return false;
  }

  size_t num_creds = cert->credentials.size();
  bool include_legacy = cert->legacy_credential->IsComplete();
  if (include_legacy) {
    num_creds++;
  }

  if (!out->InitForOverwrite(num_creds)) {
    return false;
  }

  for (size_t i = 0; i < cert->credentials.size(); i++) {
    (*out)[i] = cert->credentials[i].get();
  }
  if (include_legacy) {
    (*out)[num_creds - 1] = cert->legacy_credential.get();
  }
  return true;
}

// Returns whether |cred| may be used given the issuer constraints requested by
// the peer (currently, only the certificate_authorities extension).
bool ssl_credential_matches_requested_issuers(SSL_HANDSHAKE *hs,
                                              const SSL_CREDENTIAL *cred) {
  if (!cred->must_match_issuer) {
    // This credential does not need to match a requested issuer, so
    // it is good to use without a match.
    return true;
  }

  // If we have names sent by the CA extension, and this
  // credential matches it, it is good.
  if (hs->ca_names != nullptr) {
    for (const CRYPTO_BUFFER *ca_name : hs->ca_names.get()) {
      if (cred->ChainContainsIssuer(
              Span(CRYPTO_BUFFER_data(ca_name), CRYPTO_BUFFER_len(ca_name)))) {
        return true;
      }
    }
  }
  // TODO(bbe): Other forms of issuer matching go here.
  OPENSSL_PUT_ERROR(SSL, SSL_R_NO_MATCHING_ISSUER);
  return false;
}

BSSL_NAMESPACE_END

using namespace bssl;

// ex_data registry shared by all SSL_CREDENTIAL objects.
static CRYPTO_EX_DATA_CLASS g_ex_data_class = CRYPTO_EX_DATA_CLASS_INIT;

ssl_credential_st::ssl_credential_st(SSLCredentialType type_arg)
    : RefCounted(CheckSubClass()), type(type_arg) {
  CRYPTO_new_ex_data(&ex_data);
}

ssl_credential_st::~ssl_credential_st() {
  CRYPTO_free_ex_data(&g_ex_data_class, this, &ex_data);
}

// const-stripping up-ref adapter used with sk_CRYPTO_BUFFER_deep_copy.
static CRYPTO_BUFFER *buffer_up_ref(const CRYPTO_BUFFER *buffer) {
  CRYPTO_BUFFER_up_ref(const_cast(buffer));
  return const_cast(buffer);
}

// Dup returns a deep-ish copy of the credential: the chain stack is copied,
// while individual buffers and keys are reference-counted. X.509-only.
UniquePtr ssl_credential_st::Dup() const {
  assert(type == SSLCredentialType::kX509);
  UniquePtr ret = MakeUnique(type);
  if (ret == nullptr) {
    return nullptr;
  }

  ret->pubkey = UpRef(pubkey);
  ret->privkey = UpRef(privkey);
  ret->key_method = key_method;
  if (!ret->sigalgs.CopyFrom(sigalgs)) {
    return nullptr;
  }

  if (chain) {
    ret->chain.reset(sk_CRYPTO_BUFFER_deep_copy(chain.get(), buffer_up_ref,
                                                CRYPTO_BUFFER_free));
    if (!ret->chain) {
      return nullptr;
    }
  }

  ret->dc = UpRef(dc);
  ret->signed_cert_timestamp_list = UpRef(signed_cert_timestamp_list);
  ret->ocsp_response = UpRef(ocsp_response);
  ret->dc_algorithm = dc_algorithm;
  return ret;
}

void ssl_credential_st::ClearCertAndKey() {
  pubkey = nullptr;
  privkey = nullptr;
  key_method = nullptr;
  chain = nullptr;
}

bool ssl_credential_st::UsesX509() const {
  // Currently, all credential types use X.509. However, we may add other
  // certificate types in the future. Add the checks in the setters now, so we
  // don't forget.
  return true;
}

bool ssl_credential_st::UsesPrivateKey() const {
  // Currently, all credential types use private keys. However, we may add PSK
  return true;
}

bool ssl_credential_st::IsComplete() const {
  // APIs like |SSL_use_certificate| and |SSL_set1_chain| configure the leaf and
  // other certificates separately. It is possible for |chain| have a null leaf.
  // (Continued from IsComplete above.) A usable X.509 credential needs a
  // non-null leaf at index 0 of |chain|.
  if (UsesX509() && (sk_CRYPTO_BUFFER_num(chain.get()) == 0 ||
                     sk_CRYPTO_BUFFER_value(chain.get(), 0) == nullptr)) {
    return false;
  }
  // We must have successfully extracted a public key from the certificate,
  // delegated credential, etc.
  if (UsesPrivateKey() && pubkey == nullptr) {
    return false;
  }
  // Either a private key or a private-key method must be configured.
  if (UsesPrivateKey() && privkey == nullptr && key_method == nullptr) {
    return false;
  }
  if (type == SSLCredentialType::kDelegated && dc == nullptr) {
    return false;
  }
  return true;
}

// SetLeafCert installs |leaf| as the end-entity certificate, extracting and
// validating its public key. On key mismatch, the stored private key is
// either rejected or (if |discard_key_on_mismatch|) silently dropped.
// NOTE(review): UniquePtr template arguments appear extraction-stripped in
// this file (upstream: UniquePtr<CRYPTO_BUFFER>, UniquePtr<EVP_PKEY>) —
// confirm against upstream BoringSSL.
bool ssl_credential_st::SetLeafCert(UniquePtr leaf,
                                    bool discard_key_on_mismatch) {
  if (!UsesX509()) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return false;
  }

  // Delegated credentials carry their own key, so the leaf's key need not
  // match the configured private key.
  const bool private_key_matches_leaf = type != SSLCredentialType::kDelegated;

  CBS cbs;
  CRYPTO_BUFFER_init_CBS(leaf.get(), &cbs);
  UniquePtr new_pubkey = ssl_cert_parse_pubkey(&cbs);
  if (new_pubkey == nullptr) {
    return false;
  }

  if (!ssl_is_key_type_supported(EVP_PKEY_id(new_pubkey.get()))) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CERTIFICATE_TYPE);
    return false;
  }

  // An ECC certificate may be usable for ECDH or ECDSA. We only support ECDSA
  // certificates, so sanity-check the key usage extension.
  if (EVP_PKEY_id(new_pubkey.get()) == EVP_PKEY_EC &&
      !ssl_cert_check_key_usage(&cbs, key_usage_digital_signature)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CERTIFICATE_TYPE);
    return false;
  }

  if (private_key_matches_leaf && privkey != nullptr &&
      !ssl_compare_public_and_private_key(new_pubkey.get(), privkey.get())) {
    if (!discard_key_on_mismatch) {
      return false;
    }
    ERR_clear_error();
    privkey = nullptr;
  }

  if (chain == nullptr) {
    chain = new_leafless_chain();
    if (chain == nullptr) {
      return false;
    }
  }

  // Replace slot 0 (the leaf), freeing any previous occupant.
  CRYPTO_BUFFER_free(sk_CRYPTO_BUFFER_value(chain.get(), 0));
  sk_CRYPTO_BUFFER_set(chain.get(), 0, leaf.release());
  if (private_key_matches_leaf) {
    pubkey = std::move(new_pubkey);
  }
  return true;
}

// Removes every certificate after the leaf, keeping slot 0 intact.
void ssl_credential_st::ClearIntermediateCerts() {
  if (chain == nullptr) {
    return;
  }
  while (sk_CRYPTO_BUFFER_num(chain.get()) > 1) {
    CRYPTO_BUFFER_free(sk_CRYPTO_BUFFER_pop(chain.get()));
  }
}

// Returns whether any certificate in |chain| matches issuer name |dn| (a DER
// DistinguishedName).
bool ssl_credential_st::ChainContainsIssuer(bssl::Span dn) const {
  if (UsesX509()) {
    // TODO(bbe) This is used for matching a chain by CA name for the CA
    // extension. If we require a chain to be present, we could remove any
    // remaining parts of the chain after the found issuer, on the assumption
    // that the peer sending the CA extension has the issuer in their trust
    // store and does not need us to waste bytes on the wire.
    CBS dn_cbs;
    CBS_init(&dn_cbs, dn.data(), dn.size());
    for (size_t i = 0; i < sk_CRYPTO_BUFFER_num(chain.get()); i++) {
      const CRYPTO_BUFFER *cert = sk_CRYPTO_BUFFER_value(chain.get(), i);
      CBS cert_cbs;
      CRYPTO_BUFFER_init_CBS(cert, &cert_cbs);
      if (ssl_cert_matches_issuer(&cert_cbs, &dn_cbs)) {
        return true;
      }
    }
  }
  return false;
}

// Appends |cert| to the chain, creating a leafless chain first if needed.
bool ssl_credential_st::AppendIntermediateCert(UniquePtr cert) {
  if (!UsesX509()) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return false;
  }

  if (chain == nullptr) {
    chain = new_leafless_chain();
    if (chain == nullptr) {
      return false;
    }
  }

  return PushToStack(chain.get(), std::move(cert));
}

// NOTE(review): the New template argument (upstream: New<SSL_CREDENTIAL>)
// appears extraction-stripped in both constructors below.
SSL_CREDENTIAL *SSL_CREDENTIAL_new_x509(void) {
  return New(SSLCredentialType::kX509);
}

SSL_CREDENTIAL *SSL_CREDENTIAL_new_delegated(void) {
  return New(SSLCredentialType::kDelegated);
}

void SSL_CREDENTIAL_up_ref(SSL_CREDENTIAL *cred) { cred->UpRefInternal(); }

void SSL_CREDENTIAL_free(SSL_CREDENTIAL *cred) {
  if (cred != nullptr) {
    cred->DecRefInternal();
  }
}

int SSL_CREDENTIAL_set1_private_key(SSL_CREDENTIAL *cred, EVP_PKEY *key) {
  if (!cred->UsesPrivateKey()) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  // If the public half has been configured, check |key| matches. |pubkey| will
  // have been extracted from the certificate, delegated credential, etc.
  // (Continued from SSL_CREDENTIAL_set1_private_key above.)
  if (cred->pubkey != nullptr &&
      !ssl_compare_public_and_private_key(cred->pubkey.get(), key)) {
    // NOTE(review): this function returns int elsewhere (0/1); `false` here
    // converts to 0 but is stylistically inconsistent — confirm upstream.
    return false;
  }

  cred->privkey = UpRef(key);
  cred->key_method = nullptr;
  return 1;
}

// Installing a key method clears any directly-configured private key.
int SSL_CREDENTIAL_set_private_key_method(
    SSL_CREDENTIAL *cred, const SSL_PRIVATE_KEY_METHOD *key_method) {
  if (!cred->UsesPrivateKey()) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  cred->privkey = nullptr;
  cred->key_method = key_method;
  return 1;
}

// Replaces the whole chain: certs[0] is the leaf, the rest are intermediates.
int SSL_CREDENTIAL_set1_cert_chain(SSL_CREDENTIAL *cred,
                                   CRYPTO_BUFFER *const *certs,
                                   size_t num_certs) {
  if (!cred->UsesX509() || num_certs == 0) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  if (!cred->SetLeafCert(UpRef(certs[0]), /*discard_key_on_mismatch=*/false)) {
    return 0;
  }

  cred->ClearIntermediateCerts();
  for (size_t i = 1; i < num_certs; i++) {
    if (!cred->AppendIntermediateCert(UpRef(certs[i]))) {
      return 0;
    }
  }

  return 1;
}

int SSL_CREDENTIAL_set1_delegated_credential(SSL_CREDENTIAL *cred,
                                             CRYPTO_BUFFER *dc) {
  if (cred->type != SSLCredentialType::kDelegated) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  // Parse the delegated credential to check for validity, and extract a few
  // fields from it. See RFC 9345, section 4.
  CBS cbs, spki, sig;
  uint32_t valid_time;
  uint16_t dc_cert_verify_algorithm, algorithm;
  CRYPTO_BUFFER_init_CBS(dc, &cbs);
  if (!CBS_get_u32(&cbs, &valid_time) ||
      !CBS_get_u16(&cbs, &dc_cert_verify_algorithm) ||
      !CBS_get_u24_length_prefixed(&cbs, &spki) ||
      !CBS_get_u16(&cbs, &algorithm) ||
      !CBS_get_u16_length_prefixed(&cbs, &sig) ||  //
      CBS_len(&sig) == 0 ||                        //
      CBS_len(&cbs) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    return 0;
  }

  // RFC 9345 forbids algorithms that use the rsaEncryption OID. As the
  // RSASSA-PSS OID is unusably complicated, this effectively means we will not
  // support RSA delegated credentials.
  if (SSL_get_signature_algorithm_key_type(dc_cert_verify_algorithm) ==
      EVP_PKEY_RSA) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM);
    return 0;
  }

  UniquePtr pubkey(EVP_parse_public_key(&spki));
  if (pubkey == nullptr || CBS_len(&spki) != 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    return 0;
  }

  // A delegated credential is restricted to exactly one signature algorithm.
  if (!cred->sigalgs.CopyFrom(Span(&dc_cert_verify_algorithm, 1))) {
    return 0;
  }

  if (cred->privkey != nullptr &&
      !ssl_compare_public_and_private_key(pubkey.get(), cred->privkey.get())) {
    return 0;
  }

  cred->dc = UpRef(dc);
  cred->pubkey = std::move(pubkey);
  cred->dc_algorithm = algorithm;
  return 1;
}

int SSL_CREDENTIAL_set1_ocsp_response(SSL_CREDENTIAL *cred,
                                      CRYPTO_BUFFER *ocsp) {
  if (!cred->UsesX509()) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  cred->ocsp_response = UpRef(ocsp);
  return 1;
}

// Validates |sct_list| as a SignedCertificateTimestampList before storing it.
int SSL_CREDENTIAL_set1_signed_cert_timestamp_list(SSL_CREDENTIAL *cred,
                                                   CRYPTO_BUFFER *sct_list) {
  if (!cred->UsesX509()) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  CBS cbs;
  CRYPTO_BUFFER_init_CBS(sct_list, &cbs);
  if (!ssl_is_sct_list_valid(&cbs)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SCT_LIST);
    return 0;
  }

  cred->signed_cert_timestamp_list = UpRef(sct_list);
  return 1;
}

// Only complete credentials may be registered on a context or connection.
int SSL_CTX_add1_credential(SSL_CTX *ctx, SSL_CREDENTIAL *cred) {
  if (!cred->IsComplete()) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  return ctx->cert->credentials.Push(UpRef(cred));
}

int SSL_add1_credential(SSL *ssl, SSL_CREDENTIAL *cred) {
  if (ssl->config == nullptr) {
    return 0;
  }

  if (!cred->IsComplete()) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  return ssl->config->cert->credentials.Push(UpRef(cred));
}

const SSL_CREDENTIAL *SSL_get0_selected_credential(const SSL *ssl) {
  if (ssl->s3->hs == nullptr) {
    return nullptr;
  }
  return ssl->s3->hs->credential.get();
}

int SSL_CREDENTIAL_get_ex_new_index(long argl, void *argp,
                                    CRYPTO_EX_unused *unused,
                                    CRYPTO_EX_dup *dup_unused,
                                    CRYPTO_EX_free *free_func) {
  return CRYPTO_get_ex_new_index_ex(&g_ex_data_class, argl, argp, free_func);
}

int SSL_CREDENTIAL_set_ex_data(SSL_CREDENTIAL *cred, int idx, void *arg) {
  return CRYPTO_set_ex_data(&cred->ex_data, idx, arg);
}

void *SSL_CREDENTIAL_get_ex_data(const SSL_CREDENTIAL *cred, int idx) {
  return CRYPTO_get_ex_data(&cred->ex_data, idx);
}

void SSL_CREDENTIAL_set_must_match_issuer(SSL_CREDENTIAL *cred) {
  cred->must_match_issuer = true;
}

void SSL_CREDENTIAL_clear_must_match_issuer(SSL_CREDENTIAL *cred) {
  cred->must_match_issuer = false;
}

int SSL_CREDENTIAL_must_match_issuer(const SSL_CREDENTIAL *cred) {
  return cred->must_match_issuer ? 1 : 0;
}

================================================
FILE: Sources/CNIOBoringSSL/ssl/ssl_file.cc
================================================

/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): #include targets appear extraction-stripped below — confirm
// against upstream BoringSSL ssl_file.cc.
#include #include #include #include #include #include #include #include #include #include #include "internal.h"

// Comparison callback for sorting STACK_OF(X509_NAME) by X509_NAME_cmp.
static int xname_cmp(const X509_NAME *const *a, const X509_NAME *const *b) {
  return X509_NAME_cmp(*a, *b);
}

// Reads PEM certificates from |bio| and appends each new subject name to
// |out|, keeping |out| sorted and free of newly-introduced duplicates. When
// |allow_empty| is false, a bio with no certificates is an error.
static int add_bio_cert_subjects_to_stack(STACK_OF(X509_NAME) *out, BIO *bio,
                                          bool allow_empty) {
  // This function historically sorted |out| after every addition and skipped
  // duplicates. This implementation preserves that behavior, but only sorts at
  // the end, to avoid a quadratic running time. Existing duplicates in |out|
  // are preserved, but do not introduce new duplicates.
  bssl::UniquePtr to_append(sk_X509_NAME_new(xname_cmp));
  if (to_append == nullptr) {
    return 0;
  }

  // Temporarily switch the comparison function for |out|.
  // (Continued from add_bio_cert_subjects_to_stack above.) RAII guard that
  // restores |out|'s original comparison function on every exit path.
  struct RestoreCmpFunc {
    ~RestoreCmpFunc() { sk_X509_NAME_set_cmp_func(stack, old_cmp); }
    STACK_OF(X509_NAME) *stack;
    int (*old_cmp)(const X509_NAME *const *, const X509_NAME *const *);
  };
  RestoreCmpFunc restore = {out, sk_X509_NAME_set_cmp_func(out, xname_cmp)};

  sk_X509_NAME_sort(out);
  bool first = true;
  for (;;) {
    bssl::UniquePtr x509(
        PEM_read_bio_X509(bio, nullptr, nullptr, nullptr));
    if (x509 == nullptr) {
      if (first && !allow_empty) {
        return 0;
      }
      // TODO(davidben): This ignores PEM syntax errors. It should only succeed
      // on |PEM_R_NO_START_LINE|.
      ERR_clear_error();
      break;
    }
    first = false;

    X509_NAME *subject = X509_get_subject_name(x509.get());
    // Skip if already present in |out|. Duplicates in |to_append| will be
    // handled separately.
    if (sk_X509_NAME_find(out, /*out_index=*/NULL, subject)) {
      continue;
    }

    bssl::UniquePtr copy(X509_NAME_dup(subject));
    if (copy == nullptr ||
        !bssl::PushToStack(to_append.get(), std::move(copy))) {
      return 0;
    }
  }

  // Append |to_append| to |stack|, skipping any duplicates.
  sk_X509_NAME_sort(to_append.get());
  size_t num = sk_X509_NAME_num(to_append.get());
  for (size_t i = 0; i < num; i++) {
    bssl::UniquePtr name(sk_X509_NAME_value(to_append.get(), i));
    sk_X509_NAME_set(to_append.get(), i, nullptr);
    // Adjacent equal names after sorting are duplicates; keep only the last.
    if (i + 1 < num &&
        X509_NAME_cmp(name.get(), sk_X509_NAME_value(to_append.get(), i + 1)) ==
            0) {
      continue;
    }
    if (!bssl::PushToStack(out, std::move(name))) {
      return 0;
    }
  }

  // Sort |out| one last time, to preserve the historical behavior of
  // maintaining the sorted list.
  sk_X509_NAME_sort(out);
  return 1;
}

int SSL_add_bio_cert_subjects_to_stack(STACK_OF(X509_NAME) *out, BIO *bio) {
  return add_bio_cert_subjects_to_stack(out, bio, /*allow_empty=*/true);
}

// Loads client-CA subject names from a PEM file into a fresh stack; an empty
// file is an error here (allow_empty=false).
STACK_OF(X509_NAME) *SSL_load_client_CA_file(const char *file) {
  bssl::UniquePtr in(BIO_new_file(file, "rb"));
  if (in == nullptr) {
    return nullptr;
  }

  bssl::UniquePtr ret(sk_X509_NAME_new_null());
  if (ret == nullptr ||  //
      !add_bio_cert_subjects_to_stack(ret.get(), in.get(),
                                      /*allow_empty=*/false)) {
    return nullptr;
  }
  return ret.release();
}

int SSL_add_file_cert_subjects_to_stack(STACK_OF(X509_NAME) *out,
                                        const char *file) {
  bssl::UniquePtr in(BIO_new_file(file, "rb"));
  if (in == nullptr) {
    return 0;
  }
  return SSL_add_bio_cert_subjects_to_stack(out, in.get());
}

// Loads a certificate from |file| (DER or PEM per |type|) and configures it on
// |ssl|. Returns 1 on success, 0 on failure.
int SSL_use_certificate_file(SSL *ssl, const char *file, int type) {
  int reason_code;
  BIO *in;
  int ret = 0;
  X509 *x = NULL;

  in = BIO_new(BIO_s_file());
  if (in == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_BUF_LIB);
    goto end;
  }

  if (BIO_read_filename(in, file) <= 0) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SYS_LIB);
    goto end;
  }

  if (type == SSL_FILETYPE_ASN1) {
    reason_code = ERR_R_ASN1_LIB;
    x = d2i_X509_bio(in, NULL);
  } else if (type == SSL_FILETYPE_PEM) {
    reason_code = ERR_R_PEM_LIB;
    x = PEM_read_bio_X509(in, NULL, ssl->ctx->default_passwd_callback,
                          ssl->ctx->default_passwd_callback_userdata);
  } else {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SSL_FILETYPE);
    goto end;
  }

  if (x == NULL) {
    OPENSSL_PUT_ERROR(SSL, reason_code);
    goto end;
  }

  ret = SSL_use_certificate(ssl, x);

end:
  X509_free(x);
  BIO_free(in);
  return ret;
}

// Loads an RSA private key from |file| (DER or PEM) and configures it on
// |ssl|.
int SSL_use_RSAPrivateKey_file(SSL *ssl, const char *file, int type) {
  int reason_code, ret = 0;
  BIO *in;
  RSA *rsa = NULL;

  in = BIO_new(BIO_s_file());
  if (in == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_BUF_LIB);
    goto end;
  }

  if (BIO_read_filename(in, file) <= 0) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SYS_LIB);
    goto end;
  }

  if (type == SSL_FILETYPE_ASN1) {
    reason_code = ERR_R_ASN1_LIB;
    rsa = d2i_RSAPrivateKey_bio(in, NULL);
  } else if (type ==
             SSL_FILETYPE_PEM) {
    // (Continued from SSL_use_RSAPrivateKey_file above.)
    reason_code = ERR_R_PEM_LIB;
    rsa =
        PEM_read_bio_RSAPrivateKey(in, NULL, ssl->ctx->default_passwd_callback,
                                   ssl->ctx->default_passwd_callback_userdata);
  } else {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SSL_FILETYPE);
    goto end;
  }

  if (rsa == NULL) {
    OPENSSL_PUT_ERROR(SSL, reason_code);
    goto end;
  }
  ret = SSL_use_RSAPrivateKey(ssl, rsa);
  RSA_free(rsa);

end:
  BIO_free(in);
  return ret;
}

// Loads a private key of any supported type from |file| (PEM or DER) and
// configures it on |ssl|.
int SSL_use_PrivateKey_file(SSL *ssl, const char *file, int type) {
  int reason_code, ret = 0;
  BIO *in;
  EVP_PKEY *pkey = NULL;

  in = BIO_new(BIO_s_file());
  if (in == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_BUF_LIB);
    goto end;
  }

  if (BIO_read_filename(in, file) <= 0) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SYS_LIB);
    goto end;
  }

  if (type == SSL_FILETYPE_PEM) {
    reason_code = ERR_R_PEM_LIB;
    pkey = PEM_read_bio_PrivateKey(in, NULL, ssl->ctx->default_passwd_callback,
                                   ssl->ctx->default_passwd_callback_userdata);
  } else if (type == SSL_FILETYPE_ASN1) {
    reason_code = ERR_R_ASN1_LIB;
    pkey = d2i_PrivateKey_bio(in, NULL);
  } else {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SSL_FILETYPE);
    goto end;
  }

  if (pkey == NULL) {
    OPENSSL_PUT_ERROR(SSL, reason_code);
    goto end;
  }
  ret = SSL_use_PrivateKey(ssl, pkey);
  EVP_PKEY_free(pkey);

end:
  BIO_free(in);
  return ret;
}

// SSL_CTX-level analogue of SSL_use_certificate_file.
int SSL_CTX_use_certificate_file(SSL_CTX *ctx, const char *file, int type) {
  int reason_code;
  BIO *in;
  int ret = 0;
  X509 *x = NULL;

  in = BIO_new(BIO_s_file());
  if (in == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_BUF_LIB);
    goto end;
  }

  if (BIO_read_filename(in, file) <= 0) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SYS_LIB);
    goto end;
  }

  if (type == SSL_FILETYPE_ASN1) {
    reason_code = ERR_R_ASN1_LIB;
    x = d2i_X509_bio(in, NULL);
  } else if (type == SSL_FILETYPE_PEM) {
    reason_code = ERR_R_PEM_LIB;
    x = PEM_read_bio_X509(in, NULL, ctx->default_passwd_callback,
                          ctx->default_passwd_callback_userdata);
  } else {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SSL_FILETYPE);
    goto end;
  }

  if (x == NULL) {
    OPENSSL_PUT_ERROR(SSL, reason_code);
    goto end;
  }

  ret = SSL_CTX_use_certificate(ctx, x);

end:
  X509_free(x);
  BIO_free(in);
  return ret;
}

// SSL_CTX-level analogue of SSL_use_RSAPrivateKey_file.
int SSL_CTX_use_RSAPrivateKey_file(SSL_CTX *ctx, const char *file, int type) {
  int reason_code, ret = 0;
  BIO *in;
  RSA *rsa = NULL;

  in = BIO_new(BIO_s_file());
  if (in == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_BUF_LIB);
    goto end;
  }

  if (BIO_read_filename(in, file) <= 0) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SYS_LIB);
    goto end;
  }

  if (type == SSL_FILETYPE_ASN1) {
    reason_code = ERR_R_ASN1_LIB;
    rsa = d2i_RSAPrivateKey_bio(in, NULL);
  } else if (type == SSL_FILETYPE_PEM) {
    reason_code = ERR_R_PEM_LIB;
    rsa = PEM_read_bio_RSAPrivateKey(in, NULL, ctx->default_passwd_callback,
                                     ctx->default_passwd_callback_userdata);
  } else {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SSL_FILETYPE);
    goto end;
  }

  if (rsa == NULL) {
    OPENSSL_PUT_ERROR(SSL, reason_code);
    goto end;
  }
  ret = SSL_CTX_use_RSAPrivateKey(ctx, rsa);
  RSA_free(rsa);

end:
  BIO_free(in);
  return ret;
}

// SSL_CTX-level analogue of SSL_use_PrivateKey_file.
int SSL_CTX_use_PrivateKey_file(SSL_CTX *ctx, const char *file, int type) {
  int reason_code, ret = 0;
  BIO *in;
  EVP_PKEY *pkey = NULL;

  in = BIO_new(BIO_s_file());
  if (in == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_BUF_LIB);
    goto end;
  }

  if (BIO_read_filename(in, file) <= 0) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SYS_LIB);
    goto end;
  }

  if (type == SSL_FILETYPE_PEM) {
    reason_code = ERR_R_PEM_LIB;
    pkey = PEM_read_bio_PrivateKey(in, NULL, ctx->default_passwd_callback,
                                   ctx->default_passwd_callback_userdata);
  } else if (type == SSL_FILETYPE_ASN1) {
    reason_code = ERR_R_ASN1_LIB;
    pkey = d2i_PrivateKey_bio(in, NULL);
  } else {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SSL_FILETYPE);
    goto end;
  }

  if (pkey == NULL) {
    OPENSSL_PUT_ERROR(SSL, reason_code);
    goto end;
  }
  ret = SSL_CTX_use_PrivateKey(ctx, pkey);
  EVP_PKEY_free(pkey);

end:
  BIO_free(in);
  return ret;
}

// Read a file that contains our certificate in "PEM" format, possibly followed
// by a sequence of CA certificates that should be sent to the peer in the
// Certificate message.
// Reads the leaf certificate (PEM, possibly followed by chain certificates)
// for |ctx| from |file|, replacing any previously configured chain. Returns 1
// on success and 0 on error.
int SSL_CTX_use_certificate_chain_file(SSL_CTX *ctx, const char *file) {
  BIO *in;
  int ret = 0;
  X509 *x = NULL;

  ERR_clear_error();  // clear error stack for SSL_CTX_use_certificate()

  in = BIO_new(BIO_s_file());
  if (in == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_BUF_LIB);
    goto end;
  }

  if (BIO_read_filename(in, file) <= 0) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SYS_LIB);
    goto end;
  }

  x = PEM_read_bio_X509_AUX(in, NULL, ctx->default_passwd_callback,
                            ctx->default_passwd_callback_userdata);
  if (x == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_PEM_LIB);
    goto end;
  }

  ret = SSL_CTX_use_certificate(ctx, x);

  if (ERR_peek_error() != 0) {
    ret = 0;  // Key/certificate mismatch doesn't imply ret==0 ...
  }

  if (ret) {
    // If we could set up our certificate, now proceed to the CA
    // certificates.
    X509 *ca;
    int r;
    uint32_t err;

    SSL_CTX_clear_chain_certs(ctx);

    while ((ca = PEM_read_bio_X509(in, NULL, ctx->default_passwd_callback,
                                   ctx->default_passwd_callback_userdata)) !=
           NULL) {
      r = SSL_CTX_add0_chain_cert(ctx, ca);
      if (!r) {
        X509_free(ca);
        ret = 0;
        goto end;
      }
      // Note that we must not free r if it was successfully added to the chain
      // (while we must free the main certificate, since its reference count is
      // increased by SSL_CTX_use_certificate).
    }

    // When the while loop ends, it's usually just EOF.
    err = ERR_peek_last_error();
    if (ERR_GET_LIB(err) == ERR_LIB_PEM &&
        ERR_GET_REASON(err) == PEM_R_NO_START_LINE) {
      // Reached the end of the PEM file: not a real error.
      ERR_clear_error();
    } else {
      ret = 0;  // some real error
    }
  }

end:
  X509_free(x);
  BIO_free(in);
  return ret;
}

// Accessors for the default PEM password callback used by the file loaders
// above.
void SSL_CTX_set_default_passwd_cb(SSL_CTX *ctx, pem_password_cb *cb) {
  ctx->default_passwd_callback = cb;
}

pem_password_cb *SSL_CTX_get_default_passwd_cb(const SSL_CTX *ctx) {
  return ctx->default_passwd_callback;
}

void SSL_CTX_set_default_passwd_cb_userdata(SSL_CTX *ctx, void *data) {
  ctx->default_passwd_callback_userdata = data;
}

void *SSL_CTX_get_default_passwd_cb_userdata(const SSL_CTX *ctx) {
  return ctx->default_passwd_callback_userdata;
}

================================================
FILE: Sources/CNIOBoringSSL/ssl/ssl_key_share.cc
================================================
/* Copyright 2015 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

// NOTE(review): the <...> targets of the system includes below, and all
// template arguments in this file (UniquePtr<...>, Span<...>, Array<...>),
// were stripped during extraction; the bare tokens are preserved as-is.
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define OPENSSL_UNSTABLE_EXPERIMENTAL_KYBER
#include
#include
#include
#include
#include
#include
#include

#include "../crypto/internal.h"
#include "internal.h"

BSSL_NAMESPACE_BEGIN

namespace {

// ECDH over a NIST prime curve, presented through the KEM-style SSLKeyShare
// interface (Generate/Encap/Decap).
class ECKeyShare : public SSLKeyShare {
 public:
  ECKeyShare(const EC_GROUP *group, uint16_t group_id)
      : group_(group), group_id_(group_id) {}

  uint16_t GroupID() const override { return group_id_; }

  bool Generate(CBB *out) override {
    assert(!private_key_);
    // Generate a private key.
    private_key_.reset(BN_new());
    if (!private_key_ ||
        !BN_rand_range_ex(private_key_.get(), 1,
                          EC_GROUP_get0_order(group_))) {
      return false;
    }

    // Compute the corresponding public key and serialize it.
    UniquePtr public_key(EC_POINT_new(group_));
    if (!public_key ||
        !EC_POINT_mul(group_, public_key.get(), private_key_.get(), nullptr,
                      nullptr, /*ctx=*/nullptr) ||
        !EC_POINT_point2cbb(out, group_, public_key.get(),
                            POINT_CONVERSION_UNCOMPRESSED, /*ctx=*/nullptr)) {
      return false;
    }

    return true;
  }

  bool Encap(CBB *out_ciphertext, Array *out_secret, uint8_t *out_alert,
             Span peer_key) override {
    // ECDH may be fit into a KEM-like abstraction by using a second keypair's
    // public key as the ciphertext.
    *out_alert = SSL_AD_INTERNAL_ERROR;
    return Generate(out_ciphertext) && Decap(out_secret, out_alert, peer_key);
  }

  bool Decap(Array *out_secret, uint8_t *out_alert,
             Span ciphertext) override {
    assert(group_);
    assert(private_key_);
    *out_alert = SSL_AD_INTERNAL_ERROR;

    UniquePtr peer_point(EC_POINT_new(group_));
    UniquePtr result(EC_POINT_new(group_));
    UniquePtr x(BN_new());
    if (!peer_point || !result || !x) {
      return false;
    }

    if (ciphertext.empty() || ciphertext[0] != POINT_CONVERSION_UNCOMPRESSED ||
        !EC_POINT_oct2point(group_, peer_point.get(), ciphertext.data(),
                            ciphertext.size(), /*ctx=*/nullptr)) {
      // Malformed peer point: the peer's fault, so signal illegal_parameter
      // rather than internal_error.
      OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ECPOINT);
      *out_alert = SSL_AD_ILLEGAL_PARAMETER;
      return false;
    }

    // Compute the x-coordinate of |peer_key| * |private_key_|.
    if (!EC_POINT_mul(group_, result.get(), nullptr, peer_point.get(),
                      private_key_.get(), /*ctx=*/nullptr) ||
        !EC_POINT_get_affine_coordinates_GFp(group_, result.get(), x.get(),
                                             nullptr, /*ctx=*/nullptr)) {
      return false;
    }

    // Encode the x-coordinate left-padded with zeros.
    Array secret;
    if (!secret.InitForOverwrite((EC_GROUP_get_degree(group_) + 7) / 8) ||
        !BN_bn2bin_padded(secret.data(), secret.size(), x.get())) {
      return false;
    }

    *out_secret = std::move(secret);
    return true;
  }

  bool SerializePrivateKey(CBB *out) override {
    assert(group_);
    assert(private_key_);
    // Padding is added to avoid leaking the length.
size_t len = BN_num_bytes(EC_GROUP_get0_order(group_));
    return BN_bn2cbb_padded(out, len, private_key_.get());
  }

  bool DeserializePrivateKey(CBS *in) override {
    assert(!private_key_);
    private_key_.reset(BN_bin2bn(CBS_data(in), CBS_len(in), nullptr));
    return private_key_ != nullptr;
  }

 private:
  UniquePtr private_key_;
  const EC_GROUP *const group_ = nullptr;
  uint16_t group_id_;
};

// X25519 key share, likewise presented through the KEM-style interface.
class X25519KeyShare : public SSLKeyShare {
 public:
  X25519KeyShare() {}

  uint16_t GroupID() const override { return SSL_GROUP_X25519; }

  bool Generate(CBB *out) override {
    uint8_t public_key[32];
    X25519_keypair(public_key, private_key_);
    return !!CBB_add_bytes(out, public_key, sizeof(public_key));
  }

  bool Encap(CBB *out_ciphertext, Array *out_secret, uint8_t *out_alert,
             Span peer_key) override {
    // X25519 may be fit into a KEM-like abstraction by using a second keypair's
    // public key as the ciphertext.
    *out_alert = SSL_AD_INTERNAL_ERROR;
    return Generate(out_ciphertext) && Decap(out_secret, out_alert, peer_key);
  }

  bool Decap(Array *out_secret, uint8_t *out_alert,
             Span ciphertext) override {
    *out_alert = SSL_AD_INTERNAL_ERROR;

    Array secret;
    if (!secret.InitForOverwrite(32)) {
      return false;
    }

    if (ciphertext.size() != 32 ||  //
        !X25519(secret.data(), private_key_, ciphertext.data())) {
      *out_alert = SSL_AD_ILLEGAL_PARAMETER;
      OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ECPOINT);
      return false;
    }

    *out_secret = std::move(secret);
    return true;
  }

  bool SerializePrivateKey(CBB *out) override {
    return CBB_add_bytes(out, private_key_, sizeof(private_key_));
  }

  bool DeserializePrivateKey(CBS *in) override {
    if (CBS_len(in) != sizeof(private_key_) ||
        !CBS_copy_bytes(in, private_key_, sizeof(private_key_))) {
      return false;
    }
    return true;
  }

 private:
  uint8_t private_key_[32];
};

// draft-tls-westerbaan-xyber768d00-03
// Hybrid X25519 + Kyber768 key share; the X25519 component comes first on the
// wire.
class X25519Kyber768KeyShare : public SSLKeyShare {
 public:
  X25519Kyber768KeyShare() {}

  uint16_t GroupID() const override {
    return SSL_GROUP_X25519_KYBER768_DRAFT00;
  }

  bool Generate(CBB *out) override {
    uint8_t x25519_public_key[32];
    X25519_keypair(x25519_public_key, x25519_private_key_);

    uint8_t kyber_public_key[KYBER_PUBLIC_KEY_BYTES];
    KYBER_generate_key(kyber_public_key, &kyber_private_key_);

    if (!CBB_add_bytes(out, x25519_public_key, sizeof(x25519_public_key)) ||
        !CBB_add_bytes(out, kyber_public_key, sizeof(kyber_public_key))) {
      return false;
    }
    return true;
  }

  bool Encap(CBB *out_ciphertext, Array *out_secret, uint8_t *out_alert,
             Span peer_key) override {
    // The shared secret is the X25519 result (32 bytes) followed by the Kyber
    // shared secret.
    Array secret;
    if (!secret.InitForOverwrite(32 + KYBER_SHARED_SECRET_BYTES)) {
      return false;
    }

    uint8_t x25519_public_key[32];
    X25519_keypair(x25519_public_key, x25519_private_key_);

    KYBER_public_key peer_kyber_pub;
    CBS peer_key_cbs, peer_x25519_cbs, peer_kyber_cbs;
    CBS_init(&peer_key_cbs, peer_key.data(), peer_key.size());
    if (!CBS_get_bytes(&peer_key_cbs, &peer_x25519_cbs, 32) ||
        !CBS_get_bytes(&peer_key_cbs, &peer_kyber_cbs,
                       KYBER_PUBLIC_KEY_BYTES) ||
        CBS_len(&peer_key_cbs) != 0 ||
        !X25519(secret.data(), x25519_private_key_,
                CBS_data(&peer_x25519_cbs)) ||
        !KYBER_parse_public_key(&peer_kyber_pub, &peer_kyber_cbs)) {
      *out_alert = SSL_AD_ILLEGAL_PARAMETER;
      OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ECPOINT);
      return false;
    }

    uint8_t kyber_ciphertext[KYBER_CIPHERTEXT_BYTES];
    KYBER_encap(kyber_ciphertext, secret.data() + 32, &peer_kyber_pub);

    if (!CBB_add_bytes(out_ciphertext, x25519_public_key,
                       sizeof(x25519_public_key)) ||
        !CBB_add_bytes(out_ciphertext, kyber_ciphertext,
                       sizeof(kyber_ciphertext))) {
      return false;
    }

    *out_secret = std::move(secret);
    return true;
  }

  bool Decap(Array *out_secret, uint8_t *out_alert,
             Span ciphertext) override {
    *out_alert = SSL_AD_INTERNAL_ERROR;

    Array secret;
    if (!secret.InitForOverwrite(32 + KYBER_SHARED_SECRET_BYTES)) {
      return false;
    }

    if (ciphertext.size() != 32 + KYBER_CIPHERTEXT_BYTES ||
        !X25519(secret.data(), x25519_private_key_, ciphertext.data())) {
      *out_alert = SSL_AD_ILLEGAL_PARAMETER;
      OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ECPOINT);
      return false;
    }

    KYBER_decap(secret.data() + 32, ciphertext.data()
+ 32, &kyber_private_key_);

    *out_secret = std::move(secret);
    return true;
  }

 private:
  uint8_t x25519_private_key_[32];
  KYBER_private_key kyber_private_key_;
};

// draft-kwiatkowski-tls-ecdhe-mlkem-01
// Hybrid X25519 + ML-KEM-768 key share. Unlike the Kyber draft above, the
// ML-KEM component comes first on the wire.
class X25519MLKEM768KeyShare : public SSLKeyShare {
 public:
  X25519MLKEM768KeyShare() {}

  uint16_t GroupID() const override { return SSL_GROUP_X25519_MLKEM768; }

  bool Generate(CBB *out) override {
    uint8_t mlkem_public_key[MLKEM768_PUBLIC_KEY_BYTES];
    MLKEM768_generate_key(mlkem_public_key, /*optional_out_seed=*/nullptr,
                          &mlkem_private_key_);

    uint8_t x25519_public_key[X25519_PUBLIC_VALUE_LEN];
    X25519_keypair(x25519_public_key, x25519_private_key_);

    if (!CBB_add_bytes(out, mlkem_public_key, sizeof(mlkem_public_key)) ||
        !CBB_add_bytes(out, x25519_public_key, sizeof(x25519_public_key))) {
      return false;
    }
    return true;
  }

  bool Encap(CBB *out_ciphertext, Array *out_secret, uint8_t *out_alert,
             Span peer_key) override {
    // The shared secret is the ML-KEM shared secret followed by the X25519
    // result.
    Array secret;
    if (!secret.InitForOverwrite(MLKEM_SHARED_SECRET_BYTES +
                                 X25519_SHARED_KEY_LEN)) {
      return false;
    }

    MLKEM768_public_key peer_mlkem_pub;
    uint8_t x25519_public_key[X25519_PUBLIC_VALUE_LEN];
    X25519_keypair(x25519_public_key, x25519_private_key_);

    CBS peer_key_cbs, peer_mlkem_cbs, peer_x25519_cbs;
    CBS_init(&peer_key_cbs, peer_key.data(), peer_key.size());
    if (!CBS_get_bytes(&peer_key_cbs, &peer_mlkem_cbs,
                       MLKEM768_PUBLIC_KEY_BYTES) ||
        !MLKEM768_parse_public_key(&peer_mlkem_pub, &peer_mlkem_cbs) ||
        !CBS_get_bytes(&peer_key_cbs, &peer_x25519_cbs,
                       X25519_PUBLIC_VALUE_LEN) ||
        CBS_len(&peer_key_cbs) != 0 ||
        !X25519(secret.data() + MLKEM_SHARED_SECRET_BYTES, x25519_private_key_,
                CBS_data(&peer_x25519_cbs))) {
      *out_alert = SSL_AD_ILLEGAL_PARAMETER;
      OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ECPOINT);
      return false;
    }

    uint8_t mlkem_ciphertext[MLKEM768_CIPHERTEXT_BYTES];
    MLKEM768_encap(mlkem_ciphertext, secret.data(), &peer_mlkem_pub);

    if (!CBB_add_bytes(out_ciphertext, mlkem_ciphertext,
                       sizeof(mlkem_ciphertext)) ||
        !CBB_add_bytes(out_ciphertext, x25519_public_key,
                       sizeof(x25519_public_key))) {
      return false;
    }

    *out_secret = std::move(secret);
    return true;
  }

  bool Decap(Array *out_secret, uint8_t *out_alert,
             Span ciphertext) override {
    *out_alert = SSL_AD_INTERNAL_ERROR;

    Array secret;
    if (!secret.InitForOverwrite(MLKEM_SHARED_SECRET_BYTES +
                                 X25519_SHARED_KEY_LEN)) {
      return false;
    }

    if (ciphertext.size() !=
            MLKEM768_CIPHERTEXT_BYTES + X25519_PUBLIC_VALUE_LEN ||
        !MLKEM768_decap(secret.data(), ciphertext.data(),
                        MLKEM768_CIPHERTEXT_BYTES, &mlkem_private_key_) ||
        !X25519(secret.data() + MLKEM_SHARED_SECRET_BYTES, x25519_private_key_,
                ciphertext.data() + MLKEM768_CIPHERTEXT_BYTES)) {
      *out_alert = SSL_AD_ILLEGAL_PARAMETER;
      OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ECPOINT);
      return false;
    }

    *out_secret = std::move(secret);
    return true;
  }

 private:
  uint8_t x25519_private_key_[32];
  MLKEM768_private_key mlkem_private_key_;
};

// Table of every named group supported by this library. Entries with an empty
// alias string have no alternate name.
constexpr NamedGroup kNamedGroups[] = {
    {NID_secp224r1, SSL_GROUP_SECP224R1, "P-224", "secp224r1"},
    {NID_X9_62_prime256v1, SSL_GROUP_SECP256R1, "P-256", "prime256v1"},
    {NID_secp384r1, SSL_GROUP_SECP384R1, "P-384", "secp384r1"},
    {NID_secp521r1, SSL_GROUP_SECP521R1, "P-521", "secp521r1"},
    {NID_X25519, SSL_GROUP_X25519, "X25519", "x25519"},
    {NID_X25519Kyber768Draft00, SSL_GROUP_X25519_KYBER768_DRAFT00,
     "X25519Kyber768Draft00", ""},
    {NID_X25519MLKEM768, SSL_GROUP_X25519_MLKEM768, "X25519MLKEM768", ""},
};

}  // namespace

Span NamedGroups() { return kNamedGroups; }

// Instantiates the SSLKeyShare implementation for |group_id|, or nullptr if
// the group is unknown. NOTE(review): the MakeUnique<...> template arguments
// were stripped during extraction.
UniquePtr SSLKeyShare::Create(uint16_t group_id) {
  switch (group_id) {
    case SSL_GROUP_SECP224R1:
      return MakeUnique(EC_group_p224(), SSL_GROUP_SECP224R1);
    case SSL_GROUP_SECP256R1:
      return MakeUnique(EC_group_p256(), SSL_GROUP_SECP256R1);
    case SSL_GROUP_SECP384R1:
      return MakeUnique(EC_group_p384(), SSL_GROUP_SECP384R1);
    case SSL_GROUP_SECP521R1:
      return MakeUnique(EC_group_p521(), SSL_GROUP_SECP521R1);
    case SSL_GROUP_X25519:
      return MakeUnique();
    case SSL_GROUP_X25519_KYBER768_DRAFT00:
      return MakeUnique();
    case SSL_GROUP_X25519_MLKEM768:
      return MakeUnique();
    default:
      return
nullptr;
  }
}

// Maps an OpenSSL NID to the IANA TLS group ID. Returns true on success.
bool ssl_nid_to_group_id(uint16_t *out_group_id, int nid) {
  for (const auto &group : kNamedGroups) {
    if (group.nid == nid) {
      *out_group_id = group.group_id;
      return true;
    }
  }
  return false;
}

// Maps a group name (or its alias) of length |len| — not NUL-terminated — to
// the IANA TLS group ID. Returns true on success.
bool ssl_name_to_group_id(uint16_t *out_group_id, const char *name,
                          size_t len) {
  for (const auto &group : kNamedGroups) {
    if (len == strlen(group.name) &&  //
        !strncmp(group.name, name, len)) {
      *out_group_id = group.group_id;
      return true;
    }
    // Aliases may be empty strings; skip those.
    if (strlen(group.alias) > 0 && len == strlen(group.alias) &&
        !strncmp(group.alias, name, len)) {
      *out_group_id = group.group_id;
      return true;
    }
  }
  return false;
}

// Maps an IANA TLS group ID back to its NID, or NID_undef if unknown.
int ssl_group_id_to_nid(uint16_t group_id) {
  for (const auto &group : kNamedGroups) {
    if (group.group_id == group_id) {
      return group.nid;
    }
  }
  return NID_undef;
}

BSSL_NAMESPACE_END

using namespace bssl;

const char *SSL_get_group_name(uint16_t group_id) {
  for (const auto &group : kNamedGroups) {
    if (group.group_id == group_id) {
      return group.name;
    }
  }
  return nullptr;
}

size_t SSL_get_all_group_names(const char **out, size_t max_out) {
  return GetAllNames(out, max_out, Span(), &NamedGroup::name,
                     Span(kNamedGroups));
}

================================================
FILE: Sources/CNIOBoringSSL/ssl/ssl_lib.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2005 Nokia. All rights reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the <...> targets of the includes below and all template
// arguments in this file (Span<...>, Array<...>, static_cast<...>, etc.)
// were stripped during extraction; the bare tokens are preserved as-is.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../crypto/internal.h"
#include "internal.h"

#if defined(OPENSSL_WINDOWS)
#include
#else
#include
#include
#endif

BSSL_NAMESPACE_BEGIN

static_assert(SSL3_RT_MAX_ENCRYPTED_OVERHEAD >=
                  SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD,
              "max overheads are inconsistent");

// |SSL_R_UNKNOWN_PROTOCOL| is no longer emitted, but continue to define it
// to avoid downstream churn.
OPENSSL_DECLARE_ERROR_REASON(SSL, UNKNOWN_PROTOCOL)

// The following errors are no longer emitted, but are used in nginx without
// #ifdefs.
OPENSSL_DECLARE_ERROR_REASON(SSL, BLOCK_CIPHER_PAD_IS_WRONG)
OPENSSL_DECLARE_ERROR_REASON(SSL, NO_CIPHERS_SPECIFIED)

// Some error codes are special. Ensure the make_errors.go script never
// regresses this.
static_assert(SSL_R_TLSV1_ALERT_NO_RENEGOTIATION ==
                  SSL_AD_NO_RENEGOTIATION + SSL_AD_REASON_OFFSET,
              "alert reason code mismatch");

// kMaxHandshakeSize is the maximum size, in bytes, of a handshake message.
static const size_t kMaxHandshakeSize = (1u << 24) - 1;

static CRYPTO_EX_DATA_CLASS g_ex_data_class_ssl =
    CRYPTO_EX_DATA_CLASS_INIT_WITH_APP_DATA;
static CRYPTO_EX_DATA_CLASS g_ex_data_class_ssl_ctx =
    CRYPTO_EX_DATA_CLASS_INIT_WITH_APP_DATA;

// Finishes |cbb| and transfers ownership of the serialized bytes to |out|.
bool CBBFinishArray(CBB *cbb, Array *out) {
  uint8_t *ptr;
  size_t len;
  if (!CBB_finish(cbb, &ptr, &len)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return false;
  }
  out->Reset(ptr, len);
  return true;
}

void ssl_reset_error_state(SSL *ssl) {
  // Functions which use |SSL_get_error| must reset I/O and error state on
  // entry.
  ssl->s3->rwstate = SSL_ERROR_NONE;
  ERR_clear_error();
  ERR_clear_system_error();
}

// Latches the current error stack so subsequent reads replay the same error.
void ssl_set_read_error(SSL *ssl) {
  ssl->s3->read_shutdown = ssl_shutdown_error;
  ssl->s3->read_error.reset(ERR_save_state());
}

// Replays a previously latched read error, if any. Returns false (with the
// error restored) when the read side is in the error state.
static bool check_read_error(const SSL *ssl) {
  if (ssl->s3->read_shutdown == ssl_shutdown_error) {
    ERR_restore_state(ssl->s3->read_error.get());
    return false;
  }
  return true;
}

bool ssl_can_write(const SSL *ssl) {
  return !SSL_in_init(ssl) || ssl->s3->hs->can_early_write;
}

bool ssl_can_read(const SSL *ssl) {
  return !SSL_in_init(ssl) || ssl->s3->hs->can_early_read;
}

// The three ssl_open_* wrappers below delegate to the protocol method and
// latch any error so it is replayed on later reads.
ssl_open_record_t ssl_open_handshake(SSL *ssl, size_t *out_consumed,
                                     uint8_t *out_alert, Span in) {
  *out_consumed = 0;
  if (!check_read_error(ssl)) {
    *out_alert = 0;
    return ssl_open_record_error;
  }
  auto ret = ssl->method->open_handshake(ssl, out_consumed, out_alert, in);
  if (ret == ssl_open_record_error) {
    ssl_set_read_error(ssl);
  }
  return ret;
}

ssl_open_record_t ssl_open_change_cipher_spec(SSL *ssl, size_t *out_consumed,
                                              uint8_t *out_alert, Span in) {
  *out_consumed = 0;
  if (!check_read_error(ssl)) {
    *out_alert = 0;
    return ssl_open_record_error;
  }
  auto ret =
      ssl->method->open_change_cipher_spec(ssl, out_consumed, out_alert, in);
  if (ret == ssl_open_record_error) {
    ssl_set_read_error(ssl);
  }
  return ret;
}

ssl_open_record_t ssl_open_app_data(SSL *ssl, Span *out, size_t *out_consumed,
                                    uint8_t *out_alert, Span in) {
  *out_consumed = 0;
  if (!check_read_error(ssl)) {
    *out_alert = 0;
    return ssl_open_record_error;
  }
  auto ret = ssl->method->open_app_data(ssl, out, out_consumed, out_alert, in);
  if (ret == ssl_open_record_error) {
    ssl_set_read_error(ssl);
  }
  return ret;
}

// Converts a nibble to a lowercase hex digit without branching on secret
// data.
static uint8_t hex_char_consttime(uint8_t b) {
  declassify_assert(b < 16);
  return constant_time_select_8(constant_time_lt_8(b, 10), b + '0',
                                b - 10 + 'a');
}

// Appends the hex encoding of |in| to |cbb|, in constant time with respect to
// the values of |in|.
static bool cbb_add_hex_consttime(CBB *cbb, Span in) {
  uint8_t *out;
  if (!CBB_add_space(cbb, &out, in.size() * 2)) {
    return false;
  }

  for (uint8_t b : in) {
    *(out++) = hex_char_consttime(b
>> 4);
    *(out++) = hex_char_consttime(b & 0xf);
  }

  return true;
}

// Writes "<label> <client_random hex> <secret hex>" to the keylog callback,
// if one is configured. Returns false only on allocation failure.
bool ssl_log_secret(const SSL *ssl, const char *label, Span secret) {
  if (ssl->ctx->keylog_callback == NULL) {
    return true;
  }

  ScopedCBB cbb;
  Array line;
  auto label_bytes = bssl::StringAsBytes(label);
  if (!CBB_init(cbb.get(), strlen(label) + 1 + SSL3_RANDOM_SIZE * 2 + 1 +
                               secret.size() * 2 + 1) ||
      !CBB_add_bytes(cbb.get(), label_bytes.data(), label_bytes.size()) ||
      !CBB_add_u8(cbb.get(), ' ') ||
      !cbb_add_hex_consttime(cbb.get(), ssl->s3->client_random) ||
      !CBB_add_u8(cbb.get(), ' ') ||
      // Convert to hex in constant time to avoid leaking |secret|. If the
      // callback discards the data, we should not introduce side channels.
      !cbb_add_hex_consttime(cbb.get(), secret) ||
      !CBB_add_u8(cbb.get(), 0 /* NUL */) ||
      !CBBFinishArray(cbb.get(), &line)) {
    return false;
  }

  ssl->ctx->keylog_callback(ssl, reinterpret_cast(line.data()));
  return true;
}

// Invokes the connection's info callback, falling back to the context's.
void ssl_do_info_callback(const SSL *ssl, int type, int value) {
  void (*cb)(const SSL *ssl, int type, int value) = NULL;
  if (ssl->info_callback != NULL) {
    cb = ssl->info_callback;
  } else if (ssl->ctx->info_callback != NULL) {
    cb = ssl->ctx->info_callback;
  }

  if (cb != NULL) {
    cb(ssl, type, value);
  }
}

void ssl_do_msg_callback(const SSL *ssl, int is_write, int content_type,
                         Span in) {
  if (ssl->msg_callback == NULL) {
    return;
  }

  // |version| is zero when calling for |SSL3_RT_HEADER| and |SSL2_VERSION| for
  // a V2ClientHello.
  int version;
  switch (content_type) {
    case 0:  // V2ClientHello
      version = SSL2_VERSION;
      break;
    case SSL3_RT_HEADER:
      version = 0;
      break;
    default:
      version = SSL_version(ssl);
  }

  ssl->msg_callback(is_write, version, content_type, in.data(), in.size(),
                    const_cast(ssl), ssl->msg_callback_arg);
}

// Returns the current time, preferring the context's time callback, then the
// deterministic test clock, then the platform clock.
OPENSSL_timeval ssl_ctx_get_current_time(const SSL_CTX *ctx) {
  if (ctx->current_time_cb != NULL) {
    // TODO(davidben): Update current_time_cb to use OPENSSL_timeval. See
    // https://crbug.com/boringssl/155.
    struct timeval clock;
    ctx->current_time_cb(nullptr /* ssl */, &clock);
    if (clock.tv_sec < 0) {
      assert(0);
      return {0, 0};
    } else {
      return {static_cast(clock.tv_sec), static_cast(clock.tv_usec)};
    }
  }

#if defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE)
  return {1234, 1234};
#elif defined(OPENSSL_WINDOWS)
  struct _timeb time;
  _ftime(&time);
  if (time.time < 0) {
    assert(0);
    return {0, 0};
  } else {
    return {static_cast(time.time), static_cast(time.millitm * 1000)};
  }
#else
  struct timeval clock;
  gettimeofday(&clock, NULL);
  if (clock.tv_sec < 0) {
    assert(0);
    return {0, 0};
  } else {
    return {static_cast(clock.tv_sec), static_cast(clock.tv_usec)};
  }
#endif
}

void SSL_CTX_set_handoff_mode(SSL_CTX *ctx, bool on) { ctx->handoff = on; }

// True when this (client, non-DTLS, pre-TLS-1.3) connection may still
// renegotiate under the configured renegotiate mode.
static bool ssl_can_renegotiate(const SSL *ssl) {
  if (ssl->server || SSL_is_dtls(ssl)) {
    return false;
  }

  if (ssl->s3->version != 0  //
      && ssl_protocol_version(ssl) >= TLS1_3_VERSION) {
    return false;
  }

  // The config has already been shed.
  if (!ssl->config) {
    return false;
  }

  switch (ssl->renegotiate_mode) {
    case ssl_renegotiate_ignore:
    case ssl_renegotiate_never:
      return false;

    case ssl_renegotiate_freely:
    case ssl_renegotiate_explicit:
      return true;
    case ssl_renegotiate_once:
      return ssl->s3->total_renegotiations == 0;
  }

  assert(0);
  return false;
}

// Frees the handshake config once the handshake is done and renegotiation is
// impossible, if shedding was requested.
static void ssl_maybe_shed_handshake_config(SSL *ssl) {
  if (ssl->s3->hs != nullptr ||                 //
      ssl->config == nullptr ||                 //
      !ssl->config->shed_handshake_config ||    //
      ssl_can_renegotiate(ssl)) {
    return;
  }

  ssl->config.reset();
}

void SSL_set_handoff_mode(SSL *ssl, bool on) {
  if (!ssl->config) {
    return;
  }
  ssl->config->handoff = on;
}

bool SSL_get_traffic_secrets(const SSL *ssl, Span *out_read_traffic_secret,
                             Span *out_write_traffic_secret) {
  // This API is not well-defined for DTLS 1.3 (see https://crbug.com/42290608)
  // or QUIC, where multiple epochs may be alive at once.
if (SSL_is_dtls(ssl) || SSL_is_quic(ssl)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return false;
  }

  if (!ssl->s3->initial_handshake_complete) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_HANDSHAKE_NOT_COMPLETE);
    return false;
  }

  if (SSL_version(ssl) < TLS1_3_VERSION) {
    // Traffic secrets in this form only exist for TLS 1.3.
    OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SSL_VERSION);
    return false;
  }

  *out_read_traffic_secret = ssl->s3->read_traffic_secret;
  *out_write_traffic_secret = ssl->s3->write_traffic_secret;

  return true;
}

// Test-only knobs that force the AES hardware-acceleration decision.
void SSL_CTX_set_aes_hw_override_for_testing(SSL_CTX *ctx,
                                             bool override_value) {
  ctx->aes_hw_override = true;
  ctx->aes_hw_override_value = override_value;
}

void SSL_set_aes_hw_override_for_testing(SSL *ssl, bool override_value) {
  ssl->config->aes_hw_override = true;
  ssl->config->aes_hw_override_value = override_value;
}

BSSL_NAMESPACE_END

using namespace bssl;

// Legacy OpenSSL initializers; BoringSSL self-initializes, so both are no-ops.
int SSL_library_init(void) { return 1; }

int OPENSSL_init_ssl(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings) {
  return 1;
}

// Hash and comparison callbacks for the internal session-ID cache.
static uint32_t ssl_session_hash(const SSL_SESSION *sess) {
  return ssl_hash_session_id(sess->session_id);
}

static int ssl_session_cmp(const SSL_SESSION *a, const SSL_SESSION *b) {
  return Span(a->session_id) == b->session_id ? 0 : 1;
}

ssl_ctx_st::ssl_ctx_st(const SSL_METHOD *ssl_method)
    : RefCounted(CheckSubClass()),
      method(ssl_method->method),
      x509_method(ssl_method->x509_method),
      retain_only_sha256_of_client_certs(false),
      quiet_shutdown(false),
      ocsp_stapling_enabled(false),
      signed_cert_timestamps_enabled(false),
      channel_id_enabled(false),
      grease_enabled(false),
      permute_extensions(false),
      allow_unknown_alpn_protos(false),
      false_start_allowed_without_alpn(false),
      handoff(false),
      enable_early_data(false),
      aes_hw_override(false),
      aes_hw_override_value(false) {
  CRYPTO_MUTEX_init(&lock);
  CRYPTO_new_ex_data(&ex_data);
}

ssl_ctx_st::~ssl_ctx_st() {
  // Free the internal session cache. Note that this calls the caller-supplied
  // remove callback, so we must do it before clearing ex_data. (See ticket
  // [openssl.org #212].)
  SSL_CTX_flush_sessions(this, 0);

  CRYPTO_free_ex_data(&g_ex_data_class_ssl_ctx, this, &ex_data);

  CRYPTO_MUTEX_cleanup(&lock);
  lh_SSL_SESSION_free(sessions);
  x509_method->ssl_ctx_free(this);
}

// Allocates and initializes a new SSL_CTX bound to |method|'s protocol and
// version. Returns nullptr on error.
SSL_CTX *SSL_CTX_new(const SSL_METHOD *method) {
  if (method == NULL) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_NULL_SSL_METHOD_PASSED);
    return nullptr;
  }

  UniquePtr ret = MakeUnique(method);
  if (!ret) {
    return nullptr;
  }

  ret->cert = MakeUnique(method->x509_method);
  ret->sessions = lh_SSL_SESSION_new(ssl_session_hash, ssl_session_cmp);
  ret->client_CA.reset(sk_CRYPTO_BUFFER_new_null());
  ret->CA_names.reset(sk_CRYPTO_BUFFER_new_null());
  if (ret->cert == nullptr ||       //
      !ret->cert->is_valid() ||     //
      ret->sessions == nullptr ||   //
      ret->client_CA == nullptr ||  //
      ret->CA_names == nullptr ||   //
      !ret->x509_method->ssl_ctx_new(ret.get())) {
    return nullptr;
  }

  if (!SSL_CTX_set_strict_cipher_list(ret.get(), SSL_DEFAULT_CIPHER_LIST) ||
      // Lock the SSL_CTX to the specified version, for compatibility with
      // legacy uses of SSL_METHOD.
      !SSL_CTX_set_max_proto_version(ret.get(), method->version) ||
      !SSL_CTX_set_min_proto_version(ret.get(), method->version)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return nullptr;
  }

  return ret.release();
}

int SSL_CTX_up_ref(SSL_CTX *ctx) {
  ctx->UpRefInternal();
  return 1;
}

void SSL_CTX_free(SSL_CTX *ctx) {
  if (ctx != nullptr) {
    ctx->DecRefInternal();
  }
}

ssl_st::ssl_st(SSL_CTX *ctx_arg)
    : method(ctx_arg->method),
      max_send_fragment(ctx_arg->max_send_fragment),
      msg_callback(ctx_arg->msg_callback),
      msg_callback_arg(ctx_arg->msg_callback_arg),
      ctx(UpRef(ctx_arg)),
      session_ctx(UpRef(ctx_arg)),
      options(ctx->options),
      mode(ctx->mode),
      max_cert_list(ctx->max_cert_list),
      server(false),
      quiet_shutdown(ctx->quiet_shutdown),
      enable_early_data(ctx->enable_early_data) {
  CRYPTO_new_ex_data(&ex_data);
}

ssl_st::~ssl_st() {
  CRYPTO_free_ex_data(&g_ex_data_class_ssl, this, &ex_data);
  // |config| refers to |this|, so we must release it earlier.
config.reset();
  if (method != NULL) {
    method->ssl_free(this);
  }
}

// Allocates a new SSL connection object, copying all configurable state from
// |ctx|. Returns nullptr on error.
SSL *SSL_new(SSL_CTX *ctx) {
  if (ctx == nullptr) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_NULL_SSL_CTX);
    return nullptr;
  }

  UniquePtr ssl = MakeUnique(ctx);
  if (ssl == nullptr) {
    return nullptr;
  }

  ssl->config = MakeUnique(ssl.get());
  if (ssl->config == nullptr) {
    return nullptr;
  }
  ssl->config->conf_min_version = ctx->conf_min_version;
  ssl->config->conf_max_version = ctx->conf_max_version;

  ssl->config->cert = ssl_cert_dup(ctx->cert.get());
  if (ssl->config->cert == nullptr) {
    return nullptr;
  }

  ssl->config->verify_mode = ctx->verify_mode;
  ssl->config->verify_callback = ctx->default_verify_callback;
  ssl->config->custom_verify_callback = ctx->custom_verify_callback;
  ssl->config->retain_only_sha256_of_client_certs =
      ctx->retain_only_sha256_of_client_certs;
  ssl->config->permute_extensions = ctx->permute_extensions;
  ssl->config->aes_hw_override = ctx->aes_hw_override;
  ssl->config->aes_hw_override_value = ctx->aes_hw_override_value;
  ssl->config->compliance_policy = ctx->compliance_policy;

  if (!ssl->config->supported_group_list.CopyFrom(ctx->supported_group_list) ||
      !ssl->config->alpn_client_proto_list.CopyFrom(
          ctx->alpn_client_proto_list) ||
      !ssl->config->verify_sigalgs.CopyFrom(ctx->verify_sigalgs)) {
    return nullptr;
  }

  if (ctx->psk_identity_hint) {
    ssl->config->psk_identity_hint.reset(
        OPENSSL_strdup(ctx->psk_identity_hint.get()));
    if (ssl->config->psk_identity_hint == nullptr) {
      return nullptr;
    }
  }
  ssl->config->psk_client_callback = ctx->psk_client_callback;
  ssl->config->psk_server_callback = ctx->psk_server_callback;

  ssl->config->channel_id_enabled = ctx->channel_id_enabled;
  ssl->config->channel_id_private = UpRef(ctx->channel_id_private);

  ssl->config->signed_cert_timestamps_enabled =
      ctx->signed_cert_timestamps_enabled;
  ssl->config->ocsp_stapling_enabled = ctx->ocsp_stapling_enabled;
  ssl->config->handoff = ctx->handoff;
  ssl->quic_method = ctx->quic_method;

  if (!ssl->method->ssl_new(ssl.get()) ||
      !ssl->ctx->x509_method->ssl_new(ssl->s3->hs.get())) {
    return nullptr;
  }

  return ssl.release();
}

SSL_CONFIG::SSL_CONFIG(SSL *ssl_arg)
    : ssl(ssl_arg),
      ech_grease_enabled(false),
      signed_cert_timestamps_enabled(false),
      ocsp_stapling_enabled(false),
      channel_id_enabled(false),
      enforce_rsa_key_usage(true),
      retain_only_sha256_of_client_certs(false),
      handoff(false),
      shed_handshake_config(false),
      jdk11_workaround(false),
      quic_use_legacy_codepoint(false),
      permute_extensions(false),
      alps_use_new_codepoint(false),
      check_client_certificate_type(true),
      check_ecdsa_curve(true) {
  assert(ssl);
}

SSL_CONFIG::~SSL_CONFIG() {
  if (ssl->ctx != nullptr) {
    ssl->ctx->x509_method->ssl_config_free(this);
  }
}

void SSL_free(SSL *ssl) { Delete(ssl); }

void SSL_set_connect_state(SSL *ssl) {
  ssl->server = false;
  ssl->do_handshake = ssl_client_handshake;
}

void SSL_set_accept_state(SSL *ssl) {
  ssl->server = true;
  ssl->do_handshake = ssl_server_handshake;
}

// The set0 variants take ownership of the BIO reference.
void SSL_set0_rbio(SSL *ssl, BIO *rbio) { ssl->rbio.reset(rbio); }

void SSL_set0_wbio(SSL *ssl, BIO *wbio) { ssl->wbio.reset(wbio); }

void SSL_set_bio(SSL *ssl, BIO *rbio, BIO *wbio) {
  // For historical reasons, this function has many different cases in ownership
  // handling.

  // If nothing has changed, do nothing
  if (rbio == SSL_get_rbio(ssl) && wbio == SSL_get_wbio(ssl)) {
    return;
  }

  // If the two arguments are equal, one fewer reference is granted than
  // taken.
  if (rbio != NULL && rbio == wbio) {
    BIO_up_ref(rbio);
  }

  // If only the wbio is changed, adopt only one reference.
  if (rbio == SSL_get_rbio(ssl)) {
    SSL_set0_wbio(ssl, wbio);
    return;
  }

  // There is an asymmetry here for historical reasons. If only the rbio is
  // changed AND the rbio and wbio were originally different, then we only adopt
  // one reference.
  if (wbio == SSL_get_wbio(ssl) && SSL_get_rbio(ssl) != SSL_get_wbio(ssl)) {
    SSL_set0_rbio(ssl, rbio);
    return;
  }

  // Otherwise, adopt both references.
SSL_set0_rbio(ssl, rbio);
  SSL_set0_wbio(ssl, wbio);
}

// Returns the BIO this connection reads ciphertext from.
BIO *SSL_get_rbio(const SSL *ssl) { return ssl->rbio.get(); }

// Returns the BIO this connection writes ciphertext to.
BIO *SSL_get_wbio(const SSL *ssl) { return ssl->wbio.get(); }

// Returns the maximum number of buffered handshake bytes accepted at |level|
// by |SSL_provide_quic_data|.
size_t SSL_quic_max_handshake_flight_len(const SSL *ssl,
                                         enum ssl_encryption_level_t level) {
  // Limits flights to 16K by default when there are no large
  // (certificate-carrying) messages.
  static const size_t kDefaultLimit = 16384;

  switch (level) {
    case ssl_encryption_initial:
      return kDefaultLimit;
    case ssl_encryption_early_data:
      // QUIC does not send EndOfEarlyData.
      return 0;
    case ssl_encryption_handshake:
      if (ssl->server) {
        // Servers may receive Certificate message if configured to request
        // client certificates.
        if (!!(ssl->config->verify_mode & SSL_VERIFY_PEER) &&
            ssl->max_cert_list > kDefaultLimit) {
          return ssl->max_cert_list;
        }
      } else {
        // Clients may receive both Certificate message and a CertificateRequest
        // message.
        if (2 * ssl->max_cert_list > kDefaultLimit) {
          return 2 * ssl->max_cert_list;
        }
      }
      return kDefaultLimit;
    case ssl_encryption_application:
      // Note there is not actually a bound on the number of NewSessionTickets
      // one may send in a row. This level may need more involved flow
      // control. See https://github.com/quicwg/base-drafts/issues/1834.
      return kDefaultLimit;
  }

  return 0;
}

// Returns the current QUIC read encryption level. Only valid on QUIC
// connections.
enum ssl_encryption_level_t SSL_quic_read_level(const SSL *ssl) {
  assert(SSL_is_quic(ssl));
  return ssl->s3->quic_read_level;
}

// Returns the current QUIC write encryption level. Only valid on QUIC
// connections.
enum ssl_encryption_level_t SSL_quic_write_level(const SSL *ssl) {
  assert(SSL_is_quic(ssl));
  return ssl->s3->quic_write_level;
}

// Appends |len| bytes of handshake data received at |level| to the handshake
// buffer. Fails if this is not a QUIC connection, if |level| does not match
// the current read level, or if the buffered flight would exceed
// |SSL_quic_max_handshake_flight_len|.
int SSL_provide_quic_data(SSL *ssl, enum ssl_encryption_level_t level,
                          const uint8_t *data, size_t len) {
  if (!SSL_is_quic(ssl)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  if (level != ssl->s3->quic_read_level) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_ENCRYPTION_LEVEL_RECEIVED);
    return 0;
  }

  // |new_len < len| detects overflow of the addition.
  size_t new_len = (ssl->s3->hs_buf ? ssl->s3->hs_buf->length : 0) + len;
  if (new_len < len ||
      new_len > SSL_quic_max_handshake_flight_len(ssl, level)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESSIVE_MESSAGE_SIZE);
    return 0;
  }

  return tls_append_handshake_data(ssl, Span(data, len));
}

// Drives the handshake. Returns 1 on completion (or if no handshake is in
// progress), and <= 0 on error or if the transport blocked.
int SSL_do_handshake(SSL *ssl) {
  ssl_reset_error_state(ssl);

  if (ssl->do_handshake == NULL) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_CONNECTION_TYPE_NOT_SET);
    return -1;
  }

  if (!SSL_in_init(ssl)) {
    return 1;
  }

  // Run the handshake.
  SSL_HANDSHAKE *hs = ssl->s3->hs.get();

  bool early_return = false;
  int ret = ssl_run_handshake(hs, &early_return);
  ssl_do_info_callback(
      ssl, ssl->server ? SSL_CB_ACCEPT_EXIT : SSL_CB_CONNECT_EXIT, ret);
  if (ret <= 0) {
    return ret;
  }

  // Destroy the handshake object if the handshake has completely finished.
  if (!early_return) {
    ssl->s3->hs.reset();
    ssl_maybe_shed_handshake_config(ssl);
  }

  return 1;
}

// Configures the connection as a client, if not already set up, then runs the
// handshake.
int SSL_connect(SSL *ssl) {
  if (ssl->do_handshake == NULL) {
    // Not properly initialized yet
    SSL_set_connect_state(ssl);
  }

  return SSL_do_handshake(ssl);
}

// Configures the connection as a server, if not already set up, then runs the
// handshake.
int SSL_accept(SSL *ssl) {
  if (ssl->do_handshake == NULL) {
    // Not properly initialized yet
    SSL_set_accept_state(ssl);
  }

  return SSL_do_handshake(ssl);
}

// Dispatches a post-handshake message |msg|. In TLS 1.3 this defers to
// |tls13_post_handshake|; in earlier versions the only post-handshake message
// a client accepts is HelloRequest (renegotiation).
static int ssl_do_post_handshake(SSL *ssl, const SSLMessage &msg) {
  if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) {
    return tls13_post_handshake(ssl, msg);
  }

  // Check for renegotiation on the server before parsing to use the correct
  // error. Renegotiation is triggered by a different message for servers.
  if (ssl->server) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_NO_RENEGOTIATION);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_NO_RENEGOTIATION);
    return 0;
  }

  if (msg.type != SSL3_MT_HELLO_REQUEST || CBS_len(&msg.body) != 0) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
    OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_HELLO_REQUEST);
    return 0;
  }

  if (ssl->renegotiate_mode == ssl_renegotiate_ignore) {
    return 1;  // Ignore the HelloRequest.
} ssl->s3->renegotiate_pending = true; if (ssl->renegotiate_mode == ssl_renegotiate_explicit) { return 1; // Handle it later. } if (!SSL_renegotiate(ssl)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_NO_RENEGOTIATION); return 0; } return 1; } int SSL_process_quic_post_handshake(SSL *ssl) { ssl_reset_error_state(ssl); if (!SSL_is_quic(ssl) || SSL_in_init(ssl)) { OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } // Replay post-handshake message errors. if (!check_read_error(ssl)) { return 0; } // Process any buffered post-handshake messages. SSLMessage msg; while (ssl->method->get_message(ssl, &msg)) { // Handle the post-handshake message and try again. if (!ssl_do_post_handshake(ssl, msg)) { ssl_set_read_error(ssl); return 0; } ssl->method->next_message(ssl); } return 1; } static int ssl_read_impl(SSL *ssl) { ssl_reset_error_state(ssl); if (ssl->do_handshake == NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNINITIALIZED); return -1; } // Replay post-handshake message errors. if (!check_read_error(ssl)) { return -1; } while (ssl->s3->pending_app_data.empty()) { if (ssl->s3->renegotiate_pending) { ssl->s3->rwstate = SSL_ERROR_WANT_RENEGOTIATE; return -1; } // If a read triggered a DTLS ACK or retransmit, resolve that before reading // more. if (SSL_is_dtls(ssl)) { int ret = ssl->method->flush(ssl); if (ret <= 0) { return ret; } } // Complete the current handshake, if any. False Start will cause // |SSL_do_handshake| to return mid-handshake, so this may require multiple // iterations. while (!ssl_can_read(ssl)) { int ret = SSL_do_handshake(ssl); if (ret < 0) { return ret; } if (ret == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_SSL_HANDSHAKE_FAILURE); return -1; } } // Process any buffered post-handshake messages. SSLMessage msg; if (ssl->method->get_message(ssl, &msg)) { // If we received an interrupt in early read (EndOfEarlyData), loop again // for the handshake to process it. 
if (SSL_in_init(ssl)) { ssl->s3->hs->can_early_read = false; continue; } // Handle the post-handshake message and try again. if (!ssl_do_post_handshake(ssl, msg)) { ssl_set_read_error(ssl); return -1; } ssl->method->next_message(ssl); continue; // Loop again. We may have begun a new handshake. } uint8_t alert = SSL_AD_DECODE_ERROR; size_t consumed = 0; auto ret = ssl_open_app_data(ssl, &ssl->s3->pending_app_data, &consumed, &alert, ssl->s3->read_buffer.span()); bool retry; int bio_ret = ssl_handle_open_record(ssl, &retry, ret, consumed, alert); if (bio_ret <= 0) { return bio_ret; } if (!retry) { assert(!ssl->s3->pending_app_data.empty()); ssl->s3->key_update_count = 0; } } return 1; } int SSL_read(SSL *ssl, void *buf, int num) { int ret = SSL_peek(ssl, buf, num); if (ret <= 0) { return ret; } // TODO(davidben): In DTLS, should the rest of the record be discarded? DTLS // is not a stream. See https://crbug.com/boringssl/65. ssl->s3->pending_app_data = ssl->s3->pending_app_data.subspan(static_cast(ret)); if (ssl->s3->pending_app_data.empty()) { ssl->s3->read_buffer.DiscardConsumed(); } return ret; } int SSL_peek(SSL *ssl, void *buf, int num) { if (SSL_is_quic(ssl)) { OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return -1; } int ret = ssl_read_impl(ssl); if (ret <= 0) { return ret; } if (num <= 0) { return num; } size_t todo = std::min(ssl->s3->pending_app_data.size(), static_cast(num)); OPENSSL_memcpy(buf, ssl->s3->pending_app_data.data(), todo); return static_cast(todo); } int SSL_write(SSL *ssl, const void *buf, int num) { ssl_reset_error_state(ssl); if (SSL_is_quic(ssl)) { OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return -1; } if (ssl->do_handshake == NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNINITIALIZED); return -1; } int ret = 0; size_t bytes_written = 0; bool needs_handshake = false; do { // If necessary, complete the handshake implicitly. 
if (!ssl_can_write(ssl)) { ret = SSL_do_handshake(ssl); if (ret < 0) { return ret; } if (ret == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_SSL_HANDSHAKE_FAILURE); return -1; } } if (num < 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_LENGTH); return -1; } ret = ssl->method->write_app_data( ssl, &needs_handshake, &bytes_written, Span(static_cast(buf), static_cast(num))); } while (needs_handshake); return ret <= 0 ? ret : static_cast(bytes_written); } int SSL_key_update(SSL *ssl, int request_type) { ssl_reset_error_state(ssl); if (ssl->do_handshake == NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNINITIALIZED); return 0; } if (SSL_is_quic(ssl)) { OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } if (!ssl->s3->initial_handshake_complete) { OPENSSL_PUT_ERROR(SSL, SSL_R_HANDSHAKE_NOT_COMPLETE); return 0; } if (ssl_protocol_version(ssl) < TLS1_3_VERSION) { OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SSL_VERSION); return 0; } return tls13_add_key_update(ssl, request_type); } int SSL_shutdown(SSL *ssl) { ssl_reset_error_state(ssl); if (ssl->do_handshake == NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNINITIALIZED); return -1; } // If we are in the middle of a handshake, silently succeed. Consumers often // call this function before |SSL_free|, whether the handshake succeeded or // not. We assume the caller has already handled failed handshakes. if (SSL_in_init(ssl)) { return 1; } if (ssl->quiet_shutdown) { // Do nothing if configured not to send a close_notify. ssl->s3->write_shutdown = ssl_shutdown_close_notify; ssl->s3->read_shutdown = ssl_shutdown_close_notify; return 1; } // This function completes in two stages. It sends a close_notify and then it // waits for a close_notify to come in. Perform exactly one action and return // whether or not it succeeds. if (ssl->s3->write_shutdown != ssl_shutdown_close_notify) { // Send a close_notify. 
if (ssl_send_alert_impl(ssl, SSL3_AL_WARNING, SSL_AD_CLOSE_NOTIFY) <= 0) { return -1; } } else if (ssl->s3->alert_dispatch) { // Finish sending the close_notify. if (ssl->method->dispatch_alert(ssl) <= 0) { return -1; } } else if (ssl->s3->read_shutdown != ssl_shutdown_close_notify) { if (SSL_is_dtls(ssl)) { // Bidirectional shutdown doesn't make sense for an unordered // transport. DTLS alerts also aren't delivered reliably, so we may even // time out because the peer never received our close_notify. Report to // the caller that the channel has fully shut down. if (ssl->s3->read_shutdown == ssl_shutdown_error) { ERR_restore_state(ssl->s3->read_error.get()); return -1; } ssl->s3->read_shutdown = ssl_shutdown_close_notify; } else { // Process records until an error, close_notify, or application data. if (ssl_read_impl(ssl) > 0) { // We received some unexpected application data. OPENSSL_PUT_ERROR(SSL, SSL_R_APPLICATION_DATA_ON_SHUTDOWN); return -1; } if (ssl->s3->read_shutdown != ssl_shutdown_close_notify) { return -1; } } } // Return 0 for unidirectional shutdown and 1 for bidirectional shutdown. return ssl->s3->read_shutdown == ssl_shutdown_close_notify; } int SSL_send_fatal_alert(SSL *ssl, uint8_t alert) { if (ssl->s3->alert_dispatch) { if (ssl->s3->send_alert[0] != SSL3_AL_FATAL || ssl->s3->send_alert[1] != alert) { // We are already attempting to write a different alert. 
OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN);
      return -1;
    }
    return ssl->method->dispatch_alert(ssl);
  }
  return ssl_send_alert_impl(ssl, SSL3_AL_FATAL, alert);
}

// Stores the QUIC transport parameters to send to the peer. Fails if the
// connection has no config (e.g. it has been shed).
int SSL_set_quic_transport_params(SSL *ssl, const uint8_t *params,
                                  size_t params_len) {
  return ssl->config &&
         ssl->config->quic_transport_params.CopyFrom(Span(params, params_len));
}

// Returns the peer's QUIC transport parameters (empty until received).
void SSL_get_peer_quic_transport_params(const SSL *ssl,
                                        const uint8_t **out_params,
                                        size_t *out_params_len) {
  *out_params = ssl->s3->peer_quic_transport_params.data();
  *out_params_len = ssl->s3->peer_quic_transport_params.size();
}

// Stores the QUIC early-data context used to decide 0-RTT acceptance.
int SSL_set_quic_early_data_context(SSL *ssl, const uint8_t *context,
                                    size_t context_len) {
  return ssl->config && ssl->config->quic_early_data_context.CopyFrom(
                            Span(context, context_len));
}

void SSL_CTX_set_early_data_enabled(SSL_CTX *ctx, int enabled) {
  ctx->enable_early_data = !!enabled;
}

void SSL_set_early_data_enabled(SSL *ssl, int enabled) {
  ssl->enable_early_data = !!enabled;
}

// Returns one if the handshake is currently in the early-data phase.
int SSL_in_early_data(const SSL *ssl) {
  if (ssl->s3->hs == NULL) {
    return 0;
  }
  return ssl->s3->hs->in_early_data;
}

int SSL_early_data_accepted(const SSL *ssl) {
  return ssl->s3->early_data_accepted;
}

// Resets the connection after the server rejected early data, so the caller
// may retry |SSL_write|s as normal data. Aborts if the handshake is not
// actually paused at the early-data-rejected state.
void SSL_reset_early_data_reject(SSL *ssl) {
  SSL_HANDSHAKE *hs = ssl->s3->hs.get();
  if (hs == NULL ||  //
      hs->wait != ssl_hs_early_data_rejected) {
    abort();
  }

  hs->wait = ssl_hs_ok;
  hs->in_early_data = false;
  hs->early_session.reset();

  // Discard any unfinished writes from the perspective of |SSL_write|'s
  // retry. The handshake will transparently flush out the pending record
  // (discarded by the server) to keep the framing correct.
  ssl->s3->pending_write = {};
}

enum ssl_early_data_reason_t SSL_get_early_data_reason(const SSL *ssl) {
  return ssl->s3->early_data_reason;
}

// Maps an early-data reason to a stable string, or nullptr if unknown.
const char *SSL_early_data_reason_string(enum ssl_early_data_reason_t reason) {
  switch (reason) {
    case ssl_early_data_unknown:
      return "unknown";
    case ssl_early_data_disabled:
      return "disabled";
    case ssl_early_data_accepted:
      return "accepted";
    case ssl_early_data_protocol_version:
      return "protocol_version";
    case ssl_early_data_peer_declined:
      return "peer_declined";
    case ssl_early_data_no_session_offered:
      return "no_session_offered";
    case ssl_early_data_session_not_resumed:
      return "session_not_resumed";
    case ssl_early_data_unsupported_for_session:
      return "unsupported_for_session";
    case ssl_early_data_hello_retry_request:
      return "hello_retry_request";
    case ssl_early_data_alpn_mismatch:
      return "alpn_mismatch";
    case ssl_early_data_channel_id:
      return "channel_id";
    case ssl_early_data_ticket_age_skew:
      return "ticket_age_skew";
    case ssl_early_data_quic_parameter_mismatch:
      return "quic_parameter_mismatch";
    case ssl_early_data_alps_mismatch:
      return "alps_mismatch";
  }

  return nullptr;
}

// Maps a BIO retry reason to the corresponding |SSL_get_error| code.
static int bio_retry_reason_to_error(int reason) {
  switch (reason) {
    case BIO_RR_CONNECT:
      return SSL_ERROR_WANT_CONNECT;
    case BIO_RR_ACCEPT:
      return SSL_ERROR_WANT_ACCEPT;
    default:
      return SSL_ERROR_SYSCALL;
  }
}

// Translates the return value |ret_code| of a prior I/O call into an
// SSL_ERROR_* code, consulting the error queue and |rwstate|.
int SSL_get_error(const SSL *ssl, int ret_code) {
  if (ret_code > 0) {
    return SSL_ERROR_NONE;
  }

  // Make things return SSL_ERROR_SYSCALL when doing SSL_do_handshake etc,
  // where we do encode the error
  uint32_t err = ERR_peek_error();
  if (err != 0) {
    if (ERR_GET_LIB(err) == ERR_LIB_SYS) {
      return SSL_ERROR_SYSCALL;
    }
    return SSL_ERROR_SSL;
  }

  if (ret_code == 0) {
    if (ssl->s3->rwstate == SSL_ERROR_ZERO_RETURN) {
      return SSL_ERROR_ZERO_RETURN;
    }
    // An EOF was observed which violates the protocol, and the underlying
    // transport does not participate in the error queue. Bubble up to the
    // caller.
return SSL_ERROR_SYSCALL; } switch (ssl->s3->rwstate) { case SSL_ERROR_PENDING_SESSION: case SSL_ERROR_PENDING_CERTIFICATE: case SSL_ERROR_HANDOFF: case SSL_ERROR_HANDBACK: case SSL_ERROR_WANT_X509_LOOKUP: case SSL_ERROR_WANT_PRIVATE_KEY_OPERATION: case SSL_ERROR_PENDING_TICKET: case SSL_ERROR_EARLY_DATA_REJECTED: case SSL_ERROR_WANT_CERTIFICATE_VERIFY: case SSL_ERROR_WANT_RENEGOTIATE: case SSL_ERROR_HANDSHAKE_HINTS_READY: return ssl->s3->rwstate; case SSL_ERROR_WANT_READ: { if (SSL_is_quic(ssl)) { return SSL_ERROR_WANT_READ; } BIO *bio = SSL_get_rbio(ssl); if (BIO_should_read(bio)) { return SSL_ERROR_WANT_READ; } if (BIO_should_write(bio)) { // TODO(davidben): OpenSSL historically checked for writes on the read // BIO. Can this be removed? return SSL_ERROR_WANT_WRITE; } if (BIO_should_io_special(bio)) { return bio_retry_reason_to_error(BIO_get_retry_reason(bio)); } break; } case SSL_ERROR_WANT_WRITE: { BIO *bio = SSL_get_wbio(ssl); if (BIO_should_write(bio)) { return SSL_ERROR_WANT_WRITE; } if (BIO_should_read(bio)) { // TODO(davidben): OpenSSL historically checked for reads on the write // BIO. Can this be removed? 
return SSL_ERROR_WANT_READ; } if (BIO_should_io_special(bio)) { return bio_retry_reason_to_error(BIO_get_retry_reason(bio)); } break; } } return SSL_ERROR_SYSCALL; } const char *SSL_error_description(int err) { switch (err) { case SSL_ERROR_NONE: return "NONE"; case SSL_ERROR_SSL: return "SSL"; case SSL_ERROR_WANT_READ: return "WANT_READ"; case SSL_ERROR_WANT_WRITE: return "WANT_WRITE"; case SSL_ERROR_WANT_X509_LOOKUP: return "WANT_X509_LOOKUP"; case SSL_ERROR_SYSCALL: return "SYSCALL"; case SSL_ERROR_ZERO_RETURN: return "ZERO_RETURN"; case SSL_ERROR_WANT_CONNECT: return "WANT_CONNECT"; case SSL_ERROR_WANT_ACCEPT: return "WANT_ACCEPT"; case SSL_ERROR_PENDING_SESSION: return "PENDING_SESSION"; case SSL_ERROR_PENDING_CERTIFICATE: return "PENDING_CERTIFICATE"; case SSL_ERROR_WANT_PRIVATE_KEY_OPERATION: return "WANT_PRIVATE_KEY_OPERATION"; case SSL_ERROR_PENDING_TICKET: return "PENDING_TICKET"; case SSL_ERROR_EARLY_DATA_REJECTED: return "EARLY_DATA_REJECTED"; case SSL_ERROR_WANT_CERTIFICATE_VERIFY: return "WANT_CERTIFICATE_VERIFY"; case SSL_ERROR_HANDOFF: return "HANDOFF"; case SSL_ERROR_HANDBACK: return "HANDBACK"; case SSL_ERROR_WANT_RENEGOTIATE: return "WANT_RENEGOTIATE"; case SSL_ERROR_HANDSHAKE_HINTS_READY: return "HANDSHAKE_HINTS_READY"; default: return nullptr; } } uint32_t SSL_CTX_set_options(SSL_CTX *ctx, uint32_t options) { ctx->options |= options; return ctx->options; } uint32_t SSL_CTX_clear_options(SSL_CTX *ctx, uint32_t options) { ctx->options &= ~options; return ctx->options; } uint32_t SSL_CTX_get_options(const SSL_CTX *ctx) { return ctx->options; } uint32_t SSL_set_options(SSL *ssl, uint32_t options) { ssl->options |= options; return ssl->options; } uint32_t SSL_clear_options(SSL *ssl, uint32_t options) { ssl->options &= ~options; return ssl->options; } uint32_t SSL_get_options(const SSL *ssl) { return ssl->options; } uint32_t SSL_CTX_set_mode(SSL_CTX *ctx, uint32_t mode) { ctx->mode |= mode; return ctx->mode; } uint32_t SSL_CTX_clear_mode(SSL_CTX 
*ctx, uint32_t mode) { ctx->mode &= ~mode; return ctx->mode; } uint32_t SSL_CTX_get_mode(const SSL_CTX *ctx) { return ctx->mode; } uint32_t SSL_set_mode(SSL *ssl, uint32_t mode) { ssl->mode |= mode; return ssl->mode; } uint32_t SSL_clear_mode(SSL *ssl, uint32_t mode) { ssl->mode &= ~mode; return ssl->mode; } uint32_t SSL_get_mode(const SSL *ssl) { return ssl->mode; } void SSL_CTX_set0_buffer_pool(SSL_CTX *ctx, CRYPTO_BUFFER_POOL *pool) { ctx->pool = pool; } int SSL_get_tls_unique(const SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out) { *out_len = 0; OPENSSL_memset(out, 0, max_out); // tls-unique is not defined for TLS 1.3. if (!ssl->s3->initial_handshake_complete || ssl_protocol_version(ssl) >= TLS1_3_VERSION) { return 0; } // The tls-unique value is the first Finished message in the handshake, which // is the client's in a full handshake and the server's for a resumption. See // https://tools.ietf.org/html/rfc5929#section-3.1. Span finished = ssl->s3->previous_client_finished; if (ssl->session != NULL) { // tls-unique is broken for resumed sessions unless EMS is used. 
if (!ssl->session->extended_master_secret) { return 0; } finished = ssl->s3->previous_server_finished; } *out_len = finished.size(); if (finished.size() > max_out) { *out_len = max_out; } OPENSSL_memcpy(out, finished.data(), *out_len); return 1; } static int set_session_id_context(CERT *cert, const uint8_t *sid_ctx, size_t sid_ctx_len) { if (!cert->sid_ctx.TryCopyFrom(Span(sid_ctx, sid_ctx_len))) { OPENSSL_PUT_ERROR(SSL, SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG); return 0; } return 1; } int SSL_CTX_set_session_id_context(SSL_CTX *ctx, const uint8_t *sid_ctx, size_t sid_ctx_len) { return set_session_id_context(ctx->cert.get(), sid_ctx, sid_ctx_len); } int SSL_set_session_id_context(SSL *ssl, const uint8_t *sid_ctx, size_t sid_ctx_len) { if (!ssl->config) { return 0; } return set_session_id_context(ssl->config->cert.get(), sid_ctx, sid_ctx_len); } const uint8_t *SSL_get0_session_id_context(const SSL *ssl, size_t *out_len) { if (!ssl->config) { assert(ssl->config); *out_len = 0; return NULL; } *out_len = ssl->config->cert->sid_ctx.size(); return ssl->config->cert->sid_ctx.data(); } int SSL_get_fd(const SSL *ssl) { return SSL_get_rfd(ssl); } int SSL_get_rfd(const SSL *ssl) { int ret = -1; BIO *b = BIO_find_type(SSL_get_rbio(ssl), BIO_TYPE_DESCRIPTOR); if (b != NULL) { BIO_get_fd(b, &ret); } return ret; } int SSL_get_wfd(const SSL *ssl) { int ret = -1; BIO *b = BIO_find_type(SSL_get_wbio(ssl), BIO_TYPE_DESCRIPTOR); if (b != NULL) { BIO_get_fd(b, &ret); } return ret; } #if !defined(OPENSSL_NO_SOCK) int SSL_set_fd(SSL *ssl, int fd) { BIO *bio = BIO_new(BIO_s_socket()); if (bio == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_BUF_LIB); return 0; } BIO_set_fd(bio, fd, BIO_NOCLOSE); SSL_set_bio(ssl, bio, bio); return 1; } int SSL_set_wfd(SSL *ssl, int fd) { BIO *rbio = SSL_get_rbio(ssl); if (rbio == NULL || BIO_method_type(rbio) != BIO_TYPE_SOCKET || BIO_get_fd(rbio, NULL) != fd) { BIO *bio = BIO_new(BIO_s_socket()); if (bio == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_BUF_LIB); return 0; } 
BIO_set_fd(bio, fd, BIO_NOCLOSE); SSL_set0_wbio(ssl, bio); } else { // Copy the rbio over to the wbio. BIO_up_ref(rbio); SSL_set0_wbio(ssl, rbio); } return 1; } int SSL_set_rfd(SSL *ssl, int fd) { BIO *wbio = SSL_get_wbio(ssl); if (wbio == NULL || BIO_method_type(wbio) != BIO_TYPE_SOCKET || BIO_get_fd(wbio, NULL) != fd) { BIO *bio = BIO_new(BIO_s_socket()); if (bio == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_BUF_LIB); return 0; } BIO_set_fd(bio, fd, BIO_NOCLOSE); SSL_set0_rbio(ssl, bio); } else { // Copy the wbio over to the rbio. BIO_up_ref(wbio); SSL_set0_rbio(ssl, wbio); } return 1; } #endif // !OPENSSL_NO_SOCK static size_t copy_finished(void *out, size_t out_len, Span in) { if (out_len > in.size()) { out_len = in.size(); } OPENSSL_memcpy(out, in.data(), out_len); return in.size(); } size_t SSL_get_finished(const SSL *ssl, void *buf, size_t count) { if (!ssl->s3->initial_handshake_complete || ssl_protocol_version(ssl) >= TLS1_3_VERSION) { return 0; } if (ssl->server) { return copy_finished(buf, count, ssl->s3->previous_server_finished); } return copy_finished(buf, count, ssl->s3->previous_client_finished); } size_t SSL_get_peer_finished(const SSL *ssl, void *buf, size_t count) { if (!ssl->s3->initial_handshake_complete || ssl_protocol_version(ssl) >= TLS1_3_VERSION) { return 0; } if (ssl->server) { return copy_finished(buf, count, ssl->s3->previous_client_finished); } return copy_finished(buf, count, ssl->s3->previous_server_finished); } int SSL_get_verify_mode(const SSL *ssl) { if (!ssl->config) { assert(ssl->config); return -1; } return ssl->config->verify_mode; } int SSL_get_extms_support(const SSL *ssl) { // TLS 1.3 does not require extended master secret and always reports as // supporting it. if (ssl->s3->version == 0) { return 0; } if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { return 1; } // If the initial handshake completed, query the established session. 
if (ssl->s3->established_session != NULL) { return ssl->s3->established_session->extended_master_secret; } // Otherwise, query the in-progress handshake. if (ssl->s3->hs != NULL) { return ssl->s3->hs->extended_master_secret; } assert(0); return 0; } int SSL_CTX_get_read_ahead(const SSL_CTX *ctx) { return 0; } int SSL_get_read_ahead(const SSL *ssl) { return 0; } int SSL_CTX_set_read_ahead(SSL_CTX *ctx, int yes) { return 1; } int SSL_set_read_ahead(SSL *ssl, int yes) { return 1; } int SSL_pending(const SSL *ssl) { return static_cast(ssl->s3->pending_app_data.size()); } int SSL_has_pending(const SSL *ssl) { return SSL_pending(ssl) != 0 || !ssl->s3->read_buffer.empty(); } static bool has_cert_and_key(const SSL_CREDENTIAL *cred) { // TODO(davidben): If |cred->key_method| is set, that should be fine too. if (cred->privkey == nullptr) { OPENSSL_PUT_ERROR(SSL, SSL_R_NO_PRIVATE_KEY_ASSIGNED); return false; } if (cred->chain == nullptr || sk_CRYPTO_BUFFER_value(cred->chain.get(), 0) == nullptr) { OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CERTIFICATE_ASSIGNED); return false; } return true; } int SSL_CTX_check_private_key(const SSL_CTX *ctx) { // There is no need to actually check consistency because inconsistent values // can never be configured. return has_cert_and_key(ctx->cert->legacy_credential.get()); } int SSL_check_private_key(const SSL *ssl) { if (!ssl->config) { return 0; } // There is no need to actually check consistency because inconsistent values // can never be configured. return has_cert_and_key(ssl->config->cert->legacy_credential.get()); } long SSL_get_default_timeout(const SSL *ssl) { return SSL_DEFAULT_SESSION_TIMEOUT; } int SSL_renegotiate(SSL *ssl) { // Caller-initiated renegotiation is not supported. if (!ssl->s3->renegotiate_pending) { OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } if (!ssl_can_renegotiate(ssl)) { OPENSSL_PUT_ERROR(SSL, SSL_R_NO_RENEGOTIATION); return 0; } // We should not have told the caller to release the private key. 
assert(!SSL_can_release_private_key(ssl)); // Renegotiation is only supported at quiescent points in the application // protocol, namely in HTTPS, just before reading the HTTP response. // Require the record-layer be idle and avoid complexities of sending a // handshake record while an application_data record is being written. if (!ssl->s3->write_buffer.empty() || ssl->s3->write_shutdown != ssl_shutdown_none) { OPENSSL_PUT_ERROR(SSL, SSL_R_NO_RENEGOTIATION); return 0; } // Begin a new handshake. if (ssl->s3->hs != nullptr) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return 0; } ssl->s3->hs = ssl_handshake_new(ssl); if (ssl->s3->hs == nullptr) { return 0; } ssl->s3->renegotiate_pending = false; ssl->s3->total_renegotiations++; return 1; } int SSL_renegotiate_pending(SSL *ssl) { return SSL_in_init(ssl) && ssl->s3->initial_handshake_complete; } int SSL_total_renegotiations(const SSL *ssl) { return ssl->s3->total_renegotiations; } size_t SSL_CTX_get_max_cert_list(const SSL_CTX *ctx) { return ctx->max_cert_list; } void SSL_CTX_set_max_cert_list(SSL_CTX *ctx, size_t max_cert_list) { if (max_cert_list > kMaxHandshakeSize) { max_cert_list = kMaxHandshakeSize; } ctx->max_cert_list = (uint32_t)max_cert_list; } size_t SSL_get_max_cert_list(const SSL *ssl) { return ssl->max_cert_list; } void SSL_set_max_cert_list(SSL *ssl, size_t max_cert_list) { if (max_cert_list > kMaxHandshakeSize) { max_cert_list = kMaxHandshakeSize; } ssl->max_cert_list = (uint32_t)max_cert_list; } int SSL_CTX_set_max_send_fragment(SSL_CTX *ctx, size_t max_send_fragment) { if (max_send_fragment < 512) { max_send_fragment = 512; } if (max_send_fragment > SSL3_RT_MAX_PLAIN_LENGTH) { max_send_fragment = SSL3_RT_MAX_PLAIN_LENGTH; } ctx->max_send_fragment = (uint16_t)max_send_fragment; return 1; } int SSL_set_max_send_fragment(SSL *ssl, size_t max_send_fragment) { if (max_send_fragment < 512) { max_send_fragment = 512; } if (max_send_fragment > SSL3_RT_MAX_PLAIN_LENGTH) { max_send_fragment = 
SSL3_RT_MAX_PLAIN_LENGTH; } ssl->max_send_fragment = (uint16_t)max_send_fragment; return 1; } int SSL_set_mtu(SSL *ssl, unsigned mtu) { if (!SSL_is_dtls(ssl) || mtu < dtls1_min_mtu()) { return 0; } ssl->d1->mtu = mtu; return 1; } int SSL_get_secure_renegotiation_support(const SSL *ssl) { if (ssl->s3->version == 0) { return 0; } return ssl_protocol_version(ssl) >= TLS1_3_VERSION || ssl->s3->send_connection_binding; } size_t SSL_CTX_sess_number(const SSL_CTX *ctx) { MutexReadLock lock(const_cast(&ctx->lock)); return lh_SSL_SESSION_num_items(ctx->sessions); } unsigned long SSL_CTX_sess_set_cache_size(SSL_CTX *ctx, unsigned long size) { unsigned long ret = ctx->session_cache_size; ctx->session_cache_size = size; return ret; } unsigned long SSL_CTX_sess_get_cache_size(const SSL_CTX *ctx) { return ctx->session_cache_size; } int SSL_CTX_set_session_cache_mode(SSL_CTX *ctx, int mode) { int ret = ctx->session_cache_mode; ctx->session_cache_mode = mode; return ret; } int SSL_CTX_get_session_cache_mode(const SSL_CTX *ctx) { return ctx->session_cache_mode; } int SSL_CTX_get_tlsext_ticket_keys(SSL_CTX *ctx, void *out, size_t len) { if (out == NULL) { return 48; } if (len != 48) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_TICKET_KEYS_LENGTH); return 0; } // The default ticket keys are initialized lazily. Trigger a key // rotation to initialize them. 
if (!ssl_ctx_rotate_ticket_encryption_key(ctx)) { return 0; } uint8_t *out_bytes = reinterpret_cast(out); MutexReadLock lock(&ctx->lock); OPENSSL_memcpy(out_bytes, ctx->ticket_key_current->name, 16); OPENSSL_memcpy(out_bytes + 16, ctx->ticket_key_current->hmac_key, 16); OPENSSL_memcpy(out_bytes + 32, ctx->ticket_key_current->aes_key, 16); return 1; } int SSL_CTX_set_tlsext_ticket_keys(SSL_CTX *ctx, const void *in, size_t len) { if (in == NULL) { return 48; } if (len != 48) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_TICKET_KEYS_LENGTH); return 0; } auto key = MakeUnique(); if (!key) { return 0; } const uint8_t *in_bytes = reinterpret_cast(in); OPENSSL_memcpy(key->name, in_bytes, 16); OPENSSL_memcpy(key->hmac_key, in_bytes + 16, 16); OPENSSL_memcpy(key->aes_key, in_bytes + 32, 16); // Disable automatic key rotation for manually-configured keys. This is now // the caller's responsibility. key->next_rotation_tv_sec = 0; ctx->ticket_key_current = std::move(key); ctx->ticket_key_prev.reset(); return 1; } int SSL_CTX_set_tlsext_ticket_key_cb( SSL_CTX *ctx, int (*callback)(SSL *ssl, uint8_t *key_name, uint8_t *iv, EVP_CIPHER_CTX *ctx, HMAC_CTX *hmac_ctx, int encrypt)) { ctx->ticket_key_cb = callback; return 1; } static bool check_group_ids(Span group_ids) { for (uint16_t group_id : group_ids) { if (ssl_group_id_to_nid(group_id) == NID_undef) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_ELLIPTIC_CURVE); return false; } } return true; } int SSL_CTX_set1_group_ids(SSL_CTX *ctx, const uint16_t *group_ids, size_t num_group_ids) { auto span = Span(group_ids, num_group_ids); return check_group_ids(span) && ctx->supported_group_list.CopyFrom(span); } int SSL_set1_group_ids(SSL *ssl, const uint16_t *group_ids, size_t num_group_ids) { if (!ssl->config) { return 0; } auto span = Span(group_ids, num_group_ids); return check_group_ids(span) && ssl->config->supported_group_list.CopyFrom(span); } static bool ssl_nids_to_group_ids(Array *out_group_ids, Span nids) { Array group_ids; if 
(!group_ids.InitForOverwrite(nids.size())) { return false; } for (size_t i = 0; i < nids.size(); i++) { if (!ssl_nid_to_group_id(&group_ids[i], nids[i])) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_ELLIPTIC_CURVE); return false; } } *out_group_ids = std::move(group_ids); return true; } int SSL_CTX_set1_groups(SSL_CTX *ctx, const int *groups, size_t num_groups) { return ssl_nids_to_group_ids(&ctx->supported_group_list, Span(groups, num_groups)); } int SSL_set1_groups(SSL *ssl, const int *groups, size_t num_groups) { if (!ssl->config) { return 0; } return ssl_nids_to_group_ids(&ssl->config->supported_group_list, Span(groups, num_groups)); } static bool ssl_str_to_group_ids(Array *out_group_ids, const char *str) { // Count the number of groups in the list. size_t count = 0; const char *ptr = str, *col; do { col = strchr(ptr, ':'); count++; if (col) { ptr = col + 1; } } while (col); Array group_ids; if (!group_ids.InitForOverwrite(count)) { return false; } size_t i = 0; ptr = str; do { col = strchr(ptr, ':'); if (!ssl_name_to_group_id(&group_ids[i++], ptr, col ? 
(size_t)(col - ptr) : strlen(ptr))) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_ELLIPTIC_CURVE); return false; } if (col) { ptr = col + 1; } } while (col); assert(i == count); *out_group_ids = std::move(group_ids); return true; } int SSL_CTX_set1_groups_list(SSL_CTX *ctx, const char *groups) { return ssl_str_to_group_ids(&ctx->supported_group_list, groups); } int SSL_set1_groups_list(SSL *ssl, const char *groups) { if (!ssl->config) { return 0; } return ssl_str_to_group_ids(&ssl->config->supported_group_list, groups); } uint16_t SSL_get_group_id(const SSL *ssl) { SSL_SESSION *session = SSL_get_session(ssl); if (session == NULL) { return 0; } return session->group_id; } int SSL_get_negotiated_group(const SSL *ssl) { uint16_t group_id = SSL_get_group_id(ssl); if (group_id == 0) { return NID_undef; } return ssl_group_id_to_nid(group_id); } int SSL_CTX_set_tmp_dh(SSL_CTX *ctx, const DH *dh) { return 1; } int SSL_set_tmp_dh(SSL *ssl, const DH *dh) { return 1; } STACK_OF(SSL_CIPHER) *SSL_CTX_get_ciphers(const SSL_CTX *ctx) { return ctx->cipher_list->ciphers.get(); } int SSL_CTX_cipher_in_group(const SSL_CTX *ctx, size_t i) { if (i >= sk_SSL_CIPHER_num(ctx->cipher_list->ciphers.get())) { return 0; } return ctx->cipher_list->in_group_flags[i]; } STACK_OF(SSL_CIPHER) *SSL_get_ciphers(const SSL *ssl) { if (ssl == NULL) { return NULL; } if (ssl->config == NULL) { assert(ssl->config); return NULL; } return ssl->config->cipher_list ? ssl->config->cipher_list->ciphers.get() : ssl->ctx->cipher_list->ciphers.get(); } const char *SSL_get_cipher_list(const SSL *ssl, int n) { if (ssl == NULL) { return NULL; } STACK_OF(SSL_CIPHER) *sk = SSL_get_ciphers(ssl); if (sk == NULL || n < 0 || (size_t)n >= sk_SSL_CIPHER_num(sk)) { return NULL; } const SSL_CIPHER *c = sk_SSL_CIPHER_value(sk, n); if (c == NULL) { return NULL; } return c->name; } int SSL_CTX_set_cipher_list(SSL_CTX *ctx, const char *str) { const bool has_aes_hw = ctx->aes_hw_override ? 
ctx->aes_hw_override_value : EVP_has_aes_hardware(); return ssl_create_cipher_list(&ctx->cipher_list, has_aes_hw, str, false /* not strict */); } int SSL_CTX_set_strict_cipher_list(SSL_CTX *ctx, const char *str) { const bool has_aes_hw = ctx->aes_hw_override ? ctx->aes_hw_override_value : EVP_has_aes_hardware(); return ssl_create_cipher_list(&ctx->cipher_list, has_aes_hw, str, true /* strict */); } int SSL_set_cipher_list(SSL *ssl, const char *str) { if (!ssl->config) { return 0; } const bool has_aes_hw = ssl->config->aes_hw_override ? ssl->config->aes_hw_override_value : EVP_has_aes_hardware(); return ssl_create_cipher_list(&ssl->config->cipher_list, has_aes_hw, str, false /* not strict */); } int SSL_set_strict_cipher_list(SSL *ssl, const char *str) { if (!ssl->config) { return 0; } const bool has_aes_hw = ssl->config->aes_hw_override ? ssl->config->aes_hw_override_value : EVP_has_aes_hardware(); return ssl_create_cipher_list(&ssl->config->cipher_list, has_aes_hw, str, true /* strict */); } const char *SSL_get_servername(const SSL *ssl, const int type) { if (type != TLSEXT_NAMETYPE_host_name) { return NULL; } // Historically, |SSL_get_servername| was also the configuration getter // corresponding to |SSL_set_tlsext_host_name|. 
  if (ssl->hostname != nullptr) {
    return ssl->hostname.get();
  }
  return ssl->s3->hostname.get();
}

// Returns TLSEXT_NAMETYPE_host_name when an SNI name is available, else -1.
int SSL_get_servername_type(const SSL *ssl) {
  if (SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name) == NULL) {
    return -1;
  }
  return TLSEXT_NAMETYPE_host_name;
}

// Installs a custom certificate-verification callback on the context.
void SSL_CTX_set_custom_verify(
    SSL_CTX *ctx, int mode,
    enum ssl_verify_result_t (*callback)(SSL *ssl, uint8_t *out_alert)) {
  ctx->verify_mode = mode;
  ctx->custom_verify_callback = callback;
}

// Per-connection variant; a no-op once the config has been shed.
void SSL_set_custom_verify(
    SSL *ssl, int mode,
    enum ssl_verify_result_t (*callback)(SSL *ssl, uint8_t *out_alert)) {
  if (!ssl->config) {
    return;
  }
  ssl->config->verify_mode = mode;
  ssl->config->custom_verify_callback = callback;
}

// Enables requesting signed certificate timestamps from servers.
void SSL_CTX_enable_signed_cert_timestamps(SSL_CTX *ctx) {
  ctx->signed_cert_timestamps_enabled = true;
}

void SSL_enable_signed_cert_timestamps(SSL *ssl) {
  if (!ssl->config) {
    return;
  }
  ssl->config->signed_cert_timestamps_enabled = true;
}

// Enables requesting a stapled OCSP response from servers.
void SSL_CTX_enable_ocsp_stapling(SSL_CTX *ctx) {
  ctx->ocsp_stapling_enabled = true;
}

void SSL_enable_ocsp_stapling(SSL *ssl) {
  if (!ssl->config) {
    return;
  }
  ssl->config->ocsp_stapling_enabled = true;
}

// Exposes the session's SCT list; empty on servers or when none was received.
void SSL_get0_signed_cert_timestamp_list(const SSL *ssl, const uint8_t **out,
                                         size_t *out_len) {
  SSL_SESSION *session = SSL_get_session(ssl);
  if (ssl->server || !session || !session->signed_cert_timestamp_list) {
    *out_len = 0;
    *out = NULL;
    return;
  }
  *out = CRYPTO_BUFFER_data(session->signed_cert_timestamp_list.get());
  *out_len = CRYPTO_BUFFER_len(session->signed_cert_timestamp_list.get());
}

// Exposes the session's stapled OCSP response; empty on servers or when none.
void SSL_get0_ocsp_response(const SSL *ssl, const uint8_t **out,
                            size_t *out_len) {
  SSL_SESSION *session = SSL_get_session(ssl);
  if (ssl->server || !session || !session->ocsp_response) {
    *out_len = 0;
    *out = NULL;
    return;
  }
  *out = CRYPTO_BUFFER_data(session->ocsp_response.get());
  *out_len = CRYPTO_BUFFER_len(session->ocsp_response.get());
}

// Sets (or clears, when |name| is NULL) the SNI hostname sent by a client.
int SSL_set_tlsext_host_name(SSL *ssl, const char *name) {
  ssl->hostname.reset();
  if (name == nullptr) {
    return 1;
  }

  size_t len =
strlen(name); if (len == 0 || len > TLSEXT_MAXLEN_host_name) { OPENSSL_PUT_ERROR(SSL, SSL_R_SSL3_EXT_INVALID_SERVERNAME); return 0; } ssl->hostname.reset(OPENSSL_strdup(name)); if (ssl->hostname == nullptr) { return 0; } return 1; } int SSL_CTX_set_tlsext_servername_callback( SSL_CTX *ctx, int (*callback)(SSL *ssl, int *out_alert, void *arg)) { ctx->servername_callback = callback; return 1; } int SSL_CTX_set_tlsext_servername_arg(SSL_CTX *ctx, void *arg) { ctx->servername_arg = arg; return 1; } int SSL_select_next_proto(uint8_t **out, uint8_t *out_len, const uint8_t *peer, unsigned peer_len, const uint8_t *supported, unsigned supported_len) { *out = nullptr; *out_len = 0; // Both |peer| and |supported| must be valid protocol lists, but |peer| may be // empty in NPN. auto peer_span = Span(peer, peer_len); auto supported_span = Span(supported, supported_len); if ((!peer_span.empty() && !ssl_is_valid_alpn_list(peer_span)) || !ssl_is_valid_alpn_list(supported_span)) { return OPENSSL_NPN_NO_OVERLAP; } // For each protocol in peer preference order, see if we support it. CBS cbs = peer_span, proto; while (CBS_len(&cbs) != 0) { if (!CBS_get_u8_length_prefixed(&cbs, &proto) || CBS_len(&proto) == 0) { return OPENSSL_NPN_NO_OVERLAP; } if (ssl_alpn_list_contains_protocol(Span(supported, supported_len), proto)) { // This function is not const-correct for compatibility with existing // callers. *out = const_cast(CBS_data(&proto)); // A u8 length prefix will fit in |uint8_t|. *out_len = static_cast(CBS_len(&proto)); return OPENSSL_NPN_NEGOTIATED; } } // There's no overlap between our protocols and the peer's list. In ALPN, the // caller is expected to fail the connection with no_application_protocol. In // NPN, the caller is expected to opportunistically select the first protocol. // See draft-agl-tls-nextprotoneg-04, section 6. cbs = supported_span; if (!CBS_get_u8_length_prefixed(&cbs, &proto) || CBS_len(&proto) == 0) { return OPENSSL_NPN_NO_OVERLAP; } // See above. 
*out = const_cast(CBS_data(&proto)); *out_len = static_cast(CBS_len(&proto)); return OPENSSL_NPN_NO_OVERLAP; } void SSL_get0_next_proto_negotiated(const SSL *ssl, const uint8_t **out_data, unsigned *out_len) { // NPN protocols have one-byte lengths, so they must fit in |unsigned|. assert(ssl->s3->next_proto_negotiated.size() <= UINT_MAX); *out_data = ssl->s3->next_proto_negotiated.data(); *out_len = static_cast(ssl->s3->next_proto_negotiated.size()); } void SSL_CTX_set_next_protos_advertised_cb( SSL_CTX *ctx, int (*cb)(SSL *ssl, const uint8_t **out, unsigned *out_len, void *arg), void *arg) { ctx->next_protos_advertised_cb = cb; ctx->next_protos_advertised_cb_arg = arg; } void SSL_CTX_set_next_proto_select_cb(SSL_CTX *ctx, int (*cb)(SSL *ssl, uint8_t **out, uint8_t *out_len, const uint8_t *in, unsigned in_len, void *arg), void *arg) { ctx->next_proto_select_cb = cb; ctx->next_proto_select_cb_arg = arg; } int SSL_CTX_set_alpn_protos(SSL_CTX *ctx, const uint8_t *protos, size_t protos_len) { // Note this function's return value is backwards. auto span = Span(protos, protos_len); if (!span.empty() && !ssl_is_valid_alpn_list(span)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_ALPN_PROTOCOL_LIST); return 1; } return ctx->alpn_client_proto_list.CopyFrom(span) ? 0 : 1; } int SSL_set_alpn_protos(SSL *ssl, const uint8_t *protos, size_t protos_len) { // Note this function's return value is backwards. if (!ssl->config) { return 1; } auto span = Span(protos, protos_len); if (!span.empty() && !ssl_is_valid_alpn_list(span)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_ALPN_PROTOCOL_LIST); return 1; } return ssl->config->alpn_client_proto_list.CopyFrom(span) ? 
0 : 1; } void SSL_CTX_set_alpn_select_cb(SSL_CTX *ctx, int (*cb)(SSL *ssl, const uint8_t **out, uint8_t *out_len, const uint8_t *in, unsigned in_len, void *arg), void *arg) { ctx->alpn_select_cb = cb; ctx->alpn_select_cb_arg = arg; } void SSL_get0_alpn_selected(const SSL *ssl, const uint8_t **out_data, unsigned *out_len) { Span protocol; if (SSL_in_early_data(ssl) && !ssl->server) { protocol = ssl->s3->hs->early_session->early_alpn; } else { protocol = ssl->s3->alpn_selected; } // ALPN protocols have one-byte lengths, so they must fit in |unsigned|. assert(protocol.size() < UINT_MAX); *out_data = protocol.data(); *out_len = static_cast(protocol.size()); } void SSL_CTX_set_allow_unknown_alpn_protos(SSL_CTX *ctx, int enabled) { ctx->allow_unknown_alpn_protos = !!enabled; } int SSL_add_application_settings(SSL *ssl, const uint8_t *proto, size_t proto_len, const uint8_t *settings, size_t settings_len) { if (!ssl->config) { return 0; } ALPSConfig config; if (!config.protocol.CopyFrom(Span(proto, proto_len)) || !config.settings.CopyFrom(Span(settings, settings_len)) || !ssl->config->alps_configs.Push(std::move(config))) { return 0; } return 1; } void SSL_get0_peer_application_settings(const SSL *ssl, const uint8_t **out_data, size_t *out_len) { const SSL_SESSION *session = SSL_get_session(ssl); Span settings = session ? 
session->peer_application_settings : Span(); *out_data = settings.data(); *out_len = settings.size(); } int SSL_has_application_settings(const SSL *ssl) { const SSL_SESSION *session = SSL_get_session(ssl); return session && session->has_application_settings; } void SSL_set_alps_use_new_codepoint(SSL *ssl, int use_new) { if (!ssl->config) { return; } ssl->config->alps_use_new_codepoint = !!use_new; } int SSL_CTX_add_cert_compression_alg(SSL_CTX *ctx, uint16_t alg_id, ssl_cert_compression_func_t compress, ssl_cert_decompression_func_t decompress) { assert(compress != nullptr || decompress != nullptr); for (const auto &alg : ctx->cert_compression_algs) { if (alg.alg_id == alg_id) { return 0; } } CertCompressionAlg alg; alg.alg_id = alg_id; alg.compress = compress; alg.decompress = decompress; return ctx->cert_compression_algs.Push(alg); } void SSL_CTX_set_tls_channel_id_enabled(SSL_CTX *ctx, int enabled) { ctx->channel_id_enabled = !!enabled; } int SSL_CTX_enable_tls_channel_id(SSL_CTX *ctx) { SSL_CTX_set_tls_channel_id_enabled(ctx, 1); return 1; } void SSL_set_tls_channel_id_enabled(SSL *ssl, int enabled) { if (!ssl->config) { return; } ssl->config->channel_id_enabled = !!enabled; } int SSL_enable_tls_channel_id(SSL *ssl) { SSL_set_tls_channel_id_enabled(ssl, 1); return 1; } static int is_p256_key(EVP_PKEY *private_key) { const EC_KEY *ec_key = EVP_PKEY_get0_EC_KEY(private_key); return ec_key != NULL && EC_GROUP_get_curve_name(EC_KEY_get0_group(ec_key)) == NID_X9_62_prime256v1; } int SSL_CTX_set1_tls_channel_id(SSL_CTX *ctx, EVP_PKEY *private_key) { if (!is_p256_key(private_key)) { OPENSSL_PUT_ERROR(SSL, SSL_R_CHANNEL_ID_NOT_P256); return 0; } ctx->channel_id_private = UpRef(private_key); return 1; } int SSL_set1_tls_channel_id(SSL *ssl, EVP_PKEY *private_key) { if (!ssl->config) { return 0; } if (!is_p256_key(private_key)) { OPENSSL_PUT_ERROR(SSL, SSL_R_CHANNEL_ID_NOT_P256); return 0; } ssl->config->channel_id_private = UpRef(private_key); return 1; } size_t 
SSL_get_tls_channel_id(SSL *ssl, uint8_t *out, size_t max_out) { if (!ssl->s3->channel_id_valid) { return 0; } OPENSSL_memcpy(out, ssl->s3->channel_id, (max_out < 64) ? max_out : 64); return 64; } size_t SSL_get0_certificate_types(const SSL *ssl, const uint8_t **out_types) { Span types; if (!ssl->server && ssl->s3->hs != nullptr) { types = ssl->s3->hs->certificate_types; } *out_types = types.data(); return types.size(); } size_t SSL_get0_peer_verify_algorithms(const SSL *ssl, const uint16_t **out_sigalgs) { Span sigalgs; if (ssl->s3->hs != nullptr) { sigalgs = ssl->s3->hs->peer_sigalgs; } *out_sigalgs = sigalgs.data(); return sigalgs.size(); } size_t SSL_get0_peer_delegation_algorithms(const SSL *ssl, const uint16_t **out_sigalgs) { Span sigalgs; if (ssl->s3->hs != nullptr) { sigalgs = ssl->s3->hs->peer_delegated_credential_sigalgs; } *out_sigalgs = sigalgs.data(); return sigalgs.size(); } EVP_PKEY *SSL_get_privatekey(const SSL *ssl) { if (!ssl->config) { assert(ssl->config); return nullptr; } return ssl->config->cert->legacy_credential->privkey.get(); } EVP_PKEY *SSL_CTX_get0_privatekey(const SSL_CTX *ctx) { return ctx->cert->legacy_credential->privkey.get(); } const SSL_CIPHER *SSL_get_current_cipher(const SSL *ssl) { const SSL_SESSION *session = SSL_get_session(ssl); return session == nullptr ? 
nullptr : session->cipher;
}

// True when the session was resumed; early data also counts as reuse.
int SSL_session_reused(const SSL *ssl) {
  return ssl->s3->session_reused || SSL_in_early_data(ssl);
}

// TLS compression is not supported; these always report none.
const COMP_METHOD *SSL_get_current_compression(SSL *ssl) { return NULL; }

const COMP_METHOD *SSL_get_current_expansion(SSL *ssl) { return NULL; }

int SSL_get_server_tmp_key(SSL *ssl, EVP_PKEY **out_key) { return 0; }

void SSL_CTX_set_quiet_shutdown(SSL_CTX *ctx, int mode) {
  ctx->quiet_shutdown = (mode != 0);
}

int SSL_CTX_get_quiet_shutdown(const SSL_CTX *ctx) {
  return ctx->quiet_shutdown;
}

void SSL_set_quiet_shutdown(SSL *ssl, int mode) {
  ssl->quiet_shutdown = (mode != 0);
}

int SSL_get_quiet_shutdown(const SSL *ssl) { return ssl->quiet_shutdown; }

// Marks close_notify as sent and/or received; bits may only be added.
void SSL_set_shutdown(SSL *ssl, int mode) {
  // It is an error to clear any bits that have already been set. (We can't try
  // to get a second close_notify or send two.)
  assert((SSL_get_shutdown(ssl) & mode) == SSL_get_shutdown(ssl));

  if (mode & SSL_RECEIVED_SHUTDOWN &&
      ssl->s3->read_shutdown == ssl_shutdown_none) {
    ssl->s3->read_shutdown = ssl_shutdown_close_notify;
  }

  if (mode & SSL_SENT_SHUTDOWN &&
      ssl->s3->write_shutdown == ssl_shutdown_none) {
    ssl->s3->write_shutdown = ssl_shutdown_close_notify;
  }
}

int SSL_get_shutdown(const SSL *ssl) {
  int ret = 0;
  if (ssl->s3->read_shutdown != ssl_shutdown_none) {
    // Historically, OpenSSL set |SSL_RECEIVED_SHUTDOWN| on both close_notify
    // and fatal alert.
    ret |= SSL_RECEIVED_SHUTDOWN;
  }
  if (ssl->s3->write_shutdown == ssl_shutdown_close_notify) {
    // Historically, OpenSSL set |SSL_SENT_SHUTDOWN| on only close_notify.
    ret |= SSL_SENT_SHUTDOWN;
  }
  return ret;
}

SSL_CTX *SSL_get_SSL_CTX(const SSL *ssl) { return ssl->ctx.get(); }

// Re-points |ssl| at a new context, copying the context's certificate state.
SSL_CTX *SSL_set_SSL_CTX(SSL *ssl, SSL_CTX *ctx) {
  if (!ssl->config) {
    return NULL;
  }
  if (ssl->ctx.get() == ctx) {
    return ssl->ctx.get();
  }

  // One cannot change the X.509 callbacks during a connection.
if (ssl->ctx->x509_method != ctx->x509_method) { assert(0); return NULL; } UniquePtr new_cert = ssl_cert_dup(ctx->cert.get()); if (!new_cert) { return nullptr; } ssl->config->cert = std::move(new_cert); ssl->ctx = UpRef(ctx); ssl->enable_early_data = ssl->ctx->enable_early_data; return ssl->ctx.get(); } void SSL_set_info_callback(SSL *ssl, void (*cb)(const SSL *ssl, int type, int value)) { ssl->info_callback = cb; } void (*SSL_get_info_callback(const SSL *ssl))(const SSL *ssl, int type, int value) { return ssl->info_callback; } int SSL_state(const SSL *ssl) { return SSL_in_init(ssl) ? SSL_ST_INIT : SSL_ST_OK; } void SSL_set_state(SSL *ssl, int state) {} char *SSL_get_shared_ciphers(const SSL *ssl, char *buf, int len) { if (len <= 0) { return NULL; } buf[0] = '\0'; return buf; } int SSL_get_shared_sigalgs(SSL *ssl, int idx, int *psign, int *phash, int *psignandhash, uint8_t *rsig, uint8_t *rhash) { return 0; } int SSL_CTX_set_quic_method(SSL_CTX *ctx, const SSL_QUIC_METHOD *quic_method) { if (ctx->method->is_dtls) { return 0; } ctx->quic_method = quic_method; return 1; } int SSL_set_quic_method(SSL *ssl, const SSL_QUIC_METHOD *quic_method) { if (ssl->method->is_dtls) { return 0; } ssl->quic_method = quic_method; return 1; } int SSL_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func) { return CRYPTO_get_ex_new_index_ex(&g_ex_data_class_ssl, argl, argp, free_func); } int SSL_set_ex_data(SSL *ssl, int idx, void *data) { return CRYPTO_set_ex_data(&ssl->ex_data, idx, data); } void *SSL_get_ex_data(const SSL *ssl, int idx) { return CRYPTO_get_ex_data(&ssl->ex_data, idx); } int SSL_CTX_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func) { return CRYPTO_get_ex_new_index_ex(&g_ex_data_class_ssl_ctx, argl, argp, free_func); } int SSL_CTX_set_ex_data(SSL_CTX *ctx, int idx, void *data) { return CRYPTO_set_ex_data(&ctx->ex_data, idx, data); } 
void *SSL_CTX_get_ex_data(const SSL_CTX *ctx, int idx) { return CRYPTO_get_ex_data(&ctx->ex_data, idx); } int SSL_want(const SSL *ssl) { // Historically, OpenSSL did not track |SSL_ERROR_ZERO_RETURN| as an |rwstate| // value. We do, but map it back to |SSL_ERROR_NONE| to preserve the original // behavior. return ssl->s3->rwstate == SSL_ERROR_ZERO_RETURN ? SSL_ERROR_NONE : ssl->s3->rwstate; } void SSL_CTX_set_tmp_rsa_callback(SSL_CTX *ctx, RSA *(*cb)(SSL *ssl, int is_export, int keylength)) {} void SSL_set_tmp_rsa_callback(SSL *ssl, RSA *(*cb)(SSL *ssl, int is_export, int keylength)) {} void SSL_CTX_set_tmp_dh_callback(SSL_CTX *ctx, DH *(*cb)(SSL *ssl, int is_export, int keylength)) {} void SSL_set_tmp_dh_callback(SSL *ssl, DH *(*cb)(SSL *ssl, int is_export, int keylength)) {} static int use_psk_identity_hint(UniquePtr *out, const char *identity_hint) { if (identity_hint != NULL && strlen(identity_hint) > PSK_MAX_IDENTITY_LEN) { OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG); return 0; } // Clear currently configured hint, if any. out->reset(); // Treat the empty hint as not supplying one. Plain PSK makes it possible to // send either no hint (omit ServerKeyExchange) or an empty hint, while // ECDHE_PSK can only spell empty hint. Having different capabilities is odd, // so we interpret empty and missing as identical. 
  if (identity_hint != NULL && identity_hint[0] != '\0') {
    out->reset(OPENSSL_strdup(identity_hint));
    if (*out == nullptr) {
      return 0;
    }
  }

  return 1;
}

int SSL_CTX_use_psk_identity_hint(SSL_CTX *ctx, const char *identity_hint) {
  return use_psk_identity_hint(&ctx->psk_identity_hint, identity_hint);
}

int SSL_use_psk_identity_hint(SSL *ssl, const char *identity_hint) {
  if (!ssl->config) {
    return 0;
  }
  return use_psk_identity_hint(&ssl->config->psk_identity_hint, identity_hint);
}

const char *SSL_get_psk_identity_hint(const SSL *ssl) {
  if (ssl == NULL) {
    return NULL;
  }
  if (ssl->config == NULL) {
    assert(ssl->config);
    return NULL;
  }
  return ssl->config->psk_identity_hint.get();
}

// Returns the PSK identity recorded on the current session, if any.
const char *SSL_get_psk_identity(const SSL *ssl) {
  if (ssl == NULL) {
    return NULL;
  }
  SSL_SESSION *session = SSL_get_session(ssl);
  if (session == NULL) {
    return NULL;
  }
  return session->psk_identity.get();
}

void SSL_set_psk_client_callback(
    SSL *ssl, unsigned (*cb)(SSL *ssl, const char *hint, char *identity,
                             unsigned max_identity_len, uint8_t *psk,
                             unsigned max_psk_len)) {
  if (!ssl->config) {
    return;
  }
  ssl->config->psk_client_callback = cb;
}

void SSL_CTX_set_psk_client_callback(
    SSL_CTX *ctx, unsigned (*cb)(SSL *ssl, const char *hint, char *identity,
                                 unsigned max_identity_len, uint8_t *psk,
                                 unsigned max_psk_len)) {
  ctx->psk_client_callback = cb;
}

void SSL_set_psk_server_callback(SSL *ssl,
                                 unsigned (*cb)(SSL *ssl, const char *identity,
                                                uint8_t *psk,
                                                unsigned max_psk_len)) {
  if (!ssl->config) {
    return;
  }
  ssl->config->psk_server_callback = cb;
}

void SSL_CTX_set_psk_server_callback(
    SSL_CTX *ctx, unsigned (*cb)(SSL *ssl, const char *identity, uint8_t *psk,
                                 unsigned max_psk_len)) {
  ctx->psk_server_callback = cb;
}

// Message callbacks observe handshake and record traffic for debugging.
void SSL_CTX_set_msg_callback(SSL_CTX *ctx,
                              void (*cb)(int write_p, int version,
                                         int content_type, const void *buf,
                                         size_t len, SSL *ssl, void *arg)) {
  ctx->msg_callback = cb;
}

void SSL_CTX_set_msg_callback_arg(SSL_CTX *ctx, void *arg) {
  ctx->msg_callback_arg = arg;
}

void
SSL_set_msg_callback(SSL *ssl,
                     void (*cb)(int write_p, int version, int content_type,
                                const void *buf, size_t len, SSL *ssl,
                                void *arg)) {
  ssl->msg_callback = cb;
}

void SSL_set_msg_callback_arg(SSL *ssl, void *arg) {
  ssl->msg_callback_arg = arg;
}

// Setter/getter for the context's keylog callback.
void SSL_CTX_set_keylog_callback(SSL_CTX *ctx,
                                 void (*cb)(const SSL *ssl,
                                            const char *line)) {
  ctx->keylog_callback = cb;
}

void (*SSL_CTX_get_keylog_callback(const SSL_CTX *ctx))(const SSL *ssl,
                                                        const char *line) {
  return ctx->keylog_callback;
}

void SSL_CTX_set_current_time_cb(SSL_CTX *ctx,
                                 void (*cb)(const SSL *ssl,
                                            struct timeval *out_clock)) {
  ctx->current_time_cb = cb;
}

// True when the private key is no longer needed by this connection.
int SSL_can_release_private_key(const SSL *ssl) {
  if (ssl_can_renegotiate(ssl)) {
    // If the connection can renegotiate (client only), the private key may be
    // used in a future handshake.
    return 0;
  }

  // Otherwise, this is determined by the current handshake.
  return !ssl->s3->hs || ssl->s3->hs->can_release_private_key;
}

int SSL_is_init_finished(const SSL *ssl) { return !SSL_in_init(ssl); }

int SSL_in_init(const SSL *ssl) {
  // This returns false once all the handshake state has been finalized, to
  // allow callbacks and getters based on SSL_in_init to return the correct
  // values.
  SSL_HANDSHAKE *hs = ssl->s3->hs.get();
  return hs != nullptr && !hs->handshake_finalized;
}

int SSL_in_false_start(const SSL *ssl) {
  if (ssl->s3->hs == NULL) {
    return 0;
  }
  return ssl->s3->hs->in_false_start;
}

int SSL_cutthrough_complete(const SSL *ssl) { return SSL_in_false_start(ssl); }

int SSL_is_server(const SSL *ssl) { return ssl->server; }

int SSL_is_dtls(const SSL *ssl) { return ssl->method->is_dtls; }

int SSL_is_quic(const SSL *ssl) { return ssl->quic_method != nullptr; }

void SSL_CTX_set_select_certificate_cb(
    SSL_CTX *ctx,
    enum ssl_select_cert_result_t (*cb)(const SSL_CLIENT_HELLO *)) {
  ctx->select_certificate_cb = cb;
}

void SSL_CTX_set_dos_protection_cb(SSL_CTX *ctx,
                                   int (*cb)(const SSL_CLIENT_HELLO *)) {
  ctx->dos_protection_cb = cb;
}

void SSL_CTX_set_reverify_on_resume(SSL_CTX *ctx, int enabled) {
  ctx->reverify_on_resume = !!enabled;
}

void SSL_set_enforce_rsa_key_usage(SSL *ssl, int enabled) {
  if (!ssl->config) {
    return;
  }
  ssl->config->enforce_rsa_key_usage = !!enabled;
}

int SSL_was_key_usage_invalid(const SSL *ssl) {
  return ssl->s3->was_key_usage_invalid;
}

// Updates the renegotiation policy and possibly sheds the handshake config.
void SSL_set_renegotiate_mode(SSL *ssl, enum ssl_renegotiate_mode_t mode) {
  ssl->renegotiate_mode = mode;

  // Check if |ssl_can_renegotiate| has changed and the configuration may now be
  // shed. HTTP clients may initially allow renegotiation for HTTP/1.1, and then
  // disable after the handshake once the ALPN protocol is known to be HTTP/2.
  ssl_maybe_shed_handshake_config(ssl);
}

// Exposes the record-layer IVs; TLS only (fails for DTLS).
int SSL_get_ivs(const SSL *ssl, const uint8_t **out_read_iv,
                const uint8_t **out_write_iv, size_t *out_iv_len) {
  // No cipher suites maintain stateful internal IVs in DTLS. It would not be
  // compatible with reordering.
  if (SSL_is_dtls(ssl)) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
    return 0;
  }

  // Read and write IV lengths must agree for this API to make sense.
  size_t write_iv_len;
  if (!ssl->s3->aead_read_ctx->GetIV(out_read_iv, out_iv_len) ||
      !ssl->s3->aead_write_ctx->GetIV(out_write_iv, &write_iv_len) ||
      *out_iv_len != write_iv_len) {
    return 0;
  }

  return 1;
}

uint64_t SSL_get_read_sequence(const SSL *ssl) {
  if (SSL_is_dtls(ssl)) {
    // TODO(crbug.com/42290608): This API needs to reworked.
    //
    // In DTLS 1.2, right at an epoch transition, |read_epoch| may not have
    // received any records. We will then return that sequence 0 is the highest
    // received, but it's really -1, which is not representable. This is mostly
    // moot because, after the handshake, we will never be in the state.
    //
    // In DTLS 1.3, epochs do not transition until the first record comes in.
    // This avoids the DTLS 1.2 problem but introduces a different problem:
    // during a KeyUpdate (which may occur in the steady state), both epochs are
    // live. We'll likely need a new API for DTLS offload.
    const DTLSReadEpoch *read_epoch = &ssl->d1->read_epoch;
    return DTLSRecordNumber(read_epoch->epoch,
                            read_epoch->bitmap.max_seq_num())
        .combined();
  }
  return ssl->s3->read_sequence;
}

uint64_t SSL_get_write_sequence(const SSL *ssl) {
  if (SSL_is_dtls(ssl)) {
    return ssl->d1->write_epoch.next_record.combined();
  }
  return ssl->s3->write_sequence;
}

// Returns the signature algorithm used by the peer, or 0 without a session.
uint16_t SSL_get_peer_signature_algorithm(const SSL *ssl) {
  SSL_SESSION *session = SSL_get_session(ssl);
  if (session == NULL) {
    return 0;
  }
  return session->peer_signature_algorithm;
}

// Copies up to |max_out| bytes of the client random; with |max_out| == 0,
// reports the full length instead.
size_t SSL_get_client_random(const SSL *ssl, uint8_t *out, size_t max_out) {
  if (max_out == 0) {
    return sizeof(ssl->s3->client_random);
  }
  if (max_out > sizeof(ssl->s3->client_random)) {
    max_out = sizeof(ssl->s3->client_random);
  }
  OPENSSL_memcpy(out, ssl->s3->client_random, max_out);
  return max_out;
}

// Server-random counterpart of |SSL_get_client_random|.
size_t SSL_get_server_random(const SSL *ssl, uint8_t *out, size_t max_out) {
  if (max_out == 0) {
    return sizeof(ssl->s3->server_random);
  }
  if (max_out > sizeof(ssl->s3->server_random)) {
    max_out = sizeof(ssl->s3->server_random);
  }
  OPENSSL_memcpy(out, ssl->s3->server_random, max_out);
  return max_out;
}

// During a handshake, returns the cipher being negotiated.
const SSL_CIPHER *SSL_get_pending_cipher(const SSL *ssl) {
  SSL_HANDSHAKE *hs = ssl->s3->hs.get();
  if (hs == NULL) {
    return NULL;
  }
  return hs->new_cipher;
}

void SSL_set_retain_only_sha256_of_client_certs(SSL *ssl, int enabled) {
  if (!ssl->config) {
    return;
  }
  ssl->config->retain_only_sha256_of_client_certs = !!enabled;
}

void SSL_CTX_set_retain_only_sha256_of_client_certs(SSL_CTX *ctx,
                                                    int enabled) {
  ctx->retain_only_sha256_of_client_certs = !!enabled;
}

void SSL_CTX_set_grease_enabled(SSL_CTX *ctx, int enabled) {
  ctx->grease_enabled = !!enabled;
}

void SSL_CTX_set_permute_extensions(SSL_CTX *ctx, int enabled) {
  ctx->permute_extensions = !!enabled;
}

void SSL_set_permute_extensions(SSL *ssl, int enabled) {
  if (!ssl->config) {
    return;
  }
  ssl->config->permute_extensions = !!enabled;
}

int32_t SSL_get_ticket_age_skew(const SSL *ssl) {
  return
ssl->s3->ticket_age_skew; } void SSL_CTX_set_false_start_allowed_without_alpn(SSL_CTX *ctx, int allowed) { ctx->false_start_allowed_without_alpn = !!allowed; } int SSL_used_hello_retry_request(const SSL *ssl) { return ssl->s3->used_hello_retry_request; } void SSL_set_shed_handshake_config(SSL *ssl, int enable) { if (!ssl->config) { return; } ssl->config->shed_handshake_config = !!enable; } void SSL_set_jdk11_workaround(SSL *ssl, int enable) { if (!ssl->config) { return; } ssl->config->jdk11_workaround = !!enable; } void SSL_set_check_client_certificate_type(SSL *ssl, int enable) { if (!ssl->config) { return; } ssl->config->check_client_certificate_type = !!enable; } void SSL_set_check_ecdsa_curve(SSL *ssl, int enable) { if (!ssl->config) { return; } ssl->config->check_ecdsa_curve = !!enable; } void SSL_set_quic_use_legacy_codepoint(SSL *ssl, int use_legacy) { if (!ssl->config) { return; } ssl->config->quic_use_legacy_codepoint = !!use_legacy; } int SSL_clear(SSL *ssl) { if (!ssl->config) { return 0; // SSL_clear may not be used after shedding config. } // In OpenSSL, reusing a client |SSL| with |SSL_clear| causes the previously // established session to be offered the next time around. wpa_supplicant // depends on this behavior, so emulate it. UniquePtr session; if (!ssl->server && ssl->s3->established_session != NULL) { session = UpRef(ssl->s3->established_session); } // The ssl->d1->mtu is simultaneously configuration (preserved across // clear) and connection-specific state (gets reset). // // TODO(davidben): Avoid this. 
  unsigned mtu = 0;
  if (ssl->d1 != NULL) {
    mtu = ssl->d1->mtu;
  }

  // Tear down and recreate all connection state via the protocol method.
  ssl->method->ssl_free(ssl);
  if (!ssl->method->ssl_new(ssl)) {
    return 0;
  }

  if (SSL_is_dtls(ssl) && (SSL_get_options(ssl) & SSL_OP_NO_QUERY_MTU)) {
    ssl->d1->mtu = mtu;
  }

  if (session != nullptr) {
    SSL_set_session(ssl, session.get());
  }

  return 1;
}

// Session statistics are not tracked; these always report zero.
int SSL_CTX_sess_connect(const SSL_CTX *ctx) { return 0; }

int SSL_CTX_sess_connect_good(const SSL_CTX *ctx) { return 0; }

int SSL_CTX_sess_connect_renegotiate(const SSL_CTX *ctx) { return 0; }

int SSL_CTX_sess_accept(const SSL_CTX *ctx) { return 0; }

int SSL_CTX_sess_accept_renegotiate(const SSL_CTX *ctx) { return 0; }

int SSL_CTX_sess_accept_good(const SSL_CTX *ctx) { return 0; }

int SSL_CTX_sess_hits(const SSL_CTX *ctx) { return 0; }

int SSL_CTX_sess_cb_hits(const SSL_CTX *ctx) { return 0; }

int SSL_CTX_sess_misses(const SSL_CTX *ctx) { return 0; }

int SSL_CTX_sess_timeouts(const SSL_CTX *ctx) { return 0; }

int SSL_CTX_sess_cache_full(const SSL_CTX *ctx) { return 0; }

int SSL_num_renegotiations(const SSL *ssl) {
  return SSL_total_renegotiations(ssl);
}

// Legacy stubs: ephemeral RSA configuration is ignored.
int SSL_CTX_need_tmp_RSA(const SSL_CTX *ctx) { return 0; }

int SSL_need_tmp_RSA(const SSL *ssl) { return 0; }

int SSL_CTX_set_tmp_rsa(SSL_CTX *ctx, const RSA *rsa) { return 1; }

int SSL_set_tmp_rsa(SSL *ssl, const RSA *rsa) { return 1; }

void ERR_load_SSL_strings(void) {}

void SSL_load_error_strings(void) {}

int SSL_cache_hit(SSL *ssl) { return SSL_session_reused(ssl); }

// Legacy ECDH configuration is mapped onto the supported-groups API.
int SSL_CTX_set_tmp_ecdh(SSL_CTX *ctx, const EC_KEY *ec_key) {
  if (ec_key == NULL || EC_KEY_get0_group(ec_key) == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }
  int nid = EC_GROUP_get_curve_name(EC_KEY_get0_group(ec_key));
  return SSL_CTX_set1_groups(ctx, &nid, 1);
}

int SSL_set_tmp_ecdh(SSL *ssl, const EC_KEY *ec_key) {
  if (ec_key == NULL || EC_KEY_get0_group(ec_key) == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER);
    return 0;
  }
  int nid = EC_GROUP_get_curve_name(EC_KEY_get0_group(ec_key));
  return
SSL_set1_groups(ssl, &nid, 1); } void SSL_CTX_set_ticket_aead_method(SSL_CTX *ctx, const SSL_TICKET_AEAD_METHOD *aead_method) { ctx->ticket_aead_method = aead_method; } SSL_SESSION *SSL_process_tls13_new_session_ticket(SSL *ssl, const uint8_t *buf, size_t buf_len) { if (SSL_in_init(ssl) || // ssl_protocol_version(ssl) != TLS1_3_VERSION || // ssl->server) { // Only TLS 1.3 clients are supported. OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return nullptr; } CBS cbs, body; CBS_init(&cbs, buf, buf_len); uint8_t type; if (!CBS_get_u8(&cbs, &type) || // !CBS_get_u24_length_prefixed(&cbs, &body) || // CBS_len(&cbs) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return nullptr; } UniquePtr session = tls13_create_session_with_ticket(ssl, &body); if (!session) { // |tls13_create_session_with_ticket| puts the correct error. return nullptr; } return session.release(); } int SSL_CTX_set_num_tickets(SSL_CTX *ctx, size_t num_tickets) { num_tickets = std::min(num_tickets, kMaxTickets); static_assert(kMaxTickets <= 0xff, "Too many tickets."); ctx->num_tickets = static_cast(num_tickets); return 1; } size_t SSL_CTX_get_num_tickets(const SSL_CTX *ctx) { return ctx->num_tickets; } int SSL_set_tlsext_status_type(SSL *ssl, int type) { if (!ssl->config) { return 0; } ssl->config->ocsp_stapling_enabled = type == TLSEXT_STATUSTYPE_ocsp; return 1; } int SSL_get_tlsext_status_type(const SSL *ssl) { if (ssl->server) { SSL_HANDSHAKE *hs = ssl->s3->hs.get(); return hs != nullptr && hs->ocsp_stapling_requested ? TLSEXT_STATUSTYPE_ocsp : TLSEXT_STATUSTYPE_nothing; } return ssl->config != nullptr && ssl->config->ocsp_stapling_enabled ? 
TLSEXT_STATUSTYPE_ocsp : TLSEXT_STATUSTYPE_nothing;
}

// Frees |resp| on success (the caller passes ownership).
int SSL_set_tlsext_status_ocsp_resp(SSL *ssl, uint8_t *resp, size_t resp_len) {
  if (SSL_set_ocsp_response(ssl, resp, resp_len)) {
    OPENSSL_free(resp);
    return 1;
  }
  return 0;
}

size_t SSL_get_tlsext_status_ocsp_resp(const SSL *ssl, const uint8_t **out) {
  size_t ret;
  SSL_get0_ocsp_response(ssl, out, &ret);
  return ret;
}

int SSL_CTX_set_tlsext_status_cb(SSL_CTX *ctx,
                                 int (*callback)(SSL *ssl, void *arg)) {
  ctx->legacy_ocsp_callback = callback;
  return 1;
}

int SSL_CTX_set_tlsext_status_arg(SSL_CTX *ctx, void *arg) {
  ctx->legacy_ocsp_callback_arg = arg;
  return 1;
}

// The "curve" APIs are aliases for the corresponding group APIs.
uint16_t SSL_get_curve_id(const SSL *ssl) { return SSL_get_group_id(ssl); }

const char *SSL_get_curve_name(uint16_t curve_id) {
  return SSL_get_group_name(curve_id);
}

size_t SSL_get_all_curve_names(const char **out, size_t max_out) {
  return SSL_get_all_group_names(out, max_out);
}

int SSL_CTX_set1_curves(SSL_CTX *ctx, const int *curves, size_t num_curves) {
  return SSL_CTX_set1_groups(ctx, curves, num_curves);
}

int SSL_set1_curves(SSL *ssl, const int *curves, size_t num_curves) {
  return SSL_set1_groups(ssl, curves, num_curves);
}

int SSL_CTX_set1_curves_list(SSL_CTX *ctx, const char *curves) {
  return SSL_CTX_set1_groups_list(ctx, curves);
}

int SSL_set1_curves_list(SSL *ssl, const char *curves) {
  return SSL_set1_groups_list(ssl, curves);
}

namespace fips202205 {

// (References are to SP 800-52r2):
// Section 3.4.2.2
// "at least one of the NIST-approved curves, P-256 (secp256r1) and P384
// (secp384r1), shall be supported as described in RFC 8422."
//
// Section 3.3.1
// "The server shall be configured to only use cipher suites that are
// composed entirely of NIST approved algorithms"
static const uint16_t kGroups[] = {SSL_GROUP_SECP256R1, SSL_GROUP_SECP384R1};

static const uint16_t kSigAlgs[] = {
    SSL_SIGN_RSA_PKCS1_SHA256,
    SSL_SIGN_RSA_PKCS1_SHA384,
    SSL_SIGN_RSA_PKCS1_SHA512,
    // Table 4.1:
    // "The curve should be P-256 or P-384"
    SSL_SIGN_ECDSA_SECP256R1_SHA256,
    SSL_SIGN_ECDSA_SECP384R1_SHA384,
    SSL_SIGN_RSA_PSS_RSAE_SHA256,
    SSL_SIGN_RSA_PSS_RSAE_SHA384,
    SSL_SIGN_RSA_PSS_RSAE_SHA512,
};

static const char kTLS12Ciphers[] =
    "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:"
    "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:"
    "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:"
    "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384";

// Applies the FIPS 202205 policy to |ctx|: TLS 1.2–1.3, the cipher list,
// groups, and signing/verify algorithm preferences above. Returns 1 on
// success, 0 on failure (partial configuration may remain applied).
static int Configure(SSL_CTX *ctx) {
  ctx->compliance_policy = ssl_compliance_policy_fips_202205;

  return
      // Section 3.1:
      // "Servers that support government-only applications shall be
      // configured to use TLS 1.2 and should be configured to use TLS 1.3
      // as well. These servers should not be configured to use TLS 1.1 and
      // shall not use TLS 1.0, SSL 3.0, or SSL 2.0.
      SSL_CTX_set_min_proto_version(ctx, TLS1_2_VERSION) &&
      SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION) &&
      // Sections 3.3.1.1.1 and 3.3.1.1.2 are ambiguous about whether
      // HMAC-SHA-1 cipher suites are permitted with TLS 1.2. However, later the
      // Encrypt-then-MAC extension is required for all CBC cipher suites and so
      // it's easier to drop them.
      SSL_CTX_set_strict_cipher_list(ctx, kTLS12Ciphers) &&
      SSL_CTX_set1_group_ids(ctx, kGroups, OPENSSL_ARRAY_SIZE(kGroups)) &&
      SSL_CTX_set_signing_algorithm_prefs(ctx, kSigAlgs,
                                          OPENSSL_ARRAY_SIZE(kSigAlgs)) &&
      SSL_CTX_set_verify_algorithm_prefs(ctx, kSigAlgs,
                                         OPENSSL_ARRAY_SIZE(kSigAlgs));
}

// Per-connection variant of |Configure(SSL_CTX)|.
static int Configure(SSL *ssl) {
  ssl->config->compliance_policy = ssl_compliance_policy_fips_202205;

  // See |Configure(SSL_CTX)|, above, for reasoning.
  return SSL_set_min_proto_version(ssl, TLS1_2_VERSION) &&
         SSL_set_max_proto_version(ssl, TLS1_3_VERSION) &&
         SSL_set_strict_cipher_list(ssl, kTLS12Ciphers) &&
         SSL_set1_group_ids(ssl, kGroups, OPENSSL_ARRAY_SIZE(kGroups)) &&
         SSL_set_signing_algorithm_prefs(ssl, kSigAlgs,
                                         OPENSSL_ARRAY_SIZE(kSigAlgs)) &&
         SSL_set_verify_algorithm_prefs(ssl, kSigAlgs,
                                        OPENSSL_ARRAY_SIZE(kSigAlgs));
}

}  // namespace fips202205

namespace wpa202304 {

// See WPA version 3.1, section 3.5.

static const uint16_t kGroups[] = {SSL_GROUP_SECP384R1};

static const uint16_t kSigAlgs[] = {
    SSL_SIGN_RSA_PKCS1_SHA384,        //
    SSL_SIGN_RSA_PKCS1_SHA512,        //
    SSL_SIGN_ECDSA_SECP384R1_SHA384,  //
    SSL_SIGN_RSA_PSS_RSAE_SHA384,     //
    SSL_SIGN_RSA_PSS_RSAE_SHA512,     //
};

static const char kTLS12Ciphers[] =
    "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:"
    "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384";

// Applies the WPA3 Enterprise 192-bit policy to |ctx| (P-384 only, 256-bit
// AEAD suites). Returns 1 on success, 0 on failure.
static int Configure(SSL_CTX *ctx) {
  ctx->compliance_policy = ssl_compliance_policy_wpa3_192_202304;

  return SSL_CTX_set_min_proto_version(ctx, TLS1_2_VERSION) &&
         SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION) &&
         SSL_CTX_set_strict_cipher_list(ctx, kTLS12Ciphers) &&
         SSL_CTX_set1_group_ids(ctx, kGroups, OPENSSL_ARRAY_SIZE(kGroups)) &&
         SSL_CTX_set_signing_algorithm_prefs(ctx, kSigAlgs,
                                             OPENSSL_ARRAY_SIZE(kSigAlgs)) &&
         SSL_CTX_set_verify_algorithm_prefs(ctx, kSigAlgs,
                                            OPENSSL_ARRAY_SIZE(kSigAlgs));
}

// Per-connection variant of |Configure(SSL_CTX)|.
static int Configure(SSL *ssl) {
  ssl->config->compliance_policy = ssl_compliance_policy_wpa3_192_202304;

  return SSL_set_min_proto_version(ssl, TLS1_2_VERSION) &&
         SSL_set_max_proto_version(ssl, TLS1_3_VERSION) &&
         SSL_set_strict_cipher_list(ssl, kTLS12Ciphers) &&
         SSL_set1_group_ids(ssl, kGroups, OPENSSL_ARRAY_SIZE(kGroups)) &&
         SSL_set_signing_algorithm_prefs(ssl, kSigAlgs,
                                         OPENSSL_ARRAY_SIZE(kSigAlgs)) &&
         SSL_set_verify_algorithm_prefs(ssl, kSigAlgs,
                                        OPENSSL_ARRAY_SIZE(kSigAlgs));
}

}  // namespace wpa202304

namespace cnsa202407 {

// CNSA 2.0 policy only records the policy value here; enforcement is applied
// elsewhere (this Configure intentionally sets no version/cipher limits).
static int Configure(SSL_CTX *ctx) {
  ctx->compliance_policy = ssl_compliance_policy_cnsa_202407;
  return 1;
}
static int Configure(SSL *ssl) { ssl->config->compliance_policy = ssl_compliance_policy_cnsa_202407; return 1; } } // namespace cnsa202407 int SSL_CTX_set_compliance_policy(SSL_CTX *ctx, enum ssl_compliance_policy_t policy) { switch (policy) { case ssl_compliance_policy_fips_202205: return fips202205::Configure(ctx); case ssl_compliance_policy_wpa3_192_202304: return wpa202304::Configure(ctx); case ssl_compliance_policy_cnsa_202407: return cnsa202407::Configure(ctx); default: return 0; } } enum ssl_compliance_policy_t SSL_CTX_get_compliance_policy(const SSL_CTX *ctx) { return ctx->compliance_policy; } int SSL_set_compliance_policy(SSL *ssl, enum ssl_compliance_policy_t policy) { switch (policy) { case ssl_compliance_policy_fips_202205: return fips202205::Configure(ssl); case ssl_compliance_policy_wpa3_192_202304: return wpa202304::Configure(ssl); case ssl_compliance_policy_cnsa_202407: return cnsa202407::Configure(ssl); default: return 0; } } enum ssl_compliance_policy_t SSL_get_compliance_policy(const SSL *ssl) { return ssl->config->compliance_policy; } ================================================ FILE: Sources/CNIOBoringSSL/ssl/ssl_privkey.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
 You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html */

// NOTE(review): the header names of the following #include directives appear
// to have been stripped during archive extraction (all angle-bracketed text
// is missing throughout this archive); restore them from upstream BoringSSL's
// ssl_privkey.cc before building.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../crypto/internal.h"
#include "internal.h"


BSSL_NAMESPACE_BEGIN

// Returns whether |key_type| is a private-key type usable for TLS in this
// library: RSA, EC(DSA), or Ed25519.
bool ssl_is_key_type_supported(int key_type) {
  return key_type == EVP_PKEY_RSA || key_type == EVP_PKEY_EC ||
         key_type == EVP_PKEY_ED25519;
}

// Describes one TLS SignatureScheme code point: the key type and curve it
// requires, its digest, and which protocol versions/roles may use it.
typedef struct {
  uint16_t sigalg;
  int pkey_type;
  int curve;
  const EVP_MD *(*digest_func)(void);
  bool is_rsa_pss;
  bool tls12_ok;
  bool tls13_ok;
  bool client_only;
} SSL_SIGNATURE_ALGORITHM;

static const SSL_SIGNATURE_ALGORITHM kSignatureAlgorithms[] = {
    // PKCS#1 v1.5 code points are only allowed in TLS 1.2.
    {SSL_SIGN_RSA_PKCS1_MD5_SHA1, EVP_PKEY_RSA, NID_undef, &EVP_md5_sha1,
     /*is_rsa_pss=*/false, /*tls12_ok=*/true, /*tls13_ok=*/false,
     /*client_only=*/false},
    {SSL_SIGN_RSA_PKCS1_SHA1, EVP_PKEY_RSA, NID_undef, &EVP_sha1,
     /*is_rsa_pss=*/false, /*tls12_ok=*/true, /*tls13_ok=*/false,
     /*client_only=*/false},
    {SSL_SIGN_RSA_PKCS1_SHA256, EVP_PKEY_RSA, NID_undef, &EVP_sha256,
     /*is_rsa_pss=*/false, /*tls12_ok=*/true, /*tls13_ok=*/false,
     /*client_only=*/false},
    {SSL_SIGN_RSA_PKCS1_SHA384, EVP_PKEY_RSA, NID_undef, &EVP_sha384,
     /*is_rsa_pss=*/false, /*tls12_ok=*/true, /*tls13_ok=*/false,
     /*client_only=*/false},
    {SSL_SIGN_RSA_PKCS1_SHA512, EVP_PKEY_RSA, NID_undef, &EVP_sha512,
     /*is_rsa_pss=*/false, /*tls12_ok=*/true, /*tls13_ok=*/false,
     /*client_only=*/false},

    // Legacy PKCS#1 v1.5 code points are only allowed in TLS 1.3 and
    // client-only. See draft-ietf-tls-tls13-pkcs1-00.
    {SSL_SIGN_RSA_PKCS1_SHA256_LEGACY, EVP_PKEY_RSA, NID_undef, &EVP_sha256,
     /*is_rsa_pss=*/false, /*tls12_ok=*/false, /*tls13_ok=*/true,
     /*client_only=*/true},

    {SSL_SIGN_RSA_PSS_RSAE_SHA256, EVP_PKEY_RSA, NID_undef, &EVP_sha256,
     /*is_rsa_pss=*/true, /*tls12_ok=*/true, /*tls13_ok=*/true,
     /*client_only=*/false},
    {SSL_SIGN_RSA_PSS_RSAE_SHA384, EVP_PKEY_RSA, NID_undef, &EVP_sha384,
     /*is_rsa_pss=*/true, /*tls12_ok=*/true, /*tls13_ok=*/true,
     /*client_only=*/false},
    {SSL_SIGN_RSA_PSS_RSAE_SHA512, EVP_PKEY_RSA, NID_undef, &EVP_sha512,
     /*is_rsa_pss=*/true, /*tls12_ok=*/true, /*tls13_ok=*/true,
     /*client_only=*/false},

    {SSL_SIGN_ECDSA_SHA1, EVP_PKEY_EC, NID_undef, &EVP_sha1,
     /*is_rsa_pss=*/false, /*tls12_ok=*/true, /*tls13_ok=*/false,
     /*client_only=*/false},
    {SSL_SIGN_ECDSA_SECP256R1_SHA256, EVP_PKEY_EC, NID_X9_62_prime256v1,
     &EVP_sha256, /*is_rsa_pss=*/false, /*tls12_ok=*/true, /*tls13_ok=*/true,
     /*client_only=*/false},
    {SSL_SIGN_ECDSA_SECP384R1_SHA384, EVP_PKEY_EC, NID_secp384r1, &EVP_sha384,
     /*is_rsa_pss=*/false, /*tls12_ok=*/true, /*tls13_ok=*/true,
     /*client_only=*/false},
    {SSL_SIGN_ECDSA_SECP521R1_SHA512, EVP_PKEY_EC, NID_secp521r1, &EVP_sha512,
     /*is_rsa_pss=*/false, /*tls12_ok=*/true, /*tls13_ok=*/true,
     /*client_only=*/false},

    {SSL_SIGN_ED25519, EVP_PKEY_ED25519, NID_undef, nullptr,
     /*is_rsa_pss=*/false, /*tls12_ok=*/true, /*tls13_ok=*/true,
     /*client_only=*/false},
};

// Linear lookup in |kSignatureAlgorithms|; returns NULL for unknown code
// points (the table is small, so no index structure is needed).
static const SSL_SIGNATURE_ALGORITHM *get_signature_algorithm(uint16_t sigalg) {
  for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kSignatureAlgorithms); i++) {
    if (kSignatureAlgorithms[i].sigalg == sigalg) {
      return &kSignatureAlgorithms[i];
    }
  }
  return NULL;
}

bool ssl_pkey_supports_algorithm(const SSL *ssl, EVP_PKEY *pkey,
                                 uint16_t sigalg, bool is_verify) {
  const SSL_SIGNATURE_ALGORITHM *alg = get_signature_algorithm(sigalg);
  if (alg == NULL || EVP_PKEY_id(pkey) != alg->pkey_type) {
    return false;
  }

  // Ensure the RSA key is large enough for the hash. RSASSA-PSS requires that
  // emLen be at least hLen + sLen + 2.
Both hLen and sLen are the size of the // hash in TLS. Reasonable RSA key sizes are large enough for the largest // defined RSASSA-PSS algorithm, but 1024-bit RSA is slightly too small for // SHA-512. 1024-bit RSA is sometimes used for test credentials, so check the // size so that we can fall back to another algorithm in that case. if (alg->is_rsa_pss && (size_t)EVP_PKEY_size(pkey) < 2 * EVP_MD_size(alg->digest_func()) + 2) { return false; } if (ssl_protocol_version(ssl) < TLS1_2_VERSION) { // TLS 1.0 and 1.1 do not negotiate algorithms and always sign one of two // hardcoded algorithms. return sigalg == SSL_SIGN_RSA_PKCS1_MD5_SHA1 || sigalg == SSL_SIGN_ECDSA_SHA1; } // |SSL_SIGN_RSA_PKCS1_MD5_SHA1| is not a real SignatureScheme for TLS 1.2 and // higher. It is an internal value we use to represent TLS 1.0/1.1's MD5/SHA1 // concatenation. if (sigalg == SSL_SIGN_RSA_PKCS1_MD5_SHA1) { return false; } if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { if (!alg->tls13_ok) { return false; } bool is_client_sign = ssl->server == is_verify; if (alg->client_only && !is_client_sign) { return false; } // EC keys have a curve requirement. if (alg->pkey_type == EVP_PKEY_EC && (alg->curve == NID_undef || EC_GROUP_get_curve_name( EC_KEY_get0_group(EVP_PKEY_get0_EC_KEY(pkey))) != alg->curve)) { return false; } } else if (!alg->tls12_ok) { return false; } return true; } static bool setup_ctx(SSL *ssl, EVP_MD_CTX *ctx, EVP_PKEY *pkey, uint16_t sigalg, bool is_verify) { if (!ssl_pkey_supports_algorithm(ssl, pkey, sigalg, is_verify)) { OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SIGNATURE_TYPE); return false; } const SSL_SIGNATURE_ALGORITHM *alg = get_signature_algorithm(sigalg); const EVP_MD *digest = alg->digest_func != NULL ? 
alg->digest_func() : NULL; EVP_PKEY_CTX *pctx; if (is_verify) { if (!EVP_DigestVerifyInit(ctx, &pctx, digest, NULL, pkey)) { return false; } } else if (!EVP_DigestSignInit(ctx, &pctx, digest, NULL, pkey)) { return false; } if (alg->is_rsa_pss) { if (!EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING) || !EVP_PKEY_CTX_set_rsa_pss_saltlen(pctx, -1 /* salt len = hash len */)) { return false; } } return true; } enum ssl_private_key_result_t ssl_private_key_sign( SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len, size_t max_out, uint16_t sigalg, Span in) { SSL *const ssl = hs->ssl; const SSL_CREDENTIAL *const cred = hs->credential.get(); SSL_HANDSHAKE_HINTS *const hints = hs->hints.get(); Array spki; if (hints) { ScopedCBB spki_cbb; if (!CBB_init(spki_cbb.get(), 64) || !EVP_marshal_public_key(spki_cbb.get(), cred->pubkey.get()) || !CBBFinishArray(spki_cbb.get(), &spki)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return ssl_private_key_failure; } } // Replay the signature from handshake hints if available. if (hints && !hs->hints_requested && // sigalg == hints->signature_algorithm && // in == hints->signature_input && // Span(spki) == hints->signature_spki && // !hints->signature.empty() && // hints->signature.size() <= max_out) { // Signature algorithm and input both match. Reuse the signature from hints. 
*out_len = hints->signature.size(); OPENSSL_memcpy(out, hints->signature.data(), hints->signature.size()); return ssl_private_key_success; } const SSL_PRIVATE_KEY_METHOD *key_method = cred->key_method; EVP_PKEY *privkey = cred->privkey.get(); assert(!hs->can_release_private_key); if (key_method != NULL) { enum ssl_private_key_result_t ret; if (hs->pending_private_key_op) { ret = key_method->complete(ssl, out, out_len, max_out); } else { ret = key_method->sign(ssl, out, out_len, max_out, sigalg, in.data(), in.size()); } if (ret == ssl_private_key_failure) { OPENSSL_PUT_ERROR(SSL, SSL_R_PRIVATE_KEY_OPERATION_FAILED); } hs->pending_private_key_op = ret == ssl_private_key_retry; if (ret != ssl_private_key_success) { return ret; } } else { *out_len = max_out; ScopedEVP_MD_CTX ctx; if (!setup_ctx(ssl, ctx.get(), privkey, sigalg, false /* sign */) || !EVP_DigestSign(ctx.get(), out, out_len, in.data(), in.size())) { return ssl_private_key_failure; } } // Save the hint if applicable. if (hints && hs->hints_requested) { hints->signature_algorithm = sigalg; hints->signature_spki = std::move(spki); if (!hints->signature_input.CopyFrom(in) || !hints->signature.CopyFrom(Span(out, *out_len))) { return ssl_private_key_failure; } } return ssl_private_key_success; } bool ssl_public_key_verify(SSL *ssl, Span signature, uint16_t sigalg, EVP_PKEY *pkey, Span in) { ScopedEVP_MD_CTX ctx; if (!setup_ctx(ssl, ctx.get(), pkey, sigalg, true /* verify */)) { return false; } bool ok = EVP_DigestVerify(ctx.get(), signature.data(), signature.size(), in.data(), in.size()); #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) ok = true; ERR_clear_error(); #endif return ok; } enum ssl_private_key_result_t ssl_private_key_decrypt(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len, size_t max_out, Span in) { SSL *const ssl = hs->ssl; const SSL_CREDENTIAL *const cred = hs->credential.get(); assert(!hs->can_release_private_key); if (cred->key_method != NULL) { enum ssl_private_key_result_t ret; if 
(hs->pending_private_key_op) { ret = cred->key_method->complete(ssl, out, out_len, max_out); } else { ret = cred->key_method->decrypt(ssl, out, out_len, max_out, in.data(), in.size()); } if (ret == ssl_private_key_failure) { OPENSSL_PUT_ERROR(SSL, SSL_R_PRIVATE_KEY_OPERATION_FAILED); } hs->pending_private_key_op = ret == ssl_private_key_retry; return ret; } RSA *rsa = EVP_PKEY_get0_RSA(cred->privkey.get()); if (rsa == NULL) { // Decrypt operations are only supported for RSA keys. OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return ssl_private_key_failure; } // Decrypt with no padding. PKCS#1 padding will be removed as part of the // timing-sensitive code by the caller. if (!RSA_decrypt(rsa, out_len, out, max_out, in.data(), in.size(), RSA_NO_PADDING)) { return ssl_private_key_failure; } return ssl_private_key_success; } BSSL_NAMESPACE_END using namespace bssl; int SSL_use_RSAPrivateKey(SSL *ssl, RSA *rsa) { if (rsa == NULL || ssl->config == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); return 0; } UniquePtr pkey(EVP_PKEY_new()); if (!pkey || // !EVP_PKEY_set1_RSA(pkey.get(), rsa)) { OPENSSL_PUT_ERROR(SSL, ERR_R_EVP_LIB); return 0; } return SSL_use_PrivateKey(ssl, pkey.get()); } int SSL_use_RSAPrivateKey_ASN1(SSL *ssl, const uint8_t *der, size_t der_len) { UniquePtr rsa(RSA_private_key_from_bytes(der, der_len)); if (!rsa) { OPENSSL_PUT_ERROR(SSL, ERR_R_ASN1_LIB); return 0; } return SSL_use_RSAPrivateKey(ssl, rsa.get()); } int SSL_use_PrivateKey(SSL *ssl, EVP_PKEY *pkey) { if (pkey == NULL || ssl->config == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); return 0; } return SSL_CREDENTIAL_set1_private_key( ssl->config->cert->legacy_credential.get(), pkey); } int SSL_use_PrivateKey_ASN1(int type, SSL *ssl, const uint8_t *der, size_t der_len) { if (der_len > LONG_MAX) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); return 0; } const uint8_t *p = der; UniquePtr pkey(d2i_PrivateKey(type, NULL, &p, (long)der_len)); if (!pkey || p != der + der_len) { 
OPENSSL_PUT_ERROR(SSL, ERR_R_ASN1_LIB); return 0; } return SSL_use_PrivateKey(ssl, pkey.get()); } int SSL_CTX_use_RSAPrivateKey(SSL_CTX *ctx, RSA *rsa) { if (rsa == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); return 0; } UniquePtr pkey(EVP_PKEY_new()); if (!pkey || !EVP_PKEY_set1_RSA(pkey.get(), rsa)) { OPENSSL_PUT_ERROR(SSL, ERR_R_EVP_LIB); return 0; } return SSL_CTX_use_PrivateKey(ctx, pkey.get()); } int SSL_CTX_use_RSAPrivateKey_ASN1(SSL_CTX *ctx, const uint8_t *der, size_t der_len) { UniquePtr rsa(RSA_private_key_from_bytes(der, der_len)); if (!rsa) { OPENSSL_PUT_ERROR(SSL, ERR_R_ASN1_LIB); return 0; } return SSL_CTX_use_RSAPrivateKey(ctx, rsa.get()); } int SSL_CTX_use_PrivateKey(SSL_CTX *ctx, EVP_PKEY *pkey) { if (pkey == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); return 0; } return SSL_CREDENTIAL_set1_private_key(ctx->cert->legacy_credential.get(), pkey); } int SSL_CTX_use_PrivateKey_ASN1(int type, SSL_CTX *ctx, const uint8_t *der, size_t der_len) { if (der_len > LONG_MAX) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); return 0; } const uint8_t *p = der; UniquePtr pkey(d2i_PrivateKey(type, NULL, &p, (long)der_len)); if (!pkey || p != der + der_len) { OPENSSL_PUT_ERROR(SSL, ERR_R_ASN1_LIB); return 0; } return SSL_CTX_use_PrivateKey(ctx, pkey.get()); } void SSL_set_private_key_method(SSL *ssl, const SSL_PRIVATE_KEY_METHOD *key_method) { if (!ssl->config) { return; } BSSL_CHECK(SSL_CREDENTIAL_set_private_key_method( ssl->config->cert->legacy_credential.get(), key_method)); } void SSL_CTX_set_private_key_method(SSL_CTX *ctx, const SSL_PRIVATE_KEY_METHOD *key_method) { BSSL_CHECK(SSL_CREDENTIAL_set_private_key_method( ctx->cert->legacy_credential.get(), key_method)); } static constexpr size_t kMaxSignatureAlgorithmNameLen = 24; struct SignatureAlgorithmName { uint16_t signature_algorithm; const char name[kMaxSignatureAlgorithmNameLen]; }; // This was "constexpr" rather than "const", but that triggered a bug in MSVC // where it didn't 
pad the strings to the correct length. static const SignatureAlgorithmName kSignatureAlgorithmNames[] = { {SSL_SIGN_RSA_PKCS1_MD5_SHA1, "rsa_pkcs1_md5_sha1"}, {SSL_SIGN_RSA_PKCS1_SHA1, "rsa_pkcs1_sha1"}, {SSL_SIGN_RSA_PKCS1_SHA256, "rsa_pkcs1_sha256"}, {SSL_SIGN_RSA_PKCS1_SHA256_LEGACY, "rsa_pkcs1_sha256_legacy"}, {SSL_SIGN_RSA_PKCS1_SHA384, "rsa_pkcs1_sha384"}, {SSL_SIGN_RSA_PKCS1_SHA512, "rsa_pkcs1_sha512"}, {SSL_SIGN_ECDSA_SHA1, "ecdsa_sha1"}, {SSL_SIGN_ECDSA_SECP256R1_SHA256, "ecdsa_secp256r1_sha256"}, {SSL_SIGN_ECDSA_SECP384R1_SHA384, "ecdsa_secp384r1_sha384"}, {SSL_SIGN_ECDSA_SECP521R1_SHA512, "ecdsa_secp521r1_sha512"}, {SSL_SIGN_RSA_PSS_RSAE_SHA256, "rsa_pss_rsae_sha256"}, {SSL_SIGN_RSA_PSS_RSAE_SHA384, "rsa_pss_rsae_sha384"}, {SSL_SIGN_RSA_PSS_RSAE_SHA512, "rsa_pss_rsae_sha512"}, {SSL_SIGN_ED25519, "ed25519"}, }; const char *SSL_get_signature_algorithm_name(uint16_t sigalg, int include_curve) { if (!include_curve) { switch (sigalg) { case SSL_SIGN_ECDSA_SECP256R1_SHA256: return "ecdsa_sha256"; case SSL_SIGN_ECDSA_SECP384R1_SHA384: return "ecdsa_sha384"; case SSL_SIGN_ECDSA_SECP521R1_SHA512: return "ecdsa_sha512"; // If adding more here, also update // |SSL_get_all_signature_algorithm_names|. } } for (const auto &candidate : kSignatureAlgorithmNames) { if (candidate.signature_algorithm == sigalg) { return candidate.name; } } return NULL; } size_t SSL_get_all_signature_algorithm_names(const char **out, size_t max_out) { const char *kPredefinedNames[] = {"ecdsa_sha256", "ecdsa_sha384", "ecdsa_sha512"}; return GetAllNames(out, max_out, kPredefinedNames, &SignatureAlgorithmName::name, Span(kSignatureAlgorithmNames)); } int SSL_get_signature_algorithm_key_type(uint16_t sigalg) { const SSL_SIGNATURE_ALGORITHM *alg = get_signature_algorithm(sigalg); return alg != nullptr ? 
alg->pkey_type : EVP_PKEY_NONE; } const EVP_MD *SSL_get_signature_algorithm_digest(uint16_t sigalg) { const SSL_SIGNATURE_ALGORITHM *alg = get_signature_algorithm(sigalg); if (alg == nullptr || alg->digest_func == nullptr) { return nullptr; } return alg->digest_func(); } int SSL_is_signature_algorithm_rsa_pss(uint16_t sigalg) { const SSL_SIGNATURE_ALGORITHM *alg = get_signature_algorithm(sigalg); return alg != nullptr && alg->is_rsa_pss; } static int compare_uint16_t(const void *p1, const void *p2) { uint16_t u1 = *((const uint16_t *)p1); uint16_t u2 = *((const uint16_t *)p2); if (u1 < u2) { return -1; } else if (u1 > u2) { return 1; } else { return 0; } } static bool sigalgs_unique(Span in_sigalgs) { if (in_sigalgs.size() < 2) { return true; } Array sigalgs; if (!sigalgs.CopyFrom(in_sigalgs)) { return false; } qsort(sigalgs.data(), sigalgs.size(), sizeof(uint16_t), compare_uint16_t); for (size_t i = 1; i < sigalgs.size(); i++) { if (sigalgs[i - 1] == sigalgs[i]) { OPENSSL_PUT_ERROR(SSL, SSL_R_DUPLICATE_SIGNATURE_ALGORITHM); return false; } } return true; } static bool set_sigalg_prefs(Array *out, Span prefs) { if (!sigalgs_unique(prefs)) { return false; } // Check for invalid algorithms, and filter out |SSL_SIGN_RSA_PKCS1_MD5_SHA1|. Array filtered; if (!filtered.InitForOverwrite(prefs.size())) { return false; } size_t added = 0; for (uint16_t pref : prefs) { if (pref == SSL_SIGN_RSA_PKCS1_MD5_SHA1) { // Though not intended to be used with this API, we treat // |SSL_SIGN_RSA_PKCS1_MD5_SHA1| as a real signature algorithm in // |SSL_PRIVATE_KEY_METHOD|. Not accepting it here makes for a confusing // abstraction. continue; } if (get_signature_algorithm(pref) == nullptr) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM); return false; } filtered[added] = pref; added++; } filtered.Shrink(added); // This can happen if |prefs| contained only |SSL_SIGN_RSA_PKCS1_MD5_SHA1|. // Leaving it empty would revert to the default, so treat this as an error // condition. 
if (!prefs.empty() && filtered.empty()) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM); return false; } *out = std::move(filtered); return true; } int SSL_CREDENTIAL_set1_signing_algorithm_prefs(SSL_CREDENTIAL *cred, const uint16_t *prefs, size_t num_prefs) { if (!cred->UsesPrivateKey()) { OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } // Delegated credentials are constrained to a single algorithm, so there is no // need to configure this. if (cred->type == SSLCredentialType::kDelegated) { OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } return set_sigalg_prefs(&cred->sigalgs, Span(prefs, num_prefs)); } int SSL_CTX_set_signing_algorithm_prefs(SSL_CTX *ctx, const uint16_t *prefs, size_t num_prefs) { return SSL_CREDENTIAL_set1_signing_algorithm_prefs( ctx->cert->legacy_credential.get(), prefs, num_prefs); } int SSL_set_signing_algorithm_prefs(SSL *ssl, const uint16_t *prefs, size_t num_prefs) { if (!ssl->config) { return 0; } return SSL_CREDENTIAL_set1_signing_algorithm_prefs( ssl->config->cert->legacy_credential.get(), prefs, num_prefs); } static constexpr struct { int pkey_type; int hash_nid; uint16_t signature_algorithm; } kSignatureAlgorithmsMapping[] = { {EVP_PKEY_RSA, NID_sha1, SSL_SIGN_RSA_PKCS1_SHA1}, {EVP_PKEY_RSA, NID_sha256, SSL_SIGN_RSA_PKCS1_SHA256}, {EVP_PKEY_RSA, NID_sha384, SSL_SIGN_RSA_PKCS1_SHA384}, {EVP_PKEY_RSA, NID_sha512, SSL_SIGN_RSA_PKCS1_SHA512}, {EVP_PKEY_RSA_PSS, NID_sha256, SSL_SIGN_RSA_PSS_RSAE_SHA256}, {EVP_PKEY_RSA_PSS, NID_sha384, SSL_SIGN_RSA_PSS_RSAE_SHA384}, {EVP_PKEY_RSA_PSS, NID_sha512, SSL_SIGN_RSA_PSS_RSAE_SHA512}, {EVP_PKEY_EC, NID_sha1, SSL_SIGN_ECDSA_SHA1}, {EVP_PKEY_EC, NID_sha256, SSL_SIGN_ECDSA_SECP256R1_SHA256}, {EVP_PKEY_EC, NID_sha384, SSL_SIGN_ECDSA_SECP384R1_SHA384}, {EVP_PKEY_EC, NID_sha512, SSL_SIGN_ECDSA_SECP521R1_SHA512}, {EVP_PKEY_ED25519, NID_undef, SSL_SIGN_ED25519}, }; static bool parse_sigalg_pairs(Array *out, const int *values, size_t num_values) { 
if ((num_values & 1) == 1) { return false; } const size_t num_pairs = num_values / 2; if (!out->InitForOverwrite(num_pairs)) { return false; } for (size_t i = 0; i < num_values; i += 2) { const int hash_nid = values[i]; const int pkey_type = values[i + 1]; bool found = false; for (const auto &candidate : kSignatureAlgorithmsMapping) { if (candidate.pkey_type == pkey_type && candidate.hash_nid == hash_nid) { (*out)[i / 2] = candidate.signature_algorithm; found = true; break; } } if (!found) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM); ERR_add_error_dataf("unknown hash:%d pkey:%d", hash_nid, pkey_type); return false; } } return true; } int SSL_CTX_set1_sigalgs(SSL_CTX *ctx, const int *values, size_t num_values) { Array sigalgs; if (!parse_sigalg_pairs(&sigalgs, values, num_values)) { return 0; } if (!SSL_CTX_set_signing_algorithm_prefs(ctx, sigalgs.data(), sigalgs.size()) || !SSL_CTX_set_verify_algorithm_prefs(ctx, sigalgs.data(), sigalgs.size())) { return 0; } return 1; } int SSL_set1_sigalgs(SSL *ssl, const int *values, size_t num_values) { if (!ssl->config) { OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } Array sigalgs; if (!parse_sigalg_pairs(&sigalgs, values, num_values)) { return 0; } if (!SSL_set_signing_algorithm_prefs(ssl, sigalgs.data(), sigalgs.size()) || !SSL_set_verify_algorithm_prefs(ssl, sigalgs.data(), sigalgs.size())) { return 0; } return 1; } static bool parse_sigalgs_list(Array *out, const char *str) { // str looks like "RSA+SHA1:ECDSA+SHA256:ecdsa_secp256r1_sha256". // Count colons to give the number of output elements from any successful // parse. size_t num_elements = 1; size_t len = 0; for (const char *p = str; *p; p++) { len++; if (*p == ':') { num_elements++; } } if (!out->InitForOverwrite(num_elements)) { return false; } size_t out_i = 0; enum { pkey_or_name, hash_name, } state = pkey_or_name; char buf[kMaxSignatureAlgorithmNameLen]; // buf_used is always < sizeof(buf). I.e. 
it's always safe to write // buf[buf_used] = 0. size_t buf_used = 0; int pkey_type = 0, hash_nid = 0; // Note that the loop runs to len+1, i.e. it'll process the terminating NUL. for (size_t offset = 0; offset < len + 1; offset++) { const unsigned char c = str[offset]; switch (c) { case '+': if (state == hash_name) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM); ERR_add_error_dataf("+ found in hash name at offset %zu", offset); return false; } if (buf_used == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM); ERR_add_error_dataf("empty public key type at offset %zu", offset); return false; } buf[buf_used] = 0; if (strcmp(buf, "RSA") == 0) { pkey_type = EVP_PKEY_RSA; } else if (strcmp(buf, "RSA-PSS") == 0 || // strcmp(buf, "PSS") == 0) { pkey_type = EVP_PKEY_RSA_PSS; } else if (strcmp(buf, "ECDSA") == 0) { pkey_type = EVP_PKEY_EC; } else { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM); ERR_add_error_dataf("unknown public key type '%s'", buf); return false; } state = hash_name; buf_used = 0; break; case ':': [[fallthrough]]; case 0: if (buf_used == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM); ERR_add_error_dataf("empty element at offset %zu", offset); return false; } buf[buf_used] = 0; if (state == pkey_or_name) { // No '+' was seen thus this is a TLS 1.3-style name. 
bool found = false; for (const auto &candidate : kSignatureAlgorithmNames) { if (strcmp(candidate.name, buf) == 0) { assert(out_i < num_elements); (*out)[out_i++] = candidate.signature_algorithm; found = true; break; } } if (!found) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM); ERR_add_error_dataf("unknown signature algorithm '%s'", buf); return false; } } else { if (strcmp(buf, "SHA1") == 0) { hash_nid = NID_sha1; } else if (strcmp(buf, "SHA256") == 0) { hash_nid = NID_sha256; } else if (strcmp(buf, "SHA384") == 0) { hash_nid = NID_sha384; } else if (strcmp(buf, "SHA512") == 0) { hash_nid = NID_sha512; } else { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM); ERR_add_error_dataf("unknown hash function '%s'", buf); return false; } bool found = false; for (const auto &candidate : kSignatureAlgorithmsMapping) { if (candidate.pkey_type == pkey_type && candidate.hash_nid == hash_nid) { assert(out_i < num_elements); (*out)[out_i++] = candidate.signature_algorithm; found = true; break; } } if (!found) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM); ERR_add_error_dataf("unknown pkey:%d hash:%s", pkey_type, buf); return false; } } state = pkey_or_name; buf_used = 0; break; default: if (buf_used == sizeof(buf) - 1) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM); ERR_add_error_dataf("substring too long at offset %zu", offset); return false; } if (OPENSSL_isalnum(c) || c == '-' || c == '_') { buf[buf_used++] = c; } else { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SIGNATURE_ALGORITHM); ERR_add_error_dataf("invalid character 0x%02x at offest %zu", c, offset); return false; } } } assert(out_i == out->size()); return true; } int SSL_CTX_set1_sigalgs_list(SSL_CTX *ctx, const char *str) { Array sigalgs; if (!parse_sigalgs_list(&sigalgs, str)) { return 0; } if (!SSL_CTX_set_signing_algorithm_prefs(ctx, sigalgs.data(), sigalgs.size()) || !SSL_CTX_set_verify_algorithm_prefs(ctx, sigalgs.data(), sigalgs.size())) { return 0; } return 1; } 
int SSL_set1_sigalgs_list(SSL *ssl, const char *str) { if (!ssl->config) { OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } Array sigalgs; if (!parse_sigalgs_list(&sigalgs, str)) { return 0; } if (!SSL_set_signing_algorithm_prefs(ssl, sigalgs.data(), sigalgs.size()) || !SSL_set_verify_algorithm_prefs(ssl, sigalgs.data(), sigalgs.size())) { return 0; } return 1; } int SSL_CTX_set_verify_algorithm_prefs(SSL_CTX *ctx, const uint16_t *prefs, size_t num_prefs) { return set_sigalg_prefs(&ctx->verify_sigalgs, Span(prefs, num_prefs)); } int SSL_set_verify_algorithm_prefs(SSL *ssl, const uint16_t *prefs, size_t num_prefs) { if (!ssl->config) { OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } return set_sigalg_prefs(&ssl->config->verify_sigalgs, Span(prefs, num_prefs)); } ================================================ FILE: Sources/CNIOBoringSSL/ssl/ssl_session.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright 2005 Nokia. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN // The address of this is a magic value, a pointer to which is returned by // SSL_magic_pending_session_ptr(). It allows a session callback to indicate // that it needs to asynchronously fetch session information. 
static const char g_pending_session_magic = 0; static CRYPTO_EX_DATA_CLASS g_ex_data_class = CRYPTO_EX_DATA_CLASS_INIT_WITH_APP_DATA; static void SSL_SESSION_list_remove(SSL_CTX *ctx, SSL_SESSION *session); static void SSL_SESSION_list_add(SSL_CTX *ctx, SSL_SESSION *session); UniquePtr ssl_session_new(const SSL_X509_METHOD *x509_method) { return MakeUnique(x509_method); } uint32_t ssl_hash_session_id(Span session_id) { // Take the first four bytes of |session_id|. Session IDs are generated by the // server randomly, so we can assume even using the first four bytes results // in a good distribution. uint8_t tmp_storage[sizeof(uint32_t)]; if (session_id.size() < sizeof(tmp_storage)) { OPENSSL_memset(tmp_storage, 0, sizeof(tmp_storage)); OPENSSL_memcpy(tmp_storage, session_id.data(), session_id.size()); session_id = tmp_storage; } uint32_t hash = ((uint32_t)session_id[0]) | ((uint32_t)session_id[1] << 8) | ((uint32_t)session_id[2] << 16) | ((uint32_t)session_id[3] << 24); return hash; } UniquePtr SSL_SESSION_dup(SSL_SESSION *session, int dup_flags) { UniquePtr new_session = ssl_session_new(session->x509_method); if (!new_session) { return nullptr; } new_session->is_server = session->is_server; new_session->ssl_version = session->ssl_version; new_session->is_quic = session->is_quic; new_session->sid_ctx = session->sid_ctx; // Copy the key material. new_session->secret = session->secret; new_session->cipher = session->cipher; // Copy authentication state. 
if (session->psk_identity != nullptr) { new_session->psk_identity.reset( OPENSSL_strdup(session->psk_identity.get())); if (new_session->psk_identity == nullptr) { return nullptr; } } if (session->certs != nullptr) { auto buf_up_ref = [](const CRYPTO_BUFFER *buf) { CRYPTO_BUFFER_up_ref(const_cast(buf)); return const_cast(buf); }; new_session->certs.reset(sk_CRYPTO_BUFFER_deep_copy( session->certs.get(), buf_up_ref, CRYPTO_BUFFER_free)); if (new_session->certs == nullptr) { return nullptr; } } if (!session->x509_method->session_dup(new_session.get(), session)) { return nullptr; } new_session->verify_result = session->verify_result; new_session->ocsp_response = UpRef(session->ocsp_response); new_session->signed_cert_timestamp_list = UpRef(session->signed_cert_timestamp_list); OPENSSL_memcpy(new_session->peer_sha256, session->peer_sha256, SHA256_DIGEST_LENGTH); new_session->peer_sha256_valid = session->peer_sha256_valid; new_session->peer_signature_algorithm = session->peer_signature_algorithm; new_session->timeout = session->timeout; new_session->auth_timeout = session->auth_timeout; new_session->time = session->time; // Copy non-authentication connection properties. 
if (dup_flags & SSL_SESSION_INCLUDE_NONAUTH) { new_session->session_id = session->session_id; new_session->group_id = session->group_id; new_session->original_handshake_hash = session->original_handshake_hash; new_session->ticket_lifetime_hint = session->ticket_lifetime_hint; new_session->ticket_age_add = session->ticket_age_add; new_session->ticket_max_early_data = session->ticket_max_early_data; new_session->extended_master_secret = session->extended_master_secret; new_session->has_application_settings = session->has_application_settings; if (!new_session->early_alpn.CopyFrom(session->early_alpn) || !new_session->quic_early_data_context.CopyFrom( session->quic_early_data_context) || !new_session->local_application_settings.CopyFrom( session->local_application_settings) || !new_session->peer_application_settings.CopyFrom( session->peer_application_settings)) { return nullptr; } } // Copy the ticket. if (dup_flags & SSL_SESSION_INCLUDE_TICKET && !new_session->ticket.CopyFrom(session->ticket)) { return nullptr; } // The new_session does not get a copy of the ex_data. new_session->not_resumable = true; return new_session; } void ssl_session_rebase_time(SSL *ssl, SSL_SESSION *session) { OPENSSL_timeval now = ssl_ctx_get_current_time(ssl->ctx.get()); // To avoid overflows and underflows, if we've gone back in time, update the // time, but mark the session expired. if (session->time > now.tv_sec) { session->time = now.tv_sec; session->timeout = 0; session->auth_timeout = 0; return; } // Adjust the session time and timeouts. If the session has already expired, // clamp the timeouts at zero. 
uint64_t delta = now.tv_sec - session->time; session->time = now.tv_sec; if (session->timeout < delta) { session->timeout = 0; } else { session->timeout -= delta; } if (session->auth_timeout < delta) { session->auth_timeout = 0; } else { session->auth_timeout -= delta; } } void ssl_session_renew_timeout(SSL *ssl, SSL_SESSION *session, uint32_t timeout) { // Rebase the timestamp relative to the current time so |timeout| is measured // correctly. ssl_session_rebase_time(ssl, session); if (session->timeout > timeout) { return; } session->timeout = timeout; if (session->timeout > session->auth_timeout) { session->timeout = session->auth_timeout; } } uint16_t ssl_session_protocol_version(const SSL_SESSION *session) { uint16_t ret; if (!ssl_protocol_version_from_wire(&ret, session->ssl_version)) { // An |SSL_SESSION| will never have an invalid version. This is enforced by // the parser. assert(0); return 0; } return ret; } const EVP_MD *ssl_session_get_digest(const SSL_SESSION *session) { return ssl_get_handshake_digest(ssl_session_protocol_version(session), session->cipher); } bool ssl_get_new_session(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; if (ssl->mode & SSL_MODE_NO_SESSION_CREATION) { OPENSSL_PUT_ERROR(SSL, SSL_R_SESSION_MAY_NOT_BE_CREATED); return false; } UniquePtr session = ssl_session_new(ssl->ctx->x509_method); if (session == NULL) { return false; } session->is_server = ssl->server; session->ssl_version = ssl->s3->version; session->is_quic = SSL_is_quic(ssl); // Fill in the time from the |SSL_CTX|'s clock. OPENSSL_timeval now = ssl_ctx_get_current_time(ssl->ctx.get()); session->time = now.tv_sec; uint16_t version = ssl_protocol_version(ssl); if (version >= TLS1_3_VERSION) { // TLS 1.3 uses tickets as authenticators, so we are willing to use them for // longer. 
session->timeout = ssl->session_ctx->session_psk_dhe_timeout; session->auth_timeout = SSL_DEFAULT_SESSION_AUTH_TIMEOUT; } else { // TLS 1.2 resumption does not incorporate new key material, so we use a // much shorter timeout. session->timeout = ssl->session_ctx->session_timeout; session->auth_timeout = ssl->session_ctx->session_timeout; } if (!session->sid_ctx.TryCopyFrom(hs->config->cert->sid_ctx)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } // The session is marked not resumable until it is completely filled in. session->not_resumable = true; session->verify_result = X509_V_ERR_INVALID_CALL; hs->new_session = std::move(session); ssl_set_session(ssl, NULL); return true; } bool ssl_ctx_rotate_ticket_encryption_key(SSL_CTX *ctx) { OPENSSL_timeval now = ssl_ctx_get_current_time(ctx); { // Avoid acquiring a write lock in the common case (i.e. a non-default key // is used or the default keys have not expired yet). MutexReadLock lock(&ctx->lock); if (ctx->ticket_key_current && (ctx->ticket_key_current->next_rotation_tv_sec == 0 || ctx->ticket_key_current->next_rotation_tv_sec > now.tv_sec) && (!ctx->ticket_key_prev || ctx->ticket_key_prev->next_rotation_tv_sec > now.tv_sec)) { return true; } } MutexWriteLock lock(&ctx->lock); if (!ctx->ticket_key_current || (ctx->ticket_key_current->next_rotation_tv_sec != 0 && ctx->ticket_key_current->next_rotation_tv_sec <= now.tv_sec)) { // The current key has not been initialized or it is expired. auto new_key = bssl::MakeUnique(); if (!new_key) { return false; } RAND_bytes(new_key->name, 16); RAND_bytes(new_key->hmac_key, 16); RAND_bytes(new_key->aes_key, 16); new_key->next_rotation_tv_sec = now.tv_sec + SSL_DEFAULT_TICKET_KEY_ROTATION_INTERVAL; if (ctx->ticket_key_current) { // The current key expired. Rotate it to prev and bump up its rotation // timestamp. Note that even with the new rotation time it may still be // expired and get dropped below. 
ctx->ticket_key_current->next_rotation_tv_sec += SSL_DEFAULT_TICKET_KEY_ROTATION_INTERVAL; ctx->ticket_key_prev = std::move(ctx->ticket_key_current); } ctx->ticket_key_current = std::move(new_key); } // Drop an expired prev key. if (ctx->ticket_key_prev && ctx->ticket_key_prev->next_rotation_tv_sec <= now.tv_sec) { ctx->ticket_key_prev.reset(); } return true; } static int ssl_encrypt_ticket_with_cipher_ctx(SSL_HANDSHAKE *hs, CBB *out, const uint8_t *session_buf, size_t session_len) { ScopedEVP_CIPHER_CTX ctx; ScopedHMAC_CTX hctx; // If the session is too long, decline to send a ticket. static const size_t kMaxTicketOverhead = 16 + EVP_MAX_IV_LENGTH + EVP_MAX_BLOCK_LENGTH + EVP_MAX_MD_SIZE; if (session_len > 0xffff - kMaxTicketOverhead) { return 1; } // Initialize HMAC and cipher contexts. If callback present it does all the // work otherwise use generated values from parent ctx. SSL_CTX *tctx = hs->ssl->session_ctx.get(); uint8_t iv[EVP_MAX_IV_LENGTH]; uint8_t key_name[16]; if (tctx->ticket_key_cb != NULL) { int ret = tctx->ticket_key_cb(hs->ssl, key_name, iv, ctx.get(), hctx.get(), 1 /* encrypt */); if (ret < 0) { return 0; } if (ret == 0) { // The caller requested to send no ticket, so write nothing to |out|. return 1; } } else { // Rotate ticket key if necessary. 
if (!ssl_ctx_rotate_ticket_encryption_key(tctx)) { return 0; } MutexReadLock lock(&tctx->lock); if (!RAND_bytes(iv, 16) || !EVP_EncryptInit_ex(ctx.get(), EVP_aes_128_cbc(), NULL, tctx->ticket_key_current->aes_key, iv) || !HMAC_Init_ex(hctx.get(), tctx->ticket_key_current->hmac_key, 16, tlsext_tick_md(), NULL)) { return 0; } OPENSSL_memcpy(key_name, tctx->ticket_key_current->name, 16); } uint8_t *ptr; if (!CBB_add_bytes(out, key_name, 16) || !CBB_add_bytes(out, iv, EVP_CIPHER_CTX_iv_length(ctx.get())) || !CBB_reserve(out, &ptr, session_len + EVP_MAX_BLOCK_LENGTH)) { return 0; } size_t total = 0; #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) OPENSSL_memcpy(ptr, session_buf, session_len); total = session_len; #else int len; if (!EVP_EncryptUpdate(ctx.get(), ptr + total, &len, session_buf, session_len)) { return 0; } total += len; if (!EVP_EncryptFinal_ex(ctx.get(), ptr + total, &len)) { return 0; } total += len; #endif if (!CBB_did_write(out, total)) { return 0; } unsigned hlen; if (!HMAC_Update(hctx.get(), CBB_data(out), CBB_len(out)) || // !CBB_reserve(out, &ptr, EVP_MAX_MD_SIZE) || // !HMAC_Final(hctx.get(), ptr, &hlen) || // !CBB_did_write(out, hlen)) { return 0; } return 1; } static int ssl_encrypt_ticket_with_method(SSL_HANDSHAKE *hs, CBB *out, const uint8_t *session_buf, size_t session_len) { SSL *const ssl = hs->ssl; const SSL_TICKET_AEAD_METHOD *method = ssl->session_ctx->ticket_aead_method; const size_t max_overhead = method->max_overhead(ssl); const size_t max_out = session_len + max_overhead; if (max_out < max_overhead) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); return 0; } uint8_t *ptr; if (!CBB_reserve(out, &ptr, max_out)) { return 0; } size_t out_len; if (!method->seal(ssl, ptr, &out_len, max_out, session_buf, session_len)) { OPENSSL_PUT_ERROR(SSL, SSL_R_TICKET_ENCRYPTION_FAILED); return 0; } if (!CBB_did_write(out, out_len)) { return 0; } return 1; } bool ssl_encrypt_ticket(SSL_HANDSHAKE *hs, CBB *out, const SSL_SESSION *session) { // Serialize the 
SSL_SESSION to be encoded into the ticket. uint8_t *session_buf = nullptr; size_t session_len; if (!SSL_SESSION_to_bytes_for_ticket(session, &session_buf, &session_len)) { return false; } bssl::UniquePtr free_session_buf(session_buf); if (hs->ssl->session_ctx->ticket_aead_method) { return ssl_encrypt_ticket_with_method(hs, out, session_buf, session_len); } else { return ssl_encrypt_ticket_with_cipher_ctx(hs, out, session_buf, session_len); } } SSLSessionType ssl_session_get_type(const SSL_SESSION *session) { if (session->not_resumable) { return SSLSessionType::kNotResumable; } if (ssl_session_protocol_version(session) >= TLS1_3_VERSION) { return session->ticket.empty() ? SSLSessionType::kNotResumable : SSLSessionType::kPreSharedKey; } if (!session->ticket.empty()) { return SSLSessionType::kTicket; } if (!session->session_id.empty()) { return SSLSessionType::kID; } return SSLSessionType::kNotResumable; } bool ssl_session_is_context_valid(const SSL_HANDSHAKE *hs, const SSL_SESSION *session) { return session != nullptr && Span(session->sid_ctx) == hs->config->cert->sid_ctx; } bool ssl_session_is_time_valid(const SSL *ssl, const SSL_SESSION *session) { if (session == NULL) { return false; } OPENSSL_timeval now = ssl_ctx_get_current_time(ssl->ctx.get()); // Reject tickets from the future to avoid underflow. if (now.tv_sec < session->time) { return false; } return session->timeout > now.tv_sec - session->time; } bool ssl_session_is_resumable(const SSL_HANDSHAKE *hs, const SSL_SESSION *session) { const SSL *const ssl = hs->ssl; return ssl_session_is_context_valid(hs, session) && // The session must have been created by the same type of end point as // we're now using it with. ssl->server == session->is_server && // The session must not be expired. ssl_session_is_time_valid(ssl, session) && // Only resume if the session's version matches the negotiated // version. ssl->s3->version == session->ssl_version && // Only resume if the session's cipher matches the negotiated one. 
This // is stricter than necessary for TLS 1.3, which allows cross-cipher // resumption if the PRF hashes match. We require an exact match for // simplicity. If loosening this, the 0-RTT accept logic must be // updated to check the cipher. hs->new_cipher == session->cipher && // If the session contains a client certificate (either the full // certificate or just the hash) then require that the form of the // certificate matches the current configuration. ((sk_CRYPTO_BUFFER_num(session->certs.get()) == 0 && !session->peer_sha256_valid) || session->peer_sha256_valid == hs->config->retain_only_sha256_of_client_certs) && // Only resume if the underlying transport protocol hasn't changed. // This is to prevent cross-protocol resumption between QUIC and TCP. SSL_is_quic(ssl) == int{session->is_quic}; } // ssl_lookup_session looks up |session_id| in the session cache and sets // |*out_session| to an |SSL_SESSION| object if found. static enum ssl_hs_wait_t ssl_lookup_session( SSL_HANDSHAKE *hs, UniquePtr *out_session, Span session_id) { SSL *const ssl = hs->ssl; out_session->reset(); if (session_id.empty() || session_id.size() > SSL_MAX_SSL_SESSION_ID_LENGTH) { return ssl_hs_ok; } UniquePtr session; // Try the internal cache, if it exists. if (!(ssl->session_ctx->session_cache_mode & SSL_SESS_CACHE_NO_INTERNAL_LOOKUP)) { uint32_t hash = ssl_hash_session_id(session_id); auto cmp = [](const void *key, const SSL_SESSION *sess) -> int { Span key_id = *reinterpret_cast *>(key); return key_id == sess->session_id ? 0 : 1; }; MutexReadLock lock(&ssl->session_ctx->lock); // |lh_SSL_SESSION_retrieve_key| returns a non-owning pointer. session = UpRef(lh_SSL_SESSION_retrieve_key(ssl->session_ctx->sessions, &session_id, hash, cmp)); // TODO(davidben): This should probably move it to the front of the list. } // Fall back to the external cache, if it exists. 
if (!session && ssl->session_ctx->get_session_cb != nullptr) { int copy = 1; session.reset(ssl->session_ctx->get_session_cb(ssl, session_id.data(), session_id.size(), ©)); if (!session) { return ssl_hs_ok; } if (session.get() == SSL_magic_pending_session_ptr()) { session.release(); // This pointer is not actually owned. return ssl_hs_pending_session; } // Increment reference count now if the session callback asks us to do so // (note that if the session structures returned by the callback are shared // between threads, it must handle the reference count itself [i.e. copy == // 0], or things won't be thread-safe). if (copy) { SSL_SESSION_up_ref(session.get()); } // Add the externally cached session to the internal cache if necessary. if (!(ssl->session_ctx->session_cache_mode & SSL_SESS_CACHE_NO_INTERNAL_STORE)) { SSL_CTX_add_session(ssl->session_ctx.get(), session.get()); } } if (session && !ssl_session_is_time_valid(ssl, session.get())) { // The session was from the cache, so remove it. SSL_CTX_remove_session(ssl->session_ctx.get(), session.get()); session.reset(); } *out_session = std::move(session); return ssl_hs_ok; } enum ssl_hs_wait_t ssl_get_prev_session(SSL_HANDSHAKE *hs, UniquePtr *out_session, bool *out_tickets_supported, bool *out_renew_ticket, const SSL_CLIENT_HELLO *client_hello) { // This is used only by servers. assert(hs->ssl->server); UniquePtr session; bool renew_ticket = false; // If tickets are disabled, always behave as if no tickets are present. 
CBS ticket; const bool tickets_supported = !(SSL_get_options(hs->ssl) & SSL_OP_NO_TICKET) && ssl_client_hello_get_extension(client_hello, &ticket, TLSEXT_TYPE_session_ticket); if (tickets_supported && CBS_len(&ticket) != 0) { switch (ssl_process_ticket( hs, &session, &renew_ticket, ticket, Span(client_hello->session_id, client_hello->session_id_len))) { case ssl_ticket_aead_success: break; case ssl_ticket_aead_ignore_ticket: assert(!session); break; case ssl_ticket_aead_error: return ssl_hs_error; case ssl_ticket_aead_retry: return ssl_hs_pending_ticket; } } else { // The client didn't send a ticket, so the session ID is a real ID. enum ssl_hs_wait_t lookup_ret = ssl_lookup_session( hs, &session, Span(client_hello->session_id, client_hello->session_id_len)); if (lookup_ret != ssl_hs_ok) { return lookup_ret; } } *out_session = std::move(session); *out_tickets_supported = tickets_supported; *out_renew_ticket = renew_ticket; return ssl_hs_ok; } static bool remove_session(SSL_CTX *ctx, SSL_SESSION *session, bool lock) { if (session == nullptr || session->session_id.empty()) { return false; } if (lock) { CRYPTO_MUTEX_lock_write(&ctx->lock); } SSL_SESSION *found_session = lh_SSL_SESSION_retrieve(ctx->sessions, session); bool found = found_session == session; if (found) { found_session = lh_SSL_SESSION_delete(ctx->sessions, session); SSL_SESSION_list_remove(ctx, session); } if (lock) { CRYPTO_MUTEX_unlock_write(&ctx->lock); } if (found) { // TODO(https://crbug.com/boringssl/251): Callbacks should not be called // under a lock. 
if (ctx->remove_session_cb != nullptr) { ctx->remove_session_cb(ctx, found_session); } SSL_SESSION_free(found_session); } return found; } void ssl_set_session(SSL *ssl, SSL_SESSION *session) { if (ssl->session.get() == session) { return; } ssl->session = UpRef(session); } // locked by SSL_CTX in the calling function static void SSL_SESSION_list_remove(SSL_CTX *ctx, SSL_SESSION *session) { if (session->next == NULL || session->prev == NULL) { return; } if (session->next == (SSL_SESSION *)&ctx->session_cache_tail) { // last element in list if (session->prev == (SSL_SESSION *)&ctx->session_cache_head) { // only one element in list ctx->session_cache_head = NULL; ctx->session_cache_tail = NULL; } else { ctx->session_cache_tail = session->prev; session->prev->next = (SSL_SESSION *)&(ctx->session_cache_tail); } } else { if (session->prev == (SSL_SESSION *)&ctx->session_cache_head) { // first element in list ctx->session_cache_head = session->next; session->next->prev = (SSL_SESSION *)&(ctx->session_cache_head); } else { // middle of list session->next->prev = session->prev; session->prev->next = session->next; } } session->prev = session->next = NULL; } static void SSL_SESSION_list_add(SSL_CTX *ctx, SSL_SESSION *session) { if (session->next != NULL && session->prev != NULL) { SSL_SESSION_list_remove(ctx, session); } if (ctx->session_cache_head == NULL) { ctx->session_cache_head = session; ctx->session_cache_tail = session; session->prev = (SSL_SESSION *)&(ctx->session_cache_head); session->next = (SSL_SESSION *)&(ctx->session_cache_tail); } else { session->next = ctx->session_cache_head; session->next->prev = session; session->prev = (SSL_SESSION *)&(ctx->session_cache_head); ctx->session_cache_head = session; } } static bool add_session_locked(SSL_CTX *ctx, UniquePtr session) { SSL_SESSION *new_session = session.get(); SSL_SESSION *old_session; if (!lh_SSL_SESSION_insert(ctx->sessions, &old_session, new_session)) { return false; } // |ctx->sessions| took ownership of 
|new_session| and gave us back a // reference to |old_session|. (|old_session| may be the same as // |new_session|, in which case we traded identical references with // |ctx->sessions|.) session.release(); session.reset(old_session); if (old_session != nullptr) { if (old_session == new_session) { // |session| was already in the cache. There are no linked list pointers // to update. return false; } // There was a session ID collision. |old_session| was replaced with // |session| in the hash table, so |old_session| must be removed from the // linked list to match. SSL_SESSION_list_remove(ctx, old_session); } // This does not increment the reference count. Although |session| is inserted // into two structures (a doubly-linked list and the hash table), |ctx| only // takes one reference. SSL_SESSION_list_add(ctx, new_session); // Enforce any cache size limits. if (SSL_CTX_sess_get_cache_size(ctx) > 0) { while (lh_SSL_SESSION_num_items(ctx->sessions) > SSL_CTX_sess_get_cache_size(ctx)) { if (!remove_session(ctx, ctx->session_cache_tail, /*lock=*/false)) { break; } } } return true; } void ssl_update_cache(SSL *ssl) { SSL_CTX *ctx = ssl->session_ctx.get(); SSL_SESSION *session = ssl->s3->established_session.get(); int mode = SSL_is_server(ssl) ? SSL_SESS_CACHE_SERVER : SSL_SESS_CACHE_CLIENT; if (!SSL_SESSION_is_resumable(session) || (ctx->session_cache_mode & mode) != mode) { return; } // Clients never use the internal session cache. if (ssl->server && !(ctx->session_cache_mode & SSL_SESS_CACHE_NO_INTERNAL_STORE)) { UniquePtr ref = UpRef(session); bool remove_expired_sessions = false; { MutexWriteLock lock(&ctx->lock); add_session_locked(ctx, std::move(ref)); if (!(ctx->session_cache_mode & SSL_SESS_CACHE_NO_AUTO_CLEAR)) { // Automatically flush the internal session cache every 255 connections. 
ctx->handshakes_since_cache_flush++; if (ctx->handshakes_since_cache_flush >= 255) { remove_expired_sessions = true; ctx->handshakes_since_cache_flush = 0; } } } if (remove_expired_sessions) { // |SSL_CTX_flush_sessions| takes the lock we just released. We could // merge the critical sections, but we'd then call user code under a // lock, or compute |now| earlier, even when not flushing. OPENSSL_timeval now = ssl_ctx_get_current_time(ssl->ctx.get()); SSL_CTX_flush_sessions(ctx, now.tv_sec); } } if (ctx->new_session_cb != nullptr) { UniquePtr ref = UpRef(session); if (ctx->new_session_cb(ssl, ref.get())) { // |new_session_cb|'s return value signals whether it took ownership. ref.release(); } } } BSSL_NAMESPACE_END using namespace bssl; ssl_session_st::ssl_session_st(const SSL_X509_METHOD *method) : RefCounted(CheckSubClass()), x509_method(method), extended_master_secret(false), peer_sha256_valid(false), not_resumable(false), ticket_age_add_valid(false), is_server(false), is_quic(false), has_application_settings(false) { CRYPTO_new_ex_data(&ex_data); time = ::time(nullptr); } ssl_session_st::~ssl_session_st() { CRYPTO_free_ex_data(&g_ex_data_class, this, &ex_data); x509_method->session_clear(this); } SSL_SESSION *SSL_SESSION_new(const SSL_CTX *ctx) { return ssl_session_new(ctx->x509_method).release(); } int SSL_SESSION_up_ref(SSL_SESSION *session) { session->UpRefInternal(); return 1; } void SSL_SESSION_free(SSL_SESSION *session) { if (session == nullptr) { return; } session->DecRefInternal(); } const uint8_t *SSL_SESSION_get_id(const SSL_SESSION *session, unsigned *out_len) { if (out_len != NULL) { *out_len = session->session_id.size(); } return session->session_id.data(); } int SSL_SESSION_set1_id(SSL_SESSION *session, const uint8_t *sid, size_t sid_len) { if (!session->session_id.TryCopyFrom(Span(sid, sid_len))) { OPENSSL_PUT_ERROR(SSL, SSL_R_SSL_SESSION_ID_TOO_LONG); return 0; } return 1; } uint32_t SSL_SESSION_get_timeout(const SSL_SESSION *session) { return 
session->timeout; } uint64_t SSL_SESSION_get_time(const SSL_SESSION *session) { if (session == NULL) { // NULL should crash, but silently accept it here for compatibility. return 0; } return session->time; } X509 *SSL_SESSION_get0_peer(const SSL_SESSION *session) { return session->x509_peer; } const STACK_OF(CRYPTO_BUFFER) *SSL_SESSION_get0_peer_certificates( const SSL_SESSION *session) { return session->certs.get(); } void SSL_SESSION_get0_signed_cert_timestamp_list(const SSL_SESSION *session, const uint8_t **out, size_t *out_len) { if (session->signed_cert_timestamp_list) { *out = CRYPTO_BUFFER_data(session->signed_cert_timestamp_list.get()); *out_len = CRYPTO_BUFFER_len(session->signed_cert_timestamp_list.get()); } else { *out = nullptr; *out_len = 0; } } void SSL_SESSION_get0_ocsp_response(const SSL_SESSION *session, const uint8_t **out, size_t *out_len) { if (session->ocsp_response) { *out = CRYPTO_BUFFER_data(session->ocsp_response.get()); *out_len = CRYPTO_BUFFER_len(session->ocsp_response.get()); } else { *out = nullptr; *out_len = 0; } } size_t SSL_SESSION_get_master_key(const SSL_SESSION *session, uint8_t *out, size_t max_out) { if (max_out == 0) { return session->secret.size(); } if (max_out > session->secret.size()) { max_out = session->secret.size(); } OPENSSL_memcpy(out, session->secret.data(), max_out); return max_out; } uint64_t SSL_SESSION_set_time(SSL_SESSION *session, uint64_t time) { if (session == NULL) { return 0; } session->time = time; return time; } uint32_t SSL_SESSION_set_timeout(SSL_SESSION *session, uint32_t timeout) { if (session == NULL) { return 0; } session->timeout = timeout; session->auth_timeout = timeout; return 1; } const uint8_t *SSL_SESSION_get0_id_context(const SSL_SESSION *session, unsigned *out_len) { if (out_len != NULL) { *out_len = session->sid_ctx.size(); } return session->sid_ctx.data(); } int SSL_SESSION_set1_id_context(SSL_SESSION *session, const uint8_t *sid_ctx, size_t sid_ctx_len) { if 
(!session->sid_ctx.TryCopyFrom(Span(sid_ctx, sid_ctx_len))) { OPENSSL_PUT_ERROR(SSL, SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG); return 0; } return 1; } int SSL_SESSION_should_be_single_use(const SSL_SESSION *session) { return ssl_session_protocol_version(session) >= TLS1_3_VERSION; } int SSL_SESSION_is_resumable(const SSL_SESSION *session) { return ssl_session_get_type(session) != SSLSessionType::kNotResumable; } int SSL_SESSION_has_ticket(const SSL_SESSION *session) { return !session->ticket.empty(); } void SSL_SESSION_get0_ticket(const SSL_SESSION *session, const uint8_t **out_ticket, size_t *out_len) { if (out_ticket != nullptr) { *out_ticket = session->ticket.data(); } *out_len = session->ticket.size(); } int SSL_SESSION_set_ticket(SSL_SESSION *session, const uint8_t *ticket, size_t ticket_len) { return session->ticket.CopyFrom(Span(ticket, ticket_len)); } uint32_t SSL_SESSION_get_ticket_lifetime_hint(const SSL_SESSION *session) { return session->ticket_lifetime_hint; } const SSL_CIPHER *SSL_SESSION_get0_cipher(const SSL_SESSION *session) { return session->cipher; } int SSL_SESSION_has_peer_sha256(const SSL_SESSION *session) { return session->peer_sha256_valid; } void SSL_SESSION_get0_peer_sha256(const SSL_SESSION *session, const uint8_t **out_ptr, size_t *out_len) { if (session->peer_sha256_valid) { *out_ptr = session->peer_sha256; *out_len = sizeof(session->peer_sha256); } else { *out_ptr = nullptr; *out_len = 0; } } int SSL_SESSION_early_data_capable(const SSL_SESSION *session) { return ssl_session_protocol_version(session) >= TLS1_3_VERSION && session->ticket_max_early_data != 0; } SSL_SESSION *SSL_SESSION_copy_without_early_data(SSL_SESSION *session) { if (!SSL_SESSION_early_data_capable(session)) { return UpRef(session).release(); } bssl::UniquePtr copy = SSL_SESSION_dup(session, SSL_SESSION_DUP_ALL); if (!copy) { return nullptr; } copy->ticket_max_early_data = 0; // Copied sessions are non-resumable until they're completely filled in. 
copy->not_resumable = session->not_resumable; assert(!SSL_SESSION_early_data_capable(copy.get())); return copy.release(); } SSL_SESSION *SSL_magic_pending_session_ptr(void) { return (SSL_SESSION *)&g_pending_session_magic; } SSL_SESSION *SSL_get_session(const SSL *ssl) { // Once the initially handshake completes, we return the most recently // established session. In particular, if there is a pending renegotiation, we // do not return information about it until it completes. // // Code in the handshake must either use |hs->new_session| (if updating a // partial session) or |ssl_handshake_session| (if trying to query properties // consistently across TLS 1.2 resumption and other handshakes). if (ssl->s3->established_session != nullptr) { return ssl->s3->established_session.get(); } // Otherwise, we must be in the initial handshake. SSL_HANDSHAKE *hs = ssl->s3->hs.get(); assert(hs != nullptr); assert(!ssl->s3->initial_handshake_complete); // Return the 0-RTT session, if in the 0-RTT state. While the handshake has // not actually completed, the public accessors all report properties as if // it has. if (hs->early_session) { return hs->early_session.get(); } // Otherwise, return the partial session. 
return (SSL_SESSION *)ssl_handshake_session(hs); } SSL_SESSION *SSL_get1_session(SSL *ssl) { SSL_SESSION *ret = SSL_get_session(ssl); if (ret != NULL) { SSL_SESSION_up_ref(ret); } return ret; } int SSL_SESSION_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func) { return CRYPTO_get_ex_new_index_ex(&g_ex_data_class, argl, argp, free_func); } int SSL_SESSION_set_ex_data(SSL_SESSION *session, int idx, void *arg) { return CRYPTO_set_ex_data(&session->ex_data, idx, arg); } void *SSL_SESSION_get_ex_data(const SSL_SESSION *session, int idx) { return CRYPTO_get_ex_data(&session->ex_data, idx); } int SSL_CTX_add_session(SSL_CTX *ctx, SSL_SESSION *session) { UniquePtr owned_session = UpRef(session); MutexWriteLock lock(&ctx->lock); return add_session_locked(ctx, std::move(owned_session)); } int SSL_CTX_remove_session(SSL_CTX *ctx, SSL_SESSION *session) { return remove_session(ctx, session, /*lock=*/true); } int SSL_set_session(SSL *ssl, SSL_SESSION *session) { // SSL_set_session may only be called before the handshake has started. if (ssl->s3->initial_handshake_complete || // ssl->s3->hs == NULL || // ssl->s3->hs->state != 0) { abort(); } ssl_set_session(ssl, session); return 1; } uint32_t SSL_CTX_set_timeout(SSL_CTX *ctx, uint32_t timeout) { if (ctx == NULL) { return 0; } // Historically, zero was treated as |SSL_DEFAULT_SESSION_TIMEOUT|. 
if (timeout == 0) { timeout = SSL_DEFAULT_SESSION_TIMEOUT; } uint32_t old_timeout = ctx->session_timeout; ctx->session_timeout = timeout; return old_timeout; } uint32_t SSL_CTX_get_timeout(const SSL_CTX *ctx) { if (ctx == NULL) { return 0; } return ctx->session_timeout; } void SSL_CTX_set_session_psk_dhe_timeout(SSL_CTX *ctx, uint32_t timeout) { ctx->session_psk_dhe_timeout = timeout; } typedef struct timeout_param_st { SSL_CTX *ctx; uint64_t time; LHASH_OF(SSL_SESSION) *cache; } TIMEOUT_PARAM; static void timeout_doall_arg(SSL_SESSION *session, void *void_param) { TIMEOUT_PARAM *param = reinterpret_cast(void_param); if (param->time == 0 || // session->time + session->timeout < session->time || // param->time > (session->time + session->timeout)) { // TODO(davidben): This can probably just call |remove_session|. (void)lh_SSL_SESSION_delete(param->cache, session); SSL_SESSION_list_remove(param->ctx, session); // TODO(https://crbug.com/boringssl/251): Callbacks should not be called // under a lock. 
if (param->ctx->remove_session_cb != NULL) { param->ctx->remove_session_cb(param->ctx, session); } SSL_SESSION_free(session); } } void SSL_CTX_flush_sessions(SSL_CTX *ctx, uint64_t time) { TIMEOUT_PARAM tp; tp.ctx = ctx; tp.cache = ctx->sessions; if (tp.cache == NULL) { return; } tp.time = time; MutexWriteLock lock(&ctx->lock); lh_SSL_SESSION_doall_arg(tp.cache, timeout_doall_arg, &tp); } void SSL_CTX_sess_set_new_cb(SSL_CTX *ctx, int (*cb)(SSL *ssl, SSL_SESSION *session)) { ctx->new_session_cb = cb; } int (*SSL_CTX_sess_get_new_cb(SSL_CTX *ctx))(SSL *ssl, SSL_SESSION *session) { return ctx->new_session_cb; } void SSL_CTX_sess_set_remove_cb(SSL_CTX *ctx, void (*cb)(SSL_CTX *ctx, SSL_SESSION *session)) { ctx->remove_session_cb = cb; } void (*SSL_CTX_sess_get_remove_cb(SSL_CTX *ctx))(SSL_CTX *ctx, SSL_SESSION *session) { return ctx->remove_session_cb; } void SSL_CTX_sess_set_get_cb(SSL_CTX *ctx, SSL_SESSION *(*cb)(SSL *ssl, const uint8_t *id, int id_len, int *out_copy)) { ctx->get_session_cb = cb; } SSL_SESSION *(*SSL_CTX_sess_get_get_cb(SSL_CTX *ctx))(SSL *ssl, const uint8_t *id, int id_len, int *out_copy) { return ctx->get_session_cb; } void SSL_CTX_set_info_callback(SSL_CTX *ctx, void (*cb)(const SSL *ssl, int type, int value)) { ctx->info_callback = cb; } void (*SSL_CTX_get_info_callback(SSL_CTX *ctx))(const SSL *ssl, int type, int value) { return ctx->info_callback; } ================================================ FILE: Sources/CNIOBoringSSL/ssl/ssl_stat.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright 2005 Nokia. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include "internal.h" const char *SSL_state_string_long(const SSL *ssl) { if (ssl->s3->hs == nullptr) { return "SSL negotiation finished successfully"; } return ssl->server ? ssl_server_handshake_state(ssl->s3->hs.get()) : ssl_client_handshake_state(ssl->s3->hs.get()); } const char *SSL_state_string(const SSL *ssl) { return "!!!!!!"; } const char *SSL_alert_type_string_long(int value) { value >>= 8; if (value == SSL3_AL_WARNING) { return "warning"; } else if (value == SSL3_AL_FATAL) { return "fatal"; } return "unknown"; } const char *SSL_alert_type_string(int value) { return "!"; } const char *SSL_alert_desc_string(int value) { return "!!"; } const char *SSL_alert_desc_string_long(int value) { switch (value & 0xff) { case SSL3_AD_CLOSE_NOTIFY: return "close notify"; case SSL3_AD_UNEXPECTED_MESSAGE: return "unexpected_message"; case SSL3_AD_BAD_RECORD_MAC: return "bad record mac"; case SSL3_AD_DECOMPRESSION_FAILURE: return "decompression failure"; case SSL3_AD_HANDSHAKE_FAILURE: return "handshake failure"; case SSL3_AD_NO_CERTIFICATE: return "no certificate"; case SSL3_AD_BAD_CERTIFICATE: return "bad certificate"; case SSL3_AD_UNSUPPORTED_CERTIFICATE: return "unsupported certificate"; case SSL3_AD_CERTIFICATE_REVOKED: return "certificate revoked"; case SSL3_AD_CERTIFICATE_EXPIRED: return "certificate expired"; case SSL3_AD_CERTIFICATE_UNKNOWN: return "certificate unknown"; case SSL3_AD_ILLEGAL_PARAMETER: return "illegal parameter"; case TLS1_AD_DECRYPTION_FAILED: return "decryption failed"; case TLS1_AD_RECORD_OVERFLOW: return "record overflow"; case TLS1_AD_UNKNOWN_CA: return "unknown CA"; case TLS1_AD_ACCESS_DENIED: return "access denied"; case TLS1_AD_DECODE_ERROR: return "decode error"; case TLS1_AD_DECRYPT_ERROR: return "decrypt error"; case TLS1_AD_EXPORT_RESTRICTION: return "export restriction"; case 
TLS1_AD_PROTOCOL_VERSION: return "protocol version"; case TLS1_AD_INSUFFICIENT_SECURITY: return "insufficient security"; case TLS1_AD_INTERNAL_ERROR: return "internal error"; case SSL3_AD_INAPPROPRIATE_FALLBACK: return "inappropriate fallback"; case TLS1_AD_USER_CANCELLED: return "user canceled"; case TLS1_AD_NO_RENEGOTIATION: return "no renegotiation"; case TLS1_AD_MISSING_EXTENSION: return "missing extension"; case TLS1_AD_UNSUPPORTED_EXTENSION: return "unsupported extension"; case TLS1_AD_CERTIFICATE_UNOBTAINABLE: return "certificate unobtainable"; case TLS1_AD_UNRECOGNIZED_NAME: return "unrecognized name"; case TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE: return "bad certificate status response"; case TLS1_AD_BAD_CERTIFICATE_HASH_VALUE: return "bad certificate hash value"; case TLS1_AD_UNKNOWN_PSK_IDENTITY: return "unknown PSK identity"; case TLS1_AD_CERTIFICATE_REQUIRED: return "certificate required"; case TLS1_AD_NO_APPLICATION_PROTOCOL: return "no application protocol"; case TLS1_AD_ECH_REQUIRED: return "ECH required"; default: return "unknown"; } } ================================================ FILE: Sources/CNIOBoringSSL/ssl/ssl_transcript.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright 2005 Nokia. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include "internal.h" BSSL_NAMESPACE_BEGIN SSLTranscript::SSLTranscript(bool is_dtls) : is_dtls_(is_dtls) {} SSLTranscript::~SSLTranscript() {} bool SSLTranscript::Init() { buffer_.reset(BUF_MEM_new()); if (!buffer_) { return false; } hash_.Reset(); return true; } bool SSLTranscript::InitHash(uint16_t version, const SSL_CIPHER *cipher) { version_ = version; const EVP_MD *md = ssl_get_handshake_digest(version, cipher); if (Digest() == md) { // No need to re-hash the buffer. return true; } if (!HashBuffer(hash_.get(), md)) { return false; } if (is_dtls_ && version_ >= TLS1_3_VERSION) { // In DTLS 1.3, prior to the call to InitHash, the message (if present) in // the buffer has the DTLS 1.2 header. After the call to InitHash, the TLS // 1.3 header is written by SSLTranscript::Update. If the buffer isn't freed // here, it would have a mix of different header formats and using it would // yield wrong results. However, there's no need for the buffer once the // version and the digest for the cipher suite are known, so the buffer is // freed here to avoid potential misuse of the SSLTranscript object. FreeBuffer(); } return true; } bool SSLTranscript::HashBuffer(EVP_MD_CTX *ctx, const EVP_MD *digest) const { if (!EVP_DigestInit_ex(ctx, digest, nullptr)) { return false; } if (!is_dtls_ || version_ < TLS1_3_VERSION) { return EVP_DigestUpdate(ctx, buffer_->data, buffer_->length); } // If the version is DTLS 1.3 and we still have a buffer, then there should be // at most a single DTLSHandshake message in the buffer, for the ClientHello. // On the server side, the version (DTLS 1.3) and cipher suite are chosen in // response to the first ClientHello, and InitHash is called before that // ClientHello is added to the SSLTranscript, so the buffer is empty if this // SSLTranscript is on the server. 
if (buffer_->length == 0) { return true; } // On the client side, we can receive either a ServerHello or // HelloRetryRequest in response to the ClientHello. Regardless of which // message we receive, the client code calls InitHash before updating the // transcript with that message, so the ClientHello is the only message in the // buffer. In DTLS 1.3, we need to skip the message_seq, fragment_offset, and // fragment_length fields from the DTLSHandshake message in the buffer. The // structure of a DTLSHandshake message is as follows (RFC 9147, section 5.2): // // struct { // HandshakeType msg_type; /* handshake type */ // uint24 length; /* bytes in message */ // uint16 message_seq; /* DTLS-required field */ // uint24 fragment_offset; /* DTLS-required field */ // uint24 fragment_length; /* DTLS-required field */ // select (msg_type) { // /* omitted for brevity */ // } body; // } DTLSHandshake; CBS buf, header; CBS_init(&buf, reinterpret_cast(buffer_->data), buffer_->length); if (!CBS_get_bytes(&buf, &header, 4) || // !CBS_skip(&buf, 8) || // !EVP_DigestUpdate(ctx, CBS_data(&header), CBS_len(&header)) || // !EVP_DigestUpdate(ctx, CBS_data(&buf), CBS_len(&buf))) { return false; } return true; } void SSLTranscript::FreeBuffer() { buffer_.reset(); } size_t SSLTranscript::DigestLen() const { return EVP_MD_size(Digest()); } const EVP_MD *SSLTranscript::Digest() const { return EVP_MD_CTX_get0_md(hash_.get()); } bool SSLTranscript::UpdateForHelloRetryRequest() { if (buffer_) { buffer_->length = 0; } uint8_t old_hash[EVP_MAX_MD_SIZE]; size_t hash_len; if (!GetHash(old_hash, &hash_len)) { return false; } const uint8_t header[4] = {SSL3_MT_MESSAGE_HASH, 0, 0, static_cast(hash_len)}; if (!EVP_DigestInit_ex(hash_.get(), Digest(), nullptr) || !AddToBufferOrHash(header) || !AddToBufferOrHash(Span(old_hash, hash_len))) { return false; } return true; } bool SSLTranscript::CopyToHashContext(EVP_MD_CTX *ctx, const EVP_MD *digest) const { const EVP_MD *transcript_digest = Digest(); if 
(transcript_digest != nullptr && EVP_MD_type(transcript_digest) == EVP_MD_type(digest)) { return EVP_MD_CTX_copy_ex(ctx, hash_.get()); } if (buffer_) { return HashBuffer(ctx, digest); } OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } bool SSLTranscript::Update(Span in) { if (!is_dtls_ || version_ < TLS1_3_VERSION) { return AddToBufferOrHash(in); } if (in.size() < DTLS1_HM_HEADER_LENGTH) { return false; } // The message passed into Update is the whole Handshake or DTLSHandshake // message, including the msg_type and length. In DTLS, the DTLSHandshake // message also has message_seq, fragment_offset, and fragment_length // fields. In DTLS 1.3, those fields are omitted so that the same // transcript format as TLS 1.3 is used. This means we write the 1-byte // msg_type, 3-byte length, then skip 2+3+3 bytes for the DTLS-specific // fields that get omitted. if (!AddToBufferOrHash(in.subspan(0, 4)) || !AddToBufferOrHash(in.subspan(12))) { return false; } return true; } bool SSLTranscript::AddToBufferOrHash(Span in) { // Depending on the state of the handshake, either the handshake buffer may be // active, the rolling hash, or both. if (buffer_ && // !BUF_MEM_append(buffer_.get(), in.data(), in.size())) { return false; } if (EVP_MD_CTX_md(hash_.get()) != NULL) { EVP_DigestUpdate(hash_.get(), in.data(), in.size()); } return true; } bool SSLTranscript::GetHash(uint8_t *out, size_t *out_len) const { ScopedEVP_MD_CTX ctx; unsigned len; if (!EVP_MD_CTX_copy_ex(ctx.get(), hash_.get()) || !EVP_DigestFinal_ex(ctx.get(), out, &len)) { return false; } *out_len = len; return true; } bool SSLTranscript::GetFinishedMAC(uint8_t *out, size_t *out_len, const SSL_SESSION *session, bool from_server) const { uint8_t digest[EVP_MAX_MD_SIZE]; size_t digest_len; if (!GetHash(digest, &digest_len)) { return false; } std::string_view label = from_server ? 
"server finished" : "client finished"; static const size_t kFinishedLen = 12; if (!tls1_prf(Digest(), Span(out, kFinishedLen), session->secret, label, Span(digest, digest_len), {})) { return false; } *out_len = kFinishedLen; return true; } BSSL_NAMESPACE_END ================================================ FILE: Sources/CNIOBoringSSL/ssl/ssl_versions.cc ================================================ /* Copyright 2017 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN bool ssl_protocol_version_from_wire(uint16_t *out, uint16_t version) { switch (version) { case TLS1_VERSION: case TLS1_1_VERSION: case TLS1_2_VERSION: case TLS1_3_VERSION: *out = version; return true; case DTLS1_VERSION: // DTLS 1.0 is analogous to TLS 1.1, not TLS 1.0. *out = TLS1_1_VERSION; return true; case DTLS1_2_VERSION: *out = TLS1_2_VERSION; return true; case DTLS1_3_VERSION: *out = TLS1_3_VERSION; return true; default: return false; } } // The follow arrays are the supported versions for TLS and DTLS, in order of // decreasing preference. 
static const uint16_t kTLSVersions[] = { TLS1_3_VERSION, TLS1_2_VERSION, TLS1_1_VERSION, TLS1_VERSION, }; static const uint16_t kDTLSVersions[] = { DTLS1_3_VERSION, DTLS1_2_VERSION, DTLS1_VERSION, }; static Span get_method_versions( const SSL_PROTOCOL_METHOD *method) { return method->is_dtls ? Span(kDTLSVersions) : Span(kTLSVersions); } bool ssl_method_supports_version(const SSL_PROTOCOL_METHOD *method, uint16_t version) { for (uint16_t supported : get_method_versions(method)) { if (supported == version) { return true; } } return false; } // The following functions map between API versions and wire versions. The // public API works on wire versions. static const char *kUnknownVersion = "unknown"; struct VersionInfo { uint16_t version; const char *name; }; static const VersionInfo kVersionNames[] = { {TLS1_3_VERSION, "TLSv1.3"}, {TLS1_2_VERSION, "TLSv1.2"}, {TLS1_1_VERSION, "TLSv1.1"}, {TLS1_VERSION, "TLSv1"}, {DTLS1_VERSION, "DTLSv1"}, {DTLS1_2_VERSION, "DTLSv1.2"}, {DTLS1_3_VERSION, "DTLSv1.3"}, }; static const char *ssl_version_to_string(uint16_t version) { for (const auto &v : kVersionNames) { if (v.version == version) { return v.name; } } return kUnknownVersion; } static uint16_t wire_version_to_api(uint16_t version) { return version; } // api_version_to_wire maps |version| to some representative wire version. static bool api_version_to_wire(uint16_t *out, uint16_t version) { // Check it is a real protocol version. 
uint16_t unused; if (!ssl_protocol_version_from_wire(&unused, version)) { return false; } *out = version; return true; } static bool set_version_bound(const SSL_PROTOCOL_METHOD *method, uint16_t *out, uint16_t version) { if (!api_version_to_wire(&version, version) || !ssl_method_supports_version(method, version)) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_SSL_VERSION); return false; } *out = version; return true; } static bool set_min_version(const SSL_PROTOCOL_METHOD *method, uint16_t *out, uint16_t version) { // Zero is interpreted as the default minimum version. if (version == 0) { *out = method->is_dtls ? DTLS1_2_VERSION : TLS1_2_VERSION; return true; } return set_version_bound(method, out, version); } static bool set_max_version(const SSL_PROTOCOL_METHOD *method, uint16_t *out, uint16_t version) { // Zero is interpreted as the default maximum version. // TODO(crbug.com/42290594): Enable DTLS 1.3 by default, after it's // successfully shipped in WebRTC. if (version == 0) { *out = method->is_dtls ? DTLS1_2_VERSION : TLS1_3_VERSION; return true; } return set_version_bound(method, out, version); } const struct { uint16_t version; uint32_t flag; } kProtocolVersions[] = { {TLS1_VERSION, SSL_OP_NO_TLSv1}, {TLS1_1_VERSION, SSL_OP_NO_TLSv1_1}, {TLS1_2_VERSION, SSL_OP_NO_TLSv1_2}, {TLS1_3_VERSION, SSL_OP_NO_TLSv1_3}, }; bool ssl_get_version_range(const SSL_HANDSHAKE *hs, uint16_t *out_min_version, uint16_t *out_max_version) { // For historical reasons, |SSL_OP_NO_DTLSv1| aliases |SSL_OP_NO_TLSv1|, but // DTLS 1.0 should be mapped to TLS 1.1. 
uint32_t options = hs->ssl->options; if (SSL_is_dtls(hs->ssl)) { options &= ~SSL_OP_NO_TLSv1_1; if (options & SSL_OP_NO_DTLSv1) { options |= SSL_OP_NO_TLSv1_1; } } uint16_t min_version, max_version; if (!ssl_protocol_version_from_wire(&min_version, hs->config->conf_min_version) || !ssl_protocol_version_from_wire(&max_version, hs->config->conf_max_version)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } // QUIC requires TLS 1.3. if (SSL_is_quic(hs->ssl) && min_version < TLS1_3_VERSION) { min_version = TLS1_3_VERSION; } // The |SSL_OP_NO_*| flags disable individual protocols. This has two // problems. First, prior to TLS 1.3, the protocol can only express a // contiguous range of versions. Second, a library consumer trying to set a // maximum version cannot disable protocol versions that get added in a future // version of the library. // // To account for both of these, OpenSSL interprets the client-side bitmask // as a min/max range by picking the lowest contiguous non-empty range of // enabled protocols. Note that this means it is impossible to set a maximum // version of the higest supported TLS version in a future-proof way. bool any_enabled = false; for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kProtocolVersions); i++) { // Only look at the versions already enabled. if (min_version > kProtocolVersions[i].version) { continue; } if (max_version < kProtocolVersions[i].version) { break; } if (!(options & kProtocolVersions[i].flag)) { // The minimum version is the first enabled version. if (!any_enabled) { any_enabled = true; min_version = kProtocolVersions[i].version; } continue; } // If there is a disabled version after the first enabled one, all versions // after it are implicitly disabled. 
if (any_enabled) { max_version = kProtocolVersions[i - 1].version; break; } } if (!any_enabled) { OPENSSL_PUT_ERROR(SSL, SSL_R_NO_SUPPORTED_VERSIONS_ENABLED); return false; } *out_min_version = min_version; *out_max_version = max_version; return true; } static uint16_t ssl_version(const SSL *ssl) { // In early data, we report the predicted version. Note it is possible that we // have a predicted version and a *different* true version. This means 0-RTT // has been rejected, but until the reject has reported to the application and // applied with |SSL_reset_early_data_reject|, we continue reporting a // self-consistent connection. if (SSL_in_early_data(ssl) && !ssl->server) { return ssl->s3->hs->early_session->ssl_version; } if (ssl->s3->version != 0) { return ssl->s3->version; } // The TLS versions has not yet been negotiated. Historically, we would return // (D)TLS 1.2, so preserve that behavior. return SSL_is_dtls(ssl) ? DTLS1_2_VERSION : TLS1_2_VERSION; } bool ssl_has_final_version(const SSL *ssl) { return ssl->s3->version != 0 && (ssl->s3->hs == nullptr || !ssl->s3->hs->is_early_version); } uint16_t ssl_protocol_version(const SSL *ssl) { assert(ssl->s3->version != 0); uint16_t version; if (!ssl_protocol_version_from_wire(&version, ssl->s3->version)) { // |ssl->s3->version| will always be set to a valid version. 
assert(0); return 0; } return version; } bool ssl_supports_version(const SSL_HANDSHAKE *hs, uint16_t version) { const SSL *const ssl = hs->ssl; uint16_t protocol_version; if (!ssl_method_supports_version(ssl->method, version) || !ssl_protocol_version_from_wire(&protocol_version, version) || hs->min_version > protocol_version || protocol_version > hs->max_version) { return false; } return true; } bool ssl_add_supported_versions(const SSL_HANDSHAKE *hs, CBB *cbb, uint16_t extra_min_version) { for (uint16_t version : get_method_versions(hs->ssl->method)) { uint16_t protocol_version; if (ssl_supports_version(hs, version) && ssl_protocol_version_from_wire(&protocol_version, version) && protocol_version >= extra_min_version && // !CBB_add_u16(cbb, version)) { return false; } } return true; } bool ssl_negotiate_version(SSL_HANDSHAKE *hs, uint8_t *out_alert, uint16_t *out_version, const CBS *peer_versions) { for (uint16_t version : get_method_versions(hs->ssl->method)) { if (!ssl_supports_version(hs, version)) { continue; } // JDK 11, prior to 11.0.2, has a buggy TLS 1.3 implementation which fails // to send SNI when offering 1.3 sessions. Disable TLS 1.3 for such // clients. We apply this logic here rather than |ssl_supports_version| so // the downgrade signal continues to query the true capabilities. (The // workaround is a limitation of the peer's capabilities rather than our // own.) // // See https://bugs.openjdk.java.net/browse/JDK-8211806. 
if (version == TLS1_3_VERSION && hs->apply_jdk11_workaround) { continue; } CBS copy = *peer_versions; while (CBS_len(©) != 0) { uint16_t peer_version; if (!CBS_get_u16(©, &peer_version)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } if (peer_version == version) { *out_version = version; return true; } } } OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_PROTOCOL); *out_alert = SSL_AD_PROTOCOL_VERSION; return false; } BSSL_NAMESPACE_END using namespace bssl; int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, uint16_t version) { return set_min_version(ctx->method, &ctx->conf_min_version, version); } int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, uint16_t version) { return set_max_version(ctx->method, &ctx->conf_max_version, version); } uint16_t SSL_CTX_get_min_proto_version(const SSL_CTX *ctx) { return ctx->conf_min_version; } uint16_t SSL_CTX_get_max_proto_version(const SSL_CTX *ctx) { return ctx->conf_max_version; } int SSL_set_min_proto_version(SSL *ssl, uint16_t version) { if (!ssl->config) { return 0; } return set_min_version(ssl->method, &ssl->config->conf_min_version, version); } int SSL_set_max_proto_version(SSL *ssl, uint16_t version) { if (!ssl->config) { return 0; } return set_max_version(ssl->method, &ssl->config->conf_max_version, version); } uint16_t SSL_get_min_proto_version(const SSL *ssl) { if (!ssl->config) { return 0; } return ssl->config->conf_min_version; } uint16_t SSL_get_max_proto_version(const SSL *ssl) { if (!ssl->config) { return 0; } return ssl->config->conf_max_version; } int SSL_version(const SSL *ssl) { return wire_version_to_api(ssl_version(ssl)); } const char *SSL_get_version(const SSL *ssl) { return ssl_version_to_string(ssl_version(ssl)); } size_t SSL_get_all_version_names(const char **out, size_t max_out) { return GetAllNames(out, max_out, Span(&kUnknownVersion, 1), &VersionInfo::name, Span(kVersionNames)); } const char *SSL_SESSION_get_version(const SSL_SESSION *session) { return 
ssl_version_to_string(session->ssl_version); } uint16_t SSL_SESSION_get_protocol_version(const SSL_SESSION *session) { return wire_version_to_api(session->ssl_version); } int SSL_SESSION_set_protocol_version(SSL_SESSION *session, uint16_t version) { // This picks a representative TLS 1.3 version, but this API should only be // used on unit test sessions anyway. return api_version_to_wire(&session->ssl_version, version); } int SSL_CTX_set_record_protocol_version(SSL_CTX *ctx, int version) { return version == 0; } ================================================ FILE: Sources/CNIOBoringSSL/ssl/ssl_x509.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. * Copyright 2005 Nokia. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN // check_ssl_x509_method asserts that |ssl| has the X509-based method // installed. Calling an X509-based method on an |ssl| with a different method // will likely misbehave and possibly crash or leak memory. static void check_ssl_x509_method(const SSL *ssl) { assert(ssl == NULL || ssl->ctx->x509_method == &ssl_crypto_x509_method); } // check_ssl_ctx_x509_method acts like |check_ssl_x509_method|, but for an // |SSL_CTX|. static void check_ssl_ctx_x509_method(const SSL_CTX *ctx) { assert(ctx == NULL || ctx->x509_method == &ssl_crypto_x509_method); } // x509_to_buffer returns a |CRYPTO_BUFFER| that contains the serialised // contents of |x509|. 
static UniquePtr x509_to_buffer(X509 *x509) { uint8_t *buf = NULL; int cert_len = i2d_X509(x509, &buf); if (cert_len <= 0) { return 0; } UniquePtr buffer(CRYPTO_BUFFER_new(buf, cert_len, NULL)); OPENSSL_free(buf); return buffer; } static void ssl_crypto_x509_cert_flush_cached_leaf(CERT *cert) { X509_free(cert->x509_leaf); cert->x509_leaf = nullptr; } static void ssl_crypto_x509_cert_flush_cached_chain(CERT *cert) { sk_X509_pop_free(cert->x509_chain, X509_free); cert->x509_chain = nullptr; } // ssl_cert_set1_chain sets elements 1.. of |cert->chain| to the serialised // forms of elements of |chain|. It returns one on success or zero on error, in // which case no change to |cert->chain| is made. It preverses the existing // leaf from |cert->chain|, if any. static bool ssl_cert_set1_chain(CERT *cert, STACK_OF(X509) *chain) { cert->legacy_credential->ClearIntermediateCerts(); for (X509 *x509 : chain) { UniquePtr buffer = x509_to_buffer(x509); if (!buffer || !cert->legacy_credential->AppendIntermediateCert(std::move(buffer))) { return false; } } ssl_crypto_x509_cert_flush_cached_chain(cert); return true; } static bool ssl_crypto_x509_check_client_CA_list( STACK_OF(CRYPTO_BUFFER) *names) { for (const CRYPTO_BUFFER *buffer : names) { const uint8_t *inp = CRYPTO_BUFFER_data(buffer); UniquePtr name( d2i_X509_NAME(nullptr, &inp, CRYPTO_BUFFER_len(buffer))); if (name == nullptr || inp != CRYPTO_BUFFER_data(buffer) + CRYPTO_BUFFER_len(buffer)) { return false; } } return true; } static void ssl_crypto_x509_cert_clear(CERT *cert) { ssl_crypto_x509_cert_flush_cached_leaf(cert); ssl_crypto_x509_cert_flush_cached_chain(cert); X509_free(cert->x509_stash); cert->x509_stash = nullptr; } static void ssl_crypto_x509_cert_free(CERT *cert) { ssl_crypto_x509_cert_clear(cert); X509_STORE_free(cert->verify_store); } static void ssl_crypto_x509_cert_dup(CERT *new_cert, const CERT *cert) { if (cert->verify_store != nullptr) { X509_STORE_up_ref(cert->verify_store); new_cert->verify_store = 
cert->verify_store; } } static bool ssl_crypto_x509_session_cache_objects(SSL_SESSION *sess) { bssl::UniquePtr chain, chain_without_leaf; if (sk_CRYPTO_BUFFER_num(sess->certs.get()) > 0) { chain.reset(sk_X509_new_null()); if (!chain) { return false; } if (sess->is_server) { // chain_without_leaf is only needed for server sessions. See // |SSL_get_peer_cert_chain|. chain_without_leaf.reset(sk_X509_new_null()); if (!chain_without_leaf) { return false; } } } bssl::UniquePtr leaf; for (CRYPTO_BUFFER *cert : sess->certs.get()) { UniquePtr x509(X509_parse_from_buffer(cert)); if (!x509) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } if (leaf == nullptr) { leaf = UpRef(x509); } else if (chain_without_leaf && !PushToStack(chain_without_leaf.get(), UpRef(x509))) { return false; } if (!PushToStack(chain.get(), std::move(x509))) { return false; } } sk_X509_pop_free(sess->x509_chain, X509_free); sess->x509_chain = chain.release(); sk_X509_pop_free(sess->x509_chain_without_leaf, X509_free); sess->x509_chain_without_leaf = chain_without_leaf.release(); X509_free(sess->x509_peer); sess->x509_peer = leaf.release(); return true; } static bool ssl_crypto_x509_session_dup(SSL_SESSION *new_session, const SSL_SESSION *session) { new_session->x509_peer = UpRef(session->x509_peer).release(); if (session->x509_chain != nullptr) { new_session->x509_chain = X509_chain_up_ref(session->x509_chain); if (new_session->x509_chain == nullptr) { return false; } } if (session->x509_chain_without_leaf != nullptr) { new_session->x509_chain_without_leaf = X509_chain_up_ref(session->x509_chain_without_leaf); if (new_session->x509_chain_without_leaf == nullptr) { return false; } } return true; } static void ssl_crypto_x509_session_clear(SSL_SESSION *session) { X509_free(session->x509_peer); session->x509_peer = nullptr; sk_X509_pop_free(session->x509_chain, X509_free); session->x509_chain = nullptr; sk_X509_pop_free(session->x509_chain_without_leaf, X509_free); 
session->x509_chain_without_leaf = nullptr; } static bool ssl_crypto_x509_session_verify_cert_chain(SSL_SESSION *session, SSL_HANDSHAKE *hs, uint8_t *out_alert) { *out_alert = SSL_AD_INTERNAL_ERROR; STACK_OF(X509) *const cert_chain = session->x509_chain; if (cert_chain == nullptr || sk_X509_num(cert_chain) == 0) { return false; } SSL *const ssl = hs->ssl; SSL_CTX *ssl_ctx = ssl->ctx.get(); X509_STORE *verify_store = ssl_ctx->cert_store; if (hs->config->cert->verify_store != nullptr) { verify_store = hs->config->cert->verify_store; } X509 *leaf = sk_X509_value(cert_chain, 0); const char *name; size_t name_len; SSL_get0_ech_name_override(ssl, &name, &name_len); UniquePtr ctx(X509_STORE_CTX_new()); if (!ctx || // !X509_STORE_CTX_init(ctx.get(), verify_store, leaf, cert_chain) || // !X509_STORE_CTX_set_ex_data( ctx.get(), SSL_get_ex_data_X509_STORE_CTX_idx(), ssl) || // // We need to inherit the verify parameters. These can be determined by // the context: if its a server it will verify SSL client certificates or // vice versa. !X509_STORE_CTX_set_default( ctx.get(), ssl->server ? "ssl_client" : "ssl_server") || // // Anything non-default in "param" should overwrite anything in the ctx. !X509_VERIFY_PARAM_set1(X509_STORE_CTX_get0_param(ctx.get()), hs->config->param) || // // ClientHelloOuter connections use a different name. (name_len != 0 && // !X509_VERIFY_PARAM_set1_host(X509_STORE_CTX_get0_param(ctx.get()), name, name_len))) { OPENSSL_PUT_ERROR(SSL, ERR_R_X509_LIB); return false; } if (hs->config->verify_callback) { X509_STORE_CTX_set_verify_cb(ctx.get(), hs->config->verify_callback); } int verify_ret; if (ssl_ctx->app_verify_callback != nullptr) { verify_ret = ssl_ctx->app_verify_callback(ctx.get(), ssl_ctx->app_verify_arg); } else { verify_ret = X509_verify_cert(ctx.get()); } session->verify_result = X509_STORE_CTX_get_error(ctx.get()); // If |SSL_VERIFY_NONE|, the error is non-fatal, but we keep the result. 
if (verify_ret <= 0 && hs->config->verify_mode != SSL_VERIFY_NONE) { *out_alert = SSL_alert_from_verify_result(session->verify_result); return false; } ERR_clear_error(); return true; } static void ssl_crypto_x509_hs_flush_cached_ca_names(SSL_HANDSHAKE *hs) { sk_X509_NAME_pop_free(hs->cached_x509_ca_names, X509_NAME_free); hs->cached_x509_ca_names = nullptr; } static bool ssl_crypto_x509_ssl_new(SSL_HANDSHAKE *hs) { hs->config->param = X509_VERIFY_PARAM_new(); if (hs->config->param == nullptr) { return false; } X509_VERIFY_PARAM_inherit(hs->config->param, hs->ssl->ctx->param); return true; } static void ssl_crypto_x509_ssl_flush_cached_client_CA(SSL_CONFIG *cfg) { sk_X509_NAME_pop_free(cfg->cached_x509_client_CA, X509_NAME_free); cfg->cached_x509_client_CA = nullptr; } static void ssl_crypto_x509_ssl_config_free(SSL_CONFIG *cfg) { sk_X509_NAME_pop_free(cfg->cached_x509_client_CA, X509_NAME_free); cfg->cached_x509_client_CA = nullptr; X509_VERIFY_PARAM_free(cfg->param); } static bool ssl_crypto_x509_ssl_auto_chain_if_needed(SSL_HANDSHAKE *hs) { // Only build a chain if the feature isn't disabled, the legacy credential // exists but has no intermediates configured. SSL *ssl = hs->ssl; SSL_CREDENTIAL *cred = hs->config->cert->legacy_credential.get(); if ((ssl->mode & SSL_MODE_NO_AUTO_CHAIN) || !cred->IsComplete() || sk_CRYPTO_BUFFER_num(cred->chain.get()) != 1) { return true; } UniquePtr leaf( X509_parse_from_buffer(sk_CRYPTO_BUFFER_value(cred->chain.get(), 0))); if (!leaf) { OPENSSL_PUT_ERROR(SSL, ERR_R_X509_LIB); return false; } UniquePtr ctx(X509_STORE_CTX_new()); if (!ctx || !X509_STORE_CTX_init(ctx.get(), ssl->ctx->cert_store, leaf.get(), nullptr)) { OPENSSL_PUT_ERROR(SSL, ERR_R_X509_LIB); return false; } // Attempt to build a chain, ignoring the result. X509_verify_cert(ctx.get()); ERR_clear_error(); // Remove the leaf from the generated chain. 
UniquePtr chain(X509_STORE_CTX_get1_chain(ctx.get())); if (!chain) { return false; } X509_free(sk_X509_shift(chain.get())); return SSL_set1_chain(ssl, chain.get()); } static void ssl_crypto_x509_ssl_ctx_flush_cached_client_CA(SSL_CTX *ctx) { sk_X509_NAME_pop_free(ctx->cached_x509_client_CA, X509_NAME_free); ctx->cached_x509_client_CA = nullptr; } static bool ssl_crypto_x509_ssl_ctx_new(SSL_CTX *ctx) { ctx->cert_store = X509_STORE_new(); ctx->param = X509_VERIFY_PARAM_new(); return (ctx->cert_store != nullptr && ctx->param != nullptr); } static void ssl_crypto_x509_ssl_ctx_free(SSL_CTX *ctx) { ssl_crypto_x509_ssl_ctx_flush_cached_client_CA(ctx); X509_VERIFY_PARAM_free(ctx->param); X509_STORE_free(ctx->cert_store); } const SSL_X509_METHOD ssl_crypto_x509_method = { ssl_crypto_x509_check_client_CA_list, ssl_crypto_x509_cert_clear, ssl_crypto_x509_cert_free, ssl_crypto_x509_cert_dup, ssl_crypto_x509_cert_flush_cached_chain, ssl_crypto_x509_cert_flush_cached_leaf, ssl_crypto_x509_session_cache_objects, ssl_crypto_x509_session_dup, ssl_crypto_x509_session_clear, ssl_crypto_x509_session_verify_cert_chain, ssl_crypto_x509_hs_flush_cached_ca_names, ssl_crypto_x509_ssl_new, ssl_crypto_x509_ssl_config_free, ssl_crypto_x509_ssl_flush_cached_client_CA, ssl_crypto_x509_ssl_auto_chain_if_needed, ssl_crypto_x509_ssl_ctx_new, ssl_crypto_x509_ssl_ctx_free, ssl_crypto_x509_ssl_ctx_flush_cached_client_CA, }; BSSL_NAMESPACE_END using namespace bssl; X509 *SSL_get_peer_certificate(const SSL *ssl) { check_ssl_x509_method(ssl); if (ssl == NULL) { return NULL; } SSL_SESSION *session = SSL_get_session(ssl); if (session == NULL || session->x509_peer == NULL) { return NULL; } X509_up_ref(session->x509_peer); return session->x509_peer; } STACK_OF(X509) *SSL_get_peer_cert_chain(const SSL *ssl) { check_ssl_x509_method(ssl); if (ssl == nullptr) { return nullptr; } SSL_SESSION *session = SSL_get_session(ssl); if (session == nullptr) { return nullptr; } // OpenSSL historically didn't include the 
leaf certificate in the returned // certificate chain, but only for servers. return ssl->server ? session->x509_chain_without_leaf : session->x509_chain; } STACK_OF(X509) *SSL_get_peer_full_cert_chain(const SSL *ssl) { check_ssl_x509_method(ssl); SSL_SESSION *session = SSL_get_session(ssl); if (session == NULL) { return NULL; } return session->x509_chain; } int SSL_CTX_set_purpose(SSL_CTX *ctx, int purpose) { check_ssl_ctx_x509_method(ctx); return X509_VERIFY_PARAM_set_purpose(ctx->param, purpose); } int SSL_set_purpose(SSL *ssl, int purpose) { check_ssl_x509_method(ssl); if (!ssl->config) { return 0; } return X509_VERIFY_PARAM_set_purpose(ssl->config->param, purpose); } int SSL_CTX_set_trust(SSL_CTX *ctx, int trust) { check_ssl_ctx_x509_method(ctx); return X509_VERIFY_PARAM_set_trust(ctx->param, trust); } int SSL_set_trust(SSL *ssl, int trust) { check_ssl_x509_method(ssl); if (!ssl->config) { return 0; } return X509_VERIFY_PARAM_set_trust(ssl->config->param, trust); } int SSL_CTX_set1_param(SSL_CTX *ctx, const X509_VERIFY_PARAM *param) { check_ssl_ctx_x509_method(ctx); return X509_VERIFY_PARAM_set1(ctx->param, param); } int SSL_set1_param(SSL *ssl, const X509_VERIFY_PARAM *param) { check_ssl_x509_method(ssl); if (!ssl->config) { return 0; } return X509_VERIFY_PARAM_set1(ssl->config->param, param); } X509_VERIFY_PARAM *SSL_CTX_get0_param(SSL_CTX *ctx) { check_ssl_ctx_x509_method(ctx); return ctx->param; } X509_VERIFY_PARAM *SSL_get0_param(SSL *ssl) { check_ssl_x509_method(ssl); if (!ssl->config) { assert(ssl->config); return 0; } return ssl->config->param; } int SSL_get_verify_depth(const SSL *ssl) { check_ssl_x509_method(ssl); if (!ssl->config) { assert(ssl->config); return 0; } return X509_VERIFY_PARAM_get_depth(ssl->config->param); } int (*SSL_get_verify_callback(const SSL *ssl))(int, X509_STORE_CTX *) { check_ssl_x509_method(ssl); if (!ssl->config) { assert(ssl->config); return 0; } return ssl->config->verify_callback; } int SSL_CTX_get_verify_mode(const 
SSL_CTX *ctx) { check_ssl_ctx_x509_method(ctx); return ctx->verify_mode; } int SSL_CTX_get_verify_depth(const SSL_CTX *ctx) { check_ssl_ctx_x509_method(ctx); return X509_VERIFY_PARAM_get_depth(ctx->param); } int (*SSL_CTX_get_verify_callback(const SSL_CTX *ctx))( int ok, X509_STORE_CTX *store_ctx) { check_ssl_ctx_x509_method(ctx); return ctx->default_verify_callback; } void SSL_set_verify(SSL *ssl, int mode, int (*callback)(int ok, X509_STORE_CTX *store_ctx)) { check_ssl_x509_method(ssl); if (!ssl->config) { return; } ssl->config->verify_mode = mode; if (callback != NULL) { ssl->config->verify_callback = callback; } } void SSL_set_verify_depth(SSL *ssl, int depth) { check_ssl_x509_method(ssl); if (!ssl->config) { return; } X509_VERIFY_PARAM_set_depth(ssl->config->param, depth); } void SSL_CTX_set_cert_verify_callback( SSL_CTX *ctx, int (*cb)(X509_STORE_CTX *store_ctx, void *arg), void *arg) { check_ssl_ctx_x509_method(ctx); ctx->app_verify_callback = cb; ctx->app_verify_arg = arg; } void SSL_CTX_set_verify(SSL_CTX *ctx, int mode, int (*cb)(int, X509_STORE_CTX *)) { check_ssl_ctx_x509_method(ctx); ctx->verify_mode = mode; ctx->default_verify_callback = cb; } void SSL_CTX_set_verify_depth(SSL_CTX *ctx, int depth) { check_ssl_ctx_x509_method(ctx); X509_VERIFY_PARAM_set_depth(ctx->param, depth); } int SSL_CTX_set_default_verify_paths(SSL_CTX *ctx) { check_ssl_ctx_x509_method(ctx); return X509_STORE_set_default_paths(ctx->cert_store); } int SSL_CTX_load_verify_locations(SSL_CTX *ctx, const char *ca_file, const char *ca_dir) { check_ssl_ctx_x509_method(ctx); return X509_STORE_load_locations(ctx->cert_store, ca_file, ca_dir); } long SSL_get_verify_result(const SSL *ssl) { check_ssl_x509_method(ssl); SSL_SESSION *session = SSL_get_session(ssl); if (session == NULL) { return X509_V_ERR_INVALID_CALL; } return session->verify_result; } X509_STORE *SSL_CTX_get_cert_store(const SSL_CTX *ctx) { check_ssl_ctx_x509_method(ctx); return ctx->cert_store; } void 
SSL_CTX_set_cert_store(SSL_CTX *ctx, X509_STORE *store) { check_ssl_ctx_x509_method(ctx); X509_STORE_free(ctx->cert_store); ctx->cert_store = store; } static int ssl_use_certificate(CERT *cert, X509 *x) { if (x == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); return 0; } UniquePtr buffer = x509_to_buffer(x); if (!buffer) { return 0; } return ssl_set_cert(cert, std::move(buffer)); } int SSL_use_certificate(SSL *ssl, X509 *x) { check_ssl_x509_method(ssl); if (!ssl->config) { return 0; } return ssl_use_certificate(ssl->config->cert.get(), x); } int SSL_CTX_use_certificate(SSL_CTX *ctx, X509 *x) { check_ssl_ctx_x509_method(ctx); return ssl_use_certificate(ctx->cert.get(), x); } // ssl_cert_cache_leaf_cert sets |cert->x509_leaf|, if currently NULL, from the // first element of |cert->chain|. static int ssl_cert_cache_leaf_cert(CERT *cert) { assert(cert->x509_method); const SSL_CREDENTIAL *cred = cert->legacy_credential.get(); if (cert->x509_leaf != NULL || cred->chain == NULL) { return 1; } CRYPTO_BUFFER *leaf = sk_CRYPTO_BUFFER_value(cred->chain.get(), 0); if (!leaf) { return 1; } cert->x509_leaf = X509_parse_from_buffer(leaf); return cert->x509_leaf != NULL; } static X509 *ssl_cert_get0_leaf(CERT *cert) { if (cert->x509_leaf == NULL && // !ssl_cert_cache_leaf_cert(cert)) { return NULL; } return cert->x509_leaf; } X509 *SSL_get_certificate(const SSL *ssl) { check_ssl_x509_method(ssl); if (!ssl->config) { assert(ssl->config); return 0; } return ssl_cert_get0_leaf(ssl->config->cert.get()); } X509 *SSL_CTX_get0_certificate(const SSL_CTX *ctx) { check_ssl_ctx_x509_method(ctx); MutexWriteLock lock(const_cast(&ctx->lock)); return ssl_cert_get0_leaf(ctx->cert.get()); } static int ssl_cert_add1_chain_cert(CERT *cert, X509 *x509) { assert(cert->x509_method); UniquePtr buffer = x509_to_buffer(x509); if (!buffer || !cert->legacy_credential->AppendIntermediateCert(std::move(buffer))) { return 0; } ssl_crypto_x509_cert_flush_cached_chain(cert); return 1; } static int 
ssl_cert_add0_chain_cert(CERT *cert, X509 *x509) { if (!ssl_cert_add1_chain_cert(cert, x509)) { return 0; } X509_free(cert->x509_stash); cert->x509_stash = x509; return 1; } int SSL_CTX_set0_chain(SSL_CTX *ctx, STACK_OF(X509) *chain) { check_ssl_ctx_x509_method(ctx); if (!ssl_cert_set1_chain(ctx->cert.get(), chain)) { return 0; } sk_X509_pop_free(chain, X509_free); return 1; } int SSL_CTX_set1_chain(SSL_CTX *ctx, STACK_OF(X509) *chain) { check_ssl_ctx_x509_method(ctx); return ssl_cert_set1_chain(ctx->cert.get(), chain); } int SSL_set0_chain(SSL *ssl, STACK_OF(X509) *chain) { check_ssl_x509_method(ssl); if (!ssl->config) { return 0; } if (!ssl_cert_set1_chain(ssl->config->cert.get(), chain)) { return 0; } sk_X509_pop_free(chain, X509_free); return 1; } int SSL_set1_chain(SSL *ssl, STACK_OF(X509) *chain) { check_ssl_x509_method(ssl); if (!ssl->config) { return 0; } return ssl_cert_set1_chain(ssl->config->cert.get(), chain); } int SSL_CTX_add0_chain_cert(SSL_CTX *ctx, X509 *x509) { check_ssl_ctx_x509_method(ctx); return ssl_cert_add0_chain_cert(ctx->cert.get(), x509); } int SSL_CTX_add1_chain_cert(SSL_CTX *ctx, X509 *x509) { check_ssl_ctx_x509_method(ctx); return ssl_cert_add1_chain_cert(ctx->cert.get(), x509); } int SSL_CTX_add_extra_chain_cert(SSL_CTX *ctx, X509 *x509) { check_ssl_ctx_x509_method(ctx); return SSL_CTX_add0_chain_cert(ctx, x509); } int SSL_add0_chain_cert(SSL *ssl, X509 *x509) { check_ssl_x509_method(ssl); if (!ssl->config) { return 0; } return ssl_cert_add0_chain_cert(ssl->config->cert.get(), x509); } int SSL_add1_chain_cert(SSL *ssl, X509 *x509) { check_ssl_x509_method(ssl); if (!ssl->config) { return 0; } return ssl_cert_add1_chain_cert(ssl->config->cert.get(), x509); } int SSL_CTX_clear_chain_certs(SSL_CTX *ctx) { check_ssl_ctx_x509_method(ctx); return SSL_CTX_set0_chain(ctx, NULL); } int SSL_CTX_clear_extra_chain_certs(SSL_CTX *ctx) { check_ssl_ctx_x509_method(ctx); return SSL_CTX_clear_chain_certs(ctx); } int SSL_clear_chain_certs(SSL *ssl) { 
check_ssl_x509_method(ssl); return SSL_set0_chain(ssl, NULL); } // ssl_cert_cache_chain_certs fills in |cert->x509_chain| from elements 1.. of // |cert->chain|. static int ssl_cert_cache_chain_certs(CERT *cert) { assert(cert->x509_method); const SSL_CREDENTIAL *cred = cert->legacy_credential.get(); if (cert->x509_chain != nullptr || cred->chain == nullptr || sk_CRYPTO_BUFFER_num(cred->chain.get()) < 2) { return 1; } UniquePtr chain(sk_X509_new_null()); if (!chain) { return 0; } for (size_t i = 1; i < sk_CRYPTO_BUFFER_num(cred->chain.get()); i++) { CRYPTO_BUFFER *buffer = sk_CRYPTO_BUFFER_value(cred->chain.get(), i); UniquePtr x509(X509_parse_from_buffer(buffer)); if (!x509 || // !PushToStack(chain.get(), std::move(x509))) { return 0; } } cert->x509_chain = chain.release(); return 1; } int SSL_CTX_get0_chain_certs(const SSL_CTX *ctx, STACK_OF(X509) **out_chain) { check_ssl_ctx_x509_method(ctx); MutexWriteLock lock(const_cast(&ctx->lock)); if (!ssl_cert_cache_chain_certs(ctx->cert.get())) { *out_chain = NULL; return 0; } *out_chain = ctx->cert->x509_chain; return 1; } int SSL_CTX_get_extra_chain_certs(const SSL_CTX *ctx, STACK_OF(X509) **out_chain) { return SSL_CTX_get0_chain_certs(ctx, out_chain); } int SSL_get0_chain_certs(const SSL *ssl, STACK_OF(X509) **out_chain) { check_ssl_x509_method(ssl); if (!ssl->config) { assert(ssl->config); return 0; } if (!ssl_cert_cache_chain_certs(ssl->config->cert.get())) { *out_chain = NULL; return 0; } *out_chain = ssl->config->cert->x509_chain; return 1; } SSL_SESSION *d2i_SSL_SESSION_bio(BIO *bio, SSL_SESSION **out) { uint8_t *data; size_t len; if (!BIO_read_asn1(bio, &data, &len, 1024 * 1024)) { return 0; } bssl::UniquePtr free_data(data); const uint8_t *ptr = data; return d2i_SSL_SESSION(out, &ptr, static_cast(len)); } int i2d_SSL_SESSION_bio(BIO *bio, const SSL_SESSION *session) { uint8_t *data; size_t len; if (!SSL_SESSION_to_bytes(session, &data, &len)) { return 0; } bssl::UniquePtr free_data(data); return 
BIO_write_all(bio, data, len); } IMPLEMENT_PEM_rw(SSL_SESSION, SSL_SESSION, PEM_STRING_SSL_SESSION, SSL_SESSION) SSL_SESSION *d2i_SSL_SESSION(SSL_SESSION **a, const uint8_t **pp, long length) { if (length < 0) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return NULL; } CBS cbs; CBS_init(&cbs, *pp, length); UniquePtr ret = SSL_SESSION_parse(&cbs, &ssl_crypto_x509_method, NULL /* no buffer pool */); if (!ret) { return NULL; } if (a) { SSL_SESSION_free(*a); *a = ret.get(); } *pp = CBS_data(&cbs); return ret.release(); } STACK_OF(X509_NAME) *SSL_dup_CA_list(STACK_OF(X509_NAME) *list) { // TODO(https://crbug.com/boringssl/407): |X509_NAME_dup| should be const. auto name_dup = [](const X509_NAME *name) { return X509_NAME_dup(const_cast(name)); }; return sk_X509_NAME_deep_copy(list, name_dup, X509_NAME_free); } static void set_client_CA_list(UniquePtr *ca_list, const STACK_OF(X509_NAME) *name_list, CRYPTO_BUFFER_POOL *pool) { UniquePtr buffers(sk_CRYPTO_BUFFER_new_null()); if (!buffers) { return; } for (X509_NAME *name : name_list) { uint8_t *outp = NULL; int len = i2d_X509_NAME(name, &outp); if (len < 0) { return; } UniquePtr buffer(CRYPTO_BUFFER_new(outp, len, pool)); OPENSSL_free(outp); if (!buffer || !PushToStack(buffers.get(), std::move(buffer))) { return; } } *ca_list = std::move(buffers); } void SSL_set_client_CA_list(SSL *ssl, STACK_OF(X509_NAME) *name_list) { check_ssl_x509_method(ssl); if (!ssl->config) { return; } ssl->ctx->x509_method->ssl_flush_cached_client_CA(ssl->config.get()); set_client_CA_list(&ssl->config->client_CA, name_list, ssl->ctx->pool); sk_X509_NAME_pop_free(name_list, X509_NAME_free); } void SSL_CTX_set_client_CA_list(SSL_CTX *ctx, STACK_OF(X509_NAME) *name_list) { check_ssl_ctx_x509_method(ctx); ctx->x509_method->ssl_ctx_flush_cached_client_CA(ctx); set_client_CA_list(&ctx->client_CA, name_list, ctx->pool); sk_X509_NAME_pop_free(name_list, X509_NAME_free); } static STACK_OF(X509_NAME) *buffer_names_to_x509( const STACK_OF(CRYPTO_BUFFER) 
*names, STACK_OF(X509_NAME) **cached) { if (names == NULL) { return NULL; } if (*cached != NULL) { return *cached; } UniquePtr new_cache(sk_X509_NAME_new_null()); if (!new_cache) { return NULL; } for (const CRYPTO_BUFFER *buffer : names) { const uint8_t *inp = CRYPTO_BUFFER_data(buffer); UniquePtr name( d2i_X509_NAME(nullptr, &inp, CRYPTO_BUFFER_len(buffer))); if (!name || inp != CRYPTO_BUFFER_data(buffer) + CRYPTO_BUFFER_len(buffer) || !PushToStack(new_cache.get(), std::move(name))) { return NULL; } } *cached = new_cache.release(); return *cached; } STACK_OF(X509_NAME) *SSL_get_client_CA_list(const SSL *ssl) { check_ssl_x509_method(ssl); if (!ssl->config) { assert(ssl->config); return NULL; } // For historical reasons, this function is used both to query configuration // state on a server as well as handshake state on a client. However, whether // |ssl| is a client or server is not known until explicitly configured with // |SSL_set_connect_state|. If |do_handshake| is NULL, |ssl| is in an // indeterminate mode and |ssl->server| is unset. if (ssl->do_handshake != NULL && !ssl->server) { if (ssl->s3->hs != NULL) { return buffer_names_to_x509(ssl->s3->hs->ca_names.get(), &ssl->s3->hs->cached_x509_ca_names); } return NULL; } if (ssl->config->client_CA != NULL) { return buffer_names_to_x509( ssl->config->client_CA.get(), (STACK_OF(X509_NAME) **)&ssl->config->cached_x509_client_CA); } return SSL_CTX_get_client_CA_list(ssl->ctx.get()); } STACK_OF(X509_NAME) *SSL_CTX_get_client_CA_list(const SSL_CTX *ctx) { check_ssl_ctx_x509_method(ctx); // This is a logically const operation that may be called on multiple threads, // so it needs to lock around updating |cached_x509_client_CA|. 
MutexWriteLock lock(const_cast(&ctx->lock)); return buffer_names_to_x509( ctx->client_CA.get(), const_cast(&ctx->cached_x509_client_CA)); } static int add_client_CA(UniquePtr *names, X509 *x509, CRYPTO_BUFFER_POOL *pool) { if (x509 == NULL) { return 0; } uint8_t *outp = NULL; int len = i2d_X509_NAME(X509_get_subject_name(x509), &outp); if (len < 0) { return 0; } UniquePtr buffer(CRYPTO_BUFFER_new(outp, len, pool)); OPENSSL_free(outp); if (!buffer) { return 0; } int alloced = 0; if (*names == nullptr) { names->reset(sk_CRYPTO_BUFFER_new_null()); alloced = 1; if (*names == NULL) { return 0; } } if (!PushToStack(names->get(), std::move(buffer))) { if (alloced) { names->reset(); } return 0; } return 1; } int SSL_add_client_CA(SSL *ssl, X509 *x509) { check_ssl_x509_method(ssl); if (!ssl->config) { return 0; } if (!add_client_CA(&ssl->config->client_CA, x509, ssl->ctx->pool)) { return 0; } ssl_crypto_x509_ssl_flush_cached_client_CA(ssl->config.get()); return 1; } int SSL_CTX_add_client_CA(SSL_CTX *ctx, X509 *x509) { check_ssl_ctx_x509_method(ctx); if (!add_client_CA(&ctx->client_CA, x509, ctx->pool)) { return 0; } ssl_crypto_x509_ssl_ctx_flush_cached_client_CA(ctx); return 1; } static int do_client_cert_cb(SSL *ssl, void *arg) { // Should only be called during handshake, but check to be sure. BSSL_CHECK(ssl->config); if (ssl->config->cert->legacy_credential->IsComplete() || ssl->ctx->client_cert_cb == nullptr) { return 1; } X509 *x509 = NULL; EVP_PKEY *pkey = NULL; int ret = ssl->ctx->client_cert_cb(ssl, &x509, &pkey); if (ret < 0) { return -1; } UniquePtr free_x509(x509); UniquePtr free_pkey(pkey); if (ret != 0) { if (!SSL_use_certificate(ssl, x509) || // !SSL_use_PrivateKey(ssl, pkey)) { return 0; } } return 1; } void SSL_CTX_set_client_cert_cb(SSL_CTX *ctx, int (*cb)(SSL *ssl, X509 **out_x509, EVP_PKEY **out_pkey)) { check_ssl_ctx_x509_method(ctx); // Emulate the old client certificate callback with the new one. 
SSL_CTX_set_cert_cb(ctx, do_client_cert_cb, NULL); ctx->client_cert_cb = cb; } static int set_cert_store(X509_STORE **store_ptr, X509_STORE *new_store, int take_ref) { X509_STORE_free(*store_ptr); *store_ptr = new_store; if (new_store != NULL && take_ref) { X509_STORE_up_ref(new_store); } return 1; } int SSL_get_ex_data_X509_STORE_CTX_idx(void) { // The ex_data index to go from |X509_STORE_CTX| to |SSL| always uses the // reserved app_data slot. Before ex_data was introduced, app_data was used. // Avoid breaking any software which assumes |X509_STORE_CTX_get_app_data| // works. return 0; } int SSL_CTX_set0_verify_cert_store(SSL_CTX *ctx, X509_STORE *store) { check_ssl_ctx_x509_method(ctx); return set_cert_store(&ctx->cert->verify_store, store, 0); } int SSL_CTX_set1_verify_cert_store(SSL_CTX *ctx, X509_STORE *store) { check_ssl_ctx_x509_method(ctx); return set_cert_store(&ctx->cert->verify_store, store, 1); } int SSL_set0_verify_cert_store(SSL *ssl, X509_STORE *store) { check_ssl_x509_method(ssl); if (!ssl->config) { return 0; } return set_cert_store(&ssl->config->cert->verify_store, store, 0); } int SSL_set1_verify_cert_store(SSL *ssl, X509_STORE *store) { check_ssl_x509_method(ssl); if (!ssl->config) { return 0; } return set_cert_store(&ssl->config->cert->verify_store, store, 1); } int SSL_set1_host(SSL *ssl, const char *hostname) { check_ssl_x509_method(ssl); if (!ssl->config) { return 0; } return X509_VERIFY_PARAM_set1_host(ssl->config->param, hostname, strlen(hostname)); } void SSL_set_hostflags(SSL *ssl, unsigned flags) { check_ssl_x509_method(ssl); if (!ssl->config) { return; } X509_VERIFY_PARAM_set_hostflags(ssl->config->param, flags); } int SSL_alert_from_verify_result(long result) { switch (result) { case X509_V_ERR_CERT_CHAIN_TOO_LONG: case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT: case X509_V_ERR_INVALID_CA: case X509_V_ERR_PATH_LENGTH_EXCEEDED: case X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN: case X509_V_ERR_UNABLE_TO_GET_CRL: case 
X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER: case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT: case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY: case X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE: return SSL_AD_UNKNOWN_CA; case X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE: case X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE: case X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY: case X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD: case X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD: case X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD: case X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD: case X509_V_ERR_CERT_UNTRUSTED: case X509_V_ERR_CERT_REJECTED: case X509_V_ERR_HOSTNAME_MISMATCH: case X509_V_ERR_EMAIL_MISMATCH: case X509_V_ERR_IP_ADDRESS_MISMATCH: return SSL_AD_BAD_CERTIFICATE; case X509_V_ERR_CERT_SIGNATURE_FAILURE: case X509_V_ERR_CRL_SIGNATURE_FAILURE: return SSL_AD_DECRYPT_ERROR; case X509_V_ERR_CERT_HAS_EXPIRED: case X509_V_ERR_CERT_NOT_YET_VALID: case X509_V_ERR_CRL_HAS_EXPIRED: case X509_V_ERR_CRL_NOT_YET_VALID: return SSL_AD_CERTIFICATE_EXPIRED; case X509_V_ERR_CERT_REVOKED: return SSL_AD_CERTIFICATE_REVOKED; case X509_V_ERR_UNSPECIFIED: case X509_V_ERR_OUT_OF_MEM: case X509_V_ERR_INVALID_CALL: case X509_V_ERR_STORE_LOOKUP: return SSL_AD_INTERNAL_ERROR; case X509_V_ERR_APPLICATION_VERIFICATION: return SSL_AD_HANDSHAKE_FAILURE; case X509_V_ERR_INVALID_PURPOSE: return SSL_AD_UNSUPPORTED_CERTIFICATE; default: return SSL_AD_CERTIFICATE_UNKNOWN; } } ================================================ FILE: Sources/CNIOBoringSSL/ssl/t1_enc.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * Copyright 2005 Nokia. All rights reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include #include #include #include #include #include #include #include #include #include #include #include #include "../crypto/fipsmodule/tls/internal.h" #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN bool tls1_prf(const EVP_MD *digest, Span out, Span secret, std::string_view label, Span seed1, Span seed2) { return 1 == CRYPTO_tls1_prf(digest, out.data(), out.size(), secret.data(), secret.size(), label.data(), label.size(), seed1.data(), seed1.size(), seed2.data(), seed2.size()); } static bool get_key_block_lengths(const SSL *ssl, size_t *out_mac_secret_len, size_t *out_key_len, size_t *out_iv_len, const SSL_CIPHER *cipher) { const EVP_AEAD *aead = NULL; if (!ssl_cipher_get_evp_aead(&aead, out_mac_secret_len, out_iv_len, cipher, ssl_protocol_version(ssl))) { OPENSSL_PUT_ERROR(SSL, SSL_R_CIPHER_OR_HASH_UNAVAILABLE); return false; } *out_key_len = EVP_AEAD_key_length(aead); if (*out_mac_secret_len > 0) { // For "stateful" AEADs (i.e. compatibility with pre-AEAD cipher suites) the // key length reported by |EVP_AEAD_key_length| will include the MAC key // bytes and initial implicit IV. if (*out_key_len < *out_mac_secret_len + *out_iv_len) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } *out_key_len -= *out_mac_secret_len + *out_iv_len; } return true; } static bool generate_key_block(const SSL *ssl, Span out, const SSL_SESSION *session) { const EVP_MD *digest = ssl_session_get_digest(session); // Note this function assumes that |session|'s key material corresponds to // |ssl->s3->client_random| and |ssl->s3->server_random|. 
return tls1_prf(digest, out, session->secret, "key expansion", ssl->s3->server_random, ssl->s3->client_random); } bool tls1_configure_aead(SSL *ssl, evp_aead_direction_t direction, Array *key_block_cache, const SSL_SESSION *session, Span iv_override) { size_t mac_secret_len, key_len, iv_len; if (!get_key_block_lengths(ssl, &mac_secret_len, &key_len, &iv_len, session->cipher)) { return false; } // Ensure that |key_block_cache| is set up. const size_t key_block_size = 2 * (mac_secret_len + key_len + iv_len); if (key_block_cache->empty()) { if (!key_block_cache->InitForOverwrite(key_block_size) || !generate_key_block(ssl, Span(*key_block_cache), session)) { return false; } } assert(key_block_cache->size() == key_block_size); Span key_block = *key_block_cache; Span mac_secret, key, iv; if (direction == (ssl->server ? evp_aead_open : evp_aead_seal)) { // Use the client write (server read) keys. mac_secret = key_block.subspan(0, mac_secret_len); key = key_block.subspan(2 * mac_secret_len, key_len); iv = key_block.subspan(2 * mac_secret_len + 2 * key_len, iv_len); } else { // Use the server write (client read) keys. 
mac_secret = key_block.subspan(mac_secret_len, mac_secret_len); key = key_block.subspan(2 * mac_secret_len + key_len, key_len); iv = key_block.subspan(2 * mac_secret_len + 2 * key_len + iv_len, iv_len); } if (!iv_override.empty()) { if (iv_override.size() != iv_len) { return false; } iv = iv_override; } UniquePtr aead_ctx = SSLAEADContext::Create( direction, ssl->s3->version, session->cipher, key, mac_secret, iv); if (!aead_ctx) { return false; } if (direction == evp_aead_open) { return ssl->method->set_read_state(ssl, ssl_encryption_application, std::move(aead_ctx), /*traffic_secret=*/{}); } return ssl->method->set_write_state(ssl, ssl_encryption_application, std::move(aead_ctx), /*traffic_secret=*/{}); } bool tls1_change_cipher_state(SSL_HANDSHAKE *hs, evp_aead_direction_t direction) { return tls1_configure_aead(hs->ssl, direction, &hs->key_block, ssl_handshake_session(hs), {}); } bool tls1_generate_master_secret(SSL_HANDSHAKE *hs, Span out, Span premaster) { BSSL_CHECK(out.size() == SSL3_MASTER_SECRET_SIZE); const SSL *ssl = hs->ssl; if (hs->extended_master_secret) { uint8_t digests[EVP_MAX_MD_SIZE]; size_t digests_len; if (!hs->transcript.GetHash(digests, &digests_len) || !tls1_prf(hs->transcript.Digest(), out, premaster, "extended master secret", Span(digests, digests_len), {})) { return false; } } else { if (!tls1_prf(hs->transcript.Digest(), out, premaster, "master secret", ssl->s3->client_random, ssl->s3->server_random)) { return false; } } return true; } BSSL_NAMESPACE_END using namespace bssl; size_t SSL_get_key_block_len(const SSL *ssl) { // See |SSL_generate_key_block|. 
if (SSL_in_init(ssl) || ssl_protocol_version(ssl) > TLS1_2_VERSION) { return 0; } size_t mac_secret_len, key_len, fixed_iv_len; if (!get_key_block_lengths(ssl, &mac_secret_len, &key_len, &fixed_iv_len, SSL_get_current_cipher(ssl))) { ERR_clear_error(); return 0; } return 2 * (mac_secret_len + key_len + fixed_iv_len); } int SSL_generate_key_block(const SSL *ssl, uint8_t *out, size_t out_len) { // Which cipher state to use is ambiguous during a handshake. In particular, // there are points where read and write states are from different epochs. // During a handshake, before ChangeCipherSpec, the encryption states may not // match |ssl->s3->client_random| and |ssl->s3->server_random|. if (SSL_in_init(ssl) || ssl_protocol_version(ssl) > TLS1_2_VERSION) { OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } return generate_key_block(ssl, Span(out, out_len), SSL_get_session(ssl)); } int SSL_export_keying_material(SSL *ssl, uint8_t *out, size_t out_len, const char *label, size_t label_len, const uint8_t *context, size_t context_len, int use_context) { auto out_span = Span(out, out_len); std::string_view label_sv(label, label_len); // In TLS 1.3, the exporter may be used whenever the secret has been derived. if (ssl->s3->version != 0 && ssl_protocol_version(ssl) >= TLS1_3_VERSION) { if (ssl->s3->exporter_secret.empty()) { OPENSSL_PUT_ERROR(SSL, SSL_R_HANDSHAKE_NOT_COMPLETE); return 0; } if (!use_context) { context = nullptr; context_len = 0; } return tls13_export_keying_material(ssl, out_span, ssl->s3->exporter_secret, label_sv, Span(context, context_len)); } // Exporters may be used in False Start, where the handshake has progressed // enough. Otherwise, they may not be used during a handshake. 
if (SSL_in_init(ssl) && !SSL_in_false_start(ssl)) { OPENSSL_PUT_ERROR(SSL, SSL_R_HANDSHAKE_NOT_COMPLETE); return 0; } size_t seed_len = 2 * SSL3_RANDOM_SIZE; if (use_context) { if (context_len >= 1u << 16) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); return 0; } seed_len += 2 + context_len; } Array seed; if (!seed.InitForOverwrite(seed_len)) { return 0; } OPENSSL_memcpy(seed.data(), ssl->s3->client_random, SSL3_RANDOM_SIZE); OPENSSL_memcpy(seed.data() + SSL3_RANDOM_SIZE, ssl->s3->server_random, SSL3_RANDOM_SIZE); if (use_context) { seed[2 * SSL3_RANDOM_SIZE] = static_cast(context_len >> 8); seed[2 * SSL3_RANDOM_SIZE + 1] = static_cast(context_len); OPENSSL_memcpy(seed.data() + 2 * SSL3_RANDOM_SIZE + 2, context, context_len); } const SSL_SESSION *session = SSL_get_session(ssl); const EVP_MD *digest = ssl_session_get_digest(session); return tls1_prf(digest, out_span, session->secret, label_sv, seed, {}); } ================================================ FILE: Sources/CNIOBoringSSL/ssl/tls13_both.cc ================================================ /* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN // kMaxKeyUpdates is the number of consecutive KeyUpdates that will be // processed. Without this limit an attacker could force unbounded processing // without being able to return application data. static const uint8_t kMaxKeyUpdates = 32; const uint8_t kHelloRetryRequest[SSL3_RANDOM_SIZE] = { 0xcf, 0x21, 0xad, 0x74, 0xe5, 0x9a, 0x61, 0x11, 0xbe, 0x1d, 0x8c, 0x02, 0x1e, 0x65, 0xb8, 0x91, 0xc2, 0xa2, 0x11, 0x16, 0x7a, 0xbb, 0x8c, 0x5e, 0x07, 0x9e, 0x09, 0xe2, 0xc8, 0xa8, 0x33, 0x9c, }; // See RFC 8446, section 4.1.3. const uint8_t kTLS12DowngradeRandom[8] = {0x44, 0x4f, 0x57, 0x4e, 0x47, 0x52, 0x44, 0x00}; const uint8_t kTLS13DowngradeRandom[8] = {0x44, 0x4f, 0x57, 0x4e, 0x47, 0x52, 0x44, 0x01}; // This is a non-standard randomly-generated value. const uint8_t kJDK11DowngradeRandom[8] = {0xed, 0xbf, 0xb4, 0xa8, 0xc2, 0x47, 0x10, 0xff}; bool tls13_get_cert_verify_signature_input( SSL_HANDSHAKE *hs, Array *out, enum ssl_cert_verify_context_t cert_verify_context) { ScopedCBB cbb; if (!CBB_init(cbb.get(), 64 + 33 + 1 + 2 * EVP_MAX_MD_SIZE)) { return false; } for (size_t i = 0; i < 64; i++) { if (!CBB_add_u8(cbb.get(), 0x20)) { return false; } } Span context; if (cert_verify_context == ssl_cert_verify_server) { static const char kContext[] = "TLS 1.3, server CertificateVerify"; context = kContext; } else if (cert_verify_context == ssl_cert_verify_client) { static const char kContext[] = "TLS 1.3, client CertificateVerify"; context = kContext; } else if (cert_verify_context == ssl_cert_verify_channel_id) { static const char kContext[] = "TLS 1.3, Channel ID"; context = kContext; } else { return false; } // Note |context| includes the NUL byte separator. 
if (!CBB_add_bytes(cbb.get(), reinterpret_cast(context.data()), context.size())) { return false; } uint8_t context_hash[EVP_MAX_MD_SIZE]; size_t context_hash_len; if (!hs->transcript.GetHash(context_hash, &context_hash_len) || !CBB_add_bytes(cbb.get(), context_hash, context_hash_len) || !CBBFinishArray(cbb.get(), out)) { return false; } return true; } bool tls13_process_certificate(SSL_HANDSHAKE *hs, const SSLMessage &msg, bool allow_anonymous) { SSL *const ssl = hs->ssl; CBS body = msg.body; bssl::UniquePtr decompressed; if (msg.type == SSL3_MT_COMPRESSED_CERTIFICATE) { CBS compressed; uint16_t alg_id; uint32_t uncompressed_len; if (!CBS_get_u16(&body, &alg_id) || !CBS_get_u24(&body, &uncompressed_len) || !CBS_get_u24_length_prefixed(&body, &compressed) || CBS_len(&body) != 0) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } if (uncompressed_len > ssl->max_cert_list) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); OPENSSL_PUT_ERROR(SSL, SSL_R_UNCOMPRESSED_CERT_TOO_LARGE); ERR_add_error_dataf("requested=%u", static_cast(uncompressed_len)); return false; } ssl_cert_decompression_func_t decompress = nullptr; for (const auto &alg : ssl->ctx->cert_compression_algs) { if (alg.alg_id == alg_id) { decompress = alg.decompress; break; } } if (decompress == nullptr) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CERT_COMPRESSION_ALG); ERR_add_error_dataf("alg=%d", static_cast(alg_id)); return false; } CRYPTO_BUFFER *decompressed_ptr = nullptr; if (!decompress(ssl, &decompressed_ptr, uncompressed_len, CBS_data(&compressed), CBS_len(&compressed))) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_DECOMPRESSION_FAILED); ERR_add_error_dataf("alg=%d", static_cast(alg_id)); return false; } decompressed.reset(decompressed_ptr); if (CRYPTO_BUFFER_len(decompressed_ptr) != uncompressed_len) { 
ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_DECOMPRESSION_FAILED); ERR_add_error_dataf( "alg=%d got=%u expected=%u", static_cast(alg_id), static_cast(CRYPTO_BUFFER_len(decompressed_ptr)), static_cast(uncompressed_len)); return false; } CBS_init(&body, CRYPTO_BUFFER_data(decompressed_ptr), CRYPTO_BUFFER_len(decompressed_ptr)); } else { assert(msg.type == SSL3_MT_CERTIFICATE); } CBS context, certificate_list; if (!CBS_get_u8_length_prefixed(&body, &context) || // CBS_len(&context) != 0 || // !CBS_get_u24_length_prefixed(&body, &certificate_list) || // CBS_len(&body) != 0) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } UniquePtr certs(sk_CRYPTO_BUFFER_new_null()); if (!certs) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return false; } const bool retain_sha256 = ssl->server && hs->config->retain_only_sha256_of_client_certs; UniquePtr pkey; while (CBS_len(&certificate_list) > 0) { CBS certificate, extensions; if (!CBS_get_u24_length_prefixed(&certificate_list, &certificate) || !CBS_get_u16_length_prefixed(&certificate_list, &extensions) || CBS_len(&certificate) == 0) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_LENGTH_MISMATCH); return false; } if (sk_CRYPTO_BUFFER_num(certs.get()) == 0) { pkey = ssl_cert_parse_pubkey(&certificate); if (!pkey) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); return false; } // TLS 1.3 always uses certificate keys for signing thus the correct // keyUsage is enforced. if (!ssl_cert_check_key_usage(&certificate, key_usage_digital_signature)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); return false; } if (retain_sha256) { // Retain the hash of the leaf certificate if requested. 
SHA256(CBS_data(&certificate), CBS_len(&certificate), hs->new_session->peer_sha256); } } UniquePtr buf( CRYPTO_BUFFER_new_from_CBS(&certificate, ssl->ctx->pool)); if (!buf || // !PushToStack(certs.get(), std::move(buf))) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return false; } // Parse out the extensions. SSLExtension status_request( TLSEXT_TYPE_status_request, !ssl->server && hs->config->ocsp_stapling_enabled); SSLExtension sct( TLSEXT_TYPE_certificate_timestamp, !ssl->server && hs->config->signed_cert_timestamps_enabled); uint8_t alert = SSL_AD_DECODE_ERROR; if (!ssl_parse_extensions(&extensions, &alert, {&status_request, &sct}, /*ignore_unknown=*/false)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return false; } // All Certificate extensions are parsed, but only the leaf extensions are // stored. if (status_request.present) { uint8_t status_type; CBS ocsp_response; if (!CBS_get_u8(&status_request.data, &status_type) || status_type != TLSEXT_STATUSTYPE_ocsp || !CBS_get_u24_length_prefixed(&status_request.data, &ocsp_response) || CBS_len(&ocsp_response) == 0 || CBS_len(&status_request.data) != 0) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return false; } if (sk_CRYPTO_BUFFER_num(certs.get()) == 1) { hs->new_session->ocsp_response.reset( CRYPTO_BUFFER_new_from_CBS(&ocsp_response, ssl->ctx->pool)); if (hs->new_session->ocsp_response == nullptr) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return false; } } } if (sct.present) { if (!ssl_is_sct_list_valid(&sct.data)) { OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_PARSING_EXTENSION); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return false; } if (sk_CRYPTO_BUFFER_num(certs.get()) == 1) { hs->new_session->signed_cert_timestamp_list.reset( CRYPTO_BUFFER_new_from_CBS(&sct.data, ssl->ctx->pool)); if (hs->new_session->signed_cert_timestamp_list == nullptr) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return false; } } } } // Store a null certificate 
list rather than an empty one if the peer didn't // send certificates. if (sk_CRYPTO_BUFFER_num(certs.get()) == 0) { certs.reset(); } hs->peer_pubkey = std::move(pkey); hs->new_session->certs = std::move(certs); if (!ssl->ctx->x509_method->session_cache_objects(hs->new_session.get())) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return false; } if (sk_CRYPTO_BUFFER_num(hs->new_session->certs.get()) == 0) { if (!allow_anonymous) { OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_CERTIFICATE_REQUIRED); return false; } // OpenSSL returns X509_V_OK when no certificates are requested. This is // classed by them as a bug, but it's assumed by at least NGINX. hs->new_session->verify_result = X509_V_OK; // No certificate, so nothing more to do. return true; } hs->new_session->peer_sha256_valid = retain_sha256; return true; } bool tls13_process_certificate_verify(SSL_HANDSHAKE *hs, const SSLMessage &msg) { SSL *const ssl = hs->ssl; if (hs->peer_pubkey == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } CBS body = msg.body, signature; uint16_t signature_algorithm; if (!CBS_get_u16(&body, &signature_algorithm) || // !CBS_get_u16_length_prefixed(&body, &signature) || // CBS_len(&body) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return false; } uint8_t alert = SSL_AD_DECODE_ERROR; if (!tls12_check_peer_sigalg(hs, &alert, signature_algorithm, hs->peer_pubkey.get())) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return false; } hs->new_session->peer_signature_algorithm = signature_algorithm; Array input; if (!tls13_get_cert_verify_signature_input( hs, &input, ssl->server ? 
ssl_cert_verify_client : ssl_cert_verify_server)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return false; } if (!ssl_public_key_verify(ssl, signature, signature_algorithm, hs->peer_pubkey.get(), input)) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SIGNATURE); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); return false; } return true; } bool tls13_process_finished(SSL_HANDSHAKE *hs, const SSLMessage &msg, bool use_saved_value) { SSL *const ssl = hs->ssl; uint8_t verify_data_buf[EVP_MAX_MD_SIZE]; Span verify_data; if (use_saved_value) { assert(ssl->server); verify_data = hs->expected_client_finished; } else { size_t len; if (!tls13_finished_mac(hs, verify_data_buf, &len, !ssl->server)) { return false; } verify_data = Span(verify_data_buf, len); } bool finished_ok = CBS_mem_equal(&msg.body, verify_data.data(), verify_data.size()); #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) finished_ok = true; #endif if (!finished_ok) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); OPENSSL_PUT_ERROR(SSL, SSL_R_DIGEST_CHECK_FAILED); return false; } return true; } bool tls13_add_certificate(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; const SSL_CREDENTIAL *cred = hs->credential.get(); ScopedCBB cbb; CBB *body, body_storage, certificate_list; if (hs->cert_compression_negotiated) { if (!CBB_init(cbb.get(), 1024)) { return false; } body = cbb.get(); } else { body = &body_storage; if (!ssl->method->init_message(ssl, cbb.get(), body, SSL3_MT_CERTIFICATE)) { return false; } } if ( // The request context is always empty in the handshake. 
!CBB_add_u8(body, 0) || !CBB_add_u24_length_prefixed(body, &certificate_list)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } if (hs->credential == nullptr) { return ssl_add_message_cbb(ssl, cbb.get()); } assert(hs->credential->UsesX509()); CRYPTO_BUFFER *leaf_buf = sk_CRYPTO_BUFFER_value(cred->chain.get(), 0); CBB leaf, extensions; if (!CBB_add_u24_length_prefixed(&certificate_list, &leaf) || !CBB_add_bytes(&leaf, CRYPTO_BUFFER_data(leaf_buf), CRYPTO_BUFFER_len(leaf_buf)) || !CBB_add_u16_length_prefixed(&certificate_list, &extensions)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } if (hs->scts_requested && cred->signed_cert_timestamp_list != nullptr) { CBB contents; if (!CBB_add_u16(&extensions, TLSEXT_TYPE_certificate_timestamp) || !CBB_add_u16_length_prefixed(&extensions, &contents) || !CBB_add_bytes( &contents, CRYPTO_BUFFER_data(cred->signed_cert_timestamp_list.get()), CRYPTO_BUFFER_len(cred->signed_cert_timestamp_list.get())) || !CBB_flush(&extensions)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } } if (hs->ocsp_stapling_requested && cred->ocsp_response != NULL) { CBB contents, ocsp_response; if (!CBB_add_u16(&extensions, TLSEXT_TYPE_status_request) || !CBB_add_u16_length_prefixed(&extensions, &contents) || !CBB_add_u8(&contents, TLSEXT_STATUSTYPE_ocsp) || !CBB_add_u24_length_prefixed(&contents, &ocsp_response) || !CBB_add_bytes(&ocsp_response, CRYPTO_BUFFER_data(cred->ocsp_response.get()), CRYPTO_BUFFER_len(cred->ocsp_response.get())) || !CBB_flush(&extensions)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } } if (cred->type == SSLCredentialType::kDelegated) { CBB child; if (!CBB_add_u16(&extensions, TLSEXT_TYPE_delegated_credential) || !CBB_add_u16_length_prefixed(&extensions, &child) || !CBB_add_bytes(&child, CRYPTO_BUFFER_data(cred->dc.get()), CRYPTO_BUFFER_len(cred->dc.get())) || !CBB_flush(&extensions)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } } for (size_t i = 
1; i < sk_CRYPTO_BUFFER_num(cred->chain.get()); i++) { CRYPTO_BUFFER *cert_buf = sk_CRYPTO_BUFFER_value(cred->chain.get(), i); CBB child; if (!CBB_add_u24_length_prefixed(&certificate_list, &child) || !CBB_add_bytes(&child, CRYPTO_BUFFER_data(cert_buf), CRYPTO_BUFFER_len(cert_buf)) || !CBB_add_u16(&certificate_list, 0 /* no extensions */)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } } if (!hs->cert_compression_negotiated) { return ssl_add_message_cbb(ssl, cbb.get()); } Array msg; if (!CBBFinishArray(cbb.get(), &msg)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } const CertCompressionAlg *alg = nullptr; for (const auto &candidate : ssl->ctx->cert_compression_algs) { if (candidate.alg_id == hs->cert_compression_alg_id) { alg = &candidate; break; } } if (alg == nullptr || alg->compress == nullptr) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } CBB compressed; body = &body_storage; if (!ssl->method->init_message(ssl, cbb.get(), body, SSL3_MT_COMPRESSED_CERTIFICATE) || !CBB_add_u16(body, hs->cert_compression_alg_id) || msg.size() > (1u << 24) - 1 || // !CBB_add_u24(body, static_cast(msg.size())) || !CBB_add_u24_length_prefixed(body, &compressed)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } SSL_HANDSHAKE_HINTS *const hints = hs->hints.get(); if (hints && !hs->hints_requested && hints->cert_compression_alg_id == hs->cert_compression_alg_id && hints->cert_compression_input == Span(msg) && !hints->cert_compression_output.empty()) { if (!CBB_add_bytes(&compressed, hints->cert_compression_output.data(), hints->cert_compression_output.size())) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } } else { if (!alg->compress(ssl, &compressed, msg.data(), msg.size())) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } if (hints && hs->hints_requested) { hints->cert_compression_alg_id = hs->cert_compression_alg_id; if (!hints->cert_compression_input.CopyFrom(msg) || 
!hints->cert_compression_output.CopyFrom( Span(CBB_data(&compressed), CBB_len(&compressed)))) { return false; } } } if (!ssl_add_message_cbb(ssl, cbb.get())) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } return true; } enum ssl_private_key_result_t tls13_add_certificate_verify(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; assert(hs->signature_algorithm != 0); ScopedCBB cbb; CBB body; if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CERTIFICATE_VERIFY) || !CBB_add_u16(&body, hs->signature_algorithm)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return ssl_private_key_failure; } CBB child; const size_t max_sig_len = EVP_PKEY_size(hs->credential->pubkey.get()); uint8_t *sig; size_t sig_len; if (!CBB_add_u16_length_prefixed(&body, &child) || !CBB_reserve(&child, &sig, max_sig_len)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return ssl_private_key_failure; } Array msg; if (!tls13_get_cert_verify_signature_input( hs, &msg, ssl->server ? ssl_cert_verify_server : ssl_cert_verify_client)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return ssl_private_key_failure; } enum ssl_private_key_result_t sign_result = ssl_private_key_sign( hs, sig, &sig_len, max_sig_len, hs->signature_algorithm, msg); if (sign_result != ssl_private_key_success) { return sign_result; } if (!CBB_did_write(&child, sig_len) || // !ssl_add_message_cbb(ssl, cbb.get())) { return ssl_private_key_failure; } return ssl_private_key_success; } bool tls13_add_finished(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; size_t verify_data_len; uint8_t verify_data[EVP_MAX_MD_SIZE]; if (!tls13_finished_mac(hs, verify_data, &verify_data_len, ssl->server)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); OPENSSL_PUT_ERROR(SSL, SSL_R_DIGEST_CHECK_FAILED); return false; } ScopedCBB cbb; CBB body; if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_FINISHED) || !CBB_add_bytes(&body, verify_data, verify_data_len) || 
!ssl_add_message_cbb(ssl, cbb.get())) { return false; } return true; } bool tls13_add_key_update(SSL *ssl, int request_type) { if (ssl->s3->key_update_pending) { return true; } // We do not support multiple parallel outgoing flights. If there is an // outgoing flight pending, queue the KeyUpdate for later. if (SSL_is_dtls(ssl) && !ssl->d1->outgoing_messages.empty()) { ssl->d1->queued_key_update = request_type == SSL_KEY_UPDATE_REQUESTED ? QueuedKeyUpdate::kUpdateRequested : QueuedKeyUpdate::kUpdateNotRequested; return true; } ScopedCBB cbb; CBB body_cbb; if (!ssl->method->init_message(ssl, cbb.get(), &body_cbb, SSL3_MT_KEY_UPDATE) || !CBB_add_u8(&body_cbb, request_type) || !ssl_add_message_cbb(ssl, cbb.get())) { return false; } // In DTLS, the actual key update is deferred until KeyUpdate is ACKed. if (!SSL_is_dtls(ssl) && !tls13_rotate_traffic_key(ssl, evp_aead_seal)) { return false; } // Suppress KeyUpdate acknowledgments until this change is written to the // wire. This prevents us from accumulating write obligations when read and // write progress at different rates. See RFC 8446, section 4.6.3. 
ssl->s3->key_update_pending = true; ssl->method->finish_flight(ssl); return true; } static bool tls13_receive_key_update(SSL *ssl, const SSLMessage &msg) { CBS body = msg.body; uint8_t key_update_request; if (!CBS_get_u8(&body, &key_update_request) || // CBS_len(&body) != 0 || // (key_update_request != SSL_KEY_UPDATE_NOT_REQUESTED && // key_update_request != SSL_KEY_UPDATE_REQUESTED)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return false; } if (!tls13_rotate_traffic_key(ssl, evp_aead_open)) { return false; } // Acknowledge the KeyUpdate if (key_update_request == SSL_KEY_UPDATE_REQUESTED && !tls13_add_key_update(ssl, SSL_KEY_UPDATE_NOT_REQUESTED)) { return false; } return true; } bool tls13_post_handshake(SSL *ssl, const SSLMessage &msg) { if (msg.type == SSL3_MT_NEW_SESSION_TICKET && !ssl->server) { return tls13_process_new_session_ticket(ssl, msg); } if (msg.type == SSL3_MT_KEY_UPDATE) { ssl->s3->key_update_count++; if (SSL_is_quic(ssl) || ssl->s3->key_update_count > kMaxKeyUpdates) { OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MANY_KEY_UPDATES); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); return false; } return tls13_receive_key_update(ssl, msg); } ssl->s3->key_update_count = 0; ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); return false; } BSSL_NAMESPACE_END ================================================ FILE: Sources/CNIOBoringSSL/ssl/tls13_client.cc ================================================ /* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include "../crypto/internal.h" #include "internal.h" BSSL_NAMESPACE_BEGIN enum client_hs_state_t { state_read_hello_retry_request = 0, state_send_second_client_hello, state_read_server_hello, state_read_encrypted_extensions, state_read_certificate_request, state_read_server_certificate, state_read_server_certificate_verify, state_server_certificate_reverify, state_read_server_finished, state_send_end_of_early_data, state_send_client_encrypted_extensions, state_send_client_certificate, state_send_client_certificate_verify, state_complete_second_flight, state_done, }; static const uint8_t kZeroes[EVP_MAX_MD_SIZE] = {0}; // end_of_early_data closes the early data stream for |hs| and switches the // encryption level to |level|. It returns true on success and false on error. static bool close_early_data(SSL_HANDSHAKE *hs, ssl_encryption_level_t level) { SSL *const ssl = hs->ssl; assert(hs->in_early_data); // Note |can_early_write| may already be false if |SSL_write| exceeded the // early data write limit. hs->can_early_write = false; // 0-RTT write states on the client differ between TLS 1.3, DTLS 1.3, and // QUIC. TLS 1.3 has one write encryption level at a time. 0-RTT write keys // overwrite the null cipher and defer handshake write keys. While a // HelloRetryRequest can cause us to rewind back to the null cipher, sequence // numbers have no effect, so we can install a "new" null cipher. // // In QUIC and DTLS 1.3, 0-RTT write state cannot override or defer the normal // write state. 
The two ClientHello sequence numbers must align, and handshake // write keys must be installed early to ACK the EncryptedExtensions. // // TODO(crbug.com/381113363): We do not support 0-RTT in DTLS 1.3 and, in // QUIC, the caller handles 0-RTT data, so we can skip installing 0-RTT keys // and act as if there is one write level. Now that we're implementing // DTLS 1.3, switch the abstraction to the DTLS/QUIC model where handshake // keys write keys are installed immediately, but the TLS record layer // internally waits to activate that epoch until the 0-RTT channel is closed. if (!SSL_is_quic(ssl)) { if (level == ssl_encryption_initial) { bssl::UniquePtr null_ctx = SSLAEADContext::CreateNullCipher(); if (!null_ctx || // !ssl->method->set_write_state(ssl, ssl_encryption_initial, std::move(null_ctx), /*traffic_secret=*/{})) { return false; } } else { assert(level == ssl_encryption_handshake); if (!tls13_set_traffic_key(ssl, ssl_encryption_handshake, evp_aead_seal, hs->new_session.get(), hs->client_handshake_secret)) { return false; } } } else { assert(ssl->s3->quic_write_level == level); } return true; } static bool parse_server_hello_tls13(const SSL_HANDSHAKE *hs, ParsedServerHello *out, uint8_t *out_alert, const SSLMessage &msg) { if (!ssl_parse_server_hello(out, out_alert, msg)) { return false; } uint16_t expected_version = SSL_is_dtls(hs->ssl) ? DTLS1_2_VERSION : TLS1_2_VERSION; // DTLS 1.3 disables "compatibility mode" (RFC 8446, appendix D.4). When // disabled, servers MUST NOT echo the legacy_session_id (RFC 9147, section // 5). The client could have sent a session ID indicating its willingness to // resume a DTLS 1.2 session, so just checking that the session IDs match is // incorrect. Span expected_session_id = SSL_is_dtls(hs->ssl) ? Span() : Span(hs->session_id); // RFC 8446 fixes some legacy values. Check them. 
if (out->legacy_version != expected_version || // out->compression_method != 0 || Span(out->session_id) != expected_session_id || CBS_len(&out->extensions) == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } return true; } static bool is_hello_retry_request(const ParsedServerHello &server_hello) { return Span(server_hello.random) == kHelloRetryRequest; } static bool check_ech_confirmation(const SSL_HANDSHAKE *hs, bool *out_accepted, uint8_t *out_alert, const ParsedServerHello &server_hello) { const bool is_hrr = is_hello_retry_request(server_hello); size_t offset; if (is_hrr) { // We check for an unsolicited extension when parsing all of them. SSLExtension ech(TLSEXT_TYPE_encrypted_client_hello); if (!ssl_parse_extensions(&server_hello.extensions, out_alert, {&ech}, /*ignore_unknown=*/true)) { return false; } if (!ech.present) { *out_accepted = false; return true; } if (CBS_len(&ech.data) != ECH_CONFIRMATION_SIGNAL_LEN) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; return false; } offset = CBS_data(&ech.data) - CBS_data(&server_hello.raw); } else { offset = ssl_ech_confirmation_signal_hello_offset(hs->ssl); } if (!hs->selected_ech_config) { *out_accepted = false; return true; } uint8_t expected[ECH_CONFIRMATION_SIGNAL_LEN]; if (!ssl_ech_accept_confirmation(hs, expected, hs->inner_client_random, hs->inner_transcript, is_hrr, server_hello.raw, offset)) { *out_alert = SSL_AD_INTERNAL_ERROR; return false; } *out_accepted = CRYPTO_memcmp(CBS_data(&server_hello.raw) + offset, expected, sizeof(expected)) == 0; return true; } static enum ssl_hs_wait_t do_read_hello_retry_request(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; assert(ssl->s3->version != 0); SSLMessage msg; if (!ssl->method->get_message(ssl, &msg)) { return ssl_hs_read_message; } // Queue up a ChangeCipherSpec for whenever we next send something. This // will be before the second ClientHello. 
If we offered early data, this was // already done. if (!hs->early_data_offered && // !ssl->method->add_change_cipher_spec(ssl)) { return ssl_hs_error; } ParsedServerHello server_hello; uint8_t alert = SSL_AD_DECODE_ERROR; if (!parse_server_hello_tls13(hs, &server_hello, &alert, msg)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return ssl_hs_error; } // The cipher suite must be one we offered. We currently offer all supported // TLS 1.3 ciphers unless policy controls limited it. So we check the version // and that it's ok per policy. const SSL_CIPHER *cipher = SSL_get_cipher_by_value(server_hello.cipher_suite); if (cipher == nullptr || SSL_CIPHER_get_min_version(cipher) > ssl_protocol_version(ssl) || SSL_CIPHER_get_max_version(cipher) < ssl_protocol_version(ssl) || !ssl_tls13_cipher_meets_policy(SSL_CIPHER_get_protocol_id(cipher), ssl->config->compliance_policy)) { OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CIPHER_RETURNED); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); return ssl_hs_error; } hs->new_cipher = cipher; const bool is_hrr = is_hello_retry_request(server_hello); if (!hs->transcript.InitHash(ssl_protocol_version(ssl), hs->new_cipher) || (is_hrr && !hs->transcript.UpdateForHelloRetryRequest())) { return ssl_hs_error; } if (hs->selected_ech_config) { if (!hs->inner_transcript.InitHash(ssl_protocol_version(ssl), hs->new_cipher) || (is_hrr && !hs->inner_transcript.UpdateForHelloRetryRequest())) { return ssl_hs_error; } } // Determine which ClientHello the server is responding to. Run // |check_ech_confirmation| unconditionally, so we validate the extension // contents. bool ech_accepted; if (!check_ech_confirmation(hs, &ech_accepted, &alert, server_hello)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return ssl_hs_error; } if (hs->selected_ech_config) { ssl->s3->ech_status = ech_accepted ? 
ssl_ech_accepted : ssl_ech_rejected; } if (!is_hrr) { hs->tls13_state = state_read_server_hello; return ssl_hs_ok; } // The ECH extension, if present, was already parsed by // |check_ech_confirmation|. SSLExtension cookie(TLSEXT_TYPE_cookie), key_share(TLSEXT_TYPE_key_share), supported_versions(TLSEXT_TYPE_supported_versions), ech_unused(TLSEXT_TYPE_encrypted_client_hello, hs->selected_ech_config || hs->config->ech_grease_enabled); if (!ssl_parse_extensions( &server_hello.extensions, &alert, {&cookie, &key_share, &supported_versions, &ech_unused}, /*ignore_unknown=*/false)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return ssl_hs_error; } if (!cookie.present && !key_share.present) { OPENSSL_PUT_ERROR(SSL, SSL_R_EMPTY_HELLO_RETRY_REQUEST); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); return ssl_hs_error; } if (cookie.present) { CBS cookie_value; if (!CBS_get_u16_length_prefixed(&cookie.data, &cookie_value) || // CBS_len(&cookie_value) == 0 || // CBS_len(&cookie.data) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return ssl_hs_error; } if (!hs->cookie.CopyFrom(cookie_value)) { return ssl_hs_error; } } if (key_share.present) { uint16_t group_id; if (!CBS_get_u16(&key_share.data, &group_id) || CBS_len(&key_share.data) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return ssl_hs_error; } // The group must be supported. if (!tls1_check_group_id(hs, group_id)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); return ssl_hs_error; } // Check that the HelloRetryRequest does not request a key share that was // provided in the initial ClientHello. 
if (hs->key_shares[0]->GroupID() == group_id || (hs->key_shares[1] && hs->key_shares[1]->GroupID() == group_id)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); return ssl_hs_error; } if (!ssl_setup_key_shares(hs, group_id)) { return ssl_hs_error; } } // Although we now know whether ClientHelloInner was used, we currently // maintain both transcripts up to ServerHello. We could swap transcripts // early, but then ClientHello construction and |check_ech_confirmation| // become more complex. if (!ssl_hash_message(hs, msg)) { return ssl_hs_error; } if (ssl->s3->ech_status == ssl_ech_accepted && !hs->inner_transcript.Update(msg.raw)) { return ssl_hs_error; } // HelloRetryRequest should be the end of the flight. if (ssl->method->has_unprocessed_handshake_data(ssl)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESS_HANDSHAKE_DATA); return ssl_hs_error; } ssl->method->next_message(ssl); ssl->s3->used_hello_retry_request = true; hs->tls13_state = state_send_second_client_hello; // 0-RTT is rejected if we receive a HelloRetryRequest. if (hs->in_early_data) { ssl->s3->early_data_reason = ssl_early_data_hello_retry_request; if (!close_early_data(hs, ssl_encryption_initial)) { return ssl_hs_error; } return ssl_hs_early_data_rejected; } return ssl_hs_ok; } static enum ssl_hs_wait_t do_send_second_client_hello(SSL_HANDSHAKE *hs) { // Build the second ClientHelloInner, if applicable. The second ClientHello // uses an empty string for |enc|. 
if (hs->ssl->s3->ech_status == ssl_ech_accepted && !ssl_encrypt_client_hello(hs, {})) { return ssl_hs_error; } if (!ssl_add_client_hello(hs)) { return ssl_hs_error; } ssl_done_writing_client_hello(hs); hs->tls13_state = state_read_server_hello; return ssl_hs_flush; } static enum ssl_hs_wait_t do_read_server_hello(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; SSLMessage msg; if (!ssl->method->get_message(ssl, &msg)) { return ssl_hs_read_message; } ParsedServerHello server_hello; uint8_t alert = SSL_AD_DECODE_ERROR; if (!parse_server_hello_tls13(hs, &server_hello, &alert, msg)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return ssl_hs_error; } // Forbid a second HelloRetryRequest. if (is_hello_retry_request(server_hello)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); return ssl_hs_error; } // Check the cipher suite, in case this is after HelloRetryRequest. if (SSL_CIPHER_get_protocol_id(hs->new_cipher) != server_hello.cipher_suite) { OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CIPHER_RETURNED); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); return ssl_hs_error; } if (ssl->s3->ech_status == ssl_ech_accepted) { if (ssl->s3->used_hello_retry_request) { // HelloRetryRequest and ServerHello must accept ECH consistently. bool ech_accepted; if (!check_ech_confirmation(hs, &ech_accepted, &alert, server_hello)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return ssl_hs_error; } if (!ech_accepted) { OPENSSL_PUT_ERROR(SSL, SSL_R_INCONSISTENT_ECH_NEGOTIATION); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); return ssl_hs_error; } } hs->transcript = std::move(hs->inner_transcript); hs->extensions.sent = hs->inner_extensions_sent; // Report the inner random value through |SSL_get_client_random|. 
OPENSSL_memcpy(ssl->s3->client_random, hs->inner_client_random, SSL3_RANDOM_SIZE); } OPENSSL_memcpy(ssl->s3->server_random, CBS_data(&server_hello.random), SSL3_RANDOM_SIZE); // When offering ECH, |ssl->session| is only offered in ClientHelloInner. const bool pre_shared_key_allowed = ssl->session != nullptr && ssl_session_get_type(ssl->session.get()) == SSLSessionType::kPreSharedKey && ssl->s3->ech_status != ssl_ech_rejected; SSLExtension key_share(TLSEXT_TYPE_key_share), pre_shared_key(TLSEXT_TYPE_pre_shared_key, pre_shared_key_allowed), supported_versions(TLSEXT_TYPE_supported_versions); if (!ssl_parse_extensions(&server_hello.extensions, &alert, {&key_share, &pre_shared_key, &supported_versions}, /*ignore_unknown=*/false)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return ssl_hs_error; } // Recheck supported_versions, in case this is after HelloRetryRequest. uint16_t version; if (!supported_versions.present || // !CBS_get_u16(&supported_versions.data, &version) || // CBS_len(&supported_versions.data) != 0 || // version != ssl->s3->version) { OPENSSL_PUT_ERROR(SSL, SSL_R_SECOND_SERVERHELLO_VERSION_MISMATCH); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); return ssl_hs_error; } alert = SSL_AD_DECODE_ERROR; if (pre_shared_key.present) { if (!ssl_ext_pre_shared_key_parse_serverhello(hs, &alert, &pre_shared_key.data)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return ssl_hs_error; } if (ssl->session->ssl_version != ssl->s3->version) { OPENSSL_PUT_ERROR(SSL, SSL_R_OLD_SESSION_VERSION_NOT_RETURNED); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); return ssl_hs_error; } if (ssl->session->cipher->algorithm_prf != hs->new_cipher->algorithm_prf) { OPENSSL_PUT_ERROR(SSL, SSL_R_OLD_SESSION_PRF_HASH_MISMATCH); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); return ssl_hs_error; } if (!ssl_session_is_context_valid(hs, ssl->session.get())) { // This is actually a client application bug. 
OPENSSL_PUT_ERROR(SSL, SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); return ssl_hs_error; } ssl->s3->session_reused = true; hs->can_release_private_key = true; // Only authentication information carries over in TLS 1.3. hs->new_session = SSL_SESSION_dup(ssl->session.get(), SSL_SESSION_DUP_AUTH_ONLY); if (!hs->new_session) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return ssl_hs_error; } ssl_set_session(ssl, NULL); // Resumption incorporates fresh key material, so refresh the timeout. ssl_session_renew_timeout(ssl, hs->new_session.get(), ssl->session_ctx->session_psk_dhe_timeout); } else if (!ssl_get_new_session(hs)) { ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); return ssl_hs_error; } hs->new_session->cipher = hs->new_cipher; // Set up the key schedule and incorporate the PSK into the running secret. size_t hash_len = EVP_MD_size( ssl_get_handshake_digest(ssl_protocol_version(ssl), hs->new_cipher)); if (!tls13_init_key_schedule(hs, ssl->s3->session_reused ? Span(hs->new_session->secret) : Span(kZeroes, hash_len))) { return ssl_hs_error; } if (!key_share.present) { // We do not support psk_ke and thus always require a key share. OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_KEY_SHARE); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_MISSING_EXTENSION); return ssl_hs_error; } // Resolve ECDHE and incorporate it into the secret. Array dhe_secret; alert = SSL_AD_DECODE_ERROR; if (!ssl_ext_key_share_parse_serverhello(hs, &dhe_secret, &alert, &key_share.data)) { ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return ssl_hs_error; } if (!tls13_advance_key_schedule(hs, dhe_secret) || // !ssl_hash_message(hs, msg) || // !tls13_derive_handshake_secrets(hs)) { return ssl_hs_error; } // If currently sending early data over TCP, we defer installing client // traffic keys to when the early data stream is closed. See // |close_early_data|. 
Note if the server has already rejected 0-RTT via // HelloRetryRequest, |in_early_data| is already false. if (!hs->in_early_data || SSL_is_quic(ssl)) { if (!tls13_set_traffic_key(ssl, ssl_encryption_handshake, evp_aead_seal, hs->new_session.get(), hs->client_handshake_secret)) { return ssl_hs_error; } } if (!tls13_set_traffic_key(ssl, ssl_encryption_handshake, evp_aead_open, hs->new_session.get(), hs->server_handshake_secret)) { return ssl_hs_error; } ssl->method->next_message(ssl); hs->tls13_state = state_read_encrypted_extensions; return ssl_hs_ok; } static enum ssl_hs_wait_t do_read_encrypted_extensions(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; SSLMessage msg; if (!ssl->method->get_message(ssl, &msg)) { return ssl_hs_read_message; } if (!ssl_check_message_type(ssl, msg, SSL3_MT_ENCRYPTED_EXTENSIONS)) { return ssl_hs_error; } CBS body = msg.body, extensions; if (!CBS_get_u16_length_prefixed(&body, &extensions) || // CBS_len(&body) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return ssl_hs_error; } if (!ssl_parse_serverhello_tlsext(hs, &extensions)) { OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); return ssl_hs_error; } if (ssl->s3->early_data_accepted) { // The extension parser checks the server resumed the session. assert(ssl->s3->session_reused); // If offering ECH, the server may not accept early data with // ClientHelloOuter. We do not offer sessions with ClientHelloOuter, so this // this should be implied by checking |session_reused|. 
    assert(ssl->s3->ech_status != ssl_ech_rejected);

    // Accepted 0-RTT must use the same cipher suite as the original offer.
    if (hs->early_session->cipher != hs->new_session->cipher) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_CIPHER_MISMATCH_ON_EARLY_DATA);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_hs_error;
    }

    // ...and the same ALPN protocol.
    if (Span(hs->early_session->early_alpn) != ssl->s3->alpn_selected) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_ALPN_MISMATCH_ON_EARLY_DATA);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_hs_error;
    }

    // Channel ID is incompatible with 0-RTT. The ALPS extension should be
    // negotiated implicitly.
    if (hs->channel_id_negotiated ||
        hs->new_session->has_application_settings) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION_ON_EARLY_DATA);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_hs_error;
    }
    // Carry the application settings over from the 0-RTT session.
    hs->new_session->has_application_settings =
        hs->early_session->has_application_settings;
    if (!hs->new_session->local_application_settings.CopyFrom(
            hs->early_session->local_application_settings) ||
        !hs->new_session->peer_application_settings.CopyFrom(
            hs->early_session->peer_application_settings)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
      return ssl_hs_error;
    }
  }

  // Store the negotiated ALPN in the session.
  if (!hs->new_session->early_alpn.CopyFrom(ssl->s3->alpn_selected)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
    return ssl_hs_error;
  }

  if (!ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);
  hs->tls13_state = state_read_certificate_request;
  // If the server rejected the 0-RTT offer, close the early-data stream and
  // surface the rejection to the caller.
  if (hs->in_early_data && !ssl->s3->early_data_accepted) {
    if (!close_early_data(hs, ssl_encryption_handshake)) {
      return ssl_hs_error;
    }
    return ssl_hs_early_data_rejected;
  }
  return ssl_hs_ok;
}

// do_read_certificate_request reads the server's optional CertificateRequest
// message and records the requested signature algorithms and CA names.
static enum ssl_hs_wait_t do_read_certificate_request(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  // CertificateRequest may only be sent in non-resumption handshakes.
  if (ssl->s3->session_reused) {
    if (ssl->ctx->reverify_on_resume && !ssl->s3->early_data_accepted) {
      hs->tls13_state = state_server_certificate_reverify;
      return ssl_hs_ok;
    }
    hs->tls13_state = state_read_server_finished;
    return ssl_hs_ok;
  }

  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }

  // CertificateRequest is optional.
  if (msg.type != SSL3_MT_CERTIFICATE_REQUEST) {
    hs->tls13_state = state_read_server_certificate;
    return ssl_hs_ok;
  }

  SSLExtension sigalgs(TLSEXT_TYPE_signature_algorithms),
      ca(TLSEXT_TYPE_certificate_authorities);
  CBS body = msg.body, context, extensions, supported_signature_algorithms;
  uint8_t alert = SSL_AD_DECODE_ERROR;
  if (!CBS_get_u8_length_prefixed(&body, &context) ||
      // The request context is always empty during the handshake.
      CBS_len(&context) != 0 ||
      !CBS_get_u16_length_prefixed(&body, &extensions) ||  //
      CBS_len(&body) != 0 ||
      !ssl_parse_extensions(&extensions, &alert, {&sigalgs, &ca},
                            /*ignore_unknown=*/true) ||
      !sigalgs.present ||
      !CBS_get_u16_length_prefixed(&sigalgs.data,
                                   &supported_signature_algorithms) ||
      !tls1_parse_peer_sigalgs(hs, &supported_signature_algorithms)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    return ssl_hs_error;
  }

  if (ca.present) {
    hs->ca_names = SSL_parse_CA_list(ssl, &alert, &ca.data);
    if (!hs->ca_names) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
      return ssl_hs_error;
    }
  } else {
    // No certificate_authorities extension: record an empty CA-name list.
    hs->ca_names.reset(sk_CRYPTO_BUFFER_new_null());
    if (!hs->ca_names) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
      return ssl_hs_error;
    }
  }

  hs->cert_request = true;
  ssl->ctx->x509_method->hs_flush_cached_ca_names(hs);

  if (!ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);
  hs->tls13_state = state_read_server_certificate;
  return ssl_hs_ok;
}

// do_read_server_certificate reads and processes the server's Certificate
// (or CompressedCertificate) message.
static enum ssl_hs_wait_t do_read_server_certificate(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return
ssl_hs_read_message;
  }

  if (msg.type != SSL3_MT_COMPRESSED_CERTIFICATE &&
      !ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE)) {
    return ssl_hs_error;
  }

  if (!tls13_process_certificate(hs, msg, false /* certificate required */) ||
      !ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);
  hs->tls13_state = state_read_server_certificate_verify;
  return ssl_hs_ok;
}

// do_read_server_certificate_verify verifies the peer certificate (possibly
// asynchronously, re-entering this state on retry) and then processes the
// server's CertificateVerify message.
static enum ssl_hs_wait_t do_read_server_certificate_verify(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }

  switch (ssl_verify_peer_cert(hs)) {
    case ssl_verify_ok:
      break;
    case ssl_verify_invalid:
      return ssl_hs_error;
    case ssl_verify_retry:
      // Certificate verification is pending; stay in this state.
      hs->tls13_state = state_read_server_certificate_verify;
      return ssl_hs_certificate_verify;
  }

  if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE_VERIFY) ||
      !tls13_process_certificate_verify(hs, msg) ||
      !ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);
  hs->tls13_state = state_read_server_finished;
  return ssl_hs_ok;
}

// do_server_certificate_reverify re-runs certificate verification on a resumed
// session when |reverify_on_resume| is configured.
static enum ssl_hs_wait_t do_server_certificate_reverify(SSL_HANDSHAKE *hs) {
  switch (ssl_reverify_peer_cert(hs, /*send_alert=*/true)) {
    case ssl_verify_ok:
      break;
    case ssl_verify_invalid:
      return ssl_hs_error;
    case ssl_verify_retry:
      hs->tls13_state = state_server_certificate_reverify;
      return ssl_hs_certificate_verify;
  }
  hs->tls13_state = state_read_server_finished;
  return ssl_hs_ok;
}

// do_read_server_finished reads the server Finished message, advances the key
// schedule to the master secret, and derives the application traffic secrets.
static enum ssl_hs_wait_t do_read_server_finished(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }
  if (!ssl_check_message_type(ssl, msg, SSL3_MT_FINISHED) ||
      !tls13_process_finished(hs, msg, false /* don't use saved value */) ||
      !ssl_hash_message(hs, msg) ||
      // Update the secret to the master secret and derive traffic keys.
      !tls13_advance_key_schedule(hs,
                                  Span(kZeroes, hs->transcript.DigestLen())) ||
      !tls13_derive_application_secrets(hs)) {
    return ssl_hs_error;
  }

  // Finished should be the end of the flight.
  if (ssl->method->has_unprocessed_handshake_data(ssl)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE);
    OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESS_HANDSHAKE_DATA);
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);
  hs->tls13_state = state_send_end_of_early_data;
  return ssl_hs_ok;
}

// do_send_end_of_early_data, when 0-RTT was accepted, sends EndOfEarlyData
// (TLS over TCP only) and closes the early-data write stream.
static enum ssl_hs_wait_t do_send_end_of_early_data(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  if (ssl->s3->early_data_accepted) {
    // DTLS and QUIC omit the EndOfEarlyData message. See RFC 9001, section
    // 8.3, and RFC 9147, section 5.6.
    if (!SSL_is_quic(ssl) && !SSL_is_dtls(ssl)) {
      ScopedCBB cbb;
      CBB body;
      if (!ssl->method->init_message(ssl, cbb.get(), &body,
                                     SSL3_MT_END_OF_EARLY_DATA) ||
          !ssl_add_message_cbb(ssl, cbb.get())) {
        return ssl_hs_error;
      }
    }
    if (!close_early_data(hs, ssl_encryption_handshake)) {
      return ssl_hs_error;
    }
  }

  hs->tls13_state = state_send_client_encrypted_extensions;
  return ssl_hs_ok;
}

static enum ssl_hs_wait_t do_send_client_encrypted_extensions(
    SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  // For now, only one extension uses client EncryptedExtensions. This function
  // may be generalized if others use it in the future.
  // Send client ALPS settings, unless they were already carried over from an
  // accepted 0-RTT offer.
  if (hs->new_session->has_application_settings &&
      !ssl->s3->early_data_accepted) {
    ScopedCBB cbb;
    CBB body, extensions, extension;
    // Pick the ALPS codepoint (old or new) per configuration.
    uint16_t extension_type = TLSEXT_TYPE_application_settings_old;
    if (hs->config->alps_use_new_codepoint) {
      extension_type = TLSEXT_TYPE_application_settings;
    }
    if (!ssl->method->init_message(ssl, cbb.get(), &body,
                                   SSL3_MT_ENCRYPTED_EXTENSIONS) ||
        !CBB_add_u16_length_prefixed(&body, &extensions) ||
        !CBB_add_u16(&extensions, extension_type) ||
        !CBB_add_u16_length_prefixed(&extensions, &extension) ||
        !CBB_add_bytes(&extension,
                       hs->new_session->local_application_settings.data(),
                       hs->new_session->local_application_settings.size()) ||
        !ssl_add_message_cbb(ssl, cbb.get())) {
      return ssl_hs_error;
    }
  }

  hs->tls13_state = state_send_client_certificate;
  return ssl_hs_ok;
}

// check_credential returns whether |cred| is usable for this handshake and,
// on success, sets |*out_sigalg| to the chosen signature algorithm.
static bool check_credential(SSL_HANDSHAKE *hs, const SSL_CREDENTIAL *cred,
                             uint16_t *out_sigalg) {
  if (cred->type != SSLCredentialType::kX509) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CERTIFICATE_TYPE);
    return false;
  }
  // All currently supported credentials require a signature.
  if (!tls1_choose_signature_algorithm(hs, cred, out_sigalg)) {
    return false;
  }
  // Use this credential if it either matches a requested issuer,
  // or does not require issuer matching.
  return ssl_credential_matches_requested_issuers(hs, cred);
}

// do_send_client_certificate selects a client credential (if one was
// requested) and sends the client Certificate message.
static enum ssl_hs_wait_t do_send_client_certificate(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  // The peer didn't request a certificate.
  if (!hs->cert_request) {
    hs->tls13_state = state_complete_second_flight;
    return ssl_hs_ok;
  }

  if (ssl->s3->ech_status == ssl_ech_rejected) {
    // Do not send client certificates on ECH reject. We have not authenticated
    // the server for the name that can learn the certificate.
    SSL_certs_clear(ssl);
  } else if (hs->config->cert->cert_cb != nullptr) {
    // Call cert_cb to update the certificate.
    int rv = hs->config->cert->cert_cb(ssl, hs->config->cert->cert_cb_arg);
    if (rv == 0) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
      OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_CB_ERROR);
      return ssl_hs_error;
    }
    if (rv < 0) {
      // The callback is not ready; retry this state later.
      hs->tls13_state = state_send_client_certificate;
      return ssl_hs_x509_lookup;
    }
  }

  Array creds;
  if (!ssl_get_credential_list(hs, &creds)) {
    return ssl_hs_error;
  }
  if (!creds.empty()) {
    // Select the credential to use.
    for (SSL_CREDENTIAL *cred : creds) {
      ERR_clear_error();
      uint16_t sigalg;
      if (check_credential(hs, cred, &sigalg)) {
        hs->credential = UpRef(cred);
        hs->signature_algorithm = sigalg;
        break;
      }
    }
    if (hs->credential == nullptr) {
      // The error from the last attempt is in the error queue.
      assert(ERR_peek_error() != 0);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
      return ssl_hs_error;
    }
  }

  if (!tls13_add_certificate(hs)) {
    return ssl_hs_error;
  }

  hs->tls13_state = state_send_client_certificate_verify;
  return ssl_hs_ok;
}

// do_send_client_certificate_verify signs the transcript and sends
// CertificateVerify, supporting asynchronous private-key operations.
static enum ssl_hs_wait_t do_send_client_certificate_verify(SSL_HANDSHAKE *hs) {
  // Don't send CertificateVerify if there is no certificate.
  if (hs->credential == nullptr) {
    hs->tls13_state = state_complete_second_flight;
    return ssl_hs_ok;
  }

  switch (tls13_add_certificate_verify(hs)) {
    case ssl_private_key_success:
      hs->tls13_state = state_complete_second_flight;
      return ssl_hs_ok;
    case ssl_private_key_retry:
      hs->tls13_state = state_send_client_certificate_verify;
      return ssl_hs_private_key_operation;
    case ssl_private_key_failure:
      return ssl_hs_error;
  }

  assert(0);
  return ssl_hs_error;
}

// do_complete_second_flight sends the remaining client flight (Channel ID if
// negotiated, then Finished) and installs the application traffic keys.
static enum ssl_hs_wait_t do_complete_second_flight(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  hs->can_release_private_key = true;

  // Send a Channel ID assertion if necessary.
  if (hs->channel_id_negotiated) {
    ScopedCBB cbb;
    CBB body;
    if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CHANNEL_ID) ||
        !tls1_write_channel_id(hs, &body) ||
        !ssl_add_message_cbb(ssl, cbb.get())) {
      return ssl_hs_error;
    }
  }

  // Send a Finished message.
  if (!tls13_add_finished(hs)) {
    return ssl_hs_error;
  }

  // Derive the final keys and enable them.
  if (!tls13_set_traffic_key(ssl, ssl_encryption_application, evp_aead_seal,
                             hs->new_session.get(),
                             hs->client_traffic_secret_0) ||
      !tls13_set_traffic_key(ssl, ssl_encryption_application, evp_aead_open,
                             hs->new_session.get(),
                             hs->server_traffic_secret_0) ||
      !tls13_derive_resumption_secret(hs)) {
    return ssl_hs_error;
  }

  hs->tls13_state = state_done;
  return ssl_hs_flush;
}

// tls13_client_handshake drives the TLS 1.3 client state machine, dispatching
// to the do_* handler for the current state until the handshake completes or
// a handler requests I/O, an async operation, or reports an error.
enum ssl_hs_wait_t tls13_client_handshake(SSL_HANDSHAKE *hs) {
  while (hs->tls13_state != state_done) {
    enum ssl_hs_wait_t ret = ssl_hs_error;
    // NOTE(review): the static_cast's template argument (client_hs_state_t)
    // appears to have been stripped during extraction — restore from upstream.
    enum client_hs_state_t state = static_cast(hs->tls13_state);
    switch (state) {
      case state_read_hello_retry_request:
        ret = do_read_hello_retry_request(hs);
        break;
      case state_send_second_client_hello:
        ret = do_send_second_client_hello(hs);
        break;
      case state_read_server_hello:
        ret = do_read_server_hello(hs);
        break;
      case state_read_encrypted_extensions:
        ret = do_read_encrypted_extensions(hs);
        break;
      case state_read_certificate_request:
        ret = do_read_certificate_request(hs);
        break;
      case state_read_server_certificate:
        ret = do_read_server_certificate(hs);
        break;
      case state_read_server_certificate_verify:
        ret = do_read_server_certificate_verify(hs);
        break;
      case state_server_certificate_reverify:
        ret = do_server_certificate_reverify(hs);
        break;
      case state_read_server_finished:
        ret = do_read_server_finished(hs);
        break;
      case state_send_end_of_early_data:
        ret = do_send_end_of_early_data(hs);
        break;
      case state_send_client_certificate:
        ret = do_send_client_certificate(hs);
        break;
      case state_send_client_encrypted_extensions:
        ret = do_send_client_encrypted_extensions(hs);
        break;
      case state_send_client_certificate_verify:
        ret = do_send_client_certificate_verify(hs);
        break;
      case state_complete_second_flight:
        ret = do_complete_second_flight(hs);
        break;
      case state_done:
        ret = ssl_hs_ok;
        break;
    }

    // Report state transitions through the info callback.
    if (hs->tls13_state != state) {
      ssl_do_info_callback(hs->ssl, SSL_CB_CONNECT_LOOP, 1);
    }

    if (ret != ssl_hs_ok) {
      return ret;
    }
  }

  return ssl_hs_ok;
}

// tls13_client_handshake_state returns a human-readable name for the current
// TLS 1.3 client handshake state, for debugging and logging.
const char *tls13_client_handshake_state(SSL_HANDSHAKE *hs) {
  enum client_hs_state_t state = static_cast(hs->tls13_state);
  switch (state) {
    case state_read_hello_retry_request:
      return "TLS 1.3 client read_hello_retry_request";
    case state_send_second_client_hello:
      return "TLS 1.3 client send_second_client_hello";
    case state_read_server_hello:
      return "TLS 1.3 client read_server_hello";
    case state_read_encrypted_extensions:
      return "TLS 1.3 client read_encrypted_extensions";
    case state_read_certificate_request:
      return "TLS 1.3 client read_certificate_request";
    case state_read_server_certificate:
      return "TLS 1.3 client read_server_certificate";
    case state_read_server_certificate_verify:
      return "TLS 1.3 client read_server_certificate_verify";
    case state_server_certificate_reverify:
      return "TLS 1.3 client server_certificate_reverify";
    case state_read_server_finished:
      return "TLS 1.3 client read_server_finished";
    case state_send_end_of_early_data:
      return "TLS 1.3 client send_end_of_early_data";
    case state_send_client_encrypted_extensions:
      return "TLS 1.3 client send_client_encrypted_extensions";
    case state_send_client_certificate:
      return "TLS 1.3 client send_client_certificate";
    case state_send_client_certificate_verify:
      return "TLS 1.3 client send_client_certificate_verify";
    case state_complete_second_flight:
      return "TLS 1.3 client complete_second_flight";
    case state_done:
      return "TLS 1.3 client done";
  }

  return "TLS 1.3 client unknown";
}

// tls13_process_new_session_ticket processes a NewSessionTicket message and,
// when client session caching is enabled, hands the resulting session to the
// new-session callback.
bool tls13_process_new_session_ticket(SSL *ssl, const SSLMessage &msg) {
  if (ssl->s3->write_shutdown != ssl_shutdown_none) {
    // Ignore tickets on shutdown.
    // Callers tend to indiscriminately call
    // |SSL_shutdown| before destroying an |SSL|, at which point calling the new
    // session callback may be confusing.
    return true;
  }

  CBS body = msg.body;
  // NOTE(review): |UniquePtr|'s template argument (SSL_SESSION) appears lost
  // in extraction here and below — restore from upstream BoringSSL.
  UniquePtr session = tls13_create_session_with_ticket(ssl, &body);
  if (!session) {
    return false;
  }

  if ((ssl->session_ctx->session_cache_mode & SSL_SESS_CACHE_CLIENT) &&
      ssl->session_ctx->new_session_cb != NULL &&
      ssl->session_ctx->new_session_cb(ssl, session.get())) {
    // |new_session_cb|'s return value signals that it took ownership.
    session.release();
  }

  return true;
}

// tls13_create_session_with_ticket parses a NewSessionTicket body out of
// |body| and returns a new resumable session derived from the established
// session, or nullptr on error (sending an alert where appropriate).
UniquePtr tls13_create_session_with_ticket(SSL *ssl, CBS *body) {
  UniquePtr session = SSL_SESSION_dup(
      ssl->s3->established_session.get(), SSL_SESSION_INCLUDE_NONAUTH);
  if (!session) {
    return nullptr;
  }

  ssl_session_rebase_time(ssl, session.get());

  uint32_t server_timeout;
  CBS ticket_nonce, ticket, extensions;
  if (!CBS_get_u32(body, &server_timeout) ||
      !CBS_get_u32(body, &session->ticket_age_add) ||
      !CBS_get_u8_length_prefixed(body, &ticket_nonce) ||
      !CBS_get_u16_length_prefixed(body, &ticket) ||
      CBS_len(&ticket) == 0 ||  //
      !session->ticket.CopyFrom(ticket) ||
      !CBS_get_u16_length_prefixed(body, &extensions) ||  //
      CBS_len(body) != 0) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
    OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
    return nullptr;
  }

  // Cap the renewable lifetime by the server advertised value. This avoids
  // wasting bandwidth on 0-RTT when we know the server will reject it.
  if (session->timeout > server_timeout) {
    session->timeout = server_timeout;
  }

  // Derive the PSK for this ticket from the resumption secret and nonce.
  if (!tls13_derive_session_psk(session.get(), ticket_nonce,
                                SSL_is_dtls(ssl))) {
    return nullptr;
  }

  SSLExtension early_data(TLSEXT_TYPE_early_data);
  uint8_t alert = SSL_AD_DECODE_ERROR;
  if (!ssl_parse_extensions(&extensions, &alert, {&early_data},
                            /*ignore_unknown=*/true)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    return nullptr;
  }

  if (early_data.present) {
    if (!CBS_get_u32(&early_data.data, &session->ticket_max_early_data) ||
        CBS_len(&early_data.data) != 0) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      return nullptr;
    }

    // QUIC does not use the max_early_data_size parameter and always sets it
    // to a fixed value. See RFC 9001, section 4.6.1.
    if (SSL_is_quic(ssl) && session->ticket_max_early_data != 0xffffffff) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      return nullptr;
    }
  }

  // Historically, OpenSSL filled in fake session IDs for ticket-based
  // sessions. Envoy's tests depend on this, although perhaps they shouldn't.
  session->session_id.ResizeForOverwrite(SHA256_DIGEST_LENGTH);
  SHA256(CBS_data(&ticket), CBS_len(&ticket), session->session_id.data());

  session->ticket_age_add_valid = true;
  session->not_resumable = false;

  return session;
}

BSSL_NAMESPACE_END

================================================
FILE: Sources/CNIOBoringSSL/ssl/tls13_enc.cc
================================================
/* Copyright 2016 The BoringSSL Authors
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): the angle-bracketed header names of these includes appear to
// have been stripped during extraction — restore from upstream tls13_enc.cc.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "../crypto/fipsmodule/tls/internal.h"
#include "../crypto/internal.h"
#include "internal.h"

BSSL_NAMESPACE_BEGIN

// init_key_schedule initializes |transcript|'s hash for |version| and |cipher|
// and resets the running handshake secret to the zero key of digest length.
static bool init_key_schedule(SSL_HANDSHAKE *hs, SSLTranscript *transcript,
                              uint16_t version, const SSL_CIPHER *cipher) {
  if (!transcript->InitHash(version, cipher)) {
    return false;
  }

  // Initialize the secret to the zero key.
  hs->secret.clear();
  hs->secret.Resize(transcript->DigestLen());

  return true;
}

// hkdf_extract_to_secret folds |in| into the running secret with HKDF-Extract,
// using the running secret as salt, per the RFC 8446 key schedule.
static bool hkdf_extract_to_secret(SSL_HANDSHAKE *hs,
                                   const SSLTranscript &transcript, Span in) {
  size_t len;
  if (!HKDF_extract(hs->secret.data(), &len, transcript.Digest(), in.data(),
                    in.size(), hs->secret.data(), hs->secret.size())) {
    return false;
  }
  assert(len == hs->secret.size());
  return true;
}

// tls13_init_key_schedule sets up the key schedule on the main transcript and
// extracts the Early Secret from |psk| (zeros when no PSK is in use).
bool tls13_init_key_schedule(SSL_HANDSHAKE *hs, Span psk) {
  if (!init_key_schedule(hs, &hs->transcript, ssl_protocol_version(hs->ssl),
                         hs->new_cipher)) {
    return false;
  }

  // Handback includes the whole handshake transcript, so we cannot free the
  // transcript buffer in the handback case.
  if (!hs->handback) {
    hs->transcript.FreeBuffer();
  }
  return hkdf_extract_to_secret(hs, hs->transcript, psk);
}

// tls13_init_early_key_schedule sets up the 0-RTT key schedule from
// |session|'s PSK on the appropriate (ECH-aware) transcript.
bool tls13_init_early_key_schedule(SSL_HANDSHAKE *hs,
                                   const SSL_SESSION *session) {
  assert(!hs->ssl->server);
  // When offering ECH, early data is associated with ClientHelloInner, not
  // ClientHelloOuter.
  SSLTranscript *transcript =
      hs->selected_ech_config ? &hs->inner_transcript : &hs->transcript;
  return init_key_schedule(hs, transcript,
                           ssl_session_protocol_version(session),
                           session->cipher) &&
         hkdf_extract_to_secret(hs, *transcript, session->secret);
}

// hkdf_expand_label_with_prefix runs HKDF-Expand-Label with an arbitrary
// label prefix (e.g. "dtls13") instead of the standard "tls13 ".
static bool hkdf_expand_label_with_prefix(Span out, const EVP_MD *digest,
                                          Span secret,
                                          std::string_view label_prefix,
                                          std::string_view label, Span hash) {
  // This is a copy of CRYPTO_tls13_hkdf_expand_label, but modified to take an
  // arbitrary prefix for the label instead of using the hardcoded "tls13 "
  // prefix.
  CBB cbb, child;
  uint8_t *hkdf_label = NULL;
  size_t hkdf_label_len;
  CBB_zero(&cbb);
  if (!CBB_init(&cbb,
                2 + 1 + label_prefix.size() + label.size() + 1 + hash.size()) ||
      !CBB_add_u16(&cbb, out.size()) ||
      !CBB_add_u8_length_prefixed(&cbb, &child) ||
      !CBB_add_bytes(&child, reinterpret_cast(label_prefix.data()),
                     label_prefix.size()) ||
      !CBB_add_bytes(&child, reinterpret_cast(label.data()), label.size()) ||
      !CBB_add_u8_length_prefixed(&cbb, &child) ||
      !CBB_add_bytes(&child, hash.data(), hash.size()) ||
      !CBB_finish(&cbb, &hkdf_label, &hkdf_label_len)) {
    CBB_cleanup(&cbb);
    return false;
  }
  const int ret = HKDF_expand(out.data(), out.size(), digest, secret.data(),
                              secret.size(), hkdf_label, hkdf_label_len);
  OPENSSL_free(hkdf_label);
  return ret == 1;
}

// hkdf_expand_label dispatches to the DTLS 1.3 ("dtls13") or TLS 1.3
// ("tls13 ") variant of HKDF-Expand-Label depending on |is_dtls|.
static bool hkdf_expand_label(Span out, const EVP_MD *digest, Span secret,
                              std::string_view label, Span hash,
                              bool is_dtls) {
  if (is_dtls) {
    return hkdf_expand_label_with_prefix(out, digest, secret, "dtls13", label,
                                         hash);
  }
  return CRYPTO_tls13_hkdf_expand_label(
             out.data(), out.size(), digest, secret.data(), secret.size(),
             reinterpret_cast(label.data()), label.size(), hash.data(),
             hash.size()) == 1;
}

static const char kTLS13LabelDerived[] = "derived";

// tls13_advance_key_schedule applies the "derived" step to the running secret
// and then extracts |in| (e.g. the ECDHE shared secret) into it.
bool tls13_advance_key_schedule(SSL_HANDSHAKE *hs, Span in) {
  uint8_t derive_context[EVP_MAX_MD_SIZE];
  unsigned derive_context_len;
  // The "derived" label uses the hash of the empty string as context.
  return EVP_Digest(nullptr, 0, derive_context, &derive_context_len,
                    hs->transcript.Digest(), nullptr) &&
         hkdf_expand_label(Span(hs->secret),
hs->transcript.Digest(), hs->secret,
                           kTLS13LabelDerived,
                           Span(derive_context, derive_context_len),
                           SSL_is_dtls(hs->ssl)) &&
         hkdf_extract_to_secret(hs, hs->transcript, in);
}

// derive_secret_with_transcript derives a secret of length
// |transcript.DigestLen()| and writes the result in |out| with the given
// label, the current base secret, and the state of |transcript|. It returns
// true on success and false on error.
static bool derive_secret_with_transcript(const SSL_HANDSHAKE *hs,
                                          InplaceVector *out,
                                          const SSLTranscript &transcript,
                                          std::string_view label) {
  uint8_t context_hash[EVP_MAX_MD_SIZE];
  size_t context_hash_len;
  if (!transcript.GetHash(context_hash, &context_hash_len)) {
    return false;
  }

  out->ResizeForOverwrite(transcript.DigestLen());
  return hkdf_expand_label(Span(*out), transcript.Digest(), hs->secret, label,
                           Span(context_hash, context_hash_len),
                           SSL_is_dtls(hs->ssl));
}

// derive_secret is derive_secret_with_transcript over the main transcript.
static bool derive_secret(SSL_HANDSHAKE *hs, InplaceVector *out,
                          std::string_view label) {
  return derive_secret_with_transcript(hs, out, hs->transcript, label);
}

// tls13_set_traffic_key derives key and IV from |traffic_secret| and installs
// them as the read or write state at |level| (a QUIC placeholder AEAD when
// the connection is QUIC).
bool tls13_set_traffic_key(SSL *ssl, enum ssl_encryption_level_t level,
                           enum evp_aead_direction_t direction,
                           const SSL_SESSION *session, Span traffic_secret) {
  uint16_t version = ssl_session_protocol_version(session);
  const EVP_MD *digest = ssl_session_get_digest(session);
  bool is_dtls = SSL_is_dtls(ssl);
  UniquePtr traffic_aead;
  if (SSL_is_quic(ssl)) {
    // Install a placeholder SSLAEADContext so that SSL accessors work. The
    // encryption itself will be handled by the SSL_QUIC_METHOD.
    traffic_aead = SSLAEADContext::CreatePlaceholderForQUIC(session->cipher);
  } else {
    // Look up cipher suite properties.
    const EVP_AEAD *aead;
    size_t discard;
    if (!ssl_cipher_get_evp_aead(&aead, &discard, &discard, session->cipher,
                                 version)) {
      return false;
    }

    // Derive the key and IV.
    uint8_t key_buf[EVP_AEAD_MAX_KEY_LENGTH],
        iv_buf[EVP_AEAD_MAX_NONCE_LENGTH];
    auto key = Span(key_buf).first(EVP_AEAD_key_length(aead));
    auto iv = Span(iv_buf).first(EVP_AEAD_nonce_length(aead));
    if (!hkdf_expand_label(key, digest, traffic_secret, "key", {}, is_dtls) ||
        !hkdf_expand_label(iv, digest, traffic_secret, "iv", {}, is_dtls)) {
      return false;
    }

    traffic_aead = SSLAEADContext::Create(direction, session->ssl_version,
                                          session->cipher, key, {}, iv);
  }

  if (!traffic_aead) {
    return false;
  }

  // Install the AEAD and remember the secret for later key updates.
  if (direction == evp_aead_open) {
    if (!ssl->method->set_read_state(ssl, level, std::move(traffic_aead),
                                     traffic_secret)) {
      return false;
    }
    ssl->s3->read_traffic_secret.CopyFrom(traffic_secret);
  } else {
    if (!ssl->method->set_write_state(ssl, level, std::move(traffic_aead),
                                      traffic_secret)) {
      return false;
    }
    ssl->s3->write_traffic_secret.CopyFrom(traffic_secret);
  }

  return true;
}

namespace {

// AESRecordNumberEncrypter implements DTLS 1.3 record-number masking with an
// AES-ECB block; subclasses pick the key size.
class AESRecordNumberEncrypter : public RecordNumberEncrypter {
 public:
  bool SetKey(Span key) override {
    return AES_set_encrypt_key(key.data(), key.size() * 8, &key_) == 0;
  }

  bool GenerateMask(Span out, Span sample) override {
    if (sample.size() < AES_BLOCK_SIZE || out.size() > AES_BLOCK_SIZE) {
      return false;
    }
    uint8_t mask[AES_BLOCK_SIZE];
    AES_encrypt(sample.data(), mask, &key_);
    OPENSSL_memcpy(out.data(), mask, out.size());
    return true;
  }

 private:
  AES_KEY key_;
};

class AES128RecordNumberEncrypter : public AESRecordNumberEncrypter {
 public:
  size_t KeySize() override { return 16; }
};

class AES256RecordNumberEncrypter : public AESRecordNumberEncrypter {
 public:
  size_t KeySize() override { return 32; }
};

// ChaChaRecordNumberEncrypter implements DTLS 1.3 record-number masking with
// ChaCha20.
class ChaChaRecordNumberEncrypter : public RecordNumberEncrypter {
 public:
  size_t KeySize() override { return kKeySize; }

  bool SetKey(Span key) override {
    if (key.size() != kKeySize) {
      return false;
    }
    OPENSSL_memcpy(key_, key.data(), key.size());
    return true;
  }

  bool GenerateMask(Span out, Span sample) override {
    // RFC 9147 section 4.2.3 uses the first 4 bytes of the sample as the
    // counter
    // and the next 12 bytes as the nonce. If we have less than 4+12=16
    // bytes in the sample, then we'll read past the end of the |sample|
    // buffer. The counter is interpreted as little-endian per RFC 8439.
    if (sample.size() < 16) {
      return false;
    }
    uint32_t counter = CRYPTO_load_u32_le(sample.data());
    Span nonce = sample.subspan(4);
    OPENSSL_memset(out.data(), 0, out.size());
    CRYPTO_chacha_20(out.data(), out.data(), out.size(), key_, nonce.data(),
                     counter);
    return true;
  }

 private:
  static constexpr size_t kKeySize = 32;
  uint8_t key_[kKeySize];
};

#if defined(BORINGSSL_UNSAFE_FUZZER_MODE)
// NullRecordNumberEncrypter is a no-op masker used only in fuzzer builds.
class NullRecordNumberEncrypter : public RecordNumberEncrypter {
 public:
  size_t KeySize() override { return 0; }
  bool SetKey(Span key) override { return true; }
  bool GenerateMask(Span out, Span sample) override {
    OPENSSL_memset(out.data(), 0, out.size());
    return true;
  }
};
#endif  // BORINGSSL_UNSAFE_FUZZER_MODE

}  // namespace

// Create selects a record-number encrypter for |cipher| and keys it with the
// "sn" secret derived from |traffic_secret| (DTLS 1.3 only).
// NOTE(review): |MakeUnique| and |UniquePtr| template arguments appear to have
// been stripped during extraction — restore from upstream BoringSSL.
UniquePtr RecordNumberEncrypter::Create(const SSL_CIPHER *cipher,
                                        Span traffic_secret) {
  const EVP_MD *digest = ssl_get_handshake_digest(TLS1_3_VERSION, cipher);
  UniquePtr ret;
#if defined(BORINGSSL_UNSAFE_FUZZER_MODE)
  ret = MakeUnique();
#else
  if (cipher->algorithm_enc == SSL_AES128GCM) {
    ret = MakeUnique();
  } else if (cipher->algorithm_enc == SSL_AES256GCM) {
    ret = MakeUnique();
  } else if (cipher->algorithm_enc == SSL_CHACHA20POLY1305) {
    ret = MakeUnique();
  } else {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
  }
#endif  // BORINGSSL_UNSAFE_FUZZER_MODE
  if (ret == nullptr) {
    return nullptr;
  }
  uint8_t rne_key_buf[RecordNumberEncrypter::kMaxKeySize];
  auto rne_key = Span(rne_key_buf).first(ret->KeySize());
  if (!hkdf_expand_label(rne_key, digest, traffic_secret, "sn", {},
                         /*is_dtls=*/true) ||
      !ret->SetKey(rne_key)) {
    return nullptr;
  }
  return ret;
}

// RFC 8446 key-schedule labels.
static const char kTLS13LabelExporter[] = "exp master";

static const char kTLS13LabelClientEarlyTraffic[] = "c e traffic";
static const char kTLS13LabelClientHandshakeTraffic[] = "c hs traffic";
static const char kTLS13LabelServerHandshakeTraffic[] = "s hs traffic";
static const char kTLS13LabelClientApplicationTraffic[] = "c ap traffic";
static const char kTLS13LabelServerApplicationTraffic[] = "s ap traffic";

// tls13_derive_early_secret derives the client early traffic secret and logs
// it to the keylog callback.
bool tls13_derive_early_secret(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  // When offering ECH on the client, early data is associated with
  // ClientHelloInner, not ClientHelloOuter.
  const SSLTranscript &transcript = (!ssl->server && hs->selected_ech_config)
                                        ? hs->inner_transcript
                                        : hs->transcript;
  if (!derive_secret_with_transcript(hs, &hs->early_traffic_secret, transcript,
                                     kTLS13LabelClientEarlyTraffic) ||
      !ssl_log_secret(ssl, "CLIENT_EARLY_TRAFFIC_SECRET",
                      hs->early_traffic_secret)) {
    return false;
  }
  return true;
}

// tls13_derive_handshake_secrets derives and logs both handshake traffic
// secrets.
bool tls13_derive_handshake_secrets(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  if (!derive_secret(hs, &hs->client_handshake_secret,
                     kTLS13LabelClientHandshakeTraffic) ||
      !ssl_log_secret(ssl, "CLIENT_HANDSHAKE_TRAFFIC_SECRET",
                      hs->client_handshake_secret) ||
      !derive_secret(hs, &hs->server_handshake_secret,
                     kTLS13LabelServerHandshakeTraffic) ||
      !ssl_log_secret(ssl, "SERVER_HANDSHAKE_TRAFFIC_SECRET",
                      hs->server_handshake_secret)) {
    return false;
  }

  return true;
}

// tls13_derive_application_secrets derives and logs both application traffic
// secrets and the exporter secret.
bool tls13_derive_application_secrets(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  if (!derive_secret(hs, &hs->client_traffic_secret_0,
                     kTLS13LabelClientApplicationTraffic) ||
      !ssl_log_secret(ssl, "CLIENT_TRAFFIC_SECRET_0",
                      hs->client_traffic_secret_0) ||
      !derive_secret(hs, &hs->server_traffic_secret_0,
                     kTLS13LabelServerApplicationTraffic) ||
      !ssl_log_secret(ssl, "SERVER_TRAFFIC_SECRET_0",
                      hs->server_traffic_secret_0) ||
      !derive_secret(hs, &ssl->s3->exporter_secret, kTLS13LabelExporter) ||
      !ssl_log_secret(ssl, "EXPORTER_SECRET", ssl->s3->exporter_secret)) {
    return false;
  }

  return true;
}

static const char kTLS13LabelApplicationTraffic[] = "traffic upd";

// tls13_rotate_traffic_key performs a KeyUpdate in |direction|: it advances
// the stored traffic secret in place and reinstalls the traffic keys.
bool tls13_rotate_traffic_key(SSL *ssl, enum evp_aead_direction_t direction) {
  Span secret = direction == evp_aead_open ?
      Span(ssl->s3->read_traffic_secret) :
      Span(ssl->s3->write_traffic_secret);
  const SSL_SESSION *session = SSL_get_session(ssl);
  const EVP_MD *digest = ssl_session_get_digest(session);
  // Advance the secret in place, then re-derive and install keys from it.
  return hkdf_expand_label(secret, digest, secret,
                           kTLS13LabelApplicationTraffic, {},
                           SSL_is_dtls(ssl)) &&
         tls13_set_traffic_key(ssl, ssl_encryption_application, direction,
                               session, secret);
}

static const char kTLS13LabelResumption[] = "res master";

// tls13_derive_resumption_secret derives the resumption master secret into
// the new session.
bool tls13_derive_resumption_secret(SSL_HANDSHAKE *hs) {
  return derive_secret(hs, &hs->new_session->secret, kTLS13LabelResumption);
}

static const char kTLS13LabelFinished[] = "finished";

// tls13_verify_data sets |out| to be the HMAC of |context| using a derived
// Finished key for both Finished messages and the PSK binder. |out| must have
// space available for |EVP_MAX_MD_SIZE| bytes.
static bool tls13_verify_data(uint8_t *out, size_t *out_len,
                              const EVP_MD *digest, uint16_t version,
                              Span secret, Span context, bool is_dtls) {
  uint8_t key_buf[EVP_MAX_MD_SIZE];
  auto key = Span(key_buf, EVP_MD_size(digest));
  unsigned len;
  if (!hkdf_expand_label(key, digest, secret, kTLS13LabelFinished, {},
                         is_dtls) ||
      HMAC(digest, key.data(), key.size(), context.data(), context.size(), out,
           &len) == nullptr) {
    return false;
  }
  *out_len = len;
  return true;
}

// tls13_finished_mac computes the Finished MAC over the current transcript
// hash using the client or server handshake traffic secret.
bool tls13_finished_mac(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len,
                        bool is_server) {
  Span traffic_secret = is_server ?
      hs->server_handshake_secret : hs->client_handshake_secret;

  uint8_t context_hash[EVP_MAX_MD_SIZE];
  size_t context_hash_len;
  if (!hs->transcript.GetHash(context_hash, &context_hash_len) ||
      !tls13_verify_data(out, out_len, hs->transcript.Digest(),
                         hs->ssl->s3->version, traffic_secret,
                         Span(context_hash, context_hash_len),
                         SSL_is_dtls(hs->ssl))) {
    return false;
  }
  return true;
}

static const char kTLS13LabelResumptionPSK[] = "resumption";

// tls13_derive_session_psk replaces |session|'s resumption master secret with
// the per-ticket PSK derived from it and |nonce|.
bool tls13_derive_session_psk(SSL_SESSION *session, Span nonce,
                              bool is_dtls) {
  const EVP_MD *digest = ssl_session_get_digest(session);
  // The session initially stores the resumption_master_secret, which we
  // override with the PSK.
  assert(session->secret.size() == EVP_MD_size(digest));
  return hkdf_expand_label(Span(session->secret), digest, session->secret,
                           kTLS13LabelResumptionPSK, nonce, is_dtls);
}

static const char kTLS13LabelExportKeying[] = "exporter";

// tls13_export_keying_material implements the RFC 8446 exporter: it derives
// a secret for |label| from |secret| and expands it with the hash of
// |context| into |out|.
bool tls13_export_keying_material(SSL *ssl, Span out, Span secret,
                                  std::string_view label, Span context) {
  if (secret.empty()) {
    assert(0);
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return false;
  }

  const EVP_MD *digest = ssl_session_get_digest(SSL_get_session(ssl));

  uint8_t hash_buf[EVP_MAX_MD_SIZE];
  uint8_t export_context_buf[EVP_MAX_MD_SIZE];
  unsigned hash_len;
  unsigned export_context_len;
  // |export_context| is the hash of the empty string, per the exporter
  // construction.
  if (!EVP_Digest(context.data(), context.size(), hash_buf, &hash_len, digest,
                  nullptr) ||
      !EVP_Digest(nullptr, 0, export_context_buf, &export_context_len, digest,
                  nullptr)) {
    return false;
  }

  auto hash = Span(hash_buf, hash_len);
  auto export_context = Span(export_context_buf, export_context_len);
  uint8_t derived_secret_buf[EVP_MAX_MD_SIZE];
  auto derived_secret = Span(derived_secret_buf, EVP_MD_size(digest));
  return hkdf_expand_label(derived_secret, digest, secret, label,
                           export_context, SSL_is_dtls(ssl)) &&
         hkdf_expand_label(out, digest, derived_secret, kTLS13LabelExportKeying,
                           hash, SSL_is_dtls(ssl));
}

static const char kTLS13LabelPSKBinder[] = "res binder";

static bool
tls13_psk_binder(uint8_t *out, size_t *out_len, const SSL_SESSION *session, const SSLTranscript &transcript, Span client_hello, size_t binders_len, bool is_dtls) { const EVP_MD *digest = ssl_session_get_digest(session); // Compute the binder key. // // TODO(davidben): Ideally we wouldn't recompute early secret and the binder // key each time. uint8_t binder_context[EVP_MAX_MD_SIZE]; unsigned binder_context_len; uint8_t early_secret[EVP_MAX_MD_SIZE] = {0}; size_t early_secret_len; uint8_t binder_key_buf[EVP_MAX_MD_SIZE] = {0}; auto binder_key = Span(binder_key_buf, EVP_MD_size(digest)); if (!EVP_Digest(nullptr, 0, binder_context, &binder_context_len, digest, nullptr) || !HKDF_extract(early_secret, &early_secret_len, digest, session->secret.data(), session->secret.size(), nullptr, 0) || !hkdf_expand_label(binder_key, digest, Span(early_secret, early_secret_len), kTLS13LabelPSKBinder, Span(binder_context, binder_context_len), is_dtls)) { return false; } // Hash the transcript and truncated ClientHello. if (client_hello.size() < binders_len) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } auto truncated = client_hello.subspan(0, client_hello.size() - binders_len); uint8_t context[EVP_MAX_MD_SIZE]; unsigned context_len; ScopedEVP_MD_CTX ctx; if (!is_dtls) { if (!transcript.CopyToHashContext(ctx.get(), digest) || !EVP_DigestUpdate(ctx.get(), truncated.data(), truncated.size()) || !EVP_DigestFinal_ex(ctx.get(), context, &context_len)) { return false; } } else { // In DTLS 1.3, the transcript hash is computed over only the TLS 1.3 // handshake messages (i.e. only type and length in the header), not the // full DTLSHandshake messages that are in |truncated|. This code pulls // the header and body out of the truncated ClientHello and writes those // to the hash context so the correct binder value is computed. 
if (truncated.size() < DTLS1_HM_HEADER_LENGTH) { return false; } auto header = truncated.subspan(0, 4); auto body = truncated.subspan(12); if (!transcript.CopyToHashContext(ctx.get(), digest) || !EVP_DigestUpdate(ctx.get(), header.data(), header.size()) || !EVP_DigestUpdate(ctx.get(), body.data(), body.size()) || !EVP_DigestFinal_ex(ctx.get(), context, &context_len)) { return false; } } if (!tls13_verify_data(out, out_len, digest, session->ssl_version, binder_key, Span(context, context_len), is_dtls)) { return false; } assert(*out_len == EVP_MD_size(digest)); return true; } bool tls13_write_psk_binder(const SSL_HANDSHAKE *hs, const SSLTranscript &transcript, Span msg, size_t *out_binder_len) { const SSL *const ssl = hs->ssl; const EVP_MD *digest = ssl_session_get_digest(ssl->session.get()); const size_t hash_len = EVP_MD_size(digest); // We only offer one PSK, so the binders are a u16 and u8 length // prefix, followed by the binder. The caller is assumed to have constructed // |msg| with placeholder binders. const size_t binders_len = 3 + hash_len; uint8_t verify_data[EVP_MAX_MD_SIZE]; size_t verify_data_len; if (!tls13_psk_binder(verify_data, &verify_data_len, ssl->session.get(), transcript, msg, binders_len, SSL_is_dtls(hs->ssl)) || verify_data_len != hash_len) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } auto msg_binder = msg.last(verify_data_len); OPENSSL_memcpy(msg_binder.data(), verify_data, verify_data_len); if (out_binder_len != nullptr) { *out_binder_len = verify_data_len; } return true; } bool tls13_verify_psk_binder(const SSL_HANDSHAKE *hs, const SSL_SESSION *session, const SSLMessage &msg, CBS *binders) { uint8_t verify_data[EVP_MAX_MD_SIZE]; size_t verify_data_len; CBS binder; // The binders are computed over |msg| with |binders| and its u16 length // prefix removed. The caller is assumed to have parsed |msg|, extracted // |binders|, and verified the PSK extension is last. 
if (!tls13_psk_binder(verify_data, &verify_data_len, session, hs->transcript, msg.raw, 2 + CBS_len(binders), SSL_is_dtls(hs->ssl)) || // We only consider the first PSK, so compare against the first binder. !CBS_get_u8_length_prefixed(binders, &binder)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } bool binder_ok = CBS_len(&binder) == verify_data_len && CRYPTO_memcmp(CBS_data(&binder), verify_data, verify_data_len) == 0; #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) binder_ok = true; #endif if (!binder_ok) { OPENSSL_PUT_ERROR(SSL, SSL_R_DIGEST_CHECK_FAILED); return false; } return true; } size_t ssl_ech_confirmation_signal_hello_offset(const SSL *ssl) { static_assert(ECH_CONFIRMATION_SIGNAL_LEN < SSL3_RANDOM_SIZE, "the confirmation signal is a suffix of the random"); const size_t header_len = SSL_is_dtls(ssl) ? DTLS1_HM_HEADER_LENGTH : SSL3_HM_HEADER_LENGTH; return header_len + 2 /* version */ + SSL3_RANDOM_SIZE - ECH_CONFIRMATION_SIGNAL_LEN; } bool ssl_ech_accept_confirmation(const SSL_HANDSHAKE *hs, Span out, Span client_random, const SSLTranscript &transcript, bool is_hrr, Span msg, size_t offset) { // See draft-ietf-tls-esni-13, sections 7.2 and 7.2.1. static const uint8_t kZeros[EVP_MAX_MD_SIZE] = {0}; // We hash |msg|, with bytes from |offset| zeroed. if (msg.size() < offset + ECH_CONFIRMATION_SIGNAL_LEN) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return false; } // We represent DTLS messages with the longer DTLS 1.2 header, but DTLS 1.3 // removes the extra fields from the transcript. auto header = msg.subspan(0, SSL3_HM_HEADER_LENGTH); size_t full_header_len = SSL_is_dtls(hs->ssl) ? 
DTLS1_HM_HEADER_LENGTH : SSL3_HM_HEADER_LENGTH; auto before_zeros = msg.subspan(full_header_len, offset - full_header_len); auto after_zeros = msg.subspan(offset + ECH_CONFIRMATION_SIGNAL_LEN); uint8_t context[EVP_MAX_MD_SIZE]; unsigned context_len; ScopedEVP_MD_CTX ctx; if (!transcript.CopyToHashContext(ctx.get(), transcript.Digest()) || !EVP_DigestUpdate(ctx.get(), header.data(), header.size()) || !EVP_DigestUpdate(ctx.get(), before_zeros.data(), before_zeros.size()) || !EVP_DigestUpdate(ctx.get(), kZeros, ECH_CONFIRMATION_SIGNAL_LEN) || !EVP_DigestUpdate(ctx.get(), after_zeros.data(), after_zeros.size()) || !EVP_DigestFinal_ex(ctx.get(), context, &context_len)) { return false; } uint8_t secret[EVP_MAX_MD_SIZE]; size_t secret_len; if (!HKDF_extract(secret, &secret_len, transcript.Digest(), client_random.data(), client_random.size(), kZeros, transcript.DigestLen())) { return false; } assert(out.size() == ECH_CONFIRMATION_SIGNAL_LEN); return hkdf_expand_label( out, transcript.Digest(), Span(secret, secret_len), is_hrr ? "hrr ech accept confirmation" : "ech accept confirmation", Span(context, context_len), SSL_is_dtls(hs->ssl)); } BSSL_NAMESPACE_END ================================================ FILE: Sources/CNIOBoringSSL/ssl/tls13_server.cc ================================================ /* Copyright 2016 The BoringSSL Authors * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

// NOTE(review): extraction damage — the angle-bracketed header names of the
// following includes were stripped, as were template arguments on
// |Span|/|Array|/|UniquePtr|/|static_cast| throughout this file. Compare
// against upstream BoringSSL before relying on exact types.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "../crypto/internal.h"
#include "internal.h"

BSSL_NAMESPACE_BEGIN

static const uint8_t kZeroes[EVP_MAX_MD_SIZE] = {0};

// Allow a minute of ticket age skew in either direction. This covers
// transmission delays in ClientHello and NewSessionTicket, as well as
// drift between client and server clock rate since the ticket was issued.
// See RFC 8446, section 8.3.
static const int32_t kMaxTicketAgeSkewSeconds = 60;

// Parses the client key_share, performs (or replays from handshake hints) the
// encapsulation, and advances the key schedule with the shared secret.
static bool resolve_ecdhe_secret(SSL_HANDSHAKE *hs,
                                 const SSL_CLIENT_HELLO *client_hello) {
  SSL *const ssl = hs->ssl;
  const uint16_t group_id = hs->new_session->group_id;
  bool found_key_share;
  Span peer_key;
  uint8_t alert = SSL_AD_DECODE_ERROR;
  if (!ssl_ext_key_share_parse_clienthello(hs, &found_key_share, &peer_key,
                                           &alert, client_hello)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    return false;
  }

  if (!found_key_share) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
    OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE);
    return false;
  }

  Array secret;
  SSL_HANDSHAKE_HINTS *const hints = hs->hints.get();
  if (hints && !hs->hints_requested && hints->key_share_group_id == group_id &&
      !hints->key_share_secret.empty()) {
    // Copy the key_share secret from hints.
    if (!hs->key_share_ciphertext.CopyFrom(hints->key_share_ciphertext) ||
        !secret.CopyFrom(hints->key_share_secret)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
      return false;
    }
  } else {
    ScopedCBB ciphertext;
    UniquePtr key_share = SSLKeyShare::Create(group_id);
    if (!key_share ||  //
        !CBB_init(ciphertext.get(), 32) ||
        !key_share->Encap(ciphertext.get(), &secret, &alert, peer_key) ||
        !CBBFinishArray(ciphertext.get(), &hs->key_share_ciphertext)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
      return false;
    }
    if (hints && hs->hints_requested) {
      // Record the result so a later replay can skip the key exchange.
      hints->key_share_group_id = group_id;
      if (!hints->key_share_ciphertext.CopyFrom(hs->key_share_ciphertext) ||
          !hints->key_share_secret.CopyFrom(secret)) {
        ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
        return false;
      }
    }
  }

  return tls13_advance_key_schedule(hs, secret);
}

// Adds the supported_versions extension to the ServerHello.
static int ssl_ext_supported_versions_add_serverhello(SSL_HANDSHAKE *hs,
                                                      CBB *out) {
  CBB contents;
  if (!CBB_add_u16(out, TLSEXT_TYPE_supported_versions) ||  //
      !CBB_add_u16_length_prefixed(out, &contents) ||       //
      !CBB_add_u16(&contents, hs->ssl->s3->version) ||      //
      !CBB_flush(out)) {
    return 0;
  }

  return 1;
}

// Selects the TLS 1.3 cipher suite from the ClientHello's cipher list,
// honoring the AES-hardware override and compliance policy.
static const SSL_CIPHER *choose_tls13_cipher(
    const SSL *ssl, const SSL_CLIENT_HELLO *client_hello) {
  CBS cipher_suites;
  CBS_init(&cipher_suites, client_hello->cipher_suites,
           client_hello->cipher_suites_len);

  const uint16_t version = ssl_protocol_version(ssl);

  return ssl_choose_tls13_cipher(cipher_suites,
                                 ssl->config->aes_hw_override
                                     ? ssl->config->aes_hw_override_value
                                     : EVP_has_aes_hardware(),
                                 version, ssl->config->compliance_policy);
}

// Builds and queues up to |num_tickets| NewSessionTicket messages. Sets
// |*out_sent_tickets| to whether any ticket was actually sent.
static bool add_new_session_tickets(SSL_HANDSHAKE *hs, bool *out_sent_tickets) {
  SSL *const ssl = hs->ssl;
  if (  // If the client doesn't accept resumption with PSK_DHE_KE, don't send a
        // session ticket.
      !hs->accept_psk_mode ||
      // We only implement stateless resumption in TLS 1.3, so skip sending
      // tickets if disabled.
      (SSL_get_options(ssl) & SSL_OP_NO_TICKET)) {
    *out_sent_tickets = false;
    return true;
  }

  // Rebase the session timestamp so that it is measured from ticket
  // issuance.
  ssl_session_rebase_time(ssl, hs->new_session.get());

  assert(ssl->session_ctx->num_tickets <= kMaxTickets);
  bool sent_tickets = false;
  for (size_t i = 0; i < ssl->session_ctx->num_tickets; i++) {
    UniquePtr session(
        SSL_SESSION_dup(hs->new_session.get(), SSL_SESSION_INCLUDE_NONAUTH));
    if (!session) {
      return false;
    }

    if (!RAND_bytes((uint8_t *)&session->ticket_age_add, 4)) {
      return false;
    }
    session->ticket_age_add_valid = true;
    // TODO(crbug.com/381113363): Remove the SSL_is_dtls check once we support
    // 0-RTT for DTLS 1.3.
    bool enable_early_data =
        ssl->enable_early_data &&
        (!SSL_is_quic(ssl) || !ssl->config->quic_early_data_context.empty()) &&
        !SSL_is_dtls(ssl);
    if (enable_early_data) {
      // QUIC does not use the max_early_data_size parameter and always sets it
      // to a fixed value. See RFC 9001, section 4.6.1.
      session->ticket_max_early_data =
          SSL_is_quic(ssl) ? 0xffffffff : kMaxEarlyDataAccepted;
    }

    static_assert(kMaxTickets < 256, "Too many tickets");
    assert(i < 256);
    // Per-ticket nonce, so each derived PSK is distinct.
    uint8_t nonce[] = {static_cast(i)};

    ScopedCBB cbb;
    CBB body, nonce_cbb, ticket, extensions;
    if (!ssl->method->init_message(ssl, cbb.get(), &body,
                                   SSL3_MT_NEW_SESSION_TICKET) ||
        !CBB_add_u32(&body, session->timeout) ||
        !CBB_add_u32(&body, session->ticket_age_add) ||
        !CBB_add_u8_length_prefixed(&body, &nonce_cbb) ||
        !CBB_add_bytes(&nonce_cbb, nonce, sizeof(nonce)) ||
        !tls13_derive_session_psk(session.get(), nonce, SSL_is_dtls(ssl)) ||
        !CBB_add_u16_length_prefixed(&body, &ticket) ||
        !ssl_encrypt_ticket(hs, &ticket, session.get())) {
      return false;
    }
    if (CBB_len(&ticket) == 0) {
      // The caller decided not to encrypt a ticket. Skip the message.
      continue;
    }

    if (!CBB_add_u16_length_prefixed(&body, &extensions)) {
      return false;
    }

    if (enable_early_data) {
      CBB early_data;
      if (!CBB_add_u16(&extensions, TLSEXT_TYPE_early_data) ||
          !CBB_add_u16_length_prefixed(&extensions, &early_data) ||
          !CBB_add_u32(&early_data, session->ticket_max_early_data) ||
          !CBB_flush(&extensions)) {
        return false;
      }
    }

    // Add a fake extension. See RFC 8701.
    if (!CBB_add_u16(&extensions,
                     ssl_get_grease_value(hs, ssl_grease_ticket_extension)) ||
        !CBB_add_u16(&extensions, 0 /* empty */)) {
      return false;
    }

    if (!ssl_add_message_cbb(ssl, cbb.get())) {
      return false;
    }
    sent_tickets = true;
  }

  *out_sent_tickets = sent_tickets;
  return true;
}

// Returns whether |cred| is usable for this handshake and, if so, writes the
// chosen signature algorithm to |*out_sigalg|.
static bool check_credential(SSL_HANDSHAKE *hs, const SSL_CREDENTIAL *cred,
                             uint16_t *out_sigalg) {
  switch (cred->type) {
    case SSLCredentialType::kX509:
      break;
    case SSLCredentialType::kDelegated:
      // Check that the peer supports the signature over the delegated
      // credential.
      if (std::find(hs->peer_sigalgs.begin(), hs->peer_sigalgs.end(),
                    cred->dc_algorithm) == hs->peer_sigalgs.end()) {
        OPENSSL_PUT_ERROR(SSL, SSL_R_NO_COMMON_SIGNATURE_ALGORITHMS);
        return false;
      }
      break;
  }

  // All currently supported credentials require a signature. If |cred| is a
  // delegated credential, this also checks that the peer supports delegated
  // credentials and matched |dc_cert_verify_algorithm|.
  if (!tls1_choose_signature_algorithm(hs, cred, out_sigalg)) {
    return false;
  }
  // Use this credential if it either matches a requested issuer,
  // or does not require issuer matching.
  return ssl_credential_matches_requested_issuers(hs, cred);
}

// State handler: selects credential, cipher, and ALPN from the ClientHello,
// then initializes the transcript hash for the negotiated PRF.
static enum ssl_hs_wait_t do_select_parameters(SSL_HANDSHAKE *hs) {
  // At this point, most ClientHello extensions have already been processed by
  // the common handshake logic. Resolve the remaining non-PSK parameters.
  SSL *const ssl = hs->ssl;
  SSLMessage msg;
  SSL_CLIENT_HELLO client_hello;
  if (!hs->GetClientHello(&msg, &client_hello)) {
    return ssl_hs_error;
  }

  if (SSL_is_quic(ssl) && client_hello.session_id_len > 0) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_COMPATIBILITY_MODE);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
    return ssl_hs_error;
  }
  // DTLS 1.3 disables compatibility mode, and even if the client advertised a
  // session ID (for resumption in DTLS 1.2), the server "MUST NOT echo the
  // 'legacy_session_id' value from the client" (RFC 9147, section 5) as it
  // would in a TLS 1.3 handshake.
  if (!SSL_is_dtls(ssl)) {
    hs->session_id.CopyFrom(
        Span(client_hello.session_id, client_hello.session_id_len));
  }

  Array creds;
  if (!ssl_get_credential_list(hs, &creds)) {
    return ssl_hs_error;
  }
  if (creds.empty()) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CERTIFICATE_SET);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
    return ssl_hs_error;
  }

  // Select the credential to use.
  for (SSL_CREDENTIAL *cred : creds) {
    ERR_clear_error();
    uint16_t sigalg;
    if (check_credential(hs, cred, &sigalg)) {
      hs->credential = UpRef(cred);
      hs->signature_algorithm = sigalg;
      break;
    }
  }
  if (hs->credential == nullptr) {
    // The error from the last attempt is in the error queue.
    assert(ERR_peek_error() != 0);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
    return ssl_hs_error;
  }

  // Negotiate the cipher suite.
  hs->new_cipher = choose_tls13_cipher(ssl, &client_hello);
  if (hs->new_cipher == NULL) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_NO_SHARED_CIPHER);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
    return ssl_hs_error;
  }

  // HTTP/2 negotiation depends on the cipher suite, so ALPN negotiation was
  // deferred. Complete it now.
  uint8_t alert = SSL_AD_DECODE_ERROR;
  if (!ssl_negotiate_alpn(hs, &alert, &client_hello)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    return ssl_hs_error;
  }

  // The PRF hash is now known.
  if (!hs->transcript.InitHash(ssl_protocol_version(ssl), hs->new_cipher)) {
    return ssl_hs_error;
  }

  hs->tls13_state = state13_select_session;
  return ssl_hs_ok;
}

// Looks up and validates the offered PSK ticket. On success, returns the
// resumable session in |*out_session| and the ticket age skew.
static enum ssl_ticket_aead_result_t select_session(
    SSL_HANDSHAKE *hs, uint8_t *out_alert, UniquePtr *out_session,
    int32_t *out_ticket_age_skew, bool *out_offered_ticket,
    const SSLMessage &msg, const SSL_CLIENT_HELLO *client_hello) {
  SSL *const ssl = hs->ssl;
  *out_session = nullptr;

  CBS pre_shared_key;
  *out_offered_ticket = ssl_client_hello_get_extension(
      client_hello, &pre_shared_key, TLSEXT_TYPE_pre_shared_key);
  if (!*out_offered_ticket) {
    return ssl_ticket_aead_ignore_ticket;
  }

  // Per RFC 8446, section 4.2.9, servers MUST abort the handshake if the client
  // sends pre_shared_key without psk_key_exchange_modes.
  CBS unused;
  if (!ssl_client_hello_get_extension(client_hello, &unused,
                                      TLSEXT_TYPE_psk_key_exchange_modes)) {
    *out_alert = SSL_AD_MISSING_EXTENSION;
    OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_EXTENSION);
    return ssl_ticket_aead_error;
  }

  CBS ticket, binders;
  uint32_t client_ticket_age;
  if (!ssl_ext_pre_shared_key_parse_clienthello(
          hs, &ticket, &binders, &client_ticket_age, out_alert, client_hello,
          &pre_shared_key)) {
    return ssl_ticket_aead_error;
  }

  // If the peer did not offer psk_dhe, ignore the resumption.
  if (!hs->accept_psk_mode) {
    return ssl_ticket_aead_ignore_ticket;
  }

  // TLS 1.3 session tickets are renewed separately as part of the
  // NewSessionTicket.
  bool unused_renew;
  UniquePtr session;
  enum ssl_ticket_aead_result_t ret =
      ssl_process_ticket(hs, &session, &unused_renew, ticket, {});
  switch (ret) {
    case ssl_ticket_aead_success:
      break;
    case ssl_ticket_aead_error:
      *out_alert = SSL_AD_INTERNAL_ERROR;
      return ret;
    default:
      return ret;
  }

  if (!ssl_session_is_resumable(hs, session.get()) ||
      // Historically, some TLS 1.3 tickets were missing ticket_age_add.
      !session->ticket_age_add_valid) {
    return ssl_ticket_aead_ignore_ticket;
  }

  // Recover the client ticket age and convert to seconds.
  client_ticket_age -= session->ticket_age_add;
  client_ticket_age /= 1000;

  OPENSSL_timeval now = ssl_ctx_get_current_time(ssl->ctx.get());

  // Compute the server ticket age in seconds.
  assert(now.tv_sec >= session->time);
  uint64_t server_ticket_age = now.tv_sec - session->time;

  // To avoid overflowing |hs->ticket_age_skew|, we will not resume
  // 68-year-old sessions.
  if (server_ticket_age > INT32_MAX) {
    return ssl_ticket_aead_ignore_ticket;
  }

  *out_ticket_age_skew = static_cast(client_ticket_age) -
                         static_cast(server_ticket_age);

  // Check the PSK binder.
  if (!tls13_verify_psk_binder(hs, session.get(), msg, &binders)) {
    *out_alert = SSL_AD_DECRYPT_ERROR;
    return ssl_ticket_aead_error;
  }

  *out_session = std::move(session);
  return ssl_ticket_aead_success;
}

// Returns whether a QUIC session's stored early data context matches the
// current configuration (non-QUIC sessions always match).
static bool quic_ticket_compatible(const SSL_SESSION *session,
                                   const SSL_CONFIG *config) {
  if (!session->is_quic) {
    return true;
  }

  if (session->quic_early_data_context.empty() ||
      config->quic_early_data_context.size() !=
          session->quic_early_data_context.size() ||
      CRYPTO_memcmp(config->quic_early_data_context.data(),
                    session->quic_early_data_context.data(),
                    session->quic_early_data_context.size()) != 0) {
    return false;
  }
  return true;
}

// State handler: resolves resumption and 0-RTT, seeds the key schedule, and
// transitions to ServerHello or HelloRetryRequest.
static enum ssl_hs_wait_t do_select_session(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  SSLMessage msg;
  SSL_CLIENT_HELLO client_hello;
  if (!hs->GetClientHello(&msg, &client_hello)) {
    return ssl_hs_error;
  }

  uint8_t alert = SSL_AD_DECODE_ERROR;
  UniquePtr session;
  bool offered_ticket = false;
  switch (select_session(hs, &alert, &session, &ssl->s3->ticket_age_skew,
                         &offered_ticket, msg, &client_hello)) {
    case ssl_ticket_aead_ignore_ticket:
      assert(!session);
      if (!ssl_get_new_session(hs)) {
        ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
        return ssl_hs_error;
      }
      break;

    case ssl_ticket_aead_success:
      // Carry over authentication information from the previous handshake into
      // a fresh session.
      hs->new_session =
          SSL_SESSION_dup(session.get(), SSL_SESSION_DUP_AUTH_ONLY);
      if (hs->new_session == nullptr) {
        ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
        return ssl_hs_error;
      }

      ssl->s3->session_reused = true;
      hs->can_release_private_key = true;

      // Resumption incorporates fresh key material, so refresh the timeout.
      ssl_session_renew_timeout(ssl, hs->new_session.get(),
                                ssl->session_ctx->session_psk_dhe_timeout);
      break;

    case ssl_ticket_aead_error:
      ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
      return ssl_hs_error;

    case ssl_ticket_aead_retry:
      hs->tls13_state = state13_select_session;
      return ssl_hs_pending_ticket;
  }

  // Negotiate ALPS now, after ALPN is negotiated and |hs->new_session| is
  // initialized.
  if (!ssl_negotiate_alps(hs, &alert, &client_hello)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    return ssl_hs_error;
  }

  // Record connection properties in the new session.
  hs->new_session->cipher = hs->new_cipher;

  if (!tls1_get_shared_group(hs, &hs->new_session->group_id)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_NO_SHARED_GROUP);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
    return ssl_hs_error;
  }

  // Determine if we need HelloRetryRequest.
  bool found_key_share;
  if (!ssl_ext_key_share_parse_clienthello(hs, &found_key_share,
                                           /*out_key_share=*/nullptr, &alert,
                                           &client_hello)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
    return ssl_hs_error;
  }

  // Determine if we're negotiating 0-RTT. The first failing check records the
  // reason in |early_data_reason|.
  if (!ssl->enable_early_data) {
    ssl->s3->early_data_reason = ssl_early_data_disabled;
  } else if (!offered_ticket) {
    ssl->s3->early_data_reason = ssl_early_data_no_session_offered;
  } else if (!session) {
    ssl->s3->early_data_reason = ssl_early_data_session_not_resumed;
  } else if (session->ticket_max_early_data == 0) {
    ssl->s3->early_data_reason = ssl_early_data_unsupported_for_session;
  } else if (!hs->early_data_offered) {
    ssl->s3->early_data_reason = ssl_early_data_peer_declined;
  } else if (hs->channel_id_negotiated) {
    // Channel ID is incompatible with 0-RTT.
    ssl->s3->early_data_reason = ssl_early_data_channel_id;
  } else if (Span(ssl->s3->alpn_selected) != session->early_alpn) {
    // The negotiated ALPN must match the one in the ticket.
    ssl->s3->early_data_reason = ssl_early_data_alpn_mismatch;
  } else if (hs->new_session->has_application_settings !=
                 session->has_application_settings ||
             Span(hs->new_session->local_application_settings) !=
                 session->local_application_settings) {
    ssl->s3->early_data_reason = ssl_early_data_alps_mismatch;
  } else if (ssl->s3->ticket_age_skew < -kMaxTicketAgeSkewSeconds ||
             kMaxTicketAgeSkewSeconds < ssl->s3->ticket_age_skew) {
    ssl->s3->early_data_reason = ssl_early_data_ticket_age_skew;
  } else if (!quic_ticket_compatible(session.get(), hs->config)) {
    ssl->s3->early_data_reason = ssl_early_data_quic_parameter_mismatch;
  } else if (!found_key_share) {
    ssl->s3->early_data_reason = ssl_early_data_hello_retry_request;
  } else {
    // |ssl_session_is_resumable| forbids cross-cipher resumptions even if the
    // PRF hashes match.
    assert(hs->new_cipher == session->cipher);
    ssl->s3->early_data_reason = ssl_early_data_accepted;
    ssl->s3->early_data_accepted = true;
  }

  // Store the ALPN and ALPS values in the session for 0-RTT. Note the peer
  // applications settings are not generally known until client
  // EncryptedExtensions.
  if (!hs->new_session->early_alpn.CopyFrom(ssl->s3->alpn_selected)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
    return ssl_hs_error;
  }

  // The peer applications settings are usually received later, in
  // EncryptedExtensions. But, in 0-RTT handshakes, we carry over the
  // values from |session|. Do this now, before |session| is discarded.
  if (ssl->s3->early_data_accepted &&
      hs->new_session->has_application_settings &&
      !hs->new_session->peer_application_settings.CopyFrom(
          session->peer_application_settings)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
    return ssl_hs_error;
  }

  // Copy the QUIC early data context to the session.
  if (ssl->enable_early_data && SSL_is_quic(ssl)) {
    if (!hs->new_session->quic_early_data_context.CopyFrom(
            hs->config->quic_early_data_context)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
      return ssl_hs_error;
    }
  }

  if (ssl->ctx->dos_protection_cb != NULL &&
      ssl->ctx->dos_protection_cb(&client_hello) == 0) {
    // Connection rejected for DOS reasons.
    OPENSSL_PUT_ERROR(SSL, SSL_R_CONNECTION_REJECTED);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
    return ssl_hs_error;
  }

  size_t hash_len = EVP_MD_size(
      ssl_get_handshake_digest(ssl_protocol_version(ssl), hs->new_cipher));

  // Set up the key schedule and incorporate the PSK into the running secret.
  if (!tls13_init_key_schedule(hs, ssl->s3->session_reused
                                       ? Span(hs->new_session->secret)
                                       : Span(kZeroes, hash_len)) ||
      !ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  if (ssl->s3->early_data_accepted) {
    if (!tls13_derive_early_secret(hs)) {
      return ssl_hs_error;
    }
  } else if (hs->early_data_offered) {
    ssl->s3->skip_early_data = true;
  }

  if (!found_key_share) {
    // No usable key share: go through HelloRetryRequest.
    ssl->method->next_message(ssl);
    if (!hs->transcript.UpdateForHelloRetryRequest()) {
      return ssl_hs_error;
    }
    hs->tls13_state = state13_send_hello_retry_request;
    return ssl_hs_ok;
  }

  if (!resolve_ecdhe_secret(hs, &client_hello)) {
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);
  hs->ech_client_hello_buf.Reset();
  hs->tls13_state = state13_send_server_hello;
  return ssl_hs_ok;
}

// State handler: builds and queues the HelloRetryRequest (plus ECH
// confirmation when operating on a ClientHelloInner).
static enum ssl_hs_wait_t do_send_hello_retry_request(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  if (hs->hints_requested) {
    return ssl_hs_hints_ready;
  }

  ScopedCBB cbb;
  CBB body, session_id, extensions;
  if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_SERVER_HELLO) ||
      !CBB_add_u16(&body, TLS1_2_VERSION) ||
      !CBB_add_bytes(&body, kHelloRetryRequest, SSL3_RANDOM_SIZE) ||
      !CBB_add_u8_length_prefixed(&body, &session_id) ||
      !CBB_add_bytes(&session_id, hs->session_id.data(),
                     hs->session_id.size()) ||
      !CBB_add_u16(&body, SSL_CIPHER_get_protocol_id(hs->new_cipher)) ||
      !CBB_add_u8(&body, 0 /* no compression */) ||
      !CBB_add_u16_length_prefixed(&body, &extensions) ||
      !CBB_add_u16(&extensions, TLSEXT_TYPE_supported_versions) ||
      !CBB_add_u16(&extensions, 2 /* length */) ||
      !CBB_add_u16(&extensions, ssl->s3->version) ||
      !CBB_add_u16(&extensions, TLSEXT_TYPE_key_share) ||
      !CBB_add_u16(&extensions, 2 /* length */) ||
      !CBB_add_u16(&extensions, hs->new_session->group_id)) {
    return ssl_hs_error;
  }
  if (hs->ech_is_inner) {
    // Fill a placeholder for the ECH confirmation value.
    if (!CBB_add_u16(&extensions, TLSEXT_TYPE_encrypted_client_hello) ||
        !CBB_add_u16(&extensions, ECH_CONFIRMATION_SIGNAL_LEN) ||
        !CBB_add_zeros(&extensions, ECH_CONFIRMATION_SIGNAL_LEN)) {
      return ssl_hs_error;
    }
  }
  Array hrr;
  if (!ssl->method->finish_message(ssl, cbb.get(), &hrr)) {
    return ssl_hs_error;
  }
  if (hs->ech_is_inner) {
    // Now that the message is encoded, fill in the whole value.
    size_t offset = hrr.size() - ECH_CONFIRMATION_SIGNAL_LEN;
    if (!ssl_ech_accept_confirmation(
            hs, Span(hrr).last(ECH_CONFIRMATION_SIGNAL_LEN),
            ssl->s3->client_random, hs->transcript,
            /*is_hrr=*/true, hrr, offset)) {
      return ssl_hs_error;
    }
  }

  if (!ssl->method->add_message(ssl, std::move(hrr)) ||
      !ssl->method->add_change_cipher_spec(ssl)) {
    return ssl_hs_error;
  }

  ssl->s3->used_hello_retry_request = true;
  hs->tls13_state = state13_read_second_client_hello;
  return ssl_hs_flush;
}

// State handler: processes the second ClientHello after HelloRetryRequest,
// including ECH decryption and re-verification of the PSK binder.
static enum ssl_hs_wait_t do_read_second_client_hello(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }
  if (!ssl_check_message_type(ssl, msg, SSL3_MT_CLIENT_HELLO)) {
    return ssl_hs_error;
  }
  SSL_CLIENT_HELLO client_hello;
  if (!ssl_client_hello_init(ssl, &client_hello, msg.body)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_CLIENTHELLO_PARSE_FAILED);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
    return ssl_hs_error;
  }

  if (ssl->s3->ech_status == ssl_ech_accepted) {
    // If we previously accepted the ClientHelloInner, the second ClientHello
    // must contain an outer encrypted_client_hello extension.
    CBS ech_body;
    if (!ssl_client_hello_get_extension(&client_hello, &ech_body,
                                        TLSEXT_TYPE_encrypted_client_hello)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_EXTENSION);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_MISSING_EXTENSION);
      return ssl_hs_error;
    }
    uint16_t kdf_id, aead_id;
    uint8_t type, config_id;
    CBS enc, payload;
    if (!CBS_get_u8(&ech_body, &type) ||     //
        type != ECH_CLIENT_OUTER ||          //
        !CBS_get_u16(&ech_body, &kdf_id) ||  //
        !CBS_get_u16(&ech_body, &aead_id) ||
        !CBS_get_u8(&ech_body, &config_id) ||
        !CBS_get_u16_length_prefixed(&ech_body, &enc) ||
        !CBS_get_u16_length_prefixed(&ech_body, &payload) ||
        CBS_len(&ech_body) != 0) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
      return ssl_hs_error;
    }

    // The second ClientHello must reuse the first ClientHello's HPKE
    // parameters and may not send a fresh |enc|.
    if (kdf_id != EVP_HPKE_KDF_id(EVP_HPKE_CTX_kdf(hs->ech_hpke_ctx.get())) ||
        aead_id !=
            EVP_HPKE_AEAD_id(EVP_HPKE_CTX_aead(hs->ech_hpke_ctx.get())) ||
        config_id != hs->ech_config_id || CBS_len(&enc) > 0) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_hs_error;
    }

    // Decrypt the payload with the HPKE context from the first ClientHello.
    uint8_t alert = SSL_AD_DECODE_ERROR;
    bool unused;
    if (!ssl_client_hello_decrypt(hs, &alert, &unused,
                                  &hs->ech_client_hello_buf, &client_hello,
                                  payload)) {
      // Decryption failure is fatal in the second ClientHello.
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED);
      ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
      return ssl_hs_error;
    }

    // Reparse |client_hello| from the buffer owned by |hs|.
    if (!hs->GetClientHello(&msg, &client_hello)) {
      OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
      return ssl_hs_error;
    }
  }

  // We perform all our negotiation based on the first ClientHello (for
  // consistency with what |select_certificate_cb| observed), which is in the
  // transcript, so we can ignore most of this second one.
  //
  // We do, however, check the second PSK binder. This covers the client key
  // share, in case we ever send half-RTT data (we currently do not). It is also
  // a tricky computation, so we enforce the peer handled it correctly.
  if (ssl->s3->session_reused) {
    CBS pre_shared_key;
    if (!ssl_client_hello_get_extension(&client_hello, &pre_shared_key,
                                        TLSEXT_TYPE_pre_shared_key)) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_INCONSISTENT_CLIENT_HELLO);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER);
      return ssl_hs_error;
    }

    CBS ticket, binders;
    uint32_t client_ticket_age;
    uint8_t alert = SSL_AD_DECODE_ERROR;
    if (!ssl_ext_pre_shared_key_parse_clienthello(
            hs, &ticket, &binders, &client_ticket_age, &alert, &client_hello,
            &pre_shared_key)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
      return ssl_hs_error;
    }

    // Note it is important that we do not obtain a new |SSL_SESSION| from
    // |ticket|. We have already selected parameters based on the first
    // ClientHello (in the transcript) and must not switch partway through.
    if (!tls13_verify_psk_binder(hs, hs->new_session.get(), msg, &binders)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR);
      return ssl_hs_error;
    }
  }

  if (!resolve_ecdhe_secret(hs, &client_hello)) {
    return ssl_hs_error;
  }

  if (!ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  // ClientHello should be the end of the flight.
  if (ssl->method->has_unprocessed_handshake_data(ssl)) {
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE);
    OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESS_HANDSHAKE_DATA);
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);
  hs->ech_client_hello_buf.Reset();
  hs->tls13_state = state13_send_server_hello;
  return ssl_hs_ok;
}

// State handler: sends ServerHello (with ECH confirmation if applicable),
// EncryptedExtensions, optional CertificateRequest, and Certificate.
static enum ssl_hs_wait_t do_send_server_hello(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;

  Span random(ssl->s3->server_random);

  SSL_HANDSHAKE_HINTS *const hints = hs->hints.get();
  if (hints && !hs->hints_requested &&
      hints->server_random_tls13.size() == random.size()) {
    // Replay the random from hints for deterministic split handshakes.
    OPENSSL_memcpy(random.data(), hints->server_random_tls13.data(),
                   random.size());
  } else {
    RAND_bytes(random.data(), random.size());
    if (hints && hs->hints_requested &&
        !hints->server_random_tls13.CopyFrom(random)) {
      return ssl_hs_error;
    }
  }

  uint16_t server_hello_version = TLS1_2_VERSION;
  if (SSL_is_dtls(ssl)) {
    server_hello_version = DTLS1_2_VERSION;
  }
  Array server_hello;
  ScopedCBB cbb;
  CBB body, extensions, session_id;
  if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_SERVER_HELLO) ||
      !CBB_add_u16(&body, server_hello_version) ||
      !CBB_add_bytes(&body, ssl->s3->server_random,
                     sizeof(ssl->s3->server_random)) ||
      !CBB_add_u8_length_prefixed(&body, &session_id) ||
      !CBB_add_bytes(&session_id, hs->session_id.data(),
                     hs->session_id.size()) ||
      !CBB_add_u16(&body, SSL_CIPHER_get_protocol_id(hs->new_cipher)) ||
      !CBB_add_u8(&body, 0) ||
      !CBB_add_u16_length_prefixed(&body, &extensions) ||
      !ssl_ext_pre_shared_key_add_serverhello(hs, &extensions) ||
      !ssl_ext_key_share_add_serverhello(hs, &extensions) ||
      !ssl_ext_supported_versions_add_serverhello(hs, &extensions) ||
      !ssl->method->finish_message(ssl, cbb.get(), &server_hello)) {
    return ssl_hs_error;
  }

  assert(ssl->s3->ech_status != ssl_ech_accepted || hs->ech_is_inner);
  if (hs->ech_is_inner) {
    // Fill in the ECH confirmation signal.
    const size_t offset = ssl_ech_confirmation_signal_hello_offset(ssl);
    Span random_suffix = random.last(ECH_CONFIRMATION_SIGNAL_LEN);
    if (!ssl_ech_accept_confirmation(hs, random_suffix, ssl->s3->client_random,
                                     hs->transcript,
                                     /*is_hrr=*/false, server_hello, offset)) {
      return ssl_hs_error;
    }

    // Update |server_hello|.
    Span server_hello_out =
        Span(server_hello).subspan(offset, ECH_CONFIRMATION_SIGNAL_LEN);
    OPENSSL_memcpy(server_hello_out.data(), random_suffix.data(),
                   ECH_CONFIRMATION_SIGNAL_LEN);
  }

  if (!ssl->method->add_message(ssl, std::move(server_hello))) {
    return ssl_hs_error;
  }

  hs->key_share_ciphertext.Reset();  // No longer needed.
  if (!ssl->s3->used_hello_retry_request &&
      !ssl->method->add_change_cipher_spec(ssl)) {
    return ssl_hs_error;
  }

  // Derive and enable the handshake traffic secrets.
  if (!tls13_derive_handshake_secrets(hs) ||
      !tls13_set_traffic_key(ssl, ssl_encryption_handshake, evp_aead_seal,
                             hs->new_session.get(),
                             hs->server_handshake_secret)) {
    return ssl_hs_error;
  }

  // Send EncryptedExtensions.
  if (!ssl->method->init_message(ssl, cbb.get(), &body,
                                 SSL3_MT_ENCRYPTED_EXTENSIONS) ||
      !ssl_add_serverhello_tlsext(hs, &body) ||
      !ssl_add_message_cbb(ssl, cbb.get())) {
    return ssl_hs_error;
  }

  if (!ssl->s3->session_reused) {
    // Determine whether to request a client certificate.
    hs->cert_request = !!(hs->config->verify_mode & SSL_VERIFY_PEER);
    // Only request a certificate if Channel ID isn't negotiated.
    if ((hs->config->verify_mode & SSL_VERIFY_PEER_IF_NO_OBC) &&
        hs->channel_id_negotiated) {
      hs->cert_request = false;
    }
  }

  // Send a CertificateRequest, if necessary.
  if (hs->cert_request) {
    CBB cert_request_extensions, sigalg_contents, sigalgs_cbb;
    if (!ssl->method->init_message(ssl, cbb.get(), &body,
                                   SSL3_MT_CERTIFICATE_REQUEST) ||
        !CBB_add_u8(&body, 0 /* no certificate_request_context. */) ||
        !CBB_add_u16_length_prefixed(&body, &cert_request_extensions) ||
        !CBB_add_u16(&cert_request_extensions,
                     TLSEXT_TYPE_signature_algorithms) ||
        !CBB_add_u16_length_prefixed(&cert_request_extensions,
                                     &sigalg_contents) ||
        !CBB_add_u16_length_prefixed(&sigalg_contents, &sigalgs_cbb) ||
        !tls12_add_verify_sigalgs(hs, &sigalgs_cbb)) {
      return ssl_hs_error;
    }

    if (ssl_has_client_CAs(hs->config)) {
      CBB ca_contents;
      if (!CBB_add_u16(&cert_request_extensions,
                       TLSEXT_TYPE_certificate_authorities) ||
          !CBB_add_u16_length_prefixed(&cert_request_extensions,
                                       &ca_contents) ||
          !ssl_add_client_CA_list(hs, &ca_contents) ||
          !CBB_flush(&cert_request_extensions)) {
        return ssl_hs_error;
      }
    }

    if (!ssl_add_message_cbb(ssl, cbb.get())) {
      return ssl_hs_error;
    }
  }

  // Send the server Certificate message, if necessary.
  if (!ssl->s3->session_reused) {
    if (!tls13_add_certificate(hs)) {
      return ssl_hs_error;
    }

    hs->tls13_state = state13_send_server_certificate_verify;
    return ssl_hs_ok;
  }

  hs->tls13_state = state13_send_server_finished;
  return ssl_hs_ok;
}

// State handler: adds CertificateVerify; may pause for an async private key
// operation and resume in the same state.
static enum ssl_hs_wait_t do_send_server_certificate_verify(SSL_HANDSHAKE *hs) {
  switch (tls13_add_certificate_verify(hs)) {
    case ssl_private_key_success:
      hs->tls13_state = state13_send_server_finished;
      return ssl_hs_ok;

    case ssl_private_key_retry:
      hs->tls13_state = state13_send_server_certificate_verify;
      return ssl_hs_private_key_operation;

    case ssl_private_key_failure:
      return ssl_hs_error;
  }

  assert(0);
  return ssl_hs_error;
}

// NOTE(review): this function continues past the end of this chunk; only its
// opening is visible here.
static enum ssl_hs_wait_t do_send_server_finished(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  if (hs->hints_requested) {
    return ssl_hs_hints_ready;
  }

  hs->can_release_private_key = true;
  if (!tls13_add_finished(hs) ||
      // Update the secret to the master secret and derive traffic keys.
!tls13_advance_key_schedule(hs, Span(kZeroes, hs->transcript.DigestLen())) || !tls13_derive_application_secrets(hs) || !tls13_set_traffic_key(ssl, ssl_encryption_application, evp_aead_seal, hs->new_session.get(), hs->server_traffic_secret_0)) { return ssl_hs_error; } hs->tls13_state = state13_send_half_rtt_ticket; return hs->handback ? ssl_hs_handback : ssl_hs_ok; } static enum ssl_hs_wait_t do_send_half_rtt_ticket(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; if (ssl->s3->early_data_accepted) { // If accepting 0-RTT, we send tickets half-RTT. This gets the tickets on // the wire sooner and also avoids triggering a write on |SSL_read| when // processing the client Finished. This requires computing the client // Finished early. See RFC 8446, section 4.6.1. static const uint8_t kEndOfEarlyData[4] = {SSL3_MT_END_OF_EARLY_DATA, 0, 0, 0}; if (!SSL_is_quic(ssl) && !hs->transcript.Update(kEndOfEarlyData)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return ssl_hs_error; } size_t finished_len; hs->expected_client_finished.Resize(hs->transcript.DigestLen()); if (!tls13_finished_mac(hs, hs->expected_client_finished.data(), &finished_len, false /* client */)) { return ssl_hs_error; } if (finished_len != hs->expected_client_finished.size()) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return ssl_hs_error; } // Feed the predicted Finished into the transcript. This allows us to derive // the resumption secret early and send half-RTT tickets. // // TODO(crbug.com/381113363): Don't use half-RTT tickets with DTLS 1.3. // TODO(crbug.com/376939532): Perhaps don't use half-RTT tickets at all. 
    // Half-RTT tickets are only used over TLS; DTLS is asserted away per the
    // TODOs above.
    assert(!SSL_is_dtls(hs->ssl));
    assert(hs->expected_client_finished.size() <= 0xff);
    // Synthesize the 4-byte handshake header for the predicted client
    // Finished so it can be fed into the transcript.
    // NOTE(review): the template argument of |static_cast| below appears to
    // have been lost in extraction; upstream reads
    // |static_cast<uint8_t>(...)| — confirm against upstream BoringSSL.
    uint8_t header[4] = {
        SSL3_MT_FINISHED, 0, 0,
        static_cast(hs->expected_client_finished.size())};
    bool unused_sent_tickets;
    if (!hs->transcript.Update(header) ||
        !hs->transcript.Update(hs->expected_client_finished) ||
        !tls13_derive_resumption_secret(hs) ||
        !add_new_session_tickets(hs, &unused_sent_tickets)) {
      return ssl_hs_error;
    }
  }

  hs->tls13_state = state13_read_second_client_flight;
  return ssl_hs_flush;
}

// uses_end_of_early_data returns whether the negotiated protocol carries an
// explicit EndOfEarlyData message.
static bool uses_end_of_early_data(const SSL *ssl) {
  // DTLS and QUIC omit the EndOfEarlyData message. See RFC 9001, section 8.3,
  // and RFC 9147, section 5.6.
  return !SSL_is_quic(ssl) && !SSL_is_dtls(ssl);
}

// do_read_second_client_flight installs early-data read keys when 0-RTT was
// accepted and then transitions to processing EndOfEarlyData.
static enum ssl_hs_wait_t do_read_second_client_flight(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  if (ssl->s3->early_data_accepted) {
    if (!tls13_set_traffic_key(ssl, ssl_encryption_early_data, evp_aead_open,
                               hs->new_session.get(),
                               hs->early_traffic_secret)) {
      return ssl_hs_error;
    }
    hs->can_early_write = true;
    hs->can_early_read = true;
    hs->in_early_data = true;
  }

  // If the EndOfEarlyData message is not used, switch to
  // client_handshake_secret before the early return.
  if (!uses_end_of_early_data(ssl)) {
    if (!tls13_set_traffic_key(ssl, ssl_encryption_handshake, evp_aead_open,
                               hs->new_session.get(),
                               hs->client_handshake_secret)) {
      return ssl_hs_error;
    }
    hs->tls13_state = state13_process_end_of_early_data;
    return ssl->s3->early_data_accepted ? ssl_hs_early_return : ssl_hs_ok;
  }

  hs->tls13_state = state13_process_end_of_early_data;
  return ssl->s3->early_data_accepted ? ssl_hs_read_end_of_early_data
                                      : ssl_hs_ok;
}

static enum ssl_hs_wait_t do_process_end_of_early_data(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  // In protocols that use EndOfEarlyData, we must consume the extra message and
  // switch to client_handshake_secret after the early return.
  if (uses_end_of_early_data(ssl)) {
    // If early data was not accepted, the EndOfEarlyData will be in the
    // discarded early data.
    if (hs->ssl->s3->early_data_accepted) {
      SSLMessage msg;
      if (!ssl->method->get_message(ssl, &msg)) {
        return ssl_hs_read_message;
      }
      if (!ssl_check_message_type(ssl, msg, SSL3_MT_END_OF_EARLY_DATA)) {
        return ssl_hs_error;
      }
      // EndOfEarlyData has an empty body; reject any trailing bytes.
      if (CBS_len(&msg.body) != 0) {
        ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
        OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
        return ssl_hs_error;
      }
      ssl->method->next_message(ssl);
    }
    // Early data (if any) is done; switch reads to the handshake secret.
    if (!tls13_set_traffic_key(ssl, ssl_encryption_handshake, evp_aead_open,
                               hs->new_session.get(),
                               hs->client_handshake_secret)) {
      return ssl_hs_error;
    }
  }
  hs->tls13_state = state13_read_client_encrypted_extensions;
  return ssl_hs_ok;
}

// do_read_client_encrypted_extensions reads the client EncryptedExtensions
// message, which currently only carries the ALPS application settings
// extension (and only when 0-RTT was not accepted).
static enum ssl_hs_wait_t do_read_client_encrypted_extensions(
    SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  // For now, only one extension uses client EncryptedExtensions. This function
  // may be generalized if others use it in the future.
  if (hs->new_session->has_application_settings &&
      !ssl->s3->early_data_accepted) {
    SSLMessage msg;
    if (!ssl->method->get_message(ssl, &msg)) {
      return ssl_hs_read_message;
    }
    if (!ssl_check_message_type(ssl, msg, SSL3_MT_ENCRYPTED_EXTENSIONS)) {
      return ssl_hs_error;
    }

    CBS body = msg.body, extensions;
    if (!CBS_get_u16_length_prefixed(&body, &extensions) ||
        CBS_len(&body) != 0) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
      return ssl_hs_error;
    }

    // ALPS has an old and a new extension codepoint; honor the configured one.
    uint16_t extension_type = TLSEXT_TYPE_application_settings_old;
    if (hs->config->alps_use_new_codepoint) {
      extension_type = TLSEXT_TYPE_application_settings;
    }
    SSLExtension application_settings(extension_type);
    uint8_t alert = SSL_AD_DECODE_ERROR;
    if (!ssl_parse_extensions(&extensions, &alert, {&application_settings},
                              /*ignore_unknown=*/false)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
      return ssl_hs_error;
    }

    if (!application_settings.present) {
      OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_EXTENSION);
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_MISSING_EXTENSION);
      return ssl_hs_error;
    }

    // Note that, if 0-RTT was accepted, these values will already have been
    // initialized earlier.
    if (!hs->new_session->peer_application_settings.CopyFrom(
            application_settings.data) ||
        !ssl_hash_message(hs, msg)) {
      ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
      return ssl_hs_error;
    }

    ssl->method->next_message(ssl);
  }

  hs->tls13_state = state13_read_client_certificate;
  return ssl_hs_ok;
}

// do_read_client_certificate reads the client Certificate message when one
// was requested; otherwise it skips straight to the Channel ID state.
static enum ssl_hs_wait_t do_read_client_certificate(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  if (!hs->cert_request) {
    if (!ssl->s3->session_reused) {
      // OpenSSL returns X509_V_OK when no certificates are requested. This is
      // classed by them as a bug, but it's assumed by at least NGINX. (Only do
      // this in full handshakes as resumptions should carry over the previous
      // |verify_result|, though this is a no-op because servers do not
      // implement the client's odd soft-fail mode.)
      hs->new_session->verify_result = X509_V_OK;
    }

    // Skip this state.
    hs->tls13_state = state13_read_channel_id;
    return ssl_hs_ok;
  }

  // An empty client certificate is allowed unless the caller demanded one.
  const bool allow_anonymous =
      (hs->config->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) == 0;
  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }
  if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE) ||
      !tls13_process_certificate(hs, msg, allow_anonymous) ||
      !ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }
  ssl->method->next_message(ssl);
  hs->tls13_state = state13_read_client_certificate_verify;
  return ssl_hs_ok;
}

// do_read_client_certificate_verify verifies the client's certificate chain
// (possibly asynchronously) and then processes its CertificateVerify message.
static enum ssl_hs_wait_t do_read_client_certificate_verify(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  if (sk_CRYPTO_BUFFER_num(hs->new_session->certs.get()) == 0) {
    // The client sent no certificate, so there is no CertificateVerify.
    // Skip this state.
    hs->tls13_state = state13_read_channel_id;
    return ssl_hs_ok;
  }

  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }

  switch (ssl_verify_peer_cert(hs)) {
    case ssl_verify_ok:
      break;
    case ssl_verify_invalid:
      return ssl_hs_error;
    case ssl_verify_retry:
      // Re-enter this state once the asynchronous verifier completes.
      hs->tls13_state = state13_read_client_certificate_verify;
      return ssl_hs_certificate_verify;
  }

  if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE_VERIFY) ||
      !tls13_process_certificate_verify(hs, msg) ||
      !ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);
  hs->tls13_state = state13_read_channel_id;
  return ssl_hs_ok;
}

// do_read_channel_id consumes the Channel ID message when the extension was
// negotiated; otherwise the state is skipped.
static enum ssl_hs_wait_t do_read_channel_id(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  if (!hs->channel_id_negotiated) {
    hs->tls13_state = state13_read_client_finished;
    return ssl_hs_ok;
  }

  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }

  if (!ssl_check_message_type(ssl, msg, SSL3_MT_CHANNEL_ID) ||  //
      !tls1_verify_channel_id(hs, msg) ||                       //
      !ssl_hash_message(hs, msg)) {
    return ssl_hs_error;
  }

  ssl->method->next_message(ssl);
  hs->tls13_state = state13_read_client_finished;
  return ssl_hs_ok;
}

static enum ssl_hs_wait_t
do_read_client_finished(SSL_HANDSHAKE *hs) {
  // Reads the client Finished, switches reads to application keys, and
  // decides whether tickets still need to be sent (1-RTT) or were already
  // sent half-RTT.
  SSL *const ssl = hs->ssl;
  SSLMessage msg;
  if (!ssl->method->get_message(ssl, &msg)) {
    return ssl_hs_read_message;
  }
  if (!ssl_check_message_type(ssl, msg, SSL3_MT_FINISHED) ||
      // If early data was accepted, we've already computed the client Finished
      // and derived the resumption secret.
      !tls13_process_finished(hs, msg, ssl->s3->early_data_accepted) ||
      // evp_aead_seal keys have already been switched.
      !tls13_set_traffic_key(ssl, ssl_encryption_application, evp_aead_open,
                             hs->new_session.get(),
                             hs->client_traffic_secret_0)) {
    return ssl_hs_error;
  }

  if (!ssl->s3->early_data_accepted) {
    if (!ssl_hash_message(hs, msg) ||  //
        !tls13_derive_resumption_secret(hs)) {
      return ssl_hs_error;
    }

    // We send post-handshake tickets as part of the handshake in 1-RTT.
    hs->tls13_state = state13_send_new_session_ticket;
  } else {
    // We already sent half-RTT tickets.
    hs->tls13_state = state13_done;
  }

  ssl->method->next_message(ssl);
  if (SSL_is_dtls(ssl)) {
    // DTLS 1.3 acknowledges the client's final flight.
    ssl->method->schedule_ack(ssl);
    return ssl_hs_flush;
  }
  return ssl_hs_ok;
}

// do_send_new_session_ticket emits post-handshake NewSessionTicket messages.
static enum ssl_hs_wait_t do_send_new_session_ticket(SSL_HANDSHAKE *hs) {
  SSL *const ssl = hs->ssl;
  bool sent_tickets;
  if (!add_new_session_tickets(hs, &sent_tickets)) {
    return ssl_hs_error;
  }

  hs->tls13_state = state13_done;
  // In QUIC and DTLS, we can flush the ticket to the transport immediately. In
  // TLS over TCP-like transports, we defer until the server performs a write.
  // This prevents a non-reading client from causing the server to hang in the
  // case of a small server write buffer. Consumers which don't write data to
  // the client will need to do a zero-byte write if they wish to flush the
  // tickets.
  bool should_flush = sent_tickets && (SSL_is_dtls(ssl) || SSL_is_quic(ssl));
  return should_flush ?
ssl_hs_flush : ssl_hs_ok; } enum ssl_hs_wait_t tls13_server_handshake(SSL_HANDSHAKE *hs) { while (hs->tls13_state != state13_done) { enum ssl_hs_wait_t ret = ssl_hs_error; enum tls13_server_hs_state_t state = static_cast(hs->tls13_state); switch (state) { case state13_select_parameters: ret = do_select_parameters(hs); break; case state13_select_session: ret = do_select_session(hs); break; case state13_send_hello_retry_request: ret = do_send_hello_retry_request(hs); break; case state13_read_second_client_hello: ret = do_read_second_client_hello(hs); break; case state13_send_server_hello: ret = do_send_server_hello(hs); break; case state13_send_server_certificate_verify: ret = do_send_server_certificate_verify(hs); break; case state13_send_server_finished: ret = do_send_server_finished(hs); break; case state13_send_half_rtt_ticket: ret = do_send_half_rtt_ticket(hs); break; case state13_read_second_client_flight: ret = do_read_second_client_flight(hs); break; case state13_process_end_of_early_data: ret = do_process_end_of_early_data(hs); break; case state13_read_client_encrypted_extensions: ret = do_read_client_encrypted_extensions(hs); break; case state13_read_client_certificate: ret = do_read_client_certificate(hs); break; case state13_read_client_certificate_verify: ret = do_read_client_certificate_verify(hs); break; case state13_read_channel_id: ret = do_read_channel_id(hs); break; case state13_read_client_finished: ret = do_read_client_finished(hs); break; case state13_send_new_session_ticket: ret = do_send_new_session_ticket(hs); break; case state13_done: ret = ssl_hs_ok; break; } if (hs->tls13_state != state) { ssl_do_info_callback(hs->ssl, SSL_CB_ACCEPT_LOOP, 1); } if (ret != ssl_hs_ok) { return ret; } } return ssl_hs_ok; } const char *tls13_server_handshake_state(SSL_HANDSHAKE *hs) { enum tls13_server_hs_state_t state = static_cast(hs->tls13_state); switch (state) { case state13_select_parameters: return "TLS 1.3 server select_parameters"; case 
state13_select_session: return "TLS 1.3 server select_session"; case state13_send_hello_retry_request: return "TLS 1.3 server send_hello_retry_request"; case state13_read_second_client_hello: return "TLS 1.3 server read_second_client_hello"; case state13_send_server_hello: return "TLS 1.3 server send_server_hello"; case state13_send_server_certificate_verify: return "TLS 1.3 server send_server_certificate_verify"; case state13_send_half_rtt_ticket: return "TLS 1.3 server send_half_rtt_ticket"; case state13_send_server_finished: return "TLS 1.3 server send_server_finished"; case state13_read_second_client_flight: return "TLS 1.3 server read_second_client_flight"; case state13_process_end_of_early_data: return "TLS 1.3 server process_end_of_early_data"; case state13_read_client_encrypted_extensions: return "TLS 1.3 server read_client_encrypted_extensions"; case state13_read_client_certificate: return "TLS 1.3 server read_client_certificate"; case state13_read_client_certificate_verify: return "TLS 1.3 server read_client_certificate_verify"; case state13_read_channel_id: return "TLS 1.3 server read_channel_id"; case state13_read_client_finished: return "TLS 1.3 server read_client_finished"; case state13_send_new_session_ticket: return "TLS 1.3 server send_new_session_ticket"; case state13_done: return "TLS 1.3 server done"; } return "TLS 1.3 server unknown"; } BSSL_NAMESPACE_END ================================================ FILE: Sources/CNIOBoringSSL/ssl/tls_method.cc ================================================ /* * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the angle-bracket header names of the four includes below
// appear to have been lost in extraction — confirm against upstream
// BoringSSL tls_method.cc.
#include
#include
#include
#include

#include "../crypto/internal.h"
#include "internal.h"


BSSL_NAMESPACE_BEGIN

// tls_on_handshake_complete releases handshake buffers once the handshake is
// finished.
static void tls_on_handshake_complete(SSL *ssl) {
  // The handshake should have released its final message.
  assert(!ssl->s3->has_message);

  // During the handshake, |hs_buf| is retained. Release if it there is no
  // excess in it. There should not be any excess because the handshake logic
  // rejects unprocessed data after each Finished message. Note this means we do
  // not allow a TLS 1.2 HelloRequest to be packed into the same record as
  // Finished. (Schannel also rejects this.)
  assert(!ssl->s3->hs_buf || ssl->s3->hs_buf->length == 0);
  if (ssl->s3->hs_buf && ssl->s3->hs_buf->length == 0) {
    ssl->s3->hs_buf.reset();
  }
}

// tls_set_read_state installs a new read cipher state (and, for QUIC, exports
// the read secret to the QUIC method).
// NOTE(review): the template arguments of |UniquePtr| and |Span| in the
// signature below appear to have been lost in extraction — confirm against
// upstream BoringSSL.
static bool tls_set_read_state(SSL *ssl, ssl_encryption_level_t level,
                               UniquePtr aead_ctx,
                               Span traffic_secret) {
  // Cipher changes are forbidden if the current epoch has leftover data.
  if (tls_has_unprocessed_handshake_data(ssl)) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESS_HANDSHAKE_DATA);
    ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE);
    return false;
  }

  if (SSL_is_quic(ssl)) {
    if ((ssl->s3->hs == nullptr || !ssl->s3->hs->hints_requested) &&
        !ssl->quic_method->set_read_secret(ssl, level, aead_ctx->cipher(),
                                           traffic_secret.data(),
                                           traffic_secret.size())) {
      return false;
    }

    // QUIC only uses |ssl| for handshake messages, which never use early data
    // keys, so we return without installing anything. This avoids needing to
    // have two secrets active at once in 0-RTT.
    if (level == ssl_encryption_early_data) {
      return true;
    }
    ssl->s3->quic_read_level = level;
  }

  ssl->s3->read_sequence = 0;
  ssl->s3->aead_read_ctx = std::move(aead_ctx);
  return true;
}

// tls_set_write_state installs a new write cipher state (and, for QUIC,
// exports the write secret), flushing any pending handshake data first.
static bool tls_set_write_state(SSL *ssl, ssl_encryption_level_t level,
                                UniquePtr aead_ctx,
                                Span traffic_secret) {
  if (!tls_flush_pending_hs_data(ssl)) {
    return false;
  }

  if (SSL_is_quic(ssl)) {
    if ((ssl->s3->hs == nullptr || !ssl->s3->hs->hints_requested) &&
        !ssl->quic_method->set_write_secret(ssl, level, aead_ctx->cipher(),
                                            traffic_secret.data(),
                                            traffic_secret.size())) {
      return false;
    }

    // QUIC only uses |ssl| for handshake messages, which never use early data
    // keys, so we return without installing anything. This avoids needing to
    // have two secrets active at once in 0-RTT.
    if (level == ssl_encryption_early_data) {
      return true;
    }
    ssl->s3->quic_write_level = level;
  }

  ssl->s3->write_sequence = 0;
  ssl->s3->aead_write_ctx = std::move(aead_ctx);
  return true;
}

static void tls_finish_flight(SSL *ssl) {
  // We don't track whether a flight is complete in TLS and instead always flush
  // every queued message in |tls_flush|, whether the flight is complete or not.
}

static void tls_schedule_ack(SSL *ssl) {
  // TLS does not use ACKs.
}

// kTLSProtocolMethod is the protocol-method vtable for stream-based TLS (as
// opposed to DTLS).
static const SSL_PROTOCOL_METHOD kTLSProtocolMethod = {
    false /* is_dtls */,
    tls_new,
    tls_free,
    tls_get_message,
    tls_next_message,
    tls_has_unprocessed_handshake_data,
    tls_open_handshake,
    tls_open_change_cipher_spec,
    tls_open_app_data,
    tls_write_app_data,
    tls_dispatch_alert,
    tls_init_message,
    tls_finish_message,
    tls_add_message,
    tls_add_change_cipher_spec,
    tls_finish_flight,
    tls_schedule_ack,
    tls_flush,
    tls_on_handshake_complete,
    tls_set_read_state,
    tls_set_write_state,
};

// The ssl_noop_x509_* functions implement the X.509 method for callers that
// use raw CRYPTO_BUFFER-based certificates: every hook is a no-op, and chain
// verification always fails (callers must install their own verifier).
static bool ssl_noop_x509_check_client_CA_names(
    STACK_OF(CRYPTO_BUFFER) *names) {
  return true;
}

static void ssl_noop_x509_clear(CERT *cert) {}
static void ssl_noop_x509_free(CERT *cert) {}
static void ssl_noop_x509_dup(CERT *new_cert, const CERT *cert) {}
static void ssl_noop_x509_flush_cached_leaf(CERT *cert) {}
static void ssl_noop_x509_flush_cached_chain(CERT *cert) {}

static bool ssl_noop_x509_session_cache_objects(SSL_SESSION *sess) {
  return true;
}

static bool ssl_noop_x509_session_dup(SSL_SESSION *new_session,
                                      const SSL_SESSION *session) {
  return true;
}

static void ssl_noop_x509_session_clear(SSL_SESSION *session) {}

// Deliberately fails: buffer-only consumers must supply a custom verifier.
static bool ssl_noop_x509_session_verify_cert_chain(SSL_SESSION *session,
                                                    SSL_HANDSHAKE *hs,
                                                    uint8_t *out_alert) {
  return false;
}

static void ssl_noop_x509_hs_flush_cached_ca_names(SSL_HANDSHAKE *hs) {}

static bool ssl_noop_x509_ssl_new(SSL_HANDSHAKE *hs) { return true; }
static void ssl_noop_x509_ssl_config_free(SSL_CONFIG *cfg) {}
static void ssl_noop_x509_ssl_flush_cached_client_CA(SSL_CONFIG *cfg) {}

static bool ssl_noop_x509_ssl_auto_chain_if_needed(SSL_HANDSHAKE *hs) {
  return true;
}

static bool ssl_noop_x509_ssl_ctx_new(SSL_CTX *ctx) { return true; }
static void ssl_noop_x509_ssl_ctx_free(SSL_CTX *ctx) {}
static void ssl_noop_x509_ssl_ctx_flush_cached_client_CA(SSL_CTX *ctx) {}

const SSL_X509_METHOD ssl_noop_x509_method = {
    ssl_noop_x509_check_client_CA_names,
    ssl_noop_x509_clear,
    ssl_noop_x509_free,
    ssl_noop_x509_dup,
    ssl_noop_x509_flush_cached_chain,
    ssl_noop_x509_flush_cached_leaf,
    ssl_noop_x509_session_cache_objects,
    ssl_noop_x509_session_dup,
    ssl_noop_x509_session_clear,
    ssl_noop_x509_session_verify_cert_chain,
    ssl_noop_x509_hs_flush_cached_ca_names,
    ssl_noop_x509_ssl_new,
    ssl_noop_x509_ssl_config_free,
    ssl_noop_x509_ssl_flush_cached_client_CA,
    ssl_noop_x509_ssl_auto_chain_if_needed,
    ssl_noop_x509_ssl_ctx_new,
    ssl_noop_x509_ssl_ctx_free,
    ssl_noop_x509_ssl_ctx_flush_cached_client_CA,
};

BSSL_NAMESPACE_END

using namespace bssl;

// TLS_method is the general-purpose, version-negotiating TLS method (a
// |version| of 0 means no version lock).
const SSL_METHOD *TLS_method(void) {
  static const SSL_METHOD kMethod = {
      0,
      &kTLSProtocolMethod,
      &ssl_crypto_x509_method,
  };
  return &kMethod;
}

const SSL_METHOD *SSLv23_method(void) { return TLS_method(); }

// TLS_with_buffers_method is |TLS_method| with the no-op X.509 method, for
// CRYPTO_BUFFER-based certificate handling.
const SSL_METHOD *TLS_with_buffers_method(void) {
  static const SSL_METHOD kMethod = {
      0,
      &kTLSProtocolMethod,
      &ssl_noop_x509_method,
  };
  return &kMethod;
}

// Legacy version-locked methods.

const SSL_METHOD *TLSv1_2_method(void) {
  static const SSL_METHOD kMethod = {
      TLS1_2_VERSION,
      &kTLSProtocolMethod,
      &ssl_crypto_x509_method,
  };
  return &kMethod;
}

const SSL_METHOD *TLSv1_1_method(void) {
  static const SSL_METHOD kMethod = {
      TLS1_1_VERSION,
      &kTLSProtocolMethod,
      &ssl_crypto_x509_method,
  };
  return &kMethod;
}

const SSL_METHOD *TLSv1_method(void) {
  static const SSL_METHOD kMethod = {
      TLS1_VERSION,
      &kTLSProtocolMethod,
      &ssl_crypto_x509_method,
  };
  return &kMethod;
}

// Legacy side-specific methods.
// The side-specific methods are aliases of the corresponding version-locked
// or negotiating methods; BoringSSL does not distinguish client and server
// methods.
const SSL_METHOD *TLSv1_2_server_method(void) { return TLSv1_2_method(); }

const SSL_METHOD *TLSv1_1_server_method(void) { return TLSv1_1_method(); }

const SSL_METHOD *TLSv1_server_method(void) { return TLSv1_method(); }

const SSL_METHOD *TLSv1_2_client_method(void) { return TLSv1_2_method(); }

const SSL_METHOD *TLSv1_1_client_method(void) { return TLSv1_1_method(); }

const SSL_METHOD *TLSv1_client_method(void) { return TLSv1_method(); }

const SSL_METHOD *SSLv23_server_method(void) { return SSLv23_method(); }

const SSL_METHOD *SSLv23_client_method(void) { return SSLv23_method(); }

const SSL_METHOD *TLS_server_method(void) { return TLS_method(); }

const SSL_METHOD *TLS_client_method(void) { return TLS_method(); }



================================================
FILE: Sources/CNIOBoringSSL/ssl/tls_record.cc
================================================
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

// NOTE(review): the angle-bracket header names of the six includes below
// appear to have been lost in extraction — confirm against upstream
// BoringSSL tls_record.cc.
#include
#include
#include
#include
#include
#include

#include "../crypto/internal.h"
#include "internal.h"


BSSL_NAMESPACE_BEGIN

// kMaxEmptyRecords is the number of consecutive, empty records that will be
// processed. Without this limit an attacker could send empty records at a
// faster rate than we can process and cause record processing to loop
// forever.
static const uint8_t kMaxEmptyRecords = 32;

// kMaxEarlyDataSkipped is the maximum number of rejected early data bytes that
// will be skipped. Without this limit an attacker could send records at a
// faster rate than we can process and cause trial decryption to loop forever.
// This value should be slightly above kMaxEarlyDataAccepted, which is measured
// in plaintext.
static const size_t kMaxEarlyDataSkipped = 16384;

// kMaxWarningAlerts is the number of consecutive warning alerts that will be
// processed.
static const uint8_t kMaxWarningAlerts = 4;

// ssl_needs_record_splitting returns one if |ssl|'s current outgoing cipher
// state needs record-splitting and zero otherwise.
bool ssl_needs_record_splitting(const SSL *ssl) {
#if !defined(BORINGSSL_UNSAFE_FUZZER_MODE)
  // Record splitting (1/n-1) only applies to CBC cipher suites before
  // TLS 1.1, and only when the caller opted in via SSL_MODE_CBC_RECORD_SPLITTING.
  return !ssl->s3->aead_write_ctx->is_null_cipher() &&
         ssl_protocol_version(ssl) < TLS1_1_VERSION &&
         (ssl->mode & SSL_MODE_CBC_RECORD_SPLITTING) != 0 &&
         SSL_CIPHER_is_block_cipher(ssl->s3->aead_write_ctx->cipher());
#else
  return false;
#endif
}

// ssl_record_prefix_len returns the number of bytes that precede the
// ciphertext payload of an incoming TLS record (header plus any explicit
// nonce). TLS-only: DTLS is asserted away.
size_t ssl_record_prefix_len(const SSL *ssl) {
  assert(!SSL_is_dtls(ssl));
  return SSL3_RT_HEADER_LENGTH + ssl->s3->aead_read_ctx->ExplicitNonceLen();
}

// skip_early_data accounts for |consumed| bytes of rejected early data and
// either discards the record or fails once the skip budget is exhausted.
static ssl_open_record_t skip_early_data(SSL *ssl, uint8_t *out_alert,
                                         size_t consumed) {
  ssl->s3->early_data_skipped += consumed;
  // On unsigned overflow, saturate past the limit so the check below fails.
  if (ssl->s3->early_data_skipped < consumed) {
    ssl->s3->early_data_skipped = kMaxEarlyDataSkipped + 1;
  }

  if (ssl->s3->early_data_skipped > kMaxEarlyDataSkipped) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MUCH_SKIPPED_EARLY_DATA);
    *out_alert = SSL_AD_UNEXPECTED_MESSAGE;
    return ssl_open_record_error;
  }

  return ssl_open_record_discard;
}

// tls_record_version returns the legacy record-layer version to place in
// outgoing records.
static uint16_t tls_record_version(const SSL *ssl) {
  if (ssl->s3->version == 0) {
    // Before the version is determined, outgoing records use TLS 1.0 for
    // historical compatibility requirements.
    return TLS1_VERSION;
  }
  // TLS 1.3 freezes the record version at TLS 1.2. Previous ones use the
  // version itself.
  return ssl_protocol_version(ssl) >= TLS1_3_VERSION ?
TLS1_2_VERSION : ssl->s3->version; } ssl_open_record_t tls_open_record(SSL *ssl, uint8_t *out_type, Span *out, size_t *out_consumed, uint8_t *out_alert, Span in) { *out_consumed = 0; if (ssl->s3->read_shutdown == ssl_shutdown_close_notify) { return ssl_open_record_close_notify; } // If there is an unprocessed handshake message or we are already buffering // too much, stop before decrypting another handshake record. if (!tls_can_accept_handshake_data(ssl, out_alert)) { return ssl_open_record_error; } CBS cbs = CBS(in); // Decode the record header. uint8_t type; uint16_t version, ciphertext_len; if (!CBS_get_u8(&cbs, &type) || // !CBS_get_u16(&cbs, &version) || // !CBS_get_u16(&cbs, &ciphertext_len)) { *out_consumed = SSL3_RT_HEADER_LENGTH; return ssl_open_record_partial; } bool version_ok; if (ssl->s3->aead_read_ctx->is_null_cipher()) { // Only check the first byte. Enforcing beyond that can prevent decoding // version negotiation failure alerts. version_ok = (version >> 8) == SSL3_VERSION_MAJOR; } else { version_ok = version == tls_record_version(ssl); } if (!version_ok) { OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_VERSION_NUMBER); *out_alert = SSL_AD_PROTOCOL_VERSION; return ssl_open_record_error; } // Check the ciphertext length. if (ciphertext_len > SSL3_RT_MAX_ENCRYPTED_LENGTH) { OPENSSL_PUT_ERROR(SSL, SSL_R_ENCRYPTED_LENGTH_TOO_LONG); *out_alert = SSL_AD_RECORD_OVERFLOW; return ssl_open_record_error; } // Extract the body. CBS body; if (!CBS_get_bytes(&cbs, &body, ciphertext_len)) { *out_consumed = SSL3_RT_HEADER_LENGTH + (size_t)ciphertext_len; return ssl_open_record_partial; } Span header = in.subspan(0, SSL3_RT_HEADER_LENGTH); ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_HEADER, header); *out_consumed = in.size() - CBS_len(&cbs); // In TLS 1.3, during the handshake, skip ChangeCipherSpec records. 
static const uint8_t kChangeCipherSpec[] = {SSL3_MT_CCS}; if (ssl_has_final_version(ssl) && ssl_protocol_version(ssl) >= TLS1_3_VERSION && SSL_in_init(ssl) && type == SSL3_RT_CHANGE_CIPHER_SPEC && Span(body) == kChangeCipherSpec) { ssl->s3->empty_record_count++; if (ssl->s3->empty_record_count > kMaxEmptyRecords) { OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MANY_EMPTY_FRAGMENTS); *out_alert = SSL_AD_UNEXPECTED_MESSAGE; return ssl_open_record_error; } return ssl_open_record_discard; } // Skip early data received when expecting a second ClientHello if we rejected // 0RTT. if (ssl->s3->skip_early_data && // ssl->s3->aead_read_ctx->is_null_cipher() && // type == SSL3_RT_APPLICATION_DATA) { return skip_early_data(ssl, out_alert, *out_consumed); } // Ensure the sequence number update does not overflow. if (ssl->s3->read_sequence + 1 == 0) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); *out_alert = SSL_AD_INTERNAL_ERROR; return ssl_open_record_error; } // Decrypt the body in-place. if (!ssl->s3->aead_read_ctx->Open( out, type, version, ssl->s3->read_sequence, header, Span(const_cast(CBS_data(&body)), CBS_len(&body)))) { if (ssl->s3->skip_early_data && !ssl->s3->aead_read_ctx->is_null_cipher()) { ERR_clear_error(); return skip_early_data(ssl, out_alert, *out_consumed); } OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC); *out_alert = SSL_AD_BAD_RECORD_MAC; return ssl_open_record_error; } ssl->s3->skip_early_data = false; ssl->s3->read_sequence++; // TLS 1.3 hides the record type inside the encrypted data. bool has_padding = !ssl->s3->aead_read_ctx->is_null_cipher() && ssl_protocol_version(ssl) >= TLS1_3_VERSION; // If there is padding, the plaintext limit includes the padding, but includes // extra room for the inner content type. size_t plaintext_limit = has_padding ? 
SSL3_RT_MAX_PLAIN_LENGTH + 1 : SSL3_RT_MAX_PLAIN_LENGTH; if (out->size() > plaintext_limit) { OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG); *out_alert = SSL_AD_RECORD_OVERFLOW; return ssl_open_record_error; } if (has_padding) { // The outer record type is always application_data. if (type != SSL3_RT_APPLICATION_DATA) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_OUTER_RECORD_TYPE); *out_alert = SSL_AD_DECODE_ERROR; return ssl_open_record_error; } do { if (out->empty()) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC); *out_alert = SSL_AD_DECRYPT_ERROR; return ssl_open_record_error; } type = out->back(); *out = out->subspan(0, out->size() - 1); } while (type == 0); } // Limit the number of consecutive empty records. if (out->empty()) { ssl->s3->empty_record_count++; if (ssl->s3->empty_record_count > kMaxEmptyRecords) { OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MANY_EMPTY_FRAGMENTS); *out_alert = SSL_AD_UNEXPECTED_MESSAGE; return ssl_open_record_error; } // Apart from the limit, empty records are returned up to the caller. This // allows the caller to reject records of the wrong type. } else { ssl->s3->empty_record_count = 0; } if (type == SSL3_RT_ALERT) { return ssl_process_alert(ssl, out_alert, *out); } // Handshake messages may not interleave with any other record type. if (type != SSL3_RT_HANDSHAKE && // tls_has_unprocessed_handshake_data(ssl)) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); *out_alert = SSL_AD_UNEXPECTED_MESSAGE; return ssl_open_record_error; } ssl->s3->warning_alert_count = 0; *out_type = type; return ssl_open_record_success; } static bool do_seal_record(SSL *ssl, uint8_t *out_prefix, uint8_t *out, uint8_t *out_suffix, uint8_t type, const uint8_t *in, const size_t in_len) { SSLAEADContext *aead = ssl->s3->aead_write_ctx.get(); uint8_t *extra_in = NULL; size_t extra_in_len = 0; if (!aead->is_null_cipher() && ssl_protocol_version(ssl) >= TLS1_3_VERSION) { // TLS 1.3 hides the actual record type inside the encrypted data. 
extra_in = &type; extra_in_len = 1; } size_t suffix_len, ciphertext_len; if (!aead->SuffixLen(&suffix_len, in_len, extra_in_len) || !aead->CiphertextLen(&ciphertext_len, in_len, extra_in_len)) { OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE); return false; } assert(in == out || !buffers_alias(in, in_len, out, in_len)); assert(!buffers_alias(in, in_len, out_prefix, ssl_record_prefix_len(ssl))); assert(!buffers_alias(in, in_len, out_suffix, suffix_len)); if (extra_in_len) { out_prefix[0] = SSL3_RT_APPLICATION_DATA; } else { out_prefix[0] = type; } uint16_t record_version = tls_record_version(ssl); out_prefix[1] = record_version >> 8; out_prefix[2] = record_version & 0xff; out_prefix[3] = ciphertext_len >> 8; out_prefix[4] = ciphertext_len & 0xff; Span header = Span(out_prefix, SSL3_RT_HEADER_LENGTH); // Ensure the sequence number update does not overflow. if (ssl->s3->write_sequence + 1 == 0) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); return false; } if (!aead->SealScatter(out_prefix + SSL3_RT_HEADER_LENGTH, out, out_suffix, out_prefix[0], record_version, ssl->s3->write_sequence, header, in, in_len, extra_in, extra_in_len)) { return false; } ssl->s3->write_sequence++; ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_HEADER, header); return true; } static size_t tls_seal_scatter_prefix_len(const SSL *ssl, uint8_t type, size_t in_len) { size_t ret = SSL3_RT_HEADER_LENGTH; if (type == SSL3_RT_APPLICATION_DATA && in_len > 1 && ssl_needs_record_splitting(ssl)) { // In the case of record splitting, the 1-byte record (of the 1/n-1 split) // will be placed in the prefix, as will four of the five bytes of the // record header for the main record. The final byte will replace the first // byte of the plaintext that was used in the small record. 
ret += ssl_cipher_get_record_split_len(ssl->s3->aead_write_ctx->cipher()); ret += SSL3_RT_HEADER_LENGTH - 1; } else { ret += ssl->s3->aead_write_ctx->ExplicitNonceLen(); } return ret; } static bool tls_seal_scatter_suffix_len(const SSL *ssl, size_t *out_suffix_len, uint8_t type, size_t in_len) { size_t extra_in_len = 0; if (!ssl->s3->aead_write_ctx->is_null_cipher() && ssl_protocol_version(ssl) >= TLS1_3_VERSION) { // TLS 1.3 adds an extra byte for encrypted record type. extra_in_len = 1; } // clang-format off if (type == SSL3_RT_APPLICATION_DATA && in_len > 1 && ssl_needs_record_splitting(ssl)) { // With record splitting enabled, the first byte gets sealed into a separate // record which is written into the prefix. in_len -= 1; } // clang-format on return ssl->s3->aead_write_ctx->SuffixLen(out_suffix_len, in_len, extra_in_len); } // tls_seal_scatter_record seals a new record of type |type| and body |in| and // splits it between |out_prefix|, |out|, and |out_suffix|. Exactly // |tls_seal_scatter_prefix_len| bytes are written to |out_prefix|, |in_len| // bytes to |out|, and |tls_seal_scatter_suffix_len| bytes to |out_suffix|. It // returns one on success and zero on error. If enabled, // |tls_seal_scatter_record| implements TLS 1.0 CBC 1/n-1 record splitting and // may write two records concatenated. static bool tls_seal_scatter_record(SSL *ssl, uint8_t *out_prefix, uint8_t *out, uint8_t *out_suffix, uint8_t type, const uint8_t *in, size_t in_len) { if (type == SSL3_RT_APPLICATION_DATA && in_len > 1 && ssl_needs_record_splitting(ssl)) { assert(ssl->s3->aead_write_ctx->ExplicitNonceLen() == 0); const size_t prefix_len = SSL3_RT_HEADER_LENGTH; // Write the 1-byte fragment into |out_prefix|. 
uint8_t *split_body = out_prefix + prefix_len; uint8_t *split_suffix = split_body + 1; if (!do_seal_record(ssl, out_prefix, split_body, split_suffix, type, in, 1)) { return false; } size_t split_record_suffix_len; if (!ssl->s3->aead_write_ctx->SuffixLen(&split_record_suffix_len, 1, 0)) { assert(false); return false; } const size_t split_record_len = prefix_len + 1 + split_record_suffix_len; assert(SSL3_RT_HEADER_LENGTH + ssl_cipher_get_record_split_len( ssl->s3->aead_write_ctx->cipher()) == split_record_len); // Write the n-1-byte fragment. The header gets split between |out_prefix| // (header[:-1]) and |out| (header[-1:]). uint8_t tmp_prefix[SSL3_RT_HEADER_LENGTH]; if (!do_seal_record(ssl, tmp_prefix, out + 1, out_suffix, type, in + 1, in_len - 1)) { return false; } assert(tls_seal_scatter_prefix_len(ssl, type, in_len) == split_record_len + SSL3_RT_HEADER_LENGTH - 1); OPENSSL_memcpy(out_prefix + split_record_len, tmp_prefix, SSL3_RT_HEADER_LENGTH - 1); OPENSSL_memcpy(out, tmp_prefix + SSL3_RT_HEADER_LENGTH - 1, 1); return true; } return do_seal_record(ssl, out_prefix, out, out_suffix, type, in, in_len); } bool tls_seal_record(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out_len, uint8_t type, const uint8_t *in, size_t in_len) { if (buffers_alias(in, in_len, out, max_out_len)) { OPENSSL_PUT_ERROR(SSL, SSL_R_OUTPUT_ALIASES_INPUT); return false; } const size_t prefix_len = tls_seal_scatter_prefix_len(ssl, type, in_len); size_t suffix_len; if (!tls_seal_scatter_suffix_len(ssl, &suffix_len, type, in_len)) { return false; } if (in_len + prefix_len < in_len || prefix_len + in_len + suffix_len < prefix_len + in_len) { OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE); return false; } if (max_out_len < in_len + prefix_len + suffix_len) { OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); return false; } uint8_t *prefix = out; uint8_t *body = out + prefix_len; uint8_t *suffix = body + in_len; if (!tls_seal_scatter_record(ssl, prefix, body, suffix, type, in, in_len)) { 
return false; } *out_len = prefix_len + in_len + suffix_len; return true; } enum ssl_open_record_t ssl_process_alert(SSL *ssl, uint8_t *out_alert, Span in) { // Alerts records may not contain fragmented or multiple alerts. if (in.size() != 2) { *out_alert = SSL_AD_DECODE_ERROR; OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ALERT); return ssl_open_record_error; } ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_ALERT, in); const uint8_t alert_level = in[0]; const uint8_t alert_descr = in[1]; uint16_t alert = (alert_level << 8) | alert_descr; ssl_do_info_callback(ssl, SSL_CB_READ_ALERT, alert); if (alert_level == SSL3_AL_WARNING) { if (alert_descr == SSL_AD_CLOSE_NOTIFY) { ssl->s3->read_shutdown = ssl_shutdown_close_notify; return ssl_open_record_close_notify; } // Warning alerts do not exist in TLS 1.3, but RFC 8446 section 6.1 // continues to define user_canceled as a signal to cancel the handshake, // without specifying how to handle it. JDK11 misuses it to signal // full-duplex connection close after the handshake. As a workaround, skip // user_canceled as in TLS 1.2. This matches NSS and OpenSSL. if (ssl_has_final_version(ssl) && ssl_protocol_version(ssl) >= TLS1_3_VERSION && alert_descr != SSL_AD_USER_CANCELLED) { *out_alert = SSL_AD_DECODE_ERROR; OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ALERT); return ssl_open_record_error; } ssl->s3->warning_alert_count++; if (ssl->s3->warning_alert_count > kMaxWarningAlerts) { *out_alert = SSL_AD_UNEXPECTED_MESSAGE; OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MANY_WARNING_ALERTS); return ssl_open_record_error; } return ssl_open_record_discard; } if (alert_level == SSL3_AL_FATAL) { OPENSSL_PUT_ERROR(SSL, SSL_AD_REASON_OFFSET + alert_descr); ERR_add_error_dataf("SSL alert number %d", alert_descr); *out_alert = 0; // No alert to send back to the peer. 
return ssl_open_record_error; } *out_alert = SSL_AD_ILLEGAL_PARAMETER; OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_ALERT_TYPE); return ssl_open_record_error; } BSSL_NAMESPACE_END using namespace bssl; size_t SSL_max_seal_overhead(const SSL *ssl) { if (SSL_is_dtls(ssl)) { // TODO(crbug.com/381113363): Use the 0-RTT epoch if writing 0-RTT. return dtls_max_seal_overhead(ssl, ssl->d1->write_epoch.epoch()); } size_t ret = SSL3_RT_HEADER_LENGTH; ret += ssl->s3->aead_write_ctx->MaxOverhead(); // TLS 1.3 needs an extra byte for the encrypted record type. if (!ssl->s3->aead_write_ctx->is_null_cipher() && ssl_protocol_version(ssl) >= TLS1_3_VERSION) { ret += 1; } if (ssl_needs_record_splitting(ssl)) { ret *= 2; } return ret; } ================================================ FILE: Sources/CNIOBoringSSL/third_party/fiat/asm/fiat_curve25519_adx_mul.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \ (defined(__APPLE__) || defined(__ELF__)) .intel_syntax noprefix .text #if defined(__APPLE__) .private_extern _fiat_curve25519_adx_mul .global _fiat_curve25519_adx_mul _fiat_curve25519_adx_mul: #else .type fiat_curve25519_adx_mul, @function .hidden fiat_curve25519_adx_mul .global fiat_curve25519_adx_mul fiat_curve25519_adx_mul: #endif .cfi_startproc _CET_ENDBR push rbp .cfi_adjust_cfa_offset 8 .cfi_offset rbp, -16 mov rbp, rsp mov rax, rdx mov rdx, [ rsi + 0x18 ] mulx r11, r10, [ rax + 0x8 ] mov rdx, [ rax + 0x0 ] mov [ rsp - 0x58 ], r15 .cfi_offset r15, -16-0x58 mulx r8, rcx, [ rsi + 0x18 ] mov rdx, [ rsi + 0x8 ] mov [ rsp - 0x80 ], rbx .cfi_offset rbx, -16-0x80 mulx rbx, r9, [ rax + 0x18 ] mov rdx, [ rsi + 0x8 ] mov [ rsp - 0x70 ], r12 .cfi_offset r12, -16-0x70 mulx r15, r12, [ rax + 0x8 ] mov rdx, [ rsi + 0x0 ] mov [ rsp - 0x68 ], r13 .cfi_offset r13, -16-0x68 mov [ rsp - 0x60 ], r14 .cfi_offset r14, -16-0x60 mulx r14, r13, [ rax + 0x0 ] mov rdx, [ rax + 0x10 ] mov [ rsp - 0x18 ], 
r15 mov [ rsp - 0x50 ], rdi mulx rdi, r15, [ rsi + 0x0 ] mov rdx, [ rax + 0x18 ] mov [ rsp - 0x48 ], r13 mov [ rsp - 0x40 ], r9 mulx r9, r13, [ rsi + 0x0 ] test al, al adox rcx, rdi mov rdx, [ rsi + 0x10 ] mov [ rsp - 0x38 ], r13 mulx r13, rdi, [ rax + 0x8 ] adox r10, r9 mov rdx, 0x0 adox rbx, rdx adcx rdi, rcx adcx r8, r10 mov r9, rdx adcx r9, rbx mov rdx, [ rsi + 0x10 ] mulx r10, rcx, [ rax + 0x0 ] mov rdx, [ rsi + 0x0 ] mov [ rsp - 0x30 ], r15 mulx r15, rbx, [ rax + 0x8 ] mov rdx, -0x2 inc rdx adox rcx, r15 setc r15b clc adcx rcx, r12 adox r10, rdi mov rdx, [ rax + 0x10 ] mov [ rsp - 0x78 ], rcx mulx rcx, rdi, [ rsi + 0x10 ] adox rdi, r8 mov rdx, [ rax + 0x18 ] mov [ rsp - 0x28 ], rcx mulx rcx, r8, [ rsi + 0x10 ] mov rdx, [ rax + 0x10 ] mov [ rsp - 0x20 ], r8 mulx r12, r8, [ rsi + 0x18 ] adox r8, r9 mov rdx, [ rsi + 0x8 ] mov [ rsp - 0x10 ], r12 mulx r12, r9, [ rax + 0x10 ] movzx rdx, r15b lea rdx, [ rdx + rcx ] adcx r9, r10 adcx r13, rdi mov r15, 0x0 mov r10, r15 adox r10, rdx mov rdx, [ rax + 0x18 ] mulx rcx, rdi, [ rsi + 0x18 ] adox rcx, r15 adcx r11, r8 mov rdx, r15 adcx rdx, r10 adcx rcx, r15 mov r8, rdx mov rdx, [ rax + 0x0 ] mulx r15, r10, [ rsi + 0x8 ] test al, al adox r10, r14 adcx rbx, r10 adox r15, [ rsp - 0x78 ] adcx r15, [ rsp - 0x30 ] adox r9, [ rsp - 0x18 ] adcx r9, [ rsp - 0x38 ] adox r13, [ rsp - 0x40 ] adcx r12, r13 adox r11, [ rsp - 0x20 ] adcx r11, [ rsp - 0x28 ] mov rdx, 0x26 mulx rsi, r14, r12 adox rdi, r8 adcx rdi, [ rsp - 0x10 ] mulx r10, r8, r11 mov r13, 0x0 adox rcx, r13 adcx rcx, r13 mulx r11, r12, rdi xor rdi, rdi adox r8, rbx adox r12, r15 mulx rbx, r13, rcx adcx r14, [ rsp - 0x48 ] adox r13, r9 adox rbx, rdi adcx rsi, r8 adcx r10, r12 adcx r11, r13 adc rbx, 0x0 mulx r9, r15, rbx xor r9, r9 adox r15, r14 mov rdi, r9 adox rdi, rsi mov rcx, r9 adox rcx, r10 mov r8, [ rsp - 0x50 ] mov [ r8 + 0x8 ], rdi mov r12, r9 adox r12, r11 mov r14, r9 cmovo r14, rdx mov [ r8 + 0x18 ], r12 adcx r15, r14 mov [ r8 + 0x0 ], r15 mov [ r8 + 0x10 ], rcx 
mov rbx, [ rsp - 0x80 ] .cfi_restore rbx mov r12, [ rsp - 0x70 ] .cfi_restore r12 mov r13, [ rsp - 0x68 ] .cfi_restore r13 mov r14, [ rsp - 0x60 ] .cfi_restore r14 mov r15, [ rsp - 0x58 ] .cfi_restore r15 pop rbp .cfi_restore rbp .cfi_adjust_cfa_offset -8 ret .cfi_endproc #if defined(__ELF__) .size fiat_curve25519_adx_mul, .-fiat_curve25519_adx_mul #endif #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/third_party/fiat/asm/fiat_curve25519_adx_square.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \ (defined(__APPLE__) || defined(__ELF__)) .intel_syntax noprefix .text #if defined(__APPLE__) .private_extern _fiat_curve25519_adx_square .global _fiat_curve25519_adx_square _fiat_curve25519_adx_square: #else .type fiat_curve25519_adx_square, @function .hidden fiat_curve25519_adx_square .global fiat_curve25519_adx_square fiat_curve25519_adx_square: #endif .cfi_startproc _CET_ENDBR push rbp .cfi_adjust_cfa_offset 8 .cfi_offset rbp, -16 mov rbp, rsp mov rdx, [ rsi + 0x0 ] mulx r10, rax, [ rsi + 0x8 ] mov rdx, [ rsi + 0x0 ] mulx rcx, r11, [ rsi + 0x10 ] xor rdx, rdx adox r11, r10 mov rdx, [ rsi + 0x0 ] mulx r9, r8, [ rsi + 0x18 ] mov rdx, [ rsi + 0x8 ] mov [ rsp - 0x80 ], rbx .cfi_offset rbx, -16-0x80 mulx rbx, r10, [ rsi + 0x18 ] adox r8, rcx mov [rsp - 0x48 ], rdi adox r10, r9 adcx rax, rax mov rdx, [ rsi + 0x10 ] mulx r9, rcx, [ rsi + 0x18 ] adox rcx, rbx mov rdx, [ rsi + 0x10 ] mulx rdi, rbx, [ rsi + 0x8 ] mov rdx, 0x0 adox r9, rdx mov [ rsp - 0x70 ], r12 .cfi_offset r12, -16-0x70 mov r12, -0x3 inc r12 adox rbx, r8 adox rdi, r10 adcx r11, r11 mov r8, rdx adox r8, rcx mov r10, rdx adox r10, r9 adcx rbx, rbx mov rdx, [ rsi + 0x0 ] mulx r9, rcx, rdx mov rdx, [ rsi + 0x8 ] mov [ rsp - 0x68 ], r13 .cfi_offset r13, -16-0x68 mov [ rsp - 0x60 ], r14 
.cfi_offset r14, -16-0x60 mulx r14, r13, rdx seto dl inc r12 adox r9, rax adox r13, r11 adox r14, rbx adcx rdi, rdi mov al, dl mov rdx, [ rsi + 0x10 ] mulx rbx, r11, rdx adox r11, rdi adcx r8, r8 adox rbx, r8 adcx r10, r10 movzx rdx, al mov rdi, 0x0 adcx rdx, rdi movzx r8, al lea r8, [ r8 + rdx ] mov rdx, [ rsi + 0x18 ] mulx rdi, rax, rdx adox rax, r10 mov rdx, 0x26 mov [ rsp - 0x58 ], r15 .cfi_offset r15, -16-0x58 mulx r15, r10, r11 clc adcx r10, rcx mulx r11, rcx, rbx adox r8, rdi mulx rdi, rbx, r8 inc r12 adox rcx, r9 mulx r8, r9, rax adcx r15, rcx adox r9, r13 adcx r11, r9 adox rbx, r14 adox rdi, r12 adcx r8, rbx adc rdi, 0x0 mulx r14, r13, rdi test al, al mov rdi, [ rsp - 0x48 ] adox r13, r10 mov r14, r12 adox r14, r15 mov [ rdi + 0x8 ], r14 mov rax, r12 adox rax, r11 mov r10, r12 adox r10, r8 mov [ rdi + 0x10 ], rax mov rcx, r12 cmovo rcx, rdx adcx r13, rcx mov [ rdi + 0x0 ], r13 mov [ rdi + 0x18 ], r10 mov rbx, [ rsp - 0x80 ] .cfi_restore rbx mov r12, [ rsp - 0x70 ] .cfi_restore r12 mov r13, [ rsp - 0x68 ] .cfi_restore r13 mov r14, [ rsp - 0x60 ] .cfi_restore r14 mov r15, [ rsp - 0x58 ] .cfi_restore r15 pop rbp .cfi_restore rbp .cfi_adjust_cfa_offset -8 ret .cfi_endproc #if defined(__ELF__) .size fiat_curve25519_adx_square, .-fiat_curve25519_adx_square #endif #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/third_party/fiat/asm/fiat_p256_adx_mul.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \ (defined(__APPLE__) || defined(__ELF__)) .text #if defined(__APPLE__) .private_extern _fiat_p256_adx_mul .global _fiat_p256_adx_mul _fiat_p256_adx_mul: #else .type fiat_p256_adx_mul, @function .hidden fiat_p256_adx_mul .global fiat_p256_adx_mul fiat_p256_adx_mul: #endif .cfi_startproc _CET_ENDBR pushq %rbp ;.cfi_adjust_cfa_offset 8 
.cfi_offset rbp, -16 movq %rsp, %rbp movq %rdx, %rax movq (%rsi), %rdx testb %al, %al mulxq (%rax), %rcx, %r8 movq %rbx, -0x80(%rsp) .cfi_offset rbx, -16-0x80 mulxq 0x8(%rax), %r9, %rbx movq %r14, -0x68(%rsp) .cfi_offset r14, -16-0x68 adcq %r8, %r9 movq %r15, -0x60(%rsp) .cfi_offset r15, -16-0x60 mulxq 0x10(%rax), %r14, %r15 movq %r12, -0x78(%rsp) .cfi_offset r12, -16-0x78 adcq %rbx, %r14 mulxq 0x18(%rax), %r10, %r11 movq %r13, -0x70(%rsp) .cfi_offset r13, -16-0x70 adcq %r15, %r10 movq 0x8(%rsi), %rdx mulxq (%rax), %r8, %rbx adcq $0x0, %r11 xorq %r15, %r15 adcxq %r9, %r8 adoxq %r14, %rbx movq %rdi, -0x58(%rsp) mulxq 0x8(%rax), %r9, %rdi adcxq %rbx, %r9 adoxq %r10, %rdi mulxq 0x10(%rax), %r14, %rbx adcxq %rdi, %r14 adoxq %r11, %rbx mulxq 0x18(%rax), %r12, %r13 adcxq %rbx, %r12 movq $0x100000000, %rdx mulxq %rcx, %r10, %r11 adoxq %r15, %r13 adcxq %r15, %r13 xorq %rdi, %rdi adoxq %r8, %r10 mulxq %r10, %rbx, %r8 adoxq %r9, %r11 adcxq %r11, %rbx adoxq %r14, %r8 movq $0xffffffff00000001, %rdx mulxq %rcx, %r15, %r9 adcxq %r8, %r15 adoxq %r12, %r9 mulxq %r10, %rcx, %r14 movq 0x10(%rsi), %rdx mulxq 0x8(%rax), %r12, %r10 adcxq %r9, %rcx adoxq %r13, %r14 mulxq (%rax), %r13, %r11 movq %rdi, %r9 adcxq %r9, %r14 adoxq %rdi, %rdi adcq $0x0, %rdi xorq %r9, %r9 adcxq %rbx, %r13 adoxq %r15, %r11 movq 0x10(%rsi), %rdx mulxq 0x10(%rax), %r8, %r15 adoxq %rcx, %r10 mulxq 0x18(%rax), %rbx, %rcx movq 0x18(%rsi), %rdx adcxq %r11, %r12 mulxq 0x8(%rax), %r11, %rsi adcxq %r10, %r8 adoxq %r14, %r15 adcxq %r15, %rbx adoxq %r9, %rcx adcxq %r9, %rcx mulxq (%rax), %r10, %r15 addq %rdi, %rcx movq %r9, %r14 adcq $0x0, %r14 xorq %r9, %r9 adcxq %r12, %r10 adoxq %r8, %r15 adcxq %r15, %r11 adoxq %rbx, %rsi mulxq 0x10(%rax), %r12, %r8 adoxq %rcx, %r8 mulxq 0x18(%rax), %rbx, %rcx adcxq %rsi, %r12 adoxq %r9, %rcx movq $0x100000000, %rdx adcxq %r8, %rbx adcq $0x0, %rcx mulxq %r13, %r15, %rdi xorq %rax, %rax adcxq %r14, %rcx adcq $0x0, %rax xorq %r9, %r9 adoxq %r10, %r15 mulxq %r15, %r10, %r14 adoxq %r11, 
%rdi movq $0xffffffff00000001, %rdx adoxq %r12, %r14 adcxq %rdi, %r10 mulxq %r13, %r11, %r12 adcxq %r14, %r11 adoxq %rbx, %r12 mulxq %r15, %r13, %rbx adcxq %r12, %r13 adoxq %rcx, %rbx movq %r9, %r8 adoxq %r9, %rax adcxq %rbx, %r8 adcq $0x0, %rax movq %rax, %rcx movq $0xffffffffffffffff, %r15 movq %r10, %rdi subq %r15, %rdi movq $0xffffffff, %r14 movq %r11, %r12 sbbq %r14, %r12 movq %r13, %rbx sbbq %r9, %rbx movq %rax, %rax movq %r8, %rax sbbq %rdx, %rax sbbq %r9, %rcx cmovcq %r10, %rdi movq -0x58(%rsp), %r10 cmovcq %r13, %rbx movq -0x70(%rsp), %r13 .cfi_restore r13 cmovcq %r11, %r12 cmovcq %r8, %rax movq %rbx, 0x10(%r10) movq -0x80(%rsp), %rbx .cfi_restore rbx movq %rdi, (%r10) movq %r12, 0x8(%r10) movq %rax, 0x18(%r10) movq -0x78(%rsp), %r12 .cfi_restore r12 movq -0x68(%rsp), %r14 .cfi_restore r14 movq -0x60(%rsp), %r15 .cfi_restore r15 popq %rbp .cfi_restore rbp .cfi_adjust_cfa_offset -8 retq .cfi_endproc #if defined(__ELF__) .size fiat_p256_adx_mul, .-fiat_p256_adx_mul #endif #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/third_party/fiat/asm/fiat_p256_adx_sqr.S ================================================ #define BORINGSSL_PREFIX CNIOBoringSSL #include #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \ (defined(__APPLE__) || defined(__ELF__)) .text #if defined(__APPLE__) .private_extern _fiat_p256_adx_sqr .global _fiat_p256_adx_sqr _fiat_p256_adx_sqr: #else .type fiat_p256_adx_sqr, @function .hidden fiat_p256_adx_sqr .global fiat_p256_adx_sqr fiat_p256_adx_sqr: #endif .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset rbp, -16 movq %rsp, %rbp movq (%rsi), %rdx mulxq 0x18(%rsi), %rax, %r10 mulxq %rdx, %r11, %rcx mulxq 0x8(%rsi), %r8, %r9 movq %rbx, -0x80(%rsp) .cfi_offset rbx, -16-0x80 xorq %rbx, %rbx adoxq %r8, %r8 movq %r12, -0x78(%rsp) .cfi_offset r12, -16-0x78 mulxq 0x10(%rsi), %rbx, %r12 movq 
0x8(%rsi), %rdx movq %r13, -0x70(%rsp) .cfi_offset r13, -16-0x70 movq %r14, -0x68(%rsp) .cfi_offset r14, -16-0x68 mulxq %rdx, %r13, %r14 movq %r15, -0x60(%rsp) .cfi_offset r15, -16-0x60 movq %rdi, -0x58(%rsp) mulxq 0x10(%rsi), %r15, %rdi adcxq %r15, %r12 movq %r11, -0x50(%rsp) mulxq 0x18(%rsi), %r15, %r11 adcxq %rdi, %r10 movq $0x0, %rdi adcxq %rdi, %r11 clc adcxq %r9, %rbx adoxq %rbx, %rbx adcxq %r12, %rax adoxq %rax, %rax adcxq %r10, %r15 adoxq %r15, %r15 movq 0x10(%rsi), %rdx mulxq 0x18(%rsi), %r9, %r12 adcxq %r11, %r9 adcxq %rdi, %r12 mulxq %rdx, %r10, %r11 clc adcxq %r8, %rcx adcxq %rbx, %r13 adcxq %rax, %r14 adoxq %r9, %r9 adcxq %r15, %r10 movq 0x18(%rsi), %rdx mulxq %rdx, %r8, %rbx adoxq %r12, %r12 adcxq %r9, %r11 movq -0x50(%rsp), %rsi adcxq %r12, %r8 movq $0x100000000, %rax movq %rax, %rdx mulxq %rsi, %rax, %r15 adcxq %rdi, %rbx adoxq %rdi, %rbx xorq %r9, %r9 adoxq %rcx, %rax adoxq %r13, %r15 mulxq %rax, %rdi, %rcx adcxq %r15, %rdi adoxq %r14, %rcx movq $0xffffffff00000001, %rdx mulxq %rsi, %r13, %r14 adoxq %r10, %r14 adcxq %rcx, %r13 mulxq %rax, %r10, %r12 adoxq %r11, %r12 movq %r9, %r11 adoxq %r8, %r11 adcxq %r14, %r10 movq %r9, %r8 adcxq %r12, %r8 movq %r9, %rax adcxq %r11, %rax movq %r9, %r15 adoxq %rbx, %r15 movq $0x100000000, %rdx mulxq %rdi, %rbx, %rcx movq %r9, %r14 adcxq %r15, %r14 movq %r9, %r12 adoxq %r12, %r12 adcxq %r9, %r12 adoxq %r13, %rbx mulxq %rbx, %r13, %r11 movq $0xffffffff00000001, %r15 movq %r15, %rdx mulxq %rbx, %r15, %rsi adoxq %r10, %rcx adoxq %r8, %r11 mulxq %rdi, %r10, %r8 adcxq %rcx, %r13 adoxq %rax, %r8 adcxq %r11, %r10 adoxq %r14, %rsi movq %r12, %rdi movq %r9, %rax adoxq %rax, %rdi adcxq %r8, %r15 movq %rax, %r14 adcxq %rsi, %r14 adcxq %r9, %rdi decq %r9 movq %r13, %rbx subq %r9, %rbx movq $0xffffffff, %rcx movq %r10, %r11 sbbq %rcx, %r11 movq %r15, %r8 sbbq %rax, %r8 movq %r14, %rsi sbbq %rdx, %rsi sbbq %rax, %rdi cmovcq %r13, %rbx cmovcq %r15, %r8 cmovcq %r10, %r11 cmovcq %r14, %rsi movq -0x58(%rsp), %rdi movq %rsi, 
0x18(%rdi) movq %rbx, (%rdi) movq %r11, 0x8(%rdi) movq %r8, 0x10(%rdi) movq -0x80(%rsp), %rbx .cfi_restore rbx movq -0x78(%rsp), %r12 .cfi_restore r12 movq -0x70(%rsp), %r13 .cfi_restore r13 movq -0x68(%rsp), %r14 .cfi_restore r14 movq -0x60(%rsp), %r15 .cfi_restore r15 popq %rbp .cfi_restore rbp .cfi_adjust_cfa_offset -8 retq .cfi_endproc #if defined(__ELF__) .size fiat_p256_adx_sqr, .-fiat_p256_adx_sqr #endif #endif #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif ================================================ FILE: Sources/CNIOBoringSSL/third_party/fiat/curve25519_32.h ================================================ /* Autogenerated: 'src/ExtractionOCaml/unsaturated_solinas' --inline --static --use-value-barrier 25519 32 '(auto)' '2^255 - 19' carry_mul carry_square carry add sub opp selectznz to_bytes from_bytes relax carry_scmul121666 */ /* curve description: 25519 */ /* machine_wordsize = 32 (from "32") */ /* requested operations: carry_mul, carry_square, carry, add, sub, opp, selectznz, to_bytes, from_bytes, relax, carry_scmul121666 */ /* n = 10 (from "(auto)") */ /* s-c = 2^255 - [(1, 19)] (from "2^255 - 19") */ /* tight_bounds_multiplier = 1 (from "") */ /* */ /* Computed values: */ /* carry_chain = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1] */ /* eval z = z[0] + (z[1] << 26) + (z[2] << 51) + (z[3] << 77) + (z[4] << 102) + (z[5] << 128) + (z[6] << 153) + (z[7] << 179) + (z[8] << 204) + (z[9] << 230) */ /* bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) 
*/ /* balance = [0x7ffffda, 0x3fffffe, 0x7fffffe, 0x3fffffe, 0x7fffffe, 0x3fffffe, 0x7fffffe, 0x3fffffe, 0x7fffffe, 0x3fffffe] */ #include typedef unsigned char fiat_25519_uint1; typedef signed char fiat_25519_int1; #if defined(__GNUC__) || defined(__clang__) # define FIAT_25519_FIAT_INLINE __inline__ #else # define FIAT_25519_FIAT_INLINE #endif /* The type fiat_25519_loose_field_element is a field element with loose bounds. */ /* Bounds: [[0x0 ~> 0xc000000], [0x0 ~> 0x6000000], [0x0 ~> 0xc000000], [0x0 ~> 0x6000000], [0x0 ~> 0xc000000], [0x0 ~> 0x6000000], [0x0 ~> 0xc000000], [0x0 ~> 0x6000000], [0x0 ~> 0xc000000], [0x0 ~> 0x6000000]] */ typedef uint32_t fiat_25519_loose_field_element[10]; /* The type fiat_25519_tight_field_element is a field element with tight bounds. */ /* Bounds: [[0x0 ~> 0x4000000], [0x0 ~> 0x2000000], [0x0 ~> 0x4000000], [0x0 ~> 0x2000000], [0x0 ~> 0x4000000], [0x0 ~> 0x2000000], [0x0 ~> 0x4000000], [0x0 ~> 0x2000000], [0x0 ~> 0x4000000], [0x0 ~> 0x2000000]] */ typedef uint32_t fiat_25519_tight_field_element[10]; #if (-1 & 3) != 3 #error "This code only works on a two's complement system" #endif #if !defined(FIAT_25519_NO_ASM) && (defined(__GNUC__) || defined(__clang__)) static __inline__ uint32_t fiat_25519_value_barrier_u32(uint32_t a) { __asm__("" : "+r"(a) : /* no inputs */); return a; } #else # define fiat_25519_value_barrier_u32(x) (x) #endif /* * The function fiat_25519_addcarryx_u26 is an addition with carry. 
* * Postconditions: * out1 = (arg1 + arg2 + arg3) mod 2^26 * out2 = ⌊(arg1 + arg2 + arg3) / 2^26⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0x3ffffff] * arg3: [0x0 ~> 0x3ffffff] * Output Bounds: * out1: [0x0 ~> 0x3ffffff] * out2: [0x0 ~> 0x1] */ static FIAT_25519_FIAT_INLINE void fiat_25519_addcarryx_u26(uint32_t* out1, fiat_25519_uint1* out2, fiat_25519_uint1 arg1, uint32_t arg2, uint32_t arg3) { uint32_t x1; uint32_t x2; fiat_25519_uint1 x3; x1 = ((arg1 + arg2) + arg3); x2 = (x1 & UINT32_C(0x3ffffff)); x3 = (fiat_25519_uint1)(x1 >> 26); *out1 = x2; *out2 = x3; } /* * The function fiat_25519_subborrowx_u26 is a subtraction with borrow. * * Postconditions: * out1 = (-arg1 + arg2 + -arg3) mod 2^26 * out2 = -⌊(-arg1 + arg2 + -arg3) / 2^26⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0x3ffffff] * arg3: [0x0 ~> 0x3ffffff] * Output Bounds: * out1: [0x0 ~> 0x3ffffff] * out2: [0x0 ~> 0x1] */ static FIAT_25519_FIAT_INLINE void fiat_25519_subborrowx_u26(uint32_t* out1, fiat_25519_uint1* out2, fiat_25519_uint1 arg1, uint32_t arg2, uint32_t arg3) { int32_t x1; fiat_25519_int1 x2; uint32_t x3; x1 = ((int32_t)(arg2 - arg1) - (int32_t)arg3); x2 = (fiat_25519_int1)(x1 >> 26); x3 = (x1 & UINT32_C(0x3ffffff)); *out1 = x3; *out2 = (fiat_25519_uint1)(0x0 - x2); } /* * The function fiat_25519_addcarryx_u25 is an addition with carry. 
* * Postconditions: * out1 = (arg1 + arg2 + arg3) mod 2^25 * out2 = ⌊(arg1 + arg2 + arg3) / 2^25⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0x1ffffff] * arg3: [0x0 ~> 0x1ffffff] * Output Bounds: * out1: [0x0 ~> 0x1ffffff] * out2: [0x0 ~> 0x1] */ static FIAT_25519_FIAT_INLINE void fiat_25519_addcarryx_u25(uint32_t* out1, fiat_25519_uint1* out2, fiat_25519_uint1 arg1, uint32_t arg2, uint32_t arg3) { uint32_t x1; uint32_t x2; fiat_25519_uint1 x3; x1 = ((arg1 + arg2) + arg3); x2 = (x1 & UINT32_C(0x1ffffff)); x3 = (fiat_25519_uint1)(x1 >> 25); *out1 = x2; *out2 = x3; } /* * The function fiat_25519_subborrowx_u25 is a subtraction with borrow. * * Postconditions: * out1 = (-arg1 + arg2 + -arg3) mod 2^25 * out2 = -⌊(-arg1 + arg2 + -arg3) / 2^25⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0x1ffffff] * arg3: [0x0 ~> 0x1ffffff] * Output Bounds: * out1: [0x0 ~> 0x1ffffff] * out2: [0x0 ~> 0x1] */ static FIAT_25519_FIAT_INLINE void fiat_25519_subborrowx_u25(uint32_t* out1, fiat_25519_uint1* out2, fiat_25519_uint1 arg1, uint32_t arg2, uint32_t arg3) { int32_t x1; fiat_25519_int1 x2; uint32_t x3; x1 = ((int32_t)(arg2 - arg1) - (int32_t)arg3); x2 = (fiat_25519_int1)(x1 >> 25); x3 = (x1 & UINT32_C(0x1ffffff)); *out1 = x3; *out2 = (fiat_25519_uint1)(0x0 - x2); } /* * The function fiat_25519_cmovznz_u32 is a single-word conditional move. 
 *
 * Postconditions:
 *   out1 = (if arg1 = 0 then arg2 else arg3)
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0x1]
 *   arg2: [0x0 ~> 0xffffffff]
 *   arg3: [0x0 ~> 0xffffffff]
 * Output Bounds:
 *   out1: [0x0 ~> 0xffffffff]
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_cmovznz_u32(uint32_t* out1, fiat_25519_uint1 arg1, uint32_t arg2, uint32_t arg3) {
  fiat_25519_uint1 x1;
  uint32_t x2;
  uint32_t x3;
  x1 = (!(!arg1));
  /* x2 is an all-zeros (arg1 == 0) or all-ones (arg1 == 1) mask. */
  x2 = ((fiat_25519_int1)(0x0 - x1) & UINT32_C(0xffffffff));
  /* The value barriers keep the mask opaque to the optimizer so the
     selection stays a branch-free mask-and-or (no data-dependent branch). */
  x3 = ((fiat_25519_value_barrier_u32(x2) & arg3) | (fiat_25519_value_barrier_u32((~x2)) & arg2));
  *out1 = x3;
}

/*
 * The function fiat_25519_carry_mul multiplies two field elements and reduces the result.
 *
 * Postconditions:
 *   eval out1 mod m = (eval arg1 * eval arg2) mod m
 *
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_carry_mul(fiat_25519_tight_field_element out1, const fiat_25519_loose_field_element arg1, const fiat_25519_loose_field_element arg2) {
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6;
  uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12;
  uint64_t x13; uint64_t x14; uint64_t x15; uint64_t x16; uint64_t x17; uint64_t x18;
  uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint64_t x24;
  uint64_t x25; uint64_t x26; uint64_t x27; uint64_t x28; uint64_t x29; uint64_t x30;
  uint64_t x31; uint64_t x32; uint64_t x33; uint64_t x34; uint64_t x35; uint64_t x36;
  uint64_t x37; uint64_t x38; uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42;
  uint64_t x43; uint64_t x44; uint64_t x45; uint64_t x46; uint64_t x47; uint64_t x48;
  uint64_t x49; uint64_t x50; uint64_t x51; uint64_t x52; uint64_t x53; uint64_t x54;
  uint64_t x55; uint64_t x56; uint64_t x57; uint64_t x58; uint64_t x59; uint64_t x60;
  uint64_t x61; uint64_t x62; uint64_t x63; uint64_t x64; uint64_t x65; uint64_t x66;
  uint64_t x67; uint64_t x68; uint64_t x69; uint64_t x70; uint64_t x71; uint64_t x72;
  uint64_t x73; uint64_t x74; uint64_t x75; uint64_t x76; uint64_t x77; uint64_t x78;
  uint64_t x79; uint64_t x80; uint64_t x81; uint64_t x82; uint64_t x83; uint64_t x84;
  uint64_t x85; uint64_t x86; uint64_t x87; uint64_t x88; uint64_t x89; uint64_t x90;
  uint64_t x91; uint64_t x92; uint64_t x93; uint64_t x94; uint64_t x95; uint64_t x96;
  uint64_t x97; uint64_t x98; uint64_t x99; uint64_t x100; uint64_t x101; uint64_t x102;
  uint32_t x103;
  uint64_t x104; uint64_t x105; uint64_t x106; uint64_t x107; uint64_t x108;
  uint64_t x109; uint64_t x110; uint64_t x111; uint64_t x112; uint64_t x113; uint64_t x114;
  uint32_t x115;
  uint64_t x116; uint64_t x117;
  uint32_t x118;
  uint64_t x119; uint64_t x120;
  uint32_t x121;
  uint64_t x122; uint64_t x123;
  uint32_t x124;
  uint64_t x125; uint64_t x126;
  uint32_t x127;
  uint64_t x128; uint64_t x129;
  uint32_t x130;
  uint64_t x131; uint64_t x132;
  uint32_t x133;
  uint64_t x134; uint64_t x135;
  uint32_t x136;
  uint64_t x137; uint64_t x138;
  uint32_t x139;
  uint64_t x140; uint64_t x141;
  uint32_t x142; uint32_t x143; uint32_t x144;
  fiat_25519_uint1 x145;
  uint32_t x146; uint32_t x147;
  /* Limbs are in mixed radix: even limbs hold 26 bits, odd limbs 25 bits
     (10 limbs ~ 2^255).  Partial products that overflow limb 9 wrap around
     via 2^255 ≡ 19 (mod m), hence the 0x13 (19) and 0x26 (38 = 2*19)
     factors; 38 compensates for the bit lost when two odd (25-bit)
     limb positions combine. */
  x1 = ((uint64_t)(arg1[9]) * ((arg2[9]) * UINT8_C(0x26)));
  x2 = ((uint64_t)(arg1[9]) * ((arg2[8]) * UINT8_C(0x13)));
  x3 = ((uint64_t)(arg1[9]) * ((arg2[7]) * UINT8_C(0x26)));
  x4 = ((uint64_t)(arg1[9]) * ((arg2[6]) * UINT8_C(0x13)));
  x5 = ((uint64_t)(arg1[9]) * ((arg2[5]) * UINT8_C(0x26)));
  x6 = ((uint64_t)(arg1[9]) * ((arg2[4]) * UINT8_C(0x13)));
  x7 = ((uint64_t)(arg1[9]) * ((arg2[3]) * UINT8_C(0x26)));
  x8 = ((uint64_t)(arg1[9]) * ((arg2[2]) * UINT8_C(0x13)));
  x9 = ((uint64_t)(arg1[9]) * ((arg2[1]) * UINT8_C(0x26)));
  x10 = ((uint64_t)(arg1[8]) * ((arg2[9]) * UINT8_C(0x13)));
  x11 = ((uint64_t)(arg1[8]) * ((arg2[8]) * UINT8_C(0x13)));
  x12 = ((uint64_t)(arg1[8]) * ((arg2[7]) * UINT8_C(0x13)));
  x13 = ((uint64_t)(arg1[8]) * ((arg2[6]) * UINT8_C(0x13)));
  x14 = ((uint64_t)(arg1[8]) * ((arg2[5]) * UINT8_C(0x13)));
  x15 = ((uint64_t)(arg1[8]) * ((arg2[4]) * UINT8_C(0x13)));
  x16 = ((uint64_t)(arg1[8]) * ((arg2[3]) * UINT8_C(0x13)));
  x17 = ((uint64_t)(arg1[8]) * ((arg2[2]) * UINT8_C(0x13)));
  x18 = ((uint64_t)(arg1[7]) * ((arg2[9]) * UINT8_C(0x26)));
  x19 = ((uint64_t)(arg1[7]) * ((arg2[8]) * UINT8_C(0x13)));
  x20 = ((uint64_t)(arg1[7]) * ((arg2[7]) * UINT8_C(0x26)));
  x21 = ((uint64_t)(arg1[7]) * ((arg2[6]) * UINT8_C(0x13)));
  x22 = ((uint64_t)(arg1[7]) * ((arg2[5]) * UINT8_C(0x26)));
  x23 = ((uint64_t)(arg1[7]) * ((arg2[4]) * UINT8_C(0x13)));
  x24 = ((uint64_t)(arg1[7]) * ((arg2[3]) * UINT8_C(0x26)));
  x25 = ((uint64_t)(arg1[6]) * ((arg2[9]) * UINT8_C(0x13)));
  x26 = ((uint64_t)(arg1[6]) * ((arg2[8]) * UINT8_C(0x13)));
  x27 = ((uint64_t)(arg1[6]) * ((arg2[7]) * UINT8_C(0x13)));
  x28 = ((uint64_t)(arg1[6]) * ((arg2[6]) * UINT8_C(0x13)));
  x29 = ((uint64_t)(arg1[6]) * ((arg2[5]) * UINT8_C(0x13)));
  x30 = ((uint64_t)(arg1[6]) * ((arg2[4]) * UINT8_C(0x13)));
  x31 = ((uint64_t)(arg1[5]) * ((arg2[9]) * UINT8_C(0x26)));
  x32 = ((uint64_t)(arg1[5]) * ((arg2[8]) * UINT8_C(0x13)));
  x33 = ((uint64_t)(arg1[5]) * ((arg2[7]) * UINT8_C(0x26)));
  x34 = ((uint64_t)(arg1[5]) * ((arg2[6]) * UINT8_C(0x13)));
  x35 = ((uint64_t)(arg1[5]) * ((arg2[5]) * UINT8_C(0x26)));
  x36 = ((uint64_t)(arg1[4]) * ((arg2[9]) * UINT8_C(0x13)));
  x37 = ((uint64_t)(arg1[4]) * ((arg2[8]) * UINT8_C(0x13)));
  x38 = ((uint64_t)(arg1[4]) * ((arg2[7]) * UINT8_C(0x13)));
  x39 = ((uint64_t)(arg1[4]) * ((arg2[6]) * UINT8_C(0x13)));
  x40 = ((uint64_t)(arg1[3]) * ((arg2[9]) * UINT8_C(0x26)));
  x41 = ((uint64_t)(arg1[3]) * ((arg2[8]) * UINT8_C(0x13)));
  x42 = ((uint64_t)(arg1[3]) * ((arg2[7]) * UINT8_C(0x26)));
  x43 = ((uint64_t)(arg1[2]) * ((arg2[9]) * UINT8_C(0x13)));
  x44 = ((uint64_t)(arg1[2]) * ((arg2[8]) * UINT8_C(0x13)));
  x45 = ((uint64_t)(arg1[1]) * ((arg2[9]) * UINT8_C(0x26)));
  /* Non-wrapping partial products; the occasional "* 0x2" restores the
     bit dropped when two odd (25-bit) limb positions are multiplied. */
  x46 = ((uint64_t)(arg1[9]) * (arg2[0]));
  x47 = ((uint64_t)(arg1[8]) * (arg2[1]));
  x48 = ((uint64_t)(arg1[8]) * (arg2[0]));
  x49 = ((uint64_t)(arg1[7]) * (arg2[2]));
  x50 = ((uint64_t)(arg1[7]) * ((arg2[1]) * 0x2));
  x51 = ((uint64_t)(arg1[7]) * (arg2[0]));
  x52 = ((uint64_t)(arg1[6]) * (arg2[3]));
  x53 = ((uint64_t)(arg1[6]) * (arg2[2]));
  x54 = ((uint64_t)(arg1[6]) * (arg2[1]));
  x55 = ((uint64_t)(arg1[6]) * (arg2[0]));
  x56 = ((uint64_t)(arg1[5]) * (arg2[4]));
  x57 = ((uint64_t)(arg1[5]) * ((arg2[3]) * 0x2));
  x58 = ((uint64_t)(arg1[5]) * (arg2[2]));
  x59 = ((uint64_t)(arg1[5]) * ((arg2[1]) * 0x2));
  x60 = ((uint64_t)(arg1[5]) * (arg2[0]));
  x61 = ((uint64_t)(arg1[4]) * (arg2[5]));
  x62 = ((uint64_t)(arg1[4]) * (arg2[4]));
  x63 = ((uint64_t)(arg1[4]) * (arg2[3]));
  x64 = ((uint64_t)(arg1[4]) * (arg2[2]));
  x65 = ((uint64_t)(arg1[4]) * (arg2[1]));
  x66 = ((uint64_t)(arg1[4]) * (arg2[0]));
  x67 = ((uint64_t)(arg1[3]) * (arg2[6]));
  x68 = ((uint64_t)(arg1[3]) * ((arg2[5]) * 0x2));
  x69 = ((uint64_t)(arg1[3]) * (arg2[4]));
  x70 = ((uint64_t)(arg1[3]) * ((arg2[3]) * 0x2));
  x71 = ((uint64_t)(arg1[3]) * (arg2[2]));
  x72 = ((uint64_t)(arg1[3]) * ((arg2[1]) * 0x2));
  x73 = ((uint64_t)(arg1[3]) * (arg2[0]));
  x74 = ((uint64_t)(arg1[2]) * (arg2[7]));
  x75 = ((uint64_t)(arg1[2]) * (arg2[6]));
  x76 = ((uint64_t)(arg1[2]) * (arg2[5]));
  x77 = ((uint64_t)(arg1[2]) * (arg2[4]));
  x78 = ((uint64_t)(arg1[2]) * (arg2[3]));
  x79 = ((uint64_t)(arg1[2]) * (arg2[2]));
  x80 = ((uint64_t)(arg1[2]) * (arg2[1]));
  x81 = ((uint64_t)(arg1[2]) * (arg2[0]));
  x82 = ((uint64_t)(arg1[1]) * (arg2[8]));
  x83 = ((uint64_t)(arg1[1]) * ((arg2[7]) * 0x2));
  x84 = ((uint64_t)(arg1[1]) * (arg2[6]));
  x85 = ((uint64_t)(arg1[1]) * ((arg2[5]) * 0x2));
  x86 = ((uint64_t)(arg1[1]) * (arg2[4]));
  x87 = ((uint64_t)(arg1[1]) * ((arg2[3]) * 0x2));
  x88 = ((uint64_t)(arg1[1]) * (arg2[2]));
  x89 = ((uint64_t)(arg1[1]) * ((arg2[1]) * 0x2));
  x90 = ((uint64_t)(arg1[1]) * (arg2[0]));
  x91 = ((uint64_t)(arg1[0]) * (arg2[9]));
  x92 = ((uint64_t)(arg1[0]) * (arg2[8]));
  x93 = ((uint64_t)(arg1[0]) * (arg2[7]));
  x94 = ((uint64_t)(arg1[0]) * (arg2[6]));
  x95 = ((uint64_t)(arg1[0]) * (arg2[5]));
  x96 = ((uint64_t)(arg1[0]) * (arg2[4]));
  x97 = ((uint64_t)(arg1[0]) * (arg2[3]));
  x98 = ((uint64_t)(arg1[0]) * (arg2[2]));
  x99 = ((uint64_t)(arg1[0]) * (arg2[1]));
  x100 = ((uint64_t)(arg1[0]) * (arg2[0]));
  /* Column sums per output limb, then a single carry sweep. */
  x101 = (x100 + (x45 + (x44 + (x42 + (x39 + (x35 + (x30 + (x24 + (x17 + x9)))))))));
  x102 = (x101 >> 26);
  x103 = (uint32_t)(x101 & UINT32_C(0x3ffffff));
  x104 = (x91 + (x82 + (x74 + (x67 + (x61 + (x56 + (x52 + (x49 + (x47 + x46)))))))));
  x105 = (x92 + (x83 + (x75 + (x68 + (x62 + (x57 + (x53 + (x50 + (x48 + x1)))))))));
  x106 = (x93 + (x84 + (x76 + (x69 + (x63 + (x58 + (x54 + (x51 + (x10 + x2)))))))));
  x107 = (x94 + (x85 + (x77 + (x70 + (x64 + (x59 + (x55 + (x18 + (x11 + x3)))))))));
  x108 = (x95 + (x86 + (x78 + (x71 + (x65 + (x60 + (x25 + (x19 + (x12 + x4)))))))));
  x109 = (x96 + (x87 + (x79 + (x72 + (x66 + (x31 + (x26 + (x20 + (x13 + x5)))))))));
  x110 = (x97 + (x88 + (x80 + (x73 + (x36 + (x32 + (x27 + (x21 + (x14 + x6)))))))));
  x111 = (x98 + (x89 + (x81 + (x40 + (x37 + (x33 + (x28 + (x22 + (x15 + x7)))))))));
  x112 = (x99 + (x90 + (x43 + (x41 + (x38 + (x34 + (x29 + (x23 + (x16 + x8)))))))));
  x113 = (x102 + x112);
  x114 = (x113 >> 25);
  x115 = (uint32_t)(x113 & UINT32_C(0x1ffffff));
  x116 = (x114 + x111);
  x117 = (x116 >> 26);
  x118 = (uint32_t)(x116 & UINT32_C(0x3ffffff));
  x119 = (x117 + x110);
  x120 = (x119 >> 25);
  x121 = (uint32_t)(x119 & UINT32_C(0x1ffffff));
  x122 = (x120 + x109);
  x123 = (x122 >> 26);
  x124 = (uint32_t)(x122 & UINT32_C(0x3ffffff));
  x125 = (x123 + x108);
  x126 = (x125 >> 25);
  x127 = (uint32_t)(x125 & UINT32_C(0x1ffffff));
  x128 = (x126 + x107);
  x129 = (x128 >> 26);
  x130 = (uint32_t)(x128 & UINT32_C(0x3ffffff));
  x131 = (x129 + x106);
  x132 = (x131 >> 25);
  x133 = (uint32_t)(x131 & UINT32_C(0x1ffffff));
  x134 = (x132 + x105);
  x135 = (x134 >> 26);
  x136 = (uint32_t)(x134 & UINT32_C(0x3ffffff));
  x137 = (x135 + x104);
  x138 = (x137 >> 25);
  x139 = (uint32_t)(x137 & UINT32_C(0x1ffffff));
  /* Fold the final carry back into limb 0 via 2^255 ≡ 19 (mod m). */
  x140 = (x138 * UINT8_C(0x13));
  x141 = (x103 + x140);
  x142 = (uint32_t)(x141 >> 26);
  x143 = (uint32_t)(x141 & UINT32_C(0x3ffffff));
  x144 = (x142 + x115);
  x145 = (fiat_25519_uint1)(x144 >> 25);
  x146 = (x144 & UINT32_C(0x1ffffff));
  x147 = (x145 + x118);
  out1[0] = x143;
  out1[1] = x146;
  out1[2] = x147;
  out1[3] = x121;
  out1[4] = x124;
  out1[5] = x127;
  out1[6] = x130;
  out1[7] = x133;
  out1[8] = x136;
  out1[9] = x139;
}

/*
 * The function fiat_25519_carry_square squares a field element and reduces the result.
 *
 * Postconditions:
 *   eval out1 mod m = (eval arg1 * eval arg1) mod m
 *
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_carry_square(fiat_25519_tight_field_element out1, const fiat_25519_loose_field_element arg1) {
  uint32_t x1; uint32_t x2; uint32_t x3; uint32_t x4;
  uint64_t x5;
  uint32_t x6; uint32_t x7; uint32_t x8; uint32_t x9; uint32_t x10;
  uint64_t x11;
  uint32_t x12; uint32_t x13; uint32_t x14; uint32_t x15; uint32_t x16; uint32_t x17; uint32_t x18;
  uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint64_t x24;
  uint64_t x25; uint64_t x26; uint64_t x27; uint64_t x28; uint64_t x29; uint64_t x30;
  uint64_t x31; uint64_t x32; uint64_t x33; uint64_t x34; uint64_t x35; uint64_t x36;
  uint64_t x37; uint64_t x38; uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42;
  uint64_t x43; uint64_t x44; uint64_t x45; uint64_t x46; uint64_t x47; uint64_t x48;
  uint64_t x49; uint64_t x50; uint64_t x51; uint64_t x52; uint64_t x53; uint64_t x54;
  uint64_t x55; uint64_t x56; uint64_t x57; uint64_t x58; uint64_t x59; uint64_t x60;
  uint64_t x61; uint64_t x62; uint64_t x63; uint64_t x64; uint64_t x65; uint64_t x66;
  uint64_t x67; uint64_t x68; uint64_t x69; uint64_t x70; uint64_t x71; uint64_t x72;
  uint64_t x73; uint64_t x74; uint64_t x75;
  uint32_t x76;
  uint64_t x77; uint64_t x78; uint64_t x79; uint64_t x80; uint64_t x81; uint64_t x82;
  uint64_t x83; uint64_t x84; uint64_t x85; uint64_t x86; uint64_t x87;
  uint32_t x88;
  uint64_t x89; uint64_t x90;
  uint32_t x91;
  uint64_t x92; uint64_t x93;
  uint32_t x94;
  uint64_t x95; uint64_t x96;
  uint32_t x97;
  uint64_t x98; uint64_t x99;
  uint32_t x100;
  uint64_t x101; uint64_t x102;
  uint32_t x103;
  uint64_t x104; uint64_t x105;
  uint32_t x106;
  uint64_t x107; uint64_t x108;
  uint32_t x109;
  uint64_t x110; uint64_t x111;
  uint32_t x112;
  uint64_t x113; uint64_t x114;
  uint32_t x115; uint32_t x116; uint32_t x117;
  fiat_25519_uint1 x118;
  uint32_t x119; uint32_t x120;
  /* Precomputed scaled limbs: ×19 (0x13) folds wrap-around terms
     (2^255 ≡ 19 mod m) and ×2 accounts for symmetric cross terms
     of the square and for odd-limb alignment. */
  x1 = ((arg1[9]) * UINT8_C(0x13));
  x2 = (x1 * 0x2);
  x3 = ((arg1[9]) * 0x2);
  x4 = ((arg1[8]) * UINT8_C(0x13));
  x5 = ((uint64_t)x4 * 0x2);
  x6 = ((arg1[8]) * 0x2);
  x7 = ((arg1[7]) * UINT8_C(0x13));
  x8 = (x7 * 0x2);
  x9 = ((arg1[7]) * 0x2);
  x10 = ((arg1[6]) * UINT8_C(0x13));
  x11 = ((uint64_t)x10 * 0x2);
  x12 = ((arg1[6]) * 0x2);
  x13 = ((arg1[5]) * UINT8_C(0x13));
  x14 = ((arg1[5]) * 0x2);
  x15 = ((arg1[4]) * 0x2);
  x16 = ((arg1[3]) * 0x2);
  x17 = ((arg1[2]) * 0x2);
  x18 = ((arg1[1]) * 0x2);
  x19 = ((uint64_t)(arg1[9]) * (x1 * 0x2));
  x20 = ((uint64_t)(arg1[8]) * x2);
  x21 = ((uint64_t)(arg1[8]) * x4);
  x22 = ((arg1[7]) * ((uint64_t)x2 * 0x2));
  x23 = ((arg1[7]) * x5);
  x24 = ((uint64_t)(arg1[7]) * (x7 * 0x2));
  x25 = ((uint64_t)(arg1[6]) * x2);
  x26 = ((arg1[6]) * x5);
  x27 = ((uint64_t)(arg1[6]) * x8);
  x28 = ((uint64_t)(arg1[6]) * x10);
  x29 = ((arg1[5]) * ((uint64_t)x2 * 0x2));
  x30 = ((arg1[5]) * x5);
  x31 = ((arg1[5]) * ((uint64_t)x8 * 0x2));
  x32 = ((arg1[5]) * x11);
  x33 = ((uint64_t)(arg1[5]) * (x13 * 0x2));
  x34 = ((uint64_t)(arg1[4]) * x2);
  x35 = ((arg1[4]) * x5);
  x36 = ((uint64_t)(arg1[4]) * x8);
  x37 = ((arg1[4]) * x11);
  x38 = ((uint64_t)(arg1[4]) * x14);
  x39 = ((uint64_t)(arg1[4]) * (arg1[4]));
  x40 = ((arg1[3]) * ((uint64_t)x2 * 0x2));
  x41 = ((arg1[3]) * x5);
  x42 = ((arg1[3]) * ((uint64_t)x8 * 0x2));
  x43 = ((uint64_t)(arg1[3]) * x12);
  x44 = ((uint64_t)(arg1[3]) * (x14 * 0x2));
  x45 = ((uint64_t)(arg1[3]) * x15);
  x46 = ((uint64_t)(arg1[3]) * ((arg1[3]) * 0x2));
  x47 = ((uint64_t)(arg1[2]) * x2);
  x48 = ((arg1[2]) * x5);
  x49 = ((uint64_t)(arg1[2]) * x9);
  x50 = ((uint64_t)(arg1[2]) * x12);
  x51 = ((uint64_t)(arg1[2]) * x14);
  x52 = ((uint64_t)(arg1[2]) * x15);
  x53 = ((uint64_t)(arg1[2]) * x16);
  x54 = ((uint64_t)(arg1[2]) * (arg1[2]));
  x55 = ((arg1[1]) * ((uint64_t)x2 * 0x2));
  x56 = ((uint64_t)(arg1[1]) * x6);
  x57 = ((uint64_t)(arg1[1]) * (x9 * 0x2));
  x58 = ((uint64_t)(arg1[1]) * x12);
  x59 = ((uint64_t)(arg1[1]) * (x14 * 0x2));
  x60 = ((uint64_t)(arg1[1]) * x15);
  x61 = ((uint64_t)(arg1[1]) * (x16 * 0x2));
  x62 = ((uint64_t)(arg1[1]) * x17);
  x63 = ((uint64_t)(arg1[1]) * ((arg1[1]) * 0x2));
  x64 = ((uint64_t)(arg1[0]) * x3);
  x65 = ((uint64_t)(arg1[0]) * x6);
  x66 = ((uint64_t)(arg1[0]) * x9);
  x67 = ((uint64_t)(arg1[0]) * x12);
  x68 = ((uint64_t)(arg1[0]) * x14);
  x69 = ((uint64_t)(arg1[0]) * x15);
  x70 = ((uint64_t)(arg1[0]) * x16);
  x71 = ((uint64_t)(arg1[0]) * x17);
  x72 = ((uint64_t)(arg1[0]) * x18);
  x73 = ((uint64_t)(arg1[0]) * (arg1[0]));
  /* Column sums per output limb, then the carry sweep. */
  x74 = (x73 + (x55 + (x48 + (x42 + (x37 + x33)))));
  x75 = (x74 >> 26);
  x76 = (uint32_t)(x74 & UINT32_C(0x3ffffff));
  x77 = (x64 + (x56 + (x49 + (x43 + x38))));
  x78 = (x65 + (x57 + (x50 + (x44 + (x39 + x19)))));
  x79 = (x66 + (x58 + (x51 + (x45 + x20))));
  x80 = (x67 + (x59 + (x52 + (x46 + (x22 + x21)))));
  x81 = (x68 + (x60 + (x53 + (x25 + x23))));
  x82 = (x69 + (x61 + (x54 + (x29 + (x26 + x24)))));
  x83 = (x70 + (x62 + (x34 + (x30 + x27))));
  x84 = (x71 + (x63 + (x40 + (x35 + (x31 + x28)))));
  x85 = (x72 + (x47 + (x41 + (x36 + x32))));
  x86 = (x75 + x85);
  x87 = (x86 >> 25);
  x88 = (uint32_t)(x86 & UINT32_C(0x1ffffff));
  x89 = (x87 + x84);
  x90 = (x89 >> 26);
  x91 = (uint32_t)(x89 & UINT32_C(0x3ffffff));
  x92 = (x90 + x83);
  x93 = (x92 >> 25);
  x94 = (uint32_t)(x92 & UINT32_C(0x1ffffff));
  x95 = (x93 + x82);
  x96 = (x95 >> 26);
  x97 = (uint32_t)(x95 & UINT32_C(0x3ffffff));
  x98 = (x96 + x81);
  x99 = (x98 >> 25);
  x100 = (uint32_t)(x98 & UINT32_C(0x1ffffff));
  x101 = (x99 + x80);
  x102 = (x101 >> 26);
  x103 = (uint32_t)(x101 & UINT32_C(0x3ffffff));
  x104 = (x102 + x79);
  x105 = (x104 >> 25);
  x106 = (uint32_t)(x104 & UINT32_C(0x1ffffff));
  x107 = (x105 + x78);
  x108 = (x107 >> 26);
  x109 = (uint32_t)(x107 & UINT32_C(0x3ffffff));
  x110 = (x108 + x77);
  x111 = (x110 >> 25);
  x112 = (uint32_t)(x110 & UINT32_C(0x1ffffff));
  /* Fold the final carry back into limb 0 via 2^255 ≡ 19 (mod m). */
  x113 = (x111 * UINT8_C(0x13));
  x114 = (x76 + x113);
  x115 = (uint32_t)(x114 >> 26);
  x116 = (uint32_t)(x114 & UINT32_C(0x3ffffff));
  x117 = (x115 + x88);
  x118 = (fiat_25519_uint1)(x117 >> 25);
  x119 = (x117 & UINT32_C(0x1ffffff));
  x120 = (x118 + x91);
  out1[0] = x116;
  out1[1] = x119;
  out1[2] = x120;
  out1[3] = x94;
  out1[4] = x97;
  out1[5] = x100;
  out1[6] = x103;
  out1[7] = x106;
  out1[8] = x109;
  out1[9] = x112;
}

/*
 * The function fiat_25519_carry reduces a field element.
 *
 * Postconditions:
 *   eval out1 mod m = eval arg1 mod m
 *
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_carry(fiat_25519_tight_field_element out1, const fiat_25519_loose_field_element arg1) {
  uint32_t x1; uint32_t x2; uint32_t x3; uint32_t x4; uint32_t x5; uint32_t x6;
  uint32_t x7; uint32_t x8; uint32_t x9; uint32_t x10; uint32_t x11; uint32_t x12;
  uint32_t x13; uint32_t x14; uint32_t x15; uint32_t x16; uint32_t x17; uint32_t x18;
  uint32_t x19; uint32_t x20; uint32_t x21; uint32_t x22;
  /* One carry pass up the limbs (alternating 26/25-bit widths)... */
  x1 = (arg1[0]);
  x2 = ((x1 >> 26) + (arg1[1]));
  x3 = ((x2 >> 25) + (arg1[2]));
  x4 = ((x3 >> 26) + (arg1[3]));
  x5 = ((x4 >> 25) + (arg1[4]));
  x6 = ((x5 >> 26) + (arg1[5]));
  x7 = ((x6 >> 25) + (arg1[6]));
  x8 = ((x7 >> 26) + (arg1[7]));
  x9 = ((x8 >> 25) + (arg1[8]));
  x10 = ((x9 >> 26) + (arg1[9]));
  /* ...then the carry out of limb 9 wraps to limb 0 scaled by 19
     (2^255 ≡ 19 mod m), followed by two short fix-up carries. */
  x11 = ((x1 & UINT32_C(0x3ffffff)) + ((x10 >> 25) * UINT8_C(0x13)));
  x12 = ((fiat_25519_uint1)(x11 >> 26) + (x2 & UINT32_C(0x1ffffff)));
  x13 = (x11 & UINT32_C(0x3ffffff));
  x14 = (x12 & UINT32_C(0x1ffffff));
  x15 = ((fiat_25519_uint1)(x12 >> 25) + (x3 & UINT32_C(0x3ffffff)));
  x16 = (x4 & UINT32_C(0x1ffffff));
  x17 = (x5 & UINT32_C(0x3ffffff));
  x18 = (x6 & UINT32_C(0x1ffffff));
  x19 = (x7 & UINT32_C(0x3ffffff));
  x20 = (x8 & UINT32_C(0x1ffffff));
  x21 = (x9 & UINT32_C(0x3ffffff));
  x22 = (x10 & UINT32_C(0x1ffffff));
  out1[0] = x13;
  out1[1] = x14;
  out1[2] = x15;
  out1[3] = x16;
  out1[4] = x17;
  out1[5] = x18;
  out1[6] = x19;
  out1[7] = x20;
  out1[8] = x21;
  out1[9] = x22;
}

/*
 * The function fiat_25519_add adds two field elements.
 *
 * Postconditions:
 *   eval out1 mod m = (eval arg1 + eval arg2) mod m
 *
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_add(fiat_25519_loose_field_element out1, const fiat_25519_tight_field_element arg1, const fiat_25519_tight_field_element arg2) {
  uint32_t x1; uint32_t x2; uint32_t x3; uint32_t x4; uint32_t x5;
  uint32_t x6; uint32_t x7; uint32_t x8; uint32_t x9; uint32_t x10;
  /* Plain limb-wise addition with no carry propagation; the result is
     only a "loose" element and must be carried before serialization. */
  x1 = ((arg1[0]) + (arg2[0]));
  x2 = ((arg1[1]) + (arg2[1]));
  x3 = ((arg1[2]) + (arg2[2]));
  x4 = ((arg1[3]) + (arg2[3]));
  x5 = ((arg1[4]) + (arg2[4]));
  x6 = ((arg1[5]) + (arg2[5]));
  x7 = ((arg1[6]) + (arg2[6]));
  x8 = ((arg1[7]) + (arg2[7]));
  x9 = ((arg1[8]) + (arg2[8]));
  x10 = ((arg1[9]) + (arg2[9]));
  out1[0] = x1;
  out1[1] = x2;
  out1[2] = x3;
  out1[3] = x4;
  out1[4] = x5;
  out1[5] = x6;
  out1[6] = x7;
  out1[7] = x8;
  out1[8] = x9;
  out1[9] = x10;
}

/*
 * The function fiat_25519_sub subtracts two field elements.
 *
 * Postconditions:
 *   eval out1 mod m = (eval arg1 - eval arg2) mod m
 *
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_sub(fiat_25519_loose_field_element out1, const fiat_25519_tight_field_element arg1, const fiat_25519_tight_field_element arg2) {
  uint32_t x1; uint32_t x2; uint32_t x3; uint32_t x4; uint32_t x5;
  uint32_t x6; uint32_t x7; uint32_t x8; uint32_t x9; uint32_t x10;
  /* The constants are the limbs of 2*m (0x7ffffda = 2*(2^26 - 19),
     then alternating 2^26 - 2 and 2^27 - 2), added so each limb
     difference stays non-negative without changing the value mod m. */
  x1 = ((UINT32_C(0x7ffffda) + (arg1[0])) - (arg2[0]));
  x2 = ((UINT32_C(0x3fffffe) + (arg1[1])) - (arg2[1]));
  x3 = ((UINT32_C(0x7fffffe) + (arg1[2])) - (arg2[2]));
  x4 = ((UINT32_C(0x3fffffe) + (arg1[3])) - (arg2[3]));
  x5 = ((UINT32_C(0x7fffffe) + (arg1[4])) - (arg2[4]));
  x6 = ((UINT32_C(0x3fffffe) + (arg1[5])) - (arg2[5]));
  x7 = ((UINT32_C(0x7fffffe) + (arg1[6])) - (arg2[6]));
  x8 = ((UINT32_C(0x3fffffe) + (arg1[7])) - (arg2[7]));
  x9 = ((UINT32_C(0x7fffffe) + (arg1[8])) - (arg2[8]));
  x10 = ((UINT32_C(0x3fffffe) + (arg1[9])) - (arg2[9]));
  out1[0] = x1;
  out1[1] = x2;
  out1[2] = x3;
  out1[3] = x4;
  out1[4] = x5;
  out1[5] = x6;
  out1[6] = x7;
  out1[7] = x8;
  out1[8] = x9;
  out1[9] = x10;
}

/*
 * The function fiat_25519_opp negates a
 * field element.
 *
 * Postconditions:
 *   eval out1 mod m = -eval arg1 mod m
 *
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_opp(fiat_25519_loose_field_element out1, const fiat_25519_tight_field_element arg1) {
  uint32_t x1; uint32_t x2; uint32_t x3; uint32_t x4; uint32_t x5;
  uint32_t x6; uint32_t x7; uint32_t x8; uint32_t x9; uint32_t x10;
  /* Negation as 2*m - arg1, limb-wise: same constants as fiat_25519_sub,
     keeping every limb non-negative. */
  x1 = (UINT32_C(0x7ffffda) - (arg1[0]));
  x2 = (UINT32_C(0x3fffffe) - (arg1[1]));
  x3 = (UINT32_C(0x7fffffe) - (arg1[2]));
  x4 = (UINT32_C(0x3fffffe) - (arg1[3]));
  x5 = (UINT32_C(0x7fffffe) - (arg1[4]));
  x6 = (UINT32_C(0x3fffffe) - (arg1[5]));
  x7 = (UINT32_C(0x7fffffe) - (arg1[6]));
  x8 = (UINT32_C(0x3fffffe) - (arg1[7]));
  x9 = (UINT32_C(0x7fffffe) - (arg1[8]));
  x10 = (UINT32_C(0x3fffffe) - (arg1[9]));
  out1[0] = x1;
  out1[1] = x2;
  out1[2] = x3;
  out1[3] = x4;
  out1[4] = x5;
  out1[5] = x6;
  out1[6] = x7;
  out1[7] = x8;
  out1[8] = x9;
  out1[9] = x10;
}

/* Not used in BoringSSL. */
#if 0

/*
 * The function fiat_25519_selectznz is a multi-limb conditional select.
 *
 * Postconditions:
 *   eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0x1]
 *   arg2: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 *   arg3: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 * Output Bounds:
 *   out1: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_selectznz(uint32_t out1[10], fiat_25519_uint1 arg1, const uint32_t arg2[10], const uint32_t arg3[10]) {
  uint32_t x1; uint32_t x2; uint32_t x3; uint32_t x4; uint32_t x5;
  uint32_t x6; uint32_t x7; uint32_t x8; uint32_t x9; uint32_t x10;
  /* Per-limb constant-time select through fiat_25519_cmovznz_u32. */
  fiat_25519_cmovznz_u32(&x1, arg1, (arg2[0]), (arg3[0]));
  fiat_25519_cmovznz_u32(&x2, arg1, (arg2[1]), (arg3[1]));
  fiat_25519_cmovznz_u32(&x3, arg1, (arg2[2]), (arg3[2]));
  fiat_25519_cmovznz_u32(&x4, arg1, (arg2[3]), (arg3[3]));
  fiat_25519_cmovznz_u32(&x5, arg1, (arg2[4]), (arg3[4]));
  fiat_25519_cmovznz_u32(&x6, arg1, (arg2[5]), (arg3[5]));
  fiat_25519_cmovznz_u32(&x7, arg1, (arg2[6]), (arg3[6]));
  fiat_25519_cmovznz_u32(&x8, arg1, (arg2[7]), (arg3[7]));
  fiat_25519_cmovznz_u32(&x9, arg1, (arg2[8]), (arg3[8]));
  fiat_25519_cmovznz_u32(&x10, arg1, (arg2[9]), (arg3[9]));
  out1[0] = x1;
  out1[1] = x2;
  out1[2] = x3;
  out1[3] = x4;
  out1[4] = x5;
  out1[5] = x6;
  out1[6] = x7;
  out1[7] = x8;
  out1[8] = x9;
  out1[9] = x10;
}
#endif

/*
 * The function fiat_25519_to_bytes serializes a field element to bytes in little-endian order.
 *
 * Postconditions:
 *   out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
 *
 * Output Bounds:
 *   out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x7f]]
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_to_bytes(uint8_t out1[32], const fiat_25519_tight_field_element arg1) {
  uint32_t x1;
  fiat_25519_uint1 x2;
  uint32_t x3;
  fiat_25519_uint1 x4;
  uint32_t x5;
  fiat_25519_uint1 x6;
  uint32_t x7;
  fiat_25519_uint1 x8;
  uint32_t x9;
  fiat_25519_uint1 x10;
  uint32_t x11;
  fiat_25519_uint1 x12;
  uint32_t x13;
  fiat_25519_uint1 x14;
  uint32_t x15;
  fiat_25519_uint1 x16;
  uint32_t x17;
  fiat_25519_uint1 x18;
  uint32_t x19;
  fiat_25519_uint1 x20;
  uint32_t x21;
  uint32_t x22;
  fiat_25519_uint1 x23;
  uint32_t x24;
  fiat_25519_uint1 x25;
  uint32_t x26;
  fiat_25519_uint1 x27;
  uint32_t x28;
  fiat_25519_uint1 x29;
  uint32_t x30;
  fiat_25519_uint1 x31;
  uint32_t x32;
  fiat_25519_uint1 x33;
  uint32_t x34;
  fiat_25519_uint1 x35;
  uint32_t x36;
  fiat_25519_uint1 x37;
  uint32_t x38;
  fiat_25519_uint1 x39;
  uint32_t x40;
  fiat_25519_uint1 x41;
  uint32_t x42; uint32_t x43; uint32_t x44; uint32_t x45;
  uint32_t x46; uint32_t x47; uint32_t x48; uint32_t x49;
  uint8_t x50;
  uint32_t x51;
  uint8_t x52;
  uint32_t x53;
  uint8_t x54; uint8_t x55;
  uint32_t x56;
  uint8_t x57;
  uint32_t x58;
  uint8_t x59;
  uint32_t x60;
  uint8_t x61; uint8_t x62;
  uint32_t x63;
  uint8_t x64;
  uint32_t x65;
  uint8_t x66;
  uint32_t x67;
  uint8_t x68; uint8_t x69;
  uint32_t x70;
  uint8_t x71;
  uint32_t x72;
  uint8_t x73;
  uint32_t x74;
  uint8_t x75; uint8_t x76;
  uint32_t x77;
  uint8_t x78;
  uint32_t x79;
  uint8_t x80;
  uint32_t x81;
  uint8_t x82; uint8_t x83; uint8_t x84;
  uint32_t x85;
  uint8_t x86;
  uint32_t x87;
  uint8_t x88;
  fiat_25519_uint1 x89;
  uint32_t x90;
  uint8_t x91;
  uint32_t x92;
  uint8_t x93;
  uint32_t x94;
  uint8_t x95; uint8_t x96;
  uint32_t x97;
  uint8_t x98;
  uint32_t x99;
  uint8_t x100;
  uint32_t x101;
  uint8_t x102; uint8_t x103;
  uint32_t x104;
  uint8_t x105;
  uint32_t x106;
  uint8_t x107;
  uint32_t x108;
  uint8_t x109; uint8_t x110;
  uint32_t x111;
  uint8_t x112;
  uint32_t x113;
  uint8_t x114;
  uint32_t x115;
  uint8_t x116; uint8_t x117;
  /* Canonicalize: tentatively subtract m (limb 0 uses 0x3ffffed = 2^26 - 19);
     the final borrow x20 tells whether arg1 was < m. */
  fiat_25519_subborrowx_u26(&x1, &x2, 0x0, (arg1[0]), UINT32_C(0x3ffffed));
  fiat_25519_subborrowx_u25(&x3, &x4, x2, (arg1[1]), UINT32_C(0x1ffffff));
  fiat_25519_subborrowx_u26(&x5, &x6, x4, (arg1[2]), UINT32_C(0x3ffffff));
  fiat_25519_subborrowx_u25(&x7, &x8, x6, (arg1[3]), UINT32_C(0x1ffffff));
  fiat_25519_subborrowx_u26(&x9, &x10, x8, (arg1[4]), UINT32_C(0x3ffffff));
  fiat_25519_subborrowx_u25(&x11, &x12, x10, (arg1[5]), UINT32_C(0x1ffffff));
  fiat_25519_subborrowx_u26(&x13, &x14, x12, (arg1[6]), UINT32_C(0x3ffffff));
  fiat_25519_subborrowx_u25(&x15, &x16, x14, (arg1[7]), UINT32_C(0x1ffffff));
  fiat_25519_subborrowx_u26(&x17, &x18, x16, (arg1[8]), UINT32_C(0x3ffffff));
  fiat_25519_subborrowx_u25(&x19, &x20, x18, (arg1[9]), UINT32_C(0x1ffffff));
  /* If the subtraction borrowed, constant-time add m back (mask x21). */
  fiat_25519_cmovznz_u32(&x21, x20, 0x0, UINT32_C(0xffffffff));
  fiat_25519_addcarryx_u26(&x22, &x23, 0x0, x1, (x21 & UINT32_C(0x3ffffed)));
  fiat_25519_addcarryx_u25(&x24, &x25, x23, x3, (x21 & UINT32_C(0x1ffffff)));
  fiat_25519_addcarryx_u26(&x26, &x27, x25, x5, (x21 & UINT32_C(0x3ffffff)));
  fiat_25519_addcarryx_u25(&x28, &x29, x27, x7, (x21 & UINT32_C(0x1ffffff)));
  fiat_25519_addcarryx_u26(&x30, &x31, x29, x9, (x21 & UINT32_C(0x3ffffff)));
  fiat_25519_addcarryx_u25(&x32, &x33, x31, x11, (x21 & UINT32_C(0x1ffffff)));
  fiat_25519_addcarryx_u26(&x34, &x35, x33, x13, (x21 & UINT32_C(0x3ffffff)));
  fiat_25519_addcarryx_u25(&x36, &x37, x35, x15, (x21 & UINT32_C(0x1ffffff)));
  fiat_25519_addcarryx_u26(&x38, &x39, x37, x17, (x21 & UINT32_C(0x3ffffff)));
  fiat_25519_addcarryx_u25(&x40, &x41, x39, x19, (x21 & UINT32_C(0x1ffffff)));
  /* Re-align the canonical limbs to bit positions within their bytes. */
  x42 = (x40 << 6);
  x43 = (x38 << 4);
  x44 = (x36 << 3);
  x45 = (x34 * (uint32_t)0x2);
  x46 = (x30 << 6);
  x47 = (x28 << 5);
  x48 = (x26 << 3);
  x49 = (x24 << 2);
  /* Peel off bytes little-endian, carrying leftover high bits into the
     next shifted limb. */
  x50 = (uint8_t)(x22 & UINT8_C(0xff));
  x51 = (x22 >> 8);
  x52 = (uint8_t)(x51 & UINT8_C(0xff));
  x53 = (x51 >> 8);
  x54 = (uint8_t)(x53 & UINT8_C(0xff));
  x55 = (uint8_t)(x53 >> 8);
  x56 = (x49 + (uint32_t)x55);
  x57 = (uint8_t)(x56 & UINT8_C(0xff));
  x58 = (x56 >> 8);
  x59 = (uint8_t)(x58 & UINT8_C(0xff));
  x60 = (x58 >> 8);
  x61 = (uint8_t)(x60 & UINT8_C(0xff));
  x62 = (uint8_t)(x60 >> 8);
  x63 = (x48 + (uint32_t)x62);
  x64 = (uint8_t)(x63 & UINT8_C(0xff));
  x65 = (x63 >> 8);
  x66 = (uint8_t)(x65 & UINT8_C(0xff));
  x67 = (x65 >> 8);
  x68 = (uint8_t)(x67 & UINT8_C(0xff));
  x69 = (uint8_t)(x67 >> 8);
  x70 = (x47 + (uint32_t)x69);
  x71 = (uint8_t)(x70 & UINT8_C(0xff));
  x72 = (x70 >> 8);
  x73 = (uint8_t)(x72 & UINT8_C(0xff));
  x74 = (x72 >> 8);
  x75 = (uint8_t)(x74 & UINT8_C(0xff));
  x76 = (uint8_t)(x74 >> 8);
  x77 = (x46 + (uint32_t)x76);
  x78 = (uint8_t)(x77 & UINT8_C(0xff));
  x79 = (x77 >> 8);
  x80 = (uint8_t)(x79 & UINT8_C(0xff));
  x81 = (x79 >> 8);
  x82 = (uint8_t)(x81 & UINT8_C(0xff));
  x83 = (uint8_t)(x81 >> 8);
  x84 = (uint8_t)(x32 & UINT8_C(0xff));
  x85 = (x32 >> 8);
  x86 = (uint8_t)(x85 & UINT8_C(0xff));
  x87 = (x85 >> 8);
  x88 = (uint8_t)(x87 & UINT8_C(0xff));
  x89 = (fiat_25519_uint1)(x87 >> 8);
  x90 = (x45 + (uint32_t)x89);
  x91 = (uint8_t)(x90 & UINT8_C(0xff));
  x92 = (x90 >> 8);
  x93 = (uint8_t)(x92 & UINT8_C(0xff));
  x94 = (x92 >> 8);
  x95 = (uint8_t)(x94 & UINT8_C(0xff));
  x96 = (uint8_t)(x94 >> 8);
  x97 = (x44 + (uint32_t)x96);
  x98 = (uint8_t)(x97 & UINT8_C(0xff));
  x99 = (x97 >> 8);
  x100 = (uint8_t)(x99 & UINT8_C(0xff));
  x101 = (x99 >> 8);
  x102 = (uint8_t)(x101 & UINT8_C(0xff));
  x103 = (uint8_t)(x101 >> 8);
  x104 = (x43 + (uint32_t)x103);
  x105 = (uint8_t)(x104 & UINT8_C(0xff));
  x106 = (x104 >> 8);
  x107 = (uint8_t)(x106 & UINT8_C(0xff));
  x108 = (x106 >> 8);
  x109 = (uint8_t)(x108 & UINT8_C(0xff));
  x110 = (uint8_t)(x108 >> 8);
  x111 = (x42 + (uint32_t)x110);
  x112 = (uint8_t)(x111 & UINT8_C(0xff));
  x113 = (x111 >> 8);
  x114 = (uint8_t)(x113 & UINT8_C(0xff));
  x115 = (x113 >> 8);
  x116 = (uint8_t)(x115 & UINT8_C(0xff));
  x117 = (uint8_t)(x115 >> 8);
  out1[0] = x50;
  out1[1] = x52;
  out1[2] = x54;
  out1[3] = x57;
  out1[4] = x59;
  out1[5] = x61;
  out1[6] = x64;
  out1[7] = x66;
  out1[8] = x68;
  out1[9] = x71;
  out1[10] = x73;
  out1[11] = x75;
  out1[12] = x78;
  out1[13] = x80;
  out1[14] = x82;
  out1[15] = x83;
  out1[16] = x84;
  out1[17] = x86;
  out1[18] = x88;
  out1[19] = x91;
  out1[20] = x93;
  out1[21] = x95;
  out1[22] = x98;
  out1[23] = x100;
  out1[24] = x102;
  out1[25] = x105;
  out1[26] = x107;
  out1[27] = x109;
  out1[28] = x112;
  out1[29] = x114;
  out1[30] = x116;
  out1[31] = x117;
}

/*
 * The function fiat_25519_from_bytes deserializes a field element from bytes in little-endian order.
 *
 * Postconditions:
 *   eval out1 mod m = bytes_eval arg1 mod m
 *
 * Input Bounds:
 *   arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x7f]]
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_from_bytes(fiat_25519_tight_field_element out1, const uint8_t arg1[32]) {
  uint32_t x1; uint32_t x2; uint32_t x3; uint32_t x4; uint32_t x5;
  uint32_t x6; uint32_t x7; uint32_t x8; uint32_t x9; uint32_t x10;
  uint32_t x11; uint32_t x12; uint32_t x13; uint32_t x14; uint32_t x15;
  uint8_t x16;
  uint32_t x17; uint32_t x18; uint32_t x19; uint32_t x20; uint32_t x21;
  uint32_t x22; uint32_t x23; uint32_t x24; uint32_t x25; uint32_t x26;
  uint32_t x27; uint32_t x28; uint32_t x29; uint32_t x30; uint32_t x31;
  uint8_t x32;
  uint32_t x33; uint32_t x34; uint32_t x35; uint32_t x36;
  uint8_t x37;
  uint32_t x38; uint32_t x39; uint32_t x40; uint32_t x41;
  uint8_t x42;
  uint32_t x43; uint32_t x44; uint32_t x45; uint32_t x46;
  uint8_t x47;
  uint32_t x48; uint32_t x49; uint32_t x50; uint32_t x51;
  uint8_t x52;
  uint32_t x53; uint32_t x54; uint32_t x55; uint32_t x56;
  uint32_t x57; uint32_t x58; uint32_t x59;
  uint8_t x60;
  uint32_t x61; uint32_t x62; uint32_t x63; uint32_t x64;
  uint8_t x65;
  uint32_t x66; uint32_t x67; uint32_t x68; uint32_t x69;
  uint8_t x70;
  uint32_t x71; uint32_t x72; uint32_t x73; uint32_t x74;
  uint8_t x75;
  uint32_t x76; uint32_t x77; uint32_t x78;
  /* Shift each byte to its bit offset within the limb it lands in;
     the shift pattern follows the alternating 26/25-bit limb widths. */
  x1 = ((uint32_t)(arg1[31]) << 18);
  x2 = ((uint32_t)(arg1[30]) << 10);
  x3 = ((uint32_t)(arg1[29]) << 2);
  x4 = ((uint32_t)(arg1[28]) << 20);
  x5 = ((uint32_t)(arg1[27]) << 12);
  x6 = ((uint32_t)(arg1[26]) << 4);
  x7 = ((uint32_t)(arg1[25]) << 21);
  x8 = ((uint32_t)(arg1[24]) << 13);
  x9 = ((uint32_t)(arg1[23]) << 5);
  x10 = ((uint32_t)(arg1[22]) << 23);
  x11 = ((uint32_t)(arg1[21]) << 15);
  x12 = ((uint32_t)(arg1[20]) << 7);
  x13 = ((uint32_t)(arg1[19]) << 24);
  x14 = ((uint32_t)(arg1[18]) << 16);
  x15 = ((uint32_t)(arg1[17]) << 8);
  x16 = (arg1[16]);
  x17 = ((uint32_t)(arg1[15]) << 18);
  x18 = ((uint32_t)(arg1[14]) << 10);
  x19 = ((uint32_t)(arg1[13]) << 2);
  x20 = ((uint32_t)(arg1[12]) << 19);
  x21 = ((uint32_t)(arg1[11]) << 11);
  x22 = ((uint32_t)(arg1[10]) << 3);
  x23 = ((uint32_t)(arg1[9]) << 21);
  x24 = ((uint32_t)(arg1[8]) << 13);
  x25 = ((uint32_t)(arg1[7]) << 5);
  x26 = ((uint32_t)(arg1[6]) << 22);
  x27 = ((uint32_t)(arg1[5]) << 14);
  x28 = ((uint32_t)(arg1[4]) << 6);
  x29 = ((uint32_t)(arg1[3]) << 24);
  x30 = ((uint32_t)(arg1[2]) << 16);
  x31 = ((uint32_t)(arg1[1]) << 8);
  x32 = (arg1[0]);
  /* Accumulate bytes per limb, pushing overflow bits into the next limb. */
  x33 = (x31 + (uint32_t)x32);
  x34 = (x30 + x33);
  x35 = (x29 + x34);
  x36 = (x35 & UINT32_C(0x3ffffff));
  x37 = (uint8_t)(x35 >> 26);
  x38 = (x28 + (uint32_t)x37);
  x39 = (x27 + x38);
  x40 = (x26 + x39);
  x41 = (x40 & UINT32_C(0x1ffffff));
  x42 = (uint8_t)(x40 >> 25);
  x43 = (x25 + (uint32_t)x42);
  x44 = (x24 + x43);
  x45 = (x23 + x44);
  x46 = (x45 & UINT32_C(0x3ffffff));
  x47 = (uint8_t)(x45 >> 26);
  x48 = (x22 + (uint32_t)x47);
  x49 = (x21 + x48);
  x50 = (x20 + x49);
  x51 = (x50 & UINT32_C(0x1ffffff));
  x52 = (uint8_t)(x50 >> 25);
  x53 = (x19 + (uint32_t)x52);
  x54 = (x18 + x53);
  x55 = (x17 + x54);
  x56 = (x15 + (uint32_t)x16);
  x57 = (x14 + x56);
  x58 = (x13 + x57);
  x59 = (x58 & UINT32_C(0x1ffffff));
  x60 = (uint8_t)(x58 >> 25);
  x61 = (x12 + (uint32_t)x60);
  x62 = (x11 + x61);
  x63 = (x10 + x62);
  x64 = (x63 & UINT32_C(0x3ffffff));
  x65 = (uint8_t)(x63 >> 26);
  x66 = (x9 + (uint32_t)x65);
  x67 = (x8 + x66);
  x68 = (x7 + x67);
  x69 = (x68 & UINT32_C(0x1ffffff));
  x70 = (uint8_t)(x68 >> 25);
  x71 = (x6 + (uint32_t)x70);
  x72 = (x5 + x71);
  x73 = (x4 + x72);
  x74 = (x73 & UINT32_C(0x3ffffff));
  x75 = (uint8_t)(x73 >> 26);
  x76 = (x3 + (uint32_t)x75);
  x77 = (x2 + x76);
  x78 = (x1 + x77);
  out1[0] = x36;
  out1[1] = x41;
  out1[2] = x46;
  out1[3] = x51;
  out1[4] = x55;
  out1[5] = x59;
  out1[6] = x64;
  out1[7] = x69;
  out1[8] = x74;
  out1[9] = x78;
}

/* Not used in BoringSSL. */
#if 0

/*
 * The function fiat_25519_relax is the identity function converting from tight field elements to loose field elements.
 *
 * Postconditions:
 *   out1 = arg1
 *
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_relax(fiat_25519_loose_field_element out1, const fiat_25519_tight_field_element arg1) {
  uint32_t x1; uint32_t x2; uint32_t x3; uint32_t x4; uint32_t x5;
  uint32_t x6; uint32_t x7; uint32_t x8; uint32_t x9; uint32_t x10;
  /* Limb-wise copy: every tight element is trivially a loose element. */
  x1 = (arg1[0]);
  x2 = (arg1[1]);
  x3 = (arg1[2]);
  x4 = (arg1[3]);
  x5 = (arg1[4]);
  x6 = (arg1[5]);
  x7 = (arg1[6]);
  x8 = (arg1[7]);
  x9 = (arg1[8]);
  x10 = (arg1[9]);
  out1[0] = x1;
  out1[1] = x2;
  out1[2] = x3;
  out1[3] = x4;
  out1[4] = x5;
  out1[5] = x6;
  out1[6] = x7;
  out1[7] = x8;
  out1[8] = x9;
  out1[9] = x10;
}
#endif

/*
 * The function fiat_25519_carry_scmul_121666 multiplies a field element by 121666 and reduces the result.
*
* Postconditions:
*   eval out1 mod m = (121666 * eval arg1) mod m
*
*/
static FIAT_25519_FIAT_INLINE void fiat_25519_carry_scmul_121666(fiat_25519_tight_field_element out1, const fiat_25519_loose_field_element arg1) {
  /* Autogenerated, formally verified code: do not hand-edit the arithmetic.
   * 0x1db42 == 121666, the constant from the X25519 ladder step. */
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5;
  uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10;
  uint32_t x11; uint32_t x12; uint64_t x13; uint32_t x14; uint32_t x15;
  uint64_t x16; uint32_t x17; uint32_t x18; uint64_t x19; uint32_t x20;
  uint32_t x21; uint64_t x22; uint32_t x23; uint32_t x24; uint64_t x25;
  uint32_t x26; uint32_t x27; uint64_t x28; uint32_t x29; uint32_t x30;
  uint64_t x31; uint32_t x32; uint32_t x33; uint64_t x34; uint32_t x35;
  uint32_t x36; uint64_t x37; uint32_t x38; uint32_t x39; uint32_t x40;
  uint32_t x41; fiat_25519_uint1 x42; uint32_t x43; uint32_t x44;
  fiat_25519_uint1 x45; uint32_t x46; uint32_t x47;
  /* Per-limb products against the scalar (64-bit, cannot overflow). */
  x1 = ((uint64_t)UINT32_C(0x1db42) * (arg1[9]));
  x2 = ((uint64_t)UINT32_C(0x1db42) * (arg1[8]));
  x3 = ((uint64_t)UINT32_C(0x1db42) * (arg1[7]));
  x4 = ((uint64_t)UINT32_C(0x1db42) * (arg1[6]));
  x5 = ((uint64_t)UINT32_C(0x1db42) * (arg1[5]));
  x6 = ((uint64_t)UINT32_C(0x1db42) * (arg1[4]));
  x7 = ((uint64_t)UINT32_C(0x1db42) * (arg1[3]));
  x8 = ((uint64_t)UINT32_C(0x1db42) * (arg1[2]));
  x9 = ((uint64_t)UINT32_C(0x1db42) * (arg1[1]));
  x10 = ((uint64_t)UINT32_C(0x1db42) * (arg1[0]));
  /* Carry chain over the alternating 26-/25-bit limb radix. */
  x11 = (uint32_t)(x10 >> 26);
  x12 = (uint32_t)(x10 & UINT32_C(0x3ffffff));
  x13 = (x11 + x9);
  x14 = (uint32_t)(x13 >> 25);
  x15 = (uint32_t)(x13 & UINT32_C(0x1ffffff));
  x16 = (x14 + x8);
  x17 = (uint32_t)(x16 >> 26);
  x18 = (uint32_t)(x16 & UINT32_C(0x3ffffff));
  x19 = (x17 + x7);
  x20 = (uint32_t)(x19 >> 25);
  x21 = (uint32_t)(x19 & UINT32_C(0x1ffffff));
  x22 = (x20 + x6);
  x23 = (uint32_t)(x22 >> 26);
  x24 = (uint32_t)(x22 & UINT32_C(0x3ffffff));
  x25 = (x23 + x5);
  x26 = (uint32_t)(x25 >> 25);
  x27 = (uint32_t)(x25 & UINT32_C(0x1ffffff));
  x28 = (x26 + x4);
  x29 = (uint32_t)(x28 >> 26);
  x30 = (uint32_t)(x28 & UINT32_C(0x3ffffff));
  x31 = (x29 + x3);
  x32 = (uint32_t)(x31 >> 25);
  x33 = (uint32_t)(x31 & UINT32_C(0x1ffffff));
  x34 = (x32 + x2);
  x35 = (uint32_t)(x34 >> 26);
  x36 = (uint32_t)(x34 & UINT32_C(0x3ffffff));
  x37 = (x35 + x1);
  x38 = (uint32_t)(x37 >> 25);
  x39 = (uint32_t)(x37 & UINT32_C(0x1ffffff));
  /* Fold the top carry back into limb 0: 2^255 = 19 (mod m), hence * 0x13. */
  x40 = (x38 * UINT8_C(0x13));
  x41 = (x12 + x40);
  x42 = (fiat_25519_uint1)(x41 >> 26);
  x43 = (x41 & UINT32_C(0x3ffffff));
  x44 = (x42 + x15);
  x45 = (fiat_25519_uint1)(x44 >> 25);
  x46 = (x44 & UINT32_C(0x1ffffff));
  x47 = (x45 + x18);
  out1[0] = x43;
  out1[1] = x46;
  out1[2] = x47;
  out1[3] = x21;
  out1[4] = x24;
  out1[5] = x27;
  out1[6] = x30;
  out1[7] = x33;
  out1[8] = x36;
  out1[9] = x39;
}


================================================
FILE: Sources/CNIOBoringSSL/third_party/fiat/curve25519_64.h
================================================
/* Autogenerated: 'src/ExtractionOCaml/unsaturated_solinas' --inline --static --use-value-barrier 25519 64 '(auto)' '2^255 - 19' carry_mul carry_square carry add sub opp selectznz to_bytes from_bytes relax carry_scmul121666 */
/* curve description: 25519 */
/* machine_wordsize = 64 (from "64") */
/* requested operations: carry_mul, carry_square, carry, add, sub, opp, selectznz, to_bytes, from_bytes, relax, carry_scmul121666 */
/* n = 5 (from "(auto)") */
/* s-c = 2^255 - [(1, 19)] (from "2^255 - 19") */
/* tight_bounds_multiplier = 1 (from "") */
/*  */
/* Computed values: */
/* carry_chain = [0, 1, 2, 3, 4, 0, 1] */
/* eval z = z[0] + (z[1] << 51) + (z[2] << 102) + (z[3] << 153) + (z[4] << 204) */
/* bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) +
(z[29] << 232) + (z[30] << 240) + (z[31] << 248) */ /* balance = [0xfffffffffffda, 0xffffffffffffe, 0xffffffffffffe, 0xffffffffffffe, 0xffffffffffffe] */ #include typedef unsigned char fiat_25519_uint1; typedef signed char fiat_25519_int1; #if defined(__GNUC__) || defined(__clang__) # define FIAT_25519_FIAT_EXTENSION __extension__ # define FIAT_25519_FIAT_INLINE __inline__ #else # define FIAT_25519_FIAT_EXTENSION # define FIAT_25519_FIAT_INLINE #endif FIAT_25519_FIAT_EXTENSION typedef signed __int128 fiat_25519_int128; FIAT_25519_FIAT_EXTENSION typedef unsigned __int128 fiat_25519_uint128; /* The type fiat_25519_loose_field_element is a field element with loose bounds. */ /* Bounds: [[0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000]] */ typedef uint64_t fiat_25519_loose_field_element[5]; /* The type fiat_25519_tight_field_element is a field element with tight bounds. */ /* Bounds: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]] */ typedef uint64_t fiat_25519_tight_field_element[5]; #if (-1 & 3) != 3 #error "This code only works on a two's complement system" #endif #if !defined(FIAT_25519_NO_ASM) && (defined(__GNUC__) || defined(__clang__)) static __inline__ uint64_t fiat_25519_value_barrier_u64(uint64_t a) { __asm__("" : "+r"(a) : /* no inputs */); return a; } #else # define fiat_25519_value_barrier_u64(x) (x) #endif /* * The function fiat_25519_addcarryx_u51 is an addition with carry. 
* * Postconditions: * out1 = (arg1 + arg2 + arg3) mod 2^51 * out2 = ⌊(arg1 + arg2 + arg3) / 2^51⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0x7ffffffffffff] * arg3: [0x0 ~> 0x7ffffffffffff] * Output Bounds: * out1: [0x0 ~> 0x7ffffffffffff] * out2: [0x0 ~> 0x1] */ static FIAT_25519_FIAT_INLINE void fiat_25519_addcarryx_u51(uint64_t* out1, fiat_25519_uint1* out2, fiat_25519_uint1 arg1, uint64_t arg2, uint64_t arg3) { uint64_t x1; uint64_t x2; fiat_25519_uint1 x3; x1 = ((arg1 + arg2) + arg3); x2 = (x1 & UINT64_C(0x7ffffffffffff)); x3 = (fiat_25519_uint1)(x1 >> 51); *out1 = x2; *out2 = x3; } /* * The function fiat_25519_subborrowx_u51 is a subtraction with borrow. * * Postconditions: * out1 = (-arg1 + arg2 + -arg3) mod 2^51 * out2 = -⌊(-arg1 + arg2 + -arg3) / 2^51⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0x7ffffffffffff] * arg3: [0x0 ~> 0x7ffffffffffff] * Output Bounds: * out1: [0x0 ~> 0x7ffffffffffff] * out2: [0x0 ~> 0x1] */ static FIAT_25519_FIAT_INLINE void fiat_25519_subborrowx_u51(uint64_t* out1, fiat_25519_uint1* out2, fiat_25519_uint1 arg1, uint64_t arg2, uint64_t arg3) { int64_t x1; fiat_25519_int1 x2; uint64_t x3; x1 = ((int64_t)(arg2 - (int64_t)arg1) - (int64_t)arg3); x2 = (fiat_25519_int1)(x1 >> 51); x3 = (x1 & UINT64_C(0x7ffffffffffff)); *out1 = x3; *out2 = (fiat_25519_uint1)(0x0 - x2); } /* * The function fiat_25519_cmovznz_u64 is a single-word conditional move. 
*
* Postconditions:
*   out1 = (if arg1 = 0 then arg2 else arg3)
*
* Input Bounds:
*   arg1: [0x0 ~> 0x1]
*   arg2: [0x0 ~> 0xffffffffffffffff]
*   arg3: [0x0 ~> 0xffffffffffffffff]
* Output Bounds:
*   out1: [0x0 ~> 0xffffffffffffffff]
*/
static FIAT_25519_FIAT_INLINE void fiat_25519_cmovznz_u64(uint64_t* out1, fiat_25519_uint1 arg1, uint64_t arg2, uint64_t arg3) {
  fiat_25519_uint1 x1;
  uint64_t x2;
  uint64_t x3;
  x1 = (!(!arg1));
  /* All-zeros or all-ones mask derived from the condition bit. */
  x2 = ((fiat_25519_int1)(0x0 - x1) & UINT64_C(0xffffffffffffffff));
  /* Branch-free select; the value barrier keeps it constant-time. */
  x3 = ((fiat_25519_value_barrier_u64(x2) & arg3) | (fiat_25519_value_barrier_u64((~x2)) & arg2));
  *out1 = x3;
}

/*
 * The function fiat_25519_carry_mul multiplies two field elements and reduces the result.
 *
 * Postconditions:
 *   eval out1 mod m = (eval arg1 * eval arg2) mod m
 *
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_carry_mul(fiat_25519_tight_field_element out1, const fiat_25519_loose_field_element arg1, const fiat_25519_loose_field_element arg2) {
  /* Autogenerated, formally verified code: do not hand-edit the arithmetic.
   * Schoolbook 5x5 limb product in radix 2^51; cross terms above 2^255 are
   * pre-multiplied by 19 (0x13) since 2^255 = 19 (mod m). */
  fiat_25519_uint128 x1; fiat_25519_uint128 x2; fiat_25519_uint128 x3;
  fiat_25519_uint128 x4; fiat_25519_uint128 x5; fiat_25519_uint128 x6;
  fiat_25519_uint128 x7; fiat_25519_uint128 x8; fiat_25519_uint128 x9;
  fiat_25519_uint128 x10; fiat_25519_uint128 x11; fiat_25519_uint128 x12;
  fiat_25519_uint128 x13; fiat_25519_uint128 x14; fiat_25519_uint128 x15;
  fiat_25519_uint128 x16; fiat_25519_uint128 x17; fiat_25519_uint128 x18;
  fiat_25519_uint128 x19; fiat_25519_uint128 x20; fiat_25519_uint128 x21;
  fiat_25519_uint128 x22; fiat_25519_uint128 x23; fiat_25519_uint128 x24;
  fiat_25519_uint128 x25; fiat_25519_uint128 x26; uint64_t x27; uint64_t x28;
  fiat_25519_uint128 x29; fiat_25519_uint128 x30; fiat_25519_uint128 x31;
  fiat_25519_uint128 x32; fiat_25519_uint128 x33; uint64_t x34; uint64_t x35;
  fiat_25519_uint128 x36; uint64_t x37; uint64_t x38; fiat_25519_uint128 x39;
  uint64_t x40; uint64_t x41; fiat_25519_uint128 x42; uint64_t x43;
  uint64_t x44; uint64_t x45; uint64_t x46; uint64_t x47; uint64_t x48;
  uint64_t x49; fiat_25519_uint1 x50; uint64_t x51; uint64_t x52;
  x1 = ((fiat_25519_uint128)(arg1[4]) * ((arg2[4]) * UINT8_C(0x13)));
  x2 = ((fiat_25519_uint128)(arg1[4]) * ((arg2[3]) * UINT8_C(0x13)));
  x3 = ((fiat_25519_uint128)(arg1[4]) * ((arg2[2]) * UINT8_C(0x13)));
  x4 = ((fiat_25519_uint128)(arg1[4]) * ((arg2[1]) * UINT8_C(0x13)));
  x5 = ((fiat_25519_uint128)(arg1[3]) * ((arg2[4]) * UINT8_C(0x13)));
  x6 = ((fiat_25519_uint128)(arg1[3]) * ((arg2[3]) * UINT8_C(0x13)));
  x7 = ((fiat_25519_uint128)(arg1[3]) * ((arg2[2]) * UINT8_C(0x13)));
  x8 = ((fiat_25519_uint128)(arg1[2]) * ((arg2[4]) * UINT8_C(0x13)));
  x9 = ((fiat_25519_uint128)(arg1[2]) * ((arg2[3]) * UINT8_C(0x13)));
  x10 = ((fiat_25519_uint128)(arg1[1]) * ((arg2[4]) * UINT8_C(0x13)));
  x11 = ((fiat_25519_uint128)(arg1[4]) * (arg2[0]));
  x12 = ((fiat_25519_uint128)(arg1[3]) * (arg2[1]));
  x13 = ((fiat_25519_uint128)(arg1[3]) * (arg2[0]));
  x14 = ((fiat_25519_uint128)(arg1[2]) * (arg2[2]));
  x15 = ((fiat_25519_uint128)(arg1[2]) * (arg2[1]));
  x16 = ((fiat_25519_uint128)(arg1[2]) * (arg2[0]));
  x17 = ((fiat_25519_uint128)(arg1[1]) * (arg2[3]));
  x18 = ((fiat_25519_uint128)(arg1[1]) * (arg2[2]));
  x19 = ((fiat_25519_uint128)(arg1[1]) * (arg2[1]));
  x20 = ((fiat_25519_uint128)(arg1[1]) * (arg2[0]));
  x21 = ((fiat_25519_uint128)(arg1[0]) * (arg2[4]));
  x22 = ((fiat_25519_uint128)(arg1[0]) * (arg2[3]));
  x23 = ((fiat_25519_uint128)(arg1[0]) * (arg2[2]));
  x24 = ((fiat_25519_uint128)(arg1[0]) * (arg2[1]));
  x25 = ((fiat_25519_uint128)(arg1[0]) * (arg2[0]));
  /* Column sums per output limb, then a single carry pass. */
  x26 = (x25 + (x10 + (x9 + (x7 + x4))));
  x27 = (uint64_t)(x26 >> 51);
  x28 = (uint64_t)(x26 & UINT64_C(0x7ffffffffffff));
  x29 = (x21 + (x17 + (x14 + (x12 + x11))));
  x30 = (x22 + (x18 + (x15 + (x13 + x1))));
  x31 = (x23 + (x19 + (x16 + (x5 + x2))));
  x32 = (x24 + (x20 + (x8 + (x6 + x3))));
  x33 = (x27 + x32);
  x34 = (uint64_t)(x33 >> 51);
  x35 = (uint64_t)(x33 & UINT64_C(0x7ffffffffffff));
  x36 = (x34 + x31);
  x37 = (uint64_t)(x36 >> 51);
  x38 = (uint64_t)(x36 & UINT64_C(0x7ffffffffffff));
  x39 = (x37 + x30);
  x40 = (uint64_t)(x39 >> 51);
  x41 = (uint64_t)(x39 & UINT64_C(0x7ffffffffffff));
  x42 = (x40 + x29);
  x43 = (uint64_t)(x42 >> 51);
  x44 = (uint64_t)(x42 & UINT64_C(0x7ffffffffffff));
  /* Fold the final carry back via 2^255 = 19 (mod m). */
  x45 = (x43 * UINT8_C(0x13));
  x46 = (x28 + x45);
  x47 = (x46 >> 51);
  x48 = (x46 & UINT64_C(0x7ffffffffffff));
  x49 = (x47 + x35);
  x50 = (fiat_25519_uint1)(x49 >> 51);
  x51 = (x49 & UINT64_C(0x7ffffffffffff));
  x52 = (x50 + x38);
  out1[0] = x48;
  out1[1] = x51;
  out1[2] = x52;
  out1[3] = x41;
  out1[4] = x44;
}

/*
 * The function fiat_25519_carry_square squares a field element and reduces the result.
 *
 * Postconditions:
 *   eval out1 mod m = (eval arg1 * eval arg1) mod m
 *
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_carry_square(fiat_25519_tight_field_element out1, const fiat_25519_loose_field_element arg1) {
  /* Squaring exploits symmetry: cross terms are doubled up front (x2..x8). */
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5;
  uint64_t x6; uint64_t x7; uint64_t x8;
  fiat_25519_uint128 x9; fiat_25519_uint128 x10; fiat_25519_uint128 x11;
  fiat_25519_uint128 x12; fiat_25519_uint128 x13; fiat_25519_uint128 x14;
  fiat_25519_uint128 x15; fiat_25519_uint128 x16; fiat_25519_uint128 x17;
  fiat_25519_uint128 x18; fiat_25519_uint128 x19; fiat_25519_uint128 x20;
  fiat_25519_uint128 x21; fiat_25519_uint128 x22; fiat_25519_uint128 x23;
  fiat_25519_uint128 x24; uint64_t x25; uint64_t x26;
  fiat_25519_uint128 x27; fiat_25519_uint128 x28; fiat_25519_uint128 x29;
  fiat_25519_uint128 x30; fiat_25519_uint128 x31; uint64_t x32; uint64_t x33;
  fiat_25519_uint128 x34; uint64_t x35; uint64_t x36; fiat_25519_uint128 x37;
  uint64_t x38; uint64_t x39; fiat_25519_uint128 x40; uint64_t x41;
  uint64_t x42; uint64_t x43; uint64_t x44; uint64_t x45; uint64_t x46;
  uint64_t x47; fiat_25519_uint1 x48; uint64_t x49; uint64_t x50;
  x1 = ((arg1[4]) * UINT8_C(0x13));
  x2 = (x1 * 0x2);
  x3 = ((arg1[4]) * 0x2);
  x4 = ((arg1[3]) * UINT8_C(0x13));
  x5 = (x4 * 0x2);
  x6 = ((arg1[3]) * 0x2);
  x7 = ((arg1[2]) * 0x2);
  x8 = ((arg1[1]) * 0x2);
  x9 = ((fiat_25519_uint128)(arg1[4]) * x1);
  x10 = ((fiat_25519_uint128)(arg1[3]) * x2);
  x11 = ((fiat_25519_uint128)(arg1[3]) * x4);
  x12 = ((fiat_25519_uint128)(arg1[2]) * x2);
  x13 = ((fiat_25519_uint128)(arg1[2]) * x5);
  x14 = ((fiat_25519_uint128)(arg1[2]) * (arg1[2]));
  x15 = ((fiat_25519_uint128)(arg1[1]) * x2);
  x16 = ((fiat_25519_uint128)(arg1[1]) * x6);
  x17 = ((fiat_25519_uint128)(arg1[1]) * x7);
  x18 = ((fiat_25519_uint128)(arg1[1]) * (arg1[1]));
  x19 = ((fiat_25519_uint128)(arg1[0]) * x3);
  x20 = ((fiat_25519_uint128)(arg1[0]) * x6);
  x21 = ((fiat_25519_uint128)(arg1[0]) * x7);
  x22 = ((fiat_25519_uint128)(arg1[0]) * x8);
  x23 = ((fiat_25519_uint128)(arg1[0]) * (arg1[0]));
  x24 = (x23 + (x15 + x13));
  x25 = (uint64_t)(x24 >> 51);
  x26 = (uint64_t)(x24 & UINT64_C(0x7ffffffffffff));
  x27 = (x19 + (x16 + x14));
  x28 = (x20 + (x17 + x9));
  x29 = (x21 + (x18 + x10));
  x30 = (x22 + (x12 + x11));
  x31 = (x25 + x30);
  x32 = (uint64_t)(x31 >> 51);
  x33 = (uint64_t)(x31 & UINT64_C(0x7ffffffffffff));
  x34 = (x32 + x29);
  x35 = (uint64_t)(x34 >> 51);
  x36 = (uint64_t)(x34 & UINT64_C(0x7ffffffffffff));
  x37 = (x35 + x28);
  x38 = (uint64_t)(x37 >> 51);
  x39 = (uint64_t)(x37 & UINT64_C(0x7ffffffffffff));
  x40 = (x38 + x27);
  x41 = (uint64_t)(x40 >> 51);
  x42 = (uint64_t)(x40 & UINT64_C(0x7ffffffffffff));
  /* Fold the final carry back via 2^255 = 19 (mod m). */
  x43 = (x41 * UINT8_C(0x13));
  x44 = (x26 + x43);
  x45 = (x44 >> 51);
  x46 = (x44 & UINT64_C(0x7ffffffffffff));
  x47 = (x45 + x33);
  x48 = (fiat_25519_uint1)(x47 >> 51);
  x49 = (x47 & UINT64_C(0x7ffffffffffff));
  x50 = (x48 + x36);
  out1[0] = x46;
  out1[1] = x49;
  out1[2] = x50;
  out1[3] = x39;
  out1[4] = x42;
}

/*
 * The function fiat_25519_carry reduces a field element.
* * Postconditions: * eval out1 mod m = eval arg1 mod m * */ static FIAT_25519_FIAT_INLINE void fiat_25519_carry(fiat_25519_tight_field_element out1, const fiat_25519_loose_field_element arg1) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12; x1 = (arg1[0]); x2 = ((x1 >> 51) + (arg1[1])); x3 = ((x2 >> 51) + (arg1[2])); x4 = ((x3 >> 51) + (arg1[3])); x5 = ((x4 >> 51) + (arg1[4])); x6 = ((x1 & UINT64_C(0x7ffffffffffff)) + ((x5 >> 51) * UINT8_C(0x13))); x7 = ((fiat_25519_uint1)(x6 >> 51) + (x2 & UINT64_C(0x7ffffffffffff))); x8 = (x6 & UINT64_C(0x7ffffffffffff)); x9 = (x7 & UINT64_C(0x7ffffffffffff)); x10 = ((fiat_25519_uint1)(x7 >> 51) + (x3 & UINT64_C(0x7ffffffffffff))); x11 = (x4 & UINT64_C(0x7ffffffffffff)); x12 = (x5 & UINT64_C(0x7ffffffffffff)); out1[0] = x8; out1[1] = x9; out1[2] = x10; out1[3] = x11; out1[4] = x12; } /* * The function fiat_25519_add adds two field elements. * * Postconditions: * eval out1 mod m = (eval arg1 + eval arg2) mod m * */ static FIAT_25519_FIAT_INLINE void fiat_25519_add(fiat_25519_loose_field_element out1, const fiat_25519_tight_field_element arg1, const fiat_25519_tight_field_element arg2) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; x1 = ((arg1[0]) + (arg2[0])); x2 = ((arg1[1]) + (arg2[1])); x3 = ((arg1[2]) + (arg2[2])); x4 = ((arg1[3]) + (arg2[3])); x5 = ((arg1[4]) + (arg2[4])); out1[0] = x1; out1[1] = x2; out1[2] = x3; out1[3] = x4; out1[4] = x5; } /* * The function fiat_25519_sub subtracts two field elements. 
* * Postconditions: * eval out1 mod m = (eval arg1 - eval arg2) mod m * */ static FIAT_25519_FIAT_INLINE void fiat_25519_sub(fiat_25519_loose_field_element out1, const fiat_25519_tight_field_element arg1, const fiat_25519_tight_field_element arg2) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; x1 = ((UINT64_C(0xfffffffffffda) + (arg1[0])) - (arg2[0])); x2 = ((UINT64_C(0xffffffffffffe) + (arg1[1])) - (arg2[1])); x3 = ((UINT64_C(0xffffffffffffe) + (arg1[2])) - (arg2[2])); x4 = ((UINT64_C(0xffffffffffffe) + (arg1[3])) - (arg2[3])); x5 = ((UINT64_C(0xffffffffffffe) + (arg1[4])) - (arg2[4])); out1[0] = x1; out1[1] = x2; out1[2] = x3; out1[3] = x4; out1[4] = x5; } /* * The function fiat_25519_opp negates a field element. * * Postconditions: * eval out1 mod m = -eval arg1 mod m * */ static FIAT_25519_FIAT_INLINE void fiat_25519_opp(fiat_25519_loose_field_element out1, const fiat_25519_tight_field_element arg1) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; x1 = (UINT64_C(0xfffffffffffda) - (arg1[0])); x2 = (UINT64_C(0xffffffffffffe) - (arg1[1])); x3 = (UINT64_C(0xffffffffffffe) - (arg1[2])); x4 = (UINT64_C(0xffffffffffffe) - (arg1[3])); x5 = (UINT64_C(0xffffffffffffe) - (arg1[4])); out1[0] = x1; out1[1] = x2; out1[2] = x3; out1[3] = x4; out1[4] = x5; } /* Not used in BoringSSL. */ #if 0 /* * The function fiat_25519_selectznz is a multi-limb conditional select. 
* * Postconditions: * eval out1 = (if arg1 = 0 then eval arg2 else eval arg3) * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * Output Bounds: * out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */ static FIAT_25519_FIAT_INLINE void fiat_25519_selectznz(uint64_t out1[5], fiat_25519_uint1 arg1, const uint64_t arg2[5], const uint64_t arg3[5]) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; fiat_25519_cmovznz_u64(&x1, arg1, (arg2[0]), (arg3[0])); fiat_25519_cmovznz_u64(&x2, arg1, (arg2[1]), (arg3[1])); fiat_25519_cmovznz_u64(&x3, arg1, (arg2[2]), (arg3[2])); fiat_25519_cmovznz_u64(&x4, arg1, (arg2[3]), (arg3[3])); fiat_25519_cmovznz_u64(&x5, arg1, (arg2[4]), (arg3[4])); out1[0] = x1; out1[1] = x2; out1[2] = x3; out1[3] = x4; out1[4] = x5; } #endif /* * The function fiat_25519_to_bytes serializes a field element to bytes in little-endian order. 
*
* Postconditions:
*   out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
*
* Output Bounds:
*   out1: bytes 0..30 each [0x0 ~> 0xff], byte 31 [0x0 ~> 0x7f]
*/
static FIAT_25519_FIAT_INLINE void fiat_25519_to_bytes(uint8_t out1[32], const fiat_25519_tight_field_element arg1) {
  /* Autogenerated, formally verified code: do not hand-edit the arithmetic.
   * Step 1: constant-time canonical reduction (subtract m; add it back if the
   * subtraction borrowed). Step 2: repack five 51-bit limbs into 32 bytes. */
  uint64_t x1; fiat_25519_uint1 x2; uint64_t x3; fiat_25519_uint1 x4;
  uint64_t x5; fiat_25519_uint1 x6; uint64_t x7; fiat_25519_uint1 x8;
  uint64_t x9; fiat_25519_uint1 x10; uint64_t x11; uint64_t x12;
  fiat_25519_uint1 x13; uint64_t x14; fiat_25519_uint1 x15; uint64_t x16;
  fiat_25519_uint1 x17; uint64_t x18; fiat_25519_uint1 x19; uint64_t x20;
  fiat_25519_uint1 x21; uint64_t x22; uint64_t x23; uint64_t x24;
  uint64_t x25; uint8_t x26; uint64_t x27; uint8_t x28; uint64_t x29;
  uint8_t x30; uint64_t x31; uint8_t x32; uint64_t x33; uint8_t x34;
  uint64_t x35; uint8_t x36; uint8_t x37; uint64_t x38; uint8_t x39;
  uint64_t x40; uint8_t x41; uint64_t x42; uint8_t x43; uint64_t x44;
  uint8_t x45; uint64_t x46; uint8_t x47; uint64_t x48; uint8_t x49;
  uint8_t x50; uint64_t x51; uint8_t x52; uint64_t x53; uint8_t x54;
  uint64_t x55; uint8_t x56; uint64_t x57; uint8_t x58; uint64_t x59;
  uint8_t x60; uint64_t x61; uint8_t x62; uint64_t x63; uint8_t x64;
  fiat_25519_uint1 x65; uint64_t x66; uint8_t x67; uint64_t x68;
  uint8_t x69; uint64_t x70; uint8_t x71; uint64_t x72; uint8_t x73;
  uint64_t x74; uint8_t x75; uint64_t x76; uint8_t x77; uint8_t x78;
  uint64_t x79; uint8_t x80; uint64_t x81; uint8_t x82; uint64_t x83;
  uint8_t x84; uint64_t x85; uint8_t x86; uint64_t x87; uint8_t x88;
  uint64_t x89; uint8_t x90; uint8_t x91;
  /* Subtract m = 2^255 - 19 limb-wise (low limb 2^51 - 19, rest 2^51 - 1). */
  fiat_25519_subborrowx_u51(&x1, &x2, 0x0, (arg1[0]), UINT64_C(0x7ffffffffffed));
  fiat_25519_subborrowx_u51(&x3, &x4, x2, (arg1[1]), UINT64_C(0x7ffffffffffff));
  fiat_25519_subborrowx_u51(&x5, &x6, x4, (arg1[2]), UINT64_C(0x7ffffffffffff));
  fiat_25519_subborrowx_u51(&x7, &x8, x6, (arg1[3]), UINT64_C(0x7ffffffffffff));
  fiat_25519_subborrowx_u51(&x9, &x10, x8, (arg1[4]), UINT64_C(0x7ffffffffffff));
  /* If the subtraction borrowed, add m back (mask x11 is all-ones or zero). */
  fiat_25519_cmovznz_u64(&x11, x10, 0x0, UINT64_C(0xffffffffffffffff));
  fiat_25519_addcarryx_u51(&x12, &x13, 0x0, x1, (x11 & UINT64_C(0x7ffffffffffed)));
  fiat_25519_addcarryx_u51(&x14, &x15, x13, x3, (x11 & UINT64_C(0x7ffffffffffff)));
  fiat_25519_addcarryx_u51(&x16, &x17, x15, x5, (x11 & UINT64_C(0x7ffffffffffff)));
  fiat_25519_addcarryx_u51(&x18, &x19, x17, x7, (x11 & UINT64_C(0x7ffffffffffff)));
  fiat_25519_addcarryx_u51(&x20, &x21, x19, x9, (x11 & UINT64_C(0x7ffffffffffff)));
  /* Pre-shift limbs 1..4 to their bit offsets within the byte stream. */
  x22 = (x20 << 4);
  x23 = (x18 * (uint64_t)0x2);
  x24 = (x16 << 6);
  x25 = (x14 << 3);
  x26 = (uint8_t)(x12 & UINT8_C(0xff));
  x27 = (x12 >> 8);
  x28 = (uint8_t)(x27 & UINT8_C(0xff));
  x29 = (x27 >> 8);
  x30 = (uint8_t)(x29 & UINT8_C(0xff));
  x31 = (x29 >> 8);
  x32 = (uint8_t)(x31 & UINT8_C(0xff));
  x33 = (x31 >> 8);
  x34 = (uint8_t)(x33 & UINT8_C(0xff));
  x35 = (x33 >> 8);
  x36 = (uint8_t)(x35 & UINT8_C(0xff));
  x37 = (uint8_t)(x35 >> 8);
  x38 = (x25 + (uint64_t)x37);
  x39 = (uint8_t)(x38 & UINT8_C(0xff));
  x40 = (x38 >> 8);
  x41 = (uint8_t)(x40 & UINT8_C(0xff));
  x42 = (x40 >> 8);
  x43 = (uint8_t)(x42 & UINT8_C(0xff));
  x44 = (x42 >> 8);
  x45 = (uint8_t)(x44 & UINT8_C(0xff));
  x46 = (x44 >> 8);
  x47 = (uint8_t)(x46 & UINT8_C(0xff));
  x48 = (x46 >> 8);
  x49 = (uint8_t)(x48 & UINT8_C(0xff));
  x50 = (uint8_t)(x48 >> 8);
  x51 = (x24 + (uint64_t)x50);
  x52 = (uint8_t)(x51 & UINT8_C(0xff));
  x53 = (x51 >> 8);
  x54 = (uint8_t)(x53 & UINT8_C(0xff));
  x55 = (x53 >> 8);
  x56 = (uint8_t)(x55 & UINT8_C(0xff));
  x57 = (x55 >> 8);
  x58 = (uint8_t)(x57 & UINT8_C(0xff));
  x59 = (x57 >> 8);
  x60 = (uint8_t)(x59 & UINT8_C(0xff));
  x61 = (x59 >> 8);
  x62 = (uint8_t)(x61 & UINT8_C(0xff));
  x63 = (x61 >> 8);
  x64 = (uint8_t)(x63 & UINT8_C(0xff));
  x65 = (fiat_25519_uint1)(x63 >> 8);
  x66 = (x23 + (uint64_t)x65);
  x67 = (uint8_t)(x66 & UINT8_C(0xff));
  x68 = (x66 >> 8);
  x69 = (uint8_t)(x68 & UINT8_C(0xff));
  x70 = (x68 >> 8);
  x71 = (uint8_t)(x70 & UINT8_C(0xff));
  x72 = (x70 >> 8);
  x73 = (uint8_t)(x72 & UINT8_C(0xff));
  x74 = (x72 >> 8);
  x75 = (uint8_t)(x74 & UINT8_C(0xff));
  x76 = (x74 >> 8);
  x77 = (uint8_t)(x76 & UINT8_C(0xff));
  x78 = (uint8_t)(x76 >> 8);
  x79 = (x22 + (uint64_t)x78);
  x80 = (uint8_t)(x79 & UINT8_C(0xff));
  x81 = (x79 >> 8);
  x82 = (uint8_t)(x81 & UINT8_C(0xff));
  x83 = (x81 >> 8);
  x84 = (uint8_t)(x83 & UINT8_C(0xff));
  x85 = (x83 >> 8);
  x86 = (uint8_t)(x85 & UINT8_C(0xff));
  x87 = (x85 >> 8);
  x88 = (uint8_t)(x87 & UINT8_C(0xff));
  x89 = (x87 >> 8);
  x90 = (uint8_t)(x89 & UINT8_C(0xff));
  x91 = (uint8_t)(x89 >> 8);
  out1[0] = x26;
  out1[1] = x28;
  out1[2] = x30;
  out1[3] = x32;
  out1[4] = x34;
  out1[5] = x36;
  out1[6] = x39;
  out1[7] = x41;
  out1[8] = x43;
  out1[9] = x45;
  out1[10] = x47;
  out1[11] = x49;
  out1[12] = x52;
  out1[13] = x54;
  out1[14] = x56;
  out1[15] = x58;
  out1[16] = x60;
  out1[17] = x62;
  out1[18] = x64;
  out1[19] = x67;
  out1[20] = x69;
  out1[21] = x71;
  out1[22] = x73;
  out1[23] = x75;
  out1[24] = x77;
  out1[25] = x80;
  out1[26] = x82;
  out1[27] = x84;
  out1[28] = x86;
  out1[29] = x88;
  out1[30] = x90;
  out1[31] = x91;
}

/*
 * The function fiat_25519_from_bytes deserializes a field element from bytes in little-endian order.
*
* Postconditions:
*   eval out1 mod m = bytes_eval arg1 mod m
*
* Input Bounds:
*   arg1: bytes 0..30 each [0x0 ~> 0xff], byte 31 [0x0 ~> 0x7f]
*/
static FIAT_25519_FIAT_INLINE void fiat_25519_from_bytes(fiat_25519_tight_field_element out1, const uint8_t arg1[32]) {
  /* Autogenerated, formally verified code: do not hand-edit the arithmetic.
   * Each byte is shifted to its bit offset modulo the 51-bit limb radix; a
   * byte straddling a limb boundary contributes its high bits via the carry
   * extracted below (x40/x48/x57/x65). */
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5;
  uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10;
  uint64_t x11; uint64_t x12; uint64_t x13; uint64_t x14; uint64_t x15;
  uint64_t x16; uint64_t x17; uint64_t x18; uint64_t x19; uint64_t x20;
  uint64_t x21; uint64_t x22; uint64_t x23; uint64_t x24; uint64_t x25;
  uint64_t x26; uint64_t x27; uint64_t x28; uint64_t x29; uint64_t x30;
  uint64_t x31; uint8_t x32; uint64_t x33; uint64_t x34; uint64_t x35;
  uint64_t x36; uint64_t x37; uint64_t x38; uint64_t x39; uint8_t x40;
  uint64_t x41; uint64_t x42; uint64_t x43; uint64_t x44; uint64_t x45;
  uint64_t x46; uint64_t x47; uint8_t x48; uint64_t x49; uint64_t x50;
  uint64_t x51; uint64_t x52; uint64_t x53; uint64_t x54; uint64_t x55;
  uint64_t x56; uint8_t x57; uint64_t x58; uint64_t x59; uint64_t x60;
  uint64_t x61; uint64_t x62; uint64_t x63; uint64_t x64; uint8_t x65;
  uint64_t x66; uint64_t x67; uint64_t x68; uint64_t x69; uint64_t x70;
  uint64_t x71;
  x1 = ((uint64_t)(arg1[31]) << 44);
  x2 = ((uint64_t)(arg1[30]) << 36);
  x3 = ((uint64_t)(arg1[29]) << 28);
  x4 = ((uint64_t)(arg1[28]) << 20);
  x5 = ((uint64_t)(arg1[27]) << 12);
  x6 = ((uint64_t)(arg1[26]) << 4);
  x7 = ((uint64_t)(arg1[25]) << 47);
  x8 = ((uint64_t)(arg1[24]) << 39);
  x9 = ((uint64_t)(arg1[23]) << 31);
  x10 = ((uint64_t)(arg1[22]) << 23);
  x11 = ((uint64_t)(arg1[21]) << 15);
  x12 = ((uint64_t)(arg1[20]) << 7);
  x13 = ((uint64_t)(arg1[19]) << 50);
  x14 = ((uint64_t)(arg1[18]) << 42);
  x15 = ((uint64_t)(arg1[17]) << 34);
  x16 = ((uint64_t)(arg1[16]) << 26);
  x17 = ((uint64_t)(arg1[15]) << 18);
  x18 = ((uint64_t)(arg1[14]) << 10);
  x19 = ((uint64_t)(arg1[13]) << 2);
  x20 = ((uint64_t)(arg1[12]) << 45);
  x21 = ((uint64_t)(arg1[11]) << 37);
  x22 = ((uint64_t)(arg1[10]) << 29);
  x23 = ((uint64_t)(arg1[9]) << 21);
  x24 = ((uint64_t)(arg1[8]) << 13);
  x25 = ((uint64_t)(arg1[7]) << 5);
  x26 = ((uint64_t)(arg1[6]) << 48);
  x27 = ((uint64_t)(arg1[5]) << 40);
  x28 = ((uint64_t)(arg1[4]) << 32);
  x29 = ((uint64_t)(arg1[3]) << 24);
  x30 = ((uint64_t)(arg1[2]) << 16);
  x31 = ((uint64_t)(arg1[1]) << 8);
  x32 = (arg1[0]);
  x33 = (x31 + (uint64_t)x32);
  x34 = (x30 + x33);
  x35 = (x29 + x34);
  x36 = (x28 + x35);
  x37 = (x27 + x36);
  x38 = (x26 + x37);
  x39 = (x38 & UINT64_C(0x7ffffffffffff));
  x40 = (uint8_t)(x38 >> 51);
  x41 = (x25 + (uint64_t)x40);
  x42 = (x24 + x41);
  x43 = (x23 + x42);
  x44 = (x22 + x43);
  x45 = (x21 + x44);
  x46 = (x20 + x45);
  x47 = (x46 & UINT64_C(0x7ffffffffffff));
  x48 = (uint8_t)(x46 >> 51);
  x49 = (x19 + (uint64_t)x48);
  x50 = (x18 + x49);
  x51 = (x17 + x50);
  x52 = (x16 + x51);
  x53 = (x15 + x52);
  x54 = (x14 + x53);
  x55 = (x13 + x54);
  x56 = (x55 & UINT64_C(0x7ffffffffffff));
  x57 = (uint8_t)(x55 >> 51);
  x58 = (x12 + (uint64_t)x57);
  x59 = (x11 + x58);
  x60 = (x10 + x59);
  x61 = (x9 + x60);
  x62 = (x8 + x61);
  x63 = (x7 + x62);
  x64 = (x63 & UINT64_C(0x7ffffffffffff));
  x65 = (uint8_t)(x63 >> 51);
  x66 = (x6 + (uint64_t)x65);
  x67 = (x5 + x66);
  x68 = (x4 + x67);
  x69 = (x3 + x68);
  x70 = (x2 + x69);
  x71 = (x1 + x70);
  out1[0] = x39;
  out1[1] = x47;
  out1[2] = x56;
  out1[3] = x64;
  out1[4] = x71;
}

/* Not used in BoringSSL. */
#if 0

/*
 * The function fiat_25519_relax is the identity function converting from tight field elements to loose field elements.
*
* Postconditions:
*   out1 = arg1
*
*/
static FIAT_25519_FIAT_INLINE void fiat_25519_relax(fiat_25519_loose_field_element out1, const fiat_25519_tight_field_element arg1) {
  /* Identity copy: every tight element is also a valid loose element. */
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5;
  x1 = (arg1[0]);
  x2 = (arg1[1]);
  x3 = (arg1[2]);
  x4 = (arg1[3]);
  x5 = (arg1[4]);
  out1[0] = x1;
  out1[1] = x2;
  out1[2] = x3;
  out1[3] = x4;
  out1[4] = x5;
}

#endif

/*
 * The function fiat_25519_carry_scmul_121666 multiplies a field element by 121666 and reduces the result.
 *
 * Postconditions:
 *   eval out1 mod m = (121666 * eval arg1) mod m
 *
 */
static FIAT_25519_FIAT_INLINE void fiat_25519_carry_scmul_121666(fiat_25519_tight_field_element out1, const fiat_25519_loose_field_element arg1) {
  /* Autogenerated, formally verified code: do not hand-edit the arithmetic.
   * 0x1db42 == 121666; products are 128-bit, then one carry pass. */
  fiat_25519_uint128 x1; fiat_25519_uint128 x2; fiat_25519_uint128 x3;
  fiat_25519_uint128 x4; fiat_25519_uint128 x5; uint64_t x6; uint64_t x7;
  fiat_25519_uint128 x8; uint64_t x9; uint64_t x10; fiat_25519_uint128 x11;
  uint64_t x12; uint64_t x13; fiat_25519_uint128 x14; uint64_t x15;
  uint64_t x16; fiat_25519_uint128 x17; uint64_t x18; uint64_t x19;
  uint64_t x20; uint64_t x21; fiat_25519_uint1 x22; uint64_t x23;
  uint64_t x24; fiat_25519_uint1 x25; uint64_t x26; uint64_t x27;
  x1 = ((fiat_25519_uint128)UINT32_C(0x1db42) * (arg1[4]));
  x2 = ((fiat_25519_uint128)UINT32_C(0x1db42) * (arg1[3]));
  x3 = ((fiat_25519_uint128)UINT32_C(0x1db42) * (arg1[2]));
  x4 = ((fiat_25519_uint128)UINT32_C(0x1db42) * (arg1[1]));
  x5 = ((fiat_25519_uint128)UINT32_C(0x1db42) * (arg1[0]));
  x6 = (uint64_t)(x5 >> 51);
  x7 = (uint64_t)(x5 & UINT64_C(0x7ffffffffffff));
  x8 = (x6 + x4);
  x9 = (uint64_t)(x8 >> 51);
  x10 = (uint64_t)(x8 & UINT64_C(0x7ffffffffffff));
  x11 = (x9 + x3);
  x12 = (uint64_t)(x11 >> 51);
  x13 = (uint64_t)(x11 & UINT64_C(0x7ffffffffffff));
  x14 = (x12 + x2);
  x15 = (uint64_t)(x14 >> 51);
  x16 = (uint64_t)(x14 & UINT64_C(0x7ffffffffffff));
  x17 = (x15 + x1);
  x18 = (uint64_t)(x17 >> 51);
  x19 = (uint64_t)(x17 & UINT64_C(0x7ffffffffffff));
  /* Fold the top carry back into limb 0: 2^255 = 19 (mod m). */
  x20 = (x18 * UINT8_C(0x13));
  x21 = (x7 + x20);
  x22 = (fiat_25519_uint1)(x21 >> 51);
  x23 = (x21 & UINT64_C(0x7ffffffffffff));
  x24 = (x22 + x10);
  x25 = (fiat_25519_uint1)(x24 >> 51);
  x26 = (x24 & UINT64_C(0x7ffffffffffff));
  x27 = (x25 + x13);
  out1[0] = x23;
  out1[1] = x26;
  out1[2] = x27;
  out1[3] = x16;
  out1[4] = x19;
}


================================================
FILE: Sources/CNIOBoringSSL/third_party/fiat/curve25519_64_adx.h
================================================
/* NOTE(review): the #include directives below lost their header names during
 * extraction (only the bare "#include" tokens survive) — restore them from
 * the upstream BoringSSL third_party/fiat/curve25519_64_adx.h before use;
 * do not guess them here. */
#include
#include "../../crypto/internal.h"
#include
#include
#include

/* Four 64-bit limbs, radix 2^64 (saturated representation for the ADX path). */
typedef uint64_t fe4[4];
typedef uint8_t fiat_uint1;
typedef int8_t fiat_int1;

/* Optimization barrier: forces `a` through a register so the compiler cannot
 * specialize on its value. */
static __inline__ uint64_t fiat_value_barrier_u64(uint64_t a) {
  __asm__("" : "+r"(a) : /* no inputs */);
  return a;
}

/* Thin wrappers over the external ADX/BMI2 assembly multiply/square. */
__attribute__((target("adx,bmi2")))
static inline void fe4_mul(fe4 out, const fe4 x, const fe4 y) { fiat_curve25519_adx_mul(out, x, y); }

__attribute__((target("adx,bmi2")))
static inline void fe4_sq(fe4 out, const fe4 x) { fiat_curve25519_adx_square(out, x); }

/*
 * The function fiat_mulx_u64 is a multiplication, returning the full double-width result.
 *
 * Postconditions:
 *   out1 = (arg1 * arg2) mod 2^64
 *   out2 = ⌊arg1 * arg2 / 2^64⌋
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0xffffffffffffffff]
 *   arg2: [0x0 ~> 0xffffffffffffffff]
 * Output Bounds:
 *   out1: [0x0 ~> 0xffffffffffffffff]
 *   out2: [0x0 ~> 0xffffffffffffffff]
 */
__attribute__((target("adx,bmi2")))
static inline void fiat_mulx_u64(uint64_t* out1, uint64_t* out2, uint64_t arg1, uint64_t arg2) {
// NOTE: edited after generation
#if defined(_M_X64)
  unsigned long long t;
  *out1 = _umul128(arg1, arg2, &t);
  *out2 = t;
#elif defined(_M_ARM64)
  *out1 = arg1 * arg2;
  *out2 = __umulh(arg1, arg2);
#else
  unsigned __int128 t = (unsigned __int128)arg1 * arg2;
  *out1 = t;
  *out2 = (t >> 64);
#endif
}

/*
 * The function fiat_addcarryx_u64 is an addition with carry.
* * Postconditions: * out1 = (arg1 + arg2 + arg3) mod 2^64 * out2 = ⌊(arg1 + arg2 + arg3) / 2^64⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0xffffffffffffffff] * arg3: [0x0 ~> 0xffffffffffffffff] * Output Bounds: * out1: [0x0 ~> 0xffffffffffffffff] * out2: [0x0 ~> 0x1] */ __attribute__((target("adx,bmi2"))) static inline void fiat_addcarryx_u64(uint64_t* out1, fiat_uint1* out2, fiat_uint1 arg1, uint64_t arg2, uint64_t arg3) { // NOTE: edited after generation #if defined(__has_builtin) # if __has_builtin(__builtin_ia32_addcarryx_u64) # define addcarry64 __builtin_ia32_addcarryx_u64 # endif #endif #if defined(addcarry64) long long unsigned int t; *out2 = addcarry64(arg1, arg2, arg3, &t); *out1 = t; #elif defined(_M_X64) long long unsigned int t; *out2 = _addcarry_u64(arg1, arg2, arg3, out1); *out1 = t; #else arg2 += arg1; arg1 = arg2 < arg1; uint64_t ret = arg2 + arg3; arg1 += ret < arg2; *out1 = ret; *out2 = arg1; #endif #undef addcarry64 } /* * The function fiat_subborrowx_u64 is a subtraction with borrow. 
* * Postconditions: * out1 = (-arg1 + arg2 + -arg3) mod 2^64 * out2 = -⌊(-arg1 + arg2 + -arg3) / 2^64⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0xffffffffffffffff] * arg3: [0x0 ~> 0xffffffffffffffff] * Output Bounds: * out1: [0x0 ~> 0xffffffffffffffff] * out2: [0x0 ~> 0x1] */ __attribute__((target("adx,bmi2"))) static inline void fiat_subborrowx_u64(uint64_t* out1, fiat_uint1* out2, fiat_uint1 arg1, uint64_t arg2, uint64_t arg3) { #if defined(__has_builtin) # if __has_builtin(__builtin_ia32_subborrow_u64) # define subborrow64 __builtin_ia32_subborrow_u64 # endif #endif #if defined(subborrow64) long long unsigned int t; *out2 = subborrow64(arg1, arg2, arg3, &t); *out1 = t; #elif defined(_M_X64) long long unsigned int t; *out2 = _subborrow_u64(arg1, arg2, arg3, &t); // NOTE: edited after generation *out1 = t; #else *out1 = arg2 - arg3 - arg1; *out2 = (arg2 < arg3) | ((arg2 == arg3) & arg1); #endif #undef subborrow64 } /* * The function fiat_cmovznz_u64 is a single-word conditional move. 
* Postconditions:
*   out1 = (if arg1 = 0 then arg2 else arg3)
*
* Input Bounds:
*   arg1: [0x0 ~> 0x1]
*   arg2: [0x0 ~> 0xffffffffffffffff]
*   arg3: [0x0 ~> 0xffffffffffffffff]
* Output Bounds:
*   out1: [0x0 ~> 0xffffffffffffffff]
*/
__attribute__((target("adx,bmi2"))) static inline void fiat_cmovznz_u64(uint64_t* out1, fiat_uint1 arg1, uint64_t arg2, uint64_t arg3) {
  fiat_uint1 x1;
  uint64_t x2;
  uint64_t x3;
  x1 = (!(!arg1));  // normalize the condition to 0/1
  x2 = ((fiat_int1)(0x0 - x1) & UINT64_C(0xffffffffffffffff));  // all-zeros or all-ones mask
  // Branchless select; the value barrier keeps the compiler from collapsing
  // this back into a data-dependent branch.
  x3 = ((fiat_value_barrier_u64(x2) & arg3) | (fiat_value_barrier_u64((~x2)) & arg2));
  *out1 = x3;
}

/*
 * fe4_add: addition mod 2^255-19 on saturated 4x64-bit limbs.  A carry out of
 * bit 256 is folded back in as a multiple of 0x26 = 38, since
 * 2^256 ≡ 38 (mod 2^255-19).
 *
 * Input Bounds:
 *   arg1, arg2: [[0x0 ~> 0xffffffffffffffff] x 4]
 * Output Bounds:
 *   out1: [[0x0 ~> 0xffffffffffffffff] x 4]
 */
__attribute__((target("adx,bmi2"))) static void fe4_add(uint64_t out1[4], const uint64_t arg1[4], const uint64_t arg2[4]) {
  uint64_t x1;
  fiat_uint1 x2;
  uint64_t x3;
  fiat_uint1 x4;
  uint64_t x5;
  fiat_uint1 x6;
  uint64_t x7;
  fiat_uint1 x8;
  uint64_t x9;
  uint64_t x10;
  fiat_uint1 x11;
  uint64_t x12;
  fiat_uint1 x13;
  uint64_t x14;
  fiat_uint1 x15;
  uint64_t x16;
  fiat_uint1 x17;
  uint64_t x18;
  uint64_t x19;
  fiat_uint1 x20;
  fiat_addcarryx_u64(&x1, &x2, 0x0, (arg1[0]), (arg2[0]));
  fiat_addcarryx_u64(&x3, &x4, x2, (arg1[1]), (arg2[1]));
  fiat_addcarryx_u64(&x5, &x6, x4, (arg1[2]), (arg2[2]));
  fiat_addcarryx_u64(&x7, &x8, x6, (arg1[3]), (arg2[3]));
  fiat_cmovznz_u64(&x9, x8, 0x0, UINT8_C(0x26));  // NOTE: clang 14 for Zen 2 uses sbb, and … (rest of upstream note lost in this copy)
  fiat_addcarryx_u64(&x10, &x11, 0x0, x1, x9);
  fiat_addcarryx_u64(&x12, &x13, x11, x3, 0x0);
  fiat_addcarryx_u64(&x14, &x15, x13, x5, 0x0);
  fiat_addcarryx_u64(&x16, &x17, x15, x7, 0x0);
  fiat_cmovznz_u64(&x18, x17, 0x0, UINT8_C(0x26));  // NOTE: clang 14 for Zen 2 uses sbb, and … (rest of upstream note lost in this copy)
  fiat_addcarryx_u64(&x19, &x20, 0x0, x10, x18);
  out1[0] = x19;
  out1[1] = x12;
  out1[2] = x14;
  out1[3] = x16;
}

/*
 * fe4_sub: subtraction mod 2^255-19 on saturated 4x64-bit limbs; a borrow out
 * of the top is repaid by subtracting 0x26 = 38 (see fe4_add).
 *
 * Input Bounds:
 *   arg1, arg2: [[0x0 ~> 0xffffffffffffffff] x 4]
 * Output Bounds:
 *   out1: [[0x0 ~> 0xffffffffffffffff] x 4]
 */
__attribute__((target("adx,bmi2"))) static void fe4_sub(uint64_t out1[4], const uint64_t arg1[4], const uint64_t arg2[4]) {
  uint64_t x1;
  uint64_t x2;
  fiat_uint1 x3;
  uint64_t x4;
  uint64_t x5;
  fiat_uint1 x6;
  uint64_t x7;
  uint64_t x8;
  fiat_uint1 x9;
  uint64_t x10;
  uint64_t x11;
  fiat_uint1 x12;
  uint64_t x13;
  uint64_t x14;
  fiat_uint1 x15;
  uint64_t x16;
  fiat_uint1 x17;
  uint64_t x18;
  fiat_uint1 x19;
  uint64_t x20;
  fiat_uint1 x21;
  uint64_t x22;
  uint64_t x23;
  fiat_uint1 x24;
  x1 = (arg2[0]);
  fiat_subborrowx_u64(&x2, &x3, 0x0, (arg1[0]), x1);
  x4 = (arg2[1]);
  fiat_subborrowx_u64(&x5, &x6, x3, (arg1[1]), x4);
  x7 = (arg2[2]);
  fiat_subborrowx_u64(&x8, &x9, x6, (arg1[2]), x7);
  x10 = (arg2[3]);
  fiat_subborrowx_u64(&x11, &x12, x9, (arg1[3]), x10);
  fiat_cmovznz_u64(&x13, x12, 0x0, UINT8_C(0x26));  // NOTE: clang 14 for Zen 2 uses sbb, and … (rest of upstream note lost in this copy)
  fiat_subborrowx_u64(&x14, &x15, 0x0, x2, x13);
  fiat_subborrowx_u64(&x16, &x17, x15, x5, 0x0);
  fiat_subborrowx_u64(&x18, &x19, x17, x8, 0x0);
  fiat_subborrowx_u64(&x20, &x21, x19, x11, 0x0);
  fiat_cmovznz_u64(&x22, x21, 0x0, UINT8_C(0x26));  // NOTE: clang 14 for Zen 2 uses sbb, and … (rest of upstream note lost in this copy)
  fiat_subborrowx_u64(&x23, &x24, 0x0, x14, x22);
  out1[0] = x23;
  out1[1] = x16;
  out1[2] = x18;
  out1[3] = x20;
}

/*
 * fe4_scmul: multiply a field element by a small scalar.
 *
 * Input Bounds:
 *   arg1: [[0x0 ~> 0xffffffffffffffff] x 4]
 *   arg2: [0x0 ~> 0x3ffffffffffffff] // NOTE: this is not any uint64!
* Output Bounds:
*   out1: [[0x0 ~> 0xffffffffffffffff] x 4]
*/
__attribute__((target("adx,bmi2"))) static void fe4_scmul(uint64_t out1[4], const uint64_t arg1[4], uint64_t arg2) {
  uint64_t x1;
  uint64_t x2;
  uint64_t x3;
  uint64_t x4;
  uint64_t x5;
  fiat_uint1 x6;
  uint64_t x7;
  uint64_t x8;
  uint64_t x9;
  fiat_uint1 x10;
  uint64_t x11;
  uint64_t x12;
  uint64_t x13;
  fiat_uint1 x14;
  uint64_t x15;
  uint64_t x16;
  uint64_t x17;
  fiat_uint1 x18;
  uint64_t x19;
  fiat_uint1 x20;
  uint64_t x21;
  fiat_uint1 x22;
  uint64_t x23;
  fiat_uint1 x24;
  uint64_t x25;
  uint64_t x26;
  fiat_uint1 x27;
  // Schoolbook limb-by-scalar products with a running carry chain.
  fiat_mulx_u64(&x1, &x2, (arg1[0]), arg2);
  fiat_mulx_u64(&x3, &x4, (arg1[1]), arg2);
  fiat_addcarryx_u64(&x5, &x6, 0x0, x2, x3);
  fiat_mulx_u64(&x7, &x8, (arg1[2]), arg2);
  fiat_addcarryx_u64(&x9, &x10, x6, x4, x7);
  fiat_mulx_u64(&x11, &x12, (arg1[3]), arg2);
  fiat_addcarryx_u64(&x13, &x14, x10, x8, x11);
  // Fold the overflow above 2^256 back in as a multiple of 38 (0x26).
  fiat_mulx_u64(&x15, &x16, (x12 + (uint64_t)x14), UINT8_C(0x26));
  fiat_addcarryx_u64(&x17, &x18, 0x0, x1, x15);
  fiat_addcarryx_u64(&x19, &x20, x18, x5, 0x0);
  fiat_addcarryx_u64(&x21, &x22, x20, x9, 0x0);
  fiat_addcarryx_u64(&x23, &x24, x22, x13, 0x0);
  fiat_cmovznz_u64(&x25, x24, 0x0, UINT8_C(0x26));  // NOTE: clang 14 for Zen 2 uses sbb, and … (rest of upstream note lost in this copy)
  fiat_addcarryx_u64(&x26, &x27, 0x0, x17, x25);
  out1[0] = x26;
  out1[1] = x19;
  out1[2] = x21;
  out1[3] = x23;
}

/*
 * fe4_canon: canonicalize to the unique representative in [0, 2^255-19) by
 * conditionally subtracting the prime p = 2^255-19 (limbs 0xffffffffffffffed,
 * 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffffff) up to twice.
 *
 * Input Bounds:
 *   arg1: [[0x0 ~> 0xffffffffffffffff] x 4]
 * Output Bounds:
 *   out1: [[0x0 ~> 0xffffffffffffffff] x 4]
 */
__attribute__((target("adx,bmi2"))) static void fe4_canon(uint64_t out1[4], const uint64_t arg1[4]) {
  uint64_t x1;
  fiat_uint1 x2;
  uint64_t x3;
  fiat_uint1 x4;
  uint64_t x5;
  fiat_uint1 x6;
  uint64_t x7;
  fiat_uint1 x8;
  uint64_t x9;
  uint64_t x10;
  uint64_t x11;
  uint64_t x12;
  uint64_t x13;
  fiat_uint1 x14;
  uint64_t x15;
  fiat_uint1 x16;
  uint64_t x17;
  fiat_uint1 x18;
  uint64_t x19;
  fiat_uint1 x20;
  uint64_t x21;
  uint64_t x22;
  uint64_t x23;
  uint64_t x24;
  // First conditional subtraction of p.
  fiat_subborrowx_u64(&x1, &x2, 0x0, (arg1[0]), UINT64_C(0xffffffffffffffed));
  fiat_subborrowx_u64(&x3, &x4, x2, (arg1[1]), UINT64_C(0xffffffffffffffff));
  fiat_subborrowx_u64(&x5, &x6, x4, (arg1[2]), UINT64_C(0xffffffffffffffff));
  fiat_subborrowx_u64(&x7, &x8, x6, (arg1[3]), UINT64_C(0x7fffffffffffffff));
  fiat_cmovznz_u64(&x9, x8, x1, (arg1[0]));
  fiat_cmovznz_u64(&x10, x8, x3, (arg1[1]));
  fiat_cmovznz_u64(&x11, x8, x5, (arg1[2]));
  fiat_cmovznz_u64(&x12, x8, x7, (arg1[3]));
  // Second conditional subtraction of p.
  fiat_subborrowx_u64(&x13, &x14, 0x0, x9, UINT64_C(0xffffffffffffffed));
  fiat_subborrowx_u64(&x15, &x16, x14, x10, UINT64_C(0xffffffffffffffff));
  fiat_subborrowx_u64(&x17, &x18, x16, x11, UINT64_C(0xffffffffffffffff));
  fiat_subborrowx_u64(&x19, &x20, x18, x12, UINT64_C(0x7fffffffffffffff));
  fiat_cmovznz_u64(&x21, x20, x13, x9);
  fiat_cmovznz_u64(&x22, x20, x15, x10);
  fiat_cmovznz_u64(&x23, x20, x17, x11);
  fiat_cmovznz_u64(&x24, x20, x19, x12);
  out1[0] = x21;
  out1[1] = x22;
  out1[2] = x23;
  out1[3] = x24;
}

/*
 * fe4_cswap: constant-time conditional swap.  (out1, out2) = (arg3, arg2) if
 * arg1 is 1, else (arg2, arg3).  Built from word-level cmovznz.
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0x1]
 *   arg2, arg3: [[0x0 ~> 0xffffffffffffffff] x 4]
 * Output Bounds:
 *   out1, out2: [[0x0 ~> 0xffffffffffffffff] x 4]
 */
__attribute__((target("adx,bmi2"))) static void fe4_cswap(uint64_t out1[4], uint64_t out2[4], fiat_uint1 arg1, const uint64_t arg2[4], const uint64_t arg3[4]) {
  uint64_t x1;
  uint64_t x2;
  uint64_t x3;
  uint64_t x4;
  uint64_t x5;
  uint64_t x6;
  uint64_t x7;
  uint64_t x8;
  // NOTE: clang 14 for Zen 2 uses YMM registers
  fiat_cmovznz_u64(&x1, arg1, (arg2[0]), (arg3[0]));
  fiat_cmovznz_u64(&x2, arg1, (arg2[1]), (arg3[1]));
  fiat_cmovznz_u64(&x3, arg1, (arg2[2]), (arg3[2]));
  fiat_cmovznz_u64(&x4, arg1, (arg2[3]), (arg3[3]));
  fiat_cmovznz_u64(&x5, arg1, (arg3[0]), (arg2[0]));
  fiat_cmovznz_u64(&x6, arg1, (arg3[1]), (arg2[1]));
  fiat_cmovznz_u64(&x7, arg1, (arg3[2]), (arg2[2]));
  fiat_cmovznz_u64(&x8, arg1, (arg3[3]), (arg2[3]));
  out1[0] = x1;
  out1[1] = x2;
  out1[2] = x3;
  out1[3] = x4;
  out2[0] = x5;
  out2[1] = x6;
  out2[2] = x7;
  out2[3] = x8;
}

// The following functions are adapted from crypto/curve25519/curve25519.c
// It would be desirable to share the code, but with the current field
// implementations both 4-limb and 5-limb versions of the curve-level code need
// to be included in builds targeting an unknown variant of x86_64.

// fe4_invert: field inversion via addition-chain exponentiation (Fermat's
// little theorem); structure mirrors the 5-limb fe_invert referenced above.
__attribute__((target("adx,bmi2"))) static void fe4_invert(fe4 out, const fe4 z) {
  fe4 t0;
  fe4 t1;
  fe4 t2;
  fe4 t3;
  int i;

  fe4_sq(t0, z);
  fe4_sq(t1, t0);
  for (i = 1; i < 2; ++i) {
    fe4_sq(t1, t1);
  }
  fe4_mul(t1, z, t1);
  fe4_mul(t0, t0, t1);
  fe4_sq(t2, t0);
  fe4_mul(t1, t1, t2);
  fe4_sq(t2, t1);
  for (i = 1; i < 5; ++i) {
    fe4_sq(t2, t2);
  }
  fe4_mul(t1, t2, t1);
  fe4_sq(t2, t1);
  for (i = 1; i < 10; ++i) {
    fe4_sq(t2, t2);
  }
  fe4_mul(t2, t2, t1);
  fe4_sq(t3, t2);
  for (i = 1; i < 20; ++i) {
    fe4_sq(t3, t3);
  }
  fe4_mul(t2, t3, t2);
  fe4_sq(t2, t2);
  for (i = 1; i < 10; ++i) {
    fe4_sq(t2, t2);
  }
  fe4_mul(t1, t2, t1);
  fe4_sq(t2, t1);
  for (i = 1; i < 50; ++i) {
    fe4_sq(t2, t2);
  }
  fe4_mul(t2, t2, t1);
  fe4_sq(t3, t2);
  for (i = 1; i < 100; ++i) {
    fe4_sq(t3, t3);
  }
  fe4_mul(t2, t3, t2);
  fe4_sq(t2, t2);
  for (i = 1; i < 50; ++i) {
    fe4_sq(t2, t2);
  }
  fe4_mul(t1, t2, t1);
  fe4_sq(t1, t1);
  for (i = 1; i < 5; ++i) {
    fe4_sq(t1, t1);
  }
  fe4_mul(out, t1, t0);
}

// X25519 scalar multiplication (Montgomery ladder) on 4-limb field elements.
__attribute__((target("adx,bmi2"))) void x25519_scalar_mult_adx(uint8_t out[32], const uint8_t scalar[32], const uint8_t point[32]) {
  uint8_t e[32];
  OPENSSL_memcpy(e, scalar, 32);
  // RFC 7748 scalar clamping.
  e[0] &= 248;
  e[31] &= 127;
  e[31] |= 64;
  // The following implementation was transcribed to Coq and proven to
  // correspond to unary scalar multiplication in affine coordinates given that
  // x1 != 0 is the x coordinate of some point on the curve. It was also checked
  // in Coq that doing a ladderstep with x1 = x3 = 0 gives z2' = z3' = 0, and z2
  // = z3 = 0 gives z2' = z3' = 0. The statement was quantified over the
  // underlying field, so it applies to Curve25519 itself and the quadratic
  // twist of Curve25519. It was not proven in Coq that prime-field arithmetic
  // correctly simulates extension-field arithmetic on prime-field values.
  // The decoding of the byte array representation of e was not considered.
  // (NOTE(review): the reference URLs that accompanied this comment upstream
  // were lost in extraction.)
  // Specification of Montgomery curves in affine coordinates:
  //
  // Proof that these form a group that is isomorphic to a Weierstrass curve:
  //
  // Coq transcription and correctness proof of the loop (where scalarbits=255):
  //
  //
  // preconditions: 0 <= e < 2^255 (not necessarily e < order), fe_invert(0) = 0
  fe4 x1, x2 = {1}, z2 = {0}, x3, z3 = {1}, tmp0, tmp1;
  OPENSSL_memcpy(x1, point, sizeof(fe4));
  x1[3] &= (uint64_t)(-1)>>1;  // mask off the high bit, per RFC 7748
  OPENSSL_memcpy(x3, x1, sizeof(fe4));

  unsigned swap = 0;
  int pos;
  for (pos = 254; pos >= 0; --pos) {
    // loop invariant as of right before the test, for the case where x1 != 0:
    //   pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3 is nonzero
    //   let r := e >> (pos+1) in the following equalities of projective points:
    //   to_xz (r*P)     === if swap then (x3, z3) else (x2, z2)
    //   to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3)
    //   x1 is the nonzero x coordinate of the nonzero point (r*P-(r+1)*P)
    unsigned b = 1 & (e[pos / 8] >> (pos & 7));
    swap ^= b;
    fe4_cswap(x2, x3, swap, x2, x3);
    fe4_cswap(z2, z3, swap, z2, z3);
    swap = b;
    // Coq transcription of ladderstep formula (called from transcribed loop):
    // (NOTE(review): upstream URLs lost in extraction; cases x1 != 0 and
    // x1 = 0 were verified separately.)
    fe4_sub(tmp0, x3, z3);
    fe4_sub(tmp1, x2, z2);
    fe4_add(x2, x2, z2);
    fe4_add(z2, x3, z3);
    fe4_mul(z3, tmp0, x2);
    fe4_mul(z2, z2, tmp1);
    fe4_sq(tmp0, tmp1);
    fe4_sq(tmp1, x2);
    fe4_add(x3, z3, z2);
    fe4_sub(z2, z3, z2);
    fe4_mul(x2, tmp1, tmp0);
    fe4_sub(tmp1, tmp1, tmp0);
    fe4_sq(z2, z2);
    fe4_scmul(z3, tmp1, 121666);
    fe4_sq(x3, x3);
    fe4_add(tmp0, tmp0, z3);
    fe4_mul(z3, x1, z2);
    fe4_mul(z2, tmp1, tmp0);
  }
  // here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3) else (x2, z2)
  fe4_cswap(x2, x3, swap, x2, x3);
  fe4_cswap(z2, z3, swap, z2, z3);

  fe4_invert(z2, z2);
  fe4_mul(x2, x2, z2);
  fe4_canon(x2, x2);
  OPENSSL_memcpy(out, x2, sizeof(fe4));
}

// Extended twisted-Edwards point (X:Y:Z:T) with T = XY/Z, on 4-limb elements.
typedef struct {
  fe4 X;
  fe4 Y;
  fe4 Z;
  fe4 T;
} ge_p3_4;

// Precomputed affine point in (y+x, y-x, 2dxy) form.
typedef struct {
  fe4 yplusx;
  fe4 yminusx;
  fe4 xy2d;
} ge_precomp_4;

// Point doubling; when skip_t is set the extended coordinate T is not
// computed (valid when the result only feeds another doubling).
__attribute__((target("adx,bmi2"))) static void inline_x25519_ge_dbl_4(ge_p3_4 *r, const ge_p3_4 *p, bool skip_t) {
  // Transcribed from a Coq function proven against affine coordinates.
  // https://github.com/mit-plv/fiat-crypto/blob/9943ba9e7d8f3e1c0054b2c94a5edca46ea73ef8/src/Curves/Edwards/XYZT/Basic.v#L136-L165
  fe4 trX, trZ, trT, t0, cX, cY, cZ, cT;
  fe4_sq(trX, p->X);
  fe4_sq(trZ, p->Y);
  fe4_sq(trT, p->Z);
  fe4_add(trT, trT, trT);
  fe4_add(cY, p->X, p->Y);
  fe4_sq(t0, cY);
  fe4_add(cY, trZ, trX);
  fe4_sub(cZ, trZ, trX);
  fe4_sub(cX, t0, cY);
  fe4_sub(cT, trT, cZ);
  fe4_mul(r->X, cX, cT);
  fe4_mul(r->Y, cY, cZ);
  fe4_mul(r->Z, cZ, cT);
  if (!skip_t) {
    fe4_mul(r->T, cX, cY);
  }
}

// Mixed addition: extended point + precomputed affine point.
__attribute__((target("adx,bmi2")))
__attribute__((always_inline))  // 4% speedup with clang14 and zen2
static inline void
ge_p3_add_p3_precomp_4(ge_p3_4 *r, const ge_p3_4 *p, const ge_precomp_4 *q) {
  fe4 A, B, C, YplusX, YminusX, D, X3, Y3, Z3, T3;
  // Transcribed from a Coq function proven against affine coordinates.
  // https://github.com/mit-plv/fiat-crypto/blob/a36568d1d73aff5d7accc79fd28be672882f9c17/src/Curves/Edwards/XYZT/Precomputed.v#L38-L56
  fe4_add(YplusX, p->Y, p->X);
  fe4_sub(YminusX, p->Y, p->X);
  fe4_mul(A, YplusX, q->yplusx);
  fe4_mul(B, YminusX, q->yminusx);
  fe4_mul(C, q->xy2d, p->T);
  fe4_add(D, p->Z, p->Z);
  fe4_sub(X3, A, B);
  fe4_add(Y3, A, B);
  fe4_add(Z3, D, C);
  fe4_sub(T3, D, C);
  fe4_mul(r->X, X3, T3);
  fe4_mul(r->Y, Y3, Z3);
  fe4_mul(r->Z, Z3, T3);
  fe4_mul(r->T, X3, Y3);
}

// Constant-time lookup of entry |b| (signed, -8..8) from precomputed table
// row |pos|, negating the point when b is negative.
__attribute__((always_inline))  // 25% speedup with clang14 and zen2
static inline void table_select_4(ge_precomp_4 *t, const int pos,
                                  const signed char b) {
  uint8_t bnegative = constant_time_msb_w(b);
  uint8_t babs = b - ((bnegative & b) << 1);  // |b|, branchlessly
  // NOTE(review): the template argument of static_cast (presumably
  // <uint8_t>) appears to have been lost in extraction — confirm upstream.
  uint8_t t_bytes[3][32] = {
      {static_cast(constant_time_is_zero_w(b) & 1)},
      {static_cast(constant_time_is_zero_w(b) & 1)},
      {0},
  };
#if defined(__clang__)
  __asm__("" : "+m" (t_bytes) : /*no inputs*/);
#endif
  static_assert(sizeof(t_bytes) == sizeof(k25519Precomp[pos][0]), "");
  // XOR-accumulate exactly one table entry, selected in constant time.
  for (int i = 0; i < 8; i++) {
    constant_time_conditional_memxor(t_bytes, k25519Precomp[pos][i],
                                     sizeof(t_bytes),
                                     constant_time_eq_w(babs, 1 + i));
  }
  static_assert(sizeof(t_bytes) == sizeof(ge_precomp_4), "");
  // fe4 uses saturated 64-bit limbs, so converting from bytes is just a copy.
  OPENSSL_memcpy(t, t_bytes, sizeof(ge_precomp_4));
  // Conditionally negate: swap yplusx/yminusx and negate xy2d.
  fe4 xy2d_neg = {0};
  fe4_sub(xy2d_neg, xy2d_neg, t->xy2d);
  constant_time_conditional_memcpy(t->yplusx, t_bytes[1], sizeof(fe4), bnegative);
  constant_time_conditional_memcpy(t->yminusx, t_bytes[0], sizeof(fe4), bnegative);
  constant_time_conditional_memcpy(t->xy2d, xy2d_neg, sizeof(fe4), bnegative);
}

// h = a * B
// where a = a[0]+256*a[1]+...+256^31 a[31]
// B is the Ed25519 base point (x,4/5) with x positive.
// // Preconditions: // a[31] <= 127 __attribute__((target("adx,bmi2"))) void x25519_ge_scalarmult_base_adx(uint8_t h[4][32], const uint8_t a[32]) { signed char e[64]; signed char carry; for (unsigned i = 0; i < 32; ++i) { e[2 * i + 0] = (a[i] >> 0) & 15; e[2 * i + 1] = (a[i] >> 4) & 15; } // each e[i] is between 0 and 15 // e[63] is between 0 and 7 carry = 0; for (unsigned i = 0; i < 63; ++i) { e[i] += carry; carry = e[i] + 8; carry >>= 4; e[i] -= carry << 4; } e[63] += carry; // each e[i] is between -8 and 8 ge_p3_4 r = {{0}, {1}, {1}, {0}}; for (unsigned i = 1; i < 64; i += 2) { ge_precomp_4 t; table_select_4(&t, i / 2, e[i]); ge_p3_add_p3_precomp_4(&r, &r, &t); } inline_x25519_ge_dbl_4(&r, &r, /*skip_t=*/true); inline_x25519_ge_dbl_4(&r, &r, /*skip_t=*/true); inline_x25519_ge_dbl_4(&r, &r, /*skip_t=*/true); inline_x25519_ge_dbl_4(&r, &r, /*skip_t=*/false); for (unsigned i = 0; i < 64; i += 2) { ge_precomp_4 t; table_select_4(&t, i / 2, e[i]); ge_p3_add_p3_precomp_4(&r, &r, &t); } // fe4 uses saturated 64-bit limbs, so converting to bytes is just a copy. 
// Satisfy stated precondition of fiat_25519_from_bytes; tests pass either way fe4_canon(r.X, r.X); fe4_canon(r.Y, r.Y); fe4_canon(r.Z, r.Z); fe4_canon(r.T, r.T); static_assert(sizeof(ge_p3_4) == sizeof(uint8_t[4][32]), ""); OPENSSL_memcpy(h, &r, sizeof(ge_p3_4)); } ================================================ FILE: Sources/CNIOBoringSSL/third_party/fiat/curve25519_64_msvc.h ================================================ /* Autogenerated: 'src/ExtractionOCaml/unsaturated_solinas' --inline --static --use-value-barrier --no-wide-int 25519 64 '(auto)' '2^255 - 19' carry_mul carry_square carry add sub opp selectznz to_bytes from_bytes relax carry_scmul121666 */ /* curve description: 25519 */ /* machine_wordsize = 64 (from "64") */ /* requested operations: carry_mul, carry_square, carry, add, sub, opp, selectznz, to_bytes, from_bytes, relax, carry_scmul121666 */ /* n = 5 (from "(auto)") */ /* s-c = 2^255 - [(1, 19)] (from "2^255 - 19") */ /* tight_bounds_multiplier = 1 (from "") */ /* */ /* Computed values: */ /* carry_chain = [0, 1, 2, 3, 4, 0, 1] */ /* eval z = z[0] + (z[1] << 51) + (z[2] << 102) + (z[3] << 153) + (z[4] << 204) */ /* bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) */ /* balance = [0xfffffffffffda, 0xffffffffffffe, 0xffffffffffffe, 0xffffffffffffe, 0xffffffffffffe] */ #include #include #if defined(_M_X64) #include #endif typedef unsigned char fiat_25519_uint1; typedef signed char fiat_25519_int1; #define FIAT_25519_FIAT_INLINE inline /* The type fiat_25519_loose_field_element is 
a field element with loose bounds. */ /* Bounds: [[0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000], [0x0 ~> 0x18000000000000]] */ typedef uint64_t fiat_25519_loose_field_element[5]; /* The type fiat_25519_tight_field_element is a field element with tight bounds. */ /* Bounds: [[0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000], [0x0 ~> 0x8000000000000]] */ typedef uint64_t fiat_25519_tight_field_element[5]; #if (-1 & 3) != 3 #error "This code only works on a two's complement system" #endif #define fiat_25519_value_barrier_u64(x) (x) /* * The function fiat_25519_addcarryx_u64 is an addition with carry. * * Postconditions: * out1 = (arg1 + arg2 + arg3) mod 2^64 * out2 = ⌊(arg1 + arg2 + arg3) / 2^64⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0xffffffffffffffff] * arg3: [0x0 ~> 0xffffffffffffffff] * Output Bounds: * out1: [0x0 ~> 0xffffffffffffffff] * out2: [0x0 ~> 0x1] */ static FIAT_25519_FIAT_INLINE void fiat_25519_addcarryx_u64(uint64_t* out1, fiat_25519_uint1* out2, fiat_25519_uint1 arg1, uint64_t arg2, uint64_t arg3) { // NOTE: edited after generation #if defined(_M_X64) *out2 = _addcarry_u64(arg1, arg2, arg3, out1); #else arg2 += arg1; arg1 = arg2 < arg1; arg3 += arg2; arg1 += arg3 < arg2; *out1 = arg3; *out2 = arg1; #endif } /* * The function fiat_25519_subborrowx_u64 is a subtraction with borrow. 
* * Postconditions: * out1 = (-arg1 + arg2 + -arg3) mod 2^64 * out2 = -⌊(-arg1 + arg2 + -arg3) / 2^64⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0xffffffffffffffff] * arg3: [0x0 ~> 0xffffffffffffffff] * Output Bounds: * out1: [0x0 ~> 0xffffffffffffffff] * out2: [0x0 ~> 0x1] */ static FIAT_25519_FIAT_INLINE void fiat_25519_subborrowx_u64(uint64_t* out1, fiat_25519_uint1* out2, fiat_25519_uint1 arg1, uint64_t arg2, uint64_t arg3) { #if defined(_M_X64) *out2 = _subborrow_u64(arg1, arg2, arg3, out1); // NOTE: edited after generation #else *out1 = arg2 - arg3 - arg1; *out2 = (arg2 < arg3) | ((arg2 == arg3) & arg1); #endif } /* * The function fiat_25519_addcarryx_u51 is an addition with carry. * * Postconditions: * out1 = (arg1 + arg2 + arg3) mod 2^51 * out2 = ⌊(arg1 + arg2 + arg3) / 2^51⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0x7ffffffffffff] * arg3: [0x0 ~> 0x7ffffffffffff] * Output Bounds: * out1: [0x0 ~> 0x7ffffffffffff] * out2: [0x0 ~> 0x1] */ static FIAT_25519_FIAT_INLINE void fiat_25519_addcarryx_u51(uint64_t* out1, fiat_25519_uint1* out2, fiat_25519_uint1 arg1, uint64_t arg2, uint64_t arg3) { uint64_t x1; uint64_t x2; fiat_25519_uint1 x3; x1 = ((arg1 + arg2) + arg3); x2 = (x1 & UINT64_C(0x7ffffffffffff)); x3 = (fiat_25519_uint1)(x1 >> 51); *out1 = x2; *out2 = x3; } /* * The function fiat_25519_subborrowx_u51 is a subtraction with borrow. 
* * Postconditions: * out1 = (-arg1 + arg2 + -arg3) mod 2^51 * out2 = -⌊(-arg1 + arg2 + -arg3) / 2^51⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0x7ffffffffffff] * arg3: [0x0 ~> 0x7ffffffffffff] * Output Bounds: * out1: [0x0 ~> 0x7ffffffffffff] * out2: [0x0 ~> 0x1] */ static FIAT_25519_FIAT_INLINE void fiat_25519_subborrowx_u51(uint64_t* out1, fiat_25519_uint1* out2, fiat_25519_uint1 arg1, uint64_t arg2, uint64_t arg3) { int64_t x1; fiat_25519_int1 x2; uint64_t x3; x1 = ((int64_t)(arg2 - (int64_t)arg1) - (int64_t)arg3); x2 = (fiat_25519_int1)(x1 >> 51); x3 = (x1 & UINT64_C(0x7ffffffffffff)); *out1 = x3; *out2 = (fiat_25519_uint1)(0x0 - x2); } /* * The function fiat_25519_mulx_u64 is a multiplication, returning the full double-width result. * * Postconditions: * out1 = (arg1 * arg2) mod 2^64 * out2 = ⌊arg1 * arg2 / 2^64⌋ * * Input Bounds: * arg1: [0x0 ~> 0xffffffffffffffff] * arg2: [0x0 ~> 0xffffffffffffffff] * Output Bounds: * out1: [0x0 ~> 0xffffffffffffffff] * out2: [0x0 ~> 0xffffffffffffffff] */ static FIAT_25519_FIAT_INLINE void fiat_25519_mulx_u64(uint64_t* out1, uint64_t* out2, uint64_t arg1, uint64_t arg2) { // NOTE: edited after generation #if defined(_M_X64) *out1 = _umul128(arg1, arg2, out2); #elif defined(_M_ARM64) *out1 = arg1 * arg2; *out2 = __umulh(arg1, arg2); #else #error "This file is intended for MSVC on X64 or ARM64" #endif } /* * The function fiat_25519_cmovznz_u64 is a single-word conditional move. 
* * Postconditions: * out1 = (if arg1 = 0 then arg2 else arg3) * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0xffffffffffffffff] * arg3: [0x0 ~> 0xffffffffffffffff] * Output Bounds: * out1: [0x0 ~> 0xffffffffffffffff] */ static FIAT_25519_FIAT_INLINE void fiat_25519_cmovznz_u64(uint64_t* out1, fiat_25519_uint1 arg1, uint64_t arg2, uint64_t arg3) { fiat_25519_uint1 x1; uint64_t x2; uint64_t x3; x1 = (!(!arg1)); x2 = ((fiat_25519_int1)(0x0 - x1) & UINT64_C(0xffffffffffffffff)); x3 = ((fiat_25519_value_barrier_u64(x2) & arg3) | (fiat_25519_value_barrier_u64((~x2)) & arg2)); *out1 = x3; } /* * The function fiat_25519_carry_mul multiplies two field elements and reduces the result. * * Postconditions: * eval out1 mod m = (eval arg1 * eval arg2) mod m * */ static FIAT_25519_FIAT_INLINE void fiat_25519_carry_mul(fiat_25519_tight_field_element out1, const fiat_25519_loose_field_element arg1, const fiat_25519_loose_field_element arg2) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12; uint64_t x13; uint64_t x14; uint64_t x15; uint64_t x16; uint64_t x17; uint64_t x18; uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint64_t x24; uint64_t x25; uint64_t x26; uint64_t x27; uint64_t x28; uint64_t x29; uint64_t x30; uint64_t x31; uint64_t x32; uint64_t x33; uint64_t x34; uint64_t x35; uint64_t x36; uint64_t x37; uint64_t x38; uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42; uint64_t x43; uint64_t x44; uint64_t x45; uint64_t x46; uint64_t x47; uint64_t x48; uint64_t x49; uint64_t x50; uint64_t x51; fiat_25519_uint1 x52; uint64_t x53; fiat_25519_uint1 x54; uint64_t x55; fiat_25519_uint1 x56; uint64_t x57; fiat_25519_uint1 x58; uint64_t x59; fiat_25519_uint1 x60; uint64_t x61; fiat_25519_uint1 x62; uint64_t x63; fiat_25519_uint1 x64; uint64_t x65; fiat_25519_uint1 x66; uint64_t x67; uint64_t x68; uint64_t x69; fiat_25519_uint1 x70; 
uint64_t x71; fiat_25519_uint1 x72; uint64_t x73; fiat_25519_uint1 x74; uint64_t x75; fiat_25519_uint1 x76; uint64_t x77; fiat_25519_uint1 x78; uint64_t x79; fiat_25519_uint1 x80; uint64_t x81; fiat_25519_uint1 x82; uint64_t x83; fiat_25519_uint1 x84; uint64_t x85; fiat_25519_uint1 x86; uint64_t x87; fiat_25519_uint1 x88; uint64_t x89; fiat_25519_uint1 x90; uint64_t x91; fiat_25519_uint1 x92; uint64_t x93; fiat_25519_uint1 x94; uint64_t x95; fiat_25519_uint1 x96; uint64_t x97; fiat_25519_uint1 x98; uint64_t x99; fiat_25519_uint1 x100; uint64_t x101; fiat_25519_uint1 x102; uint64_t x103; fiat_25519_uint1 x104; uint64_t x105; fiat_25519_uint1 x106; uint64_t x107; fiat_25519_uint1 x108; uint64_t x109; fiat_25519_uint1 x110; uint64_t x111; fiat_25519_uint1 x112; uint64_t x113; fiat_25519_uint1 x114; uint64_t x115; fiat_25519_uint1 x116; uint64_t x117; fiat_25519_uint1 x118; uint64_t x119; fiat_25519_uint1 x120; uint64_t x121; fiat_25519_uint1 x122; uint64_t x123; fiat_25519_uint1 x124; uint64_t x125; fiat_25519_uint1 x126; uint64_t x127; fiat_25519_uint1 x128; uint64_t x129; fiat_25519_uint1 x130; uint64_t x131; fiat_25519_uint1 x132; uint64_t x133; fiat_25519_uint1 x134; uint64_t x135; uint64_t x136; uint64_t x137; uint64_t x138; fiat_25519_uint1 x139; uint64_t x140; uint64_t x141; uint64_t x142; uint64_t x143; fiat_25519_uint1 x144; uint64_t x145; uint64_t x146; uint64_t x147; uint64_t x148; fiat_25519_uint1 x149; uint64_t x150; uint64_t x151; uint64_t x152; uint64_t x153; uint64_t x154; uint64_t x155; uint64_t x156; uint64_t x157; fiat_25519_uint1 x158; uint64_t x159; uint64_t x160; fiat_25519_mulx_u64(&x1, &x2, (arg1[4]), ((arg2[4]) * UINT8_C(0x13))); fiat_25519_mulx_u64(&x3, &x4, (arg1[4]), ((arg2[3]) * UINT8_C(0x13))); fiat_25519_mulx_u64(&x5, &x6, (arg1[4]), ((arg2[2]) * UINT8_C(0x13))); fiat_25519_mulx_u64(&x7, &x8, (arg1[4]), ((arg2[1]) * UINT8_C(0x13))); fiat_25519_mulx_u64(&x9, &x10, (arg1[3]), ((arg2[4]) * UINT8_C(0x13))); fiat_25519_mulx_u64(&x11, &x12, 
(arg1[3]), ((arg2[3]) * UINT8_C(0x13))); fiat_25519_mulx_u64(&x13, &x14, (arg1[3]), ((arg2[2]) * UINT8_C(0x13))); fiat_25519_mulx_u64(&x15, &x16, (arg1[2]), ((arg2[4]) * UINT8_C(0x13))); fiat_25519_mulx_u64(&x17, &x18, (arg1[2]), ((arg2[3]) * UINT8_C(0x13))); fiat_25519_mulx_u64(&x19, &x20, (arg1[1]), ((arg2[4]) * UINT8_C(0x13))); fiat_25519_mulx_u64(&x21, &x22, (arg1[4]), (arg2[0])); fiat_25519_mulx_u64(&x23, &x24, (arg1[3]), (arg2[1])); fiat_25519_mulx_u64(&x25, &x26, (arg1[3]), (arg2[0])); fiat_25519_mulx_u64(&x27, &x28, (arg1[2]), (arg2[2])); fiat_25519_mulx_u64(&x29, &x30, (arg1[2]), (arg2[1])); fiat_25519_mulx_u64(&x31, &x32, (arg1[2]), (arg2[0])); fiat_25519_mulx_u64(&x33, &x34, (arg1[1]), (arg2[3])); fiat_25519_mulx_u64(&x35, &x36, (arg1[1]), (arg2[2])); fiat_25519_mulx_u64(&x37, &x38, (arg1[1]), (arg2[1])); fiat_25519_mulx_u64(&x39, &x40, (arg1[1]), (arg2[0])); fiat_25519_mulx_u64(&x41, &x42, (arg1[0]), (arg2[4])); fiat_25519_mulx_u64(&x43, &x44, (arg1[0]), (arg2[3])); fiat_25519_mulx_u64(&x45, &x46, (arg1[0]), (arg2[2])); fiat_25519_mulx_u64(&x47, &x48, (arg1[0]), (arg2[1])); fiat_25519_mulx_u64(&x49, &x50, (arg1[0]), (arg2[0])); fiat_25519_addcarryx_u64(&x51, &x52, 0x0, x13, x7); fiat_25519_addcarryx_u64(&x53, &x54, x52, x14, x8); fiat_25519_addcarryx_u64(&x55, &x56, 0x0, x17, x51); fiat_25519_addcarryx_u64(&x57, &x58, x56, x18, x53); fiat_25519_addcarryx_u64(&x59, &x60, 0x0, x19, x55); fiat_25519_addcarryx_u64(&x61, &x62, x60, x20, x57); fiat_25519_addcarryx_u64(&x63, &x64, 0x0, x49, x59); fiat_25519_addcarryx_u64(&x65, &x66, x64, x50, x61); x67 = ((x63 >> 51) | ((x65 << 13) & UINT64_C(0xffffffffffffffff))); x68 = (x63 & UINT64_C(0x7ffffffffffff)); fiat_25519_addcarryx_u64(&x69, &x70, 0x0, x23, x21); fiat_25519_addcarryx_u64(&x71, &x72, x70, x24, x22); fiat_25519_addcarryx_u64(&x73, &x74, 0x0, x27, x69); fiat_25519_addcarryx_u64(&x75, &x76, x74, x28, x71); fiat_25519_addcarryx_u64(&x77, &x78, 0x0, x33, x73); fiat_25519_addcarryx_u64(&x79, &x80, x78, 
x34, x75); fiat_25519_addcarryx_u64(&x81, &x82, 0x0, x41, x77); fiat_25519_addcarryx_u64(&x83, &x84, x82, x42, x79); fiat_25519_addcarryx_u64(&x85, &x86, 0x0, x25, x1); fiat_25519_addcarryx_u64(&x87, &x88, x86, x26, x2); fiat_25519_addcarryx_u64(&x89, &x90, 0x0, x29, x85); fiat_25519_addcarryx_u64(&x91, &x92, x90, x30, x87); fiat_25519_addcarryx_u64(&x93, &x94, 0x0, x35, x89); fiat_25519_addcarryx_u64(&x95, &x96, x94, x36, x91); fiat_25519_addcarryx_u64(&x97, &x98, 0x0, x43, x93); fiat_25519_addcarryx_u64(&x99, &x100, x98, x44, x95); fiat_25519_addcarryx_u64(&x101, &x102, 0x0, x9, x3); fiat_25519_addcarryx_u64(&x103, &x104, x102, x10, x4); fiat_25519_addcarryx_u64(&x105, &x106, 0x0, x31, x101); fiat_25519_addcarryx_u64(&x107, &x108, x106, x32, x103); fiat_25519_addcarryx_u64(&x109, &x110, 0x0, x37, x105); fiat_25519_addcarryx_u64(&x111, &x112, x110, x38, x107); fiat_25519_addcarryx_u64(&x113, &x114, 0x0, x45, x109); fiat_25519_addcarryx_u64(&x115, &x116, x114, x46, x111); fiat_25519_addcarryx_u64(&x117, &x118, 0x0, x11, x5); fiat_25519_addcarryx_u64(&x119, &x120, x118, x12, x6); fiat_25519_addcarryx_u64(&x121, &x122, 0x0, x15, x117); fiat_25519_addcarryx_u64(&x123, &x124, x122, x16, x119); fiat_25519_addcarryx_u64(&x125, &x126, 0x0, x39, x121); fiat_25519_addcarryx_u64(&x127, &x128, x126, x40, x123); fiat_25519_addcarryx_u64(&x129, &x130, 0x0, x47, x125); fiat_25519_addcarryx_u64(&x131, &x132, x130, x48, x127); fiat_25519_addcarryx_u64(&x133, &x134, 0x0, x67, x129); x135 = (x134 + x131); x136 = ((x133 >> 51) | ((x135 << 13) & UINT64_C(0xffffffffffffffff))); x137 = (x133 & UINT64_C(0x7ffffffffffff)); fiat_25519_addcarryx_u64(&x138, &x139, 0x0, x136, x113); x140 = (x139 + x115); x141 = ((x138 >> 51) | ((x140 << 13) & UINT64_C(0xffffffffffffffff))); x142 = (x138 & UINT64_C(0x7ffffffffffff)); fiat_25519_addcarryx_u64(&x143, &x144, 0x0, x141, x97); x145 = (x144 + x99); x146 = ((x143 >> 51) | ((x145 << 13) & UINT64_C(0xffffffffffffffff))); x147 = (x143 & 
UINT64_C(0x7ffffffffffff)); fiat_25519_addcarryx_u64(&x148, &x149, 0x0, x146, x81); x150 = (x149 + x83); x151 = ((x148 >> 51) | ((x150 << 13) & UINT64_C(0xffffffffffffffff))); x152 = (x148 & UINT64_C(0x7ffffffffffff)); x153 = (x151 * UINT8_C(0x13)); x154 = (x68 + x153); x155 = (x154 >> 51); x156 = (x154 & UINT64_C(0x7ffffffffffff)); x157 = (x155 + x137); x158 = (fiat_25519_uint1)(x157 >> 51); x159 = (x157 & UINT64_C(0x7ffffffffffff)); x160 = (x158 + x142); out1[0] = x156; out1[1] = x159; out1[2] = x160; out1[3] = x147; out1[4] = x152; }
/* NOTE(review): the statements above are the tail of fiat_25519_carry_mul, whose
 * start lies before this hunk. This whole file is autogenerated fiat-crypto
 * output (newlines were lost in extraction): do not hand-edit the arithmetic —
 * regenerate from fiat-crypto instead. */
 /* * The function fiat_25519_carry_square squares a field element and reduces the result. * * Postconditions: * eval out1 mod m = (eval arg1 * eval arg1) mod m * */
/* Representation: 5 unsaturated 64-bit limbs of 51 bits each (the masks below
 * are 2^51 - 1). 5*51 = 255, and every overflow past bit 255 is scaled by
 * UINT8_C(0x13) (= 19) and folded back into the low limbs, i.e. reduction
 * mod 2^255 - 19. The precomputed x1..x8 exploit squaring symmetry: cross
 * terms are doubled (* 0x2) and high limbs are pre-scaled by 19. */
 static FIAT_25519_FIAT_INLINE void fiat_25519_carry_square(fiat_25519_tight_field_element out1, const fiat_25519_loose_field_element arg1) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12; uint64_t x13; uint64_t x14; uint64_t x15; uint64_t x16; uint64_t x17; uint64_t x18; uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint64_t x24; uint64_t x25; uint64_t x26; uint64_t x27; uint64_t x28; uint64_t x29; uint64_t x30; uint64_t x31; uint64_t x32; uint64_t x33; uint64_t x34; uint64_t x35; uint64_t x36; uint64_t x37; uint64_t x38; uint64_t x39; fiat_25519_uint1 x40; uint64_t x41; fiat_25519_uint1 x42; uint64_t x43; fiat_25519_uint1 x44; uint64_t x45; fiat_25519_uint1 x46; uint64_t x47; uint64_t x48; uint64_t x49; fiat_25519_uint1 x50; uint64_t x51; fiat_25519_uint1 x52; uint64_t x53; fiat_25519_uint1 x54; uint64_t x55; fiat_25519_uint1 x56; uint64_t x57; fiat_25519_uint1 x58; uint64_t x59; fiat_25519_uint1 x60; uint64_t x61; fiat_25519_uint1 x62; uint64_t x63; fiat_25519_uint1 x64; uint64_t x65; fiat_25519_uint1 x66; uint64_t x67; fiat_25519_uint1 x68; uint64_t x69; fiat_25519_uint1 x70; uint64_t x71; fiat_25519_uint1 x72; uint64_t x73;
fiat_25519_uint1 x74; uint64_t x75; fiat_25519_uint1 x76; uint64_t x77; fiat_25519_uint1 x78; uint64_t x79; fiat_25519_uint1 x80; uint64_t x81; fiat_25519_uint1 x82; uint64_t x83; uint64_t x84; uint64_t x85; uint64_t x86; fiat_25519_uint1 x87; uint64_t x88; uint64_t x89; uint64_t x90; uint64_t x91; fiat_25519_uint1 x92; uint64_t x93; uint64_t x94; uint64_t x95; uint64_t x96; fiat_25519_uint1 x97; uint64_t x98; uint64_t x99; uint64_t x100; uint64_t x101; uint64_t x102; uint64_t x103; uint64_t x104; uint64_t x105; fiat_25519_uint1 x106; uint64_t x107; uint64_t x108; x1 = ((arg1[4]) * UINT8_C(0x13)); x2 = (x1 * 0x2); x3 = ((arg1[4]) * 0x2); x4 = ((arg1[3]) * UINT8_C(0x13)); x5 = (x4 * 0x2); x6 = ((arg1[3]) * 0x2); x7 = ((arg1[2]) * 0x2); x8 = ((arg1[1]) * 0x2); fiat_25519_mulx_u64(&x9, &x10, (arg1[4]), x1); fiat_25519_mulx_u64(&x11, &x12, (arg1[3]), x2); fiat_25519_mulx_u64(&x13, &x14, (arg1[3]), x4); fiat_25519_mulx_u64(&x15, &x16, (arg1[2]), x2); fiat_25519_mulx_u64(&x17, &x18, (arg1[2]), x5); fiat_25519_mulx_u64(&x19, &x20, (arg1[2]), (arg1[2])); fiat_25519_mulx_u64(&x21, &x22, (arg1[1]), x2); fiat_25519_mulx_u64(&x23, &x24, (arg1[1]), x6); fiat_25519_mulx_u64(&x25, &x26, (arg1[1]), x7); fiat_25519_mulx_u64(&x27, &x28, (arg1[1]), (arg1[1])); fiat_25519_mulx_u64(&x29, &x30, (arg1[0]), x3); fiat_25519_mulx_u64(&x31, &x32, (arg1[0]), x6); fiat_25519_mulx_u64(&x33, &x34, (arg1[0]), x7); fiat_25519_mulx_u64(&x35, &x36, (arg1[0]), x8); fiat_25519_mulx_u64(&x37, &x38, (arg1[0]), (arg1[0])); fiat_25519_addcarryx_u64(&x39, &x40, 0x0, x21, x17); fiat_25519_addcarryx_u64(&x41, &x42, x40, x22, x18); fiat_25519_addcarryx_u64(&x43, &x44, 0x0, x37, x39); fiat_25519_addcarryx_u64(&x45, &x46, x44, x38, x41); x47 = ((x43 >> 51) | ((x45 << 13) & UINT64_C(0xffffffffffffffff))); x48 = (x43 & UINT64_C(0x7ffffffffffff)); fiat_25519_addcarryx_u64(&x49, &x50, 0x0, x23, x19); fiat_25519_addcarryx_u64(&x51, &x52, x50, x24, x20); fiat_25519_addcarryx_u64(&x53, &x54, 0x0, x29, x49);
fiat_25519_addcarryx_u64(&x55, &x56, x54, x30, x51); fiat_25519_addcarryx_u64(&x57, &x58, 0x0, x25, x9); fiat_25519_addcarryx_u64(&x59, &x60, x58, x26, x10); fiat_25519_addcarryx_u64(&x61, &x62, 0x0, x31, x57); fiat_25519_addcarryx_u64(&x63, &x64, x62, x32, x59); fiat_25519_addcarryx_u64(&x65, &x66, 0x0, x27, x11); fiat_25519_addcarryx_u64(&x67, &x68, x66, x28, x12); fiat_25519_addcarryx_u64(&x69, &x70, 0x0, x33, x65); fiat_25519_addcarryx_u64(&x71, &x72, x70, x34, x67); fiat_25519_addcarryx_u64(&x73, &x74, 0x0, x15, x13); fiat_25519_addcarryx_u64(&x75, &x76, x74, x16, x14); fiat_25519_addcarryx_u64(&x77, &x78, 0x0, x35, x73); fiat_25519_addcarryx_u64(&x79, &x80, x78, x36, x75); fiat_25519_addcarryx_u64(&x81, &x82, 0x0, x47, x77); x83 = (x82 + x79); x84 = ((x81 >> 51) | ((x83 << 13) & UINT64_C(0xffffffffffffffff))); x85 = (x81 & UINT64_C(0x7ffffffffffff)); fiat_25519_addcarryx_u64(&x86, &x87, 0x0, x84, x69); x88 = (x87 + x71); x89 = ((x86 >> 51) | ((x88 << 13) & UINT64_C(0xffffffffffffffff))); x90 = (x86 & UINT64_C(0x7ffffffffffff)); fiat_25519_addcarryx_u64(&x91, &x92, 0x0, x89, x61); x93 = (x92 + x63); x94 = ((x91 >> 51) | ((x93 << 13) & UINT64_C(0xffffffffffffffff))); x95 = (x91 & UINT64_C(0x7ffffffffffff)); fiat_25519_addcarryx_u64(&x96, &x97, 0x0, x94, x53); x98 = (x97 + x55); x99 = ((x96 >> 51) | ((x98 << 13) & UINT64_C(0xffffffffffffffff))); x100 = (x96 & UINT64_C(0x7ffffffffffff)); x101 = (x99 * UINT8_C(0x13)); x102 = (x48 + x101); x103 = (x102 >> 51); x104 = (x102 & UINT64_C(0x7ffffffffffff)); x105 = (x103 + x85); x106 = (fiat_25519_uint1)(x105 >> 51); x107 = (x105 & UINT64_C(0x7ffffffffffff)); x108 = (x106 + x90); out1[0] = x104; out1[1] = x107; out1[2] = x108; out1[3] = x95; out1[4] = x100; }
 /* * The function fiat_25519_carry reduces a field element. 
* * Postconditions: * eval out1 mod m = eval arg1 mod m * */ static FIAT_25519_FIAT_INLINE void fiat_25519_carry(fiat_25519_tight_field_element out1, const fiat_25519_loose_field_element arg1) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12; x1 = (arg1[0]); x2 = ((x1 >> 51) + (arg1[1])); x3 = ((x2 >> 51) + (arg1[2])); x4 = ((x3 >> 51) + (arg1[3])); x5 = ((x4 >> 51) + (arg1[4])); x6 = ((x1 & UINT64_C(0x7ffffffffffff)) + ((x5 >> 51) * UINT8_C(0x13))); x7 = ((fiat_25519_uint1)(x6 >> 51) + (x2 & UINT64_C(0x7ffffffffffff))); x8 = (x6 & UINT64_C(0x7ffffffffffff)); x9 = (x7 & UINT64_C(0x7ffffffffffff)); x10 = ((fiat_25519_uint1)(x7 >> 51) + (x3 & UINT64_C(0x7ffffffffffff))); x11 = (x4 & UINT64_C(0x7ffffffffffff)); x12 = (x5 & UINT64_C(0x7ffffffffffff)); out1[0] = x8; out1[1] = x9; out1[2] = x10; out1[3] = x11; out1[4] = x12; } /* * The function fiat_25519_add adds two field elements. * * Postconditions: * eval out1 mod m = (eval arg1 + eval arg2) mod m * */ static FIAT_25519_FIAT_INLINE void fiat_25519_add(fiat_25519_loose_field_element out1, const fiat_25519_tight_field_element arg1, const fiat_25519_tight_field_element arg2) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; x1 = ((arg1[0]) + (arg2[0])); x2 = ((arg1[1]) + (arg2[1])); x3 = ((arg1[2]) + (arg2[2])); x4 = ((arg1[3]) + (arg2[3])); x5 = ((arg1[4]) + (arg2[4])); out1[0] = x1; out1[1] = x2; out1[2] = x3; out1[3] = x4; out1[4] = x5; } /* * The function fiat_25519_sub subtracts two field elements. 
* * Postconditions: * eval out1 mod m = (eval arg1 - eval arg2) mod m * */ static FIAT_25519_FIAT_INLINE void fiat_25519_sub(fiat_25519_loose_field_element out1, const fiat_25519_tight_field_element arg1, const fiat_25519_tight_field_element arg2) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; x1 = ((UINT64_C(0xfffffffffffda) + (arg1[0])) - (arg2[0])); x2 = ((UINT64_C(0xffffffffffffe) + (arg1[1])) - (arg2[1])); x3 = ((UINT64_C(0xffffffffffffe) + (arg1[2])) - (arg2[2])); x4 = ((UINT64_C(0xffffffffffffe) + (arg1[3])) - (arg2[3])); x5 = ((UINT64_C(0xffffffffffffe) + (arg1[4])) - (arg2[4])); out1[0] = x1; out1[1] = x2; out1[2] = x3; out1[3] = x4; out1[4] = x5; } /* * The function fiat_25519_opp negates a field element. * * Postconditions: * eval out1 mod m = -eval arg1 mod m * */ static FIAT_25519_FIAT_INLINE void fiat_25519_opp(fiat_25519_loose_field_element out1, const fiat_25519_tight_field_element arg1) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; x1 = (UINT64_C(0xfffffffffffda) - (arg1[0])); x2 = (UINT64_C(0xffffffffffffe) - (arg1[1])); x3 = (UINT64_C(0xffffffffffffe) - (arg1[2])); x4 = (UINT64_C(0xffffffffffffe) - (arg1[3])); x5 = (UINT64_C(0xffffffffffffe) - (arg1[4])); out1[0] = x1; out1[1] = x2; out1[2] = x3; out1[3] = x4; out1[4] = x5; } /* * The function fiat_25519_selectznz is a multi-limb conditional select. 
* * Postconditions: * out1 = (if arg1 = 0 then arg2 else arg3) * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * Output Bounds: * out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */ static FIAT_25519_FIAT_INLINE void fiat_25519_selectznz(uint64_t out1[5], fiat_25519_uint1 arg1, const uint64_t arg2[5], const uint64_t arg3[5]) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; fiat_25519_cmovznz_u64(&x1, arg1, (arg2[0]), (arg3[0])); fiat_25519_cmovznz_u64(&x2, arg1, (arg2[1]), (arg3[1])); fiat_25519_cmovznz_u64(&x3, arg1, (arg2[2]), (arg3[2])); fiat_25519_cmovznz_u64(&x4, arg1, (arg2[3]), (arg3[3])); fiat_25519_cmovznz_u64(&x5, arg1, (arg2[4]), (arg3[4])); out1[0] = x1; out1[1] = x2; out1[2] = x3; out1[3] = x4; out1[4] = x5; } /* * The function fiat_25519_to_bytes serializes a field element to bytes in little-endian order. 
* * Postconditions: * out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31] * * Output Bounds: * out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x7f]] */
/* Canonicalizes before serializing: subtracts p limb-wise (0x7ffffffffffed =
 * 2^51 - 19, then 2^51 - 1 for the upper limbs); if that borrowed, the
 * cmovznz-produced all-ones mask x11 re-adds p — a branchless reduction into
 * [0, p). The five 51-bit limbs are then repacked into 32 little-endian bytes;
 * x22..x25 pre-shift limbs 1..4 to their bit offsets (51, 102, 153, 204 mod 8). */
static FIAT_25519_FIAT_INLINE void fiat_25519_to_bytes(uint8_t out1[32], const fiat_25519_tight_field_element arg1) { uint64_t x1; fiat_25519_uint1 x2; uint64_t x3; fiat_25519_uint1 x4; uint64_t x5; fiat_25519_uint1 x6; uint64_t x7; fiat_25519_uint1 x8; uint64_t x9; fiat_25519_uint1 x10; uint64_t x11; uint64_t x12; fiat_25519_uint1 x13; uint64_t x14; fiat_25519_uint1 x15; uint64_t x16; fiat_25519_uint1 x17; uint64_t x18; fiat_25519_uint1 x19; uint64_t x20; fiat_25519_uint1 x21; uint64_t x22; uint64_t x23; uint64_t x24; uint64_t x25; uint8_t x26; uint64_t x27; uint8_t x28; uint64_t x29; uint8_t x30; uint64_t x31; uint8_t x32; uint64_t x33; uint8_t x34; uint64_t x35; uint8_t x36; uint8_t x37; uint64_t x38; uint8_t x39; uint64_t x40; uint8_t x41; uint64_t x42; uint8_t x43; uint64_t x44; uint8_t x45; uint64_t x46; uint8_t x47; uint64_t x48; uint8_t x49; uint8_t x50; uint64_t x51; uint8_t x52; uint64_t x53; uint8_t x54; uint64_t x55; uint8_t x56; uint64_t x57; uint8_t x58; uint64_t x59; uint8_t x60; uint64_t x61; uint8_t x62; uint64_t x63; uint8_t x64; fiat_25519_uint1 x65; uint64_t x66; uint8_t x67; uint64_t x68; uint8_t x69; uint64_t x70; uint8_t x71; uint64_t x72; uint8_t x73; uint64_t x74; uint8_t x75; uint64_t x76; uint8_t x77; uint8_t x78; uint64_t x79; uint8_t x80; uint64_t x81; uint8_t x82; uint64_t x83; uint8_t x84; uint64_t x85; uint8_t x86; uint64_t x87;
uint8_t x88; uint64_t x89; uint8_t x90; uint8_t x91; fiat_25519_subborrowx_u51(&x1, &x2, 0x0, (arg1[0]), UINT64_C(0x7ffffffffffed)); fiat_25519_subborrowx_u51(&x3, &x4, x2, (arg1[1]), UINT64_C(0x7ffffffffffff)); fiat_25519_subborrowx_u51(&x5, &x6, x4, (arg1[2]), UINT64_C(0x7ffffffffffff)); fiat_25519_subborrowx_u51(&x7, &x8, x6, (arg1[3]), UINT64_C(0x7ffffffffffff)); fiat_25519_subborrowx_u51(&x9, &x10, x8, (arg1[4]), UINT64_C(0x7ffffffffffff)); fiat_25519_cmovznz_u64(&x11, x10, 0x0, UINT64_C(0xffffffffffffffff)); fiat_25519_addcarryx_u51(&x12, &x13, 0x0, x1, (x11 & UINT64_C(0x7ffffffffffed))); fiat_25519_addcarryx_u51(&x14, &x15, x13, x3, (x11 & UINT64_C(0x7ffffffffffff))); fiat_25519_addcarryx_u51(&x16, &x17, x15, x5, (x11 & UINT64_C(0x7ffffffffffff))); fiat_25519_addcarryx_u51(&x18, &x19, x17, x7, (x11 & UINT64_C(0x7ffffffffffff))); fiat_25519_addcarryx_u51(&x20, &x21, x19, x9, (x11 & UINT64_C(0x7ffffffffffff))); x22 = (x20 << 4); x23 = (x18 * (uint64_t)0x2); x24 = (x16 << 6); x25 = (x14 << 3); x26 = (uint8_t)(x12 & UINT8_C(0xff)); x27 = (x12 >> 8); x28 = (uint8_t)(x27 & UINT8_C(0xff)); x29 = (x27 >> 8); x30 = (uint8_t)(x29 & UINT8_C(0xff)); x31 = (x29 >> 8); x32 = (uint8_t)(x31 & UINT8_C(0xff)); x33 = (x31 >> 8); x34 = (uint8_t)(x33 & UINT8_C(0xff)); x35 = (x33 >> 8); x36 = (uint8_t)(x35 & UINT8_C(0xff)); x37 = (uint8_t)(x35 >> 8); x38 = (x25 + (uint64_t)x37); x39 = (uint8_t)(x38 & UINT8_C(0xff)); x40 = (x38 >> 8); x41 = (uint8_t)(x40 & UINT8_C(0xff)); x42 = (x40 >> 8); x43 = (uint8_t)(x42 & UINT8_C(0xff)); x44 = (x42 >> 8); x45 = (uint8_t)(x44 & UINT8_C(0xff)); x46 = (x44 >> 8); x47 = (uint8_t)(x46 & UINT8_C(0xff)); x48 = (x46 >> 8); x49 = (uint8_t)(x48 & UINT8_C(0xff)); x50 = (uint8_t)(x48 >> 8); x51 = (x24 + (uint64_t)x50); x52 = (uint8_t)(x51 & UINT8_C(0xff)); x53 = (x51 >> 8); x54 = (uint8_t)(x53 & UINT8_C(0xff)); x55 = (x53 >> 8); x56 = (uint8_t)(x55 & UINT8_C(0xff)); x57 = (x55 >> 8); x58 = (uint8_t)(x57 & UINT8_C(0xff)); x59 = (x57 >> 8); x60 =
(uint8_t)(x59 & UINT8_C(0xff)); x61 = (x59 >> 8); x62 = (uint8_t)(x61 & UINT8_C(0xff)); x63 = (x61 >> 8); x64 = (uint8_t)(x63 & UINT8_C(0xff)); x65 = (fiat_25519_uint1)(x63 >> 8); x66 = (x23 + (uint64_t)x65); x67 = (uint8_t)(x66 & UINT8_C(0xff)); x68 = (x66 >> 8); x69 = (uint8_t)(x68 & UINT8_C(0xff)); x70 = (x68 >> 8); x71 = (uint8_t)(x70 & UINT8_C(0xff)); x72 = (x70 >> 8); x73 = (uint8_t)(x72 & UINT8_C(0xff)); x74 = (x72 >> 8); x75 = (uint8_t)(x74 & UINT8_C(0xff)); x76 = (x74 >> 8); x77 = (uint8_t)(x76 & UINT8_C(0xff)); x78 = (uint8_t)(x76 >> 8); x79 = (x22 + (uint64_t)x78); x80 = (uint8_t)(x79 & UINT8_C(0xff)); x81 = (x79 >> 8); x82 = (uint8_t)(x81 & UINT8_C(0xff)); x83 = (x81 >> 8); x84 = (uint8_t)(x83 & UINT8_C(0xff)); x85 = (x83 >> 8); x86 = (uint8_t)(x85 & UINT8_C(0xff)); x87 = (x85 >> 8); x88 = (uint8_t)(x87 & UINT8_C(0xff)); x89 = (x87 >> 8); x90 = (uint8_t)(x89 & UINT8_C(0xff)); x91 = (uint8_t)(x89 >> 8); out1[0] = x26; out1[1] = x28; out1[2] = x30; out1[3] = x32; out1[4] = x34; out1[5] = x36; out1[6] = x39; out1[7] = x41; out1[8] = x43; out1[9] = x45; out1[10] = x47; out1[11] = x49; out1[12] = x52; out1[13] = x54; out1[14] = x56; out1[15] = x58; out1[16] = x60; out1[17] = x62; out1[18] = x64; out1[19] = x67; out1[20] = x69; out1[21] = x71; out1[22] = x73; out1[23] = x75; out1[24] = x77; out1[25] = x80; out1[26] = x82; out1[27] = x84; out1[28] = x86; out1[29] = x88; out1[30] = x90; out1[31] = x91; }
 /* * The function fiat_25519_from_bytes deserializes a field element from bytes in little-endian order. 
* * Postconditions: * eval out1 mod m = bytes_eval arg1 mod m * * Input Bounds: * arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x7f]] */
/* Gathers the 32 little-endian bytes into 5 limbs of 51 bits: each byte is
 * pre-shifted by its bit offset modulo 51 (x1..x31), partial sums are masked
 * to 51 bits, and each limb's >>51 spill (x40, x48, x57, x65) is added into
 * the next limb's accumulation. Limb 4 (x71) is stored unmasked — per the
 * input bound the top byte is <= 0x7f, so it cannot exceed 51 bits. */
static FIAT_25519_FIAT_INLINE void fiat_25519_from_bytes(fiat_25519_tight_field_element out1, const uint8_t arg1[32]) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12; uint64_t x13; uint64_t x14; uint64_t x15; uint64_t x16; uint64_t x17; uint64_t x18; uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint64_t x24; uint64_t x25; uint64_t x26; uint64_t x27; uint64_t x28; uint64_t x29; uint64_t x30; uint64_t x31; uint8_t x32; uint64_t x33; uint64_t x34; uint64_t x35; uint64_t x36; uint64_t x37; uint64_t x38; uint64_t x39; uint8_t x40; uint64_t x41; uint64_t x42; uint64_t x43; uint64_t x44; uint64_t x45; uint64_t x46; uint64_t x47; uint8_t x48; uint64_t x49; uint64_t x50; uint64_t x51; uint64_t x52; uint64_t x53; uint64_t x54; uint64_t x55; uint64_t x56; uint8_t x57; uint64_t x58; uint64_t x59; uint64_t x60; uint64_t x61; uint64_t x62; uint64_t x63; uint64_t x64; uint8_t x65; uint64_t x66; uint64_t x67; uint64_t x68; uint64_t x69; uint64_t x70; uint64_t x71; x1 = ((uint64_t)(arg1[31]) << 44); x2 = ((uint64_t)(arg1[30]) << 36); x3 = ((uint64_t)(arg1[29]) << 28); x4 = ((uint64_t)(arg1[28]) << 20); x5 = ((uint64_t)(arg1[27]) << 12); x6 = ((uint64_t)(arg1[26]) << 4); x7 = ((uint64_t)(arg1[25]) << 47); x8 = ((uint64_t)(arg1[24]) << 39); x9 = ((uint64_t)(arg1[23]) << 31); x10 =
((uint64_t)(arg1[22]) << 23); x11 = ((uint64_t)(arg1[21]) << 15); x12 = ((uint64_t)(arg1[20]) << 7); x13 = ((uint64_t)(arg1[19]) << 50); x14 = ((uint64_t)(arg1[18]) << 42); x15 = ((uint64_t)(arg1[17]) << 34); x16 = ((uint64_t)(arg1[16]) << 26); x17 = ((uint64_t)(arg1[15]) << 18); x18 = ((uint64_t)(arg1[14]) << 10); x19 = ((uint64_t)(arg1[13]) << 2); x20 = ((uint64_t)(arg1[12]) << 45); x21 = ((uint64_t)(arg1[11]) << 37); x22 = ((uint64_t)(arg1[10]) << 29); x23 = ((uint64_t)(arg1[9]) << 21); x24 = ((uint64_t)(arg1[8]) << 13); x25 = ((uint64_t)(arg1[7]) << 5); x26 = ((uint64_t)(arg1[6]) << 48); x27 = ((uint64_t)(arg1[5]) << 40); x28 = ((uint64_t)(arg1[4]) << 32); x29 = ((uint64_t)(arg1[3]) << 24); x30 = ((uint64_t)(arg1[2]) << 16); x31 = ((uint64_t)(arg1[1]) << 8); x32 = (arg1[0]); x33 = (x31 + (uint64_t)x32); x34 = (x30 + x33); x35 = (x29 + x34); x36 = (x28 + x35); x37 = (x27 + x36); x38 = (x26 + x37); x39 = (x38 & UINT64_C(0x7ffffffffffff)); x40 = (uint8_t)(x38 >> 51); x41 = (x25 + (uint64_t)x40); x42 = (x24 + x41); x43 = (x23 + x42); x44 = (x22 + x43); x45 = (x21 + x44); x46 = (x20 + x45); x47 = (x46 & UINT64_C(0x7ffffffffffff)); x48 = (uint8_t)(x46 >> 51); x49 = (x19 + (uint64_t)x48); x50 = (x18 + x49); x51 = (x17 + x50); x52 = (x16 + x51); x53 = (x15 + x52); x54 = (x14 + x53); x55 = (x13 + x54); x56 = (x55 & UINT64_C(0x7ffffffffffff)); x57 = (uint8_t)(x55 >> 51); x58 = (x12 + (uint64_t)x57); x59 = (x11 + x58); x60 = (x10 + x59); x61 = (x9 + x60); x62 = (x8 + x61); x63 = (x7 + x62); x64 = (x63 & UINT64_C(0x7ffffffffffff)); x65 = (uint8_t)(x63 >> 51); x66 = (x6 + (uint64_t)x65); x67 = (x5 + x66); x68 = (x4 + x67); x69 = (x3 + x68); x70 = (x2 + x69); x71 = (x1 + x70); out1[0] = x39; out1[1] = x47; out1[2] = x56; out1[3] = x64; out1[4] = x71; }
 /* * The function fiat_25519_relax is the identity function converting from tight field elements to loose field elements. 
* * Postconditions: * out1 = arg1 * */
/* Identity copy; only the static type changes from tight to loose. */
static FIAT_25519_FIAT_INLINE void fiat_25519_relax(fiat_25519_loose_field_element out1, const fiat_25519_tight_field_element arg1) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; x1 = (arg1[0]); x2 = (arg1[1]); x3 = (arg1[2]); x4 = (arg1[3]); x5 = (arg1[4]); out1[0] = x1; out1[1] = x2; out1[2] = x3; out1[3] = x4; out1[4] = x5; }
 /* * The function fiat_25519_carry_scmul_121666 multiplies a field element by 121666 and reduces the result. * * Postconditions: * eval out1 mod m = (121666 * eval arg1) mod m * */
/* UINT32_C(0x1db42) = 121666. Multiplies each 51-bit limb by the scalar, then
 * carries, folding the overflow above bit 255 back via * 0x13 (= 19), the
 * usual reduction mod 2^255 - 19. */
static FIAT_25519_FIAT_INLINE void fiat_25519_carry_scmul_121666(fiat_25519_tight_field_element out1, const fiat_25519_loose_field_element arg1) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12; uint64_t x13; fiat_25519_uint1 x14; uint64_t x15; uint64_t x16; uint64_t x17; uint64_t x18; fiat_25519_uint1 x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; fiat_25519_uint1 x24; uint64_t x25; uint64_t x26; uint64_t x27; uint64_t x28; fiat_25519_uint1 x29; uint64_t x30; uint64_t x31; uint64_t x32; uint64_t x33; uint64_t x34; fiat_25519_uint1 x35; uint64_t x36; uint64_t x37; fiat_25519_uint1 x38; uint64_t x39; uint64_t x40; fiat_25519_mulx_u64(&x1, &x2, UINT32_C(0x1db42), (arg1[4])); fiat_25519_mulx_u64(&x3, &x4, UINT32_C(0x1db42), (arg1[3])); fiat_25519_mulx_u64(&x5, &x6, UINT32_C(0x1db42), (arg1[2])); fiat_25519_mulx_u64(&x7, &x8, UINT32_C(0x1db42), (arg1[1])); fiat_25519_mulx_u64(&x9, &x10, UINT32_C(0x1db42), (arg1[0])); x11 = ((x9 >> 51) | ((x10 << 13) & UINT64_C(0xffffffffffffffff))); x12 = (x9 & UINT64_C(0x7ffffffffffff)); fiat_25519_addcarryx_u64(&x13, &x14, 0x0, x11, x7); x15 = (x14 + x8); x16 = ((x13 >> 51) | ((x15 << 13) & UINT64_C(0xffffffffffffffff))); x17 = (x13 & UINT64_C(0x7ffffffffffff)); fiat_25519_addcarryx_u64(&x18, &x19, 0x0, x16, x5); x20 = (x19 + x6); x21 = ((x18 >> 51) |
((x20 << 13) & UINT64_C(0xffffffffffffffff))); x22 = (x18 & UINT64_C(0x7ffffffffffff)); fiat_25519_addcarryx_u64(&x23, &x24, 0x0, x21, x3); x25 = (x24 + x4); x26 = ((x23 >> 51) | ((x25 << 13) & UINT64_C(0xffffffffffffffff))); x27 = (x23 & UINT64_C(0x7ffffffffffff)); fiat_25519_addcarryx_u64(&x28, &x29, 0x0, x26, x1); x30 = (x29 + x2); x31 = ((x28 >> 51) | ((x30 << 13) & UINT64_C(0xffffffffffffffff))); x32 = (x28 & UINT64_C(0x7ffffffffffff)); x33 = (x31 * UINT8_C(0x13)); x34 = (x12 + x33); x35 = (fiat_25519_uint1)(x34 >> 51); x36 = (x34 & UINT64_C(0x7ffffffffffff)); x37 = (x35 + x17); x38 = (fiat_25519_uint1)(x37 >> 51); x39 = (x37 & UINT64_C(0x7ffffffffffff)); x40 = (x38 + x22); out1[0] = x36; out1[1] = x39; out1[2] = x40; out1[3] = x27; out1[4] = x32; }
/* NOTE(review): the '=' banner below is an artifact of file concatenation
 * during extraction — in the repository, p256_32.h is a separate header, not
 * part of this translation unit. */
 ================================================ FILE: Sources/CNIOBoringSSL/third_party/fiat/p256_32.h ================================================ /* Autogenerated: 'src/ExtractionOCaml/word_by_word_montgomery' --inline --static --use-value-barrier p256 32 '2^256 - 2^224 + 2^192 + 2^96 - 1' mul square add sub opp from_montgomery to_montgomery nonzero selectznz to_bytes from_bytes one msat divstep divstep_precomp */ /* curve description: p256 */ /* machine_wordsize = 32 (from "32") */ /* requested operations: mul, square, add, sub, opp, from_montgomery, to_montgomery, nonzero, selectznz, to_bytes, from_bytes, one, msat, divstep, divstep_precomp */ /* m = 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff (from "2^256 - 2^224 + 2^192 + 2^96 - 1") */ /* */ /* NOTE: In addition to the bounds specified above each function, all */ /* functions synthesized for this Montgomery arithmetic require the */ /* input to be strictly less than the prime modulus (m), and also */ /* require the input to be in the unique saturated representation. */ /* All functions also ensure that these two properties are true of */ /* return values. 
*/ /* */ /* Computed values: */ /* eval z = z[0] + (z[1] << 32) + (z[2] << 64) + (z[3] << 96) + (z[4] << 128) + (z[5] << 160) + (z[6] << 192) + (z[7] << 224) */ /* bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) */ /* twos_complement_eval z = let x1 := z[0] + (z[1] << 32) + (z[2] << 64) + (z[3] << 96) + (z[4] << 128) + (z[5] << 160) + (z[6] << 192) + (z[7] << 224) in */ /* if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256 */ #include typedef unsigned char fiat_p256_uint1; typedef signed char fiat_p256_int1; #if defined(__GNUC__) || defined(__clang__) # define FIAT_P256_FIAT_INLINE __inline__ #else # define FIAT_P256_FIAT_INLINE inline #endif /* The type fiat_p256_montgomery_domain_field_element is a field element in the Montgomery domain. */ /* Bounds: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]] */ typedef uint32_t fiat_p256_montgomery_domain_field_element[8]; /* The type fiat_p256_non_montgomery_domain_field_element is a field element NOT in the Montgomery domain. 
*/ /* Bounds: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]] */ typedef uint32_t fiat_p256_non_montgomery_domain_field_element[8]; #if (-1 & 3) != 3 #error "This code only works on a two's complement system" #endif #if !defined(FIAT_P256_NO_ASM) && (defined(__GNUC__) || defined(__clang__)) static __inline__ uint32_t fiat_p256_value_barrier_u32(uint32_t a) { __asm__("" : "+r"(a) : /* no inputs */); return a; } #else # define fiat_p256_value_barrier_u32(x) (x) #endif /* * The function fiat_p256_addcarryx_u32 is an addition with carry. * * Postconditions: * out1 = (arg1 + arg2 + arg3) mod 2^32 * out2 = ⌊(arg1 + arg2 + arg3) / 2^32⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0xffffffff] * arg3: [0x0 ~> 0xffffffff] * Output Bounds: * out1: [0x0 ~> 0xffffffff] * out2: [0x0 ~> 0x1] */ static FIAT_P256_FIAT_INLINE void fiat_p256_addcarryx_u32(uint32_t* out1, fiat_p256_uint1* out2, fiat_p256_uint1 arg1, uint32_t arg2, uint32_t arg3) { uint64_t x1; uint32_t x2; fiat_p256_uint1 x3; x1 = ((arg1 + (uint64_t)arg2) + arg3); x2 = (uint32_t)(x1 & UINT32_C(0xffffffff)); x3 = (fiat_p256_uint1)(x1 >> 32); *out1 = x2; *out2 = x3; } /* * The function fiat_p256_subborrowx_u32 is a subtraction with borrow. 
* * Postconditions: * out1 = (-arg1 + arg2 + -arg3) mod 2^32 * out2 = -⌊(-arg1 + arg2 + -arg3) / 2^32⌋ * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0xffffffff] * arg3: [0x0 ~> 0xffffffff] * Output Bounds: * out1: [0x0 ~> 0xffffffff] * out2: [0x0 ~> 0x1] */ static FIAT_P256_FIAT_INLINE void fiat_p256_subborrowx_u32(uint32_t* out1, fiat_p256_uint1* out2, fiat_p256_uint1 arg1, uint32_t arg2, uint32_t arg3) { int64_t x1; fiat_p256_int1 x2; uint32_t x3; x1 = ((arg2 - (int64_t)arg1) - arg3); x2 = (fiat_p256_int1)(x1 >> 32); x3 = (uint32_t)(x1 & UINT32_C(0xffffffff)); *out1 = x3; *out2 = (fiat_p256_uint1)(0x0 - x2); } /* * The function fiat_p256_mulx_u32 is a multiplication, returning the full double-width result. * * Postconditions: * out1 = (arg1 * arg2) mod 2^32 * out2 = ⌊arg1 * arg2 / 2^32⌋ * * Input Bounds: * arg1: [0x0 ~> 0xffffffff] * arg2: [0x0 ~> 0xffffffff] * Output Bounds: * out1: [0x0 ~> 0xffffffff] * out2: [0x0 ~> 0xffffffff] */ static FIAT_P256_FIAT_INLINE void fiat_p256_mulx_u32(uint32_t* out1, uint32_t* out2, uint32_t arg1, uint32_t arg2) { uint64_t x1; uint32_t x2; uint32_t x3; x1 = ((uint64_t)arg1 * arg2); x2 = (uint32_t)(x1 & UINT32_C(0xffffffff)); x3 = (uint32_t)(x1 >> 32); *out1 = x2; *out2 = x3; } /* * The function fiat_p256_cmovznz_u32 is a single-word conditional move. * * Postconditions: * out1 = (if arg1 = 0 then arg2 else arg3) * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [0x0 ~> 0xffffffff] * arg3: [0x0 ~> 0xffffffff] * Output Bounds: * out1: [0x0 ~> 0xffffffff] */ static FIAT_P256_FIAT_INLINE void fiat_p256_cmovznz_u32(uint32_t* out1, fiat_p256_uint1 arg1, uint32_t arg2, uint32_t arg3) { fiat_p256_uint1 x1; uint32_t x2; uint32_t x3; x1 = (!(!arg1)); x2 = ((fiat_p256_int1)(0x0 - x1) & UINT32_C(0xffffffff)); x3 = ((fiat_p256_value_barrier_u32(x2) & arg3) | (fiat_p256_value_barrier_u32((~x2)) & arg2)); *out1 = x3; } /* * The function fiat_p256_mul multiplies two field elements in the Montgomery domain. 
* * Preconditions: * 0 ≤ eval arg1 < m * 0 ≤ eval arg2 < m * Postconditions: * eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m * 0 ≤ eval out1 < m * */ static FIAT_P256_FIAT_INLINE void fiat_p256_mul(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1, const fiat_p256_montgomery_domain_field_element arg2) { uint32_t x1; uint32_t x2; uint32_t x3; uint32_t x4; uint32_t x5; uint32_t x6; uint32_t x7; uint32_t x8; uint32_t x9; uint32_t x10; uint32_t x11; uint32_t x12; uint32_t x13; uint32_t x14; uint32_t x15; uint32_t x16; uint32_t x17; uint32_t x18; uint32_t x19; uint32_t x20; uint32_t x21; uint32_t x22; uint32_t x23; uint32_t x24; uint32_t x25; fiat_p256_uint1 x26; uint32_t x27; fiat_p256_uint1 x28; uint32_t x29; fiat_p256_uint1 x30; uint32_t x31; fiat_p256_uint1 x32; uint32_t x33; fiat_p256_uint1 x34; uint32_t x35; fiat_p256_uint1 x36; uint32_t x37; fiat_p256_uint1 x38; uint32_t x39; uint32_t x40; uint32_t x41; uint32_t x42; uint32_t x43; uint32_t x44; uint32_t x45; uint32_t x46; uint32_t x47; uint32_t x48; fiat_p256_uint1 x49; uint32_t x50; fiat_p256_uint1 x51; uint32_t x52; uint32_t x53; fiat_p256_uint1 x54; uint32_t x55; fiat_p256_uint1 x56; uint32_t x57; fiat_p256_uint1 x58; uint32_t x59; fiat_p256_uint1 x60; uint32_t x61; fiat_p256_uint1 x62; uint32_t x63; fiat_p256_uint1 x64; uint32_t x65; fiat_p256_uint1 x66; uint32_t x67; fiat_p256_uint1 x68; uint32_t x69; fiat_p256_uint1 x70; uint32_t x71; uint32_t x72; uint32_t x73; uint32_t x74; uint32_t x75; uint32_t x76; uint32_t x77; uint32_t x78; uint32_t x79; uint32_t x80; uint32_t x81; uint32_t x82; uint32_t x83; uint32_t x84; uint32_t x85; uint32_t x86; uint32_t x87; fiat_p256_uint1 x88; uint32_t x89; fiat_p256_uint1 x90; uint32_t x91; fiat_p256_uint1 x92; uint32_t x93; fiat_p256_uint1 x94; uint32_t x95; fiat_p256_uint1 x96; uint32_t x97; fiat_p256_uint1 x98; uint32_t x99; fiat_p256_uint1 x100; uint32_t x101; 
uint32_t x102; fiat_p256_uint1 x103; uint32_t x104; fiat_p256_uint1 x105; uint32_t x106; fiat_p256_uint1 x107; uint32_t x108; fiat_p256_uint1 x109; uint32_t x110; fiat_p256_uint1 x111; uint32_t x112; fiat_p256_uint1 x113; uint32_t x114; fiat_p256_uint1 x115; uint32_t x116; fiat_p256_uint1 x117; uint32_t x118; fiat_p256_uint1 x119; uint32_t x120; uint32_t x121; uint32_t x122; uint32_t x123; uint32_t x124; uint32_t x125; uint32_t x126; uint32_t x127; uint32_t x128; fiat_p256_uint1 x129; uint32_t x130; fiat_p256_uint1 x131; uint32_t x132; uint32_t x133; fiat_p256_uint1 x134; uint32_t x135; fiat_p256_uint1 x136; uint32_t x137; fiat_p256_uint1 x138; uint32_t x139; fiat_p256_uint1 x140; uint32_t x141; fiat_p256_uint1 x142; uint32_t x143; fiat_p256_uint1 x144; uint32_t x145; fiat_p256_uint1 x146; uint32_t x147; fiat_p256_uint1 x148; uint32_t x149; fiat_p256_uint1 x150; uint32_t x151; uint32_t x152; uint32_t x153; uint32_t x154; uint32_t x155; uint32_t x156; uint32_t x157; uint32_t x158; uint32_t x159; uint32_t x160; uint32_t x161; uint32_t x162; uint32_t x163; uint32_t x164; uint32_t x165; uint32_t x166; uint32_t x167; uint32_t x168; fiat_p256_uint1 x169; uint32_t x170; fiat_p256_uint1 x171; uint32_t x172; fiat_p256_uint1 x173; uint32_t x174; fiat_p256_uint1 x175; uint32_t x176; fiat_p256_uint1 x177; uint32_t x178; fiat_p256_uint1 x179; uint32_t x180; fiat_p256_uint1 x181; uint32_t x182; uint32_t x183; fiat_p256_uint1 x184; uint32_t x185; fiat_p256_uint1 x186; uint32_t x187; fiat_p256_uint1 x188; uint32_t x189; fiat_p256_uint1 x190; uint32_t x191; fiat_p256_uint1 x192; uint32_t x193; fiat_p256_uint1 x194; uint32_t x195; fiat_p256_uint1 x196; uint32_t x197; fiat_p256_uint1 x198; uint32_t x199; fiat_p256_uint1 x200; uint32_t x201; uint32_t x202; uint32_t x203; uint32_t x204; uint32_t x205; uint32_t x206; uint32_t x207; uint32_t x208; uint32_t x209; fiat_p256_uint1 x210; uint32_t x211; fiat_p256_uint1 x212; uint32_t x213; uint32_t x214; fiat_p256_uint1 x215; uint32_t x216; 
fiat_p256_uint1 x217; uint32_t x218; fiat_p256_uint1 x219; uint32_t x220; fiat_p256_uint1 x221; uint32_t x222; fiat_p256_uint1 x223; uint32_t x224; fiat_p256_uint1 x225; uint32_t x226; fiat_p256_uint1 x227; uint32_t x228; fiat_p256_uint1 x229; uint32_t x230; fiat_p256_uint1 x231; uint32_t x232; uint32_t x233; uint32_t x234; uint32_t x235; uint32_t x236; uint32_t x237; uint32_t x238; uint32_t x239; uint32_t x240; uint32_t x241; uint32_t x242; uint32_t x243; uint32_t x244; uint32_t x245; uint32_t x246; uint32_t x247; uint32_t x248; uint32_t x249; fiat_p256_uint1 x250; uint32_t x251; fiat_p256_uint1 x252; uint32_t x253; fiat_p256_uint1 x254; uint32_t x255; fiat_p256_uint1 x256; uint32_t x257; fiat_p256_uint1 x258; uint32_t x259; fiat_p256_uint1 x260; uint32_t x261; fiat_p256_uint1 x262; uint32_t x263; uint32_t x264; fiat_p256_uint1 x265; uint32_t x266; fiat_p256_uint1 x267; uint32_t x268; fiat_p256_uint1 x269; uint32_t x270; fiat_p256_uint1 x271; uint32_t x272; fiat_p256_uint1 x273; uint32_t x274; fiat_p256_uint1 x275; uint32_t x276; fiat_p256_uint1 x277; uint32_t x278; fiat_p256_uint1 x279; uint32_t x280; fiat_p256_uint1 x281; uint32_t x282; uint32_t x283; uint32_t x284; uint32_t x285; uint32_t x286; uint32_t x287; uint32_t x288; uint32_t x289; uint32_t x290; fiat_p256_uint1 x291; uint32_t x292; fiat_p256_uint1 x293; uint32_t x294; uint32_t x295; fiat_p256_uint1 x296; uint32_t x297; fiat_p256_uint1 x298; uint32_t x299; fiat_p256_uint1 x300; uint32_t x301; fiat_p256_uint1 x302; uint32_t x303; fiat_p256_uint1 x304; uint32_t x305; fiat_p256_uint1 x306; uint32_t x307; fiat_p256_uint1 x308; uint32_t x309; fiat_p256_uint1 x310; uint32_t x311; fiat_p256_uint1 x312; uint32_t x313; uint32_t x314; uint32_t x315; uint32_t x316; uint32_t x317; uint32_t x318; uint32_t x319; uint32_t x320; uint32_t x321; uint32_t x322; uint32_t x323; uint32_t x324; uint32_t x325; uint32_t x326; uint32_t x327; uint32_t x328; uint32_t x329; uint32_t x330; fiat_p256_uint1 x331; uint32_t x332; 
fiat_p256_uint1 x333; uint32_t x334; fiat_p256_uint1 x335; uint32_t x336; fiat_p256_uint1 x337; uint32_t x338; fiat_p256_uint1 x339; uint32_t x340; fiat_p256_uint1 x341; uint32_t x342; fiat_p256_uint1 x343; uint32_t x344; uint32_t x345; fiat_p256_uint1 x346; uint32_t x347; fiat_p256_uint1 x348; uint32_t x349; fiat_p256_uint1 x350; uint32_t x351; fiat_p256_uint1 x352; uint32_t x353; fiat_p256_uint1 x354; uint32_t x355; fiat_p256_uint1 x356; uint32_t x357; fiat_p256_uint1 x358; uint32_t x359; fiat_p256_uint1 x360; uint32_t x361; fiat_p256_uint1 x362; uint32_t x363; uint32_t x364; uint32_t x365; uint32_t x366; uint32_t x367; uint32_t x368; uint32_t x369; uint32_t x370; uint32_t x371; fiat_p256_uint1 x372; uint32_t x373; fiat_p256_uint1 x374; uint32_t x375; uint32_t x376; fiat_p256_uint1 x377; uint32_t x378; fiat_p256_uint1 x379; uint32_t x380; fiat_p256_uint1 x381; uint32_t x382; fiat_p256_uint1 x383; uint32_t x384; fiat_p256_uint1 x385; uint32_t x386; fiat_p256_uint1 x387; uint32_t x388; fiat_p256_uint1 x389; uint32_t x390; fiat_p256_uint1 x391; uint32_t x392; fiat_p256_uint1 x393; uint32_t x394; uint32_t x395; uint32_t x396; uint32_t x397; uint32_t x398; uint32_t x399; uint32_t x400; uint32_t x401; uint32_t x402; uint32_t x403; uint32_t x404; uint32_t x405; uint32_t x406; uint32_t x407; uint32_t x408; uint32_t x409; uint32_t x410; uint32_t x411; fiat_p256_uint1 x412; uint32_t x413; fiat_p256_uint1 x414; uint32_t x415; fiat_p256_uint1 x416; uint32_t x417; fiat_p256_uint1 x418; uint32_t x419; fiat_p256_uint1 x420; uint32_t x421; fiat_p256_uint1 x422; uint32_t x423; fiat_p256_uint1 x424; uint32_t x425; uint32_t x426; fiat_p256_uint1 x427; uint32_t x428; fiat_p256_uint1 x429; uint32_t x430; fiat_p256_uint1 x431; uint32_t x432; fiat_p256_uint1 x433; uint32_t x434; fiat_p256_uint1 x435; uint32_t x436; fiat_p256_uint1 x437; uint32_t x438; fiat_p256_uint1 x439; uint32_t x440; fiat_p256_uint1 x441; uint32_t x442; fiat_p256_uint1 x443; uint32_t x444; uint32_t x445; uint32_t 
x446; uint32_t x447; uint32_t x448; uint32_t x449; uint32_t x450; uint32_t x451; uint32_t x452; fiat_p256_uint1 x453; uint32_t x454; fiat_p256_uint1 x455; uint32_t x456; uint32_t x457; fiat_p256_uint1 x458; uint32_t x459; fiat_p256_uint1 x460; uint32_t x461; fiat_p256_uint1 x462; uint32_t x463; fiat_p256_uint1 x464; uint32_t x465; fiat_p256_uint1 x466; uint32_t x467; fiat_p256_uint1 x468; uint32_t x469; fiat_p256_uint1 x470; uint32_t x471; fiat_p256_uint1 x472; uint32_t x473; fiat_p256_uint1 x474; uint32_t x475; uint32_t x476; uint32_t x477; uint32_t x478; uint32_t x479; uint32_t x480; uint32_t x481; uint32_t x482; uint32_t x483; uint32_t x484; uint32_t x485; uint32_t x486; uint32_t x487; uint32_t x488; uint32_t x489; uint32_t x490; uint32_t x491; uint32_t x492; fiat_p256_uint1 x493; uint32_t x494; fiat_p256_uint1 x495; uint32_t x496; fiat_p256_uint1 x497; uint32_t x498; fiat_p256_uint1 x499; uint32_t x500; fiat_p256_uint1 x501; uint32_t x502; fiat_p256_uint1 x503; uint32_t x504; fiat_p256_uint1 x505; uint32_t x506; uint32_t x507; fiat_p256_uint1 x508; uint32_t x509; fiat_p256_uint1 x510; uint32_t x511; fiat_p256_uint1 x512; uint32_t x513; fiat_p256_uint1 x514; uint32_t x515; fiat_p256_uint1 x516; uint32_t x517; fiat_p256_uint1 x518; uint32_t x519; fiat_p256_uint1 x520; uint32_t x521; fiat_p256_uint1 x522; uint32_t x523; fiat_p256_uint1 x524; uint32_t x525; uint32_t x526; uint32_t x527; uint32_t x528; uint32_t x529; uint32_t x530; uint32_t x531; uint32_t x532; uint32_t x533; fiat_p256_uint1 x534; uint32_t x535; fiat_p256_uint1 x536; uint32_t x537; uint32_t x538; fiat_p256_uint1 x539; uint32_t x540; fiat_p256_uint1 x541; uint32_t x542; fiat_p256_uint1 x543; uint32_t x544; fiat_p256_uint1 x545; uint32_t x546; fiat_p256_uint1 x547; uint32_t x548; fiat_p256_uint1 x549; uint32_t x550; fiat_p256_uint1 x551; uint32_t x552; fiat_p256_uint1 x553; uint32_t x554; fiat_p256_uint1 x555; uint32_t x556; uint32_t x557; uint32_t x558; uint32_t x559; uint32_t x560; uint32_t x561; 
uint32_t x562; uint32_t x563; uint32_t x564; uint32_t x565; uint32_t x566; uint32_t x567; uint32_t x568; uint32_t x569; uint32_t x570; uint32_t x571; uint32_t x572; uint32_t x573; fiat_p256_uint1 x574; uint32_t x575; fiat_p256_uint1 x576; uint32_t x577; fiat_p256_uint1 x578; uint32_t x579; fiat_p256_uint1 x580; uint32_t x581; fiat_p256_uint1 x582; uint32_t x583; fiat_p256_uint1 x584; uint32_t x585; fiat_p256_uint1 x586; uint32_t x587; uint32_t x588; fiat_p256_uint1 x589; uint32_t x590; fiat_p256_uint1 x591; uint32_t x592; fiat_p256_uint1 x593; uint32_t x594; fiat_p256_uint1 x595; uint32_t x596; fiat_p256_uint1 x597; uint32_t x598; fiat_p256_uint1 x599; uint32_t x600; fiat_p256_uint1 x601; uint32_t x602; fiat_p256_uint1 x603; uint32_t x604; fiat_p256_uint1 x605; uint32_t x606; uint32_t x607; uint32_t x608; uint32_t x609; uint32_t x610; uint32_t x611; uint32_t x612; uint32_t x613; uint32_t x614; fiat_p256_uint1 x615; uint32_t x616; fiat_p256_uint1 x617; uint32_t x618; uint32_t x619; fiat_p256_uint1 x620; uint32_t x621; fiat_p256_uint1 x622; uint32_t x623; fiat_p256_uint1 x624; uint32_t x625; fiat_p256_uint1 x626; uint32_t x627; fiat_p256_uint1 x628; uint32_t x629; fiat_p256_uint1 x630; uint32_t x631; fiat_p256_uint1 x632; uint32_t x633; fiat_p256_uint1 x634; uint32_t x635; fiat_p256_uint1 x636; uint32_t x637; uint32_t x638; fiat_p256_uint1 x639; uint32_t x640; fiat_p256_uint1 x641; uint32_t x642; fiat_p256_uint1 x643; uint32_t x644; fiat_p256_uint1 x645; uint32_t x646; fiat_p256_uint1 x647; uint32_t x648; fiat_p256_uint1 x649; uint32_t x650; fiat_p256_uint1 x651; uint32_t x652; fiat_p256_uint1 x653; uint32_t x654; fiat_p256_uint1 x655; uint32_t x656; uint32_t x657; uint32_t x658; uint32_t x659; uint32_t x660; uint32_t x661; uint32_t x662; uint32_t x663; x1 = (arg1[1]); x2 = (arg1[2]); x3 = (arg1[3]); x4 = (arg1[4]); x5 = (arg1[5]); x6 = (arg1[6]); x7 = (arg1[7]); x8 = (arg1[0]); fiat_p256_mulx_u32(&x9, &x10, x8, (arg2[7])); fiat_p256_mulx_u32(&x11, &x12, x8, 
(arg2[6])); fiat_p256_mulx_u32(&x13, &x14, x8, (arg2[5])); fiat_p256_mulx_u32(&x15, &x16, x8, (arg2[4])); fiat_p256_mulx_u32(&x17, &x18, x8, (arg2[3])); fiat_p256_mulx_u32(&x19, &x20, x8, (arg2[2])); fiat_p256_mulx_u32(&x21, &x22, x8, (arg2[1])); fiat_p256_mulx_u32(&x23, &x24, x8, (arg2[0])); fiat_p256_addcarryx_u32(&x25, &x26, 0x0, x24, x21); fiat_p256_addcarryx_u32(&x27, &x28, x26, x22, x19); fiat_p256_addcarryx_u32(&x29, &x30, x28, x20, x17); fiat_p256_addcarryx_u32(&x31, &x32, x30, x18, x15); fiat_p256_addcarryx_u32(&x33, &x34, x32, x16, x13); fiat_p256_addcarryx_u32(&x35, &x36, x34, x14, x11); fiat_p256_addcarryx_u32(&x37, &x38, x36, x12, x9); x39 = (x38 + x10); fiat_p256_mulx_u32(&x40, &x41, x23, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x42, &x43, x23, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x44, &x45, x23, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x46, &x47, x23, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x48, &x49, 0x0, x47, x44); fiat_p256_addcarryx_u32(&x50, &x51, x49, x45, x42); x52 = (x51 + x43); fiat_p256_addcarryx_u32(&x53, &x54, 0x0, x23, x46); fiat_p256_addcarryx_u32(&x55, &x56, x54, x25, x48); fiat_p256_addcarryx_u32(&x57, &x58, x56, x27, x50); fiat_p256_addcarryx_u32(&x59, &x60, x58, x29, x52); fiat_p256_addcarryx_u32(&x61, &x62, x60, x31, 0x0); fiat_p256_addcarryx_u32(&x63, &x64, x62, x33, 0x0); fiat_p256_addcarryx_u32(&x65, &x66, x64, x35, x23); fiat_p256_addcarryx_u32(&x67, &x68, x66, x37, x40); fiat_p256_addcarryx_u32(&x69, &x70, x68, x39, x41); fiat_p256_mulx_u32(&x71, &x72, x1, (arg2[7])); fiat_p256_mulx_u32(&x73, &x74, x1, (arg2[6])); fiat_p256_mulx_u32(&x75, &x76, x1, (arg2[5])); fiat_p256_mulx_u32(&x77, &x78, x1, (arg2[4])); fiat_p256_mulx_u32(&x79, &x80, x1, (arg2[3])); fiat_p256_mulx_u32(&x81, &x82, x1, (arg2[2])); fiat_p256_mulx_u32(&x83, &x84, x1, (arg2[1])); fiat_p256_mulx_u32(&x85, &x86, x1, (arg2[0])); fiat_p256_addcarryx_u32(&x87, &x88, 0x0, x86, x83); fiat_p256_addcarryx_u32(&x89, &x90, x88, x84, x81); 
fiat_p256_addcarryx_u32(&x91, &x92, x90, x82, x79); fiat_p256_addcarryx_u32(&x93, &x94, x92, x80, x77); fiat_p256_addcarryx_u32(&x95, &x96, x94, x78, x75); fiat_p256_addcarryx_u32(&x97, &x98, x96, x76, x73); fiat_p256_addcarryx_u32(&x99, &x100, x98, x74, x71); x101 = (x100 + x72); fiat_p256_addcarryx_u32(&x102, &x103, 0x0, x55, x85); fiat_p256_addcarryx_u32(&x104, &x105, x103, x57, x87); fiat_p256_addcarryx_u32(&x106, &x107, x105, x59, x89); fiat_p256_addcarryx_u32(&x108, &x109, x107, x61, x91); fiat_p256_addcarryx_u32(&x110, &x111, x109, x63, x93); fiat_p256_addcarryx_u32(&x112, &x113, x111, x65, x95); fiat_p256_addcarryx_u32(&x114, &x115, x113, x67, x97); fiat_p256_addcarryx_u32(&x116, &x117, x115, x69, x99); fiat_p256_addcarryx_u32(&x118, &x119, x117, x70, x101); fiat_p256_mulx_u32(&x120, &x121, x102, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x122, &x123, x102, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x124, &x125, x102, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x126, &x127, x102, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x128, &x129, 0x0, x127, x124); fiat_p256_addcarryx_u32(&x130, &x131, x129, x125, x122); x132 = (x131 + x123); fiat_p256_addcarryx_u32(&x133, &x134, 0x0, x102, x126); fiat_p256_addcarryx_u32(&x135, &x136, x134, x104, x128); fiat_p256_addcarryx_u32(&x137, &x138, x136, x106, x130); fiat_p256_addcarryx_u32(&x139, &x140, x138, x108, x132); fiat_p256_addcarryx_u32(&x141, &x142, x140, x110, 0x0); fiat_p256_addcarryx_u32(&x143, &x144, x142, x112, 0x0); fiat_p256_addcarryx_u32(&x145, &x146, x144, x114, x102); fiat_p256_addcarryx_u32(&x147, &x148, x146, x116, x120); fiat_p256_addcarryx_u32(&x149, &x150, x148, x118, x121); x151 = ((uint32_t)x150 + x119); fiat_p256_mulx_u32(&x152, &x153, x2, (arg2[7])); fiat_p256_mulx_u32(&x154, &x155, x2, (arg2[6])); fiat_p256_mulx_u32(&x156, &x157, x2, (arg2[5])); fiat_p256_mulx_u32(&x158, &x159, x2, (arg2[4])); fiat_p256_mulx_u32(&x160, &x161, x2, (arg2[3])); fiat_p256_mulx_u32(&x162, &x163, x2, (arg2[2])); 
fiat_p256_mulx_u32(&x164, &x165, x2, (arg2[1])); fiat_p256_mulx_u32(&x166, &x167, x2, (arg2[0])); fiat_p256_addcarryx_u32(&x168, &x169, 0x0, x167, x164); fiat_p256_addcarryx_u32(&x170, &x171, x169, x165, x162); fiat_p256_addcarryx_u32(&x172, &x173, x171, x163, x160); fiat_p256_addcarryx_u32(&x174, &x175, x173, x161, x158); fiat_p256_addcarryx_u32(&x176, &x177, x175, x159, x156); fiat_p256_addcarryx_u32(&x178, &x179, x177, x157, x154); fiat_p256_addcarryx_u32(&x180, &x181, x179, x155, x152); x182 = (x181 + x153); fiat_p256_addcarryx_u32(&x183, &x184, 0x0, x135, x166); fiat_p256_addcarryx_u32(&x185, &x186, x184, x137, x168); fiat_p256_addcarryx_u32(&x187, &x188, x186, x139, x170); fiat_p256_addcarryx_u32(&x189, &x190, x188, x141, x172); fiat_p256_addcarryx_u32(&x191, &x192, x190, x143, x174); fiat_p256_addcarryx_u32(&x193, &x194, x192, x145, x176); fiat_p256_addcarryx_u32(&x195, &x196, x194, x147, x178); fiat_p256_addcarryx_u32(&x197, &x198, x196, x149, x180); fiat_p256_addcarryx_u32(&x199, &x200, x198, x151, x182); fiat_p256_mulx_u32(&x201, &x202, x183, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x203, &x204, x183, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x205, &x206, x183, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x207, &x208, x183, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x209, &x210, 0x0, x208, x205); fiat_p256_addcarryx_u32(&x211, &x212, x210, x206, x203); x213 = (x212 + x204); fiat_p256_addcarryx_u32(&x214, &x215, 0x0, x183, x207); fiat_p256_addcarryx_u32(&x216, &x217, x215, x185, x209); fiat_p256_addcarryx_u32(&x218, &x219, x217, x187, x211); fiat_p256_addcarryx_u32(&x220, &x221, x219, x189, x213); fiat_p256_addcarryx_u32(&x222, &x223, x221, x191, 0x0); fiat_p256_addcarryx_u32(&x224, &x225, x223, x193, 0x0); fiat_p256_addcarryx_u32(&x226, &x227, x225, x195, x183); fiat_p256_addcarryx_u32(&x228, &x229, x227, x197, x201); fiat_p256_addcarryx_u32(&x230, &x231, x229, x199, x202); x232 = ((uint32_t)x231 + x200); fiat_p256_mulx_u32(&x233, &x234, x3, 
(arg2[7])); fiat_p256_mulx_u32(&x235, &x236, x3, (arg2[6])); fiat_p256_mulx_u32(&x237, &x238, x3, (arg2[5])); fiat_p256_mulx_u32(&x239, &x240, x3, (arg2[4])); fiat_p256_mulx_u32(&x241, &x242, x3, (arg2[3])); fiat_p256_mulx_u32(&x243, &x244, x3, (arg2[2])); fiat_p256_mulx_u32(&x245, &x246, x3, (arg2[1])); fiat_p256_mulx_u32(&x247, &x248, x3, (arg2[0])); fiat_p256_addcarryx_u32(&x249, &x250, 0x0, x248, x245); fiat_p256_addcarryx_u32(&x251, &x252, x250, x246, x243); fiat_p256_addcarryx_u32(&x253, &x254, x252, x244, x241); fiat_p256_addcarryx_u32(&x255, &x256, x254, x242, x239); fiat_p256_addcarryx_u32(&x257, &x258, x256, x240, x237); fiat_p256_addcarryx_u32(&x259, &x260, x258, x238, x235); fiat_p256_addcarryx_u32(&x261, &x262, x260, x236, x233); x263 = (x262 + x234); fiat_p256_addcarryx_u32(&x264, &x265, 0x0, x216, x247); fiat_p256_addcarryx_u32(&x266, &x267, x265, x218, x249); fiat_p256_addcarryx_u32(&x268, &x269, x267, x220, x251); fiat_p256_addcarryx_u32(&x270, &x271, x269, x222, x253); fiat_p256_addcarryx_u32(&x272, &x273, x271, x224, x255); fiat_p256_addcarryx_u32(&x274, &x275, x273, x226, x257); fiat_p256_addcarryx_u32(&x276, &x277, x275, x228, x259); fiat_p256_addcarryx_u32(&x278, &x279, x277, x230, x261); fiat_p256_addcarryx_u32(&x280, &x281, x279, x232, x263); fiat_p256_mulx_u32(&x282, &x283, x264, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x284, &x285, x264, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x286, &x287, x264, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x288, &x289, x264, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x290, &x291, 0x0, x289, x286); fiat_p256_addcarryx_u32(&x292, &x293, x291, x287, x284); x294 = (x293 + x285); fiat_p256_addcarryx_u32(&x295, &x296, 0x0, x264, x288); fiat_p256_addcarryx_u32(&x297, &x298, x296, x266, x290); fiat_p256_addcarryx_u32(&x299, &x300, x298, x268, x292); fiat_p256_addcarryx_u32(&x301, &x302, x300, x270, x294); fiat_p256_addcarryx_u32(&x303, &x304, x302, x272, 0x0); fiat_p256_addcarryx_u32(&x305, &x306, x304, 
x274, 0x0); fiat_p256_addcarryx_u32(&x307, &x308, x306, x276, x264); fiat_p256_addcarryx_u32(&x309, &x310, x308, x278, x282); fiat_p256_addcarryx_u32(&x311, &x312, x310, x280, x283); x313 = ((uint32_t)x312 + x281); fiat_p256_mulx_u32(&x314, &x315, x4, (arg2[7])); fiat_p256_mulx_u32(&x316, &x317, x4, (arg2[6])); fiat_p256_mulx_u32(&x318, &x319, x4, (arg2[5])); fiat_p256_mulx_u32(&x320, &x321, x4, (arg2[4])); fiat_p256_mulx_u32(&x322, &x323, x4, (arg2[3])); fiat_p256_mulx_u32(&x324, &x325, x4, (arg2[2])); fiat_p256_mulx_u32(&x326, &x327, x4, (arg2[1])); fiat_p256_mulx_u32(&x328, &x329, x4, (arg2[0])); fiat_p256_addcarryx_u32(&x330, &x331, 0x0, x329, x326); fiat_p256_addcarryx_u32(&x332, &x333, x331, x327, x324); fiat_p256_addcarryx_u32(&x334, &x335, x333, x325, x322); fiat_p256_addcarryx_u32(&x336, &x337, x335, x323, x320); fiat_p256_addcarryx_u32(&x338, &x339, x337, x321, x318); fiat_p256_addcarryx_u32(&x340, &x341, x339, x319, x316); fiat_p256_addcarryx_u32(&x342, &x343, x341, x317, x314); x344 = (x343 + x315); fiat_p256_addcarryx_u32(&x345, &x346, 0x0, x297, x328); fiat_p256_addcarryx_u32(&x347, &x348, x346, x299, x330); fiat_p256_addcarryx_u32(&x349, &x350, x348, x301, x332); fiat_p256_addcarryx_u32(&x351, &x352, x350, x303, x334); fiat_p256_addcarryx_u32(&x353, &x354, x352, x305, x336); fiat_p256_addcarryx_u32(&x355, &x356, x354, x307, x338); fiat_p256_addcarryx_u32(&x357, &x358, x356, x309, x340); fiat_p256_addcarryx_u32(&x359, &x360, x358, x311, x342); fiat_p256_addcarryx_u32(&x361, &x362, x360, x313, x344); fiat_p256_mulx_u32(&x363, &x364, x345, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x365, &x366, x345, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x367, &x368, x345, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x369, &x370, x345, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x371, &x372, 0x0, x370, x367); fiat_p256_addcarryx_u32(&x373, &x374, x372, x368, x365); x375 = (x374 + x366); fiat_p256_addcarryx_u32(&x376, &x377, 0x0, x345, x369); 
fiat_p256_addcarryx_u32(&x378, &x379, x377, x347, x371); fiat_p256_addcarryx_u32(&x380, &x381, x379, x349, x373); fiat_p256_addcarryx_u32(&x382, &x383, x381, x351, x375); fiat_p256_addcarryx_u32(&x384, &x385, x383, x353, 0x0); fiat_p256_addcarryx_u32(&x386, &x387, x385, x355, 0x0); fiat_p256_addcarryx_u32(&x388, &x389, x387, x357, x345); fiat_p256_addcarryx_u32(&x390, &x391, x389, x359, x363); fiat_p256_addcarryx_u32(&x392, &x393, x391, x361, x364); x394 = ((uint32_t)x393 + x362); fiat_p256_mulx_u32(&x395, &x396, x5, (arg2[7])); fiat_p256_mulx_u32(&x397, &x398, x5, (arg2[6])); fiat_p256_mulx_u32(&x399, &x400, x5, (arg2[5])); fiat_p256_mulx_u32(&x401, &x402, x5, (arg2[4])); fiat_p256_mulx_u32(&x403, &x404, x5, (arg2[3])); fiat_p256_mulx_u32(&x405, &x406, x5, (arg2[2])); fiat_p256_mulx_u32(&x407, &x408, x5, (arg2[1])); fiat_p256_mulx_u32(&x409, &x410, x5, (arg2[0])); fiat_p256_addcarryx_u32(&x411, &x412, 0x0, x410, x407); fiat_p256_addcarryx_u32(&x413, &x414, x412, x408, x405); fiat_p256_addcarryx_u32(&x415, &x416, x414, x406, x403); fiat_p256_addcarryx_u32(&x417, &x418, x416, x404, x401); fiat_p256_addcarryx_u32(&x419, &x420, x418, x402, x399); fiat_p256_addcarryx_u32(&x421, &x422, x420, x400, x397); fiat_p256_addcarryx_u32(&x423, &x424, x422, x398, x395); x425 = (x424 + x396); fiat_p256_addcarryx_u32(&x426, &x427, 0x0, x378, x409); fiat_p256_addcarryx_u32(&x428, &x429, x427, x380, x411); fiat_p256_addcarryx_u32(&x430, &x431, x429, x382, x413); fiat_p256_addcarryx_u32(&x432, &x433, x431, x384, x415); fiat_p256_addcarryx_u32(&x434, &x435, x433, x386, x417); fiat_p256_addcarryx_u32(&x436, &x437, x435, x388, x419); fiat_p256_addcarryx_u32(&x438, &x439, x437, x390, x421); fiat_p256_addcarryx_u32(&x440, &x441, x439, x392, x423); fiat_p256_addcarryx_u32(&x442, &x443, x441, x394, x425); fiat_p256_mulx_u32(&x444, &x445, x426, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x446, &x447, x426, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x448, &x449, x426, UINT32_C(0xffffffff)); 
fiat_p256_mulx_u32(&x450, &x451, x426, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x452, &x453, 0x0, x451, x448); fiat_p256_addcarryx_u32(&x454, &x455, x453, x449, x446); x456 = (x455 + x447); fiat_p256_addcarryx_u32(&x457, &x458, 0x0, x426, x450); fiat_p256_addcarryx_u32(&x459, &x460, x458, x428, x452); fiat_p256_addcarryx_u32(&x461, &x462, x460, x430, x454); fiat_p256_addcarryx_u32(&x463, &x464, x462, x432, x456); fiat_p256_addcarryx_u32(&x465, &x466, x464, x434, 0x0); fiat_p256_addcarryx_u32(&x467, &x468, x466, x436, 0x0); fiat_p256_addcarryx_u32(&x469, &x470, x468, x438, x426); fiat_p256_addcarryx_u32(&x471, &x472, x470, x440, x444); fiat_p256_addcarryx_u32(&x473, &x474, x472, x442, x445); x475 = ((uint32_t)x474 + x443); fiat_p256_mulx_u32(&x476, &x477, x6, (arg2[7])); fiat_p256_mulx_u32(&x478, &x479, x6, (arg2[6])); fiat_p256_mulx_u32(&x480, &x481, x6, (arg2[5])); fiat_p256_mulx_u32(&x482, &x483, x6, (arg2[4])); fiat_p256_mulx_u32(&x484, &x485, x6, (arg2[3])); fiat_p256_mulx_u32(&x486, &x487, x6, (arg2[2])); fiat_p256_mulx_u32(&x488, &x489, x6, (arg2[1])); fiat_p256_mulx_u32(&x490, &x491, x6, (arg2[0])); fiat_p256_addcarryx_u32(&x492, &x493, 0x0, x491, x488); fiat_p256_addcarryx_u32(&x494, &x495, x493, x489, x486); fiat_p256_addcarryx_u32(&x496, &x497, x495, x487, x484); fiat_p256_addcarryx_u32(&x498, &x499, x497, x485, x482); fiat_p256_addcarryx_u32(&x500, &x501, x499, x483, x480); fiat_p256_addcarryx_u32(&x502, &x503, x501, x481, x478); fiat_p256_addcarryx_u32(&x504, &x505, x503, x479, x476); x506 = (x505 + x477); fiat_p256_addcarryx_u32(&x507, &x508, 0x0, x459, x490); fiat_p256_addcarryx_u32(&x509, &x510, x508, x461, x492); fiat_p256_addcarryx_u32(&x511, &x512, x510, x463, x494); fiat_p256_addcarryx_u32(&x513, &x514, x512, x465, x496); fiat_p256_addcarryx_u32(&x515, &x516, x514, x467, x498); fiat_p256_addcarryx_u32(&x517, &x518, x516, x469, x500); fiat_p256_addcarryx_u32(&x519, &x520, x518, x471, x502); fiat_p256_addcarryx_u32(&x521, &x522, x520, x473, 
x504); fiat_p256_addcarryx_u32(&x523, &x524, x522, x475, x506); fiat_p256_mulx_u32(&x525, &x526, x507, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x527, &x528, x507, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x529, &x530, x507, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x531, &x532, x507, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x533, &x534, 0x0, x532, x529); fiat_p256_addcarryx_u32(&x535, &x536, x534, x530, x527); x537 = (x536 + x528); fiat_p256_addcarryx_u32(&x538, &x539, 0x0, x507, x531); fiat_p256_addcarryx_u32(&x540, &x541, x539, x509, x533); fiat_p256_addcarryx_u32(&x542, &x543, x541, x511, x535); fiat_p256_addcarryx_u32(&x544, &x545, x543, x513, x537); fiat_p256_addcarryx_u32(&x546, &x547, x545, x515, 0x0); fiat_p256_addcarryx_u32(&x548, &x549, x547, x517, 0x0); fiat_p256_addcarryx_u32(&x550, &x551, x549, x519, x507); fiat_p256_addcarryx_u32(&x552, &x553, x551, x521, x525); fiat_p256_addcarryx_u32(&x554, &x555, x553, x523, x526); x556 = ((uint32_t)x555 + x524); fiat_p256_mulx_u32(&x557, &x558, x7, (arg2[7])); fiat_p256_mulx_u32(&x559, &x560, x7, (arg2[6])); fiat_p256_mulx_u32(&x561, &x562, x7, (arg2[5])); fiat_p256_mulx_u32(&x563, &x564, x7, (arg2[4])); fiat_p256_mulx_u32(&x565, &x566, x7, (arg2[3])); fiat_p256_mulx_u32(&x567, &x568, x7, (arg2[2])); fiat_p256_mulx_u32(&x569, &x570, x7, (arg2[1])); fiat_p256_mulx_u32(&x571, &x572, x7, (arg2[0])); fiat_p256_addcarryx_u32(&x573, &x574, 0x0, x572, x569); fiat_p256_addcarryx_u32(&x575, &x576, x574, x570, x567); fiat_p256_addcarryx_u32(&x577, &x578, x576, x568, x565); fiat_p256_addcarryx_u32(&x579, &x580, x578, x566, x563); fiat_p256_addcarryx_u32(&x581, &x582, x580, x564, x561); fiat_p256_addcarryx_u32(&x583, &x584, x582, x562, x559); fiat_p256_addcarryx_u32(&x585, &x586, x584, x560, x557); x587 = (x586 + x558); fiat_p256_addcarryx_u32(&x588, &x589, 0x0, x540, x571); fiat_p256_addcarryx_u32(&x590, &x591, x589, x542, x573); fiat_p256_addcarryx_u32(&x592, &x593, x591, x544, x575); 
fiat_p256_addcarryx_u32(&x594, &x595, x593, x546, x577); fiat_p256_addcarryx_u32(&x596, &x597, x595, x548, x579); fiat_p256_addcarryx_u32(&x598, &x599, x597, x550, x581); fiat_p256_addcarryx_u32(&x600, &x601, x599, x552, x583); fiat_p256_addcarryx_u32(&x602, &x603, x601, x554, x585); fiat_p256_addcarryx_u32(&x604, &x605, x603, x556, x587); fiat_p256_mulx_u32(&x606, &x607, x588, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x608, &x609, x588, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x610, &x611, x588, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x612, &x613, x588, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x614, &x615, 0x0, x613, x610); fiat_p256_addcarryx_u32(&x616, &x617, x615, x611, x608); x618 = (x617 + x609); fiat_p256_addcarryx_u32(&x619, &x620, 0x0, x588, x612); fiat_p256_addcarryx_u32(&x621, &x622, x620, x590, x614); fiat_p256_addcarryx_u32(&x623, &x624, x622, x592, x616); fiat_p256_addcarryx_u32(&x625, &x626, x624, x594, x618); fiat_p256_addcarryx_u32(&x627, &x628, x626, x596, 0x0); fiat_p256_addcarryx_u32(&x629, &x630, x628, x598, 0x0); fiat_p256_addcarryx_u32(&x631, &x632, x630, x600, x588); fiat_p256_addcarryx_u32(&x633, &x634, x632, x602, x606); fiat_p256_addcarryx_u32(&x635, &x636, x634, x604, x607); x637 = ((uint32_t)x636 + x605); fiat_p256_subborrowx_u32(&x638, &x639, 0x0, x621, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x640, &x641, x639, x623, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x642, &x643, x641, x625, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x644, &x645, x643, x627, 0x0); fiat_p256_subborrowx_u32(&x646, &x647, x645, x629, 0x0); fiat_p256_subborrowx_u32(&x648, &x649, x647, x631, 0x0); fiat_p256_subborrowx_u32(&x650, &x651, x649, x633, 0x1); fiat_p256_subborrowx_u32(&x652, &x653, x651, x635, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x654, &x655, x653, x637, 0x0); fiat_p256_cmovznz_u32(&x656, x655, x638, x621); fiat_p256_cmovznz_u32(&x657, x655, x640, x623); fiat_p256_cmovznz_u32(&x658, x655, x642, x625); 
fiat_p256_cmovznz_u32(&x659, x655, x644, x627); fiat_p256_cmovznz_u32(&x660, x655, x646, x629); fiat_p256_cmovznz_u32(&x661, x655, x648, x631); fiat_p256_cmovznz_u32(&x662, x655, x650, x633); fiat_p256_cmovznz_u32(&x663, x655, x652, x635); out1[0] = x656; out1[1] = x657; out1[2] = x658; out1[3] = x659; out1[4] = x660; out1[5] = x661; out1[6] = x662; out1[7] = x663; } /* * The function fiat_p256_square squares a field element in the Montgomery domain. * * Preconditions: * 0 ≤ eval arg1 < m * Postconditions: * eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m * 0 ≤ eval out1 < m * */ static FIAT_P256_FIAT_INLINE void fiat_p256_square(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1) { uint32_t x1; uint32_t x2; uint32_t x3; uint32_t x4; uint32_t x5; uint32_t x6; uint32_t x7; uint32_t x8; uint32_t x9; uint32_t x10; uint32_t x11; uint32_t x12; uint32_t x13; uint32_t x14; uint32_t x15; uint32_t x16; uint32_t x17; uint32_t x18; uint32_t x19; uint32_t x20; uint32_t x21; uint32_t x22; uint32_t x23; uint32_t x24; uint32_t x25; fiat_p256_uint1 x26; uint32_t x27; fiat_p256_uint1 x28; uint32_t x29; fiat_p256_uint1 x30; uint32_t x31; fiat_p256_uint1 x32; uint32_t x33; fiat_p256_uint1 x34; uint32_t x35; fiat_p256_uint1 x36; uint32_t x37; fiat_p256_uint1 x38; uint32_t x39; uint32_t x40; uint32_t x41; uint32_t x42; uint32_t x43; uint32_t x44; uint32_t x45; uint32_t x46; uint32_t x47; uint32_t x48; fiat_p256_uint1 x49; uint32_t x50; fiat_p256_uint1 x51; uint32_t x52; uint32_t x53; fiat_p256_uint1 x54; uint32_t x55; fiat_p256_uint1 x56; uint32_t x57; fiat_p256_uint1 x58; uint32_t x59; fiat_p256_uint1 x60; uint32_t x61; fiat_p256_uint1 x62; uint32_t x63; fiat_p256_uint1 x64; uint32_t x65; fiat_p256_uint1 x66; uint32_t x67; fiat_p256_uint1 x68; uint32_t x69; fiat_p256_uint1 x70; uint32_t x71; uint32_t x72; uint32_t x73; uint32_t x74; uint32_t x75; uint32_t x76; uint32_t x77; uint32_t 
x78; uint32_t x79; uint32_t x80; uint32_t x81; uint32_t x82; uint32_t x83; uint32_t x84; uint32_t x85; uint32_t x86; uint32_t x87; fiat_p256_uint1 x88; uint32_t x89; fiat_p256_uint1 x90; uint32_t x91; fiat_p256_uint1 x92; uint32_t x93; fiat_p256_uint1 x94; uint32_t x95; fiat_p256_uint1 x96; uint32_t x97; fiat_p256_uint1 x98; uint32_t x99; fiat_p256_uint1 x100; uint32_t x101; uint32_t x102; fiat_p256_uint1 x103; uint32_t x104; fiat_p256_uint1 x105; uint32_t x106; fiat_p256_uint1 x107; uint32_t x108; fiat_p256_uint1 x109; uint32_t x110; fiat_p256_uint1 x111; uint32_t x112; fiat_p256_uint1 x113; uint32_t x114; fiat_p256_uint1 x115; uint32_t x116; fiat_p256_uint1 x117; uint32_t x118; fiat_p256_uint1 x119; uint32_t x120; uint32_t x121; uint32_t x122; uint32_t x123; uint32_t x124; uint32_t x125; uint32_t x126; uint32_t x127; uint32_t x128; fiat_p256_uint1 x129; uint32_t x130; fiat_p256_uint1 x131; uint32_t x132; uint32_t x133; fiat_p256_uint1 x134; uint32_t x135; fiat_p256_uint1 x136; uint32_t x137; fiat_p256_uint1 x138; uint32_t x139; fiat_p256_uint1 x140; uint32_t x141; fiat_p256_uint1 x142; uint32_t x143; fiat_p256_uint1 x144; uint32_t x145; fiat_p256_uint1 x146; uint32_t x147; fiat_p256_uint1 x148; uint32_t x149; fiat_p256_uint1 x150; uint32_t x151; uint32_t x152; uint32_t x153; uint32_t x154; uint32_t x155; uint32_t x156; uint32_t x157; uint32_t x158; uint32_t x159; uint32_t x160; uint32_t x161; uint32_t x162; uint32_t x163; uint32_t x164; uint32_t x165; uint32_t x166; uint32_t x167; uint32_t x168; fiat_p256_uint1 x169; uint32_t x170; fiat_p256_uint1 x171; uint32_t x172; fiat_p256_uint1 x173; uint32_t x174; fiat_p256_uint1 x175; uint32_t x176; fiat_p256_uint1 x177; uint32_t x178; fiat_p256_uint1 x179; uint32_t x180; fiat_p256_uint1 x181; uint32_t x182; uint32_t x183; fiat_p256_uint1 x184; uint32_t x185; fiat_p256_uint1 x186; uint32_t x187; fiat_p256_uint1 x188; uint32_t x189; fiat_p256_uint1 x190; uint32_t x191; fiat_p256_uint1 x192; uint32_t x193; fiat_p256_uint1 
x194; uint32_t x195; fiat_p256_uint1 x196; uint32_t x197; fiat_p256_uint1 x198; uint32_t x199; fiat_p256_uint1 x200; uint32_t x201; uint32_t x202; uint32_t x203; uint32_t x204; uint32_t x205; uint32_t x206; uint32_t x207; uint32_t x208; uint32_t x209; fiat_p256_uint1 x210; uint32_t x211; fiat_p256_uint1 x212; uint32_t x213; uint32_t x214; fiat_p256_uint1 x215; uint32_t x216; fiat_p256_uint1 x217; uint32_t x218; fiat_p256_uint1 x219; uint32_t x220; fiat_p256_uint1 x221; uint32_t x222; fiat_p256_uint1 x223; uint32_t x224; fiat_p256_uint1 x225; uint32_t x226; fiat_p256_uint1 x227; uint32_t x228; fiat_p256_uint1 x229; uint32_t x230; fiat_p256_uint1 x231; uint32_t x232; uint32_t x233; uint32_t x234; uint32_t x235; uint32_t x236; uint32_t x237; uint32_t x238; uint32_t x239; uint32_t x240; uint32_t x241; uint32_t x242; uint32_t x243; uint32_t x244; uint32_t x245; uint32_t x246; uint32_t x247; uint32_t x248; uint32_t x249; fiat_p256_uint1 x250; uint32_t x251; fiat_p256_uint1 x252; uint32_t x253; fiat_p256_uint1 x254; uint32_t x255; fiat_p256_uint1 x256; uint32_t x257; fiat_p256_uint1 x258; uint32_t x259; fiat_p256_uint1 x260; uint32_t x261; fiat_p256_uint1 x262; uint32_t x263; uint32_t x264; fiat_p256_uint1 x265; uint32_t x266; fiat_p256_uint1 x267; uint32_t x268; fiat_p256_uint1 x269; uint32_t x270; fiat_p256_uint1 x271; uint32_t x272; fiat_p256_uint1 x273; uint32_t x274; fiat_p256_uint1 x275; uint32_t x276; fiat_p256_uint1 x277; uint32_t x278; fiat_p256_uint1 x279; uint32_t x280; fiat_p256_uint1 x281; uint32_t x282; uint32_t x283; uint32_t x284; uint32_t x285; uint32_t x286; uint32_t x287; uint32_t x288; uint32_t x289; uint32_t x290; fiat_p256_uint1 x291; uint32_t x292; fiat_p256_uint1 x293; uint32_t x294; uint32_t x295; fiat_p256_uint1 x296; uint32_t x297; fiat_p256_uint1 x298; uint32_t x299; fiat_p256_uint1 x300; uint32_t x301; fiat_p256_uint1 x302; uint32_t x303; fiat_p256_uint1 x304; uint32_t x305; fiat_p256_uint1 x306; uint32_t x307; fiat_p256_uint1 x308; uint32_t 
x309; fiat_p256_uint1 x310; uint32_t x311; fiat_p256_uint1 x312; uint32_t x313; uint32_t x314; uint32_t x315; uint32_t x316; uint32_t x317; uint32_t x318; uint32_t x319; uint32_t x320; uint32_t x321; uint32_t x322; uint32_t x323; uint32_t x324; uint32_t x325; uint32_t x326; uint32_t x327; uint32_t x328; uint32_t x329; uint32_t x330; fiat_p256_uint1 x331; uint32_t x332; fiat_p256_uint1 x333; uint32_t x334; fiat_p256_uint1 x335; uint32_t x336; fiat_p256_uint1 x337; uint32_t x338; fiat_p256_uint1 x339; uint32_t x340; fiat_p256_uint1 x341; uint32_t x342; fiat_p256_uint1 x343; uint32_t x344; uint32_t x345; fiat_p256_uint1 x346; uint32_t x347; fiat_p256_uint1 x348; uint32_t x349; fiat_p256_uint1 x350; uint32_t x351; fiat_p256_uint1 x352; uint32_t x353; fiat_p256_uint1 x354; uint32_t x355; fiat_p256_uint1 x356; uint32_t x357; fiat_p256_uint1 x358; uint32_t x359; fiat_p256_uint1 x360; uint32_t x361; fiat_p256_uint1 x362; uint32_t x363; uint32_t x364; uint32_t x365; uint32_t x366; uint32_t x367; uint32_t x368; uint32_t x369; uint32_t x370; uint32_t x371; fiat_p256_uint1 x372; uint32_t x373; fiat_p256_uint1 x374; uint32_t x375; uint32_t x376; fiat_p256_uint1 x377; uint32_t x378; fiat_p256_uint1 x379; uint32_t x380; fiat_p256_uint1 x381; uint32_t x382; fiat_p256_uint1 x383; uint32_t x384; fiat_p256_uint1 x385; uint32_t x386; fiat_p256_uint1 x387; uint32_t x388; fiat_p256_uint1 x389; uint32_t x390; fiat_p256_uint1 x391; uint32_t x392; fiat_p256_uint1 x393; uint32_t x394; uint32_t x395; uint32_t x396; uint32_t x397; uint32_t x398; uint32_t x399; uint32_t x400; uint32_t x401; uint32_t x402; uint32_t x403; uint32_t x404; uint32_t x405; uint32_t x406; uint32_t x407; uint32_t x408; uint32_t x409; uint32_t x410; uint32_t x411; fiat_p256_uint1 x412; uint32_t x413; fiat_p256_uint1 x414; uint32_t x415; fiat_p256_uint1 x416; uint32_t x417; fiat_p256_uint1 x418; uint32_t x419; fiat_p256_uint1 x420; uint32_t x421; fiat_p256_uint1 x422; uint32_t x423; fiat_p256_uint1 x424; uint32_t x425; 
uint32_t x426; fiat_p256_uint1 x427; uint32_t x428; fiat_p256_uint1 x429; uint32_t x430; fiat_p256_uint1 x431; uint32_t x432; fiat_p256_uint1 x433; uint32_t x434; fiat_p256_uint1 x435; uint32_t x436; fiat_p256_uint1 x437; uint32_t x438; fiat_p256_uint1 x439; uint32_t x440; fiat_p256_uint1 x441; uint32_t x442; fiat_p256_uint1 x443; uint32_t x444; uint32_t x445; uint32_t x446; uint32_t x447; uint32_t x448; uint32_t x449; uint32_t x450; uint32_t x451; uint32_t x452; fiat_p256_uint1 x453; uint32_t x454; fiat_p256_uint1 x455; uint32_t x456; uint32_t x457; fiat_p256_uint1 x458; uint32_t x459; fiat_p256_uint1 x460; uint32_t x461; fiat_p256_uint1 x462; uint32_t x463; fiat_p256_uint1 x464; uint32_t x465; fiat_p256_uint1 x466; uint32_t x467; fiat_p256_uint1 x468; uint32_t x469; fiat_p256_uint1 x470; uint32_t x471; fiat_p256_uint1 x472; uint32_t x473; fiat_p256_uint1 x474; uint32_t x475; uint32_t x476; uint32_t x477; uint32_t x478; uint32_t x479; uint32_t x480; uint32_t x481; uint32_t x482; uint32_t x483; uint32_t x484; uint32_t x485; uint32_t x486; uint32_t x487; uint32_t x488; uint32_t x489; uint32_t x490; uint32_t x491; uint32_t x492; fiat_p256_uint1 x493; uint32_t x494; fiat_p256_uint1 x495; uint32_t x496; fiat_p256_uint1 x497; uint32_t x498; fiat_p256_uint1 x499; uint32_t x500; fiat_p256_uint1 x501; uint32_t x502; fiat_p256_uint1 x503; uint32_t x504; fiat_p256_uint1 x505; uint32_t x506; uint32_t x507; fiat_p256_uint1 x508; uint32_t x509; fiat_p256_uint1 x510; uint32_t x511; fiat_p256_uint1 x512; uint32_t x513; fiat_p256_uint1 x514; uint32_t x515; fiat_p256_uint1 x516; uint32_t x517; fiat_p256_uint1 x518; uint32_t x519; fiat_p256_uint1 x520; uint32_t x521; fiat_p256_uint1 x522; uint32_t x523; fiat_p256_uint1 x524; uint32_t x525; uint32_t x526; uint32_t x527; uint32_t x528; uint32_t x529; uint32_t x530; uint32_t x531; uint32_t x532; uint32_t x533; fiat_p256_uint1 x534; uint32_t x535; fiat_p256_uint1 x536; uint32_t x537; uint32_t x538; fiat_p256_uint1 x539; uint32_t x540; 
fiat_p256_uint1 x541; uint32_t x542; fiat_p256_uint1 x543; uint32_t x544; fiat_p256_uint1 x545; uint32_t x546; fiat_p256_uint1 x547; uint32_t x548; fiat_p256_uint1 x549; uint32_t x550; fiat_p256_uint1 x551; uint32_t x552; fiat_p256_uint1 x553; uint32_t x554; fiat_p256_uint1 x555; uint32_t x556; uint32_t x557; uint32_t x558; uint32_t x559; uint32_t x560; uint32_t x561; uint32_t x562; uint32_t x563; uint32_t x564; uint32_t x565; uint32_t x566; uint32_t x567; uint32_t x568; uint32_t x569; uint32_t x570; uint32_t x571; uint32_t x572; uint32_t x573; fiat_p256_uint1 x574; uint32_t x575; fiat_p256_uint1 x576; uint32_t x577; fiat_p256_uint1 x578; uint32_t x579; fiat_p256_uint1 x580; uint32_t x581; fiat_p256_uint1 x582; uint32_t x583; fiat_p256_uint1 x584; uint32_t x585; fiat_p256_uint1 x586; uint32_t x587; uint32_t x588; fiat_p256_uint1 x589; uint32_t x590; fiat_p256_uint1 x591; uint32_t x592; fiat_p256_uint1 x593; uint32_t x594; fiat_p256_uint1 x595; uint32_t x596; fiat_p256_uint1 x597; uint32_t x598; fiat_p256_uint1 x599; uint32_t x600; fiat_p256_uint1 x601; uint32_t x602; fiat_p256_uint1 x603; uint32_t x604; fiat_p256_uint1 x605; uint32_t x606; uint32_t x607; uint32_t x608; uint32_t x609; uint32_t x610; uint32_t x611; uint32_t x612; uint32_t x613; uint32_t x614; fiat_p256_uint1 x615; uint32_t x616; fiat_p256_uint1 x617; uint32_t x618; uint32_t x619; fiat_p256_uint1 x620; uint32_t x621; fiat_p256_uint1 x622; uint32_t x623; fiat_p256_uint1 x624; uint32_t x625; fiat_p256_uint1 x626; uint32_t x627; fiat_p256_uint1 x628; uint32_t x629; fiat_p256_uint1 x630; uint32_t x631; fiat_p256_uint1 x632; uint32_t x633; fiat_p256_uint1 x634; uint32_t x635; fiat_p256_uint1 x636; uint32_t x637; uint32_t x638; fiat_p256_uint1 x639; uint32_t x640; fiat_p256_uint1 x641; uint32_t x642; fiat_p256_uint1 x643; uint32_t x644; fiat_p256_uint1 x645; uint32_t x646; fiat_p256_uint1 x647; uint32_t x648; fiat_p256_uint1 x649; uint32_t x650; fiat_p256_uint1 x651; uint32_t x652; fiat_p256_uint1 x653; 
uint32_t x654; fiat_p256_uint1 x655; uint32_t x656; uint32_t x657; uint32_t x658; uint32_t x659; uint32_t x660; uint32_t x661; uint32_t x662; uint32_t x663; x1 = (arg1[1]); x2 = (arg1[2]); x3 = (arg1[3]); x4 = (arg1[4]); x5 = (arg1[5]); x6 = (arg1[6]); x7 = (arg1[7]); x8 = (arg1[0]); fiat_p256_mulx_u32(&x9, &x10, x8, (arg1[7])); fiat_p256_mulx_u32(&x11, &x12, x8, (arg1[6])); fiat_p256_mulx_u32(&x13, &x14, x8, (arg1[5])); fiat_p256_mulx_u32(&x15, &x16, x8, (arg1[4])); fiat_p256_mulx_u32(&x17, &x18, x8, (arg1[3])); fiat_p256_mulx_u32(&x19, &x20, x8, (arg1[2])); fiat_p256_mulx_u32(&x21, &x22, x8, (arg1[1])); fiat_p256_mulx_u32(&x23, &x24, x8, (arg1[0])); fiat_p256_addcarryx_u32(&x25, &x26, 0x0, x24, x21); fiat_p256_addcarryx_u32(&x27, &x28, x26, x22, x19); fiat_p256_addcarryx_u32(&x29, &x30, x28, x20, x17); fiat_p256_addcarryx_u32(&x31, &x32, x30, x18, x15); fiat_p256_addcarryx_u32(&x33, &x34, x32, x16, x13); fiat_p256_addcarryx_u32(&x35, &x36, x34, x14, x11); fiat_p256_addcarryx_u32(&x37, &x38, x36, x12, x9); x39 = (x38 + x10); fiat_p256_mulx_u32(&x40, &x41, x23, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x42, &x43, x23, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x44, &x45, x23, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x46, &x47, x23, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x48, &x49, 0x0, x47, x44); fiat_p256_addcarryx_u32(&x50, &x51, x49, x45, x42); x52 = (x51 + x43); fiat_p256_addcarryx_u32(&x53, &x54, 0x0, x23, x46); fiat_p256_addcarryx_u32(&x55, &x56, x54, x25, x48); fiat_p256_addcarryx_u32(&x57, &x58, x56, x27, x50); fiat_p256_addcarryx_u32(&x59, &x60, x58, x29, x52); fiat_p256_addcarryx_u32(&x61, &x62, x60, x31, 0x0); fiat_p256_addcarryx_u32(&x63, &x64, x62, x33, 0x0); fiat_p256_addcarryx_u32(&x65, &x66, x64, x35, x23); fiat_p256_addcarryx_u32(&x67, &x68, x66, x37, x40); fiat_p256_addcarryx_u32(&x69, &x70, x68, x39, x41); fiat_p256_mulx_u32(&x71, &x72, x1, (arg1[7])); fiat_p256_mulx_u32(&x73, &x74, x1, (arg1[6])); fiat_p256_mulx_u32(&x75, &x76, x1, 
(arg1[5])); fiat_p256_mulx_u32(&x77, &x78, x1, (arg1[4])); fiat_p256_mulx_u32(&x79, &x80, x1, (arg1[3])); fiat_p256_mulx_u32(&x81, &x82, x1, (arg1[2])); fiat_p256_mulx_u32(&x83, &x84, x1, (arg1[1])); fiat_p256_mulx_u32(&x85, &x86, x1, (arg1[0])); fiat_p256_addcarryx_u32(&x87, &x88, 0x0, x86, x83); fiat_p256_addcarryx_u32(&x89, &x90, x88, x84, x81); fiat_p256_addcarryx_u32(&x91, &x92, x90, x82, x79); fiat_p256_addcarryx_u32(&x93, &x94, x92, x80, x77); fiat_p256_addcarryx_u32(&x95, &x96, x94, x78, x75); fiat_p256_addcarryx_u32(&x97, &x98, x96, x76, x73); fiat_p256_addcarryx_u32(&x99, &x100, x98, x74, x71); x101 = (x100 + x72); fiat_p256_addcarryx_u32(&x102, &x103, 0x0, x55, x85); fiat_p256_addcarryx_u32(&x104, &x105, x103, x57, x87); fiat_p256_addcarryx_u32(&x106, &x107, x105, x59, x89); fiat_p256_addcarryx_u32(&x108, &x109, x107, x61, x91); fiat_p256_addcarryx_u32(&x110, &x111, x109, x63, x93); fiat_p256_addcarryx_u32(&x112, &x113, x111, x65, x95); fiat_p256_addcarryx_u32(&x114, &x115, x113, x67, x97); fiat_p256_addcarryx_u32(&x116, &x117, x115, x69, x99); fiat_p256_addcarryx_u32(&x118, &x119, x117, x70, x101); fiat_p256_mulx_u32(&x120, &x121, x102, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x122, &x123, x102, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x124, &x125, x102, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x126, &x127, x102, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x128, &x129, 0x0, x127, x124); fiat_p256_addcarryx_u32(&x130, &x131, x129, x125, x122); x132 = (x131 + x123); fiat_p256_addcarryx_u32(&x133, &x134, 0x0, x102, x126); fiat_p256_addcarryx_u32(&x135, &x136, x134, x104, x128); fiat_p256_addcarryx_u32(&x137, &x138, x136, x106, x130); fiat_p256_addcarryx_u32(&x139, &x140, x138, x108, x132); fiat_p256_addcarryx_u32(&x141, &x142, x140, x110, 0x0); fiat_p256_addcarryx_u32(&x143, &x144, x142, x112, 0x0); fiat_p256_addcarryx_u32(&x145, &x146, x144, x114, x102); fiat_p256_addcarryx_u32(&x147, &x148, x146, x116, x120); fiat_p256_addcarryx_u32(&x149, 
&x150, x148, x118, x121); x151 = ((uint32_t)x150 + x119); fiat_p256_mulx_u32(&x152, &x153, x2, (arg1[7])); fiat_p256_mulx_u32(&x154, &x155, x2, (arg1[6])); fiat_p256_mulx_u32(&x156, &x157, x2, (arg1[5])); fiat_p256_mulx_u32(&x158, &x159, x2, (arg1[4])); fiat_p256_mulx_u32(&x160, &x161, x2, (arg1[3])); fiat_p256_mulx_u32(&x162, &x163, x2, (arg1[2])); fiat_p256_mulx_u32(&x164, &x165, x2, (arg1[1])); fiat_p256_mulx_u32(&x166, &x167, x2, (arg1[0])); fiat_p256_addcarryx_u32(&x168, &x169, 0x0, x167, x164); fiat_p256_addcarryx_u32(&x170, &x171, x169, x165, x162); fiat_p256_addcarryx_u32(&x172, &x173, x171, x163, x160); fiat_p256_addcarryx_u32(&x174, &x175, x173, x161, x158); fiat_p256_addcarryx_u32(&x176, &x177, x175, x159, x156); fiat_p256_addcarryx_u32(&x178, &x179, x177, x157, x154); fiat_p256_addcarryx_u32(&x180, &x181, x179, x155, x152); x182 = (x181 + x153); fiat_p256_addcarryx_u32(&x183, &x184, 0x0, x135, x166); fiat_p256_addcarryx_u32(&x185, &x186, x184, x137, x168); fiat_p256_addcarryx_u32(&x187, &x188, x186, x139, x170); fiat_p256_addcarryx_u32(&x189, &x190, x188, x141, x172); fiat_p256_addcarryx_u32(&x191, &x192, x190, x143, x174); fiat_p256_addcarryx_u32(&x193, &x194, x192, x145, x176); fiat_p256_addcarryx_u32(&x195, &x196, x194, x147, x178); fiat_p256_addcarryx_u32(&x197, &x198, x196, x149, x180); fiat_p256_addcarryx_u32(&x199, &x200, x198, x151, x182); fiat_p256_mulx_u32(&x201, &x202, x183, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x203, &x204, x183, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x205, &x206, x183, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x207, &x208, x183, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x209, &x210, 0x0, x208, x205); fiat_p256_addcarryx_u32(&x211, &x212, x210, x206, x203); x213 = (x212 + x204); fiat_p256_addcarryx_u32(&x214, &x215, 0x0, x183, x207); fiat_p256_addcarryx_u32(&x216, &x217, x215, x185, x209); fiat_p256_addcarryx_u32(&x218, &x219, x217, x187, x211); fiat_p256_addcarryx_u32(&x220, &x221, x219, x189, x213); 
fiat_p256_addcarryx_u32(&x222, &x223, x221, x191, 0x0); fiat_p256_addcarryx_u32(&x224, &x225, x223, x193, 0x0); fiat_p256_addcarryx_u32(&x226, &x227, x225, x195, x183); fiat_p256_addcarryx_u32(&x228, &x229, x227, x197, x201); fiat_p256_addcarryx_u32(&x230, &x231, x229, x199, x202); x232 = ((uint32_t)x231 + x200); fiat_p256_mulx_u32(&x233, &x234, x3, (arg1[7])); fiat_p256_mulx_u32(&x235, &x236, x3, (arg1[6])); fiat_p256_mulx_u32(&x237, &x238, x3, (arg1[5])); fiat_p256_mulx_u32(&x239, &x240, x3, (arg1[4])); fiat_p256_mulx_u32(&x241, &x242, x3, (arg1[3])); fiat_p256_mulx_u32(&x243, &x244, x3, (arg1[2])); fiat_p256_mulx_u32(&x245, &x246, x3, (arg1[1])); fiat_p256_mulx_u32(&x247, &x248, x3, (arg1[0])); fiat_p256_addcarryx_u32(&x249, &x250, 0x0, x248, x245); fiat_p256_addcarryx_u32(&x251, &x252, x250, x246, x243); fiat_p256_addcarryx_u32(&x253, &x254, x252, x244, x241); fiat_p256_addcarryx_u32(&x255, &x256, x254, x242, x239); fiat_p256_addcarryx_u32(&x257, &x258, x256, x240, x237); fiat_p256_addcarryx_u32(&x259, &x260, x258, x238, x235); fiat_p256_addcarryx_u32(&x261, &x262, x260, x236, x233); x263 = (x262 + x234); fiat_p256_addcarryx_u32(&x264, &x265, 0x0, x216, x247); fiat_p256_addcarryx_u32(&x266, &x267, x265, x218, x249); fiat_p256_addcarryx_u32(&x268, &x269, x267, x220, x251); fiat_p256_addcarryx_u32(&x270, &x271, x269, x222, x253); fiat_p256_addcarryx_u32(&x272, &x273, x271, x224, x255); fiat_p256_addcarryx_u32(&x274, &x275, x273, x226, x257); fiat_p256_addcarryx_u32(&x276, &x277, x275, x228, x259); fiat_p256_addcarryx_u32(&x278, &x279, x277, x230, x261); fiat_p256_addcarryx_u32(&x280, &x281, x279, x232, x263); fiat_p256_mulx_u32(&x282, &x283, x264, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x284, &x285, x264, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x286, &x287, x264, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x288, &x289, x264, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x290, &x291, 0x0, x289, x286); fiat_p256_addcarryx_u32(&x292, &x293, x291, x287, x284); 
x294 = (x293 + x285); fiat_p256_addcarryx_u32(&x295, &x296, 0x0, x264, x288); fiat_p256_addcarryx_u32(&x297, &x298, x296, x266, x290); fiat_p256_addcarryx_u32(&x299, &x300, x298, x268, x292); fiat_p256_addcarryx_u32(&x301, &x302, x300, x270, x294); fiat_p256_addcarryx_u32(&x303, &x304, x302, x272, 0x0); fiat_p256_addcarryx_u32(&x305, &x306, x304, x274, 0x0); fiat_p256_addcarryx_u32(&x307, &x308, x306, x276, x264); fiat_p256_addcarryx_u32(&x309, &x310, x308, x278, x282); fiat_p256_addcarryx_u32(&x311, &x312, x310, x280, x283); x313 = ((uint32_t)x312 + x281); fiat_p256_mulx_u32(&x314, &x315, x4, (arg1[7])); fiat_p256_mulx_u32(&x316, &x317, x4, (arg1[6])); fiat_p256_mulx_u32(&x318, &x319, x4, (arg1[5])); fiat_p256_mulx_u32(&x320, &x321, x4, (arg1[4])); fiat_p256_mulx_u32(&x322, &x323, x4, (arg1[3])); fiat_p256_mulx_u32(&x324, &x325, x4, (arg1[2])); fiat_p256_mulx_u32(&x326, &x327, x4, (arg1[1])); fiat_p256_mulx_u32(&x328, &x329, x4, (arg1[0])); fiat_p256_addcarryx_u32(&x330, &x331, 0x0, x329, x326); fiat_p256_addcarryx_u32(&x332, &x333, x331, x327, x324); fiat_p256_addcarryx_u32(&x334, &x335, x333, x325, x322); fiat_p256_addcarryx_u32(&x336, &x337, x335, x323, x320); fiat_p256_addcarryx_u32(&x338, &x339, x337, x321, x318); fiat_p256_addcarryx_u32(&x340, &x341, x339, x319, x316); fiat_p256_addcarryx_u32(&x342, &x343, x341, x317, x314); x344 = (x343 + x315); fiat_p256_addcarryx_u32(&x345, &x346, 0x0, x297, x328); fiat_p256_addcarryx_u32(&x347, &x348, x346, x299, x330); fiat_p256_addcarryx_u32(&x349, &x350, x348, x301, x332); fiat_p256_addcarryx_u32(&x351, &x352, x350, x303, x334); fiat_p256_addcarryx_u32(&x353, &x354, x352, x305, x336); fiat_p256_addcarryx_u32(&x355, &x356, x354, x307, x338); fiat_p256_addcarryx_u32(&x357, &x358, x356, x309, x340); fiat_p256_addcarryx_u32(&x359, &x360, x358, x311, x342); fiat_p256_addcarryx_u32(&x361, &x362, x360, x313, x344); fiat_p256_mulx_u32(&x363, &x364, x345, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x365, &x366, x345, 
UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x367, &x368, x345, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x369, &x370, x345, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x371, &x372, 0x0, x370, x367); fiat_p256_addcarryx_u32(&x373, &x374, x372, x368, x365); x375 = (x374 + x366); fiat_p256_addcarryx_u32(&x376, &x377, 0x0, x345, x369); fiat_p256_addcarryx_u32(&x378, &x379, x377, x347, x371); fiat_p256_addcarryx_u32(&x380, &x381, x379, x349, x373); fiat_p256_addcarryx_u32(&x382, &x383, x381, x351, x375); fiat_p256_addcarryx_u32(&x384, &x385, x383, x353, 0x0); fiat_p256_addcarryx_u32(&x386, &x387, x385, x355, 0x0); fiat_p256_addcarryx_u32(&x388, &x389, x387, x357, x345); fiat_p256_addcarryx_u32(&x390, &x391, x389, x359, x363); fiat_p256_addcarryx_u32(&x392, &x393, x391, x361, x364); x394 = ((uint32_t)x393 + x362); fiat_p256_mulx_u32(&x395, &x396, x5, (arg1[7])); fiat_p256_mulx_u32(&x397, &x398, x5, (arg1[6])); fiat_p256_mulx_u32(&x399, &x400, x5, (arg1[5])); fiat_p256_mulx_u32(&x401, &x402, x5, (arg1[4])); fiat_p256_mulx_u32(&x403, &x404, x5, (arg1[3])); fiat_p256_mulx_u32(&x405, &x406, x5, (arg1[2])); fiat_p256_mulx_u32(&x407, &x408, x5, (arg1[1])); fiat_p256_mulx_u32(&x409, &x410, x5, (arg1[0])); fiat_p256_addcarryx_u32(&x411, &x412, 0x0, x410, x407); fiat_p256_addcarryx_u32(&x413, &x414, x412, x408, x405); fiat_p256_addcarryx_u32(&x415, &x416, x414, x406, x403); fiat_p256_addcarryx_u32(&x417, &x418, x416, x404, x401); fiat_p256_addcarryx_u32(&x419, &x420, x418, x402, x399); fiat_p256_addcarryx_u32(&x421, &x422, x420, x400, x397); fiat_p256_addcarryx_u32(&x423, &x424, x422, x398, x395); x425 = (x424 + x396); fiat_p256_addcarryx_u32(&x426, &x427, 0x0, x378, x409); fiat_p256_addcarryx_u32(&x428, &x429, x427, x380, x411); fiat_p256_addcarryx_u32(&x430, &x431, x429, x382, x413); fiat_p256_addcarryx_u32(&x432, &x433, x431, x384, x415); fiat_p256_addcarryx_u32(&x434, &x435, x433, x386, x417); fiat_p256_addcarryx_u32(&x436, &x437, x435, x388, x419); 
fiat_p256_addcarryx_u32(&x438, &x439, x437, x390, x421); fiat_p256_addcarryx_u32(&x440, &x441, x439, x392, x423); fiat_p256_addcarryx_u32(&x442, &x443, x441, x394, x425); fiat_p256_mulx_u32(&x444, &x445, x426, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x446, &x447, x426, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x448, &x449, x426, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x450, &x451, x426, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x452, &x453, 0x0, x451, x448); fiat_p256_addcarryx_u32(&x454, &x455, x453, x449, x446); x456 = (x455 + x447); fiat_p256_addcarryx_u32(&x457, &x458, 0x0, x426, x450); fiat_p256_addcarryx_u32(&x459, &x460, x458, x428, x452); fiat_p256_addcarryx_u32(&x461, &x462, x460, x430, x454); fiat_p256_addcarryx_u32(&x463, &x464, x462, x432, x456); fiat_p256_addcarryx_u32(&x465, &x466, x464, x434, 0x0); fiat_p256_addcarryx_u32(&x467, &x468, x466, x436, 0x0); fiat_p256_addcarryx_u32(&x469, &x470, x468, x438, x426); fiat_p256_addcarryx_u32(&x471, &x472, x470, x440, x444); fiat_p256_addcarryx_u32(&x473, &x474, x472, x442, x445); x475 = ((uint32_t)x474 + x443); fiat_p256_mulx_u32(&x476, &x477, x6, (arg1[7])); fiat_p256_mulx_u32(&x478, &x479, x6, (arg1[6])); fiat_p256_mulx_u32(&x480, &x481, x6, (arg1[5])); fiat_p256_mulx_u32(&x482, &x483, x6, (arg1[4])); fiat_p256_mulx_u32(&x484, &x485, x6, (arg1[3])); fiat_p256_mulx_u32(&x486, &x487, x6, (arg1[2])); fiat_p256_mulx_u32(&x488, &x489, x6, (arg1[1])); fiat_p256_mulx_u32(&x490, &x491, x6, (arg1[0])); fiat_p256_addcarryx_u32(&x492, &x493, 0x0, x491, x488); fiat_p256_addcarryx_u32(&x494, &x495, x493, x489, x486); fiat_p256_addcarryx_u32(&x496, &x497, x495, x487, x484); fiat_p256_addcarryx_u32(&x498, &x499, x497, x485, x482); fiat_p256_addcarryx_u32(&x500, &x501, x499, x483, x480); fiat_p256_addcarryx_u32(&x502, &x503, x501, x481, x478); fiat_p256_addcarryx_u32(&x504, &x505, x503, x479, x476); x506 = (x505 + x477); fiat_p256_addcarryx_u32(&x507, &x508, 0x0, x459, x490); fiat_p256_addcarryx_u32(&x509, 
&x510, x508, x461, x492); fiat_p256_addcarryx_u32(&x511, &x512, x510, x463, x494); fiat_p256_addcarryx_u32(&x513, &x514, x512, x465, x496); fiat_p256_addcarryx_u32(&x515, &x516, x514, x467, x498); fiat_p256_addcarryx_u32(&x517, &x518, x516, x469, x500); fiat_p256_addcarryx_u32(&x519, &x520, x518, x471, x502); fiat_p256_addcarryx_u32(&x521, &x522, x520, x473, x504); fiat_p256_addcarryx_u32(&x523, &x524, x522, x475, x506); fiat_p256_mulx_u32(&x525, &x526, x507, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x527, &x528, x507, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x529, &x530, x507, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x531, &x532, x507, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x533, &x534, 0x0, x532, x529); fiat_p256_addcarryx_u32(&x535, &x536, x534, x530, x527); x537 = (x536 + x528); fiat_p256_addcarryx_u32(&x538, &x539, 0x0, x507, x531); fiat_p256_addcarryx_u32(&x540, &x541, x539, x509, x533); fiat_p256_addcarryx_u32(&x542, &x543, x541, x511, x535); fiat_p256_addcarryx_u32(&x544, &x545, x543, x513, x537); fiat_p256_addcarryx_u32(&x546, &x547, x545, x515, 0x0); fiat_p256_addcarryx_u32(&x548, &x549, x547, x517, 0x0); fiat_p256_addcarryx_u32(&x550, &x551, x549, x519, x507); fiat_p256_addcarryx_u32(&x552, &x553, x551, x521, x525); fiat_p256_addcarryx_u32(&x554, &x555, x553, x523, x526); x556 = ((uint32_t)x555 + x524); fiat_p256_mulx_u32(&x557, &x558, x7, (arg1[7])); fiat_p256_mulx_u32(&x559, &x560, x7, (arg1[6])); fiat_p256_mulx_u32(&x561, &x562, x7, (arg1[5])); fiat_p256_mulx_u32(&x563, &x564, x7, (arg1[4])); fiat_p256_mulx_u32(&x565, &x566, x7, (arg1[3])); fiat_p256_mulx_u32(&x567, &x568, x7, (arg1[2])); fiat_p256_mulx_u32(&x569, &x570, x7, (arg1[1])); fiat_p256_mulx_u32(&x571, &x572, x7, (arg1[0])); fiat_p256_addcarryx_u32(&x573, &x574, 0x0, x572, x569); fiat_p256_addcarryx_u32(&x575, &x576, x574, x570, x567); fiat_p256_addcarryx_u32(&x577, &x578, x576, x568, x565); fiat_p256_addcarryx_u32(&x579, &x580, x578, x566, x563); fiat_p256_addcarryx_u32(&x581, 
&x582, x580, x564, x561); fiat_p256_addcarryx_u32(&x583, &x584, x582, x562, x559); fiat_p256_addcarryx_u32(&x585, &x586, x584, x560, x557); x587 = (x586 + x558); fiat_p256_addcarryx_u32(&x588, &x589, 0x0, x540, x571); fiat_p256_addcarryx_u32(&x590, &x591, x589, x542, x573); fiat_p256_addcarryx_u32(&x592, &x593, x591, x544, x575); fiat_p256_addcarryx_u32(&x594, &x595, x593, x546, x577); fiat_p256_addcarryx_u32(&x596, &x597, x595, x548, x579); fiat_p256_addcarryx_u32(&x598, &x599, x597, x550, x581); fiat_p256_addcarryx_u32(&x600, &x601, x599, x552, x583); fiat_p256_addcarryx_u32(&x602, &x603, x601, x554, x585); fiat_p256_addcarryx_u32(&x604, &x605, x603, x556, x587); fiat_p256_mulx_u32(&x606, &x607, x588, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x608, &x609, x588, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x610, &x611, x588, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x612, &x613, x588, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x614, &x615, 0x0, x613, x610); fiat_p256_addcarryx_u32(&x616, &x617, x615, x611, x608); x618 = (x617 + x609); fiat_p256_addcarryx_u32(&x619, &x620, 0x0, x588, x612); fiat_p256_addcarryx_u32(&x621, &x622, x620, x590, x614); fiat_p256_addcarryx_u32(&x623, &x624, x622, x592, x616); fiat_p256_addcarryx_u32(&x625, &x626, x624, x594, x618); fiat_p256_addcarryx_u32(&x627, &x628, x626, x596, 0x0); fiat_p256_addcarryx_u32(&x629, &x630, x628, x598, 0x0); fiat_p256_addcarryx_u32(&x631, &x632, x630, x600, x588); fiat_p256_addcarryx_u32(&x633, &x634, x632, x602, x606); fiat_p256_addcarryx_u32(&x635, &x636, x634, x604, x607); x637 = ((uint32_t)x636 + x605); fiat_p256_subborrowx_u32(&x638, &x639, 0x0, x621, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x640, &x641, x639, x623, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x642, &x643, x641, x625, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x644, &x645, x643, x627, 0x0); fiat_p256_subborrowx_u32(&x646, &x647, x645, x629, 0x0); fiat_p256_subborrowx_u32(&x648, &x649, x647, x631, 0x0); 
/* NOTE(review): This is straight-line, machine-generated field arithmetic in
 * the fiat-crypto style (flat SSA, one variable per intermediate value).
 * Comments only are added here; the computation must not be hand-edited. */
  /* Tail of the preceding Montgomery-domain multiplication: finish the trial
   * subtraction of the modulus from the product limbs x621..x637. */
  fiat_p256_subborrowx_u32(&x650, &x651, x649, x633, 0x1);
  fiat_p256_subborrowx_u32(&x652, &x653, x651, x635, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u32(&x654, &x655, x653, x637, 0x0);
  /* x655 is the final borrow: when it is zero the trial subtraction did not
   * underflow, so the reduced limbs (x638..x652) are selected; otherwise the
   * unreduced limbs (x621..x635) are kept.  fiat_p256_cmovznz_u32 is a
   * branchless select (presumably constant time -- see its definition
   * earlier in this file). */
  fiat_p256_cmovznz_u32(&x656, x655, x638, x621);
  fiat_p256_cmovznz_u32(&x657, x655, x640, x623);
  fiat_p256_cmovznz_u32(&x658, x655, x642, x625);
  fiat_p256_cmovznz_u32(&x659, x655, x644, x627);
  fiat_p256_cmovznz_u32(&x660, x655, x646, x629);
  fiat_p256_cmovznz_u32(&x661, x655, x648, x631);
  fiat_p256_cmovznz_u32(&x662, x655, x650, x633);
  fiat_p256_cmovznz_u32(&x663, x655, x652, x635);
  /* Write the 8 little-endian 32-bit result limbs. */
  out1[0] = x656;
  out1[1] = x657;
  out1[2] = x658;
  out1[3] = x659;
  out1[4] = x660;
  out1[5] = x661;
  out1[6] = x662;
  out1[7] = x663;
}

/*
 * The function fiat_p256_add adds two field elements in the Montgomery domain.
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 *   0 ≤ eval arg2 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
 *   0 ≤ eval out1 < m
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_add(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1, const fiat_p256_montgomery_domain_field_element arg2) {
  /* x1..x15 (odd indices): 8-limb sum; even-indexed fiat_p256_uint1 values
   * are the carry/borrow chain bits. */
  uint32_t x1; fiat_p256_uint1 x2; uint32_t x3; fiat_p256_uint1 x4;
  uint32_t x5; fiat_p256_uint1 x6; uint32_t x7; fiat_p256_uint1 x8;
  uint32_t x9; fiat_p256_uint1 x10; uint32_t x11; fiat_p256_uint1 x12;
  uint32_t x13; fiat_p256_uint1 x14; uint32_t x15; fiat_p256_uint1 x16;
  uint32_t x17; fiat_p256_uint1 x18; uint32_t x19; fiat_p256_uint1 x20;
  uint32_t x21; fiat_p256_uint1 x22; uint32_t x23; fiat_p256_uint1 x24;
  uint32_t x25; fiat_p256_uint1 x26; uint32_t x27; fiat_p256_uint1 x28;
  uint32_t x29; fiat_p256_uint1 x30; uint32_t x31; fiat_p256_uint1 x32;
  uint32_t x33; fiat_p256_uint1 x34;
  uint32_t x35; uint32_t x36; uint32_t x37; uint32_t x38;
  uint32_t x39; uint32_t x40; uint32_t x41; uint32_t x42;
  /* Limb-wise addition with carry propagation; x16 is the carry-out. */
  fiat_p256_addcarryx_u32(&x1, &x2, 0x0, (arg1[0]), (arg2[0]));
  fiat_p256_addcarryx_u32(&x3, &x4, x2, (arg1[1]), (arg2[1]));
  fiat_p256_addcarryx_u32(&x5, &x6, x4, (arg1[2]), (arg2[2]));
  fiat_p256_addcarryx_u32(&x7, &x8, x6, (arg1[3]), (arg2[3]));
  fiat_p256_addcarryx_u32(&x9, &x10, x8, (arg1[4]), (arg2[4]));
  fiat_p256_addcarryx_u32(&x11, &x12, x10, (arg1[5]), (arg2[5]));
  fiat_p256_addcarryx_u32(&x13, &x14, x12, (arg1[6]), (arg2[6]));
  fiat_p256_addcarryx_u32(&x15, &x16, x14, (arg1[7]), (arg2[7]));
  /* Trial subtraction of the modulus; the constants are its little-endian
   * 32-bit limbs (ffffffff, ffffffff, ffffffff, 0, 0, 0, 1, ffffffff). */
  fiat_p256_subborrowx_u32(&x17, &x18, 0x0, x1, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u32(&x19, &x20, x18, x3, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u32(&x21, &x22, x20, x5, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u32(&x23, &x24, x22, x7, 0x0);
  fiat_p256_subborrowx_u32(&x25, &x26, x24, x9, 0x0);
  fiat_p256_subborrowx_u32(&x27, &x28, x26, x11, 0x0);
  fiat_p256_subborrowx_u32(&x29, &x30, x28, x13, 0x1);
  fiat_p256_subborrowx_u32(&x31, &x32, x30, x15, UINT32_C(0xffffffff));
  /* Fold the addition's carry-out x16 into the borrow chain; x34 is the
   * final borrow deciding whether reduction succeeded. */
  fiat_p256_subborrowx_u32(&x33, &x34, x32, x16, 0x0);
  /* Branchless select: keep the reduced limbs when no borrow occurred,
   * otherwise the raw sum. */
  fiat_p256_cmovznz_u32(&x35, x34, x17, x1);
  fiat_p256_cmovznz_u32(&x36, x34, x19, x3);
  fiat_p256_cmovznz_u32(&x37, x34, x21, x5);
  fiat_p256_cmovznz_u32(&x38, x34, x23, x7);
  fiat_p256_cmovznz_u32(&x39, x34, x25, x9);
  fiat_p256_cmovznz_u32(&x40, x34, x27, x11);
  fiat_p256_cmovznz_u32(&x41, x34, x29, x13);
  fiat_p256_cmovznz_u32(&x42, x34, x31, x15);
  out1[0] = x35;
  out1[1] = x36;
  out1[2] = x37;
  out1[3] = x38;
  out1[4] = x39;
  out1[5] = x40;
  out1[6] = x41;
  out1[7] = x42;
}

/*
 * The function fiat_p256_sub subtracts two field elements in the Montgomery domain.
* Preconditions:
 *   0 ≤ eval arg1 < m
 *   0 ≤ eval arg2 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
 *   0 ≤ eval out1 < m
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_sub(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1, const fiat_p256_montgomery_domain_field_element arg2) {
  /* Odd-indexed uint32_t values are limbs; fiat_p256_uint1 values are the
   * borrow/carry chain bits.  x17 is the conditional mask. */
  uint32_t x1; fiat_p256_uint1 x2; uint32_t x3; fiat_p256_uint1 x4;
  uint32_t x5; fiat_p256_uint1 x6; uint32_t x7; fiat_p256_uint1 x8;
  uint32_t x9; fiat_p256_uint1 x10; uint32_t x11; fiat_p256_uint1 x12;
  uint32_t x13; fiat_p256_uint1 x14; uint32_t x15; fiat_p256_uint1 x16;
  uint32_t x17;
  uint32_t x18; fiat_p256_uint1 x19; uint32_t x20; fiat_p256_uint1 x21;
  uint32_t x22; fiat_p256_uint1 x23; uint32_t x24; fiat_p256_uint1 x25;
  uint32_t x26; fiat_p256_uint1 x27; uint32_t x28; fiat_p256_uint1 x29;
  uint32_t x30; fiat_p256_uint1 x31; uint32_t x32; fiat_p256_uint1 x33;
  /* Limb-wise subtraction with borrow propagation; x16 is the final borrow. */
  fiat_p256_subborrowx_u32(&x1, &x2, 0x0, (arg1[0]), (arg2[0]));
  fiat_p256_subborrowx_u32(&x3, &x4, x2, (arg1[1]), (arg2[1]));
  fiat_p256_subborrowx_u32(&x5, &x6, x4, (arg1[2]), (arg2[2]));
  fiat_p256_subborrowx_u32(&x7, &x8, x6, (arg1[3]), (arg2[3]));
  fiat_p256_subborrowx_u32(&x9, &x10, x8, (arg1[4]), (arg2[4]));
  fiat_p256_subborrowx_u32(&x11, &x12, x10, (arg1[5]), (arg2[5]));
  fiat_p256_subborrowx_u32(&x13, &x14, x12, (arg1[6]), (arg2[6]));
  fiat_p256_subborrowx_u32(&x15, &x16, x14, (arg1[7]), (arg2[7]));
  /* x17 becomes an all-ones mask when the subtraction underflowed (x16 != 0),
   * otherwise zero. */
  fiat_p256_cmovznz_u32(&x17, x16, 0x0, UINT32_C(0xffffffff));
  /* Conditionally add the modulus back: each addend below is the matching
   * little-endian limb of the modulus (ffffffff, ffffffff, ffffffff, 0, 0, 0,
   * 1, ffffffff) masked by x17 -- note (x17 & 0x1) supplies the `1` limb. */
  fiat_p256_addcarryx_u32(&x18, &x19, 0x0, x1, x17);
  fiat_p256_addcarryx_u32(&x20, &x21, x19, x3, x17);
  fiat_p256_addcarryx_u32(&x22, &x23, x21, x5, x17);
  fiat_p256_addcarryx_u32(&x24, &x25, x23, x7, 0x0);
  fiat_p256_addcarryx_u32(&x26, &x27, x25, x9, 0x0);
  fiat_p256_addcarryx_u32(&x28, &x29, x27, x11, 0x0);
  fiat_p256_addcarryx_u32(&x30, &x31, x29, x13, (fiat_p256_uint1)(x17 & 0x1));
  fiat_p256_addcarryx_u32(&x32, &x33, x31, x15, x17);
  out1[0] = x18;
  out1[1] = x20;
  out1[2] = x22;
  out1[3] = x24;
  out1[4] = x26;
  out1[5] = x28;
  out1[6] = x30;
  out1[7] = x32;
}

/*
 * The function fiat_p256_opp negates a field element in the Montgomery domain.
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m
 *   0 ≤ eval out1 < m
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_opp(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1) {
  /* Same layout as fiat_p256_sub: limbs, borrow/carry bits, and a mask x17. */
  uint32_t x1; fiat_p256_uint1 x2; uint32_t x3; fiat_p256_uint1 x4;
  uint32_t x5; fiat_p256_uint1 x6; uint32_t x7; fiat_p256_uint1 x8;
  uint32_t x9; fiat_p256_uint1 x10; uint32_t x11; fiat_p256_uint1 x12;
  uint32_t x13; fiat_p256_uint1 x14; uint32_t x15; fiat_p256_uint1 x16;
  uint32_t x17;
  uint32_t x18; fiat_p256_uint1 x19; uint32_t x20; fiat_p256_uint1 x21;
  uint32_t x22; fiat_p256_uint1 x23; uint32_t x24; fiat_p256_uint1 x25;
  uint32_t x26; fiat_p256_uint1 x27; uint32_t x28; fiat_p256_uint1 x29;
  uint32_t x30; fiat_p256_uint1 x31; uint32_t x32; fiat_p256_uint1 x33;
  /* Negation as 0 - arg1, limb by limb with borrow propagation. */
  fiat_p256_subborrowx_u32(&x1, &x2, 0x0, 0x0, (arg1[0]));
  fiat_p256_subborrowx_u32(&x3, &x4, x2, 0x0, (arg1[1]));
  fiat_p256_subborrowx_u32(&x5, &x6, x4, 0x0, (arg1[2]));
  fiat_p256_subborrowx_u32(&x7, &x8, x6, 0x0, (arg1[3]));
  fiat_p256_subborrowx_u32(&x9, &x10, x8, 0x0, (arg1[4]));
  fiat_p256_subborrowx_u32(&x11, &x12, x10, 0x0, (arg1[5]));
  fiat_p256_subborrowx_u32(&x13, &x14, x12, 0x0, (arg1[6]));
  fiat_p256_subborrowx_u32(&x15, &x16, x14, 0x0, (arg1[7]));
  /* All-ones mask when the subtraction borrowed (i.e. arg1 != 0). */
  fiat_p256_cmovznz_u32(&x17, x16, 0x0, UINT32_C(0xffffffff));
  /* Conditionally add the modulus back, exactly as in fiat_p256_sub. */
  fiat_p256_addcarryx_u32(&x18, &x19, 0x0, x1, x17);
  fiat_p256_addcarryx_u32(&x20, &x21, x19, x3, x17);
  fiat_p256_addcarryx_u32(&x22, &x23, x21, x5, x17);
  fiat_p256_addcarryx_u32(&x24, &x25, x23, x7, 0x0);
  fiat_p256_addcarryx_u32(&x26, &x27, x25, x9, 0x0);
  fiat_p256_addcarryx_u32(&x28, &x29, x27, x11, 0x0);
  fiat_p256_addcarryx_u32(&x30, &x31, x29, x13, (fiat_p256_uint1)(x17 & 0x1));
  fiat_p256_addcarryx_u32(&x32, &x33, x31, x15, x17);
  out1[0] = x18;
  out1[1] = x20;
  out1[2] = x22;
  out1[3] = x24;
  out1[4] = x26;
  out1[5] = x28;
  out1[6] = x30;
  out1[7] = x32;
}

/*
 * The function fiat_p256_from_montgomery translates a field element out of the Montgomery domain.
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 * Postconditions:
 *   eval out1 mod m = (eval arg1 * ((2^32)⁻¹ mod m)^8) mod m
 *   0 ≤ eval out1 < m
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_from_montgomery(fiat_p256_non_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1) {
  uint32_t x1; uint32_t x2; uint32_t x3; uint32_t x4; uint32_t x5;
  uint32_t x6; uint32_t x7; uint32_t x8; uint32_t x9;
  uint32_t x10; fiat_p256_uint1 x11; uint32_t x12; fiat_p256_uint1 x13;
  uint32_t x14; fiat_p256_uint1 x15; uint32_t x16; fiat_p256_uint1 x17;
  uint32_t x18; fiat_p256_uint1 x19; uint32_t x20; fiat_p256_uint1 x21;
  uint32_t x22; fiat_p256_uint1 x23; uint32_t x24; fiat_p256_uint1 x25;
  uint32_t x26; fiat_p256_uint1 x27;
  uint32_t x28; uint32_t x29; uint32_t x30; uint32_t x31; uint32_t x32;
  uint32_t x33; uint32_t x34; uint32_t x35;
  uint32_t x36; fiat_p256_uint1 x37; uint32_t x38; fiat_p256_uint1 x39;
  uint32_t x40; fiat_p256_uint1 x41; uint32_t x42; fiat_p256_uint1 x43;
  uint32_t x44; fiat_p256_uint1 x45; uint32_t x46; fiat_p256_uint1 x47;
  uint32_t x48; fiat_p256_uint1 x49; uint32_t x50; fiat_p256_uint1 x51;
  uint32_t x52; fiat_p256_uint1 x53; uint32_t x54; fiat_p256_uint1 x55;
  uint32_t x56; fiat_p256_uint1 x57;
  uint32_t x58; uint32_t x59; uint32_t x60; uint32_t x61; uint32_t x62;
  uint32_t x63; uint32_t x64; uint32_t x65;
  uint32_t x66; fiat_p256_uint1 x67; uint32_t x68; fiat_p256_uint1 x69;
  uint32_t x70; fiat_p256_uint1 x71; uint32_t x72; fiat_p256_uint1 x73;
  uint32_t x74; fiat_p256_uint1 x75; uint32_t x76; fiat_p256_uint1 x77;
  uint32_t x78; fiat_p256_uint1 x79; uint32_t x80; fiat_p256_uint1 x81;
  uint32_t x82; fiat_p256_uint1 x83; uint32_t x84; fiat_p256_uint1 x85;
  uint32_t x86; fiat_p256_uint1 x87; uint32_t x88;
fiat_p256_uint1 x89; uint32_t x90; fiat_p256_uint1 x91; uint32_t x92; fiat_p256_uint1 x93; uint32_t x94; fiat_p256_uint1 x95; uint32_t x96; fiat_p256_uint1 x97; uint32_t x98; fiat_p256_uint1 x99; uint32_t x100; fiat_p256_uint1 x101; uint32_t x102; uint32_t x103; uint32_t x104; uint32_t x105; uint32_t x106; uint32_t x107; uint32_t x108; uint32_t x109; uint32_t x110; fiat_p256_uint1 x111; uint32_t x112; fiat_p256_uint1 x113; uint32_t x114; fiat_p256_uint1 x115; uint32_t x116; fiat_p256_uint1 x117; uint32_t x118; fiat_p256_uint1 x119; uint32_t x120; fiat_p256_uint1 x121; uint32_t x122; fiat_p256_uint1 x123; uint32_t x124; fiat_p256_uint1 x125; uint32_t x126; fiat_p256_uint1 x127; uint32_t x128; fiat_p256_uint1 x129; uint32_t x130; fiat_p256_uint1 x131; uint32_t x132; fiat_p256_uint1 x133; uint32_t x134; fiat_p256_uint1 x135; uint32_t x136; fiat_p256_uint1 x137; uint32_t x138; fiat_p256_uint1 x139; uint32_t x140; fiat_p256_uint1 x141; uint32_t x142; fiat_p256_uint1 x143; uint32_t x144; fiat_p256_uint1 x145; uint32_t x146; fiat_p256_uint1 x147; uint32_t x148; uint32_t x149; uint32_t x150; uint32_t x151; uint32_t x152; uint32_t x153; uint32_t x154; uint32_t x155; uint32_t x156; fiat_p256_uint1 x157; uint32_t x158; fiat_p256_uint1 x159; uint32_t x160; fiat_p256_uint1 x161; uint32_t x162; fiat_p256_uint1 x163; uint32_t x164; fiat_p256_uint1 x165; uint32_t x166; fiat_p256_uint1 x167; uint32_t x168; fiat_p256_uint1 x169; uint32_t x170; fiat_p256_uint1 x171; uint32_t x172; fiat_p256_uint1 x173; uint32_t x174; fiat_p256_uint1 x175; uint32_t x176; fiat_p256_uint1 x177; uint32_t x178; fiat_p256_uint1 x179; uint32_t x180; fiat_p256_uint1 x181; uint32_t x182; fiat_p256_uint1 x183; uint32_t x184; fiat_p256_uint1 x185; uint32_t x186; fiat_p256_uint1 x187; uint32_t x188; fiat_p256_uint1 x189; uint32_t x190; fiat_p256_uint1 x191; uint32_t x192; fiat_p256_uint1 x193; uint32_t x194; uint32_t x195; uint32_t x196; uint32_t x197; uint32_t x198; uint32_t x199; uint32_t x200; uint32_t x201; 
uint32_t x202; fiat_p256_uint1 x203; uint32_t x204; fiat_p256_uint1 x205; uint32_t x206; fiat_p256_uint1 x207; uint32_t x208; fiat_p256_uint1 x209; uint32_t x210; fiat_p256_uint1 x211; uint32_t x212; fiat_p256_uint1 x213; uint32_t x214; fiat_p256_uint1 x215; uint32_t x216; fiat_p256_uint1 x217; uint32_t x218; fiat_p256_uint1 x219; uint32_t x220; fiat_p256_uint1 x221; uint32_t x222; fiat_p256_uint1 x223; uint32_t x224; fiat_p256_uint1 x225; uint32_t x226; fiat_p256_uint1 x227; uint32_t x228; fiat_p256_uint1 x229; uint32_t x230; fiat_p256_uint1 x231; uint32_t x232; fiat_p256_uint1 x233; uint32_t x234; fiat_p256_uint1 x235; uint32_t x236; fiat_p256_uint1 x237; uint32_t x238; fiat_p256_uint1 x239; uint32_t x240; uint32_t x241; uint32_t x242; uint32_t x243; uint32_t x244; uint32_t x245; uint32_t x246; uint32_t x247; uint32_t x248; fiat_p256_uint1 x249; uint32_t x250; fiat_p256_uint1 x251; uint32_t x252; fiat_p256_uint1 x253; uint32_t x254; fiat_p256_uint1 x255; uint32_t x256; fiat_p256_uint1 x257; uint32_t x258; fiat_p256_uint1 x259; uint32_t x260; fiat_p256_uint1 x261; uint32_t x262; fiat_p256_uint1 x263; uint32_t x264; fiat_p256_uint1 x265; uint32_t x266; fiat_p256_uint1 x267; uint32_t x268; fiat_p256_uint1 x269; uint32_t x270; fiat_p256_uint1 x271; uint32_t x272; fiat_p256_uint1 x273; uint32_t x274; fiat_p256_uint1 x275; uint32_t x276; fiat_p256_uint1 x277; uint32_t x278; fiat_p256_uint1 x279; uint32_t x280; fiat_p256_uint1 x281; uint32_t x282; fiat_p256_uint1 x283; uint32_t x284; fiat_p256_uint1 x285; uint32_t x286; uint32_t x287; uint32_t x288; uint32_t x289; uint32_t x290; uint32_t x291; uint32_t x292; uint32_t x293; uint32_t x294; fiat_p256_uint1 x295; uint32_t x296; fiat_p256_uint1 x297; uint32_t x298; fiat_p256_uint1 x299; uint32_t x300; fiat_p256_uint1 x301; uint32_t x302; fiat_p256_uint1 x303; uint32_t x304; fiat_p256_uint1 x305; uint32_t x306; fiat_p256_uint1 x307; uint32_t x308; fiat_p256_uint1 x309; uint32_t x310; fiat_p256_uint1 x311; uint32_t x312; 
fiat_p256_uint1 x313; uint32_t x314; fiat_p256_uint1 x315; uint32_t x316; fiat_p256_uint1 x317; uint32_t x318; fiat_p256_uint1 x319; uint32_t x320; fiat_p256_uint1 x321; uint32_t x322; fiat_p256_uint1 x323; uint32_t x324; fiat_p256_uint1 x325; uint32_t x326; fiat_p256_uint1 x327; uint32_t x328; fiat_p256_uint1 x329; uint32_t x330; fiat_p256_uint1 x331; uint32_t x332; fiat_p256_uint1 x333; uint32_t x334; uint32_t x335; uint32_t x336; uint32_t x337; uint32_t x338; uint32_t x339; uint32_t x340; uint32_t x341; x1 = (arg1[0]); fiat_p256_mulx_u32(&x2, &x3, x1, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x4, &x5, x1, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x6, &x7, x1, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x8, &x9, x1, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x10, &x11, 0x0, x9, x6); fiat_p256_addcarryx_u32(&x12, &x13, x11, x7, x4); fiat_p256_addcarryx_u32(&x14, &x15, 0x0, x1, x8); fiat_p256_addcarryx_u32(&x16, &x17, x15, 0x0, x10); fiat_p256_addcarryx_u32(&x18, &x19, x17, 0x0, x12); fiat_p256_addcarryx_u32(&x20, &x21, x19, 0x0, (x13 + x5)); fiat_p256_addcarryx_u32(&x22, &x23, 0x0, x16, (arg1[1])); fiat_p256_addcarryx_u32(&x24, &x25, x23, x18, 0x0); fiat_p256_addcarryx_u32(&x26, &x27, x25, x20, 0x0); fiat_p256_mulx_u32(&x28, &x29, x22, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x30, &x31, x22, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x32, &x33, x22, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x34, &x35, x22, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x36, &x37, 0x0, x35, x32); fiat_p256_addcarryx_u32(&x38, &x39, x37, x33, x30); fiat_p256_addcarryx_u32(&x40, &x41, 0x0, x22, x34); fiat_p256_addcarryx_u32(&x42, &x43, x41, x24, x36); fiat_p256_addcarryx_u32(&x44, &x45, x43, x26, x38); fiat_p256_addcarryx_u32(&x46, &x47, x45, ((uint32_t)x27 + x21), (x39 + x31)); fiat_p256_addcarryx_u32(&x48, &x49, 0x0, x2, x22); fiat_p256_addcarryx_u32(&x50, &x51, x49, x3, x28); fiat_p256_addcarryx_u32(&x52, &x53, 0x0, x42, (arg1[2])); fiat_p256_addcarryx_u32(&x54, &x55, 
x53, x44, 0x0); fiat_p256_addcarryx_u32(&x56, &x57, x55, x46, 0x0); fiat_p256_mulx_u32(&x58, &x59, x52, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x60, &x61, x52, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x62, &x63, x52, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x64, &x65, x52, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x66, &x67, 0x0, x65, x62); fiat_p256_addcarryx_u32(&x68, &x69, x67, x63, x60); fiat_p256_addcarryx_u32(&x70, &x71, 0x0, x52, x64); fiat_p256_addcarryx_u32(&x72, &x73, x71, x54, x66); fiat_p256_addcarryx_u32(&x74, &x75, x73, x56, x68); fiat_p256_addcarryx_u32(&x76, &x77, x75, ((uint32_t)x57 + x47), (x69 + x61)); fiat_p256_addcarryx_u32(&x78, &x79, x77, x1, 0x0); fiat_p256_addcarryx_u32(&x80, &x81, x79, x48, 0x0); fiat_p256_addcarryx_u32(&x82, &x83, x81, x50, x52); fiat_p256_addcarryx_u32(&x84, &x85, x83, (x51 + x29), x58); fiat_p256_addcarryx_u32(&x86, &x87, 0x0, x72, (arg1[3])); fiat_p256_addcarryx_u32(&x88, &x89, x87, x74, 0x0); fiat_p256_addcarryx_u32(&x90, &x91, x89, x76, 0x0); fiat_p256_addcarryx_u32(&x92, &x93, x91, x78, 0x0); fiat_p256_addcarryx_u32(&x94, &x95, x93, x80, 0x0); fiat_p256_addcarryx_u32(&x96, &x97, x95, x82, 0x0); fiat_p256_addcarryx_u32(&x98, &x99, x97, x84, 0x0); fiat_p256_addcarryx_u32(&x100, &x101, x99, (x85 + x59), 0x0); fiat_p256_mulx_u32(&x102, &x103, x86, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x104, &x105, x86, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x106, &x107, x86, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x108, &x109, x86, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x110, &x111, 0x0, x109, x106); fiat_p256_addcarryx_u32(&x112, &x113, x111, x107, x104); fiat_p256_addcarryx_u32(&x114, &x115, 0x0, x86, x108); fiat_p256_addcarryx_u32(&x116, &x117, x115, x88, x110); fiat_p256_addcarryx_u32(&x118, &x119, x117, x90, x112); fiat_p256_addcarryx_u32(&x120, &x121, x119, x92, (x113 + x105)); fiat_p256_addcarryx_u32(&x122, &x123, x121, x94, 0x0); fiat_p256_addcarryx_u32(&x124, &x125, x123, x96, 0x0); 
fiat_p256_addcarryx_u32(&x126, &x127, x125, x98, x86); fiat_p256_addcarryx_u32(&x128, &x129, x127, x100, x102); fiat_p256_addcarryx_u32(&x130, &x131, x129, x101, x103); fiat_p256_addcarryx_u32(&x132, &x133, 0x0, x116, (arg1[4])); fiat_p256_addcarryx_u32(&x134, &x135, x133, x118, 0x0); fiat_p256_addcarryx_u32(&x136, &x137, x135, x120, 0x0); fiat_p256_addcarryx_u32(&x138, &x139, x137, x122, 0x0); fiat_p256_addcarryx_u32(&x140, &x141, x139, x124, 0x0); fiat_p256_addcarryx_u32(&x142, &x143, x141, x126, 0x0); fiat_p256_addcarryx_u32(&x144, &x145, x143, x128, 0x0); fiat_p256_addcarryx_u32(&x146, &x147, x145, x130, 0x0); fiat_p256_mulx_u32(&x148, &x149, x132, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x150, &x151, x132, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x152, &x153, x132, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x154, &x155, x132, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x156, &x157, 0x0, x155, x152); fiat_p256_addcarryx_u32(&x158, &x159, x157, x153, x150); fiat_p256_addcarryx_u32(&x160, &x161, 0x0, x132, x154); fiat_p256_addcarryx_u32(&x162, &x163, x161, x134, x156); fiat_p256_addcarryx_u32(&x164, &x165, x163, x136, x158); fiat_p256_addcarryx_u32(&x166, &x167, x165, x138, (x159 + x151)); fiat_p256_addcarryx_u32(&x168, &x169, x167, x140, 0x0); fiat_p256_addcarryx_u32(&x170, &x171, x169, x142, 0x0); fiat_p256_addcarryx_u32(&x172, &x173, x171, x144, x132); fiat_p256_addcarryx_u32(&x174, &x175, x173, x146, x148); fiat_p256_addcarryx_u32(&x176, &x177, x175, ((uint32_t)x147 + x131), x149); fiat_p256_addcarryx_u32(&x178, &x179, 0x0, x162, (arg1[5])); fiat_p256_addcarryx_u32(&x180, &x181, x179, x164, 0x0); fiat_p256_addcarryx_u32(&x182, &x183, x181, x166, 0x0); fiat_p256_addcarryx_u32(&x184, &x185, x183, x168, 0x0); fiat_p256_addcarryx_u32(&x186, &x187, x185, x170, 0x0); fiat_p256_addcarryx_u32(&x188, &x189, x187, x172, 0x0); fiat_p256_addcarryx_u32(&x190, &x191, x189, x174, 0x0); fiat_p256_addcarryx_u32(&x192, &x193, x191, x176, 0x0); fiat_p256_mulx_u32(&x194, 
&x195, x178, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x196, &x197, x178, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x198, &x199, x178, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x200, &x201, x178, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x202, &x203, 0x0, x201, x198); fiat_p256_addcarryx_u32(&x204, &x205, x203, x199, x196); fiat_p256_addcarryx_u32(&x206, &x207, 0x0, x178, x200); fiat_p256_addcarryx_u32(&x208, &x209, x207, x180, x202); fiat_p256_addcarryx_u32(&x210, &x211, x209, x182, x204); fiat_p256_addcarryx_u32(&x212, &x213, x211, x184, (x205 + x197)); fiat_p256_addcarryx_u32(&x214, &x215, x213, x186, 0x0); fiat_p256_addcarryx_u32(&x216, &x217, x215, x188, 0x0); fiat_p256_addcarryx_u32(&x218, &x219, x217, x190, x178); fiat_p256_addcarryx_u32(&x220, &x221, x219, x192, x194); fiat_p256_addcarryx_u32(&x222, &x223, x221, ((uint32_t)x193 + x177), x195); fiat_p256_addcarryx_u32(&x224, &x225, 0x0, x208, (arg1[6])); fiat_p256_addcarryx_u32(&x226, &x227, x225, x210, 0x0); fiat_p256_addcarryx_u32(&x228, &x229, x227, x212, 0x0); fiat_p256_addcarryx_u32(&x230, &x231, x229, x214, 0x0); fiat_p256_addcarryx_u32(&x232, &x233, x231, x216, 0x0); fiat_p256_addcarryx_u32(&x234, &x235, x233, x218, 0x0); fiat_p256_addcarryx_u32(&x236, &x237, x235, x220, 0x0); fiat_p256_addcarryx_u32(&x238, &x239, x237, x222, 0x0); fiat_p256_mulx_u32(&x240, &x241, x224, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x242, &x243, x224, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x244, &x245, x224, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x246, &x247, x224, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x248, &x249, 0x0, x247, x244); fiat_p256_addcarryx_u32(&x250, &x251, x249, x245, x242); fiat_p256_addcarryx_u32(&x252, &x253, 0x0, x224, x246); fiat_p256_addcarryx_u32(&x254, &x255, x253, x226, x248); fiat_p256_addcarryx_u32(&x256, &x257, x255, x228, x250); fiat_p256_addcarryx_u32(&x258, &x259, x257, x230, (x251 + x243)); fiat_p256_addcarryx_u32(&x260, &x261, x259, x232, 0x0); 
fiat_p256_addcarryx_u32(&x262, &x263, x261, x234, 0x0); fiat_p256_addcarryx_u32(&x264, &x265, x263, x236, x224); fiat_p256_addcarryx_u32(&x266, &x267, x265, x238, x240); fiat_p256_addcarryx_u32(&x268, &x269, x267, ((uint32_t)x239 + x223), x241); fiat_p256_addcarryx_u32(&x270, &x271, 0x0, x254, (arg1[7])); fiat_p256_addcarryx_u32(&x272, &x273, x271, x256, 0x0); fiat_p256_addcarryx_u32(&x274, &x275, x273, x258, 0x0); fiat_p256_addcarryx_u32(&x276, &x277, x275, x260, 0x0); fiat_p256_addcarryx_u32(&x278, &x279, x277, x262, 0x0); fiat_p256_addcarryx_u32(&x280, &x281, x279, x264, 0x0); fiat_p256_addcarryx_u32(&x282, &x283, x281, x266, 0x0); fiat_p256_addcarryx_u32(&x284, &x285, x283, x268, 0x0); fiat_p256_mulx_u32(&x286, &x287, x270, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x288, &x289, x270, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x290, &x291, x270, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x292, &x293, x270, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x294, &x295, 0x0, x293, x290); fiat_p256_addcarryx_u32(&x296, &x297, x295, x291, x288); fiat_p256_addcarryx_u32(&x298, &x299, 0x0, x270, x292); fiat_p256_addcarryx_u32(&x300, &x301, x299, x272, x294); fiat_p256_addcarryx_u32(&x302, &x303, x301, x274, x296); fiat_p256_addcarryx_u32(&x304, &x305, x303, x276, (x297 + x289)); fiat_p256_addcarryx_u32(&x306, &x307, x305, x278, 0x0); fiat_p256_addcarryx_u32(&x308, &x309, x307, x280, 0x0); fiat_p256_addcarryx_u32(&x310, &x311, x309, x282, x270); fiat_p256_addcarryx_u32(&x312, &x313, x311, x284, x286); fiat_p256_addcarryx_u32(&x314, &x315, x313, ((uint32_t)x285 + x269), x287); fiat_p256_subborrowx_u32(&x316, &x317, 0x0, x300, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x318, &x319, x317, x302, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x320, &x321, x319, x304, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x322, &x323, x321, x306, 0x0); fiat_p256_subborrowx_u32(&x324, &x325, x323, x308, 0x0); fiat_p256_subborrowx_u32(&x326, &x327, x325, x310, 0x0); 
fiat_p256_subborrowx_u32(&x328, &x329, x327, x312, 0x1); fiat_p256_subborrowx_u32(&x330, &x331, x329, x314, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x332, &x333, x331, x315, 0x0); fiat_p256_cmovznz_u32(&x334, x333, x316, x300); fiat_p256_cmovznz_u32(&x335, x333, x318, x302); fiat_p256_cmovznz_u32(&x336, x333, x320, x304); fiat_p256_cmovznz_u32(&x337, x333, x322, x306); fiat_p256_cmovznz_u32(&x338, x333, x324, x308); fiat_p256_cmovznz_u32(&x339, x333, x326, x310); fiat_p256_cmovznz_u32(&x340, x333, x328, x312); fiat_p256_cmovznz_u32(&x341, x333, x330, x314); out1[0] = x334; out1[1] = x335; out1[2] = x336; out1[3] = x337; out1[4] = x338; out1[5] = x339; out1[6] = x340; out1[7] = x341; } /* * The function fiat_p256_to_montgomery translates a field element into the Montgomery domain. * * Preconditions: * 0 ≤ eval arg1 < m * Postconditions: * eval (from_montgomery out1) mod m = eval arg1 mod m * 0 ≤ eval out1 < m * */ static FIAT_P256_FIAT_INLINE void fiat_p256_to_montgomery(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_non_montgomery_domain_field_element arg1) { uint32_t x1; uint32_t x2; uint32_t x3; uint32_t x4; uint32_t x5; uint32_t x6; uint32_t x7; uint32_t x8; uint32_t x9; uint32_t x10; uint32_t x11; uint32_t x12; uint32_t x13; uint32_t x14; uint32_t x15; uint32_t x16; uint32_t x17; uint32_t x18; uint32_t x19; uint32_t x20; uint32_t x21; uint32_t x22; uint32_t x23; fiat_p256_uint1 x24; uint32_t x25; fiat_p256_uint1 x26; uint32_t x27; fiat_p256_uint1 x28; uint32_t x29; fiat_p256_uint1 x30; uint32_t x31; fiat_p256_uint1 x32; uint32_t x33; uint32_t x34; uint32_t x35; uint32_t x36; uint32_t x37; uint32_t x38; uint32_t x39; uint32_t x40; uint32_t x41; fiat_p256_uint1 x42; uint32_t x43; fiat_p256_uint1 x44; uint32_t x45; fiat_p256_uint1 x46; uint32_t x47; fiat_p256_uint1 x48; uint32_t x49; fiat_p256_uint1 x50; uint32_t x51; fiat_p256_uint1 x52; uint32_t x53; fiat_p256_uint1 x54; uint32_t x55; fiat_p256_uint1 x56; uint32_t x57; fiat_p256_uint1 
x58; uint32_t x59; fiat_p256_uint1 x60; uint32_t x61; fiat_p256_uint1 x62; uint32_t x63; uint32_t x64; uint32_t x65; uint32_t x66; uint32_t x67; uint32_t x68; uint32_t x69; uint32_t x70; uint32_t x71; uint32_t x72; uint32_t x73; uint32_t x74; uint32_t x75; uint32_t x76; uint32_t x77; fiat_p256_uint1 x78; uint32_t x79; fiat_p256_uint1 x80; uint32_t x81; fiat_p256_uint1 x82; uint32_t x83; fiat_p256_uint1 x84; uint32_t x85; fiat_p256_uint1 x86; uint32_t x87; fiat_p256_uint1 x88; uint32_t x89; fiat_p256_uint1 x90; uint32_t x91; fiat_p256_uint1 x92; uint32_t x93; fiat_p256_uint1 x94; uint32_t x95; fiat_p256_uint1 x96; uint32_t x97; fiat_p256_uint1 x98; uint32_t x99; fiat_p256_uint1 x100; uint32_t x101; fiat_p256_uint1 x102; uint32_t x103; uint32_t x104; uint32_t x105; uint32_t x106; uint32_t x107; uint32_t x108; uint32_t x109; uint32_t x110; uint32_t x111; fiat_p256_uint1 x112; uint32_t x113; fiat_p256_uint1 x114; uint32_t x115; fiat_p256_uint1 x116; uint32_t x117; fiat_p256_uint1 x118; uint32_t x119; fiat_p256_uint1 x120; uint32_t x121; fiat_p256_uint1 x122; uint32_t x123; fiat_p256_uint1 x124; uint32_t x125; fiat_p256_uint1 x126; uint32_t x127; fiat_p256_uint1 x128; uint32_t x129; fiat_p256_uint1 x130; uint32_t x131; fiat_p256_uint1 x132; uint32_t x133; uint32_t x134; uint32_t x135; uint32_t x136; uint32_t x137; uint32_t x138; uint32_t x139; uint32_t x140; uint32_t x141; uint32_t x142; uint32_t x143; uint32_t x144; uint32_t x145; uint32_t x146; uint32_t x147; fiat_p256_uint1 x148; uint32_t x149; fiat_p256_uint1 x150; uint32_t x151; fiat_p256_uint1 x152; uint32_t x153; fiat_p256_uint1 x154; uint32_t x155; fiat_p256_uint1 x156; uint32_t x157; fiat_p256_uint1 x158; uint32_t x159; fiat_p256_uint1 x160; uint32_t x161; fiat_p256_uint1 x162; uint32_t x163; fiat_p256_uint1 x164; uint32_t x165; fiat_p256_uint1 x166; uint32_t x167; fiat_p256_uint1 x168; uint32_t x169; fiat_p256_uint1 x170; uint32_t x171; fiat_p256_uint1 x172; uint32_t x173; uint32_t x174; uint32_t x175; 
uint32_t x176; uint32_t x177; uint32_t x178; uint32_t x179; uint32_t x180; uint32_t x181; fiat_p256_uint1 x182; uint32_t x183; fiat_p256_uint1 x184; uint32_t x185; fiat_p256_uint1 x186; uint32_t x187; fiat_p256_uint1 x188; uint32_t x189; fiat_p256_uint1 x190; uint32_t x191; fiat_p256_uint1 x192; uint32_t x193; fiat_p256_uint1 x194; uint32_t x195; fiat_p256_uint1 x196; uint32_t x197; fiat_p256_uint1 x198; uint32_t x199; fiat_p256_uint1 x200; uint32_t x201; fiat_p256_uint1 x202; uint32_t x203; uint32_t x204; uint32_t x205; uint32_t x206; uint32_t x207; uint32_t x208; uint32_t x209; uint32_t x210; uint32_t x211; uint32_t x212; uint32_t x213; uint32_t x214; uint32_t x215; uint32_t x216; uint32_t x217; fiat_p256_uint1 x218; uint32_t x219; fiat_p256_uint1 x220; uint32_t x221; fiat_p256_uint1 x222; uint32_t x223; fiat_p256_uint1 x224; uint32_t x225; fiat_p256_uint1 x226; uint32_t x227; fiat_p256_uint1 x228; uint32_t x229; fiat_p256_uint1 x230; uint32_t x231; fiat_p256_uint1 x232; uint32_t x233; fiat_p256_uint1 x234; uint32_t x235; fiat_p256_uint1 x236; uint32_t x237; fiat_p256_uint1 x238; uint32_t x239; fiat_p256_uint1 x240; uint32_t x241; fiat_p256_uint1 x242; uint32_t x243; uint32_t x244; uint32_t x245; uint32_t x246; uint32_t x247; uint32_t x248; uint32_t x249; uint32_t x250; uint32_t x251; fiat_p256_uint1 x252; uint32_t x253; fiat_p256_uint1 x254; uint32_t x255; fiat_p256_uint1 x256; uint32_t x257; fiat_p256_uint1 x258; uint32_t x259; fiat_p256_uint1 x260; uint32_t x261; fiat_p256_uint1 x262; uint32_t x263; fiat_p256_uint1 x264; uint32_t x265; fiat_p256_uint1 x266; uint32_t x267; fiat_p256_uint1 x268; uint32_t x269; fiat_p256_uint1 x270; uint32_t x271; fiat_p256_uint1 x272; uint32_t x273; uint32_t x274; uint32_t x275; uint32_t x276; uint32_t x277; uint32_t x278; uint32_t x279; uint32_t x280; uint32_t x281; uint32_t x282; uint32_t x283; uint32_t x284; uint32_t x285; uint32_t x286; uint32_t x287; fiat_p256_uint1 x288; uint32_t x289; fiat_p256_uint1 x290; uint32_t x291; 
fiat_p256_uint1 x292; uint32_t x293; fiat_p256_uint1 x294; uint32_t x295; fiat_p256_uint1 x296; uint32_t x297; fiat_p256_uint1 x298; uint32_t x299; fiat_p256_uint1 x300; uint32_t x301; fiat_p256_uint1 x302; uint32_t x303; fiat_p256_uint1 x304; uint32_t x305; fiat_p256_uint1 x306; uint32_t x307; fiat_p256_uint1 x308; uint32_t x309; fiat_p256_uint1 x310; uint32_t x311; fiat_p256_uint1 x312; uint32_t x313; uint32_t x314; uint32_t x315; uint32_t x316; uint32_t x317; uint32_t x318; uint32_t x319; uint32_t x320; uint32_t x321; fiat_p256_uint1 x322; uint32_t x323; fiat_p256_uint1 x324; uint32_t x325; fiat_p256_uint1 x326; uint32_t x327; fiat_p256_uint1 x328; uint32_t x329; fiat_p256_uint1 x330; uint32_t x331; fiat_p256_uint1 x332; uint32_t x333; fiat_p256_uint1 x334; uint32_t x335; fiat_p256_uint1 x336; uint32_t x337; fiat_p256_uint1 x338; uint32_t x339; fiat_p256_uint1 x340; uint32_t x341; fiat_p256_uint1 x342; uint32_t x343; uint32_t x344; uint32_t x345; uint32_t x346; uint32_t x347; uint32_t x348; uint32_t x349; uint32_t x350; uint32_t x351; uint32_t x352; uint32_t x353; uint32_t x354; uint32_t x355; uint32_t x356; uint32_t x357; fiat_p256_uint1 x358; uint32_t x359; fiat_p256_uint1 x360; uint32_t x361; fiat_p256_uint1 x362; uint32_t x363; fiat_p256_uint1 x364; uint32_t x365; fiat_p256_uint1 x366; uint32_t x367; fiat_p256_uint1 x368; uint32_t x369; fiat_p256_uint1 x370; uint32_t x371; fiat_p256_uint1 x372; uint32_t x373; fiat_p256_uint1 x374; uint32_t x375; fiat_p256_uint1 x376; uint32_t x377; fiat_p256_uint1 x378; uint32_t x379; fiat_p256_uint1 x380; uint32_t x381; fiat_p256_uint1 x382; uint32_t x383; uint32_t x384; uint32_t x385; uint32_t x386; uint32_t x387; uint32_t x388; uint32_t x389; uint32_t x390; uint32_t x391; fiat_p256_uint1 x392; uint32_t x393; fiat_p256_uint1 x394; uint32_t x395; fiat_p256_uint1 x396; uint32_t x397; fiat_p256_uint1 x398; uint32_t x399; fiat_p256_uint1 x400; uint32_t x401; fiat_p256_uint1 x402; uint32_t x403; fiat_p256_uint1 x404; uint32_t 
x405; fiat_p256_uint1 x406; uint32_t x407; fiat_p256_uint1 x408; uint32_t x409; fiat_p256_uint1 x410; uint32_t x411; fiat_p256_uint1 x412; uint32_t x413; uint32_t x414; uint32_t x415; uint32_t x416; uint32_t x417; uint32_t x418; uint32_t x419; uint32_t x420; uint32_t x421; uint32_t x422; uint32_t x423; uint32_t x424; uint32_t x425; uint32_t x426; uint32_t x427; fiat_p256_uint1 x428; uint32_t x429; fiat_p256_uint1 x430; uint32_t x431; fiat_p256_uint1 x432; uint32_t x433; fiat_p256_uint1 x434; uint32_t x435; fiat_p256_uint1 x436; uint32_t x437; fiat_p256_uint1 x438; uint32_t x439; fiat_p256_uint1 x440; uint32_t x441; fiat_p256_uint1 x442; uint32_t x443; fiat_p256_uint1 x444; uint32_t x445; fiat_p256_uint1 x446; uint32_t x447; fiat_p256_uint1 x448; uint32_t x449; fiat_p256_uint1 x450; uint32_t x451; fiat_p256_uint1 x452; uint32_t x453; uint32_t x454; uint32_t x455; uint32_t x456; uint32_t x457; uint32_t x458; uint32_t x459; uint32_t x460; uint32_t x461; fiat_p256_uint1 x462; uint32_t x463; fiat_p256_uint1 x464; uint32_t x465; fiat_p256_uint1 x466; uint32_t x467; fiat_p256_uint1 x468; uint32_t x469; fiat_p256_uint1 x470; uint32_t x471; fiat_p256_uint1 x472; uint32_t x473; fiat_p256_uint1 x474; uint32_t x475; fiat_p256_uint1 x476; uint32_t x477; fiat_p256_uint1 x478; uint32_t x479; fiat_p256_uint1 x480; uint32_t x481; fiat_p256_uint1 x482; uint32_t x483; uint32_t x484; uint32_t x485; uint32_t x486; uint32_t x487; uint32_t x488; uint32_t x489; uint32_t x490; uint32_t x491; uint32_t x492; uint32_t x493; uint32_t x494; uint32_t x495; uint32_t x496; uint32_t x497; fiat_p256_uint1 x498; uint32_t x499; fiat_p256_uint1 x500; uint32_t x501; fiat_p256_uint1 x502; uint32_t x503; fiat_p256_uint1 x504; uint32_t x505; fiat_p256_uint1 x506; uint32_t x507; fiat_p256_uint1 x508; uint32_t x509; fiat_p256_uint1 x510; uint32_t x511; fiat_p256_uint1 x512; uint32_t x513; fiat_p256_uint1 x514; uint32_t x515; fiat_p256_uint1 x516; uint32_t x517; fiat_p256_uint1 x518; uint32_t x519; 
fiat_p256_uint1 x520; uint32_t x521; fiat_p256_uint1 x522; uint32_t x523; uint32_t x524; uint32_t x525; uint32_t x526; uint32_t x527; uint32_t x528; uint32_t x529; uint32_t x530; uint32_t x531; fiat_p256_uint1 x532; uint32_t x533; fiat_p256_uint1 x534; uint32_t x535; fiat_p256_uint1 x536; uint32_t x537; fiat_p256_uint1 x538; uint32_t x539; fiat_p256_uint1 x540; uint32_t x541; fiat_p256_uint1 x542; uint32_t x543; fiat_p256_uint1 x544; uint32_t x545; fiat_p256_uint1 x546; uint32_t x547; fiat_p256_uint1 x548; uint32_t x549; fiat_p256_uint1 x550; uint32_t x551; fiat_p256_uint1 x552; uint32_t x553; fiat_p256_uint1 x554; uint32_t x555; fiat_p256_uint1 x556; uint32_t x557; fiat_p256_uint1 x558; uint32_t x559; fiat_p256_uint1 x560; uint32_t x561; fiat_p256_uint1 x562; uint32_t x563; fiat_p256_uint1 x564; uint32_t x565; fiat_p256_uint1 x566; uint32_t x567; fiat_p256_uint1 x568; uint32_t x569; fiat_p256_uint1 x570; uint32_t x571; uint32_t x572; uint32_t x573; uint32_t x574; uint32_t x575; uint32_t x576; uint32_t x577; uint32_t x578; x1 = (arg1[1]); x2 = (arg1[2]); x3 = (arg1[3]); x4 = (arg1[4]); x5 = (arg1[5]); x6 = (arg1[6]); x7 = (arg1[7]); x8 = (arg1[0]); fiat_p256_mulx_u32(&x9, &x10, x8, 0x4); fiat_p256_mulx_u32(&x11, &x12, x8, UINT32_C(0xfffffffd)); fiat_p256_mulx_u32(&x13, &x14, x8, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x15, &x16, x8, UINT32_C(0xfffffffe)); fiat_p256_mulx_u32(&x17, &x18, x8, UINT32_C(0xfffffffb)); fiat_p256_mulx_u32(&x19, &x20, x8, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x21, &x22, x8, 0x3); fiat_p256_addcarryx_u32(&x23, &x24, 0x0, x20, x17); fiat_p256_addcarryx_u32(&x25, &x26, x24, x18, x15); fiat_p256_addcarryx_u32(&x27, &x28, x26, x16, x13); fiat_p256_addcarryx_u32(&x29, &x30, x28, x14, x11); fiat_p256_addcarryx_u32(&x31, &x32, x30, x12, x9); fiat_p256_mulx_u32(&x33, &x34, x21, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x35, &x36, x21, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x37, &x38, x21, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x39, 
&x40, x21, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x41, &x42, 0x0, x40, x37); fiat_p256_addcarryx_u32(&x43, &x44, x42, x38, x35); fiat_p256_addcarryx_u32(&x45, &x46, 0x0, x21, x39); fiat_p256_addcarryx_u32(&x47, &x48, x46, x22, x41); fiat_p256_addcarryx_u32(&x49, &x50, x48, x19, x43); fiat_p256_addcarryx_u32(&x51, &x52, x50, x23, (x44 + x36)); fiat_p256_addcarryx_u32(&x53, &x54, x52, x25, 0x0); fiat_p256_addcarryx_u32(&x55, &x56, x54, x27, 0x0); fiat_p256_addcarryx_u32(&x57, &x58, x56, x29, x21); fiat_p256_addcarryx_u32(&x59, &x60, x58, x31, x33); fiat_p256_addcarryx_u32(&x61, &x62, x60, (x32 + x10), x34); fiat_p256_mulx_u32(&x63, &x64, x1, 0x4); fiat_p256_mulx_u32(&x65, &x66, x1, UINT32_C(0xfffffffd)); fiat_p256_mulx_u32(&x67, &x68, x1, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x69, &x70, x1, UINT32_C(0xfffffffe)); fiat_p256_mulx_u32(&x71, &x72, x1, UINT32_C(0xfffffffb)); fiat_p256_mulx_u32(&x73, &x74, x1, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x75, &x76, x1, 0x3); fiat_p256_addcarryx_u32(&x77, &x78, 0x0, x74, x71); fiat_p256_addcarryx_u32(&x79, &x80, x78, x72, x69); fiat_p256_addcarryx_u32(&x81, &x82, x80, x70, x67); fiat_p256_addcarryx_u32(&x83, &x84, x82, x68, x65); fiat_p256_addcarryx_u32(&x85, &x86, x84, x66, x63); fiat_p256_addcarryx_u32(&x87, &x88, 0x0, x47, x75); fiat_p256_addcarryx_u32(&x89, &x90, x88, x49, x76); fiat_p256_addcarryx_u32(&x91, &x92, x90, x51, x73); fiat_p256_addcarryx_u32(&x93, &x94, x92, x53, x77); fiat_p256_addcarryx_u32(&x95, &x96, x94, x55, x79); fiat_p256_addcarryx_u32(&x97, &x98, x96, x57, x81); fiat_p256_addcarryx_u32(&x99, &x100, x98, x59, x83); fiat_p256_addcarryx_u32(&x101, &x102, x100, x61, x85); fiat_p256_mulx_u32(&x103, &x104, x87, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x105, &x106, x87, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x107, &x108, x87, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x109, &x110, x87, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x111, &x112, 0x0, x110, x107); 
fiat_p256_addcarryx_u32(&x113, &x114, x112, x108, x105); fiat_p256_addcarryx_u32(&x115, &x116, 0x0, x87, x109); fiat_p256_addcarryx_u32(&x117, &x118, x116, x89, x111); fiat_p256_addcarryx_u32(&x119, &x120, x118, x91, x113); fiat_p256_addcarryx_u32(&x121, &x122, x120, x93, (x114 + x106)); fiat_p256_addcarryx_u32(&x123, &x124, x122, x95, 0x0); fiat_p256_addcarryx_u32(&x125, &x126, x124, x97, 0x0); fiat_p256_addcarryx_u32(&x127, &x128, x126, x99, x87); fiat_p256_addcarryx_u32(&x129, &x130, x128, x101, x103); fiat_p256_addcarryx_u32(&x131, &x132, x130, (((uint32_t)x102 + x62) + (x86 + x64)), x104); fiat_p256_mulx_u32(&x133, &x134, x2, 0x4); fiat_p256_mulx_u32(&x135, &x136, x2, UINT32_C(0xfffffffd)); fiat_p256_mulx_u32(&x137, &x138, x2, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x139, &x140, x2, UINT32_C(0xfffffffe)); fiat_p256_mulx_u32(&x141, &x142, x2, UINT32_C(0xfffffffb)); fiat_p256_mulx_u32(&x143, &x144, x2, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x145, &x146, x2, 0x3); fiat_p256_addcarryx_u32(&x147, &x148, 0x0, x144, x141); fiat_p256_addcarryx_u32(&x149, &x150, x148, x142, x139); fiat_p256_addcarryx_u32(&x151, &x152, x150, x140, x137); fiat_p256_addcarryx_u32(&x153, &x154, x152, x138, x135); fiat_p256_addcarryx_u32(&x155, &x156, x154, x136, x133); fiat_p256_addcarryx_u32(&x157, &x158, 0x0, x117, x145); fiat_p256_addcarryx_u32(&x159, &x160, x158, x119, x146); fiat_p256_addcarryx_u32(&x161, &x162, x160, x121, x143); fiat_p256_addcarryx_u32(&x163, &x164, x162, x123, x147); fiat_p256_addcarryx_u32(&x165, &x166, x164, x125, x149); fiat_p256_addcarryx_u32(&x167, &x168, x166, x127, x151); fiat_p256_addcarryx_u32(&x169, &x170, x168, x129, x153); fiat_p256_addcarryx_u32(&x171, &x172, x170, x131, x155); fiat_p256_mulx_u32(&x173, &x174, x157, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x175, &x176, x157, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x177, &x178, x157, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x179, &x180, x157, UINT32_C(0xffffffff)); 
fiat_p256_addcarryx_u32(&x181, &x182, 0x0, x180, x177); fiat_p256_addcarryx_u32(&x183, &x184, x182, x178, x175); fiat_p256_addcarryx_u32(&x185, &x186, 0x0, x157, x179); fiat_p256_addcarryx_u32(&x187, &x188, x186, x159, x181); fiat_p256_addcarryx_u32(&x189, &x190, x188, x161, x183); fiat_p256_addcarryx_u32(&x191, &x192, x190, x163, (x184 + x176)); fiat_p256_addcarryx_u32(&x193, &x194, x192, x165, 0x0); fiat_p256_addcarryx_u32(&x195, &x196, x194, x167, 0x0); fiat_p256_addcarryx_u32(&x197, &x198, x196, x169, x157); fiat_p256_addcarryx_u32(&x199, &x200, x198, x171, x173); fiat_p256_addcarryx_u32(&x201, &x202, x200, (((uint32_t)x172 + x132) + (x156 + x134)), x174); fiat_p256_mulx_u32(&x203, &x204, x3, 0x4); fiat_p256_mulx_u32(&x205, &x206, x3, UINT32_C(0xfffffffd)); fiat_p256_mulx_u32(&x207, &x208, x3, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x209, &x210, x3, UINT32_C(0xfffffffe)); fiat_p256_mulx_u32(&x211, &x212, x3, UINT32_C(0xfffffffb)); fiat_p256_mulx_u32(&x213, &x214, x3, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x215, &x216, x3, 0x3); fiat_p256_addcarryx_u32(&x217, &x218, 0x0, x214, x211); fiat_p256_addcarryx_u32(&x219, &x220, x218, x212, x209); fiat_p256_addcarryx_u32(&x221, &x222, x220, x210, x207); fiat_p256_addcarryx_u32(&x223, &x224, x222, x208, x205); fiat_p256_addcarryx_u32(&x225, &x226, x224, x206, x203); fiat_p256_addcarryx_u32(&x227, &x228, 0x0, x187, x215); fiat_p256_addcarryx_u32(&x229, &x230, x228, x189, x216); fiat_p256_addcarryx_u32(&x231, &x232, x230, x191, x213); fiat_p256_addcarryx_u32(&x233, &x234, x232, x193, x217); fiat_p256_addcarryx_u32(&x235, &x236, x234, x195, x219); fiat_p256_addcarryx_u32(&x237, &x238, x236, x197, x221); fiat_p256_addcarryx_u32(&x239, &x240, x238, x199, x223); fiat_p256_addcarryx_u32(&x241, &x242, x240, x201, x225); fiat_p256_mulx_u32(&x243, &x244, x227, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x245, &x246, x227, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x247, &x248, x227, UINT32_C(0xffffffff)); 
fiat_p256_mulx_u32(&x249, &x250, x227, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x251, &x252, 0x0, x250, x247); fiat_p256_addcarryx_u32(&x253, &x254, x252, x248, x245); fiat_p256_addcarryx_u32(&x255, &x256, 0x0, x227, x249); fiat_p256_addcarryx_u32(&x257, &x258, x256, x229, x251); fiat_p256_addcarryx_u32(&x259, &x260, x258, x231, x253); fiat_p256_addcarryx_u32(&x261, &x262, x260, x233, (x254 + x246)); fiat_p256_addcarryx_u32(&x263, &x264, x262, x235, 0x0); fiat_p256_addcarryx_u32(&x265, &x266, x264, x237, 0x0); fiat_p256_addcarryx_u32(&x267, &x268, x266, x239, x227); fiat_p256_addcarryx_u32(&x269, &x270, x268, x241, x243); fiat_p256_addcarryx_u32(&x271, &x272, x270, (((uint32_t)x242 + x202) + (x226 + x204)), x244); fiat_p256_mulx_u32(&x273, &x274, x4, 0x4); fiat_p256_mulx_u32(&x275, &x276, x4, UINT32_C(0xfffffffd)); fiat_p256_mulx_u32(&x277, &x278, x4, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x279, &x280, x4, UINT32_C(0xfffffffe)); fiat_p256_mulx_u32(&x281, &x282, x4, UINT32_C(0xfffffffb)); fiat_p256_mulx_u32(&x283, &x284, x4, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x285, &x286, x4, 0x3); fiat_p256_addcarryx_u32(&x287, &x288, 0x0, x284, x281); fiat_p256_addcarryx_u32(&x289, &x290, x288, x282, x279); fiat_p256_addcarryx_u32(&x291, &x292, x290, x280, x277); fiat_p256_addcarryx_u32(&x293, &x294, x292, x278, x275); fiat_p256_addcarryx_u32(&x295, &x296, x294, x276, x273); fiat_p256_addcarryx_u32(&x297, &x298, 0x0, x257, x285); fiat_p256_addcarryx_u32(&x299, &x300, x298, x259, x286); fiat_p256_addcarryx_u32(&x301, &x302, x300, x261, x283); fiat_p256_addcarryx_u32(&x303, &x304, x302, x263, x287); fiat_p256_addcarryx_u32(&x305, &x306, x304, x265, x289); fiat_p256_addcarryx_u32(&x307, &x308, x306, x267, x291); fiat_p256_addcarryx_u32(&x309, &x310, x308, x269, x293); fiat_p256_addcarryx_u32(&x311, &x312, x310, x271, x295); fiat_p256_mulx_u32(&x313, &x314, x297, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x315, &x316, x297, UINT32_C(0xffffffff)); 
fiat_p256_mulx_u32(&x317, &x318, x297, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x319, &x320, x297, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x321, &x322, 0x0, x320, x317); fiat_p256_addcarryx_u32(&x323, &x324, x322, x318, x315); fiat_p256_addcarryx_u32(&x325, &x326, 0x0, x297, x319); fiat_p256_addcarryx_u32(&x327, &x328, x326, x299, x321); fiat_p256_addcarryx_u32(&x329, &x330, x328, x301, x323); fiat_p256_addcarryx_u32(&x331, &x332, x330, x303, (x324 + x316)); fiat_p256_addcarryx_u32(&x333, &x334, x332, x305, 0x0); fiat_p256_addcarryx_u32(&x335, &x336, x334, x307, 0x0); fiat_p256_addcarryx_u32(&x337, &x338, x336, x309, x297); fiat_p256_addcarryx_u32(&x339, &x340, x338, x311, x313); fiat_p256_addcarryx_u32(&x341, &x342, x340, (((uint32_t)x312 + x272) + (x296 + x274)), x314); fiat_p256_mulx_u32(&x343, &x344, x5, 0x4); fiat_p256_mulx_u32(&x345, &x346, x5, UINT32_C(0xfffffffd)); fiat_p256_mulx_u32(&x347, &x348, x5, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x349, &x350, x5, UINT32_C(0xfffffffe)); fiat_p256_mulx_u32(&x351, &x352, x5, UINT32_C(0xfffffffb)); fiat_p256_mulx_u32(&x353, &x354, x5, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x355, &x356, x5, 0x3); fiat_p256_addcarryx_u32(&x357, &x358, 0x0, x354, x351); fiat_p256_addcarryx_u32(&x359, &x360, x358, x352, x349); fiat_p256_addcarryx_u32(&x361, &x362, x360, x350, x347); fiat_p256_addcarryx_u32(&x363, &x364, x362, x348, x345); fiat_p256_addcarryx_u32(&x365, &x366, x364, x346, x343); fiat_p256_addcarryx_u32(&x367, &x368, 0x0, x327, x355); fiat_p256_addcarryx_u32(&x369, &x370, x368, x329, x356); fiat_p256_addcarryx_u32(&x371, &x372, x370, x331, x353); fiat_p256_addcarryx_u32(&x373, &x374, x372, x333, x357); fiat_p256_addcarryx_u32(&x375, &x376, x374, x335, x359); fiat_p256_addcarryx_u32(&x377, &x378, x376, x337, x361); fiat_p256_addcarryx_u32(&x379, &x380, x378, x339, x363); fiat_p256_addcarryx_u32(&x381, &x382, x380, x341, x365); fiat_p256_mulx_u32(&x383, &x384, x367, UINT32_C(0xffffffff)); 
fiat_p256_mulx_u32(&x385, &x386, x367, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x387, &x388, x367, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x389, &x390, x367, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x391, &x392, 0x0, x390, x387); fiat_p256_addcarryx_u32(&x393, &x394, x392, x388, x385); fiat_p256_addcarryx_u32(&x395, &x396, 0x0, x367, x389); fiat_p256_addcarryx_u32(&x397, &x398, x396, x369, x391); fiat_p256_addcarryx_u32(&x399, &x400, x398, x371, x393); fiat_p256_addcarryx_u32(&x401, &x402, x400, x373, (x394 + x386)); fiat_p256_addcarryx_u32(&x403, &x404, x402, x375, 0x0); fiat_p256_addcarryx_u32(&x405, &x406, x404, x377, 0x0); fiat_p256_addcarryx_u32(&x407, &x408, x406, x379, x367); fiat_p256_addcarryx_u32(&x409, &x410, x408, x381, x383); fiat_p256_addcarryx_u32(&x411, &x412, x410, (((uint32_t)x382 + x342) + (x366 + x344)), x384); fiat_p256_mulx_u32(&x413, &x414, x6, 0x4); fiat_p256_mulx_u32(&x415, &x416, x6, UINT32_C(0xfffffffd)); fiat_p256_mulx_u32(&x417, &x418, x6, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x419, &x420, x6, UINT32_C(0xfffffffe)); fiat_p256_mulx_u32(&x421, &x422, x6, UINT32_C(0xfffffffb)); fiat_p256_mulx_u32(&x423, &x424, x6, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x425, &x426, x6, 0x3); fiat_p256_addcarryx_u32(&x427, &x428, 0x0, x424, x421); fiat_p256_addcarryx_u32(&x429, &x430, x428, x422, x419); fiat_p256_addcarryx_u32(&x431, &x432, x430, x420, x417); fiat_p256_addcarryx_u32(&x433, &x434, x432, x418, x415); fiat_p256_addcarryx_u32(&x435, &x436, x434, x416, x413); fiat_p256_addcarryx_u32(&x437, &x438, 0x0, x397, x425); fiat_p256_addcarryx_u32(&x439, &x440, x438, x399, x426); fiat_p256_addcarryx_u32(&x441, &x442, x440, x401, x423); fiat_p256_addcarryx_u32(&x443, &x444, x442, x403, x427); fiat_p256_addcarryx_u32(&x445, &x446, x444, x405, x429); fiat_p256_addcarryx_u32(&x447, &x448, x446, x407, x431); fiat_p256_addcarryx_u32(&x449, &x450, x448, x409, x433); fiat_p256_addcarryx_u32(&x451, &x452, x450, x411, x435); 
fiat_p256_mulx_u32(&x453, &x454, x437, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x455, &x456, x437, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x457, &x458, x437, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x459, &x460, x437, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x461, &x462, 0x0, x460, x457); fiat_p256_addcarryx_u32(&x463, &x464, x462, x458, x455); fiat_p256_addcarryx_u32(&x465, &x466, 0x0, x437, x459); fiat_p256_addcarryx_u32(&x467, &x468, x466, x439, x461); fiat_p256_addcarryx_u32(&x469, &x470, x468, x441, x463); fiat_p256_addcarryx_u32(&x471, &x472, x470, x443, (x464 + x456)); fiat_p256_addcarryx_u32(&x473, &x474, x472, x445, 0x0); fiat_p256_addcarryx_u32(&x475, &x476, x474, x447, 0x0); fiat_p256_addcarryx_u32(&x477, &x478, x476, x449, x437); fiat_p256_addcarryx_u32(&x479, &x480, x478, x451, x453); fiat_p256_addcarryx_u32(&x481, &x482, x480, (((uint32_t)x452 + x412) + (x436 + x414)), x454); fiat_p256_mulx_u32(&x483, &x484, x7, 0x4); fiat_p256_mulx_u32(&x485, &x486, x7, UINT32_C(0xfffffffd)); fiat_p256_mulx_u32(&x487, &x488, x7, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x489, &x490, x7, UINT32_C(0xfffffffe)); fiat_p256_mulx_u32(&x491, &x492, x7, UINT32_C(0xfffffffb)); fiat_p256_mulx_u32(&x493, &x494, x7, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x495, &x496, x7, 0x3); fiat_p256_addcarryx_u32(&x497, &x498, 0x0, x494, x491); fiat_p256_addcarryx_u32(&x499, &x500, x498, x492, x489); fiat_p256_addcarryx_u32(&x501, &x502, x500, x490, x487); fiat_p256_addcarryx_u32(&x503, &x504, x502, x488, x485); fiat_p256_addcarryx_u32(&x505, &x506, x504, x486, x483); fiat_p256_addcarryx_u32(&x507, &x508, 0x0, x467, x495); fiat_p256_addcarryx_u32(&x509, &x510, x508, x469, x496); fiat_p256_addcarryx_u32(&x511, &x512, x510, x471, x493); fiat_p256_addcarryx_u32(&x513, &x514, x512, x473, x497); fiat_p256_addcarryx_u32(&x515, &x516, x514, x475, x499); fiat_p256_addcarryx_u32(&x517, &x518, x516, x477, x501); fiat_p256_addcarryx_u32(&x519, &x520, x518, x479, x503); 
fiat_p256_addcarryx_u32(&x521, &x522, x520, x481, x505); fiat_p256_mulx_u32(&x523, &x524, x507, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x525, &x526, x507, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x527, &x528, x507, UINT32_C(0xffffffff)); fiat_p256_mulx_u32(&x529, &x530, x507, UINT32_C(0xffffffff)); fiat_p256_addcarryx_u32(&x531, &x532, 0x0, x530, x527); fiat_p256_addcarryx_u32(&x533, &x534, x532, x528, x525); fiat_p256_addcarryx_u32(&x535, &x536, 0x0, x507, x529); fiat_p256_addcarryx_u32(&x537, &x538, x536, x509, x531); fiat_p256_addcarryx_u32(&x539, &x540, x538, x511, x533); fiat_p256_addcarryx_u32(&x541, &x542, x540, x513, (x534 + x526)); fiat_p256_addcarryx_u32(&x543, &x544, x542, x515, 0x0); fiat_p256_addcarryx_u32(&x545, &x546, x544, x517, 0x0); fiat_p256_addcarryx_u32(&x547, &x548, x546, x519, x507); fiat_p256_addcarryx_u32(&x549, &x550, x548, x521, x523); fiat_p256_addcarryx_u32(&x551, &x552, x550, (((uint32_t)x522 + x482) + (x506 + x484)), x524); fiat_p256_subborrowx_u32(&x553, &x554, 0x0, x537, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x555, &x556, x554, x539, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x557, &x558, x556, x541, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x559, &x560, x558, x543, 0x0); fiat_p256_subborrowx_u32(&x561, &x562, x560, x545, 0x0); fiat_p256_subborrowx_u32(&x563, &x564, x562, x547, 0x0); fiat_p256_subborrowx_u32(&x565, &x566, x564, x549, 0x1); fiat_p256_subborrowx_u32(&x567, &x568, x566, x551, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u32(&x569, &x570, x568, x552, 0x0); fiat_p256_cmovznz_u32(&x571, x570, x553, x537); fiat_p256_cmovznz_u32(&x572, x570, x555, x539); fiat_p256_cmovznz_u32(&x573, x570, x557, x541); fiat_p256_cmovznz_u32(&x574, x570, x559, x543); fiat_p256_cmovznz_u32(&x575, x570, x561, x545); fiat_p256_cmovznz_u32(&x576, x570, x563, x547); fiat_p256_cmovznz_u32(&x577, x570, x565, x549); fiat_p256_cmovznz_u32(&x578, x570, x567, x551); out1[0] = x571; out1[1] = x572; out1[2] = x573; out1[3] = 
x574; out1[4] = x575; out1[5] = x576; out1[6] = x577; out1[7] = x578;
}

/*
 * The function fiat_p256_nonzero outputs a single non-zero word if the input is non-zero and zero otherwise.
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 * Postconditions:
 *   out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0
 *
 * Input Bounds:
 *   arg1: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 * Output Bounds:
 *   out1: [0x0 ~> 0xffffffff]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_nonzero(uint32_t* out1, const uint32_t arg1[8]) {
  uint32_t x1;
  /* OR-fold of all eight limbs: x1 is zero iff every limb of arg1 is zero. */
  x1 = ((arg1[0]) | ((arg1[1]) | ((arg1[2]) | ((arg1[3]) | ((arg1[4]) | ((arg1[5]) | ((arg1[6]) | (arg1[7])))))))); 
  *out1 = x1;
}

/*
 * The function fiat_p256_selectznz is a multi-limb conditional select.
 *
 * Postconditions:
 *   eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0x1]
 *   arg2: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 *   arg3: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 * Output Bounds:
 *   out1: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_selectznz(uint32_t out1[8], fiat_p256_uint1 arg1, const uint32_t arg2[8], const uint32_t arg3[8]) {
  uint32_t x1;
  uint32_t x2;
  uint32_t x3;
  uint32_t x4;
  uint32_t x5;
  uint32_t x6;
  uint32_t x7;
  uint32_t x8;
  /* Limb-wise select via cmovznz: the arg2 limb when arg1 = 0, the arg3 limb otherwise. */
  fiat_p256_cmovznz_u32(&x1, arg1, (arg2[0]), (arg3[0]));
  fiat_p256_cmovznz_u32(&x2, arg1, (arg2[1]), (arg3[1]));
  fiat_p256_cmovznz_u32(&x3, arg1, (arg2[2]), (arg3[2]));
  fiat_p256_cmovznz_u32(&x4, arg1, (arg2[3]), (arg3[3]));
  fiat_p256_cmovznz_u32(&x5, arg1, (arg2[4]), (arg3[4]));
  fiat_p256_cmovznz_u32(&x6, arg1, (arg2[5]), (arg3[5]));
  fiat_p256_cmovznz_u32(&x7, arg1, (arg2[6]), (arg3[6]));
  fiat_p256_cmovznz_u32(&x8, arg1, (arg2[7]), (arg3[7]));
  out1[0] = x1;
  out1[1] = x2;
  out1[2] = x3;
  out1[3] = x4;
  out1[4] = x5;
  out1[5] = x6;
  out1[6] = x7;
  out1[7] = x8;
}

/*
 * The function fiat_p256_to_bytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 * Postconditions:
 *   out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
 *
 * Input Bounds:
 *   arg1: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 * Output Bounds:
 *   out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_to_bytes(uint8_t out1[32], const uint32_t arg1[8]) {
  uint32_t x1;
  uint32_t x2;
  uint32_t x3;
  uint32_t x4;
  uint32_t x5;
  uint32_t x6;
  uint32_t x7;
  uint32_t x8;
  uint8_t x9;
  uint32_t x10;
  uint8_t x11;
  uint32_t x12;
  uint8_t x13;
  uint8_t x14;
  uint8_t x15;
  uint32_t x16;
  uint8_t x17;
  uint32_t x18;
  uint8_t x19;
  uint8_t x20;
  uint8_t x21;
  uint32_t x22;
  uint8_t x23;
  uint32_t x24;
  uint8_t x25;
  uint8_t x26;
  uint8_t x27;
  uint32_t x28;
  uint8_t x29;
  uint32_t x30;
  uint8_t x31;
  uint8_t x32;
  uint8_t x33;
  uint32_t x34;
  uint8_t x35;
  uint32_t x36;
  uint8_t x37;
  uint8_t x38;
  uint8_t x39;
  uint32_t x40;
  uint8_t x41;
  uint32_t x42;
  uint8_t x43;
  uint8_t x44;
  uint8_t x45;
  uint32_t x46;
  uint8_t x47;
  uint32_t x48;
  uint8_t x49;
  uint8_t x50;
  uint8_t x51;
  uint32_t x52;
  uint8_t x53;
  uint32_t x54;
  uint8_t x55;
  uint8_t x56;
  /* Load the limbs, then split each 32-bit limb into four bytes by repeated mask-and-shift. */
  x1 = (arg1[7]);
  x2 = (arg1[6]);
  x3 = (arg1[5]);
  x4 = (arg1[4]);
  x5 = (arg1[3]);
  x6 = (arg1[2]);
  x7 = (arg1[1]);
  x8 = (arg1[0]);
  x9 = (uint8_t)(x8 & UINT8_C(0xff));
  x10 = (x8 >> 8);
  x11 = (uint8_t)(x10 & UINT8_C(0xff));
  x12 = (x10 >> 8);
  x13 = (uint8_t)(x12 & UINT8_C(0xff));
  x14 = (uint8_t)(x12 >> 8);
  x15 = (uint8_t)(x7 & UINT8_C(0xff));
  x16 = (x7 >> 8);
  x17 = (uint8_t)(x16 & UINT8_C(0xff));
  x18 = (x16 >> 8);
  x19 = (uint8_t)(x18 & UINT8_C(0xff));
  x20 = (uint8_t)(x18 >> 8);
  x21 = (uint8_t)(x6 & UINT8_C(0xff));
  x22 = (x6 >> 8);
  x23 = (uint8_t)(x22 & UINT8_C(0xff));
  x24 = (x22 >> 8);
  x25 = (uint8_t)(x24 & UINT8_C(0xff));
  x26 = (uint8_t)(x24 >> 8);
  x27 = (uint8_t)(x5 & UINT8_C(0xff));
  x28 = (x5 >> 8);
  x29 = (uint8_t)(x28 & UINT8_C(0xff));
  x30 = (x28 >> 8);
  x31 = (uint8_t)(x30 & UINT8_C(0xff));
  x32 = (uint8_t)(x30 >> 8);
  x33 = (uint8_t)(x4 & UINT8_C(0xff));
  x34 = (x4 >> 8);
  x35 = (uint8_t)(x34 & UINT8_C(0xff));
  x36 = (x34 >> 8);
  x37 = (uint8_t)(x36 & UINT8_C(0xff));
  x38 = (uint8_t)(x36 >> 8);
  x39 = (uint8_t)(x3 & UINT8_C(0xff));
  x40 = (x3 >> 8);
  x41 = (uint8_t)(x40 & UINT8_C(0xff));
  x42 = (x40 >> 8);
  x43 = (uint8_t)(x42 & UINT8_C(0xff));
  x44 = (uint8_t)(x42 >> 8);
  x45 = (uint8_t)(x2 & UINT8_C(0xff));
  x46 = (x2 >> 8);
  x47 = (uint8_t)(x46 & UINT8_C(0xff));
  x48 = (x46 >> 8);
  x49 = (uint8_t)(x48 & UINT8_C(0xff));
  x50 = (uint8_t)(x48 >> 8);
  x51 = (uint8_t)(x1 & UINT8_C(0xff));
  x52 = (x1 >> 8);
  x53 = (uint8_t)(x52 & UINT8_C(0xff));
  x54 = (x52 >> 8);
  x55 = (uint8_t)(x54 & UINT8_C(0xff));
  x56 = (uint8_t)(x54 >> 8);
  out1[0] = x9;
  out1[1] = x11;
  out1[2] = x13;
  out1[3] = x14;
  out1[4] = x15;
  out1[5] = x17;
  out1[6] = x19;
  out1[7] = x20;
  out1[8] = x21;
  out1[9] = x23;
  out1[10] = x25;
  out1[11] = x26;
  out1[12] = x27;
  out1[13] = x29;
  out1[14] = x31;
  out1[15] = x32;
  out1[16] = x33;
  out1[17] = x35;
  out1[18] = x37;
  out1[19] = x38;
  out1[20] = x39;
  out1[21] = x41;
  out1[22] = x43;
  out1[23] = x44;
  out1[24] = x45;
  out1[25] = x47;
  out1[26] = x49;
  out1[27] = x50;
  out1[28] = x51;
  out1[29] = x53;
  out1[30] = x55;
  out1[31] = x56;
}

/*
 * The function fiat_p256_from_bytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
 *
 * Preconditions:
 *   0 ≤ bytes_eval arg1 < m
 * Postconditions:
 *   eval out1 mod m = bytes_eval arg1 mod m
 *   0 ≤ eval out1 < m
 *
 * Input Bounds:
 *   arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
 * Output Bounds:
 *   out1: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_from_bytes(uint32_t out1[8], const uint8_t arg1[32]) {
  uint32_t x1;
  uint32_t x2;
  uint32_t x3;
  uint8_t x4;
  uint32_t x5;
  uint32_t x6;
  uint32_t x7;
  uint8_t x8;
  uint32_t x9;
  uint32_t x10;
  uint32_t x11;
  uint8_t x12;
  uint32_t x13;
  uint32_t x14;
  uint32_t x15;
  uint8_t x16;
  uint32_t x17;
  uint32_t x18;
  uint32_t x19;
  uint8_t x20;
  uint32_t x21;
  uint32_t x22;
  uint32_t x23;
  uint8_t x24;
  uint32_t x25;
  uint32_t x26;
  uint32_t x27;
  uint8_t x28;
  uint32_t x29;
  uint32_t x30;
  uint32_t x31;
  uint8_t x32;
  uint32_t x33;
  uint32_t x34;
  uint32_t x35;
  uint32_t x36;
  uint32_t x37;
  uint32_t x38;
  uint32_t x39;
  uint32_t x40;
  uint32_t x41;
  uint32_t x42;
  uint32_t x43;
  uint32_t x44;
  uint32_t x45;
  uint32_t x46;
  uint32_t x47;
  uint32_t x48;
  uint32_t x49;
  uint32_t x50;
  uint32_t x51;
  uint32_t x52;
  uint32_t x53;
  uint32_t x54;
  uint32_t x55;
  uint32_t x56;
  /* Shift each byte into position within its limb, then sum the four bytes of each limb. */
  x1 = ((uint32_t)(arg1[31]) << 24);
  x2 = ((uint32_t)(arg1[30]) << 16);
  x3 = ((uint32_t)(arg1[29]) << 8);
  x4 = (arg1[28]);
  x5 = ((uint32_t)(arg1[27]) << 24);
  x6 = ((uint32_t)(arg1[26]) << 16);
  x7 = ((uint32_t)(arg1[25]) << 8);
  x8 = (arg1[24]);
  x9 = ((uint32_t)(arg1[23]) << 24);
  x10 = ((uint32_t)(arg1[22]) << 16);
  x11 = ((uint32_t)(arg1[21]) << 8);
  x12 = (arg1[20]);
  x13 = ((uint32_t)(arg1[19]) << 24);
  x14 = ((uint32_t)(arg1[18]) << 16);
  x15 = ((uint32_t)(arg1[17]) << 8);
  x16 = (arg1[16]);
  x17 = ((uint32_t)(arg1[15]) << 24);
  x18 = ((uint32_t)(arg1[14]) << 16);
  x19 = ((uint32_t)(arg1[13]) << 8);
  x20 = (arg1[12]);
  x21 = ((uint32_t)(arg1[11]) << 24);
  x22 = ((uint32_t)(arg1[10]) << 16);
  x23 = ((uint32_t)(arg1[9]) << 8);
  x24 = (arg1[8]);
  x25 = ((uint32_t)(arg1[7]) << 24);
  x26 = ((uint32_t)(arg1[6]) << 16);
  x27 = ((uint32_t)(arg1[5]) << 8);
  x28 = (arg1[4]);
  x29 = ((uint32_t)(arg1[3]) << 24);
  x30 = ((uint32_t)(arg1[2]) << 16);
  x31 = ((uint32_t)(arg1[1]) << 8);
  x32 = (arg1[0]);
  x33 = (x31 + (uint32_t)x32);
  x34 = (x30 + x33);
  x35 = (x29 + x34);
  x36 = (x27 + (uint32_t)x28);
  x37 = (x26 + x36);
  x38 = (x25 + x37);
  x39 = (x23 + (uint32_t)x24);
  x40 = (x22 + x39);
  x41 = (x21 + x40);
  x42 = (x19 + (uint32_t)x20);
  x43 = (x18 + x42);
  x44 = (x17 + x43);
  x45 = (x15 + (uint32_t)x16);
  x46 = (x14 + x45);
  x47 = (x13 + x46);
  x48 = (x11 + (uint32_t)x12);
  x49 = (x10 + x48);
  x50 = (x9 + x49);
  x51 = (x7 + (uint32_t)x8);
  x52 = (x6 + x51);
  x53 = (x5 + x52);
  x54 = (x3 + (uint32_t)x4);
  x55 = (x2 + x54);
  x56 = (x1 + x55);
  out1[0] = x35;
  out1[1] = x38;
  out1[2] = x41;
  out1[3] = x44;
  out1[4] = x47;
  out1[5] = x50;
  out1[6] = x53;
  out1[7] = x56;
}

/*
 * The function fiat_p256_set_one returns the field element one in the Montgomery domain.
 *
 * Postconditions:
 *   eval (from_montgomery out1) mod m = 1 mod m
 *   0 ≤ eval out1 < m
 *
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_set_one(fiat_p256_montgomery_domain_field_element out1) {
  /* The Montgomery representation of 1 (per the postcondition above). */
  out1[0] = 0x1;
  out1[1] = 0x0;
  out1[2] = 0x0;
  out1[3] = UINT32_C(0xffffffff);
  out1[4] = UINT32_C(0xffffffff);
  out1[5] = UINT32_C(0xffffffff);
  out1[6] = UINT32_C(0xfffffffe);
  out1[7] = 0x0;
}

/*
 * The function fiat_p256_msat returns the saturated representation of the prime modulus.
 *
 * Postconditions:
 *   twos_complement_eval out1 = m
 *   0 ≤ eval out1 < m
 *
 * Output Bounds:
 *   out1: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_msat(uint32_t out1[9]) {
  /* Nine limbs: the 8-limb modulus m plus one extra (sign) limb of zero. */
  out1[0] = UINT32_C(0xffffffff);
  out1[1] = UINT32_C(0xffffffff);
  out1[2] = UINT32_C(0xffffffff);
  out1[3] = 0x0;
  out1[4] = 0x0;
  out1[5] = 0x0;
  out1[6] = 0x1;
  out1[7] = UINT32_C(0xffffffff);
  out1[8] = 0x0;
}

/*
 * The function fiat_p256_divstep computes a divstep.
 *
 * Preconditions:
 *   0 ≤ eval arg4 < m
 *   0 ≤ eval arg5 < m
 * Postconditions:
 *   out1 = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then 1 - arg1 else 1 + arg1)
 *   twos_complement_eval out2 = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then twos_complement_eval arg3 else twos_complement_eval arg2)
 *   twos_complement_eval out3 = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then ⌊(twos_complement_eval arg3 - twos_complement_eval arg2) / 2⌋ else ⌊(twos_complement_eval arg3 + (twos_complement_eval arg3 mod 2) * twos_complement_eval arg2) / 2⌋)
 *   eval (from_montgomery out4) mod m = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then (2 * eval (from_montgomery arg5)) mod m else (2 * eval (from_montgomery arg4)) mod m)
 *   eval (from_montgomery out5) mod m = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then (eval (from_montgomery arg4) - eval (from_montgomery arg4)) mod m else (eval (from_montgomery arg5) + (twos_complement_eval arg3 mod 2) * eval (from_montgomery arg4)) mod m)
 *   0 ≤ eval out5 < m
 *   0 ≤ eval out5 < m
 *   0 ≤ eval out2 < m
 *   0 ≤ eval out3 < m
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0xffffffff]
 *   arg2: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 *   arg3: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 *   arg4: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 *   arg5: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 * Output Bounds:
 *   out1: [0x0 ~> 0xffffffff]
 *   out2: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 *   out3: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 *   out4: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 *   out5: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_divstep(uint32_t* out1, uint32_t out2[9], uint32_t out3[9], uint32_t out4[8], uint32_t out5[8], uint32_t arg1, const uint32_t arg2[9], const uint32_t arg3[9], const uint32_t arg4[8], const uint32_t arg5[8]) {
  /* NOTE(review): autogenerated, formally verified straight-line code; the exact
   * statement sequence is what the proof certifies — do not hand-edit. */
  uint32_t x1; fiat_p256_uint1 x2; fiat_p256_uint1 x3; uint32_t x4; fiat_p256_uint1 x5;
  uint32_t x6; uint32_t x7; uint32_t x8; uint32_t x9; uint32_t x10; uint32_t x11;
  uint32_t x12; uint32_t x13; uint32_t x14; uint32_t x15;
  uint32_t x16; fiat_p256_uint1 x17; uint32_t x18; fiat_p256_uint1 x19;
  uint32_t x20; fiat_p256_uint1 x21; uint32_t x22; fiat_p256_uint1 x23;
  uint32_t x24; fiat_p256_uint1 x25; uint32_t x26; fiat_p256_uint1 x27;
  uint32_t x28; fiat_p256_uint1 x29; uint32_t x30; fiat_p256_uint1 x31;
  uint32_t x32; fiat_p256_uint1 x33;
  uint32_t x34; uint32_t x35; uint32_t x36; uint32_t x37; uint32_t x38; uint32_t x39;
  uint32_t x40; uint32_t x41; uint32_t x42; uint32_t x43; uint32_t x44; uint32_t x45;
  uint32_t x46; uint32_t x47; uint32_t x48; uint32_t x49; uint32_t x50;
  uint32_t x51; fiat_p256_uint1 x52; uint32_t x53; fiat_p256_uint1 x54;
  uint32_t x55; fiat_p256_uint1 x56; uint32_t x57; fiat_p256_uint1 x58;
  uint32_t x59; fiat_p256_uint1 x60; uint32_t x61; fiat_p256_uint1 x62;
  uint32_t x63; fiat_p256_uint1 x64; uint32_t x65; fiat_p256_uint1 x66;
  uint32_t x67; fiat_p256_uint1 x68; uint32_t x69; fiat_p256_uint1 x70;
  uint32_t x71; fiat_p256_uint1 x72; uint32_t x73; fiat_p256_uint1 x74;
  uint32_t x75; fiat_p256_uint1 x76; uint32_t x77; fiat_p256_uint1 x78;
  uint32_t x79; fiat_p256_uint1 x80; uint32_t x81; fiat_p256_uint1 x82;
  uint32_t x83; fiat_p256_uint1 x84;
  uint32_t x85; uint32_t x86; uint32_t x87; uint32_t x88; uint32_t x89;
  uint32_t x90; uint32_t x91; uint32_t x92;
  uint32_t x93; fiat_p256_uint1 x94; uint32_t x95; fiat_p256_uint1 x96;
  uint32_t x97; fiat_p256_uint1 x98; uint32_t x99; fiat_p256_uint1 x100;
  uint32_t x101; fiat_p256_uint1 x102; uint32_t x103; fiat_p256_uint1 x104;
  uint32_t x105; fiat_p256_uint1 x106; uint32_t x107; fiat_p256_uint1 x108;
  uint32_t x109;
  uint32_t x110; fiat_p256_uint1 x111; uint32_t x112; fiat_p256_uint1 x113;
  uint32_t x114; fiat_p256_uint1 x115; uint32_t x116; fiat_p256_uint1 x117;
  uint32_t x118; fiat_p256_uint1 x119; uint32_t x120; fiat_p256_uint1 x121;
  uint32_t x122; fiat_p256_uint1 x123; uint32_t x124; fiat_p256_uint1 x125;
  uint32_t x126; uint32_t x127; uint32_t x128; uint32_t x129; uint32_t x130;
  uint32_t x131; uint32_t x132; uint32_t x133;
  fiat_p256_uint1 x134;
  uint32_t x135; uint32_t x136; uint32_t x137; uint32_t x138; uint32_t x139;
  uint32_t x140; uint32_t x141; uint32_t x142; uint32_t x143;
  uint32_t x144; fiat_p256_uint1 x145; uint32_t x146; fiat_p256_uint1 x147;
  uint32_t x148; fiat_p256_uint1 x149; uint32_t x150; fiat_p256_uint1 x151;
  uint32_t x152; fiat_p256_uint1 x153; uint32_t x154; fiat_p256_uint1 x155;
  uint32_t x156; fiat_p256_uint1 x157; uint32_t x158; fiat_p256_uint1 x159;
  uint32_t x160; fiat_p256_uint1 x161;
  uint32_t x162; uint32_t x163; uint32_t x164; uint32_t x165; uint32_t x166;
  uint32_t x167; uint32_t x168; uint32_t x169;
  uint32_t x170; fiat_p256_uint1 x171; uint32_t x172; fiat_p256_uint1 x173;
  uint32_t x174; fiat_p256_uint1 x175; uint32_t x176; fiat_p256_uint1 x177;
  uint32_t x178; fiat_p256_uint1 x179; uint32_t x180; fiat_p256_uint1 x181;
  uint32_t x182; fiat_p256_uint1 x183; uint32_t x184; fiat_p256_uint1 x185;
  uint32_t x186; fiat_p256_uint1 x187; uint32_t x188; fiat_p256_uint1 x189;
  uint32_t x190; fiat_p256_uint1 x191; uint32_t x192; fiat_p256_uint1 x193;
  uint32_t x194; fiat_p256_uint1 x195; uint32_t x196; fiat_p256_uint1 x197;
  uint32_t x198; fiat_p256_uint1 x199; uint32_t x200; fiat_p256_uint1 x201;
  uint32_t x202; fiat_p256_uint1 x203;
  uint32_t x204; fiat_p256_uint1 x205;
  uint32_t x206; uint32_t x207; uint32_t x208; uint32_t x209; uint32_t x210;
  uint32_t x211; uint32_t x212; uint32_t x213; uint32_t x214;
  uint32_t x215; uint32_t x216; uint32_t x217; uint32_t x218; uint32_t x219;
  uint32_t x220; uint32_t x221; uint32_t x222;
  uint32_t x223; uint32_t x224; uint32_t x225; uint32_t x226; uint32_t x227;
  uint32_t x228; uint32_t x229; uint32_t x230;
  /* x3 is the branch condition from the postconditions (0 < arg1 and arg3 odd),
   * computed without branching: x1 = -arg1, its top bit tests 0 < arg1. */
  fiat_p256_addcarryx_u32(&x1, &x2, 0x0, (~arg1), 0x1);
  x3 = (fiat_p256_uint1)((fiat_p256_uint1)(x1 >> 31) & (fiat_p256_uint1)((arg3[0]) & 0x1));
  fiat_p256_addcarryx_u32(&x4, &x5, 0x0, (~arg1), 0x1);
  fiat_p256_cmovznz_u32(&x6, x3, arg1, x4);
  /* x7..x15: out2 candidate — arg2 when x3 = 0, arg3 otherwise. */
  fiat_p256_cmovznz_u32(&x7, x3, (arg2[0]), (arg3[0]));
  fiat_p256_cmovznz_u32(&x8, x3, (arg2[1]), (arg3[1]));
  fiat_p256_cmovznz_u32(&x9, x3, (arg2[2]), (arg3[2]));
  fiat_p256_cmovznz_u32(&x10, x3, (arg2[3]), (arg3[3]));
  fiat_p256_cmovznz_u32(&x11, x3, (arg2[4]), (arg3[4]));
  fiat_p256_cmovznz_u32(&x12, x3, (arg2[5]), (arg3[5]));
  fiat_p256_cmovznz_u32(&x13, x3, (arg2[6]), (arg3[6]));
  fiat_p256_cmovznz_u32(&x14, x3, (arg2[7]), (arg3[7]));
  fiat_p256_cmovznz_u32(&x15, x3, (arg2[8]), (arg3[8]));
  /* x16..x32: two's-complement negation of arg2 (1 + ~arg2). */
  fiat_p256_addcarryx_u32(&x16, &x17, 0x0, 0x1, (~(arg2[0])));
  fiat_p256_addcarryx_u32(&x18, &x19, x17, 0x0, (~(arg2[1])));
  fiat_p256_addcarryx_u32(&x20, &x21, x19, 0x0, (~(arg2[2])));
  fiat_p256_addcarryx_u32(&x22, &x23, x21, 0x0, (~(arg2[3])));
  fiat_p256_addcarryx_u32(&x24, &x25, x23, 0x0, (~(arg2[4])));
  fiat_p256_addcarryx_u32(&x26, &x27, x25, 0x0, (~(arg2[5])));
  fiat_p256_addcarryx_u32(&x28, &x29, x27, 0x0, (~(arg2[6])));
  fiat_p256_addcarryx_u32(&x30, &x31, x29, 0x0, (~(arg2[7])));
  fiat_p256_addcarryx_u32(&x32, &x33, x31, 0x0, (~(arg2[8])));
  fiat_p256_cmovznz_u32(&x34, x3, (arg3[0]), x16);
  fiat_p256_cmovznz_u32(&x35, x3, (arg3[1]), x18);
  fiat_p256_cmovznz_u32(&x36, x3, (arg3[2]), x20);
  fiat_p256_cmovznz_u32(&x37, x3, (arg3[3]), x22);
  fiat_p256_cmovznz_u32(&x38, x3, (arg3[4]), x24);
  fiat_p256_cmovznz_u32(&x39, x3, (arg3[5]), x26);
  fiat_p256_cmovznz_u32(&x40, x3, (arg3[6]), x28);
  fiat_p256_cmovznz_u32(&x41, x3, (arg3[7]), x30);
  fiat_p256_cmovznz_u32(&x42, x3, (arg3[8]), x32);
  fiat_p256_cmovznz_u32(&x43, x3, (arg4[0]), (arg5[0]));
  fiat_p256_cmovznz_u32(&x44, x3, (arg4[1]), (arg5[1]));
  fiat_p256_cmovznz_u32(&x45, x3, (arg4[2]), (arg5[2]));
  fiat_p256_cmovznz_u32(&x46, x3, (arg4[3]), (arg5[3]));
  fiat_p256_cmovznz_u32(&x47, x3, (arg4[4]), (arg5[4]));
  fiat_p256_cmovznz_u32(&x48, x3, (arg4[5]), (arg5[5]));
  fiat_p256_cmovznz_u32(&x49, x3, (arg4[6]), (arg5[6]));
  fiat_p256_cmovznz_u32(&x50, x3, (arg4[7]), (arg5[7]));
  /* x51..x65: double the selected value; x67..x83: conditionally reduce mod m. */
  fiat_p256_addcarryx_u32(&x51, &x52, 0x0, x43, x43);
  fiat_p256_addcarryx_u32(&x53, &x54, x52, x44, x44);
  fiat_p256_addcarryx_u32(&x55, &x56, x54, x45, x45);
  fiat_p256_addcarryx_u32(&x57, &x58, x56, x46, x46);
  fiat_p256_addcarryx_u32(&x59, &x60, x58, x47, x47);
  fiat_p256_addcarryx_u32(&x61, &x62, x60, x48, x48);
  fiat_p256_addcarryx_u32(&x63, &x64, x62, x49, x49);
  fiat_p256_addcarryx_u32(&x65, &x66, x64, x50, x50);
  fiat_p256_subborrowx_u32(&x67, &x68, 0x0, x51, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u32(&x69, &x70, x68, x53, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u32(&x71, &x72, x70, x55, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u32(&x73, &x74, x72, x57, 0x0);
  fiat_p256_subborrowx_u32(&x75, &x76, x74, x59, 0x0);
  fiat_p256_subborrowx_u32(&x77, &x78, x76, x61, 0x0);
  fiat_p256_subborrowx_u32(&x79, &x80, x78, x63, 0x1);
  fiat_p256_subborrowx_u32(&x81, &x82, x80, x65, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u32(&x83, &x84, x82, x66, 0x0);
  /* x93..x124: 0 - arg4, then add back a mask of m when the subtraction borrowed. */
  x85 = (arg4[7]);
  x86 = (arg4[6]);
  x87 = (arg4[5]);
  x88 = (arg4[4]);
  x89 = (arg4[3]);
  x90 = (arg4[2]);
  x91 = (arg4[1]);
  x92 = (arg4[0]);
  fiat_p256_subborrowx_u32(&x93, &x94, 0x0, 0x0, x92);
  fiat_p256_subborrowx_u32(&x95, &x96, x94, 0x0, x91);
  fiat_p256_subborrowx_u32(&x97, &x98, x96, 0x0, x90);
  fiat_p256_subborrowx_u32(&x99, &x100, x98, 0x0, x89);
  fiat_p256_subborrowx_u32(&x101, &x102, x100, 0x0, x88);
  fiat_p256_subborrowx_u32(&x103, &x104, x102, 0x0, x87);
  fiat_p256_subborrowx_u32(&x105, &x106, x104, 0x0, x86);
  fiat_p256_subborrowx_u32(&x107, &x108, x106, 0x0, x85);
  fiat_p256_cmovznz_u32(&x109, x108, 0x0, UINT32_C(0xffffffff));
  fiat_p256_addcarryx_u32(&x110, &x111, 0x0, x93, x109);
  fiat_p256_addcarryx_u32(&x112, &x113, x111, x95, x109);
  fiat_p256_addcarryx_u32(&x114, &x115, x113, x97, x109);
  fiat_p256_addcarryx_u32(&x116, &x117, x115, x99, 0x0);
  fiat_p256_addcarryx_u32(&x118, &x119, x117, x101, 0x0);
  fiat_p256_addcarryx_u32(&x120, &x121, x119, x103, 0x0);
  fiat_p256_addcarryx_u32(&x122, &x123, x121, x105, (fiat_p256_uint1)(x109 & 0x1));
  fiat_p256_addcarryx_u32(&x124, &x125, x123, x107, x109);
  fiat_p256_cmovznz_u32(&x126, x3, (arg5[0]), x110);
  fiat_p256_cmovznz_u32(&x127, x3, (arg5[1]), x112);
  fiat_p256_cmovznz_u32(&x128, x3, (arg5[2]), x114);
  fiat_p256_cmovznz_u32(&x129, x3, (arg5[3]), x116);
  fiat_p256_cmovznz_u32(&x130, x3, (arg5[4]), x118);
  fiat_p256_cmovznz_u32(&x131, x3, (arg5[5]), x120);
  fiat_p256_cmovznz_u32(&x132, x3, (arg5[6]), x122);
  fiat_p256_cmovznz_u32(&x133, x3, (arg5[7]), x124);
  /* x134 = low bit of the selected arg3-like value: masks the conditional additions below. */
  x134 = (fiat_p256_uint1)(x34 & 0x1);
  fiat_p256_cmovznz_u32(&x135, x134, 0x0, x7);
  fiat_p256_cmovznz_u32(&x136, x134, 0x0, x8);
  fiat_p256_cmovznz_u32(&x137, x134, 0x0, x9);
  fiat_p256_cmovznz_u32(&x138, x134, 0x0, x10);
  fiat_p256_cmovznz_u32(&x139, x134, 0x0, x11);
  fiat_p256_cmovznz_u32(&x140, x134, 0x0, x12);
  fiat_p256_cmovznz_u32(&x141, x134, 0x0, x13);
  fiat_p256_cmovznz_u32(&x142, x134, 0x0, x14);
  fiat_p256_cmovznz_u32(&x143, x134, 0x0, x15);
  fiat_p256_addcarryx_u32(&x144, &x145, 0x0, x34, x135);
  fiat_p256_addcarryx_u32(&x146, &x147, x145, x35, x136);
  fiat_p256_addcarryx_u32(&x148, &x149, x147, x36, x137);
  fiat_p256_addcarryx_u32(&x150, &x151, x149, x37, x138);
  fiat_p256_addcarryx_u32(&x152, &x153, x151, x38, x139);
  fiat_p256_addcarryx_u32(&x154, &x155, x153, x39, x140);
  fiat_p256_addcarryx_u32(&x156, &x157, x155, x40, x141);
  fiat_p256_addcarryx_u32(&x158, &x159, x157, x41, x142);
  fiat_p256_addcarryx_u32(&x160, &x161, x159, x42, x143);
  fiat_p256_cmovznz_u32(&x162, x134, 0x0, x43);
  fiat_p256_cmovznz_u32(&x163, x134, 0x0, x44);
  fiat_p256_cmovznz_u32(&x164, x134, 0x0, x45);
  fiat_p256_cmovznz_u32(&x165, x134, 0x0, x46);
  fiat_p256_cmovznz_u32(&x166, x134, 0x0, x47);
  fiat_p256_cmovznz_u32(&x167, x134, 0x0, x48);
  fiat_p256_cmovznz_u32(&x168, x134, 0x0, x49);
  fiat_p256_cmovznz_u32(&x169, x134, 0x0, x50);
  fiat_p256_addcarryx_u32(&x170, &x171, 0x0, x126, x162);
  fiat_p256_addcarryx_u32(&x172, &x173, x171, x127, x163);
  fiat_p256_addcarryx_u32(&x174, &x175, x173, x128, x164);
  fiat_p256_addcarryx_u32(&x176, &x177, x175, x129, x165);
  fiat_p256_addcarryx_u32(&x178, &x179, x177, x130, x166);
  fiat_p256_addcarryx_u32(&x180, &x181, x179, x131, x167);
  fiat_p256_addcarryx_u32(&x182, &x183, x181, x132, x168);
  fiat_p256_addcarryx_u32(&x184, &x185, x183, x133, x169);
  fiat_p256_subborrowx_u32(&x186, &x187, 0x0, x170, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u32(&x188, &x189, x187, x172, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u32(&x190, &x191, x189, x174, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u32(&x192, &x193, x191, x176, 0x0);
  fiat_p256_subborrowx_u32(&x194, &x195, x193, x178, 0x0);
  fiat_p256_subborrowx_u32(&x196, &x197, x195, x180, 0x0);
  fiat_p256_subborrowx_u32(&x198, &x199, x197, x182, 0x1);
  fiat_p256_subborrowx_u32(&x200, &x201, x199, x184, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u32(&x202, &x203, x201, x185, 0x0);
  fiat_p256_addcarryx_u32(&x204, &x205, 0x0, x6, 0x1);
  /* x206..x214: arithmetic shift right by one of the 9-limb value (out3). */
  x206 = ((x144 >> 1) | ((x146 << 31) & UINT32_C(0xffffffff)));
  x207 = ((x146 >> 1) | ((x148 << 31) & UINT32_C(0xffffffff)));
  x208 = ((x148 >> 1) | ((x150 << 31) & UINT32_C(0xffffffff)));
  x209 = ((x150 >> 1) | ((x152 << 31) & UINT32_C(0xffffffff)));
  x210 = ((x152 >> 1) | ((x154 << 31) & UINT32_C(0xffffffff)));
  x211 = ((x154 >> 1) | ((x156 << 31) & UINT32_C(0xffffffff)));
  x212 = ((x156 >> 1) | ((x158 << 31) & UINT32_C(0xffffffff)));
  x213 = ((x158 >> 1) | ((x160 << 31) & UINT32_C(0xffffffff)));
  x214 = ((x160 & UINT32_C(0x80000000)) | (x160 >> 1));
  /* Final conditional reductions selected by the borrow flags. */
  fiat_p256_cmovznz_u32(&x215, x84, x67, x51);
  fiat_p256_cmovznz_u32(&x216, x84, x69, x53);
  fiat_p256_cmovznz_u32(&x217, x84, x71, x55);
  fiat_p256_cmovznz_u32(&x218, x84, x73, x57);
  fiat_p256_cmovznz_u32(&x219, x84, x75, x59);
  fiat_p256_cmovznz_u32(&x220, x84, x77, x61);
  fiat_p256_cmovznz_u32(&x221, x84, x79, x63);
  fiat_p256_cmovznz_u32(&x222, x84, x81, x65);
  fiat_p256_cmovznz_u32(&x223, x203, x186, x170);
  fiat_p256_cmovznz_u32(&x224, x203, x188, x172);
  fiat_p256_cmovznz_u32(&x225, x203, x190, x174);
  fiat_p256_cmovznz_u32(&x226, x203, x192, x176);
  fiat_p256_cmovznz_u32(&x227, x203, x194, x178);
  fiat_p256_cmovznz_u32(&x228, x203, x196, x180);
  fiat_p256_cmovznz_u32(&x229, x203, x198, x182);
  fiat_p256_cmovznz_u32(&x230, x203, x200, x184);
  *out1 = x204;
  out2[0] = x7;
  out2[1] = x8;
  out2[2] = x9;
  out2[3] = x10;
  out2[4] = x11;
  out2[5] = x12;
  out2[6] = x13;
  out2[7] = x14;
  out2[8] = x15;
  out3[0] = x206;
  out3[1] = x207;
  out3[2] = x208;
  out3[3] = x209;
  out3[4] = x210;
  out3[5] = x211;
  out3[6] = x212;
  out3[7] = x213;
  out3[8] = x214;
  out4[0] = x215;
  out4[1] = x216;
  out4[2] = x217;
  out4[3] = x218;
  out4[4] = x219;
  out4[5] = x220;
  out4[6] = x221;
  out4[7] = x222;
  out5[0] = x223;
  out5[1] = x224;
  out5[2] = x225;
  out5[3] = x226;
  out5[4] = x227;
  out5[5] = x228;
  out5[6] = x229;
  out5[7] = x230;
}

/*
 * The function fiat_p256_divstep_precomp returns the precomputed value for Bernstein-Yang-inversion (in montgomery form).
* (continued)
 *
 * Postconditions:
 *   eval (from_montgomery out1) = ⌊(m - 1) / 2⌋^(if ⌊log2 m⌋ + 1 < 46 then ⌊(49 * (⌊log2 m⌋ + 1) + 80) / 17⌋ else ⌊(49 * (⌊log2 m⌋ + 1) + 57) / 17⌋)
 *   0 ≤ eval out1 < m
 *
 * Output Bounds:
 *   out1: [[0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff], [0x0 ~> 0xffffffff]]
 */
/* NOTE(review): fiat-crypto autogenerated, formally verified constants — do not hand-edit. */
static FIAT_P256_FIAT_INLINE void fiat_p256_divstep_precomp(uint32_t out1[8]) {
  out1[0] = UINT32_C(0xb8000000);
  out1[1] = UINT32_C(0x67ffffff);
  out1[2] = UINT32_C(0x38000000);
  out1[3] = UINT32_C(0xc0000000);
  out1[4] = UINT32_C(0x7fffffff);
  out1[5] = UINT32_C(0xd8000000);
  out1[6] = UINT32_C(0xffffffff);
  out1[7] = UINT32_C(0x2fffffff);
}
================================================ FILE: Sources/CNIOBoringSSL/third_party/fiat/p256_64.h ================================================
/* NOTE(review): the extraction dropped the header name from the #include below
   (upstream BoringSSL has `#include <openssl/base.h>`-style prelude here);
   restore the header target when syncing with upstream. */
#include
#include "../../crypto/internal.h"

/* x86-64 assembly fast paths (BMI1/BMI2/ADX), dispatched at runtime from
   fiat_p256_mul / fiat_p256_square below. */
#if !defined(OPENSSL_NO_ASM) && defined(__GNUC__) && defined(__x86_64__)
extern "C" {
void fiat_p256_adx_mul(uint64_t*, const uint64_t*, const uint64_t*);
void fiat_p256_adx_sqr(uint64_t*, const uint64_t*);
}
#endif

/* Autogenerated: 'src/ExtractionOCaml/word_by_word_montgomery' --inline --static --use-value-barrier p256 64 '2^256 - 2^224 + 2^192 + 2^96 - 1' mul square add sub opp from_montgomery to_montgomery nonzero selectznz to_bytes from_bytes one msat divstep divstep_precomp */
/* curve description: p256 */
/* machine_wordsize = 64 (from "64") */
/* requested operations: mul, square, add, sub, opp, from_montgomery, to_montgomery, nonzero, selectznz, to_bytes, from_bytes, one, msat, divstep, divstep_precomp */
/* m = 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff (from "2^256 - 2^224 + 2^192 + 2^96 - 1") */
/*                                                                        */
/* NOTE: In addition to the bounds specified above each function, all     */
/*   functions synthesized for this Montgomery arithmetic require the     */
/*   input to be strictly less than the prime modulus (m), and also       */
/*   require the input to be in the unique saturated representation.      */
/*   All functions also ensure that these two properties are true of      */
/*   return values.                                                       */
/*                                                                        */
/* Computed values:                                                       */
/*   eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)         */
/*   bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + ... + (z[31] << 248)  (little-endian byte evaluation) */
/*   twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in */
/*     if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256 */

/* NOTE(review): another #include whose header name was stripped by the
   extraction — upstream has `#include <stdint.h>` here. */
#include

/* Single-bit carry/borrow types used by the add-with-carry helpers. */
typedef unsigned char fiat_p256_uint1;
typedef signed char fiat_p256_int1;

#if defined(__GNUC__) || defined(__clang__)
#  define FIAT_P256_FIAT_EXTENSION __extension__
#  define FIAT_P256_FIAT_INLINE __inline__
#else
#  define FIAT_P256_FIAT_EXTENSION
#  define FIAT_P256_FIAT_INLINE
#endif

/* 128-bit scratch types for double-width products and carries. */
FIAT_P256_FIAT_EXTENSION typedef signed __int128 fiat_p256_int128;
FIAT_P256_FIAT_EXTENSION typedef unsigned __int128 fiat_p256_uint128;

/* The type fiat_p256_montgomery_domain_field_element is a field element in the Montgomery domain. */
/* Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */
typedef uint64_t fiat_p256_montgomery_domain_field_element[4];

/* The type fiat_p256_non_montgomery_domain_field_element is a field element NOT in the Montgomery domain.
 */
/* Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */
typedef uint64_t fiat_p256_non_montgomery_domain_field_element[4];

/* The generated arithmetic relies on two's-complement signed behavior. */
#if (-1 & 3) != 3
#error "This code only works on a two's complement system"
#endif

/* Optimization barrier: stops the compiler from seeing through the all-ones /
   all-zeros masks used in fiat_p256_cmovznz_u64, preserving the constant-time
   (branchless) selection the code was verified with. */
#if !defined(FIAT_P256_NO_ASM) && (defined(__GNUC__) || defined(__clang__))
static __inline__ uint64_t fiat_p256_value_barrier_u64(uint64_t a) {
  __asm__("" : "+r"(a) : /* no inputs */);
  return a;
}
#else
#  define fiat_p256_value_barrier_u64(x) (x)
#endif

/*
 * The function fiat_p256_addcarryx_u64 is an addition with carry.
 *
 * Postconditions:
 *   out1 = (arg1 + arg2 + arg3) mod 2^64
 *   out2 = ⌊(arg1 + arg2 + arg3) / 2^64⌋
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0x1]
 *   arg2: [0x0 ~> 0xffffffffffffffff]
 *   arg3: [0x0 ~> 0xffffffffffffffff]
 * Output Bounds:
 *   out1: [0x0 ~> 0xffffffffffffffff]
 *   out2: [0x0 ~> 0x1]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_addcarryx_u64(uint64_t* out1, fiat_p256_uint1* out2, fiat_p256_uint1 arg1, uint64_t arg2, uint64_t arg3) {
  fiat_p256_uint128 x1;
  uint64_t x2;
  fiat_p256_uint1 x3;
  /* Widen to 128 bits so the carry-out falls into bit 64. */
  x1 = ((arg1 + (fiat_p256_uint128)arg2) + arg3);
  x2 = (uint64_t)(x1 & UINT64_C(0xffffffffffffffff));
  x3 = (fiat_p256_uint1)(x1 >> 64);
  *out1 = x2;
  *out2 = x3;
}

/*
 * The function fiat_p256_subborrowx_u64 is a subtraction with borrow.
* (continued)
 *
 * Postconditions:
 *   out1 = (-arg1 + arg2 + -arg3) mod 2^64
 *   out2 = -⌊(-arg1 + arg2 + -arg3) / 2^64⌋
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0x1]
 *   arg2: [0x0 ~> 0xffffffffffffffff]
 *   arg3: [0x0 ~> 0xffffffffffffffff]
 * Output Bounds:
 *   out1: [0x0 ~> 0xffffffffffffffff]
 *   out2: [0x0 ~> 0x1]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_subborrowx_u64(uint64_t* out1, fiat_p256_uint1* out2, fiat_p256_uint1 arg1, uint64_t arg2, uint64_t arg3) {
  fiat_p256_int128 x1;
  fiat_p256_int1 x2;
  uint64_t x3;
  /* Signed 128-bit difference; x2 (the sign of the high half) is -1 exactly
     when the subtraction underflowed, 0 otherwise. */
  x1 = ((arg2 - (fiat_p256_int128)arg1) - arg3);
  x2 = (fiat_p256_int1)(x1 >> 64);
  x3 = (uint64_t)(x1 & UINT64_C(0xffffffffffffffff));
  *out1 = x3;
  /* Negate the sign to expose the borrow as 0/1. */
  *out2 = (fiat_p256_uint1)(0x0 - x2);
}

/*
 * The function fiat_p256_mulx_u64 is a multiplication, returning the full double-width result.
 *
 * Postconditions:
 *   out1 = (arg1 * arg2) mod 2^64
 *   out2 = ⌊arg1 * arg2 / 2^64⌋
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0xffffffffffffffff]
 *   arg2: [0x0 ~> 0xffffffffffffffff]
 * Output Bounds:
 *   out1: [0x0 ~> 0xffffffffffffffff]
 *   out2: [0x0 ~> 0xffffffffffffffff]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_mulx_u64(uint64_t* out1, uint64_t* out2, uint64_t arg1, uint64_t arg2) {
  fiat_p256_uint128 x1;
  uint64_t x2;
  uint64_t x3;
  /* Full 64x64 -> 128 product, split into low (out1) and high (out2) halves. */
  x1 = ((fiat_p256_uint128)arg1 * arg2);
  x2 = (uint64_t)(x1 & UINT64_C(0xffffffffffffffff));
  x3 = (uint64_t)(x1 >> 64);
  *out1 = x2;
  *out2 = x3;
}

/*
 * The function fiat_p256_cmovznz_u64 is a single-word conditional move.
* (continued)
 *
 * Postconditions:
 *   out1 = (if arg1 = 0 then arg2 else arg3)
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0x1]
 *   arg2: [0x0 ~> 0xffffffffffffffff]
 *   arg3: [0x0 ~> 0xffffffffffffffff]
 * Output Bounds:
 *   out1: [0x0 ~> 0xffffffffffffffff]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_cmovznz_u64(uint64_t* out1, fiat_p256_uint1 arg1, uint64_t arg2, uint64_t arg3) {
  fiat_p256_uint1 x1;
  uint64_t x2;
  uint64_t x3;
  /* x2 is all-ones when arg1 != 0, all-zeros otherwise; the value barriers
     keep the compiler from turning the mask-select back into a branch. */
  x1 = (!(!arg1));
  x2 = ((fiat_p256_int1)(0x0 - x1) & UINT64_C(0xffffffffffffffff));
  x3 = ((fiat_p256_value_barrier_u64(x2) & arg3) | (fiat_p256_value_barrier_u64((~x2)) & arg2));
  *out1 = x3;
}

/*
 * The function fiat_p256_mul multiplies two field elements in the Montgomery domain.
 *
 * Word-by-word Montgomery multiplication: each of the four rows interleaves a
 * schoolbook partial product with one Montgomery reduction step, followed by a
 * final conditional subtraction of m.  Autogenerated and formally verified —
 * do not hand-edit.
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 *   0 ≤ eval arg2 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
 *   0 ≤ eval out1 < m
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_mul(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1, const fiat_p256_montgomery_domain_field_element arg2) {
  /* Runtime dispatch to the hand-written ADX assembly when available. */
#if !defined(OPENSSL_NO_ASM) && defined(__GNUC__) && defined(__x86_64__)
  if (CRYPTO_is_BMI1_capable() && CRYPTO_is_BMI2_capable() &&
      CRYPTO_is_ADX_capable()) {
    fiat_p256_adx_mul(out1, arg1, arg2);
    return;
  }
#endif
  /* Temporaries for the four multiply/reduce rounds and the final subtraction. */
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12;
  uint64_t x13; fiat_p256_uint1 x14; uint64_t x15; fiat_p256_uint1 x16; uint64_t x17; fiat_p256_uint1 x18; uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint64_t x24; uint64_t x25;
  uint64_t x26; fiat_p256_uint1 x27; uint64_t x28; uint64_t x29; fiat_p256_uint1 x30; uint64_t x31; fiat_p256_uint1 x32; uint64_t x33; fiat_p256_uint1 x34; uint64_t x35; fiat_p256_uint1 x36; uint64_t x37; fiat_p256_uint1 x38;
  uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42; uint64_t x43; uint64_t x44; uint64_t x45; uint64_t x46;
  uint64_t x47; fiat_p256_uint1 x48; uint64_t x49; fiat_p256_uint1 x50; uint64_t x51; fiat_p256_uint1 x52; uint64_t x53;
  uint64_t x54; fiat_p256_uint1 x55; uint64_t x56; fiat_p256_uint1 x57; uint64_t x58; fiat_p256_uint1 x59; uint64_t x60; fiat_p256_uint1 x61; uint64_t x62; fiat_p256_uint1 x63;
  uint64_t x64; uint64_t x65; uint64_t x66; uint64_t x67; uint64_t x68; uint64_t x69;
  uint64_t x70; fiat_p256_uint1 x71; uint64_t x72; uint64_t x73; fiat_p256_uint1 x74; uint64_t x75; fiat_p256_uint1 x76; uint64_t x77; fiat_p256_uint1 x78; uint64_t x79; fiat_p256_uint1 x80; uint64_t x81; fiat_p256_uint1 x82; uint64_t x83;
  uint64_t x84; uint64_t x85; uint64_t x86; uint64_t x87; uint64_t x88; uint64_t x89; uint64_t x90; uint64_t x91;
  uint64_t x92; fiat_p256_uint1 x93; uint64_t x94; fiat_p256_uint1 x95; uint64_t x96; fiat_p256_uint1 x97; uint64_t x98;
  uint64_t x99; fiat_p256_uint1 x100; uint64_t x101; fiat_p256_uint1 x102; uint64_t x103; fiat_p256_uint1 x104; uint64_t x105; fiat_p256_uint1 x106; uint64_t x107; fiat_p256_uint1 x108;
  uint64_t x109; uint64_t x110; uint64_t x111; uint64_t x112; uint64_t x113; uint64_t x114;
  uint64_t x115; fiat_p256_uint1 x116; uint64_t x117; uint64_t x118; fiat_p256_uint1 x119; uint64_t x120; fiat_p256_uint1 x121; uint64_t x122; fiat_p256_uint1 x123; uint64_t x124; fiat_p256_uint1 x125; uint64_t x126; fiat_p256_uint1 x127; uint64_t x128;
  uint64_t x129; uint64_t x130; uint64_t x131; uint64_t x132; uint64_t x133; uint64_t x134; uint64_t x135; uint64_t x136;
  uint64_t x137; fiat_p256_uint1 x138; uint64_t x139; fiat_p256_uint1 x140; uint64_t x141; fiat_p256_uint1 x142; uint64_t x143;
  uint64_t x144; fiat_p256_uint1 x145; uint64_t x146; fiat_p256_uint1 x147; uint64_t x148; fiat_p256_uint1 x149; uint64_t x150; fiat_p256_uint1 x151; uint64_t x152; fiat_p256_uint1 x153;
  uint64_t x154; uint64_t x155; uint64_t x156; uint64_t x157; uint64_t x158; uint64_t x159;
  uint64_t x160; fiat_p256_uint1 x161; uint64_t x162; uint64_t x163; fiat_p256_uint1 x164; uint64_t x165; fiat_p256_uint1 x166; uint64_t x167; fiat_p256_uint1 x168; uint64_t x169; fiat_p256_uint1 x170; uint64_t x171; fiat_p256_uint1 x172; uint64_t x173;
  uint64_t x174; fiat_p256_uint1 x175; uint64_t x176; fiat_p256_uint1 x177; uint64_t x178; fiat_p256_uint1 x179; uint64_t x180; fiat_p256_uint1 x181; uint64_t x182; fiat_p256_uint1 x183;
  uint64_t x184; uint64_t x185; uint64_t x186; uint64_t x187;
  /* Load the limbs of arg1. */
  x1 = (arg1[1]);
  x2 = (arg1[2]);
  x3 = (arg1[3]);
  x4 = (arg1[0]);
  /* Round 1: arg1[0] * arg2, then fold in one Montgomery reduction step
     (multiples of m chosen so the low limb cancels). */
  fiat_p256_mulx_u64(&x5, &x6, x4, (arg2[3]));
  fiat_p256_mulx_u64(&x7, &x8, x4, (arg2[2]));
  fiat_p256_mulx_u64(&x9, &x10, x4, (arg2[1]));
  fiat_p256_mulx_u64(&x11, &x12, x4, (arg2[0]));
  fiat_p256_addcarryx_u64(&x13, &x14, 0x0, x12, x9);
  fiat_p256_addcarryx_u64(&x15, &x16, x14, x10, x7);
  fiat_p256_addcarryx_u64(&x17, &x18, x16, x8, x5);
  x19 = (x18 + x6);
  fiat_p256_mulx_u64(&x20, &x21, x11, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x22, &x23, x11, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x24, &x25, x11, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x26, &x27, 0x0, x25, x22);
  x28 = (x27 + x23);
  fiat_p256_addcarryx_u64(&x29, &x30, 0x0, x11, x24);
  fiat_p256_addcarryx_u64(&x31, &x32, x30, x13, x26);
  fiat_p256_addcarryx_u64(&x33, &x34, x32, x15, x28);
  fiat_p256_addcarryx_u64(&x35, &x36, x34, x17, x20);
  fiat_p256_addcarryx_u64(&x37, &x38, x36, x19, x21);
  /* Round 2: add arg1[1] * arg2, then reduce. */
  fiat_p256_mulx_u64(&x39, &x40, x1, (arg2[3]));
  fiat_p256_mulx_u64(&x41, &x42, x1, (arg2[2]));
  fiat_p256_mulx_u64(&x43, &x44, x1, (arg2[1]));
  fiat_p256_mulx_u64(&x45, &x46, x1, (arg2[0]));
  fiat_p256_addcarryx_u64(&x47, &x48, 0x0, x46, x43);
  fiat_p256_addcarryx_u64(&x49, &x50, x48, x44, x41);
  fiat_p256_addcarryx_u64(&x51, &x52, x50, x42, x39);
  x53 = (x52 + x40);
  fiat_p256_addcarryx_u64(&x54, &x55, 0x0, x31, x45);
  fiat_p256_addcarryx_u64(&x56, &x57, x55, x33, x47);
  fiat_p256_addcarryx_u64(&x58, &x59, x57, x35, x49);
  fiat_p256_addcarryx_u64(&x60, &x61, x59, x37, x51);
  fiat_p256_addcarryx_u64(&x62, &x63, x61, x38, x53);
  fiat_p256_mulx_u64(&x64, &x65, x54, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x66, &x67, x54, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x68, &x69, x54, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x70, &x71, 0x0, x69, x66);
  x72 = (x71 + x67);
  fiat_p256_addcarryx_u64(&x73, &x74, 0x0, x54, x68);
  fiat_p256_addcarryx_u64(&x75, &x76, x74, x56, x70);
  fiat_p256_addcarryx_u64(&x77, &x78, x76, x58, x72);
  fiat_p256_addcarryx_u64(&x79, &x80, x78, x60, x64);
  fiat_p256_addcarryx_u64(&x81, &x82, x80, x62, x65);
  x83 = ((uint64_t)x82 + x63);
  /* Round 3: add arg1[2] * arg2, then reduce. */
  fiat_p256_mulx_u64(&x84, &x85, x2, (arg2[3]));
  fiat_p256_mulx_u64(&x86, &x87, x2, (arg2[2]));
  fiat_p256_mulx_u64(&x88, &x89, x2, (arg2[1]));
  fiat_p256_mulx_u64(&x90, &x91, x2, (arg2[0]));
  fiat_p256_addcarryx_u64(&x92, &x93, 0x0, x91, x88);
  fiat_p256_addcarryx_u64(&x94, &x95, x93, x89, x86);
  fiat_p256_addcarryx_u64(&x96, &x97, x95, x87, x84);
  x98 = (x97 + x85);
  fiat_p256_addcarryx_u64(&x99, &x100, 0x0, x75, x90);
  fiat_p256_addcarryx_u64(&x101, &x102, x100, x77, x92);
  fiat_p256_addcarryx_u64(&x103, &x104, x102, x79, x94);
  fiat_p256_addcarryx_u64(&x105, &x106, x104, x81, x96);
  fiat_p256_addcarryx_u64(&x107, &x108, x106, x83, x98);
  fiat_p256_mulx_u64(&x109, &x110, x99, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x111, &x112, x99, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x113, &x114, x99, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x115, &x116, 0x0, x114, x111);
  x117 = (x116 + x112);
  fiat_p256_addcarryx_u64(&x118, &x119, 0x0, x99, x113);
  fiat_p256_addcarryx_u64(&x120, &x121, x119, x101, x115);
  fiat_p256_addcarryx_u64(&x122, &x123, x121, x103, x117);
  fiat_p256_addcarryx_u64(&x124, &x125, x123, x105, x109);
  fiat_p256_addcarryx_u64(&x126, &x127, x125, x107, x110);
  x128 = ((uint64_t)x127 + x108);
  /* Round 4: add arg1[3] * arg2, then reduce. */
  fiat_p256_mulx_u64(&x129, &x130, x3, (arg2[3]));
  fiat_p256_mulx_u64(&x131, &x132, x3, (arg2[2]));
  fiat_p256_mulx_u64(&x133, &x134, x3, (arg2[1]));
  fiat_p256_mulx_u64(&x135, &x136, x3, (arg2[0]));
  fiat_p256_addcarryx_u64(&x137, &x138, 0x0, x136, x133);
  fiat_p256_addcarryx_u64(&x139, &x140, x138, x134, x131);
  fiat_p256_addcarryx_u64(&x141, &x142, x140, x132, x129);
  x143 = (x142 + x130);
  fiat_p256_addcarryx_u64(&x144, &x145, 0x0, x120, x135);
  fiat_p256_addcarryx_u64(&x146, &x147, x145, x122, x137);
  fiat_p256_addcarryx_u64(&x148, &x149, x147, x124, x139);
  fiat_p256_addcarryx_u64(&x150, &x151, x149, x126, x141);
  fiat_p256_addcarryx_u64(&x152, &x153, x151, x128, x143);
  fiat_p256_mulx_u64(&x154, &x155, x144, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x156, &x157, x144, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x158, &x159, x144, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x160, &x161, 0x0, x159, x156);
  x162 = (x161 + x157);
  fiat_p256_addcarryx_u64(&x163, &x164, 0x0, x144, x158);
  fiat_p256_addcarryx_u64(&x165, &x166, x164, x146, x160);
  fiat_p256_addcarryx_u64(&x167, &x168, x166, x148, x162);
  fiat_p256_addcarryx_u64(&x169, &x170, x168, x150, x154);
  fiat_p256_addcarryx_u64(&x171, &x172, x170, x152, x155);
  x173 = ((uint64_t)x172 + x153);
  /* Final constant-time conditional subtraction of m to land in [0, m). */
  fiat_p256_subborrowx_u64(&x174, &x175, 0x0, x165, UINT64_C(0xffffffffffffffff));
  fiat_p256_subborrowx_u64(&x176, &x177, x175, x167, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u64(&x178, &x179, x177, x169, 0x0);
  fiat_p256_subborrowx_u64(&x180, &x181, x179, x171, UINT64_C(0xffffffff00000001));
  fiat_p256_subborrowx_u64(&x182, &x183, x181, x173, 0x0);
  fiat_p256_cmovznz_u64(&x184, x183, x174, x165);
  fiat_p256_cmovznz_u64(&x185, x183, x176, x167);
  fiat_p256_cmovznz_u64(&x186, x183, x178, x169);
  fiat_p256_cmovznz_u64(&x187, x183, x180, x171);
  out1[0] = x184;
  out1[1] = x185;
  out1[2] = x186;
  out1[3] = x187;
}

/*
 * The function fiat_p256_square squares a field element in the Montgomery domain.
* (continued)
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
 *   0 ≤ eval out1 < m
 *
 * Same word-by-word Montgomery schedule as fiat_p256_mul with arg2 := arg1.
 * Autogenerated and formally verified — do not hand-edit.
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_square(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1) {
  /* Runtime dispatch to the hand-written ADX assembly when available. */
#if !defined(OPENSSL_NO_ASM) && defined(__GNUC__) && defined(__x86_64__)
  if (CRYPTO_is_BMI1_capable() && CRYPTO_is_BMI2_capable() &&
      CRYPTO_is_ADX_capable()) {
    fiat_p256_adx_sqr(out1, arg1);
    return;
  }
#endif
  /* Temporaries for the four multiply/reduce rounds and the final subtraction. */
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12;
  uint64_t x13; fiat_p256_uint1 x14; uint64_t x15; fiat_p256_uint1 x16; uint64_t x17; fiat_p256_uint1 x18; uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint64_t x24; uint64_t x25;
  uint64_t x26; fiat_p256_uint1 x27; uint64_t x28; uint64_t x29; fiat_p256_uint1 x30; uint64_t x31; fiat_p256_uint1 x32; uint64_t x33; fiat_p256_uint1 x34; uint64_t x35; fiat_p256_uint1 x36; uint64_t x37; fiat_p256_uint1 x38;
  uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42; uint64_t x43; uint64_t x44; uint64_t x45; uint64_t x46;
  uint64_t x47; fiat_p256_uint1 x48; uint64_t x49; fiat_p256_uint1 x50; uint64_t x51; fiat_p256_uint1 x52; uint64_t x53;
  uint64_t x54; fiat_p256_uint1 x55; uint64_t x56; fiat_p256_uint1 x57; uint64_t x58; fiat_p256_uint1 x59; uint64_t x60; fiat_p256_uint1 x61; uint64_t x62; fiat_p256_uint1 x63;
  uint64_t x64; uint64_t x65; uint64_t x66; uint64_t x67; uint64_t x68; uint64_t x69;
  uint64_t x70; fiat_p256_uint1 x71; uint64_t x72; uint64_t x73; fiat_p256_uint1 x74; uint64_t x75; fiat_p256_uint1 x76; uint64_t x77; fiat_p256_uint1 x78; uint64_t x79; fiat_p256_uint1 x80; uint64_t x81; fiat_p256_uint1 x82; uint64_t x83;
  uint64_t x84; uint64_t x85; uint64_t x86; uint64_t x87; uint64_t x88; uint64_t x89; uint64_t x90; uint64_t x91;
  uint64_t x92; fiat_p256_uint1 x93; uint64_t x94; fiat_p256_uint1 x95; uint64_t x96; fiat_p256_uint1 x97; uint64_t x98;
  uint64_t x99; fiat_p256_uint1 x100; uint64_t x101; fiat_p256_uint1 x102; uint64_t x103; fiat_p256_uint1 x104; uint64_t x105; fiat_p256_uint1 x106; uint64_t x107; fiat_p256_uint1 x108;
  uint64_t x109; uint64_t x110; uint64_t x111; uint64_t x112; uint64_t x113; uint64_t x114;
  uint64_t x115; fiat_p256_uint1 x116; uint64_t x117; uint64_t x118; fiat_p256_uint1 x119; uint64_t x120; fiat_p256_uint1 x121; uint64_t x122; fiat_p256_uint1 x123; uint64_t x124; fiat_p256_uint1 x125; uint64_t x126; fiat_p256_uint1 x127; uint64_t x128;
  uint64_t x129; uint64_t x130; uint64_t x131; uint64_t x132; uint64_t x133; uint64_t x134; uint64_t x135; uint64_t x136;
  uint64_t x137; fiat_p256_uint1 x138; uint64_t x139; fiat_p256_uint1 x140; uint64_t x141; fiat_p256_uint1 x142; uint64_t x143;
  uint64_t x144; fiat_p256_uint1 x145; uint64_t x146; fiat_p256_uint1 x147; uint64_t x148; fiat_p256_uint1 x149; uint64_t x150; fiat_p256_uint1 x151; uint64_t x152; fiat_p256_uint1 x153;
  uint64_t x154; uint64_t x155; uint64_t x156; uint64_t x157; uint64_t x158; uint64_t x159;
  uint64_t x160; fiat_p256_uint1 x161; uint64_t x162; uint64_t x163; fiat_p256_uint1 x164; uint64_t x165; fiat_p256_uint1 x166; uint64_t x167; fiat_p256_uint1 x168; uint64_t x169; fiat_p256_uint1 x170; uint64_t x171; fiat_p256_uint1 x172; uint64_t x173;
  uint64_t x174; fiat_p256_uint1 x175; uint64_t x176; fiat_p256_uint1 x177; uint64_t x178; fiat_p256_uint1 x179; uint64_t x180; fiat_p256_uint1 x181; uint64_t x182; fiat_p256_uint1 x183;
  uint64_t x184; uint64_t x185; uint64_t x186; uint64_t x187;
  /* Load the limbs of arg1. */
  x1 = (arg1[1]);
  x2 = (arg1[2]);
  x3 = (arg1[3]);
  x4 = (arg1[0]);
  /* Round 1: arg1[0] * arg1, then one Montgomery reduction step. */
  fiat_p256_mulx_u64(&x5, &x6, x4, (arg1[3]));
  fiat_p256_mulx_u64(&x7, &x8, x4, (arg1[2]));
  fiat_p256_mulx_u64(&x9, &x10, x4, (arg1[1]));
  fiat_p256_mulx_u64(&x11, &x12, x4, (arg1[0]));
  fiat_p256_addcarryx_u64(&x13, &x14, 0x0, x12, x9);
  fiat_p256_addcarryx_u64(&x15, &x16, x14, x10, x7);
  fiat_p256_addcarryx_u64(&x17, &x18, x16, x8, x5);
  x19 = (x18 + x6);
  fiat_p256_mulx_u64(&x20, &x21, x11, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x22, &x23, x11, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x24, &x25, x11, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x26, &x27, 0x0, x25, x22);
  x28 = (x27 + x23);
  fiat_p256_addcarryx_u64(&x29, &x30, 0x0, x11, x24);
  fiat_p256_addcarryx_u64(&x31, &x32, x30, x13, x26);
  fiat_p256_addcarryx_u64(&x33, &x34, x32, x15, x28);
  fiat_p256_addcarryx_u64(&x35, &x36, x34, x17, x20);
  fiat_p256_addcarryx_u64(&x37, &x38, x36, x19, x21);
  /* Round 2: add arg1[1] * arg1, then reduce. */
  fiat_p256_mulx_u64(&x39, &x40, x1, (arg1[3]));
  fiat_p256_mulx_u64(&x41, &x42, x1, (arg1[2]));
  fiat_p256_mulx_u64(&x43, &x44, x1, (arg1[1]));
  fiat_p256_mulx_u64(&x45, &x46, x1, (arg1[0]));
  fiat_p256_addcarryx_u64(&x47, &x48, 0x0, x46, x43);
  fiat_p256_addcarryx_u64(&x49, &x50, x48, x44, x41);
  fiat_p256_addcarryx_u64(&x51, &x52, x50, x42, x39);
  x53 = (x52 + x40);
  fiat_p256_addcarryx_u64(&x54, &x55, 0x0, x31, x45);
  fiat_p256_addcarryx_u64(&x56, &x57, x55, x33, x47);
  fiat_p256_addcarryx_u64(&x58, &x59, x57, x35, x49);
  fiat_p256_addcarryx_u64(&x60, &x61, x59, x37, x51);
  fiat_p256_addcarryx_u64(&x62, &x63, x61, x38, x53);
  fiat_p256_mulx_u64(&x64, &x65, x54, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x66, &x67, x54, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x68, &x69, x54, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x70, &x71, 0x0, x69, x66);
  x72 = (x71 + x67);
  fiat_p256_addcarryx_u64(&x73, &x74, 0x0, x54, x68);
  fiat_p256_addcarryx_u64(&x75, &x76, x74, x56, x70);
  fiat_p256_addcarryx_u64(&x77, &x78, x76, x58, x72);
  fiat_p256_addcarryx_u64(&x79, &x80, x78, x60, x64);
  fiat_p256_addcarryx_u64(&x81, &x82, x80, x62, x65);
  x83 = ((uint64_t)x82 + x63);
  /* Round 3: add arg1[2] * arg1, then reduce. */
  fiat_p256_mulx_u64(&x84, &x85, x2, (arg1[3]));
  fiat_p256_mulx_u64(&x86, &x87, x2, (arg1[2]));
  fiat_p256_mulx_u64(&x88, &x89, x2, (arg1[1]));
  fiat_p256_mulx_u64(&x90, &x91, x2, (arg1[0]));
  fiat_p256_addcarryx_u64(&x92, &x93, 0x0, x91, x88);
  fiat_p256_addcarryx_u64(&x94, &x95, x93, x89, x86);
  fiat_p256_addcarryx_u64(&x96, &x97, x95, x87, x84);
  x98 = (x97 + x85);
  fiat_p256_addcarryx_u64(&x99, &x100, 0x0, x75, x90);
  fiat_p256_addcarryx_u64(&x101, &x102, x100, x77, x92);
  fiat_p256_addcarryx_u64(&x103, &x104, x102, x79, x94);
  fiat_p256_addcarryx_u64(&x105, &x106, x104, x81, x96);
  fiat_p256_addcarryx_u64(&x107, &x108, x106, x83, x98);
  fiat_p256_mulx_u64(&x109, &x110, x99, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x111, &x112, x99, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x113, &x114, x99, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x115, &x116, 0x0, x114, x111);
  x117 = (x116 + x112);
  fiat_p256_addcarryx_u64(&x118, &x119, 0x0, x99, x113);
  fiat_p256_addcarryx_u64(&x120, &x121, x119, x101, x115);
  fiat_p256_addcarryx_u64(&x122, &x123, x121, x103, x117);
  fiat_p256_addcarryx_u64(&x124, &x125, x123, x105, x109);
  fiat_p256_addcarryx_u64(&x126, &x127, x125, x107, x110);
  x128 = ((uint64_t)x127 + x108);
  /* Round 4: add arg1[3] * arg1, then reduce. */
  fiat_p256_mulx_u64(&x129, &x130, x3, (arg1[3]));
  fiat_p256_mulx_u64(&x131, &x132, x3, (arg1[2]));
  fiat_p256_mulx_u64(&x133, &x134, x3, (arg1[1]));
  fiat_p256_mulx_u64(&x135, &x136, x3, (arg1[0]));
  fiat_p256_addcarryx_u64(&x137, &x138, 0x0, x136, x133);
  fiat_p256_addcarryx_u64(&x139, &x140, x138, x134, x131);
  fiat_p256_addcarryx_u64(&x141, &x142, x140, x132, x129);
  x143 = (x142 + x130);
  fiat_p256_addcarryx_u64(&x144, &x145, 0x0, x120, x135);
  fiat_p256_addcarryx_u64(&x146, &x147, x145, x122, x137);
  fiat_p256_addcarryx_u64(&x148, &x149, x147, x124, x139);
  fiat_p256_addcarryx_u64(&x150, &x151, x149, x126, x141);
  fiat_p256_addcarryx_u64(&x152, &x153, x151, x128, x143);
  fiat_p256_mulx_u64(&x154, &x155, x144, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x156, &x157, x144, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x158, &x159, x144, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x160, &x161, 0x0, x159, x156);
  x162 = (x161 + x157);
  fiat_p256_addcarryx_u64(&x163, &x164, 0x0, x144, x158);
  fiat_p256_addcarryx_u64(&x165, &x166, x164, x146, x160);
  fiat_p256_addcarryx_u64(&x167, &x168, x166, x148, x162);
  fiat_p256_addcarryx_u64(&x169, &x170, x168, x150, x154);
  fiat_p256_addcarryx_u64(&x171, &x172, x170, x152, x155);
  x173 = ((uint64_t)x172 + x153);
  /* Final constant-time conditional subtraction of m. */
  fiat_p256_subborrowx_u64(&x174, &x175, 0x0, x165, UINT64_C(0xffffffffffffffff));
  fiat_p256_subborrowx_u64(&x176, &x177, x175, x167, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u64(&x178, &x179, x177, x169, 0x0);
  fiat_p256_subborrowx_u64(&x180, &x181, x179, x171, UINT64_C(0xffffffff00000001));
  fiat_p256_subborrowx_u64(&x182, &x183, x181, x173, 0x0);
  fiat_p256_cmovznz_u64(&x184, x183, x174, x165);
  fiat_p256_cmovznz_u64(&x185, x183, x176, x167);
  fiat_p256_cmovznz_u64(&x186, x183, x178, x169);
  fiat_p256_cmovznz_u64(&x187, x183, x180, x171);
  out1[0] = x184;
  out1[1] = x185;
  out1[2] = x186;
  out1[3] = x187;
}

/*
 * The function fiat_p256_add adds two field elements in the Montgomery domain.
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 *   0 ≤ eval arg2 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
 *   0 ≤ eval out1 < m
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_add(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1, const fiat_p256_montgomery_domain_field_element arg2) {
  uint64_t x1; fiat_p256_uint1 x2; uint64_t x3; fiat_p256_uint1 x4; uint64_t x5; fiat_p256_uint1 x6; uint64_t x7; fiat_p256_uint1 x8;
  uint64_t x9; fiat_p256_uint1 x10; uint64_t x11; fiat_p256_uint1 x12; uint64_t x13; fiat_p256_uint1 x14; uint64_t x15; fiat_p256_uint1 x16; uint64_t x17; fiat_p256_uint1 x18;
  uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22;
  /* Full 256-bit add with carry-out x8. */
  fiat_p256_addcarryx_u64(&x1, &x2, 0x0, (arg1[0]), (arg2[0]));
  fiat_p256_addcarryx_u64(&x3, &x4, x2, (arg1[1]), (arg2[1]));
  fiat_p256_addcarryx_u64(&x5, &x6, x4, (arg1[2]), (arg2[2]));
  fiat_p256_addcarryx_u64(&x7, &x8, x6, (arg1[3]), (arg2[3]));
  /* Trial-subtract m (limbs 0xffffffffffffffff, 0xffffffff, 0, 0xffffffff00000001);
     keep the subtracted value unless it borrowed. */
  fiat_p256_subborrowx_u64(&x9, &x10, 0x0, x1, UINT64_C(0xffffffffffffffff));
  fiat_p256_subborrowx_u64(&x11, &x12, x10, x3, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u64(&x13, &x14, x12, x5, 0x0);
  fiat_p256_subborrowx_u64(&x15, &x16, x14, x7, UINT64_C(0xffffffff00000001));
  fiat_p256_subborrowx_u64(&x17, &x18, x16, x8, 0x0);
  fiat_p256_cmovznz_u64(&x19, x18, x9, x1);
  fiat_p256_cmovznz_u64(&x20, x18, x11, x3);
  fiat_p256_cmovznz_u64(&x21, x18, x13, x5);
  fiat_p256_cmovznz_u64(&x22, x18, x15, x7);
  out1[0] = x19;
  out1[1] = x20;
  out1[2] = x21;
  out1[3] = x22;
}

/*
 * The function fiat_p256_sub subtracts two field elements in the Montgomery domain.
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 *   0 ≤ eval arg2 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
 *   0 ≤ eval out1 < m
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_sub(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1, const fiat_p256_montgomery_domain_field_element arg2) {
  uint64_t x1; fiat_p256_uint1 x2; uint64_t x3; fiat_p256_uint1 x4; uint64_t x5; fiat_p256_uint1 x6; uint64_t x7; fiat_p256_uint1 x8;
  uint64_t x9; uint64_t x10; fiat_p256_uint1 x11; uint64_t x12; fiat_p256_uint1 x13; uint64_t x14; fiat_p256_uint1 x15; uint64_t x16; fiat_p256_uint1 x17;
  /* Full 256-bit subtract with borrow-out x8. */
  fiat_p256_subborrowx_u64(&x1, &x2, 0x0, (arg1[0]), (arg2[0]));
  fiat_p256_subborrowx_u64(&x3, &x4, x2, (arg1[1]), (arg2[1]));
  fiat_p256_subborrowx_u64(&x5, &x6, x4, (arg1[2]), (arg2[2]));
  fiat_p256_subborrowx_u64(&x7, &x8, x6, (arg1[3]), (arg2[3]));
  /* x9 is all-ones iff we underflowed; masking the limbs of m with it adds
     back m exactly when needed (branch-free). */
  fiat_p256_cmovznz_u64(&x9, x8, 0x0, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x10, &x11, 0x0, x1, x9);
  fiat_p256_addcarryx_u64(&x12, &x13, x11, x3, (x9 & UINT32_C(0xffffffff)));
  fiat_p256_addcarryx_u64(&x14, &x15, x13, x5, 0x0);
  fiat_p256_addcarryx_u64(&x16, &x17, x15, x7, (x9 & UINT64_C(0xffffffff00000001)));
  out1[0] = x10;
  out1[1] = x12;
  out1[2] = x14;
  out1[3] = x16;
}

/*
 * The function
fiat_p256_opp negates a field element in the Montgomery domain.
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m
 *   0 ≤ eval out1 < m
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_opp(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1) {
  uint64_t x1; fiat_p256_uint1 x2; uint64_t x3; fiat_p256_uint1 x4; uint64_t x5; fiat_p256_uint1 x6; uint64_t x7; fiat_p256_uint1 x8;
  uint64_t x9; uint64_t x10; fiat_p256_uint1 x11; uint64_t x12; fiat_p256_uint1 x13; uint64_t x14; fiat_p256_uint1 x15; uint64_t x16; fiat_p256_uint1 x17;
  /* Compute 0 - arg1 with borrow-out x8 (same shape as fiat_p256_sub with
     arg1 := 0). */
  fiat_p256_subborrowx_u64(&x1, &x2, 0x0, 0x0, (arg1[0]));
  fiat_p256_subborrowx_u64(&x3, &x4, x2, 0x0, (arg1[1]));
  fiat_p256_subborrowx_u64(&x5, &x6, x4, 0x0, (arg1[2]));
  fiat_p256_subborrowx_u64(&x7, &x8, x6, 0x0, (arg1[3]));
  /* Branch-free add-back of m when the subtraction underflowed (i.e. for all
     nonzero inputs). */
  fiat_p256_cmovznz_u64(&x9, x8, 0x0, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x10, &x11, 0x0, x1, x9);
  fiat_p256_addcarryx_u64(&x12, &x13, x11, x3, (x9 & UINT32_C(0xffffffff)));
  fiat_p256_addcarryx_u64(&x14, &x15, x13, x5, 0x0);
  fiat_p256_addcarryx_u64(&x16, &x17, x15, x7, (x9 & UINT64_C(0xffffffff00000001)));
  out1[0] = x10;
  out1[1] = x12;
  out1[2] = x14;
  out1[3] = x16;
}

/*
 * The function fiat_p256_from_montgomery translates a field element out of the Montgomery domain.
* (continued)
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 * Postconditions:
 *   eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
 *   0 ≤ eval out1 < m
 *
 * Four Montgomery reduction rounds (one per input limb) followed by a final
 * conditional subtraction of m.  Autogenerated and formally verified — do
 * not hand-edit.
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_from_montgomery(fiat_p256_non_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1) {
  /* Temporaries for the four reduction rounds and the final subtraction. */
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7;
  uint64_t x8; fiat_p256_uint1 x9; uint64_t x10; fiat_p256_uint1 x11; uint64_t x12; fiat_p256_uint1 x13; uint64_t x14; fiat_p256_uint1 x15;
  uint64_t x16; uint64_t x17; uint64_t x18; uint64_t x19; uint64_t x20; uint64_t x21;
  uint64_t x22; fiat_p256_uint1 x23; uint64_t x24; fiat_p256_uint1 x25; uint64_t x26; fiat_p256_uint1 x27; uint64_t x28; fiat_p256_uint1 x29; uint64_t x30; fiat_p256_uint1 x31;
  uint64_t x32; fiat_p256_uint1 x33; uint64_t x34; fiat_p256_uint1 x35; uint64_t x36; fiat_p256_uint1 x37;
  uint64_t x38; uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42; uint64_t x43;
  uint64_t x44; fiat_p256_uint1 x45; uint64_t x46; fiat_p256_uint1 x47; uint64_t x48; fiat_p256_uint1 x49; uint64_t x50; fiat_p256_uint1 x51; uint64_t x52; fiat_p256_uint1 x53;
  uint64_t x54; fiat_p256_uint1 x55; uint64_t x56; fiat_p256_uint1 x57; uint64_t x58; fiat_p256_uint1 x59;
  uint64_t x60; uint64_t x61; uint64_t x62; uint64_t x63; uint64_t x64; uint64_t x65;
  uint64_t x66; fiat_p256_uint1 x67; uint64_t x68; fiat_p256_uint1 x69; uint64_t x70; fiat_p256_uint1 x71; uint64_t x72; fiat_p256_uint1 x73; uint64_t x74; fiat_p256_uint1 x75;
  uint64_t x76; uint64_t x77; fiat_p256_uint1 x78; uint64_t x79; fiat_p256_uint1 x80; uint64_t x81; fiat_p256_uint1 x82; uint64_t x83; fiat_p256_uint1 x84; uint64_t x85; fiat_p256_uint1 x86;
  uint64_t x87; uint64_t x88; uint64_t x89; uint64_t x90;
  /* Round 1: cancel limb 0. */
  x1 = (arg1[0]);
  fiat_p256_mulx_u64(&x2, &x3, x1, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x4, &x5, x1, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x6, &x7, x1, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x8, &x9, 0x0, x7, x4);
  fiat_p256_addcarryx_u64(&x10, &x11, 0x0, x1, x6);
  fiat_p256_addcarryx_u64(&x12, &x13, x11, 0x0, x8);
  /* Round 2: absorb limb 1 and cancel it. */
  fiat_p256_addcarryx_u64(&x14, &x15, 0x0, x12, (arg1[1]));
  fiat_p256_mulx_u64(&x16, &x17, x14, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x18, &x19, x14, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x20, &x21, x14, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x22, &x23, 0x0, x21, x18);
  fiat_p256_addcarryx_u64(&x24, &x25, 0x0, x14, x20);
  fiat_p256_addcarryx_u64(&x26, &x27, x25, (x15 + (x13 + (x9 + x5))), x22);
  fiat_p256_addcarryx_u64(&x28, &x29, x27, x2, (x23 + x19));
  fiat_p256_addcarryx_u64(&x30, &x31, x29, x3, x16);
  /* Round 3: absorb limb 2 and cancel it. */
  fiat_p256_addcarryx_u64(&x32, &x33, 0x0, x26, (arg1[2]));
  fiat_p256_addcarryx_u64(&x34, &x35, x33, x28, 0x0);
  fiat_p256_addcarryx_u64(&x36, &x37, x35, x30, 0x0);
  fiat_p256_mulx_u64(&x38, &x39, x32, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x40, &x41, x32, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x42, &x43, x32, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x44, &x45, 0x0, x43, x40);
  fiat_p256_addcarryx_u64(&x46, &x47, 0x0, x32, x42);
  fiat_p256_addcarryx_u64(&x48, &x49, x47, x34, x44);
  fiat_p256_addcarryx_u64(&x50, &x51, x49, x36, (x45 + x41));
  fiat_p256_addcarryx_u64(&x52, &x53, x51, (x37 + (x31 + x17)), x38);
  /* Round 4: absorb limb 3 and cancel it. */
  fiat_p256_addcarryx_u64(&x54, &x55, 0x0, x48, (arg1[3]));
  fiat_p256_addcarryx_u64(&x56, &x57, x55, x50, 0x0);
  fiat_p256_addcarryx_u64(&x58, &x59, x57, x52, 0x0);
  fiat_p256_mulx_u64(&x60, &x61, x54, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x62, &x63, x54, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x64, &x65, x54, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x66, &x67, 0x0, x65, x62);
  fiat_p256_addcarryx_u64(&x68, &x69, 0x0, x54, x64);
  fiat_p256_addcarryx_u64(&x70, &x71, x69, x56, x66);
  fiat_p256_addcarryx_u64(&x72, &x73, x71, x58, (x67 + x63));
  fiat_p256_addcarryx_u64(&x74, &x75, x73, (x59 + (x53 + x39)), x60);
  x76 = (x75 + x61);
  /* Final constant-time conditional subtraction of m. */
  fiat_p256_subborrowx_u64(&x77, &x78, 0x0, x70, UINT64_C(0xffffffffffffffff));
  fiat_p256_subborrowx_u64(&x79, &x80, x78, x72, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u64(&x81, &x82, x80, x74, 0x0);
  fiat_p256_subborrowx_u64(&x83, &x84, x82, x76, UINT64_C(0xffffffff00000001));
  fiat_p256_subborrowx_u64(&x85, &x86, x84, 0x0, 0x0);
  fiat_p256_cmovznz_u64(&x87, x86, x77, x70);
  fiat_p256_cmovznz_u64(&x88, x86, x79, x72);
  fiat_p256_cmovznz_u64(&x89, x86, x81, x74);
  fiat_p256_cmovznz_u64(&x90, x86, x83, x76);
  out1[0] = x87;
  out1[1] = x88;
  out1[2] = x89;
  out1[3] = x90;
}

/*
 * The function fiat_p256_to_montgomery translates a field element into the Montgomery domain.
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = eval arg1 mod m
 *   0 ≤ eval out1 < m
 *
 * NOTE(review): this function continues beyond this extraction chunk; only its
 * head (signature and part of the temporaries) is visible here.
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_to_montgomery(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_non_montgomery_domain_field_element arg1) {
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12;
  uint64_t x13; fiat_p256_uint1 x14; uint64_t x15; fiat_p256_uint1 x16; uint64_t x17; fiat_p256_uint1 x18; uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint64_t x24;
  uint64_t x25; fiat_p256_uint1 x26; uint64_t x27; fiat_p256_uint1 x28; uint64_t x29; fiat_p256_uint1 x30; uint64_t x31; fiat_p256_uint1 x32; uint64_t x33; fiat_p256_uint1 x34; uint64_t x35; fiat_p256_uint1 x36;
  uint64_t x37; uint64_t x38; uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42; uint64_t x43; uint64_t x44;
  uint64_t x45; fiat_p256_uint1 x46; uint64_t x47; fiat_p256_uint1 x48; uint64_t x49; fiat_p256_uint1 x50; uint64_t x51; fiat_p256_uint1 x52; uint64_t x53; fiat_p256_uint1 x54; uint64_t x55; fiat_p256_uint1 x56; uint64_t x57; fiat_p256_uint1 x58;
  uint64_t x59; uint64_t x60; uint64_t x61; uint64_t x62; uint64_t x63; uint64_t x64;
  uint64_t x65; fiat_p256_uint1 x66; uint64_t x67; fiat_p256_uint1 x68;
uint64_t x69; fiat_p256_uint1 x70; uint64_t x71; fiat_p256_uint1 x72; uint64_t x73; fiat_p256_uint1 x74; uint64_t x75; fiat_p256_uint1 x76; uint64_t x77; uint64_t x78; uint64_t x79; uint64_t x80; uint64_t x81; uint64_t x82; uint64_t x83; uint64_t x84; uint64_t x85; fiat_p256_uint1 x86; uint64_t x87; fiat_p256_uint1 x88; uint64_t x89; fiat_p256_uint1 x90; uint64_t x91; fiat_p256_uint1 x92; uint64_t x93; fiat_p256_uint1 x94; uint64_t x95; fiat_p256_uint1 x96; uint64_t x97; fiat_p256_uint1 x98; uint64_t x99; uint64_t x100; uint64_t x101; uint64_t x102; uint64_t x103; uint64_t x104; uint64_t x105; fiat_p256_uint1 x106; uint64_t x107; fiat_p256_uint1 x108; uint64_t x109; fiat_p256_uint1 x110; uint64_t x111; fiat_p256_uint1 x112; uint64_t x113; fiat_p256_uint1 x114; uint64_t x115; fiat_p256_uint1 x116; uint64_t x117; uint64_t x118; uint64_t x119; uint64_t x120; uint64_t x121; uint64_t x122; uint64_t x123; uint64_t x124; uint64_t x125; fiat_p256_uint1 x126; uint64_t x127; fiat_p256_uint1 x128; uint64_t x129; fiat_p256_uint1 x130; uint64_t x131; fiat_p256_uint1 x132; uint64_t x133; fiat_p256_uint1 x134; uint64_t x135; fiat_p256_uint1 x136; uint64_t x137; fiat_p256_uint1 x138; uint64_t x139; uint64_t x140; uint64_t x141; uint64_t x142; uint64_t x143; uint64_t x144; uint64_t x145; fiat_p256_uint1 x146; uint64_t x147; fiat_p256_uint1 x148; uint64_t x149; fiat_p256_uint1 x150; uint64_t x151; fiat_p256_uint1 x152; uint64_t x153; fiat_p256_uint1 x154; uint64_t x155; fiat_p256_uint1 x156; uint64_t x157; fiat_p256_uint1 x158; uint64_t x159; fiat_p256_uint1 x160; uint64_t x161; fiat_p256_uint1 x162; uint64_t x163; fiat_p256_uint1 x164; uint64_t x165; fiat_p256_uint1 x166; uint64_t x167; uint64_t x168; uint64_t x169; uint64_t x170; x1 = (arg1[1]); x2 = (arg1[2]); x3 = (arg1[3]); x4 = (arg1[0]); fiat_p256_mulx_u64(&x5, &x6, x4, UINT64_C(0x4fffffffd)); fiat_p256_mulx_u64(&x7, &x8, x4, UINT64_C(0xfffffffffffffffe)); fiat_p256_mulx_u64(&x9, &x10, x4, UINT64_C(0xfffffffbffffffff)); 
fiat_p256_mulx_u64(&x11, &x12, x4, 0x3); fiat_p256_addcarryx_u64(&x13, &x14, 0x0, x12, x9); fiat_p256_addcarryx_u64(&x15, &x16, x14, x10, x7); fiat_p256_addcarryx_u64(&x17, &x18, x16, x8, x5); fiat_p256_mulx_u64(&x19, &x20, x11, UINT64_C(0xffffffff00000001)); fiat_p256_mulx_u64(&x21, &x22, x11, UINT32_C(0xffffffff)); fiat_p256_mulx_u64(&x23, &x24, x11, UINT64_C(0xffffffffffffffff)); fiat_p256_addcarryx_u64(&x25, &x26, 0x0, x24, x21); fiat_p256_addcarryx_u64(&x27, &x28, 0x0, x11, x23); fiat_p256_addcarryx_u64(&x29, &x30, x28, x13, x25); fiat_p256_addcarryx_u64(&x31, &x32, x30, x15, (x26 + x22)); fiat_p256_addcarryx_u64(&x33, &x34, x32, x17, x19); fiat_p256_addcarryx_u64(&x35, &x36, x34, (x18 + x6), x20); fiat_p256_mulx_u64(&x37, &x38, x1, UINT64_C(0x4fffffffd)); fiat_p256_mulx_u64(&x39, &x40, x1, UINT64_C(0xfffffffffffffffe)); fiat_p256_mulx_u64(&x41, &x42, x1, UINT64_C(0xfffffffbffffffff)); fiat_p256_mulx_u64(&x43, &x44, x1, 0x3); fiat_p256_addcarryx_u64(&x45, &x46, 0x0, x44, x41); fiat_p256_addcarryx_u64(&x47, &x48, x46, x42, x39); fiat_p256_addcarryx_u64(&x49, &x50, x48, x40, x37); fiat_p256_addcarryx_u64(&x51, &x52, 0x0, x29, x43); fiat_p256_addcarryx_u64(&x53, &x54, x52, x31, x45); fiat_p256_addcarryx_u64(&x55, &x56, x54, x33, x47); fiat_p256_addcarryx_u64(&x57, &x58, x56, x35, x49); fiat_p256_mulx_u64(&x59, &x60, x51, UINT64_C(0xffffffff00000001)); fiat_p256_mulx_u64(&x61, &x62, x51, UINT32_C(0xffffffff)); fiat_p256_mulx_u64(&x63, &x64, x51, UINT64_C(0xffffffffffffffff)); fiat_p256_addcarryx_u64(&x65, &x66, 0x0, x64, x61); fiat_p256_addcarryx_u64(&x67, &x68, 0x0, x51, x63); fiat_p256_addcarryx_u64(&x69, &x70, x68, x53, x65); fiat_p256_addcarryx_u64(&x71, &x72, x70, x55, (x66 + x62)); fiat_p256_addcarryx_u64(&x73, &x74, x72, x57, x59); fiat_p256_addcarryx_u64(&x75, &x76, x74, (((uint64_t)x58 + x36) + (x50 + x38)), x60); fiat_p256_mulx_u64(&x77, &x78, x2, UINT64_C(0x4fffffffd)); fiat_p256_mulx_u64(&x79, &x80, x2, UINT64_C(0xfffffffffffffffe)); 
fiat_p256_mulx_u64(&x81, &x82, x2, UINT64_C(0xfffffffbffffffff)); fiat_p256_mulx_u64(&x83, &x84, x2, 0x3); fiat_p256_addcarryx_u64(&x85, &x86, 0x0, x84, x81); fiat_p256_addcarryx_u64(&x87, &x88, x86, x82, x79); fiat_p256_addcarryx_u64(&x89, &x90, x88, x80, x77); fiat_p256_addcarryx_u64(&x91, &x92, 0x0, x69, x83); fiat_p256_addcarryx_u64(&x93, &x94, x92, x71, x85); fiat_p256_addcarryx_u64(&x95, &x96, x94, x73, x87); fiat_p256_addcarryx_u64(&x97, &x98, x96, x75, x89); fiat_p256_mulx_u64(&x99, &x100, x91, UINT64_C(0xffffffff00000001)); fiat_p256_mulx_u64(&x101, &x102, x91, UINT32_C(0xffffffff)); fiat_p256_mulx_u64(&x103, &x104, x91, UINT64_C(0xffffffffffffffff)); fiat_p256_addcarryx_u64(&x105, &x106, 0x0, x104, x101); fiat_p256_addcarryx_u64(&x107, &x108, 0x0, x91, x103); fiat_p256_addcarryx_u64(&x109, &x110, x108, x93, x105); fiat_p256_addcarryx_u64(&x111, &x112, x110, x95, (x106 + x102)); fiat_p256_addcarryx_u64(&x113, &x114, x112, x97, x99); fiat_p256_addcarryx_u64(&x115, &x116, x114, (((uint64_t)x98 + x76) + (x90 + x78)), x100); fiat_p256_mulx_u64(&x117, &x118, x3, UINT64_C(0x4fffffffd)); fiat_p256_mulx_u64(&x119, &x120, x3, UINT64_C(0xfffffffffffffffe)); fiat_p256_mulx_u64(&x121, &x122, x3, UINT64_C(0xfffffffbffffffff)); fiat_p256_mulx_u64(&x123, &x124, x3, 0x3); fiat_p256_addcarryx_u64(&x125, &x126, 0x0, x124, x121); fiat_p256_addcarryx_u64(&x127, &x128, x126, x122, x119); fiat_p256_addcarryx_u64(&x129, &x130, x128, x120, x117); fiat_p256_addcarryx_u64(&x131, &x132, 0x0, x109, x123); fiat_p256_addcarryx_u64(&x133, &x134, x132, x111, x125); fiat_p256_addcarryx_u64(&x135, &x136, x134, x113, x127); fiat_p256_addcarryx_u64(&x137, &x138, x136, x115, x129); fiat_p256_mulx_u64(&x139, &x140, x131, UINT64_C(0xffffffff00000001)); fiat_p256_mulx_u64(&x141, &x142, x131, UINT32_C(0xffffffff)); fiat_p256_mulx_u64(&x143, &x144, x131, UINT64_C(0xffffffffffffffff)); fiat_p256_addcarryx_u64(&x145, &x146, 0x0, x144, x141); fiat_p256_addcarryx_u64(&x147, &x148, 0x0, x131, x143); 
fiat_p256_addcarryx_u64(&x149, &x150, x148, x133, x145); fiat_p256_addcarryx_u64(&x151, &x152, x150, x135, (x146 + x142)); fiat_p256_addcarryx_u64(&x153, &x154, x152, x137, x139); fiat_p256_addcarryx_u64(&x155, &x156, x154, (((uint64_t)x138 + x116) + (x130 + x118)), x140); fiat_p256_subborrowx_u64(&x157, &x158, 0x0, x149, UINT64_C(0xffffffffffffffff)); fiat_p256_subborrowx_u64(&x159, &x160, x158, x151, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u64(&x161, &x162, x160, x153, 0x0); fiat_p256_subborrowx_u64(&x163, &x164, x162, x155, UINT64_C(0xffffffff00000001)); fiat_p256_subborrowx_u64(&x165, &x166, x164, x156, 0x0); fiat_p256_cmovznz_u64(&x167, x166, x157, x149); fiat_p256_cmovznz_u64(&x168, x166, x159, x151); fiat_p256_cmovznz_u64(&x169, x166, x161, x153); fiat_p256_cmovznz_u64(&x170, x166, x163, x155); out1[0] = x167; out1[1] = x168; out1[2] = x169; out1[3] = x170; } /* * The function fiat_p256_nonzero outputs a single non-zero word if the input is non-zero and zero otherwise. * * Preconditions: * 0 ≤ eval arg1 < m * Postconditions: * out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0 * * Input Bounds: * arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * Output Bounds: * out1: [0x0 ~> 0xffffffffffffffff] */ static FIAT_P256_FIAT_INLINE void fiat_p256_nonzero(uint64_t* out1, const uint64_t arg1[4]) { uint64_t x1; x1 = ((arg1[0]) | ((arg1[1]) | ((arg1[2]) | (arg1[3])))); *out1 = x1; } /* * The function fiat_p256_selectznz is a multi-limb conditional select. 
* * Postconditions: * eval out1 = (if arg1 = 0 then eval arg2 else eval arg3) * * Input Bounds: * arg1: [0x0 ~> 0x1] * arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * Output Bounds: * out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */
/* NOTE(review): limb-wise branch-free select built on fiat_p256_cmovznz_u64 — no secret-dependent branches. */
static FIAT_P256_FIAT_INLINE void fiat_p256_selectznz(uint64_t out1[4], fiat_p256_uint1 arg1, const uint64_t arg2[4], const uint64_t arg3[4]) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; fiat_p256_cmovznz_u64(&x1, arg1, (arg2[0]), (arg3[0])); fiat_p256_cmovznz_u64(&x2, arg1, (arg2[1]), (arg3[1])); fiat_p256_cmovznz_u64(&x3, arg1, (arg2[2]), (arg3[2])); fiat_p256_cmovznz_u64(&x4, arg1, (arg2[3]), (arg3[3])); out1[0] = x1; out1[1] = x2; out1[2] = x3; out1[3] = x4; } /* * The function fiat_p256_to_bytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
* * Preconditions: * 0 ≤ eval arg1 < m * Postconditions: * out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31] * * Input Bounds: * arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * Output Bounds: * out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]] */
/* NOTE(review): straight little-endian limb decomposition via shift/mask; performs no reduction — caller must satisfy the 0 ≤ arg1 < m precondition. */
static FIAT_P256_FIAT_INLINE void fiat_p256_to_bytes(uint8_t out1[32], const uint64_t arg1[4]) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint8_t x5; uint64_t x6; uint8_t x7; uint64_t x8; uint8_t x9; uint64_t x10; uint8_t x11; uint64_t x12; uint8_t x13; uint64_t x14; uint8_t x15; uint64_t x16; uint8_t x17; uint8_t x18; uint8_t x19; uint64_t x20; uint8_t x21; uint64_t x22; uint8_t x23; uint64_t x24; uint8_t x25; uint64_t x26; uint8_t x27; uint64_t x28; uint8_t x29; uint64_t x30; uint8_t x31; uint8_t x32; uint8_t x33; uint64_t x34; uint8_t x35; uint64_t x36; uint8_t x37; uint64_t x38; uint8_t x39; uint64_t x40; uint8_t x41; uint64_t x42; uint8_t x43; uint64_t x44; uint8_t x45; uint8_t x46; uint8_t x47; uint64_t x48; uint8_t x49; uint64_t x50; uint8_t x51; uint64_t x52; uint8_t x53; uint64_t x54; uint8_t x55; uint64_t x56; uint8_t x57; uint64_t x58; uint8_t x59; uint8_t x60; x1 = (arg1[3]); x2 = (arg1[2]); x3 = (arg1[1]); x4 = (arg1[0]); x5 = (uint8_t)(x4 & UINT8_C(0xff)); x6 = (x4 >> 8); x7 = (uint8_t)(x6 & UINT8_C(0xff)); x8 = (x6 >> 8); x9 = (uint8_t)(x8 & UINT8_C(0xff)); x10 = (x8 >> 8); x11 = (uint8_t)(x10 & UINT8_C(0xff)); x12 = (x10 >> 8); x13 = (uint8_t)(x12 &
UINT8_C(0xff)); x14 = (x12 >> 8); x15 = (uint8_t)(x14 & UINT8_C(0xff)); x16 = (x14 >> 8); x17 = (uint8_t)(x16 & UINT8_C(0xff)); x18 = (uint8_t)(x16 >> 8); x19 = (uint8_t)(x3 & UINT8_C(0xff)); x20 = (x3 >> 8); x21 = (uint8_t)(x20 & UINT8_C(0xff)); x22 = (x20 >> 8); x23 = (uint8_t)(x22 & UINT8_C(0xff)); x24 = (x22 >> 8); x25 = (uint8_t)(x24 & UINT8_C(0xff)); x26 = (x24 >> 8); x27 = (uint8_t)(x26 & UINT8_C(0xff)); x28 = (x26 >> 8); x29 = (uint8_t)(x28 & UINT8_C(0xff)); x30 = (x28 >> 8); x31 = (uint8_t)(x30 & UINT8_C(0xff)); x32 = (uint8_t)(x30 >> 8); x33 = (uint8_t)(x2 & UINT8_C(0xff)); x34 = (x2 >> 8); x35 = (uint8_t)(x34 & UINT8_C(0xff)); x36 = (x34 >> 8); x37 = (uint8_t)(x36 & UINT8_C(0xff)); x38 = (x36 >> 8); x39 = (uint8_t)(x38 & UINT8_C(0xff)); x40 = (x38 >> 8); x41 = (uint8_t)(x40 & UINT8_C(0xff)); x42 = (x40 >> 8); x43 = (uint8_t)(x42 & UINT8_C(0xff)); x44 = (x42 >> 8); x45 = (uint8_t)(x44 & UINT8_C(0xff)); x46 = (uint8_t)(x44 >> 8); x47 = (uint8_t)(x1 & UINT8_C(0xff)); x48 = (x1 >> 8); x49 = (uint8_t)(x48 & UINT8_C(0xff)); x50 = (x48 >> 8); x51 = (uint8_t)(x50 & UINT8_C(0xff)); x52 = (x50 >> 8); x53 = (uint8_t)(x52 & UINT8_C(0xff)); x54 = (x52 >> 8); x55 = (uint8_t)(x54 & UINT8_C(0xff)); x56 = (x54 >> 8); x57 = (uint8_t)(x56 & UINT8_C(0xff)); x58 = (x56 >> 8); x59 = (uint8_t)(x58 & UINT8_C(0xff)); x60 = (uint8_t)(x58 >> 8); out1[0] = x5; out1[1] = x7; out1[2] = x9; out1[3] = x11; out1[4] = x13; out1[5] = x15; out1[6] = x17; out1[7] = x18; out1[8] = x19; out1[9] = x21; out1[10] = x23; out1[11] = x25; out1[12] = x27; out1[13] = x29; out1[14] = x31; out1[15] = x32; out1[16] = x33; out1[17] = x35; out1[18] = x37; out1[19] = x39; out1[20] = x41; out1[21] = x43; out1[22] = x45; out1[23] = x46; out1[24] = x47; out1[25] = x49; out1[26] = x51; out1[27] = x53; out1[28] = x55; out1[29] = x57; out1[30] = x59; out1[31] = x60; } /* * The function fiat_p256_from_bytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
* * Preconditions: * 0 ≤ bytes_eval arg1 < m * Postconditions: * eval out1 mod m = bytes_eval arg1 mod m * 0 ≤ eval out1 < m * * Input Bounds: * arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]] * Output Bounds: * out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */
/* NOTE(review): little-endian byte recomposition into four 64-bit limbs; shift/add only, no reduction — relies on bytes_eval arg1 < m. */
static FIAT_P256_FIAT_INLINE void fiat_p256_from_bytes(uint64_t out1[4], const uint8_t arg1[32]) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7; uint8_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12; uint64_t x13; uint64_t x14; uint64_t x15; uint8_t x16; uint64_t x17; uint64_t x18; uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint8_t x24; uint64_t x25; uint64_t x26; uint64_t x27; uint64_t x28; uint64_t x29; uint64_t x30; uint64_t x31; uint8_t x32; uint64_t x33; uint64_t x34; uint64_t x35; uint64_t x36; uint64_t x37; uint64_t x38; uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42; uint64_t x43; uint64_t x44; uint64_t x45; uint64_t x46; uint64_t x47; uint64_t x48; uint64_t x49; uint64_t x50; uint64_t x51; uint64_t x52; uint64_t x53; uint64_t x54; uint64_t x55; uint64_t x56; uint64_t x57; uint64_t x58; uint64_t x59; uint64_t x60; x1 = ((uint64_t)(arg1[31]) << 56); x2 = ((uint64_t)(arg1[30]) << 48); x3 = ((uint64_t)(arg1[29]) << 40); x4 = ((uint64_t)(arg1[28]) << 32); x5 = ((uint64_t)(arg1[27]) << 24); x6 = ((uint64_t)(arg1[26]) << 16); x7 = ((uint64_t)(arg1[25]) << 8); x8 = (arg1[24]); x9 = ((uint64_t)(arg1[23]) << 56);
x10 = ((uint64_t)(arg1[22]) << 48); x11 = ((uint64_t)(arg1[21]) << 40); x12 = ((uint64_t)(arg1[20]) << 32); x13 = ((uint64_t)(arg1[19]) << 24); x14 = ((uint64_t)(arg1[18]) << 16); x15 = ((uint64_t)(arg1[17]) << 8); x16 = (arg1[16]); x17 = ((uint64_t)(arg1[15]) << 56); x18 = ((uint64_t)(arg1[14]) << 48); x19 = ((uint64_t)(arg1[13]) << 40); x20 = ((uint64_t)(arg1[12]) << 32); x21 = ((uint64_t)(arg1[11]) << 24); x22 = ((uint64_t)(arg1[10]) << 16); x23 = ((uint64_t)(arg1[9]) << 8); x24 = (arg1[8]); x25 = ((uint64_t)(arg1[7]) << 56); x26 = ((uint64_t)(arg1[6]) << 48); x27 = ((uint64_t)(arg1[5]) << 40); x28 = ((uint64_t)(arg1[4]) << 32); x29 = ((uint64_t)(arg1[3]) << 24); x30 = ((uint64_t)(arg1[2]) << 16); x31 = ((uint64_t)(arg1[1]) << 8); x32 = (arg1[0]); x33 = (x31 + (uint64_t)x32); x34 = (x30 + x33); x35 = (x29 + x34); x36 = (x28 + x35); x37 = (x27 + x36); x38 = (x26 + x37); x39 = (x25 + x38); x40 = (x23 + (uint64_t)x24); x41 = (x22 + x40); x42 = (x21 + x41); x43 = (x20 + x42); x44 = (x19 + x43); x45 = (x18 + x44); x46 = (x17 + x45); x47 = (x15 + (uint64_t)x16); x48 = (x14 + x47); x49 = (x13 + x48); x50 = (x12 + x49); x51 = (x11 + x50); x52 = (x10 + x51); x53 = (x9 + x52); x54 = (x7 + (uint64_t)x8); x55 = (x6 + x54); x56 = (x5 + x55); x57 = (x4 + x56); x58 = (x3 + x57); x59 = (x2 + x58); x60 = (x1 + x59); out1[0] = x39; out1[1] = x46; out1[2] = x53; out1[3] = x60; } /* * The function fiat_p256_set_one returns the field element one in the Montgomery domain. * * Postconditions: * eval (from_montgomery out1) mod m = 1 mod m * 0 ≤ eval out1 < m * */
/* NOTE(review): these limbs encode R mod m (R = 2^256), i.e. 1 in Montgomery form; value checks out against m = 2^256 - 2^224 + 2^192 + 2^96 - 1. */
static FIAT_P256_FIAT_INLINE void fiat_p256_set_one(fiat_p256_montgomery_domain_field_element out1) { out1[0] = 0x1; out1[1] = UINT64_C(0xffffffff00000000); out1[2] = UINT64_C(0xffffffffffffffff); out1[3] = UINT32_C(0xfffffffe); } /* * The function fiat_p256_msat returns the saturated representation of the prime modulus.
* * Postconditions: * twos_complement_eval out1 = m * 0 ≤ eval out1 < m * * Output Bounds: * out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */
/* NOTE(review): limbs of m = 2^256 - 2^224 + 2^192 + 2^96 - 1; the fifth limb is 0, giving headroom for divstep's signed 5-limb arithmetic (see the 5-limb bounds below). */
static FIAT_P256_FIAT_INLINE void fiat_p256_msat(uint64_t out1[5]) { out1[0] = UINT64_C(0xffffffffffffffff); out1[1] = UINT32_C(0xffffffff); out1[2] = 0x0; out1[3] = UINT64_C(0xffffffff00000001); out1[4] = 0x0; } /* * The function fiat_p256_divstep computes a divstep. * * Preconditions: * 0 ≤ eval arg4 < m * 0 ≤ eval arg5 < m * Postconditions: * out1 = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then 1 - arg1 else 1 + arg1) * twos_complement_eval out2 = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then twos_complement_eval arg3 else twos_complement_eval arg2) * twos_complement_eval out3 = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then ⌊(twos_complement_eval arg3 - twos_complement_eval arg2) / 2⌋ else ⌊(twos_complement_eval arg3 + (twos_complement_eval arg3 mod 2) * twos_complement_eval arg2) / 2⌋) * eval (from_montgomery out4) mod m = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then (2 * eval (from_montgomery arg5)) mod m else (2 * eval (from_montgomery arg4)) mod m) * eval (from_montgomery out5) mod m = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then (eval (from_montgomery arg4) - eval (from_montgomery arg4)) mod m else (eval (from_montgomery arg5) + (twos_complement_eval arg3 mod 2) * eval (from_montgomery arg4)) mod m) * 0 ≤ eval out5 < m * 0 ≤ eval out5 < m * 0 ≤ eval out2 < m * 0 ≤ eval out3 < m * * Input Bounds: * arg1: [0x0 ~> 0xffffffffffffffff] * arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * arg4: [[0x0 ~>
0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * arg5: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * Output Bounds: * out1: [0x0 ~> 0xffffffffffffffff] * out2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * out3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * out4: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * out5: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */
/* NOTE(review): a single Bernstein–Yang divstep (cf. fiat_p256_divstep_precomp below); generated, proof-relevant ordering — do not hand-edit. */
static FIAT_P256_FIAT_INLINE void fiat_p256_divstep(uint64_t* out1, uint64_t out2[5], uint64_t out3[5], uint64_t out4[4], uint64_t out5[4], uint64_t arg1, const uint64_t arg2[5], const uint64_t arg3[5], const uint64_t arg4[4], const uint64_t arg5[4]) { uint64_t x1; fiat_p256_uint1 x2; fiat_p256_uint1 x3; uint64_t x4; fiat_p256_uint1 x5; uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12; fiat_p256_uint1 x13; uint64_t x14; fiat_p256_uint1 x15; uint64_t x16; fiat_p256_uint1 x17; uint64_t x18; fiat_p256_uint1 x19; uint64_t x20; fiat_p256_uint1 x21; uint64_t x22; uint64_t x23; uint64_t x24; uint64_t x25; uint64_t x26; uint64_t x27; uint64_t x28; uint64_t x29; uint64_t x30; uint64_t x31; fiat_p256_uint1 x32; uint64_t x33; fiat_p256_uint1 x34; uint64_t x35; fiat_p256_uint1 x36; uint64_t x37; fiat_p256_uint1 x38; uint64_t x39; fiat_p256_uint1 x40; uint64_t x41; fiat_p256_uint1 x42; uint64_t x43; fiat_p256_uint1 x44; uint64_t x45; fiat_p256_uint1 x46; uint64_t x47; fiat_p256_uint1 x48; uint64_t x49; uint64_t x50; uint64_t x51; uint64_t x52; uint64_t x53; fiat_p256_uint1 x54; uint64_t x55;
fiat_p256_uint1 x56; uint64_t x57; fiat_p256_uint1 x58; uint64_t x59; fiat_p256_uint1 x60; uint64_t x61; uint64_t x62; fiat_p256_uint1 x63; uint64_t x64; fiat_p256_uint1 x65; uint64_t x66; fiat_p256_uint1 x67; uint64_t x68; fiat_p256_uint1 x69; uint64_t x70; uint64_t x71; uint64_t x72; uint64_t x73; fiat_p256_uint1 x74; uint64_t x75; uint64_t x76; uint64_t x77; uint64_t x78; uint64_t x79; uint64_t x80; fiat_p256_uint1 x81; uint64_t x82; fiat_p256_uint1 x83; uint64_t x84; fiat_p256_uint1 x85; uint64_t x86; fiat_p256_uint1 x87; uint64_t x88; fiat_p256_uint1 x89; uint64_t x90; uint64_t x91; uint64_t x92; uint64_t x93; uint64_t x94; fiat_p256_uint1 x95; uint64_t x96; fiat_p256_uint1 x97; uint64_t x98; fiat_p256_uint1 x99; uint64_t x100; fiat_p256_uint1 x101; uint64_t x102; fiat_p256_uint1 x103; uint64_t x104; fiat_p256_uint1 x105; uint64_t x106; fiat_p256_uint1 x107; uint64_t x108; fiat_p256_uint1 x109; uint64_t x110; fiat_p256_uint1 x111; uint64_t x112; fiat_p256_uint1 x113; uint64_t x114; uint64_t x115; uint64_t x116; uint64_t x117; uint64_t x118; uint64_t x119; uint64_t x120; uint64_t x121; uint64_t x122; uint64_t x123; uint64_t x124; uint64_t x125; uint64_t x126; fiat_p256_addcarryx_u64(&x1, &x2, 0x0, (~arg1), 0x1); x3 = (fiat_p256_uint1)((fiat_p256_uint1)(x1 >> 63) & (fiat_p256_uint1)((arg3[0]) & 0x1)); fiat_p256_addcarryx_u64(&x4, &x5, 0x0, (~arg1), 0x1); fiat_p256_cmovznz_u64(&x6, x3, arg1, x4); fiat_p256_cmovznz_u64(&x7, x3, (arg2[0]), (arg3[0])); fiat_p256_cmovznz_u64(&x8, x3, (arg2[1]), (arg3[1])); fiat_p256_cmovznz_u64(&x9, x3, (arg2[2]), (arg3[2])); fiat_p256_cmovznz_u64(&x10, x3, (arg2[3]), (arg3[3])); fiat_p256_cmovznz_u64(&x11, x3, (arg2[4]), (arg3[4])); fiat_p256_addcarryx_u64(&x12, &x13, 0x0, 0x1, (~(arg2[0]))); fiat_p256_addcarryx_u64(&x14, &x15, x13, 0x0, (~(arg2[1]))); fiat_p256_addcarryx_u64(&x16, &x17, x15, 0x0, (~(arg2[2]))); fiat_p256_addcarryx_u64(&x18, &x19, x17, 0x0, (~(arg2[3]))); fiat_p256_addcarryx_u64(&x20, &x21, x19, 0x0, (~(arg2[4])));
fiat_p256_cmovznz_u64(&x22, x3, (arg3[0]), x12); fiat_p256_cmovznz_u64(&x23, x3, (arg3[1]), x14); fiat_p256_cmovznz_u64(&x24, x3, (arg3[2]), x16); fiat_p256_cmovznz_u64(&x25, x3, (arg3[3]), x18); fiat_p256_cmovznz_u64(&x26, x3, (arg3[4]), x20); fiat_p256_cmovznz_u64(&x27, x3, (arg4[0]), (arg5[0])); fiat_p256_cmovznz_u64(&x28, x3, (arg4[1]), (arg5[1])); fiat_p256_cmovznz_u64(&x29, x3, (arg4[2]), (arg5[2])); fiat_p256_cmovznz_u64(&x30, x3, (arg4[3]), (arg5[3])); fiat_p256_addcarryx_u64(&x31, &x32, 0x0, x27, x27); fiat_p256_addcarryx_u64(&x33, &x34, x32, x28, x28); fiat_p256_addcarryx_u64(&x35, &x36, x34, x29, x29); fiat_p256_addcarryx_u64(&x37, &x38, x36, x30, x30); fiat_p256_subborrowx_u64(&x39, &x40, 0x0, x31, UINT64_C(0xffffffffffffffff)); fiat_p256_subborrowx_u64(&x41, &x42, x40, x33, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u64(&x43, &x44, x42, x35, 0x0); fiat_p256_subborrowx_u64(&x45, &x46, x44, x37, UINT64_C(0xffffffff00000001)); fiat_p256_subborrowx_u64(&x47, &x48, x46, x38, 0x0); x49 = (arg4[3]); x50 = (arg4[2]); x51 = (arg4[1]); x52 = (arg4[0]); fiat_p256_subborrowx_u64(&x53, &x54, 0x0, 0x0, x52); fiat_p256_subborrowx_u64(&x55, &x56, x54, 0x0, x51); fiat_p256_subborrowx_u64(&x57, &x58, x56, 0x0, x50); fiat_p256_subborrowx_u64(&x59, &x60, x58, 0x0, x49); fiat_p256_cmovznz_u64(&x61, x60, 0x0, UINT64_C(0xffffffffffffffff)); fiat_p256_addcarryx_u64(&x62, &x63, 0x0, x53, x61); fiat_p256_addcarryx_u64(&x64, &x65, x63, x55, (x61 & UINT32_C(0xffffffff))); fiat_p256_addcarryx_u64(&x66, &x67, x65, x57, 0x0); fiat_p256_addcarryx_u64(&x68, &x69, x67, x59, (x61 & UINT64_C(0xffffffff00000001))); fiat_p256_cmovznz_u64(&x70, x3, (arg5[0]), x62); fiat_p256_cmovznz_u64(&x71, x3, (arg5[1]), x64); fiat_p256_cmovznz_u64(&x72, x3, (arg5[2]), x66); fiat_p256_cmovznz_u64(&x73, x3, (arg5[3]), x68); x74 = (fiat_p256_uint1)(x22 & 0x1); fiat_p256_cmovznz_u64(&x75, x74, 0x0, x7); fiat_p256_cmovznz_u64(&x76, x74, 0x0, x8); fiat_p256_cmovznz_u64(&x77, x74, 0x0, x9);
fiat_p256_cmovznz_u64(&x78, x74, 0x0, x10); fiat_p256_cmovznz_u64(&x79, x74, 0x0, x11); fiat_p256_addcarryx_u64(&x80, &x81, 0x0, x22, x75); fiat_p256_addcarryx_u64(&x82, &x83, x81, x23, x76); fiat_p256_addcarryx_u64(&x84, &x85, x83, x24, x77); fiat_p256_addcarryx_u64(&x86, &x87, x85, x25, x78); fiat_p256_addcarryx_u64(&x88, &x89, x87, x26, x79); fiat_p256_cmovznz_u64(&x90, x74, 0x0, x27); fiat_p256_cmovznz_u64(&x91, x74, 0x0, x28); fiat_p256_cmovznz_u64(&x92, x74, 0x0, x29); fiat_p256_cmovznz_u64(&x93, x74, 0x0, x30); fiat_p256_addcarryx_u64(&x94, &x95, 0x0, x70, x90); fiat_p256_addcarryx_u64(&x96, &x97, x95, x71, x91); fiat_p256_addcarryx_u64(&x98, &x99, x97, x72, x92); fiat_p256_addcarryx_u64(&x100, &x101, x99, x73, x93); fiat_p256_subborrowx_u64(&x102, &x103, 0x0, x94, UINT64_C(0xffffffffffffffff)); fiat_p256_subborrowx_u64(&x104, &x105, x103, x96, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u64(&x106, &x107, x105, x98, 0x0); fiat_p256_subborrowx_u64(&x108, &x109, x107, x100, UINT64_C(0xffffffff00000001)); fiat_p256_subborrowx_u64(&x110, &x111, x109, x101, 0x0); fiat_p256_addcarryx_u64(&x112, &x113, 0x0, x6, 0x1); x114 = ((x80 >> 1) | ((x82 << 63) & UINT64_C(0xffffffffffffffff))); x115 = ((x82 >> 1) | ((x84 << 63) & UINT64_C(0xffffffffffffffff))); x116 = ((x84 >> 1) | ((x86 << 63) & UINT64_C(0xffffffffffffffff))); x117 = ((x86 >> 1) | ((x88 << 63) & UINT64_C(0xffffffffffffffff))); x118 = ((x88 & UINT64_C(0x8000000000000000)) | (x88 >> 1)); fiat_p256_cmovznz_u64(&x119, x48, x39, x31); fiat_p256_cmovznz_u64(&x120, x48, x41, x33); fiat_p256_cmovznz_u64(&x121, x48, x43, x35); fiat_p256_cmovznz_u64(&x122, x48, x45, x37); fiat_p256_cmovznz_u64(&x123, x111, x102, x94); fiat_p256_cmovznz_u64(&x124, x111, x104, x96); fiat_p256_cmovznz_u64(&x125, x111, x106, x98); fiat_p256_cmovznz_u64(&x126, x111, x108, x100); *out1 = x112; out2[0] = x7; out2[1] = x8; out2[2] = x9; out2[3] = x10; out2[4] = x11; out3[0] = x114; out3[1] = x115; out3[2] = x116; out3[3] = x117; out3[4] =
x118; out4[0] = x119; out4[1] = x120; out4[2] = x121; out4[3] = x122; out5[0] = x123; out5[1] = x124; out5[2] = x125; out5[3] = x126; } /* * The function fiat_p256_divstep_precomp returns the precomputed value for Bernstein-Yang-inversion (in montgomery form). * * Postconditions: * eval (from_montgomery out1) = ⌊(m - 1) / 2⌋^(if ⌊log2 m⌋ + 1 < 46 then ⌊(49 * (⌊log2 m⌋ + 1) + 80) / 17⌋ else ⌊(49 * (⌊log2 m⌋ + 1) + 57) / 17⌋) * 0 ≤ eval out1 < m * * Output Bounds: * out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */
/* NOTE(review): constant table emitted by fiat-crypto for the Bernstein–Yang inversion driver; value is derived by the generator, not hand-computable — verify only by regeneration. */
static FIAT_P256_FIAT_INLINE void fiat_p256_divstep_precomp(uint64_t out1[4]) { out1[0] = UINT64_C(0x67ffffffb8000000); out1[1] = UINT64_C(0xc000000038000000); out1[2] = UINT64_C(0xd80000007fffffff); out1[3] = UINT64_C(0x2fffffffffffffff); } ================================================ FILE: Sources/CNIOBoringSSL/third_party/fiat/p256_64_msvc.h ================================================ /* Autogenerated: 'src/ExtractionOCaml/word_by_word_montgomery' --inline --static --use-value-barrier --no-wide-int p256 64 '2^256 - 2^224 + 2^192 + 2^96 - 1' mul square add sub opp from_montgomery to_montgomery nonzero selectznz to_bytes from_bytes one msat divstep divstep_precomp */ /* curve description: p256 */ /* machine_wordsize = 64 (from "64") */ /* requested operations: mul, square, add, sub, opp, from_montgomery, to_montgomery, nonzero, selectznz, to_bytes, from_bytes, one, msat, divstep, divstep_precomp */ /* m = 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff (from "2^256 - 2^224 + 2^192 + 2^96 - 1") */ /* */ /* NOTE: In addition to the bounds specified above each function, all */ /* functions synthesized for this Montgomery arithmetic require the */ /* input to be strictly less than the prime modulus (m), and also */ /* require the input to be in the unique saturated representation.
*/ /* All functions also ensure that these two properties are true of */ /* return values. */ /* */ /* Computed values: */ /* eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) */ /* bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) */ /* twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in */ /* if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256 */ #include #include #if defined(_M_X64) #include #endif typedef unsigned char fiat_p256_uint1; typedef signed char fiat_p256_int1; #define FIAT_P256_FIAT_INLINE inline /* The type fiat_p256_montgomery_domain_field_element is a field element in the Montgomery domain. */ /* Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */ typedef uint64_t fiat_p256_montgomery_domain_field_element[4]; /* The type fiat_p256_non_montgomery_domain_field_element is a field element NOT in the Montgomery domain. */ /* Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */ typedef uint64_t fiat_p256_non_montgomery_domain_field_element[4]; #if (-1 & 3) != 3 #error "This code only works on a two's complement system" #endif #define fiat_p256_value_barrier_u64(x) (x) /* * The function fiat_p256_addcarryx_u64 is an addition with carry. 
*
 * Postconditions:
 *   out1 = (arg1 + arg2 + arg3) mod 2^64
 *   out2 = ⌊(arg1 + arg2 + arg3) / 2^64⌋
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0x1]
 *   arg2: [0x0 ~> 0xffffffffffffffff]
 *   arg3: [0x0 ~> 0xffffffffffffffff]
 * Output Bounds:
 *   out1: [0x0 ~> 0xffffffffffffffff]
 *   out2: [0x0 ~> 0x1]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_addcarryx_u64(uint64_t* out1, fiat_p256_uint1* out2, fiat_p256_uint1 arg1, uint64_t arg2, uint64_t arg3) {
#if defined(_M_X64)
  /* On MSVC/x64, use the hardware add-with-carry intrinsic from <intrin.h>. */
  *out2 = _addcarry_u64(arg1, arg2, arg3, out1);
#else
  /* Portable fallback: perform the two additions in sequence, detecting the
   * carry out of each by the wrap-around comparison (sum < addend). */
  arg2 += arg1;
  arg1 = arg2 < arg1;
  arg3 += arg2;
  arg1 += arg3 < arg2;
  *out1 = arg3;
  *out2 = arg1;
#endif
}

/*
 * The function fiat_p256_subborrowx_u64 is a subtraction with borrow.
 *
 * Postconditions:
 *   out1 = (-arg1 + arg2 + -arg3) mod 2^64
 *   out2 = -⌊(-arg1 + arg2 + -arg3) / 2^64⌋
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0x1]
 *   arg2: [0x0 ~> 0xffffffffffffffff]
 *   arg3: [0x0 ~> 0xffffffffffffffff]
 * Output Bounds:
 *   out1: [0x0 ~> 0xffffffffffffffff]
 *   out2: [0x0 ~> 0x1]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_subborrowx_u64(uint64_t* out1, fiat_p256_uint1* out2, fiat_p256_uint1 arg1, uint64_t arg2, uint64_t arg3) {
#if defined(_M_X64)
  *out2 = _subborrow_u64(arg1, arg2, arg3, out1);  // NOTE: edited after generation
#else
  /* Portable fallback: the borrow out is set when arg2 < arg3, or when
   * arg2 == arg3 and the incoming borrow arg1 consumes the difference. */
  *out1 = arg2 - arg3 - arg1;
  *out2 = (arg2 < arg3) | ((arg2 == arg3) & arg1);
#endif
}

/*
 * The function fiat_p256_mulx_u64 is a multiplication, returning the full double-width result.
*
 * Postconditions:
 *   out1 = (arg1 * arg2) mod 2^64
 *   out2 = ⌊arg1 * arg2 / 2^64⌋
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0xffffffffffffffff]
 *   arg2: [0x0 ~> 0xffffffffffffffff]
 * Output Bounds:
 *   out1: [0x0 ~> 0xffffffffffffffff]
 *   out2: [0x0 ~> 0xffffffffffffffff]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_mulx_u64(uint64_t* out1, uint64_t* out2, uint64_t arg1, uint64_t arg2) {
  // NOTE: edited after generation
#if defined(_M_X64)
  /* x64: _umul128 returns the low 64 bits and stores the high 64 in *out2. */
  *out1 = _umul128(arg1, arg2, out2);
#elif defined(_M_ARM64)
  /* ARM64: low half via ordinary multiply, high half via __umulh. */
  *out1 = arg1 * arg2;
  *out2 = __umulh(arg1, arg2);
#else
  /* This translation unit only supports MSVC targets, per the error text. */
#error "This file is intended for MSVC on X64 or ARM64"
#endif
}

/*
 * The function fiat_p256_cmovznz_u64 is a single-word conditional move.
 *
 * Postconditions:
 *   out1 = (if arg1 = 0 then arg2 else arg3)
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0x1]
 *   arg2: [0x0 ~> 0xffffffffffffffff]
 *   arg3: [0x0 ~> 0xffffffffffffffff]
 * Output Bounds:
 *   out1: [0x0 ~> 0xffffffffffffffff]
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_cmovznz_u64(uint64_t* out1, fiat_p256_uint1 arg1, uint64_t arg2, uint64_t arg3) {
  fiat_p256_uint1 x1;
  uint64_t x2;
  uint64_t x3;
  /* Branchless select: x2 is an all-ones mask iff arg1 is nonzero (built by
   * sign-extending the normalized flag), then the mask picks arg3 over arg2.
   * fiat_p256_value_barrier_u64 is defined as the identity in this file. */
  x1 = (!(!arg1));
  x2 = ((fiat_p256_int1)(0x0 - x1) & UINT64_C(0xffffffffffffffff));
  x3 = ((fiat_p256_value_barrier_u64(x2) & arg3) | (fiat_p256_value_barrier_u64((~x2)) & arg2));
  *out1 = x3;
}

/*
 * The function fiat_p256_mul multiplies two field elements in the Montgomery domain.
*
 * Preconditions:
 *   0 ≤ eval arg1 < m
 *   0 ≤ eval arg2 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
 *   0 ≤ eval out1 < m
 *
 */
/* Generated word-by-word Montgomery multiplication (see the "Autogenerated"
 * banner in the file header).  The flat x1..x187 SSA form and exact statement
 * order are part of the verified artifact — do not hand-edit. */
static FIAT_P256_FIAT_INLINE void fiat_p256_mul(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1, const fiat_p256_montgomery_domain_field_element arg2) {
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6;
  uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12;
  uint64_t x13; fiat_p256_uint1 x14; uint64_t x15; fiat_p256_uint1 x16; uint64_t x17; fiat_p256_uint1 x18;
  uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint64_t x24;
  uint64_t x25; uint64_t x26; fiat_p256_uint1 x27; uint64_t x28; uint64_t x29; fiat_p256_uint1 x30;
  uint64_t x31; fiat_p256_uint1 x32; uint64_t x33; fiat_p256_uint1 x34; uint64_t x35; fiat_p256_uint1 x36;
  uint64_t x37; fiat_p256_uint1 x38; uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42;
  uint64_t x43; uint64_t x44; uint64_t x45; uint64_t x46; uint64_t x47; fiat_p256_uint1 x48;
  uint64_t x49; fiat_p256_uint1 x50; uint64_t x51; fiat_p256_uint1 x52; uint64_t x53; uint64_t x54;
  fiat_p256_uint1 x55; uint64_t x56; fiat_p256_uint1 x57; uint64_t x58; fiat_p256_uint1 x59; uint64_t x60;
  fiat_p256_uint1 x61; uint64_t x62; fiat_p256_uint1 x63; uint64_t x64; uint64_t x65; uint64_t x66;
  uint64_t x67; uint64_t x68; uint64_t x69; uint64_t x70; fiat_p256_uint1 x71; uint64_t x72;
  uint64_t x73; fiat_p256_uint1 x74; uint64_t x75; fiat_p256_uint1 x76; uint64_t x77; fiat_p256_uint1 x78;
  uint64_t x79; fiat_p256_uint1 x80; uint64_t x81; fiat_p256_uint1 x82; uint64_t x83; uint64_t x84;
  uint64_t x85; uint64_t x86; uint64_t x87; uint64_t x88; uint64_t x89; uint64_t x90;
  uint64_t x91; uint64_t x92; fiat_p256_uint1 x93; uint64_t x94; fiat_p256_uint1 x95; uint64_t x96;
  fiat_p256_uint1 x97; uint64_t x98; uint64_t x99; fiat_p256_uint1 x100; uint64_t x101; fiat_p256_uint1 x102;
  uint64_t x103; fiat_p256_uint1 x104; uint64_t x105; fiat_p256_uint1 x106; uint64_t x107; fiat_p256_uint1 x108;
  uint64_t x109; uint64_t x110; uint64_t x111; uint64_t x112; uint64_t x113; uint64_t x114;
  uint64_t x115; fiat_p256_uint1 x116; uint64_t x117; uint64_t x118; fiat_p256_uint1 x119; uint64_t x120;
  fiat_p256_uint1 x121; uint64_t x122; fiat_p256_uint1 x123; uint64_t x124; fiat_p256_uint1 x125; uint64_t x126;
  fiat_p256_uint1 x127; uint64_t x128; uint64_t x129; uint64_t x130; uint64_t x131; uint64_t x132;
  uint64_t x133; uint64_t x134; uint64_t x135; uint64_t x136; uint64_t x137; fiat_p256_uint1 x138;
  uint64_t x139; fiat_p256_uint1 x140; uint64_t x141; fiat_p256_uint1 x142; uint64_t x143; uint64_t x144;
  fiat_p256_uint1 x145; uint64_t x146; fiat_p256_uint1 x147; uint64_t x148; fiat_p256_uint1 x149; uint64_t x150;
  fiat_p256_uint1 x151; uint64_t x152; fiat_p256_uint1 x153; uint64_t x154; uint64_t x155; uint64_t x156;
  uint64_t x157; uint64_t x158; uint64_t x159; uint64_t x160; fiat_p256_uint1 x161; uint64_t x162;
  uint64_t x163; fiat_p256_uint1 x164; uint64_t x165; fiat_p256_uint1 x166; uint64_t x167; fiat_p256_uint1 x168;
  uint64_t x169; fiat_p256_uint1 x170; uint64_t x171; fiat_p256_uint1 x172; uint64_t x173; uint64_t x174;
  fiat_p256_uint1 x175; uint64_t x176; fiat_p256_uint1 x177; uint64_t x178; fiat_p256_uint1 x179; uint64_t x180;
  fiat_p256_uint1 x181; uint64_t x182; fiat_p256_uint1 x183; uint64_t x184; uint64_t x185; uint64_t x186;
  uint64_t x187;
  x1 = (arg1[1]);
  x2 = (arg1[2]);
  x3 = (arg1[3]);
  x4 = (arg1[0]);
  /* Round 0: partial products of limb arg1[0] (x4) with arg2. */
  fiat_p256_mulx_u64(&x5, &x6, x4, (arg2[3]));
  fiat_p256_mulx_u64(&x7, &x8, x4, (arg2[2]));
  fiat_p256_mulx_u64(&x9, &x10, x4, (arg2[1]));
  fiat_p256_mulx_u64(&x11, &x12, x4, (arg2[0]));
  fiat_p256_addcarryx_u64(&x13, &x14, 0x0, x12, x9);
  fiat_p256_addcarryx_u64(&x15, &x16, x14, x10, x7);
  fiat_p256_addcarryx_u64(&x17, &x18, x16, x8, x5);
  x19 = (x18 + x6);
  /* Montgomery reduction step; the 64-bit constants are the limbs of
   * m = 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff
   * (see the file header). */
  fiat_p256_mulx_u64(&x20, &x21, x11, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x22, &x23, x11, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x24, &x25, x11, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x26, &x27, 0x0, x25, x22);
  x28 = (x27 + x23);
  fiat_p256_addcarryx_u64(&x29, &x30, 0x0, x11, x24);
  fiat_p256_addcarryx_u64(&x31, &x32, x30, x13, x26);
  fiat_p256_addcarryx_u64(&x33, &x34, x32, x15, x28);
  fiat_p256_addcarryx_u64(&x35, &x36, x34, x17, x20);
  fiat_p256_addcarryx_u64(&x37, &x38, x36, x19, x21);
  /* Round 1: limb arg1[1] (x1), accumulate, then reduce. */
  fiat_p256_mulx_u64(&x39, &x40, x1, (arg2[3]));
  fiat_p256_mulx_u64(&x41, &x42, x1, (arg2[2]));
  fiat_p256_mulx_u64(&x43, &x44, x1, (arg2[1]));
  fiat_p256_mulx_u64(&x45, &x46, x1, (arg2[0]));
  fiat_p256_addcarryx_u64(&x47, &x48, 0x0, x46, x43);
  fiat_p256_addcarryx_u64(&x49, &x50, x48, x44, x41);
  fiat_p256_addcarryx_u64(&x51, &x52, x50, x42, x39);
  x53 = (x52 + x40);
  fiat_p256_addcarryx_u64(&x54, &x55, 0x0, x31, x45);
  fiat_p256_addcarryx_u64(&x56, &x57, x55, x33, x47);
  fiat_p256_addcarryx_u64(&x58, &x59, x57, x35, x49);
  fiat_p256_addcarryx_u64(&x60, &x61, x59, x37, x51);
  fiat_p256_addcarryx_u64(&x62, &x63, x61, x38, x53);
  fiat_p256_mulx_u64(&x64, &x65, x54, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x66, &x67, x54, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x68, &x69, x54, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x70, &x71, 0x0, x69, x66);
  x72 = (x71 + x67);
  fiat_p256_addcarryx_u64(&x73, &x74, 0x0, x54, x68);
  fiat_p256_addcarryx_u64(&x75, &x76, x74, x56, x70);
  fiat_p256_addcarryx_u64(&x77, &x78, x76, x58, x72);
  fiat_p256_addcarryx_u64(&x79, &x80, x78, x60, x64);
  fiat_p256_addcarryx_u64(&x81, &x82, x80, x62, x65);
  x83 = ((uint64_t)x82 + x63);
  /* Round 2: limb arg1[2] (x2), accumulate, then reduce. */
  fiat_p256_mulx_u64(&x84, &x85, x2, (arg2[3]));
  fiat_p256_mulx_u64(&x86, &x87, x2, (arg2[2]));
  fiat_p256_mulx_u64(&x88, &x89, x2, (arg2[1]));
  fiat_p256_mulx_u64(&x90, &x91, x2, (arg2[0]));
  fiat_p256_addcarryx_u64(&x92, &x93, 0x0, x91, x88);
  fiat_p256_addcarryx_u64(&x94, &x95, x93, x89, x86);
  fiat_p256_addcarryx_u64(&x96, &x97, x95, x87, x84);
  x98 = (x97 + x85);
  fiat_p256_addcarryx_u64(&x99, &x100, 0x0, x75, x90);
  fiat_p256_addcarryx_u64(&x101, &x102, x100, x77, x92);
  fiat_p256_addcarryx_u64(&x103, &x104, x102, x79, x94);
  fiat_p256_addcarryx_u64(&x105, &x106, x104, x81, x96);
  fiat_p256_addcarryx_u64(&x107, &x108, x106, x83, x98);
  fiat_p256_mulx_u64(&x109, &x110, x99, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x111, &x112, x99, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x113, &x114, x99, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x115, &x116, 0x0, x114, x111);
  x117 = (x116 + x112);
  fiat_p256_addcarryx_u64(&x118, &x119, 0x0, x99, x113);
  fiat_p256_addcarryx_u64(&x120, &x121, x119, x101, x115);
  fiat_p256_addcarryx_u64(&x122, &x123, x121, x103, x117);
  fiat_p256_addcarryx_u64(&x124, &x125, x123, x105, x109);
  fiat_p256_addcarryx_u64(&x126, &x127, x125, x107, x110);
  x128 = ((uint64_t)x127 + x108);
  /* Round 3: limb arg1[3] (x3), accumulate, then reduce. */
  fiat_p256_mulx_u64(&x129, &x130, x3, (arg2[3]));
  fiat_p256_mulx_u64(&x131, &x132, x3, (arg2[2]));
  fiat_p256_mulx_u64(&x133, &x134, x3, (arg2[1]));
  fiat_p256_mulx_u64(&x135, &x136, x3, (arg2[0]));
  fiat_p256_addcarryx_u64(&x137, &x138, 0x0, x136, x133);
  fiat_p256_addcarryx_u64(&x139, &x140, x138, x134, x131);
  fiat_p256_addcarryx_u64(&x141, &x142, x140, x132, x129);
  x143 = (x142 + x130);
  fiat_p256_addcarryx_u64(&x144, &x145, 0x0, x120, x135);
  fiat_p256_addcarryx_u64(&x146, &x147, x145, x122, x137);
  fiat_p256_addcarryx_u64(&x148, &x149, x147, x124, x139);
  fiat_p256_addcarryx_u64(&x150, &x151, x149, x126, x141);
  fiat_p256_addcarryx_u64(&x152, &x153, x151, x128, x143);
  fiat_p256_mulx_u64(&x154, &x155, x144, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x156, &x157, x144, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x158, &x159, x144, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x160, &x161, 0x0, x159, x156);
  x162 = (x161 + x157);
  fiat_p256_addcarryx_u64(&x163, &x164, 0x0, x144, x158);
  fiat_p256_addcarryx_u64(&x165, &x166, x164, x146, x160);
  fiat_p256_addcarryx_u64(&x167, &x168, x166, x148, x162);
  fiat_p256_addcarryx_u64(&x169, &x170, x168, x150, x154);
  fiat_p256_addcarryx_u64(&x171, &x172, x170, x152, x155);
  x173 = ((uint64_t)x172 + x153);
  /* Final conditional subtraction: subtract m, then select the pre- or
   * post-subtraction value on the final borrow x183 (constant-time via
   * fiat_p256_cmovznz_u64) to bring the result into [0, m). */
  fiat_p256_subborrowx_u64(&x174, &x175, 0x0, x165, UINT64_C(0xffffffffffffffff));
  fiat_p256_subborrowx_u64(&x176, &x177, x175, x167, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u64(&x178, &x179, x177, x169, 0x0);
  fiat_p256_subborrowx_u64(&x180, &x181, x179, x171, UINT64_C(0xffffffff00000001));
  fiat_p256_subborrowx_u64(&x182, &x183, x181, x173, 0x0);
  fiat_p256_cmovznz_u64(&x184, x183, x174, x165);
  fiat_p256_cmovznz_u64(&x185, x183, x176, x167);
  fiat_p256_cmovznz_u64(&x186, x183, x178, x169);
  fiat_p256_cmovznz_u64(&x187, x183, x180, x171);
  out1[0] = x184;
  out1[1] = x185;
  out1[2] = x186;
  out1[3] = x187;
}

/*
 * The function fiat_p256_square squares a field element in the Montgomery domain.
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
 *   0 ≤ eval out1 < m
 *
 */
/* Identical generated schedule to fiat_p256_mul with arg2 replaced by arg1. */
static FIAT_P256_FIAT_INLINE void fiat_p256_square(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1) {
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6;
  uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12;
  uint64_t x13; fiat_p256_uint1 x14; uint64_t x15; fiat_p256_uint1 x16; uint64_t x17; fiat_p256_uint1 x18;
  uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint64_t x24;
  uint64_t x25; uint64_t x26; fiat_p256_uint1 x27; uint64_t x28; uint64_t x29; fiat_p256_uint1 x30;
  uint64_t x31; fiat_p256_uint1 x32; uint64_t x33; fiat_p256_uint1 x34; uint64_t x35; fiat_p256_uint1 x36;
  uint64_t x37; fiat_p256_uint1 x38; uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42;
  uint64_t x43; uint64_t x44; uint64_t x45; uint64_t x46; uint64_t x47; fiat_p256_uint1 x48;
  uint64_t x49; fiat_p256_uint1 x50; uint64_t x51; fiat_p256_uint1 x52; uint64_t x53;
uint64_t x54; fiat_p256_uint1 x55; uint64_t x56; fiat_p256_uint1 x57; uint64_t x58; fiat_p256_uint1 x59;
  uint64_t x60; fiat_p256_uint1 x61; uint64_t x62; fiat_p256_uint1 x63; uint64_t x64; uint64_t x65;
  uint64_t x66; uint64_t x67; uint64_t x68; uint64_t x69; uint64_t x70; fiat_p256_uint1 x71;
  uint64_t x72; uint64_t x73; fiat_p256_uint1 x74; uint64_t x75; fiat_p256_uint1 x76; uint64_t x77;
  fiat_p256_uint1 x78; uint64_t x79; fiat_p256_uint1 x80; uint64_t x81; fiat_p256_uint1 x82; uint64_t x83;
  uint64_t x84; uint64_t x85; uint64_t x86; uint64_t x87; uint64_t x88; uint64_t x89;
  uint64_t x90; uint64_t x91; uint64_t x92; fiat_p256_uint1 x93; uint64_t x94; fiat_p256_uint1 x95;
  uint64_t x96; fiat_p256_uint1 x97; uint64_t x98; uint64_t x99; fiat_p256_uint1 x100; uint64_t x101;
  fiat_p256_uint1 x102; uint64_t x103; fiat_p256_uint1 x104; uint64_t x105; fiat_p256_uint1 x106; uint64_t x107;
  fiat_p256_uint1 x108; uint64_t x109; uint64_t x110; uint64_t x111; uint64_t x112; uint64_t x113;
  uint64_t x114; uint64_t x115; fiat_p256_uint1 x116; uint64_t x117; uint64_t x118; fiat_p256_uint1 x119;
  uint64_t x120; fiat_p256_uint1 x121; uint64_t x122; fiat_p256_uint1 x123; uint64_t x124; fiat_p256_uint1 x125;
  uint64_t x126; fiat_p256_uint1 x127; uint64_t x128; uint64_t x129; uint64_t x130; uint64_t x131;
  uint64_t x132; uint64_t x133; uint64_t x134; uint64_t x135; uint64_t x136; uint64_t x137;
  fiat_p256_uint1 x138; uint64_t x139; fiat_p256_uint1 x140; uint64_t x141; fiat_p256_uint1 x142; uint64_t x143;
  uint64_t x144; fiat_p256_uint1 x145; uint64_t x146; fiat_p256_uint1 x147; uint64_t x148; fiat_p256_uint1 x149;
  uint64_t x150; fiat_p256_uint1 x151; uint64_t x152; fiat_p256_uint1 x153; uint64_t x154; uint64_t x155;
  uint64_t x156; uint64_t x157; uint64_t x158; uint64_t x159; uint64_t x160; fiat_p256_uint1 x161;
  uint64_t x162; uint64_t x163; fiat_p256_uint1 x164; uint64_t x165; fiat_p256_uint1 x166; uint64_t x167;
  fiat_p256_uint1 x168; uint64_t x169; fiat_p256_uint1 x170; uint64_t x171; fiat_p256_uint1 x172; uint64_t x173;
  uint64_t x174; fiat_p256_uint1 x175; uint64_t x176; fiat_p256_uint1 x177; uint64_t x178; fiat_p256_uint1 x179;
  uint64_t x180; fiat_p256_uint1 x181; uint64_t x182; fiat_p256_uint1 x183; uint64_t x184; uint64_t x185;
  uint64_t x186; uint64_t x187;
  x1 = (arg1[1]);
  x2 = (arg1[2]);
  x3 = (arg1[3]);
  x4 = (arg1[0]);
  /* Round 0: partial products of limb arg1[0] (x4) with arg1 itself. */
  fiat_p256_mulx_u64(&x5, &x6, x4, (arg1[3]));
  fiat_p256_mulx_u64(&x7, &x8, x4, (arg1[2]));
  fiat_p256_mulx_u64(&x9, &x10, x4, (arg1[1]));
  fiat_p256_mulx_u64(&x11, &x12, x4, (arg1[0]));
  fiat_p256_addcarryx_u64(&x13, &x14, 0x0, x12, x9);
  fiat_p256_addcarryx_u64(&x15, &x16, x14, x10, x7);
  fiat_p256_addcarryx_u64(&x17, &x18, x16, x8, x5);
  x19 = (x18 + x6);
  /* Montgomery reduction step; constants are the limbs of m (see file header). */
  fiat_p256_mulx_u64(&x20, &x21, x11, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x22, &x23, x11, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x24, &x25, x11, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x26, &x27, 0x0, x25, x22);
  x28 = (x27 + x23);
  fiat_p256_addcarryx_u64(&x29, &x30, 0x0, x11, x24);
  fiat_p256_addcarryx_u64(&x31, &x32, x30, x13, x26);
  fiat_p256_addcarryx_u64(&x33, &x34, x32, x15, x28);
  fiat_p256_addcarryx_u64(&x35, &x36, x34, x17, x20);
  fiat_p256_addcarryx_u64(&x37, &x38, x36, x19, x21);
  /* Round 1: limb arg1[1] (x1), accumulate, then reduce. */
  fiat_p256_mulx_u64(&x39, &x40, x1, (arg1[3]));
  fiat_p256_mulx_u64(&x41, &x42, x1, (arg1[2]));
  fiat_p256_mulx_u64(&x43, &x44, x1, (arg1[1]));
  fiat_p256_mulx_u64(&x45, &x46, x1, (arg1[0]));
  fiat_p256_addcarryx_u64(&x47, &x48, 0x0, x46, x43);
  fiat_p256_addcarryx_u64(&x49, &x50, x48, x44, x41);
  fiat_p256_addcarryx_u64(&x51, &x52, x50, x42, x39);
  x53 = (x52 + x40);
  fiat_p256_addcarryx_u64(&x54, &x55, 0x0, x31, x45);
  fiat_p256_addcarryx_u64(&x56, &x57, x55, x33, x47);
  fiat_p256_addcarryx_u64(&x58, &x59, x57, x35, x49);
  fiat_p256_addcarryx_u64(&x60, &x61, x59, x37, x51);
  fiat_p256_addcarryx_u64(&x62, &x63, x61, x38, x53);
  fiat_p256_mulx_u64(&x64, &x65, x54, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x66, &x67, x54, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x68, &x69, x54, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x70, &x71, 0x0, x69, x66);
  x72 = (x71 + x67);
  fiat_p256_addcarryx_u64(&x73, &x74, 0x0, x54, x68);
  fiat_p256_addcarryx_u64(&x75, &x76, x74, x56, x70);
  fiat_p256_addcarryx_u64(&x77, &x78, x76, x58, x72);
  fiat_p256_addcarryx_u64(&x79, &x80, x78, x60, x64);
  fiat_p256_addcarryx_u64(&x81, &x82, x80, x62, x65);
  x83 = ((uint64_t)x82 + x63);
  /* Round 2: limb arg1[2] (x2), accumulate, then reduce. */
  fiat_p256_mulx_u64(&x84, &x85, x2, (arg1[3]));
  fiat_p256_mulx_u64(&x86, &x87, x2, (arg1[2]));
  fiat_p256_mulx_u64(&x88, &x89, x2, (arg1[1]));
  fiat_p256_mulx_u64(&x90, &x91, x2, (arg1[0]));
  fiat_p256_addcarryx_u64(&x92, &x93, 0x0, x91, x88);
  fiat_p256_addcarryx_u64(&x94, &x95, x93, x89, x86);
  fiat_p256_addcarryx_u64(&x96, &x97, x95, x87, x84);
  x98 = (x97 + x85);
  fiat_p256_addcarryx_u64(&x99, &x100, 0x0, x75, x90);
  fiat_p256_addcarryx_u64(&x101, &x102, x100, x77, x92);
  fiat_p256_addcarryx_u64(&x103, &x104, x102, x79, x94);
  fiat_p256_addcarryx_u64(&x105, &x106, x104, x81, x96);
  fiat_p256_addcarryx_u64(&x107, &x108, x106, x83, x98);
  fiat_p256_mulx_u64(&x109, &x110, x99, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x111, &x112, x99, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x113, &x114, x99, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x115, &x116, 0x0, x114, x111);
  x117 = (x116 + x112);
  fiat_p256_addcarryx_u64(&x118, &x119, 0x0, x99, x113);
  fiat_p256_addcarryx_u64(&x120, &x121, x119, x101, x115);
  fiat_p256_addcarryx_u64(&x122, &x123, x121, x103, x117);
  fiat_p256_addcarryx_u64(&x124, &x125, x123, x105, x109);
  fiat_p256_addcarryx_u64(&x126, &x127, x125, x107, x110);
  x128 = ((uint64_t)x127 + x108);
  /* Round 3: limb arg1[3] (x3), accumulate, then reduce. */
  fiat_p256_mulx_u64(&x129, &x130, x3, (arg1[3]));
  fiat_p256_mulx_u64(&x131, &x132, x3, (arg1[2]));
  fiat_p256_mulx_u64(&x133, &x134, x3, (arg1[1]));
  fiat_p256_mulx_u64(&x135, &x136, x3, (arg1[0]));
  fiat_p256_addcarryx_u64(&x137, &x138, 0x0, x136, x133);
  fiat_p256_addcarryx_u64(&x139, &x140, x138, x134, x131);
  fiat_p256_addcarryx_u64(&x141, &x142, x140, x132, x129);
  x143 = (x142 + x130);
  fiat_p256_addcarryx_u64(&x144, &x145, 0x0, x120, x135);
  fiat_p256_addcarryx_u64(&x146, &x147, x145, x122, x137);
  fiat_p256_addcarryx_u64(&x148, &x149, x147, x124, x139);
  fiat_p256_addcarryx_u64(&x150, &x151, x149, x126, x141);
  fiat_p256_addcarryx_u64(&x152, &x153, x151, x128, x143);
  fiat_p256_mulx_u64(&x154, &x155, x144, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x156, &x157, x144, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x158, &x159, x144, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x160, &x161, 0x0, x159, x156);
  x162 = (x161 + x157);
  fiat_p256_addcarryx_u64(&x163, &x164, 0x0, x144, x158);
  fiat_p256_addcarryx_u64(&x165, &x166, x164, x146, x160);
  fiat_p256_addcarryx_u64(&x167, &x168, x166, x148, x162);
  fiat_p256_addcarryx_u64(&x169, &x170, x168, x150, x154);
  fiat_p256_addcarryx_u64(&x171, &x172, x170, x152, x155);
  x173 = ((uint64_t)x172 + x153);
  /* Final conditional subtraction of m, selected on the borrow x183. */
  fiat_p256_subborrowx_u64(&x174, &x175, 0x0, x165, UINT64_C(0xffffffffffffffff));
  fiat_p256_subborrowx_u64(&x176, &x177, x175, x167, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u64(&x178, &x179, x177, x169, 0x0);
  fiat_p256_subborrowx_u64(&x180, &x181, x179, x171, UINT64_C(0xffffffff00000001));
  fiat_p256_subborrowx_u64(&x182, &x183, x181, x173, 0x0);
  fiat_p256_cmovznz_u64(&x184, x183, x174, x165);
  fiat_p256_cmovznz_u64(&x185, x183, x176, x167);
  fiat_p256_cmovznz_u64(&x186, x183, x178, x169);
  fiat_p256_cmovznz_u64(&x187, x183, x180, x171);
  out1[0] = x184;
  out1[1] = x185;
  out1[2] = x186;
  out1[3] = x187;
}

/*
 * The function fiat_p256_add adds two field elements in the Montgomery domain.
*
 * Preconditions:
 *   0 ≤ eval arg1 < m
 *   0 ≤ eval arg2 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
 *   0 ≤ eval out1 < m
 *
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_add(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1, const fiat_p256_montgomery_domain_field_element arg2) {
  uint64_t x1; fiat_p256_uint1 x2; uint64_t x3; fiat_p256_uint1 x4;
  uint64_t x5; fiat_p256_uint1 x6; uint64_t x7; fiat_p256_uint1 x8;
  uint64_t x9; fiat_p256_uint1 x10; uint64_t x11; fiat_p256_uint1 x12;
  uint64_t x13; fiat_p256_uint1 x14; uint64_t x15; fiat_p256_uint1 x16;
  uint64_t x17; fiat_p256_uint1 x18; uint64_t x19; uint64_t x20;
  uint64_t x21; uint64_t x22;
  /* Limbwise add with carry propagation. */
  fiat_p256_addcarryx_u64(&x1, &x2, 0x0, (arg1[0]), (arg2[0]));
  fiat_p256_addcarryx_u64(&x3, &x4, x2, (arg1[1]), (arg2[1]));
  fiat_p256_addcarryx_u64(&x5, &x6, x4, (arg1[2]), (arg2[2]));
  fiat_p256_addcarryx_u64(&x7, &x8, x6, (arg1[3]), (arg2[3]));
  /* Subtract m (limbs are the constants below), then select the pre- or
   * post-subtraction value on the final borrow x18, constant-time. */
  fiat_p256_subborrowx_u64(&x9, &x10, 0x0, x1, UINT64_C(0xffffffffffffffff));
  fiat_p256_subborrowx_u64(&x11, &x12, x10, x3, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u64(&x13, &x14, x12, x5, 0x0);
  fiat_p256_subborrowx_u64(&x15, &x16, x14, x7, UINT64_C(0xffffffff00000001));
  fiat_p256_subborrowx_u64(&x17, &x18, x16, x8, 0x0);
  fiat_p256_cmovznz_u64(&x19, x18, x9, x1);
  fiat_p256_cmovznz_u64(&x20, x18, x11, x3);
  fiat_p256_cmovznz_u64(&x21, x18, x13, x5);
  fiat_p256_cmovznz_u64(&x22, x18, x15, x7);
  out1[0] = x19;
  out1[1] = x20;
  out1[2] = x21;
  out1[3] = x22;
}

/*
 * The function fiat_p256_sub subtracts two field elements in the Montgomery domain.
*
 * Preconditions:
 *   0 ≤ eval arg1 < m
 *   0 ≤ eval arg2 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
 *   0 ≤ eval out1 < m
 *
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_sub(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1, const fiat_p256_montgomery_domain_field_element arg2) {
  uint64_t x1; fiat_p256_uint1 x2; uint64_t x3; fiat_p256_uint1 x4;
  uint64_t x5; fiat_p256_uint1 x6; uint64_t x7; fiat_p256_uint1 x8;
  uint64_t x9; uint64_t x10; fiat_p256_uint1 x11; uint64_t x12;
  fiat_p256_uint1 x13; uint64_t x14; fiat_p256_uint1 x15; uint64_t x16;
  fiat_p256_uint1 x17;
  /* Limbwise subtract with borrow propagation. */
  fiat_p256_subborrowx_u64(&x1, &x2, 0x0, (arg1[0]), (arg2[0]));
  fiat_p256_subborrowx_u64(&x3, &x4, x2, (arg1[1]), (arg2[1]));
  fiat_p256_subborrowx_u64(&x5, &x6, x4, (arg1[2]), (arg2[2]));
  fiat_p256_subborrowx_u64(&x7, &x8, x6, (arg1[3]), (arg2[3]));
  /* x9 is all-ones iff the subtraction underflowed (final borrow x8 set);
   * add m back masked by x9 so the result lands in [0, m), constant-time. */
  fiat_p256_cmovznz_u64(&x9, x8, 0x0, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x10, &x11, 0x0, x1, x9);
  fiat_p256_addcarryx_u64(&x12, &x13, x11, x3, (x9 & UINT32_C(0xffffffff)));
  fiat_p256_addcarryx_u64(&x14, &x15, x13, x5, 0x0);
  fiat_p256_addcarryx_u64(&x16, &x17, x15, x7, (x9 & UINT64_C(0xffffffff00000001)));
  out1[0] = x10;
  out1[1] = x12;
  out1[2] = x14;
  out1[3] = x16;
}

/*
 * The function fiat_p256_opp negates a field element in the Montgomery domain.
*
 * Preconditions:
 *   0 ≤ eval arg1 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m
 *   0 ≤ eval out1 < m
 *
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_opp(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1) {
  uint64_t x1; fiat_p256_uint1 x2; uint64_t x3; fiat_p256_uint1 x4;
  uint64_t x5; fiat_p256_uint1 x6; uint64_t x7; fiat_p256_uint1 x8;
  uint64_t x9; uint64_t x10; fiat_p256_uint1 x11; uint64_t x12;
  fiat_p256_uint1 x13; uint64_t x14; fiat_p256_uint1 x15; uint64_t x16;
  fiat_p256_uint1 x17;
  /* Compute 0 - arg1 limbwise with borrow propagation. */
  fiat_p256_subborrowx_u64(&x1, &x2, 0x0, 0x0, (arg1[0]));
  fiat_p256_subborrowx_u64(&x3, &x4, x2, 0x0, (arg1[1]));
  fiat_p256_subborrowx_u64(&x5, &x6, x4, 0x0, (arg1[2]));
  fiat_p256_subborrowx_u64(&x7, &x8, x6, 0x0, (arg1[3]));
  /* Same masked correction as fiat_p256_sub: add m back iff there was a
   * final borrow (x9 is an all-ones mask in that case), constant-time. */
  fiat_p256_cmovznz_u64(&x9, x8, 0x0, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x10, &x11, 0x0, x1, x9);
  fiat_p256_addcarryx_u64(&x12, &x13, x11, x3, (x9 & UINT32_C(0xffffffff)));
  fiat_p256_addcarryx_u64(&x14, &x15, x13, x5, 0x0);
  fiat_p256_addcarryx_u64(&x16, &x17, x15, x7, (x9 & UINT64_C(0xffffffff00000001)));
  out1[0] = x10;
  out1[1] = x12;
  out1[2] = x14;
  out1[3] = x16;
}

/*
 * The function fiat_p256_from_montgomery translates a field element out of the Montgomery domain.
*
 * Preconditions:
 *   0 ≤ eval arg1 < m
 * Postconditions:
 *   eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
 *   0 ≤ eval out1 < m
 *
 */
/* Four reduction-only Montgomery rounds (one per limb of arg1, no second
 * operand), followed by a final conditional subtraction of m.  Generated
 * code — do not hand-edit. */
static FIAT_P256_FIAT_INLINE void fiat_p256_from_montgomery(fiat_p256_non_montgomery_domain_field_element out1, const fiat_p256_montgomery_domain_field_element arg1) {
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6;
  uint64_t x7; uint64_t x8; fiat_p256_uint1 x9; uint64_t x10; fiat_p256_uint1 x11; uint64_t x12;
  fiat_p256_uint1 x13; uint64_t x14; fiat_p256_uint1 x15; uint64_t x16; uint64_t x17; uint64_t x18;
  uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; fiat_p256_uint1 x23; uint64_t x24;
  fiat_p256_uint1 x25; uint64_t x26; fiat_p256_uint1 x27; uint64_t x28; fiat_p256_uint1 x29; uint64_t x30;
  fiat_p256_uint1 x31; uint64_t x32; fiat_p256_uint1 x33; uint64_t x34; fiat_p256_uint1 x35; uint64_t x36;
  fiat_p256_uint1 x37; uint64_t x38; uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42;
  uint64_t x43; uint64_t x44; fiat_p256_uint1 x45; uint64_t x46; fiat_p256_uint1 x47; uint64_t x48;
  fiat_p256_uint1 x49; uint64_t x50; fiat_p256_uint1 x51; uint64_t x52; fiat_p256_uint1 x53; uint64_t x54;
  fiat_p256_uint1 x55; uint64_t x56; fiat_p256_uint1 x57; uint64_t x58; fiat_p256_uint1 x59; uint64_t x60;
  uint64_t x61; uint64_t x62; uint64_t x63; uint64_t x64; uint64_t x65; uint64_t x66;
  fiat_p256_uint1 x67; uint64_t x68; fiat_p256_uint1 x69; uint64_t x70; fiat_p256_uint1 x71; uint64_t x72;
  fiat_p256_uint1 x73; uint64_t x74; fiat_p256_uint1 x75; uint64_t x76; uint64_t x77; fiat_p256_uint1 x78;
  uint64_t x79; fiat_p256_uint1 x80; uint64_t x81; fiat_p256_uint1 x82; uint64_t x83; fiat_p256_uint1 x84;
  uint64_t x85; fiat_p256_uint1 x86; uint64_t x87; uint64_t x88; uint64_t x89; uint64_t x90;
  x1 = (arg1[0]);
  /* Reduction round for limb 0; constants are the limbs of m (file header). */
  fiat_p256_mulx_u64(&x2, &x3, x1, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x4, &x5, x1, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x6, &x7, x1, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x8, &x9, 0x0, x7, x4);
  fiat_p256_addcarryx_u64(&x10, &x11, 0x0, x1, x6);
  fiat_p256_addcarryx_u64(&x12, &x13, x11, 0x0, x8);
  fiat_p256_addcarryx_u64(&x14, &x15, 0x0, x12, (arg1[1]));
  /* Reduction round for limb 1. */
  fiat_p256_mulx_u64(&x16, &x17, x14, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x18, &x19, x14, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x20, &x21, x14, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x22, &x23, 0x0, x21, x18);
  fiat_p256_addcarryx_u64(&x24, &x25, 0x0, x14, x20);
  fiat_p256_addcarryx_u64(&x26, &x27, x25, (x15 + (x13 + (x9 + x5))), x22);
  fiat_p256_addcarryx_u64(&x28, &x29, x27, x2, (x23 + x19));
  fiat_p256_addcarryx_u64(&x30, &x31, x29, x3, x16);
  fiat_p256_addcarryx_u64(&x32, &x33, 0x0, x26, (arg1[2]));
  fiat_p256_addcarryx_u64(&x34, &x35, x33, x28, 0x0);
  fiat_p256_addcarryx_u64(&x36, &x37, x35, x30, 0x0);
  /* Reduction round for limb 2. */
  fiat_p256_mulx_u64(&x38, &x39, x32, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x40, &x41, x32, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x42, &x43, x32, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x44, &x45, 0x0, x43, x40);
  fiat_p256_addcarryx_u64(&x46, &x47, 0x0, x32, x42);
  fiat_p256_addcarryx_u64(&x48, &x49, x47, x34, x44);
  fiat_p256_addcarryx_u64(&x50, &x51, x49, x36, (x45 + x41));
  fiat_p256_addcarryx_u64(&x52, &x53, x51, (x37 + (x31 + x17)), x38);
  fiat_p256_addcarryx_u64(&x54, &x55, 0x0, x48, (arg1[3]));
  fiat_p256_addcarryx_u64(&x56, &x57, x55, x50, 0x0);
  fiat_p256_addcarryx_u64(&x58, &x59, x57, x52, 0x0);
  /* Reduction round for limb 3. */
  fiat_p256_mulx_u64(&x60, &x61, x54, UINT64_C(0xffffffff00000001));
  fiat_p256_mulx_u64(&x62, &x63, x54, UINT32_C(0xffffffff));
  fiat_p256_mulx_u64(&x64, &x65, x54, UINT64_C(0xffffffffffffffff));
  fiat_p256_addcarryx_u64(&x66, &x67, 0x0, x65, x62);
  fiat_p256_addcarryx_u64(&x68, &x69, 0x0, x54, x64);
  fiat_p256_addcarryx_u64(&x70, &x71, x69, x56, x66);
  fiat_p256_addcarryx_u64(&x72, &x73, x71, x58, (x67 + x63));
  fiat_p256_addcarryx_u64(&x74, &x75, x73, (x59 + (x53 + x39)), x60);
  x76 = (x75 + x61);
  /* Final conditional subtraction of m, selected on the borrow x86. */
  fiat_p256_subborrowx_u64(&x77, &x78, 0x0, x70, UINT64_C(0xffffffffffffffff));
  fiat_p256_subborrowx_u64(&x79, &x80, x78, x72, UINT32_C(0xffffffff));
  fiat_p256_subborrowx_u64(&x81, &x82, x80, x74, 0x0);
  fiat_p256_subborrowx_u64(&x83, &x84, x82, x76, UINT64_C(0xffffffff00000001));
  fiat_p256_subborrowx_u64(&x85, &x86, x84, 0x0, 0x0);
  fiat_p256_cmovznz_u64(&x87, x86, x77, x70);
  fiat_p256_cmovznz_u64(&x88, x86, x79, x72);
  fiat_p256_cmovznz_u64(&x89, x86, x81, x74);
  fiat_p256_cmovznz_u64(&x90, x86, x83, x76);
  out1[0] = x87;
  out1[1] = x88;
  out1[2] = x89;
  out1[3] = x90;
}

/*
 * The function fiat_p256_to_montgomery translates a field element into the Montgomery domain.
 *
 * Preconditions:
 *   0 ≤ eval arg1 < m
 * Postconditions:
 *   eval (from_montgomery out1) mod m = eval arg1 mod m
 *   0 ≤ eval out1 < m
 *
 */
static FIAT_P256_FIAT_INLINE void fiat_p256_to_montgomery(fiat_p256_montgomery_domain_field_element out1, const fiat_p256_non_montgomery_domain_field_element arg1) {
  uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6;
  uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12;
  uint64_t x13; fiat_p256_uint1 x14; uint64_t x15; fiat_p256_uint1 x16; uint64_t x17; fiat_p256_uint1 x18;
  uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint64_t x24;
  uint64_t x25; fiat_p256_uint1 x26; uint64_t x27; fiat_p256_uint1 x28; uint64_t x29; fiat_p256_uint1 x30;
  uint64_t x31; fiat_p256_uint1 x32; uint64_t x33; fiat_p256_uint1 x34; uint64_t x35; fiat_p256_uint1 x36;
  uint64_t x37; uint64_t x38; uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42;
  uint64_t x43; uint64_t x44; uint64_t x45; fiat_p256_uint1 x46; uint64_t x47; fiat_p256_uint1 x48;
  uint64_t x49; fiat_p256_uint1 x50; uint64_t x51; fiat_p256_uint1 x52; uint64_t x53; fiat_p256_uint1 x54;
  uint64_t x55; fiat_p256_uint1 x56; uint64_t x57; fiat_p256_uint1 x58; uint64_t x59; uint64_t x60;
  uint64_t x61; uint64_t x62; uint64_t x63; uint64_t x64; uint64_t x65; fiat_p256_uint1 x66;
  uint64_t x67; fiat_p256_uint1 x68;
uint64_t x69; fiat_p256_uint1 x70; uint64_t x71; fiat_p256_uint1 x72; uint64_t x73; fiat_p256_uint1 x74; uint64_t x75; fiat_p256_uint1 x76; uint64_t x77; uint64_t x78; uint64_t x79; uint64_t x80; uint64_t x81; uint64_t x82; uint64_t x83; uint64_t x84; uint64_t x85; fiat_p256_uint1 x86; uint64_t x87; fiat_p256_uint1 x88; uint64_t x89; fiat_p256_uint1 x90; uint64_t x91; fiat_p256_uint1 x92; uint64_t x93; fiat_p256_uint1 x94; uint64_t x95; fiat_p256_uint1 x96; uint64_t x97; fiat_p256_uint1 x98; uint64_t x99; uint64_t x100; uint64_t x101; uint64_t x102; uint64_t x103; uint64_t x104; uint64_t x105; fiat_p256_uint1 x106; uint64_t x107; fiat_p256_uint1 x108; uint64_t x109; fiat_p256_uint1 x110; uint64_t x111; fiat_p256_uint1 x112; uint64_t x113; fiat_p256_uint1 x114; uint64_t x115; fiat_p256_uint1 x116; uint64_t x117; uint64_t x118; uint64_t x119; uint64_t x120; uint64_t x121; uint64_t x122; uint64_t x123; uint64_t x124; uint64_t x125; fiat_p256_uint1 x126; uint64_t x127; fiat_p256_uint1 x128; uint64_t x129; fiat_p256_uint1 x130; uint64_t x131; fiat_p256_uint1 x132; uint64_t x133; fiat_p256_uint1 x134; uint64_t x135; fiat_p256_uint1 x136; uint64_t x137; fiat_p256_uint1 x138; uint64_t x139; uint64_t x140; uint64_t x141; uint64_t x142; uint64_t x143; uint64_t x144; uint64_t x145; fiat_p256_uint1 x146; uint64_t x147; fiat_p256_uint1 x148; uint64_t x149; fiat_p256_uint1 x150; uint64_t x151; fiat_p256_uint1 x152; uint64_t x153; fiat_p256_uint1 x154; uint64_t x155; fiat_p256_uint1 x156; uint64_t x157; fiat_p256_uint1 x158; uint64_t x159; fiat_p256_uint1 x160; uint64_t x161; fiat_p256_uint1 x162; uint64_t x163; fiat_p256_uint1 x164; uint64_t x165; fiat_p256_uint1 x166; uint64_t x167; uint64_t x168; uint64_t x169; uint64_t x170; x1 = (arg1[1]); x2 = (arg1[2]); x3 = (arg1[3]); x4 = (arg1[0]); fiat_p256_mulx_u64(&x5, &x6, x4, UINT64_C(0x4fffffffd)); fiat_p256_mulx_u64(&x7, &x8, x4, UINT64_C(0xfffffffffffffffe)); fiat_p256_mulx_u64(&x9, &x10, x4, UINT64_C(0xfffffffbffffffff)); 
fiat_p256_mulx_u64(&x11, &x12, x4, 0x3); fiat_p256_addcarryx_u64(&x13, &x14, 0x0, x12, x9); fiat_p256_addcarryx_u64(&x15, &x16, x14, x10, x7); fiat_p256_addcarryx_u64(&x17, &x18, x16, x8, x5); fiat_p256_mulx_u64(&x19, &x20, x11, UINT64_C(0xffffffff00000001)); fiat_p256_mulx_u64(&x21, &x22, x11, UINT32_C(0xffffffff)); fiat_p256_mulx_u64(&x23, &x24, x11, UINT64_C(0xffffffffffffffff)); fiat_p256_addcarryx_u64(&x25, &x26, 0x0, x24, x21); fiat_p256_addcarryx_u64(&x27, &x28, 0x0, x11, x23); fiat_p256_addcarryx_u64(&x29, &x30, x28, x13, x25); fiat_p256_addcarryx_u64(&x31, &x32, x30, x15, (x26 + x22)); fiat_p256_addcarryx_u64(&x33, &x34, x32, x17, x19); fiat_p256_addcarryx_u64(&x35, &x36, x34, (x18 + x6), x20); fiat_p256_mulx_u64(&x37, &x38, x1, UINT64_C(0x4fffffffd)); fiat_p256_mulx_u64(&x39, &x40, x1, UINT64_C(0xfffffffffffffffe)); fiat_p256_mulx_u64(&x41, &x42, x1, UINT64_C(0xfffffffbffffffff)); fiat_p256_mulx_u64(&x43, &x44, x1, 0x3); fiat_p256_addcarryx_u64(&x45, &x46, 0x0, x44, x41); fiat_p256_addcarryx_u64(&x47, &x48, x46, x42, x39); fiat_p256_addcarryx_u64(&x49, &x50, x48, x40, x37); fiat_p256_addcarryx_u64(&x51, &x52, 0x0, x29, x43); fiat_p256_addcarryx_u64(&x53, &x54, x52, x31, x45); fiat_p256_addcarryx_u64(&x55, &x56, x54, x33, x47); fiat_p256_addcarryx_u64(&x57, &x58, x56, x35, x49); fiat_p256_mulx_u64(&x59, &x60, x51, UINT64_C(0xffffffff00000001)); fiat_p256_mulx_u64(&x61, &x62, x51, UINT32_C(0xffffffff)); fiat_p256_mulx_u64(&x63, &x64, x51, UINT64_C(0xffffffffffffffff)); fiat_p256_addcarryx_u64(&x65, &x66, 0x0, x64, x61); fiat_p256_addcarryx_u64(&x67, &x68, 0x0, x51, x63); fiat_p256_addcarryx_u64(&x69, &x70, x68, x53, x65); fiat_p256_addcarryx_u64(&x71, &x72, x70, x55, (x66 + x62)); fiat_p256_addcarryx_u64(&x73, &x74, x72, x57, x59); fiat_p256_addcarryx_u64(&x75, &x76, x74, (((uint64_t)x58 + x36) + (x50 + x38)), x60); fiat_p256_mulx_u64(&x77, &x78, x2, UINT64_C(0x4fffffffd)); fiat_p256_mulx_u64(&x79, &x80, x2, UINT64_C(0xfffffffffffffffe)); 
fiat_p256_mulx_u64(&x81, &x82, x2, UINT64_C(0xfffffffbffffffff)); fiat_p256_mulx_u64(&x83, &x84, x2, 0x3); fiat_p256_addcarryx_u64(&x85, &x86, 0x0, x84, x81); fiat_p256_addcarryx_u64(&x87, &x88, x86, x82, x79); fiat_p256_addcarryx_u64(&x89, &x90, x88, x80, x77); fiat_p256_addcarryx_u64(&x91, &x92, 0x0, x69, x83); fiat_p256_addcarryx_u64(&x93, &x94, x92, x71, x85); fiat_p256_addcarryx_u64(&x95, &x96, x94, x73, x87); fiat_p256_addcarryx_u64(&x97, &x98, x96, x75, x89); fiat_p256_mulx_u64(&x99, &x100, x91, UINT64_C(0xffffffff00000001)); fiat_p256_mulx_u64(&x101, &x102, x91, UINT32_C(0xffffffff)); fiat_p256_mulx_u64(&x103, &x104, x91, UINT64_C(0xffffffffffffffff)); fiat_p256_addcarryx_u64(&x105, &x106, 0x0, x104, x101); fiat_p256_addcarryx_u64(&x107, &x108, 0x0, x91, x103); fiat_p256_addcarryx_u64(&x109, &x110, x108, x93, x105); fiat_p256_addcarryx_u64(&x111, &x112, x110, x95, (x106 + x102)); fiat_p256_addcarryx_u64(&x113, &x114, x112, x97, x99); fiat_p256_addcarryx_u64(&x115, &x116, x114, (((uint64_t)x98 + x76) + (x90 + x78)), x100); fiat_p256_mulx_u64(&x117, &x118, x3, UINT64_C(0x4fffffffd)); fiat_p256_mulx_u64(&x119, &x120, x3, UINT64_C(0xfffffffffffffffe)); fiat_p256_mulx_u64(&x121, &x122, x3, UINT64_C(0xfffffffbffffffff)); fiat_p256_mulx_u64(&x123, &x124, x3, 0x3); fiat_p256_addcarryx_u64(&x125, &x126, 0x0, x124, x121); fiat_p256_addcarryx_u64(&x127, &x128, x126, x122, x119); fiat_p256_addcarryx_u64(&x129, &x130, x128, x120, x117); fiat_p256_addcarryx_u64(&x131, &x132, 0x0, x109, x123); fiat_p256_addcarryx_u64(&x133, &x134, x132, x111, x125); fiat_p256_addcarryx_u64(&x135, &x136, x134, x113, x127); fiat_p256_addcarryx_u64(&x137, &x138, x136, x115, x129); fiat_p256_mulx_u64(&x139, &x140, x131, UINT64_C(0xffffffff00000001)); fiat_p256_mulx_u64(&x141, &x142, x131, UINT32_C(0xffffffff)); fiat_p256_mulx_u64(&x143, &x144, x131, UINT64_C(0xffffffffffffffff)); fiat_p256_addcarryx_u64(&x145, &x146, 0x0, x144, x141); fiat_p256_addcarryx_u64(&x147, &x148, 0x0, x131, x143); 
fiat_p256_addcarryx_u64(&x149, &x150, x148, x133, x145); fiat_p256_addcarryx_u64(&x151, &x152, x150, x135, (x146 + x142)); fiat_p256_addcarryx_u64(&x153, &x154, x152, x137, x139); fiat_p256_addcarryx_u64(&x155, &x156, x154, (((uint64_t)x138 + x116) + (x130 + x118)), x140); fiat_p256_subborrowx_u64(&x157, &x158, 0x0, x149, UINT64_C(0xffffffffffffffff)); fiat_p256_subborrowx_u64(&x159, &x160, x158, x151, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u64(&x161, &x162, x160, x153, 0x0); fiat_p256_subborrowx_u64(&x163, &x164, x162, x155, UINT64_C(0xffffffff00000001)); fiat_p256_subborrowx_u64(&x165, &x166, x164, x156, 0x0); fiat_p256_cmovznz_u64(&x167, x166, x157, x149); fiat_p256_cmovznz_u64(&x168, x166, x159, x151); fiat_p256_cmovznz_u64(&x169, x166, x161, x153); fiat_p256_cmovznz_u64(&x170, x166, x163, x155); out1[0] = x167; out1[1] = x168; out1[2] = x169; out1[3] = x170; } /* * The function fiat_p256_nonzero outputs a single non-zero word if the input is non-zero and zero otherwise. * * Preconditions: * 0 ≤ eval arg1 < m * Postconditions: * out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0 * * Input Bounds: * arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * Output Bounds: * out1: [0x0 ~> 0xffffffffffffffff] */ static FIAT_P256_FIAT_INLINE void fiat_p256_nonzero(uint64_t* out1, const uint64_t arg1[4]) { uint64_t x1; x1 = ((arg1[0]) | ((arg1[1]) | ((arg1[2]) | (arg1[3])))); *out1 = x1; } /* * The function fiat_p256_selectznz is a multi-limb conditional select. 
 *
 * Postconditions:
 *   out1 = (if arg1 = 0 then arg2 else arg3)
 *
 * Input Bounds:
 *   arg1: [0x0 ~> 0x1]
 *   arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
 *   arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
 * Output Bounds:
 *   out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
 */
/* Four-limb conditional select: copies arg2 into out1 when arg1 == 0 and arg3
 * otherwise, one limb at a time via the fiat_p256_cmovznz_u64 conditional-move
 * helper (presumably branchless/constant-time, as is standard for fiat-crypto
 * generated code — the helper body is outside this view). */
static FIAT_P256_FIAT_INLINE void fiat_p256_selectznz(uint64_t out1[4], fiat_p256_uint1 arg1, const uint64_t arg2[4], const uint64_t arg3[4]) {
  uint64_t x1;
  uint64_t x2;
  uint64_t x3;
  uint64_t x4;
  fiat_p256_cmovznz_u64(&x1, arg1, (arg2[0]), (arg3[0]));
  fiat_p256_cmovznz_u64(&x2, arg1, (arg2[1]), (arg3[1]));
  fiat_p256_cmovznz_u64(&x3, arg1, (arg2[2]), (arg3[2]));
  fiat_p256_cmovznz_u64(&x4, arg1, (arg2[3]), (arg3[3]));
  out1[0] = x1;
  out1[1] = x2;
  out1[2] = x3;
  out1[3] = x4;
}
/*
 * The function fiat_p256_to_bytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
* * Preconditions: * 0 ≤ eval arg1 < m * Postconditions: * out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31] * * Input Bounds: * arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * Output Bounds: * out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]] */ static FIAT_P256_FIAT_INLINE void fiat_p256_to_bytes(uint8_t out1[32], const uint64_t arg1[4]) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint8_t x5; uint64_t x6; uint8_t x7; uint64_t x8; uint8_t x9; uint64_t x10; uint8_t x11; uint64_t x12; uint8_t x13; uint64_t x14; uint8_t x15; uint64_t x16; uint8_t x17; uint8_t x18; uint8_t x19; uint64_t x20; uint8_t x21; uint64_t x22; uint8_t x23; uint64_t x24; uint8_t x25; uint64_t x26; uint8_t x27; uint64_t x28; uint8_t x29; uint64_t x30; uint8_t x31; uint8_t x32; uint8_t x33; uint64_t x34; uint8_t x35; uint64_t x36; uint8_t x37; uint64_t x38; uint8_t x39; uint64_t x40; uint8_t x41; uint64_t x42; uint8_t x43; uint64_t x44; uint8_t x45; uint8_t x46; uint8_t x47; uint64_t x48; uint8_t x49; uint64_t x50; uint8_t x51; uint64_t x52; uint8_t x53; uint64_t x54; uint8_t x55; uint64_t x56; uint8_t x57; uint64_t x58; uint8_t x59; uint8_t x60; x1 = (arg1[3]); x2 = (arg1[2]); x3 = (arg1[1]); x4 = (arg1[0]); x5 = (uint8_t)(x4 & UINT8_C(0xff)); x6 = (x4 >> 8); x7 = (uint8_t)(x6 & UINT8_C(0xff)); x8 = (x6 >> 8); x9 = (uint8_t)(x8 & UINT8_C(0xff)); x10 = (x8 >> 8); x11 = (uint8_t)(x10 & UINT8_C(0xff)); x12 = (x10 >> 8); x13 = (uint8_t)(x12 & 
UINT8_C(0xff)); x14 = (x12 >> 8); x15 = (uint8_t)(x14 & UINT8_C(0xff)); x16 = (x14 >> 8); x17 = (uint8_t)(x16 & UINT8_C(0xff)); x18 = (uint8_t)(x16 >> 8); x19 = (uint8_t)(x3 & UINT8_C(0xff)); x20 = (x3 >> 8); x21 = (uint8_t)(x20 & UINT8_C(0xff)); x22 = (x20 >> 8); x23 = (uint8_t)(x22 & UINT8_C(0xff)); x24 = (x22 >> 8); x25 = (uint8_t)(x24 & UINT8_C(0xff)); x26 = (x24 >> 8); x27 = (uint8_t)(x26 & UINT8_C(0xff)); x28 = (x26 >> 8); x29 = (uint8_t)(x28 & UINT8_C(0xff)); x30 = (x28 >> 8); x31 = (uint8_t)(x30 & UINT8_C(0xff)); x32 = (uint8_t)(x30 >> 8); x33 = (uint8_t)(x2 & UINT8_C(0xff)); x34 = (x2 >> 8); x35 = (uint8_t)(x34 & UINT8_C(0xff)); x36 = (x34 >> 8); x37 = (uint8_t)(x36 & UINT8_C(0xff)); x38 = (x36 >> 8); x39 = (uint8_t)(x38 & UINT8_C(0xff)); x40 = (x38 >> 8); x41 = (uint8_t)(x40 & UINT8_C(0xff)); x42 = (x40 >> 8); x43 = (uint8_t)(x42 & UINT8_C(0xff)); x44 = (x42 >> 8); x45 = (uint8_t)(x44 & UINT8_C(0xff)); x46 = (uint8_t)(x44 >> 8); x47 = (uint8_t)(x1 & UINT8_C(0xff)); x48 = (x1 >> 8); x49 = (uint8_t)(x48 & UINT8_C(0xff)); x50 = (x48 >> 8); x51 = (uint8_t)(x50 & UINT8_C(0xff)); x52 = (x50 >> 8); x53 = (uint8_t)(x52 & UINT8_C(0xff)); x54 = (x52 >> 8); x55 = (uint8_t)(x54 & UINT8_C(0xff)); x56 = (x54 >> 8); x57 = (uint8_t)(x56 & UINT8_C(0xff)); x58 = (x56 >> 8); x59 = (uint8_t)(x58 & UINT8_C(0xff)); x60 = (uint8_t)(x58 >> 8); out1[0] = x5; out1[1] = x7; out1[2] = x9; out1[3] = x11; out1[4] = x13; out1[5] = x15; out1[6] = x17; out1[7] = x18; out1[8] = x19; out1[9] = x21; out1[10] = x23; out1[11] = x25; out1[12] = x27; out1[13] = x29; out1[14] = x31; out1[15] = x32; out1[16] = x33; out1[17] = x35; out1[18] = x37; out1[19] = x39; out1[20] = x41; out1[21] = x43; out1[22] = x45; out1[23] = x46; out1[24] = x47; out1[25] = x49; out1[26] = x51; out1[27] = x53; out1[28] = x55; out1[29] = x57; out1[30] = x59; out1[31] = x60; } /* * The function fiat_p256_from_bytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order. 
* * Preconditions: * 0 ≤ bytes_eval arg1 < m * Postconditions: * eval out1 mod m = bytes_eval arg1 mod m * 0 ≤ eval out1 < m * * Input Bounds: * arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]] * Output Bounds: * out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */ static FIAT_P256_FIAT_INLINE void fiat_p256_from_bytes(uint64_t out1[4], const uint8_t arg1[32]) { uint64_t x1; uint64_t x2; uint64_t x3; uint64_t x4; uint64_t x5; uint64_t x6; uint64_t x7; uint8_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12; uint64_t x13; uint64_t x14; uint64_t x15; uint8_t x16; uint64_t x17; uint64_t x18; uint64_t x19; uint64_t x20; uint64_t x21; uint64_t x22; uint64_t x23; uint8_t x24; uint64_t x25; uint64_t x26; uint64_t x27; uint64_t x28; uint64_t x29; uint64_t x30; uint64_t x31; uint8_t x32; uint64_t x33; uint64_t x34; uint64_t x35; uint64_t x36; uint64_t x37; uint64_t x38; uint64_t x39; uint64_t x40; uint64_t x41; uint64_t x42; uint64_t x43; uint64_t x44; uint64_t x45; uint64_t x46; uint64_t x47; uint64_t x48; uint64_t x49; uint64_t x50; uint64_t x51; uint64_t x52; uint64_t x53; uint64_t x54; uint64_t x55; uint64_t x56; uint64_t x57; uint64_t x58; uint64_t x59; uint64_t x60; x1 = ((uint64_t)(arg1[31]) << 56); x2 = ((uint64_t)(arg1[30]) << 48); x3 = ((uint64_t)(arg1[29]) << 40); x4 = ((uint64_t)(arg1[28]) << 32); x5 = ((uint64_t)(arg1[27]) << 24); x6 = ((uint64_t)(arg1[26]) << 16); x7 = ((uint64_t)(arg1[25]) << 8); x8 = (arg1[24]); x9 = ((uint64_t)(arg1[23]) << 56); 
x10 = ((uint64_t)(arg1[22]) << 48); x11 = ((uint64_t)(arg1[21]) << 40); x12 = ((uint64_t)(arg1[20]) << 32); x13 = ((uint64_t)(arg1[19]) << 24); x14 = ((uint64_t)(arg1[18]) << 16); x15 = ((uint64_t)(arg1[17]) << 8); x16 = (arg1[16]); x17 = ((uint64_t)(arg1[15]) << 56); x18 = ((uint64_t)(arg1[14]) << 48); x19 = ((uint64_t)(arg1[13]) << 40); x20 = ((uint64_t)(arg1[12]) << 32); x21 = ((uint64_t)(arg1[11]) << 24); x22 = ((uint64_t)(arg1[10]) << 16); x23 = ((uint64_t)(arg1[9]) << 8); x24 = (arg1[8]); x25 = ((uint64_t)(arg1[7]) << 56); x26 = ((uint64_t)(arg1[6]) << 48); x27 = ((uint64_t)(arg1[5]) << 40); x28 = ((uint64_t)(arg1[4]) << 32); x29 = ((uint64_t)(arg1[3]) << 24); x30 = ((uint64_t)(arg1[2]) << 16); x31 = ((uint64_t)(arg1[1]) << 8); x32 = (arg1[0]); x33 = (x31 + (uint64_t)x32); x34 = (x30 + x33); x35 = (x29 + x34); x36 = (x28 + x35); x37 = (x27 + x36); x38 = (x26 + x37); x39 = (x25 + x38); x40 = (x23 + (uint64_t)x24); x41 = (x22 + x40); x42 = (x21 + x41); x43 = (x20 + x42); x44 = (x19 + x43); x45 = (x18 + x44); x46 = (x17 + x45); x47 = (x15 + (uint64_t)x16); x48 = (x14 + x47); x49 = (x13 + x48); x50 = (x12 + x49); x51 = (x11 + x50); x52 = (x10 + x51); x53 = (x9 + x52); x54 = (x7 + (uint64_t)x8); x55 = (x6 + x54); x56 = (x5 + x55); x57 = (x4 + x56); x58 = (x3 + x57); x59 = (x2 + x58); x60 = (x1 + x59); out1[0] = x39; out1[1] = x46; out1[2] = x53; out1[3] = x60; } /* * The function fiat_p256_set_one returns the field element one in the Montgomery domain. * * Postconditions: * eval (from_montgomery out1) mod m = 1 mod m * 0 ≤ eval out1 < m * */ static FIAT_P256_FIAT_INLINE void fiat_p256_set_one(fiat_p256_montgomery_domain_field_element out1) { out1[0] = 0x1; out1[1] = UINT64_C(0xffffffff00000000); out1[2] = UINT64_C(0xffffffffffffffff); out1[3] = UINT32_C(0xfffffffe); } /* * The function fiat_p256_msat returns the saturated representation of the prime modulus. 
* * Postconditions: * twos_complement_eval out1 = m * 0 ≤ eval out1 < m * * Output Bounds: * out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */ static FIAT_P256_FIAT_INLINE void fiat_p256_msat(uint64_t out1[5]) { out1[0] = UINT64_C(0xffffffffffffffff); out1[1] = UINT32_C(0xffffffff); out1[2] = 0x0; out1[3] = UINT64_C(0xffffffff00000001); out1[4] = 0x0; } /* * The function fiat_p256_divstep computes a divstep. * * Preconditions: * 0 ≤ eval arg4 < m * 0 ≤ eval arg5 < m * Postconditions: * out1 = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then 1 - arg1 else 1 + arg1) * twos_complement_eval out2 = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then twos_complement_eval arg3 else twos_complement_eval arg2) * twos_complement_eval out3 = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then ⌊(twos_complement_eval arg3 - twos_complement_eval arg2) / 2⌋ else ⌊(twos_complement_eval arg3 + (twos_complement_eval arg3 mod 2) * twos_complement_eval arg2) / 2⌋) * eval (from_montgomery out4) mod m = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then (2 * eval (from_montgomery arg5)) mod m else (2 * eval (from_montgomery arg4)) mod m) * eval (from_montgomery out5) mod m = (if 0 < arg1 ∧ (twos_complement_eval arg3) is odd then (eval (from_montgomery arg4) - eval (from_montgomery arg4)) mod m else (eval (from_montgomery arg5) + (twos_complement_eval arg3 mod 2) * eval (from_montgomery arg4)) mod m) * 0 ≤ eval out5 < m * 0 ≤ eval out5 < m * 0 ≤ eval out2 < m * 0 ≤ eval out3 < m * * Input Bounds: * arg1: [0x0 ~> 0xffffffffffffffff] * arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * arg4: [[0x0 ~> 
0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * arg5: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * Output Bounds: * out1: [0x0 ~> 0xffffffffffffffff] * out2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * out3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * out4: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] * out5: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */ static FIAT_P256_FIAT_INLINE void fiat_p256_divstep(uint64_t* out1, uint64_t out2[5], uint64_t out3[5], uint64_t out4[4], uint64_t out5[4], uint64_t arg1, const uint64_t arg2[5], const uint64_t arg3[5], const uint64_t arg4[4], const uint64_t arg5[4]) { uint64_t x1; fiat_p256_uint1 x2; fiat_p256_uint1 x3; uint64_t x4; fiat_p256_uint1 x5; uint64_t x6; uint64_t x7; uint64_t x8; uint64_t x9; uint64_t x10; uint64_t x11; uint64_t x12; fiat_p256_uint1 x13; uint64_t x14; fiat_p256_uint1 x15; uint64_t x16; fiat_p256_uint1 x17; uint64_t x18; fiat_p256_uint1 x19; uint64_t x20; fiat_p256_uint1 x21; uint64_t x22; uint64_t x23; uint64_t x24; uint64_t x25; uint64_t x26; uint64_t x27; uint64_t x28; uint64_t x29; uint64_t x30; uint64_t x31; fiat_p256_uint1 x32; uint64_t x33; fiat_p256_uint1 x34; uint64_t x35; fiat_p256_uint1 x36; uint64_t x37; fiat_p256_uint1 x38; uint64_t x39; fiat_p256_uint1 x40; uint64_t x41; fiat_p256_uint1 x42; uint64_t x43; fiat_p256_uint1 x44; uint64_t x45; fiat_p256_uint1 x46; uint64_t x47; fiat_p256_uint1 x48; uint64_t x49; uint64_t x50; uint64_t x51; uint64_t x52; uint64_t x53; fiat_p256_uint1 x54; uint64_t x55; 
fiat_p256_uint1 x56; uint64_t x57; fiat_p256_uint1 x58; uint64_t x59; fiat_p256_uint1 x60; uint64_t x61; uint64_t x62; fiat_p256_uint1 x63; uint64_t x64; fiat_p256_uint1 x65; uint64_t x66; fiat_p256_uint1 x67; uint64_t x68; fiat_p256_uint1 x69; uint64_t x70; uint64_t x71; uint64_t x72; uint64_t x73; fiat_p256_uint1 x74; uint64_t x75; uint64_t x76; uint64_t x77; uint64_t x78; uint64_t x79; uint64_t x80; fiat_p256_uint1 x81; uint64_t x82; fiat_p256_uint1 x83; uint64_t x84; fiat_p256_uint1 x85; uint64_t x86; fiat_p256_uint1 x87; uint64_t x88; fiat_p256_uint1 x89; uint64_t x90; uint64_t x91; uint64_t x92; uint64_t x93; uint64_t x94; fiat_p256_uint1 x95; uint64_t x96; fiat_p256_uint1 x97; uint64_t x98; fiat_p256_uint1 x99; uint64_t x100; fiat_p256_uint1 x101; uint64_t x102; fiat_p256_uint1 x103; uint64_t x104; fiat_p256_uint1 x105; uint64_t x106; fiat_p256_uint1 x107; uint64_t x108; fiat_p256_uint1 x109; uint64_t x110; fiat_p256_uint1 x111; uint64_t x112; fiat_p256_uint1 x113; uint64_t x114; uint64_t x115; uint64_t x116; uint64_t x117; uint64_t x118; uint64_t x119; uint64_t x120; uint64_t x121; uint64_t x122; uint64_t x123; uint64_t x124; uint64_t x125; uint64_t x126; fiat_p256_addcarryx_u64(&x1, &x2, 0x0, (~arg1), 0x1); x3 = (fiat_p256_uint1)((fiat_p256_uint1)(x1 >> 63) & (fiat_p256_uint1)((arg3[0]) & 0x1)); fiat_p256_addcarryx_u64(&x4, &x5, 0x0, (~arg1), 0x1); fiat_p256_cmovznz_u64(&x6, x3, arg1, x4); fiat_p256_cmovznz_u64(&x7, x3, (arg2[0]), (arg3[0])); fiat_p256_cmovznz_u64(&x8, x3, (arg2[1]), (arg3[1])); fiat_p256_cmovznz_u64(&x9, x3, (arg2[2]), (arg3[2])); fiat_p256_cmovznz_u64(&x10, x3, (arg2[3]), (arg3[3])); fiat_p256_cmovznz_u64(&x11, x3, (arg2[4]), (arg3[4])); fiat_p256_addcarryx_u64(&x12, &x13, 0x0, 0x1, (~(arg2[0]))); fiat_p256_addcarryx_u64(&x14, &x15, x13, 0x0, (~(arg2[1]))); fiat_p256_addcarryx_u64(&x16, &x17, x15, 0x0, (~(arg2[2]))); fiat_p256_addcarryx_u64(&x18, &x19, x17, 0x0, (~(arg2[3]))); fiat_p256_addcarryx_u64(&x20, &x21, x19, 0x0, (~(arg2[4]))); 
fiat_p256_cmovznz_u64(&x22, x3, (arg3[0]), x12); fiat_p256_cmovznz_u64(&x23, x3, (arg3[1]), x14); fiat_p256_cmovznz_u64(&x24, x3, (arg3[2]), x16); fiat_p256_cmovznz_u64(&x25, x3, (arg3[3]), x18); fiat_p256_cmovznz_u64(&x26, x3, (arg3[4]), x20); fiat_p256_cmovznz_u64(&x27, x3, (arg4[0]), (arg5[0])); fiat_p256_cmovznz_u64(&x28, x3, (arg4[1]), (arg5[1])); fiat_p256_cmovznz_u64(&x29, x3, (arg4[2]), (arg5[2])); fiat_p256_cmovznz_u64(&x30, x3, (arg4[3]), (arg5[3])); fiat_p256_addcarryx_u64(&x31, &x32, 0x0, x27, x27); fiat_p256_addcarryx_u64(&x33, &x34, x32, x28, x28); fiat_p256_addcarryx_u64(&x35, &x36, x34, x29, x29); fiat_p256_addcarryx_u64(&x37, &x38, x36, x30, x30); fiat_p256_subborrowx_u64(&x39, &x40, 0x0, x31, UINT64_C(0xffffffffffffffff)); fiat_p256_subborrowx_u64(&x41, &x42, x40, x33, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u64(&x43, &x44, x42, x35, 0x0); fiat_p256_subborrowx_u64(&x45, &x46, x44, x37, UINT64_C(0xffffffff00000001)); fiat_p256_subborrowx_u64(&x47, &x48, x46, x38, 0x0); x49 = (arg4[3]); x50 = (arg4[2]); x51 = (arg4[1]); x52 = (arg4[0]); fiat_p256_subborrowx_u64(&x53, &x54, 0x0, 0x0, x52); fiat_p256_subborrowx_u64(&x55, &x56, x54, 0x0, x51); fiat_p256_subborrowx_u64(&x57, &x58, x56, 0x0, x50); fiat_p256_subborrowx_u64(&x59, &x60, x58, 0x0, x49); fiat_p256_cmovznz_u64(&x61, x60, 0x0, UINT64_C(0xffffffffffffffff)); fiat_p256_addcarryx_u64(&x62, &x63, 0x0, x53, x61); fiat_p256_addcarryx_u64(&x64, &x65, x63, x55, (x61 & UINT32_C(0xffffffff))); fiat_p256_addcarryx_u64(&x66, &x67, x65, x57, 0x0); fiat_p256_addcarryx_u64(&x68, &x69, x67, x59, (x61 & UINT64_C(0xffffffff00000001))); fiat_p256_cmovznz_u64(&x70, x3, (arg5[0]), x62); fiat_p256_cmovznz_u64(&x71, x3, (arg5[1]), x64); fiat_p256_cmovznz_u64(&x72, x3, (arg5[2]), x66); fiat_p256_cmovznz_u64(&x73, x3, (arg5[3]), x68); x74 = (fiat_p256_uint1)(x22 & 0x1); fiat_p256_cmovznz_u64(&x75, x74, 0x0, x7); fiat_p256_cmovznz_u64(&x76, x74, 0x0, x8); fiat_p256_cmovznz_u64(&x77, x74, 0x0, x9); 
fiat_p256_cmovznz_u64(&x78, x74, 0x0, x10); fiat_p256_cmovznz_u64(&x79, x74, 0x0, x11); fiat_p256_addcarryx_u64(&x80, &x81, 0x0, x22, x75); fiat_p256_addcarryx_u64(&x82, &x83, x81, x23, x76); fiat_p256_addcarryx_u64(&x84, &x85, x83, x24, x77); fiat_p256_addcarryx_u64(&x86, &x87, x85, x25, x78); fiat_p256_addcarryx_u64(&x88, &x89, x87, x26, x79); fiat_p256_cmovznz_u64(&x90, x74, 0x0, x27); fiat_p256_cmovznz_u64(&x91, x74, 0x0, x28); fiat_p256_cmovznz_u64(&x92, x74, 0x0, x29); fiat_p256_cmovznz_u64(&x93, x74, 0x0, x30); fiat_p256_addcarryx_u64(&x94, &x95, 0x0, x70, x90); fiat_p256_addcarryx_u64(&x96, &x97, x95, x71, x91); fiat_p256_addcarryx_u64(&x98, &x99, x97, x72, x92); fiat_p256_addcarryx_u64(&x100, &x101, x99, x73, x93); fiat_p256_subborrowx_u64(&x102, &x103, 0x0, x94, UINT64_C(0xffffffffffffffff)); fiat_p256_subborrowx_u64(&x104, &x105, x103, x96, UINT32_C(0xffffffff)); fiat_p256_subborrowx_u64(&x106, &x107, x105, x98, 0x0); fiat_p256_subborrowx_u64(&x108, &x109, x107, x100, UINT64_C(0xffffffff00000001)); fiat_p256_subborrowx_u64(&x110, &x111, x109, x101, 0x0); fiat_p256_addcarryx_u64(&x112, &x113, 0x0, x6, 0x1); x114 = ((x80 >> 1) | ((x82 << 63) & UINT64_C(0xffffffffffffffff))); x115 = ((x82 >> 1) | ((x84 << 63) & UINT64_C(0xffffffffffffffff))); x116 = ((x84 >> 1) | ((x86 << 63) & UINT64_C(0xffffffffffffffff))); x117 = ((x86 >> 1) | ((x88 << 63) & UINT64_C(0xffffffffffffffff))); x118 = ((x88 & UINT64_C(0x8000000000000000)) | (x88 >> 1)); fiat_p256_cmovznz_u64(&x119, x48, x39, x31); fiat_p256_cmovznz_u64(&x120, x48, x41, x33); fiat_p256_cmovznz_u64(&x121, x48, x43, x35); fiat_p256_cmovznz_u64(&x122, x48, x45, x37); fiat_p256_cmovznz_u64(&x123, x111, x102, x94); fiat_p256_cmovznz_u64(&x124, x111, x104, x96); fiat_p256_cmovznz_u64(&x125, x111, x106, x98); fiat_p256_cmovznz_u64(&x126, x111, x108, x100); *out1 = x112; out2[0] = x7; out2[1] = x8; out2[2] = x9; out2[3] = x10; out2[4] = x11; out3[0] = x114; out3[1] = x115; out3[2] = x116; out3[3] = x117; out3[4] = 
x118; out4[0] = x119; out4[1] = x120; out4[2] = x121; out4[3] = x122; out5[0] = x123; out5[1] = x124; out5[2] = x125; out5[3] = x126; } /* * The function fiat_p256_divstep_precomp returns the precomputed value for Bernstein-Yang-inversion (in montgomery form). * * Postconditions: * eval (from_montgomery out1) = ⌊(m - 1) / 2⌋^(if ⌊log2 m⌋ + 1 < 46 then ⌊(49 * (⌊log2 m⌋ + 1) + 80) / 17⌋ else ⌊(49 * (⌊log2 m⌋ + 1) + 57) / 17⌋) * 0 ≤ eval out1 < m * * Output Bounds: * out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] */ static FIAT_P256_FIAT_INLINE void fiat_p256_divstep_precomp(uint64_t out1[4]) { out1[0] = UINT64_C(0x67ffffffb8000000); out1[1] = UINT64_C(0xc000000038000000); out1[2] = UINT64_C(0xd80000007fffffff); out1[3] = UINT64_C(0x2fffffffffffffff); } ================================================ FILE: Sources/CNIOBoringSSLShims/include/CNIOBoringSSLShims.h ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2018 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// #ifndef C_NIO_BORINGSSL_SHIMS_H #define C_NIO_BORINGSSL_SHIMS_H // This is for instances when `swift package generate-xcodeproj` is used as CNIOBoringSSL // is treated as a framework and requires the framework's name as a prefix. 
#if __has_include() #include #else #include "CNIOBoringSSL.h" #endif #if defined(__cplusplus) extern "C" { #endif X509_EXTENSION *CNIOBoringSSLShims_sk_X509_EXTENSION_value(const STACK_OF(X509_EXTENSION) *sk, size_t i); size_t CNIOBoringSSLShims_sk_X509_EXTENSION_num(const STACK_OF(X509_EXTENSION) *sk); GENERAL_NAME *CNIOBoringSSLShims_sk_GENERAL_NAME_value(const STACK_OF(GENERAL_NAME) *sk, size_t i); size_t CNIOBoringSSLShims_sk_GENERAL_NAME_num(const STACK_OF(GENERAL_NAME) *sk); void *CNIOBoringSSLShims_SSL_CTX_get_app_data(const SSL_CTX *ctx); int CNIOBoringSSLShims_SSL_CTX_set_app_data(SSL_CTX *ctx, void *data); int CNIOBoringSSLShims_ERR_GET_LIB(uint32_t err); int CNIOBoringSSLShims_ERR_GET_REASON(uint32_t err); #if defined(__cplusplus) } // extern "C" #endif #endif // C_NIO_BORINGSSL_SHIMS_H ================================================ FILE: Sources/CNIOBoringSSLShims/shims.c ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2018 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// // Unfortunately, even in our brave BoringSSL world, we have "functions" that are // macros too complex for the clang importer. This file handles them. 
#include "CNIOBoringSSLShims.h"

// Definitions for the shims declared in CNIOBoringSSLShims.h.  Each function
// simply forwards to the corresponding BoringSSL macro so that Swift can call
// it as an ordinary C function.

// Stack-of-X509_EXTENSION accessors.
X509_EXTENSION *CNIOBoringSSLShims_sk_X509_EXTENSION_value(const STACK_OF(X509_EXTENSION) *sk, size_t i) {
    return sk_X509_EXTENSION_value(sk, i);
}

size_t CNIOBoringSSLShims_sk_X509_EXTENSION_num(const STACK_OF(X509_EXTENSION) *sk) {
    return sk_X509_EXTENSION_num(sk);
}

// Stack-of-GENERAL_NAME accessors.
GENERAL_NAME *CNIOBoringSSLShims_sk_GENERAL_NAME_value(const STACK_OF(GENERAL_NAME) *sk, size_t i) {
    return sk_GENERAL_NAME_value(sk, i);
}

size_t CNIOBoringSSLShims_sk_GENERAL_NAME_num(const STACK_OF(GENERAL_NAME) *sk) {
    return sk_GENERAL_NAME_num(sk);
}

// SSL_CTX app-data get/set.
void *CNIOBoringSSLShims_SSL_CTX_get_app_data(const SSL_CTX *ctx) {
    return SSL_CTX_get_app_data(ctx);
}

int CNIOBoringSSLShims_SSL_CTX_set_app_data(SSL_CTX *ctx, void *data) {
    return SSL_CTX_set_app_data(ctx, data);
}

// Error-code field extraction.
int CNIOBoringSSLShims_ERR_GET_LIB(uint32_t err) {
    return ERR_GET_LIB(err);
}

int CNIOBoringSSLShims_ERR_GET_REASON(uint32_t err) {
    return ERR_GET_REASON(err);
}


================================================
FILE: Sources/NIOSSL/AndroidCABundle.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2019-2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

#if os(Android)

/// The path to the root CA bundle directory.
///
/// May be nil if we could not find the root CA bundle directory.
internal let rootCADirectoryPath: String? = locateRootCADirectory()

/// This is a list of root CA directory search paths.
///
/// This list contains paths as validated against several distributions.
/// If you are aware of a CA bundle on a specific distribution
/// that is not present here, please open a pull request that adds the appropriate search path.
/// Some distributions do not ship CA directories: as such, it is not a problem if a distribution that is present in rootCAFileSearchPaths
/// is not present in this list.
//see https://android.googlesource.com/platform/frameworks/base/+/8b192b19f264a8829eac2cfaf0b73f6fc188d933%5E%21/#F0
private let rootCADirectorySearchPaths = [
    "/apex/com.android.conscrypt/cacerts",  // >= Android14
    "/system/etc/security/cacerts",  // < Android14
]

/// Returns the first entry of `rootCADirectorySearchPaths` that exists on disk
/// as a directory, or nil when none does.
private func locateRootCADirectory() -> String? {
    rootCADirectorySearchPaths.first(where: { FileSystemObject.pathType(path: $0) == .directory })
}

#endif


================================================
FILE: Sources/NIOSSL/ByteBufferBIO.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL
import NIOCore

#if canImport(Darwin)
import Darwin.C
#elseif canImport(Musl)
import Musl
#elseif canImport(Glibc)
import Glibc
#elseif canImport(Bionic)
import Bionic
#else
#error("unsupported os")
#endif

/// The BoringSSL entry point to write to the `ByteBufferBIO`. This thunk unwraps the user data
/// and then passes the call on to the specific BIO reference.
///
/// This specific type signature is annoying (I'd rather have UnsafeRawPointer, and rather than a separate
/// len I'd like a buffer pointer), but this interface is required because this is passed to a BoringSSL
/// function pointer and so needs to be @convention(c).
internal func boringSSLBIOWriteFunc(bio: UnsafeMutablePointer<BIO>?, buf: UnsafePointer<CChar>?, len: CInt) -> CInt {
    guard let concreteBIO = bio, let concreteBuf = buf else {
        preconditionFailure(
            "Invalid pointers in boringSSLBIOWriteFunc: bio: \(String(describing: bio)) buf: \(String(describing: buf))"
        )
    }

    // This unwrap may fail if the user has dropped the ref to the ByteBufferBIO but still has
    // a ref to the other pointer. Sigh heavily and just fail.
    guard let userPtr = CNIOBoringSSL_BIO_get_data(concreteBIO) else {
        return -1
    }

    // Begin by clearing retry flags. We do this at all BoringSSL entry points.
    CNIOBoringSSL_BIO_clear_retry_flags(concreteBIO)

    // In the event a write of 0 bytes has been asked for, just return early, don't bother with the other work.
    guard len > 0 else {
        return 0
    }

    // The BIO's user data holds an unbalanced retain of the ByteBufferBIO (taken in init, released in close()),
    // so an unretained load here is safe for the duration of the call.
    let swiftBIO: ByteBufferBIO = Unmanaged.fromOpaque(userPtr).takeUnretainedValue()
    let bufferPtr = UnsafeRawBufferPointer(start: concreteBuf, count: Int(len))
    return swiftBIO.sslWrite(buffer: bufferPtr)
}

/// The BoringSSL entry point to read from the `ByteBufferBIO`. This thunk unwraps the user data
/// and then passes the call on to the specific BIO reference.
///
/// This specific type signature is annoying (I'd rather have UnsafeRawPointer, and rather than a separate
/// len I'd like a buffer pointer), but this interface is required because this is passed to a BoringSSL
/// function pointer and so needs to be @convention(c).
internal func boringSSLBIOReadFunc(
    bio: UnsafeMutablePointer<BIO>?,
    buf: UnsafeMutablePointer<CChar>?,
    len: CInt
) -> CInt {
    guard let concreteBIO = bio, let concreteBuf = buf else {
        preconditionFailure(
            "Invalid pointers in boringSSLBIOReadFunc: bio: \(String(describing: bio)) buf: \(String(describing: buf))"
        )
    }

    // This unwrap may fail if the user has dropped the ref to the ByteBufferBIO but still has
    // a ref to the other pointer. Sigh heavily and just fail.
    guard let userPtr = CNIOBoringSSL_BIO_get_data(concreteBIO) else {
        return -1
    }

    // Begin by clearing retry flags. We do this at all BoringSSL entry points.
    CNIOBoringSSL_BIO_clear_retry_flags(concreteBIO)

    // In the event a read for 0 bytes has been asked for, just return early, don't bother with the other work.
    guard len > 0 else {
        return 0
    }

    let swiftBIO: ByteBufferBIO = Unmanaged.fromOpaque(userPtr).takeUnretainedValue()
    let bufferPtr = UnsafeMutableRawBufferPointer(start: concreteBuf, count: Int(len))
    return swiftBIO.sslRead(buffer: bufferPtr)
}

/// The BoringSSL entry point for `puts`. This is a silly function, so we're just going to implement it
/// in terms of write.
///
/// This specific type signature is annoying (I'd rather have UnsafeRawPointer, and rather than a separate
/// len I'd like a buffer pointer), but this interface is required because this is passed to a BoringSSL
/// function pointer and so needs to be @convention(c).
internal func boringSSLBIOPutsFunc(bio: UnsafeMutablePointer<BIO>?, buf: UnsafePointer<CChar>?) -> CInt {
    guard let concreteBIO = bio, let concreteBuf = buf else {
        preconditionFailure(
            "Invalid pointers in boringSSLBIOPutsFunc: bio: \(String(describing: bio)) buf: \(String(describing: buf))"
        )
    }

    // `puts` is defined as writing the NUL-terminated string, so delegate to the write thunk
    // with the measured length.
    return boringSSLBIOWriteFunc(bio: concreteBIO, buf: concreteBuf, len: CInt(strlen(concreteBuf)))
}

/// The BoringSSL entry point for `gets`. This is a *really* silly function and we can't implement it nicely
/// in terms of read, so we just refuse to support it.
///
/// This specific type signature is annoying (I'd rather have UnsafeRawPointer, and rather than a separate
/// len I'd like a buffer pointer), but this interface is required because this is passed to a BoringSSL
/// function pointer and so needs to be @convention(c).
internal func boringSSLBIOGetsFunc(
    bio: UnsafeMutablePointer<BIO>?,
    buf: UnsafeMutablePointer<CChar>?,
    len: CInt
) -> CInt {
    // Unsupported: -2 is the conventional BIO return for "operation not implemented".
    -2
}

/// The BoringSSL entry point for `BIO_ctrl`. We don't support most of these.
internal func boringSSLBIOCtrlFunc(
    bio: UnsafeMutablePointer<BIO>?,
    cmd: CInt,
    larg: CLong,
    parg: UnsafeMutableRawPointer?
) -> CLong {
    switch cmd {
    case BIO_CTRL_GET_CLOSE:
        return CLong(CNIOBoringSSL_BIO_get_shutdown(bio))
    case BIO_CTRL_SET_CLOSE:
        CNIOBoringSSL_BIO_set_shutdown(bio, CInt(larg))
        return 1
    case BIO_CTRL_FLUSH:
        // We have no internal buffering of our own, so a flush is always trivially successful.
        return 1
    default:
        return 0
    }
}

internal func boringSSLBIOCreateFunc(bio: UnsafeMutablePointer<BIO>?) -> CInt { 1 }

internal func boringSSLBIODestroyFunc(bio: UnsafeMutablePointer<BIO>?) -> CInt { 1 }

/// A BoringSSL BIO object that wraps `ByteBuffer` objects.
///
/// BoringSSL extensively uses an abstraction called `BIO` to manage its input and output
/// channels. For NIO we want a BIO that operates entirely in-memory, and it's tempting
/// to assume that BoringSSL's `BIO_s_mem` is the best choice for that. However, ultimately
/// `BIO_s_mem` is a flat memory buffer that we end up using as a staging between one
/// `ByteBuffer` of plaintext and one of ciphertext. We'd like to cut out that middleman.
///
/// For this reason, we want to create an object that implements the `BIO` abstraction
/// but which uses `ByteBuffer`s to do so. This allows us to avoid unnecessary memory copies,
/// which can be a really large win.
final class ByteBufferBIO {
    /// The unsafe pointer to the BoringSSL BIO_METHOD.
    ///
    /// This is used to give BoringSSL pointers to the methods that need to be invoked when
    /// using a ByteBufferBIO.
There will only ever be one value of this in a NIO program, /// and it will always be non-NULL. Failure to initialize this structure is fatal to /// the program. nonisolated(unsafe) private static let boringSSLBIOMethod: UnsafeMutablePointer = buildBoringSSLBIOMethod() private static func buildBoringSSLBIOMethod() -> UnsafeMutablePointer { guard boringSSLIsInitialized else { preconditionFailure("Failed to initialize BoringSSL") } let bioType = CNIOBoringSSL_BIO_get_new_index() | BIO_TYPE_SOURCE_SINK guard let method = CNIOBoringSSL_BIO_meth_new(bioType, "ByteBuffer BIO") else { preconditionFailure("Unable to allocate new BIO_METHOD") } CNIOBoringSSL_BIO_meth_set_write(method, boringSSLBIOWriteFunc) CNIOBoringSSL_BIO_meth_set_read(method, boringSSLBIOReadFunc) CNIOBoringSSL_BIO_meth_set_puts(method, boringSSLBIOPutsFunc) CNIOBoringSSL_BIO_meth_set_gets(method, boringSSLBIOGetsFunc) CNIOBoringSSL_BIO_meth_set_ctrl(method, boringSSLBIOCtrlFunc) CNIOBoringSSL_BIO_meth_set_create(method, boringSSLBIOCreateFunc) CNIOBoringSSL_BIO_meth_set_destroy(method, boringSSLBIODestroyFunc) return method } /// Pointer to the backing BoringSSL BIO object. /// /// Generally speaking BoringSSL wants to own the object initialization logic for a BIO. /// This doesn't work for us, because we'd like to ensure that the `ByteBufferBIO` is /// correctly initialized with all the state it needs. One of those bits of state is /// a `ByteBuffer`, which BoringSSL cannot give us, so we need to build our `ByteBufferBIO` /// *first* and then use that to drive `BIO` initialization. /// /// Because of this split initialization dance, we elect to initialize this data structure, /// and have it own building an BoringSSL `BIO` structure. private let bioPtr: UnsafeMutablePointer /// The buffer of bytes received from the network. /// /// By default, `ByteBufferBIO` expects to pass data directly to BoringSSL whenever it /// is received. 
It is, in essence, a temporary container for a `ByteBuffer` on the /// read side. This provides a powerful optimisation, which is that the read buffer /// passed to the `NIOSSLHandler` can be re-used immediately upon receipt. Given that /// the `NIOSSLHandler` is almost always the first handler in the pipeline, this greatly /// improves the allocation profile of busy connections, which can more-easily re-use /// the receive buffer. private var inboundBuffer: ByteBuffer? /// The buffer of bytes to send to the network. /// /// While on the read side `ByteBufferBIO` expects to hold each bytebuffer only temporarily, /// on the write side we attempt to coalesce as many writes as possible. This is because a /// buffer can only be re-used if it is flushed to the network, and that can only happen /// on flush calls, so we are incentivised to write as many SSL_write calls into one buffer /// as possible. private var outboundBuffer: ByteBuffer /// An allocator to use for new buffers. private let allocator: ByteBufferAllocator /// The maximum capacity of the outbound buffer that we'll preserve after clearing it. /// /// When `mustClearOutboundBuffer` is `true`, this value is checked against the capacity. /// If the capacity of the buffer is larger than this value, the buffer is replaced with a new /// empty buffer sufficient to hold the next call to `SSL_write`. private let maximumPreservedOutboundBufferCapacity: Int /// Whether the outbound buffer should be cleared before writing. /// /// This is true only if we've flushed the buffer to the network. Rather than track an annoying /// boolean for this, we use a quick check on the properties of the buffer itself. This clear /// wants to be delayed as long as possible to maximise the possibility that it does not /// trigger an allocation. 
private var mustClearOutboundBuffer: Bool { outboundBuffer.readerIndex == outboundBuffer.writerIndex && outboundBuffer.readerIndex > 0 } /// A test helper to provide the outbound buffer capacity. internal var _testOnly_outboundBufferCapacity: Int { self.outboundBuffer.capacity } init(allocator: ByteBufferAllocator, maximumPreservedOutboundBufferCapacity: Int) { // We allocate enough space for a single TLS record. We may not actually write a record that size, but we want to // give ourselves the option. We may also write more data than that: if we do, the ByteBuffer will just handle it. self.outboundBuffer = allocator.buffer(capacity: SSL_MAX_RECORD_SIZE) guard let bio = CNIOBoringSSL_BIO_new(ByteBufferBIO.boringSSLBIOMethod) else { preconditionFailure("Unable to initialize custom BIO") } // We now need to complete the BIO initialization. The BIO takes an owned reference to self here, // which is broken on close(). self.bioPtr = bio self.maximumPreservedOutboundBufferCapacity = maximumPreservedOutboundBufferCapacity self.allocator = allocator CNIOBoringSSL_BIO_set_data(self.bioPtr, Unmanaged.passRetained(self).toOpaque()) CNIOBoringSSL_BIO_set_init(self.bioPtr, 1) CNIOBoringSSL_BIO_set_shutdown(self.bioPtr, 1) } deinit { // In debug mode we assert that we've been closed. assert(CNIOBoringSSL_BIO_get_data(self.bioPtr) == nil, "must call close() on ByteBufferBIO before deinit") // On deinit we need to drop our reference to the BIO. CNIOBoringSSL_BIO_free(self.bioPtr) } /// Shuts down the BIO, rendering it unable to be used. /// /// This method is idempotent: it is safe to call more than once. internal func close() { guard let selfRef = CNIOBoringSSL_BIO_get_data(self.bioPtr) else { // Shutdown is safe to call more than once. return } // We consume the original retain of self, and then nil out the ref in the BIO so that this can't happen again. 
Unmanaged.fromOpaque(selfRef).release() CNIOBoringSSL_BIO_set_data(self.bioPtr, nil) } /// Obtain an owned pointer to the backing BoringSSL BIO object. /// /// This pointer is safe to use elsewhere, as it has increased the reference to the backing /// `BIO`. This makes it safe to use with BoringSSL functions that require an owned reference /// (that is, that consume a reference count). /// /// Note that the BIO may not remain useful for long periods of time: if the `ByteBufferBIO` /// object that owns the BIO goes out of scope, the BIO will have its pointers invalidated /// and will no longer be able to send/receive data. internal func retainedBIO() -> UnsafeMutablePointer { CNIOBoringSSL_BIO_up_ref(self.bioPtr) return self.bioPtr } /// Called to obtain the outbound ciphertext written by BoringSSL. /// /// This function obtains a buffer of ciphertext that needs to be written to the network. In a /// normal application, this should be obtained on a call to `flush`. If no bytes have been flushed /// to the network, then this call will return `nil` rather than an empty byte buffer, to help signal /// that the `write` call should be elided. /// /// - returns: A buffer of ciphertext to send to the network, or `nil` if no buffer is available. func outboundCiphertext() -> ByteBuffer? { guard self.outboundBuffer.readableBytes > 0 else { // No data to send. return nil } /// Once we return from this function, we want to account for the bytes we've handed off. defer { self.outboundBuffer.moveReaderIndex(to: self.outboundBuffer.writerIndex) } return self.outboundBuffer } /// Stores a buffer received from the network for delivery to BoringSSL. /// /// Whenever a buffer is received from the network, it is passed to the BIO via this function /// call. In almost all cases this BIO should be immediately consumed by BoringSSL, but in some cases /// it may not be. In those cases, additional calls will cause byte-by-byte copies. 
This should /// be avoided, but usually only happens during asynchronous certificate verification in the /// handshake. /// /// - parameters: /// - buffer: The buffer of ciphertext bytes received from the network. func receiveFromNetwork(buffer: ByteBuffer) { var buffer = buffer if self.inboundBuffer == nil { self.inboundBuffer = buffer } else { self.inboundBuffer!.writeBuffer(&buffer) } } /// Retrieves any inbound data that has not been processed by BoringSSL. /// /// When unwrapping TLS from a connection, there may be application bytes that follow the terminating /// CLOSE_NOTIFY message. Those bytes may be in the buffer passed to this BIO, and so we need to /// retrieve them. /// /// This function extracts those bytes and returns them to the user, and drops the reference to them /// in this BIO. /// /// - returns: The unconsumed `ByteBuffer`, if any. func evacuateInboundData() -> ByteBuffer? { defer { self.inboundBuffer = nil } return self.inboundBuffer } /// BoringSSL has requested to read ciphertext bytes from the network. /// /// This function is invoked whenever BoringSSL is looking to read data. /// /// - parameters: /// - buffer: The buffer for NIO to copy bytes into. /// - returns: The number of bytes we have copied. fileprivate func sslRead(buffer: UnsafeMutableRawBufferPointer) -> CInt { guard var inboundBuffer = self.inboundBuffer else { // We have no bytes to read. Mark this as "needs read retry". 
CNIOBoringSSL_BIO_set_retry_read(self.bioPtr) return -1 } let bytesToCopy = min(buffer.count, inboundBuffer.readableBytes) _ = inboundBuffer.readWithUnsafeReadableBytes { bytePointer in assert( bytePointer.count >= bytesToCopy, "Copying more bytes (\(bytesToCopy)) than fits in readable bytes \((bytePointer.count))" ) assert( buffer.count >= bytesToCopy, "Copying more bytes (\(bytesToCopy) than contained in source buffer (\(buffer.count))" ) buffer.baseAddress!.copyMemory(from: bytePointer.baseAddress!, byteCount: bytesToCopy) return bytesToCopy } // If we have read all the bytes from the inbound buffer, nil it out. if inboundBuffer.readableBytes > 0 { self.inboundBuffer = inboundBuffer } else { self.inboundBuffer = nil } return CInt(bytesToCopy) } /// BoringSSL has requested to write ciphertext bytes from the network. /// /// - parameters: /// - buffer: The buffer for NIO to copy bytes from. /// - returns: The number of bytes we have copied. fileprivate func sslWrite(buffer: UnsafeRawBufferPointer) -> CInt { if self.mustClearOutboundBuffer { // We just flushed, and this is a new write. Let's clear the buffer now. if self.outboundBuffer.capacity > self.maximumPreservedOutboundBufferCapacity { self.outboundBuffer = self.allocator.buffer( capacity: max(buffer.count, self.maximumPreservedOutboundBufferCapacity) ) } else { self.outboundBuffer.clear() assert(!self.mustClearOutboundBuffer) } } let writtenBytes = self.outboundBuffer.writeBytes(buffer) return CInt(writtenBytes) } } ================================================ FILE: Sources/NIOSSL/CustomPrivateKey.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2021 Apple Inc. 
// and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL
import NIOCore

/// ``NIOSSLCustomPrivateKey`` defines the interface of a custom, non-BoringSSL private key.
///
/// In a number of circumstances it is advantageous to store a TLS private key in some form of high-security storage,
/// such as a smart card. In these cases it is not possible to represent the TLS private key directly as a sequence
/// of bytes that BoringSSL will understand.
///
/// This protocol allows a type to implement callbacks that perform the specific operation required by the TLS handshake.
/// Implementers are required to specify what signature algorithms they support, and then must implement **only one** of
/// the ``NIOSSLCustomPrivateKey/sign(channel:algorithm:data:)`` and ``NIOSSLCustomPrivateKey/decrypt(channel:data:)``
/// functions. For elliptic curve keys, implementers should implement ``NIOSSLCustomPrivateKey/sign(channel:algorithm:data:)``.
/// For RSA keys, implementers should implement ``NIOSSLCustomPrivateKey/sign(channel:algorithm:data:)`` and, if supporting
/// RSA key exchange in TLS versions before 1.3, you should also implement ``NIOSSLCustomPrivateKey/decrypt(channel:data:)``.
///
/// If the same ``NIOSSLCustomPrivateKey`` implementation is used by multiple channels at once, then no synchronization
/// is imposed by SwiftNIO. The calls to the protocol requirements will be made on event loop threads, so if further
/// synchronization is required it is up to the implementer to provide it. Note that it is unacceptable to block in
/// these functions, and so potentially blocking operations must delegate to another thread.
public protocol NIOSSLCustomPrivateKey: _NIOPreconcurrencySendable {
    /// The signature algorithms supported by this key.
    var signatureAlgorithms: [SignatureAlgorithm] { get }

    /// The DER bytes for this private key.
    ///
    /// Custom key implementations should return an appropriate value, but by default, an empty array will be returned.
    var derBytes: [UInt8] { get }

    /// Called to perform a signing operation.
    ///
    /// The data being passed to the call has not been hashed, and it is the responsibility of the implementer
    /// to ensure that the data _is_ hashed before use. `algorithm` will control what hash algorithm should be used.
    /// This call will always execute on `channel.eventLoop`.
    ///
    /// This function should be implemented by both EC and RSA keys.
    ///
    /// - parameters:
    ///     - channel: The `Channel` representing the connection for which we are performing the signing operation.
    ///     - algorithm: The ``SignatureAlgorithm`` that should be used to generate the signature.
    ///     - data: The data to be signed.
    /// - returns: An `EventLoopFuture` that will be fulfilled with a `ByteBuffer` containing the signature bytes, if
    ///     the signing operation completes, or that will be failed with a relevant `Error` if the signature could not
    ///     be produced.
    func sign(channel: Channel, algorithm: SignatureAlgorithm, data: ByteBuffer) -> EventLoopFuture<ByteBuffer>

    /// Called to perform a decryption operation.
    ///
    /// The data being passed to the call should be decrypted using _raw_ RSA public key decryption, without padding.
    /// This call will always execute on `channel.eventLoop`.
    ///
    /// This function should only be implemented for RSA keys, and then only if you support RSA key exchange. If you
    /// are only using TLS 1.3 and later, this function is entirely unnecessary and it will never be called.
    ///
    /// - parameters:
    ///     - channel: The `Channel` representing the connection for which we are performing the decryption operation.
    ///     - data: The data to be decrypted.
    /// - returns: An `EventLoopFuture` that will be fulfilled with a `ByteBuffer` containing the decrypted bytes, if
    ///     the decryption operation completes, or that will be failed with a relevant `Error` if the decrypted bytes
    ///     could not be produced.
    func decrypt(channel: Channel, data: ByteBuffer) -> EventLoopFuture<ByteBuffer>
}

extension NIOSSLCustomPrivateKey {
    @inlinable
    public var derBytes: [UInt8] { [] }
}

/// This is a type-erased wrapper that can be used to encapsulate a NIOSSLCustomPrivateKey and provide it with
/// hashability and equatability.
///
/// While generally speaking type-erasure has some nasty performance problems, we need the type-erasure for Hashable conformance.
@usableFromInline
internal struct AnyNIOSSLCustomPrivateKey: NIOSSLCustomPrivateKey, Hashable {
    @usableFromInline
    let _value: NIOSSLCustomPrivateKey

    @usableFromInline
    let _equalsFunction: @Sendable (NIOSSLCustomPrivateKey) -> Bool

    @usableFromInline
    let _hashFunction: @Sendable (inout Hasher) -> Void

    @inlinable
    init<CustomKey: NIOSSLCustomPrivateKey & Hashable>(_ key: CustomKey) {
        self._value = key
        self._equalsFunction = { ($0 as? CustomKey) == key }
        self._hashFunction = { $0.combine(key) }
    }

    // This method does not need to be @inlinable for performance, but it needs to be _at least_
    // @usableFromInline as it's a protocol requirement on a @usableFromInline type.
    @inlinable
    var signatureAlgorithms: [SignatureAlgorithm] {
        self._value.signatureAlgorithms
    }

    @inlinable
    var derBytes: [UInt8] {
        self._value.derBytes
    }

    // This method does not need to be @inlinable for performance, but it needs to be _at least_
    // @usableFromInline as it's a protocol requirement on a @usableFromInline type.
    @inlinable
    func sign(
        channel: Channel,
        algorithm: SignatureAlgorithm,
        data: ByteBuffer
    ) -> EventLoopFuture<ByteBuffer> {
        self._value.sign(channel: channel, algorithm: algorithm, data: data)
    }

    // This method does not need to be @inlinable for performance, but it needs to be _at least_
    // @usableFromInline as it's a protocol requirement on a @usableFromInline type.
    @inlinable
    func decrypt(channel: Channel, data: ByteBuffer) -> EventLoopFuture<ByteBuffer> {
        self._value.decrypt(channel: channel, data: data)
    }

    // This method does not need to be @inlinable for performance, but it needs to be _at least_
    // @usableFromInline as it's a protocol requirement on a @usableFromInline type.
    @inlinable
    func hash(into hasher: inout Hasher) {
        self._hashFunction(&hasher)
    }

    // This method does not need to be @inlinable for performance, but it needs to be _at least_
    // @usableFromInline as it's a protocol requirement on a @usableFromInline type.
    @inlinable
    static func == (lhs: AnyNIOSSLCustomPrivateKey, rhs: AnyNIOSSLCustomPrivateKey) -> Bool {
        lhs._equalsFunction(rhs._value)
    }
}

extension SSLConnection {
    // The custom key configured for this connection, if any: a per-connection override wins
    // over the parent context's configured private key.
    fileprivate var customKey: NIOSSLCustomPrivateKey? {
        let source = self.currentOverride?.privateKey ?? self.parentContext.configuration.privateKey
        guard case .some(.privateKey(let key)) = source, case .custom(let customKey) = key.representation else {
            return nil
        }
        return customKey
    }

    // Kicks off an asynchronous signing operation on the custom key. Always returns
    // ssl_private_key_retry (or failure); the result is delivered later via
    // storeCustomPrivateKeyResult and collected in customPrivateKeyComplete.
    fileprivate func customPrivateKeySign(
        signatureAlgorithm: UInt16,
        in: UnsafeBufferPointer<UInt8>
    ) -> ssl_private_key_result_t {
        precondition(self.customPrivateKeyResult == nil)

        guard let customKey = self.customKey else {
            preconditionFailure()
        }

        let wrappedAlgorithm = SignatureAlgorithm(rawValue: signatureAlgorithm)
        guard customKey.signatureAlgorithms.contains(wrappedAlgorithm) else {
            return ssl_private_key_failure
        }

        // This force-unwrap pair is safe: we can only handshake while we're in a pipeline.
        let channel = self.parentHandler!.channel!
        var inputBytes = channel.allocator.buffer(capacity: `in`.count)
        inputBytes.writeBytes(`in`)

        let result = customKey.sign(channel: channel, algorithm: wrappedAlgorithm, data: inputBytes)
        result.hop(to: channel.eventLoop).assumeIsolated().whenComplete { signingResult in
            self.storeCustomPrivateKeyResult(signingResult, channel: channel)
        }

        return ssl_private_key_retry
    }

    // Kicks off an asynchronous decryption operation on the custom key; same completion
    // flow as customPrivateKeySign.
    fileprivate func customPrivateKeyDecrypt(
        in: UnsafeBufferPointer<UInt8>
    ) -> ssl_private_key_result_t {
        precondition(self.customPrivateKeyResult == nil)

        guard let customKey = self.customKey else {
            preconditionFailure()
        }

        // This force-unwrap pair is safe: we can only handshake while we're in a pipeline.
        let channel = self.parentHandler!.channel!
        var inputBytes = channel.allocator.buffer(capacity: `in`.count)
        inputBytes.writeBytes(`in`)

        let result = customKey.decrypt(channel: channel, data: inputBytes)
        result.hop(to: channel.eventLoop).assumeIsolated().whenComplete { decryptionResult in
            self.storeCustomPrivateKeyResult(decryptionResult, channel: channel)
        }

        return ssl_private_key_retry
    }

    // Collects the stored result of a previously-started sign/decrypt operation. On success,
    // copies the bytes into `out` and rebases it to the written length.
    fileprivate func customPrivateKeyComplete(out: inout UnsafeMutableBufferPointer<UInt8>) -> ssl_private_key_result_t {
        switch self.customPrivateKeyResult {
        case .none:
            return ssl_private_key_retry
        case .some(.failure):
            return ssl_private_key_failure
        case .some(.success(let signingResult)):
            guard signingResult.readableBytes <= out.count else {
                return ssl_private_key_failure
            }
            let (_, lastIndex) = out.initialize(from: signingResult.readableBytesView)
            out = UnsafeMutableBufferPointer(rebasing: out[..<lastIndex])
            return ssl_private_key_success
        }
    }

    fileprivate func storeCustomPrivateKeyResult(_ result: Result<ByteBuffer, Error>, channel: Channel) {
        // When we complete here we need to set our result state, and then ask to respin the handshake.
        // If we can't respin the handshake because we've dropped the parent handler, that's fine, no harm no foul.
        // For that reason, we tolerate both the verify manager and the parent handler being nil.
        channel.eventLoop.assumeIsolated().execute {
            precondition(self.customPrivateKeyResult == nil)
            self.customPrivateKeyResult = result
            self.parentHandler?.resumeHandshake()
        }
    }
}

// We heap-allocate the SSL_PRIVATE_KEY_METHOD we need because we can't define a static stored property with fixed address
// in Swift.
nonisolated(unsafe) internal let customPrivateKeyMethod: UnsafePointer<SSL_PRIVATE_KEY_METHOD> =
    buildCustomPrivateKeyMethod()

private func buildCustomPrivateKeyMethod() -> UnsafePointer<SSL_PRIVATE_KEY_METHOD> {
    let pointer = UnsafeMutablePointer<SSL_PRIVATE_KEY_METHOD>.allocate(capacity: 1)
    pointer.pointee = .init(sign: customKeySign, decrypt: customKeyDecrypt, complete: customKeyComplete)
    return UnsafePointer(pointer)
}

/// This is our entry point from BoringSSL when we've been asked to do a sign.
private func customKeySign(
    ssl: OpaquePointer?,
    out: UnsafeMutablePointer<UInt8>?,
    outLen: UnsafeMutablePointer<Int>?,
    maxOut: size_t,
    signatureAlgorithm: UInt16,
    in: UnsafePointer<UInt8>?,
    inLen: Int
) -> ssl_private_key_result_t {
    guard let ssl = ssl, out != nil, let outLen = outLen, let `in` = `in` else {
        preconditionFailure()
    }

    let connection = SSLConnection.loadConnectionFromSSL(ssl)
    let inBuffer = UnsafeBufferPointer(start: `in`, count: inLen)

    // We never return anything here.
    outLen.pointee = 0
    return connection.customPrivateKeySign(signatureAlgorithm: signatureAlgorithm, in: inBuffer)
}

/// This is our entry point from BoringSSL when we've been asked to do a decrypt.
private func customKeyDecrypt(
    ssl: OpaquePointer?,
    out: UnsafeMutablePointer<UInt8>?,
    outLen: UnsafeMutablePointer<Int>?,
    maxOut: Int,
    in: UnsafePointer<UInt8>?,
    inLen: Int
) -> ssl_private_key_result_t {
    guard let ssl = ssl, out != nil, let outLen = outLen, let `in` = `in` else {
        preconditionFailure()
    }

    let connection = SSLConnection.loadConnectionFromSSL(ssl)
    let inBuffer = UnsafeBufferPointer(start: `in`, count: inLen)

    // We never return anything here.
    outLen.pointee = 0
    return connection.customPrivateKeyDecrypt(in: inBuffer)
}

/// When BoringSSL is asking if we're done with our key operation, we come here.
private func customKeyComplete(
    ssl: OpaquePointer?,
    out: UnsafeMutablePointer<UInt8>?,
    outLen: UnsafeMutablePointer<Int>?,
    maxOut: Int
) -> ssl_private_key_result_t {
    guard let ssl = ssl, let out = out, let outLen = outLen else {
        preconditionFailure()
    }

    let connection = SSLConnection.loadConnectionFromSSL(ssl)
    var outBuffer = UnsafeMutableBufferPointer(start: out, count: maxOut)
    let result = connection.customPrivateKeyComplete(out: &outBuffer)

    if result != ssl_private_key_success {
        // We need to set outLen to zero here.
        outLen.pointee = 0
    } else {
        outLen.pointee = outBuffer.count
    }

    return result
}


================================================
FILE: Sources/NIOSSL/Docs.docc/TLSConfiguration.md
================================================
# ``TLSConfiguration``

## Topics

### Creating a TLS configuration

- ``clientDefault``
- ``makeClientConfiguration()``
- ``makeServerConfiguration(certificateChain:privateKey:)``
- ``makePreSharedKeyConfiguration()``

### Inspecting a configuration

- ``minimumTLSVersion``
- ``maximumTLSVersion``
- ``certificateVerification``
- ``trustRoots``
- ``certificateChain``
- ``privateKey``
- ``applicationProtocols``
- ``shutdownTimeout``
- ``keyLogCallback``
- ``renegotiationSupport``
- ``sslContextCallback``

### Inspecting configuration ciphers

- ``cipherSuites``
- ``verifySignatureAlgorithms``
- ``signingSignatureAlgorithms``
- ``cipherSuiteValues``
- ``curves``
- ``additionalTrustRoots``
- ``sendCANameList``

### Inspecting pre-shared key configurations

- ``pskClientProvider``
- ``pskHint``
- ``pskServerProvider``
- ``pskClientCallback``
- ``pskServerCallback``

### Comparing and Hashing TLS configurations

- ``bestEffortEquals(_:)``
- ``bestEffortHash(into:)``

### Deprecated initializers

- 
``forClient(cipherSuites:minimumTLSVersion:maximumTLSVersion:certificateVerification:trustRoots:certificateChain:privateKey:applicationProtocols:shutdownTimeout:keyLogCallback:)`` - ``forClient(cipherSuites:minimumTLSVersion:maximumTLSVersion:certificateVerification:trustRoots:certificateChain:privateKey:applicationProtocols:shutdownTimeout:keyLogCallback:renegotiationSupport:)`` - ``forClient(cipherSuites:verifySignatureAlgorithms:signingSignatureAlgorithms:minimumTLSVersion:maximumTLSVersion:certificateVerification:trustRoots:certificateChain:privateKey:applicationProtocols:shutdownTimeout:keyLogCallback:renegotiationSupport:)`` - ``forServer(certificateChain:privateKey:cipherSuites:minimumTLSVersion:maximumTLSVersion:certificateVerification:trustRoots:applicationProtocols:shutdownTimeout:keyLogCallback:)`` - ``forServer(certificateChain:privateKey:cipherSuites:verifySignatureAlgorithms:signingSignatureAlgorithms:minimumTLSVersion:maximumTLSVersion:certificateVerification:trustRoots:applicationProtocols:shutdownTimeout:keyLogCallback:)`` ================================================ FILE: Sources/NIOSSL/Docs.docc/index.md ================================================ # ``NIOSSL`` TLS for SwiftNIO. SwiftNIO SSL is a Swift package that contains an implementation of TLS based on BoringSSL. This package allows users of SwiftNIO to write protocol clients and servers that use TLS to secure data in flight. The name is inspired primarily by the names of the library this package uses (BoringSSL), and not because we don't know the name of the protocol. We know the protocol is TLS! ## Overview ### Using SwiftNIO SSL SwiftNIO SSL provides two `ChannelHandler`s to use to secure a data stream: the ``NIOSSLClientHandler`` and the ``NIOSSLServerHandler``. Each of these can be added to a `Channel` to secure the communications on that channel. Additionally, we provide a number of low-level primitives for configuring your TLS connections. These will be shown below. 
To secure a server connection, you will need an X.509 certificate chain in a file
(either PEM or DER, but PEM is far easier), and the associated private key for the leaf certificate.
These objects can then be wrapped up in a ``TLSConfiguration`` object that is used to initialize the
`ChannelHandler`. For example:

```swift
let configuration = TLSConfiguration.makeServerConfiguration(
    certificateChain: try NIOSSLCertificate.fromPEMFile("cert.pem").map { .certificate($0) },
    privateKey: try .privateKey(.init(file: "key.pem", format: .pem))
)
let sslContext = try NIOSSLContext(configuration: configuration)
let server = ServerBootstrap(group: group)
    .childChannelInitializer { channel in
        // important: The handler must be initialized _inside_ the `childChannelInitializer`
        let handler = NIOSSLServerHandler(context: sslContext)

        [...]

        channel.pipeline.addHandler(handler)

        [...]
    }
```

For clients, it is a bit simpler as there is no need to have a certificate chain or private key
(though clients *may* have these things). Setup for clients may be done like this:

```swift
let configuration = TLSConfiguration.makeClientConfiguration()
let sslContext = try NIOSSLContext(configuration: configuration)
let client = ClientBootstrap(group: group)
    .channelInitializer { channel in
        // important: The handler must be initialized _inside_ the `channelInitializer`
        let handler = try NIOSSLClientHandler(context: sslContext)

        [...]

        channel.pipeline.addHandler(handler)

        [...]
    }
```

The most recent versions of SwiftNIO SSL support Swift 5.8 and newer.
The minimum Swift version supported by SwiftNIO SSL releases are detailed below: SwiftNIO SSL | Minimum Swift Version --------------------|---------------------- `2.0.0 ..< 2.14.0` | 5.0 `2.14.0 ..< 2.19.0` | 5.2 `2.19.0 ..< 2.23.0` | 5.4 `2.23.0 ..< 2.23.2` | 5.5.2 `2.23.2 ..< 2.26.0` | 5.6 `2.26.0 ..< 2.27.0` | 5.7 `2.27.0 ...` | 5.8 ## Topics ### Articles - - ### Channel Handlers - ``NIOSSLClientHandler`` - ``NIOSSLServerHandler`` - ``NIOSSLHandler`` ### Certificates and Keys - ``NIOSSLCertificate`` - ``NIOSSLPrivateKey`` - ``NIOSSLPassphraseCallback`` - ``NIOSSLPassphraseSetter`` - ``NIOSSLPublicKey`` - ``NIOSSLCustomPrivateKey`` - ``NIOSSLPKCS12Bundle`` ### Custom Verification Callbacks - ``NIOSSLCustomVerificationCallback`` - ``NIOSSLVerificationCallback`` - ``NIOSSLVerificationResult`` ### Configuration and State - ``TLSConfiguration`` - ``TLSVersion`` - ``NIOTLSCipher`` - ``NIOSSLSerializationFormats`` - ``CertificateVerification`` - ``NIORenegotiationSupport`` - ``SignatureAlgorithm`` - ``defaultCipherSuites`` - ``NIOSSLTrustRoots`` - ``NIOSSLAdditionalTrustRoots`` - ``NIOSSLCertificateSource`` - ``NIOSSLPrivateKeySource`` - ``NIOSSLKeyLogCallback`` - ``NIOPSKClientIdentityCallback`` - ``NIOPSKServerIdentityCallback`` - ``PSKServerIdentityResponse`` - ``PSKClientIdentityResponse`` - ``NIOSSLContext`` ### Generic TLS Abstractions - ``NIOSSLClientTLSProvider`` ### Utility Objects - ``NIOSSLSecureBytes`` - ``NIOSSLObjectIdentifier`` ### Errors - ``NIOSSLError`` - ``BoringSSLError`` - ``NIOBoringSSLErrorStack`` - ``BoringSSLInternalError`` - ``NIOSSLCloseTimedOutError`` - ``NIOTLSUnwrappingError`` - ``NIOSSLExtraError`` ================================================ FILE: Sources/NIOSSL/Docs.docc/quantum-secure-tls.md ================================================ # Quantum-secure TLS To enable quantum-secure algorithms in swift-nio-ssl requires minimal configuration changes. 
While the algorithms are being standardised they are off by default, but once the code points are final we will be enabling them by default. In the meantime, if you wish to add support, you can enable ``NIOTLSCurve/x25519_MLKEM768`` with the following change: ```swift tlsConfiguration.curves = [.x25519_MLKEM768, .x25519, .secp384r1] ``` This configuration offers both a post-quantum hybrid key-establishment mechanism, as well as classical options. This is an appropriate choice for general-purpose use as it can support older clients, but it may not be appropriate for your use-case. If you are aiming to support _only_ post-quantum key exchange, you can do so by setting only PQ or hybrid KEMs: ```swift tlsConfiguration.curves = [.x25519_MLKEM768] ``` ================================================ FILE: Sources/NIOSSL/Docs.docc/trust-roots-behavior.md ================================================ # Trust Roots and Certificate Validation Behavior Understanding how ``TLSConfiguration/trustRoots`` and ``TLSConfiguration/additionalTrustRoots`` affect certificate validation across different platforms. ## Overview SwiftNIO SSL provides two properties in ``TLSConfiguration`` for configuring certificate validation: ``TLSConfiguration/trustRoots`` and ``TLSConfiguration/additionalTrustRoots``. The behavior of these properties differs significantly between Apple platforms and other platforms, which can lead to unexpected certificate validation failures. This article explains the behavioral matrix and helps you choose the right configuration for your use case. 
## Certificate Validation Backends SwiftNIO SSL uses different certificate validation backends depending on your configuration: - **SecTrust** (Apple platforms only): The system's native certificate validator, which is stricter and follows Apple's certificate validation policies - **BoringSSL**: The embedded certificate validator, which is more permissive and consistent across platforms ## Behavioral Matrix The choice of validation backend depends on your ``TLSConfiguration/trustRoots`` and ``TLSConfiguration/additionalTrustRoots`` settings: | Configuration | Apple Platforms | Other Platforms | |---------------|-----------------|-----------------| | `trustRoots = .default`, no additional trust roots | SecTrust with default settings | BoringSSL with system PEM files | | `trustRoots = nil`, no additional trust roots | SecTrust with default settings | BoringSSL with system PEM files | | `trustRoots = .file(_)`, no additional trust roots | BoringSSL with specified file | BoringSSL with specified file | | `trustRoots = .certificates(_)`, no additional trust roots | BoringSSL with specified certificates | BoringSSL with specified certificates | | `trustRoots = .default`, additional trust roots provided | SecTrust with additional roots via `SecTrustSetAnchorCertificates` | BoringSSL with system PEM files and additional roots | | `trustRoots = nil`, additional trust roots provided | SecTrust with additional roots via `SecTrustSetAnchorCertificates` | BoringSSL with system PEM files and additional roots | | `trustRoots = .file(_)`, additional trust roots provided | BoringSSL with specified file and additional roots | BoringSSL with specified file and additional roots | | `trustRoots = .certificates(_)`, additional trust roots provided | BoringSSL with specified certificates and additional roots | BoringSSL with specified certificates and additional roots | ## Key Behavioral Differences **SecTrust:** - Used when `trustRoots` is `.default` or `nil` on Apple platforms - 
Enforces stricter certificate chain validation rules - May reject certificate chains that BoringSSL accepts - Behaves the same way that Safari and most other browsers do **BoringSSL:** - Used in all other cases, including on Apple platforms when `trustRoots` is `.file(_)` or `.certificates(_)` - More lenient about certificate formatting and extensions - Consistent behavior across all platforms ## Debugging Certificate Issues When certificate validation fails, check the system logs for detailed error messages: ```bash # macOS/iOS system logs often contain detailed certificate validation errors log show --predicate 'subsystem == "com.apple.security"' --last 1m ``` The error messages will help you understand whether the issue is with certificate formatting, missing extensions, or chain validation problems. ## Topics ### Related Configuration - ``TLSConfiguration/trustRoots`` - ``TLSConfiguration/additionalTrustRoots`` - ``TLSConfiguration/certificateVerification`` - ``NIOSSLTrustRoots`` - ``NIOSSLAdditionalTrustRoots`` ================================================ FILE: Sources/NIOSSL/IdentityVerification.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import CNIOLinux import NIOCore #if canImport(Darwin) import Darwin.C #elseif canImport(Musl) import Musl #elseif canImport(Glibc) import Glibc #elseif canImport(Android) import Android #else #error("unsupported os") #endif private let asciiIDNAIdentifier: ArraySlice<UInt8> = Array("xn--".utf8)[...]
private let asciiCapitals: ClosedRange<UInt8> = (UInt8(ascii: "A")...UInt8(ascii: "Z")) private let asciiLowercase: ClosedRange<UInt8> = (UInt8(ascii: "a")...UInt8(ascii: "z")) private let asciiNumbers: ClosedRange<UInt8> = (UInt8(ascii: "0")...UInt8(ascii: "9")) private let asciiHyphen: UInt8 = UInt8(ascii: "-") private let asciiPeriod: UInt8 = UInt8(ascii: ".") private let asciiAsterisk: UInt8 = UInt8(ascii: "*") extension String { /// Calls `fn` with an `Array<UInt8>` pointing to a /// non-NULL-terminated sequence of ASCII bytes. If the string this method /// is called on contains non-ASCII code points, this method throws. /// /// This method exists to avoid doing repeated loops over the string buffer. /// In a naive implementation we'd loop at least three times: once to lowercase /// the string, once to get a buffer pointer to a contiguous buffer, and once /// to confirm the string is ASCII. Here we can do that all in one loop. fileprivate func withLowercaseASCIIBuffer<T>(_ fn: ([UInt8]) throws -> T) throws -> T { let buffer: [UInt8] = try self.utf8.map { codeUnit in guard codeUnit.isValidDNSCharacter else { throw NIOSSLExtraError.serverHostnameImpossibleToMatch(hostname: self) } // We know we have only ASCII printables, we can safely unconditionally set the 6th bit to 1 to lowercase. return codeUnit | (0x20) } return try fn(buffer) } } extension Collection { /// Splits a collection in two around a given index. This index may be nil, in which case the split /// will occur around the end. fileprivate func splitAroundIndex(_ index: Index?) -> (SubSequence, SubSequence) { guard let index = index else { return (self[...], self[self.endIndex...]) } let subsequentIndex = self.index(after: index) return (self[..<index], self[subsequentIndex...]) } } extension Sequence<UInt8> { fileprivate func caseInsensitiveElementsEqual(_ other: some Sequence<UInt8>) -> Bool { self.elementsEqual(other) { $0.lowercased() == $1.lowercased() } } } extension UInt8 { /// Whether this character is a valid DNS character, which is the ASCII /// letters, digits, the hyphen, and the period.
fileprivate var isValidDNSCharacter: Bool { switch self { case asciiCapitals, asciiLowercase, asciiNumbers, asciiHyphen, asciiPeriod: return true default: return false } } fileprivate func lowercased() -> UInt8 { asciiCapitals.contains(self) ? self | 0x20 : self } } /// Validates that a given leaf certificate is valid for a service. /// /// This function implements the logic for service validation as specified by /// RFC 6125 (https://tools.ietf.org/search/rfc6125), which loosely speaking /// defines the common algorithm used for validating that an X.509 certificate /// is valid for a given service /// /// The algorithm we're implementing is specified in RFC 6125 Section 6 if you want to /// follow along at home. internal func validIdentityForService( serverHostname: String?, socketAddress: SocketAddress, leafCertificate: NIOSSLCertificate ) throws -> Bool { if let serverHostname = serverHostname { return try serverHostname.withLowercaseASCIIBuffer { try validIdentityForService( serverHostname: $0, socketAddress: socketAddress, leafCertificate: leafCertificate ) } } else { return try validIdentityForService( serverHostname: nil as [UInt8]?, socketAddress: socketAddress, leafCertificate: leafCertificate ) } } private func validIdentityForService( serverHostname: [UInt8]?, socketAddress: SocketAddress, leafCertificate: NIOSSLCertificate ) throws -> Bool { // Before we begin, we want to locate the first period in our own domain name. We need to do // this because we may need to match a wildcard label. var serverHostnameSlice: ArraySlice<UInt8>? = nil var firstPeriodIndex: ArraySlice<UInt8>.Index? = nil if let serverHostname = serverHostname { var tempServerHostnameSlice = serverHostname[...]
// Strip trailing period if tempServerHostnameSlice.last == .some(asciiPeriod) { tempServerHostnameSlice = tempServerHostnameSlice.dropLast() } firstPeriodIndex = tempServerHostnameSlice.firstIndex(of: asciiPeriod) serverHostnameSlice = tempServerHostnameSlice } // We want to begin by checking the subjectAlternativeName fields. If there are any fields // in there that we could validate against (either IP or hostname) we will validate against // them, and then refuse to check the commonName field. If there are no SAN fields to // validate against, we'll check commonName. var checkedMatch = false for name in leafCertificate._subjectAlternativeNames() { checkedMatch = true switch name.nameType { case .dnsName: let dnsName = Array(name.contents) if matchHostname(ourHostname: serverHostnameSlice, firstPeriodIndex: firstPeriodIndex, dnsName: dnsName) { return true } case .ipAddress: if let ip = _SubjectAlternativeName.IPAddress(name), matchIpAddress(socketAddress: socketAddress, certificateIP: ip) { return true } default: continue } } guard !checkedMatch else { // We had some subject alternative names, but none matched. We failed here. return false } // In the absence of any matchable subjectAlternativeNames, we can fall back to checking // the common name. This is a deprecated practice, and in a future release we should // stop doing this. guard let commonName = leafCertificate.commonName() else { // No CN, no match. return false } // We have a common name. Let's check it against the provided hostname. We never check // the common name against the IP address. return matchHostname(ourHostname: serverHostnameSlice, firstPeriodIndex: firstPeriodIndex, dnsName: commonName) } private func matchHostname( ourHostname: ArraySlice<UInt8>?, firstPeriodIndex: ArraySlice<UInt8>.Index?, dnsName: [UInt8] ) -> Bool { guard let ourHostname = ourHostname else { // No server hostname was provided, so we cannot match. return false } // Now we validate the cert hostname.
var dnsName = ArraySlice(dnsName) guard let validatedHostname = AnalysedCertificateHostname(baseName: &dnsName) else { // This is a hostname we can't match, return false. return false } return validatedHostname.validMatchForName(ourHostname, firstPeriodIndexForName: firstPeriodIndex) } private func matchIpAddress(socketAddress: SocketAddress, certificateIP: _SubjectAlternativeName.IPAddress) -> Bool { // These match if the two underlying IP address structures match. switch (socketAddress, certificateIP) { case (.v4(let address), .ipv4(var addr2)): var addr1 = address.address.sin_addr return memcmp(&addr1, &addr2, MemoryLayout<in_addr>.size) == 0 case (.v6(let address), .ipv6(var addr2)): var addr1 = address.address.sin6_addr return memcmp(&addr1, &addr2, MemoryLayout<in6_addr>.size) == 0 default: // Different protocol families, no match. return false } } /// This structure contains a certificate hostname that has been analysed and prepared for matching. /// /// A certificate hostname that is valid for matching meets the following criteria: /// /// 1. Contains only valid DNS characters, plus the ASCII asterisk. /// 2. Contains zero or one ASCII asterisks. /// 3. Any ASCII asterisk present must be in the first DNS label (i.e. before the first period). /// 4. If the first label contains an ASCII asterisk, it must not also be an IDN A label. /// /// Answering these questions potentially relies on multiple searches through the hostname. That's not /// ideal: it'd be better to do a single search that both validates the domain name meets the criteria /// and that also records information needed to validate that the name matches the one we're searching for. /// That's what this structure does. private struct AnalysedCertificateHostname { /// The type we use to store the base name. The other types on this object are chosen relative to that.
fileprivate typealias BaseNameType = ArraySlice<UInt8> private var name: NameType fileprivate init?(baseName: inout BaseNameType) { // First, strip a trailing period from this name. if baseName.last == .some(asciiPeriod) { baseName = baseName.dropLast() } // Ok, start looping. var index = baseName.startIndex var firstPeriodIndex: BaseNameType.Index? = nil var asteriskIndex: BaseNameType.Index? = nil while index < baseName.endIndex { switch baseName[index] { case asciiPeriod where firstPeriodIndex == nil: // This is the first period we've seen, great. Future // periods will be ignored. firstPeriodIndex = index case asciiCapitals, asciiLowercase, asciiNumbers, asciiHyphen, asciiPeriod: // Valid character, no notes. break case asciiAsterisk where asteriskIndex == nil && firstPeriodIndex == nil: // Found an asterisk, it's the first one, and it precedes any periods. asteriskIndex = index case asciiAsterisk: // An extra asterisk, or an asterisk after a period, is unacceptable. return nil default: // Unacceptable character in the name. return nil } baseName.formIndex(after: &index) } // Now we can finally initialize ourself. if let asteriskIndex = asteriskIndex { // One final check: if we found a wildcard, we need to confirm that the first label isn't an IDNA A label. if baseName.prefix(4).caseInsensitiveElementsEqual(asciiIDNAIdentifier) { return nil } self.name = .wildcard(baseName, asteriskIndex: asteriskIndex, firstPeriodIndex: firstPeriodIndex) } else { self.name = .singleName(baseName) } } /// Whether this parsed name is a valid match for the one passed in. fileprivate func validMatchForName(_ target: BaseNameType, firstPeriodIndexForName: BaseNameType.Index?) -> Bool { switch self.name { case .singleName(let baseName): // For non-wildcard names, we just do a straightforward string comparison.
return baseName.caseInsensitiveElementsEqual(target) case .wildcard(let baseName, let asteriskIndex, let firstPeriodIndex): // The wildcard can appear more-or-less anywhere in the first label. The wildcard // character itself can match any number of characters, though it must match at least // one. // The algorithm for this is simple: first, we split the two names on their first period to get their // first label and their subsequent components. Second, we check that the subcomponents match a straightforward // bytewise comparison: if that fails, we can avoid the expensive wildcard checking operation. // Third, we split the wildcard label on the wildcard character, and confirm that // the characters *before* the wildcard are the prefix of the target first label, and that the // characters *after* the wildcard are the suffix of the target first label. This works well because // the empty string is a prefix and suffix of all strings. let (wildcardLabel, remainingComponents) = baseName.splitAroundIndex(firstPeriodIndex) let (targetFirstLabel, targetRemainingComponents) = target.splitAroundIndex(firstPeriodIndexForName) guard remainingComponents.caseInsensitiveElementsEqual(targetRemainingComponents) else { // Wildcard is irrelevant, the remaining components don't match. return false } guard targetFirstLabel.count >= wildcardLabel.count else { // The target label cannot possibly match the wildcard.
return false } let (wildcardLabelPrefix, wildcardLabelSuffix) = wildcardLabel.splitAroundIndex(asteriskIndex) let targetBeforeWildcard = targetFirstLabel.prefix(wildcardLabelPrefix.count) let targetAfterWildcard = targetFirstLabel.suffix(wildcardLabelSuffix.count) let leadingBytesMatch = targetBeforeWildcard.caseInsensitiveElementsEqual(wildcardLabelPrefix) let trailingBytesMatch = targetAfterWildcard.caseInsensitiveElementsEqual(wildcardLabelSuffix) return leadingBytesMatch && trailingBytesMatch } } } extension AnalysedCertificateHostname { private enum NameType { case wildcard(BaseNameType, asteriskIndex: BaseNameType.Index, firstPeriodIndex: BaseNameType.Index?) case singleName(BaseNameType) } } ================================================ FILE: Sources/NIOSSL/LinuxCABundle.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2019-2021 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// #if os(Linux) || os(FreeBSD) /// The path to the root CA bundle file. /// /// May be nil if we could not find the root CA bundle file. internal let rootCAFilePath: String? = locateRootCAFile() /// The path to the root CA bundle directory. /// /// May be nil if we could not find the root CA bundle directory. internal let rootCADirectoryPath: String? = locateRootCADirectory() /// This is a list of root CA file search paths. This list contains paths as validated against several distributions. 
/// If you are attempting to use SwiftNIO SSL on a platform that is not covered here and certificate validation is /// failing, please open a pull request that adds the appropriate search path. private let rootCAFileSearchPaths = [ "/etc/ssl/certs/ca-certificates.crt", // Ubuntu, Debian, Arch, Alpine, "/etc/pki/tls/certs/ca-bundle.crt", // Fedora ] /// This is a list of root CA directory search paths. /// /// This list contains paths as validated against several distributions. If you are aware of a CA bundle on a specific distribution /// that is not present here, please open a pull request that adds the appropriate search path. /// Some distributions do not ship CA directories: as such, it is not a problem if a distribution that is present in rootCAFileSearchPaths /// is not present in this list. private let rootCADirectorySearchPaths = [ "/etc/ssl/certs" // Ubuntu, Debian, Arch, Alpine ] private func locateRootCAFile() -> String? { // We need to find the root CA file. We have a list of search paths: let's use them. rootCAFileSearchPaths.first(where: { FileSystemObject.pathType(path: $0) == .file }) } private func locateRootCADirectory() -> String? { rootCADirectorySearchPaths.first(where: { FileSystemObject.pathType(path: $0) == .directory }) } #endif ================================================ FILE: Sources/NIOSSL/NIOSSLClientHandler.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2021 Apple Inc. 
and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import NIOCore #if canImport(Darwin) import Darwin.C #elseif canImport(Musl) import Musl #elseif canImport(Glibc) import Glibc #elseif canImport(Android) import Android #else #error("unsupported os") #endif extension String { private func isIPAddress() -> Bool { // We need some scratch space to let inet_pton write into. var ipv4Addr = in_addr() var ipv6Addr = in6_addr() return self.withCString { ptr in inet_pton(AF_INET, ptr, &ipv4Addr) == 1 || inet_pton(AF_INET6, ptr, &ipv6Addr) == 1 } } func validateSNIServerName() throws { guard !self.isIPAddress() else { throw NIOSSLExtraError.cannotUseIPAddressInSNI(ipAddress: self) } // no 0 bytes guard !self.utf8.contains(0) else { throw NIOSSLExtraError.invalidSNIHostname } guard (1...255).contains(self.utf8.count) else { throw NIOSSLExtraError.invalidSNIHostname } } } /// A channel handler that wraps a channel in TLS using NIOSSL. /// This handler can be used in channels that are acting as the client /// in the TLS dialog. For server connections, use the ``NIOSSLServerHandler``. public final class NIOSSLClientHandler: NIOSSLHandler { /// Construct a new ``NIOSSLClientHandler`` with the given `context` and a specific `serverHostname`. /// /// - parameters: /// - context: The ``NIOSSLContext`` to use on this connection. /// - serverHostname: The hostname of the server we're trying to connect to, if known. This will be used in the SNI extension, /// and used to validate the server certificate. public convenience init(context: NIOSSLContext, serverHostname: String?) 
throws { try self.init( context: context, serverHostname: serverHostname, optionalCustomVerificationCallbackManager: nil, optionalAdditionalPeerCertificateVerificationCallback: nil ) } @available(*, deprecated, renamed: "init(context:serverHostname:customVerificationCallback:)") public init( context: NIOSSLContext, serverHostname: String?, verificationCallback: NIOSSLVerificationCallback? = nil ) throws { guard let connection = context.createConnection() else { fatalError("Failed to create new connection in NIOSSLContext") } connection.setConnectState() if let serverHostname = serverHostname { try serverHostname.validateSNIServerName() // IP addresses must not be provided in the SNI extension, so filter them. do { try connection.setServerName(name: serverHostname) } catch { preconditionFailure( "Bug in NIOSSL (please report): \(Array(serverHostname.utf8)) passed NIOSSL's hostname test but failed in BoringSSL." ) } } if let verificationCallback = verificationCallback { connection.setVerificationCallback(verificationCallback) } super.init( connection: connection, shutdownTimeout: context.configuration.shutdownTimeout, additionalPeerCertificateVerificationCallback: nil, maxWriteSize: NIOSSLHandler.defaultMaxWriteSize, configuration: Configuration() ) } /// Construct a new ``NIOSSLClientHandler`` with the given `context` and a specific `serverHostname`. /// /// - parameters: /// - context: The ``NIOSSLContext`` to use on this connection. /// - serverHostname: The hostname of the server we're trying to connect to, if known. This will be used in the SNI extension, /// and used to validate the server certificate. /// - customVerificationCallback: A callback to use that will override NIOSSL's normal verification logic. /// /// If set, this callback is provided the certificates presented by the peer. NIOSSL will not have pre-processed them. 
The callback will not be used if the /// ``TLSConfiguration`` that was used to construct the ``NIOSSLContext`` has ``TLSConfiguration/certificateVerification`` set to ``CertificateVerification/none``. /// /// - Note: Use ``init(context:serverHostname:customVerificationCallbackWithMetadata:)`` to provide a custom /// verification callback where the peer's *validated* certificate chain can be returned. This data can then be /// accessed from the handler. public convenience init( context: NIOSSLContext, serverHostname: String?, customVerificationCallback: @escaping NIOSSLCustomVerificationCallback ) throws { try self.init( context: context, serverHostname: serverHostname, optionalCustomVerificationCallbackManager: CustomVerifyManager(callback: customVerificationCallback), optionalAdditionalPeerCertificateVerificationCallback: nil ) } /// Construct a new ``NIOSSLClientHandler`` with the given `context` and a specific `serverHostname`. /// /// - parameters: /// - context: The ``NIOSSLContext`` to use on this connection. /// - serverHostname: The hostname of the server we're trying to connect to, if known. This will be used in the SNI extension, /// and used to validate the server certificate. /// - customVerificationCallbackWithMetadata: A callback to use that will override NIOSSL's normal verification /// logic. If validation is successful, the peer's validated certificate chain can be returned, and later /// accessed via ``NIOSSLHandler/peerValidatedCertificateChain``. The callback will not be used if the /// ``TLSConfiguration`` that was used to construct the ``NIOSSLContext`` has /// ``TLSConfiguration/certificateVerification`` set to ``CertificateVerification/none``. /// /// - This callback is provided the certificates presented by the peer. NIOSSL will not have pre-processed /// them. Therefore, a validated chain must be derived *within* this callback (potentially involving fetching /// additional intermediate certificates). 
The *validated* certificate chain returned in the promise result
    /// **must** be a verified path to a trusted root. Importantly, the certificates presented by the peer should
    /// not be assumed to be valid.
    public convenience init(
        context: NIOSSLContext,
        serverHostname: String?,
        customVerificationCallbackWithMetadata: @escaping NIOSSLCustomVerificationCallbackWithMetadata
    ) throws {
        // Pass straight through to the designated initializer: no additional-verification
        // callback, default handler configuration.
        try self.init(
            context: context,
            serverHostname: serverHostname,
            optionalCustomVerificationCallbackManager: CustomVerifyManager(
                callback: customVerificationCallbackWithMetadata
            ),
            optionalAdditionalPeerCertificateVerificationCallback: nil
        )
    }

    /// Construct a new ``NIOSSLClientHandler`` with the given `context` and a specific `serverHostname`.
    ///
    /// - parameters:
    ///     - context: The ``NIOSSLContext`` to use on this connection.
    ///     - serverHostname: The hostname of the server we're trying to connect to, if known. This will be used in the SNI extension,
    ///         and used to validate the server certificate.
    ///     - customVerificationCallback: A callback to use that will override NIOSSL's normal verification logic.
    ///
    ///         If set, this callback is provided the certificates presented by the peer. NIOSSL will not have pre-processed them. The callback will not be used if the
    ///         ``TLSConfiguration`` that was used to construct the ``NIOSSLContext`` has ``TLSConfiguration/certificateVerification`` set to ``CertificateVerification/none``.
    ///     - configuration: Configuration for this handler.
    ///
    /// - Note: Use ``init(context:serverHostname:configuration:customVerificationCallbackWithMetadata:)`` to provide a
    ///     custom verification callback where the peer's *validated* certificate chain can be returned. This data can
    ///     then be accessed from the handler.
    public convenience init(
        context: NIOSSLContext,
        serverHostname: String?,
        customVerificationCallback: NIOSSLCustomVerificationCallback? = nil,
        configuration: Configuration
    ) throws {
        // Pass through to the designated initializer. The optional callback is wrapped in a
        // CustomVerifyManager only when present.
        try self.init(
            context: context,
            serverHostname: serverHostname,
            optionalCustomVerificationCallbackManager: customVerificationCallback.map(CustomVerifyManager.init),
            optionalAdditionalPeerCertificateVerificationCallback: nil,
            configuration: configuration
        )
    }

    /// Construct a new ``NIOSSLClientHandler`` with the given `context` and a specific `serverHostname`.
    ///
    /// - parameters:
    ///     - context: The ``NIOSSLContext`` to use on this connection.
    ///     - serverHostname: The hostname of the server we're trying to connect to, if known. This will be used in the SNI extension,
    ///         and used to validate the server certificate.
    ///     - configuration: Configuration for this handler.
    ///     - customVerificationCallbackWithMetadata: A callback to use that will override NIOSSL's normal verification
    ///         logic. If validation is successful, the peer's validated certificate chain can be returned, and later
    ///         accessed via ``NIOSSLHandler/peerValidatedCertificateChain``. The callback will not be used if the
    ///         ``TLSConfiguration`` that was used to construct the ``NIOSSLContext`` has
    ///         ``TLSConfiguration/certificateVerification`` set to ``CertificateVerification/none``.
    ///
    ///         - This callback is provided the certificates presented by the peer. NIOSSL will not have pre-processed
    ///           them. Therefore, a validated chain must be derived *within* this callback (potentially involving fetching
    ///           additional intermediate certificates). The *validated* certificate chain returned in the promise result
    ///           **must** be a verified path to a trusted root. Importantly, the certificates presented by the peer should
    ///           not be assumed to be valid.
    public convenience init(
        context: NIOSSLContext,
        serverHostname: String?,
        configuration: Configuration,
        customVerificationCallbackWithMetadata: @escaping NIOSSLCustomVerificationCallbackWithMetadata
    ) throws {
        try self.init(
            context: context,
            serverHostname: serverHostname,
            optionalCustomVerificationCallbackManager: CustomVerifyManager(
                callback: customVerificationCallbackWithMetadata
            ),
            optionalAdditionalPeerCertificateVerificationCallback: nil,
            configuration: configuration
        )
    }

    /// - warning: This API is not guaranteed to be stable and is likely to be changed without further notice, hence the underscore prefix.
    public static func _makeSSLClientHandler(
        context: NIOSSLContext,
        serverHostname: String?,
        additionalPeerCertificateVerificationCallback: @escaping _NIOAdditionalPeerCertificateVerificationCallback
    ) throws -> Self {
        try .init(
            context: context,
            serverHostname: serverHostname,
            optionalCustomVerificationCallbackManager: nil,
            optionalAdditionalPeerCertificateVerificationCallback: additionalPeerCertificateVerificationCallback
        )
    }

    // This exists to handle the explosion of initializers we got when I tried to deprecate the first one. At least they all pass through one path now.
    internal init(
        context: NIOSSLContext,
        serverHostname: String?,
        optionalCustomVerificationCallbackManager: CustomVerifyManager?,
        optionalAdditionalPeerCertificateVerificationCallback: _NIOAdditionalPeerCertificateVerificationCallback?,
        maxWriteSize: Int = defaultMaxWriteSize,
        configuration: Configuration = .init()
    ) throws {
        guard let connection = context.createConnection() else {
            fatalError("Failed to create new connection in NIOSSLContext")
        }

        connection.setConnectState()
        if let serverHostname = serverHostname {
            try serverHostname.validateSNIServerName()

            // IP addresses must not be provided in the SNI extension, so filter them.
            do {
                try connection.setServerName(name: serverHostname)
            } catch {
                preconditionFailure(
                    "Bug in NIOSSL (please report): \(Array(serverHostname.utf8)) passed NIOSSL's hostname test but failed in BoringSSL."
                )
            }
        }

        if let verificationCallbackManager = optionalCustomVerificationCallbackManager {
            connection.setCustomVerificationCallback(verificationCallbackManager)
        }

        super.init(
            connection: connection,
            shutdownTimeout: context.configuration.shutdownTimeout,
            additionalPeerCertificateVerificationCallback: optionalAdditionalPeerCertificateVerificationCallback,
            maxWriteSize: maxWriteSize,
            configuration: configuration
        )
    }
}

// This conformance is technically redundant - Swift 6.2 compiler finally caught this
#if compiler(<6.2)
@available(*, unavailable)
extension NIOSSLClientHandler: Sendable {}
#endif



================================================
FILE: Sources/NIOSSL/NIOSSLHandler+Configuration.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2023 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

extension NIOSSLHandler {
    /// Configuration for a specific instance of ``NIOSSLHandler``, either client or server.
    ///
    /// This type is distinct from ``TLSConfiguration`` because it does not contain settings that
    /// apply to TLS itself. Instead, this configuration manages how the ``NIOSSLHandler`` itself
    /// operates.
    public struct Configuration: Hashable, Sendable {
        /// The maximum number of bytes we'll preserve in the outbound buffer that ``NIOSSLHandler``
        /// holds.
        ///
        /// This buffer is not typically deallocated, as it is re-used throughout the lifetime of
        /// the program. In cases where there are extremely large peak writes that are outliers in
        /// the code, the buffer may remain excessively large.
        ///
        /// Set this value to a lower value to avoid preserving too much memory. This will cause
        /// ``NIOSSLHandler`` to reallocate memory more often, which can inhibit performance, so
        /// avoid lowering this value unless you're running into trouble with memory pressure and
        /// are confident that ``NIOSSLHandler`` is at fault.
        public var maximumPreservedOutboundBufferCapacity: Int

        /// Creates the default configuration: an unbounded preserved outbound buffer capacity.
        public init() {
            self.maximumPreservedOutboundBufferCapacity = .max
        }
    }
}



================================================
FILE: Sources/NIOSSL/NIOSSLHandler.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL
import NIOCore
import NIOTLS

/// The base class for all NIOSSL handlers.
///
/// This class cannot actually be instantiated by users directly: instead, users must select
/// which mode they would like their handler to operate in, client or server.
///
/// This class exists to deal with the reality that for almost the entirety of the lifetime
/// of a TLS connection there is no meaningful distinction between a server and a client.
/// For this reason almost the entirety of the implementation for the channel and server
/// handlers in NIOSSL is shared, in the form of this parent class.
public class NIOSSLHandler: ChannelInboundHandler, ChannelOutboundHandler, RemovableChannelHandler {
    /// The default maximum write size. We cannot pass writes larger than this size to
    /// BoringSSL.
    ///
    /// We have this default here instead of hardcoded into the software for testing purposes.
    internal static let defaultMaxWriteSize = Int(CInt.max)

    public typealias OutboundIn = ByteBuffer
    public typealias OutboundOut = ByteBuffer
    public typealias InboundIn = ByteBuffer
    public typealias InboundOut = ByteBuffer

    /// The connection lifecycle state machine. Transitions are driven by handshake progress,
    /// shutdown requests, half-closure events, and I/O errors.
    private enum ConnectionState {
        case idle
        case handshaking
        case additionalVerification
        case active
        case unwrapping(Scheduled)
        case closing(Scheduled)
        case unwrapped
        case inputClosed
        case outputClosed
        case closed
    }

    private var state: ConnectionState = .idle

    private var connection: SSLConnection
    // Re-usable buffer that decrypted plaintext is decoded into; nil'd out while in use to
    // avoid holding two references (see doDecodeData).
    private var plaintextReadBuffer: ByteBuffer?
    private var bufferedActions: MarkedCircularBuffer
    private var closeOutputPromise: EventLoopPromise?
    private var closePromise: EventLoopPromise?
    private var shutdownPromise: EventLoopPromise?
    private var didDeliverData: Bool = false
    private var storedContext: ChannelHandlerContext? = nil
    private var shutdownTimeout: TimeAmount
    private let additionalPeerCertificateVerificationCallback: _NIOAdditionalPeerCertificateVerificationCallback?
    private let maxWriteSize: Int
    private var configuration: Configuration

    /// The channel this handler is currently installed in, if any.
    internal var channel: Channel? {
        self.storedContext?.channel
    }

    internal init(
        connection: SSLConnection,
        shutdownTimeout: TimeAmount,
        additionalPeerCertificateVerificationCallback: _NIOAdditionalPeerCertificateVerificationCallback?,
        maxWriteSize: Int,
        configuration: Configuration
    ) {
        let tlsConfiguration = connection.parentContext.configuration
        precondition(
            additionalPeerCertificateVerificationCallback == nil || tlsConfiguration.certificateVerification != .none,
            "TLSConfiguration.certificateVerification must be either set to .optionalVerification, .noHostnameVerification, or .fullVerification if additionalPeerCertificateVerificationCallback is specified"
        )
        self.connection = connection
        // 96 brings the total size of the buffer to just shy of one page
        self.bufferedActions = MarkedCircularBuffer(initialCapacity: 96)
        self.shutdownTimeout = shutdownTimeout
        self.additionalPeerCertificateVerificationCallback = additionalPeerCertificateVerificationCallback
        self.maxWriteSize = maxWriteSize
        self.configuration = configuration
    }

    /// Wires the handler up to the channel: stores the context, configures the connection's
    /// allocator and event loop, and allocates the plaintext read buffer.
    public func handlerAdded(context: ChannelHandlerContext) {
        self.storedContext = context
        self.connection.setAllocator(context.channel.allocator, maximumPreservedOutboundBufferCapacity: .max)
        self.connection.parentHandler = self
        self.connection.eventLoop = context.eventLoop
        self.plaintextReadBuffer = context.channel.allocator.buffer(capacity: SSL_MAX_RECORD_SIZE)

        // If this channel is already active, immediately begin handshaking.
        if context.channel.isActive {
            doHandshakeStep(context: context)
        }
    }

    public func handlerRemoved(context: ChannelHandlerContext) {
        /// Get the connection to drop any state it might have. This state can cause reference cycles,
        /// so we need to break those when we know it's safe to do so. This is a good safe point, as no
        /// further I/O can possibly occur.
        self.connection.close()

        // We now want to drop the stored context.
        self.storedContext = nil
    }

    public func channelActive(context: ChannelHandlerContext) {
        // We fire this a bit early, entirely on purpose. This is because
        // in doHandshakeStep we may end up closing the channel again, and
        // if we do we want to make sure that the channelInactive message received
        // by later channel handlers makes sense.
        context.fireChannelActive()
        doHandshakeStep(context: context)
    }

    public func channelInactive(context: ChannelHandlerContext) {
        // This fires when the TCP connection goes away. Whatever happens, we end up in the closed
        // state here. This function calls out to a lot of user code, so we need to make sure we're
        // keeping track of the state we're in properly before we do anything else.
        let oldState = state
        state = .closed

        let channelError: NIOSSLError
        switch oldState {
        case .closed, .idle:
            // Nothing to do, but discard any buffered actions we still have.
            discardBufferedActions(reason: ChannelError.ioOnClosedChannel)

            // Return early
            context.fireChannelInactive()
            return
        case .handshaking:
            // In this case the channel is going through the doHandshake steps and
            // a channelInactive is fired taking down the connection.
            // This case propagates a .handshakeFailed instead of an .uncleanShutdown.
            // We use a synthetic error here as the error stack will be empty, and we should try to
            // provide some diagnostic help.
            channelError = NIOSSLError.handshakeFailed(.sslError([.eofDuringHandshake]))
        case .additionalVerification:
            // In this case the channel is going through the doHandshake steps and
            // a channelInactive is fired taking down the connection.
            // This case propagates a .handshakeFailed instead of an .uncleanShutdown.
            // We use a synthetic error here as the error stack will be empty, and we should try to
            // provide some diagnostic help.
            channelError = NIOSSLError.handshakeFailed(.sslError([.eofDuringAdditionalCertficiateChainValidation]))
        default:
            // This is a ragged EOF: we weren't sent a CLOSE_NOTIFY. We want to send a user
            // event to notify about this before we propagate channelInactive. We also want to fail all
            // these writes.
            channelError = NIOSSLError.uncleanShutdown
        }

        let shutdownPromise = self.shutdownPromise
        self.shutdownPromise = nil
        let closePromise = self.closePromise
        self.closePromise = nil
        shutdownPromise?.fail(channelError)
        closePromise?.fail(channelError)
        context.fireErrorCaught(channelError)
        discardBufferedActions(reason: channelError)
        context.fireChannelInactive()
    }

    public func channelRead(context: ChannelHandlerContext, data: NIOAny) {
        let binaryData = unwrapInboundIn(data)

        // The logic: feed the buffers, then take an action based on state.
        connection.consumeDataFromNetwork(binaryData)

        switch state {
        case .handshaking:
            doHandshakeStep(context: context)
        case .active, .outputClosed:
            doDecodeData(context: context)
            doUnbufferActions(context: context)
        case .closing:
            // Handle both natural close events and close events where data is still in
            // flight. Sending through doDecodeData will handle both conditions.
            doDecodeData(context: context)
        case .unwrapping:
            self.doShutdownStep(context: context)
        default:
            context.fireErrorCaught(NIOSSLError.readInInvalidTLSState)
            channelClose(context: context, reason: NIOSSLError.readInInvalidTLSState)
        }
    }

    public func channelReadComplete(context: ChannelHandlerContext) {
        guard let receiveBuffer = self.plaintextReadBuffer else {
            preconditionFailure("channelReadComplete called before handlerAdded")
        }

        self.doFlushReadData(context: context, receiveBuffer: receiveBuffer, readOnEmptyBuffer: true)
        self.writeDataToNetwork(context: context, promise: nil)
    }

    public func userInboundEventTriggered(context: ChannelHandlerContext, event: Any) {
        switch event {
        case ChannelEvent.inputClosed:
            userInboundInputClosedTriggered(context: context)
        default:
            context.fireUserInboundEventTriggered(event)
        }
    }

    /// Handles the remote peer half-closing its write side of the connection, mapping the
    /// current state to the appropriate error (if any) before forwarding the event.
    private func userInboundInputClosedTriggered(context: ChannelHandlerContext) {
        let channelError: NIOSSLError
        switch self.state {
        case .inputClosed:
            return
        case .closed, .idle:
            context.fireUserInboundEventTriggered(ChannelEvent.inputClosed)
            return
        case .handshaking:
            // In this case the channel is going through the doHandshake steps and
            // a channelInactive is fired taking down the connection.
            // This case propagates a .handshakeFailed instead of an .uncleanShutdown.
            // We use a synthetic error here as the error stack will be empty, and we should try to
            // provide some diagnostic help.
            channelError = NIOSSLError.handshakeFailed(.sslError([.eofDuringHandshake]))
        case .additionalVerification:
            // In this case the channel is going through the doHandshake steps and
            // a channelInactive is fired taking down the connection.
            // This case propagates a .handshakeFailed instead of an .uncleanShutdown.
            // We use a synthetic error here as the error stack will be empty, and we should try to
            // provide some diagnostic help.
            channelError = NIOSSLError.handshakeFailed(.sslError([.eofDuringAdditionalCertficiateChainValidation]))
        default:
            // This is a ragged EOF: we weren't sent a CLOSE_NOTIFY. We want to send a user
            // event to notify about this before we propagate channelInactive. We also want to fail all
            // these writes.
            channelError = NIOSSLError.uncleanShutdown
        }
        context.fireErrorCaught(channelError)
        context.fireUserInboundEventTriggered(ChannelEvent.inputClosed)
    }

    public func write(context: ChannelHandlerContext, data: NIOAny, promise: EventLoopPromise?) {
        bufferWrite(data: unwrapOutboundIn(data), promise: promise)
    }

    public func flush(context: ChannelHandlerContext) {
        switch self.state {
        case .idle, .handshaking, .additionalVerification:
            // we should not flush immediately as we have not completed the handshake and instead buffer the flush
            self.bufferFlush()
        case .active, .unwrapping, .closing, .unwrapped, .inputClosed, .outputClosed, .closed:
            self.bufferFlush()
            self.doUnbufferActions(context: context)
        }
    }

    public func close(context: ChannelHandlerContext, mode: CloseMode, promise: EventLoopPromise?) {
        switch mode {
        case .output:
            self.closeOutput(context: context, promise: promise)
        case .all:
            self.closeAll(context: context, promise: promise)
        case .input:
            promise?.fail(ChannelError.operationUnsupported)
        }
    }

    /// Implements `close(mode: .output)`: half-closes the write side, deferring until
    /// outstanding writes are flushed if the handshake has not yet completed.
    private func closeOutput(context: ChannelHandlerContext, promise: EventLoopPromise?) {
        switch state {
        case .closing:
            // We're in the process of TLS shutdown, which has a higher priority.
            // Therefore we skip the output closing procedure and cascade the result
            // of the TLS shutdown request to this new one.
            if let promise = promise, let closePromise = self.closePromise {
                closePromise.futureResult.cascade(to: promise)
            } else if let promise = promise {
                self.closePromise = promise
            }
        case .idle, .outputClosed, .closed, .unwrapping, .unwrapped:
            // For idle, outputClosed, closed, unwrapping, and unwrapped connections we immediately pass this on to the next
            // channel handler.
            context.close(mode: .output, promise: promise)
        case .handshaking, .additionalVerification:
            // We are still in the process of handshaking / doing additional verification.
            // This means our outstanding writes will not get flushed until we have reached the active state.
            // Therefore we buffer the .closeOutput action and wait for it to be executed after all our
            // outstanding writes have been flushed in the active state.
            self.bufferedActions.append(.closeOutput)
            self.flush(context: context)
            self.closeOutputPromise = promise
        case .inputClosed:
            // Input is already closed and we want to close our output.
            // This escalates to a full closure.
            self.close(context: context, mode: .all, promise: promise)
        case .active:
            // We need to begin processing closeOutput now.
            // We can't fire the promise for a while though.
            self.state = .outputClosed
            self.closeOutputPromise = promise
            self.flush(context: context)
            self.doShutdownStep(context: context)
        }
    }

    /// Implements `close(mode: .all)`: begins (or joins) orderly TLS shutdown and full
    /// channel closure.
    private func closeAll(context: ChannelHandlerContext, promise: EventLoopPromise?) {
        switch state {
        case .closing:
            // We're in the process of TLS shutdown, so let's let that happen. However,
            // we want to cascade the result of the first request into this new one.
            if let promise = promise, let closePromise = self.closePromise {
                closePromise.futureResult.cascade(to: promise)
            } else if let promise = promise {
                self.closePromise = promise
            }
        case .unwrapping(let scheduledShutdown):
            // We've been asked to close the connection, but we were currently unwrapping.
            // We don't have to send any CLOSE_NOTIFY, but we now need to upgrade ourselves:
            // closing is a more extreme activity than unwrapping.
            self.state = .closing(scheduledShutdown)
            if let promise = promise, let closePromise = self.closePromise {
                closePromise.futureResult.cascade(to: promise)
            } else if let promise = promise {
                self.closePromise = promise
            }
        case .idle:
            state = .closed
            fallthrough
        case .closed, .unwrapped:
            // For idle, closed, and unwrapped connections we immediately pass this on to the next
            // channel handler.
            context.close(promise: promise)
        case .active, .inputClosed, .outputClosed, .handshaking, .additionalVerification:
            // We need to begin processing shutdown now. We can't fire the promise for a
            // while though.
            self.state = .closing(self.scheduleTimedOutShutdown(context: context))
            closePromise = promise
            doShutdownStep(context: context)
        }
    }

    /// Attempt to perform another stage of the TLS handshake.
    ///
    /// A TLS connection has a multi-step handshake that requires at least two messages sent by each
    /// peer. As a result, a handshake will never complete in a single call to BoringSSL. This method
    /// will call `doHandshake`, and will then attempt to write whatever data this generated to the
    /// network. If we are waiting on data from the remote peer, this method will do nothing.
    ///
    /// This method must not be called once the connection is established.
    private func doHandshakeStep(context: ChannelHandlerContext) {
        switch self.state {
        case .unwrapped, .inputClosed, .outputClosed, .closed:
            // We shouldn't be handshaking in any of these states.
            return
        case .idle, .handshaking, .additionalVerification, .active, .closing, .unwrapping:
            ()
        }

        let result = self.connection.doHandshake()

        switch result {
        case .incomplete:
            state = .handshaking
            writeDataToNetwork(context: context, promise: nil)
        case .complete:
            do {
                try validateHostname(context: context)
            } catch {
                // This counts as a failure.
                context.fireErrorCaught(error)
                channelClose(context: context, reason: error)
                return
            }

            if let additionalPeerCertificateVerificationCallback = self.additionalPeerCertificateVerificationCallback {
                state = .additionalVerification
                guard let peerCertificate = connection.getPeerCertificate() else {
                    preconditionFailure(
                        """
                        Couldn't get peer certificate after chain verification was successful. This should be impossible as we have a precondition during creation of this handler that requires certificate verification. Please file an issue.
                        """
                    )
                }
                additionalPeerCertificateVerificationCallback(peerCertificate, context.channel)
                    .hop(to: context.eventLoop)
                    .assumeIsolated()
                    .whenComplete { result in
                        self.completedAdditionalPeerCertificateVerification(result: result)
                    }
                return
            }

            state = .active
            completeHandshake(context: context)
        case .failed(let err):
            writeDataToNetwork(context: context, promise: nil)

            // If there's a failed private key operation, we fire both errors.
            if case .failure(let privateKeyError) = self.connection.customPrivateKeyResult {
                context.fireErrorCaught(privateKeyError)
            }

            // If there's a failed custom context operation, we fire both errors.
            if let customContextError = self.connection.customContextManager?.loadContextError {
                context.fireErrorCaught(customContextError)
            }

            context.fireErrorCaught(NIOSSLError.handshakeFailed(err))
            channelClose(context: context, reason: NIOSSLError.handshakeFailed(err))
        }
    }

    /// Fires the handshake-completed user event and drains any reads/writes that were
    /// buffered while the handshake was in progress.
    private func completeHandshake(context: ChannelHandlerContext) {
        writeDataToNetwork(context: context, promise: nil)

        // TODO(cory): This event should probably fire out of the BoringSSL info callback.
        let negotiatedProtocol = connection.getAlpnProtocol()
        context.fireUserInboundEventTriggered(TLSUserEvent.handshakeCompleted(negotiatedProtocol: negotiatedProtocol))

        // We need to unbuffer any pending writes and reads. We will have pending writes if the user attempted to
        // write before we completed the handshake. We may also have pending reads if the user sent data immediately
        // after their FINISHED record. We decode the reads first, as those reads may trigger writes.
        self.doDecodeData(context: context)
        if let receiveBuffer = self.plaintextReadBuffer {
            self.doFlushReadData(context: context, receiveBuffer: receiveBuffer, readOnEmptyBuffer: false)
        }
        self.doUnbufferActions(context: context)
    }

    /// Continuation invoked when the user's additional peer-certificate verification future
    /// completes; either finishes the handshake or tears the connection down.
    private func completedAdditionalPeerCertificateVerification(result: Result) {
        guard let context = self.storedContext else {
            // `self` may already be removed from the channel pipeline
            return
        }
        context.eventLoop.preconditionInEventLoop()

        switch self.state {
        case .idle, .handshaking, .active, .inputClosed, .outputClosed:
            preconditionFailure("invalid state \(self.state)")
        case .additionalVerification:
            switch result {
            case .failure(let error):
                // This counts as a failure.
                context.fireErrorCaught(error)
                channelClose(context: context, reason: error)
            case .success:
                state = .active
                completeHandshake(context: context)
            }
        case .unwrapping, .closing, .unwrapped, .closed:
            break  // we are already about to close, we can safely ignore this event
        }
    }

    /// Attempt to perform a stage of orderly TLS shutdown.
    ///
    /// Orderly TLS shutdown requires each peer to send a TLS CloseNotify message.
    /// This message is a signal that the data being sent has been completely sent,
    /// without truncation. Where possible we attempt to perform an orderly shutdown,
    /// and so we will send a CloseNotify. We also try to wait for the remote peer to
    /// send a CloseNotify in response. This means we may call this multiple times,
    /// potentially writing our own CloseNotify each time.
    ///
    /// Once `state` has transitioned to `.closed`, further calls to this method will
    /// do nothing.
    private func doShutdownStep(context: ChannelHandlerContext) {
        if case .closed = self.state {
            return
        }
        let result = connection.doShutdown()

        var uncleanScheduledShutdown: Scheduled?
        let targetCompleteState: ConnectionState
        switch self.state {
        case .outputClosed:
            targetCompleteState = .outputClosed
        case .closing(let scheduledShutdown):
            uncleanScheduledShutdown = scheduledShutdown
            targetCompleteState = .closed
        case .unwrapping(let scheduledShutdown):
            uncleanScheduledShutdown = scheduledShutdown
            targetCompleteState = .unwrapped
        default:
            preconditionFailure("Shutting down in a non-shutting-down state")
        }

        switch result {
        case .incomplete:
            writeDataToNetwork(context: context, promise: nil)
            if case .outputClosed = targetCompleteState {
                self.state = targetCompleteState
                self.channelCloseOutput(context: context)
            }
        case .complete:
            uncleanScheduledShutdown?.cancel()
            self.state = targetCompleteState
            writeDataToNetwork(context: context, promise: nil)

            // TODO(cory): This should probably fire out of the BoringSSL info callback.
            context.fireUserInboundEventTriggered(TLSUserEvent.shutdownCompleted)
            switch targetCompleteState {
            case .outputClosed:
                /// No full channel close here. We expect users to invoke a full close even when the
                /// connection has been half-closed in one direction.
                /// Note: half closure for input and output results in a full close.
                self.channelCloseOutput(context: context)
            case .closed:
                self.channelClose(context: context, reason: NIOTLSUnwrappingError.closeRequestedDuringUnwrap)
            case .unwrapped:
                self.channelUnwrap(context: context)
            default:
                preconditionFailure("Cannot be in \(targetCompleteState) at this code point")
            }
        case .failed(let err):
            uncleanScheduledShutdown?.cancel()

            // TODO(cory): This should probably fire out of the BoringSSL info callback.
            context.fireErrorCaught(NIOSSLError.shutdownFailed(err))
            channelClose(context: context, reason: NIOSSLError.shutdownFailed(err))
        }
    }

    /// Creates a scheduled task to perform an unclean shutdown in event of a clean shutdown timing
    /// out. This task should be cancelled if the shutdown does not time out.
    private func scheduleTimedOutShutdown(context: ChannelHandlerContext) -> Scheduled {
        context.eventLoop.assumeIsolated().scheduleTask(in: self.shutdownTimeout) {
            switch self.state {
            case .inputClosed, .outputClosed, .idle, .handshaking, .additionalVerification, .active:
                preconditionFailure("Cannot schedule timed out shutdown on non-shutting down handler")
            case .closed, .unwrapped:
                // This means we raced with the shutdown completing. We just let this one go: do nothing.
                return
            case .closing:
                // We're closing, the only thing we do here is exit.
                self.state = .closed
                self.channelClose(context: context, reason: NIOSSLCloseTimedOutError())
            case .unwrapping:
                // The user only wants us to error and unwrap, not to close.
                self.state = .unwrapped
                self.channelUnwrap(context: context, failedWithError: NIOSSLCloseTimedOutError())
            }
        }
    }

    /// Loops over the `SSL` object, decoding encrypted application data until there is
    /// no more available.
    private func doDecodeData(context: ChannelHandlerContext) {
        guard var receiveBuffer = self.plaintextReadBuffer else {
            // NOTE(review): the message below says "didDecodeData" but this function is named
            // doDecodeData — the string looks stale; confirm before changing user-visible text.
            preconditionFailure("didDecodeData called without handlerAdded firing.")
        }

        // We nil the read buffer here. This is done on purpose: we do it to ensure
        // that we don't have two references to the buffer, otherwise readDataFromNetwork
        // will trigger a CoW every time. We need to put this back on every exit from this
        // function, or before any call-out, to avoid re-entrancy issues. We validate the
        // requirement for this being non-nil on exit at the very least.
        self.plaintextReadBuffer = nil
        defer {
            assert(self.plaintextReadBuffer != nil)
        }

        readLoop: while true {
            let result = connection.readDataFromNetwork(outputBuffer: &receiveBuffer)

            switch result {
            case .complete:
                // Good read. Keep going
                continue readLoop
            case .incomplete:
                self.plaintextReadBuffer = receiveBuffer
                break readLoop
            case .failed(BoringSSLError.zeroReturn):
                let allowRemoteHalfClosure = self.getAllowRemoteHalfClosureFromChannel(context: context)
                switch self.state {
                case .idle, .handshaking, .additionalVerification:
                    preconditionFailure("Should not get zeroReturn in \(self.state)")
                case .closed, .unwrapped:
                    // This is an unexpected place to be, but it's not totally impossible. Assume this
                    // is the result of a wonky I/O pattern and just ignore it.
                    self.plaintextReadBuffer = receiveBuffer
                    break readLoop
                case .active, .outputClosed:
                    if allowRemoteHalfClosure == false {
                        self.state = .closing(self.scheduleTimedOutShutdown(context: context))
                    }
                case .unwrapping, .closing, .inputClosed:
                    break
                }

                // This is a clean EOF: we can just start doing our own clean shutdown.
                self.doFlushReadData(context: context, receiveBuffer: receiveBuffer, readOnEmptyBuffer: false)
                if allowRemoteHalfClosure {
                    switch self.state {
                    case .active, .unwrapping:
                        self.state = .inputClosed
                    case .outputClosed:
                        // Wanting to close input when output is already closed,
                        // escalate to full shutdown
                        self.close(context: context, mode: .all, promise: nil)
                    default:
                        break
                    }
                    context.fireUserInboundEventTriggered(ChannelEvent.inputClosed)
                } else {
                    self.doShutdownStep(context: context)
                }
                writeDataToNetwork(context: context, promise: nil)
                break readLoop
            case .failed(let err):
                self.state = .closed
                self.plaintextReadBuffer = receiveBuffer
                context.fireErrorCaught(err)
                channelClose(context: context, reason: err)
                break readLoop
            }
        }
    }

    /// Checks if the `allowRemoteHalfClosure` channel option is set.
    private func getAllowRemoteHalfClosureFromChannel(context: ChannelHandlerContext) -> Bool {
        var halfClosureAllowed = false
        if let syncOptions = context.channel.syncOptions {
            if let result = try? syncOptions.getOption(ChannelOptions.allowRemoteHalfClosure) {
                halfClosureAllowed = result
            }
        }
        return halfClosureAllowed
    }

    /// Flushes any pending read plaintext. This is called whenever we hit a flush
    /// point for reads: either channelReadComplete, or we receive a CLOSE_NOTIFY.
    ///
    /// This function will always set the empty buffer back to be the plaintext read buffer.
    /// Do not do this in your own code.
    private func doFlushReadData(context: ChannelHandlerContext, receiveBuffer: ByteBuffer, readOnEmptyBuffer: Bool) {
        defer {
            // All exits from this function must restore the plaintext read buffer.
            assert(self.plaintextReadBuffer != nil)
        }

        // We only want to fire channelReadComplete in a situation where we have actually sent the user some data, otherwise
        // we'll be confusing the hell out of them.
        if receiveBuffer.writerIndex > receiveBuffer.readerIndex {
            // We need to be very careful here: we must not call out before we fix up our local view of this buffer. In this
            // case, we're going to set the indices back to where they were. In this case we are deliberately *not* calling
            // clear(), as we don't want to trigger a CoW for our own local refs.
            var ourNewBuffer = receiveBuffer
            ourNewBuffer.moveReaderIndex(to: 0)
            ourNewBuffer.moveWriterIndex(to: 0)
            self.plaintextReadBuffer = ourNewBuffer

            // Ok, we can now pass the receive buffer on and fire channelReadComplete.
            context.fireChannelRead(self.wrapInboundOut(receiveBuffer))
            context.fireChannelReadComplete()
        } else if readOnEmptyBuffer {
            // We didn't deliver data, but the channel is still active. If this channel has got
            // autoread turned off then we should call read again, because otherwise the user
            // will never see any result from their read call.
            //
            // In the unlikely event we couldn't get the answer, we assume auto-read is on.
            self.plaintextReadBuffer = receiveBuffer
            do {
                let autoRead = try context.channel.syncOptions?.getOption(ChannelOptions.autoRead) ?? true
                if !autoRead {
                    context.read()
                }
            } catch {
                context.fireErrorCaught(error)
            }
        } else {
            // Regardless of what happens here, we need to put the plaintext read buffer back. Very important.
            self.plaintextReadBuffer = receiveBuffer
        }
    }

    /// Encrypts application data and writes it to the channel.
    ///
    /// This method always flushes. For this reason, it should only ever be called when a flush
    /// is intended.
    private func writeDataToNetwork(context: ChannelHandlerContext, promise: EventLoopPromise?) {
        // There may be no data to write, in which case we can just exit early.
        guard let dataToWrite = connection.getDataForNetwork() else {
            if let promise = promise {
                // If we have a promise, we need to enforce ordering so we issue a zero-length write that
                // the event loop will have to handle.
                let buffer = context.channel.allocator.buffer(capacity: 0)
                context.writeAndFlush(wrapInboundOut(buffer), promise: promise)
            }
            return
        }

        context.writeAndFlush(self.wrapInboundOut(dataToWrite), promise: promise)
    }

    /// Simply calls `ChannelHandlerContext.close(mode: .output)` with
    /// any promise we may have already been given.
    private func channelCloseOutput(context: ChannelHandlerContext) {
        let closeOutputPromise = self.closeOutputPromise
        self.closeOutputPromise = nil
        context.close(mode: .output, promise: closeOutputPromise)
    }

    /// Close the underlying channel.
    ///
    /// This method does not perform any kind of I/O. Instead, it simply calls ChannelHandlerContext.close with
    /// any promise we may have already been given. It also transitions our state into closed. This should only be
    /// used to clean up after an error, or to perform the final call to close after a clean shutdown attempt.
    private func channelClose(context: ChannelHandlerContext, reason: Error) {
        state = .closed
        let shutdownPromise = self.shutdownPromise
        self.shutdownPromise = nil
        let closePromise = self.closePromise
        self.closePromise = nil
        shutdownPromise?.fail(reason)
        context.close(promise: closePromise)
    }

    /// Removes this handler from the pipeline after a successful unwrap, delivering any
    /// unconsumed plaintext and resolving the shutdown promise.
    private func channelUnwrap(context: ChannelHandlerContext, failedWithError error: Error? = nil) {
        assert(self.closePromise == nil)
        self.state = .unwrapped

        let shutdownPromise = self.shutdownPromise
        self.shutdownPromise = nil

        // We create a promise here to make sure we operate in the special magic state
        // where we are not in the pipeline any more, but we still have a valid context.
        let removalPromise: EventLoopPromise = context.eventLoop.makePromise()
        let removalFuture = removalPromise.futureResult.assumeIsolated().map {
            // Now drop all actions.
            self.discardBufferedActions(reason: NIOTLSUnwrappingError.unflushedWriteOnUnwrap)

            if let unconsumedData = self.connection.extractUnconsumedData() {
                context.fireChannelRead(self.wrapInboundOut(unconsumedData))
            }

            if let error = error {
                context.fireErrorCaught(error)
            }
        }

        if let promise = shutdownPromise {
            removalFuture.whenComplete { result in
                switch (result, error) {
                case (.success, .none):
                    promise.succeed(())
                case (.success, .some(let error)):
                    promise.fail(error)
                case (.failure(let failure), _):
                    promise.fail(failure)
                }
            }
            // NOTE(review): this cascade appears to complete `promise` a second time after the
            // whenComplete above has already succeeded/failed it — NIO promises must only be
            // completed once. Verify whether one of these two completions should be removed.
            removalFuture.nonisolated().cascade(to: promise)
        }

        // Ok, we've unwrapped. Let's get out of the channel.
        context.channel.pipeline.syncOperations.removeHandler(context: context, promise: removalPromise)
    }

    /// Validates the hostname from the certificate against the hostname provided by
    /// the user, assuming one has been provided at all.
    private func validateHostname(context: ChannelHandlerContext) throws {
        guard connection.validateHostnames else {
            return
        }

        // If there is no remote address, something weird is happening here. We can't
        // validate a certificate without it, so bail.
        guard let ipAddress = context.channel.remoteAddress else {
            throw NIOSSLError.cannotFindPeerIP
        }
        try connection.validateHostname(address: ipAddress)
    }
}

@available(*, unavailable)
extension NIOSSLHandler: Sendable {}

extension NIOSSLHandler {
    /// Variable that can be queried during the connection lifecycle to grab the ``TLSVersion`` used on this connection.
    ///
    /// This variable **is not thread-safe**: you **must** call it from the correct event
    /// loop thread.
    public var tlsVersion: TLSVersion? {
        self.connection.getTLSVersionForConnection()
    }

    /// Return a NIOSSLCertificate from the verified peer after handshake has completed.
    ///
    /// Similar to getTlsVersionForConnection this **is not thread safe**.
    public var peerCertificate: NIOSSLCertificate?
{ self.connection.getPeerCertificate() } /// Return the *validated* certificate chain from the verified peer after handshake has completed. /// /// This property will only contain a value if the handler was initialized with a custom certificate verification /// callback (``NIOSSLCustomVerificationCallbackWithMetadata``) *and* if the promise in the callback was /// successfully completed with ``NIOSSLVerificationResultWithMetadata/certificateVerified(_:)`` (containing a /// ``VerificationMetadata`` instance with a ``ValidatedCertificateChain``). If either of these conditions are not /// met, this property will be `nil`. /// /// To create a `NIOSSLClientHandler` handler with a custom verification callback that can return the certificate /// chain, use: /// - ``NIOSSLClientHandler/init(context:serverHostname:customVerificationCallbackWithMetadata:)`` or /// - ``NIOSSLClientHandler/init(context:serverHostname:configuration:customVerificationCallbackWithMetadata:)`` /// For `NIOSSLServerHandler`, use: /// - ``NIOSSLServerHandler/init(context:customVerificationCallbackWithMetadata:)`` or /// - ``NIOSSLServerHandler/init(context:configuration:customVerificationCallbackWithMetadata:)`` /// public var peerValidatedCertificateChain: ValidatedCertificateChain? { self.connection.customVerificationManager?.verificationMetadata?.validatedCertificateChain } } extension Channel { /// API to extract the ``TLSVersion`` from off the `Channel`. public func nioSSL_tlsVersion() -> EventLoopFuture { self.pipeline.handler(type: NIOSSLHandler.self).map { $0.tlsVersion } } /// API to retrieve the verified NIOSSLCertificate of the peer off the 'Channel' public func nioSSL_peerCertificate() -> EventLoopFuture { self.pipeline.handler(type: NIOSSLHandler.self).map { $0.peerCertificate } } /// API to retrieve the *validated* certificate chain of the peer. See ``NIOSSLHandler/peerValidatedCertificateChain``. 
public func nioSSL_peerValidatedCertificateChain() -> EventLoopFuture { self.pipeline.handler(type: NIOSSLHandler.self).map { $0.peerValidatedCertificateChain } } } extension ChannelPipeline.SynchronousOperations { /// API to query the ``TLSVersion`` directly from the `ChannelPipeline`. public func nioSSL_tlsVersion() throws -> TLSVersion? { let handler = try self.handler(type: NIOSSLHandler.self) return handler.tlsVersion } /// API to retrieve the verified NIOSSLCertificate of the peer directly from the 'ChannelPipeline' public func nioSSL_peerCertificate() throws -> NIOSSLCertificate? { let handler = try self.handler(type: NIOSSLHandler.self) return handler.peerCertificate } /// API to retrieve the *validated* certificate chain of the peer. See ``NIOSSLHandler/peerValidatedCertificateChain``. public func nioSSL_peerValidatedCertificateChain() throws -> ValidatedCertificateChain? { let handler = try self.handler(type: NIOSSLHandler.self) return handler.peerValidatedCertificateChain } } // MARK:- Extension APIs for users. extension NIOSSLHandler { /// Called to instruct this handler to perform an orderly TLS shutdown and then remove itself /// from the pipeline. This will leave the connection established, but remove the TLS wrapper /// from it. /// /// This will send a `CLOSE_NOTIFY` and wait for the corresponding `CLOSE_NOTIFY`. When that next /// `CLOSE_NOTIFY` is received, this handler will pass on all pending writes and remove itself /// from the channel pipeline. If the shutdown times out then an error will fire down the /// pipeline, this handler will remove itself from the pipeline, but the channel will not be /// automatically closed. /// /// This function **is not thread-safe**: you **must** call it from the correct event /// loop thread. /// /// - parameters: /// - promise: An `EventLoopPromise` that will be completed when the unwrapping has /// completed. public func stopTLS(promise: EventLoopPromise?) 
{ switch self.state { case .unwrapping, .closing: // We're shutting down here. Nothing has to be done, but we should keep track of this promise. if let promise = promise, let shutdownPromise = self.shutdownPromise { shutdownPromise.futureResult.cascade(to: promise) } else if let promise = promise { self.shutdownPromise = promise } case .idle: // We've never activated, it's easy to remove TLS from a connection that never had it. guard let storedContext = self.storedContext else { promise?.fail(NIOTLSUnwrappingError.invalidInternalState) return } self.state = .unwrapped self.shutdownPromise = promise self.channelUnwrap(context: storedContext) case .handshaking, .active, .inputClosed, .outputClosed, .additionalVerification: // Time to try to strip TLS. guard let storedContext = self.storedContext else { promise?.fail(NIOTLSUnwrappingError.invalidInternalState) return } self.state = .unwrapping(self.scheduleTimedOutShutdown(context: storedContext)) self.shutdownPromise = promise self.doShutdownStep(context: storedContext) case .unwrapped: // We are already unwrapped. Succeed the promise, do nothing. promise?.succeed(()) case .closed: promise?.fail(NIOTLSUnwrappingError.alreadyClosed) } } } // MARK: Code that handles buffering/unbuffering actions. extension NIOSSLHandler { private typealias BufferedWrite = (data: ByteBuffer, promise: EventLoopPromise?) private enum BufferedAction { case closeOutput case write(BufferedWrite) } private func bufferWrite(data: ByteBuffer, promise: EventLoopPromise?) { switch self.state { case .idle, .handshaking, .additionalVerification, .active, .unwrapping, .closing, .unwrapped, .inputClosed: () case .outputClosed: promise?.fail(ChannelError.outputClosed) return case .closed: promise?.fail(ChannelError.ioOnClosedChannel) return } var data = data // Here we guard against the possibility that any of these writes are larger than CInt.max. // This is very unusual but it can happen. 
To work around it, we just pretend that there were // multiple writes. // // During the short writes we set the promise to `nil` to make sure they only arrive at the end. // Note that we make sure that there's always a single write, at the end, that holds the promise. while data.readableBytes > self.maxWriteSize, let slice = data.readSlice(length: self.maxWriteSize) { bufferedActions.append(.write((data: slice, promise: nil))) } assert(data.readableBytes <= maxWriteSize) bufferedActions.append(.write((data: data, promise: promise))) } private func bufferFlush() { bufferedActions.mark() } private func discardBufferedActions(reason: Error) { while let bufferedAction = self.bufferedActions.popFirst() { if case .write(let bufferedWrite) = bufferedAction { bufferedWrite.promise?.fail(reason) } } } private func doUnbufferActions(context: ChannelHandlerContext) { // Return early if the user hasn't called flush. guard bufferedActions.hasMark else { return } // These are some annoying variables we use to persist state across invocations of // our closures. A better version of this code might be able to simplify this somewhat. var promises: [EventLoopPromise] = [] do { var invokeCloseOutput = false var bufferedActionsLoopCount = 0 bufferedActionsLoop: while self.bufferedActions.hasMark, bufferedActionsLoopCount < 1000 { bufferedActionsLoopCount += 1 var didWrite = false writeLoop: while self.bufferedActions.hasMark { let element = self.bufferedActions.first! switch element { case .write(let bufferedWrite): var data = bufferedWrite.data let writeSuccessful = try self._encodeSingleWrite(buf: &data) if writeSuccessful { didWrite = true if let promise = bufferedWrite.promise { promises.append(promise) } _ = self.bufferedActions.removeFirst() } else { // The write into BoringSSL unsuccessful. Break the write loop so any // data is written to the network before resuming. 
break writeLoop } case .closeOutput: invokeCloseOutput = true _ = self.bufferedActions.removeFirst() break writeLoop } } // If we got this far and did a write, we should shove the data out to the // network. if didWrite { let ourPromise: EventLoopPromise? = promises.flattenPromises(on: context.eventLoop) self.writeDataToNetwork(context: context, promise: ourPromise) } // We detected a .closeOutput action in our action buffer. This means we // close the output after we have written all pending writes. if invokeCloseOutput { self.state = .outputClosed self.doShutdownStep(context: context) self.discardBufferedActions(reason: ChannelError.outputClosed) break bufferedActionsLoop } } // We spun the outer loop too many times, something isn't right so let's bail out // instead of looping any longer. if bufferedActionsLoopCount >= 1000 { assertionFailure( "\(#function) looped too many times, please file a GitHub issue against swift-nio-ssl." ) throw NIOSSLExtraError.noForwardProgress } } catch { // We encountered an error, it's cleanup time. Close ourselves down. channelClose(context: context, reason: error) // Fail any writes we've previously encoded but not flushed. for promise in promises { promise.fail(error) } // Fail close output promise if present let closeOutputPromise = self.closeOutputPromise self.closePromise = nil closeOutputPromise?.fail(error) // Fail everything else. self.discardBufferedActions(reason: error) } } /// Given a ByteBuffer to encode, passes it to BoringSSL and handles the result. private func _encodeSingleWrite(buf: inout ByteBuffer) throws -> Bool { let result = self.connection.writeDataToNetwork(&buf) switch result { case .complete: return true case .incomplete: // Ok, we can't write. Let's stop. return false case .failed(let err): // Once a write fails, all writes must fail. This includes prior writes // that successfully made it through BoringSSL. 
throw err } } } extension Array where Element == EventLoopPromise { /// Given an array of promises, flattens it out to a single promise. /// If the array is empty, returns nil. fileprivate func flattenPromises(on loop: EventLoop) -> EventLoopPromise? { guard self.count > 0 else { return nil } let ourPromise = loop.makePromise(of: Void.self) // We don't use cascade here because cascade has to create one closure per // promise. We can do better by creating only a single closure that dispatches // the result to all promises. ourPromise.futureResult.whenComplete { result in switch result { case .success: for result in self { result.succeed(()) } case .failure(let error): for result in self { result.fail(error) } } } return ourPromise } } // MARK:- Code for handling asynchronous handshake resumption. extension NIOSSLHandler { internal func resumeHandshake() { guard let storedContext = self.storedContext else { // Oh well, the connection is dead. Do nothing. return } self.doHandshakeStep(context: storedContext) } } ================================================ FILE: Sources/NIOSSL/NIOSSLServerHandler.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import NIOCore /// A channel handler that wraps a channel in TLS using NIOSSL. This /// handler can be used in channels that are acting as the server in /// the TLS dialog. For client connections, use the ``NIOSSLClientHandler``. 
public final class NIOSSLServerHandler: NIOSSLHandler {
    /// Construct a new ``NIOSSLServerHandler`` with the given `context`.
    ///
    /// - parameters:
    ///     - context: The ``NIOSSLContext`` to use on this connection.
    public convenience init(context: NIOSSLContext) {
        self.init(
            context: context,
            optionalCustomVerificationCallbackManager: nil,
            optionalAdditionalPeerCertificateVerificationCallback: nil
        )
    }

    @available(*, deprecated, renamed: "init(context:customVerificationCallback:)")
    public init(context: NIOSSLContext, verificationCallback: NIOSSLVerificationCallback? = nil) throws {
        guard let connection = context.createConnection() else {
            fatalError("Failed to create new connection in NIOSSLContext")
        }

        connection.setAcceptState()

        if let verificationCallback = verificationCallback {
            connection.setVerificationCallback(verificationCallback)
        }

        super.init(
            connection: connection,
            shutdownTimeout: context.configuration.shutdownTimeout,
            additionalPeerCertificateVerificationCallback: nil,
            maxWriteSize: NIOSSLHandler.defaultMaxWriteSize,
            configuration: .init()
        )
    }

    /// Construct a new ``NIOSSLServerHandler`` with the given `context` and a custom verification callback.
    ///
    /// - parameters:
    ///     - context: The ``NIOSSLContext`` to use on this connection.
    ///     - customVerificationCallback: A callback to use that will override NIOSSL's normal verification logic.
    ///
    ///         If set, this callback is provided the certificates presented by the peer. NIOSSL will not have pre-processed them. The callback will not be used if the
    ///         ``TLSConfiguration`` that was used to construct the ``NIOSSLContext`` has ``TLSConfiguration/certificateVerification`` set to ``CertificateVerification/none``.
    ///
    /// - Note: Use ``init(context:customVerificationCallbackWithMetadata:)`` to provide a custom verification
    /// callback where the peer's *validated* certificate chain can be returned. This data can then be accessed from
    /// the handler.
    public convenience init(
        context: NIOSSLContext,
        customVerificationCallback: @escaping NIOSSLCustomVerificationCallback
    ) {
        self.init(
            context: context,
            optionalCustomVerificationCallbackManager: CustomVerifyManager(callback: customVerificationCallback),
            optionalAdditionalPeerCertificateVerificationCallback: nil
        )
    }

    /// Construct a new ``NIOSSLServerHandler`` with the given `context` and a metadata-returning verification callback.
    ///
    /// - parameters:
    ///     - context: The ``NIOSSLContext`` to use on this connection.
    ///     - customVerificationCallbackWithMetadata: A callback to use that will override NIOSSL's normal verification
    ///     logic. If validation is successful, the peer's validated certificate chain can be returned, and later
    ///     accessed via ``NIOSSLHandler/peerValidatedCertificateChain``. The callback will not be used if the
    ///     ``TLSConfiguration`` that was used to construct the ``NIOSSLContext`` has
    ///     ``TLSConfiguration/certificateVerification`` set to ``CertificateVerification/none``.
    ///
    ///         - This callback is provided the certificates presented by the peer. NIOSSL will not have pre-processed
    ///         them. Therefore, a validated chain must be derived *within* this callback (potentially involving fetching
    ///         additional intermediate certificates). The *validated* certificate chain returned in the promise result
    ///         **must** be a verified path to a trusted root. Importantly, the certificates presented by the peer should
    ///         not be assumed to be valid.
    public convenience init(
        context: NIOSSLContext,
        customVerificationCallbackWithMetadata: @escaping NIOSSLCustomVerificationCallbackWithMetadata
    ) {
        self.init(
            context: context,
            optionalCustomVerificationCallbackManager: CustomVerifyManager(
                callback: customVerificationCallbackWithMetadata
            ),
            optionalAdditionalPeerCertificateVerificationCallback: nil
        )
    }

    /// Construct a new ``NIOSSLServerHandler`` with the given `context`, an optional verification callback, and a handler configuration.
    ///
    /// - parameters:
    ///     - context: The ``NIOSSLContext`` to use on this connection.
    ///     - customVerificationCallback: A callback to use that will override NIOSSL's normal verification logic.
    ///
    ///         If set, this callback is provided the certificates presented by the peer. NIOSSL will not have pre-processed them. The callback will not be used if the
    ///         ``TLSConfiguration`` that was used to construct the ``NIOSSLContext`` has ``TLSConfiguration/certificateVerification`` set to ``CertificateVerification/none``.
    ///     - configuration: Configuration for this handler.
    ///
    /// - Note: Use ``init(context:configuration:customVerificationCallbackWithMetadata:)`` to provide a custom
    /// verification callback where the peer's *validated* certificate chain can be returned. This data can then be
    /// accessed from the handler.
    public convenience init(
        context: NIOSSLContext,
        customVerificationCallback: NIOSSLCustomVerificationCallback? = nil,
        configuration: Configuration
    ) {
        self.init(
            context: context,
            optionalCustomVerificationCallbackManager: customVerificationCallback.map(CustomVerifyManager.init),
            optionalAdditionalPeerCertificateVerificationCallback: nil,
            configuration: configuration
        )
    }

    /// Construct a new ``NIOSSLServerHandler`` with the given `context`, a handler configuration, and a metadata-returning verification callback.
    ///
    /// - parameters:
    ///     - context: The ``NIOSSLContext`` to use on this connection.
    ///     - configuration: Configuration for this handler.
    ///     - customVerificationCallbackWithMetadata: A callback to use that will override NIOSSL's normal verification
    ///     logic. If validation is successful, the peer's validated certificate chain can be returned, and later
    ///     accessed via ``NIOSSLHandler/peerValidatedCertificateChain``. The callback will not be used if the
    ///     ``TLSConfiguration`` that was used to construct the ``NIOSSLContext`` has
    ///     ``TLSConfiguration/certificateVerification`` set to ``CertificateVerification/none``.
    ///
    ///         - This callback is provided the certificates presented by the peer. NIOSSL will not have pre-processed
    ///         them. Therefore, a validated chain must be derived *within* this callback (potentially involving fetching
    ///         additional intermediate certificates). The *validated* certificate chain returned in the promise result
    ///         **must** be a verified path to a trusted root. Importantly, the certificates presented by the peer should
    ///         not be assumed to be valid.
    public convenience init(
        context: NIOSSLContext,
        configuration: Configuration,
        customVerificationCallbackWithMetadata: @escaping NIOSSLCustomVerificationCallbackWithMetadata
    ) {
        self.init(
            context: context,
            optionalCustomVerificationCallbackManager: CustomVerifyManager(
                callback: customVerificationCallbackWithMetadata
            ),
            optionalAdditionalPeerCertificateVerificationCallback: nil,
            configuration: configuration
        )
    }

    /// - warning: This API is not guaranteed to be stable and is likely to be changed without further notice, hence the underscore prefix.
    public static func _makeSSLServerHandler(
        context: NIOSSLContext,
        additionalPeerCertificateVerificationCallback: @escaping _NIOAdditionalPeerCertificateVerificationCallback
    ) -> Self {
        .init(
            context: context,
            optionalCustomVerificationCallbackManager: nil,
            optionalAdditionalPeerCertificateVerificationCallback: additionalPeerCertificateVerificationCallback
        )
    }

    /// This exists to handle the explosion of initializers I got when I deprecated the first one.
private init( context: NIOSSLContext, optionalCustomVerificationCallbackManager: CustomVerifyManager?, optionalAdditionalPeerCertificateVerificationCallback: _NIOAdditionalPeerCertificateVerificationCallback?, configuration: Configuration = .init() ) { guard let connection = context.createConnection() else { fatalError("Failed to create new connection in NIOSSLContext") } connection.setAcceptState() if let customVerificationCallbackManager = optionalCustomVerificationCallbackManager { connection.setCustomVerificationCallback(customVerificationCallbackManager) } super.init( connection: connection, shutdownTimeout: context.configuration.shutdownTimeout, additionalPeerCertificateVerificationCallback: optionalAdditionalPeerCertificateVerificationCallback, maxWriteSize: NIOSSLHandler.defaultMaxWriteSize, configuration: configuration ) } } // This conformance is technically redundant - Swift 6.2 compiler finally caught this #if compiler(<6.2) @available(*, unavailable) extension NIOSSLServerHandler: Sendable {} #endif ================================================ FILE: Sources/NIOSSL/ObjectIdentifier.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2022 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// @_implementationOnly import CNIOBoringSSL @_implementationOnly import CNIOBoringSSLShims /// A representation of an ASN.1 Object Identifier (OID) public struct NIOSSLObjectIdentifier { private enum Storage { final class Deallocator { var reference: OpaquePointer! init(takeOwnershipOf reference: OpaquePointer!) 
{ self.reference = reference } deinit { CNIOBoringSSL_ASN1_OBJECT_free(self.reference) } } case owned(Deallocator) case borrowed(reference: OpaquePointer!, owner: AnyObject) init(takeOwnershipOf reference: OpaquePointer!) { self = .owned(.init(takeOwnershipOf: reference)) } init(borrowing reference: OpaquePointer!, owner: AnyObject) { self = .borrowed(reference: reference, owner: owner) } /// All operations accessing `reference` need to be implemented while guaranteeing that we still have a reference to the memory owner. /// Otherwise `reference` could already be freed. This would result in undefined behaviour as we access a dangling pointer. /// This method guarantees that `reference` is valid during execution of `body`. internal func withReference( _ body: (OpaquePointer?) throws -> Result ) rethrows -> Result { try withExtendedLifetime(self) { switch self { case .owned(let deallocator): return try body(deallocator.reference) case .borrowed(let reference, _): return try body(reference) } } } } private let storage: Storage /// Creates a Object Identifier (OID) from its textual dotted representation (e.g. `1.2.3`) /// /// - Parameter string: textual dotted representation of an OID public init?(_ string: String) { let result = string.withCString { string in // If no_name (the last parameter of CNIOBoringSSL_OBJ_txt2obj) is 0 then long names and // short names will be interpreted as well as numerical forms. // If no_name is 1 only the numerical form is acceptable. // source: https://www.openssl.org/docs/manmaster/man3/OBJ_txt2obj.html CNIOBoringSSL_OBJ_txt2obj(string, 1) } guard let reference = result else { return nil } self.storage = .init(takeOwnershipOf: reference) } /// Creates an Object Identifier (OID) from an OpenSSL reference. 
/// /// - Note: initialising an ``NIOSSLObjectIdentifier`` takes ownership of the reference and will free it after the reference count drops to zero /// - Parameter reference: reference to a valid OpenSSL OID aka OBJ internal init(takingOwnershipOf reference: OpaquePointer!) { self.storage = .init(takeOwnershipOf: reference) } /// Creates an Object Identifier (OID) from an OpenSSL reference. /// - Note: initialising an ``NIOSSLObjectIdentifier`` with *this* constructor does **not** take ownership of the memory. Instead ``NIOSSLObjectIdentifier`` keeps a reference to the owning object which it will retain for the lifetime of itself. /// - Parameters /// - reference: reference to a valid OpenSSL OID aka OBJ /// - owner: owner of the memory `reference` is pointing to which it will retain. internal init(borrowing reference: OpaquePointer!, owner: AnyObject) { self.storage = .init(borrowing: reference, owner: owner) } /// Creates a copy of an Object Identifier (OID) from an OpenSSL reference /// - Parameter reference: reference to a valid OpenSSL OID aka OBJ internal init(copyOf reference: OpaquePointer!) 
{ self.init(takingOwnershipOf: CNIOBoringSSL_OBJ_dup(reference)) } } // NIOSSLObjectIdentifier is immutable and therefore Sendable extension NIOSSLObjectIdentifier: @unchecked Sendable {} extension NIOSSLObjectIdentifier: Equatable { public static func == (lhs: NIOSSLObjectIdentifier, rhs: NIOSSLObjectIdentifier) -> Bool { lhs.storage.withReference { lhsReference in rhs.storage.withReference { rhsReference in CNIOBoringSSL_OBJ_cmp(lhsReference, rhsReference) == 0 } } } } extension NIOSSLObjectIdentifier: Hashable { public func hash(into hasher: inout Hasher) { self.storage.withReference { reference in let length = CNIOBoringSSL_OBJ_length(reference) let data = CNIOBoringSSL_OBJ_get0_data(reference) let buffer = UnsafeRawBufferPointer(start: data, count: length) hasher.combine(bytes: buffer) } } } extension NIOSSLObjectIdentifier: LosslessStringConvertible { public var description: String { self.storage.withReference { reference in var failed = false let oid = String(customUnsafeUninitializedCapacity: 80) { buffer in // OBJ_obj2txt() is awkward and messy to use: it doesn't follow the convention of other OpenSSL functions where the buffer can be set to NULL to determine the amount of data that should be written. Instead buf must point to a valid buffer and buf_len should be set to a positive value. A buffer length of 80 should be more than enough to handle any OID encountered in practice. // source: https://linux.die.net/man/3/obj_obj2txt let result = buffer.withMemoryRebound(to: CChar.self) { buffer in // If no_name (the last argument of CNIOBoringSSL_OBJ_obj2txt) is 0 then // if the object has a long or short name then that will be used, // otherwise the numerical form will be used. // If no_name is 1 then the numerical form will always be used. 
// source: https://www.openssl.org/docs/manmaster/man3/OBJ_obj2txt.html CNIOBoringSSL_OBJ_obj2txt(buffer.baseAddress, Int32(buffer.count), reference, 1) } guard result >= 0 else { // result of -1 indicates an error failed = true return 0 } return Int(result) } guard !failed else { return "failed to convert OID to string" } return oid } } } ================================================ FILE: Sources/NIOSSL/PosixPort.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import NIOCore // This file contains a version of the SwiftNIO Posix enum. This is necessary // because SwiftNIO's version is internal. Our version exists for the same reason: // to ensure errno is captured correctly when doing syscalls, and that no ARC traffic // can happen inbetween that *could* change the errno value before we were able to // read it. // // The code is an exact port from SwiftNIO, so if that version ever becomes public we // can lift anything missing from there and move it over without change. 
#if canImport(Darwin)
import Darwin.C
#elseif canImport(Musl)
import Musl
#elseif canImport(Glibc)
import Glibc
#elseif canImport(Android)
import Android
#else
#error("unsupported os")
#endif

#if os(Android)
internal typealias FILEPointer = OpaquePointer
#else
// FIX(extraction): the pointee type was stripped when this file was captured; a bare
// `UnsafeMutablePointer` does not compile. `FILE` is the C stdio stream type that
// `fopen`/`fclose` operate on.
internal typealias FILEPointer = UnsafeMutablePointer<FILE>
#endif

// Bind the libc symbols to private constants so the calls below go through a
// stable, non-overloaded name.
private let sysFopen = fopen
private let sysMlock = mlock
private let sysMunlock = munlock
private let sysFclose = fclose
// NOTE(review): the closure's parameter types are not visible here (likely lost in
// extraction along with other generic/type annotations) — confirm against upstream.
private let sysStat = { @Sendable in stat($0, $1) }
private let sysLstat = lstat
private let sysReadlink = readlink

// MARK:- Copied code from SwiftNIO

/// Returns `true` for errno values that indicate a programmer error (a bad pointer
/// or a bad file descriptor) rather than a transient runtime condition. We assert
/// on these instead of throwing.
private func isUnacceptableErrno(_ code: CInt) -> Bool {
    switch code {
    case EFAULT, EBADF:
        return true
    default:
        return false
    }
}

// Sorry, we really try hard to not use underscored attributes. In this case however we seem to break
// the inlining threshold which makes a system call take twice the time, ie. we need this exception.
//
// Runs `body` (a syscall returning an integer), retrying on EINTR, and throws an
// `IOError` carrying the captured errno on any other failure (-1 return).
// FIX(extraction): restored the stripped generic parameter `<T: FixedWidthInteger>`,
// required for the `res == -1` comparison.
@inline(__always)
internal func wrapSyscall<T: FixedWidthInteger>(
    where function: String = #function,
    _ body: () throws -> T
) throws -> T {
    while true {
        let res = try body()
        if res == -1 {
            // Read errno immediately after the failed call: no ARC traffic may run
            // in between that could clobber it (see the file-level comment).
            let err = errno
            if err == EINTR {
                // Interrupted by a signal: retry the call.
                continue
            }
            assert(!isUnacceptableErrno(err), "unacceptable errno \(err) \(strerror(err)!)")
            throw IOError(errnoCode: err, reason: function)
        }
        return res
    }
}

// Sorry, we really try hard to not use underscored attributes. In this case however we seem to break
// the inlining threshold which makes a system call take twice the time, ie. we need this exception.
//
// Like `wrapSyscall`, but for C functions that signal failure by returning NULL
// (e.g. `fopen`): retries on EINTR and throws `IOError` otherwise.
// FIX(extraction): restored the stripped generic parameter `<T>`.
@inline(__always)
internal func wrapErrorIsNullReturnCall<T>(
    errorReason: @autoclosure () -> String = #function,
    _ body: () throws -> T?
) throws -> T {
    while true {
        guard let res = try body() else {
            let err = errno
            if err == EINTR {
                continue
            }
            assert(!isUnacceptableErrno(err), "unacceptable errno \(err) \(strerror(err)!)")
            throw IOError(errnoCode: err, reason: errorReason())
        }
        return res
    }
}

// MARK:- Our functions

/// Thin throwing wrappers over the libc calls used by NIOSSL. Each wrapper retries
/// on EINTR and converts failures into `IOError`.
/// FIX(extraction): restored the stripped pointee types (`<CChar>`, `<stat>`) in the
/// signatures below.
internal enum Posix {
    @inline(never)
    internal static func fopen(file: String, mode: String) throws -> FILEPointer {
        try file.withCString { fileCString in
            try wrapErrorIsNullReturnCall(errorReason: "fopen(file: \"\(file)\", mode: \"\(mode)\")") {
                sysFopen(fileCString, mode)
            }
        }
    }

    @inline(never)
    internal static func fclose(file: FILEPointer) throws -> CInt {
        try wrapSyscall {
            sysFclose(file)
        }
    }

    @inline(never)
    internal static func readlink(
        path: UnsafePointer<CChar>,
        buf: UnsafeMutablePointer<CChar>,
        bufSize: Int
    ) throws -> Int {
        try wrapSyscall {
            sysReadlink(path, buf, bufSize)
        }
    }

    @inline(never)
    @discardableResult
    internal static func stat(path: UnsafePointer<CChar>, buf: UnsafeMutablePointer<stat>) throws -> CInt {
        try wrapSyscall {
            sysStat(path, buf)
        }
    }

    @inline(never)
    @discardableResult
    internal static func lstat(path: UnsafePointer<CChar>, buf: UnsafeMutablePointer<stat>) throws -> Int32 {
        try wrapSyscall {
            sysLstat(path, buf)
        }
    }

    @inline(never)
    @discardableResult
    internal static func mlock(addr: UnsafeRawPointer, len: size_t) throws -> CInt {
        try wrapSyscall {
            sysMlock(addr, len)
        }
    }

    @inline(never)
    @discardableResult
    internal static func munlock(addr: UnsafeRawPointer, len: size_t) throws -> CInt {
        try wrapSyscall {
            sysMunlock(addr, len)
        }
    }
}

================================================
FILE: Sources/NIOSSL/PrivacyInfo.xcprivacy
================================================
NSPrivacyTracking NSPrivacyAccessedAPITypes NSPrivacyCollectedDataTypes NSPrivacyTrackingDomains

================================================
FILE: Sources/NIOSSL/SSLCallbacks.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is
part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL
import NIOCore

#if canImport(Darwin)
import Darwin.C
#elseif canImport(Musl)
import Musl
#elseif canImport(Glibc)
import Glibc
#elseif canImport(Bionic)
import Bionic
#else
#error("unsupported os")
#endif

/// The result of an attempt to verify an X.509 certificate.
public enum NIOSSLVerificationResult: Sendable {
    /// The certificate was successfully verified.
    case certificateVerified

    /// The certificate was not verified.
    case failed

    /// Maps BoringSSL's `preverify_ok` flag onto this enum: 1 means the
    /// certificate passed BoringSSL's own checks, 0 means it did not.
    internal init(fromBoringSSLPreverify preverify: CInt) {
        switch preverify {
        case 1:
            self = .certificateVerified
        case 0:
            self = .failed
        default:
            // BoringSSL documents preverify_ok as strictly 0 or 1.
            preconditionFailure("Invalid preverify value: \(preverify)")
        }
    }
}

/// The result of an attempt to verify an X.509 certificate, with associated metadata if the certificate was successfully verified.
public enum NIOSSLVerificationResultWithMetadata: Sendable, Hashable {
    /// The certificate was successfully verified; the associated value contains metadata captured during verification.
    case certificateVerified(VerificationMetadata)

    /// The certificate was not verified.
    case failed
}

/// The metadata captured during the verification of an X.509 certificate.
public struct VerificationMetadata: Sendable, Hashable {
    /// A container for the validated certificate chain: an array of certificates forming a verified and ordered chain
    /// of trust, starting from the peer's leaf certificate to a trusted root certificate.
    public var validatedCertificateChain: ValidatedCertificateChain?

    /// Creates an instance with the peer's *validated* certificate chain.
    ///
    /// - Parameter validatedCertificateChain: An optional *validated* certificate chain. If provided, it must **only**
    ///     contain the **validated** chain of trust that was built and verified from the certificates presented by the peer.
    public init(_ validatedCertificateChain: ValidatedCertificateChain?) {
        self.validatedCertificateChain = validatedCertificateChain
    }
}

/// A custom verification callback.
///
/// This verification callback is usually called more than once per connection, as it is called once
/// per certificate in the peer's complete certificate chain (including the root CA). The calls proceed
/// from root to leaf, ending with the peer's leaf certificate. Each time it is invoked with 2 arguments:
///
/// 1. The result of the BoringSSL verification for this certificate
/// 2. The ``NIOSSLCertificate`` for this level of the chain.
///
/// Please be cautious with calling out from this method. This method is always invoked on the event loop,
/// so you must not block or wait. It is not possible to return an `EventLoopFuture` from this method, as it
/// must not block or wait. Additionally, this method must take care to ensure that it does not cause any
/// ChannelHandler to recursively call back into the ``NIOSSLHandler`` that triggered it, as making re-entrant
/// calls into BoringSSL is not supported by SwiftNIO and leads to undefined behaviour.
///
/// In general, the only safe thing to do here is to either perform some cryptographic operations, to log,
/// or to store the ``NIOSSLCertificate`` somewhere for later consumption. The easiest way to be sure that the
/// ``NIOSSLCertificate`` is safe to consume is to wait for a user event that shows the handshake as completed,
/// or for channelInactive.
///
/// > Warning: This callback uses the old-style OpenSSL callback behaviour and is excessively complex to program with.
/// Instead, prefer using the NIOSSLCustomVerificationCallback style which receives the entire trust chain at once,
/// and also supports asynchronous certificate verification.
public typealias NIOSSLVerificationCallback = (NIOSSLVerificationResult, NIOSSLCertificate) -> NIOSSLVerificationResult

/// A custom verification callback that allows completely overriding the certificate verification logic of BoringSSL.
///
/// This verification callback is called no more than once per connection attempt. It is invoked with two arguments:
///
/// 1. The certificate chain presented by the peer, in the order the peer presented them (with the first certificate
///     being the leaf certificate presented by the peer).
/// 2. An `EventLoopPromise` that must be completed to signal the result of the verification.
///
/// Please be cautious with calling out from this method. This method is always invoked on the event loop,
/// so you must not block or wait. However, you may perform asynchronous work by leaving the event loop context:
/// when the verification is complete you must complete the provided `EventLoopPromise`.
///
/// This method must take care to ensure that it does not cause any `ChannelHandler` to recursively call back into
/// the ``NIOSSLHandler`` that triggered it, as making re-entrant calls into BoringSSL is not supported by SwiftNIO and
/// leads to undefined behaviour. It is acceptable to leave the event loop context and then call into the ``NIOSSLHandler``,
/// as this will not be re-entrant.
///
/// - Warning: Note that setting this callback will override _all_ verification logic that BoringSSL provides.
// NOTE(review): the promise's success type appears lost in extraction — presumably
// `EventLoopPromise<NIOSSLVerificationResult>`; confirm against upstream.
public typealias NIOSSLCustomVerificationCallback = ([NIOSSLCertificate], EventLoopPromise) -> Void

/// A custom verification callback that allows completely overriding the certificate verification logic of BoringSSL.
/// The only difference between this callback and ``NIOSSLCustomVerificationCallback`` is that this callback allows
/// the peer's validated certificate chain to be returned.
///
/// This verification callback is called no more than once per connection attempt. It is invoked with two arguments:
///
/// 1. The certificate chain presented by the peer, in the order the peer presented them (with the first certificate
///     being the leaf certificate presented by the peer).
/// 2. An `EventLoopPromise` that must be completed to signal the result of the verification. The promise must be
///     fulfilled with a ``NIOSSLVerificationResultWithMetadata`` value which contains the validated chain.
///
/// Please be cautious with calling out from this method. This method is always invoked on the event loop,
/// so you must not block or wait. However, you may perform asynchronous work by leaving the event loop context:
/// when the verification is complete you must complete the provided `EventLoopPromise`.
///
/// This method must take care to ensure that it does not cause any `ChannelHandler` to recursively call back into
/// the ``NIOSSLHandler`` that triggered it, as making re-entrant calls into BoringSSL is not supported by SwiftNIO and
/// leads to undefined behaviour. It is acceptable to leave the event loop context and then call into the ``NIOSSLHandler``,
/// as this will not be re-entrant.
///
/// - Warning: Setting this callback will override _all_ verification logic that BoringSSL provides. Therefore, a
///     validated chain must be derived *within* this callback (potentially involving fetching additional intermediate
///     certificates). The *validated* certificate chain returned in the promise result **must** be a verified path
///     to a trusted root. Importantly, the certificates presented by the peer should not be assumed to be valid.
// NOTE(review): the promise's success type appears lost in extraction — presumably
// `EventLoopPromise<NIOSSLVerificationResultWithMetadata>`; confirm against upstream.
public typealias NIOSSLCustomVerificationCallbackWithMetadata = (
    [NIOSSLCertificate],
    EventLoopPromise
) -> Void

/// A custom verification callback that allows additional peer certificate verification logic after the logic of BoringSSL has completed successfully.
///
/// It is invoked with two arguments:
/// 1. The verified leaf certificate from the peer certificate chain
/// 2. The channel to which the certificate belongs
///
/// The handshake will only succeed if the returned promise completes successfully.
///
/// - warning: This API is not guaranteed to be stable and is likely to be changed without further notice, hence the underscore prefix.
public typealias _NIOAdditionalPeerCertificateVerificationCallback = (NIOSSLCertificate, Channel) -> EventLoopFuture<
    Void
>

/// A callback that can be used to implement `SSLKEYLOGFILE` support.
///
/// Wireshark can decrypt packet captures that contain encrypted TLS connections if they have access to the
/// session keys used to perform the encryption. These keys are normally stored in a file that has a specific
/// file format. This callback is the low-level primitive that can be used to write such a file.
///
/// When set, this callback will be invoked once per secret. The provided `ByteBuffer` will contain the bytes
/// that need to be written into the file, including the newline character.
///
/// - warning: Please be aware that enabling support for `SSLKEYLOGFILE` through this callback will put the secrecy of
///     your connections at risk. You should only do so when you are confident that it will not be possible to
///     extract those secrets unnecessarily.
///
public typealias NIOSSLKeyLogCallback = @Sendable (ByteBuffer) -> Void

/// An object that provides helpers for working with a NIOSSLKeyLogCallback
internal struct KeyLogCallbackManager {
    // The user-supplied sink for key-log lines.
    private var callback: NIOSSLKeyLogCallback

    init(callback: @escaping NIOSSLKeyLogCallback) {
        self.callback = callback
    }
}

extension KeyLogCallbackManager {
    /// Called to log a string to the user.
    // NOTE(review): the parameter's pointee type appears lost in extraction — presumably
    // `UnsafePointer<CChar>` (a NUL-terminated C string handed over by BoringSSL); confirm upstream.
    func log(_ stringPointer: UnsafePointer) {
        let len = strlen(stringPointer)

        // We don't cache this because `log` can be called from arbitrary threads concurrently.
        var scratchBuffer = ByteBufferAllocator().buffer(capacity: len + 1)

        // Copy the line (without its terminating NUL) and append the newline the
        // SSLKEYLOGFILE format requires.
        let bufferPointer = UnsafeRawBufferPointer(start: stringPointer, count: Int(len))
        scratchBuffer.writeBytes(bufferPointer)
        scratchBuffer.writeInteger(UInt8(ascii: "\n"))

        self.callback(scratchBuffer)
    }
}

/// PSK Server Context provided to the callback.
public struct PSKServerContext: Sendable, Hashable {
    /// Optional identity hint provided to the client by the server.
    public let hint: String?

    /// Identity provided by the client to the server.
    public let clientIdentity: String

    /// Maximum length of the returned PSK.
    public let maxPSKLength: Int

    /// Constructs a ``PSKServerContext``.
    ///
    /// - parameter hint: Optional identity hint provided to the client.
    /// - parameter clientIdentity: Client identity received from the client.
    /// - parameter maxPSKLength: Maximum possible length of the Pre Shared Key.
    public init(hint: String?, clientIdentity: String, maxPSKLength: Int) {
        self.hint = hint
        self.clientIdentity = clientIdentity
        self.maxPSKLength = maxPSKLength
    }
}

/// PSK Client Context provided to the callback.
public struct PSKClientContext: Sendable, Hashable {
    /// Optional identity hint provided by the server to the client.
    public let hint: String?

    /// Maximum length of the returned PSK.
    public let maxPSKLength: Int

    /// Constructs a ``PSKClientContext``.
    ///
    /// - parameter hint: Optional identity hint provided by the server.
    /// - parameter maxPSKLength: Maximum possible length of the Pre Shared Key.
    public init(hint: String?, maxPSKLength: Int) {
        self.hint = hint
        self.maxPSKLength = maxPSKLength
    }
}

/// PSK Server Identity response type used in the callback.
public struct PSKServerIdentityResponse: Sendable {
    /// The negotiated PSK.
    public var key: NIOSSLSecureBytes

    /// Constructs a ``PSKServerIdentityResponse``.
    ///
    /// - parameter key: The negotiated PSK.
    public init(key: NIOSSLSecureBytes) {
        self.key = key
    }
}

/// PSK Client Identity response type used in the callback.
public struct PSKClientIdentityResponse: Sendable {
    /// The negotiated PSK.
    public var key: NIOSSLSecureBytes

    /// The identity of the PSK.
    public var identity: String

    /// Constructs a ``PSKClientIdentityResponse``.
    ///
    /// - parameter key: The negotiated PSK.
    /// - parameter identity: The identity of the PSK.
    public init(key: NIOSSLSecureBytes, identity: String) {
        self.key = key
        self.identity = identity
    }
}

/// A structure representing values from client extensions in the SSL/TLS handshake.
///
/// This struct contains values obtained from the client hello message extensions during the TLS handshake process and
/// can be manipulated or introspected by the `NIOSSLContextCallback` to alter the TLS handshake behaviour dynamically
/// based on these values.
public struct NIOSSLClientExtensionValues: Hashable, Sendable {
    /// The hostname value from the Server Name Indication (SNI) extension.
    ///
    /// This value, if available, indicates the requested server hostname by the client.
    /// In a context where a service is handling multiple hostnames (virtual hosts, for example), this value
    /// could be used to decide which SSLContext to use for the handshake.
    public var serverHostname: String?

    /// Initializes a new `NIOSSLClientExtensionValues` struct.
    ///
    /// - parameter serverHostname: The hostname value from the SNI extension.
    public init(serverHostname: String?) {
        self.serverHostname = serverHostname
    }
}

/// A structure representing changes to the SSL/TLS configuration that can be applied
/// after the client hello message extensions have been processed.
public struct NIOSSLContextConfigurationOverride: Sendable {
    /// The new certificate chain to use for the handshake.
    public var certificateChain: [NIOSSLCertificateSource]?

    /// The new private key to use for the handshake.
    public var privateKey: NIOSSLPrivateKeySource?

    public init() {}
}

extension NIOSSLContextConfigurationOverride {
    /// Return inside `NIOSSLContextCallback` when there are no changes to be made
    public static let noChanges = Self()
}

/// A callback that can used to support multiple or dynamic TLS hosts.
///
/// When set, this callback will be invoked once per TLS hello. The provided `NIOSSLClientExtensionValues` will contain the
/// host name indicated in the TLS client hello.
///
/// Within this callback, the user can create and return a new `NIOSSLContextConfigurationOverride` for the given host,
/// and the delta will be applied to the current handshake configuration.
///
// NOTE(review): the promise's success type appears lost in extraction — presumably
// `EventLoopPromise<NIOSSLContextConfigurationOverride>` (see `makePromise(of:)` below); confirm upstream.
public typealias NIOSSLContextCallback = @Sendable (
    NIOSSLClientExtensionValues,
    EventLoopPromise
) -> Void

/// A struct that provides helpers for working with a NIOSSLContextCallback.
internal struct CustomContextManager: Sendable {
    // The user-supplied callback that produces the configuration override.
    private let callback: NIOSSLContextCallback

    // Tracks whether the callback has been invoked and whether it has completed.
    private var state: State

    init(callback: @escaping NIOSSLContextCallback) {
        self.callback = callback
        self.state = .notStarted
    }
}

extension CustomContextManager {
    // The three-phase lifecycle of a single context-load attempt.
    private enum State {
        case notStarted
        case pendingResult
        // NOTE(review): `Result`'s generic arguments appear lost in extraction —
        // presumably `Result<NIOSSLContextConfigurationOverride, Error>`; confirm upstream.
        case complete(Result)
    }
}

extension CustomContextManager {
    /// The error the callback failed with, if any; `nil` while pending or on success.
    internal var loadContextError: (any Error)? {
        switch self.state {
        case .complete(.failure(let error)):
            return error
        default:
            return nil
        }
    }
}

extension CustomContextManager {
    /// Drives the BoringSSL cert_cb state machine: returns the completed result if the
    /// callback has finished, or `nil` (meaning "retry later") after kicking off the
    /// user callback / while it is still pending.
    mutating func loadContext(ssl: OpaquePointer) -> Result? {
        switch state {
        case .pendingResult:
            // In the pending case we return nil
            return nil
        case .complete(let result):
            // In the complete we can return our result
            return result
        case .notStarted:
            // Load the attached connection so we can resume handshake when future resolves
            let connection = SSLConnection.loadConnectionFromSSL(ssl)
            guard let eventLoop = connection.eventLoop else {
                preconditionFailure(
                    """
                    SSL_CTX_set_cert_cb was executed without an event loop assigned to the connection. \
                    This should not be possible, please file an issue.
                    """
                )
            }

            // Construct extension values to be passed to callback
            let cServerHostname = CNIOBoringSSL_SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name)
            let serverHostname = cServerHostname.map { String(cString: $0) }
            let values = NIOSSLClientExtensionValues(serverHostname: serverHostname)

            // Before invoking the user callback we can update our state to pending
            self.state = .pendingResult

            // We're responsible for creating the promise and the user provided callback will fulfill it
            let promise = eventLoop.makePromise(of: NIOSSLContextConfigurationOverride.self)
            self.callback(values, promise)

            promise.futureResult.assumeIsolated().whenComplete { result in
                // Ensure we execute any completion on the next event loop tick
                // This ensures that we suspend before calling resume
                eventLoop.assumeIsolated().execute {
                    connection.customContextManager?.state = .complete(result)
                    connection.parentHandler?.resumeHandshake()
                }
            }
            return nil
        }
    }

    /// Records a synchronous failure so that `loadContextError` can surface it.
    mutating func setLoadContextError(_ error: any Error) {
        self.state = .complete(.failure(error))
    }
}

/// The callback used for providing a PSK on the client side.
///
/// The callback is invoked on the event loop with the PSK hint. This callback must complete synchronously: it cannot return a future.
/// Additionally, as it is invoked on the NIO event loop, it is not possible for this to perform any I/O. As a result, lookups must be quick.
public typealias NIOPSKClientIdentityCallback = @Sendable (String) throws -> PSKClientIdentityResponse

/// The callback used for providing a PSK on the client side.
///
/// The callback is invoked on the event loop with a PSK context.
/// The context include the optional hint provided by the server.
/// This callback must complete synchronously: it cannot return a future.
/// Additionally, as it is invoked on the NIO event loop, it is not possible for this to perform any I/O. As a result, lookups must be quick.
public typealias NIOPSKClientIdentityProvider = @Sendable (PSKClientContext) throws -> PSKClientIdentityResponse

/// The callback used for providing a PSK on the server side.
///
/// The callback is invoked on the event loop with the PSK hint provided by the server, and the PSK identity provided by the client.
/// This callback must complete synchronously: it cannot return a future. Additionally, as it is invoked on the NIO event loop, it is
/// not possible for this to perform any I/O. As a result, lookups must be quick.
public typealias NIOPSKServerIdentityCallback = @Sendable (String, String) throws -> PSKServerIdentityResponse

/// The callback used for providing a PSK on the server side.
///
/// The callback is invoked on the event loop with a PSK context provided by the server and the client, and the PSK identity provided by the client
/// The context includes the optional hint.
/// This callback must complete synchronously: it cannot return a future. Additionally, as it is invoked on the NIO event loop, it is
/// not possible for this to perform any I/O. As a result, lookups must be quick.
public typealias NIOPSKServerIdentityProvider = @Sendable (PSKServerContext) throws -> PSKServerIdentityResponse

/// Allow internally to maintain the compatibility with the deprecated callback
internal enum _NIOPSKServerIdentityProvider {
    case callback(NIOPSKServerIdentityCallback)
    case provider(NIOPSKServerIdentityProvider)
}

/// Allow internally to maintain the compatibility with the deprecated callback
internal enum _NIOPSKClientIdentityProvider {
    case callback(NIOPSKClientIdentityCallback)
    case provider(NIOPSKClientIdentityProvider)
}

/// A struct that provides helpers for working with a NIOSSLCustomVerificationCallback.
internal struct CustomVerifyManager {
    // Which flavour of user/internal callback drives this manager.
    private var callback: CallbackType

    // The state machine for a single verification attempt.
    private var result: PendingResult = .notStarted

    /// Contains the metadata that the callback returned. As such, this property will *only* contain a value if
    /// `self.result` is `.complete` (and if the callback promise returns metadata).
    var verificationMetadata: VerificationMetadata?

    init(callback: @escaping NIOSSLCustomVerificationCallback) {
        self.callback = .public(callback)
    }

    init(callback: @escaping NIOSSLCustomVerificationCallbackWithMetadata) {
        self.callback = .publicWithMetadata(callback)
    }

    init(callback: @escaping InternalCallback) {
        self.callback = .internal(callback)
    }
}

extension CustomVerifyManager {
    fileprivate enum PendingResult: Hashable {
        case notStarted
        case pendingResult
        case complete(NIOSSLVerificationResult)
        case completeWithMetadata(NIOSSLVerificationResultWithMetadata)
    }

    // Bridges a callback's completion `Result` into a `PendingResult`, letting
    // `preparePromise` below be written once for both verification result types.
    // NOTE(review): `Result`'s generic arguments appear lost in extraction —
    // presumably `Result<Self, Error>`; confirm against upstream.
    fileprivate protocol PendingResultConvertible {
        static func pendingResult(_ result: Result) -> PendingResult
    }
}

extension CustomVerifyManager {
    /// Called (possibly repeatedly) by BoringSSL's custom-verify hook. Translates our
    /// state machine into the `ssl_verify_result_t` BoringSSL expects, invoking the
    /// user callback exactly once on first entry.
    mutating func process(on connection: SSLConnection) -> ssl_verify_result_t {
        // First, check if we have a result.
        switch self.result {
        case .complete(.certificateVerified):
            return ssl_verify_ok
        case .completeWithMetadata(.certificateVerified(let metadata)):
            // Extract the metadata and store it within `self`.
            self.verificationMetadata = metadata
            return ssl_verify_ok
        case .complete(.failed), .completeWithMetadata(.failed):
            return ssl_verify_invalid
        case .pendingResult:
            // Ask me again.
            return ssl_verify_retry
        case .notStarted:
            // The rest of this method handles this case.
            break
        }

        self.result = .pendingResult

        // Ok, no result. We must invoke the callback.
        self.callback.invoke(on: connection)
        return ssl_verify_retry
    }
}

extension CustomVerifyManager {
    private enum CallbackType {
        case `public`(NIOSSLCustomVerificationCallback)
        case publicWithMetadata(NIOSSLCustomVerificationCallbackWithMetadata)
        case `internal`(InternalCallback)

        // Prepares the promise that will be provided as an argument to the callback.
        // NOTE(review): this appears to be generic over `CallbackResult` (used below in
        // `makePromise(of:)` and `CallbackResult.pendingResult`); the generic parameter
        // list — presumably `<CallbackResult: PendingResultConvertible>` — and the
        // promise's success type were lost in extraction. Confirm against upstream.
        private static func preparePromise(
            on connection: SSLConnection
        ) -> EventLoopPromise {
            // We need a promise for the user to use to supply a result.
            guard let eventLoop = connection.eventLoop else {
                // No event loop. We cannot possibly be negotiating here.
                preconditionFailure("No event loop present")
            }

            let promise = eventLoop.makePromise(of: CallbackResult.self)

            // We need to attach our "do the thing" callback. This will always invoke the "ask me again" API, and it will do so in a separate
            // event loop tick to avoid awkward re-entrancy with this method.
            promise.futureResult.assumeIsolated().whenComplete { result in
                // When we complete here we need to set our result state, and then ask to respin certificate verification.
                // If we can't respin verification because we've dropped the parent handler, that's fine, no harm no foul.
                // For that reason, we tolerate both the verify manager and the parent handler being nil.
                eventLoop.assumeIsolated().execute {
                    // Note that we don't close over self here: that's to deal with the fact that this is a struct, and we don't want to
                    // escape the mutable ownership of self.
                    precondition(
                        connection.customVerificationManager == nil
                            || connection.customVerificationManager?.result == .some(.pendingResult)
                    )
                    connection.customVerificationManager?.result = CallbackResult.pendingResult(result)
                    connection.parentHandler?.resumeHandshake()
                }
            }

            return promise
        }

        /// For user-supplied callbacks we need to give them public types. For internal ones, we just pass the
        /// `EventLoopPromise` object through.
        func invoke(on connection: SSLConnection) {
            switch self {
            case .internal(let internalCallback):
                let promise: EventLoopPromise = Self.preparePromise(on: connection)
                internalCallback(promise)
            case .public(let callback):
                let promise: EventLoopPromise = Self.preparePromise(on: connection)
                do {
                    // Hand the peer's presented chain to the user; a failure to
                    // materialise the chain fails the verification promise directly.
                    callback(try connection.peerCertificateChain(), promise)
                } catch {
                    promise.fail(error)
                }
            case .publicWithMetadata(let callback):
                let promise: EventLoopPromise = Self.preparePromise(
                    on: connection
                )
                do {
                    callback(try connection.peerCertificateChain(), promise)
                } catch {
                    promise.fail(error)
                }
            }
        }
    }

    // NOTE(review): the promise's success type appears lost in extraction; confirm upstream.
    internal typealias InternalCallback = (EventLoopPromise) -> Void
}

extension NIOSSLVerificationResult: CustomVerifyManager.PendingResultConvertible {
    // Any failure collapses to `.failed`: the error itself is surfaced via the promise.
    fileprivate static func pendingResult(_ result: Result) -> CustomVerifyManager.PendingResult {
        switch result {
        case .success(let s): .complete(s)
        case .failure: .complete(.failed)
        }
    }
}

extension NIOSSLVerificationResultWithMetadata: CustomVerifyManager.PendingResultConvertible {
    fileprivate static func pendingResult(_ result: Result) -> CustomVerifyManager.PendingResult {
        switch result {
        case .success(let s): .completeWithMetadata(s)
        case .failure: .completeWithMetadata(.failed)
        }
    }
}

/// Represents a *validated* certificate chain, an array of certificates forming a verified and ordered trust path,
/// starting from the peer's certificate to a trusted root certificate.
public struct ValidatedCertificateChain: Sendable, Collection, RandomAccessCollection, Hashable { let validatedChain: [NIOSSLCertificate] public typealias Index = Int public typealias Element = NIOSSLCertificate public var startIndex: Index { self.validatedChain.startIndex } public var endIndex: Index { self.validatedChain.endIndex } public subscript(index: Index) -> Element { self.validatedChain[index] } public func index(after i: Index) -> Index { self.validatedChain.index(after: i) } /// Creates a `ValidatedCertificateChain` instance from an array of certificates forming a *verified* chain of trust. /// /// - Parameter validatedChain: An array of `NIOSSLCertificate` objects, representing the verified and ordered trust /// path, starting from the peer's certificate (first element) to a trusted root certificate (last element), with /// intermediate certificates ordered in between. /// /// - Important: Do not blindly pass in the array of certificates presented by the peer; the array *must* represent /// a fully validated and trusted chain. /// /// - Precondition: `validatedChain` must contain at least one certificate. public init(_ validatedChain: [NIOSSLCertificate]) { precondition(validatedChain.count > 0, "The provided validated chain must have at least one certificate") self.validatedChain = validatedChain } /// Returns the first element of the chain: the leaf certificate. public var leaf: NIOSSLCertificate { // We can safely force unwrap: the initializer enforces at least one element in `validatedChain` self.validatedChain.first! } } ================================================ FILE: Sources/NIOSSL/SSLCertificate.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2021 Apple Inc. 
and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// @_implementationOnly import CNIOBoringSSL @_implementationOnly import CNIOBoringSSLShims import NIOCore #if canImport(Darwin) import Darwin.C #elseif canImport(Musl) import Musl #elseif canImport(Glibc) import Glibc #elseif canImport(Bionic) import Bionic #else #error("unsupported os") #endif #if canImport(Darwin) import struct Darwin.time_t #elseif canImport(Glibc) import struct Glibc.time_t #endif /// A reference to a BoringSSL Certificate object (`X509 *`). /// /// This thin wrapper class allows us to use ARC to automatically manage /// the memory associated with this TLS certificate. That ensures that BoringSSL /// will not free the underlying buffer until we are done with the certificate. /// /// This class also provides several convenience constructors that allow users /// to obtain an in-memory representation of a TLS certificate from a buffer of /// bytes or from a file path. public final class NIOSSLCertificate { @usableFromInline internal let _ref: OpaquePointer // @inlinable internal func withUnsafeMutableX509Pointer( _ body: (OpaquePointer) throws -> ResultType ) rethrows -> ResultType { try body(self._ref) } // Internal to this class we can just access the ref directly. private var ref: OpaquePointer { self._ref } /// The serial number of this certificate, as raw bytes. public var serialNumber: [UInt8] { let serialNumber = CNIOBoringSSL_X509_get_serialNumber(self.ref)! return Array(UnsafeBufferPointer(start: serialNumber.pointee.data, count: Int(serialNumber.pointee.length))) } private init(withOwnedReference ref: OpaquePointer) { self._ref = ref } /// Create a ``NIOSSLCertificate`` from a file at a given path in either PEM or /// DER format. 
/// /// Note that this method will only ever load the first certificate from a given file. /// /// If you want to load certificates from a PEM file use ``fromPEMFile(_:)``. To load /// a certificate from a DER file use ``fromDERFile(_:)``. /// /// - parameters: /// - file: The path to the file to load the certificate from. /// - format: The format to use to parse the file. @available( *, deprecated, message: """ Use 'fromPEMFile(_:)' to load all certificates from a PEM file or 'fromDERFile(_:)' \ to load a single certificate from a DER file. """ ) public convenience init(file: String, format: NIOSSLSerializationFormats) throws { try self.init(_file: file, format: format) } /// Create a ``NIOSSLCertificate`` from a file at a given path in either PEM or /// DER format. /// /// Note that this method will only ever load the first certificate from a given file. /// /// - parameters: /// - file: The path to the file to load the certificate from. /// - format: The format to use to parse the file. internal convenience init(_file file: String, format: NIOSSLSerializationFormats) throws { let fileObject = try Posix.fopen(file: file, mode: "rb") defer { fclose(fileObject) } let x509: OpaquePointer? switch format { case .pem: x509 = CNIOBoringSSL_PEM_read_X509(fileObject, nil, nil, nil) case .der: x509 = CNIOBoringSSL_d2i_X509_fp(fileObject, nil) } if x509 == nil { throw NIOSSLError.failedToLoadCertificate } self.init(withOwnedReference: x509!) } /// Create a ``NIOSSLCertificate`` from a buffer of bytes in either PEM or /// DER format. /// /// - SeeAlso: `NIOSSLCertificate.init(bytes:format:)` @available(*, deprecated, renamed: "NIOSSLCertificate.init(bytes:format:)") public convenience init(buffer: [Int8], format: NIOSSLSerializationFormats) throws { try self.init(bytes: buffer.map(UInt8.init), format: format) } /// Create a ``NIOSSLCertificate`` from a buffer of bytes in either PEM or /// DER format. 
///
/// - parameters:
///     - bytes: The raw bytes containing the certificate.
///     - format: The format to use to parse the file.
public convenience init(bytes: [UInt8], format: NIOSSLSerializationFormats) throws {
    let ref = bytes.withUnsafeBytes { (ptr) -> OpaquePointer? in
        let bio = CNIOBoringSSL_BIO_new_mem_buf(ptr.baseAddress, ptr.count)!
        defer {
            CNIOBoringSSL_BIO_free(bio)
        }

        switch format {
        case .pem:
            return CNIOBoringSSL_PEM_read_bio_X509(bio, nil, nil, nil)
        case .der:
            return CNIOBoringSSL_d2i_X509_bio(bio, nil)
        }
    }

    // Use guard-let rather than a nil check plus force-unwrap.
    guard let certificate = ref else {
        throw NIOSSLError.failedToLoadCertificate
    }

    self.init(withOwnedReference: certificate)
}

/// Create a NIOSSLCertificate from a buffer of bytes in either PEM or DER format.
internal convenience init(bytes ptr: UnsafeRawBufferPointer, format: NIOSSLSerializationFormats) throws {
    // TODO(cory):
    // The body of this method is exactly identical to the initializer above, except for the "withUnsafeBytes" call.
    // ContiguousBytes would have been the lowest effort way to reduce this duplication, but we can't use it without
    // bringing Foundation in. Probably we should use Sequence where Element == UInt8 and the withUnsafeContiguousBytesIfAvailable
    // method, but that's a much more substantial refactor. Let's do it later.
    let bio = CNIOBoringSSL_BIO_new_mem_buf(ptr.baseAddress, ptr.count)!
    defer {
        CNIOBoringSSL_BIO_free(bio)
    }

    let ref: OpaquePointer?
    switch format {
    case .pem:
        ref = CNIOBoringSSL_PEM_read_bio_X509(bio, nil, nil, nil)
    case .der:
        ref = CNIOBoringSSL_d2i_X509_bio(bio, nil)
    }

    guard let certificate = ref else {
        throw NIOSSLError.failedToLoadCertificate
    }

    self.init(withOwnedReference: certificate)
}

/// Create a NIOSSLCertificate wrapping a pointer into BoringSSL.
///
/// This is a function that should be avoided as much as possible because it plays poorly with
/// BoringSSL's reference-counted memory. This function does not increment the reference count for the `X509`
/// object here, nor does it duplicate it: it just takes ownership of the copy here. This object
/// **will** deallocate the underlying `X509` object when deinited, and so if you need to keep that
/// `X509` object alive you should call `X509_dup` before passing the pointer here.
///
/// In general, however, this function should be avoided in favour of one of the convenience
/// initializers, which ensure that the lifetime of the `X509` object is better-managed.
static func fromUnsafePointer(takingOwnership pointer: OpaquePointer) -> NIOSSLCertificate {
    NIOSSLCertificate(withOwnedReference: pointer)
}

/// Get a collection of the alternative names in the certificate.
public func _subjectAlternativeNames() -> _SubjectAlternativeNames {
    let sanExtension = CNIOBoringSSL_X509_get_ext_d2i(self.ref, NID_subject_alt_name, nil, nil)
    return _SubjectAlternativeNames(nameStack: sanExtension.map(OpaquePointer.init))
}

/// Extracts the SHA1 hash of the subject name before it has been truncated.
///
/// - returns: Numeric hash of the subject name.
internal func getSubjectNameHash() -> UInt32 {
    CNIOBoringSSL_X509_subject_name_hash(self.ref)
}

/// Returns the commonName field in the Subject of this certificate.
///
/// It is technically possible to have multiple common names in a certificate. As the primary
/// purpose of this field in SwiftNIO is to validate TLS certificates, we only ever return
/// the *most significant* (i.e. last) instance of commonName in the subject.
internal func commonName() -> [UInt8]? {
    // No subject name is unexpected, but it gives us an easy time of handling this at least.
    guard let subjectName = CNIOBoringSSL_X509_get_subject_name(self.ref) else {
        return nil
    }

    // Per the man page, to find the first entry we set lastIndex to -1. When there are no
    // more entries, -1 is returned as the index of the next entry.
    var lastIndex: CInt = -1
    var nextIndex: CInt = -1
    repeat {
        lastIndex = nextIndex
        nextIndex = CNIOBoringSSL_X509_NAME_get_index_by_NID(subjectName, NID_commonName, lastIndex)
    } while nextIndex >= 0

    // It's totally allowed to have no commonName.
    guard lastIndex >= 0 else {
        return nil
    }

    // This is very unlikely, but it could happen.
    guard let nameData = CNIOBoringSSL_X509_NAME_ENTRY_get_data(
        CNIOBoringSSL_X509_NAME_get_entry(subjectName, lastIndex)
    ) else {
        return nil
    }

    // Cool, we have the name. Let's have BoringSSL give it to us in UTF-8 form and then put those bytes
    // into our own array.
    // NOTE(review): the pointee type was stripped by extraction; restored as `UInt8`,
    // matching the `[UInt8]` array construction below.
    var encodedName: UnsafeMutablePointer<UInt8>? = nil
    let stringLength = CNIOBoringSSL_ASN1_STRING_to_UTF8(&encodedName, nameData)

    guard let namePtr = encodedName else {
        return nil
    }

    let arr = [UInt8](UnsafeBufferPointer(start: namePtr, count: Int(stringLength)))
    CNIOBoringSSL_OPENSSL_free(namePtr)
    return arr
}

deinit {
    CNIOBoringSSL_X509_free(ref)
}
}

// NIOSSLCertificate is publicly immutable and we do not internally mutate it after initialisation.
// It is therefore Sendable.
extension NIOSSLCertificate: @unchecked Sendable {}

// MARK:- Utility Functions
// We don't really want to get too far down the road of providing helpers for things like certificates
// and private keys: this is really the domain of alternative cryptography libraries. However, to
// enable users of swift-nio-ssl to use other cryptography libraries it will be helpful to provide
// the ability to obtain the bytes that correspond to certificates and keys.
extension NIOSSLCertificate {
    /// Obtain the public key for this ``NIOSSLCertificate``.
    ///
    /// - returns: This certificate's ``NIOSSLPublicKey``.
    /// - throws: If an error is encountered extracting the key.
public func extractPublicKey() throws -> NIOSSLPublicKey {
    // A well-formed certificate always carries a public key, so a failure here is
    // treated as a programmer error rather than a recoverable condition.
    guard let publicKeyRef = CNIOBoringSSL_X509_get_pubkey(self.ref) else {
        fatalError("Failed to extract a public key reference")
    }
    return NIOSSLPublicKey.fromInternalPointer(takingOwnership: publicKeyRef)
}

/// Extracts the bytes of this certificate in DER format.
///
/// - returns: The DER-encoded bytes for this certificate.
/// - throws: If an error occurred while serializing the certificate.
public func toDERBytes() throws -> [UInt8] {
    try self.withUnsafeDERCertificateBuffer { derBytes in
        Array(derBytes)
    }
}

/// Create an array of ``NIOSSLCertificate``s from a buffer of bytes in PEM format.
///
/// - Parameter buffer: The PEM buffer to read certificates from.
/// - Throws: If an error is encountered while reading certificates.
/// - SeeAlso: `NIOSSLCertificate.fromPEMBytes(_:)`
@available(*, deprecated, renamed: "NIOSSLCertificate.fromPEMBytes(_:)")
public class func fromPEMBuffer(_ buffer: [Int8]) throws -> [NIOSSLCertificate] {
    try fromPEMBytes(buffer.map(UInt8.init))
}

/// Create an array of ``NIOSSLCertificate``s from a buffer of bytes in PEM format.
///
/// - Parameter bytes: The PEM buffer to read certificates from.
/// - Throws: If an error is encountered while reading certificates.
public class func fromPEMBytes(_ bytes: [UInt8]) throws -> [NIOSSLCertificate] {
    // Clear the BoringSSL error stack on the way in and the way out so stale
    // errors never leak across calls.
    CNIOBoringSSL_ERR_clear_error()
    defer {
        CNIOBoringSSL_ERR_clear_error()
    }

    return try bytes.withUnsafeBytes { rawBytes -> [NIOSSLCertificate] in
        let memoryBIO = CNIOBoringSSL_BIO_new_mem_buf(rawBytes.baseAddress, rawBytes.count)!
        defer {
            CNIOBoringSSL_BIO_free(memoryBIO)
        }
        return try readCertificatesFromBIO(memoryBIO)
    }
}

/// Create an array of ``NIOSSLCertificate``s from a file at a given path in PEM format.
///
/// - Parameter path: The PEM file to read certificates from.
/// - Throws: If an error is encountered while reading certificates.
public class func fromPEMFile(_ path: String) throws -> [NIOSSLCertificate] {
    // Clear the BoringSSL error stack on entry and exit so stale errors never
    // leak across calls.
    CNIOBoringSSL_ERR_clear_error()
    defer {
        CNIOBoringSSL_ERR_clear_error()
    }

    guard let bio = CNIOBoringSSL_BIO_new(CNIOBoringSSL_BIO_s_file()) else {
        fatalError("Failed to create a BIO handle to read a PEM file")
    }
    defer {
        CNIOBoringSSL_BIO_free(bio)
    }

    guard CNIOBoringSSL_BIO_read_filename(bio, path) > 0 else {
        throw NIOSSLError.failedToLoadCertificate
    }

    return try readCertificatesFromBIO(bio)
}

/// Create a ``NIOSSLCertificate`` from a DER file at a given path.
///
/// - parameters:
///     - path: The path to the file to load the certificate from.
public static func fromDERFile(_ path: String) throws -> NIOSSLCertificate {
    try NIOSSLCertificate(_file: path, format: .der)
}

/// Returns the timestamp before which this certificate is not valid.
///
/// The value is in seconds since the UNIX epoch.
public var notValidBefore: time_t {
    // This ref is owned by self.
    let notBefore = CNIOBoringSSL_X509_get0_notBefore(self.ref)!
    return notBefore.timeSinceEpoch
}

/// Returns the timestamp after which this certificate is not valid.
///
/// The value is in seconds since the UNIX epoch.
public var notValidAfter: time_t {
    // This ref is owned by self.
    let notAfter = CNIOBoringSSL_X509_get0_notAfter(self.ref)!
    return notAfter.timeSinceEpoch
}

/// Reads `NIOSSLCertificate`s from the given BIO.
// NOTE(review): the pointee type was stripped by extraction; restored as `BIO`,
// matching the BIO-consuming PEM read calls below.
private class func readCertificatesFromBIO(_ bio: UnsafeMutablePointer<BIO>) throws -> [NIOSSLCertificate] {
    guard let x509 = CNIOBoringSSL_PEM_read_bio_X509_AUX(bio, nil, nil, nil) else {
        throw NIOSSLError.failedToLoadCertificate
    }
    var certificates = [NIOSSLCertificate(withOwnedReference: x509)]

    while let x = CNIOBoringSSL_PEM_read_bio_X509(bio, nil, nil, nil) {
        certificates.append(.init(withOwnedReference: x))
    }

    let err = CNIOBoringSSL_ERR_peek_error()

    // If we hit the end of the file then it's not a real error, we just read as much as we could.
    if CNIOBoringSSLShims_ERR_GET_LIB(err) == ERR_LIB_PEM
        && CNIOBoringSSLShims_ERR_GET_REASON(err) == PEM_R_NO_START_LINE
    {
        CNIOBoringSSL_ERR_clear_error()
    } else {
        throw NIOSSLError.failedToLoadCertificate
    }

    return certificates
}

/// Calls the given body function with a temporary buffer containing the DER-encoded bytes of this
/// certificate. This function does allocate for these bytes, but there is no way to avoid doing so with the
/// X509 API in BoringSSL.
///
/// The pointer provided to the closure is not valid beyond the lifetime of this method call.
// NOTE(review): the generic parameter was stripped by extraction; restored as `<T>`.
private func withUnsafeDERCertificateBuffer<T>(_ body: (UnsafeRawBufferPointer) throws -> T) throws -> T {
    guard let bio = CNIOBoringSSL_BIO_new(CNIOBoringSSL_BIO_s_mem()) else {
        fatalError("Failed to malloc for a BIO handler")
    }
    defer {
        CNIOBoringSSL_BIO_free(bio)
    }

    let rc = CNIOBoringSSL_i2d_X509_bio(bio, self.ref)
    guard rc == 1 else {
        let errorStack = BoringSSLError.buildErrorStack()
        throw BoringSSLError.unknownError(errorStack)
    }

    // NOTE(review): the pointee type was stripped by extraction; restored as `CChar`,
    // the conventional element type for BIO_get_mem_data — confirm against upstream.
    var dataPtr: UnsafeMutablePointer<CChar>? = nil
    let length = CNIOBoringSSL_BIO_get_mem_data(bio, &dataPtr)

    guard let bytes = dataPtr.map({ UnsafeRawBufferPointer(start: $0, count: length) }) else {
        fatalError("Failed to map bytes from a certificate")
    }

    return try body(bytes)
}
}

extension NIOSSLCertificate: Equatable {
    public static func == (lhs: NIOSSLCertificate, rhs: NIOSSLCertificate) -> Bool {
        CNIOBoringSSL_X509_cmp(lhs.ref, rhs.ref) == 0
    }
}

extension NIOSSLCertificate: Hashable {
    public func hash(into hasher: inout Hasher) {
        // We just hash the DER bytes of the cert. If we can't get the bytes, this is a fatal error as
        // we have no way to recover from it. It's unfortunate that this allocates, but the code to hash
        // a certificate in any other way is too fragile to justify.
        try! self.withUnsafeDERCertificateBuffer { hasher.combine(bytes: $0) }
    }
}

extension NIOSSLCertificate: CustomStringConvertible {
    public var description: String {
        // NOTE(review): the interior of this property was destroyed by angle-bracket-stripping
        // extraction (everything between the first `<` and the final `>` of the description
        // string was lost, leaving `var desc = ""`). Reconstructed conservatively: emit the
        // serial number and, when present, the most significant commonName. Confirm the exact
        // format against upstream before relying on it.
        let serialNumber = self.serialNumber.map { String($0, radix: 16) }.reduce("", +)
        var desc = "<NIOSSLCertificate;serial_number=\(serialNumber)"
        if let commonNameBytes = self.commonName() {
            desc += ";common_name=\(String(decoding: commonNameBytes, as: UTF8.self))"
        }
        return desc + ">"
    }
}

extension UnsafePointer where Pointee == ASN1_TIME {
    var timeSinceEpoch: time_t {
        let epochTime = CNIOBoringSSL_ASN1_TIME_new()!
        defer {
            CNIOBoringSSL_ASN1_TIME_free(epochTime)
        }

        // This sets the ASN1_TIME to epoch time.
        CNIOBoringSSL_ASN1_TIME_set(epochTime, 0)
        var day = CInt(0)
        var seconds = CInt(0)

        let rc = CNIOBoringSSL_ASN1_TIME_diff(&day, &seconds, epochTime, self)
        precondition(rc != 0)

        // 86400 seconds in a day
        return time_t(day) * 86400 + time_t(seconds)
    }
}


================================================
FILE: Sources/NIOSSL/SSLCertificateExtensions.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2022 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL
@_implementationOnly import CNIOBoringSSLShims

extension NIOSSLCertificate {
    public struct _Extensions {
        private enum Storage {
            final class Deallocator {
                /// `reference` is optional because `CNIOBoringSSL_X509_get0_extensions` can return`nil` if no extensions are present.
                /// We therefore need to handle the `nil` case as if this collection is empty.
                let reference: OpaquePointer?

                init(takeOwnershipOf reference: OpaquePointer?)
{
    self.reference = reference
}

deinit {
    // Owned mode: we are responsible for freeing the extension stack, if any.
    if let reference = self.reference {
        CNIOBoringSSL_sk_X509_EXTENSION_free(reference)
    }
}
}

case owned(Deallocator)

/// `reference` is optional because `CNIOBoringSSL_X509_get0_extensions` can return`nil` if no extensions are present.
/// We therefore need to handle the `nil` case as if this collection is empty.
case borrowed(reference: OpaquePointer?, owner: AnyObject)

init(takeOwnershipOf reference: OpaquePointer?) {
    self = .owned(.init(takeOwnershipOf: reference))
}

init(borrowing reference: OpaquePointer?, owner: AnyObject) {
    self = .borrowed(reference: reference, owner: owner)
}

/// The owner of the memory to which the reference points
var owner: AnyObject {
    switch self {
    case .owned(let deallocator):
        return deallocator
    case .borrowed(_, let owner):
        return owner
    }
}

/// All operations accessing `reference` need to be implemented while guaranteeing that we still have a reference to the memory owner.
/// Otherwise `reference` could already be freed. This would result in undefined behaviour as we access a dangling pointer.
/// This method guarantees that `reference` is valid during execution of `body`.
// NOTE(review): the generic parameter was stripped by extraction; restored as `<Result>`.
internal func withReference<Result>(
    _ body: (OpaquePointer?) throws -> Result
) rethrows -> Result {
    try withExtendedLifetime(self) {
        switch self {
        case .owned(let deallocator):
            return try body(deallocator.reference)
        case .borrowed(let reference, _):
            return try body(reference)
        }
    }
}
}

@usableFromInline
internal let stackSize: Int
private let storage: Storage

internal init(takeOwnershipOf reference: OpaquePointer?) {
    self.storage = .init(takeOwnershipOf: reference)
    if let reference = reference {
        self.stackSize = CNIOBoringSSL_sk_X509_EXTENSION_num(reference)
    } else {
        self.stackSize = 0
    }
}

internal init(borrowing reference: OpaquePointer?, owner: AnyObject) {
    self.storage = .init(borrowing: reference, owner: owner)
    if let reference = reference {
        self.stackSize = CNIOBoringSSL_sk_X509_EXTENSION_num(reference)
    } else {
        self.stackSize = 0
    }
}
}
}

// NIOSSLCertificate._Extensions is immutable and therefore Sendable
extension NIOSSLCertificate._Extensions: @unchecked Sendable {}

extension NIOSSLCertificate {
    public var _extensions: NIOSSLCertificate._Extensions {
        NIOSSLCertificate._Extensions(borrowing: CNIOBoringSSL_X509_get0_extensions(self._ref), owner: self)
    }
}

extension NIOSSLCertificate._Extensions: RandomAccessCollection {
    public subscript(position: Int) -> NIOSSLCertificate._Extension {
        precondition(self.indices.contains(position), "index \(position) out of bounds")
        return self.storage.withReference { reference in
            // The force-unwraps are safe: a non-empty collection implies a non-nil stack,
            // and `position` was bounds-checked above.
            let value = CNIOBoringSSLShims_sk_X509_EXTENSION_value(reference!, position)!
            return .init(borrowing: value, owner: self.storage.owner)
        }
    }

    @inlinable
    public var startIndex: Int { 0 }

    @inlinable
    public var endIndex: Int { self.stackSize }
}

extension NIOSSLCertificate {
    public struct _Extension {
        init(borrowing reference: OpaquePointer, owner: AnyObject) {
            self.owner = owner
            self._reference = reference
        }

        /// lifetime automatically managed by `owner`
        private let _reference: OpaquePointer
        /// only part of this type to keep a strong reference to the underlying storage of `reference`
        private let owner: AnyObject

        /// All operations accessing `reference` need to be implemented while guaranteeing that we still have a reference to the memory `owner`.
        /// Otherwise `reference` could already be freed. This would result in undefined behaviour as we access a dangling pointer.
        /// This method guarantees that `reference` is valid during execution of `body`.
// NOTE(review): the generic parameter was stripped by extraction; restored as `<Result>`.
func withReference<Result>(
    _ body: (OpaquePointer?) throws -> Result
) rethrows -> Result {
    try withExtendedLifetime(owner) {
        try body(self._reference)
    }
}

public var objectIdentifier: NIOSSLObjectIdentifier {
    withReference {
        .init(borrowing: CNIOBoringSSL_X509_EXTENSION_get_object($0), owner: self.owner)
    }
}

public var isCritical: Bool {
    withReference {
        CNIOBoringSSL_X509_EXTENSION_get_critical($0) == 1
    }
}

public var data: Data {
    withReference {
        let data = CNIOBoringSSL_X509_EXTENSION_get_data($0)
        let buffer = UnsafeBufferPointer(
            start: CNIOBoringSSL_ASN1_STRING_get0_data(data),
            count: Int(CNIOBoringSSL_ASN1_STRING_length(data))
        )
        return .init(buffer: buffer, owner: self.owner)
    }
}
}
}

// NIOSSLCertificate._Extension is immutable and therefore Sendable
extension NIOSSLCertificate._Extension: @unchecked Sendable {}

extension NIOSSLCertificate._Extension {
    public struct Data {
        // only part of this type to keep a strong reference to the underlying storage of `buffer`
        private let owner: AnyObject

        // lifetime automatically managed by `owner`
        // NOTE(review): the pointee type was stripped by extraction; restored as `UInt8`,
        // matching the `UInt8` element subscript of the collection conformance below.
        @usableFromInline
        internal let buffer: UnsafeBufferPointer<UInt8>

        internal init(buffer: UnsafeBufferPointer<UInt8>, owner: AnyObject) {
            self.buffer = buffer
            self.owner = owner
        }

        @inlinable
        public func withUnsafeBufferPointer<Result>(
            _ body: (UnsafeBufferPointer<UInt8>) throws -> Result
        ) rethrows -> Result {
            try withExtendedLifetime(self) {
                try body(self.buffer)
            }
        }

        @inlinable
        public func withUnsafeBytes<Result>(
            _ body: (UnsafeRawBufferPointer) throws -> Result
        ) rethrows -> Result {
            try withExtendedLifetime(self) {
                try body(.init(self.buffer))
            }
        }
    }
}

// NIOSSLCertificate._Extension.Data is immutable and therefore Sendable
extension NIOSSLCertificate._Extension.Data: @unchecked Sendable {}

extension NIOSSLCertificate._Extension.Data: RandomAccessCollection {
    @inlinable
    public var startIndex: Int { self.buffer.startIndex }

    @inlinable
    public var endIndex: Int { self.buffer.endIndex }

    @inlinable
    public subscript(position: Int) -> UInt8 {
        precondition(self.indices.contains(position), "index \(position) out of bounds")
        return withUnsafeBufferPointer { $0[position] }
    }

    @inlinable
    public func withContiguousStorageIfAvailable<Result>(
        _ body: (UnsafeBufferPointer<UInt8>) throws -> Result
    ) rethrows -> Result? {
        try withUnsafeBufferPointer(body)
    }
}


================================================
FILE: Sources/NIOSSL/SSLCertificateName.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2025 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL

/// Defines the type of X509 name
public struct SSLCertificateNameType: Equatable, Hashable, Sendable {
    internal var nid: Int32

    public static let organization = SSLCertificateNameType(nid: NID_organizationName)
    public static let organizationalUnit = SSLCertificateNameType(nid: NID_organizationalUnitName)
    public static let state = SSLCertificateNameType(nid: NID_stateOrProvinceName)
    public static let country = SSLCertificateNameType(nid: NID_countryName)
    public static let city = SSLCertificateNameType(nid: NID_localityName)
    public static let commonName = SSLCertificateNameType(nid: NID_commonName)
    public static let emailAddress = SSLCertificateNameType(nid: NID_pkcs9_emailAddress)
    public static let userId = SSLCertificateNameType(nid: NID_userId)
}

/// Contains the string value of a X509 name
public struct SSLCertificateName: Equatable, Hashable, Sendable {
    public var value: String
    public var type: SSLCertificateNameType

    public init(_ value: String, _ type: SSLCertificateNameType) {
        self.value = value
self.type = type
    }
}

extension NIOSSLCertificate {
    /// Converts every entry of an `X509_NAME` into an `SSLCertificateName`.
    private static func convertName(_ name: OpaquePointer) -> [SSLCertificateName] {
        // NOTE(review): the interior of this loop was mangled by angle-bracket-stripping
        // extraction ("for index in 0..? = nil"); reconstructed from the surviving tail,
        // which references `object`, `data`, `encodedName` and `namePtr`. Confirm against
        // upstream before relying on the exact skip behaviour.
        let count = CNIOBoringSSL_X509_NAME_entry_count(name)
        var names = [SSLCertificateName]()
        names.reserveCapacity(Int(count))

        for index in 0..<count {
            // Fetch the entry plus its ASN.1 object (the name type) and data (the value);
            // entries we cannot decode are skipped rather than failing the whole conversion.
            guard let entry = CNIOBoringSSL_X509_NAME_get_entry(name, index),
                let object = CNIOBoringSSL_X509_NAME_ENTRY_get_object(entry),
                let data = CNIOBoringSSL_X509_NAME_ENTRY_get_data(entry)
            else {
                continue
            }

            var encodedName: UnsafeMutablePointer<UInt8>? = nil
            let stringLength = CNIOBoringSSL_ASN1_STRING_to_UTF8(&encodedName, data)
            guard let namePtr = encodedName else {
                continue
            }
            // BoringSSL allocated this buffer for us; free it at the end of each iteration.
            defer {
                CNIOBoringSSL_OPENSSL_free(namePtr)
            }

            let arr = UnsafeBufferPointer(start: namePtr, count: Int(stringLength))
            let nameString = String(decoding: arr, as: UTF8.self)
            let nid = CNIOBoringSSL_OBJ_obj2nid(object)
            names.append(SSLCertificateName(nameString, .init(nid: nid)))
        }
        return names
    }

    /// Return an array of SSLCertificateName enums containing the subject name of the
    /// underlying X509 Certificate
    public var subjectName: [SSLCertificateName] {
        guard let subjectName = CNIOBoringSSL_X509_get_subject_name(self._ref) else {
            return []
        }
        return Self.convertName(subjectName)
    }

    /// Return an array of SSLCertificateName enums containing the issuer name of the
    /// underlying X509 Certificate
    public var issuerName: [SSLCertificateName] {
        guard let issuerName = CNIOBoringSSL_X509_get_issuer_name(self._ref) else {
            return []
        }
        return Self.convertName(issuerName)
    }
}


================================================
FILE: Sources/NIOSSL/SSLConnection.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc.
and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL
import NIOCore

internal let SSL_MAX_RECORD_SIZE = 16 * 1024

/// This is used as the application data index to store pointers to `SSLConnection` objects in
/// `SSL` objects. It is only safe to use after BoringSSL initialization. As it's declared global,
/// it will be lazily initialized and protected by a dispatch_once, ensuring that it's thread-safe.
internal let sslConnectionExDataIndex = CNIOBoringSSL_SSL_get_ex_new_index(0, nil, nil, nil, nil)

/// Encodes the return value of a non-blocking BoringSSL method call.
///
/// This enum maps BoringSSL's return values to a small number of cases. A success
/// value naturally maps to `.complete`, and most errors map to `.failed`. However,
/// the BoringSSL "errors" `WANT_READ` and `WANT_WRITE` are mapped to `.incomplete`, to
/// help distinguish them from the other error cases. This makes it easier for code to
/// handle the "must wait for more data" case by calling it out directly.
// NOTE(review): the generic parameter was stripped by extraction; restored as `<T>`,
// matching the payload of `.complete(T)`.
enum AsyncOperationResult<T> {
    case incomplete
    case complete(T)
    case failed(BoringSSLError)
}

/// A wrapper class that encapsulates BoringSSL's `SSL *` object.
///
/// This class represents a single TLS connection, and performs all of crypto and record
/// framing required by TLS. It also records the configuration and parent `NIOSSLContext` object
/// used to create the connection.
internal final class SSLConnection {
    private let ssl: OpaquePointer
    internal let parentContext: NIOSSLContext
    private var bio: ByteBufferBIO?
    internal var expectedHostname: String?
    internal var role: ConnectionRole?
    internal var parentHandler: NIOSSLHandler?
    internal var eventLoop: EventLoop?

    /// Deprecated in favour of customVerificationManager
    private var verificationCallback: NIOSSLVerificationCallback?
    internal var customVerificationManager: CustomVerifyManager?
    // NOTE(review): the generic arguments of this Result were stripped by extraction;
    // restored as `Result<ByteBuffer, Error>` — confirm against the custom private key
    // callback's completion type upstream.
    internal var customPrivateKeyResult: Result<ByteBuffer, Error>?
    internal var customContextManager: CustomContextManager?
    internal var currentOverride: NIOSSLContextConfigurationOverride?

    /// Whether certificate hostnames should be validated.
    var validateHostnames: Bool {
        if case .fullVerification = parentContext.configuration.certificateVerification {
            return true
        }
        return false
    }

    init(ownedSSL: OpaquePointer, parentContext: NIOSSLContext) {
        self.ssl = ownedSSL
        self.parentContext = parentContext

        if let customContextCallback = parentContext.configuration.sslContextCallback {
            self.customContextManager = CustomContextManager(callback: customContextCallback)
        }

        // We pass the SSL object an unowned reference to this object.
        let pointerToSelf = Unmanaged.passUnretained(self).toOpaque()
        CNIOBoringSSL_SSL_set_ex_data(self.ssl, sslConnectionExDataIndex, pointerToSelf)

        self.setRenegotiationSupport(self.parentContext.configuration.renegotiationSupport)
    }

    deinit {
        CNIOBoringSSL_SSL_free(ssl)
    }

    /// Configures this as a server connection.
    func setAcceptState() {
        CNIOBoringSSL_SSL_set_accept_state(ssl)
        self.role = .server
    }

    /// Configures this as a client connection.
    func setConnectState() {
        CNIOBoringSSL_SSL_set_connect_state(ssl)
        self.role = .client
    }

    /// Wires a fresh ByteBufferBIO up to the SSL object for all reads and writes.
    func setAllocator(_ allocator: ByteBufferAllocator, maximumPreservedOutboundBufferCapacity: Int) {
        self.bio = ByteBufferBIO(
            allocator: allocator,
            maximumPreservedOutboundBufferCapacity: maximumPreservedOutboundBufferCapacity
        )

        // This weird dance where we pass the *exact same* pointer in to both objects is because, weirdly,
        // the BoringSSL docs claim that only one reference count will be consumed here. We therefore need to
        // avoid calling BIO_up_ref too many times.
let bioPtr = self.bio!.retainedBIO()
        CNIOBoringSSL_SSL_set_bio(self.ssl, bioPtr, bioPtr)
    }

    /// Sets the value of the SNI extension to send to the server.
    ///
    /// This method must only be called with a hostname, not an IP address. Sending
    /// an IP address in the SNI extension is invalid, and may result in handshake
    /// failure.
    func setServerName(name: String) throws {
        CNIOBoringSSL_ERR_clear_error()
        let rc = name.withCString {
            CNIOBoringSSL_SSL_set_tlsext_host_name(ssl, $0)
        }
        guard rc == 1 else {
            throw BoringSSLError.invalidSNIName(BoringSSLError.buildErrorStack())
        }
        // Record the name so hostname validation can check the peer certificate against it later.
        self.expectedHostname = name
    }

    /// Sets the BoringSSL old-style verification callback.
    ///
    /// This is deprecated in favour of the new-style verification callback in SSLContext.
    func setVerificationCallback(_ callback: @escaping NIOSSLVerificationCallback) {
        // Store the verification callback. We need to do this to keep it alive throughout the connection.
        // We'll drop this when we're told that it's no longer needed to ensure we break the reference cycles
        // that this callback inevitably produces.
        self.verificationCallback = callback

        // We need to know what the current mode is.
        let currentMode = CNIOBoringSSL_SSL_get_verify_mode(self.ssl)
        // The closure below is passed as a C function pointer, so it cannot capture anything:
        // all state is recovered from the X509_STORE_CTX ex_data inside the callback.
        CNIOBoringSSL_SSL_set_verify(self.ssl, currentMode) { preverify, storeContext in
            // To start out, let's grab the certificate we're operating on.
            guard let certPointer = CNIOBoringSSL_X509_STORE_CTX_get_current_cert(storeContext) else {
                preconditionFailure(
                    "Can only have verification function invoked with actual certificate: bad store \(String(describing: storeContext))"
                )
            }
            // Take an extra reference before wrapping: the NIOSSLCertificate wrapper frees its
            // X509 on deinit, and the store still owns the original reference.
            CNIOBoringSSL_X509_up_ref(certPointer)
            let cert = NIOSSLCertificate.fromUnsafePointer(takingOwnership: certPointer)

            // Next, prepare the verification result.
            let verificationResult = NIOSSLVerificationResult(fromBoringSSLPreverify: preverify)

            // Now, grab the SSLConnection object.
            guard let ssl = CNIOBoringSSL_X509_STORE_CTX_get_ex_data(
                storeContext,
                CNIOBoringSSL_SSL_get_ex_data_X509_STORE_CTX_idx()
            ) else {
                preconditionFailure("Unable to obtain SSL * from X509_STORE_CTX * \(String(describing: storeContext))")
            }
            let connection = SSLConnection.loadConnectionFromSSL(OpaquePointer(ssl))

            // Map the user's verdict back onto BoringSSL's 1 (accept) / 0 (reject) convention.
            switch connection.verificationCallback!(verificationResult, cert) {
            case .certificateVerified:
                return 1
            case .failed:
                return 0
            }
        }
    }

    func setCustomVerificationCallback(_ callbackManager: CustomVerifyManager) {
        // Store the verification callback. We need to do this to keep it alive throughout the connection.
        // We'll drop this when we're told that it's no longer needed to ensure we break the reference cycles
        // that this callback inevitably produces.
        self.customVerificationManager = callbackManager

        // We need to know what the current mode is.
        // Note that this also has the effect of ensuring that if we disabled certificate validation
        // it actually _stays_ disabled: if the verify mode is no-verification, this callback never gets called.
        let currentMode = CNIOBoringSSL_SSL_get_verify_mode(self.ssl)
        CNIOBoringSSL_SSL_set_custom_verify(self.ssl, currentMode) { ssl, outAlert in
            guard let unwrappedSSL = ssl else {
                preconditionFailure(
                    "Unexpected null pointer in custom verification callback. ssl: \(String(describing: ssl))"
                )
            }

            // Ok, this call may be a resumption of a previous negotiation. We need to check if our connection object has a pre-existing verifiation state.
            let connection = SSLConnection.loadConnectionFromSSL(unwrappedSSL)

            // We force unwrap the custom verification manager because for it to not be set is a programmer error.
            return connection.customVerificationManager!.process(on: connection)
        }
    }

    /// Sets whether renegotiation is supported.
func setRenegotiationSupport(_ state: NIORenegotiationSupport) {
    // Map the NIO-level renegotiation policy onto BoringSSL's renegotiate mode.
    var baseState: ssl_renegotiate_mode_t

    switch state {
    case .none:
        baseState = ssl_renegotiate_never
    case .once:
        baseState = ssl_renegotiate_once
    case .always:
        baseState = ssl_renegotiate_freely
    }

    CNIOBoringSSL_SSL_set_renegotiate_mode(self.ssl, baseState)
}

/// Performs hostname validation against the peer certificate using the configured server name.
func validateHostname(address: SocketAddress) throws {
    // We want the leaf certificate.
    guard let peerCert = self.getPeerCertificate() else {
        throw NIOSSLError.noCertificateToValidate
    }

    guard try validIdentityForService(
        serverHostname: self.expectedHostname,
        socketAddress: address,
        leafCertificate: peerCert
    ) else {
        throw NIOSSLExtraError.failedToValidateHostname(expectedName: self.expectedHostname ?? "")
    }
}

/// Spins the handshake state machine and performs the next step of the handshake
/// protocol.
///
/// This method may write data into internal buffers that must be sent: call
/// `getDataForNetwork` after this method is called. This method also consumes
/// data from internal buffers: call `consumeDataFromNetwork` before calling this
/// method.
// NOTE(review): the generic argument was stripped by extraction; restored as
// `AsyncOperationResult<CInt>`, matching `.complete(rc)` where `rc` is CInt.
func doHandshake() -> AsyncOperationResult<CInt> {
    CNIOBoringSSL_ERR_clear_error()
    let rc = CNIOBoringSSL_SSL_do_handshake(ssl)

    if rc == 1 {
        return .complete(rc)
    }

    let result = CNIOBoringSSL_SSL_get_error(ssl, rc)
    let error = BoringSSLError.fromSSLGetErrorResult(result)!

    switch error {
    case .wantRead, .wantWrite, .wantCertificateVerify, .wantX509Lookup:
        return .incomplete
    default:
        return .failed(error)
    }
}

/// Spins the shutdown state machine and performs the next step of the shutdown
/// protocol.
///
/// This method may write data into internal buffers that must be sent: call
/// `getDataForNetwork` after this method is called. This method also consumes
/// data from internal buffers: call `consumeDataFromNetwork` before calling this
/// method.
func doShutdown() -> AsyncOperationResult { CNIOBoringSSL_ERR_clear_error() let rc = CNIOBoringSSL_SSL_shutdown(ssl) switch rc { case 1: return .complete(rc) case 0: return .incomplete default: let result = CNIOBoringSSL_SSL_get_error(ssl, rc) let error = BoringSSLError.fromSSLGetErrorResult(result)! switch error { case .wantRead, .wantWrite: return .incomplete default: return .failed(error) } } } /// Given some unprocessed data from the remote peer, places it into /// BoringSSL's receive buffer ready for handling by BoringSSL. /// /// This method should be called whenever data is received from the remote /// peer. It must be immediately followed by an I/O operation, e.g. `readDataFromNetwork` /// or `doHandshake` or `doShutdown`. func consumeDataFromNetwork(_ data: ByteBuffer) { self.bio!.receiveFromNetwork(buffer: data) } /// Obtains some encrypted data ready for the network from BoringSSL. /// /// This call obtains only data that BoringSSL has already written into its send /// buffer. As a result, it should be called last, after all other operations have /// been performed, to allow BoringSSL to write as much data as necessary into the /// `BIO`. /// /// Returns `nil` if there is no data to write. Otherwise, returns all of the pending /// data. func getDataForNetwork() -> ByteBuffer? { self.bio!.outboundCiphertext() } /// Attempts to decrypt any application data sent by the remote peer, and fills a buffer /// containing the cleartext bytes. /// /// This method can only consume data previously fed into BoringSSL in `consumeDataFromNetwork`. func readDataFromNetwork(outputBuffer: inout ByteBuffer) -> AsyncOperationResult { // TODO(cory): It would be nice to have an withUnsafeMutableWriteableBytes here, but we don't, so we // need to make do with writeWithUnsafeMutableBytes instead. 
The core issue is that we can't // safely return any of the error values that SSL_read might provide here because writeWithUnsafeMutableBytes // will try to use that as the number of bytes written and blow up. If we could prevent it doing that (which // we can with reading) that would be grand, but we can't, so instead we need to use a temp variable. Not ideal. // // We require that there is space to write at least one TLS record. var bytesRead: CInt = 0 let rc = outputBuffer.writeWithUnsafeMutableBytes(minimumWritableBytes: SSL_MAX_RECORD_SIZE) { (pointer) -> Int in // We ask for the amount of spare space in the buffer, clamping to CInt.max. let maxReadSize = Int(CInt.max) let readSize = CInt(min(maxReadSize, pointer.count)) bytesRead = CNIOBoringSSL_SSL_read(self.ssl, pointer.baseAddress, readSize) return bytesRead >= 0 ? Int(bytesRead) : 0 } if bytesRead > 0 { return .complete(rc) } else { let result = CNIOBoringSSL_SSL_get_error(ssl, CInt(bytesRead)) let error = BoringSSLError.fromSSLGetErrorResult(result)! switch error { case .wantRead, .wantWrite: return .incomplete default: return .failed(error) } } } /// Encrypts cleartext application data ready for sending on the network. /// /// This call will only write the data into BoringSSL's internal buffers. It needs to be obtained /// by calling `getDataForNetwork` after this call completes. func writeDataToNetwork(_ data: inout ByteBuffer) -> AsyncOperationResult { // BoringSSL does not allow calling SSL_write with zero-length buffers. Zero-length // writes always succeed. guard data.readableBytes > 0 else { return .complete(0) } let writtenBytes = data.withUnsafeReadableBytes { (pointer) -> CInt in CNIOBoringSSL_SSL_write(ssl, pointer.baseAddress, CInt(pointer.count)) } if writtenBytes > 0 { // The default behaviour of SSL_write is to only return once *all* of the data has been written, // unless the underlying BIO cannot satisfy the need (in which case WANT_WRITE will be returned). 
// We're using our BIO, which is always writable, so WANT_WRITE cannot fire so we'd always // expect this to write the complete quantity of readable bytes in our buffer. precondition(writtenBytes == data.readableBytes) data.moveReaderIndex(forwardBy: Int(writtenBytes)) return .complete(writtenBytes) } else { let result = CNIOBoringSSL_SSL_get_error(ssl, writtenBytes) let error = BoringSSLError.fromSSLGetErrorResult(result)! switch error { case .wantRead, .wantWrite: return .incomplete default: return .failed(error) } } } /// Returns the protocol negotiated via ALPN, if any. Returns `nil` if no protocol /// was negotiated. func getAlpnProtocol() -> String? { var protoName = UnsafePointer(bitPattern: 0) var protoLen: CUnsignedInt = 0 CNIOBoringSSL_SSL_get0_alpn_selected(ssl, &protoName, &protoLen) guard protoLen > 0 else { return nil } return String(decoding: UnsafeBufferPointer(start: protoName, count: Int(protoLen)), as: UTF8.self) } /// Get the leaf certificate from the peer certificate chain as a managed object, /// if available. func getPeerCertificate() -> NIOSSLCertificate? { guard let certPtr = CNIOBoringSSL_SSL_get_peer_certificate(ssl) else { return nil } return NIOSSLCertificate.fromUnsafePointer(takingOwnership: certPtr) } /// Drops persistent connection state. /// /// Must only be called when the connection is no longer needed. The rest of this object /// preconditions on that being true, so we'll find out quickly when that's not the case. func close() { /// Drop the verification callbacks. This breaks any reference cycles that are inevitably /// created by these callbacks. self.verificationCallback = nil self.customVerificationManager = nil self.currentOverride = nil // Also drop the reference to the parent channel handler, which is a trivial reference cycle. self.parentHandler = nil // And finally drop the data stored by the bytebuffer BIO self.bio?.close() } /// Retrieves any inbound data that has not been processed by BoringSSL. 
/// /// When unwrapping TLS from a connection, there may be application bytes that follow the terminating /// CLOSE_NOTIFY message. Those bytes may have been passed to this `SSLConnection`, and so we need to /// retrieve them. /// /// This function extracts those bytes and returns them to the user. This should only be called when /// the connection has been shutdown. /// /// - returns: The unconsumed `ByteBuffer`, if any. func extractUnconsumedData() -> ByteBuffer? { self.bio?.evacuateInboundData() } /// Returns an optional `TLSVersion` used on a `Channel` through the `NIOSSLHandler` APIs. func getTLSVersionForConnection() -> TLSVersion? { let uint16Version = CNIOBoringSSL_SSL_version(self.ssl) switch uint16Version { case TLS1_3_VERSION: return .tlsv13 case TLS1_2_VERSION: return .tlsv12 case TLS1_1_VERSION: return .tlsv11 case TLS1_VERSION: return .tlsv1 default: return nil } } } /// MARK: ConnectionRole extension SSLConnection { internal enum ConnectionRole { case server case client } } // MARK: Certificate Peer Chain Buffers extension SSLConnection { /// A collection of buffers representing the DER-encoded bytes of the peer certificate chain. struct PeerCertificateChainBuffers { private let basePointer: OpaquePointer fileprivate init(basePointer: OpaquePointer) { self.basePointer = basePointer } } /// Invokes a block with a collection of pointers to DER-encoded bytes of the peer certificate chain. /// /// The pointers are only guaranteed to be valid for the duration of this call: it is undefined behaviour to escape /// any of these pointers from the block, or the certificate iterator itself from the block. Users must either use the /// bytes synchronously within the block, or they must copy them to a new buffer that they own. /// /// If there are no peer certificates, the body will be called with nil. func withPeerCertificateChainBuffers( _ body: (PeerCertificateChainBuffers?) 
throws -> Result ) rethrows -> Result { guard let stackPointer = CNIOBoringSSL_SSL_get0_peer_certificates(self.ssl) else { return try body(nil) } return try body(PeerCertificateChainBuffers(basePointer: stackPointer)) } /// The certificate chain presented by the peer. func peerCertificateChain() throws -> [NIOSSLCertificate] { try self.withPeerCertificateChainBuffers { buffers in guard let buffers = buffers else { return [] } return try buffers.map { try NIOSSLCertificate(bytes: $0, format: .der) } } } func applyOverride(_ changes: NIOSSLContextConfigurationOverride) throws { let connection = UnsafeKeyAndChainTarget.ssl(self.ssl) if let chain = changes.certificateChain { try connection.useCertificateChain(chain) } // Attempt to load the new private key and abort on failure if let pkey = changes.privateKey { try connection.usePrivateKeySource(pkey) } self.currentOverride = changes } } extension SSLConnection.PeerCertificateChainBuffers: RandomAccessCollection { struct Index: Hashable, Comparable, Strideable { typealias Stride = Int fileprivate var index: Int fileprivate init(_ index: Int) { self.index = index } static func < (lhs: Index, rhs: Index) -> Bool { lhs.index < rhs.index } func advanced( by n: SSLConnection.PeerCertificateChainBuffers.Index.Stride ) -> SSLConnection.PeerCertificateChainBuffers.Index { var result = self result.index += n return result } func distance( to other: SSLConnection.PeerCertificateChainBuffers.Index ) -> SSLConnection.PeerCertificateChainBuffers.Index.Stride { other.index - self.index } } typealias Element = UnsafeRawBufferPointer var startIndex: Index { Index(0) } var endIndex: Index { Index(self.count) } var count: Int { CNIOBoringSSL_sk_CRYPTO_BUFFER_num(self.basePointer) } subscript(_ index: Index) -> UnsafeRawBufferPointer { precondition(index < self.endIndex) guard let ptr = CNIOBoringSSL_sk_CRYPTO_BUFFER_value(self.basePointer, index.index) else { preconditionFailure("Unable to locate backing pointer.") } guard let 
dataPointer = CNIOBoringSSL_CRYPTO_BUFFER_data(ptr) else { preconditionFailure("Unable to retrieve data pointer from crypto_buffer") } let byteCount = CNIOBoringSSL_CRYPTO_BUFFER_len(ptr) // We want an UnsafeRawBufferPointer here, so we need to erase the pointer type. let bufferDataPointer = UnsafeBufferPointer(start: dataPointer, count: byteCount) return UnsafeRawBufferPointer(bufferDataPointer) } } // MARK: Helpers for managing ex_data extension SSLConnection { // Loads an SSLConnection from an SSL*. Does not take ownership of the pointer. static func loadConnectionFromSSL(_ ssl: OpaquePointer) -> SSLConnection { guard let connectionPointer = CNIOBoringSSL_SSL_get_ex_data(ssl, sslConnectionExDataIndex) else { // Uh-ok, our application state is gone. Don't let this error silently pass, go bang. preconditionFailure("Unable to find application data from SSL * \(ssl), index \(sslConnectionExDataIndex)") } return Unmanaged.fromOpaque(connectionPointer).takeUnretainedValue() } } ================================================ FILE: Sources/NIOSSL/SSLContext.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// @_implementationOnly import CNIOBoringSSL @_implementationOnly import CNIOBoringSSLShims import NIOCore #if canImport(Darwin) import Darwin.C #elseif canImport(Musl) import Musl #elseif canImport(Glibc) import Glibc #elseif canImport(Android) import Android #else #error("unsupported os") #endif // This is a neat trick. 
// Swift lazily initializes module-globals based on when they're first
// used. This lets us defer BoringSSL initialization as late as possible and only do it if people
// actually create any object that uses BoringSSL.
internal let boringSSLIsInitialized: Bool = initializeBoringSSL()

internal enum FileSystemObject {
    case directory
    case file

    /// Classifies the filesystem object at `path`, returning `nil` if it cannot be stat-ed.
    static internal func pathType(path: String) -> FileSystemObject? {
        var statObj = stat()
        do {
            try Posix.stat(path: path, buf: &statObj)
        } catch {
            return nil
        }

        // Compare the whole file-type field (S_IFMT) against S_IFDIR rather than bit-testing
        // S_IFDIR. S_IFDIR is not a single-bit flag, so `(st_mode & S_IFDIR) != 0` also
        // matches other file types whose mode values contain those bits (e.g. sockets,
        // block devices), misclassifying them as directories.
        #if os(Android) && arch(arm)
        return (statObj.st_mode & UInt32(S_IFMT)) == UInt32(S_IFDIR) ? .directory : .file
        #else
        return (statObj.st_mode & S_IFMT) == S_IFDIR ? .directory : .file
        #endif
    }
}

// This bizarre extension to UnsafeBufferPointer is very useful for handling ALPN identifiers. BoringSSL
// likes to work with them in wire format, so rather than us decoding them we can just encode ours to
// the wire format and then work with them from there.
extension UnsafeBufferPointer where Element == UInt8 {
    /// Searches this wire-format ALPN list for `identifier` (itself in wire format: a length
    /// byte followed by that many protocol bytes). Returns the offset and length of the match,
    /// or `nil` if the identifier is absent or the list is malformed.
    fileprivate func locateAlpnIdentifier(identifier: UnsafeBufferPointer<UInt8>) -> (index: Int, length: Int)? {
        precondition(identifier.count != 0)
        let targetLength = Int(identifier[0])

        var index = 0
        outerLoop: while index < self.count {
            let length = Int(self[index])
            guard index + length + 1 <= self.count else {
                // Invalid length of ALPN identifier, no match.
                return nil
            }

            guard targetLength == length else {
                index += length + 1
                continue outerLoop
            }

            for innerIndex in 1...length {
                guard identifier[innerIndex] == self[index + innerIndex] else {
                    index += length + 1
                    continue outerLoop
                }
            }

            // Found it.
            return (index: index + 1, length: length)
        }
        return nil
    }
}

/// ALPN selection callback installed on the SSL_CTX: picks the first of our configured
/// protocols that the client also offered, or NOACKs the extension.
private func alpnCallback(
    ssl: OpaquePointer?,
    out: UnsafeMutablePointer<UnsafePointer<UInt8>?>?,
    outlen: UnsafeMutablePointer<UInt8>?,
    in: UnsafePointer<UInt8>?,
    inlen: UInt32,
    appData: UnsafeMutableRawPointer?
) -> CInt {
    // Perform some soundness checks. We don't want NULL pointers around here.
    guard let ssl = ssl, let out = out, let outlen = outlen, let `in` = `in` else {
        return SSL_TLSEXT_ERR_NOACK
    }

    // We want to take the SSL pointer and extract the parent Swift object.
    let parentSwiftContext = NIOSSLContext.lookupFromRawContext(ssl: ssl)

    let offeredProtocols = UnsafeBufferPointer(start: `in`, count: Int(inlen))
    guard let (index, length) = parentSwiftContext.alpnSelectCallback(offeredProtocols: offeredProtocols) else {
        out.pointee = nil
        outlen.pointee = 0
        return SSL_TLSEXT_ERR_NOACK
    }

    out.pointee = `in` + index
    outlen.pointee = UInt8(length)
    return SSL_TLSEXT_ERR_OK
}

/// PSK callback for the server side context.
private func serverPSKCallback(
    ssl: OpaquePointer?,
    identity: UnsafePointer<CChar>?,
    psk: UnsafeMutablePointer<UInt8>?,
    max_psk_len: UInt32
) -> UInt32 {
    guard let ssl = ssl else {
        return 0
    }

    // Initial implementation only supports TLS 1.2 due to the API support exposed from BoringSSL.
    // TODO (meaton) add TLS 1.3 support when available.
    let parentSwiftContext = NIOSSLContext.lookupFromRawContext(ssl: ssl)

    guard let serverCallback = parentSwiftContext.pskServerConfigurationCallback,
        let unwrappedIdentity = identity,  // Incoming identity.
        let strIdentity = String(validatingCString: unwrappedIdentity),
        let outputPSK = psk  // Output PSK key.
    else {
        return 0
    }

    // Take the hint and the possible identity and pass them down to the callback to get the
    // associated PSK.
    var identityResponse: PSKServerIdentityResponse? = nil
    switch serverCallback {
    case .callback(let callback):
        // The deprecated callback doesn't accept an optional hint value.
        guard let hint = parentSwiftContext.configuration.pskHint else {
            return 0
        }
        identityResponse = try? callback(hint, strIdentity)
    case .provider(let provider):
        identityResponse = try? provider(
            PSKServerContext(
                hint: parentSwiftContext.configuration.pskHint,
                clientIdentity: strIdentity,
                maxPSKLength: Int(max_psk_len)
            )
        )
    }

    guard let identityResponse else {
        return 0
    }
    let serverPSK = identityResponse.key  // From the callback.

    // Make sure the key is returned by the callback and is of proper length, otherwise fail.
    if serverPSK.isEmpty || serverPSK.count > max_psk_len {
        return 0
    }

    let _ = serverPSK.withUnsafeBytes { (body: UnsafeRawBufferPointer) -> Void in
        memcpy(outputPSK, body.baseAddress!, body.count)
    }
    return UInt32(serverPSK.count)
}

/// PSK callback for the client side context.
private func clientPSKCallback(
    ssl: OpaquePointer?,
    hint: UnsafePointer<CChar>?,
    identity: UnsafeMutablePointer<CChar>?,
    max_identity_len: UInt32,
    psk: UnsafeMutablePointer<UInt8>?,
    max_psk_len: UInt32
) -> UInt32 {
    guard let ssl = ssl else {
        return 0
    }

    let parentSwiftContext = NIOSSLContext.lookupFromRawContext(ssl: ssl)

    guard let clientCallback = parentSwiftContext.pskClientConfigurationCallback,
        let unwrappedIdentity = identity,  // Output identity that will later be mapped from the client callback.
        let outputPSK = psk  // Output PSK key that will later be mapped from the client callback.
    else {
        return 0
    }

    // If set, decode the hint and pass it into the client callback.
    let clientHint: String? = hint.flatMap({ String(validatingCString: $0) })

    // Take the hint and pass it down to the callback to get the associated PSK.
    let pskIdentity: PSKClientIdentityResponse?
    switch clientCallback {
    case .callback(let callback):
        // The deprecated callback doesn't accept an optional hint value.
        guard let clientHint else {
            return 0
        }
        pskIdentity = try? callback(clientHint)
    case .provider(let provider):
        pskIdentity = try? provider(
            PSKClientContext(
                hint: clientHint,
                maxPSKLength: Int(max_psk_len)
            )
        )
    }

    guard let pskIdentity else {
        return 0
    }
    let clientPSK = pskIdentity.key  // Key from the callback.
    let clientIdentity = pskIdentity.identity

    // Use max_identity_len so it does not trigger an overrun.
    if clientIdentity.utf8.isEmpty || clientIdentity.utf8.count > max_identity_len {
        return 0
    }

    // Map the output identity from the one passed back from the callback.
    // This helps populate the server callback for the key exchange.
    let _ = clientIdentity.withCString { ptr in
        memcpy(unwrappedIdentity, ptr, clientIdentity.utf8.count)
    }

    if clientPSK.isEmpty || clientPSK.count > max_psk_len {
        return 0
    }

    let _ = clientPSK.withUnsafeBytes { (body: UnsafeRawBufferPointer) -> Void in
        memcpy(outputPSK, body.baseAddress!, body.count)
    }
    return UInt32(clientPSK.count)
}

/// Certificate callback (SSL_CTX_set_cert_cb) that asks the connection's custom context
/// manager for a (possibly asynchronous) configuration override.
private func sslContextCallback(ssl: OpaquePointer?, arg: UnsafeMutableRawPointer?) -> Int32 {
    guard let ssl = ssl else {
        preconditionFailure(
            """
            SSL_CTX_set_cert_cb was executed with an invalid ssl pointer. This should not be possible, please file an issue.
            """
        )
    }

    let connection = SSLConnection.loadConnectionFromSSL(ssl)

    // This is a safe force-unwrap, as this callback is only registered directly after setting the manager.
    var contextManager = connection.customContextManager!

    // Begin loading a new context.
    let result = contextManager.loadContext(ssl: ssl)
    switch result {
    case .none:
        // No result yet: suspend the handshake.
        return -1
    case .failure:
        // Loading a context failed: signal failure.
        return 0
    case .success(let changes):
        do {
            // Attempt to apply the new certificate chain / private key, aborting on failure.
            try connection.applyOverride(changes)
            // Return 1 to signal a successful load of the new context.
            return 1
        } catch {
            // Although the load was successful, applying the changes failed, and we must mark as such.
            // NOTE(review): `contextManager` is a local copy; if CustomContextManager is a value
            // type this recorded error may not be observable elsewhere — confirm its semantics.
            contextManager.setLoadContextError(error)
            return 0
        }
    }
}

/// A wrapper class that encapsulates BoringSSL's `SSL_CTX *` object.
///
/// This object is thread-safe and can be shared across TLS connections in your application. Even if the connections
/// are associated with `Channel`s from different `EventLoop`s.
///
/// > Note: Creating a ``NIOSSLContext`` is a very expensive operation because BoringSSL will usually need to load and
///   parse large number of certificates from the system trust store. Therefore, creating a
///   ``NIOSSLContext`` will likely allocate many thousand times and will also _perform blocking disk I/O_.
///
/// > Warning: Avoid creating ``NIOSSLContext``s on any `EventLoop` because it does _blocking disk I/O_.
public final class NIOSSLContext {
    // The owned SSL_CTX pointer; freed in deinit.
    fileprivate let sslContext: OpaquePointer
    // Kept alive for the lifetime of the context so the passphrase C callback can reach it.
    private let callbackManager: CallbackManagerProtocol?
    private var keyLogManager: KeyLogCallbackManager?
    internal var pskClientConfigurationCallback: _NIOPSKClientIdentityProvider?
    internal var pskServerConfigurationCallback: _NIOPSKServerIdentityProvider?
    public let configuration: TLSConfiguration

    /// Initialize a context that will create multiple connections, all with the same
    /// configuration.
    internal init(
        configuration: TLSConfiguration,
        callbackManager: CallbackManagerProtocol?
    ) throws {
        guard boringSSLIsInitialized else { fatalError("Failed to initialize BoringSSL") }
        guard let context = CNIOBoringSSL_SSL_CTX_new(CNIOBoringSSL_TLS_method()) else {
            fatalError("Failed to create new BoringSSL context")
        }

        let minTLSVersion: CInt
        switch configuration.minimumTLSVersion {
        case .tlsv13:
            minTLSVersion = TLS1_3_VERSION
        case .tlsv12:
            minTLSVersion = TLS1_2_VERSION
        case .tlsv11:
            minTLSVersion = TLS1_1_VERSION
        case .tlsv1:
            minTLSVersion = TLS1_VERSION
        }
        var returnCode = CNIOBoringSSL_SSL_CTX_set_min_proto_version(context, UInt16(minTLSVersion))
        precondition(1 == returnCode)

        let maxTLSVersion: CInt
        switch configuration.maximumTLSVersion {
        case .some(.tlsv1):
            maxTLSVersion = TLS1_VERSION
        case .some(.tlsv11):
            maxTLSVersion = TLS1_1_VERSION
        case .some(.tlsv12):
            maxTLSVersion = TLS1_2_VERSION
        case .some(.tlsv13), .none:
            // Unset defaults to TLS1.3 for now. BoringSSL's default is TLS 1.2.
            maxTLSVersion = TLS1_3_VERSION
        }
        returnCode = CNIOBoringSSL_SSL_CTX_set_max_proto_version(context, UInt16(maxTLSVersion))
        precondition(1 == returnCode)

        // Cipher suites. We just pass this straight to BoringSSL.
        returnCode = CNIOBoringSSL_SSL_CTX_set_cipher_list(context, configuration.cipherSuites)
        precondition(1 == returnCode)

        // Curves list.
        if let curves = configuration.curves {
            returnCode = curves
                .map { $0.rawValue }
                .withUnsafeBufferPointer { algo in
                    CNIOBoringSSL_SSL_CTX_set1_group_ids(context, algo.baseAddress, algo.count)
                }
            if returnCode != 1 {
                let errorStack = BoringSSLError.buildErrorStack()
                throw BoringSSLError.unknownError(errorStack)
            }
        }

        // Set the PSK Client Configuration callback.
        if let pskClientConfigurationsCallback = configuration._pskClientIdentityProvider {
            self.pskClientConfigurationCallback = pskClientConfigurationsCallback
            CNIOBoringSSL_SSL_CTX_set_psk_client_callback(context, clientPSKCallback)
        }

        // Set the PSK Server Configuration callback.
        if let pskServerConfigurationCallback = configuration._pskServerIdentityProvider {
            self.pskServerConfigurationCallback = pskServerConfigurationCallback
            CNIOBoringSSL_SSL_CTX_set_psk_server_callback(context, serverPSKCallback)
        }

        // Set the SSL Context Configuration callback.
        // The state is managed on the connection.
        if configuration.sslContextCallback != nil {
            CNIOBoringSSL_SSL_CTX_set_cert_cb(context, sslContextCallback, nil)
        }

        // Set the hint no matter if it is client or server side.
        if let pskHint = configuration.pskHint {
            CNIOBoringSSL_SSL_CTX_use_psk_identity_hint(context, pskHint)
        }

        // On non-Linux platforms, when using the platform default trust roots, we make use of a
        // custom verify callback. If we have also been presented with additional trust roots of
        // type `.file`, we take the opportunity now to load them in memory to avoid doing so
        // repeatedly on the request path.
        //
        // However, to avoid closely coupling this code with other parts (e.g. the platform-specific
        // concerns, and the defaulting of `trustRoots` to `.default` when `nil`), we unilaterally
        // convert any `additionalTrustRoots` of type `.file` to `.certificates`.
        var configuration = configuration
        configuration.additionalTrustRoots = try configuration.additionalTrustRoots.map { trustRoots in
            switch trustRoots {
            case .file(let path):
                return .certificates(try NIOSSLCertificate.fromPEMFile(path))
            default:
                return trustRoots
            }
        }

        // Configure certificate validation.
        try NIOSSLContext.configureCertificateValidation(
            context: context,
            verification: configuration.certificateVerification,
            trustRoots: configuration.trustRoots,
            additionalTrustRoots: configuration.additionalTrustRoots,
            sendCANames: configuration.sendCANameList
        )

        // Configure verification algorithms.
        if let verifySignatureAlgorithms = configuration.verifySignatureAlgorithms {
            returnCode = verifySignatureAlgorithms
                .map { $0.rawValue }
                .withUnsafeBufferPointer { algo in
                    CNIOBoringSSL_SSL_CTX_set_verify_algorithm_prefs(context, algo.baseAddress, algo.count)
                }
            if returnCode != 1 {
                let errorStack = BoringSSLError.buildErrorStack()
                throw BoringSSLError.unknownError(errorStack)
            }
        }

        // Configure signing algorithms.
        if let signingSignatureAlgorithms = configuration.resolvedSigningSignatureAlgorithms {
            returnCode = signingSignatureAlgorithms
                .map { $0.rawValue }
                .withUnsafeBufferPointer { algo in
                    CNIOBoringSSL_SSL_CTX_set_signing_algorithm_prefs(context, algo.baseAddress, algo.count)
                }
            if returnCode != 1 {
                let errorStack = BoringSSLError.buildErrorStack()
                throw BoringSSLError.unknownError(errorStack)
            }
        }

        // If we were given a certificate chain to use, load it and its associated private key. Before
        // we do, set up a passphrase callback if we need to.
        if let callbackManager = callbackManager {
            CNIOBoringSSL_SSL_CTX_set_default_passwd_cb(
                context,
                { globalBoringSSLPassphraseCallback(buf: $0, size: $1, rwflag: $2, u: $3) }
            )
            CNIOBoringSSL_SSL_CTX_set_default_passwd_cb_userdata(
                context,
                Unmanaged.passUnretained(callbackManager as AnyObject).toOpaque()
            )
        }

        let handle = UnsafeKeyAndChainTarget.sslContext(context)
        try handle.useCertificateChain(configuration.certificateChain)

        if let pkey = configuration.privateKey {
            try handle.usePrivateKeySource(pkey)
        }

        if configuration.encodedApplicationProtocols.count > 0 {
            try NIOSSLContext.setAlpnProtocols(configuration.encodedApplicationProtocols, context: context)
            NIOSSLContext.setAlpnCallback(context: context)
        }

        // Add a key log callback.
        if let keyLogCallback = configuration.keyLogCallback {
            self.keyLogManager = KeyLogCallbackManager(callback: keyLogCallback)
            try NIOSSLContext.setKeylogCallback(context: context)
        } else {
            self.keyLogManager = nil
        }

        self.sslContext = context
        self.configuration = configuration
        self.callbackManager = callbackManager

        // Always make it possible to get from an SSL_CTX structure back to this.
        let ptrToSelf = Unmanaged.passUnretained(self).toOpaque()
        CNIOBoringSSLShims_SSL_CTX_set_app_data(context, ptrToSelf)
    }

    /// Initialize a context that will create multiple connections, all with the same
    /// configuration.
    ///
    /// - Note: Creating a ``NIOSSLContext`` is a very expensive operation because BoringSSL will usually need to load and
    ///   parse large number of certificates from the system trust store. Therefore, creating a
    ///   ``NIOSSLContext`` will likely allocate many thousand times and will also _perform blocking disk I/O_.
    ///
    /// - Warning: Avoid creating ``NIOSSLContext``s on any `EventLoop` because it does _blocking disk I/O_.
public convenience init(configuration: TLSConfiguration) throws { try self.init(configuration: configuration, callbackManager: nil) } /// Initialize a context that will create multiple connections, all with the same /// configuration, along with a callback that will be called when needed to decrypt any /// encrypted private keys. /// /// - Note: Creating a ``NIOSSLContext`` is a very expensive operation because BoringSSL will usually need to load and /// parse large number of certificates from the system trust store. Therefore, creating a /// ``NIOSSLContext`` will likely allocate many thousand times and will also _perform blocking disk I/O_. /// /// - Warning: Avoid creating ``NIOSSLContext``s on any `EventLoop` because it does _blocking disk I/O_. /// /// - parameters: /// - configuration: The ``TLSConfiguration`` to use for all the connections with this /// ``NIOSSLContext``. /// - passphraseCallback: The callback to use to decrypt any private keys used by this /// ``NIOSSLContext``. For more details on this parameter see the documentation for /// ``NIOSSLPassphraseCallback``. public convenience init( configuration: TLSConfiguration, passphraseCallback: @escaping NIOSSLPassphraseCallback ) throws where T.Element == UInt8 { let manager = BoringSSLPassphraseCallbackManager(userCallback: passphraseCallback) try self.init(configuration: configuration, callbackManager: manager) } /// Create a new connection object with the configuration from this /// context. internal func createConnection() -> SSLConnection? { guard let ssl = CNIOBoringSSL_SSL_new(self.sslContext) else { return nil } let conn = SSLConnection(ownedSSL: ssl, parentContext: self) // If we need to turn on the validation on Apple platforms, do it here. 
#if canImport(Darwin) switch self.configuration.trustRoots { case .some(.default), .none: conn.setCustomVerificationCallback( CustomVerifyManager(callback: { do { conn.performSecurityFrameworkValidation( promise: $0, peerCertificates: try conn.getPeerCertificatesAsSecCertificate() ) } catch { $0.fail(error) } }) ) case .some(.certificates), .some(.file): break } #endif return conn } fileprivate func alpnSelectCallback(offeredProtocols: UnsafeBufferPointer) -> (index: Int, length: Int)? { for possibility in configuration.encodedApplicationProtocols { let match = possibility.withUnsafeBufferPointer { offeredProtocols.locateAlpnIdentifier(identifier: $0) } if match != nil { return match } } return nil } deinit { CNIOBoringSSL_SSL_CTX_free(self.sslContext) } } // NIOSSLContext is thread-safe and therefore Sendable extension NIOSSLContext: @unchecked Sendable {} extension NIOSSLContext { fileprivate static func lookupFromRawContext(ssl: OpaquePointer) -> NIOSSLContext { // We want to take the SSL pointer and extract the parent Swift object. These force-unwraps are for // safety: a correct NIO program can never fail to set these pointers, and if it does failing loudly is // more useful than failing quietly. let parentCtx = CNIOBoringSSL_SSL_get_SSL_CTX(ssl)! let parentPtr = CNIOBoringSSLShims_SSL_CTX_get_app_data(parentCtx)! let parentSwiftContext: NIOSSLContext = Unmanaged.fromOpaque(parentPtr).takeUnretainedValue() return parentSwiftContext } } extension NIOSSLContext { private static func setAlpnProtocols(_ protocols: [[UInt8]], context: OpaquePointer) throws { // This copy should be done infrequently, so we don't worry too much about it. let protoBuf = protocols.reduce([UInt8](), +) let rc = protoBuf.withUnsafeBufferPointer { CNIOBoringSSL_SSL_CTX_set_alpn_protos(context, $0.baseAddress!, $0.count) } // Annoyingly this function reverses the error convention: 0 is success, non-zero is failure. 
if rc != 0 { let errorStack = BoringSSLError.buildErrorStack() throw BoringSSLError.failedToSetALPN(errorStack) } } private static func setAlpnCallback(context: OpaquePointer) { // This extra closure here is very silly, but it exists to allow us to avoid writing down the type of the first // argument. Combined with the helper above, the compiler will be able to solve its way to success here. CNIOBoringSSL_SSL_CTX_set_alpn_select_cb( context, { alpnCallback(ssl: $0, out: $1, outlen: $2, in: $3, inlen: $4, appData: $5) }, nil ) } } // Configuring certificate verification extension NIOSSLContext { fileprivate enum VerificationMode { case peerCertificateRequired case peerCertificatesOptional } fileprivate static func setupVerification( _ context: OpaquePointer, _ sendCANames: Bool, _ trustRoots: NIOSSLTrustRoots?, _ additionalTrustRoots: [NIOSSLAdditionalTrustRoots], _ verificationMode: VerificationMode ) throws { switch verificationMode { case .peerCertificateRequired: CNIOBoringSSL_SSL_CTX_set_verify(context, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, nil) case .peerCertificatesOptional: CNIOBoringSSL_SSL_CTX_set_verify(context, SSL_VERIFY_PEER, nil) } // Also, set TRUSTED_FIRST to work around dumb clients that don't know what they're doing and send // untrusted root certs. X509_VERIFY_PARAM will or-in the flags, so we don't need to load them first. // This is get0 so we can just ignore the pointer, we don't have an owned ref. let trustParams = CNIOBoringSSL_SSL_CTX_get0_param(context)! 
CNIOBoringSSL_X509_VERIFY_PARAM_set_flags(trustParams, CUnsignedLong(X509_V_FLAG_TRUSTED_FIRST)) func configureTrustRoots(trustRoots: NIOSSLTrustRoots) throws { switch trustRoots { case .default: try NIOSSLContext.platformDefaultConfiguration(context: context) case .file(let path): try NIOSSLContext.loadVerifyLocations(path, context: context, sendCANames: sendCANames) case .certificates(let certs): for cert in certs { try NIOSSLContext.addRootCertificate(cert, context: context) // Add the CA name from the trust root if sendCANames { try NIOSSLContext.addCACertificateNameToList(context: context, certificate: cert) } } } } try configureTrustRoots(trustRoots: trustRoots ?? .default) for root in additionalTrustRoots { try configureTrustRoots(trustRoots: .init(from: root)) } } private static func configureCertificateValidation( context: OpaquePointer, verification: CertificateVerification, trustRoots: NIOSSLTrustRoots?, additionalTrustRoots: [NIOSSLAdditionalTrustRoots], sendCANames: Bool ) throws { // If validation is turned on, set the trust roots and turn on cert validation. switch verification { case .fullVerification, .noHostnameVerification: try setupVerification(context, sendCANames, trustRoots, additionalTrustRoots, .peerCertificateRequired) case .none(let opts): if opts.validatePresentedCertificates { try setupVerification(context, sendCANames, trustRoots, additionalTrustRoots, .peerCertificatesOptional) } } } private static func addCACertificateNameToList(context: OpaquePointer, certificate: NIOSSLCertificate) throws { // Adds the CA name extracted from cert to the list of CAs sent to the client when requesting a client certificate. 
try certificate.withUnsafeMutableX509Pointer { ref in guard 1 == CNIOBoringSSL_SSL_CTX_add_client_CA(context, ref) else { throw NIOSSLError.failedToLoadCertificate } } } private static func loadVerifyLocations(_ path: String, context: OpaquePointer, sendCANames: Bool) throws { let isDirectory: Bool switch FileSystemObject.pathType(path: path) { case .some(.directory): isDirectory = true case .some(.file): isDirectory = false case .none: throw NIOSSLError.noSuchFilesystemObject } let result = path.withCString { (pointer) -> CInt in let file = !isDirectory ? pointer : nil let directory = isDirectory ? pointer : nil return CNIOBoringSSL_SSL_CTX_load_verify_locations(context, file, directory) } if result == 0 { let errorStack = BoringSSLError.buildErrorStack() throw BoringSSLError.unknownError(errorStack) } else if sendCANames, !isDirectory { // For single CA file, add the CA name from the trust root. // This could be from a location like /etc/ssl/cert.pem as an example. CNIOBoringSSL_SSL_CTX_set_client_CA_list(context, CNIOBoringSSL_SSL_load_client_CA_file(path)) } else if sendCANames, isDirectory { // Match the c_rehash directory format and load the certificate based on this criteria. let certificateFilePaths = try DirectoryContents(path: path).filter { try self._isRehashFormat(path: $0) } // Load only the certificates that resolve to an existing certificate in the directory. for symPath in certificateFilePaths { // c_rehash only support pem files. let cert = try NIOSSLCertificate(_file: symPath, format: .pem) try addCACertificateNameToList(context: context, certificate: cert) } } } private static func addRootCertificate(_ cert: NIOSSLCertificate, context: OpaquePointer) throws { let store = CNIOBoringSSL_SSL_CTX_get_cert_store(context)! 
        let rc = cert.withUnsafeMutableX509Pointer { ref in
            CNIOBoringSSL_X509_STORE_add_cert(store, ref)
        }
        if 0 == rc {
            throw NIOSSLError.failedToLoadCertificate
        }
    }

    /// Loads the platform's default trust store into `context`.
    private static func platformDefaultConfiguration(context: OpaquePointer) throws {
        // Platform default trust is configured differently in different places.
        // On Linux, we use our searched heuristics to guess about where the platform trust store is.
        // On Darwin, we use a custom callback that is set later, in createConnection
        #if os(Linux)
        let result = rootCAFilePath.withCString { rootCAFilePointer in
            rootCADirectoryPath.withCString { rootCADirectoryPointer in
                CNIOBoringSSL_SSL_CTX_load_verify_locations(context, rootCAFilePointer, rootCADirectoryPointer)
            }
        }

        if result == 0 {
            let errorStack = BoringSSLError.buildErrorStack()
            throw BoringSSLError.unknownError(errorStack)
        }
        #elseif os(Android)
        // Android only has a CA directory, no single bundle file.
        let result = rootCADirectoryPath.withCString { rootCADirectoryPointer in
            CNIOBoringSSL_SSL_CTX_load_verify_locations(context, nil, rootCADirectoryPointer)
        }

        if result == 0 {
            let errorStack = BoringSSLError.buildErrorStack()
            throw BoringSSLError.unknownError(errorStack)
        }
        #endif
    }

    /// Installs BoringSSL's key log callback on `context`, forwarding each logged
    /// line to the owning NIOSSLContext's keyLogManager.
    private static func setKeylogCallback(context: OpaquePointer) throws {
        CNIOBoringSSL_SSL_CTX_set_keylog_callback(context) { (ssl, linePointer) in
            guard let ssl = ssl, let linePointer = linePointer else {
                return
            }

            // Recover the Swift-side NIOSSLContext that owns this SSL*.
            let parentSwiftContext = NIOSSLContext.lookupFromRawContext(ssl: ssl)

            // Similarly, this force-unwrap is safe because a correct NIO program can never fail to unwrap this entry
            // either.
            parentSwiftContext.keyLogManager!.log(linePointer)
        }
    }

    /// Takes a path and determines if the file at this path is of c_rehash format.
    internal static func _isRehashFormat(path: String) throws -> Bool {
        // Check if the element's name matches the c_rehash symlink name format.
        // The links created are of the form HHHHHHHH.D, where each H is a hexadecimal character and D is a single decimal digit.
let utf8PathView = path.utf8 let utf8PathSplitView = utf8PathView.split(separator: UInt8(ascii: "/")) // Make sure the path is at least 10 units long guard let lastPathComponent = utf8PathSplitView.last, lastPathComponent.count == 10 else { return false } // Split into filename parts HHHHHHHH.D -> [[HHHHHHHH], [D]] let filenameParts = lastPathComponent.split(separator: UInt8(ascii: ".")) // Double check that the extension did not fail to cast to an integer. // Make sure that the filename is an 8 character hex based file name. guard filenameParts.count == 2, let filename = filenameParts.first, let fileExtension = filenameParts.last, fileExtension.count == 1, filename.count == 8, filename.allSatisfy({ $0.isHexDigit }), fileExtension.first == UInt8(ascii: "0") else { return false } // Check if the element is a symlink. If it is not, return false. var buffer = stat() let _ = try Posix.lstat(path: path, buf: &buffer) // Check the mode to make sure this is a symlink #if os(Android) && arch(arm) if (buffer.st_mode & UInt32(S_IFMT)) != UInt32(S_IFLNK) { return false } #else if (buffer.st_mode & S_IFMT) != S_IFLNK { return false } #endif // Return true at this point because the file format is considered to be in rehash format and a symlink. // Rehash format being "%08lx.%d" or HHHHHHHH.D return true } } extension NIOSSLContext { /// Exposes the CA Name list count from BoringSSL's STACK_OF(X509_NAME) func getX509NameListCount() -> Int { guard let caNameList = CNIOBoringSSL_SSL_CTX_get_client_CA_list(self.sslContext) else { return 0 } return CNIOBoringSSL_sk_X509_NAME_num(caNameList) } } // For accessing STACK_OF(SSL_CIPHER) from a SSLContext extension NIOSSLContext { /// A collection of buffers representing a STACK_OF(SSL_CIPHER) struct NIOTLSCipherBuffers { private let basePointer: OpaquePointer fileprivate init(basePointer: OpaquePointer) { self.basePointer = basePointer } } /// Invokes a block with a collection of pointers to STACK_OF(SSL_CIPHER). 
/// /// The pointers are only guaranteed to be valid for the duration of this call. This method aligns with the RandomAccessCollection protocol /// to access UInt16 pointers at a specific index. This pointer is used to safely access id values of the cipher to create a new NIOTLSCipher. fileprivate func withStackOfCipherSuiteBuffers( _ body: (NIOTLSCipherBuffers?) throws -> Result ) rethrows -> Result { guard let stackPointer = CNIOBoringSSL_SSL_CTX_get_ciphers(self.sslContext) else { return try body(nil) } return try body(NIOTLSCipherBuffers(basePointer: stackPointer)) } /// Access cipher suites applied to the context internal var cipherSuites: [NIOTLSCipher] { self.withStackOfCipherSuiteBuffers { buffers in guard let buffers = buffers else { return [] } return Array(buffers) } } } extension NIOSSLContext.NIOTLSCipherBuffers: RandomAccessCollection { struct Index: Hashable, Comparable, Strideable { typealias Stride = Int fileprivate var index: Int fileprivate init(_ index: Int) { self.index = index } static func < (lhs: Index, rhs: Index) -> Bool { lhs.index < rhs.index } func advanced(by n: NIOSSLContext.NIOTLSCipherBuffers.Index.Stride) -> NIOSSLContext.NIOTLSCipherBuffers.Index { var result = self result.index += n return result } func distance( to other: NIOSSLContext.NIOTLSCipherBuffers.Index ) -> NIOSSLContext.NIOTLSCipherBuffers.Index.Stride { other.index - self.index } } typealias Element = NIOTLSCipher var startIndex: Index { Index(0) } var endIndex: Index { Index(self.count) } var count: Int { CNIOBoringSSL_sk_SSL_CIPHER_num(self.basePointer) } subscript(position: Index) -> NIOTLSCipher { precondition(position < self.endIndex) precondition(position >= self.startIndex) guard let ptr = CNIOBoringSSL_sk_SSL_CIPHER_value(self.basePointer, position.index) else { preconditionFailure("Unable to locate backing pointer.") } let cipherID = CNIOBoringSSL_SSL_CIPHER_get_protocol_id(ptr) return NIOTLSCipher(cipherID) } } extension Optional where Wrapped == String { 
internal func withCString(_ body: (UnsafePointer?) throws -> Result) rethrows -> Result { switch self { case .some(let s): return try s.withCString({ try body($0) }) case .none: return try body(nil) } } } internal class DirectoryContents: Sequence, IteratorProtocol { typealias Element = String let path: String // Used to account between the differences of DIR being defined on Darwin. // Otherwise an OpaquePointer needs to be used to account for the non-defined type in glibc. #if canImport(Darwin) let dir: UnsafeMutablePointer #else let dir: OpaquePointer #endif init(path: String) { self.path = path self.dir = opendir(path)! } func next() -> String? { if let dirent: UnsafeMutablePointer = readdir(self.dir) { let name = withUnsafePointer(to: &dirent.pointee.d_name) { (ptr) -> String in // Pointers to homogeneous tuples in Swift are always bound to both the tuple type and the element type, // so the assumption below is safe. let elementPointer = UnsafeRawPointer(ptr).assumingMemoryBound(to: CChar.self) return String(cString: elementPointer) } return self.path + name } return nil } deinit { closedir(dir) } } // Used as part of the `_isRehashFormat` format to determine if the filename is a hexadecimal filename. 
extension UTF8.CodeUnit {
    // ASCII bounds used for hex-digit classification.
    private static let asciiZero = UInt8(ascii: "0")
    private static let asciiNine = UInt8(ascii: "9")
    private static let asciiLowercaseA = UInt8(ascii: "a")
    private static let asciiLowercaseF = UInt8(ascii: "f")
    private static let asciiUppercaseA = UInt8(ascii: "A")
    private static let asciiUppercaseF = UInt8(ascii: "F")

    /// Whether this byte is an ASCII hexadecimal digit (0-9, a-f, A-F).
    var isHexDigit: Bool {
        switch self {
        case (.asciiZero)...(.asciiNine),
            (.asciiLowercaseA)...(.asciiLowercaseF),
            (.asciiUppercaseA)...(.asciiUppercaseF):
            return true
        default:
            return false
        }
    }
}

================================================
FILE: Sources/NIOSSL/SSLErrors.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL
import NIOCore

/// Wraps a single error from BoringSSL.
public struct BoringSSLInternalError: Equatable, CustomStringConvertible, Sendable {
    // Either a real BoringSSL error (code, file, line) or a synthetic NIOSSL one.
    private enum Backing: Hashable {
        case boringSSLErrorInfo(UInt32, String, UInt)
        case synthetic(String)
    }

    private var backing: Backing

    // Human-readable message rendered from the backing storage.
    private var errorMessage: String? {
        switch self.backing {
        case .boringSSLErrorInfo(let errorCode, let filepath, let line):
            // TODO(cory): This should become non-optional in the future, as it always succeeds.
            var scratchBuffer = [CChar](repeating: 0, count: 512)
            return scratchBuffer.withUnsafeMutableBufferPointer { pointer in
                CNIOBoringSSL_ERR_error_string_n(errorCode, pointer.baseAddress!, pointer.count)
                let errorString = String(cString: pointer.baseAddress!)
                return "\(errorString) at \(filepath):\(line)"
            }
        case .synthetic(let description):
            return description
        }
    }

    // Decimal error code string; empty for synthetic errors.
    private var errorCode: String {
        switch self.backing {
        case .boringSSLErrorInfo(let code, _, _):
            return String(code, radix: 10)
        case .synthetic:
            return ""
        }
    }

    public var description: String {
        "Error: \(errorCode) \(errorMessage ?? "")"
    }

    init(errorCode: UInt32, filename: String, line: UInt) {
        self.backing = .boringSSLErrorInfo(errorCode, filename, line)
    }

    private init(syntheticErrorDescription description: String) {
        self.backing = .synthetic(description)
    }

    /// Received EOF during the TLS handshake.
    public static let eofDuringHandshake = Self(syntheticErrorDescription: "EOF during handshake")

    /// Received EOF during additional certificate chain verification.
    public static let eofDuringAdditionalCertficiateChainValidation = Self(
        syntheticErrorDescription: "EOF during addition certificate chain validation"
    )
}

/// A representation of BoringSSL's internal error stack: a list of BoringSSL errors.
public typealias NIOBoringSSLErrorStack = [BoringSSLInternalError]

/// Errors that can be raised by NIO's BoringSSL wrapper.
public enum NIOSSLError: Error {
    case writeDuringTLSShutdown
    @available(*, deprecated, message: "unableToAllocateBoringSSLObject can no longer be thrown")
    case unableToAllocateBoringSSLObject
    case noSuchFilesystemObject
    case failedToLoadCertificate
    case failedToLoadPrivateKey
    case handshakeFailed(BoringSSLError)
    case shutdownFailed(BoringSSLError)
    case cannotMatchULabel
    case noCertificateToValidate
    case unableToValidateCertificate
    case cannotFindPeerIP
    case readInInvalidTLSState
    case uncleanShutdown
}

extension NIOSSLError: Equatable {}

/// Closing the TLS channel cleanly timed out, so it was closed uncleanly.
public struct NIOSSLCloseTimedOutError: Error {}

/// An enum that wraps individual BoringSSL errors directly.
public enum BoringSSLError: Error {
    case noError
    case zeroReturn
    case wantRead
    case wantWrite
    case wantConnect
    case wantAccept
    case wantX509Lookup
    case wantCertificateVerify
    case syscallError
    case sslError(NIOBoringSSLErrorStack)
    case unknownError(NIOBoringSSLErrorStack)
    case invalidSNIName(NIOBoringSSLErrorStack)
    case failedToSetALPN(NIOBoringSSLErrorStack)
}

extension BoringSSLError: Equatable {}

extension BoringSSLError {
    /// Maps a `SSL_get_error` result onto a ``BoringSSLError`` case, draining the
    /// BoringSSL error queue for results that carry an error stack.
    static func fromSSLGetErrorResult(_ result: CInt) -> BoringSSLError? {
        switch result {
        case SSL_ERROR_NONE:
            return .noError
        case SSL_ERROR_ZERO_RETURN:
            return .zeroReturn
        case SSL_ERROR_WANT_READ:
            return .wantRead
        case SSL_ERROR_WANT_WRITE:
            return .wantWrite
        case SSL_ERROR_WANT_CONNECT:
            return .wantConnect
        case SSL_ERROR_WANT_ACCEPT:
            return .wantAccept
        case SSL_ERROR_WANT_CERTIFICATE_VERIFY:
            return .wantCertificateVerify
        case SSL_ERROR_WANT_X509_LOOKUP:
            return .wantX509Lookup
        case SSL_ERROR_SYSCALL:
            return .syscallError
        case SSL_ERROR_WANT_PRIVATE_KEY_OPERATION:
            // This is a terrible hack: we can't add cases to this enum, so we can't represent
            // this directly. In all cases this should be the same as wantCertificateVerify, so we'll just use that.
            return .wantCertificateVerify
        case SSL_ERROR_SSL:
            return .sslError(buildErrorStack())
        default:
            return .unknownError(buildErrorStack())
        }
    }

    /// Drains BoringSSL's error queue into an ordered array of
    /// ``BoringSSLInternalError`` values.
    static func buildErrorStack() -> NIOBoringSSLErrorStack {
        var errorStack = NIOBoringSSLErrorStack()

        while true {
            var file: UnsafePointer<CChar>? = nil
            var line: CInt = 0
            // ERR_get_error_line returns 0 once the queue is empty.
            let errorCode = CNIOBoringSSL_ERR_get_error_line(&file, &line)
            if errorCode == 0 {
                break
            }
            let fileAsString = String(cString: file!)
            errorStack.append(BoringSSLInternalError(errorCode: errorCode, filename: fileAsString, line: UInt(line)))
        }

        return errorStack
    }
}

/// Represents errors that may occur while attempting to unwrap TLS from a connection.
public enum NIOTLSUnwrappingError: Error {
    /// The TLS channel has already been closed, so it is not possible to unwrap it.
    case alreadyClosed

    /// The internal state of the handler is not able to process the unwrapping request.
    case invalidInternalState

    /// We were unwrapping the connection, but during the unwrap process a close call
    /// was made. This means the connection is now closed, not unwrapped.
    case closeRequestedDuringUnwrap

    /// This write was failed because the channel was unwrapped before it was flushed.
    case unflushedWriteOnUnwrap
}

/// This structure contains errors added to NIOSSL after the original ``NIOSSLError`` enum was
/// shipped. This is an extensible error object that allows us to evolve it going forward.
public struct NIOSSLExtraError: Error {
    private var baseError: NIOSSLExtraError.BaseError

    // Optional human-readable detail appended to the description.
    private var _description: String?

    private init(baseError: NIOSSLExtraError.BaseError, description: String?) {
        self.baseError = baseError
        self._description = description
    }
}

extension NIOSSLExtraError {
    private enum BaseError: Equatable {
        case failedToValidateHostname
        case serverHostnameImpossibleToMatch
        case cannotUseIPAddressInSNI
        case invalidSNIHostname
        case unknownPrivateKeyFileType
        case noForwardProgress
    }
}

extension NIOSSLExtraError {
    /// NIOSSL was unable to validate the hostname presented by the remote peer.
    public static let failedToValidateHostname = NIOSSLExtraError(
        baseError: .failedToValidateHostname,
        description: nil
    )

    /// The server hostname provided by the user cannot match any names in the certificate due to containing invalid characters.
    public static let serverHostnameImpossibleToMatch = NIOSSLExtraError(
        baseError: .serverHostnameImpossibleToMatch,
        description: nil
    )

    /// IP addresses may not be used in SNI.
    public static let cannotUseIPAddressInSNI = NIOSSLExtraError(baseError: .cannotUseIPAddressInSNI, description: nil)

    /// The SNI hostname requirements have not been met.
    ///
    /// - note: Should the provided SNI hostname be an IP address instead, ``cannotUseIPAddressInSNI`` is thrown instead
    ///   of this error.
    ///
    /// Reasons a hostname might not meet the requirements:
    /// - hostname in UTF8 is more than 255 bytes
    /// - hostname is the empty string
    /// - hostname contains the `0` unicode scalar (which would be encoded as the `0` byte which is unsupported).
    public static let invalidSNIHostname = NIOSSLExtraError(baseError: .invalidSNIHostname, description: nil)

    /// The private key file for the TLS configuration has an unknown type.
    public static let unknownPrivateKeyFileType = NIOSSLExtraError(
        baseError: .unknownPrivateKeyFileType,
        description: nil
    )

    /// No forward progress is being made.
    ///
    /// This can happen when the `NIOSSLHandler` is unbuffering actions and gets into a state where
    /// it would potentially spin loop indefinitely.
    static let noForwardProgress = NIOSSLExtraError(baseError: .noForwardProgress, description: nil)

    // The factory methods below attach a context-specific description to the matching
    // base error. @inline(never): presumably to keep the string interpolation out of
    // callers — confirm intent before relying on it.

    @inline(never)
    internal static func failedToValidateHostname(expectedName: String) -> NIOSSLExtraError {
        let description = "Couldn't find \(expectedName) in certificate from peer"
        return NIOSSLExtraError(baseError: .failedToValidateHostname, description: description)
    }

    @inline(never)
    internal static func serverHostnameImpossibleToMatch(hostname: String) -> NIOSSLExtraError {
        let description = "The server hostname \(hostname) cannot be matched due to containing non-DNS characters"
        return NIOSSLExtraError(baseError: .serverHostnameImpossibleToMatch, description: description)
    }

    @inline(never)
    internal static func cannotUseIPAddressInSNI(ipAddress: String) -> NIOSSLExtraError {
        let description = "IP addresses cannot validly be used for Server Name Indication, got \(ipAddress)"
        return NIOSSLExtraError(baseError: .cannotUseIPAddressInSNI, description: description)
    }

    @inline(never)
    internal static func unknownPrivateKeyFileType(path: String) -> NIOSSLExtraError {
        let description = "Unknown private key file type for \(path)"
        return NIOSSLExtraError(baseError: .unknownPrivateKeyFileType, description: description)
    }
}

extension NIOSSLExtraError:
CustomStringConvertible {
    public var description: String {
        // Renders as "NIOSSLExtraError.<case>: <detail>" when a detail string exists.
        let formattedDescription = self._description.map { ": " + $0 } ?? ""
        return "NIOSSLExtraError.\(String(describing: self.baseError))\(formattedDescription)"
    }
}

extension NIOSSLExtraError: Equatable {
    /// Equality considers only the base error, not the attached description.
    public static func == (lhs: NIOSSLExtraError, rhs: NIOSSLExtraError) -> Bool {
        lhs.baseError == rhs.baseError
    }
}

================================================
FILE: Sources/NIOSSL/SSLInit.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2018 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL

/// Initialize BoringSSL. Note that this function IS NOT THREAD SAFE, and so must be called inside
/// either an explicit or implicit dispatch_once.
func initializeBoringSSL() -> Bool {
    CNIOBoringSSL_CRYPTO_library_init()
    return true
}

================================================
FILE: Sources/NIOSSL/SSLPKCS12Bundle.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc.
// and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL

/// A container for a single PKCS#12 bundle.
///
/// PKCS#12 is a specification that defines an archive format for storing multiple
/// cryptographic objects together in one file. Its most common usage, and the one
/// that SwiftNIO is most interested in, is its use to bundle one or more X.509
/// certificates (``NIOSSLCertificate``) together with an associated private key
/// (``NIOSSLPrivateKey``).
///
/// ### Working with TLSConfiguration
///
/// In many cases users will want to configure a ``TLSConfiguration`` with the data
/// from a PKCS#12 bundle. This object assists in unpacking that bundle into its
/// associated pieces.
///
/// If you have a PKCS12 bundle, you configure a ``TLSConfiguration`` like this:
///
///     let p12Bundle = NIOSSLPKCS12Bundle(file: pathToMyP12)
///     let config = TLSConfiguration.makeServerConfiguration(
///         certificateChain: p12Bundle.certificateChain,
///         privateKey: p12Bundle.privateKey
///     )
///
/// The created ``TLSConfiguration`` can then be safely used for your endpoint.
public struct NIOSSLPKCS12Bundle: Hashable {
    /// The chain of ``NIOSSLCertificate`` objects in the PKCS#12 bundle.
    public let certificateChain: [NIOSSLCertificate]
    /// The ``NIOSSLPrivateKey`` object for the leaf certificate in the PKCS#12 bundle.
    public let privateKey: NIOSSLPrivateKey

    /// Unpacks a parsed BoringSSL `PKCS12` object into a leaf certificate, its
    /// private key, and any additional chain certificates.
    private init<Bytes: Collection>(ref: OpaquePointer, passphrase: Bytes?) throws where Bytes.Element == UInt8 {
        var pkey: OpaquePointer? = nil
        var cert: OpaquePointer? = nil
        var caCerts: OpaquePointer? = nil

        let rc = try passphrase.withSecureCString { passphrase in
            CNIOBoringSSL_PKCS12_parse(ref, passphrase, &pkey, &cert, &caCerts)
        }
        guard rc == 1 else {
            throw BoringSSLError.unknownError(BoringSSLError.buildErrorStack())
        }

        // Successfully parsed, let's unpack. The key and cert are mandatory,
        // the ca stack is not.
        guard let actualCert = cert, let actualKey = pkey else {
            fatalError("Failed to obtain cert and pkey from a PKC12 file")
        }

        let certStackSize = caCerts.map { CNIOBoringSSL_sk_X509_num($0) } ?? 0
        var certs = [NIOSSLCertificate]()
        certs.reserveCapacity(Int(certStackSize) + 1)
        certs.append(NIOSSLCertificate.fromUnsafePointer(takingOwnership: actualCert))
        for idx in 0..<certStackSize {
            // The index is within the stack bounds, so the lookup cannot fail.
            let caCert = CNIOBoringSSL_sk_X509_value(caCerts, idx)!
            certs.append(NIOSSLCertificate.fromUnsafePointer(takingOwnership: caCert))
        }

        self.certificateChain = certs
        self.privateKey = NIOSSLPrivateKey.fromUnsafePointer(takingOwnership: actualKey)
    }

    /// Create a ``NIOSSLPKCS12Bundle`` from the given bytes in memory,
    /// optionally decrypting the bundle with the given passphrase.
    ///
    /// - parameters:
    ///     - buffer: The bytes of the PKCS#12 bundle.
    ///     - passphrase: The passphrase used for the bundle, as a sequence of UTF-8 bytes.
    public init<Bytes: Collection>(buffer: [UInt8], passphrase: Bytes?) throws where Bytes.Element == UInt8 {
        guard boringSSLIsInitialized else {
            fatalError("Failed to initialize BoringSSL")
        }

        let p12 = buffer.withUnsafeBytes { pointer -> OpaquePointer? in
            let bio = CNIOBoringSSL_BIO_new_mem_buf(pointer.baseAddress, pointer.count)!
            defer {
                CNIOBoringSSL_BIO_free(bio)
            }
            return CNIOBoringSSL_d2i_PKCS12_bio(bio, nil)
        }
        defer {
            p12.map { CNIOBoringSSL_PKCS12_free($0) }
        }

        if let p12 = p12 {
            try self.init(ref: p12, passphrase: passphrase)
        } else {
            throw BoringSSLError.unknownError(BoringSSLError.buildErrorStack())
        }
    }

    /// Create a ``NIOSSLPKCS12Bundle`` from the given bytes on disk,
    /// optionally decrypting the bundle with the given passphrase.
    ///
    /// - parameters:
    ///     - file: The path to the PKCS#12 bundle on disk.
    ///     - passphrase: The passphrase used for the bundle, as a sequence of UTF-8 bytes.
    public init<Bytes: Collection>(file: String, passphrase: Bytes?)
    throws where Bytes.Element == UInt8 {
        guard boringSSLIsInitialized else {
            fatalError("Failed to initialize BoringSSL")
        }

        let fileObject = try Posix.fopen(file: file, mode: "rb")
        defer {
            fclose(fileObject)
        }

        // Read the DER-encoded PKCS#12 structure straight from the FILE*.
        let p12 = CNIOBoringSSL_d2i_PKCS12_fp(fileObject, nil)
        defer {
            p12.map(CNIOBoringSSL_PKCS12_free)
        }

        if let p12 = p12 {
            try self.init(ref: p12, passphrase: passphrase)
        } else {
            throw BoringSSLError.unknownError(BoringSSLError.buildErrorStack())
        }
    }

    /// Create a ``NIOSSLPKCS12Bundle`` from the given bytes on disk,
    /// assuming it has no passphrase.
    ///
    /// If the bundle does have a passphrase, call ``init(file:passphrase:)`` instead.
    ///
    /// - parameters:
    ///     - file: The path to the PKCS#12 bundle on disk.
    public init(file: String) throws {
        try self.init(file: file, passphrase: Optional<[UInt8]>.none)
    }

    /// Create a ``NIOSSLPKCS12Bundle`` from the given bytes in memory,
    /// assuming it has no passphrase.
    ///
    /// If the bundle does have a passphrase, call ``init(buffer:passphrase:)`` instead.
    ///
    /// - parameters:
    ///     - buffer: The bytes of the PKCS#12 bundle.
    public init(buffer: [UInt8]) throws {
        try self.init(buffer: buffer, passphrase: Optional<[UInt8]>.none)
    }
}

extension NIOSSLPKCS12Bundle: Sendable {}

extension NIOSSLPKCS12Bundle {
    /// Create a ``NIOSSLPKCS12Bundle`` from the given certificate chain and private key.
    /// This constructor is particularly useful to create a new PKCS#12 file:
    /// call ``serialize(passphrase:)`` to get the bytes making up the file.
    ///
    /// - parameters:
    ///     - certificateChain: The chain of ``NIOSSLCertificate`` objects in the PKCS#12 bundle.
    ///     - privateKey: The ``NIOSSLPrivateKey`` object for the leaf certificate in the PKCS#12 bundle.
    public init(
        certificateChain: [NIOSSLCertificate],
        privateKey: NIOSSLPrivateKey
    ) {
        self.certificateChain = certificateChain
        self.privateKey = privateKey
    }

    /// Serialize this bundle into a PKCS#12 file.
/// /// The first certificate of the `certificateChain` array will be considered the "primary" certificate for /// this PKCS#12, and the bundle's`privateKey` must be its corresponding private key. /// The other certificates included in `certificates`, if any, will be considered as additional /// certificates in the certificate chain. /// /// - Parameters: /// - passphrase: The password with which to protect this PKCS#12 file. /// - Returns: An array of bytes making up the PKCS#12 file. public func serialize( passphrase: Bytes ) throws -> [UInt8] where Bytes.Element == UInt8 { guard let mainCertificate = self.certificateChain.first else { preconditionFailure("At least one certificate must be provided") } let certificateChainStack = CNIOBoringSSL_sk_X509_new(nil) defer { CNIOBoringSSL_sk_X509_pop_free(certificateChainStack, CNIOBoringSSL_X509_free) } for additionalCertificate in self.certificateChain.dropFirst() { let result = additionalCertificate.withUnsafeMutableX509Pointer { certificate in CNIOBoringSSL_X509_up_ref(certificate) return CNIOBoringSSL_sk_X509_push(certificateChainStack, certificate) } if result == 0 { fatalError("Failed to add certificate to chain") } } let pkcs12 = try passphrase.withSecureCString { passphrase in privateKey.withUnsafeMutableEVPPKEYPointer { privateKey in mainCertificate.withUnsafeMutableX509Pointer { certificate in CNIOBoringSSL_PKCS12_create( passphrase, nil, privateKey, certificate, certificateChainStack, 0, 0, 0, 0, 0 ) } } } defer { CNIOBoringSSL_PKCS12_free(pkcs12) } guard let bio = CNIOBoringSSL_BIO_new(CNIOBoringSSL_BIO_s_mem()) else { fatalError("Failed to malloc for a BIO handler") } defer { CNIOBoringSSL_BIO_free(bio) } let rc = CNIOBoringSSL_i2d_PKCS12_bio(bio, pkcs12) guard rc == 1 else { let errorStack = BoringSSLError.buildErrorStack() throw BoringSSLError.unknownError(errorStack) } var dataPtr: UnsafeMutablePointer? 
= nil let length = CNIOBoringSSL_BIO_get_mem_data(bio, &dataPtr) guard let bytes = dataPtr.map({ UnsafeMutableRawBufferPointer(start: $0, count: length) }) else { fatalError("Failed to get bytes from private key") } return Array(bytes) } } extension Collection where Element == UInt8 { /// Provides a contiguous copy of the bytes of this collection in a heap-allocated /// memory region that is locked into memory (that is, which can never be backed by a file), /// and which will be scrubbed and freed after use, and which is null-terminated. /// /// This method should be used when it is necessary to take a secure copy of a collection of /// bytes. Its implementation relies on BoringSSL directly. func withSecureCString(_ block: (UnsafePointer) throws -> T) throws -> T { // We need to allocate some memory and prevent it being swapped to disk while we use it. // For that reason we use mlock. let bufferSize = Int(self.count) + 1 let bufferPtr = UnsafeMutableBufferPointer.allocate(capacity: bufferSize) defer { bufferPtr.deallocate() } try Posix.mlock(addr: bufferPtr.baseAddress!, len: bufferPtr.count) defer { // If munlock fails take out the process. try! Posix.munlock(addr: bufferPtr.baseAddress!, len: bufferPtr.count) } let (_, nextIndex) = bufferPtr.initialize(from: self) assert(nextIndex == (bufferPtr.endIndex - 1)) // Add a null terminator. bufferPtr[nextIndex] = 0 defer { // We use OpenSSL_cleanse here because the compiler can't optimize this away. // .initialize(repeating: 0) can be, and empirically is, optimized away, bzero // is deprecated, memset_s is not well supported cross-platform, and memset-to-zero // is famously easily optimised away. This is our best bet. CNIOBoringSSL_OPENSSL_cleanse(bufferPtr.baseAddress!, bufferPtr.count) bufferPtr.baseAddress!.deinitialize(count: bufferPtr.count) } // Ok, the memory is ready for use. Call the user. return try bufferPtr.withMemoryRebound(to: Int8.self) { try block($0.baseAddress!) 
} } } extension Optional where Wrapped: Collection, Wrapped.Element == UInt8 { func withSecureCString(_ block: (UnsafePointer?) throws -> T) throws -> T { if let `self` = self { return try self.withSecureCString({ try block($0) }) } else { return try block(nil) } } } ================================================ FILE: Sources/NIOSSL/SSLPrivateKey.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2018 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// @_implementationOnly import CNIOBoringSSL /// An ``NIOSSLPassphraseCallback`` is a callback that will be invoked by NIOSSL when it needs to /// get access to a private key that is stored in encrypted form. /// /// This callback will be invoked with one argument, a non-escaping closure that must be called with the /// passphrase. Failing to call the closure will cause decryption to fail. /// /// The reason this design has been used is to allow you to secure any memory storing the passphrase after /// use. We guarantee that after the ``NIOSSLPassphraseSetter`` closure has been invoked the `Collection` /// you have passed in will no longer be needed by BoringSSL, and so you can safely destroy any memory it /// may be using if you need to. public typealias NIOSSLPassphraseCallback = (NIOSSLPassphraseSetter) throws -> Void where Bytes.Element == UInt8 /// An ``NIOSSLPassphraseSetter`` is a closure that you must invoke to provide a passphrase to BoringSSL. /// It will be provided to you when your ``NIOSSLPassphraseCallback`` is invoked. 
public typealias NIOSSLPassphraseSetter = (Bytes) -> Void where Bytes.Element == UInt8 /// An internal protocol that exists to let us avoid problems with generic types. /// /// The issue we have here is that we want to allow users to use whatever collection type suits them best to set /// the passphrase. For this reason, ``NIOSSLPassphraseSetter`` is a generic function, generic over the `Collection` /// protocol. However, that causes us an issue, because we need to stuff that callback into an /// ``BoringSSLPassphraseCallbackManager`` in order to create an `Unmanaged` and round-trip the pointer through C code. /// /// That makes ``BoringSSLPassphraseCallbackManager`` a generic object, and now we're in *real* trouble, becuase /// `Unmanaged` requires us to specify the *complete* type of the object we want to unwrap. In this case, we /// don't know it, because it's generic! /// /// Our way out is to note that while the class itself is generic, the only function we want to call in the /// ``globalBoringSSLPassphraseCallback`` is not. Thus, rather than try to hold the actual specific ``BoringSSLPassphraseManager``, /// we can hold it inside a protocol existential instead, so long as that protocol existential gives us the correct /// function to call. Hence: ``CallbackManagerProtocol``, a private protocol with a single conforming type. internal protocol CallbackManagerProtocol: AnyObject { func invoke(buffer: UnsafeMutableBufferPointer) -> CInt } /// This class exists primarily to work around the fact that Swift does not let us stuff /// a closure into an `Unmanaged`. Instead, we use this object to keep hold of it. final class BoringSSLPassphraseCallbackManager: CallbackManagerProtocol where Bytes.Element == UInt8 { private let userCallback: NIOSSLPassphraseCallback init(userCallback: @escaping NIOSSLPassphraseCallback) { // We have to type-erase this. 
self.userCallback = userCallback } func invoke(buffer: UnsafeMutableBufferPointer) -> CInt { var count: CInt = 0 do { try self.userCallback { passphraseBytes in // If we don't have enough space for the passphrase plus NUL, bail out. guard passphraseBytes.count < buffer.count else { return } _ = buffer.initialize(from: passphraseBytes.lazy.map { CChar($0) }) count = CInt(passphraseBytes.count) // We need to add a NUL terminator, in case the user did not. buffer[Int(passphraseBytes.count)] = 0 } } catch { // If we hit an error here, we just need to tolerate it. We'll return zero-length. count = 0 } return count } } /// Our global static BoringSSL passphrase callback. This is used as a thunk to dispatch out to /// the user-provided callback. func globalBoringSSLPassphraseCallback( buf: UnsafeMutablePointer?, size: CInt, rwflag: CInt, u: UnsafeMutableRawPointer? ) -> CInt { guard let buffer = buf, let userData = u else { preconditionFailure( "Invalid pointers passed to passphrase callback, buf: \(String(describing: buf)) u: \(String(describing: u))" ) } let bufferPointer = UnsafeMutableBufferPointer(start: buffer, count: Int(size)) guard let cbManager = Unmanaged.fromOpaque(userData).takeUnretainedValue() as? CallbackManagerProtocol else { preconditionFailure("Failed to pass object that can handle callback") } return cbManager.invoke(buffer: bufferPointer) } /// A reference to an BoringSSL private key object in the form of an `EVP_PKEY *`. /// /// This thin wrapper class allows us to use ARC to automatically manage /// the memory associated with this key. That ensures that BoringSSL /// will not free the underlying buffer until we are done with the key. /// /// This class also provides several convenience constructors that allow users /// to obtain an in-memory representation of a key from a buffer of /// bytes or from a file path. 
public final class NIOSSLPrivateKey {
    /// The two possible backing stores for a private key: a BoringSSL-owned
    /// `EVP_PKEY *`, or a type-erased user-provided custom key.
    @usableFromInline
    internal enum Representation {
        case native(OpaquePointer)  // `EVP_PKEY *`
        case custom(AnyNIOSSLCustomPrivateKey)
    }

    @usableFromInline
    internal let representation: Representation

    /// Calls `body` with the underlying `EVP_PKEY *`.
    ///
    /// Must only be called on `.native` keys: custom keys have no `EVP_PKEY` and will trap here.
    internal func withUnsafeMutableEVPPKEYPointer<ReturnType>(
        _ body: (OpaquePointer) throws -> ReturnType
    ) rethrows -> ReturnType {
        guard case .native(let pointer) = self.representation else {
            preconditionFailure()
        }
        return try body(pointer)
    }

    private init(withReference ref: OpaquePointer) {
        self.representation = .native(ref)
    }

    /// A delegating initializer for `init(file:format:passphraseCallback)` and `init(file:format:)`.
    private convenience init(
        file: String,
        format: NIOSSLSerializationFormats,
        callbackManager: CallbackManagerProtocol?
    ) throws {
        let fileObject = try Posix.fopen(file: file, mode: "rb")
        defer {
            // If fclose fails there is nothing we can do about it.
            _ = try? Posix.fclose(file: fileObject)
        }

        let key = withExtendedLifetime(callbackManager) { callbackManager -> OpaquePointer? in
            // NOTE(review): this BIO wraps the FILE* but the reads below go through `fileObject`
            // directly; the BIO appears unused — confirm against upstream intent.
            guard let bio = CNIOBoringSSL_BIO_new_fp(fileObject, BIO_NOCLOSE) else {
                return nil
            }
            defer {
                CNIOBoringSSL_BIO_free(bio)
            }

            switch format {
            case .pem:
                // This annoying conditional binding is used to work around the fact that I cannot pass
                // a variable to a function pointer argument.
                if let callbackManager = callbackManager {
                    return CNIOBoringSSL_PEM_read_PrivateKey(
                        fileObject,
                        nil,
                        { globalBoringSSLPassphraseCallback(buf: $0, size: $1, rwflag: $2, u: $3) },
                        Unmanaged.passUnretained(callbackManager as AnyObject).toOpaque()
                    )
                } else {
                    return CNIOBoringSSL_PEM_read_PrivateKey(fileObject, nil, nil, nil)
                }
            case .der:
                return CNIOBoringSSL_d2i_PrivateKey_fp(fileObject, nil)
            }
        }

        if key == nil {
            throw NIOSSLError.failedToLoadPrivateKey
        }
        self.init(withReference: key!)
    }

    /// A delegating initializer for `init(buffer:format:passphraseCallback)` and `init(buffer:format:)`.
    private convenience init(
        bytes: [UInt8],
        format: NIOSSLSerializationFormats,
        callbackManager: CallbackManagerProtocol?
    ) throws {
        let ref = bytes.withUnsafeBytes { (ptr) -> OpaquePointer? in
            let bio = CNIOBoringSSL_BIO_new_mem_buf(ptr.baseAddress!, ptr.count)!
            defer {
                CNIOBoringSSL_BIO_free(bio)
            }

            return withExtendedLifetime(callbackManager) { callbackManager -> OpaquePointer? in
                switch format {
                case .pem:
                    if let callbackManager = callbackManager {
                        // This annoying conditional binding is used to work around the fact that I cannot pass
                        // a variable to a function pointer argument.
                        return CNIOBoringSSL_PEM_read_bio_PrivateKey(
                            bio,
                            nil,
                            { globalBoringSSLPassphraseCallback(buf: $0, size: $1, rwflag: $2, u: $3) },
                            Unmanaged.passUnretained(callbackManager as AnyObject).toOpaque()
                        )
                    } else {
                        return CNIOBoringSSL_PEM_read_bio_PrivateKey(bio, nil, nil, nil)
                    }
                case .der:
                    return CNIOBoringSSL_d2i_PrivateKey_bio(bio, nil)
                }
            }
        }

        if ref == nil {
            throw NIOSSLError.failedToLoadPrivateKey
        }
        self.init(withReference: ref!)
    }

    /// Create a ``NIOSSLPrivateKey`` from a file at a given path in either PEM or
    /// DER format.
    ///
    /// - parameters:
    ///     - file: The path to the file to load.
    ///     - format: The format of the key to load, either DER or PEM.
    public convenience init(file: String, format: NIOSSLSerializationFormats) throws {
        try self.init(file: file, format: format, callbackManager: nil)
    }

    /// Create a ``NIOSSLPrivateKey`` from a file at a given path in either PEM or
    /// DER format, providing a passphrase callback.
    ///
    /// - parameters:
    ///     - file: The path to the file to load.
    ///     - format: The format of the key to load, either DER or PEM.
    ///     - passphraseCallback: A callback to invoke to obtain the passphrase for
    ///         encrypted keys.
public convenience init( file: String, format: NIOSSLSerializationFormats, passphraseCallback: @escaping NIOSSLPassphraseCallback ) throws where T.Element == UInt8 { let manager = BoringSSLPassphraseCallbackManager(userCallback: passphraseCallback) try self.init(file: file, format: format, callbackManager: manager) } /// Create a ``NIOSSLPrivateKey`` from a buffer of bytes in either PEM or /// DER format. /// /// - parameters: /// - buffer: The key bytes. /// - format: The format of the key to load, either DER or PEM. /// - SeeAlso: ``NIOSSLPrivateKey/init(bytes:format:)`` @available(*, deprecated, renamed: "NIOSSLPrivateKey.init(bytes:format:)") public convenience init(buffer: [Int8], format: NIOSSLSerializationFormats) throws { try self.init(bytes: buffer.map(UInt8.init), format: format) } /// Create a ``NIOSSLPrivateKey`` from a buffer of bytes in either PEM or /// DER format. /// /// - parameters: /// - bytes: The key bytes. /// - format: The format of the key to load, either DER or PEM. public convenience init(bytes: [UInt8], format: NIOSSLSerializationFormats) throws { try self.init(bytes: bytes, format: format, callbackManager: nil) } /// Create a ``NIOSSLPrivateKey`` from a buffer of bytes in either PEM or /// DER format. /// /// - parameters: /// - buffer: The key bytes. /// - format: The format of the key to load, either DER or PEM. /// - passphraseCallback: Optionally a callback to invoke to obtain the passphrase for /// encrypted keys. If not provided, or set to `nil`, the default BoringSSL /// behaviour will be used, which prints a prompt and requests the passphrase from /// stdin. 
/// - SeeAlso: `NIOSSLPrivateKey.init(bytes:format:passphraseCallback:)` @available(*, deprecated, renamed: "NIOSSLPrivateKey.init(bytes:format:passphraseCallback:)") public convenience init( buffer: [Int8], format: NIOSSLSerializationFormats, passphraseCallback: @escaping NIOSSLPassphraseCallback ) throws where T.Element == UInt8 { try self.init(bytes: buffer.map(UInt8.init), format: format, passphraseCallback: passphraseCallback) } /// Create a ``NIOSSLPrivateKey`` from a buffer of bytes in either PEM or /// DER format. /// /// - parameters: /// - bytes: The key bytes. /// - format: The format of the key to load, either DER or PEM. /// - passphraseCallback: Optionally a callback to invoke to obtain the passphrase for /// encrypted keys. If not provided, or set to `nil`, the default BoringSSL /// behaviour will be used, which prints a prompt and requests the passphrase from /// stdin. public convenience init( bytes: [UInt8], format: NIOSSLSerializationFormats, passphraseCallback: @escaping NIOSSLPassphraseCallback ) throws where T.Element == UInt8 { let manager = BoringSSLPassphraseCallbackManager(userCallback: passphraseCallback) try self.init(bytes: bytes, format: format, callbackManager: manager) } /// Create a ``NIOSSLPrivateKey`` from a custom private key callback. /// /// The private key, in addition to needing to conform to ``NIOSSLCustomPrivateKey``, /// is also required to be `Hashable`. This is because ``NIOSSLPrivateKey``s are `Hashable`. /// /// - parameters: /// - customPrivateKey: The custom private key to use with the TLS certificate. @inlinable public init(customPrivateKey: CustomKey) { self.representation = .custom(AnyNIOSSLCustomPrivateKey(customPrivateKey)) } /// Create an NIOSSLPrivateKey wrapping a pointer into BoringSSL. /// /// This is a function that should be avoided as much as possible because it plays poorly with /// BoringSSL's reference-counted memory. 
This function does not increment the reference count for the EVP_PKEY /// object here, nor does it duplicate it: it just takes ownership of the copy here. This object /// **will** deallocate the underlying EVP_PKEY object when deinited, and so if you need to keep that /// EVP_PKEY object alive you create a new EVP_PKEY before passing that object here. /// /// In general, however, this function should be avoided in favour of one of the convenience /// initializers, which ensure that the lifetime of the EVP_PKEY object is better-managed. static internal func fromUnsafePointer(takingOwnership pointer: OpaquePointer) -> NIOSSLPrivateKey { NIOSSLPrivateKey(withReference: pointer) } deinit { switch self.representation { case .native(let ref): CNIOBoringSSL_EVP_PKEY_free(ref) case .custom: // Merely dropping the ref is enough. () } } } // NIOSSLPrivateKey is publicly immutable and we do not internally mutate it after initialisation. // It is therefore Sendable. extension NIOSSLPrivateKey: @unchecked Sendable {} // MARK:- Utilities extension NIOSSLPrivateKey { /// Calls the given body function with a temporary buffer containing the DER-encoded bytes of this /// private key. This function does allocate for these bytes, but there is no way to avoid doing so with the /// X509 API in BoringSSL. /// /// The pointer provided to the closure is not valid beyond the lifetime of this method call. /// /// This method is only safe to call on native private keys. private static func withUnsafeDERBuffer( of ref: OpaquePointer, _ body: (UnsafeRawBufferPointer) throws -> T ) throws -> T { guard let bio = CNIOBoringSSL_BIO_new(CNIOBoringSSL_BIO_s_mem()) else { fatalError("Failed to malloc for a BIO handler") } defer { CNIOBoringSSL_BIO_free(bio) } let rc = CNIOBoringSSL_i2d_PrivateKey_bio(bio, ref) guard rc == 1 else { let errorStack = BoringSSLError.buildErrorStack() throw BoringSSLError.unknownError(errorStack) } var dataPtr: UnsafeMutablePointer? 
= nil let length = CNIOBoringSSL_BIO_get_mem_data(bio, &dataPtr) guard let bytes = dataPtr.map({ UnsafeRawBufferPointer(start: $0, count: length) }) else { fatalError("Failed to map bytes from a private key") } return try body(bytes) } /// The custom signing algorithms required by this private key, if any. /// /// Is `nil` when the key is a native key, as this is handled by BoringSSL. internal var customSigningAlgorithms: [SignatureAlgorithm]? { switch self.representation { case .native: return nil case .custom(let customKey): return customKey.signatureAlgorithms } } /// Extracts the bytes of this private key in DER format. /// - Returns: The DER-encoded bytes for this private key. public var derBytes: [UInt8] { get throws { switch self.representation { case .native(let evpKey): return try Self.withUnsafeDERBuffer(of: evpKey) { Array($0) } case .custom(let custom): return custom.derBytes } } } } extension NIOSSLPrivateKey: Equatable { public static func == (lhs: NIOSSLPrivateKey, rhs: NIOSSLPrivateKey) -> Bool { switch (lhs.representation, rhs.representation) { case (.native, .native): // Annoyingly, EVP_PKEY_cmp does not have a traditional return value pattern. 1 means equal, 0 means non-equal, // negative means error. Here we treat "error" as "not equal", because we have no error reporting mechanism from this call site, // and anyway, BoringSSL considers "these keys aren't of the same type" to be an error, which is in my mind pretty ludicrous. return lhs.withUnsafeMutableEVPPKEYPointer { lhsRef in rhs.withUnsafeMutableEVPPKEYPointer { rhsRef in CNIOBoringSSL_EVP_PKEY_cmp(lhsRef, rhsRef) == 1 } } case (.custom(let lhsCustom), .custom(let rhsCustom)): return lhsCustom == rhsCustom case (.native, .custom), (.custom, .native): return false } } } extension NIOSSLPrivateKey: Hashable { public func hash(into hasher: inout Hasher) { switch self.representation { case .native(let ref): // Sadly, BoringSSL doesn't provide us with a nice key hashing function. 
We therefore have only two options: // we can either serialize the key into DER and feed that into the hasher, or we can attempt to hash the key parameters directly. // We could attempt the latter, but frankly it causes a lot of pain for minimal gain, so we don't bother. This incurs an allocation, // but that's ok. We crash if we hit an error here, as there is no way to recover. hasher.combine(0) try! NIOSSLPrivateKey.withUnsafeDERBuffer(of: ref) { hasher.combine(bytes: $0) } case .custom(let custom): hasher.combine(1) custom.hash(into: &hasher) } } } @available(*, unavailable) extension NIOSSLPrivateKey.Representation: Sendable {} ================================================ FILE: Sources/NIOSSL/SSLPublicKey.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2018 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// @_implementationOnly import CNIOBoringSSL /// A ``NIOSSLPublicKey`` is an abstract handle to a public key owned by BoringSSL. /// /// This object is of minimal utility, as it cannot be used for very many operations /// in ``NIOSSL``. Its primary purpose is to allow extracting public keys from /// ``NIOSSLCertificate`` objects to be serialized, so that they can be passed to /// general-purpose cryptography libraries. public final class NIOSSLPublicKey { private let ref: OpaquePointer // fileprivate init(withOwnedReference ref: OpaquePointer) { self.ref = ref } deinit { CNIOBoringSSL_EVP_PKEY_free(self.ref) } } // NIOSSLPublicKey is publicly immutable and we do not internally mutate it after initialisation. 
// It is therefore Sendable.
extension NIOSSLPublicKey: @unchecked Sendable {}

// MARK:- Helpful initializers
extension NIOSSLPublicKey {
    /// Create a ``NIOSSLPublicKey`` object from an internal `EVP_PKEY` pointer.
    ///
    /// This method expects `pointer` to be passed at +1, and consumes that reference.
    ///
    /// - parameters:
    ///     - pointer: A pointer to an `EVP_PKEY` structure containing the public key.
    /// - returns: An `NIOSSLPublicKey` wrapping the pointer.
    internal static func fromInternalPointer(takingOwnership pointer: OpaquePointer) -> NIOSSLPublicKey {
        NIOSSLPublicKey(withOwnedReference: pointer)
    }
}

extension NIOSSLPublicKey {
    /// Extracts the bytes of this public key in the SubjectPublicKeyInfo format.
    ///
    /// The SubjectPublicKeyInfo format is defined in RFC 5280. In addition to the raw key bytes, it also
    /// provides an identifier of the algorithm, ensuring that the key can be unambiguously decoded.
    ///
    /// - returns: The DER-encoded SubjectPublicKeyInfo bytes for this public key.
    /// - throws: If an error occurred while serializing the key.
    public func toSPKIBytes() throws -> [UInt8] {
        guard let bio = CNIOBoringSSL_BIO_new(CNIOBoringSSL_BIO_s_mem()) else {
            fatalError("Failed to malloc for a BIO handler")
        }
        defer {
            CNIOBoringSSL_BIO_free(bio)
        }

        let rc = CNIOBoringSSL_i2d_PUBKEY_bio(bio, self.ref)
        guard rc == 1 else {
            let errorStack = BoringSSLError.buildErrorStack()
            throw BoringSSLError.unknownError(errorStack)
        }

        var dataPtr: UnsafeMutablePointer<CChar>? = nil
        let length = CNIOBoringSSL_BIO_get_mem_data(bio, &dataPtr)

        guard let bytes = dataPtr.map({ UnsafeMutableRawBufferPointer(start: $0, count: length) }) else {
            fatalError("Failed to map bytes from a public key")
        }

        return Array(bytes)
    }
}


================================================
FILE: Sources/NIOSSL/SecurityFrameworkCertificateVerification.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL
import NIOCore

// We can only use Security.framework to validate TLS certificates on Apple platforms.
#if canImport(Darwin)
import Dispatch
import Foundation
@preconcurrency import Security

extension SSLConnection {
    /// Validates the peer certificate chain against the system-default trust store,
    /// completing `promise` with the outcome. May perform network activity, so the
    /// evaluation is dispatched to a private queue.
    func performSecurityFrameworkValidation(
        promise: EventLoopPromise<NIOSSLVerificationResult>,
        peerCertificates: [SecCertificate]
    ) {
        do {
            guard case .default = self.parentContext.configuration.trustRoots ?? .default else {
                preconditionFailure("This callback should only be used if we are using the system-default trust.")
            }

            let expectedHostname = self.validateHostnames ? self.expectedHostname : nil

            // This force-unwrap is safe as we must have decided if we're a client or a server before validation.
            var trust: SecTrust? = nil
            var result: OSStatus
            let policy = SecPolicyCreateSSL(self.role! == .client, expectedHostname as CFString?)
            result = SecTrustCreateWithCertificates(peerCertificates as CFArray, policy, &trust)

            guard result == errSecSuccess, let actualTrust = trust else {
                throw NIOSSLError.unableToValidateCertificate
            }

            // If there are additional trust roots then we need to add them to the SecTrust as anchors.
            let additionalAnchorCertificates: [SecCertificate] = try self.parentContext.configuration
                .additionalTrustRoots.flatMap { trustRoots -> [NIOSSLCertificate] in
                    guard case .certificates(let certs) = trustRoots else {
                        preconditionFailure(
                            "This callback happens on the request path, file-based additional trust roots should be pre-loaded when creating the SSLContext."
                        )
                    }
                    return certs
                }.map {
                    guard let secCert = SecCertificateCreateWithData(nil, Data(try $0.toDERBytes()) as CFData) else {
                        throw NIOSSLError.failedToLoadCertificate
                    }
                    return secCert
                }
            if !additionalAnchorCertificates.isEmpty {
                guard SecTrustSetAnchorCertificates(actualTrust, additionalAnchorCertificates as CFArray) == errSecSuccess
                else {
                    throw NIOSSLError.failedToLoadCertificate
                }
                // To use additional anchors _and_ the built-in ones we must reenable the built-in ones explicitly.
                guard SecTrustSetAnchorCertificatesOnly(actualTrust, false) == errSecSuccess else {
                    throw NIOSSLError.failedToLoadCertificate
                }
            }

            // We create a DispatchQueue here to be called back on, as this validation may perform network activity.
            let callbackQueue = DispatchQueue(label: "io.swiftnio.ssl.validationCallbackQueue")

            // SecTrustEvaluateAsync and its cousin withError require that they are called from the same queue given to
            // them as a parameter. Thus, we async away now.
            callbackQueue.async {
                let result: OSStatus

                if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) {
                    result = SecTrustEvaluateAsyncWithError(actualTrust, callbackQueue) { (_, valid, _) in
                        promise.succeed(valid ? .certificateVerified : .failed)
                    }
                } else {
                    result = SecTrustEvaluateAsync(actualTrust, callbackQueue) { (_, result) in
                        promise.completeWith(result)
                    }
                }

                if result != errSecSuccess {
                    promise.fail(NIOSSLError.unableToValidateCertificate)
                }
            }
        } catch {
            promise.fail(error)
        }
    }
}

extension EventLoopPromise where Value == NIOSSLVerificationResult {
    /// Maps a Security.framework trust evaluation result onto our verification result.
    fileprivate func completeWith(_ result: SecTrustResultType) {
        switch result {
        case .proceed, .unspecified:
            // These two cases mean we have successfully validated the certificate. We're done!
            self.succeed(.certificateVerified)
        default:
            // Oops, we failed.
            self.succeed(.failed)
        }
    }
}

extension SSLConnection {
    /// Converts the peer's DER-encoded certificate chain into `SecCertificate` objects
    /// suitable for Security.framework validation.
    func getPeerCertificatesAsSecCertificate() throws -> [SecCertificate] {
        try self.withPeerCertificateChainBuffers { buffers in
            guard let buffers = buffers else {
                throw NIOSSLError.unableToValidateCertificate
            }
            return try buffers.map { buffer in
                let data = Data(bytes: buffer.baseAddress!, count: buffer.count)
                guard let cert = SecCertificateCreateWithData(nil, data as CFData) else {
                    throw NIOSSLError.unableToValidateCertificate
                }
                return cert
            }
        }
    }
}
#endif


================================================
FILE: Sources/NIOSSL/String+unsafeUninitializedCapacity.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2022 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

extension String {
    /// This is a backport of `String.init(unsafeUninitializedCapacity:initializingUTF8With:)`
    /// that allows writing directly into an uninitialized String's backing memory.
/// /// As this API does not exist on older Apple platforms, we fake it out with a pointer and accept the extra copy. init( customUnsafeUninitializedCapacity capacity: Int, initializingUTF8With initializer: (_ buffer: UnsafeMutableBufferPointer) throws -> Int ) rethrows { if #available(macOS 11.0, iOS 14.0, tvOS 14.0, watchOS 7.0, *) { try self.init(unsafeUninitializedCapacity: capacity, initializingUTF8With: initializer) } else { try self.init(backportUnsafeUninitializedCapacity: capacity, initializingUTF8With: initializer) } } private init( backportUnsafeUninitializedCapacity capacity: Int, initializingUTF8With initializer: (_ buffer: UnsafeMutableBufferPointer) throws -> Int ) rethrows { let buffer = UnsafeMutableBufferPointer.allocate(capacity: capacity) defer { buffer.deallocate() } let initializedCount = try initializer(buffer) precondition(initializedCount <= capacity, "Overran buffer in initializer!") self = String( decoding: UnsafeMutableBufferPointer(start: buffer.baseAddress!, count: initializedCount), as: UTF8.self ) } } ================================================ FILE: Sources/NIOSSL/SubjectAlternativeName.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2022 Apple Inc. 
// and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL
@_implementationOnly import CNIOBoringSSLShims
import NIOCore

#if canImport(Darwin)
import Darwin.C
#elseif canImport(Musl)
import Musl
#elseif canImport(Glibc)
import Glibc
#elseif canImport(Android)
import Android
#else
#error("unsupported os")
#endif

/// Collection of all Subject Alternative Names from a `NIOSSLCertificate`
public struct _SubjectAlternativeNames {

    @usableFromInline
    internal final class Storage {
        // Owned `STACK_OF(GENERAL_NAME) *`; freed in deinit.
        fileprivate let nameStack: OpaquePointer?
        @usableFromInline internal let stackSize: Int

        internal init(nameStack: OpaquePointer?) {
            self.nameStack = nameStack
            if let nameStack = nameStack {
                self.stackSize = CNIOBoringSSLShims_sk_GENERAL_NAME_num(nameStack)
            } else {
                self.stackSize = 0
            }
        }

        public subscript(position: Int) -> Element {
            guard let name = CNIOBoringSSLShims_sk_GENERAL_NAME_value(self.nameStack!, position) else {
                fatalError("Unexpected null pointer when unwrapping SAN value")
            }

            let contents = UnsafeBufferPointer(
                start: CNIOBoringSSL_ASN1_STRING_get0_data(name.pointee.d.ia5),
                count: Int(CNIOBoringSSL_ASN1_STRING_length(name.pointee.d.ia5))
            )
            return .init(nameType: .init(name.pointee.type), contents: .init(collection: self, buffer: contents))
        }

        deinit {
            if let nameStack = self.nameStack {
                CNIOBoringSSL_GENERAL_NAMES_free(nameStack)
            }
        }
    }

    @usableFromInline internal var storage: Storage

    internal init(nameStack: OpaquePointer?) {
        self.storage = .init(nameStack: nameStack)
    }
}

// _SubjectAlternativeNames is immutable and therefore Sendable
extension _SubjectAlternativeNames: @unchecked Sendable {}

// _SubjectAlternativeNames.Storage is immutable and therefore Sendable
extension _SubjectAlternativeNames.Storage: @unchecked Sendable {}

extension _SubjectAlternativeNames: RandomAccessCollection {

    @inlinable
    public subscript(position: Int) -> _SubjectAlternativeName {
        precondition(self.indices.contains(position), "index \(position) out of bounds")
        return self.storage[position]
    }

    @inlinable
    public var startIndex: Int { 0 }

    @inlinable
    public var endIndex: Int { self.storage.stackSize }
}

public struct _SubjectAlternativeName {

    public struct NameType: Hashable, Sendable {
        public var rawValue: Int

        public init(_ rawCode: Int) {
            self.rawValue = rawCode
        }

        fileprivate init(_ rawCode: Int32) {
            self.init(Int(rawCode))
        }

        public static let email = Self(GEN_EMAIL)
        public static let dnsName = Self(GEN_DNS)
        public static let ipAddress = Self(GEN_IPADD)
        public static let uri = Self(GEN_URI)
    }

    public struct Contents {

        // only part of this type to keep a strong reference to the underlying storage of `buffer`
        private let collection: _SubjectAlternativeNames.Storage
        // lifetime automatically managed by `collection`
        @usableFromInline internal let buffer: UnsafeBufferPointer<UInt8>

        internal init(collection: _SubjectAlternativeNames.Storage, buffer: UnsafeBufferPointer<UInt8>) {
            self.collection = collection
            self.buffer = buffer
        }

        @inlinable
        public func withUnsafeBufferPointer<Result>(
            _ body: (UnsafeBufferPointer<UInt8>) throws -> Result
        ) rethrows -> Result {
            try body(self.buffer)
        }
    }

    // should be replaced by `swift-nio`s `IPAddress` once https://github.com/apple/swift-nio/issues/1650 is resolved
    internal enum IPAddress {
        case ipv4(in_addr)
        case ipv6(in6_addr)
    }

    public var nameType: NameType
    public var contents: Contents
}

// _SubjectAlternativeName is immutable and therefore Sendable
extension _SubjectAlternativeName: @unchecked Sendable {}
// _SubjectAlternativeName.Contents is immutable and therefore Sendable
extension _SubjectAlternativeName.Contents: @unchecked Sendable {}

extension _SubjectAlternativeName.Contents: RandomAccessCollection {

    @inlinable
    public var startIndex: Int { self.buffer.startIndex }

    @inlinable
    public var endIndex: Int { self.buffer.endIndex }

    @inlinable
    public subscript(position: Int) -> UInt8 {
        precondition(self.indices.contains(position), "index \(position) out of bounds")
        return self.buffer[position]
    }
}

extension _SubjectAlternativeName.IPAddress {
    /// Parses an IP-address SAN into an address struct; returns nil for non-IP SANs
    /// or payloads that are not exactly 4 (IPv4) or 16 (IPv6) bytes.
    internal init?(_ subjectAlternativeName: _SubjectAlternativeName) {
        guard subjectAlternativeName.nameType == .ipAddress else {
            return nil
        }

        // NOTE(review): `load(as:)` requires suitably aligned storage; the ASN.1 payload
        // alignment is not guaranteed here — confirm, or consider `loadUnaligned(as:)`.
        switch subjectAlternativeName.contents.count {
        case 4:
            let addr = subjectAlternativeName.contents.withUnsafeBufferPointer {
                $0.baseAddress.map { UnsafeRawPointer($0).load(as: in_addr.self) }
            }
            guard let innerAddr = addr else {
                return nil
            }
            self = .ipv4(innerAddr)
        case 16:
            let addr = subjectAlternativeName.contents.withUnsafeBufferPointer {
                $0.baseAddress.map { UnsafeRawPointer($0).load(as: in6_addr.self) }
            }
            guard let innerAddr = addr else {
                return nil
            }
            self = .ipv6(innerAddr)
        default:
            return nil
        }
    }
}

// swift-format-ignore: DontRepeatTypeInStaticProperties
extension _SubjectAlternativeName.IPAddress: CustomStringConvertible {

    private static let ipv4AddressLength = 16
    private static let ipv6AddressLength = 46

    /// A string representation of the IP address.
    /// E.g. IPv4: `192.168.0.1`
    /// E.g. IPv6: `2001:db8::1`
    public var description: String {
        switch self {
        case .ipv4(let addr):
            return Self.ipv4ToString(addr)
        case .ipv6(let addr):
            return Self.ipv6ToString(addr)
        }
    }

    static private func ipv4ToString(_ address: in_addr) -> String {
        var address = address
        var dest: [CChar] = Array(repeating: 0, count: Self.ipv4AddressLength)
        dest.withUnsafeMutableBufferPointer { pointer in
            let result = inet_ntop(AF_INET, &address, pointer.baseAddress!, socklen_t(pointer.count))
            precondition(
                result != nil,
                "The IP address was invalid. This should never happen as we're within the IP address struct."
            )
        }
        return String(cString: &dest)
    }

    static private func ipv6ToString(_ address: in6_addr) -> String {
        var address = address
        var dest: [CChar] = Array(repeating: 0, count: Self.ipv6AddressLength)
        dest.withUnsafeMutableBufferPointer { pointer in
            let result = inet_ntop(AF_INET6, &address, pointer.baseAddress!, socklen_t(pointer.count))
            precondition(
                result != nil,
                "The IP address was invalid. This should never happen as we're within the IP address struct."
            )
        }
        return String(cString: &dest)
    }
}


================================================
FILE: Sources/NIOSSL/SwiftCrypto/NIOSSLSecureBytes.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2022 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

/// Auto-zeroing storage for data in memory.
///
/// ``NIOSSLSecureBytes`` uses a best-effort strategy to try to remove data from memory when it is no longer in use, by
/// automatically zeroing the heap memory it uses. This is best-effort because it's easy for users to accidentally copy
This is best-effort becuase it's easy for users to accidentally copy /// data out of this structure. To get its best effect, do not copy this data out into another type, but operate on /// ``NIOSSLSecureBytes`` generically or specifically. public struct NIOSSLSecureBytes { @usableFromInline var backing: Backing /// Create an empty ``NIOSSLSecureBytes``. @inlinable public init() { self = .init(count: 0) } @usableFromInline init(count: Int) { self.backing = NIOSSLSecureBytes.Backing.create(randomBytes: count) } init(bytes: [UInt8]) { self.backing = Backing.create(bytes: bytes) } /// Allows initializing a SecureBytes object with a closure that will initialize the memory. @usableFromInline init( unsafeUninitializedCapacity: Int, initializingWith callback: (inout UnsafeMutableRawBufferPointer, inout Int) throws -> Void ) rethrows { self.backing = Backing.create(capacity: unsafeUninitializedCapacity) try self.backing._withVeryUnsafeMutableBytes { veryUnsafePointer in // As Array does, we want to truncate the initializing pointer to only have the requested size. var veryUnsafePointer = UnsafeMutableRawBufferPointer( rebasing: veryUnsafePointer.prefix(unsafeUninitializedCapacity) ) var initializedCount = 0 try callback(&veryUnsafePointer, &initializedCount) self.backing.count = initializedCount } } } // NIOSSLSecureBytes is a Copy on Write (CoW) type and therefore Sendable extension NIOSSLSecureBytes: @unchecked Sendable {} extension NIOSSLSecureBytes { /// Append the contents of a collection of bytes to this ``NIOSSLSecureBytes``. /// /// - parameter data: The `data` to add to the ``NIOSSLSecureBytes``. 
@inlinable mutating public func append(_ data: C) where C.Element == UInt8 { let requiredCapacity = self.count + data.count if !isKnownUniquelyReferenced(&self.backing) || requiredCapacity > self.backing.capacity { let newBacking = Backing.create(capacity: requiredCapacity) newBacking._appendBytes(self.backing, inRange: 0..= n { return } let newBacking = Backing.create(capacity: n) newBacking._appendBytes(self.backing, inRange: 0..(_ body: (UnsafeRawBufferPointer) throws -> T) rethrows -> T { try self.backing.withUnsafeBytes(body) } } // MARK: - Equatable conformance, constant-time extension NIOSSLSecureBytes: Equatable { static public func == (lhs: NIOSSLSecureBytes, rhs: NIOSSLSecureBytes) -> Bool { lhs.backing.withUnsafeBytes { lhsPtr in rhs.backing.withUnsafeBytes { rhsPtr in constantTimeCompare(lhsPtr, rhsPtr) } } } } // MARK: - RandomAccessCollection conformance extension NIOSSLSecureBytes: RandomAccessCollection { @inlinable public var startIndex: Int { 0 } @inlinable public var endIndex: Int { self.count } @inlinable public var count: Int { self.backing.count } @inlinable public subscript(_ index: Int) -> UInt8 { get { self.backing[offset: index] } set { self.backing[offset: index] = newValue } } } // MARK: - MutableCollection conformance extension NIOSSLSecureBytes: MutableCollection {} // MARK: - RangeReplaceableCollection conformance extension NIOSSLSecureBytes: RangeReplaceableCollection { @inlinable mutating public func replaceSubrange(_ subrange: Range, with newElements: C) where C.Element == UInt8 { let requiredCapacity = self.backing.count - subrange.count + newElements.count if !isKnownUniquelyReferenced(&self.backing) || requiredCapacity > self.backing.capacity { // We have to allocate anyway, so let's use a nice straightforward copy. let newBacking = Backing.create(capacity: requiredCapacity) let lowerSlice = 0.. 
{ @usableFromInline class func create(capacity: Int) -> Backing { let capacity = Int(UInt32(capacity).nextPowerOf2ClampedToMax()) return Backing.create( minimumCapacity: capacity, makingHeaderWith: { _ in BackingHeader(count: 0, capacity: capacity) } ) as! Backing } @usableFromInline class func create(copying original: Backing) -> Backing { Backing.create(bytes: original.withUnsafeBytes { Array($0) }) } @inlinable class func create(bytes: [UInt8]) -> Backing { bytes.withUnsafeBytes { bytesPtr in let backing = Backing.create(capacity: bytesPtr.count) backing._withVeryUnsafeMutableBytes { targetPtr in targetPtr.copyMemory(from: bytesPtr) } backing.count = bytesPtr.count precondition(backing.count <= backing.capacity) return backing } } @usableFromInline class func create(randomBytes: Int) -> Backing { let backing = Backing.create(capacity: randomBytes) backing._withVeryUnsafeMutableBytes { targetPtr in assert(targetPtr.count >= randomBytes) targetPtr.initializeWithRandomBytes(count: randomBytes) } backing.count = randomBytes return backing } deinit { // We always clear the whole capacity, even if we don't think we used it all. 
let bytesToClear = self.header.capacity _ = self.withUnsafeMutablePointerToElements { elementsPtr in memset_s(elementsPtr, bytesToClear, 0, bytesToClear) } } @usableFromInline var count: Int { get { self.header.count } set { self.header.count = newValue } } @usableFromInline subscript(offset offset: Int) -> UInt8 { get { // precondition(offset >= 0 && offset < self.count) self.withUnsafeMutablePointerToElements { ($0 + offset).pointee } } set { // precondition(offset >= 0 && offset < self.count) return self.withUnsafeMutablePointerToElements { ($0 + offset).pointee = newValue } } } } } // This conformance is technically redundant - Swift 6.2 compiler finally caught this #if compiler(<6.2) @available(*, unavailable) extension NIOSSLSecureBytes.Backing: Sendable {} #endif extension NIOSSLSecureBytes.Backing { @usableFromInline func replaceSubrangeFittingWithinCapacity(_ subrange: Range, with newElements: C) where C.Element == UInt8 { // This function is called when have a unique reference to the backing storage, and we have enough room to store these bytes without // any problem. We have one pre-existing buffer made up of 4 regions: a prefix set of bytes that are // before the range "subrange", a range of bytes to be replaced (R1), a suffix set of bytes that are after // the range "subrange" but within the valid count, and then a region of uninitialized memory. We also have // a new set of bytes, R2, that may be larger or smaller than R1, and could indeed be empty! // // ┌────────────────────────┬──────────────────┬──────────────────┬───────────────┐ // │ Prefix │ R1 │ Suffix │ Uninitialized │ // └────────────────────────┴──────────────────┴──────────────────┴───────────────┘ // // ┌─────────────────────────────────────┐ // │ R2 │ // └─────────────────────────────────────┘ // // The minimal number of steps we can take in the general case is two steps. 
We can't just copy R2 into the space
        // for R1 and then move the suffix, as if R2 is larger than R1 we'll have thrown some suffix bytes away. So we have
        // to move suffix first. What we do is take the bytes in suffix, and move them (via memmove). We can then copy
        // R2 in, and feel confident that the space in memory is right.
        precondition(self.count - subrange.count + newElements.count <= self.capacity, "Insufficient capacity")

        // How far the suffix has to move: positive when R2 is longer than R1,
        // negative when it is shorter.
        let moveDistance = newElements.count - subrange.count
        let suffixRange = subrange.upperBound..<self.count
        self._moveBytes(range: suffixRange, by: moveDistance)
        self._copyBytes(newElements, at: subrange.lowerBound)
        self.count += moveDistance
    }

    /// Appends the bytes of a collection to this storage, crashing if there is not enough room.
    @inlinable
    // private but inlinable
    func _appendBytes<C: Collection>(_ bytes: C) where C.Element == UInt8 {
        let byteCount = bytes.count
        precondition(
            self.capacity - self.count - byteCount >= 0,
            "Insufficient space for byte copying, must have reallocated!"
        )

        let lowerOffset = self.count
        self._withVeryUnsafeMutableBytes { bytesPtr in
            // Only the uninitialized tail (from `lowerOffset` on) is written.
            let innerPtrSlice = UnsafeMutableRawBufferPointer(rebasing: bytesPtr[lowerOffset...])
            innerPtrSlice.copyBytes(from: bytes)
        }
        self.count += byteCount
    }

    /// Appends the bytes of a slice of another backing buffer to this storage, crashing if there
    /// is not enough room.
    @inlinable
    // private but inlinable
    func _appendBytes(
        _ backing: NIOSSLSecureBytes.Backing,
        inRange range: Range<Int>
    ) {
        precondition(range.lowerBound >= 0)
        precondition(range.upperBound <= backing.capacity)
        precondition(
            self.capacity - self.count - range.count >= 0,
            "Insufficient space for byte copying, must have reallocated!"
        )

        backing.withUnsafeBytes { backingPtr in
            let ptrSlice = UnsafeRawBufferPointer(rebasing: backingPtr[range])
            let lowerOffset = self.count
            self._withVeryUnsafeMutableBytes { bytesPtr in
                let innerPtrSlice = UnsafeMutableRawBufferPointer(rebasing: bytesPtr[lowerOffset...])
                innerPtrSlice.copyMemory(from: ptrSlice)
            }
            self.count += ptrSlice.count
        }
    }

    /// Moves the range of bytes identified by the slice by the delta, crashing if the move would
    /// place the bytes out of the storage. Note that this does not update the count: external code
    /// must ensure that that happens.
@usableFromInline // private but usableFromInline func _moveBytes(range: Range, by delta: Int) { // We have to check that the range is within the delta, as is the new location. precondition(range.lowerBound >= 0) precondition(range.upperBound <= self.capacity) let shiftedRange = (range.lowerBound + delta)..<(range.upperBound + delta) precondition(shiftedRange.lowerBound > 0) precondition(shiftedRange.upperBound <= self.capacity) self._withVeryUnsafeMutableBytes { backingPtr in let source = UnsafeRawBufferPointer(rebasing: backingPtr[range]) let dest = UnsafeMutableRawBufferPointer(rebasing: backingPtr[shiftedRange]) dest.copyMemory(from: source) // copy memory uses memmove under the hood. } } // Copies some bytes into the buffer at the appropriate place. Does not update count: external code must do so. @inlinable // private but inlinable func _copyBytes(_ bytes: C, at offset: Int) where C.Element == UInt8 { precondition(offset >= 0) precondition(offset + bytes.count <= self.capacity) let byteRange = offset..<(offset + bytes.count) self._withVeryUnsafeMutableBytes { backingPtr in let dest = UnsafeMutableRawBufferPointer(rebasing: backingPtr[byteRange]) dest.copyBytes(from: bytes) } } @usableFromInline func withUnsafeBytes(_ body: (UnsafeRawBufferPointer) throws -> T) rethrows -> T { let count = self.count return try self.withUnsafeMutablePointerToElements { elementsPtr in try body(UnsafeRawBufferPointer(start: elementsPtr, count: count)) } } @usableFromInline func withUnsafeMutableBytes(_ body: (UnsafeMutableRawBufferPointer) throws -> T) rethrows -> T { let count = self.count return try self.withUnsafeMutablePointerToElements { elementsPtr in try body(UnsafeMutableRawBufferPointer(start: elementsPtr, count: count)) } } /// Very unsafe in the sense that this points to uninitialized memory. Used only for implementations within this file. 
@inlinable // private but inlinable func _withVeryUnsafeMutableBytes( _ body: (UnsafeMutableRawBufferPointer) throws -> T ) rethrows -> T { let capacity = self.capacity return try self.withUnsafeMutablePointerToElements { elementsPtr in try body(UnsafeMutableRawBufferPointer(start: elementsPtr, count: capacity)) } } } extension UInt32 { /// Returns the next power of two unless that would overflow, in which case UInt32.max (on 64-bit systems) or /// Int32.max (on 32-bit systems) is returned. The returned value is always safe to be cast to Int and passed /// to malloc on all platforms. func nextPowerOf2ClampedToMax() -> UInt32 { guard self > 0 else { return 1 } var n = self #if arch(arm) || arch(i386) // on 32-bit platforms we can't make use of a whole UInt32.max (as it doesn't fit in an Int) let max = UInt32(Int.max) #else // on 64-bit platforms we're good let max = UInt32.max #endif n -= 1 n |= n >> 1 n |= n >> 2 n |= n >> 4 n |= n >> 8 n |= n >> 16 if n != max { n += 1 } return n } } ================================================ FILE: Sources/NIOSSL/SwiftCrypto/RNG.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2022 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// extension UnsafeMutableRawBufferPointer { func initializeWithRandomBytes(count: Int) { guard count > 0 else { return } precondition(count <= self.count) var rng = SystemRandomNumberGenerator() // We store bytes 64-bits at a time until we can't anymore. 
var targetPtr = self while targetPtr.count > 8 { targetPtr.storeBytes(of: rng.next(), as: UInt64.self) targetPtr = UnsafeMutableRawBufferPointer(rebasing: targetPtr[8...]) } // Now we're down to having to store things an integer at a time. We do this by shifting and // masking. var remainingWord: UInt64 = rng.next() while targetPtr.count > 0 { targetPtr.storeBytes(of: UInt8(remainingWord & 0xFF), as: UInt8.self) remainingWord >>= 8 targetPtr = UnsafeMutableRawBufferPointer(rebasing: targetPtr[1...]) } } } ================================================ FILE: Sources/NIOSSL/SwiftCrypto/SafeCompare.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2022 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// /// A straightforward constant-time comparison function for any two collections of bytes. @inlinable internal func constantTimeCompare(_ lhs: LHS, _ rhs: RHS) -> Bool where LHS.Element == UInt8, RHS.Element == UInt8 { guard lhs.count == rhs.count else { return false } return zip(lhs, rhs).reduce(into: 0) { $0 |= $1.0 ^ $1.1 } == 0 } ================================================ FILE: Sources/NIOSSL/SwiftCrypto/Zeroization.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2022 Apple Inc. 
and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL

typealias errno_t = CInt

// This is a Swift wrapper for the libc function that does not exist on Linux. We shim it via a call to OPENSSL_cleanse.
// We have the same syntax, but mostly ignore it.
// Only zeroing the full extent is supported (`byte == 0`, `smax == n`), which is
// asserted in debug builds; the return value is always 0 (success).
@discardableResult
func memset_s(_ s: UnsafeMutableRawPointer!, _ smax: Int, _ byte: CInt, _ n: Int) -> errno_t {
    assert(smax == n, "memset_s invariant not met")
    assert(byte == 0, "memset_s used to not zero anything")
    CNIOBoringSSL_OPENSSL_cleanse(s, smax)
    return 0
}


================================================
FILE: Sources/NIOSSL/TLSConfiguration.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2025 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL
import NIOCore

/// Known and supported TLS versions.
public enum TLSVersion: Sendable {
    case tlsv1
    case tlsv11
    case tlsv12
    case tlsv13
}

/// Places NIOSSL can obtain certificates from.
public enum NIOSSLCertificateSource: Hashable, Sendable {
    /// Load the certificate(s) from a file at the given path.
    @available(
        *,
        deprecated,
        message:
            "Use 'NIOSSLCertificate.fromPEMFile(_:)' to load the certificate(s) and use the '.certificate(NIOSSLCertificate)' case to provide them as a source"
    )
    case file(String)

    /// An already-loaded certificate.
    case certificate(NIOSSLCertificate)
}

/// Places NIOSSL can obtain private keys from.
public enum NIOSSLPrivateKeySource: Hashable {
    /// Path to file in PEM or ASN1 format to load private key from
    ///
    /// File Extensions | Expected file format
    /// --------------- | --------------------
    /// `.pem` | PEM
    /// `.der or .key` | ASN1
    @available(*, deprecated, message: "Use 'NIOSSLPrivateKeySource.privateKey(NIOSSLPrivateKey)' to set private key")
    case file(String)

    /// Loaded Private key
    case privateKey(NIOSSLPrivateKey)
}

extension NIOSSLPrivateKeySource: Sendable {}

/// Places NIOSSL can obtain a trust store from.
public enum NIOSSLTrustRoots: Hashable, Sendable {
    /// Path to either a file of CA certificates in PEM format, or a directory containing CA certificates in PEM format.
    ///
    /// If a path to a file is provided, the file can contain several CA certificates identified by
    ///
    ///     -----BEGIN CERTIFICATE-----
    ///     ... (CA certificate in base64 encoding) ...
    ///     -----END CERTIFICATE-----
    ///
    /// sequences. Before, between, and after the certificates, text is allowed which can be used e.g.
    /// for descriptions of the certificates.
    ///
    /// If a path to a directory is provided, the files each contain one CA certificate in PEM format.
    case file(String)

    /// A list of certificates.
    case certificates([NIOSSLCertificate])

    /// The system default root of trust.
    case `default`

    // Lossless mapping from NIOSSLAdditionalTrustRoots, which mirrors the
    // `.file`/`.certificates` cases but has no `.default`.
    internal init(from trustRoots: NIOSSLAdditionalTrustRoots) {
        switch trustRoots {
        case .file(let path):
            self = .file(path)
        case .certificates(let certs):
            self = .certificates(certs)
        }
    }
}

/// Places NIOSSL can obtain additional trust roots from.
public enum NIOSSLAdditionalTrustRoots: Hashable, Sendable { /// See ``NIOSSLTrustRoots/file(_:)`` case file(String) /// See ``NIOSSLTrustRoots/certificates(_:)`` case certificates([NIOSSLCertificate]) } /// Available ciphers to use for TLS instead of a string based representation. public struct NIOTLSCipher: RawRepresentable, Hashable, Sendable { /// Construct a ``NIOTLSCipher`` from the RFC code point for that cipher. public init(rawValue: UInt16) { self.rawValue = rawValue } /// Construct a ``NIOTLSCipher`` from the RFC code point for that cipher. public init(_ rawValue: RawValue) { self.rawValue = rawValue } /// The RFC code point for the given cipher. public var rawValue: UInt16 public typealias RawValue = UInt16 public static let TLS_RSA_WITH_AES_128_CBC_SHA = NIOTLSCipher(rawValue: 0x2F) public static let TLS_RSA_WITH_AES_256_CBC_SHA = NIOTLSCipher(rawValue: 0x35) public static let TLS_PSK_WITH_AES_128_CBC_SHA = NIOTLSCipher(rawValue: 0x8C) public static let TLS_PSK_WITH_AES_256_CBC_SHA = NIOTLSCipher(rawValue: 0x8D) public static let TLS_RSA_WITH_AES_128_GCM_SHA256 = NIOTLSCipher(rawValue: 0x9C) public static let TLS_RSA_WITH_AES_256_GCM_SHA384 = NIOTLSCipher(rawValue: 0x9D) public static let TLS_AES_128_GCM_SHA256 = NIOTLSCipher(rawValue: 0x1301) public static let TLS_AES_256_GCM_SHA384 = NIOTLSCipher(rawValue: 0x1302) public static let TLS_CHACHA20_POLY1305_SHA256 = NIOTLSCipher(rawValue: 0x1303) public static let TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = NIOTLSCipher(rawValue: 0xC009) public static let TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = NIOTLSCipher(rawValue: 0xC00A) public static let TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = NIOTLSCipher(rawValue: 0xC013) public static let TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = NIOTLSCipher(rawValue: 0xC014) public static let TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = NIOTLSCipher(rawValue: 0xC035) public static let TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = NIOTLSCipher(rawValue: 0xC036) public static let 
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = NIOTLSCipher(rawValue: 0xC02B) public static let TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = NIOTLSCipher(rawValue: 0xC02C) public static let TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = NIOTLSCipher(rawValue: 0xC02F) public static let TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = NIOTLSCipher(rawValue: 0xC030) public static let TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = NIOTLSCipher(rawValue: 0xCCA8) public static let TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = NIOTLSCipher(rawValue: 0xCCA9) var standardName: String { let boringSSLCipher = CNIOBoringSSL_SSL_get_cipher_by_value(self.rawValue) return String(cString: CNIOBoringSSL_SSL_CIPHER_standard_name(boringSSLCipher)) } } /// Available curves to use for TLS. public struct NIOTLSCurve: RawRepresentable, Hashable, Sendable { /// Construct a ``NIOTLSCurve`` from the RFC code point for that curve. public init(rawValue: UInt16) { self.rawValue = rawValue } /// Construct a ``NIOTLSCurve`` from the RFC code point for that curve. public init(_ rawValue: RawValue) { self.rawValue = rawValue } /// The RFC code point for the given curve. public var rawValue: UInt16 public typealias RawValue = UInt16 public static let secp256r1 = NIOTLSCurve(rawValue: 0x17) public static let secp384r1 = NIOTLSCurve(rawValue: 0x18) public static let secp521r1 = NIOTLSCurve(rawValue: 0x19) public static let x25519 = NIOTLSCurve(rawValue: 0x1D) public static let x448 = NIOTLSCurve(rawValue: 0x1E) public static let x25519_MLKEM768 = NIOTLSCurve(rawValue: 0x11EC) } /// Formats NIOSSL supports for serializing keys and certificates. public enum NIOSSLSerializationFormats: Sendable { case pem case der } /// Certificate verification modes. public enum CertificateVerification: Sendable { public struct NoneOptions: Sendable, Equatable, Hashable { /// While the peer does not have to give you certificates, /// they can optionally be verified if the peer offers them. 
public var validatePresentedCertificates: Bool fileprivate init() { // Backwards-compatible self.validatePresentedCertificates = false } } /// Usable through ``none`` and ``optionalVerification``. case none(NoneOptions) /// Certificates will be validated against the trust store, but will not /// be checked to see if they are valid for the given hostname. case noHostnameVerification /// Certificates will be validated against the trust store and checked /// against the hostname of the service we are contacting. case fullVerification } extension CertificateVerification { /// Certificates will be validated if they are presented by the peer, i.e., if the peer presents /// certificates they must pass validation. However, if the peer does not present certificates, /// the connection will be accepted. public static var optionalVerification: CertificateVerification { var options = NoneOptions() options.validatePresentedCertificates = true return .none(options) } /// All certificate verification disabled. public static var none: CertificateVerification { .none(NoneOptions()) } } extension CertificateVerification: Hashable { // empty } /// Support for TLS renegotiation. /// /// In general, renegotiation should not be enabled except in circumstances where it is absolutely necessary. /// Renegotiation is only supported in TLS 1.2 and earlier, and generally does not work very well. NIOSSL will /// disallow most uses of renegotiation: the only supported use-case is to perform post-connection authentication /// *as a client*. There is no way to initiate a TLS renegotiation in NIOSSL. public enum NIORenegotiationSupport: Sendable { /// No support for TLS renegotiation. The default and recommended setting. case none /// Allow renegotiation exactly once. If you must use renegotiation, use this setting. case once /// Allow repeated renegotiation. To be avoided. case always } /// Signature algorithms. 
The values are defined as in TLS 1.3
public struct SignatureAlgorithm: RawRepresentable, Hashable, Sendable {

    public typealias RawValue = UInt16
    public var rawValue: UInt16

    /// Construct a ``SignatureAlgorithm`` from its TLS 1.3 code point.
    public init(rawValue: UInt16) {
        self.rawValue = rawValue
    }

    public static let rsaPkcs1Sha1 = SignatureAlgorithm(rawValue: 0x0201)
    public static let rsaPkcs1Sha256 = SignatureAlgorithm(rawValue: 0x0401)
    public static let rsaPkcs1Sha384 = SignatureAlgorithm(rawValue: 0x0501)
    public static let rsaPkcs1Sha512 = SignatureAlgorithm(rawValue: 0x0601)
    public static let ecdsaSha1 = SignatureAlgorithm(rawValue: 0x0203)
    public static let ecdsaSecp256R1Sha256 = SignatureAlgorithm(rawValue: 0x0403)
    public static let ecdsaSecp384R1Sha384 = SignatureAlgorithm(rawValue: 0x0503)
    public static let ecdsaSecp521R1Sha512 = SignatureAlgorithm(rawValue: 0x0603)
    public static let rsaPssRsaeSha256 = SignatureAlgorithm(rawValue: 0x0804)
    public static let rsaPssRsaeSha384 = SignatureAlgorithm(rawValue: 0x0805)
    public static let rsaPssRsaeSha512 = SignatureAlgorithm(rawValue: 0x0806)
    public static let ed25519 = SignatureAlgorithm(rawValue: 0x0807)
}

/// A secure default configuration of cipher suites for TLS 1.2 and earlier.
///
/// The goal of this cipher suite string is:
/// - Prefer cipher suites that offer Perfect Forward Secrecy (DHE/ECDHE)
/// - Prefer ECDH(E) to DH(E) for performance.
/// - Prefer any AEAD cipher suite over non-AEAD suites for better performance and security
/// - Prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common
/// - Disable NULL authentication and encryption and any appearance of MD5
public let defaultCipherSuites = [
    "ECDH+AESGCM",
    "ECDH+CHACHA20",
    "DH+AESGCM",
    "DH+CHACHA20",
    "ECDH+AES256",
    "DH+AES256",
    "ECDH+AES128",
    "DH+AES",
    "RSA+AESGCM",
    "RSA+AES",
    "!aNULL",
    "!eNULL",
    "!MD5",
].joined(separator: ":")

/// Encodes a string to the wire format of an ALPN identifier.
These MUST be ASCII, and so /// this routine will crash the program if they aren't, as these are always user-supplied /// strings. internal func encodeALPNIdentifier(identifier: String) -> [UInt8] { var encodedIdentifier = [UInt8]() encodedIdentifier.append(UInt8(identifier.utf8.count)) for codePoint in identifier.unicodeScalars { encodedIdentifier.append(contentsOf: Unicode.ASCII.encode(codePoint)!) } return encodedIdentifier } /// Decodes a string from the wire format of an ALPN identifier. These MUST be correctly /// formatted ALPN identifiers, and so this routine will crash the program if they aren't. internal func decodeALPNIdentifier(identifier: [UInt8]) -> String { String(decoding: identifier[1.. /// for detailed information about platform-specific behavior differences. public var trustRoots: NIOSSLTrustRoots? /// Additional trust roots to use to validate certificates, used in addition to ``trustRoots``. /// /// - NOTE: The combination of ``trustRoots`` and ``additionalTrustRoots`` affects which certificate validation /// backend is used on Apple platforms. See for detailed information about /// platform-specific behavior differences. public var additionalTrustRoots: [NIOSSLAdditionalTrustRoots] /// The certificates to offer during negotiation. If not present, no certificates will be offered. public var certificateChain: [NIOSSLCertificateSource] /// The private key associated with the leaf certificate. public var privateKey: NIOSSLPrivateKeySource? internal var _pskClientIdentityProvider: _NIOPSKClientIdentityProvider? internal var _pskServerIdentityProvider: _NIOPSKServerIdentityProvider? /// PSK Client Callback to get the key based on hint and identity. @available(*, deprecated, message: "Deprecated in favor of pskClientProvider which can handle optional hint") public var pskClientCallback: NIOPSKClientIdentityCallback? 
{ get { if case .callback(let callback) = self._pskClientIdentityProvider { return callback } return nil } set { self._pskClientIdentityProvider = newValue.flatMap({ .callback($0) }) } } /// SSL Context Callback to provide dynamic context based on server name public var sslContextCallback: NIOSSLContextCallback? = nil @available(*, deprecated, message: "Deprecated in favor of pskServerProvider which can handle optional hint") public var pskServerCallback: NIOPSKServerIdentityCallback? { get { if case .callback(let callback) = self._pskServerIdentityProvider { return callback } return nil } set { self._pskServerIdentityProvider = newValue.flatMap({ .callback($0) }) } } /// PSK Client Callback to get the key based on an optional hint and identity. public var pskClientProvider: NIOPSKClientIdentityProvider? { get { if case .provider(let callback) = self._pskClientIdentityProvider { return callback } return nil } set { self._pskClientIdentityProvider = newValue.flatMap({ .provider($0) }) } } /// PSK Server Callback to get the key based on an optional hint and identity. public var pskServerProvider: NIOPSKServerIdentityProvider? { get { if case .provider(let callback) = self._pskServerIdentityProvider { return callback } return nil } set { self._pskServerIdentityProvider = newValue.flatMap({ .provider($0) }) } } /// Optional PSK hint to be used during SSLContext create. public var pskHint: String? = nil /// The application protocols to use in the connection. Should be an ordered list of ASCII /// strings representing the ALPN identifiers of the protocols to negotiate. For clients, /// the protocols will be offered in the order given. For servers, the protocols will be matched /// against the client's offered protocols in order. 
public var applicationProtocols: [String] { get { self.encodedApplicationProtocols.map(decodeALPNIdentifier) } set { self.encodedApplicationProtocols = newValue.map(encodeALPNIdentifier) } } internal var encodedApplicationProtocols: [[UInt8]] /// The amount of time to wait after initiating a shutdown before performing an unclean /// shutdown. Defaults to 5 seconds. public var shutdownTimeout: TimeAmount /// A callback that can be used to implement `SSLKEYLOGFILE` support. public var keyLogCallback: NIOSSLKeyLogCallback? /// Whether renegotiation is supported. public var renegotiationSupport: NIORenegotiationSupport /// Send the CA names derived from the ``trustRoots`` for client authentication. /// This instructs the client which identities can be used by evaluating what CA the identity certificate was issued from. public var sendCANameList: Bool private init( cipherSuiteValues: [NIOTLSCipher] = [], cipherSuites: String = defaultCipherSuites, verifySignatureAlgorithms: [SignatureAlgorithm]?, signingSignatureAlgorithms: [SignatureAlgorithm]?, minimumTLSVersion: TLSVersion, maximumTLSVersion: TLSVersion?, certificateVerification: CertificateVerification, trustRoots: NIOSSLTrustRoots, certificateChain: [NIOSSLCertificateSource], privateKey: NIOSSLPrivateKeySource?, applicationProtocols: [String], shutdownTimeout: TimeAmount, keyLogCallback: NIOSSLKeyLogCallback?, renegotiationSupport: NIORenegotiationSupport, additionalTrustRoots: [NIOSSLAdditionalTrustRoots], sendCANameList: Bool = false, sslContextCallback: NIOSSLContextCallback? = nil, pskClientProvider: NIOPSKClientIdentityProvider? = nil, pskServerProvider: NIOPSKServerIdentityProvider? = nil, pskHint: String? 
= nil ) { self.cipherSuites = cipherSuites self.verifySignatureAlgorithms = verifySignatureAlgorithms self.signingSignatureAlgorithms = signingSignatureAlgorithms self.minimumTLSVersion = minimumTLSVersion self.maximumTLSVersion = maximumTLSVersion self.trustRoots = trustRoots self.additionalTrustRoots = additionalTrustRoots self.certificateVerification = certificateVerification self.certificateChain = certificateChain self.privateKey = privateKey self.encodedApplicationProtocols = [] self.shutdownTimeout = shutdownTimeout self.renegotiationSupport = renegotiationSupport self.sendCANameList = sendCANameList self.applicationProtocols = applicationProtocols self.keyLogCallback = keyLogCallback self.sslContextCallback = sslContextCallback self.pskClientProvider = pskClientProvider self.pskServerProvider = pskServerProvider self.pskHint = pskHint if !cipherSuiteValues.isEmpty { self.cipherSuiteValues = cipherSuiteValues } } } extension TLSConfiguration: Sendable {} // MARK: BestEffortHashable extension TLSConfiguration { /// Returns a best effort result of whether two ``TLSConfiguration`` objects are equal. /// /// The "best effort" stems from the fact that we are checking the pointer to the ``keyLogCallback`` closure. /// /// - warning: You should probably not use this function. This function can return false-negatives, but not false-positives. 
public func bestEffortEquals(_ comparing: TLSConfiguration) -> Bool {
    // Closures cannot be compared directly, so the callback/provider fields are compared
    // via the raw bytes of their optional containers. Equal bytes implies the same stored
    // closure; unequal bytes may still refer to equivalent closures — hence "best effort",
    // and hence false-negatives but no false-positives.
    func rawBytesEqual<Value>(_ lhs: Value, _ rhs: Value) -> Bool {
        withUnsafeBytes(of: lhs) { lhsBytes in
            withUnsafeBytes(of: rhs) { rhsBytes in
                lhsBytes.elementsEqual(rhsBytes)
            }
        }
    }

    return self.minimumTLSVersion == comparing.minimumTLSVersion
        && self.maximumTLSVersion == comparing.maximumTLSVersion
        && self.cipherSuites == comparing.cipherSuites
        && self.curves == comparing.curves
        && self.verifySignatureAlgorithms == comparing.verifySignatureAlgorithms
        && self.signingSignatureAlgorithms == comparing.signingSignatureAlgorithms
        && self.certificateVerification == comparing.certificateVerification
        && self.trustRoots == comparing.trustRoots
        && self.additionalTrustRoots == comparing.additionalTrustRoots
        && self.certificateChain == comparing.certificateChain
        && self.privateKey == comparing.privateKey
        && self.encodedApplicationProtocols == comparing.encodedApplicationProtocols
        && self.shutdownTimeout == comparing.shutdownTimeout
        && rawBytesEqual(self.keyLogCallback, comparing.keyLogCallback)
        && self.renegotiationSupport == comparing.renegotiationSupport
        && self.sendCANameList == comparing.sendCANameList
        && rawBytesEqual(self.sslContextCallback, comparing.sslContextCallback)
        && rawBytesEqual(self._pskClientIdentityProvider, comparing._pskClientIdentityProvider)
        && rawBytesEqual(self._pskServerIdentityProvider, comparing._pskServerIdentityProvider)
        && self.pskHint == comparing.pskHint
}

/// Returns a best effort hash of this TLS configuration.
///
/// The "best effort" stems from the fact that we are hashing the pointer bytes of the ``keyLogCallback`` closure.
///
/// - warning: You should probably not use this function. This function can return false-negatives, but not false-positives.
public func bestEffortHash(into hasher: inout Hasher) {
    // Closures cannot be hashed directly, so their raw container bytes are fed into the
    // hasher instead.
    func combineRawBytes<Value>(of value: Value, into hasher: inout Hasher) {
        withUnsafeBytes(of: value) { rawBytes in
            hasher.combine(bytes: rawBytes)
        }
    }

    hasher.combine(self.minimumTLSVersion)
    hasher.combine(self.maximumTLSVersion)
    hasher.combine(self.cipherSuites)
    hasher.combine(self.curves)
    hasher.combine(self.verifySignatureAlgorithms)
    hasher.combine(self.signingSignatureAlgorithms)
    hasher.combine(self.certificateVerification)
    hasher.combine(self.trustRoots)
    hasher.combine(self.additionalTrustRoots)
    hasher.combine(self.certificateChain)
    hasher.combine(self.privateKey)
    hasher.combine(self.encodedApplicationProtocols)
    hasher.combine(self.shutdownTimeout)
    combineRawBytes(of: self.keyLogCallback, into: &hasher)
    hasher.combine(self.renegotiationSupport)
    hasher.combine(self.sendCANameList)
    combineRawBytes(of: self._pskClientIdentityProvider, into: &hasher)
    combineRawBytes(of: self._pskServerIdentityProvider, into: &hasher)
    combineRawBytes(of: self.sslContextCallback, into: &hasher)
    hasher.combine(self.pskHint)
}

/// Creates a TLS configuration for use with client-side contexts.
///
/// This provides sensible defaults, and can be used without customisation. For server-side
/// contexts, you should use ``TLSConfiguration/makeServerConfiguration(certificateChain:privateKey:)`` instead.
///
/// For customising fields, modify the returned TLSConfiguration object.
public static func makeClientConfiguration() -> TLSConfiguration {
    // Defaulted parameters of the private initializer (PSK providers, context callback,
    // CA name list, …) are deliberately left at their defaults here.
    return TLSConfiguration(
        cipherSuites: defaultCipherSuites,
        verifySignatureAlgorithms: nil,
        signingSignatureAlgorithms: nil,
        minimumTLSVersion: .tlsv1,
        maximumTLSVersion: nil,
        certificateVerification: .fullVerification,
        trustRoots: .default,
        certificateChain: [],
        privateKey: nil,
        applicationProtocols: [],
        shutdownTimeout: .seconds(5),
        keyLogCallback: nil,
        renegotiationSupport: .none,
        additionalTrustRoots: []
    )
}

/// Create a TLS configuration for use with server-side contexts.
///
/// This provides sensible defaults while requiring that you provide any data that is necessary
/// for server-side function. For client use, try ``TLSConfiguration/makeClientConfiguration()`` instead.
///
/// For customising fields, modify the returned TLSConfiguration object.
public static func makeServerConfiguration(
    certificateChain: [NIOSSLCertificateSource],
    privateKey: NIOSSLPrivateKeySource
) -> TLSConfiguration {
    return TLSConfiguration(
        cipherSuites: defaultCipherSuites,
        verifySignatureAlgorithms: nil,
        signingSignatureAlgorithms: nil,
        minimumTLSVersion: .tlsv1,
        maximumTLSVersion: nil,
        // Servers do not verify the client by default.
        certificateVerification: .none,
        trustRoots: .default,
        certificateChain: certificateChain,
        privateKey: privateKey,
        applicationProtocols: [],
        shutdownTimeout: .seconds(5),
        keyLogCallback: nil,
        renegotiationSupport: .none,
        additionalTrustRoots: []
    )
}

/// Create a TLS configuration for use with server-side or client-side contexts that uses Pre-Shared Keys for TLS 1.2 and below.
///
/// This provides sensible defaults while requiring that you provide any data that is necessary
/// for server-side or client-side functionality. This configuration uses Pre-Shared Keys instead of certificates.
///
/// For customising fields, modify the returned TLSConfiguration object.
public static func makePreSharedKeyConfiguration() -> TLSConfiguration {
    // No certificate chain or private key: PSK configurations authenticate with keys
    // supplied via the PSK provider properties instead of certificates.
    return TLSConfiguration(
        cipherSuites: defaultCipherSuites,
        verifySignatureAlgorithms: nil,
        signingSignatureAlgorithms: nil,
        minimumTLSVersion: .tlsv1,
        maximumTLSVersion: nil,
        certificateVerification: .none,
        trustRoots: .default,
        certificateChain: [],
        privateKey: nil,
        applicationProtocols: [],
        shutdownTimeout: .seconds(5),
        keyLogCallback: nil,
        renegotiationSupport: .none,
        additionalTrustRoots: []
    )
}

/// Create a TLS configuration for use with server-side contexts that expect to validate a client
/// certificate (often called mTLS).
///
/// This provides sensible defaults while requiring that you provide any data that is necessary
/// for server-side function. For servers that don't need mTLS, try
/// ``TLSConfiguration/makeServerConfiguration(certificateChain:privateKey:)`` instead.
///
/// This configuration is very similar to ``TLSConfiguration/makeServerConfiguration(certificateChain:privateKey:)`` but
/// adds a `trustRoots` requirement. These roots will be used to validate the certificate
/// presented by the peer. It also sets the ``certificateVerification`` field to
/// ``CertificateVerification/noHostnameVerification``, which enables verification but disables
/// any hostname checking, which cannot succeed in a server context.
///
/// For customising fields, modify the returned TLSConfiguration object.
public static func makeServerConfigurationWithMTLS(
    certificateChain: [NIOSSLCertificateSource],
    privateKey: NIOSSLPrivateKeySource,
    trustRoots: NIOSSLTrustRoots
) -> TLSConfiguration {
    return TLSConfiguration(
        cipherSuites: defaultCipherSuites,
        verifySignatureAlgorithms: nil,
        signingSignatureAlgorithms: nil,
        minimumTLSVersion: .tlsv1,
        maximumTLSVersion: nil,
        // Verify the client certificate, but skip hostname checks: they cannot
        // succeed in a server context.
        certificateVerification: .noHostnameVerification,
        trustRoots: trustRoots,
        certificateChain: certificateChain,
        privateKey: privateKey,
        applicationProtocols: [],
        shutdownTimeout: .seconds(5),
        keyLogCallback: nil,
        renegotiationSupport: .none,
        additionalTrustRoots: [],
        sendCANameList: false,
        pskClientProvider: nil,
        pskServerProvider: nil,
        pskHint: nil
    )
}
}

// MARK: Deprecated constructors.
extension TLSConfiguration {
/// Create a TLS configuration for use with server-side contexts. This allows setting the ``NIOTLSCipher`` property specifically.
///
/// This provides sensible defaults while requiring that you provide any data that is necessary
/// for server-side function. For client use, try ``TLSConfiguration/makeClientConfiguration()`` instead.
@available(*, deprecated, renamed: "makeServerConfiguration(certificateChain:privateKey:)")
public static func forServer(
    certificateChain: [NIOSSLCertificateSource],
    privateKey: NIOSSLPrivateKeySource,
    cipherSuites: [NIOTLSCipher],
    verifySignatureAlgorithms: [SignatureAlgorithm]? = nil,
    signingSignatureAlgorithms: [SignatureAlgorithm]? = nil,
    minimumTLSVersion: TLSVersion = .tlsv1,
    maximumTLSVersion: TLSVersion? = nil,
    certificateVerification: CertificateVerification = .none,
    trustRoots: NIOSSLTrustRoots = .default,
    applicationProtocols: [String] = [],
    shutdownTimeout: TimeAmount = .seconds(5),
    keyLogCallback: NIOSSLKeyLogCallback? = nil,
    additionalTrustRoots: [NIOSSLAdditionalTrustRoots] = []
) -> TLSConfiguration {
    return TLSConfiguration(
        cipherSuiteValues: cipherSuites,
        verifySignatureAlgorithms: verifySignatureAlgorithms,
        signingSignatureAlgorithms: signingSignatureAlgorithms,
        minimumTLSVersion: minimumTLSVersion,
        maximumTLSVersion: maximumTLSVersion,
        certificateVerification: certificateVerification,
        trustRoots: trustRoots,
        certificateChain: certificateChain,
        privateKey: privateKey,
        applicationProtocols: applicationProtocols,
        shutdownTimeout: shutdownTimeout,
        keyLogCallback: keyLogCallback,
        renegotiationSupport: .none,  // Servers never support renegotiation: there's no point.
        additionalTrustRoots: additionalTrustRoots
    )
}

/// Create a TLS configuration for use with server-side contexts.
///
/// This provides sensible defaults while requiring that you provide any data that is necessary
/// for server-side function. For client use, try ``TLSConfiguration/makeClientConfiguration()`` instead.
@available(*, deprecated, renamed: "makeServerConfiguration(certificateChain:privateKey:)")
public static func forServer(
    certificateChain: [NIOSSLCertificateSource],
    privateKey: NIOSSLPrivateKeySource,
    cipherSuites: String = defaultCipherSuites,
    minimumTLSVersion: TLSVersion = .tlsv1,
    maximumTLSVersion: TLSVersion? = nil,
    certificateVerification: CertificateVerification = .none,
    trustRoots: NIOSSLTrustRoots = .default,
    applicationProtocols: [String] = [],
    shutdownTimeout: TimeAmount = .seconds(5),
    keyLogCallback: NIOSSLKeyLogCallback? = nil
) -> TLSConfiguration {
    return TLSConfiguration(
        cipherSuites: cipherSuites,
        verifySignatureAlgorithms: nil,
        signingSignatureAlgorithms: nil,
        minimumTLSVersion: minimumTLSVersion,
        maximumTLSVersion: maximumTLSVersion,
        certificateVerification: certificateVerification,
        trustRoots: trustRoots,
        certificateChain: certificateChain,
        privateKey: privateKey,
        applicationProtocols: applicationProtocols,
        shutdownTimeout: shutdownTimeout,
        keyLogCallback: keyLogCallback,
        renegotiationSupport: .none,  // Servers never support renegotiation: there's no point.
        additionalTrustRoots: []
    )
}

/// Create a TLS configuration for use with server-side contexts.
///
/// This provides sensible defaults while requiring that you provide any data that is necessary
/// for server-side function. For client use, try ``TLSConfiguration/makeClientConfiguration()`` instead.
@available(*, deprecated, renamed: "makeServerConfiguration(certificateChain:privateKey:)")
public static func forServer(
    certificateChain: [NIOSSLCertificateSource],
    privateKey: NIOSSLPrivateKeySource,
    cipherSuites: String = defaultCipherSuites,
    verifySignatureAlgorithms: [SignatureAlgorithm]? = nil,
    signingSignatureAlgorithms: [SignatureAlgorithm]? = nil,
    minimumTLSVersion: TLSVersion = .tlsv1,
    maximumTLSVersion: TLSVersion? = nil,
    certificateVerification: CertificateVerification = .none,
    trustRoots: NIOSSLTrustRoots = .default,
    applicationProtocols: [String] = [],
    shutdownTimeout: TimeAmount = .seconds(5),
    keyLogCallback: NIOSSLKeyLogCallback? = nil
) -> TLSConfiguration {
    return TLSConfiguration(
        cipherSuites: cipherSuites,
        verifySignatureAlgorithms: verifySignatureAlgorithms,
        signingSignatureAlgorithms: signingSignatureAlgorithms,
        minimumTLSVersion: minimumTLSVersion,
        maximumTLSVersion: maximumTLSVersion,
        certificateVerification: certificateVerification,
        trustRoots: trustRoots,
        certificateChain: certificateChain,
        privateKey: privateKey,
        applicationProtocols: applicationProtocols,
        shutdownTimeout: shutdownTimeout,
        keyLogCallback: keyLogCallback,
        renegotiationSupport: .none,  // Servers never support renegotiation: there's no point.
        additionalTrustRoots: []
    )
}

/// Create a TLS configuration for use with server-side contexts.
///
/// This provides sensible defaults while requiring that you provide any data that is necessary
/// for server-side function. For client use, try ``TLSConfiguration/makeClientConfiguration()`` instead.
@available(*, deprecated, renamed: "makeServerConfiguration(certificateChain:privateKey:)")
public static func forServer(
    certificateChain: [NIOSSLCertificateSource],
    privateKey: NIOSSLPrivateKeySource,
    cipherSuites: String = defaultCipherSuites,
    verifySignatureAlgorithms: [SignatureAlgorithm]? = nil,
    signingSignatureAlgorithms: [SignatureAlgorithm]? = nil,
    minimumTLSVersion: TLSVersion = .tlsv1,
    maximumTLSVersion: TLSVersion? = nil,
    certificateVerification: CertificateVerification = .none,
    trustRoots: NIOSSLTrustRoots = .default,
    applicationProtocols: [String] = [],
    shutdownTimeout: TimeAmount = .seconds(5),
    keyLogCallback: NIOSSLKeyLogCallback? = nil,
    additionalTrustRoots: [NIOSSLAdditionalTrustRoots]
) -> TLSConfiguration {
    return TLSConfiguration(
        cipherSuites: cipherSuites,
        verifySignatureAlgorithms: verifySignatureAlgorithms,
        signingSignatureAlgorithms: signingSignatureAlgorithms,
        minimumTLSVersion: minimumTLSVersion,
        maximumTLSVersion: maximumTLSVersion,
        certificateVerification: certificateVerification,
        trustRoots: trustRoots,
        certificateChain: certificateChain,
        privateKey: privateKey,
        applicationProtocols: applicationProtocols,
        shutdownTimeout: shutdownTimeout,
        keyLogCallback: keyLogCallback,
        renegotiationSupport: .none,  // Servers never support renegotiation: there's no point.
        additionalTrustRoots: additionalTrustRoots
    )
}

/// Creates a TLS configuration for use with client-side contexts. This allows setting the ``NIOTLSCipher`` property specifically.
///
/// This provides sensible defaults, and can be used without customisation. For server-side
/// contexts, you should use ``TLSConfiguration/makeServerConfiguration(certificateChain:privateKey:)`` instead.
@available(*, deprecated, renamed: "makeClientConfiguration()")
public static func forClient(
    cipherSuites: [NIOTLSCipher],
    verifySignatureAlgorithms: [SignatureAlgorithm]? = nil,
    signingSignatureAlgorithms: [SignatureAlgorithm]? = nil,
    minimumTLSVersion: TLSVersion = .tlsv1,
    maximumTLSVersion: TLSVersion? = nil,
    certificateVerification: CertificateVerification = .fullVerification,
    trustRoots: NIOSSLTrustRoots = .default,
    certificateChain: [NIOSSLCertificateSource] = [],
    privateKey: NIOSSLPrivateKeySource? = nil,
    applicationProtocols: [String] = [],
    shutdownTimeout: TimeAmount = .seconds(5),
    keyLogCallback: NIOSSLKeyLogCallback? = nil,
    renegotiationSupport: NIORenegotiationSupport = .none,
    additionalTrustRoots: [NIOSSLAdditionalTrustRoots] = []
) -> TLSConfiguration {
    return TLSConfiguration(
        cipherSuiteValues: cipherSuites,
        verifySignatureAlgorithms: verifySignatureAlgorithms,
        signingSignatureAlgorithms: signingSignatureAlgorithms,
        minimumTLSVersion: minimumTLSVersion,
        maximumTLSVersion: maximumTLSVersion,
        certificateVerification: certificateVerification,
        trustRoots: trustRoots,
        certificateChain: certificateChain,
        privateKey: privateKey,
        applicationProtocols: applicationProtocols,
        shutdownTimeout: shutdownTimeout,
        keyLogCallback: keyLogCallback,
        renegotiationSupport: renegotiationSupport,
        additionalTrustRoots: additionalTrustRoots
    )
}

/// Creates a TLS configuration for use with client-side contexts.
///
/// This provides sensible defaults, and can be used without customisation. For server-side
/// contexts, you should use ``TLSConfiguration/makeServerConfiguration(certificateChain:privateKey:)`` instead.
@available(*, deprecated, renamed: "makeClientConfiguration()")
public static func forClient(
    cipherSuites: String = defaultCipherSuites,
    minimumTLSVersion: TLSVersion = .tlsv1,
    maximumTLSVersion: TLSVersion? = nil,
    certificateVerification: CertificateVerification = .fullVerification,
    trustRoots: NIOSSLTrustRoots = .default,
    certificateChain: [NIOSSLCertificateSource] = [],
    privateKey: NIOSSLPrivateKeySource? = nil,
    applicationProtocols: [String] = [],
    shutdownTimeout: TimeAmount = .seconds(5),
    keyLogCallback: NIOSSLKeyLogCallback? = nil
) -> TLSConfiguration {
    return TLSConfiguration(
        cipherSuites: cipherSuites,
        verifySignatureAlgorithms: nil,
        signingSignatureAlgorithms: nil,
        minimumTLSVersion: minimumTLSVersion,
        maximumTLSVersion: maximumTLSVersion,
        certificateVerification: certificateVerification,
        trustRoots: trustRoots,
        certificateChain: certificateChain,
        privateKey: privateKey,
        applicationProtocols: applicationProtocols,
        shutdownTimeout: shutdownTimeout,
        keyLogCallback: keyLogCallback,
        renegotiationSupport: .none,  // Default value is here for backward-compatibility.
        additionalTrustRoots: []
    )
}

/// Creates a TLS configuration for use with client-side contexts.
///
/// This provides sensible defaults, and can be used without customisation. For server-side
/// contexts, you should use ``TLSConfiguration/makeServerConfiguration(certificateChain:privateKey:)`` instead.
@available(*, deprecated, renamed: "makeClientConfiguration()")
public static func forClient(
    cipherSuites: String = defaultCipherSuites,
    minimumTLSVersion: TLSVersion = .tlsv1,
    maximumTLSVersion: TLSVersion? = nil,
    certificateVerification: CertificateVerification = .fullVerification,
    trustRoots: NIOSSLTrustRoots = .default,
    certificateChain: [NIOSSLCertificateSource] = [],
    privateKey: NIOSSLPrivateKeySource? = nil,
    applicationProtocols: [String] = [],
    shutdownTimeout: TimeAmount = .seconds(5),
    keyLogCallback: NIOSSLKeyLogCallback? = nil,
    renegotiationSupport: NIORenegotiationSupport
) -> TLSConfiguration {
    return TLSConfiguration(
        cipherSuites: cipherSuites,
        verifySignatureAlgorithms: nil,
        signingSignatureAlgorithms: nil,
        minimumTLSVersion: minimumTLSVersion,
        maximumTLSVersion: maximumTLSVersion,
        certificateVerification: certificateVerification,
        trustRoots: trustRoots,
        certificateChain: certificateChain,
        privateKey: privateKey,
        applicationProtocols: applicationProtocols,
        shutdownTimeout: shutdownTimeout,
        keyLogCallback: keyLogCallback,
        renegotiationSupport: renegotiationSupport,
        additionalTrustRoots: []
    )
}

/// Creates a TLS configuration for use with client-side contexts.
///
/// This provides sensible defaults, and can be used without customisation. For server-side
/// contexts, you should use ``TLSConfiguration/makeServerConfiguration(certificateChain:privateKey:)`` instead.
@available(*, deprecated, renamed: "makeClientConfiguration()")
public static func forClient(
    cipherSuites: String = defaultCipherSuites,
    verifySignatureAlgorithms: [SignatureAlgorithm]? = nil,
    signingSignatureAlgorithms: [SignatureAlgorithm]? = nil,
    minimumTLSVersion: TLSVersion = .tlsv1,
    maximumTLSVersion: TLSVersion? = nil,
    certificateVerification: CertificateVerification = .fullVerification,
    trustRoots: NIOSSLTrustRoots = .default,
    certificateChain: [NIOSSLCertificateSource] = [],
    privateKey: NIOSSLPrivateKeySource? = nil,
    applicationProtocols: [String] = [],
    shutdownTimeout: TimeAmount = .seconds(5),
    keyLogCallback: NIOSSLKeyLogCallback? = nil,
    renegotiationSupport: NIORenegotiationSupport
) -> TLSConfiguration {
    return TLSConfiguration(
        cipherSuites: cipherSuites,
        verifySignatureAlgorithms: verifySignatureAlgorithms,
        signingSignatureAlgorithms: signingSignatureAlgorithms,
        minimumTLSVersion: minimumTLSVersion,
        maximumTLSVersion: maximumTLSVersion,
        certificateVerification: certificateVerification,
        trustRoots: trustRoots,
        certificateChain: certificateChain,
        privateKey: privateKey,
        applicationProtocols: applicationProtocols,
        shutdownTimeout: shutdownTimeout,
        keyLogCallback: keyLogCallback,
        renegotiationSupport: renegotiationSupport,
        additionalTrustRoots: []
    )
}

/// Creates a TLS configuration for use with client-side contexts.
///
/// This provides sensible defaults, and can be used without customisation. For server-side
/// contexts, you should use ``TLSConfiguration/makeServerConfiguration(certificateChain:privateKey:)`` instead.
@available(*, deprecated, renamed: "makeClientConfiguration()")
public static func forClient(
    cipherSuites: String = defaultCipherSuites,
    verifySignatureAlgorithms: [SignatureAlgorithm]? = nil,
    signingSignatureAlgorithms: [SignatureAlgorithm]? = nil,
    minimumTLSVersion: TLSVersion = .tlsv1,
    maximumTLSVersion: TLSVersion? = nil,
    certificateVerification: CertificateVerification = .fullVerification,
    trustRoots: NIOSSLTrustRoots = .default,
    certificateChain: [NIOSSLCertificateSource] = [],
    privateKey: NIOSSLPrivateKeySource? = nil,
    applicationProtocols: [String] = [],
    shutdownTimeout: TimeAmount = .seconds(5),
    keyLogCallback: NIOSSLKeyLogCallback? = nil,
    renegotiationSupport: NIORenegotiationSupport = .none,
    additionalTrustRoots: [NIOSSLAdditionalTrustRoots]
) -> TLSConfiguration {
    return TLSConfiguration(
        cipherSuites: cipherSuites,
        verifySignatureAlgorithms: verifySignatureAlgorithms,
        signingSignatureAlgorithms: signingSignatureAlgorithms,
        minimumTLSVersion: minimumTLSVersion,
        maximumTLSVersion: maximumTLSVersion,
        certificateVerification: certificateVerification,
        trustRoots: trustRoots,
        certificateChain: certificateChain,
        privateKey: privateKey,
        applicationProtocols: applicationProtocols,
        shutdownTimeout: shutdownTimeout,
        keyLogCallback: keyLogCallback,
        renegotiationSupport: renegotiationSupport,
        additionalTrustRoots: additionalTrustRoots
    )
}
}

extension TLSConfiguration {
/// Provides the resolved signature algorithms for signing, if any.
///
/// Users can override the signature algorithms in two ways. Firstly, they can provide a
/// value for the `signingSignatureAlgorithms` field in the `TLSConfiguration` structure.
/// This acts as an artificial limiter, preventing certain algorithms from being used even
/// though a key might nominally support them.
///
/// Secondly, users can provide a custom key. This custom key is only capable of using
/// certain signing algorithms.
///
/// This property resolves these two into a single unified set by diffing them together.
/// If there is no override (i.e. a native key and no override of the
/// `signingSignatureAlgorithms` field then this returns `nil`.
internal var resolvedSigningSignatureAlgorithms: [SignatureAlgorithm]? {
    switch (self.signingSignatureAlgorithms, self.privateKey?.customSigningAlgorithms) {
    case (.none, .none):
        // No overrides.
        return nil
    case (.some(let manualOverrides), .none):
        return manualOverrides
    case (.none, .some(let keyRequirements)):
        return keyRequirements
    case (.some(let manualOverrides), .some(let keyRequirements)):
        // Here we have to filter the set. We assume the two lists are small, and so we
        // just use `Array.filter` instead of composing into a Set. Note that the order
        // here is _semantic_: we have to filter the manual overrides array because
        // that order was specified by the user, and we want to honor it.
        return manualOverrides.filter { keyRequirements.contains($0) }
    }
}
}

extension NIOSSLPrivateKeySource {
/// The custom signing algorithms required by this private key, if any.
///
/// Is `nil` when the key is a file-backed key, as this is handled by BoringSSL as a native key.
fileprivate var customSigningAlgorithms: [SignatureAlgorithm]? {
    switch self {
    case .file:
        return nil
    case .privateKey(let key):
        return key.customSigningAlgorithms
    }
}
}

================================================
FILE: Sources/NIOSSL/UniversalBootstrapSupport.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2020-2021 Apple Inc.
and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===//

import NIOCore

/// A wrapper around the custom verification callback types (``NIOSSLCustomVerificationCallback`` and ``NIOSSLCustomVerificationCallbackWithMetadata``)
// NOTE(review): the generic arguments on `EventLoopPromise` below appear to have been
// stripped during extraction (the two payload types are otherwise identical) — confirm the
// promise value types against the upstream file before relying on them.
enum CustomCallback: Sendable {
    // Plain verification callback; see ``NIOSSLCustomVerificationCallback``.
    case callback(@Sendable ([NIOSSLCertificate], EventLoopPromise) -> Void)
    // Verification callback variant that also surfaces metadata; see
    // ``NIOSSLCustomVerificationCallbackWithMetadata``.
    case callbackWithMetadata(
        @Sendable ([NIOSSLCertificate], EventLoopPromise) -> Void
    )

    // Bridges either callback variant into the `CustomVerifyManager` consumed by the
    // handler machinery.
    var manager: CustomVerifyManager {
        switch self {
        /// See ``NIOSSLCustomVerificationCallback`` for more documentation
        case .callback(let callback):
            CustomVerifyManager(callback: callback)
        /// See ``NIOSSLCustomVerificationCallbackWithMetadata`` for more documentation
        case .callbackWithMetadata(let callbackWithMetadata):
            CustomVerifyManager(callback: callbackWithMetadata)
        }
    }
}

/// A TLS provider to bootstrap TLS-enabled connections with `NIOClientTCPBootstrap`.
///
/// Example:
///
///     // TLS setup.
///     let configuration = TLSConfiguration.makeClientConfiguration()
///     let sslContext = try NIOSSLContext(configuration: configuration)
///
///     // Creating the "universal bootstrap" with the `NIOSSLClientTLSProvider`.
///     let tlsProvider = NIOSSLClientTLSProvider(context: sslContext, serverHostname: "example.com")
///     let bootstrap = NIOClientTCPBootstrap(ClientBootstrap(group: group), tls: tlsProvider)
///
///     // Bootstrapping a connection using the "universal bootstrapping mechanism"
///     let connection = bootstrap.enableTLS()
///         .connect(to: "example.com")
///         .wait()
// NOTE(review): the struct's generic parameter clause (e.g. `<Bootstrap: …>`) appears to
// have been stripped by extraction — `typealias Bootstrap = Bootstrap` only makes sense
// with a generic parameter named `Bootstrap` in scope; confirm upstream.
public struct NIOSSLClientTLSProvider: NIOClientTLSProvider {
    public typealias Bootstrap = Bootstrap

    // TLS context used when creating the handler for each connection.
    let context: NIOSSLContext
    // Hostname used for SNI and certificate validation, if known.
    let serverHostname: String?
    // Optional override of NIOSSL's normal certificate verification logic.
    let customVerificationCallback: CustomCallback?
/// See ``_NIOAdditionalPeerCertificateVerificationCallback`` for more documentation
// NOTE(review): the generic argument on `EventLoopFuture` appears stripped by extraction —
// confirm the future's value type against the upstream file.
let additionalPeerCertificateVerificationCallback:
    (@Sendable (NIOSSLCertificate, Channel) -> EventLoopFuture)?

// Shared designated initializer: both public initializers below funnel through here.
internal init(
    context: NIOSSLContext,
    serverHostname: String?,
    customVerificationCallback: CustomCallback? = nil,
    additionalPeerCertificateVerificationCallback: (
        @Sendable (NIOSSLCertificate, Channel) -> EventLoopFuture
    )? = nil
) throws {
    // Validate the SNI hostname eagerly; `enableTLS` later force-tries handler creation
    // on the assumption that this check already passed.
    try serverHostname.map { try $0.validateSNIServerName() }
    self.context = context
    self.serverHostname = serverHostname
    self.customVerificationCallback = customVerificationCallback
    self.additionalPeerCertificateVerificationCallback = additionalPeerCertificateVerificationCallback
}

/// Construct the TLS provider with the necessary configuration.
///
/// - parameters:
///     - context: The ``NIOSSLContext`` to use with the connection.
///     - serverHostname: The hostname of the server we're trying to connect to, if known. This will be used in the SNI extension,
///         and used to validate the server certificate.
///     - customVerificationCallback: A callback to use that will override NIOSSL's normal verification logic. See ``NIOSSLCustomVerificationCallback`` for complete documentation.
///
///         If set, this callback is provided the certificates presented by the peer. NIOSSL will not have pre-processed them. The callback will not be used if the
///         ``TLSConfiguration`` that was used to construct the ``NIOSSLContext`` has ``TLSConfiguration/certificateVerification`` set to ``CertificateVerification/none``.
@preconcurrency
public init(
    context: NIOSSLContext,
    serverHostname: String?,
    customVerificationCallback: (
        @Sendable ([NIOSSLCertificate], EventLoopPromise) -> Void
    )? = nil
) throws {
    // Wraps the plain callback (when present) in the `CustomCallback.callback` case and
    // delegates to the shared internal initializer.
    try self.init(
        context: context,
        serverHostname: serverHostname,
        customVerificationCallback: customVerificationCallback.map { .callback($0) },
        additionalPeerCertificateVerificationCallback: nil
    )
}

/// Construct the TLS provider with the necessary configuration.
    ///
    /// - parameters:
    ///     - context: The ``NIOSSLContext`` to use with the connection.
    ///     - serverHostname: The hostname of the server we're trying to connect to, if known. This will be used in the SNI extension,
    ///         and used to validate the server certificate.
    ///     - customVerificationCallbackWithMetadata: A callback to use that will override NIOSSL's normal verification
    ///         logic. If validation is successful, the peer's validated certificate chain can be returned, and later
    ///         accessed via ``NIOSSLHandler/peerValidatedCertificateChain``. The callback will not be used if the
    ///         ``TLSConfiguration`` that was used to construct the ``NIOSSLContext`` has
    ///         ``TLSConfiguration/certificateVerification`` set to ``CertificateVerification/none``.
    ///
    ///         - This callback is provided the certificates presented by the peer. NIOSSL will not have pre-processed
    ///             them. Therefore, a validated chain must be derived *within* this callback (potentially involving fetching
    ///             additional intermediate certificates). The *validated* certificate chain returned in the promise result
    ///             **must** be a verified path to a trusted root. Importantly, the certificates presented by the peer should
    ///             not be assumed to be valid.
    public init(
        context: NIOSSLContext,
        serverHostname: String?,
        customVerificationCallbackWithMetadata: @escaping (
            @Sendable ([NIOSSLCertificate], EventLoopPromise) -> Void
        )
    ) throws {
        try self.init(
            context: context,
            serverHostname: serverHostname,
            customVerificationCallback: .callbackWithMetadata(customVerificationCallbackWithMetadata),
            additionalPeerCertificateVerificationCallback: nil
        )
    }

    /// Enable TLS on the bootstrap. This is not a function you will typically call as a user, it is called by
    /// `NIOClientTCPBootstrap`.
    public func enableTLS(_ bootstrap: Bootstrap) -> Bootstrap {
        // NIOSSLClientHandler.init only throws because of `malloc` error and invalid SNI hostnames.
        // We want to crash
        // on malloc error and we pre-checked the SNI hostname in `init` so that should be impossible here.
        bootstrap.protocolHandlers {
            // Capture only value-typed state so the closure stays Sendable.
            [context, serverHostname, customVerificationCallback, additionalPeerCertificateVerificationCallback] in
            [
                try! NIOSSLClientHandler(
                    context: context,
                    serverHostname: serverHostname,
                    optionalCustomVerificationCallbackManager: customVerificationCallback?.manager,
                    optionalAdditionalPeerCertificateVerificationCallback: additionalPeerCertificateVerificationCallback
                )
            ]
        }
    }
}

extension NIOSSLClientTLSProvider: Sendable where Bootstrap: Sendable {}

================================================ FILE: Sources/NIOSSL/UnsafeKeyAndChainTarget.swift ================================================

//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2024 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL

/// Abstracts over the two BoringSSL objects a certificate chain and private key can
/// be installed on: a whole `SSL_CTX`, or a single `SSL` connection.
enum UnsafeKeyAndChainTarget {
    case sslContext(OpaquePointer)
    case ssl(OpaquePointer)

    /// Replaces whatever chain is currently installed so that `certificateChain`
    /// becomes the only chain on the target.
    func useCertificateChain(
        _ certificateChain: [NIOSSLCertificateSource]
    ) throws {
        // Clear the existing chain first.
        // So that when this function is called, `certificateChain` becomes the only certificates in the context.
        self.clearAdditionalChainCertificates()
        // The first `.certificate` source is the leaf; everything after it is chain.
        var leaf = true
        for source in certificateChain {
            switch source {
            case .file(let p):
                // A chain file carries leaf + chain together, so nothing after it is a leaf.
                self.useCertificateChainFile(p)
                leaf = false
            case .certificate(let cert):
                if leaf {
                    try self.setLeafCertificate(cert)
                    leaf = false
                } else {
                    try self.addAdditionalChainCertificate(cert)
                }
            }
        }
    }

    /// Loads a leaf certificate plus chain from a PEM file; traps if BoringSSL rejects it.
    func useCertificateChainFile(_ path: String) {
        let result = path.withCString { (pointer) -> CInt in
            switch self {
            case .sslContext(let context):
                CNIOBoringSSL_SSL_CTX_use_certificate_chain_file(context, pointer)
            case .ssl(let ssl):
                // NOTE(review): this branch passes the `.ssl` payload to the
                // `SSL_CTX_` variant of the API. It compiles only because both
                // payloads are `OpaquePointer`; passing an `SSL*` where an
                // `SSL_CTX*` is expected would be a real bug. Confirm against the
                // upstream file and the BoringSSL headers whether this is intended
                // (or whether text was lost in extraction here).
                CNIOBoringSSL_SSL_CTX_use_certificate_chain_file(ssl, pointer)
            }
        }
        precondition(result == 1)
    }

    /// Installs `cert` as the leaf certificate.
    /// - Throws: `NIOSSLError.failedToLoadCertificate` when BoringSSL refuses it.
    func setLeafCertificate(_ cert: NIOSSLCertificate) throws {
        let rc = cert.withUnsafeMutableX509Pointer { ref in
            switch self {
            case .sslContext(let context):
                CNIOBoringSSL_SSL_CTX_use_certificate(context, ref)
            case .ssl(let ssl):
                CNIOBoringSSL_SSL_use_certificate(ssl, ref)
            }
        }
        guard rc == 1 else {
            throw NIOSSLError.failedToLoadCertificate
        }
    }

    /// Drops any previously-installed extra chain certificates from the target.
    func clearAdditionalChainCertificates() {
        switch self {
        case .sslContext(let context):
            CNIOBoringSSL_SSL_CTX_clear_chain_certs(context)
        case .ssl(let ssl):
            CNIOBoringSSL_SSL_clear_chain_certs(ssl)
        }
    }

    /// Appends `cert` to the extra chain (the `add1` API takes its own reference).
    /// - Throws: `NIOSSLError.failedToLoadCertificate` when BoringSSL refuses it.
    func addAdditionalChainCertificate(_ cert: NIOSSLCertificate) throws {
        let rc = cert.withUnsafeMutableX509Pointer { ref in
            switch self {
            case .sslContext(let context):
                CNIOBoringSSL_SSL_CTX_add1_chain_cert(context, ref)
            case .ssl(let ssl):
                CNIOBoringSSL_SSL_add1_chain_cert(ssl, ref)
            }
        }
        guard rc == 1 else {
            throw NIOSSLError.failedToLoadCertificate
        }
    }

    /// Installs a private key from either an in-memory key or a file path.
    func usePrivateKeySource(_ privateKey: NIOSSLPrivateKeySource) throws {
        switch privateKey {
        case .file(let p):
            try self.usePrivateKeyFile(p)
        case .privateKey(let key):
            try self.setPrivateKey(key)
        }
    }

    /// Installs an in-memory private key; for custom-backed keys it instead registers
    /// the custom private-key method so BoringSSL calls back for signing.
    func setPrivateKey(_ key: NIOSSLPrivateKey) throws {
        switch key.representation {
        case .native:
            let rc = key.withUnsafeMutableEVPPKEYPointer { ref in
                switch self {
                case .sslContext(let context):
                    CNIOBoringSSL_SSL_CTX_use_PrivateKey(context, ref)
                case
                .ssl(let ssl):
                    CNIOBoringSSL_SSL_use_PrivateKey(ssl, ref)
                }
            }
            guard 1 == rc else {
                throw NIOSSLError.failedToLoadPrivateKey
            }
        case .custom:
            // Custom keys never expose raw key material; BoringSSL will invoke the
            // registered private-key method instead.
            switch self {
            case .sslContext(let context):
                CNIOBoringSSL_SSL_CTX_set_private_key_method(context, customPrivateKeyMethod)
            case .ssl(let ssl):
                CNIOBoringSSL_SSL_set_private_key_method(ssl, customPrivateKeyMethod)
            }
        }
    }

    /// Loads a private key from a file, inferring the encoding from the extension:
    /// `.pem` → PEM, `.der`/`.key` → ASN.1.
    /// - Throws: `NIOSSLExtraError.unknownPrivateKeyFileType` for any other extension,
    ///   `NIOSSLError.failedToLoadPrivateKey` when BoringSSL rejects the file.
    func usePrivateKeyFile(_ path: String) throws {
        let pathExtension = path.split(separator: ".").last
        let fileType: CInt

        switch pathExtension?.lowercased() {
        case .some("pem"):
            fileType = SSL_FILETYPE_PEM
        case .some("der"), .some("key"):
            fileType = SSL_FILETYPE_ASN1
        default:
            throw NIOSSLExtraError.unknownPrivateKeyFileType(path: path)
        }

        let result = path.withCString { (pointer) -> CInt in
            switch self {
            case .sslContext(let context):
                CNIOBoringSSL_SSL_CTX_use_PrivateKey_file(context, pointer, fileType)
            case .ssl(let ssl):
                CNIOBoringSSL_SSL_use_PrivateKey_file(ssl, pointer, fileType)
            }
        }
        guard result == 1 else {
            throw NIOSSLError.failedToLoadPrivateKey
        }
    }
}

================================================ FILE: Sources/NIOSSLHTTP1Client/README.md ================================================

NIOSSLHTTP1Client
---

This sample application provides a https client. Invoke it using one of the following syntaxes.

```bash
swift run NIOSSLHTTP1Client # Gets a content on a server on ::1, port 4433, using SSL/TLS
swift run NIOSSLHTTP1Client "https://example.com" # Gets a content on a server on example.com, port 443, using SSL/TLS
swift run NIOSSLHTTP1Client "https://example.com:4433" # Gets a content on a server on example.com, port 4433, using SSL/TLS
```

================================================ FILE: Sources/NIOSSLHTTP1Client/main.swift ================================================

//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc.
// and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

import Foundation
import NIOCore
import NIOFoundationCompat
import NIOHTTP1
import NIOPosix
import NIOSSL

/// Prints the response head and streams the body to standard output, then closes the
/// channel and completes `promise` when the response ends.
///
/// NOTE(review): the generic parameters of `EventLoopPromise`/`EventLoopFuture`
/// (presumably `<Void>`) were stripped by extraction — confirm upstream.
private final class HTTPResponseHandler: ChannelInboundHandler {
    let promise: EventLoopPromise
    // Set the first time we initiate a close, so channelInactive can tell a clean
    // completion apart from an early connection drop.
    var closeFuture: EventLoopFuture? = nil

    init(_ promise: EventLoopPromise) {
        self.promise = promise
    }

    typealias InboundIn = HTTPClientResponsePart

    func channelRead(context: ChannelHandlerContext, data: NIOAny) {
        let httpResponsePart = unwrapInboundIn(data)
        switch httpResponsePart {
        case .head(let httpResponseHeader):
            print(
                "\(httpResponseHeader.version) \(httpResponseHeader.status.code) \(httpResponseHeader.status.reasonPhrase)"
            )
            for (name, value) in httpResponseHeader.headers {
                print("\(name): \(value)")
            }
        case .body(var byteBuffer):
            if let data = byteBuffer.readData(length: byteBuffer.readableBytes) {
                FileHandle.standardOutput.write(data)
            }
        case .end(_):
            // Normal completion: close the channel and unblock the main thread.
            closeFuture = context.channel.close()
            promise.succeed(())
        }
    }

    func channelInactive(context: ChannelHandlerContext) {
        // The connection died before `.end` arrived: report failure.
        if closeFuture == nil {
            closeFuture = context.channel.close()
            promise.fail(ChannelError.inputClosed)
        }
    }

    func errorCaught(context: ChannelHandlerContext, error: Error) {
        // Errors are printed and then treated as termination — the promise is
        // *succeeded* so this sample still exits cleanly.
        print("Error: ", error)
        closeFuture = context.channel.close()
        promise.succeed(())
    }
}

let arguments = CommandLine.arguments
let arg1 = arguments.dropFirst().first

let url: URL
var cert: [NIOSSLCertificateSource] = []
var key: NIOSSLPrivateKeySource?
var trustRoot: NIOSSLTrustRoots = .default

// Default target when no URL argument is given.
if let u = arg1 {
    url = URL(string: u)!
} else {
    url = URL(string: "https://::1:4433/get")!
}

// These extra arguments aren't expected to be used, we use them for integration tests only.
// Optional client certificate, private key and trust root for integration tests.
if let c = arguments.dropFirst(2).first {
    cert.append(contentsOf: try NIOSSLCertificate.fromPEMFile(c).map { .certificate($0) })
}
if let k = arguments.dropFirst(3).first {
    try! key = .privateKey(.init(file: k, format: .pem))
}
if let r = arguments.dropFirst(4).first {
    trustRoot = .file(r)
}

let eventLoopGroup = MultiThreadedEventLoopGroup(numberOfThreads: 1)
// NOTE(review): the promise/future generic parameters below were stripped by
// extraction (presumably `<Void>`) — confirm upstream.
let promise: EventLoopPromise = eventLoopGroup.next().makePromise(of: Void.self)
defer {
    // Block until the response handler completes the promise, then shut down.
    try! promise.futureResult.wait()
    try! eventLoopGroup.syncShutdownGracefully()
}

var tlsConfiguration = TLSConfiguration.makeClientConfiguration()
tlsConfiguration.trustRoots = trustRoot
tlsConfiguration.certificateChain = cert
tlsConfiguration.privateKey = key
tlsConfiguration.renegotiationSupport = .once
let sslContext = try! NIOSSLContext(configuration: tlsConfiguration)

let bootstrap = ClientBootstrap(group: eventLoopGroup)
    .channelOption(ChannelOptions.socket(SocketOptionLevel(SOL_SOCKET), SO_REUSEADDR), value: 1)
    .channelInitializer { channel in
        channel.eventLoop.makeCompletedFuture {
            // TLS first, then HTTP/1.1 codecs, then our response printer.
            let openSslHandler = try NIOSSLClientHandler(context: sslContext, serverHostname: url.host)
            try channel.pipeline.syncOperations.addHandler(openSslHandler)
            try channel.pipeline.syncOperations.addHTTPClientHandlers()
            try channel.pipeline.syncOperations.addHandler(HTTPResponseHandler(promise))
        }
    }

/// Sends a single GET request for `url` with `Connection: close`.
func sendRequest(_ channel: Channel) -> EventLoopFuture {
    var request = HTTPRequestHead(
        version: HTTPVersion(major: 1, minor: 1),
        method: HTTPMethod.GET,
        uri: url.absoluteString
    )
    request.headers = HTTPHeaders([
        ("Host", url.host!),
        ("User-Agent", "swift-nio"),
        ("Accept", "application/json"),
        ("Connection", "close"),
    ])
    channel.write(HTTPClientRequestPart.head(request), promise: nil)
    return channel.writeAndFlush(HTTPClientRequestPart.end(nil))
}

// Default to port 443 when the URL does not carry an explicit port.
bootstrap.connect(host: url.host!, port: url.port ??
    443)
    .flatMap { sendRequest($0) }
    .cascadeFailure(to: promise)

================================================ FILE: Sources/NIOSSLPerformanceTester/BenchManyWrites.swift ================================================

//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2019-2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

import NIOCore
import NIOEmbedded
import NIOSSL

/// Benchmark: perform one TLS handshake over a back-to-back pair of embedded
/// channels, then repeatedly push `writeSize`-byte writes through it.
final class BenchManyWrites: Benchmark {
    let clientContext: NIOSSLContext
    let serverContext: NIOSSLContext
    // Both channels "connect" to this fake address to trigger channelActive.
    let dummyAddress: SocketAddress
    let backToBack: BackToBackEmbeddedChannel
    let loopCount: Int
    let writeSize: Int
    // Allocated once in setUp and reused for every write.
    var buffer: ByteBuffer?
    init(loopCount: Int, writeSizeInBytes writeSize: Int) throws {
        self.loopCount = loopCount
        self.writeSize = writeSize
        // Server trusts its own self-signed test certificate; client trusts it too.
        self.serverContext = try NIOSSLContext(
            configuration: .makeServerConfiguration(
                certificateChain: [.certificate(.forTesting())],
                privateKey: .privateKey(.forTesting())
            )
        )
        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.trustRoots = try .certificates([.forTesting()])
        self.clientContext = try NIOSSLContext(configuration: clientConfig)
        self.dummyAddress = try SocketAddress(ipAddress: "1.2.3.4", port: 5678)
        self.backToBack = BackToBackEmbeddedChannel()
    }

    func setUp() throws {
        let serverHandler = NIOSSLServerHandler(context: self.serverContext)
        let clientHandler = try NIOSSLClientHandler(context: self.clientContext, serverHostname: "localhost")
        try self.backToBack.client.pipeline.syncOperations.addHandler(clientHandler)
        try self.backToBack.server.pipeline.syncOperations.addHandler(serverHandler)

        // To trigger activation of both channels we use connect().
        try self.backToBack.client.connect(to: dummyAddress).wait()
        try self.backToBack.server.connect(to: dummyAddress).wait()
        try self.backToBack.interactInMemory()

        self.buffer = self.backToBack.client.allocator.buffer(capacity: self.writeSize)
        self.buffer!.writeBytes(repeatElement(0, count: self.writeSize))
    }

    func tearDown() {}

    func run() throws -> Int {
        guard let buffer = self.buffer else {
            fatalError("Couldn't get buffer")
        }
        // NOTE(review): extraction loss from here to `measureAndPrint` below —
        // angle-bracketed spans (e.g. `0..<self.loopCount`) and everything between
        // two such points were eaten, swallowing this loop body, the rest of this
        // type, any sibling benchmark type (e.g. the repeated-handshakes one invoked
        // from main.swift), and the `Benchmark` protocol. The surviving fragments
        // are preserved verbatim; restore them from the upstream file.
        for _ in 0.. Int { for _ in 0.. Int }

// NOTE(review): the generic clause here (presumably `<B: Benchmark>`) was also
// stripped by extraction — confirm upstream.
/// Runs a benchmark's setUp/run/tearDown lifecycle under `measureAndPrint`.
func measureAndPrint(desc: String, benchmark bench: B) throws {
    try bench.setUp()
    defer {
        bench.tearDown()
    }
    try measureAndPrint(desc: desc) {
        try bench.run()
    }
}

================================================ FILE: Sources/NIOSSLPerformanceTester/main.swift ================================================

//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2019-2021 Apple Inc.
// and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

import Dispatch
import Foundation

// MARK: Test Harness

// Suffix appended to every measurement label when built in debug mode.
nonisolated(unsafe) var warning: String = ""
assert(
    {
        // This closure only runs in debug builds: assert() is compiled out in release.
        print("======================================================")
        print("= YOU ARE RUNNING NIOPerformanceTester IN DEBUG MODE =")
        print("======================================================")
        warning = " <<< DEBUG MODE >>>"
        return true
    }()
)

/// Runs `fn` once to warm up (result discarded), then ten timed iterations.
/// - Returns: the ten wall-clock durations, in order.
public func measure(_ fn: () throws -> Int) rethrows -> [TimeInterval] {
    func measureOne(_ fn: () throws -> Int) rethrows -> TimeInterval {
        let start = Date()
        _ = try fn()
        let end = Date()
        return end.timeIntervalSince(start)
    }

    _ = try measureOne(fn)  // pre-heat and throw away
    var measurements = Array(repeating: 0.0, count: 10)
    for i in 0..<10 {
        measurements[i] = try measureOne(fn)
    }

    return measurements
}

// Benchmark names passed on the command line; empty means "run everything".
let limitSet = CommandLine.arguments.dropFirst()

/// Measures `fn` and prints its timings under `desc`, unless a non-empty limit set
/// was given that does not contain `desc`.
public func measureAndPrint(desc: String, fn: () throws -> Int) rethrows {
    if limitSet.count == 0 || limitSet.contains(desc) {
        print("measuring\(warning): \(desc): ", terminator: "")
        let measurements = try measure(fn)
        print(measurements.reduce("") { $0 + "\($1), " })
    } else {
        print("skipping '\(desc)', limit set = \(limitSet)")
    }
}

// MARK: Utilities

try measureAndPrint(desc: "repeated_handshakes", benchmark: try BenchRepeatedHandshakes(loopCount: 1000))
try measureAndPrint(desc: "many_writes_512b", benchmark: try BenchManyWrites(loopCount: 2000, writeSizeInBytes: 512))

================================================ FILE: Sources/NIOSSLPerformanceTester/shared.swift ================================================

//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
//
// Copyright (c) 2019-2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

import Foundation
import NIOCore
import NIOEmbedded
import NIOSSL

/// A pair of `EmbeddedChannel`s sharing one `EmbeddedEventLoop`, plus a pump that
/// shuttles each side's outbound data into the other side's inbound path.
class BackToBackEmbeddedChannel {
    private(set) var client: EmbeddedChannel
    private(set) var server: EmbeddedChannel
    private var loop: EmbeddedEventLoop

    init() {
        self.loop = EmbeddedEventLoop()
        self.client = EmbeddedChannel(loop: self.loop)
        self.server = EmbeddedChannel(loop: self.loop)
    }

    /// Runs any tasks queued on the shared embedded event loop.
    func run() {
        self.loop.run()
    }

    /// Exchanges pending outbound `IOData` between client and server until neither
    /// side produces anything new (i.e. the in-memory "network" is quiescent).
    func interactInMemory() throws {
        var workToDo = true

        while workToDo {
            workToDo = false

            self.loop.run()
            let clientDatum = try self.client.readOutbound(as: IOData.self)
            let serverDatum = try self.server.readOutbound(as: IOData.self)

            if let clientMsg = clientDatum {
                try self.server.writeInbound(clientMsg)
                workToDo = true
            }

            if let serverMsg = serverDatum {
                try self.client.writeInbound(serverMsg)
                workToDo = true
            }
        }
    }
}

extension NIOSSLCertificate {
    // Self-signed localhost test certificate used by the benchmarks.
    static func forTesting() throws -> NIOSSLCertificate {
        try .init(bytes: certificatePemBytes, format: .pem)
    }
}

extension NIOSSLPrivateKey {
    // Private key matching the certificate above.
    static func forTesting() throws -> NIOSSLPrivateKey {
        try .init(bytes: keyPemBytes, format: .pem)
    }
}

private let certificatePemBytes = Array(
    """
    -----BEGIN CERTIFICATE-----
    MIIBTzCB9qADAgECAhQkvv72Je/v+B/cgJ53f84O82z6WTAKBggqhkjOPQQDAjAU
    MRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTkxMTI3MTAxMjMwWhcNMjkxMTI0MTAx
    MjMwWjAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwWTATBgcqhkjOPQIBBggqhkjOPQMB
    BwNCAAShtZ9TRt7I+7Y0o99XUkrgSYmUmpr4K8CB0IkTCX6b1tXp3Xqs1V5BckTd
    qrls+zsm3AfeiNBb9EDdxiX9DdzuoyYwJDAUBgNVHREEDTALgglsb2NhbGhvc3Qw
    DAYDVR0TAQH/BAIwADAKBggqhkjOPQQDAgNIADBFAiAKxYON+YTnIHNR0R6SLP8R
    R7hjsjV5NDs18XLoeRnA1gIhANwyggmE6NQW/r9l59fexj/ZrjaS3jYOTNCfC1Lo
    5NgJ
    -----END CERTIFICATE-----
    """.utf8
)

private
let keyPemBytes = Array(
    """
    -----BEGIN PRIVATE KEY-----
    MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgCn182hBmYVMAiNPO
    +7w05F40SlAqqxgBEYJZOeK47aihRANCAAShtZ9TRt7I+7Y0o99XUkrgSYmUmpr4
    K8CB0IkTCX6b1tXp3Xqs1V5BckTdqrls+zsm3AfeiNBb9EDdxiX9Ddzu
    -----END PRIVATE KEY-----
    """.utf8
)

================================================ FILE: Sources/NIOTLSServer/README.md ================================================

# NIOTLSServer

---

This sample application provides a TLS server. Invoke it with the following syntax.

```bash
swift run NIOTLSServer # Gets a content on a server on ::1, port 4433, using TLS
```

================================================ FILE: Sources/NIOTLSServer/main.swift ================================================

//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

import NIOCore
import NIOPosix
import NIOSSL

import struct Foundation.URL

/// Echoes every inbound buffer back to the peer, flushing at read-complete.
private final class EchoHandler: ChannelInboundHandler {
    public typealias InboundIn = ByteBuffer

    func channelRead(context: ChannelHandlerContext, data: NIOAny) {
        context.write(data, promise: nil)
    }

    func channelReadComplete(context: ChannelHandlerContext) {
        context.flush()
    }
}

// The server expects `cert.pem` and `key.pem` in the current working directory.
let certificateChain = try NIOSSLCertificate.fromPEMFile("cert.pem")
let privateKey = try! NIOSSLPrivateKey(file: "key.pem", format: .pem)

let sslContext = try!
    NIOSSLContext(
        configuration: TLSConfiguration.makeServerConfiguration(
            certificateChain: certificateChain.map { .certificate($0) },
            privateKey: .privateKey(privateKey)
        )
    )

let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
let bootstrap = ServerBootstrap(group: group)
    // Specify backlog and enable SO_REUSEADDR for the server itself
    .serverChannelOption(ChannelOptions.backlog, value: 256)
    .serverChannelOption(ChannelOptions.socket(SocketOptionLevel(SOL_SOCKET), SO_REUSEADDR), value: 1)

    // Set the handlers that are applied to the accepted channels.
    .childChannelInitializer { channel in
        channel.eventLoop.makeCompletedFuture {
            // TLS termination first, then the echo logic.
            try channel.pipeline.syncOperations.addHandlers(NIOSSLServerHandler(context: sslContext), EchoHandler())
        }
    }

    // Enable TCP_NODELAY and SO_REUSEADDR for the accepted Channels
    .childChannelOption(ChannelOptions.socket(IPPROTO_TCP, TCP_NODELAY), value: 1)
    .childChannelOption(ChannelOptions.socket(SocketOptionLevel(SOL_SOCKET), SO_REUSEADDR), value: 1)

defer {
    try!
        group.syncShutdownGracefully()
}

// First argument is the program path
let arguments = CommandLine.arguments
let arg1 = arguments.dropFirst().first
let arg2 = arguments.dropFirst().dropFirst().first

var host: String = "::1"
var port: Int = 4433

// Accept either "host port", a single "port", or no arguments at all.
switch (arg1, arg1.flatMap { Int($0) }, arg2.flatMap { Int($0) }) {
case (.some(let h), _, .some(let p)):
    // we got two arguments, let's interpret that as host and port
    host = h
    port = p
case (_, .some(let p), _):
    // only one argument --> port
    port = p
default:
    ()
}

let channel = try bootstrap.bind(host: host, port: port).wait()

print("Server started and listening on \(channel.localAddress!)")

// This will never unblock as we don't close the ServerChannel
try channel.closeFuture.wait()
print("Server closed")

================================================ FILE: Tests/NIOSSLTests/ByteBufferBIOTest.swift ================================================

//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL
import NIOCore
import XCTest

@testable import NIOSSL

final class ByteBufferBIOTest: XCTestCase {
    override func setUp() {
        guard boringSSLIsInitialized else {
            fatalError("Cannot run tests without BoringSSL")
        }
    }

    /// This leaks on purpose!
    // Returns a retained C BIO whose Swift side has already been closed and dropped,
    // so the BIO callbacks run without a live ByteBufferBIO behind them.
    // NOTE(review): the pointee generic of `UnsafeMutablePointer` (presumably `<BIO>`)
    // was stripped by extraction — confirm upstream.
    private func retainedBIO() -> UnsafeMutablePointer {
        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        swiftBIO.close()
        return swiftBIO.retainedBIO()
    }

    /// Bytes written via BIO_write surface through `outboundCiphertext()`, exactly once.
    func testExtractingBIOWrite() throws {
        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        let cBIO = swiftBIO.retainedBIO()
        defer {
            CNIOBoringSSL_BIO_free(cBIO)
            swiftBIO.close()
        }

        XCTAssertNil(swiftBIO.outboundCiphertext())
        var bytesToWrite: [UInt8] = [1, 2, 3, 4, 5]
        let rc = CNIOBoringSSL_BIO_write(cBIO, &bytesToWrite, 5)
        XCTAssertEqual(rc, 5)

        guard
            let extractedBytes = swiftBIO.outboundCiphertext().flatMap({
                $0.getBytes(at: $0.readerIndex, length: $0.readableBytes)
            })
        else {
            XCTFail("No received bytes")
            return
        }
        XCTAssertEqual(extractedBytes, bytesToWrite)
        XCTAssertNil(swiftBIO.outboundCiphertext())
    }

    /// Multiple BIO_writes before extraction come out as one coalesced buffer.
    func testManyBIOWritesAreCoalesced() throws {
        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        let cBIO = swiftBIO.retainedBIO()
        defer {
            CNIOBoringSSL_BIO_free(cBIO)
            swiftBIO.close()
        }

        XCTAssertNil(swiftBIO.outboundCiphertext())
        var bytesToWrite: [UInt8] = [1, 2, 3, 4, 5]
        var expectedBytes = [UInt8]()

        for _ in 0..<10 {
            let rc = CNIOBoringSSL_BIO_write(cBIO, &bytesToWrite, 5)
            XCTAssertEqual(rc, 5)
            expectedBytes.append(contentsOf: bytesToWrite)
        }

        guard
            let extractedBytes = swiftBIO.outboundCiphertext().flatMap({
                $0.getBytes(at: $0.readerIndex, length: $0.readableBytes)
            })
        else {
            XCTFail("No received bytes")
            return
        }
        XCTAssertEqual(extractedBytes, expectedBytes)
        XCTAssertNil(swiftBIO.outboundCiphertext())
    }

    /// An empty BIO reports -1 with the retry/read flags set and leaves the buffer untouched.
    func testReadWithNoDataInBIO() throws {
        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        let cBIO = swiftBIO.retainedBIO()
        defer {
            CNIOBoringSSL_BIO_free(cBIO)
            swiftBIO.close()
        }

        var targetBuffer = [UInt8](repeating: 0, count: 512)
        let rc = CNIOBoringSSL_BIO_read(cBIO, &targetBuffer, 512)
        XCTAssertEqual(rc, -1)
        XCTAssertTrue(CNIOBoringSSL_BIO_should_retry(cBIO) != 0)
        XCTAssertTrue(CNIOBoringSSL_BIO_should_read(cBIO) != 0)
        XCTAssertEqual(targetBuffer, [UInt8](repeating: 0, count: 512))
    }

    /// Data fed in via receiveFromNetwork is readable through BIO_read, once.
    func testReadWithDataInBIO() throws {
        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        let cBIO = swiftBIO.retainedBIO()
        defer {
            CNIOBoringSSL_BIO_free(cBIO)
            swiftBIO.close()
        }

        var inboundBytes = ByteBufferAllocator().buffer(capacity: 1024)
        inboundBytes.writeBytes([1, 2, 3, 4, 5])
        swiftBIO.receiveFromNetwork(buffer: inboundBytes)

        var receivedBytes = ByteBufferAllocator().buffer(capacity: 1024)
        let rc = receivedBytes.writeWithUnsafeMutableBytes(minimumWritableBytes: 1024) { pointer in
            let innerRC = CNIOBoringSSL_BIO_read(cBIO, pointer.baseAddress!, CInt(pointer.count))
            XCTAssertTrue(innerRC > 0)
            return innerRC > 0 ? Int(innerRC) : 0
        }
        XCTAssertEqual(rc, 5)
        XCTAssertEqual(receivedBytes, inboundBytes)

        // A second read finds nothing and asks for a retry.
        let secondRC = receivedBytes.withUnsafeMutableWritableBytes { pointer in
            CNIOBoringSSL_BIO_read(cBIO, pointer.baseAddress!, CInt(pointer.count))
        }
        XCTAssertEqual(secondRC, -1)
        XCTAssertTrue(CNIOBoringSSL_BIO_should_retry(cBIO) != 0)
        XCTAssertTrue(CNIOBoringSSL_BIO_should_read(cBIO) != 0)
    }

    /// Reading one byte at a time drains the buffered data correctly.
    func testShortReads() throws {
        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        let cBIO = swiftBIO.retainedBIO()
        defer {
            CNIOBoringSSL_BIO_free(cBIO)
            swiftBIO.close()
        }

        var inboundBytes = ByteBufferAllocator().buffer(capacity: 1024)
        inboundBytes.writeBytes([1, 2, 3, 4, 5])
        swiftBIO.receiveFromNetwork(buffer: inboundBytes)

        var receivedBytes = ByteBufferAllocator().buffer(capacity: 1024)
        for _ in 0..<5 {
            let rc = receivedBytes.writeWithUnsafeMutableBytes(minimumWritableBytes: 1024) { pointer in
                let innerRC = CNIOBoringSSL_BIO_read(cBIO, pointer.baseAddress!, 1)
                XCTAssertTrue(innerRC > 0)
                return innerRC > 0 ? Int(innerRC) : 0
            }
            XCTAssertEqual(rc, 1)
        }
        XCTAssertEqual(receivedBytes, inboundBytes)

        let secondRC = receivedBytes.withUnsafeMutableWritableBytes { pointer in
            CNIOBoringSSL_BIO_read(cBIO, pointer.baseAddress!, CInt(pointer.count))
        }
        XCTAssertEqual(secondRC, -1)
        XCTAssertTrue(CNIOBoringSSL_BIO_should_retry(cBIO) != 0)
        XCTAssertTrue(CNIOBoringSSL_BIO_should_read(cBIO) != 0)
    }

    /// After the Swift side is gone, BIO_read fails without requesting a retry.
    func testDropRefToBaseObjectOnRead() throws {
        let cBIO = self.retainedBIO()
        let receivedBytes = ByteBufferAllocator().buffer(capacity: 1024)
        receivedBytes.withVeryUnsafeBytes { pointer in
            let rc = CNIOBoringSSL_BIO_read(cBIO, UnsafeMutableRawPointer(mutating: pointer.baseAddress!), 1)
            XCTAssertEqual(rc, -1)
            XCTAssertTrue(CNIOBoringSSL_BIO_should_retry(cBIO) == 0)
        }
    }

    /// After the Swift side is gone, BIO_write fails without requesting a retry.
    func testDropRefToBaseObjectOnWrite() throws {
        let cBIO = self.retainedBIO()
        var receivedBytes = ByteBufferAllocator().buffer(capacity: 1024)
        receivedBytes.writeBytes([1, 2, 3, 4, 5])
        receivedBytes.withVeryUnsafeBytes { pointer in
            let rc = CNIOBoringSSL_BIO_write(cBIO, pointer.baseAddress!, 1)
            XCTAssertEqual(rc, -1)
            XCTAssertTrue(CNIOBoringSSL_BIO_should_retry(cBIO) == 0)
        }
    }

    /// Zero-length reads return 0 even when no data is buffered.
    func testZeroLengthReadsAlwaysSucceed() throws {
        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        let cBIO = swiftBIO.retainedBIO()
        defer {
            CNIOBoringSSL_BIO_free(cBIO)
            swiftBIO.close()
        }

        var targetBuffer = [UInt8](repeating: 0, count: 512)
        let rc = CNIOBoringSSL_BIO_read(cBIO, &targetBuffer, 0)
        XCTAssertEqual(rc, 0)
        XCTAssertEqual(targetBuffer, [UInt8](repeating: 0, count: 512))
    }

    /// Writing while the previous outbound buffer is still held forces a copy-on-write
    /// (the two extracted buffers have different storage addresses).
    func testWriteWhenHoldingBufferTriggersCoW() throws {
        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        let cBIO = swiftBIO.retainedBIO()
        defer {
            CNIOBoringSSL_BIO_free(cBIO)
            swiftBIO.close()
        }

        var bytesToWrite: [UInt8] = [1, 2, 3, 4, 5]
        let rc = CNIOBoringSSL_BIO_write(cBIO, &bytesToWrite, 5)
        XCTAssertEqual(rc, 5)
        guard let firstWrite = swiftBIO.outboundCiphertext() else {
            XCTFail("Did not write")
            return
        }

        let secondRC = CNIOBoringSSL_BIO_write(cBIO, &bytesToWrite, 5)
        XCTAssertEqual(secondRC, 5)
        guard let secondWrite = swiftBIO.outboundCiphertext() else {
            XCTFail("Did not write second time")
            return
        }

        XCTAssertNotEqual(firstWrite.baseAddress(), secondWrite.baseAddress())
    }

    /// When the extracted buffer is dropped before the next write, storage is reused
    /// and no copy-on-write occurs (same storage address both times).
    func testWriteWhenDroppedBufferDoesNotTriggerCoW() {
        // NOTE(review): `UnsafeMutablePointer` below lost its pointee generic
        // (presumably `<BIO>`) in extraction — confirm upstream.
        func writeAddress(swiftBIO: ByteBufferBIO, cBIO: UnsafeMutablePointer) -> UInt? {
            var bytesToWrite: [UInt8] = [1, 2, 3, 4, 5]
            let rc = CNIOBoringSSL_BIO_write(cBIO, &bytesToWrite, 5)
            XCTAssertEqual(rc, 5)
            return swiftBIO.outboundCiphertext()?.baseAddress()
        }

        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        let cBIO = swiftBIO.retainedBIO()
        defer {
            CNIOBoringSSL_BIO_free(cBIO)
            swiftBIO.close()
        }

        let firstAddress = writeAddress(swiftBIO: swiftBIO, cBIO: cBIO)
        let secondAddress = writeAddress(swiftBIO: swiftBIO, cBIO: cBIO)
        XCTAssertNotNil(firstAddress)
        XCTAssertNotNil(secondAddress)
        XCTAssertEqual(firstAddress, secondAddress)
    }

    func testZeroLengthWriteIsNoOp() {
        // This test works by emulating testWriteWhenHoldingBufferTriggersCoW, but
        // with the second write at zero length. This will not trigger a CoW, as no
        // actual write will occur.
        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        let cBIO = swiftBIO.retainedBIO()
        defer {
            CNIOBoringSSL_BIO_free(cBIO)
            swiftBIO.close()
        }

        var bytesToWrite: [UInt8] = [1, 2, 3, 4, 5]
        let rc = CNIOBoringSSL_BIO_write(cBIO, &bytesToWrite, 5)
        XCTAssertEqual(rc, 5)
        guard let firstWrite = swiftBIO.outboundCiphertext() else {
            XCTFail("Did not write")
            return
        }

        withExtendedLifetime(firstWrite) {
            let secondRC = CNIOBoringSSL_BIO_write(cBIO, &bytesToWrite, 0)
            XCTAssertEqual(secondRC, 0)
            XCTAssertNil(swiftBIO.outboundCiphertext())
        }
    }

    /// BIO_puts behaves like a write of the string's bytes (no trailing NUL).
    func testSimplePuts() {
        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        let cBIO = swiftBIO.retainedBIO()
        defer {
            CNIOBoringSSL_BIO_free(cBIO)
            swiftBIO.close()
        }

        XCTAssertNil(swiftBIO.outboundCiphertext())
        let stringToWrite = "Hello, world!"
        let rc = stringToWrite.withCString {
            CNIOBoringSSL_BIO_puts(cBIO, $0)
        }
        XCTAssertEqual(rc, 13)

        let extractedString = swiftBIO.outboundCiphertext().flatMap {
            $0.getString(at: $0.readerIndex, length: $0.readableBytes)
        }
        XCTAssertEqual(extractedString, stringToWrite)
        XCTAssertNil(swiftBIO.outboundCiphertext())
    }

    /// BIO_gets is unimplemented for this BIO: returns -2 with no retry requested.
    func testGetsNotSupported() {
        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        let cBIO = swiftBIO.retainedBIO()
        defer {
            CNIOBoringSSL_BIO_free(cBIO)
            swiftBIO.close()
        }

        var buffer = ByteBufferAllocator().buffer(capacity: 1024)
        buffer.writeStaticString("Hello, world!")
        swiftBIO.receiveFromNetwork(buffer: buffer)

        var output = [CChar](repeating: 0, count: 1024)
        output.withUnsafeMutableBufferPointer { pointer in
            let rc = CNIOBoringSSL_BIO_gets(cBIO, pointer.baseAddress, CInt(pointer.count))
            XCTAssertEqual(rc, -2)
            XCTAssertTrue(CNIOBoringSSL_BIO_should_retry(cBIO) == 0)
        }
    }

    /// The BIO's close flag can be read and toggled through BIO_ctrl/BIO_set_close.
    func testBasicCtrlDance() {
        let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: .max)
        let cBIO =
swiftBIO.retainedBIO() defer { CNIOBoringSSL_BIO_free(cBIO) swiftBIO.close() } let originalShutdown = CNIOBoringSSL_BIO_ctrl(cBIO, BIO_CTRL_GET_CLOSE, 0, nil) XCTAssertEqual(originalShutdown, CLong(BIO_CLOSE)) let rc = CNIOBoringSSL_BIO_set_close(cBIO, CInt(BIO_NOCLOSE)) XCTAssertEqual(rc, 1) let newShutdown = CNIOBoringSSL_BIO_ctrl(cBIO, BIO_CTRL_GET_CLOSE, 0, nil) XCTAssertEqual(newShutdown, CLong(BIO_NOCLOSE)) let rc2 = CNIOBoringSSL_BIO_set_close(cBIO, CInt(BIO_CLOSE)) XCTAssertEqual(rc2, 1) let newShutdown2 = CNIOBoringSSL_BIO_ctrl(cBIO, BIO_CTRL_GET_CLOSE, 0, nil) XCTAssertEqual(newShutdown2, CLong(BIO_CLOSE)) } func testMaximumPreservedCapacityIsObeyed() throws { let swiftBIO = ByteBufferBIO(allocator: ByteBufferAllocator(), maximumPreservedOutboundBufferCapacity: 64) let cBIO = swiftBIO.retainedBIO() defer { CNIOBoringSSL_BIO_free(cBIO) swiftBIO.close() } XCTAssertNil(swiftBIO.outboundCiphertext()) // We're going to write 1kb, then 1 byte, in a loop. After the 1kB write, the capacity of the buffer will be 1kB (or more). // After the 1 byte write, the capacity will be 64 (exactly). var bytesToWrite: [UInt8] = .init(repeating: 0, count: 1024) for _ in 0..<10 { var rc = CNIOBoringSSL_BIO_write(cBIO, &bytesToWrite, CInt(bytesToWrite.count)) XCTAssertEqual(rc, CInt(bytesToWrite.count)) let capacity = swiftBIO._testOnly_outboundBufferCapacity XCTAssertGreaterThanOrEqual(capacity, 1024) guard swiftBIO.outboundCiphertext() != nil else { XCTFail("No received bytes") return } // Capacity hasn't changed yet. XCTAssertEqual(capacity, swiftBIO._testOnly_outboundBufferCapacity) // Now write a short chunk. rc = CNIOBoringSSL_BIO_write(cBIO, &bytesToWrite, 1) XCTAssertEqual(rc, 1) // Check the capacity. It should be exactly 64. 
XCTAssertEqual(swiftBIO._testOnly_outboundBufferCapacity, 64) guard swiftBIO.outboundCiphertext() != nil else { XCTFail("No received bytes") return } } } } extension ByteBuffer { func baseAddress() -> UInt { self.withVeryUnsafeBytes { UInt(bitPattern: $0.baseAddress!) } } } ================================================ FILE: Tests/NIOSSLTests/CertificateVerificationTests.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2019-2021 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import XCTest @testable import NIOSSL final class CertificateVerificationTests: XCTestCase { func testCanFindCAFileOnLinux() { // This test only runs on Linux #if os(Linux) // A valid Linux system means we can find a CA file. XCTAssertNotNil(rootCAFilePath) #endif } } ================================================ FILE: Tests/NIOSSLTests/ClientSNITests.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2021 Apple Inc. 
// and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

import NIOCore
import NIOPosix
import NIOSSL
import NIOTLS
import XCTest

class ClientSNITests: XCTestCase {
    /// Builds a server-capable context whose self-signed certificate is also
    /// its own trust root, so client and server can share it.
    private func configuredSSLContext() throws -> NIOSSLContext {
        var config = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(NIOSSLIntegrationTest.cert)],
            privateKey: .privateKey(NIOSSLIntegrationTest.key)
        )
        config.trustRoots = .certificates([NIOSSLIntegrationTest.cert])
        return try NIOSSLContext(configuration: config)
    }

    /// Runs a full TLS connection against a server that sniffs the SNI extension,
    /// then asserts the sniffed value matches `expectedResult`.
    private func assertSniResult(sniField: String?, expectedResult: SNIResult) throws {
        let context = try configuredSSLContext()

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            try? group.syncShutdownGracefully()
        }

        // NOTE(review): the promise's generic parameter was lost in extraction;
        // restored as EventLoopPromise<SNIResult> — confirm against upstream.
        let sniPromise: EventLoopPromise<SNIResult> = group.next().makePromise()
        let serverChannel = try serverTLSChannel(
            context: context,
            preHandlers: [
                ByteToMessageHandler(
                    SNIHandler {
                        sniPromise.succeed($0)
                        return group.next().makeSucceededFuture(())
                    }
                )
            ],
            postHandlers: [],
            group: group
        )
        defer {
            _ = try? serverChannel.close().wait()
        }

        let clientChannel = try clientTLSChannel(
            context: context,
            preHandlers: [],
            postHandlers: [],
            group: group,
            connectingTo: serverChannel.localAddress!,
            serverHostname: sniField
        )
        defer {
            _ = try? clientChannel.close().wait()
        }

        let sniResult = try sniPromise.futureResult.wait()
        XCTAssertEqual(sniResult, expectedResult)
    }

    func testSNIIsTransmitted() throws {
        try assertSniResult(sniField: "httpbin.org", expectedResult: .hostname("httpbin.org"))
    }

    func testNoSNILeadsToNoExtension() throws {
        try assertSniResult(sniField: nil, expectedResult: .fallback)
    }

    func testSNIIsRejectedForIPv4Addresses() throws {
        let context = try configuredSSLContext()
        let testString = "192.168.0.1"

        XCTAssertThrowsError(try NIOSSLClientTLSProvider<ClientBootstrap>(context: context, serverHostname: testString)) { error in
            XCTAssertEqual(.cannotUseIPAddressInSNI, error as? NIOSSLExtraError)
        }
        XCTAssertThrowsError(try NIOSSLClientHandler(context: context, serverHostname: testString)) { error in
            XCTAssertEqual(.cannotUseIPAddressInSNI, error as? NIOSSLExtraError)
        }
    }

    func testSNIIsRejectedForIPv6Addresses() throws {
        let context = try configuredSSLContext()
        let testString = "fe80::200:f8ff:fe21:67cf"

        XCTAssertThrowsError(try NIOSSLClientTLSProvider<ClientBootstrap>(context: context, serverHostname: testString)) { error in
            XCTAssertEqual(.cannotUseIPAddressInSNI, error as? NIOSSLExtraError)
        }
        XCTAssertThrowsError(try NIOSSLClientHandler(context: context, serverHostname: testString)) { error in
            XCTAssertEqual(.cannotUseIPAddressInSNI, error as? NIOSSLExtraError)
        }
    }

    func testSNIIsRejectedForEmptyHostname() throws {
        let context = try configuredSSLContext()
        let testString = ""

        XCTAssertThrowsError(try NIOSSLClientTLSProvider<ClientBootstrap>(context: context, serverHostname: testString)) { error in
            XCTAssertEqual(.invalidSNIHostname, error as? NIOSSLExtraError)
        }
        XCTAssertThrowsError(try NIOSSLClientHandler(context: context, serverHostname: testString)) { error in
            XCTAssertEqual(.invalidSNIHostname, error as? NIOSSLExtraError)
        }
    }

    func testSNIIsRejectedForTooLongHostname() throws {
        let context = try configuredSSLContext()
        // SNI hostnames are limited to 255 bytes; 256 must be rejected.
        let testString = String(repeating: "x", count: 256)

        XCTAssertThrowsError(try NIOSSLClientTLSProvider<ClientBootstrap>(context: context, serverHostname: testString)) { error in
            XCTAssertEqual(.invalidSNIHostname, error as? NIOSSLExtraError)
        }
        XCTAssertThrowsError(try NIOSSLClientHandler(context: context, serverHostname: testString)) { error in
            XCTAssertEqual(.invalidSNIHostname, error as? NIOSSLExtraError)
        }
    }

    func testSNIIsRejectedFor0Byte() throws {
        let context = try configuredSSLContext()
        let testString = String(UnicodeScalar(0)!)

        XCTAssertThrowsError(try NIOSSLClientTLSProvider<ClientBootstrap>(context: context, serverHostname: testString)) { error in
            XCTAssertEqual(.invalidSNIHostname, error as? NIOSSLExtraError)
        }
        XCTAssertThrowsError(try NIOSSLClientHandler(context: context, serverHostname: testString)) { error in
            XCTAssertEqual(.invalidSNIHostname, error as? NIOSSLExtraError)
        }
    }

    func testSNIIsNotRejectedForAnyOfTheFirst1000CodeUnits() throws {
        let context = try configuredSSLContext()

        for testString in (1...Int(1000)).compactMap({ UnicodeScalar($0).map({ String($0) }) }) {
            XCTAssertNoThrow(try NIOSSLClientHandler(context: context, serverHostname: testString))
            XCTAssertNoThrow(try NIOSSLClientTLSProvider<ClientBootstrap>(context: context, serverHostname: testString))
        }
    }

    func testSNIIsNotRejectedForVeryWeirdCharacters() throws {
        let context = try configuredSSLContext()
        let testString = "😎🥶💥🏴󠁧󠁢󠁥󠁮󠁧󠁿👩‍💻"
        XCTAssertLessThanOrEqual(testString.utf8.count, 255)  // just to check we didn't make this too large.
XCTAssertNoThrow(try NIOSSLClientHandler(context: context, serverHostname: testString))
        XCTAssertNoThrow(try NIOSSLClientTLSProvider<ClientBootstrap>(context: context, serverHostname: testString))
    }
}


================================================
FILE: Tests/NIOSSLTests/CustomPrivateKeyTests.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

@_implementationOnly import CNIOBoringSSL
import NIOConcurrencyHelpers
import NIOCore
import NIOEmbedded
import XCTest

@testable import NIOSSL

// This is a helper that lets us work with an EVP_PKEY.
//
// This type is thread-safe: it doesn't perform any mutation of the underlying object.
private final class CustomPKEY: @unchecked Sendable {
    private let ref: OpaquePointer

    init(from key: NIOSSLPrivateKey) {
        // Extract a copy of the key reference here.
        self.ref = key.withUnsafeMutableEVPPKEYPointer { pkey in
            CNIOBoringSSL_EVP_PKEY_up_ref(pkey)
            return pkey
        }
    }

    init(from generator: () -> OpaquePointer) {
        self.ref = generator()
    }

    deinit {
        CNIOBoringSSL_EVP_PKEY_free(self.ref)
    }

    /// Hashes `data` with the algorithm's digest and produces a signature with this key.
    func sign(algorithm: SignatureAlgorithm, data: ByteBuffer) -> ByteBuffer {
        let ctx = CNIOBoringSSL_EVP_PKEY_CTX_new(self.ref, nil)!
        defer {
            CNIOBoringSSL_EVP_PKEY_CTX_free(ctx)
        }

        // Step 1: We need to hash the input before we sign.
        let hashContext = CNIOBoringSSL_EVP_MD_CTX_new()!
        defer {
            CNIOBoringSSL_EVP_MD_CTX_free(hashContext)
        }
        CNIOBoringSSL_EVP_MD_CTX_init(hashContext)
        CNIOBoringSSL_EVP_DigestInit_ex(hashContext, algorithm.md, nil)

        var rc = data.withUnsafeReadableBytes { bytesPtr in
            CNIOBoringSSL_EVP_DigestUpdate(
                hashContext,
                bytesPtr.baseAddress?.assumingMemoryBound(to: UInt8.self),
                bytesPtr.count
            )
        }
        precondition(rc == 1)

        let signatureSize = CNIOBoringSSL_EVP_MD_size(algorithm.md)
        var digestBuffer = ByteBuffer()
        digestBuffer.writeWithUnsafeMutableBytes(minimumWritableBytes: signatureSize) { outputPtr in
            var actualSize = CUnsignedInt(outputPtr.count)
            CNIOBoringSSL_EVP_DigestFinal_ex(
                hashContext,
                outputPtr.baseAddress?.assumingMemoryBound(to: UInt8.self),
                &actualSize
            )
            return Int(actualSize)
        }

        // Ok, great, we've hashed. Now let's do the signing.
        precondition(CNIOBoringSSL_EVP_PKEY_sign_init(ctx) == 1)

        // TODO: Add RSA padding when needed.
        CNIOBoringSSL_EVP_PKEY_CTX_set_signature_md(ctx, algorithm.md)

        // For RSA algorithms we may need to add padding.
        if let padding = algorithm.rsaPadding {
            CNIOBoringSSL_EVP_PKEY_CTX_set_rsa_padding(ctx, padding)
        }

        // And for some RSA padding, that may require a salt.
        if let saltLength = algorithm.saltLen {
            CNIOBoringSSL_EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, saltLength)
        }

        // Now we find out the length we need: EVP_PKEY_sign with a nil output buffer
        // only reports the required signature size.
        var signatureLength: Int = 0
        rc = digestBuffer.withUnsafeReadableBytes { bytesPtr in
            CNIOBoringSSL_EVP_PKEY_sign(
                ctx,
                nil,
                &signatureLength,
                bytesPtr.baseAddress?.assumingMemoryBound(to: UInt8.self),
                bytesPtr.count
            )
        }
        precondition(rc == 1)

        // And finally we can do the sign.
        var outputBuffer = ByteBuffer()
        outputBuffer.writeWithUnsafeMutableBytes(minimumWritableBytes: signatureLength) { outputPtr in
            precondition(signatureLength <= outputPtr.count)
            let rc = digestBuffer.withUnsafeReadableBytes { bytesPtr in
                CNIOBoringSSL_EVP_PKEY_sign(
                    ctx,
                    outputPtr.baseAddress?.assumingMemoryBound(to: UInt8.self),
                    &signatureLength,
                    bytesPtr.baseAddress?.assumingMemoryBound(to: UInt8.self),
                    bytesPtr.count
                )
            }
            precondition(rc == 1)
            return signatureLength
        }
        return outputBuffer
    }

    /// Performs a raw (no-padding) RSA decryption of `data` with this key.
    func decrypt(data: ByteBuffer) -> ByteBuffer {
        // Decryption is only needed for RSA, so this has to work.
        let rsa = CNIOBoringSSL_EVP_PKEY_get0_RSA(self.ref)!
        let targetSize = CNIOBoringSSL_RSA_size(rsa)

        var output = ByteBuffer()
        output.writeWithUnsafeMutableBytes(minimumWritableBytes: Int(targetSize)) { outputBytes in
            var written = 0
            let rc = data.withUnsafeReadableBytes { inputBytes in
                CNIOBoringSSL_RSA_decrypt(
                    rsa,
                    &written,
                    outputBytes.baseAddress?.assumingMemoryBound(to: UInt8.self),
                    outputBytes.count,
                    inputBytes.baseAddress?.assumingMemoryBound(to: UInt8.self),
                    inputBytes.count,
                    RSA_NO_PADDING
                )
            }
            precondition(rc == 1)
            return written
        }
        return output
    }
}

// A custom key whose sign/decrypt operations complete synchronously, counting calls.
// NOTE(review): generic parameters on NIOLockedValueBox/EventLoopFuture were lost in
// extraction and have been restored — confirm against upstream.
private final class CustomKeyImmediateResult: NIOSSLCustomPrivateKey, Hashable {
    let backing: CustomPKEY
    let signatureAlgorithms: [SignatureAlgorithm]
    let expectedChannel: Channel
    let _signCallCount: NIOLockedValueBox<Int>
    let _decryptCallCount: NIOLockedValueBox<Int>

    var signCallCount: Int {
        self._signCallCount.withLockedValue { $0 }
    }

    var decryptCallCount: Int {
        self._decryptCallCount.withLockedValue { $0 }
    }

    fileprivate init(_ backing: CustomPKEY, signatureAlgorithms: [SignatureAlgorithm], expectedChannel: Channel) {
        self.backing = backing
        self.signatureAlgorithms = signatureAlgorithms
        self.expectedChannel = expectedChannel
        self._signCallCount = .init(0)
        self._decryptCallCount = .init(0)
    }

    func sign(channel: Channel, algorithm: SignatureAlgorithm, data: ByteBuffer) -> EventLoopFuture<ByteBuffer> {
        XCTAssertTrue(channel === self.expectedChannel)
        XCTAssertTrue(self.signatureAlgorithms.contains(algorithm))
        self._signCallCount.withLockedValue { $0 += 1 }
        return channel.eventLoop.makeSucceededFuture(self.backing.sign(algorithm: algorithm, data: data))
    }

    func decrypt(channel: Channel, data: ByteBuffer) -> EventLoopFuture<ByteBuffer> {
        XCTAssertTrue(channel === self.expectedChannel)
        self._decryptCallCount.withLockedValue { $0 += 1 }
        return channel.eventLoop.makeSucceededFuture(self.backing.decrypt(data: data))
    }

    static func == (lhs: CustomKeyImmediateResult, rhs: CustomKeyImmediateResult) -> Bool {
        lhs.backing === rhs.backing && lhs.signatureAlgorithms == rhs.signatureAlgorithms
    }

    func hash(into hasher: inout Hasher) {
        hasher.combine(ObjectIdentifier(self.backing))
        hasher.combine(signatureAlgorithms)
    }
}

// A custom key whose sign/decrypt operations only complete when the test succeeds
// (or fails) the corresponding pending promise.
private final class CustomKeyDelayedCompletion: NIOSSLCustomPrivateKey, Hashable {
    let backing: CustomPKEY
    let signatureAlgorithms: [SignatureAlgorithm]
    let expectedChannel: Channel
    let _pendingSigningEvents: NIOLockedValueBox<[EventLoopPromise<Void>]>
    let _pendingDecryptionEvents: NIOLockedValueBox<[EventLoopPromise<Void>]>

    var pendingSigningEvents: [EventLoopPromise<Void>] {
        self._pendingSigningEvents.withLockedValue { $0 }
    }

    var pendingDecryptionEvents: [EventLoopPromise<Void>] {
        self._pendingDecryptionEvents.withLockedValue { $0 }
    }

    fileprivate init(_ backing: CustomPKEY, signatureAlgorithms: [SignatureAlgorithm], expectedChannel: Channel) {
        self.backing = backing
        self.signatureAlgorithms = signatureAlgorithms
        self.expectedChannel = expectedChannel
        self._pendingSigningEvents = .init([])
        self._pendingDecryptionEvents = .init([])
    }

    func sign(channel: Channel, algorithm: SignatureAlgorithm, data: ByteBuffer) -> EventLoopFuture<ByteBuffer> {
        XCTAssertTrue(channel === self.expectedChannel)
        XCTAssertTrue(self.signatureAlgorithms.contains(algorithm))
        let promise = channel.eventLoop.makePromise(of: Void.self)
        self._pendingSigningEvents.withLockedValue { $0.append(promise) }
        return promise.futureResult.map { self.backing.sign(algorithm: algorithm, data: data) }
    }

    func decrypt(channel: Channel, data: ByteBuffer) -> EventLoopFuture<ByteBuffer> {
        XCTAssertTrue(channel === self.expectedChannel)
        let promise = channel.eventLoop.makePromise(of: Void.self)
        self._pendingDecryptionEvents.withLockedValue { $0.append(promise) }
        return promise.futureResult.map { self.backing.decrypt(data: data) }
    }

    static func == (lhs: CustomKeyDelayedCompletion, rhs: CustomKeyDelayedCompletion) -> Bool {
        lhs.backing === rhs.backing && lhs.signatureAlgorithms == rhs.signatureAlgorithms
    }

    func hash(into hasher: inout Hasher) {
        hasher.combine(ObjectIdentifier(self.backing))
        hasher.combine(signatureAlgorithms)
    }
}

// A custom key that exposes no DER bytes; its operations are never expected to run.
private final class CustomKeyWithoutDERBytes: NIOSSLCustomPrivateKey, Hashable {
    var signatureAlgorithms: [SignatureAlgorithm] { [] }

    func sign(channel: Channel, algorithm: SignatureAlgorithm, data: ByteBuffer) -> EventLoopFuture<ByteBuffer> {
        fatalError("Not implemented")
    }

    func decrypt(channel: Channel, data: ByteBuffer) -> EventLoopFuture<ByteBuffer> {
        fatalError("Not implemented")
    }

    static func == (lhs: CustomKeyWithoutDERBytes, rhs: CustomKeyWithoutDERBytes) -> Bool {
        lhs.signatureAlgorithms == rhs.signatureAlgorithms
    }

    func hash(into hasher: inout Hasher) {
        hasher.combine(ObjectIdentifier(self))
        hasher.combine(signatureAlgorithms)
    }
}

// A custom key that exposes a fixed DER byte payload; its operations are never expected to run.
private final class CustomKeyWithDERBytes: NIOSSLCustomPrivateKey, Hashable {
    var signatureAlgorithms: [NIOSSL.SignatureAlgorithm] { [] }
    var derBytes: [UInt8] { [42] }

    func sign(channel: Channel, algorithm: SignatureAlgorithm, data: ByteBuffer) -> EventLoopFuture<ByteBuffer> {
        fatalError("Not implemented")
    }

    func decrypt(channel: Channel, data: ByteBuffer) -> EventLoopFuture<ByteBuffer> {
        fatalError("Not implemented")
    }

    static func == (lhs: CustomKeyWithDERBytes, rhs: CustomKeyWithDERBytes) -> Bool {
        lhs.signatureAlgorithms == rhs.signatureAlgorithms && lhs.derBytes == rhs.derBytes
    }

    func hash(into hasher: inout Hasher) {
        hasher.combine(ObjectIdentifier(self))
        hasher.combine(signatureAlgorithms)
        hasher.combine(derBytes)
    }
}

final class
CustomPrivateKeyTests: XCTestCase {
    // Shared self-signed ECDSA (P-256) certificate and wrapped key fixture.
    fileprivate static let customECDSACertAndKey: (certificate: NIOSSLCertificate, key: CustomPKEY) = {
        let (cert, originalKey) = generateSelfSignedCert(keygenFunction: {
            generateECPrivateKey(curveNID: NID_X9_62_prime256v1)
        })
        let derivedKey = CustomPKEY(from: originalKey)
        return (certificate: cert, key: derivedKey)
    }()

    // Shared self-signed RSA certificate and wrapped key fixture.
    fileprivate static let customRSACertAndKey: (certificate: NIOSSLCertificate, key: CustomPKEY) = {
        let (cert, originalKey) = generateSelfSignedCert()
        let derivedKey = CustomPKEY(from: originalKey)
        return (certificate: cert, key: derivedKey)
    }()

    /// Client context trusting `trustRoot`, optionally capped to a TLS version and
    /// restricted to specific cipher suites.
    private func configuredClientContext(
        trustRoot: NIOSSLCertificate,
        maxTLSVersion: TLSVersion? = nil,
        cipherSuites: [NIOTLSCipher]? = nil
    ) -> NIOSSLContext {
        var config = TLSConfiguration.makeClientConfiguration()
        config.trustRoots = .certificates([trustRoot])
        config.maximumTLSVersion = maxTLSVersion
        if let cipherSuites = cipherSuites {
            config.cipherSuiteValues = cipherSuites
        }
        return try! NIOSSLContext(configuration: config)
    }

    /// Server context serving `certificate` with the given (possibly custom) private key.
    private func configuredServerContext(certificate: NIOSSLCertificate, privateKey: NIOSSLPrivateKey) -> NIOSSLContext {
        let config = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(certificate)],
            privateKey: .privateKey(privateKey)
        )
        return try! NIOSSLContext(configuration: config)
    }

    func testHappyPathImmediateResultCustomECDSAKey() throws {
        let b2b = BackToBackEmbeddedChannel()
        let happyPathKey = CustomKeyImmediateResult(
            CustomPrivateKeyTests.customECDSACertAndKey.key,
            signatureAlgorithms: [.ecdsaSecp256R1Sha256],
            expectedChannel: b2b.server
        )
        let clientContext = self.configuredClientContext(
            trustRoot: CustomPrivateKeyTests.customECDSACertAndKey.certificate
        )
        let serverContext = self.configuredServerContext(
            certificate: CustomPrivateKeyTests.customECDSACertAndKey.certificate,
            privateKey: NIOSSLPrivateKey(customPrivateKey: happyPathKey)
        )

        XCTAssertNoThrow(
            try b2b.client.pipeline.syncOperations.addHandlers(
                [
                    try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost"),
                    HandshakeCompletedHandler(),
                ]
            )
        )
        XCTAssertNoThrow(
            try b2b.server.pipeline.syncOperations.addHandlers(
                [NIOSSLServerHandler(context: serverContext), HandshakeCompletedHandler()]
            )
        )
        XCTAssertNoThrow(try b2b.connectInMemory())

        // ECDSA handshakes sign exactly once and never decrypt.
        XCTAssertEqual(happyPathKey.signCallCount, 1)
        XCTAssertEqual(happyPathKey.decryptCallCount, 0)
        XCTAssertTrue(b2b.client.handshakeSucceeded)
        XCTAssertTrue(b2b.server.handshakeSucceeded)
    }

    func testHappyPathImmediateResultCustomRSAKeyPSS() throws {
        let b2b = BackToBackEmbeddedChannel()
        let happyPathKey = CustomKeyImmediateResult(
            CustomPrivateKeyTests.customRSACertAndKey.key,
            signatureAlgorithms: [.rsaPssRsaeSha256],
            expectedChannel: b2b.server
        )
        let clientContext = self.configuredClientContext(
            trustRoot: CustomPrivateKeyTests.customRSACertAndKey.certificate
        )
        let serverContext = self.configuredServerContext(
            certificate: CustomPrivateKeyTests.customRSACertAndKey.certificate,
            privateKey: NIOSSLPrivateKey(customPrivateKey: happyPathKey)
        )

        XCTAssertNoThrow(
            try b2b.client.pipeline.syncOperations.addHandlers(
                [
                    try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost"),
                    HandshakeCompletedHandler(),
                ]
            )
        )
        XCTAssertNoThrow(
            try b2b.server.pipeline.syncOperations.addHandlers(
                [NIOSSLServerHandler(context: serverContext), HandshakeCompletedHandler()]
            )
        )
        XCTAssertNoThrow(try b2b.connectInMemory())

        XCTAssertEqual(happyPathKey.signCallCount, 1)
        XCTAssertEqual(happyPathKey.decryptCallCount, 0)
        XCTAssertTrue(b2b.client.handshakeSucceeded)
        XCTAssertTrue(b2b.server.handshakeSucceeded)
    }

    func testHappyPathImmediateResultCustomRSAKeyPKCS1() throws {
        let b2b = BackToBackEmbeddedChannel()
        let happyPathKey = CustomKeyImmediateResult(
            CustomPrivateKeyTests.customRSACertAndKey.key,
            signatureAlgorithms: [.rsaPkcs1Sha256],
            expectedChannel: b2b.server
        )

        // Rule out TLSv1.3, which doesn't support RSA decryption.
        // We also want to force RSA key exchange, which will let us test the decrypt
        // function.
        let clientContext = self.configuredClientContext(
            trustRoot: CustomPrivateKeyTests.customRSACertAndKey.certificate,
            maxTLSVersion: .tlsv12,
            cipherSuites: [.TLS_RSA_WITH_AES_128_GCM_SHA256]
        )
        let serverContext = self.configuredServerContext(
            certificate: CustomPrivateKeyTests.customRSACertAndKey.certificate,
            privateKey: NIOSSLPrivateKey(customPrivateKey: happyPathKey)
        )

        XCTAssertNoThrow(
            try b2b.client.pipeline.syncOperations.addHandlers(
                [
                    try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost"),
                    HandshakeCompletedHandler(),
                ]
            )
        )
        XCTAssertNoThrow(
            try b2b.server.pipeline.syncOperations.addHandlers(
                [NIOSSLServerHandler(context: serverContext), HandshakeCompletedHandler()]
            )
        )
        XCTAssertNoThrow(try b2b.connectInMemory())

        // RSA key exchange decrypts once and never signs.
        XCTAssertEqual(happyPathKey.signCallCount, 0)
        XCTAssertEqual(happyPathKey.decryptCallCount, 1)
        XCTAssertTrue(b2b.client.handshakeSucceeded)
        XCTAssertTrue(b2b.server.handshakeSucceeded)
    }

    func testHappyPathDelayedResultCustomECDSAKey() throws {
        let b2b = BackToBackEmbeddedChannel()
        let happyPathKey = CustomKeyDelayedCompletion(
            CustomPrivateKeyTests.customECDSACertAndKey.key,
            signatureAlgorithms: [.ecdsaSecp256R1Sha256],
            expectedChannel: b2b.server
        )
        let clientContext = self.configuredClientContext(
            trustRoot: CustomPrivateKeyTests.customECDSACertAndKey.certificate
        )
        let serverContext = self.configuredServerContext(
            certificate: CustomPrivateKeyTests.customECDSACertAndKey.certificate,
            privateKey: NIOSSLPrivateKey(customPrivateKey: happyPathKey)
        )

        XCTAssertNoThrow(
            try b2b.client.pipeline.syncOperations.addHandlers(
                [
                    try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost"),
                    HandshakeCompletedHandler(),
                ]
            )
        )
        XCTAssertNoThrow(
            try b2b.server.pipeline.syncOperations.addHandlers(
                [NIOSSLServerHandler(context: serverContext), HandshakeCompletedHandler()]
            )
        )
        XCTAssertNoThrow(try b2b.connectInMemory())

        // The handshake is parked on the pending signing promise.
        XCTAssertFalse(b2b.client.handshakeSucceeded)
        XCTAssertFalse(b2b.server.handshakeSucceeded)

        // Complete the promise.
        XCTAssertEqual(happyPathKey.pendingSigningEvents.count, 1)
        XCTAssertEqual(happyPathKey.pendingDecryptionEvents.count, 0)
        happyPathKey.pendingSigningEvents.first?.succeed(())

        // Nothing happens until we start doing I/O again.
        XCTAssertFalse(b2b.client.handshakeSucceeded)
        XCTAssertFalse(b2b.server.handshakeSucceeded)

        XCTAssertNoThrow(try b2b.interactInMemory())
        XCTAssertTrue(b2b.client.handshakeSucceeded)
        XCTAssertTrue(b2b.server.handshakeSucceeded)
    }

    func testHappyPathDelayedResultCustomRSAKeyPSS() throws {
        let b2b = BackToBackEmbeddedChannel()
        let happyPathKey = CustomKeyDelayedCompletion(
            CustomPrivateKeyTests.customRSACertAndKey.key,
            signatureAlgorithms: [.rsaPssRsaeSha256],
            expectedChannel: b2b.server
        )
        let clientContext = self.configuredClientContext(
            trustRoot: CustomPrivateKeyTests.customRSACertAndKey.certificate
        )
        let serverContext = self.configuredServerContext(
            certificate: CustomPrivateKeyTests.customRSACertAndKey.certificate,
            privateKey: NIOSSLPrivateKey(customPrivateKey: happyPathKey)
        )

        XCTAssertNoThrow(
            try b2b.client.pipeline.syncOperations.addHandlers(
                [
                    try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost"),
                    HandshakeCompletedHandler(),
                ]
            )
        )
        XCTAssertNoThrow(
            try b2b.server.pipeline.syncOperations.addHandlers(
                [NIOSSLServerHandler(context: serverContext), HandshakeCompletedHandler()]
            )
        )
        XCTAssertNoThrow(try b2b.connectInMemory())

        XCTAssertFalse(b2b.client.handshakeSucceeded)
        XCTAssertFalse(b2b.server.handshakeSucceeded)

        // Complete the promise.
        XCTAssertEqual(happyPathKey.pendingSigningEvents.count, 1)
        XCTAssertEqual(happyPathKey.pendingDecryptionEvents.count, 0)
        happyPathKey.pendingSigningEvents.first?.succeed(())

        // Nothing happens until we start doing I/O again.
        XCTAssertFalse(b2b.client.handshakeSucceeded)
        XCTAssertFalse(b2b.server.handshakeSucceeded)

        XCTAssertNoThrow(try b2b.interactInMemory())
        XCTAssertTrue(b2b.client.handshakeSucceeded)
        XCTAssertTrue(b2b.server.handshakeSucceeded)
    }

    func testHappyPathDelayedResultCustomRSAKeyPKCS1() throws {
        let b2b = BackToBackEmbeddedChannel()
        let happyPathKey = CustomKeyDelayedCompletion(
            CustomPrivateKeyTests.customRSACertAndKey.key,
            signatureAlgorithms: [.rsaPkcs1Sha256],
            expectedChannel: b2b.server
        )

        // Rule out TLSv1.3, which doesn't support RSA decryption.
        // We also want to force RSA key exchange, which will let us test the decrypt
        // function.
        let clientContext = self.configuredClientContext(
            trustRoot: CustomPrivateKeyTests.customRSACertAndKey.certificate,
            maxTLSVersion: .tlsv12,
            cipherSuites: [.TLS_RSA_WITH_AES_128_GCM_SHA256]
        )
        let serverContext = self.configuredServerContext(
            certificate: CustomPrivateKeyTests.customRSACertAndKey.certificate,
            privateKey: NIOSSLPrivateKey(customPrivateKey: happyPathKey)
        )

        XCTAssertNoThrow(
            try b2b.client.pipeline.syncOperations.addHandlers(
                [
                    try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost"),
                    HandshakeCompletedHandler(),
                ]
            )
        )
        XCTAssertNoThrow(
            try b2b.server.pipeline.syncOperations.addHandlers(
                [NIOSSLServerHandler(context: serverContext), HandshakeCompletedHandler()]
            )
        )
        XCTAssertNoThrow(try b2b.connectInMemory())

        XCTAssertFalse(b2b.client.handshakeSucceeded)
        XCTAssertFalse(b2b.server.handshakeSucceeded)

        // Complete the promise.
        XCTAssertEqual(happyPathKey.pendingSigningEvents.count, 0)
        XCTAssertEqual(happyPathKey.pendingDecryptionEvents.count, 1)
        happyPathKey.pendingDecryptionEvents.first?.succeed(())

        // Nothing happens until we start doing I/O again.
        XCTAssertFalse(b2b.client.handshakeSucceeded)
        XCTAssertFalse(b2b.server.handshakeSucceeded)

        XCTAssertNoThrow(try b2b.interactInMemory())
        XCTAssertTrue(b2b.client.handshakeSucceeded)
        XCTAssertTrue(b2b.server.handshakeSucceeded)
    }

    func testMismatchedKeys() throws {
        // We're going to generate another ECDSA key here, which we'll give to the backing code.
        let alternativeKey = generateSelfSignedCert(keygenFunction: { generateECPrivateKey() }).1
        let b2b = BackToBackEmbeddedChannel()
        let happyPathKey = CustomKeyImmediateResult(
            CustomPKEY(from: alternativeKey),
            signatureAlgorithms: [.ecdsaSecp256R1Sha256],
            expectedChannel: b2b.server
        )
        let clientContext = self.configuredClientContext(
            trustRoot: CustomPrivateKeyTests.customECDSACertAndKey.certificate
        )
        let serverContext = self.configuredServerContext(
            certificate: CustomPrivateKeyTests.customECDSACertAndKey.certificate,
            privateKey: NIOSSLPrivateKey(customPrivateKey: happyPathKey)
        )

        XCTAssertNoThrow(
            try b2b.client.pipeline.syncOperations.addHandler(
                try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost")
            )
        )
        XCTAssertNoThrow(
            try b2b.server.pipeline.syncOperations.addHandler(
                NIOSSLServerHandler(context: serverContext)
            )
        )

        // The signature won't verify against the certificate, so the handshake fails.
        XCTAssertThrowsError(try b2b.connectInMemory())
    }

    func testThrowingCustomErrorsSigning() throws {
        struct CustomError: Error {}

        let b2b = BackToBackEmbeddedChannel()
        let happyPathKey = CustomKeyDelayedCompletion(
            CustomPrivateKeyTests.customECDSACertAndKey.key,
            signatureAlgorithms: [.ecdsaSecp256R1Sha256],
            expectedChannel: b2b.server
        )
        let clientContext = self.configuredClientContext(
            trustRoot: CustomPrivateKeyTests.customECDSACertAndKey.certificate
        )
        let serverContext = self.configuredServerContext(
            certificate: CustomPrivateKeyTests.customECDSACertAndKey.certificate,
            privateKey: NIOSSLPrivateKey(customPrivateKey: happyPathKey)
        )

        XCTAssertNoThrow(
            try b2b.client.pipeline.syncOperations.addHandler(
                try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost")
            )
        )
        XCTAssertNoThrow(
            try b2b.server.pipeline.syncOperations.addHandler(
                NIOSSLServerHandler(context: serverContext)
            )
        )
        XCTAssertNoThrow(try b2b.connectInMemory())

        // Complete the promise.
        XCTAssertEqual(happyPathKey.pendingSigningEvents.count, 1)
        XCTAssertEqual(happyPathKey.pendingDecryptionEvents.count, 0)
        happyPathKey.pendingSigningEvents.first?.fail(CustomError())

        // The user's error must surface unchanged.
        XCTAssertThrowsError(try b2b.interactInMemory()) { error in
            XCTAssertTrue(error is CustomError)
        }
    }

    func testKeyEquatability() throws {
        let b2b = BackToBackEmbeddedChannel()
        let firstKey = CustomKeyDelayedCompletion(
            CustomPrivateKeyTests.customECDSACertAndKey.key,
            signatureAlgorithms: [.ecdsaSecp256R1Sha256],
            expectedChannel: b2b.server
        )
        // Should be non-equal to first
        let secondKey = CustomKeyDelayedCompletion(
            CustomPrivateKeyTests.customECDSACertAndKey.key,
            signatureAlgorithms: [.ecdsaSecp384R1Sha384],
            expectedChannel: b2b.server
        )
        // Different object to first, but same equatable, so should be equal
        let thirdKey = CustomKeyDelayedCompletion(
            CustomPrivateKeyTests.customECDSACertAndKey.key,
            signatureAlgorithms: [.ecdsaSecp256R1Sha256],
            expectedChannel: b2b.server
        )

        XCTAssertEqual(firstKey, thirdKey)
        XCTAssertNotEqual(firstKey, secondKey)
        XCTAssertEqual(NIOSSLPrivateKey(customPrivateKey: firstKey), NIOSSLPrivateKey(customPrivateKey: thirdKey))
        XCTAssertNotEqual(NIOSSLPrivateKey(customPrivateKey: firstKey), NIOSSLPrivateKey(customPrivateKey: secondKey))
    }

    func testKeyHashability() throws {
        let b2b = BackToBackEmbeddedChannel()
        let firstKey = CustomKeyDelayedCompletion(
            CustomPrivateKeyTests.customECDSACertAndKey.key,
            signatureAlgorithms: [.ecdsaSecp256R1Sha256],
            expectedChannel: b2b.server
        )
        // Should hash non-equal to first
        let secondKey = CustomKeyDelayedCompletion(
            CustomPrivateKeyTests.customECDSACertAndKey.key,
            signatureAlgorithms: [.ecdsaSecp384R1Sha384],
            expectedChannel: b2b.server
        )
        // Different object to first, but same hashable, so should hash the same
        let thirdKey = CustomKeyDelayedCompletion(
            CustomPrivateKeyTests.customECDSACertAndKey.key,
            signatureAlgorithms: [.ecdsaSecp256R1Sha256],
            expectedChannel: b2b.server
        )
        XCTAssertEqual(Set([firstKey, secondKey]), Set([firstKey,
secondKey, thirdKey])) XCTAssertEqual( Set([NIOSSLPrivateKey(customPrivateKey: firstKey), NIOSSLPrivateKey(customPrivateKey: secondKey)]), Set([ NIOSSLPrivateKey(customPrivateKey: firstKey), NIOSSLPrivateKey(customPrivateKey: secondKey), NIOSSLPrivateKey(customPrivateKey: thirdKey), ]) ) } func testSwitchFromNativeKeyToCustomKeyViaOverride() throws { // Cert A + native key A for the initial server config. let (certA, nativeKeyA) = generateSelfSignedCert(keygenFunction: { generateECPrivateKey(curveNID: NID_X9_62_prime256v1) }) // Cert B + key B for the override identity. let (certB, nativeKeyB) = generateSelfSignedCert(keygenFunction: { generateECPrivateKey(curveNID: NID_X9_62_prime256v1) }) let derivedKeyB = CustomPKEY(from: nativeKeyB) let b2b = BackToBackEmbeddedChannel() let customKey = CustomKeyImmediateResult( derivedKeyB, signatureAlgorithms: [.ecdsaSecp256R1Sha256], expectedChannel: b2b.server ) // Client trusts the override cert (cert B), not the initial cert (cert A). let clientContext = self.configuredClientContext(trustRoot: certB) // Server initial config uses cert A + native key A. var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(certA)], privateKey: .privateKey(nativeKeyA) ) // Override with cert B + custom key B. 
serverConfig.sslContextCallback = { _, promise in var `override` = NIOSSLContextConfigurationOverride() override.certificateChain = [.certificate(certB)] override.privateKey = .privateKey(NIOSSLPrivateKey(customPrivateKey: customKey)) promise.succeed(`override`) } let serverContext = try NIOSSLContext(configuration: serverConfig) XCTAssertNoThrow( try b2b.client.pipeline.syncOperations.addHandlers( [ try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost"), HandshakeCompletedHandler(), ] ) ) XCTAssertNoThrow( try b2b.server.pipeline.syncOperations.addHandlers( [NIOSSLServerHandler(context: serverContext), HandshakeCompletedHandler()] ) ) XCTAssertNoThrow(try b2b.connectInMemory()) XCTAssertEqual(customKey.signCallCount, 1) XCTAssertTrue(b2b.client.handshakeSucceeded) XCTAssertTrue(b2b.server.handshakeSucceeded) } func testCustomKeyDecryptViaSSLContextCallbackOverride() throws { // Same as testSwitchFromNativeKeyToCustomKeyViaOverride but forces TLS 1.2 // with RSA key exchange so BoringSSL hits the decrypt callback instead of sign. 
let (certA, nativeKeyA) = generateSelfSignedCert() let (certB, nativeKeyB) = generateSelfSignedCert() let derivedKeyB = CustomPKEY(from: nativeKeyB) let b2b = BackToBackEmbeddedChannel() let customKey = CustomKeyImmediateResult( derivedKeyB, signatureAlgorithms: [.rsaPkcs1Sha256], expectedChannel: b2b.server ) let clientContext = self.configuredClientContext( trustRoot: certB, maxTLSVersion: .tlsv12, cipherSuites: [.TLS_RSA_WITH_AES_128_GCM_SHA256] ) var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(certA)], privateKey: .privateKey(nativeKeyA) ) serverConfig.sslContextCallback = { _, promise in var `override` = NIOSSLContextConfigurationOverride() override.certificateChain = [.certificate(certB)] override.privateKey = .privateKey(NIOSSLPrivateKey(customPrivateKey: customKey)) promise.succeed(`override`) } let serverContext = try NIOSSLContext(configuration: serverConfig) XCTAssertNoThrow( try b2b.client.pipeline.syncOperations.addHandlers( [ try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost"), HandshakeCompletedHandler(), ] ) ) XCTAssertNoThrow( try b2b.server.pipeline.syncOperations.addHandlers( [NIOSSLServerHandler(context: serverContext), HandshakeCompletedHandler()] ) ) XCTAssertNoThrow(try b2b.connectInMemory()) XCTAssertEqual(customKey.signCallCount, 0) XCTAssertEqual(customKey.decryptCallCount, 1) XCTAssertTrue(b2b.client.handshakeSucceeded) XCTAssertTrue(b2b.server.handshakeSucceeded) } func testClientCustomKeyViaSSLContextCallbackOverride() throws { // mTLS: client starts with no cert/key, sslContextCallback provides a custom key. 
let (clientCert, clientNativeKey) = generateSelfSignedCert(keygenFunction: { generateECPrivateKey(curveNID: NID_X9_62_prime256v1) }) let clientDerivedKey = CustomPKEY(from: clientNativeKey) let (serverCert, serverKey) = generateSelfSignedCert(keygenFunction: { generateECPrivateKey(curveNID: NID_X9_62_prime256v1) }) let b2b = BackToBackEmbeddedChannel() let customKey = CustomKeyImmediateResult( clientDerivedKey, signatureAlgorithms: [.ecdsaSecp256R1Sha256], expectedChannel: b2b.client ) // Client starts with no cert/key; the override provides them. var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.certificateVerification = .noHostnameVerification clientConfig.trustRoots = .certificates([serverCert]) clientConfig.sslContextCallback = { _, promise in var `override` = NIOSSLContextConfigurationOverride() override.certificateChain = [.certificate(clientCert)] override.privateKey = .privateKey(NIOSSLPrivateKey(customPrivateKey: customKey)) promise.succeed(`override`) } let clientContext = try NIOSSLContext(configuration: clientConfig) // Server requires client certificate (mTLS). 
var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(serverCert)], privateKey: .privateKey(serverKey) ) serverConfig.certificateVerification = .noHostnameVerification serverConfig.trustRoots = .certificates([clientCert]) let serverContext = try NIOSSLContext(configuration: serverConfig) XCTAssertNoThrow( try b2b.client.pipeline.syncOperations.addHandlers( [ try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost"), HandshakeCompletedHandler(), ] ) ) XCTAssertNoThrow( try b2b.server.pipeline.syncOperations.addHandlers( [NIOSSLServerHandler(context: serverContext), HandshakeCompletedHandler()] ) ) XCTAssertNoThrow(try b2b.connectInMemory()) XCTAssertEqual(customKey.signCallCount, 1) XCTAssertEqual(customKey.decryptCallCount, 0) XCTAssertTrue(b2b.client.handshakeSucceeded) XCTAssertTrue(b2b.server.handshakeSucceeded) } func testSwitchFromCustomKeyToNativeKeyViaOverride() throws { // Server initial config uses cert A + custom key A. // The override switches to cert B + native key B. let (certA, nativeKeyA) = generateSelfSignedCert(keygenFunction: { generateECPrivateKey(curveNID: NID_X9_62_prime256v1) }) let derivedKeyA = CustomPKEY(from: nativeKeyA) let (certB, nativeKeyB) = generateSelfSignedCert(keygenFunction: { generateECPrivateKey(curveNID: NID_X9_62_prime256v1) }) let b2b = BackToBackEmbeddedChannel() let customKey = CustomKeyImmediateResult( derivedKeyA, signatureAlgorithms: [.ecdsaSecp256R1Sha256], expectedChannel: b2b.server ) // Client trusts the override cert (cert B). let clientContext = self.configuredClientContext(trustRoot: certB) // Server initial config uses cert A + custom key A. 
var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(certA)], privateKey: .privateKey(NIOSSLPrivateKey(customPrivateKey: customKey)) ) serverConfig.sslContextCallback = { _, promise in var `override` = NIOSSLContextConfigurationOverride() override.certificateChain = [.certificate(certB)] override.privateKey = .privateKey(nativeKeyB) promise.succeed(`override`) } let serverContext = try NIOSSLContext(configuration: serverConfig) XCTAssertNoThrow( try b2b.client.pipeline.syncOperations.addHandlers( [ try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost"), HandshakeCompletedHandler(), ] ) ) XCTAssertNoThrow( try b2b.server.pipeline.syncOperations.addHandlers( [NIOSSLServerHandler(context: serverContext), HandshakeCompletedHandler()] ) ) XCTAssertNoThrow(try b2b.connectInMemory()) // The custom key should NOT have been called. // Assume native sign was called because the handshake succeeded. XCTAssertEqual(customKey.signCallCount, 0) XCTAssertTrue(b2b.client.handshakeSucceeded) XCTAssertTrue(b2b.server.handshakeSucceeded) } func testDERBytes_DefaultImplementation_ReturnsEmptyArray() throws { let customKey = CustomKeyWithoutDERBytes() let key = NIOSSLPrivateKey(customPrivateKey: customKey) let derBytes = try key.derBytes XCTAssertEqual(derBytes, []) } func testDERBytes_ReturnsBytes() throws { let customKey = CustomKeyWithDERBytes() let key = NIOSSLPrivateKey(customPrivateKey: customKey) let derBytes = try key.derBytes XCTAssertEqual(derBytes, [42]) } } extension SignatureAlgorithm { var md: OpaquePointer { switch self { case .ecdsaSecp256R1Sha256: return CNIOBoringSSL_EVP_sha256() case .rsaPssRsaeSha256: return CNIOBoringSSL_EVP_sha256() case .rsaPkcs1Sha256: return CNIOBoringSSL_EVP_sha256() default: preconditionFailure() } } var rsaPadding: CInt? 
{ switch self { case .rsaPssRsaeSha256, .rsaPssRsaeSha384, .rsaPssRsaeSha512: return CInt(RSA_PKCS1_PSS_PADDING) case .rsaPkcs1Sha1, .rsaPkcs1Sha256, .rsaPkcs1Sha384, .rsaPkcs1Sha512: return CInt(RSA_PKCS1_PADDING) default: return nil } } var saltLen: CInt? { switch self { case .rsaPssRsaeSha256, .rsaPssRsaeSha384, .rsaPssRsaeSha512: // To BoringSSL, -1 means "salt length the size of the hash function". // This is what TLS 1.3 requires. return -1 default: return nil } } } extension EmbeddedChannel { fileprivate var handshakeSucceeded: Bool { let completedHandler = try! self.pipeline.syncOperations.handler(type: HandshakeCompletedHandler.self) return completedHandler.handshakeSucceeded } } ================================================ FILE: Tests/NIOSSLTests/IdentityVerificationTest.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import NIOCore import XCTest @testable import NIOSSL /// This cert contains the following SAN fields: /// DNS:*.WILDCARD.EXAMPLE.com - A straightforward wildcard, should be accepted /// DNS:FO*.EXAMPLE.com - A suffix wildcard, should be accepted /// DNS:*AR.EXAMPLE.com - A prefix wildcard, should be accepted /// DNS:B*Z.EXAMPLE.com - An infix wildcard /// DNS:TRAILING.PERIOD.EXAMPLE.com. - A domain with a trailing period, should match /// DNS:XN--STRAE-OQA.UNICODE.EXAMPLE.com. - An IDN A-label, should match. /// DNS:XN--X*-GIA.UNICODE.EXAMPLE.com. - An IDN A-label with a wildcard, invalid. /// DNS:WEIRDWILDCARD.*.EXAMPLE.com. 
- A wildcard not in the leftmost label, invalid.
/// DNS:*.*.DOUBLE.EXAMPLE.com. - Two wildcards, invalid.
/// DNS:*.XN--STRAE-OQA.EXAMPLE.com. - A wildcard followed by a new IDN A-label, this is fine.
/// A SAN with a null in it, should be ignored.
///
/// This also contains a commonName of httpbin.org.
private let weirdoPEMCert = """
-----BEGIN CERTIFICATE-----
MIICZjCCAgygAwIBAgIURNa5MCGhhy1TUo57ogfm5OvVBr8wCgYIKoZIzj0EAwIw
FjEUMBIGA1UEAwwLaHR0cGJpbi5vcmcwHhcNMjQwNTEzMTI1MjUwWhcNNDAwMTAx
MDAwMDAwWjAWMRQwEgYDVQQDDAtodHRwYmluLm9yZzBZMBMGByqGSM49AgEGCCqG
SM49AwEHA0IABHC44jasAWsWYtYdo+cnLOAEuMQHt1zI5A7td2avNIHEfEXqiizj
t1VPWYR6wbL/X7ZXb7IjED8v5ZeN/yK0jpGjggE2MIIBMjAJBgNVHRMEAjAAMIIB
IwYDVR0RBIIBGjCCARaCFiouV0lMRENBUkQuRVhBTVBMRS5jb22CD0ZPKi5FWEFN
UExFLmNvbYIPKkFSLkVYQU1QTEUuY29tgg9CKlouRVhBTVBMRS5jb22CHFRSQUlM
SU5HLlBFUklPRC5FWEFNUExFLmNvbS6CIlhOLS1TVFJBRS1PUUEuVU5JQ09ERS5F
WEFNUExFLmNvbS6CH1hOLS1YKi1HSUEuVU5JQ09ERS5FWEFNUExFLmNvbS6CHFdF
SVJEV0lMRENBUkQuKi5FWEFNUExFLmNvbS6CFyouKi5ET1VCTEUuRVhBTVBMRS5j
b20ughwqLlhOLS1TVFJBRS1PUUEuRVhBTVBMRS5jb20ughFOVUwATC5FWEFNUExF
LmNvbTAKBggqhkjOPQQDAgNIADBFAiEAoZP9/AT/kI4XV9ComU/3TOBavn2HT4KJ
GLTqsl138zwCIFAGdxsBH3CGfuFNYXOdYZOJ/FIqv7Ev0eGxXvTZ+bcs
-----END CERTIFICATE-----
"""

/// Returns whether this system supports resolving IPv6 addresses.
func ipv6Supported() throws -> Bool {
    do {
        // Probe with a fixed literal from the IPv6 documentation prefix (2001:db8::/32);
        // this only checks the system can represent IPv6 addresses, no traffic is sent.
        _ = try SocketAddress.makeAddressResolvingHost("2001:db8::1", port: 443)
        return true
    } catch SocketAddressError.unknown {
        // The resolver could not handle the IPv6 literal: treat IPv6 as unsupported.
        return false
    }
}

/// Tests for `validIdentityForService`, which decides whether a leaf certificate's
/// identity (its SAN entries, falling back to the commonName) matches the hostname
/// or socket address that the connection targeted.
///
/// `weirdoPEMCert` is declared above; the other PEM fixtures (`multiSanCert`,
/// `multiCNCert`, `unicodeCNCert`, `noCNCert`) are declared elsewhere in this
/// test module.
class IdentityVerificationTest: XCTestCase {
    func testCanValidateHostnameInFirstSan() throws {
        let cert = try NIOSSLCertificate(bytes: .init(multiSanCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "localhost",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testCanValidateHostnameInSecondSan() throws {
        let cert = try NIOSSLCertificate(bytes: .init(multiSanCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "example.com",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testIgnoresTrailingPeriod() throws {
        // A fully-qualified query name ("example.com.") must still match the SAN.
        let cert = try NIOSSLCertificate(bytes: .init(multiSanCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "example.com.",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testLowercasesHostnameForSan() throws {
        // Hostname matching is case-insensitive.
        let cert = try NIOSSLCertificate(bytes: .init(multiSanCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "LoCaLhOsT",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testRejectsIncorrectHostname() throws {
        let cert = try NIOSSLCertificate(bytes: .init(multiSanCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "httpbin.org",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertFalse(matched)
    }

    func testAcceptsIpv4Address() throws {
        // With no server hostname, matching considers the connected IP address.
        let cert = try NIOSSLCertificate(bytes: .init(multiSanCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: nil,
            socketAddress: try .init(ipAddress: "192.168.0.1", port: 443),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testAcceptsIpv6Address() throws {
        // Skip quietly on hosts without IPv6 support.
        guard try ipv6Supported() else { return }
        let ipv6Address = try SocketAddress(ipAddress: "2001:db8::1", port: 443)
        let cert = try NIOSSLCertificate(bytes: .init(multiSanCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: nil,
            socketAddress: ipv6Address,
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testRejectsIncorrectIpv4Address() throws {
        let cert = try NIOSSLCertificate(bytes: .init(multiSanCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: nil,
            socketAddress: try .init(ipAddress: "192.168.0.2", port: 443),
            leafCertificate: cert
        )
        XCTAssertFalse(matched)
    }

    func testRejectsIncorrectIpv6Address() throws {
        guard try ipv6Supported() else { return }
        let ipv6Address = try SocketAddress(ipAddress: "2001:db8::2", port: 443)
        let cert = try NIOSSLCertificate(bytes: .init(multiSanCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: nil,
            socketAddress: ipv6Address,
            leafCertificate: cert
        )
        XCTAssertFalse(matched)
    }

    func testAcceptsWildcards() throws {
        // Matches the DNS:*.WILDCARD.EXAMPLE.com SAN.
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "this.wildcard.example.com",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testAcceptsSuffixWildcard() throws {
        // Matches the DNS:FO*.EXAMPLE.com SAN.
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "foo.example.com",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testAcceptsPrefixWildcard() throws {
        // Matches the DNS:*AR.EXAMPLE.com SAN.
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "bar.example.com",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testAcceptsInfixWildcard() throws {
        // Matches the DNS:B*Z.EXAMPLE.com SAN.
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "baz.example.com",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testIgnoresTrailingPeriodInCert() throws {
        // Here the SAN itself carries the trailing period; it must still match.
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "trailing.period.example.com",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testRejectsEncodedIDNALabel() throws {
        // A hostname containing non-ASCII (a U-label) is not a valid DNS name
        // for matching purposes: the call must throw, not merely return false.
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        XCTAssertThrowsError(
            try validIdentityForService(
                serverHostname: "straße.unicode.example.com",
                socketAddress: try .init(unixDomainSocketPath: "/path"),
                leafCertificate: cert
            )
        ) { error in
            XCTAssertEqual(error as? NIOSSLExtraError, .serverHostnameImpossibleToMatch)
            XCTAssertEqual(
                String(describing: error),
                "NIOSSLExtraError.serverHostnameImpossibleToMatch: The server hostname straße.unicode.example.com cannot be matched due to containing non-DNS characters"
            )
        }
    }

    func testMatchesUnencodedIDNALabel() throws {
        // The A-label (punycode) form of the same name is plain DNS and matches.
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "xn--strae-oqa.unicode.example.com",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testDoesNotMatchIDNALabelWithWildcard() throws {
        // A wildcard inside an A-label (DNS:XN--X*-GIA...) is invalid per the fixture.
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "xn--xx-gia.unicode.example.com",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertFalse(matched)
    }

    func testDoesNotMatchNonLeftmostWildcards() throws {
        // Only the leftmost label may contain a wildcard (DNS:WEIRDWILDCARD.*...).
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "weirdwildcard.nomatch.example.com",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertFalse(matched)
    }

    func testDoesNotMatchMultipleWildcards() throws {
        // DNS:*.*.DOUBLE.EXAMPLE.com. is invalid and must not match.
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "one.two.double.example.com",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertFalse(matched)
    }

    func testRejectsWildcardBeforeUnencodedIDNALabel() throws {
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        XCTAssertThrowsError(
            try validIdentityForService(
                serverHostname: "foo.straße.example.com",
                socketAddress: try .init(unixDomainSocketPath: "/path"),
                leafCertificate: cert
            )
        ) { error in
            XCTAssertEqual(error as? NIOSSLExtraError, .serverHostnameImpossibleToMatch)
            XCTAssertEqual(
                String(describing: error),
                "NIOSSLExtraError.serverHostnameImpossibleToMatch: The server hostname foo.straße.example.com cannot be matched due to containing non-DNS characters"
            )
        }
    }

    func testMatchesWildcardBeforeEncodedIDNALabel() throws {
        // DNS:*.XN--STRAE-OQA.EXAMPLE.com. — wildcard followed by an A-label is fine.
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "foo.xn--strae-oqa.example.com",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testDoesNotMatchSANWithEmbeddedNULL() throws {
        // A hostname containing a NUL byte is not a valid DNS name; the call must throw.
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        XCTAssertThrowsError(
            try validIdentityForService(
                serverHostname: "nul\u{0000}l.example.com",
                socketAddress: try .init(unixDomainSocketPath: "/path"),
                leafCertificate: cert
            )
        ) { error in
            XCTAssertEqual(error as? NIOSSLExtraError, .serverHostnameImpossibleToMatch)
            XCTAssertEqual(
                String(describing: error),
                "NIOSSLExtraError.serverHostnameImpossibleToMatch: The server hostname nul\u{0000}l.example.com cannot be matched due to containing non-DNS characters"
            )
        }
    }

    func testFallsBackToCommonName() throws {
        // multiCNCert carries its identity in the commonName; matching falls back to CN.
        let cert = try NIOSSLCertificate(bytes: .init(multiCNCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "localhost",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testLowercasesForCommonName() throws {
        // CN matching is case-insensitive, like SAN matching.
        let cert = try NIOSSLCertificate(bytes: .init(multiCNCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "LoCaLhOsT",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertTrue(matched)
    }

    func testRejectsUnicodeCommonNameWithUnencodedIDNALabel() throws {
        let cert = try NIOSSLCertificate(bytes: .init(unicodeCNCert.utf8), format: .pem)
        XCTAssertThrowsError(
            try validIdentityForService(
                serverHostname: "straße.org",
                socketAddress: try .init(unixDomainSocketPath: "/path"),
                leafCertificate: cert
            )
        ) { error in
            XCTAssertEqual(error as? NIOSSLExtraError, .serverHostnameImpossibleToMatch)
            XCTAssertEqual(
                String(describing: error),
                "NIOSSLExtraError.serverHostnameImpossibleToMatch: The server hostname straße.org cannot be matched due to containing non-DNS characters"
            )
        }
    }

    func testRejectsUnicodeCommonNameWithEncodedIDNALabel() throws {
        // The punycode query does not match a CN stored in Unicode form.
        let cert = try NIOSSLCertificate(bytes: .init(unicodeCNCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "xn--strae-oqa.org",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertFalse(matched)
    }

    func testHandlesMissingCommonName() throws {
        // No SANs and no CN: nothing to match, but no crash either.
        let cert = try NIOSSLCertificate(bytes: .init(noCNCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "localhost",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertFalse(matched)
    }

    func testDoesNotFallBackToCNWithSans() throws {
        // weirdoPEMCert has SAN entries and CN=httpbin.org; when SANs are present
        // the CN must be ignored, so this lookup must not match.
        let cert = try NIOSSLCertificate(bytes: .init(weirdoPEMCert.utf8), format: .pem)
        let matched = try validIdentityForService(
            serverHostname: "httpbin.org",
            socketAddress: try .init(unixDomainSocketPath: "/path"),
            leafCertificate: cert
        )
        XCTAssertFalse(matched)
    }
}


================================================
FILE: Tests/NIOSSLTests/NIOSSLALPNTest.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc.
and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// @_implementationOnly import CNIOBoringSSL import NIOCore import NIOPosix import NIOSSL import NIOTLS import XCTest class NIOSSLALPNTest: XCTestCase { private func configuredSSLContextWithAlpnProtocols(protocols: [String]) throws -> NIOSSLContext { var config = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(NIOSSLIntegrationTest.cert)], privateKey: .privateKey(NIOSSLIntegrationTest.key) ) config.trustRoots = .certificates([NIOSSLIntegrationTest.cert]) config.applicationProtocols = protocols return try NIOSSLContext(configuration: config) } private func assertNegotiatedProtocol( protocol: String?, serverContext: NIOSSLContext, clientContext: NIOSSLContext ) throws { let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let completionPromise: EventLoopPromise = group.next().makePromise() let serverHandler = EventRecorderHandler() let serverChannel = try serverTLSChannel( context: serverContext, handlers: [serverHandler, PromiseOnReadHandler(promise: completionPromise)], group: group ) defer { XCTAssertNoThrow(try serverChannel.close().wait()) } let clientChannel = try clientTLSChannel( context: clientContext, preHandlers: [], postHandlers: [], group: group, connectingTo: serverChannel.localAddress! 
) defer { XCTAssertNoThrow(try clientChannel.close().wait()) } var originalBuffer = clientChannel.allocator.buffer(capacity: 5) originalBuffer.writeString("Hello") try clientChannel.writeAndFlush(originalBuffer).wait() _ = try completionPromise.futureResult.wait() let expectedEvents: [EventRecorderHandler.RecordedEvents] = [ .Registered, .Active, .UserEvent(TLSUserEvent.handshakeCompleted(negotiatedProtocol: `protocol`)), .Read, .ReadComplete, ] XCTAssertEqual(expectedEvents, serverHandler.events) } func testBasicALPNNegotiation() throws { let context: NIOSSLContext context = try assertNoThrowWithValue(configuredSSLContextWithAlpnProtocols(protocols: ["h2", "http/1.1"])) XCTAssertNoThrow(try assertNegotiatedProtocol(protocol: "h2", serverContext: context, clientContext: context)) } func testBasicALPNNegotiationPrefersServerPriority() throws { let serverCtx: NIOSSLContext let clientCtx: NIOSSLContext serverCtx = try assertNoThrowWithValue(configuredSSLContextWithAlpnProtocols(protocols: ["h2", "http/1.1"])) clientCtx = try assertNoThrowWithValue(configuredSSLContextWithAlpnProtocols(protocols: ["http/1.1", "h2"])) XCTAssertNoThrow( try assertNegotiatedProtocol(protocol: "h2", serverContext: serverCtx, clientContext: clientCtx) ) } func testBasicALPNNegotiationNoOverlap() throws { let serverCtx: NIOSSLContext let clientCtx: NIOSSLContext serverCtx = try assertNoThrowWithValue(configuredSSLContextWithAlpnProtocols(protocols: ["h2", "http/1.1"])) clientCtx = try assertNoThrowWithValue(configuredSSLContextWithAlpnProtocols(protocols: ["spdy/3", "webrtc"])) XCTAssertNoThrow( try assertNegotiatedProtocol(protocol: nil, serverContext: serverCtx, clientContext: clientCtx) ) } func testBasicALPNNegotiationNotOfferedByClient() throws { let serverCtx: NIOSSLContext let clientCtx: NIOSSLContext serverCtx = try assertNoThrowWithValue(configuredSSLContextWithAlpnProtocols(protocols: ["h2", "http/1.1"])) clientCtx = try 
assertNoThrowWithValue(configuredSSLContextWithAlpnProtocols(protocols: [])) XCTAssertNoThrow( try assertNegotiatedProtocol(protocol: nil, serverContext: serverCtx, clientContext: clientCtx) ) } func testBasicALPNNegotiationNotSupportedByServer() throws { let serverCtx: NIOSSLContext let clientCtx: NIOSSLContext serverCtx = try assertNoThrowWithValue(configuredSSLContextWithAlpnProtocols(protocols: [])) clientCtx = try assertNoThrowWithValue(configuredSSLContextWithAlpnProtocols(protocols: ["h2", "http/1.1"])) XCTAssertNoThrow( try assertNegotiatedProtocol(protocol: nil, serverContext: serverCtx, clientContext: clientCtx) ) } } ================================================ FILE: Tests/NIOSSLTests/NIOSSLIntegrationTest.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// @_implementationOnly import CNIOBoringSSL import NIOConcurrencyHelpers import NIOCore import NIOEmbedded import NIOPosix import NIOTLS import XCTest @testable import NIOSSL public func assertNoThrowWithValue( _ body: @autoclosure () throws -> T, defaultValue: T? 
= nil, file: StaticString = #filePath, line: UInt = #line ) throws -> T { do { return try body() } catch { XCTFail("unexpected error \(error) thrown", file: (file), line: line) if let defaultValue = defaultValue { return defaultValue } else { throw error } } } internal func interactInMemory( clientChannel: EmbeddedChannel, serverChannel: EmbeddedChannel, runLoops: Bool = true ) throws { var workToDo = true while workToDo { workToDo = false if runLoops { clientChannel.embeddedEventLoop.run() serverChannel.embeddedEventLoop.run() } let clientDatum = try clientChannel.readOutbound(as: IOData.self) let serverDatum = try serverChannel.readOutbound(as: IOData.self) if let clientMsg = clientDatum { try serverChannel.writeInbound(clientMsg) workToDo = true } if let serverMsg = serverDatum { try clientChannel.writeInbound(serverMsg) workToDo = true } } } private final class SimpleEchoServer: ChannelInboundHandler, Sendable { public typealias InboundIn = ByteBuffer public typealias OutboundOut = ByteBuffer public func channelRead(context: ChannelHandlerContext, data: NIOAny) { context.write(data, promise: nil) context.fireChannelRead(data) } public func channelReadComplete(context: ChannelHandlerContext) { context.flush() context.fireChannelReadComplete() } } internal final class PromiseOnReadHandler: ChannelInboundHandler { public typealias InboundIn = ByteBuffer public typealias OutboundOut = ByteBuffer private let promise: EventLoopPromise private var data: NIOAny? 
/// Fulfils the given promise once the channel becomes inactive.
///
/// Used by tests that need to observe full connection teardown.
private final class ChannelInactiveHandler: ChannelInboundHandler, Sendable {
    typealias InboundIn = ByteBuffer

    private let promise: EventLoopPromise<Void>

    init(promise: EventLoopPromise<Void>) {
        self.promise = promise
    }

    func channelInactive(context: ChannelHandlerContext) {
        self.promise.succeed()
        // Bug fix: this previously forwarded `fireChannelActive()` from
        // `channelInactive`, re-announcing activity on a channel that is
        // going away. Forward the matching inactive event instead.
        context.fireChannelInactive()
    }
}
/// Accumulates every inbound `ByteBuffer` into a single buffer and delivers
/// the concatenation via `completePromise` when the channel goes inactive.
/// Note: reads are deliberately not forwarded down the pipeline.
private final class ReadRecordingHandler: ChannelInboundHandler {
    public typealias InboundIn = ByteBuffer

    private var received: ByteBuffer?
    private let completePromise: EventLoopPromise<ByteBuffer>

    init(completePromise: EventLoopPromise<ByteBuffer>) {
        self.completePromise = completePromise
    }

    func channelRead(context: ChannelHandlerContext, data: NIOAny) {
        var incoming = self.unwrapInboundIn(data)
        if self.received == nil {
            self.received = incoming
        } else {
            self.received!.writeBuffer(&incoming)
        }
    }

    func channelInactive(context: ChannelHandlerContext) {
        // An empty buffer stands in for "nothing was ever read".
        let collected = self.received ?? context.channel.allocator.buffer(capacity: 0)
        self.completePromise.succeed(collected)
    }
}
// Equality for recorded events. `ErrorCaught` is intentionally not
// recorded because `Error` is not `Equatable`, so it cannot participate
// in a sensible `==` here.
public static func == (lhs: RecordedEvents, rhs: RecordedEvents) -> Bool {
    switch (lhs, rhs) {
    case (.UserEvent(let left), .UserEvent(let right)):
        // User events compare by their payload.
        return left == right
    case (.Registered, .Registered),
        (.Unregistered, .Unregistered),
        (.Active, .Active),
        (.Inactive, .Inactive),
        (.Read, .Read),
        (.ReadComplete, .ReadComplete),
        (.WritabilityChanged, .WritabilityChanged):
        return true
    default:
        return false
    }
}
/// A channel handler that buffers every write it receives instead of passing
/// it on. Useful in tests that must control exactly when writes propagate
/// through the system.
///
/// You must call `forceFlush()` to release the buffered writes, or the test
/// will hang waiting on their promises.
private class WriteDelayHandler: ChannelOutboundHandler {
    public typealias OutboundIn = Any
    public typealias OutboundOut = Any

    private var writes: [(ChannelHandlerContext, NIOAny, EventLoopPromise<Void>?)] = []

    func write(context: ChannelHandlerContext, data: NIOAny, promise: EventLoopPromise<Void>?) {
        self.writes.append((context, data, promise))
    }

    func forceFlush() {
        // Swap the queue out first so re-entrant writes start a fresh batch.
        let queued = self.writes
        self.writes = []
        for (context, data, promise) in queued {
            context.writeAndFlush(data, promise: promise)
        }
    }
}
/// Boots a TLS server on 127.0.0.1 with an ephemeral port, installing
/// `preHandlers`, then the SSL server handler, then `postHandlers` on each
/// accepted child channel. Returns the bound server channel.
internal func serverTLSChannel(
    context: NIOSSLContext,
    preHandlers: @autoclosure @escaping @Sendable () -> [ChannelHandler],
    postHandlers: @autoclosure @escaping @Sendable () -> [ChannelHandler],
    group: EventLoopGroup,
    customVerificationCallback: (
        @Sendable ([NIOSSLCertificate], EventLoopPromise<NIOSSLVerificationResult>) -> Void
    )? = nil,
    file: StaticString = #filePath,
    line: UInt = #line
) throws -> Channel {
    try assertNoThrowWithValue(
        ServerBootstrap(group: group)
            .serverChannelOption(ChannelOptions.socket(SocketOptionLevel(SOL_SOCKET), SO_REUSEADDR), value: 1)
            .childChannelInitializer { channel in
                channel.eventLoop.makeCompletedFuture {
                    let sync = channel.pipeline.syncOperations
                    try sync.addHandlers(preHandlers())

                    // Only use the verifying initializer when a callback was supplied.
                    let sslHandler: NIOSSLHandler
                    if let verify = customVerificationCallback {
                        sslHandler = NIOSSLServerHandler(context: context, customVerificationCallback: verify)
                    } else {
                        sslHandler = NIOSSLServerHandler(context: context)
                    }
                    try sync.addHandler(sslHandler)

                    try sync.addHandlers(postHandlers())
                }
            }.bind(host: "127.0.0.1", port: 0).wait(),
        file: file,
        line: line
    )
}
= nil, file: StaticString = #filePath, line: UInt = #line ) throws -> Channel { func tlsFactory() -> NIOSSLClientTLSProvider { try! .init( context: context, serverHostname: serverHostname, additionalPeerCertificateVerificationCallback: additionalPeerCertificateVerificationCallback ) } return try _clientTLSChannel( context: context, preHandlers: preHandlers, postHandlers: postHandlers, group: group, connectingTo: connectingTo, tlsFactory: tlsFactory ) } @available(*, deprecated, message: "just for testing the deprecated functionality") private struct DeprecatedTLSProviderForTests: NIOClientTLSProvider, Sendable { public typealias Bootstrap = Bootstrap let context: NIOSSLContext let serverHostname: String? let verificationCallback: @Sendable (NIOSSLVerificationResult, NIOSSLCertificate) -> NIOSSLVerificationResult @available(*, deprecated, renamed: "init(context:serverHostname:customVerificationCallback:)") public init( context: NIOSSLContext, serverHostname: String?, verificationCallback: @escaping @Sendable (NIOSSLVerificationResult, NIOSSLCertificate) -> NIOSSLVerificationResult ) { self.context = context self.serverHostname = serverHostname self.verificationCallback = verificationCallback } public func enableTLS(_ bootstrap: Bootstrap) -> Bootstrap { bootstrap.protocolHandlers { [context, serverHostname, verificationCallback] in // NIOSSLClientHandler.init only throws because of `malloc` error and invalid SNI hostnames. We want to crash // on malloc error and we pre-checked the SNI hostname in `init` so that should be impossible here. [ try! 
NIOSSLClientHandler( context: context, serverHostname: serverHostname, verificationCallback: verificationCallback ) ] } } } @available( *, deprecated, renamed: "clientTLSChannel(context:preHandlers:postHandlers:group:connectingTo:serverHostname:customVerificationCallback:file:line:)" ) internal func clientTLSChannel( context: NIOSSLContext, preHandlers: @autoclosure @escaping @Sendable () -> [ChannelHandler], postHandlers: @autoclosure @escaping @Sendable () -> [ChannelHandler], group: EventLoopGroup, connectingTo: SocketAddress, serverHostname: String? = nil, verificationCallback: @escaping @Sendable (NIOSSLVerificationResult, NIOSSLCertificate) -> NIOSSLVerificationResult, file: StaticString = #filePath, line: UInt = #line ) throws -> Channel { func tlsFactory() -> DeprecatedTLSProviderForTests { .init(context: context, serverHostname: serverHostname, verificationCallback: verificationCallback) } return try _clientTLSChannel( context: context, preHandlers: preHandlers, postHandlers: postHandlers, group: group, connectingTo: connectingTo, tlsFactory: tlsFactory ) } internal func clientTLSChannel( context: NIOSSLContext, preHandlers: @autoclosure @escaping @Sendable () -> [ChannelHandler], postHandlers: @autoclosure @escaping @Sendable () -> [ChannelHandler], group: EventLoopGroup, connectingTo: SocketAddress, serverHostname: String? = nil, customVerificationCallback: CustomCallback, file: StaticString = #filePath, line: UInt = #line ) throws -> Channel { func tlsFactory() -> NIOSSLClientTLSProvider { try! 
/// Error delivered when a future does not complete within its deadline.
struct EventLoopFutureTimeoutError: Error {}

extension EventLoopFuture {
    /// Returns a future that mirrors `self`, but fails with
    /// `EventLoopFutureTimeoutError` if `self` has not completed within
    /// `failDelay`. Whichever completion arrives first wins the race.
    func timeout(after failDelay: TimeAmount) -> EventLoopFuture<Value> {
        let racePromise = self.eventLoop.makePromise(of: Value.self)

        self.whenComplete { outcome in
            switch outcome {
            case .failure(let error):
                racePromise.fail(error)
            case .success(let value):
                racePromise.assumeIsolated().succeed(value)
            }
        }

        // Scheduled after the completion hook, exactly as before; if the
        // future already completed this fail is simply too late to matter.
        self.eventLoop.scheduleTask(in: failDelay) {
            racePromise.fail(EventLoopFutureTimeoutError())
        }

        return racePromise.futureResult
    }
}
body: (String) throws -> ReturnType ) throws -> ReturnType { let path = try keyInFile( key: NIOSSLIntegrationTest.key, passphrase: "thisisagreatpassword" ) defer { unlink(path) } return try body(path) } private func configuredSSLContext( keyLogCallback: NIOSSLKeyLogCallback? = nil, file: StaticString = #filePath, line: UInt = #line ) throws -> NIOSSLContext { var config = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(NIOSSLIntegrationTest.cert)], privateKey: .privateKey(NIOSSLIntegrationTest.key) ) config.trustRoots = .certificates([NIOSSLIntegrationTest.cert]) config.keyLogCallback = keyLogCallback return try assertNoThrowWithValue(NIOSSLContext(configuration: config), file: file, line: line) } private func configuredClientContext( file: StaticString = #filePath, line: UInt = #line ) throws -> NIOSSLContext { var config = TLSConfiguration.makeClientConfiguration() config.trustRoots = .certificates([NIOSSLIntegrationTest.cert]) return try assertNoThrowWithValue( NIOSSLContext( configuration: config, callbackManager: nil ), file: file, line: line ) } static func keyInFile(key: NIOSSLPrivateKey, passphrase: String) throws -> String { let fileName = try makeTemporaryFile(fileExtension: ".pem") let tempFile = open(fileName, O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC, 0o644) precondition(tempFile > 1, String(cString: strerror(errno))) let fileBio = CNIOBoringSSL_BIO_new_fp(fdopen(tempFile, "w+"), BIO_CLOSE) precondition(fileBio != nil) let manager = BoringSSLPassphraseCallbackManager { closure in closure(passphrase.utf8) } let rc = withExtendedLifetime(manager) { manager -> CInt in let userData = Unmanaged.passUnretained(manager).toOpaque() return key.withUnsafeMutableEVPPKEYPointer { ref in CNIOBoringSSL_PEM_write_bio_PrivateKey( fileBio, ref, CNIOBoringSSL_EVP_aes_256_cbc(), nil, 0, globalBoringSSLPassphraseCallback, userData ) } } CNIOBoringSSL_BIO_free(fileBio) precondition(rc == 1) return fileName } func withTrustBundleInFile(tempFile 
fileName: inout String?, fn: (String) throws -> T) throws -> T { fileName = try makeTemporaryFile() guard let fileName = fileName else { fatalError("couldn't make temp file") } let tempFile = fileName.withCString { ptr in open(ptr, O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC, 0o644) } precondition(tempFile > 1, String(cString: strerror(errno))) let fileBio = CNIOBoringSSL_BIO_new_fp(fdopen(tempFile, "w+"), BIO_CLOSE) precondition(fileBio != nil) let rc = NIOSSLIntegrationTest.cert.withUnsafeMutableX509Pointer { ref in CNIOBoringSSL_PEM_write_bio_X509(fileBio, ref) } CNIOBoringSSL_BIO_free(fileBio) precondition(rc == 1) return try fn(fileName) } func testSimpleEcho() throws { let context = try configuredSSLContext() let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let completionPromise: EventLoopPromise = group.next().makePromise() let serverChannel: Channel = try serverTLSChannel( context: context, handlers: [SimpleEchoServer()], group: group ) defer { XCTAssertNoThrow(try serverChannel.close().wait()) } let clientChannel = try clientTLSChannel( context: context, preHandlers: [], postHandlers: [PromiseOnReadHandler(promise: completionPromise)], group: group, connectingTo: serverChannel.localAddress! 
) defer { XCTAssertNoThrow(try clientChannel.close().wait()) } var originalBuffer = clientChannel.allocator.buffer(capacity: 5) originalBuffer.writeString("Hello") try clientChannel.writeAndFlush(originalBuffer).wait() let newBuffer = try completionPromise.futureResult.wait() XCTAssertEqual(newBuffer, originalBuffer) } func testHandshakeEventSequencing() throws { let context = try configuredSSLContext() let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let readComplete: EventLoopPromise = group.next().makePromise() let serverHandler: EventRecorderHandler = EventRecorderHandler() let serverChannel = try serverTLSChannel( context: context, handlers: [serverHandler, PromiseOnReadHandler(promise: readComplete)], group: group ) defer { XCTAssertNoThrow(try serverChannel.close().wait()) } let clientChannel = try clientTLSChannel( context: context, preHandlers: [], postHandlers: [SimpleEchoServer()], group: group, connectingTo: serverChannel.localAddress! ) defer { XCTAssertNoThrow(try clientChannel.close().wait()) } var originalBuffer = clientChannel.allocator.buffer(capacity: 5) originalBuffer.writeString("Hello") try clientChannel.writeAndFlush(originalBuffer).wait() _ = try readComplete.futureResult.wait() // Ok, the channel is connected and we have written data to it. This means the TLS handshake is // done. Check the events. // TODO(cory): How do we wait until the read is done? Ideally we'd like to re-use the // PromiseOnReadHandler, but we need to get it into the pipeline first. Not sure how yet. Come back to me. // Maybe update serverTLSChannel to take an array of channel handlers? 
let expectedEvents: [EventRecorderHandler.RecordedEvents] = [ .Registered, .Active, .UserEvent(TLSUserEvent.handshakeCompleted(negotiatedProtocol: nil)), .Read, .ReadComplete, ] XCTAssertEqual(expectedEvents, serverHandler.events) } func testShutdownEventSequencing() throws { let context = try configuredSSLContext() let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let readComplete: EventLoopPromise = group.next().makePromise() let serverHandler: EventRecorderHandler = EventRecorderHandler() let serverChannel = try serverTLSChannel( context: context, handlers: [serverHandler, PromiseOnReadHandler(promise: readComplete)], group: group ) let clientChannel = try clientTLSChannel( context: context, preHandlers: [], postHandlers: [SimpleEchoServer()], group: group, connectingTo: serverChannel.localAddress! ) var originalBuffer = clientChannel.allocator.buffer(capacity: 5) originalBuffer.writeString("Hello") try clientChannel.writeAndFlush(originalBuffer).wait() // Ok, we want to wait for the read to finish, then close the server and client connections. 
_ = try readComplete.futureResult.flatMap { (_: ByteBuffer) in serverChannel.close() }.flatMap { clientChannel.close() }.wait() let expectedEvents: [EventRecorderHandler.RecordedEvents] = [ .Registered, .Active, .UserEvent(TLSUserEvent.handshakeCompleted(negotiatedProtocol: nil)), .Read, .ReadComplete, .UserEvent(TLSUserEvent.shutdownCompleted), .Inactive, .Unregistered, ] XCTAssertEqual(expectedEvents, serverHandler.events) } func testSubsequentWritesFailAfterCloseModeOutput() throws { let context = try configuredSSLContext() let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let completionPromise: EventLoopPromise = group.next().makePromise() let serverChannel: Channel = try ServerBootstrap(group: group) .childChannelOption(ChannelOptions.allowRemoteHalfClosure, value: true) // Important! .serverChannelOption(ChannelOptions.socket(SocketOptionLevel(SOL_SOCKET), SO_REUSEADDR), value: 1) .childChannelInitializer { channel in channel.eventLoop.makeCompletedFuture { try channel.pipeline.syncOperations.addHandlers( NIOSSLServerHandler(context: context), SimpleEchoServer() ) } } .bind(host: "127.0.0.1", port: 0) .wait() defer { XCTAssertNoThrow(try serverChannel.close().wait()) } let clientChannel = try clientTLSChannel( context: context, preHandlers: [], postHandlers: [ PromiseOnReadHandler(promise: completionPromise) ], group: group, connectingTo: serverChannel.localAddress! ) defer { XCTAssertNoThrow(try clientChannel.close().wait()) } var buffer = clientChannel.allocator.buffer(capacity: 5) buffer.writeString("Hello") XCTAssertNoThrow(try clientChannel.writeAndFlush(buffer).wait()) XCTAssertNoThrow(try clientChannel.close(mode: .output).wait()) XCTAssertThrowsError(try clientChannel.writeAndFlush(buffer).wait()) { error in XCTAssertEqual(.outputClosed, error as? 
ChannelError) } } func testCloseModeOutputServerAndClient() throws { let context = try configuredSSLContext() let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let completionPromise: EventLoopPromise = group.next().makePromise() let childChannelInitPromise: EventLoopPromise = group.next().makePromise() let serverChannel: Channel = try ServerBootstrap(group: group) .childChannelOption(ChannelOptions.allowRemoteHalfClosure, value: true) // Important! .serverChannelOption(ChannelOptions.socket(SocketOptionLevel(SOL_SOCKET), SO_REUSEADDR), value: 1) .childChannelInitializer { channel in channel.eventLoop.makeCompletedFuture { try channel.pipeline.syncOperations.addHandlers( NIOSSLServerHandler(context: context), PromiseOnChildChannelInitHandler(promise: childChannelInitPromise), SimpleEchoServer() ) } } .bind(host: "127.0.0.1", port: 0) .wait() defer { XCTAssertNoThrow(try serverChannel.close().wait()) } let clientChannelInactivePromise: EventLoopPromise = group.next().makePromise() let shutdownPromise = group.next().makePromise(of: Void.self) let clientChannel = try clientTLSChannel( context: context, preHandlers: [], postHandlers: [ PromiseOnReadHandler(promise: completionPromise), ShutdownVerificationHandler( shutdownEvent: .input, promise: shutdownPromise ), ChannelInactiveHandler(promise: clientChannelInactivePromise), ], group: group, connectingTo: serverChannel.localAddress! ) XCTAssertNoThrow(try clientChannel.setOption(ChannelOptions.allowRemoteHalfClosure, value: true).wait()) var originalBuffer = clientChannel.allocator.buffer(capacity: 5) originalBuffer.writeString("Hello") try clientChannel.writeAndFlush(originalBuffer).wait() let newBuffer = try completionPromise.futureResult.wait() XCTAssertEqual(newBuffer, originalBuffer) // Ok, the connection is definitely up. // Now retrieve the client channel that our server opened for the connection to our client. 
let connectionChildChannel = try childChannelInitPromise.futureResult.wait() // Closing the output of the connection on the server should automatically // close the input of the clientChannel. XCTAssertNoThrow(try connectionChildChannel.close(mode: .output).wait()) try shutdownPromise.futureResult.wait() // Closing the output of the client channel (with input closed) should // result in full closure. XCTAssertNoThrow(try clientChannel.close(mode: .output).wait()) XCTAssertNoThrow(try clientChannelInactivePromise.futureResult.wait()) } func testCloseModeOutputTriggersFlush() throws { let context = try configuredSSLContext() let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let completionPromise: EventLoopPromise = group.next().makePromise() let serverChannel: Channel = try ServerBootstrap(group: group) .childChannelOption(ChannelOptions.allowRemoteHalfClosure, value: true) // Important! .serverChannelOption(ChannelOptions.socket(SocketOptionLevel(SOL_SOCKET), SO_REUSEADDR), value: 1) .childChannelInitializer { channel in channel.eventLoop.makeCompletedFuture { try channel.pipeline.syncOperations.addHandlers( NIOSSLServerHandler(context: context), SimpleEchoServer() ) } } .bind(host: "127.0.0.1", port: 0) .wait() defer { XCTAssertNoThrow(try serverChannel.close().wait()) } let clientChannel = try clientTLSChannel( context: context, preHandlers: [], postHandlers: [ PromiseOnReadHandler(promise: completionPromise) ], group: group, connectingTo: serverChannel.localAddress! 
) defer { XCTAssertNoThrow(try clientChannel.close().wait()) } var originalBuffer = clientChannel.allocator.buffer(capacity: 5) originalBuffer.writeString("Hello") let clientWriteFuture = clientChannel.write(originalBuffer) XCTAssertNoThrow(try clientChannel.close(mode: .output).wait()) XCTAssertNoThrow(try clientWriteFuture.wait()) let newBuffer = try assertNoThrowWithValue(completionPromise.futureResult.wait()) XCTAssertEqual(newBuffer, originalBuffer) } func testMultipleCloseOutput() throws { let context = try configuredSSLContext() let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let completionPromise: EventLoopPromise = group.next().makePromise() let serverChannel: Channel = try ServerBootstrap(group: group) .childChannelOption(ChannelOptions.allowRemoteHalfClosure, value: true) // Important! .serverChannelOption(ChannelOptions.socket(SocketOptionLevel(SOL_SOCKET), SO_REUSEADDR), value: 1) .childChannelInitializer { channel in channel.eventLoop.makeCompletedFuture { try channel.pipeline.syncOperations.addHandlers( NIOSSLServerHandler(context: context), SimpleEchoServer() ) } } .bind(host: "127.0.0.1", port: 0) .wait() defer { XCTAssertNoThrow(try serverChannel.close().wait()) } let clientChannel = try clientTLSChannel( context: context, preHandlers: [], postHandlers: [PromiseOnReadHandler(promise: completionPromise)], group: group, connectingTo: serverChannel.localAddress! ) defer { XCTAssertNoThrow(try clientChannel.close().wait()) } var originalBuffer = clientChannel.allocator.buffer(capacity: 5) originalBuffer.writeString("Hello") try clientChannel.writeAndFlush(originalBuffer).wait() let newBuffer = try completionPromise.futureResult.wait() XCTAssertEqual(newBuffer, originalBuffer) // Ok, the connection is definitely up. Now we want to forcibly call close(mode: .output) on the channel several times with // different promises. 
None of these will fire until clean shutdown happens, but we want to confirm that *all* of them // fire. // // To avoid the risk of the I/O loop actually closing the connection before we're done, we need to hijack the // I/O loop and issue all the closes on that thread. Otherwise, the channel will probably pull off the TLS shutdown // before we get to the third call to close(). let promises: [EventLoopPromise] = [ group.next().makePromise(), group.next().makePromise(), group.next().makePromise(), ] group.next().execute { for promise in promises { clientChannel.close(mode: .output, promise: promise) } } XCTAssertNoThrow(try promises.first!.futureResult.wait()) for promise in promises { // This should never block, but it may throw because the I/O is complete. // Suppress all errors, they're fine. _ = try? promise.futureResult.wait() } } func testMultipleClose() throws { var serverClosed = false let context = try configuredSSLContext() let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let completionPromise: EventLoopPromise = group.next().makePromise() let serverChannel = try serverTLSChannel(context: context, handlers: [SimpleEchoServer()], group: group) defer { if !serverClosed { XCTAssertNoThrow(try serverChannel.close().wait()) } } let clientChannel = try clientTLSChannel( context: context, preHandlers: [], postHandlers: [PromiseOnReadHandler(promise: completionPromise)], group: group, connectingTo: serverChannel.localAddress! ) defer { XCTAssertNoThrow(try clientChannel.close().wait()) } var originalBuffer = clientChannel.allocator.buffer(capacity: 5) originalBuffer.writeString("Hello") try clientChannel.writeAndFlush(originalBuffer).wait() let newBuffer = try completionPromise.futureResult.wait() XCTAssertEqual(newBuffer, originalBuffer) // Ok, the connection is definitely up. Now we want to forcibly call close() on the channel several times with // different promises. 
None of these will fire until clean shutdown happens, but we want to confirm that *all* of them // fire. // // To avoid the risk of the I/O loop actually closing the connection before we're done, we need to hijack the // I/O loop and issue all the closes on that thread. Otherwise, the channel will probably pull off the TLS shutdown // before we get to the third call to close(). let promises: [EventLoopPromise] = [ group.next().makePromise(), group.next().makePromise(), group.next().makePromise(), ] group.next().execute { for promise in promises { serverChannel.close(promise: promise) } } XCTAssertNoThrow(try promises.first!.futureResult.wait()) serverClosed = true for promise in promises { // This should never block, but it may throw because the I/O is complete. // Suppress all errors, they're fine. _ = try? promise.futureResult.wait() } } func testCoalescedWrites() throws { let context = try configuredSSLContext() let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let serverChannel = try serverTLSChannel(context: context, handlers: [SimpleEchoServer()], group: group) defer { XCTAssertNoThrow(try serverChannel.close().wait()) } let writeCounter = WriteCountingHandler() let readPromise: EventLoopPromise = group.next().makePromise() let clientChannel = try clientTLSChannel( context: context, preHandlers: [writeCounter], postHandlers: [PromiseOnReadHandler(promise: readPromise)], group: group, connectingTo: serverChannel.localAddress! ) defer { XCTAssertNoThrow(try clientChannel.close().wait()) } // We're going to issue a number of small writes. Each of these should be coalesced together // such that the underlying layer sees only one write for them. The total number of // writes should be (after we flush) 3: one for Client Hello, one for Finished, and one // for the coalesced writes. However, we'll tolerate fewer! 
var originalBuffer = clientChannel.allocator.buffer(capacity: 1) originalBuffer.writeString("A") var writeFutures: [EventLoopFuture<()>] = [] for _ in 0..<5 { writeFutures.append(clientChannel.write(originalBuffer)) } clientChannel.flush() try EventLoopFuture<()>.andAllSucceed(writeFutures, on: clientChannel.eventLoop).wait() let writeCount = try readPromise.futureResult.map { (_: ByteBuffer) in // Here we're in the I/O loop, so we know that no further channel action will happen // while we dispatch this callback. This is the perfect time to check how many writes // happened. writeCounter.writeCount }.wait() XCTAssertLessThanOrEqual(writeCount, 3) } func testCoalescedWritesWithFutures() throws { let context = try configuredSSLContext() let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let serverChannel = try serverTLSChannel(context: context, handlers: [SimpleEchoServer()], group: group) defer { XCTAssertNoThrow(try serverChannel.close().wait()) } let clientChannel = try clientTLSChannel( context: context, preHandlers: [], postHandlers: [], group: group, connectingTo: serverChannel.localAddress! ) defer { XCTAssertNoThrow(try clientChannel.close().wait()) } // We're going to issue a number of small writes. Each of these should be coalesced together // and all their futures (along with the one for the flush) should fire, in order, with nothing // missed. 
        let firedFutures: NIOLockedValueBox<[Int]> = .init([])
        let writeFutures: NIOLockedValueBox<[EventLoopFuture<()>]> = .init([])
        var originalBuffer = clientChannel.allocator.buffer(capacity: 1)
        originalBuffer.writeString("A")
        for index in 0..<5 {
            let promise: EventLoopPromise = group.next().makePromise()
            writeFutures.withLockedValue { $0.append(promise.futureResult) }
            promise.futureResult.map {
                // Each promise must fire exactly when `index` earlier ones have already fired.
                XCTAssertEqual(firedFutures.withLockedValue { $0.count }, index)
                firedFutures.withLockedValue { $0.append(index) }
            }.whenFailure { error in
                XCTFail("Write promise failed: \(error)")
            }
            clientChannel.write(originalBuffer, promise: promise)
        }

        clientChannel.flush()
        try EventLoopFuture<()>.andAllSucceed(
            writeFutures.withLockedValue { $0 },
            on: clientChannel.eventLoop
        ).map {
            XCTAssertEqual(firedFutures.withLockedValue { $0 }, [0, 1, 2, 3, 4])
        }.recover { error in
            XCTFail("Write promised failed: \(error)")
        }.wait()
    }

    // A close() issued mid-handshake on an EmbeddedChannel must still satisfy its promise.
    func testImmediateCloseSatisfiesPromises() throws {
        let context = try configuredSSLContext()
        let channel = EmbeddedChannel()
        try channel.pipeline.syncOperations.addHandler(
            NIOSSLClientHandler(context: context, serverHostname: nil)
        )

        // Start by initiating the handshake.
        try channel.connect(to: SocketAddress(unixDomainSocketPath: "/tmp/doesntmatter")).wait()

        // Now call close. This should immediately close, satisfying the promise.
        let closePromise: EventLoopPromise = channel.eventLoop.makePromise()
        channel.close(promise: closePromise)

        XCTAssertNoThrow(try closePromise.futureResult.wait())
    }

    // Adds the TLS handler to an already-active plaintext channel and verifies the handshake
    // still completes and data round-trips.
    func testAddingTlsToActiveChannelStillHandshakes() throws {
        let context = try configuredSSLContext()
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }

        let recorderHandler: EventRecorderHandler = EventRecorderHandler()
        let channelActiveWaiter = ChannelActiveWaiter(promise: group.next().makePromise())
        let serverChannel = try serverTLSChannel(
            context: context,
            handlers: [recorderHandler, SimpleEchoServer(), channelActiveWaiter],
            group: group
        )
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        // Create a client channel without TLS in it, and connect it.
        let readPromise: EventLoopPromise = group.next().makePromise()
        let clientChannel = try ClientBootstrap(group: group)
            .channelInitializer { channel in
                channel.eventLoop.makeCompletedFuture {
                    try channel.pipeline.syncOperations.addHandler(
                        PromiseOnReadHandler(promise: readPromise)
                    )
                }
            }
            .connect(to: serverChannel.localAddress!).wait()
        defer {
            XCTAssertNoThrow(try clientChannel.close().wait())
        }

        // Wait until the channel comes up, then confirm that no handshake has been
        // received. This hardly proves much, but it's enough.
        try channelActiveWaiter.waitForChannelActive()
        try group.next().submit {
            XCTAssertEqual(recorderHandler.events, [.Registered, .Active])
        }.wait()

        // Now, add the TLS handler to the pipeline.
        try clientChannel.eventLoop.submit {
            try clientChannel.pipeline.syncOperations.addHandler(
                NIOSSLClientHandler(context: context, serverHostname: nil),
                position: .first
            )
        }.wait()
        var data = clientChannel.allocator.buffer(capacity: 1)
        data.writeStaticString("x")
        try clientChannel.writeAndFlush(data).wait()

        // The echo should come back without error.
        _ = try readPromise.futureResult.wait()

        // At this point the handshake should be complete.
        try group.next().submit {
            XCTAssertEqual(
                recorderHandler.events[..<3],
                [.Registered, .Active, .UserEvent(.handshakeCompleted(negotiatedProtocol: nil))]
            )
        }.wait()
    }

    // With no serverHostname set, hostname validation must fail and surface
    // NIOSSLExtraError.failedToValidateHostname.
    func testValidatesHostnameOnConnectionFails() throws {
        let serverCtx = try configuredSSLContext()
        let clientCtx = try configuredClientContext()

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            try? group.syncShutdownGracefully()
        }

        let serverChannel = try serverTLSChannel(
            context: serverCtx,
            handlers: [],
            group: group
        )
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let errorHandler = ErrorCatcher()
        let clientChannel = try clientTLSChannel(
            context: clientCtx,
            preHandlers: [],
            postHandlers: [errorHandler],
            group: group,
            connectingTo: serverChannel.localAddress!
        )

        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        originalBuffer.writeString("Hello")
        let writeFuture = clientChannel.writeAndFlush(originalBuffer)
        let errorsFuture: EventLoopFuture<[NIOSSLExtraError]> = writeFuture.recover { (_: Error) in
            // We're swallowing errors here, on purpose, because we'll definitely
            // hit them.
            ()
        }.map {
            errorHandler.errors
        }
        let actualErrors = try errorsFuture.wait()

        // This write will have failed, but that's fine: we just want it as a signal that
        // the handshake is done so we can make our assertions.
        let expectedErrors: [NIOSSLExtraError] = [NIOSSLExtraError.failedToValidateHostname]

        XCTAssertEqual(expectedErrors, actualErrors)
        // NOTE(review): the expected message text looks truncated by extraction (a hostname
        // placeholder between "find" and "in" may be missing) — confirm against upstream.
        XCTAssertEqual(
            actualErrors.first.map { String(describing: $0) },
            "NIOSSLExtraError.failedToValidateHostname: Couldn't find in certificate from peer"
        )
    }

    // With serverHostname "localhost", hostname validation passes and the handshake completes.
    func testValidatesHostnameOnConnectionSucceeds() throws {
        let serverCtx = try configuredSSLContext()
        let clientCtx = try configuredClientContext()

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }

        let serverChannel = try serverTLSChannel(
            context: serverCtx,
            handlers: [],
            group: group
        )
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let eventHandler = EventRecorderHandler()
        let clientChannel = try clientTLSChannel(
            context: clientCtx,
            preHandlers: [],
            postHandlers: [eventHandler],
            group: group,
            connectingTo: serverChannel.localAddress!,
            serverHostname: "localhost"
        )

        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        originalBuffer.writeString("Hello")
        let writeFuture = clientChannel.writeAndFlush(originalBuffer)
        writeFuture.whenComplete { _ in
            XCTAssertEqual(
                eventHandler.events[..<3],
                [.Registered, .Active, .UserEvent(.handshakeCompleted(negotiatedProtocol: nil))]
            )
        }
        try writeFuture.wait()
    }

    // A succeeding additionalPeerCertificateVerificationCallback must not block the handshake.
    func testAdditionalValidationOnConnectionSucceeds() throws {
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }
        let serverCtx = try configuredSSLContext()
        let clientCtx = try configuredClientContext()

        let serverChannel = try serverTLSChannel(
            context: serverCtx,
            handlers: [],
            group: group
        )
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let eventHandler = EventRecorderHandler()
        let clientChannel = try clientTLSChannel(
            context: clientCtx,
            additionalPeerCertificateVerificationCallback: { cert, channel in
                XCTAssertEqual(cert, Self.cert)
                return channel.eventLoop.makeSucceededFuture(())
            },
            preHandlers: [],
            postHandlers: [eventHandler],
            group: group,
            connectingTo: serverChannel.localAddress!,
            serverHostname: "localhost"
        )

        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        originalBuffer.writeString("Hello")
        let writeFuture = clientChannel.writeAndFlush(originalBuffer)
        writeFuture.whenComplete { _ in
            XCTAssertEqual(
                eventHandler.events[..<3],
                [.Registered, .Active, .UserEvent(.handshakeCompleted(negotiatedProtocol: nil))]
            )
        }
        try writeFuture.wait()
    }

    // A failing additionalPeerCertificateVerificationCallback must fail the handshake with the
    // user's own error.
    func testAdditionalValidationOnConnectionFails() throws {
        struct CustomUserError: Error {}

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }
        let serverCtx = try configuredSSLContext()
        let clientCtx = try configuredClientContext()

        let serverChannel = try serverTLSChannel(
            context: serverCtx,
            handlers: [],
            group: group
        )
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let errorHandler = ErrorCatcher()
        let clientChannel = try clientTLSChannel(
            context: clientCtx,
            additionalPeerCertificateVerificationCallback: { cert, channel in
                XCTAssertEqual(cert, Self.cert)
                return channel.eventLoop.makeFailedFuture(CustomUserError())
            },
            preHandlers: [],
            postHandlers: [errorHandler],
            group: group,
            connectingTo: serverChannel.localAddress!,
            serverHostname: "localhost"
        )

        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        originalBuffer.writeString("Hello")
        let writeFuture = clientChannel.writeAndFlush(originalBuffer)
        let errorsFuture: EventLoopFuture<[Error]> = writeFuture.recover { (_: Error) in
            // We're swallowing errors here, on purpose, because we'll definitely
            // hit them.
            ()
        }.map {
            errorHandler.errors
        }
        let actualErrors = try errorsFuture.wait()

        // This write will have failed, but that's fine: we just want it as a signal that
        // the handshake is done so we can make our assertions.
        XCTAssertEqual(actualErrors.count, 1)
        XCTAssertTrue(actualErrors.first is CustomUserError)
    }

    // A flush issued while additional validation is pending must be held back until the
    // validation future completes.
    func testFlushWhileAdditionalValidationIsInProgressDoesNotActuallyFlush() throws {
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }
        let additionalHandshakePromise = group.next().makePromise(of: Void.self)
        let serverCtx = try configuredSSLContext()
        let clientCtx = try configuredClientContext()

        let serverChannel = try serverTLSChannel(
            context: serverCtx,
            handlers: [],
            group: group
        )
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let eventHandler = EventRecorderHandler()
        let clientChannel = try clientTLSChannel(
            context: clientCtx,
            additionalPeerCertificateVerificationCallback: { cert, channel in
                XCTAssertEqual(cert, Self.cert)
                // Flush while validation is pending: nothing must actually go out yet.
                channel.flush()
                return additionalHandshakePromise.futureResult
            },
            preHandlers: [],
            postHandlers: [eventHandler],
            group: group,
            connectingTo: serverChannel.localAddress!,
            serverHostname: "localhost"
        )

        let writeFuture = clientChannel.writeAndFlush(ByteBuffer(string: "Hello"))
        // The write cannot complete within the timeout while validation is still pending.
        XCTAssertThrowsError(try writeFuture.timeout(after: .milliseconds(100)).wait())
        additionalHandshakePromise.succeed(())
        writeFuture.whenComplete { _ in
            XCTAssertEqual(
                eventHandler.events[..<3],
                [.Registered, .Active, .UserEvent(.handshakeCompleted(negotiatedProtocol: nil))]
            )
        }
        try writeFuture.wait()
    }

    // Regression test: a close promise issued before channelInactive must still be completed.
    func testDontLoseClosePromises() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()
        var channelClosed = false
        defer {
            // We know this will throw.
            _ = try? serverChannel.finish()
            _ = try?
                clientChannel.finish()
        }

        let context = try configuredSSLContext()

        try serverChannel.pipeline.syncOperations.addHandler(
            NIOSSLServerHandler(context: context)
        )
        try clientChannel.pipeline.syncOperations.addHandler(
            try NIOSSLClientHandler(context: context, serverHostname: nil)
        )

        let addr: SocketAddress = try SocketAddress(unixDomainSocketPath: "/tmp/whatever")
        let connectFuture = clientChannel.connect(to: addr)
        serverChannel.pipeline.fireChannelActive()
        try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)
        try connectFuture.wait()

        // Ok, we're connected. Good stuff! Now, we want to hit this specific window:
        // 1. Call close() on the server channel. This will transition it to the closing state.
        // 2. Fire channelInactive on the serverChannel. This should cause it to drop all state and
        //    fire the close promise.
        // Because we're using the embedded channel here, we don't need to worry about thread
        // synchronization: all of this should succeed synchronously. If it doesn't, that's
        // a bug too!
        let closePromise = serverChannel.close()
        closePromise.assumeIsolated().whenComplete { _ in
            // This looks like unsynchronized access to channelClosed, but it isn't: as we're
            // using EmbeddedChannel here there is no cross-thread hopping.
            channelClosed = true
        }

        XCTAssertFalse(channelClosed)

        serverChannel.pipeline.fireChannelInactive()
        XCTAssertTrue(channelClosed)

        closePromise.map {
            XCTFail("Unexpected success")
        }.whenFailure { error in
            switch error {
            case let e as NIOSSLError where e == .uncleanShutdown:
                break
            default:
                XCTFail("Unexpected error: \(error)")
            }
        }

        // Now clean up the client channel. We need to also fire the channel inactive here as there is
        // no-one left for the client channel to end the connection with.
        _ = clientChannel.close()
        clientChannel.pipeline.fireChannelInactive()
    }

    // Verifies that trust roots loaded from a file on disk validate the peer certificate.
    func testTrustStoreOnDisk() throws {
        var tempFile: String?
            = nil
        let serverCtx = try configuredSSLContext()
        let config: TLSConfiguration = try withTrustBundleInFile(tempFile: &tempFile) {
            var config = TLSConfiguration.makeClientConfiguration()
            config.certificateVerification = .noHostnameVerification
            config.trustRoots = .file($0)
            config.certificateChain = [.certificate(NIOSSLIntegrationTest.cert)]
            config.privateKey = .privateKey(NIOSSLIntegrationTest.key)
            return config
        }
        defer {
            // unlink returns 0 on success; anything else means the temp file leaked.
            precondition(
                .some(0) == tempFile.map { unlink($0) },
                "couldn't remove temp file \(tempFile.debugDescription)"
            )
        }
        let clientCtx = try assertNoThrowWithValue(NIOSSLContext(configuration: config))

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }

        let completionPromise: EventLoopPromise = group.next().makePromise()
        let serverChannel: Channel = try serverTLSChannel(
            context: serverCtx,
            handlers: [SimpleEchoServer()],
            group: group
        )
        defer {
            _ = try? serverChannel.close().wait()
        }

        let clientChannel = try clientTLSChannel(
            context: clientCtx,
            preHandlers: [],
            postHandlers: [PromiseOnReadHandler(promise: completionPromise)],
            group: group,
            connectingTo: serverChannel.localAddress!
        )
        defer {
            _ = try?
                clientChannel.close().wait()
        }

        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        originalBuffer.writeString("Hello")
        try clientChannel.writeAndFlush(originalBuffer).wait()

        let newBuffer = try completionPromise.futureResult.wait()
        XCTAssertEqual(newBuffer, originalBuffer)
    }

    // Points trustRoots at a directory that is not a valid trust bundle and expects the
    // handshake to fail with exactly one error.
    func testChecksTrustStoreOnDisk() throws {
        let serverCtx = try configuredSSLContext()
        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .noHostnameVerification
        clientConfig.trustRoots = .file(FileManager.default.temporaryDirectory.path)
        clientConfig.certificateChain = [.certificate(NIOSSLIntegrationTest.cert)]
        clientConfig.privateKey = .privateKey(NIOSSLIntegrationTest.key)
        let clientCtx = try assertNoThrowWithValue(NIOSSLContext(configuration: clientConfig))

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }

        let serverChannel = try serverTLSChannel(
            context: serverCtx,
            handlers: [],
            group: group
        )
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let errorHandler = ErrorCatcher()
        let clientChannel = try clientTLSChannel(
            context: clientCtx,
            preHandlers: [],
            postHandlers: [errorHandler],
            group: group,
            connectingTo: serverChannel.localAddress!
        )

        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        originalBuffer.writeString("Hello")
        let writeFuture = clientChannel.writeAndFlush(originalBuffer)
        let errorsFuture: EventLoopFuture<[NIOSSLError]> = writeFuture.recover { (_: Error) in
            // We're swallowing errors here, on purpose, because we'll definitely
            // hit them.
            ()
        }.map {
            errorHandler.errors
        }
        let actualErrors = try errorsFuture.wait()

        // The actual error is non-deterministic depending on platform and version, so we don't
        // really try to make too many assertions here.
        XCTAssertEqual(actualErrors.count, 1)
        try clientChannel.closeFuture.wait()
    }

    // A read delivered after CLOSE_NOTIFY must surface .readInInvalidTLSState rather than
    // crashing the process.
    func testReadAfterCloseNotifyDoesntKillProcess() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()

        let context = try configuredSSLContext()

        try serverChannel.pipeline.syncOperations.addHandler(
            NIOSSLServerHandler(context: context)
        )
        try clientChannel.pipeline.syncOperations.addHandler(
            try NIOSSLClientHandler(context: context, serverHostname: nil)
        )

        let addr = try SocketAddress(unixDomainSocketPath: "/tmp/whatever2")
        let connectFuture = clientChannel.connect(to: addr)
        serverChannel.pipeline.fireChannelActive()
        try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)
        try connectFuture.wait()

        // Ok, we're connected. Now we want to close the server, and have that trigger a client CLOSE_NOTIFY.
        // However, when we deliver that CLOSE_NOTIFY we're then going to immediately send another chunk of
        // data. We can get away with doing this because the Embedded channel fires any promise for close()
        // before it fires channelInactive, which will allow us to fire channelRead from within the callback.
        let closePromise = serverChannel.close()
        closePromise.whenComplete { _ in
            var buffer = serverChannel.allocator.buffer(capacity: 5)
            buffer.writeStaticString("hello")
            serverChannel.pipeline.fireChannelRead(buffer)
            serverChannel.pipeline.fireChannelReadComplete()
        }

        XCTAssertNoThrow(try serverChannel.throwIfErrorCaught())
        XCTAssertThrowsError(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)) { error in
            XCTAssertEqual(.readInInvalidTLSState, error as?
                NIOSSLError)
        }
    }

    // Data flushed by the client before closing must still be delivered to the server's read
    // path before the connection shuts down.
    func testUnprocessedDataOnReadPathBeforeClosing() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()

        let context = try configuredSSLContext()

        let completePromise: EventLoopPromise = serverChannel.eventLoop.makePromise()

        XCTAssertNoThrow(
            try serverChannel.pipeline.syncOperations.addHandler(
                NIOSSLServerHandler(context: context)
            )
        )
        XCTAssertNoThrow(
            try serverChannel.pipeline.syncOperations.addHandler(
                ReadRecordingHandler(completePromise: completePromise)
            )
        )
        XCTAssertNoThrow(
            try clientChannel.pipeline.syncOperations.addHandler(
                try NIOSSLClientHandler(context: context, serverHostname: nil)
            )
        )

        let addr = try SocketAddress(unixDomainSocketPath: "/tmp/whatever2")
        let connectFuture = clientChannel.connect(to: addr)
        serverChannel.pipeline.fireChannelActive()
        try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)
        try connectFuture.wait()

        // Ok, we're connected. Now we want to close the server, and have that trigger a client CLOSE_NOTIFY.
        // After the CLOSE_NOTIFY create another chunk of data.
        let serverClosePromise = serverChannel.close()

        // Create a new chunk of data after the close.
        var clientBuffer = clientChannel.allocator.buffer(capacity: 5)
        clientBuffer.writeStaticString("hello")
        _ = try clientChannel.writeAndFlush(clientBuffer).wait()
        let clientClosePromise = clientChannel.close()

        // Use interactInMemory to finish the reads and writes.
        XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))

        XCTAssertNoThrow(try clientClosePromise.wait())
        XCTAssertNoThrow(try serverClosePromise.wait())

        // Now check what we read.
        var readData = try assertNoThrowWithValue(completePromise.futureResult.wait())
        XCTAssertEqual(readData.readString(length: readData.readableBytes)!, "hello")
    }

    // Zero-length writes must succeed without producing TLS records on the wire.
    func testZeroLengthWrite() throws {
        let context = try configuredSSLContext()

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            try?
                group.syncShutdownGracefully()
        }

        let completionPromise: EventLoopPromise = group.next().makePromise()
        let serverChannel = try serverTLSChannel(
            context: context,
            handlers: [PromiseOnReadHandler(promise: completionPromise)],
            group: group
        )
        defer {
            _ = try? serverChannel.close().wait()
        }

        let clientChannel = try clientTLSChannel(
            context: context,
            preHandlers: [],
            postHandlers: [],
            group: group,
            connectingTo: serverChannel.localAddress!
        )
        defer {
            _ = try? clientChannel.close().wait()
        }

        // Write several zero-length buffers *and* one with some actual data. Only one should
        // be written.
        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        let promises = (0...5).map { (_: Int) in clientChannel.write(originalBuffer) }
        originalBuffer.writeStaticString("hello")
        _ = try clientChannel.writeAndFlush(originalBuffer).wait()

        // At this time all the writes should have succeeded.
        for promise in promises {
            XCTAssertNoThrow(try promise.wait())
        }

        let newBuffer = try completionPromise.futureResult.wait()
        XCTAssertEqual(newBuffer, originalBuffer)
    }

    // Mixed empty and non-empty writes must have their promises completed strictly in order.
    func testZeroLengthWritePromisesFireInOrder() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()
        defer {
            _ = try? serverChannel.finish()
            _ = try? clientChannel.finish()
        }

        let context = try configuredSSLContext()

        try serverChannel.pipeline.syncOperations.addHandler(
            NIOSSLServerHandler(context: context)
        )
        try clientChannel.pipeline.syncOperations.addHandler(
            try NIOSSLClientHandler(context: context, serverHostname: nil)
        )

        let addr = try SocketAddress(unixDomainSocketPath: "/tmp/whatever2")
        let connectFuture = clientChannel.connect(to: addr)
        serverChannel.pipeline.fireChannelActive()
        try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)
        try connectFuture.wait()

        // This test fires three writes, flushing between them all. We want to confirm that all of the
        // writes are succeeded in order.
        // To do that, we want to add a WriteDelayHandler to
        // prevent the EmbeddedChannel succeeding the early writes.
        let writeDelayer = WriteDelayHandler()
        try clientChannel.pipeline.syncOperations.addHandler(writeDelayer, position: .first)
        var writeCount = 0
        let emptyBuffer = clientChannel.allocator.buffer(capacity: 16)
        var buffer = clientChannel.allocator.buffer(capacity: 16)
        buffer.writeStaticString("hello world")

        clientChannel.write(buffer).assumeIsolated().whenComplete { _ in
            XCTAssertEqual(writeCount, 0)
            writeCount = 1
        }
        clientChannel.flush()
        clientChannel.write(emptyBuffer).assumeIsolated().whenComplete { _ in
            XCTAssertEqual(writeCount, 1)
            writeCount = 2
        }
        clientChannel.flush()
        clientChannel.write(buffer).assumeIsolated().whenComplete { _ in
            XCTAssertEqual(writeCount, 2)
            writeCount = 3
        }
        clientChannel.flush()

        // Nothing completes until the delayer releases the writes; then all three fire in order.
        XCTAssertEqual(writeCount, 0)
        writeDelayer.forceFlush()
        XCTAssertEqual(writeCount, 3)

        serverChannel.pipeline.fireChannelInactive()
        clientChannel.pipeline.fireChannelInactive()
    }

    // End-to-end echo through a context configured via configuredSSLContext().
    func testEncryptedFileInContext() throws {
        let context = try configuredSSLContext()

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }

        let completionPromise: EventLoopPromise = group.next().makePromise()

        let serverChannel: Channel = try serverTLSChannel(
            context: context,
            handlers: [SimpleEchoServer()],
            group: group
        )
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let clientChannel = try clientTLSChannel(
            context: context,
            preHandlers: [],
            postHandlers: [PromiseOnReadHandler(promise: completionPromise)],
            group: group,
            connectingTo: serverChannel.localAddress!
        )
        defer {
            XCTAssertNoThrow(try clientChannel.close().wait())
        }

        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        originalBuffer.writeString("Hello")
        try clientChannel.writeAndFlush(originalBuffer).wait()

        let newBuffer = try completionPromise.futureResult.wait()
        XCTAssertEqual(newBuffer, originalBuffer)
    }

    // Pending reads buffered before a CLOSE_NOTIFY must be flushed to the pipeline when it
    // arrives.
    func testFlushPendingReadsOnCloseNotify() throws {
        let context = try assertNoThrowWithValue(configuredSSLContext())
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()
        defer {
            _ = try? serverChannel.finish()
            _ = try? clientChannel.finish()
        }

        let completePromise: EventLoopPromise = serverChannel.eventLoop.makePromise()
        XCTAssertNoThrow(
            try serverChannel.pipeline.syncOperations.addHandler(
                NIOSSLServerHandler(context: context)
            )
        )
        XCTAssertNoThrow(
            try serverChannel.pipeline.syncOperations.addHandler(
                ReadRecordingHandler(completePromise: completePromise)
            )
        )
        XCTAssertNoThrow(
            try clientChannel.pipeline.syncOperations.addHandler(
                try NIOSSLClientHandler(context: context, serverHostname: nil)
            )
        )

        // Connect
        let addr = try assertNoThrowWithValue(SocketAddress(unixDomainSocketPath: "/tmp/whatever2"))
        let connectFuture = clientChannel.connect(to: addr)
        serverChannel.pipeline.fireChannelActive()
        XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
        XCTAssertNoThrow(try connectFuture.wait())

        // Here we want to issue a write, a flush, and then a close. This will trigger a CLOSE_NOTIFY message to be emitted by the
        // client. Unfortunately, interactInMemory doesn't do quite what we want, as we need to coalesce all these writes, so
        // we'll have to do some of this ourselves.
        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        originalBuffer.writeString("Hello")
        clientChannel.writeAndFlush(originalBuffer, promise: nil)
        let clientClosePromise = clientChannel.close()

        // Manually coalesce the app data and the CLOSE_NOTIFY into one inbound buffer.
        var buffer = clientChannel.allocator.buffer(capacity: 1024)
        while case .some(.byteBuffer(var data)) = try clientChannel.readOutbound(as: IOData.self) {
            buffer.writeBuffer(&data)
        }
        XCTAssertNoThrow(try serverChannel.writeInbound(buffer))

        // Now we can interact. The server should close.
        XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
        XCTAssertNoThrow(try clientClosePromise.wait())

        // Now check what we read.
        var readData = try assertNoThrowWithValue(completePromise.futureResult.wait())
        XCTAssertEqual(readData.readString(length: readData.readableBytes)!, "Hello")
    }

    // Deprecated verificationCallback returning .failed must fail the handshake.
    @available(*, deprecated, message: "Testing deprecated API surface")
    func testForcingVerificationFailure() throws {
        let context = try configuredSSLContext()

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }

        let serverChannel: Channel = try serverTLSChannel(context: context, handlers: [], group: group)
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let errorHandler = ErrorCatcher()
        let clientChannel = try clientTLSChannel(
            context: try configuredClientContext(),
            preHandlers: [],
            postHandlers: [errorHandler],
            group: group,
            connectingTo: serverChannel.localAddress!,
            verificationCallback: { preverify, certificate in
                XCTAssertEqual(preverify, .certificateVerified)
                return .failed
            }
        )

        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        originalBuffer.writeString("Hello")
        let writeFuture = clientChannel.writeAndFlush(originalBuffer)
        let errorsFuture: EventLoopFuture<[NIOSSLError]> = writeFuture.recover { (_: Error) in
            // We're swallowing errors here, on purpose, because we'll definitely
            // hit them.
            ()
        }.map {
            errorHandler.errors
        }
        let actualErrors = try errorsFuture.wait()

        // This write will have failed, but that's fine: we just want it as a signal that
        // the handshake is done so we can make our assertions.
        XCTAssertEqual(actualErrors.count, 1)
        switch actualErrors.first! {
        case .handshakeFailed:
            // expected
            break
        case let error:
            XCTFail("Unexpected error: \(error)")
        }
    }

    // Deprecated verificationCallback must observe exactly one peer certificate.
    @available(*, deprecated, message: "Testing deprecated API surface")
    func testExtractingCertificates() throws {
        let context = try configuredSSLContext()

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }

        let completionPromise: EventLoopPromise = group.next().makePromise()

        let serverChannel: Channel = try serverTLSChannel(
            context: context,
            handlers: [SimpleEchoServer()],
            group: group
        )
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let certificates = NIOLockedValueBox([NIOSSLCertificate]())
        let clientChannel = try clientTLSChannel(
            context: configuredClientContext(),
            preHandlers: [],
            postHandlers: [PromiseOnReadHandler(promise: completionPromise)],
            group: group,
            connectingTo: serverChannel.localAddress!,
            serverHostname: "localhost",
            verificationCallback: { verify, certificate in
                certificates.withLockedValue { $0.append(certificate) }
                return verify
            }
        )
        defer {
            XCTAssertNoThrow(try clientChannel.close().wait())
        }

        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        originalBuffer.writeString("Hello")
        XCTAssertNoThrow(try clientChannel.writeAndFlush(originalBuffer).wait())

        let newBuffer = try completionPromise.futureResult.wait()
        XCTAssertEqual(newBuffer, originalBuffer)

        XCTAssertEqual(certificates.withLockedValue { $0.count }, 1)
    }

    // customVerificationCallback succeeding with .failed must fail the handshake.
    func testForcingVerificationFailureNewCallback() throws {
        let context = try configuredSSLContext()

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }

        let serverChannel: Channel = try
            serverTLSChannel(context: context, handlers: [], group: group)
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let handshakeResultPromise = group.next().makePromise(of: Void.self)
        let handshakeWatcher = WaitForHandshakeHandler(handshakeResultPromise: handshakeResultPromise)
        let clientChannel = try clientTLSChannel(
            context: try configuredClientContext(),
            preHandlers: [],
            postHandlers: [handshakeWatcher],
            group: group,
            connectingTo: serverChannel.localAddress!,
            customVerificationCallback: .callback { _, promise in
                promise.succeed(.failed)
            }
        )
        defer {
            // Ignore errors here, the channel should be closed already by the time this happens.
            try? clientChannel.close().wait()
        }

        XCTAssertThrowsError(try handshakeResultPromise.futureResult.wait())
    }

    // A customVerificationCallback that fails its promise with a user error must fail the
    // handshake.
    func testErroringNewVerificationCallback() throws {
        enum LocalError: Error {
            case kaboom
        }

        let context = try configuredSSLContext()

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }

        let serverChannel: Channel = try serverTLSChannel(context: context, handlers: [], group: group)
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let handshakeResultPromise = group.next().makePromise(of: Void.self)
        let handshakeWatcher = WaitForHandshakeHandler(handshakeResultPromise: handshakeResultPromise)
        let clientChannel = try clientTLSChannel(
            context: try configuredClientContext(),
            preHandlers: [],
            postHandlers: [handshakeWatcher],
            group: group,
            connectingTo: serverChannel.localAddress!,
            customVerificationCallback: .callback { _, promise in
                promise.fail(LocalError.kaboom)
            }
        )
        defer {
            // Ignore errors here, the channel should be closed already by the time this happens.
            try?
                clientChannel.close().wait()
        }

        XCTAssertThrowsError(try handshakeResultPromise.futureResult.wait())
    }

    // Reads buffered during a delayed handshake must be delivered once it completes.
    func testReadsAreUnbufferedAfterHandshake() throws {
        // This is a regression test for rdar://96850712
        var config = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(NIOSSLIntegrationTest.cert)],
            privateKey: .privateKey(NIOSSLIntegrationTest.key)
        )
        config.certificateVerification = .noHostnameVerification
        let context = try assertNoThrowWithValue(NIOSSLContext(configuration: config))

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }

        let completionPromiseFired = NIOLockedValueBox(false)
        let completionPromise: EventLoopPromise = group.next().makePromise()
        completionPromise.futureResult.whenComplete { _ in
            completionPromiseFired.withLockedValue { $0 = true }
        }

        // NOTE(review): the generic parameters on this NIOLockedValueBox were garbled by
        // extraction (likely NIOLockedValueBox<EventLoopPromise<NIOSSLVerificationResult>?>) —
        // confirm against upstream.
        let handshakeCompletePromise: NIOLockedValueBox?> = .init(nil)
        let handshakeFiredPromise: EventLoopPromise = group.next().makePromise()

        let serverChannel: Channel = try serverTLSChannel(
            context: context,
            preHandlers: [],
            postHandlers: [PromiseOnReadHandler(promise: completionPromise)],
            group: group,
            customVerificationCallback: { innerCertificates, promise in
                handshakeCompletePromise.withLockedValue { $0 = promise }
                handshakeFiredPromise.succeed(())
            }
        )
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let clientChannel = try clientTLSChannel(
            context: try configuredSSLContext(),
            preHandlers: [],
            postHandlers: [],
            group: group,
            connectingTo: serverChannel.localAddress!
        )
        defer {
            XCTAssertNoThrow(try clientChannel.close().wait())
        }

        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        originalBuffer.writeString("Hello")
        clientChannel.writeAndFlush(originalBuffer, promise: nil)

        // This has driven the handshake to begin, so we can wait for that.
        XCTAssertNoThrow(try handshakeFiredPromise.futureResult.wait())

        // We can now check whether the completion promise has fired: it should not have.
completionPromiseFired.withLockedValue { XCTAssertFalse($0) } // Ok, allow the handshake to run. handshakeCompletePromise.withLockedValue { $0!.succeed(.certificateVerified) } let newBuffer = try completionPromise.futureResult.wait() XCTAssertEqual(newBuffer, originalBuffer) } func testNewCallbackCanDelayHandshake() throws { let context = try configuredSSLContext() let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let completionPromiseFired = NIOLockedValueBox(false) let completionPromise: EventLoopPromise = group.next().makePromise() completionPromise.futureResult.whenComplete { _ in completionPromiseFired.withLockedValue { $0 = true } } let serverChannel: Channel = try serverTLSChannel( context: context, handlers: [SimpleEchoServer()], group: group ) defer { XCTAssertNoThrow(try serverChannel.close().wait()) } let handshakeCompletePromise: NIOLockedValueBox?> = .init(nil) let handshakeFiredPromise: EventLoopPromise = group.next().makePromise() let clientChannel = try clientTLSChannel( context: configuredClientContext(), preHandlers: [], postHandlers: [PromiseOnReadHandler(promise: completionPromise)], group: group, connectingTo: serverChannel.localAddress!, serverHostname: "localhost", customVerificationCallback: .callback { innerCertificates, promise in handshakeCompletePromise.withLockedValue { $0 = promise } handshakeFiredPromise.succeed(()) } ) defer { XCTAssertNoThrow(try clientChannel.close().wait()) } var originalBuffer = clientChannel.allocator.buffer(capacity: 5) originalBuffer.writeString("Hello") clientChannel.writeAndFlush(originalBuffer, promise: nil) // This has driven the handshake to begin, so we can wait for that. XCTAssertNoThrow(try handshakeFiredPromise.futureResult.wait()) // We can now check whether the completion promise has fired: it should not have. completionPromiseFired.withLockedValue { XCTAssertFalse($0) } // Ok, allow the handshake to run. 
handshakeCompletePromise.withLockedValue { $0!.succeed(.certificateVerified) }

let newBuffer = try completionPromise.futureResult.wait()
XCTAssertEqual(newBuffer, originalBuffer)
}

// Verifies that the peer's certificate chain is surfaced to the `.callback`-style
// custom verification callback.
func testExtractingCertificatesNewCallback() throws {
    let context = try configuredSSLContext()

    let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
    defer {
        XCTAssertNoThrow(try group.syncShutdownGracefully())
    }

    let completionPromise: EventLoopPromise<ByteBuffer> = group.next().makePromise()

    let serverChannel: Channel = try serverTLSChannel(
        context: context,
        handlers: [SimpleEchoServer()],
        group: group
    )
    defer {
        XCTAssertNoThrow(try serverChannel.close().wait())
    }

    // Captures whatever certificates the callback is handed, for later assertion.
    let certificates = NIOLockedValueBox([NIOSSLCertificate]())
    let clientChannel = try clientTLSChannel(
        context: configuredClientContext(),
        preHandlers: [],
        postHandlers: [PromiseOnReadHandler(promise: completionPromise)],
        group: group,
        connectingTo: serverChannel.localAddress!,
        serverHostname: "localhost",
        customVerificationCallback: .callback { innerCertificates, promise in
            certificates.withLockedValue { $0 = innerCertificates }
            promise.succeed(.certificateVerified)
        }
    )
    defer {
        XCTAssertNoThrow(try clientChannel.close().wait())
    }

    var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
    originalBuffer.writeString("Hello")
    XCTAssertNoThrow(try clientChannel.writeAndFlush(originalBuffer).wait())

    let newBuffer = try completionPromise.futureResult.wait()
    XCTAssertEqual(newBuffer, originalBuffer)

    XCTAssertEqual(certificates.withLockedValue { $0 }, [NIOSSLIntegrationTest.cert])
}

// Verifies that a validated certificate chain returned via `.callbackWithMetadata`
// can later be extracted from the channel.
func testCustomVerificationCallbackExtractingCertificateChain() throws {
    let context = try configuredSSLContext()

    let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
    defer {
        XCTAssertNoThrow(try group.syncShutdownGracefully())
    }

    let serverChannel: Channel = try serverTLSChannel(
        context: context,
        handlers: [SimpleEchoServer()],
        group: group
    )
    defer {
        XCTAssertNoThrow(try serverChannel.close().wait())
    }

    let completionPromise: EventLoopPromise<ByteBuffer> = group.next().makePromise()

    let clientChannel = try clientTLSChannel(
        context: configuredClientContext(),
        preHandlers: [],
        postHandlers: [PromiseOnReadHandler(promise: completionPromise)],
        group: group,
        connectingTo: serverChannel.localAddress!,
        serverHostname: "localhost",
        customVerificationCallback: .callbackWithMetadata { innerCertificates, promise in
            // Return `innerCertificates` as the validated certificate chain
            promise.succeed(
                .certificateVerified(
                    VerificationMetadata(ValidatedCertificateChain(innerCertificates))
                )
            )
        }
    )
    defer {
        XCTAssertNoThrow(try clientChannel.close().wait())
    }

    var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
    originalBuffer.writeString("Hello")
    XCTAssertNoThrow(try clientChannel.writeAndFlush(originalBuffer).wait())

    let newBuffer = try completionPromise.futureResult.wait()
    XCTAssertEqual(newBuffer, originalBuffer)

    // We should be able to extract the certificate chain from the channel
    let extractedCertChain = try clientChannel.nioSSL_peerValidatedCertificateChain().wait()
    XCTAssertEqual(extractedCertChain?.validatedChain, [NIOSSLIntegrationTest.cert])
}

// Verifies that no validated chain is recorded when the metadata callback does not return one.
func testCustomVerificationCallbackNotReturningChain() throws {
    let context = try configuredSSLContext()

    let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
    defer {
        XCTAssertNoThrow(try group.syncShutdownGracefully())
    }

    let serverChannel: Channel = try serverTLSChannel(
        context: context,
        handlers: [SimpleEchoServer()],
        group: group
    )
    defer {
        XCTAssertNoThrow(try serverChannel.close().wait())
    }

    let completionPromise: EventLoopPromise<ByteBuffer> = group.next().makePromise()

    let clientChannel = try clientTLSChannel(
        context: configuredClientContext(),
        preHandlers: [],
        postHandlers: [PromiseOnReadHandler(promise: completionPromise)],
        group: group,
        connectingTo: serverChannel.localAddress!,
        serverHostname: "localhost",
        customVerificationCallback: .callbackWithMetadata { innerCertificates, promise in
            // Initialize an empty VerificationMetadata without a chain.
promise.succeed(.certificateVerified(VerificationMetadata(nil)))
}
)
defer {
    XCTAssertNoThrow(try clientChannel.close().wait())
}

var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
originalBuffer.writeString("Hello")
XCTAssertNoThrow(try clientChannel.writeAndFlush(originalBuffer).wait())

let newBuffer = try completionPromise.futureResult.wait()
XCTAssertEqual(newBuffer, originalBuffer)

// We should not be able to extract the chain: no chain was returned
XCTAssertNil(try clientChannel.nioSSL_peerValidatedCertificateChain().wait())
}

// Verifies that the validated chain is still recorded when the metadata callback
// completes its promise asynchronously rather than inline.
func testCustomVerificationCallbackDelayReturningCertificateChain() throws {
    let context = try configuredSSLContext()

    let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
    defer {
        XCTAssertNoThrow(try group.syncShutdownGracefully())
    }

    let serverChannel: Channel = try serverTLSChannel(
        context: context,
        handlers: [SimpleEchoServer()],
        group: group
    )
    defer {
        XCTAssertNoThrow(try serverChannel.close().wait())
    }

    let completionPromise: EventLoopPromise<ByteBuffer> = group.next().makePromise()

    let clientChannel = try clientTLSChannel(
        context: configuredClientContext(),
        preHandlers: [],
        postHandlers: [PromiseOnReadHandler(promise: completionPromise)],
        group: group,
        connectingTo: serverChannel.localAddress!,
        serverHostname: "localhost",
        customVerificationCallback: CustomCallback.callbackWithMetadata { innerCertificates, promise in
            // Complete the promise in 10 milliseconds
            promise.futureResult.eventLoop.scheduleTask(in: .milliseconds(10)) {
                promise.succeed(
                    .certificateVerified(
                        VerificationMetadata(ValidatedCertificateChain(innerCertificates))
                    )
                )
            }
        }
    )
    defer {
        XCTAssertNoThrow(try clientChannel.close().wait())
    }

    var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
    originalBuffer.writeString("Hello")
    XCTAssertNoThrow(try clientChannel.writeAndFlush(originalBuffer).wait())

    let newBuffer = try completionPromise.futureResult.wait()
    XCTAssertEqual(newBuffer, originalBuffer)

    // We should be able to extract the certificate chain.
    let extractedCertChain = try clientChannel.nioSSL_peerValidatedCertificateChain().wait()
    XCTAssertEqual(extractedCertChain?.validatedChain, [NIOSSLIntegrationTest.cert])
}

// Verifies that a custom verification callback composes with full verification
// against the default trust store.
func testNewCallbackCombinedWithDefaultTrustStore() throws {
    // This test is mostly useful on macOS, where it previously failed due to an excessive assertion.
    let serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(NIOSSLIntegrationTest.cert)],
        privateKey: .privateKey(NIOSSLIntegrationTest.key)
    )
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.certificateVerification = .fullVerification
    clientConfig.trustRoots = .default

    let serverContext = try assertNoThrowWithValue(NIOSSLContext(configuration: serverConfig))
    let clientContext = try assertNoThrowWithValue(NIOSSLContext(configuration: clientConfig))

    let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
    defer {
        XCTAssertNoThrow(try group.syncShutdownGracefully())
    }

    let serverChannel: Channel = try serverTLSChannel(context: serverContext, handlers: [], group: group)
    defer {
        XCTAssertNoThrow(try serverChannel.close().wait())
    }

    let handshakeCompletePromise = group.next().makePromise(of: Void.self)
    let customCallbackCalledPromise = group.next().makePromise(of: Void.self)
    let clientChannel = try clientTLSChannel(
        context: clientContext,
        preHandlers: [],
        postHandlers: [WaitForHandshakeHandler(handshakeResultPromise: handshakeCompletePromise)],
        group: group,
        connectingTo: serverChannel.localAddress!,
        serverHostname: "localhost",
        customVerificationCallback: .callback { _, promise in
            // Note that we override certificate verification here.
customCallbackCalledPromise.succeed(())
promise.succeed(.certificateVerified)
}
)
defer {
    XCTAssertNoThrow(try clientChannel.close().wait())
}

// Both the callback and the handshake must complete.
XCTAssertNoThrow(try customCallbackCalledPromise.futureResult.wait())
XCTAssertNoThrow(try handshakeCompletePromise.futureResult.wait())
}

// Verifies that setting `certificateVerification = .none` really skips verification,
// including any platform (SecTrust) verification callback.
func testMacOSVerificationCallbackIsNotUsedIfVerificationDisabled() throws {
    // This test is mostly useful on macOS, where it validates that disabling verification actually, well,
    // disables verification.
    let serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(NIOSSLIntegrationTest.cert)],
        privateKey: .privateKey(NIOSSLIntegrationTest.key)
    )
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.certificateVerification = .none
    clientConfig.trustRoots = .default

    let serverContext = try assertNoThrowWithValue(NIOSSLContext(configuration: serverConfig))
    let clientContext = try assertNoThrowWithValue(NIOSSLContext(configuration: clientConfig))

    let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
    defer {
        XCTAssertNoThrow(try group.syncShutdownGracefully())
    }

    let serverChannel: Channel = try serverTLSChannel(context: serverContext, handlers: [], group: group)
    defer {
        XCTAssertNoThrow(try serverChannel.close().wait())
    }

    let handshakeCompletePromise = group.next().makePromise(of: Void.self)
    let clientChannel = try clientTLSChannel(
        context: clientContext,
        preHandlers: [],
        postHandlers: [WaitForHandshakeHandler(handshakeResultPromise: handshakeCompletePromise)],
        group: group,
        connectingTo: serverChannel.localAddress!,
        serverHostname: "localhost"
    )
    defer {
        XCTAssertNoThrow(try clientChannel.close().wait())
    }

    // This connection should succeed, as certificate verification is disabled.
XCTAssertNoThrow(try handshakeCompletePromise.futureResult.wait())
}

func testMacOSConnectionFailsIfServerVerificationOptionalAndPeerPresentsUntrustedCert() throws {
    // This test checks that when setting verification to `.optionalVerification`, a peer cannot successfully
    // connect when they present an untrusted certificate. On macOS, this exercises the SecTrust validation backend,
    // as `serverConfig.trustRoots` is set to `.default` (see the behavioral matrix in
    // `NIOSSL/Docs.docc/trust-roots-behavior.md`).
    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(NIOSSLIntegrationTest.cert)],
        privateKey: .privateKey(NIOSSLIntegrationTest.key)
    )
    serverConfig.certificateVerification = .optionalVerification
    serverConfig.trustRoots = .default

    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .default
    clientConfig.additionalTrustRoots = [.certificates([NIOSSLIntegrationTest.cert])]

    // The client presents a random cert but the server won't trust it
    let clientCertAndPrivateKey = generateSelfSignedCert()
    clientConfig.certificateChain = [.certificate(clientCertAndPrivateKey.0)]
    clientConfig.privateKey = .privateKey(clientCertAndPrivateKey.1)

    let serverContext = try assertNoThrowWithValue(NIOSSLContext(configuration: serverConfig))
    let clientContext = try assertNoThrowWithValue(NIOSSLContext(configuration: clientConfig))

    let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
    defer {
        XCTAssertNoThrow(try group.syncShutdownGracefully())
    }

    let handshakeCompletePromise = group.next().makePromise(of: Void.self)
    let serverChannel: Channel = try serverTLSChannel(
        context: serverContext,
        handlers: [WaitForHandshakeHandler(handshakeResultPromise: handshakeCompletePromise)],
        group: group
    )
    defer {
        XCTAssertNoThrow(try serverChannel.close().wait())
    }

    let clientChannel = try clientTLSChannel(
        context: clientContext,
        preHandlers: [],
        postHandlers: [],
        group: group,
        connectingTo: serverChannel.localAddress!,
        serverHostname: "localhost"
    )
    defer {
        XCTAssertNoThrow(try clientChannel.close().wait())
    }

    // The handshake should fail: verification is optional, but the client presented a cert the server doesn't trust.
    XCTAssertThrowsError(try handshakeCompletePromise.futureResult.wait())
}

func testMacOSConnectionSuccessfulIfServerVerificationOptionalAndPeerPresentsTrustedCert() throws {
    // This test checks that when setting verification to `.optionalVerification`, a peer can successfully
    // connect when they present a trusted certificate. On macOS, this exercises the SecTrust validation backend,
    // as `serverConfig.trustRoots` is set to `.default` and the client cert is registered under
    // `additionalTrustRoots` (see the behavioral matrix in `NIOSSL/Docs.docc/trust-roots-behavior.md`).
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .default
    clientConfig.additionalTrustRoots = [.certificates([NIOSSLIntegrationTest.cert])]

    // The client presents a generated cert
    let clientCertAndPrivateKey = generateSelfSignedCert()
    clientConfig.certificateChain = [.certificate(clientCertAndPrivateKey.0)]
    clientConfig.privateKey = .privateKey(clientCertAndPrivateKey.1)

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(NIOSSLIntegrationTest.cert)],
        privateKey: .privateKey(NIOSSLIntegrationTest.key)
    )
    serverConfig.certificateVerification = .optionalVerification
    serverConfig.trustRoots = .default
    // The server trusts the client's generated cert
    serverConfig.additionalTrustRoots = [.certificates([clientCertAndPrivateKey.0])]

    let serverContext = try assertNoThrowWithValue(NIOSSLContext(configuration: serverConfig))
    let clientContext = try assertNoThrowWithValue(NIOSSLContext(configuration: clientConfig))

    let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
    defer {
        XCTAssertNoThrow(try group.syncShutdownGracefully())
    }

    let handshakeCompletePromise = group.next().makePromise(of: Void.self)
    let serverChannel: Channel = try serverTLSChannel(
        context: serverContext,
        handlers: [WaitForHandshakeHandler(handshakeResultPromise: handshakeCompletePromise)],
        group: group
    )
    defer {
        XCTAssertNoThrow(try serverChannel.close().wait())
    }

    let clientChannel = try clientTLSChannel(
        context: clientContext,
        preHandlers: [],
        postHandlers: [],
        group: group,
        connectingTo: serverChannel.localAddress!,
        serverHostname: "localhost"
    )
    defer {
        XCTAssertNoThrow(try clientChannel.close().wait())
    }

    // The handshake should succeed: verification is optional, and the client presents a cert the server trusts.
    XCTAssertNoThrow(try handshakeCompletePromise.futureResult.wait())
}

func testMacOSConnectionSuccessfulIfServerVerificationOptionalAndNoPeerCert() throws {
    // This test checks that when setting verification to `.optionalVerification`, a peer can successfully connect
    // when they don't present any certificate. On macOS, this exercises the SecTrust validation backend, as
    // `serverConfig.trustRoots` is set to `.default` (see the behavioral matrix in
    // `NIOSSL/Docs.docc/trust-roots-behavior.md`).
var serverConfig = TLSConfiguration.makeServerConfiguration(
    certificateChain: [.certificate(NIOSSLIntegrationTest.cert)],
    privateKey: .privateKey(NIOSSLIntegrationTest.key)
)
serverConfig.certificateVerification = .optionalVerification
serverConfig.trustRoots = .default

// The client doesn't present any certs
var clientConfig = TLSConfiguration.makeClientConfiguration()
clientConfig.certificateVerification = .noHostnameVerification
clientConfig.trustRoots = .default
clientConfig.additionalTrustRoots = [.certificates([NIOSSLIntegrationTest.cert])]

let serverContext = try assertNoThrowWithValue(NIOSSLContext(configuration: serverConfig))
let clientContext = try assertNoThrowWithValue(NIOSSLContext(configuration: clientConfig))

let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
defer {
    XCTAssertNoThrow(try group.syncShutdownGracefully())
}

let handshakeCompletePromise = group.next().makePromise(of: Void.self)
let serverChannel: Channel = try serverTLSChannel(
    context: serverContext,
    handlers: [WaitForHandshakeHandler(handshakeResultPromise: handshakeCompletePromise)],
    group: group
)
defer {
    XCTAssertNoThrow(try serverChannel.close().wait())
}

let clientChannel = try clientTLSChannel(
    context: clientContext,
    preHandlers: [],
    postHandlers: [],
    group: group,
    connectingTo: serverChannel.localAddress!,
    serverHostname: "localhost"
)
defer {
    XCTAssertNoThrow(try clientChannel.close().wait())
}

// The handshake should succeed: certificate verification is optional and the client hasn't presented any certs.
XCTAssertNoThrow(try handshakeCompletePromise.futureResult.wait())
}

// Verifies that the custom verification callback is also invoked on the server side,
// and that failing it tears down the handshake.
func testServerHasNewCallbackCalledToo() throws {
    var config = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(NIOSSLIntegrationTest.cert)],
        privateKey: .privateKey(NIOSSLIntegrationTest.key)
    )
    config.certificateVerification = .fullVerification
    config.trustRoots = .default
    let context = try assertNoThrowWithValue(NIOSSLContext(configuration: config))

    let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
    defer {
        XCTAssertNoThrow(try group.syncShutdownGracefully())
    }

    let handshakeResultPromise = group.next().makePromise(of: Void.self)
    let handshakeWatcher = WaitForHandshakeHandler(handshakeResultPromise: handshakeResultPromise)
    let serverChannel: Channel = try serverTLSChannel(
        context: context,
        preHandlers: [],
        postHandlers: [handshakeWatcher],
        group: group,
        customVerificationCallback: { _, promise in
            // Deliberately fail verification so the handshake must error out.
            promise.succeed(.failed)
        }
    )
    defer {
        XCTAssertNoThrow(try serverChannel.close().wait())
    }

    let clientChannel = try clientTLSChannel(
        context: try configuredSSLContext(),
        preHandlers: [],
        postHandlers: [],
        group: group,
        connectingTo: serverChannel.localAddress!
    )
    defer {
        // Ignore errors here, the channel should be closed already by the time this happens.
        try? clientChannel.close().wait()
    }

    XCTAssertThrowsError(try handshakeResultPromise.futureResult.wait())
}

// Verifies that closing the same channel twice (with and without a promise) is safe
// and that the second close completes once CLOSE_NOTIFY has been exchanged.
func testRepeatedClosure() throws {
    let serverChannel = EmbeddedChannel()
    let clientChannel = EmbeddedChannel()
    defer {
        // We expect both cases to throw
        XCTAssertThrowsError(try serverChannel.finish())
        XCTAssertThrowsError(try clientChannel.finish())
    }

    let context = try assertNoThrowWithValue(configuredSSLContext())

    XCTAssertNoThrow(
        try serverChannel.pipeline.syncOperations.addHandler(
            NIOSSLServerHandler(context: context)
        )
    )
    XCTAssertNoThrow(
        try clientChannel.pipeline.syncOperations.addHandler(
            NIOSSLClientHandler(context: context, serverHostname: nil)
        )
    )
    let handshakeHandler = HandshakeCompletedHandler()
    XCTAssertNoThrow(try clientChannel.pipeline.addHandler(handshakeHandler).wait())

    // Connect. This should lead to a completed handshake.
    let addr: SocketAddress = try SocketAddress(unixDomainSocketPath: "/tmp/whatever")
    let connectFuture = clientChannel.connect(to: addr)
    serverChannel.pipeline.fireChannelActive()
    try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)
    try connectFuture.wait()
    XCTAssertTrue(handshakeHandler.handshakeSucceeded)

    // We're going to close twice: the first one without a promise, the second one with one.
    let closed = NIOLockedValueBox(false)
    clientChannel.close(promise: nil)
    clientChannel.close().whenComplete { _ in
        closed.withLockedValue { $0 = true }
    }
    XCTAssertFalse(closed.withLockedValue { $0 })
    XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))

    // The closure should have happened.
XCTAssertTrue(closed.withLockedValue { $0 })
}

// Verifies that a close which never receives the peer's CLOSE_NOTIFY completes (with
// NIOSSLCloseTimedOutError) once the configured shutdown timeout elapses.
func testClosureTimeout() throws {
    let serverChannel = EmbeddedChannel()
    let clientChannel = EmbeddedChannel()
    defer {
        // We expect both cases to throw
        XCTAssertThrowsError(try serverChannel.finish())
        XCTAssertThrowsError(try clientChannel.finish())
    }

    let context = try assertNoThrowWithValue(configuredSSLContext())

    XCTAssertNoThrow(
        try serverChannel.pipeline.syncOperations.addHandler(
            NIOSSLServerHandler(context: context)
        )
    )
    XCTAssertNoThrow(
        try clientChannel.pipeline.syncOperations.addHandler(
            NIOSSLClientHandler(context: context, serverHostname: nil)
        )
    )
    let handshakeHandler = HandshakeCompletedHandler()
    XCTAssertNoThrow(try clientChannel.pipeline.addHandler(handshakeHandler).wait())

    // Connect. This should lead to a completed handshake.
    let addr: SocketAddress = try SocketAddress(unixDomainSocketPath: "/tmp/whatever")
    let connectFuture = clientChannel.connect(to: addr)
    serverChannel.pipeline.fireChannelActive()
    try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)
    try connectFuture.wait()
    XCTAssertTrue(handshakeHandler.handshakeSucceeded)

    let closed = NIOLockedValueBox(false)
    clientChannel.close().whenComplete { _ in
        closed.withLockedValue { $0 = true }
    }

    clientChannel.close().whenFailure { error in
        XCTAssertTrue(error is NIOSSLCloseTimedOutError)
    }

    // Send CLOSE_NOTIFY from the client.
    while let clientDatum = try clientChannel.readOutbound(as: IOData.self) {
        try serverChannel.writeInbound(clientDatum)
    }

    XCTAssertFalse(closed.withLockedValue { $0 })

    // Let the shutdown timeout.
    clientChannel.embeddedEventLoop.advanceTime(by: context.configuration.shutdownTimeout)
    XCTAssertTrue(closed.withLockedValue { $0 })

    // Let the server shutdown.
try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)
}

// Verifies that receiving non-TLS bytes while waiting for the peer's CLOSE_NOTIFY
// surfaces a BoringSSL error and closes the channel.
func testReceivingGibberishAfterAttemptingToClose() throws {
    let serverChannel = EmbeddedChannel()
    let clientChannel = EmbeddedChannel()
    let clientClosed = NIOLockedValueBox(false)
    defer {
        XCTAssertThrowsError(try serverChannel.finish())
        XCTAssertThrowsError(try clientChannel.finish())
    }

    let context = try assertNoThrowWithValue(configuredSSLContext())
    let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))

    XCTAssertNoThrow(
        try serverChannel.pipeline.syncOperations.addHandler(
            NIOSSLServerHandler(context: context)
        )
    )
    XCTAssertNoThrow(
        try clientChannel.pipeline.syncOperations.addHandler(
            clientHandler
        )
    )
    let handshakeHandler = HandshakeCompletedHandler()
    XCTAssertNoThrow(
        try clientChannel.pipeline.syncOperations.addHandler(
            handshakeHandler
        )
    )

    // Mark the closure of the client.
    clientChannel.closeFuture.whenComplete { _ in
        clientClosed.withLockedValue { $0 = true }
    }

    // Connect. This should lead to a completed handshake.
    let addr: SocketAddress = try SocketAddress(unixDomainSocketPath: "/tmp/whatever")
    let connectFuture = clientChannel.connect(to: addr)
    serverChannel.pipeline.fireChannelActive()
    try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)
    try connectFuture.wait()
    XCTAssertTrue(handshakeHandler.handshakeSucceeded)

    // Let's close the client connection.
    clientChannel.close(promise: nil)
    (clientChannel.eventLoop as! EmbeddedEventLoop).run()
    XCTAssertFalse(clientClosed.withLockedValue { $0 })

    // Now we're going to simulate the client receiving gibberish data in response, instead
    // of a CLOSE_NOTIFY.
    var buffer = clientChannel.allocator.buffer(capacity: 1024)
    buffer.writeStaticString("GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 0\r\n\r\n")

    XCTAssertThrowsError(try clientChannel.writeInbound(buffer)) { error in
        let errorString = String(describing: error)
        let range = NSRange(location: 0, length: errorString.utf16.count)
        // The error message embeds a BoringSSL source path and line, so match it with a regex.
        let regex = try! NSRegularExpression(
            pattern:
                "sslError\\(\\[Error\\: 268435703 error\\:100000f7\\:SSL routines\\:OPENSSL_internal\\:WRONG_VERSION_NUMBER at .*\\/[A-Za-z_]+\\.cc\\:[0-9]+\\]\\)"
        )
        XCTAssertNotNil(regex.firstMatch(in: errorString, options: [], range: range))
    }
    (clientChannel.eventLoop as! EmbeddedEventLoop).run()
    XCTAssertTrue(clientClosed.withLockedValue { $0 })

    // Clean up by bringing the server up to speed
    serverChannel.pipeline.fireChannelInactive()
}

// Verifies that writes still pending (unflushed) when the channel is closed fail
// with `ChannelError.ioOnClosedChannel` when they are eventually flushed.
func testPendingWritesFailWhenFlushedOnClose() throws {
    let serverChannel = EmbeddedChannel()
    let clientChannel = EmbeddedChannel()
    defer {
        XCTAssertThrowsError(try serverChannel.finish())
        XCTAssertThrowsError(try clientChannel.finish())
    }

    let context = try assertNoThrowWithValue(configuredSSLContext())
    let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))

    XCTAssertNoThrow(
        try serverChannel.pipeline.syncOperations.addHandler(
            NIOSSLServerHandler(context: context)
        )
    )
    XCTAssertNoThrow(
        try clientChannel.pipeline.syncOperations.addHandler(
            clientHandler
        )
    )
    let handshakeHandler = HandshakeCompletedHandler()
    XCTAssertNoThrow(try clientChannel.pipeline.addHandler(handshakeHandler).wait())

    // Connect. This should lead to a completed handshake.
    let addr: SocketAddress = try SocketAddress(unixDomainSocketPath: "/tmp/whatever")
    let connectFuture = clientChannel.connect(to: addr)
    serverChannel.pipeline.fireChannelActive()
    try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)
    try connectFuture.wait()
    XCTAssertTrue(handshakeHandler.handshakeSucceeded)

    // Queue up a write.
let writeCompleted = NIOLockedValueBox(false)
var buffer = clientChannel.allocator.buffer(capacity: 1024)
buffer.writeStaticString("Hello, world!")
clientChannel.write(buffer).map {
    XCTFail("Must not succeed")
}.whenFailure { error in
    XCTAssertEqual(error as? ChannelError, .ioOnClosedChannel)
    writeCompleted.withLockedValue { $0 = true }
}

// We haven't spun the event loop, so the handlers are still in the pipeline. Now attempt to close.
let closed = NIOLockedValueBox(false)
clientChannel.closeFuture.whenComplete { _ in
    closed.withLockedValue { $0 = true }
}

XCTAssertFalse(writeCompleted.withLockedValue { $0 })
clientChannel.close(promise: nil)
(clientChannel.eventLoop as! EmbeddedEventLoop).run()
XCTAssertFalse(writeCompleted.withLockedValue { $0 })
XCTAssertFalse(closed.withLockedValue { $0 })

// Now try to flush the write. This should fail the write early, and take out the connection.
clientChannel.flush()
(clientChannel.eventLoop as! EmbeddedEventLoop).run()
XCTAssertTrue(writeCompleted.withLockedValue { $0 })
XCTAssertTrue(closed.withLockedValue { $0 })

// Bring the server up to speed.
serverChannel.pipeline.fireChannelInactive()
}

// Verifies that channelInactive triggered while processing CLOSE_NOTIFY is handled safely.
func testChannelInactiveAfterCloseNotify() throws {
    // Swallows the second channelInactive so it is only propagated once.
    class SecondChannelInactiveSwallower: ChannelInboundHandler {
        typealias InboundIn = Any
        private var channelInactiveCalls = 0

        func channelInactive(context: ChannelHandlerContext) {
            if self.channelInactiveCalls == 0 {
                self.channelInactiveCalls += 1
                context.fireChannelInactive()
            }
        }
    }

    // Fires channelInactive into the pipeline as soon as a read is delivered.
    class FlushOnReadHandler: ChannelInboundHandler {
        typealias InboundIn = Any

        func channelRead(context: ChannelHandlerContext, data: NIOAny) {
            context.pipeline.fireChannelInactive()
        }
    }

    let context = try assertNoThrowWithValue(configuredSSLContext())

    let serverChannel = EmbeddedChannel()
    let clientChannel = EmbeddedChannel()
    defer {
        _ = try? serverChannel.finish()
        // The client channel is closed in the test.
}

XCTAssertNoThrow(
    try serverChannel.pipeline.syncOperations.addHandlers(
        SecondChannelInactiveSwallower(),
        NIOSSLServerHandler(context: context),
        FlushOnReadHandler()
    )
)
XCTAssertNoThrow(
    try clientChannel.pipeline.syncOperations.addHandler(
        try NIOSSLClientHandler(context: context, serverHostname: nil)
    )
)

// Connect
let addr = try assertNoThrowWithValue(SocketAddress(unixDomainSocketPath: "/tmp/whatever2"))
let connectFuture = clientChannel.connect(to: addr)
serverChannel.pipeline.fireChannelActive()
XCTAssertNoThrow(try serverChannel.connect(to: SocketAddress(ipAddress: "1.2.3.4", port: 5678)).wait())
XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
XCTAssertNoThrow(try connectFuture.wait())

// Here we want to issue a write, a flush, and then a close. This will trigger a CLOSE_NOTIFY message to be emitted by the
// client. Unfortunately, interactInMemory doesn't do quite what we want, as we need to coalesce all these writes, so
// we'll have to do some of this ourselves.
var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
originalBuffer.writeString("Hello")
let clientClose = clientChannel.writeAndFlush(originalBuffer)
clientChannel.close(promise: nil)

var buffer = clientChannel.allocator.buffer(capacity: 1024)
while case .some(.byteBuffer(var data)) = try clientChannel.readOutbound(as: IOData.self) {
    buffer.writeBuffer(&data)
}

// The client has sent CLOSE_NOTIFY, so the server will unbuffer any reads it has. This in turn
// causes channelInactive to be fired back into the SSL handler.
XCTAssertThrowsError(try serverChannel.writeInbound(buffer)) { error in
    XCTAssertEqual(NIOSSLError.uncleanShutdown, error as? NIOSSLError)
}
XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
XCTAssertNoThrow(try clientClose.wait())
}

// Verifies that key-log callbacks fire symmetrically on client and server and
// produce newline-terminated lines.
func testKeyLoggingClientAndServer() throws {
    let clientLines: UnsafeMutableTransferBox<[ByteBuffer]> = .init([])
    let serverLines: UnsafeMutableTransferBox<[ByteBuffer]> = .init([])

    let clientContext = try assertNoThrowWithValue(
        self.configuredSSLContext(keyLogCallback: { clientLines.wrappedValue.append($0) })
    )
    let serverContext = try assertNoThrowWithValue(
        self.configuredSSLContext(keyLogCallback: { serverLines.wrappedValue.append($0) })
    )

    let serverChannel = EmbeddedChannel()
    let clientChannel = EmbeddedChannel()
    defer {
        // These error as the channel is already closed.
        XCTAssertThrowsError(try serverChannel.finish())
        XCTAssertThrowsError(try clientChannel.finish())
    }

    // Handshake
    XCTAssertNoThrow(
        try clientChannel.pipeline.syncOperations.addHandler(
            NIOSSLClientHandler(context: clientContext, serverHostname: nil)
        )
    )
    XCTAssertNoThrow(
        try serverChannel.pipeline.syncOperations.addHandler(
            NIOSSLServerHandler(context: serverContext)
        )
    )
    let handshakeHandler = HandshakeCompletedHandler()
    XCTAssertNoThrow(try clientChannel.pipeline.addHandler(handshakeHandler).wait())

    // Connect. This should lead to a completed handshake.
    let addr: SocketAddress = try SocketAddress(unixDomainSocketPath: "/tmp/whatever")
    let connectFuture = clientChannel.connect(to: addr)
    serverChannel.pipeline.fireChannelActive()
    try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)
    try connectFuture.wait()
    XCTAssertTrue(handshakeHandler.handshakeSucceeded)

    // In our code this should do TLS 1.3, so we expect 5 lines each.
    XCTAssertEqual(clientLines.wrappedValue.count, 5)
    XCTAssertEqual(serverLines.wrappedValue.count, 5)

    // Each in the same order.
    XCTAssertEqual(clientLines.wrappedValue, serverLines.wrappedValue)

    // Each line should be newline terminated.
for line in clientLines.wrappedValue {
    XCTAssertTrue(line.readableBytesView.last! == UInt8(ascii: "\n"))
}
for line in serverLines.wrappedValue {
    XCTAssertTrue(line.readableBytesView.last! == UInt8(ascii: "\n"))
}

// Close and let the two channels shutdown.
clientChannel.close(promise: nil)
try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)
}

// Verifies that issuing many concurrent close calls on one channel is safe:
// all close futures must complete.
func testLoadsOfCloses() throws {
    let context = try configuredSSLContext()

    // 3 threads so server, client, and accepted all have their own thread.
    let group = MultiThreadedEventLoopGroup(numberOfThreads: 3)
    defer {
        XCTAssertNoThrow(try group.syncShutdownGracefully())
    }

    let serverChannel: Channel = try serverTLSChannel(context: context, handlers: [], group: group)
    defer {
        XCTAssertNoThrow(try serverChannel.close().wait())
    }

    let clientChannel = try clientTLSChannel(
        context: context,
        preHandlers: [],
        postHandlers: [],
        group: group,
        connectingTo: serverChannel.localAddress!
    )
    let closeFutures = (0..<20).map { _ in
        clientChannel.close()
    }
    XCTAssertNoThrow(try EventLoopFuture.andAllComplete(closeFutures, on: clientChannel.eventLoop).wait())
}

// Regression test: issuing a write from the failure callback of another write must
// not trap with an exclusivity violation.
func testWriteFromFailureOfWrite() throws {
    let serverChannel = EmbeddedChannel()
    let clientChannel = EmbeddedChannel()
    defer {
        // Both were closed uncleanly in the test, so they'll throw.
        XCTAssertThrowsError(try serverChannel.finish())
        XCTAssertThrowsError(try clientChannel.finish())
    }

    let context = try configuredSSLContext()

    try serverChannel.pipeline.syncOperations.addHandler(
        NIOSSLServerHandler(context: context)
    )
    try clientChannel.pipeline.syncOperations.addHandler(
        try NIOSSLClientHandler(context: context, serverHostname: nil)
    )

    // Do the handshake.
let addr: SocketAddress = try SocketAddress(unixDomainSocketPath: "/tmp/whatever")
let connectFuture = clientChannel.connect(to: addr)
serverChannel.pipeline.fireChannelActive()
try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)
try connectFuture.wait()

// Ok, we're gonna do a weird thing here. We're going to queue up a write, whose write promise is going
// to issue another write. In older builds, this would crash due to an exclusivity violation.
var buffer = clientChannel.allocator.buffer(capacity: 1024)
buffer.writeBytes("Hello, world!".utf8)
clientChannel.write(buffer).whenComplete { [buffer] _ in
    clientChannel.writeAndFlush(buffer, promise: nil)
}

// Now we're going to fire channel inactive on the client. This used to crash: now it doesn't.
clientChannel.pipeline.fireChannelInactive()

// Do the same for the server, but we don't care about the outcome.
serverChannel.pipeline.fireChannelInactive()
}

func testChannelInactiveDuringHandshakeSucceeded() throws {
    // This test aims to reproduce a very unusual crash. I've never been able to come up with a clear justification of
    // how we managed to hit it, but it goes a bit like this:
    //
    // 1. During a TLS handshake, a server performs a `channelRead` that triggers a handshakeCompleted message.
    // 2. Synchronously, during that pipeline traversal, we end up with a read buffer that also contains a CLOSE_NOTIFY alert.
    //    This may have arrived in the same packet with the handshake completion message, or as a result of something we did.
    // 3. Additionally, we manage to synchronously enter the .closed or .unwrapped state. This is hard to imagine, but it can
    //    happen in a few ways: channelInactive forces this transition, and managing to do a shutdown reentrantly due to extra
    //    I/O can trigger it as well.
    // 4. We then progress through the handshake and crash.
    //
    // To make this manifest in the test we use a pair of promises and finagle the I/O such that everything goes wrong at once.
// This can indicate how unusual the circumstance is in which this happens. Nonetheless, we've seen it happen on production // systems. let serverChannel = EmbeddedChannel() let clientChannel = EmbeddedChannel() defer { // Both were closed uncleanly in the test, but the server error was already // consumed. XCTAssertNoThrow(try serverChannel.finish()) XCTAssertThrowsError(try clientChannel.finish()) } let context = try configuredSSLContext() let clientChannelCompletedPromise = clientChannel.eventLoop.makePromise(of: Void.self) let clientChannelCompletedHandler = WaitForHandshakeHandler( handshakeResultPromise: clientChannelCompletedPromise ) let serverChannelCompletedPromise = serverChannel.eventLoop.makePromise(of: Void.self) let serverChannelCompletedHandler = WaitForHandshakeHandler( handshakeResultPromise: serverChannelCompletedPromise ) clientChannelCompletedPromise.futureResult.whenSuccess { // Here we need to immediately (and _recursively_) ask the client channel to shutdown. This should force a CLOSE_NOTIFY // message out in the same tick as the handshake message. clientChannel.close(promise: nil) // Now deliver all the client messages to the server channel _in one go_. var flattenedBytes = clientChannel.allocator.buffer(capacity: 1024) while let clientDatum = try! clientChannel.readOutbound(as: ByteBuffer.self) { flattenedBytes.writeImmutableBuffer(clientDatum) } // Can't use XCTAssertThrowsError here, this function call isn't allowed to throw. do { try serverChannel.writeInbound(flattenedBytes) XCTFail("Expected to throw") } catch { guard case .some(.uncleanShutdown) = error as? NIOSSLError else { XCTFail("Unexpected error \(error)") return } } } serverChannelCompletedPromise.futureResult.whenSuccess { // Here we do something very, very dangerous: we call fireChannelInactive on our own channel. // This simulates us hitting a close condition in some other form. 
serverChannel.pipeline.fireChannelInactive() } XCTAssertNoThrow( try serverChannel.pipeline.syncOperations.addHandlers([ NIOSSLServerHandler(context: context), serverChannelCompletedHandler, ]) ) XCTAssertNoThrow( try clientChannel.pipeline.syncOperations.addHandlers([ try NIOSSLClientHandler(context: context, serverHostname: nil), clientChannelCompletedHandler, ]) ) // Do the handshake. let addr: SocketAddress = try SocketAddress(unixDomainSocketPath: "/tmp/whatever") let connectFuture = clientChannel.connect(to: addr) serverChannel.pipeline.fireChannelActive() try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel) try connectFuture.wait() // We now need to forcibly shutdown the client channel, as otherwise it'll wedge waiting for a server that never comes back. clientChannel.pipeline.fireChannelInactive() } func testTrustedFirst() throws { // We need to explain this test a bit. // // BoringSSL has a flag: X509_V_FLAG_TRUSTED_FIRST. This flag affects the way the X509 verifier works. In particular, // it causes the verifier to look for certificates in the trust store _before_ it looks for them in the chain. This // is important, because some misbehaving clients may send an excessively long chain that, in some cases, includes // certificates we don't trust! // // In this case, the server has a cert that was signed by a CA whose original certificate has expired. We, the client, // have a valid root certificate for the intermediate that _actually_ issued the key, which is now a root, as // well as the old cert. (This is important! If we don't also have the old cert, this fails.) // The server is, stupidly, also sending the old, _expired_, CA root cert. This test validates that we // ignore the dumb server and get to the valid trust chain anyway. 
        let oldCA = try NIOSSLCertificate(bytes: Array(sampleExpiredCA.utf8), format: .pem)
        let oldIntermediate = try NIOSSLCertificate(bytes: Array(sampleIntermediateCA.utf8), format: .pem)
        let newCA = try NIOSSLCertificate(bytes: Array(sampleIntermediateAsRootCA.utf8), format: .pem)
        let serverCert = try NIOSSLCertificate(bytes: Array(sampleClientOfIntermediateCA.utf8), format: .pem)
        let serverKey = try NIOSSLPrivateKey(
            bytes: Array(sampleKeyForCertificateOfClientOfIntermediateCA.utf8),
            format: .pem
        )

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.trustRoots = .certificates([newCA, oldCA])
        let serverConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(serverCert), .certificate(oldIntermediate), .certificate(oldCA)],
            privateKey: .privateKey(serverKey)
        )

        let clientContext = try NIOSSLContext(configuration: clientConfig)
        let serverContext = try NIOSSLContext(configuration: serverConfig)

        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            XCTAssertNoThrow(try group.syncShutdownGracefully())
        }

        // NOTE(review): generic argument restored — the promise is fulfilled below with the
        // ByteBuffer read back by PromiseOnReadHandler (see `newBuffer` comparison).
        let completionPromise: EventLoopPromise<ByteBuffer> = group.next().makePromise()

        let serverChannel: Channel = try serverTLSChannel(
            context: serverContext,
            handlers: [SimpleEchoServer()],
            group: group
        )
        defer {
            XCTAssertNoThrow(try serverChannel.close().wait())
        }

        let clientChannel = try clientTLSChannel(
            context: clientContext,
            preHandlers: [],
            postHandlers: [PromiseOnReadHandler(promise: completionPromise)],
            group: group,
            connectingTo: serverChannel.localAddress!,
            serverHostname: "localhost"
        )
        defer {
            XCTAssertNoThrow(try clientChannel.close().wait())
        }

        var originalBuffer = clientChannel.allocator.buffer(capacity: 5)
        originalBuffer.writeString("Hello")
        try clientChannel.writeAndFlush(originalBuffer).wait()

        let newBuffer = try completionPromise.futureResult.wait()
        XCTAssertEqual(newBuffer, originalBuffer)
    }

    func testWriteSplitting() throws {
        // This test validates that we chunk writes larger than a certain value.
This is an attempt // to regression test part of our defense against large writes, without requiring that the value end up being giant. let maxWriteSize = 1024 let targetSize = (maxWriteSize * 4) + 1 let write = ByteBuffer(repeating: 0, count: targetSize) let b2b = BackToBackEmbeddedChannel() var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.certificateVerification = .noHostnameVerification clientConfig.trustRoots = .certificates([Self.cert]) let serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(Self.cert)], privateKey: .privateKey(Self.key) ) let clientContext = try assertNoThrowWithValue(NIOSSLContext(configuration: clientConfig)) let serverContext = try assertNoThrowWithValue(NIOSSLContext(configuration: serverConfig)) XCTAssertNoThrow( try b2b.client.pipeline.syncOperations.addHandler( try NIOSSLClientHandler( context: clientContext, serverHostname: "localhost", optionalCustomVerificationCallbackManager: nil, optionalAdditionalPeerCertificateVerificationCallback: nil, maxWriteSize: maxWriteSize ) ) ) XCTAssertNoThrow( try b2b.server.pipeline.syncOperations.addHandler( NIOSSLServerHandler(context: serverContext) ) ) XCTAssertNoThrow(try b2b.connectInMemory()) let completed = NIOLockedValueBox(false) let promise = b2b.loop.makePromise(of: Void.self) promise.futureResult.whenComplete { _ in completed.withLockedValue { $0 = true } } let recordObserver = TLS13RecordObserver() XCTAssertNoThrow( try b2b.client.pipeline.syncOperations.addHandler( recordObserver, position: .first ) ) b2b.client.writeAndFlush(write, promise: promise) try b2b.interactInMemory() var reads: [ByteBuffer] = [] while let read = try b2b.server.readInbound(as: ByteBuffer.self) { reads.append(read) } let totalReadBytes = reads.reduce(into: 0, { $0 += $1.readableBytes }) XCTAssertEqual(totalReadBytes, targetSize) XCTAssertTrue(completed.withLockedValue { $0 }) XCTAssertEqual(recordObserver.writtenRecords.filter { $0.contentType == 
.applicationData }.count, 5)

        b2b.client.close(promise: nil)
        try b2b.interactInMemory()
    }

    func testDoesNotSpinLoopWhenInactiveAndActiveAreReversed() throws {
        // This is a regression test for https://github.com/apple/swift-nio-ssl/issues/467
        //
        // If channelInactive occurs before channelActive and a re-entrant write and flush occurred
        // in channelActive then 'NIOSSLHandler.doUnbufferActions(context:)' would loop
        // indefinitely.
        let eventLoop = EmbeddedEventLoop()
        let promise = eventLoop.makePromise(of: Void.self)

        final class WriteAndFlushOnActive: ChannelInboundHandler {
            typealias InboundIn = ByteBuffer
            typealias OutboundOut = ByteBuffer

            // NOTE(review): generic argument restored — this promise is created above with
            // `eventLoop.makePromise(of: Void.self)`.
            private let promise: EventLoopPromise<Void>

            init(promise: EventLoopPromise<Void>) {
                self.promise = promise
            }

            func channelActive(context: ChannelHandlerContext) {
                let buffer = context.channel.allocator.buffer(string: "You spin me right 'round")
                context.writeAndFlush(self.wrapOutboundOut(buffer), promise: self.promise)
                context.fireChannelActive()
            }
        }

        let context = try self.configuredSSLContext()
        let handler = try NIOSSLClientHandler(context: context, serverHostname: nil)
        let channel = EmbeddedChannel(
            handlers: [handler, WriteAndFlushOnActive(promise: promise)],
            loop: eventLoop
        )

        // Close _before_ channel active. This shouldn't (but can https://github.com/apple/swift-nio/issues/2773)
        // happen for 'real' channels by synchronously closing the channel when the connect promise
        // is succeeded.
        channel.pipeline.fireChannelInactive()
        channel.pipeline.fireChannelActive()

        // The handshake starts in channelActive (and handlerAdded if the channel is already
        // active). If the events are reordered then the handshake shouldn't start and there
        // shouldn't be any outbound data.
        XCTAssertNil(try channel.readOutbound(as: ByteBuffer.self))

        // The write promise should fail.
        XCTAssertThrowsError(try promise.futureResult.wait()) { error in
            XCTAssertEqual(error as? ChannelError, .ioOnClosedChannel)
        }

        // Subsequent writes should also fail.
XCTAssertThrowsError(try channel.writeOutbound(ByteBuffer(string: "Like a record, baby, right 'round"))) { error in XCTAssertEqual(error as? ChannelError, .ioOnClosedChannel) } } } ================================================ FILE: Tests/NIOSSLTests/NIOSSLSecureBytesTests.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2022 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import XCTest @testable import NIOSSL public enum NIOSSLSecureBytesError: Error { case incorrectKeySize } extension NIOSSLSecureBytesError: Equatable {} final class NIOSSLSecureBytesTests: XCTestCase { func testBasicSoundness() { var first = NIOSSLSecureBytes() var second = NIOSSLSecureBytes() first.append(Data("hello".utf8)) second.append(Data("hello".utf8)) XCTAssertEqual(first, second) first.append(Data("world".utf8)) second.append(Data("wrold".utf8)) XCTAssertNotEqual(first, second) } func testSimpleCollection() { let base = NIOSSLSecureBytes(0..<100) XCTAssertEqual(base.count, 100) XCTAssertEqual(Array(base), Array(0..<100)) XCTAssertEqual(base.first, 0) XCTAssertEqual(base.last, 99) XCTAssertEqual(base.reduce(Int(0)) { Int($0) + Int($1) }, 4950) } func testSimpleBidirectionalCollection() { let base = NIOSSLSecureBytes(0..<100) let reversed = base.reversed() XCTAssertEqual(Array(reversed), Array(stride(from: 99, through: 0, by: -1))) } func testSimpleRandomAccessCollection() { // Not easy to test this, just try to move the indices around a bit. 
let base = NIOSSLSecureBytes(0..<100) let aMiddleIndex = base.index(base.startIndex, offsetBy: 48) let aDifferentMiddleIndex = base.index(aMiddleIndex, offsetBy: 5) XCTAssertEqual(base.distance(from: aMiddleIndex, to: aDifferentMiddleIndex), 5) XCTAssertEqual(base[aMiddleIndex], 48) XCTAssertEqual(base[aDifferentMiddleIndex], 48 + 5) } func testSimpleMutableCollection() { var base = NIOSSLSecureBytes(repeating: 0, count: 5) let offset = base.index(base.startIndex, offsetBy: 2) base[offset] = 5 XCTAssertEqual(Array(base), [0, 0, 5, 0, 0]) } func testSimpleRangeReplaceableCollection() { // This test validates RangeReplaceableCollection and the value semantics all at once. let base = NIOSSLSecureBytes(repeating: 0, count: 10) let baseBytes = Array(repeating: UInt8(0), count: 10) // There are a few ways we can "replace" a subrange. The first is to extend at the front by appending. var copy = base copy.insert(contentsOf: [1, 2, 3, 4], at: copy.startIndex) XCTAssertEqual(Array(copy), [1, 2, 3, 4] + baseBytes) XCTAssertEqual(Array(base), baseBytes) XCTAssertNotEqual(copy, base) // The second is to extend at the back. copy = base copy.append(contentsOf: [1, 2, 3, 4]) XCTAssertEqual(Array(copy), baseBytes + [1, 2, 3, 4]) XCTAssertEqual(Array(base), baseBytes) XCTAssertNotEqual(copy, base) // The third is to "shrink" by replacing a subrange in the middle. copy = base var aMiddleIndex = copy.index(copy.startIndex, offsetBy: 2) var aDifferentMiddleIndex = copy.index(aMiddleIndex, offsetBy: 5) copy.removeSubrange(aMiddleIndex.. Void = { _ = try NIOSSLSecureBytes(unsafeUninitializedCapacity: 5) { (_, _) in throw NIOSSLSecureBytesError.incorrectKeySize } } XCTAssertThrowsError(try testThrowingInitialization()) { error in guard case .some(.incorrectKeySize) = error as? 
NIOSSLSecureBytesError else { XCTFail("unexpected error: \(error)") return } } } func testAppendingDataPerformsACoW() { var base = NIOSSLSecureBytes(repeating: 0, count: 10) let copy = base base.append("Hello, world".utf8) XCTAssertEqual(base.count, 22) XCTAssertEqual(copy.count, 10) } func testRequestingAMutablePointerPerformsACoW() { var base = NIOSSLSecureBytes(repeating: 0, count: 10) let copy = base let lower = base.index(base.startIndex, offsetBy: 4) let upper = base.index(base.startIndex, offsetBy: 7) base.replaceSubrange(lower...upper, with: [1, 2, 3, 4]) XCTAssertEqual(Array(base), [0, 0, 0, 0, 1, 2, 3, 4, 0, 0]) XCTAssertEqual(Array(copy), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) } func testDataCausesCoWs() { var base = NIOSSLSecureBytes(repeating: 0, count: 10) let copy = Data(base) XCTAssertEqual(base.count, copy.count) base.append("Hello, world".utf8) XCTAssertEqual(base.count, 22) XCTAssertEqual(copy.count, 10) } func testDataFromSlice() { var base = NIOSSLSecureBytes(0..<10) let copy = Data(base.prefix(5)) XCTAssertEqual(Array(copy), [0, 1, 2, 3, 4]) base.append("Hello, world".utf8) XCTAssertEqual(base.count, 22) XCTAssertEqual(Array(copy), [0, 1, 2, 3, 4]) } func testEquatable() { var a = NIOSSLSecureBytes() a.append(Data("hello".utf8)) var b = NIOSSLSecureBytes() b.append(Data("hello".utf8)) XCTAssertTrue(a == b) var c = NIOSSLSecureBytes() c.append(Data("world".utf8)) XCTAssertFalse(a == c) } func testByteCreation() { let a = NIOSSLSecureBytes(bytes: [0x01, 0x02, 0x03, 0x04]) let b = NIOSSLSecureBytes(bytes: [0x01, 0x02, 0x03, 0x04, 0x05]) let c = NIOSSLSecureBytes(bytes: [0x01, 0x02, 0x03, 0x04]) XCTAssertTrue(a == c) XCTAssertFalse(a == b) } } ================================================ FILE: Tests/NIOSSLTests/NIOSSLTestHelpers.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 
2017-2021 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// @_implementationOnly import CNIOBoringSSL import Foundation import NIOCore import NIOEmbedded @testable import NIOSSL let samplePemCert = """ -----BEGIN CERTIFICATE----- MIIGGzCCBAOgAwIBAgIJAJ/X0Fo0ynmEMA0GCSqGSIb3DQEBCwUAMIGjMQswCQYD VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5z b2t5bzEuMCwGA1UECgwlU2FuIEZyYW5zb2t5byBJbnN0aXR1dGUgb2YgVGVjaG5v bG9neTEVMBMGA1UECwwMUm9ib3RpY3MgTGFiMSAwHgYDVQQDDBdyb2JvdHMuc2Fu ZnJhbnNva3lvLmVkdTAeFw0xNzEwMTYyMTAxMDJaFw00NzEwMDkyMTAxMDJaMIGj MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2Fu IEZyYW5zb2t5bzEuMCwGA1UECgwlU2FuIEZyYW5zb2t5byBJbnN0aXR1dGUgb2Yg VGVjaG5vbG9neTEVMBMGA1UECwwMUm9ib3RpY3MgTGFiMSAwHgYDVQQDDBdyb2Jv dHMuc2FuZnJhbnNva3lvLmVkdTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC ggIBAO9rzJOOE8cmsIqAJMCrHDxkBAMgZhMsJ863MnWtVz5JIJK6CKI/Nu26tEzo kHy3EI9565RwikvauheMsWaTFA4PD/P+s1DtxRCGIcK5x+SoTN7Drn5ZueoJNZRf TYuN+gwyhprzrZrYjXpvEVPYuSIeUqK5XGrTyFA2uGj9wY3f9IF4rd7JT0ewRb1U 8OcR7xQbXKGjkY4iJE1TyfmIsBZboKaG/aYa9KbnWyTkDssaELWUIKrjwwuPgVgS vlAYmo12MlsGEzkO9z78jvFmhUOsaEldM8Ua2AhOKW0oSYgauVuro/Ap/o5zn8PD IDapl9g+5vjN2LucqX2a9utoFvxSKXT4NvfpL9fJvzdBNMM4xpqtHIkV0fkiMbWk EW2FFlOXKnIJV8wT4a9iduuIDMg8O7oc+gt9pG9MHTWthXm4S29DARTqfZ48bW77 z8RrEURV03o05b/twuAJSRyyOCUi61yMo3YNytebjY2W3Pxqpq+YmT5qhqBZDLlT LMptuFdISv6SQgg7JoFHGMWRXUavMj/sn5qZD4pQyZToHJ2Vtg5W/MI1pKwc3oKD 6M3/7Gf35r92V/ox6XT7+fnEsAH8AtQiZJkEbvzJ5lpUihSIaV3a/S+jnk7Lw8Tp vjtpfjOg+wBblc38Oa9tk2WdXwYDbnvbeL26WmyHwQTUBi1jAgMBAAGjUDBOMB0G A1UdDgQWBBToPRmTBQEF5F5LcPiUI5qBNPBU+DAfBgNVHSMEGDAWgBToPRmTBQEF 5F5LcPiUI5qBNPBU+DAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQCY gxM5lufF2lTB9sH0s1E1VTERv37qoapNP+aw06oZkAD67QOTXFzbsM3JU1diY6rV 
Y0g9CLzRO7gZY+kmi1WWnsYiMMSIGjIfsB8S+ot43LME+AJXPVeDZQnoZ6KQ/9r+ 71Umi4AKLoZ9dInyUIM3EHg9pg5B0eEINrh4J+OPGtlC3NMiWxdmIkZwzfXa+64Z 8k5aX5piMTI+9BQSMWw5l7tFT/PISuI8b/Ln4IUBXKA0xkONXVnjPOmS0h7MBoc2 EipChDKnK+Mtm9GQewOCKdS2nsrCndGkIBnUix4ConUYIoywVzWGMD+9OzKNg76d O6A7MxdjEdKhf1JDvklxInntDUDTlSFL4iEFELwyRseoTzj8vJE+cL6h6ClasYQ6 p0EeL3UpICYerfIvPhohftCivCH3k7Q1BSf0fq73cQ55nrFAHrqqYjD7HBeBS9hn 3L6bz9Eo6U9cuxX42k3l1N44BmgcDPin0+CRTirEmahUMb3gmvoSZqQ3Cz86GkIg 7cNJosc9NyevQlU9SX3ptEbv33tZtlB5GwgZ2hiGBTY0C3HaVFjLpQiSS5ygZLgI /+AKtah7sTHIAtpUH1ZZEgKPl1Hg6J4x/dBkuk3wxPommNHaYaHREXF+fHMhBrSi yH8agBmmECpa21SVnr7vrL+KSqfuF+GxwjSNsSR4SA== -----END CERTIFICATE----- """ let samplePemKey = """ -----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEA72vMk44TxyawioAkwKscPGQEAyBmEywnzrcyda1XPkkgkroI oj827bq0TOiQfLcQj3nrlHCKS9q6F4yxZpMUDg8P8/6zUO3FEIYhwrnH5KhM3sOu flm56gk1lF9Ni436DDKGmvOtmtiNem8RU9i5Ih5SorlcatPIUDa4aP3Bjd/0gXit 3slPR7BFvVTw5xHvFBtcoaORjiIkTVPJ+YiwFlugpob9phr0pudbJOQOyxoQtZQg quPDC4+BWBK+UBiajXYyWwYTOQ73PvyO8WaFQ6xoSV0zxRrYCE4pbShJiBq5W6uj 8Cn+jnOfw8MgNqmX2D7m+M3Yu5ypfZr262gW/FIpdPg29+kv18m/N0E0wzjGmq0c iRXR+SIxtaQRbYUWU5cqcglXzBPhr2J264gMyDw7uhz6C32kb0wdNa2FebhLb0MB FOp9njxtbvvPxGsRRFXTejTlv+3C4AlJHLI4JSLrXIyjdg3K15uNjZbc/Gqmr5iZ PmqGoFkMuVMsym24V0hK/pJCCDsmgUcYxZFdRq8yP+yfmpkPilDJlOgcnZW2Dlb8 wjWkrBzegoPozf/sZ/fmv3ZX+jHpdPv5+cSwAfwC1CJkmQRu/MnmWlSKFIhpXdr9 L6OeTsvDxOm+O2l+M6D7AFuVzfw5r22TZZ1fBgNue9t4vbpabIfBBNQGLWMCAwEA AQKCAgArWV9PEBhwpIaubQk6gUC5hnpbfpA8xG/os67FM79qHZ9yMZDCn6N4Y6el jS4sBpFPCQoodD/2AAJVpTmxksu8x+lhiio5avOVTFPsh+qzce2JH/EGG4TX5Rb4 aFEIBYrSjotknt49/RuQoW+HuOO8U7UulVUwWmwYae/1wow6/eOtVYZVoilil33p C+oaTFr3TwT0l0MRcwkTnyogrikDw09RF3vxiUvmtFkCUvCCwZNo7QsFJfv4qeEH a01d/zZsiowPgwgT+qu1kdDn0GIsoJi5P9DRzUx0JILHqtW1ePE6sdca8t+ON00k Cr5YZ1iA5NK5Fbw6K+FcRqSSduRCLYXAnI5GH1zWMki5TUdl+psvCnpdZK5wysGe tYfIbrVHXIlg7J3R4BrbMF4q3HwOppTHMrqsGyRVCCSjDwXjreugInV0CRzlapDs JNEVyrbt6Ild6ie7c1AJqTpibJ9lVYRVpG35Dni9RJy5Uk5m89uWnF9PCjCRCHOf 
4UATY+qie6wlu0E8y43LcTvDi8ROXQQoCnys2ES8DmS+GKJ1uzG1l8jx3jF9BMAJ kyzZfSmPwuS2NUk8sftYQ8neJSgk4DOV4h7x5ghaBWYzseomy3uo3gD4IyuiO56K y7IYZnXSt2s8LfzhVcB5I4IZbSIvP/MAEkGMC09SV+dEcEJSQQKCAQEA/uJex1ef g+q4gb/C4/biPr+ZRFheVuHu49ES0DXxoxmTbosGRDPRFBLwtPxCLuzHXa1Du2Vc c0E12zLy8wNczv5bGAxynPo57twJCyeptFNFJkb+0uxRrCi+CZ56Qertg2jr460Q cg+TMYxauDleLzR7uwL6VnOhTSq3CVTA2TrQ+kjIHgVqmmpwgk5bPBRDj2EuqdyD dEQmt4z/0fFFBmW6iBcXS9y8Q1rCnAHKjDUEoXKyJYL85szupjUuerOt6iTIe7CJ pH0REwQO4djwM4Ju/PEGfBs+RqgNXoHmBMcFdf9RdogCuFit7lX0+LlRT/KJitan LaaFgY1TXTVkcwKCAQEA8HgZuPGVHQTMHCOfNesXxnCY9Dwqa9ZVukqDLMaZ0TVy PIqXhdNeVCWpP+VXWhj9JRLNuW8VWYMxk+poRmsZgbdwSbq30ljsGlfoupCpXfhd AIhUeRwLVl4XnaHW+MjAmY/rqO156/LvNbV5e0YsqObzynlTczmhhYwi48x1tdf0 iuCn8o3+Ikv8xM7MuMnv5QmGp2l8Q3BhwxLN1x4MXfbG+4BGsqavudIkt71RVbSb Sp7U4Khq3UEnCekrceRLQpJykRFu11/ntPsJ0Q+fLuvuRUMg/wsq8WTuVlwLrw46 hlRcq6S99jc9j2TbidxHyps6j8SDnEsEFHMHH8THUQKCAQAd03WN1CYZdL0UidEP hhNhjmAsDD814Yhn5k5SSQ22rUaAWApqrrmXpMPAGgjQnuqRfrX/VtQjtIzN0r91 Sn5wxnj4bnR3BB0FY4A3avPD4z6jRQmKuxavk7DxRTc/QXN7vipkYRscjdAGq0ru ZeAsm/Kipq2Oskc81XPHxsAua2CK+TtZr/6ShUQXK34noKNrQs8IF4LWdycksX46 Hgaawgq65CDYwsLRCuzc/qSqFYYuMlLAavyXMYH3tx9yQlZmoNlJCBaDRhNaa04m hZFOJcRBGx9MJI/8CqxN09uL0ZJFBZSNz0qqMc5gpnRdKqpmNZZ8xbOYdvUGfPg1 XwsbAoIBAGdH7iRU/mp8SP48/oC1/HwqmEcuIDo40JE2t6hflGkav3npPLMp2XXi xxK+egokeXWW4e0nHNBZXM3e+/JixY3FL+E65QDfWGjoIPkgcN3/clJsO3vY47Ww rAv0GtS3xKEwA1OGy7rfmIZE72xW84+HwmXQPltbAVjOm52jj1sO6eVMIFY5TlGE uYf+Gkez0+lXchItaEW+2v5h8S7XpRAmkcgrjDHnDcqNy19vXKOm8pvWJDBppZxq A05qa1J7byekprhP+H9gnbBJsimsv/3zL19oOZ/ROBx98S/+ULZbMh/H1BWUqFI7 36Da/L/1cJBAo6JkEPLr9VCjJwgqCEECggEBAI6+35Lf4jDwRPvZV7kE+FQuFp1G /tKxIJtPOZU3sbOVlsFsOoyEfV6+HbpeWxlWnrOnKRFOLoC3s5MVTjPglu1rC0ZX 4b0wMetvun5S1MGadB808rvu5EsEB1vznz1vOXV8oDdkdgBiiUcKewSeCrG1IrXy B9ux859S3JjELzeuNdz+xHqu2AqR22gtqN72tJUEQ95qLGZ8vo+ytY9MDVDqoSWJ 9pqHXFUVLmwHTM0/pciXN4Kx1IL9FZ3fjXgME0vdYpWYQkcvSKLsswXN+LnYcpoQ h33H/Kz4yji7jPN6Uk9wMyG7XGqpjYAuKCd6V3HEHUiGJZzho/VBgb3TVnw= -----END RSA PRIVATE KEY----- """ let sampleECPemKey 
= """ -----BEGIN EC PRIVATE KEY----- MHcCAQEEIMJZj2Qw9NGv83izxbgRr5xRvb0RHymOfl5hDJ/RPI2GoAoGCCqGSM49 AwEHoUQDQgAEc5zHoemKB93GfO9MA/vLYEiYMtV3UWDIV88M/TP59R0dKIuPS2Dw EeAoz1vgyHNpgE73eYX8NII6U11Xv8Lmgg== -----END EC PRIVATE KEY----- """ let samplePemRSAEncryptedKey = """ -----BEGIN RSA PRIVATE KEY----- Proc-Type: 4,ENCRYPTED DEK-Info: AES-256-CBC,701BA8806DAD9F13E63F41109F51B2AD i00KcJzy1B9QkBUvzzhp0RSm53Df6QJlylyIODk/F2M/62nj2eCUzRlkiM1AB6ch CILcSKVwKi0h77j7e9Gh5U2JoJiiq4U2PCkU35MSToYz0fxPVvlDYnGfDSa7vxQl 5A41xZGC8b79rE6Kyffoi9I5g3Munvn6yTqDbpg5Zr6qEsjRz5V/EejkcIM+nidl ZtFmKYLqy8DMApprK2O40i96Bj+j7MISZGzhWvK4Sda+HMbj39vMimR1RwtFvuNJ JLoozb4Za6yNjZV8U3yhFtwLZJOVb0SIivsYk29KxOi85D0s3Gv0ldo4Yn6h6Gad HB5Oeb0rXobi09QywiBL7Mjo/wKiVqUSNi09zZ5iNIpnflZib/DT9Ee9sJWcDwzU PIf6dgwU5azm12USpYWdl0Rs1b9QwTllsSmuKRRmI0O2EiQmZjrH9T0DfOYSDSkq Rs3HRQtIXmURSOnP9DTrf4LMjMoAg/qYDF1jXVV7Qd63Fm57H1MTQq+OhFepXBuS zbG7OXylcd0EqL+yiGcUcLoUlfmP0kOtdwQqmcCVwkyCAdTqV4pzeKMyG94b9P4I 4w4Hew717e77PdqmtosRMhxlwtUPrawkIhgatG/jzGAVE9KUxSGkdPRFAbzE8Fpt KiEMEw1eydwzyOxGHRiEb4axxloryBje8jKokFwQMpqmwVnOc1ElX+XagEgVNB3f 6Ra5EhrIIaI3OfrkRJsW0PQRZ9FA+KpDEoEDA8i0Uh69HodPFBtGcUMbGJUQvABQ +fcm2h3fFhD4Jzf+EA8RJPaG4UavacYplZZr8EQ8KEEmlvCz6yuQt0s/N0dCd4p2 Pg+m37SV4d4suNZE9iVesmFzLSHEDuE0nIRRWak++QRPATLCjp6f78OPBJfbq3oU HPfQ6PW/q3qyR6KQ2ZMXWTaMg8G6w5x66C6ykxt/C5ljQ5rxYqCmK5BvGIoDOP3j F/UYJ6rs7sW9vFyws4p0TkvpPjnCeB35rCc+aj7Ddm7WJicW5zwlnpRuxHlSBAm4 ProoGHwtZsESv+CrnHz/ZfW2e2Mg5H1KKFibqAH81FQHGwmeVbIoksy5t00WSvLQ QbEaqHTl8XppfldenOVNbV1gXf8/MuUfc4/2EELrq5ACoLq5SJHPg+CSlAGkQCrm mEfBDmMOJoYG+POANzTHhZNkq53sp8ccFRLnBtOkFZ2+2FxHKQIrU4kECeGoB0OL 8wq6hRIJUYitZd2eYatm4EAaTmG8C5ZkX5Zgbfjm9S1Af6z93FFgeunFMbvrh5c4 lpIpKoEiwzmFwjMysKZPxi0BljbIRlICI0/FM3ZcB/MJCRkqCl4G+ktHYBLa4kfD C7yTIfRLnkCfloF9yA19ulne0HF67Mq6XBhAmNQFTLimwSM+D+QBcSxqFx2z2eSd pGRePIuxzf9uVqL7vi/LVNJftZsSbBj7L6PJSh/3sqUpxYqVuLvkgs9uqV5YIzig UrKjU1fUWnEJxKKi2CdNfKFJUpQQYmQdvGMiGhATZHIocQ1ceui0RrLrczZpNXMd 
3piGo8YB9SPXLJ2pqzaTunz/iyUvwOqkjxhOsBt+zuLXgiJ5iP9jpnO9huqkJUJL YIQMaT4QvfhJBkpwujlt5fkW6lXDgDFqsoGyDhXMc8l0859Ucx4lT+IIIUKsB+ho zbpFWgNB+rS/i6TgKNlYO1WkPloVbNV+QQSLEtqVMerWnAnT4xMKwUEJOPrD2NWN N3iPNio0suvhgxAWCgFkN8qm5SnYZtC4f7gPEwLsd55APjvCiMxv1dyKt1nRoQrD CSWz3IvB4ZVZV3M4Ozcgn++I8ggsKfaeHxfO+I8g1NLcAQ8R4uXXjaQVjtmnT7TQ GHEG3kHvIcUhQHIaVu9Ph9pTAw/5BZEqBGhH2lnkb5h5GfqxUCRnDv/V7S2oh+kP OM1IFEEn6wfJxBE3rxBIcRPJmpLQoEulb5uhB0XooFcSJh7hf3DutCs4s3J3DYx4 QtXoZNg+m2gK8IX7/WwG96CF4cBNmHhmzcWZRGDa96tAJ71tVX2RP5i+YshG+7OH VR7KRdyzmt3pvbs0zAw8bsTb8BdslowEACalysHhGNJ8QxOsE+Js/ibAOEHfR+l7 KnmQenMrD29VrPsISxgRhcXh4/pu/GR8IFOkaMiz76zlb31UlzT24G8Go7YmWifD +3g/QCSZP1Fc7sOk59i+9kHXeuuDmDVIwBEBrTdXK1FVzHFqJSotLrQIzJgxCBv7 TGCn4g/Bzn7TIwvDH3cL2/VFMK850Hh4WLkPI35wrjr9H2El+MXsPqY2Lt8dn7kB 0WpDlVcYcfsHLmpB92zxvoSbw7dLyRyDBrGfXfX2E8qrE+0Z+YM5oZamaZf+uErv g96JWgvckRR1+gDJHbl6rShk2RaTmxfxWYSYf83ecyt3a95QxQcZpHNvO0oCt+vC w4qy3CnDfBPv2yXg/EczrUNGSk3f31aQjz8hOsNRt5HWpNthm//bQKkfM0ShgQLW B0ZFeum+EwV81OQzlvgc/Aoq4zfbKZvPSf8aGXoC4yTQN79ZONAlz2rP+ullJ23C mqJU331Szg8rzfmpmA1DVfb12r8QG2OrI4oDM4zwJK/U4fsV5o77ZNznkUYpZIu8 TKIpwvbkx9klES28Zvsl+N/k4yxMF4isfJjVM1DKM3ZgJqxM+AFWQSoC8PmMfUyi ElhvcfzCskSd2rNF3b41W7szP0iNX0jpKbzu/sEFvq2Lk4z8u0cLLvJqCVNLpNC6 lH/FLTiCVIw5e2lfAAhqjeQ0V7g0K0uxysZouivvloIsImzD2b9Yei641Acy8UT+ x3V/qf15oppCtr0okgvr4BZ7v9xLRCKols2xcncrMqNAVPU8xOVke55vlhRYidbl txA0rTk+zHy5jKGN3BHNqJPuyj2shRm7EUce86dWy9omnCk1cHOvqN1fVdq1emHj EX2GAkBeInoPpdn41Kq2X6jGh3NBGgovhnFDqu4ICAzCpalOjnZtb7y+SWdjSSoK lWixvr+CJKM5VDGtAMrGv+xZ/HNpdeghfPc+eCecC07KMSx82tomEHZirVRdcQXd E01IMuJH78wMnZcd2SpFSfrmBttWB+/Z91yL3fnrYsU7R/Gp6EEhRPtxEaOPqnHS -----END RSA PRIVATE KEY----- """ let samplePKCS8PemPrivateKey = """ -----BEGIN ENCRYPTED PRIVATE KEY----- MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIShGta1Mpj/QCAggA MBQGCCqGSIb3DQMHBAjQbLTPjvMqpwSCBMh8omeDIM0ceuoiEhaepFqbst/jUwYh m1pzLokTph0GS/81vmTDr9U7uI9rHiFbACRRMQBH/cCkZFUN2Jo3pJXA4q3RvGsh 4UIaWiP+SNkzKR54QcuWRzYoQs/YH8VickNp2per3zQ9R0Regx1ZaHSCk3cFRFy3 
4sJtgoquwJYD2vUdQvhwcuF2Syl/VCpaQ0+KtfBqJ+4YLJPQcsL+OKLlaWFY0ivO 2oSVCg3QJrVbS8TDnrIgeL8MNhyVHQbuSyh2MlXKcjiKlJHdHXSlYSINgpUsc/Eg cTSgod0JXvjbExrtBx2mODwM5hzDkGpdub+TptXinQg3FQjUKhBh/+wrP0HoKBcn UFE1emd3n1s0MFN28uSN3OcX3833Lt4KAnxF4xaPfWEAk/2yuukiUqKU+K9cEhNX V1arxKq8RLB7n7o6YFt3xuVgAJYWDk6nyr/0I2LgFj2Jz/C2v+YBFYGUcQUKgHQw OLzzZnCrPj8JIP2cUqagZrW7JOoMsFCtroJptImaqhsm/4i3tyf2uoUWglZN8DVE WbNbnAr5KZSl9U1/sNuEesixIWd+RrJC/l0tNmScCvJifL9WrJnccOI83EAkmz/+ W8UpcPCscAmAdOcjFQl8T37xHGxwVcvh8LyaoacBqQCYiZzO/M6bA2YuBYVpkk4v DFXMmy2SaHGGhGHDmyn4uuzykGCOn1ZN92eT6PXZCmHz0/QCH6RIGx2cK5frfhUP icU30GnK1jRv8QFHVx9IZQpHbALRgSNMbtF8EqWmONUIs9wQIQtEMZ2AYwq8gKL2 9Cwk2SkqO0Y8dbE/lw+iBA37/NO7KiSLB/Mpq0/zX5SfBVcGZAVzGKiyeOW5sKcI pSOTTv5jLkoEnels2f0jsPM7aMjG+ys6wveL0tDhfKSbtjyC8Zw/eXpK9AHGW8Hr xM7hwTkQpznyt/NUIDmjrDHg7n6O9sp7KWduP1L9bYC/n5Dj2gnxHj6FFTpMqmm7 Q6GEj/dttmqvSYeG93heWqoS/j6j45dppoKG/3vU9UWODStcc3y66WJ2ULEY0/CF IiBd33GJgIKUJlrMGwUSAPxH2wklF3VwWFVXMnLbqpggaWlVxzVnvGjnzoHm3AW6 hWCMnvsP/pYVBMpaKKdPF6PCW1yQXjTbA67gxpGECoin2Bu/rp+t0GeVmgTcCS9a Y2Su4cpwCD1ngIrdodWhVVJSObApRdn3SDI2xOZUgZPVT52AtEMPQ3R5eoIOfLI6 CPC7cYl2JDmMkKGLaSom1zZpCoXtPTkxDAIpaG4ofT6pIDibCSywllL1KeeVw4WX Cr2b/BS5TZNFyPzdrMaN5og6hNkbyca73SyEADnJtHTQc6mi/Q93al4TI3RYaVpk KWwIW4kZE/p5pONeZDNNt7dKrgkjaTylNpM9jdnBL3hU5Fxr4I6a6+IBWQC03EwC o2zT+g6YmVkod050GMv0V60npTpbOpWIamzB+q3GMMkU9NNyw8xH7RkNS78eWLVv niWQmWlbkzLEf5PT264+c4w9IkE8aUKY2V8Ev2k1FXZcLdfw3G5yVzrjXoAwFUaY xnOAdO/QLMtD55Kn+jzV6dCXmyZQkBJAMLBF5xEX9DcnXCptZ2Asgvxa4EpO7jzX v5o= -----END ENCRYPTED PRIVATE KEY----- """ /// A CA that expired a while ago. 
let sampleExpiredCA = """ -----BEGIN CERTIFICATE----- MIIC5TCCAc2gAwIBAgIUTSNLkfg8YiYSq+fnrXP25txgCkUwDQYJKoZIhvcNAQEL BQAwIjEgMB4GA1UEAwwXYmFkQ2VydGlmaWNhdGVBdXRob3JpdHkwHhcNMTkwODA5 MTk1MTExWhcNMTkxMTE3MTk1MTExWjAiMSAwHgYDVQQDDBdiYWRDZXJ0aWZpY2F0 ZUF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKqWcbgw TFX14tKxUMIrla9Y0aLddlnnTDqsxtxJ7dSjE4+OBkVBslCq4WtjgaeubdHkTCtc GRVeOpXVcEyznGBGW5k/5gCkmaGPe8jI4+caavtXnoTdPU91ukYkZkBXzCgycVS8 kQxyPwvTDUOfHQ3VqUfc2LMTXQYU3vzyrPzq7XAWgZR9d5lOtB9tpGnxCRP8GOFO KHa3KroiRxJb2cReJsayJWx713pje5lPKtSKP0iYICR2kYgtP+8Y3wPzcLzPRM9u 6a0olO6PFFWdPNRtivObCr5Y3Cy0P8i2ZSyOO2c6cn0ksLmCe/qrRX9HKx7TrmEu 7Rs+ql6liiyrQ7ECAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0B AQsFAAOCAQEAWSCh35Fk5Td+8uV3oe+K+IPbTrhtNmrwC42sGw/mpQC56zNjlDt9 jBZVZbu5iAwO/nrtn+JpCSA3ADugjisQKQdELb/ogaCnIu2vY/fjHv7a9/tYoYc2 i/rtcXIQdhSrniZuVnKG1Keu5qohKIP1ne4TAxADTlzl3Dx7QH/32hUBlJFwYiDQ JIuZD9LM5Ic9jtrsfTN79tNPM3eHofWUdKyUk9fTrM7/28kSERLJJz/RcXDMP85z 5Y0zZar+qh+9A6kYy/xcaFVOX0bDsuArBA6d/n0skqJN8gylOvdsnpeJRrXxOSSE dcvafu1dqy0zZdFMSzymwRnprqgdFYC1xw== -----END CERTIFICATE----- """ /// An intermediate signed by the above CA. 
let sampleIntermediateCA = """ -----BEGIN CERTIFICATE----- MIIC3DCCAcSgAwIBAgIUDK9fkCTocM8Yu3csdNcm86ahG4IwDQYJKoZIhvcNAQEL BQAwIjEgMB4GA1UEAwwXYmFkQ2VydGlmaWNhdGVBdXRob3JpdHkwHhcNMTkxMTE2 MTk1MTExWhcNMzAwNjAyMTk1MTExWjAZMRcwFQYDVQQDDA5pbnRlcm1lZGlhdGVD QTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALdj2KDFRR6Es/RpN+07 q4IiQMoLVcDu/CoxCJSteNuNShmScfyqG4e6AFDOKxjv2U2NHWmhVbBYN7b9jStf uZBpvz4/JY4+mVfGASL7mBkcsTLzNG+7rmQ0Oi271KL5WlDmw6DUMIFNvYSy0q9y MFS5qSYJh4JnXXtdxkGIjDmrWy1hCRzIGCpDZXvNjnhJDphgH3Ss+PR7wTJZXRiJ uoO4plWWl3JsRIRoyuL7K2CeWrR7CvIEThTF/D2P/7odf+CNz//46lC83b5eKdIA GD+RECQaA1YFygAbvEln+za5AjnH11Y310zvzAb1gCxGuxNaABNKhYLcDpDL/Mcd Il0CAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEA X4D5jVygEJyp6Ub/Yao9miF/vZW0bep00gOzHVJ8i6y1Qjn9ieVyrX9l6V8ZNwQU wrAkse99WoI94LT8QLWlAlDB7S0IS8IK7gkt+06pSbrhW5GJtEQJjug84DkOVqOm JSCupM2BEiHVQPYerF+sJ7I/4eENkafVn0zXSL9SEh9fPXBYJKiCYIxKWmGF3KOp KG5Y1W9sWz5NaatoL1kHFGDeuDWLwXJ8WZuNrtJNBe1iQ8yvuO1STRzjtq2iTDk3 TCYZoKnV3ui38BJn7libgUsN3lHD4yKdrw5LNeyjrYOZ5oFhe4QBQv0ZA+wUR+h7 1A4gDvFcIkbYSywqlirBQg== -----END CERTIFICATE----- """ /// The intermediate above, self-signed, as a root let sampleIntermediateAsRootCA = """ -----BEGIN CERTIFICATE----- MIIC0zCCAbugAwIBAgIUDK9fkCTocM8Yu3csdNcm86ahG4IwDQYJKoZIhvcNAQEL BQAwGTEXMBUGA1UEAwwOaW50ZXJtZWRpYXRlQ0EwHhcNMTkxMTE2MTk1MTExWhcN MzAwNjAyMTk1MTExWjAZMRcwFQYDVQQDDA5pbnRlcm1lZGlhdGVDQTCCASIwDQYJ KoZIhvcNAQEBBQADggEPADCCAQoCggEBALdj2KDFRR6Es/RpN+07q4IiQMoLVcDu /CoxCJSteNuNShmScfyqG4e6AFDOKxjv2U2NHWmhVbBYN7b9jStfuZBpvz4/JY4+ mVfGASL7mBkcsTLzNG+7rmQ0Oi271KL5WlDmw6DUMIFNvYSy0q9yMFS5qSYJh4Jn XXtdxkGIjDmrWy1hCRzIGCpDZXvNjnhJDphgH3Ss+PR7wTJZXRiJuoO4plWWl3Js RIRoyuL7K2CeWrR7CvIEThTF/D2P/7odf+CNz//46lC83b5eKdIAGD+RECQaA1YF ygAbvEln+za5AjnH11Y310zvzAb1gCxGuxNaABNKhYLcDpDL/McdIl0CAwEAAaMT MBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAWervzZUKKDEb O9nXiJckFEBmCOlQuQ6O6+hVyRLAugtPDUyesCUDqoLF2wmMKNRM322gJKaWShaM ueBrXHIx+ERXKJsgFic8b2m/v+VT16aAVPvQCLmZBpWR2ICqgNTpUzoDXqIZk/9l 
ZkJZMaS9kiQmEPeTDH2O8acO9TjqmQbdZa+q6kBWBnNzLPOu5ziEdKrh7rNzikUw qe0yKxavA5L8l8uWumGC8L6GE7ie7X8oMLwaLXFXt2TG9ZENrVQ0xcLSKTBAF2yL 4lqh2YnpZhntnCtv9Qvx81Asp2+6YfocAe9IKNIA534R2FgoZwt24SokDBhfg49d 2fV7ZO/cqQ== -----END CERTIFICATE----- """ /// A client signed by the intermediate. let sampleClientOfIntermediateCA = """ -----BEGIN CERTIFICATE----- MIIC4TCCAcmgAwIBAgIUFJCxfytdLl/FpvlUqwJbztiALjcwDQYJKoZIhvcNAQEL BQAwGTEXMBUGA1UEAwwOaW50ZXJtZWRpYXRlQ0EwHhcNMTkxMTE2MTk1MTExWhcN MzAwNTI4MTk1MTExWjAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3 DQEBAQUAA4IBDwAwggEKAoIBAQDS9XBpPYlP3ToaYKmWaqhXd4lnLSvjReuknE9I UmvFBoPTyGRU2UNv8N9tFT3xMOX2DrGOn7eVqXBXOvKYRB8+q3CIsh3F/5smdNKQ PfsL2tFL4d2lvrZ+2GOr2yRtPm9nH0N2wrmJi6GtR1J+x2Uvm7EoHvk3Ujbo77fB HvFauvwA3GsFT10J+f5buPcNW0rdpo+ASMfMpfBMsr0Ucy1ys9XM/ehCMeWMiX/d d+fxqmOtl1tGyw4/Bbub5uf/HkiJStbKSCMgs7E4VgVhqFMu6jpeMlADXgDeOKEa rW+Ds8eb3TkdIlYE2nmwxvdOPeW3AgChkE5RCRYW0aALTwEbAgMBAAGjJjAkMAwG A1UdEwEB/wQCMAAwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUA A4IBAQBarG3HrdOULNMGfY/UrSoc2qCQoK33SxM43ecFSXsDbPXLOZHp9iQmib1f uKy2m4VVkxtxYrQ2i7bueqgRt91rM7hHR8+uopj/BdNYFZfIik+VNFoyKJeATYcx FRjjAAoMpVYdAJXvtckNix8mlAdan5VNL1AsHYum25BjClQEy+kHM1i3bDLOIiDB dKMwvI/1ZnUgrMFnAvK8U8WxbxVxij8IeloW+YgjOYXqzjCysVh3L7HkI3AOi6yw eMNi5idG30y1NnTJWTSWzwR4UcoeLFdzMAmAxo5IVJBYnngcLTEkfofGFC9k2ODI XANkLW5BKAnSmOQUBrExL4yAj5jt -----END CERTIFICATE----- """ /// The key for the above cert. 
let sampleKeyForCertificateOfClientOfIntermediateCA = """ -----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDS9XBpPYlP3Toa YKmWaqhXd4lnLSvjReuknE9IUmvFBoPTyGRU2UNv8N9tFT3xMOX2DrGOn7eVqXBX OvKYRB8+q3CIsh3F/5smdNKQPfsL2tFL4d2lvrZ+2GOr2yRtPm9nH0N2wrmJi6Gt R1J+x2Uvm7EoHvk3Ujbo77fBHvFauvwA3GsFT10J+f5buPcNW0rdpo+ASMfMpfBM sr0Ucy1ys9XM/ehCMeWMiX/dd+fxqmOtl1tGyw4/Bbub5uf/HkiJStbKSCMgs7E4 VgVhqFMu6jpeMlADXgDeOKEarW+Ds8eb3TkdIlYE2nmwxvdOPeW3AgChkE5RCRYW 0aALTwEbAgMBAAECggEBAIzYFxv8XK+4iPFRdggZ35i+EzuSegm8Be6Z+YjUlmUt y1fbI7lOcOrMy669juR3/CCCgOMzGVPPk1R547vrR10FAxYQrTYjSIetWWO6LeEl T7U08FGXeapIeIslvTU+iQw1YEprCYqecewJgTdpktHtRaL+wu6/ci+k1G8YZJVo qPmkSJigrEppm8ciXjvae+89jgUSEUmumI7A+LwiD2qr1GjGMg01TvKJ3jVrU0yq cGP58zAY/W1DcenJm26bpirE82Wnesosv3hQf2LBMGBMyVp6ErNzITSNN1fUSfyB 231DlGDor9oopfGfk9ApDUUVNXfFUv6ODnCSGBcdUkkCgYEA8snNvwok8IjbXzeG zdDVUCVLX/o/vrFQg0KmktTArklLe7vAgcbmCp5TbdZKnpHam2KNu6ucgla5ZchV 5vHbAdAhhvZFnYEaDPlpvueVT2jLWZHvsld17vfy7PVpZBwJSa2SQL4aC5sk+Bsn 5LbSE4OL2o0KLQr6+BOAa9soVw8CgYEA3nA6u4Pxdhlf4UGo1fMWFbeXvU6myBs2 JXiAPEM/9wKiGS3LOseqBzLBAoiWND9J7ynDJ+w5uuezwJP6MZImj+J0kbXEm0vy 3iUBGBQvj1FJLN+wJx1QEzZBa+rslqX7vE+YsByJwfffqonGwXpj94Qxf6HMMDea fRuHxqAjVTUCgYBsXe7bymdahXuFMH+W9hOARmUyXbx+HR7Wt7Up7JRkNorem5r9 Ug3zx19tsyxzQp7UpFSm455j/tmZuKW/A0zBrmiImPvRpYI/MEQm1a8rVpcNT7ox XCBjnYBsi82SxYDPxg11oGR3sbP6mgRgbcmutBSEZFeaa0BB4lJ70cJbuQKBgQDE a1gBo3ZB8hAvafp7yqby0GbmnKA7zYOXvPuHu16tcR7QmxZ9tjgXGSNEaHYydryD u14AT+F+gQHCiSkCQutYXQDQdjDBbWRt80EvEQwaQw4Z2QDE2WaPQHaupAj80l8j nynWQa0HoilYf0cKLFhABfRrnuUeossBtKDFrTzmDQKBgH2uBQ2v0hV3EW7u2wdy y7V9lkY+GDm51P1GWAH5c0BBZp3iAW1IBNzbUB8wXVJmhYPWO5Mh7wCAnr18HEZz OjJVhqRxwhY4NEUsyI86Xxb7rV23HAM6laDItQ/bPlR+b7py5GWCH/DRhhZjHuta yVOAYA18BnJi7O7Cwd6krmQd -----END PRIVATE KEY----- """ let sampleDerCertSPKI = Array( Data( base64Encoded: """ 
'MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA72vMk44TxyawioAkwKscPGQEAyBmEywnzrcyda1XPkkgkroIoj827bq0TOiQfLcQj3nrlHCKS9q6F4yxZpMUDg8P8/6zUO3FEIYhwrnH5KhM3sOuflm56gk1lF9Ni436DDKGmvOtmtiNem8RU9i5Ih5SorlcatPIUDa4aP3Bjd/0gXit3slPR7BFvVTw5xHvFBtcoaORjiIkTVPJ+YiwFlugpob9phr0pudbJOQOyxoQtZQgquPDC4+BWBK+UBiajXYyWwYTOQ73PvyO8WaFQ6xoSV0zxRrYCE4pbShJiBq5W6uj8Cn+jnOfw8MgNqmX2D7m+M3Yu5ypfZr262gW/FIpdPg29+kv18m/N0E0wzjGmq0ciRXR+SIxtaQRbYUWU5cqcglXzBPhr2J264gMyDw7uhz6C32kb0wdNa2FebhLb0MBFOp9njxtbvvPxGsRRFXTejTlv+3C4AlJHLI4JSLrXIyjdg3K15uNjZbc/Gqmr5iZPmqGoFkMuVMsym24V0hK/pJCCDsmgUcYxZFdRq8yP+yfmpkPilDJlOgcnZW2Dlb8wjWkrBzegoPozf/sZ/fmv3ZX+jHpdPv5+cSwAfwC1CJkmQRu/MnmWlSKFIhpXdr9L6OeTsvDxOm+O2l+M6D7AFuVzfw5r22TZZ1fBgNue9t4vbpabIfBBNQGLWMCAwEAAQ==' """, options: .ignoreUnknownCharacters )! ) // Custom Root for the certificates below. // For example the following two certificates were issued from customCARoot: // 1. leafCertificateForTLSIssuedFromCustomCARoot (Used for TLS) // 2. leafCertificateForClientAuthenticationIssuedFromCustomCARoot (Used for client authentication) // The client authentication certificate contains the Extension for Client Authentication. // Which is required for testing with the CertificateVerification case of .fullVerification. // // The certs from the custom root expire once a year, so here are the instructions // for when they expire again around August 14, 2026: // // 1. New custom CA: // - openssl genpkey -algorithm RSA -out ca_key.pem // - openssl req -x509 -new -key ca_key.pem -sha256 -days 1024 -out ca.pem -subj "/CN=ca" // // 2. New server cert: // - openssl genpkey -algorithm RSA -out server_key.pem // - openssl req -new -key server_key.pem -out server.csr -subj "/CN=localhost" // - openssl x509 -req -in server.csr -CA ca.pem -CAkey ca_key.pem -CAcreateserial -out server.pem -days 365 -sha256 // // 3. 
New client cert: // - openssl genpkey -algorithm RSA -out client_key.pem // - now create a file called client_ext.cnf with the contents: // ``` // [ v3_req ] // # Extensions for client authentication // extendedKeyUsage = clientAuth // ``` // - openssl req -new -key client_key.pem -out client.csr -subj "/CN=localhost" // - openssl x509 -req -in client.csr -CA ca.pem -CAkey ca_key.pem -CAcreateserial -out client.pem -days 365 -sha256 -extfile client_ext.cnf -extensions v3_req // // Then, copy the contents of the files into the literal strings below. let customCARoot = """ -----BEGIN CERTIFICATE----- MIICljCCAX4CCQDV3NUC6QWiyDANBgkqhkiG9w0BAQsFADANMQswCQYDVQQDDAJj YTAeFw0yNTA4MTQxMjM5NTZaFw0yODA2MDMxMjM5NTZaMA0xCzAJBgNVBAMMAmNh MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA9MqFE9SHDnw3Cw5hOQuP gycdKyw3njytnfrRsDSRDEZDplitFmbm4DckrFwfG2xo9WXkiUZhR8JFiqnuc7gc Q0vtmEipoJA21t/nWtL9z0OHx8ngYTFBA72s7UocLw/5+y27CsuoamCR8br1Opxy JrPBihUzJzTJJ8gSPvzFyyg0dnoGswe+68GawPJmgmAzae7Yc/dqEeFYDUpb743P C6uirnw8rE/eLH6doLXoXGHhC0K8thfrny15n20ozMag7FDF0UdWpsfbhX6BINTf 5sR3teCPz+QZ8D4zkoSqf1Oeif5LsmKdtBuE8w+kgRZs+i/WwLSm70DpIfNeuu9r 7wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQDd06g6Tk1lTg4IVUKNZ86P2HXDK0V3 WFngU2Vsh5P+s7DHe+VFV6qE8AIs3E0OIYbmRPOm2ZRMTASVevTt+yLFs95txmid tjmQwD52QN3ivUTQTWvpaM8yJcji2qvPVn291ZrIGBsRF/stMlkSHDwhP+p+TQa8 gv9LcWiTR40x/4eyC61fe8elS6vVBENJlXk91SyFSzpTnW3BElUZteaofiU26kXA SHbJxyRhp5xFVpxYUKGjVl0H7sHQn0NDN3wpqy4kBX8dPCEVe3IlBYG8xE/TfRAr 4bqP3ub5/HAabeyheEpljPehFVC65OIGHzfHBluqeEAvIm7vXaWGbwOI -----END CERTIFICATE----- """ let leafCertificateForTLSIssuedFromCustomCARoot = """ -----BEGIN CERTIFICATE----- MIICnTCCAYUCCQDDQzp6nuhApzANBgkqhkiG9w0BAQsFADANMQswCQYDVQQDDAJj YTAeFw0yNTA4MTQxMjM5NTZaFw0yNjA4MTQxMjM5NTZaMBQxEjAQBgNVBAMMCWxv Y2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALLJKEReHVQT ltlOLEyn1Rgr+WwgzoBuLREUcdrvDFfe/75ClnadOkYcIcXNakvUiTQMHyEUXZ6Z dKI+98Igmcwn9xpd7Jab8S+Z+AXzVg88xMdEWC4rufITG9CSGFBKQdolv8DEGY+I 
qQMCHzBDi7oMGmmXOugIPqMUgvCYAJ/bncn6bfeWIBjXxtRxJ8Jj6++3G6IvT4gx g/zIdWAf2snPQItZEm6cZZMV9bwjIgxPdxK/GEAzG8rsV7m0zPpgvDS+2waf/NXw uItAcJWUm/ylQCZ7fUv11T9LwfWpItNu7GuLzD5NI1mpNLAbm1W8FWr98GtPzTiV 4GCglAomjokCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAoTejnBrluy6NK+5425an yPS51CzRK+o8hBg2YQ6jzioexSeRXW/ivBkKB7j+iMNkVuRpzzLTA3Wz5+OvJQyI itGpVhLngaAaTBBBhga/cPejaBZKNCRTeXXSe/nMSAJhjO0XaZcyUESDq2rH+m81 LUrqfjNOZW8w7zustKJq+QqY+jEiRAzmbL1hPoDlasromZ1+6TsOM71AgAyLoKgA Utj/VMlOKzAUyH+z/fPzXDM9nslfLqMnhMcD9vIWi0noypYoyrcaIATkhCjNRGXH PvWooCpurD7+JL7imZfT8x+6lp1XI86pC6FG+JTcObBRqvdLpQRMY4chE8G/FS18 qA== -----END CERTIFICATE----- """ let privateKeyForLeafCertificate = """ -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCyyShEXh1UE5bZ TixMp9UYK/lsIM6Abi0RFHHa7wxX3v++QpZ2nTpGHCHFzWpL1Ik0DB8hFF2emXSi PvfCIJnMJ/caXeyWm/EvmfgF81YPPMTHRFguK7nyExvQkhhQSkHaJb/AxBmPiKkD Ah8wQ4u6DBpplzroCD6jFILwmACf253J+m33liAY18bUcSfCY+vvtxuiL0+IMYP8 yHVgH9rJz0CLWRJunGWTFfW8IyIMT3cSvxhAMxvK7Fe5tMz6YLw0vtsGn/zV8LiL QHCVlJv8pUAme31L9dU/S8H1qSLTbuxri8w+TSNZqTSwG5tVvBVq/fBrT804leBg oJQKJo6JAgMBAAECggEABQtJ2IvzNeELm3vqIguGJpVvBw7x5Iu3N8kk4TFnXr9K 5dpJFnWfJEU86rC98/++EzrYUf2aGpRnxwARy2dSD4F9JkBKIYGqz1X/umNAJVPo lVqnRj4zk9HYMg09JF7D9tyjyVN/CR6o7g3MRXdSZOBcimga4FsDMWStwQ34zom+ mnwiau/yxOt2GReRmw3ioOlorUbtJ64uU/yYjHgplfM/BMq2TCqx15wgwDTLNyLs Xidh8ksG2qBbESEcvGSisarz2CXkDaaAVYLjQPsE2z6tzLYX+S1nSnbNUM1H9QZF ipJ9AB7g+f9vRQvAYzrvLIIa2SIDQUTRT/XLw59+UQKBgQDW9miB+oEMANvZw6lq p7PlI9m6l1/MFvhTC5upXnhchmVGiuSTpSDazgUHRgy1wHBRk1PwIPsTaURZEUAs nxe4+3/Ft3FqfITNngnKNS/OCCiXQ7ysDbvLLZ2ADbOohZ8Lb2/hNm7lOain4CdE 2qC1Vkb/9vWmm3dhM91Pb/bJFQKBgQDU6rpdGE6TxAytKXiV1fJjuf/Fvb9BLXkn x+0sOO/liKnyPj4SHjePb14jcU4F2cRa7hPHY1dw9i//j17oa7Wvsb/pi06qOg3o /I0Txdea1EqsBCCb5qwPWT+GfspQT3EZN+qwGpN26GzsAGj+bVsCAonkg71eO6NU 1sSw0JAkpQKBgBbXkDtflx7jaHk3ZWVD9MXAjX5aX3+cYT7R2PSiaT/LuC9Kywc1 YMxfYAFp3CfkDwtcEGtP1d42LWEZiCw1q5uofedQmuip2qLOzFOEW1QVYdrRA9d0 jiQE8NuOmSyrJj9c1BKmahpJijZshz+1y6X5SQoh//B4TLMzg6zRRPQRAoGBAK2R 
QCU9+GhrDG5o/T0gML1tVf0r5mpKmJZ+W3COZbn3A5tPdCgu69oIznQUHKeWU4RQ ylzjNdgHSS+K/7J2g6DbRPgssQ8Bzm8c2iDBSjaUUt8RakfM7nyAo9GPMHvxluAY /j9bGtV3ObvVxcGLAgKMcT6QymG0Ojyh66u8CZVlAoGARoGc28exX64y1+E4QA56 29cu7tN8YIdegV+qIN5OVzWWDco5BxAGFUrjlP0i5S8STBncdchhbu/97xplmREC OW0ct2fwce8+OTFzM9TXpzsLToliL2MLfM1H2bM1lY1xA4QtQudY/Xh+5YyotmNI fhsXm4WK1YTrkD3bSxNpzUE= -----END PRIVATE KEY----- """ let leafCertificateForClientAuthenticationIssuedFromCustomCARoot = """ -----BEGIN CERTIFICATE----- MIICuzCCAaOgAwIBAgIJAMNDOnqe6ECoMA0GCSqGSIb3DQEBCwUAMA0xCzAJBgNV BAMMAmNhMB4XDTI1MDgxNDEyMzk1NloXDTI2MDgxNDEyMzk1NlowFDESMBAGA1UE AwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyGvt 9RAdndCEsZVMUvRoQ0vusQikqozTC/s5O6o6JK9M/u3NvSjywBNHtORqbuPFR3Ct 9X/kdoH9sPAQdCzTmDBdzktV7I2M2t94NcqsLw3k7904XtSwjXtITF7wR2zgZLjZ pXGjgg5ajpifLeOIT6NJo2q8qnuPf6E21dLk6jt3Mv76opfO7CoVNwEuSEEa/RWi IKjsjaCJ1rDaUEd9GTBXF3UoQCK1sYFRpIuZpaizAZxOO6emxqwF1OJDgoAXMrHw e7nWc28ntOSI3W3bkQx0oVS02uHiEvMxF4HDZes2d0kR+2SiOfivTyZQsAVA7N0A Z5qlfllFe9+5Nn/hiQIDAQABoxcwFTATBgNVHSUEDDAKBggrBgEFBQcDAjANBgkq hkiG9w0BAQsFAAOCAQEA0blksL3YrmTnfPeh342jCnNUK9fg6cVXV+W6jccHJ7/g en5t+50VJ4R4NvEhCdx87mrPbozWfpPzE9OifeM+qrljXitajZtblGe2Xv0j3pWP Lx6ulfPSVY2Ss6Yr5O6aTovLR3QHHuud+Bw//J3s1DNpVphbB6GmLSBDx+UHf7wd p4FjrGJj0JrUDzX28s2v/SNhph9AhEgYu9xStrJBn38cao/Ww5rjhQkNATghLlmA 4ljvGb5PQcKo16gkW99gbTziyPIJ97m1+7KGIJGSIixmwZK1NIlN8pf3rJuAak4K Un7Y0TKn8oackbntmk7NYlaL6u3m4JzZp2nSCJ3QTA== -----END CERTIFICATE----- """ let privateKeyForClientAuthentication = """ -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDIa+31EB2d0ISx lUxS9GhDS+6xCKSqjNML+zk7qjokr0z+7c29KPLAE0e05Gpu48VHcK31f+R2gf2w 8BB0LNOYMF3OS1XsjYza33g1yqwvDeTv3The1LCNe0hMXvBHbOBkuNmlcaOCDlqO mJ8t44hPo0mjaryqe49/oTbV0uTqO3cy/vqil87sKhU3AS5IQRr9FaIgqOyNoInW sNpQR30ZMFcXdShAIrWxgVGki5mlqLMBnE47p6bGrAXU4kOCgBcysfB7udZzbye0 5IjdbduRDHShVLTa4eIS8zEXgcNl6zZ3SRH7ZKI5+K9PJlCwBUDs3QBnmqV+WUV7 37k2f+GJAgMBAAECggEAQhPLbVt12D0SMpY9hrAL2/wh4v4thAlP34hhUzmJV+Tv 
5rCyfyYL+qWgo5QXPx4bQbV1tRYIVcX/xSEw24yX6novwz71Qjtc8CBzOpDqec0D 6M0vs5w95Td7G6rFX1cXGD4Vi8VOmidvVcod2PxGSbNVKOqc7zwzkGmvcYnJbSu+ XRThz03SqY6s0Jp0lKWvewTuPG6YzOlCeKlMGbxShE5zhEcGdjhND0J+Q6EUynOc 9tINUakrnwIo80WaPeXIk1/eCneCJ+STdTeCIfsY/X564yHwkIrzQ4So+anZ+pCW lZtZnsy3DdpAoP0vDUwbcsJUNtoLDFc3JiU/e/z7nQKBgQDsoHcjero1mNufZlp5 vCGuGblMB5cOceFB0X/oTdkqLwt1+5i/ytOIFI2ZAxzJvVa/wMbsExb5XbeD66l/ pzFj3EFymafv1Mc7k+EqBI13OBUR9udBa/CpYwZwYuxiuLPERo9Cn3UniRKU0IQb MaQFAkOapS9fOwVrd8DPz8dhswKBgQDY1KC18Yv9fvBPvnwoKLb7zWbFeg9CH4/B IbA7OtWWkjhoLew6bQGzYeIVzUpElJXetCNgkwB71fWcHRwHDXvDx+QHU/JOgp1o tpg/nCoiZPyL79vjy1Cr2y7MsRckLP2dOFI4WrwerzmB/vxA3LAoG60i+FYXZkm0 NG6nOjC50wKBgGw0oOZ/i8FQqjXFJ2B9sGUd7EchPWlkmB5x/+yqFMGei74jFGG4 DW0wAORUsQhr5cyACjcQL7ROr8nKrVLrkMFaii8upsYcZhMPd6qwNEStR61UW8Hl 60J6PwqLog8u6T27Cm3r3zX6D54vkAmjdJ65v1JrcTM6GStgsrIVENbTAoGAA6p0 nR7cUwjWX0LFLpihn1g1qJkLsP5/m7BKHnY8LjOCqKA+Ii69nJ7HB79UxhwM/Jrn DjbuByny4RTM6IGd2g2DGWyd6B3lM2QC5vBo9fPnISaI/SzuzDkEbYmA7qekEghl u3YtQAeOXVhGQ4J3p/Xv02uHaRXdoSJRzJn7QOkCgYEAxvn8bj0CCczRrFBHALYV /4p1VRq3xIKFwDOUP42Xu1Bj5ETuVeEsnTN40VwTlvZRv1Gr6zImqcIgs03QPR2F +nCAt3FPsQfM+CFO3gxiir9ycLmij8sEwfqr68nQLQ+zzwEgrMU50h6IZRUGk2BJ /0/E9r/0VwPQn5CZSt7NQvQ= -----END PRIVATE KEY----- """ // This is a root certificate used to setup and test sending CA names to // a client during client authentication. // This certificate is used to test having multiple root certificates in a directory. 
let secondaryRootCertificateForClientAuthentication = """ -----BEGIN CERTIFICATE----- MIIC5TCCAc2gAwIBAgIUDxjYloPbo7PQteeQLKW499sRxm0wDQYJKoZIhvcNAQEL BQAwGjEYMBYGA1UEAwwPU2FtcGxlUHJvamVjdENBMB4XDTIyMDgwOTE4NDQyMloX DTIzMDgxMTE4NDQyMlowGjEYMBYGA1UEAwwPU2FtcGxlUHJvamVjdENBMIIBIjAN BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5Q19OwSP4UJo35NZ0rg/+bAbBh+v n7lilsPwLwhmhkwWZVSTPQr8bk5ceUGJtPups3w0d1oM/t7oC43O38sFwkCYL5nt Z6YuQfP0ZijDjO6WiQr+gwyaAZt84/Rm1MHYqF1gBCDFQhcba3CTSd4HQzls+uRF EaWqu4n706e10ed9Se1uAqeufYRdGPijskFNYmw+MgXWFC/WrY/TXRIoIQsj/g8A jC66Ovriz+nXWYjPBSLdyXY69WVR5v6qksMeuJAYv37nsWL1H9436Q3WhxlLZ3Hl v3SI13Kk6y7Sp5TYDeomeMi+9aAHOtvZfZcBEw5yLCkJSXGQL3nIpk7oDQIDAQAB oyMwITAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwICpDANBgkqhkiG9w0B AQsFAAOCAQEAx+Ajc4SnzSS/1BpK+bVK2y0vH6NmF9Y9xjAi06pAWOtNpXBTH8Qe QdQbB/00nUDccEcIoEn46WDKwW4ebGKa4sn2BAalM0W2UoPMX0UYtUDPyNkeK8Q+ MQVOaZX295g9t6sfQ/rbRQRGJHFH7VsRQPGHo/vYG91+ZS6judUUZw7Mcltaay2y ljU3QeOeO3m553tfw/MwY6UWiSs9jyZumtzxL3WS/LCssxwnknkE5IM2CA8IzBfM VShvzuAwd3a5ZTju3jD1cK0mwlbEYNw0xj+wjBLqwFuJI/CnQzGSElvQy0v2ygjr R6S+ZRBlGxAnjKbTEMg53A+0XkGg/Kgexg== -----END CERTIFICATE----- """ let samplePemCerts = "\(samplePemCert)\n\(samplePemCert)" let sampleDerCert = pemToDer(samplePemCert) let sampleDerKey = pemToDer(samplePemKey) let sampleECDerKey = pemToDer(sampleECPemKey) // No DER version of the private key becuase encrypted DERs aren't real. func pemToDer(_ pem: String) -> Data { var lines = [String]() // This is very inefficient, but it doesn't really matter because this // code is run very infrequently and only in testing. Blame the inefficiency // on Linux Foundation, which currently lacks String.enumerateLines. let originalLines = pem.split(separator: "\n") for line in originalLines { let line = String(line) if !line.hasPrefix("-----") { lines.append(line) } } let encodedData = lines.joined(separator: "") return Data(base64Encoded: encodedData)! } // This function generates a random number suitable for use in an X509 // serial field. 
This needs to be a positive number less than 2^159 // (such that it will fit into 20 ASN.1 bytes). // This also needs to be portable across operating systems, and the easiest // way to do that is to use either getentropy() or read from urandom. Sadly // we need to support old Linuxes which may not possess getentropy as a syscall // (and definitely don't support it in glibc), so we need to read from urandom. // In the future we should just use getentropy and be happy. func randomSerialNumber() -> ASN1_INTEGER { let bytesToRead = 20 let fd = open("/dev/urandom", O_RDONLY) precondition(fd != -1) defer { close(fd) } var readBytes = Array.init(repeating: UInt8(0), count: bytesToRead) let readCount = readBytes.withUnsafeMutableBytes { read(fd, $0.baseAddress, bytesToRead) } precondition(readCount == bytesToRead) // Our 20-byte number needs to be converted into an integer. This is // too big for Swift's numbers, but BoringSSL can handle it fine. let bn = CNIOBoringSSL_BN_new() defer { CNIOBoringSSL_BN_free(bn) } _ = readBytes.withUnsafeBufferPointer { CNIOBoringSSL_BN_bin2bn($0.baseAddress, $0.count, bn) } // We want to bitshift this right by 1 bit to ensure it's smaller than // 2^159. CNIOBoringSSL_BN_rshift1(bn, bn) // Now we can turn this into our ASN1_INTEGER. var asn1int = ASN1_INTEGER() CNIOBoringSSL_BN_to_ASN1_INTEGER(bn, &asn1int) return asn1int } func generateRSAPrivateKey() -> OpaquePointer { let exponent = CNIOBoringSSL_BN_new() defer { CNIOBoringSSL_BN_free(exponent) } CNIOBoringSSL_BN_set_u64(exponent, 0x10001) let rsa = CNIOBoringSSL_RSA_new()! let generateRC = CNIOBoringSSL_RSA_generate_key_ex(rsa, CInt(2048), exponent, nil) precondition(generateRC == 1) let pkey = CNIOBoringSSL_EVP_PKEY_new()! let assignRC = CNIOBoringSSL_EVP_PKEY_assign_RSA(pkey, rsa) precondition(assignRC == 1) return pkey } func generateECPrivateKey(curveNID: CInt = NID_X9_62_prime256v1) -> OpaquePointer { let ctx = CNIOBoringSSL_EVP_PKEY_CTX_new_id(EVP_PKEY_EC, nil)! 
defer { CNIOBoringSSL_EVP_PKEY_CTX_free(ctx) } precondition(CNIOBoringSSL_EVP_PKEY_keygen_init(ctx) == 1) precondition(CNIOBoringSSL_EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx, curveNID) == 1) var pkey: OpaquePointer? = nil precondition(CNIOBoringSSL_EVP_PKEY_keygen(ctx, &pkey) == 1) return pkey! } func addExtension(x509: OpaquePointer, nid: CInt, value: String) { var extensionContext = X509V3_CTX() CNIOBoringSSL_X509V3_set_ctx(&extensionContext, x509, x509, nil, nil, 0) let ext = value.withCString { (pointer) in CNIOBoringSSL_X509V3_EXT_nconf_nid(nil, &extensionContext, nid, UnsafeMutablePointer(mutating: pointer)) }! CNIOBoringSSL_X509_add_ext(x509, ext, -1) CNIOBoringSSL_X509_EXTENSION_free(ext) } func generateSelfSignedCert( keygenFunction: () -> OpaquePointer = generateRSAPrivateKey ) -> (NIOSSLCertificate, NIOSSLPrivateKey) { let pkey = keygenFunction() let x = CNIOBoringSSL_X509_new()! CNIOBoringSSL_X509_set_version(x, 2) // NB: X509_set_serialNumber uses an internal copy of the ASN1_INTEGER, so this is // safe, there will be no use-after-free. var serial = randomSerialNumber() CNIOBoringSSL_X509_set_serialNumber(x, &serial) let notBefore = CNIOBoringSSL_ASN1_TIME_new()! var now = time(nil) CNIOBoringSSL_ASN1_TIME_set(notBefore, now) CNIOBoringSSL_X509_set_notBefore(x, notBefore) CNIOBoringSSL_ASN1_TIME_free(notBefore) now += 60 * 60 // Give ourselves an hour let notAfter = CNIOBoringSSL_ASN1_TIME_new()! 
CNIOBoringSSL_ASN1_TIME_set(notAfter, now) CNIOBoringSSL_X509_set_notAfter(x, notAfter) CNIOBoringSSL_ASN1_TIME_free(notAfter) CNIOBoringSSL_X509_set_pubkey(x, pkey) let commonName = "localhost" let name = CNIOBoringSSL_X509_get_subject_name(x) commonName.withCString { (pointer: UnsafePointer) -> Void in pointer.withMemoryRebound(to: UInt8.self, capacity: commonName.lengthOfBytes(using: .utf8)) { (pointer: UnsafePointer) -> Void in CNIOBoringSSL_X509_NAME_add_entry_by_NID( name, NID_commonName, MBSTRING_UTF8, UnsafeMutablePointer(mutating: pointer), ossl_ssize_t(commonName.lengthOfBytes(using: .utf8)), -1, 0 ) } } CNIOBoringSSL_X509_set_issuer_name(x, name) addExtension(x509: x, nid: NID_basic_constraints, value: "critical,CA:FALSE") addExtension(x509: x, nid: NID_subject_key_identifier, value: "hash") addExtension(x509: x, nid: NID_subject_alt_name, value: "DNS:localhost") addExtension(x509: x, nid: NID_ext_key_usage, value: "critical,serverAuth,clientAuth") CNIOBoringSSL_X509_sign(x, pkey, CNIOBoringSSL_EVP_sha256()) return ( NIOSSLCertificate.fromUnsafePointer(takingOwnership: x), NIOSSLPrivateKey.fromUnsafePointer(takingOwnership: pkey) ) } final class BackToBackEmbeddedChannel { private(set) var client: EmbeddedChannel private(set) var server: EmbeddedChannel private(set) var loop: EmbeddedEventLoop init() { self.loop = EmbeddedEventLoop() self.client = EmbeddedChannel(loop: self.loop) self.server = EmbeddedChannel(loop: self.loop) } func run() { self.loop.run() } func connectInMemory() throws { let addr = try assertNoThrowWithValue(SocketAddress(unixDomainSocketPath: "/tmp/whatever2")) let connectFuture = self.client.connect(to: addr) self.server.pipeline.fireChannelActive() try self.interactInMemory() try connectFuture.wait() } func interactInMemory() throws { var workToDo = true while workToDo { workToDo = false self.loop.run() let clientDatum = try self.client.readOutbound(as: IOData.self) let serverDatum = try self.server.readOutbound(as: IOData.self) // 
Reads may trigger errors. The write case is automatic. try self.client.throwIfErrorCaught() try self.server.throwIfErrorCaught() if let clientMsg = clientDatum { try self.server.writeInbound(clientMsg) workToDo = true } if let serverMsg = serverDatum { try self.client.writeInbound(serverMsg) workToDo = true } } } } ================================================ FILE: Tests/NIOSSLTests/ObjectIdentifierTests.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2022 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// @_implementationOnly import CNIOBoringSSL import XCTest @testable import NIOSSL private final class OIDMemoryOwner { var reference: OpaquePointer! 
public init?(_ string: String) { let result = string.withCString { string in CNIOBoringSSL_OBJ_txt2obj(string, 1) } guard let reference = result else { return nil } self.reference = reference } deinit { CNIOBoringSSL_ASN1_OBJECT_free(self.reference) } } final class ObjectIdentifierTests: XCTestCase { func testEquatable() { XCTAssertEqual( NIOSSLObjectIdentifier("1.1"), NIOSSLObjectIdentifier("1.1") ) XCTAssertEqual(NIOSSLObjectIdentifier("1.2"), NIOSSLObjectIdentifier("1.2")) XCTAssertEqual(NIOSSLObjectIdentifier("1.2.3"), NIOSSLObjectIdentifier("1.2.3")) XCTAssertNotEqual(NIOSSLObjectIdentifier("1"), NIOSSLObjectIdentifier("1.2")) XCTAssertNotEqual(NIOSSLObjectIdentifier("1.2"), NIOSSLObjectIdentifier("1.2.3")) } func testHashable() { XCTAssertEqual( Set([ NIOSSLObjectIdentifier("1.1") ]), Set([ NIOSSLObjectIdentifier("1.1") ]) ) XCTAssertEqual( Set([ NIOSSLObjectIdentifier("1.1"), NIOSSLObjectIdentifier("1.2"), ]), Set([ NIOSSLObjectIdentifier("1.2"), NIOSSLObjectIdentifier("1.1"), ]) ) } func testCustomStringConvertible() { XCTAssertEqual(NIOSSLObjectIdentifier("1.1")?.description, "1.1") XCTAssertEqual(NIOSSLObjectIdentifier("1.2")?.description, "1.2") XCTAssertEqual(NIOSSLObjectIdentifier("1.2.3")?.description, "1.2.3") XCTAssertEqual(NIOSSLObjectIdentifier("1.2.3.4")?.description, "1.2.3.4") } func testUnowned() { var owner: Optional = OIDMemoryOwner("1.2.3")! #if compiler(>=6.3) weak let weakReferenceToOwner = owner #else weak var weakReferenceToOwner = owner #endif var oid: Optional = NIOSSLObjectIdentifier(borrowing: owner!.reference, owner: owner!) XCTAssertEqual(oid?.description, "1.2.3") owner = nil XCTAssertNotNil(weakReferenceToOwner, "OID should still have a strong reference to the owner") XCTAssertEqual(oid?.description, "1.2.3") oid = nil XCTAssertNil( weakReferenceToOwner, "OID is released and therefore no one should still have a strong reference to the owner" ) } func testCopy() { var owner: Optional = OIDMemoryOwner("1.2.3")! 
#if compiler(>=6.3) weak let weakReferenceToOwner = owner #else weak var weakReferenceToOwner = owner #endif let oid: Optional = withExtendedLifetime(owner) { NIOSSLObjectIdentifier(copyOf: $0?.reference) } XCTAssertEqual(oid?.description, "1.2.3") owner = nil XCTAssertNil(weakReferenceToOwner, "OID should no longer have a strong reference to the owner") XCTAssertEqual(oid?.description, "1.2.3", "copy should still work after the original owner is deallocated") } } ================================================ FILE: Tests/NIOSSLTests/SSLCertificateExtensionsTests.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2022 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import XCTest @testable import NIOSSL final class SSLCertificateExtensionsTests: XCTestCase { func test() throws { let cert = try NIOSSLCertificate(bytes: Array(samplePemCert.utf8), format: .pem) XCTAssertEqual(cert._extensions.count, 3) let basicConstraint = try XCTUnwrap( cert._extensions.first(where: { $0.objectIdentifier == .init("2.5.29.19")! }) ) let subjectKeyIdentifier = try XCTUnwrap( cert._extensions.first(where: { $0.objectIdentifier == .init("2.5.29.14")! }) ) let authorityKeyIdentifier = try XCTUnwrap( cert._extensions.first(where: { $0.objectIdentifier == .init("2.5.29.35")! 
}) ) XCTAssertEqual(basicConstraint.isCritical, false) XCTAssertEqual( Array(basicConstraint.data), [ 0x30, 0x3, 0x1, 0x1, 0xFF, ] ) XCTAssertEqual(subjectKeyIdentifier.isCritical, false) XCTAssertEqual( Array(subjectKeyIdentifier.data), [ 0x04, 0x14, 0xE8, 0x3D, 0x19, 0x93, 0x05, 0x01, 0x05, 0xE4, 0x5E, 0x4B, 0x70, 0xF8, 0x94, 0x23, 0x9A, 0x81, 0x34, 0xF0, 0x54, 0xF8, ] ) XCTAssertEqual(authorityKeyIdentifier.isCritical, false) XCTAssertEqual( Array(authorityKeyIdentifier.data), [ 0x30, 0x16, 0x80, 0x14, 0xE8, 0x3D, 0x19, 0x93, 0x05, 0x01, 0x05, 0xE4, 0x5E, 0x4B, 0x70, 0xF8, 0x94, 0x23, 0x9A, 0x81, 0x34, 0xF0, 0x54, 0xF8, ] ) } func testEmptyExtensions() { let extensions = NIOSSLCertificate._Extensions(takeOwnershipOf: nil) XCTAssertEqual(extensions.count, 0) } func testUnowned() throws { var owner: Optional = try NIOSSLCertificate(bytes: Array(samplePemCert.utf8), format: .pem) #if compiler(>=6.3) weak let weakReferenceToOwner = owner #else weak var weakReferenceToOwner = owner #endif var extensions: Optional = owner!._extensions XCTAssertEqual(extensions.map { Array($0) }?.count, 3) owner = nil XCTAssertNotNil(weakReferenceToOwner, "extensions should still have a strong reference to the owner") XCTAssertEqual(extensions.map { Array($0) }?.count, 3) extensions = nil XCTAssertNil( weakReferenceToOwner, "extensions are released and therefore no one should still have a strong reference to the owner" ) } } ================================================ FILE: Tests/NIOSSLTests/SSLCertificateTest.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2021 Apple Inc. 
and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import Foundation import NIOCore import XCTest @testable import NIOSSL let multiSanCert = """ -----BEGIN CERTIFICATE----- MIIDEzCCAfugAwIBAgIURiMaUmhI1Xr0mZ4p+JmI0XjZTaIwDQYJKoZIhvcNAQEL BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTE3MTAzMDEyMDUwMFoXDTQwMDEw MTAwMDAwMFowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF AAOCAQ8AMIIBCgKCAQEA26DcKAxqdWivhS/J3Klf+cEnrT2cDzLhmVRCHuQZXiIr tqr5401KDbRTVOg8v2qIyd8x4+YbpE47JP3fBrcMey70UK/Er8nu28RY3z7gZLLi Yf+obHdDFCK5JaCGmM61I0c0vp7aMXsyv7h3vjEzTuBMlKR8p37ftaXSUAe3Qk/D /fzA3k02E2e3ap0Sapd/wUu/0n/MFyy9HkkeykivAzLaaFhhvp3hATdFYC4FLld8 OMB60bC2S13CAljpMlpjU/XLLOUbaPgnNUqE1nFqFBoTl6kV6+ii8Dd5ENVvE7pE SoNoyGLDUkDRJJMNUHAo0zbxyhd7WOtyZ7B4YBbPswIDAQABo10wWzBLBgNVHREE RDBCgglsb2NhbGhvc3SCC2V4YW1wbGUuY29tgRB1c2VyQGV4YW1wbGUuY29thwTA qAABhxAgAQ24AAAAAAAAAAAAAAABMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQEL BQADggEBACYBArIoL9ZzVX3M+WmTD5epmGEffrH7diRJZsfpVXi86brBPrbvpTBx Fa+ZKxBAchPnWn4rxoWVJmTm4WYqZljek7oQKzidu88rMTbsxHA+/qyVPVlQ898I hgnW4h3FFapKOFqq5Hj2gKKItFIcGoVY2oLTBFkyfAx0ofromGQp3fh58KlPhC0W GX1nFCea74mGyq60X86aEWiyecYYj5AEcaDrTnGg3HLGTsD3mh8SUZPAda13rO4+ RGtGsA1C9Yovlu9a6pWLgephYJ73XYPmRIGgM64fkUbSuvXNJMYbWnzpoCdW6hka IEaDUul/WnIkn/JZx8n+wgoWtyQa4EA= -----END CERTIFICATE----- """ let multiCNCert = """ -----BEGIN CERTIFICATE----- MIIDLjCCAhagAwIBAgIUR6eOMdEFZAqorykK6u6rwPGfsh0wDQYJKoZIhvcNAQEL BQAwSDELMAkGA1UEBhMCVVMxEjAQBgNVBAMMCUlnbm9yZSBtZTERMA8GA1UECAwI TmVicmFza2ExEjAQBgNVBAMMCWxvY2FsaG9zdDAeFw0xNzExMDIxMzM5MjRaFw00 MDAxMDEwMDAwMDBaMEgxCzAJBgNVBAYTAlVTMRIwEAYDVQQDDAlJZ25vcmUgbWUx ETAPBgNVBAgMCE5lYnJhc2thMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqG SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCb/wE6/pF40KmF4bgtrlInWIojsDma08q7 
cK9LpzifjYNTrlTv7+8tR3TRkWwThW4sMGckq9u1Bty9aF50sazBZaLDZYoamuHS 43T7hj4aX++lEq+inlXaNX3WmKkq0y0ANLBsXaLC+8J+xemlXErBsacK1Lz8Yz// lVOwD85LG6UN87j8L/L5+t922HyGhQRVTvcbmXa05JovMXILXnoUeEvNteZZtLa0 zcpO+9pN/VwmxVOnQncxTG81FV6Qypx7YFf16QyEDVkXrt7/l6k+I+sAzBHIn28Y cPq/HfcAbWPU+gMiCLCplDi5NCyL7yyiG7bEjxR0oiWhzZG1abgjAgMBAAGjEDAO MAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggEBAFAknMMePElmNsuzEUWO m2a6n/cHEaJzDEVLbFifcCUNU2U6o2bgXrJIBjFudISYPjpG+gmnuAwdfu6CA63M wiuLaLQASz6W12pRqlIriUazDn4JnIHu8wCHj8QkYTV7HunhtGJjX7xT89dRS5Y/ IJv0Q9J2CZ16d3ETCzWp2Djq1IPggkBrsgKalJmwsiWi8UkH/GeMA+YQ1p8r9Bvp +Jd1VitqxJFG5tgT68dq1LxlsNb4L1Cm15m8LdhY5BgSO2AG9G4gBbO0ixZJwHbn TLiPC0Jd3x5tf9qeSv1eWHuhQd9R908EhZdC6rgN8fZfMux2tQxNbIsNPYAQhmsB /nc= -----END CERTIFICATE----- """ let noCNCert = """ -----BEGIN CERTIFICATE----- MIIC3jCCAcagAwIBAgIUeB9gFXDDe/kTcsPGlHIZ4M+SpyYwDQYJKoZIhvcNAQEL BQAwIDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5lYnJhc2thMB4XDTE3MTEwMjEz NDIwMFoXDTQwMDEwMTAwMDAwMFowIDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l YnJhc2thMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2DqRr+tIXVXz 4VZA5dSJo4pPgC+lNngg8Bpk9pedmOj8GSdvbIkRmXPRqOIw33vurfGVqcYiX3DH HcVKS6ZF/ylE4dDH7JmGvCYpJTK6+02nkpdz3CzoX8lIRHBSJAJwny/UK20QBhsU OWm/mD0uCRfgfp9FasKqA56OBFGNYAOTAM33RHuXQNSSfV5FmSmNkWsiM1S+EUgH PptKQlXUfiSUFBCuyy9iItSg2fOew3C6/dXJ47T4mFi5qD/WKmI3uSNqBKNPcHI8 EGZX4r8w0Hvq2hV13t+hexaLkS6VeZWb1kTrdgDPnjcl43txePPP7tEGRlZFO+bI V2j0pGb/iwIDAQABoxAwDjAMBgNVHRMBAf8EAjAAMA0GCSqGSIb3DQEBCwUAA4IB AQC27ElJn7TWhr2WcPsdXxGNUpepFgXEsEAVnoiJPI9XqXdOZCFWTUmRXrQICPUn 8HnVfYTFl/WZnTYhf8Ky5RB1W3X0Juo7MGge7mx6D6m8yJVBecQxPasVienTvdDg UZI2oodxnS7egFwlvFM2ZUmPLVvq0B7VQBSa2Ix3FChNtJ1u5yfHyoDUMRfYl0bF 0B3poAgLDW3FUQ7QoErMvslUJrFxmJXUKKdhg9z6jQTdcmZ6Dr8sFZQkRADbJRzm AYqSreeyevxdoNwQrpZMAGm61nc7OS8i0Q0JRe3FpGD29BMS0ystlzDeNnUpf+yJ u9dFQrCkq8MilGSO1L2bZsqY -----END CERTIFICATE----- """ let unicodeCNCert = """ -----BEGIN CERTIFICATE----- MIICyjCCAbKgAwIBAgIUeK7KUVK7tcUhxVnkSWEsqHj07TEwDQYJKoZIhvcNAQEL 
BQAwFjEUMBIGA1UEAwwLc3RyYcOfZS5vcmcwHhcNMTcxMTAyMTM0NzQxWhcNNDAw MTAxMDAwMDAwWjAWMRQwEgYDVQQDDAtzdHJhw59lLm9yZzCCASIwDQYJKoZIhvcN AQEBBQADggEPADCCAQoCggEBAO0Anpw+WpM897YXUNHI4oTr4BUxIcOC2A7LQiQ0 briNXLIaIN8irwaa4TwCqvjg2B09GGO7EWvi0EX050X0jFFiSDdGhSZGMLL34nfk /HW14XjTCW+LkYcFAyOD8Kf3nGGLagIdtnPWQ3Atf6rTf5A35K75+penURN226xB t0vKqtngYTFu0n6B/+Ip6FI/Bq8yyGtPN74yR79KG3WL7mvrEHxv+TnZkb2F6f2j cJALEJPx8wFug154EnRDOURZMX5gmHRR/Xm9jP1R7Rch+4Ue2Fy38C1a35p0Saap JDKSmxr2430bQ5S41BTT5Q3N6eBD7f+cqaQyoa0u+qvl+gcCAwEAAaMQMA4wDAYD VR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAM7x+J+A2UN+RCKEjJUc9rM3S G9AxfhKO3VN1mrPRs6JG1ED7t/9e2xdjLRRl84Rz9jnaKVTS2sQ8yKYejWGUbXDq WO6KNlrjzspL3M8EIoi7QNwtRktviCkkxxwhzDfuH9N6ncjq0qod0vxGq0nqxrAo VJto6NnrshZEQHGF8uipOFPNTDAR0SpzyzXaK59oqSPJ5VrZiQ3p8izVuRE9r1u2 i5PCcPYi39q101UIxV/WokS0mqHx/XuTYTwhWYd/C49OnM8MLZOUJd8w0VvS0ItY /wAv4vk0ScS4KmXTJBBGSiBqLdroaM9VKcA1p7TN0vzlut2E/nKmBhgzQJFKZA== -----END CERTIFICATE----- """ // created with the following command: // openssl req -x509 -newkey rsa:4096 -sha256 -days 3650 -nodes \ // -keyout private.pem -out cert.pem -subj '/CN=example.com' \ // -extensions san \ // -config <(echo '[req]'; echo 'distinguished_name=req'; // echo '[san]'; echo 'subjectAltName=DNS:localhost,DNS:example.com,email:user@example.com,IP:192.168.0.1,IP:2001:db8::1,URI:http://example.com/path?query=param,URI:http://example.org/') let certWithAllSupportedSANTypes = """ -----BEGIN CERTIFICATE----- MIIFOzCCAyOgAwIBAgIJALPXfgvEjcDsMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV BAMMC2V4YW1wbGUuY29tMB4XDTIyMDMwOTE4MTIxN1oXDTMyMDMwNjE4MTIxN1ow FjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw ggIKAoICAQC7Bwqt8H+O3zotsGo4KMjipytzfmYiTOS0Id6HY1zfLVGrOSRTbFAE BmpPWbu4TzDZpxoNa6oQYqcpMqHTJe6+U/Coz+Xm+fSqWVZPLzcX2iW6igeS5cH5 2C9sWbzbYJku3qiNc0B0K+sIPQLeUM8sc2UK6rL3Vc6kt/SRRjshZNj6hPRqQNv6 85ul6yxICOooX6Xy/q0lqJaWaIOk2GZa/Genz/93RbKnLCpynSX0JETcIW8uFIPo 3BeyFcvgThYUq/KvpkkNPqOp7SOfO5rFLi9IRlDNuUF9h4hLZ+qV3NaxQ5mk+8xl 
BcNPDNqucNwQ7UKRNEfipmVPE44txMh06VcahcSzc+FKGsQmlNON0WwMfTTRhCPD Y2JVKZ5BpsgUtrivC4UbNmNJEVQQ9dJBcsALuwhoJo5CL0tkI2Dx/eo6fpwL6KDu ZE71MZ8BSJ8fW620fGedR+Cr+Jeq5H5eGGaWw55hXRKHbOQgjvIC6LKp9CB4/wNK jwlWEgae/EiI7iCuSOLj+yGbWvCnUcYdzYxuxZMY1x097dXxWObzJHgHllIT8639 LqDT7+Xrhqoe0eMxeYwHzE8VMEPPpBeZAGzYO1lXF2lWqzIaPHK2oIeNj8Rskzqd GFJPSvTZqEUBxgITiz5Ba46G9Cyi4oVom5CIPI+UWBLxDLjUiDbYtQIDAQABo4GL MIGIMIGFBgNVHREEfjB8gglsb2NhbGhvc3SCC2V4YW1wbGUuY29tgRB1c2VyQGV4 YW1wbGUuY29thwTAqAABhxAgAQ24AAAAAAAAAAAAAAABhiNodHRwOi8vZXhhbXBs ZS5jb20vcGF0aD9xdWVyeT1wYXJhbYYTaHR0cDovL2V4YW1wbGUub3JnLzANBgkq hkiG9w0BAQsFAAOCAgEAJFzzbzD5+YGnX2cXKms9ZSqzdFyzkU1/Glc4gCJJu0ch GxdqRA1D9eiYtaumtnTwdN/VsJGtHQy87ur+9hawQ7MwA8E45RJoibwT/trCggzK gjWeor9l1ahwr4eBgmmWDzmdUXd2HcCBRR5iXfU3CLj8BUT2EXx8iFbkHHc5uZGi 19ZfyAaWBV5KkkMjk9FMYAoFCsv/eDjtQzlfJrgKDcAZu7GD7ijYcw2buGeRl9SG //QKkyAVEnY2Fpn0v+pwOWBunB4EV2bRK+TbSScaU0EC3+AT9Xl62IAqJsdmTOrr URM7cuo6HVFLhNbAsUZMwd/orLQmKnp+njZOKdcq+J8f3aIUhKBIKg+sYcFVpV1Z Mpmm/M04hN+EGuZqASJRfIE1CI5PXizVd6sQd1A/zhoy9QtfbVGgxWklYgmy7ycB wS41t3bU8LLCC3RXflBOBz4y+/7Oe6muRWUAEXt4rgc4Zv391SfIFpwEaNOtFATl LzVcCAEmtY1Fyp4cOm6GEMjZ0H0buOaCRoYJb3KYZm5L6c58Ahom2GfAdtdoiRcX 7JHZybbOiOTgThxfXxgzABq/HVLC5PNVlAk95SYcoFMjixyDt2S9JD9fnGI3H9CT kVuVyNH7NBMh6YOuTL1dh55bvDjvgkuzudepsZnpfjgQKE1aZ7dL32Xi000gBM8= -----END CERTIFICATE----- """ func makeTemporaryFile(fileExtension: String = "", customPath: String = "") throws -> String { var template = "\(FileManager.default.temporaryDirectory.path)/niotestXXXXXXX\(fileExtension)" // If a custom file path is passed in then a new directory has to also be created. Then the file can be written to that directory. 
if !customPath.isEmpty { let path = "\(FileManager.default.temporaryDirectory.path)/\(customPath)/" try FileManager.default.createDirectory(at: URL(fileURLWithPath: path), withIntermediateDirectories: true) template = "\(FileManager.default.temporaryDirectory.path)/\(customPath)/niotestXXXXXXX\(fileExtension)" } var templateBytes = template.utf8 + [0] let fd = templateBytes.withUnsafeMutableBufferPointer { ptr in ptr.baseAddress!.withMemoryRebound(to: Int8.self, capacity: ptr.count) { (ptr: UnsafeMutablePointer) in mkstemps(ptr, CInt(fileExtension.utf8.count)) } } close(fd) templateBytes.removeLast() return String(decoding: templateBytes, as: UTF8.self) } internal func dumpToFile(data: Data, fileExtension: String = "", customPath: String = "") throws -> String { let filename = try makeTemporaryFile(fileExtension: fileExtension, customPath: customPath) try data.write(to: URL(fileURLWithPath: filename)) return filename } internal func dumpToFile(text: String, fileExtension: String = "") throws -> String { try dumpToFile(data: text.data(using: .utf8)!, fileExtension: fileExtension) } class SSLCertificateTest: XCTestCase { static let dynamicallyGeneratedCert = generateSelfSignedCert().0 private static func withPemCertPath( _ body: (String) throws -> ReturnType ) throws -> ReturnType { let pemCertFilePath = try dumpToFile(text: samplePemCert) defer { unlink(pemCertFilePath) } return try body(pemCertFilePath) } private static func withPemCertsPath( _ body: (String) throws -> ReturnType ) throws -> ReturnType { let pemCertsFilePath = try dumpToFile(text: samplePemCerts) defer { unlink(pemCertsFilePath) } return try body(pemCertsFilePath) } private static func withDerCertPath( _ body: (String) throws -> ReturnType ) throws -> ReturnType { let derCertFilePath = try dumpToFile(data: sampleDerCert) defer { unlink(derCertFilePath) } return try body(derCertFilePath) } private func dateFromComponents(year: Int, month: Int, day: Int, hour: Int, minute: Int, second: Int) -> Date { 
        // Assemble the components; `date.date!` force-unwraps because every
        // field is fully specified, so the conversion cannot fail here.
        var date = DateComponents()
        date.calendar = Calendar(identifier: .gregorian)
        date.year = year
        date.month = month
        date.day = day
        date.hour = hour
        date.minute = minute
        date.second = second
        date.timeZone = TimeZone(abbreviation: "UTC")
        return date.date!
    }

    /// Loading the same PEM file twice must produce equal certs with equal hashes,
    /// and those must differ from an unrelated self-signed cert.
    func testLoadingPemCertFromFile() throws {
        let (cert1, cert2) = try Self.withPemCertPath {
            let cert = try NIOSSLCertificate.fromPEMFile($0).first!
            return (cert, cert)
        }
        XCTAssertEqual(cert1, cert2)
        XCTAssertEqual(cert1.hashValue, cert2.hashValue)
        XCTAssertNotEqual(cert1, SSLCertificateTest.dynamicallyGeneratedCert)
        XCTAssertNotEqual(cert1.hashValue, SSLCertificateTest.dynamicallyGeneratedCert.hashValue)
    }

    /// Same invariants as above, but for the DER loading path.
    func testLoadingDerCertFromFile() throws {
        let (cert1, cert2) = try Self.withDerCertPath {
            let cert = try NIOSSLCertificate.fromDERFile($0)
            return (cert, cert)
        }
        XCTAssertEqual(cert1, cert2)
        XCTAssertEqual(cert1.hashValue, cert2.hashValue)
        XCTAssertNotEqual(cert1, SSLCertificateTest.dynamicallyGeneratedCert)
        XCTAssertNotEqual(cert1.hashValue, SSLCertificateTest.dynamicallyGeneratedCert.hashValue)
    }

    /// The PEM and DER fixtures encode the same certificate, so loading either
    /// representation must yield equal objects.
    func testDerAndPemAreIdentical() throws {
        let cert1 = try Self.withPemCertPath {
            try NIOSSLCertificate.fromPEMFile($0).first!
        }
        let cert2 = try Self.withDerCertPath {
            try NIOSSLCertificate.fromDERFile($0)
        }
        XCTAssertEqual(cert1, cert2)
        XCTAssertEqual(cert1.hashValue, cert2.hashValue)
    }

    /// In-memory PEM loading is deterministic: two loads compare equal.
    func testLoadingPemCertFromMemory() throws {
        let cert1 = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem)
        let cert2 = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem)
        XCTAssertEqual(cert1, cert2)
        XCTAssertEqual(cert1.hashValue, cert2.hashValue)
    }

    /// `fromPEMBytes` (multi-cert API) and the single-cert initializer agree
    /// on a single-cert input.
    func testPemLoadingMechanismsAreIdentical() throws {
        let cert11 = try NIOSSLCertificate.fromPEMBytes(.init(samplePemCert.utf8))
        let cert12 = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem)
        XCTAssertEqual(cert11, [cert12])
        XCTAssertEqual(cert11.map { $0.hashValue }, [cert12.hashValue])
    }

    /// A two-cert PEM bundle loads as exactly two certs, deterministically.
    func testLoadingPemCertsFromMemory() throws {
        let certs1 = try NIOSSLCertificate.fromPEMBytes(.init(samplePemCerts.utf8))
        let certs2 = try NIOSSLCertificate.fromPEMBytes(.init(samplePemCerts.utf8))
        XCTAssertEqual(certs1.count, 2)
        XCTAssertEqual(certs1, certs2)
        XCTAssertEqual(certs1.map { $0.hashValue }, certs2.map { $0.hashValue })
    }

    /// Same two-cert bundle check via the file-based API.
    func testLoadingPemCertsFromFile() throws {
        let (certs1, certs2) = try Self.withPemCertsPath {
            (
                try NIOSSLCertificate.fromPEMFile($0),
                try NIOSSLCertificate.fromPEMFile($0)
            )
        }
        XCTAssertEqual(certs1.count, 2)
        XCTAssertEqual(certs1, certs2)
        XCTAssertEqual(certs1.map { $0.hashValue }, certs2.map { $0.hashValue })
    }

    /// In-memory DER loading is deterministic: two loads compare equal.
    func testLoadingDerCertFromMemory() throws {
        let certBytes = [UInt8](sampleDerCert)
        let cert1 = try NIOSSLCertificate(bytes: certBytes, format: .der)
        let cert2 = try NIOSSLCertificate(bytes: certBytes, format: .der)
        XCTAssertEqual(cert1, cert2)
        XCTAssertEqual(cert1.hashValue, cert2.hashValue)
    }

    /// Garbage bytes must be rejected by the PEM initializer with
    /// `.failedToLoadCertificate`.
    func testLoadingGibberishFromMemoryAsPemFails() throws {
        let keyBytes: [UInt8] = [1, 2, 3]
        XCTAssertThrowsError(try NIOSSLCertificate(bytes: keyBytes, format: .pem)) { error in
            XCTAssertEqual(.failedToLoadCertificate, error as?
            NIOSSLError)
        }
    }

    /// Garbage bytes must be rejected by the multi-cert PEM API too.
    func testLoadingGibberishFromPEMBufferFails() throws {
        let keyBytes: [UInt8] = [1, 2, 3]
        XCTAssertThrowsError(try NIOSSLCertificate.fromPEMBytes(keyBytes)) { error in
            XCTAssertEqual(.failedToLoadCertificate, error as? NIOSSLError)
        }
    }

    /// Garbage bytes must be rejected by the DER initializer.
    func testLoadingGibberishFromMemoryAsDerFails() throws {
        let keyBytes: [UInt8] = [1, 2, 3]
        XCTAssertThrowsError(try NIOSSLCertificate(bytes: keyBytes, format: .der)) { error in
            XCTAssertEqual(.failedToLoadCertificate, error as? NIOSSLError)
        }
    }

    /// A file containing non-PEM text must fail to load as PEM.
    func testLoadingGibberishFromFileAsPemFails() throws {
        let tempFile = try dumpToFile(text: "hello")
        defer {
            _ = tempFile.withCString { unlink($0) }
        }
        XCTAssertThrowsError(try NIOSSLCertificate.fromPEMFile(tempFile)) { error in
            XCTAssertEqual(.failedToLoadCertificate, error as? NIOSSLError)
        }
    }

    /// Same as the previous test via the same `fromPEMFile` entry point.
    func testLoadingGibberishFromPEMFileFails() throws {
        let tempFile = try dumpToFile(text: "hello")
        defer {
            _ = tempFile.withCString { unlink($0) }
        }
        XCTAssertThrowsError(try NIOSSLCertificate.fromPEMFile(tempFile)) { error in
            XCTAssertEqual(.failedToLoadCertificate, error as? NIOSSLError)
        }
    }

    /// A file containing non-DER text must fail to load as DER.
    func testLoadingGibberishFromFileAsDerFails() throws {
        let tempFile = try dumpToFile(text: "hello")
        defer {
            _ = tempFile.withCString { unlink($0) }
        }
        XCTAssertThrowsError(try NIOSSLCertificate.fromDERFile(tempFile)) { error in
            XCTAssertEqual(.failedToLoadCertificate, error as? NIOSSLError)
        }
    }

    /// The deprecated file-based initializer surfaces a POSIX `IOError`
    /// (ENOENT) whose description names the missing path.
    @available(*, deprecated, message: "Deprecated to test deprecated functionality")
    func testLoadingNonexistentFileAsPem() throws {
        XCTAssertThrowsError(try NIOSSLCertificate(file: "/nonexistent/path", format: .pem)) { error in
            guard let error = error as? IOError else {
                return XCTFail("unexpected error \(error)")
            }
            XCTAssertEqual(ENOENT, error.errnoCode)
            XCTAssertEqual(
                error.description.contains("/nonexistent/path"),
                true,
                "error description should contain file path. Description: \(error.description)"
            )
        }
    }

    /// The newer `fromPEMFile` API reports a missing file as
    /// `.failedToLoadCertificate` rather than an `IOError`.
    func testLoadingNonexistentPEMFile() throws {
        XCTAssertThrowsError(try NIOSSLCertificate.fromPEMFile("/nonexistent/path")) { error in
            XCTAssertEqual(.failedToLoadCertificate, error as? NIOSSLError)
        }
    }

    /// The DER file API, by contrast, surfaces the underlying `IOError`.
    func testLoadingNonexistentFileAsDer() throws {
        XCTAssertThrowsError(try NIOSSLCertificate.fromDERFile("/nonexistent/path")) { error in
            guard let error = error as? IOError else {
                return XCTFail("unexpected error \(error)")
            }
            XCTAssertEqual(ENOENT, error.errnoCode)
            XCTAssertEqual(
                error.description.contains("/nonexistent/path"),
                true,
                "error description should contain file path. Description: \(error.description)"
            )
        }
    }

    /// Walks every SAN entry of a fixture cert carrying all supported SAN
    /// types, checking type and raw contents in declaration order:
    /// two DNS names, an email, an IPv4 and an IPv6 address, and two URIs.
    func testEnumeratingSanFields() throws {
        var v4addr = in_addr()
        var v6addr = in6_addr()
        // Build the expected binary representations of the IP SANs.
        precondition(inet_pton(AF_INET, "192.168.0.1", &v4addr) == 1)
        precondition(inet_pton(AF_INET6, "2001:db8::1", &v6addr) == 1)
        let cert = try NIOSSLCertificate(bytes: .init(certWithAllSupportedSANTypes.utf8), format: .pem)
        let sans = cert._subjectAlternativeNames()
        XCTAssertEqual(sans.count, 7)
        XCTAssertEqual(sans[0].nameType, .dnsName)
        XCTAssertEqual(String(decoding: sans[0].contents, as: UTF8.self), "localhost")
        XCTAssertEqual(sans[1].nameType, .dnsName)
        XCTAssertEqual(String(decoding: sans[1].contents, as: UTF8.self), "example.com")
        XCTAssertEqual(sans[2].nameType, .email)
        XCTAssertEqual(String(decoding: sans[2].contents, as: UTF8.self), "user@example.com")
        XCTAssertEqual(sans[3].nameType, .ipAddress)
        // IP SANs are stored as raw network-order address bytes.
        withUnsafeBytes(of: &v4addr) { v4addr in
            XCTAssertEqual(Array(sans[3].contents), Array(v4addr))
        }
        XCTAssertEqual(sans[4].nameType, .ipAddress)
        withUnsafeBytes(of: &v6addr) { v6addr in
            XCTAssertEqual(Array(sans[4].contents), Array(v6addr))
        }
        XCTAssertEqual(sans[5].nameType, .uri)
        XCTAssertEqual(String(decoding: sans[5].contents, as: UTF8.self), "http://example.com/path?query=param")
        XCTAssertEqual(sans[6].nameType, .uri)
        XCTAssertEqual(String(decoding: sans[6].contents, as: UTF8.self), "http://example.org/")
    }

    func
    testSubjectName() throws {
        // The fixture cert's full subject DN, in order, as typed name components.
        let cert = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem)
        XCTAssertEqual(
            cert.subjectName,
            [
                SSLCertificateName("US", .country),
                SSLCertificateName("California", .state),
                SSLCertificateName("San Fransokyo", .city),
                SSLCertificateName("San Fransokyo Institute of Technology", .organization),
                SSLCertificateName("Robotics Lab", .organizationalUnit),
                SSLCertificateName("robots.sanfransokyo.edu", .commonName),
            ]
        )
    }

    /// The intermediate-CA fixture exposes only a CN in its issuer DN.
    func testIssuerName() throws {
        let cert = try NIOSSLCertificate(bytes: .init(sampleIntermediateCA.utf8), format: .pem)
        XCTAssertEqual(cert.issuerName, [SSLCertificateName("badCertificateAuthority", .commonName)])
    }

    /// A cert without a SAN extension reports an empty SAN list.
    func testNonexistentSan() throws {
        let cert = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem)
        XCTAssertTrue(cert._subjectAlternativeNames().isEmpty)
    }

    /// `commonName()` returns the CN as raw UTF-8 bytes.
    func testCommonName() throws {
        let cert = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem)
        XCTAssertEqual([UInt8]("robots.sanfransokyo.edu".utf8), cert.commonName()!)
    }

    /// The dynamically generated self-signed cert uses CN=localhost.
    func testCommonNameForGeneratedCert() throws {
        XCTAssertEqual([UInt8]("localhost".utf8), SSLCertificateTest.dynamicallyGeneratedCert.commonName()!)
    }

    /// With multiple CNs present, `commonName()` picks "localhost" for this
    /// fixture (see `multiCNCert`).
    func testMultipleCommonNames() throws {
        let cert = try NIOSSLCertificate(bytes: .init(multiCNCert.utf8), format: .pem)
        XCTAssertEqual([UInt8]("localhost".utf8), cert.commonName()!)
    }

    /// A cert without any CN yields nil.
    func testNoCommonName() throws {
        let cert = try NIOSSLCertificate(bytes: .init(noCNCert.utf8), format: .pem)
        XCTAssertNil(cert.commonName())
    }

    /// Non-ASCII CNs round-trip as UTF-8 bytes.
    func testUnicodeCommonName() throws {
        let cert = try NIOSSLCertificate(bytes: .init(unicodeCNCert.utf8), format: .pem)
        XCTAssertEqual([UInt8]("straße.org".utf8), cert.commonName()!)
    }

    /// Extracting the public key and serializing it as SPKI matches the
    /// precomputed `sampleDerCertSPKI` bytes.
    func testExtractingPublicKey() throws {
        let cert = try assertNoThrowWithValue(NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem))
        let publicKey = try assertNoThrowWithValue(cert.extractPublicKey())
        let spkiBytes = try assertNoThrowWithValue(publicKey.toSPKIBytes())
        XCTAssertEqual(spkiBytes, sampleDerCertSPKI)
    }

    /// A cert loaded from PEM serializes back to the canonical DER bytes.
    func testDumpingPEMCert() throws {
        let expectedCertBytes = [UInt8](sampleDerCert)
        let cert = try assertNoThrowWithValue(NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem))
        let certBytes = try assertNoThrowWithValue(cert.toDERBytes())
        XCTAssertEqual(certBytes, expectedCertBytes)
    }

    /// DER round-trips byte-for-byte through load + `toDERBytes()`.
    func testDumpingDERCert() throws {
        let expectedCertBytes = [UInt8](sampleDerCert)
        let cert = try assertNoThrowWithValue(NIOSSLCertificate(bytes: expectedCertBytes, format: .der))
        let certBytes = try assertNoThrowWithValue(cert.toDERBytes())
        XCTAssertEqual(certBytes, expectedCertBytes)
    }

    // NOTE(review): in the two tests below, `expectedDebugDescription` is the
    // empty string in this extracted source. The original literal appears to
    // have been of the form "<NIOSSLCertificate;...>" and lost its content to
    // angle-bracket stripping during extraction — verify against the upstream
    // repository before relying on these expectations.
    func testPrintingDebugDetailsNoAlternativeNames() throws {
        let expectedDebugDescription = ""
        let cert = try assertNoThrowWithValue(NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem))
        let debugString = String(describing: cert)
        XCTAssertEqual(debugString, expectedDebugDescription)
    }

    func testPrintingDebugDetailsWithAlternativeNames() throws {
        let expectedDebugDescription = ""
        let cert = try assertNoThrowWithValue(NIOSSLCertificate(bytes: .init(multiSanCert.utf8), format: .pem))
        let debugString = String(describing: cert)
        XCTAssertEqual(debugString, expectedDebugDescription)
    }

    /// `notValidBefore` is expressed in seconds since the epoch and must match
    /// the fixture's notBefore of 2017-10-16 21:01:02 UTC.
    func testNotValidBefore() throws {
        let cert = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem)
        let notValidBeforeSeconds = cert.notValidBefore
        let expectedDate = self.dateFromComponents(year: 2017, month: 10, day: 16, hour: 21, minute: 01, second: 02)
        let expectedSeconds = time_t(expectedDate.timeIntervalSince1970)
        XCTAssertEqual(notValidBeforeSeconds, expectedSeconds)
    }

    /// The 2047 expiry only fits in a 64-bit time_t, hence the skip guard.
    /// NOTE(review): `MemoryLayout.size` here was presumably
    /// `MemoryLayout<time_t>.size` before the extraction stripped the generic
    /// parameter — confirm against upstream.
    func testNotValidAfter() throws {
        try XCTSkipUnless(MemoryLayout.size >= 8, "size of time_t 
must be 64bit or greater") let cert = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem) let notValidBeforeSeconds = cert.notValidAfter let expectedDate = self.dateFromComponents(year: 2047, month: 10, day: 9, hour: 21, minute: 01, second: 02) let expectedSeconds = time_t(expectedDate.timeIntervalSince1970) XCTAssertEqual(notValidBeforeSeconds, expectedSeconds) } func testNotBeforeAfterGeneratedCert() throws { let notBefore = SSLCertificateTest.dynamicallyGeneratedCert.notValidBefore let notAfter = SSLCertificateTest.dynamicallyGeneratedCert.notValidAfter // Clock movement is tricky so we can't necessarily assert what the delta is in // the notBefore and now, but we know now has to be between the two values, and // that the two values are 1 hour apart. let secondsNow = time_t(Date().timeIntervalSince1970) XCTAssertTrue((notBefore.. NIOSSLContext { var config = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(SSLContextTest.cert2)], privateKey: .privateKey(SSLContextTest.key2) ) config.trustRoots = .certificates([SSLContextTest.cert2]) let context = try NIOSSLContext(configuration: config) return context } private func configuredServerSSLContext( eventLoop: EventLoop, throwing error: TestError? = nil ) throws -> NIOSSLContext { // Initialize with cert1 var config = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(SSLContextTest.cert1)], privateKey: .privateKey(SSLContextTest.key1) ) // Configure callback to return cert2 config.sslContextCallback = { (values, promise) in promise.completeWithTask { if let error { throw error } var override = NIOSSLContextConfigurationOverride() override.certificateChain = [.certificate(SSLContextTest.cert2)] override.privateKey = .privateKey(SSLContextTest.key2) return override } } return try NIOSSLContext(configuration: config) } private func assertSniResult(sniField: String?, expectedResult: String?) 
    throws {
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            try? group.syncShutdownGracefully()
        }

        let handshakeResultPromise = group.next().makePromise(of: Void.self)
        let handshakeWatcher = WaitForHandshakeHandler(handshakeResultPromise: handshakeResultPromise)
        let clientContext = try configuredClientSSLContext()
        let serverContext = try configuredServerSSLContext(eventLoop: group.next())

        // NOTE(review): the generic parameter of `EventLoopPromise` was
        // stripped from the extracted source (likely `<SNIResult>`, the value
        // the SNIHandler callback delivers) — confirm against upstream.
        let sniPromise: EventLoopPromise = group.next().makePromise()
        // The SNIHandler records whatever SNI value the client sends, then
        // lets the connection proceed.
        let serverChannel = try serverTLSChannel(
            context: serverContext,
            preHandlers: [
                ByteToMessageHandler(
                    SNIHandler {
                        sniPromise.succeed($0)
                        return group.next().makeSucceededFuture(())
                    }
                )
            ],
            postHandlers: [],
            group: group
        )
        defer {
            _ = try? serverChannel.close().wait()
        }

        let clientChannel = try clientTLSChannel(
            context: clientContext,
            preHandlers: [],
            postHandlers: [handshakeWatcher],
            group: group,
            connectingTo: serverChannel.localAddress!,
            serverHostname: sniField
        )
        defer {
            _ = try? clientChannel.close().wait()
        }

        // This promise ensures we completed the handshake.
        // If the ssl context callback doesn't properly resume
        // the handshake this will never resolve.
        XCTAssertNoThrow(try handshakeResultPromise.futureResult.wait())

        // A nil sniField must surface as `.fallback`, a hostname as `.hostname`.
        let sniResult = try sniPromise.futureResult.wait()
        if let expectedResult {
            XCTAssertEqual(sniResult, .hostname(expectedResult))
        } else {
            XCTAssertEqual(sniResult, .fallback)
        }
    }

    /// Drives a handshake whose server-side context callback throws
    /// `expectedError`, and asserts the error is the first one the server
    /// pipeline observes.
    private func assertSniError(sniField: String?, expectedError: TestError) throws {
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer {
            try? group.syncShutdownGracefully()
        }

        let handshakeResultPromise = group.next().makePromise(of: Void.self)
        let handshakeWatcher = WaitForHandshakeHandler(handshakeResultPromise: handshakeResultPromise)
        let clientContext = try configuredClientSSLContext()
        // The server context's callback is configured to throw `expectedError`.
        let serverContext = try configuredServerSSLContext(eventLoop: group.next(), throwing: expectedError)

        // NOTE(review): generic parameters stripped by extraction here as well
        // (`EventLoopPromise<...>`, possibly `ErrorCatcher<...>`) — confirm
        // against upstream.
        let sniPromise: EventLoopPromise = group.next().makePromise()
        let eventHandler = ErrorCatcher()
        let serverChannel = try serverTLSChannel(
            context: serverContext,
            preHandlers: [
                ByteToMessageHandler(
                    SNIHandler {
                        sniPromise.succeed($0)
                        return group.next().makeSucceededFuture(())
                    }
                )
            ],
            postHandlers: [eventHandler],
            group: group
        )
        defer {
            _ = try? serverChannel.close().wait()
        }

        let clientChannel = try clientTLSChannel(
            context: clientContext,
            preHandlers: [],
            postHandlers: [handshakeWatcher],
            group: group,
            connectingTo: serverChannel.localAddress!,
            serverHostname: sniField
        )
        defer {
            _ = try? clientChannel.close().wait()
        }

        // This promise ensures we completed the handshake.
        // If the ssl context callback doesn't properly resume
        // the handshake this will never resolve.
        XCTAssertThrowsError(try handshakeResultPromise.futureResult.wait())

        // The first caught item should be the error from the context callback.
        // Inspect the handler's state on its own event loop to avoid racing it.
        try serverChannel.eventLoop.submit {
            XCTAssertEqual(eventHandler.errors.count, 2)
            switch eventHandler.errors[0] {
            case let error as TestError:
                XCTAssertEqual(error, expectedError)
            default:
                XCTFail("Unexpected error: \(eventHandler.errors[0])")
            }
        }.wait()
    }

    /// Client sets SNI; server must observe the same hostname.
    func testSNIIsTransmitted() throws {
        try assertSniResult(sniField: "httpbin.org", expectedResult: "httpbin.org")
    }

    /// No SNI from the client; server must observe the fallback case.
    func testSNIIsNotTransmitted() throws {
        try assertSniResult(sniField: nil, expectedResult: nil)
    }

    /// A throwing context callback must fail the handshake with that error.
    func testSNIContextError() throws {
        try assertSniError(sniField: "httpbin.org", expectedError: .contextError)
    }
}

================================================
FILE: Tests/NIOSSLTests/SSLPKCS12BundleTest.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

import Foundation
import NIOCore
import XCTest

@testable import NIOSSL

/// This is a base64-PKCS12 file that contains only samplePemCert and
/// samplePemKey, no extra certs. The passphrase is
/// "thisisagreatpassword".
let base64EncodedSimpleP12 = """ MIIQ2QIBAzCCEJ8GCSqGSIb3DQEHAaCCEJAEghCMMIIQiDCCBr8GCSqGSIb3 DQEHBqCCBrAwggasAgEAMIIGpQYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYw DgQI7wzJzFMFVk4CAggAgIIGeF1vL6bWY9kYYUOwGTHdKkFX9sNcI2r3lI5m o4knYU14oCyh8HX1I519/niqVmx9WujM4AfjVrcTh2XcTIKiqSLNXFa0r8kB dVR0VUBcutE7lWth5mYbWNQRrXmnH2KI6WfxplCBMk1+y973YNOTUKqOweQl d+2v7TPgrnKUdOYzIBPKl170F9ENFIWZJaTkvKiZhKIX/MDhj/2JorcdG2Is fpCz23KdWpbYN+7lVe1XuZ/sth478Op52wcd+Yhp96DSnP2Bzw17FjCq6ghu DU0OCp5DAZyWh/3aZHB49NojbsqC/NKrkN2dXfcTL69IzuSoK5z3q+zl5IZA 3AEegCDE+WmHwQu1kC/BkvJcERo2fCkoPQVbS7xQkM1ZecROVlUoYcv+Eo77 /2iu7Yzwe9Ymnl/X7w/GqSm01z4YFwS+7Jkd+v3khlNrUh1UwF2YGeHkIs4R OGb/Pjfo7ciRN3vDW5c1AvmrvxKhM3NN1OcwMxSqnxkxgbdFP+LcbY7+E832 5vR+uhUlnbbERwPRGCxhE1qEyu1fdFpKHJUWbQAW4TtfS3OqYHud2BWMqFbx vAgNMJCmO6y7NUvmUGw7Zc1V/88GWA29fUuHhjvf4i9QNFr70ERm0dXBZlaM CnMmHaP+UlEwbvqAsFc8ZMiz+o7wVSnxIXoLKatmDQ5xn7rGBlL2MjxR/bfp pzLpEnIWk3sJWKYctShxJYsjQ652d+lnHLSyjTY7y5vwxhmZmI1V5cbuuKnp L7P/oG5WRYkV9D8VEvAOEOw5h10Rbfj1dZgn9JNmpu3dBuhaxFPq87F2u2jX GTIrSD8mH+hsFCMEJLsTpIB7YX81+vJ1y5nDctPTnET1qaEWRUKjOIzPOh/d 6acnFoQD2debEa1EB4dLYsxoXMUbTBdCyaXyvy2zhl093vDvWWkS/ufZJqND u/u67+fmyNCl2P94rDMqIpse6OCi6NhNUfjh++a910iiKqGbN2gAhRFv5FYm rqtCsHSs6VRBoF/qe8+kowl14QOAVRXPJri5YKzb1sh13kOottD3ESnabKaI KBp1LQSc+QGC70rdm/agJxGcLgMaR6tGVN7cFjUoebDquh86KQ/trZqcKgLV AjNnN1+6Ee/Vn7nDxBOxLTTvLOkTJ2SDTp31Xfb/DRPOLIdELoGp/J6x2zQT HDBXAHjkg8nknKwqvsLW4AFoGCLEyrREfrDlXOvkYKSOn1VAyHWS4MMi8RpU 9GXrcbDvlndkIIgmvyRc94Au9s1RM443Gsik59FimNCgYYJfMmiw1jx8psVO V6UO7B6OIc1CtgGeU8hghVkL75DevTrSlaunzWrkZ3GPzYQ/D0INp8SPU47O xqqaegItRISb5UHHgwIZlCTSZWz9etxx0zNbFUrZqMfD8IA3X3MZk2N1Z2XR 456CbxeGswzUo5XWchKN3whwCt6S23bVqTOOrX+fyC0RuYa22zbiTiyLVhK1 Wi1c/D2G2cA0cAvSzw15bdXFX7/HUBAekmvyyOlAoKAG4tb9i95GT2qG6DQx ullqB0/R84G3eePTMpDBrDOj/PkmySyGQjQifRdeUMaXzSBSi0lrxLl0VSos wqnvhmZ0Wx6kSurfpuqq9gc8t84Dd7NrQYXpUk024+Mtcyem+jv5BiL4QFkC Dmv07avXTPOIlbxLyYm0vroin+XxsGv0mXTXG6j9ZwwVUtSVhdWVHnkjJ4dU 0ZgrSW+X6co10wrcKMTfMVrM4kcvUOIixP8XYGRQSRkAL4IPdXP3TiYdheDk 
2o9PwTpukbjeixLLfqfbVjm5yxsPnAogMYLbo1ZPUarIUWn02cTfzD6uv1cp iXH3t7MU+uCkG6NYY4xzwvwFFaHXEgTTN+cEC5H9l6r6cz5K2hPp+t/FqrKK GXhsJOJGR0fV3FSPuPQWQGCr7skNMHjATEFSneBTfQW5LViQsFLvzk3+3kbI IPuwGsBiqS/jgrjUQQHb1LYsjElJ9Npv1JvybYnEJUZqd0meiho3lpkAjCgC GwAeOgHUinkR6iewCTkeA0+h5ISonjkokWdcDsa4/5owU7RE42wA3twr7K4l xP/Jndy9IUimtrQ81uWZsXQt38KWvEsQC6S19z/8iUYR01qXGm08ernLVcwJ lGbvZK22Z5JW0gseOaipFE3CH3sw1TDn5PzAjcykmYSxGIhyDS2esoA3AvMc GEFFgXNcMf15qjCCCcEGCSqGSIb3DQEHAaCCCbIEggmuMIIJqjCCCaYGCyqG SIb3DQEMCgECoIIJbjCCCWowHAYKKoZIhvcNAQwBAzAOBAiksVRSUP5TBgIC CAAEgglIKda6J5g6raXmRDIOc98FveBozM3SQCjsiaIqq6J+vDg5yatdtrd3 jjdyv3+cR4pYGvUQb4ND7gBevtgSGINAYj8oqKI3blRbkqXPwUNR4/lJKvJ8 01MCEwNaxv0wLkffQocfL3ALaDVfWNdF5lMJ20OKvHOlS/aH0inbNtEGELPW uyYQFFwMQBXCv3EbMb+UXyM1L5tb/lKaazRt0o3IfTbvryH4qYeWD66R3UN5 WqsnARU4b5Td2XPxW5dTHAxjkVulQSYUE/ex1Dbv4TgGPsQ1UKQvyPb3cxD4 U+Y0zHtg1/i/RpDxfpIkmiQesWOD83azKFnFegv0quN+bULShq+aoY98qFNT yuFV6BpAXzD89u0XuSLaDpTFfplPwzHsaAtgK1XAuE9X+DBCn3WRSrKghT60 OTLO1y7L5wQ5v9PbomtpiBFJpAN+fe6Y391vnTlIYQSAyWOtPiy6kuZRncSu kfoJ0phN3oc6KV7lRCOMi87P6TS0zRvGaT7MtL8iljI0paWzsUKf4QkK4jc6 4KqLRH6Uf1e0Mco2AYAJBQzAfPxyFq99v7laxFc9qrC0wMdAs+sY/FHLptMb vuyERFrPxHSbICJcLTjy8951jx/6MQRpzfK4jsA4jio/WNOkiI5IQO7ihOpU pvpxEdNYGKOHB2HPy3/JXLs/9Dv5vwQ9Baj4ncrlL+wt4ltiVKZ36F6dx7Yi S1o/jdkafbuZzbXf3+/iMTc8NgWh8GVhQnkabutyWcqFeTd6rATrRxr0VVeI 5hzwMxlABmDcAc9D4R3F8eJEbTkigah5ccnlT/wxVXB3azXJ3xQ0aEdF/IUX d28g9coXJgKxlMRlHXKSQEud0ffE/qbZvzI2+fycNc+3NhCLssj/76oYf1Ju nA+Yj7edkWLV0pnyYhehEUpC8Y8M+GZLM4li/7fYIxh1hgb6p/5FFjmbnrNM BpRaZdETHeLcf7jGm2gV84XK6WnmneHxIjXbhazE9RIg+VJtfRrQPF0RBy+B jLdwCh1Eh8sF1yOMYlPLfw0btnLTWshbo0mRVK50rElO0mqnFP3j8D6Bf4qZ cqHdlDQKF2or0hB0hM08Ik99Crv7Q0YKIW1BIzNYIHGtOxgntppFHdIZIr+5 PvECPGDgAsxsCIsaHFN4xylRf8gJ3YMm4FaAcSAyfabbU52I+tOAlAaJue6Y GTuyDzWt/IlpvGLwLEDFPf4whDK24wjjvU5laUadWSw5ydATlrH8m1kBlr9g MEd0WXRAfJPMLXjEDPpMalHCtvX3FN/xEo6EkZrszuwqsVp1EKXVXDX9u+RT lIVZOw+y7KusiCqVLvXcA9//6w4DSpDHH2oRdnhCROq49M5EuAdAn+5exmaZ 
siIs8sNWbzdU6gl5xjRM39MyHk9Xeu82OSEfQCkFy8QprMKoE2Be1kB7onsk R1EhLn3u2+NovXo1tEx5j8ysMQqeDE2XwKuMlb7nCPf6e1q9vHCxn+47IjPf xLQAXNvbwtvUWulNGVrJaTBMbvw0i/LLNkpiLHFZ5YeuAxEQtSkLA6Guj5Fy GohXwz2nIGmsghKh+t7uTkRldVPPhT/YMqq/RGHr+wjLt+/LkOpCnRJ17YFk tN4UF0MN6UJvgOY6kxFPRQy8N19Ekxao2ix0sbqMBbgkpjiARxQaJ+7Bv4jG QqXGAK4+YQ1zfOCfMPNB7/BJ2D0pOHEKc1ush4wp8HVHnE60u62UY7m2oNDT V75ifx4zO2Uoe+kSufuAKee/ZtPbvxzzvy7ctL9tecSTFZ6vzZxbhEO20rnJ lB6PCZeWQTYkbSflEJpBotFaUI+GrO/G5OMSPGDn5M/arDdgfjgrfuXFquyX Cwdf1CTp8N6Oj7AyUnToC3ot6BGXmZethmLQDtvxZzyZyQB2QGoHFH62OPVz UJCwTxtlZYH40jM8n69i/NItjvOrnwcfeeZXvMOJ7cn1BLSgnKgKCRSSJvzh Nvy9IloJC0vnLa2c+WL9e66yp1ihzngg2iMiJei66wrmoVeLtbbAVRyIMFD1 lr6n+vUDvIZYlUwjdH1Z9d/Mo3uS4WygQk8pBFy/3/Btjmum23sh67JTJTC4 aOqmDV8fMlZ4btx1nYqVFlSqbgo98+CkHMN95KnE+T+8QjFbcWT/i2isMqgQ OY14ozTQuvTRUgcyN7wG/wggyTzIDiPCnbZKJJ3Lvg2hymdBbWhFOjUYX4MY 0nARGBfnSSlcYFwG/tjIe6ej1bE8o9kMRD/V1F/P8VXpAz6FlGl1Ii56Iixz usvtd6FfRXnziWBPbPmgIGfLxjmodcWAmD40HKVgLoyBHoW3x5MmTfelJ5rT YFHRqs1Sc0dPqmpH608d+8e+Bn13wgc0s6fYNQTjXnK31Scc/SSnNcTyEsvO UHElOuemp8hnrQnrGhVrB9wZWMJuOcNvi22Ccdkji1mjGB9Onsbj1TdVGsHi 39dQhODlKjC+pimqDQaaodMhFpcY8H9jETl/xxCdvaa2eSCY1NpEkfYWBngC CmJsWtVuNflhCwkiUJKVK1rr+YOBx1xd2HkWJxjcydb75weRIOJmVDHOAXd8 ltfvdAWXb7au3dhhd1ofnFuMZmFkZX2C4rRaKht+gKYcC/lhRd7iTA4j8JsO twW7o2/mLSZDTcymXdZT/DyJp9SurBit4QLdeQc4axoiUycmgX5djJqqyN+l xsH83SAOspBPgl6XMuRHdLyKdC/64mvF2/C8PmjXD9VV/qk0xgwYdLcyLlD7 eJhd5litn4ioxCEokQmTb7DtBBHZYkKb8wyr9MLteUfpg6SRnLpKcuYRYpIi MdWqJjlDytwFSLVqEoRAo/HwzL/ekswpJ3yM2cHZ6vubgdGQKI5zhBo0jzYK vSgdr6nC5pACmJuDbP1aRzw3JSRjOk1U91IQ7/JqBvMKRJPv0rN9YGbTJC1b o6jOHl2s4IpIOSAXqNxDnCXqdqM6S3sk2FcDNva4hNrdA7mbL5TDqZsxh05q NIQDEaE83XdCVU60USCmCjju/dAb/+EfqSYnjWf+Zebfutt9c3nsaksbQSp7 09kE8qnPjJ0wH56gHdfcszwInPXTwxHHW774Y4EKKpqZtl004VUkbAH2SC4F MMJvaHMzXq7rSvMP+x/96rrRhIL3At3NfIsIWjwM82go/wvHKm8mDCENMSUw IwYJKoZIhvcNAQkVMRYEFFd+Wbmul+GY8fpXGfcPZKp7IU20MDEwITAJBgUr DgMCGgUABBTh91DEvniLjCaN8lVBeRIN2l/ZewQIb6KxlvnE9hUCAggA """ /// This is a base64-PKCS12 file that contains 
samplePemCert and /// samplePemKey as the main cert and key, and then extra certs /// multiSanCert, multiCNCert, noCNCert, and unicodeCNCert. The /// passphrase is "thisisagreatpassword". let base64EncodedComplexP12 = """ MIIdiQIBAzCCHU8GCSqGSIb3DQEHAaCCHUAEgh08MIIdODCCE28GCSqGSIb3 DQEHBqCCE2AwghNcAgEAMIITVQYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYw DgQI1FOPOf48OAgCAggAgIITKOaK3r7p3wDsGDjbO5yBp+MA7UelAOiUNilU cNcoJtNsGJmKL+MRvhD8YekoQ9lUg1T4CFmWmgIRVcLDBmOlOScv6/VFTl2T AgRO6cqW7QyZBi13G5wJhvIPjb+zdhZMhBN1R6145oFnDinFi65yPeui2ohn s+GKH0r2sJm4hMu/O8YejtItwQJwbC4Vzasa/pR6XFsXw/MNzzIql7ofziV+ LuiZlE2z+9Ulzxn6Zaqg0B5wDa15iKk3bPe4fpAGZUu656GYYrLZMKBC39xL xEhzu7H5P/AF5ilIbMnVwAeRoeh5vDgs28j7gGXgklTBYJukWKAqE8bOWabd bddwDTCRb+ifvDFnYDSeEXLkmaA12jLEx8NF0MsUoptCq81fGbXkbUPvyU5l YrL1BRnisU4KnDqBDSvu5RLpOUNSq2RDfQvusSx7nXwfavM/RYHSuPEM0kQK XgzOU//lfXmuITnp50hy4CzYeaVhrAlTbfY+98DvKmI/go0otVNFbh9n99rU nwXb6tUduYlfxE2HRWaPBrA5SYF+hQJomMNqj7odeELlq6MAOqa1vzKdo270 zXAp2lpRp5jmC7RXAJV2GKoIvdfDYBc6vci2tsyha9KqNJWZowHsStLA5fyE 3IO3cz5reW9izGdNn4pUAsoh8aAA6MS7cnpPqUUk3vIaAsNxcFdWk7JBDDWA KFVYVQRPOLRvxVyNs/wp3ySda2Dz48aoH6tCvTg6m15iRN4AKh82SBVT13ko jrHlIfTNvcj16ZDzXn6nQH/jKvkLhLEkBiav7+UIKzx2WvnjqBisBVRBSzqB 84diJrK1jHKX8yW5A+RshMcX74H2oS4yddxRAnoCRLsSu+rGoGnDw5a0kgs7 070wN/gE9Nl92pfy8h5G36Gzy9jU3XpfINehCc2XRVGQoCDo/bLvHMnnXzw9 Chyr2/CEPDc2x/l8B22ptoBAU1qFxjq+wGP2ysbmKgQ85LVsLXOJpiInG1g+ nuWBCi+xRRs3TsoTTGOF7yXV6+Tq9J0uorQehaLkAud0NKhwvuZIasWtrzWE QnSQ+rc9u4HkuoIJHnAnBOIIMtcRutiuqSKdUCV8XrpuLeCDBpb0FBWPYQRT bp/fN1rtjwTLoyYi+pfFX2cLExnVSx6eJ79Vi7BTCF/3lx9/CeA/jWgNkBVv +F592quY9oKfg/46aBL/XMMb8H2n1MfBrAXH/2A5cM13BY98P1GzeQ6t7yM5 sR3/Qk1fu4151tsf3unsESIFjr2GiPZVjD5UWlOgCKViIP1UWKAOvPptXac/ IAsQeiMKLQqDAC52pcf74o/o2FJrn31qyaMh4373ItCES5yNGEoRYPRWbydG mNYYOmCjfH3xvmH5j1YGhmppO+psO9B3K42UYoZkpzuFFtRRZnTxLP22SjTO Wwt6x8MuDOetqL85F9Dw/ph/l5Ec1iNFQl7uDczLEKakVFOYTzSstbUWNvAl ZfzjdWb2bSLKjRSVC4T3CK/MqVD8VKuquag167yEJzrgiIGfJ5FX+NRPg/Po mzy2YNivMbKXGFet3ZzYqjBvwGBjFNTmRf9jmWtSZ+TOoHaQu6aPLJzdjmsx 
AyyMgezr0lhdDdVjlcq3o9mBW3nENHChDPQ76h+NqqwE9WkVX+c8YAG/BvWM NXPYc2cOZhuhPHXfjsPIeyUS3xxuDNLWVihu/zpPNvcZFV+14EC52elwtTGI LRb8ugunBynyRx+YWVY90sGos8U+GLS8K1uWtnC4m9pIXzvTn3kGruxipQRs R2qfwZqaVDQoGwIzKzVxao7HDPKhHk5QApmWRMpKTWrqaLaqBpVD1aoB9Z8U mOEOWwP8kMbm2xD8vk9V5OWnqkmJyFNHjuzAohmGN7KcyHICDOYhbkmPxCxV Qgytc0QX6pEzxHKSRjeCYS3KoqLcLhJEC+5Ou3tdLmfk2WY8H27fuXydqXVY x6UMVPtpKH+ZsJFl3B7PS4qGjoeqI3OuMpeLry6DYVZvaSPPKOen8Ca4qA8H L5xvGMSHmOKxXMqOm19E0xk8DKgRQUu+2hCgzKqV9re/h2IE+cE2H6NXztvf rH5U3ZI6v+7FetXy0DAVwyhqqpoV35lqMFRuvchSXsJ75Q0oQ9786PK9U6hl VsVqVW6pY+T/HUHWv/bLSGIyl7jla3ElVTpRRapQ19ZsVivVpgLYi05EKv4U +/SCnaVCYuqt3xj58HTW6pvqFsxemJyn1qgFVTxqEJ+fNMWopnTKtAA41ch6 OKO9p9KE5cT4YNR5VcpUpPpSUuB5JG9djAn7dZAsn2XiiNmTv8yBXqwuO3y8 lkn5bau1LWvp8vMWCV7zmQy+wcfB+B5zYrUm9hYbLX+GM22hGdVuOxktRxtp aVdQwIMVwh2/4dnGz5VAOaJgpcnrpNLlIOMhIAgCeeqn4RnhJhmSgRqr4PBi G5LnV4gV8l/K5aG0ba03YtiEdvS4W09wFr2AwSXxUV9vhbsufNS0HxioQEmp xZ1EJpahpUiGmFHtItv0hp4Pba+4/ZvpgbP3BtaVXQsYDdpQ/0Tt6S9zwrhY fcAP58fFw3QyjxUoPNhXtcA+NoEdq77zCuByJnS9CWDWMVcpYRqE5eGpCQcT y32kMnRc1DUcOHtK9WzuJVX/cQ0Rmy56hhNSNX577ceAHbfi3WcBUoXERvMZ 1v34EQ5l88fmBn7gwieNfOxKacLad8mYoLyA8KWMKwwCmDVrwtqpcv78bs1Y 7sMt4sJWm85/87AaacuaZyufhZ8+8vju3QvC+aQHPJmQ+U3EERgvD/hx9vaf lPNHBTu0TP+gie+bHpm69eJ0irtAUIt2A74fsWIhu7PLNIAoT2sFcMpLa4Hi NbM5c9h+Q8cibXFjF0FLvDb0kEHW7uBubNHHPbHVcqkVPkDJ7PSMEjrFK7OF k5wHIaiWm1EacpH1CDv+P1WshulwV0HReY3RyboGtQrH6n7ph/bRiWSQaPYC N0L5UbHiiN+4VE96oP/Or6gvCMkEqDza7sv2mQOxNW+2Ntz6GJEkucwq2UMC QWWUcKEW866bbRVd2ceVDIOZE7DhY+FSRiNYoWvIVAPYKV9UC8V6wdojvLww Y/TZ/4XXqKmNEZWGnVrIDV/eI+AhnD0KKNWwapyuQWgNwikAashL48jk8PCu k6oS5z6PBXpIs8oKsBJcONAngHKxCotVzUhaiyiwjvYCDPX/rYuJZ3ABz1sd 4O3DLkn8Ep0FpKuyGtOhaTeqtmVRM7DHH2I4qB96H7ljP7AooILXj3o1Jq9Q tQ4xp9Cyb31hQg+G5ERGPDgtLSq7BtJUyoicHO6mEJiThhP4m/99miuk/MTI yK0V0iwxOfGOjmjC76+mypNd4fEbPd/SWr+sglBdJKh5ALipYugEfBWHZo4l Np7v4I+FuuiuCNXuQNPEi7qe2b9n+jKKtWnviyAgtgmx0JxLwAfvRuEHX0qA 8JhpG4yiKCtg/oEsvFxXCLVCKd9ywCCwq39d0CnN+PFji0pCAp5pai3WlibA 
5lloadn6cSGH9BzU0Nkz5e7uQmrRkllkkORto7SwpmimYmU5xQijqZIlO/7N 8DzrnZcALrhojViHUrCjgigpLsMNKia/+tqMj07PXuh/lh7MM8qVs3L+XATi I+9AMcjXI/wLQxyItyHsg0gAQB0rL/gTW1A/TZratZr8FdUwp/mn9UoLeyAt Wp0kNgiCPhTIpNyQyEBJAlPJSPqKrnGBPga5UZZY+MUJf1JqBt28p29lMnm8 3x7kdPqBaBVe1xmJGoAOV4vBK30M///L+IorPGNEilixkhkz/YHEZwAE2NOs Ciq7Ikr3vodhwKclWyH0v5y7C9F+1q8U+61/YcUCJVcy/jEMWenS47ejYKXu VCZ23RkwSCg6MceQ3hLFjvsYJyXJJRzCEIdQUXDTetyAmZM2WQYQVYlFk98C ZQktu2G7OJM9W5FnsuI/3QmMyBFjnE3La1vOtf9RwISdwqWH8tjpaY6aRV0X +Ict0B6BJLslFxWILVg2t6S8hRTEXryzfh1teHkdFQbJ3FzhvQOaW5AJ8GWD l9BWCLwSd4IyQ4XqbdTyRFJ1kqfYhyf9ViChZNMnHVr1vyLOpaFw8yzlIkUq dR23PSTHrh4uFTw8XdNpuiTGW1Tvdg1ajV+rukMVbJK/KYWRNp1a13+ElI/g 52m4Ha+Yl9lkqZNPAI/hYxfHqmoiiAPVvgjtZdY7Fw3xCTdVuuw9kAnz5uOk yvmTwPXOH3EX3Y9TM5UvBL+kN3yH3e2Uycsie5kqNcUVfsbP95K4a6qIcFtk lgDk9k6hqFWuQ+xyfgQL9AJv1QDM0Rq0/0+5svYs0bloPXjIWv4w1ftxiZh5 lvikKDiW8f0ia9eUpgm9wpkBRz3flnhHN3EbAKu204hXBdpHrGPF4hde4iCR 5h46QTflT4+uATgsjKeDRNHSlJotylXi9fDIkF+5FvbAI4NuGbNuYVGBO5Dc RZcE8laiol9BA8WqMy8iznZW9+GzTdH6I8VyNsllVl2Bn0/zYPsOgKgJUd4y 2GsrU98I0Z+VH+o2uvdsHu5DR5mNm0D0p8o6jrOOTp8WM2vdcbDrO9LX5ohP Vhb5Ag5rvmeUcdv6nA1jwV8VpKbmuO5pqhZRDuUBG9zBKwmkk9INVpjxc5AP ONKaSwhhpEi1x0tQSBWrZr3DTVmtrGi48klWIAtOCVTMBXnS85JN2HqQVgu0 jooki5k8XU38dPsbJ4ZW+Z4l85DDvmZIDUOsdXtGDMPYBn/ZEVTFqGNrRvDd fo+viaUZvMZsyAsRItupapd64arkH0o6yKijRSB0XlMOmkbTCk+bk22XGjz3 iWaoXJlssQlOjhje6A/OB17Rl1nCnJXmI/YPfuC99skgkW1AGqBzLYYJxxiJ OP4qHUabr563AdLEs2n6zoBff2hufTFcBbIw9xEyiiYnpIJHC+ADHCqQuJnw 6GGEp2Tv5nqdAhR1jBqnr9TlH1GUC3c2mEC/t9C/slJtY/VYg/JvNN/IFBzz Ppu0JjUdkZyoPl4SD7Yqu8zarcUpxGRSSUKwKSQNW6G4U1lkqzrXogUFSc7o Cp8AlTntRVf8mKTKfunqw1CrLQ0FeeDVtlsrYtQZdYVpMoMP3Ckpb3HRazhk OC8Dwq+9LaZRJxqQSnufhYQneqHa6wdrWzIpLyX74+gCGR5Bqi9FEc0+B6Ot NyNbztac+HxuPmj+VrmCr8Dbekau2ViJ3W7nE2FBzgwc2+pSddXVIRs59IUa QnJUa4F5YB0WgwAta2MoJ+fNYk+dV13rrwcIpZbVHemS7hYOm5pP6pkdb6Al LDoELX0M1xjxMvMByG4uEZcI+brza0BKp+rMDcf8O24komjO2apEltLZpONG 5iDjtKacrvGM4yYwAHJgY2fmg26HYlwv94gM3JS+mL7m+ossM4GjLJSBKnzK 
wSIhwITtd3LMTlVFr58R8ytZwn8JKmsyh+7rBSmpq1jE5Jx4pyz9sxFtidzQ q44fkAMgoeuqLwo3WOsjdwpOXeFrrYLy0I7lQ0NQwCU2dTy+JweOJy/rjClx GUzzsCgXd18hICO47QeA1hY+pxpBZQnachzBH47VN03Um2wZaK+EJbpIaKJ+ O/ehnzQju2CbStW1EysrD9G0/MHrRgHInOKvIsnJEIwlksEAZNzEiM+v3Y+H hgFWmEkmrePLQBgw9oS1wg2pCJzFarPxITHO+E7E1xAmfUXflvgooQbnOM0V BDoF1Dpxy3yhxkqXPaedKWThhhNlcIlmdpl1W575kY1RBrztQo2HCFeXJIQi MVSYQ0+tGwXyml1uZTZ8dlX+8wC7BN0GBiraYBw4ahMki+TsfPPsPzrygSsu cExavBjez5WsEzAusw+0mBjpC2lR6ynTwI5z/4XpU/sQYZermAPhGAsvLpT3 nhMAgB9KNO9CQQZYgwzIA5QrlY6DC4hlBPSa/ICu0XMOA4c9amhcgXgFJEUj 8uT1sfTcFDdRsIPqsszinEcGvgPTaw5XcJIS3owN8Rp7DjFO3gywPeVV788k NGun8cXF6Qea5neHeKavM91SGYpeyHR75ZS+k/ErZJ3rJ/MBrlNy9aTM8M/a z6Z5IoXddeodf9Ugj3F8HPZjVfFECX+Hq7kd/32BEAHVsctDLjBkPUTOI3oF cqFuFQE5Oz04wnF587bodl4qKUv2ghaBkpINl1oJkr0GKleN4Uma8P9dOmD7 1oy2YjQytQ2rmpUIJFWxkg9ffZqqawlzKqQn6kIAGKVKx5AtSuDasSQSo2u+ w6YL4I042QNndjiZI4FHxmH72nw8Td19ljiy9a54kCISfom4nBhbb2+I4saX 15kbrWLn4lbbmxF9cSNzDIWQoC0uRqFHbbqvfeamvNg3T8IwBd1D4VAoKgW9 Z6CGVQGUxHVN0NBk0Y0Z/cTX8uLe9A4tHmAxqOcFQTmegFW7HY2YbNCU99/o +BphRHp7Uw8OLpisKzi7/UZatazpPnV5K1kNisWcy20q835zeU1x/oJQtejL OIrTK5p1aa6PnQr83fd6fN05MIIJwQYJKoZIhvcNAQcBoIIJsgSCCa4wggmq MIIJpgYLKoZIhvcNAQwKAQKgggluMIIJajAcBgoqhkiG9w0BDAEDMA4ECDje zkPin9gMAgIIAASCCUhJe0uJLIvZERU7JtQgspaBeKRKDH71glq9ZOioDq5O B35+xV3F9v5RvLKveo3cRi79FaR/J2RRZVx29guL8orL+bPwqdgSAGquynL9 A/kFbgP5OYKkaQIGldQ/6X5E2BQ/zLPE9Pj+Dj7ERNfYnWL9n2jXLrEe3ZU4 8w2Gko0KxE36rwxwgxAkNnxO3odGMt2FIkktpxOWpkI8sECzxf5UV1yoYhSx T8sxXcwFO+bIH85TolHTCSgBoM39Z69BZmlCIzRp5+h0TiYnacNwOiY5ugaI tRczyCbY0zXuCV38gAgMqQpfrNKpsv0zC5ro99B45/GiCBe0Kc8wP62xlfDW AzPVZbp0Jv8DB7YOoOn7nXbHMrg07wMmRcbEKk/1nLM0ZoULTWY1lLD0UPtQ YvNunuD6Qgzx6rRlMu6vk2LoXfJ84wXe86pgnDPwmebnVYLwVObrCi5GXQXc LRDNxUoNRoQJk2qJyugFjNfQKKWCTmRxpunlu7HwMgLKkbEjvLDmMjZ7UA8m daNfzXfzKimOzFUUGSicf0SHV02dKWFbYo/P+iIqTJX/Vlpb21tFkDUU6UK3 oLGInUvnHTHsKfDsCACN9dTkGaHVlfQKgg4AY+GXXd9c+gyS4Ahd7hQmjqs9 B5WJfKqRZ/k6XUTqfOF8SIWjrivR/ymMskZeJklCQ9btXwFKePX1PulZ9CI8 
NSZxXl5zsOQjsx+zxFzhGCiW1sNQTAkiUEbMBE51v5lOXosk9SSQ2LYoIeIj sc33GQAVxE53rrsNF4XxVTKXS8f94L86wERL5OXHQOGmw0s2lacJ7xITJPtl WqJpwnGqvp8izAfQepjkw+gf0QPTTImk04fDgu+1PcObZYlNFxUjQ6OEAymY B85Ts1fdQU6Pjq+bEo65vPb59+6Pa6K51WkT2FA+kVKm0FG2ACH+YB7iowgF cC8H3/KBKfeFL4MUCJECIxnKj9q4O1NHpW9ELgehTd+voR9Tfz5T8NYBlc/J omXXXsRC1CyNOe2eUmUnwv3w094uBDbt+hkKcjtHfFJ2qb1bRn8SC+EynXX+ Abrge3bgOly/YAhq5Dy2Gv8OblJTgXTyMWz3/U5kkd2wRXSUtmmsuBq90DO5 YKwNst5h/j48QwO1oz5IVpbFLOAOXYeOMo5nqzDcErgh7S39FcoHWegNF5ln EfCh9VgQN33TX0ycO6HaykHj9DGIWjgGLeDeic6Ot03cPWlb1zEVSypAjYxp GYWjNWTUMEnRLFIXF9tXcnLI6dFzi8XtSoOIWmWQfOdDhk5PtWkeRx8NIi6Q 2y0JoP0NMp/YA4Fst97CXEmxA9RkF6F/sdH3K39xCKbSKLBLvEvbjAS2Qnuf a+Hg4Oo1/c+og6oHlV8Z5czjx86Ccsha9O9359/Q1MvWYRucqEX8uJtZhP58 Fr0HmkYeczP1qpNd5rABSpl8a+sQAe/3dj0TANoCxN8E5sZN9MsbGy6u9PEm OMn/AqJ/ts9ya/dtHtDdx5cY0lIbKv/D4GJurbRY6nrhu5l7lFw2w5Ask5ib FJQ3Q/DsJig+i4eA3Hq/oiFvq198gQbjy15HcIUQEP/6hgvkR3sxpjGqxytr KxQXCOANxhzz8lyjHOqLiLDDnmOj3KRXdLBh6UO/iMa480DRHWnfr8I0J5a4 4a9GksPTtu/BSHqKs8Mr1p+VGjrZO6n6sAgs9/1+amo2lKQ4MriVzSvHPJzt xD0cxWdYwvnZG1KVgmVe2GvW83jFkH7MPwco1lNRS6QIKxcNC8wc4u8PNAej 51SbfARpuM2jVTJaECxarRt84OuShXYghwQsci20Tkdz9H/ZNVfV/v+JLC95 6iOyQr3MtfOXSUnSXL+I5cb3uq0Bvnv11rHQfLMuhLbPfon9sRasZHmKfvfI 3A0J6fcH0sEwOXBZTu4TT3Riuju2q0eZyBmhl+k8GmrDZnBh9eN8enBYljWD z6dMTpfp0cF0BTv47r1HqEsUOLwURQR2Q/eNxM6vk6hxYXleCqNmJv+l8Rqn BGLRCh8aIy9CHOP34dRA1Dwfsf4WVOrNkBuxBoAhSoHBEIJErJTq9INWrBp1 wJDH/sy7kEwj7sYBXIFeIx2r7LsAad4akuwdXEMbZVeguF2WPXNPYWQeb6R0 PVFKz5mMBIgJf7ZtQYtt24xTCeSZVeLX4I9IkVhglwy/63n+gniZwSq5X4er SAtlRL01cN+yTDlf4gE6RoYYwrdQITgo6NaAfSgvG1Za87u8aFKmR6C7m4aE F7yc1U6Krf2n7ufTqvH64DkPiDooZ0hsx3VateiKO1q8ljFpvZGkiHkk+vu4 1IgnoVJOL0iREBdSy0IIGIeVN9A6RtHfht+FrwoTqoGLeGMEXNW1H4YhHxjX lbu42EREs45eI3jBdly0BhapkEtvu5A1kg/Q1uZ7BwVhbFcMbcYxkWBjxam6 Wy6GOSOl8PA9IVndX/bUvIyEp+gsiZfNQDTaafuGJ6YKwMOubS3rXg5LXspz NzWmt1bAEGQXcV6iKHCbg9NfvDJG6Ka6oWtxTNtfiDBMpqkwfKh0qplhmh8T MQ8mgZmJYyWTaxqjgkny7HsX/1sbmr9w/uQMb3Z+oWfZfcgDsRsKc2l0jlGy 
zJUrqSNtTSQ2514gxt87UDYASFVuOfgD6NJ8z5T0uO3UunoHSN0nT5/rhrPJ 8dY3B17rTs0D2HwVoW/5W4hZULffNqQZIROUiB8ji3o8yTYqgGl9E6bOA9cF zDfRPLPvL7RZVxUa9cyfhbkmLN4zWLDsngYe694H5VL9FXPhDtCLK60kY3vi 5YsvOJ8O1nRsKvqial/KPy39TgK09qAbDkNFYZAS2SMQR5RvmZ9oeItCVK79 IRAk9VIbnSj7pDMwxfvM1Rt2fFUu1VtrOd4YS9KsLtVIvowbyXNCDf41VdVS f3q5IQ6Ud9TQLxMF78031jRBNTmw11mpM70X5qadkxr+edCCO+hGmT433Xxs t/HYgr5FUh6MV/b/0runUDbBo2PZu2fNutDFEEm8I1MrwrKgmcXNScgOMBJr eQjJ8bstzEijLoW57G530fHi1xhLj4HyKvCGGsGLxAQnmQZwd6yvz2Rme6+m tlF6DR1qBO0YnmtaXjZpoveQcFLDpn8kAf0YHEWTcGv+2ZiYF7I8Jpokv27a thgINn0xJTAjBgkqhkiG9w0BCRUxFgQUV35Zua6X4Zjx+lcZ9w9kqnshTbQw MTAhMAkGBSsOAwIaBQAEFC9mlQ2bgjJlBI2nmTqAAL/CTILuBAjTjTK3aRzy qQICCAA= """ let base64EncodedNoPassP12 = """ MIIQ2QIBAzCCEJ8GCSqGSIb3DQEHAaCCEJAEghCMMIIQiDCCBr8GCSqGSIb3 DQEHBqCCBrAwggasAgEAMIIGpQYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYw DgQIyaU55MJEzbICAggAgIIGeD4Qa22wjWR/Teg4nOs0uZimzn+uprlEi05s B0fKwpz4Ths30avHBoWTGgSlfOG1SyCjeu8L1YOYoOpwOKKLV9cZOSFb7cdk OCSQe8QKP1QmosYIhPs79mGc26dOLYyXV7a1IvR4fDlYfHyWKmsOCMiphVJ9 8NHFtLzr8xs62Su1wx+4wYgJxRQIi1dAL74wzv+SZ6kw//B+K83q+kMIiAkU 6HElSfD7I/V0Rug1h52Pf5OorRmf92PtKXQgxlhzCH5HCpZqQjYZiHNUqC0A P7R5S7zjKdDvgjpjAgE4OFdVCTuqcUYUUwrxjnkFWebJ31jyZRtYul6ItvBi 8wthdS0OkOP/2tu8wZyqEHN5kMMUSxQsBAP/6mGvE/aHlPk8JqWK+vW0ynAj YMgfrkbmAes0YJ8v7aE47mkxTRU2UrfGO/9yXvGPSowm1syTgNtppXr/zYTV 4jucfvGJRX066wzZklHAzTuUl/PbJZzvV4twChX19bd88BUzD1YSl8whHeNW rhcfl8vplMji6SEdLa6Qp0v/xTK4OIX+3CkQ0Q16TvGZqHQalGsw5TzD6kos YMjM+ZslRb4FBtIOPO1HgplP7DXMDJX8tFW6PsHz+YEEHm9jmxQJtypa+DX2 C4ALBzwWMvs/SnkqVjXX1udTj0qWsRBsCFvFsGZamaLBMYSox59zpeLx/dWV 06Zvs0Zn/um0JcCst/GmGnsJOQ36xiSZoraEDrozxfhSuH5nqnw8b1Ja5Tu4 iKp6Am/DP74OYxXvbre+Hg0H022/NKqB1L2tT3RKMdJhZ9Y/gucYwU/t8XmU s7d3gmR4veEo+pXl36bwFJxNWg7Kda7dQL2OmhX4/Z74+yPdrwNXXoFhV4zC lMg+5Z4LThQ2jIYluyZelM6iaHb0j7sQD7OHxdbydhR9T7OTHMAwLQqVlPZd kXIptKjBQqGWU0UPhJDjmjd46ySQcKNbzOv6olc/NY1T0JrAjVRiG4A5NRpy OZOFXlOHEulILRRx+rc4LcpX+TmkN/zYE52I54wILqx7uPnf0LaIGWNy5cop PZ1fiONra6P07N/GJNBn3p7SA4LvAdFN+FRCsF8kNgyw9j59MgPNAlfA+VB7 
WoLFUByYqneQbWkitwST5T6+prK4GXTwFJXu8RqHzV3aESZgmWUmgYPAWRQ1 Hmcro0T6iimQiRKuyI6D/fND6OGQ0cfVklk2s8g/r9lFGHrapt/4P3G4Q5aD MZm7ywuFSTOW/7p4C6GhwUofdo8hrjJ6A6oBUVD0dEzt/QZ76a8ee02FBdFL KfvYXUDeOO16oWb+YQdjF9F8yaZJDSF7fMIeKk+u9EGivmjjk90c3wEbBZq9 1OIGlE2Htw+mJLxRBn0UrLs4JFwuw/r9+IgRIv3K1bZDH4IbuFyRAstYvt0r ZiapyiyJLfn58WoODJXsneUxMYREaXcf7p8Nbl+4ibsS+V0vxgtHvA9UTpAb cuXmTbUdwKmRrvdk6NGDCTOPVERKyzYKvJNWF05LnvQi5PJWhR/4kXDAVVwk 9AnnN/QEC8qk8IaYpCoLY+6AUwgNPVOoAmD2+iaoeS4MxEediAHvIzbpO9uh Q7zDv6KZrd7gEVRHI6NpH21648NBmv0GlqLofmzMXdcLtrBIRbaSIfaIYreX PcfcEwfVBrOn4W6aBCgYMUmzXAeOdNKu3TSuX7wtGxNfrcjkCqzzvDFE7ODd zkkBCjVtMzk4r736+g7DVB8pwsVoPffzIVny3SPuf/gbUJq8oeUnuG6Q1dM9 BCaG7hBXNnJmvImn3hq0+oyv877v04XTsOQp9QiVp8ftLoQaBY6IyPMOOmSt tCfHzI9ayc6VBgwtV7iRwZTLqEsgKzObMfuu39Fx5n4JgPeHMkQJS/iI777z 7yLij9YwqkyjJ7B8wjnXLVs8mv6ZNs0a1RdIAcmSzDrkyzxzryLC/0vEBfe+ zFu3C01jOrbZzZJqYTquNu+yHXQ+wYGn9L7DBy0ymyAvcmpgtdfWW1qVPyWQ s33eeoZ/pbpPR0jaDTgPEbsS3+6umu7ulo+w8vFztmJgz+8jUHuLyuUxtd2I uoK4iNjZ883Og8LTRIoqTwEEe36iLH3h7OJceEP5adMBdq9Dhpm+9rBOSU8v ep8f45tJ2kvrPHJLLqQq06d3KS48vZaBX/1s5rA6RjCJfejO3NCVEVYbYoR2 qXHKAEbNkjy2yTCCCcEGCSqGSIb3DQEHAaCCCbIEggmuMIIJqjCCCaYGCyqG SIb3DQEMCgECoIIJbjCCCWowHAYKKoZIhvcNAQwBAzAOBAjdIC6abxVJ9wIC CAAEgglI0/hPzGIYeB+a2OHaH1zXHi3/mBlfKKd+QLdDdmAfd71TfXODLLN/ MEvjyT/5nboccbnE+hWqZCQXY6t+QtSZYPGdpJfVdWbPLlRcEWRMKFXhb0K4 /uw9k21k4gdXhyzUdUkXyopK9O2J3/UHifXRd7qkvUNga4tHrD1jJ6LSw5yI y1HU4wsV0TgHC3nMvjEJy/GG91IGqKRIx6ejbKAeVrsyBNWF0Y7yXnH0IUlV IQJK6JPKiGhPPqZtgAYTzSkT14gF9oQy3NhHQrDzrdPcF4QSi2ocqqzGfuBV 2D5hTnEA9wbRAF69l/5FlPsvTf9Rn+dO7zdUYm7oo0JZC/BWKwkCEdPwybSz OMTQJiuXPYDGm+qQm07HDndYceE8Bfsj9KX6oOwsxkZIcHumrx7qJZBd8jxm tmqRplhzBTiKUgDKYCtup4LwP2NftOgmuZ5RzAMj5tAV8dDR63/rhhfe6oiw qCprixvKMGvxDTAY7ARoruUGt6ziL7m8RqmW3Oqth0i3ZiWpX14KTGNo/DVG aqsqLkfZNpwvyK7TsKjabmocWJSZGbAlGsS77Z9nxleEPaO+pcvKvzXi3/Cv 57nresgGs7cpWxpE8EIWCHaE0eqGgZI1tPvPdzSLo/Qr73j4QQ9JtQWrsO2/ Fc4ksLwcobkNei5mpj7Ipj1DatzGM0ZFDVzKs8vfxbLRGt4jOXXJcTD5+nKK 
6h6fYekGbaMhgHT2LKvLA/2/XHOxQnhWlIZqUAULdzgup/R2u94za5yAYBQy Wwx74JQFmdqqyUpdTjU5aVMOrjlgPXE96h4Q6mTa2qUXE28RNaJ+jZy03XNA wb1VtRCoQOMDDlGdcPY2TiwPNNrsQdM/nzq5AXqdQBP10zYPe1E4BEdd6pEq JJrvuwwHxEPHjqd2f0Z0Vgj8b5nRkwxAlJ2xVT+U7aISeqYaUf3bmLAP2ZAx pr2y81gLaOroLKDNwwqx9iMA3lugTAmNHzqZaYQjDmm1fsQOXyMkirnO3WYN WGV81xEq3BJ/Bjszd6Bt1g1lHO5LtdqwiAzAF9e/zYD0mOAZ1A4yLpgz+AOv 2SvngpFmy3JfzVctybzFt7kcuIRlI4xTQP8TJZ3QRsegmKYsAZkSFPiGS66Z JSwPng7KpDOlT2wmTdRJgak6Z1Zh52PQ2VdFkm18n0UAmjqo8u+REt4gzIps s+Wrt2waD920Z0JFBqBD58/RDXYSBsU/XIjxwpmClWsOh0mKMyDw4dO2fTBp JB0reL/0rsCXJL1JFKeM+iRQ8BDyRFsk6c+LDCNwCzBBwVDA1qADC7qSClyS hPAPAAxCpQpF/MYLhJG0QBPHG9bkkGMCYSKFZzEUXSnY63+e6ZxdUHcRKaU0 T8Ue0sEg3LlU3aAYvqBq+2/ILfNGI572zLpAE/8EW26YBZ+lFxKgUFMMM91x Hc8THk015pAd763ZG9sJEpdRtBKkoQ3/3A1sT1fe8xCRTfvLZpdb8RBxiaAC RTV0pXXspG8Va1YsOd9EIDPkRfkH/sRsi4UBO5zmgftBWdVn0qKwXuypCudt faFvoUEIc1z+qzCuMT3jPdj8hNIjacuOe1Lcpods2i5CTqP8Hraim1552PZY TNZsQ2aj7YtTXdoKP+KnpSPf6rrpAK7OcvKOuZHVKwbs6z+TqwGjmDT9/QbR vC+DVGgn2WY3BCRRqUQegY0LBJSrpJlVCmDQ1KfhKCkPyyCbHd3rIi5x6pty T7wp2EKplsXnn8hgdouKJX+24vV/i49DDEyC9eLpNO7WtDwQ0yHBbCael7fy 4CLoSMUptS9DWQPjXQ84qFdaBKgcw+ALtcVHfKmS77zp9qonS7zeGOanAOTL kGKHIVzyhb/cHYqCYE8ldtcGRWa9n4Ri3T6X1fZ83Bp/tzrXiA5uzAI15StY NQyewtou/OnDUX7weFnMMvNp7y34X2J7uIe6ujvTAHg0MFdqcoPB0bKst9iT IQdsWYLYMpBE0fgYlQ83uj081IPowz4FMORHrkU6sK62IViDg/rpYRkTY0E5 AJJ0fd9HK1VTo9qg8VWyh4n9YfOOU6U+g+DXehP+LW7cmQDmsIAFcJGK2wWk G6V3BJgjXV9OuVhC0/2hqV7EhXitQ4FUjjiEAPsrVl3lg4k0tHkn3RTyRfqy HLSgrxdc+YUXIBPx6jjasP6GF3I7j7w4HoEWNI++9NxDLMahKwQTftaAT5at N8JStJg8++VWd7ktPNEz3q7WAKYDFalpyW/EOFQR3l3phQKZEtlEmLGV0r0M NVLIJwUeEhYiFhoZvZThsBhFIU5EDsc1MWbmjZf+NiCVJB9OG6adl5jV6PEV VzsCC9UnlHENTimRocRUzBp/85Pp7IHV10w6r0LSFyQp70OSfsEUR5CK6xTO UfMXOJvrG1cyGyu8I3vK9MqCEdDiXjhhuExI7a5syRAdF/qQ2OYBol57oE5z betp7Ph4btu76Ub43E6nnzqHB9ey5EzXxxNwaqvtlWV405Ux8annKuaiXTlv T69S580zYJSWDAtRhlND3IBMvAUxdTU889ZnhXIjvL/Ads1Fjh1lEkWZsrtI UeMAP2TiskPHNgj3Xl9yqxozYdqjRHLT0PIBmRPcaABGCtXeoX5X4wb0kFnP 
BDg9Gyxb8YAXXiKzobDOCSDBZK5P1F72y3znQG/Y/xJbKp353WNSDPXZwpvy NfQLotdq+Amt3tfv9OA2hi/719oUtZrIaHTerr2MBagp1SIztCoTQmmfdlyn eHUHi7B35vy24eAGGbSuQMnQyf7+DXnicPmptn3Ltw7hmiEIPe4UdyrrPHdT mpjB4JGzhlRg8s+xMI5zIdOfo+MgA+Ars2zYIoAR2B5dUbuMRU9IoiqdH0Xq 8z2F9MOvublsMlWbtm824Wn1KCFNTA2waVRPo2++m7yzdL8bLpVqdOmAf6UP Qp+RqgixT3VMIz0qORtkahGn8ebOrsVILlf5t8IACVbL37gejABhmayWBQDr 9Zf6dByTW/2zEu6vOkLasQBfeMQBEhOTT8BfOUH+m/XVBtg/vEmM/7STTdrj KzeXQaM+HR3n2bRA6Xi+9lwBnHTm+V1aCsFGKzI7yPx1PJYm5D8QgmFJmjnh rpYLm4HSbzLXTmbkl5Svvy4f1Y92mJdCtheR1oRa5jz7hy3gY99FXxc8MSUw IwYJKoZIhvcNAQkVMRYEFFd+Wbmul+GY8fpXGfcPZKp7IU20MDEwITAJBgUr DgMCGgUABBS/Klvbu+vi4seUykaXDZGkkw73yQQIqCWkicXrRPICAggA """ var simpleP12: [UInt8] { Array(Data(base64Encoded: base64EncodedSimpleP12, options: .ignoreUnknownCharacters)!) } var complexP12: [UInt8] { Array(Data(base64Encoded: base64EncodedComplexP12, options: .ignoreUnknownCharacters)!) } var noPassP12: [UInt8] { Array(Data(base64Encoded: base64EncodedNoPassP12, options: .ignoreUnknownCharacters)!) } class SSLPKCS12BundleTest: XCTestCase { static func withSimpleFilePath(_ body: (String) throws -> Void) throws { let path = try dumpToFile( data: Data( base64Encoded: base64EncodedSimpleP12, options: .ignoreUnknownCharacters )! ) defer { unlink(path) } return try body(path) } static func withComplexFilePath(_ body: (String) throws -> Void) throws { let path = try dumpToFile( data: Data( base64Encoded: base64EncodedComplexP12, options: .ignoreUnknownCharacters )! ) defer { unlink(path) } return try body(path) } static func withNoPasswordFilePath(_ body: (String) throws -> Void) throws { let path = try dumpToFile( data: Data( base64Encoded: base64EncodedNoPassP12, options: .ignoreUnknownCharacters )! 
)
        // Remove the temp file once `body` has run.
        defer { unlink(path) }
        return try body(path)
    }

    /// Decodes the simple (one key + one cert) PKCS#12 blob from memory and
    /// checks the decoded key and chain match the PEM fixtures.
    func testDecodingSimpleP12FromMemory() throws {
        let p12Bundle = try NIOSSLPKCS12Bundle(buffer: simpleP12, passphrase: "thisisagreatpassword".utf8)
        let expectedKey = try NIOSSLPrivateKey(bytes: Array(samplePemKey.utf8), format: .pem)
        let expectedCert = try NIOSSLCertificate(bytes: Array(samplePemCert.utf8), format: .pem)
        XCTAssertEqual(p12Bundle.privateKey, expectedKey)
        XCTAssertEqual(p12Bundle.certificateChain, [expectedCert])
    }

    /// Decodes the complex PKCS#12 blob (leaf + four CA certs) from memory and
    /// checks the full chain comes back in order.
    func testDecodingComplexP12FromMemory() throws {
        let p12Bundle = try NIOSSLPKCS12Bundle(buffer: complexP12, passphrase: "thisisagreatpassword".utf8)
        let expectedKey = try NIOSSLPrivateKey(bytes: Array(samplePemKey.utf8), format: .pem)
        let expectedCert = try NIOSSLCertificate(bytes: Array(samplePemCert.utf8), format: .pem)
        let caOne = try NIOSSLCertificate(bytes: Array(multiSanCert.utf8), format: .pem)
        let caTwo = try NIOSSLCertificate(bytes: Array(multiCNCert.utf8), format: .pem)
        let caThree = try NIOSSLCertificate(bytes: Array(noCNCert.utf8), format: .pem)
        let caFour = try NIOSSLCertificate(bytes: Array(unicodeCNCert.utf8), format: .pem)
        XCTAssertEqual(p12Bundle.privateKey, expectedKey)
        XCTAssertEqual(p12Bundle.certificateChain, [expectedCert, caOne, caTwo, caThree, caFour])
    }

    /// Decodes a PKCS#12 blob that was built without a passphrase; no
    /// passphrase argument is passed to the initializer.
    func testDecodingSimpleP12FromMemoryWithoutPassphrase() throws {
        let p12Bundle = try NIOSSLPKCS12Bundle(buffer: noPassP12)
        let expectedKey = try NIOSSLPrivateKey(bytes: Array(samplePemKey.utf8), format: .pem)
        let expectedCert = try NIOSSLCertificate(bytes: Array(samplePemCert.utf8), format: .pem)
        XCTAssertEqual(p12Bundle.privateKey, expectedKey)
        XCTAssertEqual(p12Bundle.certificateChain, [expectedCert])
    }

    /// Same as the in-memory simple test, but loading the bundle from a
    /// temporary file on disk.
    func testDecodingSimpleP12FromFile() throws {
        try Self.withSimpleFilePath { simpleFilePath in
            let p12Bundle = try NIOSSLPKCS12Bundle(
                file: simpleFilePath,
                passphrase: "thisisagreatpassword".utf8
            )
            let expectedKey = try NIOSSLPrivateKey(bytes: .init(samplePemKey.utf8), format: .pem)
            let expectedCert = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem)
            XCTAssertEqual(p12Bundle.privateKey, expectedKey)
            XCTAssertEqual(p12Bundle.certificateChain, [expectedCert])
        }
    }

    /// Same as the in-memory complex test, but loading the bundle from a
    /// temporary file on disk.
    func testDecodingComplexP12FromFile() throws {
        try Self.withComplexFilePath { complexFilePath in
            let p12Bundle = try NIOSSLPKCS12Bundle(
                file: complexFilePath,
                passphrase: "thisisagreatpassword".utf8
            )
            let expectedKey = try NIOSSLPrivateKey(bytes: .init(samplePemKey.utf8), format: .pem)
            let expectedCert = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem)
            let caOne = try NIOSSLCertificate(bytes: .init(multiSanCert.utf8), format: .pem)
            let caTwo = try NIOSSLCertificate(bytes: .init(multiCNCert.utf8), format: .pem)
            let caThree = try NIOSSLCertificate(bytes: .init(noCNCert.utf8), format: .pem)
            let caFour = try NIOSSLCertificate(bytes: .init(unicodeCNCert.utf8), format: .pem)
            XCTAssertEqual(p12Bundle.privateKey, expectedKey)
            XCTAssertEqual(p12Bundle.certificateChain, [expectedCert, caOne, caTwo, caThree, caFour])
        }
    }

    /// Loads the passphrase-less bundle from disk without providing a
    /// passphrase.
    func testDecodingSimpleP12FromFileWithoutPassphrase() throws {
        try Self.withNoPasswordFilePath { noPassFilePath in
            let p12Bundle = try NIOSSLPKCS12Bundle(file: noPassFilePath)
            let expectedKey = try NIOSSLPrivateKey(bytes: .init(samplePemKey.utf8), format: .pem)
            let expectedCert = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem)
            XCTAssertEqual(p12Bundle.privateKey, expectedKey)
            XCTAssertEqual(p12Bundle.certificateChain, [expectedCert])
        }
    }

    /// A missing file surfaces as an `IOError` carrying `ENOENT`.
    func testDecodingNonExistentPKCS12File() throws {
        XCTAssertThrowsError(try NIOSSLPKCS12Bundle(file: "/nonexistent/path")) { error in
            XCTAssertEqual(ENOENT, (error as? IOError).map { $0.errnoCode })
        }
    }

    /// Two bundles decoded from the same bytes compare equal and hash to the
    /// same Set slot; a bundle from different bytes does not.
    func testEquatableAndHashable() throws {
        let bundle1_a = try NIOSSLPKCS12Bundle(buffer: simpleP12, passphrase: "thisisagreatpassword".utf8)
        let bundle1_b = try NIOSSLPKCS12Bundle(buffer: simpleP12, passphrase: "thisisagreatpassword".utf8)
        let bundle2 = try NIOSSLPKCS12Bundle(buffer: complexP12, passphrase: "thisisagreatpassword".utf8)

        XCTAssertEqual(bundle1_a, bundle1_a)
        XCTAssertEqual(bundle1_a, bundle1_b)
        XCTAssertNotEqual(bundle1_a, bundle2)

        // The two equal bundles collapse into one Set element.
        let set = Set([bundle1_a, bundle1_b, bundle2])
        XCTAssertEqual(set.count, 2)
        XCTAssertTrue(set.contains(bundle1_a))
        XCTAssertTrue(set.contains(bundle1_b))
        XCTAssertTrue(set.contains(bundle2))
    }

    /// Serializes a bundle to PKCS#12 bytes and decodes it back, round-tripping
    /// the key and the whole certificate chain.
    func testMakePKCS12() throws {
        let privateKey = try NIOSSLPrivateKey(bytes: .init(samplePemKey.utf8), format: .pem)
        let mainCert = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem)
        let caOne = try NIOSSLCertificate(bytes: .init(multiSanCert.utf8), format: .pem)
        let caTwo = try NIOSSLCertificate(bytes: .init(multiCNCert.utf8), format: .pem)
        let caThree = try NIOSSLCertificate(bytes: .init(noCNCert.utf8), format: .pem)
        let caFour = try NIOSSLCertificate(bytes: .init(unicodeCNCert.utf8), format: .pem)
        let certificates = [mainCert, caOne, caTwo, caThree, caFour]

        // Create a PKCS#12...
        let bundle = NIOSSLPKCS12Bundle(
            certificateChain: certificates,
            privateKey: privateKey
        )
        let pkcs12 = try bundle.serialize(passphrase: "thisisagreatpassword".utf8)

        // And then decode it into a NIOSSLPKCS12Bundle
        let decoded = try NIOSSLPKCS12Bundle(buffer: pkcs12, passphrase: "thisisagreatpassword".utf8)

        // Make sure everything is there
        XCTAssertEqual(decoded.privateKey, privateKey)
        XCTAssertEqual(decoded.certificateChain, certificates)
    }

    /// Serializes a bundle with one passphrase and checks decoding with a
    /// different passphrase fails with a `BoringSSLError`.
    func testMakePKCS12_IncorrectPassphrase() throws {
        let privateKey = try NIOSSLPrivateKey(bytes: .init(samplePemKey.utf8), format: .pem)
        let mainCert = try NIOSSLCertificate(bytes: .init(samplePemCert.utf8), format: .pem)

        // Create a PKCS#12...
let bundle = NIOSSLPKCS12Bundle(
            certificateChain: [mainCert],
            privateKey: privateKey
        )
        let pkcs12 = try bundle.serialize(passphrase: "thisisagreatpassword".utf8)

        // And then try decoding it into a NIOSSLPKCS12Bundle, but with the wrong passphrase
        XCTAssertThrowsError(
            try NIOSSLPKCS12Bundle(
                buffer: pkcs12,
                passphrase: "thisisagreatpasswordbutnottherightone".utf8
            )
        ) { error in
            XCTAssertNotNil(error as? BoringSSLError)
        }
    }
}

================================================
FILE: Tests/NIOSSLTests/SSLPrivateKeyTests.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

import Foundation
import NIOCore
import XCTest

@testable import NIOSSL

/// Tests for loading, comparing and hashing `NIOSSLPrivateKey` in PEM and DER
/// form, with and without passphrases.
class SSLPrivateKeyTest: XCTestCase {
    // A fresh key generated at class-load time, used as the "definitely not
    // equal" comparison value in the tests below.
    static let dynamicallyGeneratedKey = generateSelfSignedCert().1

    /// Dumps the PEM key fixture to a temp file, runs `body` with its path,
    /// and deletes the file afterwards.
    static func withPEMKeyFile(_ body: (String) throws -> Void) throws {
        let path = try dumpToFile(
            text: samplePemKey,
            fileExtension: ".pem"
        )
        defer { unlink(path) }
        return try body(path)
    }

    /// Dumps the DER key fixture to a temp file, runs `body` with its path,
    /// and deletes the file afterwards.
    static func withDERKeyFile(_ body: (String) throws -> Void) throws {
        let path = try dumpToFile(
            data: sampleDerKey
        )
        defer { unlink(path) }
        return try body(path)
    }

    /// Dumps the passphrase-protected RSA PEM key fixture to a temp file,
    /// runs `body` with its path, and deletes the file afterwards.
    static func withPasswordPEMKeyFile(_ body: (String) throws -> Void) throws {
        let path = try dumpToFile(
            text: samplePemRSAEncryptedKey,
            fileExtension: ".pem"
        )
        defer { unlink(path) }
        return try body(path)
    }

    /// Dumps the passphrase-protected PKCS#8 PEM key fixture to a temp file,
    /// runs `body` with its path, and deletes the file afterwards.
    static func withPasswordPKCS8PEMKeyFile(_ body: (String) throws -> Void) throws {
        let path = try dumpToFile(
            text: samplePKCS8PemPrivateKey
        )
        defer { unlink(path) }
        return try body(path)
    }

    /// Loading the same PEM file twice yields equal keys, distinct from an
    /// unrelated key.
    func testLoadingPemKeyFromFile() throws {
        try Self.withPEMKeyFile { pemKeyFilePath in
            let key1 = try NIOSSLPrivateKey(file: pemKeyFilePath, format: .pem)
            let key2 = try NIOSSLPrivateKey(file: pemKeyFilePath, format: .pem)

            XCTAssertEqual(key1, key2)
            XCTAssertNotEqual(key1, SSLPrivateKeyTest.dynamicallyGeneratedKey)
        }
    }

    /// Loading the same DER file twice yields equal keys with equal hashes.
    func testLoadingDerKeyFromFile() throws {
        try Self.withDERKeyFile { derKeyFilePath in
            let key1 = try NIOSSLPrivateKey(file: derKeyFilePath, format: .der)
            let key2 = try NIOSSLPrivateKey(file: derKeyFilePath, format: .der)

            XCTAssertEqual(key1, key2)
            XCTAssertEqual(key1.hashValue, key2.hashValue)
            XCTAssertNotEqual(key1, SSLPrivateKeyTest.dynamicallyGeneratedKey)
            XCTAssertNotEqual(key1.hashValue, SSLPrivateKeyTest.dynamicallyGeneratedKey.hashValue)
        }
    }

    /// The PEM and DER fixtures encode the same key, so loading either format
    /// yields equal keys.
    func testDerAndPemAreIdentical() throws {
        try Self.withPEMKeyFile { pemKeyFilePath in
            try Self.withDERKeyFile { derKeyFilePath in
                let key1 = try NIOSSLPrivateKey(file: pemKeyFilePath, format: .pem)
                let key2 = try NIOSSLPrivateKey(file: derKeyFilePath, format: .der)

                XCTAssertEqual(key1, key2)
                XCTAssertEqual(key1.hashValue, key2.hashValue)
            }
        }
    }

    /// Loading a PEM key from in-memory bytes is deterministic.
    func testLoadingPemKeyFromMemory() throws {
        let key1 = try NIOSSLPrivateKey(bytes: .init(samplePemKey.utf8), format: .pem)
        let key2 = try NIOSSLPrivateKey(bytes: .init(samplePemKey.utf8), format: .pem)

        XCTAssertEqual(key1, key2)
        XCTAssertEqual(key1.hashValue, key2.hashValue)
    }

    /// Loading a DER key from in-memory bytes is deterministic.
    func testLoadingDerKeyFromMemory() throws {
        let keyBytes = [UInt8](sampleDerKey)
        let key1 = try NIOSSLPrivateKey(bytes: keyBytes, format: .der)
        let key2 = try NIOSSLPrivateKey(bytes: keyBytes, format: .der)

        XCTAssertEqual(key1, key2)
        XCTAssertEqual(key1.hashValue, key2.hashValue)
    }

    /// Arbitrary bytes fail to parse as a PEM key with
    /// `.failedToLoadPrivateKey`.
    func testLoadingGibberishFromMemoryAsPemFails() throws {
        let keyBytes: [UInt8] = [1, 2, 3]

        XCTAssertThrowsError(try NIOSSLPrivateKey(bytes: keyBytes, format: .pem)) { error in
            XCTAssertEqual(.failedToLoadPrivateKey, error as?
IOError).map { $0.errnoCode })
        }
    }

    /// Decrypting the RSA PEM key with the correct passphrase (supplied via
    /// the callback) is deterministic across two loads.
    func testLoadingEncryptedRSAKeyFromMemory() throws {
        let key1 = try NIOSSLPrivateKey(bytes: .init(samplePemRSAEncryptedKey.utf8), format: .pem) { closure in
            closure("thisisagreatpassword".utf8)
        }
        let key2 = try NIOSSLPrivateKey(bytes: .init(samplePemRSAEncryptedKey.utf8), format: .pem) { closure in
            closure("thisisagreatpassword".utf8)
        }

        XCTAssertEqual(key1, key2)
        XCTAssertEqual(key1.hashValue, key2.hashValue)
    }

    /// Same as above, but for the passphrase-protected PKCS#8 PEM fixture.
    func testLoadingEncryptedRSAPKCS8KeyFromMemory() throws {
        let key1 = try NIOSSLPrivateKey(bytes: .init(samplePKCS8PemPrivateKey.utf8), format: .pem) { closure in
            closure("thisisagreatpassword".utf8)
        }
        let key2 = try NIOSSLPrivateKey(bytes: .init(samplePKCS8PemPrivateKey.utf8), format: .pem) { closure in
            closure("thisisagreatpassword".utf8)
        }

        XCTAssertEqual(key1, key2)
        XCTAssertEqual(key1.hashValue, key2.hashValue)
    }

    /// Decrypting the RSA PEM key loaded from a file with the passphrase
    /// callback is deterministic.
    func testLoadingEncryptedRSAKeyFromFile() throws {
        try Self.withPasswordPEMKeyFile { passwordPemKeyFilePath in
            let key1 = try NIOSSLPrivateKey(file: passwordPemKeyFilePath, format: .pem) { closure in
                closure("thisisagreatpassword".utf8)
            }
            let key2 = try NIOSSLPrivateKey(file: passwordPemKeyFilePath, format: .pem) { closure in
                closure("thisisagreatpassword".utf8)
            }

            XCTAssertEqual(key1, key2)
            XCTAssertEqual(key1.hashValue, key2.hashValue)
        }
    }

    /// Same as above for the passphrase-protected PKCS#8 PEM fixture on disk.
    func testLoadingEncryptedRSAPKCS8KeyFromFile() throws {
        try Self.withPasswordPKCS8PEMKeyFile { passwordPKCS8PemKeyFilePath in
            let key1 = try NIOSSLPrivateKey(file: passwordPKCS8PemKeyFilePath, format: .pem) { closure in
                closure("thisisagreatpassword".utf8)
            }
            let key2 = try NIOSSLPrivateKey(file: passwordPKCS8PemKeyFilePath, format: .pem) { closure in
                closure("thisisagreatpassword".utf8)
            }

            XCTAssertEqual(key1, key2)
            XCTAssertEqual(key1.hashValue, key2.hashValue)
        }
    }

    /// A 64KiB passphrase is rejected and the key fails to load.
    func testWildlyOverlongPassphraseRSAFromMemory() throws {
        XCTAssertThrowsError(
            try NIOSSLPrivateKey(bytes: .init(samplePemRSAEncryptedKey.utf8), format: .pem) { closure in
                closure(Array(repeating: UInt8(8), count: 1 << 16))
            }
        ) { error in
            XCTAssertEqual(.failedToLoadPrivateKey, error as? NIOSSLError)
        }
    }

    /// A 64KiB passphrase is rejected for the PKCS#8 fixture too.
    func testWildlyOverlongPassphrasePKCS8FromMemory() throws {
        XCTAssertThrowsError(
            try NIOSSLPrivateKey(bytes: .init(samplePKCS8PemPrivateKey.utf8), format: .pem) { closure in
                closure(Array(repeating: UInt8(8), count: 1 << 16))
            }
        ) { error in
            XCTAssertEqual(.failedToLoadPrivateKey, error as? NIOSSLError)
        }
    }

    // NOTE(review): despite the "FromFile" name, this loads from in-memory
    // bytes, identical to testWildlyOverlongPassphraseRSAFromMemory — possibly
    // a copy-paste leftover; confirm whether a file-based variant was intended.
    func testWildlyOverlongPassphraseRSAFromFile() throws {
        XCTAssertThrowsError(
            try NIOSSLPrivateKey(bytes: .init(samplePemRSAEncryptedKey.utf8), format: .pem) { closure in
                closure(Array(repeating: UInt8(8), count: 1 << 16))
            }
        ) { error in
            XCTAssertEqual(.failedToLoadPrivateKey, error as? NIOSSLError)
        }
    }

    // NOTE(review): also loads from memory rather than a file, same as the
    // PKCS8FromMemory variant above.
    func testWildlyOverlongPassphrasePKCS8FromFile() throws {
        XCTAssertThrowsError(
            try NIOSSLPrivateKey(bytes: .init(samplePKCS8PemPrivateKey.utf8), format: .pem) { closure in
                closure(Array(repeating: UInt8(8), count: 1 << 16))
            }
        ) { error in
            XCTAssertEqual(.failedToLoadPrivateKey, error as? NIOSSLError)
        }
    }

    /// An error thrown from the passphrase callback surfaces as
    /// `.failedToLoadPrivateKey`, not as the callback's own error.
    func testThrowingPassphraseCallback() throws {
        enum MyError: Error {
            case error
        }

        XCTAssertThrowsError(
            try NIOSSLPrivateKey(bytes: .init(samplePemRSAEncryptedKey.utf8), format: .pem) {
                (_: NIOSSLPassphraseSetter<[UInt8]>) in
                throw MyError.error
            }
        ) { error in
            XCTAssertEqual(.failedToLoadPrivateKey, error as? NIOSSLError)
        }
    }

    /// Supplying the wrong passphrase fails the key load.
    func testWrongPassword() {
        XCTAssertThrowsError(
            try NIOSSLPrivateKey(bytes: .init(samplePemRSAEncryptedKey.utf8), format: .pem) { closure in
                closure("incorrect password".utf8)
            }
        ) { error in
            XCTAssertEqual(.failedToLoadPrivateKey, error as? NIOSSLError)
        }
    }

    /// Using the deprecated `.file` private-key source with an encrypted key
    /// and no passphrase callback fails when building the context.
    @available(*, deprecated, message: "`.file` NIOSSLPrivateKeySource option deprecated")
    func testMissingPassword() throws {
        try Self.withPasswordPEMKeyFile { passwordPemKeyFilePath in
            let configuration = TLSConfiguration.makeServerConfiguration(
                certificateChain: [],
                privateKey: .file(passwordPemKeyFilePath)
            )
            XCTAssertThrowsError(try NIOSSLContext(configuration: configuration)) { error in
                XCTAssertEqual(.failedToLoadPrivateKey, error as? NIOSSLError)
            }
        }
    }

    /// The EC key fixtures load in both DER and PEM form and compare equal.
    func testECKeysWorkProperly() throws {
        let keyDerBytes = [UInt8](sampleECDerKey)
        let keyPemBytes = [UInt8](sampleECPemKey.utf8)
        let key1 = try assertNoThrowWithValue(NIOSSLPrivateKey(bytes: keyDerBytes, format: .der))
        let key2 = try assertNoThrowWithValue(NIOSSLPrivateKey(bytes: keyPemBytes, format: .pem))

        XCTAssertEqual(key1, key2)
        XCTAssertEqual(key1.hashValue, key2.hashValue)
    }

    /// An EC key and an RSA key never compare equal or hash equal.
    func testECKeysArentEqualToRSAKeys() throws {
        let key1 = try NIOSSLPrivateKey(bytes: .init(samplePemKey.utf8), format: .pem)
        let key2 = try NIOSSLPrivateKey(bytes: .init(sampleECPemKey.utf8), format: .pem)

        XCTAssertNotEqual(key1, key2)
        XCTAssertNotEqual(key1.hashValue, key2.hashValue)
    }

    /// `derBytes` of a PEM-loaded key matches a direct PEM→DER conversion of
    /// the same fixture.
    func testDERBytes() throws {
        let key = try NIOSSLPrivateKey(bytes: .init(samplePemKey.utf8), format: .pem)
        let derBytes = try key.derBytes

        XCTAssertEqual(Data(derBytes), pemToDer(samplePemKey))
    }
}

================================================
FILE: Tests/NIOSSLTests/SecurityFrameworkVerificationTests.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2021 Apple Inc.
// and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

import NIOCore
import XCTest

@testable import NIOSSL

// We can only use Security.framework to validate TLS certificates on Apple platforms.
#if canImport(Darwin)
import Dispatch
import Foundation
import Security

import NIOPosix
#endif

/// Exercises certificate validation via Security.framework; every test body is
/// wrapped in `#if canImport(Darwin)` and is a no-op on other platforms.
final class SecurityFrameworkVerificationTests: XCTestCase {
    // Two distinct self-signed certs generated at class-load time: one used
    // as an additional trust root, the other as an unknown cert.
    static let selfSignedCert: NIOSSLCertificate = {
        generateSelfSignedCert().0
    }()

    static let anotherSelfSignedCert: NIOSSLCertificate = {
        generateSelfSignedCert().0
    }()

    /// The bundled apple.com chain verifies with the default configuration.
    func testDefaultVerification() throws {
        #if canImport(Darwin)
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer { try! group.syncShutdownGracefully() }

        let p = group.next().makePromise(of: NIOSSLVerificationResult.self)
        let context = try NIOSSLContext(configuration: .makeClientConfiguration())
        let connection = context.createConnection()!
        connection.setConnectState()
        connection.performSecurityFrameworkValidation(promise: p, peerCertificates: Self.appleComCertChain)
        let result = try p.futureResult.wait()
        XCTAssertEqual(result, .certificateVerified)
        #endif
    }

    /// A self-signed cert fails default verification.
    func testDefaultVerificationCanFail() throws {
        #if canImport(Darwin)
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer { try! group.syncShutdownGracefully() }

        let p = group.next().makePromise(of: NIOSSLVerificationResult.self)
        let context = try NIOSSLContext(configuration: .makeClientConfiguration())
        let connection = context.createConnection()!
        connection.setConnectState()
        let certificate = SecCertificateCreateWithData(nil, Data(try! Self.selfSignedCert.toDERBytes()) as CFData)!
        connection.performSecurityFrameworkValidation(promise: p, peerCertificates: [certificate])
        let result = try p.futureResult.wait()
        XCTAssertEqual(result, .failed)
        #endif
    }

    /// Hostname "www.apple.com" matches the apple.com chain.
    func testDefaultVerificationCanValidateHostname() throws {
        #if canImport(Darwin)
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer { try! group.syncShutdownGracefully() }

        let p = group.next().makePromise(of: NIOSSLVerificationResult.self)
        let context = try NIOSSLContext(configuration: .makeClientConfiguration())
        let connection = context.createConnection()!
        connection.setConnectState()
        connection.expectedHostname = "www.apple.com"
        connection.performSecurityFrameworkValidation(promise: p, peerCertificates: Self.appleComCertChain)
        let result = try p.futureResult.wait()
        XCTAssertEqual(result, .certificateVerified)
        #endif
    }

    /// A hostname not covered by the apple.com chain fails verification.
    func testDefaultVerificationFailsOnInvalidHostname() throws {
        #if canImport(Darwin)
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer { try! group.syncShutdownGracefully() }

        let p = group.next().makePromise(of: NIOSSLVerificationResult.self)
        let context = try NIOSSLContext(configuration: .makeClientConfiguration())
        let connection = context.createConnection()!
        connection.setConnectState()
        connection.expectedHostname = "www.swift-nio.io"
        connection.performSecurityFrameworkValidation(promise: p, peerCertificates: Self.appleComCertChain)
        let result = try p.futureResult.wait()
        XCTAssertEqual(result, .failed)
        #endif
    }

    /// With `.noHostnameVerification`, a mismatched hostname still verifies.
    func testDefaultVerificationIgnoresHostnamesWhenConfiguredTo() throws {
        #if canImport(Darwin)
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer { try! group.syncShutdownGracefully() }

        let p = group.next().makePromise(of: NIOSSLVerificationResult.self)
        var configuration = TLSConfiguration.makeClientConfiguration()
        configuration.certificateVerification = .noHostnameVerification
        let context = try NIOSSLContext(configuration: configuration)
        let connection = context.createConnection()!
        connection.setConnectState()
        connection.expectedHostname = "www.swift-nio.io"
        connection.performSecurityFrameworkValidation(promise: p, peerCertificates: Self.appleComCertChain)
        let result = try p.futureResult.wait()
        XCTAssertEqual(result, .certificateVerified)
        #endif
    }

    /// A cert listed in `additionalTrustRoots` verifies even though it is
    /// self-signed.
    func testDefaultVerificationPlusAdditionalCanUseAdditionalRoot() throws {
        #if canImport(Darwin)
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer { try! group.syncShutdownGracefully() }

        let p = group.next().makePromise(of: NIOSSLVerificationResult.self)
        var config = TLSConfiguration.makeClientConfiguration()
        config.additionalTrustRoots = [.certificates([Self.selfSignedCert])]
        let context = try NIOSSLContext(configuration: config)
        let connection = context.createConnection()!
        connection.setConnectState()
        let certificate = SecCertificateCreateWithData(nil, Data(try! Self.selfSignedCert.toDERBytes()) as CFData)!
        connection.performSecurityFrameworkValidation(promise: p, peerCertificates: [certificate])
        let result = try p.futureResult.wait()
        XCTAssertEqual(result, .certificateVerified)
        #endif
    }

    /// Adding an extra trust root does not disable the system default roots.
    func testDefaultVerificationPlusAdditionalCanUseDefaultRoots() throws {
        #if canImport(Darwin)
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer { try! group.syncShutdownGracefully() }

        let p = group.next().makePromise(of: NIOSSLVerificationResult.self)
        var config = TLSConfiguration.makeClientConfiguration()
        config.additionalTrustRoots = [.certificates([Self.selfSignedCert])]
        let context = try NIOSSLContext(configuration: config)
        let connection = context.createConnection()!
        connection.setConnectState()
        connection.performSecurityFrameworkValidation(promise: p, peerCertificates: Self.appleComCertChain)
        let result = try p.futureResult.wait()
        XCTAssertEqual(result, .certificateVerified)
        #endif
    }

    /// A cert that is neither in the default roots nor in the additional
    /// trust roots still fails.
    func testDefaultVerificationPlusAdditionalCanFailWithUnknownCert() throws {
        #if canImport(Darwin)
        let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
        defer { try! group.syncShutdownGracefully() }

        let p = group.next().makePromise(of: NIOSSLVerificationResult.self)
        var config = TLSConfiguration.makeClientConfiguration()
        config.additionalTrustRoots = [.certificates([Self.selfSignedCert])]
        let context = try NIOSSLContext(configuration: config)
        let connection = context.createConnection()!
        connection.setConnectState()
        let certificate = SecCertificateCreateWithData(
            nil,
            Data(try! Self.anotherSelfSignedCert.toDERBytes()) as CFData
        )!
        connection.performSecurityFrameworkValidation(promise: p, peerCertificates: [certificate])
        let result = try p.futureResult.wait()
        XCTAssertEqual(result, .failed)
        #endif
    }
}

// This class allows us to work around an awkward bug with our static below.
// We need to mark this type non-Sendable.
#if !canImport(Darwin)
final class SecCertificate {

}

@available(*, unavailable)
extension SecCertificate: Sendable {}
#endif

extension SecurityFrameworkVerificationTests {
    /// If tests fail because of an expired cert, you can regenerate the leaf and intermediate certificates
    /// by running the following command, and replacing both served certificates as leaf and intermediate,
    /// in that order:
    /// `openssl s_client -connect www.apple.com:443 -servername www.apple.com -showcerts`
    nonisolated(unsafe) fileprivate static let appleComCertChain: [SecCertificate] = buildAppleComCertChain()

    fileprivate static func buildAppleComCertChain() -> [SecCertificate] {
        #if canImport(Darwin)
        // All certs here are PEM format, with the leading/trailing lines stripped.
// Not Valid Before: 11 Feb 2026 17:44:10 GMT // Not Valid After: 8 Aug 2026 17:30:10 GMT let leaf = """ MIIHeDCCBmCgAwIBAgIQCiKs5C/HH0Y/lT7wtag/DDANBgkqhkiG9w0BAQsFADBR MQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBwbGUgSW5jLjEtMCsGA1UEAxMkQXBw bGUgUHVibGljIEVWIFNlcnZlciBSU0EgQ0EgMSAtIEcxMB4XDTI2MDIxMTE3NDQx MFoXDTI2MDgxODE3MzAxMFowgccxHTAbBgNVBA8MFFByaXZhdGUgT3JnYW5pemF0 aW9uMRMwEQYLKwYBBAGCNzwCAQMTAlVTMRswGQYLKwYBBAGCNzwCAQIMCkNhbGlm b3JuaWExETAPBgNVBAUTCEMwODA2NTkyMQswCQYDVQQGEwJVUzETMBEGA1UECAwK Q2FsaWZvcm5pYTESMBAGA1UEBwwJQ3VwZXJ0aW5vMRMwEQYDVQQKDApBcHBsZSBJ bmMuMRYwFAYDVQQDDA13d3cuYXBwbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOC AQ8AMIIBCgKCAQEAk90vvxbvjCycmnxqzhVhARxSD3lFU14ECmveg1JJkfWV2tQc kiQjakUx3i5o4oF5BX94TgKHfbQ4CDeNkkTdgiE8c5d1SyQ30OKMmu5Png+MBtyU ERTYE6789ZQSX7Qj4YeZUMnBTB0gF3F0dSNI1gtEi6O0DusC6OkA+kStocebvib9 9VLqCJ7tTDmJGJBQqBIICVTFJrnUOLBrxrW6wYg8t4bieeaWo6aCDgMORyXYEi6s 8QYAVt6ELDmfY3tIK4pQxx2EWOUDAeczIFqx0PtA4csnFOU84OT6DAYZBrcrtahB snW9n2wx1EzvMrnYhuDJPY7dwMJIH1jFIAZLMwIDAQABo4ID0zCCA88wDAYDVR0T AQH/BAIwADAfBgNVHSMEGDAWgBTTvcE8oM81uTTF1NvaEA5M3mr+WDB6BggrBgEF BQcBAQRuMGwwMgYIKwYBBQUHMAKGJmh0dHA6Ly9jZXJ0cy5hcHBsZS5jb20vYXBl dnNyc2ExZzEuZGVyMDYGCCsGAQUFBzABhipodHRwOi8vb2NzcC5hcHBsZS5jb20v b2NzcDAzLWFwZXZzcnNhMWcxMDEwPAYDVR0RBDUwM4IQaW1hZ2VzLmFwcGxlLmNv bYINd3d3LmFwcGxlLmNvbYIQd3d3LmFwcGxlLmNvbS5jbjBgBgNVHSAEWTBXMEgG BWeBDAEBMD8wPQYIKwYBBQUHAgEWMWh0dHBzOi8vd3d3LmFwcGxlLmNvbS9jZXJ0 aWZpY2F0ZWF1dGhvcml0eS9wdWJsaWMwCwYJYIZIAYb9bAIBMBMGA1UdJQQMMAoG CCsGAQUFBwMBMDUGA1UdHwQuMCwwKqAooCaGJGh0dHA6Ly9jcmwuYXBwbGUuY29t L2FwZXZzcnNhMWcxLmNybDAdBgNVHQ4EFgQUsYvlS10MqKLYBaxFqMlTrsUPubAw DgYDVR0PAQH/BAQDAgWgMA8GCSqGSIb3Y2QGVgQCBQAwggH0BgorBgEEAdZ5AgQC BIIB5ASCAeAB3gB1AJROQ4f67MHvgfMZJCaoGGUBx9NfOAIBP3JnfVU3LhnYAAAB nE3W1lgAAAQDAEYwRAIgVZYmJmSrX0I48qD3ga9siB4X+ABlwS46NibY7CGB67oC IBAQ/tzmhDmJTsQPVsxyB8zzNCFILG3x9KXi84xFDSn/AHYAyKPEf8ezrbk1awE/ anoSbeM6TkOlxkb5l605dZkdz5oAAAGcTdbWbAAABAMARzBFAiEAp40+gQNimcjv 5nEpubXnCj+XFNybUatiM0sLr+dXiswCIB+nWWZdG8YT3lLXGMjCOz9+wYEpwnDO 
m484HO6ExgRlAHYA1219ENGn9XfCx+lf1wC/+YLJM1pl4dCzAXMXwMjFaXcAAAGc TdbWWgAABAMARzBFAiAIC0mUpuft8PG9ro6a3cIre4WawkbDvWNddyCEfow9gQIh AJeHkOZPRSpME/Q8XDSa7hIOYl/0sffaXthwn0MnZaqqAHUAwjF+V0UZo0Xufzje spBB68fCIVoiv3/Vta12mtkOUs0AAAGcTdbWdwAABAMARjBEAiBjs2ij+9e0NCA7 +aUagaghDIaRm2ebKL2Oq8IHTfXrLQIgJ1LUZZI0r+DEnWN84L+kYfDE2bHKLiAq oRlB4aVdxMwwDQYJKoZIhvcNAQELBQADggEBAIYpW3UNZnJJGsYhOZ+CFutCLPGS MbKPr8nIHXlh4tP3cxkE1D8SUDzsp+81DwzhkJ8cFPz2JlxzFTWMlumo/vWFxO54 OLp0gAbEYBjOFuzf8A32XSwSo6Xoj+dpvD24gje8+COxNaAWsbUQdm9E0oRWsMd9 ZcORi6J1Tywhb1uqvMa1XDhtZXEG/yzwtbDQbBHYNBgXPG5odSCzuqfL2aZIcorR zfsZ3Q+cvy/SSsHSFKqXiZHRVuF86IxxcYtnzzppYFBi5ZfvWyyRlXvK2mGWfkjj H+oCFWiClVTllg31d5XLuWTSY1gc8KqbHsGIjKv9FxqHQxokzIjMlyB+T68= """ // Not Valid Before: 29 Apr 2020 12:55:34 GMT // Not Valid After: 11 Apr 2030 23:59:59 GMT let intermediate = """ MIIFHjCCBAagAwIBAgIQBPIuzCH8tDgqwouPLWQfwDANBgkqhkiG9w0BAQsFADBh MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH MjAeFw0yMDA0MjkxMjU1MzRaFw0zMDA0MTAyMzU5NTlaMFExCzAJBgNVBAYTAlVT MRMwEQYDVQQKEwpBcHBsZSBJbmMuMS0wKwYDVQQDEyRBcHBsZSBQdWJsaWMgRVYg U2VydmVyIFJTQSBDQSAxIC0gRzEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK AoIBAQDfn5fdV0A4cCNFu0EvUgduZVPyPZity4q8Re1TcE9qGyWoVpzHm9OluDlq wcvudbLwBe25PWA2Z9xFpVKK9jGW6Vt79N7if7dfi9w2+2zG+/wJu1Z6TA8RENTV uJTaMwqQXw1dTkZEOgjCEKa75uyzXl+mIa0s4GdxkCBclGI9WDsUFlLj3A6eBIze TomTl7LhIV+nQUEWTqkwG5Pcxckiv7Xn1mu7EVWUukZWY8uL+KhcTJQZYtxNj3Pe M73WZ3fraixPA+RBkVnG5NgObRnrk5VHwjntbit9892kp7EISZK4Izfm99QgP4V2 IqHFsKxfCnZqfwUHikxftIVkjXIdAgMBAAGjggHgMIIB3DAdBgNVHQ4EFgQU073B PKDPNbk0xdTb2hAOTN5q/lgwHwYDVR0jBBgwFoAUTiJUIBiV5uNu5g/6+rkS7QYX jzkwDgYDVR0PAQH/BAQDAgGGMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD AjASBgNVHRMBAf8ECDAGAQH/AgEAMDQGCCsGAQUFBwEBBCgwJjAkBggrBgEFBQcw AYYYaHR0cDovL29jc3AuZGlnaWNlcnQuY29tMEIGA1UdHwQ7MDkwN6A1oDOGMWh0 dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydEdsb2JhbFJvb3RHMi5jcmww gdwGA1UdIASB1DCB0TCBxQYJYIZIAYb9bAIBMIG3MCgGCCsGAQUFBwIBFhxodHRw 
czovL3d3dy5kaWdpY2VydC5jb20vQ1BTMIGKBggrBgEFBQcCAjB+DHxBbnkgdXNl IG9mIHRoaXMgQ2VydGlmaWNhdGUgY29uc3RpdHV0ZXMgYWNjZXB0YW5jZSBvZiB0 aGUgUmVseWluZyBQYXJ0eSBBZ3JlZW1lbnQgbG9jYXRlZCBhdCBodHRwczovL3d3 dy5kaWdpY2VydC5jb20vcnBhLXVhMAcGBWeBDAEBMA0GCSqGSIb3DQEBCwUAA4IB AQBD9c6SmtMxGjRwc/A1bPiM+r1qj5xbDzGn6s6m6oggm9UeBLCSUNJthKffNPqq wtULeeUddssewaOZX+uHjG9bY/O9J1VQtGtXI2hndyAPiloqNjf5iBW16h3ZIUFQ L319hISioItFVJZnVe4gjNEWio1ZRwO5A4e/H69/lPAX294yGtYGllAdv2NexhUM fjODhCajoTJmkXbyIpYzTNkgDXvQptTecrvr0rPzEMWfTtGSppbOC+s/5jG3aJ6G Jn49Ram1ZLEGHTx9PWUoHth9Lj7vwFBD9667x9m9nUhuET9a3XvNep+N7w96ZqH2 fAqUBW1kl6u3u67D6mvDsCQr """ return [leaf, intermediate].map { SecCertificateCreateWithData(nil, Data(base64Encoded: $0, options: .ignoreUnknownCharacters)! as CFData)! } #else return [] #endif } } ================================================ FILE: Tests/NIOSSLTests/TLS13RecordObserver.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2022 Apple Inc. and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// import NIOCore final class TLS13RecordObserver: ChannelDuplexHandler { typealias InboundIn = ByteBuffer typealias InboundOut = ByteBuffer typealias OutboundIn = ByteBuffer typealias OutboundOut = ByteBuffer var writtenRecords: [Record] = [] func write(context: ChannelHandlerContext, data: NIOAny, promise: EventLoopPromise?) { var payload = self.unwrapOutboundIn(data) while let record = payload.readTLS13Record() { writtenRecords.append(record) } // We should have consumed everything as NIO only writes full records. 
precondition(payload.readableBytes == 0) // Forward the original payload on. context.write(data, promise: promise) } } extension TLS13RecordObserver { struct Record: Hashable { var contentType: ContentType var legacyRecordVersion: UInt16 var encryptedRecord: ByteBuffer } } extension TLS13RecordObserver.Record { struct ContentType: RawRepresentable, Hashable { var rawValue: UInt8 init(rawValue: UInt8) { self.rawValue = rawValue } static let invalid = Self(rawValue: 0) static let changeCipherSpec = Self(rawValue: 20) static let alert = Self(rawValue: 21) static let handshake = Self(rawValue: 22) static let applicationData = Self(rawValue: 23) } } extension ByteBuffer { fileprivate mutating func readTLS13Record() -> TLS13RecordObserver.Record? { guard let (contentType, legacyRecordVersion, length) = self.readMultipleIntegers(as: (UInt8, UInt16, UInt16).self) else { return nil } guard let encryptedRecord = self.readSlice(length: Int(length)) else { return nil } return .init( contentType: .init(rawValue: contentType), legacyRecordVersion: legacyRecordVersion, encryptedRecord: encryptedRecord ) } } ================================================ FILE: Tests/NIOSSLTests/TLSConfigurationTest.swift ================================================ //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2017-2021 Apple Inc. 
and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// @_implementationOnly import CNIOBoringSSL @preconcurrency import Dispatch import NIOConcurrencyHelpers import NIOCore import NIOEmbedded import NIOPosix import NIOTLS import XCTest @testable import NIOSSL final class ErrorCatcher: ChannelInboundHandler, Sendable { public typealias InboundIn = Any let _errors: NIOLockedValueBox<[T]> var errors: [T] { self._errors.withLockedValue { $0 } } public init() { self._errors = .init([]) } public func errorCaught(context: ChannelHandlerContext, error: Error) { self._errors.withLockedValue { $0.append(error as! T) } } } final class HandshakeCompletedHandler: ChannelInboundHandler, Sendable { public typealias InboundIn = Any let _handshakeSucceeded = NIOLockedValueBox(false) var handshakeSucceeded: Bool { self._handshakeSucceeded.withLockedValue { $0 } } public func userInboundEventTriggered(context: ChannelHandlerContext, event: Any) { if let event = event as? TLSUserEvent, case .handshakeCompleted = event { self._handshakeSucceeded.withLockedValue { $0 = true } } context.fireUserInboundEventTriggered(event) } } final class WaitForHandshakeHandler: ChannelInboundHandler, Sendable { public typealias InboundIn = Any public var handshakeResult: EventLoopFuture { self.handshakeResultPromise.futureResult } private let handshakeResultPromise: EventLoopPromise init(handshakeResultPromise: EventLoopPromise) { self.handshakeResultPromise = handshakeResultPromise } public func userInboundEventTriggered(context: ChannelHandlerContext, event: Any) { if let event = event as? 
TLSUserEvent, case .handshakeCompleted = event { self.handshakeResultPromise.succeed(()) } context.fireUserInboundEventTriggered(event) } public func errorCaught(context: ChannelHandlerContext, error: Error) { if let error = error as? NIOSSLError, case .handshakeFailed = error { self.handshakeResultPromise.fail(error) } context.fireErrorCaught(error) } } class TLSConfigurationTest: XCTestCase { static let _certAndKey1 = generateSelfSignedCert() static let cert1 = TLSConfigurationTest._certAndKey1.0 static let key1 = TLSConfigurationTest._certAndKey1.1 static let _certAndKey2 = generateSelfSignedCert() static let cert2 = TLSConfigurationTest._certAndKey2.0 static let key2 = TLSConfigurationTest._certAndKey2.1 func assertHandshakeError( withClientConfig clientConfig: TLSConfiguration, andServerConfig serverConfig: TLSConfiguration, errorTextContains message: String, file: StaticString = #filePath, line: UInt = #line ) throws { try assertHandshakeError( withClientConfig: clientConfig, andServerConfig: serverConfig, errorTextContainsAnyOf: [message], file: file, line: line ) } func assertHandshakeError( withClientConfig clientConfig: TLSConfiguration, andServerConfig serverConfig: TLSConfiguration, errorTextContainsAnyOf messages: [String], file: StaticString = #filePath, line: UInt = #line ) throws { let clientContext = try assertNoThrowWithValue( NIOSSLContext(configuration: clientConfig), file: file, line: line ) let serverContext = try assertNoThrowWithValue( NIOSSLContext(configuration: serverConfig), file: file, line: line ) let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let eventHandler = ErrorCatcher() let handshakeHandler = HandshakeCompletedHandler() let serverChannel = try assertNoThrowWithValue( serverTLSChannel(context: serverContext, handlers: [], group: group), file: file, line: line ) let clientChannel = try assertNoThrowWithValue( clientTLSChannel( context: clientContext, 
preHandlers: [], postHandlers: [eventHandler, handshakeHandler], group: group, connectingTo: serverChannel.localAddress! ), file: file, line: line ) // We expect the channel to be closed fairly swiftly as the handshake should fail. clientChannel.closeFuture.whenComplete { _ in XCTAssertEqual(eventHandler.errors.count, 1) switch eventHandler.errors[0] { case .handshakeFailed(.sslError(let errs)): let correctError: Bool = messages.map { errs[0].description.contains($0) }.reduce(false) { $0 || $1 } XCTAssert(correctError, errs[0].description, file: (file), line: line) default: XCTFail("Unexpected error: \(eventHandler.errors[0])", file: (file), line: line) } XCTAssertFalse(handshakeHandler.handshakeSucceeded, file: (file), line: line) } try clientChannel.closeFuture.wait() } func assertPostHandshakeError( withClientConfig clientConfig: TLSConfiguration, andServerConfig serverConfig: TLSConfiguration, errorTextContainsAnyOf messages: [String], file: StaticString = #filePath, line: UInt = #line ) throws { let clientContext = try assertNoThrowWithValue( NIOSSLContext(configuration: clientConfig), file: file, line: line ) let serverContext = try assertNoThrowWithValue( NIOSSLContext(configuration: serverConfig), file: file, line: line ) let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let eventHandler = ErrorCatcher() let handshakeHandler = HandshakeCompletedHandler() let serverChannel = try assertNoThrowWithValue( serverTLSChannel(context: serverContext, handlers: [], group: group), file: file, line: line ) let clientChannel = try assertNoThrowWithValue( clientTLSChannel( context: clientContext, preHandlers: [], postHandlers: [eventHandler, handshakeHandler], group: group, connectingTo: serverChannel.localAddress! ), file: file, line: line ) // We expect the channel to be closed fairly swiftly as the handshake should fail. 
clientChannel.closeFuture.whenComplete { _ in XCTAssertEqual(eventHandler.errors.count, 1, file: (file), line: line) switch eventHandler.errors[0] { case .sslError(let errs): XCTAssertEqual(errs.count, 1, file: (file), line: line) let correctError: Bool = messages.map { errs[0].description.contains($0) }.reduce(false) { $0 || $1 } XCTAssert(correctError, errs[0].description, file: (file), line: line) default: XCTFail("Unexpected error: \(eventHandler.errors[0])", file: (file), line: line) } XCTAssertTrue(handshakeHandler.handshakeSucceeded, file: (file), line: line) } try clientChannel.closeFuture.wait() } /// Performs a connection in memory and validates that the handshake was successful. /// /// - NOTE: This function should only be used when you know that there is no custom verification /// callback in use, otherwise it will not be thread-safe. func assertHandshakeSucceededInMemory( withClientConfig clientConfig: TLSConfiguration, andServerConfig serverConfig: TLSConfiguration, file: StaticString = #filePath, line: UInt = #line ) throws { let clientContext = try assertNoThrowWithValue(NIOSSLContext(configuration: clientConfig)) let serverContext = try assertNoThrowWithValue(NIOSSLContext(configuration: serverConfig)) try self.assertHandshakeSucceededInMemory( withClientContext: clientContext, andServerContext: serverContext, file: file, line: line ) } /// Performs a connection in memory and validates that the handshake was successful. /// /// - NOTE: This function should only be used when you know that there is no custom verification /// callback in use, otherwise it will not be thread-safe. func assertHandshakeSucceededInMemory( withClientContext clientContext: NIOSSLContext, andServerContext serverContext: NIOSSLContext, file: StaticString = #filePath, line: UInt = #line ) throws { let serverChannel = EmbeddedChannel() let clientChannel = EmbeddedChannel() defer { // We expect the server case to throw _ = try? serverChannel.finish() _ = try? 
clientChannel.finish() } XCTAssertNoThrow( try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: serverContext)), file: (file), line: line ) XCTAssertNoThrow( try clientChannel.pipeline.syncOperations.addHandler( NIOSSLClientHandler(context: clientContext, serverHostname: nil) ), file: (file), line: line ) let handshakeHandler = HandshakeCompletedHandler() XCTAssertNoThrow(try clientChannel.pipeline.addHandler(handshakeHandler).wait(), file: (file), line: line) // Connect. This should lead to a completed handshake. XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel), file: (file), line: line) XCTAssertTrue(handshakeHandler.handshakeSucceeded, file: (file), line: line) _ = serverChannel.close() try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel) } /// Performs a connection using a real event loop and validates that the handshake was successful. /// /// This function is thread-safe in the presence of custom verification callbacks. func assertHandshakeSucceededEventLoop( withClientConfig clientConfig: TLSConfiguration, andServerConfig serverConfig: TLSConfiguration, serverCustomVerificationCallback: ( @Sendable ([NIOSSLCertificate], EventLoopPromise) -> Void )? = nil, file: StaticString = #filePath, line: UInt = #line ) throws { let clientContext = try assertNoThrowWithValue( NIOSSLContext(configuration: clientConfig), file: file, line: line ) let serverContext = try assertNoThrowWithValue( NIOSSLContext(configuration: serverConfig), file: file, line: line ) try self.assertHandshakeSucceededEventLoop( withClientContext: clientContext, andServerContext: serverContext, serverCustomVerificationCallback: serverCustomVerificationCallback, file: file, line: line ) } /// Performs a connection using a real event loop and validates that the handshake was successful. /// /// This function is thread-safe in the presence of custom verification callbacks. 
func assertHandshakeSucceededEventLoop( withClientContext clientContext: NIOSSLContext, andServerContext serverContext: NIOSSLContext, serverCustomVerificationCallback: ( @Sendable ([NIOSSLCertificate], EventLoopPromise) -> Void )? = nil, file: StaticString = #filePath, line: UInt = #line ) throws { let group = MultiThreadedEventLoopGroup(numberOfThreads: 1) defer { XCTAssertNoThrow(try group.syncShutdownGracefully()) } let eventHandler = ErrorCatcher() let handshakeHandler = HandshakeCompletedHandler() let handshakeResultPromise = group.next().makePromise(of: Void.self) let handshakeWatcher = WaitForHandshakeHandler(handshakeResultPromise: handshakeResultPromise) let serverChannel = try assertNoThrowWithValue( serverTLSChannel( context: serverContext, handlers: [], group: group, customVerificationCallback: serverCustomVerificationCallback ), file: file, line: line ) let clientChannel = try assertNoThrowWithValue( clientTLSChannel( context: clientContext, preHandlers: [], postHandlers: [eventHandler, handshakeWatcher, handshakeHandler], group: group, connectingTo: serverChannel.localAddress! ), file: file, line: line ) handshakeWatcher.handshakeResult.whenComplete { c in _ = clientChannel.close() } clientChannel.closeFuture.whenComplete { _ in XCTAssertEqual(eventHandler.errors.count, 0, file: file, line: line) XCTAssertTrue(handshakeHandler.handshakeSucceeded, file: file, line: line) } try clientChannel.closeFuture.wait() } func assertHandshakeSucceeded( withClientConfig clientConfig: TLSConfiguration, andServerConfig serverConfig: TLSConfiguration, file: StaticString = #filePath, line: UInt = #line ) throws { // The only use of a custom callback is on Darwin... 
#if os(Linux) return try assertHandshakeSucceededInMemory( withClientConfig: clientConfig, andServerConfig: serverConfig, file: file, line: line ) #else return try assertHandshakeSucceededEventLoop( withClientConfig: clientConfig, andServerConfig: serverConfig, file: file, line: line ) #endif } func assertHandshakeSucceeded( withClientContext clientContext: NIOSSLContext, andServerContext serverContext: NIOSSLContext, file: StaticString = #filePath, line: UInt = #line ) throws { // The only use of a custom callback is on Darwin... #if os(Linux) return try self.assertHandshakeSucceededInMemory( withClientContext: clientContext, andServerContext: serverContext, file: file, line: line ) #else return try self.assertHandshakeSucceededEventLoop( withClientContext: clientContext, andServerContext: serverContext, file: file, line: line ) #endif } func setupTLSLeafandClientIdentitiesFromCustomCARoot() throws -> ( leafCert: NIOSSLCertificate, leafKey: NIOSSLPrivateKey, clientCert: NIOSSLCertificate, clientKey: NIOSSLPrivateKey ) { let leaf = try NIOSSLCertificate(bytes: .init(leafCertificateForTLSIssuedFromCustomCARoot.utf8), format: .pem) let leaf_privateKey = try NIOSSLPrivateKey.init(bytes: .init(privateKeyForLeafCertificate.utf8), format: .pem) let client_cert = try NIOSSLCertificate( bytes: .init(leafCertificateForClientAuthenticationIssuedFromCustomCARoot.utf8), format: .pem ) let client_privateKey = try NIOSSLPrivateKey.init( bytes: .init(privateKeyForClientAuthentication.utf8), format: .pem ) return (leaf, leaf_privateKey, client_cert, client_privateKey) } // Note that this is a stub to create the rehash file format for a certificate. // If needed in the future the numericExtension should be reworked to check for duplicates and increment as applicable. func getRehashFilename(path: String, testName: String, numericExtension: Int) -> String { var cert: NIOSSLCertificate! 
if path.suffix(4) == ".pem" { XCTAssertNoThrow(cert = try NIOSSLCertificate.fromPEMFile(path).first) } else { XCTAssertNoThrow(cert = try NIOSSLCertificate.fromDERFile(path)) } // Create a rehash format filename to symlink the hard file above to. let originalSubjectName = cert.getSubjectNameHash() let truncatedHash = String(format: "%08lx.%d", originalSubjectName, numericExtension) let tempDirPath = FileManager.default.temporaryDirectory.path + "/" + testName + "/" return tempDirPath + truncatedHash } func testNonOverlappingTLSVersions() throws { var clientConfig = TLSConfiguration.clientDefault clientConfig.minimumTLSVersion = .tlsv11 clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1]) var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.maximumTLSVersion = .tlsv1 try assertHandshakeError( withClientConfig: clientConfig, andServerConfig: serverConfig, errorTextContains: "ALERT_PROTOCOL_VERSION" ) } func testNonOverlappingCipherSuitesPreTLS13() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.cipherSuiteValues = [.TLS_RSA_WITH_AES_128_CBC_SHA] clientConfig.maximumTLSVersion = .tlsv12 clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1]) var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.cipherSuiteValues = [.TLS_RSA_WITH_AES_256_CBC_SHA] serverConfig.maximumTLSVersion = .tlsv12 try assertHandshakeError( withClientConfig: clientConfig, andServerConfig: serverConfig, errorTextContains: "ALERT_HANDSHAKE_FAILURE" ) } func testCannotVerifySelfSigned() throws { let clientConfig = TLSConfiguration.makeClientConfiguration() let serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: 
[.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) try assertHandshakeError( withClientConfig: clientConfig, andServerConfig: serverConfig, errorTextContains: "CERTIFICATE_VERIFY_FAILED" ) } func testServerCannotValidateClientPreTLS13() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.maximumTLSVersion = .tlsv12 clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1]) clientConfig.certificateChain = [.certificate(TLSConfigurationTest.cert2)] clientConfig.privateKey = .privateKey(TLSConfigurationTest.key2) var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.maximumTLSVersion = .tlsv12 serverConfig.certificateVerification = .noHostnameVerification try assertHandshakeError( withClientConfig: clientConfig, andServerConfig: serverConfig, errorTextContainsAnyOf: ["ALERT_UNKNOWN_CA", "ALERT_CERTIFICATE_UNKNOWN"] ) } func testServerCannotValidateClientPostTLS13() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.minimumTLSVersion = .tlsv13 clientConfig.certificateVerification = .noHostnameVerification clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1]) clientConfig.certificateChain = [.certificate(TLSConfigurationTest.cert2)] clientConfig.privateKey = .privateKey(TLSConfigurationTest.key2) var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.minimumTLSVersion = .tlsv13 serverConfig.certificateVerification = .noHostnameVerification try assertPostHandshakeError( withClientConfig: clientConfig, andServerConfig: serverConfig, errorTextContainsAnyOf: ["ALERT_UNKNOWN_CA", "ALERT_CERTIFICATE_UNKNOWN"] ) } func 
testMutualValidationWithCertVerificationOptionalSuccess_NoPeerCert() throws { // The client doesn't present a cert chain var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.certificateVerification = .noHostnameVerification clientConfig.trustRoots = .default clientConfig.additionalTrustRoots = [.certificates([TLSConfigurationTest.cert1])] var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) // The server sets `certificateVerification` to `optionalVerification`; handshake should succeed when the client // hasn't presented any certs serverConfig.certificateVerification = .optionalVerification serverConfig.trustRoots = .default try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig) } func testMutualValidationWithCertVerificationOptionalError_PeerCertNotTrusted() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.certificateChain = [.certificate(TLSConfigurationTest.cert2)] clientConfig.privateKey = .privateKey(TLSConfigurationTest.key2) clientConfig.certificateVerification = .noHostnameVerification clientConfig.trustRoots = .default clientConfig.additionalTrustRoots = [.certificates([TLSConfigurationTest.cert1])] var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.certificateVerification = .optionalVerification serverConfig.trustRoots = .default // The server doesn't trust any additional roots; the cert presented by the client will not be trusted serverConfig.additionalTrustRoots = [] try assertPostHandshakeError( withClientConfig: clientConfig, andServerConfig: serverConfig, errorTextContainsAnyOf: ["SSLV3_ALERT_CERTIFICATE_UNKNOWN", "TLSV1_ALERT_UNKNOWN_CA"] ) } func 
testMutualValidationWithCertVerificationOptionalSuccess_PeerCertTrusted() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.certificateChain = [.certificate(TLSConfigurationTest.cert2)] clientConfig.privateKey = .privateKey(TLSConfigurationTest.key2) clientConfig.certificateVerification = .noHostnameVerification clientConfig.trustRoots = .default clientConfig.additionalTrustRoots = [.certificates([TLSConfigurationTest.cert1])] var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.certificateVerification = .optionalVerification serverConfig.trustRoots = .default // The server trusts the cert presented by the client; we expect a successful handshake serverConfig.additionalTrustRoots = [.certificates([TLSConfigurationTest.cert2])] try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig) } func testMutualValidationRequiresClientCertificatePreTLS13() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.maximumTLSVersion = .tlsv12 clientConfig.certificateVerification = .none var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.maximumTLSVersion = .tlsv12 serverConfig.certificateVerification = .noHostnameVerification serverConfig.trustRoots = .certificates([TLSConfigurationTest.cert2]) try assertHandshakeError( withClientConfig: clientConfig, andServerConfig: serverConfig, errorTextContainsAnyOf: ["ALERT_HANDSHAKE_FAILURE"] ) } func testMutualValidationRequiresClientCertificatePostTLS13() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.minimumTLSVersion = .tlsv13 clientConfig.certificateVerification = .none var serverConfig = TLSConfiguration.makeServerConfiguration( 
certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.minimumTLSVersion = .tlsv13 serverConfig.certificateVerification = .noHostnameVerification serverConfig.trustRoots = .certificates([TLSConfigurationTest.cert2]) try assertPostHandshakeError( withClientConfig: clientConfig, andServerConfig: serverConfig, errorTextContainsAnyOf: ["CERTIFICATE_REQUIRED"] ) } func testIncompatibleSignatures() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.verifySignatureAlgorithms = [.ecdsaSecp384R1Sha384] clientConfig.minimumTLSVersion = .tlsv13 clientConfig.certificateVerification = .noHostnameVerification clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1]) clientConfig.renegotiationSupport = .none var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.signingSignatureAlgorithms = [.rsaPssRsaeSha256] serverConfig.minimumTLSVersion = .tlsv13 serverConfig.certificateVerification = .none try assertHandshakeError( withClientConfig: clientConfig, andServerConfig: serverConfig, errorTextContains: "ALERT_HANDSHAKE_FAILURE" ) } func testCompatibleSignatures() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.minimumTLSVersion = .tlsv13 clientConfig.certificateVerification = .noHostnameVerification clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1]) var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.signingSignatureAlgorithms = [.rsaPssRsaeSha256] serverConfig.minimumTLSVersion = .tlsv13 serverConfig.certificateVerification = .none try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig) } func 
testMatchingCompatibleSignatures() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.verifySignatureAlgorithms = [.rsaPssRsaeSha256] clientConfig.minimumTLSVersion = .tlsv13 clientConfig.certificateVerification = .noHostnameVerification clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1]) clientConfig.renegotiationSupport = .none var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.signingSignatureAlgorithms = [.rsaPssRsaeSha256] serverConfig.minimumTLSVersion = .tlsv13 serverConfig.certificateVerification = .none try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig) } func testMutualValidationSuccessNoAdditionalTrustRoots() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.certificateVerification = .noHostnameVerification clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1]) clientConfig.renegotiationSupport = .none let serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig) } func testMutualValidationSuccessWithDefaultAndAdditionalTrustRoots() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.certificateVerification = .noHostnameVerification clientConfig.trustRoots = .default clientConfig.renegotiationSupport = .none clientConfig.additionalTrustRoots = [.certificates([TLSConfigurationTest.cert1])] var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.trustRoots = .default serverConfig.additionalTrustRoots = 
[.certificates([TLSConfigurationTest.cert2])] try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig) } func testMutualValidationSuccessWithOnlyAdditionalTrustRoots() throws { var clientConfig = TLSConfiguration.makeClientConfiguration() clientConfig.certificateVerification = .noHostnameVerification clientConfig.trustRoots = .certificates([]) clientConfig.renegotiationSupport = .none clientConfig.additionalTrustRoots = [.certificates([TLSConfigurationTest.cert1])] var serverConfig = TLSConfiguration.makeServerConfiguration( certificateChain: [.certificate(TLSConfigurationTest.cert1)], privateKey: .privateKey(TLSConfigurationTest.key1) ) serverConfig.trustRoots = .certificates([]) serverConfig.additionalTrustRoots = [.certificates([TLSConfigurationTest.cert2])] try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig) } func testFullVerificationWithCANamesFromCertificate() throws { // Custom certificates for TLS and client authentication. let root = try NIOSSLCertificate(bytes: .init(customCARoot.utf8), format: .pem) let digitalIdentities = try setupTLSLeafandClientIdentitiesFromCustomCARoot() // Client Configuration. // // This configuration disables hostname verification because the hostname verification // code requires IP addresses, which we don't have in EmbeddedChannel. We override the // trust roots to prevent execution of the SecurityFramework verification code, which doesn't // work with EmbeddedChannel. 
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.renegotiationSupport = .none
    clientConfig.certificateChain = [.certificate(digitalIdentities.clientCert)]
    clientConfig.privateKey = .privateKey(digitalIdentities.clientKey)
    clientConfig.trustRoots = .certificates([root])
    clientConfig.certificateVerification = .noHostnameVerification

    // Server Configuration
    //
    // This configuration disables hostname verification because the hostname verification
    // code requires IP addresses, which we don't have in EmbeddedChannel. We override the
    // trust roots to prevent execution of the SecurityFramework verification code, which doesn't
    // work with EmbeddedChannel.
    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(digitalIdentities.leafCert)],
        privateKey: .privateKey(digitalIdentities.leafKey)
    )
    serverConfig.sendCANameList = true
    serverConfig.trustRoots = .certificates([root])
    serverConfig.certificateVerification = .noHostnameVerification

    let clientContext = try assertNoThrowWithValue(NIOSSLContext(configuration: clientConfig))
    let serverContext = try assertNoThrowWithValue(NIOSSLContext(configuration: serverConfig))

    // Validation that the CA names are being sent here.
    // This is essentially the heart of this unit test.
    let countAfter = serverContext.getX509NameListCount()
    XCTAssertEqual(countAfter, 1, "CA Name List should be 1 after the Server Context is created")

    let serverChannel = EmbeddedChannel()
    let clientChannel = EmbeddedChannel()
    defer {
        // Teardown is best-effort: errors from finish() are deliberately ignored.
        _ = try? serverChannel.finish()
        _ = try? clientChannel.finish()
    }
    XCTAssertNoThrow(
        try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: serverContext))
    )
    XCTAssertNoThrow(
        try clientChannel.pipeline.syncOperations.addHandler(
            NIOSSLClientHandler(context: clientContext, serverHostname: nil)
        )
    )
    let handshakeHandler = HandshakeCompletedHandler()
    XCTAssertNoThrow(try clientChannel.pipeline.addHandler(handshakeHandler).wait())

    // Connect. This should lead to a successful handshake.
    let addr = try assertNoThrowWithValue(SocketAddress(unixDomainSocketPath: "/tmp/whatever2"))
    clientChannel.connect(to: addr, promise: nil)
    serverChannel.pipeline.fireChannelActive()
    XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
    XCTAssertTrue(handshakeHandler.handshakeSucceeded)

    serverChannel.close(promise: nil)
    XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
}

func testFullVerificationWithCANamesFromFile() throws {
    // Custom certificates for TLS and client authentication.
    // In this test create the root certificate in the tmp directory and use it here to send the CA names.
    // This exercises the loadVerifyLocations file code path in SSLContext.
    let rootPath = try dumpToFile(data: .init(customCARoot.utf8), fileExtension: ".pem")
    let digitalIdentities = try setupTLSLeafandClientIdentitiesFromCustomCARoot()

    // Client Configuration.
    //
    // This configuration disables hostname verification because the hostname verification
    // code requires IP addresses, which we don't have in EmbeddedChannel. We override the
    // trust roots to prevent execution of the SecurityFramework verification code, which doesn't
    // work with EmbeddedChannel.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.renegotiationSupport = .none
    clientConfig.certificateChain = [.certificate(digitalIdentities.clientCert)]
    clientConfig.privateKey = .privateKey(digitalIdentities.clientKey)
    clientConfig.trustRoots = .file(rootPath)
    clientConfig.certificateVerification = .noHostnameVerification

    // Server Configuration
    //
    // This configuration disables hostname verification because the hostname verification
    // code requires IP addresses, which we don't have in EmbeddedChannel. We override the
    // trust roots to prevent execution of the SecurityFramework verification code, which doesn't
    // work with EmbeddedChannel.
    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(digitalIdentities.leafCert)],
        privateKey: .privateKey(digitalIdentities.leafKey)
    )
    serverConfig.sendCANameList = true
    serverConfig.trustRoots = .file(rootPath)
    serverConfig.certificateVerification = .noHostnameVerification

    let clientContext = try assertNoThrowWithValue(NIOSSLContext(configuration: clientConfig))
    let serverContext = try assertNoThrowWithValue(NIOSSLContext(configuration: serverConfig))

    // Validation that the CA names are being sent here.
    // This is essentially the heart of this unit test.
    let countAfter = serverContext.getX509NameListCount()
    XCTAssertEqual(countAfter, 1, "CA Name List should be 1 after the Server Context is created")

    let serverChannel = EmbeddedChannel()
    let clientChannel = EmbeddedChannel()
    defer {
        // Teardown is best-effort: errors from finish() are deliberately ignored.
        _ = try? serverChannel.finish()
        _ = try? clientChannel.finish()
    }
    XCTAssertNoThrow(
        try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: serverContext))
    )
    XCTAssertNoThrow(
        try clientChannel.pipeline.syncOperations.addHandler(
            NIOSSLClientHandler(context: clientContext, serverHostname: nil)
        )
    )
    let handshakeHandler = HandshakeCompletedHandler()
    XCTAssertNoThrow(try clientChannel.pipeline.addHandler(handshakeHandler).wait())

    // Connect. This should lead to a completed handshake.
    let addr = try assertNoThrowWithValue(SocketAddress(unixDomainSocketPath: "/tmp/whatever2"))
    clientChannel.connect(to: addr, promise: nil)
    serverChannel.pipeline.fireChannelActive()
    XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
    XCTAssertTrue(handshakeHandler.handshakeSucceeded)

    serverChannel.close(promise: nil)
    XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))

    // Clean up the temporary root certificate file.
    XCTAssertNoThrow(try FileManager.default.removeItem(at: URL(string: "file://" + rootPath)!))
}

func testRehashFormatToPopulateCANamesFromDirectory() throws {
    // Use the test name as the directory name in the temporary directory.
    let testName = String("\(#function)".dropLast(2))
    // Create 2 PEM based certs
    let rootCAPathOne = try dumpToFile(data: .init(customCARoot.utf8), fileExtension: ".pem", customPath: testName)
    let rootCAPathTwo = try dumpToFile(
        data: .init(secondaryRootCertificateForClientAuthentication.utf8),
        fileExtension: ".pem",
        customPath: testName
    )
    // Create a rehash formatted name of both certificate's subject name that was created above.
    // Take these rehash certificate names and format a symlink with them below with createSymbolicLink.
    let rehashSymlinkNameOne = getRehashFilename(path: rootCAPathOne, testName: testName, numericExtension: 0)
    let rehashSymlinkNameTwo = getRehashFilename(path: rootCAPathTwo, testName: testName, numericExtension: 0)
    // Extract just the filename of the newly created certs in the tmp directory.
    let rootCAURLOne = URL(string: "file://" + rootCAPathOne)!
    let rootCAURLTwo = URL(string: "file://" + rootCAPathTwo)!
    let rootCAFilenameOne = rootCAURLOne.lastPathComponent
    let rootCAFilenameTwo = rootCAURLTwo.lastPathComponent

    // Create an in-directory symlink the same way that c_rehash would do this.
    // For example: 7f44456a.0 -> niotestIEOFcMI.pem
    // NOT: 7f44456a.0 -> /var/folders/my/path/niotestIEOFcMI.pem
    XCTAssertNoThrow(
        try FileManager.default.createSymbolicLink(
            atPath: rehashSymlinkNameOne,
            withDestinationPath: rootCAFilenameOne
        )
    )
    XCTAssertNoThrow(
        try FileManager.default.createSymbolicLink(
            atPath: rehashSymlinkNameTwo,
            withDestinationPath: rootCAFilenameTwo
        )
    )

    defer {
        // Delete all files that were created for this test.
        XCTAssertNoThrow(try FileManager.default.removeItem(at: rootCAURLOne))
        XCTAssertNoThrow(try FileManager.default.removeItem(at: rootCAURLTwo))
        XCTAssertNoThrow(try FileManager.default.removeItem(at: URL(string: "file://" + rehashSymlinkNameOne)!))
        XCTAssertNoThrow(try FileManager.default.removeItem(at: URL(string: "file://" + rehashSymlinkNameTwo)!))
        // Remove the actual directory also.
        let removePath = "\(FileManager.default.temporaryDirectory.path)/\(testName)/"
        XCTAssertNoThrow(try FileManager.default.removeItem(at: URL(string: "file://" + removePath)!))
    }

    let tempFileDir = FileManager.default.temporaryDirectory.path + "/\(testName)/"
    let digitalIdentities = try setupTLSLeafandClientIdentitiesFromCustomCARoot()

    // Server Configuration
    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(digitalIdentities.leafCert)],
        privateKey: .privateKey(digitalIdentities.leafKey)
    )
    serverConfig.sendCANameList = true
    serverConfig.trustRoots = .file(tempFileDir)  // Directory path.
    serverConfig.certificateVerification = .fullVerification

    var serverContext: NIOSSLContext!
    XCTAssertNoThrow(serverContext = try NIOSSLContext(configuration: serverConfig))

    // Only setup the serverContext here to define that our two certificate CA names were populated to the SSL_CTX.
    let countAfter = serverContext.getX509NameListCount()
    XCTAssertEqual(countAfter, 2, "CA Name List should be 2 after the Server Context is created")
}

func testRehashFormat() throws {
    // Use the test name as the directory name in the temporary directory.
    let testName = String("\(#function)".dropLast(2))

    // This test case creates path variables and files to run through the `isRehashFormat` function in `NIOSSLContext`.
    // Note that the c_rehash file format is a symlink to an original PEM or CER file in the form of HHHHHHHH.D.
    // Note that CRLs are not supported, only PEM and DER representations of certificates.

    // Not a valid path.
    let badPath = try NIOSSLContext._isRehashFormat(path: "")
    XCTAssertFalse(badPath)

    // Filename is not in rehash format.
    let acceptablePathBadFilename = try NIOSSLContext._isRehashFormat(path: "/etc/ssl/certs/myFile.pem")
    XCTAssertFalse(acceptablePathBadFilename)

    // Filename is in bad rehash format.
    let acceptablePathBadRehashFormat = try NIOSSLContext._isRehashFormat(path: "/etc/ssl/certs/7f44456a.z")
    XCTAssertFalse(acceptablePathBadRehashFormat)

    // Test with an actual file, but no symlink.
    let dummyFile = try dumpToFile(data: Data(), fileExtension: ".txt", customPath: testName)
    let newPath = FileManager.default.temporaryDirectory.path + "/\(testName)/7f44456a.1"
    let _ = try FileManager.default.moveItem(atPath: dummyFile, toPath: newPath)

    // Filename is in rehash format, but not a symlink.
    let acceptablePathAndRehashFormatButNoSymlink = try NIOSSLContext._isRehashFormat(path: newPath)
    XCTAssertFalse(acceptablePathAndRehashFormatButNoSymlink)

    // Test actual symlink
    let rootCAPathOne = try dumpToFile(data: .init(customCARoot.utf8), fileExtension: ".pem", customPath: testName)
    let rehashSymlinkName = getRehashFilename(path: rootCAPathOne, testName: testName, numericExtension: 0)

    // Extract just the filename of the newly created certs in the tmp directory.
    let rootCAURLOne = URL(string: "file://" + rootCAPathOne)!
    let rootCAFilenameOne = rootCAURLOne.lastPathComponent
    XCTAssertNoThrow(
        try FileManager.default.createSymbolicLink(
            atPath: rehashSymlinkName,
            withDestinationPath: rootCAFilenameOne
        )
    )

    defer {
        // Delete all files that were created for this test.
        XCTAssertNoThrow(try FileManager.default.removeItem(at: URL(string: "file://" + rootCAPathOne)!))
        XCTAssertNoThrow(try FileManager.default.removeItem(at: URL(string: "file://" + rehashSymlinkName)!))
        XCTAssertNoThrow(try FileManager.default.removeItem(at: URL(string: "file://" + newPath)!))
        // Remove the actual directory also.
        let removePath = "\(FileManager.default.temporaryDirectory.path)/\(testName)/"
        XCTAssertNoThrow(try FileManager.default.removeItem(at: URL(string: "file://" + removePath)!))
    }

    // Test the success case for the symlink
    let successSymlink = try NIOSSLContext._isRehashFormat(path: rehashSymlinkName)
    XCTAssertTrue(successSymlink)
}

func testNonexistentFileObject() throws {
    // Pointing trustRoots at a missing filesystem path must fail context creation
    // with .noSuchFilesystemObject.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.trustRoots = .file("/thispathbetternotexist/bogus.foo")
    XCTAssertThrowsError(try NIOSSLContext(configuration: clientConfig)) { error in
        XCTAssertEqual(.noSuchFilesystemObject, error as?
NIOSSLError)
    }
}

func testComputedApplicationProtocols() throws {
    // applicationProtocols must round-trip, and encodedApplicationProtocols must be the
    // ALPN wire format: a length byte followed by the protocol's ASCII bytes.
    var config = TLSConfiguration.makeServerConfiguration(
        certificateChain: [],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    config.applicationProtocols = ["http/1.1"]
    XCTAssertEqual(config.applicationProtocols, ["http/1.1"])
    XCTAssertEqual(config.encodedApplicationProtocols, [[8, 104, 116, 116, 112, 47, 49, 46, 49]])
    config.applicationProtocols.insert("h2", at: 0)
    XCTAssertEqual(config.applicationProtocols, ["h2", "http/1.1"])
    XCTAssertEqual(config.encodedApplicationProtocols, [[2, 104, 50], [8, 104, 116, 116, 112, 47, 49, 46, 49]])
}

func testKeyLogManagerOverlappingAccess() throws {
    // Tests that we can have overlapping calls to the log() function of the keylog manager.
    // This test fails probabilistically! DO NOT IGNORE INTERMITTENT FAILURES OF THIS TEST.
    let semaphore = DispatchSemaphore(value: 0)
    let group = DispatchGroup()
    let completionsQueue = DispatchQueue(label: "completionsQueue")
    let completions: UnsafeMutableTransferBox<[Bool]> = .init([])
    let keylogManager = KeyLogCallbackManager { _ in
        completionsQueue.sync {
            completions.wrappedValue.append(true)
            // Block inside the callback so the two log() calls overlap in time.
            semaphore.wait()
        }
        group.leave()
    }

    // Now we call log twice, from different threads. These will not complete right away so we
    // do those on background threads. They should not both complete.
    group.enter()
    group.enter()
    DispatchQueue(label: "first-thread").async {
        keylogManager.log("hello!")
    }
    DispatchQueue(label: "second-thread").async {
        keylogManager.log("world!")
    }

    // We now sleep a short time to let everything catch up and the runtime catch any exclusivity violation.
    // 10ms is fine.
    usleep(10_000)

    // Great, signal the semaphore twice to un-wedge everything and wait for everything to exit.
    semaphore.signal()
    semaphore.signal()
    group.wait()
    XCTAssertEqual([true, true], completionsQueue.sync { completions.wrappedValue })
}

func testTheSameHashValue() {
    // A value-copied configuration must hash and compare best-effort-equal to the original.
    var config = TLSConfiguration.makeServerConfiguration(
        certificateChain: [],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    config.applicationProtocols = ["http/1.1"]
    let theSameConfig = config
    var hasher = Hasher()
    var hasher2 = Hasher()
    config.bestEffortHash(into: &hasher)
    theSameConfig.bestEffortHash(into: &hasher2)
    XCTAssertEqual(hasher.finalize(), hasher2.finalize())
    XCTAssertTrue(config.bestEffortEquals(theSameConfig))
}

func testDifferentHashValues() {
    // Changing the private key must break best-effort equality.
    var config = TLSConfiguration.makeServerConfiguration(
        certificateChain: [],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    config.applicationProtocols = ["http/1.1"]
    var differentConfig = config
    differentConfig.privateKey = .privateKey(TLSConfigurationTest.key2)
    XCTAssertFalse(config.bestEffortEquals(differentConfig))
}

func testDifferentCallbacksNotEqual() {
    // Two distinct closure instances are never best-effort-equal, even if their bodies match.
    var config = TLSConfiguration.makeServerConfiguration(
        certificateChain: [],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    config.applicationProtocols = ["http/1.1"]
    config.keyLogCallback = { _ in }
    var differentConfig = config
    differentConfig.keyLogCallback = { _ in }
    XCTAssertFalse(config.bestEffortEquals(differentConfig))
}

func testDifferentSSLContextCallbacksNotEqual() throws {
    // Same as above, but for the sslContextCallback closure.
    var config = TLSConfiguration.makeServerConfiguration(
        certificateChain: [],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    config.applicationProtocols = ["http/1.1"]
    config.sslContextCallback = { _, _ in }
    var differentConfig = config
    differentConfig.sslContextCallback = { _, _ in }
    XCTAssertFalse(config.bestEffortEquals(differentConfig))
}

func testCompatibleCurves() throws {
    // Both sides restrict themselves to x25519, so key agreement must succeed.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.curves = [.x25519]
    clientConfig.cipherSuiteValues = [.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384]
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
    clientConfig.renegotiationSupport = .none

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.cipherSuiteValues = [.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384]
    serverConfig.curves = [.x25519]
    serverConfig.maximumTLSVersion = .tlsv12
    serverConfig.certificateVerification = .none

    try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
}

func testMultipleCompatibleCurves() throws {
    // The client offers x25519 only; the server supports x25519 plus secp256r1, so the
    // curve lists overlap and the handshake succeeds.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.curves = [.x25519]
    clientConfig.cipherSuiteValues = [.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384]
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
    clientConfig.renegotiationSupport = .none

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.curves = [.x25519, .secp256r1]
    serverConfig.cipherSuiteValues = [.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384]
    serverConfig.maximumTLSVersion = .tlsv12
    serverConfig.certificateVerification = .none

    try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
}

func testNonCompatibleCurves() throws {
    // Client (secp521r1) and server (x25519) curve lists are disjoint: the handshake must fail.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.curves = [.secp521r1]
    clientConfig.cipherSuiteValues = [.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384]
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
    clientConfig.renegotiationSupport = .none

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.curves = [.x25519]
    serverConfig.cipherSuiteValues = [.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384]
    serverConfig.maximumTLSVersion = .tlsv12
    serverConfig.certificateVerification = .none

    try assertHandshakeError(
        withClientConfig: clientConfig,
        andServerConfig: serverConfig,
        errorTextContains: "ALERT_HANDSHAKE_FAILURE"
    )
}

func testPQCompatibleCurves() throws {
    // Both sides explicitly enable the x25519+ML-KEM768 hybrid group, so the handshake succeeds.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.curves = [.x25519_MLKEM768]
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.curves = [.x25519_MLKEM768]
    serverConfig.certificateVerification = .none

    try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
}

func testDefaultCurvesExcludePQ() throws {
    // The server keeps its default curve list, which does not contain the hybrid group
    // the client insists on, so the handshake must fail.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.curves = [.x25519_MLKEM768]
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.certificateVerification = .none

    try assertHandshakeError(
        withClientConfig: clientConfig,
        andServerConfig: serverConfig,
        errorTextContains: "ALERT_HANDSHAKE_FAILURE"
    )
}

func testUnknownCurveValuesFail() throws {
    // An unrecognised curve code point must be rejected when the NIOSSLContext is created.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.curves = [.init(rawValue: 0x9898)]
    XCTAssertThrowsError(try NIOSSLContext(configuration: clientConfig)) { error in
        XCTAssertTrue(
            String(describing: error).contains("UNSUPPORTED_ELLIPTIC_CURVE"),
            "Error \(error) does not contain UNSUPPORTED_ELLIPTIC_CURVE"
        )
    }
}

func testCompatibleCipherSuite() throws {
    // ECDHE_RSA is used here because the public key in .cert1 is derived from a RSA private key.
    // These could also be RSA based, but cannot be ECDHE_ECDSA.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.cipherSuiteValues = [.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
    clientConfig.renegotiationSupport = .none

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.cipherSuiteValues = [.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]
    serverConfig.maximumTLSVersion = .tlsv12
    serverConfig.certificateVerification = .none

    try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
}

func testNonCompatibleCipherSuite() throws {
    // This test fails more importantly because ECDHE_ECDSA is being set with a public key that is RSA based.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.cipherSuiteValues = [.TLS_RSA_WITH_AES_128_GCM_SHA256]
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
    clientConfig.renegotiationSupport = .none

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.cipherSuiteValues = [.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256]
    serverConfig.maximumTLSVersion = .tlsv12
    serverConfig.certificateVerification = .none

    try assertHandshakeError(
        withClientConfig: clientConfig,
        andServerConfig: serverConfig,
        errorTextContains: "ALERT_HANDSHAKE_FAILURE"
    )
}

func testDefaultWithRSACipherSuite() throws {
    // The single RSA suite offered by the client must overlap with the server's default cipher list.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.cipherSuiteValues = [.TLS_RSA_WITH_AES_128_GCM_SHA256]
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
    clientConfig.renegotiationSupport = .none

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.cipherSuites = defaultCipherSuites
    serverConfig.maximumTLSVersion = .tlsv12
    serverConfig.certificateVerification = .none

    try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
}

func testDefaultWithECDHERSACipherSuite() throws {
    // The single ECDHE_RSA suite offered by the client must overlap with the server's default cipher list.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.cipherSuiteValues = [.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384]
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
    clientConfig.renegotiationSupport = .none

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.cipherSuites = defaultCipherSuites
    serverConfig.maximumTLSVersion = .tlsv12
    serverConfig.certificateVerification = .none

    try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
}

func testStringBasedCipherSuite() throws {
    // Both sides configure ciphers via the string-based cipherSuites property.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.cipherSuites = "AES256"
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.cipherSuites = "AES256"
    serverConfig.maximumTLSVersion = .tlsv12

    try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
}

func testMultipleCompatibleCipherSuites() throws {
    // This test is for multiple ECDHE_RSA based ciphers on the server side.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.cipherSuiteValues = [.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
    clientConfig.renegotiationSupport = .none

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.cipherSuiteValues = [
        .TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
        .TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
        .TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
    ]
    serverConfig.maximumTLSVersion = .tlsv12
    serverConfig.certificateVerification = .none

    try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
}

func testMultipleCompatibleCipherSuitesWithStringBasedCipher() throws {
    // This test is for using multiple server side ciphers with the client side string based cipher.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.cipherSuites = "AES256"
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.cipherSuiteValues = [
        .TLS_RSA_WITH_AES_128_CBC_SHA,
        .TLS_RSA_WITH_AES_256_CBC_SHA,
        .TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
        .TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
    ]
    serverConfig.maximumTLSVersion = .tlsv12
    serverConfig.certificateVerification = .none

    try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
}

func testMultipleClientCipherSuitesWithDefaultCipher() throws {
    // Client ciphers should match one of the default ciphers.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.cipherSuiteValues = [
        .TLS_RSA_WITH_AES_128_CBC_SHA,
        .TLS_RSA_WITH_AES_256_CBC_SHA,
        .TLS_RSA_WITH_AES_128_GCM_SHA256,
        .TLS_RSA_WITH_AES_256_GCM_SHA384,
        .TLS_AES_128_GCM_SHA256,
        .TLS_AES_256_GCM_SHA384,
        .TLS_CHACHA20_POLY1305_SHA256,
        .TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
        .TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
        .TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
        .TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
        .TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
    ]
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
    clientConfig.renegotiationSupport = .none

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.cipherSuites = defaultCipherSuites
    serverConfig.maximumTLSVersion = .tlsv12
    serverConfig.certificateVerification = .none

    try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
}

func testNonCompatibleClientCiphersWithServerStringBasedCiphers() throws {
    // This test should fail on client hello negotiation.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.cipherSuiteValues = [
        .TLS_AES_128_GCM_SHA256,
        .TLS_AES_256_GCM_SHA384,
        .TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
    ]
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
    clientConfig.renegotiationSupport = .none

    var serverConfig = TLSConfiguration.makeServerConfiguration(
        certificateChain: [.certificate(TLSConfigurationTest.cert1)],
        privateKey: .privateKey(TLSConfigurationTest.key1)
    )
    serverConfig.cipherSuites = "AES256"
    serverConfig.maximumTLSVersion = .tlsv12

    try assertHandshakeError(
        withClientConfig: clientConfig,
        andServerConfig: serverConfig,
        errorTextContains: "ALERT_HANDSHAKE_FAILURE"
    )
}

func testSettingCiphersWithCipherSuiteValues() {
    // Assigning cipherSuiteValues must be reflected in the derived cipherSuites string.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.cipherSuiteValues = [
        .TLS_AES_128_GCM_SHA256,
        .TLS_AES_256_GCM_SHA384,
        .TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
    ]
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
    clientConfig.renegotiationSupport = .none
    XCTAssertEqual(
        clientConfig.cipherSuites,
        "TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"
    )
}

func testSettingCiphersWithCipherSuitesString() {
    // Assigning the cipherSuites string must be reflected in the derived cipherSuiteValues.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.cipherSuites = "AES256"
    clientConfig.maximumTLSVersion = .tlsv12
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
    let assignedCiphers = clientConfig.cipherSuiteValues.map { $0.standardName }
    let createdCipherSuiteValuesFromString = assignedCiphers.joined(separator: ":")
    // Note that this includes the PSK values as well.
    XCTAssertEqual(
        createdCipherSuiteValuesFromString,
        "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA:TLS_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_256_CBC_SHA:TLS_PSK_WITH_AES_256_CBC_SHA"
    )
}

func testDefaultCipherSuiteValues() {
    // The default cipherSuites string must round-trip through cipherSuiteValues unchanged.
    var clientConfig = TLSConfiguration.makeClientConfiguration()
    clientConfig.certificateVerification = .noHostnameVerification
    clientConfig.trustRoots = .certificates([])
    clientConfig.renegotiationSupport = .none
    clientConfig.additionalTrustRoots = [.certificates([TLSConfigurationTest.cert1])]
    XCTAssertEqual(clientConfig.cipherSuites, defaultCipherSuites)
    let assignedCiphers = clientConfig.cipherSuiteValues.map { $0.standardName }
    let defaultCipherSuiteValuesFromString = assignedCiphers.joined(separator: ":")
    // Note that this includes the PSK values as well.
    XCTAssertEqual(
        defaultCipherSuiteValuesFromString,
        "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA:TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA:TLS_RSA_WITH_AES_128_GCM_SHA256:TLS_RSA_WITH_AES_256_GCM_SHA384:TLS_RSA_WITH_AES_128_CBC_SHA:TLS_RSA_WITH_AES_256_CBC_SHA"
    )
}

@available(
    *,
    deprecated,
    message: "`TLSConfiguration.pskClientCallback` and `TLSConfiguration.pskClientCallback` are deprecated"
)
func testBestEffortEquatableHashableDifferences() {
    // If this assertion fails, DON'T JUST CHANGE THE NUMBER HERE! Make sure you've added any appropriate transforms below
    // so that we're testing these best effort functions.
XCTAssertEqual(
            MemoryLayout<TLSConfiguration>.size,
            234,
            "TLSConfiguration has changed size: you probably need to update this test!"
        )

        let first = TLSConfiguration.makeClientConfiguration()

        // Callbacks/providers used by the PSK-related transforms below.
        let pskClientCallback: NIOPSKClientIdentityCallback = { (hint: String) -> PSKClientIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKClientIdentityResponse(key: psk, identity: "world")
        }

        let pskServerCallback: NIOPSKServerIdentityCallback = { (hint: String, identity: String) -> PSKServerIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKServerIdentityResponse(key: psk)
        }

        let pskClientProvider: NIOPSKClientIdentityProvider = { (context: PSKClientContext) -> PSKClientIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKClientIdentityResponse(key: psk, identity: "world")
        }

        let pskServerProvider: NIOPSKServerIdentityProvider = { (context: PSKServerContext) -> PSKServerIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKServerIdentityResponse(key: psk)
        }

        let sslContextCallback: NIOSSLContextCallback = { _, _ in }

        // One transform per property that participates in bestEffortEquals/bestEffortHash;
        // each must flip the comparison result relative to `first`.
        // (The original list contained `pskServerCallback` twice; the duplicate was removed.)
        let transforms: [(inout TLSConfiguration) -> Void] = [
            { $0.minimumTLSVersion = .tlsv13 },
            { $0.maximumTLSVersion = .tlsv12 },
            { $0.cipherSuites = "AES" },
            { $0.curves = [.x25519] },
            { $0.cipherSuiteValues = [.TLS_RSA_WITH_AES_256_CBC_SHA] },
            { $0.verifySignatureAlgorithms = [.ed25519] },
            { $0.signingSignatureAlgorithms = [.ed25519] },
            { $0.certificateVerification = .noHostnameVerification },
            { $0.trustRoots = .certificates([TLSConfigurationTest.cert1]) },
            { $0.additionalTrustRoots = [.certificates([TLSConfigurationTest.cert1])] },
            { $0.certificateChain = [.certificate(TLSConfigurationTest.cert1)] },
            { $0.privateKey = .privateKey(TLSConfigurationTest.key1) },
            { $0.applicationProtocols = ["h2"] },
            { $0.shutdownTimeout = .seconds((60 * 24 * 24) + 1) },
            { $0.keyLogCallback = { _ in } },
            { $0.renegotiationSupport = .always },
            { $0.sendCANameList = true },
            { $0.pskClientCallback = pskClientCallback },
            { $0.pskServerCallback = pskServerCallback },
            { $0.sslContextCallback = sslContextCallback },
            { $0.pskClientProvider = pskClientProvider },
            { $0.pskServerProvider = pskServerProvider },
            { $0.pskHint = "hint" },
        ]

        for (index, transform) in transforms.enumerated() {
            var transformed = first
            transform(&transformed)
            XCTAssertNotEqual(
                Wrapper(config: first),
                Wrapper(config: transformed),
                "Should have compared not equal in index \(index)"
            )
            XCTAssertEqual(
                Set([Wrapper(config: first), Wrapper(config: transformed)]).count,
                2,
                "Should have hashed non-equal in index \(index)"
            )
        }
    }

    // Handshakes with both peers capped at TLS 1.1 and checks that the negotiated
    // version reported via the pipeline and via the channel API is .tlsv11.
    func testObtainingTLSVersionOnClientChannel() throws {
        let b2b = BackToBackEmbeddedChannel()

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.maximumTLSVersion = .tlsv11
        clientConfig.certificateVerification = .noHostnameVerification
        clientConfig.trustRoots =
.certificates([TLSConfigurationTest.cert1])

        var serverConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(TLSConfigurationTest.cert1)],
            privateKey: .privateKey(TLSConfigurationTest.key1)
        )
        serverConfig.maximumTLSVersion = .tlsv11
        serverConfig.certificateVerification = .none

        let clientContext = try assertNoThrowWithValue(NIOSSLContext(configuration: clientConfig))
        let serverContext = try assertNoThrowWithValue(NIOSSLContext(configuration: serverConfig))

        XCTAssertNoThrow(
            try b2b.client.pipeline.syncOperations.addHandlers(
                [
                    try NIOSSLClientHandler(context: clientContext, serverHostname: "localhost"),
                    HandshakeCompletedHandler(),
                ]
            )
        )
        XCTAssertNoThrow(
            try b2b.server.pipeline.syncOperations.addHandlers(
                [NIOSSLServerHandler(context: serverContext), HandshakeCompletedHandler()]
            )
        )
        XCTAssertNoThrow(try b2b.connectInMemory())
        XCTAssertTrue(b2b.client.handshakeSucceeded)
        XCTAssertTrue(b2b.server.handshakeSucceeded)

        // The negotiated version must be visible both via the pipeline's sync
        // operations and via the channel-level convenience API.
        var tlsVersion: TLSVersion?
        XCTAssertNoThrow(tlsVersion = try b2b.client.pipeline.syncOperations.nioSSL_tlsVersion())
        XCTAssertEqual(tlsVersion!, .tlsv11)

        let tlsVersionForChannel = b2b.client.nioSSL_tlsVersion()
        var channelTLSVersion: TLSVersion?
        XCTAssertNoThrow(channelTLSVersion = try tlsVersionForChannel.wait())
        XCTAssertEqual(channelTLSVersion!, .tlsv11)
    }

    @available(
        *,
        deprecated,
        message: "`TLSConfiguration.pskClientCallback` and `TLSConfiguration.pskServerCallback` are deprecated"
    )
    func testTLSPSKWithTLS13Deprecated() throws {
        // The idea here is that adding PSKs with certificates in TLS 1.3 should NOT cause a failure.
        // Also note that the usage here of PSKs with TLS 1.3 is not supported by BoringSSL at this point.
        let pskClientCallback: NIOPSKClientIdentityCallback = { (hint: String) -> PSKClientIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            XCTAssertEqual(hint, "serverPskHint")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKClientIdentityResponse(key: psk, identity: "world")
        }

        let pskServerCallback: NIOPSKServerIdentityCallback = { (hint: String, identity: String) -> PSKServerIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            XCTAssertEqual(hint, "serverPskHint")
            XCTAssertEqual(identity, "world")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKServerIdentityResponse(key: psk)
        }

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .none
        clientConfig.trustRoots = .certificates([])
        clientConfig.minimumTLSVersion = .tlsv13
        clientConfig.maximumTLSVersion = .tlsv13
        clientConfig.pskClientCallback = pskClientCallback
        clientConfig.pskHint = "clientPskHint"

        var serverConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(TLSConfigurationTest.cert1)],
            privateKey: .privateKey(TLSConfigurationTest.key1)
        )
        serverConfig.minimumTLSVersion = .tlsv13
        serverConfig.maximumTLSVersion = .tlsv13
        serverConfig.certificateVerification = .none
        serverConfig.pskServerCallback = pskServerCallback
        serverConfig.pskHint = "serverPskHint"

        try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
    }

    @available(
        *,
        deprecated,
        message: "`TLSConfiguration.pskClientCallback` and `TLSConfiguration.pskServerCallback` are deprecated"
    )
    func testTLSPSKWithTLS12Deprecated() throws {
        // This test ensures that PSK-TLS is supported for TLS 1.2.
        let pskClientCallback: NIOPSKClientIdentityCallback = { (hint: String) -> PSKClientIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
XCTAssertEqual(hint, "serverPskHint")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKClientIdentityResponse(key: psk, identity: "world")
        }

        let pskServerCallback: NIOPSKServerIdentityCallback = { (hint: String, identity: String) -> PSKServerIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            XCTAssertEqual(hint, "serverPskHint")
            XCTAssertEqual(identity, "world")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKServerIdentityResponse(key: psk)
        }

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .none
        clientConfig.minimumTLSVersion = .tlsv1
        clientConfig.maximumTLSVersion = .tlsv12
        clientConfig.pskHint = "clientPskHint"
        clientConfig.pskClientCallback = pskClientCallback

        var serverConfig = TLSConfiguration.makePreSharedKeyConfiguration()
        serverConfig.minimumTLSVersion = .tlsv1
        serverConfig.maximumTLSVersion = .tlsv12
        serverConfig.pskServerCallback = pskServerCallback
        serverConfig.pskHint = "serverPskHint"

        try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
    }

    @available(
        *,
        deprecated,
        message: "`TLSConfiguration.pskClientCallback` and `TLSConfiguration.pskServerCallback` are deprecated"
    )
    func testTLSPSKWithPinnedCiphersDeprecated() throws {
        // This test ensures that PSK-TLS is supported with pinned ciphers.
        let pskClientCallback: NIOPSKClientIdentityCallback = { (hint: String) -> PSKClientIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            XCTAssertEqual(hint, "serverPskHint")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKClientIdentityResponse(key: psk, identity: "world")
        }

        let pskServerCallback: NIOPSKServerIdentityCallback = { (hint: String, identity: String) -> PSKServerIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            XCTAssertEqual(hint, "serverPskHint")
            XCTAssertEqual(identity, "world")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKServerIdentityResponse(key: psk)
        }

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .none
        clientConfig.minimumTLSVersion = .tlsv1
        clientConfig.maximumTLSVersion = .tlsv12
        clientConfig.pskClientCallback = pskClientCallback
        clientConfig.pskHint = "clientPskHint"
        // The original list repeated TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA; a duplicate
        // entry adds nothing to the offered cipher list, so it has been removed.
        clientConfig.cipherSuiteValues = [
            .TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
            .TLS_PSK_WITH_AES_128_CBC_SHA,
            .TLS_PSK_WITH_AES_256_CBC_SHA,
        ]

        var serverConfig = TLSConfiguration.makePreSharedKeyConfiguration()
        serverConfig.minimumTLSVersion = .tlsv1
        serverConfig.maximumTLSVersion = .tlsv12
        serverConfig.pskServerCallback = pskServerCallback
        serverConfig.pskHint = "serverPskHint"
        serverConfig.cipherSuiteValues = [
            .TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
            .TLS_PSK_WITH_AES_128_CBC_SHA,
            .TLS_PSK_WITH_AES_256_CBC_SHA,
        ]

        try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
    }

    @available(
        *,
        deprecated,
        message: "`TLSConfiguration.pskClientCallback` and `TLSConfiguration.pskServerCallback` are deprecated"
    )
    func testTLSPSKFailureDeprecated() throws {
        // This test ensures that different PSKs used on the client and server fail when passed in.
        let pskClientCallback: NIOPSKClientIdentityCallback = { (hint: String) -> PSKClientIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            XCTAssertEqual(hint, "serverPskHint")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKClientIdentityResponse(key: psk, identity: "world")
        }

        let pskServerCallback: NIOPSKServerIdentityCallback = { (hint: String, identity: String) -> PSKServerIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
XCTAssertEqual(hint, "serverPskHint")
            XCTAssertEqual(identity, "world")
            var psk = NIOSSLSecureBytes()
            // Deliberately returns a key ("server") that differs from the client's
            // ("hello"), so the handshake must fail with a bad record MAC.
            psk.append("server".utf8)  // Failure
            return PSKServerIdentityResponse(key: psk)
        }

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .none
        clientConfig.minimumTLSVersion = .tlsv1
        clientConfig.maximumTLSVersion = .tlsv12
        clientConfig.pskClientCallback = pskClientCallback
        clientConfig.pskHint = "clientPskHint"

        var serverConfig = TLSConfiguration.makePreSharedKeyConfiguration()
        serverConfig.minimumTLSVersion = .tlsv1
        serverConfig.maximumTLSVersion = .tlsv12
        serverConfig.pskServerCallback = pskServerCallback
        serverConfig.pskHint = "serverPskHint"

        try assertHandshakeError(
            withClientConfig: clientConfig,
            andServerConfig: serverConfig,
            errorTextContainsAnyOf: ["SSLV3_ALERT_BAD_RECORD_MAC"]
        )
    }

    @available(*, deprecated, message: "`.file` NIOSSLPrivateKeySource option deprecated")
    func testUnknownPrivateKeyFileType() throws {
        // A private key path with an unrecognised extension must be rejected at
        // context-construction time with .unknownPrivateKeyFileType.
        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.privateKey = .file("key.invalidExtension")
        XCTAssertThrowsError(try NIOSSLContext(configuration: clientConfig)) { error in
            guard let sslError = error as? NIOSSLExtraError else {
                return XCTFail("Expected NIOSSLExtraError but got \(error)")
            }
            XCTAssertEqual(sslError, .unknownPrivateKeyFileType)
        }
    }

    func testTLSPSKWithTLS13() throws {
        // The idea here is that adding PSKs with certificates in TLS 1.3 should NOT cause a failure.
        // Also note that the usage here of PSKs with TLS 1.3 is not supported by BoringSSL at this point.
        let pskClientProvider: NIOPSKClientIdentityProvider = { (context: PSKClientContext) -> PSKClientIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            XCTAssertEqual(context.hint, "serverPskHint")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKClientIdentityResponse(key: psk, identity: "world")
        }

        let pskServerProvider: NIOPSKServerIdentityProvider = { (context: PSKServerContext) -> PSKServerIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            XCTAssertEqual(context.hint, "serverPskHint")
            XCTAssertEqual(context.clientIdentity, "world")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKServerIdentityResponse(key: psk)
        }

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .none
        clientConfig.trustRoots = .certificates([])
        clientConfig.minimumTLSVersion = .tlsv13
        clientConfig.maximumTLSVersion = .tlsv13
        clientConfig.pskClientProvider = pskClientProvider
        clientConfig.pskHint = "clientPskHint"

        var serverConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(TLSConfigurationTest.cert1)],
            privateKey: .privateKey(TLSConfigurationTest.key1)
        )
        serverConfig.minimumTLSVersion = .tlsv13
        serverConfig.maximumTLSVersion = .tlsv13
        serverConfig.certificateVerification = .none
        serverConfig.pskServerProvider = pskServerProvider
        serverConfig.pskHint = "serverPskHint"

        try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
    }

    func testTLSPSKWithTLS12() throws {
        // This test ensures that PSK-TLS is supported for TLS 1.2.
        let pskClientProvider: NIOPSKClientIdentityProvider = { (context: PSKClientContext) -> PSKClientIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            XCTAssertEqual(context.hint, "serverPskHint")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKClientIdentityResponse(key: psk, identity: "world")
        }

        let pskServerProvider: NIOPSKServerIdentityProvider = { (context: PSKServerContext) -> PSKServerIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
XCTAssertEqual(context.hint, "serverPskHint")
            XCTAssertEqual(context.clientIdentity, "world")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKServerIdentityResponse(key: psk)
        }

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .none
        clientConfig.minimumTLSVersion = .tlsv1
        clientConfig.maximumTLSVersion = .tlsv12
        clientConfig.pskHint = "clientPskHint"
        clientConfig.pskClientProvider = pskClientProvider

        var serverConfig = TLSConfiguration.makePreSharedKeyConfiguration()
        serverConfig.minimumTLSVersion = .tlsv1
        serverConfig.maximumTLSVersion = .tlsv12
        serverConfig.pskServerProvider = pskServerProvider
        serverConfig.pskHint = "serverPskHint"

        try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
    }

    func testTLSPSKWithPinnedCiphers() throws {
        // This test ensures that PSK-TLS is supported with pinned ciphers.
        let pskClientProvider: NIOPSKClientIdentityProvider = { (context: PSKClientContext) -> PSKClientIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            XCTAssertEqual(context.hint, "serverPskHint")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKClientIdentityResponse(key: psk, identity: "world")
        }

        let pskServerProvider: NIOPSKServerIdentityProvider = { (context: PSKServerContext) -> PSKServerIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            XCTAssertEqual(context.hint, "serverPskHint")
            XCTAssertEqual(context.clientIdentity, "world")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKServerIdentityResponse(key: psk)
        }

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .none
        clientConfig.minimumTLSVersion = .tlsv1
        clientConfig.maximumTLSVersion = .tlsv12
        clientConfig.pskClientProvider = pskClientProvider
        clientConfig.pskHint = "clientPskHint"
        // The original list repeated TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA; a duplicate
        // entry adds nothing to the offered cipher list, so it has been removed.
        clientConfig.cipherSuiteValues = [
            .TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
            .TLS_PSK_WITH_AES_128_CBC_SHA,
            .TLS_PSK_WITH_AES_256_CBC_SHA,
        ]

        var serverConfig = TLSConfiguration.makePreSharedKeyConfiguration()
        serverConfig.minimumTLSVersion = .tlsv1
        serverConfig.maximumTLSVersion = .tlsv12
        serverConfig.pskServerProvider = pskServerProvider
        serverConfig.pskHint = "serverPskHint"
        serverConfig.cipherSuiteValues = [
            .TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
            .TLS_PSK_WITH_AES_128_CBC_SHA,
            .TLS_PSK_WITH_AES_256_CBC_SHA,
        ]

        try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
    }

    func testTLSPSKFailure() throws {
        // This test ensures that different PSKs used on the client and server fail when passed in.
        let pskClientProvider: NIOPSKClientIdentityProvider = { (context: PSKClientContext) -> PSKClientIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
            XCTAssertEqual(context.hint, "serverPskHint")
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKClientIdentityResponse(key: psk, identity: "world")
        }

        let pskServerProvider: NIOPSKServerIdentityProvider = { (context: PSKServerContext) -> PSKServerIdentityResponse in
            // Evaluate hint and clientIdentity to send back proper PSK.
XCTAssertEqual(context.hint, "serverPskHint")
            XCTAssertEqual(context.clientIdentity, "world")
            var psk = NIOSSLSecureBytes()
            // Deliberately returns a key ("server") that differs from the client's
            // ("hello"), so the handshake must fail with a bad record MAC.
            psk.append("server".utf8)  // Failure
            return PSKServerIdentityResponse(key: psk)
        }

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .none
        clientConfig.minimumTLSVersion = .tlsv1
        clientConfig.maximumTLSVersion = .tlsv12
        clientConfig.pskClientProvider = pskClientProvider
        clientConfig.pskHint = "clientPskHint"

        var serverConfig = TLSConfiguration.makePreSharedKeyConfiguration()
        serverConfig.minimumTLSVersion = .tlsv1
        serverConfig.maximumTLSVersion = .tlsv12
        serverConfig.pskServerProvider = pskServerProvider
        serverConfig.pskHint = "serverPskHint"

        try assertHandshakeError(
            withClientConfig: clientConfig,
            andServerConfig: serverConfig,
            errorTextContainsAnyOf: ["SSLV3_ALERT_BAD_RECORD_MAC"]
        )
    }

    func testTLSPSKNoServerHint() throws {
        let pseudoExpectation = ConditionLock(value: false)

        // This test ensures the PSK handshake still succeeds when the server offers no hint.
        let pskClientProvider: NIOPSKClientIdentityProvider = { (context: PSKClientContext) -> PSKClientIdentityResponse in
            pseudoExpectation.lock()
            pseudoExpectation.unlock(withValue: true)

            // Ensure server hint is nil
            XCTAssertEqual(context.hint, nil)

            // Evaluate hint and clientIdentity to send back proper PSK.
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKClientIdentityResponse(key: psk, identity: "world")
        }

        let pskServerProvider: NIOPSKServerIdentityProvider = { (context: PSKServerContext) -> PSKServerIdentityResponse in
            // Ensure server hint is nil
            XCTAssertEqual(context.hint, nil)
            XCTAssertEqual(context.clientIdentity, "world")

            // Evaluate hint and clientIdentity to send back proper PSK.
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKServerIdentityResponse(key: psk)
        }

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .none
        clientConfig.minimumTLSVersion = .tlsv1
        clientConfig.maximumTLSVersion = .tlsv12
        clientConfig.pskClientProvider = pskClientProvider
        clientConfig.pskHint = "clientPskHint"

        var serverConfig = TLSConfiguration.makePreSharedKeyConfiguration()
        serverConfig.minimumTLSVersion = .tlsv1
        serverConfig.maximumTLSVersion = .tlsv12
        serverConfig.pskServerProvider = pskServerProvider
        serverConfig.pskHint = nil

        try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)

        // The client provider must actually have run.
        XCTAssertTrue(pseudoExpectation.lock(whenValue: true, timeoutSeconds: 1))
        pseudoExpectation.unlock()
    }

    func testTLSPSKNoClientHint() throws {
        let pseudoExpectation = ConditionLock(value: false)

        // This test ensures the PSK handshake still succeeds when neither peer configures a hint.
        let pskClientProvider: NIOPSKClientIdentityProvider = { (context: PSKClientContext) -> PSKClientIdentityResponse in
            pseudoExpectation.lock()
            pseudoExpectation.unlock(withValue: true)

            // Ensure server hint is nil
            XCTAssertEqual(context.hint, nil)

            // Evaluate hint and clientIdentity to send back proper PSK.
            var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKClientIdentityResponse(key: psk, identity: "world")
        }

        let pskServerProvider: NIOPSKServerIdentityProvider = { (context: PSKServerContext) -> PSKServerIdentityResponse in
            // Ensure server hint is nil
            XCTAssertEqual(context.hint, nil)
            XCTAssertEqual(context.clientIdentity, "world")

            // Evaluate hint and clientIdentity to send back proper PSK.
var psk = NIOSSLSecureBytes()
            psk.append("hello".utf8)
            return PSKServerIdentityResponse(key: psk)
        }

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .none
        clientConfig.minimumTLSVersion = .tlsv1
        clientConfig.maximumTLSVersion = .tlsv12
        clientConfig.pskClientProvider = pskClientProvider
        clientConfig.pskHint = nil

        var serverConfig = TLSConfiguration.makePreSharedKeyConfiguration()
        serverConfig.minimumTLSVersion = .tlsv1
        serverConfig.maximumTLSVersion = .tlsv12
        serverConfig.pskServerProvider = pskServerProvider
        serverConfig.pskHint = nil

        try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)

        // The client provider must actually have run.
        XCTAssertTrue(pseudoExpectation.lock(whenValue: true, timeoutSeconds: 1))
        pseudoExpectation.unlock()
    }

    // Verifies that a client-side sslContextCallback can swap in a different
    // certificate/key pair that the server then accepts.
    func testClientSideCertSelection() throws {
        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .noHostnameVerification
        clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
        clientConfig.sslContextCallback = { _, promise in
            var `override` = NIOSSLContextConfigurationOverride()
            override.certificateChain = [.certificate(TLSConfigurationTest.cert2)]
            override.privateKey = .privateKey(TLSConfigurationTest.key2)
            promise.succeed(override)
        }

        var serverConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(TLSConfigurationTest.cert1)],
            privateKey: .privateKey(TLSConfigurationTest.key1)
        )
        serverConfig.certificateVerification = .noHostnameVerification
        serverConfig.trustRoots = .certificates([TLSConfigurationTest.cert2])

        try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
    }

    /// This test ensures that, when a certificate is overridden, only the new chain is sent, not the previous one.
    /// This test would have failed prior to the commit in which it was added.
    func testClientSideCertSelectionWithChain() throws {
        let (testIntermediate, _) = generateSelfSignedCert()
        let (testLeaf, privateKey) = generateSelfSignedCert()

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateChain = [.certificate(testLeaf), .certificate(testIntermediate)]
        clientConfig.privateKey = .privateKey(privateKey)
        clientConfig.certificateVerification = .noHostnameVerification
        clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
        // This callback should be a no-op, it returns the same certs we had already set anyway
        clientConfig.sslContextCallback = { _, promise in
            var `override` = NIOSSLContextConfigurationOverride()
            override.certificateChain = [.certificate(testLeaf), .certificate(testIntermediate)]
            override.privateKey = .privateKey(privateKey)
            promise.succeed(override)
        }

        var serverConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(TLSConfigurationTest.cert1)],
            privateKey: .privateKey(TLSConfigurationTest.key1)
        )
        serverConfig.certificateVerification = .noHostnameVerification

        try assertHandshakeSucceededEventLoop(
            withClientConfig: clientConfig,
            andServerConfig: serverConfig,
            serverCustomVerificationCallback: { certificates, promise in
                // Exactly the two overridden certs must be presented, in order.
                XCTAssertEqual(certificates.count, 2)
                XCTAssertEqual(certificates, [testLeaf, testIntermediate])
                // Always succeed for the purposes of this test
                promise.succeed(.certificateVerified)
            }
        )
    }

    // Server-side analogue of testClientSideCertSelection: the server's
    // sslContextCallback swaps in the cert/key pair the client trusts.
    func testServerSideCertSelection() throws {
        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .noHostnameVerification
        clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])

        var serverConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(TLSConfigurationTest.cert2)],
            privateKey: .privateKey(TLSConfigurationTest.key2)
        )
        serverConfig.sslContextCallback = { _, promise in
            var `override` = NIOSSLContextConfigurationOverride()
            override.certificateChain =
[.certificate(TLSConfigurationTest.cert1)]
            override.privateKey = .privateKey(TLSConfigurationTest.key1)
            promise.succeed(override)
        }

        try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
    }

    // An override that sets nothing must leave the original context configuration in effect.
    func testOverrideWithNothingIsFine() throws {
        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .noHostnameVerification
        clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])

        var serverConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(TLSConfigurationTest.cert1)],
            privateKey: .privateKey(TLSConfigurationTest.key1)
        )
        serverConfig.sslContextCallback = { _, promise in
            let `override` = NIOSSLContextConfigurationOverride()
            promise.succeed(override)
        }

        try assertHandshakeSucceeded(withClientConfig: clientConfig, andServerConfig: serverConfig)
    }

    // Overriding only the certificate (keeping the original key2) mismatches
    // cert and key, which must fail the handshake with an internal error alert.
    func testOverrideToInvalidCertFailsHandshake() throws {
        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .noHostnameVerification
        clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])

        var serverConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(TLSConfigurationTest.cert2)],
            privateKey: .privateKey(TLSConfigurationTest.key2)
        )
        serverConfig.sslContextCallback = { _, promise in
            var `override` = NIOSSLContextConfigurationOverride()
            override.certificateChain = [.certificate(TLSConfigurationTest.cert1)]
            promise.succeed(override)
        }

        try assertHandshakeError(
            withClientConfig: clientConfig,
            andServerConfig: serverConfig,
            errorTextContains: "TLSV1_ALERT_INTERNAL_ERROR"
        )
    }

    // Overriding only the private key (keeping the original cert1) must likewise
    // fail the handshake with an internal error alert.
    func testOverrideToInvalidKeyFailsHandshake() throws {
        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .noHostnameVerification
        clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])

        var serverConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(TLSConfigurationTest.cert1)],
            privateKey: .privateKey(TLSConfigurationTest.key1)
        )
        serverConfig.sslContextCallback = { _, promise in
            var `override` = NIOSSLContextConfigurationOverride()
            override.privateKey = .privateKey(TLSConfigurationTest.key2)
            promise.succeed(override)
        }

        try assertHandshakeError(
            withClientConfig: clientConfig,
            andServerConfig: serverConfig,
            errorTextContains: "TLSV1_ALERT_INTERNAL_ERROR"
        )
    }

    // A single NIOSSLContext reused across connections must invoke the client's
    // sslContextCallback once per handshake (5 handshakes -> 5 invocations).
    func testClientSideCertSelection_eachConnectionSelectsAgain() throws {
        let callbackCount = NIOLockedValueBox(0)

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .noHostnameVerification
        clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
        clientConfig.sslContextCallback = { _, promise in
            callbackCount.withLockedValue { $0 += 1 }
            var `override` = NIOSSLContextConfigurationOverride()
            override.certificateChain = [.certificate(TLSConfigurationTest.cert2)]
            override.privateKey = .privateKey(TLSConfigurationTest.key2)
            promise.succeed(override)
        }

        var serverConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(TLSConfigurationTest.cert1)],
            privateKey: .privateKey(TLSConfigurationTest.key1)
        )
        serverConfig.certificateVerification = .noHostnameVerification
        serverConfig.trustRoots = .certificates([TLSConfigurationTest.cert2])

        let clientContext = try assertNoThrowWithValue(
            NIOSSLContext(configuration: clientConfig)
        )
        let serverContext = try assertNoThrowWithValue(
            NIOSSLContext(configuration: serverConfig)
        )

        for _ in 0..<5 {
            try assertHandshakeSucceeded(withClientContext: clientContext, andServerContext: serverContext)
        }
        XCTAssertEqual(callbackCount.withLockedValue { $0 }, 5)
    }

    // Server-side analogue of the test above: the server's sslContextCallback
    // must run once per connection on a shared context.
    func testServerSideCertSelection_eachConnectionSelectsAgain() throws {
        let callbackCount = NIOLockedValueBox(0)

        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.certificateVerification = .noHostnameVerification
        clientConfig.trustRoots =
.certificates([TLSConfigurationTest.cert1])

        var serverConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(TLSConfigurationTest.cert2)],
            privateKey: .privateKey(TLSConfigurationTest.key2)
        )
        serverConfig.sslContextCallback = { _, promise in
            var `override` = NIOSSLContextConfigurationOverride()
            override.certificateChain = [.certificate(TLSConfigurationTest.cert1)]
            override.privateKey = .privateKey(TLSConfigurationTest.key1)
            callbackCount.withLockedValue { $0 += 1 }
            promise.succeed(override)
        }

        let clientContext = try assertNoThrowWithValue(
            NIOSSLContext(configuration: clientConfig)
        )
        let serverContext = try assertNoThrowWithValue(
            NIOSSLContext(configuration: serverConfig)
        )

        for _ in 0..<5 {
            try assertHandshakeSucceeded(withClientContext: clientContext, andServerContext: serverContext)
        }
        XCTAssertEqual(callbackCount.withLockedValue { $0 }, 5)
    }

    // Checks that makeServerConfigurationWithMTLS is best-effort-equal to a plain
    // server configuration plus default trust roots and no-hostname verification.
    func testCorrectSetUpOfMTLSContext() throws {
        var basicConfig = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(TLSConfigurationTest.cert2)],
            privateKey: .privateKey(TLSConfigurationTest.key2)
        )
        let mtlsConfig = TLSConfiguration.makeServerConfigurationWithMTLS(
            certificateChain: [.certificate(TLSConfigurationTest.cert2)],
            privateKey: .privateKey(TLSConfigurationTest.key2),
            trustRoots: .default
        )
        XCTAssertFalse(basicConfig.bestEffortEquals(mtlsConfig))

        basicConfig.trustRoots = .default
        basicConfig.certificateVerification = .noHostnameVerification
        XCTAssertTrue(basicConfig.bestEffortEquals(mtlsConfig))
    }

    // mTLS happy path: the client presents the certificate the server trusts.
    func testMTLSContext_happyPath() throws {
        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
        clientConfig.certificateChain = [.certificate(TLSConfigurationTest.cert2)]
        clientConfig.privateKey = .privateKey(TLSConfigurationTest.key2)
        clientConfig.certificateVerification = .noHostnameVerification

        let serverConfig = TLSConfiguration.makeServerConfigurationWithMTLS(
            certificateChain: [.certificate(TLSConfigurationTest.cert1)],
            privateKey: .privateKey(TLSConfigurationTest.key1),
            trustRoots: .certificates([TLSConfigurationTest.cert2])
        )

        let clientContext = try assertNoThrowWithValue(
            NIOSSLContext(configuration: clientConfig)
        )
        let serverContext = try assertNoThrowWithValue(
            NIOSSLContext(configuration: serverConfig)
        )

        try assertHandshakeSucceeded(withClientContext: clientContext, andServerContext: serverContext)
    }

    // mTLS failure: the client presents cert1 but the server only trusts cert2,
    // so an unknown-CA / certificate-unknown alert is expected after the handshake.
    func testMTLSContext_clientPresentsWrongCert() throws {
        var clientConfig = TLSConfiguration.makeClientConfiguration()
        clientConfig.trustRoots = .certificates([TLSConfigurationTest.cert1])
        clientConfig.certificateChain = [.certificate(TLSConfigurationTest.cert1)]
        clientConfig.privateKey = .privateKey(TLSConfigurationTest.key1)
        clientConfig.certificateVerification = .noHostnameVerification

        let serverConfig = TLSConfiguration.makeServerConfigurationWithMTLS(
            certificateChain: [.certificate(TLSConfigurationTest.cert1)],
            privateKey: .privateKey(TLSConfigurationTest.key1),
            trustRoots: .certificates([TLSConfigurationTest.cert2])
        )

        try assertPostHandshakeError(
            withClientConfig: clientConfig,
            andServerConfig: serverConfig,
            errorTextContainsAnyOf: ["ALERT_UNKNOWN_CA", "ALERT_CERTIFICATE_UNKNOWN"]
        )
    }
}

// Test helper: reads back the result recorded by the HandshakeCompletedHandler
// that the tests add to each pipeline.
extension EmbeddedChannel {
    fileprivate var handshakeSucceeded: Bool {
        let completedHandler = try!
self.pipeline.syncOperations.handler(type: HandshakeCompletedHandler.self)
        return completedHandler.handshakeSucceeded
    }
}

// Wraps a TLSConfiguration so that the best-effort equality/hash implementations
// can be exercised through Equatable/Hashable conformance (e.g. inside a Set).
struct Wrapper: Hashable {
    var config: TLSConfiguration

    static func == (lhs: Wrapper, rhs: Wrapper) -> Bool {
        lhs.config.bestEffortEquals(rhs.config)
    }

    func hash(into hasher: inout Hasher) {
        self.config.bestEffortHash(into: &hasher)
    }
}

================================================
FILE: Tests/NIOSSLTests/UnsafeTransfer.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2022 Apple Inc. and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

/// ``UnsafeMutableTransferBox`` can be used to make non-`Sendable` values `Sendable` and mutable.
/// It can be used to capture local mutable values in a `@Sendable` closure and mutate them from within the closure.
/// As the name implies, the usage of this is unsafe because it disables the sendable checking of the compiler and does not add any synchronisation.
@usableFromInline
final class UnsafeMutableTransferBox<Wrapped> {
    // The boxed value; mutation is the caller's responsibility to synchronise.
    @usableFromInline
    var wrappedValue: Wrapped

    @inlinable
    init(_ wrappedValue: Wrapped) {
        self.wrappedValue = wrappedValue
    }
}

extension UnsafeMutableTransferBox: @unchecked Sendable {}

================================================
FILE: Tests/NIOSSLTests/UnwrappingTests.swift
================================================
//===----------------------------------------------------------------------===//
//
// This source file is part of the SwiftNIO open source project
//
// Copyright (c) 2017-2021 Apple Inc.
// and the SwiftNIO project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of SwiftNIO project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

import NIOCore
import NIOEmbedded
import XCTest

@testable import NIOSSL

/// Drives a connect between two embedded channels and pumps the buffered bytes
/// between them until the connect future completes.
func connectInMemory(client: EmbeddedChannel, server: EmbeddedChannel) throws {
    let addr = try assertNoThrowWithValue(SocketAddress(unixDomainSocketPath: "/tmp/whatever2"))
    let connectFuture = client.connect(to: addr)
    server.pipeline.fireChannelActive()
    XCTAssertNoThrow(try interactInMemory(clientChannel: client, serverChannel: server))
    XCTAssertNoThrow(try connectFuture.wait())
}

extension ChannelPipeline.SynchronousOperations {
    /// Fails the test if the given handler is not present in this pipeline.
    func assertContains(handler: ChannelHandler, file: StaticString = #filePath, line: UInt = #line) {
        do {
            _ = try self.context(handler: handler)
        } catch {
            XCTFail("Handler \(handler) missing from \(self)", file: (file), line: line)
        }
    }

    /// Fails the test if the given handler is still present in this pipeline.
    func assertDoesNotContain(handler: ChannelHandler, file: StaticString = #filePath, line: UInt = #line) {
        do {
            _ = try self.context(handler: handler)
            XCTFail("Handler \(handler) present in \(self)", file: (file), line: line)
        } catch {
            // Expected
        }
    }
}

final class UnwrappingTests: XCTestCase {
    static let _certAndKey = generateSelfSignedCert()
    static let cert = UnwrappingTests._certAndKey.0
    static let key = UnwrappingTests._certAndKey.1

    /// Builds a server-style context whose self-signed cert is also its trust root.
    private func configuredSSLContext(file: StaticString = #filePath, line: UInt = #line) throws -> NIOSSLContext {
        var config = TLSConfiguration.makeServerConfiguration(
            certificateChain: [.certificate(UnwrappingTests.cert)],
            privateKey: .privateKey(UnwrappingTests.key)
        )
        config.trustRoots = .certificates([UnwrappingTests.cert])
        return try assertNoThrowWithValue(NIOSSLContext(configuration: config), file: file, line: line)
    }

    func testSimpleUnwrapping() throws {
        let serverChannel = EmbeddedChannel()
        let
clientChannel = EmbeddedChannel()
        var clientClosed = false
        var serverClosed = false
        var unwrapped = false

        defer {
            // We expect the server case to throw
            XCTAssertThrowsError(try serverChannel.finish())
            XCTAssertNoThrow(try clientChannel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())

        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: context)))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Mark the closure of the channels.
        clientChannel.closeFuture.assumeIsolated().whenComplete { _ in
            clientClosed = true
        }
        serverChannel.closeFuture.assumeIsolated().whenComplete { _ in
            serverClosed = true
        }

        // Connect. This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Let's unwrap the client connection. With no additional configuration, this will cause the server
        // to close. The client will not close because interactInMemory does not propagate closure.
        let stopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        stopPromise.futureResult.assumeIsolated().whenComplete { _ in
            unwrapped = true
        }
        clientHandler.stopTLS(promise: stopPromise)

        XCTAssertFalse(clientClosed)
        XCTAssertFalse(serverClosed)
        XCTAssertFalse(unwrapped)
        XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
        clientChannel.pipeline.syncOperations.assertDoesNotContain(handler: clientHandler)
        (serverChannel.eventLoop as! EmbeddedEventLoop).run()
        (clientChannel.eventLoop as! EmbeddedEventLoop).run()
        XCTAssertFalse(clientClosed)
        XCTAssertTrue(serverClosed)
        XCTAssertTrue(unwrapped)
    }

    func testSimultaneousUnwrapping() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()
        var clientClosed = false
        var serverClosed = false
        var clientUnwrapped = false
        var serverUnwrapped = false

        defer {
            XCTAssertNoThrow(try serverChannel.finish())
            XCTAssertNoThrow(try clientChannel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())

        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        let serverHandler = try assertNoThrowWithValue(NIOSSLServerHandler(context: context))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(serverHandler))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Mark the closure of the channels.
        clientChannel.closeFuture.assumeIsolated().whenComplete { _ in
            clientClosed = true
        }
        serverChannel.closeFuture.assumeIsolated().whenComplete { _ in
            serverClosed = true
        }

        // Connect. This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Let's unwrap the client connection and the server connection at the same time. This should
        // not close either channel.
let clientStopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        clientStopPromise.futureResult.assumeIsolated().whenComplete { _ in
            clientUnwrapped = true
        }
        clientHandler.stopTLS(promise: clientStopPromise)

        let serverStopPromise: EventLoopPromise<Void> = serverChannel.eventLoop.makePromise()
        serverStopPromise.futureResult.assumeIsolated().whenComplete { _ in
            serverUnwrapped = true
        }
        serverHandler.stopTLS(promise: serverStopPromise)

        XCTAssertFalse(clientClosed)
        XCTAssertFalse(serverClosed)
        XCTAssertFalse(clientUnwrapped)
        XCTAssertFalse(serverUnwrapped)
        XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
        clientChannel.pipeline.syncOperations.assertDoesNotContain(handler: clientHandler)
        serverChannel.pipeline.syncOperations.assertDoesNotContain(handler: serverHandler)
        (serverChannel.eventLoop as! EmbeddedEventLoop).run()
        (clientChannel.eventLoop as! EmbeddedEventLoop).run()
        XCTAssertFalse(clientClosed)
        XCTAssertFalse(serverClosed)
        XCTAssertTrue(clientUnwrapped)
        XCTAssertTrue(serverUnwrapped)
    }

    func testUnwrappingFollowedByClosure() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()
        var clientClosed = false
        var clientUnwrapped = false

        defer {
            // Both channels will already be closed
            XCTAssertThrowsError(try serverChannel.finish())
            XCTAssertThrowsError(try clientChannel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())

        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: context)))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Mark the closure of the client.
clientChannel.closeFuture.assumeIsolated().whenComplete { _ in
            clientClosed = true
        }

        // Connect. This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Let's unwrap the client connection.
        let clientStopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        clientStopPromise.futureResult.assumeIsolated().map {
            XCTFail("Must not succeed")
        }.whenFailure { error in
            XCTAssertEqual(error as? NIOTLSUnwrappingError, NIOTLSUnwrappingError.closeRequestedDuringUnwrap)
            clientUnwrapped = true
        }
        clientHandler.stopTLS(promise: clientStopPromise)

        // Now we're going to close the client.
        clientChannel.close().assumeIsolated().whenComplete { _ in
            XCTAssertFalse(clientClosed)
            XCTAssertTrue(clientUnwrapped)
        }

        XCTAssertFalse(clientClosed)
        XCTAssertFalse(clientUnwrapped)
        XCTAssertNoThrow(
            try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel, runLoops: false)
        )
        clientChannel.pipeline.syncOperations.assertContains(handler: clientHandler)
        (serverChannel.eventLoop as! EmbeddedEventLoop).run()
        (clientChannel.eventLoop as!
EmbeddedEventLoop).run()
        XCTAssertTrue(clientClosed)
        XCTAssertTrue(clientUnwrapped)
    }

    func testUnwrappingMeetsTCPFIN() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()
        var clientClosed = false
        var clientUnwrapped = false

        defer {
            // The errors here are expected
            XCTAssertThrowsError(try serverChannel.finish())
            XCTAssertThrowsError(try clientChannel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())

        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: context)))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Mark the closure of the client.
        clientChannel.closeFuture.assumeIsolated().whenComplete { _ in
            clientClosed = true
        }

        // Connect. This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Let's unwrap the client connection.
        let clientStopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        clientStopPromise.futureResult.assumeIsolated().map {
            XCTFail("Must not succeed")
        }.whenFailure { error in
            XCTAssertEqual(error as? NIOSSLError, .uncleanShutdown)
            clientUnwrapped = true
        }
        clientHandler.stopTLS(promise: clientStopPromise)
        XCTAssertFalse(clientClosed)
        XCTAssertFalse(clientUnwrapped)

        // Now we're going to simulate the client receiving a TCP FIN the other way.
        clientChannel.pipeline.fireChannelInactive()
        clientChannel.pipeline.syncOperations.assertContains(handler: clientHandler)
        (clientChannel.eventLoop as!
EmbeddedEventLoop).run()
        XCTAssertTrue(clientUnwrapped)

        // Clean up by bringing the server up to speed
        serverChannel.pipeline.fireChannelInactive()
    }

    func testDoubleUnwrapping() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()
        var promiseCalled = false

        defer {
            // We expect the server case to throw
            XCTAssertThrowsError(try serverChannel.finish())
            XCTAssertNoThrow(try clientChannel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())

        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: context)))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Connect. This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Let's unwrap the client connection twice. We'll ignore the first promise.
let dummyPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        let stopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        stopPromise.futureResult.assumeIsolated().whenComplete { _ in
            promiseCalled = true
        }
        clientHandler.stopTLS(promise: dummyPromise)
        clientHandler.stopTLS(promise: stopPromise)
        XCTAssertFalse(promiseCalled)
        XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
        XCTAssertTrue(promiseCalled)
    }

    func testUnwrappingAfterIgnoredUnwrapping() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()
        var promiseCalled = false

        defer {
            // We expect the server case to throw
            XCTAssertThrowsError(try serverChannel.finish())
            XCTAssertNoThrow(try clientChannel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())

        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: context)))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Connect. This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Let's unwrap the client connection twice. We'll only send a promise the second time.
let stopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        stopPromise.futureResult.assumeIsolated().whenComplete { _ in
            promiseCalled = true
        }
        clientHandler.stopTLS(promise: nil)
        clientHandler.stopTLS(promise: stopPromise)
        XCTAssertFalse(promiseCalled)
        XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
        XCTAssertTrue(promiseCalled)
    }

    func testUnwrappingIdleChannel() throws {
        let channel = EmbeddedChannel()
        var promiseCalled = false

        defer {
            XCTAssertNoThrow(try channel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())
        let handler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try channel.pipeline.syncOperations.addHandler(handler))
        channel.pipeline.syncOperations.assertContains(handler: handler)

        // Let's unwrap. This should succeed easily.
        let stopPromise: EventLoopPromise<Void> = channel.eventLoop.makePromise()
        stopPromise.futureResult.assumeIsolated().whenComplete { _ in
            promiseCalled = true
        }
        XCTAssertFalse(promiseCalled)
        handler.stopTLS(promise: stopPromise)
        XCTAssertTrue(promiseCalled)
    }

    func testUnwrappingAfterSuccessfulUnwrap() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()
        var promiseCalled = false

        defer {
            // We expect the server case to throw
            XCTAssertThrowsError(try serverChannel.finish())
            XCTAssertNoThrow(try clientChannel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())

        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: context)))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Connect.
// This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Let's unwrap the client connection.
        let stopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        stopPromise.futureResult.assumeIsolated().whenComplete { _ in
            promiseCalled = true
        }
        clientHandler.stopTLS(promise: stopPromise)
        XCTAssertFalse(promiseCalled)
        XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
        XCTAssertTrue(promiseCalled)

        // Now, let's unwrap it again.
        let secondPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        clientHandler.stopTLS(promise: secondPromise)
        do {
            try secondPromise.futureResult.wait()
        } catch {
            XCTFail("Unexpected error: \(error)")
        }
    }

    func testUnwrappingAfterClosure() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()

        defer {
            // We expect both cases to throw
            XCTAssertThrowsError(try serverChannel.finish())
            XCTAssertThrowsError(try clientChannel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())

        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: context)))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Connect. This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Let's close everything down.
clientChannel.close(promise: nil)
        serverChannel.close(promise: nil)
        XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))

        // We haven't spun the event loop, so the handlers are still in the pipeline. Now attempt to unwrap.
        let stopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        clientHandler.stopTLS(promise: stopPromise)
        XCTAssertThrowsError(try stopPromise.futureResult.wait()) { error in
            XCTAssertEqual(.some(.alreadyClosed), error as? NIOTLSUnwrappingError)
        }
    }

    func testReceivingGibberishAfterAttemptingToUnwrap() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()
        var clientClosed = false
        var clientUnwrapped = false

        defer {
            // The errors here are expected
            XCTAssertThrowsError(try serverChannel.finish())
            XCTAssertThrowsError(try clientChannel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())

        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: context)))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Mark the closure of the client.
        clientChannel.closeFuture.assumeIsolated().whenComplete { _ in
            clientClosed = true
        }

        // Connect. This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Let's unwrap the client connection.
        let clientStopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        clientStopPromise.futureResult.assumeIsolated().map {
            XCTFail("Must not succeed")
        }.whenFailure { error in
            switch error as?
NIOSSLError {
            case .some(.shutdownFailed):
                // Expected
                break
            default:
                XCTFail("Unexpected error: \(error)")
            }
            clientUnwrapped = true
        }
        clientHandler.stopTLS(promise: clientStopPromise)
        XCTAssertFalse(clientClosed)
        XCTAssertFalse(clientUnwrapped)

        // Now we're going to simulate the client receiving gibberish data in response, instead
        // of a CLOSE_NOTIFY.
        var buffer = clientChannel.allocator.buffer(capacity: 1024)
        buffer.writeStaticString("GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 0\r\n\r\n")

        XCTAssertThrowsError(try clientChannel.writeInbound(buffer)) { error in
            switch error as? NIOSSLError {
            case .some(.shutdownFailed):
                // Expected
                break
            default:
                XCTFail("Unexpected error: \(error)")
            }
        }

        // The client should have errored out now. The handler should still be there, as unwrapping
        // has failed.
        XCTAssertTrue(clientUnwrapped)
        clientChannel.pipeline.syncOperations.assertContains(handler: clientHandler)

        // Clean up by bringing the server up to speed
        serverChannel.pipeline.fireChannelInactive()
    }

    func testPendingWritesFailOnUnwrap() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()

        defer {
            // We expect the server to throw
            XCTAssertThrowsError(try serverChannel.finish())
            XCTAssertNoThrow(try clientChannel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())

        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: context)))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Connect. This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Queue up a write.
var writeCompleted = false
        var buffer = clientChannel.allocator.buffer(capacity: 1024)
        buffer.writeStaticString("Hello, world!")
        clientChannel.write(buffer).assumeIsolated().map {
            XCTFail("Must not succeed")
        }.whenFailure { error in
            XCTAssertEqual(error as? NIOTLSUnwrappingError, .unflushedWriteOnUnwrap)
            writeCompleted = true
        }

        // We haven't spun the event loop, so the handlers are still in the pipeline. Now attempt to unwrap.
        var unwrapped = false
        let stopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        stopPromise.futureResult.assumeIsolated().whenSuccess {
            XCTAssertTrue(writeCompleted)
            unwrapped = true
        }

        XCTAssertFalse(writeCompleted)
        clientHandler.stopTLS(promise: stopPromise)
        XCTAssertFalse(writeCompleted)
        XCTAssertFalse(unwrapped)
        XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel))
        XCTAssertTrue(writeCompleted)
        XCTAssertTrue(unwrapped)
    }

    func testPendingWritesFailWhenFlushedOnUnwrap() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()

        defer {
            // We expect both cases to throw
            XCTAssertThrowsError(try serverChannel.finish())
            XCTAssertThrowsError(try clientChannel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())

        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: context)))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Connect. This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Queue up a write.
var writeCompleted = false
        var buffer = clientChannel.allocator.buffer(capacity: 1024)
        buffer.writeStaticString("Hello, world!")
        clientChannel.write(buffer).assumeIsolated().map {
            XCTFail("Must not succeed")
        }.whenFailure { error in
            XCTAssertEqual(error as? ChannelError, .ioOnClosedChannel)
            writeCompleted = true
        }

        // We haven't spun the event loop, so the handlers are still in the pipeline. Now attempt to unwrap.
        var unwrapped = false
        let stopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        stopPromise.futureResult.assumeIsolated().whenFailure { error in
            switch error as? BoringSSLError {
            case .some(.sslError):
                // ok
                break
            default:
                XCTFail("Unexpected error: \(error)")
            }
            unwrapped = true
        }

        XCTAssertFalse(writeCompleted)
        clientHandler.stopTLS(promise: stopPromise)
        XCTAssertFalse(writeCompleted)
        XCTAssertFalse(unwrapped)

        // Now try to flush the write. This should fail the write early, and take out the connection.
        clientChannel.flush()
        XCTAssertTrue(writeCompleted)
        XCTAssertTrue(unwrapped)

        // Bring the server up to speed.
serverChannel.pipeline.fireChannelInactive()
    }

    func testDataReceivedAfterCloseNotifyIsPassedDownThePipeline() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()

        defer {
            // We expect the server case to throw
            XCTAssertThrowsError(try serverChannel.finish())
            XCTAssertNoThrow(try clientChannel.finish())
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())

        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: context)))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        let readPromise: EventLoopPromise<ByteBuffer> = clientChannel.eventLoop.makePromise()
        XCTAssertNoThrow(
            try clientChannel.pipeline.syncOperations.addHandler(PromiseOnReadHandler(promise: readPromise))
        )

        var readCompleted = false
        readPromise.futureResult.assumeIsolated().whenSuccess { buffer in
            XCTAssertEqual(buffer.getString(at: buffer.readerIndex, length: buffer.readableBytes), "Hello, world!")
            readCompleted = true
        }

        // Connect. This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Let's unwrap the client connection. With no additional configuration, this will cause the server
        // to close. The client will not close because interactInMemory does not propagate closure.
        var unwrapped = false
        let stopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        stopPromise.futureResult.assumeIsolated().whenSuccess {
            unwrapped = true
        }
        clientHandler.stopTLS(promise: stopPromise)

        // Now we want to manually handle the interaction. The client will have sent a CLOSE_NOTIFY: send it to the server.
let clientCloseNotify = try clientChannel.readOutbound(as: ByteBuffer.self)!
        XCTAssertNoThrow(try serverChannel.writeInbound(clientCloseNotify))

        // The server will have sent a CLOSE_NOTIFY: grab it.
        var serverCloseNotify = try serverChannel.readOutbound(as: ByteBuffer.self)!

        // We're going to append some plaintext data.
        serverCloseNotify.writeStaticString("Hello, world!")

        // Now we're going to send it to the client.
        XCTAssertFalse(unwrapped)
        XCTAssertFalse(readCompleted)
        XCTAssertNoThrow(try clientChannel.writeInbound(serverCloseNotify))

        // This will have triggered an unwrap.
        XCTAssertTrue(unwrapped)

        // We should also have received the plaintext data.
        XCTAssertTrue(readCompleted)
    }

    func testUnwrappingTimeout() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()
        var clientClosed = false
        var serverClosed = false
        var unwrapped = false

        defer {
            XCTAssertNoThrow(try serverChannel.finish(acceptAlreadyClosed: false))
            XCTAssertNoThrow(try clientChannel.finish(acceptAlreadyClosed: false))
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())
        let serverHandler = try assertNoThrowWithValue(NIOSSLServerHandler(context: context))
        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(serverHandler))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Mark the closure of the channels.
        clientChannel.closeFuture.assumeIsolated().whenComplete { _ in
            clientClosed = true
        }
        serverChannel.closeFuture.assumeIsolated().whenComplete { _ in
            serverClosed = true
        }

        // Connect. This should lead to a completed handshake.
XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Let's unwrap the server connection. We are not going to interact in memory, because we want to simulate a
        // timeout.
        let stopPromise: EventLoopPromise<Void> = clientChannel.eventLoop.makePromise()
        stopPromise.futureResult.assumeIsolated().whenComplete { result in
            unwrapped = true
            switch result {
            case .success:
                XCTFail("Shutdown succeeded unexpectedly")
            case .failure(let err):
                XCTAssertTrue(err is NIOSSLCloseTimedOutError, "Unexpected error: \(err)")
            }
        }
        serverHandler.stopTLS(promise: stopPromise)
        XCTAssertFalse(clientClosed)
        XCTAssertFalse(serverClosed)
        XCTAssertFalse(unwrapped)
        XCTAssertNoThrow(try serverChannel.throwIfErrorCaught())

        // Advance time by 5 seconds. This should fire the timeout. We unwrap. The connection is not closed automatically.
        serverChannel.embeddedEventLoop.advanceTime(by: .seconds(5))
        XCTAssertFalse(clientClosed)
        XCTAssertFalse(serverClosed)
        XCTAssertTrue(unwrapped)
        serverChannel.pipeline.syncOperations.assertDoesNotContain(handler: serverHandler)
        XCTAssertThrowsError(try serverChannel.throwIfErrorCaught()) { error in
            XCTAssertTrue(error is NIOSSLCloseTimedOutError, "Unexpected error: \(error)")
        }

        // Now we do the same for the client to get it out of the pipeline too. Naturally, it'll time out.
clientHandler.stopTLS(promise: nil)
        clientChannel.embeddedEventLoop.advanceTime(by: .seconds(5))
        clientChannel.pipeline.syncOperations.assertDoesNotContain(handler: clientHandler)
        XCTAssertThrowsError(try clientChannel.throwIfErrorCaught()) { error in
            XCTAssertTrue(error is NIOSSLCloseTimedOutError, "Unexpected error: \(error)")
        }
        XCTAssertFalse(clientClosed)
        XCTAssertFalse(serverClosed)
        XCTAssertTrue(unwrapped)
    }

    func testSuccessfulUnwrapCancelsTimeout() throws {
        let serverChannel = EmbeddedChannel()
        let clientChannel = EmbeddedChannel()
        var clientClosed = false
        var serverClosed = false
        var unwrapped = false

        defer {
            XCTAssertNoThrow(try serverChannel.finish(acceptAlreadyClosed: false))
            XCTAssertNoThrow(try clientChannel.finish(acceptAlreadyClosed: true))
        }

        let context = try assertNoThrowWithValue(configuredSSLContext())
        let serverHandler = try assertNoThrowWithValue(NIOSSLServerHandler(context: context))
        let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil))
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(serverHandler))
        XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler))
        let handshakeHandler = HandshakeCompletedHandler()
        XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(handshakeHandler))

        // Mark the closure of the channels.
        clientChannel.closeFuture.assumeIsolated().whenComplete { _ in
            clientClosed = true
        }
        serverChannel.closeFuture.assumeIsolated().whenComplete { _ in
            serverClosed = true
        }

        // Connect. This should lead to a completed handshake.
        XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel))
        XCTAssertTrue(handshakeHandler.handshakeSucceeded)

        // Let's unwrap the server connection.
let stopPromise: EventLoopPromise = clientChannel.eventLoop.makePromise() stopPromise.futureResult.assumeIsolated().whenSuccess { result in unwrapped = true } serverHandler.stopTLS(promise: stopPromise) XCTAssertFalse(clientClosed) XCTAssertFalse(serverClosed) XCTAssertFalse(unwrapped) // Now interact in memory. XCTAssertNoThrow( try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel, runLoops: false) ) XCTAssertFalse(clientClosed) XCTAssertFalse(serverClosed) XCTAssertTrue(unwrapped) serverChannel.pipeline.syncOperations.assertDoesNotContain(handler: serverHandler) // Now advance time by 5 seconds and confirm that the server doesn't get closed. serverChannel.embeddedEventLoop.advanceTime(by: .seconds(5)) XCTAssertFalse(clientClosed) XCTAssertFalse(serverClosed) XCTAssertTrue(unwrapped) serverChannel.pipeline.syncOperations.assertDoesNotContain(handler: serverHandler) } func testUnwrappingAndClosingShareATimeout() throws { let serverChannel = EmbeddedChannel() let clientChannel = EmbeddedChannel() var clientClosed = false var serverClosed = false var unwrapped = false var closed = false defer { XCTAssertNoThrow(try serverChannel.finish(acceptAlreadyClosed: true)) XCTAssertNoThrow(try clientChannel.finish(acceptAlreadyClosed: false)) } let context = try assertNoThrowWithValue(configuredSSLContext()) let serverHandler = try assertNoThrowWithValue(NIOSSLServerHandler(context: context)) let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil)) XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(serverHandler)) XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler)) let handshakeHandler = HandshakeCompletedHandler() XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(handshakeHandler)) // Mark the closure of the channels. 
clientChannel.closeFuture.assumeIsolated().whenComplete { _ in clientClosed = true } serverChannel.closeFuture.assumeIsolated().whenComplete { _ in serverClosed = true } // Connect. This should lead to a completed handshake. XCTAssertNoThrow(try connectInMemory(client: clientChannel, server: serverChannel)) XCTAssertTrue(handshakeHandler.handshakeSucceeded) // Let's unwrap the server connection. We are not going to interact in memory, because we want to simulate a // timeout. let stopPromise: EventLoopPromise = clientChannel.eventLoop.makePromise() stopPromise.futureResult.assumeIsolated().whenComplete { result in unwrapped = true switch result { case .success: XCTFail("Shutdown succeeded unexpectedly") case .failure(let err): XCTAssertTrue(err is NIOSSLCloseTimedOutError, "Unexpected error: \(err)") } } serverHandler.stopTLS(promise: stopPromise) XCTAssertFalse(clientClosed) XCTAssertFalse(serverClosed) XCTAssertFalse(unwrapped) // Advance time by 3 seconds. This should not fire the timeout. serverChannel.embeddedEventLoop.advanceTime(by: .seconds(3)) XCTAssertFalse(clientClosed) XCTAssertFalse(serverClosed) XCTAssertFalse(unwrapped) serverChannel.pipeline.syncOperations.assertContains(handler: serverHandler) // Now we close. This will report success. serverChannel.close().assumeIsolated().whenSuccess { result in closed = true } XCTAssertFalse(clientClosed) XCTAssertFalse(serverClosed) XCTAssertFalse(unwrapped) XCTAssertFalse(closed) serverChannel.pipeline.syncOperations.assertContains(handler: serverHandler) // Now we advance two more seconds. This closes the connection. All the promises succeed. serverChannel.embeddedEventLoop.advanceTime(by: .seconds(2)) XCTAssertFalse(clientClosed) XCTAssertTrue(serverClosed) XCTAssertTrue(unwrapped) XCTAssertTrue(closed) serverChannel.pipeline.syncOperations.assertDoesNotContain(handler: serverHandler) // Now we do the same for the client to get it out of the pipeline too. Naturally, it'll time out. 
clientHandler.stopTLS(promise: nil) clientChannel.embeddedEventLoop.advanceTime(by: .seconds(5)) clientChannel.pipeline.syncOperations.assertDoesNotContain(handler: clientHandler) XCTAssertThrowsError(try clientChannel.throwIfErrorCaught()) { error in XCTAssertTrue(error is NIOSSLCloseTimedOutError, "Unexpected error: \(error)") } XCTAssertFalse(clientClosed) XCTAssertTrue(serverClosed) XCTAssertTrue(unwrapped) } func testChannelInactiveDuringHandshake() throws { let serverChannel = EmbeddedChannel() let clientChannel = EmbeddedChannel() var serverClosed = false var serverUnwrapped = false defer { // The errors here are expected XCTAssertThrowsError(try serverChannel.finish()) XCTAssertThrowsError(try clientChannel.finish()) } let context = try assertNoThrowWithValue(configuredSSLContext()) let serverHandler = try assertNoThrowWithValue(NIOSSLServerHandler(context: context)) let clientHandler = try assertNoThrowWithValue(NIOSSLClientHandler(context: context, serverHostname: nil)) XCTAssertNoThrow(try serverChannel.pipeline.syncOperations.addHandler(NIOSSLServerHandler(context: context))) XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(clientHandler)) let handshakeHandler = HandshakeCompletedHandler() XCTAssertNoThrow(try clientChannel.pipeline.syncOperations.addHandler(handshakeHandler)) serverChannel.closeFuture.assumeIsolated().whenComplete { _ in serverClosed = true } // Place the guts of connectInMemory here to abruptly alter the handshake process let addr = try assertNoThrowWithValue(SocketAddress(unixDomainSocketPath: "/tmp/whatever2")) let _ = clientChannel.connect(to: addr) XCTAssertFalse(serverClosed) serverChannel.pipeline.fireChannelActive() clientChannel.pipeline.fireChannelActive() // doHandshakeStep process should start here out in NIOSSLHandler before fireChannelInactive serverChannel.pipeline.fireChannelInactive() clientChannel.pipeline.fireChannelInactive() // Need to test this error as a BoringSSLError because that means 
success instead of an uncleanShutdown do { try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel) } catch { switch error as? NIOSSLError { case .some(.handshakeFailed(let innerError)): // Expected to fall into .handshakeFailed with .eofDuringHandshake XCTAssertEqual(innerError, .sslError([.eofDuringHandshake])) default: XCTFail("Unexpected error: \(error)") } } clientHandler.stopTLS(promise: nil) // Go through the process of closing and verifying the close on the server side. XCTAssertFalse(serverUnwrapped) let serverStopPromise: EventLoopPromise = serverChannel.eventLoop.makePromise() serverStopPromise.futureResult.assumeIsolated().whenComplete { _ in serverUnwrapped = true } serverHandler.stopTLS(promise: serverStopPromise) XCTAssertNoThrow(try interactInMemory(clientChannel: clientChannel, serverChannel: serverChannel)) (serverChannel.eventLoop as! EmbeddedEventLoop).run() XCTAssertTrue(serverClosed) XCTAssertTrue(serverUnwrapped) } } ================================================ FILE: dev/git.commit.template ================================================ One line description of your change Motivation: Explain here the context, and why you're making that change. What is the problem you're trying to solve. Modifications: Describe the modifications you've done. Result: After your change, what will change. 
================================================ FILE: docker/Dockerfile ================================================ ARG swift_version=5.7 ARG ubuntu_version=focal ARG base_image=swift:$swift_version-$ubuntu_version FROM $base_image # needed to do again after FROM due to docker limitation ARG swift_version ARG ubuntu_version # set as UTF-8 RUN apt-get update && apt-get install -y locales locales-all ENV LC_ALL en_US.UTF-8 ENV LANG en_US.UTF-8 ENV LANGUAGE en_US.UTF-8 # dependencies RUN apt-get update && apt-get install -y wget RUN apt-get update && apt-get install -y lsof dnsutils netcat-openbsd net-tools expect curl jq # used by integration tests RUN apt-get update && apt-get install -y libssl-dev RUN apt-get update && apt-get install -y execstack # tools RUN mkdir -p $HOME/.tools RUN echo 'export PATH="$HOME/.tools:$PATH"' >> $HOME/.profile # script to allow mapping framepointers on linux (until part of the toolchain) RUN wget -q https://raw.githubusercontent.com/apple/swift/main/utils/symbolicate-linux-fatal -O $HOME/.tools/symbolicate-linux-fatal RUN chmod 755 $HOME/.tools/symbolicate-linux-fatal ================================================ FILE: docker/docker-compose.2204.510.yaml ================================================ version: "3" services: runtime-setup: image: swift-nio-ssl:22.04-5.10 build: args: ubuntu_version: "jammy" swift_version: "5.10" performance-test: image: swift-nio-ssl:22.04-5.10 shell: image: swift-nio-ssl:22.04-5.10 ================================================ FILE: docker/docker-compose.2204.58.yaml ================================================ version: "3" services: runtime-setup: image: swift-nio-ssl:22.04-5.8 build: args: ubuntu_version: "jammy" swift_version: "5.8" performance-test: image: swift-nio-ssl:22.04-5.8 shell: image: swift-nio-ssl:22.04-5.8 ================================================ FILE: docker/docker-compose.2204.59.yaml ================================================ version: "3" services: 
runtime-setup: image: swift-nio-ssl:22.04-5.9 build: args: ubuntu_version: "jammy" swift_version: "5.9" performance-test: image: swift-nio-ssl:22.04-5.9 shell: image: swift-nio-ssl:22.04-5.9 ================================================ FILE: docker/docker-compose.2204.main.yaml ================================================ version: "3" services: runtime-setup: image: swift-nio-ssl:22.04-main build: args: base_image: "swiftlang/swift:nightly-main-jammy" performance-test: image: swift-nio-ssl:22.04-main shell: image: swift-nio-ssl:22.04-main ================================================ FILE: docker/docker-compose.yaml ================================================ # this file is not designed to be run directly # instead, use the docker-compose.. files # eg docker-compose -f docker/docker-compose.yaml -f docker/docker-compose.1804.50.yaml run test version: "3" services: runtime-setup: image: swift-nio-ssl:default build: context: . dockerfile: Dockerfile common: &common image: swift-nio-ssl:default depends_on: [runtime-setup] volumes: - ~/.ssh:/root/.ssh - ..:/swift-nio-ssl:z working_dir: /swift-nio-ssl cap_drop: - CAP_NET_RAW - CAP_NET_BIND_SERVICE test: <<: *common command: /bin/bash -xcl "./scripts/integration_tests.sh" performance-test: <<: *common command: /bin/bash -xcl "swift build -c release && ./.build/release/NIOSSLPerformanceTester" # util shell: <<: *common entrypoint: /bin/bash ================================================ FILE: scripts/analyze_performance_results.rb ================================================ #!/usr/bin/env ruby ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2017-2018 Apple Inc. 
and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## require 'optparse' METRIC="min" # used for comparison module Enumerable def sum return self.inject(0){|accum, i| accum + i } end def mean return self.sum / self.length.to_f end def sample_variance m = self.mean sum = self.inject(0){|accum, i| accum + (i - m) ** 2 } return sum / (self.length - 1).to_f end def standard_deviation return Math.sqrt(self.sample_variance) end end def parse_results(file) results = {} File.open(file, "r") do |f| f.each_line do |line| parts = line.split(':').collect(&:strip) throw "invalid data format" unless parts.length == 3 key = parts[1] values = parts[2].split(',').collect(&:strip).map(&:to_f) results[key] = {} results[key]["values"] = values results[key]["max"] = values.max results[key]["min"] = values.min results[key]["mean"] = values.mean results[key]["std"] = values.standard_deviation end end results end def compare_results(current, previous) results = {} current.keys.each do |key| results[key] = {} results[key]["previous"] = previous[key] || { ::METRIC => "n/a" } results[key]["current"] = current[key] if previous[key] current_value = current[key][::METRIC] previous_value = previous[key][::METRIC] delta = current_value - previous_value results[key]["delta"] = delta results[key]["winner"] = current_value <= previous_value ? 
"current" : "previous" results[key]["diff"] = (delta / previous_value * 100).to_i else results[key]["winner"] = "n/a" results[key]["diff"] = "n/a" end end results end def print_results_markdown(results) columns = ["min", "max", "mean", "std"] puts "| name | #{columns.join(" | ")} |" puts "|#{Array.new(columns.size+1, '--').join("|")}|" results.keys.each do |key| print "| #{key}" columns.each do |column| print " | #{results[key][column]}" end puts " |\n" end end def print_results_html(results) columns = ["min", "max", "mean", "std"] puts "" puts "" results.keys.each do |key| puts "" puts "" columns.each do |column| puts "" end puts "" end puts "
name#{columns.join("")}
#{key}#{results[key][column]}
" end def print_results_csv(results) puts results.keys.join(",") puts results.keys.map{ |key| results[key][::METRIC] }.join(",") end def print_comparison_markdown(results) puts "| name | current | previous | winner | diff |" puts "|#{Array.new(5, '--').join("|")}|" results.keys.each do |key| puts "| #{key} | #{results[key]["current"][::METRIC]} | #{results[key]["previous"][::METRIC]} | #{results[key]["winner"]} | #{results[key]["diff"]}% |" end end def print_comparison_html(results) puts "" puts " " results.keys.each do |key| puts " " end puts "
name current previous winner diff
#{key} #{results[key]["current"][::METRIC]} #{results[key]["previous"][::METRIC]} #{results[key]["winner"]} #{results[key]["diff"]}%
" end ARGV << '-h' if ARGV.empty? options = {} OptionParser.new do |opt| opt.on('-f', '--file file', 'file to process') { |o| options[:file] = o } opt.on('-p', '--previous previous', 'previous file to process') { |o| options[:previous] = o } opt.on('-o', '--output output', 'output format') { |o| options[:output] = o } opt.on_tail("-h", "--help", "show this message") do puts opt end end.parse! if options.has_key?(:file) && options.has_key?(:previous) current = parse_results(options[:file]) previous = parse_results(options[:previous]) results = compare_results(current, previous) case options[:output] when "html" print_comparison_html(results) when "markdown", nil print_comparison_markdown(results) else throw "invalid output format #{options[:output]}" end elsif options.has_key?(:file) results = parse_results(options[:file]) case options[:output] when "csv" print_results_csv(results) when "html", nil print_results_html(results) when "markdown", nil print_results_markdown(results) else throw "invalid output format #{options[:output]}" end else throw "invalid arguemnts" end ================================================ FILE: scripts/build-asm.py ================================================ #!/usr/bin/env python3 ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2018-2019 Apple Inc. and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## import os import subprocess # OS_ARCH_COMBOS maps from OS and platform to the OpenSSL assembly "style" for # that platform and the extension used by asm files. 
OS_ARCH_COMBOS = [ ('ios', 'arm', 'ios32', [], 'S'), ('ios', 'aarch64', 'ios64', [], 'S'), ('linux', 'arm', 'linux32', [], 'S'), ('linux', 'aarch64', 'linux64', [], 'S'), ('linux', 'x86', 'elf', ['-fPIC', '-DOPENSSL_IA32_SSE2'], 'S'), ('linux', 'x86_64', 'elf', [], 'S'), ('mac', 'x86_64', 'macosx', [], 'S'), ] # NON_PERL_FILES enumerates assembly files that are not processed by the # perlasm system. NON_PERL_FILES = { ('linux', 'arm'): [ 'boringssl/crypto/curve25519/asm/x25519-asm-arm.S', 'boringssl/crypto/poly1305/poly1305_arm_asm.S', ], ('linux', 'x86_64'): [ 'boringssl/crypto/hrss/asm/poly_rq_mul.S', ], } def FindCMakeFiles(directory): """Returns list of all CMakeLists.txt files recursively in directory.""" cmakefiles = [] for (path, _, filenames) in os.walk(directory): for filename in filenames: if filename == 'CMakeLists.txt': cmakefiles.append(os.path.join(path, filename)) return cmakefiles def ExtractPerlAsmFromCMakeFile(cmakefile): """Parses the contents of the CMakeLists.txt file passed as an argument and returns a list of all the perlasm() directives found in the file.""" perlasms = [] with open(cmakefile) as f: for line in f: line = line.strip() if not line.startswith('perlasm('): continue if not line.endswith(')'): raise ValueError('Bad perlasm line in %s' % cmakefile) # Remove "perlasm(" from start and ")" from end params = line[8:-1].split() if len(params) < 4: raise ValueError('Bad perlasm line in %s: %s' % (cmakefile, line)) perlasms.append({ 'arch': params[1], 'output': os.path.join(os.path.dirname(cmakefile), params[2]), 'input': os.path.join(os.path.dirname(cmakefile), params[3]), 'extra_args': params[4:] }) return perlasms def ReadPerlAsmOperations(): """Returns a list of all perlasm() directives found in CMake config files in src/.""" perlasms = [] cmakefiles = FindCMakeFiles('boringssl') for cmakefile in cmakefiles: perlasms.extend(ExtractPerlAsmFromCMakeFile(cmakefile)) return perlasms def PerlAsm(output_filename, input_filename, 
perlasm_style, extra_args):
    """Runs a perlasm script and puts the output into output_filename."""
    base_dir = os.path.dirname(output_filename)
    if not os.path.isdir(base_dir):
        os.makedirs(base_dir)
    subprocess.check_call(
        ['perl', input_filename, perlasm_style] + extra_args + [output_filename])


def WriteAsmFiles(perlasms):
    """Generates asm files from perlasm directives for each supported OS x
    platform combination.

    Returns a dict mapping (osname, arch) to the list of generated .S paths.
    """
    asmfiles = {}
    for perlasm in perlasms:
        for (osname, arch, perlasm_style, extra_args, asm_ext) in OS_ARCH_COMBOS:
            if arch != perlasm['arch']:
                continue
            key = (osname, arch)
            outDir = '%s-%s' % key
            output = perlasm['output']
            if not output.startswith('boringssl/crypto'):
                raise ValueError('output missing crypto: %s' % output)
            # len('boringssl/crypto') == 16; [17:] also drops the trailing '/'.
            output = os.path.join(outDir, output[17:])
            output = '%s-%s.%s' % (output, osname, asm_ext)
            per_command_extra_args = extra_args + perlasm['extra_args']
            PerlAsm(output, perlasm['input'], perlasm_style, per_command_extra_args)
            asmfiles.setdefault(key, []).append(output)
    return asmfiles


def preprocessor_arch_for_arch(arch):
    # Maps our arch names to the corresponding C preprocessor macro.
    # Returns None (implicitly) for unknown arches.
    if arch == "arm":
        return "__arm__"
    elif arch == "aarch64":
        return "__aarch64__"
    elif arch == "x86":
        return "__i386__"
    elif arch == "x86_64":
        return "__x86_64__"


def preprocessor_platform_for_os(osname):
    # Maps our OS names to the corresponding C preprocessor macro.
    # Returns None (implicitly) for unknown OSes.
    if osname == 'mac' or osname == 'ios':
        return '__APPLE__'
    elif osname == 'linux':
        return '__linux__'


def asm_target(osname, arch, asm):
    # Rewrites a generated asm path into its final location under
    # boringssl/crypto, tagging the filename with the OS and arch
    # (e.g. foo.S -> foo.linux.x86_64.S).
    components = asm.split('/')
    new_components = ["boringssl/crypto"] + components[1:-1] + [components[-1].replace('.S', '.' + osname + '.' + arch + '.S')]  # noqa: E501
    return '/'.join(new_components)


def munge_file(pp_arch, pp_platform, source_lines, sink):
    """
    Wraps a single assembly file in appropriate defines.
"""
    # Emit the guard, the original lines, then close the guard. source_lines
    # and sink are byte-oriented, hence the encode() on the guard strings.
    sink.write("#if defined({0}) && defined({1})\n".format(pp_arch, pp_platform).encode())  # noqa: E501
    for line in source_lines:
        sink.write(line)
    sink.write("#endif // defined({0}) && defined({1})\n".format(pp_arch, pp_platform).encode())  # noqa: E501


def munge_all_files(osname, arch, asms):
    """
    Puts the appropriate architecture #ifdefs around the asm.
    """
    for asm in asms:
        pp_arch = preprocessor_arch_for_arch(arch)
        pp_platform = preprocessor_platform_for_os(osname)
        target = asm_target(osname, arch, asm)
        with open(asm, 'rb') as source:
            with open(target, 'wb') as sink:
                munge_file(pp_arch, pp_platform, source, sink)


def main():
    # First, we build all the .S files using the helper from boringssl.
    asm_outputs = WriteAsmFiles(ReadPerlAsmOperations())

    # Now we need to bring over all the .S files, inserting our preprocessor
    # directives along the way. We do this to allow the C preprocessor to make
    # unneeded assembly files vanish.
    for ((osname, arch), asm_files) in asm_outputs.items():
        munge_all_files(osname, arch, asm_files)

    # Hand-written (non-perlasm) files are munged in place.
    for ((osname, arch), asm_files) in NON_PERL_FILES.items():
        for asm_file in asm_files:
            with open(asm_file, 'rb') as f:
                lines = f.readlines()
            pp_arch = preprocessor_arch_for_arch(arch)
            pp_platform = preprocessor_platform_for_os(osname)
            with open(asm_file, 'wb') as sink:
                munge_file(pp_arch, pp_platform, lines, sink)


if __name__ == '__main__':
    main()

================================================ FILE: scripts/integration_tests.sh ================================================

#!/bin/bash
##===----------------------------------------------------------------------===##
##
## This source file is part of the SwiftNIO open source project
##
## Copyright (c) 2017-2018 Apple Inc.
and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## set +ex mkdir -p .build # for the junit.xml file ./IntegrationTests/run-tests.sh --junit-xml .build/junit-sh-tests.xml -i "$@" ================================================ FILE: scripts/patch-1-inttypes.patch ================================================ diff --git a/Sources/CNIOBoringSSL/crypto/hrss/hrss.cc b/Sources/CNIOBoringSSL/crypto/hrss/hrss.cc index 93a214e..eee4e58 100644 --- a/Sources/CNIOBoringSSL/crypto/hrss/hrss.cc +++ b/Sources/CNIOBoringSSL/crypto/hrss/hrss.cc @@ -13,6 +13,7 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include +#include #include #include diff --git a/Sources/CNIOBoringSSL/include/CNIOBoringSSL_bn.h b/Sources/CNIOBoringSSL/include/CNIOBoringSSL_bn.h index c86c1ef..7013140 100644 --- a/Sources/CNIOBoringSSL/include/CNIOBoringSSL_bn.h +++ b/Sources/CNIOBoringSSL/include/CNIOBoringSSL_bn.h @@ -126,7 +126,8 @@ #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_thread.h" #include // for PRIu64 and friends +#include #include // for FILE* #if defined(__cplusplus) ================================================ FILE: scripts/patch-2-inttypes.patch ================================================ diff --git a/Sources/CNIOBoringSSL/crypto/x509/t_x509.cc b/Sources/CNIOBoringSSL/crypto/x509/t_x509.cc index 7a3acc8..c0bc1c5 100644 --- a/Sources/CNIOBoringSSL/crypto/x509/t_x509.cc +++ b/Sources/CNIOBoringSSL/crypto/x509/t_x509.cc @@ -8,6 +8,7 @@ */ #include +#include // for PRIu64 and friends #include #include diff --git a/Sources/CNIOBoringSSL/include/CNIOBoringSSL_bn.h b/Sources/CNIOBoringSSL/include/CNIOBoringSSL_bn.h index 557fb1d..825b4ea 100644 --- a/Sources/CNIOBoringSSL/include/CNIOBoringSSL_bn.h +++ 
b/Sources/CNIOBoringSSL/include/CNIOBoringSSL_bn.h @@ -126,7 +126,6 @@ #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_thread.h" -#include // for PRIu64 and friends #include #include // for FILE* ================================================ FILE: scripts/patch-3-more-inttypes.patch ================================================ diff --git a/Sources/CNIOBoringSSL/crypto/evp/print.cc b/Sources/CNIOBoringSSL/crypto/evp/print.cc index 89ceb32..5e6fb2f 100644 --- a/Sources/CNIOBoringSSL/crypto/evp/print.cc +++ b/Sources/CNIOBoringSSL/crypto/evp/print.cc @@ -7,6 +7,8 @@ * https://www.openssl.org/source/license.html */ +#include + #include #include ================================================ FILE: scripts/vendor-boringssl.sh ================================================ #!/bin/bash ##===----------------------------------------------------------------------===## ## ## This source file is part of the SwiftNIO open source project ## ## Copyright (c) 2018-2019 Apple Inc. and the SwiftNIO project authors ## Licensed under Apache License v2.0 ## ## See LICENSE.txt for license information ## See CONTRIBUTORS.txt for the list of SwiftNIO project authors ## ## SPDX-License-Identifier: Apache-2.0 ## ##===----------------------------------------------------------------------===## # This was substantially adapted from grpc-swift's vendor-boringssl.sh script. # The license for the original work is reproduced below. See NOTICES.txt for # more. # # Copyright 2016, gRPC Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # This script creates a vendored copy of BoringSSL that is # suitable for building with the Swift Package Manager. # # Usage: # 1. Run this script in the package root. It will place # a local copy of the BoringSSL sources in Sources/CNIOBoringSSL. # Any prior contents of Sources/CNIOBoringSSL will be deleted. # set -eou pipefail HERE=$(pwd) DSTROOT=Sources/CNIOBoringSSL TMPDIR=$(mktemp -d /tmp/.workingXXXXXX) SRCROOT="${TMPDIR}/src/boringssl.googlesource.com/boringssl" # This function namespaces the awkward inline functions declared in OpenSSL # and BoringSSL. function namespace_inlines { # Pull out all STACK_OF functions. STACKS=$(grep --no-filename -rE -e "DEFINE_(SPECIAL_)?STACK_OF\([A-Z_0-9a-z]+\)" -e "DEFINE_NAMED_STACK_OF\([A-Z_0-9a-z]+, +[A-Z_0-9a-z:]+\)" "$1/"* | grep -v '//' | grep -v '#' | $sed -e 's/DEFINE_\(SPECIAL_\)\?STACK_OF(\(.*\))/\2/' -e 's/DEFINE_NAMED_STACK_OF(\(.*\), .*)/\1/') STACK_FUNCTIONS=("call_free_func" "call_copy_func" "call_cmp_func" "new" "new_null" "num" "zero" "value" "set" "free" "pop_free" "insert" "delete" "delete_ptr" "find" "shift" "push" "pop" "dup" "sort" "is_sorted" "set_cmp_func" "deep_copy") for s in $STACKS; do for f in "${STACK_FUNCTIONS[@]}"; do echo "#define sk_${s}_${f} BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, sk_${s}_${f})" >> "$1/include/openssl/boringssl_prefix_symbols.h" done done # Now pull out all LHASH_OF functions. 
LHASHES=$(grep --no-filename -rE "DEFINE_LHASH_OF\([A-Z_0-9a-z]+\)" "$1/"* | grep -v '//' | grep -v '#' | grep -v '\\$' | $sed 's/DEFINE_LHASH_OF(\(.*\))/\1/') LHASH_FUNCTIONS=("call_cmp_func" "call_hash_func" "new" "free" "num_items" "retrieve" "call_cmp_key" "retrieve_key" "insert" "delete" "call_doall" "call_doall_arg" "doall" "doall_arg") for l in $LHASHES; do for f in "${LHASH_FUNCTIONS[@]}"; do echo "#define lh_${l}_${f} BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, lh_${l}_${f})" >> "$1/include/openssl/boringssl_prefix_symbols.h" done done } # This function handles mangling the symbols in BoringSSL. function mangle_symbols { echo "GENERATING mangled symbol list" ( # We need a .a: may as well get SwiftPM to give it to us. # Temporarily enable the product we need. $sed -i -e 's/MANGLE_START/MANGLE_START*\//' -e 's/MANGLE_END/\/*MANGLE_END/' "${HERE}/Package.swift" export GOPATH="${TMPDIR}" # Begin by building for macOS. We build for two target triples, Intel # and Apple Silicon swift build --triple "x86_64-apple-macosx" --product CNIOBoringSSL swift build --triple "arm64-apple-macosx" --product CNIOBoringSSL ( cd "${SRCROOT}" go mod tidy -modcacherw go run "util/read_symbols.go" -out "${TMPDIR}/symbols-macOS-intel.txt" "${HERE}/.build/x86_64-apple-macosx/debug/libCNIOBoringSSL.a" go run "util/read_symbols.go" -out "${TMPDIR}/symbols-macOS-as.txt" "${HERE}/.build/arm64-apple-macosx/debug/libCNIOBoringSSL.a" ) # Now build for iOS. We use xcodebuild for this because SwiftPM doesn't # meaningfully support it. Unfortunately we must archive ourselves. # # If xcodebuild complains about not finding the scheme, make sure there # isn't a .xcodeproj kicking around. 
xcodebuild -sdk iphoneos -scheme CNIOBoringSSL -derivedDataPath "${TMPDIR}/iphoneos-deriveddata" -destination generic/platform=iOS ar -r "${TMPDIR}/libCNIOBoringSSL-iosarm64.a" "${TMPDIR}/iphoneos-deriveddata/Build/Products/Debug-iphoneos/CNIOBoringSSL.o" ( cd "${SRCROOT}" go run "util/read_symbols.go" -out "${TMPDIR}/symbols-iOS.txt" "${TMPDIR}/libCNIOBoringSSL-iosarm64.a" ) # Now cross compile for our targets. docker run -t -i --rm --privileged -v"$(pwd)":/src -w/src --platform linux/arm64 swift:5.9-jammy \ swift build --product CNIOBoringSSL docker run -t -i --rm --privileged -v"$(pwd)":/src -w/src --platform linux/amd64 swift:5.9-jammy \ swift build --product CNIOBoringSSL # Now we need to generate symbol mangles for Linux. We can do this in # one go for all of them. ( cd "${SRCROOT}" go run "util/read_symbols.go" -obj-file-format elf -out "${TMPDIR}/symbols-linux-all.txt" "${HERE}"/.build/*-unknown-linux-gnu/debug/libCNIOBoringSSL.a ) # Now we concatenate all the symbols together and uniquify it. cat "${TMPDIR}"/symbols-*.txt | sort | uniq > "${TMPDIR}/symbols.txt" # Use this as the input to the mangle. ( cd "${SRCROOT}" go run "util/make_prefix_headers.go" -out "${HERE}/${DSTROOT}/include/openssl" "${TMPDIR}/symbols.txt" ) # Remove the product, as we no longer need it. $sed -i -e 's/MANGLE_START\*\//MANGLE_START/' -e 's/\/\*MANGLE_END/MANGLE_END/' "${HERE}/Package.swift" ) # Now remove any weird symbols that got in and would emit warnings. $sed -i -e '/#define .*\..*/d' "${DSTROOT}"/include/openssl/boringssl_prefix_symbols*.h # Now edit the headers again to add the symbol mangling. 
echo "ADDING symbol mangling" perl -pi -e '$_ .= qq(\n#define BORINGSSL_PREFIX CNIOBoringSSL\n) if /#define OPENSSL_HEADER_BASE_H/' "$DSTROOT/include/openssl/base.h" while IFS= read -r -d '' assembly_file do $sed -i '1 i #define BORINGSSL_PREFIX CNIOBoringSSL' "$assembly_file" done < <(find "$DSTROOT" -name "*.S" -print0) namespace_inlines "$DSTROOT" } # BoringSSL includes a few non-namespaced C++ structures. These aren't namespaced because they're exposed # in C-land, which doesn't know about the namespacing. Sadly, these structures include constructors and destructors, # and if those aren't namespaced we're still able to conflict. # # This function is responsible for identifying them and manually cleaning them up. We run this only on # macOS because we don't believe that the cross-platform architectures will hit any other structures. function mangle_cpp_structures { echo "MANGLING C++ structures" ( # We need a .a: may as well get SwiftPM to give it to us. # Temporarily enable the product we need. $sed -i -e 's/MANGLE_START/MANGLE_START*\//' -e 's/MANGLE_END/\/*MANGLE_END/' "${HERE}/Package.swift" # Build for macOS. swift build --product CNIOBoringSSL # Woah, this is a hell of a command! What does it do? # # The nm command grabs all global defined symbols. We then run the C++ demangler over them and look for methods with '::' in them: # these are C++ methods. We then exclude any that contain CNIOBoringSSL (as those are already namespaced!) and any that contain swift # (as those were put there by the Swift runtime, not us). This gives us a list of symbols. The following cut command # grabs the type name from each of those (the bit preceding the '::'). Then, we sort and uniqify that list. # Finally, we remove any symbol that ends in std. This gives us all the structures that need to be renamed. 
structures=$(nm -gUj "$(swift build --show-bin-path)/libCNIOBoringSSL.a" | c++filt | grep "::" | grep -v -e "CNIOBoringSSL" -e "swift" | cut -d : -f1 | grep -v "std$" | $sed -E -e 's/([^<>]*)(<[^<>]*>)?/\1/' | sort | uniq) for struct in ${structures}; do echo "#define ${struct} BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ${struct})" >> "${DSTROOT}/include/CNIOBoringSSL_boringssl_prefix_symbols.h" done # Remove the product, as we no longer need it. $sed -i -e 's/MANGLE_START\*\//MANGLE_START/' -e 's/\/\*MANGLE_END/MANGLE_END/' "${HERE}/Package.swift" ) } case "$(uname -s)" in Darwin) sed=gsed ;; *) # shellcheck disable=SC2209 sed=sed ;; esac if ! hash ${sed} 2>/dev/null; then echo "You need sed \"${sed}\" to run this script ..." echo echo "On macOS: brew install gnu-sed" exit 43 fi echo "REMOVING any previously-vendored BoringSSL code" rm -rf $DSTROOT/include rm -rf $DSTROOT/ssl rm -rf $DSTROOT/crypto rm -rf $DSTROOT/third_party rm -rf $DSTROOT/gen echo "CLONING boringssl" mkdir -p "$SRCROOT" git clone https://boringssl.googlesource.com/boringssl "$SRCROOT" cd "$SRCROOT" BORINGSSL_REVISION=$(git rev-parse HEAD) cd "$HERE" echo "CLONED boringssl@${BORINGSSL_REVISION}" echo "OBTAINING submodules" ( cd "$SRCROOT" git submodule update --init ) echo "GENERATING assembly helpers" ( cd "$SRCROOT" cd .. 
mkdir -p "${SRCROOT}/crypto/third_party/sike/asm" python3 "${HERE}/scripts/build-asm.py" ) PATTERNS=( 'include/openssl/*.h' 'include/openssl/*/*.h' 'ssl/*.h' 'ssl/*.cc' 'crypto/*.h' 'crypto/*.cc' 'crypto/*/*.h' 'crypto/*/*.cc' 'crypto/*/*.S' 'crypto/*/*/*.h' 'crypto/*/*/*.cc.inc' 'crypto/*/*/*.S' 'crypto/*/*/*/*.cc.inc' 'gen/crypto/*.cc' 'gen/crypto/*.S' 'gen/bcm/*.S' 'third_party/fiat/*.h' 'third_party/fiat/asm/*.S' #'third_party/fiat/*.c' ) EXCLUDES=( '*_test.*' 'test_*.*' 'test' 'example_*.cc' ) echo "COPYING boringssl" for pattern in "${PATTERNS[@]}" do for i in $SRCROOT/$pattern; do path=${i#"$SRCROOT"} dest="$DSTROOT$path" dest_dir=$(dirname "$dest") mkdir -p "$dest_dir" cp "$SRCROOT/$path" "$dest" done done for exclude in "${EXCLUDES[@]}" do echo "EXCLUDING $exclude" find $DSTROOT -d -name "$exclude" -exec rm -rf {} \; done mangle_symbols echo "RENAMING header files" ( # We need to rearrange a coouple of things here, the end state will be: # - Headers from 'include/openssl/' will be moved up a level to 'include/' # - Their names will be prefixed with 'CNIOBoringSSL_' # - The headers prefixed with 'boringssl_prefix_symbols' will also be prefixed with 'CNIOBoringSSL_' # - Any include of another header in the 'include/' directory will use quotation marks instead of angle brackets # Let's move the headers up a level first. cd "$DSTROOT" mv include/openssl/* include/ rmdir "include/openssl" # Now let's remove the pki subdirectory, as we don't need it. rm -rf include/pki # Now change the imports from " to "", apply the same prefix to the 'boringssl_prefix_symbols' headers. # shellcheck disable=SC2038 find . -name "*.[ch]" -or -name "*.cc" -or -name "*.S" -or -name "*.cc.inc" | xargs $sed -i -r -e 's#include ]+/)*)(.+.h)>#include <\1CNIOBoringSSL_\3>#' -e 's+include ]+/)*)(.+.h)"#include "\1CNIOBoringSSL_\3"#' # Okay now we need to rename the headers adding the prefix "CNIOBoringSSL_". 
pushd include for x in *.h; do mv -- "$x" "CNIOBoringSSL_${x}"; done for x in **/*.h; do mv -- "$x" "${x%/*}/CNIOBoringSSL_${x##*/}"; done # Finally, make sure we refer to them by their prefixed names, and change any includes from angle brackets to quotation marks. # shellcheck disable=SC2038 find . -name "*.h" | xargs $sed -i -r -e 's#include "(([^/"]+/)*)(.+.h)"#include "\1CNIOBoringSSL_\3"#' -e 's/include /include "CNIOBoringSSL_\1"/' popd ) echo "PATCHING BoringSSL" git apply "${HERE}/scripts/patch-1-inttypes.patch" git apply "${HERE}/scripts/patch-2-inttypes.patch" git apply "${HERE}/scripts/patch-3-more-inttypes.patch" # We need to avoid having the stack be executable. BoringSSL does this in its build system, but we can't. echo "PROTECTING against executable stacks" ( cd "$DSTROOT" # shellcheck disable=SC2038 find . -name "*.S" | xargs $sed -i '$ a #if defined(__linux__) && defined(__ELF__)\n.section .note.GNU-stack,"",%progbits\n#endif\n' ) mangle_cpp_structures # We need BoringSSL to be modularised echo "MODULARISING BoringSSL" cat << EOF > "$DSTROOT/include/CNIOBoringSSL.h" //===----------------------------------------------------------------------===// // // This source file is part of the SwiftNIO open source project // // Copyright (c) 2019 Apple Inc. 
and the SwiftNIO project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information // See CONTRIBUTORS.txt for the list of SwiftNIO project authors // // SPDX-License-Identifier: Apache-2.0 // //===----------------------------------------------------------------------===// #ifndef C_NIO_BORINGSSL_H #define C_NIO_BORINGSSL_H #include "CNIOBoringSSL_aead.h" #include "CNIOBoringSSL_aes.h" #include "CNIOBoringSSL_arm_arch.h" #include "CNIOBoringSSL_asm_base.h" #include "CNIOBoringSSL_asn1_mac.h" #include "CNIOBoringSSL_asn1t.h" #include "CNIOBoringSSL_base.h" #include "CNIOBoringSSL_bio.h" #include "CNIOBoringSSL_blake2.h" #include "CNIOBoringSSL_blowfish.h" #include "CNIOBoringSSL_bn.h" #include "CNIOBoringSSL_boringssl_prefix_symbols.h" #include "CNIOBoringSSL_boringssl_prefix_symbols_asm.h" #include "CNIOBoringSSL_cast.h" #include "CNIOBoringSSL_chacha.h" #include "CNIOBoringSSL_ctrdrbg.h" #include "CNIOBoringSSL_cmac.h" #include "CNIOBoringSSL_conf.h" #include "CNIOBoringSSL_cpu.h" #include "CNIOBoringSSL_curve25519.h" #include "CNIOBoringSSL_des.h" #include "CNIOBoringSSL_dtls1.h" #include "CNIOBoringSSL_e_os2.h" #include "CNIOBoringSSL_ec.h" #include "CNIOBoringSSL_ec_key.h" #include "CNIOBoringSSL_ecdsa.h" #include "CNIOBoringSSL_err.h" #include "CNIOBoringSSL_evp.h" #include "CNIOBoringSSL_hkdf.h" #include "CNIOBoringSSL_hmac.h" #include "CNIOBoringSSL_hpke.h" #include "CNIOBoringSSL_hrss.h" #include "CNIOBoringSSL_kdf.h" #include "CNIOBoringSSL_md4.h" #include "CNIOBoringSSL_md5.h" #include "CNIOBoringSSL_mldsa.h" #include "CNIOBoringSSL_mlkem.h" #include "CNIOBoringSSL_obj_mac.h" #include "CNIOBoringSSL_objects.h" #include "CNIOBoringSSL_opensslv.h" #include "CNIOBoringSSL_ossl_typ.h" #include "CNIOBoringSSL_pkcs12.h" #include "CNIOBoringSSL_poly1305.h" #include "CNIOBoringSSL_rand.h" #include "CNIOBoringSSL_rc4.h" #include "CNIOBoringSSL_ripemd.h" #include "CNIOBoringSSL_rsa.h" #include "CNIOBoringSSL_safestack.h" #include 
"CNIOBoringSSL_service_indicator.h" #include "CNIOBoringSSL_sha.h" #include "CNIOBoringSSL_siphash.h" #include "CNIOBoringSSL_slhdsa.h" #include "CNIOBoringSSL_srtp.h" #include "CNIOBoringSSL_ssl.h" #include "CNIOBoringSSL_time.h" #include "CNIOBoringSSL_trust_token.h" #include "CNIOBoringSSL_type_check.h" #include "CNIOBoringSSL_x509_vfy.h" #include "CNIOBoringSSL_x509v3.h" #include "experimental/CNIOBoringSSL_kyber.h" #endif // C_NIO_BORINGSSL_H EOF cat << EOF > "$DSTROOT/include/module.modulemap" module CNIOBoringSSL { umbrella header "CNIOBoringSSL.h" export * } EOF echo "RECORDING BoringSSL revision" $sed -i -e "s/BoringSSL Commit: [0-9a-f]\+/BoringSSL Commit: ${BORINGSSL_REVISION}/" "$HERE/Package"*.swift echo "This directory is derived from BoringSSL cloned from https://boringssl.googlesource.com/boringssl at revision ${BORINGSSL_REVISION}" > "$DSTROOT/hash.txt" echo "CLEANING temporary directory" rm -rf "${TMPDIR}"